serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
2,401 | #include <stdio.h>
#include <stdlib.h>
/* Print a usage error and abort. Wrapped in do/while(0) so the macro is a
 * single statement (safe inside un-braced if/else). Relies on `argv`
 * being in scope at the expansion site. */
#define ERROR(s) do { printf("%s \n Usage: %s <no. of elements> <random seed>\n", s, argv[0]); exit(-1); } while (0)
/* Abort if the most recent CUDA runtime call failed; `str` labels the call.
 * Bug fix: the trailing semicolon after while(0) was removed so
 * `if (...) CUDA_ERROR_EXIT("x"); else ...` parses correctly. */
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0)
// One pass of an in-place XOR tree reduction over `unsigned` words in
// dev_in. Each thread owns a window of `interval` words starting at word
// index k = tid * interval and folds the first and last word of the window
// together, zeroing the word that gives up its value.
// NOTE(review): k and interval are word indices here, so num_elements is
// treated as a count of unsigned words — confirm units against the caller.
__global__ void Xor(char *dev_in, int num_elements, int interval)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int k = tid * interval;
// Window starts past the end of the data: nothing to do.
if(k >= num_elements)
return;
// Clamp the last (partial) window to the array end.
if(k + interval >= num_elements)
interval = num_elements - k;
unsigned *tmp1 = (unsigned *)(dev_in + k * sizeof(unsigned));
unsigned *tmp2 = tmp1 + interval - 1;
// Single-word window: nothing to fold.
if(tmp1 == tmp2)
return;
// Alternate which end of the window keeps the XOR result so later,
// wider passes can find the surviving word again.
if(tid % 2 == 0)
{
*tmp1 = (*tmp1) ^ (*tmp2);
*tmp2 = 0;
}
else
{
*tmp2 = (*tmp1) ^ (*tmp2);
*tmp1 = 0;
}
}
// Host driver: fill a buffer with num_elements random unsigned words, then
// repeatedly launch Xor with a doubling interval until the whole array has
// been folded into element 0, which is printed.
int main(int argc, char **argv)
{
int i, seed, interval;
unsigned *in;
char *tmp;
char *dev_in;
int blocks, threads = 1024;
unsigned num_elements;
if(argc != 3)
{
ERROR("Invalid number of parameters!");
}
// NOTE(review): num_elements is unsigned, so <= 0 only rejects 0; a
// negative argument wraps to a huge positive count.
num_elements = atoi(argv[1]);
if(num_elements<=0)
{
ERROR("Invalid number of elements!");
}
seed = atoi(argv[2]);
if(seed<=0)
{
ERROR("Invalid seed value!");
}
// Host buffer of num_elements unsigned words, filled from rand().
tmp = (char *)malloc(num_elements * sizeof(unsigned));
in = (unsigned *)tmp;
srand(seed);
for(i=0; i<num_elements; i++)
{
*in = rand();
in++;
}
cudaMalloc(&dev_in, num_elements * sizeof(unsigned));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(dev_in, tmp, num_elements * sizeof(unsigned), cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
blocks = (num_elements + 1023) / threads;
// Tree reduction: pass p folds windows of 2^p words. Shrink the grid
// while more than one block remains; after that halve the thread count
// until a single thread finishes the fold.
for(interval=1;;interval++)
{
if(blocks > 1)
blocks = ((num_elements >> interval) + 1023) / threads;
else
threads = threads >> 1;
Xor<<<blocks, threads>>>(dev_in, num_elements, 1 << interval);
CUDA_ERROR_EXIT("kernel invocation");
// NOTE(review): a full device-to-host copy every pass is only needed
// for the final value; it also acts as a synchronization point.
cudaMemcpy(tmp, dev_in, num_elements * sizeof(unsigned), cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("cudaMemcpy");
in = (unsigned *)tmp;
if(threads <= 1)
break;
}
// The reduction result lives in the first word of the array.
cudaMemcpy(tmp, dev_in, sizeof(unsigned), cudaMemcpyDeviceToHost);
cudaFree(dev_in);
in = (unsigned *)tmp;
printf("%u\n", *in);
free(tmp);
return 0;
}
|
2,402 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
// Compute the mean of every candidate window of x: thread `me` maps to a
// (start position, window length) pair; valid pairs get their window mean
// written to dxbar, invalid pairs keep the sentinel -1000.0 so a later
// max-reduction ignores them.
// Launch: 1-D grid of 2-D blocks; me indexes an (n-k+1) x (n-k+1) table.
__global__ void burst(float *dx, int n, int k, float *dxbar, int maxWinSize) {
int tid=threadIdx.y*blockDim.x+threadIdx.x;
int me=blockIdx.x*blockDim.x*blockDim.y+tid;
int width=n-k+1;
int x=me%width;
int y=me/width;
int perstart=x;//start position of the window
int perend;
int indx=0;
//extern __shared__ float sx[];
int perlen=y+k;//length of window, or window size. Notice if minimum windowSize k is smaller than n/2 ,we only need maximum windowSize to be 2k.
// Output slot for this (start, length) pair in the dxbar table.
indx=perstart*(n-k+1)+perlen-k;
// Sentinel: marks combinations that fail the validity check below.
dxbar[indx]=-1000.0;
/*
if(me<n){
sx[me]=dx[me];
}
__syncthreads();
*/
// A window cannot extend past the end of the data.
if(maxWinSize>n-perstart){
maxWinSize=n-perstart;
}
if (perstart<=n-k && perlen>=k && perlen<=maxWinSize){
perend=perstart+perlen-1;
int i; float tot=0;
for(i=perstart;i<=perend;i++) tot+=dx[i];
dxbar[indx]=tot/(perend-perstart+1);
}
else{
//printf("mean, indx=%f, %d\n", dxbar[indx], indx);
return;
}
__syncthreads();
//printf("mean,indx=%f, %d\n", dxbar[indx], indx);
}
// In-place block-wide max reduction on global memory: each block reduces
// its blockDim.x*blockDim.y contiguous elements of g_idata and writes the
// block maximum to g_odata[blockIdx.x]. (Unused extern __shared__
// declaration removed.)
// NOTE(review): threads read g_idata[i + s], so every block must be backed
// by a full tile of valid input.
__global__ void reduce(float *g_idata, float *g_odata){
int tid=threadIdx.y*blockDim.x+threadIdx.x;
unsigned int i=blockIdx.x*blockDim.x*blockDim.y+tid;
for(unsigned int s=blockDim.x*blockDim.y/2; s>0;s>>=1){
// Bug fix: only the lower half (tid < s) may update in this round.
// The previous guard (tid < blockDim.x*blockDim.y) let thread tid
// read g_idata[i+s] while thread tid+s was concurrently writing it.
if(tid<s){
if(g_idata[i+s]>g_idata[i]){
g_idata[i]=g_idata[i+s];
}
}
__syncthreads();
}
if(tid==0) {g_odata[blockIdx.x]=g_idata[i];
printf("in reduce, blockIdx.x, ans,%d %f,\n", blockIdx.x, g_odata[blockIdx.x]);
}
}
// things need to fix probably: bigmax allocate one int; passing n and k and bigmax to cuda function
// Host orchestration: compute the mean of every candidate window of x on
// the device (burst), then max-reduce the means (reduce) and fold the
// per-block maxima into bigmax[0]. startend is currently unused (steps
// that would fill it are not implemented).
// NOTE(review): xbar and out are malloc'd but never freed, and xbar is
// copied to the device uninitialized; cudaThreadSynchronize is deprecated
// in favor of cudaDeviceSynchronize.
void maxburst(float *x, int n, int k, int *startend, float *bigmax){
float *dx; //device x
int asize = n*sizeof(float);
float *out;//each block has an output max mean answer.
float *dout; //on device, out.
float* xbar; //Means for every possiblle start position, and window size.
float* dxbar;
int nblk=(n-k+1)*(n-k+1)/128+1;//Number of blocks (128 = 8x16 threads each)
int maxWinSize=n;
// copy host matrix to device matrix
xbar=(float *) malloc(sizeof(float)*(n-k+1)*(n-k+1));
out=(float *) malloc(sizeof(float)*nblk);
// allocate space for device matrix
cudaMalloc ((void **)&dx,asize);
cudaMalloc ((void **)&dxbar, sizeof(float)*(n-k+1)*(n-k+1));
cudaMalloc (( void **)&dout, nblk*sizeof(float));
cudaMemcpy(dx,x,asize ,cudaMemcpyHostToDevice);
cudaMemcpy(dxbar,xbar,sizeof(float)*(n-k+1)*(n-k+1) ,cudaMemcpyHostToDevice);
cudaMemcpy(dout, out, sizeof(float)*(nblk), cudaMemcpyHostToDevice);
// set up parameters for threads structure
dim3 dimGrid(nblk,1); // n blocks
dim3 dimBlock(8, 16,1);
// Cap the window size: when the minimum window k exceeds n/2, windows
// longer than 2k are never optimal.
if(n>2*k){
maxWinSize=2*k;
}
burst<<<dimGrid,dimBlock>>>(dx,n,k,dxbar, maxWinSize);
cudaThreadSynchronize();
// Per-block max of the window means.
reduce<<<dimGrid, dimBlock>>>(dxbar, dout);
// copy row vector from device to host
//cudaMemcpy(xbar, dxbar, sizeof(float)*(n-k+1)*(n-k+1), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaMemcpy(out, dout, sizeof(float)*nblk, cudaMemcpyDeviceToHost);
// Final fold of per-block maxima on the host.
for (int i=0; i<nblk; i++){
//printf("%f\n,",out[i]);
if (out[i]>bigmax[0]){
bigmax[0]=out[i];
}
}
printf("bigmax is%f\n", bigmax[0]);
cudaFree(dxbar);
cudaFree(dout);
cudaFree (dx);
}
// Smoke test: maximum window mean of x = 0..999 with minimum window 900.
// NOTE(review): x, startend and bigmax are malloc'd but never freed, and
// main has no return statement (legal for main in C++, but inconsistent
// with the other drivers in this dump).
int main(int arc, char **argv){
float *x;
int n=1000;
int k=900;
int *startend;  // unused output slot for the (start, end) pair
float *bigmax;  // single-element accumulator for the best mean
bigmax=(float*) malloc(sizeof(float));
startend=(int*) malloc(sizeof(int)*2);
x=(float*) malloc(sizeof(float)*n);
int i;
// Deterministic ramp input: x[i] = i.
for(i=0; i<n; i++){
x[i]=i*1.0;
}
bigmax[0]=0;
maxburst(x, n, k, startend, bigmax);
}
|
2,403 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <climits>
/* Render the board to stdout: 'o' marks a live cell, space a dead one.
 * NOTE: row stride is `height` (square boards assumed), matching the rest
 * of this file. */
void printBoard(unsigned char* buffer, int width, int height)
{
    printf("----------------\n");
    for (int row = 0; row < height; row++){
        for (int col = 0; col < width; col++)
            printf("%c ", buffer[row * height + col] ? 'o' : ' ');
        printf("\n");
    }
    printf("----------------\n");
}
/* Compare two boards cell by cell; returns true (and announces it) only
 * when every cell matches. Row stride is `height`, as elsewhere in this
 * file. */
bool equal(unsigned char *array1, unsigned char *array2, int width, int height){
    for (int row = 0; row < height; ++row){
        const int base = row * height;
        for (int col = 0; col < width; ++col){
            if (array1[base + col] != array2[base + col])
                return false;
        }
    }
    printf("Evolution stoped!\n");
    return true;
}
/* Return true (and announce it) when no live cell (0x1) remains on the
 * board. Row stride is `height`, as elsewhere in this file. */
bool empty(unsigned char *array, int width, int height){
    for (int row = 0; row < height; ++row){
        const unsigned char *line = array + row * height;
        for (int col = 0; col < width; ++col){
            if (line[col] == 0x1)
                return false;
        }
    }
    printf("Everybody died!\n");
    return true;
}
// bool equal(unsigned char *array1, unsigned char *array2, int w, int h){
// printf("Equal started!\n");
// for(int i = 0; i < w * h; i++)
// if(array1[i] != array2[i])
// return false;
//
// printf("Evolution stoped!\n");
// return true;
// }
//
// bool empty(unsigned char *array, int w, int h){
// printf("empty started!\n");
// for(int i = 0 ; i < w * h; i++)
// if(array[i] == 0x1)
// return false;
// printf("Everybody died!\n");
// return true;
// }
// One Game-of-Life generation: thread (x, y) updates cell indx of
// pBuffer2 from pBuffer1 by counting the 8 in-bounds neighbours (no
// wrap-around). Launched with 2x2 blocks, so x/y cover the board when the
// dimensions are multiples of 2.
// NOTE(review): x is bounds-checked against `height` and y against
// `width`, while the linear index uses stride `height`; this is only
// self-consistent for square boards — confirm before using width!=height.
// NOTE(review): the initial write to pBuffer2[indx] is not bounds-guarded;
// threads past the board edge (when dims aren't multiples of 2) write out
// of range.
__global__ void golGpu(int height, int width, unsigned char* pBuffer1, unsigned char* pBuffer2){
int x = blockIdx.x * 2 + threadIdx.x;
int y = blockIdx.y * 2 + threadIdx.y;
int indx = x * height + y;
// Default: live cells with 2 or 3 neighbours survive unchanged.
pBuffer2[indx] = pBuffer1[indx];
int num = 0;
if (x-1 >= 0 && x-1 < height && y >= 0 && y < width)
num += pBuffer1[(x-1) * height + y];
if (x+1 >= 0 && x+1 < height && y >= 0 && y < width)
num += pBuffer1[(x+1) * height + y];
if (x >= 0 && x < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[x * height + (y-1)];
if (x >= 0 && x < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[x * height + (y+1)];
if (x-1 >= 0 && x-1 < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[(x-1) * height + (y-1)];
if (x-1 >= 0 && x-1 < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[(x-1) * height + (y+1)];
if (x+1 >= 0 && x+1 < height && y-1 >= 0 && y-1 < width)
num += pBuffer1[(x+1) * height + (y-1)];
if (x+1 >= 0 && x+1 < height && y+1 >= 0 && y+1 < width)
num += pBuffer1[(x+1) * height + (y+1)];
// Underpopulation or overpopulation kills the cell.
if(num < 2)
pBuffer2[indx] = 0x0;
if(num > 3)
pBuffer2[indx] = 0x0;
// Reproduction: a dead cell with exactly 3 neighbours comes alive.
if(num == 3 && !pBuffer1[indx])
pBuffer2[indx] = 0x1;
//return num;
}
/*
 * Fill `buffer` (width*height cells) with a starting state: from the file
 * named by argv[1] when argc == 2 (format: two dimension numbers followed
 * by width*height 0/1 cell values), otherwise ~30% random live cells.
 * Bug fix: previously, an unopenable file returned with `buffer` left
 * uninitialized; we now fall through to the random initialization.
 */
void create_buffer(unsigned char* buffer, int width, int height, char** argv, int argc) {
    if (argc == 2) {
        char *filename = argv[1];
        FILE *file = fopen(filename, "r");
        if (file) {
            // Skip the two dimension numbers that precede the cell data.
            fscanf(file, "%*d");
            fscanf(file, "%*d");
            int arraySize = width * height;
            for (int i = 0; i < arraySize; i++) {
                int num = 0;  // default to dead if a read fails
                fscanf(file, "%d", &num);
                buffer[i] = num;
            }
            fclose(file);
            return;
        }
        // fopen failed: fall through and fill randomly instead.
    }
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            float rnd = rand() / (float) RAND_MAX;
            // NOTE: row stride is `height` (square boards assumed),
            // consistent with the rest of this file.
            buffer[i * height + j] = (rnd >= 0.7f) ? 0x1 : 0x0;
        }
    }
}
/*
 * Game-of-Life driver: build a starting board (from a file, CLI dimensions,
 * or a 12x12 random default), then ping-pong two device buffers through
 * golGpu until the board is empty, stable, or the iteration cap is hit.
 */
int main(int argc, char **argv){
    int width = 12, height = 12;   // safe defaults, also used if the file can't be read
    int iterations = INT_MAX;      // run until empty/stable unless capped
    // Seed the host RNG used for random starting boards.
    time_t t;
    srand((unsigned) time(&t));
    // argc==2: dimensions come from the input file; argc==3/4: from the CLI
    // (optionally with an iteration cap); otherwise keep the 12x12 default.
    if(argc == 2){
        FILE *file = fopen(argv[1], "r");
        if (file) {
            fscanf(file, "%d", &width);
            fscanf(file, "%d", &height);
            fclose(file);  // bug fix: fclose(NULL) was possible when fopen failed
        }
    }else if(argc == 3){
        width = atoi(argv[1]);
        height = atoi(argv[2]);
    }else if(argc == 4){
        width = atoi(argv[1]);
        height = atoi(argv[2]);
        iterations = atoi(argv[3]);
    }
    // Host board.
    unsigned char* buffer = (unsigned char *) malloc(sizeof(unsigned char) * width * height);
    create_buffer(buffer, width, height, argv, argc);
    // Device boards: pBuffer1 starts as the initial state, pBuffer2 zeroed.
    unsigned char* pBuffer1;
    cudaMalloc((void **)&pBuffer1, width * height * sizeof(unsigned char));
    cudaMemcpy(pBuffer1, buffer, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice);
    unsigned char* pBuffer2;
    cudaMalloc((void **)&pBuffer2, width * height * sizeof(unsigned char));
    // Bug fix: this was a cudaMemcpy from a NULL host pointer; zero the
    // device buffer directly instead.
    cudaMemset(pBuffer2, 0, width * height * sizeof(unsigned char));
    dim3 blocksize(2, 2);
    dim3 gridsize((width + blocksize.x - 1)/blocksize.x, (height + blocksize.y - 1)/blocksize.y , 1);
    unsigned char* current;
    unsigned char* next;
    // Bug fix: sizeof(unsigned char*) over-allocated and over-copied by 8x.
    unsigned char* previeousResult = (unsigned char *)malloc(width * height * sizeof(unsigned char));
    int gen = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    do{
        if(gen == iterations) break;
        memcpy(previeousResult, buffer, width * height * sizeof(unsigned char));
        // Swap buffer roles each generation to preserve the previous state.
        if ((gen % 2) == 0)
        {
            current = pBuffer1;
            next = pBuffer2;
        }
        else
        {
            current = pBuffer2;
            next = pBuffer1;
        }
        golGpu<<<gridsize, blocksize>>>(height, width, current, next);
        gen++;
        // These blocking copies also synchronize with the kernel launch.
        cudaMemcpy(buffer, next, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost);
        cudaMemcpy(previeousResult, current, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    }while(!empty(buffer,width,height) && !equal(buffer, previeousResult, width, height));
    printf("Generations: %d\n", gen);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time elapsed: %f ms\n",milliseconds);
    cudaFree(pBuffer1);
    cudaFree(pBuffer2);
    free(buffer);
    free(previeousResult);
    return 0;
}
|
// Grid-stride kernel: set every element of `a` to 1, for any grid size.
__global__
void deviceKernel(int *a, int N)
{
    int start = threadIdx.x + blockIdx.x * blockDim.x;
    int step = blockDim.x * gridDim.x;
    for (int i = start; i < N; i += step)
        a[i] = 1;
}
// CPU counterpart of deviceKernel: set every element of `a` to 1.
void hostFunction(int *a, int N)
{
    for (int idx = 0; idx < N; ++idx)
        a[idx] = 1;
}
// Exercise scaffold for observing unified-memory behavior: allocates a
// managed buffer and frees it. deviceKernel/hostFunction above are the
// GPU/CPU accessors to combine in the experiments described below.
int main()
{
int N = 2<<24;  // 2 * 2^24 = 33,554,432 ints (~128 MiB)
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size);
/*
 * Experiments to observe `cudaMallocManaged` behavior in detail:
 *
 * What happens when only the GPU accesses the unified memory?
 * What happens when only the CPU accesses the unified memory?
 * What happens when the GPU accesses it first, then the CPU?
 * What happens when the CPU accesses it first, then the GPU?
 *
 * Before each experiment, form a hypothesis about unified-memory behavior
 * — page faults in particular — then verify it by running `nvprof`.
 */
cudaFree(a);
}
|
2,405 | #include <math.h>
#include <fstream>
#include <stdio.h>
#include <exception>
#define MAX_THREADS 1024
#define INF 99999999999.0
#define PI 3.14159265
using namespace std;
// 3 points.
// Neighbour-joining step 1: compute the divergence res[idx] of each object,
// i.e. the sum of its distances to every other object, reading only the
// lower triangle of the distance matrix (distances are stored below the
// diagonal).
__global__ void nj_step1(float* mat, float* res,int width) // Calculate the tree-divergence for every object.
{
    size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= (size_t)width)
        return;
    float total = 0.0f;
    for (int row = 0; row < width; row++) {
        // Pick whichever of (col,row)/(row,col) lies in the lower triangle.
        total += (row < (int)col) ? mat[col * width + row]
                                  : mat[(size_t)row * width + col];
    }
    res[col] = total;
}
// 6 points.
// Neighbour-joining step 2: build the rate-corrected matrix
// Mt[i][j] = M[i][j] - (r[i] + r[j]) / (width - 2), processing only a
// triangular arrangement of thread blocks. `limits` holds the cumulative
// block counts per block-row (triangular numbers - 1) and is used to map
// the linear block id bx to (block row, block column).
// NOTE(review): `limits[k] != NULL` compares an int against NULL (0), so
// the scan stops at any zero entry — limits[0] is 0 by construction, which
// makes this guard suspicious; confirm intent. k can also run past the end
// of `limits` for the last blocks.
__global__ void nj_step2(float* mat_t, float* mat, float* diverg, int width, int* limits) // Calculate a new matrix (Mt) of distances.
{
int bx = blockIdx.x;
int k = 0;
int blockfil = 0;
int blockcol = 0;
// Find which triangular block-row this linear block id falls in.
while(limits[k] != NULL && limits[k] < bx){
k++;
}
if(k!=0)
blockfil = k - 1;
if(k!=0)
blockcol = bx - limits[k - 1] - 1;
int idx = threadIdx.x;
int idy = threadIdx.y;
// Diagonal blocks: only the strict lower triangle is computed; cells on
// or above the diagonal get the PI sentinel so the later minimum search
// skips them.
if( (limits[k]) == blockcol){
int i = (blockfil * blockDim.x) + idx;
int j = (blockcol * blockDim.y) + idy;
if (i < width && j < width){
if(idy < idx){
mat_t[i*width + j] = mat[i*width + j] - (diverg[i] + diverg[j])/(width-2);
}else
mat_t[i*width + j] = PI;
}
}else{
// Off-diagonal blocks: every in-range cell is corrected.
int i = (blockfil * blockDim.x) + idx;
int j = (blockcol * blockDim.y) + idy;
if (i < width && j < width)
mat_t[i*width + j] = mat[i*width + j] - (diverg[i] + diverg[j])/(width-2);
}
}
// Neighbour-joining driver: read a lower-triangular distance matrix from a
// file, then iterate steps 1 (divergences) and 2 (corrected matrix) on the
// GPU. Steps 3-6 (pair selection and matrix shrinking) are unimplemented,
// so each pass only decrements N.
// NOTE(review): r, Mt, limits, r_d, M_d, Mt_d and limits_d are reallocated
// every iteration without freeing the previous allocation — this leaks
// host and device memory each pass.
int main()
{
int N; // number of elements (the same as the width of the matrix).
int numblocks; // number of necessary blocks in the GPU
int b = 8; // block dimension: 8x8 = 64 threads, a multiple of 32 (warp size)
float* M; // matrix of distances
float* Mt; // temporal matrix for finding the smallest values.
float* r; // array of divergences
char buffer[100];
try{
printf("Name of the input_file: ");
scanf("%s",buffer);
ifstream input(buffer);
input>>N; // getting the number of elements.
printf("%d elements.\n",N);
// Skip the remainder of the header lines.
input.getline(buffer,100);
input.getline(buffer,100);
input.getline(buffer,100);
M = new float[N*N];
r = new float[N];
// Initialize the matrix with 0-values
for(int i=0; i<N; i++)
for(int j=0; j<N; j++)
M[i*N+j]=0;
// The file supplies only the strict lower triangle.
for(int i=1; i<N; i++)
for(int j=0; j<i; j++)
input>>M[i*N + j];
// Printing Matrix
printf("Printing input matrix");
for(int i=0; i<N; i++){
for (int j=0; j<N; j++)
printf("%4.2f ",M[i*N + j]);
printf("\n");
}
printf("----------------------- o ----------------------\n\n");
input.close();
}catch(exception& e){
printf("Problem trying to read file.\n");
return 1;
}
while(N>2)
{
printf("***********************N=%d***********************\n\n",N);
numblocks = ceil((float)N/MAX_THREADS); // Update the number of blocks for every iteration.
r = new float[N];
Mt = new float[N*N]; // initializing the temporal Matrix.
float* r_d; // Allocate divergency array in the device.
cudaMalloc((void**) &r_d, sizeof(float)*N);
float* M_d; // Allocate distance matrix in the device and copy.
cudaMalloc((void**) &M_d, sizeof(float)*N*N);
cudaMemcpy(M_d,M,sizeof(float)*N*N,cudaMemcpyHostToDevice);
nj_step1<<<numblocks,N>>>(M_d,r_d,N); // Kernel launch for step 1.
cudaMemcpy(r,r_d,sizeof(float)*N,cudaMemcpyDeviceToHost); // Copying response array to the Host.
// Printing new divergence matrix.
for(int i=0; i<N; i++)
printf("%4.2f ",r[i]);
printf("\n");
int nb = ceil((double)((double)N/(double)b));
printf("nb: %d\n",nb);
// NOTE(review): this inner `numblocks` shadows the outer one above.
int numblocks = (nb*(nb+1))/2.0; // Number of blocks like a triangular matrix.
printf("number of blocks for step2: %d\n\n",numblocks);
// limits[i] = cumulative block count at the end of block-row i.
int* limits = new int[nb];
for(int i=0; i<nb ; i++)
limits[i] = (int)((((i+1)*(i+2))/2.0) - 1);
float* Mt_d;
cudaMalloc((void**) &Mt_d, sizeof(float)*N*N);
int* limits_d;
cudaMalloc((void**) &limits_d, sizeof(int)*nb);
cudaMemcpy(limits_d,limits,sizeof(int)*nb,cudaMemcpyHostToDevice);
nj_step2<<<numblocks,dim3(b,b)>>>(Mt_d,M_d,r_d,N,limits_d); // Kernel launch for step 2.
cudaMemcpy(Mt,Mt_d,sizeof(float)*N*N,cudaMemcpyDeviceToHost); // Copying response matrix to the Host.
// Printing temporal distance matrix (Mt).
printf("Printing temporal distance matrix (Mt).\n");
for(int i=0; i<N; i++){
for(int j=0; j<N; j++)
printf("%4.2f ",Mt[i*N + j]);
printf("\n");
}
scanf("%s",buffer);
// Step 3: Select objects "i" and "j" where M[i][j] is the minimum. 1 point.
// Step 4: Create a new object U and delete "i" and "j". 3 points.
// Step 5: Calculate distances from "i" to U and "j" to U. 2 points.
// Step 6: Calculate the distance between U and the rest. 4 points.
N = N - 1;
}
return 0;
}
2,406 | #include <stdio.h>
#include <stdlib.h>
#define N 22
// Element-wise addition of two N x N matrices: one thread per element in a
// single block, thread (x, y) computing C[x][y] = A[x][y] + B[x][y].
__global__ void MatAdd(int A[][N], int B[][N], int C[][N]){
    int row = threadIdx.x;
    int col = threadIdx.y;
    C[row][col] = A[row][col] + B[row][col];
}
//int** randmatfunc();
// Fill `newmat` with random values in [1, 100], echoing each value (and a
// trailing separator line) to stdout as it is generated.
void randmatfunc(int newmat[N][N]){
    for (int row = 0; row < N; row++){
        for (int col = 0; col < N; col++){
            int value = rand() % 100 + 1;
            printf("%d ", value);
            newmat[row][col] = value;
        }
        printf("\n");
    }
    printf("\n--------------------------------------\n");
}
// Host driver: generate two random N x N matrices, add them on the GPU
// with a single block of N x N threads, and print the result.
int main(){
int A[N][N];
randmatfunc(A); // fill and echo matrix A
int B[N][N];
randmatfunc(B); // fill and echo matrix B
int C[N][N];
int (*d_A)[N], (*d_B)[N], (*d_C)[N]; // device copies, typed as pointers to rows
cudaMalloc((void**)&d_A, (N*N)*sizeof(int));
cudaMalloc((void**)&d_B, (N*N)*sizeof(int));
cudaMalloc((void**)&d_C, (N*N)*sizeof(int));
cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
// NOTE(review): C is uninitialized here and the kernel overwrites every
// element of d_C, so this upload is unnecessary.
cudaMemcpy(d_C, C, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N); // N*N = 484 threads, within the 1024-per-block limit
MatAdd<<<numBlocks,threadsPerBlock>>>(d_A,d_B,d_C);
// Blocking copy-back also synchronizes with the kernel launch.
cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
int i, j; printf("C = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", C[i][j]);
}
printf("\n");
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n");
return 0;
}
|
2,407 | /*#include "GPU_function.cuh"
void cudaFTshift(cufftComplex * input, int sizeX, int sizeY)
{
int blocksInX = (sizeX+8-1)/8;
int blocksInY = (sizeY+8-1)/8;
dim3 grid(blocksInX, blocksInY);
dim3 block(8, 8);
cuFFT2Dshift<<<grid,block>>>(input, sizeX, sizeY);
}*/
|
2,408 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
// Wall-clock timestamp in microseconds (CLOCK_REALTIME).
// NOTE(review): despite the "64" in the name the result is a 32-bit uint,
// so (tv_sec * 1000000) wraps roughly every ~71 minutes; only short
// intervals measured as differences of two calls are meaningful.
uint getTimeMicroseconds64()
{
uint nTime;
struct timespec tSpec;
clock_gettime(CLOCK_REALTIME, &tSpec);
nTime = (uint)tSpec.tv_sec * 1000000 + (uint)tSpec.tv_nsec / 1000;
return nTime;
}
// One-block layer-wise-relevance-propagation kernel: thread (x=in_idx,
// y=out_idx) handles a single weight. Expects launch <<<1, dim3(n, m)>>>
// and all output buffers zero-initialized (accumulates via atomicAdd).
__global__ void lrp_perc(int *in, int *out, int *relevance, int *weights, int *activations, int *activation_sum, int n, int m)
{
int out_idx = threadIdx.y;
int in_idx = threadIdx.x;
// Forward: per-connection activation, accumulated into the output's sum.
activations[out_idx * n + in_idx] = in[in_idx] * weights[out_idx * n + in_idx];
atomicAdd(&activation_sum[out_idx], activations[out_idx * n + in_idx]);
__syncthreads();
// ReLU on the summed activation; every thread of a column writes the
// same value, so the unsynchronized store is benign.
if (activation_sum[out_idx] < 0) { out[out_idx] = 0; } else { out[out_idx] = activation_sum[out_idx]; }
// Redistribute relevance back to the inputs (integer division).
// NOTE(review): divides by activation_sum[out_idx], which is 0 if a
// column's activations cancel — inputs must avoid that case.
atomicAdd(&relevance[in_idx], (activations[out_idx * n + in_idx] * out[out_idx]) / activation_sum[out_idx]);
}
/*
 * CPU reference for the lrp_perc kernel: forward pass through one
 * fully-connected layer (n inputs, m outputs) with ReLU, followed by
 * integer relevance redistribution back onto the inputs.
 * All output arrays are accumulated with +=, so the caller must
 * zero-initialize out, relevance and activation_sum.
 */
void lrp_perc_gm(int *in, int *out, int *relevance, int *weights, int *activations, int *activation_sum, int n, int m)
{
    // Forward: per-connection activations, their per-output sum, and ReLU.
    for (int j = 0; j < m; j++) {
        int *w_row = &weights[j * n];
        int *a_row = &activations[j * n];
        for (int i = 0; i < n; i++) {
            a_row[i] = in[i] * w_row[i];
            activation_sum[j] += a_row[i];
        }
        out[j] = (activation_sum[j] < 0) ? 0 : activation_sum[j];
    }
    // Relevance redistribution (integer arithmetic, truncating division).
    for (int j = 0; j < m; j++) {
        for (int i = 0; i < n; i++) {
            relevance[i] += (activations[j * n + i] * out[j]) / activation_sum[j];
        }
    }
}
// Vector-matrix multiply: block b owns output row b and each of its n
// threads contributes one product via atomicAdd. Launch <<<m, n>>>; `out`
// must be zero-initialized by the caller.
__global__ void v_m_mul(int *in, int *out, int *weights, int n, int m)
{
    int row = blockIdx.x;
    int lane = threadIdx.x;
    __syncthreads();
    int product = in[lane] * weights[row * n + lane];
    atomicAdd(&out[row], product);
}
// CPU reference for v_m_mul: out[j] += sum_i in[i] * weights[j*n+i].
// Accumulates with +=, so the caller is expected to zero `out` first.
void v_m_mul_gm(int *in, int *out, int *weights, int n, int m)
{
    for (int j = 0; j < m; j++) {
        int acc = 0;
        for (int i = 0; i < n; i++)
            acc += in[i] * weights[j * n + i];
        out[j] += acc;
    }
}
// Replace every element of `in` with the block-wide sum, accumulated in a
// single shared counter. Launch with one block of n threads.
__global__ void add(int *in, int n)
{
    __shared__ int total;
    int lane = threadIdx.x;
    total = 0;            // every thread writes 0 before the barrier
    __syncthreads();
    atomicAdd(&total, in[lane]);
    __syncthreads();
    in[lane] = total;
}
// CPU reference for add: replace every element of `in` with the sum of all
// n elements.
void add_gm(int *in, int n)
{
    int total = 0;
    for (int i = 0; i < n; i++)
        total += in[i];
    for (int i = 0; i < n; i++)
        in[i] = total;
}
// Compare the GPU LRP kernel against its CPU reference on a fixed-size
// layer (n=128 inputs, m=8 outputs, all weights 1), checking relevance,
// outputs, activations and activation sums element-wise.
int main(void)
{
const int n = 128, m = 8;
uint dT1 = 0, dT2 = 0, hT1 = 0, hT2 = 0;
int input[n], golden_out[m], cuda_out[m], weights[m * n], golden_relevance[n], cuda_relevance[n], golden_activations[m * n], cuda_activations[m * n], golden_asum[m], cuda_asum[m];
cudaError_t s;
// initialize variables on host
for (int i = 0; i < n; i++) {
input[i] = rand() % 10;
golden_relevance[i] = 0;
cuda_relevance[i] = 0;
for (int j = 0; j < m; j++) {
weights[j * n + i] = 1;
golden_activations[j * n + i] = 0;
cuda_activations[j * n + i] = 0;
}
}
for (int i = 0; i < m; i++) {
golden_out[i] = 0;
cuda_out[i] = 0;
golden_asum[i] = 0;
cuda_asum[i] = 0;
}
// allocating memory for variables for device
int *input_, *weights_, *output_, *relevance_, *activations_, *asum_;
cudaMalloc(&input_, n * sizeof(int));
cudaMalloc(&weights_, m * n * sizeof(int));
cudaMalloc(&output_, m * sizeof(int));
cudaMalloc(&relevance_, n * sizeof(int));
cudaMalloc(&activations_, m * n * sizeof(int));
cudaMalloc(&asum_, m * sizeof(int));
// upload inputs and zero all device-side accumulators
cudaMemcpy(input_, input, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(weights_, weights, n * m *sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(output_, 0, m * sizeof(int));
cudaMemset(relevance_, 0, n * sizeof(int));
cudaMemset(activations_, 0, m * n * sizeof(int));
cudaMemset(asum_, 0, m * sizeof(int));
// run cuda kernel and host function and compare the results
hT1 = getTimeMicroseconds64();
lrp_perc_gm(input, golden_out, golden_relevance, weights, golden_activations, golden_asum, n, m);
hT2 = getTimeMicroseconds64();
dT1 = getTimeMicroseconds64();
// One block of n*m = 1024 threads, the per-block maximum.
lrp_perc<<<1,dim3(n,m)>>>(input_, output_, relevance_, weights_, activations_, asum_, n, m);
s = cudaDeviceSynchronize();
dT2 = getTimeMicroseconds64();
printf("%s\n", cudaGetErrorName(s));
// relvance
printf("### RELEVANCE ###\n");
s = cudaMemcpy(cuda_relevance, relevance_, n * sizeof(int), cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
for (int i = 0; i < n; i++) {
if (golden_relevance[i] != cuda_relevance[i]) {
printf("Error: golden_relevance[%d]!=cuda_relevance[%d] (%d, %d)\n", i, i, golden_relevance[i], cuda_relevance[i]);
}
}
// out
printf("### OUT ###\n");
s = cudaMemcpy(cuda_out, output_, m * sizeof(int), cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
for (int i = 0; i < m; i++) {
if (golden_out[i] != cuda_out[i]) {
printf("Error: golden_out[%d]!=cuda_out[%d] (%d, %d)\n", i, i, golden_out[i], cuda_out[i]);
}
}
// activations
printf("### ACTIVATIONS ###\n");
s = cudaMemcpy(cuda_activations, activations_, m * n * sizeof(int), cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
for (int i = 0; i < m * n; i++) {
if (golden_activations[i] != cuda_activations[i]) {
printf("Error: golden_activations[%d]!=cuda_activations[%d] (%d, %d)\n", i, i, golden_activations[i], cuda_activations[i]);
}
}
// asum
printf("### ASUM ###\n");
s = cudaMemcpy(cuda_asum, asum_, m * sizeof(int), cudaMemcpyDeviceToHost);
printf("%s\n", cudaGetErrorName(s));
for (int i = 0; i < m; i++) {
if (golden_asum[i] != cuda_asum[i]) {
printf("Error: golden_asum[%d]!=cuda_asum[%d] (%d, %d)\n", i, i, golden_asum[i], cuda_asum[i]);
}
}
// NOTE(review): the elapsed microseconds are shifted left by 16 bits
// before printing — this looks like a scaling mistake; confirm units.
printf("GPU time: %d, \tCPU time: %d\n", (dT2 - dT1) << 16, (hT2 - hT1) << 16);
}
|
2,409 | #include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <tuple>
#include <random>
#include <functional>
#include <chrono>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with its source location; optionally terminate
// the process with the error code (used via the gpuErrchk macro).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Fetch cell (x, y) with toroidal wrap-around at the grid edges.
// NOTE(review): x is wrapped using size[1] and y using size[0], while the
// linear index uses stride size[0]; main stores the grid with stride
// size[1]. These are mutually consistent only for square grids — confirm
// the intended width/height ordering before using non-square boards.
__device__ bool getCell( bool* input, int x, int y, int* size ) {
if ( x < 0 ) { x = x + size[1]; }
else if ( x >= size[1] ) { x = x - size[1]; }
if ( y < 0 ) { y = y + size[0]; }
else if ( y >= size[0] ) { y = y - size[0]; }
return input[ y * size[0] + x ];
}
// Count the live cells among the 8 neighbours of (x, y); edge wrap-around
// is handled by getCell.
__device__ int getNeighbourCount( bool* input, int x, int y, int* size ) {
    int count = 0;
    for ( int dy = -1; dy <= 1; dy++ ) {
        for ( int dx = -1; dx <= 1; dx++ ) {
            if ( dx == 0 && dy == 0 ) { continue; }  // skip the cell itself
            if ( getCell( input, x + dx, y + dy, size )) { count++; }
        }
    }
    return count;
}
// One Game-of-Life generation: each thread updates the single cell whose
// linear index equals its threadIdx.x, reading `input` and writing
// `output`.
// NOTE(review): `stride` is computed but never used — there is no
// grid-stride loop, so the launch must supply at least width*height
// threads (main launches <<<1, 100>>> for the 10x10 default). The `steps`
// parameter is also unused.
__global__ void simulate( bool* input, bool* output, int width, int height, int steps ) {
int index = threadIdx.x;
int stride = blockDim.x;
if ( index >= ( width * height )) {
// Index out of range
printf("Out of range: %d\n", index);
return;
}
// Find X and Y
int y = index / width;
int x = index % width;
int size[2] = {width, height};
//printf("X: %d, Y: %d\n", x, y);
int count = getNeighbourCount( input, x, y, size );
if ( input[ index ] ) {
// Live cell: survives with 2 or 3 neighbours, otherwise dies.
if ( count == 2 || count == 3 ) {
output[ index ] = true;
} else {
output[ index ] = false;
}
} else {
// Dead cell: exactly 3 neighbours brings it to life.
if ( count == 3 ) {
output[ index ] = true;
} else {
output[ index ] = false;
}
}
}
/*
Clears screen and moves cursor to home pos on POSIX systems
*/
/* Clear the terminal and home the cursor using ANSI escapes (POSIX). */
void clear() {
    std::cout << "\033[2J;";
    std::cout << "\033[1;1H";
}
/*
*/
/* Render the grid to stdout: '0' for live cells, '.' for dead ones.
 * NOTE: row stride is size[1], matching the allocation pattern in main. */
void printGrid( bool* grid, int* size ) {
    for ( int row = 0; row < size[1]; row++ ) {
        for ( int col = 0; col < size[0]; col++ ) {
            std::cout << ( grid[ row * size[1] + col ] ? "0" : "." );
        }
        std::cout << std::endl;
    }
}
/* Print command-line usage and option descriptions to stderr. */
static void show_usage(std::string name)
{
    std::cerr << "Usage: " << name << " [-i input.txt]/[-r] [-o output.txt] [-s 10]\n";
    std::cerr << "Options:" << std::endl;
    std::cerr << "\t-h, --help\t\tShow this help message and exit" << std::endl;
    std::cerr << "\t-i, --input\t\tProvide an input file for the starting state" << std::endl;
    std::cerr << "\t-r, --random\t\tInstead start with a randomized starting state, provide a seed, 0 will set a random seed" << std::endl;
    std::cerr << "\t-o, --output\t\tOptionally save the final state as a file" << std::endl;
    std::cerr << "\t-s, --steps\t\tThe number of simulation step to take" << std::endl;
    std::cerr << "\t-p, --play\t\tOptionally play the simulation in the console" << std::endl;
    std::cerr << std::endl;
}
// Game-of-Life driver: build a 10x10 board (random seed or input file),
// step it `steps` generations on the GPU, optionally animating each
// generation in the console, then print the final state.
int main( int argc, char* argv[] ) {
int opt;
char* input;
char* output;
bool isRandom = false;
std::ofstream outfile;
bool play = false;
int seed;
int steps = 0;
int size[2] = {10, 10}; // x, y
int width, height;
// NOTE(review): sizeof(bool*) makes gridSize 8x larger than needed
// (harmless over-allocation, but presumably sizeof(bool) was intended).
int gridSize = size[1] * size[0] * sizeof(bool*);
bool* grid;
bool* d_in; // The read-only input array for kernel
bool* d_out; // The write-only output for kernel
if ( argc < 2 ) {
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
// Parse -h/-i/-o/-r/-s/-p; -r and -s take arguments.
while (( opt = getopt(argc, argv, "hi:o:r:s:p" )) != -1 ) {
switch ( opt ) {
case 'h':
show_usage( argv[0] );
exit( EXIT_FAILURE );
break;
case 'i':
input = optarg;
break;
case 'o':
output = optarg;
break;
case 'r':
isRandom = true;
seed = atoi(optarg);
break;
case 's':
steps = atoi(optarg);
break;
case 'p':
play = true;
break;
default: /* '?' */
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
// Init empty grid
grid = (bool*) malloc( gridSize );
gpuErrchk( cudaMalloc( &d_in, gridSize ) );
gpuErrchk( cudaMalloc( &d_out, gridSize ) );
for ( int y = 0; y < size[1]; y++ ) {
for ( int x = 0; x < size[0]; x++ ) {
grid[ y * size[1] + x ] = false; // Init host grid to empty
}
}
if ( isRandom ) {
// Seed 0 means "pick a seed from the clock".
if ( ! seed ) {
seed = std::chrono::steady_clock::now().time_since_epoch().count();
}
std::default_random_engine engine(seed);
std::uniform_int_distribution<> boolGen( 0, 1 );
for ( int y = 0; y < size[1]; y++ ) {
for ( int x = 0; x < size[0]; x++ ) {
grid[ y * size[1] + x ] = boolGen( engine );
}
}
} else {
// File is assumed to have {width} {height} on the first line.
// NOTE(review): if -i was not supplied, `input` is uninitialized here.
std::ifstream infile( input );
std::string size_string;
std::string delimiter = " ";
// parsing string into two ints for width and height
std::getline( infile, size_string );
std::string str1 = size_string.substr( 0, size_string.find( delimiter ));
size_string.erase( 0, size_string.find( delimiter ) + delimiter.length() );
std::string str2 = size_string;
// NOTE(review): these locals shadow the outer width/height, and `size`
// is never updated — files with dimensions other than 10x10 are
// rendered/simulated with the wrong geometry.
int width = stoi( str1 );
int height = stoi( str2 );
std::string line;
int count = 0; // current line count
while ( std::getline( infile, line )) {
for ( int x = 0; x < width; x++ ) {
if ( line[x] == '0' ) {
grid[ count * width + x ] = true;
} else {
grid[ count * width + x ] = false;
}
}
count++;
}
}
if ( play ) {
clear();
printGrid( grid, size );
sleep( 1 );
}
gpuErrchk( cudaMemcpy ( d_in, grid, gridSize, cudaMemcpyHostToDevice ) );
for (int step = 0; step < steps; step++) {
simulate<<< 1, 100 >>>( d_in, d_out, size[0], size[1], steps );
if ( play ) {
gpuErrchk( cudaMemcpy ( grid, d_out, gridSize, cudaMemcpyDeviceToHost ) );
gpuErrchk( cudaDeviceSynchronize() );
clear();
printGrid( grid, size );
sleep( 1 );
}
// NOTE(review): both pointers are device memory, so the kind should be
// cudaMemcpyDeviceToDevice, not HostToHost — confirm this copy works as
// intended on the target runtime.
gpuErrchk( cudaMemcpy ( d_in, d_out, gridSize, cudaMemcpyHostToHost ) );
}
if ( !play ) {
// Wait for GPU to finish before accessing on host
gpuErrchk( cudaMemcpy ( grid, d_out, gridSize, cudaMemcpyDeviceToHost ) );
gpuErrchk( cudaDeviceSynchronize() );
printGrid( grid, size );
}
// Clean up memory allocations
free( grid );
cudaFree( d_in );
cudaFree( d_out );
exit( EXIT_SUCCESS );
}
|
2,410 | #include "includes.h"
// Subtract 1 from one element per row of a row-major (row x col) matrix:
// matrix[r][indices[r]] -= 1, one thread per row.
__global__ void minus_one(float *matrix, unsigned int *indices, unsigned int row, unsigned int col) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < row) {
        unsigned int offset = idx * col + indices[idx];
        matrix[offset] -= 1.0f;
    }
}
2,411 | #include "includes.h"
// Scatter per-slice similarity results into the output table: each active
// slice's value is written `writenum` times at offsets strided by
// slices*writestep, one thread per active slice.
__global__ void writeSimilarities(const float* nvccResults, int* activelayers, int writestep, int writenum, float* similarities, int active_slices, int slices)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= active_slices)
        return;
    const float value = nvccResults[tid];
    const int slice = activelayers[tid];
    for (int i = 0; i < writenum; ++i)
        similarities[slices*writestep*i + slice] = value;
}
2,412 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<curand.h>
#include<curand_kernel.h>
#include<string.h>
#include<new>
#define FALSE 0
#define TRUE 1
#define STR_EQ 0
#define max(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _b : _a; })
#define abs(a) \
({__typeof__ (a) _a = (a); \
_a >= 0 ? _a : -_a; })
/* =================== BASIC FUNCTIONS =====================================================================*/
// Abort the process with a readable message when a CUDA call fails.
// Invoked through the HANDLE_ERROR(err) macro, which supplies __FILE__/__LINE__.
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Seed a per-thread curand state.  Seeding with the device clock() means every
// run (and every kernel launch) draws a different random stream, so simulation
// results are not reproducible across runs.
__device__ void curandInit(curandState_t* state_ptr, int tid){
curand_init((unsigned long long)clock(), tid, 0, state_ptr);
}
// Uniform float on the device.  curand_uniform() returns a value in (0, 1],
// so the result lies in (min, max] — max itself can be produced exactly.
__device__ float cudaFloatRand(float min, float max, curandState_t* state_ptr){
return min + curand_uniform(state_ptr) * (max - min);
}
__device__ int cudaIntRand(int min, int max, curandState_t* state_ptr){
    // Uniform integer in the inclusive range [min, max].
    // Bug fix: curand_uniform() returns a value in (0, 1], so the scaled float
    // can be exactly max + 1 and the truncated int would fall outside the
    // range (e.g. indexing one past the end of gathering_points).  Clamp.
    int v = int(cudaFloatRand(float(min), float(max + 1.0), state_ptr));
    return v > max ? max : v;
}
// Uniform float in [min, max], driven by the host-side rand() stream.
__host__ float floatRand(float min, float max){
    float t = rand() / (float) RAND_MAX;   // t in [0, 1]
    return min + t * (max - min);
}
// Host-side Bernoulli trial: TRUE with probability `probability`.
__host__ char roll(float probability){
    return (floatRand(0.0, 1.0) < probability) ? TRUE : FALSE;
}
// Device-side Bernoulli trial: TRUE with probability `probability`.
__device__ char cudaRoll(float probability, curandState_t* curand_state_ptr){
if(cudaFloatRand(0.0, 1.0, curand_state_ptr) < probability)
return TRUE;
return FALSE;
}
/* =================== STRUCTS AND METHODS =====================================================================*/
typedef struct SimulationOptions{
int N;
float DIM;
int simulation_time;
float infection_r;
float infection_p;
float velocity;
int immune_time;
int sympthoms_time;
int blocks;
int threads_per_block;
char* output;
float lawful_p;
int quarantine_sick_time;
int quarantine_all_time;
int gathering_points_n;
float gathering_point_p;
int buffor_size;
} SimulationOptions;
typedef enum{HEALTHY, CARRIER, SICK, IMMUNE} Health;
typedef enum{GOING_TO, GOING_BACK, NO_DESTINATION} GatheringPointTravel;
typedef struct Point{
float x;
float y;
} Point;
// Host: random point uniformly distributed in the [0, DIM] x [0, DIM] square.
__host__ Point randPoint(float DIM){
Point point;
point.x = floatRand(0.0, DIM);
point.y = floatRand(0.0, DIM);
return point;
}
// Device: random point uniformly distributed in the [0, DIM] x [0, DIM] square.
__device__ Point cudaRandPoint(float DIM, curandState_t* state_ptr){
Point point;
point.x = cudaFloatRand(0.0, DIM, state_ptr);
point.y = cudaFloatRand(0.0, DIM, state_ptr);
return point;
}
__host__ __device__ float distance(Point p1, Point p2){
    // Euclidean distance between two points.
    // Squaring makes the abs() of the original redundant, and sqrtf keeps the
    // computation in single precision instead of promoting to double sqrt.
    float dx = p1.x - p2.x;
    float dy = p1.y - p2.y;
    return sqrtf(dx * dx + dy * dy);
}
typedef struct Person{
Point location;
Point home;
Health health;
GatheringPointTravel travel;
char quarantined; // SICK people are totaly quarantined, the rest is partialy quarantined
int time_sick;
Point destination;
char lawful;
} Person;
typedef struct PersonInfo{
Point location;
Health health;
} PersonInfo;
/* =================== DEVICE CODE =====================================================================*/
// Apply the quarantine policy to one person at simulation step `time`.
// Only "lawful" people obey.  Full lockdown (quarantine_all_time) takes
// precedence over the sick-only policy; under the sick-only policy the flag
// is re-evaluated every step, so a person who recovers is released again.
__device__ void updateQuarantine(SimulationOptions settings, Person* person_ptr, int time){
if(!(person_ptr->lawful))
return;
if(settings.quarantine_all_time && settings.quarantine_all_time < time)
person_ptr->quarantined = TRUE;
else if(settings.quarantine_sick_time && settings.quarantine_sick_time < time){
if(person_ptr->health == SICK){
person_ptr->quarantined = TRUE;
person_ptr->travel = NO_DESTINATION;
}
else
person_ptr->quarantined = FALSE;
}
}
// Move one person one `velocity`-sized step toward their destination.
// Quarantined, non-sick people only move on the GOING_TO -> GOING_BACK ->
// NO_DESTINATION gathering-point round trip; quarantined SICK people never
// move.  Unquarantined people pick a fresh random destination on arrival.
// Positions are clamped to the [0, DIM] square.
__device__ void migrate(
SimulationOptions settings,
Person* person_ptr,
curandState_t* state_ptr,
Point* gathering_points
){
float angle, dy, dx;
// "Arrived" means within one step length of the destination.
float destination_r = settings.velocity;
if(person_ptr->quarantined){
if(person_ptr->health == SICK)
return;
if(person_ptr->travel == GOING_TO && distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->destination = person_ptr->home;
person_ptr->travel = GOING_BACK;
}
if(person_ptr->travel == GOING_BACK && distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->travel = NO_DESTINATION;
}
if(person_ptr->travel == NO_DESTINATION){
if(!settings.gathering_points_n)
return;
if(!cudaRoll(settings.gathering_point_p, state_ptr))
return;
person_ptr->destination = gathering_points[cudaIntRand(0, settings.gathering_points_n - 1, state_ptr)];
person_ptr->travel = GOING_TO;
}
}
else if(distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->destination = cudaRandPoint(settings.DIM, state_ptr);
}
// Step toward the destination at constant speed, clamped to the map bounds.
dy = person_ptr->destination.y - person_ptr->location.y;
dx = person_ptr->destination.x - person_ptr->location.x;
angle = atan2(dy, dx);
person_ptr->location.x = min(max(person_ptr->location.x + cos(angle) * settings.velocity, 0.0), settings.DIM);
person_ptr->location.y = min(max(person_ptr->location.y + sin(angle) * settings.velocity, 0.0), settings.DIM);
}
// Advance one person's disease clock by one step.  CARRIER/SICK accumulate
// time_sick; past sympthoms_time they turn SICK, past immune_time they
// recover to IMMUNE.  HEALTHY people keep time_sick at 0, so the threshold
// checks below have no effect on them.
__device__ void developDisease(SimulationOptions settings, Person* person_ptr){
if(person_ptr->health == CARRIER || person_ptr->health == SICK)
person_ptr->time_sick += 1;
if(person_ptr->time_sick > settings.immune_time)
person_ptr->health = IMMUNE;
else if(person_ptr->time_sick > settings.sympthoms_time)
person_ptr->health = SICK;
}
// there may be races, but it doesn't matter (I think?)
// Spread infection from person `me_idx` to all reachable neighbours.
// A contagious person (CARRIER or SICK, and not fully isolated, i.e. not
// both quarantined and SICK) rolls infection_p against every susceptible
// person within infection_r.  Writes to other threads' people race with
// concurrent updates (acknowledged in the original comment at the call
// site); the only value ever written is CARRIER, so the race at worst
// duplicates an infection.
__device__ void infect(
SimulationOptions settings,
Person* population,
int me_idx,
curandState_t* curand_state_ptr
){
Person* me_ptr = &population[me_idx];
Person* person_ptr;
int i;
if((me_ptr->health == CARRIER || me_ptr->health == SICK) && !(me_ptr->quarantined && me_ptr->health == SICK)){
for(i = 0; i < settings.N; i++){
person_ptr = &population[i];
if(i == me_idx) continue;
// People fully locked down at home cannot be reached.
if(person_ptr->quarantined && person_ptr->travel == NO_DESTINATION) continue;
if(person_ptr->health == CARRIER || person_ptr->health == SICK) continue;
if(distance(me_ptr->location, person_ptr->location) > settings.infection_r) continue;
if(cudaRoll(settings.infection_p, curand_state_ptr))
person_ptr->health = CARRIER;
}
}
}
// One simulation step for the whole population.  Each phase strides over
// person indices by gridDim.x * blockDim.x.
// NOTE(review): there is no grid-wide barrier between the phases, so one
// thread can run infect() while another is still migrating its share of the
// population; the author accepts these races (see infect()).
// NOTE(review): the state loaded from curand_states[tid] is immediately
// re-seeded by curandInit() with clock() and is never written back, so the
// curand_states array is effectively unused storage.
__global__ void simulate(
SimulationOptions settings,
Person* population,
curandState_t* curand_states,
int time,
Point* gathering_points,
int buffor_index,
PersonInfo* population_info
){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
Person* person_ptr;
curandState_t my_curand_state = curand_states[tid];
curandInit(&my_curand_state, tid);
// develop disease
i = tid;
while(i < settings.N){
person_ptr = &population[i];
developDisease(settings, person_ptr);
i += gridDim.x * blockDim.x;
}
// update population quarantine_all_time
i = tid;
while(i < settings.N){
person_ptr = &population[i];
updateQuarantine(settings, person_ptr, time);
i += gridDim.x * blockDim.x;
}
// migration of population
i = tid;
while(i < settings.N){
person_ptr = &population[i];
migrate(settings, person_ptr, &my_curand_state, gathering_points);
i += gridDim.x * blockDim.x;
}
// spread of disease
i = tid;
while(i < settings.N){
infect(settings, population, i, &my_curand_state);
i += gridDim.x * blockDim.x;
}
// save to buffor (snapshot location + health into this step's slot)
i = tid;
while(i < settings.N){
population_info[settings.N * buffor_index + i].location = population[i].location;
population_info[settings.N * buffor_index + i].health = population[i].health;
i += gridDim.x * blockDim.x;
}
}
/* =================== HOST =====================================================================*/
int main(int argc, char** argv){
    SimulationOptions settings;
    int i, j, buffors_simulated;
    FILE* file = NULL;
    char save_output;
    Person* population;
    Person* dev_population;
    curandState_t* curand_states;
    Point* gathering_points;
    Point* dev_gathering_points;
    PersonInfo* population_info;
    PersonInfo* dev_population_info;
    /* Defaults; every field can be overridden from the command line below. */
    settings.N = 10000;
    settings.DIM = 100;
    settings.simulation_time = 500;
    settings.velocity = 1.0;
    settings.infection_p = 0.33;
    settings.infection_r = 3.0;
    settings.immune_time = 100;
    settings.sympthoms_time = 10;
    settings.blocks = 128;
    settings.threads_per_block = 128;
    settings.output = (char*)"output.sim";
    settings.quarantine_all_time = 0;
    settings.quarantine_sick_time = 0;
    settings.lawful_p = 1.0;
    settings.gathering_points_n = 0;
    settings.gathering_point_p = 0.05;
    settings.buffor_size = 1;
    /* Parse "--long value" / "-short value" pairs; bad values exit with 1. */
    i = 1;
    while(i < argc - 1){
        if(strcmp(argv[i], "--N") == STR_EQ || strcmp(argv[i], "-N") == STR_EQ){
            settings.N = atoi(argv[++i]);
            if(settings.N < 1) return 1;
        }
        /* Bug fix: the long option was misspelled "-=DIM" in the original. */
        else if(strcmp(argv[i], "--DIM") == STR_EQ || strcmp(argv[i], "-DIM") == STR_EQ){
            settings.DIM = atof(argv[++i]);
            if(settings.DIM <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--simulation_n") == STR_EQ || strcmp(argv[i], "-simn") == STR_EQ){
            settings.simulation_time = atoi(argv[++i]);
            if(settings.simulation_time < 1) return 1;
        }
        else if(strcmp(argv[i], "--velocity") == STR_EQ || strcmp(argv[i], "-v") == STR_EQ){
            settings.velocity = atof(argv[++i]);
            if(settings.velocity < 0) return 1;
        }
        else if(strcmp(argv[i], "--infection_p") == STR_EQ || strcmp(argv[i], "-infp") == STR_EQ){
            settings.infection_p = atof(argv[++i]);
            if(settings.infection_p <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--infection_r") == STR_EQ || strcmp(argv[i], "-infr") == STR_EQ){
            settings.infection_r = atof(argv[++i]);
            if(settings.infection_r <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--immune_time") == STR_EQ || strcmp(argv[i], "-immt") == STR_EQ){
            settings.immune_time = atoi(argv[++i]);
            if(settings.immune_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--sympthoms_time") == STR_EQ || strcmp(argv[i], "-symt") == STR_EQ){
            settings.sympthoms_time = atoi(argv[++i]);
            if(settings.sympthoms_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--blocks") == STR_EQ || strcmp(argv[i], "-b") == STR_EQ){
            settings.blocks = atoi(argv[++i]);
            if(settings.blocks < 1) return 1;
        }
        else if(strcmp(argv[i], "--threads_per_block") == STR_EQ || strcmp(argv[i], "-tpb") == STR_EQ){
            settings.threads_per_block = atoi(argv[++i]);
            if(settings.threads_per_block < 1) return 1;
        }
        else if(strcmp(argv[i], "--output") == STR_EQ || strcmp(argv[i], "-o") == STR_EQ){
            settings.output = argv[++i];
            if(!settings.output) return 1;
        }
        else if(strcmp(argv[i], "--quarantine_all_time") == STR_EQ || strcmp(argv[i], "-qat") == STR_EQ){
            settings.quarantine_all_time = atoi(argv[++i]);
            if(settings.quarantine_all_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--quarantine_sick_time") == STR_EQ || strcmp(argv[i], "-qst") == STR_EQ){
            settings.quarantine_sick_time = atoi(argv[++i]);
            if(settings.quarantine_sick_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--lawful_p") == STR_EQ || strcmp(argv[i], "-lawp") == STR_EQ){
            settings.lawful_p = atof(argv[++i]);
            if(settings.lawful_p < 0.0) return 1;
        }
        else if(strcmp(argv[i], "--gathering_points_n") == STR_EQ || strcmp(argv[i], "-gn") == STR_EQ){
            settings.gathering_points_n = atoi(argv[++i]);
            if(settings.gathering_points_n < 0) return 1;
        }
        else if(strcmp(argv[i], "--gathering_point_p") == STR_EQ || strcmp(argv[i], "-gp") == STR_EQ){
            settings.gathering_point_p = atof(argv[++i]);
            if(settings.gathering_point_p < 0.0) return 1;
        }
        else if(strcmp(argv[i], "--buffor_size") == STR_EQ || strcmp(argv[i], "-buff") == STR_EQ){
            settings.buffor_size = atoi(argv[++i]);
            if(settings.buffor_size < 1) return 1;
        }
        i++;
    }
    /* Output file name "none" disables writing the trace file. */
    if(strcmp(settings.output, "none") == STR_EQ)
        save_output = FALSE;
    else
        save_output = TRUE;
    try{
        population_info = new PersonInfo[settings.N * settings.buffor_size];
        population = new Person[settings.N];
    }
    catch(const std::bad_alloc& e){
        printf("Insufficent memory on host\n");
        return 1;
    }
    /* Random initial population; patient zero is made a CARRIER below. */
    srand((unsigned int)time(NULL));
    for(i = 0; i < settings.N; i++){
        population[i].location.x = floatRand(0.0, settings.DIM);
        population[i].location.y = floatRand(0.0, settings.DIM);
        population[i].home = population[i].location;
        population[i].destination.x = floatRand(0.0, settings.DIM);
        population[i].destination.y = floatRand(0.0, settings.DIM);
        population[i].health = HEALTHY;
        population[i].quarantined = FALSE;
        population[i].time_sick = 0;
        population[i].travel = NO_DESTINATION;
        population[i].lawful = roll(settings.lawful_p) ? TRUE : FALSE;
    }
    gathering_points = new Point[settings.gathering_points_n];
    for(i = 0; i < settings.gathering_points_n; i++){
        gathering_points[i].x = floatRand(0.0, settings.DIM);
        gathering_points[i].y = floatRand(0.0, settings.DIM);
    }
    /* patient zero */
    population[0].health = CARRIER;
    /* Device buffers and initial upload. */
    HANDLE_ERROR( cudaMalloc((void**)&dev_population, sizeof(Person) * settings.N) );
    HANDLE_ERROR( cudaMalloc((void**)&curand_states, sizeof(curandState_t) * settings.blocks * settings.threads_per_block) );
    HANDLE_ERROR( cudaMalloc((void**)&dev_gathering_points, sizeof(Point) * settings.gathering_points_n) );
    HANDLE_ERROR( cudaMalloc((void**)&dev_population_info, sizeof(PersonInfo) * settings.N * settings.buffor_size) );
    HANDLE_ERROR( cudaMemcpy(dev_population, population, sizeof(Person) * settings.N, cudaMemcpyHostToDevice) );
    HANDLE_ERROR( cudaMemcpy(dev_gathering_points, gathering_points, sizeof(Point) * settings.gathering_points_n, cudaMemcpyHostToDevice) );
    if(save_output){
        file = fopen(settings.output, "w");
        if(!file){
            /* Bug fix: the original wrote through a NULL FILE* if fopen failed. */
            printf("Cannot open output file, continuing without saving\n");
            save_output = FALSE;
        }
        else{
            /* Header: N DIM steps gathering_n, then one line per gathering point. */
            fprintf(file, "%d %f %d %d\n", settings.N, settings.DIM, settings.simulation_time, settings.gathering_points_n);
            for(i = 0; i < settings.gathering_points_n; i++)
                fprintf(file, "%f %f\n", gathering_points[i].x, gathering_points[i].y);
        }
    }
    /* Run the simulation in chunks of buffor_size steps: each step writes its
       snapshot into slot j of dev_population_info, then the filled slots are
       copied back in one transfer and (optionally) appended to the file. */
    i = 0;
    while(i < settings.simulation_time){
        buffors_simulated = 0;
        for(j = 0; j < settings.buffor_size; j++){
            printf("==========SIM%d==========\n", i);
            simulate<<<settings.blocks, settings.threads_per_block>>>(
                settings, dev_population, curand_states, i, dev_gathering_points, j, dev_population_info
            );
            cudaDeviceSynchronize();
            buffors_simulated = j + 1;
            i++;
            if(i >= settings.simulation_time)
                break;
        }
        printf("Coping buffor from GPU to host...\n");
        /* Copy only the slots that were actually written this chunk. */
        HANDLE_ERROR( cudaMemcpy(
            population_info, dev_population_info, sizeof(PersonInfo) * settings.N * buffors_simulated, cudaMemcpyDeviceToHost
        ) );
        if(save_output){
            for(j = 0; j < settings.N * buffors_simulated; j++){
                fprintf(file, "%f %f %d\n", population_info[j].location.x, population_info[j].location.y, population_info[j].health);
            }
        }
    }
    if(save_output)
        fclose(file);
    cudaFree(curand_states);
    cudaFree(dev_population);
    cudaFree(dev_gathering_points);
    cudaFree(dev_population_info);
    delete[] population;
    delete[] gathering_points;
    delete[] population_info;
    return 0;
} |
2,413 | #include "includes.h"
// Mark every bucket's "next" entry as dead (1) in the multipass config.
// NOTE(review): TID is a project macro — presumably the flat global thread
// index (blockIdx.x * blockDim.x + threadIdx.x); confirm against its
// definition in includes.h.
__global__ void setGroupsPointersDead(multipassConfig_t* mbk, unsigned numBuckets)
{
int index = TID;
if(index < numBuckets)
{
mbk->isNextDeads[index] = 1;
}
} |
2,414 | //function kernel
// NOTE(review): despite the name this returns the SQUARED magnitude of r
// (no sqrt).  Every caller in this file relies on that (compares against
// h*h or applies sqrt itself), so the name must not be "fixed" blindly.
__device__ float length(float3 r) {
return r.x*r.x + r.y*r.y + r.z*r.z;
}
// Component-wise product of two float3 vectors.
__device__ float3 mul_float3(float3 r1, float3 r2) {
    float3 out;
    out.x = r1.x * r2.x;
    out.y = r1.y * r2.y;
    out.z = r1.z * r2.z;
    return out;
}
// Component-wise sum of two float3 vectors.
__device__ float3 add_float3(float3 r1, float3 r2) {
    float3 out;
    out.x = r1.x + r2.x;
    out.y = r1.y + r2.y;
    out.z = r1.z + r2.z;
    return out;
}
// Component-wise difference r1 - r2.
__device__ float3 dif_float3(float3 r1, float3 r2) {
    float3 out;
    out.x = r1.x - r2.x;
    out.y = r1.y - r2.y;
    out.z = r1.z - r2.z;
    return out;
}
// Scale every component of r by s (r is passed by value).
__device__ float3 scale_float3(float s, float3 r) {
    return make_float3(r.x * s, r.y * s, r.z * s);
}
// Poly6 smoothing kernel W(r, h).  NOTE: length() returns the SQUARED
// magnitude, so (pow(h,2) - length(r)) is the standard (h^2 - |r|^2) term.
// NOTE(review): pow() on float arguments promotes to double math on device.
__device__ float Kernel_Poly6(float3 r, float h) {
float PI = 3.14159;
return 315.0f / (64 * PI * pow(h, 9)) * pow(pow(h, 2) - length(r), 3);
}
// Gradient of the poly6 kernel; length() returns the SQUARED magnitude.
// NOTE(review): the identical scalar coefficient is recomputed for each of
// the three components — hoisting it would change rounding slightly, so it
// is left as written.
__device__ float3 Gradient_Kernel_Poly6(float3 r, float h) {
float PI = 3.14159;
return make_float3(
r.x * -945.0f / ( 32.0f * PI * pow(h,9) ) * pow(pow(h, 2) - length(r), 2),
r.y * -945.0f / ( 32.0f * PI * pow(h,9) ) * pow(pow(h, 2) - length(r), 2),
r.z * -945.0f / ( 32.0f * PI * pow(h,9) ) * pow(pow(h, 2) - length(r), 2));
}
__device__ float Lap_Kernel_Poly6(float3 r, float h) {
    // Laplacian of the poly6 kernel; length() returns the SQUARED magnitude.
    // Bug fix: the original wrote "3 / 4", which is INTEGER division and
    // evaluates to 0, silently dropping the 3/4 * (h^2 - |r|^2) term.
    float PI = 3.14159;
    float h2_r2 = pow(h, 2) - length(r);
    return 945.0f / (8 * PI * pow(h, 9)) * h2_r2 * (length(r) - 0.75f * h2_r2);
}
// Gradient of the spiky kernel (used for pressure forces).
// NOTE(review): divides by _r = |r|; if two distinct particles coincide
// exactly (_r == 0) this yields inf/NaN — callers only skip i == idx.
__device__ float3 Gradient_Kernel_Spiky(float3 r, float h) {
float PI = 3.14159;
float _r = sqrt(length(r));
float v = -45.0f / (PI * pow(h, 6) * _r) * pow(h - _r, 2);
return make_float3(r.x*v, r.y*v, r.z*v);
}
// Laplacian of the viscosity kernel; length() returns the SQUARED magnitude,
// so sqrt(length(r)) recovers |r|.
__device__ float Lap_Kernel_Viscosity(float3 r, float h) {
float PI = 3.14159;
return 45.0f / (PI * pow(h, 5)) * (1 - sqrt(length(r)) / h);
}
//SPH particle struct
struct pSPH {
float3 pos; // position
float3 vel; // velocity
float m; // mass
float rho; // density
float _; // SPH_2 treats particles with _ <= 0.2 as static boundary ("wall") particles
float col; // color-field value; not referenced by SPH_2 in this fragment
};
extern "C" __global__ void
SPH_2(pSPH *p, const float h, const float g, const float t, const int N)
{
    // One thread per particle: accumulate pressure and viscosity forces from
    // all neighbours within radius h, then integrate velocity and position
    // with time step t.  NOTE: length() returns the SQUARED distance, hence
    // the h*h comparison below.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Bug fix: the original guard was `idx > N`, which let thread idx == N
    // read and write one element past the end of p.
    if (idx >= N) return;
    pSPH _p = p[idx];
    if (_p._ <= 0.2) return;           // boundary ("wall") particles never move
    float rho_0 = 1.0f;                // rest density
    float y = 5;                       // stiffness exponent of the state equation
    float p_i = rho_0 * pow(_p.rho/rho_0 - 1.0f, y);
    float p_j;
    float3 F_p = make_float3(0,0,0);   // pressure force accumulator
    float3 F_v = make_float3(0,0,0);   // viscosity force accumulator
    float3 F_ex = make_float3(0,0,0);  // surface-tension force (disabled below)
    float3 G_cs = make_float3(0,0,0);
    float L_cs = 0.0f;
    float3 accel = make_float3(0,0,0);
    float3 gravity = make_float3(0, g, 0);
    int i;
    for (i = 0; i < N; ++i)
    {
        pSPH __p = p[i];
        float3 r = dif_float3(_p.pos, __p.pos);
        if (i == idx) continue;
        if (length(r) > h*h) continue; // outside the smoothing radius
        float scale_p = 1.0f;
        float scale_v = 1.0f;
        // Wall particles push back twice as hard.
        if (__p._ <= 0.2)
        {
            scale_p = 2.0f;
            scale_v = 2.0f;
        }
        p_j = rho_0 * pow(__p.rho/rho_0 - 1.0f,y);
        F_p = add_float3(F_p, scale_float3(scale_p * -1.0f * __p.m * (p_i + p_j) / (2.0f*__p.rho), Gradient_Kernel_Spiky(r, h)));
        F_v = add_float3(F_v, scale_float3(scale_v * Lap_Kernel_Viscosity(r, h), scale_float3(0.1f * __p.m, dif_float3(__p.vel, _p.vel))));
        //G_cs = add_float3(G_cs, scale_float3(__p.m, Gradient_Kernel_Poly6(r, h)));
        //L_cs = __p.m * Lap_Kernel_Poly6(r, h);
    }
    if (L_cs > 0.01)
    {
        //F_ex = dif_float3(F_ex, scale_float3(0.002f * L_cs / length(G_cs), G_cs));
    }
    accel = add_float3(accel, gravity);
    accel = add_float3(accel, F_ex);
    accel = add_float3(accel, scale_float3(rho_0, add_float3(F_p, F_v)));
    // Semi-implicit Euler: update velocity first, then advect with it.
    // NOTE(review): the neighbour loop reads other particles' state while
    // those threads may already be writing it — results depend on scheduling.
    p[idx].vel = add_float3(_p.vel, scale_float3(t, accel));
    p[idx].pos = add_float3(_p.pos, scale_float3(t, p[idx].vel));
    return;
}
|
2,415 | #include <iostream>
#include <stdio.h>
// Each block prints its own index (device-side printf; debugging only).
__global__ void kernel() {
printf("Just a test! I am point cloud %d!\n", blockIdx.x);
}
int main() {
    // Launch 9 single-thread blocks; each prints its block index.
    kernel<<<9, 1>>>();
    cudaDeviceSynchronize(); // wait for the kernel to finish so its printf output is flushed before exit
    // Bug fix: the original returned 1, signalling failure to the shell even
    // though the program succeeded.
    return 0;
} |
2,416 | #include <stdio.h>
//#include <cuda_runtime.h>
#include <inttypes.h> //для использования uint8_t
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
}
struct complex { //структура для комлексного числа
float real; //действительная часть
float imag; //мнимая часть
};
// Complex product: (a+bi)(c+di) = (ac - bd) + (ad + cb)i.
__device__ struct complex complex_mul(struct complex z1, struct complex z2)
{
    struct complex product;
    product.real = z1.real * z2.real - z1.imag * z2.imag;
    product.imag = z1.real * z2.imag + z2.real * z1.imag;
    return product;
}
// Complex sum: add real and imaginary parts component-wise.
__device__ struct complex complex_sum(struct complex z1, struct complex z2)
{
    struct complex total;
    total.real = z1.real + z2.real;
    total.imag = z1.imag + z2.imag;
    return total;
}
// Count iterations of z -> z*z + c (c = x + yi) until |z|^2 >= 4, capped at
// max_iters; points that never escape return max_iters.
__device__ uint8_t mandel(float x, float y, int max_iters) {
struct complex c; //c = complex(x, y)
c.real = x, c.imag = y;
struct complex z; //z = 0.0j
z.real = z.imag = 0.0f;
for (int i = 0; i < max_iters; i++) {
z = complex_mul(z, z); //z = z*z + c; the Mandelbrot map
z = complex_sum(z, c);
if ((z.real * z.real + z.imag * z.imag) >= 4)
return i;
}
return max_iters;
}
// Render the Mandelbrot set over [min_x,max_x] x [min_y,max_y] into `image`
// (one escape-iteration count per pixel, row-major width x height).
// Both loops are grid-stride, so any launch shape covers the whole image.
__global__ void create_fractal_dev(float min_x, float max_x, float min_y, float max_y,
uint8_t *image, int height, int width, int iters)
{
float pixel_size_x = (max_x - min_x) / (width); // pixel size in the complex plane
float pixel_size_y = (max_y - min_y) / (height);
int startX = threadIdx.x + blockDim.x * blockIdx.x;
int startY = threadIdx.y + blockDim.y * blockIdx.y;
int gridX = gridDim.x * blockDim.x;
int gridY = gridDim.y * blockDim.y;
for (int x = startX; x < width; x += gridX) {
float real = min_x + x * pixel_size_x;
for (int y = startY; y < height; y += gridY) {
float imag = min_y + y * pixel_size_y;
uint8_t color = mandel(real, imag, iters);
image[x + y * width] = color; // store the pixel's escape count
}
}
}
int main() {
    // Render a Mandelbrot image on the GPU, time the kernel, and dump the
    // N x M result as whitespace-separated integers to "rez.dat".
    int N = 1024, M = 1536; // image dimensions: N rows (height), M columns (width)
    const size_t size_in_bytes = N * M * sizeof(uint8_t);
    // Device image, zero-filled.
    uint8_t *A_dev;
    CUDA_CHECK_RETURN(cudaMalloc( (void **) &A_dev, size_in_bytes));
    CUDA_CHECK_RETURN(cudaMemset(A_dev, 0, size_in_bytes));
    // Host image.  Bug fix: the original called cudaMemset on this *host*
    // pointer, which is invalid (and the error was ignored); no fill is
    // needed since the cudaMemcpy below overwrites the whole buffer.
    uint8_t *A_hos = (uint8_t*) malloc(size_in_bytes);
    if (!A_hos) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    dim3 dimBlock(32, 8); // threads per block
    dim3 dimGrid(32,16);  // blocks in the grid
    // CUDA events for timing the kernel.
    cudaEvent_t start, stop;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    cudaEventRecord(start, 0);
    create_fractal_dev <<<dimGrid,dimBlock >>> (-2.0, 1.0, -1.0, 1.0, A_dev, N, M, 20);
    CUDA_CHECK_RETURN(cudaGetLastError()); // catch bad launch configuration
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Elapsed time (cudaEventElapsedTime reports milliseconds).
    float time;
    cudaEventElapsedTime(&time, start, stop);
    printf("time = %f s\n", time / 1000);
    // Copy the image back and write it to disk.
    CUDA_CHECK_RETURN(cudaMemcpy(A_hos, A_dev, size_in_bytes, cudaMemcpyDeviceToHost));
    FILE *fp = fopen("rez.dat", "w");
    if (fp) {
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < M; j++) {
                fprintf(fp, "%d ", A_hos[i * M + j]);
            }
            fprintf(fp, "\n");
        }
        fclose(fp);
    }
    // Release resources (the original leaked all of these).
    free(A_hos);
    cudaFree(A_dev);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
2,417 | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
// TVM-generated fused convolution kernel (machine-written; do not hand-edit
// the index arithmetic).  All hard-coded strides assume the exact launch
// shape used by Conv2dRcpFusedNchwKernelLauncher: grid (2,4,8), block (4,1,2).
__global__ void default_function_kernel0(const float* __restrict__ Data,
const float* __restrict__ K0,
const float* __restrict__ K1,
const float* __restrict__ KC,
float* __restrict__ Output) {
float Output_local[128];
__shared__ float pad_temp_shared[640];
__shared__ float K0_shared[8];
__shared__ float K1_shared[1];
__shared__ float KC_shared[3];
for (int ww_inner_outer = 0; ww_inner_outer < 2; ++ww_inner_outer) {
// Zero the per-thread accumulator tile.
for (int nn_c_init = 0; nn_c_init < 2; ++nn_c_init) {
for (int oca_c_init = 0; oca_c_init < 4; ++oca_c_init) {
for (int hh_c_init = 0; hh_c_init < 8; ++hh_c_init) {
for (int ww_c_init = 0; ww_c_init < 2; ++ww_c_init) {
Output_local[((((nn_c_init * 64) + (oca_c_init * 16)) + (hh_c_init * 2)) + ww_c_init)] = 0.000000e+00f;
}
}
}
}
for (int rr_outer = 0; rr_outer < 11; ++rr_outer) {
for (int rca_outer = 0; rca_outer < 2; ++rca_outer) {
for (int rcb_outer = 0; rcb_outer < 4; ++rcb_outer) {
for (int rw_outer = 0; rw_outer < 3; ++rw_outer) {
// Cooperatively stage inputs into shared memory (boundary reads padded with 0).
__syncthreads();
for (int ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner < 80; ++ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) {
pad_temp_shared[(((((int)threadIdx.z) * 320) + (((int)threadIdx.x) * 80)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner)] = ((((((1 - (ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner / 8)) <= (((int)blockIdx.y) * 8)) && ((((int)blockIdx.y) * 8) < (33 - (ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner / 8)))) && (((1 - rw_outer) - (ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner % 8)) <= ((((int)blockIdx.x) * 16) + (ww_inner_outer * 8)))) && (((((int)blockIdx.x) * 16) + (ww_inner_outer * 8)) < ((33 - rw_outer) - (ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner % 8)))) ? Data[((((((((((((((((int)blockIdx.z) / 4) * 65536) + (((int)threadIdx.z) * 32768)) + ((((((int)threadIdx.x) * 80) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) / 160) * 16384)) + (rca_outer * 8192)) + (((((((int)threadIdx.x) * 80) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 160) / 80) * 4096)) + (rcb_outer * 1024)) + (((int)blockIdx.y) * 256)) + ((ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner / 8) * 32)) + (((int)blockIdx.x) * 16)) + (ww_inner_outer * 8)) + rw_outer) + (ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner % 8)) - 33)] : 0.000000e+00f);
}
if ((((((int)threadIdx.z) * 4) + ((int)threadIdx.x)) / 8) < (4 - rr_outer)) {
K0_shared[((((int)threadIdx.z) * 4) + ((int)threadIdx.x))] = K0[((((((((((int)threadIdx.z) * 4) + ((int)threadIdx.x)) / 8) * 44) + (rr_outer * 44)) + (rca_outer * 22)) + (((((((int)threadIdx.z) * 4) + ((int)threadIdx.x)) % 8) / 4) * 11)) + ((int)threadIdx.x))];
}
if (((int)threadIdx.x) < (1 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < ((4 - rr_outer) - ((int)threadIdx.z))) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[(((((((int)threadIdx.x) * 44) + (((int)threadIdx.z) * 44)) + (rr_outer * 44)) + (rcb_outer * 11)) + (((int)blockIdx.z) % 4))];
}
}
}
if ((((int)threadIdx.z) * 2) < (3 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
KC_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = KC[((((((int)threadIdx.z) * 66) + (((int)threadIdx.x) * 33)) + (rw_outer * 11)) + rr_outer)];
}
}
__syncthreads();
// Multiply-accumulate over the staged tile.
for (int rca_inner = 0; rca_inner < 2; ++rca_inner) {
for (int rh_inner = 0; rh_inner < 3; ++rh_inner) {
for (int nn_c = 0; nn_c < 2; ++nn_c) {
for (int oca_c = 0; oca_c < 4; ++oca_c) {
for (int hh_c = 0; hh_c < 8; ++hh_c) {
for (int ww_c = 0; ww_c < 2; ++ww_c) {
Output_local[((((nn_c * 64) + (oca_c * 16)) + (hh_c * 2)) + ww_c)] = (Output_local[((((nn_c * 64) + (oca_c * 16)) + (hh_c * 2)) + ww_c)] + (((pad_temp_shared[(((((((((int)threadIdx.z) * 320) + (nn_c * 160)) + (rca_inner * 80)) + (hh_c * 8)) + (rh_inner * 8)) + (((int)threadIdx.x) * 2)) + ww_c)] * K0_shared[((rca_inner * 4) + oca_c)]) * K1_shared[0]) * KC_shared[rh_inner]));
}
}
}
}
}
}
}
}
}
}
// Write the accumulator tile back to global memory.
for (int nn_inner_inner_inner = 0; nn_inner_inner_inner < 2; ++nn_inner_inner_inner) {
for (int oca_inner_inner_inner = 0; oca_inner_inner_inner < 4; ++oca_inner_inner_inner) {
for (int hh_inner_inner_inner = 0; hh_inner_inner_inner < 8; ++hh_inner_inner_inner) {
for (int ww_inner_inner_inner = 0; ww_inner_inner_inner < 2; ++ww_inner_inner_inner) {
Output[((((((((((((((int)blockIdx.z) / 4) * 65536) + (((int)threadIdx.z) * 32768)) + (nn_inner_inner_inner * 16384)) + (oca_inner_inner_inner * 4096)) + ((((int)blockIdx.z) % 4) * 1024)) + (((int)blockIdx.y) * 256)) + (hh_inner_inner_inner * 32)) + (((int)blockIdx.x) * 16)) + (ww_inner_outer * 8)) + (((int)threadIdx.x) * 2)) + ww_inner_inner_inner)] = Output_local[((((nn_inner_inner_inner * 64) + (oca_inner_inner_inner * 16)) + (hh_inner_inner_inner * 2)) + ww_inner_inner_inner)];
}
}
}
}
}
}
// Host wrapper: launches the TVM-generated fused kernel with the exact
// grid/block shape its hard-coded indexing was compiled for — the two must
// never be changed independently.
// NOTE(review): neither the launch nor cudaDeviceSynchronize() is
// error-checked here, so kernel failures are silent.
void Conv2dRcpFusedNchwKernelLauncher(const float* U, const float* K0,
const float* K1, const float* KC, float* V){
dim3 gridDim0(2, 4, 8);
dim3 blockDim0(4, 1, 2);
default_function_kernel0<<<gridDim0, blockDim0>>>(U, K0, K1, KC, V);
cudaDeviceSynchronize();
}
#endif
|
2,418 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
int main(void)
{
    // Round-trip N doubles host -> device -> device -> host and assert the
    // data survives unchanged.
    const int N = 1000;
    double *a_h, *b_h; // pointers to host memory
    double *a_d, *b_d; // pointers to device memory
    // allocate arrays on host
    a_h = new double [N];
    b_h = new double [N];
    // Allocate arrays on device.  Bug fix: the original ignored every CUDA
    // return code, so on failure the final loop would compare stale zeros
    // instead of reporting the real problem.
    assert(cudaMalloc((void **) &a_d, sizeof(*a_d)*N) == cudaSuccess);
    assert(cudaMalloc((void **) &b_d, sizeof(*b_d)*N) == cudaSuccess);
    // initialize host data
    for (int i=0; i<N; i++) {
        a_h[i] = 10.0+i;
        b_h[i] = 0.0;
    }
    // cudaMemcpyDefault infers the direction from the pointer kinds
    // (requires unified virtual addressing).
    assert(cudaMemcpy(a_d, a_h, sizeof(double)*N, cudaMemcpyDefault) == cudaSuccess); // host -> device
    assert(cudaMemcpy(b_d, a_d, sizeof(double)*N, cudaMemcpyDefault) == cudaSuccess); // device -> device
    assert(cudaMemcpy(b_h, b_d, sizeof(double)*N, cudaMemcpyDefault) == cudaSuccess); // device -> host
    // check result: the copies must be bit-exact
    for (int i=0; i<N; i++)
        assert(a_h[i] == b_h[i]);
    // cleanup
    delete [] a_h;
    delete [] b_h;
    cudaFree(a_d);
    cudaFree(b_d);
    printf("Jezeli widzisz ten napis, to program dziala poprawnie\n");
    return 0;
}
|
2,419 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <ctype.h>
#include <vector>
#include <string>
typedef std::vector<double> double_vec;
int main()
{
    // Read one closing price per line from stdin; stop at EOF or at the
    // first line that does not start with a digit, then report 10-year and
    // last-year statistics computed on the GPU with thrust::reduce.
    double_vec stocks;
    std::string value;
    while (std::getline(std::cin, value))
    {
        // Bug fix: the original indexed value[0] without handling an empty
        // line and did not check the stream state.
        if (value.empty() || !isdigit((unsigned char)value[0]))
        {
            break;
        }
        stocks.push_back(std::stod(value));
    }
    if (stocks.empty())
    {
        return 1; // nothing to summarise
    }
    thrust::host_vector<double> host(stocks.begin(), stocks.end());
    thrust::device_vector<double> dev(host);
    const int n = int(stocks.size());
    // Bug fix: dev.end() - 365 was undefined behaviour with fewer than 365
    // samples; clamp the "last year" window to the data actually available.
    const int lastYear = n < 365 ? n : 365;
    double sumTotal = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>());
    double avg10y = sumTotal / n;
    double sumLastYear = thrust::reduce(dev.end() - lastYear, dev.end(), 0.0, thrust::plus<double>());
    double avgLastYear = sumLastYear / lastYear;
    // Seed the minimum with the computed maximum so an all-positive series
    // cannot be beaten by a neutral initial value.
    double maxTotal = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::maximum<double>());
    double minTotal = thrust::reduce(dev.begin(), dev.end(), maxTotal, thrust::minimum<double>());
    double maxLastYear = thrust::reduce(dev.end() - lastYear, dev.end(), 0.0, thrust::maximum<double>());
    double minLastYear = thrust::reduce(dev.end() - lastYear, dev.end(), maxLastYear, thrust::minimum<double>());
    std::cout << "Preço médio nos últimos 10 anos: US$ " << avg10y << std::endl;
    std::cout << "Preço máximo e mínimo dos últimos 10 anos: " << std::endl;
    std::cout << " Máximo -> US$ " << maxTotal << std::endl;
    std::cout << " Mínimo -> US$ " << minTotal << std::endl;
    std::cout << "Preço médio no último ano: US$ " << avgLastYear << std::endl;
    std::cout << "Preço máximo e mínimo do último ano: " << std::endl;
    std::cout << " Máximo -> US$ " << maxLastYear << std::endl;
    std::cout << " Mínimo -> US$ " << minLastYear << std::endl;
    return 0;
}
|
2,420 | #include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <curand.h>
#include <curand_kernel.h>
// In the following section, define the prob distribution parameters
#define N_PARAMS 3
#define PARAM1 50.0f, 3.0f, 0.5f // format: LAMBDA, A, B
#define PARAM2 1.5f, 0.8f, 5.0f
// parameters saved as constants
unsigned int N_BYTES_PRM = N_PARAMS * sizeof(float); // size of parameter
unsigned int N_SIMS, N_BLK, N_THRD, N_BYTES_I, N_BYTES_F;
const unsigned int MAX_THREADS = 512; // max threads per block
// Calculate and return mean of an array of floats
// Arithmetic mean of the first n entries of arr.
// Each term is divided by n before accumulating (in double) to limit both
// overflow and rounding error; the total is then narrowed back to float.
float calcMean(float arr[], unsigned int const n) {
    double acc = 0.0;
    for (unsigned int idx = 0; idx < n; idx++) {
        acc += (arr[idx] / n);
    }
    return acc;
}
// Create a CUDA event and record it on the default stream (stream 0).
// NOTE(review): the caller owns the returned event; nothing in the visible
// code calls cudaEventDestroy, so each call leaks one event handle.
__host__ cudaEvent_t get_time(void) {
cudaEvent_t time;
cudaEventCreate(&time);
cudaEventRecord(time);
return time;
}
// Based on parameter, draw poisson random number as frequency of losses
// Draw one Poisson(lambda) loss frequency per simulation (prm[0] = lambda).
// NOTE(review): seeding curand with the thread index gives reproducible runs,
// but per-thread streams created from different *seeds* (rather than one seed
// with different sequence numbers) have weaker independence guarantees.
__global__ void sim_freq(unsigned int *f_out, float *prm, unsigned int N) {
unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
float lambda = prm[0]; // lambda for poisson
curandState_t state; // initialize rand state
curand_init(tid, 0, 0, &state); // set seed to thread index
f_out[tid] = curand_poisson(&state, lambda); // save loss frequency
}
}
// Based on parameter and freq, draw and sum pareto loss amounts
// For each simulation, draw freq[tid] Pareto(A, B) loss amounts by
// inverse-CDF sampling (prm[1] = shape A, prm[2] = scale B) and store their
// sum.  Uses the same seed (tid) as sim_freq, so both kernels restart the
// same per-thread stream.
__global__ void sim_severity(float *loss_out, unsigned int *freq, float *prm,
const unsigned int N) {
unsigned int const tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
double A = prm[1]; double B = prm[2]; // two pareto parameters
curandState_t state; // initialize rand state
curand_init(tid, 0, 0, &state); // set seed to thread index
double sum = 0.0;
double unif = 0.0; // temp var for storing uniform rand
for (int f=0; f < freq[tid]; f++) {
unif = curand_uniform_double(&state); // draw unif rand as double
sum += B / pow(1-unif, 1/A); // quantile function (inverse CDF)
}
loss_out[tid] = (float) sum; // sum of all losses
}
}
// No-op placeholder; not called anywhere in the visible portion of the file.
void asynch() {
return;
}
// Runs N_SIMS frequency/severity Monte Carlo simulations twice — once with
// two overlapping CUDA streams, once serially — and reports mean loss and
// wall time for each approach.
int main(int argc, char* argv[]) {
if (argc == 2) { // get number of simulations based on CMDLINE input
N_SIMS = atoi(argv[1]);
} else {
printf("Usage: %s [nSimulations].\n", argv[0]);
return EXIT_FAILURE;
}
N_BLK = N_SIMS / MAX_THREADS + 1; // min of one block
N_THRD = std::min(N_SIMS, MAX_THREADS); // num of threads per block
N_BYTES_F = N_SIMS * sizeof(float); // size of loss array
N_BYTES_I = N_SIMS * sizeof(unsigned int); // size of frequency array
printf("Running %u simulations ...\n", N_SIMS);
cudaStream_t s1, s2; // Create and initialize streams
cudaStreamCreate(&s1);
cudaStreamCreate(&s2);
// allocate and copy parameter to device
float h_prm1 [N_PARAMS] = {PARAM1};
float h_prm2 [N_PARAMS] = {PARAM2};
// pin the stack arrays so the async copies below are truly asynchronous
cudaHostRegister(h_prm1, N_BYTES_PRM, cudaHostRegisterDefault);
cudaHostRegister(h_prm2, N_BYTES_PRM, cudaHostRegisterDefault);
float *d_prm1, *d_prm2; // parameter on device memory
cudaMalloc((void **)&d_prm1, N_BYTES_PRM); // allocate and copy
cudaMalloc((void **)&d_prm2, N_BYTES_PRM);
cudaMemcpyAsync(d_prm1, h_prm1, N_BYTES_PRM, cudaMemcpyHostToDevice, s1);
cudaMemcpyAsync(d_prm2, h_prm2, N_BYTES_PRM, cudaMemcpyHostToDevice, s2);
unsigned int *h_freq1, *d_freq1, *h_freq2, *d_freq2; // frequency arrays
float *h_loss1, *d_loss1, *h_loss2, *d_loss2; // loss arrays
cudaMalloc((void **)&d_freq1, N_BYTES_I); // device mem for freq and loss
cudaMalloc((void **)&d_loss1, N_BYTES_F);
cudaMalloc((void **)&d_freq2, N_BYTES_I);
cudaMalloc((void **)&d_loss2, N_BYTES_F);
cudaMallocHost((void**)&h_freq1, N_BYTES_I); // pinned host array
cudaMallocHost((void**)&h_loss1, N_BYTES_F);
cudaMallocHost((void**)&h_freq2, N_BYTES_I);
cudaMallocHost((void**)&h_loss2, N_BYTES_F);
float dur, mean1, mean2; // to record duration and averages
/****** asynchronous run *****
******************************/
cudaEvent_t start = get_time(); // start clock
cudaEvent_t copyEnd1, copyEnd2; // event to ensure copying loss is finished
// FIX: the original read "cudaEventCreate(©End1)" — the '&copy' in
// '&copyEnd1' had been mangled into the '©' entity, which does not compile.
cudaEventCreate(&copyEnd1); cudaEventCreate(&copyEnd2);
// first simulate frequency of losses in two streams
sim_freq<<<N_BLK, N_THRD, 0, s1>>>(d_freq1, d_prm1, N_SIMS);
sim_freq<<<N_BLK, N_THRD, 0, s2>>>(d_freq2, d_prm2, N_SIMS);
// based on frequency, draw pareto loss amounts and sum
sim_severity<<<N_BLK, N_THRD, 0, s1>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
sim_severity<<<N_BLK, N_THRD, 0, s2>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
cudaMemcpyAsync(h_loss1, d_loss1, N_BYTES_F, cudaMemcpyDeviceToHost, s1);
cudaEventRecord(copyEnd1, s1); // finish copying result in stream 1
cudaMemcpyAsync(h_loss2, d_loss2, N_BYTES_F, cudaMemcpyDeviceToHost, s2);
cudaEventRecord(copyEnd2, s2); // finish copying result in stream 2
cudaMemcpyAsync(h_freq1, d_freq1, N_BYTES_I, cudaMemcpyDeviceToHost, s1);
cudaMemcpyAsync(h_freq2, d_freq2, N_BYTES_I, cudaMemcpyDeviceToHost, s2);
cudaEventSynchronize(copyEnd1); // wait for result copy before calculation
mean1 = calcMean(h_loss1, N_SIMS);
cudaEventSynchronize(copyEnd2); // wait for result copy before calculation
mean2 = calcMean(h_loss2, N_SIMS);
cudaStreamSynchronize( s1 );
cudaStreamSynchronize( s2 );
cudaEvent_t stop = get_time(); // stop clock
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dur, start, stop);
printf("\tasynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
mean1, mean2, dur);
/****** synchronous run ******
******************************/
start = get_time();
sim_freq<<<N_BLK, N_THRD>>>(d_freq1, d_prm1, N_SIMS);
sim_severity<<<N_BLK, N_THRD>>>(d_loss1, d_freq1, d_prm1, N_SIMS);
cudaMemcpy(h_freq1, d_freq1, N_BYTES_I, cudaMemcpyDeviceToHost);
cudaMemcpy(h_loss1, d_loss1, N_BYTES_F, cudaMemcpyDeviceToHost);
sim_freq<<<N_BLK, N_THRD>>>(d_freq2, d_prm2, N_SIMS);
sim_severity<<<N_BLK, N_THRD>>>(d_loss2, d_freq2, d_prm2, N_SIMS);
cudaMemcpy(h_freq2, d_freq2, N_BYTES_I, cudaMemcpyDeviceToHost);
cudaMemcpy(h_loss2, d_loss2, N_BYTES_F, cudaMemcpyDeviceToHost);
mean1 = calcMean(h_loss1, N_SIMS);
mean2 = calcMean(h_loss2, N_SIMS);
stop = get_time(); // stop time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dur, start, stop);
printf("\tsynchronously:\t loss1=%.3f, loss2=%.3f, %.3f ms taken, \n",
mean1, mean2, dur);
return EXIT_SUCCESS;
}
|
2,421 | #include "includes.h"
// Per-pixel RGB -> CIELAB conversion. One thread per pixel of a
// width x height image stored row-major with the given element stride.
// The matrix coefficients match the standard sRGB(D65) RGB->XYZ transform;
// NOTE(review): no gamma linearization is applied here, so the input is
// presumably already linear RGB — confirm against callers.
// Alpha (in.w) is passed through unchanged.
__global__ void cuConvertRGBToLABKernel(const float4* src, float4* dst, size_t stride, int width, int height, bool isNormalized)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*stride + x; // linear index; computed before the guard but only dereferenced inside it
if (x<width && y<height)
{
// Read
float4 in = src[c];
if (!isNormalized)
{
// 8-bit input: rescale to [0,1]
in.x /= 255.0f;
in.y /= 255.0f;
in.z /= 255.0f;
in.w /= 255.0f;
}
float R = in.x;
float G = in.y;
float B = in.z;
// convert to XYZ
float4 XYZ;
XYZ.x = 0.4124564f*R + 0.3575761f*G + 0.1804375f*B;
XYZ.y = 0.2126729f*R + 0.7151522f*G + 0.0721750f*B;
XYZ.z = 0.0193339f*R + 0.1191920f*G + 0.9503041f*B;
// normalize for D65 white point
XYZ.x /= 0.950456f;
XYZ.z /= 1.088754f;
float cubeRootX, cubeRootY, cubeRootZ;
// CIELAB thresholds: T1 = (6/29)^3, T2 = (29/6)^2 * 3 — the linear-segment constants
const float T1 = 216/24389.0f;
const float T2 = 24389/27.0f;
if (XYZ.x > T1)
cubeRootX = cbrtf(XYZ.x);
else
cubeRootX = (T2 * XYZ.x + 16) / 116;
if (XYZ.y > T1)
cubeRootY = cbrtf(XYZ.y);
else
cubeRootY = (T2 * XYZ.y + 16) / 116;
if (XYZ.z > T1)
cubeRootZ = cbrtf(XYZ.z);
else
cubeRootZ = (T2 * XYZ.z + 16) / 116;
// L in .x, a in .y, b in .z; alpha preserved in .w
dst[c] = make_float4(116*cubeRootY-16, 500*(cubeRootX-cubeRootY), 200*(cubeRootY-cubeRootZ), in.w);
}
} |
2,422 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <sstream>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include "changeDatatype.cuh"
using namespace std;
// Layout-permuting copy: each thread moves one element from srcData (laid out
// h-major: [h][w][c][n], judging by the read index) to dstData in NCHW order
// ([n][c][h][w], per the write index).
// NOTE(review): idxCol/idxRow are computed but never used — presumably dead
// code from an earlier indexing scheme; confirm before relying on them.
// NOTE(review): the fminf clamps silently alias out-of-range blocks/threads
// onto the last valid element instead of skipping them — repeated writes to
// the same destination are benign only because the written value is identical.
__global__ void changeType(float* srcData, float* dstData, int n, int c, int h, int w, int filtersPerThread_x, int filtersPerThread_y) {
const int idxCol = threadIdx.y + blockDim.y*blockIdx.y;
const int idxRow = threadIdx.x + blockDim.x*blockIdx.x;
int maxBlock = (n * c) / (filtersPerThread_x * filtersPerThread_y);
int idxBlock = (int)fminf((float)(blockIdx.y * gridDim.x + blockIdx.x), (float)(maxBlock));
const int idxfilterW = threadIdx.x % w; // column within one h x w filter plane
const int idxfilterH = threadIdx.y % h; // row within one filter plane
int threadChannelX = threadIdx.x / w; // which of the filtersPerThread_x tiles this thread covers
int threadChannelY = threadIdx.y / h;
int idxChannel_a =idxBlock * filtersPerThread_x * filtersPerThread_y + threadChannelY *filtersPerThread_x + threadChannelX;
int idxChannel = idxChannel_a % c;
int idxN = (int)fminf((float)(idxChannel_a / c), (float)(n-1)); // clamp batch index into range
dstData[idxN * (c * w* h) + idxChannel * (w*h) + idxfilterH * w + idxfilterW] = srcData[idxfilterH * (n * c * w) + idxfilterW * (c * n) + idxChannel * n + idxN];
}
// Host launcher for changeType. Packs several h x w filter planes into each
// 30x30 thread block (filtersPerThread_* = 30 / dim), so this assumes
// h <= 30 and w <= 30 — TODO confirm; h or w > 30 makes the divisor 0.
// Grid x is capped at 255 with extra rows in y; the kernel clamps any
// surplus blocks internally.
void changeDataType(float* srcData, float* dstData, int n, int c, int h, int w) {
int filtersPerThread_x = 30 / w;
int filtersPerThread_y = 30 / h;
int totalBlocks = (c * n) / (filtersPerThread_x * filtersPerThread_y) + 1;
int numBlock_y = totalBlocks / 255 + 1;
dim3 numOfBlocks(255, numBlock_y, 1);
dim3 threadsPerBlock(30, 30, 1);
changeType <<< numOfBlocks, threadsPerBlock >> > (srcData, dstData, n, c, h, w, filtersPerThread_x, filtersPerThread_y);
} |
2,423 | //pass
//--blockDim=[1,128] --gridDim=[512,6]
#include <cuda.h>
//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////
//----------------------------------------------------------------------------
// File: Convolution.cpp
//
// Implement C++ AMP based simple and tiled version of Convolution filter used in
// image processing.
//----------------------------------------------------------------------------
#define DEFAULT_WIDTH 512
#define DEFAULT_HEIGHT 512
// TILE_SIZE should be multiple of both DEFAULT_WIDTH and DEFAULT_HEIGHT
#define TILE_SIZE 128
#define width DEFAULT_WIDTH
#define height DEFAULT_HEIGHT
#define clamp(a, b, c) ((a) < (b) ? (b) : ((a) > (c) ? (c) : (a)))
#define dim_to_convolve y
#define radius 7
//----------------------------------------------------------------------------
// Tile implementation of convolution filter along different dimension
//----------------------------------------------------------------------------
// Tiled 1-D convolution along the y dimension (see the dim_to_convolve macro):
// each block loads a TILE_SIZE strip (with `radius` halo on both ends) into
// shared memory, then interior threads compute the filtered value.
// This file is a GPUVerify/mutation-testing fixture — the guarded
// __syncthreads() below is the intended bug-injection site; do not "clean it
// up" without understanding the harness.
__global__ void convolution_tiling(const float* img, const float* filter, float* result)
{
__shared__ float local_buf[TILE_SIZE];
// Blocks overlap by 2*radius so halo elements are loaded twice.
int idx_convolve = (blockIdx.dim_to_convolve)*(TILE_SIZE - 2 * radius) + (int)(threadIdx.dim_to_convolve) - radius;
int max_idx_convolve = height;
float sum = 0.0f;
int a_idxY = blockIdx.y;
int a_idxX = blockIdx.x; // x column handled by this block (one column per block in x)
a_idxY = clamp(idx_convolve, 0, max_idx_convolve-1); // clamp halo reads to the image edge
if (idx_convolve < (max_idx_convolve + radius))
{
local_buf[threadIdx.dim_to_convolve] = img[a_idxY*width + a_idxX];
}
#ifndef MUTATION
/* BUGINJECT: REMOVE_BARRIER, DOWN */
__syncthreads();
#endif
// Only interior threads (halo excluded) produce output.
if ((int)(threadIdx.dim_to_convolve) >= radius && (int)(threadIdx.dim_to_convolve) < (TILE_SIZE - radius) && idx_convolve < max_idx_convolve)
{
for (int k = -radius; k <= radius; k++)
{
int k_idx = k + radius;
sum += local_buf[threadIdx.dim_to_convolve + k]*filter[k_idx];
}
result[a_idxY*width + a_idxX] = sum;
}
}
|
2,424 | #include "includes.h"
// Builds a D_BINS-bucket histogram of globalMemData via per-block shared-memory
// partial histograms that are atomically merged into globalHist.
// Launch: 1-D blocks (possibly a 2-D grid); dynamic shared mem >= D_BINS ints.
// Bin layout: 0 = below min, D_BINS-1 = above max, 1..D_BINS-2 = in range.
__global__ void ComputeHistogramKernel( float *globalMemData, int *globalHist )
{
//the kernel should be only 1D
int globalThreadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
int localThreadId = threadIdx.x;
extern __shared__ int partialHist[];
if(localThreadId < D_BINS)
{
//set the partial histogram in shared memory to zero
partialHist[localThreadId] = 0;
}
__syncthreads();
//if the global thread id is within bounds of the data array size
if(globalThreadId < D_MEMORY_BLOCK_SIZE)
{
//copy the global data to local memory
float myLocalDataValue = globalMemData[globalThreadId];
int binIdToWrite = 0 + (D_BINS - 1) * (myLocalDataValue > D_MAX_VALUE);
//if the local value is within limits
if(myLocalDataValue >= D_MIN_VALUE && myLocalDataValue <= D_MAX_VALUE)
{
float biasedValue = myLocalDataValue - D_MIN_VALUE;
binIdToWrite = (int)floor((double)(biasedValue/D_BIN_VALUE_WIDTH)) + 1;
if(myLocalDataValue == D_MAX_VALUE)
{
binIdToWrite = D_BINS - 2;
}
}
//write to local histogram
atomicAdd( &(partialHist[binIdToWrite]), 1);
}
// FIX: the barrier and the global flush were previously *inside* the
// bounds check above. That placed __syncthreads() in divergent control
// flow (undefined behavior when a block straddles the data tail) and, in
// such tail blocks, threads with localThreadId < D_BINS but
// globalThreadId >= size never merged their shared bins into globalHist.
__syncthreads();
if(localThreadId < D_BINS)
{
//copy values to global histogam
atomicAdd( &(globalHist[localThreadId]), partialHist[localThreadId]);
}
} |
2,425 |
// In-place Cholesky factorization of a matrix stored as a packed
// column-by-column lower triangle in db (N*(N+1)/2 floats), parallelized
// over grid-stride element updates per column.
// NOTE(review): several __syncthreads() calls below sit inside data-dependent
// loops/branches, so threads of a block can reach different barrier counts —
// this is undefined behavior; the commented-out barriers suggest the author
// was aware. Treat results as unverified for grids larger than one block.
__global__ void choleskyParalelo(float *db, int num){
int id=threadIdx.x + blockIdx.x*blockDim.x;
int x=0;
int inicio=0; // packed index of the current diagonal element
int k=0, N=num; // k = current column
int id1=id+inicio, ids=id,id2;
int N2 = N; // packed index one past the end of the current column
int NN=0, KK=0;
while(k < N){
id1=id+inicio;
// Check whether this element is the diagonal element
if(id1 == inicio){
db[id1] = sqrt(db[id1]);
}else // off-diagonal: divide by the diagonal element of its column
{
x=0;
while(id1 <N2){
while(x<1000) // busy-wait, presumably to let the sqrt land first — TODO confirm
x++;
__syncthreads();
db[id1] = db[id1]/db[inicio];
id1 += gridDim.x * blockDim.x; // grid-stride over the column
__syncthreads();
}
//__syncthreads();// make all threads wait for the stragglers
}__syncthreads();
//id=ids;
inicio += (N-k); // advance to the next diagonal element
NN = N2; // start updating the remaining (trailing) columns
KK = k+1;// each later column has one element fewer than the previous
while(NN < (int)N*(N+1)/2){
id2=id + NN; // jump to the next column
while(id2 < NN + (N-KK)){
db[id2] = db[id2] -db[id + KK]* db[KK]; // rank-1 trailing update
id2 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();
NN += (N-KK);
KK++;
}
//__syncthreads();
k++; // move to the next column
N2 += (N-k); // next diagonal element
__syncthreads();
}
} |
2,426 | #include <iostream>
#include <cstdlib>
#include <cmath>
#include <cstdio>
#include <fstream>
#include <time.h>
using namespace std;
//swap two int arrays
// Exchange the two int* values pointed at by A and B.
void swapPtrs(int **A, int **B){
int *held = *A;
*A = *B;
*B = held;
}
//clear a cuda array to -1
// Fill the first `size` entries of dev_clear with the sentinel -1.
__global__ void cudaClear(int* dev_clear, int size){
int pos = blockIdx.x*blockDim.x + threadIdx.x;
if (pos < size)
dev_clear[pos] = -1;
__syncthreads();
}
//does not modify dev_dups or dev_prefix, puts into dev_out the arrayB
// Element-wise device copy: dev_dupli[i] = dev_orig[i] for i < size.
__global__ void cudaArrayCopy(int* dev_orig, int* dev_dupli, int size){
int pos = blockIdx.x*blockDim.x + threadIdx.x;
if (pos < size)
dev_dupli[pos] = dev_orig[pos];
__syncthreads();
}
//assuming that the input dev_out is array A
// Build array C in place: blank out (set to -1) every element of dev_out
// whose position is flagged as a duplicate in dev_dups.
__global__ void arrayC(int* dev_dups, int* dev_out, int size){
int index=blockIdx.x*blockDim.x+threadIdx.x;
// FIX: bounds check must come first — the original evaluated
// dev_dups[index] before testing index<size, an out-of-bounds read for
// the launch's tail threads.
if(index<size && dev_dups[index]==1){
dev_out[index]=-1;
}
__syncthreads();
}
//generate array B
// Build array B: compact the indexes of flagged duplicates into dev_out,
// using the (inclusive) prefix sum dev_prefix for the destination slot.
__global__ void arrayB(int* dev_dups, int* dev_prefix, int* dev_out, int size){
int index=blockIdx.x*blockDim.x+threadIdx.x;
// FIX: bounds check must come first — the original read dev_dups[index]
// before testing index<size, an out-of-bounds read for tail threads.
if(index<size && dev_dups[index]==1){
dev_out[dev_prefix[index]-1]=index;
}
__syncthreads();
}
//does not modify dev_in, puts into dev_out the find_dups array
// Flag adjacent duplicates: dev_out[i] = 1 when dev_in[i] == dev_in[i+1],
// else 0; the last element is always 0. dev_in is left untouched.
__global__ void findDups(int* dev_in, int* dev_out, int size){
int pos = blockIdx.x*blockDim.x + threadIdx.x;
if (pos == size-1){
dev_out[pos] = 0;
}
else if (pos >= 0 && pos < size-1){
dev_out[pos] = (dev_in[pos] == dev_in[pos+1]) ? 1 : 0;
}
__syncthreads();
}
//see wrapper
// One Hillis–Steele scan step with offset `twopwr`:
// dev_out[i] = dev_in[i] + dev_in[i-twopwr] (plain copy below the offset).
// See prefixSumWrapper for the log2(size) driver loop.
__global__ void prefixSum(int* dev_in, int* dev_out, int twopwr, int size){
int pos = blockIdx.x*blockDim.x + threadIdx.x;
if (pos < twopwr){
dev_out[pos] = dev_in[pos];
}
else if (pos < size){
dev_out[pos] = dev_in[pos] + dev_in[pos - twopwr];
}
__syncthreads();
}
//calls prefixSum. Modifies dev_in!!! puts into dev_out the prefix sum array
// Host driver for the Hillis–Steele scan: runs log2size kernel steps with
// doubling offsets, ping-ponging between the two buffers via pointer swaps
// (local swaps only — the caller's pointers are unaffected).
// After the loop the latest result sits in the local dev_in, so the final
// swap moves it into dev_out. Side effect: the buffer behind the caller's
// dev_in ends up holding intermediate garbage.
// calls prefixSum. Modifies dev_in!!! puts into dev_out the prefix sum array
void prefixSumWrapper(int* dev_in, int* dev_out, int log2size, int size){
int twopwr;
for(int i=0; i<log2size; ++i){
twopwr=(int) pow(2,i);
prefixSum<<<(size/256)+1,256>>>(dev_in, dev_out, twopwr, size);
//flip array pointers so that we can avoid allocating a temp array
//and copying back and forth between temp and orignals
//bad side effect, dev_in will be gibberish after this function
swapPtrs(&dev_in, &dev_out);
}
swapPtrs(&dev_in, &dev_out);
}
// Driver: generates 2^20 random ints, finds adjacent duplicates
// (findDups -> prefix sum -> compact into array B, blank into array C),
// timing the GPU phases with events and writing A/B/C to text files.
// NOTE(review): no cudaEventSynchronize(cuda_stop) before the
// cudaEventElapsedTime calls — the blocking cudaMemcpys happen to order
// things, but the timing reads are technically unordered; verify.
// NOTE(review): host mallocs and device buffers are never freed (exit
// reclaims them); files are closed implicitly at destruction.
int main(){
//generate cuda timers
float cuda_elapsed_time, cuda_time_real;
cudaEvent_t cuda_start, cuda_stop, cuda_real_start;
cudaEventCreate(&cuda_start);
cudaEventCreate(&cuda_real_start);
cudaEventCreate(&cuda_stop);
//start recording total time
cudaEventRecord(cuda_real_start, 0);
//file stuff
std::ofstream afile, bfile, cfile;
remove("Adata.txt");
remove("Bdata.txt");
remove("Cdata.txt");
afile.open("Adata.txt", std::ofstream::out | std::ofstream::app);
bfile.open("Bdata.txt", std::ofstream::out | std::ofstream::app);
cfile.open("Cdata.txt", std::ofstream::out | std::ofstream::app);
//inits and allocs
int *in, *dev_in, *dev_out, size, log2size, *dev_exc, *dev_orig, *temp;
//powers of 2 only, 2^20 = 1,048,576
size=(int) pow(2,20);
log2size=(int)log2((float)size);
in = (int*)malloc(size*sizeof(int));
temp = (int*)malloc(size*sizeof(int));
cudaMalloc(&dev_in, size*sizeof(int));
cudaMalloc(&dev_out, size*sizeof(int));
cudaMalloc(&dev_exc, size*sizeof(int));
cudaMalloc(&dev_orig, size*sizeof(int));
//gen nums
srand(time(NULL));
for(int i=0; i<size; ++i){
in[i]=rand()%101;
afile<<in[i]<<"\n";
}
//dev_exc contains the prefix sum in dev_exc for the array in
//put input data into dev_in
cudaMemcpy(dev_in, in, size*sizeof(int), cudaMemcpyHostToDevice);
cudaArrayCopy<<<(size/256)+1,256>>>(dev_in, dev_orig, size);
//start recording actual algorithm after initialization
cudaEventRecord(cuda_start, 0);
//into dev_out put the find_repeats
findDups<<<(size/256)+1,256>>>(dev_in, dev_out, size);
//into dev_in put find_repeats
cudaArrayCopy<<<(size/256)+1,256>>>(dev_out, dev_in, size);
//now, dev_exc will be the prefix sum and dev_in gibberish
prefixSumWrapper(dev_in, dev_exc, log2size, size);
//therefore clear dev_in for reuse
cudaClear<<<(size/256)+1,256>>>(dev_in,size);
//and then put array B into dev_in
arrayB<<<(size/256)+1,256>>>(dev_out, dev_exc, dev_in, size);
//now, the duplicate indexes (Array B) is in dev_in
//the end of the array is signaled by -1
//generated array c with -1 at indexes to ignore
arrayC<<<(size/256)+1,256>>>(dev_out, dev_orig, size);
//stop recording time after algorithm is complete
cudaEventRecord(cuda_stop, 0);
//print final duplicate item
cudaMemcpy(temp, dev_out, size*sizeof(int), cudaMemcpyDeviceToHost);
for(int ty=size-1; ty>=0; --ty)
{
if(temp[ty]==1)
{
printf("The final duplicate is : %d at index %d\n", in[ty], ty);
break;
}
}
cudaMemcpy(in, dev_in, size*sizeof(int), cudaMemcpyDeviceToHost);
//write results to file for array B
for(int q=0; q<size; ++q){
if(in[q]!=-1){
bfile<<in[q]<<"\n";
}
}
cudaMemcpy(in, dev_orig, size*sizeof(int), cudaMemcpyDeviceToHost);
//write results to file for C
for(int q=0; q<size; ++q){
if(in[q]!=-1){
cfile<<in[q]<<"\n";
}
}
//print time
cudaEventElapsedTime(&cuda_elapsed_time, cuda_start, cuda_stop);
cudaEventElapsedTime(&cuda_time_real, cuda_real_start, cuda_stop);
printf("Total cycles including memory allocation and memcopy\nTotal cuda clock cycles : %f\nAlgorithm only cuda clock cycles : %f\n", cuda_time_real, cuda_elapsed_time);
}
|
2,427 | #include <iostream>
#include <string>
using namespace std;
// Serial (single-thread) kernel: replace every byte with code >= 'A' by '.'.
// Note this masks far more than letters — anything at or above 0x41.
__global__ void convertData(char* text, int length)
{
unsigned int pos = 0;
while (pos < length)
{
//ALPHA
char ch = text[pos];
if (ch >= 'A')
text[pos] = '.';
pos++;
}
}
// Round-trips a fixed string through the GPU, masking characters >= 'A'.
int main()
{
char input[] = "aabfdh.fdsjkl.+@!ckdsj/khj";
cout << "INPUT: " << input << endl;
const int N = sizeof(input); // includes the trailing NUL
char* devBuf = nullptr;
// Allocate device storage and push the input bytes across.
cudaMalloc(&devBuf, N*sizeof(char));
cudaMemcpy(devBuf, input, N*sizeof(char), cudaMemcpyHostToDevice);
// Single-thread launch: the kernel loops over the whole buffer itself.
convertData <<<1, 1>>>(devBuf, N);
// Pull the transformed bytes back and show them.
cudaMemcpy(input, devBuf, N*sizeof(char), cudaMemcpyDeviceToHost);
cout << "OUTPUT: " << input << endl;
cudaFree(devBuf);
return 0;
}
|
2,428 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#define N 3000
using namespace std;
// Element-wise vector addition: c[i] = a[i] + b[i] for i < N (file-level N).
__global__ void add(int *a, int *b, int *c) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
c[idx] = a[idx] + b[idx];
}
// Allocates three N-element vectors, computes c = a + b on the GPU and
// prints every triple.
int main() {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int i;
a = (int *)malloc(N*sizeof(int));
b = (int *)malloc(N*sizeof(int));
c = (int *)malloc(N*sizeof(int));
cudaMalloc( (void**)&dev_a, N * sizeof(int) );
cudaMalloc( (void**)&dev_b, N * sizeof(int) );
cudaMalloc( (void**)&dev_c, N * sizeof(int) );
for(i=0; i<N; i++){
a[i] = i;
b[i] = 2*i;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
add<<< ceil(N/512.0),512>>>(dev_a, dev_b, dev_c);
// blocking copy also synchronizes with the kernel above
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
for(i=0; i<N; i++){
cout << a[i] << " + " << b[i] << " = " << c[i] << endl;
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// FIX: the host buffers were leaked — release them too.
free(a);
free(b);
free(c);
return 0;
}
|
2,429 | #include "includes.h"
// Clamp every INCX-strided element of X into [-ALPHA, ALPHA] (N elements).
__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (idx >= N) return;
float v = fmaxf(-ALPHA, X[idx*INCX]);
X[idx*INCX] = fminf(ALPHA, v);
} |
2,430 | //
// Created by root on 2020/11/19.
//
#include "stdio.h"
#include <cuda_runtime.h>
#define DIM 128
// Block-wise sum reduction directly in global memory; each block reduces its
// blockDim.x slice of g_idata and writes the partial sum to g_odata[blockIdx].
// NOTE: the early return below assumes n is a multiple of blockDim.x (true
// for main's launch); otherwise tail blocks would diverge at the barriers.
__global__ void reduceGmem(int *g_idata, int *g_odata, int n) {
int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) {
return;
}
// if current thread id is less than half of block dim, reduce in place.
// FIX: each halving stage reads values written by the previous stage, so
// a __syncthreads() is required between stages. The original only
// synchronized once at the end, which is a data race for blockDim > 128
// (the author's own comment admitted only 128 was exercised).
if (blockDim.x >= 1024 && tid < 512) {
idata[tid] += idata[tid + 512];
}
__syncthreads();
if (blockDim.x >= 512 && tid < 256) {
idata[tid] += idata[tid + 256];
}
__syncthreads();
if (blockDim.x >= 256 && tid < 128) {
idata[tid] += idata[tid + 128];
}
__syncthreads();
if (blockDim.x >= 128 && tid < 64) {
idata[tid] += idata[tid + 64];
}
__syncthreads();
// unrolling warp into the first thread of this warp
if (tid < 32) {
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result stored in thread 0 into output
if (tid == 0) {
g_odata[blockIdx.x] = idata[0];
}
}
// Same as reduceGmem but each thread first folds 4 strided elements into
// g_idata[idx] (so the grid is 1/4 the size) before the in-place reduction.
__global__ void reduceGmemUnrolling4(int *g_idata, int *g_odata, int n) {
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // one thread per block processes 4 data
int *idata = g_idata + blockDim.x * blockIdx.x * 4;
if (idx >= n) {
return;
}
// process 4 data per thread
int a = 0, b = 0, c = 0, d = 0;
a = g_idata[idx];
if (idx + blockDim.x < n) {
b = g_idata[idx + blockDim.x];
}
if (idx + 2 * blockDim.x < n) {
c = g_idata[idx + blockDim.x * 2];
}
if (idx + 3 * blockDim.x < n) {
d = g_idata[idx + blockDim.x * 3];
}
g_idata[idx] = a + b + c + d;
__syncthreads();
// FIX: barriers added between the halving stages — each stage reads the
// previous stage's writes, so omitting them races for blockDim > 128.
if (blockDim.x >= 1024 && tid < 512) {
idata[tid] += idata[tid + 512];
}
__syncthreads();
if (blockDim.x >= 512 && tid < 256) {
idata[tid] += idata[tid + 256];
}
__syncthreads();
if (blockDim.x >= 256 && tid < 128) {
idata[tid] += idata[tid + 128];
}
__syncthreads();
if (blockDim.x >= 128 && tid < 64) {
idata[tid] += idata[tid + 64];
}
__syncthreads();
if (tid < 32) {
volatile int *s_vmem = idata;
s_vmem[tid] += s_vmem[tid + 32];
s_vmem[tid] += s_vmem[tid + 16];
s_vmem[tid] += s_vmem[tid + 8];
s_vmem[tid] += s_vmem[tid + 4];
s_vmem[tid] += s_vmem[tid + 2];
s_vmem[tid] += s_vmem[tid + 1];
}
if (tid == 0) {
g_odata[blockIdx.x] = idata[0];
}
}
// 4-way unrolled reduction using dynamic shared memory (launch with
// blockDim.x * sizeof(int) bytes of shared memory).
__global__ void reduceSMemUnrolling4(int *g_idata, int *g_odata, int n) {
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x * 4 + tid;
if (idx >= n) {
return;
}
extern __shared__ int smem[]; // dynamic shared memory for the block reduction
int a = 0, b = 0, c = 0, d = 0; // Each thread still processes 4 data
a = g_idata[idx];
if (idx + blockDim.x < n) {
b = g_idata[idx + blockDim.x];
}
if (idx + 2 * blockDim.x < n) {
c = g_idata[idx + 2 * blockDim.x];
}
if (idx + 3 * blockDim.x < n) {
d = g_idata[idx + 3 * blockDim.x];
}
smem[tid] = a + b + c + d; // Save result of 4 data into shared memory
__syncthreads();
// FIX: barriers added between the halving stages; the original had none
// until after the tid<64 stage, racing for blockDim > 128.
if (blockDim.x >= 1024 && tid < 512) {
smem[tid] += smem[tid + 512];
}
__syncthreads();
if (blockDim.x >= 512 && tid < 256) {
smem[tid] += smem[tid + 256];
}
__syncthreads();
if (blockDim.x >= 256 && tid < 128) {
smem[tid] += smem[tid + 128];
}
__syncthreads();
if (blockDim.x >= 128 && tid < 64) {
smem[tid] += smem[tid + 64];
}
__syncthreads();
if (tid < 32) {
volatile int* s_vmem = smem;
s_vmem[tid] += s_vmem[tid + 32];
s_vmem[tid] += s_vmem[tid + 16];
s_vmem[tid] += s_vmem[tid + 8];
s_vmem[tid] += s_vmem[tid + 4];
s_vmem[tid] += s_vmem[tid + 2];
s_vmem[tid] += s_vmem[tid + 1];
}
if (tid == 0) {
g_odata[blockIdx.x] = smem[0];
}
}
// Print the expected reduction target: sum of 0..size-1 in int arithmetic
// (wraps exactly like the kernels' int accumulation for large sizes).
void test(int size) {
int expected = 0;
for (int v = 0; v < size; ++v)
expected += v;
printf("Target is %d\n", expected);
}
// Benchmarks the three reduction kernels on 2^22 ints (0..n-1), summing the
// per-block partials on the host after each run and printing the totals.
// NOTE(review): the "Target" and the computed sums overflow int identically,
// so the printed values still match each other.
int main() {
int size = 1 << 22;
int blockSize = DIM;
test(size); // verify the result
dim3 blockDim(blockSize);
dim3 gridDim((size + blockDim.x - 1) / blockDim.x);
printf("grid:(%d), block:(%d)\n", gridDim.x, blockDim.x);
int nBytes = size * sizeof(int);
int *h_idata = (int *) malloc(nBytes);
int *h_odata = (int *) malloc(gridDim.x * sizeof(int));
// Valid output per block is stored in the first thread of each block.
// So the number of output to be added is equal to the grid dim
int *d_odata;
int *d_idata;
cudaMalloc(&d_idata, nBytes);
cudaMalloc(&d_odata, gridDim.x * sizeof(int));
for (int i = 0; i < size; i++) {
h_idata[i] = i;
}
cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
reduceGmem<<<gridDim, blockDim>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
cudaMemcpy(h_odata, d_odata, gridDim.x * sizeof(int), cudaMemcpyDeviceToHost);
int sum = 0;
for (int i = 0; i < gridDim.x; i++) {
sum += h_odata[i];
}
printf("\n=========\n");
printf("sum = %d\n", sum);
memset(h_odata, 0, gridDim.x * sizeof(int));
cudaMemset(d_odata, 0, gridDim.x * sizeof(int));
// re-upload: the kernels reduce g_idata in place, so it must be refreshed
cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
dim3 gridDim_(gridDim.x / 4); // unrolled kernels cover 4 blocks' data each
reduceGmemUnrolling4<<<gridDim_, blockDim>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
cudaMemcpy(h_odata, d_odata, gridDim_.x * sizeof(int), cudaMemcpyDeviceToHost);
sum = 0;
for (int i = 0; i < gridDim_.x; i++) {
sum += h_odata[i];
}
printf("\n=========\n");
printf("sum = %d\n", sum);
memset(h_odata, 0, gridDim.x * sizeof(int));
cudaMemset(d_odata, 0, gridDim.x * sizeof(int));
cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
// third argument: dynamic shared-memory bytes for the block reduction
reduceSMemUnrolling4<<<gridDim_, blockDim, DIM * sizeof(int )>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
cudaMemcpy(h_odata, d_odata, gridDim_.x * sizeof(int), cudaMemcpyDeviceToHost);
sum = 0;
for (int i = 0; i < gridDim_.x; i++) {
sum += h_odata[i];
}
printf("\n=========\n");
printf("sum = %d\n", sum);
cudaFree(d_idata);
cudaFree(d_odata);
free(h_idata);
free(h_odata);
return 0;
} |
2,431 | #define TILE_DIM 1024
#include <limits>
// Per-row argmax of a row-major numRows x numColumns matrix: result[row]
// receives the column index of the row's maximum.
// Layout: one block per row (grid-strided over rows), TILE_DIM threads per
// block, each thread scanning a strided subset of columns before a
// shared-memory tree reduction.
template<typename T>
__device__ void argmaxColumn(const T* matrix, int* result, const int numRows, const int numColumns) {
__shared__ T partsVals[TILE_DIM];
__shared__ int partsArgs[TILE_DIM];
int index = threadIdx.x;
// FIX: rows start at blockIdx.x, so consecutive rows belong to
// consecutive *blocks*; the stride must be the grid size. The original
// used blockDim.x, which skips/repeats rows whenever gridDim != blockDim.
int rowStride = gridDim.x;
int partLength = (numColumns + TILE_DIM - 1) / TILE_DIM;
int limit = numColumns < TILE_DIM ? numColumns : TILE_DIM;
for (int row = blockIdx.x; row < numRows; row += rowStride) {
// FIX: numeric_limits<T>::min() is the smallest *positive* value for
// floating-point T — rows of all-negative values would yield argmax
// -1. lowest() is the correct identity for a max-reduction.
T max = std::numeric_limits<T>::lowest();
int argmax = -1;
for (int i = 0; i < partLength; i++) {
int columnIndex = i * TILE_DIM + index;
if (columnIndex < numColumns) {
T value = matrix[row * numColumns + columnIndex];
if (value > max) {
max = value;
argmax = columnIndex;
}
}
}
partsVals[index] = max;
partsArgs[index] = argmax;
// Pairwise tree reduction over the shared buffers.
for (int d = 1; d < limit; d <<= 1) {
__syncthreads();
if (index % (d << 1) == 0) {
int valueIndex = index + d;
if (valueIndex < limit) {
T value = partsVals[valueIndex];
int arg = partsArgs[valueIndex];
if (value > max) {
max = value;
partsVals[index] = max;
argmax = arg;
partsArgs[index] = argmax;
}
}
}
}
if (index == 0) {
result[row] = argmax;
}
// FIX: barrier before the next row iteration overwrites partsVals /
// partsArgs while lagging threads may still be reading them.
__syncthreads();
}
} |
2,432 | #include<iostream>
#include<vector>
const int SHARED_MEM = 64;
// Dot product of a and b (N ints) accumulated into *r.
// Precondition: host must zero *r before launch (cross-block accumulation
// uses atomicAdd). Works for any grid size via grid-stride partials.
__global__ void dotProdKernel(int *a, int *b, int *r, int N){
// FIX: was SHARED_MEM*sizeof(int) ints — a 4x over-allocation.
__shared__ int sh[SHARED_MEM];
int tid = threadIdx.x;
int index = tid + blockDim.x*blockIdx.x;
// FIX: stride was blockDim.x, making every block re-sum overlapping
// element ranges; the correct grid-stride is gridDim.x*blockDim.x.
int stride = gridDim.x*blockDim.x;
// FIX: the original did `sh[tid] += ...` on *uninitialized* shared
// memory; accumulate in a register and store once.
int partial = 0;
for(int i = index; i < N; i += stride){
partial += a[i]*b[i];
}
sh[tid] = partial;
__syncthreads();
// FIX: barrier inside the loop — each halving step reads the previous
// step's writes (original only synchronized before/after the loop).
for(int half = blockDim.x/2; half > 0; half /= 2){
if(tid < half){
sh[tid] += sh[tid+half];
}
__syncthreads();
}
if(tid == 0){
// FIX: `*r = sh[0]` let the last-finishing block clobber all others;
// accumulate across blocks instead.
atomicAdd(r, sh[0]);
}
}
// Fills two random vectors, computes their dot product on the GPU and
// prints it.
int main(){
int N = 1024;
int size = N*sizeof(int);
std::vector<int> vec_a(N);
std::vector<int> vec_b(N);
int res = 0;
int *d_a, *d_b, *d_res;
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_res, sizeof(int));
for(auto i = 0; i < N; i++){
vec_a[i] = rand()%10;
vec_b[i] = rand()%10;
}
cudaMemcpy(d_a, vec_a.data(), size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, vec_b.data(), size, cudaMemcpyHostToDevice);
// FIX: d_res was never initialized — the kernel accumulates into it, so
// it must start at zero, not at whatever cudaMalloc handed back.
cudaMemset(d_res, 0, sizeof(int));
int ThreadsPerBlock = 64;
int BlocksPerGrid = (N+ThreadsPerBlock-1)/ThreadsPerBlock;
dotProdKernel<<<BlocksPerGrid,ThreadsPerBlock>>>(d_a, d_b, d_res, N);
// blocking copy also synchronizes with the kernel
cudaMemcpy(&res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "Inner product is: " << res << std::endl;
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_res);
return 0;
}
|
2,433 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Optimized using shared memory and on chip memory
// nvcc nbodyGPU5.cu -o GPU5 -lglut -lm -lGLU -lGL
//To stop hit "control c" in the window you launched it from.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//gtx 1060: N = 20480, BLOCK = 1024
//average time = 95.8 milliseconds
#define N 20480
#define BLOCK 1024
#define DAMP 0.5
#define DT 0.001
#define STOP_TIME 1.0
#define G 1.0
#define H 1.0
// Globals
float4 p[N];
float3 v[N], f[N];
float4 *p_GPU;
float3 *v_GPU, *f_GPU;
dim3 block, grid;
// Places N unit-mass bodies on a cubic lattice centered at the origin with
// spacing 2, zeroes their velocities, sets the launch configuration
// (file-scope block/grid), and allocates the device buffers.
// (Name keeps the original's "initail" spelling — callers depend on it.)
void set_initail_conditions()
{
int i, j, k, num, particles_per_side;
float position_start, temp;
float initail_seperation;
// smallest integer cube side that holds N bodies (0.99999 rounds up)
temp = pow((float)N, 1.0 / 3.0) + 0.99999;
particles_per_side = temp;
printf("\n cube root of N = %d \n", particles_per_side);
position_start = -(particles_per_side - 1.0) / 2.0;
initail_seperation = 2.0;
for (i = 0; i<N; i++)
{
p[i].w = 1.0; // w holds the body's mass
}
num = 0;
for (i = 0; i<particles_per_side; i++)
{
for (j = 0; j<particles_per_side; j++)
{
for (k = 0; k<particles_per_side; k++)
{
if (N <= num) break;
p[num].x = position_start + i*initail_seperation;
p[num].y = position_start + j*initail_seperation;
p[num].z = position_start + k*initail_seperation;
v[num].x = 0.0;
v[num].y = 0.0;
v[num].z = 0.0;
num++;
}
}
}
block.x = BLOCK;
block.y = 1;
block.z = 1;
grid.x = (N - 1) / block.x + 1; // ceil(N / BLOCK)
grid.y = 1;
grid.z = 1;
cudaMalloc((void**)&p_GPU, N * sizeof(float4));
cudaMalloc((void**)&v_GPU, N * sizeof(float3));
cudaMalloc((void**)&f_GPU, N * sizeof(float3));
}
// Pairwise force on body p0 from body p1: attractive G*m0*m1/r^2 term minus
// a repulsive H*m0*m1/r^4 term (Lennard-Jones-like), returned as a vector
// along the separation direction. Mass is carried in the .w component.
// NOTE(review): no softening — r == 0 (coincident bodies) divides by zero.
__device__ float3 getBodyBodyForce(float4 p0, float4 p1)
{
float3 f;
float dx = p1.x - p0.x;
float dy = p1.y - p0.y;
float dz = p1.z - p0.z;
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float force = (G*p0.w*p1.w) / (r2)-(H*p0.w*p1.w) / (r2*r2);
f.x = force*dx / r;
f.y = force*dy / r;
f.z = force*dz / r;
return(f);
}
// All-pairs force accumulation with shared-memory tiling: each block stages
// one BLOCK-wide tile of positions at a time and every thread sums the
// force from that tile on its own body.
__global__ void getForces(float4 *pos, float3 *vel, float3 * force)
{
int j, ii;
float3 force_mag, forceSum;
float4 posMe;
__shared__ float4 shPos[BLOCK];
int id = threadIdx.x + blockDim.x*blockIdx.x;
forceSum.x = 0.0;
forceSum.y = 0.0;
forceSum.z = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
for (j = 0; j < gridDim.x; j++)
{
shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*j];
__syncthreads();
#pragma unroll 32
for (int i = 0; i < blockDim.x; i++)
{
ii = i + blockDim.x*j;
if (ii != id && ii < N)
{
force_mag = getBodyBodyForce(posMe, shPos[i]);
forceSum.x += force_mag.x;
forceSum.y += force_mag.y;
forceSum.z += force_mag.z;
}
}
// FIX: barrier at the end of each tile — without it, fast threads
// overwrite shPos for tile j+1 while slower threads are still
// reading tile j (a shared-memory race).
__syncthreads();
}
if (id <N)
{
force[id].x = forceSum.x;
force[id].y = forceSum.y;
force[id].z = forceSum.z;
}
}
// Semi-implicit Euler step: apply the accumulated force (with linear DAMP
// drag, divided by the mass stored in pos.w), then advance positions by the
// updated velocities over timestep DT.
__global__ void moveBodies(float4 *pos, float3 *vel, float3 * force)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
if (id < N)
{
vel[id].x += ((force[id].x - DAMP*vel[id].x) / pos[id].w)*DT;
vel[id].y += ((force[id].y - DAMP*vel[id].y) / pos[id].w)*DT;
vel[id].z += ((force[id].z - DAMP*vel[id].z) / pos[id].w)*DT;
pos[id].x += vel[id].x*DT;
pos[id].y += vel[id].y*DT;
pos[id].z += vel[id].z*DT;
}
}
// Uploads the host state, runs `runs` force+integration steps on the GPU,
// times them with events, copies positions back and reports the average
// per-step time. Frees the device buffers afterwards, so it is single-use.
void n_body(int runs)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(p_GPU, p, N * sizeof(float4), cudaMemcpyHostToDevice);
cudaMemcpy(v_GPU, v, N * sizeof(float3), cudaMemcpyHostToDevice);
cudaEventRecord(start);
for(int i = 0; i < runs; i++) {
getForces <<<grid, block >>>(p_GPU, v_GPU, f_GPU);
moveBodies <<<grid, block >>>(p_GPU, v_GPU, f_GPU);
}
cudaEventRecord(stop);
cudaMemcpy(p, p_GPU, N * sizeof(float4), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop); // ensure stop has completed before reading the timer
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\n\nGPU time = %3.1f milliseconds\n", elapsedTime/runs);
cudaFree(p_GPU); cudaFree(v_GPU); cudaFree(f_GPU);
cudaEventDestroy(start); cudaEventDestroy(stop);
}
// Entry point: initialize the lattice, run 1000 n-body steps, tidy up.
int main(int argc, char** argv)
{
cudaSetDevice(0);
set_initail_conditions();
n_body(1000);
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
} |
2,434 | #include <stdio.h>
#include <iostream>
#include "cuda_runtime.h"
//Kernel code.
// Element-wise square: d_out[i] = d_in[i]^2, one thread per element
// (single-block launch; threadIdx.x is the element index).
__global__ void square(float * d_in, float * d_out)
{
int pos = threadIdx.x;
float value = d_in[pos];
d_out[pos] = value * value;
}
// Squares a tiny float array on the GPU and prints input then output.
int main()
{
const int ARRAY_SIZE = 4;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//input array on host.
float h_in[ARRAY_SIZE];
for (int k = 0; k < ARRAY_SIZE; ++k) {
h_in[k] = float(k);
printf("%f\n", h_in[k]);
}
//output array of host.
float h_out[ARRAY_SIZE];
//Declare and allocate GPU memory.
float *d_in = nullptr;
float *d_out = nullptr;
cudaMalloc((void **)&d_in, ARRAY_BYTES);
cudaMalloc((void **)&d_out, ARRAY_BYTES);
//transfer the array to GPU.
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
square<<<1,ARRAY_SIZE>>>(d_in, d_out);
//Copy back to host (blocking; also syncs with the kernel).
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//Print the results.
for (int k = 0; k < ARRAY_SIZE; ++k) {
printf("%f\n", h_out[k]);
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
2,435 | // Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// #include <cupti.h>
#include <cuda_profiler_api.h>
#define DATA_TYPE 0 // 0-SP, 1-INT, 2-DP
#define THREADS 1024
#define TILE_DIM 1024
#define SIZE 60000000
#define INNER_REPS 8192
// Shared-memory bandwidth micro-benchmark: each thread repeatedly reads one
// shared slot and writes the mirrored slot (THREADS - tid - 1).
// NOTE(review): `shared` is never initialized before the first read, so the
// values moved are garbage — presumably acceptable for a pure bandwidth
// test, but confirm this is intentional.
template <class T> __global__ void simpleKernel2()
{
__shared__ T shared[THREADS];
T r0;
// Global element index; used only for the bounds guard below.
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
if (xIndex < SIZE) {
#pragma unroll 8192
for (int i=0;i<INNER_REPS;i++) {
r0 = shared[threadIdx.x];
shared[THREADS - threadIdx.x - 1] = r0;
}
}
}
// Benchmark driver: launches simpleKernel2<float> `outer_reps` times
// (argv[1], default 1) and times the loop with CUDA events.
// NOTE(review): inner_reps is assigned but never used (the kernel reads the
// INNER_REPS macro directly), and kernelTime is measured but never printed —
// confirm whether reporting it was intended.
int main(int argc, char **argv) {
int inner_reps, outer_reps, vector_size, tile_dim;
inner_reps = INNER_REPS;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
cudaEvent_t start, stop;
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// take measurements for loop over kernel launches
cudaEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
simpleKernel2<float><<<grid, threads>>>();
}
cudaEventRecord(stop, 0);
// Block until the stop event (and all launches before it) completes.
cudaEventSynchronize(stop);
float kernelTime;
cudaEventElapsedTime(&kernelTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
2,436 | #include <iostream>
#include <chrono>
#define M 512
__device__ float polynomial (float x, float* poly, int degree) {
    // Evaluate sum_{i=0..degree} poly[i] * x^i, accumulating the power of x
    // incrementally instead of calling powf.
    float acc = 0.;
    float xpow = 1.;
    for (int i = 0; i <= degree; ++i) {
        acc += xpow * poly[i];
        xpow *= x;
    }
    return acc;
}
__global__ void polynomial_expansion (float* poly, int degree,
                                      int n, float* array) {
  // One thread per element; guard the tail of the last block.
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= n) return;
  array[i] = polynomial(array[i], poly, degree);
}
int main (int argc, char* argv[]) {
  // Usage: <program> n degree — evaluates a degree-th order polynomial
  // (all coefficients 1) at x=1 for n elements on the GPU and times it.
  if (argc < 3) {
    std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
    return -1;
  }
  int n = atoi(argv[1]); //TODO: atoi is an unsafe function
  int degree = atoi(argv[2]);
  int nbiter = 1;
  float* array,*poly;
  int size_array = n*sizeof(float);
  int size_poly = (degree+1)*sizeof(float);
  float* d_array,*d_poly;
  array = (float *)malloc(size_array);
  poly = (float *)malloc(size_poly);
  cudaMalloc((void **)&d_array,size_array);
  cudaMalloc((void **)&d_poly,size_poly);
  // Initialize host data: x = 1 everywhere, all coefficients 1.
  for (int i=0; i<n; ++i)
    array[i] = 1.;
  for (int i=0; i<degree+1; ++i)
    poly[i] = 1.;
  cudaMemcpy(d_array,array,size_array,cudaMemcpyHostToDevice);
  cudaMemcpy(d_poly,poly,size_poly,cudaMemcpyHostToDevice);
  std::chrono::time_point<std::chrono::system_clock> begin, end;
  begin = std::chrono::system_clock::now();
  for (int iter = 0; iter<nbiter; ++iter)
    // BUG FIX: pass the *device* buffers to the kernel. The original passed
    // the host pointers (poly/array), so the kernel dereferenced invalid
    // addresses and d_array was never actually updated.
    polynomial_expansion<<<(n+M-1)/M,M>>>(d_poly, degree, n, d_array);
  // The blocking copy-back also synchronizes with the kernel before timing ends.
  cudaMemcpy(array,d_array,size_array,cudaMemcpyDeviceToHost);
  end = std::chrono::system_clock::now();
  std::chrono::duration<double> totaltime = (end-begin)/nbiter;
  std::cerr<<array[0]<<std::endl;
  std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
  free(array);
  free(poly);
  cudaFree(d_array);
  cudaFree(d_poly);
  return 0;
}
|
2,437 | #include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <math.h>
#include <stdint.h>
// Allocates a device buffer of `size` bytes and copies the host value into it.
// Returns the device pointer (caller releases it via cuda_download_var).
// BUG FIX: the original allocated a fixed 4 bytes regardless of `size`,
// overflowing the allocation for any variable larger than 4 bytes.
void *cuda_upload_var(void *host_var, int size)
{
	void *cuda_var;
	cudaMalloc(&cuda_var, size);
	cudaMemcpy(cuda_var, host_var, size, cudaMemcpyHostToDevice);
	return cuda_var;
}
// Copies `size` bytes from the device buffer back to the host variable,
// then frees the device buffer (pairs with cuda_upload_var).
void cuda_download_var(void *cuda_var, void *host_var, int size)
{
cudaMemcpy(host_var, cuda_var, size, cudaMemcpyDeviceToHost);
cudaFree(cuda_var);
}
int printf(const char *fmt, ...);
// Minimal smoke test: prints a greeting and exits successfully.
int main(int argc, char **argv)
{
printf("Hello World!\n");
return 0;
}
|
2,438 | //
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iomanip>
#include <time.h>
#include <iostream>
using namespace std;
#define N 1000
#define S 2
#define BLOCK_SIZE 1
__global__ void zeta(float* c)
{
    // Each thread writes one term 1/(ind+1)^S of the zeta series into c.
    // The index folds in the y dimension so 2-D launches are also covered.
    int ind = blockDim.x * blockIdx.x + threadIdx.x
            + blockDim.y * blockIdx.y + threadIdx.y;
    if (ind > N - 1) return;
    // Term computed in double precision, stored as float.
    float term = float(1) / pow(double(ind + 1), S);
    c[ind] = term;
}
// Computes an approximation of zeta(S) by summing N terms produced on the
// GPU, then reports the kernel time and the error versus pi^2/6.
// Returns 0 on completion.
int homeWork3() {
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	float* host_c;
	float* dev_c;
	cout << endl;
	host_c = (float*)malloc(N * sizeof(float));
	cudaMalloc((void**)& dev_c, N * sizeof(float));
	cudaEventRecord(start, 0);
	dim3 threadPerBlock = dim3(N / BLOCK_SIZE, 1);
	dim3 blockPerGrid = dim3(BLOCK_SIZE, BLOCK_SIZE);
	zeta << <blockPerGrid, threadPerBlock >> > (dev_c);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float KernelTime;
	cudaEventElapsedTime(&KernelTime, start, stop);
	printf("KernelTme: %f millseconds\n", KernelTime);
	// Surface any launch/execution error before reading results.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
		printf("%s ", cudaGetErrorString(err));
	// BUG FIX: copy N floats, not N ints — the buffer holds floats. The sizes
	// coincide on common platforms, but the element type was wrong.
	cudaMemcpy(host_c, dev_c, N * sizeof(float), cudaMemcpyDeviceToHost);
	float sum = 0;
	for (int i = 0; i < N; i++)
	{
		//cout << host_c[i] << " ";
		sum += host_c[i];
	}
	cout << endl;
	cout << "Value Zeta Function: " << sum << " ";
	// Reference value pi^2/6, with pi = 4*atan(1).
	float error = abs(4 * atan(1) * 4 * atan(1) / float(6) - sum);
	printf("\nError: %f\n", error);
	// BUG FIX: free the host buffer as well (it was leaked).
	free(host_c);
	cudaFree(dev_c);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return 0;
}
2,439 | #include "includes.h"
__global__ void awkward_ByteMaskedArray_getitem_nextcarry_kernel(int64_t* prefixed_mask, int64_t* to_carry, int8_t* mask, int64_t length) {
  // Flatten the 3-D grid into a linear thread id.
  int64_t bid = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
  int64_t tid = bid * blockDim.x + threadIdx.x;
  if (tid >= length) return;
  // For unmasked elements, scatter this index into the compacted output.
  // prefixed_mask is presumably an inclusive prefix sum of (mask != 0),
  // making prefixed_mask[tid]-1 the output slot — confirm with the caller.
  if (mask[tid] != 0) {
    to_carry[prefixed_mask[tid] - 1] = tid;
  }
}
2,440 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
/*
To compile:
nvcc -o LinearRegressionCuda LinearRegressionCuda.cu
*/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{71.81,133.75},{85.89,133.55},{82.39,150.79},{67.30,111.91},
{72.43,119.97},{65.93,108.62},{82.77,155.40},{65.23,109.17},
{78.50,123.83},{82.84,131.75},{65.34,120.82},{73.67,130.20},
{78.94,120.51},{34.42,62.04},{ 8.73,15.55},{23.45,55.17},
{31.72,58.36},{65.08,113.47},{71.81,128.61},{75.72,121.32},
{10.85,42.15},{49.29,87.56},{69.32,107.59},{23.18,60.57},
{57.50,104.58},{51.22,95.42},{ 1.87,34.88},{ 0.25,33.58},
{92.31,161.96},{90.44,165.21},{51.17,117.82},{70.31,114.64},
{59.11,100.04},{17.46,59.44},{78.86,122.45},{74.48,144.72},
{97.05,153.21},{53.79,90.54},{80.87,143.61},{42.49,86.11},
{69.93,125.88},{71.86,141.65},{ 4.82,30.58},{45.61,65.66},
{84.52,134.78},{97.06,154.56},{21.32,56.30},{44.18,64.58},
{ 2.66,23.04},{51.37,76.64},{21.43,59.05},{19.79,52.99},
{81.06,137.68},{44.78,82.78},{50.11,76.99},{77.74,130.65},
{85.48,153.88},{74.76,126.04},{87.23,132.37},{37.96,70.32},
{92.10,148.88},{35.49,60.54},{98.95,158.99},{ 9.66,37.03},
{21.07,56.81},{75.75,104.60},{42.63,80.73},{19.18,73.68},
{69.42,124.30},{79.12,137.33},{11.06,39.94},{13.98,41.17},
{15.48,39.69},{ 0.35,19.09},{79.17,148.87},{93.33,143.52},
{39.32,61.76},{20.18,60.38},{11.79,32.52},{16.51,50.62},
{71.53,104.03},{16.04,34.95},{51.37,105.66},{47.44,65.97},
{79.12,141.21},{ 4.00,16.20},{97.62,153.70},{65.44,130.26},
{50.20,121.77},{99.79,170.26},{76.36,135.83},{82.17,147.58},
{12.23,34.62},{31.39,80.38},{63.59,110.08},{79.87,112.29},
{91.99,148.73},{95.57,163.30},{36.91,68.25},{35.42,100.44},
{88.93,165.23},{54.48,101.08},{42.25,81.06},{63.79,109.19},
{69.30,125.01},{22.12,44.31},{ 4.31,37.11},{50.40,102.16},
{44.11,64.66},{15.53,37.62},{11.52,30.78},{37.50,88.62},
{84.92,145.18},{14.65,45.40},{44.26,88.55},{64.47,109.85},
{20.17,62.08},{75.44,118.67},{56.76,102.67},{97.74,157.14},
{90.10,130.66},{23.82,60.15},{95.21,154.21},{63.42,116.59},
{47.35,86.90},{21.22,36.64},{47.07,76.04},{93.51,161.37},
{90.74,135.61},{ 1.89,42.37},{99.41,153.91},{79.58,142.84},
{47.90,83.54},{30.89,73.61},{ 2.05,37.83},{89.51,157.44},
{ 8.02,30.93},{50.97,78.93},{43.22,86.36},{32.69,64.67},
{94.74,130.84},{27.82,53.54},{85.05,151.88},{82.34,127.32},
{49.14,98.33},{ 6.85,10.92},{32.17,72.66},{44.61,92.76},
{22.51,78.78},{18.37,41.97},{62.72,102.57},{44.41,107.65},
{46.40,88.98},{43.14,93.46},{67.92,105.60},{23.84,40.69},
{ 4.03,34.92},{ 5.64,34.23},{79.83,129.40},{82.70,144.49},
{83.02,124.05},{64.88,124.17},{20.12,55.01},{49.36,86.36},
{77.62,117.64},{18.52,45.33},{52.21,109.18},{15.02,39.89},
{79.26,136.34},{54.53,95.15},{28.56,77.08},{ 7.83,16.21},
{26.24,55.37},{37.16,70.27},{19.93,39.84},{87.64,120.18},
{88.19,129.91},{15.68,56.99},{55.77,104.48},{57.24,106.34},
{69.18,125.86},{50.75,91.00},{54.75,96.92},{88.10,153.26},
{97.23,161.42},{55.52,128.90},{42.43,79.44},{43.30,71.53},
{17.73,40.42},{93.22,172.37},{86.72,137.77},{51.68,125.48},
{ 6.20,53.74},{36.62,79.91},{95.96,156.68},{19.80,45.74},
{ 7.95,33.30},{19.11,65.48},{73.86,156.39},{13.35,58.42},
{15.98,58.41},{27.63,75.79},{39.36,100.90},{23.30,49.78},
{91.02,174.92},{58.76,113.32},{56.86,89.12},{74.60,124.47},
{87.95,145.95},{ 5.65,26.01},{44.38,84.85},{61.21,111.78},
{96.07,168.07},{64.95,119.02},{39.36,61.92},{ 6.43,37.54},
{32.17,68.34},{30.40,73.48},{15.41,40.03},{49.93,85.61},
{72.84,121.47},{79.49,135.27},{12.54,51.85},{96.52,163.46},
{66.51,98.57},{48.32,96.21},{90.47,139.35},{49.45,90.23},
{97.31,153.19},{ 6.99,33.04},{79.27,155.01},{28.09,59.98},
{29.65,46.80},{ 6.41,26.53},{89.09,155.66},{83.66,135.40},
{87.21,128.39},{39.34,75.70},{ 5.29,18.70},{75.91,128.16},
{36.17,73.07},{ 2.95,25.49},{70.12,119.10},{76.57,121.06},
{37.18,81.73},{26.54,71.98},{ 1.86,22.98},{63.22,130.78},
{ 9.44,52.14},{22.05,59.05},{33.75,77.73},{17.22,41.22},
{97.39,159.14},{83.45,133.76},{53.44,80.57},{68.93,97.49},
{30.97,46.79},{60.63,115.57},{80.33,131.34},{21.99,50.93},
{55.51,114.58},{55.53,96.06},{63.02,107.21},{97.16,161.85},
{22.71,59.05},{53.90,93.15},{79.03,135.50},{24.98,58.91},
{53.35,77.77},{62.54,111.55},{ 8.14,30.34},{21.20,57.84},
{80.80,131.03},{82.97,147.94},{13.34,45.07},{36.39,79.55},
{49.26,97.81},{39.35,85.87},{17.81,57.99},{10.79,50.78},
{86.11,143.90},{75.31,132.09},{89.64,140.60},{92.60,137.96},
{55.41,93.53},{12.59,48.40},{15.58,59.60},{34.90,86.37},
{99.68,154.75},{88.60,136.03},{82.50,126.48},{88.50,134.98},
{ 1.07,28.75},{99.30,167.43},{ 8.11,14.26},{86.82,145.36},
{ 4.77,19.71},{32.49,68.46},{35.63,75.41},{80.60,141.36},
{53.62,102.19},{26.15,63.08},{67.67,115.13},{61.72,126.25},
{50.58,91.97},{71.46,123.77},{61.42,98.61},{89.04,147.12},
{38.41,71.83},{21.14,51.38},{59.04,108.78},{29.09,57.48},
{11.38,50.98},{10.28,35.84},{45.56,86.89},{95.39,145.33},
{51.24,96.97},{97.41,163.34},{31.73,77.61},{ 9.73,36.27},
{41.55,73.15},{92.88,158.72},{60.16,99.18},{48.55,94.55},
{82.19,147.90},{60.01,104.26},{93.66,150.29},{13.82,37.15},
{51.11,85.26},{65.26,105.78},{19.26,32.52},{38.43,77.91},
{51.47,85.84},{96.97,153.73},{59.30,107.67},{ 0.07,26.65},
{80.56,135.72},{76.04,137.06},{88.61,142.04},{65.39,113.36},
{59.77,101.26},{68.59,132.36},{66.76,114.37},{54.10,95.76},
{73.43,114.04},{25.25,49.82},{40.70,76.08},{67.14,123.21},
{58.08,107.35},{25.91,67.67},{70.24,125.48},{ 4.39,35.71},
{28.12,63.20},{57.87,93.54},{14.98,54.26},{73.40,129.67},
{11.65,43.23},{ 6.48, 7.79},{ 4.34,23.19},{57.96,117.91},
{40.59,64.13},{59.70,102.44},{69.77,123.84},{81.09,140.18},
{24.78,31.31},{ 9.11,18.71},{52.35,95.53},{41.51,75.96},
{14.25,42.30},{95.18,161.64},{82.80,147.19},{37.69,85.89},
{50.22,86.58},{ 9.61,34.74},{85.43,133.04},{30.83,55.35},
{28.01,59.72},{17.65,69.10},{22.88,49.17},{92.61,145.34},
{48.89,99.29},{ 3.27,27.70},{63.87,101.75},{74.76,122.07},
{13.74,37.53},{ 5.66,32.70},{72.30,123.40},{ 0.51,27.51},
{14.73,26.65},{99.81,165.29},{55.72,101.54},{76.73,121.37},
{84.59,156.55},{63.89,113.13},{72.47,104.87},{19.83,50.51},
{ 8.05,32.87},{60.34,115.79},{23.94,52.20},{42.28,82.34},
{26.31,58.45},{21.64,52.47},{23.67,53.46},{56.81,92.74},
{75.74,118.51},{20.25,44.91},{83.80,152.05},{45.21,109.99},
{53.40,94.95},{48.01,79.89},{86.15,126.70},{31.76,65.23},
{37.00,71.39},{97.00,150.49},{74.45,158.08},{56.43,95.25},
{15.26,50.28},{72.99,134.83},{25.73,50.39},{66.12,103.53},
{84.47,142.16},{34.60,60.59},{39.31,83.77},{ 5.83, 7.76},
{58.47,115.10},{13.25,34.10},{15.05,57.12},{21.60,48.62},
{86.43,158.68},{93.70,157.58},{21.89,49.21},{36.54,90.96},
{11.65,44.60},{69.47,126.67},{15.50,72.13},{32.62,77.71},
{88.46,147.60},{57.82,105.16},{ 3.04,24.52},{73.18,114.88},
{ 1.89,17.39},{47.48,80.24},{94.69,166.52},{86.61,162.53},
{93.68,169.74},{20.15,49.66},{89.47,150.21},{27.20,73.32},
{54.12,97.33},{48.92,96.41},{81.94,152.06},{77.89,132.22},
{96.42,164.13},{79.34,130.28},{51.37,102.16},{55.97,89.64},
{43.23,78.79},{77.07,139.29},{75.65,138.43},{18.77,43.38},
{90.52,144.64},{19.17,49.23},{34.35,70.96},{70.52,121.88},
{30.72,61.05},{53.35,94.84},{86.15,147.31},{20.81,60.94},
{27.15,53.45},{17.63,35.98},{99.82,158.99},{66.97,110.36},
{65.87,123.68},{78.08,129.36},{99.91,175.45},{72.93,125.14},
{35.08,70.94},{28.46,49.01},{80.54,147.46},{77.18,122.58},
{77.71,156.43},{91.31,146.54},{20.92,51.17},{50.87,97.76},
{99.08,167.07},{62.57,121.71},{81.74,145.81},{30.88,71.61},
{81.94,146.49},{ 1.36,21.86},{13.60,27.68},{13.72,31.35},
{25.24,41.75},{60.65,114.39},{ 5.10,16.14},{96.04,163.74},
{30.16,69.39},{ 5.21,21.80},{13.26,47.80},{34.04,61.40},
{41.31,84.48},{13.87,42.78},{80.57,155.45},{73.85,117.81},
{44.09,85.04},{67.97,110.42},{66.05,144.78},{76.72,136.42},
{31.80,68.47},{21.76,40.62},{19.65,57.37},{47.05,92.14},
{72.80,140.18},{28.75,67.93},{81.58,153.50},{ 3.91,31.01},
{93.56,155.20},{45.11,111.36},{95.57,150.14},{31.63,80.29},
{81.56,130.14},{69.79,136.07},{30.14,44.97},{56.51,119.88},
{91.57,144.92},{73.90,127.37},{86.56,130.40},{33.26,57.53},
{72.16,124.71},{ 4.90,31.13},{26.34,68.73},{18.22,43.77},
{ 5.42,28.57},{75.23,141.83},{13.72,44.69},{98.92,154.34},
{94.30,157.85},{97.47,160.05},{49.57,97.75},{99.09,161.98},
{51.01,87.85},{29.22,69.17},{87.34,146.82},{26.04,49.38},
{44.11,89.25},{ 8.74,34.33},{24.41,70.14},{57.84,105.52},
{46.64,89.59},{61.47,115.25},{87.64,138.31},{10.52,34.23},
{90.60,151.93},{63.39,110.69},{ 0.97,33.88},{13.54,42.86},
{87.02,163.01},{48.04,67.13},{84.91,153.18},{17.13,50.11},
{98.46,159.86},{24.74,40.26},{36.78,76.12},{93.00,169.21},
{96.16,154.92},{88.44,165.84},{54.06,91.19},{38.91,71.38},
{ 4.61,27.77},{82.45,127.85},{47.04,81.41},{54.53,104.53},
{ 1.99,13.20},{48.19,84.44},{83.25,148.38},{ 3.93,24.18},
{86.70,149.70},{58.11,111.23},{81.87,134.40},{ 4.81,35.75},
{43.63,83.58},{ 4.86,37.40},{ 2.93,16.55},{40.61,76.67},
{18.91,47.84},{74.21,129.32},{33.67,82.38},{41.75,88.69},
{93.55,146.01},{32.84,54.09},{36.18,71.94},{30.59,68.87},
{27.18,69.28},{17.02,59.72},{89.56,159.21},{76.28,131.23},
{55.97,98.43},{65.93,131.28},{34.72,68.34},{10.65,42.12},
{ 9.01,38.63},{ 9.35,52.74},{13.36,44.38},{ 5.36,47.57},
{49.10,80.75},{35.50,81.38},{85.86,143.17},{57.65,89.93},
{65.35,102.37},{99.12,168.40},{49.52,94.52},{18.03,53.14},
{78.95,135.86},{22.47,54.44},{51.75,103.26},{ 0.48,17.73},
{82.00,159.36},{ 5.63,32.73},{95.25,155.74},{51.55,90.42},
{98.90,154.14},{25.95,64.50},{79.12,140.50},{20.59,52.04},
{54.86,113.02},{22.06,55.16},{77.06,136.48},{65.61,122.37},
{31.20,76.49},{82.60,148.76},{19.53,54.99},{81.21,140.19},
{81.87,150.51},{69.68,136.12},{40.20,75.56},{27.08,66.95},
{20.48,54.12},{72.17,125.66},{52.48,101.11},{30.77,81.74},
{99.74,154.30},{29.30,71.54},{ 2.32,26.03},{51.04,91.71},
{37.30,75.39},{14.81,41.92},{19.85,57.62},{37.60,64.73},
{77.39,136.96},{65.69,118.79},{ 5.57,20.49},{60.47,104.70},
{12.32,49.43},{71.33,133.27},{22.04,62.24},{49.04,98.96},
{28.51,77.93},{35.03,75.84},{79.22,118.50},{26.65,80.44},
{69.35,108.29},{56.03,102.40},{26.20,62.66},{66.56,131.81},
{44.51,94.10},{34.08,62.61},{17.47,52.36},{51.22,108.60},
{93.35,157.84},{69.55,114.79},{18.68,50.77},{41.56,91.26},
{65.19,127.38},{88.77,148.88},{ 9.99,30.10},{49.67,77.20},
{90.28,165.16},{26.69,53.43},{ 7.95,39.17},{75.34,127.81},
{ 0.12, 7.67},{95.35,170.18},{63.57,116.43},{54.73,97.98},
{95.11,149.50},{65.12,108.21},{65.62,118.79},{55.24,103.25},
{45.00,86.45},{89.93,170.27},{63.15,104.46},{98.62,150.57},
{54.04,101.18},{38.92,97.39},{65.66,112.45},{94.08,145.09},
{ 8.02,52.57},{90.39,149.90},{49.97,104.95},{69.39,144.12},
{56.28,104.01},{30.13,42.35},{83.41,135.62},{25.42,59.47},
{ 8.60,47.93},{ 7.96,26.67},{84.46,135.72},{96.40,173.29},
{47.38,91.27},{90.73,145.91},{70.02,129.57},{37.86,68.46},
{38.18,87.29},{28.46,66.31},{29.23,49.56},{ 2.83,25.34},
{55.92,103.82},{80.53,145.86},{55.34,97.42},{91.58,119.46},
{76.04,143.07},{60.28,117.98},{ 0.54, 5.00},{ 9.50,44.08},
{59.81,107.20},{ 4.35,31.95},{48.87,86.27},{93.95,146.06},
{99.07,150.56},{26.88,55.92},{12.98,45.95},{16.74,39.38},
{35.39,72.22},{84.23,144.44},{90.69,141.06},{65.69,127.54},
{26.91,56.78},{15.06,46.26},{32.40,64.69},{44.46,91.33},
{76.51,145.64},{94.09,151.96},{57.45,92.66},{15.97,42.98},
{81.54,132.18},{58.37,113.71},{29.14,53.90},{61.15,101.46},
{17.28,45.91},{40.12,93.12},{ 0.36,10.11},{17.99,61.07},
{35.90,82.30},{22.60,65.99},{32.75,78.46},{23.83,73.75},
{94.73,154.62},{ 4.38,23.01},{78.50,118.27},{64.21,97.91},
{92.39,159.66},{38.28,83.70},{79.26,136.65},{17.66,36.98},
{32.52,67.86},{77.98,145.93},{79.98,144.76},{20.69,45.94},
{98.63,172.79},{58.09,117.55},{82.28,142.01},{60.87,104.83},
{44.91,92.45},{97.89,168.22},{20.75,62.93},{10.51,48.50},
{53.90,101.25},{58.42,101.98},{72.74,123.84},{46.59,90.86},
{29.87,56.12},{62.39,122.94},{57.68,92.27},{33.30,76.22},
{ 3.56,28.54},{46.08,106.41},{97.39,151.31},{35.45,66.84},
{34.63,87.83},{87.79,144.36},{28.20,53.50},{78.44,140.84},
{11.40,49.61},{85.04,142.50},{85.04,158.15},{68.40,94.57},
{17.19,53.83},{82.41,146.61},{23.63,39.81},{29.03,54.35},
{83.65,138.45},{48.46,97.27},{18.77,50.92},{44.94,97.79},
{67.65,105.01},{45.70,54.12},{ 9.58,43.90},{99.27,150.15},
{12.73,36.30},{98.01,174.70},{85.29,145.34},{ 7.32,47.81},
{87.71,165.09},{46.50,92.73},{33.04,76.52},{40.94,78.03},
{12.53,29.91},{50.90,92.49},{51.52,104.94},{99.07,151.71},
{80.84,138.04},{14.73,24.97},{61.84,96.48},{96.23,140.28},
{14.95,47.46},{39.87,70.43},{61.79,120.74},{78.53,123.10},
{30.06,63.50},{57.80,109.88},{18.02,55.74},{66.63,96.24},
{71.62,121.35},{50.25,98.35},{ 9.15,30.45},{38.30,59.16},
{12.10,48.06},{68.87,123.37},{74.45,133.33},{95.47,158.56},
{67.57,122.32},{62.27,97.67},{33.02,68.44},{78.82,135.53},
{89.73,160.49},{16.73,45.84},{86.27,130.72},{16.96,34.50},
{ 7.70,17.39},{89.55,147.99},{28.07,73.54},{28.11,75.97},
{39.00,78.29},{47.87,96.73},{88.54,133.29},{71.47,130.74},
{63.14,114.27},{20.93,54.58},{74.15,138.21},{66.42,116.12},
{ 2.90,46.27},{71.33,115.60},{40.72,96.90},{45.43,76.82},
{63.10,113.57},{10.91,45.11},{94.11,157.71},{ 2.02,46.92},
{83.43,150.57},{18.49,38.88},{70.62,123.24},{36.43,81.80},
{61.06,110.80},{41.93,69.91},{78.62,144.55},{ 3.05,17.87},
{40.72,96.97},{28.88,73.84},{50.41,90.62},{59.30,96.67},
{ 6.13,45.55},{70.11,134.27},{74.61,127.51},{56.90,123.42},
{94.13,144.09},{28.26,72.79},{14.47,58.59},{95.53,151.16},
{51.96,102.88},{69.25,129.38},{27.16,51.62},{97.88,180.78},
{94.78,168.77},{80.56,155.02},{27.92,64.69},{ 4.77,31.18},
{31.24,71.23},{27.42,78.61},{29.42,67.70},{76.86,134.28},
{31.36,54.76},{50.42,94.73},{80.73,121.10},{ 1.99,20.51},
{21.56,57.13},{21.75,66.02},{13.85,59.84},{13.48,52.82},
{59.32,103.64},{94.53,158.77},{18.54,35.66},{64.88,103.79},
{46.65,98.03},{ 5.95,33.17},{ 6.57,17.17},{76.32,133.82},
{94.11,154.07},{62.67,108.53},{30.27,58.27},{98.45,158.59},
{81.31,152.48},{23.06,65.35},{82.91,129.73},{96.03,150.73},
{ 2.11, 6.42},{27.24,69.29},{75.21,135.75},{ 5.44,34.12},
{80.93,128.91},{79.60,108.46},{99.99,162.20},{62.44,115.75},
{13.28,33.20},{43.22,86.14},{26.50,58.88},{34.72,85.01},
{54.56,105.50},{12.27,18.15},{13.66,70.53},{ 7.74,35.72},
{84.51,134.88},{30.75,39.17},{57.14,90.09},{ 1.40,10.91},
{67.44,110.82},{53.03,96.34},{35.39,65.18},{65.29,102.95},
{59.35,102.78},{54.80,101.59},{16.56,57.37},{92.71,170.67},
{68.68,125.65},{77.44,129.75},{23.68,51.06},{85.89,142.67},
{94.91,153.05},{56.30,104.40},{58.30,107.14},{45.98,91.75},
{90.18,150.31},{97.63,150.23},{55.62,114.59},{50.83,111.00},
{37.56,86.33},{ 9.99,42.83},{71.14,122.30},{42.92,63.75},
{98.82,166.58},{65.32,119.34},{12.09,46.70},{88.17,134.52},
{68.55,137.66},{93.42,143.64},{57.99,112.75},{29.60,47.62},
{23.24,67.34},{60.36,121.02},{79.82,135.52},{72.66,122.07},
{24.54,69.68},{43.34,81.69},{70.53,129.80},{65.37,126.24},
{31.11,57.14},{ 1.22,25.79},{19.20,63.95},{68.51,105.34},
{46.51,87.43},{60.17,109.16},{ 3.50,31.20},{76.58,127.58},
{ 1.75,11.07},{85.08,132.82},{12.42,46.23},{23.30,48.69},
{ 9.16,27.43},{55.45,118.52},{87.40,162.78},{72.97,122.98}
};
double residual_error(double x, double y, double m, double c) {
  // Squared vertical distance between the point (x, y) and the line y = m*x + c.
  double diff = (m * x) + c - y;
  return diff * diff;
}
double rms_error(double m, double c) {
  // Root-mean-square residual of the line (m, c) over the global data set.
  double total = 0;
  for (int i = 0; i < n_data; i++) {
    total += residual_error(data[i].x, data[i].y, m, c);
  }
  double mean = total / n_data;
  return sqrt(mean);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
  // Store the elapsed nanoseconds from start to finish in *difference.
  // Returns 0 for a positive interval, nonzero otherwise (original contract).
  long long int sec = finish->tv_sec - start->tv_sec;
  long long int nsec = finish->tv_nsec - start->tv_nsec;
  if (nsec < 0)
  {
    // Borrow one second when the nanosecond field wraps negative.
    sec--;
    nsec += 1000000000;
  }
  *difference = sec * 1000000000 + nsec;
  return !(*difference > 0);
}
__device__ double d_residual_error(double x, double y, double m, double c) {
  // Device twin of residual_error: squared residual of (x, y) vs y = m*x + c.
  double diff = (m * x) + c - y;
  return diff * diff;
}
// One thread per data point: stores that point's squared residual for the
// candidate line (*m, *c) into error_sum_arr for a host-side reduction.
// NOTE(review): there is no bounds guard, so the launch configuration must
// satisfy gridDim.x * blockDim.x == number of points (the caller uses
// <<<100,10>>> for 1000 points) — confirm if the data size ever changes.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
// Gradient-free linear-regression fit: starting from (bm, bc), repeatedly
// probes 8 compass directions of size `step` and moves to the best one until
// no direction improves the RMS error. The per-point squared residuals are
// computed on the GPU and reduced on the host.
int main() {
	int i;
	double bm = 1.3;
	double bc = 10;
	double be;
	// Candidate slopes/intercepts and their errors for the 8 directions.
	double dm[8];
	double dc[8];
	double e[8];
	double step = 0.01;
	double best_error = 999999999;
	int best_error_i;
	int minimum_found = 0;
	// Direction offsets (N, NE, E, SE, S, SW, W, NW) around (bm, bc).
	double om[] = {0,1,1, 1, 0,-1,-1,-1};
	double oc[] = {1,1,0,-1,-1,-1, 0, 1};
	struct timespec start, finish;
	long long int time_elapsed;
	clock_gettime(CLOCK_MONOTONIC, &start);
	cudaError_t error;
	double *d_dm;
	double *d_dc;
	double *d_error_sum_arr;
	point_t *d_data;
	be = rms_error(bm, bc);
	// Device allocations: 8 candidate m/c values, 1000 residuals, the data set.
	error = cudaMalloc(&d_dm, (sizeof(double) * 8));
	if(error){
		fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	error = cudaMalloc(&d_dc, (sizeof(double) * 8));
	if(error){
		fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
	if(error){
		fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	error = cudaMalloc(&d_data, sizeof(data));
	if(error){
		fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	while(!minimum_found) {
		// Build the 8 candidate (m, c) pairs one step away in each direction.
		for(i=0;i<8;i++) {
			dm[i] = bm + (om[i] * step);
			dc[i] = bc + (oc[i] * step);
		}
		error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
		if(error){
			fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
				cudaGetErrorString(error));
		}
		error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
		if(error){
			fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
				cudaGetErrorString(error));
		}
		error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
		if(error){
			fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
				cudaGetErrorString(error));
		}
		for(i=0;i<8;i++) {
			double h_error_sum_arr[1000];
			// BUG FIX: the accumulator must start at zero — the original read
			// it uninitialized (the "= 0" reset at the end of each pass was
			// dead, since the variable is re-declared every iteration).
			double error_sum_total = 0;
			double error_sum_mean;
			// 100 blocks x 10 threads = one thread per data point.
			d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
			// cudaThreadSynchronize() is deprecated; use the modern equivalent.
			cudaDeviceSynchronize();
			error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
			if(error){
				fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
					cudaGetErrorString(error));
			}
			// Reduce the per-point squared residuals on the host.
			for(int j=0; j<n_data; j++) {
				error_sum_total += h_error_sum_arr[j];
			}
			error_sum_mean = error_sum_total / n_data;
			e[i] = sqrt(error_sum_mean);
			if(e[i] < best_error) {
				best_error = e[i];
				best_error_i = i;
			}
		}
		printf("best m,c is %lf,%lf with error %lf in direction %d\n",
			dm[best_error_i], dc[best_error_i], best_error, best_error_i);
		// Accept the move if it improved on the previous best, else stop.
		if(best_error < be) {
			be = best_error;
			bm = dm[best_error_i];
			bc = dc[best_error_i];
		} else {
			minimum_found = 1;
		}
	}
	//Free memory for d_dm
	error = cudaFree(d_dm);
	if(error){
		fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	//Free memory for d_dc
	error = cudaFree(d_dc);
	if(error){
		fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	error = cudaFree(d_data);
	if(error){
		fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	error = cudaFree(d_error_sum_arr);
	if(error){
		fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
			cudaGetErrorString(error));
		exit(1);
	}
	printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
	clock_gettime(CLOCK_MONOTONIC, &finish);
	time_difference(&start, &finish, &time_elapsed);
	printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
		(time_elapsed/1.0e9));
	return 0;
}
|
__global__ void repeatedActivations(float* H, int K, int M, int r, float iterfac) {
/*
Avoid repeated activations with a maximum filter
:param H: An KxM matrix whose repeated activations will be suppressed row-wise
:param K, M: Dimensions
:param r: Width of repeated activation filter
:param iterfac: The shrinkage factor for non-maximum values in a neighborhood
*/
// Dynamic shared memory scratch buffer (size set at launch); currently unused.
extern __shared__ float x[];
// 2-D element coordinates; currently computed but unused by the stub body.
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// NOTE(review): this kernel is an unimplemented stub — it performs no work.
//TODO: FINISH THIS
}
|
2,442 | #include <stdio.h>
// 这个是kernel函数,就是GPU函数
__global__ void kernelfunction(int*a,int*b,int*c){
    // Single-thread scalar addition: *c = *a + *b.
    *c = (*a) + (*b);
}
// Adds two scalars on the GPU and times the upload/kernel/download sequence.
// NOTE(review): every CUDA call overwrites `cudastatus`, so only the status
// of the final cudaFree is actually inspected at the end — earlier failures
// are silently lost.
int main(void){
printf("Cuda_Performance Hello World\n");
int a,b,c;
int *d_a,*d_b,*d_c;
int size =sizeof(int);
// take the address of d_a,and cast into void**
// (i.e. pass a pointer-to-pointer so cudaMalloc can set our device pointer)
// Create Status for error check
cudaError_t cudastatus;
// Allocate space for device
cudastatus=cudaMalloc((void **)&d_a, size);
cudastatus=cudaMalloc((void **)&d_b, size);
cudastatus=cudaMalloc((void **)&d_c, size);
a = 1;
b = 2;
// Start Timing (event-based GPU timing)
cudaEvent_t start, stop;
float timeall;
cudastatus=cudaEventCreate(&start);
cudastatus=cudaEventCreate(&stop);
cudastatus=cudaEventRecord( start, 0 );
// CopyToGPU (upload the two operands)
cudastatus=cudaMemcpy(d_a,&a,size,cudaMemcpyHostToDevice);
cudastatus=cudaMemcpy(d_b,&b,size,cudaMemcpyHostToDevice);
kernelfunction<<<1,1>>>(d_a,d_b,d_c);
cudastatus=cudaMemcpy(&c,d_c,size,cudaMemcpyDeviceToHost);
// Timing done (record, sync, read the elapsed time)
cudastatus=cudaEventRecord( stop, 0 );
cudastatus=cudaEventSynchronize( stop );
cudastatus=cudaEventElapsedTime( &timeall, start, stop );
cudastatus=cudaEventDestroy( start );
cudastatus=cudaEventDestroy( stop );
printf("c:%i \n",c);
printf("time:%f \n",timeall);
// Release device memory
cudastatus=cudaFree(d_a);
cudastatus=cudaFree(d_b);
cudastatus=cudaFree(d_c);
if (cudastatus != cudaSuccess) {
fprintf(stderr, "Failed %s\n", cudaGetErrorString(cudastatus));
}
return 0;
}
|
2,443 | //
// Created by binhpht on 27.3.2021.
//
#include <stdio.h>
#include "iostream"
#include "square.cuh"
__global__ void square (float * d_out, float * d_in) {
    // Grid-wide linear index; one element squared per thread.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v;
}
// Host wrapper that launches the square kernel over d_in/d_out.
// NOTE(review): `thread_num` is accepted but ignored — the launch is
// hard-coded to <<<8,8>>> (64 threads total); confirm whether callers
// expect thread_num to control the configuration.
void call_square (int thread_num, float * d_out, float * d_in) {
::square<<<8,8>>>(d_out, d_in);
}
2,444 | #include <stdio.h>
int get_GPU_Rate()
{
    // Query device 0's properties and return its clock rate
    // (reported by the CUDA runtime in kHz).
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    return props.clockRate;
}
// Entry point: prints device 0's clock rate.
// (main with no return statement implicitly returns 0 in C++.)
int main() {
printf("GPU Rate is %d\n", get_GPU_Rate());
}
|
2,445 | //pass
//--gridDim=[11377,1,1] --blockDim=[256,1,1]
#include "common.h"
__global__ void addScalar(uint *array, int scalar, uint size)
{
    // One element per thread, with a guard for the tail of the last block.
    uint i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    array[i] += scalar;
}
|
2,446 | /*
KAM PUI SO (ANTHONY)
CS 510 GPU
Homework 1
The Cross-Over Point
CUDA really shines when given problems involving lots of data, but for small problems, using CUDA can be slower than a pure CPU solution. Since it can be difficult to get a feel for how large a problem needs to be before using the GPU becomes useful, this lab encourages you to find the "crossover point" for vector addition. Specifically: how large do the vectors need to be for the speed of GPU vector addition to eclipse the speed of CPU vector addition?
Modify the vector_addition.cu example to time how long it takes the CPU and GPU vector addition functions to operate on vectors of different magnitudes. Find (roughly) what magnitude constitutes the cross-over point for this problem on your system.
*/
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
const int SIZE = 2;
const int MAX = 214783647 ;
/* The old-fashioned CPU-only way to add two vectors */
/* Sequential CPU reference: result[i] = a[i] + b[i] for i in [0, n). */
void add_vectors_host(int *result, int *a, int *b, int n)
{
  int i = 0;
  while (i < n) {
    result[i] = a[i] + b[i];
    ++i;
  }
}
/* The kernel that will execute on the GPU */
/* GPU elementwise sum: one thread per element, tail-guarded. */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  // Excess threads past the end of the vectors must not write anything.
  if (i >= n)
    return;
  result[i] = a[i] + b[i];
}
/* This function encapsulates the process of creating and tearing down the
* environment used to execute our vector addition kernel. The steps of the
* process are:
* 1. Allocate memory on the device to hold our vectors
* 2. Copy the vectors to device memory
* 3. Execute the kernel
* 4. Retrieve the result vector from the device by copying it to the host
* 5. Free memory on the device
*/
// GPU wrapper: allocate, upload, launch, download, free — see the block
// comment above for the step-by-step description.
// NOTE(review): no CUDA call here is error-checked; a failed allocation or
// launch would silently yield an unmodified `result` — confirm acceptable.
void add_vectors_dev(int *result, int *a, int *b, int n)
{
// Step 1: Allocate memory
int *a_dev, *b_dev, *result_dev;
// Since cudaMalloc does not return a pointer like C's traditional malloc
// (it returns a success status instead), we provide as it's first argument
// the address of our device pointer variable so that it can change the
// value of our pointer to the correct device address.
cudaMalloc((void **) &a_dev, sizeof(int) * n);
cudaMalloc((void **) &b_dev, sizeof(int) * n);
cudaMalloc((void **) &result_dev, sizeof(int) * n);
// Step 2: Copy the input vectors to the device
cudaMemcpy(a_dev, a, sizeof(int) * n, cudaMemcpyHostToDevice);
cudaMemcpy(b_dev, b, sizeof(int) * n, cudaMemcpyHostToDevice);
// Step 3: Invoke the kernel
// We allocate enough blocks (each 512 threads long) in the grid to
// accomodate all `n` elements in the vectors. The 512 long block size
// is somewhat arbitrary, but with the constraint that we know the
// hardware will support blocks of that size.
dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
dim3 dimBlock(512, 1, 1);
add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);
// Step 4: Retrieve the results
// (blocking copy — also synchronizes with the kernel launch above)
cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost);
// Step 5: Free device memory
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(result_dev);
}
/* Space-separated dump of the first n entries, newline-terminated. */
void print_vector(int *array, int n)
{
  for (int i = 0; i < n; i++)
    printf("%d ", array[i]);
  printf("\n");
}
// This function print out the different in time
// Report the wall-clock delta between two timeval samples in microseconds.
void print_time(timeval start, timeval end)
{
  long us_start = start.tv_sec * 1000000 + start.tv_usec;
  long us_end = end.tv_sec * 1000000 + end.tv_usec;
  printf("Time = %ld us\n", us_end - us_start);
}
// loop over size
// Allocate two int vectors of length `size`, add them on both the CPU and
// the GPU, and print the elapsed time of each run. Returns 0 on success,
// -1 when a host allocation fails.
int vector_add_size(int size)
{
    int n = size; // Length of the arrays

    int *a = (int *) malloc(n * sizeof(int));
    int *b = (int *) malloc(n * sizeof(int));
    int *host_result = (int *) malloc(n * sizeof(int));
    int *device_result = (int *) malloc(n * sizeof(int));
    // Verify every malloc succeeded before touching the buffers; release
    // whatever was obtained (free(NULL) is a no-op) so a partial failure
    // does not leak.
    if (!(a && b && host_result && device_result))
    {
        printf("out of memory\n");
        free(a);
        free(b);
        free(host_result);
        free(device_result);
        return(-1);
    }
    struct timeval start, end;
    // Fill the whole input range. (The original started this loop at
    // n % 7, leaving the first few elements uninitialized so the CPU/GPU
    // additions read indeterminate values; it also named that offset
    // `rand`, shadowing rand().)
    for (int i = 0; i < n; ++i)
    {
        a[i] = i;
        b[i] = SIZE + i;
    }
    // Time the CPU reference implementation.
    printf("The CPU's answer: ");
    gettimeofday(&start, NULL);
    add_vectors_host(host_result, a, b, n);
    gettimeofday(&end, NULL);
    // print_vector(host_result, n);
    print_time(start, end);
    // Time the GPU implementation (includes device alloc/copy overhead).
    printf("The GPU's answer: ");
    gettimeofday(&start, NULL);
    add_vectors_dev(device_result, a, b, n);
    gettimeofday(&end, NULL);
    // print_vector(device_result, n);
    print_time(start, end);
    // Release host memory.
    free(a);
    free(b);
    free(host_result);
    free(device_result);
    return 0;
}
// main function
// Sweep vector sizes from SIZE up to 30,000,000 in 50,000-element steps,
// timing the CPU and GPU vector additions at each size.
int main(void)
{
    const int lower = SIZE;
    const int upper = 30000000;
    const int step  = 50000;
    for (int n = lower; n <= upper; n += step)
    {
        printf("\nsize = %d\n", n);
        vector_add_size(n);
    }
    return 0;
}
/**
* MIT License
*
* Copyright (c) 2017 Karan Vivek Bhargava
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without
* limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to
* whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* @file addImages.cu
* @author Karan Vivek Bhargava
* @copyright MIT License
*
* @brief CUDA Programming Boilerplate Code
*
* @section DESCRIPTION
*
* This program will open two images 'start' and 'end' inside the images folder and
* add them up on the GPU.
*
*/
#pragma once
// Global specifier indicates that the code is for the device side / GPU side
__global__
/// @brief addImages averages corresponding bytes of x and y (per-pixel mean)
/// and stores the result back into x. Expects a 1-D launch covering n threads.
/// @param n The total number of bytes of image data
/// @param x The pointer to an array of image data; overwritten with the result
/// @param y The pointer to another array of image data
void addImages(int n, unsigned char *x, unsigned char *y)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
    {
        // Sum in int, then halve once: the original (x/2 + y/2) truncated
        // each operand separately and came out 1 low whenever both bytes
        // were odd; the int-width sum cannot overflow and divides once.
        x[i] = (unsigned char)(((int)x[i] + (int)y[i]) / 2);
    }
}
#include <cufft.h>
#include <fstream>
// Forward-FFT a 1024-sample complex sine wave with cuFFT and write the
// time-domain signal and the frequency-domain magnitudes to two data files.
int main(){
    // Initializing variables
    int n = 1024;
    cufftHandle plan1d;
    double2 *h_a, *d_a, *h_b;
    std::ofstream time_out("time_out.dat"), freq_out("freq_out.dat");

    // Host buffers: h_a holds the input signal, h_b the transformed output.
    h_a = (double2 *)malloc(sizeof(double2)*n);
    h_b = (double2 *)malloc(sizeof(double2)*n);
    // 20 full periods across the n samples; imaginary part zero.
    for (int i = 0; i < n; ++i){
        h_a[i].x = sin(20*2*M_PI*i/n);
        h_a[i].y = 0;
    }
    cudaMalloc(&d_a, sizeof(double2)*n);
    cudaMemcpy(d_a, h_a, sizeof(double2)*n, cudaMemcpyHostToDevice);
    cufftPlan1d(&plan1d, n, CUFFT_Z2Z, 1);

    // In-place forward FFT (double-precision complex to complex).
    cufftExecZ2Z(plan1d, d_a, d_a, CUFFT_FORWARD);

    // Copying back
    cudaMemcpy(h_b, d_a, sizeof(double2)*n, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i){
        time_out << h_a[i].x << '\n';
        freq_out << sqrt(h_b[i].x*h_b[i].x + h_b[i].y*h_b[i].y) << '\n';
    }
    time_out.close();
    freq_out.close();

    // Release the plan and both host- and device-side buffers (the original
    // leaked all of them; harmless at process exit, but bad hygiene).
    cufftDestroy(plan1d);
    cudaFree(d_a);
    free(h_a);
    free(h_b);
    return 0;
}
#include <cuda_runtime.h>
#include <stdio.h>
// Kernel: each thread prints its own thread/block coordinates plus the
// launch dimensions, demonstrating the built-in index variables.
// (Device-side printf is for debugging only; ordering across threads is
// unspecified, but each printf call's output is emitted as one unit.)
__global__ void checkIndex(void) {
printf("threadIdx: (%d, %d, %d) \n"
"blockIdx: (%d, %d, %d) \n"
"blockDim: (%d, %d, %d) \n"
"gridDim: (%d, %d, %d) \n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, gridDim.z);
}
// Launch enough 4-thread blocks to cover 12 elements, print the launch
// configuration from the host, then have every device thread report its
// own indices.
int main(int argc, char **argv){
    const int elementCount = 12;
    const dim3 block(4);
    const dim3 grid((elementCount + block.x - 1) / block.x);

    // Host-side view of the configuration.
    printf("Block: (%d, %d, %d) \n"
           "Grid: (%d, %d, %d) \n",
           block.x, block.y, block.z, grid.x, grid.y, grid.z);

    // Device-side view, one report per thread.
    checkIndex <<< grid, block >>>();

    // Flush device output and tear down the context.
    cudaDeviceReset();
    return 0;
}
#include "includes.h"
// Kernel: block-level sum reduction of g_idata into one partial sum per
// block (written to g_odata[blockIdx.x]) using static shared memory.
// Each thread first accumulates 4 elements spaced blockDim.x apart
// (4-way unrolling), then the block reduces in shared memory.
// Preconditions: blockDim.x == DIM (shared-array size) and blockDim.x is a
// power of two (required by the halving tree below).
__global__ void reduceSmemUnroll(int *g_idata, int *g_odata, unsigned int n)
{
// static shared memory
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// global index, 4 blocks of input data processed at a time
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// unrolling 4 blocks
int tmpSum = 0;
// boundary check
// NOTE(review): threads whose 4-element window does not fit entirely below
// n contribute 0, so any tail elements (n not a multiple of 4*blockDim.x)
// are silently dropped — confirm callers pad or size n accordingly.
if (idx + 4 * blockDim.x <= n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
tmpSum = a1 + a2 + a3 + a4;
}
smem[tid] = tmpSum;
__syncthreads();
// in-place reduction in shared memory; each step halves the active range.
// The barriers sit outside the divergent ifs so all threads reach them.
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
// NOTE(review): the volatile pointer relies on implicit warp-synchronous
// execution, a pre-Volta idiom; on Volta and later this needs __syncwarp()
// between steps to be strictly correct — confirm the target architecture.
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
#include "includes.h"
// Kernel: advance the presynaptic activity trace C of every plastic synapse
// by one timestep. The trace decays toward zero with time constant tau_C,
// and a spike arriving at exactly the current simulation time pushes it
// toward 1 scaled by alpha_C. Grid-stride loop over the plastic-synapse
// index list; writes are skipped when the value is unchanged.
__global__ void update_presynaptic_activities_C_kernel (float* d_recent_presynaptic_activities_C, float* d_time_of_last_spike_to_reach_synapse, float timestep, float current_time_in_seconds, float synaptic_neurotransmitter_concentration_alpha_C, float decay_term_tau_C, int* d_plastic_synapse_indices, size_t total_number_of_plastic_synapses) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x;
         i < total_number_of_plastic_synapses;
         i += stride) {
        const int syn = d_plastic_synapse_indices[i];
        const float old_C = d_recent_presynaptic_activities_C[syn];
        // One Euler step of exponential decay toward zero.
        float new_C = (1 - (timestep/decay_term_tau_C)) * old_C;
        // Spike arriving right now: drive the trace toward 1.
        if (d_time_of_last_spike_to_reach_synapse[syn] == current_time_in_seconds) {
            new_C += timestep * synaptic_neurotransmitter_concentration_alpha_C * (1 - old_C);
        }
        // Avoid the global write when nothing changed.
        if (old_C != new_C) {
            d_recent_presynaptic_activities_C[syn] = new_C;
        }
    }
}
//#include "particle.cuh"
//
//
//// constants
//const unsigned int g_window_width = 512;
//const unsigned int g_window_height = 512;
//
//const unsigned int g_mesh_width = 256;
//const unsigned int g_mesh_height = 256;
//
//int vectorCount;
//GLuint vbo;
//struct cudaGraphicsResource *vbo_cuda;
//
////method declaration
//void display(void);
//void mouse(int button, int state, int x, int y);
//void motion(int x, int y);
//void keyboard(unsigned char key, int, int);
//
//__global__ void square_coordinate(float2 *vectors);
//__global__ void dummyCoordinate(float2* vectors);
//
//class Dummy{
//public:
// float2* pos;
// int size;
// Dummy(float2* input, int input2){
// pos = input;
// size = input2;
// }
//
// __device__
// void move(){
// for (int i = 0; i < size; i++)
// {
// pos[i].x += 0.01;
// }
// }
//
//private:
//};
//
//Dummy *d;
//Dummy *d_g;
//ParticleController *cuda_particles;
//float2 *changes;
//
//__global__
//void justForTest(ParticleController* device, int count, float2* result){
// for (int i = 0; i < count; i++){
// result[i] = make_float2(device->particles[i].pos.x, device->particles[i].pos.y);
// }
//}
//
////Actual program------------------------------------------------------------------------------------
//int wamain(int argc, char** argv)
//{
// // Create GL context
// glutInit(&argc, argv);
//
// glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
// glutInitWindowSize(g_window_width, g_window_height);
// glutCreateWindow("interop");
//
// GLenum glewInitResult = glewInit();
// if (glewInitResult != GLEW_OK) {
// throw std::runtime_error("Couldn't initialize GLEW");
// }
//
//
// // initialize GL
// glClearColor(0.0, 0.0, 0.0, 1.0);
// glDisable(GL_DEPTH_TEST);
//
// // viewport
// glViewport(0, 0, g_window_width, g_window_height);
//
// // projection
// glMatrixMode(GL_PROJECTION);
// glLoadIdentity();
// glOrtho(0, g_window_width, g_window_height, 0, -10, 10);
//
// cudaGLSetGLDevice(0);
//
//
// // register callbacks
// glutDisplayFunc(display);
// glutKeyboardFunc(keyboard);
//
// //initialize particles
// ParticleController particles;
// particles.initializeGrid(g_window_width, g_window_height);
// particles.addParticles();
// particles.scale = 1.0f;
// int particle_count = 300;
//
// cout << particles.particles[200].pos.x;
// cout << particles.print(100);
//
// for (int iii = 0; iii < 100; iii++){
// particles.update();
// }
//
//
// Node* g_grid;
// bool* g_active;
// Particle* g_particles;
// Material* g_material;
// Material* g_particles_material;
//
// cudaMalloc((void**)&cuda_particles, sizeof(ParticleController));
// cudaMemcpy(cuda_particles, &particles, sizeof(ParticleController), cudaMemcpyHostToDevice);
//
// cudaMalloc((void**)&g_particles, sizeof(Particle)*particle_count);
// cudaMemcpy(g_particles, particles.particles, sizeof(Particle)*particle_count, cudaMemcpyHostToDevice);
// cudaMemcpy(&(cuda_particles->particles), &(g_particles), sizeof(Particle*), cudaMemcpyHostToDevice);
//
// cudaMalloc((void**)&g_material, sizeof(Material)*particle_count);
// cudaMemcpy(g_material, particles.P_Materials, sizeof(Material)*particle_count, cudaMemcpyHostToDevice);
// cudaMemcpy(&(cuda_particles->materials), &(g_material), sizeof(Material*), cudaMemcpyHostToDevice);
//
// cudaMalloc((void**)&g_grid, sizeof(Node)*g_window_width*g_window_height);
// cudaMemcpy(g_grid, particles.grid, sizeof(Node)*g_window_width*g_window_height, cudaMemcpyHostToDevice);
// cudaMemcpy(&(cuda_particles->grid), &(g_grid), sizeof(Node*), cudaMemcpyHostToDevice);
//
//
//
// // create vbo
// vectorCount = particle_count;
// unsigned int size = vectorCount * sizeof(float2);
// glGenBuffers(1, &vbo);
//
// // bind, initialize, unbind
// glBindBuffer(GL_ARRAY_BUFFER, vbo);
// glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
// glBindBuffer(GL_ARRAY_BUFFER, 0);
//
// // register buffer object with CUDA
// cudaGraphicsGLRegisterBuffer(&vbo_cuda, vbo, cudaGraphicsMapFlagsWriteDiscard);
//
// glutMainLoop();
//
// cudaFree(cuda_particles);
// cudaFree(g_active);
// cudaFree(g_grid);
// cudaFree(g_material);
// cudaFree(g_particles);
//
// return 0;
//}
//
//__global__
//void changeVectorParticles(float2* vectors, ParticleController* d_g, int count){
// d_g->cuda_update();
// Particle* changes = d_g->particles;
// for (int i = 0; i < count; i++){
// //vectors[i] = make_float2(changes[i].pos.x, changes[i].pos.y*(0.3));
// vectors[i] = make_float2(50.0f*i, 50.0*i);
// }
//}
//
//void display(void)
//{
// float2 *raw_ptr;
// size_t buf_size;
//
// cudaGraphicsMapResources(1, &vbo_cuda, 0);
// cudaGraphicsResourceGetMappedPointer((void **)&raw_ptr, &buf_size, vbo_cuda);
//
// changeVectorParticles << <1, 1 >> >(raw_ptr, cuda_particles, vectorCount);
//
// cudaGraphicsUnmapResources(1, &vbo_cuda, 0);
//
// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//
// // render from the vbo
// glBindBuffer(GL_ARRAY_BUFFER, vbo);
// glVertexPointer(2, GL_FLOAT, 0, 0);
//
// glEnableClientState(GL_VERTEX_ARRAY);
// glColor3f(1.0, 0.0, 0.0);
// glDrawArrays(GL_POINTS, 0, vectorCount);
// glDisableClientState(GL_VERTEX_ARRAY);
//
// glBindBuffer(GL_ARRAY_BUFFER, 0);
//
// glutSwapBuffers();
// glutPostRedisplay();
//}
//
//
//void keyboard(unsigned char key, int, int)
//{
// switch (key)
// {
// case(27) :
// // deallocate memory
// //g_vec.clear();
// //g_vec.shrink_to_fit();
// exit(0);
// default:
// break;
// }
//}
#include <cstdlib>
#include <cassert>
#include <iostream>
// __global__ indicates it will called from the host and run on the device
// __device__ is for device/device and __host__ for host/host
// Kernel: element-wise c = a + b for float vectors of length N.
// (__global__ marks host-callable device code; __device__ would be
// device/device, __host__ host/host.)
__global__ void vectorAdd (float*a, float* b, float* c, int N)
{
    // Flat global thread index across the 1-D grid.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard clause: the grid may contain more threads than elements.
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Driver: add two 2^20-element float vectors using CUDA unified memory and
// verify every element on the host. Asserts on mismatch; returns 0 otherwise.
int main ()
{
    int N = 1 << 20;
    size_t bytes = N * sizeof(float);

    // Unified (managed) memory is visible to both host and device, so no
    // explicit cudaMemcpy to/from the GPU is needed.
    float *a, *b, *c;
    cudaMallocManaged(&a, bytes);
    cudaMallocManaged(&b, bytes);
    cudaMallocManaged(&c, bytes);

    for (int i = 0; i < N; i++)
    {
        a[i] = rand() % 100;
        b[i] = rand() % 100;
    }

    // Round the grid up so every element gets a thread.
    const int THREADS = 256;
    const int BLOCKS = (N + THREADS - 1)/THREADS;
    // Launch with the standard triple-chevron syntax. (The original routed
    // through cudaLaunchKernel with a pointer-to-array cast for the argument
    // block — error-prone and no benefit for a static launch like this.)
    vectorAdd<<<BLOCKS, THREADS>>>(a, b, c, N);

    // Kernel launches are asynchronous; wait before reading c on the host.
    cudaDeviceSynchronize();

    for (int i = 0; i < N; i++)
    {
        assert(c[i] == a[i] + b[i]);
    }

    // Release the managed allocations (the original leaked them).
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);

    std::cout << "Program completed!" << std::endl;
    return 0;
}
#include <stdio.h>
#include <time.h>
#define PerThread 1024*4*8//每个线程计算多少个i
#define N 64*256*1024*4//积分计算PI总共划分为这么多项相加
#define BlockNum 32 //block的数量
#define ThreadNum 64 //每个block中threads的数量
// Kernel: midpoint-rule partial sums for pi = integral of 4/(1+x^2) over
// [0,1], split into N terms. Each thread integrates PerThread consecutive
// terms, the threads of a block combine their sums in shared memory with a
// tree reduction, and thread 0 writes one partial sum per block to Gpu_list.
// Requires blockDim.x (== ThreadNum) to be a power of two.
__global__ void Gpu_calPI(double* Gpu_list)
{
    __shared__ double cache[ThreadNum]; // one slot per thread in this block
    int cacheIdx = threadIdx.x;
    int tid = blockIdx.x*blockDim.x*blockDim.y + threadIdx.x;
    int begin = tid*PerThread;
    int end = begin + PerThread - 1;    // inclusive last term of this range
    double temp = 0;
    // Inclusive upper bound: the original used `i < end` with the inclusive
    // `end`, silently dropping the final term of every thread's range
    // (PerThread-1 terms instead of PerThread) and biasing the result low.
    for (int i = begin; i <= end; i++){
        temp += 4.0/(1+((i+0.5)/(N))*((i+0.5)/(N)));
    }
    cache[cacheIdx] = temp;
    __syncthreads(); // all partial sums in place before reducing

    // Pairwise tree reduction in shared memory; the barrier sits outside
    // the divergent if so every thread reaches it.
    int half = blockDim.x/2;
    while (half != 0){
        if (cacheIdx < half) cache[cacheIdx] += cache[cacheIdx + half];
        __syncthreads();
        half = half/2;
    }
    if (cacheIdx == 0){
        Gpu_list[blockIdx.x] = cache[0];
    }
}
// Driver: launch the pi kernel, sum the per-block partial sums on the host,
// normalize by N, and report the estimate together with the elapsed time.
int main(void)
{
    double * cpu_list;
    double * Gpu_list;
    double outcome = 0;
    cpu_list = (double*)malloc(sizeof(double)*BlockNum);
    cudaMalloc((void**)&Gpu_list, sizeof(double)*BlockNum);

    double begin = clock();
    Gpu_calPI<<<BlockNum,ThreadNum>>>(Gpu_list);
    // Blocking copy on the default stream also waits for the kernel.
    cudaMemcpy(cpu_list, Gpu_list, sizeof(double)*BlockNum, cudaMemcpyDeviceToHost);
    for(int i = 0; i < BlockNum; i++){
        outcome += cpu_list[i];
    }
    outcome = outcome/(N);
    double end = clock();
    printf("Scu1: N=%d, outcome=%.10f,time spend %.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC));

    // Release device and host buffers (the original leaked both).
    cudaFree(Gpu_list);
    free(cpu_list);
    return 0;
}
#include "includes.h"
// Kernel: normalize a gray-level co-occurrence matrix by dividing every
// counter in `glcm` by `sum`, writing float results into `norm`.
// Launched on a 2-D grid; each thread handles one (ix, iy) entry.
__global__ void normalization(int *glcm,float *norm,int Max,int sum){
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
// NOTE(review): the flat index uses a row stride of Max, while the bounds
// check below assumes (Max+1)*(Max+1) elements — confirm against the launch
// configuration; if rows are really Max+1 wide, entries get aliased/skipped.
unsigned int idx = iy * Max + ix;
// NOTE(review): this barrier has no effect here (no shared memory is read
// or written) — presumably left over from an earlier version.
__syncthreads();
if(idx<(Max+1)*(Max+1)){
norm[idx]=float(glcm[idx])/float(sum);
}
}
#include <stdio.h>
// Enumerate every CUDA device on the system and print its key hardware
// limits (threads per block, SM count, clock rate, block/grid dimensions).
int main(int argc, char const *argv[]) {
    int dev_count;
    cudaGetDeviceCount(&dev_count);
    printf("There are %d cuda Devices\n", dev_count);

    cudaDeviceProp dev_prop;
    for (int dev = 0; dev < dev_count; ++dev)
    {
        cudaGetDeviceProperties(&dev_prop, dev);
        printf("Device %d: \n", dev);
        printf("Maximum number of threads per block: %d \n", dev_prop.maxThreadsPerBlock);
        printf("Number of SMs in the device: %d \n", dev_prop.multiProcessorCount);
        printf("Clockrate: %d \n", dev_prop.clockRate);
        printf("Maximum threads in x = %d, y = %d and z = %d \n",
               dev_prop.maxThreadsDim[0], dev_prop.maxThreadsDim[1],
               dev_prop.maxThreadsDim[2]);
        printf("Maximum size of grid in x = %d, y = %d, z = %d \n",
               dev_prop.maxGridSize[0], dev_prop.maxGridSize[1],
               dev_prop.maxGridSize[2]);
        printf("\n");
    }
    return 0;
}
#include <iostream>
#include "cuda.h"
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"
#include "device_launch_parameters.h"
// Device code used by the bandwidth measurements below.
namespace kernel
{
    // Kernel: single-threaded sequential sum over `size` ints in global
    // memory. The total is stored through `out` so the loads cannot be
    // optimized away.
    __global__ void measure_global_bandwidth_kb(int *out, int *device, int size)
    {
        int acc = 0;
        int i = 0;
        while (i < size)
        {
            acc += device[i];
            ++i;
        }
        *out = acc;
    }
}
// Copy n MiB host->device and then device->host, timing each direction with
// CUDA events. `enable_sync` selects cudaMemcpy over cudaMemcpyAsync (the
// host buffer is pinned, so the async variant is meaningful). Returns the
// pair {host->device ms, device->host ms}.
auto measure_host_device_bandwidth_mb(const int n, const bool enable_sync)
{
    const int bytes=n*(1<<20);
    int *host, *device;
    cudaMallocHost((void **)&host, bytes);   // pinned host buffer
    cudaMalloc((void **)&device, bytes);

    cudaEvent_t hd_start, hd_stop, dh_start, dh_stop;
    cudaEventCreate(&hd_start);
    cudaEventCreate(&hd_stop);
    cudaEventCreate(&dh_start);
    cudaEventCreate(&dh_stop);

    // One copy routine for both modes keeps the timing scaffold identical
    // instead of duplicating the event choreography per branch.
    const auto copy = [&](void *dst, const void *src, cudaMemcpyKind kind)
    {
        if (enable_sync)
            cudaMemcpy(dst, src, bytes, kind);
        else
            cudaMemcpyAsync(dst, src, bytes, kind);
    };

    cudaEventRecord(hd_start);
    copy(device, host, cudaMemcpyHostToDevice);
    cudaEventRecord(hd_stop);
    cudaEventSynchronize(hd_stop);

    cudaEventRecord(dh_start);
    copy(host, device, cudaMemcpyDeviceToHost);
    cudaEventRecord(dh_stop);
    cudaEventSynchronize(dh_stop);

    float hd_time, dh_time;
    cudaEventElapsedTime(&hd_time, hd_start, hd_stop);
    cudaEventElapsedTime(&dh_time, dh_start, dh_stop);

    cudaFreeHost(host);
    cudaFree(device);
    cudaEventDestroy(hd_start);
    cudaEventDestroy(hd_stop);
    cudaEventDestroy(dh_start);
    cudaEventDestroy(dh_stop);
    return std::make_pair(hd_time, dh_time);
}
// Print a table of average host<->device copy times for payloads of
// 32..256 MiB (step 32), each averaged over 10 runs, in the selected
// sync/async copy mode.
void measure_host_device_bandwidth(const bool enable_sync)
{
    const int repeat=10;
    // Fix: the report header previously misspelled "async" as "aysnc".
    std::cout<<"host <-> device "<<(enable_sync ? "sync" : "async")<<std::endl;
    std::cout<<"data size[MB], host to device[ms], device to host[ms]"<<std::endl;
    for(int n=32; n<=256; n+=32)
    {
        float hd_sum=0, dh_sum=0;
        for(int i=0; i<repeat; ++i)
        {
            const auto time=measure_host_device_bandwidth_mb(n, enable_sync);
            hd_sum+=time.first;
            dh_sum+=time.second;
        }
        std::cout<<n<<", "<<hd_sum/repeat<<", "<<dh_sum/repeat<<std::endl;
    }
    std::cout<<"--\n"<<std::endl;
}
// Time a single-thread kernel that reads n/2 KiB of global memory.
// Returns the elapsed time in milliseconds.
auto measure_global_bandwidth_kb(const int n)
{
    const int bytes=n*(1<<10)/2;
    int *out, *device;
    cudaMalloc((void **)&out, sizeof(int));
    cudaMalloc((void **)&device, bytes);
    // Zero the buffer before timing: the kernel sums every word, and the
    // original read uninitialized device memory (flagged by
    // compute-sanitizer initcheck). The fill sits outside the timed span,
    // so the measurement itself is unchanged.
    cudaMemset(device, 0, bytes);
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 grid(1);
    dim3 threads(1);
    cudaEventRecord(start);
    kernel::measure_global_bandwidth_kb<<<grid, threads>>>(out, device, bytes/sizeof(int));
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaFree(out);
    cudaFree(device);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return time;
}
// Print average single-thread global-memory read times for 32..256 KB
// buffers (step 32), each averaged over 10 kernel runs.
void measure_global_bandwidth()
{
    const int repeat = 10;
    std::cout<<"global memory"<<std::endl;
    std::cout<<"data size[KB], time[ms]"<<std::endl;
    for (int n = 32; n <= 256; n += 32)
    {
        float total = 0;
        int run = 0;
        while (run < repeat)
        {
            total += measure_global_bandwidth_kb(n);
            ++run;
        }
        std::cout<<n<<", "<<total/repeat<<std::endl;
    }
    std::cout<<"--\n"<<std::endl;
}
// Run the synchronous and asynchronous host<->device sweeps, then the
// global-memory read benchmark.
int main()
{
    for (const bool sync_mode : {true, false})
        measure_host_device_bandwidth(sync_mode);
    measure_global_bandwidth();
    return 0;
}
#include <stdio.h>
#include <cuda.h>
#define N 1024
#define BLOCK_SIZE 32
// Kernel stub for the N x N matrix product c = a * b (exercise template —
// intentionally left unimplemented, so c passes through unchanged).
__global__ void mm(double *a, double *b, double *c)
{
/* ----- YOUR CODE HERE ----- */
/* -------------------------- */
}
// Host driver for the mm exercise: fills two N x N matrices with uniform
// randoms in [0,1), copies all three matrices to the device, launches the
// (stub) kernel 99 times, and prints one element of the result.
int main () {
double *a, *b, *c;
double *d_a, *d_b, *d_c;
// NOTE(review): `size` is a double used as an allocation/copy byte count;
// it converts implicitly at every call site but should be a size_t.
double size = sizeof(double) * N*N;
// NOTE(review): block(N, N) requests N*N = 1,048,576 threads per block,
// far beyond the hardware's 1024-thread limit, so each launch fails with
// an invalid-configuration error — and with no cudaGetLastError() check
// the failure is silent. The value printed at the end is just the
// zero-initialized c copied back. Confirm the intended fix is a tiled
// launch once the kernel is implemented.
dim3 grid(1, 1);
dim3 block(N, N);
a = (double *) malloc (size);
b = (double *) malloc (size);
c = (double *) malloc (size);
cudaMalloc ((void**)&d_a, size);
cudaMalloc ((void**)&d_b, size);
cudaMalloc ((void**)&d_c, size);
for( int i = 0; i < N*N; i++ )
{
a[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
b[i] = (double) ( rand() ) / ( RAND_MAX + 1.0 );
c[i] = 0;
}
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);
// Repeat the launch 99 times (benchmark-style loop).
for (int i=1; i<100; i++) {
mm <<<grid, block>>> (d_a, d_b, d_c);
}
cudaDeviceSynchronize();
cudaMemcpy (c, d_c, size, cudaMemcpyDeviceToHost);
//for( int i=0; i < N*N; i++ )
//{
// printf("%f\t%f\t%f\n", a[i], b[i], c[i]);
//}
printf("%f\n", c[N * N/2]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(a);
free(b);
free(c);
return 0;
}
//
// 【normalize_vector】
//
// 概要: ベクトルの正規化関数サンプル
// 参考:
// CUDA for Engineers: An Introduction to High-Performance Parallel Computing
//
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <cmath>
#include <iostream>
using namespace std;
// normalize vector
// Normalize device vector v in place so its Euclidean norm becomes 1.
template <typename T>
void normalize(thrust::device_vector<T> &v){
    // Squared length via the dot product of v with itself, then its root.
    const double sumsq = thrust::inner_product(v.begin(), v.end(), v.begin(), 0.0);
    const double norm = std::sqrt(sumsq);
    using namespace thrust::placeholders;
    // Divide every component by the norm on the device.
    thrust::transform(v.begin(), v.end(), v.begin(), _1 /= norm);
}
// Demo: normalize the 2-vector (1, 2) on the device and print the result.
int main(){
    thrust::device_vector<double> vec(2);
    vec[0] = 1.0;
    vec[1] = 2.0;
    // Normalize the vector on the device.
    normalize<double>(vec);
    // Print each component for verification.
    int i = 0;
    while (i < (int)vec.size()) {
        cout << "vec[" << i << "] = " << vec[i] << endl;
        ++i;
    }
    return 0;
}
#include <iostream>
#include <math.h>
// Kernel function to generate random numbers
// Kernel function to generate pseudo-random numbers: each thread iterates a
// linear congruential generator (a = 1103515245, c = 12345, modulus m,
// computed in double precision via fmod) threadIdx.x times, seeded with its
// own global index, then increments the histogram bin it lands on.
// NOTE(review): threads with threadIdx.x == 0 never iterate and simply
// count their own starting index — confirm this is intended.
__global__
void genran(int *rnd,double m)
{
double n,a=1103515245, c=12345;
n=blockIdx.x*blockDim.x+threadIdx.x;
//n=threadIdx.x;
// Iterate the LCG; fmod keeps n in [0, m).
for(int i=0;i<threadIdx.x;i++)
n=fmod(((n*a)+c),m);
// NOTE(review): this block-level barrier is not required for the atomic
// below — presumably left in for ordering/timing; confirm.
__syncthreads();
// Histogram increment; atomic because many threads may hit the same bin.
atomicAdd(&rnd[(unsigned long int)n],1);
}
// Host driver: allocate 2^t histogram bins in unified memory, run one GPU
// thread per candidate random value, then render the bin counts as a coarse
// ASCII histogram — one output row per 2^(t-5) bins, one '*' per 2^(t-10)
// counts in that row.
int main(void)
{
int t=29;
long int m = pow(2,t);
int *rnd;
double val;
// Allocate Unified Memory – accessible from CPU or GPU
// NOTE(review): 2^29 int bins = 2 GiB of managed memory — confirm the
// target device/host can back an allocation this large.
cudaMallocManaged(&rnd, m*sizeof(int));
// initialize: modulus for the device-side LCG, passed as a double
val = m;
for (int i = 0; i < m; i++) {
rnd[i] = 0;
}
//generate random numbers
int blockSize = 128;
int numblocks = (m+blockSize-1)/blockSize;
// Run kernel
genran<<<numblocks, blockSize>>>(rnd,val);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
//Generate Histrogram
// count accumulates bin totals within the current row; j counts bins.
long double count =0,j=0;
for(long int i=0;i<m;++i)
{
count+=rnd[i];
j++;
if(j==pow(2,t-5))
{
j=0;
printf("|");
// Scale the row total down to a printable number of stars.
count/=pow(2,t-10);
for(int k=0;k<count;++k)
printf("*");
printf("\n");
count=0;
}
}
// Free memory
cudaFree(rnd);
return 0;
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
typedef unsigned long long ul;
typedef unsigned int uint;
int banyakdata = 256;
int dimensigrid = 2;
int dimensiblok = 128;
typedef struct {
char size;
uint* value;
}big;
typedef struct {
short size;
char* value;
}stringnumber;
__host__ __device__ short ukuranbit(big *a);
__host__ __device__ char getbit(big* a, short count);
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser);
__host__ __device__ void kali(big *a, big *b, big* res);
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff);
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff);
void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff);
void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff);
void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff);
void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff);
void CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res);
void CUDAdek(big *c, big *e, big* p, big *res);
void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y);
void maindekripsi(big* c,big* x,big* p,big* res2);
void tambah(big* a, char b, big* res);
void kurang(big* a, big *b, big* res);
void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff);
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff);
void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2);
void copybig(big* a, big* res);
void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten);
void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff);
void printsn(stringnumber* sn);
void teskonversi();
// ElGamal encryption of one plaintext block m with ephemeral key k:
// writes the ciphertext pair res[0] = g^k mod p and res[1] = (y^k * m) mod p.
// minbuff/mulbuff are caller-provided scratch bignums whose value arrays
// hold 2*p->size limbs (sized for the double-width products of kali()).
void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff) {
// Cipher block 1: c1 = g^k mod p
modexp(g,k,p,res,minbuff->value,mulbuff);
// Cipher block 2: c2 = (y^k * m) mod p
modexp(y, k, p, res + 1,minbuff->value,mulbuff);
kali(res + 1, m, mulbuff);
modulo(mulbuff, p, res+1, minbuff->value);
}
// ElGamal decryption of one block: res = (c1^e * c2) mod p, where
// e = p - x - 1 (computed in init()). For prime p this makes c1^e the
// modular inverse of c1^x, so the plaintext is recovered.
void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff) {
modexp(c1,e,p,res,minbuff->value,mulbuff);
kali(res, c2, mulbuff);
modulo(mulbuff, p, res, minbuff->value);
}
// Sequential (host) encryption driver: encrypts all banyakdata plaintext
// blocks; block i's ciphertext pair lands at res[2*i] and res[2*i+1].
// minbuff/mulbuff provide one scratch bignum of each kind per block.
void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff){
    int i = 0;
    while (i < banyakdata)
    {
        enkripsi(m + i, k + i, g, p, y, res + 2 * i, minbuff + i, mulbuff + i);
        ++i;
    }
}
// Sequential (host) decryption driver: recovers plaintext i into res[i]
// from the ciphertext pair (c[2*i], c[2*i+1]).
void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff){
    int i = 0;
    while (i < banyakdata)
    {
        dekripsi(c + 2*i, c + 2*i+1, e, p, res+i, minbuff+i, mulbuff+i);
        ++i;
    }
}
// Allocate per-block scratch bignums, run the encryption loop over all
// banyakdata blocks, and report the elapsed time in milliseconds.
void CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res) {
    big *minbuff, *mulbuff;
    minbuff = (big*) malloc(banyakdata * sizeof(big));
    mulbuff = (big*) malloc(banyakdata * sizeof(big));
    for (int i = 0; i < banyakdata; i++) {
        // Scratch sized for double-width intermediates (products before mod).
        minbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2);
        mulbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2);
    }
    clock_t begin = clock();
    kernelenk(m, k, g, p, y, res, minbuff, mulbuff);
    clock_t end = clock();
    // Convert clock ticks to milliseconds portably: the original divided by
    // 1000, which is only correct when CLOCKS_PER_SEC == 1,000,000.
    double time_spent = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi : %f ms\n", time_spent);
    // Release the scratch buffers (the original leaked all of them).
    for (int i = 0; i < banyakdata; i++) {
        free(minbuff[i].value);
        free(mulbuff[i].value);
    }
    free(minbuff);
    free(mulbuff);
}
// Allocate per-block scratch bignums, run the decryption loop over all
// banyakdata ciphertext pairs, and report the elapsed time in milliseconds.
void CUDAdek(big *c, big *e, big* p, big *res) {
    big *minbuff, *mulbuff;
    minbuff = (big*) malloc(banyakdata * sizeof(big));
    mulbuff = (big*) malloc(banyakdata * sizeof(big));
    for (int i = 0; i < banyakdata; i++) {
        // Scratch sized for double-width intermediates (products before mod).
        minbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2);
        mulbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2);
    }
    clock_t begin = clock();
    kerneldek(c, e, p, res, minbuff, mulbuff);
    clock_t end = clock();
    // Convert clock ticks to milliseconds portably: the original divided by
    // 1000, which is only correct when CLOCKS_PER_SEC == 1,000,000.
    double time_spent = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi : %f ms\n", time_spent);
    // Release the scratch buffers (the original leaked all of them).
    for (int i = 0; i < banyakdata; i++) {
        free(minbuff[i].value);
        free(mulbuff[i].value);
    }
    free(minbuff);
    free(mulbuff);
}
// Encrypt all blocks and print a sample of the resulting ciphertexts
// (the first five and the last pair).
void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y){
    printf("Encrypting...\n");
    CUDAenk(m, k, g, p, y, res);
    for (int i = 0; i < 5; i++)
    {
        printf("Cipher %d size %d : %u\n",i, res[i].size, res[i].value[0]);
    }
    printf("Cipher ... : ...\n");
    printf("Cipher %d size %d : %u\n",banyakdata*2-2, res[banyakdata*2-2].size, res[banyakdata*2-2].value[0]);
    // Fix: the original printed the size of element banyakdata*2-2 here
    // while labeling and dereferencing element banyakdata*2-1.
    printf("Cipher %d size %d : %u\n",banyakdata*2-1, res[banyakdata*2-1].size, res[banyakdata*2-1].value[0]);
}
// Decrypt every ciphertext pair and print a sample of the recovered
// plaintexts (the first five, limbs 0 and 1, plus the last block's limb 0).
// value[1] is always in bounds: init() allocates 2*p->size limbs per result.
void maindekripsi(big* c, big* e,big* p,big* res2){
printf("Decrypting...\n");
CUDAdek(c, e, p, res2);
for (int i = 0; i < 5; i++)
{
printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[0]);
printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[1]);
}
printf("Plain ... : ...\n");
printf("Plain %d size %d : %u\n",banyakdata-1, res2[banyakdata-1].size, res2[banyakdata-1].value[0]);
}
// Compute the public key y = (g^x) mod p; minbuff/mulbuff are modexp scratch.
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){
modexp(g,x,p,y,minbuff,mulbuff);
}
// Generate all keys and test data: random public modulus p, generator g,
// private key x, decryption exponent e = p - x - 1, public key y = g^x mod p,
// plus banyakdata random plaintext blocks m with per-block ephemeral keys k,
// and the result buffers res (ciphertext pairs) and res2 (recovered plains).
// NOTE(review): p is random, not a verified prime, and g/x are not reduced
// mod p — adequate for throughput benchmarking, not for real ElGamal.
void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2){
// Public key p (16 limbs; limb 0 pinned to UINT_MAX, presumably to keep the
// low limb large — confirm intent)
p->size = 16;
p->value = (uint*) malloc(p->size * sizeof(uint));
p->value[0] = UINT_MAX;
for (int i = 1; i < p->size; i++)
{
//p->value[i] = 2357;
p->value[i] = rand() % UINT_MAX;
}
// p->value[0] = UINT_MAX-4;
// p->value[0] = 2387;
// p->value[1] = 2357;
// Public key g
g->size = 16;
g->value = (uint*) malloc(g->size * sizeof(uint));
for (int i = 0; i < g->size; i++)
{
// g->value[i] = 2;
g->value[i] = rand() % UINT_MAX;
}
// Private key x
x->size = 16;
x->value = (uint*) malloc(x->size * sizeof(uint));
for (int i = 0; i < x->size; i++)
{
// x->value[i] = 1751;
x->value[i] = rand() % UINT_MAX;
}
// Decryption exponent e = (p - x - 1): raising c1 to e inverts c1^x mod p
// (valid for prime p, by Fermat's little theorem).
big *xplus1 = (big*) malloc(sizeof(big));
xplus1->value = (uint*) malloc(p->size * sizeof(uint));
e->value = (uint*) malloc(p->size * sizeof(uint));
tambah(x, 1, xplus1);
kurang(p,xplus1,e);
// printf("e adalah %u\n", e->value[0]);
free(xplus1->value);
free(xplus1);
// Public key y = (g^x) mod p; buffers sized 2*p->size limbs for the
// double-width intermediates produced by kali() inside modexp().
// NOTE(review): mulbuff and minbuff are leaked here (never freed).
big* mulbuff = (big*) malloc(sizeof(big));
mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2);
uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2);
y->value = (uint*) malloc(p->size * 2 * sizeof(uint));
carikunciy(g,x,p,y,minbuff,mulbuff);
// printf("y adalah %u\n",y->value[0]);
//========================================================//
// Plaintext blocks
for(int i = 0 ; i < banyakdata ; i++){
m[i].size = 16;
m[i].value = (uint*) malloc(m[i].size * sizeof(uint));
for (int j = 0; j < m[i].size; j++)
{
// m[i].value[j] = 1001;
m[i].value[j] = rand() % UINT_MAX;
}
// Per-block ephemeral key k
k[i].size = 16;
k[i].value = (uint*) malloc(k[i].size * sizeof(uint));
for (int j = 0; j < k[i].size; j++)
{
// k[i].value[j] = 77;
k[i].value[j] = rand() % UINT_MAX;
}
}
// Result buffers: two ciphertext bignums per data block
for (int i = 0; i < banyakdata*2; i++)
{
res[i].value = (uint*) malloc(sizeof(uint) * p->size *2);
}
// Result buffers for the decrypted plaintexts
for (int i = 0; i < banyakdata; i++)
{
res2[i].value = (uint*) malloc(sizeof(uint) * p->size * 2);
}
}
// Entry point: build keys and data, encrypt, decrypt, and release memory.
int main(){
    big *p, *g, *x, *e, *y, *m, *k, *res, *res2;
    p = (big*)malloc(sizeof(big));
    g = (big*)malloc(sizeof(big));
    x = (big*)malloc(sizeof(big));
    e = (big*)malloc(sizeof(big));
    y = (big*)malloc(sizeof(big));
    m = (big*)malloc(banyakdata * sizeof(big));
    k = (big*)malloc(banyakdata * sizeof(big));
    res = (big*)malloc(banyakdata * 2 * sizeof(big));
    res2 = (big*)malloc(banyakdata * sizeof(big));
    init(p,g,x,e,y,m,k,res,res2);
    mainenkripsi(m,k,res,g,p,y);
    printf(" ========================= \n");
    maindekripsi(res,e,p,res2);
    // Free every element's limb buffer before the arrays that hold them.
    // (The original called free(m->value) etc., which released only the
    // first element's buffer of each array and leaked the rest.)
    for (int i = 0; i < banyakdata; i++) {
        free(m[i].value);
        free(k[i].value);
        free(res2[i].value);
    }
    for (int i = 0; i < banyakdata*2; i++) {
        free(res[i].value);
    }
    free(p->value);
    free(p);
    free(g->value);
    free(g);
    free(x->value);
    free(x);
    free(e->value);
    free(e);
    free(y->value);
    free(y);
    free(m);
    free(k);
    free(res);
    free(res2);
    //teskonversi();
    return 0;
}
// Number of significant bits in big-number a: 32 per lower limb plus the
// position of the highest set bit in the top limb.
__host__ __device__ short ukuranbit(big *a) {
    short bits = (a->size - 1) * 32;
    for (uint top = a->value[a->size-1]; top != 0; top >>= 1) {
        ++bits;
    }
    return bits;
}
// Bit `count` of a (LSB-first across 32-bit limbs), returned as 0 or 1.
__host__ __device__ char getbit(big* a, short count) {
    const uint limb = a->value[count / 32];
    const uint mask = (uint) 1 << (count % 32);
    return (limb & mask) != 0;
}
// Limb `noblok` of num after a virtual left shift of the whole number by
// `geser` bits (0 <= geser < 32): the high bits carried up from limb
// noblok-1 OR'd with the shifted bits of limb noblok. Callers may request
// limb noblok == num->size, which yields only the carried-in part.
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) {
uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser));
uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser);
return part1 | part2;
}
// Schoolbook big-number multiplication: res = a * b.
// res->value must hold a->size + b->size limbs; the top limb is trimmed
// when zero. Fixes three defects in the original:
//  - aval * bval was multiplied in 32-bit before widening (overflow);
//  - limbs were reduced modulo UINT_MAX (2^32 - 1) instead of base 2^32;
//  - the carry was redeclared as `char` inside the inner for-initializer,
//    shadowing the real carry so the final top-limb store always wrote 0.
__host__ __device__ void kali(big *a, big *b, big* res) {
    if (a->size == 0 || b->size == 0) {
        res->size = 0;
        return ;
    }
    char ukurana = a->size;
    char ukuranb = b->size;
    char ukuranres = ukurana + ukuranb;
    res->size = ukuranres;
    // Clear the accumulator limbs.
    for (char i = 0; i < ukuranres; i++) {
        res->value[i] = 0;
    }
    for (char i = 0; i < ukurana; i++) {
        uint aval = a->value[i];
        if (aval==0){
            continue; // whole row contributes nothing
        }
        uint lebih = 0; // carry limb for this row
        for (char j = 0; j < ukuranb; j++) {
            // Widen BEFORE multiplying; the 64-bit sum cannot overflow:
            // max = (2^32-1) + (2^32-1)^2 + (2^32-1) < 2^64.
            ul temp = res->value[i+j] + (ul)aval * b->value[j] + lebih;
            // Limb base is 2^32: low half is the limb, high half the carry.
            res->value[i+j] = (uint)temp;
            lebih = (uint)(temp >> 32);
        }
        res->value[i+ukuranb] = lebih;
    }
    // Trim a zero top limb.
    if (res->value[res->size - 1] == 0){
        res->size--;
    }
}
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){
    /* res = a^b mod c via left-to-right binary square-and-multiply.
       mulbuff and minbuff are caller-provided scratch buffers. */
    res->size = 1;
    res->value[0] = 1;
    for (short bit = ukuranbit(b) - 1; bit >= 0; bit--) {
        /* square step */
        kali(res, res, mulbuff);
        modulo(mulbuff, c, res, minbuff);
        /* multiply step when this exponent bit is set */
        if (getbit(b, bit)) {
            kali(res, a, mulbuff);
            modulo(mulbuff, c, res, minbuff);
        }
    }
}
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) {
/* res = a mod b, computed by binary shift-and-subtract long division. */
/* minbuff is caller scratch holding trial differences (>= a->size+1 limbs). */
/* NOTE(review): writes res->value[a->size], so res must have room for */
/* a->size + 1 limbs — confirm callers allocate that much. */
res->size = a->size;
for(char i = 0 ; i < res->size ;i++){
res->value[i] = a->value[i];
}
/* a < b guaranteed when a has fewer limbs: remainder is a itself */
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
/* extra zero limb so a shifted copy of b can be subtracted at the top */
res->value[res->size] = 0;
res->size++;
/* i walks candidate limb offsets of b under a, high to low */
i = ukurana - ukuranb + 1;
while (i > 0) {
i--;
/* i2 walks the 32 bit positions within the current limb offset */
i2 = 32;
while (i2 > 0) {
i2--;
/* trial-subtract (b << (32*i + i2)) from res into minbuff */
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = res->value[k] - getShiftedBlock(b, j, i2);
/* unsigned wraparound means a borrow was generated */
borrowOut = (temp > res->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
/* propagate any remaining borrow through the higher limbs */
for (; k < ukurana && borrowIn; k++) {
borrowIn = (res->value[k] == 0);
minbuff[k] = res->value[k] - 1;
}
/* no final borrow => shifted b fit; commit the subtraction */
if (!borrowIn) {
while (k > i) {
k--;
res->value[k] = minbuff[k];
}
}
}
}
/* strip leading zero limbs from the remainder */
while (res->size > 0 && res->value[res->size - 1] == 0)
res->size--;
}
void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff) {
/* Combined division: divres = a / b, modres = a mod b. */
/* Same shift-and-subtract loop as modulo(), additionally setting the */
/* quotient bit whenever a shifted copy of b could be subtracted. */
/* minbuff is caller scratch for trial differences. */
modres->size = a->size;
for(char i = 0 ; i < modres->size ;i++){
modres->value[i] = a->value[i];
}
/* a < b when a has fewer limbs: quotient would be 0, remainder is a */
/* NOTE(review): divres->size is left untouched on this early return — */
/* callers must not rely on it being set in that case. */
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
/* extra zero limb so a shifted b can be subtracted at the very top */
modres->value[modres->size] = 0;
modres->size++;
divres->size = ukurana - ukuranb + 1;
for (i = 0; i < divres->size; i++)
divres->value[i] = 0;
/* walk limb offsets high to low */
i = ukurana - ukuranb + 1;
while (i > 0) {
i--;
divres->value[i] = 0;
/* walk bit positions within the limb, high to low */
i2 = 32;
while (i2 > 0) {
i2--;
/* trial-subtract (b << (32*i + i2)) into minbuff */
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = modres->value[k] - getShiftedBlock(b, j, i2);
borrowOut = (temp > modres->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
/* ripple any remaining borrow upward */
for (; k < ukurana && borrowIn; k++) {
borrowIn = (modres->value[k] == 0);
minbuff[k] = modres->value[k] - 1;
}
/* subtraction fit: record the quotient bit and commit */
if (!borrowIn) {
divres->value[i] |= ((uint) 1 << i2);
while (k > i) {
k--;
modres->value[k] = minbuff[k];
}
}
}
}
/* trim one leading zero limb of the quotient, all of the remainder */
if (divres->value[divres->size - 1] == 0)
divres->size--;
while (modres->size > 0 && modres->value[modres->size - 1] == 0)
modres->size--;
}
void tambah(big* a, char b, big* res) {
/* res = a + b where b is a single small digit (fits in one limb). */
/* res may end up one limb longer than a if the carry ripples out. */
if (a->size == 0) {
/* a == 0: the sum is just the digit itself */
res->size = 1;
res->value[0] = uint(b);
return;
}
char carryIn = 0;
uint temp;
/* provisionally one limb longer; trimmed below if no final carry */
res->size = a->size + 1;
res->value[0] = a->value[0] + (uint)b;
/* unsigned wraparound (sum < addend) signals a carry out of limb 0 */
carryIn = (res->value[0] < a->value[0]);
char i = 1;
/* ripple the carry upward while it persists */
for (; i < a->size && carryIn; i++) {
temp = a->value[i] + (uint)1;
carryIn = (temp == 0);
res->value[i] = temp;
}
/* copy the untouched high limbs */
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (carryIn)
res->value[i] = 1;
else
res->size--;
}
void kurang(big* a, big *b, big* res) {
    /* res = a - b. Assumes a >= b (no sign handling).
       BUG FIX: the original zero-filled res and returned early when
       b == 0 (b->size == 0), producing 0 instead of a; a - 0 = a, so
       copy a through in that case. */
    res->size = a->size;
    for (int i = 0; i < res->size; i++){
        res->value[i] = 0;
    }
    if (b->size == 0) {
        for (int i = 0; i < a->size; i++) {
            res->value[i] = a->value[i];
        }
        return;
    }
    char borrowIn, borrowOut;
    uint temp;
    char i;
    /* subtract the overlapping limbs, tracking the borrow */
    for (i = 0, borrowIn = 0; i < b->size; i++) {
        temp = a->value[i] - b->value[i];
        /* unsigned wraparound means a borrow was generated */
        borrowOut = (temp > a->value[i]);
        if (borrowIn) {
            borrowOut |= (temp == 0);
            temp--;
        }
        res->value[i] = temp;
        borrowIn = borrowOut;
    }
    /* propagate the borrow through a's higher limbs */
    for (; i < a->size && borrowIn; i++) {
        borrowIn = (a->value[i] == 0);
        res->value[i] = a->value[i] - 1;
    }
    /* copy the remaining limbs unchanged */
    for (; i < a->size; i++)
        res->value[i] = a->value[i];
    /* trim one leading zero limb, if present */
    if (res->value[res->size - 1] == 0){
        res->size--;
    }
}
void copybig(big* a, big* res){
    /* Copy a's size and limbs into res (res->value must be big enough). */
    res->size = a->size;
    int limb = 0;
    while (limb < res->size) {
        res->value[limb] = a->value[limb];
        limb++;
    }
}
void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten){
    /* Convert the decimal digit string sn (least-significant digit at
       index 0) into a big integer via Horner: res = res*10 + digit. */
    res->size = 0;
    int digit = sn->size - 1;
    while (digit >= 0) {
        kali(res, ten, mulbuff);
        tambah(mulbuff, sn->value[digit], res);
        digit--;
    }
}
void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff) {
    /* Convert x into decimal digits in sn (least-significant first) by
       repeated division by ten. xbuff/divbuff/modbuff/minbuff are
       caller-provided scratch; sn->value must have room for all digits. */
    copybig(x, xbuff);
    short ndigits = 0;
    while (xbuff->size != 0) {
        divandmod(xbuff, ten, divbuff, modbuff, minbuff);
        sn->value[ndigits++] = (char) modbuff->value[0];
        copybig(divbuff, xbuff);
    }
    sn->size = ndigits;
}
void printsn(stringnumber* sn){
    /* Digits are stored least-significant first; print them reversed
       so the most-significant digit comes out first, then a newline. */
    for (int i = sn->size - 1; i >= 0; i--) {
        printf("%d", sn->value[i]);
    }
    printf("\n");
}
void teskonversi(){
    /* Round-trip self test: random 25-digit decimal string -> big -> string.
       BUG FIX: the original allocated `malloc(sizeof(10))` — i.e.
       sizeof(int) == 4 bytes, ONE limb — for buffers that kali()/
       stringtobig() fill with several limbs, overflowing the heap.
       Allocate 10 uint limbs as clearly intended. Also fixed the
       mislabels ("value 0" printed twice) and freed all allocations. */
    int seed = time(NULL);
    srand(seed);
    stringnumber *sn = (stringnumber*) malloc(sizeof(stringnumber));
    sn->size = 25;
    sn->value = (char *) malloc(sn->size);
    for (int i = 0; i < sn->size; i++)
    {
        sn->value[i] = rand() % 10;
    }
    big* konversi = (big*) malloc(sizeof(big));
    big* mulbuff = (big*) malloc(sizeof(big));
    big* ten = (big*) malloc(sizeof(big));
    konversi->value = (uint*) malloc(10 * sizeof(uint));
    mulbuff->value = (uint*) malloc(10 * sizeof(uint));
    ten->value = (uint*) malloc(1 * sizeof(uint));
    ten->size = 1;
    ten->value[0] = 10;
    printf("Stringnumber awal : ");
    printsn(sn);
    stringtobig(sn, konversi, mulbuff, ten);
    printf("konversi size %d\n", konversi->size);
    printf("konversi value 0 %u\n", konversi->value[0]);
    printf("konversi value 1 %u\n", konversi->value[1]);
    stringnumber *sn2 = (stringnumber*) malloc(sizeof(stringnumber));
    big* xbuff = (big*) malloc(sizeof(big));
    big* divbuff = (big*) malloc(sizeof(big));
    big* modbuff = (big*) malloc(sizeof(big));
    sn2->value = (char *) malloc(100);
    xbuff->value = (uint *) malloc(sizeof(uint) * 10);
    divbuff->value = (uint *) malloc(sizeof(uint) * 10);
    modbuff->value = (uint *) malloc(sizeof(uint) * 10);
    uint* minbuff = (uint*) malloc(sizeof(uint) * 10);
    bigtostring(konversi,sn2,ten,xbuff,divbuff,modbuff,minbuff);
    printf("Stringnumber akhir : ");
    printsn(sn2);
    /* release everything this test allocated */
    free(sn->value);       free(sn);
    free(konversi->value); free(konversi);
    free(mulbuff->value);  free(mulbuff);
    free(ten->value);      free(ten);
    free(sn2->value);      free(sn2);
    free(xbuff->value);    free(xbuff);
    free(divbuff->value);  free(divbuff);
    free(modbuff->value);  free(modbuff);
    free(minbuff);
}
|
2,462 | /* CUDA FFT Library */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
#include <cufft.h>
extern int nblock_size;
extern int maxgsx;
static cudaError_t crc;
static cufftResult cfrc;
static cufftHandle planrx, planxr, planrxn, planxrn;
static cufftHandle plany, planyn;
__global__ void gpuctpose4(float2 f[], float2 g[], int nx, int ny,
int nxv, int nyv);
__global__ void gpuctpose4n(float2 fn[], float2 gn[], int nx, int ny,
int ndim, int nxv, int nyv);
/*--------------------------------------------------------------------*/
__global__ void gpusctpose4(float2 f[], float2 g[], float ani, int nx,
int ny, int nxv, int nyv) {
/* scaled complex transpose using blocking algorithm with gaps */
/* g[k + nyv*j] = ani * f[j + nxv*k], staged through a shared-memory */
/* tile; the extra column (mxv = mx + 1) is the "gap" that keeps the */
/* transposed reads off a single shared-memory bank. */
/* Launch: square blockDim (mx x mx), grid covering nx x ny in tiles, */
/* dynamic shared memory of (mx+1)*mx*sizeof(float2) bytes. */
/* local data */
int j, k, js, ks, joff, koff, mx, mxv;
float2 a;
/* The size of the shared memory array is as follows: */
/* float2 shm2[(mx + 1)*mx]; */
extern __shared__ float2 shm2[];
mx = blockDim.x;
mxv = mx + 1;
joff = mx*blockIdx.x;
koff = mx*blockIdx.y;
js = threadIdx.x;
ks = threadIdx.y;
/* copy into block */
j = js + joff;
k = ks + koff;
if ((j < nx) && (k < ny)) {
shm2[js+mxv*ks] = f[j+nxv*k];
}
/* all threads must finish writing the tile before any thread reads it */
__syncthreads();
/* copy out from block with scaling */
/* note js/ks swap roles here: coalesced writes into the transposed g */
j = ks + joff;
k = js + koff;
if ((j < nx) && (k < ny)) {
a = shm2[ks+mxv*js];
a.x = ani*a.x;
a.y = ani*a.y;
g[k+nyv*j] = a;
}
return;
}
/*--------------------------------------------------------------------*/
__global__ void gpusctpose4n(float2 fn[], float2 gn[], float ani,
int nx, int ny, int ndim, int nxv,
int nyv) {
/* scaled complex vector transpose using blocking algorithm with gaps */
/* ndim = vector dimension */
/* Vector variant of gpusctpose4: each (j,k) element carries ndim */
/* float2 components, strided by nxv in fn and nyv in gn. */
/* Dynamic shared memory: ndim*(mx+1)*mx*sizeof(float2) bytes. */
/* local data */
int i, j, k, js, ks, joff, koff, mx, mxv, nmxv, nnxv, nnyv, jj, kk;
float2 a;
/* The size of the shared memory array is as follows: */
/* float2 shmn2[ndim*(mx + 1)*mx]; */
extern __shared__ float2 shmn2[];
mx = blockDim.x;
mxv = mx + 1;
joff = mx*blockIdx.x;
koff = mx*blockIdx.y;
js = threadIdx.x;
ks = threadIdx.y;
/* strides for one full ndim vector in shared, fn and gn layouts */
nmxv = ndim*mxv;
nnxv = ndim*nxv;
nnyv = ndim*nyv;
/* copy into block */
j = js + joff;
k = ks + koff;
if ((j < nx) && (k < ny)) {
jj = j + nnxv*k;
kk = js + nmxv*ks;
for (i = 0; i < ndim; i++) {
shmn2[kk+mxv*i] = fn[jj+nxv*i];
}
}
/* tile must be fully written before the transposed reads below */
__syncthreads();
/* copy out from block with scaling */
j = ks + joff;
k = js + koff;
if ((j < nx) && (k < ny)) {
kk = k + nnyv*j;
jj = ks + nmxv*js;
for (i = 0; i < ndim; i++) {
a = shmn2[jj+mxv*i];
a.x = ani*a.x;
a.y = ani*a.y;
gn[kk+nyv*i] = a;
}
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcuinit(int nx, int ny, int ndim) {
/* Create the four batched 1D x-direction plans kept in file statics:
   scalar and ndim-vector variants of the R2C and C2R transforms.
   Any cuFFT failure is fatal: report the status code and exit. */
   cfrc = cufftPlan1d(&planrx,nx,CUFFT_R2C,ny);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftPlan1d planrx error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftPlan1d(&planxr,nx,CUFFT_C2R,ny);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftPlan1d planxr error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftPlan1d(&planrxn,nx,CUFFT_R2C,ndim*ny);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftPlan1d planrxn error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftPlan1d(&planxrn,nx,CUFFT_C2R,ndim*ny);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftPlan1d planxrn error=%d\n",cfrc);
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2cuinit(int nx, int ny, int ndim) {
/* Create the y-direction C2C plans (scalar and ndim-vector), batched
   over the nx/2+1 retained x frequencies. Failures are fatal. */
   int nxh1 = nx/2 + 1;
   cfrc = cufftPlan1d(&plany,ny,CUFFT_C2C,nxh1);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftPlan1d plany error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftPlan1d(&planyn,ny,CUFFT_C2C,ndim*nxh1);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftPlan1d planyn error=%d\n",cfrc);
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcudel() {
/* Destroy the four x-direction plans made by gpufft2rrcuinit.
   Any cuFFT failure is reported and terminates the program. */
   cfrc = cufftDestroy(planrx);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftDestroy planrx error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftDestroy(planxr);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftDestroy planxr error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftDestroy(planrxn);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftDestroy planrxn error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftDestroy(planxrn);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftDestroy planxrn error=%d\n",cfrc);
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2cudel() {
/* Destroy the y-direction plans made by gpufft2cuinit. */
   cfrc = cufftDestroy(plany);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftDestroy plany error=%d\n",cfrc);
      exit(1);
   }
   cfrc = cufftDestroy(planyn);
   if (cfrc != CUFFT_SUCCESS) {
      printf("cufftDestroy planyn error=%d\n",cfrc);
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcu(float2 f[], float2 g[], int isign,
                            int indx, int indy, int nxh1d, int nyd) {
/* wrapper function for real to complex fft, without packed data */
/* uses 1D real to complex and complex to complex NVIDIA FFTs */
/* isign < 0: R2C in x, scaled transpose f->g, then C2C in y */
/* isign > 0: C2C in y, transpose g->f, then C2R in x */
/* nxh1d must be = nx/2+1 */
/* FIX: cudaThreadSynchronize() is deprecated (and removed in recent */
/* CUDA toolkits); cudaDeviceSynchronize() is the drop-in replacement. */
/* The unused local dimBlock was also removed. */
/* local data */
   int nx, nxh1, ny, ns;
   int mx = 16;
   float ani;
   dim3 dimBlockt(mx,mx);
/* calculate range of indices */
   nx = 1L<<indx;
   nxh1 = nx/2 + 1;
   ny = 1L<<indy;
   dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1);
   dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1);
/* shared-memory bytes for one (mx+1)*mx transpose tile */
   ns = (mx+1)*mx*sizeof(float2);
/* inverse fourier transform */
   if (isign < 0) {
/* perform x fft */
      cfrc = cufftExecR2C(planrx,(cufftReal *)f,(cufftComplex *)f);
      if (cfrc) {
         printf("cufftExecR2C(-1) planrx error=%d\n",cfrc);
         exit(1);
      }
/* transpose f to g and normalize */
      ani = 1.0f/(((float) nx)*((float) ny));
/* clear any stale error so the launch check below is meaningful */
      crc = cudaGetLastError();
      gpusctpose4<<<dimGridtx,dimBlockt,ns>>>(f,g,ani,nxh1,ny,nxh1d,
                                              nyd);
      crc = cudaGetLastError();
      if (crc) {
         printf("gpusctpose4 error=%d:%s\n",crc,
                cudaGetErrorString(crc));
         exit(1);
      }
/* perform y fft */
      cfrc = cufftExecC2C(plany,(cufftComplex *)g,(cufftComplex *)g,
                          CUFFT_FORWARD);
      cudaDeviceSynchronize();
      if (cfrc) {
         printf("cufftExecC2C(-1) plany error=%d\n",cfrc);
         exit(1);
      }
   }
/* forward fourier transform */
   else if (isign > 0) {
/* perform y fft */
      cfrc = cufftExecC2C(plany,(cufftComplex *)g,(cufftComplex *)g,
                          CUFFT_INVERSE);
      if (cfrc) {
         printf("cufftExecC2C(1) plany error=%d\n",cfrc);
         exit(1);
      }
/* transpose g to f */
      crc = cudaGetLastError();
      gpuctpose4<<<dimGridty,dimBlockt,ns>>>(g,f,ny,nxh1,nyd,nxh1d);
      crc = cudaGetLastError();
      if (crc) {
         printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc));
         exit(1);
      }
/* perform x fft */
      cfrc = cufftExecC2R(planxr,(cufftComplex *)f,(cufftReal *)f);
      cudaDeviceSynchronize();
      if (cfrc) {
         printf("cufftExecC2R(1) planxr error=%d\n",cfrc);
         exit(1);
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcun(float2 fn[], float2 gn[], int isign,
                             int indx, int indy, int ndim, int nxh1d,
                             int nyd) {
/* wrapper function for real to complex fft, without packed data */
/* for vector data */
/* uses 1D real to complex and complex to complex NVIDIA FFTs */
/* ndim = vector dimension */
/* nxh1d must be = nx/2+1 */
/* FIX: cudaThreadSynchronize() is deprecated (and removed in recent */
/* CUDA toolkits); cudaDeviceSynchronize() is the drop-in replacement. */
/* The unused local dimBlock was also removed. */
/* local data */
   int nx, nxh1, ny, ns;
   int mx = 16;
   float ani;
   dim3 dimBlockt(mx,mx);
/* calculate range of indices */
   nx = 1L<<indx;
   nxh1 = nx/2 + 1;
   ny = 1L<<indy;
   dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1);
   dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1);
/* shared-memory bytes for one ndim-vector transpose tile */
   ns = ndim*(mx+1)*mx*sizeof(float2);
/* inverse fourier transform */
   if (isign < 0) {
/* perform x fft */
      cfrc = cufftExecR2C(planrxn,(cufftReal *)fn,(cufftComplex *)fn);
      if (cfrc) {
         printf("cufftExecR2C(-1) planrxn error=%d\n",cfrc);
         exit(1);
      }
/* transpose f to g and normalize */
      ani = 1.0f/(((float) nx)*((float) ny));
/* clear any stale error so the launch check below is meaningful */
      crc = cudaGetLastError();
      gpusctpose4n<<<dimGridtx,dimBlockt,ns>>>(fn,gn,ani,nxh1,ny,ndim,
                                               nxh1d,nyd);
      crc = cudaGetLastError();
      if (crc) {
         printf("gpusctpose4n error=%d:%s\n",crc,
                cudaGetErrorString(crc));
         exit(1);
      }
/* perform y fft */
      cfrc = cufftExecC2C(planyn,(cufftComplex *)gn,(cufftComplex *)gn,
                          CUFFT_FORWARD);
      cudaDeviceSynchronize();
      if (cfrc) {
         printf("cufftExecC2C(-1) planyn error=%d\n",cfrc);
         exit(1);
      }
   }
/* forward fourier transform */
   else if (isign > 0) {
/* perform y fft */
      cfrc = cufftExecC2C(planyn,(cufftComplex *)gn,(cufftComplex *)gn,
                          CUFFT_INVERSE);
      if (cfrc) {
         printf("cufftExecC2C(1) planyn error=%d\n",cfrc);
         exit(1);
      }
/* transpose g to f */
      crc = cudaGetLastError();
      gpuctpose4n<<<dimGridty,dimBlockt,ns>>>(gn,fn,ny,nxh1,ndim,nyd,
                                              nxh1d);
      crc = cudaGetLastError();
      if (crc) {
         printf("gpuctpose4n error=%d:%s\n",crc,
                cudaGetErrorString(crc));
         exit(1);
      }
/* perform x fft */
      cfrc = cufftExecC2R(planxrn,(cufftComplex *)fn,(cufftReal *)fn);
      cudaDeviceSynchronize();
      if (cfrc) {
         printf("cufftExecC2R(1) planxrn error=%d\n",cfrc);
         exit(1);
      }
   }
   return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcuinit_(int *nx, int *ny, int *ndim) {
/* Fortran interface: dereference the pointer args and forward. */
gpufft2rrcuinit(*nx,*ny,*ndim);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2cuinit_(int *nx, int *ny, int *ndim) {
/* Fortran interface: dereference the pointer args and forward. */
gpufft2cuinit(*nx,*ny,*ndim);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcudel_() {
/* Fortran interface: forward to the plan-destruction routine. */
gpufft2rrcudel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2cudel_() {
/* Fortran interface: forward to the plan-destruction routine. */
gpufft2cudel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcu_(unsigned long *gp_f, unsigned long *gp_g,
                             int *isign, int *indx, int *indy,
                             int *nxh1d, int *nyd) {
/* Fortran interface: the device pointers arrive as integer handles;
   reinterpret them as float2* and forward with dereferenced scalars. */
   float2 *f = (float2 *)*gp_f;
   float2 *g = (float2 *)*gp_g;
   gpufft2rrcu(f,g,*isign,*indx,*indy,*nxh1d,*nyd);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpufft2rrcun_(unsigned long *gp_fn,
                              unsigned long *gp_gn, int *isign,
                              int *indx, int *indy, int *ndim,
                              int *nxh1d, int *nyd) {
/* Fortran interface for the vector variant: reinterpret the integer
   device-pointer handles and forward with dereferenced scalars. */
   float2 *fn = (float2 *)*gp_fn;
   float2 *gn = (float2 *)*gp_gn;
   gpufft2rrcun(fn,gn,*isign,*indx,*indy,*ndim,*nxh1d,*nyd);
   return;
}
|
2,463 | #include <cuda_runtime.h>
#include <stdio.h>
__device__ float devData;
/* Reads the __device__ global devData, prints it from the device, then
   bumps it by 2. Launched <<<1,1>>>, so there is no write race. */
__global__ void checkGlobalVariable() {
// display the original value
printf("Device: the value of the global variable is %f\n",devData);
// alter the value
devData +=2.0f;
}
int main(void) {
    // Demonstrates host access to a __device__ global via the symbol APIs.
    // FIX: the original ignored every CUDA return code; errors now abort
    // with a message instead of silently printing stale host values.
    float value = 3.14f;
    cudaError_t err;
    // initialize the global variable
    err = cudaMemcpyToSymbol(devData, &value, sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMemcpyToSymbol failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    printf("Host: copied %f to the global variable\n", value);
    // invoke the kernel and check the launch itself
    checkGlobalVariable <<<1, 1>>>();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    // copy the global variable back to the host (implicitly synchronizes)
    err = cudaMemcpyFromSymbol(&value, devData, sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMemcpyFromSymbol failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    printf("Host: the value changed by the kernel to %f\n", value);
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
2,464 | #include "includes.h"
/* Grid-stride update of plastic synapse weights, clamped to [0, 1].
   For each plastic synapse: if its postsynaptic neuron spiked this
   timestep, add (1 - w) * C (potentiation); if a presynaptic spike
   reached the synapse this timestep, subtract w * D (depression);
   scale the net change by learning_rate_rho and write back only when
   the weight actually changed.
   FIX: the double literals (0.0, 1.0) and the mixed float/double
   max()/min() calls promoted the clamp to double precision in an
   otherwise all-float kernel; use float literals and fminf/fmaxf. */
__global__ void update_synaptic_efficacies_or_weights_kernel (float * d_recent_presynaptic_activities_C, float * d_recent_postsynaptic_activities_D, int* d_postsynaptic_neuron_indices, float* d_synaptic_efficacies_or_weights, float current_time_in_seconds, float * d_time_of_last_spike_to_reach_synapse, float * d_last_spike_time_of_each_neuron, float learning_rate_rho, int* d_plastic_synapse_indices, size_t total_number_of_plastic_synapses) {
    int indx = threadIdx.x + blockIdx.x * blockDim.x;
    while ((size_t)indx < total_number_of_plastic_synapses) {
        int idx = d_plastic_synapse_indices[indx];
        float synaptic_efficacy_delta_g = d_synaptic_efficacies_or_weights[idx];
        float new_synaptic_efficacy = synaptic_efficacy_delta_g;
        float new_componet = 0.0f;
        int postsynaptic_neuron_index = d_postsynaptic_neuron_indices[idx];
        /* potentiation term: postsynaptic neuron fired this timestep */
        if (d_last_spike_time_of_each_neuron[postsynaptic_neuron_index] == current_time_in_seconds) {
            float recent_presynaptic_activity_C = d_recent_presynaptic_activities_C[idx];
            new_componet += (1.0f - synaptic_efficacy_delta_g) * recent_presynaptic_activity_C;
        }
        /* depression term: presynaptic spike arrived this timestep */
        if (d_time_of_last_spike_to_reach_synapse[idx] == current_time_in_seconds) {
            float recent_postsynaptic_activity_D = d_recent_postsynaptic_activities_D[postsynaptic_neuron_index];
            new_componet -= synaptic_efficacy_delta_g * recent_postsynaptic_activity_D;
        }
        if (new_componet != 0.0f) {
            new_synaptic_efficacy += learning_rate_rho * new_componet;
        }
        /* write back only changed weights, clamped to [0, 1] */
        if (synaptic_efficacy_delta_g != new_synaptic_efficacy) {
            new_synaptic_efficacy = fmaxf(new_synaptic_efficacy, 0.0f);
            new_synaptic_efficacy = fminf(new_synaptic_efficacy, 1.0f);
            d_synaptic_efficacies_or_weights[idx] = new_synaptic_efficacy;
        }
        indx += blockDim.x * gridDim.x;
    }
}
2,465 |
//Pia Wetzel
/*
Program uses the KNN (with K = 3) classification algorithm to classify the Iris species Setosa, Virginica, and Versicolor.
Goal is it to identify an Iris species based on the four parameters Sepal-width, Sepal-length, Petal-width, and Petal-length.
*/
#include <stdio.h>
#include <cuda.h>
#include "math.h"
#include<stdio.h>
#include<string.h>
__global__ void knn (double *oMatrix, double *topN, double*knns, unsigned matrixsize) {
    // One thread per neighbor: scan the flat (distance, species) pairs in
    // oMatrix for this thread's top-k distance and record the species id
    // stored immediately after the matching distance.
    unsigned tid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned pos = 0;
    while (pos + 1 < matrixsize) {
        if (oMatrix[pos] == topN[tid]) {
            knns[tid] = oMatrix[pos + 1];
        }
        ++pos;
    }
}
//Calculated Euclidean Distance between each matrix row and a given test vector.
//The "result" is a matrix with x rows and 2 colums, containing the euclidean
//distance of the row plus the numerical idenifier of the Iris species associated with the row
//Calculated Euclidean Distance between each matrix row and a given test vector.
//result: per row a (distance, species) pair; result2: distance only.
//matrixsize is the row stride (number of columns, here 5).
//BUG FIX: the original accumulated with += directly into result/result2,
//which come straight from cudaMalloc and are UNINITIALIZED — the sums
//started from garbage. Accumulate in a register and store once instead.
__global__ void eucl_dist ( double *matrix, double *test, double *result, double *result2, unsigned matrixsize) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned max = 4;          // only the 4 feature columns count
    double acc = 0.0;
    for (unsigned jj = 0; jj < max; ++jj) {
        double d = matrix[id * matrixsize + jj] - test[jj];
        acc += d * d;
    }
    double dist = sqrt(acc);
    result[id * 2] = dist;
    result[id * 2 + 1] = matrix[id * matrixsize + 4];   // species label column
    result2[id] = dist;
}
//Parallel sorts the matrix with x rows and 2 column, ordered by increasing euclidean distance
//Even phase of odd-even transposition sort: thread t compare-swaps the
//pair (2t, 2t+1) into ascending order. One launch = one phase.
__global__ void even_sort(double *arr, unsigned matrixsize) {
    int lo = threadIdx.x * 2;
    if (lo <= matrixsize - 2) {
        if (arr[lo] > arr[lo + 1]) {
            double held = arr[lo];
            arr[lo] = arr[lo + 1];
            arr[lo + 1] = held;
        }
    }
}
//Odd phase of odd-even transposition sort: thread t compare-swaps the
//pair (2t+1, 2t+2) into ascending order. One launch = one phase.
__global__ void odd_sort(double *arr, unsigned matrixsize) {
    int lo = threadIdx.x * 2 + 1;
    if (lo <= matrixsize - 2) {
        if (arr[lo] > arr[lo + 1]) {
            double held = arr[lo];
            arr[lo] = arr[lo + 1];
            arr[lo + 1] = held;
        }
    }
}
#define N 147
#define M 5
int main() {
    const unsigned KNN = 3;
    //Some test values ("randomly" taken out of original data set)
    //double test_iris[5] = {5.6,3.0,4.5,1.5,200}; //I'm a Versicolor
    double test_iris[5] = {5.1,3.5,1.4,0.2, 100}; //I'm a Setosa
    //double test_iris[5] = {6.7,3.0,5.2,2.3,300}; //I'm a Virginica
    double *eucl_distance,*eucl_distance2, *result,*result2, *test, *matrix, *knns, *knnres;
    cudaMalloc(&result,2*N*sizeof(double));
    cudaMalloc(&result2,N*sizeof(double));
    cudaMalloc(&test,(M)*sizeof(double));
    cudaMalloc(&matrix,N*M*sizeof(double));
    cudaMalloc(&knnres,KNN*sizeof(double));
    eucl_distance = (double *)malloc(2*N * sizeof(double));
    eucl_distance2 = (double *)malloc(N * sizeof(double));
    knns = (double *)malloc(KNN * sizeof(double));
    //Training data
    //Setosa = 100
    //Versicolor = 200
    //Virginica = 300
    //Data is taken from https://archive.ics.uci.edu/ml/datasets/iris
    double iris2[147][5] ={
    {4.9,3.0,1.4,0.2,100},
    {4.7,3.2,1.3,0.2,100},
    {4.6,3.1,1.5,0.2,100},
    {5.0,3.6,1.4,0.2,100},
    {5.4,3.9,1.7,0.4,100},
    {4.6,3.4,1.4,0.3,100},
    {5.0,3.4,1.5,0.2,100},
    {4.4,2.9,1.4,0.2,100},
    {4.9,3.1,1.5,0.1,100},
    {5.4,3.7,1.5,0.2,100},
    {4.8,3.4,1.6,0.2,100},
    {4.8,3.0,1.4,0.1,100},
    {4.3,3.0,1.1,0.1,100},
    {5.8,4.0,1.2,0.2,100},
    {5.7,4.4,1.5,0.4,100},
    {5.4,3.9,1.3,0.4,100},
    {5.1,3.5,1.4,0.3,100},
    {5.7,3.8,1.7,0.3,100},
    {5.1,3.8,1.5,0.3,100},
    {5.4,3.4,1.7,0.2,100},
    {5.1,3.7,1.5,0.4,100},
    {4.6,3.6,1.0,0.2,100},
    {5.1,3.3,1.7,0.5,100},
    {4.8,3.4,1.9,0.2,100},
    {5.0,3.0,1.6,0.2,100},
    {5.0,3.4,1.6,0.4,100},
    {5.2,3.5,1.5,0.2,100},
    {5.2,3.4,1.4,0.2,100},
    {4.7,3.2,1.6,0.2,100},
    {4.8,3.1,1.6,0.2,100},
    {5.4,3.4,1.5,0.4,100},
    {5.2,4.1,1.5,0.1,100},
    {5.5,4.2,1.4,0.2,100},
    {4.9,3.1,1.5,0.1,100},
    {5.0,3.2,1.2,0.2,100},
    {5.5,3.5,1.3,0.2,100},
    {4.9,3.1,1.5,0.1,100},
    {4.4,3.0,1.3,0.2,100},
    {5.1,3.4,1.5,0.2,100},
    {5.0,3.5,1.3,0.3,100},
    {4.5,2.3,1.3,0.3,100},
    {4.4,3.2,1.3,0.2,100},
    {5.0,3.5,1.6,0.6,100},
    {5.1,3.8,1.9,0.4,100},
    {4.8,3.0,1.4,0.3,100},
    {5.1,3.8,1.6,0.2,100},
    {4.6,3.2,1.4,0.2,100},
    {5.3,3.7,1.5,0.2,100},
    {5.0,3.3,1.4,0.2,100},
    {7.0,3.2,4.7,1.4,200},
    {6.4,3.2,4.5,1.5,200},
    {6.9,3.1,4.9,1.5,200},
    {5.5,2.3,4.0,1.3,200},
    {6.5,2.8,4.6,1.5,200},
    {5.7,2.8,4.5,1.3,200},
    {6.3,3.3,4.7,1.6,200},
    {4.9,2.4,3.3,1.0,200},
    {6.6,2.9,4.6,1.3,200},
    {5.2,2.7,3.9,1.4,200},
    {5.0,2.0,3.5,1.0,200},
    {5.9,3.0,4.2,1.5,200},
    {6.0,2.2,4.0,1.0,200},
    {6.1,2.9,4.7,1.4,200},
    {5.6,2.9,3.6,1.3,200},
    {6.7,3.1,4.4,1.4,200},
    {5.8,2.7,4.1,1.0,200},
    {6.2,2.2,4.5,1.5,200},
    {5.6,2.5,3.9,1.1,200},
    {5.9,3.2,4.8,1.8,200},
    {6.1,2.8,4.0,1.3,200},
    {6.3,2.5,4.9,1.5,200},
    {6.1,2.8,4.7,1.2,200},
    {6.4,2.9,4.3,1.3,200},
    {6.6,3.0,4.4,1.4,200},
    {6.8,2.8,4.8,1.4,200},
    {6.7,3.0,5.0,1.7,200},
    {6.0,2.9,4.5,1.5,200},
    {5.7,2.6,3.5,1.0,200},
    {5.5,2.4,3.8,1.1,200},
    {5.5,2.4,3.7,1.0,200},
    {5.8,2.7,3.9,1.2,200},
    {6.0,2.7,5.1,1.6,200},
    {5.4,3.0,4.5,1.5,200},
    {6.0,3.4,4.5,1.6,200},
    {6.7,3.1,4.7,1.5,200},
    {6.3,2.3,4.4,1.3,200},
    {5.6,3.0,4.1,1.3,200},
    {5.5,2.5,4.0,1.3,200},
    {5.5,2.6,4.4,1.2,200},
    {6.1,3.0,4.6,1.4,200},
    {5.8,2.6,4.0,1.2,200},
    {5.0,2.3,3.3,1.0,200},
    {5.6,2.7,4.2,1.3,200},
    {5.7,3.0,4.2,1.2,200},
    {5.7,2.9,4.2,1.3,200},
    {6.2,2.9,4.3,1.3,200},
    {5.1,2.5,3.0,1.1,200},
    {5.7,2.8,4.1,1.3,200},
    {6.3,3.3,6.0,2.5,300},
    {5.8,2.7,5.1,1.9,300},
    {7.1,3.0,5.9,2.1,300},
    {6.3,2.9,5.6,1.8,300},
    {6.5,3.0,5.8,2.2,300},
    {7.6,3.0,6.6,2.1,300},
    {4.9,2.5,4.5,1.7,300},
    {7.3,2.9,6.3,1.8,300},
    {6.7,2.5,5.8,1.8,300},
    {7.2,3.6,6.1,2.5,300},
    {6.5,3.2,5.1,2.0,300},
    {6.4,2.7,5.3,1.9,300},
    {6.8,3.0,5.5,2.1,300},
    {5.7,2.5,5.0,2.0,300},
    {5.8,2.8,5.1,2.4,300},
    {6.4,3.2,5.3,2.3,300},
    {6.5,3.0,5.5,1.8,300},
    {7.7,3.8,6.7,2.2,300},
    {7.7,2.6,6.9,2.3,300},
    {6.0,2.2,5.0,1.5,300},
    {6.9,3.2,5.7,2.3,300},
    {5.6,2.8,4.9,2.0,300},
    {7.7,2.8,6.7,2.0,300},
    {6.3,2.7,4.9,1.8,300},
    {6.7,3.3,5.7,2.1,300},
    {7.2,3.2,6.0,1.8,300},
    {6.2,2.8,4.8,1.8,300},
    {6.1,3.0,4.9,1.8,300},
    {6.4,2.8,5.6,2.1,300},
    {7.2,3.0,5.8,1.6,300},
    {7.4,2.8,6.1,1.9,300},
    {7.9,3.8,6.4,2.0,300},
    {6.4,2.8,5.6,2.2,300},
    {6.3,2.8,5.1,1.5,300},
    {6.1,2.6,5.6,1.4,300},
    {7.7,3.0,6.1,2.3,300},
    {6.3,3.4,5.6,2.4,300},
    {6.4,3.1,5.5,1.8,300},
    {6.0,3.0,4.8,1.8,300},
    {6.9,3.1,5.4,2.1,300},
    {6.7,3.1,5.6,2.4,300},
    {6.9,3.1,5.1,2.3,300},
    {5.8,2.7,5.1,1.9,300},
    {6.8,3.2,5.9,2.3,300},
    {6.7,3.3,5.7,2.5,300},
    {6.3,2.5,5.0,1.9,300},
    {6.5,3.0,5.2,2.0,300},
    {6.2,3.4,5.4,2.3,300},
    {5.9,3.0,5.1,1.8,300}};
    cudaMemcpy(matrix,iris2, N*M*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(test,test_iris, (M)*sizeof(double), cudaMemcpyHostToDevice);
    eucl_dist<<<1, N>>>(matrix, test, result,result2, M);
    cudaMemcpy(eucl_distance, result, 2*N * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(eucl_distance2, result2, N * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(result2,eucl_distance2, N*sizeof(double), cudaMemcpyHostToDevice);
    // N/2+1 even+odd phases are enough to fully sort N elements
    for(unsigned i = 0; i <= N/2; i++){
        even_sort<<<1, N>>>(result2, N);
        odd_sort<<<1, N>>>(result2, N);
    }
    cudaMemcpy(eucl_distance2, result2, N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(result,eucl_distance, 2*N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(result2,eucl_distance2, N*sizeof(double), cudaMemcpyHostToDevice);
    knn<<<1,3>>>(result, result2, knnres, 2*N);
    cudaMemcpy(knns, knnres, KNN*sizeof(double), cudaMemcpyDeviceToHost);
    // BUG FIX: the vote counters were incremented while uninitialized
    // (undefined behavior, garbage vote totals); start all three at zero.
    unsigned versicolor = 0, virginica = 0, setosa = 0;
    for (unsigned i = 0; i < KNN; ++i) {
        if(knns[i] == 100){setosa++;}
        else if(knns[i] == 200){versicolor++;}
        else if (knns[i] == 300){virginica++;}
    }
    printf("\n--------------------------------------------------------\n");
    printf("\n\n\nInput:\n\nSepal-length: %2f\nSepal-width: %2f\nPetal-length: %2f\nPetal-width: %2f", test_iris[0], test_iris[1], test_iris[2], test_iris[3]);
    printf("\n\nThe %2d closest neighbors:\nsetosa: %2d virginica: %2d versicolor: %2d",KNN, setosa, virginica, versicolor);
    printf("\n\nApplying KNN classification with k=%2d yields: ", KNN);
    if(setosa > virginica && setosa > versicolor)
    {
        printf("The input is a Setosa\n\n");
    }else if(virginica > setosa && virginica > versicolor)
    {
        printf("The input is a Virginica\n\n");
    }else if(versicolor > setosa && versicolor > virginica)
    {
        printf("The input is a Versicolor\n\n");
    }
    else
    {
        printf("There is a tie! Try different values.\n\n");
    }
    printf("--------------------------------------------------------\n");
    if(test_iris[4] == 100){printf("\nCorrect answer: Setosa\n\n");}
    else if(test_iris[4] == 200){printf("\nCorrect answer: Versicolor\n\n");}
    else if(test_iris[4] == 300){printf("\nCorrect answer: Virginica\n\n");}
    // release device and host buffers (the original leaked all of them)
    cudaFree(result);
    cudaFree(result2);
    cudaFree(test);
    cudaFree(matrix);
    cudaFree(knnres);
    free(eucl_distance);
    free(eucl_distance2);
    free(knns);
    return 0;
}
|
// Tests that ptxas and fatbinary are correctly invoked during CUDA compilation.
//
// REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// Regular compiles with -O{0,1,2,3,4,fast}. -O4 and -Ofast map to ptxas O3.
// RUN: %clang -### -target x86_64-linux-gnu -O0 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT0 %s
// RUN: %clang -### -target x86_64-linux-gnu -O1 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT1 %s
// RUN: %clang -### -target x86_64-linux-gnu -O2 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT2 %s
// RUN: %clang -### -target x86_64-linux-gnu -O3 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT3 %s
// RUN: %clang -### -target x86_64-linux-gnu -O4 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT3 %s
// RUN: %clang -### -target x86_64-linux-gnu -Ofast -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT3 %s
// With debugging enabled, ptxas should be run with no ptxas optimizations.
// RUN: %clang -### -target x86_64-linux-gnu --cuda-noopt-device-debug -O2 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix DBG %s
// Regular compile without -O. This should result in us passing -O0 to ptxas.
// RUN: %clang -### -target x86_64-linux-gnu -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT0 %s
// Regular compiles with -Os and -Oz. For lack of a better option, we map
// these to ptxas -O3.
// RUN: %clang -### -target x86_64-linux-gnu -Os -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT2 %s
// RUN: %clang -### -target x86_64-linux-gnu -Oz -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT2 %s
// Regular compile targeting sm_35.
// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_35 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM35 %s
// 32-bit compile.
// RUN: %clang -### -target x86_32-linux-gnu -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH32 -check-prefix SM20 %s
// Compile with -fintegrated-as. This should still cause us to invoke ptxas.
// RUN: %clang -### -target x86_64-linux-gnu -fintegrated-as -c %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 -check-prefix OPT0 %s
// Check -Xcuda-ptxas and -Xcuda-fatbinary
// RUN: %clang -### -target x86_64-linux-gnu -c -Xcuda-ptxas -foo1 \
// RUN: -Xcuda-fatbinary -bar1 -Xcuda-ptxas -foo2 -Xcuda-fatbinary -bar2 %s 2>&1 \
// RUN: | FileCheck -check-prefix SM20 -check-prefix PTXAS-EXTRA \
// RUN: -check-prefix FATBINARY-EXTRA %s
// Match clang job that produces PTX assembly.
// CHECK: "-cc1" "-triple" "nvptx64-nvidia-cuda"
// SM20: "-target-cpu" "sm_20"
// SM35: "-target-cpu" "sm_35"
// SM20: "-o" "[[PTXFILE:[^"]*]]"
// SM35: "-o" "[[PTXFILE:[^"]*]]"
// Match the call to ptxas (which assembles PTX to SASS).
// CHECK:ptxas
// ARCH64: "-m64"
// ARCH32: "-m32"
// OPT0: "-O0"
// OPT0-NOT: "-g"
// OPT1: "-O1"
// OPT1-NOT: "-g"
// OPT2: "-O2"
// OPT2-NOT: "-g"
// OPT3: "-O3"
// OPT3-NOT: "-g"
// DBG: "-g" "--dont-merge-basicblocks" "--return-at-end"
// SM20: "--gpu-name" "sm_20"
// SM35: "--gpu-name" "sm_35"
// SM20: "--output-file" "[[CUBINFILE:[^"]*]]"
// SM35: "--output-file" "[[CUBINFILE:[^"]*]]"
// PTXAS-EXTRA: "-foo1"
// PTXAS-EXTRA-SAME: "-foo2"
// CHECK-SAME: "[[PTXFILE]]"
// Match the call to fatbinary (which combines all our PTX and SASS into one
// blob).
// CHECK:fatbinary
// CHECK-DAG: "--cuda"
// ARCH64-DAG: "-64"
// ARCH32-DAG: "-32"
// CHECK-DAG: "--create" "[[FATBINARY:[^"]*]]"
// SM20-DAG: "--image=profile=compute_20,file=[[PTXFILE]]"
// SM35-DAG: "--image=profile=compute_35,file=[[PTXFILE]]"
// SM20-DAG: "--image=profile=sm_20,file=[[CUBINFILE]]"
// SM35-DAG: "--image=profile=sm_35,file=[[CUBINFILE]]"
// FATBINARY-EXTRA: "-bar1"
// FATBINARY-EXTRA-SAME: "-bar2"
// Match the clang job for host compilation.
// CHECK: "-cc1" "-triple" "x86_64--linux-gnu"
// CHECK-SAME: "-fcuda-include-gpubinary" "[[FATBINARY]]"
|
2,467 | #include <stdio.h>
#include <stdlib.h>
// cuda runtime
#include <cuda_runtime.h>
__global__ void kernel(int *a)
{
    // Each thread writes its own global linear index into its slot.
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    a[gid] = gid;
    //var2: a[gid] = blockIdx.x;
    //var3: a[gid] = threadIdx.x;
}
int main()
{
    // Fill a small device array with thread indices and print it.
    const int dimx = 16;
    const int num_bytes = dimx * sizeof(int);

    int *h_a = (int*) malloc(num_bytes);   // host buffer
    int *d_a = 0;                          // device buffer
    cudaMalloc((void**) &d_a, num_bytes);

    if (h_a == 0 || d_a == 0)
    {
        printf("couldn't allocate memory\n");
        return 1;
    }

    cudaMemset(d_a, 0, num_bytes);

    // 4 blocks of 4 threads cover the 16 elements exactly
    dim3 grid, block;
    block.x = 4;
    grid.x = dimx / block.x;
    kernel<<<grid, block>>>(d_a);

    // blocking copy back: dest, src, size, direction (device to host)
    cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < dimx; i++)
        printf("%d ", h_a[i]);
    printf("\n");

    free(h_a);
    cudaFree(d_a);
    return 0;
}
|
2,468 | #include <iostream>
#include <iomanip>
using namespace std;
// Aborts the program with a diagnostic when a CUDA API call fails.
// Fix: exit with a non-zero status on failure -- the original exit(0)
// reported success to the calling shell even though the run aborted.
void Error(cudaError_t error)
{
    if (error != cudaSuccess){
        cout << "ERROR:" << cudaGetErrorString(error) << endl;
        exit(1);
    }
}
// Element-wise square: result[i] = a[i]^2 for every i < n.
// Grid-stride loop, so any launch configuration covers the whole vector.
__global__ void sqr_items_vectors(double* a, double* result, int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        const double v = a[idx];
        result[idx] = v * v;
    }
}
/*
 * Reads n and then n doubles from stdin, squares each element on the GPU,
 * and prints the results in scientific notation.
 *
 * Fix: the kernel launch itself was unchecked; a launch-configuration
 * failure would previously surface (confusingly) at the following memcpy.
 */
int main()
{
    int n, size;
    double *a, *result;
    double *gpu_a, *gpu_result;

    cin >> n;
    size = sizeof(double) * n;
    a = (double*)malloc(size);
    result = (double*)malloc(size);
    for (int i = 0; i < n; ++i) {
        cin >> a[i];
    }

    Error(cudaMalloc(&gpu_a, size));
    Error(cudaMalloc(&gpu_result, size));
    Error(cudaMemcpy(gpu_a, a, size, cudaMemcpyHostToDevice));

    sqr_items_vectors<<<256, 256>>>(gpu_a, gpu_result, n);
    Error(cudaGetLastError());   // catch launch errors immediately

    // blocking copy also synchronizes with the kernel
    Error(cudaMemcpy(result, gpu_result, size, cudaMemcpyDeviceToHost));

    for (int i = 0; i < n; ++i) {
        cout << scientific << setprecision(10) << result[i] << " ";
    }
    cout << endl;

    Error(cudaFree(gpu_a));
    Error(cudaFree(gpu_result));
    free(a);
    free(result);
    return 0;
}
|
2,469 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define ERR_CHK(call) { gpuAssert((call), __FILE__, __LINE__); }
// Reports a CUDA error together with its source location; terminates the
// process (with the error code as exit status) unless abort is false.
inline void gpuAssert(cudaError_t err, const char* file, int line, bool abort = true)
{
    if (err == cudaSuccess) return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(err), file, line);
    if (abort) exit(err);
}
// C = A + B, one thread per element; threads beyond n do nothing.
__global__ void vecAddKernel(int* A, int* B, int* C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;   // guard the grid tail
    C[i] = A[i] + B[i];
}
/*
 * Adds two random int vectors on the GPU and verifies against the CPU.
 *
 * Fixes over the original:
 *  - grid size used ceil(n / 256) with INTEGER division, which truncates
 *    before ceil() ever runs; for n not a multiple of 256 the vector tail
 *    was never computed.  Integer ceil-division is used instead.
 *  - host and device buffers were never freed.
 *  - "SUCCESS" was printed even when the verification loop found errors.
 */
int main() {
    int* d_A, * d_B, * d_C;
    int* h_A, * h_B, * h_C;
    const int n = 1024;

    h_A = (int*)malloc(n * sizeof(int));
    h_B = (int*)malloc(n * sizeof(int));
    h_C = (int*)malloc(n * sizeof(int));
    for (int i = 0; i < n; i++) {
        h_A[i] = rand();
        h_B[i] = rand();
        h_C[i] = 0;
    }

    ERR_CHK(cudaMalloc((void**)&d_A, n * sizeof(int)));
    ERR_CHK(cudaMalloc((void**)&d_B, n * sizeof(int)));
    ERR_CHK(cudaMalloc((void**)&d_C, n * sizeof(int)));
    ERR_CHK(cudaMemcpy(d_A, h_A, n * sizeof(int), cudaMemcpyHostToDevice));
    ERR_CHK(cudaMemcpy(d_B, h_B, n * sizeof(int), cudaMemcpyHostToDevice));

    dim3 gridSize((n + 255) / 256, 1, 1);   // integer ceil-div covers the tail
    dim3 blockSize(256, 1, 1);
    vecAddKernel <<< gridSize, blockSize >>> (d_A, d_B, d_C, n);
    ERR_CHK(cudaGetLastError());

    ERR_CHK(cudaMemcpy(h_C, d_C, n * sizeof(int), cudaMemcpyDeviceToHost));

    //verifying our solution
    bool ok = true;
    for (int i = 0; i < n; i++) {
        if (h_A[i] + h_B[i] != h_C[i]) {
            ok = false;
            printf("Incorrect addition");
            printf("%d + %d = %d for i = %d\n", h_A[i], h_B[i], h_C[i], i);
        }
    }
    if (ok) printf("SUCCESS!!!!!!!!!!!");

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
2,470 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
int main(int argc, char **argv)
{
// Round-trips a 128 MB float buffer host -> device -> host, using pinned
// (page-locked) host memory from cudaMallocHost for faster DMA transfers.
// NOTE(review): none of the CUDA calls are error-checked; if cudaMallocHost
// fails, the init loop below writes through an invalid pointer -- consider
// adding checks.
// memory size 128 MBs
int isize = 1<<25;
int nbytes = isize * sizeof(float);
// allocate the host memory (pinned, instead of the commented-out malloc)
//float *h_a = (float *)malloc(nbytes);
float *h_a;
cudaMallocHost((float **)&h_a, nbytes);
// allocate the device memory
float *d_a;
cudaMalloc((float **)&d_a, nbytes);
// initialize the host memory
for(int i=0;i<isize;i++)
h_a[i] = 7;
// transfer data from the host to the device
cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice);
// transfer data from the device to the host
cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost);
// free memory
cudaFree(d_a);
//free(h_a);
cudaFreeHost(h_a); // pinned memory must be released through the CUDA runtime
// reset device
cudaDeviceReset();
return 0;
} |
2,471 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
// prints error if detected and exits
// prints error if detected and exits
void inline check(cudaError_t err, const char* filename, int line)
{
    if (err == cudaSuccess) return;   // fast path: nothing to report
    printf("%s-l%i: %s\n", filename, line, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
// prints start and end of integer array
// Prints the first and last `num` elements of an integer array, separated
// by "...".  When the array is shorter than 2*num, prints both halves.
// Fix: the tail loop started at length-num-1 and therefore printed num+1
// trailing elements instead of num.
void printArrayTerse(int* array, int length, int num)
{
    if (length < 2*num) { num = length/2; }
    for (int i = 0; i < num; i++)
    {
        printf("%i ", array[i]);
    }
    printf("... ");
    for (int i = length - num; i < length; i++)
    {
        printf("%i ", array[i]);
    }
    printf("\n");
}
// copies an array to the GPU and back
// copies an array to the GPU and back
int main(int argc, char** argv)
{
    // variable declarations
    cudaError_t err;          // variable for error codes
    int* hostArray;           // pointer for array in host memory
    int* deviceArray;         // pointer for array in device memory
    int length = 262144;              // length of array
    int size = length*sizeof(int);    // size of array in bytes

    // allocate host memory (pinned, for fast transfers)
    err = cudaHostAlloc((void**)&hostArray,size,cudaHostAllocDefault);
    check(err, __FILE__, __LINE__);
    // allocate device memory
    err = cudaMalloc((void**)&deviceArray,size);
    check(err, __FILE__, __LINE__);

    // initialise host memory
    for(int i=0; i<length; i++)
    {
        hostArray[i] = i;
    }
    printArrayTerse(hostArray,length,8);

    // copy host to device.  Fix: this was a "HINT" placeholder; without the
    // copy, the following check() tested a stale error code and the device
    // array stayed uninitialised.
    err = cudaMemcpy(deviceArray, hostArray, size, cudaMemcpyHostToDevice);
    check(err, __FILE__, __LINE__);

    // clear host memory
    memset(hostArray, 0, size);
    printArrayTerse(hostArray,length,8);

    // copy device to host (second "HINT" placeholder, filled in likewise)
    err = cudaMemcpy(hostArray, deviceArray, size, cudaMemcpyDeviceToHost);
    check(err, __FILE__, __LINE__);
    printArrayTerse(hostArray,length,8);

    // free device memory
    err = cudaFree(deviceArray);
    check(err, __FILE__, __LINE__);
    // free host memory
    err = cudaFreeHost(hostArray);
    check(err, __FILE__, __LINE__);
    // exit
    return EXIT_SUCCESS;
}
|
2,472 | #include "includes.h"
__global__ void kernelInterpolationRow(double *original, double *result, int rows, int cols, int factor){
    // Row interpolation: expands each row of `original` by `factor`,
    // linearly interpolating between neighbouring samples.
    //
    // Fix: the original while-loop never advanced x, so every thread with
    // x < rows spun forever.  A grid-stride increment restores forward
    // progress without changing which element each thread processes first.
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int idOriginal,idResult;
    // Reference points for the interpolation
    double a,b;
    double m;   // slope per interpolated step
    while (x < rows){
        idOriginal = y*rows + x ;
        idResult = y*rows*factor*factor + x*factor;
        a = original[ idOriginal ];
        // NOTE(review): at x == rows-1 this reads one element past the row
        // (first sample of the next row, or past the buffer on the final
        // row) -- same as the original code; verify against the caller.
        b = original[ idOriginal + 1];
        m = (b - a)/((double)factor);
        // Before reaching the row end
        if (x != rows-1){
            for(int p=0; p<=factor; ++p){
                result[idResult] = a;
                a += m;
                ++idResult;
            }
        }
        // Final border: walk backwards from b
        else{
            for(int p=0; p<factor; ++p){
                result[idResult] = b;
                b -= m;
                ++idResult;
            }
        }
        x += blockDim.x * gridDim.x;   // grid-stride: guarantees termination
    }
} |
2,473 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nparticle;    // number of particles in play
int* partType;    // particle type codes (device pointer; unused by the kernel here)
float* partX;     // particle X coordinates (device pointer)
float* partY;     // particle Y coordinates (device pointer)
float* partZ;     // particle Z coordinates (device pointer)
float* partQ;     // particle charges (device pointer)
float* Etot;      // total-energy accumulator (device pointer, length 1)
};
typedef struct GpuConstantsPackage cribSheet;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ cribSheet cSh;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// ParticleSimulator: run a rudimentary simulation of particles
//-----------------------------------------------------------------------------
__global__ void ParticleSimulator()
{
  // Loop over all particles and compute the electrostatic potential.
  // Each thread accumulates its own portion of the potential, then the
  // partial sums are pooled with one atomicAdd per thread at the end.
  //
  // Fix: use float math throughout -- the double-precision sqrt() promoted
  // the distance computation to fp64, which is needlessly slow on most
  // GPUs and inconsistent with the fp32 particle data.
  int tidx = threadIdx.x;
  float qq = 0.0f;
  while (tidx < cSh.nparticle) {
    // Still the naive O(N^2) way, to show how slow it is
    int i;
    for (i = 0; i < tidx; i++) {
      float dx = cSh.partX[tidx] - cSh.partX[i];
      float dy = cSh.partY[tidx] - cSh.partY[i];
      float dz = cSh.partZ[tidx] - cSh.partZ[i];
      float r = sqrtf(dx*dx + dy*dy + dz*dz);
      qq += cSh.partQ[tidx] * cSh.partQ[i] / r;
    }
    // Advance by one block's worth of threads (single-block launch)
    tidx += blockDim.x;
  }
  // Accumulate energy across all threads
  atomicAdd(&cSh.Etot[0], qq);
}
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
//               instance.  Host and device buffers are zero-initialized.
//
// Arguments:
//   len:      the length of array to allocate
//   pin:      flag to have the memory pinned (non-pageable on the host side
//             for optimal transfer speed to the device)
//
// NOTE(review): cudaHostAlloc/cudaMalloc return codes are not checked here;
// an allocation failure would surface later as a crash in memset/memcpy.
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
// cudaHostAllocMapped also maps the pinned buffer into the device
// address space -- presumably intentional; plain pinning would be
// cudaHostAllocDefault.  TODO confirm.
cudaHostAlloc((void **)&G.HostData, len * sizeof(int),
cudaHostAllocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
cudaMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
//                instance (both the host-side and device-side buffers).
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
  // Ordinary host allocations go back through free(); pinned allocations
  // must be released through the CUDA runtime.
  if (G->IsPinned != 1) {
    free(G->HostData);
  }
  else {
    cudaFreeHost(G->HostData);
  }
  cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: push the host-side copy of an integer array up to the GPU.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
  size_t nbytes = G->len * sizeof(int);
  cudaMemcpy(G->DevcData, G->HostData, nbytes, cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//                 Fix: the copy direction was cudaMemcpyHostToDevice, which
//                 overwrote the device buffer with stale host data instead
//                 of downloading the device results.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
//                 instance.  Host and device buffers are zero-initialized.
//
// Arguments:
//   len:      the length of array to allocate
//   pin:      flag to have the memory pinned (non-pageable on the host side
//             for optimal transfer speed ot the device)
//
// NOTE(review): allocation return codes are not checked; a failure would
// surface later as a crash in memset/memcpy.
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
// cudaHostAllocMapped also maps the buffer into device address space --
// presumably intentional; TODO confirm vs cudaHostAllocDefault.
cudaHostAlloc((void **)&G.HostData, len * sizeof(float),
cudaHostAllocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
cudaMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
//                  instance (both the host-side and device-side buffers).
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
  // Ordinary host allocations go back through free(); pinned allocations
  // must be released through the CUDA runtime.
  if (G->IsPinned != 1) {
    free(G->HostData);
  }
  else {
    cudaFreeHost(G->HostData);
  }
  cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: push the host-side copy of a float array up to the GPU.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
  size_t nbytes = G->len * sizeof(float);
  cudaMemcpy(G->DevcData, G->HostData, nbytes, cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//                   Fix: the copy direction was cudaMemcpyHostToDevice,
//                   which overwrote the device buffer with stale host data
//                   instead of downloading the device results (this is why
//                   reading etot after the kernel returned the zero that
//                   was last uploaded).
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
// Driver: builds ~98k random particles, uploads them plus the cribSheet
// constants, then times the naive electrostatic kernel at several block
// sizes.
int i, np;
gpuInt particleTypes;
gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge;
gpuFloat etot;
// Create a small array of particles and populate it
particleTypes = CreateGpuInt(100000, 1);
particleXcoord = CreateGpuFloat(100000, 1);
particleYcoord = CreateGpuFloat(100000, 1);
particleZcoord = CreateGpuFloat(100000, 1);
particleCharge = CreateGpuFloat(100000, 1);
// Allocate and initialize the total energy
// accumulator on the host and on the device.
etot = CreateGpuFloat(1, 1);
// Initialize random number generator. srand() SEEDS the generator,
// thereafter each call to rand() will return a different number.
// This is a reeally bad generator (much better methods with longer
// periods before they start looping back over the same sequence are
// available).
srand(62052);
// Place many, many particles
np = 97913;
for (i = 0; i < np; i++) {
// Integer truncation would happen anyway, I'm just making it explicit.
// NOTE(review): 8 * rand() can overflow int (rand() may reach RAND_MAX);
// looks like (8.0 * rand()) / RAND_MAX was intended -- confirm.  The
// kernel never reads partType, so results are unaffected here.
particleTypes.HostData[i] = (int)(8 * rand());
// Create some random coordinates (double-to-float conversion
// is happening here. On the GPU this can have performance
// impact, so keep an eye on the data types at all times!
particleXcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleYcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleZcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleCharge.HostData[i] = 0.5 - (double)rand() / (double)RAND_MAX;
}
// Show the CPU result (disabled reference computation)
#if 0
int j;
double qq = 0.0;
for (i = 0; i < np; i++) {
for (j = 0; j < i; j++) {
double dx = particleXcoord.HostData[i] - particleXcoord.HostData[j];
double dy = particleYcoord.HostData[i] - particleYcoord.HostData[j];
double dz = particleZcoord.HostData[i] - particleZcoord.HostData[j];
double qfac = particleCharge.HostData[i] * particleCharge.HostData[j];
qq += qfac / sqrt(dx*dx + dy*dy + dz*dz);
}
}
printf("CPU result = %9.4lf\n", qq);
#endif
// Stage critical constants--see cribSheet struct instance cSh above.
// NOTE(review): cnstage.partType is never assigned (uninitialised device
// pointer in the constants cache); harmless only while the kernel does
// not touch cSh.partType.
cribSheet cnstage;
cnstage.nparticle = np;
cnstage.partX = particleXcoord.DevcData;
cnstage.partY = particleYcoord.DevcData;
cnstage.partZ = particleZcoord.DevcData;
cnstage.partQ = particleCharge.DevcData;
cnstage.Etot = etot.DevcData;
// Upload all data to the device
UploadGpuInt(&particleTypes);
UploadGpuFloat(&particleXcoord);
UploadGpuFloat(&particleYcoord);
UploadGpuFloat(&particleZcoord);
UploadGpuFloat(&particleCharge);
// Upload the constants to the constants cache
cudaMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));
// Launch the kernel with different numbers of threads
for (i = 1024; i >= 128; i /= 2) {
// Zero the total energy and upload (this could be done by the GPU in
// a separate kernel, but it's convenient enough to do it this way)
etot.HostData[0] = 0.0;
UploadGpuFloat(&etot);
ParticleSimulator<<<1, i>>>();
// Download the total energy
DownloadGpuFloat(&etot);
printf("Total energy (%4d threads) = %10.4f\n", i, etot.HostData[0]);
}
// Device synchronization
cudaDeviceSynchronize();
return 0;
}
|
2,474 | #include <iostream>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define N 4
int n = 20; //it defines the range of the random number
using namespace std;
__device__ float generate( curandState* globalState, int ind ) // ind varies from 0 to N
{
// Draws one uniform float in (0, 1] from the curand state at index `ind`,
// then writes the advanced state back so the next call continues the stream.
//int ind = threadIdx.x;
curandState localState = globalState[ind];
float RANDOM = curand_uniform( &localState ); // float curand_uniform(curandStateXORWOW_t *state
// Return a uniformly distributed float between \p 0.0f and \p 1.0f
globalState[ind] = localState; // write back: the sequence advances for the next call
return RANDOM;
}
// Gives every thread its own curand stream: all threads share `seed`, and
// the thread id selects a distinct subsequence for each state slot.
__global__ void setup_seed ( curandState * state, unsigned long seed )
{
    int id = threadIdx.x;
    curand_init ( seed, id, 0, &state[id] );
}
__global__ void kernel(float* N3, curandState* globalState, int n) // n = 2
{
// Fills N3[0..N-1] with pseudo-random values folded into a small range.
// NOTE(review): every launched thread executes this whole loop and writes
// the same N slots -- presumably meant for a <<<1,1>>> launch; confirm.
// generate random numbers
for(int i=0;i<N;i++)
{ // globalState received the 'seeds'
int k = generate(globalState, i) * (10*N/4); // float generate (curandState* globalState, int ind) is a __device__ function
while(k > n*n-1) // for n=2, N=4: repeatedly fold k until it lies in [0, 3]
{
k-=(n*n-1); // k = k - 3
}
N3[i] = k; // 10 -> 1; 9 -> 3; 8 -> 2; 7 -> 1; etc
}
}
|
2,475 | #include <cuda.h>
#include <cuda_runtime.h>
///all parallel implementations of this algorithim will require two functions or else delay a function significantly
// Sieve of Sundaram, phase one, one thread per value i:
// marks every number of the form i + j*(2i+1) with j >= i, up to `bound`.
__global__
void sundPartOnePerRow(int bound, bool * findArray)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < 1 || i > bound)
        return;
    int denom = 2 * i + 1;
    int last = (bound - i) / denom;   // largest j with i + j*denom <= bound
    for (int j = i; j <= last; ++j)
        findArray[i + j * denom] = true;
}
// Sieve of Sundaram, phase one, one thread per (i, j) pair:
// marks i + j + 2ij whenever 1 <= i <= j < bound and the value is in range.
__global__
void sundPartOnePerElement(int bound, bool * findArray)
{
    uint j = (blockIdx.x * blockDim.x) + threadIdx.x; // x is j
    uint i = (blockIdx.y * blockDim.y) + threadIdx.y; // y is i
    // guards: j >= 1, j < bound, i >= 1, i < bound, i <= j
    if (j == 0 || j >= bound || i == 0 || i >= bound || i > j)
        return;
    uint bin = i + j + ((i * j) << 1); // form i + j + 2ij
    if (bin > bound)                   // i + j + 2ij <= bound
        return;
    findArray[bin] = true; // collisions are harmless: every writer stores true
}
// Sieve of Sundaram, phase two (1-D): every unmarked index k corresponds to
// the prime 2k+1; primeArray uses false = prime.  Thread 0 also flags 2.
__global__
void sundPartTwoPerElementOneD(int bound, bool * findArray, bool * primeArray)
{
    int k = blockDim.x * blockIdx.x + threadIdx.x;
    if (k == 0)   // let thread 0 handle setting 2 as prime
    {
        primeArray[2] = false;
        return;
    }
    int realBound = (bound - 1) >> 1;
    if (k < realBound && !findArray[k])
    {
        primeArray[(k << 1) + 1] = false;
    }
}
__global__
void sundPartTwoPerElementTwoD(int bound, bool * findArray, bool * primeArray)
{
// 2-D variant of Sundaram phase two.
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
// NOTE(review): id = idx + idy is not a standard 2-D flattening (that would
// be idy * gridWidth + idx); distinct (idx, idy) pairs collapse onto the
// same id, so coverage depends entirely on the launch shape -- verify the
// intended launch configuration.
int id = idx + idy;
if(id == 0) //let thread 0 handle setting 2 as prime
{
primeArray[2] = false;
return;
}
int realBound = (bound - 1) >> 1;
if(id >= realBound)
{
return;
}
if(!findArray[id])
{
// unmarked index id maps to the prime 2*id + 1 (false = prime)
int bin = (id << 1) + 1;
primeArray[bin] = false;
}
}
// Sieve of Eratosthenes, one thread per candidate divisor `id`:
// ids in [2, sqrt(bound)] mark all of their multiples starting at id*id.
// primeArray uses true = composite; thread 0 additionally marks 0 and 1.
__global__
void eratosPerElement(int bound, bool * primeArray)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    int sqrtBound = (int)sqrt((double)bound);
    if (id == 0)
    {
        primeArray[0] = true;
        primeArray[1] = true;
    }
    if (id < 2 || id > sqrtBound)
        return;
    for (int k = id * id; k <= bound; k += id)
        primeArray[k] = true;
}
///this parallel function should be launched in the following manner
///for( int i = 2; i < (bound / 2); i++)
///{
/// if(!primeArray[i])
/// {
/// eratosParallelMult<<<(bound /2)/1024, 1024>>>(i, bound, primeArray); //or some other way to calculate size dynamically
/// }
///}
// Marks i*idx as composite for every thread idx >= 2; thread 0 marks 0 and 1.
// Intended to be launched once per known prime i (see the comment above).
__global__
void eratosParallelMult(int i, int bound, bool * primeArray)
{
    uint idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx == 0)
    {
        primeArray[0] = true;
        primeArray[1] = true;
        return;
    }
    if (idx == 1)
        return;
    uint bin = i * idx;
    if (bin <= bound)
        primeArray[bin] = true;
}
///this should work because we dont care about collisions and all eratos does is find multiples, this will do some redundant calculationg but hopefully so fast it doesnt matter
// 2-D Eratosthenes variant: thread (x, y) with x, y >= 2 marks x*y as
// composite.  Redundant writes are acceptable since every writer stores
// `true`; thread x == 0 additionally marks 0 and 1.
__global__
void eratosPerElement2D(int bound, bool * primeArray)
{
    int sqrtBound = (int)sqrt((double)bound);   // kept from original (unused)
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    if (x == 0)
    {
        primeArray[0] = true;
        primeArray[1] = true;
    }
    if (x < 2)
        return;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (y < 2)
        return;
    int bin = x * y;
    if (bin <= bound)
        primeArray[bin] = true;
}
|
2,476 | #include "includes.h"
__global__ void totalSequentialSharedMem(float *input, float *output, int len) {
    //@@ Compute reduction for a segment of the input vector.
    // Each block stages blockDim.x elements into shared memory, then thread
    // 0 sums them sequentially and writes the per-block total.
    int tid = threadIdx.x, i = blockIdx.x * blockDim.x;
    __shared__ float sdata[BLOCK_SIZE];
    sdata[tid] = i + tid < len ? input[i+tid] : 0.0;
    // Fix: barrier before thread 0 reads the other threads' slots; without
    // it, thread 0 raced ahead and summed uninitialised shared memory.
    __syncthreads();
    if(tid == 0) {
        for(unsigned int j = 1; j <blockDim.x; j++)
        {
            sdata[0] += sdata[j];
        }
        output[blockIdx.x] = sdata[0];
    }
} |
2,477 | #include "GPURandom.cuh"
#include "GPURandomState.generated.cu"
typedef unsigned int uint;
// Random number generators from: http://http.developer.nvidia.com/GPUGems3/gpugems3_ch37.html
// S1, S2, S3, and M are all constants, and z is part of the
// private per-thread generator state.
__device__
uint TausStep(uint* z, int S1, int S2, int S3, uint M) {
    // One step of a Tausworthe generator; advances *z in place and
    // returns the new state word.
    uint b = (((*z << S1) ^ *z) >> S2);
    *z = ((*z & M) << S3) ^ b;
    return *z;
}
// A and C are constants
__device__
uint LCGStep(uint* z, uint A, uint C) {
    // One linear-congruential step; advances *z in place.
    *z = A * (*z) + C;
    return *z;
}
__device__
uint HybridTaus(RandomState* state) {
    // Combined period is lcm(p1, p2, p3, p4) ~ 2^121.
    // The four sub-generators use independent state words, so the
    // evaluation order of the XOR terms does not affect the result.
    uint r = TausStep(&state->Taus1, 13, 19, 12, 4294967294UL);  // p1 = 2^31 - 1
    r ^= TausStep(&state->Taus2, 2, 25, 4, 4294967288UL);        // p2 = 2^30 - 1
    r ^= TausStep(&state->Taus3, 3, 11, 17, 4294967280UL);       // p3 = 2^28 - 1
    r ^= LCGStep(&state->Lcg, 1664525, 1013904223UL);            // p4 = 2^32
    return r;
}
__device__
float randomFloat(RandomState* state, float maxValue) {
    // Maps a raw draw into [0, maxValue) with ~1e7 distinct steps.
    const int Precision = 1e7;
    uint bucket = HybridTaus(state) % Precision;
    return bucket / (float)Precision * maxValue;
}
__device__
int randomInt(RandomState* state, int maxValue) {
    // Draw in [0, maxValue) via modulo reduction of the raw state word.
    uint draw = HybridTaus(state);
    return draw % maxValue;
}
__device__
int randomSharedInt(RandomState* state, int maxValue, int *sharedRandom) {
// Thread 0 draws one random value and publishes it through shared memory
// so the whole block returns the same number.
if(threadIdx.x == 0) {
sharedRandom[0] = randomInt(state, maxValue);
}
__syncthreads();
// NOTE(review): no barrier after the read -- if a caller invokes this
// twice in a row, thread 0's second write can race with slower threads
// still reading the first value; verify call sites.
return sharedRandom[0];
}
__device__
void _swapOrInc(int *a, int *b) {
    // Force *a < *b and distinctness: swap when out of order,
    // otherwise bump *b past *a.
    if (*a <= *b) {
        *b += 1;
    } else {
        int t = *a;
        *a = *b;
        *b = t;
    }
}
__device__
void Pick2Distinct(RandomState *randomState, int n, int *a, int *b) {
    // Draw two distinct values from [0, n) without replacement.
    *a = randomInt(randomState, n);
    *b = randomInt(randomState, n - 1);
    // shift the second draw past the first one to avoid a collision
    *b += (*b >= *a) ? 1 : 0;
}
__device__
void Pick2DistinctOrdered(RandomState *randomState, int offset, int n, int *a, int *b) {
// choose 2 distinct values from [offset, offset+n) and sort so that a < b
*a = offset + randomInt(randomState, n);
*b = offset + randomInt(randomState, n - 1);
_swapOrInc(a, b); // establishes a < b and distinctness
}
__device__
void Pick3DistinctOrdered(RandomState *randomState, int offset, int n, int *a, int *b, int *c) {
// choose 3 distinct values from [offset, offset+n) and sort a < b < c
*a = offset + randomInt(randomState, n);
*b = offset + randomInt(randomState, n - 1);
_swapOrInc(a, b); // now a < b
*c = offset + randomInt(randomState, n - 2);
_swapOrInc(a, c); // order a vs c
_swapOrInc(b, c); // order b vs c; final result a < b < c
}
|
2,478 | #include "CrossSectionUtilities.hh"
#include <cmath>
namespace MonteRay {
void
thinGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error) {
// Removes interior grid points whose value is reproduced to well within
// max_error (percent) by linear interpolation between their neighbours.
// Restarts the sweep after every erase so iterators stay valid; loops
// until a full pass removes nothing.
// thin grid
bool done;
do {
done = true;
unsigned i = 0;
for( auto previous_itr = linearGrid.begin(); previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
auto next_itr = itr; ++next_itr;
if( next_itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = next_itr->first;
double energy = itr->first;
// calculated interpolatedXS
double lower = previous_itr->second;
double upper = next_itr->second;
double deltaE = energy2 - energy1;
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
// check difference with real xs
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
// printf( "Debug: i=%d E=%f, interp=%f, real=%f diff=%f \n", i, energy, interpolatedXS, totalXS, percentDiff);
// 0.5 factor: only thin points comfortably inside tolerance, leaving
// headroom so a later checkGrid pass does not re-insert them
if( percentDiff < max_error * 0.5 ) {
linearGrid.erase(itr);
done = false;
break;
}
++i;
}
} while( !done );
}
void
addPointsToGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error ) {
// Refines the grid: for each interval, evaluates the true cross-section
// at the logarithmic mid-point and inserts it whenever the linear
// interpolation error exceeds max_error (percent).  Repeats until a full
// pass inserts nothing.
bool done;
// linearize
do {
done = true;
for( auto previous_itr = linearGrid.begin(); previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = itr->first;
double deltaE = energy2 - energy1;
if( deltaE > 1e-6 ) {
// don't add points finer than 1e-6
double energy = std::exp(( std::log(energy2) - std::log(energy1) )*0.5 + std::log(energy1));
// calculated interpolatedXS
double lower = previous_itr->second;
double upper = itr->second;
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
// check difference with real xs
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
if( percentDiff > max_error ) {
// insert before itr keeps the list sorted by energy
linearGrid.insert(itr, std::make_pair(energy, totalXS));
done = false;
}
}
}
} while ( !done );
}
bool
checkGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error, unsigned nIntermediateBins){
    const bool debug = false;
    if( debug ) printf( "Debug: createLinearGrid - checking linearization\n");
    // Verify the linearized grid: sample up to nIntermediateBins points in
    // every interval and insert the true cross-section wherever the linear
    // interpolation error exceeds max_error (percent).  Repeats until a
    // full pass succeeds with no insertions.
    //
    // Fix: the bin count was written back into the nIntermediateBins
    // parameter itself, so a single narrow interval (deltaE <= 1e-6)
    // permanently reduced -- or zeroed -- the number of check points used
    // for every subsequent interval in the same pass.  Each interval now
    // computes its own local bin count.  The loop index is also unsigned
    // to match the bin count (was a signed/unsigned comparison).
    bool done = true;
    do {
        done = true;
        auto start_itr = linearGrid.begin();
        for( auto previous_itr = start_itr; previous_itr != linearGrid.end(); ++previous_itr) {
            auto itr = previous_itr; ++itr;
            if( itr == linearGrid.end() ) break;
            double energy1 = previous_itr->first;
            double energy2 = itr->first;
            double deltaE = energy2 - energy1;
            double lower = previous_itr->second;
            double upper = itr->second;
            // no need to sample spacings finer than 1e-6 for photon data
            unsigned nBins;
            if( std::abs( deltaE ) > 1e-6 ) {
                nBins = std::min( unsigned( deltaE / 1e-6 ), nIntermediateBins );
            } else {
                nBins = 0;
            }
            for( unsigned j=0; j<nBins; ++j) {
                double energy = energy1 + (deltaE*j)/nBins;
                // interpolated value at the check point
                double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
                double totalXS = xsFunc( energy );
                double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
                if( percentDiff > max_error ) {
                    if( debug ) {
                        printf( "Debug: createLinearGrid - linearization failed for E=%.10f, real XS=%f, interpolated XS=%f, percent diff=%f\n",
                                energy, totalXS, interpolatedXS, percentDiff );
                    }
                    start_itr = linearGrid.insert(itr, std::make_pair(energy, totalXS));
                    done = false;
                    break;
                }
                if( debug ) {
                    printf( "Debug: createLinearGrid - linearization passed for E=%.10f, real XS=%f, interpolated XS=%f, percent diff=%f\n",
                            energy, totalXS, interpolatedXS, percentDiff );
                }
            }
        }
    } while ( !done );
    return true;
}
}
|
2,479 | #define NUM_THREADS 32
#define size_t int
extern "C"
__global__ void
euclidean_kernel(const float * vg_a, size_t pitch_a, size_t n_a,
const float * vg_b, size_t pitch_b, size_t n_b,
size_t k,
float * d, size_t pitch_d)
{
// Pairwise Euclidean distance: block (x, y) computes the distance between
// row x of vg_a and row y of vg_b over k features (pitches in elements)
// and writes it to d[y][x].
// NOTE(review): temp is sized NUM_THREADS but strided by blockDim.x, and
// the tree reduction assumes a power-of-two block -- presumably the launch
// uses exactly NUM_THREADS (32) threads per block; confirm callers.
size_t x = blockIdx.x;
size_t y = blockIdx.y;
// If an element is to be computed
if(x < n_a && y < n_b) {
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
// each thread accumulates a strided slice of the squared differences
for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
float t = vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset];
temp[threadIdx.x] += (t * t);
}
// Sync with other threads
__syncthreads();
// Reduce (binary tree over the shared partials)
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride) {
temp[threadIdx.x] += temp[threadIdx.x + stride];
}
__syncthreads();
}
// Write to global memory
if(threadIdx.x == 0) {
d[y * pitch_d + x] = sqrt(temp[0]);
}
}
}
extern "C"
__global__ void
euclidean_kernel_same(const float * vg_a, size_t pitch_a, size_t n_a,
const float * vg_b, size_t pitch_b, size_t n_b,
size_t k, float * d, size_t pitch_d, float p)
{
// Self-distance variant: fills the symmetric matrix d for one data set,
// computing only the strict lower triangle (x < y) and mirroring it.
// Diagonal entries are zeroed by thread 0 of the diagonal blocks.
// NOTE(review): vg_b/pitch_b/n_b/p are unused here -- the signature is
// kept parallel to the other kernels, presumably for a uniform launcher.
size_t x = blockIdx.x, y = blockIdx.y;
if((x == y) && (x < n_a) && (threadIdx.x == 0))
d[y * pitch_d + x] = 0.0;
// If all element is to be computed
if(y < n_a && x < y) {
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
for(size_t offset = threadIdx.x; offset < k; offset += NUM_THREADS) {
float t = vg_a[x * pitch_a + offset] - vg_a[y * pitch_a + offset];
temp[threadIdx.x] += (t * t);
}
// Sync with other threads
__syncthreads();
// http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
// Reduce
// This is a cumsum.
// Vital that __syncthreads is called so all threads
// update temp[threadIdx.x]
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride)
temp[threadIdx.x] += temp[threadIdx.x + stride];
__syncthreads();
}
// Write to global memory (both symmetric halves)
if(threadIdx.x == 0) {
float s = sqrt(temp[0]);
d[y * pitch_d + x] = s;
d[x * pitch_d + y] = s;
}
}
}
extern "C"
__global__ void minkowski_kernel(const float * vg_a, size_t pitch_a, size_t n_a,
const float * vg_b, size_t pitch_b, size_t n_b,
size_t k,
float * d, size_t pitch_d,
float p)
{
// Pairwise Minkowski distance of order p: block (x, y) computes
// (sum_i |a_xi - b_yi|^p)^(1/p) using the fast (reduced-precision)
// __powf intrinsic, and writes it to d[y][x].
size_t
x = blockIdx.x, y = blockIdx.y;
// If all element is to be computed
if(x < n_a && y < n_b) {
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
float t = fabsf(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
temp[threadIdx.x] += __powf(t, p);
}
// Sync with other threads
__syncthreads();
// Reduce (binary tree; assumes power-of-two blockDim.x <= NUM_THREADS)
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride)
temp[threadIdx.x] += temp[threadIdx.x + stride];
__syncthreads();
}
// Write to global memory
if(threadIdx.x == 0) {
float power = 1.f/p;
d[y * pitch_d + x] = __powf(temp[0], power);
}
}
}
extern "C"
__global__ void canberra_kernel(const float * vg_a, size_t pitch_a, size_t n_a,
const float * vg_b, size_t pitch_b, size_t n_b,
size_t k,
float * d, size_t pitch_d,
float p)
{
// Pairwise Canberra distance: block (x, y) computes
// sum_i |a_xi - b_yi| / |a_xi + b_yi| (terms with zero denominator are
// skipped) and writes it to d[y][x].  Parameter p is unused; the
// signature is kept parallel to the other distance kernels.
size_t x = blockIdx.x;
size_t y = blockIdx.y;
// If all element is to be computed
if(x < n_a && y < n_b) {
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
float num = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
float den = abs(vg_a[x * pitch_a + offset] + vg_b[y * pitch_b + offset]);
if(den != 0.0) {
temp[threadIdx.x] += num / den;
}
}
// Sync with other threads
__syncthreads();
// Reduce (binary tree; assumes power-of-two blockDim.x <= NUM_THREADS)
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride) {
temp[threadIdx.x] += temp[threadIdx.x + stride];
}
__syncthreads();
}
// Write to global memory
if(threadIdx.x == 0) {
d[y * pitch_d + x] = temp[0];
}
}
}
|
2,480 | #include "includes.h"
// Element-wise vector addition: c[id] = a[id] + b[id], one thread per element.
// NOTE(review): there is no bounds check, so the launch must supply exactly
// one thread per element (or fewer) -- extra threads would read/write OOB.
__global__ void add(int* a, int* b, int* c) {
// flattened global thread index
int id = blockIdx.x * blockDim.x + threadIdx.x;
// each thread handles exactly one element pair
c[id] = a[id] + b[id];
} |
2,481 | #include <cuda_runtime.h>
#include <stdio.h>
// Trivial kernel: every launched thread prints one line from the device.
__global__ void func(void)
{
// device-side printf; output is delivered to the host at the next sync point
printf("hello world from GPU\n");
}
// Host entry point: print from the CPU, then launch 10 GPU threads that
// each print a line, and tear the device down.
int main(void)
{
    printf("hello world from CPU\n");
    func<<<1, 10>>>();  // 1 block of 10 threads, launch is asynchronous
    // FIX: synchronize before resetting so the kernel has finished and its
    // printf buffer is flushed (the original relied on cudaDeviceReset's
    // implicit cleanup for this).
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
2,482 | // (c) 2017 John Freeman and Jose Rivas
// Sorts and array using the bitonic sorting algorithm.
// For more information, go here: http://www.cse.buffalo.edu/faculty/miller/Courses/CSE633/Mullapudi-Spring-2014-CSE633.pdf
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
// Bitonic compare-exchange stage: thread `tid` pairs with `tid ^ stage`
// and orders the pair; the direction bit (tid & blockLen) selects
// ascending or descending order for this thread's sub-list.
// (Partner-index trick from Matthias Endler,
// https://gist.github.com/mre/1392067)
__global__ void swap(int *a, const int arraySize, const int step, const int stage)
{
    int tid = threadIdx.x;
    int blockLen = 2 << step;
    int partner = tid ^ stage;
    // Only the lower index of each pair performs the exchange.
    if (partner <= tid)
        return;
    bool ascending = (tid & blockLen) == 0;
    int lo = a[tid];
    int hi = a[partner];
    bool outOfOrder = ascending ? (lo > hi) : (lo < hi);
    if (outOfOrder) {
        a[tid] = hi;
        a[partner] = lo;
    }
}
// All-pairs minimum sieve: clears check[i] for every element i that is
// strictly larger than some other element, so after the full (i, j) sweep
// only the global minimum (and its ties) keeps check[i] == true.
__global__ void minCompare(int *a, bool *check){
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int second = threadIdx.y + blockIdx.y * blockDim.y;
    // never compare an element with itself
    if (first == second)
        return;
    if (a[first] > a[second])
        check[first] = false;
}
// Collects the surviving minimum: any element still flagged true by
// minCompare writes itself into min[0].
// NOTE(review): if several equal minima survive, multiple blocks race on
// min[0]; they all store the same value, so the result is still correct.
__global__ void cudaMin(int *a, bool *check, int* min) {
//int idx = threadIdx.x;
int idx = blockIdx.x;  // launched with one block per element
if(check[idx]) {
min[0] = a[idx];
}
}
/*
 * Fill arr[0..length-1] with pseudo-random non-negative ints.
 * Seeds from the wall clock, so successive runs differ.
 */
void array_fill(int *arr, int length)
{
    srand(time(NULL));
    for (int i = 0; i < length; ++i) {
        // FIX: the original (int)(float)rand() round-trip through float
        // loses precision for values above 2^24; store the int directly.
        arr[i] = rand();
    }
}
// Demo driver: fills a 1024-element array, finds its minimum with a brute
// force all-pairs sieve, then sorts it in place with bitonic sort.
int main()
{
const int arraySize = 1024;
int *a = (int*) malloc( arraySize * sizeof(int));
array_fill(a, arraySize);
// check[i] stays true only while a[i] remains a minimum candidate
bool bools[arraySize];
for (int k = 0; k < arraySize; ++k) {
bools[k] = true;
}
bool *check;
int *ad;
const int asize = arraySize * sizeof(int);
const int bsize = arraySize * sizeof(bool);
cudaMalloc((void**) &check, bsize);
cudaMemcpy(check, bools, bsize, cudaMemcpyHostToDevice);
cudaMalloc((void**)&ad, asize);
cudaMemcpy(ad, a, asize, cudaMemcpyHostToDevice);
// NOTE(review): dimBlock (1024x1024 threads) exceeds the per-block limit,
// but it is never used in a launch; dimGrid and dimBlock2 are the live ones.
dim3 dimBlock(arraySize, arraySize);
dim3 dimGrid(1, 1);
dim3 minBlock(arraySize, 1);
int *min;
const int intSize = sizeof(int);
cudaMalloc((void**) &min, intSize);
// Time the minimum search: one 1-thread block per (i, j) pair.
clock_t start = clock();
minCompare <<< dim3(arraySize, arraySize), 1 >>> (ad, check);
cudaMin <<< dim3(arraySize, 1), 1 >>> (ad, check, min);
clock_t stop = clock();
// NOTE(review): kernels are asynchronous and nothing synchronizes before
// `stop`, so this measures launch overhead rather than kernel runtime.
double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
bool bools2[arraySize];
int minhost[1];
cudaMemcpy(minhost, min, intSize, cudaMemcpyDeviceToHost);
cudaMemcpy(bools2, check, bsize, cudaMemcpyDeviceToHost);
printf("min is %d\n", minhost[0]);
for (int k = 0; k < arraySize; ++k) {
printf(bools2[k] ? "true " : "false ");
}
// Bitonic sort: 10 steps for 2^10 elements; each step i runs stages with
// decreasing partner distance t = 2^j.
start = clock();
//iterate through steps
for (int i = 0; i < 10; ++i) {
//iterate through stages
for (int j = i; j >= 0; --j) {
dim3 dimBlock2(arraySize, 1);
int t = 1 << j;
swap <<< dimGrid, dimBlock2 >>> (ad, arraySize, i, t);
}
}
stop = clock();
elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
int b[arraySize];
cudaMemcpy(b, ad, asize, cudaMemcpyDeviceToHost);
for (int k = 0; k < arraySize; ++k) {
printf("%d ", b[k]);
}
printf("\n\n%d\n", a[0]);
cudaFree(ad);
// NOTE(review): check/min are never cudaFree'd and `a` is never free'd;
// the busy-wait below just keeps a console window open (Windows habit).
while (1) {}
}
|
2,483 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1000
void addMatrices(float *h_A, float *h_B, float *h_C);
void fillMatrix(float *h_A);
void printMatrix(float *A);
// Program flow: allocate three N x N host matrices, fill two operands,
// add them on the GPU, print the result, and release the host buffers.
int main(int argc, char const *argv[]) {
    const size_t count = (size_t)N * N;
    float *h_A = (float *) malloc(count * sizeof(float));
    float *h_B = (float *) malloc(count * sizeof(float));
    float *h_C = (float *) malloc(count * sizeof(float));
    fillMatrix(h_A);
    fillMatrix(h_B);
    addMatrices(h_A, h_B, h_C);   // device does the actual work
    printMatrix(h_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
__global__
// One thread per row: thread `row` adds row `row` of A and B into C.
// size is the matrix dimension (size x size, row-major).
void matAddKernel(float *d_A, float *d_B, float *d_C, int size)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= size)
        return;                       // tail threads past the last row
    int base = row * size;            // start of this thread's row
    for (int col = 0; col < size; col++) {
        d_C[base + col] = d_A[base + col] + d_B[base + col];
    }
}
// Fill the N x N matrix with 1, 2, 3, ... in row-major order.
void fillMatrix(float *h_A)
{
    const int total = N * N;
    for (int idx = 0; idx < total; idx++)
        h_A[idx] = (float)(idx + 1);
}
// Abort with a file/line diagnostic if a CUDA runtime call failed.
static void checkCudaOrDie(cudaError_t err, int line)
{
    if (err != cudaSuccess)
    {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, line);
        exit(EXIT_FAILURE);
    }
}
/*
 * Add h_A and h_B (N x N, row-major) into h_C on the GPU.
 * Allocates device buffers, copies inputs, launches matAddKernel with one
 * thread per row, copies the result back, and frees device memory.
 */
void addMatrices(float *h_A, float *h_B, float *h_C)
{
    int size = N * N;
    int d_size = size * sizeof(float);
    float *d_A, *d_B, *d_C;
    // FIX: the original checked only the cudaMalloc calls; the memcpys and
    // the kernel launch were silently unchecked.
    checkCudaOrDie(cudaMalloc((void**) &d_A, d_size), __LINE__);
    checkCudaOrDie(cudaMemcpy(d_A, h_A, d_size, cudaMemcpyHostToDevice), __LINE__);
    checkCudaOrDie(cudaMalloc((void**) &d_B, d_size), __LINE__);
    checkCudaOrDie(cudaMemcpy(d_B, h_B, d_size, cudaMemcpyHostToDevice), __LINE__);
    checkCudaOrDie(cudaMalloc((void**) &d_C, d_size), __LINE__);
    // One thread per row, 256 threads per block (integer ceiling division
    // replaces the original host-side ceil((N)/256.0)).
    matAddKernel<<<(N + 255) / 256, 256>>>(d_A, d_B, d_C, N);
    checkCudaOrDie(cudaGetLastError(), __LINE__);   // launch-config errors
    // Blocking copy back; also surfaces asynchronous kernel errors.
    checkCudaOrDie(cudaMemcpy(h_C, d_C, d_size, cudaMemcpyDeviceToHost), __LINE__);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Print the N x N matrix truncated to ints, tab-separated, one row per line.
void printMatrix(float *A)
{
    for (int row = 0; row < N; row++)
    {
        if (row != 0)
            printf("\n");               // newline before every row but the first
        for (int col = 0; col < N; col++)
            printf("%d\t", (int)A[row * N + col]);
    }
}
|
2,484 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define FALSE 0
#define TRUE 1
#define THREADS_PER_BLOCK 32
// Diffusion coefficients for the 2-D explicit heat-update stencil.
struct Parms {
float cx;  // weight of the row-direction second difference
float cy;  // weight of the column-direction second difference
};
void printGridToFile(float *grid, const int totalRows, const int totalColumns, const char *fileName);
__global__
void iniData(const int totalRows, const int totalColumns, float *u1, float *u2);
__global__
void update(const int totalRows, const int totalColumns, const int currentConvergenceCheck, int *convergence, struct Parms *parms, float *oldGrid, float *nextGrid);
/*
 * 2-D heat diffusion on the GPU using unified memory.
 * argv: <STEPS> <ROWS> <COLUMNS> <CONVERGENCE_FLAG>.
 * Ping-pongs between two grids; optionally stops early once no cell
 * changes by more than 1e-2, checking every ~sqrt(steps) iterations.
 */
int main(int argc, char **argv) {
    int steps;
    int convFreqSteps;
    int totalRows, totalColumns;
    int convergenceCheck;
    int currentStep;
    int currentConvergenceCheck;
    int convergenceStep = -1;
    /* ---- Argument check ---- */
    if (argc == 5) {
        steps = atoi(argv[1]);
        totalRows = atoi(argv[2]);
        totalColumns = atoi(argv[3]);
        convergenceCheck = atoi(argv[4]);
    } else {
        /* FIX: the usage line omitted the <STEPS> argument that argv[1]
           actually requires. */
        printf("Usage: heatconv <STEPS> <ROWS> <COLUMNS> <CONVERGENCE_FLAG>\n");
        exit(EXIT_FAILURE);
    }
    /* Convergence is tested every ~sqrt(steps) iterations. */
    convFreqSteps = (int) sqrt(steps);
    /* ---- Unified memory, accessible from CPU and GPU ---- */
    int totalGridSize = totalRows * totalColumns;
    unsigned int totalGridBytesSize = sizeof(float) * totalGridSize;
    dim3 dimBlock(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
    dim3 dimGrid((totalRows + dimBlock.x - 1) / dimBlock.x, (totalColumns + dimBlock.y - 1) / dimBlock.y);
    float *gridOdd, *gridEven;
    cudaMallocManaged(&gridOdd, totalGridBytesSize);
    cudaMallocManaged(&gridEven, totalGridBytesSize);
    iniData<<<dimGrid, dimBlock>>>(totalRows, totalColumns, gridOdd, gridEven);
    cudaDeviceSynchronize();
    struct Parms *parms;
    cudaMallocManaged(&parms, sizeof(struct Parms));
    parms->cx = 0.1f;
    parms->cy = 0.1f;
    int *convergenceResult;
    cudaMallocManaged(&convergenceResult, sizeof(int));
    *convergenceResult = 1;
    /* ---- Main loop: ping-pong between the two grids ---- */
    time_t begin = time(NULL);
    for (currentStep = 0; currentStep < steps; ++currentStep) {
        currentConvergenceCheck = convergenceCheck && currentStep % convFreqSteps == 0;
        if (currentStep % 2)
            update<<<dimGrid, dimBlock>>>(totalRows, totalColumns, currentConvergenceCheck, convergenceResult, parms, gridEven, gridOdd);
        else
            update<<<dimGrid, dimBlock>>>(totalRows, totalColumns, currentConvergenceCheck, convergenceResult, parms, gridOdd, gridEven);
        cudaDeviceSynchronize();
        if (currentConvergenceCheck) {
            if (*convergenceResult) {
                convergenceStep = currentStep;
                break;
            } else *convergenceResult = 1;  /* rearm the flag for the next check */
        }
    }
    time_t end = time(NULL);
    /* ---- Report ---- */
    printf("Results:\n");
    /* FIX: time_t arithmetic is not long long on every platform; cast so
       the argument matches the %lld conversion. */
    printf("- Runtime: %lld sec\n", (long long)(end - begin));
    printf("- Convergence:\n");
    printf("-- checking: %s\n", convergenceCheck ? "YES" : "NO");
    printf("-- achieved: %s\n", *convergenceResult ? "YES" : "NO");
    printf("-- at step: %d\n", convergenceStep);
    /* The freshest grid is the one the last completed step wrote into. */
    printGridToFile(currentStep % 2 ? gridOdd : gridEven, totalRows, totalColumns, "final.dat");
    /* ---- Free resources ---- */
    cudaFree(gridEven);
    cudaFree(gridOdd);
    cudaFree(parms);
    cudaFree(convergenceResult);
    return EXIT_SUCCESS;
}
// Debug helper (currently unused): dump an array-of-row-pointers grid to
// stdout, tab-separated, framed by blank lines.
void printTable(float **grid, int totalRows, int totalColumns) {
    printf("\n");
    for (int row = 0; row < totalRows; ++row) {
        for (int col = 0; col < totalColumns; ++col)
            printf("%.1f\t", grid[row][col]);
        printf("\n");
    }
    printf("\n");
}
/*
 * Write the interior of the flat row-major grid (first/last row and
 * column excluded) to fileName, tab-separated, one grid row per line.
 */
void printGridToFile(float *grid, const int totalRows, const int totalColumns, const char *fileName) {
    int currentRow, currentColumn;
    FILE *fp;
    printf("Writing to file %s...\n", fileName);
    fp = fopen(fileName, "w");
    if (fp == NULL) {
        /* FIX: the original dereferenced a NULL FILE* in fprintf when the
           file could not be opened; report and bail out instead. */
        perror(fileName);
        return;
    }
    for (currentRow = 1; currentRow < totalRows - 1; ++currentRow) {
        for (currentColumn = 1; currentColumn < totalColumns - 1; ++currentColumn)
            fprintf(fp, "\t%6.1f\t", grid[currentRow * totalColumns + currentColumn]);
        fprintf(fp, "\n");
    }
    fclose(fp);
}
__global__
// Initialize both grids with the classic heat-demo bump
//   f(r, c) = r*(R-1-r) * c*(C-1-c), which is zero on the boundary.
// One thread per cell; extra threads past the edges simply return.
void iniData(const int totalRows, const int totalColumns, float *u1, float *u2) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= totalRows || c >= totalColumns)
        return;
    int cell = r * totalColumns + c;
    float v = (float) (r * (totalRows - r - 1) * c * (totalColumns - c - 1));
    u1[cell] = v;
    u2[cell] = v;
}
__global__
// One explicit five-point heat step: nextGrid = oldGrid + cx*d2x + cy*d2y.
// Interior cells only; boundary cells are left untouched (fixed boundary).
// When currentConvergenceCheck is set, any cell that still moved by more
// than 1e-2 clears *convergence (plain store is fine: every writer stores 0).
void update(const int totalRows, const int totalColumns, const int currentConvergenceCheck, int *convergence, struct Parms *parms, float *oldGrid, float *nextGrid) {
int currentRow = blockIdx.x * blockDim.x + threadIdx.x;
int currentColumn = blockIdx.y * blockDim.y + threadIdx.y;
if (currentRow > 0 && currentRow < totalRows - 1 && currentColumn > 0 && currentColumn < totalColumns - 1) {
*(nextGrid + currentRow * totalColumns + currentColumn) = *(oldGrid + currentRow * totalColumns + currentColumn) +
parms->cx * (*(oldGrid + (currentRow + 1) * totalColumns + currentColumn) +
*(oldGrid + (currentRow - 1) * totalColumns + currentColumn) -
2.0 * *(oldGrid + currentRow * totalColumns + currentColumn)) +
parms->cy * (*(oldGrid + currentRow * totalColumns + currentColumn + 1) +
*(oldGrid + currentRow * totalColumns + currentColumn - 1) -
2.0 * *(oldGrid + currentRow * totalColumns + currentColumn));
// convergence test: has this cell settled to within 1e-2?
if (currentConvergenceCheck && fabs((double) *(nextGrid + currentRow * totalColumns + currentColumn) - *(oldGrid + currentRow * totalColumns + currentColumn)) > 1e-2)
*convergence = 0;
}
} |
2,485 | #define NX 8
#define BATCH_SIZE 1
#include "cufft.h"
#include <math.h>
#include <stdio.h>
//#include "soundfile-2.2/libsoundfile.h"
typedef float2 Complex;
/*
 * cuFFT demo: forward real-to-complex FFT of an 8-sample signal.
 * Copies the signal to the device, runs a CUFFT_R2C plan, and prints the
 * real parts of the NX/2+1 output bins.
 */
void testcuFFT(){
    // FIX: the signal buffer was allocated with only BATCH_SIZE (= 1)
    // elements while NX (= 8) are written below -- a heap overflow.
    cufftReal *h_signal = (cufftReal *)malloc(sizeof(cufftReal) * NX * BATCH_SIZE);
    cufftComplex *h_data = (cufftComplex *)malloc(sizeof(cufftComplex) * (NX/2+1)*BATCH_SIZE);
    float ryanSignal [NX] = {0.0,
    1.15443278102,
    1.50377819535,
    0.957393116649,
    0.19925316202,
    0.0408603874003,
    0.663651234058,
    1.44858683588};
    // Initialize and echo the input signal.
    for (unsigned int i = 0; i < NX; ++i)
    {
        h_signal[i] = ryanSignal[i];
        printf("h_signal[%u]: %f\n", i, h_signal[i]);
    }
    cufftHandle plan;
    cufftComplex *d_data;
    cufftReal *d_signal;
    cudaMalloc((void**)&d_data, sizeof(cufftComplex)*(NX/2+1)*BATCH_SIZE);
    cudaMalloc((void**)&d_signal, sizeof(cufftReal)*NX);
    cudaMemcpy(d_signal, h_signal, sizeof(cufftReal)*NX, cudaMemcpyHostToDevice);
    free(h_signal);
    if(cudaGetLastError() != cudaSuccess){
        fprintf(stderr, "Cuda error: Failed to allocate\n");
        return;
    }
    if(cufftPlan1d(&plan, NX, CUFFT_R2C, BATCH_SIZE) != CUFFT_SUCCESS){
        fprintf(stderr, "CUFFT error: Plan creation failed");
        return;
    }
    // Forward transform, out of place: d_signal -> d_data.
    if(cufftExecR2C(plan, (cufftReal*)d_signal, d_data) != CUFFT_SUCCESS){
        fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
        return;
    }
    // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    if(cudaDeviceSynchronize() != cudaSuccess){
        fprintf(stderr, "Cuda error: Failed to synchronize\n");
        return;
    }
    cudaMemcpy(h_data,d_data,sizeof(cufftComplex)*BATCH_SIZE * (NX/2+1),cudaMemcpyDeviceToHost);
    // FIX: an R2C transform produces only NX/2+1 bins per batch; the old
    // loop printed 10 entries and read past the end of h_data.
    for(unsigned int k = 0; k < (NX/2+1)*BATCH_SIZE; k++){
        printf("h_data[%u]: %f\n", k, h_data[k].x);
    }
    cufftDestroy(plan);
    cudaFree(d_data);
    cudaFree(d_signal);   // was leaked
    free(h_data);         // was leaked
}
// Entry point: run the single cuFFT round-trip demo.
int main(){
testcuFFT();
return 0;
}
|
2,486 | //
// Created by gautam on 18/04/20.
//
#include "tokenizer.cuh"
const char tokenizer::delims1[] = {' ', '\t'};
const char tokenizer::delims2[] = {',', ';', '(', ')'};
const int tokenizer::DELIM1_SIZE = 2;
const int tokenizer::DELIM2_SIZE = 4;
// Build a tokenizer over one SQL-like statement: lower-cases the input,
// truncates at the first ';', and strips leading whitespace so the first
// nextToken() call starts on a real character.
tokenizer::tokenizer(std::string &s) {
this->query = s;
utils::toLower(this->query);  // project helper; mutates the string in place
query = query.substr(0, query.find(';'));
ltrim(query);
}
// Stream-style extraction: fills s with the next token; returns false
// once the query is exhausted.
bool tokenizer::operator>>(std::string &s) {
return nextToken(s);
}
/*
 * Extract the next token into s and consume it from `query`.
 * Tokens are runs of ordinary characters; the special characters in
 * delims2 (',' ';' '(' ')') are tokens of their own; delims1 characters
 * (space/tab) only separate tokens. Returns false when no input remains.
 */
bool tokenizer::nextToken(std::string &s) {
    if (query.empty())
        return false;
    s = "";
    bool hitDelim = false;  // did the loop stop at a delimiter (query updated)?
    for (size_t i = 0; i < query.size(); ++i) {
        char ch = query[i];
        if (find(delims1, DELIM1_SIZE, ch)) {           // whitespace: token done
            query = query.substr(i, query.size());
            ltrim(query);
            hitDelim = true;
            break;
        }
        if (!find(delims2, DELIM2_SIZE, ch)) {          // ordinary character
            s += ch;
            continue;
        }
        // Special character:
        if (s.empty()) {                                // it is itself the token
            s = ch;
            if (i + 1 < query.size()) {
                query = query.substr(i + 1, query.size());
                ltrim(query);
            } else {
                query = "";
            }
        } else {                                        // it terminates the token
            query = query.substr(i, query.size());
            ltrim(query);
        }
        hitDelim = true;
        break;
    }
    // FIX: a token running to the very end of the query used to leave
    // `query` untouched, so every subsequent call returned that same token
    // forever; clear the input once it is fully consumed.
    if (!hitDelim)
        query = "";
    return true;
}
// Strip leading delims1 (space/tab) characters from s in place.
void tokenizer::ltrim(std::string &s) {
    size_t start = 0;
    while (start < s.size() && find(delims1, DELIM1_SIZE, s[start]))
        ++start;
    s = (start == s.size()) ? "" : s.substr(start, s.size());
}
// Linear membership test: true iff ch occurs in arr[0..size-1].
bool tokenizer::find(const char *arr, int size, char ch) {
    for (int k = 0; k < size; ++k)
        if (arr[k] == ch)
            return true;
    return false;
}
|
2,487 | #include <iostream>
#define DEFAULT_BLOCK_COUNT 128
#define DEFAULT_TPB_COUNT 128
using namespace std;
typedef struct {
int x;
int y;
} GridSize;
typedef struct {
int x;
int y;
int z;
} BlockSize;
GridSize gs = {1, 1};
BlockSize bs = {1, 1, 1};
int blockCnt = DEFAULT_BLOCK_COUNT;
int tpbCnt = DEFAULT_TPB_COUNT;
int totalThreads;
int * id;
//Declaration of pointers to CPU memory (host)
int * blockx_h, * blocky_h;
int * idx_h, * idy_h, * idz_h;
//Declaration of pointers to GPU memory (device)
int * blockx_d, * blocky_d;
int * idx_d, * idy_d, * idz_d;
// Each thread records its own block/thread coordinates at its global
// linear id so the host can inspect the launch geometry afterwards.
// All five arrays must hold one int per launched thread.
__global__ void MyFirstKernel(
    int * blkx, int * blky,
    int * idx, int * idy, int * idz) {
    int blockId = blockIdx.y * gridDim.x + blockIdx.x;
    int threadsPerBlock = blockDim.z * blockDim.y * blockDim.x;
    // local (within-block) linear id, z-major then y then x
    int localId = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x
                  + threadIdx.x;
    int gid = blockId * threadsPerBlock + localId;
    blkx[gid] = blockIdx.x;
    blky[gid] = blockIdx.y;
    idx[gid] = threadIdx.x;
    idy[gid] = threadIdx.y;
    idz[gid] = threadIdx.z;
}
/*
 * Parse launch geometry from argv[1..5] = gs.y gs.x bs.z bs.y bs.x
 * (all positive integers) into the global gs/bs structs.
 * Returns 0 on success, -1 on any error (message already printed).
 */
int ParseArguments(int argc, char ** argv) {
    if (argc == 1) {
        cout << "Usage: " << argv[0] << " [gs.y] [gs.x] [bs.z] [bs.y] [bs.x]" << endl;
        return -1;
    }
    if (argc != 6) {
        // FIX: this branch also fires on *too many* arguments, so the old
        // "Not enough arguments" message was misleading.
        cout << "Error: Expected exactly 5 arguments." << endl;
        return -1;
    }
    for (int i = 1; i < 6; i++) {
        if (atoi(argv[i]) <= 0) {
            cout << "Error: Invalid arguments" << endl;
            return -1;
        }
    }
    gs.y = atoi(argv[1]);
    gs.x = atoi(argv[2]);
    bs.z = atoi(argv[3]);
    bs.y = atoi(argv[4]);
    bs.x = atoi(argv[5]);
    // CUDA caps a block at 1024 threads.
    if (bs.x * bs.y * bs.z > 1024) {
        cout << "Error: Too many threads per block (<= 1024)" << endl;
        return -1;
    }
    return 0;
}
// Abort with the CUDA error string on failure; no-op on cudaSuccess.
void CheckCudaError(cudaError_t ce) {
    if (ce != cudaSuccess) {
        cout << "Error: " << cudaGetErrorString(ce) << endl;
        exit(-1);
    }
}
// Allocate the five per-thread host result arrays (one int slot each).
// Returns 0 on success, -1 if any allocation failed.
// NOTE(review): a partial failure leaks the arrays allocated before the
// throw; callers exit immediately on failure, so this is tolerated.
int AllocateHostMemory(int totalThreads) {
    try {
        blockx_h = new int[totalThreads];
        blocky_h = new int[totalThreads];
        idx_h = new int[totalThreads];
        idy_h = new int[totalThreads];
        idz_h = new int[totalThreads];
    }
    catch (const bad_alloc &) {   // FIX: catch by const reference, not by value
        return -1;
    }
    return 0;
}
// Driver: parse launch geometry from argv, launch MyFirstKernel so every
// thread records its coordinates, then print one line per thread.
int main(int argc, char ** argv) {
if (ParseArguments(argc, argv))
exit(-1);
// Total threads in the whole grid; each array needs one int per thread.
totalThreads = gs.x * gs.y * bs.x * bs.y * bs.z;
int totalMem = totalThreads * sizeof(int);
if (AllocateHostMemory(totalThreads)) {
cout << "Error: Memory allocation on host failed." << endl;
exit(-1);
}
//Allocate memory on GPU to store block identifiers
CheckCudaError( cudaMalloc(&blockx_d, totalMem));
CheckCudaError( cudaMalloc(&blocky_d, totalMem));
//Allocate memory on GPU to store thread identifiers
CheckCudaError( cudaMalloc(&idx_d, totalMem));
CheckCudaError( cudaMalloc(&idy_d, totalMem));
CheckCudaError( cudaMalloc(&idz_d, totalMem));
//Clear allocated memory block on GPU for storing block identifiers to 0
CheckCudaError( cudaMemset(blockx_d, 0, totalMem));
CheckCudaError( cudaMemset(blocky_d, 0, totalMem));
//Clear allocated memory block on GPU for storing thread identifiers to 0
CheckCudaError( cudaMemset(idx_d, 0, totalMem));
CheckCudaError( cudaMemset(idy_d, 0, totalMem));
CheckCudaError( cudaMemset(idz_d, 0, totalMem));
//Invoke the kernel
dim3 gridDim(gs.x, gs.y);
dim3 blockDim(bs.x, bs.y, bs.z);
MyFirstKernel <<<gridDim, blockDim>>>(
blockx_d, blocky_d, idx_d, idy_d, idz_d);
// NOTE(review): the launch itself is unchecked; a cudaGetLastError() here
// would surface bad launch configurations.
cudaDeviceSynchronize();
//Copying data generated by the kernel from GPU back to CPU
CheckCudaError(
cudaMemcpy(blockx_h, blockx_d, totalMem, cudaMemcpyDeviceToHost));
CheckCudaError(
cudaMemcpy(blocky_h, blocky_d, totalMem, cudaMemcpyDeviceToHost));
CheckCudaError(
cudaMemcpy(idx_h, idx_d, totalMem, cudaMemcpyDeviceToHost));
CheckCudaError(
cudaMemcpy(idy_h, idy_d, totalMem, cudaMemcpyDeviceToHost));
CheckCudaError(
cudaMemcpy(idz_h, idz_d, totalMem, cudaMemcpyDeviceToHost));
// One row per thread: [gid]  blocky  blockx  idz  idy  idx
for (int i=0;i<totalThreads;i++)
cout << "[" << i << "]\t" <<
blocky_h[i] << "\t" <<
blockx_h[i] << "\t" <<
idz_h[i] << "\t" <<
idy_h[i] << "\t" <<
idx_h[i] << endl;
return 0;
}
|
2,488 | #include "includes.h"
// Rotate src by theta about the image center into dst (accumulating into
// dst), sampling the rotated position with bilinear interpolation.
// Grid-stride over rows j; each thread walks all columns of its rows.
__global__ void cuda_rotate_internal_kernel(float* dst, const float* src, float theta, const int nx, const int ny)
{
// this is flawed and should not be production
int src_size = nx * ny;
// rotation center at the middle of the pixel grid
float xoff = (0.5f * nx) - 0.5f;
float yoff = (0.5f * ny) - 0.5f;
int j0 = blockIdx.x * blockDim.x + threadIdx.x;
int jstride = blockDim.x * gridDim.x;
for(int j = j0; j < ny; j += jstride)
{
for(int i = 0; i < nx; ++i)
{
// coordinates relative to the rotation center
float rx = float(i) - xoff;
float ry = float(j) - yoff;
// apply the 2-D rotation matrix
float tx = rx * cosf(theta) + -ry * sinf(theta);
float ty = rx * sinf(theta) + ry * cosf(theta);
// back to absolute image coordinates
float x = (tx + xoff);
float y = (ty + yoff);
// destination index in the flat row-major array
int rz = j * nx + i;
auto index = [&](int _x, int _y) { return _y * nx + _x; };
// the four integer neighbours surrounding the sample point (x, y)
int x1 = floorf(tx + xoff);
int y1 = floorf(ty + yoff);
int x2 = x1 + 1;
int y2 = y1 + 1;
float fxy1 = 0.0f;
float fxy2 = 0.0f;
int ixy11 = index(x1, y1);
int ixy21 = index(x2, y1);
int ixy12 = index(x1, y2);
int ixy22 = index(x2, y2);
// NOTE(review): the flat-range guard does not reject x outside
// [0, nx), so samples can wrap across row edges -- part of why the
// header comment declares this flawed.
if(ixy11 >= 0 && ixy11 < src_size)
fxy1 += (x2 - x) * src[ixy11];
if(ixy21 >= 0 && ixy21 < src_size)
fxy1 += (x - x1) * src[ixy21];
if(ixy12 >= 0 && ixy12 < src_size)
fxy2 += (x2 - x) * src[ixy12];
if(ixy22 >= 0 && ixy22 < src_size)
fxy2 += (x - x1) * src[ixy22];
// blend the two row interpolants and accumulate into dst
dst[rz] += (y2 - y) * fxy1 + (y - y1) * fxy2;
}
}
} |
2,489 | #include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <cuda.h>
#include <math.h>
#define ALPHABET_SIZE 26
#define CHUNK_SIZE 64
#define MAX_THREADS 64
#define ASCII_CONST 97
#define DEBUG 0
// Letter histogram: each thread tallies its chunk_size slice of dev_text
// into a private ALPHABET_SIZE stripe of dynamic shared memory, then the
// block's stripes are folded into dev_hist[blockIdx.x*26 .. +25].
// Launch must supply blockDim.x * ALPHABET_SIZE * sizeof(int) dynamic smem;
// `max` is the number of valid threads (one per chunk).
__global__ void compute_hist(char *dev_text, unsigned int *dev_hist, unsigned int chunk_size, unsigned int max) {
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    extern __shared__ int hist[];
    unsigned int i = tid * chunk_size,
        text_end = i + chunk_size,
        offset = threadIdx.x * ALPHABET_SIZE,
        block_start = blockIdx.x * ALPHABET_SIZE;
    int c = 0;
    // FIX: valid thread ids are 0..max-1, so tid == max must also bail
    // (the old `tid > max` let one extra thread read past the text).
    if (tid >= max)
        return;
    // FIX: shared memory is uninitialized; zero this thread's stripe before
    // counting. No barrier needed -- only this thread touches its stripe
    // until the __syncthreads below.
    for (unsigned int j = 0; j < ALPHABET_SIZE; j++)
        hist[offset + j] = 0;
    for (; i < text_end; i++) {
        if ((c = dev_text[i]) == '\0')
            break;
        c = (c | (1 << 5)) - ASCII_CONST;  // cheap ASCII tolower, then 'a' -> 0
        if (c >= 0 && c < ALPHABET_SIZE)
            hist[c + offset]++;
    }
#if DEBUG
    printf("tid: %d, block: %d, start: %d, end: %d, hist offset: %d\n", tid, blockIdx.x, i, text_end, offset);
    for (i = offset; i < ALPHABET_SIZE + offset; i++)
        if (hist[i] != 0)
            printf("%d: %c: %d\n", tid, (i % ALPHABET_SIZE) + ASCII_CONST, hist[i]);
#endif
    __syncthreads();
    // Fold every thread's stripe into this block's slot of the global hist.
    for (i = 0; i < ALPHABET_SIZE; i++)
        atomicAdd(&dev_hist[i + block_start], hist[i + offset]);
}
// Fold the per-block histograms into the first ALPHABET_SIZE slots of
// dev_hist. Launched with exactly ALPHABET_SIZE threads: thread tid owns
// the count for letter tid and walks the remaining blocks' slots.
__global__ void sum_hist(unsigned int *dev_hist, unsigned int blocks) {
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
int i;
#if DEBUG
// keep number of blocks small to maintain formatting
if(tid == 0) {
int j;
printf("total blocks: %d\n\n", blocks);
for(i = 0; i < blocks; i++)
printf("\tblock %d:", i);
printf("\n");
for(i = 0; i < ALPHABET_SIZE; i++) {
printf("%c:\t", i + ASCII_CONST);
for(j = 0; j < blocks; j++)
printf("%d\t\t", dev_hist[i + (j * ALPHABET_SIZE)]);
printf("\n");
}
}
#endif
// NOTE(review): each thread touches only its own letter's addresses, so
// the atomicAdd is stronger than strictly necessary here.
for(i = 1; i < blocks; i++)
//dev_hist[tid] += dev_hist[tid + i * ALPHABET_SIZE];
atomicAdd(&dev_hist[tid], dev_hist[tid +i * ALPHABET_SIZE]);
}
/*
 * Host driver: read the file named by argv[1], count letter frequencies on
 * the GPU (one thread per CHUNK_SIZE slice), and print the totals plus
 * kernel timings.
 */
int main(int argc, char **argv) {
    FILE *fp;
    char *text,
        *dev_text;
    unsigned int *dev_hist;
    int BLOCKS = 0,
        THREADS = 0,
        sz = 0,
        i;
    float time_1, time_2;
    cudaEvent_t start,
        stop;
    if(argc != 2) {
        printf("enter file name as first argument\n");
        return 1;
    }
    fp = fopen(argv[1], "r");
    if(fp == NULL) {              // FIX: fseek on a NULL stream crashed
        perror(argv[1]);
        return 1;
    }
    fseek(fp, 0, SEEK_END);
    printf("length of file: %d\n", sz = ftell(fp)+1);  // +1 for terminator
    fseek(fp, 0, SEEK_SET);
    text = (char *)malloc(sz * sizeof(char));
    fread(text, sz - 1, 1, fp);   // file holds sz-1 bytes
    // FIX: the last byte was never written; the kernel stops at '\0', so
    // terminate the buffer explicitly.
    text[sz - 1] = '\0';
    fclose(fp);                   // was leaked
    printf("chunk size: %d\n", CHUNK_SIZE);
    // FIX: ceil(sz/CHUNK_SIZE) rounded *down* (integer division happens
    // before ceil), so the trailing partial chunk was never counted.
    printf("total threads: %d\n", THREADS = (sz + CHUNK_SIZE - 1) / CHUNK_SIZE);
    int max = THREADS;
    // Block count: one block per MAX_THREADS threads, rounded up.
    while(THREADS > 0) {
        THREADS -= MAX_THREADS;
        BLOCKS++;
    }
    int hist[ALPHABET_SIZE * BLOCKS];
    cudaMalloc((void **) &dev_hist, BLOCKS * ALPHABET_SIZE * sizeof(int));
    cudaMalloc((void **) &dev_text, sz * sizeof(char));
    cudaMemcpy(dev_text, text, sz * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemset(dev_hist, 0, BLOCKS*ALPHABET_SIZE * sizeof(int));
    printf("blocks: %d\n", BLOCKS);
    printf("threads per block: %d\n", MAX_THREADS);
    printf("leftover threads: %d\n", THREADS+MAX_THREADS);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Phase 1: per-block partial histograms in dynamic shared memory.
    cudaEventRecord(start, 0);
    compute_hist<<<BLOCKS, MAX_THREADS, MAX_THREADS * ALPHABET_SIZE * sizeof(int)>>>(dev_text, dev_hist, CHUNK_SIZE, max);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_1, start, stop);
    cudaDeviceSynchronize();
    // Phase 2: fold the per-block histograms into the first slot.
    cudaEventRecord(start, 0);
    sum_hist<<<1,ALPHABET_SIZE>>>(dev_hist, BLOCKS);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_2, start, stop);
    cudaMemcpy(hist, dev_hist, ALPHABET_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nsum of characters:\n");
    for(i = 0; i < ALPHABET_SIZE; i++)
        printf("%c: %d\n", i + 97, hist[i]);
    printf("time to make buckets: \t%3.3f ms\n", time_1);
    printf("time to sum hist: \t%3.3f ms\n", time_2);
    printf("total time to run: \t%3.3f ms\n", time_1 + time_2);
    cudaFree(dev_hist);
    cudaFree(dev_text);
    free(text);                   // was leaked
}
|
2,490 | // Copyright (c) 2020 Saurabh Yadav
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
/* This example to analyse practically the performance benefits of
using tiled algorithms that use shared memory of the gpu */
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define MAT_A_TOTAL_ROWS 4000U
#define MAT_A_TOTAL_COLS 5000U
#define MAT_B_TOTAL_ROWS MAT_A_TOTAL_COLS
#define MAT_B_TOTAL_COLS 6000U
#define TILE_WIDTH 16
__global__
// Grid-stride fill: set all width*height entries of matrix to val,
// regardless of how many threads were launched.
void init_matrix(float *matrix, int width, int height, float val) {
    const int total = width * height;
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += stride)
        matrix[i] = val;
}
__global__
// C = A x B with shared-memory tiling. Each block computes one
// TILE_WIDTH x TILE_WIDTH tile of C, streaming matching tiles of A and B
// through shared memory; requires blockDim == (TILE_WIDTH, TILE_WIDTH).
// Out-of-range tile entries are zero-padded so the inner product loop
// needs no bounds checks.
void tiled_matrix_multiplication(float * mat_A_arr, float * mat_B_arr, float * mat_C_arr,
int num_A_rows, int num_A_cols, int num_B_cols) {
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH]; // tiled shared memory for matrix A
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH]; // tiled shared memory for matrix B
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = ty + by * blockDim.y;  // row of C this thread owns
int col = tx + bx * blockDim.x;  // column of C this thread owns
float c_value = 0.0;
// March the tiles along A's columns / B's rows.
for(size_t t=0; t<((num_A_cols-1)/TILE_WIDTH+1); t++) {
if( row < num_A_rows && (t*TILE_WIDTH+tx) < num_A_cols ) {
ds_A[ty][tx] = mat_A_arr[row*num_A_cols + t*TILE_WIDTH+tx];
} else {
ds_A[ty][tx] = 0.0;  // zero-pad outside the matrix
}
if( (t*TILE_WIDTH+ty) < num_A_cols && col < num_B_cols ) {
ds_B[ty][tx] = mat_B_arr[(t*TILE_WIDTH+ty)*num_B_cols + col];
} else {
ds_B[ty][tx] = 0.0;
}
// both tiles fully loaded before anyone reads them
__syncthreads();
for(size_t i=0; i<TILE_WIDTH; i++) {
c_value += ds_A[ty][i] * ds_B[i][tx];
}
// finish reading before the next iteration overwrites the tiles
__syncthreads();
}
if (row < num_A_rows && col < num_B_cols) {
mat_C_arr[row*num_B_cols + col] = c_value;
}
}
// Driver: build A (4000x5000, all 1s) and B (5000x6000, all 2s) in managed
// memory, multiply them on the GPU with the tiled kernel, and exit.
int main() {
cudaError_t err = cudaSuccess;
float *mat_A, *mat_B, *mat_C;
size_t memsize_A = MAT_A_TOTAL_ROWS * MAT_A_TOTAL_COLS * sizeof(float);
size_t memsize_B = MAT_B_TOTAL_ROWS * MAT_B_TOTAL_COLS * sizeof(float);
size_t memsize_C = MAT_A_TOTAL_ROWS * MAT_B_TOTAL_COLS * sizeof(float);
/* Allocate memories for the matrices*/
err = cudaMallocManaged(&mat_A, memsize_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix A (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMallocManaged(&mat_B, memsize_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix B (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMallocManaged(&mat_C, memsize_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix C (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Initialize matrices A and B with grid-stride fill kernels */
int blocksize_for_init = 256;
int blocks_for_matA = (MAT_A_TOTAL_ROWS*MAT_A_TOTAL_COLS + blocksize_for_init - 1)
/ (blocksize_for_init);
int blocks_for_matB = (MAT_B_TOTAL_ROWS*MAT_B_TOTAL_COLS + blocksize_for_init - 1)
/ (blocksize_for_init);
init_matrix<<<blocks_for_matA, blocksize_for_init>>>(mat_A, MAT_A_TOTAL_COLS,
MAT_A_TOTAL_ROWS, 1);
init_matrix<<<blocks_for_matB, blocksize_for_init>>>(mat_B, MAT_B_TOTAL_COLS,
MAT_B_TOTAL_ROWS, 2);
err = cudaGetLastError();
if( err != cudaSuccess) {
fprintf(stderr, "Failed to initialize matrix (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Do the matrix multiplication: one thread per C element, one
TILE_WIDTH x TILE_WIDTH block per output tile */
size_t blocksizeX = TILE_WIDTH;
size_t blocksizeY = TILE_WIDTH;
dim3 DimGrid( (MAT_B_TOTAL_COLS-1)/blocksizeX + 1, (MAT_A_TOTAL_ROWS-1)/blocksizeY + 1);
dim3 DimBlock( blocksizeX, blocksizeY);
tiled_matrix_multiplication<<<DimGrid, DimBlock>>>(mat_A, mat_B, mat_C,
MAT_A_TOTAL_ROWS, MAT_A_TOTAL_COLS, MAT_B_TOTAL_COLS);
err = cudaGetLastError();
/* NOTE(review): the message below says "addition" but this is the multiply
launch; also mat_A/B/C are never cudaFree'd (process exit reclaims them). */
if( err != cudaSuccess) {
fprintf(stderr, "Failed to perform matrix addition (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
return EXIT_SUCCESS;
} |
2,491 | #include <iostream>
// Report a failed CUDA runtime call with its source location and abort;
// no-op on cudaSuccess (wrapped by the HANDLE_ERROR macro in this file).
static void HandleError(cudaError_t err,
                        const char *file,
                        int line) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Query the current device, then ask the runtime for the device closest to
// compute capability 6.1 and select it.
int main(void) {
cudaDeviceProp prop; int dev;
HANDLE_ERROR(cudaGetDevice(&dev));
printf("ID of current CUDA device: %d\n", dev);
// Zero the descriptor, then set only the fields we want matched.
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 6;
prop.minor = 1;
HANDLE_ERROR(cudaChooseDevice(&dev, &prop));
printf("ID of CUDA device closest to revision 6.1: %d\n", dev); HANDLE_ERROR(cudaSetDevice(dev));
// NOTE(review): system("pause") only works on Windows; elsewhere it is a
// harmless failed shell command.
system("pause");
return 0;
}
|
2,492 | // scan sample
#include <thrust/scan.h>
#include <iostream>
// Print `len` ints on one line, space-separated.
// FIX: the original printed the values back-to-back with no separator,
// so e.g. {1, 0, 2} came out as the unreadable "102".
void print_array(int* data, int len){
    for(int i=0; i<len; i++){
        std::cout << data[i] << ' ';
    }
    std::cout << std::endl;
}
// Demonstrate Thrust inclusive vs exclusive prefix sums on a small array.
int main(void){
const int len = 6;
int data[len] = {1,0,2,2,1,3};
int inout[len];
// inclusive: out[i] = data[0] + ... + data[i]      -> 1 1 3 5 6 9
thrust::inclusive_scan(data, data+len, inout);
print_array(inout, len);
int exout[len];
// exclusive: out[i] = data[0] + ... + data[i-1]    -> 0 1 1 3 5 6
thrust::exclusive_scan(data, data+len, exout);
print_array(exout, len);
return 0;
}
|
2,493 | //
// (C) 2021, E. Wes Bethel
// sobel_gpu.cpp
// usage:
// sobel_gpu [no args, all is hard coded]
//
#include <iostream>
#include <vector>
#include <chrono>
#include <unistd.h>
#include <string.h>
#include <math.h>
// see https://en.wikipedia.org/wiki/Sobel_operator
// easy-to-find and change variables for the input.
// specify the name of a file containing data to be read in as bytes, along with
// dimensions [columns, rows]
// this is the original laughing zebra image
//static char input_fname[] = "../data/zebra-gray-int8";
//static int data_dims[2] = {3556, 2573}; // width=ncols, height=nrows
//char output_fname[] = "../data/processed-raw-int8-cpu.dat";
// this one is a 4x augmentation of the laughing zebra
static char input_fname[] = "../data/zebra-gray-int8-4x";
static int data_dims[2] = {7112, 5146}; // width=ncols, height=nrows
char output_fname[] = "../data/processed-raw-int8-4x-gpu.dat";
// see https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
// macro to check for cuda errors. basic idea: wrap this macro around every cuda call
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with its source location; terminate unless abort=false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
//
// this function is callable only from device code
//
// perform the sobel filtering at a given i,j location
// input: float *s - the source data
// input: int i,j - the location of the pixel in the source data where we want to center our sobel convolution
// input: int nrows, ncols: the dimensions of the input and output image buffers
// input: float *gx, gy: arrays of length 9 each, these are logically 3x3 arrays of sobel filter weights
//
// this routine computes Gx=gx*s centered at (i,j), Gy=gy*s centered at (i,j),
// and returns G = sqrt(Gx^2 + Gy^2)
// see https://en.wikipedia.org/wiki/Sobel_operator
//
// Compute the Sobel gradient magnitude at pixel (i, j).
//
// s           source image, row-major, nrows x ncols
// i, j        center of the 3x3 convolution (caller guarantees an interior pixel)
// gx, gy      length-9 arrays holding the 3x3 Sobel weights
// returns     G = sqrt(Gx^2 + Gy^2)
//
// The original used double-precision pow() and sqrt() inside a float kernel;
// Gx*Gx and sqrtf() keep the whole computation in single precision.
__device__ float
sobel_filtered_pixel(float *s, int i, int j, int ncols, int nrows, float *gx, float *gy) {
    float Gx = 0.0f;
    float Gy = 0.0f;
    for (int m = 0; m < 3; m++) {
        int x = i - 1 + m;                 // source row for this stencil row
        for (int n = 0; n < 3; n++) {
            int y = j - 1 + n;             // source column for this stencil column
            float p = s[x * ncols + y];    // load once, used by both gradients
            Gx += gx[3 * m + n] * p;
            Gy += gy[3 * m + n] * p;
        }
    }
    return sqrtf(Gx * Gx + Gy * Gy);
}
//
// this function is the kernel that runs on the device
//
// this code will look at CUDA variables: blockIdx, blockDim, threadIdx, blockDim and gridDim
// to compute the index/stride to use in striding through the source array, calling the
// sobel_filtered_pixel() function at each location to do the work.
//
// input: float *s - the source data, size=rows*cols
// input: int i,j - the location of the pixel in the source data where we want to center our sobel convolution
// input: int nrows, ncols: the dimensions of the input and output image buffers
// input: float *gx, gy: arrays of length 9 each, these are logically 3x3 arrays of sobel filter weights
// output: float *d - the buffer for the output, size=rows*cols.
//
// Grid-stride kernel: each thread walks the image with stride gridDim*blockDim,
// writing 0 on the 1-pixel border and the Sobel magnitude everywhere else.
__global__ void
sobel_kernel_gpu(float *s, // source image pixels
    float *d, // dst image pixels
    int n, // size of image cols*rows,
    int nrows,
    int ncols,
    float *gx, float *gy) // gx and gy are stencil weights for the sobel filter
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    const int total = nrows * ncols;

    for (int idx = first; idx < total; idx += step) {
        const int row = idx / ncols;
        const int col = idx % ncols;
        // border pixels have no full 3x3 neighborhood; zero them out
        const bool border = (row == 0) || (row == nrows - 1) ||
                            (col == 0) || (col == ncols - 1);
        if (border)
            d[idx] = 0;
        else
            d[idx] = sobel_filtered_pixel(s, row, col, ncols, nrows, gx, gy);
    }
}
// Driver: read raw 8-bit pixels, run the Sobel kernel on the GPU via unified
// memory, and write the filtered result back out as bytes.
// Optional args: argv[1] = #blocks, argv[2] = #threads per block.
int
main(int ac, char *av[]) {
    // input, output file names hard coded at top of file
    off_t nvalues = (off_t)data_dims[0] * data_dims[1];   // total pixel count
    unsigned char *in_data_bytes = (unsigned char *) malloc(sizeof(unsigned char) * nvalues);
    if (in_data_bytes == NULL) {
        printf("Error allocating input buffer. \n");
        return 1;
    }

    // load the input file (raw grayscale bytes)
    FILE *f = fopen(input_fname, "r");
    if (f == NULL) {                       // original dereferenced a NULL FILE* on a missing file
        printf("Error opening input file. \n");
        free(in_data_bytes);
        return 1;
    }
    // fread returns the number of ITEMS read; compare against nvalues, not a byte product
    if (fread((void *) in_data_bytes, sizeof(unsigned char), nvalues, f) != (size_t) nvalues) {
        printf("Error reading input file. \n");
        fclose(f);
        free(in_data_bytes);
        return 1;
    } else
        printf(" Read data from the file %s \n", input_fname);
    fclose(f);

#define ONE_OVER_255 0.003921568627451
    // convert input from byte, in range 0..255, to float, in range 0..1
    float *in_data_floats;
    gpuErrchk(cudaMallocManaged(&in_data_floats, sizeof(float) * nvalues));
    for (off_t i = 0; i < nvalues; i++)
        in_data_floats[i] = (float) in_data_bytes[i] * ONE_OVER_255;

    // output buffer, initialized to "white" for debug visibility
    float *out_data_floats;
    gpuErrchk(cudaMallocManaged(&out_data_floats, sizeof(float) * nvalues));
    for (off_t i = 0; i < nvalues; i++)
        out_data_floats[i] = 1.0;

    // sobel filter weights, copied to device-accessible buffers
    float Gx[9] = {1.0, 0.0, -1.0, 2.0, 0.0, -2.0, 1.0, 0.0, -1.0};
    float Gy[9] = {1.0, 2.0, 1.0, 0.0, 0.0, 0.0, -1.0, -2.0, -1.0};
    float *device_gx, *device_gy;
    // sizeof(Gx) is already a byte count; the original multiplied by
    // sizeof(float) again and over-allocated 4x
    gpuErrchk(cudaMallocManaged(&device_gx, sizeof(Gx)));
    gpuErrchk(cudaMallocManaged(&device_gy, sizeof(Gy)));
    for (int i = 0; i < 9; i++) {
        device_gx[i] = Gx[i];
        device_gy[i] = Gy[i];
    }

    // induce memory movement to the GPU of the unified-memory buffers
    int deviceID = 0; // assume GPU#0, always. OK assumption for this program
    cudaMemPrefetchAsync((void *) in_data_floats, nvalues * sizeof(float), deviceID);
    cudaMemPrefetchAsync((void *) out_data_floats, nvalues * sizeof(float), deviceID);
    cudaMemPrefetchAsync((void *) device_gx, sizeof(Gx), deviceID);
    cudaMemPrefetchAsync((void *) device_gy, sizeof(Gy), deviceID);

    // launch configuration, optionally overridden from the command line
    int nBlocks = 1, nThreadsPerBlock = 256;
    if (ac == 3) {
        nBlocks = atoi(av[1]);
        nThreadsPerBlock = atoi(av[2]);
    }
    printf(" GPU configuration: %d blocks, %d threads per block \n", nBlocks, nThreadsPerBlock);

    // invoke the kernel on the device
    sobel_kernel_gpu<<<nBlocks, nThreadsPerBlock>>>(in_data_floats, out_data_floats, nvalues, data_dims[1],
                                                    data_dims[0], device_gx, device_gy);
    gpuErrchk(cudaGetLastError());         // catch launch-configuration errors
    gpuErrchk(cudaDeviceSynchronize());    // wait for the kernel, surface runtime errors

    // convert floats in 0..1 back to bytes in 0..255 for output
    unsigned char *out_data_bytes = in_data_bytes; // just reuse the buffer from before
    for (off_t i = 0; i < nvalues; i++)
        out_data_bytes[i] = (unsigned char) (out_data_floats[i] * 255.0);

    f = fopen(output_fname, "w");
    if (f == NULL) {
        printf("Error opening output file. \n");
        return 1;
    }
    if (fwrite((void *) out_data_bytes, sizeof(unsigned char), nvalues, f) != (size_t) nvalues) {
        printf("Error writing output file. \n");
        fclose(f);
        return 1;
    } else
        printf(" Wrote the output file %s \n", output_fname);
    fclose(f);

    // release host and unified-memory buffers (original leaked all of them)
    free(in_data_bytes);
    cudaFree(in_data_floats);
    cudaFree(out_data_floats);
    cudaFree(device_gx);
    cudaFree(device_gy);
    return 0;
}
// eof
|
2,494 |
// Babak Poursartip
// 10/01/2020
// profile/profiling with nvprof
/*
nvprof modes:
1- summary mode
2- GPU and API trace mode
3- event metrics summary mode
4- event, metrics trace mode
- To run nvprof, first create the executable (nvcc file.cu -o file.out). Then,
profile using: nvprof ./file.out (This would be the summary mode)
metrics:
- sm_efficiency
- achieved_occupancy
- branch_efficiency
- gld_efficiency
- gld_throughput
- dram_read_throughput
-inst_per_warp
- stall_sync
- To run with metrics: nvprof --metrics
gld_efficiency,sm_efficiency,achieved_occupancy ./file.out
*/
#include <cstdio>
#include <iostream>
// =================================
// cuda error check macro
#define gpuErrchk(ans) \
{ gpuAssert(ans, __FILE__, __LINE__); }
// Print a CUDA error (with file/line) and, by default, exit the process.
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
  if (code == cudaSuccess)
    return;
  fprintf(stderr, "GPUassert: %s. File: %s, line: %d. \n",
          cudaGetErrorString(code), file, line);
  if (abort) {
    // printf(" Exists from the gpuErrorCheck func.\n");
    exit(code);
  }
}
// ==============================================
// Element-wise exact comparison of two float arrays; reports the first
// mismatch (index and both values) or declares the arrays identical.
void compare_arrays(const float *a, const float *b, const int size) {
  int i = 0;
  while (i < size) {
    if (a[i] != b[i]) {
      printf("\n Arrays are not equal!! %d %f %f \n", i, a[i], b[i]);
      return;
    }
    ++i;
  }
  printf("\n Arrays are identical!! \n");
}
// ==============================================
// Reference CPU implementation: c[i] = a[i] + b[i] for i in [0, size).
void sum_array_cpu(float *a, float *b, float *c, const int size) {
  int i = 0;
  while (i < size) {
    c[i] = a[i] + b[i];
    ++i;
  }
}
// ==============================================
// 1D grid, and 1D block. Thus, nx = size
// 1D grid, 1D block elementwise add: c[gid] = a[gid] + b[gid].
// The caller launches a ceil-div grid, so the last block's tail threads have
// gid >= nx; the bounds guard (missing in the original) prevents them from
// writing out of bounds.
__global__ void sum_array_1Dgrid_1Dblock(float *a, float *b, float *c, int nx) {
  int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid < nx)
    c[gid] = a[gid] + b[gid];
  // printf("inside %d \n", gid);
}
// ==============================================
// 2D grid, and 2D block. Thus, nx*ny = size.
// 2D grid, 2D block elementwise add over a logical nx x ny layout.
__global__ void sum_arrays_2Dgrid_2Dblock(float *a, float *b, float *c, int nx,
                                          int ny) {
  const int col = blockDim.x * blockIdx.x + threadIdx.x;
  const int row = blockDim.y * blockIdx.y + threadIdx.y;
  if (col >= nx || row >= ny)   // tail threads of a ceil-div grid do nothing
    return;
  const int gid = row * nx + col;
  c[gid] = a[gid] + b[gid];
}
// ==============================================
// Launch the 1D-grid/1D-block variant and validate against the CPU sum.
// argv[2] = log2 of array size, argv[4] = log2 of block size (both optional).
void run_sum_array_1d(int argc, char **argv) {
  printf(" Running 1D grid ");
  int size = 1 << 22; // the default size of the array.
  int block_size = 128;
  if (argc > 2)
    size = 1 << atoi(argv[2]);
  if (argc > 4)
    block_size = 1 << atoi(argv[4]);
  const int byte_size = size * sizeof(float);
  printf(" size of the array: %d, %d \n", size, byte_size);
  float *h_a, *h_b, *h_out, *h_ref;
  h_a = (float *)malloc(byte_size);
  h_b = (float *)malloc(byte_size);
  h_out = (float *)malloc(byte_size);
  h_ref = (float *)malloc(byte_size);
  // the original only tested h_a and then continued even on failure;
  // check all four buffers and bail out cleanly instead
  if (!h_a || !h_b || !h_out || !h_ref) {
    printf(" host memory allocation error\n");
    free(h_ref);
    free(h_out);
    free(h_b);
    free(h_a);
    return;
  }
  for (int i = 0; i < size; ++i) {
    h_a[i] = i % 10;
    h_b[i] = i % 7;
  }
  sum_array_cpu(h_a, h_b, h_out, size); // CPU reference result
  dim3 block(block_size);
  dim3 grid((size + block.x - 1) / block.x); // ceil-div covers every element
  printf(" launching the Kernel: grid(%d,%d,%d) - block(%d,%d,%d) \n", grid.x,
         grid.y, grid.z, block.x, block.y, block.z);
  float *d_a, *d_b, *d_c;
  gpuErrchk(cudaMalloc((void **)&d_a, byte_size));
  gpuErrchk(cudaMalloc((void **)&d_b, byte_size));
  gpuErrchk(cudaMalloc((void **)&d_c, byte_size));
  gpuErrchk(cudaMemset(d_c, 0, byte_size));
  gpuErrchk(cudaMemcpy(d_a, h_a, byte_size, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(d_b, h_b, byte_size, cudaMemcpyHostToDevice));
  sum_array_1Dgrid_1Dblock<<<grid, block>>>(d_a, d_b, d_c, size);
  gpuErrchk(cudaDeviceSynchronize());
  gpuErrchk(cudaMemcpy(h_ref, d_c, byte_size, cudaMemcpyDeviceToHost));
  compare_arrays(h_out, h_ref, size); // GPU result must match the reference
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  free(h_ref);
  free(h_out);
  free(h_b);
  free(h_a);
}
// ==============================================
// Launch the 2D-grid/2D-block variant and validate against the CPU sum.
// argv[2] = log2 of array size, argv[3] = log2 of nx, argv[4] = log2 of block.x.
void run_sum_array_2d(int argc, char **argv) {
  printf(" Running 2D grid ");
  int size = 1 << 22; // the default size of the array = 4194304
  int block_x = 128;
  int nx = 1 << 14; // 16384
  int ny = size / nx;
  int block_y = 8;
  if (argc > 2)
    size = 1 << atoi(argv[2]);
  if (argc > 3) {
    nx = 1 << atoi(argv[3]);
    ny = size / nx;
  }
  if (argc > 4) {
    int pow = atoi(argv[4]);
    if (pow < 3 || pow > 10) {
      printf("Block size is invalid, default block size used (%d,%d)\n",
             block_x, block_y);
    } else {
      block_x = 1 << pow;
      block_y = 1024 / block_x; // keep block_x * block_y == 1024 threads
    }
  }
  unsigned int byte_size = size * sizeof(float);
  printf("Input size : %d, nx : %d, ny : %d, block_x : %d, block_y : %d \n",
         size, nx, ny, block_x, block_y);
  float *h_a, *h_b, *h_out, *h_ref;
  h_a = (float *)malloc(byte_size);
  h_b = (float *)malloc(byte_size);
  h_out = (float *)malloc(byte_size);
  h_ref = (float *)malloc(byte_size);
  // the original only tested h_a and then continued even on failure;
  // check all four buffers and bail out cleanly instead
  if (!h_a || !h_b || !h_out || !h_ref) {
    printf(" host memory allocation error\n");
    free(h_ref);
    free(h_out);
    free(h_b);
    free(h_a);
    return;
  }
  for (int i = 0; i < size; ++i) {
    h_a[i] = i % 10;
    h_b[i] = i % 7;
  }
  sum_array_cpu(h_a, h_b, h_out, size); // CPU reference result
  dim3 block(block_x, block_y);
  dim3 grid((nx + block_x - 1) / block_x, (ny + block_y - 1) / block_y);
  printf(" launching the Kernel: grid(%d,%d,%d) - block(%d,%d,%d) \n", grid.x,
         grid.y, grid.z, block.x, block.y, block.z);
  float *d_a, *d_b, *d_c;
  gpuErrchk(cudaMalloc((void **)&d_a, byte_size));
  gpuErrchk(cudaMalloc((void **)&d_b, byte_size));
  gpuErrchk(cudaMalloc((void **)&d_c, byte_size));
  gpuErrchk(cudaMemset(d_c, 0, byte_size));
  gpuErrchk(cudaMemcpy(d_a, h_a, byte_size, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(d_b, h_b, byte_size, cudaMemcpyHostToDevice));
  sum_arrays_2Dgrid_2Dblock<<<grid, block>>>(d_a, d_b, d_c, nx, ny);
  gpuErrchk(cudaDeviceSynchronize());
  gpuErrchk(cudaMemcpy(h_ref, d_c, byte_size, cudaMemcpyDeviceToHost));
  compare_arrays(h_out, h_ref, size); // GPU result must match the reference
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  free(h_ref);
  free(h_out);
  free(h_b);
  free(h_a);
}
// ==============================================
////arguments :
////1 - kernel (0:1D or 1:2D),
////2 - input size (2 pow (x))
////3 - for 2D kernel nx,
////4 - block.x
////5 - block.y
// ==============================================
// Entry point: argv[1] selects the kernel variant (>0 -> 2D grid,
// otherwise or absent -> 1D grid); remaining args are parsed by the runner.
int main(int argc, char **argv) {
  printf("Sum array code for nvprof: \n");
  const bool use_2d = (argc > 1) && (atoi(argv[1]) > 0);
  if (use_2d)
    run_sum_array_2d(argc, argv);
  else
    run_sum_array_1d(argc, argv);
  // query_device();
  return 0;
}
|
2,495 | #include "pre_and_post_processor.cuh"
#define MAX_THREADS 1024
#include <iostream>
/******************************************************************************
* gpu_reorient: re-orientation and/or re-ordering of the axes
*
* Arguments:
* data: input data
* data_o: output data
* cord0: current ordinal number of the first axis
* cord1: current ordinal number of the second axis
* cord2: current ordinal number of the third axis
* dord0: desired ordinal number of the first axis
* dord1: desired ordinal number of the seconds axis
* dord2: desired ordinal number of the third axis
* corient0: current orientation of the first axis
* corient1: current orientation of the seconds axis
* corient2: current orientation of the third axis
* dorient0: desired orientation of the first axis
* dorient1: desired orientation of the seconds axis
* dorient2: desired orientation of the third axis
*****************************************************************************/
template<typename T>
__global__ void gpu_reorient(T *data, T *data_o,
                             unsigned w, unsigned h, unsigned d,
                             unsigned cord0, unsigned cord1, unsigned cord2,
                             unsigned dord0, unsigned dord1, unsigned dord2,
                             short corient0, short corient1, short corient2,
                             short dorient0, short dorient1, short dorient2)
{
    // t = extent of the axis whose CURRENT ordinal is 0; threadIdx.x walks
    // that axis while blockIdx.x / blockIdx.y walk the other two (the callers
    // size the grid with the extents of the ordinal-1 and ordinal-2 axes).
    unsigned int t = (cord0 == 0) * w + (cord1 == 0) * h + (cord2 == 0) * d;
    if(threadIdx.x < t)
    {
        // flat index of this element in the current (input) layout
        unsigned int in_idx = blockIdx.y * gridDim.x * t +
                              blockIdx.x * t + threadIdx.x;
        unsigned int out_idx = 0;
        // coordinates of this element along the three current axes
        unsigned int crs[3] = {threadIdx.x, blockIdx.x, blockIdx.y};
        unsigned int dord[3] = {dord0, dord1, dord2};
        unsigned int s[3] = {w, h, d};                 // axis extents
        unsigned int cord[3] = {cord0, cord1, cord2};
        // Accumulate the output index axis by axis; a mismatched orientation
        // mirrors the coordinate along that axis (extent - 1 - coordinate).
        if(corient0 == dorient0)
            out_idx += crs[cord[dord[0]]];
        else
            out_idx += (s[dord[0]] - 1 - crs[cord[dord[0]]]);
        if(corient1 == dorient1)
            out_idx += crs[cord[dord[1]]] * s[dord[0]];
        else
            out_idx += (s[dord[1]] - 1 - crs[cord[dord[1]]]) * s[dord[0]];
        // NOTE(review): the third axis indexes crs[dord[cord[2]]] whereas the
        // first two use crs[cord[dord[i]]]; the asymmetry coincides for
        // identity/involutive permutations but differs for 3-cycles — confirm
        // it is intentional before relying on arbitrary axis orders.
        if(corient2 == dorient2)
            out_idx += crs[dord[cord[2]]] * s[dord[0]] * s[dord[1]];
        else
            out_idx += (s[dord[2]] - 1 - crs[dord[cord[2]]]) *
                       s[dord[0]] * s[dord[1]];
        data_o[out_idx] = data[in_idx];
    }
}
/******************************************************************************
* gpu_normalize: clip and normalize input data
*
* Arguments:
* volume: input volume
* w: volume width
* h: volume height
* d: volume depth
* lower_th_: lower clip value
* upper_th_: upper clip value
* minimum_value_: minimum output value
* maximum_value_: maximum output value
*****************************************************************************/
// Clip each voxel to [lower_th_, upper_th_] (mapping the extremes to
// minimum_value_ / maximum_value_) and linearly rescale the interior of the
// range onto [minimum_value_, maximum_value_].
__global__ void gpu_normalize(float *volume,
                              unsigned w, unsigned h, unsigned d,
                              float lower_th_, float upper_th_,
                              float minimum_value_, float maximum_value_)
{
    const unsigned int idx = blockIdx.y * gridDim.x * blockDim.x +
                             blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= w * h * d)          // guard the ragged tail of the launch
        return;

    const float v = volume[idx];
    if(v < lower_th_)
        volume[idx] = minimum_value_;
    else if(v > upper_th_)
        volume[idx] = maximum_value_;
    else
        // linear map of [lower_th_, upper_th_] onto [min, max]
        volume[idx] = (v - lower_th_) *
                      (maximum_value_ - minimum_value_) /
                      (upper_th_ - lower_th_) + minimum_value_;
}
/******************************************************************************
* gpu_median_filter_3: median filtering of slices with kernel size 3
*
* Arguments:
* volume: input volume
* volume_f: output filtered volume
* w: volume width
* h: volume height
* d: volume depth
*****************************************************************************/
// Per-slice 3x3 median filter: each thread computes one output voxel.
// Border voxels (where the window would leave the slice) pass through.
__global__ void gpu_median_filter_3(float *volume, float *volume_f,
                                    unsigned w, unsigned h, unsigned d)
{
    const unsigned int idx = blockIdx.y * gridDim.x * blockDim.x +
                             blockIdx.x * blockDim.x + threadIdx.x;
    const int k = 3;                      // window size
    if(idx >= w * h * d)
        return;

    // decompose the flat index into (slice, row, column)
    const int slice = idx / (w * h);
    const int row = (idx - slice * w * h) / w;
    const int col = (idx - slice * w * h - row * w);

    const bool interior = (col - k / 2) >= 0 and (col + k / 2) < w and
                          (row - k / 2) >= 0 and (row + k / 2) < h;
    if(!interior)
    {
        volume_f[idx] = volume[idx];      // pass border voxels through
        return;
    }

    // gather the in-slice 3x3 neighborhood
    float window[9];
    for(short i = (- k / 2); i <= (k / 2); i++)
        for(short j = (- k / 2); j <= (k / 2); j++)
            window[(i + k / 2) * k + j + k / 2] =
                volume[slice * w * h + (row + i) * w + (col + j)];

    // selection sort, then pick the middle element as the median
    for(unsigned a = 0; a < (k * k - 1); a++)
    {
        unsigned smallest = a;
        for(unsigned b = a + 1; b < k * k; b++)
            if(window[b] < window[smallest])
                smallest = b;
        const float tmp = window[smallest];
        window[smallest] = window[a];
        window[a] = tmp;
    }
    volume_f[idx] = window[4];
}
/******************************************************************************
* gpu_median_filter_5: median filtering of slices with kernel size 5
*
* Arguments:
* volume: input volume
* volume_f: output filtered volume
* w: volume width
* h: volume height
* d: volume depth
*****************************************************************************/
// Per-slice 5x5 median filter: each thread computes one output voxel.
// Border voxels (where the window would leave the slice) pass through.
__global__ void gpu_median_filter_5(float *volume, float *volume_f,
                                    unsigned w, unsigned h, unsigned d)
{
    const unsigned int idx = blockIdx.y * gridDim.x * blockDim.x +
                             blockIdx.x * blockDim.x + threadIdx.x;
    const int k = 5;                      // window size
    if(idx >= w * h * d)
        return;

    // decompose the flat index into (slice, row, column)
    const int slice = idx / (w * h);
    const int row = (idx - slice * w * h) / w;
    const int col = (idx - slice * w * h - row * w);

    const bool interior = (col - k / 2) >= 0 and (col + k / 2) < w and
                          (row - k / 2) >= 0 and (row + k / 2) < h;
    if(!interior)
    {
        volume_f[idx] = volume[idx];      // pass border voxels through
        return;
    }

    // gather the in-slice 5x5 neighborhood
    float window[25];
    for(short i = (- k / 2); i <= (k / 2); i++)
        for(short j = (- k / 2); j <= (k / 2); j++)
            window[(i + k / 2) * k + j + k / 2] =
                volume[slice * w * h + (row + i) * w + (col + j)];

    // selection sort, then pick the middle element as the median
    for(unsigned a = 0; a < (k * k - 1); a++)
    {
        unsigned smallest = a;
        for(unsigned b = a + 1; b < k * k; b++)
            if(window[b] < window[smallest])
                smallest = b;
        const float tmp = window[smallest];
        window[smallest] = window[a];
        window[a] = tmp;
    }
    volume_f[idx] = window[12];
}
/******************************************************************************
* reorient_permute: determine if there is a need to re-orient and/or permute
* axes
*
* Arguments:
* re-orient: flag whether to re-orient axis
* permute: flag whether to permute axis
* cord: current axis order
* cornt: current axis orientations
* dord: desired axis order
* dornt: desired axis orientation
*****************************************************************************/
// Set `reorient` if any axis orientation differs from the desired one and
// `permute` if any axis ordinal differs. Flags are only ever raised, never
// cleared, so callers may pass in accumulated state.
void reorient_permute(bool &reorient, bool &permute,
                      unsigned *cord, short *cornt,
                      unsigned *dord, short *dornt)
{
    for(unsigned int axis = 0; axis < 3; axis++)
    {
        reorient = reorient || (cornt[axis] != dornt[axis]);
        permute  = permute  || (cord[axis] != dord[axis]);
    }
}
/******************************************************************************
* preprocess_volume_cuda: normalize voxel intensities and re-orient volume
* axes if necessary
*
* Arguments:
* volume_cpu: volume to be processed
* w: volume width
* h: volume height
* d: volume depth / number of slices
* cord: current order of axis
* cornt: current orientation of axis
* lower_threshold: lower limit for voxel intensity
* upper_threshold: upper limit for voxel intensity
* minimum_value: minimum voxel intensity value in the
* normalized voxel range
* maximum_value: maximum voxel intensity value in the
* normalized voxel range
*****************************************************************************/
void preprocess_volume_cuda(float *in_volume,
                            unsigned int w, unsigned int h, unsigned int d,
                            unsigned int *cord, short *cornt,
                            float lower_threshold, float upper_threshold,
                            float minimum_value, float maximum_value)
{
    // Target layout: axes in order (0,1,2), all positively oriented.
    short dornt[3] = {1, 1, 1};
    unsigned dord[3] = {0, 1, 2};
    bool reorient = false;
    bool permute = false;
    reorient_permute(reorient, permute, cord, cornt, dord, dornt);
    // Stage the whole volume on the device (modified in place by the kernels).
    float *volume_d;
    unsigned int volume_B = h * w * d * sizeof(float);
    cudaMalloc((void **) &volume_d, volume_B);
    cudaMemcpy(volume_d, in_volume, volume_B, cudaMemcpyHostToDevice);
    // Grid dimensions = extents of the axes whose CURRENT ordinals are 1
    // and 2; the MAX_THREADS block covers the ordinal-0 axis (both kernels
    // guard against over-launch internally).
    unsigned int i1, i2;
    i1 = (cord[1] == 0) * w + (cord[1] == 1) * h + (cord[1] == 2) * d;
    i2 = (cord[2] == 0) * w + (cord[2] == 1) * h + (cord[2] == 2) * d;
    dim3 grid(i1, i2);
    // Clip to [lower_threshold, upper_threshold] and rescale into
    // [minimum_value, maximum_value].
    gpu_normalize<<<grid, MAX_THREADS>>>(volume_d,
                                         w, h, d,
                                         lower_threshold, upper_threshold,
                                         minimum_value, maximum_value);
    if(reorient or permute)
    {
        // Reorder/mirror axes into the desired layout via a second buffer.
        float *volume_o_d;
        cudaMalloc((void **) &volume_o_d, volume_B);
        gpu_reorient<float><<<grid, MAX_THREADS>>>
            (volume_d, volume_o_d, w, h, d,
             cord[0], cord[1], cord[2], dord[0], dord[1], dord[2],
             cornt[0], cornt[1], cornt[2], dornt[0], dornt[1], dornt[2]);
        // Blocking D2H copy also synchronizes with the kernels above.
        cudaMemcpy(in_volume, volume_o_d, volume_B, cudaMemcpyDeviceToHost);
        cudaFree(volume_o_d);
    }
    else
        cudaMemcpy(in_volume, volume_d, volume_B, cudaMemcpyDeviceToHost);
    cudaFree(volume_d);
}
/******************************************************************************
* normalize_volume_cuda: normalize voxel intensities
*
* Arguments:
* in_volume: volume to be processed
* w: volume width
* h: volume height
* d: volume depth / number of slices
* lower_threshold: lower limit for voxel intensity
* upper_threshold: upper limit for voxel intensity
* minimum_value: minimum voxel intensity value in the
* normalized voxel range
* maximum_value: maximum voxel intensity value in the
* normalized voxel range
*****************************************************************************/
// Host wrapper: copy the volume to the device, run gpu_normalize over all
// w*h*d voxels (grid = h x d blocks of MAX_THREADS; the kernel guards the
// tail), and copy the normalized data back in place.
void normalize_volume_cuda(float *in_volume,
                           unsigned int w, unsigned int h, unsigned int d,
                           float lower_threshold, float upper_threshold,
                           float minimum_value, float maximum_value)
{
    const unsigned int volume_B = h * w * d * sizeof(float);
    float *volume_d;
    cudaMalloc((void **) &volume_d, volume_B);
    cudaMemcpy(volume_d, in_volume, volume_B, cudaMemcpyHostToDevice);

    dim3 grid(h, d);
    gpu_normalize<<<grid, MAX_THREADS>>>(volume_d, w, h, d,
                                         lower_threshold, upper_threshold,
                                         minimum_value, maximum_value);

    // blocking copy synchronizes with the kernel before the host reads results
    cudaMemcpy(in_volume, volume_d, volume_B, cudaMemcpyDeviceToHost);
    cudaFree(volume_d);
}
/******************************************************************************
* filter_with_median_cuda: de-noise volume with median filter
*
* Arguments:
* volume: volume to be processed
* w: volume width
* h: volume height
* d: volume depth / number of slices
* k: median kernel's size
*****************************************************************************/
// Host wrapper: median-filter `volume` slice by slice with a k x k window.
// Only k = 3 and k = 5 are supported; the kernel size is now validated BEFORE
// any device allocation or copy (the original allocated and copied the whole
// volume first and only then exited on an unsupported k).
void filter_with_median_cuda(float *volume,
                             unsigned int w, unsigned int h, unsigned int d,
                             int k)
{
    if(k != 3 and k != 5)
    {
        std::cout<<"Selected median filter size is not supported"<<std::endl;
        exit(EXIT_FAILURE);
    }
    float *volume_d;
    float *volume_f_d;
    unsigned int volume_B = h * w * d * sizeof(float);
    cudaMalloc((void **) &volume_d, volume_B);
    cudaMalloc((void **) &volume_f_d, volume_B);
    cudaMemcpy(volume_d, volume, volume_B, cudaMemcpyHostToDevice);
    dim3 grid(h, d);
    if(k == 3)
        gpu_median_filter_3<<<grid, MAX_THREADS>>>(volume_d, volume_f_d,
                                                   w, h, d);
    else
        gpu_median_filter_5<<<grid, MAX_THREADS>>>(volume_d, volume_f_d,
                                                   w, h, d);
    // blocking copy synchronizes with the kernel before the host reads results
    cudaMemcpy(volume, volume_f_d, volume_B, cudaMemcpyDeviceToHost);
    cudaFree(volume_d);
    cudaFree(volume_f_d);
}
/******************************************************************************
* reorient_volume_cuda: re-orient axes of volume if necessary
*
* Arguments:
* volume: volume to be reoriented
* w: volume width
* h: volume height
* d: volume depth / number of slices
* cord - current order of the axes
* cornt - current orientation of the axes
* dord - desired order of the axes
* dornt - desired orientation of the axes
*****************************************************************************/
// Host wrapper: re-order and/or mirror the axes of a float volume in place.
// A no-op when the current layout already matches the desired one.
void reorient_volume_cuda(float *volume,
                          unsigned int w, unsigned int h, unsigned int d,
                          unsigned *cord, short *cornt,
                          unsigned *dord, short *dornt)
{
    bool reorient = false;
    bool permute = false;
    reorient_permute(reorient, permute, cord, cornt, dord, dornt);
    if(!(reorient or permute))
        return;                          // layout already as desired

    const unsigned int volume_B = h * w * d * sizeof(float);
    float *volume_d;
    float *volume_o_d;
    cudaMalloc((void **) &volume_d, volume_B);
    cudaMemcpy(volume_d, volume, volume_B, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &volume_o_d, volume_B);

    // grid = extents of the axes whose current ordinals are 1 and 2
    const unsigned int i1 =
        (cord[1] == 0) * w + (cord[1] == 1) * h + (cord[1] == 2) * d;
    const unsigned int i2 =
        (cord[2] == 0) * w + (cord[2] == 1) * h + (cord[2] == 2) * d;
    dim3 grid(i1, i2);
    gpu_reorient<float><<<grid, MAX_THREADS>>>
        (volume_d, volume_o_d, w, h, d,
         cord[0], cord[1], cord[2], dord[0], dord[1], dord[2],
         cornt[0], cornt[1], cornt[2], dornt[0], dornt[1], dornt[2]);
    cudaMemcpy(volume, volume_o_d, volume_B, cudaMemcpyDeviceToHost);
    cudaFree(volume_d);
    cudaFree(volume_o_d);
}
/******************************************************************************
* reorient_segmentation_cuda: re-orient axes of segmentation if necessary
*
* Arguments:
* segment: segmentation to be reoriented
* w: volume width
* h: volume height
* d: volume depth / number of slices
* cord - current order of the axes
* cornt - current orientation of the axes
* dord - desired order of the axes
* dornt - desired orientation of the axes
*****************************************************************************/
// Host wrapper: re-order and/or mirror the axes of a byte segmentation in
// place. A no-op when the current layout already matches the desired one.
void reorient_segment_cuda(unsigned char *segment,
                           unsigned int w, unsigned int h, unsigned int d,
                           unsigned *cord, short *cornt,
                           unsigned *dord, short *dornt)
{
    bool reorient = false;
    bool permute = false;
    reorient_permute(reorient, permute, cord, cornt, dord, dornt);
    if(!(reorient or permute))
        return;                          // layout already as desired

    const unsigned int segment_B = h * w * d * sizeof(unsigned char);
    unsigned char *segment_d;
    unsigned char *segment_o_d;
    cudaMalloc((void **) &segment_d, segment_B);
    cudaMemcpy(segment_d, segment, segment_B, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &segment_o_d, segment_B);

    // grid = extents of the axes whose current ordinals are 1 and 2
    const unsigned int i1 =
        (cord[1] == 0) * w + (cord[1] == 1) * h + (cord[1] == 2) * d;
    const unsigned int i2 =
        (cord[2] == 0) * w + (cord[2] == 1) * h + (cord[2] == 2) * d;
    dim3 grid(i1, i2);
    gpu_reorient<unsigned char><<<grid, MAX_THREADS>>>
        (segment_d, segment_o_d, w, h, d,
         cord[0], cord[1], cord[2], dord[0], dord[1], dord[2],
         cornt[0], cornt[1], cornt[2], dornt[0], dornt[1], dornt[2]);
    cudaMemcpy(segment, segment_o_d, segment_B, cudaMemcpyDeviceToHost);
    cudaFree(segment_d);
    cudaFree(segment_o_d);
}
|
2,496 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define DataSize 1024
// One tree-reduction step: fold the upper `half` elements of Da onto the
// lower half (Da[id] += Da[id + half]). `high` and `width` are unused here
// but kept for the existing launch signature.
__global__ void Add(unsigned int *Da,int high,int width,int half)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < half)
        Da[id] += Da[id + half];
}
// Read an 8-bit BMP, sum all pixels on the GPU via pairwise tree reduction,
// and print the mean intensity.
// Fixes over the original: fopen is checked, the 1 MiB pixel buffer is
// heap-allocated instead of living on the stack, deprecated
// cudaThreadSynchronize() is replaced, and all resources are released.
int main()
{
    FILE *fp = NULL;
    unsigned int high = 0, width = 0, offset = 0;
    unsigned char *head;
    unsigned char *img;   // pixel data, 1 byte per pixel

    fp = fopen("lena.bmp","rb");
    if (fp == NULL) {                 // original dereferenced a NULL FILE*
        printf("cannot open lena.bmp\n");
        return 1;
    }
    // BMP header fields: pixel-data offset at byte 10, width at 18, height at 22
    fseek(fp, 10, SEEK_SET);
    fread(&offset, sizeof(unsigned int), 1, fp);
    fseek(fp, 18, SEEK_SET);
    fread(&width, sizeof(unsigned int), 1, fp);
    fseek(fp, 22, SEEK_SET);
    fread(&high, sizeof(unsigned int), 1, fp);
    img = (unsigned char*)malloc(sizeof(unsigned char)*(width*high));
    fseek(fp, offset, SEEK_SET);
    fread(img, sizeof(char), (width*high), fp);
    head =(unsigned char*)malloc(sizeof(unsigned char)*(offset));
    fseek(fp, 0, SEEK_SET);
    fread(head, sizeof(unsigned char), offset, fp);
    fclose(fp);

    dim3 block(1024, 1, 1);           // 1024 threads per block
    dim3 grid(256, 1, 1);             // 256 blocks

    // widen pixels to unsigned int so the running sum cannot overflow;
    // heap allocation replaces the original 1 MiB stack array
    // NOTE(review): the reduction below is hard-coded for a 512x512 image —
    // confirm lena.bmp matches before reusing with other inputs
    unsigned int *Dimg = (unsigned int*)malloc(sizeof(unsigned int)*(512*512));
    if (Dimg == NULL) {
        printf("host allocation failed\n");
        free(head);
        free(img);
        return 1;
    }
    for (int j = 0; j < 512*512; j++) {
        Dimg[j] = img[j];
    }

    unsigned int *Da;                 // device buffer, 4 bytes per pixel
    cudaMalloc((void**)&Da, (sizeof(unsigned int)*(width*high)));
    cudaMemcpy(Da, Dimg, (sizeof(unsigned int)*(width*high)), cudaMemcpyHostToDevice);

    // pairwise tree reduction: lg(512*512) = 18 halving steps
    int round = 0, half = 512*512;
    while (round < 18) {
        half /= 2;
        Add <<< grid, block >>> (Da,high,width,half);
        cudaDeviceSynchronize();      // cudaThreadSynchronize() is deprecated
        round++;
    }
    cudaMemcpy(Dimg, Da, (sizeof(unsigned int)*(width*high)), cudaMemcpyDeviceToHost);

    // Dimg[0] now holds the total; divide for the mean pixel intensity
    Dimg[0] /= (512*512);
    printf("\n%3d\n", Dimg[0]);

    cudaFree(Da);
    free(Dimg);
    free(head);
    free(img);
    return 0;
}
|
2,497 | /*
* Ejercicio 2 Práctica 3: CUDA
* Desempeño en función de la homogeneidad para acceder a memoria
* y de la regularidad del código
*/
#include <stdio.h>
//PP#include <cuda.h>
#define STRIDE 8
#define OFFSET 1
#define GROUP_SIZE 8
/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);
// Kernel that executes on the CUDA device
// Kernel that executes on the CUDA device.
// Experiment kernel: each thread processes a strided slice of `a`, either
// squaring or doubling elements. STRIDE / OFFSET / GROUP_SIZE deliberately
// perturb the access pattern and control flow so the effect of memory
// access homogeneity and code regularity can be measured (per the header).
__global__ void square_array(float *a, int N)
{
    // elements per thread; assumes N divides evenly by the launch size
    int n_elem_per_thread = N / (gridDim.x * blockDim.x);
    int block_start_idx = n_elem_per_thread * blockIdx.x * blockDim.x;
    // start index: threads within a STRIDE-sized group interleave their
    // accesses, rotated by OFFSET to break perfect thread/address alignment
    int thread_start_idx = block_start_idx
        + (threadIdx.x / STRIDE) * n_elem_per_thread * STRIDE
        + ((threadIdx.x + OFFSET) % STRIDE);
    int thread_end_idx = thread_start_idx + n_elem_per_thread * STRIDE;
    if(thread_end_idx > N) thread_end_idx = N;
    // alternate GROUP_SIZE-sized thread groups between two code paths;
    // when GROUP_SIZE is below the warp size this diverges within a warp
    int group = (threadIdx.x / GROUP_SIZE) & 1;
    for(int idx=thread_start_idx; idx < thread_end_idx; idx+=STRIDE)
    {
        if(!group) a[idx] = a[idx] * a[idx];
        else a[idx] = a[idx] + a[idx];
    }
}
// main routine that executes on the host
// main routine that executes on the host:
// fills an array with 0..N-1, runs square_array on the device, and reports
// selected results together with the measured elapsed time.
int main(void)
{
    float *a_h, *a_d; // Pointer to host & device arrays
    const int N = 1<<10; // Make a big array with 2**N elements
    size_t size = N * sizeof(float);
    /* helpers for measuring elapsed time */
    cudaEvent_t start, stop;
    float time;
    a_h = (float *)malloc(size); // Allocate array on host
    cudaMalloc((void **) &a_d, size); // Allocate array on device
    // Initialize host array and copy it to CUDA device
    for (int i=0; i<N; i++)
        a_h[i] = (float)i;
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    checkCUDAError("memcpy");
    // Create timer for timing CUDA calculation
    //PPunsigned int timer = 0;
    //PPcutCreateTimer( &timer );
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Set number of threads and blocks
    int n_threads_per_block = 128;//1<<9 would be 512 threads per block
    int n_blocks = 256;//1<<10 would be 1024 blocks
    // Do calculation on device
    cudaEventRecord(start,0);
    square_array <<< n_blocks, n_threads_per_block >>> (a_d, N);
    cudaDeviceSynchronize(); // Wait for square_array to finish on CUDA
    checkCUDAError("kernel invocation");
    // Retrieve result from device and store it in host array
    cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");
    // NOTE(review): stop is recorded after the D2H copy, so the reported
    // time includes the copy-back as well as the kernel — confirm intended
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime( &time, start, stop );
    // Print some of the results (every N/50-th element)
    for (int i=0; i<N; i+=N/50) printf("%d %f\n", i, a_h[i]);
    // print the elapsed execution time in milliseconds
    printf("\n\nTIEMPO DE EJECUCIÓN: %f mSeg\n\n", time);
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    free(a_h); cudaFree(a_d);
}
/* Utility function to check for and report CUDA errors */
/* Utility function to check for and report CUDA errors; exits on failure. */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(EXIT_FAILURE);
}
|
2,498 | #include <stdio.h>
// Device kernel: every launched thread prints one greeting line.
__global__ void helloFromGPU(void)
{
    printf("Hello from GPU.\n");
}
// Host entry point: greet from the CPU, then launch 2 blocks x 5 threads of
// the GPU greeting (10 device-side prints) and tear down the device context.
int main()
{
    printf("Hello from CPU.\n");
    helloFromGPU<<<2, 5>>>();
    // destroys the CUDA context; the original sample relies on this call to
    // complete pending device work before the process exits
    cudaDeviceReset();
    return 0;
}
2,499 | /*
* Example from Udacity Intro to Parallel Programming https://www.udacity.com/course/intro-to-parallel-programming--cs344
* nvcc -ccbin clang-3.8 cube.cu
*/
#include <stdio.h>
// One thread per element (single-block launch): d_out[i] = d_in[i]^3.
__global__ void cube(float * d_out, float * d_in){
    const int idx = threadIdx.x;
    const float v = d_in[idx];
    d_out[idx] = v * v * v;
}
// Host driver: cube 64 floats on the GPU and print them 4 per row.
int main(int argc, char ** argv) {
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // host-side input: 0.0, 1.0, ..., 63.0
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
        h_in[i] = float(i);

    // device buffers
    float * d_in;
    float * d_out;
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);

    // stage input on the device
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // one block, one thread per element
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);

    // blocking copy doubles as synchronization with the kernel
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // print the results, tab-separated, newline every 4th value
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
2,500 | #include<stdio.h>
// Returns true when at least one CUDA-capable device is visible;
// also prints the device count.
bool check_gpu(void)
{
    int count = 0;
    cudaGetDeviceCount(&count);
    printf("device:%d\n", count);
    return count >= 1;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.