serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,901 |
#include <stdlib.h>
#include <stdio.h>
#include <math.h> /* for pow(); the original used pow() without this header */

/*
 * Writes two arrays of 2^argv[1] random floats (values in [1,100]) to the
 * files named in argv[2] and argv[3], echoing the first 10 values of each
 * array to stdout.
 * Usage: ./dump_arrays.out size array_file1 array_file2
 */
int main(int argc, char **argv) {
    printf("Usage ./dump_arrays.out size array_file1 array_file2");
    /* Need program name + size + two file names => argc must be >= 4.
     * (The original checked argc<3, which still allowed argv[3] to be
     * missing, and it read argv[1] before the check at all.) */
    if (argc < 4) {
        printf("Too few arguments.\nUsage is ./ee16b068_3.out file1.txt file2.txt ");
        return 1;
    }
    int N = (int)pow(2, atoi(argv[1]));
    size_t size = N * sizeof(float);
    /* Allocate input vectors h_A and h_B in host memory */
    float *h_A = (float*)malloc(size);
    float *h_B = (float*)malloc(size);
    if (h_A == NULL || h_B == NULL) {
        printf("Host allocation failed.\n");
        free(h_A);
        free(h_B);
        return 1;
    }
    FILE *arrayfile_a = fopen(argv[2], "w");
    FILE *arrayfile_b = fopen(argv[3], "w");
    if (arrayfile_a == NULL || arrayfile_b == NULL) {
        printf("Failed to open an output file.\n");
        if (arrayfile_a) fclose(arrayfile_a);
        if (arrayfile_b) fclose(arrayfile_b);
        free(h_A);
        free(h_B);
        return 1;
    }
    /* Initialize input vectors and dump them, one value per line. */
    printf("\n Array A (first 10 values) \n ");
    for (int loop = 0; loop < N; loop++) {
        h_A[loop] = rand() % 100 + 1;
        if (loop < 10) {
            printf("%f ", h_A[loop]);
        }
        fprintf(arrayfile_a, "%f\n", h_A[loop]);
    }
    printf("\n Array B (first 10 values) \n ");
    for (int loop = 0; loop < N; loop++) {
        h_B[loop] = rand() % 100 + 1;
        if (loop < 10) {
            printf("%f ", h_B[loop]);
        }
        fprintf(arrayfile_b, "%f\n", h_B[loop]);
    }
    /* FILE* handles must be fclose()d, not free()d (the original called
     * free() on them, which is undefined behavior and leaks the handles). */
    fclose(arrayfile_a);
    fclose(arrayfile_b);
    /* Free host memory */
    free(h_A);
    free(h_B);
    return 0;
}
22,902 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
// Device code
extern "C" __global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
    // One thread per element; guard against the grid overshooting N.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
|
22,903 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <time.h>
#include <unistd.h>
#define MASK_WIDTH 5
#define TILE_SIZE 4
__constant__ float Mc[1024];
// Tiled 3D convolution of `input` with the kernel stored in constant memory
// Mc (k_dim^3 coefficients). Each block loads a (TILE_SIZE + k_dim - 1)^3
// halo tile into shared memory, then the inner TILE_SIZE^3 threads each
// produce one output element. Expects blockDim = (TILE_SIZE + k_dim - 1)^3.
// NOTE(review): Ns holds 1024 floats, which only fits the tile when
// TILE_SIZE == 4 and k_dim <= 7 — confirm callers never exceed that.
__global__ void Convolution3d(float* input, float* output, int numARows, int numACols, int numAHeight, int numCRows, int numCCols, int numCHeight, int k_dim)
{
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int tz = threadIdx.z;
    // Output coordinates this thread is responsible for.
    const int row_o = blockIdx.y * TILE_SIZE + ty;
    const int col_o = blockIdx.x * TILE_SIZE + tx;
    const int height_o = blockIdx.z * TILE_SIZE + tz;
    // Corresponding input coordinates, shifted back by the halo radius.
    const int halo = (k_dim - 1) / 2;
    const int row_i = row_o - halo;
    const int col_i = col_o - halo;
    const int height_i = height_o - halo;
    const int tile_w = TILE_SIZE + (k_dim - 1);  // loaded tile edge length
    __shared__ float Ns[1024];
    const int s = tz * tile_w * tile_w + ty * tile_w + tx;
    if ((row_i >= 0) && (row_i < numARows) && (col_i >= 0) && (col_i < numACols) &&
        (height_i >= 0) && (height_i < numAHeight)) {
        Ns[s] = input[numACols * numARows * height_i + row_i * numACols + col_i];
    } else {
        Ns[s] = 0.0f;  // zero padding outside the input volume
    }
    __syncthreads();  // tile fully loaded before anyone reads it
    if (ty < TILE_SIZE && tx < TILE_SIZE && tz < TILE_SIZE) {
        float out = 0.0f;
        for (int i = 0; i < k_dim; i++) {
            for (int j = 0; j < k_dim; j++) {
                for (int k = 0; k < k_dim; k++) {
                    out += Mc[i * k_dim * k_dim + j * k_dim + k] *
                           Ns[(i + tz) * tile_w * tile_w + (j + ty) * tile_w + (k + tx)];
                }
            }
        }
        if (row_o < numCRows && col_o < numCCols && height_o < numCHeight) {
            output[height_o * numCCols * numCRows + row_o * numCCols + col_o] = out;
        }
    }
    // BUG FIX: the original called __syncthreads() inside the divergent
    // branch above, which is undefined behavior. No barrier is needed here
    // at all because the kernel performs no further shared-memory accesses.
}
/* Opens the input/output/kernel text files of sample test <num> (1..5),
 * printing the original diagnostics when a file fails to open.
 * Returns 0 when all three opened, -1 otherwise. */
static int open_test_files(int num, FILE** input_file, FILE** output_file, FILE** kernel_file)
{
    char path[64];
    snprintf(path, sizeof(path), "./sample/test%d/input.txt", num);
    *input_file = fopen(path, "r");
    if (*input_file == NULL) printf("input파일 열기 실패\n");
    snprintf(path, sizeof(path), "./sample/test%d/output.txt", num);
    *output_file = fopen(path, "r");
    if (*output_file == NULL) printf("output파일 열기 실패\n");
    snprintf(path, sizeof(path), "./sample/test%d/kernel.txt", num);
    *kernel_file = fopen(path, "r");
    if (*kernel_file == NULL) printf("kernel파일 열기 실패\n");
    return (*input_file && *output_file && *kernel_file) ? 0 : -1;
}
/* Driver: reads an input volume, a k_dim^3 kernel and a reference output,
 * runs Convolution3d on the GPU, reports the kernel time and validates the
 * result against the reference with tolerance 1e-3. */
int main(int argc, char** argv)
{
    /* -a..-e select sample test 1..5. */
    int check = getopt(argc, argv, "abcde :");
    FILE* input_file = NULL;
    FILE* output_file = NULL;
    FILE* kernel_file = NULL;
    if (check < 'a' || check > 'e') {
        /* BUG FIX: the original fell through after this message and then
         * used the uninitialized FILE* pointers. */
        printf("Wrong argument ./conv3d -[a...e]");
        return EXIT_FAILURE;
    }
    if (open_test_files(check - 'a' + 1, &input_file, &output_file, &kernel_file) != 0) {
        /* BUG FIX: the original kept going and fscanf'd NULL streams. */
        if (input_file) fclose(input_file);
        if (output_file) fclose(output_file);
        if (kernel_file) fclose(kernel_file);
        return EXIT_FAILURE;
    }
    int i_x, i_y, i_z;     /* input dims (file order: z y x) */
    int o_x, o_y, o_z;     /* output dims */
    int k_dim;             /* kernel edge length */
    float *input, *output, *kernel;
    float *d_input;
    fscanf(input_file, "%d %d %d", &i_z, &i_y, &i_x);
    input = (float*)malloc(sizeof(float) * i_x * i_y * i_z);
    cudaMalloc(&d_input, sizeof(float) * i_x * i_y * i_z);
    for (int i = 0; i < i_x * i_y * i_z; i++) {
        fscanf(input_file, "%f", input + i);
    }
    cudaMemcpy(d_input, input, sizeof(float) * i_x * i_y * i_z, cudaMemcpyHostToDevice);
    fscanf(kernel_file, "%d", &k_dim);
    kernel = (float*)malloc(sizeof(float) * k_dim * k_dim * k_dim);
    for (int i = 0; i < k_dim * k_dim * k_dim; i++) {
        fscanf(kernel_file, "%f", kernel + i);
    }
    /* Kernel coefficients live in constant memory (Mc). */
    cudaMemcpyToSymbol(Mc, kernel, sizeof(float) * k_dim * k_dim * k_dim);
    fscanf(output_file, "%d %d %d", &o_z, &o_y, &o_x);
    output = (float*)malloc(sizeof(float) * o_x * o_y * o_z);
    for (int i = 0; i < o_x * o_y * o_z; i++) {
        fscanf(output_file, "%f", output + i);
    }
    printf("input: %d %d %d \n", i_x, i_y, i_z);
    printf("kernel: %d\n", k_dim);
    printf("output: %d %d %d\n", o_x, o_y, o_z);
    float *h_out, *d_out;
    h_out = (float*)malloc(sizeof(float) * o_x * o_y * o_z);
    cudaMalloc(&d_out, sizeof(float) * o_x * o_y * o_z);
    /* Block must cover the output tile plus its halo. */
    int blocksize = TILE_SIZE + (k_dim - 1);
    dim3 dimGrid(ceil(i_x / (TILE_SIZE * 1.0)), ceil(i_y / (TILE_SIZE * 1.0)), ceil(i_z / (TILE_SIZE * 1.0)));
    dim3 dimBlock(blocksize, blocksize, blocksize);
    cudaEvent_t start, end;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    Convolution3d<<<dimGrid, dimBlock>>>(d_input, d_out, i_y, i_x, i_z, o_y, o_x, o_z, k_dim);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsedTime, start, end);
    printf("Execution time for CUDA: %.3fms\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaDeviceSynchronize();
    cudaMemcpy(h_out, d_out, sizeof(float) * o_x * o_y * o_z, cudaMemcpyDeviceToHost);
    /* Validate against the reference output with absolute tolerance 1e-3.
     * BUG FIX: the original used abs() on a float difference, which can
     * resolve to the integer overload and truncate sub-1.0 differences to 0. */
    int err = 0;
    for (int i = 0; i < o_x * o_y * o_z; i++) {
        if (fabsf(h_out[i] - output[i]) >= 0.001f) {
            err++;
        }
    }
    if (err == 0) {
        printf("validation complete\n");
    } else {
        printf("%d\n", err);
    }
    /* Release everything (the original leaked all of it). */
    fclose(input_file);
    fclose(output_file);
    fclose(kernel_file);
    free(input);
    free(output);
    free(kernel);
    free(h_out);
    cudaFree(d_input);
    cudaFree(d_out);
    return EXIT_SUCCESS;
}
22,904 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define ARRAYSIZE 1024
// Pairwise elimination pass: for every pair of distinct indices (idx, idy),
// clear check[idx] whenever a[idx] is larger than a[idy]. Afterwards only
// positions holding the minimum value still have check[idx] == true.
__global__ void minCompare(int *a, bool *check) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const int idy = threadIdx.y + blockIdx.y * blockDim.y;
    if (idx == idy) { return; }
    const int mine = a[idx];
    const int theirs = a[idy];
    if (mine > theirs) {
        check[idx] = false;
    }
}
// Writes a[idx] to min[0] for every block whose flag survived minCompare.
// All surviving entries hold the same (minimum) value, so the racy writes
// all store the same number.
__global__ void cudaMin(int *a, bool *check, int* min) {
    const int i = blockIdx.x;
    if (check[i]) {
        min[0] = a[i];
    }
}
// Sets the first `length` entries of arr to true (one thread per entry).
__global__ void cudaBoolFill(bool *arr, int length) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= length)
        return;
    arr[idx] = true;
}
// Fills arr[0..length-1] with pseudo-random ints, reseeding the C RNG from
// the wall clock on every call.
void array_fill(int *arr, int length)
{
    srand(time(NULL));
    for (int pos = 0; pos < length; ++pos)
        arr[pos] = (int)rand();
}
// Returns the minimum of arr[0..length-1] via a brute-force O(length^2)
// GPU elimination: every value is compared against every other, and the
// survivors (positions never beaten) hold the minimum.
int findMin(int *arr, const int length){
bool *check;
int *ad, *min;
const int intSize = sizeof(int);
const int asize = length * intSize;
const int bsize = length * sizeof(bool);
// Copy the input array to the device.
cudaMalloc((void**)&ad, asize);
cudaMemcpy(ad, arr, asize, cudaMemcpyHostToDevice);
// check[i] starts true and is cleared when a[i] exceeds any other element.
cudaMalloc((void**)&check, bsize);
cudaBoolFill <<< dim3(length, 1), 1 >>> (check, length);
cudaMalloc((void**)&min, intSize);
// length x length grid of 1-thread blocks: one block per (idx, idy) pair.
minCompare <<< dim3(length, length), 1 >>> (ad, check);
// Surviving flags all point at the minimum value.
cudaMin <<< dim3(length, 1), 1 >>> (ad, check, min);
int minhost[1];
// Blocking copy; also synchronizes with the kernels above.
cudaMemcpy(minhost, min, intSize, cudaMemcpyDeviceToHost);
cudaFree(ad);
cudaFree(min);
cudaFree(check);
return minhost[0];
}
/* Host driver: fill an array with random ints, time the GPU minimum search,
 * and report the elapsed time and the result. */
int main()
{
    int *a = (int*)malloc(ARRAYSIZE * sizeof(int));
    if (a == NULL) {
        printf("host allocation failed\n");
        return 1;
    }
    array_fill(a, ARRAYSIZE);
    clock_t start = clock();
    int min = findMin(a, ARRAYSIZE);
    clock_t stop = clock();
    double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
    printf("Elapsed time: %.3fs\n", elapsed);
    printf("min is %d\n", min);
    free(a);
    /* BUG FIX: the original spun in `while (1) {}` here (presumably to keep
     * a console window open), which burned a CPU core and prevented the
     * process from ever exiting. Removed. */
    return 0;
}
|
22,905 | #include "includes.h"
// For each point x, scan its distance to every cluster (devDiff is laid out
// cluster-major: entry for (x, cluster) lives at x + cluster*diffPitchInFloats),
// assign the nearest cluster into devClusters[x], and bump *devChanges when
// the assignment differs from the previous one.
__global__ void findDiffLabelsAtomicFree(float* devDiff, int diffPitchInFloats, int nPoints, int nClusters, int* devClusters, int* devChanges) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x < nPoints) {
int index = x;
// Sentinel assumed larger than any real distance in devDiff.
float minDistance = 10000000;
int minCluster = -1;
for(int cluster = 0; cluster < nClusters; cluster++) {
float clusterDistance = devDiff[index];
if (clusterDistance < minDistance) {
minDistance = clusterDistance;
minCluster = cluster;
}
// Step to this point's distance entry for the next cluster.
index += diffPitchInFloats;
}
int previousCluster = devClusters[x];
devClusters[x] = minCluster;
if (minCluster != previousCluster) {
// NOTE(review): this read-modify-write is deliberately non-atomic (the
// kernel name says "AtomicFree"), so concurrent increments can be lost.
// The counter is only reliable as a zero/non-zero "anything changed"
// signal — confirm callers use it that way.
int change=*devChanges;
change++;
*devChanges = change;
}
}
}
22,906 | #include "includes.h"
// Scatter resampling: maps input sample t to a stretched timestamp
// t * (t_zero + multiplier*(t-1)) and writes d_input[t] to the output slot
// that timestamp falls into (truncated via tsamp_inverse).
// NOTE(review): d_input[t] is read whenever the *output* index is in range;
// this assumes the launch never exceeds the input length — confirm at call site.
__global__ void stretch_kernel(int acc, int samps, float tsamp, float *d_input, float *d_output, float t_zero, float multiplier, float tsamp_inverse) {
    const int t = blockIdx.x * blockDim.x + threadIdx.x;
    const float stretched_time = t * (t_zero + (multiplier * (t - 1.0f)));
    const int out_idx = __float2int_rz(stretched_time * tsamp_inverse);
    if (out_idx >= 0 && out_idx < samps)
        d_output[out_idx] = d_input[t];
}
22,907 | #include "includes.h"
// Writes QT[a] = A[a + m - 1] / divisor for the first n entries: each output
// is one input element taken from a window offset by m-1, scaled by 1/divisor.
__global__ void normalized_aligned_dot_products(const double* A, const double divisor, const unsigned int m, const unsigned int n, double* QT)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard (note: signed/unsigned comparison promotes a to unsigned).
if (a < n) {
QT[a] = A[a + m - 1] / divisor;
}
}
22,908 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
extern "C"
{
__global__ void setup_kernel(curandState *state, int seed, int n, int verbose)
{
// Flatten the 2D grid and 3D block coordinates into one global thread id.
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
int localId = threadIdx.z * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x + threadIdx.x;
int idx = blockId * threadsPerBlock + localId;
if (verbose){
printf("Setting up RNG in thread %d (n=%d)...\n",idx,n);
}
// One curand subsequence per thread, all derived from the same seed.
curand_init(seed, idx, 0, &state[idx]);
}
__global__ void rnorm_basic_kernel(curandState *state, double *vals, int n, double mu, double sigma)
{
// Flatten the 2D grid and 3D block coordinates into one global thread id.
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
int localId = threadIdx.z * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x + threadIdx.x;
int idx = blockId * threadsPerBlock + localId;
// One N(mu, sigma) draw per thread, guarded against grid overshoot.
if (idx < n)
vals[idx] = mu + sigma * curand_normal_double(&state[idx]);
}
__global__ void rnorm_kernel(curandState *state, double *vals, int n, double mu, double sigma, int numSamples)
{
// Flatten the 2D grid and 3D block coordinates into one global thread id.
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
int localId = threadIdx.z * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x + threadIdx.x;
int idx = blockId * threadsPerBlock + localId;
// Each thread fills a contiguous run of numSamples N(mu, sigma) draws,
// clipped to the first n output slots.
int base = idx * numSamples;
for (int k = 0; k < numSamples; k++) {
if (base + k < n)
vals[base + k] = mu + sigma * curand_normal_double(&state[idx]);
}
}
} // END extern
|
22,909 | #include "includes.h"
// Accumulates, for each (batch element tid, output dim dimId), the sum over
// polynoms of prob * value, grid-striding over polynoms in blockIdx.x and
// atomically adding the partial sum into out[dimId * batchSize + tid].
// Layout: probs is [polynomCount x batchSize], values is
// [polynomCount x outputDim], out is [outputDim x batchSize].
__global__ void SigmoidProbPolynomForwardImpl( const float* probs, int batchSize, const float* values, int polynomCount, int outputDim, float* out) {
    int polynomId = blockIdx.x;
    const int dimId = blockIdx.y;
    int tid = threadIdx.x;
    if (tid >= batchSize) {
        return;
    }
    float sum = 0;
    probs += threadIdx.x;
    values += dimId;
    while (polynomId < polynomCount) {
        const float polynomProb = __ldg(probs + polynomId * batchSize);
        // BUG FIX: this local was named `out`, shadowing the output-pointer
        // parameter used below; renamed for clarity (behavior unchanged).
        const float polynomValue = __ldg(values + polynomId * outputDim);
        sum += polynomProb * polynomValue;
        polynomId += gridDim.x;  // grid-stride over polynoms
    }
    atomicAdd(out + dimId * batchSize + threadIdx.x, sum);
}
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <cuda.h>
void matrixMul(int** A, int** B, int** C, int WIDTH); //loading, transfer, execution(host code)
void printArray(int **array, int WIDTH); // print Array elements
/* Builds two WIDTH x WIDTH matrices, multiplies them serially on the CPU,
 * prints the product and the elapsed time. WIDTH comes from argv[1]. */
int main(int argc, char * argv[]){
    int WIDTH;
    int x, y;
    int **a, **b, **c;
    if (argc < 2) {
        printf("Not inserted block size, Try again\n");
        exit(-1);
    }
    WIDTH = atoi(argv[1]);
    /* 2D array dynamic allocation: one row-pointer array + WIDTH rows each. */
    a = (int**)malloc(sizeof(int*) * WIDTH);
    b = (int**)malloc(sizeof(int*) * WIDTH);
    c = (int**)malloc(sizeof(int*) * WIDTH);
    for (y = 0; y < WIDTH; y++) {
        a[y] = (int*)malloc(sizeof(int) * WIDTH);
        b[y] = (int*)malloc(sizeof(int) * WIDTH);
        c[y] = (int*)malloc(sizeof(int) * WIDTH);
    }
    /* Deterministic fill so the product is reproducible. */
    for (y = 0; y < WIDTH; y++) {
        for (x = 0; x < WIDTH; x++) {
            a[y][x] = y * 10 + x;
            b[y][x] = (y * 10 + x) * 100;
        }
    }
    clock_t start = clock();
    matrixMul(a, b, c, WIDTH);
    printArray(c, WIDTH);
    /* BUG FIX: clock() returns ticks, not milliseconds; the original printed
     * raw ticks labelled "ms". Convert via CLOCKS_PER_SEC. (As before, the
     * timed region also includes printArray.) */
    printf("Matrix Multiplication execution time(Serial): %fms\n",
           (double)(clock() - start) * 1000.0 / CLOCKS_PER_SEC);
    /* free 2D array */
    for (y = 0; y < WIDTH; y++) {
        free(a[y]);
        free(b[y]);
        free(c[y]);
    }
    free(a);
    free(b);
    free(c);
    return 0;
}
/* Naive O(WIDTH^3) serial square matrix multiply: C = A * B. */
void matrixMul(int** A, int** B, int** C,int WIDTH)
{
    for (int row = 0; row < WIDTH; row++) {
        for (int col = 0; col < WIDTH; col++) {
            int acc = 0;
            for (int i = 0; i < WIDTH; i++) {
                acc += A[row][i] * B[i][col];
            }
            C[row][col] = acc;
        }
    }
}
/* Prints a WIDTH x WIDTH matrix in 10-character right-aligned columns,
 * one matrix row per output line. */
void printArray(int **array, int WIDTH){
    for (int row = 0; row < WIDTH; row++) {
        for (int col = 0; col < WIDTH; col++) {
            printf("%10d", array[row][col]);
        }
        printf("\n");
    }
}
|
22,911 | /*
* a simple scan program. compute the partial sums of
* the elements of the input array. This version uses
* only one block
*/
// Inclusive prefix sum (Hillis–Steele doubling) over d_in[0..n-1], single
// block only. The launch must supply 2*n floats of dynamic shared memory
// (two ping-pong buffers of n each).
__global__ void cudaScan(float* d_out, float* d_in, int n) {
    // shared array allocated by the launch of the kernel
    extern __shared__ float temp[];
    const int threadId = threadIdx.x;
    // BUG FIX: the original did `if (threadId >= n) return;` here, which made
    // surplus threads skip every __syncthreads() below — undefined behavior
    // whenever blockDim.x > n. Instead, all threads now reach every barrier
    // and the work is gated on this predicate.
    const bool active = (threadId < n);
    int fromBuffer = 1;
    int toBuffer = 0;
    // Stage the input into shared memory.
    if (active) {
        temp[threadId] = d_in[threadId];
    }
    __syncthreads();
    // Round n up to the next power of two so the doubling loop covers it.
    int maxOffset = (int)ceil(log2(1.0f * n));
    maxOffset = pow(2.0f, 1.0f * maxOffset);
    for (int offset = 1; offset < maxOffset; offset *= 2) {
        // Swap the ping-pong buffers.
        fromBuffer = 1 - fromBuffer;
        toBuffer = 1 - toBuffer;
        if (active) {
            if (threadId >= offset) {
                temp[toBuffer*n + threadId] = temp[fromBuffer*n + threadId - offset] +
                                              temp[fromBuffer*n + threadId];
            } else {
                temp[toBuffer*n + threadId] = temp[fromBuffer*n + threadId];
            }
        }
        __syncthreads();  // reached by ALL threads, active or not
    }
    if (active) {
        d_out[threadId] = temp[toBuffer*n + threadId];
    }
}
|
22,912 |
#include<stdio.h>
#include<cuda.h>
#include<string.h>
#include <stdint.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/binary_search.h>
// Sentinel marking an unused key slot in a node.
int null = INT_MAX;
// Sentinel lower bound ("negative infinity") used while descending the tree.
int ninf = INT_MIN;
// Host-side B+ tree node backed by thrust host_vectors.
typedef struct Node{
Node* parent;
thrust :: host_vector<int> keys;
thrust :: host_vector<Node*> pointer;
bool isLeaf;
bool isDead;
// Scratch slot: the sibling created by a split, consumed by the parent insert.
Node* buffer;
}Node;
// Fixed-size snapshot of a Node suitable for copying to the device
// (order-7 tree: up to 7 keys / 8 children per node).
typedef struct dNode{
int keys[7];
// NOTE(review): these hold host Node* values; the device kernels navigate
// children via prefix_sum indices and never dereference these — confirm.
Node* pointer[8];
bool isLeaf;
int no_keys=0;
// Host addresses of each key's data row; kernels copy them into result
// arrays and the host dereferences them after copy-back.
int *data_pointer[7];
}dNode;
// Range query kernel: one block per (a,b) query pair read from ab, with
// d (=7) threads cooperating. The block first descends from the root to the
// leaf region near `a` (each thread scores one key slot; the key closest to
// `a` steers the descent via prefix_sum, which maps a node index to the
// index of its first child in `nodes`). Then thread 0 walks consecutive
// leaves, copying the host data-row address of every key in [a,b] into
// result and writing the match count to count[blockIdx.x].
// A query of (-1,-1) marks a padding slot and is skipped.
__global__ void range(int *prefix_sum , dNode *nodes,int **result,int* count ,int n , int d , int tree_size , int *ab)
{
__shared__ int node_idx;
node_idx=0;
int idx = threadIdx.x ;
bool flag = true;
__shared__ int a;
__shared__ int b;
dNode curr;
__shared__ int c;
c=0;
__shared__ int level;
level=0;
// Query bounds for this block.
a=ab[blockIdx.x*2 + 0];
b=ab[blockIdx.x*2 + 1];
__syncthreads();
if(a!=-1 && b!=-1)
{
// Descend until a leaf (or a safety guard) is reached.
while(true)
{
curr = nodes[node_idx];
if(level >= n || node_idx>=tree_size)
{
break;
}
if(curr.isLeaf)
{
flag=true;
break;
}
// Each thread scores one key slot by distance to `a`.
int diff=INT_MAX;
__shared__ int min_idx,min_diff;
min_idx=0;
min_diff=INT_MAX;
if(idx < curr.no_keys )
diff = abs(a - curr.keys[idx]);
__syncthreads();
atomicMin(&min_diff,diff);
//printf("min_diff : %d\n",min_diff);
// The winning thread picks the child on the proper side of its key.
if(min_diff == diff)
{
min_idx = idx ;
if(min_idx == 0 )
{
if(a<curr.keys[0])
{
node_idx = prefix_sum[node_idx] ;
}
else
node_idx = prefix_sum[node_idx]+1;
}
else if(min_idx == d-1 )
{
if(a<curr.keys[d-1])
node_idx = prefix_sum[node_idx] + d-1;
else
node_idx = prefix_sum[node_idx] + d ;
}
else
{
if(a<curr.keys[min_idx])
{
node_idx = prefix_sum[node_idx] + min_idx;
}
else if(a>=curr.keys[min_idx])
{
node_idx = prefix_sum[node_idx] + min_idx + 1;
}
}
}
}
// Leaf scan: thread 0 walks forward through `nodes` collecting matches
// until a key exceeds b or the tree array is exhausted.
__shared__ int ele_count;
ele_count=0;
bool flag=false;
int ite=node_idx;
__syncthreads();
while(ele_count < n && idx==0)
{
for(int i=0;i<curr.no_keys;i++)
{
if( a <= curr.keys[i] && curr.keys[i] <= b )
{
c++;
result[blockIdx.x * n + ele_count]=curr.data_pointer[i];
ele_count++;
}
else if (curr.keys[i] > b)
flag=true;
}
if(flag)
break;
ite++;
if(ite >= tree_size)
break;
curr = nodes[ite];
}
// printf("Block %d : %d\n",blockIdx.x,c);
count[blockIdx.x] = c;
//printf("Block %d : %d\n",blockIdx.x,c);
//printf("%d ::: %d\n",blockIdx.x, count[blockIdx.x]);
}
}
// Point lookup kernel: one block per key read from `keys`, with d (=7)
// threads. The block descends from the root toward the key (each thread
// scores one key slot; the closest key steers the descent via prefix_sum).
// At the leaf, each thread checks its own slot for an exact match; on a hit,
// found[blockIdx.x] is set and result[blockIdx.x] receives the host address
// of the key's data row. A key of -1 marks a padding slot and is skipped.
__global__ void find(int *prefix_sum , dNode *nodes , int **result , int* found , int tree_size , int n , int d , int *keys)
{
int idx = threadIdx.x ;
__shared__ int node_idx;
node_idx=0;
__shared__ bool flag;
flag=false;
dNode curr;
__shared__ int key;
key=keys[blockIdx.x];
__shared__ int level;
level=0;
__syncthreads();
if(key!=-1)
{
while(true)
{
//printf("\n");
level++;
// Safety guards: bounded depth and bounded node index.
if(level >= n || node_idx>=tree_size)
{
break;
}
curr = nodes[node_idx];
if(curr.isLeaf )
{
flag=true;
break;
}
// Each thread scores one key slot by distance to the lookup key.
int diff=INT_MAX;
__shared__ int min_idx,min_diff;
min_idx=0;
min_diff=INT_MAX;
if(idx < curr.no_keys )
diff = abs(key - curr.keys[idx]);
atomicMin(&min_diff,diff);
__syncthreads();
//printf("min_diff : %d\n",min_diff);
// The winning thread picks the child on the proper side of its key.
if(min_diff == diff)
{
min_idx = idx ;
if(min_idx == 0 )
{
if(key<curr.keys[0])
{
node_idx = prefix_sum[node_idx] ;
}
else
node_idx = prefix_sum[node_idx]+1;
}
else if(min_idx == d-1 )
{
if(key<curr.keys[d-1])
node_idx = prefix_sum[node_idx] + d-1;
else
node_idx = prefix_sum[node_idx] + d ;
}
else
{
if(key<curr.keys[min_idx])
{
node_idx = prefix_sum[node_idx] + min_idx;
}
else if(key>=curr.keys[min_idx])
{
node_idx = prefix_sum[node_idx] + min_idx + 1;
}
}
}
__syncthreads();
}
}
// At a leaf: every thread tests its own key slot for an exact match.
if(flag)
{
if(curr.keys[idx] == key)
{
//printf("FOUND THE KEY : %d.\n",key);
found[blockIdx.x] = 1;
result[blockIdx.x] = curr.data_pointer[idx];
//printf("AAAA\n");
}
}
}
// Debug/trace kernel (single block, d threads): walks from the root toward
// key `k` using the same closest-key descent as find(), recording the first
// key of every node visited into `keys` and the path length into *count
// (thread 0 only).
__global__ void path_trace(int *prefix_sum , dNode *nodes,int* keys, int *count , int tree_size , int n , int d,int k)
{
int idx = threadIdx.x ;
__shared__ int node_idx;
node_idx=0;
__shared__ int it;
it=0;
bool flag=false;
dNode curr;
__shared__ int key;
key=k;
__shared__ int level;
level = 0;
while(true)
{
//printf("\n");
level++;
// Safety guards: bounded depth and bounded node index.
if(level >= n || node_idx>=tree_size)
{
break;
}
curr = nodes[node_idx];
// Record this node's first key on the traced path.
if(idx==0)
{
keys[it]=curr.keys[0];
it++;
++*count;
}
if(curr.isLeaf )
{
flag=true;
break;
}
// Each thread scores one key slot by distance to the target key.
int diff=INT_MAX;
__shared__ int min_idx,min_diff;
min_idx=0;
min_diff=INT_MAX;
if(idx < curr.no_keys )
diff = abs(key - curr.keys[idx]);
atomicMin(&min_diff,diff);
__syncthreads();
//printf("min_diff : %d\n",min_diff);
// The winning thread picks the child on the proper side of its key.
if(min_diff == diff)
{
min_idx = idx ;
if(min_idx == 0 )
{
if(key<curr.keys[0])
{
node_idx = prefix_sum[node_idx] ;
}
else
node_idx = prefix_sum[node_idx]+1;
}
else if(min_idx == d-1 )
{
if(key<curr.keys[d-1])
node_idx = prefix_sum[node_idx] + d-1;
else
node_idx = prefix_sum[node_idx] + d ;
}
else
{
if(key<curr.keys[min_idx])
{
node_idx = prefix_sum[node_idx] + min_idx;
}
else if(key>=curr.keys[min_idx])
{
node_idx = prefix_sum[node_idx] + min_idx + 1;
}
}
}
__syncthreads();
}
}
// Allocates a fresh tree node with n key slots (all set to the `null`
// sentinel), n+1 child pointers, and no parent/buffer links. `flag` marks
// it as a leaf.
Node* init_node(int n, bool flag)
{
Node* fresh = new Node;
fresh->parent = NULL;
fresh->buffer = NULL;
fresh->isLeaf = flag;
fresh->isDead = false;
fresh->keys = thrust :: host_vector<int>(n, null);
fresh->pointer = thrust :: host_vector<Node*>(n+1);
return fresh;
}
// Propagates `value` upward: wherever `child` is a non-leftmost child of
// `parent`, the separator key in front of it is overwritten with `value`;
// the walk continues up the tree while the updated ancestor is marked dead.
void unMark(Node* parent, Node* child, int value)
{
if(parent == NULL)
return;
bool updated = false;
for (int slot = 1; slot < parent->pointer.size(); ++slot)
{
if(parent->pointer[slot] == child)
{
updated = true;
parent->keys[slot - 1] = value;
}
}
if(updated && parent->isDead)
unMark(parent->parent, parent, value);
}
// Inserts `value` into `node` (leaf or internal) of the B+ tree. When the
// node is full it is split: keys are redistributed between `node` and a new
// right sibling, and the separator key is inserted into the parent
// (recursively via this same function). `node->buffer` carries the child
// pointer that accompanies `value` when inserting into an internal node.
// Returns the newly created root when the split propagates to the top,
// otherwise NULL.
Node* insert(Node* node, int value)
{
Node* root = NULL;
int node_size = node->keys.size();
bool full_flag = false;
// The node is full when its last key slot is occupied.
if(node->keys[node_size - 1] != null)
full_flag = true;
if(full_flag)
{
// Split path: build oversized temp copies containing the new entry,
// then redistribute around the split point `ubp`.
thrust :: host_vector<int> tempKeys = node->keys;
thrust :: host_vector<Node*> tempPointers = node->pointer;
int tempIndex = thrust :: upper_bound(tempKeys.begin(), tempKeys.end(), value) - tempKeys.begin();
int ubp, newVal;
tempKeys.insert(tempKeys.begin() + tempIndex, value);
if(!node->isLeaf)
tempPointers.insert(tempPointers.begin() + tempIndex + 1, node->buffer);
Node* new_node = init_node(node_size, node->isLeaf);
new_node->parent = node->parent;
if(node->isLeaf)
{
// Link the new leaf into the leaf chain (last pointer slot).
new_node->pointer[node_size] = node->pointer[node_size];
node->pointer[node_size] = new_node;
double tempFloat = node_size + 1;
if(node_size % 2 == 1)
ubp = (int)ceil(tempFloat/2);
else
ubp = (int)ceil(tempFloat/2)-1;
}
else
{
// Internal split: also redistribute child pointers and reparent the
// ones that move to the new sibling.
double tempFloat = node_size + 2;
if(node_size % 2 == 1)
ubp = (int)ceil((tempFloat)/2);
else
ubp = (int)ceil(tempFloat/2)-1;
for (int i = 0; i < tempPointers.size(); ++i)
{
if(i <= ubp)
node->pointer[i] = tempPointers[i];
else
{
new_node->pointer[i - ubp-1] = tempPointers[i];
new_node->pointer[i - ubp-1]->parent = new_node;
if(i <= node_size)
node->pointer[i] = NULL;
}
}
// The middle key moves up instead of staying in either half.
newVal = tempKeys[ubp];
tempKeys.erase(tempKeys.begin() + ubp);
}
// Redistribute keys: first ubp stay, the rest go to the new sibling.
for (int i = 0; i < tempKeys.size(); ++i)
{
if(i < ubp)
node->keys[i] = tempKeys[i];
else
{
new_node->keys[i - ubp] = tempKeys[i];
if(i < node_size)
node->keys[i] = null;
}
}
if(node->isDead && value != node->keys[0] && tempIndex < ubp)
{
node->isDead = false;
unMark(node->parent, node, value);
}
tempIndex = upper_bound(new_node->keys.begin(), new_node->keys.end(), node->keys[ubp - 1]) - new_node->keys.begin();
if(new_node->keys[tempIndex] == null)
{
newVal = new_node->keys[0];
new_node->isDead = true;
}
else if(node->isLeaf)
newVal = new_node->keys[tempIndex];
// Push the separator up: either recurse into the parent, or create a
// brand-new root when the split reached the top.
if(node->parent != NULL)
{
node->parent->buffer = new_node;
root = insert(node->parent, newVal);
}
else
{
root = init_node(node_size, false);
root->keys[0] = newVal;
root->pointer[0] = node;
root->pointer[1] = new_node;
node->parent = root;
new_node->parent = root;
}
}
else
{
// Non-full path: shift keys (and, for internal nodes, child pointers)
// right by one to open a slot, inserting `value` in sorted position.
bool insert_flag = false;
int tempKey = null;
Node* tempPointer = NULL;
for (int i = 0; i < node_size; i++)
{
if(insert_flag)
{
int temp = node->keys[i] ;
node->keys[i]=tempKey ;
tempKey = temp ;
if(!node->isLeaf)
{
Node* temp = node->pointer[i + 1];
node->pointer[i + 1] = tempPointer ;
tempPointer = temp;
//swap(node->pointer[i + 1], tempPointer);
}
}
else
{
if(value < node->keys[i] || node->keys[i] == null)
{
insert_flag = true;
tempKey = node->keys[i];
node->keys[i] = value;
if(!node->isLeaf)
{
tempPointer = node->pointer[i + 1];
node->pointer[i + 1] = node->buffer;
}
}
if(value != node->keys[0] && node->isDead)
{
node->isDead = false;
unMark(node->parent, node, value);
}
}
}
}
return root;
}
// Descends from `node` to the leaf whose key range should contain `value`.
// `up` selects upper-bound routing: when false, an exact match whose right
// child is marked dead descends into the left child instead.
Node* find_pos(Node* node, int value, bool up)
{
while(!node->isLeaf)
{
int lb = ninf, ub, node_size = node->keys.size(), index;
for (int i = 0; i < node_size; i++)
{
// A null slot ends the key list; take this child slot.
if(node->keys[i] == null)
{
index = i;
break;
}
ub = node->keys[i];
// value in [lb, ub): descend into child i.
if(lb <= value && value < ub)
{
index = i;
break;
}
else if(lb <= value && value == ub && !up && node->pointer[i + 1]->isDead)
{
index = i;
break;
}
else
index = i + 1;
lb = ub;
}
node = node->pointer[index];
}
return node;
}
// Inserts `value` into the leaf located by find_pos; when the insert split
// all the way to the top, the node returned by insert() becomes the new root.
Node* insert_(Node* root, int value)
{
Node* grown = insert(find_pos(root, value, true), value);
return (grown != NULL) ? grown : root;
}
int main(int argc,char **argv)
{
int n,m;
FILE *inputfilepointer;
char *inputfilename = argv[1];
inputfilepointer = fopen( inputfilename , "r");
if ( inputfilepointer == NULL ) {
printf( "input.txt file failed to open." );
return 0;
}
fscanf( inputfilepointer, "%d", &n );
fscanf( inputfilepointer, "%d", &m );
int arr[n][m];
for(int i=0;i<n;i++)
{
for(int j=0;j<m;j++)
{
fscanf( inputfilepointer, "%d", &arr[i][j] );
}
}
int d=7;
int keys[n];
int min_key = INT_MAX;
for(int i=0;i<n;i++)
{
keys[i]=arr[i][0];
if(min_key > keys[i])
min_key=keys[i];
}
Node *root=init_node(d,true);
for(int i=0;i<n;i++)
{
root = insert_(root , keys[i]);
}
int idx = 0 ;
thrust :: host_vector<int>t;
Node *node = root ;
thrust :: host_vector<Node*>tree;
tree.push_back(node);
t.push_back(1);
while (idx < tree.size())
{
int count=0;
Node *temp = tree[idx];
idx++;
if(!temp->isLeaf)
{
for(int i=0;i<=d;i++)
{
if(temp->pointer[i] != NULL)
{
count++;
tree.push_back(temp->pointer[i]);
}
}
t.push_back(count);
}
}
dNode* dtree=(dNode*)malloc(tree.size()*sizeof(dNode));
for(int i=0;i<tree.size();i++)
{
Node *curr=tree[i];
dNode new_curr;
new_curr.isLeaf = curr->isLeaf;
for(int j=0;j<d;j++)
{
new_curr.keys[j] = curr->keys[j];
new_curr.pointer[j] = curr->pointer[j];
}
new_curr.pointer[d]=curr->pointer[d];
dtree[i]=new_curr;
}
for(int i=0;i<tree.size();i++)
{
int count=0;
Node* curr = tree[i];
for(int j=0;j<d;j++)
{
if(curr->keys[j]!=null )
count++;
}
dtree[i].no_keys=count;
if(curr->isLeaf)
{
for(int j=0;j<dtree[i].no_keys;j++)
{
int val = curr->keys[j];
for(int k=0;k<n;k++)
{
if(val == arr[k][0])
{
dtree[i].data_pointer[j]=&arr[k][0];
break;
}
}
}
}
}
int prefix_sum[t.size()-1];
prefix_sum[0]=1;
for(int i=1;i<t.size()-1;i++)
{
prefix_sum[i]=t[i]+prefix_sum[i-1];
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaEventRecord(start,0);
dNode* d_tree ;
cudaMalloc(&d_tree , tree.size()*sizeof(dNode)) ;
cudaMemcpy(d_tree , dtree, tree.size()*sizeof(dNode), cudaMemcpyHostToDevice);
int * d_prefix_sum ;
cudaMalloc(&d_prefix_sum,(t.size()-1)*sizeof(int));
cudaMemcpy(d_prefix_sum , prefix_sum, (t.size()-1)*sizeof(int), cudaMemcpyHostToDevice);
char *outputfilename = argv[2];
FILE *outputfilepointer;
outputfilepointer = fopen(outputfilename,"w");
int q;
//scanf("%d",&q);
fscanf( inputfilepointer, "%d", &q );
while(q--)
{
int type;
fscanf( inputfilepointer, "%d", &type );
if(type == 1)
{
int p;
fscanf( inputfilepointer, "%d", &p );
int find_keys[p];
for(int i=0;i<p;i++)
{
fscanf( inputfilepointer, "%d", &find_keys[i] );
}
int no_calls=ceil(float(p)/float(100));
int extra = p%100;
int idx=0;
int *h_result[100];
if(extra == 0)
{
for(int i=0;i<(no_calls)*100;i+=100)
{
idx=i;
int h_keys[100];
int ite=0;
for(int x=i;x<i+100;x++)
{
h_keys[ite]=find_keys[x];
ite++;
}
int *d_keys;
cudaMalloc(&d_keys,100*sizeof(int));
cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice);
int found[100];
for(int y=0;y<100;y++)
found[y]=0;
int *d_found;
cudaMalloc(&d_found , 100*sizeof(int));
cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice);
int **d_result;
cudaMalloc(&d_result,100*sizeof(int*));
find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys);
cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost);
cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int j=0;j<100;j++)
{
if(found[j])
{
int * addr = h_result[j];
for(int k=0;k<m;k++)
{
fprintf( outputfilepointer, "%d ", addr[k]);
}
fprintf( outputfilepointer, "\n");
}
else
{
fprintf( outputfilepointer, "-1\n");
}
}
}
}
if(extra!=0)
{
for(int i=0;i<(no_calls-1)*100;i+=100)
{
idx=i;
int h_keys[100];
int ite=0;
for(int x=i;x<i+100;x++)
{
h_keys[ite]=find_keys[x];
ite++;
}
int *d_keys;
cudaMalloc(&d_keys,100*sizeof(int));
cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice);
int found[100];
for(int y=0;y<100;y++)
found[y]=0;
int *d_found;
cudaMalloc(&d_found , 100*sizeof(int));
cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice);
int **d_result;
cudaMalloc(&d_result,100*sizeof(int*));
find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys);
cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost);
cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int j=0;j<100;j++)
{
if(found[j])
{
int * addr = h_result[j];
for(int k=0;k<m;k++)
{
fprintf( outputfilepointer, "%d ", addr[k]);
}
fprintf( outputfilepointer, "\n");
}
else
{
fprintf( outputfilepointer,"-1\n" );
}
}
}
int h_keys[100]={-1};
idx=0;
for(int i=(no_calls-1)*100;i<p;i++)
{
h_keys[idx]=find_keys[i];
idx++;
}
int *d_keys;
cudaMalloc(&d_keys,100*sizeof(int));
cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice);
int found[100];
for(int y=0;y<100;y++)
found[y]=0;
int *d_found;
cudaMalloc(&d_found , 100*sizeof(int));
cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice);
int **d_result;
cudaMalloc(&d_result,100*sizeof(int*));
find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys);
cudaMemcpy(h_result,d_result , 100*sizeof(int*) ,cudaMemcpyDeviceToHost);
cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int i=0;i<extra;i++)
{
if(found[i])
{
int * addr = h_result[i];
for(int k=0;k<m;k++)
{
fprintf( outputfilepointer, "%d ", addr[k]);
//printf("%d ",addr[k]);
}
//printf("\n");
fprintf( outputfilepointer, "\n");
}
else
{
//printf("-1\n");
fprintf( outputfilepointer, "-1\n");
}
}
}
}
else if(type == 2)
{
int p;
fscanf( inputfilepointer, "%d", &p );
int points[p][2];
for(int i=0;i<p;i++)
{
fscanf( inputfilepointer, "%d", &points[i][0] ); //scaning for toll tax zone passing time
fscanf( inputfilepointer, "%d", &points[i][1] ); //scaning for toll tax zone passing time
}
int no_calls=ceil(float(p)/float(100));
int extra = p%100;
if(extra == 0)
{
for(int i=0;i<(no_calls)*100;i+=100)
{
idx=0;
int ab[100][2];
for(int x=i;x<i+100;x++)
{
ab[idx][0]=points[x][0];
ab[idx][1]=points[x][1];
idx++;
}
int *d_ab;
cudaMalloc(&d_ab,200*sizeof(int));
cudaMemcpy(d_ab,ab,200*sizeof(int),cudaMemcpyHostToDevice);
int **h_result;
h_result = (int**)malloc(100*n*sizeof(int*));
int **d_result;
cudaMalloc(&d_result,100*n*sizeof(int*));
int count[100];
for(int y=0;y<100;y++)
count[y]=-1;
int *d_count;
cudaMalloc(&d_count,100*sizeof(int));
cudaMemcpy(d_count ,count , 100*(sizeof(int)) , cudaMemcpyHostToDevice);
range<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab);
cudaMemcpy(h_result,d_result,100*n*sizeof(int*),cudaMemcpyDeviceToHost);
cudaMemcpy(count , d_count , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int l=0;l<100;l++)
{
if(count[l] > 0)
{
for(int j=0;j<count[l];j++)
{
int *addr = h_result[l*n + j];
for(int k=0;k<m;k++)
{
fprintf( outputfilepointer, "%d " , addr[k]);
//printf("%d ",addr[k]);
}
//printf("\n");
fprintf( outputfilepointer, "\n");
}
}
else if(count[l]==0)
{
fprintf( outputfilepointer, "-1\n" );
//printf("-1\n");
}
}
}
}
if(extra!=0)
{
for(int i=0;i<(no_calls-1)*100;i+=100)
{
idx=0;
int ab[100][2];
for(int x=i;x<i+100;x++)
{
printf("%d & %d \n", idx , x);
ab[idx][0]=points[x][0];
ab[idx][1]=points[x][1];
idx++;
}
int *d_ab;
cudaMalloc(&d_ab,200*sizeof(int));
cudaMemcpy(d_ab,ab,200*sizeof(int),cudaMemcpyHostToDevice);
int **h_result;
h_result = (int**)malloc(100*n*sizeof(int*));
int **d_result;
cudaMalloc(&d_result,100*n*sizeof(int*));
int count[100];
for(int y=0;y<100;y++)
count[y]=-1;
int *d_count;
cudaMalloc(&d_count,100*sizeof(int));
cudaMemcpy(d_count ,count , 100*(sizeof(int)) , cudaMemcpyHostToDevice);
range<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab);
cudaMemcpy(h_result,d_result,100*n*sizeof(int*),cudaMemcpyDeviceToHost);
cudaMemcpy(count , d_count , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int l=0;l<100;l++)
{
if(count[l] > 0)
{
for(int j=0;j<count[l];j++)
{
int *addr = h_result[l*n + j];
for(int k=0;k<m;k++)
{
fprintf( outputfilepointer, "%d " , addr[k]);
//printf("%d ",addr[k]);
}
//printf("\n");
fprintf( outputfilepointer, "\n");
}
}
else if(count[l]==0)
{
fprintf( outputfilepointer, "-1\n" );
//printf("-1\n");
}
}
}
int ab[100][2];
for(int x=0;x<100;x++)
{
ab[x][0]=-1;
ab[x][1]=-1;
}
idx=0;
for(int i=(no_calls-1)*100;i<p;i++)
{
ab[idx][0]=points[i][0];
ab[idx][1]=points[i][1];
idx++;
}
int *d_ab;
cudaMalloc(&d_ab,200*sizeof(int));
cudaMemcpy(d_ab,ab,200*sizeof(int),cudaMemcpyHostToDevice);
int **h_result;
h_result = (int**)malloc(100*n*sizeof(int*));
int **d_result;
cudaMalloc(&d_result,100*n*sizeof(int*));
int count[100];
for(int y=0;y<100;y++)
count[y]=-1;
int *d_count;
cudaMalloc(&d_count,100*sizeof(int));
cudaMemcpy(d_count ,count , 100*(sizeof(int)) , cudaMemcpyHostToDevice);
range<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_count , n , d , tree.size(), d_ab);
cudaDeviceSynchronize();
cudaMemcpy(h_result,d_result,100*n*sizeof(int*),cudaMemcpyDeviceToHost);
cudaMemcpy(count , d_count , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
for(int l=0;l<extra;l++)
{
if(count[l] > 0)
{
for(int j=0;j<count[l];j++)
{
int *addr = h_result[l*n + j];
for(int k=0;k<m;k++)
{
fprintf( outputfilepointer, "%d " , addr[k]);
//printf("%d ",addr[k]);
}
//printf("\n");
fprintf( outputfilepointer, "\n");
}
}
else if(count[l]==0)
{
fprintf( outputfilepointer, "-1\n" );
//printf("-1\n");
}
}
}
}
else if(type == 3)
{
//int p=3;
int p;
//scanf("%d",&p);
fscanf( inputfilepointer, "%d", &p ); //scaning for toll tax zone passing time
int find_keys[p][3];
//int find_keys[p][3]={{21,4,987},{18,3,143},{6,2,100}};
for(int i=0;i<p;i++)
{
//scanf("%d",&find_keys[i][0]);
//scanf("%d",&find_keys[i][1]);
//scanf("%d",&find_keys[i][2]);
fscanf( inputfilepointer, "%d", &find_keys[i][0] ); //scaning for toll tax zone passing time
fscanf( inputfilepointer, "%d", &find_keys[i][1] ); //scaning for toll tax zone passing time
fscanf( inputfilepointer, "%d", &find_keys[i][2] ); //scaning for toll tax zone passing time
}
int no_calls=ceil(float(p)/float(100));
int extra = p%100;
int idx=0;
if(extra == 0)
{
for(int i=0;i<(no_calls)*100;i+=100)
{
idx=i;
int h_keys[100];
int ite=0;
for(int x=i;x<i+100;x++)
{
h_keys[ite]=find_keys[x][0];
ite++;
}
int *d_keys;
cudaMalloc(&d_keys,100*sizeof(int));
cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice);
int found[100];
for(int y=0;y<100;y++)
found[y]=0;
int *d_found;
cudaMalloc(&d_found , 100*sizeof(int));
cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice);
int *h_result[100];
int **d_result;
cudaMalloc(&d_result,100*sizeof(int*));
find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size(), n , d , d_keys);
cudaDeviceSynchronize();
cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost);
cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
for(int j=0;j<100;j++)
{
if(found[j])
{
int * addr = h_result[j];
addr[find_keys[i+j][1]-1] = addr[find_keys[i+j][1]-1] + find_keys[i+j][2];
}
}
}
}
if(extra!=0)
{
for(int i=0;i<(no_calls-1)*100;i+=100)
{
//printf("Inside type 3 : %d\n",i);
idx=i;
int h_keys[100];
int ite=0;
for(int x=i;x<i+100;x++)
{
h_keys[ite]=find_keys[x][0];
ite++;
}
int *d_keys;
cudaMalloc(&d_keys,100*sizeof(int));
cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice);
int found[100];
for(int y=0;y<100;y++)
found[y]=0;
int *d_found;
cudaMalloc(&d_found , 100*sizeof(int));
cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice);
int *h_result[100];
int **d_result;
cudaMalloc(&d_result,100*sizeof(int*));
find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size() , n , d , d_keys);
cudaDeviceSynchronize();
cudaMemcpy(h_result,d_result,100*sizeof(int*),cudaMemcpyDeviceToHost);
cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
for(int j=0;j<100;j++)
{
if(found[j])
{
int * addr = h_result[j];
addr[find_keys[i+j][1]-1] = addr[find_keys[i+j][1]-1] + find_keys[i+j][2];
}
else
{
//printf("-1\n");
}
}
}
int h_keys[100];
for(int y=0;y<100;y++)
{
h_keys[y]=-1;
}
idx=0;
for(int i=(no_calls-1)*100;i<p;i++)
{
h_keys[idx]=find_keys[i][0];
idx++;
}
int *d_keys;
cudaMalloc(&d_keys,100*sizeof(int));
cudaMemcpy(d_keys,h_keys,100*sizeof(int),cudaMemcpyHostToDevice);
int found[100];
for(int y=0;y<100;y++)
found[y]=0;
int *d_found;
cudaMalloc(&d_found , 100*sizeof(int));
cudaMemcpy(d_found , found, 100*sizeof(int) , cudaMemcpyHostToDevice);
int *h_result[100];
int **d_result;
cudaMalloc(&d_result,100*sizeof(int*));
find<<<100,7>>>(d_prefix_sum , d_tree , d_result , d_found , tree.size(), n , d , d_keys);
cudaDeviceSynchronize();
cudaMemcpy(h_result,d_result , 100*sizeof(int*) ,cudaMemcpyDeviceToHost);
cudaMemcpy(found , d_found , 100*(sizeof(int)) , cudaMemcpyDeviceToHost);
idx = (no_calls-1)*100;
for(int i=0;i<extra;i++)
{
if(found[i])
{
int * addr = h_result[i];
addr[find_keys[i+idx][1] - 1] = addr[find_keys[i+idx][1] - 1] + find_keys[i+idx][2];
//printf("\n");
//fprintf( outputfilepointer, "\n");
}
else
{
//printf("-1\n");
//fprintf( outputfilepointer, "-1\n");
}
}
}
}
else
{
int key_;
fscanf( inputfilepointer, "%d", &key_ );
int kk[n];
int *k;
cudaMalloc(&k,n*sizeof(int));
int h_count=0;
int *d_count;
cudaMalloc(&d_count,sizeof(int));
cudaMemcpy(d_count,&h_count,sizeof(int),cudaMemcpyHostToDevice);
path_trace<<<1,7>>>(d_prefix_sum , d_tree , k , d_count , tree.size() , n , d , key_);
cudaMemcpy(&h_count,d_count,sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(kk,k,n*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<h_count;i++)
{
fprintf( outputfilepointer, "%d ",kk[i]);
//printf("%d ",kk[i]);
}
//printf("\n");
fprintf( outputfilepointer, "\n");
}
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Time taken by function to execute is: %.6f ms\n", milliseconds);
fclose( outputfilepointer );
fclose( inputfilepointer );
}
|
22,913 | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// Squares each element of a[] into c[] — one element per thread.
// Only the first 100 threads do work; the host launches 1024 threads.
__global__ void arrayadd(int *a, int *c) {
    const int i = threadIdx.x;
    if (i >= 100)
        return;
    c[i] = a[i] * a[i];
}
// Squares 100 integers on the GPU and prints host value vs. device result.
// Fixes vs. original: device buffers are freed (both were leaked), launch
// errors are surfaced, and main returns a value.
int main()
{
    const int size = 100;
    int a[size], c[size];
    int *d_a = NULL, *d_c = NULL;  // device buffers (renamed from h_a/h_c — they live on the GPU)

    for (int i = 0; i < size; i++)
    {
        a[i] = i * 8;
        c[i] = 0;
    }

    const int gpu_size = sizeof(int) * size;
    cudaMalloc((void**)&d_a, gpu_size);
    cudaMalloc((void**)&d_c, gpu_size);
    cudaMemcpy(d_a, a, gpu_size, cudaMemcpyHostToDevice);

    // 1024 threads launched; the kernel guards indices >= 100.
    arrayadd<<<1,1024>>>(d_a, d_c);

    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(c, d_c, gpu_size, cudaMemcpyDeviceToHost);

    cudaError_t err = cudaGetLastError();  // surface launch/runtime errors
    if (err != cudaSuccess)
        cout << "CUDA error: " << cudaGetErrorString(err) << "\n";

    cout << "Array_Square is \n";
    for (int i = 0; i < size; i++)
    {
        cout << a[i] << " * " << a[i] << " = " << c[i] << "\n";
    }

    // Fix: the original leaked both device allocations.
    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}
|
22,914 | #include <stdio.h>
// Column-strided variant: x comes from blockIdx only, so consecutive threads
// in a block walk down a column of the dimx-wide array (poor coalescing —
// this is the baseline the other kernels are compared against).
__global__ void kernel_A( float *g_data, int dimx, int dimy )
{
    const int ix  = blockIdx.x;
    const int iy  = blockIdx.y * blockDim.y + threadIdx.y;
    const int idx = iy * dimx + ix;

    float v = g_data[idx];
    // Odd columns apply log, even columns apply cos — same as the original branch.
    v += (ix % 2) ? sqrtf(logf(v) + 1.f) : sqrtf(cosf(v) + 1.f);
    g_data[idx] = v;
}
// Flat 1D variant: each thread owns one element; coalesced accesses.
__global__ void kernel_B( float *g_data, int dimx, int dimy )
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;

    float v = g_data[id];
    // Odd indices apply log, even indices apply cos — same as the original branch.
    v += (id % 2) ? sqrtf(logf(v) + 1.f) : sqrtf(cosf(v) + 1.f);
    g_data[id] = v;
}
// Vectorized variant: each thread processes two floats via one float2
// load/store (the host halves the element count accordingly).
__global__ void kernel_C( float * _g_data, int dimx, int dimy )
{
    float2 *data = reinterpret_cast<float2 *>(_g_data);
    const int id = blockIdx.x * blockDim.x + threadIdx.x;

    float2 v = data[id];
    v.x += sqrtf(cosf(v.x) + 1.f);
    v.y += sqrtf(logf(v.y) + 1.f);
    data[id] = v;
}
// Vectorized variant: each thread processes four floats via one float4
// load/store (the host quarters the element count accordingly).
__global__ void kernel_D( float * _g_data, int dimx, int dimy )
{
    float4 *data = reinterpret_cast<float4 *>(_g_data);
    const int id = blockIdx.x * blockDim.x + threadIdx.x;

    float4 v = data[id];
    v.x += sqrtf(cosf(v.x) + 1.f);
    v.y += sqrtf(logf(v.y) + 1.f);
    v.z += sqrtf(cosf(v.z) + 1.f);
    v.w += sqrtf(logf(v.w) + 1.f);
    data[id] = v;
}
// Times `nreps` launches of `kernel` over a dimx x dimy domain and returns
// the average per-launch time in milliseconds.
// Assumes dimx/dimy are divisible by blockx/blocky (no remainder handling).
// Fix: replaced the deprecated cudaThreadSynchronize() with
// cudaEventSynchronize(stop), which is the documented way to make the stop
// event valid before cudaEventElapsedTime reads it.
float timing_experiment( void (*kernel)( float*, int,int), float *d_data, int dimx, int dimy, int nreps, int blockx, int blocky )
{
    float elapsed_time_ms = 0.0f;
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );

    dim3 block( blockx, blocky );
    dim3 grid( dimx/block.x, dimy/block.y );

    cudaEventRecord( start, 0 );
    for(int i=0; i<nreps; i++) // do not change this loop, it's not part of the algorithm - it's just to average time over several kernel launches
        kernel<<<grid,block>>>( d_data, dimx,dimy );
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );

    cudaEventElapsedTime( &elapsed_time_ms, start, stop );
    elapsed_time_ms /= nreps;

    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    return elapsed_time_ms;
}
// Benchmarks one of four elementwise kernels (A-D) over a 2K x 2K float grid
// and optionally checks the chosen version's output against kernel_A.
// argv[1] = version (1..4), argv[2] = nonzero to run the correctness check.
int main(int argc, char *argv[])
{
    //begin choosing whether testing correctness, and code version
    size_t version = 1;
    bool testCorretness = 0;
    if(argc >= 2)
        version = atoi(argv[1]);
    if(argc >= 3)
        testCorretness = atoi(argv[2]);
    //end choosing whether testing correctness, and code version
    int dimx = 2*1024;
    int dimy = 2*1024;
    int nreps = 10;
    int nbytes = dimx*dimy*sizeof(float);
    float *d_data=0, *h_data=0;
    cudaMalloc( (void**)&d_data, nbytes );
    if( 0 == d_data )
    {
        printf("couldn't allocate GPU memory\n");
        return -1;
    }
    printf("allocated %.2f MB on GPU\n", nbytes/(1024.f*1024.f) );
    h_data = (float*)malloc( nbytes );
    if( 0 == h_data )
    {
        printf("couldn't allocate CPU memory\n");
        return -2;
    }
    printf("allocated %.2f MB on CPU\n", nbytes/(1024.f*1024.f) );
    // Values in [10, 265] keep logf() well away from its singularity at 0.
    for(int i=0; i<dimx*dimy; i++)
        h_data[i] = 10.f + rand() % 256;
    cudaMemcpy( d_data, h_data, nbytes, cudaMemcpyHostToDevice );
    float elapsed_time_ms=0.0f;
    //start choosing different versions and run
    // B/C/D treat the grid as a flat 1D array; C and D shrink the element
    // count because each thread handles a float2 / float4.
    if ( version == 1 ) {
        elapsed_time_ms = timing_experiment( kernel_A, d_data, dimx,dimy, nreps, 1, 512 );
    } else if ( version == 2 ) {
        elapsed_time_ms = timing_experiment( kernel_B, d_data, dimx*dimy, 1, nreps, 256, 1 );
    } else if ( version == 3 ) {
        elapsed_time_ms = timing_experiment( kernel_C, d_data, dimx*dimy/2, 1, nreps, 256, 1 );
    } else if ( version == 4 ) {
        elapsed_time_ms = timing_experiment( kernel_D, d_data, dimx*dimy/4, 1, nreps, 256, 1 );
    } else {
        printf( "code version does not exist.\n" );
        return -3;
    }
    printf("%c: %8.6f ms\n", (char)(version-1+'A'), elapsed_time_ms );
    printf("CUDA: %s\n", cudaGetErrorString( cudaGetLastError() ) );
    //end choosing different versions and run
    //start test correctness
    if(testCorretness) {
        printf("\ncorrectness:\n");
        //read data from gpu to array "h_gpuRes"
        float *h_gpuRes=0;
        h_gpuRes = (float*)malloc( nbytes );
        if ( 0 == h_gpuRes )
        {
            printf("couldn't allocate CPU memory\n");
            return -2;
        }
        cudaMemcpy( h_gpuRes, d_data, nbytes, cudaMemcpyDeviceToHost);
        //execute the original version to test correctness
        // d_data is reloaded from h_data so kernel_A starts from the same
        // input the tested version saw.
        cudaMemcpy( d_data, h_data, nbytes, cudaMemcpyHostToDevice );
        elapsed_time_ms = timing_experiment( kernel_A, d_data, dimx,dimy, nreps, 1, 512 );
        //read kernel A's data from gpu to array "h_gpuResA"
        float *h_gpuResA=0;
        h_gpuResA = (float*)malloc( nbytes );
        if ( 0 == h_gpuResA )
        {
            printf("couldn't allocate CPU memory\n");
            return -2;
        }
        cudaMemcpy( h_gpuResA, d_data, nbytes, cudaMemcpyDeviceToHost);
        //compare result
        int i;
        for(i=0; i<dimx*dimy; i++) {
            if( abs(h_gpuRes[i] - h_gpuResA[i]) > 1e-7 ) {
                printf( "calculation error in GPU results in %d\n", i );
                printf( "data: %f\nA's gpu result: %f\nOther version's gpu result: %f\n", h_data[i], h_gpuResA[i], h_gpuRes[i]);
                break;
            }
        }
        // Loop ran to completion <=> no mismatch was found.
        if( i >= dimx*dimy ) {
            printf( "calculation correct in GPU results! Congrats!\n" );
        }
        //release cpu memory
        if( h_gpuRes )
            free( h_gpuRes);
        if( h_gpuResA )
            free( h_gpuResA);
    }
    //end test correctness
    if( d_data )
        cudaFree( d_data );
    if( h_data )
        free( h_data );
    // NOTE(review): cudaThreadExit() is deprecated; cudaDeviceReset() is the
    // modern equivalent.
    cudaThreadExit();
    return 0;
}
|
22,915 | /* Good Example of handling main-stack overflow */
#include <iostream>
#include <cmath>
#include <cuda.h>
#include <string>
#include <chrono>
#define PI 3.1415927
// Aborts with a diagnostic when a CUDA call failed (used via HANDLE_ERROR).
static void HandleError(cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Element-wise vector add using a grid-stride loop, so any launch
// configuration covers all vec_size elements.
__global__ void addVector(int vec_size, float* v1, float* v2, float* vec_out) {
    const unsigned step = blockDim.x * gridDim.x;
    for (unsigned i = blockIdx.x * blockDim.x + threadIdx.x; i < vec_size; i += step) {
        vec_out[i] = v1[i] + v2[i];
    }
}
// Usage: prog <log2(vector length)> <threads per block> <blocks per grid>
// Adds two sine/cosine vectors on the GPU, verifies against the CPU, and
// prints the elapsed device-side time.
// Fixes vs. original: argc is validated before argv[1..3] are dereferenced,
// and the mismatch printf uses %zu for size_t (the original %i with size_t
// arguments is undefined behaviour).
int main(int argc, char* argv[]) {
    if (argc < 4) {
        std::cerr << "usage: " << argv[0]
                  << " <log2_len> <threads_per_block> <blocks>" << std::endl;
        return 1;
    }

    size_t vec_len = 1 << std::stoi(argv[1]);
    size_t size = vec_len * sizeof(float);
    float *v1 = (float *)malloc(size);
    float *v2 = (float *)malloc(size);
    float *v_out = (float *)malloc(size);
    float *dev_v1, *dev_v2, *dev_v_out;

    // CUDA launch geometry
    int nthreads = std::stoi(argv[2]);
    int nblocks = std::stoi(argv[3]);
    // int nblocks = (vec_len+nthreads-1) / nthreads;
    std::cout << "Number of threads per block: " << std::to_string(nthreads) << '\n'
              << "Number of blocks in the grid: " << std::to_string(nblocks) << '\n'
              << "Total threads: " << std::to_string(nthreads*nblocks) << "\n"
              << "Vector length: " << std::to_string(vec_len) << std::endl;
    dim3 nBlocks(nblocks, 1, 1);
    dim3 nThreads(nthreads, 1, 1);

    // Initialise inputs with sine/cosine waves.
    for(size_t i=0; i<vec_len; ++i) {
        v1[i] = std::sin(i*PI*1E-2);
        v2[i] = std::cos(i*PI*1E-2);
    }

    auto t1 = std::chrono::system_clock::now();
    HANDLE_ERROR( cudaMalloc((void**) &dev_v1, size) );
    HANDLE_ERROR( cudaMalloc((void**) &dev_v2, size) );
    HANDLE_ERROR( cudaMalloc((void**) &dev_v_out, size) );
    HANDLE_ERROR( cudaMemcpy(dev_v1, v1, size, cudaMemcpyHostToDevice) );
    HANDLE_ERROR( cudaMemcpy(dev_v2, v2, size, cudaMemcpyHostToDevice) );
    addVector<<< nBlocks, nThreads >>>(vec_len, dev_v1, dev_v2, dev_v_out);
    cudaDeviceSynchronize();
    HANDLE_ERROR( cudaMemcpy(v_out, dev_v_out, size, cudaMemcpyDeviceToHost) );
    auto t2 = std::chrono::system_clock::now();

    // Verify: a device add of identical float operands is bit-exact vs. the
    // host add, so exact comparison is valid here.
    for(size_t i=0; i<vec_len; ++i) {
        if(v1[i] + v2[i] != v_out[i]){
            std::string err_message = "value dismatch at index " + std::to_string(i) + ".\n";
            printf("v1[%zu] = %.18f \nv2[%zu] = %.18f\nv_out[%zu] = %.18f.\n",
                   i, v1[i], i, v2[i], i, v_out[i]);
            throw std::runtime_error(err_message);
        }
    }
    printf("Work done, time consummed: %.5fms.\n",
           std::chrono::duration <double, std::milli> (t2 - t1).count());

    cudaFree(dev_v1);
    cudaFree(dev_v2);
    cudaFree(dev_v_out);
    free(v1);
    free(v2);
    free(v_out);
    return 0;
}
|
22,916 | #include <stdio.h>
// Shifts a 64-element shared array forward by one: value[i] = i+1 for i < 63;
// the last element keeps its own value.
// Fixes vs. original:
//  - __syncthreads() is no longer inside the divergent `index < 63` branch —
//    a barrier must be reached by every thread in the block, otherwise the
//    behaviour is undefined.
//  - index 63 now stores a defined value instead of leaving value[63]
//    unwritten (the host printed garbage for it).
__global__ void shift_forward(int * value)
{
    int index = threadIdx.x;
    __shared__ int array[64];

    array[index] = threadIdx.x;
    __syncthreads(); // all writes visible before reading a neighbour

    // Read the neighbour (or self for the last element), then barrier once
    // for the whole block before anyone could overwrite shared data.
    int tmp = (index < 63) ? array[index + 1] : array[index];
    __syncthreads();

    value[index] = tmp;
}
// Launches shift_forward on one 64-thread block and prints the result.
// Fixes vs. original: the device buffer is zeroed (so any element the kernel
// does not write reads back as 0, not garbage) and is freed (it was leaked).
int main(int argc,char ** argv)
{
    const int ARRAY_SIZE = 64;
    const int SIZE = ARRAY_SIZE * sizeof(int);

    int * d_out;
    cudaMalloc((void **) &d_out,SIZE);
    cudaMemset(d_out, 0, SIZE);  // defined contents even for unwritten slots

    shift_forward<<<1,64>>>(d_out);

    int h_out[ARRAY_SIZE];
    // Blocking copy synchronizes with the kernel before reading results.
    cudaMemcpy(h_out,d_out,SIZE,cudaMemcpyDeviceToHost);

    for(int i = 0 ; i < ARRAY_SIZE;i++)
    {
        printf("%d ",h_out[i]);
    }
    printf("\n");

    cudaFree(d_out);  // fix: was leaked
    return 0;
}
|
22,917 | //xfail:TIMEOUT
//--gridDim=64 --blockDim=128
#include "common.h"
template <unsigned int blockSize, bool nIsPow2> __global__ void reduceSinglePass(const float *g_idata, float *g_odata, unsigned int n);
template __global__ void reduceSinglePass<128, true>(const float *g_idata, float *g_odata, unsigned int n);
__device__ unsigned int retirementCount = 0;
// Single-pass parallel reduction (NVIDIA "threadFenceReduction" pattern).
// Phase 1: every block reduces its input slice into g_odata[blockIdx.x] via
// reduceBlocks (from common.h). Phase 2: the last block to retire — chosen
// with an atomic ticket on `retirementCount` — sums the per-block partials
// into g_odata[0]. Requires dynamic shared memory sized for reduceBlock.
template <unsigned int blockSize, bool nIsPow2>
__global__ void reduceSinglePass(const float *g_idata, float *g_odata, unsigned int n)
{
    //
    // PHASE 1: Process all inputs assigned to this block
    //
    reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n);
    //
    // PHASE 2: Last block finished will process all partial sums
    //
    if (gridDim.x > 1)
    {
        const unsigned int tid = threadIdx.x;
        __shared__ bool amLast;
        extern float __shared__ smem[];
        // wait until all outstanding memory instructions in this thread are
        // finished, so our partial sum in g_odata is visible to the block
        // that ends up doing phase 2
        __threadfence();
        // Thread 0 takes a ticket
        if (tid==0)
        {
            unsigned int ticket = atomicInc(&retirementCount, gridDim.x);
            // If the ticket ID is equal to the number of blocks, we are the last block!
            amLast = (ticket == gridDim.x-1);
        }
        __syncthreads(); // broadcast amLast (shared) to the whole block
        // The last block sums the results of all other blocks
        if (amLast)
        {
            int i = tid;
            float mySum = 0;
            while (i < gridDim.x)
            {
                mySum += g_odata[i];
                i += blockSize;
            }
            reduceBlock<blockSize>(smem, mySum, tid);
            if (tid==0)
            {
                g_odata[0] = smem[0];
                // reset retirement count so that next run succeeds
                retirementCount = 0;
            }
        }
    }
}
|
22,918 | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include <cuda_runtime_api.h>
#define restrict __restrict__
#define PADDINGCLASS -2
#define EXP 2
#define OUTPUT_FILE "ocuda"
#define INPUT_FILE "data"
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg);
void check_error(cudaError_t err, const char *msg);
void readInput(FILE* file, float* coords, float* coordsnew, int* classes, int spacedim, int numels, int newels);
void writeOutput(float* coords, int* classes, int spacedim, int numels);
__device__ int findMode(float4* elements, int classes_num, int k);
__global__ void findClass(float* coords, float* coordsnew, int* input_classes, float4* d_output, int spacedim, int classes_num, int numels, int offset, int newPointIndex);
__device__ float distance(float* coords, float* coords2, int spacedim);
__global__ void findMin(float4* input, float* coords, float* coordsnew, int* classes, int classes_num, int spacedim, int numels, int offset, float4* result, int k, int newPointIndex, int eleInBlock);
__device__ void swapfloat(float* x, float* y);
__device__ void swapInt(int* x, int* y);
//Declaration of shared-memory. It's going to contains partial minimum of distances
extern __shared__ float4 mPartial[];
// Host driver for the GPU kNN classifier.
// Input file header: "<numels>,<newels>,<classes_num>,<spacedim>", followed by
// numels classified points and newels unclassified points (see readInput).
// Each new point is classified by running k rounds of findClass + findMin,
// which move the k nearest points to the front of the arrays; the point is
// then appended with its modal class (findMode) and numels grows by one.
int main(int argc, char *argv[])
{
    int newels;          // number of points we want to classify
    int k;               // number of nearest points used to classify
    int numels;          // total elements already classified
    int spacedim;
    char filePath[255];  // path + filename of input file
    int classes_num;     // number of classes
    float* h_coords;     // coords of existing points with a class
    float* h_coordsnew;  // coords of points we want to classify
    int* h_classes;      // array containing the class for each point
    //*** Device-variables-declaration ***
    float* d_coords;
    float* d_coordsnew;
    float4* d_result;
    int* d_classes;
    float4* d_output;
    //*** end-device-declaration
    //***cudaEvent-declaration***
    cudaEvent_t before_allocation, before_input, before_upload, before_knn, before_download;
    cudaEvent_t after_allocation, after_input, after_upload, after_knn, after_download;
    //***end-cudaEvent-declaration***
    if (argc > 2)
    {
        strcpy(filePath, argv[1]);
        k = atoi(argv[2]);
    }
    else
    {
        printf("how-to-use: knn <inputfile> <k> \n");
        exit(1);
    }
    //***cuda-init-event***
    check_error(cudaEventCreate(&before_allocation), "create before_allocation cudaEvent");
    check_error(cudaEventCreate(&before_input), "create before_input cudaEvent");
    check_error(cudaEventCreate(&before_upload), "create before_upload cudaEvent");
    check_error(cudaEventCreate(&before_knn), "create before_knn cudaEvent");
    check_error(cudaEventCreate(&before_download), "create before_download cudaEvent");
    check_error(cudaEventCreate(&after_allocation), "create after_allocation cudaEvent");
    check_error(cudaEventCreate(&after_input), "create after_input cudaEvent");
    check_error(cudaEventCreate(&after_upload), "create after_upload cudaEvent");
    check_error(cudaEventCreate(&after_knn), "create after_knn cudaEvent");
    check_error(cudaEventCreate(&after_download), "create after_download cudaEvent");
    //***end-cuda-init-event***
    FILE *fp;
    if((fp = fopen(filePath, "r")) == NULL)
    {
        printf("No such file\n");
        exit(1);
    }
    // NOTE(review): fileSize is computed but never used afterwards.
    fseek(fp, 0L, SEEK_END);
    float fileSize = ftell(fp);
    rewind(fp);
    int count = fscanf(fp, "%d,%d,%d,%d\n", &numels, &newels, &classes_num, &spacedim);
    int totalElements = numels + newels;
    //*** allocation ***
    cudaEventRecord(before_allocation);
    h_coords = (float*) malloc(sizeof(float)*totalElements*spacedim);
    h_coordsnew = (float*) malloc(sizeof(float)*newels*spacedim);
    h_classes = (int*) malloc(sizeof(int)*totalElements);
    const int blockSize = 512;
    int numBlocks;
    //*** device-allocation ***
    check_error(cudaMalloc(&d_coords, totalElements*spacedim*sizeof(float)), "alloc d_coords_x");
    check_error(cudaMalloc(&d_output, ((totalElements + blockSize - 1)/blockSize)*4*sizeof(float)), "alloc d_output");
    check_error(cudaMalloc(&d_classes, totalElements*sizeof(int)), "alloc d_classes");
    check_error(cudaMalloc(&d_result, 4*k*sizeof(float)), "alloc d_result");
    check_error(cudaMalloc(&d_coordsnew, newels*spacedim*sizeof(float)), "alloc d_coordsnew");
    //*** end-device-allocation ***
    cudaEventRecord(after_allocation);
    ///***input-from-file***
    cudaEventRecord(before_input);
    readInput(fp, h_coords, h_coordsnew, h_classes, spacedim, numels, newels);
    cudaEventRecord(after_input);
    fclose(fp);
    ///***end-input-from-file***
    //***copy-arrays-on-device***
    cudaEventRecord(before_upload);
    check_error(cudaMemcpy(d_coords, h_coords, totalElements*spacedim*sizeof(float), cudaMemcpyHostToDevice), "copy d_coords");
    check_error(cudaMemcpy(d_classes, h_classes, totalElements*sizeof(int), cudaMemcpyHostToDevice), "copy d_classes");
    check_error(cudaMemcpy(d_coordsnew, h_coordsnew, newels*spacedim*sizeof(float), cudaMemcpyHostToDevice), "copy d_coordsnew");
    cudaEventRecord(after_upload);
    //***end-copy-arrays-on-device***
    cudaEventRecord(before_knn);
    int i, j;
    for (i = 0; i < newels; i++)
    {
        numBlocks = (numels + blockSize - 1)/blockSize;
        j = 0;
        // Round j selects the (j+1)-th nearest neighbour of new point i:
        // findClass computes per-block minima (offset j skips the already
        // selected points), findMin reduces those partials and swaps the
        // winner to position j of coords/classes.
        for (j = 0; j < k; j++)
        {
            findClass<<<numBlocks, blockSize, blockSize*4*sizeof(float)>>>(
                d_coords, d_coordsnew, d_classes,
                d_output,
                spacedim, classes_num,
                numels, j, i);
            findMin<<<1, blockSize, blockSize*4*sizeof(float)>>>(d_output, d_coords, d_coordsnew, d_classes, classes_num, spacedim, numels, j, d_result, k, i, numBlocks);
        }
        numels++; // the newly classified point joins the training set
    }
    cudaEventRecord(after_knn);
    cudaEventRecord(before_download);
    check_error(cudaMemcpy(h_coords, d_coords, spacedim*totalElements*sizeof(float), cudaMemcpyDeviceToHost), "download coords");
    check_error(cudaMemcpy(h_classes, d_classes, totalElements*sizeof(int), cudaMemcpyDeviceToHost), "download classes");
    cudaEventRecord(after_download);
    check_error(cudaEventSynchronize(after_download), "sync cudaEvents");
    printStats(before_allocation, after_allocation, "[time] allocation");
    printStats(before_input, after_input, "[time] read input file");
    printStats(before_upload, after_upload, "[time] upload host->device");
    printStats(before_knn, after_knn, "[time] knn algorithm");
    printStats(before_download, after_download, "[time] download device->host");
    writeOutput(h_coords, h_classes, spacedim, numels);
    return 0;
}
// Exits with the CUDA error code when `err` indicates failure; `msg` labels
// the failing operation in the diagnostic.
void check_error(cudaError_t err, const char *msg)
{
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "%s : error %d (%s)\n", msg, err, cudaGetErrorString(err));
    exit(err);
}
float runtime;
// Prints the elapsed time in ms between two recorded CUDA events, labelled
// by `msg`. Stores the measurement in the file-global `runtime`.
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg)
{
// cudaEventElapsedTime fails unless both events have completed; the caller
// synchronizes on the final event before calling this.
check_error(cudaEventElapsedTime(&runtime, before, after), msg);
printf("%s %gms\n", msg, runtime);
}
//Parallel reduction to find the k-minimum distances
__global__ void findClass(
float* coords, float* coordsnew,
int* input_classes, float4* d_output,
int spacedim, int classes_num, int numels, int offset, int newPointIndex)
{
int gid = offset + threadIdx.x + blockIdx.x*blockDim.x;
int lid = threadIdx.x;
mPartial[lid] = make_float4(-1, PADDINGCLASS, -1, -1);
if (gid >= numels) return;
float min = distance(&(coordsnew[newPointIndex*spacedim]), &(coords[gid*spacedim]), spacedim);
float d;
int c = input_classes[gid];
int minID = gid;
while (gid < numels)
{
d = distance(&(coordsnew[newPointIndex*spacedim]), &(coords[gid*spacedim]), spacedim);
if(d < min)
{
min = d;
minID = gid;
c = input_classes[gid];
}
gid += gridDim.x*blockDim.x;
}
mPartial[lid] = make_float4(min, (float)c, minID, -1);
//Part 2: reduction in shared memory
int stride = (blockDim.x)/2;
while (stride > 0)
{
__syncthreads();
if (lid < stride && mPartial[lid+stride].y != PADDINGCLASS && mPartial[lid].y != PADDINGCLASS && mPartial[lid+stride].x < mPartial[lid].x)
mPartial[lid] = mPartial[lid+stride];
stride /= 2;
}
/* Part 3: save the block's result in global memory */
if (lid == 0)
d_output[blockIdx.x] = mPartial[0];
}
// Final reduction over the per-block minima produced by findClass.
// One block reduces input[0..eleInBlock) to the overall nearest point, swaps
// it to slot `offset` of coords/classes, stores it in result[offset], and —
// once the k-th neighbour is placed — appends the new point with its modal
// class (findMode).
// Fix: the original early-returned for out-of-range threads before the
// __syncthreads() calls in the reduction loop (undefined behaviour, see
// findClass). Those threads now keep their padding entry and join every
// barrier.
__global__ void findMin(float4* input, float* coords, float* coordsnew, int* classes, int classes_num, int spacedim, int numels, int offset, float4* result, int k, int newPointIndex, int eleInBlock)
{
    int gid = threadIdx.x + blockIdx.x*blockDim.x;
    int lid = threadIdx.x;
    mPartial[lid] = make_float4(-1, PADDINGCLASS, -1, -1);

    if (gid < eleInBlock && gid < blockDim.x)
    {
        float distmin = input[gid].x;
        float classmin = input[gid].y;
        float gidMin = input[gid].z;
        while (gid < eleInBlock)
        {
            if(input[gid].x < distmin)
            {
                distmin = input[gid].x;
                classmin = input[gid].y;
                gidMin = input[gid].z;
            }
            gid += gridDim.x*blockDim.x;
        }
        mPartial[lid] = make_float4(distmin, classmin, gidMin, -1);
    }

    // Part 2: tree reduction in shared memory (PADDINGCLASS entries never win).
    int stride = (blockDim.x)/2;
    while (stride > 0)
    {
        __syncthreads();
        if (lid < stride && mPartial[lid+stride].y != PADDINGCLASS && mPartial[lid].y != PADDINGCLASS && mPartial[lid+stride].x < mPartial[lid].x)
            mPartial[lid] = mPartial[lid + stride];
        stride /= 2;
    }

    /* Part 3: publish the winner and reorder the data set. */
    if (lid == 0)
    {
        input[0] = mPartial[0];
        int minID = mPartial[0].z;
        int i;
        for (i = 0; i < spacedim; i++)
            swapfloat(&(coords[spacedim*minID+i]), &(coords[offset*spacedim+i]));
        swapInt(&(classes[minID]), &(classes[offset]));
        result[offset] = input[0];
        if (offset == k-1)
        {
            // All k neighbours selected: append the new point and classify it.
            int j;
            for (j = 0; j < spacedim; j++)
                coords[spacedim*numels+j] = coordsnew[spacedim*newPointIndex + j];
            classes[numels] = findMode(result, classes_num, k);
        }
    }
}
// read input from file
// Reads `numels` classified points (comma-separated coords followed by a
// class) and then `newels` unclassified points (coords followed by "-1")
// from an already-positioned input stream.
void readInput(FILE* file, float* coords, float* coordsnew, int* classes, int spacedim, int numels, int newels)
{
    int count = 0;
    for (int i = 0; i < numels; i++)
    {
        for (int j = 0; j < spacedim; j++)
            count = fscanf(file, "%f,", &(coords[i*spacedim +j]));
        count = fscanf(file, "%d\n", &(classes[i]));
    }
    for (int i = 0; i < newels; i++)
    {
        for (int j = 0; j < spacedim; j++)
            count = fscanf(file, "%f,", &(coordsnew[i*spacedim+j]));
        count = fscanf(file, "-1\n");
    }
    count++; // keeps `count` "used" to silence unused-result warnings
}
//Write Output on file
// Writes all classified points to OUTPUT_FILE, one line per point:
// comma-separated coordinates followed by the class.
void writeOutput(float* coords, int* classes, int spacedim, int numels)
{
    FILE *fp = fopen(OUTPUT_FILE, "w");
    for (int i = 0; i < numels; i++)
    {
        for (int j = 0; j < spacedim; j++)
            fprintf(fp, "%lf,", coords[i*spacedim+j]);
        fprintf(fp, "%d\n", classes[i]);
    }
    fclose(fp);
}
// Squared Euclidean distance between two spacedim-dimensional points
// (no sqrt — only relative ordering is needed by the callers).
__device__ float distance(float* coords, float* coords2, int spacedim)
{
    float sum = 0;
    for (int i = 0; i < spacedim; i++)
    {
        const float diff = coords[i] - coords2[i];
        sum += diff * diff;
    }
    return sum;
}
// Exchanges the floats pointed to by x and y.
__device__ void swapfloat(float* x, float* y)
{
    const float tmp = *y;
    *y = *x;
    *x = tmp;
}
// Exchanges the ints pointed to by x and y.
__device__ void swapInt(int* x, int* y)
{
    const int tmp = *y;
    *y = *x;
    *x = tmp;
}
// Returns the modal class among the k selected nearest neighbours
// (elements[i].y holds the class of the i-th nearest point). Ties are broken
// in favour of whichever tied class appears first in the ordered neighbour
// list. Uses in-kernel malloc for the per-class histogram.
__device__ int findMode(float4* elements, int classes_num, int k)
{
// NOTE(review): device-side malloc may return NULL when the device heap is
// exhausted; this is not checked here.
int* classCount = (int*) (malloc(sizeof(int)*classes_num));
int i;
for (i = 0; i < classes_num; i++)
classCount[i] = 0;
// Histogram of classes over the k neighbours.
for (i = 0; i < k; i++)
classCount[(int)(elements[i].y)]++;
int max = 0;
int maxValue = classCount[0];
for (i = 1; i < classes_num; i++)
{
int value = classCount[i];
if (value > maxValue)
{
max = i;
maxValue = value;
}
else if (value != 0 && maxValue == value)
{
// Tie: scan the neighbours in nearest-first order and keep whichever
// of the two tied classes shows up first.
int j = 0;
for (j = 0; j < k; j++)
{
if (elements[j].y == i)
{
max = i;
break;
}
else if (elements[j].y == max)
break;
}
}
}
free(classCount);
return max;
}
|
22,919 | #include "includes.h"
/*
sergeim19
April 27, 2015
Burgers equation - GPU CUDA version
*/
#define NADVANCE (4000)
#define nu (5.0e-2)
// uu[j] = u[j]^2 / 2 — the nonlinear flux term of the Burgers equation.
// NOTE(review): no bounds guard — assumes the launch exactly covers the
// array length; confirm at the call site.
__global__ void kernel_calc_uu(double *u_dev, double *uu_dev)
{
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const double u = u_dev[j];
    uu_dev[j] = 0.5 * u * u;
}
22,920 |
// Copies a[] into b[], one element per thread (single-block launch assumed:
// only threadIdx.x is used).
extern "C" __global__ void hello_world(float *a, float *b)
{
    const int tx = threadIdx.x;
    const float v = a[tx];
    b[tx] = v;
}
|
22,921 | #include<iostream>
// One RGB pixel; channels hold values in [0, 254] here (set via rand()%255
// in main and inverted as 255 - c in imageReverse).
struct colours{
int red;
int green;
int blue;
};
// Inverts each RGB channel (255 - c) of the first N pixels.
// Fix: added the `index < N` bounds guard — the host rounds the grid up to a
// multiple of the block size, and the N parameter was previously unused, so
// trailing threads could read/write out of bounds.
__global__ void imageReverse(colours* c_arr, colours* rev_c_arr, int N){
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    if (index >= N)
        return;
    rev_c_arr[index].red = 255 - c_arr[index].red;
    rev_c_arr[index].green = 255 - c_arr[index].green;
    rev_c_arr[index].blue = 255 - c_arr[index].blue;
}
// Builds 1024 random pixels, inverts them on the GPU, and frees the buffers.
// Fix: N is now a compile-time constant — `colours arr[N]` with a runtime
// `int N` is a variable-length array, which is not standard C++ (it only
// compiled as a GNU extension).
int main(){
    constexpr int N = 1024;
    const int size = N*sizeof(colours);
    colours colour_array[N], reverse_colour_array[N];
    colours* d_colour_array, *d_reverse_colour_array;

    // Random channels in [0, 254].
    for(auto& i:colour_array){
        i.red = rand()%255;
        i.green = rand()%255;
        i.blue = rand()%255;
    }

    cudaMalloc(&d_colour_array, size);
    cudaMalloc(&d_reverse_colour_array, size);
    cudaMemcpy(d_colour_array, colour_array, size, cudaMemcpyHostToDevice);

    int threadsPerBlock = 64;
    int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;  // ceil-div
    imageReverse<<<blocksPerGrid,threadsPerBlock>>>(d_colour_array, d_reverse_colour_array, N);

    // Blocking copy synchronizes with the kernel.
    cudaMemcpy(reverse_colour_array, d_reverse_colour_array, size, cudaMemcpyDeviceToHost);
    // for(const auto& i:reverse_colour_array){
    // std::cout << i.red << "," << i.green << "," << i.blue << std::endl;
    // }

    cudaFree(d_colour_array);
    cudaFree(d_reverse_colour_array);
    return 0;
}
|
22,922 |
// Includes
#include <stdio.h>
// Type of the array in which we search for the maximum.
// If you use float, don't forget to type %f in the printf later on..
#define TYPE int
#define USE_NAIVE
// Variables
TYPE* h_A;
TYPE* d_A;
// Functions
void Cleanup(void);
void WorstCaseInit(TYPE*, int);
// Maximum of two values; callable from both host and device code.
__device__ __host__ TYPE cumax(TYPE a, TYPE b)
{
    if (a > b)
        return a;
    return b;
}
// Schema des naiven Ansatz
// o o o o o o o o n=1
// |/ |/ |/ |/
// o o o o n=2
// | / | /
// | / | /
// o o n=4
// | /
// | /
// | /
// |/
// o Ergebnis
// One merge step of the naive reduction: single-thread block b folds
// A[2*b*n + n] into A[2*b*n]. Launched as <<<Nh/n, 1>>> from main, so each
// pass doubles the stride between merged pairs; after log2(N) passes the
// maximum sits in A[0].
__global__ void reduce_max_naive(TYPE* A, int n)
{
int i = blockIdx.x * n;
A[2*i] = cumax( A[2*i], A[2*i+n]);
}
// Host code
// Host code: runs the pairwise reduction passes until A[0] holds the
// maximum, times them with CUDA events, and validates against a CPU scan.
int main(int argc, char** argv)
{
    printf("Reduce\n");
    int N = 1<<15;
    int Nh = N / 2;
    size_t size = N * sizeof(TYPE);
    // Allocate input vector h_A
    h_A = (TYPE*)malloc(size);
    // Initialize input vector
    WorstCaseInit(h_A, N);
    // Allocate vector in device memory
    cudaMalloc((void**)&d_A, size);
    // Copy vector from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    // Start tracking of elapsed time.
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );
#ifdef USE_NAIVE // Naive approach
    // Pass n launches Nh/n single-thread blocks, merging pairs 2n apart;
    // after log2(N) passes the maximum sits in d_A[0].
    for (int n=1; n<N; n*=2)
        reduce_max_naive<<<Nh / n,1>>>(d_A, n);
#else // Better approach
    // TODO: Implement!
#endif
    // End tracking of elapsed time.
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, stop );
    printf( "Time: %f ms\n", elapsedTime );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    // Find the truth... :)  (CPU reference maximum)
    TYPE maximum = 0;
    for (int i = 0; i < N; ++i)
        maximum = cumax(h_A[i], maximum);
    // Copy result (first element only) from device memory to host memory
    cudaMemcpy(h_A, d_A, sizeof(TYPE), cudaMemcpyDeviceToHost);
    // Validate result from GPU.
    if (maximum == h_A[0])
        printf("PASSED: %i == %i", maximum, h_A[0]);
    else printf("FAILED: %i != %i", maximum, h_A[0]);
    Cleanup();
}
// Releases device and host memory, resets the device, and waits for ENTER
// before exiting the process.
// Fix: cudaThreadExit() has been deprecated since CUDA 4.0;
// cudaDeviceReset() is its documented replacement.
void Cleanup(void)
{
    // Free device memory
    if (d_A)
        cudaFree(d_A);
    // Free host memory
    if (h_A)
        free(h_A);
    cudaDeviceReset();
    printf("\nPress ENTER to exit...\n");
    fflush(stdout);
    fflush(stderr);
    getchar();
    exit(0);
}
// Fills data[0..n) with ascending values — a sorted (ascending) list is the
// worst case for the reduction's comparison pattern.
void WorstCaseInit(TYPE* data, int n)
{
    for (int i = 0; i < n; ++i)
        data[i] = (TYPE)i;
}
|
22,923 | // Simplified attempt
#define STARTING_MATCHING_COST 100.0f
// One RGB pixel sample with float channels.
// NOTE(review): the channel value range (0..1 vs 0..255) is not established
// by this file — confirm against the image loader.
struct pixel {
float R;
float G;
float B;
};
// Naive block-matching stereo disparity kernel. A grid-stride loop covers
// all pixels: for each left-image pixel, scan up to window_size pixels to
// the right in the right image, pick the offset with the smallest squared
// RGB difference, and output offset^2 (or 0 when no candidate beat
// STARTING_MATCHING_COST).
// NOTE(review): foregroundR/foregroundL are currently ignored — the
// `if (true)` placeholder disables the foreground test, making the -1
// branch dead code.
__global__ void computeDisparity(
const struct pixel * imageR, // Input pixel array of left image
// array dim: image[image_width][image_height]
const struct pixel * imageL, // Input pixel array of right image
const int window_size, // size of window used for block searching
const int image_height, // height of the image, in pixels
const int image_width, // width of the image, in pixels
const char * foregroundR, // Array of foreground flags for each pixel in imageR
const char * foregroundL, // Array of foreground flags for each pixel in imageL
float * disparity_output) // Output array (same dim as imageR/L) of disparity values
{
// Local variables:
long int pixel_index = blockIdx.x * blockDim.x + threadIdx.x; // Index of current pixel
float matching_cost = 0.0;
float min_matching_cost = STARTING_MATCHING_COST;
long int min_cost_offset = 0;
long int offset_pixel_index = 0;
while(pixel_index < image_height * image_width) {
// while... the thread index hasn't gone outside the image dimensions
if (true) { //foregroundL[pixel_index] == 1) {
// Calculate matching cost for this foreground pixel
// ensure that we are not going over the end of the pixel row
offset_pixel_index = pixel_index;
min_matching_cost = STARTING_MATCHING_COST;
min_cost_offset = 0;
for (int offset = 0; offset < window_size; offset++) {
// Stop at the row boundary so the search never wraps to the next line.
if ((pixel_index % image_width) + offset >= image_width) {
break;
}
// Squared RGB distance between the left pixel and the shifted right pixel.
matching_cost = powf(imageL[pixel_index].R - imageR[offset_pixel_index].R, 2);
matching_cost += powf(imageL[pixel_index].G - imageR[offset_pixel_index].G, 2);
matching_cost += powf(imageL[pixel_index].B - imageR[offset_pixel_index].B, 2);
if (matching_cost < min_matching_cost) {
min_matching_cost = matching_cost;
min_cost_offset = offset;
}
offset_pixel_index++;
}
if (min_matching_cost == STARTING_MATCHING_COST) {
disparity_output[pixel_index] = 0;
} else {
disparity_output[pixel_index] = powf(min_cost_offset, 2); // + image_width;
}
} else {
disparity_output[pixel_index] = -1;
}
pixel_index += blockDim.x * gridDim.x; // grid-stride advance
}
}
|
22,924 | #include "includes.h"
// Element-wise product y[i] = x1[i] * x2[i] over `size` elements, using a
// grid-stride loop so any launch configuration covers the whole array.
__global__ void cudaDmult_kernel(unsigned int size, const double *x1, const double *x2, double *y)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step  = gridDim.x * blockDim.x;
    for (unsigned int k = first; k < size; k += step)
        y[k] = x1[k] * x2[k];
}
22,925 | #include <iostream>
#include <fstream>
#include <cmath>
#define N 512
#define THREADS 32
#define BLOCKS 16
#define eps 0.005
using namespace std;
double *devU,*devU_new,*devF;
double *u,*u_new,*f;
double h;
int numberOfBytes;
// Right-hand side of the Poisson problem at grid node (i,j):
// f(x,y) = 4 + 2x^2 - 2x + 2y^2 - 2y, with x = i*h, y = j*h (h is global).
double f1(int i,int j)
{
double x=(double)i*h;
double y=(double)j*h;
return 4.0+2.0*x*x-2.0*x+2.0*y*y-2.0*y;
}
// One damped relaxation sweep updating u IN PLACE on the interior of the
// N x N grid.  h2 is the squared spacing, relax_param the damping factor.
// NOTE(review): threads read neighbour values that other threads are
// concurrently writing, so the update order is nondeterministic (a
// "chaotic" relaxation) — confirm this is intended.
__global__ void relax_cu(double *u,double *f,double h2,double relax_param)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Interior points only; the boundary holds fixed Dirichlet data.
if(i>0 && i<N-1 &&j>0 && j<N-1)
u[i*N+j]=relax_param*(u[(i-1)*N+j]+u[(i+1)*N+j]+u[i*N+j-1]+u[i*N+j+1]-h2*f[i*N+j])+(1-relax_param*4)*u[i*N+j];
}
// Relaxation sweep writing into u_new while reading a MIX of u_new (the
// lower-index neighbours) and u (the higher-index neighbours).
// NOTE(review): like relax_cu, the result depends on thread scheduling
// because u_new entries are read while other threads write them —
// confirm the mixed reads are intentional.
__global__ void relax_cu2(double *u,double *u_new,double *f,double h2,double relax_param)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i>0 && i<N-1 &&j>0 && j<N-1)
u_new[i*N+j]=relax_param*(u_new[(i-1)*N+j]+u[(i+1)*N+j]+u_new[i*N+j-1]+u[i*N+j+1]-h2*f[i*N+j])+(1-relax_param*4)*u[i*N+j];
}
// One Jacobi iteration of the 5-point Laplacian stencil: reads cu, writes
// cu_new on interior nodes; h2 is the squared grid spacing.
__global__ void jacobi_cu(double *cu,double *cu_new,double *f,double h2)
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    const int col = blockIdx.y*blockDim.y + threadIdx.y;
    // Boundary nodes keep their (Dirichlet) values.
    if (row <= 0 || row >= N-1 || col <= 0 || col >= N-1)
        return;
    const int c = row*N + col;
    cu_new[c] = 0.25*(cu[c-N] + cu[c+N] + cu[c-1] + cu[c+1] - h2*f[c]);
}
// Abort the whole program with a diagnostic if a CUDA call failed.
void err_cuda(cudaError_t i)
{
    if (i == 0)
        return;
    printf("Error: %i\t%s\n",i,cudaGetErrorString(i));
    exit(-1);
}
// Allocate the three device buffers and upload the host arrays.
// Relies on globals prepared by init(): u, u_new, f, numberOfBytes.
void cudaInit()
{
err_cuda(cudaMalloc((void**)&devU,numberOfBytes));
err_cuda(cudaMalloc((void**)&devU_new,numberOfBytes));
err_cuda(cudaMalloc((void**)&devF,numberOfBytes));
err_cuda(cudaMemcpy(devU,u,numberOfBytes,cudaMemcpyHostToDevice));
err_cuda(cudaMemcpy(devF,f,numberOfBytes,cudaMemcpyHostToDevice));
err_cuda(cudaMemcpy(devU_new,u_new,numberOfBytes,cudaMemcpyHostToDevice));
}
// Release the three device buffers allocated by cudaInit().
void cudaFinal()
{
err_cuda(cudaFree(devU));
err_cuda(cudaFree(devU_new));
err_cuda(cudaFree(devF));
}
// Allocate and initialize the host arrays: zero interior, f from f1(),
// and Dirichlet boundary data x^2 - x + 1 along all four edges.
void init()
{
    h=1.0/N;
    numberOfBytes=sizeof(double)*N*N;
    // BUG FIX: the arrays were allocated with `new double[numberOfBytes]`,
    // i.e. a BYTE count used as an ELEMENT count — 8x more memory than
    // needed.  N*N elements is the correct size; numberOfBytes remains the
    // byte size used by the cudaMemcpy/cudaMalloc calls.
    u=new double[N*N];
    u_new=new double[N*N];
    f=new double[N*N];
    // Interior: zero initial guess, right-hand side from f1().
    for(int i=0;i<N;i++)
    {
        for(int j=0;j<N;j++)
        {
            u[i*N+j]=0;
            u_new[i*N+j]=0;
            f[i*N+j]=f1(i,j);
        }
    }
    // Boundary: u = x^2 - x + 1 (the exact solution restricted to the
    // edges, where the other factor equals 1).
    double tmp;
    for(int i=0;i<N;i++)
    {
        tmp=(double)i*h*(double)i*h-(double)i*h+1;
        u[i*N]=tmp;          // left column
        u[i*N+N-1]=tmp;      // right column
        u[i]=tmp;            // top row
        u[(N-1)*N+i]=tmp;    // bottom row
        u_new[i*N]=tmp;
        u_new[i*N+N-1]=tmp;
        u_new[i]=tmp;
        u_new[(N-1)*N+i]=tmp;
    }
}
// Release the host arrays allocated by init().
void final()
{
delete[] u;
delete[] u_new;
delete[] f;
}
// Closed-form solution u(x,y) = (x^2 - x + 1)(y^2 - y + 1) evaluated at
// grid node (i,j); used as the convergence reference in err().
double exact_solve(int i,int j)
{
double x=(double)i*h;
double y=(double)j*h;
double res=(x*x-x+1.0)*(y*y-y+1.0);
return res;
}
// Compare the current host solution u against the analytic solution over
// all interior nodes, print the maximum absolute error, and return true
// when it drops below eps (converged).
bool err()
{
double max=0;
double tmp;
for(int i=1;i<N-1;i++)
for(int j=1;j<N-1;j++)
{
tmp=fabs(u[i*N+j]-exact_solve(i,j));
if(tmp>max)max=tmp;
}
cout<<"err: "<<max<<endl;
if(max<eps)return true;
return false;
}
// Dump the current host solution as "x y u" triples, one grid node per
// line (closed automatically by the ofstream destructor).
void writeToFile(string fname)
{
    ofstream output_file(fname.c_str(), std::ofstream::out);
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            output_file << (double)i*h << " " << (double)j*h << " " << u[i*N+j] << endl;
    }
}
// Iterate Jacobi sweeps, ping-ponging devU/devU_new by pointer swap, until
// err() reports convergence; convergence is checked every 10000 sweeps.
// The `param` damping argument is unused by the Jacobi kernel.
void jacobi(double param)
{
dim3 threads = dim3(THREADS, THREADS,1);
dim3 blocks = dim3(BLOCKS, BLOCKS,1);
for(int i=0;;i++)
{
// NOTE(review): the FIRST launch argument is the grid and the SECOND the
// block, so this actually runs a 32x32 grid of 16x16 blocks.  That still
// covers all 512x512 nodes (32*16 == N), but the variable names are
// swapped relative to their use — worth renaming.
jacobi_cu<<<threads,blocks,0>>>(devU,devU_new,devF,h*h);
double *tmp=devU;
devU=devU_new;
devU_new=tmp;
if(i%10000==0)
{
// The device->host copy also synchronizes with the queued kernels.
err_cuda(cudaMemcpy(u,devU,numberOfBytes,cudaMemcpyDeviceToHost));
cout<<i<<endl;
if(err())break;
}
}
}
// Iterate in-place relaxation sweeps with damping factor `param` until
// err() reports convergence; checked every 100 sweeps (the device->host
// copy in the check also synchronizes the GPU).
void relax(double param)
{
dim3 threads = dim3(THREADS, THREADS,1);
dim3 blocks = dim3(BLOCKS, BLOCKS,1);
for(int i=0;;i++)
{
// NOTE(review): as in jacobi(), grid and block arguments are swapped in
// name but still cover all N x N nodes.
relax_cu<<<threads,blocks,0>>>(devU,devF,h*h,param);
if(i%100==0)
{
cout<<i<<endl;
err_cuda(cudaMemcpy(u,devU,numberOfBytes,cudaMemcpyDeviceToHost));
if(err())break;
}
}
}
// Like relax() but drives the two-buffer kernel relax_cu2.
// NOTE(review): the devU/devU_new swap happens only at each
// 10000-iteration checkpoint, so in between the kernel keeps reading and
// writing the same buffer pair — confirm this is intended.
void relax2(double param)
{
dim3 threads = dim3(THREADS, THREADS,1);
dim3 blocks = dim3(BLOCKS, BLOCKS,1);
for(int i=0;;i++)
{
relax_cu2<<<threads,blocks,0>>>(devU,devU_new,devF,h*h,param);
if(i%10000==0)
{
double *tmp=devU;
devU=devU_new;
devU_new=tmp;
cout<<i<<endl;
err_cuda(cudaMemcpy(u,devU,numberOfBytes,cudaMemcpyDeviceToHost));
if(err())break;
}
}
}
// Allocate/initialize everything, run one solver with the given damping
// parameter, tear down, and return the elapsed time in seconds.
// NOTE: clock() measures host CPU time; it is meaningful here only
// because each solver's periodic device->host copies synchronize the GPU.
double CalcTimeOfMethod(void (*method)(double),double param)
{
init();
cudaInit();
clock_t start=clock();
method(param);
double time=(double)(clock()-start)/CLOCKS_PER_SEC;
//writeToFile("output2.dat");
cudaFinal();
final();
return time;
}
// Benchmark the Jacobi solver and the damped relaxation solver with three
// damping parameters, then print the four wall times.
int main(int argc, char* argv[])
{
    const double times[4] = {
        CalcTimeOfMethod(&jacobi, 0.25),
        CalcTimeOfMethod(&relax, 0.25),
        CalcTimeOfMethod(&relax, 0.18),
        CalcTimeOfMethod(&relax, 0.33)
    };
    for (int k = 0; k < 4; ++k)
        cout << "Time: " << times[k] << endl;
    return 0;
}
22,926 | #include "includes.h"
// Naive dense matrix multiply P = M * N for square WIDTH x WIDTH matrices
// in row-major layout; one thread computes one element of P.
__global__ void mult2Matrix(float *M, float *N, float *P) {
    const int Row = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    const int Col = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    if (Row >= WIDTH || Col >= WIDTH)
        return;
    float acc = 0;
    // Dot product of row `Row` of M with column `Col` of N.
    for (int k = 0; k < WIDTH; ++k)
        acc += M[Row*WIDTH + k] * N[k*WIDTH + Col];
    P[Row*WIDTH + Col] = acc;
}
22,927 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define thread_size 128
#include <stdio.h>
#include <math.h>
double N = 400;
const int size = 4000;
// CUDA kernel for vector addition: dev_c = dev_a + dev_b element-wise.
// `size` is the file-scope element count; excess threads do nothing.
__global__ void Vector_Addition(const int *dev_a, const int *dev_b, int *dev_c)
{
// Global thread index across the whole grid.
unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < size) // boundary check for threads past the array end
dev_c[tid] = dev_a[tid] + dev_b[tid];
}
// Add two `size`-element integer vectors on the GPU and print the result.
int main(void)
{
    // Host arrays
    int Host_a[size], Host_b[size], Host_c[size];
    // Device arrays
    int *dev_a, *dev_b, *dev_c;
    // BUG FIX: the grid size was derived from N (400.0), so only
    // ceil(400/128)*128 = 512 of the `size` (4000) elements were ever
    // computed and the rest of Host_c was printed uninitialized.  Size the
    // grid from `size` with an integer ceiling division instead.
    int block_size = (size + thread_size - 1) / thread_size;
    // Allocate the memory on the GPU
    cudaMalloc((void **)&dev_a, size*sizeof(int));
    cudaMalloc((void **)&dev_b, size*sizeof(int));
    cudaMalloc((void **)&dev_c, size*sizeof(int));
    // Fill the host arrays on the CPU
    for (int i = 0; i <size; i++)
    {
        Host_a[i] = i+2;
        Host_b[i] = i*i;
    }
    // Copy host arrays to device arrays
    cudaMemcpy(dev_a, Host_a, size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, Host_b, size*sizeof(int), cudaMemcpyHostToDevice);
    // One thread per element, thread_size threads per block
    Vector_Addition <<< block_size, thread_size >>> (dev_a, dev_b, dev_c);
    // Copy back to host array from device array (synchronizes)
    cudaMemcpy(Host_c, dev_c, size*sizeof(int), cudaMemcpyDeviceToHost);
    // Display the result
    for (int i = 0; i<size; i++)
        printf("%d + %d = %d\n", Host_a[i], Host_b[i], Host_c[i]);
    // Free the device array memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
22,928 | #include <stdio.h>
// Each launched device thread prints one greeting line.
__global__ void helloCuda(void)
{
printf("hello from GPU\n");
}
// Print from the host, then launch 10 device threads that each print.
int main(void)
{
    printf("hello from CPU\n");
    // One block of 10 threads; each executes the kernel's printf.
    helloCuda <<< 1, 10 >>> ();
    // FIX: kernel launches are asynchronous — synchronize explicitly so
    // the kernel has finished (and its printf buffer is flushed) before
    // the device is torn down.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
22,929 | #include "includes.h"
// Return the largest of the three arguments (callable from host and device).
__device__ __host__ int maximum( int a, int b, int c){
    const int ab = (a <= b) ? b : a;   // max of the first two
    return (ab <= c) ? c : ab;         // then compare with the third
}
// One wavefront step of a dynamic-programming matrix fill (the recurrence
// matches a Needleman–Wunsch-style alignment score — confirm against the
// host code).  Only threads on the anti-diagonal r + c == i + 1 update
// dst from the three already-computed neighbours: diagonal + reference
// score, and left/up minus the gap penalty.
__global__ void upper_left(int *dst, int *input_itemsets, int *reference, int max_rows, int max_cols, int i, int penalty)
{
int r, c;
// +1 skips the boundary row/column of the DP matrix.
r = blockIdx.y*blockDim.y+threadIdx.y+1;
c = blockIdx.x*blockDim.x+threadIdx.x+1;
if( r >= i+1 || c >= i+1) return;
if( r == (i - c + 1)) {
int base = r*max_cols+c;
dst[base] = maximum( input_itemsets[base-max_cols-1]+ reference[base],
input_itemsets[base-1] - penalty,
input_itemsets[base-max_cols] - penalty);
}
}
22,930 |
/*!
 * Smallest power of two that is >= n (with a minimum of 2).
 *
 * @param n
 */
__device__
int nextPower2(int n)
{
    int p = 2;
    for ( ; p < n; p *= 2 )
    {
        // keep doubling until p reaches or passes n
    }
    return p;
}
/*!
 * Exchange the two float values pointed to by a and b.
 *
 * @param a
 * @param b
 */
__device__
void swap(float *a, float *b)
{
    const float tmp = *b;
    *b = *a;
    *a = tmp;
}
/*!
 * Sort an array using bitonic sort. The array should have a size which is a
 * power of two.
 *
 * Sequential device implementation: the calling thread performs every
 * compare-exchange stage itself (size/2 comparators per stage).
 *
 * @param array
 * @param size
 */
__device__
void bitonicSort(float *array, int size)
{
int bsize = size / 2;
int dir, a, b, t;
// ob: length of the bitonic sequences being built in this outer stage.
for ( int ob = 2; ob <= size; ob *= 2 )
{
// ib: comparator span of the current merge sub-stage.
for ( int ib = ob; ib >= 2; ib /= 2 )
{
t = ib/2;
// i enumerates the size/2 comparators of this sub-stage.
for ( int i = 0; i < bsize; ++i )
{
// dir is 0 for ascending comparator blocks, -1 for descending.
dir = -((i/(ob/2)) & 0x1);
a = (i/t) * ib + (i%t);
b = a + t;
if ( (!dir && (array[a] > array[b])) || (dir && (array[a] < array[b])) )
{
swap(&array[a], &array[b]);
}
}
}
}
}
/*!
 * Sort an array using bitonic sort, while also applying the same swap operations
 * to a second array of the same size. The arrays should have a size which is a
 * power of two.
 *
 * Identical stage structure to bitonicSort(); each compare-exchange on
 * `array` is mirrored on `extra` so paired data stays aligned.
 *
 * @param size
 * @param array
 * @param extra
 */
__device__
void bitonicSortFF(int size, float *array, float *extra)
{
int bsize = size / 2;
int dir, a, b, t;
// ob: outer bitonic block length; ib: comparator span of the sub-stage.
for ( int ob = 2; ob <= size; ob *= 2 )
{
for ( int ib = ob; ib >= 2; ib /= 2 )
{
t = ib/2;
for ( int i = 0; i < bsize; ++i )
{
// dir is 0 for ascending comparator blocks, -1 for descending.
dir = -((i/(ob/2)) & 0x1);
a = (i/t) * ib + (i%t);
b = a + t;
if ( (!dir && (array[a] > array[b])) || (dir && (array[a] < array[b])) )
{
swap(&array[a], &array[b]);
swap(&extra[a], &extra[b]);
}
}
}
}
}
/*!
 * Compute the rank of a sorted vector in place. In the event of ties,
 * the ranks are corrected using fractional ranking.
 *
 * Ranks are 0-based: element i receives rank i, and a run of equal values
 * receives the mean of the positions it spans.
 * Precondition: array is sorted ascending.
 *
 * @param array
 * @param n
 */
__device__
void computeRank(float *array, int n)
{
int i = 0;
while ( i < n - 1 )
{
float a_i = array[i];
if ( a_i == array[i + 1] )
{
int j = i + 2;
int k;
float rank = 0;
// we have detected a tie, find number of equal elements
while ( j < n && a_i == array[j] )
{
++j;
}
// compute rank: sum of the tied positions i..j-1
for ( k = i; k < j; ++k )
{
rank += k;
}
// divide by number of ties -> mean position
rank /= (float) (j - i);
for ( k = i; k < j; ++k )
{
array[k] = rank;
}
i = j;
}
else
{
// no tie - set rank to natural ordered position
array[i] = i;
++i;
}
}
// The last element, when not consumed by a tie run, keeps its position.
if ( i == n - 1 )
{
array[n - 1] = (float) (n - 1);
}
}
|
22,931 | #include "includes.h"
extern "C" {
}
#define IDX2C(i, j, ld) ((j)*(ld)+(i))
#define SQR(x) ((x)*(x)) // x^2
// Multiply a grid of matrix elements by per-(block,row) weights:
// results[matrix_index] = matrices[block_index] * weights[weight_index].
// NOTE(review): `matrices` is indexed with block_index only (the
// blockIdx-based matrix_grid_index is not added), so every block re-reads
// the same first blockDim.x*blockDim.y elements while writing distinct
// result slots — confirm this broadcast is intended.
__global__ void weighting_kernel (double const* matrices, double const* weights, double* results) {
int matrix_grid_index = blockIdx.x * blockDim.x * blockDim.y;
int block_index = blockDim.y * threadIdx.x + threadIdx.y;
int matrix_index = matrix_grid_index + block_index;
int weight_index = blockIdx.x * blockDim.y + threadIdx.y;
results[matrix_index] = matrices[block_index] * weights[weight_index];
}
22,932 | #include "includes.h"
// Element-wise product C = A .* B.
// NOTE(review): the index math looks suspicious — sourceIndex ignores the
// block offsets (only i, j and blockDim.y appear), and the guard compares
// targetIndex against `rows` while sourceIndex is checked against the full
// rows*cols length.  As written this only behaves sensibly for particular
// launch shapes; verify against the host-side launch configuration.
__global__ void ElementWiseMultiply_CUDA(double *C, double *A, double *B, int rows, int cols)
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
int i = blockDim.y * blockIdx.y + threadIdx.y;
int sourceLength = cols * rows;
int sourceIndex = i + (j * blockDim.y);
int targetIndex = i + (j * blockDim.y);
if ((sourceIndex <= sourceLength - 1) & (targetIndex < rows))
{
//if (i == 0 & j == 0)
//{
// printf("ElementWiseMultiply_CUDA, matrix A:\r\n");
// printMatrix_CUDA << <1, 1 >> > (A, dimA);
// printf("ElementWiseMultiply_CUDA, matrix B:\r\n");
// printMatrix_CUDA << <1, 1 >> > (B, dimB);
//}
//int idx = i + (j * dimC.y);
double a = A[sourceIndex];
double b = B[sourceIndex];
C[targetIndex] = a * b;
//printf("i=%i, j=%i idx=%i | %i = %i * %i\r\n", i, j, idx, C[idx], a, b);
}
}
22,933 | #include <cuda_runtime.h>
#include<iostream>
using namespace std;
#include <device_launch_parameters.h>
#define N 5
// c = a + b element-wise for N-element vectors; one thread per element,
// intended to be launched as a single block of N threads.
__global__ void add(int* a, int* b, int* c)
{
    const int tid = threadIdx.x;
    if (tid >= N)
        return;
    c[tid] = a[tid] + b[tid];
}
// Add two small integer vectors on the GPU and print the result.
int main(void) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // Device buffers for the two inputs and the result.
    cudaMalloc((void**)&dev_a, sizeof(int) * N);
    cudaMalloc((void**)&dev_b, sizeof(int) * N);
    cudaMalloc((void**)&dev_c, sizeof(int) * N);
    for (int i = 0; i < N; i++)
    {
        a[i] = -i;
        b[i] = i * i;
    }
    cudaMemcpy(dev_a, a, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(int) * N, cudaMemcpyHostToDevice);
    // FIX: the original also uploaded the *uninitialized* host array c
    // into dev_c.  The kernel overwrites every element of dev_c, so that
    // copy only transferred garbage; it has been removed.
    // One block of N threads: each thread adds one element pair.
    add <<<1, N>>> (dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    {
        cout << "Element at " << i << " is: " << c[i] << endl;
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
|
22,934 | /**
*
* This is a cuda version of the array addition program as created from the
* tutorial from here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <cuda.h>
#include <iostream>
#include <math.h>
// Kernel: element-wise y = x + y over n floats.
__global__ // executes on the device
void add(int n, float *x, float *y)
{
    // Each thread starts at its own index within the (single) launched
    // block and strides by blockDim.x, so the block's threads jointly
    // cover [0, n) with each thread handling 1/blockDim.x of the data.
    for (int i = threadIdx.x; i < n; i += blockDim.x)
        y[i] = x[i] + y[i];
}
// Allocate two large vectors in unified memory, add them on the GPU, and
// verify every element equals 3.0f.
int main(void)
{
int N = 100<<20; // ~104.9M elements
float *x, *y;
// Allocate memory using Unified memory.
// Accessible either from the device or host.
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run the kernel on the GPU: a single block of 256 threads; the
// kernel's stride loop makes that one block cover all N elements.
add<<<1, 256>>>(N, x, y);
// Wait for the GPU to finish before accessing the data on the host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free the memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
22,935 | #include "includes.h"
// Map the flat index `ii` of an element in the DOWNSCALED 5D tensor
// (dims [?, d1, d2, d3, d4]; channel, time, height, width innermost) to
// the flat index of the corresponding element in the UPSCALED tensor, at
// sub-position (off_time, off_x, off_y) inside the scaling window.
__device__ int translate_idx_inv(int ii, int d1, int d2, int d3, int d4, int scale_factor_t, int scale_factor_xy, int off_time, int off_x, int off_y)
{
/* d1 = channel
d2 = time
d3, d4 = height, width
*/
int x, y, t, z, w;
// Decompose ii into coordinates, innermost dimension first.
w = ii % d4;
ii = ii/d4;
z = ii % d3;
ii = ii/d3;
t = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
// Scale time/space coordinates up and add the window offset.
t = t*scale_factor_t+off_time;
w = w*scale_factor_xy+off_x;
z = z*scale_factor_xy+off_y;
// Dimensions of the upscaled tensor.
d2 *= scale_factor_t;
d3 *= scale_factor_xy;
d4 *= scale_factor_xy;
// Recompose the flat index in the upscaled tensor.
return (((((x*d1+y)*d2)+t)*d3)+z)*d4+w;
}
// Inverse of translate_idx_inv: map the flat index `ii` of an element in
// the UPSCALED 5D tensor to the flat index of its source element in the
// DOWNSCALED tensor (integer division collapses each scaling window).
__device__ int translate_idx(int ii, int d1, int d2, int d3, int d4, int scale_factor_t, int scale_factor_xy)
{
int x, y, t, z, w;
// Decompose ii into coordinates, innermost dimension first.
w = ii % d4;
ii = ii/d4;
z = ii % d3;
ii = ii/d3;
t = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
// Collapse each scaling window onto its source cell.
w = w/scale_factor_xy;
z = z/scale_factor_xy;
t = t/scale_factor_t;
// Dimensions of the downscaled tensor.
d2 /= scale_factor_t;
d3 /= scale_factor_xy;
d4 /= scale_factor_xy;
return (((((x*d1+y)*d2)+t)*d3)+z)*d4+w;
}
// Backward pass of temporal/spatial nearest-neighbour upsampling: each
// input-gradient cell accumulates the gradients of all
// scale_factor_t * scale_factor_xy^2 output cells copied from it.
// NOTE(review): gradInput_data is accumulated with += and is presumably
// zeroed by the caller — confirm.
__global__ void downscale(float *gradInput_data, float *gradOutput_data, long no_elements, int scale_factor_t, int scale_factor_xy, int d1, int d2, int d3, int d4)
{
// Flat element index assembled from a 2D grid / 2D block launch.
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
// Sum the gradients over every position of the scaling window.
for (int i=0; i < scale_factor_t; i++){
for(int j=0; j < scale_factor_xy; j++){
for(int k=0; k < scale_factor_xy; k++){
int ipidx = translate_idx_inv(ii, d1, d2, d3, d4, scale_factor_t, scale_factor_xy, i, j, k);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
}
}
22,936 | #include <stdio.h>
#include <math.h>
#include <cuda.h>
// Print `message` when the preceding CUDA API call did not succeed.
__host__ void checkCudaState(cudaError_t& cudaState,const char *message){
    if(cudaState == cudaSuccess)
        return;
    printf("%s",message);
}
__device__ void swap(int *points,uint lowIndex,uint upIndex){
    /* Exchange the two array elements at the given indices. */
    const int tmp = points[upIndex];
    points[upIndex] = points[lowIndex];
    points[lowIndex] = tmp;
}
// One phase of a bottom-up parallel merge: every thread whose index is a
// multiple of `phase` merges, in place, the two sorted runs of length
// phase/2 that end at its index.  The host launches phases 2, 4, 8, ...
// in order with a device synchronization between them.
// NOTE(review): the in-place merge uses ad-hoc fix-up cases (2 and 3)
// instead of a standard merge — verify its correctness on adversarial
// inputs before reuse.
__global__ void sort(int *points,uint phase,uint n){
/* it will sort the points array with respect to phase */
uint ti = blockIdx.x*blockDim.x+threadIdx.x;
if(ti >= n || ti == 0) return;
if(ti%phase == 0){ // only multiples of `phase` do work
// Runs: [lower, middle-1] and [middle, top].
uint top = ti, lower = (top - phase) + 1;
uint middle = lower + phase/2;
uint lowG1 = lower, lowG2 = middle, topG1 = middle-1, topG2 = top;
while(true){
if(lowG1 > topG1 && lowG2 > topG2) break;
// ------------------ case 1: both runs still active ------------------
if(lowG1 <= topG1 && lowG2 <= topG2){
if(points[lowG1] > points[lowG2]){
swap(points,lowG1,lowG2);
lowG2++;
}
else lowG1++;
}
// ------------------ case 2: only run 1 remaining --------------------
else if(lowG1 < topG1 && lowG2 > topG2){
uint next = lowG1 + 1;
if(points[lowG1] > points[next])
swap(points,lowG1,next);
lowG1++;
}
// ------------------ case 3: only run 2 remaining --------------------
else if(lowG2 < topG2 && lowG1 > topG1){
uint next = lowG2 + 1;
if(points[lowG2] > points[next])
swap(points,lowG2,next);
lowG2++;
}
else if(lowG1 == topG1)
lowG1++;
else if(lowG2 == topG2)
lowG2++;
}
}
}
__host__ void fill(int *points,size_t n){
    /* Fill with n, n-1, ..., 1 — descending, the worst case for sorting. */
    for(size_t k = 0; k < n; ++k)
        points[k] = (int)(n - k);
}
__host__ void show(int* points,size_t n){
/* Print all n elements on one line, then a blank line. */
for(size_t i=0; i<n; i++)
printf("%d ",points[i]);
printf("\n\n");
}
// Fill a descending array, sort it on the GPU phase by phase, and print
// the array before and after.
// NOTE(review): items is 2049 — not a power of two — while the phase loop
// below only runs phases up to 2048, so the final element may never be
// merged; confirm the expected output.
int main(int argc, char const *argv[]) {
size_t items = 2049;
size_t size = items*sizeof(int);
cudaError_t cudaState = cudaSuccess;
int *h_points = NULL, *d_points = NULL, *h_result = NULL;
h_points = (int*)malloc(size);
h_result = (int*)malloc(size);
// Descending values: worst case input for the sort.
fill(h_points,items);
cudaState = cudaMalloc((void**)&d_points,size);
checkCudaState(cudaState,"Impossible allocate data\n");
if(d_points != NULL){
cudaState = cudaMemcpy(d_points,h_points,size,cudaMemcpyHostToDevice);
checkCudaState(cudaState,"Impossible copy data from host to device\n");
show(h_points,items);
dim3 blockSize(1024,1,1);
dim3 gridSize((int)(ceil(items/1024.0)),1,1);
// Launch one merge phase at a time (2, 4, 8, ...), synchronizing
// between phases so each phase sees the previous one's result.
uint i = 1;
while(pow(2,i) <= items){
sort<<<gridSize,blockSize>>>(d_points,pow(2,i),items);
cudaDeviceSynchronize();
i++;
}
cudaState = cudaMemcpy(h_result,d_points,size,cudaMemcpyDeviceToHost);
checkCudaState(cudaState,"Impossible copy data from device to host\n");
show(h_result,items);
}
if(h_points != NULL) free(h_points);
if(h_result != NULL) free(h_result);
if(d_points != NULL) cudaFree(d_points);
return 0;
}
|
22,937 | #include "includes.h"
// k-nearest-neighbour selection: for each of `width` query columns of a
// pitched, column-per-query distance matrix, keep the k smallest of the
// `height` distances sorted ascending in the first k rows and record
// their 1-based original row numbers in `ind`.  One thread per column.
__global__ void cuInsertionSort(float *dist, int dist_pitch, int *ind, int ind_pitch, int width, int height, int k){
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Shift the pointers to this thread's column; element r of the column
// lives at p_dist[r * dist_pitch].
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 1;
// Part 1 : insertion-sort the first k elements in place.
for (l = 1; l<k; l++){
curr_row = l * dist_pitch;
curr_dist = p_dist[curr_row];
if (curr_dist<max_dist){
// Find the insertion position i among the first l elements.
i = l - 1;
for (int a = 0; a<l - 1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i = a;
break;
}
}
// Shift the tail right and insert distance + 1-based index.
for (j = l; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j - 1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j - 1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l + 1;
}
else
p_ind[l*ind_pitch] = l + 1;
// After this pass, position l holds the largest of the first l+1.
max_dist = p_dist[curr_row];
}
// Part 2 : insert each remaining element into the k-element window if
// it beats the current k-th smallest distance.
max_row = (k - 1)*dist_pitch;
for (l = k; l<height; l++){
curr_dist = p_dist[l*dist_pitch];
if (curr_dist<max_dist){
i = k - 1;
for (int a = 0; a<k - 1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i = a;
break;
}
}
for (j = k - 1; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j - 1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j - 1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l + 1;
max_dist = p_dist[max_row];
}
}
}
}
22,938 | // Huang Tianwei 20026141 twhuang@connect.ust.hk
#include <iostream>
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdlib>
#include <ctime>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
using namespace std;
const int numBits = 6;
const int totalBits = 19;
const int numPart = 1 << numBits;
const int numPerPart = 1 << (totalBits - numBits);
const int mask = (1 << numBits) - 1;
const int numThreads = 128;
const int numBlocks = 512;
const int Hist_len = numThreads * numBlocks * numPart;
/*
return the partition ID of the input element: the top numBits bits of the
totalBits-bit key select one of the numPart partitions
*/
__device__
int getPartID(int element)
{
    return (element >> (totalBits - numBits)) & mask;
}
/*
input: d_key[], array size N
output: d_pidArray[]
function: grid-stride loop writing the partition ID of every key:
d_pidArray[i] = getPartID(d_key[i])
*/
__global__
void mapPart(int d_key[],int d_pidArray[],int N)
{
    const int total = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += total)
        d_pidArray[i] = getPartID(d_key[i]);
}
/*
input: d_pidArray[], array size N
output: d_Hist[]
function: per-thread histogram of partition IDs.  Each thread owns a
private numPart-slot slice of shared memory (no cross-thread sharing),
counts its grid-stride share of d_pidArray, and writes its counts to
d_Hist transposed (d_Hist[part * totalThreads + thread]) so one exclusive
scan over d_Hist yields per-thread write offsets in partition order.
*/
__global__
void count_Hist(int d_Hist[],int d_pidArray[],int N)
{
__shared__ int s_Hist[numThreads * numPart];
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
int threadNumber = blockDim.x * gridDim.x;
int offset = threadIdx.x * numPart;
// Clear this thread's private slice.
for(int i = 0; i < numPart; ++i)
s_Hist[i + offset] = 0;
for(int i = threadId; i < N; i += threadNumber)
s_Hist[offset + d_pidArray[i]]++;
// Transposed store: partition-major, thread-minor.
for(int i = 0; i < numPart; ++i)
d_Hist[i * threadNumber + threadId] = s_Hist[offset + i];
// NOTE(review): no shared reads follow this barrier — it looks removable.
__syncthreads();
}
/*
input: d_pidArray[] (partition ID array), d_psSum[] (prefix sum of histogram), array size N
output: d_loc[] (location array)
function: turn the scanned histogram into an explicit output slot for each
element.  Each thread stages its numPart running offsets in shared memory
(private slice, no cross-thread sharing), then walks its grid-stride share
of elements, assigning and advancing the offset of each element's
partition.
*/
__global__
void write_Hist(int d_pidArray[],int d_psSum[],int d_loc[],int N)
{
__shared__ int s_psSum[numThreads * numPart];
int threadId = threadIdx.x + blockIdx.x * blockDim.x;
int threadNumber = gridDim.x * blockDim.x;
int offset = threadIdx.x * numPart;
// Load this thread's offsets from the transposed scanned histogram.
for(int i = 0; i < numPart; ++i)
s_psSum[i + offset] = d_psSum[threadId + i * threadNumber];
for(int i = threadId; i < N; i += threadNumber)
{
int pid = d_pidArray[i];
d_loc[i] = s_psSum[pid + offset];
s_psSum[pid + offset]++;
}
}
/*
input: d_psSum[] (prefix sum of histogram), array size N
output: start position of each partition
function: the first numPart threads each copy their partition's first
prefix-sum entry, i.e. the offset where that partition begins in the
reordered array.  NOTE(review): parameter N is unused.
*/
__global__
void getStartPos(int d_psSum[],int d_startPos[],int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int threadNumber = gridDim.x * blockDim.x;
if(tid >= numPart)
return;
d_startPos[tid] = d_psSum[tid * threadNumber];
}
/*
input: d_key[],d_value[],d_loc[],array size N
output: out_key[],out_value[]
function: grid-stride permutation — element i of each (key,value) pair is
moved to its precomputed destination slot d_loc[i]
*/
__global__
void scatter(int d_key[],float d_value[],int out_key[],float out_value[],int d_loc[],int N)
{
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += step)
    {
        const int dst = d_loc[i];
        out_key[dst] = d_key[i];
        out_value[dst] = d_value[i];
    }
}
/*
function: split (partition) the (key,value) arrays of size N in place and
record the start position of each partition in d_startPos.
Pipeline: partition IDs -> per-thread histogram -> exclusive scan ->
per-element destinations -> scatter -> copy back.
*/
void split(int *d_key,float *d_value,int *d_startPos,int N)
{
// Temporary device buffers for the pipeline stages.
int *d_pidArray, *d_loc, *d_Hist, *d_psSum;
int *d_keyResult;
float *d_valueResult;
cudaMalloc(&d_keyResult, sizeof(int) * N);
cudaMalloc(&d_valueResult, sizeof(float) * N);
cudaMalloc(&d_pidArray,sizeof(int) * N);
cudaMalloc(&d_loc,sizeof(int) * N);
cudaMalloc(&d_Hist,sizeof(int) * Hist_len);
cudaMalloc(&d_psSum,sizeof(int) * Hist_len);
mapPart<<<numBlocks, numThreads>>>(d_key, d_pidArray, N);
count_Hist<<<numBlocks, numThreads>>>(d_Hist, d_pidArray, N);
// Exclusive scan of the transposed histogram: gives each thread its
// write offset for every partition.
thrust::device_ptr<int> dev_Hist(d_Hist);
thrust::device_ptr<int> dev_psSum(d_psSum);
thrust::exclusive_scan(dev_Hist, dev_Hist + Hist_len, dev_psSum);
getStartPos<<<numBlocks, numThreads>>>(d_psSum, d_startPos, N);
write_Hist<<<numBlocks, numThreads>>>(d_pidArray, d_psSum, d_loc, N);
scatter<<<numBlocks, numThreads>>>(d_key, d_value, d_keyResult, d_valueResult, d_loc, N);
//copy the result from d_keyResult/d_valueResult back to d_key/d_value
cudaMemcpy(d_key, d_keyResult, sizeof(int) * N, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_value, d_valueResult, sizeof(float) * N, cudaMemcpyDeviceToDevice);
cudaFree(d_psSum);
cudaFree(d_Hist);
cudaFree(d_pidArray);
cudaFree(d_loc);
cudaFree(d_valueResult);
cudaFree(d_keyResult);
//cudaCheckError();
}
/*
function: perform hash join on two partitioned (key,value) arrays.
One block per partition: stage partition blockIdx.x of table 2 in shared
memory, then scan it for every key of the same partition of table 1,
writing the matching table-2 index (or leaving -1) into d_result.
*/
__global__
void join(int d_key1[],float d_value1[],int d_key2[],float d_value2[],int d_startPos1[],int d_startPos2[],int d_result[],int N1,int N2)
{
    int partID = blockIdx.x;
    int numThreadsInBlock = blockDim.x;
    int tID = threadIdx.x;
    int startPos1 = d_startPos1[partID];
    // BUG FIX: partID ranges over 0..numPart-1, so the original test
    // `partID == numPart` was never true and the last block read
    // d_startPos[numPart] — one past the end of the numPart-entry array.
    int endPos1 = (partID == numPart - 1) ? N1 : d_startPos1[partID + 1];
    int startPos2 = d_startPos2[partID];
    int endPos2 = (partID == numPart - 1) ? N2 : d_startPos2[partID + 1];
    int useful_length = endPos2 - startPos2;
    __shared__ int inner[numPerPart];
    // Cooperative load of this partition's table-2 keys into shared memory.
    for (int i = tID; i + startPos2 < endPos2; i += numThreadsInBlock) {
        inner[i] = d_key2[i + startPos2];
    }
    // BUG FIX: all threads must observe the finished shared tile before
    // probing it; the original had no barrier between the cooperative
    // load above and the reads below (a data race).
    __syncthreads();
    for (int i = tID; i + startPos1 < endPos1; i += numThreadsInBlock) {
        int targetKey = d_key1[i + startPos1];
        d_result[i + startPos1] = -1;
        // Linear probe of the staged partition; first match wins.
        for (int j = 0; j < useful_length; j++) {
            if (inner[j] == targetKey) {
                d_result[i + startPos1] = j + startPos2;
                break;
            }
        }
    }
}
// Partition both tables on the GPU, then join them partition-by-partition
// (one block per partition).  On return, d_result[i] holds the index in
// table 2 matching row i of table 1, or -1.  Note: split() reorders the
// key/value arrays in place.
void hashJoin(int *d_key1,float *d_value1,int *d_key2,float *d_value2,int N1,int N2,int *d_result)
{
    int *d_startPos1,*d_startPos2;
    cudaMalloc(&d_startPos1,sizeof(int) * numPart);
    cudaMalloc(&d_startPos2,sizeof(int) * numPart);
    split(d_key1,d_value1,d_startPos1,N1);
    split(d_key2,d_value2,d_startPos2,N2);
    dim3 grid(numPart);   // one block per partition
    dim3 block(1024);
    join<<<grid,block>>>(d_key1,d_value1,d_key2,d_value2,d_startPos1,d_startPos2,d_result,N1,N2);
    // BUG FIX: the partition-offset buffers were never released (device
    // memory leak).  cudaFree implicitly synchronizes with the device, so
    // freeing after the launch is safe.
    cudaFree(d_startPos1);
    cudaFree(d_startPos2);
}
// Read two (key,value) tables from in.txt, join them on the GPU, and
// write the matched pairs plus a match count to out.txt.
int main()
{
// Redirect stdin to the input file: sizes N1 N2, then the pairs.
freopen("in.txt","r",stdin);
int *h_key1, *h_key2, *d_key1, *d_key2;
float *h_value1, *h_value2, *d_value1, *d_value2;
int *h_result, *d_result;
int N1,N2;
scanf("%d%d",&N1,&N2);
h_key1 = (int*)malloc(N1 * sizeof(int));
h_key2 = (int*)malloc(N2 * sizeof(int));
h_value1 = (float*)malloc(N1 * sizeof(float));
h_value2 = (float*)malloc(N2 * sizeof(float));
h_result = (int*)malloc(N1 * sizeof(int));
cudaMalloc(&d_key1, N1 * sizeof(int));
cudaMalloc(&d_key2, N2 * sizeof(int));
cudaMalloc(&d_value1, N1 * sizeof(float));
cudaMalloc(&d_value2, N2 * sizeof(float));
cudaMalloc(&d_result, N1 * sizeof(int));
for(int i = 0; i < N1; ++i)
scanf("%d%f",&h_key1[i],&h_value1[i]);
for(int i = 0; i < N2; ++i)
scanf("%d%f",&h_key2[i],&h_value2[i]);
// -1 marks "no match" for every row of table 1.
memset(h_result,-1,sizeof(int) * N1);
cudaMemcpy(d_key1,h_key1, sizeof(int) * N1, cudaMemcpyHostToDevice);
cudaMemcpy(d_result,h_result, sizeof(int) * N1, cudaMemcpyHostToDevice);
cudaMemcpy(d_key2,h_key2, sizeof(int) * N2, cudaMemcpyHostToDevice);
cudaMemcpy(d_value1,h_value1, sizeof(float) * N1, cudaMemcpyHostToDevice);
cudaMemcpy(d_value2,h_value2, sizeof(float) * N2, cudaMemcpyHostToDevice);
hashJoin(d_key1,d_value1,d_key2,d_value2,N1,N2,d_result);
// Copy everything back; note hashJoin reorders the key/value arrays.
cudaMemcpy(h_result,d_result,sizeof(int) * N1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_key1,d_key1,sizeof(int) * N1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_key2,d_key2,sizeof(int) * N2, cudaMemcpyDeviceToHost);
cudaMemcpy(h_value1,d_value1,sizeof(float) * N1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_value2,d_value2,sizeof(float) * N2, cudaMemcpyDeviceToHost);
int matched = 0;
// Redirect stdout to out.txt for the match report.
freopen("out.txt","w",stdout);
for(int i = 0;i < N1; ++i)
{
if(h_result[i] == -1)
continue;
matched++;
printf("Key %d\nValue1 %.2f Value2 %.2f\n\n",h_key1[i],h_value1[i],h_value2[h_result[i]]);
}
printf("Matched %d\n",matched);
fclose(stdout);
// Restore stdout to the terminal.  NOTE(review): "/dev/tty" is POSIX-only.
freopen("/dev/tty","w",stdout);
free(h_key1);
free(h_key2);
free(h_value1);
free(h_value2);
free(h_result);
cudaFree(d_key1);
cudaFree(d_key2);
cudaFree(d_value1);
cudaFree(d_value2);
cudaFree(d_result);
cudaDeviceReset();
return 0;
}
|
// Write each thread's global linear ID into data[] at its own index.
// NOTE(review): there is no bounds guard, so the launch configuration
// must provide exactly as many threads as data has elements — confirm
// with the callers.
__global__ void assignTID(int *data)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
data[tid] = tid;
}
22,940 | /**
* File : gpu_conv.cu
* Author : Xianglan Piao <lanxlpiao@gmail.com>
* Date : 2020.06.16
* Last Modified Date: 2020.07.31
* Last Modified By : Xianglan Piao <lanxlpiao@gmail.com>
* NOTE: : cuda conv2d
*/
#include <iostream>
#define ifm_size 8
#define wgt_size 5
#define stride 2
#define padding 2 // For same ifm/ofm size padding = wgt_size / 2
#define ofm_size ((ifm_size + 2 * padding - wgt_size) / stride + 1)
//// constant memory
__constant__ float wgt[wgt_size];
//// global memory
// 1D convolution with constant-memory weights (wgt): each thread computes
// one output sample, skipping taps that fall outside the padded input.
// NOTE(review): ofm[col] is written without a bounds check, so the launch
// must supply exactly ofm_size threads.
__global__ void cuda_conv1d_naive(float* ifm, float* ofm) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
float temp = 0.0f;
for (int k = 0; k < wgt_size; k++) {
// Input sample under filter tap k, accounting for stride and padding.
int col_offset = col * stride - padding + k;
if (col_offset >= 0 && col_offset < ifm_size) {
temp += ifm[col_offset] * wgt[k];
}
}
ofm[col] = temp;
}
//// shared memory
#define BLOCK_size 4
// 1D convolution staging each tap through shared memory.
// Launch requirement: exactly ofm_size threads (ofm[col] is unguarded),
// blockDim.x == BLOCK_size.
__global__ void cuda_conv1d_shared(float* ifm, float* ofm) {
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ float shared_block[BLOCK_size];
  float temp = 0.0f;
  for (int k = 0; k < wgt_size; k++) {
    int col_offset = col * stride - padding + k;
    // BUG FIX: the original read ifm[col_offset] unconditionally, which is
    // out of bounds whenever col_offset is negative (left padding) or past
    // the end of the input.  Guard the load; out-of-range taps load 0.
    shared_block[threadIdx.x] =
        (col_offset >= 0 && col_offset < ifm_size) ? ifm[col_offset] : 0.0f;
    __syncthreads();
    if (col_offset >= 0 && col_offset < ifm_size) {
      temp += shared_block[threadIdx.x] * wgt[k];
    }
    // NOTE(review): each thread only ever touches its own shared slot, so
    // this staging buffer provides no data reuse; a halo-tile load would.
  }
  ofm[col] = temp;
}
// Fill data[0..size-1] with the sequence 1, 2, ..., size.
void initData(float* data, int size) {
  int value = 1;
  for (int i = 0; i < size; ++i) {
    data[i] = value++;
  }
}
void print(float* data, dim3 dim) {
  // Print a 1D buffer (dim.y == 0) on one line, or a dim.x x dim.y matrix
  // row by row, comma-separated.
  for (int x = 0; x < dim.x; x++) {
    if (dim.y == 0) {
      std::cout << data[x] << ", ";
    } else {
      for (int y = 0; y < dim.y; y++) {
        // Fix: the row stride is the column count dim.y, not dim.x.  The
        // two were interchangeable only because every caller passes a
        // square dim; rectangular matrices printed garbage.
        std::cout << data[x * dim.y + y] << ", ";
      }
      std::cout << std::endl;
    }
  }
  std::cout << std::endl;
}
//// constant memory
__constant__ float wgt2d[wgt_size * wgt_size];
//// global memory
__global__ void cuda_conv2d_naive(float* ifm, float* ofm) {
  // One thread per output pixel; the 5x5 weights live in constant memory
  // (`wgt2d`).  Taps falling into the zero-padding border contribute 0.
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  float temp = 0.0f;
  for (int m = 0; m < wgt_size; m++) {
    for (int n = 0; n < wgt_size; n++) {
      int col_offset = col * stride - padding + m;
      int row_offset = row * stride - padding + n;
      if ((col_offset >= 0 && col_offset < ifm_size) &&
          (row_offset >= 0 && row_offset < ifm_size)) {
        temp +=
            ifm[row_offset * ifm_size + col_offset] * wgt2d[m * wgt_size + n];
      }
    }
  }
  // Fix: the store used to sit inside the m-loop, issuing wgt_size
  // redundant global writes of partial sums per pixel; write the final
  // sum exactly once.
  ofm[row * ofm_size + col] = temp;
}
void test() {
  // Demo driver: runs the 1D convolution (global- and shared-memory
  // kernels) and the 2D convolution on small fixed-size inputs, printing
  // inputs, weights and outputs to stdout.
  dim3 block(0);
  dim3 grid(0);
  float* h_ifm = NULL;
  float* d_ifm = NULL;
  float* d_ofm = NULL;
  float* h_ofm = NULL;
  //// 1D convolution
  std::cout << "\n--- 1D convolution ---\n" << std::endl;
  std::cout << "ifm: " << std::endl;
  h_ifm = (float*)malloc(ifm_size * sizeof(float));
  initData(h_ifm, ifm_size);
  print(h_ifm, dim3(ifm_size, 0, 0));
  std::cout << std::endl;
  cudaMalloc((void**)&d_ifm, ifm_size * sizeof(float));
  cudaMemcpy(d_ifm, h_ifm, ifm_size * sizeof(float), cudaMemcpyHostToDevice);
  float h_wgt[wgt_size] = {1, 2, 4, 2, 1};
  std::cout << "wgt: " << std::endl;
  print(h_wgt, dim3(wgt_size, 0, 0));
  std::cout << std::endl;
  cudaMemcpyToSymbol(wgt, &h_wgt, sizeof(wgt));
  block.x = BLOCK_size;
  grid.x = ofm_size / block.x;
  cudaMalloc((void**)&d_ofm, ofm_size * sizeof(float));
  h_ofm = (float*)calloc(ofm_size, sizeof(float));
  //// using global memory
  cudaMemset(d_ofm, 0, ofm_size * sizeof(float));
  cuda_conv1d_naive<<<grid, block>>>(d_ifm, d_ofm);
  cudaDeviceSynchronize();
  // Fix: memset takes a byte count; "ofm_size" alone cleared only a
  // quarter of the float buffer.
  memset(h_ofm, 0, ofm_size * sizeof(float));
  cudaMemcpy(h_ofm, d_ofm, ofm_size * sizeof(float), cudaMemcpyDeviceToHost);
  std::cout << "ofm: " << std::endl;
  print(h_ofm, dim3(ofm_size, 0, 0));
  std::cout << std::endl;
  //// using shared memory
  cudaMemset(d_ofm, 0, ofm_size * sizeof(float));
  cuda_conv1d_shared<<<grid, block>>>(d_ifm, d_ofm);
  cudaDeviceSynchronize();
  // Fix: byte count again (see above).
  memset(h_ofm, 0, ofm_size * sizeof(float));
  cudaMemcpy(h_ofm, d_ofm, ofm_size * sizeof(float), cudaMemcpyDeviceToHost);
  std::cout << "ofm: " << std::endl;
  print(h_ofm, dim3(ofm_size, 0, 0));
  std::cout << std::endl;
  //// free
  cudaFree(d_ifm);
  cudaFree(d_ofm);
  free(h_ifm);
  free(h_ofm);
  //// 2D convolution
  std::cout << "\n--- 2D convolution ---\n" << std::endl;
  h_ifm = (float*)calloc(ifm_size * ifm_size, sizeof(float));
  initData(h_ifm, ifm_size * ifm_size);
  std::cout << "ifm: " << std::endl;
  print(h_ifm, dim3(ifm_size, ifm_size, 0));
  std::cout << std::endl;
  cudaMalloc((void**)&d_ifm, ifm_size * ifm_size * sizeof(float));
  cudaMemcpy(d_ifm,
             h_ifm,
             ifm_size * ifm_size * sizeof(float),
             cudaMemcpyHostToDevice);
  float h_wgt2d[wgt_size * wgt_size] = {1, 1, 1, 1, 1, //
                                        1, 2, 2, 2, 1, //
                                        1, 2, 4, 2, 1, //
                                        1, 2, 2, 2, 1, //
                                        1, 1, 1, 1, 1}; //
  std::cout << "wgt: " << std::endl;
  print(h_wgt2d, dim3(wgt_size, wgt_size, 0));
  std::cout << std::endl;
  cudaMemcpyToSymbol(wgt2d, &h_wgt2d, sizeof(wgt2d));
  block.x = BLOCK_size;
  block.y = BLOCK_size;
  grid.x = ofm_size / block.x;
  grid.y = ofm_size / block.y;
  cudaMalloc((void**)&d_ofm, ofm_size * ofm_size * sizeof(float));
  h_ofm = (float*)calloc(ofm_size * ofm_size, sizeof(float));
  cudaMemset(d_ofm, 0, ofm_size * ofm_size * sizeof(float));
  cuda_conv2d_naive<<<grid, block>>>(d_ifm, d_ofm);
  cudaDeviceSynchronize();
  // Fix: byte count -- clear the whole ofm_size x ofm_size float buffer.
  memset(h_ofm, 0, ofm_size * ofm_size * sizeof(float));
  cudaMemcpy(h_ofm,
             d_ofm,
             ofm_size * ofm_size * sizeof(float),
             cudaMemcpyDeviceToHost);
  std::cout << "ofm: " << std::endl;
  print(h_ofm, dim3(ofm_size, ofm_size, 0));
  std::cout << std::endl;
  //// free
  cudaFree(d_ifm);
  cudaFree(d_ofm);
  free(h_ifm);
  free(h_ofm);
  //// reset
  cudaDeviceReset();
}
int main(void) {
  // Run the 1D and 2D convolution demos.
  test();
  return 0;
}
|
22,941 |
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
struct timeval startTime, stopTime;
int started = 0;
void start_timer() {
// Record the wall-clock start instant and mark the timer as running.
gettimeofday(&startTime, NULL);
started = 1;
}
double stop_timer() {
// Return elapsed milliseconds since start_timer(), or -1 when the timer
// was never started.  Stops the timer as a side effect.
double duration = -1;
if (started) {
gettimeofday(&stopTime, NULL);
long sec_diff = stopTime.tv_sec - startTime.tv_sec;
long usec_diff = stopTime.tv_usec - startTime.tv_usec;
duration = sec_diff * 1000.0 + usec_diff / 1000.0;
started = 0;
}
return duration;
}
void printprimes(int limit, int *arr) {
// arr[c] == 0 marks c as prime (sieve output); print every prime < limit.
for (int candidate = 2; candidate < limit; candidate++) {
if (arr[candidate] == 0)
fprintf(stdout, "%d ", candidate);
}
fprintf(stdout, "\n");
}
// Parallel Sieve-of-Eratosthenes step: each thread owns one candidate
// number `tid` per base and marks it composite (arr[tid] = 1) whenever a
// base c in [2, sqroot] divides it.  arr must be zero-initialized by the
// caller (cudaMemset in main).
__global__ void init(int *arr, int sqroot, int limit) {
int c;
for(c = 2; c <= sqroot; c++) {
// NOTE(review): arr[c] is read here while other threads may be writing
// arr concurrently (no synchronization).  Seeing a racy 1 only skips a
// base whose multiples are already covered by a smaller prime, so the
// final marking appears unaffected -- confirm.
if(arr[c] == 0) {
/*
#pragma omp parallel for shared(arr, limit, c) private(m)
for(m = c+1; m < limit; m++) {
if(m%c == 0) {
arr[m] = 1;
}
}
*/
// This thread's candidate; offsets start at c+1 so c itself stays prime.
int tid = c+1+ threadIdx.x + (blockIdx.x * blockDim.x);
if (tid<limit){
if (tid % c ==0) {
arr[tid] = 1;
}
}
}
}
}
double getThreadAndInfo(cudaDeviceProp devProp)
{
// Report the device's maximum threads-per-block capacity.  (A large dump
// of the remaining cudaDeviceProp fields used to live here; the original
// author had disabled it, so it is summarized away.)
return devProp.maxThreadsPerBlock;
}
int main(int argc, char **argv) {
// Sieve driver: pick a thread count from the device limits, parse the
// upper bound from argv, run/time the GPU sieve N times, print primes.
int threads=1000000;
int devCount;
cudaGetDeviceCount(&devCount);
// Use the smallest maxThreadsPerBlock across all devices.
for (int i = 0; i < devCount; ++i)
{
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
if (getThreadAndInfo(devProp)<threads){
threads=getThreadAndInfo(devProp);
}
}
int N=10;
int limit ;
if (argc>3){
fprintf(stderr, "Error: uso: %s [limite_superior_positivo]\n", argv[0]);
return -1;
}else if (argc==2 || argc==3) {
int parsed=atoi(argv[1]);
if (parsed<0){
fprintf(stderr, "Error: uso: %s [limite_superior_positivo]\n", argv[0]);
return -1;
}else{
limit=parsed;
}
if (argc==3) {
// Any third argument means "run once" instead of averaging 10 runs.
N=1;
}
}else {
limit=16;
}
int *arr;
double ms;
ms = 0;
int i;
int *p_array;
for (i = 0; i < N; i++) {
start_timer();
//->
int sqroot = (int)sqrt(limit);
arr = (int*)malloc(limit * sizeof(int));
cudaMalloc((void**) &p_array, limit * sizeof(int));
cudaMemset(p_array, 0, limit*sizeof(int));
if (limit<=threads){
threads=limit;
init<<<1, threads>>>(p_array, sqroot, limit);
}else{
init<<<int(limit/threads)+1, threads>>>(p_array, sqroot, limit);
}
cudaMemcpy(arr, p_array, limit * sizeof(int), cudaMemcpyDeviceToHost);
//->
ms += stop_timer();
// Fix: the buffers were reallocated every iteration without ever being
// released, leaking limit*sizeof(int) on both host and device per run.
// Keep only the final iteration's buffers for printprimes below.
if (i < N - 1) {
free(arr);
cudaFree(p_array);
}
}
if (argc==2){
printf("times %i - avg time = %.5lf ms, %i threads\n",N,(ms / N), threads);
}
//
printprimes(limit, arr);
free(arr);
cudaFree(p_array);
return 0;
}
|
22,942 | #include <iostream>
using namespace std;
__host__ __device__
// Exchange two ints via a temporary.
// Fix: the original XOR-swap (a^=b; b^=a; a^=b) zeroed the value whenever
// both references aliased the same object, since a^a == 0.
void swap(int & a, int &b){
int tmp = a;
a = b;
b = tmp;
}
// Scan, limited to 1 block, upto 1024 threads;
// Hillis-Steele scan of one block's worth of g_data in place (double-
// buffered in dynamic shared memory, 2*blockDim.x entries).  Each block
// also writes its total into g_intermediate[blockIdx.x] so a second pass
// can turn per-block scans into a full scan.
// NOTE(review): threads with gid >= n return before the __syncthreads()
// calls below; for a partial last block this is a divergent barrier,
// which is undefined behavior per the CUDA model -- confirm on target HW.
__global__
void scan(unsigned int *g_data, unsigned int * g_intermediate, int n, int flag) {
// flag =0 inclusive; flag =1 Exclusive
extern __shared__ unsigned int temp[]; // allocated on invocation
int gid=threadIdx.x + blockIdx.x*blockDim.x;
int Ndim=blockDim.x;
int thid = threadIdx.x;
// ln = number of live elements in this block (smaller for the last block).
int ln= (blockDim.x*(blockIdx.x+1)>n)? (n - blockDim.x*blockIdx.x) : blockDim.x;
int pin=0,pout=1;
unsigned int data_end;
// if(threadIdx.x==0) printf("in Scan %d %d\n", d_bins[0],d_bins[1]);
// Load input into shared memory.
// This is exclusive scan, so shift right by one
// and set first element to 0
if (gid>=n)
return;
// temp[thid+pin*Ndim]=g_idata[gid]; // Inclusive
if (flag ==0){
temp[thid+pout*Ndim]=g_data[gid]; // Inclusive
}else{
temp[thid+pout*Ndim] = (thid > 0) ? g_data[gid-1] : 0; // Exclusive
data_end=g_data[gid];
}
// if(threadIdx.x==0) printf("in Scan %d %d\n", d_bins[0],d_bins[1]);
__syncthreads();
// printf("%d\n",thid);
// Double-buffered Hillis-Steele: ping-pong between the two halves of temp.
for (int offset = 1; offset < ln; offset *= 2)
{
swap(pin,pout);
if (thid >= offset)
temp[pout*Ndim+thid] = temp[pin*Ndim+thid]+ temp[pin*Ndim+thid - offset];
else
temp[pout*Ndim+thid] = temp[pin*Ndim+thid];
__syncthreads();
}
g_data[gid] = temp[pout*Ndim+thid]; // write output
// Last live thread publishes this block's total for the second pass.
if(thid==ln-1){
if(flag == 0){
g_intermediate[blockIdx.x]=temp[pout*Ndim+thid];
} else{
g_intermediate[blockIdx.x]=temp[pout*Ndim+thid]+data_end; // Exclusive
}
}
}
__global__
void scan_extra(unsigned int *g_io, unsigned int * g_intermediate, int n){
// Final scan pass: uniformly add this block's scanned block-offset to
// every element the block owns.
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx >= n)
return;
g_io[idx] += g_intermediate[blockIdx.x];
}
// Exclusive scan of d_in (length N) in place, using the classic three-step
// scheme: per-block scan, scan of the per-block totals, then a uniform add.
// NOTE(review): the second launch uses Nthread = Nblock, so this only
// works while N <= 1024*1024 (one block of block-sums) -- confirm.
void scan_large(unsigned int * d_in,const int N){
unsigned int * d_intermediate;
int Nthread=1024;
int Nblock=(N+Nthread-1)/Nthread;
int Nblock_s=Nblock;
int flag =1; // 0 inclusive; flag =1 Exclusive
// h_intermediate=(unsigned int *) malloc(Nblock*sizeof(unsigned int));
cudaMalloc(&d_intermediate,Nblock*sizeof(unsigned int));
// Step 1: scan each block; block totals land in d_intermediate.
scan<<<Nblock,Nthread,2*Nthread*sizeof(unsigned int)>>>(d_in,d_intermediate,N, flag);
Nthread=Nblock;
Nblock=1;
flag =1; // 0 inclusive; flag =1 Exclusive
unsigned int * d_junk;
cudaMalloc(&d_junk,Nblock*sizeof(unsigned int));
// Step 2: exclusive-scan the block totals (d_junk receives the unused grand total).
scan<<<Nblock,Nthread,2*Nthread*sizeof(unsigned int)>>>(d_intermediate,d_junk,Nthread,flag);
Nthread=1024;
Nblock=(N+Nthread-1)/Nthread;
// Step 3: add each block's offset to its elements.
scan_extra<<<Nblock,Nthread>>>(d_in,d_intermediate,N);
cudaFree(d_intermediate); cudaFree(d_junk);
}
int main(){
const int N=10000;
unsigned int sizeN=N*sizeof(unsigned int);
unsigned int *h_input=new unsigned int[N];
unsigned int *h_cdf=new unsigned int[N]();
for(int i=0;i<N;++i){
h_input[i]=1;
}
unsigned int * d_input, *d_cdf;
cudaMalloc(&d_input, sizeN);
cudaMalloc(&d_cdf, sizeN);
cudaMemcpy(d_input,h_input,sizeN,cudaMemcpyHostToDevice);
cudaMemcpy(d_cdf,d_input,sizeN,cudaMemcpyDeviceToDevice);
// scan_small(d_cdf,d_input,N);
scan_large(d_cdf, N);
cudaMemcpy(h_cdf,d_cdf,sizeN,cudaMemcpyDeviceToHost);
unsigned int acc=0;
for(int i=0;i<N;++i){
printf("%u ", acc);
acc += h_input[i];
}
printf("\n");
for(int i=0;i<N;++i){
printf("%u ", h_cdf[i]);
}
cudaFree(d_input); cudaFree(d_cdf);
delete[] h_input; delete[] h_cdf;
} |
22,943 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SEC_AS_NANO 1000000000.0
// Dynamically allocated n x m integer matrix (one malloc per row).
struct _matriz
{
int n;   // row count
int m;   // column count
int **cont;   // cont[i][j]: element at row i, column j
}; typedef struct _matriz Matriz;
Matriz *criarMatriz(int n, int m)
{
// Allocate an uninitialized n x m matrix (row-pointer table plus one
// allocation per row).
Matriz *mat = (Matriz*) malloc(sizeof(Matriz));
mat->n = n;
mat->m = m;
mat->cont = (int**) malloc(sizeof(int*) * n);
for(int linha = 0; linha < n; linha++)
mat->cont[linha] = (int*) malloc(sizeof(int) * m);
return mat;
}
void liberarMatriz(Matriz *m)
{
// Release every row, then the row-pointer table, then the struct itself.
int linhas = m->n;
for(int linha = 0; linha < linhas; linha++)
free(m->cont[linha]);
free(m->cont);
free(m);
}
Matriz *gerarMatriz(int n, int m)
{
// Build an n x m matrix filled with pseudo-random values in [0, 99].
Matriz *mat = criarMatriz(n, m);
for(int linha = 0; linha < n; linha++)
{
for(int coluna = 0; coluna < m; coluna++)
mat->cont[linha][coluna] = rand() % 100;
}
return mat;
}
void printarMatriz(Matriz *mat)
{
// Write the matrix to stdout, one space-separated row per line.
for(int linha = 0; linha < mat->n; linha++)
{
for(int coluna = 0; coluna < mat->m; coluna++)
printf("%d ", mat->cont[linha][coluna]);
printf("\n");
}
}
void multiplicarMatrizes(Matriz *a, Matriz *b, Matriz *c)
{
// c = a * b via the classic triple loop.  Assumes a->m == b->n and that
// c was created as a->n x b->m.
for(int i = 0; i < a->n; i++)
{
for(int j = 0; j < b->m; j++)
{
int soma = 0;
for(int k = 0; k < a->m; k++)
soma += a->cont[i][k] * b->cont[k][j];
c->cont[i][j] = soma;
}
}
}
Matriz *lerMatriz(char *nome, int n, int m)
{
// Read an n x m matrix from a whitespace-separated text file.
// Fix: the original never checked fopen(), so a missing/unreadable file
// crashed inside fscanf(NULL, ...).  Fail loudly instead.
Matriz *mat = criarMatriz(n, m);
FILE *f = fopen(nome, "r");
if(f == NULL)
{
fprintf(stderr, "Could not open matrix file %s\n", nome);
liberarMatriz(mat);
exit(1);
}
for(int i = 0; i < n; i++)
for(int j = 0; j < m; j++)
fscanf(f, " %d", &(mat->cont[i][j]));
fclose(f);
return mat;
}
void salvarMatriz(Matriz *mat)
{
// Dump the matrix to "<seq>-<n>x<m>.txt"; the static counter keeps file
// names unique across calls.
// Fix: renamed the static counter (the loop variable `i` shadowed it,
// which worked but was an accident waiting to happen) and added an
// fopen() check (the original wrote through a possibly-NULL FILE*).
static int seq = 0;
char nome[100];
sprintf(nome, "%d-%dx%d.txt", seq, mat->n, mat->m);
FILE *f = fopen(nome, "w");
if(f == NULL)
{
fprintf(stderr, "Could not create %s\n", nome);
return;
}
for(int i = 0; i < mat->n; i++)
{
for(int j = 0; j < mat->m; j++)
fprintf(f, "%d ", mat->cont[i][j]);
fprintf(f, "\n");
}
fclose(f);
seq++;
}
// Bundle of the two operand matrices, the result matrix, and a flag
// telling whether generated inputs should be saved to disk at the end.
struct _input
{
Matriz *a;
Matriz *b;
Matriz *c;
short int salvar;   // 1: also save the generated A and B when finishing
}; typedef struct _input Input;
Input *lerInput(int argc, char **argv)
{
// Build the Input (matrices A, B and result C) either by random
// generation ('g') or from files ('f').  Returns NULL on bad arguments
// or incompatible dimensions.  Assumes verificarArgumentos() already
// validated argc/argv (in particular the file names for 'f' mode).
if(argc >= 6)
{
Input *i = (Input *) malloc(sizeof(Input));
i->salvar = 0;
int n1, m1, n2, m2;
char op;
op = argv[1][0];
sscanf(argv[2], " %d", &n1);
sscanf(argv[3], " %d", &m1);
sscanf(argv[4], " %d", &n2);
sscanf(argv[5], " %d", &m2);
if(m1 == n2)
{
Matriz *a, *b, *c;
switch(op)
{
case 'g':
srand(time(NULL));
a = gerarMatriz(n1, m1);
b = gerarMatriz(n2, m2);
if(argc == 7 && argv[6][0] == 's')
i->salvar = 1;
break;
case 'f':
a = lerMatriz(argv[6], n1, m1);
b = lerMatriz(argv[7], n2, m2);
break;
default:
// Fix: this path returned the literal 0 while leaking the freshly
// allocated Input.
free(i);
return NULL;
}
c = criarMatriz(n1, m2);
i->a = a;
i->b = b;
i->c = c;
return i;
}
else
printf("Incompatible Matrices!\n");
// Fix: the dimension-mismatch path also leaked the Input.
free(i);
}
else
printf("Invalid arguments!\n");
return NULL;
}
double medirTempoInput(Input **i, int argc, char **argv, Input *ler(int, char**))
{
// Measure (in seconds) how long the given reader takes to build *i.
timespec ini, fim;
clock_gettime(CLOCK_REALTIME, &ini);
*i = ler(argc, argv);
clock_gettime(CLOCK_REALTIME, &fim);
double inicio = ini.tv_sec + ini.tv_nsec / SEC_AS_NANO;
double termino = fim.tv_sec + fim.tv_nsec / SEC_AS_NANO;
return termino - inicio;
}
double medirTempoExecMul(Input *i)
{
// Measure (in seconds) the matrix multiplication i->c = i->a * i->b.
timespec ini, fim;
clock_gettime(CLOCK_REALTIME, &ini);
multiplicarMatrizes(i->a, i->b, i->c);
clock_gettime(CLOCK_REALTIME, &fim);
double inicio = ini.tv_sec + ini.tv_nsec / SEC_AS_NANO;
double termino = fim.tv_sec + fim.tv_nsec / SEC_AS_NANO;
return termino - inicio;
}
void salvarELiberarMatrizes(Input *i)
{
// Persist the result (and, when requested, the generated operands), then
// release all three matrices and the Input wrapper.
if(i->salvar)
{
salvarMatriz(i->a);
salvarMatriz(i->b);
}
salvarMatriz(i->c);
Matriz *todas[3] = {i->a, i->b, i->c};
for(int k = 0; k < 3; k++)
liberarMatriz(todas[k]);
free(i);
}
int verificarArgumentos(int argc, char **argv)
{
// Validate the command line: mode flag ('f' or 'g'), four numeric matrix
// dimensions, and -- for file mode -- two readable matrix files.
// Returns 1 when the arguments are usable, 0 otherwise.
if(argc < 6)
{
printf("Not enough arguments\n"
"# SOURCE: f for files, g for generation\n"
"# LINSA: matrix A lines\n"
"# COLSA: matrix A columns\n"
"# LINSB: matrix B lines\n"
"# COLSB: matrix B columns\n"
"# FILEA: matrix A file\n"
"# FILEB: matrix B file\n"
"# SAV (opcional): saves generated matrices A and B"
"## ./bin f LA CA LB CB FILEA FILEB\n"
"## ./bin g LA CA LB CB SAV\n");
return 0;
}
if(argv[1][0] != 'f' && argv[1][0] != 'g')
{
printf("Invalid source argument, try using g or f\n");
return 0;
}
int aux;
for(int i = 2; i < 6; i++)
if(!sscanf(argv[i], "%d", &aux))
{
printf("%d is not a number, type matrices A and B dimensions\n", (i - 1));
return 0;
}
if(argv[1][0] == 'g')
if(argc == 7)
if(argv[6][0] != 's')
{
printf("Add 's' to save matrices A and B\n");
return 0;
}
if(argv[1][0] == 'f')
{
// Fix: file mode used to read argv[6] and argv[7] unconditionally -- an
// out-of-bounds access whenever fewer than 8 arguments were given.
if(argc < 8)
{
printf("File mode needs both matrix file names\n");
return 0;
}
FILE *f;
if((f = fopen(argv[6], "r")) == NULL)
{
printf("Matrix A file does not exist\n");
return 0;
}
fclose(f);
if((f = fopen(argv[7], "r")) == NULL)
{
printf("Matrix B file does not exist\n");
return 0;
}
fclose(f);
}
return 1;
}
int main(int argc, char ** argv)
{
// Validate the CLI arguments, then time input construction and the
// matrix multiplication, saving/releasing everything at the end.
if(!verificarArgumentos(argc, argv))
return 0;
Input *i;
printf("Creation time: %lf\n", medirTempoInput(&i, argc, argv, &lerInput));
printf("Execution time: %lf\n", medirTempoExecMul(i));
salvarELiberarMatrizes(i);
return 0;
}
|
22,944 |
#include <stdio.h>
#include <cuda_runtime.h>
__global__ void checkIndex() {
// Print this thread's coordinates plus the block and grid shapes.
int tx = threadIdx.x, ty = threadIdx.y, tz = threadIdx.z;
printf("threadIdx = ( %d, %d, %d)\n", tx, ty, tz);
printf("blockDim = ( %d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
printf("gridDim = ( %d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
}
int main() {
// Launch checkIndex with a 1-D grid sized to cover nElem elements, then
// once more with an explicit 2-block x 3-thread configuration.
int nElem = 6;
dim3 block(3);
// Fix: ceiling division is (nElem + block.x - 1) / block.x; the original
// "(nElem - block.x - 1)" evaluated to grid(0), an invalid launch config.
dim3 grid((nElem + block.x - 1)/block.x);
printf("block = ( %d, %d, %d)\n", block.x, block.y, block.z);
printf("grid = ( %d, %d, %d)\n", grid.x, grid.y, grid.z);
// Fix: execution configuration is <<<grid, block>>>; the two arguments
// were swapped in the original launch.
checkIndex <<<grid, block>>> ();
// check
printf("==== test ====\n");
checkIndex <<<2, 3>>> ();
cudaDeviceSynchronize();
return 0;
}
|
22,945 | // filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
__global__ void vsquare(const double *a, double *c)
{
// c[i] = a[i]^2, one element per thread.  There is no bounds guard, so
// the launch configuration must cover the arrays exactly.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
double v = a[idx];
c[idx] = v * v;
}
} |
22,946 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <chrono>
#include <string>
using namespace std;
#include <stdio.h>
#include <assert.h>
#include <math.h>
#define ll long long int
const ll m = 0x5DEECE66Dll;
const ll mask = (1ll << 48) - 1;
#define advance1(s) s = (s * m + 11ll) & mask
#define advance3759(s) s = (s*0x6fe85c031f25ll + 0x8f50ecff899ll)&mask
#define advance16(s) s = (s*0x6dc260740241ll + 0xd0352014d90ll)&mask
#define advance387(s) s = (s*0x5fe2bcef32b5ll + 0xb072b3bf0cbdll)&mask
#define advance774(s) s = (s*0xf8d900133f9ll + 0x5738cac2f85ell)&mask
#define advance11(s) s = (s*0x53bce7b8c655ll + 0x3bb194f24a25ll)&mask
#define advance3(s) s = (s*0xd498bd0ac4b5ll + 0xaa8544e593dll)&mask
#define advance17(s) s = (s*0xee96bd575badll + 0xc45d76fd665bll)&mask
#define regress1(s) s = (s*0xdfe05bcb1365ll + 0x615c0e462aa9ll)&mask
#define regress3(s) s = (s*0x13a1f16f099dll + 0x95756c5d2097ll)&mask
#define regress3759(s) s = (s*0x63a9985be4adll + 0xa9aa8da9bc9bll)&mask
#define advance2(s) s = (s*0xbb20b4600a69ll + 0x40942de6ball)&mask
#define getNextInt(x, s) advance1(s); x = (int)(s>>16)
// need spare longs temp1 and temp2
#define getNextLong(x, s) getNextInt(temp1, s); getNextInt(temp2, s); x = (temp1 << 32) + temp2
#define getIntBounded(x, s, n) if ((n&(-n))==n) {advance1(s); x = (int)((n*(s>>17)) >> 31);} else {do{advance1(s); bits = s>>17; val = bits%n;}while(bits-val+(n-1)<0); x=val;}
#define getBits(x, s, n) advance1(s); x = (int) (s >> (48-n));
#define setSeed(s, x) s = x^m&mask
#define MAX_TREES 12 // can change this later (performance & output is not very sensitive to this parameter)
#define x_1 9
#define z_1 2
const int x_2 = x_1 - 7;
const int z_2 = z_1 + 1;
#define SMALL_TREE_SPACING 2
#define BIG_TREE_SPACING 7
__device__ __managed__ unsigned long long int num_found = 0;
#define memsz 100000
__device__ __managed__ ll ret[memsz];
__device__ __managed__ char table[16][16]; // can a tree spawn here
// 0 : no
// 1 : tree_1's territory
// 2 : tree_2's territory
// 3 : joint tree_1 and tree_2
// 8 : a small tree's leaves
// 9 : I don't know (assume yes)
__device__ __managed__ int visited = 0;
// Record a surviving seed: step the LCG back one state (regress1) and
// append the result to the managed output buffer via an atomically
// reserved slot.
// NOTE(review): no bound check against memsz -- assumes fewer than memsz
// matches accumulate between flushes in do_work; confirm.
__device__ void output_seed(ll s) {
regress1(s);
ll id = atomicAdd(&num_found, 1ull); // dw about red underline
ret[id] = s;
}
// coordinates are relative to tree_1 position
// Mark a column segment (tree-1-relative coordinates) as "unknown: assume
// a tree could be here" (code 9) in the 16x16 table, clipping to the chunk.
void add_unseen(int sz, int ez, int x) {
int xi = x + x_1;
int z_begin = (sz - 2) + z_1; // start two rows early as a safety margin
int z_end = ez + z_1;
for (int zj = z_begin; zj < z_end; zj++) {
// (v & 15) == v  <=>  0 <= v < 16
if ((xi & 15) == xi && (zj & 15) == zj) {
table[xi][zj] = 9;
}
}
}
// fill a rectangle with a number (1 or 2 or 3)
void fill_rect(int sx, int sz, int ex, int ez, int fill) {
assert(fill == 1 || fill == 2 || fill == 3);
for (int i = max(0, x_1 + sx); i < min(16, x_1 + ex); i++) {
for (int j = max(0, z_1 + sz); j < min(16, z_1 + ez); ++j)
{
table[i][j] = fill;
}
}
}
// Build the 16x16 occupancy table describing where trees can/cannot spawn
// in the chunk, relative to the two observed big trees and three observed
// small trees (see the legend above the `table` declaration).  Prints the
// finished table for visual inspection.
void init_table() {
for (int i = 0; i < 16; ++i)
{
for (int j = 0; j < 16; ++j)
{
table[i][j] = 0;
}
}
/*
// big tree leaves
fill_rect(-1, -2, 5, 10, 1);
fill_rect(-6, -2, -1, 10, 3);
fill_rect(-13, 0, -6, 10, 2);
// small pieces of tree:
fill_rect(1, -3, 4, -2, 1);
fill_rect(5, 2, 6, 3, 1);
fill_rect(5, 5, 6, 7, 1);
fill_rect(-8, -1, -6, 0, 2);
*/
// Territory of big tree 1 (bit 0) and big tree 2 (bit 1); overlap => 3.
for (int i = max(0, x_1 - BIG_TREE_SPACING); i <= min(15, x_1 + BIG_TREE_SPACING); ++i)
{
for (int j = max(0, z_1 - BIG_TREE_SPACING); j <= min(15, z_1 + BIG_TREE_SPACING); ++j)
{
table[i][j] |= 1;
}
}
for (int i = max(0, x_2 - BIG_TREE_SPACING); i <= min(15, x_2 + BIG_TREE_SPACING); ++i)
{
for (int j = max(0, z_2 - BIG_TREE_SPACING); j <= min(15, z_2 + BIG_TREE_SPACING); ++j)
{
table[i][j] |= 2;
}
}
// add locations where it's unclear if there is a tree or not
add_unseen(4, 14, 6);
add_unseen(7, 14, 5);
add_unseen(8, 14, 4);
add_unseen(9, 14, 3);
add_unseen(10, 14, 2);
add_unseen(11, 14, 1);
add_unseen(12, 14, 0);
add_unseen(12, 14, -1);
add_unseen(12, 14, -2);
add_unseen(13, 14, -3);
add_unseen(13, 14, -4);
add_unseen(13, 14, -5);
add_unseen(14, 14, -6);
add_unseen(14, 14, -7);
add_unseen(14, 14, -8);
add_unseen(14, 14, -9);
add_unseen(14, 14, -10);
add_unseen(14, 14, -11);
add_unseen(14, 14, -12);
add_unseen(14, 14, -13);
// near a small tree 3
int x_3 = x_1 - 12;
int z_3 = z_1 - 7;
for (int i = max(0, x_3 - SMALL_TREE_SPACING); i <= min(15, x_3 + SMALL_TREE_SPACING); ++i)
{
for (int j = max(0, z_3 - SMALL_TREE_SPACING); j <= min(15, z_3 + SMALL_TREE_SPACING); ++j)
{
table[i][j] = 8;
}
}
// near a small tree 4
int x_4 = x_1 - 10;
int z_4 = z_1 - 12;
for (int i = max(0, x_4 - SMALL_TREE_SPACING); i <= min(15, x_4 + SMALL_TREE_SPACING); ++i)
{
for (int j = max(0, z_4 - SMALL_TREE_SPACING); j <= min(15, z_4 + SMALL_TREE_SPACING); ++j)
{
table[i][j] = 8;
}
}
// near a small tree 5
int x_5 = x_1 - 5;
int z_5 = z_1 - 15;
for (int i = max(0, x_5 - SMALL_TREE_SPACING); i <= min(15, x_5 + SMALL_TREE_SPACING); ++i)
{
for (int j = max(0, z_5 - SMALL_TREE_SPACING); j <= min(15, z_5 + SMALL_TREE_SPACING); ++j)
{
table[i][j] = 8;
}
}
// Dump the table (x decreasing downward) for a quick visual check.
for (int i = 15; i >= 0; --i)
{
for (int j = 0; j < 16; ++j)
{
printf("%d ", table[i][j]);
}
printf("\n");
}
printf("\n");
}
int file_num = 0;
ofstream get_next_file() {
// Open the next sequentially numbered intermediate output file; the
// global counter file_num keeps names unique across calls.
string path = string("_inter21/_intermediate") + to_string(file_num++) + ".txt";
return ofstream(path);
}
// todo: work this out
// Tree-height filter: accept only candidates whose derived height value is
// exactly 11 (the alternative ranges were left disabled by the author).
__device__ int is_field_878_e_ok(int field_878_e) {
return (field_878_e == 11) ? 1 : 0;
}
// we get a 48-bit candidate; (it's given that this is a big tree chunk)
// Test one 48-bit candidate seed.  Draws MAX_TREES (x, z) tree positions
// from the LCG; the seed survives only if it places trees at both observed
// big-tree positions (x_1, z_1) and (x_2, z_2) while never placing a tree
// on a square the occupancy table rules out.  The very first draw is also
// checked against the big-tree height filter.
__device__ void check_tree_seed(ll s, char s_table[16][16]) {
ll original = s;
int found_1 = 0;
int found_2 = 0;
int tree_x, tree_z;
// Scratch variables consumed by the getBits/getIntBounded/getNextLong macros.
int bits, val;
ll temp1, temp2;
ll saved_seed;
for (int i = 0; i < MAX_TREES; ++i) {
getBits(tree_x, s, 4);
getBits(tree_z, s, 4);
if (i == 0) {
saved_seed = s; // save for tree-height check
}
advance2(s);
if (!found_1 && tree_x == x_1 && tree_z == z_1) {
if (found_2) {
output_seed(original);
return;
}
found_1 = 1;
}
else if (!found_2 && tree_x == x_2 && tree_z == z_2) {
if (found_1) {
output_seed(original);
return;
}
found_2 = 1;
}
else {
// A tree landed somewhere else: reject if that square cannot host one
// (or belongs exclusively to a tree we have not matched yet).
char lookup = s_table[tree_x][tree_z];
if ((lookup == 0) || (lookup == 1 && !found_1) || (lookup == 2 && !found_2) || (lookup == 3 && !found_1 && !found_2)) {
return; // seed is eliminated
}
}
if (i == 0) {
// we do this check as late as possible because it is more expensive
ll internal_big_tree_seed;
getNextLong(internal_big_tree_seed, saved_seed);
setSeed(internal_big_tree_seed, internal_big_tree_seed);
int field_878_e;
getIntBounded(field_878_e, internal_big_tree_seed, 12);
if (!is_field_878_e_ok(field_878_e + 5)) {
return; // wrong tree-size
}
}
}
}
// Scan a band of 48-bit seeds whose upper 31 bits are a multiple of ten.
// global_id selects the band, blockIdx fills the next bits, threadIdx the
// next, and each thread iterates over the low 9 bits.
// Requires blockDim.x == 256 (the table copy below maps the 256 thread ids
// onto the 16x16 table exactly once each); do_work launches it that way.
__global__ void treeKernel(ll global_id) {
__shared__ char s_table[16][16];
ll tid = threadIdx.x;
// 256 threads cooperatively stage the occupancy table in shared memory.
s_table[tid % 16][tid / 16] = table[tid % 16][tid / 16];
__syncthreads();
ll bid = blockIdx.x;
ll div_ten = ((global_id << 25) | (bid << 17));
ll upper31 = mask & (10ll*div_ten); // upper 31 bits of seed are multiple of ten
if (upper31 < div_ten) {
return; // overflowed the 48 bits (happens on final few blocks)
}
ll upper39 = upper31 | (tid << 9);
for (ll lower9 = 0; lower9 < (1ll << 9); lower9++) {
ll seed = upper39 | lower9;
check_tree_seed(seed, s_table);
}
}
#define RUN_ID 200
// Drive the whole search: launch treeKernel for every band of the 48-bit
// seed space, flushing the managed result buffer (`ret`/`num_found`) to a
// numbered intermediate file every 100 iterations, and logging progress
// plus an ETA estimate.  Returns the last CUDA status observed.
cudaError_t do_work() {
ofstream log("big_tree_log_200.txt");
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return cudaStatus;
}
int threads_per_block = 256;
int num_blocks = 256; // can't change these without breaking code
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "big tree kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
//ll num_found = 0;
printf("begin xyz\n");
auto start = chrono::steady_clock::now();
int num_written = 0;
ll NUM_ITERS = 838861; // ceil(2^31 / 2^8 / 10)
//NUM_ITERS = 10000;
for (ll o = 0; o < NUM_ITERS; o ++) {
treeKernel <<<num_blocks, threads_per_block >>> (o);
if (o % 100 == 0) {
// Periodic flush: synchronize so the managed num_found/ret values are
// safe to read, then dump all seeds found so far and reset the count.
ofstream fout = get_next_file();
cudaDeviceSynchronize();
fout << RUN_ID << endl;
fout << x_1 << endl << z_1 << endl;
fout << num_found << endl;
for (int i = 0; i < num_found; i++) {
fout << ret[i] << endl;
num_written++;
}
fout.close();
num_found = 0;
//printf("%lld\n", o);
auto end = chrono::steady_clock::now();
ll time = (chrono::duration_cast<chrono::microseconds>(end - start).count());
float eta = ((838861-o) / ((float)o)) * ((float)time) / 3600.0 / 1000000.0;
log << "doing " << o << " time taken us =" << time << " eta (hrs) = " << eta << endl;
log.flush();
}
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cuda not sync: %s\n", cudaGetErrorString(cudaStatus));
return cudaStatus;
}
auto end = chrono::steady_clock::now();
cout << "time taken us =" << chrono::duration_cast<chrono::microseconds>(end - start).count() << endl;
// Final flush of whatever accumulated after the last periodic dump.
ofstream fout = get_next_file();
fout << RUN_ID << endl;
fout << x_1 << endl << z_1 << endl;
fout << num_found << endl;
for (int i = 0; i < num_found; i++) {
fout << ret[i] << endl;
num_written++;
}
fout.close();
cout << "total seeds written=" << num_written << endl;
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching big tree kernel!\n", cudaStatus);
}
return cudaStatus;
}
int main()
{
init_table();
// Add vectors in parallel.
cudaError_t cudaStatus = do_work();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cuda failed!");
return 1;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
} |
22,947 |
// cuda-kernel: add 2 numbers
__global__ void addnums (double *pi, double c){
// Accumulate the scalar c into *pi.
// NOTE(review): the update is not atomic; safe only when launched with a
// single thread -- confirm launch configuration at the call site.
*pi = *pi + c;
}
// cuda-kernel: add 2 vectors
__global__ void addvecs (double *v1, double *v2){
// Element-wise v1 += v2, one element per thread.  Indexes by threadIdx.x
// only (single-block launches) and has no bounds guard.
int i = threadIdx.x;
v1[i] = v1[i] + v2[i];
}
|
22,948 | #include "includes.h"
__global__ void callOperation(int *a, int *b, int *res, int k, int p, int n)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
if (idx >= n || idy >= n) {
return;
}
int tid = idx * n + idy;
res[tid] = a[tid] + b[tid];
if (res[tid] > k) {
res[tid] = p;
}
} |
22,949 | #include "includes.h"
// Accumulate into d_c[row][col] one output pixel of the 2D convolution of
// the real image d_a (ma x na) with the complex kernel d_b (mb x nb).
// o_row_vect / o_col_vect give, per output row/column, how many
// image/kernel pairs overlap there; the index walk advances the kernel
// while stepping the image backward, as convolution requires.
__device__ void compute_conv(int row, int col, double2 *d_c, double *d_a, double2 *d_b, int *o_row_vect, int *o_col_vect, int ma, int na, int mb, int nb, int mc, int nc) {
int count_row = o_row_vect[row];
int count_col = o_col_vect[col];
int row_idx;
int col_idx;
int k_row_idx;
int k_col_idx;
int k_col_start_idx;
int i_row_idx;
int i_col_idx;
int i_col_start_idx;
// First kernel row/column that overlaps this output position (clamped to 0).
k_row_idx = row - (ma - 1);
k_row_idx = k_row_idx < 0 ? 0:k_row_idx;
k_col_start_idx = col - (na - 1);
// NOTE(review): this clamp tests "< 1" where the row clamp tests "< 0";
// both map 0 to 0 so the results agree, but the asymmetry looks
// unintentional -- confirm.
k_col_start_idx = k_col_start_idx < 1? 0: k_col_start_idx;
k_col_idx = k_col_start_idx;
// Last image row/column that overlaps (image is walked backward).
i_row_idx = row > (ma - 1) ? (ma - 1) : row;
i_col_idx = col > (na - 1) ? (na - 1) : col;
i_col_start_idx = i_col_idx;
for ( row_idx = 0; row_idx < count_row; row_idx++) {
for (col_idx = 0; col_idx < count_col; col_idx++) {
// Complex-by-real multiply-accumulate (real image, complex kernel).
d_c[col + nc * row].x += d_a[i_col_idx + na * i_row_idx] * d_b[k_col_idx + nb * k_row_idx].x;
d_c[col + nc * row].y += d_a[i_col_idx + na * i_row_idx] * d_b[k_col_idx + nb * k_row_idx].y;
k_col_idx++;
i_col_idx--;
}
k_row_idx++;
i_row_idx--;
k_col_idx = k_col_start_idx;
i_col_idx = i_col_start_idx;
}
}
__global__ void kernel_conv(double2 *d_c, double *d_a, double2 *d_b, int *d_row_vect, int *d_col_vect, int ma, int na, int mb, int nb, int mc, int nc) {
int i, idx;
int rownum, colnum, num_threads;
idx = threadIdx.x + blockIdx.x * blockDim.x;
num_threads = gridDim.x * blockDim.x;
for(i=idx; i< (mc *nc); i=i+num_threads){
rownum = i / nc;
colnum = i % nc;
// Device Function call to multiply the Image pixel with the Kernel Image pixel and perform addition
compute_conv(rownum, colnum, d_c, d_a, d_b, d_row_vect, d_col_vect, ma, na, mb, nb, mc, nc);
}
} |
22,950 |
extern "C"
// Naive pattern search: thread gid tests whether the pattern occurs at
// text offset gid.  Matches append their offset to d_offsets (capped at
// nMaxMatched); *d_nFound counts every match, including dropped ones.
__global__
void SearchPatternKernel_naive(int *d_nFound, int *d_offsets, int nMaxMatched,
                               const unsigned char *d_pattern, int patternLength,
                               const unsigned char *d_text, int searchLength) {
  int gid = blockDim.x * blockIdx.x + threadIdx.x;
  // Fix: require the whole pattern to fit before the end of the text.
  // The original guard (gid < searchLength) let threads near the tail read
  // up to patternLength-1 bytes past d_text (out-of-bounds) and could
  // report bogus matches there.
  if (gid + patternLength <= searchLength) {
    const unsigned char *d_myPos = &d_text[gid];
    int idx = 0;
    for (; idx < patternLength; ++idx) {
      if (d_pattern[idx] != d_myPos[idx])
        break;
    }
    if (idx == patternLength) {
      int offsetPos = atomicAdd(d_nFound, 1);
      if (offsetPos < nMaxMatched)
        d_offsets[offsetPos] = gid;
    }
  }
}
|
22,951 | #include <stdio.h>
#include <time.h>
#include <assert.h>
// Abort (via assert) on any CUDA runtime error, echoing the message to
// stderr first; returns the status so calls can be wrapped inline.
inline cudaError_t checkCuda(cudaError_t result)
{
  if (result == cudaSuccess) return result;
  fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
  assert(result == cudaSuccess);
  return result;
}
void initWith(float num, float *a, int N)
{
// Fill all N entries of a with the constant num.
for(int idx = 0; idx < N; ++idx)
a[idx] = num;
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
// Grid-stride loop: each thread handles indices i, i+stride, i+2*stride,
// ... so any launch configuration covers all N elements.
int stride = gridDim.x * blockDim.x;
for(int i = threadIdx.x + blockDim.x * blockIdx.x; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *array, int N)
{
// Exit(1) with a message on the first element differing from target;
// print a success line when all N elements match.
for(int i = 0; i < N; i++)
{
if(array[i] == target)
continue;
printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
exit(1);
}
printf("SUCCESS! All values added correctly.\n");
}
int main()
{
// Allocate three managed vectors, compute c = a + b on the GPU with a
// grid-stride kernel, time it, and verify every element equals 7.
const int N = 2<<20;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
checkCuda(cudaMallocManaged(&a,size));
checkCuda(cudaMallocManaged(&b,size));
checkCuda(cudaMallocManaged(&c,size));
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
const int threads_per_block = 32;
const int num_blocks = 256;
clock_t begin = clock();
addVectorsInto<<<num_blocks, threads_per_block>>>(c, a, b, N);
// Fix: kernel launches are asynchronous; without synchronizing here the
// clock pair measured only the launch overhead, not the kernel runtime.
checkCuda(cudaDeviceSynchronize());
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Elapsed time for addVectorsInto on GPU is %lf \n", time_spent);
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
22,952 | #include "includes.h"
__global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float *in_delta, float *out_delta)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int k = id % c;
id /= c;
int b = id;
int i;
int out_index = (k + c*b);
for(i = 0; i < w*h; ++i){
int in_index = i + h*w*(k + b*c);
in_delta[in_index] += out_delta[out_index] / (w*h);
}
} |
22,953 | #include<iostream>
#include<cstdlib>
#include<cmath>
#include<time.h>
using namespace std;
__global__ void matrixVectorMultiplication(float *a, float *mat, float *c, int n)
{
// c = mat * a: each thread computes the dot product of one matrix row
// with the vector a.
int row=threadIdx.x+blockDim.x*blockIdx.x;
if(row<n){
float sum=0;
for(int j=0;j<n;j++)
{
sum=sum+mat[row*n+j]*a[j];
}
// Fix: the store used to sit outside the bounds check, so threads with
// row >= n wrote past the end of c.
c[row]=sum;
}
}
int main()
{
// Compute c = B*a on the GPU and d = B*a on the CPU, then report the
// accumulated difference between the two results.
float *a, *b, *c, *d;
float *dev_a, *dev_b, *dev_c;
int n=32*1024;
a=(float*)malloc(sizeof(float)*n);
b=(float*)malloc(sizeof(float)*n*n);
c=(float*)malloc(sizeof(float)*n);
d=(float*)malloc(sizeof(float)*n);
int i, j;
// Fix: the original loop body lacked braces, so "c[i] = 1.0;" executed
// once after the loop with i == n -- an out-of-bounds write.
for(i=0; i<n; i++)
{
a[i] = 1.0;
c[i] = 1.0;
}
for(i=0; i<n; i++)
for(j=0; j<n; j++)
b[i*n+j] = 2.0;
printf("<<<<<<<<<< initial data:\n");
cudaMalloc((void**)&dev_a, sizeof(float)*n);
cudaMalloc((void**)&dev_b, sizeof(float)*n*n);
cudaMalloc((void**)&dev_c, sizeof(float)*n);
cudaMemcpy(dev_a, a, sizeof(float)*n, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(float)*n*n, cudaMemcpyHostToDevice);
cudaEvent_t start,end;
cudaEventCreate(&start);
cudaEventCreate(&end);
int threadsPerBlock;
threadsPerBlock = 32;
int blocksPerGrid;
blocksPerGrid = n/threadsPerBlock;
cudaEventRecord(start);
matrixVectorMultiplication<<<blocksPerGrid,threadsPerBlock>>>(dev_a,dev_b,dev_c,n);
cudaEventRecord(end);
cudaEventSynchronize(end);
float time=0.0;
cudaEventElapsedTime(&time,start,end);
cudaMemcpy(c,dev_c,sizeof(float)*n,cudaMemcpyDeviceToHost);
cout<<"\nGPU Time Elapsed: "<<time;
// CPU reference.  Fix: the original computed a[row*n+col]*b[col] -- but b
// is the matrix and a is the vector -- and accumulated in an int.
float sum;
for(int row=0;row<n;row++)
{
sum=0;
for(int col=0;col<n;col++)
{
sum=sum+b[row*n+col]*a[col];
}
d[row]=sum;
}
// Fix: accumulate the float residual in a float, not an int.
float error=0;
for(int i=0;i<n;i++){
error+=d[i]-c[i];
}
cout<<"Error : "<<error;
// Release host and device buffers (the original leaked all of them).
free(a); free(b); free(c); free(d);
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
return 0;
};
// Set the first N entries of a to 1.0.
void init_array(float *a, const int N) {
    for (int idx = 0; idx < N; ++idx) {
        a[idx] = 1.0;
    }
}
// Fill an N x M row-major matrix with the constant 2.0.
void init_mat(float *a, const int N, const int M) {
    for (int r = 0; r < N; ++r) {
        for (int col = 0; col < M; ++col) {
            a[r * M + col] = 2.0;
        }
    }
}
|
22,954 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Write each element's own global thread index into the array.
__global__ void increase(int *c, int N){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    c[idx] = idx;
}
// Fills a[] with the constant 7, one element per thread.
// NOTE(review): no bounds check and no length parameter -- the caller must
// guarantee gridDim.x * blockDim.x does not exceed the allocation length.
__global__ void kernel0( int *a )
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = 7;
}
// Stores each thread's block index into its slot of a[].
// NOTE(review): no bounds check -- launch size must not exceed the allocation.
__global__ void kernel1( int *a )
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = blockIdx.x;
}
// Stores each thread's index within its block into its slot of a[].
// NOTE(review): no bounds check -- launch size must not exceed the allocation.
__global__ void kernel2( int *a )
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[idx] = threadIdx.x;
}
// Print each of the first n elements on its own labelled line.
void printarray0(int *c, int n)
{
    int i = 0;
    while (i < n) {
        printf("c[%d] = %d \n ", i, c[i]);
        ++i;
    }
}
// Space-separated dump of the first n ints, newline-terminated.
void printarray1(int *a, int n)
{
    for (int k = 0; k < n; ++k) {
        printf("%d ", a[k]);
    }
    printf("\n");
}
// Demo driver: reads array length, threadsPerBlock and numBlocks from the
// command line, then runs four tiny kernels and prints each result array.
// NOTE(review): kernel0/1/2 have no bounds check, so the product
// numBlocks * threadsPerBlock must not exceed N; no CUDA call is checked.
int main(int argc, char** argv)
{
    // BUG FIX: three arguments are consumed (argv[1..3]) but the original
    // only required argc >= 3, so argc == 3 passed the check and then
    // atoi(argv[3]) dereferenced NULL.
    if (argc < 4) {
        printf("Args kurang. usage: blockgrid [data array length] [threadsPerBlock] [numBlocks]");
        return EXIT_SUCCESS;
    }
    int N = atoi(argv[1]);
    int N2 = N+5;               // VLA padded by 5 slots; only first N used
    int isize = N*sizeof(int);
    dim3 threadsPerBlock(atoi(argv[2]));
    dim3 numBlocks(atoi(argv[3]));
    int c[N2];
    int *dev_c;

    // index-increment demo: each element receives its own thread index
    printf("=== index thread ke isi array ===\n");
    cudaMalloc( (void**)&dev_c, N*sizeof(int) );
    for(int i=0; i< N; ++i)  /* pre-fill the array with -1 */
    {
        c[i] = -1;
    }
    cudaMemcpy(dev_c, c, N*sizeof(int), cudaMemcpyHostToDevice);
    increase<<<numBlocks, threadsPerBlock>>>(dev_c,N);
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost );
    printarray0(c, N);
    cudaFree( dev_c );

    // kernel0: every element becomes 7
    printf("=== program kernel0 (7) ===\n");
    int *a_h, *a_d;
    a_h = (int*) malloc(isize);
    cudaMalloc((void**)&a_d, isize);
    kernel0<<<numBlocks, threadsPerBlock>>>(a_d);
    cudaMemcpy(a_h, a_d, isize, cudaMemcpyDeviceToHost);
    printarray1(a_h, N);
    cudaFree(a_d);
    free(a_h);

    // kernel1: every element becomes its blockIdx.x
    printf("=== program kernel1 blockIdx.x ===\n");
    int *b_h, *b_d;
    b_h = (int*) malloc(isize);
    cudaMalloc((void**)&b_d, isize);
    kernel1<<<numBlocks, threadsPerBlock>>>(b_d);
    cudaMemcpy(b_h, b_d, isize, cudaMemcpyDeviceToHost);
    printarray1(b_h, N);
    cudaFree(b_d);
    free(b_h);

    // kernel2: every element becomes its threadIdx.x
    printf("=== program kernel2 threadIdx.x ===\n");
    int *c_h, *c_d;
    c_h = (int*) malloc(isize);
    cudaMalloc((void**)&c_d, isize);
    kernel2<<<numBlocks, threadsPerBlock>>>(c_d);
    cudaMemcpy(c_h, c_d, isize, cudaMemcpyDeviceToHost);
    printarray1(c_h, N);
    cudaFree(c_d);
    free(c_h);
    return EXIT_SUCCESS;
}
|
22,955 | #include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double seconds(){
    struct timeval tp;
    // The timezone argument is obsolete; pass NULL. The original stored
    // gettimeofday's return code in an unused variable, dropped here.
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// In-place pairwise CPU reduction: returns the sum of data[0..size-1].
// The buffer is clobbered. Returns 0 for empty input.
// BUG FIX: the original halved the size with integer division and never
// touched the last element when size was odd, silently dropping it.
int recursiveReduce(int *data, int const size){
    if (size <= 0) return 0;
    // terminate check
    if (size == 1) return data[0];
    // renew stride
    int const stride = size / 2;
    // in-place reduction
    for (int i = 0; i < stride; i++){
        data[i] += data[i+stride];
    }
    // odd size: fold the orphan tail element into the partial sums
    if (size & 1) data[0] += data[size - 1];
    return recursiveReduce(data,stride);
}
// kernel 1
// Neighbored-pair reduction: each block sums its blockDim.x-slice of g_idata
// in place and writes the block total to g_odata[blockIdx.x]; the host sums
// the per-block results. The (tid % (2*stride)) test idles ever more lanes
// per warp each round -- heavy divergence (improved by the kernels below).
// NOTE(review): assumes n is a multiple of blockDim.x; a partial final block
// would read idata[tid+stride] past the valid range. TODO confirm launch.
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n){
/*
Do calculations on each block, save partly reduced result on the series of g_odata.
After it, sum g_odata up for getting the last result on the host side.
*/
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory on each block
for (int stride = 1; stride < blockDim.x; stride *= 2){
if ((tid % (2 * stride)) == 0){
idata[tid] += idata[tid+stride];
}
// synchronize within block
__syncthreads();
}
// write result for this block to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// kernel 2
// Neighbored-pair reduction with remapped indices: index = 2*stride*tid packs
// the active adds into the low-numbered threads, so whole warps stay busy or
// idle together (less divergence than reduceNeighbored).
// NOTE(review): assumes n is a multiple of blockDim.x -- TODO confirm launch.
__global__ void reduceNeighboredLess(int *g_idata,int *g_odata,unsigned int n){
// set threadID
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x*blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction
for (int stride = 1; stride < blockDim.x; stride *= 2){
// convert tid into local array index
int index = 2 * stride * tid;
if (index < blockDim.x){
idata[index] += idata[index+stride];
}
// synchronize within threadblock
__syncthreads();
}
// write the result for this block to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// kernel 3
// Interleaved-pair reduction: stride starts at blockDim.x/2 and halves each
// round, so active threads are always the contiguous low range [0, stride) --
// minimal divergence and contiguous (coalesced-friendly) accesses.
// NOTE(review): assumes n is a multiple of blockDim.x -- TODO confirm launch.
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n){
// interleaved pair implementation with less divergence
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x*blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x/2; stride > 0; stride >>= 1){
if (tid < stride) idata[tid] += idata[tid+stride];
__syncthreads();
}
// write result for this block to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// kernel 4
// Unroll-by-2 reduction: each block first folds one adjacent data block into
// its own (so launch with half as many blocks), then reduces interleaved.
// NOTE(review): only the unroll add is range-guarded; the reduction loop
// assumes n is a multiple of 2*blockDim.x. TODO confirm launch.
__global__ void reduceUnrolling2(int *g_idata,int *g_odata, unsigned int n){
// set thread ID, one threadblock operates on two datablocks
// so that blockidx * 2 * blockdim.x
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2 data blocks, add data from an adjacent data block
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx+blockDim.x];
__syncthreads();
// inplace reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1){
if (tid < stride){
idata[tid] += idata[tid+stride];
}
// synchronize within threadBlock
__syncthreads();
}
// write result for this block to global memory
if (tid ==0) g_odata[blockIdx.x] = idata[0];
}
// kernel 5
// Unroll-by-4 reduction: each block folds three adjacent data blocks into its
// own first (launch with a quarter of the blocks), then reduces interleaved.
// NOTE(review): the unroll guard is all-or-nothing (idx + 3*blockDim.x < n);
// assumes n is a multiple of 4*blockDim.x. TODO confirm launch.
__global__ void reduceUnrolling4(int *g_idata,int *g_odata,unsigned int n){
// set threadId, one threadBlock operates on four dataBlocks
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4 data blocks
if (idx + 3 * blockDim.x < n){
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + blockDim.x * 2];
int a4 = g_idata[idx + blockDim.x * 3];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1){
if (tid < stride){
idata[tid] += idata[tid+stride];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// kernel 6
// Unroll-by-4 reduction whose last six rounds (stride <= 32) are replaced by
// a volatile warp-synchronous sequence with no __syncthreads().
// NOTE(review): the volatile unroll relies on implicit warp lockstep, which
// is no longer guaranteed on Volta+ (independent thread scheduling); insert
// __syncwarp() between the steps for SM70+. Requires blockDim.x >= 64 so
// vmem[tid+32] is in range; assumes n is a multiple of 4*blockDim.x.
__global__ void reduceUnrollWarps4(int *g_idata,int *g_odata,unsigned int n){
// set threadId, one threadBlock operates on four dataBlocks
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4 data blocks
if (idx + 3 * blockDim.x < n){
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + blockDim.x * 2];
int a4 = g_idata[idx + blockDim.x * 3];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1){
if (tid < stride){
idata[tid] += idata[tid+stride];
}
__syncthreads();
}
// unrolling warp
if (tid <32){
volatile int *vmem = idata;
vmem[tid] += vmem[tid+32];
vmem[tid] += vmem[tid+16];
vmem[tid] += vmem[tid+8];
vmem[tid] += vmem[tid+4];
vmem[tid] += vmem[tid+2];
vmem[tid] += vmem[tid+1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// kernel 7
// Unroll-by-4 reduction with the whole stride loop unrolled into explicit
// size-gated steps (blockDim.x checked at runtime), finishing with the
// volatile warp-synchronous tail.
// NOTE(review): same Volta+ caveat as reduceUnrollWarps4 -- the volatile
// tail needs __syncwarp() on SM70+; requires blockDim.x >= 64 and a
// power-of-two block size; assumes n is a multiple of 4*blockDim.x.
__global__ void reduceCompleteUnrollWarps4(int *g_idata, int *g_odata, unsigned int n){
// set threadId
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3*blockDim.x < n){
int a1 = g_idata[idx];
int a2 = g_idata[idx+blockDim.x];
int a3 = g_idata[idx+2*blockDim.x];
int a4 = g_idata[idx+3*blockDim.x];
g_idata[idx] = a1+a2+a3+a4;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockDim.x>=1024 && tid < 512) idata[tid] += idata[tid+512];
__syncthreads();
if (blockDim.x>=512 && tid < 256) idata[tid] += idata[tid+256];
__syncthreads();
if (blockDim.x>=256 && tid < 128) idata[tid] += idata[tid+128];
__syncthreads();
if (blockDim.x>=128 && tid < 64) idata[tid] += idata[tid+64];
__syncthreads();
// unrolling warp
if (tid < 32) {
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid+32];
vsmem[tid] += vsmem[tid+16];
vsmem[tid] += vsmem[tid+ 8];
vsmem[tid] += vsmem[tid+ 4];
vsmem[tid] += vsmem[tid+ 2];
vsmem[tid] += vsmem[tid+ 1];
}
// write result
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Template variant of reduceCompleteUnrollWarps4: iBlockSize is a
// compile-time constant, so the size-gate branches below are resolved at
// compile time and dead ones are eliminated. Must be instantiated with the
// actual launch block size (see the switch in main).
// NOTE(review): same Volta+ implicit-warp-sync caveat as the kernels above.
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3 * blockDim.x < n){
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + blockDim.x * 2];
int a4 = g_idata[idx + blockDim.x * 3];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads();
// in-place reduction and complete unroll
if (iBlockSize>=1024 && tid < 512) idata[tid] += idata[tid+512];
__syncthreads();
if (iBlockSize>=512 && tid < 256) idata[tid] += idata[tid+256];
__syncthreads();
if (iBlockSize>=256 && tid < 128) idata[tid] += idata[tid+128];
__syncthreads();
if (iBlockSize>=128 && tid < 64) idata[tid] += idata[tid+64];
__syncthreads();
// unrolling warp
if (tid < 32) {
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid+32];
vsmem[tid] += vsmem[tid+16];
vsmem[tid] += vsmem[tid+ 8];
vsmem[tid] += vsmem[tid+ 4];
vsmem[tid] += vsmem[tid+ 2];
vsmem[tid] += vsmem[tid+ 1];
}
// write result
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Benchmark driver: runs the CPU reduction then each GPU variant, printing
// the elapsed time and sum for every one. Validation compares only the LAST
// kernel's sum against the CPU result.
int main(int argc, char** argv){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s starting reduction at ",argv[0]);
printf("device %d: %s ",dev,deviceProp.name);
cudaSetDevice(dev);
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elementse to reduce
printf("reduce with array size %d ",size);
// execution configuration
int blocksize = 512;
if (argc > 1) blocksize = atoi(argv[1]);
dim3 block(blocksize,1);
dim3 grid ((size+block.x-1)/block.x,1);
printf("grid %d block %d\n",grid.x,block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x*sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i <size; i++){
// mask off high 2 bytes to force max number to 255
h_idata[i] = (int)(rand() & 0xFF);
}
// NOTE(review): memcpy/rand/atoi rely on transitively-included headers;
// <cstring> and <cstdlib> should be included explicitly at file top.
memcpy(tmp,h_idata,bytes);
double iStart,iElaps;
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
cudaMalloc((void **) &d_idata,bytes);
cudaMalloc((void **) &d_odata,grid.x*sizeof(int));
// cpu reduction
iStart = seconds();
int cpu_sum = recursiveReduce(tmp,size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d \n",iElaps,cpu_sum);
// kernel 1: reduceNeighbored
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
reduceNeighbored<<<grid,block>>>(d_idata,d_odata,size);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,grid.x * sizeof(int),cudaMemcpyDeviceToHost);
for (int i=0; i <grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x,block.x);
// kernel 2: reduceNeighbored with less divergence
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
reduceNeighboredLess<<<grid,block>>>(d_idata,d_odata,size);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,grid.x * sizeof(int),cudaMemcpyDeviceToHost);
gpu_sum = 0;
for(int i = 0;i<grid.x;i++) gpu_sum += h_odata[i];
printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x,block.x);
// kernel 3: reduceInterleaved
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
reduceInterleaved<<<grid,block>>>(d_idata,d_odata,size);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,grid.x*sizeof(int),cudaMemcpyDeviceToHost);
gpu_sum = 0;
for(int i=0;i<grid.x;i++) gpu_sum += h_odata[i];
printf("gpu Interleaved elaped %f sec gpu_sum %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x,block.x);
// kernel 4: reduceUnrolling2 (each block covers 2 data blocks -> grid/2)
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
reduceUnrolling2<<<grid.x/2,block>>>(d_idata,d_odata,size);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,(grid.x/2)*sizeof(int),cudaMemcpyDeviceToHost);
gpu_sum = 0;
for(int i=0;i<grid.x/2;i++) gpu_sum += h_odata[i];
printf("gpu Unrolling2 elaped %f sec gpu_sum %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x/2,block.x);
// kernel 5: reduceUnrolling4 (grid/4)
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
reduceUnrolling4<<<grid.x/4,block>>>(d_idata,d_odata,size);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,(grid.x/4)*sizeof(int),cudaMemcpyDeviceToHost);
gpu_sum = 0;
for(int i=0;i<grid.x/4;i++) gpu_sum += h_odata[i];
printf("gpu Unrolling4 elaped %f sec gpu_sum %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x/4,block.x);
// kernel 6: reduceUnrollWarps4
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
reduceUnrollWarps4<<<grid.x/4,block>>>(d_idata,d_odata,size);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,(grid.x/4)*sizeof(int),cudaMemcpyDeviceToHost);
gpu_sum = 0;
for(int i=0;i<grid.x/4;i++) gpu_sum += h_odata[i];
printf("gpu UnrollWarps4 elaped %f sec gpu_sum %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x/4,block.x);
// kernel 7: reduceCompleteUnrollWarps4
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
reduceCompleteUnrollWarps4<<<grid.x/4,block>>>(d_idata,d_odata,size);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,(grid.x/4)*sizeof(int),cudaMemcpyDeviceToHost);
gpu_sum = 0;
for(int i=0;i<grid.x/4;i++) gpu_sum += h_odata[i];
printf("gpu CompleteUnrollWarps4 elaped %f sec gpu_sum %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x/4,block.x);
// kernel 8: reduceCompleteUnroll -- the template parameter must match the
// runtime block size, hence the switch. NOTE(review): an unlisted blocksize
// silently skips the launch and reuses stale d_odata contents.
cudaMemcpy(d_idata,h_idata,bytes,cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
iStart = seconds();
switch (blocksize)
{
case 1024:
reduceCompleteUnroll<1024><<<grid.x/4,block>>>(d_idata,d_odata,size);
break;
case 512:
reduceCompleteUnroll<512><<<grid.x/4,block>>>(d_idata,d_odata,size);
break;
case 256:
reduceCompleteUnroll<256><<<grid.x/4,block>>>(d_idata,d_odata,size);
break;
case 128:
reduceCompleteUnroll<128><<<grid.x/4,block>>>(d_idata,d_odata,size);
break;
case 64:
reduceCompleteUnroll<64><<<grid.x/4,block>>>(d_idata,d_odata,size);
break;
}
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
cudaMemcpy(h_odata,d_odata,(grid.x/4)*sizeof(int),cudaMemcpyDeviceToHost);
gpu_sum = 0;
for(int i=0;i<grid.x/4;i++) gpu_sum += h_odata[i];
printf("gpu CompleteUnroll<template> elaped %f sec gpu_sum %d <<<grid %d block %d>>>\n",
iElaps,gpu_sum,grid.x/4,block.x);
free(h_idata);
free(h_odata);
// BUG FIX: the CPU scratch buffer was never released.
free(tmp);
cudaFree(d_idata);
cudaFree(d_odata);
cudaDeviceReset();
// check the results (validates only the last kernel run)
bResult = (gpu_sum == cpu_sum);
if (!bResult) printf("Test Failed!\n");
return EXIT_SUCCESS;
}
|
22,956 | #include "includes.h"
// Copies nImages tiles of outNX x outNY elements out of a larger input image
// into a densely packed output batch. Tile idxImage starts at row
// offsetX[idxImage], column offsetY[idxImage] of the input (row stride inNY).
// Launch: blockIdx.z selects the image; 2D x/y threads cover the output tile.
// NOTE(review): offsets are not validated -- caller must keep reads in range.
__global__ void cuArraysCopyToBatchWithOffset_kernel(const float2 *imageIn, const int inNY, float2 *imageOut, const int outNX, const int outNY, const int nImages, const int *offsetX, const int *offsetY)
{
int idxImage = blockIdx.z;
int outx = threadIdx.x + blockDim.x*blockIdx.x;
int outy = threadIdx.y + blockDim.y*blockIdx.y;
if(idxImage>=nImages || outx >= outNX || outy >= outNY) return;
int idxOut = idxImage*outNX*outNY + outx*outNY + outy;
int idxIn = (offsetX[idxImage]+outx)*inNY + offsetY[idxImage] + outy;
imageOut[idxOut] = imageIn[idxIn];
}
22,957 | #include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <iomanip>
#include <iostream>
// helper for time measurement
typedef std::chrono::duration<double, std::milli> d_ms;
const auto &now = std::chrono::high_resolution_clock::now;
// Define Error Checking Macro
#define CU_CHK(ERRORCODE) \
{ \
cudaError_t error = ERRORCODE; \
if (error != 0) { \
std::cerr << cudaGetErrorName(error) << ": " \
<< cudaGetErrorString(error) << " at " << __FILE__ << ":" \
<< __LINE__ << "\n"; \
} \
}
// Constants
const static int DEFAULT_NUM_ELEMENTS = 1024;
const static int DEFAULT_NUM_ITERATIONS = 5;
const static int DEFAULT_BLOCK_DIM = 128;
// Structures
struct StencilArray_t {
float *array;
float *tmp_array;
int size; // size == width == height
};
// Function Prototypes
void writeToFile(float *matrix, const char *name, int size);
// Stencil Code Kernel for the speed calculation
extern void simpleStencil_Kernel_Wrapper(int gridSize, int blockSize, int size,
float *grid_old, float *grid_new);
extern void optStencil_Kernel_Wrapper(int gridSize, int blockSize, int shm_size,
int size, float *grid_old,
float *grid_new);
// Main
// Stencil benchmark driver: parses size/iterations, initializes the grid,
// runs the (optionally optimized) stencil kernel wrapper numIterations
// times with buffer ping-ponging, and reports H2D / kernel / D2H timings.
int main(int argc, char *argv[]) {
  // Process Arguments
  if (argc < 2 or std::string(argv[1]) == "-h") {
    std::cout << "Usage:\n\t" << argv[0] << " <problemsize> [iterations] [result.pgm]\n";
    return 1;
  }
  int numElements = 0;
  numElements = std::stoi(argv[1]);
  numElements = numElements > 0 ? numElements : DEFAULT_NUM_ELEMENTS;
  int numIterations = 0;
  if (argc > 2)
    numIterations = std::stoi(argv[2]);
  numIterations = numIterations != 0 ? numIterations : DEFAULT_NUM_ITERATIONS;

  // BUG FIX: size*size*sizeof(float) was evaluated in int and only then cast
  // to size_t, overflowing for grids larger than ~23170 x 23170. Compute the
  // byte count once, in size_t, and reuse it everywhere.
  const size_t gridBytes =
      static_cast<size_t>(numElements) * numElements * sizeof(float);

  // Host memory (pinned, for faster transfers)
  StencilArray_t h_array;
  h_array.size = numElements;
  CU_CHK(cudaMallocHost(&(h_array.array), gridBytes));
  CU_CHK(cudaMallocHost(&(h_array.tmp_array), gridBytes));

  // Init grid: hot top edge (127), warm inner square (100), cold elsewhere
  // srand(static_cast<unsigned>(time(0)));
  srand(0); // Always the same random numbers
  for (int i = 0; i < h_array.size; i++) {
    for (int j = 0; j < h_array.size; j++) {
      if (i == 0)
        h_array.array[i + h_array.size * j] = 127;
      else if (i > h_array.size / 4 && i < h_array.size * 3 / 4 &&
               j > h_array.size / 4 && j < h_array.size * 3 / 4)
        h_array.array[i + h_array.size * j] = 100;
      else
        h_array.array[i + h_array.size * j] = 0;
    }
  }

  // Device Memory
  StencilArray_t d_array;
  d_array.size = h_array.size;
  CU_CHK(cudaMalloc(&(d_array.array), gridBytes));
  CU_CHK(cudaMalloc(&(d_array.tmp_array), gridBytes));

  // Copy Data to the Device
  auto t1 = now();
  CU_CHK(cudaMemcpy(d_array.array, h_array.array, gridBytes,
                    cudaMemcpyHostToDevice));
  CU_CHK(cudaMemcpy(d_array.tmp_array, h_array.array, gridBytes,
                    cudaMemcpyHostToDevice));
  auto t2 = now();

  // Block Dimension / Threads per Block
  int block_dim = DEFAULT_BLOCK_DIM;
#ifdef OPT_KERNEL
  std::cout << "Using optimized Kernel\n";
#endif
  // Each block covers block_dim - 2 points (halo of one cell on each side)
  int grid_dim = ceil(static_cast<float>(d_array.size) /
                      static_cast<float>(block_dim - 2));
  std::cout << "Computing grid with " << d_array.size << "x" << d_array.size
            << " elements and " << numIterations << " iterations\n";
  std::cout << "Launch kernel with " << grid_dim << " blocks and " << block_dim
            << " threads per block\n";

  auto t3 = now();
  for (int i = 0; i < numIterations; i++) {
#ifdef OPT_KERNEL
    optStencil_Kernel_Wrapper(grid_dim, block_dim,
                              block_dim * 4 * sizeof(float), d_array.size,
                              d_array.array, d_array.tmp_array);
#else
    simpleStencil_Kernel_Wrapper(grid_dim, block_dim, d_array.size,
                                 d_array.array, d_array.tmp_array);
#endif
    // Ping-pong the buffers between iterations
    float *tmp = d_array.array;
    d_array.array = d_array.tmp_array;
    d_array.tmp_array = tmp;
  }
  // Synchronize (also surfaces asynchronous kernel errors via CU_CHK)
  CU_CHK(cudaDeviceSynchronize());
  auto t4 = now();

  // Copy Back Data (now error-checked like every other transfer)
  auto t5 = now();
  CU_CHK(cudaMemcpy(h_array.array, d_array.array, gridBytes,
                    cudaMemcpyDeviceToHost));
  auto t6 = now();

  if (argc > 3)
    writeToFile(h_array.array, argv[3], h_array.size);

  // Compute time for copies and kernel
  d_ms time_copyH2D = t2 - t1;
  d_ms time_kernel = t4 - t3;
  d_ms time_copyD2H = t6 - t5;

  // Free Memory
  CU_CHK(cudaFreeHost(h_array.array));
  CU_CHK(cudaFreeHost(h_array.tmp_array));
  CU_CHK(cudaFree(d_array.array));
  CU_CHK(cudaFree(d_array.tmp_array));

  // Print Measurement Results
  // BUG FIX: "\[" is not a valid escape sequence (compilers warn and emit a
  // plain '['), and d_ms is std::chrono::duration<double, std::milli>, so
  // the values are milliseconds -- the labels said "[s]".
  std::cout << "Results:\n"
            << "H2D [ms], kernel [ms], D2H [ms]\n"
            << time_copyH2D.count() << ", " << time_kernel.count() << ", "
            << time_copyD2H.count() << "\n";
  return 0;
}
// Dump a size x size grid as an ASCII PGM (P2, max value 127), transposing
// so that grid[j*size + i] is emitted at row i, column j.
void writeToFile(float *grid, const char *name, int size) {
  FILE *pFile = fopen(name, "w");
  // BUG FIX: the original dereferenced pFile without checking fopen's
  // result, crashing on an unwritable path.
  if (pFile == NULL) {
    fprintf(stderr, "writeToFile: cannot open '%s' for writing\n", name);
    return;
  }
  fprintf(pFile, "P2 %d %d %d\n", size, size, 127);
  for (int i = 0; i < size; i++) {
    for (int j = 0; j < size; j++) {
      fprintf(pFile, "%d ", (int)grid[j * size + i]);
    }
    fprintf(pFile, "\n");
  }
  fclose(pFile);
}
|
22,958 | // Variable num_cores denotes the number of threads to run the code on.
#include <stdio.h>
//#include <omp.h>
#include <string.h>
#include <math.h>
//#include "../common/common.h"
#include <cuda_runtime.h>
/*
* compute string value, length should be small than strlen
*/
// One thread per text chunk (d_len chars): computes the polynomial hash
// sum_i c[i] * d^i for every length-`subpattern_length` window and stores
// them in this thread's row of d_iss (row length d_len-subpattern_length+1).
// Rolling update: subtracting the outgoing char leaves a sum divisible by d;
// dividing by d shifts the digits down, and the incoming char is added with
// weight p (the host passes p = d^(subpattern_length-1)). No modulus is
// applied -- NOTE(review): hashes can overflow int for long subpatterns or
// large d; TODO confirm intended ranges.
__global__ void findHashes(char *d_css, int d_len, int *d_iss, int subpattern_length, int d, int p)
{
int i = 0;
int ind = d_len * threadIdx.x;
int d_iss_length = d_len - subpattern_length + 1;
int d_iss_index = d_iss_length * threadIdx.x;
d_iss += d_iss_index;
d_css += ind;
// initial window hash, lowest-weight char first
d_iss[0] = 0;
int pw = 1;
for (; i < subpattern_length; i++) {
d_iss[0] += pw * (d_css[i]);
pw *= d;
}
//d_iss[0] %= q;
// rolling hashes for the remaining windows
for (i = 1; i < d_len - subpattern_length + 1; i++)
{
d_iss[i] = ((d_css[i + subpattern_length - 1]) * p + (d_iss[i - 1] - (d_css[i - 1])) / d); //% q;
}
}
// One thread per subpattern: computes the polynomial hash sum_i c[i] * d^i
// over its subpattern_length chars (same scheme as findHashes) and stores it
// in d_ipatterns[threadIdx.x]. The printf is debug output.
__global__ void findSubpatternHashes( char *d_cpatterns, int subpattern_length, int *d_ipatterns, int d )
{
int pw = 1;
int p0=0;
int index = threadIdx.x;
for (int i=0; i < subpattern_length; i++)
{
p0 += pw * (d_cpatterns[i + index * subpattern_length]);
pw *= d;
}
d_ipatterns[index] = p0;
printf("\nThe hash of the subpattern %d is %d\n", index, p0 );
}
// Candidate matching: block blockIdx.x handles one subpattern, thread
// threadIdx.x one text chunk. Each thread scans its row of window hashes in
// d_iss; on a hash hit for subpattern 0 it records the position and verifies
// the full pattern char-by-char against d_css. Heavy printf output is debug.
// NOTE(review): `k` is a per-thread local always starting at 0, so multiple
// threads of block 0 write d_matches[0], d_matches[1], ... concurrently --
// a data race; an atomically-incremented cursor is needed. TODO confirm.
__global__ void seekPattern(char *d_css, int d_len, int *d_iss, int subpattern_length, char* d_cpatterns, int* d_ipatterns, int d, int* d_matches, char *d_pattern, int pattern_length)
{
int i = 0;
int j=0;
int k = 0;
int index = blockIdx.x * blockDim.x + threadIdx.x;
// number of hash windows per chunk
int d_iss_len = d_len - subpattern_length + 1;
int ind = d_len * threadIdx.x;
//pointing the first element of every row of d_iss
int d_iss_index = d_iss_len * threadIdx.x;
d_iss += d_iss_index;
//d_css += ind;
for (i = 0; i < d_iss_len; i++)
{
if (d_iss[i] == d_ipatterns[blockIdx.x])
{
printf("pattern hash %d, text hash %d, block id %d, i = %d, thread id: %d\n",d_ipatterns[blockIdx.x],d_iss[i],blockIdx.x,i,threadIdx.x);
int pos = threadIdx.x * (d_len - subpattern_length + 1) + i;
printf("pos of subpattern %d is :%d\n", blockIdx.x, pos);
if( blockIdx.x == 0 )
{
d_matches[k] = pos; // use a stack instead
k++;
// full verification: compare the whole pattern at this offset
for (j = 0; j < pattern_length; j++)
{
printf(" pattern char: %c, text char: %c, pattern pos: %d, text pos: %d\n",d_pattern[j],d_css[d_len * threadIdx.x + i+j],j,d_len * threadIdx.x + i +j);
if (d_pattern[j] != d_css[d_len * threadIdx.x + i + j])
{
break;
}
else if (j == pattern_length - 1)
{
printf("position of the pattern is :%d\n", pos);
}
}
}
/*for (j = 0; j < pattern_length; j++)
{
if (d_cpatterns[subpattern_length * blockIdx.x + j] != d_css[i + j])
{
break;
} else if (j == subpattern_length - 1)
{
// printf("ThreadId: %d\n", threadIdx.x);
printf("pos of subpattern %d is :%d\n", blockIdx.x, threadIdx.x + i + g - 1);
//printf("pos for :id\n", threadIdx.x*(d_len)+i-subpattern_length+1);
}
}*/
}
}
}
// Parallel Rabin-Karp-style search driver: the text is split into num_cores
// overlapping chunks (one per thread) and the pattern into `wrap`
// subpatterns of subpattern_length chars (one per block).
int main(int argc, char *argv[])
{
    int i = 0;
    int j = 0;
    char str[] = "ABCDEFGDFSDDEABCGFGXCVMSG";
    char pattern[] = "DEFG";
    int d = 3;               // hash base
    int num_cores = 8;       // number of text chunks / threads
    int subpattern_length = 4;
    printf("the text is %s\n",str);
    int str_length = strlen(str);
    printf("Length of the text : %d\n",str_length);
    int pattern_length = strlen(pattern);
    printf("Length of the pattern : %d\n",pattern_length);
    int wrap = pattern_length / subpattern_length;   // number of subpatterns
    printf("wraps : %d\n",wrap);
    // Chunking: g exclusive chars per chunk + subpattern_length-1 overlap
    int g = ( str_length - subpattern_length + 1 ) / num_cores;
    printf("value of g : %d\n",g);
    int padding_len = subpattern_length - 1;
    int el_chunk_len = g + padding_len;
    printf(" text chunk length: %d\n",el_chunk_len);

    // Host buffers (VLAs)
    char css[num_cores][el_chunk_len];       // text chunks
    int iss[num_cores][el_chunk_len];        // per-chunk window hashes
    char cpatterns[wrap][subpattern_length]; // subpattern characters
    int ipatterns[wrap][subpattern_length];  // subpattern hash scratch
    int matches[wrap][subpattern_length];    // candidate match positions

    printf("The subpatterns are: \n");
    for(int i=0; i<wrap; i++)
    {
        for(int j=0; j < subpattern_length; j++)
        {
            cpatterns[i][j] = pattern[subpattern_length*i + j];
            printf("%c",cpatterns[i][j]);
        }
        printf("\n");
    }

    // Device buffers
    char *d_css;
    char *d_pattern;
    char *d_cpatterns;
    int *d_matches;
    int *d_iss;
    int *d_ipatterns;
    int nchars = num_cores * el_chunk_len;
    int mchars = wrap * subpattern_length;
    cudaMalloc((char **)&d_css, nchars * sizeof(char));
    cudaMalloc((int **)&d_iss, nchars * sizeof(int));
    cudaMalloc((char **)&d_cpatterns, mchars * sizeof(char));
    // BUG FIX: d_ipatterns holds ints but was allocated with sizeof(char),
    // i.e. a quarter of the required bytes.
    cudaMalloc((int **)&d_ipatterns, mchars * sizeof(int));
    cudaMalloc((int **)&d_matches, mchars * sizeof(int));
    cudaMalloc((char **)&d_pattern, pattern_length*sizeof(char));

    // Fill the exclusive part of every chunk
    for (int i=0; i < num_cores; i++)
    {
        for( j = 0; j < g; j++)
        {
            css[i][j] = str[ i * g + j ];
        }
    }
    // Fill the overlapping tail characters
    for (int i = 0; i < num_cores; i++)
    {
        int k = 0;
        for (int j = g ; j < el_chunk_len; j++ )
        {
            css[i][j] = str[ ((i+1)*g) + k ];
            k++;
        }
    }
    printf(" The subtexts are: \n");
    for ( int i = 0; i < num_cores; i++ )
    {
        for( int j = 0; j < el_chunk_len; j++ )
        {
            printf("%c",css[i][j]);
        }
        printf("\n");
    }

    // Upload inputs. BUG FIX: the int buffers were copied with plain element
    // counts, but cudaMemcpy takes BYTES -- the counts are now scaled by
    // sizeof(int). (iss/ipatterns/matches carry no meaningful data yet; the
    // kernels overwrite them on the device.)
    cudaMemcpy(d_css, css, nchars, cudaMemcpyHostToDevice);
    cudaMemcpy(d_iss, iss, nchars * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_cpatterns, cpatterns, mchars, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ipatterns, ipatterns, mchars * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_matches, matches, mchars * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_pattern, pattern, pattern_length, cudaMemcpyHostToDevice);

    // p = d^(subpattern_length-1): weight of the newest char in the rolling hash
    int p = pow(d, subpattern_length - 1);
    printf("The text hashes are \n");
    findHashes <<< 1, num_cores >>> (d_css, el_chunk_len, d_iss, subpattern_length, d, p);
    // BUG FIX: byte count was missing sizeof(int) here as well.
    cudaMemcpy(iss, d_iss, num_cores * (el_chunk_len - subpattern_length + 1) * sizeof(int), cudaMemcpyDeviceToHost);

    findSubpatternHashes <<< 1, wrap >>> (d_cpatterns, subpattern_length, d_ipatterns, d );

    // Whole-pattern hash, computed sequentially on the host for reference
    int pw = 1;
    int patternHash = 0;
    for (i=0; i < pattern_length; i++) {
        patternHash += pw * (pattern[i]);
        pw *= d;
    }
    printf("The hash of the pattern is %d\n", patternHash);

    seekPattern<<<wrap, num_cores>>>(d_css, el_chunk_len, d_iss, subpattern_length, d_cpatterns, d_ipatterns, d, d_matches, d_pattern, pattern_length);
    // Let the kernels (and their printf output) finish before tearing down.
    cudaDeviceSynchronize();

    // Release device memory (the original leaked four of these buffers)
    cudaFree(d_iss);
    cudaFree(d_css);
    cudaFree(d_cpatterns);
    cudaFree(d_ipatterns);
    cudaFree(d_matches);
    cudaFree(d_pattern);
    return 0;
}
|
// Fill a rows x columns int matrix with the constant `val`.
__global__ void _InitI(int *res,int val,int rows,int columns){
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= rows || col >= columns) return;
    res[row * columns + col] = val;
}
// res(rows x columns) += arr, with arr broadcast/tiled via modulo indexing
// when it is smaller than res.
__global__ void _AddI(int *res,int *arr,int arrRows,int arrColumns,int rows,int columns){
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= rows || col >= columns) return;
    const int src = (row % arrRows) * arrColumns + (col % arrColumns);
    res[row * columns + col] += arr[src];
}
// res(rows x columns) += lamda * arr, with arr broadcast/tiled via modulo
// indexing when it is smaller than res.
__global__ void _LAddI(int *res,int *arr,int lamda,int arrRows,int arrColumns,int rows,int columns){
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= rows || col >= columns) return;
    const int src = (row % arrRows) * arrColumns + (col % arrColumns);
    res[row * columns + col] += lamda * arr[src];
}
__global__ void _MulI(int *res,int *a,int *b,int cDim,int rows,int columns){
    // Integer matrix product: res(rows x columns) = a(rows x cDim) * b(cDim x columns).
    // One thread per output element; 2D launch with x over rows, y over columns.
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < rows && y < columns){
        int sum = 0;
        for (int i=0; i<cDim; i++){
            // BUG FIX: rows of b are `columns` wide, so stride b by `columns`
            // (was i*cDim + y, which is only correct when cDim == columns).
            // This matches the double-precision _MulD kernel in this file.
            sum += a[x*cDim + i] * b[i*columns + y];
        }
        res[x*columns + y] = sum;
    }
}
__global__ void _InitD(double *res,double val,int rows,int columns){
    // Fill the rows x columns double matrix with `val` (one thread per cell).
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r < rows && c < columns)
        res[r * columns + c] = val;
}
__global__ void _AddD(double *res,double *arr,int arrRows,int arrColumns,int rows,int columns){
    // Tiled add: res[r][c] += arr[r % arrRows][c % arrColumns].
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    const int src = (r % arrRows) * arrColumns + (c % arrColumns);
    res[r * columns + c] += arr[src];
}
__global__ void _2AddD(double *res,double *a,double *b,int bRows,int bColumns,int rows,int columns){
    // Out-of-place add with b broadcast/tiled:
    // res[r][c] = a[r][c] + b[r % bRows][c % bColumns].
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    const int dst = r * columns + c;
    const int src = (r % bRows) * bColumns + (c % bColumns);
    res[dst] = a[dst] + b[src];
}
__global__ void _LAddD(double *res,double *arr,double lamda,int arrRows,int arrColumns,int rows,int columns){
    // Scaled tiled add: res[r][c] += lamda * arr[r % arrRows][c % arrColumns].
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    const int src = (r % arrRows) * arrColumns + (c % arrColumns);
    res[r * columns + c] += lamda * arr[src];
}
__global__ void _MulD(double *res,double *a,double *b,int cDim,int rows,int columns){
    // Matrix product: res(rows x columns) = a(rows x cDim) * b(cDim x columns).
    // One thread computes one output element via a dot product over cDim.
    const int row = threadIdx.x + blockIdx.x * blockDim.x;
    const int col = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= rows || col >= columns) return;
    double acc = 0.0;
    for (int k = 0; k < cDim; k++)
        acc += a[row * cDim + k] * b[k * columns + col];
    res[row * columns + col] = acc;
}
__global__ void _SMulD(double *res,double val,int rows,int columns){
    // In-place scalar multiply: res[r][c] *= val.
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    res[r * columns + c] = res[r * columns + c] * val;
}
__global__ void _2SMulD(double *res,double *a,double val,int rows,int columns){
    // Out-of-place scalar multiply: res[r][c] = a[r][c] * val.
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    const int idx = r * columns + c;
    res[idx] = a[idx] * val;
}
__global__ void _DivD(double *res,double val,int rows,int columns){
    // In-place scalar divide: res[r][c] /= val (caller must ensure val != 0).
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    res[r * columns + c] = res[r * columns + c] / val;
}
__global__ void _TransposeD(double *res,double *a,int rows,int columns){
// Transpose into res (rows x columns). The read strides a by `rows` per row,
// i.e. a is presumably the columns x rows source matrix -- TODO confirm at call site.
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//
if (x < rows && y < columns){
int pos1 = x * columns + y;
int pos2 = y * rows + x;
res[pos1] = a[pos2];
}
}
__global__ void _GetDiagonalD(double *res,int rows,int columns){
    // Zero every off-diagonal entry in place, keeping only res[i][i].
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    if (r != c)
        res[r * columns + c] = 0.0;
}
__global__ void _DotD(double *res,double *a,double *b,int bRows,int bColumns,int rows,int columns){
    // Element-wise (Hadamard) product with b broadcast/tiled:
    // res[r][c] = a[r][c] * b[r % bRows][c % bColumns].
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= rows || c >= columns) return;
    const int dst = r * columns + c;
    const int src = (r % bRows) * bColumns + (c % bColumns);
    res[dst] = a[dst] * b[src];
}
__global__ void _ColAddD(double *res,int col,double val,int rows,int columns){
    // Add `val` to every entry of column `col` in the rows x columns matrix.
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r < rows && c < columns && c == col)
        res[r * columns + c] += val;
}
|
22,960 | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void cudahello(){
    // Each thread announces its own coordinates within the launch grid
    // via device-side printf (output order is not deterministic).
    const int hilo = threadIdx.x;
    const int bloque = blockIdx.x;
    printf("Hola Mundo! Soy el hilo %d del bloque %d\n", hilo, bloque);
}
int main(){
    // Launch 4 blocks of 4 threads; each thread prints a greeting.
    cudahello<<<4,4>>>();
    // Kernel launches are asynchronous and report nothing by themselves:
    // surface launch/config errors explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Block until the kernel finishes so all device printf output is flushed.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("Device sync failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
22,961 | #include "device_launch_parameters.h"
#include <stdio.h>
#define arraySize 5
#define threadPerBlock 5
// Kernel Function for Rank sort
// Rank ("enumeration") sort kernel: each thread counts how many of the
// arraySize elements are smaller than its own value, then writes its value
// at that rank in d_b. The input is streamed through shared memory one
// threadPerBlock-sized chunk at a time.
// NOTE(review): equal values produce equal counts and collide on the same
// output slot -- assumes all input values are distinct; confirm with caller.
// NOTE(review): if arraySize is not a multiple of threadPerBlock, the final
// chunk leaves stale entries in cache that the inner loop still scans.
__global__ void addKernel(int *d_a, int *d_b) {
int count = 0;
int tid = threadIdx.x;
int ttid = blockIdx.x * threadPerBlock + tid;
// Each thread owns one element and ranks it against the whole array.
int val = d_a[ttid];
__shared__ int cache[threadPerBlock];
for (int i = tid; i < arraySize; i += threadPerBlock) {
// Stage the next chunk cooperatively, one element per thread.
cache[tid] = d_a[i];
__syncthreads();
for (int j = 0; j < threadPerBlock; ++j)
if (val > cache[j])
count++;
// Barrier before the chunk buffer is overwritten next iteration.
__syncthreads();
}
// count == number of smaller elements == final sorted position.
d_b[count] = val;
}
// Driver: rank-sorts a fixed 5-element array on the GPU and prints the result.
// NOTE(review): no CUDA call here is error-checked; failures would silently
// print garbage from the uninitialized h_b.
// NOTE(review): the grid size arraySize / threadPerBlock is integer division
// (1 here); it launches 0 blocks if arraySize < threadPerBlock.
int main() {
// Define Host and Device Array
int h_a[arraySize] = {5, 9, 3, 4, 8};
int h_b[arraySize];
int *d_a, *d_b;
// Allocate Memory on the device
cudaMalloc((void **)&d_b, arraySize * sizeof(int));
cudaMalloc((void **)&d_a, arraySize * sizeof(int));
// Copy input vector from host memory to device memory.
cudaMemcpy(d_a, h_a, arraySize * sizeof(int), cudaMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<arraySize / threadPerBlock, threadPerBlock>>>(d_a, d_b);
// Wait for device to finish operations
cudaDeviceSynchronize();
// Copy output vector from GPU buffer to host memory.
cudaMemcpy(h_b, d_b, arraySize * sizeof(int), cudaMemcpyDeviceToHost);
printf("The Enumeration sorted Array is: \n");
for (int i = 0; i < arraySize; i++) {
printf("%d\n", h_b[i]);
}
// Free up device memory
cudaFree(d_a);
cudaFree(d_b);
return 0;
} |
22,962 | void
init_bounds(int size, double *u) {
// Set fixed (Dirichlet) boundary values on the size x size row-major grid:
// left column to 0, top row / right column / bottom row to 20.
// Corners get whichever of the four writes lands last in this loop body.
int i;
for (i = 0; i < size; i++) {
u[i * size + 0] = 0;
u[0 * size + i] = 20;
u[i * size + (size - 1)] = 20;
u[(size - 1) * size + i] = 20;
}
}
void init_interior(int size, double *u, double guess) {
    // Fill every non-boundary cell of the size x size row-major grid
    // with the initial guess; boundary rows/columns are left untouched.
    for (int row = 1; row + 1 < size; row++) {
        for (int col = 1; col + 1 < size; col++) {
            u[row * size + col] = guess;
        }
    }
}
void
init_f(int N, double *f) {
    // Place a constant heat source of 200 on a rectangular patch of the
    // (N+2) x (N+2) row-major grid f. The patch corners are mapped from
    // fixed fractions of the domain; the double -> int assignments truncate,
    // which is exactly how the original picked its index bounds.
    int i_min = (N + 1) / 2. * (0 + 1);
    int i_max = (N + 1) / 2. * (1. / 3 + 1);
    int j_max = (N + 1) / 2. * ( - 1. / 3 + 1);
    int j_min = (N + 1) / 2. * (- 2. / 3 + 1);
    for (int i = i_min; i <= i_max; i++)
        for (int j = j_min; j <= j_max; j++)
            f[i * (N + 2) + j] = 200.;
}
// Initialize the full solution grid: fixed boundary values first,
// then the interior filled with the caller-supplied initial guess.
void
init_u(int size, double *u, double guess) {
init_bounds(size, u);
init_interior(size, u, guess);
}
|
22,963 |
/* Fold each FFT chunk separately.
* pol0, pol1 are input baseband data
* Only works for 4 pol output.
* Call with grid dims (nffts, nbins/BINS_PER_BLOCK)
* All shared blocks need to fit into shared mem (16kB)
*/
#define BINS_PER_BLOCK 64
#define NTHREAD_FOLD BINS_PER_BLOCK
// Fold one FFT chunk per blockIdx.x into pulse-phase bins.
// Launch: grid (nffts, nbin/BINS_PER_BLOCK), BINS_PER_BLOCK threads/block;
// each thread accumulates exactly one phase bin (bin_lo).
__global__ void fold_fft_blocks(const float2 *pol0, const float2 *pol1,
const double *phase, const double *step,
int fftlen, int overlap, int nbin,
float4 *foldtmp, unsigned *foldtmp_c) {
// Size params
const int ifft = blockIdx.x;
const int ibin = blockIdx.y;
const int tid = threadIdx.x; // Thread index within the block
const int nvalid = fftlen - overlap;
// Pointers to start of valid data in global mem
// (skip half the overlap region at the front of each FFT block)
const float2 *ptr0 = pol0 + ifft*fftlen + overlap/2;
const float2 *ptr1 = pol1 + ifft*fftlen + overlap/2;
// Fold info
const double bin0 = phase[ifft];
const double bin_samp = step[ifft]; // bins/sample
const double samp_bin = 1.0/bin_samp; // samples/bin
const int bin_lo = ibin*BINS_PER_BLOCK + tid; // assumes 1 thread/bin
const int nturn = ((double)nvalid*bin_samp)/(double)nbin + 2; // GD : Change +1 into +2
// Fold results for this thread
float4 folddata = make_float4(0,0,0,0);
unsigned foldcount = 0;
// Loop over number of pulse periods in data block
for (int iturn=0; iturn<nturn; iturn++) {
// Determine range of samples needed for this bin, turn
int samp0 = samp_bin*((double)bin_lo-bin0+(double)iturn*nbin)+0.5;
int samp1 = samp_bin*((double)bin_lo-bin0+(double)iturn*nbin+1)+0.5;
// Range checks
if (samp0<0) { samp0=0; }
if (samp1<0) { samp1=0; }
if (samp0>nvalid) { samp0=nvalid; }
if (samp1>nvalid) { samp1=nvalid; }
// Read in and add samples: accumulate the four Stokes-style cross
// products of the two polarizations over [samp0, samp1).
for (int isamp=samp0; isamp<samp1; isamp++) {
float2 p0 = ptr0[isamp];
float2 p1 = ptr1[isamp];
folddata.x += p0.x*p0.x + p0.y*p0.y;
folddata.y += p1.x*p1.x + p1.y*p1.y;
folddata.z += p0.x*p1.x + p0.y*p1.y;
folddata.w += p0.x*p1.y - p0.y*p1.x;
foldcount++;
}
}
// Copy results into global mem (one slot per (ifft, bin); no races since
// each thread owns a distinct bin_lo)
const unsigned prof_offset = ifft * nbin;
foldtmp[prof_offset + bin_lo].x = folddata.x;
foldtmp[prof_offset + bin_lo].y = folddata.y;
foldtmp[prof_offset + bin_lo].z = folddata.z;
foldtmp[prof_offset + bin_lo].w = folddata.w;
foldtmp_c[prof_offset + bin_lo] = foldcount;
}
|
22,964 | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * h_gpu_histogram;
bucket * d_gpu_histogram;
bucket * diff_histogram;
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
atom * d_atom_list;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
	// Euclidean distance between two entries of the global atom_list.
	double dx = atom_list[ind1].x_pos - atom_list[ind2].x_pos;
	double dy = atom_list[ind1].y_pos - atom_list[ind2].y_pos;
	double dz = atom_list[ind1].z_pos - atom_list[ind2].z_pos;
	return sqrt(dx*dx + dy*dy + dz*dz);
}
//get cuda error
// void cuda_err(cudaError_t e, char in[])
// {
// if (e != cudaSuccess){
// printf("CUDA Error: %s, %s \n", in, cudaGetErrorString(e));
// exit(EXIT_FAILURE);
// }
// }
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
	// Brute-force O(n^2) spatial distance histogram on a single CPU thread:
	// every unique pair (i, j>i) contributes one count to its distance bucket.
	// Uses the globals PDH_acnt, PDH_res and histogram.
	for (int i = 0; i < PDH_acnt; i++) {
		for (int j = i + 1; j < PDH_acnt; j++) {
			double dist = p2p_distance(i, j);
			int h_pos = (int)(dist / PDH_res);
			histogram[h_pos].d_cnt++;
		}
	}
	return 0;
}
/*
SDH kernel - a really crappy one
*/
// Naive GPU SDH kernel: one thread per atom `id`; each thread walks all
// atoms j > id and atomically bumps the bucket for their distance.
// Highly unbalanced (thread 0 does n-1 pairs, the last does 0).
__global__ void PDH_kernel(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
int j, h_pos;
double dist;
double x1;
double x2;
double y1;
double y2;
double z1;
double z2;
// Guard: the grid is rounded up, so trailing threads must do nothing.
if(id < acnt)
for(j = id+1; j < acnt; j++)
{
x1 = d_atom_list[id].x_pos;
x2 = d_atom_list[j].x_pos;
y1 = d_atom_list[id].y_pos;
y2 = d_atom_list[j].y_pos;
z1 = d_atom_list[id].z_pos;
z2 = d_atom_list[j].z_pos;
dist = sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
h_pos = (int) (dist / res);
// __syncthreads();
// d_histogram[h_pos].d_cnt += 1; //very odd that this doesnt work but atomicAdd does... I wonder why
//long story short... it's basically because having synchronized threads
//at this point doesnt prevent race condiions on the increment itself
// __syncthreads();
// atomicAdd is required: many threads increment the same bucket.
atomicAdd((unsigned long long int*)&d_histogram[h_pos].d_cnt,1);
}
}
/*
SDH kernel 2.0 - now the goal is to make a better kernel, that is at least 10x faster (at first)
*/
// Placeholder for the planned optimized kernel -- intentionally empty,
// not yet implemented (never launched in this file).
__global__ void PDH_kernel2(bucket* d_histogram, atom* d_atom_list, long long acnt, double res)
{
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time() {
	// Stop the clock started via the global startTime, print the elapsed
	// wall time for the CPU run, and return it in seconds.
	gettimeofday(&endTime, &Idunno);
	long sec_diff = endTime.tv_sec - startTime.tv_sec;
	long usec_diff = endTime.tv_usec - startTime.tv_usec;
	if (usec_diff < 0) {
		// Borrow one second so the microsecond part is non-negative.
		--sec_diff;
		usec_diff += 1000000;
	}
	printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
	return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
double report_running_time_GPU() {
	// Same as report_running_time(), but labels the output as the GPU run.
	gettimeofday(&endTime, &Idunno);
	long sec_diff = endTime.tv_sec - startTime.tv_sec;
	long usec_diff = endTime.tv_usec - startTime.tv_usec;
	if (usec_diff < 0) {
		--sec_diff;
		usec_diff += 1000000;
	}
	printf("Running time for GPU version: %ld.%06ld\n", sec_diff, usec_diff);
	return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
// Print the histogram five buckets per row, followed by the grand total.
// Relies on the global num_buckets for the array length.
void output_histogram(bucket* histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
/*
 * Driver: generates PDH_acnt uniformly distributed atoms, computes the
 * spatial distance histogram on the CPU and on the GPU, then prints both
 * and their per-bucket difference (which should be all zeros).
 * Usage: ./SDH {#of_atoms} {bucket_width}
 */
int main(int argc, char **argv)
{
	int i;
	/* Validate the arguments before dereferencing argv[1]/argv[2]. */
	if (argc < 3) {
		fprintf(stderr, "Usage: %s {#of_atoms} {bucket_width}\n", argv[0]);
		return 1;
	}
	PDH_acnt = atoi(argv[1]);
	PDH_res	 = atof(argv[2]);
	//printf("args are %d and %f\n", PDH_acnt, PDH_res);
	num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
	/* BUG FIX: use calloc, not malloc -- bucket counts are only ever
	   incremented, so uninitialized memory produced garbage histograms. */
	histogram = (bucket *)calloc(num_buckets, sizeof(bucket));
	atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
	srand(1);
	/* generate data following a uniform distribution */
	for(i = 0;  i < PDH_acnt; i++) {
		atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
		atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
		atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
	}
	/* start counting time */
	printf("Starting CPU...\n");
	gettimeofday(&startTime, &Idunno);
	/* call CPU single thread version to compute the histogram */
	PDH_baseline();
	/* check the total running time */
	report_running_time();
	/* print out the histogram */
	output_histogram(histogram);
	printf("Starting GPU...\n");
	/* GPU version ----------------------------------------------------- */
	/* zero-initialized so the device-side atomicAdd counts start from 0 */
	h_gpu_histogram = (bucket *)calloc(num_buckets, sizeof(bucket));
	/* copy the atom list from host to device */
	cudaMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
	cudaMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, cudaMemcpyHostToDevice);
	/* allocate the device histogram and seed it with the zeroed host copy */
	cudaMalloc((void**)&d_gpu_histogram, sizeof(bucket)*num_buckets);
	cudaMemcpy(d_gpu_histogram, h_gpu_histogram, sizeof(bucket)*num_buckets,cudaMemcpyHostToDevice);
	/* start the timer */
	gettimeofday(&startTime, &Idunno);
	/* run the kernel: one thread per atom, 256 threads per block */
	PDH_kernel<<<ceil(PDH_acnt/256.0), 256>>>(d_gpu_histogram, d_atom_list, PDH_acnt, PDH_res);
	/* blocking copy back also synchronizes with the kernel */
	cudaMemcpy(h_gpu_histogram, d_gpu_histogram, sizeof(bucket)*num_buckets, cudaMemcpyDeviceToHost);
	/* check total running time */
	report_running_time_GPU();
	/* print out the resulting histogram from the GPU */
	output_histogram(h_gpu_histogram);
	/* per-bucket difference between CPU and GPU results */
	printf("Difference: \n");
	diff_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
	int bi;
	for(bi = 0; bi < num_buckets; bi++)
	{
		diff_histogram[bi].d_cnt = histogram[bi].d_cnt - h_gpu_histogram[bi].d_cnt;
	}
	output_histogram(diff_histogram);
	cudaFree(d_gpu_histogram);
	cudaFree(d_atom_list);
	free(histogram);
	free(atom_list);
	free(h_gpu_histogram);
	free(diff_histogram);
	return 0;
}
|
22,965 | //
// CasAES128_CUDA.c
// CasAES128_CUDA
// Created by Carter McCardwell on 11/11/14.
//
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <cuda_runtime.h>
// Benchmark record -- not referenced elsewhere in this file; field meanings
// are inferred from names and should be confirmed against the caller:
// presumably `time` is the total elapsed time, `times` per-phase timings,
// `cipher` the 16-byte AES output block, and `hits` per-round 4x4 states.
struct timing_pair{
long time;
long times[4];
char cipher[16];
char hits[10][4][4];
//long memory_usage;
//int ctx_delta;
//unsigned long tp_delta;
//unsigned short cp_delta;
};
const int Nb_h = 4;
const int Nr_h = 10;
const int Nk_h = 4;
const uint8_t s_h[256]=
{
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
uint8_t Rcon_h[256] = {
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d
};
__constant__ uint8_t s[256];
__constant__ int Nb;
__constant__ int Nr;
__constant__ int Nk;
__constant__ uint32_t ek[44];
// Convenience wrapper for checking a CUDA API call at the call site.
// BUG FIX: the macro previously expanded to gpuAssert(...), a function that
// does not exist anywhere in this file, so any use failed to compile.
// It now forwards to cudaDevAssist with the caller's line number as the tag.
#define gpuErrchk(ans) { cudaDevAssist((ans), __LINE__); }
// Report a CUDA error (error string + caller-supplied line tag) to stderr;
// exits the process with the error code unless abort is false.
inline void cudaDevAssist(cudaError_t code, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"cudaDevAssistant: %s %d\n", cudaGetErrorString(code), line);
        if (abort) exit(code);
    }
}
// SubWord (host side): apply the AES S-box table s_h to each of the four
// bytes of the key-schedule word.
uint32_t sw(uint32_t word)
{
union {
uint32_t word;
uint8_t bytes[4];
} subWord __attribute__ ((aligned));
subWord.word = word;
subWord.bytes[3] = s_h[subWord.bytes[3]];
subWord.bytes[2] = s_h[subWord.bytes[2]];
subWord.bytes[1] = s_h[subWord.bytes[1]];
subWord.bytes[0] = s_h[subWord.bytes[0]];
return subWord.word;
}
__device__ void sb(uint8_t* in)
{
    // SubBytes: replace each of the 16 state bytes with its S-box entry
    // from the __constant__ table s.
    int i = 0;
    while (i < 16) {
        in[i] = s[in[i]];
        ++i;
    }
}
// MixColumns: for each column i of the 4x4 state (stored with rows 4 apart),
// combine the four bytes via the fixed AES column matrix over GF(2^8).
__device__ void mc(uint8_t* arr)
{
for (int i = 0; i < 4; i++)
{
uint8_t a[4];
uint8_t b[4];
uint8_t c;
uint8_t h;
for(c=0;c<4;c++) {
a[c] = arr[(4*c+i)];
// h is 0xFF when the top bit of the byte is set (arithmetic shift of a
// signed char), so the line below conditionally XORs the AES reduction
// polynomial 0x1B: b[c] = xtime(a[c]) = a[c] * 2 in GF(2^8).
h = (uint8_t)((signed char)arr[(4*c+i)] >> 7);
b[c] = arr[(4*c+i)] << 1;
b[c] ^= 0x1B & h;
}
// Each output byte is 2*a[r] ^ 3*a[r+1] ^ a[r+2] ^ a[r+3] (indices mod 4),
// with 3*x expanded as b[x] ^ a[x].
arr[(i)] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1];
arr[(4+i)] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2];
arr[(8+i)] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3];
arr[(12+i)] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0];
}
}
__device__ void sr(uint8_t* arr)
{
    // ShiftRows: row r of the 4x4 state (row-major, 4 bytes per row) is
    // rotated left by r positions; row 0 is unchanged.
    uint8_t rotated[16];
    for (int r = 0; r < 4; r++) {
        for (int c = 0; c < 4; c++) {
            rotated[r * 4 + c] = arr[r * 4 + ((c + r) & 3)];
        }
    }
    for (int i = 0; i < 16; i++) {
        arr[i] = rotated[i];
    }
}
uint32_t rw(uint32_t word)
{
    // RotWord (host side): rotate the four bytes of the key-schedule word by
    // one position within the byte array view of the word.
    union {
        uint8_t bytes[4];
        uint32_t word;
    } w __attribute__ ((aligned));
    w.word = word;
    uint8_t carried = w.bytes[3];
    w.bytes[3] = w.bytes[2];
    w.bytes[2] = w.bytes[1];
    w.bytes[1] = w.bytes[0];
    w.bytes[0] = carried;
    return w.word;
}
// AES-128 key expansion (host side): expands the 16-byte key pk into the
// 44-word schedule out, using RotWord/SubWord and the Rcon_h table.
void K_Exp(uint8_t* pk, uint32_t* out)
{
int i = 0;
union {
uint8_t bytes[4];
uint32_t word;
} temp __attribute__ ((aligned));
union {
uint8_t bytes[4];
uint32_t word;
} univar[44] __attribute__ ((aligned));
// First Nk words are the raw key, byte-reversed into the union layout.
for (i = 0; i < Nk_h; i++)
{
univar[i].bytes[3] = pk[i*4];
univar[i].bytes[2] = pk[i*4+1];
univar[i].bytes[1] = pk[i*4+2];
univar[i].bytes[0] = pk[i*4+3];
}
for (i = Nk_h; i < Nb_h*(Nr_h+1); i++)
{
temp.word = univar[i-1].word;
if (i % Nk_h == 0)
{
// Every Nk-th word: RotWord, SubWord, then XOR the round constant.
temp.word = (sw(rw(temp.word)));
temp.bytes[3] = temp.bytes[3] ^ (Rcon_h[i/Nk_h]);
}
else if (Nk_h > 6 && i % Nk_h == 4)
{
// AES-256-only branch; dead here since Nk_h == 4.
temp.word = sw(temp.word);
}
// NOTE(review): `i-4 % Nk_h` parses as `i - (4 % Nk_h)` == i, so this
// condition is `i == 0` and never holds inside this loop (i starts at
// Nk_h). The branch is dead code -- which matches standard AES-128; a
// precedence "fix" to (i-4) % Nk_h would add a non-standard SubWord.
if (i-4 % Nk_h == 0)
{
temp.word = sw(temp.word);
}
univar[i].word = univar[i-Nk_h].word ^ temp.word;
}
for (i = 0; i < 44; i++)
{
out[i] = univar[i].word;
}
}
// AddRoundKey: XOR four expanded-key words (from __constant__ ek, starting
// at word index strD) into the state. The state is laid out column-major
// (state[0], state[4], state[8], state[12] form the first column), and each
// key word's bytes are consumed MSB-first via the union views.
__device__ void ark(uint8_t* state, int strD)
{
union {
uint32_t word;
uint8_t bytes[4];
} zero __attribute__ ((aligned));
union {
uint32_t word;
uint8_t bytes[4];
} one __attribute__ ((aligned));
union {
uint32_t word;
uint8_t bytes[4];
} two __attribute__ ((aligned));
union {
uint32_t word;
uint8_t bytes[4];
} three __attribute__ ((aligned));
zero.word = ek[strD];
one.word = ek[strD+1];
two.word = ek[strD+2];
three.word = ek[strD+3];
state[0] = state[0] ^ zero.bytes[3];
state[4] = state[4] ^ zero.bytes[2];
state[8] = state[8] ^ zero.bytes[1];
state[12] = state[12] ^ zero.bytes[0];
state[1] = state[1] ^ one.bytes[3];
state[5] = state[5] ^ one.bytes[2];
state[9] = state[9] ^ one.bytes[1];
state[13] = state[13] ^ one.bytes[0];
state[2] = state[2] ^ two.bytes[3];
state[6] = state[6] ^ two.bytes[2];
state[10] = state[10] ^ two.bytes[1];
state[14] = state[14] ^ two.bytes[0];
state[3] = state[3] ^ three.bytes[3];
state[7] = state[7] ^ three.bytes[2];
state[11] = state[11] ^ three.bytes[1];
state[15] = state[15] ^ three.bytes[0];
}
// AES-128 encryption kernel: each thread encrypts one 16-byte state in place,
// reading/writing `in` at offset localid*16. Round parameters (Nr, Nb), the
// S-box and the expanded key all live in __constant__ memory.
__global__ void cudaRunner(uint8_t *in)
{
uint8_t state[16];
int localid = blockDim.x * blockIdx.x + threadIdx.x; //Data is shifted by 16 * ID of worker
for (int i = 0; i < 16; i++) { state[i] = in[(localid*16)+i]; }
// Initial AddRoundKey, then Nr-1 full rounds.
ark(state, 0);
for (int i = 1; i < Nr; i++)
{
sb(state);
sr(state);
mc(state);
ark(state, i*Nb);
}
// Final round omits MixColumns, per the AES specification.
sb(state);
sr(state);
ark(state, Nr*Nb);
for (int i = 0; i < 16; i++) { in[(localid*16)+i] = state[i]; }
}
// Encrypt one 16-byte block (in_cypher) with the 16-byte key (in_key) on the
// GPU, writing the result to cypher. Returns the elapsed host time in ms.
// Only a single block is processed (cudaRunner<<<1,1>>> and a 16-byte buffer).
long AES_CUDA(char* in_cypher, char* cypher, char* in_key)
{
clock_t c_start, c_stop;
c_start = clock();
uint8_t key[16];
uint32_t ek_h[44];
uint8_t states[16] = { 0x00 };
for (int i = 0; i < 16; i++)
{
states[i] = in_cypher[i];
key[i] = in_key[i];
}
// Expand the key on the host; the schedule is uploaded to __constant__ ek.
K_Exp(key, ek_h);
//send constants to GPU
// (the numeric tags passed to cudaDevAssist are ad-hoc labels, not real
// source line numbers)
cudaSetDevice(0);
cudaDevAssist(cudaMemcpyToSymbol(Nk, &Nk_h, sizeof(int), 0, cudaMemcpyHostToDevice), 535, true);
cudaDevAssist(cudaMemcpyToSymbol(Nr, &Nr_h, sizeof(int), 0, cudaMemcpyHostToDevice), 543, true);
cudaDevAssist(cudaMemcpyToSymbol(Nb, &Nb_h, sizeof(int), 0, cudaMemcpyHostToDevice), 903, true);
cudaDevAssist(cudaMemcpyToSymbol(s, &s_h, 256*sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 920, true);
cudaDevAssist(cudaMemcpyToSymbol(ek, &ek_h, 44*sizeof(uint32_t), 0, cudaMemcpyHostToDevice), 823, true);
// NOTE(review): cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
// is the modern equivalent.
cudaThreadSynchronize();
uint8_t *devState = NULL;
cudaDevAssist(cudaMalloc((void**)&devState, 16*sizeof(uint8_t)), 425, true);
//arrange data correctly
// Transpose the input block from row-major into the column-major 4x4
// layout the device-side round functions expect (one byte per memcpy).
uint8_t temp[16];
memcpy(&temp[0], &states[0], sizeof(uint8_t));
memcpy(&temp[4], &states[1], sizeof(uint8_t));
memcpy(&temp[8], &states[2], sizeof(uint8_t));
memcpy(&temp[12], &states[3], sizeof(uint8_t));
memcpy(&temp[1], &states[4], sizeof(uint8_t));
memcpy(&temp[5], &states[5], sizeof(uint8_t));
memcpy(&temp[9], &states[6], sizeof(uint8_t));
memcpy(&temp[13], &states[7], sizeof(uint8_t));
memcpy(&temp[2], &states[8], sizeof(uint8_t));
memcpy(&temp[6], &states[9], sizeof(uint8_t));
memcpy(&temp[10], &states[10], sizeof(uint8_t));
memcpy(&temp[14], &states[11], sizeof(uint8_t));
memcpy(&temp[3], &states[12], sizeof(uint8_t));
memcpy(&temp[7], &states[13], sizeof(uint8_t));
memcpy(&temp[11], &states[14], sizeof(uint8_t));
memcpy(&temp[15], &states[15], sizeof(uint8_t));
for (int c = 0; c < 16; c++) { memcpy(&states[c], &temp[c], sizeof(uint8_t)); }
cudaDevAssist(cudaMemcpy(devState, states, 16*sizeof(uint8_t), cudaMemcpyHostToDevice), 426, true);
cudaDevAssist(cudaDeviceSynchronize(), 268, true);
cudaRunner<<<1,1>>>(devState);
cudaDevAssist(cudaDeviceSynchronize(), 270, true);
cudaDevAssist(cudaMemcpy(states, devState, 16*sizeof(uint8_t), cudaMemcpyDeviceToHost), 431, true);
// Transpose the encrypted state back into row-major order for the caller.
memcpy(&cypher[0], &states[0], sizeof(uint8_t));
memcpy(&cypher[4], &states[1], sizeof(uint8_t));
memcpy(&cypher[8], &states[2], sizeof(uint8_t));
memcpy(&cypher[12], &states[3], sizeof(uint8_t));
memcpy(&cypher[1], &states[4], sizeof(uint8_t));
memcpy(&cypher[5], &states[5], sizeof(uint8_t));
memcpy(&cypher[9], &states[6], sizeof(uint8_t));
memcpy(&cypher[13], &states[7], sizeof(uint8_t));
memcpy(&cypher[2], &states[8], sizeof(uint8_t));
memcpy(&cypher[6], &states[9], sizeof(uint8_t));
memcpy(&cypher[10], &states[10], sizeof(uint8_t));
memcpy(&cypher[14], &states[11], sizeof(uint8_t));
memcpy(&cypher[3], &states[12], sizeof(uint8_t));
memcpy(&cypher[7], &states[13], sizeof(uint8_t));
memcpy(&cypher[11], &states[14], sizeof(uint8_t));
memcpy(&cypher[15], &states[15], sizeof(uint8_t));
c_stop = clock();
// Elapsed host CPU time in milliseconds (truncated to long on return).
float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000;
cudaFree(devState);
cudaDeviceReset();
return diff;
}
|
22,966 | #include "includes.h"
// Nearest-neighbor resize of a batched, channel-major image tensor:
// output[b][ch][oy][ox] = input[b][ch][oy*multy][ox*multx] (indices truncate).
// Launch mapping: z covers the batch, blockIdx.x strides channels, and
// threadIdx.x/y stride the output pixels.
__global__ void cudaSNearestNeighborKernel(const float* input, size_t inputSizeX, size_t inputSizeY, float* output, size_t outputSizeX, size_t outputSizeY, size_t nbChannels, size_t batchSize)
{
// Flat offsets to this batch element's first value in input and output.
const size_t inputOffset = (blockIdx.z*blockDim.z + threadIdx.z) * (nbChannels*inputSizeY*inputSizeX);
const size_t outputOffset = (blockIdx.z*blockDim.z + threadIdx.z) * (nbChannels*outputSizeY*outputSizeX);
// Scale factors from output coordinates back to input coordinates.
const float multy = ((float) inputSizeY)/((float) outputSizeY);
const float multx = ((float) inputSizeX)/((float) outputSizeX);
for(size_t channel = blockIdx.x; channel < nbChannels; channel += gridDim.x) {
for(size_t oy = threadIdx.y; oy < outputSizeY; oy += blockDim.y) {
for(size_t ox = threadIdx.x; ox < outputSizeX; ox += blockDim.x) {
// Truncation picks the nearest source pixel toward the origin.
const size_t iy = (size_t) oy*multy;
const size_t ix = (size_t) ox*multx;
output[outputOffset +
channel*outputSizeY*outputSizeX +
oy*outputSizeX +
ox] = input[inputOffset +
channel*inputSizeY*inputSizeX +
iy*inputSizeX +
ix];
}
}
}
} |
22,967 | #include "includes.h"
// Radix-sort scatter step. Each block first records, per digit value (`bin`),
// the smallest global index in the block that carries that digit (min_Idx,
// in dynamic shared memory sized nBins words). An element's destination is
// then the block/digit base offset from the scanned per-block histogram
// d_histScan plus its offset from that minimum (i - min_Idx[bin]).
// NOTE(review): i - min_Idx[bin] equals the element's rank within the block
// only when elements sharing a digit sit contiguously (block pre-sorted by
// digit) -- confirm the caller performs a local sort before this kernel.
__global__ void scatter(unsigned int *in,unsigned int *in_pos, unsigned int *out, unsigned int *out_pos, unsigned int n, unsigned int *d_histScan, unsigned int mask, unsigned int current_bits, unsigned int nBins)
{
extern __shared__ unsigned int min_Idx[];
// Initialize every bin's minimum to the sentinel n (past any valid index).
for(int j = threadIdx.x; j < nBins; j += blockDim.x)
min_Idx[j] = n;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
// Extract this element's current digit and race for the bin's minimum.
unsigned int bin = (in[i] >> current_bits) & mask;
atomicMin(&min_Idx[bin], i);
}
__syncthreads();
if(i < n)
{
unsigned int bin = (in[i] >> current_bits) & mask;
out[d_histScan[blockIdx.x + bin*gridDim.x] + i - min_Idx[bin]] = in[i];
out_pos[d_histScan[blockIdx.x + bin*gridDim.x] + i - min_Idx[bin]] = in_pos[i];
}
} |
22,968 | /*
struct Trim
{
Trim(t3<const int> n)
: left(n.y*n.z, n.x),
right(n.y*n.z, 0),
edgeCases((n.x-1)*n.y*n.z),
iter_helper(edgeCases.begin(), n.y*n.z, n.x-1)
{}
thrust::device_vector<int> left;
thrust::device_vector<int> right;
thrust::device_vector<uchar> edgeCases;
iter_access_helper<uchar> iter_helper;
using reference_type =
thrust::tuple<
int,
int,
typename thrust::device_vector<uchar>::iterator>;
using iterator_tuple =
thrust::tuple<
typename thrust::device_vector<int>::iterator,
typename thrust::device_vector<int>::iterator,
typename iter_access_helper<uchar>::iterator>;
using iterator = thrust::zip_iterator<iterator_tuple>;
iterator begin()
{
return thrust::make_zip_iterator(
thrust::make_tuple(
left.begin(),
right.begin(),
iter_helper.begin()));
}
iterator end()
{
return thrust::make_zip_iterator(
thrust::make_tuple(
left.end(),
right.end(),
iter_helper.end()));
}
};
*/
/*
struct set_trim_values
: public thrust::binary_function<
typename Trim::reference_type, // arg 1
typename thrust::device_vector<scalar_t>::iterator, // arg 2
typename Trim::reference_type> // out
{
set_trim_values(
scalar_t const& isoval,
int const& nx)
: isoval(isoval),
nx(nx)
{}
// This will not work on host because of using
// edgeCases from device_vector...
__host__
typename Trim::reference_type
operator()(
typename Trim::reference_type trim_values,
typename thrust::device_vector<scalar_t>::iterator curPoints)
{
// use curPoints to set xl, xr, curEdgeCases
int xl = thrust::get<0>(trim_values);
int xr = thrust::get<1>(trim_values);
using iterator = typename thrust::device_vector<uchar>::iterator;
iterator edgeCases = thrust::get<2>(trim_values);
// TODO set all of isGE at once.
bool isGE[2];
isGE[0] = (curPoints[0] >= isoval);
for(int i = 1; i != nx; ++i)
{
isGE[i%2] = (curPoints[i] >= isoval);
// edgeCases[i-1] = calcCaseEdge(isGE[(i+1)%2], isGE[i%2]);
//
if(*(edgeCases + i-1) == 1 || *(edgeCases + i-1) == 2)
{
if(xl > xr)
{
xl = i-1;
}
xr = i;
}
}
return thrust::make_tuple(xl, xr, edgeCases);
}
scalar_t const& isoval;
int const& nx;
};
void pass1()
{
thrust::transform(
trim.begin(), // input1
trim.end(),
image.ray_begin(), // input2
trim.begin(), // output
set_trim_values( // binary function (*input1, *input2)
isoval,
n.x));
}
*/
|
22,969 | // Salt and pepper noise simulation with Cuda C/C++
// Original framework for code taken from imflipG.cu
// Modified by Ethan Webster
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <math.h>
#include <ctype.h>
#include <cuda.h>
#include <time.h>
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define CEIL(a,b) ((a+b-1)/b)
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
uch *GPUImg, *GPUCopyImg, *GPUptr, *GPUResult; // Where images are stored in GPU
// Global image descriptor filled from the image file header
// (54-byte HeaderInfo suggests a BMP header -- TODO confirm against the loader).
struct ImgProp{
int Hpixels; // image width in pixels
int Vpixels; // image height in rows
uch HeaderInfo[54]; // raw file header bytes, kept for writing the output
ul Hbytes; // bytes per image row (see IPHB/IMAGESIZE macros)
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that adds salt&pepper noise of given probability density to an image
// Kernel that adds salt&pepper noise of given probability density to an image
// One thread per pixel; reads a 1-byte/pixel source (ImgSrc) and writes a
// 3-byte/pixel R=G=B destination (ImgDst). `prob` appears to be expressed on
// the 0..255 intensity scale -- NOTE(review): confirm the caller's units.
// NOTE(review): calling curand_init per thread on every launch is expensive,
// and seeding with clock()+MYtid repeats the seed across blocks (same MYtid).
__global__
void corruptPixels(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels, double prob)
{
// threads/blocks info and IDs
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
// leave buffer frame around image to avoid 8 edge cases for convolutions
if (MYcol > Hpixels-4 || MYcol < 3 || MYrow > Vpixels-4 || MYrow < 3) return;
ui MYpixIndex = MYrow * Hpixels + MYcol; // pixel index in B&W image
ui RowBytes = (Hpixels * 3 + 3) & (~3); // bytes in row of R=G=B grayscale output image
ui MYresultIndex = MYrow * RowBytes + 3 * MYcol; // pixel index in grayscale image (R=B=G)
// seed cuRAND random number generator function with clock cycle + threadID
curandState state;
curand_init((unsigned long long)clock() + MYtid, 0, 0, &state);
// sample uniform distribution from 0 to 255 (random pixel intensity)
// NOTE(review): % 255 actually yields 0..254, not 0..255 as the comment says.
ui loc = ((ui)(curand(&state)))%255;
/*
half of the probability is used for the following because
the salt vs pepper contribution is split 50/50
*/
// if pixel intensity is located in the lower half of the
// probability region, then add pepper noise
if( loc <= (ui)(prob/2.0f)) {
ImgDst[MYresultIndex] = 0;
ImgDst[MYresultIndex+1] = 0;
ImgDst[MYresultIndex+2] = 0;
}
// otherwise if pixel intensity is located in the upper half of the
// probability region, then add salt noise
else if(loc > (ui)(prob/2.0f) && loc < (ui)prob ) {
ImgDst[MYresultIndex] = 255;
ImgDst[MYresultIndex+1] = 255;
ImgDst[MYresultIndex+2] = 255;
}
// if we reached this, then no noise is added
else {
ImgDst[MYresultIndex] = ImgSrc[MYpixIndex];
ImgDst[MYresultIndex+1] = ImgSrc[MYpixIndex];
ImgDst[MYresultIndex+2] = ImgSrc[MYpixIndex];
}
}
// Kernel: convert one RGB pixel per thread to its grayscale average.
// Reads 3 bytes/pixel from ImgGPU (rows padded to 4 bytes, BMP layout)
// and writes 1 byte/pixel into ImgBW (unpadded Hpixels-wide rows).
__global__
void BWKernel(uch *ImgBW, uch *ImgGPU, ui Hpixels)
{
	const ui threadsPerBlock = blockDim.x;
	const ui globalTid = threadsPerBlock * blockIdx.x + threadIdx.x;
	const ui blocksPerRow = CEIL(Hpixels, threadsPerBlock);
	const ui rowBytes = (Hpixels * 3 + 3) & (~3); // padded source row size
	const ui row = blockIdx.x / blocksPerRow;
	const ui col = globalTid - row * blocksPerRow * threadsPerBlock;
	if (col >= Hpixels) return; // guard the tail of each row
	const ui srcIdx = row * rowBytes + 3 * col;
	const ui dstIdx = row * Hpixels + col;
	double b = (double)ImgGPU[srcIdx];
	double g = (double)ImgGPU[srcIdx + 1];
	double r = (double)ImgGPU[srcIdx + 2];
	ImgBW[dstIdx] = (uch)((r + g + b) / 3.0);
}
// Kernel that copies an image from one part of the
// GPU memory (ImgSrc) to another (ImgDst), one byte per thread.
// FS is the buffer size in bytes.
__global__
void PixCopy(uch *ImgDst, uch *ImgSrc, ui FS)
{
	ui ThrPerBlk = blockDim.x;
	ui MYbid = blockIdx.x;
	ui MYtid = threadIdx.x;
	ui MYgtid = ThrPerBlk * MYbid + MYtid;
	// valid indices are 0..FS-1; the original guard used '>' which let
	// the thread with MYgtid == FS read/write one byte out of bounds
	if (MYgtid >= FS) return;
	ImgDst[MYgtid] = ImgSrc[MYgtid];
}
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocates memory for the image and returns its pointer (NULL if the
// buffer cannot be allocated). Side effect: fills the global 'ip'
// (width, height, padded row bytes, raw 54-byte header) used by IMAGESIZE.
uch *ReadBMPlin(char* fn)
{
	static uch *Img;
	FILE* f = fopen(fn, "rb");
	if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
	uch HeaderInfo[54];
	// read the 54-byte header and verify we actually got all of it
	if (fread(HeaderInfo, sizeof(uch), 54, f) != 54){
		printf("\n\n%s is not a valid BMP file\n\n", fn);
		fclose(f);
		exit(EXIT_FAILURE);
	}
	// extract image height and width from header
	int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
	int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
	int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
	// save header for re-use when writing the output image
	memcpy(ip.HeaderInfo, HeaderInfo, 54);
	printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
	ip.Hpixels, ip.Vpixels, IMAGESIZE);
	// allocate memory to store the main image (1 dimensional array)
	Img = (uch *)malloc(IMAGESIZE);
	if (Img == NULL){
		fclose(f); // the original leaked the FILE* on allocation failure
		return Img;
	}
	// read the pixel data from disk
	fread(Img, sizeof(uch), IMAGESIZE, f);
	fclose(f);
	return Img;
}
// Write the 1D linear-memory stored image into file:
// 54-byte BMP header (saved by ReadBMPlin in 'ip') followed by pixel data.
void WriteBMPlin(uch *Img, char* fn)
{
	FILE* outFile = fopen(fn, "wb");
	if (outFile == NULL){
		printf("\n\nFILE CREATION ERROR: %s\n\n", fn);
		exit(1);
	}
	fwrite(ip.HeaderInfo, sizeof(uch), 54, outFile); // header first
	fwrite(Img, sizeof(uch), IMAGESIZE, outFile);    // then the pixel rows
	printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
	fclose(outFile);
}
// Program entry point: read a BMP, convert it to grayscale on the GPU,
// add salt & pepper noise of the requested density, write the result.
// Usage: randNoise InputFilename OutputFilename [NoiseDensity] [ThrPerBlk]
int main(int argc, char **argv)
{
	cudaError_t cudaStatus;
	char InputFileName[255], OutputFileName[255], ProgName[255];
	ui BlkPerRow, ThrPerBlk = 256, NumBlocks;
	// Default the noise density to 50%. The original left 'amt'
	// uninitialized, so running with only two file arguments (argc == 3)
	// read an indeterminate value in the range check below.
	ui amt = 50, GPUtotalBufferSize;
	double inputNL;
	strcpy(ProgName, "randNoise");
	switch (argc){
	case 5: ThrPerBlk = atoi(argv[4]);          // fall through
	case 4: amt = atoi(argv[3]);                // fall through
	case 3: strcpy(InputFileName, argv[1]);
		strcpy(OutputFileName, argv[2]);
		break;
	default: printf("\n\nUsage: %s InputFilename OutputFilename [NoiseDensity] [ThrPerBlk]", ProgName);
		printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
		printf("\n\nExample: %s Astronaut.bmp Output.bmp 50", ProgName);
		printf("\n\nExample: %s Astronaut.bmp Output.bmp 50 128", ProgName);
		printf("\n\nNoise Density is in percent, from 0-100\n\n");
		exit(EXIT_FAILURE);
	}
	if (amt > 100) {
		printf("Invalid noise amount. Must be between 0 and 100");
		exit(EXIT_FAILURE);
	}
	if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
		printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
		exit(EXIT_FAILURE);
	}
	// Create CPU memory to store the input and output images
	TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
	if (TheImg == NULL){
		printf("Cannot allocate memory for the input image...\n");
		exit(EXIT_FAILURE);
	}
	CopyImg = (uch *)malloc(IMAGESIZE);
	if (CopyImg == NULL){
		free(TheImg);
		printf("Cannot allocate memory for the input image...\n");
		exit(EXIT_FAILURE);
	}
	// Choose which GPU to run on, change this on a multi-GPU system.
	int NumGPUs = 0;
	cudaGetDeviceCount(&NumGPUs);
	if (NumGPUs == 0){
		printf("\nNo CUDA Device is available\n");
		exit(EXIT_FAILURE);
	}
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		exit(EXIT_FAILURE);
	}
	// One buffer holds both the 1-byte/pixel B&W image (IMAGEPIX bytes)
	// and the 3-byte/pixel RGB image (IMAGESIZE bytes)
	GPUtotalBufferSize = IMAGEPIX + IMAGESIZE;
	cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory");
		exit(EXIT_FAILURE);
	}
	// setup pointers to the RGB image and the B&W scratch image
	GPUImg = (uch *)GPUptr;
	GPUCopyImg = GPUImg + IMAGESIZE;
	// Copy the input image from host memory to the GPU buffer.
	cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy CPU to GPU failed!");
		exit(EXIT_FAILURE);
	}
	BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk);
	NumBlocks = IPV*BlkPerRow;
	// Pass 1: RGB -> B&W into GPUCopyImg
	BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPH);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "\n\n cudaDeviceSynchronize 1 returned error code %d after launching the kernel!\n", cudaStatus);
		exit(EXIT_FAILURE);
	}
	// Pass 2: add salt & pepper noise; scale the percentage to 0..255
	inputNL = 255.0f*(double)amt/100.0f;
	corruptPixels <<< NumBlocks, ThrPerBlk >>> (GPUImg, GPUCopyImg, IPH, IPV, inputNL);
	GPUResult = GPUImg;
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "\n\ncudaDeviceSynchronize 2 returned error code %d after launching the kernel!\n", cudaStatus);
		exit(EXIT_FAILURE);
	}
	// Copy output (results) from GPU buffer to host (CPU) memory.
	cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
		exit(EXIT_FAILURE);
	}
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
		free(TheImg);
		free(CopyImg);
		exit(EXIT_FAILURE);
	}
	WriteBMPlin(CopyImg, OutputFileName); // Write the noisy image back to disk
	printf("\n\n--------------------------------------------------------------------------\n");
	printf("Successfully added %d%% noise to the given image and converted to grayscale.\n", amt);
	// Deallocate CPU, GPU memory and reset the device.
	cudaFree(GPUptr);
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		free(TheImg);
		free(CopyImg);
		exit(EXIT_FAILURE);
	}
	free(TheImg);
	free(CopyImg);
	return(EXIT_SUCCESS);
}
|
22,970 | #include "includes.h"
// Naive dense matrix multiply: gpu_c = gpu_a * gpu_b for N x N row-major
// integer matrices. One thread computes one output element.
__global__ void gpu_matrixmult(int *gpu_a, int *gpu_b, int *gpu_c, int N) {
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	if (row < N && col < N) {
		int acc = 0;
		for (int k = 0; k < N; ++k) {
			acc += gpu_a[row * N + k] * gpu_b[k * N + col];
		}
		gpu_c[row * N + col] = acc;
	}
}
22,971 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
//-------- Generate an n x n matrix of random single-precision values --------//
// Each entry is an integer part in [0,60) from rand() plus a fractional
// part in [0,1) from drand48(). The caller owns (and must free) the buffer.
float * generateMatrix(int n){
	int total = n * n;
	float *matrix = (float *)malloc(total * sizeof(float));
	for (int i = 0; i < total; i++) {
		int wholePart = rand() % 60;
		matrix[i] = (float)wholePart + drand48();
	}
	return matrix;
}
//-------- Save matrix to dat file --------//
// Appends (or overwrites, per 'mode') a size x size matrix to product.dat,
// tab-separated with a newline at each row end and a separator footer.
void saveMatrixToFile(float *matrix, int size, const char *mode){
	FILE *filePointer = fopen( "product.dat", mode);
	// the original dereferenced a NULL FILE* when the file could not be opened
	if (filePointer == NULL) {
		fprintf(stderr, "Could not open product.dat\n");
		return;
	}
	for (int i = 0; i < size*size; i++) {
		fprintf(filePointer, "%.6g\t", matrix[i]);
		// newline at the end of each row
		if (i%size == size-1 && i != 0) {
			fprintf(filePointer, "\n");
		}
	}
	fprintf(filePointer, "\n-------------------------------------\n");
	fclose(filePointer);
}
//-------- Matrix multiplication --------//
// resultMatrix = matrix1 * matrix2 for size x size row-major matrices;
// one thread per output element.
__global__ void multiplyMatrices(float *matrix1, float *matrix2, float *resultMatrix, int size){
	int j = blockIdx.x*blockDim.x + threadIdx.x; // column
	int i = blockIdx.y*blockDim.y + threadIdx.y; // row
	if (i < size && j < size) {
		float sum = 0;
		for (int k = 0; k < size; k++) {
			sum += matrix1[(size*i)+k]*matrix2[(size*k)+j];
		}
		// store inside the bounds check: the original wrote resultMatrix
		// unconditionally, so threads past the matrix edge wrote out of bounds
		resultMatrix[(i*size)+j] = sum;
	}
}
// Driver: generate two random size x size matrices, multiply them on the
// GPU (16x16 tiles), time the kernel, and dump all three matrices to
// product.dat.
int main (int argc, char *argv[]) {
	//--Testing parameters
	if (argc != 2){
		printf("Incorrect number of parameters :(\n");
		printf("Try: \"./MatrixMult <MATRIX SIZE>\"\n");
		exit(0);
	}
	int size = atoi(argv[1]);
	int tileSize = 16; // threads per block edge
	//--Generating matrices
	float *matrix1, *matrix2, *resultMatrix;
	float *dev_matrix1, *dev_matrix2, *dev_resultMatrix;
	int memorySize = size*size*sizeof(float);
	srand48(time(NULL));
	matrix1 = generateMatrix(size);
	matrix2 = generateMatrix(size);
	resultMatrix = (float *)malloc(memorySize);
	//--Initializing CUDA memory
	cudaMalloc((void **)&dev_matrix1, memorySize);
	cudaMalloc((void **)&dev_matrix2, memorySize);
	cudaMalloc((void **)&dev_resultMatrix, memorySize);
	cudaMemcpy(dev_matrix1, matrix1, memorySize, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_matrix2, matrix2, memorySize, cudaMemcpyHostToDevice);
	//-- Multiplying matrices
	dim3 dimBlock(tileSize, tileSize);
	dim3 dimGrid((int)ceil((float)size/(float)dimBlock.x), (int)ceil((float)size/(float)dimBlock.y));
	clock_t start = clock();
	multiplyMatrices<<<dimGrid, dimBlock>>>(dev_matrix1, dev_matrix2, dev_resultMatrix, size);
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported way to wait for the kernel before stopping the timer
	cudaDeviceSynchronize();
	clock_t stop = clock();
	double time = (double)(stop - start) / CLOCKS_PER_SEC;
	printf("Execution time: %f seconds\n", time);
	cudaMemcpy(resultMatrix, dev_resultMatrix, memorySize, cudaMemcpyDeviceToHost);
	cudaFree(dev_matrix1); cudaFree(dev_matrix2); cudaFree(dev_resultMatrix);
	//-- Saving matrices to file
	saveMatrixToFile(matrix1, size, "w");
	saveMatrixToFile(matrix2, size, "a");
	saveMatrixToFile(resultMatrix, size, "a");
	free(matrix1); free(matrix2); free(resultMatrix);
	exit(0);
}
|
22,972 | /**
* One Way Hash with CUDA (Fall 2016):
*
* Members:
* Emanuelle Crespi, Tolga Keskinoglu
*
* This test implements a simple hash from a space of size 2n --> n
*
* The following code makes use of the kernel call hash(char *f, char *h, int n)
* to perform a parallel hash of elements f --> h with corresponding indices 2i --> i
*
* The result is a mapping of the data within f to the data within h
* The output is verified before the program terminates to see that every
* element at index 2i of f is indeed at index i in h
*
* We can see that there is a significant speedup in comparison to the time it takes
* to perform the hash in the serial code.
*
* The output of the performance is displayed in seconds.
* The performance results are to be compared with the performance of hash.c
*
*/
// System includes
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Jetson TK1 has device capability 1.x allowing 1024 threads/block
// We also indicate EVEN_NUM as the vector size since this hash requires even length arrays
#define THREADS_PER_BLOCK 1024
#define EVEN_NUM 123374234
// Simple 2n -> n hash: element i of h receives element 2*i of f,
// one thread per output element (i < n).
__global__ void hash(char *f, char *h, int n) {
	int idx = blockIdx.x*blockDim.x+threadIdx.x;
	if (idx >= n) return; // guard the grid tail
	h[idx] = f[2*idx];
}
// Driver: allocate a 2n-byte vector f and an n-byte vector h, run the
// hash kernel r times on the GPU with CUDA-event timing, verify
// h[i] == f[2*i] on the host, and print the elapsed time in seconds.
int main(void) {
// two_n = input size in bytes; r = number of timed kernel launches
int two_n = EVEN_NUM, i, r=50;
char *f, *h, *d_f,*d_h;
cudaError_t error;
if ( two_n % 2 ){
printf("NO NO NO!!! Even numbers only please.\n");
exit(EXIT_FAILURE);
}
//printf("Malloc space on CPU (f,h)");
f = (char *)calloc(sizeof(char), two_n);
if( f == NULL ){
fprintf(stderr,"Failed to allocate %d bytes for f.",two_n);
exit(EXIT_FAILURE);
}
h = (char *)calloc(sizeof(char), two_n/2);
if( h == NULL ){
fprintf(stderr,"Failed to allocate %d bytes for h.",two_n/2);
exit(EXIT_FAILURE);
}
/* Identify our streams */
//printf("Malloc space on GPU (d_f,d_h)\n");
error = cudaMalloc((void **)&d_f, sizeof(char) * two_n);
if( error != cudaSuccess ){
fprintf(stderr,"Failed to cudaMalloc %d bytes for d_f.",two_n);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **)&d_h, sizeof(char) * two_n/2);
if( error != cudaSuccess ){
fprintf(stderr,"Failed to cudaMalloc %d bytes for d_h.",two_n/2);
exit(EXIT_FAILURE);
}
//populate data into array: printable ASCII characters 33..126
//printf("Generate vectored data (Size=%d bytes)\n",two_n);
for (i = 0; i < two_n; i++) {
f[i] = (char) ((i % 94) + 33);
}
//send data over the bus
//printf("Send data to GPU\n");
error = cudaMemcpy( d_f, f, two_n, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_f,f) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
/*************************** Setup for testing ************************************/
//printf("Run kernel code \n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("Running...\n");
//run kernel r times; grid sized so every output index is covered
for( i = 0; i < r; i++){
hash<<<(two_n/2+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_f,d_h,two_n/2);
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
/*******************************************************************************************
****************************** for testing purposes ***************************************/
//send data over the bus
error = cudaMemcpy( h, d_h, sizeof(char)*two_n/2, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h,d_h) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
//printf("Done.\n");
//validate for correctness
for (i = 0; i < two_n/2; i++) {
if (h[i] != f[2*i]) {
//printf("index %d FAILED!\n", i);
exit(EXIT_FAILURE);
}
}
// Compute and print the performance
// NOTE(review): the kernel ran r (=50) times above, but the divisor here
// is 1, so this prints the total time for all launches, not the per-hash
// time -- confirm whether "/ r" was intended.
float msecPerhash = msecTotal / 1;
printf( "Performance= %.06f sec\n", msecPerhash/1000.0 );
free(f); free(h);
cudaFree(d_f); cudaFree(d_h);
cudaDeviceReset();
return 0;
}
|
22,973 | #include<bits/stdc++.h>
using namespace std;
#define BLOCK_SIZE 256
// Grid-stride weight update without a gradient term:
// W[i] *= (1 - lr*lambda) for all i < n (regularization shrink only).
__global__ void type1(int n, double lr, double lambda, double * W) {
	const int first = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = blockDim.x * gridDim.x;
	const double shrink = 1.0 - lr * lambda; // loop-invariant factor
	for (int i = first; i < n; i += step) {
		W[i] = shrink * W[i];
	}
}
// Grid-stride weight update with a gradient term: shrink every weight by
// (1 - lr*lambda) and add lr * Y[rand_choice] * X[rand_choice][i],
// where X is row-major n_features-wide (row rand_choice selected).
__global__ void type2(int n, double lr, double lambda, double * W, int rand_choice, double * X, double * Y) {
	const int first = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = blockDim.x * gridDim.x;
	const double shrink = 1.0 - lr * lambda;   // loop-invariant factor
	const double coef = lr * Y[rand_choice];   // loop-invariant factor
	const double *row = X + (size_t)rand_choice * n;
	for (int i = first; i < n; i += step) {
		W[i] = shrink * W[i] + coef * row[i];
	}
}
// Software atomic add for doubles, implemented with a compare-and-swap
// loop on the 64-bit integer reinterpretation of the value (for devices
// without a native double-precision atomicAdd). Returns the value stored
// at *address before the addition, matching atomicAdd semantics.
__device__ double atomicAddDouble(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
// attempt to replace 'assumed' with assumed+val; atomicCAS returns the
// actual previous value, so the loop retries if another thread won
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// Partial dot product of W with row rand_choice of X (row-major, n wide),
// accumulated atomically into *res across all blocks.
// Each thread stages one product in shared memory; thread 0 of the block
// then sums the block serially and does one atomic add.
// Assumes blockDim.x == BLOCK_SIZE (the shared array and the serial loop
// both use BLOCK_SIZE).
__global__ void dot(int n, double * W, double *X, int rand_choice, double * res) {
__shared__ double temp[BLOCK_SIZE];
int index = blockIdx.x * blockDim.x + threadIdx.x;
// out-of-range threads contribute 0 so the serial sum below stays correct
if (index < n) temp[threadIdx.x] = W[index] * X[rand_choice * n + index];
else temp[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x == 0){
double sum = 0;
for (int i=0;i<BLOCK_SIZE;i++) sum += temp[i];
atomicAddDouble(res, sum);
}
}
// Train a linear SVM with Pegasos-style SGD on the GPU, then evaluate
// training accuracy on the host. Data: n_samples x n_features doubles from
// train.txt; labels from labels.txt with 0 remapped to -1.
int main() {
srand(time(NULL));
ifstream trainfile ("train.txt");
ifstream labelfile ("labels.txt");
// NOTE(review): neither stream is checked for a successful open; a missing
// file silently trains on zero-filled data.
int n_samples=200;
int n_features=50000;
double *W, *X, *Y, *res;
double *d_W, *d_X, *d_Y, *d_res;
cudaEvent_t start, stop;
float elapsedTime;
W = (double *) malloc(n_features * sizeof(double));
X = (double *) malloc(n_samples * n_features * sizeof(double));
Y = (double *) malloc(n_samples * sizeof(double));
res = (double *) malloc(sizeof(double));
cudaMalloc(&d_W, n_features * sizeof(double));
cudaMalloc(&d_X, n_samples * n_features * sizeof(double));
cudaMalloc(&d_Y, n_samples * sizeof(double));
cudaMalloc(&d_res, sizeof(double));
// load the dense feature matrix (row-major: sample i, feature j)
for (int i=0;i<n_samples;i++) {
for (int j=0;j<n_features;j++)
trainfile >> X[i*n_features + j];
}
// labels: map 0 -> -1 so labels are in {-1, +1}
for (int i=0;i<n_samples;i++) {
labelfile >> Y[i];
if (Y[i] == 0) {
Y[i] = -1;
}
}
for (int i=0;i<n_features;i++) W[i] = 0;
cudaMemcpy(d_X, X, n_samples * n_features * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_W, W, n_features * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, Y, n_samples * sizeof(double), cudaMemcpyHostToDevice);
// NOTE(review): duplicate transfer -- d_W was already copied two lines up.
cudaMemcpy(d_W, W, n_features * sizeof(double), cudaMemcpyHostToDevice);
int num_iters = 100;
double lambda = 1.0;
cudaEventCreate(&start);
cudaEventRecord(start,0);
// SGD loop: pick a random sample; if its margin is >= 1 apply only the
// regularization shrink (type1), otherwise shrink and add the scaled
// sample (type2).
for (int iters=1;iters<=num_iters;iters++) {
int numBlocks = (n_features + BLOCK_SIZE - 1) / BLOCK_SIZE;
double lr = 1.0 / (lambda * iters); // decaying step size
int rand_choice = rand() % n_samples;
cout << rand_choice << endl;
*res = 0;
cudaMemcpy(d_res, res, sizeof(double), cudaMemcpyHostToDevice);
dot<<<numBlocks, BLOCK_SIZE>>>(n_features, d_W, d_X, rand_choice, d_res);
// blocking copy also waits for the dot kernel to finish
cudaMemcpy(res, d_res, sizeof(double), cudaMemcpyDeviceToHost);
if (Y[rand_choice] * res[0] >= 1.0)
type1<<<numBlocks, BLOCK_SIZE>>>(n_features, lr, lambda, d_W);
else
type2<<<numBlocks, BLOCK_SIZE>>>(n_features, lr, lambda, d_W, rand_choice, d_X, d_Y);
// NOTE(review): this per-iteration copy of W is never read inside the
// loop and is repeated after the loop; it only adds transfer time.
cudaMemcpy(W, d_W, n_features * sizeof(double), cudaMemcpyDeviceToHost);
}
cudaMemcpy(W, d_W, n_features * sizeof(double), cudaMemcpyDeviceToHost);
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
cout << "Train time " << elapsedTime << endl;
// training accuracy: count samples where sign(W.x) matches the label
double correct = 0.0;
for (int i=0;i<n_samples;i++) {
double val = 0.0;
for (int j=0;j<n_features;j++)
val += W[j] * X[i * n_features + j];
if (val * Y[i] >= 0)
correct += 1;
}
cout << "Correct " << correct << endl;
printf("Accuracy %lf\n", correct / n_samples);
// NOTE(review): host buffers and device allocations are never freed;
// harmless at process exit but worth cleaning up.
return 0;
}
|
22,974 | // Take From
// https://stackoverflow.com/questions/35137213/texture-objects-for-doubles
#include <vector>
#include <cstdio>
// Reassemble a double from the two 32-bit halves returned by a uint2
// texture fetch (p.x = low word, p.y = high word).
static __inline__ __device__ double fetch_double(uint2 p){
return __hiloint2double(p.y, p.x);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with file/line context; optionally terminate the
// process with the error code (used via the gpuErrchk macro).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess) return; // nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// Kernel: fetch element 0 of the texture as a uint2 pair of 32-bit words,
// rebuild the original double, and print it from the device.
__global__ void my_print(cudaTextureObject_t texObject)
{
uint2 rval = tex1Dfetch<uint2>(texObject, 0);
double dval = fetch_double(rval);
printf("%f\n", dval);
}
// Demo driver: store doubles in device memory, bind them to a texture
// object as two 32-bit unsigned channels, and print element 0 from a kernel.
int main()
{
	double i = 0.35;
	int numel = 50;
	// fill a host vector with the test value and copy it to the device
	std::vector<double> h_data(numel, i);
	double* d_data;
	cudaMalloc(&d_data,numel*sizeof(double));
	cudaMemcpy((void*)d_data, &h_data[0], numel*sizeof(double), cudaMemcpyHostToDevice);
	// texture description: raw element reads, clamped, unnormalized coords
	cudaTextureDesc td;
	memset(&td, 0, sizeof(td));
	td.normalizedCoords = 0;
	td.addressMode[0] = cudaAddressModeClamp;
	td.readMode = cudaReadModeElementType;
	// bind the linear buffer as two 32-bit unsigned channels per double
	struct cudaResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = cudaResourceTypeLinear;
	resDesc.res.linear.devPtr = d_data;
	resDesc.res.linear.sizeInBytes = numel*sizeof(double);
	resDesc.res.linear.desc.f = cudaChannelFormatKindUnsigned;
	resDesc.res.linear.desc.x = 32;
	resDesc.res.linear.desc.y = 32;
	cudaTextureObject_t texObject;
	gpuErrchk(cudaCreateTextureObject(&texObject, &resDesc, &td, NULL));
	my_print<<<1,1>>>(texObject);
	gpuErrchk(cudaDeviceSynchronize());
	// release the texture object and device buffer (the original leaked both)
	gpuErrchk(cudaDestroyTextureObject(texObject));
	gpuErrchk(cudaFree(d_data));
	return 0;
}
22,975 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define ARR_SIZE 102400
#define THREADS 512
#define ARR_BYTE sizeof(int) * ARR_SIZE
__global__ void gpuSort(int * d_arr, size_t maxSize);
// Driver: fill an array with random ints, run the odd-even sort kernel
// enough passes to guarantee completion, verify the result, and report
// the GPU time.
int main(int argv, char ** argc)
{
	int * h_arr;
	int * d_arr;
	int blockSize;
	size_t i;
	cudaEvent_t start, stop;
	float milliseconds;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	h_arr = (int *)malloc(ARR_BYTE);
	cudaMalloc((void **)&d_arr, ARR_BYTE);
	// fill the host array with pseudo-random values in [0,1024)
	for (i = 0; i < ARR_SIZE; i++)
		h_arr[i] = rand() % 1024;
	cudaMemcpy(d_arr, h_arr, ARR_BYTE, cudaMemcpyHostToDevice);
	// each thread covers two adjacent pairs, so overprovision the grid
	blockSize = ARR_SIZE / THREADS;
	blockSize += (ARR_SIZE%THREADS ? 1 : 0);
	blockSize += (blockSize / 2) + (blockSize % 2);
	cudaEventRecord(start);
	// ARR_SIZE/2+1 passes of the compare-swap kernel
	for (i = 0; i < ARR_SIZE/2+1; i++) {
		gpuSort <<< blockSize, THREADS >>> (d_arr, ARR_SIZE);
	}
	cudaEventRecord(stop);
	cudaMemcpy(h_arr, d_arr, ARR_BYTE, cudaMemcpyDeviceToHost);
	cudaEventSynchronize(stop);
	// verify the result is nondecreasing
	for (i = 1; i < ARR_SIZE; i++) {
		if (h_arr[i - 1] > h_arr[i]) {
			printf("\nNot sorted\n\n");
			break;
		}
	}
	if (i == ARR_SIZE) {
		printf("\nSorted\n");
		cudaEventElapsedTime(&milliseconds, start, stop);
		// "%llf" is not a valid printf conversion; "%f" prints the double
		printf("Time taken : %f\n", milliseconds / 1000);
	}
	cudaFree(d_arr);
	free(h_arr);
}
// One pass of a compare-swap sort step: each thread compare-swaps the pair
// (2t, 2t+1), then (2t+1, 2t+2), then (2t, 2t+1) again. The host launches
// this kernel ARR_SIZE/2+1 times to converge on a sorted array.
// NOTE(review): there is no synchronization between the three phases, and
// neighboring threads/blocks touch overlapping elements, so individual
// passes race on pair-boundary elements; correctness relies on the
// repeated host-side launches -- verify with compute-sanitizer racecheck.
__global__ void gpuSort(int * d_arr, size_t maxSize)
{
int temp;
size_t threadIndex = threadIdx.x + blockDim.x*blockIdx.x;
threadIndex *= 2; // this thread's even pair starts at index 2*t
if (threadIndex + 1 < maxSize) {
if (d_arr[threadIndex] > d_arr[threadIndex + 1]) {
temp = d_arr[threadIndex];
d_arr[threadIndex] = d_arr[threadIndex + 1];
d_arr[threadIndex + 1] = temp;
}
}
threadIndex++; // odd pair (2t+1, 2t+2)
if (threadIndex + 1 < maxSize) {
if (d_arr[threadIndex] > d_arr[threadIndex + 1]) {
temp = d_arr[threadIndex];
d_arr[threadIndex] = d_arr[threadIndex + 1];
d_arr[threadIndex + 1] = temp;
}
}
threadIndex--; // even pair again
if (threadIndex + 1 < maxSize) {
if (d_arr[threadIndex] > d_arr[threadIndex + 1]) {
temp = d_arr[threadIndex];
d_arr[threadIndex] = d_arr[threadIndex + 1];
d_arr[threadIndex + 1] = temp;
}
}
}
|
22,976 | #include "includes.h"
// Subtract the row mean from each of matCount rows of length rowSize packed
// in 'solutions' (several rows per block: BLOCK_SIZE / rowSize of them).
// The last element of each row is treated as 0 when forming the mean
// (see the col != rowSize-1 selection below).
// Assumes rowSize is a power of two: 'col' is computed with the mask
// (rowSize - 1) and the reduction halves 's' down from rowSize/2.
// NOTE(review): the early return for matrixIdx >= matCount means some
// threads of a partially-filled block skip the __syncthreads() calls in the
// reduction loop while others execute them -- a divergent barrier; confirm
// blocks never straddle matCount or restructure the guard.
__global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ double beta[BLOCK_SIZE];
__shared__ double line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
solutions += matrixIdx * rowSize;
// stage the row in shared memory; zero the last column's contribution
beta[tid] = col != (rowSize - 1) ? solutions[col] : 0;
line[tid] = beta[tid];
__syncthreads();
// tree reduction: line[rowSize*inBlockOffset] ends up holding the row sum
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
// subtract the mean and write back in place
beta[tid] -= line[rowSize * inBlockOffset] / rowSize;
solutions[col] = beta[tid];
}
22,977 | #include "includes.h"
// For each positive activation x[index], scale the corresponding gradient
// in place: delta_gpu[index] *= x[index]. Data layout is
// [batch][channels][wh_step]; each thread handles one (batch, spatial)
// position and loops over the channels.
__global__ void gradient_array_normalize_channels_kernel(float *x, int size, int batch, int channels, int wh_step, float *delta_gpu)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= size) return; // guard the grid tail
	const int wh_i = i % wh_step;       // spatial position
	const int b = i / wh_step;          // batch index
	for (int k = 0; k < channels; ++k) {
		const int index = wh_i + k * wh_step + b * wh_step * channels;
		const float out = x[index];
		if (out > 0) {
			delta_gpu[index] *= out;
		}
	}
}
22,978 | #include "includes.h"
// For each partition boundary in 'part' (one entry per element, boundaries
// where consecutive entries differ), store the prefix weight at the last
// element of that partition into accumulatedSize[partition]. The final
// element always closes its partition.
__global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= size) return;
	if (idx == size - 1) {
		accumulatedSize[part[idx]] = weights[idx];
	} else if (part[idx] != part[idx + 1]) {
		accumulatedSize[part[idx]] = weights[idx];
	}
}
22,979 | #include "includes.h"
// Fill black pixels of an interleaved RGB image with the average color of
// their scale x scale cell: a pixel whose three channels are all zero, and
// whose cell counter nz is positive, receives average[cell]/nz[cell] per
// channel. Uses TILE_DIM/BLOCK_ROWS tiling: each thread covers several rows.
__global__ void fill_with_average(unsigned char *img, int * nz, int * average, int scale)
{
	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	int width = gridDim.x * TILE_DIM;
	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
	{
		int iw = x;
		int ih = y + j;
		int pix = 3 * (ih * width + iw);            // RGB byte offset of this pixel
		int cell = ih / scale * width + iw / scale; // coarse-grid cell index
		bool isBlack = (img[pix] + img[pix + 1] + img[pix + 2] == 0);
		if (isBlack && nz[cell] > 0)
		{
			int count = nz[cell];
			img[pix]     = (unsigned char)(average[3 * cell]     / count);
			img[pix + 1] = (unsigned char)(average[3 * cell + 1] / count);
			img[pix + 2] = (unsigned char)(average[3 * cell + 2] / count);
		}
	}
}
22,980 |
#include <iostream>
using namespace std;
// Record each thread's index into t; thread 0 additionally stores the
// block size (threads per block) into *b.
__global__ void kernel( int* b, int* t)
{
	int tid = threadIdx.x;
	if (tid == 0)
	{
		*b = blockDim.x; // num threads per block
	}
	t[tid] = tid;
}
// Host driver: launch 'kernel' with one block of 4 threads, copy back the
// per-thread indices and the block size, and print them.
int main()
{
	int numthreads = 4;
	int b;
	int* t;
	t = new int[numthreads];
	int* d_b; // pointer to device memory
	int* d_t; // pointer to device memory
	cudaMalloc( (void**)&d_b, sizeof(int));
	cudaMalloc( (void**)&d_t, numthreads*sizeof(int));
	kernel<<<1,numthreads>>>( d_b, d_t);
	// blocking copies also synchronize with the kernel launch above
	cudaMemcpy( &b, d_b, sizeof(int)
	, cudaMemcpyDeviceToHost);
	cudaMemcpy( t, d_t, numthreads*sizeof(int)
	, cudaMemcpyDeviceToHost);
	cout << "blockDim.x = " << b << endl;
	int thread;
	for( thread=0; thread<numthreads; thread++)
	{
		cout << "thread " << thread
		<< ": " << t[thread]
		<< endl;
	}
	cudaFree(d_t);
	cudaFree(d_b);
	delete[] t; // the original leaked this host allocation
	return 0;
}
|
22,981 | #include <cuda_runtime_api.h>
#include <iostream>
// Define a function that will only be compiled for and called from host
// (it uses std::cout, which exists only on the host side).
__host__ void HostOnly()
{
std::cout << "This function may only be called from the host" << std::endl;
}
// Define a function that will only be compiled for and called from device
// (it uses device-side printf).
__device__ void DeviceOnly()
{
printf("This function may only be called from the device\n");
}
// Portable helper, compiled for both host and device: returns x squared.
__host__ __device__ float SquareAnywhere(float x)
{
    float squared = x * x;
    return squared;
}
// Kernel entry point that exercises the device-only and portable helpers.
__global__ void RunGPU(float x)
{
    DeviceOnly();
    float squared = SquareAnywhere(x);
    printf("%f\n", squared);
}
/*
Call host and portable functions from plain host code.
Note that, by default, a function with no architecture specifier
is assumed to be __host__ by NVCC.
*/
void RunCPU(float x)
{
    HostOnly();
    float squared = SquareAnywhere(x);
    std::cout << squared << std::endl;
}
// Demo entry point: run the host path, then the device path (one thread),
// and wait for the kernel's printf output before exiting.
int main()
{
std::cout << "==== Sample 02 - Host / Device Functions ====\n" << std::endl;
/*
Expected output:
"This function may only be called from the host"
1764
"This function may only be called from the device"
1764.00
*/
RunCPU(42);
RunGPU<<<1, 1>>>(42);
// flush the device-side printf buffer before the process exits
cudaDeviceSynchronize();
return 0;
}
/*
Exercises:
1) Write a function that prints a message and can run on both the device and host
2) Revise the function from 1, such that the CPU version use std::cout. Use the
__CUDA_ARCH__ macro to write code paths that contain architecture-specific code.
*/ |
22,982 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
// INSERT KERNEL(S) HERE
// Histogram kernel with per-block privatization: each block builds a
// private histogram in dynamic shared memory, then merges it into the
// global bins with atomics. Launch with num_bins*sizeof(unsigned int)
// bytes of dynamic shared memory; input values must be < num_bins.
__global__ void histo_kernel(unsigned int* input, unsigned int *bins, unsigned int num_elements,
unsigned int num_bins)
{
extern __shared__ unsigned int histo_private[];
// zero the private histogram (block-stride loop over the bins)
int i = threadIdx.x;
int stride = blockDim.x;
while(i < num_bins){
histo_private[i] = 0;
i += stride;
}
__syncthreads();
// accumulate: grid-stride loop over the input elements
i = blockIdx.x*blockDim.x + threadIdx.x;
stride = blockDim.x*gridDim.x;
while(i < num_elements){
atomicAdd(&histo_private[input[i]], 1);
i += stride;
}
__syncthreads();
// merge the private histogram into the global bins
i = threadIdx.x;
stride = blockDim.x;
while(i < num_bins){
atomicAdd(&bins[i], histo_private[i]);
i += stride;
}
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host launcher for histo_kernel.
// Launch configuration: 32 blocks of 512 threads; each block keeps a
// private histogram of num_bins counters in dynamic shared memory.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
    unsigned int num_bins) {
    const unsigned int BLOCK_SIZE = 512;
    const unsigned int GRID_SIZE = 32;
    size_t sharedBytes = num_bins * sizeof(unsigned int);
    histo_kernel<<<GRID_SIZE, BLOCK_SIZE, sharedBytes>>>(input, bins, num_elements, num_bins);
}
|
22,983 | #include <stdio.h>
#include <cuda.h>
#define BLOCK_SIZE 1024
#define CUDA_CHECK(value, label) { \
cudaError_t c = (value); \
if (c != cudaSuccess) { \
fprintf(stderr, \
"Error: '%s' at line %d in %s\n", \
cudaGetErrorString(c),__LINE__,__FILE__); \
goto label; \
} }
// Kernel for the first iteration of parallel scan (per-block inclusive sum
// scan, Hillis-Steele double-buffered in dynamic shared memory of
// 2*blockDim.x doubles). Each block's total is also written back to
// d_in at the block's last element for the later addPreSums pass.
static __global__ void parallelScan(float *d_out, float *d_in, int length) {
	volatile extern __shared__ double sharedData[];
	int tid = threadIdx.x + blockIdx.x * blockDim.x;
	int threadx = threadIdx.x;
	// guard the load (the original read d_in[tid] out of bounds for the
	// tail block); 0 is the identity for the sum scan
	sharedData[threadx] = (tid < length) ? d_in[tid] : 0.0;
	__syncthreads();
	int pout = 0; int pin = 1;
	// the scan loop must be executed by ALL threads of the block: the
	// original wrapped it in `if (tid < length)`, making the
	// __syncthreads() below a divergent barrier for the tail block
	for (int offset = 1; offset < blockDim.x; offset <<= 1) {
		pout = 1 - pout;
		pin = 1 - pin;
		if (threadx >= offset) {
			sharedData[pout * blockDim.x + threadx] = sharedData[pin * blockDim.x + threadx] + sharedData[pin * blockDim.x + threadx - offset];
		} else {
			sharedData[pout * blockDim.x + threadx] = sharedData[pin * blockDim.x + threadx];
		}
		__syncthreads();
	}
	if (tid < length) {
		d_out[tid] = sharedData[pout * blockDim.x + threadx];
		// save the sum of the block back to d_in
		if (threadx == blockDim.x - 1) d_in[tid] = sharedData[pout * blockDim.x + threadx];
	}
}
// Second pass of the multi-block scan: for every element beyond the first
// block, add the totals of all preceding blocks (stored by parallelScan in
// the last slot of each block inside d_in) so the per-block scans become a
// global scan.
// Launch contract (see prefix_scan): gridDim.x == numBlocks-1, one element
// per thread, blockDim.x * sizeof(double) bytes of dynamic shared memory.
// NOTE(review): there is no bounds check against the total length — this
// assumes the input size is an exact multiple of blockDim.x; verify callers.
// NOTE(review): the per-thread loop over preceding blocks is O(numBlocks)
// per element; scanning the block sums first would avoid that.
static __global__ void addPreSums(float *d_out, float *d_in)
{
volatile extern __shared__ double sharedData[];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int bid = blockIdx.x + 1;
int threadx = threadIdx.x;
int bDim = blockDim.x;
//load separately scanned blocks (shifted one block: block 0 needs no fix-up)
sharedData[threadx] = d_out[tid + bDim];
//each thread adds the preceding block totals stored in d_in
for (int i=0; i<bid; i++) {
sharedData[threadx] += d_in[i*bDim + bDim - 1];
}
__syncthreads();
//store the adjusted values back to their positions
d_out[tid + bDim] = sharedData[threadx];
}
// Adds the scalar offset `preSum` to every element of d_in in [0, size).
static __global__ void addPartialSum(float *d_in, float preSum, int size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    d_in[idx] += preSum;
}
// Host helper: copies `in` to the device, offsets each of its `size`
// elements by `preSum` via the addPartialSum kernel, and copies the result
// back into `in`.
void addFirstHalf(float *in, float preSum, int size){
    const size_t bytes = (size_t)size * sizeof(float);
    float *dev = 0;
    cudaMalloc(&dev, bytes);
    cudaMemcpy(dev, in, bytes, cudaMemcpyHostToDevice);
    const int numBlocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    addPartialSum<<<dim3(numBlocks, 1, 1), dim3(BLOCK_SIZE, 1, 1)>>>(dev, preSum, size);
    cudaMemcpy(in, dev, bytes, cudaMemcpyDeviceToHost);
    if (dev) cudaFree(dev);
}
// Host entry point for an inclusive prefix scan of `in` (length `size`)
// into `out`: a per-block scan first, then a second pass that folds in the
// preceding block totals whenever more than one block was needed.
void prefix_scan(float *in, float *out, int size) {
    const size_t bytes = (size_t)size * sizeof(float);
    float *devIn = 0, *devOut = 0;
    cudaMalloc(&devIn, bytes);
    cudaMalloc(&devOut, bytes);
    cudaMemcpy(devIn, in, bytes, cudaMemcpyHostToDevice);
    // Grid size stays well below the launch limit for the sizes handled here.
    const int numBlocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // Double-buffered shared memory: 2 * BLOCK_SIZE doubles per block.
    parallelScan<<<dim3(numBlocks, 1, 1), dim3(BLOCK_SIZE, 1, 1), sizeof(double) * BLOCK_SIZE * 2>>>(devOut, devIn, size);
    if (numBlocks > 1) {
        // One block fewer: block 0 already holds its final prefix values.
        addPreSums<<<numBlocks - 1, BLOCK_SIZE, sizeof(double) * (BLOCK_SIZE)>>>(devOut, devIn);
    }
    cudaMemcpy(out, devOut, bytes, cudaMemcpyDeviceToHost);
    if (devIn) cudaFree(devIn);
    if (devOut) cudaFree(devOut);
}
|
22,984 | #include "includes.h"
// Computes Tp = T * p per pixel, where T = [a c; c b] is a symmetric 2x2
// tensor stored as three scalar fields; pixels where mask == 0 are skipped.
__global__ void TgvSolveTpMaskedKernel(float* mask, float*a, float *b, float*c, float2* p, float2* Tp, int width, int height, int stride) {
    int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
    int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
    // Bug fix: reject the thread when EITHER coordinate is out of range
    // ('||'); the original '&&' only returned when both were out of range,
    // so threads with a single out-of-range coordinate accessed memory out
    // of bounds.
    if ((iy >= height) || (ix >= width)) return;
    int pos = ix + iy * stride;
    if (mask[pos] == 0.0f) return;
    Tp[pos].x = a[pos] * p[pos].x + c[pos] * p[pos].y;
    Tp[pos].y = c[pos] * p[pos].x + b[pos] * p[pos].y;
}
// Squared Euclidean distance between two 3-vectors (note: no sqrt).
__device__ float dist(float x[3], float y[3])
{
    float dx = x[0] - y[0];
    float dy = x[1] - y[1];
    float dz = x[2] - y[2];
    return dx * dx + dy * dy + dz * dz;
}
// Dot product of two 3-component vectors.
__device__ float dot(float a[], float b[])
{
    float acc = a[0] * b[0];
    acc += a[1] * b[1];
    acc += a[2] * b[2];
    return acc;
}
// Applies the rigid transform x1 = t + u * x (3x3 matrix u, translation t)
// to a single point x.
__device__ void transform(float t[3], float u[3][3], float *x, float *x1)
{
    for (int r = 0; r < 3; r++)
        x1[r] = t[r] + dot(&u[r][0], x);
}
// Transforms `len` points from x into x1 using translation t and matrix u.
__device__ void do_rotation(float x[][3], float x1[][3], int len, float t[3], float u[3][3])
{
    for (int idx = 0; idx < len; idx++)
        transform(t, u, &x[idx][0], &x1[idx][0]);
}
// Writes into the caller-supplied buffer c (at least 6 bytes) the string of
// leading '0' characters needed to left-pad `a` to 5 decimal digits:
// 1-digit values get "0000", ..., 5-digit values get "". Values of 6 or
// more digits yield the full "00000" (original behavior preserved).
char* get50(int a, char *c)
{
    for (int i = 0; i < 5; i++)
        c[i] = '0';
    c[5] = '\0';
    // limit is 10^(5-i): terminate the string after 5-d zeros, where d is
    // the decimal width of a.
    int limit = 10;
    for (int i = 4; i >= 0; i--) {
        if (a < limit) {
            c[i] = '\0';
            return c;
        }
        limit *= 10;
    }
    return c;
}
|
22,986 | /*
Programa: mst_cuda_semGB.c (Versão 1)
Descrição: Implementa o Algoritmo para árvore geradora mínima.
Programadora: Jucele Vasconcellos
Data: 25/08/2017
Versão 3: sem o atomicAddD em Calcula_num_zerodiff
Compilacao: nvcc -arch sm_30 -o mst_cuda_semGB.exe mst_cuda_semGB.cu
Execucao: ./st_cuda.exe in/grafo/grafo1000a cuda.out
Entrada de dados: Este programa lê os dados de um grafo no formato
8
16
4 5 0.35
4 7 0.37
5 7 0.28
0 7 0.16
1 5 0.32
0 4 0.38
2 3 0.17
1 7 0.19
0 2 0.26
1 2 0.36
1 3 0.29
2 7 0.34
6 2 0.40
3 6 0.52
6 0 0.58
6 4 0.93
sendo a primeira linha o número de vértices, a segunda linha o número de arestas
e as linhas subsequentes as arestas v1 v2 custo
Saída de Dados: Este programa produz um arquivo de saída as arestas que compõem a árvore geradora
*/
#include <stdio.h> // printf
#include<stdbool.h> // true, false
#include <stdlib.h> //malloc
#include <time.h> //clock
#include <cuda.h>
#include <cuda_runtime.h>
// Grafo Original
typedef struct {
unsigned int v, u;
unsigned int grau_st;
} aresta;
typedef struct {
int n, m;
aresta *arestas;
float *custos;
} grafo;
typedef struct {
int v1, v2;
} aresta_E;
// Funções e Procedimentos
// Aborts the program with a diagnostic when a CUDA runtime call failed.
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line );
    exit(EXIT_FAILURE);
}
#define CHECK_ERROR(err) (HandleError(err, __FILE__, __LINE__))
grafo LeGrafo(char *);
__device__ double atomicAddD(double* address, float val);
__global__ void EncontraMenorAresta1(aresta*, float*, int*, int, int);
__global__ void EncontraMenorAresta2(aresta*, float*, int*, int, int);
__global__ void MarcarArestas_Strut(aresta*, int*, int);
__global__ void Calcula_num_zerodiff(aresta*, float*, int, int*, unsigned int*, unsigned int*, double*);
__global__ void Inicializa_arestasE_C(aresta*, int*, int, int*, aresta_E*, unsigned int*);
__global__ void AtualizaC_1(aresta_E*, int*, int*, int);
__global__ void DefineNovosVU(int*, int, char *, unsigned int *);
__global__ void AtualizaC_3(int*, int, char *);
__global__ void MarcarArestas(aresta *, int*, int, int);
// Main function: reads a weighted graph, then repeatedly (a) selects each
// vertex's cheapest incident edge (the "strut"), (b) counts the resulting
// connected components, and (c) contracts the graph, until one component
// remains. The collected strut edges form the MST; statistics are appended
// to the output file.
int main (int argc, char** argv){
grafo G;
double tempoTotal, tempo1, tempo2;
double tempo1p, tempo2p;
int *SolutionEdgeSet;
int SolutionSize, i, it;
double SolutionVal;
int num_zerodiff;
FILE *Arq;
int dimBloco, dimGrid;
aresta *d_arestas;
float *d_custos;
int *d_SolutionEdgeSet;
unsigned int *d_SolutionSize, *d_num_zerodiff;
double *d_SolutionVal;
int *d_menorAresta;
int n;
// Step 1: Parameter validation
// Step 2: Read the graph data
// Step 3: Build the bipartite graph corresponding to the received edges
// Step 4: Find the solution
// Step 4.1: Choose the edges that will form the strut
// Step 4.2: Compute num_zerodiff and derive the new connected components
// Step 4.3: Compact the graph
// ==============================================================================
// Step 1: Parameter validation
// ==============================================================================
//Checking the parameters
if(argc < 3 ){
printf( "\nParametros incorretos\n Uso: ./cms_seq.exe <ArqEntrada> <ArqSaida> <dimBloco> <S/N> onde:\n" );
printf( "\t <ArqEntrada> (obrigatorio) - Nome do arquivo com as informações do grafo (número de vértices, número de arestas e arestas.\n" );
printf( "\t <ArqSaida> (obrigatorio) - Nome do arquivo de saida.\n" );
printf( "\t <S ou N> - Mostrar ou não as arestas da MST.\n" );
return 0;
}
//Block dimension, fixed at 64 threads
//dimBloco = 32;
dimBloco = 64;
// ==============================================================================
// Step 2: Read the data of graph G
// ==============================================================================
tempo1p = (double) clock( ) / CLOCKS_PER_SEC;
G = LeGrafo(argv[1]);
n = G.n;
// printf("****************************\n");
// printf("****** GRAFO ORIGINAL ******\n");
// MostraGrafoOriginal(GO);
// printf("Grafo de entrada lido\n");
//Host allocation to store the solution (an MST has G.n-1 edges)
SolutionEdgeSet = (int *) malloc((G.n-1)*sizeof(int));
//Allocate device memory for the edges of d_SolutionEdgeSet
CHECK_ERROR(cudaMalloc((void **) &d_SolutionEdgeSet, G.n * sizeof(int)));
SolutionSize = 0;
SolutionVal = 0;
//Allocate device memory for d_SolutionSize
CHECK_ERROR(cudaMalloc((void **) &d_SolutionSize, sizeof(unsigned int)));
//Initialize d_SolutionSize to 0
CHECK_ERROR(cudaMemset(d_SolutionSize, 0, sizeof(unsigned int)));
//Allocate device memory for d_SolutionVal
CHECK_ERROR(cudaMalloc((void **) &d_SolutionVal, sizeof(double)));
//Initialize d_SolutionVal to 0
CHECK_ERROR(cudaMemset(d_SolutionVal, 0, sizeof(double)));
//Allocate device memory for d_num_zerodiff
CHECK_ERROR(cudaMalloc((void **) &d_num_zerodiff, sizeof(unsigned int)));
tempo2p = (double) clock( ) / CLOCKS_PER_SEC;
// printf("Tempo Passo 2: %lf\n", tempo2p - tempo1p);
// ==============================================================================
// Step 3: Transfer data to the device and initialize variables
// ==============================================================================
//Start the timing
tempo1 = (double) clock( ) / CLOCKS_PER_SEC;
tempo1p = (double) clock( ) / CLOCKS_PER_SEC;
//Allocate device memory for the graph edges
CHECK_ERROR(cudaMalloc((void **) &d_arestas, G.m * sizeof(aresta)));
//Copy the graph edges from host to device
CHECK_ERROR(cudaMemcpy(d_arestas, G.arestas, G.m * sizeof(aresta), cudaMemcpyHostToDevice));
//Allocate device memory for the edge costs
CHECK_ERROR(cudaMalloc((void **) &d_custos, G.m * sizeof(float)));
//Copy the edge costs from host to device
CHECK_ERROR(cudaMemcpy(d_custos, G.custos, G.m * sizeof(float), cudaMemcpyHostToDevice));
//Allocate device memory for the d_menorAresta vector
CHECK_ERROR(cudaMalloc((void **) &d_menorAresta, G.n * sizeof(int)));
tempo2p = (double) clock( ) / CLOCKS_PER_SEC;
// printf("Tempo Passo 3: %lf\n", tempo2p - tempo1p);
// ==============================================================================
// Step 4: Find the solution
// ==============================================================================
it = 0;
num_zerodiff = 0;
while(num_zerodiff != 1)
{
// ==============================================================================
// Step 4.1: Choose the edges that will form the strut
// ==============================================================================
tempo1p = (double) clock( ) / CLOCKS_PER_SEC;
//Initialize d_menorAresta with -1 (memset works per byte: 0xFF... == -1)
CHECK_ERROR(cudaMemset(d_menorAresta, -1, G.n * sizeof(int)));
//Launch kernel to find each vertex's cheapest edge (via smaller endpoint v)
dimGrid = ((G.m-1)/dimBloco)+1;
EncontraMenorAresta1<<<dimGrid, dimBloco>>>(d_arestas, d_custos, d_menorAresta, G.m, G.n);
// if(it >= 0)
// {
// printf("Após EncontraMenorAresta1\n");
// int *h_menorAresta;
// h_menorAresta = (int *) malloc(G.n * sizeof(int));
// CHECK_ERROR(cudaMemcpy(h_menorAresta, d_menorAresta, G.n * sizeof(int), cudaMemcpyDeviceToHost));
// for(i = 0; i < n; i++)
// printf("MenorAresta[%d] = %d\n", i, h_menorAresta[i]);
// free(h_menorAresta);
// }
dimGrid = ((G.m-1)/dimBloco)+1;
EncontraMenorAresta2<<<dimGrid, dimBloco>>>(d_arestas, d_custos, d_menorAresta, G.m, G.n);
// if(it >= 0)
// {
// printf("Após EncontraMenorAresta2\n");
// int *h_menorAresta;
// h_menorAresta = (int *) malloc(G.n * sizeof(int));
// CHECK_ERROR(cudaMemcpy(h_menorAresta, d_menorAresta, G.n * sizeof(int), cudaMemcpyDeviceToHost));
// for(i = 0; i < n; i++)
// printf("MenorAresta[%d] = %d\n", i, h_menorAresta[i]);
// free(h_menorAresta);
// }
//Launch kernel to mark the strut edges
dimGrid = ((n-1)/dimBloco)+1;
MarcarArestas_Strut<<<dimGrid, dimBloco>>>(d_arestas, d_menorAresta, n);
tempo2p = (double) clock( ) / CLOCKS_PER_SEC;
// printf("Tempo Passo 4.1: %lf\n", tempo2p - tempo1p);
// ==============================================================================
// Step 4.2: Compute num_zerodiff
// ==============================================================================
tempo1p = (double) clock( ) / CLOCKS_PER_SEC;
//Initialize d_num_zerodiff to 0
CHECK_ERROR(cudaMemset(d_num_zerodiff, 0, sizeof(unsigned int)));
//Launch kernel to compute num_zerodiff and fill SolutionEdgeSet
dimGrid = ((G.m-1)/dimBloco)+1;
Calcula_num_zerodiff<<<dimGrid, dimBloco>>>(d_arestas, d_custos, G.m, d_SolutionEdgeSet, d_SolutionSize, d_num_zerodiff, d_SolutionVal);
CHECK_ERROR(cudaMemcpy(&num_zerodiff, d_num_zerodiff, sizeof(unsigned int), cudaMemcpyDeviceToHost));
tempo2p = (double) clock( ) / CLOCKS_PER_SEC;
// printf("Tempo Passo 4.2: %lf it = %d num_zerodiff = %d SolutionSize = %d\n", tempo2p - tempo1p, it, num_zerodiff, SolutionSize);
// ==============================================================================
// Step 4.3: Compact the graph
// ==============================================================================
if(num_zerodiff != 1)
{
// ==============================================================================
// Step 4.3.1: Compute the connected components
// ==============================================================================
tempo1p = (double) clock( ) / CLOCKS_PER_SEC;
//Declare variable d_aux (counter of emitted component edges)
unsigned int *d_aux;
//Allocate device memory for d_aux
CHECK_ERROR(cudaMalloc((void **) &d_aux, sizeof(unsigned int)));
//Initialize d_aux to 0
CHECK_ERROR(cudaMemset(d_aux, 0, sizeof(unsigned int)));
//Declare variable d_arestasE (edge list for component labeling)
aresta_E *d_arestasE;
//Allocate device memory for d_arestasE
CHECK_ERROR(cudaMalloc((void **) &d_arestasE, n * sizeof(aresta_E)));
//Declare variable d_C (component label per vertex)
int *d_C;
//Allocate device memory for d_C
CHECK_ERROR(cudaMalloc((void **) &d_C, n * sizeof(int)));
//Launch kernel to initialize d_arestasE and d_C
dimGrid = ((n-1)/dimBloco)+1;
Inicializa_arestasE_C<<<dimGrid, dimBloco>>>(d_arestas, d_menorAresta, n, d_C, d_arestasE, d_aux);
int h_fim, *d_fim;
CHECK_ERROR(cudaMalloc((void**)&d_fim, sizeof(int)));
dimGrid = ((n-1-num_zerodiff)/dimBloco)+1;
// Iterate label propagation until no label changes (d_fim stays 0)
do
{
h_fim = 0;
CHECK_ERROR(cudaMemcpy(d_fim, &h_fim, sizeof(int), cudaMemcpyHostToDevice));
AtualizaC_1<<<dimGrid, dimBloco>>>(d_arestasE, d_C, d_fim, n-num_zerodiff);
CHECK_ERROR(cudaMemcpy(&h_fim, d_fim, sizeof(int), cudaMemcpyDeviceToHost));
}while (h_fim == 1);
CHECK_ERROR(cudaDeviceSynchronize());
//Declare variable d_aux2 (marks component roots)
char *d_aux2;
//Allocate device memory for d_aux2
CHECK_ERROR(cudaMalloc((void **) &d_aux2, n * sizeof(char)));
//Initialize d_aux2 to 0
CHECK_ERROR(cudaMemset(d_aux2, 0, n * sizeof(char)));
//Initialize d_aux to 0 (now the dense component counter)
CHECK_ERROR(cudaMemset(d_aux, 0, sizeof(unsigned int)));
//Launch kernel to update d_C (assign dense ids to component roots)
dimGrid = ((n-1)/dimBloco)+1;
// printf("Vou chamar DefineNovosVU para dimGrid = %d e dimBloco = %d com n = %d\n", dimGrid, dimBloco, n);
DefineNovosVU<<<dimGrid, dimBloco>>>(d_C, n, d_aux2, d_aux);
CHECK_ERROR(cudaDeviceSynchronize());
//Launch kernel to update d_C (non-roots inherit their root's dense id)
dimGrid = ((n-1)/dimBloco)+1;
AtualizaC_3<<<dimGrid, dimBloco>>>(d_C, n, d_aux2);
//Releasing variables
CHECK_ERROR(cudaFree(d_aux));
CHECK_ERROR(cudaFree(d_arestasE));
CHECK_ERROR(cudaFree(d_fim));
CHECK_ERROR(cudaFree(d_aux2));
tempo2p = (double) clock( ) / CLOCKS_PER_SEC;
// printf("Tempo Passo 4.3.1: %lf\n", tempo2p - tempo1p);
// ==============================================================================
// Step 4.3.2: Mark the edges
// ==============================================================================
tempo1p = (double) clock( ) / CLOCKS_PER_SEC;
//Mark edges for removal / relabel surviving ones
dimGrid = ((G.m-1)/dimBloco)+1;
MarcarArestas<<<dimGrid, dimBloco>>>(d_arestas, d_C, G.m, G.n);
CHECK_ERROR(cudaDeviceSynchronize());
CHECK_ERROR(cudaFree(d_C));
// aresta *h_arestas;
// h_arestas = (aresta *) malloc(G.m * sizeof(aresta));
// CHECK_ERROR(cudaMemcpy(h_arestas, d_arestas, G.m * sizeof(aresta), cudaMemcpyDeviceToHost));
// for(i = 0; i < G.m; i++)
// if(h_arestas[i].v != G.n)
// printf("Aresta[%d] v = %d u = %d\n", i, h_arestas[i].v, h_arestas[i].u);
// free(h_arestas);
tempo2p = (double) clock( ) / CLOCKS_PER_SEC;
// printf("Tempo Passo 4.3.2: %lf\n", tempo2p - tempo1p);
}
it++;
n = num_zerodiff;
} // end while(num_zerodiff != 1)
// Copy the solution to the host
// CHECK_ERROR(cudaMemcpy(&SolutionVal, d_SolutionVal, sizeof(double), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(&SolutionSize, d_SolutionSize, sizeof(unsigned int), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(SolutionEdgeSet, d_SolutionEdgeSet, (G.n-1) * sizeof(int), cudaMemcpyDeviceToHost));
// The total cost is accumulated on the host from the original edge costs
SolutionVal = 0;
for(i = 0; i < SolutionSize; i++)
SolutionVal += G.custos[SolutionEdgeSet[i]];
tempo2 = (double) clock( ) / CLOCKS_PER_SEC;
tempoTotal = tempo2 - tempo1;
printf("%lf\n", tempoTotal);
Arq = fopen(argv[2], "a");
fprintf(Arq, "\n*** Input file: %s\n", argv[1]);
fprintf(Arq, "Total Time: %lf\n", tempoTotal);
fprintf(Arq, "Number of iterations: %d\n", it);
fprintf(Arq, "SolutionSize: %d\n", SolutionSize);
fprintf(Arq, "SolutionValue: %f\n", SolutionVal);
if((argc == 4) && (argv[3][0] == 'S' || argv[3][0] == 's'))
{
fprintf(Arq, "*** MST formed by %d edges\n", SolutionSize);
for(i = 0; i < SolutionSize; i++)
fprintf(Arq, "Edge %d - %d\n", G.arestas[SolutionEdgeSet[i]].v, G.arestas[SolutionEdgeSet[i]].u);
}
fclose(Arq);
// Releasing device-allocated variables
CHECK_ERROR(cudaFree(d_SolutionEdgeSet));
CHECK_ERROR(cudaFree(d_SolutionSize));
CHECK_ERROR(cudaFree(d_SolutionVal));
CHECK_ERROR(cudaFree(d_num_zerodiff));
CHECK_ERROR(cudaFree(d_arestas));
CHECK_ERROR(cudaFree(d_custos));
CHECK_ERROR(cudaFree(d_menorAresta));
// Releasing host-allocated variables
free(G.arestas);
free(G.custos);
free(SolutionEdgeSet);
return 0;
}
// ==============================================================================
// LeGrafo function: reads the graph from a file (vertex count, edge count,
// then one "u v cost" triple per edge) into a grafo structure. Edges are
// normalized so that field v holds the smaller endpoint.
// Fixes vs. original: the file-open result is checked, and the vertex
// fields are read with %u — they are unsigned int, so the original %d
// conversions were undefined behavior.
// ==============================================================================
grafo LeGrafo(char *Arquivo){
int i;
unsigned int aux;
grafo G;
FILE *Arq;
Arq = fopen(Arquivo, "r");
if(Arq == NULL){
printf("Could not open input file %s\n", Arquivo);
exit(EXIT_FAILURE);
}
i = 0;
fscanf(Arq,"%d",&i);
G.n = i;
fscanf(Arq,"%d",&i);
G.m = i;
G.arestas = (aresta *) malloc(G.m*sizeof(aresta));
G.custos = (float *) malloc(G.m*sizeof(float));
for(i = 0; i < G.m; i++){
fscanf(Arq,"%u",&G.arestas[i].u);
fscanf(Arq,"%u",&G.arestas[i].v);
// Normalize so v is the smaller endpoint
if(G.arestas[i].v > G.arestas[i].u)
{
aux = G.arestas[i].v;
G.arestas[i].v = G.arestas[i].u;
G.arestas[i].u = aux;
}
fscanf(Arq,"%f",&G.custos[i]);
G.arestas[i].grau_st = 0;
}
fclose(Arq);
return G;
}
// Software double-precision atomic add implemented with a compare-and-swap
// loop (for devices lacking native atomicAdd on double).
// NOTE(review): `val` is declared float, so the addend is single precision
// even though the accumulator is double — confirm that precision loss is
// intended (the forward declaration above uses float as well).
__device__ double atomicAddD(double* address, float val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
// Retry until no other thread modified *address between our read and CAS.
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
// Return the value observed before our addition, like native atomicAdd.
return __longlong_as_double(old);
}
// ==============================================================================
// EncontraMenorAresta1 function: for each live edge, competes to record the
// edge's id in menorAresta[v] (v = the smaller endpoint) if it is the
// cheapest edge seen for v, breaking cost ties by the smaller edge id.
// Lock-free update through an atomicCAS retry loop.
// ==============================================================================
__global__ void EncontraMenorAresta1(aresta* arestas, float *custos, int* menorAresta, int m, int n)
{
int id = threadIdx.x + blockDim.x * blockIdx.x;
int x, aux;
int old;
if( id < m )
{
// Edges with v == n are dead (removed by a previous contraction)
if(arestas[id].v != n)
{
x = arestas[id].v;
// printf("1.1 Aresta %d v = %d u = %d custo = %f menorAresta[%d] = %d\n", id, arestas[id].v, arestas[id].u, custos[id], x, menorAresta[x]);
if((menorAresta[x] == -1) ||
(custos[menorAresta[x]] > custos[id]) ||
((custos[menorAresta[x]] == custos[id]) && (menorAresta[x] > id)))
{
aux = -1;
// atomicCAS = atomic Compare And Swap
// reads the value addressed by menorAresta[x] and stores it in old.
// computes (old == aux ? my_id : old)
// i.e. if (old == aux) then menorAresta[x] = id
// else menorAresta[x] = old
// returns old
old = atomicCAS(&menorAresta[x], aux, id);
if(old != aux)
{
// Keep retrying while the current holder is worse than this edge
while(((custos[old] > custos[id]) || ((custos[old] == custos[id]) && (old > id))) && (old != aux))
{
aux = atomicCAS(&menorAresta[x], old, id);
if(old != aux)
{
old = aux;
aux = -1;
}
}// end while((old > id) && (old != aux))
} // end if(old_id != aux)
// printf("1.2 Aresta %d v = %d u = %d custo = %f menorAresta[%d] = %d\n", id, arestas[id].v, arestas[id].u, custos[id], x, menorAresta[x]);
} // end if((menorAresta[x] == -1) || (menorAresta[x] > id))
} // end if(arestas[id].v != n)
} //end if( id < m )
}
// ==============================================================================
// EncontraMenorAresta2 function: same competition as EncontraMenorAresta1,
// but for the larger endpoint u of each edge, so both endpoints of every
// edge end up considered.
// ==============================================================================
__global__ void EncontraMenorAresta2(aresta* arestas, float *custos, int* menorAresta, int m, int n)
{
int id = threadIdx.x + blockDim.x * blockIdx.x;
int x, aux;
int old;
if( id < m )
{
// Edges with v == n are dead (removed by a previous contraction)
if(arestas[id].v != n)
{
x = arestas[id].u;
// printf("2.1 Aresta %d v = %d u = %d custo = %f menorAresta[%d] = %d\n", id, arestas[id].v, arestas[id].u, custos[id], x, menorAresta[x]);
if((menorAresta[x] == -1) ||
(custos[menorAresta[x]] > custos[id]) ||
((custos[menorAresta[x]] == custos[id]) && (menorAresta[x] > id)))
{
aux = -1;
// atomicCAS = atomic Compare And Swap
// reads the value addressed by menorAresta[x] and stores it in old.
// computes (old == aux ? my_id : old)
// i.e. if (old == aux) then menorAresta[x] = id
// else menorAresta[x] = old
// returns old
old = atomicCAS(&menorAresta[x], aux, id);
if(old != aux)
{
// Keep retrying while the current holder is worse than this edge
while(((custos[old] > custos[id]) || ((custos[old] == custos[id]) && (old > id))) && (old != aux))
{
aux = atomicCAS(&menorAresta[x], old, id);
if(old != aux)
{
old = aux;
aux = -1;
}
}// end while((old > id) && (old != aux))
} // end if(old_id != aux)
// printf("2.2 Aresta %d v = %d u = %d custo = %f menorAresta[%d] = %d\n", id, arestas[id].v, arestas[id].u, custos[id], x, menorAresta[x]);
} // if((menorAresta[x] == -1) || (menorAresta[x] > id))
} // end if(arestas[id].v != n)
} //end if( id < m )
}
// ==============================================================================
// MarcarArestas_Strut function: each of the n vertices bumps grau_st on its
// selected cheapest edge; edges whose count reaches 1 or 2 form the strut.
// NOTE(review): assumes menorAresta[id] != -1, i.e. every live vertex has
// at least one incident edge — verify for disconnected inputs.
// ==============================================================================
__global__ void MarcarArestas_Strut(aresta* d_arestas, int* d_menorAresta, int n)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= n) return;
    atomicInc(&d_arestas[d_menorAresta[id]].grau_st, UINT_MAX);
}
// ==============================================================================
// Calcula_num_zerodiff function: appends every strut edge (grau_st > 0) to
// the solution set and counts edges selected by both endpoints
// (grau_st == 2), which gives the number of components after contraction.
// ==============================================================================
__global__ void Calcula_num_zerodiff(aresta* arestas, float *custos, int m, int* SolutionEdgeSet, unsigned int* SolutionSize, unsigned int* num_zerodiff, double* SolutionVal)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= m) return;
    if (arestas[id].grau_st == 0) return;
    // Reserve a slot in the solution and record this edge's id.
    unsigned int slot = atomicInc(&SolutionSize[0], UINT_MAX);
    SolutionEdgeSet[slot] = id;
    if (arestas[id].grau_st == 2)
        atomicInc(&num_zerodiff[0], UINT_MAX);
}
// ==============================================================================
// Inicializa_arestasE_C function: builds the edge list used for the
// connected-component computation (each strut edge emitted exactly once)
// and initializes every vertex's component label C[id] to itself.
// ==============================================================================
__global__ void Inicializa_arestasE_C(aresta *arestas, int *menorAresta, int n, int *C, aresta_E *arestasE, unsigned int *arestasE_size)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= n) return;
    aresta e = arestas[menorAresta[id]];
    // grau_st == 1: only this vertex picked the edge, so it emits it.
    // grau_st == 2: both endpoints picked it; only endpoint v emits, so the
    // edge is appended exactly once.
    if ((e.grau_st == 1) || ((e.grau_st == 2) && (id == e.v))) {
        unsigned int slot = atomicInc(&arestasE_size[0], UINT_MAX);
        arestasE[slot].v1 = e.v;
        arestasE[slot].v2 = e.u;
    }
    C[id] = id;
}
// ==============================================================================
// AtualizaC_1 function: one hooking step of the label-propagation
// connected-components pass — each edge pulls both endpoints' labels down
// to the smaller one and raises the flag m[0] when a label changed, so the
// host keeps iterating until a fixed point.
// ==============================================================================
__global__ void AtualizaC_1(aresta_E *arestasE, int* C, int *m, int n)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= n) return;
    int v1 = arestasE[id].v1;
    int v2 = arestasE[id].v2;
    int c1 = C[v1];
    int c2 = C[v2];
    if (c1 == c2) return;
    if (c1 < c2) {
        atomicMin(&C[v2], c1);
    } else {
        atomicMin(&C[v1], c2);
    }
    m[0] = 1;
}
// ==============================================================================
// DefineNovosVU function: every component root (C[id] == id) atomically
// claims a dense new vertex number, stores it in C[id], and is flagged in
// `marcador` so the follow-up pass knows which labels are already final.
// ==============================================================================
__global__ void DefineNovosVU(int* C, int n, char *marcador, unsigned int *num_comp)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= n) return;
    if (C[id] != id) return;
    unsigned int fresh = atomicInc(&num_comp[0], UINT_MAX);
    C[id] = (int)fresh;
    marcador[id] = 1;
}
// ==============================================================================
// AtualizaC_3 function: non-root vertices (marcador == 0) replace their
// label with their root's freshly assigned dense vertex number.
// ==============================================================================
__global__ void AtualizaC_3(int* C, int n, char *marcador)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= n) return;
    if (marcador[id] == 0)
        C[id] = C[C[id]];
}
// ==============================================================================
// MarcarArestas function: relabels every live edge with its endpoints' new
// component numbers; edges that became internal to one component are marked
// dead by setting both endpoints to n. grau_st is reset for the next round.
// ==============================================================================
__global__ void MarcarArestas(aresta *arestas, int* C, int m, int n)
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id >= m) return;
    if (arestas[id].v == n) return;   // already removed in an earlier round
    int newV = C[arestas[id].v];
    int newU = C[arestas[id].u];
    if (newV != newU) {
        arestas[id].v = newV;
        arestas[id].u = newU;
    } else {
        arestas[id].v = n;
        arestas[id].u = n;
    }
    arestas[id].grau_st = 0;
}
|
22,987 | #include "includes.h"
// Blends two images into a grayscale output: the first `coefficient`
// fraction of pixels (row-major order) takes its value from the first
// image, the remainder from the second; the chosen channel is replicated
// across x/y/z to produce gray.
__global__ void blendingGray(uchar3 *input, uchar3 *input2, uchar3 *output,int width, int height,float coefficient) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= width) return;
    if (row >= height) return;
    int idx = row * width + col;
    // Pixel index at which the source switches from image 1 to image 2.
    int cutoff = (int)(coefficient * (float)(width * height));
    unsigned char g = (idx <= cutoff) ? input[idx].x : input2[idx].x;
    output[idx].x = g;
    output[idx].y = g;
    output[idx].z = g;
}
22,988 | #include<stdio.h>
#include<stdlib.h>
int N = 1<<10;
// Element-wise vector add: b[i] = a[i] + b[i] for i in [0, n).
// Fix: use the global thread index (block + thread). The original used
// threadIdx.x alone, which limited coverage to one block's worth of
// elements and made any multi-block launch re-process (and race on) the
// same indices.
__global__ void add(int *a, int *b, int n){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n)
        b[index] = a[index] + b[index];
}
// Host driver: fills A[i] = -i and B[i] = i*i, adds them on the GPU into B,
// and copies the result back.
int main(void){
    int *A,*B;   // host buffers
    int *a,*b;   // device buffers
    A = (int*)malloc(N*sizeof(int));
    B = (int*)malloc(N*sizeof(int));
    cudaMalloc(&a, N*sizeof(int));
    cudaMalloc(&b, N*sizeof(int));
    for(int i=0;i<N;i++){
        A[i]=-i;
        B[i]=i*i;
    }
    cudaMemcpy(a, A, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b, B, N*sizeof(int), cudaMemcpyHostToDevice);
    // Bug fix: the original launched <<<1,512>>> for N = 1<<10 = 1024
    // elements, so the upper half of the vector was never added. One block
    // of N threads covers every element (N = 1024 is within the per-block
    // thread limit).
    add<<<1,N>>>(a,b,N);
    cudaMemcpy(B, b, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(a);
    cudaFree(b);
    free(A);
    free(B);
    return 0;
}
|
22,989 | #include <cuda_runtime_api.h>
#include <stdio.h>
#include "cputime.h"
// float *accnew_gpu;
// float *velnew_gpu;
float *parforce_gpu;
float *parpot_gpu;
float *parvel_gpu;
float *acc_gpu;
float *force_gpu;
float *pos_gpu;
float *vel_gpu;
/*
extern "C"
double cputime()
{
struct timeval tp;
int rtn;
rtn=gettimeofday(&tp, NULL);
return ((double)tp.tv_sec+(1.e-6)*tp.tv_usec);
}
*/
// Allocates the per-particle device buffers: nd*np floats for each vector
// field, plus one potential value per particle.
extern "C"
void allocMemOnGPU(int nd, int np)
{
    size_t vecBytes = (size_t)nd * np * sizeof(float);
    cudaMalloc((void**)(&parforce_gpu), vecBytes);
    cudaMalloc((void**)(&parpot_gpu), np * sizeof(float));
    cudaMalloc((void**)(&parvel_gpu), vecBytes);
    cudaMalloc((void**)(&acc_gpu), vecBytes);
    cudaMalloc((void**)(&force_gpu), vecBytes);
    cudaMalloc((void**)(&pos_gpu), vecBytes);
    cudaMalloc((void**)(&vel_gpu), vecBytes);
}
// Copies host-side particle state (accelerations, forces, velocities,
// positions) into the corresponding device buffers.
extern "C"
void copyDataToGPU(float *h_acc, float *h_force, float *h_vel, float *h_pos, int nd, int np)
{
    size_t vecBytes = (size_t)nd * np * sizeof(float);
    cudaMemcpy(acc_gpu, h_acc, vecBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(force_gpu, h_force, vecBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(vel_gpu, h_vel, vecBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(pos_gpu, h_pos, vecBytes, cudaMemcpyHostToDevice);
}
// Utility kernel: each thread copies one element from g_idata to g_odata.
// (The trailing barrier has no cross-thread effect here; kept for parity
// with the original.)
__global__ void dummyCopy(float *g_idata, float *g_odata)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    g_odata[tid] = g_idata[tid];
    __syncthreads();
}
// START K1 - Compute Force on Particle
// For every particle idx, computes its pairwise force contribution against
// particle currentMoleculeIndex (truncated-sine potential) plus the pair
// potential-energy term, writing per-particle partials to parforce_gpu
// (SoA layout: x, y, z planes of np floats each) and parpot_gpu.
// Fix: all math is now single precision (sinf/sqrtf, float literals). The
// original mixed double literals (2.0, 0.5) and double sin/sqrt into an
// all-float kernel, forcing costly double-precision promotion per thread.
__global__ void GPU_compute_forceonparticle_KERNEL(int np, int numberOfThreads, int currentMoleculeIndex, const float PI2,
float * pos_gpu, float * parforce_gpu, float * parpot_gpu)
{
    float posx, posy, posz, dist, dist2;
    __shared__ float currentposx,currentposy,currentposz;
    int idx = blockIdx.x * blockDim.x + threadIdx.x ;
    if (idx >= numberOfThreads) return ;
    posx = pos_gpu[idx];
    posy = pos_gpu[idx+np];
    posz = pos_gpu[idx+np+np];
    // Every thread stores the same values, so this write pattern, though
    // racy, is benign (as in the original).
    currentposx = pos_gpu[currentMoleculeIndex];
    currentposy = pos_gpu[currentMoleculeIndex+np];
    currentposz = pos_gpu[currentMoleculeIndex+np+np];
    posx = currentposx - posx;
    posy = currentposy - posy;
    posz = currentposz - posz;
    dist = sqrtf(posx*posx + posy*posy + posz*posz);
    // Truncate the interaction distance at PI2.
    dist2 = (dist < PI2) ? dist : PI2;
    if(idx==currentMoleculeIndex){
        // No self-interaction.
        posx=0.0f;
        posy=0.0f;
        posz=0.0f;
        dist=0.0f;
    }else{
        float s = sinf(2.0f * dist2) / dist;
        posx= - (posx * s);
        posy= - (posy * s);
        posz= - (posz * s);
        float sd = sinf(dist2);
        dist= 0.5f * sd * sd;
    }
    parforce_gpu[idx] = posx;
    parforce_gpu[idx+np] = posy;
    parforce_gpu[idx+np+np] = posz;
    parpot_gpu[idx] = dist;
}
// Host wrapper for K1: launches the per-particle force kernel over np
// threads (blocks of 128) and waits for completion. `nd`, `step` and
// `time_elapsed` are kept for interface compatibility with the disabled
// timing path of the original.
extern "C"
void GPU_compute_forceonparticle(int nd, int np, int currentMoleculeIndex, const float PI2, int step, double *time_elapsed)
{
    int BLOCK_SIZE = 128;
    int numberOfThreads = np;
    int numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
    dim3 dimGrid(numBlocks) ;
    dim3 dimBlock(BLOCK_SIZE) ;
    GPU_compute_forceonparticle_KERNEL<<<dimGrid,dimBlock>>>(np, numberOfThreads, currentMoleculeIndex, PI2, pos_gpu, parforce_gpu, parpot_gpu);
    // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is its supported, behaviorally identical replacement.
    cudaDeviceSynchronize();
}
//END K1 - Compute Force on Particle
//START - GENERAL REDUCTION KERNEL THAT USES SHARED MEMORY
// Helper that exposes the dynamically allocated shared-memory buffer as a
// typed pointer. The extern array must be declared with one fixed element
// type (int) and reinterpreted, because C++ forbids multiple extern
// __shared__ declarations of different types aliasing the same buffer.
template<class T>
struct SharedMemory
{
__device__ inline operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
// Block-wise tree reduction in shared memory: each block sums its slice of
// g_idata and writes the partial sum to g_odata[blockIdx.x].
// Requires blockDim.x * sizeof(T) bytes of dynamic shared memory.
// NOTE(review): the halving loop assumes blockDim.x is a power of two —
// confirm callers never launch other block sizes.
template <class T>
__global__ void reduce_wShrdMem(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// load shared mem (threads past n contribute the additive identity, 0)
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem (pairwise, halving the stride each step)
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
__syncthreads();
}
//END - GENERAL REDUCTION KERNEL THAT USES SHARED MEMORY
// K2: reduces the np per-particle potentials in parpot_gpu to a single
// host-side float by repeatedly launching reduce_wShrdMem until one block
// remains, then summing the last <= BLOCK_SIZE partials on the CPU.
// `nd`, `step` and `time_elapsed` belong to the disabled timing path.
// NOTE(review): the reduction runs in place (parpot_gpu is both input and
// output), so block b writes g_odata[b] while other blocks may still read
// that element as input — a potential inter-block race; verify on target
// hardware or use a separate output buffer.
// NOTE(review): destroys the contents of parpot_gpu; callers must
// repopulate the buffer before reusing it.
float GPU_accumulate_parpot_wShrdMem(int nd, int np, int step, double *time_elapsed)
{
int BLOCK_SIZE = 128;
int numberOfThreads = np;
int numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
float *parpot, sum;
dim3 dimBlock = dim3(BLOCK_SIZE, 1, 1);
dim3 dimGrid;
int smemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
/* if(step==4)
{
numBlocksT = numBlocks;
numberOfThreadsT = numberOfThreads;
cudaMalloc ((void**)(&parpotT_gpu), np*sizeof(float));
cudaMemcpy(parpotT_gpu, parpot_gpu, np *sizeof(float), cudaMemcpyDeviceToDevice);
for(i=0; i<iter; i++)
{
cudaMemcpy(parpot_gpu, parpotT_gpu, np *sizeof(float), cudaMemcpyDeviceToDevice);
numBlocks = numBlocksT;
numberOfThreads = numberOfThreadsT;
t0=cputime();
while(numberOfThreads>512)
{
dimGrid = dim3(numBlocks, 1, 1);
reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parpot_gpu, parpot_gpu, numberOfThreads);
cudaThreadSynchronize();
numberOfThreads = numBlocks;
numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
}
t1=cputime();
time_sum1 += (t1-t0)*iCPS*1000*1000;
}
cudaFree(parpotT_gpu);
parpot = (float*) malloc(numberOfThreads * sizeof (float));
cudaMemcpy(parpot, parpot_gpu, numberOfThreads*sizeof(float), cudaMemcpyDeviceToHost);
for(i=0; i<iter; i++)
{
sum = 0.0;
t0=cputime();
for(int j=0; j<numberOfThreads; j++)
sum += parpot[j];
t1=cputime();
time_sum2 += (t1-t0)*iCPS*1000*1000;
}
*time_elapsed = time_sum1/iter + time_sum2/iter;
}
else
*/ {
// Keep reducing until a single block covers the remaining partials.
while(numBlocks > 1)
//while(numberOfThreads>512)
{
dimGrid = dim3(numBlocks, 1, 1);
reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parpot_gpu, parpot_gpu, numberOfThreads);
cudaThreadSynchronize();
numberOfThreads = numBlocks;
numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
}
// Final partial sums are summed on the host.
parpot = (float*) malloc(numberOfThreads * sizeof (float));
cudaMemcpy(parpot, parpot_gpu, numberOfThreads*sizeof(float), cudaMemcpyDeviceToHost);
sum = 0.0;
for(int i=0; i<numberOfThreads; i++)
sum += parpot[i];
}
free(parpot);
return sum;
}
//END K2 - Accumulate PE with/without shared memory
//START K3 - Accumulate Force with/without shared memory
// Accumulates the per-particle force components in parforce_gpu (three
// planes of np floats) into force_gpu at the slots for
// currentMoleculeIndex: in-place device block reductions shrink each
// plane until one block suffices, then a final reduction writes the
// three totals straight into force_gpu.
// nd is unused; step and time_elapsed belonged to a disabled
// benchmarking path (removed dead commented-out code that referenced
// undeclared variables) and are kept only for interface compatibility.
void GPU_accumulate_parforce_wShrdMem(int nd, int np, int currentMoleculeIndex, int step, double *time_elapsed)
{
    int BLOCK_SIZE = 128;
    int numberOfThreads = np;
    int numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1);
    int smemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(float) : BLOCK_SIZE * sizeof(float);
    dim3 dimBlock = dim3(BLOCK_SIZE, 1, 1);
    dim3 dimGrid;
    // In-place partial reductions of each of the three coordinate planes.
    // NOTE(review): as with the potential-energy path, this is an in-place
    // multi-block reduction -- confirm the inter-block race is benign.
    while (numBlocks > 1)
    {
        dimGrid = dim3(numBlocks, 1);
        reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parforce_gpu, parforce_gpu, numberOfThreads);
        cudaThreadSynchronize();
        reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parforce_gpu+(np), parforce_gpu+(np), numberOfThreads);
        cudaThreadSynchronize();
        reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parforce_gpu+(np*2), parforce_gpu+(np*2), numberOfThreads);
        cudaThreadSynchronize();
        numberOfThreads = numBlocks;
        numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1);
    }
    // Single-block final pass: x component lands at currentMoleculeIndex,
    // y and z at offsets np and 2*np past it (matching the original code).
    dimGrid = dim3(numBlocks, 1);
    reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parforce_gpu, force_gpu+currentMoleculeIndex, numberOfThreads);
    cudaThreadSynchronize();
    reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parforce_gpu+(np), force_gpu+(np+currentMoleculeIndex), numberOfThreads);
    cudaThreadSynchronize();
    reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parforce_gpu+(np*2), force_gpu+(np+np+currentMoleculeIndex), numberOfThreads);
    cudaThreadSynchronize();
}
//END K3 - Accumulate Force with/without shared memory
//Accumulates PE and Force using K2 and K3
// Convenience entry point: accumulates the per-particle forces for
// currentMoleculeIndex into force_gpu, then returns the accumulated
// potential energy. time_elap1/time_elap2 are passed through to the two
// workers (whose instrumented timing paths are currently disabled).
extern "C"
float GPU_seq_wShrdMem_accumulate_parpot_and_parforce(int nd, int np, int currentMoleculeIndex, int step, double *time_elap1, double *time_elap2)
{
GPU_accumulate_parforce_wShrdMem(nd,np,currentMoleculeIndex, step, time_elap1);
return GPU_accumulate_parpot_wShrdMem(nd, np, step, time_elap2);
}
//START K4 - Compute and accumulate KE without shared memory
//Compute KE with shared memory
// Block-wise sum-of-squares reduction: each thread squares its element,
// then the block tree-reduces and writes one partial sum to
// g_odata[blockIdx.x]. Used to accumulate kinetic energy from velocities.
// Preconditions: blockDim.x is a power of two and dynamic shared memory
// is >= blockDim.x * sizeof(T).
template <class T>
__global__ void GPU_compute_KE_wShrdMem(T *g_idata, T *g_odata, unsigned int n)
{
    T *sdata = SharedMemory<T>();
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    // Square while loading; out-of-range threads contribute 0. Squaring a
    // thread's OWN element needs no barrier, so the original's extra
    // __syncthreads() between the load and the square was folded away.
    T v = (i < n) ? g_idata[i] : 0;
    sdata[tid] = v * v;
    __syncthreads();
    // tree reduction in shared memory
    for (unsigned int s = blockDim.x/2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write this block's result to global memory (the original's dead
    // trailing __syncthreads() at kernel exit was removed)
    if (tid == 0)
        g_odata[blockIdx.x] = sdata[0];
}
extern "C"
float GPU_accumulate_KE_wShrdMem(int nd, int np, float mass, int step, double *time_elapsed)
{
int BLOCK_SIZE = 128;
int numberOfThreads = nd*np;
int numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
float* parvel, sum;
dim3 dimBlock = dim3(BLOCK_SIZE, 1, 1);
dim3 dimGrid;
int smemSize = (BLOCK_SIZE <= 32) ? 2 * BLOCK_SIZE * sizeof(double) : BLOCK_SIZE * sizeof(double);
dimGrid = dim3(numBlocks, 1, 1);
float val[100];
cudaMemcpy(val,vel_gpu,100*sizeof(float),cudaMemcpyDeviceToHost);
/* if(step == 4)
{
t0 = cputime();
for(i=0; i<iter; i++)
{
GPU_compute_KE_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(vel_gpu, parvel_gpu, numberOfThreads);
cudaThreadSynchronize();
}
t1=cputime();
time_sum1 = (t1-t0)*iCPS*1000*1000/iter;
numberOfThreads = numBlocks;
numberOfThreadsT = numberOfThreads;
numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
numBlocksT = numBlocks;
cudaMalloc ((void**)(&parvelT_gpu), nd*np*sizeof(float));
cudaMemcpy(parvelT_gpu, parvel_gpu, nd * np *sizeof(float), cudaMemcpyDeviceToDevice);
for(i=0; i<iter; i++)
{
numBlocks = numBlocksT;
numberOfThreads = numberOfThreadsT;
cudaMemcpy(parvel_gpu, parvelT_gpu, nd * np *sizeof(float), cudaMemcpyDeviceToDevice);
t0=cputime();
while(numBlocks>1)
{
dimGrid = dim3(numBlocks, 1, 1);
reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parvel_gpu, parvel_gpu, numberOfThreads);
cudaThreadSynchronize();
numberOfThreads = numBlocks;
numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
}
t1=cputime();
time_sum2 += (t1-t0)*iCPS*1000*1000;
}
cudaFree(parvelT_gpu);
parvel = (float*) malloc(numberOfThreads * sizeof (float));
cudaMemcpy(parvel, parvel_gpu, numberOfThreads*sizeof(float), cudaMemcpyDeviceToHost);
for(i=0; i<iter; i++)
{
t0=cputime();
sum = 0.0;
for(int j=0; j<numberOfThreads; j++)
{
sum += parvel[j];
}
t1=cputime();
time_sum3 += (t1-t0)*iCPS*1000*1000;
}
*time_elapsed = time_sum1 + time_sum2/iter + time_sum3/iter;
}
else
*/ {
GPU_compute_KE_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(vel_gpu, parvel_gpu, numberOfThreads);
cudaThreadSynchronize();
numberOfThreads = numBlocks;
numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
while(numBlocks>1)
{
dimGrid = dim3(numBlocks, 1, 1);
reduce_wShrdMem<float><<< dimGrid, dimBlock, smemSize >>>(parvel_gpu, parvel_gpu, numberOfThreads);
cudaThreadSynchronize();
numberOfThreads = numBlocks;
numBlocks = numberOfThreads / BLOCK_SIZE + (numberOfThreads % BLOCK_SIZE == 0 ? 0 : 1) ;
}
parvel = (float*) malloc(numberOfThreads * sizeof (float));
cudaMemcpy(parvel, parvel_gpu, numberOfThreads*sizeof(float), cudaMemcpyDeviceToHost);
sum = 0.0;
for(int i=0; i<numberOfThreads; i++)
sum += parvel[i];
}
free(parvel);
return 0.5 * mass * sum;
}
//END K4 - Compute and accumulate KE with shared memory
//START K5 - Update position
// Advances positions one Verlet step: x += v*dt + 0.5*a*dt^2.
// One thread per array element; threads past numberOfThreads exit early.
__global__ void GPU_updatePos(int numberOfThreads, float dt, float * pos_gpu, const float * vel_gpu, const float * acc_gpu)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numberOfThreads)
        return;
    // 0.5f keeps the expression in single precision (the original's bare
    // 0.5 promoted the arithmetic to double)
    pos_gpu[idx] += vel_gpu[idx] * dt + 0.5f * acc_gpu[idx] * dt * dt;
}
// Host wrapper: launches the position-update kernel over all nd*np
// components and waits for completion. The timing output parameters are
// currently unused (the instrumented variant is disabled); the signature
// is kept for existing callers.
extern "C"
void GPU_updatePos(int nd, int np, float dt, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
    const int threadsPerBlock = 128;
    const int total = nd * np;
    const int blocks = (total + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    GPU_updatePos<<<dim3(blocks), dim3(threadsPerBlock)>>>(total, dt, pos_gpu, vel_gpu, acc_gpu);
    cudaThreadSynchronize();
}
//END K5 - Update position
//START K6 - Update velocity
// Applies the velocity half-kick: v += 0.5*dt*(f/m + a).
// One thread per array element; threads past numberOfThreads exit early.
__global__ void GPU_updateVel(int numberOfThreads, float dt, float rmass, float * vel_gpu, const float * force_gpu, const float * acc_gpu)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numberOfThreads)
        return;
    // 0.5f keeps the expression in single precision (the original's bare
    // 0.5 promoted the arithmetic to double)
    vel_gpu[idx] += 0.5f * dt * (force_gpu[idx] * rmass + acc_gpu[idx]);
}
// Host wrapper: launches the velocity-update kernel over all nd*np
// components and waits for completion. Timing output parameters are
// unused (instrumented variant disabled); signature kept for callers.
extern "C"
void GPU_updateVel(int nd, int np, float dt, float rmass, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
    const int threadsPerBlock = 128;
    const int total = nd * np;
    const int blocks = (total + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    GPU_updateVel<<<dim3(blocks), dim3(threadsPerBlock)>>>(total, dt, rmass, vel_gpu, force_gpu, acc_gpu);
    cudaThreadSynchronize();
}
//END K6 - Update velocity
//START K7 - Update acceleration
// Recomputes accelerations from forces: a = f / m (rmass = 1/m).
// One thread per array element, bounds-guarded.
__global__ void GPU_updateAcc(int numberOfThreads, float rmass, float * acc_gpu, const float * force_gpu)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numberOfThreads) {
        acc_gpu[i] = force_gpu[i] * rmass;
    }
}
// Host wrapper: launches the acceleration-update kernel over all nd*np
// components and waits for completion. Timing output parameters are
// unused (instrumented variant disabled); signature kept for callers.
extern "C"
void GPU_updateAcc(int nd, int np, float rmass, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
    const int threadsPerBlock = 128;
    const int total = nd * np;
    const int blocks = (total + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
    GPU_updateAcc<<<dim3(blocks), dim3(threadsPerBlock)>>>(total, rmass, acc_gpu, force_gpu);
    cudaThreadSynchronize();
}
//END K7 - Update acceleration
|
22,990 | #include <iostream>
namespace ckt {
void check_cuda_error_always(const char *kernelname, const char *file, int line_no, cudaStream_t stream);
void check_cuda_error(const char *kernelname, const char *file, int line_no, cudaStream_t stream )
{
#ifdef DEBUG
check_cuda_error_always(kernelname, file, line_no, stream);
#endif
}
void check_cuda_error_always(const char *kernelname, const char *file, int line_no, cudaStream_t stream)
{
cudaError_t err;
if (stream)
err = cudaStreamSynchronize(stream);
else
err = cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
std::cerr << "CUDA error at kernel " << kernelname << " @ file " << file << " line " << line_no << " for reason: " <<
cudaGetErrorString(err) << std::endl;
}
}
}
|
22,991 | #include <stdio.h>
#include <cuda_runtime.h>
// Wraps a CUDA runtime call: on any status other than cudaSuccess,
// prints the file/line and the error string, then terminates.
#define CHECK(call){ \
const cudaError_t error = call; \
if( error != cudaSuccess ){ \
printf("Error: %s:%d\n", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Each launched thread prints a greeting with its thread index.
// Device printf output is buffered and flushed at the next sync point.
__global__ void helloFromGPU(void){
printf("Hello from GPU! %d\n", threadIdx.x);
}
// Launches one block of 10 threads of helloFromGPU and resets the device
// (which synchronizes and flushes the device-side printf buffer).
int main(void){
    printf("Hello from CPU!\n");
    helloFromGPU <<< 1,10 >>>();
    // Kernel launches return no status directly; surface any
    // launch-configuration error explicitly.
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceReset());
    return 0;
}
|
22,992 | #include "includes.h"
// Scans a rows x cols population bitmap and produces, per output column,
// bitmasks of loci that are fixed (AND of all rows == 1), lost (OR of all
// rows == 0), and free (either). Even threadIdx.y rows accumulate the AND
// ("fixed") side, odd rows the OR ("lost") side; eoffset points each
// thread at its partner row's shared slot.
// Assumes blockDim.x * blockDim.y <= MAX_THREADS and blockDim.y >= 2.
// NOTE(review): the barriers inside both while-loops are safe only
// because every thread in the block executes the same number of
// iterations (the trip counts depend solely on rows/blockDim, which are
// uniform) -- confirm this invariant is preserved if the launch changes.
__global__ void update_population_metadata( unsigned int * pop , unsigned int rows , unsigned int cols , unsigned int * free , unsigned int * lost , unsigned int * fixed ) {
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ unsigned int sPop[ MAX_THREADS ];
__shared__ unsigned int sMeta[ MAX_THREADS ];
bool is_fixed_thread = ((threadIdx.y & 1) == 0);
// partner slot one row-stride away; the unsigned "-blockDim.x" wraps
// intentionally to index the previous row's slot
unsigned int eoffset = tid + ((is_fixed_thread) ? blockDim.x : -blockDim.x);
// AND-identity (all ones, via unsigned -1) for fixed threads,
// OR-identity (0) for lost threads
sMeta[ tid ] = ((is_fixed_thread) ? -1 : 0);
__syncthreads();
unsigned int b_offset = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int offset = threadIdx.y * cols + b_offset;
unsigned int mat_size = rows * cols;
unsigned int i = 0;
// scan population
while( i < rows ) {
sPop[ tid ] = ((offset < mat_size) ? pop[offset] : 0 );
__syncthreads();
unsigned int d = sPop[tid], e = sPop[eoffset];
__syncthreads();
unsigned int res = sMeta[ tid ];
if( offset < mat_size ) {
// should only fail when rows is NOT a multiple of THREAD_ROWS
if( is_fixed_thread ) {
res &= (d & e);
} else {
res |= (d | e);
}
}
__syncthreads();
sMeta[ tid ] = res;
__syncthreads();
i += blockDim.y;
offset += blockDim.y * cols;
}
// reduce the fixed and lost lists
i = 4;
while( i <= 32 ) {
unsigned int masked = (threadIdx.y & (i - 1));
unsigned int t = ((tid + (i / 2) * blockDim.x) & (MAX_THREADS - 1));
// how will branches execute?
// assuming that threads are grouped into warps according to their threadIdx.x coordinate
// all threads in a warp should execute same logic
//
unsigned int res = sMeta[tid], v = sMeta[t];
__syncthreads();
if( masked == 0 ) {
res &= v;
} else if( masked == 1 ) {
res |= v;
}
__syncthreads();
sMeta[ tid ] = res;
__syncthreads();
i <<= 1;
}
// use a single warp to write shared data back to global memory
if( threadIdx.y == 0 ) {
unsigned int fxd = sMeta[ threadIdx.x ];
// lost is the complement of the OR accumulated in the second row slot
unsigned int lst = (~sMeta[ blockDim.x + threadIdx.x ]);
free[ b_offset ] = (fxd | lst);
fixed[ b_offset ] = fxd;
lost[ b_offset ] = lst;
}
__syncthreads();
}
22,993 | #include "includes.h"
// Converts a packed-RGB image (R in the low byte of each 32-bit word) to
// CIE L*a*b* with a D65 white point. One thread per pixel on a 2D grid;
// out-of-image threads do nothing.
// All literals now carry the f suffix: the original's bare double
// literals forced every per-pixel multiply into double precision.
__global__ void rgbUtoLab3F_kernel(int width, int height, float gamma, unsigned int* rgbU, float* devL, float* devA, float* devB) {
    int x0 = blockDim.x * blockIdx.x + threadIdx.x;
    int y0 = blockDim.y * blockIdx.y + threadIdx.y;
    if ((x0 < width) && (y0 < height)) {
        int index = y0 * width + x0;
        unsigned int rgb = rgbU[index];
        // unpack and normalize channels to [0,1]
        float r = (float)(rgb & 0xff) / 255.0f;
        float g = (float)((rgb & 0xff00) >> 8) / 255.0f;
        float b = (float)((rgb & 0xff0000) >> 16) / 255.0f;
        // linearize with the supplied gamma
        r = powf(r, gamma);
        g = powf(g, gamma);
        b = powf(b, gamma);
        // linear RGB -> CIE XYZ
        float x = (0.412453f * r) + (0.357580f * g) + (0.180423f * b);
        float y = (0.212671f * r) + (0.715160f * g) + (0.072169f * b);
        float z = (0.019334f * r) + (0.119193f * g) + (0.950227f * b);
        /* D65 white point reference */
        const float x_ref = 0.950456f;
        const float y_ref = 1.000000f;
        const float z_ref = 1.088754f;
        /* threshold below which the Lab transfer function is linear */
        const float threshold = 0.008856f;
        x = x / x_ref;
        y = y / y_ref;
        z = z / z_ref;
        float fx =
            (x > threshold) ? powf(x, (1.0f/3.0f)) : (7.787f*x + (16.0f/116.0f));
        float fy =
            (y > threshold) ? powf(y, (1.0f/3.0f)) : (7.787f*y + (16.0f/116.0f));
        float fz =
            (z > threshold) ? powf(z, (1.0f/3.0f)) : (7.787f*z + (16.0f/116.0f));
        /* compute Lab color value */
        devL[index] =
            (y > threshold) ? (116.0f*powf(y, (1.0f/3.0f)) - 16.0f) : (903.3f*y);
        devA[index] = 500.0f * (fx - fy);
        devB[index] = 200.0f * (fy - fz);
    }
}
22,994 | #include <stdio.h>
// Element-wise add using one BLOCK per element (block index selects the
// element). No bounds guard: the launch must supply exactly one block
// per valid element.
__global__ void vecAdd1(int *A, int *B,int *C){
int id = blockIdx.x;
C[id] = A[id] + B[id];
}
// Element-wise add using one THREAD per element within a single block.
// No bounds guard: the launch must supply exactly one thread per element.
__global__ void vecAdd2(int *A, int *B, int *C){
int id = threadIdx.x;
C[id] = A[id] + B[id];
}
// Element-wise add with a general grid of blocks x threads.
// No bounds guard: every launched thread indexes the arrays, so the
// device buffers must cover gridDim.x * blockDim.x elements.
__global__ void vecAdd3(int *A, int *B, int *C){
int id = blockIdx.x*blockDim.x + threadIdx.x;
C[id] = A[id] + B[id];
}
// Reads two integer vectors from stdin and adds them three ways:
// one block per element, one thread per element, and a mixed grid.
int main(){
    int A[100], B[100], C[100], n, i, size;
    printf("Enter value for n: ");
    scanf("%d", &n);
    // keep n within the fixed-size host arrays (the original never checked)
    if (n < 1 || n > 100) {
        printf("n must be between 1 and 100\n");
        return 1;
    }
    printf("Enter the values for vector A:\n");
    for (i = 0; i < n; i++)
        scanf("%d", &A[i]);
    printf("Enter the values for vector B:\n");
    for (i = 0; i < n; i++)
        scanf("%d", &B[i]);
    int *da, *db, *dc;
    size = sizeof(int) * n;
    // Round the device allocations up to a multiple of 3 elements so the
    // unguarded 3-thread-per-block launch below can safely touch the tail.
    int padded = ((n + 2) / 3) * 3;
    int psize = sizeof(int) * padded;
    cudaMalloc((void **)&da, psize);
    cudaMalloc((void **)&db, psize);
    cudaMalloc((void **)&dc, psize);
    cudaMemcpy(da, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, B, size, cudaMemcpyHostToDevice);
    printf("Result through %d blocks:\n", n);
    vecAdd1<<<n, 1>>>(da, db, dc);
    cudaMemcpy(C, dc, size, cudaMemcpyDeviceToHost);
    for (i = 0; i < n; i++)
        printf("%d ", C[i]);
    printf("\nResult through %d threads:\n", n);
    vecAdd2<<<1, n>>>(da, db, dc);
    cudaMemcpy(C, dc, size, cudaMemcpyDeviceToHost);
    for (i = 0; i < n; i++)
        printf("%d ", C[i]);
    printf("\nResult through varying block size:\n");
    // Bug fix: ceil(n/3) performed integer division BEFORE ceil, so when
    // n was not a multiple of 3 the last partial block was never launched
    // and the trailing elements were left uncomputed.
    vecAdd3<<<(n + 2) / 3, 3>>>(da, db, dc);
    cudaMemcpy(C, dc, size, cudaMemcpyDeviceToHost);
    for (i = 0; i < n; i++)
        printf("%d ", C[i]);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
22,995 | #include <cuda.h>
#include <cuda_fp16.h>
#include <stdio.h>
#include <stdint.h>
#include <zlib.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <fcntl.h>
#include <limits.h>
// Singly linked list node holding one input read: its NUL-terminated
// sequence text and the character count returned by getline (which
// excludes the terminator).
typedef struct __ReadSeqList {
char* sequence;
unsigned int length;
struct __ReadSeqList* next;
} ReadSeqList;
// Open-addressed k-mer hash table descriptor with 2^bits slots.
// When passed to the kernels, keys/values point to DEVICE arrays.
// NOTE(review): count is initialized to 0 and printed later but never
// incremented anywhere in the visible code.
typedef struct HashTable {
unsigned int bits;
unsigned int count;
unsigned int read_count;
unsigned long long int *keys;
unsigned int *values;
} HashTable;
__device__ const unsigned char seq_nt4_table[256] = { // translate ACGT to 0123
0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
};
// funcion para calcular un hash de 64 bits
// Thomas Wang-style 64-bit-to-32-bit integer mix hash: a sequence of
// shifts, adds, and xors that diffuses all input bits into the low word.
__device__ unsigned int hash_uint64(unsigned long long int key) {
key = ~key + (key << 21);
key = key ^ key >> 24;
key = (key + (key << 3)) + (key << 8);
key = key ^ key >> 14;
key = (key + (key << 2)) + (key << 4);
key = key ^ key >> 28;
key = key + (key << 31);
return (unsigned int)key;
}
// Allocates a zero-initialized host-side HashTable descriptor for a
// table of 2^bits slots serving read_count reads.
// The keys/values arrays are NOT allocated here; the caller attaches
// device buffers before use.
// Returns NULL on allocation failure (the original dereferenced the
// unchecked calloc result).
HashTable* HashTable_init(unsigned int bits, unsigned int read_count){
    HashTable *ht = (HashTable*)calloc(1, sizeof(HashTable));
    if (!ht) return NULL;
    ht->read_count = read_count;
    ht->bits = bits;
    ht->count = 0;  /* redundant after calloc; kept for clarity */
    return ht;
}
// Releases the host-side descriptor only; the device key/value arrays
// are freed separately by the caller. NULL is tolerated.
void HashTable_destory(HashTable *ht) {
    if (ht != NULL) {
        free(ht);
    }
}
// Maps a 32-bit hash to a slot index in [0, 2^bits) via Fibonacci
// (multiplicative) hashing: 2654435769 ~= 2^32 / golden ratio, and the
// shift keeps the top `bits` bits of the product.
__device__ unsigned int h2b(unsigned int hash, unsigned int bits) {
return hash * 2654435769U >> (32 - bits);
}
// Lock-free insert/increment of a k-mer count: linear probing from the
// hashed slot, claiming an empty slot with atomicCAS (empty == key 0)
// or bumping the count when the key already occupies the slot.
// NOTE(review): if the table is completely full the k-mer is silently
// dropped after one full probe cycle (the start slot is probed twice
// because `end` is tested only after advancing); a key value of 0 cannot
// be stored since 0 marks an empty slot; and ht->count is never updated.
__device__ void hash_insert(HashTable *ht, unsigned long long int kmer) {
unsigned int iKey, last;
bool end = false;
iKey = last = h2b(hash_uint64(kmer), ht->bits);
while (true)
{
unsigned long long int prev = atomicCAS(&(ht->keys[iKey]), 0ULL, kmer);
if (prev == 0ULL || prev == kmer) {
atomicAdd(&(ht->values[iKey]), 1U);
return;
}
if(end) return;
// Collition: Open addressing
iKey = (iKey + 1U) & ((1U << ht->bits) - 1);
// loop back
end = (iKey == last);
}
}
// insert k-mers in $seq to hash table $ht
// One thread per read: slides a k-wide window over the read, maintaining
// the 2-bit encodings of the forward strand (x[0]) and reverse
// complement (x[1]), and inserts the canonical (smaller) k-mer into the
// hash table. Runs of non-ACGT characters reset the window.
// Requires k < 32 so that (1ULL << k*2) does not overflow the mask.
// NOTE(review): the read length is hard-coded to 100 characters rather
// than taken from the input -- reads shorter than 100 will scan past
// their NUL terminator and longer reads are truncated; confirm the
// input format guarantees fixed 100-char reads.
__global__ void kernel_count_seq_kmers(HashTable *ht, int k, char **d_reads)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < ht->read_count) {
int i, l;
char *seq = d_reads[tid];
int len = 100;
unsigned long long int x[2], mask = (1ULL<<k*2) - 1, shift = (k - 1) * 2;
for (i = l = 0, x[0] = x[1] = 0; i < len; ++i) {
int c = seq_nt4_table[(unsigned char)seq[i]];
if (c < 4) { // not an "N" base
x[0] = (x[0] << 2 | c) & mask; // forward strand
x[1] = x[1] >> 2 | (unsigned long long int)(3 - c) << shift; // reverse strand
if (++l >= k) { // we find a k-mer
unsigned long long int kmer = x[0] < x[1]? x[0] : x[1];
hash_insert(ht, kmer); // only add one strand!
}
} else l = 0, x[0] = x[1] = 0; // if there is an "N", restart
}
}
}
// Builds a histogram of k-mer occurrence counts: one thread per table
// slot; nonzero counts are clamped into buckets 1..255 (255 collects
// everything >= 255) and tallied atomically into cnt_d.
__global__ void kernel_print_hist(const HashTable *ht, unsigned int *cnt_d)
{
    unsigned int slot = blockIdx.x * blockDim.x + threadIdx.x;
    if (slot >= (1U << ht->bits))
        return;
    unsigned int occurrences = ht->values[slot];
    if (occurrences == 0)
        return;
    unsigned int bucket = (occurrences < 256U) ? occurrences : 255;
    atomicAdd(&(cnt_d[bucket]), 1U);
}
// Reads one sequence per line from fn, counts canonical k-mers on the
// GPU in an open-addressed hash table of 2^p slots, and prints a
// histogram of occurrence counts.
static int count_file(const char *fn, int k, unsigned int p)
{
    HashTable *ht;
    unsigned int capacity = 1U << p;
    unsigned int cnt[256];
    unsigned int read_count = 0;
    // device-side handles
    HashTable *ht_d;
    char **reads_d;
    unsigned int *cnt_d;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;
    fp = fopen(fn, "r");
    if (fp == NULL) exit(EXIT_FAILURE);
    // Collect all lines in a singly linked list.
    ReadSeqList *current, *head;
    head = current = NULL;
    while ((read = getline(&line, &len, fp)) != -1) {
        read_count++;
        ReadSeqList *node = (ReadSeqList*)malloc(sizeof(ReadSeqList));
        // +1 for the terminating NUL: the original strlen(line)-byte
        // allocation overflowed by one byte on every line
        node->sequence = (char*)malloc(strlen(line) + 1);
        strcpy(node->sequence, line);
        node->length = read;
        node->next = NULL;
        if (head == NULL) {
            current = head = node;
        } else {
            current = current->next = node;
        }
    }
    fclose(fp);
    if (line) free(line);
    printf("%d\n", read_count);
    // Flatten the list into an array of C strings.
    unsigned int i;
    char **reads = (char**)malloc(read_count * sizeof(char*));
    for (i = 0, current = head; current; current = current->next) {
        // +1 for the NUL: length excludes the terminator
        reads[i] = (char*)malloc(current->length + 1);
        sprintf(reads[i], "%s", current->sequence);
        i++;
    }
    // Host-side descriptor; key/value arrays live on the device.
    ht = HashTable_init(p, read_count);
    unsigned long long int *keys_d;
    unsigned int *values_d;
    // allocate and zero device memory
    cudaMalloc((void **)&ht_d, sizeof(HashTable));
    cudaMalloc((void **)&keys_d, capacity * sizeof(unsigned long long int));
    cudaMalloc((void **)&values_d, capacity * sizeof(unsigned int));
    cudaMalloc((void **)&cnt_d, 256 * sizeof(unsigned int));
    cudaMemset(keys_d, 0, capacity * sizeof(unsigned long long int));
    cudaMemset(values_d, 0, capacity * sizeof(unsigned int));
    cudaMemset(cnt_d, 0, 256 * sizeof(unsigned int));
    // publish the device arrays through the device-side descriptor
    ht->keys = keys_d;
    ht->values = values_d;
    cudaMemcpy(ht_d, ht, sizeof(HashTable), cudaMemcpyHostToDevice);
    // Copy each read to the device.
    // NOTE(review): each per-read buffer is sized from head->length (the
    // FIRST read's length), i.e. all reads are assumed equally long --
    // the kernel likewise hard-codes a length of 100; confirm the input
    // format guarantees fixed-length reads.
    char **tmp = (char**)malloc(read_count * sizeof(char*));
    for (unsigned int j = 0; j < read_count; j++) {
        cudaMalloc((void **)&tmp[j], head->length * sizeof(char));
    }
    cudaMalloc((void **)&reads_d, read_count * sizeof(char*));
    cudaMemcpy(reads_d, tmp, read_count * sizeof(char*), cudaMemcpyHostToDevice);
    for (unsigned int j = 0; j < read_count; j++) {
        cudaMemcpy(tmp[j], reads[j], head->length * sizeof(char), cudaMemcpyHostToDevice);
    }
    printf("total reads: %d\n", read_count);
    // Launch kernels. Grid sizes use integer ceil-division: the original
    // ceil(read_count/thr) truncated BEFORE ceil and launched zero blocks
    // whenever read_count < thr.
    unsigned int thr = 1024;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    kernel_count_seq_kmers<<<(read_count + thr - 1) / thr, thr>>>(ht_d, k, reads_d);
    cudaDeviceSynchronize();
    kernel_print_hist<<<(capacity + thr - 1) / thr, thr>>>(ht_d, cnt_d);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    float seconds = milliseconds / 1000.0f;
    printf("GPU time: %fs\n", seconds);
    // Copy results back. Only the descriptor and the histogram are used on
    // the host. (The original also did
    //   cudaMemcpy(ht->keys, keys_d, ..., cudaMemcpyDeviceToHost)
    // where ht->keys was itself a DEVICE pointer -- a device-to-device
    // self-copy mislabeled DeviceToHost that never produced host data.)
    cudaMemcpy(ht, ht_d, sizeof(HashTable), cudaMemcpyDeviceToHost);
    ht->keys = NULL;    /* were device pointers; freed below */
    ht->values = NULL;
    cudaMemcpy(cnt, cnt_d, 256 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("COUNT: %d\n\n", ht->count);
    for (i = 1; i < 256; ++i)
        printf("%d\t%d\n", i, cnt[i]);
    // Free device memory, including the per-read buffers the original leaked.
    for (unsigned int j = 0; j < read_count; j++)
        cudaFree(tmp[j]);
    free(tmp);
    cudaFree(reads_d);
    cudaFree(ht_d);
    cudaFree(cnt_d);
    cudaFree(keys_d);
    cudaFree(values_d);
    // Free host memory. Capture next before freeing: the original freed
    // the node and then read current->next from freed memory.
    ReadSeqList *next;
    i = 0;
    for (current = head; current; current = next) {
        next = current->next;
        free(current->sequence);
        free(current);
        free(reads[i]);
        i++;
    }
    free(reads);
    HashTable_destory(ht);
    return 0;
}
// usage: <prog> <k> <p> <reads-file>
// k = k-mer length, p = log2 of the hash-table capacity.
int main(int argc, char *argv[])
{
    int k = 31;           /* default k-mer length */
    unsigned int p = 27;  /* default: 2^27 table slots */
    // Guard against missing arguments: the original dereferenced
    // argv[1..3] unconditionally.
    if (argc < 4) {
        fprintf(stderr, "usage: %s <k> <p> <reads-file>\n", argv[0]);
        return 1;
    }
    k = (int)strtol(argv[1], NULL, 10);
    p = (unsigned int)strtol(argv[2], NULL, 10);
    count_file(argv[3], k, p);
    return 0;
}
|
22,996 | #include "cuda_runtime.h"
#include <stdio.h>
// Enumerates CUDA devices and prints basic identity and memory properties.
int main(void) {
    cudaDeviceProp prop;
    int count = 0;
    // Bug fix: the original ignored the return status; if no driver or
    // device is available, count was left uninitialized and the loop
    // below iterated over garbage.
    if (cudaGetDeviceCount(&count) != cudaSuccess) {
        printf("No CUDA-capable device detected\n");
        return 1;
    }
    for (int i = 0; i < count; i++) {
        cudaGetDeviceProperties(&prop, i);
        printf("---General information for device %d---\n", i);
        printf("Name : %s\n", prop.name);
        printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
        printf("\n---Memory information for device %d---\n", i);
        printf("Total global Mem : %lu\n", prop.totalGlobalMem);
        printf("Total constant Mem : %lu\n", prop.totalConstMem);
    }
    return 0;
}
22,997 | #include "includes.h"
// Copyright 2019, Dimitra S. Kaitalidou, All rights reserved
#define N 256
#define THR_PER_BL 8
#define BL_PER_GR 32
// Permutes D into Q: within each 2k-element block of the array, even
// global indices map into the first k slots and odd indices into the
// second k slots (NOTE(review): presumably an interleave/unshuffle step
// of a larger transform -- confirm against the calling code).
// No bounds guard: the launch must supply exactly one thread per element
// (N = THR_PER_BL * BL_PER_GR = 256 here).
__global__ void kernel1(int* D, int* Q, int k){
// Find index
int i = blockIdx.x * blockDim.x + threadIdx.x;
int block = (int)(i / (2 * k));
int j;
if(i % 2 == 0) j = 2 * block * k + (int)(i / 2) - k * ((int)(i / (2 * k)));
else j = (2 * block + 1) * k + (int)(i / 2) - k * ((int)(i / (2 * k)));
// Assign the values to the output array
Q[j] = D[i];
}
22,998 | #include <iostream>
#include <cstdlib>
#include <random>
#include <ctime>
#include <fstream>
using namespace std;
#define MAX_T 10000
#define MIN_T 1000
#define MAX_K 400
#define MIN_K 50
#define N 10
#define NUM_SAMPLES 50
double T1[MAX_K][MAX_T];
int T2[MAX_K][MAX_T];
double pi[MAX_K];
int Y[MAX_T];
int X[MAX_T];
double A[MAX_K][MAX_K];
double B[MAX_K][N];
// Prints the decoded state sequence X[0..nT-1] as "s0 -> s1 -> ... -> sN".
// Assumes nT >= 1.
void printPath(int nT){
    int idx = 0;
    while (idx + 1 < nT) {
        cout << X[idx] << " -> ";
        ++idx;
    }
    cout << X[nT-1] << "\n";
}
void generateTransitionProbabilities(){
double sum;
double rands[MAX_K];
for(int i =0; i<MAX_K;i++){
sum = 0.0;
for (int l = 0; l < MAX_K; ++l) {
rands[l] = (double)rand();
sum += rands[l];
}
for (int j = 0; j < MAX_K; j++) {
A[i][j] = rands[j] / sum;
}
}
}
void generateEmmisionProbabilities(){
double sum;
double rands[N];
for(int i =0; i<MAX_K;i++){
sum = 0.0;
for (int l = 0; l < N; ++l) {
rands[l] = (double)rand();
sum += rands[l];
}
for (int j = 0; j < N; j++) {
B[i][j] = rands[j] / sum;
}
}
}
void generatePriors(){
double rands[MAX_K];
double sum = 0.0;
for (int i = 0; i < MAX_K; ++i) {
rands[i] = (double)rand();
sum += rands[i];
}
for (int j = 0; j <MAX_K; ++j) {
pi[j] = rands[j]/sum;
}
}
// Fills the global observation sequence Y with MAX_T symbols drawn
// uniformly from {0, ..., N-1} using a freshly seeded Mersenne Twister.
void generateObservations(){
    random_device rd;
    mt19937 eng(rd());
    uniform_int_distribution<> distr(0, N-1);
    for (int t = 0; t < MAX_T; ++t) {
        Y[t] = distr(eng);
    }
}
// Seeds the C RNG and generates the full random HMM instance:
// transition matrix A, emission matrix B, priors pi, observations Y.
void generateInputs(){
srand(time(NULL)); // New random seed
generateTransitionProbabilities();
generateEmmisionProbabilities();
generatePriors();
generateObservations();
}
// Reference CPU Viterbi decoder over the global HMM (A, B, pi, Y) for
// nK states and nT observations: forward pass fills T1 (best path
// probability ending in state i at time j) and T2 (argmax predecessor),
// then backtracks the most likely state sequence into X.
// NOTE(review): T2[i][j] stores the predecessor maximizing T1[k][j-1] *
// A[k][i] (without the emission factor), while T1 takes the max WITH the
// emission factor -- since B[i][Y[j]] is constant over k the two argmaxes
// coincide, but the pairing is easy to break when editing.
void viterbiCPU(int nK, int nT){
double maxT1=-1;
double maxT2=-1;
double tempMaxT1;
double tempMaxT2;
int argmax=-1;
// initialization: prior times emission of the first observation
for (int i = 0; i <nK; ++i) { // For each state
T1[i][0] = pi[i]*B[i][Y[0]];
T2[i][0] = 0.0;
}
for (int j = 1; j <nT ; ++j) { // For each observation
for (int i = 0; i < nK; ++i) { // For each state
maxT1 = -1;
maxT2 = -1;
for (int k = 0; k < nK; ++k) { // For each state
tempMaxT2 = T1[k][j-1] * A[k][i];
tempMaxT1= tempMaxT2*B[i][Y[j]];
if( tempMaxT1 > maxT1) maxT1=tempMaxT1;
if( tempMaxT2 > maxT2){
maxT2 = tempMaxT2;
argmax = k;
}
}
T1[i][j] = maxT1;
T2[i][j] = argmax;
}
}
// termination: best final state
maxT1 = -1;
argmax = -1;
for (int k = 0; k < nK; ++k) {
if(T1[k][nT-1] > maxT1){
maxT1 = T1[k][nT-1];
argmax = k;
}
}
X[nT-1] = argmax;
// backtrack the stored predecessors
for (int j = nT-1; j >0 ; j--) {
//
X[j-1] = T2[X[j]][j];
}
}
// Estimated FLOP/s of the CPU Viterbi: 3k operations for initialization
// plus (3k^2 + 1) per remaining observation, divided by the elapsed
// time (the 1e-16 epsilon guards against division by zero).
double cpuFLOPS(double time, float k, float t){
    float ops = 3*k + (3*k*k + 1)*(t-1);
    return ops / (time + 1e-16);
}
// Single-block GPU Viterbi: one thread per state (launch <<<1, K>>>).
// Each time step j, thread idx computes the best transition into its
// state from all states at j-1 (t1 = max prob, t2 = argmax predecessor),
// with a block barrier between steps so every thread sees the completed
// previous column. Thread 0 then backtracks the decoded path into x.
// t1/t2 are row-major with a fixed MAX_T column stride; a/b use a fixed
// MAX_K stride.
// NOTE(review): the __syncthreads() inside the j-loop is safe only
// because every thread runs the same t iterations; probabilities are
// multiplied without log-scaling, so long sequences underflow toward 0.
__global__ void viterbi(int t, double *t1, int *t2, double * pi, int * y, int *x, double *a, double * b ){
int idx = threadIdx.x;
int states = blockDim.x;
double tempTA;
double maxTA=-1.0;
int argmax;
double tempTAB;
double maxTAB =-1.0;
double maxT1End = -1.0;
double tempMaxT1End;
int argmaxT1End;
// Index t2 t2[i*MAX_T+j];
// Index t1 t1[i*MAX_T+j];
for(int j = 0; j <t;j++){
if(j==0){
// initialization column: prior times first emission
t1[idx*MAX_T] = pi[idx]*b[idx*MAX_K+y[0]];
}else{
maxTA = -1.0;
maxTAB = -1.0;
for(int k=0; k< states; k++){
tempTA = t1[k*MAX_T +j-1]* a[k*MAX_K+idx];
if(tempTA>maxTA){
maxTA = tempTA;
argmax = k;
}
tempTAB = tempTA* b[idx*MAX_K+y[j]];
if(tempTAB>maxTAB) maxTAB=tempTAB;
}
t1[idx*MAX_T+j] = maxTAB;
t2[idx*MAX_T+j] = argmax;
}
// all threads must finish column j before any thread reads it at j+1
__syncthreads();
}
// termination + backtrack, serialized on thread 0
if(idx==0){
for(int k =0; k <states; k++){
tempMaxT1End = t1[k*MAX_T+t-1];
if(tempMaxT1End>maxT1End){
maxT1End = tempMaxT1End;
argmaxT1End = k;
}
}
x[t-1] = argmaxT1End;
for(int j = t-1; j>0; j--) x[j-1]= t2[x[j]*MAX_T+j];
// for (int j = 0; j < 10; j++) {
//
// printf("%d ->", x[j]);
// }
// printf("\n");
}
}
// Estimated FLOP/s of the GPU Viterbi kernel: 2k operations at
// initialization plus 2k^2 per remaining observation, over elapsed time.
double GPUFlops(double time, int k, int t){
    int ops = 2 * k * k * (t - 1) + 2 * k;
    return (double)ops / time;
}
// Benchmark driver for the viterbi kernel.
// Sweeps K (number of states) with T fixed at MAX_T, then T (sequence length)
// with K fixed at MAX_K, timing each <<<1,k>>> launch, and writes
// "K, T, Time, Flops" rows to CUDAvariedK.txt and CUDAvariedT.txt.
int main() {
    generateInputs();
    double *d_T1;
    int *d_T2;
    double *d_pi;
    int *d_Y;
    int *d_X;
    double *d_A;
    double *d_B;
    int T1_size = MAX_K*MAX_T * sizeof(double);
    int T2_size = MAX_K*MAX_T * sizeof(int);
    int pi_size = MAX_K*sizeof(double);
    int Y_size = MAX_T * sizeof(int);
    int X_size = MAX_T * sizeof(int);
    int A_size = MAX_K * MAX_K * sizeof(double);
    int B_size = MAX_K * N * sizeof(double);
    cudaMalloc((void**)&d_T1, T1_size);
    cudaMalloc((void**)&d_T2, T2_size);
    cudaMalloc((void**)&d_pi, pi_size);
    cudaMalloc((void**)&d_Y, Y_size);
    cudaMalloc((void**)&d_X, X_size);
    cudaMalloc((void**)&d_A, A_size);
    cudaMalloc((void**)&d_B, B_size);
    cudaMemcpy(d_X, X, X_size, cudaMemcpyHostToDevice);
    int k = MAX_K;
    int t = MAX_T; // was MAX_K — copy-paste typo; t is the sequence length
    cudaMemcpy(d_T1, T1, T1_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_T2, T2, T2_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_pi, pi, pi_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, Y, Y_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_A, A, A_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, B_size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    // Evenly spaced sample points for the two sweeps.
    int Ks[NUM_SAMPLES];
    int Ts[NUM_SAMPLES];
    double Ktimes[NUM_SAMPLES], Ttimes[NUM_SAMPLES], Kflops[NUM_SAMPLES], Tflops[NUM_SAMPLES];
    for (int i = 1; i < NUM_SAMPLES+1; ++i) {
        Ks[i-1] = MIN_K + (i-1)*((MAX_K-MIN_K)/NUM_SAMPLES);
        Ts[i-1] = MIN_T + (i-1)*((MAX_T-MIN_T)/NUM_SAMPLES);
    }
    // Sweep K with T fixed. NOTE(review): clock() has coarse resolution and
    // measures host time; cudaEvent timing would be more accurate — confirm
    // before trusting small samples.
    for (int i = 0; i < NUM_SAMPLES; i++) {
        k = Ks[i];
        t = MAX_T;
        clock_t start = clock();
        viterbi<<<1,k>>>(t, d_T1, d_T2, d_pi, d_Y, d_X, d_A, d_B); // Kernel invocation
        cudaDeviceSynchronize();
        clock_t end = clock();
        Ktimes[i] = (end - start) / (double) CLOCKS_PER_SEC;
        Kflops[i] = GPUFlops(Ktimes[i], k, t);
    }
    // Sweep T with K fixed.
    for (int i = 0; i < NUM_SAMPLES; i++) {
        k = MAX_K;
        t = Ts[i];
        clock_t start = clock();
        viterbi<<<1,k>>>(t, d_T1, d_T2, d_pi, d_Y, d_X, d_A, d_B); // Kernel invocation
        cudaDeviceSynchronize();
        clock_t end = clock();
        Ttimes[i] = (end - start) / (double) CLOCKS_PER_SEC;
        Tflops[i] = GPUFlops(Ttimes[i], k, t);
    }
    ofstream outK("CUDAvariedK.txt");
    ofstream outT("CUDAvariedT.txt");
    outK << "K, T, Time, Flops\n";
    outT << "K, T, Time, Flops\n";
    for (int k = 0; k < NUM_SAMPLES; ++k) {
        outK << Ks[k] << ", " << MAX_T << ", " << Ktimes[k] << ", " << Kflops[k] << endl;
        outT << MAX_K << ", " << Ts[k] << ", " << Ttimes[k] << ", " << Tflops[k] << endl;
    }
    // Release device buffers (leaked in the original).
    cudaFree(d_T1);
    cudaFree(d_T2);
    cudaFree(d_pi);
    cudaFree(d_Y);
    cudaFree(d_X);
    cudaFree(d_A);
    cudaFree(d_B);
    return 0;
}
|
22,999 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Allocates an n-element int array filled with 0..n-1 and returns it
// (caller owns the memory and must free() it).
// NOTE(review): the filename parameter `s` is ignored — no file is actually
// read despite the name; confirm whether file loading was intended.
int * myloadFile(int n,char *s){
int *a = (int *)malloc(sizeof(int) * n);
if (a == NULL) { // fail loudly instead of crashing on a NULL deref later
fprintf(stderr, "myloadFile: out of memory allocating %d ints\n", n);
exit(EXIT_FAILURE);
}
for (int i = 0; i < n; i++)
{
a[i] = i;
}
return a;
}
// Prints each index/value pair of the n-element array a, one pair per line.
void display(int *a,int n){
for (int idx = 0; idx < n; idx++) {
printf("%d %d \n", idx, a[idx]);
}
}
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
// Element-wise c[i] = a[i] + b[i]. Each of the 1024 threads processes one
// contiguous chunk of 2031 elements (1024 * 2031 == 2079744, the arraySize
// hard-coded in main).
// NOTE(review): the chunk size is coupled to the <<<1, 1024>>> launch in
// addWithCuda and to arraySize in main; changing any of the three breaks
// coverage — confirm before reuse.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int CHUNK = 2031;           // elements handled per thread
    int tid = threadIdx.x;            // renamed: original shadowed this with the loop index
    int low = CHUNK * tid;
    int high = CHUNK * (tid + 1) - 1; // inclusive upper bound of this thread's chunk
    for (int i = low; i <= high; i++) {
        c[i] = a[i] + b[i];
    }
    // Debug-only output: device printf serializes and floods stdout.
    printf("%d %d ", low, high);
}
// Driver: builds two synthetic input arrays, adds them on the GPU via
// addWithCuda, prints every (a, b, c) triple, and cleans up.
int main()
{
    const int arraySize = 2079744; // must stay 1024 * 2031 (see addKernel's chunking)
    const int *a = myloadFile(arraySize, (char *)"input1.txt");
    const int *b = myloadFile(arraySize, (char *)"input2.txt");
    int *c = myloadFile(arraySize, (char *)"input2.txt"); // contents overwritten by the GPU result
    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    for (int i = 0; i < arraySize; i++) {
        printf("%d %d %d\n", a[i], b[i], c[i]);
    }
    // cudaThreadExit is deprecated; cudaDeviceReset is the supported call and
    // also flushes profiler traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    // Release the host arrays (leaked in the original).
    free((void *)a);
    free((void *)b);
    free(c);
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers, copies a/b to the device, launches addKernel with
// the fixed <<<1, 1024>>> configuration (see addKernel's per-thread chunking),
// and copies the result back into c. Returns the first CUDA error hit; device
// buffers are always freed through the Error label.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch the kernel; each of the 1024 threads covers a 2031-element chunk.
    addKernel<<<1, 1024>>>(dev_c, dev_a, dev_b);
    // Launch-configuration errors only surface via cudaGetLastError().
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize waits for the
    // kernel to finish and returns any asynchronous execution errors.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
23,000 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define DATASIZE 260
cudaError_t searchKeyword(int *result, char *data, char *keyword);
// One thread per character: sets result[i] = 1 iff the 2-character keyword
// starts at data[i]. Expects a <<<1, DATASIZE>>> launch (see searchKeyword).
// Fixes in this version: the original inner loop incremented `i` instead of
// `j` (mutating the thread's index and looping until a mismatch), compared
// against keyword[2] though the keyword buffer holds only 2 chars (OOB read),
// read data[i+1] past the end of the buffer at the last position, and marked
// result on the first matching char rather than requiring a full match.
__global__ void searchKeywordKernel(int *result, char *data, char *keyword)
{
    int i = threadIdx.x;
    // A 2-char match cannot start at the final position, so guard i+1.
    if (i + 1 < DATASIZE && data[i] == keyword[0] && data[i + 1] == keyword[1]) {
        result[i] = 1;
    }
}
// Driver: fills a buffer with repeating 'A'..'Z', searches it on the GPU for
// the 2-character keyword "KL", and reports every match position.
int main()
{
    char data[DATASIZE];
    char keyword[2] = { 'K', 'L'};
    int result[DATASIZE] = { 0 };
    // Was memset(result, 0, DATASIZE): that clears only DATASIZE *bytes*, a
    // quarter of the int array. Redundant given the aggregate initializer,
    // but kept with the correct byte count.
    memset(result, 0, DATASIZE * sizeof(int));
    // Generate input data: ASCII 65..90 ('A'..'Z'), wrapping around.
    int tmpindex = 65;
    for (int i=0; i<DATASIZE; i++) {
        data[i] = char(tmpindex);
        (tmpindex == 90 ? tmpindex = 65 : tmpindex++);
    }
    // Print the input characters.
    for (int i=0; i<DATASIZE; i++)
        printf("i=%d,%c ", i, data[i]);
    printf("\n");
    // Search keyword in parallel.
    cudaError_t cudaStatus = searchKeyword(result, data, keyword);
    if (cudaStatus != cudaSuccess) {
        // Message corrected: the original said "addWithCuda failed!".
        fprintf(stderr, "searchKeyword failed!");
        return 1;
    }
    // Print out the string match result positions.
    int total_matches = 0;
    for (int i=0; i<DATASIZE; i++) {
        if (result[i] == 1) {
            printf("Character found at position % i\n", i);
            total_matches++;
        }
    }
    printf("Total matches = %d\n", total_matches);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    system("pause");
    return 0;
}
// Helper function for using CUDA to search a list of characters in parallel.
// Helper function for using CUDA to search a list of characters in parallel.
// Copies data and the keyword to the device, launches searchKeywordKernel
// with one thread per character, and copies the result flags back. Returns
// the first CUDA error hit; device buffers are always freed via Error.
cudaError_t searchKeyword(int *result, char *data, char *keyword)
{
    // The keyword buffer in main() holds exactly 2 chars; the original
    // allocated and copied DATASIZE (260) bytes from it — a 258-byte
    // out-of-bounds host read.
    const size_t KEYWORD_LEN = 2;
    char *dev_data = 0;
    char *dev_keyword = 0;
    int *dev_result = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffer for the result flags.
    cudaStatus = cudaMalloc((void**)&dev_result, DATASIZE * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Allocate GPU buffer for the input characters.
    cudaStatus = cudaMalloc((void**)&dev_data, DATASIZE * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Allocate GPU buffer for the keyword (2 chars, not DATASIZE).
    cudaStatus = cudaMalloc((void**)&dev_keyword, KEYWORD_LEN * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input data from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_data, data, DATASIZE * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Copy keyword from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_keyword, keyword, KEYWORD_LEN * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a search keyword kernel on the GPU with one thread for each element.
    searchKeywordKernel<<<1, DATASIZE>>>(dev_result, dev_data, dev_keyword);
    // Launch-configuration errors only surface via cudaGetLastError().
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "searchKeywordKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy result from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(result, dev_result, DATASIZE * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_result);
    cudaFree(dev_data);
    cudaFree(dev_keyword);
    return cudaStatus;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.