serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
#include <iostream>
#include <cstring>
using namespace std;
#define NO_OF_CHARS 256
// A utility function to get maximum of two integers
// The preprocessing function for Boyer Moore's
// bad character heuristic
// Builds the Boyer-Moore bad-character table: badchar[c] holds the index
// of the LAST occurrence of byte c in str, or -1 when c does not occur.
// str need not be NUL-terminated; size is its length.
void badCharHeuristic( char *str, int size,
int badchar[NO_OF_CHARS])
{
int i;
// Mark every byte value as absent.
for (i = 0; i < NO_OF_CHARS; i++)
badchar[i] = -1;
// Record the rightmost position of each byte in the pattern.
// Cast through unsigned char: (int)str[i] is negative for bytes >= 0x80
// on signed-char platforms and would index out of bounds.
for (i = 0; i < size; i++)
badchar[(unsigned char) str[i]] = i;
}
// preprocessing for strong good suffix rule
// Builds the strong good-suffix shift table for Boyer-Moore.
// shift[0..m]: shift distances (only entries still 0 are written here;
// preprocess_case2 fills the remainder). bpos[0..m]: border start positions.
// Precondition: shift[] must be zero-initialized by the caller
// (main() does this before calling).
void preprocess_strong_suffix(int *shift, int *bpos,
char *pat, int m)
{
// m is the length of pattern
int i=m, j=m+1;
bpos[i]=j;
while(i>0)
{
/*if character at position i-1 is not equivalent to
character at j-1, then continue searching to right
of the pattern for border */
while(j<=m && pat[i-1] != pat[j-1])
{
/* the character preceding the occurence of t in
pattern P is different than mismatching character in P,
we stop skipping the occurences and shift the pattern
from i to j */
if (shift[j]==0)
shift[j] = j-i;
//Update the position of next border
j = bpos[j];
}
/* p[i-1] matched with p[j-1], border is found.
store the beginning position of border */
i--;j--;
bpos[i] = j;
}
}
//Preprocessing for case 2
void preprocess_case2(int *shift, int *bpos,
char *pat, int m)
{
int i, j;
j = bpos[0];
for(i=0; i<=m; i++)
{
/* set the border postion of first character of pattern
to all indices in array shift having shift[i] = 0 */
if(shift[i]==0)
shift[i] = j;
/* suffix become shorter than bpos[0], use the position of
next widest border as value of j */
if (i==j)
j = bpos[j];
}
}
//-----------------------------------------------------------------------------------------------
// 1-based index of a found match, or -1; written from device code.
__device__ int d_retval;
// Parallel Boyer-Moore: each thread scans a window of 2*patlen bytes
// starting at tid*patlen, so adjacent windows overlap by patlen and no
// match straddling a segment boundary is missed.
// delta1: good-suffix shifts (size patlen+1); delta2: 256-entry
// bad-character table; n: number of segments/threads.
// NOTE(review): every thread writes d_retval = -1 on entry, so a block
// scheduled late can overwrite a match already recorded by an earlier
// block; initializing d_retval from the host (cudaMemcpyToSymbol) before
// launch would remove the race -- confirm with callers.
__global__ void boyer_moore (char *string, int stringlen, char *pat, int patlen, int *delta1, int *delta2, int n) {
int i;
d_retval = -1;
int tid = blockIdx.x*blockDim.x+threadIdx.x;
__syncthreads();
if (tid<n)
{
int beg = tid*patlen;
int end = min (beg+(2*patlen), stringlen);
i = beg;
while (i < end) {
// Compare pattern right-to-left against the text window.
int j = patlen-1;
while (j >= 0 && (string[i+j] == pat[j])) {
--j;
}
// (Debug printf removed: it dereferenced string[i+j] with j == -1,
// an out-of-bounds read at i == 0.)
if (j < 0) {
d_retval = i+1;
printf("\nFound at: %d\n",i+1);
break;
}
else
// Shift by the larger of the good-suffix and bad-character rules.
// Index the 256-entry table with an unsigned byte: a signed char
// is negative for bytes >= 0x80.
i += max(delta1[j+1] , j - delta2[(unsigned char)string[i+j]]);
}
}
}
char h_string[1000000];
char h_pat[100];
/*
 * Reads a text and a pattern from stdin, builds the Boyer-Moore shift
 * tables on the host, prints them, and launches one GPU thread per
 * patlen-sized segment to search in parallel.
 */
int main(int argc, char const *argv[]) {
char *d_s, *d_p;
int *d_d1, *d_d2;
cin>>h_string>>h_pat;
int stringlen = strlen(h_string);
int patlen = strlen(h_pat);
// delta1 (good-suffix shifts) must be zeroed before preprocessing.
int *delta1 = (int*)malloc(sizeof(int)*(patlen+1));
for(int i=0;i<patlen+1;i++) delta1[i]=0;
int *bpos = (int*)malloc(sizeof(int)*(patlen+1));
int delta2[NO_OF_CHARS];
preprocess_strong_suffix(delta1, bpos, h_pat, patlen);
preprocess_case2(delta1, bpos, h_pat, patlen);
badCharHeuristic(h_pat, patlen, delta2);
// Dump both tables for inspection.
for(int i=0;i<patlen+1;i++)
printf("%d ",delta1[i]);
cout<<endl;
for(int i=0;i<NO_OF_CHARS;i++)
printf("%d ",delta2[i]);
cout<<endl;
cudaMalloc(&d_s, stringlen*sizeof(char));
cudaMemcpy(d_s, h_string,stringlen*sizeof(char),cudaMemcpyHostToDevice);
cudaMalloc(&d_p, patlen*sizeof(char));
cudaMemcpy(d_p, h_pat,patlen*sizeof(char),cudaMemcpyHostToDevice);
cudaMalloc(&d_d1, (patlen+1)*sizeof(int));
cudaMemcpy(d_d1, delta1,(patlen+1)*sizeof(int),cudaMemcpyHostToDevice);
cudaMalloc(&d_d2, NO_OF_CHARS*sizeof(int));
cudaMemcpy(d_d2, delta2,NO_OF_CHARS*sizeof(int),cudaMemcpyHostToDevice);
// One thread per patlen-wide segment. Guard against a zero-block launch
// when the text is shorter than the pattern (n == 0).
int n = stringlen/patlen;
if (n > 0) {
int block_size = 1024;
int n_blocks = n/block_size + (n%block_size==0?0:1);
boyer_moore<<<n_blocks,block_size>>>(d_s, stringlen, d_p, patlen, d_d1, d_d2, n);
// The launch is asynchronous; without this the process can exit before
// the kernel runs and its printf output is flushed.
cudaDeviceSynchronize();
}
// Release device and host allocations (the original leaked all of them).
cudaFree(d_s); cudaFree(d_p); cudaFree(d_d1); cudaFree(d_d2);
free(delta1); free(bpos);
return 0;
}
|
#include "includes.h"
// Copies one row of matrix A (row-major, Acols columns) into out0.
// The row number is read from device memory as rowId[0], truncated to int.
// One thread per column; surplus threads fall through the guard.
// empty_par1/empty_par2, Acount, out0count and out0cols are unused here.
// NOTE(review): the id formula assumes a grid where blockIdx.y strides by
// gridDim.x full block-rows -- confirm the launch configuration matches.
__global__ void Matrix_getRow_FloatPointer_naive(const float * A , int Acount, int Acols, const float * rowId , int empty_par1, int empty_par2, float * out0 , int out0count, int out0cols)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id<Acols)
{
// Element `id` of row rowId[0].
out0[id] = A[id + (int)rowId[0]*Acols];
}
}
#include <cfloat>
#include <climits>
#include <cmath>
// Sets result[t] = true when value[t] is infinite (either sign).
// One thread per element; no bounds guard, so the launch must cover
// exactly the array length.
__global__ void isinf_kernel(const double* value, bool* result)
{
// The original `value >= DBL_MAX` test missed -infinity and wrongly
// flagged DBL_MAX itself; isinf() handles both signs correctly.
result[threadIdx.x] = isinf(value[threadIdx.x]);
}
|
#include "cuda.h"
#include <stdio.h>
// Perturbation-style Mandelbrot iteration: every thread perturbs a shared
// reference orbit (ref_real_array/ref_imag_array) by its own delta-c
// (dc_real_array/dc_imag_array) and stores the escape iteration count.
// Launch assumption: 512 threads per block (hard-coded in the i stride).
// NOTE(review): ref_*_array is indexed by `count`, which can reach
// depth-1 inside the loop -- the arrays must hold at least `depth`
// entries; confirm l_ref >= depth in the caller.
__global__ void mandel(double* ref_real_array,
double* ref_imag_array,
double* dc_real_array,
double* dc_imag_array,
int depth,
int* count_array)
{
unsigned int i = threadIdx.x + 512 * blockIdx.x;
double dc_real = dc_real_array[i];
double dc_imag = dc_imag_array[i];
int count = 0;
double d_real = 0;
double d_imag = 0;
double d_real_temp;
// Iterate while |z + d|^2 < 4 (i.e. |z + d| < 2) and under the depth cap.
while((d_real + ref_real_array[count]) * (d_real + ref_real_array[count]) +
(d_imag + ref_imag_array[count]) * (d_imag + ref_imag_array[count]) < 4 &&
count < depth){
double z_real = ref_real_array[count];
double z_imag = ref_imag_array[count];
// d' = 2*z*d + d^2 + dc, expanded into real/imaginary parts.
d_real_temp = 2 * z_real * d_real - 2 * z_imag * d_imag + d_real * d_real - d_imag * d_imag + dc_real;
d_imag = 2 * z_real * d_imag + 2 * z_imag * d_real + 2 * d_real * d_imag + dc_imag;
d_real = d_real_temp;
count ++;
}
count_array[i] = count;
}
// C-linkage entry point: uploads the reference orbit (l_ref doubles) and
// the 512x512 delta-c grids, runs the mandel kernel with one thread per
// pixel (<<<512, 512>>>), and copies escape counts back into count_array.
// Returns 0 unconditionally.
extern "C" int cu_mandel(double* ref_real_array,
double* ref_imag_array,
double* dc_real_array,
double* dc_imag_array,
int depth,
int* count_array,
int l_ref){
printf("entering function");
double *dev_real_ref, *dev_imag_ref, *dev_dc_real, *dev_dc_imag;
int *dev_counts;
cudaMalloc((void**)&dev_real_ref, l_ref * sizeof(double));
cudaMalloc((void**)&dev_imag_ref, l_ref * sizeof(double));
cudaMalloc((void**)&dev_dc_real, 512 * 512 *sizeof(double));
cudaMalloc((void**)&dev_dc_imag, 512 * 512 *sizeof(double));
cudaMalloc((void**)&dev_counts, 512 * 512 *sizeof(int));
cudaMemcpy(dev_real_ref, ref_real_array, l_ref * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_imag_ref, ref_imag_array, l_ref * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_dc_real, dc_real_array, 512 * 512 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_dc_imag, dc_imag_array, 512 * 512 * sizeof(double), cudaMemcpyHostToDevice);
printf("calling kernel");
mandel<<<512, 512>>>(dev_real_ref, dev_imag_ref, dev_dc_real, dev_dc_imag, depth, dev_counts);
// Surface launch-configuration errors here; otherwise the blocking
// memcpy below fails with a confusing asynchronous error instead.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
fprintf(stderr, "mandel launch failed: %s\n", cudaGetErrorString(err));
cudaMemcpy(count_array, dev_counts, 512 * 512 * sizeof(int), cudaMemcpyDeviceToHost);
printf("a count: %d\n", count_array[0]);
cudaFree(dev_real_ref);
cudaFree(dev_imag_ref);
cudaFree(dev_dc_real);
cudaFree(dev_dc_imag);
cudaFree(dev_counts);
return 0;
}
// One in-place SGD update. Grid layout: blockIdx.x selects a gradient
// slot, blockIdx.y/threadIdx.x tile the parameter vector; each thread
// updates `numberIterations` consecutive entries of one parameter row.
// parameterIndices[g] == -1 marks an unused gradient slot.
// NOTE(review): a tail thread with startEntry < parameterSize but
// startEntry + numberIterations > parameterSize would run past the row --
// confirm parameterSize is a multiple of numberIterations at call sites.
__global__ void stochasticGradientDescentKernel (
int numberIterations,
float learningRate,
int* parameterIndices,
int* counts,
int parameterSize,
float* parameters,
float* gradient) {
int startEntry = (blockIdx.y * blockDim.x * numberIterations) + threadIdx.x * numberIterations;
if(startEntry < parameterSize) {
int gradientIndex = blockIdx.x;
int parameterIndex = parameterIndices[gradientIndex];
if(parameterIndex != -1) {
int startParameter = parameterIndex * parameterSize + startEntry;
int startGradient = gradientIndex * parameterSize + startEntry;
// Average over the accumulated examples. Float literal: the original
// `1.0 / ...` forced a double-precision divide in a float kernel.
float scalingFactor = 1.0f / (float)counts[gradientIndex];
for(int indexParameter = startParameter, indexGradient = startGradient; indexParameter < startParameter + numberIterations; indexParameter++, indexGradient++) {
parameters[indexParameter] -= scalingFactor * learningRate * gradient[indexGradient];
}
}
}
}
#include <stdio.h>
#include <cuda.h>
// Element-wise vector add: d_c[i] = d_a[i] + d_b[i].
// One thread per element within a single block; there is no bounds guard,
// so the launch must use exactly as many threads as there are elements.
__global__ void sumKernel (double *d_a, double *d_b, double *d_c)
{
/* Sums the values in arrays d_a and d_b,
storing the result in d_c.
*/
int i = threadIdx.x;
d_c[i] = d_a[i] + d_b[i];
}
#define N 32
/*
 * Fills two N-element vectors on the host, adds them on the GPU with
 * sumKernel, and prints the result.
 */
int main ()
{
double *a, *b, *c;       /* host buffers */
double *d_a, *d_b, *d_c; /* device buffers */
int size = N * sizeof (double);
/* allocate space for host copies of a, b, c and setup input values */
a = (double *) malloc (size);
b = (double *) malloc (size);
c = (double *) malloc (size);
/* allocate space for device copies of a, b, c */
cudaMalloc ((void **) &d_a, size);
cudaMalloc ((void **) &d_b, size);
cudaMalloc ((void **) &d_c, size);
for (int i = 0; i < N; i++)
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device; d_c is entirely overwritten by the kernel, so
the original host-to-device copy of c was unnecessary and is dropped */
cudaMemcpy (d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy (d_b, b, size, cudaMemcpyHostToDevice);
/* launch the kernel on the GPU: one block, one thread per element */
sumKernel <<< 1, N >>> (d_a, d_b, d_c);
cudaDeviceSynchronize();
/* copy result back to host */
cudaMemcpy (c, d_c, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
printf ("c[ %d ] = %f\n", i, c[i]);
}
/* clean up */
free (a);
free (b);
free (c);
cudaFree (d_a);
cudaFree (d_b);
cudaFree (d_c);
return 0;
}
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <sys/time.h>
// Scatter kernel: C[B[i]] = A[i] for each i < numElements.
// NOTE(review): duplicate values in B make multiple threads write the
// same C slot -- which value survives is nondeterministic. Slots of C
// whose index never appears in B are left untouched.
__global__ void
vector(int *A, int *B, int *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[B[i]] = A[i];
}
}
/*
 * Scatters A into C at positions given by random B on the GPU, timing the
 * kernel with gettimeofday, then validates the result on the host.
 */
int main(int argc, char **argv)
{
struct timeval start, end;
cudaError_t err = cudaSuccess;
int numElements = 5000000;
size_t size = numElements * sizeof(int);
int *h_A = (int *)malloc(size);
int *h_B = (int *)malloc(size);
int *h_C = (int *)malloc(size);
if (h_A == NULL || h_B == NULL || h_C == NULL) fprintf(stderr, "Failed to allocate host vectors!\n");
// Initialize the host input vectors: A[i] = i, B[i] = random target slot.
srand(time(NULL));
for (int i = 0; i < numElements; i++)
{
h_A[i] = i;
h_B[i] = rand()%numElements;
}
// Allocate the device input vector A
int *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess) fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
// Allocate the device input vector B
int *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess) fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
// Allocate the device output vector C
int *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess) fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
// Copy the host input vectors to the device
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
// Launch the scatter kernel
int threadsPerBlock = 512;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
gettimeofday(&start, NULL);
vector<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
// Kernel launches are asynchronous: block until completion so the
// interval below measures execution time, not just launch overhead
// (the original measured only the launch call).
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
printf("%ld\n", ((end.tv_sec * 1000000 + end.tv_usec)
- (start.tv_sec * 1000000 + start.tv_usec)));
err = cudaGetLastError();
if (err != cudaSuccess) fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
// Copy the device result vector back to the host
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
// Verify: C[B[i]] was written with A[v] == v for some v with B[v]==B[i]
// (duplicates in B make the exact winner nondeterministic). Only report
// mismatches instead of printing all 5M elements as the original did.
for (int i = 0; i < numElements; i++)
{
int slot = h_B[i];
int v = h_C[slot];
if (v < 0 || v >= numElements || h_B[v] != slot)
fprintf(stderr, "Element in C[%d] is %d!\n", slot, v);
}
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess) fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
err = cudaFree(d_B);
if (err != cudaSuccess) fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
err = cudaFree(d_C);
if (err != cudaSuccess) fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = cudaDeviceReset();
if (err != cudaSuccess) fprintf(stderr, "Failed to deinitialize the device! error = %s\n", cudaGetErrorString(err));
}
|
// Adds 1 to every component of one float3 per thread (AoS access:
// each thread loads and stores a whole 12-byte struct).
__global__
void f1( float3* __restrict__ ptr ) {
    const unsigned int lane = threadIdx.x;
    float3 value = ptr[lane];
    value.x += 1;
    value.y += 1;
    value.z += 1;
    ptr[lane] = value;
}
// Adds 1 to one element of each of three float arrays per thread
// (SoA access; __restrict__ promises the three regions do not alias).
__global__
void f2( float* __restrict__ ptr1, float* __restrict__ ptr2, float* __restrict__ ptr3 ) {
    const unsigned int lane = threadIdx.x;
    ptr1[lane] = ptr1[lane] + 1;
    ptr2[lane] = ptr2[lane] + 1;
    ptr3[lane] = ptr3[lane] + 1;
}
/*
 * Demo driver: 96 floats are consumed by f1 as 32 float3 (AoS) and by f2
 * as three runs of 32 floats (SoA).
 */
int main() {
    float *some_ptr;
    cudaMalloc(&some_ptr, 96 * sizeof(float));
    f1<<<1, 32>>>((float3*) some_ptr);
    f2<<<1, 32>>>(some_ptr, some_ptr+32, some_ptr+64);
    // Kernel launches are asynchronous: wait for completion before the
    // process exits, then release the allocation (the original leaked it
    // and never synchronized).
    cudaDeviceSynchronize();
    cudaFree(some_ptr);
}
|
//---------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
//---------------------------------------------------------------------------------
static const int WORK_SIZE = 200000000;
static const int BLK_SIZE = 256;
using namespace std;
//---------------------------------------------------------------------------------
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//---------------------------------------------------------------------------------
// Element-wise vector add: C_d[id] = A_d[id] + B_d[id] for id < WORK_SIZE.
// NOTE: the parameter WORK_SIZE shadows the file-level constant of the
// same name; inside this kernel it is the caller-supplied element count.
__global__ void vecAdd(unsigned int *A_d, unsigned int *B_d,
unsigned int *C_d, int WORK_SIZE) {
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** Populate vecADD kernel function ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < WORK_SIZE)
C_d[id] = A_d[id] + B_d[id];
}
//---------------------------------------------------------------------------------
/*
 * Allocates three WORK_SIZE arrays, adds A+B into C on the GPU, verifies
 * against the host, and reports device/host timings. All CUDA calls are
 * checked via CUDA_CHECK_RETURN, which exits on failure.
 */
int main(void) {
unsigned int *A_h;
unsigned int *A_d;
unsigned int *B_h;
unsigned int *B_d;
unsigned int *C_h;
unsigned int *C_d;
//Set Device
CUDA_CHECK_RETURN(cudaSetDevice(0));
//Seed random number generator
srand(time(NULL));
//Clear command prompt
cout << "\033[2J\033[1;1H";
cout << "Allocating arrays on host ... ";
A_h = new unsigned int[WORK_SIZE];
B_h = new unsigned int[WORK_SIZE];
C_h = new unsigned int[WORK_SIZE];
cout << "done.\nPopluating arrays on host ... ";
for (int i = 0; i < WORK_SIZE; i++) {
A_h[i] = rand();
B_h[i] = rand();
}
cout << "done.\nAllocating arrays on device ... ";
CUDA_CHECK_RETURN(
cudaMalloc((void** ) &A_d, sizeof(unsigned int) * WORK_SIZE));
CUDA_CHECK_RETURN(
cudaMalloc((void** ) &B_d, sizeof(unsigned int) * WORK_SIZE));
CUDA_CHECK_RETURN(
cudaMalloc((void** ) &C_d, sizeof(unsigned int) * WORK_SIZE));
cout << "done.\nCopying arrays from host to device ... ";
// Use sizeof(unsigned int) consistently; the original mixed in
// sizeof(int), which was only correct because the two sizes coincide.
CUDA_CHECK_RETURN(
cudaMemcpy(A_d, A_h, sizeof(unsigned int) * WORK_SIZE,
cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(
cudaMemcpy(B_d, B_h, sizeof(unsigned int) * WORK_SIZE,
cudaMemcpyHostToDevice));
cout << "done.\nLaunching kernel ... \n";
// Launch parameters: BLK_SIZE threads per block, enough blocks to cover
// WORK_SIZE (one smaller block if the whole workload fits in it).
int threadsPerBlock,blocksPerGrid;
if (WORK_SIZE<BLK_SIZE){
threadsPerBlock = WORK_SIZE;
blocksPerGrid = 1;
} else {
threadsPerBlock = BLK_SIZE;
blocksPerGrid = ceil(double(WORK_SIZE)/double(threadsPerBlock));
}
//Time kernel launch with CUDA events
cudaEvent_t start, stop;
CUDA_CHECK_RETURN(cudaEventCreate(&start));
CUDA_CHECK_RETURN(cudaEventCreate(&stop));
float elapsedTime;
CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
vecAdd<<< blocksPerGrid, threadsPerBlock >>>(A_d, B_d, C_d, WORK_SIZE);
CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// supported replacement. Wait for the GPU launched work to complete.
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
CUDA_CHECK_RETURN(cudaGetLastError()); //Check if an error occurred in device code
CUDA_CHECK_RETURN(cudaEventDestroy(start));
CUDA_CHECK_RETURN(cudaEventDestroy(stop));
cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";
cout << "Copying results back to host .... ";
CUDA_CHECK_RETURN(
cudaMemcpy(C_h, C_d, sizeof(unsigned int) * WORK_SIZE,
cudaMemcpyDeviceToHost));
cout << "done.\nVerifying results on host ... ";
//Time host-side verification
clock_t st, ed;
st = clock();
// Check every element against the host-computed sum; bail on first error.
bool valid = true;
for (int i = 0; i < WORK_SIZE; i++) {
if (C_h[i] != (A_h[i]+B_h[i]) ) {
cout << "done.\n***GPU results are incorrect***";
valid = false;
break;
}
}
cout << "done\n";
if (valid) {
cout << "GPU results are valid.\n";
}
ed = clock() - st;
cout << "Elapsed time on host: " << ((float) ed) / CLOCKS_PER_SEC * 1000
<< " ms" << endl;
cout << "Freeing memory on device ... ";
CUDA_CHECK_RETURN(cudaFree((void* ) A_d));
CUDA_CHECK_RETURN(cudaFree((void* ) B_d));
CUDA_CHECK_RETURN(cudaFree((void* ) C_d));
CUDA_CHECK_RETURN(cudaDeviceReset());
cout << "done.\nFreeing memory on host ... ";
delete[] A_h;
delete[] B_h;
delete[] C_h;
cout << "done.\nExiting program.\n";
cout<<" Kushagra Trivedi\n 3080669\n";
return 0;
}
|
#include "includes.h"
// In-place AXPY over voxels: WHAT[idx] += AMOUNT * WITH[idx].
// No bounds guard: the launch configuration must cover exactly the
// number of voxels in both arrays.
__global__ void Update(float *WHAT , float *WITH , float AMOUNT) {
int idx = threadIdx.x + blockIdx.x * blockDim.x; // which voxel
WHAT[idx] +=AMOUNT*WITH[idx];
}
#include "includes.h"
// Darkens (halves R and G of) pixels whose 3D point, reconstructed from
// the measured disparity, lies farther than dist_thres from the point
// reconstructed from the prior disparity -- or where the measured
// disparity is finite while the prior is not.
// disparity is a pitched 2D buffer (disparity_pitch in bytes);
// disparity_prior and out_image are dense width*height arrays.
// f: focal length, b: baseline, (ox, oy): principal point.
// One thread per pixel on a 2D grid.
__global__ void colorDistDiff_kernel(uchar4 *out_image, const float *disparity, int disparity_pitch, const float *disparity_prior, int width, int height, float f, float b, float ox, float oy, float dist_thres) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
int ind = y * width + x;
uchar4 temp = out_image[ind];
// Row offset through the pitched buffer in bytes, then index by column.
float disp = *((float *)((char *)disparity + y * disparity_pitch) + x);
float disp_model = disparity_prior[ind];
// 3D reconstruct and measure Euclidian distance
float xt = __fdividef((x - ox), f);
float yt = -__fdividef((y - oy), f); // coord. transform
float Zm = -(f * b) / disp_model;
float Xm = xt * Zm;
float Ym = yt * Zm;
float Zd = -(f * b) / disp;
float Xd = xt * Zd;
float Yd = yt * Zd;
float d_md = sqrtf((Xm - Xd) * (Xm - Xd) + (Ym - Yd) * (Ym - Yd) +
(Zm - Zd) * (Zm - Zd));
// NOTE(review): bitwise & and ~ on isfinite()'s int result only work if
// the implementation returns 0/nonzero consistently for both calls;
// logical operators (&& !) would be the robust form -- confirm intent.
bool color = (d_md > dist_thres) | (isfinite(disp) & ~isfinite(disp_model));
if (color) { // color
temp.x *= 0.5f;
temp.y *= 0.5f;
}
out_image[ind] = temp;
}
}
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cmath>
#include <string>
#include <iostream>
using namespace std::chrono;
using namespace std;
// Matrix add with one thread per element on a 2D grid: out = in1 + in2.
// nx = columns, ny = rows; row-major storage with stride nx.
__global__ void addMatOnDevice2D(float *in1, float *in2, float *out, int nx, int ny)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= nx || row >= ny)
        return; // thread falls outside the matrix
    const int idx = row * nx + col;
    out[idx] = in1[idx] + in2[idx];
}
// Matrix add with a 1D grid: one thread per column, each thread walks
// its entire column. Row-major storage with stride nx.
__global__ void addMatOnDevice1D(float *in1, float *in2, float *out, int nx, int ny)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    if (col >= nx)
        return;
    for (int row = 0; row < ny; row++)
    {
        const int idx = row * nx + col;
        out[idx] = in1[idx] + in2[idx];
    }
}
// Matrix add with a mixed mapping: threads tile the column axis while
// each row is served by one grid row (iy = blockIdx.y, so blockDim.y is
// assumed to be 1). iy is NOT checked against ny: the launch must use
// gridDim.y <= ny or writes go out of bounds.
__global__ void addMatOnDeviceMix(float *in1, float *in2, float *out, int nx, int ny)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = blockIdx.y;
if (ix < nx)
{
int idx = iy * nx + ix;
out[idx] = in1[idx] + in2[idx];
}
}
// CPU reference: element-wise sum of two ny x nx row-major matrices.
void addMatOnHost(float *in1, float *in2, float *out,
int nx, int ny)
{
    for (int row = 0; row < ny; ++row)
    {
        const int base = row * nx;
        for (int col = 0; col < nx; ++col)
            out[base + col] = in1[base + col] + in2[base + col];
    }
}
// Prints an ny x nx row-major matrix, one row per line.
void printMatrix(float *matrix, int nx, int ny)
{
    printf("\n");
    for (int i = 0; i < ny; i++){
        for (int j = 0; j < nx; j++){
            // Row stride is nx (the column count); the original used ny,
            // which misprints any non-square matrix.
            int idx = i * nx + j;
            printf("%f ", matrix[idx]);
        }
        printf("\n");
    }
}
// Uploads both inputs, runs the kernel selected by typeDevice
// (1: 2D grid, 2: 1D grid, 3: 2D labelled "NotMix", else: Mix),
// downloads the result, and prints the elapsed wall time (which includes
// the device-to-host copy).
void calcTimeOnDevice(float *in1, float *in2, float *out, int nx, int ny, dim3 blockSize, dim3 gridSize, int typeDevice)
{
    int size = nx * ny * sizeof(float);
    // Allocate device buffers
    float *d_in1, *d_in2, *d_out;
    cudaMalloc(&d_in1, size);
    cudaMalloc(&d_in2, size);
    cudaMalloc(&d_out, size);
    // Copy inputs to device
    cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice);
    auto start_device = high_resolution_clock::now();
    string deviceName = "";
    if (typeDevice == 1){
        deviceName = "addMatOnDevice2D";
        addMatOnDevice2D<<<gridSize, blockSize>>>(d_in1, d_in2, d_out, nx, ny);
    }else if (typeDevice == 2){
        deviceName = "addMatOnDevice1D";
        addMatOnDevice1D<<<gridSize, blockSize>>>(d_in1, d_in2, d_out, nx, ny);
    }else if (typeDevice == 3){
        deviceName = "addMatOnDevice2DNotMix";
        addMatOnDevice2D<<<gridSize, blockSize>>>(d_in1, d_in2, d_out, nx, ny);
    }else {
        deviceName = "addMatOnDeviceMix";
        addMatOnDeviceMix<<<gridSize, blockSize>>>(d_in1, d_in2, d_out, nx, ny);
    }
    cudaDeviceSynchronize();
    cudaGetLastError();
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    auto stop_device = high_resolution_clock::now();
    auto duration_device = duration_cast<microseconds>(stop_device - start_device);
    // count() yields a 64-bit integer holding MICROseconds; the original
    // printed it with %d (undefined behavior) and labelled it "ms".
    long long duration = duration_device.count();
    printf("%s|%d x %d\t|%d x %d\t|%lld us\t\n", deviceName.c_str(), blockSize.x, blockSize.y, gridSize.x, gridSize.y, duration);
    // Cleanup
    cudaFree(d_in1);
    cudaFree(d_in2);
    cudaFree(d_out);
}
/*
 * Benchmarks four matrix-add kernels over a sweep of block/grid shapes
 * against a host baseline, for an 8193 x 8193 float matrix.
 */
int main()
{
    int nx, ny;       // number of columns / rows
    float *in1, *in2; // input matrices
    float *out;       // output matrix
    nx = pow(2, 13) + 1;
    ny = pow(2, 13) + 1;
    int size = nx * ny * sizeof(float);
    in1 = (float *)malloc(size);
    in2 = (float *)malloc(size);
    out = (float *)malloc(size);
    // Fill inputs with uniform [0, 1] values.
    srand(time(0));
    for (int i = 0; i < ny; i++){
        for (int j = 0; j < nx; j++){
            // Row stride is nx (the original used ny, which only worked
            // because nx == ny here).
            int idx = i * nx + j;
            in1[idx] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
            in2[idx] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        }
    }
    auto start_host = high_resolution_clock::now();
    addMatOnHost(in1, in2, out, nx, ny);
    auto stop_host = high_resolution_clock::now();
    auto duration_host = duration_cast<microseconds>(stop_host - start_host);
    // The measured durations are microseconds; the original header said
    // "ms" and printed the 64-bit count() with %d (undefined behavior).
    printf("Function\t|Block size\t|Grid size\t|Time (us)\n");
    printf("addMatOnHost\t|\t\t|\t\t|%lld\n", (long long)duration_host.count());
    /********************************
    addMatOnDevice2D: four 2D tilings
    *********************************/
    {
        dim3 blocks[] = {dim3(32, 32), dim3(16, 32), dim3(32, 16), dim3(16, 16)};
        dim3 grids[]  = {dim3(257, 257), dim3(513, 257), dim3(257, 513), dim3(513, 513)};
        for (int k = 0; k < 4; k++)
            calcTimeOnDevice(in1, in2, out, nx, ny, blocks[k], grids[k], 1);
    }
    /********************************
    addMatOnDevice1D: one thread per column
    *********************************/
    {
        dim3 blocks[] = {dim3(32, 1), dim3(64, 1), dim3(128, 1)};
        dim3 grids[]  = {dim3(257, 1), dim3(129, 1), dim3(65, 1)};
        for (int k = 0; k < 3; k++)
            calcTimeOnDevice(in1, in2, out, nx, ny, blocks[k], grids[k], 2);
    }
    /********************************
    addMatOnDevice2DNotMix (typeDevice 3) and addMatOnDeviceMix
    (typeDevice 4) share the same sweep of configurations.
    NOTE(review): the 2048-thread block exceeds the 1024 threads/block
    hardware limit, so that launch fails; kept for parity with the
    original benchmark sequence.
    *********************************/
    {
        int widths[] = {32, 64, 128, 256, 512, 1024, 2048};
        int gridx[]  = {257, 129, 65, 33, 17, 9, 5};
        for (int typeDevice = 3; typeDevice <= 4; typeDevice++)
            for (int k = 0; k < 7; k++)
                calcTimeOnDevice(in1, in2, out, nx, ny,
                                 dim3(widths[k], 1), dim3(gridx[k], 8193), typeDevice);
    }
    free(in1);
    free(in2);
    free(out);
    return 0;
}
#include <iostream>
#include <chrono>
#include <cuda_runtime.h>
#include <string>
#include <iomanip>
using namespace std;
using ST = unsigned long long;
constexpr ST TOTAL_SIZE = 1 << 30; // 1 GB
constexpr ST TOTAL_SIZE_IN_BYTES = TOTAL_SIZE * sizeof(char);
constexpr ST CNT = 19;
const string grand_name[CNT] = {
"4KB", "8KB", "16KB", "32KB", "64KB", "128KB", "256KB", "512KB", "1MB", "2MB",
"4MB", "8MB", "16MB", "32MB", "64MB", "128MB", "256MB", "512MB", "1GB"
};
const ST grand_size[CNT] = {
1 << 12, 1 << 13, 1 << 14, 1 << 15,
1 << 16, 1 << 17, 1 << 18, 1 << 19,
1 << 20, 1 << 21, 1 << 22, 1 << 23,
1 << 24, 1 << 25, 1 << 26, 1 << 27,
1 << 28, 1 << 29, 1 << 30
};
// granularity
double copyTest(char *dst, char *src, ST total, ST granularity, cudaMemcpyKind kind) {
ST idx = 0;
auto t1 = chrono::system_clock::now();
for (; idx < total; idx += granularity) {
cudaMemcpy(dst + idx, src + idx, granularity * sizeof(char), kind);
}
auto t2 = chrono::system_clock::now();
chrono::duration<double> diff = t2 - t1;
return diff.count();
}
// Runs copyTest once per granularity in grand_size and prints throughput.
// The 1024 numerator assumes total == 1 GB (= 1024 MB), so 1024 MB over
// tm seconds yields MB/s.
void rangeTest(char *dst, char *src, ST total, cudaMemcpyKind kind) {
for (int i = 0; i < CNT; i++) {
double tm = copyTest(dst, src, total, grand_size[i], kind);
double tp = 1024 / tm; // MB/s
cout << fixed << tp << endl;
}
}
// Measures H2D, D2D and D2H copy throughput for granularities from 4KB
// to 1GB over a fixed 1 GB payload.
// NOTE(review): the host buffers come from new[] (pageable memory);
// pinned memory (cudaMallocHost) would measure peak PCIe bandwidth --
// confirm which is intended. CUDA calls are unchecked throughout.
int main() {
char *buf = new char[TOTAL_SIZE];
char *buf2 = new char[TOTAL_SIZE];
char *gpu_buf, *gpu_buf2;
cout << "granularity: " << endl;
for (const auto &s : grand_name)
cout << s << endl;
cout << endl;
cudaMalloc((void **)&gpu_buf, TOTAL_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_buf2, TOTAL_SIZE_IN_BYTES);
memset(buf, 'a', TOTAL_SIZE);
cout << "host to device: " << endl;
rangeTest(gpu_buf, buf, TOTAL_SIZE, cudaMemcpyHostToDevice);
cout << endl;
cout << "(MB/s)" << endl;
cout << "device to device: " << endl;
rangeTest(gpu_buf, gpu_buf2, TOTAL_SIZE, cudaMemcpyDeviceToDevice);
cout << endl;
cout << "(MB/s)" << endl;
cout << "device to host: " << endl;
rangeTest(buf2, gpu_buf2, TOTAL_SIZE, cudaMemcpyDeviceToHost);
cout << endl;
// Reading one byte keeps the D2H result observable.
cout << buf2[32];
cout << "(MB/s)" << endl;
cudaFree(gpu_buf);
cudaFree(gpu_buf2);
delete[] buf;
delete[] buf2;
//#ifdef WIN32
// system("pause");
//#endif
return 0;
}
#include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR: in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
// Orders doubles by absolute value; usable on host and device (used with
// thrust::max_element for pivot selection).
struct comparator {
    // Local absolute-value helper (avoids relying on the math library).
    __host__ __device__ double fabs(double a){
        return (a < 0.0) ? -a : a;
    }
    // Strict weak ordering: |a| < |b|.
    __host__ __device__ bool operator()(double a, double b)
    {
        const double magA = fabs(a);
        const double magB = fabs(b);
        return magA < magB;
    }
};
// Debug dump of the augmented matrix. Iterates i over width and j over
// height, reading matrix[j * width + i] -- the same transposed-looking
// layout main() uses when filling the array.
__host__ void Printer(double* matrix, int height, int width)
{
std::cout << "Printer\n";
for (int i = 0; i < width; ++i)
{
for (int j = 0; j < height; ++j)
{
printf("a[i=%d, j=%d->%d] = %.1f ", i, j, j * width + i, matrix[j * width + i]);
}
printf("\n");
}
}
// Swaps the entries at offsets `row` and `rowWithMax` within every slice
// i = row .. height-1 (pivoting step), using a grid-stride loop over i.
// Storage layout matches the rest of the file: matrix[i * width + ...].
__global__ void SwapGPU(double* matrix, int width, int height, int row, int rowWithMax)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x; // total thread count = stride
double tmp;
for (int i = idx + row; i < height; i += xOffset)
{
tmp = matrix[i * width + row];
matrix[i * width + row] = matrix[i * width + rowWithMax];
matrix[i * width + rowWithMax] = tmp;
}
}
// Divides every entry below the pivot (slices row+1 .. height-1 at
// offset `row`) by the pivot value matrix[row * width + row], producing
// the elimination multipliers. Grid-stride loop.
__global__ void Normalization(double* matrix, int width, int height, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
for (int i = idx + row + 1; i < height; i += xOffset)
{
matrix[i * width + row] /= matrix[row * width + row];
}
}
// Forward-elimination step: updates the trailing submatrix (both indices
// past `row`) by subtracting multiplier * pivot-row entry. The
// multipliers were precomputed by Normalization. 2D grid-stride loops.
__global__ void ForwardGauss(double* matrix, int width, int height, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int xOffset = gridDim.x * blockDim.x;
int yOffset = gridDim.y * blockDim.y;
for (int i = idx + row + 1; i < width; i += xOffset)
{
for (int j = idy + row + 1; j < height; j += yOffset)
{
matrix[j * width + i] -= matrix[j * width + row] * matrix[row * width + i];
}
}
}
// Back-substitution step: once x[row] is known, removes its contribution
// from all earlier unknowns x[0 .. row-1]. Grid-stride loop running
// downward from row-1.
// NOTE(review): x[row] is read unnormalized here; the host divides only
// x[size-1] by its pivot before the loop -- verify this matches the
// normalization performed during forward elimination.
__global__ void BackwardGauss(double* matrix, double* x, int size, int row)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int xOffset = gridDim.x * blockDim.x;
for (int i = row - 1 - idx; i >= 0; i -= xOffset)
{
x[i] -= matrix[row * size + i] * x[row];
}
}
// Solves a dense size x size linear system read from stdin (matrix then
// right-hand side) by GPU Gaussian elimination with partial pivoting
// (pivot chosen by max |value| via thrust::max_element), then prints the
// solution in scientific notation with 10 digits.
// The matrix is stored augmented: width = size columns, height = size+1
// (the extra slice holds the RHS), in the transposed layout used by all
// kernels above.
int main(int argc, const char* argv[])
{
std::ios_base::sync_with_stdio(false);
std::cin.tie(nullptr);
int size;
std::cin >> size;
int height = size + 1;
int width = size;
double* matrix = new double[height * width];
// Read the coefficient matrix.
for (int i = 0; i < size; ++i)
{
for (int j = 0; j < size; ++j)
{
std::cin >> matrix[j * width + i];
}
}
// Read the right-hand side into the extra slice.
for (int i = 0; i < size; ++i)
{
std::cin >> matrix[size * size + i];
}
double* matrixGPU;
CSC(cudaMalloc(&matrixGPU, sizeof(double) * height * width));
CSC(cudaMemcpy(matrixGPU, matrix, sizeof(double) * height * width, cudaMemcpyHostToDevice));
int xThreadCount = 32;
int yThreadCount = 32;
int xBlockCount = 32;
int yBlockCount = 32;
comparator comp;
thrust::device_ptr<double> ptr, ptrMax;
int rowWithMax;
// Forward elimination with partial pivoting, one pivot per iteration.
for (int row = 0; row < size - 1; ++row)
{
ptr = thrust::device_pointer_cast(matrixGPU + row * size);
ptrMax = thrust::max_element(ptr + row, ptr + size, comp);
rowWithMax = ptrMax - ptr;
if (rowWithMax != row)
{
SwapGPU<<<dim3(xBlockCount * yBlockCount), dim3(xThreadCount * yThreadCount)>>>(matrixGPU, width, height, row, rowWithMax);
CSC(cudaGetLastError());
}
Normalization<<<dim3(xBlockCount * yBlockCount), dim3(xThreadCount * yThreadCount)>>>(matrixGPU, width, height, row);
CSC(cudaGetLastError());
ForwardGauss<<<dim3(xBlockCount, yBlockCount), dim3(xThreadCount, yThreadCount)>>>(matrixGPU, width, height, row);
CSC(cudaGetLastError());
}
CSC(cudaMemcpy(matrix, matrixGPU, sizeof(double) * width * height, cudaMemcpyDeviceToHost));
// Start back-substitution from the (eliminated) RHS slice.
double* x = new double[size];
for (int i = 0; i < size; ++i)
{
x[i] = matrix[width * width + i];
}
x[size - 1] /= matrix[(width - 1) * width + (width - 1)];
double* xGPU;
CSC(cudaMalloc(&xGPU, sizeof(double) * size));
CSC(cudaMemcpy(xGPU, x, sizeof(double) * size, cudaMemcpyHostToDevice));
for (int row = size - 1; row > 0; --row)
{
BackwardGauss<<<dim3(xBlockCount * yBlockCount), dim3(xThreadCount * yThreadCount)>>>(matrixGPU, xGPU, size, row);
CSC(cudaGetLastError());
}
CSC(cudaMemcpy(x, xGPU, sizeof(double) * size, cudaMemcpyDeviceToHost));
const int accuracy = 10;
for (int i = 0; i < size - 1; ++i)
{
std::cout << std::scientific << std::setprecision(accuracy) << x[i] << " ";
}
std::cout << std::scientific << std::setprecision(accuracy) << x[size - 1];
CSC(cudaFree(matrixGPU));
CSC(cudaFree(xGPU));
delete[] matrix;
delete[] x;
return 0;
}
5,115 | /* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/* TODO: You'll need a kernel here, as well as any helper functions
to call it */
/* Advances the 1-D wave equation one time step:
 * new[i] = 2*cur[i] - old[i] + constant*(cur[i+1] - 2*cur[i] + cur[i-1])
 * for interior nodes. Node 0 is driven by `left_boundary`, the last node is
 * pinned to 0. `constant` is (c*dt/dx)^2 per the in-loop comment.
 * Uses a grid-stride loop so any launch configuration covers all nodes. */
__global__
void waveEquationKernal(float *old_data, float *current_data, float *new_data,
int numberOfNodes, float constant, float left_boundary) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
// This is to make sure that thread index 0 can still move on to the
// next thread at blockDim.x * gridDim.x
// (thread 0 writes both boundary nodes, then joins the interior loop at
// its next grid-stride position).
if (index == 0) {
new_data[0] = left_boundary;
new_data[numberOfNodes - 1] = 0;
index += blockDim.x * gridDim.x;
}
// index > 0 keeps every other thread off boundary node 0.
while (index > 0 && index < numberOfNodes - 1) {
// Wave Equation!
// y_x,t+1 = 2*y_x,t - y_x,t-1 +
// (c*dt/dx)^2 * (y_x+1,t - 2*y_x,t + y_x-1,t)
new_data[index] = 2 * current_data[index]
- old_data[index]
+ constant
* (current_data[index + 1]
- 2 * current_data[index]
+ current_data[index - 1]);
index += blockDim.x * gridDim.x;
}
}
// Host-side launcher for waveEquationKernal. All three data pointers must be
// device pointers. No error checking or synchronization is done here; the
// launch is asynchronous and callers are expected to synchronize.
void waveEquation(float *old_data, float *current_data, float *new_data,
int numberOfNodes, float constant, float left_boundary,
int blocks, int threadsPerBlock) {
waveEquationKernal<<<blocks, threadsPerBlock>>>(old_data, current_data,
new_data, numberOfNodes,
constant, left_boundary);
}
|
5,116 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include <stdio.h>
using namespace std;
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
// Element-wise vector add: c[i] = a[i] + b[i].
// One thread per element with no bounds guard, so the launch must supply
// exactly one thread per array slot (a single block is assumed: only
// threadIdx.x is used).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Enumerates the CUDA devices on this machine and prints their main
// properties (grid/thread limits, memory sizes, compute capability, ...).
// Fixes: the cudaGetDeviceCount status was assigned but never checked, and
// the unused a/b/c demo arrays were removed.
int main()
{
    cudaError_t cudaStatus;
    int num = 0;
    cudaDeviceProp deviceProp;
    cudaStatus = cudaGetDeviceCount(&num);
    if (cudaStatus != cudaSuccess) {
        // Previously ignored: report the failure instead of printing nothing.
        fprintf(stderr, "cudaGetDeviceCount failed with error code %d\n", (int)cudaStatus);
        return 1;
    }
    for (int i = 0; i < num; i++)
    {
        cudaGetDeviceProperties(&deviceProp, i);
        cout << "设备 " << i + 1 << " 的主要属性: " << endl;
        cout << "设备显卡型号: " << deviceProp.name << endl;
        printf("maxGridSize:%d,%d,%d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
        printf("maxThreadDim:%d,%d,%d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
        printf("warpSize:%d\n", deviceProp.warpSize);
        printf("constanMemory:%d(K)\n", deviceProp.totalConstMem / 1024);
        cout << "设备全局内存总量(以MB为单位): " << deviceProp.totalGlobalMem / 1024 / 1024 << endl;
        cout << "设备上一个线程块(Block)中可用的最大共享内存(以KB为单位): " << deviceProp.sharedMemPerBlock / 1024 << endl;
        cout << "设备上一个线程块(Block)种可用的32位寄存器数量: " << deviceProp.regsPerBlock << endl;
        cout << "设备上一个线程块(Block)可包含的最大线程数量: " << deviceProp.maxThreadsPerBlock << endl;
        cout << "设备的计算功能集(Compute Capability)的版本号: " << deviceProp.major << "." << deviceProp.minor << endl;
        cout << "设备上多处理器的数量: " << deviceProp.multiProcessorCount << endl;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel:
// copies a and b to the device, launches addKernel with one thread per
// element, and copies the result back into c. Returns the first CUDA error
// encountered (cudaSuccess on success). All buffers are freed on every path.
// Fixes: the kernel launch is now checked with cudaGetLastError() (launch
// failures were silently ignored), and the deprecated cudaThreadSynchronize()
// was replaced with cudaDeviceSynchronize().
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
    // Kernel launches return no status: check for launch-configuration
    // errors (e.g. size exceeding the max threads per block) explicitly.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed with error code %d\n", (int)cudaStatus);
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during execution.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
5,117 | #include "memory.h"
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#define CACHED
#ifdef CACHED
#define CACHELEN 128
// One cache slot: a previously cudaMalloc'ed device allocation that can be
// handed out again instead of re-allocating.
typedef struct _tup
{
size_t bytes;  // size of the allocation in bytes (exact-match reuse key)
void* ptr;     // device pointer; 0 means the slot has never been used
bool free;     // true when the allocation is available for reuse
} _tup;
// Fixed-size allocation cache plus its lazy-initialization flag.
_tup cache[CACHELEN];
bool initialized = false;
#endif // CACHED
// Allocates `bytes` of zeroed device memory into *ptr, preferring to reuse a
// cached allocation of exactly the same size. Newly allocated pointers are
// registered in the first never-used cache slot; if the cache is full the
// pointer is simply not tracked (cuda_malloc_free will then cudaFree it).
// Failures abort via assert.
void
cuda_malloc_clear(void** ptr, size_t bytes)
{
cudaError_t err;
#ifdef CACHED
// One-time cache setup: mark every slot empty and available.
if (!initialized)
{
for (int i = 0; i < CACHELEN; i++)
{
cache[i].bytes = 0;
cache[i].ptr = 0;
cache[i].free = true;
}
initialized = true;
}
// Reuse path: an exact byte-size match that is currently free.
for (int i = 0; i < CACHELEN; i++)
{
if (cache[i].free && cache[i].bytes == bytes)
{
// We don't have to remalloc, we already have a valid free ptr
cache[i].free = false;
*ptr = cache[i].ptr;
err = cudaMemset(*ptr, 0, bytes);
assert(err == cudaSuccess);
return;
}
}
#endif // CACHED
// Malloc to device, check for errors
err = cudaMalloc(ptr, bytes);
assert(err == cudaSuccess);
// Set val to 0, check for errors
err = cudaMemset(*ptr, 0, bytes);
assert(err == cudaSuccess);
#ifdef CACHED
// Register the new allocation in the first slot that was never used
// (ptr == 0 distinguishes "empty" from "freed but reusable").
for (int i = 0; i < CACHELEN; i++)
{
if (cache[i].free && cache[i].ptr == 0)
{
cache[i].free = false;
cache[i].ptr = *ptr;
cache[i].bytes = bytes;
return;
}
}
#endif // CACHED
}
// Releases a pointer obtained from cuda_malloc_clear. Cached allocations are
// only marked reusable (the device memory is kept for later requests);
// untracked pointers are returned to the driver with cudaFree.
void
cuda_malloc_free(void* ptr)
{
#ifdef CACHED
    for (int slot = 0; slot < CACHELEN; slot++)
    {
        if (cache[slot].ptr != ptr)
            continue;
        // Found in the cache: keep the memory, just flag the slot free.
        cache[slot].free = true;
        return;
    }
#endif // CACHED
    // Not cached: actually release the device memory.
    cudaFree(ptr);
}
|
5,118 | #include "includes.h"
// Squares every input element: output[i] = input[i]^2.
// One thread per element with no bounds guard, so the launch must supply
// exactly one thread per slot.
__global__ void warmup(float *input, float *output) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const float v = input[idx];
    output[idx] = v * v;
}
5,119 | #include <iostream>
/*
This code is copied/adapted from
https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/
*/
using namespace std;
// Prints, for each CUDA device, its name and memory characteristics
// (memory clock, bus width, theoretical peak bandwidth).
// Fixes: label typos in the output ("Device Numer" -> "Device Number",
// "But Width" -> "Bus Width") and the first figure is the *memory* clock
// rate (prop.memoryClockRate), so it is now labelled as such.
int main(int argc, char const *argv[]) {
    int nDevices = 0;
    cudaGetDeviceCount(&nDevices);
    // sets nDevices to the number of CUDA capable devices (GPUs)
    cout << "Total Devices: " << nDevices << '\n';
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        cout << "Device Number: " << i << "\n";
        cout << "\t" << "Device Name:" << prop.name << "\n";
        cout << "\t" << "Memory Clock Rate(KHz):" << prop.memoryClockRate << "\n";
        cout << "\t" << "Bus Width(bits):" << prop.memoryBusWidth << "\n";
        // Peak bandwidth = 2 (DDR) * clock(kHz) * bus width(bytes) / 1e6, in GB/s.
        cout << "\t" << "Memory Bandwidth(GB/s):" <<
            2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6 << "\n";
    }
    return 0;
}
|
5,120 | /**
* @file SgemmGPU.cu
*
* @author btran
*
*/
#include "SgemmGPU.cuh"
#include <cublas_v2.h>
#include <thrust/device_vector.h>
namespace cuda
{
// Computes C = alpha*A*B + beta*C for n x n single-precision matrices using
// cuBLAS: copies A, B, C to device_vectors, runs cublasSgemm with no
// transposition, and copies the result back into C.
// NOTE(review): cuBLAS assumes column-major storage — confirm callers pass
// data in that layout. NOTE(review): every cublasStatus_t is assigned but
// never checked; failures are silently ignored. A fresh handle is created
// and destroyed per call, which is expensive if called repeatedly.
void sgemmGPU(int n, float alpha, const float* A, const float* B, float beta, float* C)
{
cublasStatus_t status;
cublasHandle_t handle;
status = cublasCreate(&handle);
int n2 = n * n;
// Host -> device copies via thrust (freed automatically on scope exit).
thrust::device_vector<float> dA(A, A + n2);
thrust::device_vector<float> dB(B, B + n2);
thrust::device_vector<float> dC(C, C + n2);
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &alpha, thrust::raw_pointer_cast(dA.data()), n,
thrust::raw_pointer_cast(dB.data()), n, &beta, thrust::raw_pointer_cast(dC.data()), n);
// Device -> host copy of the result.
thrust::copy(dC.begin(), dC.end(), C);
status = cublasDestroy(handle);
}
} // namespace cuda
|
5,121 | // ******************************************************************************************************
// PURPOSE : Print values for CUDA runtime variables for 3D configuration (4*4*4) threads. *
// LANGUAGE : CUDA C / CUDA C++ *
// ASSUMPTIONS : 3D Configuration 64 threads in each x,y & directions with thread block of (2*2*2) *
// DATE : 23 March 2020 *
// AUTHOR : Vaibhav BENDRE *
// vaibhav.bendre7520@gmail.com *
// ******************************************************************************************************
#include "cuda_runtime.h" // The C++ API with CUDA specific wrapper that deals with symbols, textures and device functions.
#include "device_launch_parameters.h" // Enables kernel launching parameters for device
#include<stdio.h>
// Each thread prints the full set of CUDA builtin coordinates it sees:
// its thread index, block index, block dimensions and grid dimensions.
// Debug/teaching kernel only — device printf output is serialized and slow.
__global__ void displayAttributeValues() {
printf("\nthreadIdx.x : %d, threadIdx.y : %d, threadIdx.z : %d,"
" blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d,"
" blockDim.x : %d, blockDim.y : %d, blockDim.z : %d,"
" gridDim.x : %d, gridDim.y : %d, gridDim.z : %d\n",
threadIdx.x,threadIdx.y,threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, gridDim.z);
}
int main() {
    // 64 threads total, arranged as a 3-D launch: 2x2x2 threads per block,
    // and enough blocks in each axis to cover a 4x4x4 thread volume.
    const unsigned int Nx{ 4 }, Ny{ 4 }, Nz{ 4 };
    dim3 block(2, 2, 2);
    dim3 grid(Nx / block.x, Ny / block.y, Nz / block.z);
    // Launch is asynchronous; the kernel just prints its coordinates.
    displayAttributeValues <<< grid, block >>> ();
    // Block until all device printf output has been produced, then reset
    // the device before exiting.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
5,122 | #include "mnist.hh"
#include <cassert>
#include <cstdio>
#include <stdexcept>
namespace mnist
{
namespace
{
// Dataset geometry: 70000 images of 784 (28x28) grayscale pixels each.
static constexpr std::size_t NIMGS = 70000;
static constexpr std::size_t IMG_SIZE = 784;
}

// Loads the packed MNIST file at `path`.
// *x receives NIMGS*IMG_SIZE pixel values scaled to [0,1]; *y receives
// NIMGS one-hot rows of length 10. Both arrays are allocated with new[] and
// owned by the caller. Throws std::runtime_error if the file cannot be
// opened or is truncated.
// Fixes: the fread return values were previously ignored (a short file
// yielded silent garbage rows), and the inner pixel loop shadowed the outer
// image index `i`.
void load(const std::string& path, dbl_t** x, dbl_t** y)
{
    FILE* f = fopen(path.c_str(), "rb");
    if (!f)
        throw std::runtime_error("mnist: can't open data file");
    *x = new dbl_t[NIMGS * IMG_SIZE];
    *y = new dbl_t[NIMGS * 10];
    for (std::size_t i = 0; i < NIMGS; ++i)
    {
        dbl_t* x_row = *x + i * IMG_SIZE;
        dbl_t* y_row = *y + i * 10;
        unsigned char pixs[IMG_SIZE];
        char digit;
        // Each record is IMG_SIZE pixel bytes followed by one label byte.
        if (fread(pixs, 1, IMG_SIZE, f) != IMG_SIZE || fread(&digit, 1, 1, f) != 1)
        {
            fclose(f);
            delete[] *x;
            delete[] *y;
            throw std::runtime_error("mnist: truncated data file");
        }
        for (std::size_t p = 0; p < IMG_SIZE; ++p)
            x_row[p] = pixs[p] / 255.0;
        digit_to_vector(digit, y_row);
    }
    fclose(f);
}

// Writes the one-hot encoding of `digit` into out[0..9].
void digit_to_vector(std::size_t digit, dbl_t* out)
{
    assert(digit < 10);
    for (std::size_t i = 0; i < 10; ++i)
        out[i] = 0;
    out[digit] = 1;
}

// Returns the argmax of v[0..9], i.e. the predicted digit.
std::size_t vector_to_digit(const dbl_t* v)
{
    std::size_t res = 0;
    for (std::size_t i = 1; i < 10; ++i)
        if (v[i] > v[res])
            res = i;
    return res;
}

// True when both output vectors decode to the same digit.
bool output_test(const dbl_t* a , const dbl_t* b)
{
    return vector_to_digit(a) == vector_to_digit(b);
}
}
|
5,123 | #include "cuda_runtime.h"
#include <chrono>
#include <cstdlib>
#include <iostream>
#include<sys/time.h>
using namespace std;
// Transposes the N x N matrix A into AT: AT[y + x*N] = A[x + y*N].
// Fix: added a bounds guard so launches whose ceil-div grid overshoots N no
// longer read and write out of bounds.
__global__ void transposeKernel(const double* A, double* AT, int N) {
    int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
    if (xIndex < N && yIndex < N) {
        AT[yIndex + xIndex * N] = A[xIndex + yIndex * N];
    }
}
// Benchmarks a matrix transpose on CPU vs GPU for a rank x rank matrix.
// Fix: the original used dim3 threadPerBlock(N, N) with N = 100, i.e.
// 10000 threads per block — far above the 1024-thread hardware limit — so
// the kernel never launched (and no error was checked). A 20x20 block
// (400 threads) is used instead; 20 divides N = 100 exactly, so the
// ceil-div grid spawns no out-of-range threads.
int main(void) {
    int rank = 100; // transpose a 100*100 matrix
    struct timeval start, end;
    int N = rank;
    dim3 threadPerBlock(20, 20);
    dim3 blockNumber((N + threadPerBlock.x - 1) / threadPerBlock.x, (N + threadPerBlock.y - 1) / threadPerBlock.y);
    size_t size = N * N * sizeof(double);
    double* h_A = (double*)malloc(size);
    double* h_AT = (double*)malloc(size);
    for (int i = 0; i < N * N; i++) {
        h_A[i] = i + 1;
    }
    // CPU transpose, timed with gettimeofday.
    int i = 0, k = 0;
    gettimeofday(&start, NULL);
    while (i < N * N) {
        for (int j = k; j < N * N; j += N) {
            h_AT[i++] = h_A[j];
        }
        k++;
    }
    gettimeofday(&end, NULL);
    int timeuseCPU = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    cout << "total time for cpu is " << timeuseCPU << "us" << endl;
    double* d_A = NULL;
    double* d_AT = NULL;
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_AT, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    // GPU transpose; synchronize so the timer covers the kernel itself.
    gettimeofday(&start, NULL);
    transposeKernel<<<blockNumber, threadPerBlock>>>(d_A, d_AT, N);
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);
    int timeuseGPU = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    cout << "total time use in GPU is " << timeuseGPU << "us" << endl;
    cudaMemcpy(h_AT, d_AT, size, cudaMemcpyDeviceToHost);
    if (timeuseGPU < timeuseCPU) {
        cout << "GPU is faster than CPU for " << timeuseCPU - timeuseGPU << " us" << endl;
    } else {
        cout << "CPU is faster than GPU for " << timeuseGPU - timeuseCPU << " us" << endl;
    }
    free(h_A);
    free(h_AT);
    cudaFree(d_A);
    cudaFree(d_AT);
    return 0;
}
// Placeholder kernel: performs no observable work (the local is never used);
// presumably exists only to exercise a kernel launch — TODO confirm.
__global__ void kernel( void ) {
int id = 1;
}
5,125 | //===- elementwise.cu -----------------------------------------*--- C++ -*-===//
//
// Copyright 2022 ByteDance Ltd. and/or its affiliates. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//===----------------------------------------------------------------------===//
namespace brt {
namespace cuda {
namespace kernel {

// Element-wise sum: output[i] = input_1[i] + input_2[i] for i in [0, n).
// One thread per element; threads whose index falls past n simply exit.
template <typename T>
__global__ void add_kernel(const T *input_1, const T *input_2, T *output,
                           int n) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  output[tid] = input_1[tid] + input_2[tid];
}

// Explicit instantiations for the element types used by callers.
template __global__ void add_kernel<float>(const float *, const float *,
                                           float *, int);
template __global__ void add_kernel<int>(const int *, const int *, int *, int);

} // namespace kernel
} // namespace cuda
} // namespace brt
|
5,126 | #include <iostream>
#include <memory>
#include <chrono>
#include <random>
// Element-wise vector add with a grid-stride loop:
// vec_c[i] = vec_a[i] + vec_b[i] for all i in [0, n).
// Fix: the original guarded with `if (i < n)` and then incremented i inside
// the branch, so the increment was dead code — any element beyond the grid
// size was never processed. The `while` completes the intended grid-stride
// pattern, making the kernel correct for any launch configuration.
__global__ void add(float* vec_a, float* vec_b, float* vec_c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < n)
    {
        vec_c[i] = vec_a[i] + vec_b[i];
        i += blockDim.x * gridDim.x;
    }
}
// Vector-add benchmark: adds two random n-element vectors on the GPU,
// verifies the result against the host sum, and reports timing/throughput.
// Fixes: missing argv bounds check; the device->host copy used sizeof(n)
// (sizeof(int)) instead of sizeof(float) — only correct by accident; the
// timer stopped before the asynchronous kernel finished (it measured launch
// overhead only); std::random_device was constructed once instead of per
// element; and the grid size now uses the standard ceil-div formula.
int main(int args, char *argv[])
{
    if (args < 2)
    {
        std::cout << "usage: " << argv[0] << " <n>" << std::endl;
        return 1;
    }
    int n = atoi(argv[1]);
    float *vec_a, *vec_b, *vec_c;
    std::unique_ptr<float[]> host_a(new float[n]);
    std::unique_ptr<float[]> host_b(new float[n]);
    std::unique_ptr<float[]> host_c(new float[n]);
    cudaMalloc((void**)&vec_a, n * sizeof(float));
    cudaMalloc((void**)&vec_b, n * sizeof(float));
    cudaMalloc((void**)&vec_c, n * sizeof(float));
    // One RNG for the whole fill loop.
    std::random_device rand{};
    for (int i = 0; i < n; i++)
    {
        host_a[i] = rand();
        host_b[i] = rand();
        host_c[i] = 0;
    }
    cudaMemcpy(vec_a, host_a.get(), n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(vec_b, host_b.get(), n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(vec_c, host_c.get(), n*sizeof(float), cudaMemcpyHostToDevice);
    int blocksize = 128;
    dim3 block (blocksize, 1, 1);
    dim3 grid ((n + blocksize - 1) / block.x, 1, 1);
    std::chrono::system_clock::time_point start, end;
    start = std::chrono::system_clock::now();
    add<<<grid, block>>>(vec_a, vec_b, vec_c, n);
    // Kernel launches are asynchronous: wait before stopping the clock.
    cudaDeviceSynchronize();
    end = std::chrono::system_clock::now();
    std::unique_ptr<float[]> host_result(new float[n]);
    cudaMemcpy(host_result.get(), vec_c, n * sizeof(float), cudaMemcpyDeviceToHost);
    // Verify against the host-side sum with a small absolute tolerance.
    int checker = 0;
    for (int i = 0; i < n; i++)
    {
        if (fabs(host_result[i] - (host_a[i] + host_b[i])) > 10e-8)
        {
            std::cout << "ng: " << host_result[i] << std::endl;
            checker++;
        }
    }
    if (checker == 0)
    {
        std::cout << "ok" << std::endl;
    }
    else
    {
        std::cout << checker << std::endl;
    }
    double time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0);
    std::cout << "n: " << n << " threads: " << blocksize << std::endl;
    std::cout << "time: " << time << " [ms]" << std::endl;
    std::cout << "perf: " << n / time / 1e6 << " [Gflops/sec]" << std::endl;
    cudaFree(vec_a);
    cudaFree(vec_b);
    cudaFree(vec_c);
    return 0;
}
|
5,127 | #include <cstdio>
extern "C" {
// Each thread prints its 3-D thread/block coordinates, sums the first 100
// bytes of `data` (so the buffer must hold at least 100 elements), and
// prints the sum. The printf calls are guarded because device-side printf
// requires compute capability 2.0+.
__global__ void helloWorld(char *data) {
#if __CUDA_ARCH__ >= 200
printf("Hello, world! I'm thread (%d,%d,%d) in block (%d,%d,%d).\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z);
#endif
// Every thread computes the same sum over data[0..99].
int sum = 0;
for (int i=0; i<100; i++) {
sum += data[i];
}
#if __CUDA_ARCH__ >= 200
printf("The sum is: %d\n", sum);
#endif
}
}
|
5,128 | #include "includes.h"
// 3x3 median filter over the flow fields u and v (clamped/replicated at the
// image borders), plus a Bregman-style update of bku/bkv.
// For each pixel, gathers the 3x3 neighbourhood into mu/mv, sorts both, and
// writes the median (element 4 of 9) to outputu/outputv.
// Fix: the right/bottom border clamps tested `col > width` / `row > height`,
// so the exact cases col == width and row == height matched no branch and
// left those taps at 0, biasing the median along the right and bottom edges.
// They now test >= to mirror the col < 0 / row < 0 clamps.
__global__ void SolveSmoothMedianGlobalKernel3(float* u, float* v, float* bku, float* bkv, int width, int height, int stride, float *outputu, float *outputv, float *outputbku, float* outputbkv)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const int pos = ix + iy * stride;
    if (ix >= width || iy >= height) return;
    float mu[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    float mv[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    // Gather the 3x3 neighbourhood with replicate-at-border addressing.
    for (int j = 0; j < 3; j++) {
        for (int i = 0; i < 3; i++) {
            int col = (ix + i - 1);
            int row = (iy + j - 1);
            int index = j * 3 + i;
            if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
                mu[index] = u[col + stride*row];
                mv[index] = v[col + stride*row];
            }
            else if ((col < 0) && (row >= 0) && (row < height)) {
                // Clamp to the left edge.
                mu[index] = u[stride*row];
                mv[index] = v[stride*row];
            }
            else if ((col >= width) && (row >= 0) && (row < height)) {
                // Clamp to the right edge (was `col > width`).
                mu[index] = u[width - 1 + stride*row];
                mv[index] = v[width - 1 + stride*row];
            }
            else if ((col >= 0) && (col < width) && (row < 0)) {
                // Clamp to the top edge.
                mu[index] = u[col];
                mv[index] = v[col];
            }
            else if ((col >= 0) && (col < width) && (row >= height)) {
                // Clamp to the bottom edge (was `row > height`).
                mu[index] = u[col + stride*(height - 1)];
                mv[index] = v[col + stride*(height - 1)];
            }
        }
    }
    // Selection sort of both 9-element windows; mu[4]/mv[4] become medians.
    float tmpu, tmpv;
    for (int j = 0; j < 9; j++) {
        for (int i = j + 1; i < 9; i++) {
            if (mu[j] > mu[i]) {
                tmpu = mu[j];
                mu[j] = mu[i];
                mu[i] = tmpu;
            }
            if (mv[j] > mv[i]) {
                tmpv = mv[j];
                mv[j] = mv[i];
                mv[i] = tmpv;
            }
        }
    }
    outputu[pos] = mu[4];
    outputv[pos] = mv[4];
    // Bregman variable update: accumulate the residual against the median.
    outputbku[pos] = bku[pos] + u[pos] - mu[4];
    outputbkv[pos] = bkv[pos] + v[pos] - mv[4];
}
5,129 | #include <stdio.h>
//onCPU
// Host-side counterpart of onGPU: just announces that it ran on the CPU.
void onCPU()
{
printf("This is running on CPU\n");
}
//Kernel runs on GPU
// Kernel: each thread prints its thread index within the block.
// Fix: the original passed &i (a pointer) to a %d conversion, printing an
// address (and invoking undefined behavior) instead of the index value.
__global__ void onGPU()
{
    // Thread index within the (1-D) block.
    int i = threadIdx.x;
    printf("This is running on GPU with the treadIndex of %d\n", i);
}
// Launches the demo kernel, waits for it, then runs the CPU function.
int main()
{
//1 block/grid, runs 5 threads/block
onGPU<<<1, 5>>>();
//Waits for GPU to finish (and flushes device printf output)
cudaDeviceSynchronize();
//runs on CPU normal c++ execution
onCPU();
}
|
5,130 | extern "C" {
// Row-major access into the N x N input image.
#define INPUT(i,j) input_grid[(j) + (i)*(N)]
// Search-window and patch ("neighbourhood") side lengths; both odd.
#define WINDOW_SIZE (7)
#define NEIGHBOR_SIZE (3)
// Non-local-means denoising of one pixel per thread: pixel i is replaced by
// a weighted average of the pixels j in its 7x7 search window, where the
// weight of j decays with the squared distance between the 3x3 patches
// around i and j (scaled by filtSigma) plus a spatial penalty.
__global__ void nlmSimple(int N, double const *input_grid, double *output_grid, float filtSigma)
{
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int pix_ix,
pix_iy,
pix_jx,
pix_jy;
double neighbor_j,
neighbor_i,
output = 0,
sum_weights = 0;
// Decode the flat thread index into (row, col) of pixel i.
pix_iy = gindex % N;
pix_ix = (gindex - pix_iy) / N;
if (pix_ix < N && pix_iy < N)
{
int window_radius = (WINDOW_SIZE - 1) / 2;
int neighbor_radius = (NEIGHBOR_SIZE - 1) / 2;
// Iterate through window
for (int k = -window_radius; k <= window_radius; k++)
for (int l = -window_radius; l <= window_radius; l++)
{
double weight = 0;
double distance = 0;
pix_jx = pix_ix + k;
pix_jy = pix_iy + l;
// Skip window pixels that fall outside the image.
if (pix_jx < 0 || pix_jx >= N ||
pix_jy < 0 || pix_jy >= N)
continue;
// Iterate through every pix_j neighbors
// (patch distance accumulates only taps valid for BOTH patches).
for (int p = -neighbor_radius; p <= neighbor_radius; p++)
for (int q = -neighbor_radius; q <= neighbor_radius; q++)
{
if (pix_jx + p < 0 || pix_jx + p >= N ||
pix_jy + q < 0 || pix_jy + q >= N ||
pix_ix + p < 0 || pix_ix + p >= N ||
pix_iy + q < 0 || pix_iy + q >= N)
continue;
neighbor_j = INPUT(pix_jx + p, pix_jy + q);
neighbor_i = INPUT(pix_ix + p, pix_iy + q);
distance += (neighbor_i - neighbor_j) * (neighbor_i - neighbor_j);
}
// Derive weight for pixels i and j
// (__expf is the fast single-precision exponential intrinsic).
weight = __expf(-(distance / filtSigma +
(k*k + l*l) * (1.0f)/(float)(WINDOW_SIZE* WINDOW_SIZE)));
sum_weights += weight;
// Sum for every pixel in the window
output += INPUT(pix_jx, pix_jy) * weight;
}
// Normalize
sum_weights = (double)(1 / sum_weights);
output *= sum_weights;
// Write output to global memory
output_grid[gindex] = output;
}
}
} |
5,131 | #include "includes.h"
// One (x, y) cell per thread: forms the weighted sum of the k*k
// neighbourhood signs G (via the precomputed `neighbors` index table) with
// weights w, thresholds it with a small dead zone, and writes the new sign
// into temp. `flag` accumulates how many cells changed sign this sweep;
// it_b/it_t offset the block/thread indices when n exceeds one launch.
// Fix: the change counter was updated with `*flag += buf`, a data race
// across all threads that could lose updates — it now uses atomicAdd.
__global__ void gpu_update_sign(int *G, double *w, int *neighbors, int k, int n, int *temp, int *flag, int it_b, int it_t)
{
    int result;
    double sum = 0.0;
    int buf = 0;
    // Global cell coordinates for this thread.
    int x = blockIdx.x + it_b * gridDim.x;
    int y = threadIdx.x + it_t * blockDim.x;
    if (x < n && y < n)
    {
        // Weighted sum over the k*k neighbourhood.
        for (int i = 0; i < k; i++) {
            for (int j = 0; j < k; j++) {
                sum += ((double)G[neighbors[x*n*k*k + y*k*k + i*k + j]]) * w[i*k + j];
            }
        }
        // Threshold with a 1e-6 dead zone. Offset 12 selects the centre of
        // the neighbourhood (assumes k == 5 — TODO confirm).
        if (sum > 1e-6) {
            result = 1;
            if (result != G[neighbors[x*n*k*k + y*k*k + 12]])
                buf++;
        }
        else if (sum < -(1e-6)) {
            result = -1;
            if (result != G[neighbors[x*n*k*k + y*k*k + 12]])
                buf++;
        }
        else
            result = G[neighbors[x*n*k*k + y*k*k + 12]];
        // Many threads update the shared counter: must be atomic.
        atomicAdd(flag, buf);
        temp[x*n + y] = result;
    }
}
5,132 | #include <stdio.h>
unsigned char* dev_bitmap;
// Minimal device-side complex number (real part r, imaginary part i) with
// just the operations the Julia iteration needs.
struct cuComplex
{
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b)
{
}
// Squared magnitude |z|^2 (avoids a sqrt in the divergence test).
__device__ float magnitude2(void)
{
return r * r + i * i;
}
// Complex multiplication.
__device__ cuComplex operator*(const cuComplex& a)
{
return cuComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
// Complex addition.
__device__ cuComplex operator+(const cuComplex& a)
{
return cuComplex(r + a.r, i + a.i);
}
};
// Returns 1 if pixel (x, y) of a DIM x DIM image belongs to the Julia set
// for c = -0.8 + 0.156i, 0 otherwise: iterate a -> a^2 + c up to 200 times
// and bail out once |a|^2 exceeds 1000.
__device__ int julia(int x, int y, int DIM)
{
// Map the pixel to the complex plane, centred and scaled by 1.5.
const float scale = 1.5;
float jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
cuComplex c(-0.8, 0.156); // change this with different values
cuComplex a(jx, jy);
int i = 0;
for (i = 0; i < 200; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
// Renders one pixel per block: blockIdx encodes the (x, y) pixel position
// and gridDim.x the image width. Writes RGBA bytes — red for points inside
// the Julia set, black otherwise, with full alpha.
__global__ void kernel(unsigned char* ptr, int DIM)
{
    // map from threadIdx/BlockIdx to pixel position
    const int x = blockIdx.x;
    const int y = blockIdx.y;
    const int offset = x + y * gridDim.x;
    // 255 when the pixel is in the set, 0 otherwise.
    const unsigned char shade = 255 * julia(x, y, DIM);
    ptr[offset * 4 + 0] = shade;
    ptr[offset * 4 + 1] = shade;
    ptr[offset * 4 + 2] = shade;
    ptr[offset * 4 + 3] = 255;
}
// Renders a DIM x DIM Julia image on the GPU into the global dev_bitmap
// buffer (RGBA, 4 bytes per pixel) and copies it into CheckImage.
// Launches one block per pixel with a single thread each; no error checks
// are performed.
void InitializeGPU(int DIM, unsigned char* CheckImage)
{
cudaMalloc((void**)&dev_bitmap, DIM * DIM * 4);
dim3 grid(DIM, DIM);
kernel <<<grid, 1>>> (dev_bitmap, DIM);
cudaMemcpy(CheckImage, dev_bitmap, (DIM * DIM * 4), cudaMemcpyDeviceToHost);
cudaFree(dev_bitmap);
}
|
5,133 | #include "includes.h"
// One step of a pairwise min-reduction: each thread combines elements
// 2*tid and 2*tid+1 of minOutEdges (optionally biased by d and masked by the
// "unvisited" set U when useD is set) and writes the smaller value to
// outDel[tid]. Values <= 0 are treated as "no edge" and mapped to INT_MAX.
// NOTE(review): INT_MAX presumably comes from <climits> via includes.h —
// confirm.
__global__ void min(int* U, int* d, int* outDel, int* minOutEdges, size_t gSize, int useD) {
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
// The pair of input slots this thread reduces.
int pos1 = 2*globalThreadId;
int pos2 = 2*globalThreadId + 1;
int val1, val2;
if(pos1 < gSize) {
val1 = minOutEdges[pos1] + (useD ? d[pos1] : 0);
if(pos2 < gSize) {
// Both slots in range: sanitize, mask, and keep the smaller.
val2 = minOutEdges[pos2] + (useD ? d[pos2] : 0);
val1 = val1 <= 0 ? INT_MAX : val1;
val2 = val2 <= 0 ? INT_MAX : val2;
if(useD) {
// Only nodes still in U participate.
val1 = U[pos1] ? val1 : INT_MAX;
val2 = U[pos2] ? val2 : INT_MAX;
}
if(val1 > val2) {
outDel[globalThreadId] = val2;
}
else{
outDel[globalThreadId] = val1;
}
}
else {
// Odd tail: only pos1 exists.
val1 = val1 <= 0 ? INT_MAX : val1;
if(useD) {
val1 = U[pos1] ? val1 : INT_MAX;
}
outDel[globalThreadId] = val1;
}
}
}
5,134 | #include "includes.h"
// Element-wise difference with a grid-stride loop:
// x18[i] = x16[i] - x17[i] for every i in [0, x19).
__global__ void x15(float* x16, float* x17, float* x18, int x19) {
  const int stride = gridDim.x * blockDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < x19; i += stride) {
    x18[i] = x16[i] - x17[i];
  }
}
5,135 | #include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <math.h>
#include <stdio.h>
#define N (1024*1024)
// Computes the Euclidean norm of an all-ones vector of N elements on the
// GPU via thrust::inner_product (dot product with itself, then sqrt on the
// host). Expected output: sqrt(N) = 1024.
int main() {
thrust::device_vector<float> dvec_x(N, 1.f);
float norm = sqrt(thrust::inner_product(dvec_x.begin(), dvec_x.end(), dvec_x.begin(), 0.0f));
printf("norm = %.0f\n", norm);
return 0;
}
|
5,136 | #include <stdio.h>
// Each thread prints a greeting tagged with its thread index in the block.
__global__ void hello_GPU(void){
int i = threadIdx.x;
printf("hello from GPU[%d]!\n",i);
}
// Prints from the CPU, then launches 2 blocks of 3 threads on the GPU and
// waits so the device printf output appears before exit.
int main(void){
printf("Hello, World - from CPU!\n");
hello_GPU<<<2,3>>>();
cudaDeviceSynchronize();
return 0;
}
|
5,137 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include "cuda_common.cuh"
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cstring>
// GPU element-wise sum: c[i] = a[i] + b[i] for i in [0, size).
// One thread per element, guarded so the ceil-div launch may overshoot.
__global__ void sum_array_gpu(int* a, int* b, int* c, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    c[idx] = a[idx] + b[idx];
}
// CPU reference implementation of the element-wise sum: c[i] = a[i] + b[i].
void sum_array_cpu(int* a, int* b, int* c, int size)
{
    for (int idx = 0; idx < size; ++idx)
        c[idx] = a[idx] + b[idx];
}
// Compares two int arrays element by element and reports the verdict on
// stdout; returns at the first mismatch.
void compare_arrays(int* a, int* b, int size)
{
    for (int idx = 0; idx < size; ++idx) {
        if (a[idx] != b[idx]) {
            printf("Arrays are different!");
            return;
        }
    }
    printf("Arrays are same\n");
}
// Adds two random int vectors on CPU and GPU, times each stage, and checks
// the results agree.
// Fixes: the GPU timer stopped immediately after the asynchronous launch
// (it measured launch overhead, not the kernel) — the synchronize now sits
// inside the timed region; and the malloc'd host buffers h_a/h_b/h_c were
// never freed.
int main(void)
{
    int size = 10000;
    int block_size = 256;
    int NO_BYTES = size * sizeof(int);
    // host pointers
    int* h_a, *h_b, *gpu_results;
    int* h_c;
    h_a = (int*)malloc(NO_BYTES);
    h_b = (int*)malloc(NO_BYTES);
    h_c = (int*)malloc(NO_BYTES);
    gpu_results = (int*)malloc(NO_BYTES);
    // Random byte-sized values.
    time_t t;
    srand((unsigned)time(&t));
    for (int i = 0;i < size;i++) {
        h_a[i] = (int)(rand() & 0xff);
        h_b[i] = (int)(rand() & 0xff);
    }
    clock_t cpu_start, cpu_end;
    cpu_start = clock();
    sum_array_cpu(h_a, h_b, h_c, size);
    cpu_end = clock();
    printf("Sum array CPU execution time : %4.6f \n",(double)((double)(cpu_end -
    cpu_start)/CLOCKS_PER_SEC));
    memset(gpu_results, 0, NO_BYTES);
    // device pointers
    int* d_a, *d_b, *d_c;
    cudaMalloc((int**)&d_a, NO_BYTES);
    cudaMalloc((int**)&d_b, NO_BYTES);
    cudaMalloc((int**)&d_c, NO_BYTES);
    clock_t htod_start, htod_end;
    htod_start = clock();
    cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice);
    htod_end = clock();
    printf("Sum array host to device time : %4.6f \n",(double)((double)(htod_end -
    htod_start)/CLOCKS_PER_SEC));
    // launching the grid (ceil-div so the tail is covered)
    dim3 block(block_size);
    dim3 grid((size / block.x) + 1);
    clock_t gpu_start, gpu_end;
    gpu_start = clock();
    sum_array_gpu<<<grid, block>>> (d_a, d_b, d_c, size);
    // Launches are asynchronous: wait before reading the clock.
    cudaDeviceSynchronize();
    gpu_end = clock();
    printf("Sum array GPU execution time : %4.6f \n",(double)((double)(gpu_end -
    gpu_start)/CLOCKS_PER_SEC));
    clock_t dtoh_start, dtoh_end;
    dtoh_start = clock();
    cudaMemcpy(gpu_results, d_c, NO_BYTES, cudaMemcpyDeviceToHost);
    dtoh_end = clock();
    printf("Sum array GPU total time : %4.6f \n",(double)((double)(dtoh_end -
    htod_start)/CLOCKS_PER_SEC));
    // array comparison
    compare_arrays(h_c, gpu_results, size);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Release ALL host buffers (h_a/h_b/h_c used to leak).
    free(gpu_results);
    free(h_a);
    free(h_b);
    free(h_c);
    cudaDeviceReset();
    return 0;
}
|
5,138 | #include <stdio.h>
#define N 1000
// Element-wise vector add: out[i] = a[i] + b[i] for i in [0, n).
// Fix: the original ignored the thread index entirely, so every launched
// thread redundantly looped over all n elements and wrote every slot. A
// grid-stride loop now partitions the work across threads while remaining
// correct for any launch configuration (including <<<1,1>>>).
__global__ void vector_add(float *out, float *a, float *b, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = a[i] + b[i];
    }
}
// Adds two N-element vectors on the GPU and prints the result.
// Fix: the host buffers h_a/h_b/h_c were allocated with malloc but released
// with cudaFree — undefined behavior; they are now released with free().
int main(){
    float *d_a, *d_b, *d_c;
    float *h_a, *h_b, *h_c;
    h_a = (float*)malloc(N * sizeof(float));
    h_b = (float*)malloc(N * sizeof(float));
    h_c = (float*)malloc(N * sizeof(float));
    cudaMalloc(&d_a, N * sizeof(float));
    cudaMalloc(&d_b, N * sizeof(float));
    cudaMalloc(&d_c, N * sizeof(float));
    // Initialize array
    for(int i = 0; i < N; i++){
        h_a[i] = 1.0f; h_b[i] = 2.0f;
    }
    // copy to device
    cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, N * sizeof(float), cudaMemcpyHostToDevice);
    int blocks, grids;
    blocks = 32;
    grids = (float)ceil((float)N / blocks);
    vector_add<<<grids, blocks>>>(d_c, d_a, d_b, N);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, N * sizeof(float), cudaMemcpyDeviceToHost);
    int i;
    for (i = 0; i < N; i++) {
        printf("%f ", h_c[i]);
    }
    printf("\n");
    // free device memory with cudaFree, host memory with free
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
5,139 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <pthread.h>
#include <assert.h>
#include <unistd.h>
#include <cuda_profiler_api.h>
#include <vector>
#include <unordered_map>
#include <iostream>
#include <fstream>
#include <numeric>
#include <functional>
#include <set>
#include <chrono>
//#include "lock.h"
using namespace std;
// Plain-old-data records shared between host and device (fixed-capacity
// inline arrays instead of pointers so whole structs can be memcpy'd).
typedef struct trans_node {
int value;
} TransNode;
// One market-basket transaction: its id and up to 1024 item codes.
typedef struct {
int trans_no;
int item_size;
int item_code[1024];
} Transaction;
// Per-item support count plus the indices of transactions containing it.
// NOTE(review): item_freq_count appends to trans_array without a bounds
// check — confirm no item occurs in more than 128 transactions.
typedef struct {
int item_no;
int freq;
int trans_array_size;
int trans_array[128];
} Item;
// A candidate itemset: its codes, supporting transactions, the two parent
// sets it was joined from, and whether min-support pruned it.
typedef struct {
int freq;
int item_set_size;
int item_set_code[16];
int trans_array_size;
int trans_array[16];
/* the indices of previous sets */
int set1_index;
int set2_index;
bool pruned;
} ItemSet;
typedef struct {
int freq;
int count;
int *item_code_array;
} SupportCount;
#define TRANS_NUM 1000
#define ITEM_NUM 2000
#define NUM_THREADS 1
#define BLOCK_SIZE 1
// Counts item frequencies over all transactions with a grid-stride loop.
// For every item occurrence, atomically increments the item's frequency and
// appends the transaction index to the item's transaction list.
__global__
void item_freq_count(int num_trans, Transaction *transArray, Item* itemArray)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int num_threads = gridDim.x*blockDim.x;
int i = tid;
while ( i < num_trans) {
int item_size = transArray[i].item_size;
for (int j = 0; j < item_size; j++) {
int item_code = transArray[i].item_code[j];
//itemArray[item_code].freq++;
atomicAdd(&(itemArray[item_code].freq), 1);
/* push the transaction to the item struct */
// atomicAdd returns the previous size, i.e. a slot unique to this thread.
// NOTE(review): no check against the trans_array capacity of 128.
int _idx = atomicAdd(&(itemArray[item_code].trans_array_size), 1);
itemArray[item_code].trans_array[_idx] = i;
}
i += num_threads;
}
}
// Builds the initial 1-itemsets: every item whose frequency reaches
// min_support is copied (with its transaction list) into itemsetArray at a
// slot claimed atomically through globalIdx. Grid-stride over all items.
// NOTE(review): ItemSet::trans_array holds 16 entries while Item::trans_array
// holds up to 128 — confirm trans_array_size cannot exceed 16 here.
__global__
void select_with_min_support(int num_items, Item* itemArray, int min_support, ItemSet* itemsetArray, int* globalIdx)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int num_threads = gridDim.x * blockDim.x;
int i = tid;
while (i < num_items) {
if (itemArray[i].freq >= min_support) {
/* get a place in itemsetArray */
int _idx = atomicAdd(globalIdx, 1);
itemsetArray[_idx].freq = itemArray[i].freq;
itemsetArray[_idx].item_set_size = 1;
itemsetArray[_idx].item_set_code[0] = itemArray[i].item_no;
itemsetArray[_idx].trans_array_size = itemArray[i].trans_array_size;
memcpy(itemsetArray[_idx].trans_array, itemArray[i].trans_array, itemArray[i].trans_array_size*sizeof(int));
}
i += num_threads;
}
}
// Returns true when trans_no is already recorded in the itemset's
// transaction list.
__device__
bool alreadyHasTrans(ItemSet* _item_set, int trans_no)
{
    for (int idx = 0; idx < _item_set->trans_array_size; idx++) {
        if (_item_set->trans_array[idx] == trans_no)
            return true;
    }
    return false;
}
/* search for transactions in the previous itemset, updating the transaction records
and returning the count */
// Counts how many of checked_itemset's transactions contain every item of
// candidate_itemset, appending each newly matched transaction index to the
// candidate's list, and returns the count.
__device__
int find_support_count_for_itemset(ItemSet* candidate_itemset, ItemSet* checked_itemset, Transaction* trans_array)
{
int count = 0;
//printf("checked item set trans size %d\n", checked_itemset->trans_array_size);
for (int i = 0; i < checked_itemset->trans_array_size; i++) {
int trans_idx = checked_itemset->trans_array[i];
Transaction* trans = &(trans_array[trans_idx]);
bool itemset_found = true;
int trans_no = -1;
// Every item of the candidate must appear in the transaction (and the
// transaction must not already be recorded on the candidate).
for (int j = 0; j < candidate_itemset->item_set_size; j++) {
int target_item_code = candidate_itemset->item_set_code[j];
bool single_item_found = false;
for (int k = 0; k < trans->item_size; k++) {
if (target_item_code == trans->item_code[k] &&
!alreadyHasTrans(candidate_itemset, trans_idx)) {
single_item_found = true;
trans_no = trans_idx;
break;
}
}
itemset_found &= single_item_found;
}
if (itemset_found) {
// Record the supporting transaction and bump the support count.
candidate_itemset->trans_array[candidate_itemset->trans_array_size++] = trans_no;
count++;
}
}
return count;
}
// For each candidate itemset (grid-stride), computes its support by scanning
// the transactions of both parent itemsets, then marks it pruned when the
// total falls below min_support.
__global__
void find_support_count(int candidateSetSize, ItemSet* candidateSet, int* globalIdx, ItemSet* currSet, Transaction* trans_array, int min_support)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int num_threads = gridDim.x * blockDim.x;
int i = tid;
while (i < (candidateSetSize)) {
// Parents this candidate was joined from.
int set1_idx = candidateSet[i].set1_index;
int set2_idx = candidateSet[i].set2_index;
int count1 = find_support_count_for_itemset(&(candidateSet[i]), &(currSet[set1_idx]), trans_array);
int count2 = find_support_count_for_itemset(&(candidateSet[i]), &(currSet[set2_idx]), trans_array);
candidateSet[i].freq = count1 + count2;
/* check with minimum spport */
if ((count1 + count2) >= min_support) {
//int _global_idx = atomicAdd(globalIdx, 1);
candidateSet[i].pruned = false;
}
else {
candidateSet[i].pruned = true;
//candidateSet[i].freq = -1;
}
i += num_threads;
}
/* block-level barrier */
//__syncthreads();
}
/* qsort comparator for int item codes.
Returns <0, 0, >0 for a<b, a==b, a>b respectively.
The original `*(int*)a - *(int*)b` overflows (undefined behavior) when the
operands are far apart, e.g. INT_MAX vs a negative value; the branchless
sign comparison below is overflow-free. */
int itemcodeComp(const void* a, const void* b)
{
    int lhs = *(const int*)a;
    int rhs = *(const int*)b;
    return (lhs > rhs) - (lhs < rhs);
}
/* qsort comparator ordering ItemSets lexicographically by their item
codes (both sets are assumed to have the same item_set_size). */
int itemsetComp(const void* a, const void* b)
{
    const ItemSet* lhs = (const ItemSet*)a;
    const ItemSet* rhs = (const ItemSet*)b;
    for (int idx = 0; idx < lhs->item_set_size; idx++) {
        int code_a = lhs->item_set_code[idx];
        int code_b = rhs->item_set_code[idx];
        if (code_a != code_b)
            return (code_a > code_b) ? 1 : -1;
    }
    return 0;
}
/* True when itemsets_set already holds an itemset with the same size and
identical item codes as checked_set. */
bool hasTheItemSet(std::set<ItemSet*>& itemsets_set, ItemSet* checked_set)
{
    const size_t code_bytes = checked_set->item_set_size * sizeof(int);
    for (ItemSet* candidate : itemsets_set) {
        if (candidate->item_set_size != checked_set->item_set_size)
            continue;
        if (memcmp(candidate->item_set_code, checked_set->item_set_code, code_bytes) == 0)
            return true;
    }
    return false;
}
/* Scan forward from start_pos and return the index of the last itemset
whose first (cardinality-1) item codes match those of
itemset_array[base_pos]. Returns -1 when cardinality < 2 and base_pos
when no later itemset shares the prefix. */
int find_last_eq_class_item(int array_size, ItemSet* itemset_array, int base_pos, int start_pos, int cardinality)
{
    if (cardinality < 2)
        return -1;
    ItemSet* base_item_set = &(itemset_array[base_pos]);
    int last_pos = base_pos;
    for (int pos = start_pos; pos < array_size; pos++) {
        ItemSet* check_item_set = &(itemset_array[pos]);
        bool prefix_match = true;
        for (int k = 0; k < cardinality - 1; k++) {
            if (base_item_set->item_set_code[k] != check_item_set->item_set_code[k]) {
                prefix_match = false;
                break;
            }
        }
        if (!prefix_match)
            break;          /* equivalence class ended (replaces the goto) */
        last_pos = pos;
    }
    return last_pos;
}
/* Build the next-cardinality candidate itemsets from the current ones.
For cardinality 2 every pair of single items becomes a candidate; for
higher cardinalities, itemsets sharing the same (cardinality-1)-item
prefix form an equivalence class and are joined pairwise within it.
Returns a malloc'ed ItemSet array (caller frees) with its length stored
in *nextSize, or NULL when nothing can be generated (empty input, empty
result, or result larger than the 1M-entry cap). */
void* genNextItemSetArray(int itemset_array_size, ItemSet* curr_itemset_array, int nextCardinality, int* nextSize)
{
int _arr_size = itemset_array_size;
int new_idx = 0;
if (itemset_array_size <= 0) {
return NULL;
}
assert(nextCardinality-1 == curr_itemset_array[0].item_set_size);
ItemSet* next_set = NULL;
if (nextCardinality == 2) {
/* cardinality 2: all C(n,2) pairs of single items */
int next_size = (_arr_size*(_arr_size-1)) / 2;
next_set = (ItemSet*)malloc(next_size*sizeof(ItemSet));
assert(next_set != NULL);
memset(next_set, 0, next_size*sizeof(ItemSet));
for (int i = 0; i < _arr_size-1; i++) {
for (int j = i+1; j < _arr_size; j++) {
/* set up new itemset */
next_set[new_idx].item_set_size = nextCardinality;
next_set[new_idx].item_set_code[0] = curr_itemset_array[i].item_set_code[0];
next_set[new_idx].item_set_code[1] = curr_itemset_array[j].item_set_code[0];
/* store the indices */
next_set[new_idx].set1_index = i;
next_set[new_idx].set2_index = j;
new_idx++;
}
}
*nextSize = next_size;
}
else {
/* partition the (sorted) itemsets into equivalence classes of
identical (nextCardinality-2)-item prefixes; each class is a
[first,last] index range. Classes of size 1 generate nothing. */
int i = 0;
vector< pair<int,int> > ranges_vec;
while (i < itemset_array_size) {
int j = find_last_eq_class_item(itemset_array_size, curr_itemset_array, i, i+1, nextCardinality-1);
if ( (j != -1) && (i != j) ) {
ranges_vec.push_back(make_pair(i,j));
}
/* j is never -1 here (nextCardinality-1 >= 2), so i always advances */
i = j+1;
}
/* total number of pairwise joins over all equivalence classes */
auto pairSum = [](vector< pair<int,int> >& _vec) {
int sum = 0;
for (int i = 0; i < _vec.size(); i++) {
int _size = (_vec[i].second-_vec[i].first+1);
sum += (_size*(_size-1)/2);
}
return sum;
};
/* allocate next level item set memory */
int next_size = pairSum(ranges_vec);
/* hard cap keeps the candidate explosion bounded */
if (next_size <= 0 || next_size > 1024*1024) {
return NULL;
}
set<ItemSet*> itemsets_set;
next_set = (ItemSet*)malloc((size_t)next_size*(size_t)sizeof(ItemSet));
assert(next_set != NULL);
memset(next_set, 0, next_size*sizeof(ItemSet));
for (auto range : ranges_vec) {
/* the priori nextCardinality-2 items should be the same */
for (int start_pos = range.first; start_pos <= range.second-1; start_pos++) {
for (int end_pos = start_pos+1; end_pos <= range.second; end_pos++) {
/* set up new itemset: prefix comes from start_pos, the last
item code comes from end_pos */
next_set[new_idx].item_set_size = nextCardinality;
memcpy(next_set[new_idx].item_set_code,
curr_itemset_array[start_pos].item_set_code,
curr_itemset_array[start_pos].item_set_size*sizeof(int));
next_set[new_idx].item_set_code[nextCardinality-1] = curr_itemset_array[end_pos].item_set_code[nextCardinality-2];
/*
if (hasTheItemSet(itemsets_set, &(next_set[new_idx]))) {
next_size--;
continue;
}
itemsets_set.insert(&(next_set[new_idx]));
*/
/* store the indices */
next_set[new_idx].set1_index = start_pos;
next_set[new_idx].set2_index = end_pos;
new_idx++;
}
}
}
*nextSize = next_size;
}
return (void*)next_set;
}
/* GPU Apriori frequent-itemset miner driver.
Reads (transaction,item) rows from data.csv, builds the transposed
(item -> transaction list) database on the GPU, then iteratively
generates, counts and prunes candidate itemsets of growing cardinality,
recording support counts for association-rule generation.
Options: -n num_threads, -b block_size, -i max unique items. */
int main(int argc, char *argv[])
{
fstream fs;
string line;
unordered_map<string, int> item_code_map;
unordered_map<int, int> transaction_map;
vector<SupportCount> support_count_vec;
int trans_count = 0; /* number of transactions */
int item_count = 0; /* number of unique items */
int min_support = 6; /* mininum supoort of items */
/* profiling */
size_t memory_use = 0, max_memory_use = 0;
size_t dev_memory_use = 0, max_dev_memory_use = 0;
/* args */
int opt;
int num_threads = 0;
int block_size = 0;
int item_num = ITEM_NUM;
const char *optstr = "n:b:i:";
while ((opt = getopt(argc, argv, optstr)) != -1) {
switch (opt) {
case 'n':
num_threads = atoi(optarg);
break;
case 'b':
block_size = atoi(optarg);
break;
case 'i':
item_num = atoi(optarg);
break;
}
}
printf("num threads %d, block size %d\n", num_threads, block_size);
Transaction *transArray = (Transaction*)malloc(TRANS_NUM*sizeof(Transaction));
memset(transArray, 0, TRANS_NUM*sizeof(Transaction));
memory_use += TRANS_NUM*sizeof(Transaction);
/* read from the file */
//fs.open("test.csv", ios::in);
fs.open("data.csv", ios::in);
/* each CSV line is "<trans_no>,<item>,..."; only the first two fields
are used */
while (getline(fs, line)) {
if (line.size() == 0) continue;
/* get transaction number */
ssize_t pos = line.find(",");
int trans_no = atoi(line.substr(0, pos).c_str());
ssize_t pos2 = line.find(",", pos+1);
string item = line.substr(pos+1, pos2-pos-1);
/* find item number */
if (item_code_map.find(item) == item_code_map.end()) {
item_code_map[item] = item_count++;
//printf("Item Count :%d -> %s\n", item_count, item.c_str());
}
/* find transaction number */
if (transaction_map.find(trans_no) == transaction_map.end()) {
transArray[trans_count].trans_no = trans_count;
transArray[trans_count].item_code[transArray[trans_count].item_size++] = item_code_map[item];
transaction_map[trans_no] = trans_count;
trans_count++;
//printf("Transaction Count :%d -> %d\n", trans_count, trans_no);
}
//else
{
int _idx = transaction_map[trans_no];
auto checkItemExist = [](Transaction* _tr, int _code) -> bool
{
bool ret = false;
for (int idx = 0; idx < _tr->item_size; idx++) {
if (_tr->item_code[idx] == _code) return true;
}
return ret;
};
//if (!checkItemExist(&(transArray[_idx]), item_code_map[item]))
/* NOTE(review): this block also runs for brand-new transactions (the
`else` is commented out), so a new transaction's first item is appended
twice; the de-duplication pass below removes the duplicate again */
transArray[_idx].item_code[transArray[_idx].item_size++] = item_code_map[item];
}
if (trans_count >= TRANS_NUM) break;
if (item_count >= item_num) break;
}
fs.close();
printf("Item Count: %d, Transaction Count: %d\n", item_count, trans_count);
size_t total = 0;
auto begin = chrono::high_resolution_clock::now();
/* sort item code array for each transaction, then drop duplicates
in place (runs of equal codes collapse to one entry) */
for (int _tr_idx = 0; _tr_idx < trans_count; _tr_idx++) {
qsort(transArray[_tr_idx].item_code, transArray[_tr_idx].item_size, sizeof(int), itemcodeComp);
int glb_i = 0;
int i;
for (i = 0; i < transArray[_tr_idx].item_size-1; i++) {
while (i < transArray[_tr_idx].item_size && transArray[_tr_idx].item_code[i] == transArray[_tr_idx].item_code[i+1]) {
i++;
}
transArray[_tr_idx].item_code[glb_i++] = transArray[_tr_idx].item_code[i];
}
if (i == transArray[_tr_idx].item_size-1) {
transArray[_tr_idx].item_code[glb_i++] = transArray[_tr_idx].item_code[i-1];
}
transArray[_tr_idx].item_size = glb_i;
}
auto end = chrono::high_resolution_clock::now();
total += chrono::duration_cast<chrono::milliseconds>(end-begin).count();
auto printTrans = [](int _arr_size, Transaction* _trans_array)
{
for (int _tr_idx = 0; _tr_idx < _arr_size; _tr_idx++) {
printf("Transaction %d:\n", _trans_array[_tr_idx].trans_no);
for (int _it_idx = 0; _it_idx < _trans_array[_tr_idx].item_size; _it_idx++) {
printf("\t Item %d\n", _trans_array[_tr_idx].item_code[_it_idx]);
}
}
};
//printTrans(trans_count, transArray);
begin = chrono::high_resolution_clock::now();
Item *itemArray = (Item*)malloc(item_count*sizeof(Item));
memset(itemArray, 0, item_count*sizeof(Item));
memory_use += item_count*sizeof(Item);
for (int i = 0; i < item_count; i++) {
itemArray[i].item_no = i;
}
/* request cuda memory */
Transaction *dev_transArray = NULL;
cudaMalloc(&dev_transArray, TRANS_NUM*sizeof(Transaction));
cudaMemcpy(dev_transArray, transArray, TRANS_NUM*sizeof(Transaction), cudaMemcpyHostToDevice);
dev_memory_use += TRANS_NUM*sizeof(Item);
Item *dev_itemArray = NULL;
cudaMalloc(&dev_itemArray, item_count*sizeof(Item));
cudaMemcpy(dev_itemArray, itemArray, item_count*sizeof(Item), cudaMemcpyHostToDevice);
dev_memory_use += item_count*sizeof(Item);
/* calculate single item frequency */
int num_threads_per_block = (num_threads < block_size) ? num_threads : block_size;
/* NOTE(review): the command-line -n/-b values are ignored here — the
launch uses the compile-time NUM_THREADS/BLOCK_SIZE; confirm intent */
//dim3 gridSize(num_threads/block_size);
dim3 gridSize(NUM_THREADS/BLOCK_SIZE);
dim3 blockSize(BLOCK_SIZE);
item_freq_count<<<gridSize, blockSize>>>(trans_count, dev_transArray, dev_itemArray);
/* copy the results back to host */
cudaMemcpy(itemArray, dev_itemArray, item_count*sizeof(Item), cudaMemcpyDeviceToHost);
/* sort transaction array for each item and de-duplicate in place
(same scheme as the per-transaction pass above) */
for (int _it_idx = 0; _it_idx < item_count; _it_idx++) {
qsort(itemArray[_it_idx].trans_array, itemArray[_it_idx].trans_array_size, sizeof(int), itemcodeComp);
int glb_i = 0;
int i;
for (i = 0; i < itemArray[_it_idx].trans_array_size-1; i++) {
while (i < itemArray[_it_idx].trans_array_size && itemArray[_it_idx].trans_array[i] == itemArray[_it_idx].trans_array[i+1]) {
i++;
}
itemArray[_it_idx].trans_array[glb_i++] = itemArray[_it_idx].trans_array[i];
}
if (i == itemArray[_it_idx].trans_array_size-1) {
itemArray[_it_idx].trans_array[glb_i++] = itemArray[_it_idx].trans_array[i-1];
}
itemArray[_it_idx].trans_array_size = glb_i;
}
end = chrono::high_resolution_clock::now();
total += chrono::duration_cast<chrono::milliseconds>(end-begin).count();
/* check point of transposed database */
auto printItems = [](int _arr_size, Item* _item_array)
{
for (int _it_idx = 0; _it_idx < _arr_size; _it_idx++) {
printf("Item %d (freq %d):\n", _item_array[_it_idx].item_no, _item_array[_it_idx].freq);
for (int _tr_idx = 0; _tr_idx < _item_array[_it_idx].trans_array_size; _tr_idx++) {
printf("\t Transaction %d\n", _item_array[_it_idx].trans_array[_tr_idx]);
}
}
};
//printItems(item_count, itemArray);
begin = chrono::high_resolution_clock::now();
/* start to prune */
int globalIdx = 0;
int *dev_globalIdx = NULL;
cudaMalloc(&dev_globalIdx, sizeof(int));
cudaMemcpy(dev_globalIdx, &globalIdx, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_itemArray, itemArray, item_count*sizeof(Item), cudaMemcpyHostToDevice);
ItemSet *itemsetArray = (ItemSet*)malloc(item_count*sizeof(ItemSet));
memset(itemsetArray, 0, item_count*sizeof(ItemSet));
memory_use += item_count*sizeof(ItemSet);
ItemSet *dev_itemsetArray = NULL;
cudaMalloc(&dev_itemsetArray, item_count*sizeof(ItemSet));
cudaMemcpy(dev_itemsetArray, itemsetArray, item_count*sizeof(ItemSet), cudaMemcpyHostToDevice);
dev_memory_use += item_count*sizeof(ItemSet);
/* kernel doing selection for single item with minimum support */
select_with_min_support<<<gridSize, blockSize>>>(item_count, dev_itemArray, min_support, dev_itemsetArray, dev_globalIdx);
cudaMemcpy(itemsetArray, dev_itemsetArray, item_count*sizeof(ItemSet), cudaMemcpyDeviceToHost);
cudaMemcpy(&globalIdx, dev_globalIdx, sizeof(int), cudaMemcpyDeviceToHost);
free(itemArray);
end = chrono::high_resolution_clock::now();
total += chrono::duration_cast<chrono::milliseconds>(end-begin).count();
/* check point of transposed database */
auto printItemSet = [](int _arr_size, ItemSet* _itemset_array)
{
for (int _it_idx = 0; _it_idx < _arr_size; _it_idx++) {
printf("ItemSet %d (size %d):\n", _it_idx, _itemset_array[_it_idx].item_set_size);
for (int i = 0; i < _itemset_array[_it_idx].item_set_size; i++) {
printf("\tItem %d", _itemset_array[_it_idx].item_set_code[i]);
}
printf("\n");
for (int i = 0; i < _itemset_array[_it_idx].trans_array_size; i++) {
printf("\tTransaction %d", _itemset_array[_it_idx].trans_array[i]);
}
printf("\n");
printf("\tSet Index (%d,%d)\n", _itemset_array[_it_idx].set1_index, _itemset_array[_it_idx].set2_index);
}
};
//printItemSet(globalIdx, itemsetArray);
/* Record in Support Count */
auto sc_record_func = [](vector<SupportCount>& vec, int itemset_count, ItemSet* itemset_array)
{
for (int is_idx = 0; is_idx < itemset_count; is_idx++) {
SupportCount sc;
sc.freq = itemset_array[is_idx].freq;
sc.count = itemset_array[is_idx].item_set_size;
sc.item_code_array = (int*)malloc(sc.count * sizeof(int));
memcpy(sc.item_code_array, itemset_array[is_idx].item_set_code, sc.count*sizeof(int));
qsort(sc.item_code_array, sc.count, sizeof(int), itemcodeComp);
vec.push_back(sc);
}
};
sc_record_func(support_count_vec, item_count, itemsetArray);
/* Now we get the transposed database that every item set with size 1 has a corresponding list of transactions */
/* Generate itemset with size 2 */
int cardinality = 2;
int currSetSize = globalIdx;
int candidateSetSize = 0;
int *dev_candidateSetSize = NULL;
ItemSet* currSet = itemsetArray;
ItemSet* dev_currSet = NULL;
ItemSet* candidateSet = NULL;
ItemSet* dev_candidateSet = NULL;
cudaMalloc(&dev_candidateSetSize, sizeof(int));
//cudaMalloc(&dev_currSet, currSetSize*sizeof(ItemSet));
//cudaMemcpy(dev_currSet, currSet, currSetSize*sizeof(ItemSet), cudaMemcpyHostToDevice);
/* Apriori main loop: grow cardinality until no candidates survive */
while (true) {
candidateSet = (ItemSet*)genNextItemSetArray(currSetSize, currSet, cardinality, &candidateSetSize);
if (candidateSetSize == 0 || candidateSet == NULL) {
break;
}
assert(candidateSet != NULL);
printf("\n\n Next candidate size is %d\n", candidateSetSize);
memory_use += candidateSetSize*sizeof(ItemSet);
begin = chrono::high_resolution_clock::now();
/* allocate GPU kernel memory */
cudaMemcpy(dev_candidateSetSize, &candidateSetSize, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_globalIdx, &globalIdx, sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&dev_currSet, currSetSize*sizeof(ItemSet));
assert(dev_currSet != NULL);
cudaMemcpy(dev_currSet, currSet, currSetSize*sizeof(ItemSet), cudaMemcpyHostToDevice);
cudaMalloc(&dev_candidateSet, candidateSetSize*sizeof(ItemSet));
cudaMemcpy(dev_candidateSet, candidateSet, candidateSetSize*sizeof(ItemSet), cudaMemcpyHostToDevice);
dev_memory_use += currSetSize*sizeof(ItemSet);
dev_memory_use += candidateSetSize*sizeof(ItemSet);
/* launch the kernel */
dim3 gSize(NUM_THREADS/BLOCK_SIZE);
dim3 bSize(BLOCK_SIZE);
find_support_count<<<gSize, bSize>>>(candidateSetSize,
dev_candidateSet,
dev_globalIdx,
dev_currSet,
dev_transArray,
min_support);
/* copy the result back */
cudaMemcpy(candidateSet, dev_candidateSet, candidateSetSize*sizeof(ItemSet), cudaMemcpyDeviceToHost);
end = chrono::high_resolution_clock::now();
total += chrono::duration_cast<chrono::milliseconds>(end-begin).count();
/* prune if freq == -1 */
/* compact the surviving (non-pruned) candidates to the array front */
int _glb_set_idx = 0;
for (int set_idx = 0; set_idx < candidateSetSize; set_idx++) {
if (!candidateSet[set_idx].pruned) {
//printf("---prune candidate %d freq %d\n", set_idx, candidateSet[set_idx].freq);
memcpy(&(candidateSet[_glb_set_idx++]), &(candidateSet[set_idx]), sizeof(ItemSet));
}
}
//candidateSetSize = _glb_set_idx;
//printItemSet(_glb_set_idx, candidateSet);
/* Make statistics for support count */
sc_record_func(support_count_vec, candidateSetSize, candidateSet);
/* update the parameters and free previously used memory */
free(currSet);
cudaFree(dev_currSet);
cardinality++;
currSet = candidateSet;
currSetSize = candidateSetSize;
//dev_currSet = dev_candidateSet;
cudaFree(dev_candidateSet);
globalIdx = 0;
if (_glb_set_idx <= 1) {
break;
}
max_memory_use = (max_memory_use < memory_use) ? memory_use : max_memory_use;
max_dev_memory_use = (max_dev_memory_use < dev_memory_use) ? dev_memory_use : max_dev_memory_use;
/* NOTE(review): currSetSize was just set to candidateSetSize above, so
both subtractions use the same value; presumably the intent was to
subtract the sizes of the buffers freed earlier this iteration */
memory_use -= currSetSize*sizeof(ItemSet);
dev_memory_use -= candidateSetSize*sizeof(ItemSet);
}
/* final result */
//printItemSet(currSetSize, currSet);
/* Finally generate association rules */
/* look up the recorded support count of an itemset (0 when unknown) */
auto get_support_count = [](vector<SupportCount>& vec, ItemSet* itemset)->int
{
int _size = itemset->item_set_size;
for (auto sc : vec) {
if (sc.count != _size) continue;
if (memcmp(itemset->item_set_code, sc.item_code_array, _size*sizeof(int)) != 0) continue;
return sc.freq;
}
return 0;
};
/* recursively enumerate all sub-itemsets of `size` items and compute the
confidence of (sub-itemset => itemset) */
function<void(ItemSet*, int, int, int, ItemSet*, vector<SupportCount>&)> get_rules_per_size;
get_rules_per_size = [&get_support_count, &get_rules_per_size](ItemSet* sub_itemset, int array_index, int size, int start_pos, ItemSet* itemset, vector<SupportCount>& vec)
{
sub_itemset->item_set_code[array_index] = itemset->item_set_code[start_pos];
if (array_index+1 == size) {
int _support_count = get_support_count(vec, sub_itemset);
/* now we can calculate the confidence */
if (_support_count == 0) return;
//printf("freq %f, s_count %f\n", (float)(itemset->freq), (float)(_support_count));
float confidence = (float)(itemset->freq) / (float)(_support_count);
//printf("-----------------Association Rules--------------------\n");
//printf("Items: \n");
//for (int i = 0; i < size; i++) printf("\tItem %d\t", sub_itemset->item_set_code[i]);
//printf("\nBase: \n");
//for (int i = 0; i < itemset->item_set_size; i++) printf("\tItem %d\t", itemset->item_set_code[i]);
//printf("\n\n===== Confidence %f =====\n", confidence);
//printf("------------------------------------------------------\n");
return;
}
for (int next_pos = start_pos+1; next_pos < itemset->item_set_size; next_pos++) {
get_rules_per_size(sub_itemset, array_index+1, size, next_pos, itemset, vec);
}
};
auto getRules = [&get_rules_per_size](ItemSet* itemset, int size, vector<SupportCount>& vec)
{
//int *_code_array = (int*)malloc(size*sizeof(int));
ItemSet *sub_itemset = (ItemSet*)malloc(sizeof(ItemSet));
memset(sub_itemset, 0, sizeof(ItemSet));
sub_itemset->item_set_size = size;
int array_index = 0;
for (int start_pos = 0; start_pos < itemset->item_set_size; start_pos++) {
get_rules_per_size(sub_itemset, array_index, size, start_pos, itemset, vec);
}
free(sub_itemset);
};
for (int idx = 0; idx < currSetSize; idx++) {
ItemSet* item_set = &currSet[idx];
for (int _size = 1; _size <= item_set->item_set_size; _size++) {
//getRules(item_set, _size, support_count_vec);
}
}
cudaProfilerStop();
/* NOTE(review): %llu expects unsigned long long; total and the memory
counters are size_t — matches on common LP64 ABIs but confirm or cast */
printf("Sumary : Item Count %d --- Trans Count %d\nExec Time %llu ms\n", item_count, trans_count, total);
printf("\t CPU memory max usage : %llu bytes\n", max_memory_use);
printf("\t GPU memory max usage : %llu bytes\n", max_dev_memory_use);
return 0;
}
|
#include <iostream>
#include <stdio.h>
#include <vector>
#include <list>
#include <utility>
#include <algorithm>
#include <iomanip>
/* Collects every cudaDeviceProp field of each visible CUDA device as
   (key, value) string pairs and pretty-prints them, keys left-aligned,
   one section per device. Call fill() before print(). */
class Properties {
private:
    typedef std::vector<std::pair<std::string, std::string>> PTYPE;
    std::list<PTYPE> allprops;    /* one property list per device */
    PTYPE* theseprops = nullptr;  /* list currently being filled by add() */
public:
    /* Add a NUL-terminated C-string value (e.g. prop.name). */
    Properties& add(const std::string& k, char* v) {
        theseprops->emplace_back(std::make_pair(k, std::string(v)));
        return *this;
    }
    /* Add any scalar value convertible via std::to_string. */
    template<typename T>
    Properties& add(const std::string& k, T v) {
        theseprops->emplace_back(std::make_pair(k, std::to_string(v)));
        return *this;
    }
    /* Add an array of exactly s elements, rendered as "[v0, v1, ...]". */
    template<typename T>
    Properties& add(const std::string& k, T v[], int s) {
        std::string temps;
        for (int i = 0; i < s; ++i) {
            temps += std::to_string(v[i]);
            if (i < (s-1)) temps += ", ";
        }
        theseprops->emplace_back(std::make_pair(k, "[" + temps + "]"));
        return *this;
    }
    /* Query every CUDA device and record its properties. */
    void fill() {
        int nDevices;
        cudaGetDeviceCount(&nDevices);
        cudaDeviceProp prop;
        for (int i = 0; i < nDevices; i++) {
            allprops.emplace_back(PTYPE());
            theseprops = &allprops.back();
            cudaGetDeviceProperties(&prop, i);
            (*this)
                .add("ECCEnabled", prop.ECCEnabled)
                .add("accessPolicyMaxWindowSize", prop.accessPolicyMaxWindowSize)
                .add("asyncEngineCount", prop.asyncEngineCount)
                .add("canMapHostMemory", prop.canMapHostMemory)
                .add("canUseHostPointerForRegisteredMem",
                     prop.canUseHostPointerForRegisteredMem)
                .add("clockRate", prop.clockRate)
                .add("computeMode", prop.computeMode)
                .add("computePreemptionSupported", prop.computePreemptionSupported)
                .add("concurrentKernels", prop.concurrentKernels)
                .add("concurrentManagedAccess", prop.concurrentManagedAccess)
                .add("cooperativeLaunch", prop.cooperativeLaunch)
                .add("cooperativeMultiDeviceLaunch", prop.cooperativeMultiDeviceLaunch)
                .add("deviceOverlap", prop.deviceOverlap)
                .add("directManagedMemAccessFromHost", prop.directManagedMemAccessFromHost)
                .add("globalL1CacheSupported", prop.globalL1CacheSupported)
                .add("hostNativeAtomicSupported", prop.hostNativeAtomicSupported)
                .add("integrated", prop.integrated)
                .add("isMultiGpuBoard", prop.isMultiGpuBoard)
                .add("kernelExecTimeoutEnabled", prop.kernelExecTimeoutEnabled)
                .add("l2CacheSize", prop.l2CacheSize)
                .add("localL1CacheSupported", prop.localL1CacheSupported)
                /* luid is an 8-byte identifier that is NOT NUL-terminated:
                   render it as a sized byte array (the char* overload would
                   read past the end of the field). */
                .add("luid[8]", prop.luid, 8)
                .add("luidDeviceNodeMask", prop.luidDeviceNodeMask)
                .add("major", prop.major)
                .add("managedMemory", prop.managedMemory)
                .add("maxBlocksPerMultiProcessor", prop.maxBlocksPerMultiProcessor)
                .add("maxGridSize[3]", prop.maxGridSize, 3)
                .add("maxSurface1D", prop.maxSurface1D)
                .add("maxSurface1DLayered[2]", prop.maxSurface1DLayered, 2)
                .add("maxSurface2D[2]", prop.maxSurface2D, 2)
                .add("maxSurface2DLayered[3]", prop.maxSurface2DLayered, 3)
                .add("maxSurface3D[3]", prop.maxSurface3D, 3)
                .add("maxSurfaceCubemap", prop.maxSurfaceCubemap)
                .add("maxSurfaceCubemapLayered[2]", prop.maxSurfaceCubemapLayered, 2)
                .add("maxTexture1D", prop.maxTexture1D)
                .add("maxTexture1DLayered[2]", prop.maxTexture1DLayered, 2)
                .add("maxTexture1DLinear", prop.maxTexture1DLinear)
                .add("maxTexture1DMipmap", prop.maxTexture1DMipmap)
                .add("maxTexture2D[2]", prop.maxTexture2D, 2)
                .add("maxTexture2DGather[2]", prop.maxTexture2DGather, 2)
                .add("maxTexture2DLayered[3]", prop.maxTexture2DLayered, 3)
                .add("maxTexture2DLinear[3]", prop.maxTexture2DLinear, 3)
                /* maxTexture2DMipmap has 2 elements; the original passed 3
                   and read past the end of the field. */
                .add("maxTexture2DMipmap[2]", prop.maxTexture2DMipmap, 2)
                .add("maxTexture3D[3]", prop.maxTexture3D, 3)
                .add("maxTexture3DAlt[3]", prop.maxTexture3DAlt, 3)
                .add("maxTextureCubemap", prop.maxTextureCubemap)
                .add("maxTextureCubemapLayered[2]", prop.maxTextureCubemapLayered, 2)
                .add("maxThreadsDim[3]", prop.maxThreadsDim, 3)
                .add("maxThreadsPerBlock", prop.maxThreadsPerBlock)
                .add("maxThreadsPerMultiProcessor", prop.maxThreadsPerMultiProcessor)
                .add("memPitch", prop.memPitch)
                .add("memoryBusWidth", prop.memoryBusWidth)
                .add("memoryClockRate", prop.memoryClockRate)
                .add("minor", prop.minor)
                .add("multiGpuBoardGroupID", prop.multiGpuBoardGroupID)
                .add("multiProcessorCount", prop.multiProcessorCount)
                .add("name[256]", prop.name)
                .add("pageableMemoryAccess", prop.pageableMemoryAccess)
                .add("pageableMemoryAccessUsesHostPageTables",
                     prop.pageableMemoryAccessUsesHostPageTables)
                .add("pciBusID", prop.pciBusID)
                .add("pciDeviceID", prop.pciDeviceID)
                .add("pciDomainID", prop.pciDomainID)
                .add("persistingL2CacheMaxSize", prop.persistingL2CacheMaxSize)
                .add("regsPerBlock", prop.regsPerBlock)
                .add("regsPerMultiprocessor", prop.regsPerMultiprocessor)
                .add("reservedSharedMemPerBlock", prop.reservedSharedMemPerBlock)
                .add("sharedMemPerBlock", prop.sharedMemPerBlock)
                .add("sharedMemPerBlockOptin", prop.sharedMemPerBlockOptin)
                .add("sharedMemPerMultiprocessor", prop.sharedMemPerMultiprocessor)
                .add("singleToDoublePrecisionPerfRatio",
                     prop.singleToDoublePrecisionPerfRatio)
                .add("streamPrioritiesSupported", prop.streamPrioritiesSupported)
                .add("surfaceAlignment", prop.surfaceAlignment)
                .add("tccDriver", prop.tccDriver)
                .add("textureAlignment", prop.textureAlignment)
                .add("texturePitchAlignment", prop.texturePitchAlignment)
                .add("totalConstMem", prop.totalConstMem)
                .add("totalGlobalMem", prop.totalGlobalMem)
                .add("unifiedAddressing", prop.unifiedAddressing)
                .add("uuid", (unsigned char*)prop.uuid.bytes, 16)
                .add("warpSize", prop.warpSize);
        }
    }
    /* Print all recorded devices; keys padded to the widest key width. */
    void print() {
        /* guard: front() on an empty list is undefined behavior (fill()
           not called, or no CUDA devices present) */
        if (allprops.empty()) return;
        int did = 0;
        size_t maxwidth = 0;
        for (auto i : allprops.front())
            maxwidth = std::max(i.first.length(), maxwidth);
        for (auto p : allprops ) {
            std::cout << "DeviceID: " << did << std::endl;
            for (auto i : p) {
                std::cout << " " << std::left << std::setw(maxwidth+1) << i.first << i.second << std::endl;
            }
            ++did;
        }
    }
};
/* calculate cores per streaming multiprocessor
https://stackoverflow.com/questions/32530604/how-can-i-get-number-of-cores-in-cuda-device
#include "cuda_runtime_api.h"
// you must first call the cudaGetDeviceProperties() function, then pass
// the devProp structure returned to this function:
int getSPcores(cudaDeviceProp devProp)
{
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major){
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if ((devProp.minor == 1) || (devProp.minor == 2)) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
case 7: // Volta and Turing
if ((devProp.minor == 0) || (devProp.minor == 5)) cores = mp * 64;
else printf("Unknown device type\n");
break;
case 8: // Ampere
if (devProp.minor == 0) cores = mp * 64;
else if (devProp.minor == 6) cores = mp * 128;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
*/
/* Enumerate all CUDA devices and pretty-print their properties. */
int main() {
    Properties p;
    p.fill();
    p.print();
    /* return instead of exit(0): exit() skips local destructors, so the
       Properties instance would never be cleaned up */
    return 0;
}
|
/* Metsai Aleksandros 7723
* metsalex@ece.auth.gr
*
* Game of life using CUDA. Multiple cells per thread and use of shared memory
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define THRESHOLD 0.4
#define CELLS_PER_THREAD 2
#define THREADS_PER_BLOCK (500/CELLS_PER_THREAD)
struct timeval startwtime, endwtime;
double seq_time;
/* One Game-of-Life generation on an N x N torus.
Each thread stages CELLS_PER_THREAD consecutive cells of its row — plus a
one-cell halo at the block edges — into the shared top/mid/bot row
buffers, synchronizes, then writes the next state into `newer`.
NOTE(review): the edge branches (i==0, i==N-1, j==0, j==N-1, lindex==1,
lindex==lsize) only line up if each block covers exactly one full row,
i.e. N == THREADS_PER_BLOCK*CELLS_PER_THREAD (= 500) — confirm against
the launch configuration in main(). */
__global__ void game_c (int *newer, int *old, int N)
{
int lsize=THREADS_PER_BLOCK*CELLS_PER_THREAD;
__shared__ int top[THREADS_PER_BLOCK*CELLS_PER_THREAD+2]; //Extended Tables
__shared__ int mid[THREADS_PER_BLOCK*CELLS_PER_THREAD+2];
__shared__ int bot[THREADS_PER_BLOCK*CELLS_PER_THREAD+2];
/* first global cell handled by this thread */
int index = blockIdx.x*blockDim.x*CELLS_PER_THREAD + threadIdx.x*CELLS_PER_THREAD;
int count;
int sum=0;
int i=(int) index/N;
int j= index%N;
int lindex = threadIdx.x*CELLS_PER_THREAD +1; //Local Index
/* stage this thread's cells into shared memory; boundary threads also
load the wrap-around halo cells (torus topology) */
for(count=0; count<CELLS_PER_THREAD;count++){
if(i==0){
//top
if(j==0){
top[0]= old[N*N-1];
mid[0]= old[N-1];
bot[0]= old[2*N -1];
top[1]= old[N*(N-1)];
mid[1]= old[0];
bot[1]= old[N];
}else if(j==(N-1)){
top[lindex+1]= old[N*(N-1)];
mid[lindex+1]= old[0];
bot[lindex+1]= old[N];
top[lindex]= old[N*N -1];
mid[lindex]= old[N-1];
bot[lindex]= old[2*N -1];
}else{
if(lindex==1){
top[lindex-1]= old[N*(N-1) +(j-1)];
mid[lindex-1]= old[j-1];
bot[lindex-1]= old[N+(j-1)];
top[lindex]= old[N*(N-1) +j];
mid[lindex]= old[j];
bot[lindex]= old[N+j];
}else if(lindex==(lsize)){
top[lindex+1]= old[N*(N-1) +(j+1)];
mid[lindex+1]= old[j+1];
bot[lindex+1]= old[N+ (j+1)];
top[lindex]= old[N*(N-1) +j];
mid[lindex]= old[j];
bot[lindex]= old[N +j];
}else{
top[lindex]= old[N*(N-1) +j];
mid[lindex]= old[j];
bot[lindex]= old[N+j];
}
}
}else if(i==(N-1)){
//bottom
if(j==0){
top[0]= old[N*(N-1) -1];
mid[0]= old[N*N -1];
bot[0]= old[N-1];
top[1]= old[N*(N-2)];
mid[1]= old[N*(N-1)];
bot[1]= old[0];
}else if(j==(N-1)){
top[lindex+1]= old[N*(N-2)];
mid[lindex+1]= old[N*(N-1)];
bot[lindex+1]= old[0];
top[lindex]= old[N*(N-1) -1];
mid[lindex]= old[N*N -1];
bot[lindex]= old[N-1];
}else{
// !!
if(lindex==1){
top[lindex-1]= old[(i-1)*N +(j-1)];
mid[lindex-1]= old[i*N +(j-1)];
bot[lindex-1]= old[j-1];
top[lindex]= old[(i-1)*N +j];
mid[lindex]= old[i*N +j];
bot[lindex]= old[j];
}else if(lindex==(lsize)){
top[lindex+1]= old[(i-1)*N +(j+1)];
mid[lindex+1]= old[i*N +(j+1)];
bot[lindex+1]= old[(j+1)];
top[lindex]= old[(i-1)*N +j];
mid[lindex]= old[i*N +j];
bot[lindex]= old[j];
}else{
top[lindex]= old[(i-1)*N +j];
mid[lindex]= old[i*N +j];
bot[lindex]= old[j];
}
}
}else if(j==0){
//left
top[0]= old[(i-1)*N +(N-1)];
mid[0]= old[i*N +(N-1)];
bot[0]= old[(i+1)*N +(N-1)];
top[1]= old[(i-1)*N];
mid[1]= old[i*N];
bot[1]= old[(i+1)*N];
}else if(j==(N-1)){
//right
top[lindex+1]= old[(i-1)*N];
mid[lindex+1]= old[i*N];
bot[lindex+1]= old[(i+1)*N];
top[lindex]= old[(i-1)*N +j];
mid[lindex]= old[i*N +j];
bot[lindex]= old[(i+1)*N +j];
}else{
//general case
if(lindex==1){
top[lindex-1]= old[(i-1)*N +(j-1)];
mid[lindex-1]= old[i*N +(j-1)];
bot[lindex-1]= old[(i+1)*N +(j-1)];
top[lindex]= old[(i-1)*N +j];
mid[lindex]= old[i*N +j];
bot[lindex]= old[(i+1)*N +j];
}else if(lindex==(lsize)){
top[lindex+1]= old[(i-1)*N +(j+1)];
mid[lindex+1]= old[i*N +(j+1)];
bot[lindex+1]= old[(i+1)*N +(j+1)];
top[lindex]= old[(i-1)*N +j];
mid[lindex]= old[i*N +j];
bot[lindex]= old[(i+1)*N +j];
}else{
top[lindex]= old[(i-1)*N +j];
mid[lindex]= old[i*N +j];
bot[lindex]= old[(i+1)*N +j];
}
}
lindex++;
j++;
}
//Restore values
j=index%N;
lindex=threadIdx.x*CELLS_PER_THREAD +1;
/* all staging writes must be visible block-wide before neighbours read */
__syncthreads();
for(count=0; count<CELLS_PER_THREAD; count++){
/* sum of the 8 neighbours around (i,j) */
sum= top[lindex-1] +top[lindex]+top[lindex+1]
+mid[lindex-1] +mid[lindex+1]
+bot[lindex-1] +bot[lindex] +bot[lindex+1];
/* Conway rules: 3 neighbours -> alive, 2 -> unchanged, else dead */
switch(sum){
case 3:
newer[i*N + j] = 1;
break;
case 2:
newer[i*N + j] = old[i*N + j];
break;
default:
newer[i*N + j] = 0;
}
lindex++;
/* NOTE(review): j advances but i is never re-derived, so a thread's
cells must not cross a row boundary */
j++;
}
}
void read_from_file(int *X, char *filename, int N);
void save_table(int *X, int N);
/* Driver: reads an NxN board from table<N>x<N>.bin, plays t generations
   of Game of Life on the GPU (kernel game_c), reports the wall-clock
   time, and saves the final board. */
int main(){
    int *table;
    int* newer;
    int* old;
    int *temp;
    int blocks, t, N, count;
    printf("Set the number of generations\n");
    scanf("%d", &t);
    printf("Set N (table size = NxN)\n");
    scanf("%d", &N);
    int size=N*N*sizeof(int);
    /* input board lives in table<N>x<N>.bin; snprintf cannot overflow the
       buffer the way the original sprintf into char[20] could */
    char filename[32];
    snprintf(filename, sizeof(filename), "table%dx%d.bin", N, N);
    printf("Reading %dx%d table from file %s\n", N, N, filename);
    table = (int *)malloc(N*N*sizeof(int));
    if (table == NULL) {
        fprintf(stderr, "malloc of %dx%d board failed\n", N, N);
        return 1;
    }
    read_from_file(table, filename, N);
    printf("This is kernel c\n");
    printf("The game will be played for %d generations N=%d\n", t, N);
    //!!!Start Timer!!!
    gettimeofday (&startwtime, NULL);
    //Allocate space of new and old in device
    cudaMalloc(&newer, size);
    cudaMalloc(&old, size);
    //copy table
    cudaMemcpy(old, table, size, cudaMemcpyHostToDevice);
    /* each block covers THREADS_PER_BLOCK*CELLS_PER_THREAD consecutive cells */
    blocks=(N*N)/(THREADS_PER_BLOCK*CELLS_PER_THREAD);
    //Play game for t generations
    for(count=0;count<t;count++){
        game_c<<<blocks, THREADS_PER_BLOCK>>>(newer, old, N);
        /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
           is the supported equivalent */
        cudaDeviceSynchronize();
        //swap pointers (ping-pong the two device buffers)
        temp=old;
        old=newer;
        newer=temp;
    }
    //copy back table
    cudaMemcpy(table, old, size, cudaMemcpyDeviceToHost);
    //!!!End Timer!!!
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
        + endwtime.tv_sec - startwtime.tv_sec);
    printf("Cuda clock time = %f\n", seq_time);
    save_table(table, N);
    cudaFree(newer);
    cudaFree(old);
    free(table);
    return(0);
}
/* Load N*N ints from 'filename' into X.
   Exits with a diagnostic when the file cannot be opened — the original
   passed the NULL FILE* straight to fread and crashed. */
void read_from_file(int *X, char *filename, int N){
    FILE *fp = fopen(filename, "r+");
    if (fp == NULL) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    int size = fread(X, sizeof(int), N*N, fp);
    printf("elements: %d\n", size);
    fclose(fp);
}
/* Write the N*N board X to cuda_c_table<N>x<N>.bin.
   Uses snprintf (the original sprintf could overflow char[20] for large
   N) and skips the write with a diagnostic when fopen fails instead of
   passing NULL to fwrite. */
void save_table(int *X, int N){
    FILE *fp;
    char filename[32];
    snprintf(filename, sizeof(filename), "cuda_c_table%dx%d.bin", N, N);
    printf("Saving table in file %s\n", filename);
    fp = fopen(filename, "w+");
    if (fp == NULL) {
        fprintf(stderr, "Could not open %s for writing\n", filename);
        return;
    }
    fwrite(X, sizeof(int), N*N, fp);
    fclose(fp);
}
|
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
/* Element-wise matrix addition: launched with one block per row and one
   thread per column, so the flat index is blockIdx.x*blockDim.x+threadIdx.x. */
__global__ void add(int* d_a,int* d_b,int* d_r)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    d_r[idx] = d_a[idx] + d_b[idx];
}
/* Reads two m x n integer matrices from stdin, adds them on the GPU
   (one block per row, one thread per column) and prints the result. */
int main(void)
{
    int *a, *b, *r, m, n, i;
    int *d_a, *d_b, *d_r;
    printf("Enter m,n : ");
    scanf("%d %d", &m, &n);
    a = (int*)malloc(m*n*sizeof(int));
    b = (int*)malloc(m*n*sizeof(int));
    r = (int*)malloc(m*n*sizeof(int));
    printf("Enter matrix 1:\n");
    for (i = 0; i < m*n; i++)
    {
        scanf("%d", &a[i]);
    }
    printf("Enter matrix 2:\n");
    for (i = 0; i < m*n; i++)
    {
        scanf("%d", &b[i]);
    }
    cudaMalloc((void **)&d_a, (m*n)*sizeof(int));
    cudaMalloc((void **)&d_b, (m*n)*sizeof(int));
    cudaMalloc((void **)&d_r, (m*n)*sizeof(int));
    cudaMemcpy(d_a, a, (m*n)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, (m*n)*sizeof(int), cudaMemcpyHostToDevice);
    /* no H2D copy of r: the kernel overwrites every element of d_r, so
       copying the uninitialized host buffer up was wasted work */
    add<<<m, n>>>(d_a, d_b, d_r);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("%s\n", cudaGetErrorString(error));
    }
    cudaMemcpy(r, d_r, (m*n)*sizeof(int), cudaMemcpyDeviceToHost);
    printf("Result matrix :\n");
    for (i = 0; i < m*n; i++)
    {
        printf("%d ", r[i]);
        /* each row holds n elements, so break after every n of them
           (the original used m, which is wrong whenever m != n) */
        if ((i+1) % n == 0)
            printf("\n");
    }
    /* release device and host buffers (the original leaked all six) */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_r);
    free(a);
    free(b);
    free(r);
    return 0;
}
#include <stdio.h>
#include <cmath>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
// One-thread-per-element vector add: c[i] = a[i] + b[i].
// No bounds guard — the caller must not launch more threads than
// there are elements.
__global__ void add(int *a, int *b, int *c)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = a[i] + b[i];
}
// Smoke-test helper: allocates `size` bytes on host and device, runs a
// single-thread add, and copies the device result buffer back into c.
// Only c[0] (= 1 + 4 = 5) is computed by the kernel; the rest of the
// buffer is now zeroed via cudaMemset so the caller no longer receives
// uninitialized device memory for c[1..].
void testmain(int size, int *c)
{
    int *a, *b;            // host copies of a, b
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    // Alloc space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Alloc space for host copies; only element 0 carries meaningful data
    a = (int *)malloc(size); a[0]=1;
    b = (int *)malloc(size); b[0]=4;
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Zero the result buffer: the kernel writes only index 0, so the
    // remaining elements would otherwise come back as garbage.
    cudaMemset(d_c, 0, size);
    // Launch add() kernel on GPU with a single thread (computes c[0])
    add<<<1,1>>>(d_a, d_b, d_c);
    // Copy result back to host (blocking, so no explicit sync needed)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Cleanup
    free(a); free(b);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return;
}
// Score every candidate cell center on a 10-unit lattice inside the
// box given by xyz_limits = {xmin, xmax, ymin, ymax, zmin, zmax}.
// A cell's score is the number of points (pts: ptnum x 3, flat xyz)
// whose distance to the center lies in [34, 37]; points above the
// center's z are skipped. Thread (d_ix, d_iy, d_iz) owns one cell.
// scores is a dense 100 x 100 x 400 grid — the same hard-coded layout
// find_best_score reads.
__global__ void para_find_loc_fine (float* pts,
int ptnum,
int* scores,
float* xyz_limits) {
    int d_ix = blockIdx.x * blockDim.x + threadIdx.x;
    int d_iy = blockIdx.y * blockDim.y + threadIdx.y;
    int d_iz = blockIdx.z * blockDim.z + threadIdx.z;
    float cx = xyz_limits[0] + d_ix*10;
    float cy = xyz_limits[2] + d_iy*10;
    float cz = xyz_limits[4] + d_iz*10;
    int idx = d_ix*100*400 + d_iy*400 + d_iz;
    // Cells outside the bounding box get a zero score.
    if (cx > xyz_limits[1] || cy > xyz_limits[3] || cz > xyz_limits[5]) {
        scores[idx] = 0;
        return;
    }
    int cnt = 0;
    for (int i = 0; i < ptnum; i++) {
        float tx = pts[i*3];
        float ty = pts[i*3+1];
        float tz = pts[i*3+2];
        // Only points at or below the candidate center contribute.
        if (tz > cz) continue;
        // sqrtf keeps the distance math explicitly in single precision.
        float d2c = sqrtf((tx-cx)*(tx-cx) + (ty-cy)*(ty-cy) + (tz-cz)*(tz-cz));
        // Shell test: count points in the [34, 37] distance band.
        if (d2c >= 34 && d2c <= 37) {
            cnt += 1;
        }
    }
    scores[idx] = cnt;
}
// Single-thread kernel: scan the 100 x 100 x 400 score grid (clamped
// to the extent implied by xyz_limits, 10-unit cell spacing) and write
// the coordinates of the highest-scoring cell into device_pred_xyz.
// If every score is <= 0, device_pred_xyz stays at (-10000, -10000, -10000).
__global__ void find_best_score (int* scores,
float* xyz_limits,
float* device_pred_xyz) {
    int best = 0;
    device_pred_xyz[0] = -10000;
    device_pred_xyz[1] = -10000;
    device_pred_xyz[2] = -10000;
    // Valid cell counts per axis, capped at the allocated grid size.
    int nx = int((xyz_limits[1] - xyz_limits[0])/10);
    if (nx > 100) nx = 100;
    int ny = int((xyz_limits[3] - xyz_limits[2])/10);
    if (ny > 100) ny = 100;
    int nz = int((xyz_limits[5] - xyz_limits[4])/10);
    if (nz > 400) nz = 400;
    for (int ix = 0; ix < nx; ix++) {
        for (int iy = 0; iy < ny; iy++) {
            for (int iz = 0; iz < nz; iz++) {
                int s = scores[ix*100*400 + iy*400 + iz];
                if (s > best) {
                    best = s;
                    device_pred_xyz[0] = xyz_limits[0] + 10*ix;
                    device_pred_xyz[1] = xyz_limits[2] + 10*iy;
                    device_pred_xyz[2] = xyz_limits[4] + 10*iz;
                }
            }
        }
    }
}
// Host wrapper: score all candidate cells, then pick the best one.
// All pointers must be device pointers. Blocks until both kernels
// finish; any launch/execution failure is now reported instead of
// being silently dropped.
void find_loc_fine(float* pts, int ptnum, int* scores, float* xyz_limits, float* device_pred_xyz) {
    // 100x10x8 blocks of 1x10x50 threads = 100 x 100 x 400 threads,
    // matching the hard-coded scores layout in the kernels.
    dim3 grid(100, 10, 8);
    dim3 block(1, 10, 50);
    para_find_loc_fine<<<grid, block>>>(pts, ptnum, scores, xyz_limits);
    find_best_score<<<1, 1>>>(scores, xyz_limits, device_pred_xyz);
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("find_loc_fine: %s\n", cudaGetErrorString(err));
    }
}
|
5,144 | __device__ unsigned int reduce_sum(unsigned int in)
{
// Block-wide sum reduction. Requirements (caller's responsibility):
// - launch must supply blockDim.x * sizeof(unsigned int) bytes of
//   dynamic shared memory;
// - every thread of the block must call this function together
//   (it contains __syncthreads());
// - NOTE(review): the halving loop assumes blockDim.x is a power of
//   two — confirm callers never use other block sizes.
extern __shared__ unsigned int sdata[];
// Perform first level of reduction:
// - Write to shared memory
unsigned int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1)
{
if (ltid < s)
{
sdata[ltid] += sdata[ltid + s];
}
__syncthreads();
}
// All threads return the block total (sdata[0] is written before the
// final barrier releases them).
return sdata[0];
}
// Block-wide sum reduction for floats using dynamic shared memory.
// The launch must provide blockDim.x * sizeof(float) bytes of dynamic
// shared memory, blockDim.x should be a power of two, and all threads
// of the block must call this function together.
__device__ float reduce_sum(float in)
{
    extern __shared__ float sdataF[];
    unsigned int lane = threadIdx.x;
    // Stage each thread's value in shared memory.
    sdataF[lane] = in;
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    unsigned int span = blockDim.x / 2;
    while (span > 0)
    {
        if (lane < span)
        {
            sdataF[lane] += sdataF[lane + span];
        }
        __syncthreads();
        span >>= 1;
    }
    // Every thread returns the block total.
    return sdataF[0];
}
|
5,145 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10
// Running (prefix) average: thread idx replaces d[idx] with the mean
// of d[0..idx]. Requires a single block of exactly N threads.
__global__ void gpu_shared_mem(float *d)
{
    int idx = threadIdx.x;
    // Stage the input in shared memory so every thread can read the
    // elements below its own index cheaply.
    __shared__ float buf[N];
    buf[idx] = d[idx];
    // All elements must be staged before any thread sums its prefix.
    __syncthreads();
    float total = 0.0f;
    for (int i = 0; i <= idx; i++)
        total += buf[i];
    d[idx] = total / (idx + 1.0f);
}
// Demo driver: fill 0..N-1, compute prefix averages on the GPU, print.
int main(void)
{
    float h[N], *d;   // was h[10]: tie the host buffer size to N
    for(int i=0; i<N; i++)
        h[i] = i;
    cudaMalloc(&d, sizeof(float)*N);
    cudaMemcpy(d, h, sizeof(float)*N, cudaMemcpyHostToDevice);
    gpu_shared_mem<<<1, N>>>(d);
    // Blocking copy — also synchronizes with the kernel.
    cudaMemcpy(h, d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    printf("Averaged array: ");
    for(int i=0; i<N; i++)
        printf("%f ", h[i]);
    printf("\n");
    cudaFree(d);      // was leaked
    return 0;
}
|
5,146 | #include "includes.h"
#define BIN_WIDTH 0.25
#define BLOCK_DIM 256
#define COVERAGE 180
#define LINE_LENGTH 30
#define BINS_TOTAL (COVERAGE * (int)(1 / BIN_WIDTH))
// One galaxy's sky position. Angles arrive from file in arcminutes and
// are converted to radians in place by adjust_galaxy_set, which also
// caches cos/sin of the declination.
typedef struct Galaxy
{
float declination;      // radians after adjust_galaxy_set
float declination_cos;  // cached cosf(declination)
float declination_sin;  // cached sinf(declination)
float right_ascension;  // radians after adjust_galaxy_set
} Galaxy;
// Convert arcminutes to radians: rad = arcmin * (pi / (60 * 180)).
// The conversion factor is folded into one float constant so the
// per-call arithmetic stays in single precision (the old form divided
// by a double M_PI expression on every call).
__device__ float arcminutes_to_radians(float arcminute_value)
{
    return arcminute_value * (float)(M_PI / (60.0 * 180.0));
}
// In-place conversion of a galaxy catalogue: turn both angles from
// arcminutes into radians and cache cos/sin of the declination.
// Grid-stride loop, so any launch configuration covers all n entries.
__global__ void adjust_galaxy_set(Galaxy *galaxy_set, int n)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
float declination = arcminutes_to_radians(galaxy_set[i].declination);
galaxy_set[i].declination = declination;
galaxy_set[i].declination_cos = cosf(declination);
galaxy_set[i].declination_sin = sinf(declination);
galaxy_set[i].right_ascension = arcminutes_to_radians(galaxy_set[i].right_ascension);
}
} |
5,147 | #include <cuda_runtime.h>
#include <stdio.h>
// Demonstrates grid-size arithmetic: for a fixed element count, show
// how many blocks the ceil-division formula yields for several block
// sizes. No kernels are launched.
int main(int argc, char **argv) {
// define total data elements
int nElem = 1024;
// define grid and block structure
dim3 block(1024);
// ceil-division so a partial block covers the tail
dim3 grid((nElem + block.x - 1) / block.x);
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 512;
grid.x = (nElem + block.x - 1) / block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 256;
grid.x = (nElem + block.x - 1) / block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 128;
grid.x = (nElem + block.x - 1) / block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset device before you leave
cudaDeviceReset();
return(0);
} |
5,148 | // Based on the Eric's Matlab implementation of ldpcEncoder1.
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
// Systematic LDPC encoder (host-side port of Eric's Matlab ldpcEncoder1).
// Inputs:
//   messageBits    - numMsgBits message bits (0/1), one per word
//   W_ROW_ROM      - numRowsInRom rows of numParBits generator words
//   shiftRegLength - length of each cyclic (barrel) shift register
// Output:
//   codeWord       - numMsgBits message bits followed by numParBits
//                    parity bits (systematic code)
// NOTE(review): parityBits/cSR are C99-style VLAs (a compiler
// extension in C++); numParBits must be small enough for the stack.
// NOTE(review): the barrel-shift loop strides by shiftRegLength and
// reads cSR[j + shiftRegLength - 1], so numParBits must be an exact
// multiple of shiftRegLength — confirm callers guarantee this.
void ldpcEncoder (unsigned int *messageBits, unsigned int* W_ROW_ROM,
unsigned int numMsgBits, unsigned int numRowsInRom,
unsigned int numParBits, unsigned int shiftRegLength,
unsigned int *codeWord) {
unsigned int parityBits[numParBits];
unsigned int cSR [numParBits];
unsigned int msgPtr = 0;
unsigned int wIndex;
unsigned int wordSize = sizeof(unsigned int);
unsigned int temp;
// Parity accumulators start at zero (XOR identity).
memset(parityBits, 0, numParBits *sizeof(parityBits[0]));
// Loop on the number of rows in the ROM.
for (int romRowIndex = 0; romRowIndex< numRowsInRom; romRowIndex++) {
wIndex = romRowIndex * numParBits;
// Load this ROM row into the shift-register image.
memmove(cSR, &(W_ROW_ROM[wIndex]), numParBits * wordSize);
// Loop through shiftRegLength cyclic (barrel) shifts of the registers
for (int dummyIndex = 0; dummyIndex < shiftRegLength; dummyIndex++) {
// Multiply (AND) the concatenated contents of shift registers with
// the incoming message bit and add (XOR) them with the accumulated
// parity bits, then store the result in the parity bit registers.
if (messageBits[msgPtr] == 1) {
for (unsigned int j=0; j< numParBits; j++) {
// (a + b) % 2 on 0/1 values == XOR
parityBits[j] = (parityBits[j] + cSR[j]) % 2;
}
}
// Clock the cyclic (barrel) shift registers. The values at the
// BOTTOM of the register matrix need to be placed at the
// TOP of the register matrix.
// It is a bit tricky, since the linear elements of cSR,
// contatin the shift register matrix in column order.
for (unsigned int j = 0; j < numParBits; j += shiftRegLength) {
// Save the last element of this register, shift the rest down
// by one, then wrap the saved element to the front.
temp = cSR[j - 1 + shiftRegLength];
memmove(&(cSR[j+1]), &(cSR[j]),(shiftRegLength -1)*wordSize);
cSR[j] = temp;
}
msgPtr++;
}
}
// Because this is a systematic code, we form the codeword by
// concatenating the parity bits at the end of the message bits.
memmove(codeWord, messageBits, numMsgBits*wordSize);
memmove(&(codeWord[numMsgBits]), parityBits, numParBits*wordSize);
}
|
5,149 | #include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#define MATRIX_SIZE 1024
#define BLOCK_SIZE 16;
// Thrust demo: fill three floats on the host, sort them on the
// device, copy them back, and print them in ascending order.
int main() {
    // allocate host- and device-side vectors of equal length
    thrust::host_vector<float> h(3);
    thrust::device_vector<float> d(3);
    // initialize on the host
    h[0] = 1.1;
    h[1] = 3.3;
    h[2] = 2.2;
    // host -> device
    thrust::copy(h.begin(), h.end(), d.begin());
    // ascending sort on the device
    thrust::sort(d.begin(), d.end());
    // device -> host
    thrust::copy(d.begin(), d.end(), h.begin());
    std::cout << h[0] << std::endl;
    std::cout << h[1] << std::endl;
    std::cout << h[2] << std::endl;
    return 0;
}
|
5,150 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define gpuErrchk(ans){gpuAssert((ans),__FILE__,__LINE__);}
// Report the outcome of a CUDA API call. On failure prints file/line
// and optionally exits (abort defaults to false, so by default errors
// are logged but execution continues). On success it stays verbose to
// match the original diagnostic behavior.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=false){
    if(code == cudaSuccess){
        printf("cuda returned code == cudaSuccess\n");
        return;
    }
    printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if(abort) exit(code);
}
// Reference O(n^3) host matrix multiply: C = A * B for n x n
// row-major matrices. Used to validate the GPU kernel.
void MatrixMulCPU(float* C, float* A,float* B, int n){
    for(int row = 0; row < n; row++){
        for(int col = 0; col < n; col++){
            float acc = 0.0f;
            for(int k = 0; k < n; k++){
                acc += A[row*n + k] * B[k*n + col];
            }
            C[row*n + col] = acc;
        }
    }
}
// Fill an n x n row-major matrix with pseudo-random floats in [0, 1]
// drawn from rand(); seed with srand() beforehand for reproducibility.
void matgen(float* a, int n){
    for(int r = 0; r < n; r++){
        for(int c = 0; c < n; c++){
            a[r*n + c] = (float)rand()/RAND_MAX;
        }
    }
}
// Naive GEMM: each in-range thread computes one element of c = a * b
// for n x n row-major matrices. Guards handle the partial blocks at
// the right and bottom edges of the grid.
__global__ void matrixMulCUDA(float *a, float *b, float *c, int n){
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(row >= n || column >= n)
        return;
    float acc = 0.0f;
    for(int k = 0; k < n; k++){
        acc += a[row * n + k] * b[k * n + column];
    }
    c[row * n + column] = acc;
}
// Benchmark a 1000x1000 matrix multiply on GPU vs CPU and print both
// wall-clock times (GPU time includes allocation and transfers).
int main(){
    time_t start,end;
    double time_cost;            // was float: the expression is double
    float *a,*b,*c;
    float *d_a,*d_b,*d_c;
    /*matrix width*/
    int n = 1000;
    /*block width*/
    int blockwidth =10;
    a = (float*)malloc(n*n*sizeof(float));
    b = (float*)malloc(n*n*sizeof(float));
    c = (float*)malloc(n*n*sizeof(float));
    srand(0);
    matgen(a,n);
    matgen(b,n);
    start = clock();
    cudaMalloc((void**)&d_a,sizeof(float)*n*n);
    cudaMalloc((void**)&d_b,sizeof(float)*n*n);
    cudaMalloc((void**)&d_c,sizeof(float)*n*n);
    cudaMemcpy(d_a,a,sizeof(float)*n*n,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,sizeof(float)*n*n,cudaMemcpyHostToDevice);
    dim3 blockdim(blockwidth,blockwidth,1);
    // Ceil-division: the old n/blockwidth+1 launched a redundant extra
    // block row/column whenever blockwidth divided n evenly.
    dim3 griddim((n+blockwidth-1)/blockwidth,(n+blockwidth-1)/blockwidth,1);
    matrixMulCUDA<<<griddim, blockdim>>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    cudaMemcpy(c,d_c,sizeof(float)*n*n,cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    end = clock();
    time_cost = (double)(end-start)/CLOCKS_PER_SEC;
    printf("the GPU processing time is %f s\n",time_cost);
    start = clock();
    // Overwrites c with the CPU result (GPU result is not compared).
    MatrixMulCPU(c, a, b, n);
    end = clock();
    time_cost = (double)(end-start)/CLOCKS_PER_SEC;
    printf("the CPU processing time is %f s\n",time_cost);
    // Host buffers were previously leaked.
    free(a);
    free(b);
    free(c);
    return 0;
}
|
5,151 | // sudo nvprof --unified-memory-profiling off ./ManagedMemoryVecAdd
// Use this command for profiling without errors for unified memory profiling
#include<iostream>
// Element-wise vector add: c[i] = a[i] + b[i] for i in [0, N).
// Threads past the end of the data simply return.
__global__ void vecAdd(int *a, int *b, int *c, int N){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Element-wise square: b[i] = a[i]^2 for i in [0, N).
// Threads past the end of the data simply return.
__global__ void squareVec(int *a, int *b, int N){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= N)
        return;
    b[idx] = a[idx] * a[idx];
}
// Unified-memory demo: c = a + b, then c_squared = c^2, with explicit
// prefetching between host and device to reduce page-fault overhead.
int main(){
    int N = 20;
    size_t size = N * sizeof(int);
    int *a, *b, *c;
    cudaMallocManaged(&a, size); // Unified memory; prefetch to reduce overhead
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);
    for(auto i = 0; i < N; i ++){
        a[i] = i;
        b[i] = 2*i;
    }
    // BUG FIX: the old code wrote `int id = cudaGetDevice(&id);`, which
    // overwrote the queried device id with the cudaError_t return value.
    int id;
    cudaGetDevice(&id);                  // current device id
    cudaMemPrefetchAsync(a, size, id);   // prefetch inputs/output to the GPU
    cudaMemPrefetchAsync(b, size, id);
    cudaMemPrefetchAsync(c, size, id);
    int NumThreadsPerBlock = 256;
    int BlockSize = (N + NumThreadsPerBlock -1)/NumThreadsPerBlock;
    vecAdd<<<BlockSize, NumThreadsPerBlock>>>(a, b, c, N);
    cudaDeviceSynchronize(); // wait for the kernel before reading c
    cudaMemPrefetchAsync(a, size, cudaCpuDeviceId); // bring data back to the CPU
    cudaMemPrefetchAsync(b, size, cudaCpuDeviceId);
    cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
    std::cout << "Printing the vector" << std::endl;
    for(auto i = 0; i < N; i++){
        std::cout << c[i] << std::endl;
    }
    cudaFree(a);
    cudaFree(b);
    int *c_squared;
    cudaMallocManaged(&c_squared, size);
    // Reuse the already-queried device id for the second stage.
    cudaMemPrefetchAsync(c, size, id);
    cudaMemPrefetchAsync(c_squared, size, id);
    squareVec<<<BlockSize, NumThreadsPerBlock>>>(c,c_squared, N);
    cudaDeviceSynchronize();
    cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
    cudaMemPrefetchAsync(c_squared, size, cudaCpuDeviceId);
    std::cout << "Printing the vector squared" << std::endl;
    for(auto i = 0; i < N; i++){
        std::cout << c_squared[i] << std::endl;
    }
    cudaFree(c_squared);
    cudaFree(c);
    return 0;
}
|
5,152 | #include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <random>
#define BLOCK_SIZE 500
using namespace std;
// Monte-Carlo pi: each thread counts its grid-stride share of points
// (d_X[i], d_Y[i]) inside the unit circle; thread 0 of each block then
// sums the per-thread counts into d_countInBlocks[blockIdx.x].
// The shared buffer is statically sized for blockDim.x <= 500.
__global__ void piCalcGPU(float* d_X, float* d_Y, int* d_countInBlocks, int blocksPerGrid, int N)
{
    __shared__ int perThread[500];
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * blocksPerGrid;
    int inside = 0;
    for (int i = gid; i < N; i += step) {
        if (d_X[i]*d_X[i] + d_Y[i]*d_Y[i] <= 1.0f) {
            inside++;
        }
    }
    perThread[threadIdx.x] = inside;
    // Every thread's partial count must be staged before thread 0 sums.
    __syncthreads();
    if (threadIdx.x == 0)
    {
        int blockTotal = 0;
        for (int t = 0; t < blockDim.x; t++)
        {
            blockTotal += perThread[t];
        }
        d_countInBlocks[blockIdx.x] = blockTotal;
    }
}
// Host reference: 4 * (fraction of (X[i], Y[i]) points inside the unit
// circle) over `interval` points — a Monte-Carlo estimate of pi.
float piCalcCPU(int interval, float * X, float * Y) {
    int inside = 0;
    for(int i = 0; i < interval; i++) {
        if (X[i]*X[i] + Y[i]*Y[i] <= 1.0){
            inside++;
        }
    }
    return 4.0f * inside / interval;
}
// Allocate and fill an array of N uniform random floats in [0, 1),
// seeded from the OS entropy source. The caller owns (and must
// delete[]) the returned array.
float * generateSequencesRandom(int N) {
    float * out = new float[N];
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<> dis(0.0, 1.0);
    for (int i = 0; i < N; ++i)
        out[i] = dis(gen);
    return out;
}
// Estimate pi on GPU and CPU from argv[1] random points and compare
// timings. NOTE(review): argv[1] is used without checking argc.
int main(int argc, char *argv[]) {
// NOTE(review): srand is unused — generateSequencesRandom seeds its
// own mt19937 from std::random_device.
srand(time(NULL));
int N = atoi(argv[1]);
float * h_X = generateSequencesRandom(N);
float * h_Y = generateSequencesRandom(N);
size_t size = N * sizeof(float);
float* d_X;
float* d_Y;
cudaMalloc((void **)&d_X, size);
cudaMalloc((void **)&d_Y, size);
cudaMemcpy(d_X, h_X, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, h_Y, size, cudaMemcpyHostToDevice);
int threadsPerBlock = BLOCK_SIZE;
// Ceil-division so a partial block covers the tail of the data.
int blocks = N / BLOCK_SIZE;
int blocksPerGrid = (N % BLOCK_SIZE > 0) ? blocks + 1 : blocks;
size_t countBlocks = blocksPerGrid * sizeof(int);
int* d_countInBlocks;
cudaMalloc((void **)&d_countInBlocks, countBlocks);
clock_t start1 = clock();
piCalcGPU<<<blocksPerGrid, threadsPerBlock>>>(d_X, d_Y, d_countInBlocks, blocksPerGrid, N);
if (cudaSuccess != cudaGetLastError())
cout << "Error!\n";
int* h_countInBlocks = new int[blocksPerGrid];
// Blocking copy: also synchronizes with the kernel above.
cudaMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, cudaMemcpyDeviceToHost);
// Sum the per-block hit counts on the host.
int N_in_circle = 0;
for (int i = 0 ; i < blocksPerGrid; i++) {
N_in_circle = N_in_circle + h_countInBlocks[i];
}
float pi_gpu = 4.0 * float(N_in_circle) / N;
clock_t stop1 = clock();
float gpu_time = (stop1-start1)/(float)CLOCKS_PER_SEC;
printf("time Pi GPU: %f s.\n", gpu_time);
printf("value Pi GPU: %f\n", pi_gpu);
clock_t start2 = clock();
float pi_cpu = piCalcCPU(N, h_X, h_Y);
clock_t stop2 = clock();
float cpu_time = (stop2-start2)/(float)CLOCKS_PER_SEC;
printf("time Pi CPU: %f s.\n", cpu_time);
printf("value Pi CPU: %f\n", pi_cpu);
printf("Acceleration: %f\n", cpu_time/gpu_time);
// NOTE(review): h_countInBlocks is never delete[]d (benign at exit).
delete[]h_X;
delete[]h_Y;
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_countInBlocks);
} |
5,153 | #include "includes.h"
#ifndef _KERNEL_H
#define _KERNEL_H
typedef struct Node {
int starting;
int no_of_edges;
}Node;
#endif
// One BFS frontier expansion step: each thread owns one vertex. If the
// vertex is in the current level, mark it visited, assign cost+1 to
// its unvisited neighbours, and put them in the next frontier. The
// host re-launches this kernel while *loop keeps being set.
// NOTE(review): neighbours reachable from several frontier vertices
// are written concurrently; all writers store the same cost value, so
// the race appears benign — confirm that assumption holds.
__global__ void bfs_kernel(Node* d_graph_nodes, int* d_edge_list, bool* d_graph_level, bool* d_graph_visited, int* d_cost, bool* loop, int no_of_nodes) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//d_graph_level[tid] is true means the vertex in the current level
//is being visited
if (tid < no_of_nodes && d_graph_level[tid]) {
d_graph_level[tid] = false;
d_graph_visited[tid] = true;
// Walk this vertex's adjacency slice [starting, starting + no_of_edges).
for (int i = d_graph_nodes[tid].starting; i <
(d_graph_nodes[tid].no_of_edges +
d_graph_nodes[tid].starting); i++) {
int id = d_edge_list[i];
if (!d_graph_visited[id]) {
//calculate in which level the vertex is visited
d_cost[id] = d_cost[tid] + 1;
d_graph_level[id] = true;
//to make the loop continues
*loop = true;
}
}
}
} |
5,154 | #include "includes.h"
// For each circuit vertex, write its edge count into d_mark at the
// vertex's edge-start offset (segment-boundary marking). The thread id
// is flattened from a 2D grid of 2D blocks.
// NOTE(review): circuitGraphEdgeCount is unused; presumably d_mark has
// that many entries — confirm against the caller.
__global__ void markSegments( unsigned short * d_mark, unsigned int circuitGraphEdgeCount, unsigned int * d_cg_edge_start, unsigned int * d_cedgeCount, unsigned int circuitVertexSize){
// Flatten (blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x) into one index.
unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x;
if(tid<circuitVertexSize){
d_mark[ d_cg_edge_start[tid]]=d_cedgeCount[tid];
}
} |
5,155 | #include "includes.h"
// Unpack a packed-int image into 3 bytes per pixel: for each pixel,
// img gets the value's base-256 digits (high, middle, low) at offsets
// 3*p, 3*p+1, 3*p+2. Each thread column processes TILE_DIM rows in
// steps of BLOCK_ROWS (both macros come from includes.h).
// NOTE(review): assumes gridDim covers the image exactly; there is no
// bounds guard — confirm the launch configuration matches the image.
__global__ void int_to_char(int * img2, unsigned char * img)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) {
img[3*((y+j)*width + x)] = img2[(y+j)*width + x] / (256*256);
img[3*((y+j)*width + x)+1] = img2[(y+j)*width + x] / 256 % 256;
img[3*((y+j)*width + x)+2] = img2[(y+j)*width + x] % 256;
}
} |
5,156 | #include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
using namespace std;
#define eps 1e-4
// 2d grid 2d block
// 2D grid / 2D block: one thread per element of an n-wide, m-tall
// matrix stored row-by-row (idx = row * n + col); c = a + b.
__global__ void matadd(const float *a, const float *b, float *c, int n, int m){
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if(col < n && row < m){
        int idx = row * n + col;
        c[idx] = a[idx] + b[idx];
    }
}
// 1d grid, 1d block
// 1D grid / 1D block over columns: each in-range thread walks its
// column through all m rows and adds the two matrices element-wise.
__global__ void matadd_1d(const float *a, const float *b, float *c, int n, int m){
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    if(col >= n)
        return;
    for(int row = 0; row < m; row++){
        int idx = row * n + col;
        c[idx] = a[idx] + b[idx];
    }
}
//2d grid, 1d block
// 2D grid / 1D block: blockIdx.y selects the row, the x dimension
// covers columns; c = a + b element-wise.
__global__ void matadd_2d(const float *a, const float *b, float *c, int n, int m){
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockIdx.y;
    if(col >= n || row >= m)
        return;
    int idx = row * n + col;
    c[idx] = a[idx] + b[idx];
}
// Verify c == a + b element-wise over all n*m entries, echoing each
// (a, b, c) triple; exits the process on the first mismatch.
// (The loop order visits indices i*m + j, which enumerates every cell
// exactly once, so the element-wise check is complete.)
void check_matadd(const float *a, const float *b, const float *c, int n, int m){
    for(int i = 0; i < n; i++){
        for(int j = 0; j < m; j++){
            int idx = i * m + j;
            std::cout<<a[idx]<<' '<<b[idx]<<' '<<c[idx]<<std::endl;
            if(a[idx] + b[idx] != c[idx]){
                printf("Not equal !!! \n");
                exit(1);
            }
        }
    }
    printf("Check matadd success !!\n");
}
// __global__ void matmul(const float *a, const float *b, float *c, int n, int m){
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// int j = blockDim.y * blockIdx.y + threadIdx.y;
// int idx = i * m + j;
// if(i < n and j < m){
// for(int k = 0; k < n; k++){
// }
// }
// }
// void check_matmul(const float *a, const float *b, const float *c, int n, int m){
// for(int i = 0; i < n; i++){
// for(int j = 0; j < m; j++){
// //c[i][j] += a[i][k] * b[k][j];
// float sum = 0;
// for(int k = 0; k < n; k++){
// sum += a[i][k] * b[k][j];
// }
// if(fabs(sum - c[i][j]) > eps){
// printf("Not equal !!\n");
// exit(1);
// }
// }
// }
// printf("Check matmul success!!!\n");
// }
// Driver: build two tiny random n x m matrices, add them on the GPU
// with a 2D launch, and validate against the host sum.
int main(){
// Deliberately tiny (2 x 2) so check_matadd's per-element echo stays short.
int n = 1<<1;
int m = 1<<1;
int total = n * m;
size_t size = (total) * sizeof(float);
float *ha = (float*)malloc(size);
float *hb = (float*)malloc(size);
float *hc = (float*)malloc(size);
float *da = NULL, *db = NULL, *dc = NULL;
cudaMalloc((void**)&da, size);
cudaMalloc((void**)&db, size);
cudaMalloc((void**)&dc, size);
// Random inputs in [0, 1].
for(int i = 0; i < total; i++){
ha[i] = rand() * 1.0/ (RAND_MAX);
hb[i] = rand() * 1.0/ (RAND_MAX);
}
cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
//int threadPerBlock = 512;
//int blockPerGrid = (total + threadPerBlock - 1) / threadPerBlock;
//clock_t st = clock();
// 2D launch with ceil-division so partial tiles cover the edges.
dim3 threadPerBlock(32,16);
dim3 blockPerGrid((n+threadPerBlock.x-1)/threadPerBlock.x, (m+threadPerBlock.y-1)/threadPerBlock.y);
matadd<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n, m);
// dim3 threadPerBlock(512);
// dim3 blockPerGrid((threadPerBlock.x + total - 1) / threadPerBlock.x);
// matadd_1d<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n, m);
// fastest
// dim3 threadPerBlock(512, 1);
// dim3 blockPerGrid((n+threadPerBlock.x-1)/threadPerBlock.x, (m+threadPerBlock.y-1)/threadPerBlock.y);
// matadd_2d<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n, m);
//clock_t ed = clock();
//cout<<"time used: "<<ed-st<<endl;
cudaDeviceSynchronize();
// Blocking copy of the GPU result, then host-side validation.
cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
check_matadd(ha, hb, hc, n, m);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(ha);
free(hb);
free(hc);
return 0;
} |
5,157 | template<class T>
// Device-side minimum of two values: returns b when b < a, otherwise a
// (same tie-breaking as std::min — a wins on equality).
__device__ const T& mymin(const T& a, const T& b)
{
return (b < a) ? b : a;
}
// first[i] = min(first[i], second[i]), one element per thread.
// Single-block launch assumed (indexing uses threadIdx.x only).
__global__ void call_min(double* first, const double* second)
{
    int i = threadIdx.x;
    first[i] = mymin(first[i], second[i]);
}
|
5,158 | #include <stdio.h>
// Each thread announces its block/thread coordinates from the device.
__global__ void hello_kernel(){
    printf("Hello from block %d, thread %d of the GPU!\n",
           blockIdx.x, threadIdx.x);
}
// Launch hello_kernel on a 2x2 grid and wait for it to finish.
// C linkage so plain-C hosts can call it. The synchronize both flushes
// device-side printf output and surfaces launch/execution failures,
// which the old code silently discarded.
extern "C" void hello(){
    printf("Executing kernel...\n");
    hello_kernel<<<2,2>>>();
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("hello: CUDA error: %s\n", cudaGetErrorString(err));
}
|
5,159 | #include <cuda_runtime.h>
#include <curand.h>
// Count how many of the uniform(0,1) samples in randomNumbers fall
// below p (a binomial draw built from pre-generated samples).
// NOTE(review): the scan starts at this thread's global index (tid)
// instead of 0, so different threads count different, overlapping
// suffixes of the same sample buffer — confirm this is the intended
// sampling scheme; a per-draw binomial would normally consume n
// dedicated samples.
// NOTE(review): the curandGenerator_t parameter `s` is never used.
__device__ double doBinomial(int n, double p, double *randomNumbers,curandGenerator_t s) {
int x = 0;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for(int i = tid; i < n; i++) {
if(randomNumbers[i]< p )
x++;
}
return x;
}
extern "C"
// Grid-stride loop over the output: result[i] = number of "successes"
// for probability ps[i], computed by doBinomial over the shared
// pre-generated sample buffer.
__global__ void binomial_double(int len,int n,double *ps,double *randomNumbers,double *result, curandGenerator_t s) {
    int stride = blockDim.x * gridDim.x;
    for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < len; i += stride) {
        result[i] = doBinomial(n, ps[i], randomNumbers, s);
    }
}
|
5,160 | #include <thrust/iterator/counting_iterator.h>
#include <thrust/reduce.h>
#include <iostream>
// Sum the integers 1..1e9 with thrust::reduce over a counting iterator.
int main(void)
{
    thrust::counting_iterator<int64_t> start(1);
    // The init value fixes thrust::reduce's accumulator type, so it
    // must be an int64_t: the old plain `0` (an int) made thrust
    // accumulate in 32-bit int, and the true sum (~5.0e17) overflowed.
    int64_t sum = thrust::reduce(start,
                                 start + 1000000000,
                                 static_cast<int64_t>(0),
                                 thrust::plus<int64_t>());
    std::cout << sum << std::endl;
    return 0;
}
|
5,161 | #include "includes.h"
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
// Main program
// One-thread-per-element vector add: c[i] = a[i] + b[i].
// No bounds guard — the launch (N_BLOCKS x THREADS_PER_BLOCK) must
// supply exactly N threads, which the macros above guarantee.
__global__ void add(int *a, int *b, int *c){
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
} |
5,162 | #include "includes.h"
// Softmax-with-cross-entropy backward pass: for each sample in the
// batch, subtract 1 from the gradient entry of its true class
// (diff is batch_size x num_labels, row-major; label holds the class
// index of each sample as a float).
// NOTE(review): label values are trusted to lie in [0, num_labels) —
// an out-of-range label indexes out of bounds.
__global__ void SoftmaxLossBackprop(const float *label, int num_labels, int batch_size, float *diff)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= batch_size)
return;
const int label_value = static_cast<int>(label[idx]);
// For each item in the batch, decrease the result of the label's value by 1
diff[idx * num_labels + label_value] -= 1.0f;
} |
5,163 | /**
* Calculates the histogram 256 with the CPU
* @param a - Input Data (1xN)
* @param H - Output 256x1 Histogram
* @param N - Length of a
*/
// CPU 256-bin histogram of a[0..N): zero the bins, then count each
// value into bin (a[i] % 256).
// NOTE(review): in C/C++ a negative a[i] makes a[i] % 256 negative,
// which indexes H out of bounds — confirm inputs are non-negative or
// normalize with ((a[i] % 256) + 256) % 256.
void h_HG(int* a, int N, int* H)
{
/* Set the data to 0 before cumulative sum */
for(int i = 0; i < 256; i++)
{
H[i] = 0;
}
/* Accumulate the sum for each data bin */
for(int i = 0; i < N; i++)
{
/* Calculate the brightness value ( a % 256 placeholder) */
H[ a[i] % 256 ]++;
}
} |
5,164 | #include<stdio.h>
#include<stdlib.h>
// One thread per element of a matSize x matSize matrix; guards handle
// the partial blocks at the right/bottom edges of the grid.
__global__ void matAdd(int *matrixA, int *matrixB, int *matrixC, int matSize)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= matSize || row >= matSize)
        return;
    int idx = row * matSize + col;
    matrixC[idx] = matrixA[idx] + matrixB[idx];
}
// Pretty-print a size x size matrix under a caption. Silently skipped
// for matrices larger than 10x10 to keep console output manageable.
void printMatrix(int *matrix, int size, char * matrixName)
{
    if(size > 10)
        return;
    printf("Printing Matrix: %s\n", matrixName);
    for(int i = 0; i < size * size; i++)
    {
        // Newline at the start of every row.
        if(i % size == 0)
            printf("\n");
        printf("%-3d ", matrix[i]);
    }
    printf("\n\n");
}
// Abort the whole program with a diagnostic naming the failing call
// if a CUDA API call did not return cudaSuccess.
void checkError(cudaError_t error, char * function)
{
    if(error == cudaSuccess)
        return;
    printf("\"%s\" has a problem with error code %d and desc: %s\n", function, error, cudaGetErrorString(error));
    exit(-1);
}
// Compare two matSize x matSize matrices element by element, reporting
// and returning false on the first difference.
// BUG FIX: the old loop ran only `i < matSize` and therefore checked
// just the first matSize of the matSize*matSize elements.
bool checkIfMatricesEqual(int * mat1, int * mat2, int matSize)
{
    int total = matSize * matSize;
    for(int i = 0; i < total; i++)
        if(mat1[i] != mat2[i]){
            printf("values different for i: %d\n", i);
            printf("mat1[i] = %d, mat2[i] = %d\n", mat1[i], mat2[i]);
            return false;
        }
    return true;
}
// Prompt repeatedly on stdin until the user enters an integer within
// [lowerBound, upperBound]; the accepted value is stored in *value.
void readValue(int *value, char * msg, int lowerBound, int upperBound)
{
    for(;;)
    {
        printf("%s(%d-%d): ", msg, lowerBound, upperBound);
        scanf("%d", value);
        if(*value >= lowerBound && *value <= upperBound)
            return;
    }
}
// Interactive benchmark: add two user-sized matrices on CPU and GPU,
// validate the GPU result, and report timings/speedup; loops until
// the user declines another launch configuration.
int main()
{
//Have some variables required for loop counters.
int i;
//have variables for threads per block, number of blocks.
int threadsPerBlock = 0, blocksInGrid = 0;
//create cuda event variables
cudaEvent_t hostStart, hostStop, deviceStart, deviceStop;
float timeDifferenceOnHost, timeDifferenceOnDevice;
//program variables
int matrixSize = 0;
size_t size; //variable to have the size of arrays on device
int *matA, *matB, *matC, *matCFromGPU; //matrices for host
int *gpuMatA, *gpuMatB, *gpuMatC; //matrices for Device
//initialize cuda timing variables
cudaEventCreate(&hostStart);
cudaEventCreate(&hostStop);
cudaEventCreate(&deviceStart);
cudaEventCreate(&deviceStop);
printf("Enter the size of the matrix: ");
scanf("%d", &matrixSize);
//calculate the size required on GPU
size = matrixSize * matrixSize * sizeof(int);
matA = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matB = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
matC = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
for(i = 0 ; i < matrixSize * matrixSize; i ++)
matA[i] = matB[i] = (i*2)%1000;
//printMatrix(matA, matrixSize, "Matrix A");
//printMatrix(matB, matrixSize, "Matrix B");
printf("Adding matrices on CPU...\n");
cudaEventRecord(hostStart, 0);
for(i = 0 ; i < matrixSize * matrixSize; i ++)
matC[i] = matA[i] + matB[i];
cudaEventRecord(hostStop, 0);
// NOTE(review): there is no cudaEventSynchronize(hostStop) before
// cudaEventElapsedTime here (the GPU path below does have one) —
// the elapsed-time query may run before the event completes.
cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
printf("Matrix addition over. Time taken on CPU: %5.5f\n", timeDifferenceOnHost);
printMatrix(matC, matrixSize, "Summation Matrix");
//allocate memory on GPU
checkError(cudaMalloc((void**)&gpuMatA, size), "Malloc for Matrix A");
checkError(cudaMalloc((void**)&gpuMatB, size), "Malloc for Matrix B");
checkError(cudaMalloc((void**)&gpuMatC, size), "Malloc for Matrix C");
//copy the matrix A and matrix B
checkError(cudaMemcpy(gpuMatA, matA, size, cudaMemcpyHostToDevice), "Matrix A Copy");
checkError(cudaMemcpy(gpuMatB, matB, size, cudaMemcpyHostToDevice), "Matrix B Copy");
bool done = false;
while(!done)
{
matCFromGPU = (int *)malloc(matrixSize * sizeof(int) * matrixSize);
//create a proper grid block using dim3
readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32);
readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (matrixSize + threadsPerBlock -1)/threadsPerBlock, 65535);
printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid);
printf("Adding matrices on GPU..\n");
dim3 blocks(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y));
//call the kernels to execute
cudaEventRecord(deviceStart, 0);
printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock);
matAdd<<<grid, blocks>>>(gpuMatA, gpuMatB, gpuMatC, matrixSize);
cudaEventRecord(deviceStop, 0);
cudaEventSynchronize(deviceStop);
cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
//copy the result back into host memory
checkError(cudaMemcpy(matCFromGPU, gpuMatC, size, cudaMemcpyDeviceToHost), "Matrix C Copy from device to Host");
if(checkIfMatricesEqual(matC, matCFromGPU, matrixSize))
printf("Kernels correct!\n");
else
printf("Kernel logic wrong!\n");
printf("Finished addition on GPU. Time taken: %5.5f\n", timeDifferenceOnDevice);
printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice);
printMatrix(matCFromGPU, matrixSize, "Summation Matrix from GPU");
char c = 'n';
printf("Again?(y/n): ");
while(true)
{
c = getchar();
if(c == 'y' || c == 'n')
break;
}
if(c == 'n')
break;
// NOTE(review): matCFromGPU is freed only when looping again; the
// final iteration's buffer leaks, as do gpuMatA/B/C and matA/B/C
// (benign at process exit but worth fixing).
free(matCFromGPU);
}
free(matA);
free(matB);
free(matC);
cudaEventDestroy(deviceStart);
cudaEventDestroy(deviceStop);
cudaEventDestroy(hostStart);
cudaEventDestroy(hostStop);
return 0;
}
|
5,165 | #include "includes.h"
// Element-wise guarded division: y[i] = y[i] / x[i] when x[i] > eps,
// otherwise y[i] = 0 (avoids dividing by tiny/zero denominators).
// One thread per element; threads with tid >= M do nothing.
__global__ void kernel_diagdiv_fl(int M, float eps, float *y, float *x){
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
/* make sure to use only M threads */
if (tid<M) {
if (x[tid]>eps) {
y[tid]=y[tid]/x[tid];
} else {
y[tid]=0.0f;
}
}
} |
5,166 | // Homework 8: CUDA implementation
// Mike James
// 5/3/2018
#include <cstdlib>
#include <stdio.h>
#define max 1024
#define elements 2
// Dot product of x and y (length *i) accumulated into the scalar *k.
// Each thread handles one element and adds its product atomically;
// the caller must zero *k before the launch.
// BUG FIX: the old body had every thread looping over all elements and
// updating *k with an unsynchronized read-modify-write, so products
// were both re-added per thread and lost to races.
__global__ void dotprod(float *x, float *y, float *k, int *i) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < *i) {
        atomicAdd(k, x[tid] * y[tid]);
    }
}
// Dot product of two all-ones vectors whose length is chosen by
// argv[1] (index into N). Prints k = length.
int main(int argc, char *argv[])
{
    // Host variables
    unsigned int N[elements] = {100, 1024};
    float x[max], y[max];
    float k = 0.0f;     // dot-product result
    int i = 0;          // vector length actually used
    // GPU variables
    float *d_x, *d_y, *d_k;
    int *d_i;
    // Guard the command-line access the old code performed blindly.
    if (argc < 2) {
        printf("usage: %s <0|1>\n", argv[0]);
        return 1;
    }
    i = N[atoi(argv[1])];
    size_t size = i * sizeof(float);
    // initialize data (x and y)
    for (int j = 0; j < max; j++) {
        x[j] = y[j] = 1.0;
    }
    // GPU allocation. BUG FIX: the result and the length are scalars;
    // the old code allocated and copied i-element buffers for both and
    // then copied i*sizeof(float) bytes back into the single host
    // float `k`, overflowing the stack.
    cudaMalloc((void **)&d_x, size);
    cudaMalloc((void **)&d_y, size);
    cudaMalloc((void **)&d_k, sizeof(float));
    cudaMalloc((void **)&d_i, sizeof(int));
    // send data to GPU
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_i, &i, sizeof(int), cudaMemcpyHostToDevice);
    // The kernel accumulates into *d_k, so it must start from zero.
    cudaMemset(d_k, 0, sizeof(float));
    // Launching kernel on GPU
    dotprod<<<1,i>>>(d_x, d_y, d_k, d_i);
    // Collect the scalar result back to the host
    cudaMemcpy(&k, d_k, sizeof(float), cudaMemcpyDeviceToHost);
    // Answer
    printf ("k = %f\n\n", k);
    // Clean up after CUDA
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_k);
    cudaFree(d_i);
    return 0;
}
|
5,167 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)
// For each of the first N indices: averages a three-element neighbourhood
// of a and of b (neighbour indices wrap modulo 256), then stores the mean
// of the two averages into c.
__global__ void kernel(int *a, int *b, int *c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    int n1 = (idx + 1) % 256;
    int n2 = (idx + 2) % 256;
    float avgA = (a[idx] + a[n1] + a[n2]) / 3.0f;
    float avgB = (b[idx] + b[n1] + b[n2]) / 3.0f;
    c[idx] = (avgA + avgB) / 2;
}
// NOTE(review): despite the "Sum" name this kernel stores the element-wise
// PRODUCT a[idx] * b[idx] into c — the body is identical to
// gScalarMultVect.  Confirm whether `a[idx] + b[idx]` was intended before
// relying on either kernel (neither is launched by the main below).
__global__ void gSumVector(int *a, int *b, int *c)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N)
c[idx] = a[idx] * b[idx];
}
// Element-wise product: c[i] = a[i] * b[i] for the first N entries.
__global__ void gScalarMultVect(int *a, int *b, int *c)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;
    c[i] = a[i] * b[i];
}
// Benchmark comparing four strategies for streaming FULL_DATA_SIZE ints
// through the 3-point-average kernel, printing the elapsed time of each:
//   1) one stream, chunk-at-a-time async copies;
//   2) two streams, operations grouped depth-first per stream;
//   3) two streams, copies/launches interleaved breadth-first;
//   4) synchronous cudaMemcpy with pageable (calloc'd) memory.
// NOTE(review): host_a/host_b are never filled after cudaHostAlloc, so the
// kernels process whatever the allocator returned — fine for timing, but
// the computed host_c values are meaningless.  Verify this is intentional.
int main()
{
cudaDeviceProp prop;
int whichDevice;
cudaGetDevice(&whichDevice);
cudaGetDeviceProperties(&prop, whichDevice);
// Copy/compute overlap is the whole point of the comparison; bail early
// on devices that cannot overlap at all.
if (!prop.deviceOverlap) {
printf("Device does not support overlapping\n");
return 0;
}
float time1, time2, time3, time4;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Pinned host buffers — required for cudaMemcpyAsync to actually overlap.
int *host_a, *host_b, *host_c;
cudaHostAlloc((void**) &host_a, FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault);
cudaHostAlloc((void**) &host_b, FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault);
cudaHostAlloc((void**) &host_c, FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault);
// Device buffers for the single-stream variant (one N-sized chunk).
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void**) &dev_a, N * sizeof(int));
cudaMalloc((void**) &dev_b, N * sizeof(int));
cudaMalloc((void**) &dev_c, N * sizeof(int));
// Separate buffer sets for the two streams of variants 2 and 3, so the
// streams never touch each other's data.
int *dev_a0, *dev_a1, *dev_b0, *dev_b1, *dev_c0, *dev_c1;
cudaMalloc((void**) &dev_a0, N * sizeof(int));
cudaMalloc((void**) &dev_a1, N * sizeof(int));
cudaMalloc((void**) &dev_b0, N * sizeof(int));
cudaMalloc((void**) &dev_b1, N * sizeof(int));
cudaMalloc((void**) &dev_c0, N * sizeof(int));
cudaMalloc((void**) &dev_c1, N * sizeof(int));
cudaStream_t stream, stream0, stream1;
cudaStreamCreate(&stream);
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
// --- Variant 1: single stream, chunked H2D / kernel / D2H pipeline. ---
cudaEventRecord(start, 0);
for (int i = 0; i < FULL_DATA_SIZE; i += N) {
cudaMemcpyAsync(dev_a, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dev_b, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream);
kernel<<<N / 256, 256, 0, stream>>>(dev_a, dev_b, dev_c);
cudaMemcpyAsync(host_c + i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time1, start, stop);
// --- Variant 2: two streams, each iteration issues stream0's whole chunk
// then stream1's whole chunk (depth-first ordering). ---
cudaEventRecord(start, 0);
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
cudaMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
kernel<<<N / 256, 256, 0, stream0>>>(dev_a0, dev_b0, dev_c0);
cudaMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);
cudaMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
kernel<<<N / 256, 256, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
cudaMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1);
}
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time2, start, stop);
// --- Variant 3: two streams with breadth-first interleaving (copies for
// both streams, then both kernels, then both result copies), which
// helps older copy-engine scheduling overlap better. ---
cudaEventRecord(start,0);
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
cudaMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
kernel<<<N / 256, 256, 0, stream0>>>(dev_a0, dev_b0, dev_c0);
kernel<<<N / 256, 256, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
cudaMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);
cudaMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1);
}
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time3, start, stop);
// --- Variant 4: baseline with pageable host memory and synchronous
// cudaMemcpy (no overlap possible). ---
int *host_a1, *host_b1, *host_c1;
host_a1 = (int*) calloc(FULL_DATA_SIZE, sizeof(int));
host_b1 = (int*) calloc(FULL_DATA_SIZE, sizeof(int));
host_c1 = (int*) calloc(FULL_DATA_SIZE, sizeof(int));
cudaEventRecord(start, 0);
for (int i = 0; i < FULL_DATA_SIZE; i += N) {
cudaMemcpy(dev_a, host_a1 + i, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, host_b1 + i, N * sizeof(int), cudaMemcpyHostToDevice);
kernel<<<N / 256, 256>>>(dev_a, dev_b, dev_c);
cudaMemcpy(host_c1 + i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time4, start, stop);
printf("%.4f ms\n", time1);
printf("%.4f ms\n", time2);
printf("%.4f ms\n", time3);
printf("%.4f ms\n", time4);
// Release host and device resources.
free(host_a1);
free(host_b1);
free(host_c1);
cudaStreamDestroy(stream);
cudaStreamDestroy(stream0);
cudaStreamDestroy(stream1);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_a0);
cudaFree(dev_a1);
cudaFree(dev_b0);
cudaFree(dev_b1);
cudaFree(dev_c0);
cudaFree(dev_c1);
cudaFreeHost(host_a);
cudaFreeHost(host_b);
cudaFreeHost(host_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
5,168 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define THREADS_PER_BLOCK 512
// Block-level dot product: every thread multiplies one pair of elements
// into shared memory; after the barrier, thread 0 serially sums the
// block's products and folds the block total into *c with one atomic.
// Assumes the launch covers the input exactly (no bounds check).
__global__ void dot(int *a, int *b, int *c) {
    __shared__ int partial[THREADS_PER_BLOCK];
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    partial[threadIdx.x] = a[gid] * b[gid];
    __syncthreads();   // every product must be visible before the sum
    if (threadIdx.x == 0) {
        int blockTotal = 0;
        for (int k = 0; k < THREADS_PER_BLOCK; k++)
            blockTotal += partial[k];
        atomicAdd(c, blockTotal);
    }
}
// Driver: fills two N-element vectors with random ints, computes their
// dot product on the GPU and prints it.
int main(void) {
    int i, N = 2097152;
    int *a, *b ,*c;
    int *da, *db, *dc;
    int size = N*sizeof(int);
    // Allocate CPU and GPU memory
    a=(int *)malloc(size);
    b=(int *)malloc(size);
    c=(int *)malloc(sizeof(int));
    cudaMalloc((void**)&da,size);
    cudaMalloc((void**)&db,size);
    cudaMalloc((void**)&dc,sizeof(int));
    // Fill the inputs with random data
    for (i = 0; i < N; i++) {
        a[i] = rand();
        b[i] = rand();
    }
    // Copy the inputs from CPU to GPU
    cudaMemcpy(da, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, N * sizeof(int), cudaMemcpyHostToDevice);
    // The kernel accumulates into *dc with atomicAdd, so it must start at
    // zero; the original left it uninitialized, making the printed sum
    // garbage.
    cudaMemset(dc, 0, sizeof(int));
    // Compute the dot product (N is an exact multiple of the block size)
    dot<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(da,db,dc);
    // Copy the result back (the blocking copy also synchronizes)
    cudaMemcpy(c, dc, sizeof(int), cudaMemcpyDeviceToHost);
    printf("sum=%d \n", *c);
    // Release memory
    free(a);
    free(b);
    free(c);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return EXIT_SUCCESS;
}
5,169 | /*
Integrantes: Juan Retamales
*/
//#include <pmmintrin.h>
/*C library to perform Input/Output operations*/
#include <stdio.h>
/*C library Añade funciones para convertir texto a otro formato*/
#include <stdlib.h>
#include <ctype.h>
#include <fcntl.h>
/*Libreria C para trabajar y comparar texto (de la linea de comando)*/
#include <string.h>
/* Librerias para open y write*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <math.h>
//incluyendo openMP
//#ifdef _OPENMP
//#include <omp.h>
//#endif
#include <time.h>
/* NOTAS
Para compilar: nvcc wave.cu -o wave.o
Para compilar2: nvcc wave.cu -o wave -lm -arch=sm_52
Para ejecutar: ./wave.o -N 256 -X 256 -Y 256 -T 100 -f salidaGrilla.raw -t 100
para probar: time ./wave.o -N 256 -X 128 -Y 128 -T 100 -f salidaGrilla.raw -t 26 > test.log
para enviar al servidor: scp code.cu jretamales@bioserver.diinf.usach.cl:/alumnos/jretamales/lab2
*/
// Computes one time step of the 2D wave simulation on a size x size grid.
// c_gt is the grid at time t (written), c_gt1 the grid at t-1, c_gt2 at
// t-2.  One thread per cell; the launch must cover the grid exactly —
// there is no bounds check on i or j.  gridDimX is accepted but unused.
// t == 0 writes the initial condition; t == 1 uses only c_gt1; t > 1 uses
// the leapfrog update over c_gt1 and c_gt2.  Border cells are fixed at 0.
__global__ void next(float *c_gt, float *c_gt1, float *c_gt2, int size, int t, int gridDimX){
int blockdX = blockDim.x;
int blockX = blockIdx.x;
int threadX = threadIdx.x;
int threadY = threadIdx.y;
int blockdY = blockDim.y;
int blockY = blockIdx.y;
int i = threadX + blockdX * blockX;// thread position + (block dimension * block position in the grid)
int j= threadY + blockdY * blockY;
float dt=0.1;   // time step — presumably; TODO confirm units with author
float dd=2.0;   // spatial step — presumably; TODO confirm
float c=1.0;    // wave speed coefficient
//for time t==0
if(t==0)
{
//initial condition: raised square patch in the centre of the grid
if((0.4*size)<i && (0.4*size)<j && i<(0.6*size) && j<(0.6*size))
{
c_gt[size*i+j]=20;
}
else
{
c_gt[size*i+j]=0;
}
}//end if t==0
else
{
if(t==1)
{
if(i!=0 && j!=0 && i!=(size-1) && j!=(size-1))//border-condition check
{
//update for t=1: uses only the t-1 grid (no t-2 history yet)
c_gt[size*i+j] = c_gt1[size*i+j]+(pow(c,2)/2)*(pow((dt/dd),2))*(c_gt1[size*(i+1)+j]+c_gt1[size*(i-1)+j]+c_gt1[size*(i)+(j-1)]+c_gt1[size*(i)+(j+1)]-4*c_gt1[size*i+j]);
}
else
{
c_gt[size*i+j] = 0;
}
}//end if t==1
else
{// t greater than 1
if(i!=0 && j!=0 && i!=(size-1) && j!=(size-1))//border-condition check
{
//update for t>1: leapfrog over the t-1 and t-2 grids
c_gt[size*i+j] = 2*c_gt1[size*i+j]-c_gt2[size*i+j]+(pow(c,2))*(pow((dt/dd),2))*(c_gt1[size*(i+1)+j]+c_gt1[size*(i-1)+j]+c_gt1[size*(i)+(j-1)]+c_gt1[size*(i)+(j+1)]-4*c_gt1[size*i+j]);
}
else
{
c_gt[size*i+j] = 0;
}
}//end if t==1 else
}//end if t==0 else
}
// Sequential debug variant of next(): EVERY launched thread loops over the
// whole grid, so launching with more than one thread makes all threads
// write the same cells redundantly.  Same update rules as next().
__global__ void next2(float *c_gt, float *c_gt1, float *c_gt2, int size, int t){
int blockD = blockDim.x;
int blockX = blockIdx.x;
int threadX = threadIdx.x;
// Debug trace only — device printf is serialized and slow.
printf("Hello Im thread %d in block %d of %d threads\n", threadX, blockX, blockD);
float dt=0.1;
float dd=2.0;
float c=1.0;
for(int i = 0; i<size; i++)
{
for(int j = 0; j<size; j++)
{
//for time t==0
if(t==0)
{
//initial condition: raised square patch in the centre of the grid
if((0.4*size)<i && (0.4*size)<j && i<(0.6*size) && j<(0.6*size))
{
c_gt[size*i+j]=20;
}
else
{
c_gt[size*i+j]=0;
}
}//end if t==0
else
{
if(t==1)
{
if(i!=0 && j!=0 && i!=(size-1) && j!=(size-1))//border-condition check
{
//update for t=1: uses only the t-1 grid
c_gt[size*i+j] = c_gt1[size*i+j]+(pow(c,2)/2)*(pow((dt/dd),2))*(c_gt1[size*(i+1)+j]+c_gt1[size*(i-1)+j]+c_gt1[size*(i)+(j-1)]+c_gt1[size*(i)+(j+1)]-4*c_gt1[size*i+j]);
}
else
{
c_gt[size*i+j] = 0;
}
}//end if t==1
else
{// t greater than 1
if(i!=0 && j!=0 && i!=(size-1) && j!=(size-1))//border-condition check
{
//update for t>1: leapfrog over the t-1 and t-2 grids
c_gt[size*i+j] = 2*c_gt1[size*i+j]-c_gt2[size*i+j]+(pow(c,2))*(pow((dt/dd),2))*(c_gt1[size*(i+1)+j]+c_gt1[size*(i-1)+j]+c_gt1[size*(i)+(j-1)]+c_gt1[size*(i)+(j+1)]-4*c_gt1[size*i+j]);
}
else
{
c_gt[size*i+j] = 0;
}
}//end if t==1 else
}//end if t==0 else
}//end for j
}//end for i
}
// Copies the time-t grid (c_gt) into the t-1 grid (c_gt1), one cell per
// thread; launch geometry must cover the size x size grid exactly.
__global__ void copyT1T(float *c_gt, float *c_gt1, int size){
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    c_gt1[size * row + col] = c_gt[size * row + col];
}
// Copies the t-1 grid (c_gt1) into the t-2 grid (c_gt2), one cell per
// thread; launch geometry must cover the size x size grid exactly.
__global__ void copyT2T1(float *c_gt1, float *c_gt2, int size){
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    c_gt2[size * row + col] = c_gt1[size * row + col];
}
// Single-thread debug copy of the whole c_gt grid into c_gt1.
__global__ void copyT1Tx(float *c_gt, float *c_gt1, int size){
    printf("\nCopianto T a T1");
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            c_gt1[size * r + c] = c_gt[size * r + c];
        }
    }
}
// Single-thread debug copy of the whole c_gt1 grid into c_gt2.
__global__ void copyT2T1x(float *c_gt1, float *c_gt2, int size){
    printf("\nCopianto T1 a T2");
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            c_gt2[size * r + c] = c_gt1[size * r + c];
        }
    }
}
/*
* Function principal encargada de recibir y gestionar los datos recibidos
*/
/*
 * Main: parses command-line options, runs the wave simulation on the GPU
 * and writes the grid at the requested iteration to the output file.
 * Options: -N grid side, -X/-Y block-grid dims, -T steps, -t output
 * iteration (1-based), -f output file.
 *
 * Fixes relative to the original:
 *  - outputF is initialized to -1 (it was read uninitialized when -f was
 *    missing);
 *  - the original's cudaMemcpy calls mixing host destinations / device
 *    sources with a cudaMemcpyHostToDevice flag (and a device-to-device
 *    copy flagged DeviceToHost) failed silently; the device grids are now
 *    zeroed with cudaMemset and the result is copied back only when it is
 *    actually written out;
 *  - the unused host grids grillaT1/grillaT2 (leaked) are removed;
 *  - total_t is a double, not a clock_t truncating a double quotient.
 */
int main(int argc, char *argv[])
{
    int outputF = -1;          // output file descriptor (-1 = not opened)
    int tamanoGrilla = 0;      // grid side length (-N)
    int num_pasos = 0;         // number of time steps (-T)
    int iteracionSalida = 0;   // iteration whose grid is written (-t)
    int tamanoBlockX = 0;      // blocks in X (-X)
    int tamanoBlockY = 0;      // blocks in Y (-Y)
    int t, j, i;
    // CPU wall-clock bracketing of the whole run.
    clock_t start_t, end_t;
    start_t = clock();
    // Too few arguments to possibly contain the mandatory options.
    if (argc < 4)
    {
        perror("se esperaban mas parametros...\n");
        return 0;
    }
    // Scan argv for option flags; each flag's value is the next argument.
    for (int i = 1; i < argc; i++)
    {
        if (strcmp(argv[i], "-N") == 0)
        {
            tamanoGrilla = atoi(argv[i + 1]);
        }
        if (strcmp(argv[i], "-X") == 0)
        {
            tamanoBlockX = atoi(argv[i + 1]);
        }
        if (strcmp(argv[i], "-Y") == 0)
        {
            tamanoBlockY = atoi(argv[i + 1]);
        }
        if (strcmp(argv[i], "-T") == 0)
        {
            num_pasos = atoi(argv[i + 1]);
        }
        if (strcmp(argv[i], "-f") == 0)
        {
            // Open (or create) the output file.
            outputF = open(argv[i + 1], O_CREAT | O_WRONLY, 0600);
            if (outputF == -1)
            {
                perror("\nFailed to create or open the file.");
                exit(1);
            }
        }
        if (strcmp(argv[i], "-t") == 0)
        {
            iteracionSalida = atoi(argv[i + 1]);
        }
    }
    // Proceed only when all mandatory parameters were supplied.
    if (outputF != -1 && tamanoGrilla > 0 && iteracionSalida > 0)
    {
        // GPU timing events bracketing the simulation.
        cudaEvent_t start;
        cudaEvent_t stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        // Launch geometry: numBlocks blocks of blocksize threads cover the
        // tamanoGrilla x tamanoGrilla grid exactly (the kernels have no
        // bounds checks, so -N must be divisible by -X and -Y).
        dim3 numBlocks;
        numBlocks.x = tamanoBlockX;
        numBlocks.y = tamanoBlockY;
        dim3 blocksize;
        blocksize.x = tamanoGrilla / tamanoBlockX;
        blocksize.y = tamanoGrilla / tamanoBlockY;
        size_t gridBytes = tamanoGrilla * tamanoGrilla * sizeof(float);
        // Host staging buffer for output, plus the three device grids:
        // time t (c_gt), t-1 (c_gt1) and t-2 (c_gt2).
        float *grilla = (float*)malloc(gridBytes);
        float *c_gt2, *c_gt1, *c_gt;
        cudaMalloc((void**) &c_gt2, gridBytes);
        cudaMalloc((void**) &c_gt1, gridBytes);
        cudaMalloc((void**) &c_gt, gridBytes);
        // Start from zeroed grids; the t==0 kernel pass writes the initial
        // condition into c_gt.
        cudaMemset(c_gt2, 0, gridBytes);
        cudaMemset(c_gt1, 0, gridBytes);
        cudaMemset(c_gt, 0, gridBytes);
        for (t = 0; t < num_pasos; t++)
        {
            // Advance one step: c_gt <- update(c_gt1, c_gt2, t).
            next<<<numBlocks, blocksize>>>(c_gt, c_gt1, c_gt2, tamanoGrilla, t, ((int)(tamanoGrilla / tamanoBlockY)));
            cudaDeviceSynchronize();
            // At the requested iteration, copy the grid back and emit it.
            if (t == (iteracionSalida - 1))
            {
                cudaMemcpy(grilla, c_gt, gridBytes, cudaMemcpyDeviceToHost);
                for (i = 0; i < tamanoGrilla; i++)
                {
                    for (j = 0; j < tamanoGrilla; j++)
                    {
                        printf("%f,", grilla[tamanoGrilla * i + j]);
                        write(outputF, &grilla[tamanoGrilla * i + j], sizeof(float));
                    }
                    printf("\n");
                }
            }
            // Rotate the time levels on the device: first t-1 -> t-2,
            // then t -> t-1, ready for the next step.
            copyT2T1<<<numBlocks, blocksize>>>(c_gt1, c_gt2, tamanoGrilla);
            cudaDeviceSynchronize();
            copyT1T<<<numBlocks, blocksize>>>(c_gt, c_gt1, tamanoGrilla);
            cudaDeviceSynchronize();
        }//end for t
        free(grilla);
        cudaFree(c_gt2);
        cudaFree(c_gt1);
        cudaFree(c_gt);
        // Stop the GPU timer and collect the elapsed milliseconds.
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        // Uncomment to report GPU time:
        //printf("> Tiempo de ejecucion: %f ms\n", elapsedTime);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        close(outputF);
        end_t = clock();
        double total_t = (double)(end_t - start_t) / CLOCKS_PER_SEC;
        printf("Total time taken by CPU: %f\n", total_t);
        return 0;
    }//end main guard
}//end main
|
5,170 | #include "includes.h"
// Multiplies a constant-coefficient tridiagonal matrix diag(p_d, p_m, p_u)
// by several length-n vectors packed contiguously in u, with each group of
// n consecutive threads handling one vector.  blockDim.x must be a
// multiple of n and the dynamic shared allocation must hold blockDim.x
// floats.
__global__ void tridiag_x_matrix_k(float p_d, float p_m, float p_u, float* u, int n)
{
// Identifies the thread working within a group
int tidx = threadIdx.x % n;
// Identifies the data concerned by the computations
int Qt = (threadIdx.x - tidx) / n;
extern __shared__ float sAds[];
float* su = (float*)&sAds[Qt * n];
// Stage this group's vector.  Index with tidx: su is already offset by
// Qt * n, so the original's su[threadIdx.x] wrote to sAds[2*Qt*n + tidx]
// — past this group's slot (and out of bounds for the upper groups) —
// while the reads below use su[tidx].
su[tidx] = u[blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
float temp;
// Interior rows use all three bands; the first and last rows of each
// vector drop the band that falls outside the matrix.
if (tidx > 0 && tidx < n - 1)
temp = p_d * su[tidx - 1] + p_m * su[tidx] + p_u * su[tidx + 1];
else if (tidx == 0)
temp = p_m * su[tidx] + p_u * su[tidx + 1];
else
temp = p_d * su[tidx - 1] + p_m * su[tidx];
u[blockIdx.x * blockDim.x + threadIdx.x] = temp;
}
extern "C"
// One thread per batch task: wires the per-task pointer tables (RP/BP/EP)
// into the flat backing arrays (RA/BA/EA), then seeds the task's range and
// error slots from R, repeating each range value rScale times.
// NOTE(review): EP[task] is offset by task * rbs while EA is written with
// stride rbs * rScale below — confirm the intended layout of EA.
__global__ void setRangePoolKernel(
int nBatch,int rbs,int nDegree,int nD,int rScale,
float *R, // range values
float *RA, // backing storage for ranges
float *BA, // backing storage for coefficients
float *EA, // backing storage for errors
float **RP, // per-task pointers into RA
float **BP, // per-task pointers into BA
float **EP // per-task pointers into EA
)
{
    int task = blockIdx.x * blockDim.x + threadIdx.x;
    if (task >= nBatch)
        return;
    // Coefficients per task.
    const int nCoeff = (nDegree - 1) * nD + 1;
    const int base = task * rbs * rScale;
    // Point this task at its own slice of each backing array.
    RP[task] = &RA[base];
    BP[task] = &BA[task * nCoeff];
    EP[task] = &EA[task * rbs];
    // Seed range and error storage; slot j takes range value R[j / rScale].
    for (int j = 0; j < rbs * rScale; j++) {
        float v = R[j / rScale];
        RA[base + j] = v;
        EA[base + j] = v;
    }
}
|
5,172 | //Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <cuda.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#include "float.h"
#include <math.h>
#include <stdarg.h>
#include <stdio.h>
#define PI acos(-1.0)
extern "C"{
// Write coefficients back into the matrix, ready for fitness evaluation/ Inverse DCT
// Writes savedCoeffs coefficients from coeffArray back into thread `id`'s
// dimsize x dimsize matrix, following a zigzag (diagonal) traversal from
// the top-left corner — the inverse of extractCoeffs below.  Cells beyond
// the first savedCoeffs zigzag positions are left untouched.
// One matrix per thread; supports a 1D grid of up to-3D blocks.
__global__ void implantCoeffs(float* matrices, float *coeffArray, int savedCoeffs, int dimsize){
int id = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// (x_n, y_n) is the start cell of the current anti-diagonal; (x, y) walks
// along it.  numberinrow counts the cells on that diagonal.
int offsetMatrix = id * dimsize * dimsize,
offsetCoeff = id * savedCoeffs,
coeffsLeft = savedCoeffs,
x, y, y_n = 0, x_n = 1,
numberinrow, tmp;
// The DC term (0,0) is placed first, outside the diagonal loop.
matrices[offsetMatrix] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
coeffsLeft -= 1;
while (coeffsLeft > 0){
// Work out number in row (cells on this anti-diagonal)
x = x_n;
y = y_n;
if (x_n < dimsize - 1){
numberinrow = x_n + 1;
}
else{
numberinrow = x_n - (y_n - 1);
}
if (numberinrow % 2 == 0){
// Even-length diagonal: a straight swap-walk covers all its cells.
while (numberinrow > 0 && coeffsLeft > 0){
matrices[offsetMatrix + x + y * dimsize] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
numberinrow--;
coeffsLeft--;
if ((numberinrow + 1) % 2 == 0){
// Swap x and y (mirror across the diagonal)
tmp = x;
x = y;
y = tmp;
}
else{
// Swap x and y, then step to the next cell on the diagonal
tmp = x;
x = y;
y = tmp;
x--;
y++;
}
}
}
else{
// Odd-length diagonal: walk in pairs, middle cell handled after.
while (numberinrow > 1 && coeffsLeft > 0){
matrices[offsetMatrix + x + y * dimsize] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
numberinrow--;
coeffsLeft--;
if ((numberinrow + 1) % 2 == 1){
// Swap x and y (mirror across the diagonal)
tmp = x;
x = y;
y = tmp;
}
else{
// Swap x and y, then step to the next cell on the diagonal
tmp = x;
x = y;
y = tmp;
x--;
y++;
}
}
if (coeffsLeft > 0){
// add the odd one (the diagonal's middle cell)
matrices[offsetMatrix + x + y * dimsize] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
numberinrow--;
coeffsLeft--;
}
}
// Advance the diagonal start: along the top row first, then down the
// right-hand column once the corner is reached.
if (x_n == dimsize - 1){
y_n++;
}
else{
x_n++;
}
}
}
// Creates a square cosine matrix and its inverse
__global__ void createCosineMatrix(float* matrix, int xsize){
int threadGlobalID = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
int i;
for (i = 0; i < xsize; i++){
if (threadGlobalID == 0)
matrix[threadGlobalID + i * xsize] = 1 / sqrt((float)xsize);
else
matrix[threadGlobalID + i * xsize] = (sqrt((float)2 / xsize) * cos((PI * (2 * i + 1) * threadGlobalID) / (2 * xsize)));
}
}
// This is obscenely complex for something so seemingly simple
// Each thread, extracts savedCoeffs from a matrix, assumes square martix
// This is obscenely complex for something so seemingly simple
// Each thread extracts the first savedCoeffs coefficients of its
// dimsize x dimsize matrix into coeffArray, following the same zigzag
// (diagonal) traversal as implantCoeffs above; the two are inverses.
// One matrix per thread; supports a 1D grid of up-to-3D blocks.
__global__ void extractCoeffs(const float *matrices, float *coeffArray, int savedCoeffs, int dimsize){
int threadGlobalID = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// (x_n, y_n) is the start of the current anti-diagonal; (x, y) walks it.
int offsetMatrix = threadGlobalID * dimsize * dimsize,
offsetCoeff = threadGlobalID * savedCoeffs,
coeffsLeft = savedCoeffs,
x, y, y_n = 0, x_n = 1,
numberinrow, tmp;
// The DC term (0,0) is extracted first, outside the diagonal loop.
coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)] = matrices[offsetMatrix];
coeffsLeft -= 1;
while (coeffsLeft > 0){
// Work out number in row (cells on this anti-diagonal)
x = x_n;
y = y_n;
if (x_n < dimsize - 1)
numberinrow = x_n + 1;
else
numberinrow = x_n - (y_n - 1);
if (numberinrow % 2 == 0){
// Even-length diagonal
while (numberinrow > 0 && coeffsLeft > 0){
coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)] = matrices[offsetMatrix + x + y * dimsize];
numberinrow--;
coeffsLeft--;
if ((numberinrow + 1) % 2 == 0){
// Swap x and y (mirror across the diagonal)
tmp = x;
x = y;
y = tmp;
}
else{
// Swap x and y, then step to the next cell on the diagonal
tmp = x;
x = y;
y = tmp;
x--;
y++;
}
}
}
else{
// Odd-length diagonal: walk in pairs, middle cell handled after.
while (numberinrow > 1 && coeffsLeft > 0){
coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)] = matrices[offsetMatrix + x + y * dimsize];
numberinrow--;
coeffsLeft--;
if ((numberinrow + 1) % 2 == 1){
// Swap x and y (mirror across the diagonal)
tmp = x;
x = y;
y = tmp;
}
else{
// Swap x and y, then step to the next cell on the diagonal
tmp = x;
x = y;
y = tmp;
x--;
y++;
}
}
if (coeffsLeft > 0){
// add the odd one (the diagonal's middle cell)
coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)] = matrices[offsetMatrix + x + y * dimsize];
numberinrow--;
coeffsLeft--;
}
}
// Advance the diagonal start: along the top row, then down the column.
if (x_n == dimsize - 1){
y_n++;
}
else{
x_n++;
}
}
}
// Generates chromSize random numbers between alpha and -alpha and stores them in the chromosomes array
__global__ void generateCoefficients(float *chromosomes, const int chromSize, const float* noise, const int population, const int alpha){
int i;
// For up to a 1D grid of 3D blocks...
int threadGlobalID = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
curandState st;
curand_init((int)noise[threadGlobalID] << threadGlobalID, threadGlobalID * (threadGlobalID == population - 1 ? noise[0] : noise[threadGlobalID]), 0, &st);
if (threadGlobalID > 0){
for (i = 0; i < chromSize; i++){
if (curand_uniform(&st) < 0.5){
chromosomes[chromSize*threadGlobalID + i] = curand_uniform(&st) *alpha;
}
else{
chromosomes[chromSize*threadGlobalID + i] = -1 * curand_uniform(&st) * alpha;
}
}
}
}
// Performs the CoSyNE genetic algorithm.
// -- Replace all non-survivors with crossover from two random parents
// -- Randomly mutate the new population members
// -- Permute the genes of the chromosome population
// Performs the CoSyNE genetic algorithm.
// -- Replace all non-survivors with crossover from two random parents
// -- Randomly mutate the new population members
// -- Permute the genes of the chromosome population
// One thread per chromosome (1D grid of up-to-3D blocks); the first `kept`
// chromosomes survive unchanged.  Finally every thread zeroes its matrix.
// NOTE(review): child threads read parent rows [0, kept) while other child
// threads write their own rows — safe only because parents are never
// written; but the permutation phase reads/writes chromosome rows across
// the whole population with only a block-level __syncthreads() at the end,
// so cross-block ordering is not guaranteed.  Confirm single-block launch.
// NOTE(review): the permutation probability formula implies fitnesses[]
// is sorted best-first (index 0 best) — verify against the host sort.
__global__ void grow(float *matrices, const int dimension, const int coefficients, const int population, float *chromosomes, const float * noise, const float mutationRate, const int kept, const float* fitnesses, int *mark, const int alpha){
int i, wloc;
curandState st;
// For up to a 1D grid of 3D blocks...
int threadGlobalID = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
int chromOffset = threadGlobalID * coefficients;
int parent1, parent2, point;
float tmp1, tmp2;
// Init the random number generator
curand_init((int)noise[threadGlobalID] << threadGlobalID, threadGlobalID * (threadGlobalID == population - 1 ? noise[0] : noise[threadGlobalID]), 0, &st);
// Repopulate
// The threads with the keepmask are kept, all others are replaced with crossovers
if (threadGlobalID > kept - 1){
// pick two parents -- 0 is not included in the random distribution
parent1 = floor(curand_uniform(&st) * kept);
parent2 = floor(curand_uniform(&st) * kept);
//pick a point on the chromosome
point = floor(curand_uniform(&st) * coefficients);
// Copy genes before the crossover point from parent 1...
for (i = 0; i < point; i++){
chromosomes[chromOffset + i] = chromosomes[parent1 * coefficients + i];
}
//Copy past the point for parent 2
for (i = point; i < coefficients; i++){
chromosomes[chromOffset + i] = chromosomes[parent2 * coefficients + i];
}
}
// Mutate children: each gene has mutationRate chance of being replaced
// with a fresh uniform value in (-alpha, alpha).
if (threadGlobalID > kept - 1){
for (i = 0; i < coefficients; i++){
if (curand_uniform(&st) <= mutationRate){
if (curand_uniform(&st) < 0.5){
chromosomes[chromOffset + i] = curand_uniform(&st) * -1 * alpha;
}
else{
chromosomes[chromOffset + i] = curand_uniform(&st) * alpha;
}
}
}
}
// Permute: each of the first `coefficients` threads handles one gene
// column across the whole population.
if (threadGlobalID < coefficients){
// Mark genes for permutation (fitter chromosomes are less likely picked)
for (i = 0; i < population; i++){
if (curand_uniform(&st) < (1 - sqrt((fitnesses[i] - fitnesses[population - 1]) / (fitnesses[0] - fitnesses[population - 1])))){
mark[coefficients * i + threadGlobalID] = 1;
}
else{
mark[coefficients * i + threadGlobalID] = 0;
}
}
wloc = -1;
// Permute selected genes: cyclically shift the marked entries of this
// column downward, with the last marked value wrapping to the first.
for (i = 0; i < population; i++){
if (mark[coefficients * i + threadGlobalID] == 1){
if (wloc == -1){
wloc = i;
tmp1 = chromosomes[coefficients * i + threadGlobalID];
}
else{
tmp2 = chromosomes[coefficients * i + threadGlobalID];
chromosomes[coefficients * i + threadGlobalID] = tmp1;
tmp1 = tmp2;
}
}
}
if (wloc != -1){
chromosomes[coefficients * wloc + threadGlobalID] = tmp1;
}
}
__syncthreads();
//Place into relevant matrix (zeroed here; coefficients implanted later)
for (i = 0; i < dimension*dimension; i++){
matrices[threadGlobalID * dimension * dimension + i] = 0.0f;
}
}
} |
5,173 | #include "includes.h"
// Element-wise three-way sum: d[i] = a[i] + b[i] + c[i] for the first
// msize entries.  Only threadIdx.x is used, so a single-block launch is
// assumed.
__global__ void addToKPlus(int msize, double* a, double* b, double* c, double* d)
{
    int idx = threadIdx.x;
    if (idx >= msize)
        return;
    d[idx] = a[idx] + b[idx] + c[idx];
}
// Math constants used by the fractal renderer below.
#define LN2_INV 1.4426950408889634
#define TWO_PI 6.283185307179586
#define PI 3.141592653589793
#define E 2.718281828459045
// Precision-dispatching wrappers: the templated renderer calls these so
// overload resolution picks the float intrinsics (expf/cosf/sinf) for
// float instantiations and the double versions for double.
__forceinline__ double __device__ exponent(double x)
{
return exp(x);
}
__forceinline__ float __device__ exponent(float x)
{
return expf(x);
}
__forceinline__ double __device__ cosine(double x)
{
return cos(x);
}
__forceinline__ float __device__ cosine(float x)
{
return cosf(x);
}
__forceinline__ double __device__ sine(double x)
{
return sin(x);
}
__forceinline__ float __device__ sine(float x)
{
return sinf(x);
}
// Clamps x into [lower, upper] using the device min/max overloads.
template<class T>
__forceinline__ T __device__ clamp(const T& x, const T& lower, const T& upper)
{
return min(upper, max(x, lower));
}
// Renders one pixel (x, y) of an exponential-map escape-time fractal into
// an NV12 buffer (luma plane of `height` rows at `stride`, interleaved
// chroma plane after it).  T selects float or double arithmetic via the
// overloaded helpers above.  Out-of-frame coordinates return immediately.
template <typename T>
void __device__ render_fractal(unsigned char* nv12_yuv, unsigned int height, unsigned int width, unsigned int stride,
T x_0, T x_step, T y_1, T y_step, T cutoff_value_sq, unsigned int max_iterations,
unsigned int x, unsigned int y)
{
if(x>=width || y>= height)
return;
unsigned i;
// Map the pixel to a point in the complex plane (y axis inverted).
T initial_real = x_0 + x*x_step;
T initial_imag = y_1 - y*y_step;
T current_real = initial_real;
T current_imag = initial_imag;
T current_abs_sq = current_imag*current_imag+current_real*current_real;
// Escape-time loop: iterate z -> z^2 + exp(-z)*z0 (expanded into real and
// imaginary parts below) until |z|^2 reaches the cutoff or we run out of
// iterations.
for(i=0; i<max_iterations && current_abs_sq < cutoff_value_sq;++i)
{
T current_exp_arg = exponent(-current_real);
T current_square_imag = 2 * current_real * current_imag;
T cos_current_imag = cosine(current_imag);
T sin_current_imag = sine(current_imag);
current_real = current_real*current_real - current_imag*current_imag + current_exp_arg*(cos_current_imag*initial_real + sin_current_imag*initial_imag);
current_imag = current_exp_arg*(cos_current_imag*initial_imag - sin_current_imag*initial_real) + current_square_imag;
current_abs_sq = current_imag*current_imag+current_real*current_real;
}
float luma,cr,cb;
/* ---- Colorization -----*/
if(i<max_iterations)
{
// Smooth iteration count so colour bands blend continuously.
// NOTE(review): logf is used here even when T is double, losing
// precision in the double path — presumably `log` was intended.
double ln_cutoff_val_sq = logf(cutoff_value_sq);
double smoothing_factor = LN2_INV*(log(0.5*log(current_abs_sq)-0.5*log(ln_cutoff_val_sq)));
double smoothed_iterations = i+1-smoothing_factor;
double variable = smoothed_iterations/max_iterations*PI;
// NOTE(review): the clamped values are cast to unsigned char and then
// stored into floats before the final unsigned char store below —
// confirm the double rounding is intentional.
luma = 50 + (unsigned char)clamp((200.0 * variable),0.0,200.0);
cr = (unsigned char)clamp((235.5*cosine(0.5*variable)),16.0,235.0);
cb = (unsigned char)clamp((235.5*sine(3.0*variable)),16.0,128.0);
}
else
{
// Points that never escaped: black (video-range) with neutral chroma.
luma = 16;
cr = 128;
cb = 128;
}
nv12_yuv[y*stride+x] = luma;
// NV12 chroma is subsampled 2x2: only even (x, y) pixels write the
// interleaved Cr/Cb pair in the chroma plane.
if(((x & 1u) == 0u) && ((y & 1u) == 0u))
{
nv12_yuv[height*stride+y*stride/2+x] = cr;
nv12_yuv[height*stride+y*stride/2+x+1]= cb;
}
}
// Launch wrapper: maps the 2D grid/block to pixel coordinates and renders
// one pixel per thread in single precision.
extern "C" __global__ void render_fractal_float(unsigned char* nv12_yuv,unsigned int height, unsigned int width, unsigned int stride,
float x_0, float x_step, float y_1, float y_step,float cutoff_value_sq,
unsigned int max_iterations)
{
    unsigned int px = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int py = blockIdx.y*blockDim.y + threadIdx.y;
    render_fractal(nv12_yuv, height, width, stride, x_0, x_step, y_1, y_step, cutoff_value_sq, max_iterations, px, py);
}
// Double-precision entry point: one thread renders one pixel, delegating to
// the templated per-pixel worker above (which also does the bounds check).
extern "C" __global__ void render_fractal_double(unsigned char* nv12_yuv, unsigned int height, unsigned int width, unsigned int stride,
double x_0, double x_step, double y_1, double y_step, double cutoff_value_sq,
unsigned int max_iterations)
{
const unsigned int px = blockDim.x*blockIdx.x + threadIdx.x;
const unsigned int py = blockDim.y*blockIdx.y + threadIdx.y;
render_fractal(nv12_yuv,height,width,stride,x_0,x_step,y_1,y_step,cutoff_value_sq,max_iterations,px,py);
}
|
5,175 | __device__ int count = 0;
// Single-pass grid-wide sum using the "last block finishes" pattern.
// Launch with dynamic shared memory = blockDim.x * sizeof(int); blockDim.x
// must be a power of two for the tree reduction below.
// Fix vs. original: the global completion counter `count` is now reset by
// the finishing block, so the kernel can be launched more than once.
__global__ static void sum(int* data_gpu, int* block_gpu, int *sum_gpu, int length)
{
    extern __shared__ int blocksum[];   // one partial sum per thread
    __shared__ int islast;              // set in the block that finishes last
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    const int tnum = blockDim.x;
    const int bnum = gridDim.x;
    // Grid-stride accumulation into per-thread partials.
    blocksum[tid] = 0;
    for (int i = bid * tnum + tid; i < length; i += bnum * tnum) {
        blocksum[tid] += data_gpu[i];
    }
    __syncthreads();
    // In-block tree reduction (assumes tnum is a power of two).
    for (int offset = tnum / 2; offset > 0; offset >>= 1) {
        if (tid < offset) {
            blocksum[tid] += blocksum[tid + offset];
        }
        __syncthreads();
    }
    if (tid == 0) {
        block_gpu[bid] = blocksum[0];
        __threadfence();                // publish block result before signalling
        int value = atomicAdd(&count, 1);
        islast = (value == gridDim.x - 1);
    }
    __syncthreads();
    // The last block to arrive folds all per-block results into *sum_gpu.
    if (islast) {
        if (tid == 0) {
            int s = 0;
            for (int i = 0; i < bnum; i++) {
                s += block_gpu[i];
            }
            *sum_gpu = s;
            count = 0;                  // reset so a subsequent launch works
        }
    }
}
|
5,176 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<cuda.h>
// Element-wise C = A + B; one element per thread.
// Fix: the original computed i = tid*2, so with the ceil(n/256) launch only
// even indices of C were ever written — odd elements stayed uninitialized.
__global__
void vecAddKernel(float* A, float* B, float* C, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if(i<n) C[i] = A[i] + B[i];
}
// Abort with a message if a CUDA runtime call failed.
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Host wrapper: copies A and B to the device, launches the add kernel and
// copies the result back into C. n is the element count of each vector.
void vecAdd(float* A, float* B, float* C, int n)
{
    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    // Allocate device buffers (error-checked; the original ignored failures).
    checkCuda(cudaMalloc((void**) &d_A, size), "cudaMalloc d_A");
    checkCuda(cudaMalloc((void**) &d_B, size), "cudaMalloc d_B");
    checkCuda(cudaMalloc((void**) &d_C, size), "cudaMalloc d_C");
    // Copy inputs host -> device.
    checkCuda(cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice), "copy A");
    checkCuda(cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice), "copy B");
    vecAddKernel<<<ceil(n/256.0),256>>>(d_A, d_B, d_C, n);
    checkCuda(cudaGetLastError(), "kernel launch");
    // Blocking copy back also synchronizes with the kernel.
    checkCuda(cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost), "copy C");
    cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
}
// Reads a vector length, fills two random vectors, adds them on the GPU and
// prints the result. Fixes: scanf result is checked and the new[] buffers
// are released (the original leaked all three).
int main()
{
    int n;
    //Size of the vectors
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "invalid vector length\n");
        return 1;
    }
    //Allocating memory on Host
    float *h_A= new float[n], *h_B= new float[n], *h_C = new float[n];
    //Initializing vectors with random values
    srand(time(NULL));
    for (int i = 0; i < n; i++)
    {
        h_A[i] = rand(); h_B[i] = rand();
    }
    vecAdd(h_A,h_B,h_C,n);
    for(int i = 0 ; i< n ;i++) printf("%f\n", h_C[i]);
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    return 0;
}
|
5,177 | /*
Parallel Processing Architecture and Algorithms, Spring-2015.
Project: Image Convolution with Cuda.
Muhammad Shahid Noman Siddiqui.
Sp-2014/M.Sc.CE/007
Note: The following heterogeneous code has been developed on Intel core i7, 2.8GHz processor with
Nvidia NVS3100m notebook business graphic card with 16 Cuda Cores. Visual Studio 2010 and Cuda
Toolkit 6.5 has been used. Plus this code is a simple display of the 2D convolution i.e it is a
test code for very small image size and it doesn't yet take into account the real image.
*/
/*
Program Name: Cuda_2DConvolution
This program has the CUDA only code for Convolution.
In future, this code can be mixed up with MPI and run
on CASE Cluster System. Plus this is only an illustration
of Convolution with a simple impulse filter.
*/
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define Width 1024 //Image Width and Height.
#define Height 1024
#define Tile 32
#define N (Width*Height)
// Device Side Convolution Function and is callable from Host.
// 3x3 convolution with an all-ones mask; one thread per output pixel.
// Fix: the original read Input[(y+j)*Width+(x+i)] without any bounds check,
// so every border thread read out of the image buffer. Border pixels (which
// have no full 3x3 neighbourhood) now write 0 instead.
__global__ void Image_Convolution(int *Input, int *Output)
{
    int x= blockIdx.x * Tile + threadIdx.x; // Thread Column Index.
    int y= blockIdx.y * Tile + threadIdx.y; // Thread Row Index.
    int Mask[3][3]={{1,1,1},{1,1,1},{1,1,1}}; // Impulse Filter.
    if (x >= Width || y >= Height)
        return;
    if (x < 1 || y < 1 || x >= Width-1 || y >= Height-1) {
        Output[y*Width+x] = 0; // border: neighbourhood incomplete
        return;
    }
    int Sum=0;
    // Multiply the 3x3 neighbourhood by the filter coefficients.
    for(int i=-1;i<=1;i++)
        for(int j=-1;j<=1;j++)
            Sum+= Input[(y+j)*Width+(x+i)]*Mask[j+1][i+1];
    Output[y*Width+x] = Sum; // Writes result to Output Image pixel.
}
// Runs the 3x3 convolution on the CPU (timed with clock()) and on the GPU
// (timed with CUDA events) and reports both.
// Fixes vs. original: the CPU kernel indexed I_Image[i*row+j] (wrong pixel,
// with i,j in -1..1 even a negative index) and threw the result away; the
// GPU printfs used the invalid "%fn" instead of "%f\n".
int main(void)
{
    int *I_Image, *O_Image; // Host variables for Input and Output Images.
    int *dev_I_Image, *dev_O_Image; // Device side pointers to Input and Output Images.
    int Mask[3][3]={{1,1,1},{1,1,1},{1,1,1}}; // Impulse Filter.
    int SIZE=Width*Height*sizeof(int);
    I_Image=(int *)malloc(SIZE);
    O_Image=(int *)malloc(SIZE);
    clock_t Time_Start, Time_End, Time_Difference; // Clock used to measure time for CPU Convolution execution time.
    double Time;
    for(int i=0;i<Width;i++) // Image has been initialized with value 1 for all pixels.
        for(int j=0;j<Height;j++)
        {
            I_Image[i*Width+j]=1;
        }
    Time_Start=clock(); // Start Time for CPU Convolution Kernel
    printf ("CPU Executing Convolution Kernel...\n") ;
    printf("\n");
    for (int row=1;row<Height-1;row++) // CPU Kernel for Convolution.
        for (int col=1;col<Width-1;col++) // Avoiding Memory access beyond the Image bounds.
        {
            int Sum= 0;
            for (int i=-1;i<=1;i++)
                for (int j=-1;j<=1;j++)
                    Sum += I_Image[(row+i)*Width+(col+j)]*Mask[1+i][1+j]; // was I_Image[i*row+j]
            O_Image[row*Width+col] = Sum; // store it so the CPU pass is not dead code
        }
    Time_End=clock();
    Time_Difference=Time_End-Time_Start;
    Time=Time_Difference/(double)CLOCKS_PER_SEC ;
    printf ("CPU time for Convolution = %f ms\n", Time*1000) ;
    printf("\n");
    cudaMalloc(&dev_I_Image,SIZE); // Allocating memory onto the GPU.
    cudaMalloc(&dev_O_Image,SIZE);
    cudaEvent_t start, stop; // Cuda API to measure time for Cuda Kernel Execution.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(dev_I_Image,I_Image,SIZE,cudaMemcpyHostToDevice); // Copying Input Image to GPU Memory.
    dim3 dimGrid(Width/Tile,Height/Tile); // Two Dimesional blocks with two dimensional threads.
    dim3 dimBlock(Tile,Tile); // 32*32=1024 threads per block (the device maximum).
    printf ("GPU Executing Convolution Kernel...\n") ;
    printf("\n");
    Image_Convolution<<<dimGrid,dimBlock>>>(dev_I_Image,dev_O_Image); // Kernel Launch configuration.
    cudaMemcpy(O_Image,dev_O_Image,SIZE,cudaMemcpyDeviceToHost); // Copying Output Image back to Host Memory.
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // Blocks CPU execution until Device Kernel finishes its job.
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("GPU Execution Time for Convolution Kernel: %f\n", milliseconds); //GPU Execution Time.
    printf("Effective Bandwidth (GB/s): %f\n", N*4*2/milliseconds/1e6);
    //N*4 is the total number of Bytes transferred and (1+1)=2 is for read Input Image and write Output Image.
    printf("\n");
    cudaFree(dev_I_Image); // Free device memory.
    cudaFree(dev_O_Image);
    free(I_Image);
    free(O_Image);
    return 0;
}
|
5,178 | #include "includes.h"
// Backpropagate through c = s*a + (1-s)*b, one element per thread:
// da += dc*s, db += dc*(1-s), ds += dc*(a - b). da may be NULL.
// Index covers a 2D grid of 1D blocks.
__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
    const int idx = (blockIdx.y*gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    const float g = dc[idx];
    if (da) da[idx] += g * s[idx];
    db[idx] += g * (1-s[idx]);
    ds[idx] += g * a[idx] + g * -b[idx];
}
5,179 | //CUDE_Minimum_Fineding.cu
//Ben Talotta
#include "stdio.h"
#include "stdlib.h"
//based on cuda summing_Arrrays example
#define N 8000000
#define ThreadCount 8
// Each thread scans one contiguous N/blockDim.x slice of a[] and writes its
// minimum to c[threadIdx.x] (single block launch).
// Fixes: the loop condition was i < high, which skipped the last element of
// every slice; the slice count was hard-coded to 8 instead of blockDim.x.
__global__ void findMin(int* a, int* c )
{
    int numToSort = N / blockDim.x;          // was N / 8
    int low = numToSort * threadIdx.x;
    int high = low + numToSort - 1;
    int minValForThread = a[low];
    for(int i = low + 1; i <= high; ++i){    // was i < high (off by one)
        if(minValForThread > a[i]){
            minValForThread = a[i];
        }
    }
    c[threadIdx.x] = minValForThread;
}
// Fills a large random array, finds its minimum with one block of
// ThreadCount threads, then reduces the per-thread minima on the host.
// Fixes: host loops now use ThreadCount instead of a hard-coded 8, and the
// malloc'd input array is freed.
int main()
{
    dim3 grid(1);
    int *a;
    a = (int *)malloc(sizeof(int) * N);
    int *dev_a;
    int c[ThreadCount];
    int *dev_c;
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_c, ThreadCount * sizeof(int));
    // Seed per-thread results with a sentinel larger than any input value.
    for(int i = 0; i < ThreadCount; i++){
        c[i] = 1000000000;
    }
    //fill array
    for (int i = 0; i < N; i++){
        a[i] = rand() % 1000000000;
    }
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, ThreadCount * sizeof(int), cudaMemcpyHostToDevice);
    findMin <<<grid, ThreadCount >>> (dev_a, dev_c);
    cudaMemcpy(c, dev_c, ThreadCount * sizeof(int), cudaMemcpyDeviceToHost);
    // Final host-side reduction over the per-thread minima.
    int min = c[0];
    for(int i = 1; i < ThreadCount; i++){
        if(min > c[i]){
            min = c[i];
        }
    }
    printf("minimum value using cuda is: %d\n", min);
    free(a);
    cudaFree(dev_a);
    cudaFree(dev_c);
    return 0;
}
|
5,180 | #include <cstdio>
#define N 32
// Reverse an N-element array in place via two shared-memory staging buffers.
// Fix: the original read smem[N-idx-1] written by a *different* thread with
// no barrier in between — a data race (volatile does not order cross-thread
// accesses, and warp lockstep is not guaranteed on Volta+). A __syncthreads()
// now separates the write from the cross-thread read.
__global__ void k(volatile int* in)
{
    __shared__ int volatile smem[N];
    __shared__ int volatile tmem[N];
    int idx = threadIdx.x + blockDim.x*blockIdx.x;
    smem[idx] = in[idx];
    __syncthreads();            // make every smem write visible before reading
    tmem[idx] = smem[N-idx-1];  // cross-thread read of the mirrored element
    in[idx] = tmem[idx];        // own-thread read; no barrier needed
}
// Driver: builds 0..N-1, reverses it on the device with one N-thread block,
// and prints the result.
int main()
{
    const size_t bytes = N*sizeof(int);
    int* host_buf = (int*) malloc(bytes);
    for(int i = 0; i < N; i++) host_buf[i] = i;
    int* dev_buf;
    cudaMalloc((void**)&dev_buf, bytes);
    cudaMemcpy(dev_buf, host_buf, bytes, cudaMemcpyHostToDevice);
    k<<<1,N>>>(dev_buf);
    cudaMemcpy(host_buf, dev_buf, bytes, cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++) printf("%d ", host_buf[i]);
    printf("\n");
    free(host_buf); cudaFree(dev_buf);
}
5,181 | #include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
// kernels are C++ functions defined with CUDA
// They will be called with << >>()
// cudaGetDeviceCount (int* count)
// Returns the number of compute-capable devices
// cudaGetDeviceProperties (cudaDeviceProp* prop, int device)
// Returns information about the compute-device.
// Program that gives the information of the GPUs on the boards
// Prints basic properties of every CUDA device on the system.
// Fix: the original wrapped the calls in try/catch(cudaError_t), but the
// CUDA runtime C API reports failures through return codes and never throws,
// so the catch block was dead. Return codes are now checked explicitly.
int main() {
    int devices = 0;
    cudaDeviceProp prop;
    cudaError_t status = cudaGetDeviceCount(&devices);
    if (status != cudaSuccess) {
        std::cerr << cudaGetErrorString(status) << std::endl;
        return 1;
    }
    // Get information of all the Nvidia devices on the computer
    for(int device = 0; device < devices; device++) {
        status = cudaGetDeviceProperties(&prop, device);
        if (status != cudaSuccess) {
            std::cerr << cudaGetErrorString(status) << std::endl;
            return 1;
        }
        std::cout << "Device Number : " << device << std::endl;
        std::cout << "Device name : " << prop.name << std::endl;
        std::cout << "Memory Clock Rate (KHz) : " << prop.memoryClockRate << std::endl;
        std::cout << "Global Memory size (bits) : " << prop.memoryBusWidth << std::endl;
        // warp size = number of threads executed together as one SIMT unit
        std::cout << "Warp Size : " << prop.warpSize << std::endl;
        // x2 for DDR: two transfers per clock
        std::cout << "Peak Memory Bandwidth (GB/s) : " << 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6 << std::endl;
    }
    return 0;
}
/*
Device Number : 0
Device name : GeForce RTX 2060 SUPER
Memory Clock Rate (KHz) : 7001000
Global Memory size (bits) : 256
Warp Size : 32
Peak Memory Bandwidth (GB/s) : 448.064
*/ |
5,182 | #include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
// Returns the total number of FP32 ("CUDA") cores for a device, derived from
// its compute capability and multiprocessor count. Returns 0 (after printing
// a message) for architectures this table does not know.
// Generalized vs. original: adds Ada Lovelace (8.9) and Hopper (9.0), both
// 128 FP32 cores per SM per NVIDIA's architecture whitepapers.
int getSPcores(cudaDeviceProp devProp)
{
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    switch (devProp.major){
    case 2: // Fermi
        if (devProp.minor == 1) cores = mp * 48;
        else cores = mp * 32;
        break;
    case 3: // Kepler
        cores = mp * 192;
        break;
    case 5: // Maxwell
        cores = mp * 128;
        break;
    case 6: // Pascal
        if ((devProp.minor == 1) || (devProp.minor == 2)) cores = mp * 128;
        else if (devProp.minor == 0) cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 7: // Volta and Turing
        if ((devProp.minor == 0) || (devProp.minor == 5)) cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 8: // Ampere and Ada
        if (devProp.minor == 0) cores = mp * 64;
        else if ((devProp.minor == 6) || (devProp.minor == 9)) cores = mp * 128;
        else printf("Unknown device type\n");
        break;
    case 9: // Hopper
        if (devProp.minor == 0) cores = mp * 128;
        else printf("Unknown device type\n");
        break;
    default:
        printf("Unknown device type\n");
        break;
    }
    return cores;
}
// deviceQuery-style report for device 0: versions, memory, limits, core count.
// Fix: the GPU clock line used "%0.2h", which is not a valid printf
// conversion ('h' is a length modifier) — now "%0.2f".
int main(int argc, char **argv) {
    printf("%s Starting...\n", argv[0]);
    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if(error_id != cudaSuccess) {
        printf("cudaGetDeviceCount returned %d\n -> %s\n",
               (int)error_id, cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    if(deviceCount == 0) {
        printf("There are no available device(s) that support CUDA\n");
    } else {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }
    int dev, driverVersion = 0, runtimeVersion = 0;
    dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Device %d: \"%s\"\n", dev, deviceProp.name);
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n",
           driverVersion/1000, (driverVersion%100) / 10,
           runtimeVersion/1000, (runtimeVersion%100) / 10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n",
           deviceProp.major, deviceProp.minor);
    printf(" Total amount of global memory: %.2f GBytes (%llu bytes)\n",
           (float)deviceProp.totalGlobalMem / (pow(1024.0, 3)),
           (unsigned long long) deviceProp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n", // was invalid %0.2h
           deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f MHz\n",
           deviceProp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n",
           deviceProp.memoryBusWidth);
    if(deviceProp.l2CacheSize) {
        printf(" L2 Cache Size: %d bytes\n",
               deviceProp.l2CacheSize);
    }
    printf(" Max Texture Dimension Size (x,y,z) 1D=%d, 2D=(%d,%d), 3D=(%d,%d,%d)\n",
           deviceProp.maxTexture1D, deviceProp.maxTexture2D[0],
           deviceProp.maxTexture2D[1],
           deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
           deviceProp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
           deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
           deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
           deviceProp.maxTexture2DLayered[2]);
    printf(" Total amount of constant memory: %lu bytes\n",
           deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n",
           deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n",
           deviceProp.regsPerBlock);
    printf(" Warp size: %d\n", deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n",
           deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n",
           deviceProp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimensions of a block: %d x %d x %d\n",
           deviceProp.maxThreadsDim[0],
           deviceProp.maxThreadsDim[1],
           deviceProp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
           deviceProp.maxGridSize[0],
           deviceProp.maxGridSize[1],
           deviceProp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
    printf(" Multiprocessor count: %d\n",
           deviceProp.multiProcessorCount);
    printf(" Number of CUDA cores (infered): %d\n", getSPcores(deviceProp));
    exit(EXIT_SUCCESS);
}
|
5,183 |
// Binary threshold: output is 255 where input exceeds thresh, 0 elsewhere.
// One element per thread; the index linearizes a 3D grid of 2D blocks.
__global__ void thresholding_filter_kernel(unsigned int *input, unsigned int *output, unsigned int thresh){
    const int block_linear = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int idx = block_linear * (blockDim.x * blockDim.y)
                  + threadIdx.y * blockDim.x + threadIdx.x;
    output[idx] = (input[idx] > thresh) ? 255u : 0u;
}
// Marching cubes over a binarized volume (input values are 0/255, as produced
// by the thresholding kernel above). One thread per cube origin voxel.
// triangles holds up to 5 triangles x 3 vertices x 3 coords = 45 floats per
// voxel; vertices are midpoints of the intersected cube edges.
// lookup_two is the standard 256x16 triangle-edge table; lookup_one is
// passed but unused here.
__global__ void marching_cubes_filter(unsigned int *input, int *lookup_one, int *lookup_two, float *triangles, unsigned int data_width, unsigned int data_height, unsigned int data_depth, unsigned int step){
const int global_x = blockIdx.x * blockDim.x + threadIdx.x;
const int global_y = blockIdx.y * blockDim.y + threadIdx.y;
const int global_z = blockIdx.z * blockDim.z + threadIdx.z;
const int master_vertex = global_z * data_width * data_height + global_y * data_width + global_x;
if(global_x + 1 < data_width-1 && global_y + 1 < data_height-1 && global_z + 1 < data_depth-1){
// Clear this voxel's triangle slots before (possibly) filling them.
for(unsigned int tm=0; tm<(5*3*3); tm++){
triangles[master_vertex* (5*3*3) + tm] = 0.0;
}
// double check that these refer to the right vertices
// Cube corners: {x, y, z, bit} where bit is this corner's weight in the
// 8-bit case index.
int cube[8][4]{
{global_x, global_y, global_z,1},
{global_x, global_y+1, global_z,2},
{global_x+1, global_y+1, global_z,4},
{global_x+1, global_y, global_z,8},
{global_x, global_y, global_z+1,16},
{global_x, global_y+1, global_z+1,32},
{global_x+1, global_y+1, global_z+1,64},
{global_x+1, global_y, global_z+1,128}};
// Build the case index: set a corner's bit when that voxel is "inside".
int case_lookup_idx = 0;
for(unsigned int ci=0; ci<8; ci++){
const int x = cube[ci][0];
const int y = cube[ci][1];
const int z = cube[ci][2];
const int vertex = z * data_width * data_height + y * data_width + x;
if (input[vertex] ==255){
case_lookup_idx |= cube[ci][3];
}
}
// The 12 cube edges as endpoint-coordinate pairs (x1,y1,z1,x2,y2,z2).
int edge_actual[12][6] = {
{cube[0][0],cube[0][1],cube[0][2],cube[1][0],cube[1][1],cube[1][2]},
{cube[1][0],cube[1][1],cube[1][2],cube[2][0],cube[2][1],cube[2][2]},
{cube[2][0],cube[2][1],cube[2][2],cube[3][0],cube[3][1],cube[3][2]},
{cube[3][0],cube[3][1],cube[3][2],cube[0][0],cube[0][1],cube[0][2]},
{cube[4][0],cube[4][1],cube[4][2],cube[5][0],cube[5][1],cube[5][2]},
{cube[5][0],cube[5][1],cube[5][2],cube[6][0],cube[6][1],cube[6][2]},
{cube[6][0],cube[6][1],cube[6][2],cube[7][0],cube[7][1],cube[7][2]},
{cube[7][0],cube[7][1],cube[7][2],cube[4][0],cube[4][1],cube[4][2]},
{cube[4][0],cube[4][1],cube[4][2],cube[0][0],cube[0][1],cube[0][2]},
{cube[5][0],cube[5][1],cube[5][2],cube[1][0],cube[1][1],cube[1][2]},
{cube[6][0],cube[6][1],cube[6][2],cube[2][0],cube[2][1],cube[2][2]},
{cube[7][0],cube[7][1],cube[7][2],cube[3][0],cube[3][1],cube[3][2]}
};
// Cases 0 and 255 (fully outside/inside) produce no triangles.
if(case_lookup_idx != 255 && case_lookup_idx != 0){
int current =0;
int edge_counter = 0;
// Walk the (-1)-terminated edge list for this case.
for(int w=0; w<16; w++){
current = lookup_two[case_lookup_idx * 16 + w];
// current now gives an edge index so we need to add the point to the triangle list
if(current != -1){
int point1_x = edge_actual[current][0];
int point1_y = edge_actual[current][1];
int point1_z = edge_actual[current][2];
int point2_x = edge_actual[current][3];
int point2_y = edge_actual[current][4];
int point2_z = edge_actual[current][5];
// Vertex = edge midpoint; only z is scaled by `step` (anisotropic
// slice spacing, presumably — NOTE(review): confirm x/y should not
// also be scaled).
triangles[master_vertex * (5*3*3) +(edge_counter*3) + 0] = (((float)point1_x + (float)point2_x)/2.0);
triangles[master_vertex * (5*3*3) +(edge_counter*3) + 1] = (((float)point1_y + (float)point2_y)/2.0);
triangles[master_vertex * (5*3*3) +(edge_counter*3) + 2] = (((float)point1_z + (float)point2_z)/2.0) * step;// could do better interpolation here
edge_counter++;
}
}
// printf("\n");
}
}
return;
}
5,184 | /*
* HostDeviceVector.cpp
*
* Created on: 11 янв. 2016 г.
* Author: aleksandr
*/
#include "HostDeviceVector.h"
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <iostream>
// Default-construct with empty host and device buffers.
HostDeviceVector::HostDeviceVector() {}
// thrust vectors release their storage themselves.
HostDeviceVector::~HostDeviceVector() {}
// Construct both mirrors at `size`, every element set to `placeholder`.
HostDeviceVector::HostDeviceVector(std::size_t size, float placeholder) {
deviceVector.resize(size);
thrust::fill(deviceVector.begin(), deviceVector.end(), placeholder);
hostVector.resize(size);
thrust::fill(hostVector.begin(), hostVector.end(), placeholder);
}
// Resize both mirrors and overwrite ALL elements (old ones too) with placeholder.
void HostDeviceVector::resize(std::size_t size, float placeholder) {
deviceVector.resize(size);
thrust::fill(deviceVector.begin(), deviceVector.end(), placeholder);
hostVector.resize(size);
thrust::fill(hostVector.begin(), hostVector.end(), placeholder);
}
// Set every element of both mirrors to placeholder.
void HostDeviceVector::fill(float placeholder) {
thrust::fill(deviceVector.begin(), deviceVector.end(), placeholder);
thrust::fill(hostVector.begin(), hostVector.end(), placeholder);
}
// Copy device contents into the host mirror (synchronous thrust copy).
void HostDeviceVector::GPUtoCPU() {
thrust::copy(deviceVector.begin(), deviceVector.end(), hostVector.begin());
}
// Copy host contents into the device mirror.
void HostDeviceVector::CPUtoGPU() {
thrust::copy(hostVector.begin(), hostVector.end(), deviceVector.begin());
}
// Raw thrust device pointer to the device buffer.
thrust::device_ptr<float> HostDeviceVector::getDevicePtr() {
return deviceVector.data();
}
// Raw pointer to the host buffer (invalid for an empty vector).
float* HostDeviceVector::getHostPtr() {
return &hostVector[0];
}
std::size_t HostDeviceVector::getSize() const {
return hostVector.size();
}
// Element access on the HOST mirror only; call GPUtoCPU() first if the
// device copy is newer.
float& HostDeviceVector::operator [] (std::size_t index) {
return hostVector[index];
}
|
5,185 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>
// Sums n ones on the GPU via thrust::reduce and prints the result and the
// kernel time in milliseconds.
// Fixes: argv[1] is validated before use (the original dereferenced it
// unconditionally) and the CUDA events are destroyed.
int main(int argc, char *argv[]) {
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <n>" << std::endl;
        return 1;
    }
    long n = atol(argv[1]);
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    thrust::host_vector<int> h_vec(n);
    for (long i = 0; i < n; i++) {
        h_vec[i] = 1;
    }
    thrust::device_vector<int> d_vec(h_vec.size());
    thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
    int init = 0;
    cudaEventRecord(start);
    int res = thrust::reduce(d_vec.begin(), d_vec.end(), init, thrust::plus<int>());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << res << std::endl;
    std::cout << ms << std::endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
5,186 | // Program corresponding to CythonBM.cu that can be run directly from the command line. For testing purposes.
// Attempt to use 2D array. Doesn't work.
//#include <cmath>
#include <curand_kernel.h>
#include <stdio.h>
#include <cuda.h>
// Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/curand/host-api-overview.html#generator-options
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
//Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier
// Generate one Brownian path of N steps over horizon T per thread, written
// into results[sim][0..N-1].
// Fix: the original called curand_init(1234, 0, 0, ...) identically in every
// thread, so all simulations drew the exact same normal sequence and all
// paths were identical. The thread's simulation index is now used as the
// cuRAND sequence number, giving independent streams per simulation.
__global__ void randomWalk(double **results, int T, int N, int numSims) {
    int start = (threadIdx.x + blockIdx.x * blockDim.x);
    if (start < numSims) {
        curandState_t state;
        curand_init(1234, start, 0, &state);   // seed, sequence=sim index, offset
        results[start][0] = 0.0;
        for (int j = 1; j < N; j++) {
            double random = curand_normal_double(&state);
            // Euler step: dW ~ N(0, T/N)
            results[start][j] = results[start][j-1] + random * sqrt((double) T / N);
        }
        /*
        Generate 2 doubles at once. Test later to see if this is more efficient:
        double curand_normal2_double (state);
        */
    }
}
// NOTE(review): the file's own header says this 2D-array attempt "doesn't
// work", and the memory handling below confirms it: dev_results is first
// filled with HOST row pointers (new double[N]), then cudaMalloc overwrites
// the local dev_results with one flat device allocation, while the kernel
// still dereferences it as double** — reading garbage "row pointers" on the
// device. The cudaMemcpy back into `results` (an array of host pointers)
// also clobbers the pointer table rather than the row data. A working
// version needs either a flat double* layout or device-allocated rows with
// the row-pointer table copied to the device.
int main() {
//Arrays to store the brownian path, one for the host and one for the device
const int N = 10;
int T = 1;
const int numSims = 5;
// NOTE(review): likely meant (numSims + 127) / 128 — as written this is
// (127 + numSims) / numSims blocks.
int numBlocks = (127 + numSims) / numSims;
int numThreads = 128;
double** results = new double*[numSims];
for(int i = 0; i < numSims; ++i)
results[i] = new double[N];
double** dev_results = new double*[numSims];
for(int i = 0; i < numSims; ++i)
dev_results[i] = new double[N];   // host rows, leaked once cudaMalloc overwrites dev_results
// Allocate space for results array on device
CUDA_CALL(cudaMalloc(&dev_results, N * numSims * sizeof(double)));
//Call GPU function, with ony one block and one thread
randomWalk<<<numBlocks, numThreads>>>(dev_results, T, N, numSims);
//copy results array from device to host
CUDA_CALL(cudaMemcpy(results, dev_results , N * numSims * sizeof(double), cudaMemcpyDeviceToHost));
// print out path
for (int i=0; i< numSims; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", results[i][j]);
}
printf("\n");
printf("\n");
}
//clean up
CUDA_CALL(cudaFree(dev_results));
return 0;
}
|
5,187 | #include "includes.h"
// Mirror-sort pass over fit[range[0]..range[1]]: thread idx compares the
// idx-th element from each end and swaps both the fitness values and the
// corresponding 6-float gene rows so the better (higher) fitness ends up in
// the front half.
// Fix: the guard was `idx > nHalf`, which for an even-sized range let thread
// idx == nHalf process pair (nHalf, nHalf-1) — the same pair thread
// nHalf-1 handles, mirrored — causing a duplicate, racy swap. The guard is
// now `idx >= nHalf` (for odd sizes the middle element needs no work anyway).
__global__ void rearrangePopulationWithRange(float *gene, float *fit, int *range)
{
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    if(range[0]>range[1]) return;          // empty/invalid range
    int totalElements = range[1] - range[0] + 1;
    int nHalf = totalElements / 2;
    if(idx >= nHalf) return;               // was idx > nHalf (duplicate pair)
    int i = range[0] + idx;                // from the front
    int j = range[1] - idx;                // mirrored from the back
    if (fit[i] < fit[j]) {
        // Swap the 6-gene chromosome rows element by element.
        for(int k=0; k<6; k++) {
            float t = gene[i*6+k];
            gene[i*6+k] = gene[j*6+k];
            gene[j*6+k] = t;
        }
        float t = fit[i];
        fit[i] = fit[j];
        fit[j] = t;
    }
}
5,188 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
// Reverse d[0..n-1] in place through a statically sized shared buffer.
// Two regimes: when n fits in one block, each thread mirrors one element;
// otherwise each thread handles a strided chunk of 12 elements, so with the
// 1024-thread launch in main() at most 12 * 1024 = 12288 elements (the
// buffer size) are supported. Larger n silently overflows the buffer.
__global__ void staticReverse(int *d, int n)
{
__shared__ int staticMem[12288];
int idx = threadIdx.x;
// NOTE(review): `&` here is bitwise but works because both operands are 0/1;
// the read-back phase below uses `&&` for the same test.
if (n <= blockDim.x & idx < n) {
staticMem[n - 1 - idx] = d[idx];
} else {
// chunked path: thread idx owns elements [idx*12, idx*12+11]
int k = idx * 12;
if (k < n) {
int i = 0;
while (i < 12 && (k + i) < n) {
staticMem[n - 1 - (k + i)] = d[k + i];
++i;
}
}
}
// All reversed values must be staged before any thread reads them back.
__syncthreads();
idx = threadIdx.x;
if (n <= blockDim.x && idx < n) {
d[idx] = staticMem[idx];
} else {
int k = idx * 12;
if (k < n) {
int i = 0;
while (i < 12 && (k + i) < n) {
d[k + i] = staticMem[k + i];
++i;
}
}
}
}
// Same in-place reversal as staticReverse, but the staging buffer is dynamic
// shared memory: launch with n * sizeof(int) as the third launch parameter
// (as main() does). Same 12-elements-per-thread chunking for n > blockDim.x.
__global__ void dynamicReverse(int *d, int n)
{
extern __shared__ int dynamicMem[];
int idx = threadIdx.x;
int *arr = dynamicMem;
// NOTE(review): bitwise `&` (works for 0/1 operands); `&&` used below.
if (n <= blockDim.x & idx < n) {
arr[n - 1 - idx] = d[idx];
} else {
// chunked path: thread idx owns elements [idx*12, idx*12+11]
int k = idx * 12;
if (k < n) {
int i = 0;
while (i < 12 && (k + i) < n) {
arr[n - 1 - (k + i)] = d[k + i];
++i;
}
}
}
// Barrier between staging the reversed values and reading them back.
__syncthreads();
idx = threadIdx.x;
if (n <= blockDim.x && idx < n) {
d[idx] = arr[idx];
} else {
int k = idx * 12;
if (k < n) {
int i = 0;
while (i < 12 && (k + i) < n) {
d[k + i] = arr[k + i];
++i;
}
}
}
}
// Verifies both reversal kernels against the expected reversed sequence.
// Fixes: argv[1] is validated (the original dereferenced it unchecked); n is
// capped at 12288 because staticReverse's shared buffer holds 12288 ints and
// both kernels assume at most 12 elements per thread of a 1024-thread block;
// the C-style VLA `int r[n]` is replaced with a freed heap allocation.
int main(int argc, char **argv)
{
    if (argc < 2) {
        printf("usage: %s <n>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    if (n <= 0 || n > 12288) {
        printf("n must be between 1 and 12288\n");
        return 1;
    }
    size_t bytes = n * sizeof(int);
    int *r = (int *) malloc(bytes);   // expected (reversed) result
    int *a = (int *) malloc(bytes);
    int *d = (int *) malloc(bytes);
    for (int i = 0; i < n; i++) {
        a[i] = i;
        r[i] = n-i-1;
        d[i] = 0;
    }
    int *d_d;
    cudaMalloc(&d_d, bytes);
    int blockSize = 1024;
    // run version with static shared memory
    cudaMemcpy(d_d, a, bytes, cudaMemcpyHostToDevice);
    staticReverse<<<1, blockSize>>>(d_d, n);
    cudaMemcpy(d, d_d, bytes, cudaMemcpyDeviceToHost);
    int flag = 1;
    for (int i = 0; i < n; i++) {
        if (d[i] != r[i]) {
            printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
            flag = 0;
        }
    }
    if (flag) {
        printf("staticReverse OK\n");
    }
    for (int i = 0; i < n; ++i) {
        d[i] = 0;
    }
    // run dynamic shared memory version (third launch arg = buffer size)
    cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
    dynamicReverse<<<1, blockSize, n*sizeof(int)>>>(d_d, n);
    cudaMemcpy(d, d_d, n * sizeof(int), cudaMemcpyDeviceToHost);
    flag = 1;
    for (int i = 0; i < n; i++) {
        if (d[i] != r[i]) {
            printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
            flag = 0;
        }
    }
    if (flag) {
        printf("dynamicReverse OK\n");
    }
    free(a);
    free(d);
    free(r);
    cudaFree(d_d);
    return 0;
}
|
5,189 | #include<stdio.h>
#include<cuda.h>
// In-place ASCII uppercasing: one character per thread; non-lowercase
// characters are left untouched.
__global__ void convertToCaps(char *str,int length){
    const int pos = threadIdx.x + blockIdx.x*blockDim.x;
    if (pos >= length) return;
    const char c = str[pos];
    if (c >= 'a' && c <= 'z') str[pos] = c - 32;
}
// Histogram the uppercase letters of str into count[0..25] ('A' -> 0).
// Fix: the original indexed count[str[index]-65] unconditionally, so any
// non-letter byte (space, digit, punctuation) produced a negative or
// out-of-range index and an out-of-bounds atomicAdd. Non-letters are now
// skipped; behavior for 'A'..'Z' is unchanged.
__global__ void findMaxOccurence(char *str,int *count,int length){
    int index = threadIdx.x+blockIdx.x*blockDim.x;
    if(index<length){
        char c = str[index];
        if (c >= 'A' && c <= 'Z')
            atomicAdd(&count[(int)c - 65], 1);
    }
}
// Return the index (0..25) of the largest value in count; on ties the
// earliest index wins.
int countMax(int *count){
    int best = 0;
    for (int i = 1; i < 26; i++)
        if (count[i] > count[best])
            best = i;
    return best;
}
// Reads a string, uppercases it on the GPU, histograms the letters and
// reports the most frequent letter's count plus the kernel time.
// Fixes: scanf results checked; the managed buffer is n+1 bytes so the
// strcpy terminator does not overflow it; host/device/event resources are
// released (the original leaked everything).
int main(){
    char *str;
    int n;
    char dummy;
    printf("\nEnter length of string:");
    if (scanf("%d",&n) != 1 || n <= 0) {
        printf("\nInvalid length");
        return 1;
    }
    scanf("%c",&dummy);               // consume the newline left by %d
    str = (char*)malloc((n+1)*sizeof(char));
    printf("\nEnter the String:");
    scanf("%[^\n]s",str);
    int noOfBlocks = n/1024;
    int noOfThreads;
    noOfBlocks++;
    if(noOfBlocks==1){
        noOfThreads=n;
    }
    else{
        noOfThreads=1024;
    }
    char *dev_str=NULL;int *count;
    // +1: strcpy below also writes the '\0' terminator.
    cudaMallocManaged((void**)&dev_str,(n+1)*sizeof(char));
    cudaMallocManaged((void**)&count,26*sizeof(int));
    for(int i=0;i<26;i++){
        count[i]=0;
    }
    strcpy(dev_str,str);
    convertToCaps<<<noOfBlocks,noOfThreads>>>(dev_str,n);
    cudaDeviceSynchronize();
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    findMaxOccurence<<<noOfBlocks,noOfThreads>>>(dev_str,count,n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // also makes `count` safe to read on the host
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    int max = countMax(count);
    printf("\nMaximum count = %d",count[max]);
    printf("\nExecution Time = %f ms",milliseconds);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_str);
    cudaFree(count);
    free(str);
    return 0;
}
5,190 | // Name: H.G. Manesha Washani
// Student Id: 1432289
#include <stdio.h>
#include <stdlib.h>
#define N 20
// Element-wise N x N matrix add; launched with an N x N grid of single-thread
// blocks, so each block computes exactly one C[row][col].
__global__ void MatAdd(int A[][N], int B[][N], int C[][N]){
    const int row = blockIdx.x;
    const int col = blockIdx.y;
    C[row][col] = A[row][col] + B[row][col];
}
//int** randmatfunc();
// Fill newmat with random values in [1, 100], echoing each row to stdout
// followed by a separator line.
void randmatfunc(int newmat[N][N]){
    for (int r = 0; r < N; r++){
        for (int c = 0; c < N; c++){
            const int v = rand() % 100 + 1;
            printf("%d ", v);
            newmat[r][c] = v;
        }
        printf("\n");
    }
    printf("\n--------------------------------------\n");
}
// Builds two random N x N matrices, adds them on the GPU and prints the sum.
// Fix: the original also copied the uninitialized host matrix C to the
// device — a pointless transfer of indeterminate bytes (the kernel fully
// overwrites d_C anyway); that copy is removed.
int main(){
    int A[N][N];
    randmatfunc(A);
    int B[N][N];
    randmatfunc(B);
    int C[N][N];
    int (*d_A)[N], (*d_B)[N], (*d_C)[N];
    cudaMalloc((void**)&d_A, (N*N)*sizeof(int));
    cudaMalloc((void**)&d_B, (N*N)*sizeof(int));
    cudaMalloc((void**)&d_C, (N*N)*sizeof(int));
    cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    // One single-thread block per output element.
    int numThreads = 1;
    dim3 numBlocks(N,N);
    MatAdd<<<numBlocks,numThreads>>>(d_A,d_B,d_C);
    cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    int g, h; printf("C = \n");
    for(g=0;g<N;g++){
        for(h=0;h<N;h++){
            printf("%d ", C[g][h]);
        }
        printf("\n");
    }
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\n");
    return 0;
}
|
5,191 | #include "includes.h"
// One step of a lattice (Ising-like) update: each cell's new value is set by
// the sign of a 5x5 weighted neighbourhood sum ("influence") with periodic
// wrap-around at the grid edges. WINDOW_SIZE, MIN_MARGIN and diff are
// defined elsewhere (includes.h); from the indexing, WINDOW_SIZE is
// presumably 5 and MIN_MARGIN presumably 2 — TODO confirm.
// Launch assumption: 32x32 thread blocks (shared tile is hardcoded 32x32).
__global__ void computeMoment(int *readArr, int *writeArr, double *weightArr, int n){
// The dimensions are hardcoded here to simplify extra syntax
// cuda uses for dynamic shared memory allocation
__shared__ int readArr_shared[32][32];
__shared__ double weightArr_shared[5][5];
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
// First 5x5 threads stage the weight window into shared memory.
if(threadIdx.x<5 && threadIdx.y < 5){
weightArr_shared[threadIdx.x][threadIdx.y] = weightArr[threadIdx.x*WINDOW_SIZE + threadIdx.y];
}
__syncthreads();
// Only values within the below borders will be used but the __syncthreads()
// function has to be called outside if statements so we load everything here
// NOTE(review): no bounds check on readArr[row*n + col] — assumes n is a
// multiple of 32 so every thread maps to a valid cell. Confirm at call site.
readArr_shared[threadIdx.x][threadIdx.y] = readArr[row*n + col];
__syncthreads();
// If coordinates are between boundaries
// update the write array accordingly
if(row < n && col < n){
float influence = 0.0f;
for (int i=-2; i<3; i++)
{
for (int j=-2; j<3; j++)
{
//add extra n so that modulo behaves like mathematics modulo
//that is return only positive values
// Interior threads (margin inside the 32x32 tile) read the fast shared
// copy; tile-edge threads fall back to global memory with wrap-around.
if(threadIdx.x >= MIN_MARGIN && threadIdx.y >= MIN_MARGIN &&
threadIdx.x <= 31-MIN_MARGIN && threadIdx.y <= 31-MIN_MARGIN){
int y = threadIdx.x + i;
int x = threadIdx.y + j;
influence += weightArr_shared[i+2][j+2]*readArr_shared[y][x];
}else{
int y = (row+i+n)%n;
int x = (col+j+n)%n;
influence += weightArr_shared[i+2][j+2]*readArr[y*n + x];
}
}
}
// Write: keep the old value unless |influence| exceeds diff, in which
// case the cell snaps to the sign of the influence (-1 or +1).
if(threadIdx.x >= MIN_MARGIN && threadIdx.y >= MIN_MARGIN &&
threadIdx.x <= 31-MIN_MARGIN && threadIdx.y <= 31-MIN_MARGIN){
writeArr[row*n + col] = readArr_shared[threadIdx.x][threadIdx.y];
if (influence<-diff) writeArr[row*n + col] = -1;
else if (influence>diff) writeArr[row*n + col] = 1;
}else {
writeArr[row*n + col] = readArr[row*n + col];
if (influence<-diff) writeArr[row*n + col] = -1;
else if (influence>diff) writeArr[row*n + col] = 1;
}
}
__syncthreads();
}
5,192 | template<typename T>
// Dot product of A and B (both of `length` elements), written to result[0].
// Sequential per calling thread — no parallel decomposition here.
__device__ void vectorMulVector(const T* A, const T* B, T* result, const int length) {
    T acc = 0;
    for (int k = 0; k < length; ++k)
        acc += A[k] * B[k];
    result[0] = acc;
}
template<typename T>
// result = matrix * vector, one output row per thread.
// matrix is row-major matrixRows x matrixColumns; vector has matrixColumns
// elements; result has matrixRows elements.
// Fix: the guard was `index < matrixColumns`, but `index` selects a ROW
// (rowStart = index * matrixColumns, result[index]). For non-square
// matrices that either skipped trailing rows (rows > columns) or read and
// wrote out of bounds (columns > rows). The guard is now matrixRows.
__device__ void matrixMulVector(const T* matrix, const T* vector, T* result,
const int matrixRows, const int matrixColumns) {
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int index = bx * blockDim.x + tx;
    if (index < matrixRows) {
        T resultValue = 0;
        int rowStart = index * matrixColumns;
        for (int i = 0; i < matrixColumns; i++) {
            resultValue += matrix[rowStart + i] * vector[i];
        }
        result[index] = resultValue;
    }
}
template<typename T>
// result = vector^T * matrix, one output column per thread.
// vector has matrixRows elements; matrix is row-major
// matrixRows x matrixColumns; result has matrixColumns elements.
// Fix: the guard was `index < matrixRows`, but `index` selects a COLUMN
// (matrix[index + i*matrixColumns], result[index]). For non-square matrices
// that skipped trailing columns or went out of bounds. Now matrixColumns.
__device__ void vectorMulMatrix(const T* vector, const T* matrix, T* result,
const int matrixRows, const int matrixColumns) {
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int index = bx * blockDim.x + tx;
    if (index < matrixColumns) {
        T resultValue = 0;
        for (int i = 0; i < matrixRows; i++) {
            resultValue += vector[i] * matrix[index + i * matrixColumns];
        }
        result[index] = resultValue;
    }
}
template<typename T>
// Outer product: resultMatrix[r][c] = vectorA[r] * vectorB[c], with
// resultMatrix stored row-major as lengthA x lengthB. One element per thread
// of a 2D grid of 2D blocks.
__device__ void vectorMatrixMulVector(const T* vectorA, const T* vectorB, T* resultMatrix,
const int lengthA, const int lengthB) {
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= lengthA || c >= lengthB)
        return;
    resultMatrix[r * lengthB + c] = vectorA[r] * vectorB[c];
}
5,193 | #include "includes.h"
// Accumulate (with scale factor `fact`) a component range of one Cartesian
// field into another: out += fact * inp, over nCells cells.
// sInp/sOut are flat start offsets into inp/out; each cell holds nCompInp
// (resp. nCompOut) components.  compStart selects where the smaller
// component set sits inside the larger one.  Grid-stride loop over cells,
// so any launch configuration covers all of them.
__global__ void ker_gkylCartFieldAccumulateOffset(unsigned sInp, unsigned sOut, unsigned nCells, unsigned compStart, unsigned nCompInp, unsigned nCompOut, double fact, const double *inp, double *out) {
if (nCompInp < nCompOut) {
// Input has fewer components: deposit them at offset compStart in out.
for (unsigned i=blockIdx.x*blockDim.x + threadIdx.x; i<nCells; i += blockDim.x * gridDim.x) {
for (unsigned c=0; c<nCompInp; ++c) {
out[sOut + i*nCompOut + compStart + c] += fact*inp[sInp + i*nCompInp + c];
}
}
}
else {
// Output has fewer (or equal) components: read from offset compStart in inp.
for (unsigned i=blockIdx.x*blockDim.x + threadIdx.x; i<nCells; i += blockDim.x * gridDim.x) {
for (unsigned c=0; c<nCompOut; ++c) {
out[sOut + i*nCompOut + c] += fact*inp[sInp + i*nCompInp + compStart + c];
}
}
}
} |
5,194 | //
// kernel routine
//
// Teaching stub: intended to make each thread write its thread index into
// one element of x.  The body is deliberately left blank for the student
// to fill in by following the comments below; as written the kernel has
// no effect.
__global__ void my_first_kernel(float *x)
{
// Uncomment line below and define integer "tid" as global index to vector "x"
// int tid =
// Uncomment line below and define x[tid] to be equal to the thread index
// x[tid] =
}
|
5,195 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
// Abort the program with `message` if the most recent CUDA API call or
// kernel launch left an error pending.
cudaError_t status = cudaGetLastError ();
if (status == cudaSuccess)
return;
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
exit(-1);
}
// 64-point stencil sweep over an N x N grid with a hard-wired row pitch of
// 8200 doubles (the array casts below require l_in/l_out to be laid out
// with 8200-element rows -- TODO confirm N is meant to be 8200).
// Each thread produces FOUR vertically adjacent outputs,
// out[j][i] .. out[j+3][i], accumulated in outjc0ic0/outjp1ic0/outjp2ic0/
// outjp3ic0; this is why the host divides the grid's y extent by
// 4*blockDim.y.  NOTE(review): the guard uses bitwise '&' on relational
// results; each operand is 0/1 so it is equivalent to '&&' here.
__global__ void j2d64pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
//Determining the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 4;
int i = max(i0,4) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 4;
int j = max(j0,4) + 4*(int)(threadIdx.y);
double (*in)[8200] = (double (*)[8200]) l_in;  // fixed row pitch of 8200
double (*out)[8200] = (double (*)[8200]) l_out;
if (i>=4 & j>=4 & i<=N-5 & j<=N-5) {
// Accumulators for the four output rows handled by this thread.
double outjc0ic0 = 1.27449 * in[j-4][i-4];
outjc0ic0 -= 1.27449 * in[j-4][i+4];
outjc0ic0 += 0.000136017 * -in[j-4][i-3];
outjc0ic0 += 0.000136017 * in[j-4][i+3];
outjc0ic0 += 0.000714 * in[j-4][i-2];
outjc0ic0 -= 0.000714 * in[j-4][i+2];
outjc0ic0 += 0.002856 * -in[j-4][i-1];
outjc0ic0 += 0.002856 * in[j-4][i+1];
outjc0ic0 -= 1.27449 * in[j+4][i-4];
double outjp1ic0 = 0.000136017 * in[j+4][i-4];
double outjp2ic0 = -(0.000714 * in[j+4][i-4]);
double outjp3ic0 = 0.002856 * in[j+4][i-4];
outjc0ic0 += 1.27449 * in[j+4][i+4];
outjp1ic0 -= 0.000136017 * in[j+4][i+4];
outjp2ic0 += 0.000714 * in[j+4][i+4];
outjp3ic0 -= 0.002856 * in[j+4][i+4];
outjc0ic0 += 0.000136017 * in[j-3][i+4];
outjp1ic0 -= 1.27449 * in[j-3][i+4];
outjc0ic0 -= 0.000136017 * in[j-3][i-4];
outjp1ic0 += 1.27449 * in[j-3][i-4];
outjc0ic0 += 0.000136017 * in[j+3][i-4];
outjp1ic0 -= 0.000714 * in[j+3][i-4];
outjp2ic0 += 0.002856 * in[j+3][i-4];
outjc0ic0 -= 0.000136017 * in[j+3][i+4];
outjp1ic0 += 0.000714 * in[j+3][i+4];
outjp2ic0 -= 0.002856 * in[j+3][i+4];
outjc0ic0 += 0.000136017 * in[j+4][i-3];
outjp1ic0 -= 0.00145161 * in[j+4][i-3];
outjp2ic0 += 0.00762 * in[j+4][i-3];
outjp3ic0 -= 0.03048 * in[j+4][i-3];
outjc0ic0 -= 0.000136017 * in[j+4][i+3];
outjp1ic0 += 0.00145161 * in[j+4][i+3];
outjp2ic0 -= 0.00762 * in[j+4][i+3];
outjp3ic0 += 0.03048 * in[j+4][i+3];
outjc0ic0 += 0.000714 * in[j-2][i-4];
outjp1ic0 -= 0.000136017 * in[j-2][i-4];
outjp2ic0 += 1.27449 * in[j-2][i-4];
outjc0ic0 -= 0.000714 * in[j-2][i+4];
outjp1ic0 += 0.000136017 * in[j-2][i+4];
outjp2ic0 -= 1.27449 * in[j-2][i+4];
outjc0ic0 -= 0.000714 * in[j+2][i-4];
outjp1ic0 += 0.002856 * in[j+2][i-4];
outjp3ic0 -= 0.002856 * in[j+2][i-4];
outjc0ic0 += 0.000714 * in[j+2][i+4];
outjp1ic0 -= 0.002856 * in[j+2][i+4];
outjp3ic0 += 0.002856 * in[j+2][i+4];
outjc0ic0 -= 0.000714 * in[j+4][i-2];
outjp1ic0 += 0.00762 * in[j+4][i-2];
outjp2ic0 -= 0.04 * in[j+4][i-2];
outjp3ic0 += 0.16 * in[j+4][i-2];
outjc0ic0 += 0.000714 * in[j+4][i+2];
outjp1ic0 -= 0.00762 * in[j+4][i+2];
outjp2ic0 += 0.04 * in[j+4][i+2];
outjp3ic0 -= 0.16 * in[j+4][i+2];
outjc0ic0 -= 0.002856 * in[j-1][i-4];
outjp1ic0 += 0.000714 * in[j-1][i-4];
outjp2ic0 -= 0.000136017 * in[j-1][i-4];
outjp3ic0 += 1.27449 * in[j-1][i-4];
outjc0ic0 += 0.002856 * in[j-1][i+4];
outjp1ic0 -= 0.000714 * in[j-1][i+4];
outjp2ic0 += 0.000136017 * in[j-1][i+4];
outjp3ic0 -= 1.27449 * in[j-1][i+4];
outjc0ic0 += 0.002856 * in[j+1][i-4];
outjp2ic0 -= 0.002856 * in[j+1][i-4];
outjp3ic0 += 0.000714 * in[j+1][i-4];
outjc0ic0 -= 0.002856 * in[j+1][i+4];
outjp2ic0 += 0.002856 * in[j+1][i+4];
outjp3ic0 -= 0.000714 * in[j+1][i+4];
outjc0ic0 += 0.002856 * in[j+4][i-1];
outjp1ic0 -= 0.03048 * in[j+4][i-1];
outjp2ic0 += 0.16 * in[j+4][i-1];
outjp3ic0 -= 0.64 * in[j+4][i-1];
outjc0ic0 -= 0.002856 * in[j+4][i+1];
outjp1ic0 += 0.03048 * in[j+4][i+1];
outjp2ic0 -= 0.16 * in[j+4][i+1];
outjp3ic0 += 0.64 * in[j+4][i+1];
outjc0ic0 += 0.00145161 * in[j-3][i-3];
outjp1ic0 += 0.000136017 * -in[j-3][i-3];
outjc0ic0 -= 0.00145161 * in[j-3][i+3];
outjp1ic0 += 0.000136017 * in[j-3][i+3];
outjc0ic0 -= 0.00145161 * in[j+3][i-3];
outjp1ic0 += 0.00762 * in[j+3][i-3];
outjp2ic0 -= 0.03048 * in[j+3][i-3];
outjc0ic0 += 0.00145161 * in[j+3][i+3];
outjp1ic0 -= 0.00762 * in[j+3][i+3];
outjp2ic0 += 0.03048 * in[j+3][i+3];
outjc0ic0 += 0.00762 * -in[j-3][i-2];
outjp1ic0 += 0.000714 * in[j-3][i-2];
outjc0ic0 += 0.00762 * in[j-3][i+2];
outjp1ic0 -= 0.000714 * in[j-3][i+2];
outjc0ic0 -= 0.00762 * in[j-2][i-3];
outjp1ic0 += 0.00145161 * in[j-2][i-3];
outjp2ic0 += 0.000136017 * -in[j-2][i-3];
outjc0ic0 += 0.00762 * in[j-2][i+3];
outjp1ic0 -= 0.00145161 * in[j-2][i+3];
outjp2ic0 += 0.000136017 * in[j-2][i+3];
outjc0ic0 += 0.00762 * in[j+2][i-3];
outjp1ic0 -= 0.03048 * in[j+2][i-3];
outjp3ic0 += 0.03048 * in[j+2][i-3];
outjc0ic0 -= 0.00762 * in[j+2][i+3];
outjp1ic0 += 0.03048 * in[j+2][i+3];
outjp3ic0 -= 0.03048 * in[j+2][i+3];
outjc0ic0 += 0.00762 * in[j+3][i-2];
outjp1ic0 -= 0.04 * in[j+3][i-2];
outjp2ic0 += 0.16 * in[j+3][i-2];
outjc0ic0 -= 0.00762 * in[j+3][i+2];
outjp1ic0 += 0.04 * in[j+3][i+2];
outjp2ic0 -= 0.16 * in[j+3][i+2];
outjc0ic0 += 0.03048 * in[j-3][i-1];
outjp1ic0 += 0.002856 * -in[j-3][i-1];
outjc0ic0 -= 0.03048 * in[j-3][i+1];
outjp1ic0 += 0.002856 * in[j-3][i+1];
outjc0ic0 += 0.03048 * in[j-1][i-3];
outjp1ic0 -= 0.00762 * in[j-1][i-3];
outjp2ic0 += 0.00145161 * in[j-1][i-3];
outjp3ic0 += 0.000136017 * -in[j-1][i-3];
outjc0ic0 -= 0.03048 * in[j-1][i+3];
outjp1ic0 += 0.00762 * in[j-1][i+3];
outjp2ic0 -= 0.00145161 * in[j-1][i+3];
outjp3ic0 += 0.000136017 * in[j-1][i+3];
outjc0ic0 -= 0.03048 * in[j+1][i-3];
outjp2ic0 += 0.03048 * in[j+1][i-3];
outjp3ic0 -= 0.00762 * in[j+1][i-3];
outjc0ic0 += 0.03048 * in[j+1][i+3];
outjp2ic0 -= 0.03048 * in[j+1][i+3];
outjp3ic0 += 0.00762 * in[j+1][i+3];
outjc0ic0 -= 0.03048 * in[j+3][i-1];
outjp1ic0 += 0.16 * in[j+3][i-1];
outjp2ic0 -= 0.64 * in[j+3][i-1];
outjc0ic0 += 0.03048 * in[j+3][i+1];
outjp1ic0 -= 0.16 * in[j+3][i+1];
outjp2ic0 += 0.64 * in[j+3][i+1];
outjc0ic0 += 0.04 * in[j-2][i-2];
outjp1ic0 += 0.00762 * -in[j-2][i-2];
outjp2ic0 += 0.000714 * in[j-2][i-2];
outjc0ic0 -= 0.04 * in[j-2][i+2];
outjp1ic0 += 0.00762 * in[j-2][i+2];
outjp2ic0 -= 0.000714 * in[j-2][i+2];
outjc0ic0 -= 0.04 * in[j+2][i-2];
outjp1ic0 += 0.16 * in[j+2][i-2];
outjp3ic0 -= 0.16 * in[j+2][i-2];
outjc0ic0 += 0.04 * in[j+2][i+2];
outjp1ic0 -= 0.16 * in[j+2][i+2];
outjp3ic0 += 0.16 * in[j+2][i+2];
outjc0ic0 += 0.16 * -in[j-2][i-1];
outjp1ic0 += 0.03048 * in[j-2][i-1];
outjp2ic0 += 0.002856 * -in[j-2][i-1];
outjc0ic0 += 0.16 * in[j-2][i+1];
outjp1ic0 -= 0.03048 * in[j-2][i+1];
outjp2ic0 += 0.002856 * in[j-2][i+1];
outjc0ic0 -= 0.16 * in[j-1][i-2];
outjp1ic0 += 0.04 * in[j-1][i-2];
outjp2ic0 += 0.00762 * -in[j-1][i-2];
outjp3ic0 += 0.000714 * in[j-1][i-2];
outjc0ic0 += 0.16 * in[j-1][i+2];
outjp1ic0 -= 0.04 * in[j-1][i+2];
outjp2ic0 += 0.00762 * in[j-1][i+2];
outjp3ic0 -= 0.000714 * in[j-1][i+2];
outjc0ic0 += 0.16 * in[j+1][i-2];
outjp2ic0 -= 0.16 * in[j+1][i-2];
outjp3ic0 += 0.04 * in[j+1][i-2];
outjc0ic0 -= 0.16 * in[j+1][i+2];
outjp2ic0 += 0.16 * in[j+1][i+2];
outjp3ic0 -= 0.04 * in[j+1][i+2];
outjc0ic0 += 0.16 * in[j+2][i-1];
outjp1ic0 -= 0.64 * in[j+2][i-1];
outjp3ic0 += 0.64 * in[j+2][i-1];
outjc0ic0 -= 0.16 * in[j+2][i+1];
outjp1ic0 += 0.64 * in[j+2][i+1];
outjp3ic0 -= 0.64 * in[j+2][i+1];
outjc0ic0 += 0.64 * in[j-1][i-1];
outjp1ic0 += 0.16 * -in[j-1][i-1];
outjp2ic0 += 0.03048 * in[j-1][i-1];
outjp3ic0 += 0.002856 * -in[j-1][i-1];
outjc0ic0 -= 0.64 * in[j-1][i+1];
outjp1ic0 += 0.16 * in[j-1][i+1];
outjp2ic0 -= 0.03048 * in[j-1][i+1];
outjp3ic0 += 0.002856 * in[j-1][i+1];
outjc0ic0 -= 0.64 * in[j+1][i-1];
outjp2ic0 += 0.64 * in[j+1][i-1];
outjp3ic0 += 0.16 * -in[j+1][i-1];
outjc0ic0 += 0.64 * in[j+1][i+1];
outjp2ic0 -= 0.64 * in[j+1][i+1];
outjp3ic0 += 0.16 * in[j+1][i+1];
// Rows j+5 .. j+7 and row j contribute only to the j+1..j+3 outputs.
outjp1ic0 -= 1.27449 * in[j+5][i-4];
outjp2ic0 += 0.000136017 * in[j+5][i-4];
outjp3ic0 -= 0.000714 * in[j+5][i-4];
outjp1ic0 += 1.27449 * in[j+5][i+4];
outjp2ic0 -= 0.000136017 * in[j+5][i+4];
outjp3ic0 += 0.000714 * in[j+5][i+4];
outjp1ic0 += 0.000136017 * in[j+5][i-3];
outjp2ic0 -= 0.00145161 * in[j+5][i-3];
outjp3ic0 += 0.00762 * in[j+5][i-3];
outjp1ic0 -= 0.000136017 * in[j+5][i+3];
outjp2ic0 += 0.00145161 * in[j+5][i+3];
outjp3ic0 -= 0.00762 * in[j+5][i+3];
outjp1ic0 -= 0.000714 * in[j+5][i-2];
outjp2ic0 += 0.00762 * in[j+5][i-2];
outjp3ic0 -= 0.04 * in[j+5][i-2];
outjp1ic0 += 0.000714 * in[j+5][i+2];
outjp2ic0 -= 0.00762 * in[j+5][i+2];
outjp3ic0 += 0.04 * in[j+5][i+2];
outjp1ic0 -= 0.002856 * in[j][i-4];
outjp2ic0 += 0.000714 * in[j][i-4];
outjp3ic0 -= 0.000136017 * in[j][i-4];
outjp1ic0 += 0.002856 * in[j][i+4];
outjp2ic0 -= 0.000714 * in[j][i+4];
outjp3ic0 += 0.000136017 * in[j][i+4];
outjp1ic0 += 0.002856 * in[j+5][i-1];
outjp2ic0 -= 0.03048 * in[j+5][i-1];
outjp3ic0 += 0.16 * in[j+5][i-1];
outjp1ic0 -= 0.002856 * in[j+5][i+1];
outjp2ic0 += 0.03048 * in[j+5][i+1];
outjp3ic0 -= 0.16 * in[j+5][i+1];
outjp1ic0 += 0.03048 * in[j][i-3];
outjp2ic0 -= 0.00762 * in[j][i-3];
outjp3ic0 += 0.00145161 * in[j][i-3];
outjp1ic0 -= 0.03048 * in[j][i+3];
outjp2ic0 += 0.00762 * in[j][i+3];
outjp3ic0 -= 0.00145161 * in[j][i+3];
outjp1ic0 -= 0.16 * in[j][i-2];
outjp2ic0 += 0.04 * in[j][i-2];
outjp3ic0 += 0.00762 * -in[j][i-2];
outjp1ic0 += 0.16 * in[j][i+2];
outjp2ic0 -= 0.04 * in[j][i+2];
outjp3ic0 += 0.00762 * in[j][i+2];
outjp1ic0 += 0.64 * in[j][i-1];
outjp2ic0 += 0.16 * -in[j][i-1];
outjp3ic0 += 0.03048 * in[j][i-1];
outjp1ic0 -= 0.64 * in[j][i+1];
outjp2ic0 += 0.16 * in[j][i+1];
outjp3ic0 -= 0.03048 * in[j][i+1];
outjp2ic0 -= 1.27449 * in[j+6][i-4];
outjp3ic0 += 0.000136017 * in[j+6][i-4];
outjp2ic0 += 1.27449 * in[j+6][i+4];
outjp3ic0 -= 0.000136017 * in[j+6][i+4];
outjp2ic0 += 0.000136017 * in[j+6][i-3];
outjp3ic0 -= 0.00145161 * in[j+6][i-3];
outjp2ic0 -= 0.000136017 * in[j+6][i+3];
outjp3ic0 += 0.00145161 * in[j+6][i+3];
outjp2ic0 -= 0.000714 * in[j+6][i-2];
outjp3ic0 += 0.00762 * in[j+6][i-2];
outjp2ic0 += 0.000714 * in[j+6][i+2];
outjp3ic0 -= 0.00762 * in[j+6][i+2];
outjp2ic0 += 0.002856 * in[j+6][i-1];
outjp3ic0 -= 0.03048 * in[j+6][i-1];
outjp2ic0 -= 0.002856 * in[j+6][i+1];
outjp3ic0 += 0.03048 * in[j+6][i+1];
outjp3ic0 -= 1.27449 * in[j+7][i-4];
outjp3ic0 += 1.27449 * in[j+7][i+4];
outjp3ic0 += 0.000136017 * in[j+7][i-3];
outjp3ic0 -= 0.000136017 * in[j+7][i+3];
outjp3ic0 -= 0.000714 * in[j+7][i-2];
outjp3ic0 += 0.000714 * in[j+7][i+2];
outjp3ic0 += 0.002856 * in[j+7][i-1];
outjp3ic0 -= 0.002856 * in[j+7][i+1];
// Commit the four accumulated output rows for this column.
out[j][i] = outjc0ic0;
out[j+1][i] = outjp1ic0;
out[j+2][i] = outjp2ic0;
out[j+3][i] = outjp3ic0;
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
// Allocates device buffers, runs the 64-point stencil once over an N x N
// grid (each thread updates 4 rows, hence the 4* in the grid's y divisor),
// and copies the result back.  N must be compatible with the 8200-double
// row pitch hard-wired into j2d64pt's array casts.
double *in;
cudaMalloc (&in, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice);
// Fix: the copies and the launch were previously unchecked, so failures
// surfaced (if at all) as garbage output instead of a diagnostic.
check_error ("Failed to copy in to device\n");
double *out;
cudaMalloc (&out, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
j2d64pt<<<gridconfig, blockconfig>>> (in, out, N);
check_error ("Kernel launch j2d64pt failed\n");
cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
check_error ("Failed to copy out to host\n");
cudaFree (in);
cudaFree (out);
}
|
5,196 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 65536
#define THREADS_PER_BLOCK 128
void checkCUDAError(const char *);
void random_ints(int *a);
__device__ int d_a[N], d_b[N], d_c[N];
__global__ void vectorAdd(int max) {
// Element-wise c = a + b over the statically allocated device arrays.
// One thread per element; threads with index >= max do nothing.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= max) return;
d_c[idx] = d_a[idx] + d_b[idx];
}
void QueryDevices() {
// Print the name and memory characteristics of every CUDA device present.
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for (int dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, dev);
printf("Device Number: %d\n", dev);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
// Peak bandwidth = 2 (DDR) * clock * bus-width-in-bytes, scaled to GB/s.
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) * 1e3 /
(1 << 30));
}
}
int main(void) {
// Vector-add benchmark: computes c = a + b on the device (via statically
// declared __device__ arrays), times the kernel with CUDA events, and
// verifies against a CPU reference.
QueryDevices();
int *a, *b, *c, *c_ref; // host copies of a, b, c
unsigned int size = N * sizeof(int);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size);
random_ints(a);
b = (int *)malloc(size);
random_ints(b);
c = (int *)malloc(size);
c_ref = (int *)malloc(size);
for (int i = 0; i < N; i++) c_ref[i] = a[i] + b[i];
cudaMemcpyToSymbol(d_a, a, size);
cudaMemcpyToSymbol(d_b, b, size);
checkCUDAError("CUDA memcpy");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Launch add() kernel on GPU
vectorAdd<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
THREADS_PER_BLOCK>>>(N);
cudaEventRecord(stop);
checkCUDAError("CUDA kernel");
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms, start, stop);
printf("vectorAdd takes %.3lfms\n", ms);
// 3 global accesses per element (read a, read b, write c).
printf("Measured Global Memory Bandwidth (GB/s): %f\n",
size * 3 / ms * 1e3 / (1 << 30));
// Copy result back to host
cudaMemcpyFromSymbol(c, d_c, size);
checkCUDAError("CUDA memcpy");
for (int i = 0; i < N; i++)
if (c_ref[i] != c[i]) {
puts("FAIL");
break;
}
// Cleanup.  Fixes: the events were never destroyed and c_ref was leaked.
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(a);
free(b);
free(c);
free(c_ref);
return 0;
}
void checkCUDAError(const char *msg) {
// Fail fast with a descriptive message if a CUDA error is pending.
cudaError_t status = cudaGetLastError();
if (status == cudaSuccess) return;
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(status));
exit(EXIT_FAILURE);
}
void random_ints(int *a) {
// Fill all N entries of `a` with pseudo-random values from rand().
for (unsigned int idx = 0; idx < N; ++idx)
a[idx] = rand();
}
|
5,197 | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__device__ double norm_calc_device;
// One Jacobi relaxation sweep on the flattened N x N grid:
// u_new[idx] = 0.25 * (h^2 + four neighbours of u[idx]) for interior points
// (the h^2 term corresponds to right-hand side f == 1, matching norm()).
// Guards: idx>N skips row 0 (and idx==N), the idx%N tests skip the first
// and last columns, and idx/N < N-1 skips the last row plus any thread
// whose index falls past the grid.
__global__ void JacobiKernel(double *u, double *u_new, int N, double h_sq) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( (idx>N) && (idx%N!=0) && (idx%N!=N-1) && (idx/N < N-1))
*(u_new + idx) = 0.25 * ((h_sq)+ *(u + idx - N) + *(u + idx - 1) + *(u + idx + N) + *(u + idx + 1));
}
double norm (double *u , int N, double h_sq)
{
// Discrete residual norm for the Poisson problem -Laplace(u) = 1 on the
// interior of the N x N grid: sqrt( sum over interior points of
// (A*u/h^2 - 1)^2 ), where A is the 5-point stencil.
double sumOfSquares = 0.0;
for (int i = 1 ; i < N-1 ; i++) {
for (int j = 1; j < N-1 ; j++) {
double residual = 4.0 * u[i*N + j];
residual -= u[(i-1)*N + j];
residual -= u[i*N + (j-1)];
residual -= u[(i+1)*N + j];
residual -= u[i*N + (j+1)];
residual /= h_sq;
sumOfSquares += pow((residual-1.0),2);
}
}
return sqrt(sumOfSquares);
}
// Device-side residual accumulation: each interior point adds its squared
// residual (A*u/h^2 - 1)^2 to the global accumulator norm_calc_device.
// The host must zero the symbol before launch and take sqrt of it after.
// NOTE: atomicAdd on double requires compute capability 6.0+.
__global__ void normKernel(double *u , int N , double h_sq){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( (idx>N) && (idx%N!=0) && (idx%N!=N-1) && (idx/N < N-1)){
double temp = 0.0;
temp+=4.0 * *(u + idx);
temp-= *(u + idx - N);
temp-= *(u + idx - 1);
temp-= *(u + idx + N);
temp-= *(u + idx + 1);
temp/=h_sq;
temp = pow((temp-1.0),2);
// Fix: the atomicAdd used to sit outside this guard, so every thread
// (boundary and out-of-range ones included) issued an atomicAdd of 0.0,
// serializing the whole grid on a single address for no benefit.
atomicAdd(&norm_calc_device, temp);
}
}
// CPU Jacobi solver for -Laplace(u) = 1 with fixed boundary values.
// Iterates until the residual norm drops by a factor of 1e6 relative to
// norm_init, or max_iter sweeps have run.
// NOTE(review): swap(u, u_new) exchanges the LOCAL pointer copies only, so
// after an odd number of iterations the newest iterate lives in the buffer
// the caller passed as u_new; callers that later read their `u` buffer get
// the previous iterate in that case.  Confirm this is intended.
void jacobi(double *u, double * u_new, int N, double h, double h_sq, double norm_init)
{
printf("Jacobi Method:\n");
int iter = 1;
double norm_calc = norm_init;
int max_iter = 1000;
printf("\nInitial Norm:%f\n", norm_init);
while(norm_calc * 1000000 > norm_init && iter <= max_iter)
{
// One sweep: each interior point averages its 4 neighbours plus the
// h^2 source term.
for(int i = 1; i < N-1 ; i++)
for(int j = 1 ; j < N-1 ; j++)
*(u_new + i*N + j) = 0.25 * ((h_sq)+ *(u + (i-1)*N + j) + *(u + i*N + (j-1)) + *(u + (i+1)*N + j) + *(u + i*N + (j+1)));
swap(u,u_new);
norm_calc = norm( u, N,h_sq);
iter++;
}
printf("\nFinal Norm:%f\n", norm_calc);
}
// GPU Jacobi solver: same stopping rule as jacobi(), with the sweep and
// the residual norm both computed on the device.  The host-side `u_new`
// parameter is unused here; it is kept for signature parity with jacobi().
void jacobiGPU(double *u, double * u_new, int N, double h, double h_sq, double norm_init, double *u_device, double *u_new_device)
{
printf("Jacobi Method:\n");
int iter = 1;
double norm_calc = norm_init;
int max_iter = 1000;
printf("\nInitial Norm:%f\n", norm_init);
cudaMemcpy(u_device, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
// Fix: JacobiKernel only writes interior points, so u_new_device's
// boundary entries were uninitialized garbage that got copied into
// u_device after the first sweep (and read as neighbours thereafter).
// Seed the whole buffer with the initial state so boundaries are valid;
// this also makes the final copy-back correct if the loop runs 0 times.
cudaMemcpy(u_new_device, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
while(norm_calc * 1000000 > norm_init && iter <= max_iter)
{
JacobiKernel<<<N*N/1024+1,1024>>>(u_device, u_new_device, N,h_sq);
cudaDeviceSynchronize();
cudaMemcpy(u_device, u_new_device, N*N*sizeof(double), cudaMemcpyDeviceToDevice);
norm_calc = 0.0;
cudaMemcpyToSymbol(norm_calc_device, &norm_calc, sizeof(double) );
normKernel<<<N*N/1024+1,1024>>>(u_device, N,h_sq);
cudaMemcpyFromSymbol(&norm_calc, norm_calc_device, sizeof(double), 0, cudaMemcpyDeviceToHost);
norm_calc = sqrt(norm_calc);
iter++;
}
cudaMemcpy(u, u_new_device, N*N*sizeof(double), cudaMemcpyDeviceToHost);
printf("\nFinal Norm:%f\n", norm_calc);
}
int main(int argc, char **argv)
{
// Runs the CPU and GPU Jacobi solvers on an N x N grid (N from argv[1],
// default 1000), times both, and reports the accumulated difference
// between the two results.
int N;
double h, h_sq;
N = 1000;
if(argc == 2)
N = atoi(argv[1]);
h = (double)1/(double)(N+1);
h_sq = h*h;
double *u, *u_new;
cudaMallocHost((void**)&u, N * N * sizeof(double));
cudaMallocHost((void**)&u_new, N * N * sizeof(double));
for(int i = 0; i<N*N; i++)
*(u+i) = 0.0;
double norm_init = norm(u,N,h_sq);
struct timeval start, end;
gettimeofday(&start, NULL);
jacobi(u, u_new, N, h , h_sq,norm_init);
gettimeofday(&end, NULL);
double time_taken = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
printf("\nThe program took %f seconds to execute\n", time_taken);
cudaFreeHost(u_new);
double *u_GPU, *u_new_GPU;
cudaMallocHost((void**)&u_GPU, N * N * sizeof(double));
cudaMallocHost((void**)&u_new_GPU, N * N * sizeof(double));
for(int i = 0; i<N*N; i++)
*(u_GPU+i) = 0.0;
double *u_device;
double *u_new_device;
cudaMalloc(&u_device, N*N*sizeof(double));
cudaMalloc(&u_new_device, N*N*sizeof(double));
gettimeofday(&start, NULL);
jacobiGPU(u_GPU, u_new_GPU, N, h ,h_sq,norm_init,u_device,u_new_device);
gettimeofday(&end, NULL);
time_taken = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
printf("\nThe program took %f seconds to execute\n", time_taken);
cudaFree(u_device);
cudaFree(u_new_device);
/*-----Calculating errors----*/
double error = 0.0;
for(int i=0; i<N;i++)
for(int j=0; j<N; j++)
{
// Fix: accumulate absolute differences; signed differences cancel and
// could report ~0 "error" for grossly different solutions.
error+= fabs(*(u + i*N + j) - *(u_GPU + i*N + j));
}
printf("Calculated error between GPU and CPU code: %f", error);
cudaFreeHost(u);
cudaFreeHost(u_GPU);
cudaFreeHost(u_new_GPU);
return 0;
}
|
5,198 | #include "includes.h"
// Memory-access benchmark kernel: writes each thread's flattened global ID
// into h using a TRANSPOSED (column-major) index, repeated T times so the
// timing is dominated by the store pattern.
// NOTE(review): the index (row + col*N) deliberately differs from the ID
// (col + row*N); this looks intentional for measuring strided/uncoalesced
// stores -- confirm before "fixing" it.
__global__ void gpu_Comput (int *h, int N, int T) {
// Array loaded with global thread ID that accesses that location
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int threadID = col + row * N;
int index = row + col * N; // sequentially down each row
for (int t = 0; t < T; t++) // loop to repeat to reduce other time effects
h[index] = threadID; // load array with flattened global thread ID
} |
5,199 | #include <cuda.h>
#include <stdio.h>
#define TILE_WIDTH 2
// Tiled matrix multiply with TILE_WIDTH x TILE_WIDTH shared-memory tiles:
// computes d_P = d_M x d_N.  NOTE the parameter order: the FIRST argument
// is the right-hand factor d_N and the SECOND is the left-hand factor d_M,
// so a caller passing (A, B, C) gets C = B x A -- confirm this is intended.
// Preconditions (there are no bounds guards): Width is a multiple of
// TILE_WIDTH and the launch grid exactly covers the Width x Width output.
__global__ void matMulKernel(float* d_N, float* d_M, float* d_P, int Width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {// Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + m*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(m*TILE_WIDTH + ty)*Width + Col];
__syncthreads();  // tile fully loaded before any thread reads it
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();  // all reads done before the tile is overwritten
}
d_P[Row*Width + Col] = Pvalue;
}
void matMul(float* A, float* B, float* C, int width)
{
// Multiplies width x width matrices on the device via matMulKernel and
// prints A, B and C (in the column-major order the arrays are filled in).
// width must be a multiple of TILE_WIDTH, since the kernel has no bounds
// guards.
int size = width * width * sizeof(float);
// Fix: dropped the pointless `static` on the device pointers -- the
// buffers are freed at the end of every call, so a retained pointer
// would only ever dangle.
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_C, size);
// Generalized launch config: one TILE_WIDTH x TILE_WIDTH block per output
// tile (the grid was previously hard-coded to 2x2, i.e. width == 4 only;
// for width == 4 this produces the identical configuration).
dim3 dimGrid(width / TILE_WIDTH, width / TILE_WIDTH, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
matMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, width);
cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
// Print loops generalized from the hard-coded 4 to `width`.
printf("\nA: \n");
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++){
printf("%2.0f ", A[i + j*width]);
}
printf("\n");
}
printf("\nB: \n");
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++){
printf("%2.0f ", B[i + j*width]);
}
printf("\n");
}
printf("\n-------------------------------------");
printf("\nC: \n");
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++){
printf("%2.0f ", C[i + j*width]);
}
printf("\n");
}
printf("\n-------------------------------------\n");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main() {
// Build two 4x4 test matrices with simple periodic patterns (stored in
// column-major order, matching how matMul prints them) and multiply them
// on the device.
const int width = 4;
static float h_A[16];
static float h_B[16];
static float h_C[16];
for (int j = 0; j < width; j++) {
for (int i = 0; i < width; i++) {
h_A[i + j*width] = (i+j)%2;
h_B[i + j*width] = (i+j)%3;
}
}
matMul(h_A, h_B, h_C, width);
return 0;
}
5,200 | // Transpose checkRows matrix with rows == parity checks, to
// bitRows matrix with rows == bits
// Scatter check-oriented rows into bit-oriented rows using the index map.
// Launch layout: one block per parity check (blockIdx.x), one thread per
// populated cell (threadIdx.x).  Each map row is maxBitsForCheck+1 wide;
// slot 0 holds the row's length and the remaining slots hold destination
// indices into bitRows.
__global__ void
transposeRC (unsigned int* map, float *checkRows, float *bitRows,
             unsigned int numChecks, unsigned int maxBitsForCheck) {
  unsigned int check = blockIdx.x;
  unsigned int slot = threadIdx.x + 1;      // +1 skips the length slot
  if (check >= numChecks) return;
  unsigned int rowStart = check * (maxBitsForCheck + 1);
  unsigned int rowLength = map[rowStart];
  if (slot > rowLength) return;             // past this row's entries
  unsigned int cell = rowStart + slot;
  bitRows[map[cell]] = checkRows[cell];
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.