serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
2,301 | #include "includes.h"
/**
* Programma che simula il comportamento del gpdt per
* la risoluzione di un kernel di una serie di
* valori di dimensione variabile utilizzando la
* tecnologia cuda.
* compilare con:
* nvcc -o simil_gpdt_si_cuda simil_gpdt_si_cuda.cu
* lanciare con:
* ./simil_gpdt_si_cuda [numero vettori] [numero componenti] [numero di righe da calcolare] [tipo di kernel] [grado(int)/sigma(float)]
**/
using namespace std;
/**
 * Kernel: for every pair (vector x, selected index y = ind[y]) it
 * accumulates the linear-kernel dot product over the sparse non-zero
 * components of vector x (positions listed in Vp, counts in Vnp).
 **/
__global__ void Kernel_lineare(float *Vd, float *Ris, int N, int C, int dim_indici, int *ind, int *Vp, int *Vnp, int nr_max_val)
{
    // Grid-stride starting points for this thread in both grid dimensions.
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    for (int x = x0; x < N; x += blockDim.x * gridDim.x)
    {
        // BUG FIX: y must restart from y0 on every outer iteration. The original
        // reused a single variable, so after the first x iteration y was already
        // >= dim_indici and the inner loop never executed again.
        for (int y = y0; y < dim_indici; y += blockDim.y * gridDim.y)
        {
            const int tmp_ind = ind[y];   // row of the second vector in Vd
            const int Nr_val  = Vnp[x];   // number of non-zero components of row x
            float lin = 0.0f;
            // Sparse dot product over the non-zero positions of row x.
            for (int j = 0; j < Nr_val; j++)
            {
                const int pos = Vp[x * nr_max_val + j];
                lin += Vd[x * C + pos] * Vd[tmp_ind * C + pos];
            }
            // One output row per selected index y, N results per row.
            Ris[y * N + x] = lin;
        }
    }
}
2,302 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
cudaError_t cudaDotProduct(int *c, const int *a, const int *b, unsigned int size);
int* allocAndAssignMat(int size);
// Element-wise multiply-accumulate: c[i] += a[i] * b[i] for this thread's index.
// NOTE(review): there is no bounds guard, so every launched thread dereferences
// its index — the launch configuration must not exceed the allocated length.
// NOTE(review): this ACCUMULATES into c; the device buffer must be
// zero-initialised (or pre-loaded) before the first launch — verify at call sites.
__global__ void dot(int *c, const int *a, const int *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] += a[i] * b[i];
}
// Driver: builds two N*N host matrices of 2s, computes the element-wise product
// sum on the GPU, prints the result and kernel time.
int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    const int N = 10000; // side length: total element count is N*N
    int* a = allocAndAssignMat(N * N);
    int* b = allocAndAssignMat(N * N);
    int* c = (int*)malloc((N * N) * sizeof(int));
    // BUG FIX: check host allocations before using them.
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host allocation failed!");
        return 1;
    }
    for (int i = 0; i < N * N; i++) {
        c[i] = 0;
    }
    int mySum = 0;
    cudaEventRecord(start);
    // Compute element-wise products on the GPU.
    cudaError_t cudaStatus = cudaDotProduct(c, a, b, N * N);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDotProduct failed!");
        return 1;
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // Reduce the per-element products on the host.
    for (int i = 0; i < N * N; i++) {
        mySum += c[i];
    }
    // Results
    printf("Size of N*N: %d \nResult: %d \nTime in kernel %f \n", N * N, mySum, milliseconds);
    // BUG FIX: release host buffers and timing events (previously leaked).
    free(a);
    free(b);
    free(c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper that copies a and b to the device, runs the element-wise
// multiply-accumulate kernel, and copies the products back into c.
// Returns the first CUDA error encountered (cudaSuccess on success).
cudaError_t cudaDotProduct(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // The kernel launches ceil(size/256)*256 threads and has no bounds guard,
    // so round every device allocation up to the launched thread count: the
    // stray tail threads then stay inside allocated memory.
    const unsigned int threadsPerBlock = 256;
    const unsigned int numBlocks = (size + threadsPerBlock - 1) / threadsPerBlock;
    const unsigned int padded = numBlocks * threadsPerBlock;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, padded * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // BUG FIX: dot() accumulates (+=) into dev_c, which cudaMalloc leaves
    // uninitialised — it must start at zero.
    cudaStatus = cudaMemset(dev_c, 0, padded * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemset failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, padded * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, padded * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers (only `size` elements
    // are meaningful; the padding is never copied back).
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch one thread per (padded) element.
    dot<<<numBlocks, threadsPerBlock>>>(dev_c, dev_a, dev_b);
    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "dot kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching dot kernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
int* allocAndAssignMat(int size) {
    /*
    Allocate an int array of `size` elements and fill every entry with 2.
    @params: int size -- number of elements (N*N for a square matrix)
    @returns: int* ptr -- caller owns it (free() when done); NULL on allocation failure
    */
    int* ptr = (int*)malloc(size * sizeof(int));
    // BUG FIX: report allocation failure instead of writing through NULL.
    if (ptr == NULL) {
        return NULL;
    }
    for (int i = 0; i < size; i++) {
        ptr[i] = 2;
    }
    return ptr;
}
|
2,303 | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "time.h"
#define NUM_THREADS 256
int nsamp = 65536, ndms = 1024, detrendLen = 32768;
// --------------------------- Detrend and Normalisation kernel ----------------------------
// Detrend (remove least-squares linear fit) and normalise (divide by the
// standard deviation) one detrendLen-long segment per block, in place.
// Segment index = blockIdx.y * gridDim.x + blockIdx.x; blockDim.x == NUM_THREADS.
__global__ void detrend_normalise(float *input, int detrendLen)
{
    // Per-thread partial sums for the least-squares fit: (sy, sxy, sxx).
    __shared__ float3 shared[NUM_THREADS];
    // First pass, each thread computes its part of the buffer.
    {
        float sy = 0, sxy = 0, sxx = 0;
        for (unsigned i = threadIdx.x; i < detrendLen; i += blockDim.x)
        {
            // x is the sample position centred on the segment midpoint.
            float x = - detrendLen * 0.5 + 0.5 + i;
            int index = blockIdx.y * gridDim.x * detrendLen +
                        blockIdx.x * detrendLen + i;
            float y = input[index];
            sy += y;
            sxy += x * y;
            sxx += x * x;
        }
        // Initialise shared memory.
        shared[threadIdx.x].x = sy;
        shared[threadIdx.x].y = sxy;
        shared[threadIdx.x].z = sxx;
    }
    __syncthreads();
    // Tree reduction of the fit sums; the barrier sits outside the divergent
    // branch so every thread reaches it on each iteration.
    for (unsigned i = NUM_THREADS / 2; i >= 1; i /= 2)
    {
        if (threadIdx.x < i)
        {
            shared[threadIdx.x].x += shared[threadIdx.x + i].x;
            shared[threadIdx.x].y += shared[threadIdx.x + i].y;
            shared[threadIdx.x].z += shared[threadIdx.x + i].z;
        }
        __syncthreads();
    }
    // shared[0].x becomes the mean (intercept a), shared[0].y the slope b = sxy/sxx.
    if (threadIdx.x == 0)
    {
        shared[0].y /= shared[0].z;
        shared[0].x /= detrendLen;
    }
    __syncthreads();
    // Detrend in place and accumulate the per-thread partial sum of squares.
    {
        float a = shared[0].x;
        float b = shared[0].y;
        float stddev = 0;
        for (unsigned i = threadIdx.x; i < detrendLen ; i += blockDim.x)
        {
            float x = - detrendLen / 2.0 + 0.5 + i;
            int index = blockIdx.y * gridDim.x * detrendLen +
                        blockIdx.x * detrendLen + i;
            float val = input[index] - (a + b * x);
            input[index] = val;
            stddev += val * val;
        }
        shared[threadIdx.x].z = stddev;
    }
    __syncthreads();
    // BUG FIX: the original second reduction used a single barrier after the
    // whole loop, so iterations raced on shared memory; synchronise every step.
    for (unsigned i = NUM_THREADS / 2; i >= 1; i /= 2)
    {
        if (threadIdx.x < i)
            shared[threadIdx.x].z += shared[threadIdx.x + i].z;
        __syncthreads();
    }
    if (threadIdx.x == 0)
        shared[0].z = sqrtf(shared[0].z / detrendLen); // sqrtf keeps the math in float
    __syncthreads();
    // Normalise data by the segment's standard deviation.
    float stddev = shared[0].z;
    for (unsigned i = threadIdx.x; i < detrendLen ; i += blockDim.x)
        input[blockIdx.y * gridDim.x * detrendLen +
              blockIdx.x * detrendLen + i] /= stddev;
}
// --------------------------- Main processing ----------------------------
// Driver: fills an ndms x nsamp buffer with a noisy ramp, runs the
// detrend/normalise kernel (one block per detrendLen segment), and writes the
// result to testDetrend.dat.
int main(int argc, char *argv[])
{
    float *input, *d_input;
    int i, j;
    // Allocate and generate buffers.
    input = (float *) malloc(nsamp * ndms * sizeof(float));
    if (input == NULL) {
        fprintf(stderr, "malloc failed\n");
        return 1;
    }
    srand ( time(NULL) );
    for(i = 0; i < ndms; i++)
        for(j = 0; j < nsamp; j++)
            input[i * nsamp + j] = ((float)rand() / (float)RAND_MAX) + j * 0.001 + i * j * 0.001;
    printf("nsamp: %d, ndms: %d\n", nsamp, ndms);
    // Initialise. cudaDeviceSetCacheConfig replaces the deprecated
    // cudaThreadSetCacheConfig.
    cudaSetDevice(0);
    cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
    cudaEvent_t event_start, event_stop;
    float timestamp;
    cudaEventCreate(&event_start);
    cudaEventCreate(&event_stop);
    // Allocate GPU memory and copy data.
    cudaMalloc((void **) &d_input, nsamp * ndms * sizeof(float) );
    cudaMemcpy(d_input, input, nsamp * ndms * sizeof(float), cudaMemcpyHostToDevice);
    // Call kernel: grid is (segments per DM trial, DM trials).
    cudaEventRecord(event_start, 0);
    detrend_normalise<<<dim3(nsamp / detrendLen, ndms), NUM_THREADS>>>(d_input, detrendLen);
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);
    // BUG FIX: the original line was HTML-garbled ("×tamp" for "&timestamp").
    cudaEventElapsedTime(&timestamp, event_start, event_stop);
    printf("Performed detrending in: %lf\n", timestamp);
    // Get output.
    cudaMemcpy(input, d_input, ndms * nsamp * sizeof(float), cudaMemcpyDeviceToHost);
    FILE *fp = fopen("testDetrend.dat", "wb");
    if (fp != NULL) {
        fwrite(input, sizeof(float), nsamp * ndms, fp);
        fclose(fp);
    }
    // BUG FIX: release device memory, events and the host buffer (previously leaked).
    cudaFree(d_input);
    cudaEventDestroy(event_start);
    cudaEventDestroy(event_stop);
    free(input);
    return 0;
}
|
2,304 |
//#include "cudacpp\DeviceVector.h"
// Each thread writes `val` into its own contiguous run of `size` elements of c.
// Thread t owns c[t*size .. t*size + size - 1]; no bounds checking is done.
template<typename type, int size>
__global__ void setKernel(type* c, type val)
{
    const auto base = threadIdx.x * size;
    #pragma unroll(size)
    for (auto offset = 0; offset < size; offset++) {
        c[base + offset] = val;
    }
}
// Adds the flat index of each element to that element, in place.
// Layout: i fastest, then j (stride itot), then k (stride itot*jtot).
template<typename TF>
__global__ void doublify(TF* a, const int itot, const int jtot)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    const int j = blockIdx.y*blockDim.y + threadIdx.y;
    const int k = blockIdx.z;
    const int ijk = i + itot * (j + jtot * k);
    a[ijk] += static_cast<TF>(ijk);
}
// Launches doublify with one block per k-slice and one thread per (i, j) point.
// NOTE(review): blockDim is itot*jtot threads; this exceeds the 1024
// threads-per-block limit for itot*jtot > 1024 — confirm intended input sizes.
template<typename TF>
void launch_doublify(TF* a, const int itot, const int jtot, const int ktot)
{
dim3 grid_gpu (1, 1, ktot);
dim3 block_gpu(itot, jtot, 1);
doublify<<<grid_gpu, block_gpu>>>(a, itot, jtot);
}
|
2,306 | #include "includes.h"
// Divides each element of the M x N row-major matrix `a` by its row's divisor
// b[row], writing into `c`. Layout assumption (from the indexing): one block
// per row with blockDim.x == N, so blockIdx.x is the row index.
__global__ void rowDiv(float* a, float* b, float* c, int M, int N){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: guard against threads past the M*N elements (M and N were
    // previously accepted but never used).
    if (i < M * N)
        c[i] = a[i]/b[blockIdx.x];
}
2,307 | #ifndef __HELPER__
#define __HELPER__
// NOTE(review): <bits/stdc++.h> is a GCC-internal catch-all header, not
// standard C++; it drags in the whole library.
#include <bits/stdc++.h>
using namespace std;
// Shorthand for functions compiled for both host and device.
#define __HD__ __host__ __device__
#define SQR(x) ((x) * (x))
// Round x up to the next multiple of n (integer arithmetic; assumes x >= 0, n > 0).
__inline__ __HD__ int rnd(int x, int n)
{
return ((x + n - 1) / n) * n;
}
// Tiling parameters for the "large" configuration (used when n > 384):
// 64x64 tile, K-depth 8, 8x4 results per thread.
#define LG_BLOCK_N 64
#define LG_BLOCK_M 64
#define LG_BLOCK_K 8
#define LG_THREAD_N 8
#define LG_THREAD_M 4
#define LG_GRID_SIZE ((LG_BLOCK_N * LG_BLOCK_M) / (LG_THREAD_N * LG_THREAD_M))
// Tiling parameters for the "small" configuration (n <= 384):
// 32x32 tile, K-depth 4, 4x2 results per thread.
#define SM_BLOCK_N 32
#define SM_BLOCK_M 32
#define SM_BLOCK_K 4
#define SM_THREAD_N 4
#define SM_THREAD_M 2
#define SM_GRID_SIZE ((SM_BLOCK_N * SM_BLOCK_M) / (SM_THREAD_N * SM_THREAD_M))
#endif
// Picks the launch configuration for an n x n problem: the large-tile setup
// for n > 384, the small-tile setup otherwise. The grid covers n rounded up
// to a whole number of tiles.
void setGrid(int n, dim3 &blockDim, dim3 &gridDim)
{
    const bool big = n > 384;
    const int threads = big ? LG_GRID_SIZE : SM_GRID_SIZE;
    const int tile_n  = big ? LG_BLOCK_N   : SM_BLOCK_N;
    const int tile_m  = big ? LG_BLOCK_M   : SM_BLOCK_M;
    blockDim = dim3(threads, 1, 1);
    const int nn = rnd(n, tile_n);
    gridDim = dim3(nn / tile_n, nn / tile_m, 1);
}
|
2,308 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 32
#define WA 64
#define HA 64
#define HC 3
#define WC 3
#define PAD 1
#define WB (WA+2*PAD - WC + 1)
#define HB (HA+2*PAD - HC + 1)
#define CHANNEL_SIZE 3
// Computes one output pixel of a single-channel-group convolution: each thread
// handles one (kernel row, kernel col, input channel) product for the pixel at
// (blockIdx.x, blockIdx.y), and the products are summed into Output with
// atomicAdd. Shared memory (kernel_size[1]*kernel_size[2]*kernel_size[3]
// floats, passed at launch) stages each thread's single product.
// NOTE(review): each thread only ever reads back the slot it wrote itself, so
// the shared buffer and the disabled barrier below appear unnecessary — and
// "__syncthreads;" without parentheses would be a no-op even if uncommented.
__device__ void flat_conv(float* Input, float* Kernel, float* Output,int* image_size, int* kernel_size, int* pad,int* out_w)
{
//__shared__ float kernel_part[kernel_size[2]][kernel_size[3]][kernel_size[1]];
//__shared__ float kernel_part[3][3][3];
extern __shared__ float kernel_part[];
// Input coordinates sampled by this thread (shifted by padding).
int col_idx = blockIdx.x - pad[0] + threadIdx.x;
int row_idx = blockIdx.y - pad[0] + threadIdx.y;
int img_flat_size = image_size[1]*image_size[2];
int kernel_flat_size = kernel_size[2]*kernel_size[3];
// Zero-pad: out-of-image taps contribute nothing.
if( image_size[2]>col_idx && col_idx >=0 && image_size[1]>row_idx && row_idx >=0)
{
kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z]
= Input[(col_idx * image_size[2] +row_idx) + img_flat_size*threadIdx.z]
* Kernel[threadIdx.y*kernel_size[3] + threadIdx.x + kernel_flat_size*threadIdx.z];
}
else
{
kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z] = 0;
}
//__syncthreads;
// Accumulate this thread's product into the block's output pixel.
atomicAdd(&(Output[blockIdx.x * out_w[0] +blockIdx.y]), kernel_part[(threadIdx.y * kernel_size[3]+threadIdx.x)*kernel_size[1]+threadIdx.z]);
}
// Convolution dispatcher: blockIdx.z selects the output channel; each block
// delegates one output pixel of that channel to flat_conv, offsetting into the
// per-channel kernel weights and output plane.
__global__ void conv(float* Input, float* Kernel, float* Output,int* image_size, int* kernel_size,int* pad)
{
// Output spatial extent given padding and kernel size.
int out_w = image_size[2]+2*pad[0] - kernel_size[3] + 1;
int out_h = image_size[1]+2*pad[0] - kernel_size[2] + 1;
int flat_kernel_size = kernel_size[3]*kernel_size[2]*kernel_size[1];
int flat_img_size = out_w*out_h;
flat_conv(Input, Kernel + flat_kernel_size*blockIdx.z , Output + flat_img_size*blockIdx.z, image_size, kernel_size, pad,&out_w);
}
// Fills `data` with `size` pseudo-random values drawn uniformly from [0, 1].
void randomInit(float* data, int size)
{
    for (int idx = 0; idx < size; ++idx) {
        data[idx] = rand() / (float)RAND_MAX;
    }
}
// Driver: random 3x64x64 image convolved with two 3x3x3 kernels on the GPU;
// prints the elapsed time in seconds.
__host__ int main(void)
{
    int kernel_size[4] = {2,3,3,3}; // O I H W
    int image_size[3] = {3,64,64};  // C H W
    float* h_a; float *h_b; float* h_c;
    int h_a_size = sizeof(float)*3*64*64; // input image
    // BUG FIX: size the kernel and output buffers to their logical extents
    // (the kernel buffer was over-allocated as 3*64*64 and partially garbage).
    int h_b_size = sizeof(float)*2*3*3*3; // 2 kernels of 3x3x3
    int h_c_size = sizeof(float)*2*64*64; // 2 output channels
    h_a = (float*)malloc(h_a_size);
    h_b = (float*)malloc(h_b_size);
    h_c = (float*)malloc(h_c_size);
    randomInit(h_a,3*64*64);
    randomInit(h_b,2*3*3*3);
    int pad = 1;
    float *cimg;
    float *coimg;
    float *ckernel;
    int * cimg_size;
    int * ckernel_size;
    int * cpad;
    // BUG FIX: cudaMalloc takes void** — the (void***) casts were one level off.
    cudaMalloc((void**)&cimg,h_a_size);
    cudaMalloc((void**)&ckernel,h_b_size);
    cudaMalloc((void**)&coimg,h_c_size);
    cudaMalloc((void**)&cimg_size,sizeof(image_size));
    cudaMalloc((void**)&ckernel_size,sizeof(kernel_size));
    cudaMalloc((void**)&cpad,sizeof(int));
    cudaMemcpy(cimg,h_a,h_a_size,cudaMemcpyHostToDevice);
    cudaMemcpy(ckernel,h_b,h_b_size,cudaMemcpyHostToDevice);
    cudaMemcpy(cimg_size,image_size,sizeof(image_size),cudaMemcpyHostToDevice);
    cudaMemcpy(ckernel_size,kernel_size,sizeof(kernel_size),cudaMemcpyHostToDevice);
    cudaMemcpy(cpad,&pad,sizeof(int),cudaMemcpyHostToDevice);
    // BUG FIX: conv() accumulates with atomicAdd, so the output must start zeroed.
    cudaMemset(coimg, 0, h_c_size);
    // One block per output pixel and channel; one thread per kernel tap.
    dim3 threads(kernel_size[3], kernel_size[2], kernel_size[1]);
    dim3 grid(image_size[2],image_size[1],kernel_size[0]);
    int flat_kernel_size = kernel_size[3]* kernel_size[2]* kernel_size[1]*sizeof(float);
    clock_t start = clock();
    conv <<< grid,threads,flat_kernel_size>>>(cimg,ckernel,coimg,cimg_size,ckernel_size,cpad);
    // BUG FIX: kernel launches are asynchronous — wait before stopping the clock.
    cudaDeviceSynchronize();
    clock_t end = clock();
    cudaMemcpy(h_c,coimg,h_c_size,cudaMemcpyDeviceToHost);
    cudaFree(cimg);
    cudaFree(ckernel);
    cudaFree(coimg);
    cudaFree(cimg_size);
    cudaFree(ckernel_size);
    cudaFree(cpad);
    // BUG FIX: release host buffers (previously leaked).
    free(h_a);
    free(h_b);
    free(h_c);
    printf("%f",(float)(end - start)/CLOCKS_PER_SEC);
}
// One thread per element: result[i] = a[i] + b[i] + scalar.
// NOTE(review): no bounds guard — the launch must cover exactly the array length.
__global__ void vectorAddition(const float* a, const float* b, float* result, const float scalar)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const float pairSum = a[i] + b[i];
    result[i] = pairSum + scalar;
}
|
2,310 | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILEWIDTH 32
__global__
// Tiled n x n matrix multiply: C = A * B, row-major, blockDim == (TILEWIDTH, TILEWIDTH).
// Each block computes one TILEWIDTH x TILEWIDTH tile of C, staging tiles of A
// and B through shared memory.
// BUG FIXES versus the original:
//  * the inner product read Bds[tx][k]; the correct shared index is Bds[k][tx],
//    so results were wrong whenever B was not symmetric in its tiles;
//  * __syncthreads() sat inside the divergent `if (row < n && col < n)` branch,
//    which is undefined behaviour for boundary blocks;
//  * boundary tiles read shared entries that out-of-range threads never loaded;
//    out-of-range taps are now explicitly zero-filled.
void vecMulMatrixKernel(float* A, float* B, float* C, int n){
    int tx = threadIdx.x; int ty = threadIdx.y;
    int row = blockIdx.y*TILEWIDTH + ty;
    int col = blockIdx.x*TILEWIDTH + tx;
    __shared__ float Ads[TILEWIDTH][TILEWIDTH];
    __shared__ float Bds[TILEWIDTH][TILEWIDTH];
    float val = 0.0f;
    int numTiles = (n + TILEWIDTH - 1) / TILEWIDTH;
    for(int t = 0; t < numTiles; t++){
        int aCol = t*TILEWIDTH + tx;
        int bRow = t*TILEWIDTH + ty;
        // Every thread loads (or zero-fills) its slot so the whole tile is valid,
        // and every thread reaches the barriers below.
        Ads[ty][tx] = (row < n && aCol < n) ? A[row*n + aCol] : 0.0f;
        Bds[ty][tx] = (bRow < n && col < n) ? B[bRow*n + col] : 0.0f;
        __syncthreads();
        for(int k = 0; k < TILEWIDTH; k++){
            val += Ads[ty][k]*Bds[k][tx];
        }
        __syncthreads();
    }
    if(row < n && col < n)
        C[row*n + col] = val;
}
// Returns the largest power of two strictly below the smallest power of two >= x
// (e.g. 5 -> 4, 8 -> 4, 1 -> 0), matching the original halving-of-ceiling behaviour.
int min2Power(int x){
    int p = 1;
    while (p < x) {
        p <<= 1;
    }
    return p >> 1;
}
__host__
// Host wrapper: copies the n x n row-major matrices A and B to the device,
// launches the tiled multiply kernel, and copies the product back into C.
// NOTE(review): no CUDA error checking is performed; failures surface only as
// garbage in C — consider checking return codes at call sites.
void vecMulMatrix(float* A,float* B,float* C, int n){
int size = n * n * sizeof(float);
float *d_A, *d_B, *d_C;
//Allocate device memory for A,B,C
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
//copy A,B to device memory
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
//call kernal function that the calculates the product and stores it in C
// Grid covers n rounded up to whole TILEWIDTH x TILEWIDTH tiles.
dim3 dimBlock(TILEWIDTH,TILEWIDTH,1);
dim3 dimGrid(ceil(n/(float)TILEWIDTH),ceil(n/(float)TILEWIDTH),1);
vecMulMatrixKernel<<<dimGrid,dimBlock >>>(d_A,d_B,d_C,n);
//copy C from devce memory (blocking copy also synchronises with the kernel)
cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
//free device memories
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
//Kernal function that runs in each thread
// Driver: multiplies two 10x10 matrices (A[i][j] = i+j, B[i][j] = i*j) on the
// GPU and prints A, B and the product C.
int main(){
    // const makes the stack arrays standard C++ (the original used a runtime n,
    // i.e. the non-standard VLA extension).
    const int n = 10;
    int i,j;
    float A[n][n],C[n][n],B[n][n];
    for(i=0;i<n;i++){
        for(j=0;j<n;j++){
            A[i][j]=i+j;
            B[i][j]=i*j;
        }
    }
    vecMulMatrix(&A[0][0],&B[0][0],&C[0][0],n);
    for(i=0;i<n;i++){
        for(j=0;j<n;j++){
            printf("%.3f ",A[i][j]);
        }
        printf("\n");
    }
    printf("---\n");
    for(i=0;i<n;i++){
        for(j=0;j<n;j++){
            printf("%.3f ",B[i][j]);
        }
        printf("\n");
    }
    printf("---\n");
    for(i=0;i<n;i++){
        for(j=0;j<n;j++){
            printf("%.3f ",C[i][j]);
        }
        printf("\n");
    }
    return 0;
}
|
2,311 | /*******************************************************************************
* PROGRAM: canny_edge_detector
* FILE: non_maximal_supp.cu
* PURPOSE: Apply non maximal suppression.
* NAME: Vuong Pham-Duy
* Faculty of Computer Science and Technology
* Ho Chi Minh University of Technology, Viet Nam
* vuongpd95@gmail.com
* DATE: 11/10/2016
*******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define NOEDGE 255
#define POSSIBLE_EDGE 128
#define EDGE 0
#define VERBOSE 1
__global__ void non_max_supp_kernel(int rows, int cols, int blockSize,
short int *d_magnitude, short int *d_delta_x, short int *d_delta_y, unsigned char *d_nms);
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true);
/*******************************************************************************
* PROCEDURE: non_maximal_supp
* PURPOSE: perform non-maximal suppression
* NAME: Vuong Pham-duy
* DATE: 10/11/2016
*******************************************************************************/
// Host wrapper: zeroes the device NMS buffer, then launches one thread per
// pixel (gridSize blocks of blockSize threads) to suppress non-maximal
// gradient magnitudes. Pointers are passed as ** but only dereferenced once;
// the pointees are device buffers of rows*cols elements.
void non_maximal_supp(int rows, int cols, int blockSize, int gridSize,
short int **d_magnitude, short int **d_delta_x, short int **d_delta_y, unsigned char **d_nms)
{
/****************************************************************************
* Perform non-maximal suppression.
****************************************************************************/
if (VERBOSE) printf("Doing the non-maximal suppression.\n");
gpuErrchk(cudaMemset((*d_nms), 0, rows * cols * sizeof(unsigned char)));
non_max_supp_kernel<<<gridSize, blockSize>>>(rows, cols, blockSize, (*d_magnitude), (*d_delta_x), (*d_delta_y), (*d_nms));
}
/*******************************************************************************
* PROCEDURE: non_max_supp_kernel
* PURPOSE: This routine applies non-maximal suppression to the magnitude of
* every pixel of the gradient image
* NAME: Vuong Pham-duy
* DATE: 10/11/2016
*******************************************************************************/
// One thread per pixel. For each interior pixel the gradient magnitude is
// compared against the magnitudes interpolated at the two neighbouring points
// along the gradient direction (mag1/mag2, built from the two nearest grid
// neighbours z1/z2 weighted by xperp/yperp). The pixel survives as
// POSSIBLE_EDGE only if it is a strict local maximum along that direction.
// The eight if/else cases cover the gradient-direction octants. Border pixels
// are left untouched (pre-zeroed by the caller).
__global__ void non_max_supp_kernel(int rows, int cols, int blockSize,
short int *d_magnitude, short int *d_delta_x, short int *d_delta_y, unsigned char *d_nms)
{
/* This thread process the number img_idx element of image */
int img_idx = blockIdx.x * blockSize + threadIdx.x;
if (img_idx >= (rows * cols)) return;
int r = img_idx / cols; /* row position of the pixel, range [0, rows - 1] */
int c = img_idx - r * cols; /* col position of the pixel, range [0, cols - 1] */
if ((r != rows - 1) && (r != 0) && (c != 0) && (c != cols - 1))
{
short m00;
m00 = d_magnitude[img_idx];
if (m00 == 0) d_nms[img_idx] = (unsigned char)NOEDGE;
else
{
short gx, gy, z1, z2;
float mag1, mag2, xperp, yperp;
gx = d_delta_x[img_idx];
gy = d_delta_y[img_idx];
/* Interpolation weights: gradient components normalised by the magnitude. */
xperp = -gx / ((float)m00);
yperp = gy / ((float)m00);
if (gx >= 0){
if (gy >= 0){
if (gx >= gy)
{
/* 111 */
/* Left point */
z1 = d_magnitude[img_idx - 1];
z2 = d_magnitude[img_idx - cols - 1];
mag1 = (m00 - z1)*xperp + (z2 - z1)*yperp;
/* Right point */
z1 = d_magnitude[img_idx + 1];
z2 = d_magnitude[img_idx + cols + 1];
mag2 = (m00 - z1)*xperp + (z2 - z1)*yperp;
}
else
{
/* 110 */
/* Left point */
z1 = d_magnitude[img_idx - cols];
z2 = d_magnitude[img_idx - cols - 1];
mag1 = (z1 - z2)*xperp + (z1 - m00)*yperp;
/* Right point */
z1 = d_magnitude[img_idx + cols];
z2 = d_magnitude[img_idx + cols + 1];
mag2 = (z1 - z2)*xperp + (z1 - m00)*yperp;
}
}
else
{
if (gx >= -gy)
{
/* 101 */
/* Left point */
z1 = d_magnitude[img_idx - 1];
z2 = d_magnitude[img_idx + cols - 1];
mag1 = (m00 - z1)*xperp + (z1 - z2)*yperp;
/* Right point */
z1 = d_magnitude[img_idx + 1];
z2 = d_magnitude[img_idx - cols + 1];
mag2 = (m00 - z1)*xperp + (z1 - z2)*yperp;
}
else
{
/* 100 */
/* Left point */
z1 = d_magnitude[img_idx + cols];
z2 = d_magnitude[img_idx + cols - 1];
mag1 = (z1 - z2)*xperp + (m00 - z1)*yperp;
/* Right point */
z1 = d_magnitude[img_idx - cols];
z2 = d_magnitude[img_idx - cols + 1];
mag2 = (z1 - z2)*xperp + (m00 - z1)*yperp;
}
}
}
else
{
if (gy >= 0)
{
if (-gx >= gy)
{
/* 011 */
/* Left point */
z1 = d_magnitude[img_idx + 1];
z2 = d_magnitude[img_idx - cols + 1];
mag1 = (z1 - m00)*xperp + (z2 - z1)*yperp;
/* Right point */
z1 = d_magnitude[img_idx - 1];
z2 = d_magnitude[img_idx + cols - 1];
mag2 = (z1 - m00)*xperp + (z2 - z1)*yperp;
}
else
{
/* 010 */
/* Left point */
z1 = d_magnitude[img_idx - cols];
z2 = d_magnitude[img_idx - cols + 1];
mag1 = (z2 - z1)*xperp + (z1 - m00)*yperp;
/* Right point */
z1 = d_magnitude[img_idx + cols];
z2 = d_magnitude[img_idx + cols - 1];
mag2 = (z2 - z1)*xperp + (z1 - m00)*yperp;
}
}
else
{
if (-gx > -gy)
{
/* 001 */
/* Left point */
z1 = d_magnitude[img_idx + 1];
z2 = d_magnitude[img_idx + cols + 1];
mag1 = (z1 - m00)*xperp + (z1 - z2)*yperp;
/* Right point */
z1 = d_magnitude[img_idx - 1];
z2 = d_magnitude[img_idx - cols - 1];
mag2 = (z1 - m00)*xperp + (z1 - z2)*yperp;
}
else
{
/* 000 */
/* Left point */
z1 = d_magnitude[img_idx + cols];
z2 = d_magnitude[img_idx + cols + 1];
mag1 = (z2 - z1)*xperp + (m00 - z1)*yperp;
/* Right point */
z1 = d_magnitude[img_idx - cols];
z2 = d_magnitude[img_idx - cols - 1];
mag2 = (z2 - z1)*xperp + (m00 - z1)*yperp;
}
}
}
/* Now determine if the current point is a maximum point */
if ((mag1 > 0.0) || (mag2 > 0.0))
{
d_nms[img_idx] = (unsigned char)NOEDGE;
}
else
{
if (mag2 == 0.0)
d_nms[img_idx] = (unsigned char)NOEDGE;
else
d_nms[img_idx] = (unsigned char)POSSIBLE_EDGE;
}
}
}
}
|
2,312 | #include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <iostream>
#include <stdio.h>
//NOTE: COMPILE WITH -arch=sm_20
#define CUDA_CHECK {cudaThreadSynchronize(); \
cudaError_t err = cudaGetLastError();\
if(err){\
std::cout << "Error: " << cudaGetErrorString(err) << " line " << __LINE__ << std::endl; \
exit(1);\
}}
// Dot product of two length-N device arrays using thrust::inner_product,
// accumulated in double starting from 0.0. x and y must be device pointers.
double calcDotProductThrust(double* x, double* y, int N){
    // Wrap the raw device pointers so Thrust iterates on the GPU.
    thrust::device_ptr<double> xBegin(x);
    thrust::device_ptr<double> xEnd(x + N);
    thrust::device_ptr<double> yBegin(y);
    // (removed an unused `yThEnd` local from the original)
    return thrust::inner_product(xBegin, xEnd, yBegin, 0.0);
}
// Times `kernel` (a host function computing a dot product on device arrays
// x and y of length N) with CUDA events, checks the result against `ans`
// within a 1e-4 relative tolerance, prints the timing, and returns the product.
double timeDotProduct(double (*kernel)(double*, double*, int), double *x, double *y, int N, double ans)
{
CUDA_CHECK;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
CUDA_CHECK;
cudaEventRecord(start, 0);
double prod = kernel(x, y, N);
cudaEventRecord(end, 0);
CUDA_CHECK;
// Event sync makes the elapsed-time query below valid.
cudaEventSynchronize(end);
CUDA_CHECK;
cudaError_t err = cudaGetLastError();
if(err){
std::cout << "Error: " << cudaGetErrorString(err) << std::endl;
}
// Relative-error comparison: exact float equality is not expected on GPU.
if( fabs(prod - ans) / fabs(ans) > 1e-4 )
{
std::cout << "Multiplication wrong! " << prod << " != " << ans << std::endl;
}
else
{
std::cout << "Multiplication right " << prod << " = " << ans << std::endl;
}
float timeInMs;
cudaEventElapsedTime(&timeInMs, start, end);
std::cout << "Time: " << timeInMs << "ms" << std::endl << std::endl;
CUDA_CHECK;
cudaEventDestroy(start);
cudaEventDestroy(end);
CUDA_CHECK;
return prod;
}
//int main(void)
// Copies the first N elements of the two host vectors to the GPU and returns
// their dot product computed with Thrust.
double inner_product(int N, std::vector<double>& first_vector, std::vector<double>& second_vector)
{
    // Stage the inputs in plain host arrays.
    double *h_x = new double[N];
    double *h_y = new double[N];
    std::copy(first_vector.begin(), first_vector.end(), h_x);
    std::copy(second_vector.begin(), second_vector.end(), h_y);
    // Device buffers.
    double *d_x;
    double *d_y;
    cudaMalloc(&d_x, N*sizeof(double));
    cudaMalloc(&d_y, N*sizeof(double));
    CUDA_CHECK;
    // Copy both operands to the device.
    cudaMemcpy(d_x, h_x, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, N*sizeof(double), cudaMemcpyHostToDevice);
    CUDA_CHECK;
    const double dot = calcDotProductThrust(d_x, d_y, N);
    // Release device and host storage.
    cudaFree(d_x);
    cudaFree(d_y);
    delete[] h_x;
    delete[] h_y;
    return dot;
}
|
2,313 | #include "includes.h"
// Maps a flat index of the upscaled output tensor (coordinates x, y, z, w with
// w fastest-varying and extents -, d1, d2, d3) to the flat index of the source
// element in the input tensor, whose last two extents are scale_factor times
// smaller — i.e. nearest-neighbour upsampling over the (z, w) dimensions.
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor)
{
int x, y, z, w;
// Decompose the flat index; w is the innermost coordinate.
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
// Collapse the upscaled coordinates and extents back to input resolution.
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
// Nearest-neighbour upscale: each thread copies the input element selected by
// translate_idx into one output slot. The 2-D grid is flattened to a single
// output offset exactly as in the original launch convention.
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor, int d1, int d2, int d3)
{
    long out_idx = threadIdx.x + blockDim.x * blockIdx.x;
    out_idx += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
    if (out_idx < no_elements) {
        const int src_idx = translate_idx(out_idx, d1, d2, d3, scale_factor);
        output[out_idx] = input[src_idx];
    }
}
2,314 | #include "includes.h"
// Separable 3-D convolution along the depth (Z) axis: each block stages
// LAYERS_RESULT_STEPS z-slabs of the source plus halo slabs in shared memory,
// then convolves with the 1-D kernel row `kernel_index` from constant memory
// (c_Kernel, declared elsewhere in this project).
// NOTE(review): assumes imageW/imageH/imageD are multiples of the LAYERS_*
// block dimensions and that kernel_radius <= LAYERS_HALO_STEPS*LAYERS_BLOCKDIM_Z
// — confirm at call sites.
__global__ void convolutionLayers3DKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int kernel_index, int kernel_radius )
{
// +1 on the innermost extent pads the shared array (bank-conflict mitigation).
__shared__ float s_Data[LAYERS_BLOCKDIM_X][LAYERS_BLOCKDIM_Y][(LAYERS_RESULT_STEPS + 2 * LAYERS_HALO_STEPS) * LAYERS_BLOCKDIM_Z + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * LAYERS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * LAYERS_BLOCKDIM_Y + threadIdx.y;
const int baseZ = (blockIdx.z * LAYERS_RESULT_STEPS - LAYERS_HALO_STEPS) * LAYERS_BLOCKDIM_Z + threadIdx.z;
d_Src += (baseZ * imageH + baseY) * imageW + baseX;
d_Dst += (baseZ * imageH + baseY) * imageW + baseX;
// Stride between consecutive z-slices.
const int pitch = imageW*imageH;
const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH];
//Main data
#pragma unroll
for (int i = LAYERS_HALO_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z] = d_Src[i * LAYERS_BLOCKDIM_Z * pitch];
}
//Upper halo (zero-filled outside the volume)
#pragma unroll
for (int i = 0; i < LAYERS_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z] = (baseZ + i * LAYERS_BLOCKDIM_Z >= 0) ? d_Src[i * LAYERS_BLOCKDIM_Z * pitch] : 0;
}
//Lower halo (zero-filled outside the volume)
#pragma unroll
for (int i = LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS + LAYERS_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z]= (baseZ + i * LAYERS_BLOCKDIM_Z < imageD) ? d_Src[i * LAYERS_BLOCKDIM_Z * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = LAYERS_HALO_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i++) {
float sum = 0;
//#pragma unroll
for (int j = -kernel_radius; j <= kernel_radius; j++) {
sum += kernel[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z + j];
}
d_Dst[i * LAYERS_BLOCKDIM_Z * pitch] = sum;
}
}
2,315 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
// CPU mean filter: res[n][m] = truncated average of img over the
// (2k+1) x (2k+1) window centred at (n, m), clipped to the N x M image.
// BUG FIXES versus the original:
//  * the window loops used the image extents (N-k..N+k, M-k..M+k) instead of
//    the current pixel (n-k..n+k, m-k..m+k);
//  * the accumulator read res[n][m] instead of adding into `temp`;
//  * the divide-by-count happened inside the column loop rather than once per pixel.
void mean_filter_h(int **img, int **res, int N, int M, int k)
{
    for(int n = 0; n < N; n++) {
        for(int m = 0; m < M; m++) {
            int count = 0;      // number of in-bounds window cells
            float temp = 0.0f;  // running sum over the window
            for(int i = n - k; i <= n + k; i++) {
                for(int j = m - k; j <= m + k; j++) {
                    if(i >= 0 && i < N && j >= 0 && j < M) {
                        count = count + 1;
                        temp = temp + img[i][j];
                    }
                }
            }
            res[n][m] = (int)(temp / count);
        }
    }
}
// Driver: runs the CPU mean filter on a 4x4 image of ones with a 3x3 window
// (k/2 = 1 half-width) and prints the result.
int main()
{
    const int N = 4;
    const int M = 4;
    int k = 3; // window side; mean_filter_h takes the half-width k/2
    int *a[N], *b[N];
    for(int i = 0; i < N; i++ ) {
        a[i] = (int *)malloc(M * sizeof(int));
        b[i] = (int *)malloc(M * sizeof(int));
    }
    for(int i = 0; i < N; i++ ) {
        for(int j = 0; j < M; j++ ) {
            a[i][j] = 1;
            b[i][j] = 0;
        }
    }
    mean_filter_h(a, b, N, M, k / 2);
    for(int i = 0; i < N; i++ ) {
        for(int j = 0; j < M; j++ ) {
            printf("%d ", b[i][j]);
        }
    }
    // BUG FIX: release the per-row buffers (previously leaked).
    for(int i = 0; i < N; i++ ) {
        free(a[i]);
        free(b[i]);
    }
    return 0;
}
|
2,316 | //#include <algorithm>
//#include <vector>
//
//#include "caffe/layers/clusters_triplet_loss_layer.hpp"
//#include "caffe/util/math_functions.hpp"
//
//namespace caffe {
//
// template <typename Dtype>
// __global__ void ClustersTripletForward(const int nthreads, const int batch_size,
// const int dim, const Dtype margin, const Dtype* pos_data,const Dtype* neg_data,
// Dtype* vec_loss) {
//
// CUDA_KERNEL_LOOP(index, nthreads) {
// const int ind_a = index / (batch_size * batch_size);
// const int ind_p = (index % (batch_size * batch_size)) / batch_size;
// const int ind_n = index % batch_size;
//
// Dtype dpa(0.0), dna(0.0), t;
// for (int i = 0; i < dim; i++)
// {
// t = pos_data[ind_p * dim + i] - pos_data[ind_a * dim + i];
// dpa += t * t;
// t = neg_data[ind_n * dim + i] - pos_data[ind_a * dim + i];
// dna += t * t;
// }
// vec_loss[index] = max(Dtype(0), dpa + margin - dna);
// }
// }
//
// template <typename Dtype>
// __global__ void ClustersTripletBackward(const int nthreads, const int batch_size,
// const int dim, const Dtype* pos_data, const Dtype* neg_data, const Dtype* vec_loss,
// Dtype* pos_diff, Dtype* neg_diff) {
//
// CUDA_KERNEL_LOOP(index, nthreads) {
// const int item = index / dim;
// const int k = index % dim;
//
// Dtype diff;
// for (int i = 0; i < batch_size; i++)
// {
// for (int j = 0; j < batch_size; j++)
// {
// if (vec_loss[(item * batch_size + i) * batch_size + j] > 0)
// {
// pos_diff[item * dim + k] += neg_data[j * dim + k] - pos_data[i * dim + k];
// }
//
// if (vec_loss[(i * batch_size + item) * batch_size + j] > 0)
// {
// pos_diff[item * dim + k] += pos_data[item * dim + k] - pos_data[i * dim + k];
// }
//
// if (vec_loss[(i * batch_size + j) * batch_size + item] > 0)
// {
// neg_diff[item * dim + k] += pos_data[i * dim + k] - neg_data[item * dim + k];
// }
// }
// }
// }
// }
//
// template <typename Dtype>
// void ClustersTripletLossLayer<Dtype>::Forward_gpu(
// const vector<Blob<Dtype>*>& bottom,
// const vector<Blob<Dtype>*>& top) {
//
// //ClustersTripletLossLayer<Dtype>::Forward_cpu(bottom, top);
//
// Dtype margin = this->layer_param_.contrastive_loss_param().margin();
// int batch_size = bottom[0]->num() / 2;
// int dim = bottom[0]->channels();
// const Dtype* pos_data = bottom[0]->gpu_data();
// const Dtype* neg_data = bottom[0]->gpu_data() + batch_size * dim;
// Dtype* vec_loss = vec_loss_.mutable_gpu_data();
// Dtype nthreads = batch_size * batch_size * batch_size;
// caffe_gpu_set(vec_loss_.count(), Dtype(0), vec_loss);
// ClustersTripletForward<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(
// nthreads, batch_size, dim, margin, pos_data, neg_data, vec_loss);
//
// Dtype loss(0.0);
// for (int i = 0; i < vec_loss_.count(); i++)
// {
// loss += vec_loss_.cpu_data()[i];
// }
// top[0]->mutable_cpu_data()[0] = loss / batch_size / 2;
// }
//
// template <typename Dtype>
// void ClustersTripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
// const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
//
// //ClustersTripletLossLayer<Dtype>::Backward_cpu(top, propagate_down, bottom);
//
// int batch_size = bottom[0]->num() / 2;
// int dim = bottom[0]->channels();
// const Dtype* pos_data = bottom[0]->gpu_data();
// const Dtype* neg_data = bottom[0]->gpu_data() + batch_size * dim;
// const Dtype* vec_loss = vec_loss_.gpu_data();
// Dtype* pos_diff = bottom[0]->mutable_gpu_diff();
// Dtype* neg_diff = bottom[0]->mutable_gpu_diff() + batch_size * dim;
// Dtype nthreads = batch_size * dim;
//
// if (propagate_down[0]) {
// caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
// ClustersTripletBackward<Dtype> << <CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS >> >(
// nthreads, batch_size, dim, pos_data, neg_data, vec_loss, pos_diff, neg_diff);
// const Dtype alpha = top[0]->cpu_diff()[0] / batch_size;
// caffe_gpu_scal(bottom[0]->count(), alpha, bottom[0]->mutable_gpu_diff());
// }
//
// }
//
//INSTANTIATE_LAYER_GPU_FUNCS(ClustersTripletLossLayer);
//
//} // namespace caffe
|
2,317 | __device__ float magnitude(const float *r) {
  // Euclidean length of the 3-vector r. sqrtf keeps the computation in
  // single precision; pow(x, 0.5) promoted to double on every call.
  return sqrtf((r[0] * r[0]) + (r[1] * r[1]) + (r[2] * r[2]));
}
// Euclidean length of the xyz triple belonging to this block
// (entry blockIdx.x of a packed float3-style array).
// sqrtf replaces pow(x, 0.5), which silently promoted to double.
__device__ float magnitude_withId(const float *r) {
  const int i = blockIdx.x;
  return sqrtf((r[3 * i] * r[3 * i]) + (r[3 * i + 1] * r[3 * i + 1]) +
               (r[3 * i + 2] * r[3 * i + 2]));
}
// Saturate x into [min_v, max_v]. Note the argument order: max bound
// comes before min bound.
__device__ float clamp(float x, float max_v, float min_v) {
  float capped = min(max_v, x);
  return max(min_v, capped);
}
// Poly6 SPH smoothing kernel: WPoly6_const * (h^2 - mag^2)^3 for
// 0 < mag < h, else 0. *WPoly6_const is a precomputed normalization
// factor — presumably 315/(64*pi*h^9), confirm where it is set.
__device__ float WPoly6(const float mag, const float *h,
const float *WPoly6_const) {
if (mag < *h && mag > 0) {
float inner_val = ((*h * *h) - (mag * mag));
return inner_val * inner_val * inner_val * *WPoly6_const;
} else
return 0;
}
// Writes into grad[0..2] the Poly6-kernel gradient along the vector r
// (whose length is mag), scaled by *grad_WPoly6_const.
// NOTE(review): unlike WPoly6 there is no mag < h guard here — callers
// are expected to check the support radius first.
// The +0.000001f term guards the 1/mag factor against coincident particles.
__device__ void grad_WPoly6(float *grad, float *r, const float mag,
const float *h, const float *grad_WPoly6_const) {
float inner_val = ((*h * *h) - (mag * mag));
inner_val =
inner_val * inner_val * *grad_WPoly6_const * (1 / (mag + 0.000001f));
grad[0] = inner_val * r[0];
grad[1] = inner_val * r[1];
grad[2] = inner_val * r[2];
}
// Laplacian of the Poly6 kernel at distance mag:
// lap_WPoly6_const * (h^2 - mag^2) * (3h^2 - 7mag^2). No support-radius
// guard — callers must ensure mag < h.
__device__ float lap_WPoly6(const float *h, const float mag,
const float *lap_WPoly6_const) {
float inner_val = ((*h * *h) - (mag * mag));
return *lap_WPoly6_const * inner_val * ((3 * *h * *h) - (7 * mag * mag));
}
// Spiky SPH smoothing kernel: Wpiky_const * (h - mag)^3 for 0 < mag < h,
// else 0. (Parameter name "Wpiky_const" keeps the original's typo.)
__device__ float Wspiky(const float *h, const float mag,
const float *Wpiky_const) {
if (mag < *h && mag > 0) {
return *Wpiky_const * (*h - mag) * (*h - mag) * (*h - mag);
} else {
return 0;
}
}
// Writes into grad[0..2] the Spiky-kernel gradient along r (length mag):
// grad_Wspiky_const * (h - mag)^2 * r / mag. The +0.0000001f term guards
// the division when particles coincide. No mag < h guard here.
__device__ void grad_WSpiky(float *grad, const float mag, const float *r,
const float *h, const float *grad_Wspiky_const) {
float inner_val =
*grad_Wspiky_const * (*h - mag) * (*h - mag) * (1 / (mag + 0.0000001f));
grad[0] = r[0] * inner_val;
grad[1] = r[1] * inner_val;
grad[2] = r[2] * inner_val;
}
// Viscosity SPH smoothing kernel:
// Wviscosity_const * (-mag^3/(2h^3) + mag^2/h^2 + h/(2*mag) - 1).
// The +0.0000001f guards the h/(2*mag) term at mag == 0.
__device__ float Wviscosity(const float *h, const float mag,
const float *Wviscosity_const) {
float inner_val = ((-mag * mag * mag) / (2 * *h * *h * *h)) +
((mag * mag) / (*h * *h)) + (*h / (2 * mag + 0.0000001f)) -
1;
return inner_val * *Wviscosity_const;
}
// Laplacian of the viscosity kernel: a linear falloff
// lap_Wviscosity_const * (h - mag). Callers must ensure mag < h.
__device__ float lap_Wviscosity(const float mag, const float *h,
                                const float *lap_Wviscosity_const) {
  float falloff = *h - mag;
  return falloff * *lap_Wviscosity_const;
}
// Pressure force contribution on particle blockIdx.x from one neighbor.
// The neighbor index comes from the flattened neighbors table
// (one slot per thread). Symmetric pressure form:
// m * k * (rho_a + rho_b - 2*rho0) / (2*rho_b) * grad_WSpiky(r).
// NOTE(review): no guard against density_n[i] == 0 — presumably densities
// are strictly positive by construction; confirm upstream.
__device__ void pressure_force(float *press_force, const float *mass,
const float mag, const float *r, const float *h,
const float *density_n, const int *neighbors,
const float *rest_density, const float *k,
const float *grad_Wspiky_const) {
const int i = neighbors[blockIdx.x * blockDim.x + threadIdx.x];
float grad[3];
grad_WSpiky(grad, mag, r, h, grad_Wspiky_const);
float const_val = *mass * *k *
(density_n[blockIdx.x] + density_n[i] - 2 * *rest_density) *
1 / (2 * density_n[i]);
press_force[0] = const_val * grad[0];
press_force[1] = const_val * grad[1];
press_force[2] = const_val * grad[2];
}
// Viscosity force on particle blockIdx.x from one neighbor:
// eta * m / rho_b * lap_Wviscosity(mag) * (v_b - v_a), written into
// visc_force[0..2]. +0.0000001f guards the density division.
__device__ void viscosity_force(float *visc_force, const float *eta,
const float *mass, const float mag,
const float *h, const float *density_n,
const float *vel_n, const int *neighbors,
const float *lap_Wviscosity_const) {
const int i = neighbors[blockIdx.x * blockDim.x + threadIdx.x];
float const_val = *eta * *mass * (1 / (density_n[i] + 0.0000001f)) *
lap_Wviscosity(mag, h, lap_Wviscosity_const);
visc_force[0] = const_val * (vel_n[3 * i] - vel_n[3 * blockIdx.x]);
visc_force[1] = const_val * (vel_n[3 * i + 1] - vel_n[3 * blockIdx.x + 1]);
visc_force[2] = const_val * (vel_n[3 * i + 2] - vel_n[3 * blockIdx.x + 2]);
}
// Per-pair color-field gradient term m/rho_b * grad_WPoly6(r_dash),
// stored as an xyz triple at slot (blockIdx.x, threadIdx.x) of the
// flattened color_field_grad_val array. Parameter `h` is the smoothing
// radius; +0.0000001f guards the density division.
__device__ void color_field_grad(float *color_field_grad_val, const float *mass,
const float *density_n, float *r_dash,
const int *neighbors, const float *h,
const float mag,
const float *grad_WPoly6_const) {
const int i = neighbors[blockIdx.x * blockDim.x + threadIdx.x];
float inner_val = *mass * 1 / (density_n[i] + 0.0000001f);
float grad_Wpoly[3];
grad_WPoly6(grad_Wpoly, r_dash, mag, h, grad_WPoly6_const);
color_field_grad_val[blockIdx.x * blockDim.x * 3 + 3 * threadIdx.x] =
grad_Wpoly[0] * inner_val;
color_field_grad_val[blockIdx.x * blockDim.x * 3 + 3 * threadIdx.x + 1] =
grad_Wpoly[1] * inner_val;
color_field_grad_val[blockIdx.x * blockDim.x * 3 + 3 * threadIdx.x + 2] =
grad_Wpoly[2] * inner_val;
}
// Per-pair color-field Laplacian term m/rho_b * lap_WPoly6(mag), stored
// at this thread's (particle, neighbor-slot) position. Parameter `r` is
// unused here (kept for signature symmetry with color_field_grad).
__device__ void color_field_lap(float *color_field_lap_val, const float *mass,
const float *density_n, const float *r,
const int *neighbors, const float *h,
const float mag,
const float *lap_WPoly6_const) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
color_field_lap_val[i] = *mass * 1 / (density_n[neighbors[i]] + 0.0000001f) *
lap_WPoly6(h, mag, lap_WPoly6_const);
}
// One thread per (particle j = blockIdx.x, neighbor slot i = threadIdx.x):
// writes the Poly6 density contribution of that pair (mass-weighted)
// into density[j * blockDim.x + i].
// NOTE(review): these per-pair values are presumably summed into a
// per-particle density elsewhere — confirm at the call site.
__global__ void calc_density(float *density, const float *r1,
const int *neighbors, const float *mass,
const float *h, const float *WPoly6_const) {
float r_dash[3];
const int i = threadIdx.x;
const int j = blockIdx.x;
const int n = j * blockDim.x + i;
r_dash[0] = r1[3 * j] - r1[3 * neighbors[n]];
r_dash[1] = r1[3 * j + 1] - r1[3 * neighbors[n] + 1];
r_dash[2] = r1[3 * j + 2] - r1[3 * neighbors[n] + 2];
const float mag = magnitude(r_dash);
density[n] = WPoly6(mag, h, WPoly6_const) * *mass;
}
// One thread per (particle j = blockIdx.x, neighbor slot i = threadIdx.x).
// Computes the pairwise pressure + viscosity force (clamped to [-200, 200]
// per component) and the pair's color-field gradient/Laplacian terms.
// Pairs outside the support radius, or degenerate self-pairs (mag ~ 0),
// contribute zeros.
//
// Fix: r_dash[2] previously read r1[3*j + 1] (the y coordinate) instead
// of r1[3*j + 2], so the z separation was wrong for every pair.
__global__ void
calc_forces(float *force, float *color_field_lap_val,
            float *color_field_grad_val, const float *r1,
            const float *density_n, float *vel_n, int *neighbors,
            const float *h, const float *eta, const float *mass,
            const float *rest_density, const float *k,
            const float *grad_WPoly6_const, const float *lap_WPoly6_const,
            const float *Wspiky_const, const float *grad_Wspiky_const,
            const float *Wviscosity_const, const float *lap_Wviscosity_const) {
  const int i = threadIdx.x;
  const int j = blockIdx.x;
  const int n = j * blockDim.x + i;
  float r_dash[3], press_force[3], visc_force[3];
  r_dash[0] = r1[3 * j] - r1[3 * neighbors[n]];
  r_dash[1] = r1[3 * j + 1] - r1[3 * neighbors[n] + 1];
  r_dash[2] = r1[3 * j + 2] - r1[3 * neighbors[n] + 2];  // was 3*j + 1 (bug)
  float mag = magnitude(r_dash);
  if (mag > 0.000001f && mag < *h) {
    pressure_force(press_force, mass, mag, r_dash, h, density_n, neighbors,
                   rest_density, k, grad_Wspiky_const);
    viscosity_force(visc_force, eta, mass, mag, h, density_n, vel_n, neighbors,
                    lap_Wviscosity_const);
    color_field_grad(color_field_grad_val, mass, density_n, r_dash, neighbors,
                     h, mag, grad_WPoly6_const);
    color_field_lap(color_field_lap_val, mass, density_n, r_dash, neighbors, h,
                    mag, lap_WPoly6_const);
    force[blockIdx.x * blockDim.x * 3 + 3 * i] =
        clamp(press_force[0] + visc_force[0], 200, -200);
    force[blockIdx.x * blockDim.x * 3 + 3 * i + 1] =
        clamp(press_force[1] + visc_force[1], 200, -200);
    force[blockIdx.x * blockDim.x * 3 + 3 * i + 2] =
        clamp(press_force[2] + visc_force[2], 200, -200);
  } else {
    force[blockIdx.x * blockDim.x * 3 + 3 * i] = 0;
    force[blockIdx.x * blockDim.x * 3 + 3 * i + 1] = 0;
    force[blockIdx.x * blockDim.x * 3 + 3 * i + 2] = 0;
    color_field_grad_val[blockIdx.x * blockDim.x * 3 + 3 * i] = 0;
    color_field_grad_val[blockIdx.x * blockDim.x * 3 + 3 * i + 1] = 0;
    color_field_grad_val[blockIdx.x * blockDim.x * 3 + 3 * i + 2] = 0;
    color_field_lap_val[blockIdx.x * blockDim.x + threadIdx.x] = 0;
  }
}
// Integrates one particle per block: adds the surface-tension force when
// the color-field gradient exceeds *threshold, applies a clamped velocity
// update (explicit Euler, with the "10 * 12000" gravity-like term on z,
// kept as-is — TODO confirm its units), advances the position, then
// reflects against the six walls of the [-Width, Width] box with damping.
//
// Fix: the +z wall reset used `*Width * 10 - *eps`, which teleported
// particles far outside the box; the other five walls all use
// `+/-*Width -/+ *eps`, so the `* 10` is treated as a typo.
__global__ void update_pos(float *r, float *vel_p, float *force,
                           const float *threshold, const float *mass,
                           const float *time, const float *sigma,
                           const float *Width, const float *damping,
                           const float *eps, const float *color_field_lap_val,
                           const float *color_field_grad_val) {
  const int i = blockIdx.x;
  float gradient_length = magnitude_withId(color_field_grad_val);
  float force_surface[3] = {0, 0, 0};
  if (gradient_length >= *threshold) {
    float const_val = -1 * *sigma * color_field_lap_val[i] *
                      (1 / (gradient_length + 0.0000001f));
    force_surface[0] = const_val * color_field_grad_val[3 * i];
    force_surface[1] = const_val * color_field_grad_val[3 * i + 1];
    force_surface[2] = const_val * color_field_grad_val[3 * i + 2];
  }
  vel_p[3 * i] += clamp(
      (force[3 * i] + force_surface[0]) * (1 / (*mass)) * *time, 200, -200);
  vel_p[3 * i + 1] += clamp(
      (force[3 * i + 1] + force_surface[1]) * (1 / (*mass)) * *time, 200, -200);
  vel_p[3 * i + 2] += clamp(
      (((force[3 * i + 2] + force_surface[2]) * 1 / (*mass)) - 10 * 12000) *
          *time,
      200, -200);
  r[3 * i] += vel_p[3 * i] * *time;
  r[3 * i + 1] += vel_p[3 * i + 1] * *time;
  r[3 * i + 2] += vel_p[3 * i + 2] * *time;
  if (r[3 * i] < -*Width) {
    r[3 * i] = -*Width + *eps;
    vel_p[3 * i] = *damping * vel_p[3 * i];
  }
  if (r[3 * i + 1] < -*Width) {
    r[3 * i + 1] = -*Width + *eps;
    vel_p[3 * i + 1] = *damping * vel_p[3 * i + 1];
  }
  if (r[3 * i + 2] < -*Width) {
    r[3 * i + 2] = -*Width + *eps;
    vel_p[3 * i + 2] = *damping * vel_p[3 * i + 2];
  }
  if (r[3 * i] > *Width) {
    r[3 * i] = *Width - *eps;
    vel_p[3 * i] = *damping * vel_p[3 * i];
  }
  if (r[3 * i + 1] > *Width) {
    r[3 * i + 1] = *Width - *eps;
    vel_p[3 * i + 1] = *damping * vel_p[3 * i + 1];
  }
  if (r[3 * i + 2] > *Width) {
    r[3 * i + 2] = *Width - *eps;  // was *Width * 10 - *eps (bug)
    vel_p[3 * i + 2] = *damping * vel_p[3 * i + 2];
  }
}
|
2,318 | // #include <algorithm>
// #include <vector>
// #include "omp.h"
// #include <iostream>
// using namespace std;
// #include "caffe/layers/set_loss2_layer.hpp"
// #include "caffe/util/math_functions.hpp"
// #include "caffe/util/io.hpp"
// namespace caffe
// {
// template <typename Dtype>
// void SetLoss2Layer<Dtype>::Forward_gpu(
// const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
// {
// const Dtype* label = bottom[1]->cpu_data();
// const Dtype* data = bottom[0]->cpu_data();
// const Dtype* data_gpu = bottom[0]->gpu_data();
// // initialization of cat_per_iter_, im_per_cat_ and cat_mean_.
// if ( im_per_cat_ < 0 )
// {
// int j;
// for ( j = 0; j < batch_size_; ++j)
// {
// if (label[0] != label[j])
// {
// break;
// }
// }
// im_per_cat_ = j;
// cat_per_iter_ = batch_size_ / j;
// temp_loss_.Reshape(1, 1, cat_per_iter_, code_length_);
// cat_mean_.Reshape(1, 1, cat_per_iter_, code_length_);
// cat_grad_.Reshape(1, 1, cat_per_iter_, code_length_);
// }
// // compute cat_mean_
// memset(cat_mean_.mutable_cpu_data(), 0, sizeof(Dtype) * code_length_ * cat_per_iter_);
// for ( int i = 0; i < batch_size_; i += im_per_cat_)
// {
// for (int j = 0; j < im_per_cat_; ++j)
// {
// caffe_gpu_axpby<Dtype>(code_length_, 1. / im_per_cat_, data_gpu + (i + j) * code_length_, 1, cat_mean_.mutable_gpu_data() + int(i / im_per_cat_) * code_length_);
// }
// }
// // compute loss and cat grad
// memset(cat_grad_.mutable_cpu_data(), 0, sizeof(Dtype) * code_length_ * cat_per_iter_);
// float loss(0);// = new float* [cat_per_iter_];
// for ( int i = 0; i < cat_per_iter_; ++i)
// {
// // loss[i] = new float [cat_per_iter_];
// for (int j = i + 1; j < cat_per_iter_; ++j)
// {
// caffe_gpu_sub<Dtype>(code_length_, cat_mean_.gpu_data() + i * code_length_, cat_mean_.gpu_data() + j * code_length_, temp_loss_.mutable_gpu_data()+i*code_length_);
// Dtype sub_loss;
// caffe_gpu_dot<Dtype>(code_length_, temp_loss_.gpu_data()+i*code_length_, temp_loss_.gpu_data()+i*code_length_, &sub_loss);
// sub_loss = alpha_ - 0.25 * sub_loss;
// if (sub_loss > 0)
// {
// loss += sub_loss;
// caffe_gpu_axpby<Dtype>(code_length_, 1., temp_loss_.gpu_data()+i*code_length_, 1., cat_grad_.mutable_gpu_data() + i * code_length_);
// caffe_gpu_axpby<Dtype>(code_length_, -1., temp_loss_.gpu_data()+i*code_length_, 1., cat_grad_.mutable_gpu_data() + j * code_length_);
// }
// // else
// // {
// // loss[i][j] = 0;
// // }
// }
// }
// // float loss_sum(0.);
// // for (int i = 0; i < cat_per_iter_; ++i)
// // {
// // for (int j = i + 1; j < cat_per_iter_; ++j)
// // {
// // loss_sum += loss[i][j];
// // }
// // }
// top[0]->mutable_cpu_data()[0] = 2 * loss / cat_per_iter_ / (cat_per_iter_ - 1);
// }
// template <typename Dtype>
// void SetLoss2Layer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
// const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
// {
// if (propagate_down[1])
// {
// LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs. ";
// }
// Dtype* diff = bottom[0]->mutable_gpu_diff();
// loss_weight_ = top[0]->cpu_diff()[0];
// caffe_gpu_scal<Dtype>(code_length_ * cat_per_iter_, - loss_weight_ / cat_per_iter_ / (cat_per_iter_ - 1) / im_per_cat_, cat_grad_.mutable_gpu_data());
// // copy gradient to each sample
// for (int i = 0; i < batch_size_; ++i)
// {
// cudaMemcpy(diff + i * code_length_, cat_grad_.gpu_data() + int(i / im_per_cat_) * code_length_, sizeof(Dtype) * code_length_, cudaMemcpyDefault);
// }
// }
// INSTANTIATE_LAYER_GPU_FUNCS(SetLoss2Layer);
// } |
2,319 | #include "includes.h"
// Device helper: the threads of one block scan `vec` (length vec_length)
// in a block-stride loop; each match raises *index via atomicMax, so
// after all threads finish *index holds the LARGEST matching position.
// *index must be pre-initialized (e.g. to -1) by the caller.
__device__ void find_index(short *vec, const int vec_length, int *value, int *index) {
for (int i = threadIdx.x; i < vec_length; i = i + blockDim.x) {
if (vec[i] == *value) {
atomicMax(index, i);
}
}
}
// Kernel overload of find_index for int vectors: every thread of the
// (single) launched block walks the array with stride blockDim.x and
// records, via atomicMax, the highest index whose element equals *value.
// *index must be initialized (e.g. to -1) before launch.
__global__ void find_index(int *vec, const int vec_length, int *value, int *index){
  for (int pos = threadIdx.x; pos < vec_length; pos += blockDim.x) {
    if (vec[pos] == *value) {
      atomicMax(index, pos);
    }
  }
}
2,320 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
/*
__global__ static inline int mandel(float c_re, float c_im, int count)
{
float z_re = c_re, z_im = c_im;
int i;
for (i = 0; i < count; ++i)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
return i;
}
*/
// One thread per pixel: iterates z = z^2 + c up to maxIterations and
// writes the escape iteration count into the pitched output row.
// NOTE(review): there is no bounds guard on thisX/thisY, so the launch
// grid must tile the image exactly (hostFE uses resX/25 x resY/25 blocks
// of 25x25 threads — confirm resX/resY are multiples of 25).
// `width` is unused here because rows are addressed via `pitch`.
__global__ void mandelKernel( int* d_data,float stepX, float stepY,float lowerX,float lowerY ,int width, int maxIterations, size_t pitch) {
// To avoid error caused by the floating number, use the following pseudo code
//
int thisX= blockIdx.x * blockDim.x + threadIdx.x;
int thisY= blockIdx.y * blockDim.y + threadIdx.y;
float x = lowerX + thisX * stepX;
float y = lowerY + thisY * stepY;
float c_re = x, c_im = y;
float z_re = c_re, z_im = c_im;
int i;
for (i = 0; i < maxIterations; ++i)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
// Pitched addressing: pitch is in bytes, hence the char* cast.
int *ptr = (int *)((char*)d_data+thisY*pitch);
ptr[thisX] = i;
//d_data[ thisX + thisY * width ] = i;
}
// Host front-end function that allocates the memory and launches the GPU kernel
// Host front-end: allocates pinned host memory and a pitched device
// buffer, launches mandelKernel over the full image, copies the pitched
// result back, and unpacks it into the caller's img array.
// NOTE(review): numBlocks uses integer division (resX/25, resY/25) with
// no kernel bounds check, so resX and resY must be exact multiples of 25
// or the right/bottom edges go uncomputed — confirm with callers.
// NOTE(review): no CUDA error checking on any call here.
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
int *data, *d_data;
dim3 threadPerBlock(25,25);
dim3 numBlocks(resX/threadPerBlock.x,resY/threadPerBlock.y);
size_t pitch;
//data = (int*)malloc( sizeof(int)*resX*resY );
cudaHostAlloc(&data, sizeof(int) * resX*resY, cudaHostAllocMapped);
// Pitched allocation keeps each row aligned; the matching 2D copy below
// strips the padding back out.
cudaMallocPitch((void **)&d_data, &pitch, sizeof(int)*resX, resY);
// cudaMalloc((void**)&d_data, sizeof(int)*resX*resY );
mandelKernel<<<numBlocks,threadPerBlock>>>(d_data,stepX,stepY,lowerX,lowerY,resX,maxIterations,pitch);
cudaMemcpy2D(data, sizeof(int)*resX, d_data, pitch, sizeof(int)*resX, resY, cudaMemcpyDeviceToHost);
memcpy(img,data,sizeof(int)*resX*resY);
cudaFreeHost(data);
cudaFree(d_data);
}
|
2,321 | #include<stdio.h>
#include<cuda_runtime.h>
// Select the first CUDA device with compute capability >= 1.0 and make it
// the current device. Returns false (after printing to stderr) when no
// device is present or none is usable.
bool init_cuda(){
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    if(device_count == 0) {
        fprintf(stderr, "There is no device \n");
        return false;
    }
    int dev = 0;
    for( ; dev < device_count; dev++) {
        cudaDeviceProp prop;
        // Accept the first device whose properties we can read and whose
        // major compute capability is at least 1.
        if(cudaGetDeviceProperties(&prop, dev) == cudaSuccess && prop.major >= 1) {
            break;
        }
    }
    if(dev == device_count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(dev);
    return true;
}
// Entry point: initialize CUDA and report success. Always exits 0.
int main() {
    if (init_cuda()) {
        printf("CUDA initialize.\n");
    }
    return 0;
}
|
2,322 | #include <stdio.h>
#include <cuda.h>
// CPU code to do matrix ADdition
// CPU reference: element-wise addition of two NxN row-major matrices,
// c = a + b.
void matrixAdd(int *a, int *b, int *c, int N)
{
    for(int row = 0; row < N; row++)
    {
        for(int col = 0; col < N; col++)
        {
            int idx = row * N + col;
            c[idx] = a[idx] + b[idx];
        }
    }
}
// GPU code to do matrix addition
// GPU element-wise addition of two NxN row-major matrices. Requires a 2D
// launch: col comes from the x dimensions, row from the y dimensions, and
// the grid must cover N in both. The guard tolerates overshooting grids.
__global__ void matrixAddKernel(int *a, int *b, int *c, int N)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int index = row * N + col;
if(col < N && row < N)
{
c[index] = a[index] + b[index];
}
}
// Benchmarks matrix addition on GPU vs CPU and verifies the results.
//
// Fixes vs. the original:
//  - the launch was 1D (16 blocks x 1024 threads) while the kernel
//    indexes in 2D, so only row 0 of the 2000x2000 matrix was ever
//    computed; the launch is now a 2D grid covering N x N.
//  - the verification loop's `break` sat outside the `if` body, so the
//    loop always exited after element 0 and mismatches went undetected.
int main(void)
{
    // NOTE(review): hard-coded device index — confirm device 3 exists.
    cudaSetDevice(3);
    //size of the matrix (the matrix will have NxN elements)
    int N = 2000;
    // 2D launch: 32x32 threads per block, ceil-div grid to cover N x N.
    dim3 block(32, 32, 1);
    dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y, 1);
    // pointers to host memory
    int *a_h;
    int *b_h;
    int *c_h;
    int *d_h;
    // pointers to device memory
    int *a_d;
    int *b_d;
    int *c_d;
    // this variable holds the number of bytes required by arrays
    int size;
    // use CUDA events to measure time
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    //print out the information about number of blocks and threads
    printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
    printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);
    //dynamically alocate host memory and load the arrays with some data
    size = N * N * sizeof(int);
    a_h = (int*) malloc(size);
    b_h = (int*) malloc(size);
    c_h = (int*) malloc(size);
    d_h = (int*) malloc(size);
    for(int i=0; i<N; i++)
    {
        for(int j=0; j<N; j++)
        {
            a_h[i * N + j] = i;
            b_h[i * N + j] = i;
        }
    }
    //allocate memory on the device
    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);
    //copy the host memory to the device
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    //start the timers
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //launch the kernel
    matrixAddKernel<<<grid,block>>>(a_d, b_d, c_d, N);
    //stop the timer and print out the execution time
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to calculate results on GPU: %f ms.\n",
           elapsedTime);
    //copy the results to host
    cudaMemcpy(c_h, c_d, size ,cudaMemcpyDeviceToHost);
    //time to measure CPU performance
    cudaEventRecord(start,0);
    //launch the matrixAdd function that executes on the CPU
    matrixAdd(a_h, b_h, d_h, N);
    //strop the timer and print out the results
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop );
    printf("Time to calculate results on CPU: %f ms.\n",
           elapsedTime);
    //check that GPU and CPU results match (report the first mismatch only)
    for(int i=0; i<N*N; i++)
    {
        if (c_h[i] != d_h[i])
        {
            printf("Error: CPU and GPU results do not match\n");
            break;
        }
    }
    // clean up
    free(a_h);
    free(b_h);
    free(c_h);
    free(d_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
2,323 | /*
============================================================================
Filename : implementation.cu
Author : Martino Milani / Sébastien Gachoud
SCIPER : 286204 / 250083
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
// CPU baseline: iterated 3x3 mean (heat) stencil over a length x length
// grid, with the four centre cells pinned to 1000 after every sweep.
// The pointer swap at the end of each iteration is on the LOCAL copies
// only — after an even number of iterations the final values live in the
// caller's `input` buffer, not `output`. NOTE(review): callers must
// account for this; confirm against the GPU path, which handles parity
// explicitly.
void array_process(double *input, double *output, int length, int iterations)
{
double *temp;
for(int n=0; n<(int) iterations; n++)
{
for(int i=1; i<length-1; i++)
{
for(int j=1; j<length-1; j++)
{
output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
input[(i-1)*(length)+(j)] +
input[(i-1)*(length)+(j+1)] +
input[(i)*(length)+(j-1)] +
input[(i)*(length)+(j)] +
input[(i)*(length)+(j+1)] +
input[(i+1)*(length)+(j-1)] +
input[(i+1)*(length)+(j)] +
input[(i+1)*(length)+(j+1)] ) / 9;
}
}
// Re-pin the 2x2 heat source at the grid centre.
output[(length/2-1)*length+(length/2-1)] = 1000;
output[(length/2)*length+(length/2-1)] = 1000;
output[(length/2-1)*length+(length/2)] = 1000;
output[(length/2)*length+(length/2)] = 1000;
temp = input;
input = output;
output = temp;
}
}
__global__
void gpu_computation(double* input, double* output, int length);
// GPU Optimized function
// GPU version of the iterated heat stencil: copies both buffers to the
// device, ping-pongs gpu_input/gpu_output across `iterations` kernel
// launches, and copies the buffer holding the final state back into
// `output` (parity of `iterations` decides which one that is).
//
// Fixes vs. the original:
//  - cudaFree was called on the ADDRESS of the pointer variables
//    (cudaFree(&gpu_input)), which frees an invalid pointer and leaks
//    the device allocations; now frees the pointers themselves.
//  - deprecated cudaThreadSynchronize() replaced by
//    cudaDeviceSynchronize().
void GPU_array_process(double *input, double *output, int length, int iterations)
{
    //Cuda events for calculating elapsed time
    cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
    cudaEventCreate(&cpy_H2D_start);
    cudaEventCreate(&cpy_H2D_end);
    cudaEventCreate(&cpy_D2H_start);
    cudaEventCreate(&cpy_D2H_end);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_end);
    /* Preprocessing goes here */
    const long SIZE = length * length * sizeof(double);
    double* gpu_input;
    double* gpu_output;
    dim3 threadsPerBlock(32,32);
    dim3 nbBlocks(length / threadsPerBlock.x + 1, length / threadsPerBlock.y + 1);
    // Padded so the overshooting grid can never index past the allocation.
    const long PADDED_SIZE = (nbBlocks.x+1) * threadsPerBlock.x * (nbBlocks.y+1) * threadsPerBlock.y * sizeof(double);
    cudaSetDevice(0);
    if(cudaMalloc((void**)&gpu_input, PADDED_SIZE) != cudaSuccess){
        cerr << "Error allocating input" << endl;
    }
    if(cudaMalloc((void**)&gpu_output, PADDED_SIZE) != cudaSuccess){
        cerr << "Error allocating output" << endl;
    }
    cudaEventRecord(cpy_H2D_start);
    /* Copying array from host to device goes here */
    if(cudaMemcpy(gpu_input, input, SIZE, cudaMemcpyHostToDevice) != cudaSuccess){
        cerr << "Error copying input to gpu" << endl;
    }
    if(cudaMemcpy(gpu_output, output, SIZE, cudaMemcpyHostToDevice) != cudaSuccess){
        cerr << "Error copying output to gpu" << endl;
    }
    cudaEventRecord(cpy_H2D_end);
    cudaEventSynchronize(cpy_H2D_end);
    cudaEventRecord(comp_start);
    /* GPU calculation goes here */
    // Ping-pong the two device buffers: even iterations read gpu_input,
    // odd iterations read gpu_output.
    for(int iter(0); iter < iterations; iter++){
        if(iter%2){
            gpu_computation <<< nbBlocks, threadsPerBlock >>> (gpu_output, gpu_input, length);
        }
        else{
            gpu_computation <<< nbBlocks, threadsPerBlock >>> (gpu_input, gpu_output, length);
        }
        cudaDeviceSynchronize();
    }
    cudaEventRecord(comp_end);
    cudaEventSynchronize(comp_end);
    cudaEventRecord(cpy_D2H_start);
    /* Copying array from device to host goes here */
    // After k iterations the newest state sits in gpu_output when k is
    // odd and in gpu_input when k is even.
    if(iterations%2==0)
    {
        if(cudaMemcpy(output, gpu_input, SIZE, cudaMemcpyDeviceToHost) != cudaSuccess){
            cerr << "failed to retrieve gpu_input" << endl;
        }
    }
    else{
        if(cudaMemcpy(output, gpu_output, SIZE, cudaMemcpyDeviceToHost) != cudaSuccess){
            cerr << "failed to retrieve gpu_output" << endl;
        }
    }
    cudaEventRecord(cpy_D2H_end);
    cudaEventSynchronize(cpy_D2H_end);
    /* Postprocessing goes here */
    cudaFree(gpu_input);   // was cudaFree(&gpu_input) — invalid pointer
    cudaFree(gpu_output);  // was cudaFree(&gpu_output)
    float time;
    cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
    cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, comp_start, comp_end);
    cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
    cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
// One 3x3 mean-stencil update per thread. The +1 offsets skip the border
// row/column; the guard also skips the four pinned heat-source cells at
// the grid centre (so their values carry over from the previous buffer)
// and any threads past the interior.
__global__
void gpu_computation(double* input, double* output, int length){
int x_glob = (blockIdx.x * blockDim.x) + threadIdx.x + 1; //+1 to avoid first column
int y_glob = (blockIdx.y * blockDim.y) + threadIdx.y + 1; //+1 to avoid first row
int element_id = (y_glob * length) + x_glob;
if ( ((x_glob == length/2-1) || (x_glob == length/2)) && ((y_glob == length/2-1) || (y_glob == length/2))
|| x_glob >= length - 1 || y_glob >= length-1)
{
return;
}
output[element_id] = (input[(y_glob-1)*(length)+(x_glob-1)] +
input[(y_glob-1)*(length)+(x_glob)] +
input[(y_glob-1)*(length)+(x_glob+1)] +
input[(y_glob)*(length)+(x_glob-1)] +
input[(y_glob)*(length)+(x_glob)] +
input[(y_glob)*(length)+(x_glob+1)] +
input[(y_glob+1)*(length)+(x_glob-1)] +
input[(y_glob+1)*(length)+(x_glob)] +
input[(y_glob+1)*(length)+(x_glob+1)] ) / 9;
}
2,324 | #include <stdio.h>
#define TILE_SIZE 32
#define KERNEL_RADIUS 8
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)
__constant__ float c_M[KERNEL_LENGTH][KERNEL_LENGTH];
const int Width = 3072;
const int Height = 3072;
const int nIter = 300;
float * h_Kernel,*h_Input,*h_Output;
float * d_Input, *d_Output;
//this optimization is not good.
// Tiled 2D convolution with the constant-memory mask c_M.
// Launch with TILE_SIZE x TILE_SIZE blocks; each block produces one
// TILE_SIZE x TILE_SIZE output tile. Requires Mask_Width <= KERNEL_LENGTH
// (the static shared-memory tile is sized for KERNEL_LENGTH).
//
// Fix: the block has TILE_SIZE^2 threads but the shared tile (with halo)
// is (TILE_SIZE + Mask_Width - 1)^2 elements; the original loaded exactly
// one element per thread, leaving three quarters of the tile
// uninitialized and read as garbage. The load is now a strided
// cooperative loop so every tile element (including the halo) is filled.
__global__ void convolution_2D_tiled_kernel(float* P, float* N, int height, int width, int pitch, int Mask_Width){
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row_o = blockIdx.y * TILE_SIZE + ty;
    int col_o = blockIdx.x * TILE_SIZE + tx;
    __shared__ float N_ds[TILE_SIZE + KERNEL_LENGTH - 1][TILE_SIZE + KERNEL_LENGTH - 1];
    // Cooperative load: each thread fills every (TILE_SIZE)-strided slot
    // of the haloed tile; out-of-image positions are zero-padded.
    int tile_w = TILE_SIZE + Mask_Width - 1;
    for (int i = ty; i < tile_w; i += TILE_SIZE) {
        for (int j = tx; j < tile_w; j += TILE_SIZE) {
            int row_i = blockIdx.y * TILE_SIZE + i - Mask_Width/2;
            int col_i = blockIdx.x * TILE_SIZE + j - Mask_Width/2;
            if ((row_i >= 0) && (row_i < height) &&
                (col_i >= 0) && (col_i < width)) {
                N_ds[i][j] = N[row_i * pitch + col_i];
            } else {
                N_ds[i][j] = 0.0f;
            }
        }
    }
    __syncthreads();
    float output = 0.0f;
    for(int i = 0; i < Mask_Width; i++){
        for(int j = 0; j < Mask_Width; j++){
            output += c_M[i][j] * N_ds[i + ty][j + tx];
        }
    }
    if(row_o < height && col_o < width){
        P[row_o * width + col_o] = output;
    }
}
// Times nIter launches of the tiled convolution kernel (after one warm-up
// launch), reports throughput, then validates the result against the
// analytic answer for all-ones input and all-ones mask
// (interior pixels = KERNEL_LENGTH^2).
// NOTE(review): border pixels are zero-padded and so legitimately differ
// from KERNEL_LENGTH^2 — with the check enabled for every pixel this
// prints FAIL even for a correct kernel; the per-pixel print is already
// commented out, but the tolerance logic may need the same treatment.
void gpuRunTiledKernel(){
dim3 blockDim(TILE_SIZE,TILE_SIZE);
dim3 gridDim((Width + TILE_SIZE - 1)/TILE_SIZE,(Height + TILE_SIZE - 1)/TILE_SIZE);
convolution_2D_tiled_kernel<<<gridDim,blockDim>>>(d_Output,d_Input,Height,Width,Width,KERNEL_LENGTH);
cudaDeviceSynchronize();
cudaEvent_t start;
cudaEventCreate(&start);
cudaEvent_t stop;
cudaEventCreate(&stop);
cudaEventRecord(start,NULL);
for(int i = 0;i<nIter;i++){
convolution_2D_tiled_kernel<<<gridDim,blockDim>>>(d_Output,d_Input,Height,Width,Width,KERNEL_LENGTH);
}
cudaEventRecord(stop, NULL);
cudaDeviceSynchronize();
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal,start,stop);
float gpuTime = (msecTotal / nIter) * 0.001;
printf("Tiled Kernel Throughput = %0.4f MPixels/sec, Time= %.5f sec, Size= %u Pixels\n",
(1.0e-6 * (Width*Height)/gpuTime),
gpuTime,
Width);
cudaMemcpy(h_Output,d_Output,Width*Height* sizeof(float),cudaMemcpyDeviceToHost);
bool correct = true;
double eps = 1.e-6;
for(int i = 0;i<Width*Height;i++){
double abs_err = fabs(h_Output[i] - KERNEL_LENGTH * KERNEL_LENGTH * 1.0);
if (abs_err > eps)
{
//printf("Error! Index = %d,h_Output = %f,true value = %d\n",
// i, h_Output[i], KERNEL_LENGTH*KERNEL_LENGTH);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
// Sets up an all-ones 3072x3072 input and an all-ones 17x17 mask
// (uploaded to constant memory), runs the tiled-convolution benchmark,
// then releases host and device buffers.
int main(){
h_Kernel = (float*)malloc(KERNEL_LENGTH * KERNEL_LENGTH * sizeof(float));
h_Input = (float*)malloc(Width * Height * sizeof(float));
h_Output = (float*)malloc(Width * Height * sizeof(float));
for(unsigned int i = 0;i<KERNEL_LENGTH*KERNEL_LENGTH;i++){
h_Kernel[i] = 1.0f;
}
for(unsigned int i = 0;i<Width*Height;i++){
h_Input[i] = 1.0f;
}
cudaMalloc((void**)&d_Input, Width * Height * sizeof(float));
cudaMalloc((void**)&d_Output,Width * Height * sizeof(float));
cudaMemcpy(d_Input,h_Input,Width * Height * sizeof(float),cudaMemcpyHostToDevice);
// Mask lives in __constant__ memory (c_M) for broadcast reads.
cudaMemcpyToSymbol(c_M,h_Kernel,KERNEL_LENGTH * KERNEL_LENGTH * sizeof(float));
printf("Running GPU convlution 2D ....\n");
gpuRunTiledKernel();
free(h_Input);
free(h_Kernel);
free(h_Output);
cudaFree(d_Input);
cudaFree(d_Output);
}
2,325 | #include "includes.h"
#define DOUBLE
#ifdef DOUBLE
#define Complex cufftDoubleComplex
#define Real double
#define Transform CUFFT_Z2Z
#define TransformExec cufftExecZ2Z
#else
#define Complex cufftComplex
#define Real float
#define Transform CUFFT_C2C
#define TransformExec cufftExecC2C
#endif
#define TILE_DIM 8
// synchronize blocks
// Copies one row segment per block (row = flattened 2D block id, column =
// threadIdx.x) from src into dst.
// NOTE(review): the pitch names look swapped — the READ strides by dpitch
// and the WRITE strides by spitch; confirm against the launch site.
// NOTE(review): src is read unconditionally BEFORE the tid < dpitch
// guard, so blockDim.x must not exceed the readable row length or this
// reads out of bounds.
__global__ void spread_i(Real* src, unsigned int spitch, Real* dst, unsigned int dpitch)
{
unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
unsigned int tid = threadIdx.x;
Real res = src[bid * dpitch + tid];
if( tid < dpitch) dst[bid * spitch + tid] = res;
}
2,326 | #include "includes.h"
// Simplified hysteresis thresholding for Canny edge detection: marks a
// pixel as an edge (255) when its gradient magnitude is >= highThresh,
// suppresses it (0) when <= lowThresh, and resolves the in-between band
// against the midpoint of the two thresholds.
//
// Fix: added a bounds guard — the +1 offsets keep threads off the
// top/left border, but nothing stopped an overshooting grid from
// indexing past the bottom/right edge (and the `rows` parameter was
// never used). Threads outside the interior (rows-1) x (cols-1) region
// now return early, preserving the "ignore the outside ring" contract.
__global__ void hyst_kernel(unsigned char *data, unsigned char *out, int rows, int cols) {
    // Establish our high and low thresholds as floats
    float lowThresh = 10;
    float highThresh = 70;
    // These variables are offset by one to avoid seg. fault errors
    // As such, this kernel ignores the outside ring of pixels
    const int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    // Guard: skip threads that fall on or beyond the bottom/right border.
    if (row >= rows - 1 || col >= cols - 1)
        return;
    const int pos = row * cols + col;
    const unsigned char EDGE = 255;
    unsigned char magnitude = data[pos];
    if(magnitude >= highThresh)
        out[pos] = EDGE;
    else if(magnitude <= lowThresh)
        out[pos] = 0;
    else {
        // Ambiguous band: promote to an edge only above the midpoint.
        float med = (highThresh + lowThresh) / 2;
        if(magnitude >= med)
            out[pos] = EDGE;
        else
            out[pos] = 0;
    }
}
2,327 | #include "includes.h"
// Saturate t into the byte range [0, 255] before the implicit narrowing
// conversion to u_char.
__device__ u_char clamp(float t)
{
    if (t > 255) {
        return 255;
    }
    if (t < 0) {
        return 0;
    }
    return t;
}
// Converts one pixel per thread from planar YUV420P to interleaved RGBA
// (alpha forced to 255) using BT.601 coefficients; +0.5 rounds before the
// clamp-and-truncate.
// NOTE(review): the chroma-plane addressing is unusual — U is read at
// (h + dim_y/4) * pitch_src and V at (h*5 + dim_y)/4 * pitch_src, i.e.
// half-height chroma rows laid out at full pitch. Verify this matches the
// allocator's plane layout; standard YUV420P usually strides chroma rows
// by pitch/2.
__global__ void kernel_colorSpaceYUV420PToRGBA(dev_t *src, dev_t *dst, int pitch_src, int pitch_dst, int w, int h)
{
unsigned int dim_x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int dim_y = blockDim.y * blockIdx.y + threadIdx.y;
int r,g,b,y,u,v;
if (dim_x < w && dim_y < h) {
y = *((u_char*)src + dim_y * pitch_src + dim_x);
u = *((u_char*)src + (h + dim_y / 4) * pitch_src + dim_x / 2);
v = *((u_char*)src + (h * 5 + dim_y) / 4 * pitch_src + dim_x / 2);
r = clamp(y + 1.402 * (v - 128) + 0.5);
g = clamp(y - 0.34414 * (u - 128) - 0.71414 * (v - 128) + 0.5);
b = clamp(y + 1.772 * (u - 128) + 0.5);
// *((uint32_t*)dst + dim_y * pitch_dst / 4 + dim_x) = (r << 24) + (g << 16) + (b << 8);
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4) = r;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 1) = g;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 2) = b;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 3) = 255;
}
}
2,328 | /*
#include "dpCudaFFT.hpp"
#include "errorCheck.hpp"
#define BEGIN cudaEventRecord(begin, 0);
#define END cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&delTime, begin, end);
#define cudaErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
//code from stackexchange to print cuda return messages
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=false){
if (code != cudaSuccess){
fprintf(stderr,"%s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
dpCudaFFT::dpCudaFFT(cl_context ctx, cl_command_queue q){
context = ctx;
queue = q;
workDimension = ONE_D;
name = "FFT";
cudaEventCreate(&begin);
cudaEventCreate(&end);
}
void dpCudaFFT::setup(int dataMB, int xLocal, int yLocal, int zLocal){
localSize[0] =localSize[1]=localSize[2]=1;
Asize = (dataMB*1048576)/(sizeof(cufftComplex));
if (Asize%2 != 0)
Asize++;
MB = Asize * sizeof(cufftComplex)/1048576;
}
void dpCudaFFT::init(){
dataParameters.push_back(Asize);
dataNames.push_back("nVectors");
Ain = (cufftComplex*) malloc(Asize*sizeof(cufftComplex));
Aout = (cufftComplex*) malloc(Asize * sizeof(cufftComplex));
if (!Aout || !Ain)
fprintf(stderr,"error in malloc");
generate(Ain, Asize);
}
void dpCudaFFT::memoryCopyOut(){
BEGIN
cudaErrChk(cudaMalloc((void**)&A_d, Asize * sizeof(cufftComplex)));
cudaErrChk(cudaMemcpy(A_d, Ain, Asize * sizeof(cufftComplex), cudaMemcpyHostToDevice));
END
//printf("%0.3f,", delTime);
}
void dpCudaFFT::plan(){
int ret = 0;
BEGIN
ret = cufftPlan1d(&plancufft, Asize, CUFFT_C2C, 1);
if (ret != 0)
fprintf(stderr, "%s %d", "cufftplan1d fail err: ", ret);
END
//printf("%0.3f,", delTime);
}
int dpCudaFFT::execute(){
int ret = 0;
BEGIN
ret = cufftExecC2C(plancufft, A_d, A_d, CUFFT_FORWARD);
if (ret != 0){
fprintf(stderr, "%s %d", "cufftexecc2c fail err: ", ret);
return -1;
}
END
//printf("%0.3f,", delTime);
return 0;
}
void dpCudaFFT::memoryCopyIn(){
BEGIN
cudaErrChk(cudaMemcpy(Aout, A_d, Asize * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
END
//printf("%0.3f,\n", delTime);
}
void dpCudaFFT::cleanUp(){
cudaErrChk(cudaFree(A_d));
cufftDestroy(plancufft);
free(Aout);
free(Ain);
}
void dpCudaFFT::generate(cufftComplex *A, int N){
int i;
srand(time(NULL));
for (i=0; i < N; i++){
A[i].x = rand() / (RAND_MAX/99999.9 + 1);
A[i].y = rand() / (RAND_MAX/99999.9 + 1);
}
}
*/
|
2,329 | #include<iostream>
#include<fstream>
using namespace std;
const int N = 4096;
const int BLOCKSIZE = 1024;
__global__
void add_me(int *a, int *b, int *c)
{
// Element-wise vector sum; one thread owns exactly one index.
// NOTE(review): no bounds guard -- the launch must cover exactly N elements.
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
c[idx] = a[idx] + b[idx];
}
int main()
{
// Sums two N-element vectors on the device and prints the total
// (all of b is 1 and a is 0, so the expected sum is N).
ofstream outfile;
outfile.open("output.txt");
int a[N] = {0};
int b[N];
int c[N] = {0};
int sum = 0;
// Load b with 1s.
for (int i = 0; i < N; i++)
b[i] = 1;
// Device mirrors of the three host arrays.
const int isize = N*sizeof(int);
int *ad, *bd, *cd;
cudaMalloc( (void**)&ad, isize );
cudaMalloc( (void**)&bd, isize );
cudaMalloc( (void**)&cd, isize );
cudaMemcpy( ad, a, isize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
cudaMemcpy( cd, c, isize, cudaMemcpyHostToDevice );
// 4 blocks x 1024 threads covers the N = 4096 elements exactly.
dim3 dimGrid( 4, 1 );
dim3 dimBlock( BLOCKSIZE, 1 );
add_me<<<dimGrid, dimBlock>>>(ad, bd, cd);
// The blocking copy back also synchronizes with the kernel.
cudaMemcpy( c, cd, isize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
cudaFree( cd );
for (int i = 0; i < N; i++)
sum += c[i];
cout << "The sum is: " << sum << '\n';
return EXIT_SUCCESS;
}
|
2,330 | #include <stdio.h>
#include <stdlib.h>
#define DSIZE 256
#define nTPB 64
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
__global__ void vector_add_kernel(float *c, const float *a, const float *b){
// Element-wise c = a + b with a tail guard for a partial final block.
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= DSIZE)
return;
c[i] = a[i] + b[i];
}
int main(){
// Host/device vector add with macro-based error checking at each phase.
const size_t bytes = DSIZE * sizeof(float);
float *h_a = (float *)malloc(bytes);
float *h_b = (float *)malloc(bytes);
float *h_c = (float *)malloc(bytes);
if (h_a == NULL) {printf("malloc fail\n"); return 1;}
if (h_b == NULL) {printf("malloc fail\n"); return 1;}
if (h_c == NULL) {printf("malloc fail\n"); return 1;}
// Random inputs in [0, 1]; the output buffer is cleared on both sides.
for (int i = 0; i < DSIZE; i++){
h_c[i] = 0.0f;
h_a[i] = rand()/(float)RAND_MAX;
h_b[i] = rand()/(float)RAND_MAX;}
float *d_a, *d_b, *d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
cudaCheckErrors("cudaMalloc fail");
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
cudaMemset(d_c, 0, bytes);
cudaCheckErrors("cudaMemcpy H2D fail");
// Ceil-divide so every element is covered even if DSIZE % nTPB != 0.
dim3 threads(nTPB, 1, 1);
dim3 blocks((DSIZE+threads.x-1)/threads.x, 1, 1);
vector_add_kernel<<<blocks, threads>>>(d_c, d_a, d_b);
cudaDeviceSynchronize();
cudaCheckErrors("kernel fail");
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy D2H fail");
printf("h_a[0] = %f\n", h_a[0]);
printf("h_b[0] = %f\n", h_b[0]);
printf("h_c[0] = %f\n", h_c[0]);
return 0;
}
|
2,331 | #include "includes.h"
__global__ void matrixMul(int *a, int *b, int *c){
// Naive dense multiply: thread (x, y) computes one output element as the
// dot product of row x of `a` with column y of `b`. The matrix dimension
// `size` comes from the shared header.
const int row = blockIdx.x*blockDim.x + threadIdx.x;
const int col = blockIdx.y*blockDim.y + threadIdx.y;
int acc = 0;
for (int k = 0; k < size; k++)
acc += a[row * size + k] * b[k * size + col];
c[row * size + col] = acc;
} |
2,332 | #include <stdio.h>
/*
* This file is an attempt at producing what the generated target code
* should look like for the multiplyMatrixMatrix routine.
*/
/* Prototype matrix representation. */
struct dag_array_t{
size_t rows;  // number of rows
size_t cols;  // number of columns
int* matrix;  // row-major element storage, rows * cols ints, caller-owned
};
/*
DAG Primitive. Here, we leverage the NVIDIA developer examples
to obtain a high-bandwith operation. They make use of shared memory
to avoid strided global memory accesses, and instead perform the
strided access in the shared block, which is roughly a ~3x improvement.
TILE_DIM = 32
BLOCK_ROWS = 8
https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
*/
const int tp_TILE_DIM = 32;
const int tp_BLOCK_ROWS = 8;
__global__ void transposeCoalesced(int *result, const int *in)
{
// Tiled transpose: each TILE_DIM x TILE_DIM tile is staged through shared
// memory so both the global read and the global write are coalesced.
// Expected launch: block = (TILE_DIM, BLOCK_ROWS), grid covering the
// matrix in TILE_DIM-sized tiles (dimensions must be tile multiples).
const int TILE_DIM = tp_TILE_DIM;
const int BLOCK_ROWS = tp_BLOCK_ROWS;
// FIX: +1 column of padding removes shared-memory bank conflicts on the
// column-wise reads below (as in the NVIDIA transpose example this code
// cites, and the padded variant elsewhere in this repository).
__shared__ int tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = in[(y+j)*width + x];
__syncthreads();  // tile fully populated before the transposed reads
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
result[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
__global__ void multiplyMatrixVector(int* result, int* matrix, int* vector, int cols)
{
// Dot product of one `matrix` row slice against one `vector` slice,
// reduced within the block through shared memory by thread 0.
// NOTE(review): the fixed 256-slot array assumes blockDim.x <= 256 with
// exactly one slot per thread -- confirm against the launch configuration.
__shared__ int reduce_array[256]; // Within a block
int vector_slice_offset = blockIdx.x * cols + threadIdx.x;
int matrix_slice_offset = blockIdx.y * cols + threadIdx.x;
reduce_array[threadIdx.x] = matrix[matrix_slice_offset] * vector[vector_slice_offset];
// All partial products must be in shared memory before thread 0 sums them.
__syncthreads();
// Sequential reduce.
if (threadIdx.x == 0){
int accumulator = 0;
for (int i = 0; i < blockDim.x; i++)
{
accumulator += reduce_array[i];
}
// NOTE(review): indexing the output by (blockIdx.x * cols + blockIdx.y)
// swaps the x/y roles used above -- verify the intended output layout.
result[blockIdx.x * cols + blockIdx.y] = accumulator;
}
}
// We use single-dimensional lists.
void matrixMultiply(dag_array_t* result, dag_array_t* m1, dag_array_t* m2){
// Computes result = m1 * m2 on the device: m2 is transposed first (for
// coalesced access), then one dot product is computed per output element.
// BUG FIX: the sizes below were previously raw element counts but were
// passed straight to cudaMalloc/cudaMemcpy as byte counts, so only a
// quarter (1/sizeof(int)) of each matrix was allocated and copied.
size_t bytes_m1 = m1->rows * m1->cols * sizeof(int);
size_t bytes_m2 = m2->rows * m2->cols * sizeof(int);
size_t bytes_result = m1->rows * m2->cols * sizeof(int);
// Copy operands onto the device.
int* d_m1;
cudaMalloc(&d_m1, bytes_m1);
cudaMemcpy(d_m1, m1->matrix, bytes_m1, cudaMemcpyHostToDevice);
int* d_m2;
cudaMalloc(&d_m2, bytes_m2);
cudaMemcpy(d_m2, m2->matrix, bytes_m2, cudaMemcpyHostToDevice);
int* d_col; // transpose of m2 -- same number of elements
cudaMalloc(&d_col, bytes_m2);
int* d_result; // device-side result buffer
cudaMalloc(&d_result, bytes_result);
// A crucial optimization involves removing extraneous cudaMemcpy and cudaMallocs.
// NOTE(review): integer division truncates here, so for dimensions smaller
// than tp_TILE_DIM (e.g. the 10x10 matrices in main) this grid is empty and
// the transpose never runs; a ceil-divide plus kernel bounds checks would be
// needed for non-multiple sizes. Left unchanged pending that redesign.
dim3 dimGrid(m2->rows/tp_TILE_DIM, m2->cols/tp_TILE_DIM, 1);
dim3 dimBlock(tp_TILE_DIM, tp_BLOCK_ROWS, 1);
transposeCoalesced<<<dimGrid,dimBlock>>>(d_col,d_m2);
const int threadsPerBlock = 256;
dim3 dimBlock2(threadsPerBlock,1,1); // 256 threads per row
dim3 dimGrid2((m1->rows + threadsPerBlock - 1) / threadsPerBlock,
(m2->rows + threadsPerBlock - 1) / threadsPerBlock,1);
multiplyMatrixVector<<<dimGrid2,dimBlock2>>>(d_result,d_m1,d_col,m1->cols);
cudaMemcpy(result->matrix,d_result,bytes_result,cudaMemcpyDeviceToHost);
result->rows = m1->rows;
result->cols = m2->cols;
cudaFree(d_m1);
cudaFree(d_m2);
cudaFree(d_result);
cudaFree(d_col);
}
int main(){
// Multiplies a 10x10 all-ones matrix by a 10x10 all-twos matrix and
// prints every element of the result on one unbroken line.
int* a = (int*) malloc(100*sizeof(int));
int* b = (int*) malloc(100*sizeof(int));
dag_array_t A = {10, 10, a};
dag_array_t B = {10, 10, b};
dag_array_t C = {10, 10, (int*) malloc(100*sizeof(int))};
// Row-major fill, flattened to a single loop over all 100 cells.
for (int i = 0; i < 100; i++)
{
a[i] = 1;
b[i] = 2;
}
matrixMultiply(&C,&A,&B);
for (int i = 0; i < 100; i++)
{
printf(" %d ",C.matrix[i]);
}
} |
2,333 | #include <cstdio>
#include <cstring>
#include<iostream>
using namespace std;
class Myclass
{
public:
// Simple two-int holder; printValues is virtual, so calls on an instance
// dispatch through a vtable pointer embedded in the object itself.
Myclass(int a=0,int b=0)
{_a=a;_b=b;}
// Callable from both host and device; prints the stored pair.
virtual __host__ __device__ void printValues()
{
printf("a = %d, b = %d\n", _a, _b);
}
private:
int _a;
int _b;
};
__global__ void virtualFunctions(Myclass *vf)
{
// *vf was byte-copied from the host, so its embedded vtable pointer holds
// a host address; a direct virtual call through it would be invalid here.
// Copy-constructing a local on the device builds an object with a
// device-side vtable, and the memcpy writes that repaired object back
// over *vf so this and later kernels can dispatch virtually.
Myclass vf_local = Myclass(*vf);
memcpy(vf, &vf_local, sizeof(Myclass));
vf->printValues();
}
__global__ void callVFunc(Myclass *vf)
{
// Plain virtual dispatch on a device object; only safe when vf's vtable
// pointer is already a device address (virtualFunctions repairs it first).
vf->printValues();
}
int main() {
//CPU
Myclass vf_host(4,5);
//GPU
Myclass *vf;
cudaMalloc(&vf, sizeof(Myclass));
// CPU --> GPU: a raw byte copy -- the object's vtable pointer still holds
// a host address at this point.
cudaMemcpy(vf, &vf_host, sizeof(Myclass), cudaMemcpyHostToDevice);
// This kernel rebuilds the object on the device (fixing the vtable)
// before the first virtual call; the order of these two launches matters.
virtualFunctions<<<1, 1>>>(vf);
cudaDeviceSynchronize();
// Safe only because the previous kernel replaced the vtable pointer.
callVFunc<<<1, 1>>>(vf);
cudaDeviceSynchronize();
return 0;
}
|
2,334 | #include <iostream>
#include <cmath>
#include <vector>
#include <random>
#include <cassert>
#define GPU_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
constexpr auto VECTOR_LENGTH = 1024u * 2;
constexpr auto EPS = 1e-4f;
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
  // Report a failed CUDA call with its source location; optionally exit
  // with the error code so callers/scripts can detect the failure.
  if (code == cudaSuccess) {
    return;
  }
  std::cout << "GPUassert: " << cudaGetErrorString(code) << " " << file << ":"
            << line << "\n";
  if (abort) {
    std::exit(code);
  }
}
void addHost(const std::vector<float> &A, const std::vector<float> &B,
             std::vector<float> &C) {
  // CPU reference implementation of element-wise C = A + B.
  // All three vectors must already have the same length.
  assert(A.size() == B.size() && B.size() == C.size());
  for (std::size_t i = 0; i < A.size(); ++i) {
    C[i] = A[i] + B[i];
  }
}
// Element-wise C = A + B, one element per thread.
// NOTE(review): despite the name this is a plain vector add, not a SAXPY
// (there is no scalar multiply). There is also no bounds guard, so the
// launch must cover exactly the array length (addGPU uses 2 x VECTOR_LENGTH/2).
__global__ void saxpy(const float *A, const float *B, float *C) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  C[i] = A[i] + B[i];
}
void addGPU(const std::vector<float> &A, const std::vector<float> &B,
            std::vector<float> &C) {
  // Device-side element-wise add: C = A + B over VECTOR_LENGTH floats.
  float *A_gpu, *B_gpu, *C_gpu;
  auto byte_size = VECTOR_LENGTH * sizeof(float);
  GPU_CHECK(cudaMalloc(&A_gpu, byte_size));
  GPU_CHECK(cudaMalloc(&B_gpu, byte_size));
  GPU_CHECK(cudaMalloc(&C_gpu, byte_size));
  GPU_CHECK(cudaMemcpy(A_gpu, A.data(), byte_size, cudaMemcpyHostToDevice));
  GPU_CHECK(cudaMemcpy(B_gpu, B.data(), byte_size, cudaMemcpyHostToDevice));
  GPU_CHECK(cudaMemcpy(C_gpu, C.data(), byte_size, cudaMemcpyHostToDevice));
  saxpy<<<2, VECTOR_LENGTH / 2>>>(A_gpu, B_gpu, C_gpu);
  // Surface launch-configuration errors immediately; execution errors are
  // caught by the blocking copy below.
  GPU_CHECK(cudaGetLastError());
  GPU_CHECK(cudaMemcpy(C.data(), C_gpu, byte_size, cudaMemcpyDeviceToHost));
  // BUG FIX: the device buffers were never released, leaking
  // 3 * byte_size of device memory on every call.
  GPU_CHECK(cudaFree(A_gpu));
  GPU_CHECK(cudaFree(B_gpu));
  GPU_CHECK(cudaFree(C_gpu));
}
bool verify(const std::vector<float> &A, const std::vector<float> &B,
            const std::vector<float> &C) {
  // Checks C[i] == A[i] + B[i] to within EPS for every element; prints the
  // first mismatch and returns false, or true when all elements agree.
  for (auto i = 0u; i < VECTOR_LENGTH; ++i) {
    // BUG FIX: the previous check `A[i] + B[i] - C[i] > EPS` was one-sided,
    // so a C[i] *larger* than the true sum always passed.
    if (std::fabs(A[i] + B[i] - C[i]) > EPS) {
      std::cout << "ERROR! Index " << i << "\n";
      std::cout << A[i] + B[i] << " " << C[i] << "\n";
      return false;
    }
  }
  return true;
}
int main() {
  // Generates random inputs, then validates the host and GPU adders
  // against the analytic sum via verify().
  std::random_device rd{};
  std::mt19937 gen{rd()};
  std::normal_distribution<float> dist{5, 2};
  std::vector<float> A(VECTOR_LENGTH), B(VECTOR_LENGTH), C(VECTOR_LENGTH);
  for (auto i = 0u; i < VECTOR_LENGTH; ++i) {
    A[i] = dist(gen);
    B[i] = dist(gen);
    C[i] = M_PI;  // sentinel that any correct add must overwrite
  }
  // Each stage exits nonzero on mismatch.
  addHost(A, B, C);
  if (!verify(A, B, C)) return 1;
  std::cout << "Host verified\n";
  std::fill(C.begin(), C.end(), M_PI);  // restore the sentinel
  addGPU(A, B, C);
  if (!verify(A, B, C)) return 1;
  std::cout << "GPU verified\n";
  return 0;
}
|
2,335 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated single-thread float stress kernel: evaluates a fixed chain
   of expressions and prints the result with %.17g so numeric differences
   across compilers/GPUs are observable. Per the file header this test is
   generated -- do not hand-tune the arithmetic; the operation order IS the
   test. */
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
comp = sinf((var_1 * var_2 / coshf(+1.6899E-36f - var_3 - var_4 + var_5)));
comp = +1.2287E-43f + -1.8636E-35f;
comp += (var_6 - (var_7 - -1.9095E5f * (+1.9059E-37f - logf((-1.5088E-36f / fmodf(sinf(var_8 + sqrtf((var_9 + (+0.0f + var_10)))), (var_11 + ldexpf((-1.6471E25f * sqrtf((var_12 + -1.3755E-37f + var_13 * powf((var_14 - -1.9217E35f / var_15 / sinf(-1.4114E-42f)), +0.0f / (-1.5518E13f / var_16 / (var_17 / atanf(+1.0853E29f * +1.2548E-3f))))))), 2))))))));
comp += (+1.3202E-44f - +1.1656E-36f / var_18 - +1.7676E-36f * var_19);
/* Exact float equality is intentional in generated tests. */
if (comp == var_20 + fmodf((var_21 + +1.1755E-37f + var_22 * (var_23 - acosf(cosf((+0.0f - (-1.1293E36f + -0.0f / +1.7082E-42f)))))), +1.7465E-15f / var_24 * (+1.1625E36f + var_25))) {
comp = (-1.7123E15f - +1.0857E-44f / var_26 * -1.0677E-41f);
comp += (+1.1663E-20f * (+1.0656E-43f - ceilf(ldexpf((+1.8071E-25f * var_27 * var_28 - (var_29 + -1.1838E-28f * +1.2957E-36f)), 2))));
float tmp_1 = floorf(atan2f(+1.3948E-44f + (var_30 + var_31 / +1.9812E36f), (-0.0f + (-1.9448E-36f * atanf(+1.9803E-35f)))));
comp += tmp_1 - (-1.1810E-43f / (var_32 + sqrtf(-1.3258E35f / -1.0834E36f + (+0.0f - (-1.0444E-35f / var_33)))));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
/* Heap-allocate a 10-element float array with every slot set to v.
 * The caller owns the returned buffer and must free() it. */
float *ret = (float*) malloc(sizeof(float)*10);
int i = 0;
while (i < 10) {
ret[i] = v;
++i;
}
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
/* Generated harness: each command-line argument feeds one kernel
   parameter; the generator guarantees argc matches, so no validation is
   performed (by design). Do not modify -- generated code. */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
/* Single-thread launch; the kernel prints its own result. The
   synchronize flushes device printf output before exit. */
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34);
cudaDeviceSynchronize();
return 0;
}
|
2,336 | /*
#include "LennardJones.h"
//------------------------Lennard Jones Potential -----------------------------//
__host__ __device__ double lennardJonesForce(double dist, double sig, double eps)
{
double sigsq = sig*sig;
double con = 24.0*eps/sigsq;
double dist2 = dist * dist;
dist2 /= sigsq;
double dist4 = dist2*dist2;
double dist8 = dist4*dist4;
double dist14 = dist2*dist4*dist8;
double invdist8= 1.0/dist8;
double invdist14= 1.0/dist14;
double s = 2.0*invdist14-invdist8;
return s * con;
}
//----------------------------------------------------------------------------//
__host__ __device__ double lennardJonesPotential(double dist, double sig, double eps)
{
double expr = sig/dist;
double expr2 = expr*expr;
double expr4 = expr2*expr2;
double expr6 = expr4*expr2;
double expr12 = expr6*expr6;
return 4.0*eps*(expr12-expr6);
}
*/ |
2,337 | #include "includes.h"
__global__ void ForwardCrossEntropy(float *output, float *labels, int nColsOutput, float *loss)
{
// Binary cross-entropy contribution of one column (one block per column),
// accumulated into the single global loss scalar; atomicAdd serializes the
// accumulation across blocks. The expression tree matches the original
// -(y*log(p) + log(1-p)*(1-y)) exactly to preserve float results.
const int col = blockIdx.x;
const float y = labels[col];
const float p = output[col];
float temp = -(y * logf(p) + logf(1 - p)
* (1 - y));
atomicAdd(loss, temp);
} |
2,338 | #include "includes.h"
__global__ void kHingeLinearRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
// Per-example linear hinge-loss gradient: one thread per example
// (image_id); `width` is the class count and `height` the example count.
// NOTE(review): despite "RowMajor" in the name, the stride mat[i * height]
// walks class scores with an example-count stride (column-major over
// classes) -- confirm the intended storage layout with callers.
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
// Shift both pointers so index i*height addresses class i of this example.
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
// Subgradient of max(0, margin + s_i - s_correct): 1 when the margin
// is violated, else 0; the correct class itself is zeroed here.
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? 1 : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
// The correct class receives minus the count of margin-violating classes.
target[correct_label * height] = -sum;
}
} |
2,339 | #include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define MATRIX_SIZE 256
#define BLOCK_SIZE 16
using namespace std;
__global__ void matMul(float *x, float *y, float *z, int matrixSize){
// Tiled matrix multiply z = x * y using BLOCK_SIZE x BLOCK_SIZE shared
// tiles. Assumes matrixSize is a multiple of BLOCK_SIZE and the launch
// uses blockDim == (BLOCK_SIZE, BLOCK_SIZE).
float zTemp = 0.0f;
__shared__ float xblkMat[BLOCK_SIZE*BLOCK_SIZE], yblkMat[BLOCK_SIZE*BLOCK_SIZE];
const int global_x = threadIdx.x + blockIdx.x * blockDim.x;
const int global_y = threadIdx.y + blockIdx.y * blockDim.y;
const int blocked_x = blockIdx.x;
const int blocked_y = blockIdx.y;
const int blocked_x_id = threadIdx.x;
const int blocked_y_id = threadIdx.y;
const int numBlocks = matrixSize / BLOCK_SIZE;
// Starting offsets of the first tile pair: x tiles walk along a row band
// of x, y tiles down a column band of y.
int xStart = blocked_y * matrixSize * BLOCK_SIZE;
int yStart = blocked_x * BLOCK_SIZE;
for (int block = 0; block < numBlocks; block++) {
// Stage one tile of each operand into shared memory.
xblkMat[blocked_x_id + (blocked_y_id*BLOCK_SIZE)] = x[xStart + ((blocked_y_id*matrixSize) + blocked_x_id)];
yblkMat[blocked_x_id + (blocked_y_id*BLOCK_SIZE)] = y[yStart + ((blocked_y_id*matrixSize) + blocked_x_id)];
__syncthreads();
// Partial dot product over this tile pair.
for (int k = 0;k < BLOCK_SIZE;k++) {
zTemp += xblkMat[k + (blocked_y_id * BLOCK_SIZE)] * yblkMat[blocked_x_id + (k * BLOCK_SIZE)];
}
__syncthreads();
// Advance both bands by one tile.
xStart += BLOCK_SIZE;
yStart += BLOCK_SIZE;
}
z[global_x + (global_y * matrixSize)] = zTemp;
}
int main(){
float *x,*y,*z;
struct timeval start;
struct timeval end;
double elapsedTime;
double numOps;
float gFLOPS;
// Unified memory: the same pointers are used on the host (init below)
// and in the kernel, with no explicit copies.
cudaMallocManaged(&x, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
cudaMallocManaged(&y, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
cudaMallocManaged(&z, MATRIX_SIZE * MATRIX_SIZE * sizeof(float));
// All-ones operands; the product's every element is then MATRIX_SIZE.
for(int i=0;i<MATRIX_SIZE;i++){
for(int j=0;j<MATRIX_SIZE;j++){
*(x + i*MATRIX_SIZE + j) = 1.0f;
*(y + i*MATRIX_SIZE + j) = 1.0f;
*(z + i*MATRIX_SIZE + j) = 0.0f;
}
}
// Keep track of when we start doing work
gettimeofday(&start, NULL);
dim3 threads(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid(MATRIX_SIZE/threads.x,MATRIX_SIZE/threads.y);
matMul<<<grid,threads>>>(x,y,z,MATRIX_SIZE);
// The synchronize must sit inside the timed region so the measurement
// covers kernel execution, not just the asynchronous launch.
cudaDeviceSynchronize();
// Keep track of when we finish our work
gettimeofday(&end, NULL);
// Calculate the time it took to do the above task
elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;
elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0;
elapsedTime /= 1000;
//Calculate the GFLOPS obtained and print it along with the execution time
// 2*N^3 flops for a dense N x N multiply (one mul + one add per term).
numOps = 2 * pow(MATRIX_SIZE, 3);
gFLOPS = float(1.0e-9 * numOps / elapsedTime);
printf("CUDA : %.3f seconds ( %f GFLOPS )\n",elapsedTime,gFLOPS);
/*cout << "X[23][65] : " << *(x + 23*MATRIX_SIZE + 65) << endl;
cout << "Y[23][65] : " << *(y + 23*MATRIX_SIZE + 65) << endl;
cout << "Z[23][65] : " << *(z + 23*MATRIX_SIZE + 65) << endl;*/
cudaFree(x);
cudaFree(y);
cudaFree(z);
return 0;
}
|
2,340 | #include "includes.h"
__global__ void kernel_push_atomic2(int *g_terminate, int *g_push_reser, int *s_push_reser, int *g_block_num, int width1)
{
// Marks this thread block as "not yet converged" in the termination grid
// whenever its pixel's push/reserve value differs between the two buffers.
// (__umul24 kept from the original to preserve the exact arithmetic.)
const int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
const int thid = __umul24(y, width1) + x;
// Any difference flags the whole block dirty; racing writes of the same
// value 1 are harmless.
if (s_push_reser[thid] != g_push_reser[thid])
g_terminate[blockIdx.y * (*g_block_num) + blockIdx.x] = 1;
} |
2,341 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda.h>
#define BLOCKS 8
#define THREADS 16
#define WIDTH 128
#define HEIGHT 64
__global__ void add(int* a, int* b, int* c)
{
// Element-wise 2D matrix add, c = a + b, one thread per cell.
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
// BUG FIX: the guard used `>` so threads with idx == WIDTH or
// idy == HEIGHT slipped through and wrote one row/column out of bounds.
if (idx >= WIDTH || idy >= HEIGHT) return;
// NOTE(review): the buffers come from cudaMallocPitch; indexing with WIDTH
// is only valid while the pitch equals WIDTH * sizeof(int) (true here,
// 128 ints = 512 bytes). Pass the pitch explicitly if sizes change.
c[idy * WIDTH + idx] = a[idy * WIDTH + idx] + b[idy * WIDTH + idx];
}
void init(int* h_v, int numb) {
// Fills the HEIGHT x WIDTH host matrix with the constant `numb`.
for (int i = 0; i < HEIGHT; i++) {
for (int j = 0; j < WIDTH; ++j) {
// BUG FIX: rows were previously strided by HEIGHT (64) instead of
// WIDTH (128), so successive rows overlapped and the upper half of
// the buffer was never initialized.
h_v[i * WIDTH + j] = numb;
}
}
}
// N
int main( void ) {
// Adds two HEIGHT x WIDTH int matrices on the device using pitched
// allocations and 2D copies, then prints the result to stderr.
int *result, *h_a, *h_b;
int *dev_a, *dev_b, *dev_c;
int size = HEIGHT * WIDTH * sizeof(int);
size_t size_pitch;
result = (int*) malloc( size );
h_a = (int*) malloc( size );
h_b = (int*) malloc( size );
init(h_a, 1);
init(h_b, 2);
// Pitched allocations keep each device row aligned; the returned pitch
// (in bytes) may exceed WIDTH * sizeof(int).
cudaError_t error = cudaMallocPitch( &dev_a, &size_pitch, WIDTH * sizeof(int), HEIGHT );
// All three buffers share one pitch variable, which is only valid because
// they have identical dimensions.
error = cudaMallocPitch( &dev_b, &size_pitch, WIDTH * sizeof(int), HEIGHT );
error = cudaMallocPitch( &dev_c, &size_pitch, WIDTH * sizeof(int), HEIGHT );
// Host -> device 2D copies: host rows are tightly packed (WIDTH ints),
// device rows use the pitch.
error = cudaMemcpy2D( dev_a, size_pitch, h_a, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice);
error = cudaMemcpy2D( dev_b, size_pitch, h_b, WIDTH * sizeof(int), WIDTH * sizeof(int), HEIGHT, cudaMemcpyHostToDevice);
error = cudaMemset2D(dev_c, size_pitch, 0, sizeof(int) * WIDTH, HEIGHT);
dim3 t(THREADS, THREADS);
dim3 b( (WIDTH + t.x - 1) / t.x , (HEIGHT + t.y - 1) / t.y);
add<<<b, t>>>( dev_a, dev_b, dev_c);
// Device -> host: the blocking 2D copy also synchronizes with the kernel.
cudaMemcpy2D(result, WIDTH * sizeof(int), dev_c, size_pitch, WIDTH * sizeof(int), HEIGHT, cudaMemcpyDeviceToHost);
fprintf(stderr, "%s\n", "Result...");
for (int i = 0; i < HEIGHT; ++i) {
for (int j = 0; j < WIDTH; ++j) {
// BUG FIX: the row stride here was HEIGHT, which scrambled the
// printout; host rows are WIDTH elements wide.
fprintf(stderr, " %d ", result[i * WIDTH + j]);
}
fprintf(stderr, "%s\n", "");
}
fprintf(stderr, "%s\n", "");
free(h_a), free(h_b), free(result);
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
return 0;
}
|
2,342 | #include <iostream>
#include <chrono>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include <algorithm>
#define BLOCKSIZE 128
#define LOG_BLOCKSIZE 7
// MUST BE ASSOCIATIVE
// Combiner for the scan/propagate kernels below; addition here, but any
// associative binary operation would preserve correctness.
__device__ inline int f(int a, int b){
return a + b;
}
/**
* In this variant, several optimizations have been applied:
*
* - Since only half the threads were doing work in parallel_main.cu, only BLOCKSIZE / 2 threads
* now make up a block, reducing the number of warps scheduled.
*
* - In parallel_cu, a warp would always have divergence in that some threads would do work and
* others would idle. In this variant, if the current downsweep iteration needs 8 workers, then
* only the first 8 threads of the block will actually do work, reducing dependence.
**/
__global__ void scan(const int n, int *x, int *out){
// In-place block-level inclusive scan over 2*BLOCKSIZE elements per block;
// each of the BLOCKSIZE/2 launched threads owns two adjacent elements.
// The block's total is exported to out[blockIdx.x] for the next level.
// (See the header comment above for the optimizations vs parallel_main.cu.)
__shared__ int scan_v[2 * BLOCKSIZE];
int tid = threadIdx.x;
int tcount = blockDim.x;
int i = blockIdx.x * 2 * blockDim.x + threadIdx.x * 2;
// Load this thread's element pair (^1 addresses the partner slot).
scan_v[2 * tid] = x[i];
scan_v[(2 * tid) ^ 1] = x[i ^ 1];
__syncthreads();
// Upsweep: combine elements at doubling strides; the index arithmetic
// packs the active workers into the first threads of the block so a
// warp either fully works or fully idles.
for(int i = 1, j = 1; j <= LOG_BLOCKSIZE; i = (i << 1) + 1, j++){
int curr_tid = (tid << j) | i;
int oth = curr_tid - (1 << (j - 1));
if(curr_tid < 2 * tcount){
scan_v[curr_tid] = f(scan_v[curr_tid], scan_v[oth]);
}
__syncthreads();
}
// Downsweep: propagate partial sums into the remaining positions.
for(int i = BLOCKSIZE / 2, j = LOG_BLOCKSIZE - 1; i >= 1; i /= 2, j--){
int curr_tid = ((tid + 1) << (j + 1)) | (i - 1);
int oth = curr_tid - i;
if(curr_tid < BLOCKSIZE){
scan_v[curr_tid] = f(scan_v[curr_tid], scan_v[oth]);
}
__syncthreads();
}
// Write the scanned pair back; the last thread also exports the block total.
x[i] = scan_v[2 * tid];
x[i ^ 1] = scan_v[(2 * tid) ^ 1];
if(tid == blockDim.x - 1){
out[blockIdx.x] = scan_v[(2 * tid) ^ 1];
}
}
/**
* One slight optimization was done here:
* - The first block in parallel_main.cu doesn't do any work. We shift each block by +1, resulting
* in one less thread block scheduled, and no need for the condition anymore.
**/
__global__ void propagate(const int n, int *in, int *out){
// Adds the scanned carry from summary slot (bid - 1) into every element of
// output block bid. Blocks are shifted by +1 so the carry-free first block
// is simply never launched (one fewer block, no branch needed).
const int bid = blockIdx.x + 1;
const int idx = bid * blockDim.x + threadIdx.x;
out[idx] = f(out[idx], in[bid - 1]);
}
std::vector<int> get_levels(const int n, int block_size){
    // Sizes of each reduction level of the multi-pass scan:
    // n, ceil(n/bs), ceil(n/bs^2), ..., terminated by a final 1.
    std::vector<int> levels;
    for (int remaining = n; remaining > 1;
         remaining = (remaining + block_size - 1) / block_size) {
        levels.push_back(remaining);
    }
    levels.push_back(1);
    return levels;
}
int main(){
// Full prefix-sum pipeline over 2^28 ones: upward scan passes build
// per-block totals level by level, then downward passes propagate carries.
const int n = (1 << 28);
const int block_size = BLOCKSIZE;
assert(n % block_size == 0);
std::vector<int> levels = get_levels(n, block_size);
for(int i : levels){
std::cout << i << ' ';
}
std::cout << std::endl;
int *x = (int *) malloc(n * sizeof(int));
assert(x != NULL);
// All-ones input: the inclusive prefix sums are exactly 1..n, trivially verifiable.
for(int i = 0; i < n; i++){
x[i] = 1;
}
// NOTE(review): a variable-length array of pointers is non-standard C++;
// also the asserts below test the pointer variables (unchanged by a failed
// cudaMalloc) rather than cudaMalloc's cudaError_t return -- consider
// checking the return status instead. Device buffers are never freed.
int *d_arrays[levels.size()];
for(int i = 0; i < levels.size(); i++){
cudaMalloc(&d_arrays[i], levels[i] * sizeof(int));
assert(d_arrays[i] != NULL);
}
cudaMemcpy(d_arrays[0], x, levels[0] * sizeof(int), cudaMemcpyHostToDevice);
// Upward pass: scan each level, emitting per-block totals to the next.
for(int i = 1; i < levels.size(); i++){
int block_count = levels[i];
scan<<<block_count, block_size / 2>>>(levels[i - 1], d_arrays[i - 1], d_arrays[i]);
}
// Downward pass: add each level's scanned totals back into the level below.
for(int i = levels.size() - 2; i >= 1; i--){
int block_count = levels[i];
propagate<<<block_count - 1, block_size>>>(levels[i - 1], d_arrays[i], d_arrays[i - 1]);
}
int *result = (int *) malloc(n * sizeof(int));
// The blocking copy doubles as synchronization with all queued kernels.
cudaMemcpy(result, d_arrays[0], n * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < n; i++){
if(result[i] != i + 1){
std::cerr << i << ' ' << i + 1 << ' ' << result[i] << '\n';
return -1;
}
}
std::cout << "memory usage: " << n * sizeof(int) << " bytes" << std::endl;
} |
2,343 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated single-thread float stress kernel: evaluates a fixed chain
   of expressions (including a var_6-bounded loop) and prints the result
   with %.17g so numeric differences across compilers/GPUs are observable.
   Generated code -- do not hand-tune; the exact operation order is the test. */
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,int var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41) {
/* Exact float equality is intentional in generated tests. */
if (comp == var_1 / +0.0f - var_2 - (var_3 * var_4 - var_5)) {
comp = +1.4329E-43f * (+0.0f / (var_7 / -1.1450E36f));
comp += asinf((+1.5663E-22f / var_8 + logf(ceilf((var_9 + +1.8261E-44f * fmodf((var_10 + sqrtf(+1.6147E-36f)), (+1.2292E35f * (-0.0f - +1.5430E-41f))))))));
comp += (var_11 * sinf((+1.5481E36f / var_12 - atanf(tanhf(-1.8254E-44f)))));
if (comp == (+1.6499E35f * atanf(atanf(var_13 / var_14)))) {
comp += var_15 - +0.0f / var_16;
float tmp_1 = var_17 * +0.0f - var_18 / tanhf(-1.6246E-36f);
comp += tmp_1 - log10f((+1.3728E-44f / fmodf((var_19 / (var_20 * +0.0f / -1.4205E-35f / +1.7331E-41f)), var_21 + var_22 * (var_23 / +1.4503E34f))));
comp = (+1.9415E34f * (var_24 * +1.0316E-29f * powf(+1.4698E-42f, -1.9273E35f - +1.8795E34f)));
}
if (comp < -1.5516E21f / (+1.6774E35f * (var_25 + -1.9158E36f))) {
comp += (var_26 * (-1.3676E-37f / (var_27 / var_28)));
float tmp_2 = log10f((var_29 * (+1.6247E-42f / var_30)));
comp += tmp_2 * (-1.5496E-42f / var_31 + (var_32 + var_33 - var_34));
}
/* var_6 is the only integer parameter; it bounds this accumulation loop. */
for (int i=0; i < var_6; ++i) {
comp += (+1.8339E-43f + -0.0f * (-1.8858E-41f + (var_35 + expf((var_36 + var_37 / -0.0f / var_38)))));
comp = (-1.1668E-37f + fabsf((+1.9394E34f + var_39 / var_40 + (var_41 * -1.3973E-21f))));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
/* Allocate and return a 10-element float buffer, every slot set to v.
 * Ownership transfers to the caller (release with free()). */
float *buf = (float*) malloc(sizeof(float)*10);
for (int idx = 10; idx-- > 0; )
buf[idx] = v;
return buf;
}
int main(int argc, char** argv) {
/* Program variables */
/* Generated harness: each argument feeds one kernel parameter (note
   tmp_7 is the lone int, parsed with atoi). The generator guarantees
   argc matches, so no validation is performed. Do not modify. */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
int tmp_7 = atoi(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
float tmp_40 = atof(argv[40]);
float tmp_41 = atof(argv[41]);
float tmp_42 = atof(argv[42]);
/* Single-thread launch; the synchronize flushes device printf output. */
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40,tmp_41,tmp_42);
cudaDeviceSynchronize();
return 0;
}
|
2,344 | #include <stdio.h>
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;
__global__ void copy(float *odata, const float *idata)
{
// Baseline tile copy used as the effective-bandwidth reference for the
// transpose kernels; each thread copies TILE_DIM/BLOCK_ROWS rows of its
// column within the tile.
const int x = blockIdx.x * TILE_DIM + threadIdx.x;
const int yBase = blockIdx.y * TILE_DIM + threadIdx.y;
const int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
// Linear index: (tile row + offset j) * matrix width + column x.
const int idx = (yBase + j) * width + x;
odata[idx] = idata[idx];
}
}
__global__ void transposeNaive(float *odata, const float *idata)
{
// Direct transpose: the reads are coalesced, but each write strides by
// `width`, scattering the warp's stores across memory -- this is the slow
// reference the shared-memory version improves on.
const int x = blockIdx.x * TILE_DIM + threadIdx.x;
const int y = blockIdx.y * TILE_DIM + threadIdx.y;
const int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[x * width + (y + j)] = idata[(y + j) * width + x];
}
__global__ void transposeCoalesced(float *odata, float *idata)
{
// Tiled transpose with both global reads and writes coalesced.
// The +1 on the inner dimension avoids shared-memory bank conflicts when
// the tile is read column-wise below.
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
}
__syncthreads();  // tile fully populated before the transposed reads
// Now these are the offsets into the transposed tile
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
|
2,345 | #include "includes.h"
__global__ void copy(float *odata, const float *idata)
{
// Straight element copy over TILE_DIM x TILE_DIM tiles; a bandwidth
// baseline (TILE_DIM and BLOCK_ROWS come from the shared header).
const int col = blockIdx.x * TILE_DIM + threadIdx.x;
const int rowBase = blockIdx.y * TILE_DIM + threadIdx.y;
const int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
const int idx = (rowBase + j) * width + col;
odata[idx] = idata[idx];
}
} |
2,346 | #include <iostream>
#include <complex>
#include <math.h>
#include <thrust/complex.h>
#include <sys/time.h>
using namespace std;
// Iterative radix-2 FFT for a single block of n threads (one thread per
// sample). Uses a double-buffered ("ping-pong") shared-memory workspace of
// 2*n complex values, selected by pout/pin, with a full barrier between
// stages. Assumes n is a power of two and the launch is exactly one block
// of n threads with 2*n*sizeof(complex<double>) dynamic shared memory —
// TODO confirm against the caller.
__global__
void fft(thrust::complex<double> *g_odata, thrust::complex<double> *g_idata, int n)
{
extern __shared__ thrust::complex<double> temp[]; // allocated on invocation
int thid = threadIdx.x;
int pout = 0, pin = 1;
// Stage the input in the first half of the workspace.
temp[pout*n + thid] = g_idata[thid];
__syncthreads();
// Bit-reverse this thread's index over b = log2-ish bits.
// NOTE(review): __log2f on an int relies on float rounding; verify it yields
// exactly log2(n) for the intended power-of-two n.
int thid1=0;
int b = __log2f(n+1);
for(int i=0; i<b;i++){
if(thid & (1<<i))
thid1 |= (1<<(b-1-i));
}
// Apply the bit-reversal permutation into the other buffer.
pout = 1 - pout;
pin = 1 - pin;
temp[pout*n + thid] = temp[pin*n + thid1];
__syncthreads();
// Butterfly stages: at stage size i, each thread pairs with thid ^ i.
for(int i=1; i<n; i*=2){
pout = 1 - pout;
pin = 1 - pin;
thid1 = thid ^ i;
// Twiddle factor e^{-j*pi*thid/i} for this lane.
thrust::complex<double> factor(cos(-M_PI*thid/i), sin(-M_PI*thid/i));
if(thid1 > thid){
temp[pout*n + thid] = temp[pin*n + thid] + factor * temp[pin*n + thid1];
}
else{
temp[pout*n + thid] = temp[pin*n + thid1] + factor * temp[pin*n + thid];
}
__syncthreads();
}
// Whichever buffer was written last holds the result.
g_odata[thid] = temp[pout*n + thid];
}
// Report both launch-time (sync) and execution-time (async) CUDA errors.
// cudaDeviceSynchronize() also acts as the barrier that surfaces in-kernel
// faults from the preceding launch.
void checkError(){
    cudaError_t launchErr = cudaGetLastError();
    cudaError_t execErr = cudaDeviceSynchronize();
    if (launchErr != cudaSuccess)
        printf("Sync kernel error: %s\n", cudaGetErrorString(launchErr));
    if (execErr != cudaSuccess)
        printf("Async kernel error: %s\n", cudaGetErrorString(execErr));
}
// Read N and N complex samples (pairs of ints) from stdin, run the
// single-block FFT kernel, and print the kernel's elapsed time in ms.
int main(void)
{
int N;
cin>>N;
thrust::complex<double> *x, *y;
// Unified memory: host-side input fills the buffer the kernel reads directly.
cudaMallocManaged(&x, N*sizeof(thrust::complex<double>));
cudaMallocManaged(&y, N*sizeof(thrust::complex<double>));
for(int i=0; i<N;i++){
int t,u; cin>>t>>u;
x[i] = complex<double>(t, u);
}
// One block of N threads — note this caps N at the device's max block size
// and 2*N*sizeof(complex<double>) must fit in dynamic shared memory.
int blockSize = N;
int numBlocks = (N + blockSize - 1) / blockSize;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
fft<<<numBlocks, blockSize, 2*N*sizeof(thrust::complex<double>)>>>(y, x, N);
checkError();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout<<milliseconds;
// for (int i = 0; i < N; i++)
// cout<<y[i]<<"\n";
// cout<<endl;
cudaFree(x);
cudaFree(y);
}
|
2,347 | #include <iostream>
#include <math.h>
// Elementwise y[i] = x[i] + y[i]. The loop starts at threadIdx.x and strides
// by blockDim.x only, so a single-block launch covers the whole array.
__global__
void add(int n, float *x, float *y){
    for (int i = threadIdx.x; i < n; i += blockDim.x)
        y[i] = x[i] + y[i];
}
// Fill x with 1s and y with 2s, add them on the GPU, and report the largest
// deviation from the expected value 3.
int main(void){
    int N = 1<<20; // 1M elements
    std::cout << N << std::endl;

    // Unified-memory buffers visible to both host and device.
    float *x = nullptr;
    float *y = nullptr;
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    for (int i = 0; i < N; i++){
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // One block of 256 threads; add() strides over the full range.
    add <<< 1, 256 >>> (N,x,y);
    // Wait for the kernel before touching the results on the host.
    cudaDeviceSynchronize();

    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "max error: " << maxError << std::endl;

    cudaFree(x);
    cudaFree(y);
    return 0;
}
// Reference matrix transpose: output is `width` columns wide, input holds
// the transposed layout (`height` columns). One element per thread.
extern "C" __global__ void mtranReference(
    float *output,
    float *input,
    const int width,
    const int height)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    // Bounds guard: when width/height are not multiples of the block dims the
    // grid over-covers the matrix, and the original kernel read and wrote out
    // of bounds for those tail threads.
    if (x < width && y < height)
        output[y*width + x] = input[x*height + y];
}
|
2,349 | #include <iostream>
#include <sys/time.h>
#include <cuda.h>
using namespace std;
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}}
// C = A + B, one element per thread; threads past `size` exit immediately.
__global__ void VecSum(float *A, float *B, float *C, int size)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size)
        return;
    C[idx] = A[idx] + B[idx];
}
// Print the first `size` elements of C, tab-separated, followed by a newline.
void printVec(float *C, int size)
{
    for (int i = 0; i < size; ++i)
        cout << C[i] << "\t";
    cout << endl;
}
// Benchmark: add two random float vectors on the GPU and print the kernel
// time measured with CUDA events. Usage: prog [vector size] [threads/block].
int main(int argc, char* argv[])
{
    if (argc != 3) {
        cout << "launch parametrs: [vector size] [threads per block]" << endl;
        return 1;
    }
    int size = atoi(argv[1]);
    int threads_per_block = atoi(argv[2]);
    srand(time(NULL));
    float *A = new float[size];
    float *B = new float[size];
    float *C = new float[size];
    float *dev_A, *dev_B, *dev_C;
    cudaMalloc((void**)&dev_A, size * sizeof(float));
    cudaMalloc((void**)&dev_B, size * sizeof(float));
    cudaMalloc((void**)&dev_C, size * sizeof(float));
    for (int i = 0; i < size; i++) {
        A[i] = rand();
        B[i] = rand();
    }
    cudaMemcpy(dev_A, A, size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, B, size * sizeof(float), cudaMemcpyHostToDevice);
    // FIX: the original also copied the uninitialized host buffer C into
    // dev_C; dev_C is write-only for the kernel, so that transfer was
    // pointless and has been removed.
    int blockTotal = ceilf(float(size) / float(threads_per_block));
    cout << "Block total: " << blockTotal << endl;
    cout << "Threads per block : " << threads_per_block << endl;
    cout << "Threads total: " << blockTotal * threads_per_block << endl;
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    VecSum <<< blockTotal, threads_per_block >>> (dev_A, dev_B, dev_C, size);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(C, dev_C, size * sizeof(float), cudaMemcpyDeviceToHost);
    cout << "time: " << elapsedTime << " ms" << endl;
    //printVec(C, size);
    delete [] A; delete [] B; delete [] C;
    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C);
    return 0;
}
|
2,350 | #include <iostream>
using namespace std;
// Square each element: d_out[i] = d_in[i] * d_in[i].
// NOTE(review): no bounds check — every launched thread dereferences both
// arrays, so the launch must not over-cover them (or the caller must pad the
// device buffers to the full grid footprint).
__global__ void square(float *d_out, float *d_in){
int idx = blockDim.x*blockIdx.x + threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
// Square 0..9999 on the GPU and print the results in rows of ten.
int main(){
    const int ARRAY_SIZE = 10000;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    float h_in[ARRAY_SIZE];
    for(int i=0; i < ARRAY_SIZE; i++){
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];

    int NUM_THREADS = 512;
    int NUM_BLOCKS = ARRAY_SIZE / NUM_THREADS + 1;
    // FIX: the grid over-covers ARRAY_SIZE (20 blocks * 512 = 10240 threads
    // for 10000 elements) and square() has no bounds guard, so the original
    // read and wrote past the end of both device buffers. Pad the device
    // allocations to the full grid footprint to keep the tail in bounds.
    const int PADDED_BYTES = NUM_BLOCKS * NUM_THREADS * sizeof(float);

    float *d_in;
    float *d_out;
    cudaMalloc((void**) &d_in, PADDED_BYTES);
    cudaMalloc((void**) &d_out, PADDED_BYTES);
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    square<<<NUM_BLOCKS, NUM_THREADS>>>(d_out, d_in);

    // Blocking copy also synchronizes with the kernel; only the meaningful
    // ARRAY_SIZE elements are brought back.
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    for(int i=0; i< ARRAY_SIZE; i++){
        cout << h_out[i];
        if(i%10!=9) cout << "\t";
        else cout << endl;
    }

    // FIX: release the device buffers (the original leaked both).
    cudaFree(d_in);
    cudaFree(d_out);
}
2,351 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
// Copy a 1D float texture into a plain device array, one texel per thread.
// The 2D launch is flattened to a linear offset (row-major over the grid).
__global__ void Read_texture_obj_kernel(float *iptr, cudaTextureObject_t tex) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = col + row * blockDim.x * gridDim.x;
    iptr[offset] = tex1Dfetch<float>(tex, offset);
}
// Build a 1D linear-memory texture object over `length` floats at devPtr
// (32-bit single-channel float, element-type reads).
// Returns a heap-allocated handle: the caller owns it and should call
// cudaDestroyTextureObject(*tex) and free(tex) when done — NOTE(review):
// nothing in this file does, so reuse with care.
// The cudaCreateTextureObject return code is not checked here.
cudaTextureObject_t *TexObjFloat1D(float *devPtr, int length)
{
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = devPtr;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = length * sizeof(float);
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t *tex = (cudaTextureObject_t *)malloc(sizeof(cudaTextureObject_t));
cudaCreateTextureObject(tex, &resDesc, &texDesc, NULL);
return tex;
}
|
2,352 | #include <array>
// CUDA kernel. Each thread takes care of one element of c
// CUDA kernel: c[i] = a[i] + b[i] for any addable element type T.
// One element per thread; threads past n do nothing.
template<class T>
__global__ void vecAdd(T *a, T *b, T *c, int n)
{
    // Flat global thread index.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the tail: the grid may over-cover n.
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Host wrapper: VC = VA + VB computed on the device, for fixed-size
// std::arrays. Allocates, copies, launches, copies back, and frees.
template <typename T, size_t N>
void simple_vadd_cuda(const std::array<T, N>& VA, const std::array<T, N>& VB,
                      std::array<T, N>& VC) {
    const size_t bytes = N * sizeof(T);

    // Device-side buffers for the two inputs and the output.
    T *devA;
    T *devB;
    T *devC;
    cudaMalloc(&devA, bytes);
    cudaMalloc(&devB, bytes);
    cudaMalloc(&devC, bytes);

    // Stage the inputs on the device.
    cudaMemcpy(devA, VA.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, VB.data(), bytes, cudaMemcpyHostToDevice);

    // 1024 threads per block; enough blocks to cover all N elements.
    const int blockSize = 1024;
    const int gridSize = (int)ceil((float)N/blockSize);
    vecAdd<<<gridSize, blockSize>>>(devA, devB, devC, N);

    // Blocking copy retrieves the result (and synchronizes with the kernel).
    cudaMemcpy(VC.data(), devC, bytes, cudaMemcpyDeviceToHost);

    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
}
template void simple_vadd_cuda<float, 4>(const std::array<float, 4>& VA, const std::array<float, 4>& VB,
    std::array<float, 4>& VC);
template void simple_vadd_cuda<int, 4>(const std::array<int, 4>& VA, const std::array<int, 4>& VB,
    std::array<int, 4>& VC);
|
2,353 | /***************************************************************************//**
* \file projectVelocity.cu
* \author Anush Krishnan (anush@bu.edu),
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to update the velocity field
*/
#include "projectVelocity.h"
namespace kernels
{
/**
 * Project the u-velocity: subtract the pressure-gradient correction from the
 * intermediate field uhat, but only where both neighbouring pressure nodes
 * are fluid (ghostTagsP == -1); near the immersed body the boolean product
 * masks the correction to zero. One thread per u-node on the (nx-1) x ny
 * u-grid. `uold` is unused here.
 */
__global__
void project_velocity_X(double *u, double *uhat, double *uold, double *pressure, int *ghostTagsP, double *dx, double dt, int nx, int ny)
{
int i = threadIdx.x + (blockDim.x * blockIdx.x),
I = i % (nx-1),     // column index in the u-grid
J = i / (nx-1),     // row index in the u-grid
ip = J*nx + I,      // matching index in the nx-wide pressure grid
numU = (nx-1)*ny;   // total u-nodes
if (i >= numU)
return;
// Gradient spacing is the distance between adjacent pressure nodes:
// half of each neighbouring cell width.
u[i] = uhat[i] - (ghostTagsP[ip+1] ==-1 && ghostTagsP[ip] == -1) * dt*(pressure[ip+1]-pressure[ip]) / (0.5*dx[I+1]+0.5*dx[I]);
}
/**
 * Project the v-velocity (stored after the numU u-values in the same array).
 * Same masking scheme as project_velocity_X, with the gradient taken in y
 * (pressure neighbour is one full row, +nx, away). `uold` is unused here.
 */
__global__
void project_velocity_Y(double *u, double *uhat, double *uold, double *pressure, int *ghostTagsP, double *dy, double dt, int nx, int ny)
{
int numU= (nx-1)*ny,
i = threadIdx.x + (blockDim.x * blockIdx.x),
I = i % nx,          // column index in the v-grid
J = i / nx,          // row index in the v-grid
ip = J*nx + I,       // matching pressure-grid index
numUV = (ny-1)*nx + numU;
// Shift into the v-section of the combined velocity array.
i += numU;
if (i >= numUV)
return;
u[i] = uhat[i] - (ghostTagsP[ip+nx] == -1 && ghostTagsP[ip] == -1) * dt*(pressure[ip+nx]-pressure[ip]) / (0.5*dy[J+1]+0.5*dy[J]);
}
}//end namespace kernels
|
2,354 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
//#include <atlimage.h>
// Supported colour-space conversions (only `grayscale` is implemented below).
enum color_transform_t
{
grayscale,
sRGB,
LAB
};
// Supported image filters (declared but unused in this file).
enum transform_t
{
Gaussian
};
// Side length, in pixels, of the square test image.
#define SIZE 1000
//typedef struct
//{
// int r;
// int g;
// int b;
//} rgb_t;
//
//typedef rgb_t* rgb_ptr;
//typedef rgb_ptr* rgb_list;
//typedef rgb_list* rgb_mat;
//
// Forward declaration of the host-side transform driver defined below.
cudaError_t transform(uchar3 *dst_img, uchar3 *src_img, int img_size, int block_size, int grid_size, color_transform_t type);
// NOTE(review): this zero-argument overload is declared but never defined
// in this file — any call to it would fail at link time.
cudaError_t transform();
// convert one scanline to grayscale in parallel
__global__ void grayscale_transform(uchar3 *dst_img, uchar3 *src_img, int img_size)
{
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idx = y * SIZE + x;
uchar3 rgb = src_img[idx];
int average = (rgb.x + rgb.y + rgb.z) / 3;
dst_img[idx].x = average;
dst_img[idx].y = average;
dst_img[idx].z = average;
}
// CPU reference implementation of the grayscale transform, used for timing
// comparison against the kernel.
// NOTE(review): img_size is ignored — the loop is hard-wired to SIZE*SIZE
// pixels. The img_size main() passes is a byte count, not an element count,
// so it could not be used directly anyway; confirm before generalizing.
void host_grayscale(uchar3 *dst_img, uchar3 *src_img, int img_size)
{
for (int i = 0; i < SIZE * SIZE; i++)
{
uchar3 rgb = src_img[i];
int average = (rgb.x + rgb.y + rgb.z) / 3;
dst_img[i].x = average;
dst_img[i].y = average;
dst_img[i].z = average;
}
}
// Build a dummy SIZE x SIZE image, grayscale it on the GPU, then time the
// same conversion on the CPU for comparison.
int main()
{
    // genreate a dummy image
    int size = SIZE * SIZE;
    int img_size = size * sizeof(uchar3);
    int block_size = size / SIZE;     // == SIZE threads per block
    int grid_size = size / block_size; // == SIZE blocks
    //CImage img;
    uchar3 *src_img, *gray_img;
    src_img = (uchar3*)malloc(img_size);
    gray_img = (uchar3*)malloc(img_size);
    for (int i = 0; i < SIZE * SIZE; i++)
    {
        uchar3 src, gray;
        src.x = 128;
        src.y = 64;
        // FIX: the original assigned src.x a second time (leaving src.z
        // uninitialized) and used 256, which overflows unsigned char.
        src.z = 255;
        gray.x = 0;
        gray.y = 0;
        gray.z = 0;
        src_img[i] = src;
        gray_img[i] = gray;
    }
    cudaError_t cudaStatus = transform(gray_img, src_img, img_size, block_size, grid_size, grayscale);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudadevicereset failed!");
        return 1;
    }
    // CPU reference timing.
    clock_t begin = clock();
    host_grayscale(gray_img, src_img, img_size);
    clock_t end = clock();
    double time_spent = 1000 * (double)(end - begin) / CLOCKS_PER_SEC;
    printf("CPU Execution Time: %32fms", time_spent);
    free(gray_img);
    free(src_img);
    // FIX: removed the unreachable duplicated return/system("pause") lines
    // and the unused `srgb` variable from the original.
    return 0;
}
// transform an image
cudaError_t transform(uchar3 *dst_img, uchar3 *src_img, int img_size, int block_size, int grid_size, color_transform_t type)
{
cudaError_t cudaStatus;
uchar3 *t_src, *gpu_output;
cudaStatus = cudaMalloc((void**)&t_src, img_size);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMalloc((void**)&gpu_output, img_size);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMalloc failed!");
cudaStatus = cudaMemcpy(t_src, src_img, img_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
float et;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
if (type == grayscale)
grayscale_transform<<<grid_size, block_size>>>(gpu_output, t_src, img_size);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
printf("GPU Execution Time: %32fms\n", et);
//// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(dst_img, gpu_output, img_size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
fprintf(stderr, "cudaMemcpy failed!");
return cudaStatus;
}
//cudaError_t transform(rgb_mat dst_img, rgb_mat src_img, unsigned int img_size, color_transform_t type);
//cudaError_t transform(rgb_t ***image, unsigned int line, transform_t type);
//
//// convert one scanline to grayscale in parallel
//__global__ void grayscale_transform(rgb_list gpu_output, rgb_mat t_src, unsigned int line)
//{
// unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned int idx = y * 100 + x;
//
// rgb_ptr src_rgb = t_src[y][x];
//
// rgb_ptr gpu_rgb = nullptr;
// gpu_rgb->r = 0;
// gpu_rgb->g = 0;
// gpu_rgb->b = 0;
//
// double average = (src_rgb->r + src_rgb->g + src_rgb->b) / 3;
//
// gpu_rgb->r = average;
// gpu_rgb->g = average;
// gpu_rgb->b = average;
//
// gpu_output[idx] = gpu_rgb;
//}
//
//int main()
//{
// // genreate a dummy image
// int img_size = 100 * 100 * sizeof(rgb_ptr);
// int block_size = 4;
// int grid_size = 100000 / block_size;
//
// rgb_mat src_img = (rgb_list*)malloc(100 * sizeof(rgb_list));
// rgb_mat gray_img = (rgb_list*)malloc(100 * sizeof(rgb_list));
//
// for (int y = 0; y < 100; y++)
// {
// src_img[y] = (rgb_list)malloc(100 * sizeof(rgb_ptr));
// gray_img[y] = (rgb_list)malloc(100 * sizeof(rgb_ptr));
// for (int x = 0; x < 100; x++)
// {
// src_img[y][x] = (rgb_ptr)malloc(sizeof(rgb_ptr));
// gray_img[y][x] = (rgb_ptr)malloc(sizeof(rgb_ptr));
// }
// }
//
//
// int i, j;
// for (i = 0; i < 100; i++)
// {
// for (j = 0; j < 100; j++)
// {
// rgb_ptr s_p = src_img[i][j];
// s_p->r = 128.0;
// s_p->g = 76.0;
// s_p->b = 256.0;
// src_img[i][j] = s_p;
//
// rgb_ptr g_p = gray_img[i][j];
// g_p->r = 0.0;
// g_p->g = 0.0;
// g_p->b = 0.0;
// gray_img[i][j] = g_p;
// }
// }
//
// cudaError_t cudaStatus = transform(gray_img, src_img, img_size, block_size, grid_size, grayscale);
// if (cudaStatus != cudaSuccess)
// {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// //cudaStatus = cudaDeviceReset();
// //if (cudaStatus != cudaSuccess)
// //{
// // fprintf(stderr, "cudadevicereset failed!");
// // return 1;
// //}
//
// for (i = 0; i < 100; i++)
// {
// for (int j = 0; j < 100; j++)
// {
// rgb_ptr rgb = gray_img[i][j];
// printf("%.3f : %.3f : %.3f\n", rgb->r, rgb->g, rgb->b);
// }
// }
//
// free(gray_img);
// free(src_img);
//
// return 0;
// system("pause");
//}
//
//// transform an image
//cudaError_t transform(rgb_mat dst_img, rgb_mat src_img, unsigned int img_size, int block_size, int grid_size, color_transform_t type)
//{
// rgb_mat t_src;
// rgb_list gpu_output;
//
// cudaError_t cudaStatus;
//
// cudaStatus = cudaMalloc((void**)&t_src, img_size);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMalloc failed!");
//
// cudaStatus = cudaMalloc((void**)&gpu_output, img_size);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMalloc failed!");
//
// cudaStatus = cudaMemcpy(t_src, src_img, img_size, cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMemcpy failed!");
//
// if (type == grayscale)
// grayscale_transform<<<grid_size, block_size>>>(gpu_output, t_src, img_size);
//
// //// Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
//
// //// Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(dst_img, gpu_output, img_size, cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess)
// fprintf(stderr, "cudaMemcpy failed!");
//
// //cudaStatus = cudaFree(gpu_output);
// //if (cudaStatus != cudaSuccess)
// // fprintf(stderr, "cudaFree failed!");
//
// return cudaStatus;
//}
/// HERE IS A WORKING EXAMPLE
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // cudaDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = cudaDeviceReset();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceReset failed!");
// return 1;
// }
//
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// cudaError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
//
// return cudaStatus;
//}
|
2,355 | #include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <limits.h>
#include <algorithm>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
#define INF INT_MAX-1
// One Floyd-Warshall relaxation: for the fixed row i and pivot k, each
// thread updates one column j of row i:
//   D[i][j] = min(D[i][j], D[i][k] + D[k][j])
__global__
void FloydWarshall(int k, int i, float *matrix, int n)
{
    // This thread's column; guard the grid tail.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= n)
        return;
    float via = matrix[n * i + k] + matrix[k * n + col]; // D[i][k] + D[k][j]
    int cell = n * i + col;                              // D[i][j]
    matrix[cell] = fmin(matrix[cell], via);
}
// Build a synthetic distance matrix, run Floyd-Warshall on the GPU (one
// kernel launch per (pivot, row) pair), and print the elapsed time.
// Reads N_VERTICES and N_THREADS from the environment.
int main(int argc, char *argv[])
{
    char *arg_vertices = getenv("N_VERTICES");
    char *arg_threads_per_block = getenv("N_THREADS");
    size_t vertices = atoi(arg_vertices);
    int threads_per_block = atoi(arg_threads_per_block);
    float *host_matrix =(float *)malloc( vertices * vertices *
                                         sizeof(float));
    // Deterministic synthetic weights; 0 on the diagonal.
    for(int i = 0 ; i < vertices ; i++ ) {
        for(int j = 0 ; j< vertices; j++ ) {
            if( i == j )
                host_matrix[i * vertices + j] = 0;
            else {
                int num = i + j;
                if (num % 3 == 0)
                    host_matrix[i * vertices + j] = num / 2;
                else if (num % 2 == 0)
                    host_matrix[i * vertices + j] = num * 2;
                else
                    host_matrix[i * vertices + j] = num;
            }
        }
    }
    size_t tot = vertices * vertices * sizeof(float);
    float *device_matrix = NULL;
    cudaMalloc((float **)&device_matrix, tot);
    cudaMemcpy(device_matrix, host_matrix, tot, cudaMemcpyHostToDevice);
    // FIX: the original computed `vertices + (threads_per_block - 1) /
    // threads_per_block`, which is vertices + 0 — a precedence bug that
    // launched ~threads_per_block times too many blocks. Ceil-divide instead.
    int blocks_per_grid = (vertices + threads_per_block - 1) / threads_per_block;
    struct timeval tvalBefore, tvalAfter;
    gettimeofday (&tvalBefore, NULL);
    for(int via = 0; via < vertices; via++) {
        for(int j = 0; j < vertices; j++){
            FloydWarshall<<<blocks_per_grid, threads_per_block>>>(via, j, device_matrix, vertices);
            // cudaThreadSynchronize() is deprecated; use the modern call.
            cudaDeviceSynchronize();
        }
    }
    gettimeofday (&tvalAfter, NULL);
    printf("Time: %ld microseconds\n",
            ((tvalAfter.tv_sec - tvalBefore.tv_sec)*1000000L
            +tvalAfter.tv_usec) - tvalBefore.tv_usec
          );
    float *result_matrix =(float *)malloc( vertices * vertices *
                                           sizeof(float));
    cudaMemcpy(result_matrix, device_matrix, tot, cudaMemcpyDeviceToHost);
    // FIX: release host and device allocations (all leaked in the original).
    free(host_matrix);
    free(result_matrix);
    cudaFree(device_matrix);
    return 0;
}
|
2,356 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Print a greeting from every thread (device-side printf; debug use only —
// output is flushed when the host synchronizes).
__global__ void hello_kernel()
{
printf("Hello World from Thread %d\n", threadIdx.x);
}
// Launch a single block of ten threads that each print a greeting.
int main(int argc, char *argv[])
{
    // Select GPU 0 explicitly; warn (but continue) on failure.
    cudaError result = cudaSetDevice(0);
    if (result != cudaSuccess ){
        printf("Error setting default GPU device.\n");
    }
    // One block, ten threads along x.
    dim3 blocksPerGrid(1, 1, 1);
    dim3 threadsPerBlock(10, 1, 1);
    hello_kernel<<<blocksPerGrid, threadsPerBlock>>>();
    // Block until the kernel (and its printf output) has completed.
    cudaDeviceSynchronize();
    return 0;
}
2,357 | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
//extern __device__ int testxyz[1000];
//int localtrace[10000];
//__device__ float* tracehandle;
// Scale by 0.9.
// FIX: use a float literal — the original `0.9` is a double, which promoted
// the multiply to double precision on every call for no benefit.
__device__ float foo_CC(float a)
{
    return a * 0.9f;
}
// Thread-position-dependent conversion: truncate `a` for threads with
// x < 2 or y > 2, otherwise return a+2 (also truncated by the int return).
__device__ int foo_DD(float a)
{
    if (threadIdx.x < 2 || threadIdx.y > 2)
        return (int) a;
    return a + 2;
}
// Combine `a` with one of two helper results, chosen by thread position.
__device__ float foo_BB(float a)
{
    return (threadIdx.x > 3 || threadIdx.y > 11)
        ? a + foo_CC(a)
        : a + (float)foo_DD(a) /2;
}
// Two-input mix selected by thread position: an affine form of `a` for most
// threads, otherwise a blend of a, b and foo_BB(b).
__device__ float foo_AA( float a, float b)
{
    if (threadIdx.x < 8 || threadIdx.y > 4)
        return a*3.1415+1;
    return (b*a)*0.5 + foo_BB(b);
}
// y[i] += x[i]*1.67 + foo_AA(aa, b), with the thread index flattened from a
// 2D grid of 2D blocks. The scalar `a` is unused (kept for signature
// compatibility with the launch site).
__global__ void axpy_kernel2(float a, float* x, float* y)
{
    int flatBlock = blockIdx.x + blockIdx.y * gridDim.x;
    int index = flatBlock * (blockDim.x * blockDim.y)
              + threadIdx.y * blockDim.x + threadIdx.x;
    float aa = y[index] + x[index] + 1.1;
    float b = 0.5*y[index] + 0.25* x[index] + 1.0;
    y[index] += ( x[index]*1.67 + foo_AA(aa, b) );
}
// y[i] = 0.3*x[i] plus an index-dependent offset (99 for most threads,
// 999 + foo_CC(a) for the first few). Same flattened 2D indexing as
// axpy_kernel2.
__global__ void axpy_kernel1(float a, float* x, float* y)
{
    int flatBlock = blockIdx.x + blockIdx.y * gridDim.x;
    int index = flatBlock * (blockDim.x * blockDim.y)
              + threadIdx.y * blockDim.x + threadIdx.x;
    y[index] = x[index]*0.3;
    if (index>2)
        y[index] += 99;
    else
        y[index] += 999 + foo_CC(a);
}
// Driver: allocates x/y on host and device, runs axpy_kernel1 then
// axpy_kernel2 over a user-specified grid/block geometry, and counts how
// many y elements were left unmodified (== their initial ii%5 value).
// NOTE(review): the final message labels that count "incorrect numbers" but
// then says "passed!" when it is zero — the semantics look inverted; confirm
// the intended meaning before relying on the printed verdict.
int main(int argc, char* argv[])
{
//const int kDataLen2 = 128;
float a = 2.0f;
//int blocks2 = 600;
cudaSetDevice(0);
// Expect exactly four geometry arguments.
if (argc != 5)
{
printf("usage: ./axpy [blocks_x] [blocks_y] [threads_x] [threads_y]\n");
exit(1);
}
int blocksx = atoi(argv[1]) ;
int blocksy = atoi(argv[2]) ;
int kDataLenx = atoi(argv[3]);
int kDataLeny = atoi(argv[4]);
// Total element count = total thread count of the launch.
int sizen = blocksx *blocksy *kDataLenx *kDataLeny;
// cudaThreadSetLimit(cudaLimitMallocHeapSize, 1024*1024); //sderek
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024*1024*500); //sderek
// tracetest = (int*)malloc( 1234);
// float host_y[blocks*kDataLen];
// float host_y[blocks*kDataLen];
float* host_x = (float*) malloc( sizen* sizeof(float));
float* host_y = (float*) malloc( sizen* sizeof(float));
void* host_newbu = (void*) malloc( 1000 );
int ii;
// Deterministic initial patterns: x cycles 0..7, y cycles 0..4.
for( ii=0; ii<sizen; ii++)
host_x[ii] = ii%8;
for( ii=0; ii<sizen; ii++)
host_y[ii] = ii%5;
/* int x[5];
x[0] = 13;
printf("%p\n",x);
printf("%p\n",&x);
printf("%d\n",*x);
printf("%d\n",*(x+1));
*/
// Copy input data to device.
float* device_x;
float* device_y;
// void* newbu;
// printf(" %p\n", device_x);
cudaMalloc((void**)&device_x, sizen * sizeof(float));
// printf(" %p\n", device_x);
// printf(" %p\n", device_y);
// NOTE(review): the +18 bytes of slack on device_y has no visible purpose.
cudaMalloc((void**)&device_y, sizen * sizeof(float) + 18);
// printf(" %p\n", device_y);
// printf(" %p\n", newbu);
// cudaMalloc(&newbu, 1000);
// printf(" %p\n", newbu);
/* std::cout << &(device_x) << "\n";
std::cout << &(device_y) << "\n";
std::cout << &(*device_x) << "\n";
std::cout << &(*device_y) << "\n";
std::cout << (device_x) << "\n";
std::cout << (device_y) << "\n";
*/
cudaMemcpy(device_x, host_x, sizen * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_y, host_y, sizen * sizeof(float), cudaMemcpyHostToDevice);
dim3 CTAs(blocksx, blocksy);
dim3 Threads(kDataLenx, kDataLeny);
std::cout << "launching kernel...\n";
// First pass: y = 0.3*x + offset; check launch/execution errors after.
axpy_kernel1<<<CTAs, Threads>>>(a, device_x, device_y);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf ("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
// Second pass: y += x*1.67 + foo_AA(...); same error-check pattern.
axpy_kernel2<<<CTAs, Threads>>>(a, device_x, device_y);
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess) {
printf ("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudaMemcpy(host_y, device_y, sizen* sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(host_newbu, newbu, 1000, cudaMemcpyDeviceToHost);
free(host_newbu);
// cudaFree(newbu);
// Count elements still equal to their initial value, i.e. untouched by
// either kernel.
int verify = 0;
for (int ii = 0; ii < 8; ii++)
std::cout << "y[" << ii << "] = " << host_y[ii] << "\n";
for (int ii = 0; ii < sizen; ii++) {
if( host_y[ii] == ii%5)
verify ++;
// std::cout << "y[" << i << "] = " << host_y[i] << "\n";
}
std::cout << "\n\n[TOOL verify] There are a total of\t" << verify << " incorrect numbers." << std::endl;
if (verify==0)
std::cout << "[TOOL verify] passed!" << std::endl << std::endl;
// for (int i = 0; i < 20; ++i) {
// std::cout << "newtrace [" << i << "] = " << host_newbu[i] << "\n";
// std::cout << & (host_y[i] )<< "\n";
// }
/* cudaMemcpyFromSymbol(localtrace, testxyz, 40*sizeof(int), 0, cudaMemcpyDeviceToHost);
for (int i = 0; i < 20; ++i)
printf("%d\t", localtrace[i] );
std::cout << std::endl;
cudaMemcpyFromSymbol(localtrace+8, testxyz, 40*sizeof(int), 0, cudaMemcpyDeviceToHost);
for (int i = 0; i < 20; ++i)
printf("%d\t", localtrace[i] );
std::cout << std::endl;
*/
// int* show_h;
// cudaMemcpyFromSymbol(show_h, show, sizeof(int), 0, cudaMemcpyDeviceToHost);
// msg = cudaGetSymbolAddress((void **)&d_G, test);
// printf("the address is %p\n", d_G);
// if (msg == cudaSuccess)
// {
// int tmp[4];
// printf("before %d %d %d %d@ %p\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), tmp);
// cudaMemcpyFromSymbol(tracetest, test1, 4*sizeof(int), 0, cudaMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(tmp, test2, 4*sizeof(int), 0, cudaMemcpyDeviceToHost);
// printf("copy %d %d %d %d@ %p\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), tmp);
// cudaMemcpyFromSymbol(tmp, test2, 4*sizeof(int), 0, cudaMemcpyDeviceToHost);
// printf("after %d %d %d %d@ %p\n", tmp[0], tmp[1], tmp[2], tmp[3], tmp);
// }
//else
// std::cout << cudaGetErrorString(msg) << "\n\n";
cudaFree(device_x);
cudaFree(device_y);
cudaDeviceReset();
return 0;
}
|
2,358 | //xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
//error: possible null pointer access
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define N 2//8
#define tid (blockIdx.x * blockDim.x + threadIdx.x)
// Return v[index] doubled (candidate target for the function pointer below).
__device__ float multiplyByTwo(float *v, unsigned int index)
{
return v[index] * 2.0f;
}
// Return v[index] halved (candidate target for the function pointer below).
__device__ float divideByTwo(float *v, unsigned int index)
{
return v[index] * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
// DELIBERATE DEFECT — verification test case (file is marked xfail above):
// casting the integer 3 to a function pointer and calling it is the invalid
// ("null") pointer access the checker is expected to report. Do not fix.
__global__ void foo(float *v)
{
funcType f = (funcType)3; // it's a null pointer
v[tid] = (*f)(v, tid);
}
|
2,359 | #include<stdio.h>
#include<stdlib.h>
#include<curand_kernel.h>
#include<curand.h>
#include<sys/time.h>
// Host-side simulation tunables.
unsigned int NUM_PARTICLES = 1000000;
unsigned int NUM_ITERATIONS = 10;
unsigned int BLOCK_SIZE = 192;
//unsigned int GRID_SIZE = ((NUM_PARTICLES/BLOCK_SIZE) + 1);
unsigned int NUM_STREAMS = 10;
// Wrapper around a float3 holding a particle position.
typedef struct {
float3 posId;
}position;
// Wrapper around a float3 holding a particle velocity.
typedef struct {
float3 velId;
}velocity;
// One particle = position + velocity.
typedef struct {
position pos;
velocity vel;
}Particle;
// Fill all NUM_PARTICLES particles with random positions in [0,10) and
// velocities in [0,100). Consumes exactly six rand() calls per particle,
// in pos x/y/z then vel x/y/z order.
void fill_data(Particle *p)
{
    for(int i=0; i< NUM_PARTICLES; i++)
    {
        Particle &q = p[i];
        q.pos.posId.x = 10*((float)rand()/RAND_MAX);
        q.pos.posId.y = 10*((float)rand()/RAND_MAX);
        q.pos.posId.z = 10*((float)rand()/RAND_MAX);
        q.vel.velId.x = 100*((float)rand()/RAND_MAX);
        q.vel.velId.y = 100*((float)rand()/RAND_MAX);
        q.vel.velId.z = 100*((float)rand()/RAND_MAX);
    }
}
// Advance one stream-chunk of particles by one step (pos += vel).
// `offset` is the chunk's first particle index; `streamSize` its length.
__global__ void particle_kernel_per_iteration(Particle *p, int offset, int streamSize)
{
    int i = (blockIdx.x*blockDim.x)+threadIdx.x;
    if(i < streamSize) {
        p[offset + i].pos.posId.x += p[offset + i].vel.velId.x;
        p[offset + i].pos.posId.y += p[offset + i].vel.velId.y;
        p[offset + i].pos.posId.z += p[offset + i].vel.velId.z;
    }
    // FIX: removed the trailing __syncthreads() from the original — threads
    // here share no data, so the barrier only added latency.
}
/*
 * Copy particles to the GPU in NUM_STREAMS chunks, advance positions by one
 * velocity step per iteration, copy them back, then re-randomize velocities
 * on the host for the next iteration.  Total wall-clock time is printed.
 *
 * TYPE1 build: depth-first issue order (each stream's H2D/kernel/D2H are
 * queued together).  Default build: breadth-first (all H2D, then all
 * kernels, then all D2H), which can overlap better on some hardware.
 *
 * NOTE(review): NUM_PARTICLES is assumed divisible by NUM_STREAMS; any
 * remainder particles would never be copied or processed — confirm callers.
 */
void update_velocity_position_in_gpu(Particle *p)
{
    struct timeval start_time;
    struct timeval stop_time;
    Particle *gPar = NULL;
    cudaMalloc(&gPar, NUM_PARTICLES * sizeof(Particle));
    // Per-stream chunk size, in elements and bytes.
    unsigned long streamSize = NUM_PARTICLES/NUM_STREAMS;
    unsigned long streamBytes = streamSize * sizeof(Particle);
    cudaStream_t stream[NUM_STREAMS];
    for(int i=0; i<NUM_STREAMS; i++)
        cudaStreamCreate(&stream[i]);
    //Start time
    gettimeofday(&start_time, NULL);
#ifdef TYPE1
    // Depth-first: queue copy-in, kernel, copy-out per stream in turn.
    for(int i=0; i<NUM_ITERATIONS; i++)
    {
        for(int s=0; s<NUM_STREAMS; s++)
        {
            unsigned long offset = s * streamSize;
            // Copy Data to GPU Memory Asynchronously
            cudaMemcpyAsync(&gPar[offset], &p[offset], streamBytes, cudaMemcpyHostToDevice, stream[s]);
            //Launch kernel (grid padded by one block; kernel guards the tail)
            particle_kernel_per_iteration<<<((streamSize/BLOCK_SIZE) + 1), BLOCK_SIZE, 0, stream[s]>>>(gPar, offset, streamSize);
            //cudaDeviceSynchronize();
            //Copy Data back to Host
            cudaMemcpyAsync(&p[offset], &gPar[offset], streamBytes, cudaMemcpyDeviceToHost, stream[s]);
        }
        // Wait for every stream before touching p on the host.
        cudaDeviceSynchronize();
        //Update Velocity in Host before copying data to GPU Memory
        for(int j=0; j<NUM_PARTICLES;j++)
        {
            p[j].vel.velId.x = 100*((float)rand()/RAND_MAX);
            p[j].vel.velId.y = 100*((float)rand()/RAND_MAX);
            p[j].vel.velId.z = 100*((float)rand()/RAND_MAX);
        }
    }
#else
    // Breadth-first: batch all H2D copies, then all kernels, then all D2H.
    for(int i=0; i<NUM_ITERATIONS; i++)
    {
        for(int s=0; s<NUM_STREAMS; s++)
        {
            unsigned long offset = s * streamSize;
            // Copy Data to GPU Memory Asynchronously
            cudaMemcpyAsync(&gPar[offset], &p[offset], streamBytes, cudaMemcpyHostToDevice, stream[s]);
        }
        for(int s=0; s<NUM_STREAMS; s++)
        {
            unsigned long offset = s * streamSize;
            //Launch kernel
            particle_kernel_per_iteration<<<((streamSize/BLOCK_SIZE) + 1), BLOCK_SIZE, 0, stream[s]>>>(gPar, offset, streamSize);
        }
        for(int s=0; s<NUM_STREAMS; s++)
        {
            unsigned long offset = s * streamSize;
            //Copy Data back to Host
            cudaMemcpyAsync(&p[offset], &gPar[offset], streamBytes, cudaMemcpyDeviceToHost, stream[s]);
        }
        // Wait for every stream before touching p on the host.
        cudaDeviceSynchronize();
        //Update Velocity in Host before copying data to GPU Memory
        for(int j=0; j<NUM_PARTICLES;j++)
        {
            p[j].vel.velId.x = 100*((float)rand()/RAND_MAX);
            p[j].vel.velId.y = 100*((float)rand()/RAND_MAX);
            p[j].vel.velId.z = 100*((float)rand()/RAND_MAX);
        }
    }
#endif
    //Stop time
    gettimeofday(&stop_time, NULL);
    for(int i=0; i<NUM_STREAMS; i++)
        cudaStreamDestroy(stream[i]);
    cudaFree(gPar);
    printf("Total time of Execution in GPU: %ld msec\n\n",
    ((stop_time.tv_sec*1000000 + stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec))/1000);
}
/*
 * Usage: <prog> NUM_PARTICLES NUM_STREAMS
 * Allocates pinned host memory (required for true async copies), fills it
 * with random particles, and runs the streamed GPU update.
 */
int main(int argc, char *argv[])
{
    if(argc != 3)
    {
        printf("No. of arguments to be passed should be 2 i.e. 1st as NUM_PARTICLES and 2nd as NUM_STREAMS\n");
        exit(1);
    }
    NUM_PARTICLES = atoi(argv[1]);
    NUM_STREAMS = atoi(argv[2]);
    Particle *par = NULL;
    // Pinned allocation so cudaMemcpyAsync can overlap with kernels.
    cudaMallocHost(&par, NUM_PARTICLES*sizeof(Particle));
    fill_data(par);
    update_velocity_position_in_gpu(par);
    // Bug fix: memory from cudaMallocHost must be released with
    // cudaFreeHost; cudaFree expects a device pointer.
    cudaFreeHost(par);
    return 0;
}
|
2,360 | #include "includes.h"
/*
 * Column-sum kernel: thread t sums column t of the rows x columns
 * row-major matrix mat1 into matanswer[t].
 *
 * Expects a 1-D launch with at least `columns` threads; threads beyond
 * `columns` are now guarded out instead of writing past the output buffer.
 */
__global__ void kernel_1(int columns, int rows, float* mat1, float* matanswer) {
    int columna = threadIdx.x; // column handled by this thread (not a row)
    if (columna < columns) {   // bounds guard: launch may be padded
        float temp_value = 0;
        for (int k = 0; k < rows; k++) {
            temp_value = temp_value + mat1[(k * columns) + columna];
        }
        matanswer[columna] = temp_value;
    }
}
2,361 | #include <iostream>
#include <sys/time.h>
#define N 16
// One block per element: block i computes c[i] = a[i] + b[i].
__global__ void add(int *a, int *b, int *c)
{
    int idx = blockIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// CPU reference implementation of the elementwise addition over N items.
void add_host(int *a, int *b, int *c)
{
    for (int idx = 0; idx < N; ++idx)
        c[idx] = a[idx] + b[idx];
}
/*
 * Driver: builds two 16-element vectors (a[i] = -i, b[i] = i*i), adds them
 * on the GPU with one block per element, and prints the results together
 * with wall-clock timestamps taken around the launch.
 */
int main (void)
{
    // variables to store host and device data
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // allocate memory on the GPU
    cudaMalloc( (void **) &dev_a, N * sizeof(int) );
    cudaMalloc( (void **) &dev_b, N * sizeof(int) );
    cudaMalloc( (void **) &dev_c, N * sizeof(int) );
    // fill the arrays with data
    for(int i = 0; i < N; i++)
    {
        a[i] = -i;
        b[i] = i * i;
    }
    // copy arrays a and b to the device
    cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );
    // print the current time
    struct timeval tv;
    struct timezone tz;
    struct tm *tm;
    gettimeofday(&tv, &tz);
    tm = localtime(&tv.tv_sec);
    printf(" %d:%02d:%02d %d \n", tm->tm_hour, tm->tm_min, tm->tm_sec, tv.tv_usec);
    // Do the addition operation.  NOTE: the launch is asynchronous, so the
    // timestamp below measures launch overhead, not kernel completion.
    add<<<N,1>>>( dev_a, dev_b, dev_c );
    //add_host((int *)a, (int *)b, (int *)c);
    // print current time
    gettimeofday(&tv, &tz);
    tm = localtime(&tv.tv_sec);
    printf(" %d:%02d:%02d %d \n", tm->tm_hour, tm->tm_min, tm->tm_sec, tv.tv_usec);
    // copy the array 'c' back from the device (this blocking copy also
    // synchronizes with the kernel, so results are complete before printing)
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );
    // display the results
    for(int i = 0; i<N; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    // free the memory used on the device
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    return 0;
}
|
2,362 | #include <stdio.h>
#define FIRST_CHAR 32
#define LAST_CHAR 128
#define NBR 96
/*
 * 256-bin byte histogram with per-block privatization.
 *
 * Each block accumulates into a shared-memory copy (`temp`) and merges it
 * into the global histogram once at the end, which greatly reduces
 * contention on global atomics.  Must be launched with exactly 256 threads
 * per block (one thread per bin for the init and merge phases).
 *
 * Fixes two defects in the original: (1) the accumulation loop added
 * directly to the global `histo` while `temp` stayed all-zero, defeating
 * the privatization; (2) there was no barrier between zeroing `temp` and
 * accumulating into it, a data race.  Final results are unchanged.
 */
__global__ void histo_kernel(unsigned char *buffer,long size, unsigned int *histo){
    __shared__ unsigned int temp[256];
    temp[threadIdx.x]=0;
    __syncthreads();                      // all bins zeroed before any update
    int i = threadIdx.x + blockIdx.x *blockDim.x;
    int offset = blockDim.x *gridDim.x;   // grid-stride step
    while(i<size){
        atomicAdd(&(temp[buffer[i]]),1);  // accumulate into the block-local copy
        i+=offset;
    }
    __syncthreads();                      // every thread done with temp
    atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] );  // merge bin -> global
}
/*
 * Builds a byte histogram of the file named by argv[1] on the GPU, prints
 * counts for the printable range [FIRST_CHAR, LAST_CHAR) to stdout and to
 * the file named by argv[2], and reports the elapsed GPU time.
 */
int main(int argc, char *argv[]){
    // unsigned char *buffer = (unsigned char *) big_random_block(SIZE);
    if(argc <= 2){
        fprintf(stderr, "Arguments non valide");
        return 1;
    }
    FILE *f_input;
    FILE *f_output;
    long lSize;
    char *buffer;
    f_input = fopen ( argv[1] , "r" );
    f_output = fopen( argv[2],"w");
    // NOTE(review): f_output is never checked for NULL — a bad argv[2]
    // crashes the fprintf below.
    if( !f_input ) perror(argv[1]),exit(1);
    // Measure the file size by seeking to the end.
    fseek( f_input , 0L , SEEK_END);
    lSize = ftell( f_input );
    rewind( f_input );
    printf("The size is : %li", lSize);
    //buffer = calloc( 1, lSize+1 );
    buffer =(char*) malloc(lSize);
    if( !buffer ) fclose(f_input),fputs("memory alloc fails",stderr),exit(1);
    if( 1!=fread( buffer , lSize, 1 , f_input) )
        fclose(f_input),free(buffer),fputs("entire read fails",stderr),exit(1);
    /*Create events for computing the running time*/
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    /*Launch event to specify the start of running*/
    cudaEventRecord( start, 0);
    /*allocate device memory*/
    unsigned char *dev_buffer;
    unsigned int *dev_histo;
    /*Give space in Global memory of GPU to store different variable*/
    cudaMalloc( (void**)&dev_buffer, lSize);
    /*Copy from CPU Global memory to GPU Global memory*/
    cudaMemcpy( dev_buffer, buffer, lSize, cudaMemcpyHostToDevice );
    /*Create space for histo variable and initialize each slot to 0.
      NOTE(review): allocated as 256*sizeof(long) but cleared and copied as
      256*sizeof(int) — the over-allocation is harmless but the element
      size should consistently be sizeof(unsigned int).*/
    cudaMalloc( (void**)&dev_histo, 256 * sizeof( long));
    cudaMemset( dev_histo, 0, 256 * sizeof( int ));
    cudaDeviceProp prop;
    cudaGetDeviceProperties( &prop, 0 );
    // Two blocks per SM, 256 threads each (one thread per histogram bin).
    int blocks = prop.multiProcessorCount;
    histo_kernel<<<blocks*2,256>>>( dev_buffer, lSize, dev_histo );
    /*Host-side histogram; copy the result back from GPU global memory*/
    unsigned int histo[256];
    cudaMemcpy( histo, dev_histo,256 * sizeof( int ),cudaMemcpyDeviceToHost);
    // Report only the printable-ASCII bins.
    for(int i =FIRST_CHAR;i< LAST_CHAR;i++){
        printf("%c:%d\n",i,histo[i]);
        fprintf(f_output, "%c:%d\n",i,histo[i]);
    }
    /*Get event at the end of loop*/
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, stop );
    printf( "Time to generate: %3.1f ms\n", elapsedTime );
    /*Destroy event for running time*/
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    /*Free memory and close the files**/
    cudaFree( dev_histo );
    cudaFree( dev_buffer );
    fclose(f_input);
    fclose(f_output);
    free( buffer );
    return 0;
}
|
2,363 | #include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h> // needed for the function sqrtf()
#define TILE_SIZE 32 // NB // Block SIZE
/*
* Function to perform rank-k update
* half of the threads working
*/
/*
 * Rank-k update for one TILE_SIZE x TILE_SIZE tile:
 *   rA2 -= rA1 * rA1^T, lower triangle only (column <= row), one thread
 * per element — hence "half of the threads working".
 *
 * NOTE(review): row/column include blockIdx terms while rA1/rA2 are
 * indexed with a TILE_SIZE stride, so this is only in-bounds for a
 * single-tile (one-block) launch — confirm against callers.
 */
__device__ void ssyrk_tile(float* rA1, float* rA2)
{
    int row = blockIdx.y * TILE_SIZE + threadIdx.y;
    int column = blockIdx.x * TILE_SIZE + threadIdx.x;
    if(column <= row)
    {
        float updatedValue = rA2[row * TILE_SIZE + column];
        for(int k=0; k<TILE_SIZE; k++)
        {
            updatedValue -= rA1[row * TILE_SIZE + k] * rA1[column * TILE_SIZE + k];
        }
        rA2[row * TILE_SIZE + column] = updatedValue;
    }
}
/*
* Function to perform general matrix multiplication
* DOUBT: I think calculation is given wrong in paper it should be rA2[k][n]
*/
/*
 * Tile GEMM update: rA3 -= rA1 * rA2 for one TILE_SIZE x TILE_SIZE tile,
 * one thread per output element (global block/thread addressing, as in
 * the other tile helpers).
 */
__device__ void sgemm_tile(const float* rA1, const float* rA2, float* rA3)
{
    const int r = blockIdx.y * TILE_SIZE + threadIdx.y;
    const int c = blockIdx.x * TILE_SIZE + threadIdx.x;
    float acc = rA3[r * TILE_SIZE + c];
    for (int k = 0; k < TILE_SIZE; ++k)
        acc -= rA1[r * TILE_SIZE + k] * rA2[k * TILE_SIZE + c];
    rA3[r * TILE_SIZE + c] = acc;
}
/*
* Function to store full tile from shared memory to global memory
*/
/*
 * Copy one full TILE_SIZE x TILE_SIZE tile from shared memory back to
 * global memory, one element per thread, then barrier so the tile may be
 * reused.
 */
__device__ void store_full(const float* s_data, float* g_data)
{
    const int gr = blockIdx.y * TILE_SIZE + threadIdx.y;
    const int gc = blockIdx.x * TILE_SIZE + threadIdx.x;
    g_data[gr * TILE_SIZE + gc] = s_data[threadIdx.y * TILE_SIZE + threadIdx.x];
    __syncthreads();
}
/*
* Function to store lower triangular tile from shared memory to global memory
*/
/*
 * Copy the lower triangle of a shared-memory tile to global memory and
 * zero-fill the strict upper triangle, then barrier.
 */
__device__ void store_lower(const float* s_data, float* g_data)
{
    const int gr = blockIdx.y * TILE_SIZE + threadIdx.y;
    const int gc = blockIdx.x * TILE_SIZE + threadIdx.x;
    const int lr = threadIdx.y;
    const int lc = threadIdx.x;
    g_data[gr * TILE_SIZE + gc] = (lc <= lr) ? s_data[lr * TILE_SIZE + lc] : 0;
    __syncthreads();
}
/*
* Function to perform Choleshky Factorization for a tile
*/
/*
 * In-place Cholesky factorization of one TILE_SIZE x TILE_SIZE tile:
 * per column k — sqrt the pivot, scale the sub-column, update the trailing
 * submatrix.  The barriers are reached by all threads (the guards are on
 * thread ids, not data), so the sync pattern is safe.
 *
 * NOTE(review): tx/ty include blockIdx terms while t_A is indexed as a
 * single tile — only correct for a one-block launch; confirm callers.
 */
__device__ void spotrf_tile(float* t_A)
{
    int ty = blockIdx.x*blockDim.x + threadIdx.x; // col
    int tx = blockIdx.y*blockDim.y + threadIdx.y; // row
    for(int k{0};k<TILE_SIZE;k++){
        // square root of diagonal elements
        if(tx==0 && ty==0)
            t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]);
        __syncthreads();
        // division step, done in parallel by the threads of column k
        if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty == k)
        {
            t_A[(tx+1)*TILE_SIZE + k]/= t_A[k*TILE_SIZE + k];
        }
        __syncthreads();
        // trailing-submatrix (rank-1) update for columns >= k
        if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty >= k)
        {
            t_A[(tx+1)*TILE_SIZE + (ty+1)]-= t_A[(tx+1)*TILE_SIZE + k]*t_A[(ty+1)*TILE_SIZE + k];
        }
        __syncthreads();
    }
}
/*
* Function to perform triangular solve for a tile
*/
/*
 * Triangular solve for one tile: column-by-column elimination of t_A2 (the
 * current unknown tile) against the factorized triangle in t_A1 — divide
 * column i by the pivot t_A1[i][i], then subtract its contribution from
 * the later columns.  Barriers are hit by all threads (guards are on ids).
 *
 * NOTE(review): ty/tx include blockIdx terms while both tiles use a
 * TILE_SIZE stride, as in spotrf_tile — verify single-block launch.
 */
__device__ void strsm_tile(float *t_A1, float *t_A2)
{
    // t_A2 is the current unknown
    int ty = blockIdx.x*TILE_SIZE + threadIdx.x;
    int tx = blockIdx.y*TILE_SIZE + threadIdx.y;
    for(int i{0};i<TILE_SIZE;i++){
        // scale column i of the unknown tile by the diagonal pivot
        if(ty==0){
            t_A2[tx*TILE_SIZE + i]/= t_A1[i*TILE_SIZE + i];
        }
        __syncthreads();
        // eliminate column i from the remaining columns
        if(ty>i && i<TILE_SIZE-1)
        {
            t_A2[tx*TILE_SIZE+ty]-= t_A2[tx*TILE_SIZE + i]*t_A1[ty*TILE_SIZE + i];
        }
        __syncthreads();
    }
}
/*
* Function to load a full tile from global memory to shared memory
*/
/*
 * Load one TILE_SIZE x TILE_SIZE tile from global memory into shared
 * memory, one element per thread, then barrier.
 *
 * NOTE(review): the shared store S_A[l_tx * TILE_SIZE + l_ty] (x-major)
 * versus the global read t_A[g_tx * TILE_SIZE + g_ty] (y-major) means the
 * shared copy is transposed relative to the global layout — confirm the
 * consumers expect that.
 */
__device__ void load_full(float *t_A,float * S_A)
{
    // assigning a 2-D array in shared memory
    int g_ty = blockIdx.x*blockDim.x + threadIdx.x; // col
    int g_tx = blockIdx.y*blockDim.y + threadIdx.y; // row
    int l_tx = threadIdx.x;
    int l_ty = threadIdx.y;
    if(l_tx<TILE_SIZE && l_ty<TILE_SIZE)
        S_A[l_tx * TILE_SIZE + l_ty] = t_A[g_tx*TILE_SIZE + g_ty];
    __syncthreads();
}
|
2,364 | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
// kernel
/*
 * 1-D convolution: d_n[i] = sum_j d_m[i - maskLength/2 + j] * d_mask[j],
 * with out-of-range input samples treated as zero.  One thread per output
 * element.
 *
 * Bug fix: the original did `d_n[i] += ...` on a freshly cudaMalloc'ed
 * (uninitialized) buffer, folding garbage into every output.  We now
 * accumulate in a register and assign once.
 */
__global__ void convolution_1D_Kernel(float* d_m, float* d_mask, float* d_n, size_t length, size_t maskLength)
{
    // output index for this thread
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // leftmost input index covered by the mask (may be negative)
    int m_index = i - (int)maskLength / 2;
    // thread boundary check
    if(i < length)
    {
        float sum = 0.0f;
        for(int j = 0; j < maskLength; ++j)
        {
            int src = m_index + j;
            if(src >= 0 && src < length)
            {
                sum += d_m[src] * d_mask[j];
            }
        }
        d_n[i] = sum;
    }
}
// CUDA error checking
// Print and abort on any pending CUDA error.  Intended to be called as
// errorCheck(__LINE__) on the line AFTER the API call, hence `line - 1`
// in the message.
void errorCheck(unsigned int line)
{
    cudaError_t status = cudaGetLastError();
    if(status == cudaSuccess)
        return;
    printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
// host function containing kernel call
/*
 * Host wrapper for the 1-D convolution: stages m and mask on the device,
 * zero-fills the output buffer, launches one thread per output element
 * (1024-thread blocks), and copies the result back into n.
 */
void convolution_1D(float* m, float* mask, float* n, size_t length, size_t maskLength)
{
    dim3 numOfBlocks(ceil(length / 1024.0), 1, 1);
    dim3 numOfThreads(1024, 1, 1);
    size_t bytes_m = length * sizeof(float);
    size_t bytes_mask = maskLength * sizeof(float);
    size_t bytes_n = length * sizeof(float);
    float* d_m;
    float* d_mask;
    float* d_n;
    cudaMalloc((void**) &d_m, bytes_m);
    errorCheck(__LINE__);
    cudaMalloc((void**) &d_mask, bytes_mask);
    errorCheck(__LINE__);
    cudaMalloc((void**) &d_n, bytes_n);
    errorCheck(__LINE__);
    cudaMemcpy(d_m, m, bytes_m, cudaMemcpyHostToDevice);
    errorCheck(__LINE__);
    cudaMemcpy(d_mask, mask, bytes_mask, cudaMemcpyHostToDevice);
    errorCheck(__LINE__);
    // Bug fix: cudaMalloc leaves d_n uninitialized, and the kernel
    // historically accumulated into it — start from a clean zero buffer.
    cudaMemset(d_n, 0, bytes_n);
    errorCheck(__LINE__);
    convolution_1D_Kernel<<<numOfBlocks, numOfThreads>>>(d_m, d_mask, d_n, length, maskLength);
    errorCheck(__LINE__);
    cudaMemcpy(n, d_n, bytes_n, cudaMemcpyDeviceToHost);
    errorCheck(__LINE__);
    cudaFree(d_m);
    errorCheck(__LINE__);
    cudaFree(d_mask);
    errorCheck(__LINE__);
    cudaFree(d_n);
    errorCheck(__LINE__);
}
/*
 * Driver: builds a ~15M-sample random signal and a 121-tap random mask,
 * runs the GPU convolution, and reports the elapsed wall time.
 */
int main()
{
    struct timespec start, end;
    srand(time(NULL));
    // ~15 MiB of samples plus up to 1M of random jitter in the length
    size_t length = rand() % 1048577 + 15728640;
    size_t maskLength = 121;
    float* m = (float*) malloc(length * sizeof(float));
    float* mask = (float*) malloc(maskLength * sizeof(float));
    float* n = (float*) malloc(length * sizeof(float));
    for(size_t i = 0; i < length; ++i)
    {
        m[i] = rand() % 129 - 64;        // samples in [-64, 64]
    }
    for(size_t j = 0; j < maskLength; ++j)
    {
        mask[j] = rand() % 1001 / 1000.0; // taps in [0, 1]
    }
    clock_gettime(CLOCK_REALTIME, &start);
    // do convolution
    convolution_1D(m, mask, n, length, maskLength);
    clock_gettime(CLOCK_REALTIME, &end);
    // Bug fix: execTime is a long integer; printing it with %d was
    // undefined behavior.  Use long + %ld.
    long execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
    printf("Execution time: %ld microseconds.", execTime);
    // Release the host buffers (previously leaked).
    free(m);
    free(mask);
    free(n);
    return 0;
}
|
2,365 | #include <stdio.h>
//compilar: nvcc matrizMultiplicacao.cu -o matrizMultiplicacao
//for i in `seq 1 10`; do ./matrizMultiplicacao; done
#define N 2048
#define B 32
// c = a * b for N x N matrices, one thread per output element.  Note the
// swapped mapping from the original: blockIdx.x/threadIdx.x walk columns
// and blockIdx.y/threadIdx.y walk rows; the result is written at
// c[row + col * N], exactly as before.
__global__ void matrix_multi(float *a, float *b, float *c) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N || col >= N)
        return;
    float acc = 0.0;
    for (int k = 0; k < N; k++) {
        acc += a[row * N + k] * b[col + N * k];
    }
    c[row + col * N] = acc;
}
/*
 * Times one N x N (2048 x 2048) GPU matrix multiply — including the
 * host<->device copies — with CUDA events, and prints the elapsed seconds.
 */
int main() {
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    int size = N;
    // B x B thread blocks (32 x 32 = 1024 threads per block).
    dim3 dimen (B, B);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc( (void **) &d_a, size*size*sizeof(float) );
    cudaMalloc( (void **) &d_b, size*size*sizeof(float) );
    cudaMalloc( (void **) &d_c, size*size*sizeof(float) );
    a = (float *)malloc( size*size*sizeof(float) );
    b = (float *)malloc( size*size*sizeof(float) );
    c = (float *)malloc( size*size*sizeof(float) );
    // Ramp inputs; output cleared on the host.
    for( int i = 0; i < N*N; i++ ) {
        a[i] = b[i] = i;
        c[i] = 0;
    }
    cudaEventRecord(start);
    cudaMemcpy( d_a, a, size*size*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, size*size*sizeof(float), cudaMemcpyHostToDevice );
    // Ceil-divide so the grid covers all N rows/columns.
    dim3 grade ((N + B-1)/B, (N + B-1)/B);
    matrix_multi<<<grade, dimen>>>( d_a, d_b, d_c );
    // This blocking copy-back also synchronizes with the kernel.
    cudaMemcpy( c, d_c, size*size*sizeof(float), cudaMemcpyDeviceToHost );
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // Report seconds (event time is in milliseconds).
    printf("%f\n", milliseconds/1000.0);
    //printf( "c[0] = %lf\n", c[0] );
    //printf( "c[%d] = %lf\n",N*N, c[N*N-1] );
    /*
    int i;
    for(i=0; i<N*N; i++){
    printf( "c[%d] = %lf\n",i, c[i] );
    }
    */
    free(a);
    free(b);
    free(c);
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );
    return 0;
} /* end main */
|
2,366 | #include<stdio.h>
#include<cuda_runtime.h>
#include "device_launch_parameters.h"
#include <ctime>
#include <iostream>
using namespace std;
#define BLOCK 16
#define WIDTH 1024
float* d_A, * d_B, * d_C;
// Naive WIDTH x WIDTH row-major matrix multiply: C = A * B, one thread per
// output element.  No bounds guard: WIDTH is a multiple of BLOCK, so the
// launch grid covers the matrix exactly.
__global__ void d_multiply0(float* A, float* B, float* C) {
    const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
    const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
    float acc = 0.0f;
    for (unsigned int k = 0; k < WIDTH; ++k) {
        acc += A[WIDTH * row + k] * B[WIDTH * k + col];
    }
    C[WIDTH * row + col] = acc;
}
float ha_A[WIDTH * WIDTH];
float ha_B[WIDTH * WIDTH];
float ha_C[WIDTH * WIDTH];
/*
 * Copy the host matrices to the device, run the multiply kernel, fetch the
 * result into ha_C, and report the round-trip time in clock ticks.
 *
 * NOTE(review): this frees d_A/d_B/d_C at the end, but d_process() is the
 * only allocator — calling d_multiply() a second time would operate on
 * freed device pointers.
 */
void d_multiply() {
    clock_t start = clock();
    size_t size = sizeof(float) * WIDTH * WIDTH;
    cudaMemcpy(d_A, ha_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, ha_B, size, cudaMemcpyHostToDevice);
    // WIDTH is a multiple of BLOCK, so this grid covers the matrix exactly.
    dim3 grid(WIDTH / BLOCK, WIDTH / BLOCK);
    dim3 block(BLOCK, BLOCK);
    d_multiply0 << < grid, block >> > (d_A, d_B, d_C);
    // Blocking copy-back doubles as kernel synchronization.
    cudaMemcpy(ha_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    clock_t end = clock();
    cout << "CUDA計算時間:" << (double)(end - start) << endl;
}
// Allocate the device buffers, fill the host inputs with ramp data, run
// the multiply once, and return a pointer to the host result matrix.
float* d_process() {
    cudaMalloc((void**)&d_A, sizeof(float) * WIDTH * WIDTH);
    cudaMalloc((void**)&d_B, sizeof(float) * WIDTH * WIDTH);
    cudaMalloc((void**)&d_C, sizeof(float) * WIDTH * WIDTH);
    for (unsigned int idx = 0; idx < WIDTH * WIDTH; ++idx) {
        ha_A[idx] = (float)idx;
        ha_B[idx] = (float)idx;
    }
    d_multiply();
    return ha_C;
}
2,367 | #include "includes.h"
// Gradient of mean-squared error: gradInput = norm * (input - target),
// computed one frame (row of length `dim`) per block, with the block's
// threads striding over the row.
__global__ void cunn_MSECriterion_updateGradInput_kernel(float *gradInput, float *input, float *target, float norm, int nframe, int dim)
{
    const int frame = blockIdx.x;
    float *g_row = gradInput + frame * dim;
    float *in_row = input + frame * dim;
    float *t_row = target + frame * dim;
    // gradient, strided over the row by blockDim.x
    for (int i = threadIdx.x; i < dim; i += blockDim.x)
        g_row[i] = norm * (in_row[i] - t_row[i]);
}
// Double each element of `a` addressed by a 2-D thread block.
// NOTE(review): the row stride of 4 assumes blockDim.x == 4 — confirm the
// launch configuration.
__global__ void doublify(float *a)
{
    int idx = threadIdx.x + threadIdx.y * 4;
    a[idx] = a[idx] * 2;
}
|
2,369 | #include <stdio.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// problem parameters
const double a = 1.;
const double b = 1.;
const int nx = 1024; //number of node points along y
const int ny = 1024; //number of node points along x
//convergence parameters
double tol = 1e-4;
int iter_max = 1000;
// Analytic reference solution of the Laplace problem:
// u(x, y) = sin(pi * x) * exp(-pi * y).
double sol_ref(double x, double y)
{
    const double u = sin(M_PI * x) * exp(-M_PI * y);
    return u;
}
/*
 * Fill x[0..nx] and y[0..ny] with uniformly spaced node coordinates on
 * [0, a] and [0, b].
 *
 * NOTE(review): indices 0..nx inclusive are written, so both arrays must
 * hold nx+1 (resp. ny+1) doubles; the single shared loop also assumes
 * nx == ny — confirm against the allocations in main().
 */
void discretisation(double *x, double *y)
{
    double dx = a/nx; // spatial step along x
    double dy = b/ny; // spatial step along y
    for (int i=0; i<=nx; i++)
    {
        x[i] = i*dx;
        y[i] = i*dy;
    }
}
/*
 * Impose Dirichlet boundary values from the analytic solution on all four
 * edges of T.
 *
 * NOTE(review): the strides are inconsistent — T[i*nx] vs T[i*ny+ny] vs
 * T[nx*ny+j] — which only agree when nx == ny, and they should match
 * laplace2d's T[j*nx + i] addressing; verify the intended
 * (nx+1) x (ny+1) layout.
 */
void boundary(double *T, double *x, double *y)
{
    /* Boundary conditions along the x axis */
    for ( int i=0; i<=nx; i++)
    {
        T[i*nx] = sol_ref(x[i], y[0]);
        T[i*ny+ny] = sol_ref(x[i], y[ny]);
    }
    /* Boundary conditions along the y axis */
    for ( int j=0; j<=ny; j++)
    {
        T[j] = sol_ref(x[0], y[j]);
        T[nx*ny+j] = sol_ref(x[nx], y[j]);
    }
}
// One Jacobi sweep over the interior of T (row stride nx): each interior
// point of Tnew becomes the average of its four neighbours in T, *error
// accumulates the maximum pointwise change, and Tnew's interior is then
// copied back into T.
void laplace2d(double *T, double *Tnew, double *error)
{
    for (int j = 1; j <= nx - 1; j++)
    {
        for (int i = 1; i <= ny - 1; i++)
        {
            double avg = 0.25 * (T[j*nx + (i+1)] + T[j*nx + (i-1)]
                               + T[(j-1)*nx + i] + T[(j+1)*nx + i]);
            Tnew[j*nx + i] = avg;
            *error = fmax(*error, fabs(avg - T[j*nx + i]));
        }
    }
    for (int j = 1; j <= nx - 1; j++)
    {
        for (int i = 1; i <= ny - 1; i++)
        {
            T[j*nx + i] = Tnew[j*nx + i];
        }
    }
}
/*
 * Jacobi iteration driver: discretise, set boundary conditions, and sweep
 * until the max pointwise change drops below tol or iter_max is reached.
 */
int main(int argc,char **argv)
{
    double tol = 1e-4;
    int iter = 0;
    double error;
    // calloc so the interior starts from zero instead of indeterminate
    // heap garbage — the original malloc left the first sweeps averaging
    // uninitialized values.
    double *T = (double*) calloc((nx+1) * (ny+1), sizeof(double));
    double *Tnew = (double*) calloc((nx+1) * (ny+1), sizeof(double));
    // discretisation() writes indices 0..nx / 0..ny inclusive, i.e. nx+1
    // and ny+1 slots; the original nx / ny allocations were one short.
    double *x = (double*) malloc((nx+1) * sizeof(double));
    double *y = (double*) malloc((ny+1) * sizeof(double));
    if(!x || !y || !T || !Tnew ) return 0;
    discretisation(x, y);
    boundary(T, x, y);
    while (iter < iter_max )
    {
        error = 0.0;
        laplace2d(T, Tnew, &error);
        if (iter % 100 == 0 ) printf("%d, %0.6f\n", iter, error);
        if (error < tol) break;
        iter++;
    }
    free(T);
    free(Tnew);
    free(x);
    free(y);
    return 0;
}
|
2,370 | #include "includes.h"
// Flatten a 3-D grid of 3-D blocks into a single linear global thread id.
__device__ unsigned int getGid3d3d(){
    int block = blockIdx.x
              + blockIdx.y * gridDim.x
              + blockIdx.z * gridDim.x * gridDim.y;
    int local = threadIdx.x
              + threadIdx.y * blockDim.x
              + threadIdx.z * blockDim.x * blockDim.y;
    return block * (blockDim.x * blockDim.y * blockDim.z) + local;
}
// Forward difference: out[i] = (data[i+stride] - data[i]) / dx, falling
// back to data[i] / dx when i + stride would run past the end.
__global__ void derive(double *data, double *out, int stride, int gsize, double dx){
    int gid = getGid3d3d();
    if (gid >= gsize)
        return;
    if (gid + stride < gsize)
        out[gid] = (data[gid + stride] - data[gid]) / dx;
    else
        out[gid] = data[gid] / dx;
}
2,371 | #include "includes.h"
// Turn a buffer of uniform randoms into a binary keep/drop mask:
// maskPtr[i] becomes 1.0f where dropout < maskPtr[i], else 0.0f.
// Launch layout: 2-D grid of 1-D blocks, flattened row-major below.
__global__ void RBMDropoutMaskKernel( float *maskPtr, float dropout, int thisLayerSize )
{
    const int idx = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (idx >= thisLayerSize)
        return;
    maskPtr[idx] = (dropout < maskPtr[idx]) ? 1.0f : 0.0f;
}
2,372 | #include "cuda_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#define NUM_ELEMENTS (1024 * 1024 * 10)
#define RUN (100)
void globalReduceSum(float *out, const float *in, int numElements);
void sharedReduceSum(float *out, const float *in, int numElements);
void warpReduceSum(float *out, const float *in, int numElements);
/*
 * Benchmark harness: warm up `fptr` 10 times, then time RUN invocations
 * with CUDA events and report the achieved global-memory read bandwidth.
 *
 * fptr is one of the reduce launchers declared above; `out` points to a
 * single device float accumulator and `in` to the input array.  The
 * printed "Time" is the total for all RUN runs.
 */
void runTest(const char *label, void(*fptr) (float *, const float *, int), float *out, const float *in, int numElements)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // warm up
    for (int i = 0; i < 10; i++)
    {
        fptr(out, in, numElements);
        cudaDeviceSynchronize();
    }
    cudaEventRecord(start);
    for (int i = 0; i < RUN; i++)
    {
        fptr(out, in, numElements);
        cudaDeviceSynchronize();
    }
    cudaEventRecord(stop);
    // Ensure `stop` has actually been recorded before reading the timer.
    cudaDeviceSynchronize();
    float time_ms;
    cudaEventElapsedTime(&time_ms, start, stop);
    float time_s = time_ms / (float)1e3;
    // Total bytes read across all timed runs.
    float GB = (float) numElements * sizeof(float) * RUN;
    float GBs = GB / time_s / (float)1e9;
    printf("%s. Time: %f, GB/s: %f\n", label, time_s, GBs);
}
/*
 * Benchmarks three reduction variants (global-atomic, shared-memory, warp
 * shuffle — defined elsewhere) over 10M ones and compares each printed sum
 * with a CPU double-precision reference.
 *
 * NOTE(review): runTest invokes each reduction 10 warm-up + RUN timed
 * times without re-zeroing out_d between calls, so the printed sums
 * depend on how the external implementations accumulate — verify.
 */
int main(int argc, char **argv)
{
    float *out_d, *in_d, *out_h, *in_h;
    int numElements = NUM_ELEMENTS;
    in_h = (float *)malloc(sizeof(float) * numElements);
    out_h = (float *)malloc(sizeof(float) * numElements);
    for (int i = 0; i < numElements; ++i)
    {
        in_h[i] = 1.f;
    }
    // The device output is a single float accumulator.
    cudaMalloc(&out_d, sizeof(float));
    cudaMalloc(&in_d, sizeof(float) * numElements);
    cudaMemcpy(in_d, in_h, sizeof(float) * numElements, cudaMemcpyHostToDevice);
    cudaMemset(out_d, 0, sizeof(float));
    runTest("Reduce with global memory", globalReduceSum, out_d, in_d, numElements);
    cudaMemcpy(out_h, out_d, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum: %.4f\n", *out_h);
    cudaMemset(out_d, 0, sizeof(float));
    runTest("Reduce with shared memory", sharedReduceSum, out_d, in_d, numElements);
    cudaMemcpy(out_h, out_d, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum: %.4f\n", *out_h);
    cudaMemset(out_d, 0, sizeof(float));
    runTest("Reduce with warp shuffle", warpReduceSum, out_d, in_d, numElements);
    cudaMemcpy(out_h, out_d, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum: %.4f\n", *out_h);
    // CPU reference, accumulated in double to avoid float rounding.
    double sum = 0;
    for (int i = 0; i < numElements; ++i)
    {
        sum += in_h[i];
    }
    printf("num: %d, reference sum: %f\n", numElements, sum);
    cudaFree(out_d);
    cudaFree(in_d);
    free(out_h);
    free(in_h);
    return 0;
}
|
2,373 | // CUDA libraries.
#include <cuda.h>
#include <cuda_runtime.h> //dynamical
// Include associated header file.
#include "cuda_kernel.cuh"
#include <stdio.h>
/*
 * Block-level dot product of the first 256 elements of A and B via a
 * shared-memory interleaved reduction; thread 0 writes this block's total
 * to result[blockIdx.x].  Must be launched with 256 threads per block.
 */
__global__ void dotProductKernel(double* A, double* B, double* result) {
    // number of elements per thread block
    const int N = 256;
    // shared memory for storing partial sums
    __shared__ double sums[N];
    // thread index
    int i = threadIdx.x;
    // partial dot product
    // NOTE(review): with bound `j < N` and step `j += N` this loop body
    // executes at most once per thread, so only A[0..255]/B[0..255] are
    // ever read and the caller's array size is ignored — confirm intent.
    double temp = 0;
    for (int j = i; j < N; j += N) {
        temp += A[j] * B[j];
    }
    // store partial sum in shared memory
    sums[i] = temp;
    // synchronize threads in block
    __syncthreads();
    // interleaved-addressing reduction in shared memory
    for (int offset = 1; offset < N; offset *= 2) {
        if (i % (2*offset) == 0) {
            sums[i] += sums[i + offset];
        }
        __syncthreads();
    }
    // result[i] = sums[i];
    // write result for block to global memory
    if (i == 0) {
        result[blockIdx.x] = sums[0];
    }
}
/*
 * Host wrapper: copies A and B (arraySize doubles each) to the device,
 * launches a single 256-thread block of dotProductKernel, and copies the
 * scalar result back into Result[0].
 *
 * NOTE(review): the kernel unconditionally touches indices 0..255, so
 * arraySize must be >= 256 to avoid out-of-bounds device reads, and any
 * elements past 256 are ignored — confirm against callers.
 */
void kernel(double *A, double *B, double *Result, int arraySize) {
    // Initialize device pointers.
    double* gpu_A;
    double* gpu_B;
    double* gpu_result;
    // Always allocate fresh device memory on every call.
    cudaMalloc((void**) &gpu_A, arraySize * sizeof(double));
    cudaMalloc((void**) &gpu_B, arraySize * sizeof(double));
    cudaMalloc((void**) &gpu_result, sizeof(double));
    // Transfer arrays A and B to device.
    cudaMemcpy(gpu_A, A, arraySize * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_B, B, arraySize * sizeof(double), cudaMemcpyHostToDevice);
    // cudaMemcpy(gpu_result, Result, arraySize * sizeof(double), cudaMemcpyHostToDevice);
    dim3 threads(256);
    dim3 blocks(1);
    dotProductKernel<<<blocks,threads>>>(gpu_A,gpu_B, gpu_result);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(Result, gpu_result, sizeof(double), cudaMemcpyDeviceToHost);
    // Result[1] = 44;
    // Free device memory
    cudaFree(gpu_A);
    cudaFree(gpu_B);
    cudaFree(gpu_result);
}
|
2,374 | //Test 1
#include<cuda.h>
#include <bits/stdc++.h>
using namespace std;
//Sequential
//Vector addition kernel
// Sequential CPU reference: h_C[i] = h_A[i] + h_B[i] for i in [0, n).
void vecAdd(float *h_A, float *h_B, float *h_C, int n){
    for (int idx = 0; idx < n; ++idx)
        h_C[idx] = h_A[idx] + h_B[idx];
}
//Parallel
// One thread per element: C[i] = A[i] + B[i], guarded against grid padding.
__global__ void vecAddP (float *A, float *B, float *C, int n){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    C[idx] = A[idx] + B[idx];
}
// Host wrapper: stage A and B on the device, launch vecAddP with
// 256-thread blocks (ceil-divided grid), copy the sum into C, and release
// the device buffers.
void vectorAdd(float *A, float *B, float *C, int n){
    const int bytes = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    // 1. Device allocations
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);
    // 2. Host -> device copies
    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
    // 3. Launch
    vecAddP<<<ceil(n/256.0), 256>>> (d_A, d_B, d_C, n);
    // 4. Copy result back (blocking, so it also syncs) and free
    cudaMemcpy (C, d_C, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Demo driver: A = 0..4, B = A + 1.5, GPU-add them, print the sums.
int main(){
    const int n = 5;
    float A[5];
    float B[5];
    float C[5];
    float val = 0.0;
    for (int i = 0; i < n; ++i, val = val + 1.0){
        A[i] = val;
        B[i] = val + 1.5;
    }
    vectorAdd(A, B, C, n);
    for (int i = 0; i < n; ++i){
        cout << C[i] << endl;
    }
}
|
2,375 | /*
* Team - Suraj Singh and Mahir Jain
* Roll Numbers - 16CO146 and 16CO123 respectively.
*/
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#define r_size 10
#define c_size 28
// Only one black is used.
#define BLOCKS 1
// Depends on the GPU used.
#define THREADS 1024
// Function for calculating execution time
// Report the wall-clock span between two clock() samples, in seconds.
void print_elapsed(clock_t start, clock_t stop)
{
    double seconds = ((double) (stop - start)) / CLOCKS_PER_SEC;
    printf("Elapsed time: %fs\n", seconds);
}
// Main CUDA kernel
// most of the parameters here are too big to be stored in shared memory
// and hence we have used global memory instead
/*
 * One source-vertex pass of Brandes' betweenness-centrality algorithm on a
 * CSR graph (R = row offsets, C = column indices, r_size vertices).
 * Phase 1: level-synchronous BFS from s, recording shortest-path counts
 * (sigma), the traversal stack S, and per-level offsets in `ends`.
 * Phase 2: dependency accumulation walking the levels backwards, adding
 * delta/2 into the global bc array.  Runs as a single thread block.
 *
 * NOTE(review): several __syncthreads() sit inside loops/branches whose
 * trip counts differ per thread (the `k` loops and the `if(idx >= ...)`
 * in phase 2); with blockDim >= r_size all threads still reach each
 * barrier the same number of times, but this should be verified for the
 * intended launch (THREADS=1024 vs r_size=10 here).
 */
__global__ void brandes(int s, int *R, int *C, int *S, int *d, float *sigma, float *delta, int *Q, int *Q2, int * ends, float *bc) {
    int idx = threadIdx.x;
    // Initialise values for BFS: source at depth 0 with one path.
    for(int k=idx; k < r_size; k+= blockDim.x) {
        //printf("hi");
        if( k == s ) {
            d[k] = 0;
            sigma[k] =1;
        } else {
            d[k] = -1;
            sigma[k] = 0;
        }
    }
    // Variables common to all threads in the block, kept in shared memory.
    __shared__ int Q_len;
    __shared__ int Q2_len;
    __shared__ int ends_len;
    __shared__ int depth;
    __shared__ int S_len;
    int count;
    if( idx == 0 ) {
        Q[0] = s;
        Q_len = 1;
        S[0] = s;
        S_len=1;
        Q2_len = 0;
        ends[0] =0;
        ends[1] = 1;
        ends_len = 2;
    }
    __syncthreads();
    count =0;
    // Phase 1: BFS, one frontier (Q) per outer iteration.
    while(1) {
        for(int k=idx; k < ends[count+1] - ends[count]; k+=blockDim.x) {
            int v = Q[k];
            // Same logic as ocean kernel!
            __syncthreads();
            for(int r = R[v]; r< R[v+1]; r++) {
                int w = C[r];
                int t;
                // First discoverer of w claims it and appends it to the
                // next frontier Q2 ('stack' implemented as a queue).
                if (atomicCAS(&d[w], -1, d[v]+1) == -1) {
                    //printf("%d\n", w);
                    t = atomicAdd(&Q2_len,1);
                    //f =1;
                    Q2[t] = w;
                }
                // If v lies on a shortest path to w, accumulate its count.
                if(d[w] == (d[v]+1)) {
                    atomicAdd(&sigma[w],sigma[v]);
                }
            }
        }
        __syncthreads();
        if(Q2_len==0) {
            if(idx==0) {
                // Record the deepest level for the accumulation phase.
                depth = d[S[S_len-1]];
            }
            break;
        } else {
            // Swap Q with Q2 and push the new frontier onto the stack S.
            for(int k =idx; k < Q2_len; k+=blockDim.x) {
                Q[k] = Q2[k];
                S[k+S_len] = Q2[k];
            }
            __syncthreads();
            if(idx==0) {
                ends[ends_len] = ends[ends_len-1] + Q2_len;
                ends_len = ends_len +1;
                Q_len = Q2_len;
                S_len = S_len + Q2_len;
                Q2_len = 0;
            }
            __syncthreads();
        }
        count++;
        __syncthreads();
    }
    // Everyone must stop here after breaking out of the BFS loop.
    __syncthreads();
    // Phase 2: dependency accumulation, deepest level first.
    while(depth > 0) {
        // Threads whose index falls inside this level's slice of S work.
        if(idx >= ends[depth] && idx <= ends[depth+1] -1)
        {
            int w = S[idx];
            float dsw = 0;
            float sw = sigma[w];
            // Update delta for w by traversing its neighbours one level down.
            for(int r = R[w]; r< R[w+1]; r++) {
                int v = C[r];
                if(d[v] == d[w] + 1) {
                    dsw += (sw/sigma[v])*(1 + delta[v]);
                }
            }
            delta[w] = dsw;
            __syncthreads();
            // Fold this pass's dependency into the global BC score
            // (halved because each undirected path is seen twice).
            if(w!=s) {
                atomicAdd(&bc[w],delta[w]/2);
                //bc[w] += delta[w]/2;
            }
        }
        depth--;
    }
}
/*
 * Loads a CSR graph (row offsets from R.txt, column indices from C.txt),
 * runs one brandes<<<>>> pass per source vertex, and times the launch loop.
 *
 * NOTE(review): the timing brackets only kernel *launches* (launches are
 * asynchronous); the input files are never fclose'd, device buffers are
 * never cudaFree'd, and bc_val is leaked — acceptable for a one-shot
 * benchmark but worth fixing.
 */
int main(int argc, char const *argv[])
{
    FILE *R = fopen("R.txt", "r");
    FILE *C = fopen("C.txt", "r");
    clock_t start, stop;
    int r[r_size];
    int c[c_size];
    for(int i=0;i< r_size; i++) {
        fscanf(R, "%d\n", &r[i]);
    }
    for(int i=0;i< c_size; i++) {
        fscanf(C, "%d\n", &c[i]);
    }
    // Device-side CSR arrays plus BFS work buffers (queues, stack, levels).
    int *dev_r, *dev_c, *dev_d, *dev_Q, *dev_Q2,*ends, *dev_S;
    float *dev_sigma, *dev_delta, *dev_bc;
    float *bc_val = (float*)malloc(r_size*sizeof(float));
    cudaMalloc((void**) &dev_r, r_size*sizeof(int));
    cudaMalloc((void**) &dev_c, c_size*sizeof(int));
    cudaMalloc((void**) &dev_bc, r_size*sizeof(float));
    cudaMalloc((void**) &dev_d, r_size*sizeof(int));
    cudaMalloc((void**) &dev_sigma, r_size*sizeof(float));
    cudaMalloc((void**) &dev_delta, r_size*sizeof(float));
    cudaMalloc((void**) &dev_Q, r_size*sizeof(int));
    cudaMalloc((void**) &dev_Q2, r_size*sizeof(int));
    cudaMalloc((void**) &dev_S, r_size*sizeof(int));
    cudaMalloc((void**) &ends, (r_size+1)*sizeof(int));
    cudaMemcpy(dev_r, r, r_size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, c_size*sizeof(int), cudaMemcpyHostToDevice);
    dim3 blocks(BLOCKS,1);
    dim3 threads(THREADS,1);
    start = clock();
    // One single-block pass per source vertex; passes accumulate into dev_bc.
    for(int s=0; s < r_size; s++) {
        brandes<<<blocks, threads>>>(s, dev_r, dev_c, dev_S, dev_d , dev_sigma, dev_delta,dev_Q,dev_Q2, ends, dev_bc);
    }
    stop=clock();
    print_elapsed(start,stop);
    // Blocking copy synchronizes with all queued kernels.
    cudaMemcpy(bc_val, dev_bc, r_size*(sizeof(float)), cudaMemcpyDeviceToHost);
    return 0;
}
|
2,376 | #include <cuda_runtime.h>
#include <stdio.h>
// Print every thread's full coordinate set (thread/block indices and the
// block/grid dimensions).  Device-side printf is serialized — debug only.
__global__ void checkIndex(void){
    printf("threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) \
blockDim: (%d, %d, %d) gridDim: (%d, %d, %d)\n",
    threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x,
    blockIdx.y, blockIdx.z, blockDim.x, blockDim.y,
    blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Launch a tiny ceil-divided grid (n=6, 3 threads/block -> 2 blocks) and
// let every thread print its coordinates.
int main(int argc, char **argv){
    const int n = 6;
    dim3 block(3);
    dim3 grid((n - 1) / block.x + 1);
    printf("grid: (%d, %d, %d)\n", grid.x, grid.y, grid.z);
    printf("block: (%d, %d, %d)\n", block.x, block.y, block.z);
    checkIndex<<<grid, block>>>();
    // cudaDeviceReset flushes the device printf buffer before exit.
    cudaDeviceReset();
    return 0;
}
2,377 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
/*
 * Thrust demo: read ten doubles from stdin, time the host->device vector
 * copy, then print both vectors (element-wise iteration over the device
 * vector is intentionally slow — each dereference crosses the PCIe bus).
 */
int main() {
    thrust::host_vector<double> host(10, 0);
    for(int i = 0; i<10;i++){
        double s;
        std::cin >> s;
        host[i] = s;
    }
    // for(int i =0; i < 2517; i++){
    //     std::cin >> stocks;
    //     host.push_back(stocks);
    // }
    // Time just the host -> device transfer.
    auto start = std::chrono::steady_clock::now();
    thrust::device_vector<double> dev(host);
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> elapsed_seconds = end-start;
    printf("Host vector: ");
    for (auto i = host.begin(); i != host.end(); i++) {
        std::cout << *i << " "; // fast access -- host (CPU) memory
    }
    printf("\n");
    printf("Device vector: ");
    for (auto i = dev.begin(); i != dev.end(); i++) {
        std::cout << *i << " "; // slow access! -- each read copies from the GPU
    }
    printf("\n");
    std::cerr << "elapsed time: " << elapsed_seconds.count() << "s\n";
    // val = thrust::reduce(dev.begin(), dev.end(), 0, thrust::plus<double>()));
}
2,378 | #include "includes.h"
/*
 * res = mat_a x mat_b^T, where mat_b is stored already transposed
 * (B_TRANS_ROWS x B_TRANS_COLS).  Each block computes whole rows of the
 * result; every dot product is a blockDim.x-wide shared-memory tree
 * reduction over smem.
 *
 * Requirements (from the smem usage below): blockDim.x <= 128 and a power
 * of two, so the halving reduction covers every element.  All
 * __syncthreads() here are safe: the surrounding loop bounds depend only
 * on block-uniform values, so every thread reaches each barrier.
 *
 * NOTE(review): `total` only ever accumulates one partial (per b_row the
 * inner thread_i loop re-reduces from scratch) — correct as written
 * because thread 0 adds smem[0] once per thread_i pass, but fragile.
 */
__global__ void mat_mult_transposed_kernel(int *mat_a, int *mat_b, int *res) {
    int B_TRANS_ROWS = B_COLS;
    int B_TRANS_COLS = B_ROWS;
    // One reduction buffer per block, reused across all dot products.
    __shared__ int smem[128];
    for (int row_block = 0; row_block * gridDim.x < A_ROWS; row_block++) {
        int a_row = blockIdx.x + (row_block * gridDim.x);
        for (int b_row = 0; b_row < B_TRANS_ROWS; b_row++) {
            int total = 0;
            for (int thread_i = 0; thread_i * blockDim.x < A_COLS; thread_i++) {
                int thread_col = threadIdx.x + (thread_i * blockDim.x);
                // Zero-pad: A_COLS (e.g. 240) need not be a multiple of 128
                if (thread_col >= A_COLS)
                    smem[threadIdx.x] = 0;
                else
                    smem[threadIdx.x] = mat_a[a_row * A_COLS + thread_col] * mat_b[b_row * B_TRANS_COLS + thread_col];
                __syncthreads();
                // Parallel tree reduction into smem[0]
                for (int i = blockDim.x / 2; i > 0; i /= 2) {
                    if (threadIdx.x < i) {
                        int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
                        smem[threadIdx.x] = temp;
                    }
                    __syncthreads();
                }
                if (threadIdx.x == 0) {
                    total += smem[threadIdx.x];
                }
            }
            if (threadIdx.x == 0) {
                res[a_row * C_COLS + b_row] = total;
            }
        }
    }
}
2,379 | #include "includes.h"
#define FALSE 0
#define TRUE !FALSE
#define NUMTHREADS 16
#define THREADWORK 32
// Pearson correlation (PMCC) between every vector pair: one block per
// (x, y) pair with NUMTHREADS threads. means/sds hold interleaved per-pair
// (meanA, meanB) / (sdA, sdB); numPairs[x*nb+y] is the sample count n.
// Writes correlations[x*nb+y] = sum(zA*zB) / (n - 1).
__global__ void gpuPMCCNoTest(const float * vectsa, size_t na, const float * vectsb, size_t nb, size_t dim, const float * numPairs, const float * means, const float * sds, float * correlations)
{
size_t
offset, stride,
x = blockIdx.x, y = blockIdx.y,
tx = threadIdx.x;
float
a, b, n, scoreA, scoreB;
__shared__ float
meanA, meanB,
sdA, sdB,
threadSums[NUMTHREADS];
// x and y are uniform per block, so the whole block exits together and
// the __syncthreads() below cannot deadlock.
if((x >= na) || (y >= nb))
return;
// Thread 0 loads the per-pair statistics into shared memory; n stays in
// thread 0's local state since only thread 0 uses it at the end.
if(tx == 0) {
meanA = means[x*nb*2+y*2];
meanB = means[x*nb*2+y*2+1];
sdA = sds[x*nb*2+y*2];
sdB = sds[x*nb*2+y*2+1];
n = numPairs[x*nb+y];
}
__syncthreads();
// Each thread accumulates z-score products over a strided slice of dim.
threadSums[tx] = 0.f;
for(offset = tx; offset < dim; offset += NUMTHREADS) {
a = vectsa[x * dim + offset];
b = vectsb[y * dim + offset];
scoreA = (a - meanA) / sdA;
scoreB = (b - meanB) / sdB;
threadSums[tx] += scoreA * scoreB;
}
__syncthreads();
// Tree reduction of the NUMTHREADS partial sums.
for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
if(tx < stride) threadSums[tx] += threadSums[tx + stride];
__syncthreads();
}
if(tx == 0) correlations[x*nb+y] = threadSums[0] / (n - 1.f);
}
2,380 | // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#miscellaneous-instructions-pmevent
// Element-wise OUT = A + B, with PTX pmevent markers so an attached profiler
// can trigger on performance-monitor events (see the PTX ISA pmevent docs).
extern "C" __global__ void run_atomics(
const float *A,
const float *B,
float *OUT,
int numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
asm("pmevent 1;");
asm("pmevent 7;");
asm("pmevent.mask 0xFF;");
// Fix: the original ignored numElements and read/wrote out of bounds
// whenever the grid is larger than the array.
if (id < numElements)
OUT[id] = A[id] + B[id];
}
|
2,381 | #ifndef KERNEL_REDUCE
#define KERNEL_REDUCE
#include <math.h>
#include <float.h>
#include <cuda.h>
// One Jacobi relaxation step of the heat equation on an N x N grid:
// writes the 4-neighbour average of h into g and the squared local change
// into residuals. Border cells are left untouched.
__global__ void gpu_Heat(float *h, float *g, float *residuals, int N) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
// Interior points only; boundary values stay fixed.
if (row <= 0 || row >= N - 1 || col <= 0 || col >= N - 1)
return;
int idx = row * N + col;
float updated = 0.25f * (h[idx - 1] +   // left
h[idx + 1] +   // right
h[idx - N] +   // top
h[idx + N]);   // bottom
g[idx] = updated;
float delta = updated - h[idx];
residuals[idx] = delta * delta;
}
// Unrolled warp-level tail of a shared-memory sum reduction: folds at most
// the first 64 elements of sdata into sdata[tid] for tid < 32. volatile
// forces every store back to shared memory so other lanes observe it.
// NOTE(review): relies on implicit warp-synchronous execution; under
// independent thread scheduling (Volta+) this pattern needs __syncwarp()
// between steps — confirm the target compute capability.
template<size_t blockSize, typename T>
__device__ void warpReduce(volatile T *sdata, size_t tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// Reduction v1 (interleaved addressing with modulo): one input element per
// thread, one partial sum per block written to g_odata[blockIdx.x].
// The tid % (2*s) test leaves most lanes of each warp idle — kept as the
// baseline variant of this kernel family.
// NOTE(review): extern __shared__ inside a template can clash when the
// kernel is instantiated for several T in one translation unit — confirm.
template<int blockSize, class T>
__global__ void reduce1(T *g_idata, T *g_odata, int n) {
extern __shared__ T sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Zero-pad lanes past the end of the input.
sdata[tid] = 0;
if (i < n) {
sdata[tid] = g_idata[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (tid % (2 * s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Reduction v2 (interleaved addressing, strided index): replaces the modulo
// test of v1 with index = 2*s*tid so active threads stay contiguous; the
// strided shared-memory access still causes bank conflicts.
template<int blockSize, class T>
__global__ void reduce2(T *g_idata, T *g_odata, int n) {
extern __shared__ T sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Zero-pad lanes past the end of the input.
sdata[tid] = 0;
if (i < n) {
sdata[tid] = g_idata[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Reduction v3 (sequential addressing): halving stride keeps each warp's
// accesses contiguous — no shared-memory bank conflicts. Assumes blockDim.x
// is a power of two.
template<int blockSize, class T>
__global__ void reduce3(T *g_idata, T *g_odata, int n) {
extern __shared__ T sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Zero-pad lanes past the end of the input.
sdata[tid] = 0;
if (i < n) {
sdata[tid] = g_idata[i];
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Reduction v4: each thread loads and adds two elements on the way in
// (halving the block count), then a sequential-addressing tree reduction.
// One partial sum per block is written to g_odata[blockIdx.x].
template<int blockSize, class T>
__global__ void reduce4(T *g_idata, T *g_odata, int n) {
extern __shared__ T sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = 0;
// Fix: guard the second load separately — the original read
// g_idata[i + blockDim.x] whenever i < n, which is out of bounds when n
// is not a multiple of 2*blockDim.x.
if (i < n) {
sdata[tid] = g_idata[i];
if (i + blockDim.x < n) {
sdata[tid] += g_idata[i + blockDim.x];
}
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Reduction v5: two loads per thread on entry, tree reduction down to 32
// elements, then the unrolled warp-synchronous warpReduce tail.
template<int blockSize, class T>
__global__ void reduce5(T *g_idata, T *g_odata, int n) {
extern __shared__ T sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = 0;
// Fix: guard the second load separately — the original read
// g_idata[i + blockDim.x] out of bounds when n is not a multiple of
// 2*blockDim.x.
if (i < n) {
sdata[tid] = g_idata[i];
if (i + blockDim.x < n) {
sdata[tid] += g_idata[i + blockDim.x];
}
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// Last 64 -> 1 handled warp-synchronously by a single warp.
if (tid < 32) warpReduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Reduction v6: fully unrolled tree using the compile-time blockSize,
// finishing with the warp-synchronous warpReduce tail.
template<int blockSize, class T>
__global__ void reduce6(T *g_idata, T *g_odata, int n) {
extern __shared__ T sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = 0;
// Fix: guard the second load separately — the original read
// g_idata[i + blockDim.x] out of bounds when n is not a multiple of
// 2*blockDim.x.
if (i < n) {
sdata[tid] = g_idata[i];
if (i + blockDim.x < n) {
sdata[tid] += g_idata[i + blockDim.x];
}
}
__syncthreads();
if (blockSize >= 512 && tid < 256) { sdata[tid] += sdata[tid + 256]; }
__syncthreads();
if (blockSize >= 256 && tid < 128) { sdata[tid] += sdata[tid + 128]; }
__syncthreads();
if (blockSize >= 128 && tid < 64) { sdata[tid] += sdata[tid + 64]; }
__syncthreads();
if (tid < 32) warpReduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Grid-stride accumulation followed by a fully unrolled block reduction:
// every thread first sums a strided slice of g_idata (so any n is handled
// without OOB access), then the block tree-reduces the blockSize partials.
// Uses a statically sized shared array, so callers need not pass dynamic
// shared memory. Requires blockSize to be a power of two.
template<size_t blockSize, typename T>
__global__ void finalReduce(T *g_idata, T *g_odata, size_t n) {
__shared__ T sdata[blockSize];
size_t tid = threadIdx.x;
size_t i = blockIdx.x * (blockSize) + tid;
size_t gridSize = blockSize * gridDim.x;
sdata[tid] = 0;
// Grid-stride loop: each thread folds every gridSize-th element.
while (i < n) {
sdata[tid] += g_idata[i];
i += gridSize;
}
__syncthreads();
if (blockSize >= 1024) {
if (tid < 512) { sdata[tid] += sdata[tid + 512]; }
__syncthreads();
}
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; }
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; }
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; }
__syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Host driver: repeatedly launches finalReduce until at most blockSize
// partial sums remain, runs one last single-block pass, and copies the
// scalar result back. Returns the sum of dA[0..N).
// NOTE(review): finalReduce uses a static __shared__ array, so the dynamic
// shared-memory argument passed here is redundant; no CUDA error checking
// is performed — consider wrapping these calls.
template<size_t blockSize, typename T>
T GPUReductionOrg(T *dA, size_t N) {
T tot = 0.;
size_t n = N;
size_t blocksPerGrid = std::ceil((1. * n) / blockSize);
// Scratch buffer for per-block partials; sized for the first (largest) pass.
T *tmp;
cudaMalloc(&tmp, sizeof(T) * blocksPerGrid);
T *from = dA;
do {
blocksPerGrid = std::ceil((1. * n) / blockSize);
finalReduce<blockSize><<<blocksPerGrid, blockSize, blockSize*sizeof(T)>>>(from, tmp, n);
from = tmp;
n = blocksPerGrid;
} while (n > blockSize);
// Final single-block pass reduces tmp in place: tmp[0] is only written by
// thread 0 after the barriers, so the in-place aliasing is safe.
if (n > 1)
finalReduce<blockSize><<<1, blockSize, blockSize*sizeof(T)>>>(tmp, tmp, n);
cudaDeviceSynchronize();
cudaMemcpy(&tot, tmp, sizeof(T), cudaMemcpyDeviceToHost);
cudaFree(tmp);
return tot;
}
#endif
|
2,382 | #include "includes.h"
// Emits one axis-aligned cube per point: six quads of 4 vertices (72 floats
// of positions per cell, written at quadOffset) plus per-quad texture
// coordinates (48 floats per cell, written after all position data at
// textureOffset). Cells whose activityFlag is 0 collapse to a degenerate
// zero-size cube centred on the point. Texture u-coordinates give each cell
// its own horizontal slice of a shared texture strip.
__global__ void ComputeCubesKernel( float *pointsCoordinates, float *vertexData, int quadOffset, float cubeSide, int *activityFlag, int textureWidth, int maxCells )
{
// Flatten the 2-D grid of 1-D blocks into one cell index.
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
// Cube centre.
float x = pointsCoordinates[threadId * 3];
float y = pointsCoordinates[threadId * 3 + 1];
float z = pointsCoordinates[threadId * 3 + 2];
float halfSide = 0.50f * cubeSide;
// Inactive cells are drawn with zero extent (degenerate quads).
if(activityFlag[threadId] == 0)
{
halfSide = 0.00f;
}
// Texture coordinates start after all position data (maxCells cells *
// 4 vertices * 6 sides * 3 coords).
int textureOffset = quadOffset + maxCells * 4 * 6 * 3;
float textureAbsLength = (float)(maxCells * textureWidth);
// BOTTOM SIDE
vertexData[quadOffset + threadId * 72] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 1] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 2] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 3] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 4] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 5] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 6] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 7] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 8] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 9] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 10] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 11] = z + halfSide;
vertexData[textureOffset + threadId * 48] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 1] = 0.00f;
vertexData[textureOffset + threadId * 48 + 2] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 3] = 1.00f;
vertexData[textureOffset + threadId * 48 + 4] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 5] = 1.00f;
vertexData[textureOffset + threadId * 48 + 6] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 7] = 0.00f;
// FRONT SIDE
vertexData[quadOffset + threadId * 72 + 12] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 13] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 14] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 15] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 16] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 17] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 18] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 19] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 20] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 21] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 22] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 23] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 8] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 9] = 0.00f;
vertexData[textureOffset + threadId * 48 + 10] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 11] = 1.00f;
vertexData[textureOffset + threadId * 48 + 12] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 13] = 1.00f;
vertexData[textureOffset + threadId * 48 + 14] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 15] = 0.00f;
// LEFT SIDE
vertexData[quadOffset + threadId * 72 + 24] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 25] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 26] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 27] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 28] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 29] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 30] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 31] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 32] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 33] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 34] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 35] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 16] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 17] = 0.00f;
vertexData[textureOffset + threadId * 48 + 18] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 19] = 1.00f;
vertexData[textureOffset + threadId * 48 + 20] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 21] = 1.00f;
vertexData[textureOffset + threadId * 48 + 22] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 23] = 0.00f;
// BACK SIDE (texture u-coordinates mirrored relative to the front)
vertexData[quadOffset + threadId * 72 + 36] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 37] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 38] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 39] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 40] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 41] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 42] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 43] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 44] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 45] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 46] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 47] = z - halfSide;
vertexData[textureOffset + threadId * 48 + 24] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 25] = 0.00f;
vertexData[textureOffset + threadId * 48 + 26] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 27] = 1.00f;
vertexData[textureOffset + threadId * 48 + 28] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 29] = 1.00f;
vertexData[textureOffset + threadId * 48 + 30] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 31] = 0.00f;
// RIGHT SIDE (texture u-coordinates mirrored relative to the left)
vertexData[quadOffset + threadId * 72 + 48] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 49] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 50] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 51] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 52] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 53] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 54] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 55] = y - halfSide;
vertexData[quadOffset + threadId * 72 + 56] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 57] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 58] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 59] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 32] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 33] = 0.00f;
vertexData[textureOffset + threadId * 48 + 34] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 35] = 1.00f;
vertexData[textureOffset + threadId * 48 + 36] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 37] = 1.00f;
vertexData[textureOffset + threadId * 48 + 38] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 39] = 0.00f;
// UPPER SIDE
vertexData[quadOffset + threadId * 72 + 60] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 61] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 62] = z + halfSide;
vertexData[quadOffset + threadId * 72 + 63] = x - halfSide;
vertexData[quadOffset + threadId * 72 + 64] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 65] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 66] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 67] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 68] = z - halfSide;
vertexData[quadOffset + threadId * 72 + 69] = x + halfSide;
vertexData[quadOffset + threadId * 72 + 70] = y + halfSide;
vertexData[quadOffset + threadId * 72 + 71] = z + halfSide;
vertexData[textureOffset + threadId * 48 + 40] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 41] = 1.00f;
vertexData[textureOffset + threadId * 48 + 42] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 43] = 0.00f;
vertexData[textureOffset + threadId * 48 + 44] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 45] = 0.00f;
vertexData[textureOffset + threadId * 48 + 46] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 48 + 47] = 1.00f;
}
}
2,383 | #include <stdio.h>
#include <stdlib.h>
#include <cmath>
using namespace std;
// Print one hexadecimal digit (expects 0..15) as its ASCII character.
void zzz2(int c){
char digit = (c < 10) ? (char)(c + '0') : (char)(c - 10 + 'a');
printf("%c", digit);
}
// Debug helper: print the x, y, z bytes of a uchar3 as six hex digits
// (two digits per byte, high nibble first).
void zzz1(uchar3 a){
int comps[3] = { a.x, a.y, a.z };
for (int i = 0; i < 3; i++) {
zzz2(comps[i] / 16);
zzz2(comps[i] % 16);
}
}
// Debug helper: print the x, y, z bytes of a uchar4 as six hex digits
// (the w component is intentionally not printed).
void zzz3(uchar4 a){
int comps[3] = { a.x, a.y, a.z };
for (int i = 0; i < 3; i++) {
zzz2(comps[i] / 16);
zzz2(comps[i] % 16);
}
}
// Thin device wrapper around sqrt (double precision).
__device__ double SQRT(double A){
return sqrt(A);
}
// Squares its argument; named POW but only computes a*a.
__device__ double POW(double a){
return a*a;
}
#define CSC(call) do { \
cudaError_t e = call; \
if (e != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n"\
, __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(0); \
} \
} while(0)
#define RGBToWB(r, g, b) 0.299*r + 0.587*g + 0.114*b
#define chek(a) a>255.1?255.1:a
//texture<uchar4, 2, cudaReadModeElementType> tex;
__constant__ double3 conTest[33];
// K-means assignment step: for every pixel of the w x h image, find the
// nearest of the n centroids stored in __constant__ conTest (Euclidean
// distance over RGB) and store the winning cluster index in the pixel's
// alpha channel. 2-D grid-stride loops cover the whole image.
// NOTE(review): ans stays uninitialized if n == 0 or every distance
// exceeds the 1e6 sentinel — callers must pass n >= 1.
__global__ void kernel(uchar4 *dst, int w, int h, int n) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j,k;
uchar4 a;
double3 b;
int ans;
double max,tmp;
for(i = idx; i < w; i += offsetx){
for(j = idy; j < h; j += offsety){
// Sentinel larger than any possible RGB distance (max ~441.7).
max=1000000;
a = dst[j * w + i];
for(k=0;k<n;k++){
b=conTest[k];
tmp=SQRT(POW(a.x-b.x)+
POW(a.y-b.y)+
POW(a.z-b.z));
//zzz1(b);
//printf(" %d %d %f %f\n",j * w + i,k,tmp,max);
if(max>tmp){
max=tmp;
ans=k;
}
}
// Keep the color, record the cluster index in alpha.
dst[j * w + i] = make_uchar4(a.x,a.y,a.z, ans);
}
}
}
// Lloyd's k-means over pixel colors: reads a raw image (w, h, then w*h
// uchar4 pixels), seeds n centroids from user-picked pixel coordinates,
// then alternates GPU assignment (kernel writes the cluster id into each
// pixel's alpha) with CPU centroid recomputation until the centroids stop
// changing. Writes the labeled image back out in the same raw format.
int main() {
int h, w;
char path_in[257];
char path_out[257];
scanf("%s", path_in);
scanf("%s", path_out);
// Raw image format: int width, int height, then w*h RGBA bytes.
FILE* in = fopen(path_in, "rb");
fread(&w, sizeof(int), 1, in);
fread(&h, sizeof(int), 1, in);
uchar4 *img = (uchar4 *)malloc(sizeof(uchar4) * h * w);
fread(img, sizeof(uchar4), h * w, in);
fclose(in);
int n,x,y;
scanf("%d",&n);
double3 *test = (double3 *)malloc(sizeof(double3) * n);
double3 *testNext = (double3 *)malloc(sizeof(double3) * n);
double4 *res = (double4 *)malloc(sizeof(double4) * n);
// Seed each centroid from a user-supplied (x, y) pixel.
for(int i=0;i<n;i++){
scanf("%d%d",&x,&y);
test[i].x=img[y*w+x].x;
test[i].y=img[y*w+x].y;
test[i].z=img[y*w+x].z;
/*zzz1(test[i]);
printf("\n");*/
}
/*cudaArray *dev_arr;
cudaChannelFormatDesc ch = cudaCreateChannelDesc<uchar4>();
CSC(cudaMallocArray(&dev_arr, &ch, w, h));
CSC(cudaMemcpyToArray(dev_arr, 0,
0, img, sizeof(uchar4) * w * h, cudaMemcpyHostToDevice));
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.channelDesc = ch;
tex.filterMode = cudaFilterModePoint;
tex.normalized = false;
CSC(cudaBindTextureToArray(tex, dev_arr, ch));*/
uchar4 *dev_img;
CSC(cudaMalloc(&dev_img, sizeof(uchar4) * w * h));
CSC(cudaMemcpy ( dev_img, img, sizeof(uchar4)
* w * h, cudaMemcpyHostToDevice ));
// y doubles as the "centroids changed" flag from here on.
y=1;
while (y) {
/*for(int i=0;i<n;i++){
zzz1(test[i]);
}*/
// Upload current centroids, run the GPU assignment, fetch labels.
CSC( cudaMemcpyToSymbol(conTest, test, n*sizeof(double3)) );
kernel<<< dim3(32, 32), dim3(32, 32) >>>(dev_img, w, h,n);
CSC(cudaMemcpy(img, dev_img, sizeof(uchar4)
* w * h, cudaMemcpyDeviceToHost));
// Accumulate per-cluster RGB sums (x,y,z) and member counts (w).
for(int i=0;i<n;i++){
res[i].x = res[i].y = res[i].z = res[i].w = 0;
}
for(int i = 0; i < w*h; i++){
//zzz3(img[i]);
//printf("%d %d \n",i,img[i].w);
res[img[i].w].x+=img[i].x;
res[img[i].w].y+=img[i].y;
res[img[i].w].z+=img[i].z;
res[img[i].w].w+=1;
//printf("%f %f ",res[img[i].w].x,res[img[i].w].w);
}
//printf("\n");
// New centroid = mean color of its members.
for(int i=0;i<n;i++){
testNext[i].x=res[i].x/res[i].w;
testNext[i].y=res[i].y/res[i].w;
testNext[i].z=res[i].z/res[i].w;
//zzz1(testNext[i]);
}
//printf("NEXT\n");
// Converged when no centroid component changed.
y=0;
for(int i=0;i<n;i++){
if(test[i].x!=testNext[i].x){
y=1;
}
if(test[i].y!=testNext[i].y){
y=1;
}
if(test[i].z!=testNext[i].z){
y=1;
}
}
for(int i=0;i<n;i++){
test[i].x=testNext[i].x;
test[i].y=testNext[i].y;
test[i].z=testNext[i].z;
}
}
// Write the labeled image in the same raw format.
FILE* out = fopen(path_out, "wb");
fwrite(&w, sizeof(int), 1, out);
fwrite(&h, sizeof(int), 1, out);
fwrite(img, sizeof(uchar4), h * w, out);
fclose(out);
CSC(cudaFree(dev_img));
free(img);
free(test);
free(testNext);
free(res);
return 0;
}
|
2,384 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
#include <cmath>
// Fills a[index] with the index-th Fibonacci number via Binet's closed form.
// Single-block launch assumed (only threadIdx.x is used).
// NOTE(review): double-precision pow drifts from the exact integer value as
// the index grows — confirm acceptable for the intended n.
__global__ void fibonacci_kernel(double* a, int n) {
unsigned int index = threadIdx.x;
if (index < n)
a[index] = (pow((1 + sqrt(5.0)) / 2, index) - pow((1 - sqrt(5.0)) / 2, index)) / sqrt(5.0);
}
// Host-side driver that generates Fibonacci numbers on the GPU and prints them.
class Fibonacci {
public:
// Number of Fibonacci terms to generate.
int arrayLength;
explicit Fibonacci(int arrayLength);
// Allocates buffers, launches fibonacci_kernel with the given config,
// and prints the results.
void run(int numGrids, int numThreads) const;
// Prints resultArray; the first parameter is currently unused.
void displayResult(double* array, double* resultArray) const;
};
// Store the requested number of Fibonacci terms.
Fibonacci::Fibonacci(int arrayLength) : arrayLength(arrayLength) {}
// Print each computed value with its index.
// (The first parameter is unused; kept for interface compatibility.)
void Fibonacci::displayResult(double *array, double* resultArray) const {
(void)array;
for (int i = 0; i < this->arrayLength; i++) {
printf("Index %d: %f\n", i, resultArray[i]);
}
}
// Allocates host/device buffers, runs fibonacci_kernel with the given
// launch configuration, and prints the results.
void Fibonacci::run(int numGrids, int numThreads) const {
// Fix: the original assigned cudaGetDevice's error code to deviceId,
// clobbering the ordinal it had just retrieved.
int deviceId = 0;
cudaGetDevice(&deviceId);
printf("GPU Device ID: %d\n", deviceId);
printf("CPU Device ID: %d\n\n", cudaCpuDeviceId);
double * hostArray, * resultArray, * deviceArray;
// Fix: the buffers hold doubles — the original sized them with
// sizeof(int), allocating half the required bytes (heap overflow on the
// copy-back).
size_t arrayBytes = sizeof(double) * this->arrayLength;
cudaMallocHost(&hostArray, arrayBytes);
cudaMallocHost(&resultArray, arrayBytes);
cudaMalloc(&deviceArray, arrayBytes);
cudaMemcpy(deviceArray, hostArray, arrayBytes, cudaMemcpyHostToDevice);
fibonacci_kernel<<<numGrids, numThreads>>>(deviceArray, arrayLength);
cudaDeviceSynchronize();
cudaMemcpy(resultArray, deviceArray, arrayBytes, cudaMemcpyDeviceToHost);
displayResult(hostArray, resultArray);
cudaFreeHost(hostArray);
cudaFreeHost(resultArray);
cudaFree(deviceArray);
}
int main() {
Fibonacci program(16);
program.run(1, 256);
}
|
// Stub of a 128x128-tile SGEMM (NN) kernel: at present it only allocates
// the shared-memory tile and has each of the 256 threads store one value.
// All GEMM parameters are accepted but currently unused.
extern "C" __global__ __launch_bounds__(256) void sgemm_nn_128x128(
const float *param_A,
const float *param_B,
float *param_C,
float param_alpha,
float param_beta,
int param_lda8,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k) {
// Four 128x8 float buffers plus 32 words of padding.
__shared__ float share[128 * 8 * 4 + 32];
int t = threadIdx.x;
share[t] = 1.0f;
}
|
2,386 | #include <cuda_runtime_api.h>
#include <stdio.h>
int main(){
cudaStream_t s;
cudaError_t res;
res = cudaStreamCreate(&s);
printf("res : %d\n", res);
res = cudaStreamDestroy(s);
printf("res : %d\n", res);
return 0;
}
|
2,387 | #include "CCubicDomain.cuh"
namespace NBody
{
//ds ctor/dtor
//ds default constructor: stores the particle count; all per-particle arrays
//ds stay null until createParticlesUniformFromNormalDistribution allocates them
CCubicDomain::CCubicDomain( const unsigned int& p_uNumberOfParticles ): m_arrPositions( 0 ),
m_arrVelocities( 0 ),
m_arrAccelerations( 0 ),
m_arrMasses( 0 ),
m_uNumberOfParticles( p_uNumberOfParticles ),
m_strParticleInformation( "" ),
m_strIntegralsInformation( "" )
{
//ds nothing to do
}
//ds default destructor: releases the per-particle arrays
//ds (delete[] on a null pointer is a safe no-op if they were never allocated)
CCubicDomain::~CCubicDomain( )
{
//ds deallocate memory
delete[] m_arrPositions;
delete[] m_arrVelocities;
delete[] m_arrAccelerations;
delete[] m_arrMasses;
}
//ds accessors
//ds allocates the particle arrays and initializes them: uniform positions,
//ds normally distributed velocities rescaled so the total kinetic energy
//ds equals p_dTargetKineticEnergy, zero accelerations, uniform mass
void CCubicDomain::createParticlesUniformFromNormalDistribution( const float& p_dTargetKineticEnergy, const float& p_fParticleMass )
{
//ds allocate arrays (linear since we're using CUDA)
m_arrPositions = new float[m_uNumberOfParticles*3];
m_arrVelocities = new float[m_uNumberOfParticles*3];
m_arrAccelerations = new float[m_uNumberOfParticles*3];
m_arrMasses = new float[m_uNumberOfParticles];
//ds kinetic energy of the freshly drawn velocities, needed for rescaling
float dKineticEnergy( 0.0 );
//ds set particle information for each
for( unsigned int u = 0; u < m_uNumberOfParticles; ++u )
{
//ds set the particle mass (same for all particles in this case)
m_arrMasses[u] = p_fParticleMass;
//ds set the position: uniformly distributed between boundaries in this case
m_arrPositions[3*u+0] = _getUniformlyDistributedNumber( );
m_arrPositions[3*u+1] = _getUniformlyDistributedNumber( );
m_arrPositions[3*u+2] = _getUniformlyDistributedNumber( );
//ds set velocity values: from normal distribution
m_arrVelocities[3*u+0] = _getNormallyDistributedNumber( );
m_arrVelocities[3*u+1] = _getNormallyDistributedNumber( );
m_arrVelocities[3*u+2] = _getNormallyDistributedNumber( );
//ds set acceleration values: 0
m_arrAccelerations[3*u+0] = 0;
m_arrAccelerations[3*u+1] = 0;
m_arrAccelerations[3*u+2] = 0;
//ds kinetic contribution m/2*|v|^2 — fix: the original computed
//ds pow( sqrt( vx^2+vy^2+vz^2 ), 2 ), a redundant (and lossy)
//ds sqrt/square round trip; sum the squares directly
const float fSpeedSquared = m_arrVelocities[3*u+0]*m_arrVelocities[3*u+0]
+ m_arrVelocities[3*u+1]*m_arrVelocities[3*u+1]
+ m_arrVelocities[3*u+2]*m_arrVelocities[3*u+2];
dKineticEnergy += m_arrMasses[u]/2*fSpeedSquared;
}
//ds calculate the rescaling factor
const float dRescalingFactor( sqrt( p_dTargetKineticEnergy/dKineticEnergy ) );
//ds rescale every velocity component to hit the target kinetic energy
for( unsigned int u = 0; u < m_uNumberOfParticles; ++u )
{
m_arrVelocities[3*u+0] = dRescalingFactor*m_arrVelocities[3*u+0];
m_arrVelocities[3*u+1] = dRescalingFactor*m_arrVelocities[3*u+1];
m_arrVelocities[3*u+2] = dRescalingFactor*m_arrVelocities[3*u+2];
}
}
//ds appends one "X Y Z U V W" line per particle (current position and
//ds velocity) to the in-memory particle stream
void CCubicDomain::saveParticlesToStream( )
{
//ds format: X Y Z U V W
//ds for each particle
for( unsigned int u = 0; u < m_uNumberOfParticles; ++u )
{
//ds get a buffer for snprintf (max possible buffer size checked)
char chBuffer[256];
//ds get the particle stream
std::snprintf( chBuffer, 256, "%f %f %f %f %f %f", m_arrPositions[3*u+0], m_arrPositions[3*u+1], m_arrPositions[3*u+2],
m_arrVelocities[3*u+0], m_arrVelocities[3*u+1], m_arrVelocities[3*u+2] );
//ds append the buffer to our string
m_strParticleInformation += chBuffer;
m_strParticleInformation += "\n";
}
}
//ds appends one "E COMxyz Lxyz Pxyz" line to the in-memory integrals stream,
//ds computing center of mass, angular and linear momentum on the fly
void CCubicDomain::saveIntegralsToStream( const float& p_fTotalEnergy )
{
//ds format: E X Y Z X Y Z X Y Z
//ds buffer for snprintf
char chBuffer[256];
//ds get information - caution, the getters allocate with new[] (we own it)
const float* vecCenterOfMass = getCenterOfMass( );
const float* vecAngularMomentum = getAngularMomentum( );
const float* vecLinearMomentum = getLinearMomentum( );
//ds get the integrals stream — fix: the original passed 100 as the size
//ds despite the 256-byte buffer; use the real capacity
std::snprintf( chBuffer, sizeof( chBuffer ), "%f %f %f %f %f %f %f %f %f %f", p_fTotalEnergy,
vecCenterOfMass[0], vecCenterOfMass[1], vecCenterOfMass[2],
vecAngularMomentum[0], vecAngularMomentum[1], vecAngularMomentum[2],
vecLinearMomentum[0], vecLinearMomentum[1], vecLinearMomentum[2] );
//ds free memory — fix: these arrays come from new[], so scalar delete
//ds was undefined behavior; use delete[]
delete[] vecCenterOfMass;
delete[] vecAngularMomentum;
delete[] vecLinearMomentum;
//ds append the buffer to our string
m_strIntegralsInformation += chBuffer;
m_strIntegralsInformation += "\n";
}
//ds overload: same as above but uses a caller-supplied center of mass
//ds instead of recomputing it
void CCubicDomain::saveIntegralsToStream( const float& p_fTotalEnergy, const float p_vecCenterOfMass[3] )
{
//ds format: E X Y Z X Y Z X Y Z
//ds buffer for snprintf
char chBuffer[256];
//ds get information - caution, the getters allocate with new[] (we own it)
//const float* vecCenterOfMass = getCenterOfMass( );
const float* vecAngularMomentum = getAngularMomentum( );
const float* vecLinearMomentum = getLinearMomentum( );
//ds get the integrals stream — fix: the original passed 100 as the size
//ds despite the 256-byte buffer; use the real capacity
std::snprintf( chBuffer, sizeof( chBuffer ), "%f %f %f %f %f %f %f %f %f %f", p_fTotalEnergy,
p_vecCenterOfMass[0], p_vecCenterOfMass[1], p_vecCenterOfMass[2],
vecAngularMomentum[0], vecAngularMomentum[1], vecAngularMomentum[2],
vecLinearMomentum[0], vecLinearMomentum[1], vecLinearMomentum[2] );
//ds free memory — fix: these arrays come from new[], so scalar delete
//ds was undefined behavior; use delete[]
//delete vecCenterOfMass;
delete[] vecAngularMomentum;
delete[] vecLinearMomentum;
//ds append the buffer to our string
m_strIntegralsInformation += chBuffer;
m_strIntegralsInformation += "\n";
}
//ds writes "<N> <timesteps>" followed by the accumulated particle stream
//ds to the given file (silently does nothing if the file cannot be opened)
void CCubicDomain::writeParticlesToFile( const std::string& p_strFilename, const unsigned int& p_uNumberOfTimeSteps )
{
std::ofstream ofsFile;
ofsFile.open( p_strFilename.c_str( ), std::ofstream::out );
if( ofsFile.is_open( ) )
{
//ds header line first, then the buffered per-particle lines
ofsFile << m_uNumberOfParticles << " " << p_uNumberOfTimeSteps << "\n" << m_strParticleInformation;
}
ofsFile.close( );
}
//ds writes "<timesteps> <dt>" followed by the accumulated integrals stream
//ds to the given file (silently does nothing if the file cannot be opened)
void CCubicDomain::writeIntegralsToFile( const std::string& p_strFilename, const unsigned int& p_uNumberOfTimeSteps, const double& p_dTimeStepSize )
{
std::ofstream ofsFile;
ofsFile.open( p_strFilename.c_str( ), std::ofstream::out );
if( ofsFile.is_open( ) )
{
//ds header line first, then the buffered integrals lines
ofsFile << p_uNumberOfTimeSteps << " " << p_dTimeStepSize << "\n" << m_strIntegralsInformation;
}
ofsFile.close( );
}
//ds raw accessors: expose the internal flat arrays (3 floats per particle for
//ds the first three, 1 per particle for masses) to the CUDA host code;
//ds the domain keeps ownership
float* CCubicDomain::getPositions( )
{
return m_arrPositions;
}
float* CCubicDomain::getVelocities( )
{
return m_arrVelocities;
}
float* CCubicDomain::getAccelerations( )
{
return m_arrAccelerations;
}
float* CCubicDomain::getMasses( )
{
return m_arrMasses;
}
//ds mass-weighted mean position over all particles
//ds returns a heap-allocated array of 3 floats; the caller takes ownership
//ds (allocated with new[], so release with delete[])
const float* CCubicDomain::getCenterOfMass( ) const
{
float* vecCenter = new float[3];
vecCenter[0] = 0.0;
vecCenter[1] = 0.0;
vecCenter[2] = 0.0;
float fMassTotal( 0.0 );
for( unsigned int u = 0; u < m_uNumberOfParticles; ++u )
{
const float fCurrentMass( m_arrMasses[u] );
//ds accumulate the mass-weighted position and the total mass
for( unsigned int k = 0; k < 3; ++k )
{
vecCenter[k] += fCurrentMass*m_arrPositions[3*u+k];
}
fMassTotal += fCurrentMass;
}
//ds normalize by the total mass
for( unsigned int k = 0; k < 3; ++k )
{
vecCenter[k] /= fMassTotal;
}
return vecCenter;
}
//ds total angular momentum L = sum over particles of m*(r x v)
//ds returns a heap-allocated array of 3 floats; the caller takes ownership
//ds (allocated with new[], so release with delete[])
const float* CCubicDomain::getAngularMomentum( ) const
{
float* vecMomentum = new float[3];
vecMomentum[0] = 0.0;
vecMomentum[1] = 0.0;
vecMomentum[2] = 0.0;
for( unsigned int u = 0; u < m_uNumberOfParticles; ++u )
{
const float fCurrentMass( m_arrMasses[u] );
const float* r = &m_arrPositions[3*u];
const float* v = &m_arrVelocities[3*u];
//ds cross product r x v, weighted by the particle mass
vecMomentum[0] += fCurrentMass*( r[1]*v[2] - r[2]*v[1] );
vecMomentum[1] += fCurrentMass*( r[2]*v[0] - r[0]*v[2] );
vecMomentum[2] += fCurrentMass*( r[0]*v[1] - r[1]*v[0] );
}
return vecMomentum;
}
//ds total linear momentum P = sum over particles of m*v
//ds returns a heap-allocated array of 3 floats; the caller takes ownership
//ds (allocated with new[], so release with delete[])
const float* CCubicDomain::getLinearMomentum( ) const
{
float* vecMomentum = new float[3];
vecMomentum[0] = 0.0;
vecMomentum[1] = 0.0;
vecMomentum[2] = 0.0;
for( unsigned int u = 0; u < m_uNumberOfParticles; ++u )
{
const float fCurrentMass( m_arrMasses[u] );
//ds accumulate each mass-weighted velocity component
for( unsigned int k = 0; k < 3; ++k )
{
vecMomentum[k] += fCurrentMass*m_arrVelocities[3*u+k];
}
}
return vecMomentum;
}
//ds draws a uniform number in [-1,1): drand48 returns [0,1), so 2x-1 maps it
float CCubicDomain::_getUniformlyDistributedNumber( ) const
{
//ds drand48 returns [0,1), we need [-1,1) -> therefore 2x[0,1) -> [0,2) -> -1 -> [-1,1)
return static_cast< float >( 2*drand48( )-1 );
}
//ds draws a standard-normally distributed number via the Box-Muller transform
float CCubicDomain::_getNormallyDistributedNumber( ) const
{
//ds fix: Box-Muller requires two independent uniform draws; the original
//ds reused a single number for both the radius and the angle, which does
//ds not produce a normal distribution
float fU1( static_cast< float >( drand48( ) ) );
const float fU2( static_cast< float >( drand48( ) ) );
//ds guard log(0): drand48 can return exactly 0
if( 0.0f == fU1 )
{
fU1 = 1.0e-7f;
}
//ds return the normal one
return static_cast< float >( sqrt( -2*log( fU1 ) )*cos( 2*static_cast< float >( M_PI )*fU2 ) );
}
} //ds namespace NBody
|
2,388 | #include "includes.h"
// Element-wise c = a + b; the addition is repeated 500 times per element
// (redundant work — presumably to inflate kernel runtime for timing; confirm
// with the caller).
// NOTE(review): no bounds guard and no length parameter — the launch must
// supply exactly one thread per element.
__global__ void vecAdd_kernel(float *c, const float* a, const float* b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < 500; i++)
c[idx] = a[idx] + b[idx];
}
2,389 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
//device (1)
// Kernel: adds the two integers pointed to by d1 and d2 into *sum.
// Intended for a single-thread launch.
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
sum[0] = d1[0] + d2[0];
}
//HOST
// HOST: adds two integers given on the command line using a 1-thread kernel.
int main(int argc, char **argv){
int DeviceCount = 0;
int h_d1, h_d2, h_sum; // host values
int *d_d1, *d_d2, *d_sum; // device pointers
// Robustness fix: validate argc before touching argv[1]/argv[2]
// (the original dereferenced them unconditionally).
if(argc < 3){
printf("Uso: %s <entero1> <entero2>\n", argv[0]);
exit(0);
}
h_d1 = atoi(argv[1]);
h_d2 = atoi(argv[2]);
// Initialize CUDA (driver API) and require at least one device.
if(cuInit(0) != 0){
printf("ERROR en la inicializacion de CUDA\n");
exit(0);
}
cuDeviceGetCount(&DeviceCount);
if(DeviceCount == 0){
printf("ERROR, ningun dispositivo compatible con CUDA\n");
exit(0);
}
// Fix: the original sized these allocations with sizeof(pointer);
// allocate sizeof(int) to match what the kernel reads and writes.
cudaMalloc((void**)&d_d1, sizeof(int));
cudaMalloc((void**)&d_d2, sizeof(int));
cudaMalloc((void**)&d_sum, sizeof(int));
// Copy host -> device.
cudaMemcpy(d_d1, &h_d1, sizeof(h_d1), cudaMemcpyHostToDevice);
cudaMemcpy(d_d2, &h_d2, sizeof(h_d2), cudaMemcpyHostToDevice);
// Single-thread launch is enough for one scalar addition.
suma_2_enteros<<<1,1,0,0>>>(d_d1, d_d2, d_sum);
// Copy the result back (device -> host); cudaMemcpy synchronizes.
cudaMemcpy(&h_sum, d_sum, sizeof(h_sum), cudaMemcpyDeviceToHost);
printf("Resultado: %d\n", h_sum);
cudaFree(d_d1);
cudaFree(d_d2);
cudaFree(d_sum);
}
2,390 | #include <chrono>
#include <iostream>
#include <random>
#include <cuda.h>
using std::cout;
using std::endl;
// Naive square-matrix multiply c = a * b (row-major), one thread per output
// element. Fixed: the global row/column must be blockIdx * blockDim +
// threadIdx; the original used blockIdx * gridDim, which only happens to be
// correct when gridDim equals blockDim (as in this file's main, 32 == 32)
// and indexes the wrong elements for any other launch configuration.
// A bounds guard is added so partial tail blocks are safe.
__global__ void multiply_me_GPU(int *a, int *b, int *c, int width) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= width || column >= width) {
        return;
    }
    int sum = 0;
    for (int ii = 0; ii < width; ii++) {
        sum += a[row * width + ii] * b[column + ii * width];
    }
    c[row * width + column] = sum;
}
// Reference CPU implementation of the square-matrix product c = a * b,
// where all three matrices are width x width integers in row-major layout.
void multiply_me_CPU(int *a, int *b, int *c, int width) {
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            int acc = 0;
            for (int k = 0; k < width; k++) {
                acc += a[i * width + k] * b[k * width + j];
            }
            c[i * width + j] = acc;
        }
    }
}
// Benchmark: 1024x1024 integer matrix multiply on GPU vs CPU, with separate
// timing for the host->device transfer and each multiply.
// Fixes: HTML-mangled `©_start` restored to `&copy_start`; "It tool"
// typo; launch-error check after the kernel; GPU result verified against the
// CPU reference.
int main(int argc, char *argv[])
{
	// Side length; must stay divisible by the 32x32 block grid below.
	int N = 1024;
	int *h_a = new int[N * N];
	int *h_b = new int[N * N];
	int *h_c = new int[N * N];
	int *h_c2 = new int[N * N];
	// Random inputs in [0, 50].
	unsigned int seed = std::chrono::system_clock::now().time_since_epoch().count();
	std::mt19937 engine(seed);
	std::uniform_int_distribution<int> distribution(0, 50);
	for (int ii = 0; ii < N * N; ii++) {
		h_a[ii] = distribution(engine);
		h_b[ii] = distribution(engine);
	}
	int *d_a;
	int *d_b;
	int *d_c;
	dim3 nblocks (32,32,1);
	dim3 nthreads(N/nblocks.x, N/nblocks.y, 1);
	// Time allocation + host->device copies with CUDA events.
	float copy_elapsed;
	cudaEvent_t copy_start;
	cudaEvent_t copy_stop;
	cudaEventCreate(&copy_start);
	cudaEventCreate(&copy_stop);
	cudaEventRecord(copy_start, 0);
	cudaMalloc((void**)&d_a, N * N * sizeof(int));
	cudaMalloc((void**)&d_b, N * N * sizeof(int));
	cudaMalloc((void**)&d_c, N * N * sizeof(int));
	cudaMemcpy(d_a, h_a, N * N * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, N * N * sizeof(int), cudaMemcpyHostToDevice);
	cudaEventRecord(copy_stop, 0);
	cudaEventSynchronize(copy_stop);
	cudaEventElapsedTime(&copy_elapsed, copy_start, copy_stop);
	// Time the GPU multiply.
	float GPU_elapsed;
	cudaEvent_t GPU_start;
	cudaEvent_t GPU_stop;
	cudaEventCreate(&GPU_start);
	cudaEventCreate(&GPU_stop);
	cudaEventRecord(GPU_start, 0);
	multiply_me_GPU<<<nblocks, nthreads>>>(d_a, d_b, d_c, N);
	// Surface launch-configuration errors; the original never checked.
	cudaError_t launchStatus = cudaGetLastError();
	if (launchStatus != cudaSuccess) {
		std::cout << "kernel launch failed: " << cudaGetErrorString(launchStatus) << endl;
		return 1;
	}
	cudaEventRecord(GPU_stop, 0);
	cudaEventSynchronize(GPU_stop);
	cudaEventElapsedTime(&GPU_elapsed, GPU_start, GPU_stop);
	cudaMemcpy(h_c, d_c, N * N * sizeof(int), cudaMemcpyDeviceToHost);
	// Time the CPU reference multiply.
	std::chrono::time_point<std::chrono::system_clock> CPU_start, CPU_stop;
	std::chrono::duration<double> CPU_elapsed;
	CPU_start = std::chrono::system_clock::now();
	multiply_me_CPU(h_a, h_b, h_c2, N);
	CPU_stop = std::chrono::system_clock::now();
	CPU_elapsed = CPU_stop - CPU_start;
	// Verify the GPU result against the CPU reference (silent on success).
	for (int ii = 0; ii < N * N; ii++) {
		if (h_c[ii] != h_c2[ii]) {
			std::cout << "mismatch at element " << ii << endl;
			break;
		}
	}
	cout << "It took " << copy_elapsed / 1000.0f << "s to copy to data to the GPU" << endl;
	cout << "It took " << GPU_elapsed / 1000.0f << "s to multiply the matrix on the GPU" << endl;
	cout << "It took " << CPU_elapsed.count() << "s to multiply the matrix on the CPU" << endl;
	delete[] h_a;
	delete[] h_b;
	delete[] h_c;
	delete[] h_c2;
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
|
2,391 | #include <stdio.h>
// Error-checking wrapper for CUDA runtime calls: prints the failure location
// and message, then exits.
// Fixed: the original expanded to `{ ... } while (0)` -- a bare block followed
// by an empty while loop -- because the leading `do` was missing. That
// happens to compile at statement scope, but it breaks the classic
// do { ... } while (0) idiom (e.g. inside an if/else without braces).
#define CSC(call) do { \
	cudaError err = call; \
	if(err != cudaSuccess) { \
		fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", \
			__FILE__, __LINE__, cudaGetErrorString(err)); \
		exit(1); \
	} \
} while (0)
// Grid-stride kernel: multiplies every element of a[0..n) by k in place.
// Correct for any launch configuration, including a single block.
__global__ void kernel(int *a, int n, int k) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
        a[i] *= k;
    }
}
// Doubles an array of 10000 ones on the GPU and prints the result.
int main() {
    const int n = 10000;
    int *a = (int *)malloc(sizeof(int) * n);
    int *dev_a;
    // All-ones input; after the kernel every element should be 2.
    for (int i = 0; i < n; i++) {
        a[i] = 1;
    }
    CSC(cudaMalloc(&dev_a, sizeof(int) * n));
    CSC(cudaMemcpy(dev_a, a, sizeof(int) * n, cudaMemcpyHostToDevice));
    // Small grid is fine: the kernel uses a grid-stride loop.
    kernel<<<dim3(2), dim3(32)>>>(dev_a, n, 2);
    CSC(cudaGetLastError());
    CSC(cudaMemcpy(a, dev_a, sizeof(int) * n, cudaMemcpyDeviceToHost));
    for (int i = 0; i < n; i++) {
        printf("%d ", a[i]);
    }
    printf("\n");
    CSC(cudaFree(dev_a));
    free(a);
    return 0;
}
|
2,392 | #include "includes.h"
/*
* CudaOperations.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
// Computes per-pair Hamiltonian energy contributions for spin configuration
// `index`: energyTempor[w] = spin_i * spin_j * J_ij, where w = j*size + i
// walks the full size x size coupling matrix via a grid-stride loop.
// NOTE(review): despite its name, this kernel allocates nothing.
__global__ void allocHamiltonian(float* devMat, float* devSpins, int index, int size, double* energyTempor) {
	const int stride = blockDim.x * gridDim.x;
	for (int w = threadIdx.x + blockIdx.x * blockDim.x; w < size * size; w += stride) {
		const int i = w % size;
		const int j = w / size;
		energyTempor[w] = (double) (devSpins[i + index * size]
				* devSpins[j + index * size] * devMat[w]);
	}
}
2,393 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
using namespace std;
// Minimal sanity-check kernel: every launched thread prints a greeting.
__global__ void hello(){
    printf("hello?\n");
}
// Element-wise addition of two m x n row-major matrices: d_c = d_a + d_b.
// One thread per element; threads outside the matrix do nothing.
__global__ void mtxAddKernel(int* d_a, int* d_b, int m, int n, int* d_c){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(row >= m || col >= n){
        return;
    }
    const int idx = row * n + col;
    d_c[idx] = d_a[idx] + d_b[idx];
}
// Host wrapper: copies h_a/h_b to the device, launches mtxAddKernel over an
// m x n grid of 32x32 thread blocks, and copies the sum back into h_c.
// Exits with a diagnostic on any CUDA failure.
void mtxAdd(int * h_a, int* h_b, int m, int n, int* h_c){
	int size = m*n*sizeof(int);
	int* d_a, *d_b, *d_c;
	cudaMalloc((void**)&d_a, size);
	cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&d_b, size);
	cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&d_c, size);
	// One 32x32 block per tile, rounded up to cover the whole matrix.
	dim3 dimGrid(int(ceil(n/32.0)), int(ceil(m/32.0)), 1);
	dim3 dimBlock(32, 32, 1);
	mtxAddKernel<<<dimGrid, dimBlock>>>(d_a, d_b, m, n, d_c);
	// Catch launch-configuration errors immediately; the original only saw
	// them indirectly (or not at all) through cudaDeviceSynchronize().
	cudaError_t err = cudaGetLastError();
	if(err == cudaSuccess){
		err = cudaDeviceSynchronize();
	}
	if(err != cudaSuccess){
		printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
		exit(EXIT_FAILURE);
	}
	cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
}
// Reads matrix dimensions m, n from stdin, adds two all-(-1) matrices on the
// GPU, and reports any output element that is not -2.
// Fixes: scanf's return value is now checked (the original proceeded with
// uninitialized m/n on malformed input), and the host buffers are freed.
int main(){
	int m, n;
	if(scanf("%d%d", &m, &n) != 2 || m <= 0 || n <= 0){
		printf("invalid input\n");
		return 1;
	}
	printf("%d,%d\n",m,n);
	int* h_a = (int*)malloc(m*n*sizeof(int));
	int* h_b = (int*)malloc(m*n*sizeof(int));
	int* h_c = (int*)malloc(m*n*sizeof(int));
	// Fill both inputs with -1 so every output element should be -2.
	for(int i = 0; i < m; ++i){
		for(int j = 0; j < n; ++j){
			h_a[i*n+j] = -1;
			h_b[i*n+j] = -1;
		}
	}
	mtxAdd(h_a, h_b, m, n, h_c);
	// Verify: report any element that differs from the expected sum.
	for(int i = 0; i < m; ++i){
		for(int j = 0; j < n; ++j){
			if(h_c[i*n+j]!=-2){
				printf("%d %d %d false\n",i, j, h_c[i*n+j]);
			}
		}
	}
	free(h_a);
	free(h_b);
	free(h_c);
	return 0;
}
|
2,394 | #include "includes.h"
// Backward pass of a correlation (cost-volume) layer with respect to its
// FIRST input, for batch element `item`. One block handles one (y, x, c)
// position of input1; the block's threads cooperate across output channels
// and reduce their partial products in shared memory.
// Preconditions visible from the code: gradInput1 must be pre-allocated and
// zero-filled (the early returns rely on it), and blockDim.x is assumed to
// equal CUDA_NUM_THREADS (from includes.h) -- TODO confirm at the call site.
__global__ void Correlation_backward_input1(int item, float *gradInput1, int nInputChannels, int inputHeight, int inputWidth, float *gradOutput, int nOutputChannels, int outputHeight, int outputWidth, float *rInput2, int pad_size, int kernel_size, int max_displacement, int stride1, int stride2)
{
// n (batch size), c (num of channels), y (height), x (width)
int n = item;
// Map block coordinates to padded-input coordinates of input1.
int y = blockIdx.x * stride1 + pad_size;
int x = blockIdx.y * stride1 + pad_size;
int c = blockIdx.z;
int tch_off = threadIdx.x;
int kernel_rad = (kernel_size - 1) / 2;
int displacement_rad = max_displacement / stride2;
int displacement_size = 2 * displacement_rad + 1;
// Output-space window of positions this input pixel contributed to.
int xmin = (x - kernel_rad - max_displacement) / stride1;
int ymin = (y - kernel_rad - max_displacement) / stride1;
int xmax = (x + kernel_rad - max_displacement) / stride1;
int ymax = (y + kernel_rad - max_displacement) / stride1;
if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
if (xmin > xmax || ymin > ymax) {
// assumes gradInput1 is pre-allocated and zero filled
return;
}
// Clamp the window to the valid output extent.
xmin = max(0,xmin);
xmax = min(outputWidth-1,xmax);
ymin = max(0,ymin);
ymax = min(outputHeight-1,ymax);
// Strides for the padded input2 tensor (laid out H x W x C per batch item).
int pInputWidth = inputWidth + 2 * pad_size;
int pInputHeight = inputHeight + 2 * pad_size;
int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
int pdimxc = pInputWidth * nInputChannels;
int pdimc = nInputChannels;
// Strides for gradOutput (C x H x W per batch item).
int tdimcyx = nOutputChannels * outputHeight * outputWidth;
int tdimyx = outputHeight * outputWidth;
int tdimx = outputWidth;
// Strides for gradInput1 (C x H x W per batch item).
int odimcyx = nInputChannels * inputHeight* inputWidth;
int odimyx = inputHeight * inputWidth;
int odimx = inputWidth;
// Normalizer applied to the reduced sum.
float nelems = kernel_size * kernel_size * nInputChannels;
__shared__ float prod_sum[CUDA_NUM_THREADS];
prod_sum[tch_off] = 0;
// Each thread accumulates over a strided subset of output channels; tc
// encodes the 2D displacement (i2, j2) of input2 relative to input1.
for (int tc = tch_off; tc < nOutputChannels; tc += CUDA_NUM_THREADS) {
int i2 = (tc % displacement_size - displacement_rad) * stride2;
int j2 = (tc / displacement_size - displacement_rad) * stride2;
int indx2 = n * pdimyxc + (y + j2)* pdimxc + (x + i2) * pdimc + c;
float val2 = rInput2[indx2];
for (int j = ymin; j <= ymax; ++j) {
for (int i = xmin; i <= xmax; ++i) {
int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i;
prod_sum[tch_off] += gradOutput[tindx] * val2;
}
}
}
// All partial sums must be written before thread 0 reduces them.
__syncthreads();
if(tch_off == 0) {
float reduce_sum = 0;
for(int idx = 0; idx < CUDA_NUM_THREADS; idx++) {
reduce_sum += prod_sum[idx];
}
// Write the normalized gradient back at the unpadded input coordinate.
const int indx1 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size);
gradInput1[indx1] = reduce_sum / nelems;
}
}
2,395 | #include<stdio.h>
#include<iostream>
using namespace std;
/* A sum reduction on the array of ints 'in'.
 * The reduction result is written to the address 'result'.
 * The number of elements to be reduced is given by 'size'.
 *
 * Fixes relative to the original exercise code:
 *  - the sequential accumulation wrote partial_sums[i] (out of bounds for
 *    every i >= N) instead of partial_sums[tid];
 *  - the deliberately-missing __syncthreads() barriers (the exercise asked
 *    for them) are added: one between the load/accumulate phase and the
 *    tree reduction, and one after every tree level.
 */
#define N 8 /* Same as blockDim */
#define tid threadIdx.x
__global__ void reduce(int *in, int *result, int size) {
    __shared__ int partial_sums[N];
    /* Each thread sums elements in[tid], in[tid + N], in[tid + 2*N], ... */
    partial_sums[tid] = in[tid];
    for (int i = tid + N; i < size; i += N) {
        partial_sums[tid] += in[i];
    }
    /* Every partial sum must be visible before the tree reduction starts. */
    __syncthreads();
    /* Tree reduction computes the final sum into partial_sums[0]. */
    for (int d = N/2; d > 0; d >>= 1) {
        if (tid < d) {
            partial_sums[tid] += partial_sums[tid + d];
        }
        /* Barrier is OUTSIDE the divergent branch so all threads reach it;
           each level must complete before the next one reads its results. */
        __syncthreads();
    }
    /* Master thread writes out the result. */
    if (tid == 0) {
        *result = partial_sums[0];
    }
}
|
2,396 | //#include "cuda_runtime.h"
//#include <cuda.h>
//#include <cuda_runtime_api.h>
//#include "device_launch_parameters.h"
#include<stdio.h>
//#include<stdlib.h>
//#include<string.h>
//#include<math.h>
//#include<cutil.h>
#include<iostream>
#define NUM 2048
//////////////////////////////////////////////////////////////////
// Doubles every element of g_x in place, staging each value through shared
// memory.
// Fixes: the shared buffer was indexed with the GLOBAL thread id (up to
// NUM-1 = 2047), overflowing the 512-entry array for every block but the
// first; shared memory must be indexed with the block-local thread id.
// The buffer element type is also widened to unsigned char to match the
// data (2 * 100 = 200 overflows a signed char).
__global__ void
caculate(unsigned char * g_x)
{
	__shared__ unsigned char s[512];
	const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= NUM) return;
	const unsigned int lid = threadIdx.x;   // block-local index into s[]
	s[lid] = g_x[gid];
	s[lid] *= 2;
	g_x[gid] = s[lid];
}
///////////////////////////////////////////////////////////////
// Fills an array of 2048 bytes with the value 100, doubles it on the GPU,
// and prints the timing plus the first and last elements.
int main(int argc,char**argv)
{
// Redirect stdout to a file (disabled)
//freopen("1.txt", "w", stdout);
int SIZE=sizeof(unsigned char);
//----------------------------------------
// Host buffer, initialized to 100 everywhere.
unsigned char *h_x=(unsigned char*)malloc(SIZE*NUM);
for(int i=0;i<NUM;i++)
h_x[i]=100;
//---------------------------
unsigned char *d_x;
cudaMalloc((void**)&d_x,SIZE*NUM);
// Copy the input array from host memory to device memory.
cudaMemcpy(d_x,h_x,SIZE*NUM,cudaMemcpyHostToDevice);
// Launch configuration: 512 threads per block, enough blocks to cover NUM.
dim3 grid;
dim3 block;
block.x=512;
grid.x=(NUM+block.x-1)/block.x;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
double sum=0;
// Timed kernel runs (a single iteration here); per-run times accumulate in sum.
for(int i=0;i<1;i++){
cudaEventRecord(start, 0);
float runTime;
//====================================
caculate<<<grid,block>>>(d_x);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&runTime, start, stop);
printf("kernel error no =[%d]",cudaGetLastError());
printf("time= %f\n ",runTime);
sum+=runTime;
}
printf("aver time= %f\n ",sum);
// One of cudaThreadSynchronize/cudaDeviceSynchronize is required for the
// kernel to show up in Nsight (both left disabled here).
//cudaThreadSynchronize();
//=====================================
//CUT_CHECK_ERROR("Kernel execution failed");
// Copy the output array from device memory back to host memory.
cudaMemcpy(h_x,d_x,SIZE*NUM,cudaMemcpyDeviceToHost);
// Print on the host side.
// NOTE(review): %c prints the bytes as characters (200 is not printable
// ASCII) -- presumably %d was intended; left unchanged here.
//for(int i=0;i<NUM;i++)
printf("h_x[0]=[%c]\n",h_x[0]);
printf("h_x[num-1]=[%c]\n",h_x[NUM-1]);
// Free host and device memory.
free(h_x);
cudaFree(d_x);
printf("press enter to quit:");
getchar();
}
|
2,397 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
// Sums the elements of A (each element read factor_hilos times) into s[0].
// Each block accumulates its threads' contributions into a shared float,
// then folds that block total into the global result.
// Fixes: the original had EVERY thread of EVERY block execute `s[0] = a`
// with no barrier after the atomicAdds -- a data race within the block
// (a not yet final) and across blocks (each block overwriting s[0] with its
// own partial sum). Now a barrier precedes the publish and one thread per
// block atomically adds the block total into s[0], which the host
// zero-initializes before launch.
__global__ void vAdd(float* A, int num_elements, int factor_hilos, float* s){
	// Block-local accumulator in shared memory.
	__shared__ float a;
	if(threadIdx.x == 0) a = 0.0;
	__syncthreads();
	// Global thread index.
	int i = (blockIdx.x * blockDim.x + threadIdx.x);
	if(i < factor_hilos*num_elements){
		atomicAdd(&a, A[i%num_elements]);
	}
	// Wait until every thread in the block has contributed.
	__syncthreads();
	if(threadIdx.x == 0){
		atomicAdd(&s[0], a);
	}
}
// Prints a CUDA error message (tagged with the caller-supplied id `i`)
// when `err` is not cudaSuccess; does nothing otherwise.
void fError(cudaError_t err, int i){
    if(err == cudaSuccess){
        return;
    }
    printf("%d Ha ocurrido un error con codigo: %s\n", i, cudaGetErrorString(err));
}
// Sums the vector [0, 1, ..., 1023] on the GPU via vAdd and prints the
// elapsed kernel time and the resulting sum.
int main(){
//cudaSetDevice(1);
int num_elements = 1024;
int factor_hilos = 1;
// Allocate HOST memory for the input vector.
float * h_A = (float*)malloc(num_elements * sizeof(float));
if(h_A == NULL ){
printf("Error al reservar memoria para los vectores HOST");
exit(1);
}
// Host-side result, zero-initialized (the kernel accumulates into it).
float * h_sum = (float*)malloc(sizeof(float));
h_sum[0] = 0;
// Initialize the vector elements: h_A[i] = i.
for(int i=0; i<num_elements; i++){
h_A[i] = (float)i;
}
cudaError_t err;
int size = num_elements * sizeof(float);
float * d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
fError(err,1);
float * d_sum = NULL;
err = cudaMalloc((void **)&d_sum, sizeof(float));
fError(err, 3);
// Copy inputs to the GPU (DEVICE).
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_sum, h_sum, sizeof(float), cudaMemcpyHostToDevice);
int HilosPorBloque = 256;
int BloquesPorGrid = (factor_hilos*num_elements + HilosPorBloque -1) / HilosPorBloque;
cudaError_t Err;
// Launch the kernel and measure elapsed time with CUDA events.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
vAdd<<<BloquesPorGrid, HilosPorBloque>>>(d_A, num_elements, factor_hilos, d_sum);
Err = cudaGetLastError();
fError(Err,2);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float tiempo_reserva_host;
cudaEventElapsedTime(&tiempo_reserva_host, start, stop);
printf("Tiempo de suma vectores DEVICE: %f\n", tiempo_reserva_host);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy the results back to the CPU.
err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
/*for(int i=0; i<20; i++){
printf("%f ", h_A[i]);
//printf("\n");
}*/
printf("La suma es: %f", h_sum[0]);
}
|
2,398 | //System header
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA header
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Single-precision matrix-vector product: c[id] += sum_k a[k*size+id] * b[k].
// The matrix is walked down columns, so consecutive thread ids in a warp
// touch consecutive addresses (coalesced); the host passes the TRANSPOSED
// matrix, making the math the original A*b.
// Cleanup: the per-iteration `if (id < size)` was redundant -- the early
// return above already guarantees it for the whole loop.
__global__ void CUParaSgemv(const float *a, float *b, float *c,unsigned int size)//valid
{
	unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
	if(size<=id)
		return;
	float temp = 0.0f;
	for(unsigned int k = 0; k < size; k++)
	{
		//Column access - coalesced access
		temp += a[k*size+id] * b[k];
		//Row access (uncoalesced alternative):
		//temp += a[id*size+k] * b[k];
	}
	c[id] += temp;
}
//__global__ void transpose(float *odata, float *idata, int width, int height)
//{
// __shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
//
// unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
// unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
// if((xIndex < width) && (yIndex < height))
// {
// unsigned int index_in = yIndex * width + xIndex;
// block[threadIdx.y][threadIdx.x] = idata[index_in];
// }
//
// __syncthreads();
//
// xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
// yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
// if((xIndex < height) && (yIndex < width))
// {
// unsigned int index_out = yIndex * height + xIndex;
// odata[index_out] = block[threadIdx.x][threadIdx.y];
// }
//
// float temp = 0.0;
// unsigned int idx = blockIdx.x * BLOCK_DIM + threadIdx.x;
//
// if(idx<height){
// for(int i=0;i<width;i++)
// {
// temp = idata[idx*width+i];
// odata[i*]
// }
// }
//
//}
//void easyTranspose(float o_a[],float i_a[],int size)
//{
// int col = size*size;
// for(int i = 0;i<col;i++)
// {
// for(int j=0;j<col;j++)
// o_a[j*col+i]=i_a[i*col+j];
// }
//}
// Reference CPU matrix-vector product: C = A * B, where A is size x size
// (row-major) and B, C are size-element vectors.
void simple_sgemv(float *A, float *B, float *C,unsigned int size) //valid
{
	for(unsigned int row = 0; row < size; row++)
	{
		float acc = 0;
		for(unsigned int col = 0; col < size; col++)
		{
			acc += A[row * size + col] * B[col];
		}
		C[row] = acc;
	}
}
// Error helper: prints the CUDA error (optionally tagged with a context
// string), pauses, and reports failure. Output format matches the
// original's inline checks byte-for-byte.
static bool cudaFailed(cudaError_t status, const char* context)
{
	if(status == cudaSuccess)
		return false;
	if(context != NULL)
		printf("\nCuda Error(%s):%s\n", context, cudaGetErrorString(status));
	else
		printf("\nCuda Error:%s\n", cudaGetErrorString(status));
	system("pause\n");
	return true;
}
// Benchmark driver: times host allocation/initialization, a CPU sgemv
// reference, device allocation, H2D copies, the CUParaSgemv kernel, and the
// D2H copy for a 3*Nodes-square system.
// Cleanup relative to the original: the repeated error-check blocks are
// factored into cudaFailed() (same messages), the large commented-out
// transpose experiment and the unused h_check pointer are removed, and the
// deprecated cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
int main()
{
	//# of nodes(equations); each node has 3-direction displacement
	unsigned int Nodes = 100; //threashold 3500-old/4500-new
	unsigned int ARRAY_SIZE = 3*Nodes;                //Vector Scale
	unsigned int ARRAY_SIZE2 = ARRAY_SIZE*ARRAY_SIZE; //Matrix Scale
	//CPU timing
	clock_t start, finish;           //CPU_sgemv time elapse
	clock_t malloc_start,malloc_fin; //CPU malloc time
	clock_t init_start,init_fin;     //CPU initial time
	clock_t trans_start,trans_fin;   //CPU time on transposing the matrix
	float duration;
	float malloc_duration;
	float init_duration;
	float trans_duration;
	//GPU timing
	float cal_time;
	float cudamalloctime;
	float cudamemcpytime;
	float cudamemcpyout;
	cudaEvent_t d_start, d_stop;
	cudaEvent_t cuda_mallocstart,cuda_mallocfin;
	cudaEvent_t cuda_memcpystart,cuda_memcpyfin;
	cudaEvent_t cuda_memcpyout_start,cuda_memcpyout_fin;
	//Host buffers
	float *h_a;
	float *h_b;
	float *h_c;
	float *h_cpu;
	float *h_atr;
	//Device buffers
	float *d_a;
	float *d_b;
	float *d_c;
	cudaError_t cudaStatus;
	printf("The nodes number is: %d\n",Nodes);
	printf("The total equations number is : %d\n",ARRAY_SIZE);
	printf("Total bytes will be transfered\n");
	printf("\tMatrix A: %d MB\n",ARRAY_SIZE2*4/1000000);
	printf("\tVector b: %d KB\n",ARRAY_SIZE*4/1000);
	printf("Pre-processing in CPU...\n");
	/****** Host allocation (timed) ******/
	malloc_start = clock();
	h_a  =(float*)malloc(sizeof(float)*ARRAY_SIZE2);
	h_b  =(float*)malloc(sizeof(float)*ARRAY_SIZE);
	h_c  =(float*)malloc(sizeof(float)*ARRAY_SIZE);
	h_cpu=(float*)malloc(sizeof(float)*ARRAY_SIZE);
	h_atr=(float*)malloc(sizeof(float)*ARRAY_SIZE2);
	malloc_fin = clock();
	malloc_duration = (float)(malloc_fin - malloc_start) / CLOCKS_PER_SEC;
	printf( "\n%f seconds passed in mallocation\n", malloc_duration);
	printf("\n");
	/****** Initialization (timed) ******/
	init_start = clock();
	srand((int)time(0));
	for(unsigned int i = 0;i<ARRAY_SIZE2;i++){
		h_a[i] = float(i);
	}
	for(unsigned int i = 0;i<ARRAY_SIZE;i++){
		h_b[i] = float(i);
	}
	for(unsigned int i = 0;i<ARRAY_SIZE;i++){
		h_c[i] = float(0);
	}
	for(unsigned int i = 0;i<ARRAY_SIZE;i++){
		h_cpu[i] = float(0);
	}
	//Transpose A (timed) so the kernel's column access is coalesced.
	trans_start = clock();
	for(unsigned int i = 0;i<ARRAY_SIZE;i++){
		for(unsigned int j=0;j<ARRAY_SIZE;j++)
			h_atr[j*ARRAY_SIZE+i]=h_a[i*ARRAY_SIZE+j];
	}
	trans_fin = clock();
	trans_duration = (float)(trans_fin - trans_start) / CLOCKS_PER_SEC;
	printf( "\n%f seconds passed in transpose..\n", trans_duration);
	init_fin = clock();
	init_duration = (float)(init_fin - init_start) / CLOCKS_PER_SEC;
	printf( "\n%f seconds passed in initalizaton\n", init_duration);
	printf("\n");
	printf("******************End Pre-processing.**************\n");
	/****** CPU sgemv (timed) ******/
	start = clock();
	simple_sgemv(h_a,h_b,h_cpu,ARRAY_SIZE);
	finish = clock();
	//NOTE(review): raw clock ticks, not milliseconds -- the CLOCKS_PER_SEC
	//division was commented out in the original; label kept for output
	//compatibility.
	duration = (float)(finish - start) ;// CLOCKS_PER_SEC;
	printf( "\n%f milliseconds passed in CPU_sgemv\n", duration);
	printf("\n");
	printf("Pre-processing in GPU...\n");
	/****** GPU allocation (timed) ******/
	cudaEventCreate(&cuda_mallocstart);
	cudaEventCreate(&cuda_mallocfin);
	cudaEventRecord(cuda_mallocstart,0);
	cudaMalloc((void**)&d_a, sizeof(float)*ARRAY_SIZE2);
	if(cudaFailed(cudaGetLastError(), "cudaMalloc Matrix")) return 0;
	cudaMalloc((void**)&d_b, sizeof(float)*ARRAY_SIZE);
	if(cudaFailed(cudaGetLastError(), "cudaMalloc Vector")) return 0;
	cudaMalloc((void**)&d_c, sizeof(float)*ARRAY_SIZE);
	if(cudaFailed(cudaGetLastError(), "cudaMalloc result")) return 0;
	cudaDeviceSynchronize();
	cudaEventRecord(cuda_mallocfin,0);
	cudaEventSynchronize(cuda_mallocfin);
	cudaEventElapsedTime(&cudamalloctime,cuda_mallocstart,cuda_mallocfin);
	printf( "\n%f milliseconds passed in GPU malloc\n", cudamalloctime );
	/****** Host -> device copies (timed) ******/
	cudaEventCreate(&cuda_memcpystart);
	cudaEventCreate(&cuda_memcpyfin);
	cudaEventRecord(cuda_memcpystart,0);
	//Copy the TRANSPOSED matrix: the kernel's column access then reads
	//consecutive addresses per warp.
	cudaStatus = cudaMemcpy(d_a, h_atr, sizeof(float)*ARRAY_SIZE2, cudaMemcpyHostToDevice);
	if(cudaFailed(cudaStatus, "cudaMemcpy matrix")) return 0;
	cudaStatus = cudaMemcpy(d_b, h_b, sizeof(float)*ARRAY_SIZE, cudaMemcpyHostToDevice);
	if(cudaFailed(cudaStatus, "cudaMemcpy vector")) return 0;
	cudaStatus = cudaMemcpy(d_c, h_c, sizeof(float)*ARRAY_SIZE, cudaMemcpyHostToDevice);
	if(cudaFailed(cudaStatus, "cudaMemcpy result")) return 0;
	cudaDeviceSynchronize();
	cudaEventRecord(cuda_memcpyfin,0);
	cudaEventSynchronize(cuda_memcpyfin);
	cudaEventElapsedTime(&cudamemcpytime,cuda_memcpystart,cuda_memcpyfin);
	printf( "\n%f milliseconds passed in cuda memory copy\n", cudamemcpytime );
	printf("*****************End Pre-processing in GPU********************\n");
	printf("\n*****************A transpose before the calculation********************\n");
	printf("\n*****************End of transpose********************\n");
	/****** Kernel launch (timed) ******/
	printf("\n\nRunning Kernel...\n\n");
	cudaEventCreate(&d_start);
	cudaEventCreate(&d_stop);
	cudaEventRecord(d_start,0);
	//Enough 512-thread blocks to cover every row of the system.
	int nblocks= ARRAY_SIZE/512+1;
	CUParaSgemv<<<nblocks, 512>>>(d_a,d_b,d_c,ARRAY_SIZE);
	cudaDeviceSynchronize();
	cudaEventRecord(d_stop,0);
	cudaEventSynchronize(d_stop);
	cudaEventElapsedTime(&cal_time,d_start,d_stop);
	printf( "\n%f milliseconds passed in GPU_CUParaSgemv\n", cal_time );
	if(cudaFailed(cudaGetLastError(), "GPU calculation")) return 0;
	printf("\n*********Copy Data to Host*********\n");
	/****** Device -> host copy (timed) ******/
	cudaEventCreate(&cuda_memcpyout_start);
	cudaEventCreate(&cuda_memcpyout_fin);
	cudaEventRecord(cuda_memcpyout_start,0);
	cudaStatus = cudaMemcpy(h_c, d_c, sizeof(float)*ARRAY_SIZE, cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize();
	cudaEventRecord(cuda_memcpyout_fin,0);
	cudaEventSynchronize(cuda_memcpyout_fin);
	cudaEventElapsedTime(&cudamemcpyout,cuda_memcpyout_start,cuda_memcpyout_fin);
	printf( "\n%f milliseconds passed in cuda memory copy out\n", cudamemcpyout );
	if(cudaFailed(cudaGetLastError(), NULL)) return 0;
	/****** Cleanup ******/
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	free(h_atr);
	free(h_a);
	free(h_b);
	free(h_c);
	system("pause");
	return 0;
}
|
2,399 | #include "includes.h"
// Tiled matrix multiply C = A * B (row-major floats) with N_THREADS x
// N_THREADS thread blocks, one output element per thread.
// Preconditions (unchanged from the original): the matrix dimensions must be
// multiples of N_THREADS -- there is no edge guard.
__global__ void matrix_2d_mul_float_gpu(float *A, float *B, float *C, int num_rows_A, int num_cols_A, int num_cols_B) {
	// Shared tiles, visible to all threads of the block.
	__shared__ float A_tile[N_THREADS][N_THREADS];
	__shared__ float B_tile[N_THREADS][N_THREADS];
	// Block index
	int bx = blockIdx.x; int by = blockIdx.y;
	// Thread index
	int tx = threadIdx.x; int ty = threadIdx.y;
	// Index of the first sub-matrix of A processed by the block
	int aBegin = num_cols_A * N_THREADS * by;
	// Index of the last sub-matrix of A processed by the block
	int aEnd = aBegin + num_cols_A - 1;
	// Index of the first sub-matrix of B processed by the block
	int bBegin = N_THREADS * bx;
	int bStep = N_THREADS * num_cols_B;
	int aStep = N_THREADS;
	float sum = 0;
	for (int a = aBegin, b = bBegin;a <= aEnd;a += aStep, b += bStep) {
		A_tile[ty][tx] = A[a + num_cols_A * ty + tx];
		// Fixed for coalescing: consecutive tx now read consecutive
		// addresses. The original loaded B[b + num_cols_B*tx + ty], a
		// column-strided (uncoalesced) access; the tile contents are
		// identical either way (B_tile[i][j] == B[b + num_cols_B*i + j]),
		// only which thread performs each load changes.
		B_tile[ty][tx] = B[b + num_cols_B * ty + tx];
		// Synchronize to make sure the tiles are fully loaded
		__syncthreads();
		for (int k = 0; k < N_THREADS; ++k)
			sum += A_tile[ty][k] * B_tile[k][tx];
		// Wait for all threads before overwriting the tiles
		__syncthreads();
	}
	// Write the block sub-matrix to device memory; one element per thread.
	int c = num_cols_B * N_THREADS * by + N_THREADS * bx;
	C[c + num_cols_B * ty + tx] = sum;
}
2,400 | #include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>
const long N = 1000000; // Change array size (may need a long)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// HELPER CODE TO INITIALIZE, PRINT AND TIME
// Shared wall-clock timer state for the benchmarks below.
struct timeval start, end;
// Record the wall-clock start time.
void starttime() {
  gettimeofday( &start, 0 );
}
// Record the stop time and print the elapsed wall-clock milliseconds,
// tagged with label c.
void endtime(const char* c) {
  gettimeofday( &end, 0 );
  double elapsed = ( end.tv_sec - start.tv_sec ) * 1000.0 + ( end.tv_usec - start.tv_usec ) / 1000.0;
  printf("%s: %f ms\n", c, elapsed);
}
// Print a banner for test c and start the timer.
void init(const char* c) {
  printf("***************** %s **********************\n", c);
  // TMC Commenting Out for Class
  printf("Running %s...\n", c);
  starttime();
}
// Stop the timer and report the prime count a found up to N for test c.
// Fixed: `a` is an int but was printed with %ld (undefined behavior for a
// mismatched length modifier); it now uses %d.
void finish(int a, long N, const char* c) {
  endtime(c);
  printf("Done.\n");
  printf("\nThere are %d Prime numbers between 1 and %ld.", a, N);
  printf("***************************************************\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
__global__ void prime(long* a, long high) {
// Prime algorithm
bool check = false;
for(int i = 2; i <= high/2; ++i) {
if(high % i == 0) {
check = true;
break;
}
}
if(check)
++a;
}
*/
// Normal C function to square root values
// Sequentially counts primes in [2, N) by trial division with divisors up
// to candidate/2, adds the count to the incoming tally `a`, and returns it.
// NOTE(review): the upper bound N itself is never tested (the loop condition
// is strict), unlike the GPU path which includes N.
int normal(int a, long N)
{
    for (long candidate = 2; candidate < N; ++candidate)
    {
        long composite = 0;
        for (long d = 2; d <= candidate / 2; ++d)
        {
            if (candidate % d == 0)
            {
                composite = 1;
                break;
            }
        }
        if (composite == 0)
            ++a;
    }
    return a;
}
// GPU function to square root values
// Every thread on every core runs this function
// Device-side primality test: one thread per candidate value.
// Threads whose global index lands in [2, N] trial-divide their candidate
// and atomically increment the counter *a when it is prime; out-of-range
// threads (from the overshooting grid) do nothing.
// NOTE(review): the range here INCLUDES N, while normal() stops at N-1 --
// the two paths can disagree by one when N itself is prime. Confirm intent.
__global__ void gpu_prime(int* a, long N) {
    // blockIdx.x = core #, blockDim.x = threads per core, threadIdx.x = thread #
    long candidate = blockIdx.x * blockDim.x + threadIdx.x;
    if (candidate < 2 || candidate > N) {
        return;
    }
    int composite = 0;
    for (int d = 2; d <= candidate / 2; ++d) {
        if (candidate % d == 0) {
            composite = 1;
            break;
        }
    }
    if (composite == 0) {
        atomicAdd(a, 1);
    }
}
// Host wrapper: counts primes up to N on the GPU, accumulating into *a.
// Fixed: cudaFree takes the device pointer itself; the original passed
// &gpuA (the address of the HOST-side pointer variable), which is an
// invalid argument and leaked the device allocation.
void gpu(int* a, long N) {
	int threadsPerCore = 512; // This can vary, up to 1024
	long numCores = N / threadsPerCore + 1; // overshoot so every candidate gets a thread
	// Device-side counter; seeded with the caller's current tally.
	int* gpuA;
	cudaMalloc(&gpuA, sizeof(int));
	// Note: host<->device transfers are slow; here only 4 bytes each way.
	cudaMemcpy(gpuA, a, sizeof(int), cudaMemcpyHostToDevice);
	// One thread per candidate value.
	gpu_prime<<<numCores, threadsPerCore>>>(gpuA, N);
	// This blocking copy also synchronizes with the kernel.
	cudaMemcpy(a, gpuA, sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(gpuA);
}
// Runs the prime-count benchmark twice -- sequential CPU, then GPU -- and
// prints the timing and count for each.
int main()
{
    // Counter starts at 1 (the original's convention, applied to both runs).
    int a = 1;
    // Test 1: sequential CPU loop
    init ("Normal");
    a = normal(a, N);
    finish(a, N, "Normal");
    // Test 2: GPU
    a = 1;
    init("GPU");
    gpu(&a, N);
    finish(a, N, "GPU");
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.