serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,701 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
using namespace std;
extern int size_space;
extern float *Ex, *Hy;
// Create (or truncate) "Ex.txt" and "Hy.txt" so that later appends in
// save2file() start from empty files.
void file_init()
{
    std::ofstream outEx("Ex.txt", std::ios::out);
    outEx.close();
    std::ofstream outHy("Hy.txt", std::ios::out);
    outHy.close();
}
// Append one snapshot of the Ex (size_space+1 values) and Hy (size_space
// values) field arrays to "Ex.txt" / "Hy.txt". Values are tab-separated;
// each snapshot is followed by a blank line.
void save2file()
{
    std::ofstream outEx("Ex.txt", std::ios::app);
    for (int i = 0; i <= size_space; ++i)
        outEx << Ex[i] << "\t";
    outEx << std::endl << std::endl;
    outEx.close();

    std::ofstream outHy("Hy.txt", std::ios::app);
    for (int i = 0; i < size_space; ++i)
        outHy << Hy[i] << "\t";
    outHy << std::endl << std::endl;
    outHy.close();
}
22,702 | // Vector addition: C = 1/A + 1/B, for arbitrarily long vectors
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o vecAdd vecAdd.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o vecAdd vecAdd.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// Variables
float* h_A; // host vectors
float* h_B;
float* h_C;
float* h_D;
float* d_A; // device vectors
float* d_B;
float* d_C;
// Functions
void RandomInit(float*, long);
// Device code
// Element-wise C[i] = 1/A[i] + 1/B[i] over N elements, using a grid-stride
// loop so any <blocks, threads> configuration covers arbitrarily long
// vectors. Arithmetic is kept in double (1.0/x) to match the CPU reference.
__global__ void VecAdd(const float* A, const float* B, float* C, long N)
{
    // total number of threads in the grid = stride between iterations
    long stride = (long)blockDim.x * gridDim.x;
    for (long i = (long)blockDim.x * blockIdx.x + threadIdx.x; i < N; i += stride) {
        C[i] = 1.0/A[i] + 1.0/B[i];
    }
    // FIX: removed the trailing __syncthreads() — this kernel uses no
    // shared memory, so the barrier served no purpose.
}
// Host code
// Host driver: selects a GPU, reads vector length and launch configuration
// from stdin, times H2D copy / kernel / D2H copy with CUDA events, and
// validates the GPU result against a CPU reference.
int main(void)
{
    int gid;
    cudaError_t err = cudaSuccess;

    printf("Enter the GPU ID: ");
    scanf("%d", &gid);
    printf("%d\n", gid);
    err = cudaSetDevice(gid);
    if (err != cudaSuccess) {
        printf("!!! Cannot select GPU with device ID = %d\n", gid);
        exit(1);
    }
    // FIX: removed the redundant second cudaSetDevice(gid) call.
    printf("Set GPU with device ID = %d\n", gid);

    printf("Vector Addition: C = 1/A + 1/B\n");
    int N;
    printf("Enter the size of the vectors: ");
    // FIX: N is an int, so the conversion must be %d (was %ld — UB).
    scanf("%d", &N);
    printf("%d\n", N);
    long size = (long)N * sizeof(float);

    // Allocate and initialize host vectors
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    RandomInit(h_A, N);
    RandomInit(h_B, N);

    // Launch configuration (threads capped at the hardware limit of 1024)
    int threadsPerBlock;
    printf("Enter the number of threads per block: ");
    scanf("%d", &threadsPerBlock);
    printf("%d\n", threadsPerBlock);
    if (threadsPerBlock > 1024) {
        printf("The number of threads per block must be less than 1024 ! \n");
        exit(0);
    }
    int blocksPerGrid;
    printf("Enter the number of blocks per grid: ");
    scanf("%d", &blocksPerGrid);
    printf("%d\n", blocksPerGrid);
    // FIX: dropped the `blocksPerGrid > 2147483647` check — it compared an
    // int against INT_MAX and could never be true.
    printf("The number of blocks is %d\n", blocksPerGrid);

    // Event timers
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // --- input phase: device allocation + H2D copies ---
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float Intime;
    cudaEventElapsedTime(&Intime, start, stop);
    printf("Input time for GPU: %f (ms) \n", Intime);

    // --- compute phase ---
    cudaEventRecord(start, 0);
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float gputime;
    cudaEventElapsedTime(&gputime, start, stop);
    printf("Processing time for GPU: %f (ms) \n", gputime);
    // FIX: 3*N overflowed int for large N; count flops in double.
    printf("GPU Gflops: %f\n", 3.0*N/(1000000.0*gputime));

    // --- output phase: D2H copy + device frees ---
    cudaEventRecord(start, 0);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float Outime;
    cudaEventElapsedTime(&Outime, start, stop);
    printf("Output time for GPU: %f (ms) \n", Outime);

    float gputime_tot = Intime + gputime + Outime;
    printf("Total time for GPU: %f (ms) \n", gputime_tot);

    // --- CPU reference solution ---
    cudaEventRecord(start, 0);
    h_D = (float*)malloc(size);
    for (long i = 0; i < N; ++i)
        h_D[i] = 1.0/h_A[i] + 1.0/h_B[i];
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float cputime;
    cudaEventElapsedTime(&cputime, start, stop);
    printf("Processing time for CPU: %f (ms) \n", cputime);
    printf("CPU Gflops: %f\n", 3.0*N/(1000000.0*cputime));
    printf("Speed up of GPU = %f\n", cputime/(gputime_tot));

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // --- validation: RMS difference between GPU and CPU results ---
    printf("Check result:\n");
    double sum = 0;
    for (long i = 0; i < N; ++i) {
        // FIX: abs() resolved to the integer overload and truncated the
        // difference to 0; fabs() keeps the fractional part.
        double diff = fabs(h_D[i] - h_C[i]);
        sum += diff*diff;
    }
    sum = sqrt(sum);
    printf("norm(h_C - h_D)=%20.15e\n\n", sum);

    // FIX: release host buffers (they were leaked before).
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_D);
    cudaDeviceReset();
    return 0;
}
// Allocates an array with random float entries.
// Fill data[0..n-1] with pseudo-random floats in [0, 1]
// (uniform draws from rand(), normalized by RAND_MAX).
void RandomInit(float* data, long n)
{
    for (long k = 0; k < n; ++k)
        data[k] = rand() / (float)RAND_MAX;
}
|
22,703 | #include "includes.h"
// Accumulate each image pixel into a down-sampled 3D "cube": the (x, y)
// axes are reduced by scale_xy and the intensity axis by scale_eps.
// dev_cube_wi receives the summed intensities, dev_cube_w the hit counts.
// One thread per pixel of a 2D launch; atomics resolve bin collisions.
__global__ void cubefilling_atomic(const float* image, float *dev_cube_wi, float *dev_cube_w, const dim3 image_size, int scale_xy, int scale_eps, dim3 dimensions_down)
{
    const size_t px = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= image_size.x || py >= image_size.y)
        return;
    const float intensity = image[px + image_size.x * py];
    const size_t bin = (px / scale_xy)
                     + dimensions_down.x * (py / scale_xy)
                     + dimensions_down.x * dimensions_down.y * ((int)intensity / scale_eps);
    atomicAdd(&dev_cube_wi[bin], intensity);
    atomicAdd(&dev_cube_w[bin], 1.0f);
}
22,704 | /**************************************************************
* File: rgb2gray.cu
* Description: CUDA implementation of application that transfers
* color picture to grayscale.
*
* Author: jfhansen
* Last Modification: 28/07/2020
*************************************************************/
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKDIM 16
/* Converts RGBA image to Grayscale image
* When converting image from RGB to grayscale photo,
* the pixels should use the following proportion of red, green and blue:
* I = 0.299f * R + 0.587f * G + 0.114f * B
* Arguments:
* rgbaImage: constant pointer to array of uchar4 holding RGBA values.
* grayImage: pointer to array of chars.
* numrows, numcols: Number of pixel rows and columns */
// One thread per pixel: convert RGBA (uchar4, x=R y=G z=B) to a single
// grayscale byte using the standard luma weights 0.299/0.587/0.114.
__global__ void cuda_rgba_to_grayscale(const uchar4 *const rgbaImage,
unsigned char *const grayImage, int numRows, int numCols)
{
// Get row and column for pixel
unsigned col, row;
col = threadIdx.x + blockDim.x * blockIdx.x;
row = threadIdx.y + blockDim.y * blockIdx.y;
// FIX: the launcher rounds the grid up to BLOCKDIM multiples, so edge
// blocks contain threads outside the image; guard against out-of-bounds
// reads/writes.
if (row >= (unsigned)numRows || col >= (unsigned)numCols)
return;
// Fetch rgba value at pixel and compute its brightness
uchar4 pixel = rgbaImage[row*numCols+col];
unsigned char brightness = (unsigned char)(.299f * pixel.x + .587f * pixel.y + .114f * pixel.z);
grayImage[row*numCols+col] = brightness;
}
// Transfers h_rgbaImage to device, converts RGBA image to grayscale and transfers
// resulting grayscale image to host memory, h_grayImage.
// Launches the grayscale kernel over the whole image with BLOCKDIM x
// BLOCKDIM blocks (grid rounded up to cover numRows x numCols), then
// reports any pending CUDA errors to stdout.
void rgba_to_grayscale(const uchar4 *const d_rgbaImage, unsigned char *const d_grayImage,
size_t numRows, size_t numCols)
{
    const dim3 block(BLOCKDIM, BLOCKDIM, 1);
    const dim3 grid((numCols + BLOCKDIM - 1) / BLOCKDIM,
                    (numRows + BLOCKDIM - 1) / BLOCKDIM,
                    1);
    cuda_rgba_to_grayscale<<<grid, block>>>(d_rgbaImage, d_grayImage, numRows, numCols);
    // drain the error state (cudaGetLastError clears after each read)
    cudaError_t err;
    while ((err = cudaGetLastError()) != cudaSuccess)
        std::cout << "CUDA Error: " << cudaGetErrorString(err) << std::endl;
}
|
22,705 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
// CUDA kernel. Each thread takes care of one element of c
// One thread per element: c[i] = a[i] + b[i] for i < n.
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        c[idx] = a[idx] + b[idx];
}
|
22,706 | ////////////////////////////////////////////////////////////////////////////////
//
// FILE: max_parallel_reduct.cu
// DESCRIPTION: uses parallel reduction to find max element in 1000 num array
// AUTHOR: Dan Fabian
// DATE: 2/23/2020
#include <iostream>
#include <random>
#include <chrono>
using std::cout; using std::endl;
using namespace std::chrono;
// all constants
const int SIZE = 1000;
// kernel function prototypes
__global__ void globalMax(int *vals); // uses global mem
__global__ void interleavingMax(int *vals); // uses interleaving addressing shared mem
__global__ void sequentialMax(int *vals); // uses sequential addressing shared mem
////////////////////////////////////////////////////////////////////////////////
//
// MAIN
// Fills an array with SIZE random ints, runs the sequential-addressing
// max reduction on the GPU, and copies the reduced array back
// (vals[0] then holds the maximum).
int main()
{
    int vals[SIZE];
    int *vals_d;

    // seed the generator from the clock and draw SIZE ints in [0, 1000]
    unsigned int seed = system_clock::now().time_since_epoch().count();
    std::default_random_engine generator(seed);
    std::uniform_int_distribution<int> dist(0, 1000);
    for (int i = 0; i < SIZE; ++i)
        vals[i] = dist(generator);

    // push the values to the device
    const int bytes = sizeof(int) * SIZE;
    cudaMalloc((void**)&vals_d, bytes);
    cudaMemcpy(vals_d, vals, bytes, cudaMemcpyHostToDevice);

    // run one of the reduction variants (others kept for comparison)
    //globalMax<<<1, SIZE / 2>>>(vals_d);
    //interleavingMax<<<1, SIZE>>>(vals_d);
    sequentialMax<<<1, SIZE>>>(vals_d);

    // copy the (partially reduced) array back to the host
    cudaMemcpy(vals, vals_d, bytes, cudaMemcpyDeviceToHost);
    // print max
    //cout << vals[0] << endl;
    cudaFree(vals_d);
}
////////////////////////////////////////////////////////////////////////////////
//
// KERNEL functions
////////////////////////////////////////
// global memory implementation
// Global-memory max reduction: repeatedly folds the top half of the live
// range [0, n) onto the bottom half, leaving the maximum in vals[0].
// Assumes a single-block launch of at least SIZE/2 threads
// (__syncthreads() only synchronizes one block).
// FIX: the original interleaved scheme (elemIdx = idx*i*2) read
// vals[elemIdx + i] past the end of the array and dropped tail elements
// whenever SIZE is not a power of two (SIZE = 1000 here); ceil-halving
// handles any SIZE without out-of-bounds access.
__global__ void globalMax(int *vals)
{
    unsigned int idx = threadIdx.x;
    for (unsigned int n = SIZE; n > 1; n = (n + 1) / 2)
    {
        unsigned int half = (n + 1) / 2; // ceil(n/2)
        if (idx + half < n)              // fold upper half onto lower half
            vals[idx] = vals[idx] < vals[idx + half] ?
                        vals[idx + half] : vals[idx];
        __syncthreads();
    }
}
////////////////////////////////////////
// interleaving addressing shared memory implementation
// Interleaved-addressing shared-memory max reduction.
// Assumes a single block of SIZE threads (SIZE <= 1024).
// FIX: for non-power-of-two SIZE the original read vals_s[elemIdx + i]
// out of bounds and lost the contribution of the tail elements. The
// shared array is now padded up to the next power of two (1024) with a
// duplicate of vals[0] — duplicating an existing value cannot change a
// max — so the interleaved tree is exact for SIZE = 1000.
__global__ void interleavingMax(int *vals)
{
    const unsigned int PADDED = 1024; // next power of two >= SIZE
    static __shared__ int vals_s[1024];
    unsigned int idx = threadIdx.x;

    // load one element per thread, then pad the tail
    vals_s[idx] = vals[idx];
    if (idx < PADDED - SIZE)
        vals_s[SIZE + idx] = vals[0];
    __syncthreads();

    // interleaved reduction over the padded power-of-two range
    unsigned int elemIdx;
    for (unsigned int i = 1; i < PADDED; i *= 2)
    {
        elemIdx = idx * i * 2;
        if (elemIdx < PADDED)
            vals_s[elemIdx] = vals_s[elemIdx] < vals_s[elemIdx + i] ?
                              vals_s[elemIdx + i] : vals_s[elemIdx];
        __syncthreads();
    }
    // transfer max val to global mem
    if (idx == 0) vals[0] = vals_s[0];
}
////////////////////////////////////////
// sequential addressing shared memory implementation
// Sequential-addressing shared-memory max reduction.
// Assumes a single block of SIZE threads (SIZE <= 1024).
// FIX: halving from SIZE/2 breaks for non-power-of-two SIZE — e.g. with
// SIZE = 1000 the stride sequence 500, 250, 125, 62, ... never combines
// element 124, so its value could be silently lost. Padding the shared
// array to 1024 with a duplicate of vals[0] (duplicates cannot change a
// max) makes the power-of-two halving exact.
__global__ void sequentialMax(int *vals)
{
    const unsigned int PADDED = 1024; // next power of two >= SIZE
    static __shared__ int vals_s[1024];
    unsigned int idx = threadIdx.x;

    // load one element per thread, then pad the tail
    vals_s[idx] = vals[idx];
    if (idx < PADDED - SIZE)
        vals_s[SIZE + idx] = vals[0];
    __syncthreads();

    // sequential-addressing reduction over the padded range
    for (unsigned int i = PADDED / 2; i > 0; i /= 2)
    {
        if (idx < i)
            vals_s[idx] = vals_s[idx] < vals_s[idx + i] ?
                          vals_s[idx + i] : vals_s[idx];
        __syncthreads();
    }
    // transfer max val to global mem
    if (idx == 0) vals[0] = vals_s[0];
}
|
22,707 | #include "includes.h"
// Copy C channel values between two channel-strided slices:
// out[c * slicesizeout] = in[c * slicesizein] for each channel c.
__device__ __forceinline__ void copy_c(float const *in, float *out, int slicesizein, int slicesizeout, int C) {
    for (int ch = 0; ch < C; ++ch)
        out[ch * slicesizeout] = in[ch * slicesizein];
}
// Accumulate C channel values between two channel-strided slices:
// out[c * slicesizeout] += in[c * slicesizein] for each channel c.
__device__ __forceinline__ void add_c(float const *in, float *out, int slicesizein, int slicesizeout, int C) {
    for (int ch = 0; ch < C; ++ch)
        out[ch * slicesizeout] += in[ch * slicesizein];
}
// Flat offset of pixel (x, y) in the z-th C-channel X*Y plane stack.
// Note: the Z parameter is accepted but unused in the computation.
__device__ __forceinline__ int get_index(int X, int Y, int Z, int C, int x, int y, int z) {
    return x + X * y + (C * X * Y) * z;
}
// For each pixel (x, y) of an X*Y image with C channels, pull the value at
// the shifted location (x+dx, y+dy): accumulate into `out` when beta > 0,
// otherwise overwrite. One thread per pixel (flat 1D launch); pixels whose
// source lies outside the image are left untouched.
__global__ void unshift_kernel(float const *in, float *out, int X, int Y, int C, int dx, int dy, float const beta) {
    int flat = threadIdx.x + blockDim.x * blockIdx.x;
    int row = flat / X;
    int col = flat % X;
    int src_col = col + dx;
    int src_row = row + dy;
    bool inside = col < X && row < Y &&
                  src_col >= 0 && src_col < X &&
                  src_row >= 0 && src_row < Y;
    if (!inside)
        return;
    float const *src = in + get_index(X, Y, 1, C, src_col, src_row, 0);
    float *dst = out + get_index(X, Y, 1, C, col, row, 0);
    if (beta > 0)
        add_c(src, dst, X * Y, X * Y, C);
    else
        copy_c(src, dst, X * Y, X * Y, C);
}
22,708 | ///
/// vecAddKernel00.cu
/// For CSU CS575 Spring 2011
/// Instructor: Wim Bohm
/// Based on code from the CUDA Programming Guide
/// By David Newman
/// Created: 2011-02-16
/// Last Modified: 2011-02-16 DVN
///
/// This Kernel adds two Vectors A and B in C on GPU
/// with coalesced memory access.
///
#include <stdio.h>
#include <stdlib.h>
// Adds vectors A and B into C with coalesced, grid-stride accesses.
// N here is the number of elements handled PER THREAD, so the total
// length processed is blockDim.x * gridDim.x * N.
__global__ void AddVectors(const float* A, const float* B, float* C, int N)
{
    const int stride = blockDim.x * gridDim.x;
    const int total = stride * N;
    for (int i = threadIdx.x + (blockDim.x * blockIdx.x); i < total; i += stride)
        C[i] = A[i] + B[i];
}
22,709 | #include <cstdlib>
#include <cstring>
#include <cstdio>
// One thread per element: C[i] = A[i] + B[i] for i < N.
__global__ void vecadd(float *A, float *B, float *C, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N)
        C[idx] = A[idx] + B[idx];
}
// Host driver: builds two test vectors (length from argv[1], default 1024),
// adds them on the GPU, and prints inputs and results.
int main(int argc, char *argv[])
{
    size_t n = 1024;
    if (argc > 1)
        n = atoi(argv[1]);

    size_t size = n * sizeof(float);
    float *h_a = static_cast<float *>(malloc(size));
    float *h_b = static_cast<float *>(malloc(size));
    float *h_c = static_cast<float *>(malloc(size));
    // FIX: guard against allocation failure before writing into buffers.
    if (!h_a || !h_b || !h_c) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (size_t i = 0; i < n; i++) {
        h_a[i] = i * 2.0f;
        h_b[i] = i * 2.0f + 1.0;
        printf("%.2f %.2f, ", h_a[i], h_b[i]);
    }
    printf("\n");

    float *da, *db, *dc;
    cudaMalloc(&da, size);
    cudaMalloc(&db, size);
    cudaMalloc(&dc, size);
    cudaMemcpy(da, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, h_b, size, cudaMemcpyHostToDevice);

    int threadPerBlock = 256;
    int blockPerGrid = (n + threadPerBlock - 1) / threadPerBlock; // ceil-div
    vecadd<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n);
    cudaMemcpy(h_c, dc, size, cudaMemcpyDeviceToHost);

    for (size_t i = 0; i < n; i++)
        printf("%.2f ", h_c[i]);
    printf("\n");

    // FIX: the original called cudaFree(&da) etc. — passing the ADDRESS of
    // the host pointer variable instead of the device pointer it holds.
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
22,710 | #include "includes.h"
// Multiply every pixel of an 8-bit image by the integer scalar `alpha`.
// One thread per pixel of a 2D launch; the int product is narrowed back
// to unsigned char on store (i.e. it wraps modulo 256 on overflow).
__global__ void matByConst(unsigned char *img, unsigned char *result, int alpha, int cols, int rows) {
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (y >= rows || x >= cols)
        return;
    const int idx = y * cols + x;
    result[idx] = img[idx] * alpha;
}
22,711 | #include <stdio.h>
// Prints one greeting per thread, tagged with its block and thread index.
// Device-side printf output is buffered and surfaces at the next
// synchronization point on the host.
__global__ void helloWorld(){
printf("Hello World from (block=%d,thread=%d)\n",blockIdx.x,threadIdx.x);
}
// Launches 3 blocks of 2 threads (6 greetings total), then waits for the
// device so the kernel's printf output is flushed before the program exits.
int main(){
helloWorld<<<3,2>>>();
cudaDeviceSynchronize();
return 0;
}
|
22,712 | #include<iostream>
using namespace std;
#include <time.h>
// Block-wise sum reduction: each 1024-thread block sums its slice of
// d_array in shared memory and writes the partial sum to d_out[blockIdx.x].
// Launch with 1024 threads per block and 1024*sizeof(float) dynamic shared
// bytes (the stride sequence 512..1 assumes blockDim.x == 1024).
// FIX: the original used `continue` to skip the add while relying on the
// barrier at the top of the next iteration; __syncthreads() must be
// reached uniformly by the whole block, so the barrier is now
// unconditional and only the add itself is guarded.
__global__ void Array_Add(float* d_out, float* d_array, float Size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    extern __shared__ float sh_array[];

    // stage this block's slice (tail threads past Size load nothing; their
    // slots are never read because every add is bounds-guarded)
    if (id < Size)
        sh_array[tid] = d_array[id];
    __syncthreads();

    // halve the active range each pass: 512, 256, ..., 1
    for (int s = 512; s > 0; s /= 2)
    {
        if (tid < s && id < Size && id + s < Size)
            sh_array[tid] += sh_array[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        d_out[bid] = sh_array[0];
}
// Sums h_array on the GPU using two rounds of 1024-wide block reductions
// (Array_Add), then folds the remaining partial sums on the host.
// Returns the total; also reports the measured kernel time.
float Find_Sum_GPU(float h_array[], int Size)
{
    clock_t start, end;
    int sub_size = (int)ceil(Size*1.0/1024);       // partials after round 1
    int final_size = (int)ceil(sub_size*1.0/1024); // partials after round 2
    float* d_array, *d_out, *d_sum;
    cudaMalloc((void**)&d_array, Size*sizeof(float));
    cudaMalloc((void**)&d_out, sub_size*sizeof(float));
    cudaMalloc((void**)&d_sum, final_size*sizeof(float));
    cudaMemcpy(d_array, h_array, sizeof(float) * Size, cudaMemcpyHostToDevice);
    float *h_sum = (float*)malloc(final_size * sizeof(float));

    start = clock();
    Array_Add <<<ceil(Size*1.0/1024), 1024, 1024*sizeof(float)>>> (d_out, d_array, Size);
    Array_Add <<<final_size, 1024, 1024*sizeof(float)>>> (d_sum, d_out, ceil(Size*1.0/1024));
    // FIX: kernel launches are asynchronous; without this synchronization
    // the clock() below measured only launch overhead, not execution time.
    cudaDeviceSynchronize();
    end = clock();

    cudaMemcpy(h_sum, d_sum, final_size*sizeof(float), cudaMemcpyDeviceToHost);
    float sum = h_sum[0];
    for(int i=1; i<final_size; i++)
        sum += h_sum[i];
    cout << "\nThe time taken by GPU is " << (double)(end-start) << " microseconds\n";
    cudaFree(d_array);
    cudaFree(d_out);
    cudaFree(d_sum);
    free(h_sum); // FIX: was leaked
    return sum;
}
// Element-wise product: d_Prod[i] = d_A[i] * d_B[i] for i < Size.
__global__ void Dot_Product(float* d_A, float* d_B, float* d_Prod, int Size)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx < Size)
        d_Prod[idx] = d_A[idx] * d_B[idx];
}
// Host driver: computes the dot product of two arrays [0, 1, ..., n-1]
// on the GPU (element-wise multiply, then a GPU sum reduction).
int main()
{
    int Array_Size;
    cout << "Enetr the size of the two arrays.\n";
    cin >> Array_Size;
    int Array_Bytes = Array_Size * sizeof(float);

    // NOTE(review): VLAs on the stack; large sizes would be safer on the
    // heap — kept to preserve the original behavior.
    float h_A[Array_Size], h_B[Array_Size], h_Prod[Array_Size];
    for(int i=0; i<Array_Size; i++)
    {
        h_A[i] = (float)i;
        h_B[i] = (float)i;
    }

    float *d_A, *d_B, *d_Prod;
    cudaMalloc((void**)&d_A, Array_Bytes);
    cudaMalloc((void**)&d_B, Array_Bytes);
    cudaMalloc((void**)&d_Prod, Array_Bytes);
    cudaMemcpy(d_A, h_A, Array_Bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, Array_Bytes, cudaMemcpyHostToDevice);

    Dot_Product<<<ceil(Array_Size*1.0/1024), 1024>>>(d_A, d_B, d_Prod, Array_Size);
    cudaMemcpy(h_Prod, d_Prod, Array_Bytes, cudaMemcpyDeviceToHost);

    float Dot_Prod = Find_Sum_GPU(h_Prod, Array_Size);
    cout << "\nThe dot product is " << Dot_Prod << endl;

    // FIX: the original passed the HOST arrays (h_A, h_B, h_Prod) to
    // cudaFree; the device allocations must be freed instead.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Prod);
}
22,713 | #include "includes.h"
// For each vertex i, counts how many other vertices j share at least one
// adjacency-list entry with i, writing the count to d_LCMSize[i].
// d_adjList stores all adjacency lists back to back; d_sizeAdj[i] is the
// exclusive end offset of vertex i's list (the merge-style scan below
// assumes each list is sorted ascending, as its comparisons require).
// FIX: the original called __syncthreads() inside `if (i < n_vertices)`,
// which is undefined behaviour whenever n_vertices is not a multiple of
// the block size (not all threads reach the barrier). The kernel uses no
// shared memory, so the barriers were unnecessary and have been removed.
__global__ void OPT_4_SIZES(int *d_adjList, int *d_sizeAdj, int *d_LCMSize, int n_vertices)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n_vertices)
    {
        int indexUsed = 0;
        // [k, iEnd) is vertex i's slice of d_adjList
        int k = (i > 0) ? d_sizeAdj[i-1] : 0;
        int iEnd = d_sizeAdj[i];
        for (int j = 0; j < n_vertices; j++) {
            if (i == j)
                continue;
            int iStart = k;
            int jStart = (j > 0) ? d_sizeAdj[j-1] : 0;
            int jEnd = d_sizeAdj[j];
            // merge-scan both lists, stopping at the first common element
            int compVec = 0;
            while (iStart < iEnd && jStart < jEnd)
            {
                if (d_adjList[iStart] < d_adjList[jStart])
                    iStart++;
                else if (d_adjList[jStart] < d_adjList[iStart])
                    jStart++;
                else // equal: the lists intersect
                {
                    compVec++;
                    break;
                }
            }
            if (compVec > 0)
                indexUsed++;
        }
        d_LCMSize[i] = indexUsed;
    }
}
22,714 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifdef __NVCC__
#include <cublas_v2.h>
#endif
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
#define THREADS_PER_DIM 32
#define VERBOSE
//#define PROF
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Wall-clock time in microseconds since the Unix epoch (gettimeofday).
long long getCurrentTime() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec * 1000000LL + now.tv_usec;
}
// Abort with a file:line diagnostic when a CUDA API call returned an
// error. Compiled out unless CUDA_ERROR_CHECK is defined; invoke through
// the CudaSafeCall() macro so __FILE__/__LINE__ are filled in.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Report and abort on any pending CUDA error; invoke through the
// CudaCheckError() macro. The cudaDeviceSynchronize() pass also surfaces
// asynchronous kernel errors, at a performance cost — remove it from hot
// paths if needed. Compiled out unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    // More careful checking: wait for the device and re-check, so errors
    // raised during kernel execution are caught here too.
    err = cudaDeviceSynchronize();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Naive dense matrix multiply: one thread per element of the first GPUN
// entries of the DIM x DIM product C = A * B (dA holds GPUN row elements,
// dB the full matrix).
// FIX: the guard was `id <= GPUN`, which wrote dC[GPUN] one element past
// the end of the sizeof(float)*GPUN allocation; it must be `id < GPUN`.
__global__ void mm(float *dA, float *dB, float *dC, int DIM, int N, int GPUN) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < GPUN) {
int i = id / DIM;
int j = id % DIM;
float sum = 0.0f;
for (int k = 0; k < DIM; k++) {
sum += dA[i*DIM+k] * dB[k*DIM+j];
}
dC[id] = sum;
}
}
// Shared-memory tiled matrix multiply with 32x32 tiles; expects a 32x32
// thread block, with (blockIdx.y, blockIdx.x) selecting the output tile.
// NOTE(review): hazards left in place here, to be confirmed/fixed:
//  - the guard `i*DIM+j <= GPUN` is off by one (writes dC[GPUN]);
//  - both __syncthreads() sit inside that guard, which is divergent for
//    partial tiles — undefined behaviour per the CUDA programming guide;
//  - edge tiles load dA/dB without bounds checks when DIM is not a
//    multiple of 32.
__global__ void mm_tiled(float *dA, float *dB, float *dC, int DIM, int N, int GPUN) {
int it, jt, kt, i, j, k;
__shared__ float sA[32][32], sB[32][32];
// (it, jt) => the first element of a specific tile
it = blockIdx.y * 32;
jt = blockIdx.x * 32;
// (i, j) => specific element
i = it + threadIdx.y;
j = jt + threadIdx.x;
if (i*DIM+j <= GPUN) {
float sum = 0.0f;
// per tile loop
for (kt = 0; kt < DIM; kt += 32) {
// copy to shared memory
sA[threadIdx.y][threadIdx.x] = dA[(it+threadIdx.y)*DIM + kt + threadIdx.x];
sB[threadIdx.y][threadIdx.x] = dB[(kt+threadIdx.y)*DIM + jt + threadIdx.x];
__syncthreads();
// two 32x32 small shared (dB[it + 0:31][kt + 0:31], dC[kt+0:31][jt + 0:31]) at this point
for (k = kt; k < kt+32; k++) {
sum += sA[i-it][k-kt] * sB[k-kt][j-jt];
}
__syncthreads();
}
dC[i*DIM+j] = sum;
}
}
extern "C" {
// Entry point (C linkage, called from an external host runtime): multiply
// the rows [start, end] of matrix A by the full matrix B into C, using a
// naive kernel (tiled == 0), the tiled kernel (tiled == 1), or cuBLAS
// (otherwise, when built with nvcc). GPUN is the number of output
// elements handled here; when GPUN <= 0 the call is a no-op.
// PROF builds time each phase with CUDA events; VERBOSE prints the range.
void mmCUDA(float* A, float *B, float *C, int N, int start, int end, int GPUN, int tiled) {
float *dA, *dB, *dC;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In mmCUDA\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
#ifdef PROF
cudaEvent_t startCudaMallocEvent, endCudaMallocEvent;
cudaEvent_t startCudaMemcpyH2DEvent, endCudaMemcpyH2DEvent;
cudaEvent_t startCudaKernelEvent, endCudaKernelEvent;
cudaEvent_t startCudaMemcpyD2HEvent, endCudaMemcpyD2HEvent;
CudaSafeCall(cudaEventCreate(&startCudaMallocEvent));
CudaSafeCall(cudaEventCreate(&endCudaMallocEvent));
CudaSafeCall(cudaEventCreate(&startCudaMemcpyH2DEvent));
CudaSafeCall(cudaEventCreate(&endCudaMemcpyH2DEvent));
CudaSafeCall(cudaEventCreate(&startCudaKernelEvent));
CudaSafeCall(cudaEventCreate(&endCudaKernelEvent));
CudaSafeCall(cudaEventCreate(&startCudaMemcpyD2HEvent));
CudaSafeCall(cudaEventCreate(&endCudaMemcpyD2HEvent));
#endif
#ifdef PROF
CudaSafeCall(cudaEventRecord(startCudaMallocEvent));
#endif
// Device buffers: dA/dC hold only this call's GPUN-element slice,
// dB the full N-element matrix.
CudaSafeCall(cudaMalloc(&dA, sizeof(float) * GPUN));
CudaSafeCall(cudaMalloc(&dB, sizeof(float) * N));
CudaSafeCall(cudaMalloc(&dC, sizeof(float) * GPUN));
#ifdef PROF
CudaSafeCall(cudaEventRecord(endCudaMallocEvent));
CudaSafeCall(cudaEventSynchronize(endCudaMallocEvent));
#endif
#ifdef PROF
CudaSafeCall(cudaEventRecord(startCudaMemcpyH2DEvent));
#endif
CudaSafeCall(cudaMemcpy(dA, A+start, sizeof(float) * GPUN, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(dB, B, sizeof(float) * N, cudaMemcpyHostToDevice));
#ifdef PROF
CudaSafeCall(cudaEventRecord(endCudaMemcpyH2DEvent));
CudaSafeCall(cudaEventSynchronize(endCudaMemcpyH2DEvent));
#endif
#ifdef PROF
CudaSafeCall(cudaEventRecord(startCudaKernelEvent));
#endif
// Kernel selection. The matrix dimension is taken as sqrt(N), i.e. the
// matrices are assumed square.
if (!tiled) {
mm<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dA, dB, dC, ceil(sqrt(N)), N, GPUN);
} else if (tiled == 1){
dim3 block(THREADS_PER_DIM, THREADS_PER_DIM);
dim3 grid(ceil(sqrt(N)/THREADS_PER_DIM), ceil(sqrt(N)/THREADS_PER_DIM));
mm_tiled<<<grid, block>>>(dA, dB, dC, ceil(sqrt(N)), N, N);
} else {
#ifdef __NVCC__
cublasHandle_t handle;
#ifdef PROF
long long start = getCurrentTime();
#endif
cublasCreate(&handle);
float alpha = 1.0F;
float beta = 0.0F;
int lda = sqrt(N), ldb = sqrt(N), ldc = sqrt(N);
#ifdef PROF
long long end = getCurrentTime();
printf("cuBLAS prep: %lf msec\n", (float)(end-start)/1000);
#endif
// cuBLAS is column-major; operands are swapped (B first) so the
// row-major C-style product comes out correctly.
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, sqrt(N), GPUN/sqrt(N), sqrt(N), &alpha, dB, ldb, dA, lda, &beta, dC, ldc);
//http://peterwittek.com/cublas-matrix-c-style.html
//C:mxn = A:mxk X B:kxn
//stat=cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,n,m,k,&a1,d_b,n,d_a,k,&bet,d_c,n);
#ifdef PROF
long long end2 = getCurrentTime();
printf("cuBLAS finish: %lf msec\n", (float)(end2-start)/1000);
#endif
#endif
}
CudaCheckError();
#ifdef PROF
CudaSafeCall(cudaEventRecord(endCudaKernelEvent));
CudaSafeCall(cudaEventSynchronize(endCudaKernelEvent));
#endif
CudaSafeCall(cudaDeviceSynchronize());
#ifdef PROF
CudaSafeCall(cudaEventRecord(startCudaMemcpyD2HEvent));
#endif
CudaSafeCall(cudaMemcpy(C + start, dC, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
#ifdef PROF
CudaSafeCall(cudaEventRecord(endCudaMemcpyD2HEvent));
CudaSafeCall(cudaEventSynchronize(endCudaMemcpyD2HEvent));
#endif
#ifdef PROF
float msecMalloc, msecH2D, msecKernel, msecD2H;
CudaSafeCall(cudaEventElapsedTime(&msecMalloc, startCudaMallocEvent, endCudaMallocEvent));
CudaSafeCall(cudaEventElapsedTime(&msecH2D, startCudaMemcpyH2DEvent, endCudaMemcpyH2DEvent));
CudaSafeCall(cudaEventElapsedTime(&msecKernel, startCudaKernelEvent, endCudaKernelEvent));
CudaSafeCall(cudaEventElapsedTime(&msecD2H, startCudaMemcpyD2HEvent, endCudaMemcpyD2HEvent));
printf("CUDA malloc: %lf msec\n", msecMalloc);
printf("CUDA h2d: %lf msec\n", msecH2D);
printf("CUDA kernel: %lf msec\n", msecKernel);
printf("CUDA d2h: %lf msec\n", msecD2H);
#endif
//for (int i = 0; i < GPUN; i++) {
// printf("C[%d] = %lf\n", start+i, C[start+i]);
//}
CudaSafeCall(cudaFree(dA));
CudaSafeCall(cudaFree(dB));
CudaSafeCall(cudaFree(dC));
}
}
}
|
22,715 | #include "depthconv_cuda_kernel.h"
#include <cstdio>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// Threads per block used by every launch in this file.
const int CUDA_NUM_THREADS = 1024;
// Ceiling division: number of blocks needed to cover N work items.
inline int GET_BLOCKS(const int N) {
  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// Bilinear-interpolation gradient weight: the contribution of the integer
// grid point (h, w) to a sample taken at fractional location
// (argmax_h, argmax_w) on a height x width grid. Returns 0 when the sample
// lies outside the grid, or when (h, w) is not one of the sample's (up to
// four) neighbouring corner points.
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const int h, const int w, const int height,
const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
// sample outside the grid: no gradient flows to any corner
return 0;
}
// clamp to the grid, then take floor to get the low corner
argmax_h = max(argmax_h, (DType)0.0f);
argmax_w = max(argmax_w, (DType)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
// at the top/right edge, collapse both corners onto the last row/column
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
// standard bilinear weights: distance to the opposite corner in each axis
DType weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
// Depth-aware im2col: one thread per output column position
// (c_im, h_col, w_col), n = channels * height_col * width_col threads in
// total (grid-stride via CUDA_KERNEL_LOOP). Each thread copies its
// kernel_h x kernel_w input patch into data_col, scaling every in-bounds
// tap by the depth similarity exp(-|Di - Dval|), where Di is the depth at
// the patch centre and Dval the depth at the tap. Out-of-image taps write
// 0; when the patch centre itself is out of bounds (`valid` false), Di and
// Dval both stay 0 so the weight degrades to exp(0) = 1.
template <typename DType>
__global__ void depthconv_im2col_gpu_kernel(
const int n, const DType *data_im, const DType *data_depth,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int height_col,
const int width_col, DType *data_col) {
// CxHxW --> (khxkw)x(CxHxW)
CUDA_KERNEL_LOOP(index, n) {
// decompose the flat index into (channel, output row, output column)
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// top-left input coordinate of this patch (can be negative due to padding)
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType *data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col;
const DType *data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const DType *data_depth_ptr = data_depth + h_in * width + w_in;
// Di: depth at the (dilated) centre of the patch, if it is inside the image
DType Di = 0.;
bool valid = true;
if ((h_in + dilation_h * (kernel_h - 1) / 2)>=0 &&
w_in + dilation_w * (kernel_w - 1) / 2 >= 0 &&
(h_in + dilation_h * (kernel_h - 1) / 2) < height &&
w_in + dilation_w * (kernel_w - 1) / 2 < width)
Di = data_depth[(h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2];
else
valid = false;
//const DType Di = data_depth[(h_in + (kernel_h - 1) / 2 + dilation_h - 1) * width + (w_in + (kernel_w - 1) / 2 + dilation_w - 1)];
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
DType val = static_cast<DType>(0);
DType Dval = static_cast<DType>(0);
const int h_im = h_in + i * dilation_h;
const int w_im = w_in + j * dilation_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const int map_h = i * dilation_h;
const int map_w = j * dilation_w;
val = data_im_ptr[map_h * width + map_w];
if (valid)
Dval = data_depth_ptr[map_h * width + map_w];
// weight the tap by depth similarity to the patch centre
val *= exp(-abs(Di - Dval));
}
*data_col_ptr = val;
// successive taps are height_col*width_col apart in data_col
data_col_ptr += height_col * width_col;
}
}
}
}
// Host-side launcher for depthconv_im2col_gpu_kernel: derives the output
// spatial size from the convolution geometry and launches one thread per
// (channel, out_y, out_x) element on `stream`. Launch errors are reported
// to stdout.
template <typename DType>
void depthconv_im2col(cudaStream_t stream, const DType *data_im, const DType *data_depth, const int channels,
                      const int height, const int width, const int ksize_h,
                      const int ksize_w, const int pad_h, const int pad_w,
                      const int stride_h, const int stride_w,
                      const int dilation_h, const int dilation_w, DType *data_col) {
  // standard convolution output-size formula (with dilation)
  const int out_h =
      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  const int out_w =
      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  const int total = channels * out_h * out_w;
  depthconv_im2col_gpu_kernel<<<GET_BLOCKS(total), CUDA_NUM_THREADS, 0, stream>>>(
      total, data_im, data_depth, height, width, ksize_h, ksize_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, out_h, out_w,
      data_col);
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in depthconv_im2col: %s\n", cudaGetErrorString(err));
    // TODO(BZ) panic
  }
}
template void depthconv_im2col<float>(
cudaStream_t stream, const float *data_im, const float *data_depth,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w, float *data_col);
/*template void depthconv_im2col<double>(
cudaStream_t stream, const double *data_im, const double *data_depth,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w, double *data_col);*/
// Depth-aware col2im (backward of depthconv_im2col): each thread owns one
// output column position `index` and scatters its kernel_h * kernel_w
// gradient entries from data_col back into grad_im with atomicAdd
// (positions overlap between patches), applying the same exp(-|Di - Dval|)
// depth weighting as the forward pass. `Di` is the depth at the (dilated)
// patch centre; entries whose target pixel is outside the image are
// skipped.
template <typename DType>
__global__ void depthconv_col2im_gpu_kernel(
const int n, const DType *data_col, const DType *data_depth,
const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w, const int height_col,
const int width_col, DType *grad_im) {
CUDA_KERNEL_LOOP(index, n) {
for (int ii = 0; ii < kernel_h * kernel_w; ii++){
// flat position of tap `ii` of patch `index` within data_col
int ii_index = ii + index * kernel_h * kernel_w;
// decompose into (channel c, kernel row i, kernel col j)
const int j = (ii_index / width_col / height_col) % kernel_w;
const int i = (ii_index / width_col / height_col / kernel_w) % kernel_h;
const int c = ii_index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
int w_out = ii_index % width_col;
int h_out = (ii_index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType cur_top_grad = data_col[ii_index];
// input pixel this tap maps back to
const int cur_h = h_in + i * dilation_h;
const int cur_w = w_in + j * dilation_w;
// Di: depth at the (dilated) centre of the patch, if inside the image
DType Di = 0.;
bool valid = true;
if ((h_in + dilation_h * (kernel_h - 1) / 2)>=0 &&
w_in + dilation_w * (kernel_w - 1) / 2 >= 0 &&
(h_in + dilation_h * (kernel_h - 1) / 2) < height &&
w_in + dilation_w * (kernel_w - 1) / 2 < width)
Di = data_depth[(h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2];
else
valid = false;
int cur_bottom_grad_pos =
(c * height + cur_h) * width + cur_w;
int cur_bottom_depth_pos=
(cur_h) * width + cur_w;
// NOTE(review): Dval is read before the bounds check below — when
// cur_h/cur_w are outside the image this indexes data_depth out of
// range even though the atomicAdd is skipped; verify upstream that
// this cannot be hit, or hoist the guard.
DType Dval = 0.;
if (valid)
Dval = data_depth[cur_bottom_depth_pos];
if (cur_h >= 0 && cur_h < height && cur_w >= 0 &&
cur_w < width)
atomicAdd(grad_im + cur_bottom_grad_pos, cur_top_grad * exp(-abs(Di - Dval)));
}
}
}
// Host-side launcher for depthconv_col2im_gpu_kernel: scatters column-buffer
// gradients back into the image gradient for depth-aware convolution.
// Launch errors are reported via cudaGetLastError (kernel launches do not
// return a status directly).
template <typename DType>
void depthconv_col2im(cudaStream_t stream, const DType *data_col,
                      const DType *data_depth, const int channels,
                      const int height, const int width, const int ksize_h,
                      const int ksize_w, const int pad_h, const int pad_w,
                      const int stride_h, const int stride_w,
                      const int dilation_h, const int dilation_w, DType *grad_im) {
  // Effective (dilated) kernel extents determine the output spatial size.
  const int kernel_extent_h = dilation_h * (ksize_h - 1) + 1;
  const int kernel_extent_w = dilation_w * (ksize_w - 1) + 1;
  const int height_col = (height + 2 * pad_h - kernel_extent_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - kernel_extent_w) / stride_w + 1;
  // One logical thread per (channel, out_row, out_col); the kernel itself
  // iterates the kernel window and accumulates into grad_im with atomics.
  const int num_kernels = channels * height_col * width_col;
  depthconv_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0,
                                stream>>>(
      num_kernels, data_col, data_depth, channels, height, width, ksize_h,
      ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
      height_col, width_col, grad_im);
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in depthconv_col2im: %s\n", cudaGetErrorString(err));
    // TODO(BZ) panic
  }
}
template void depthconv_col2im<float>(
cudaStream_t stream, const float *data_col, const float *data_depth,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w, float *grad_im);
/*template void depthconv_col2im<double>(
cudaStream_t stream, const double *data_col, const double *data_depth,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w, double *grad_im);*/
|
22,716 | /**
* @author NageshAC
* @email nagesh.ac.aralaguppe@fau.de
* @create date 2021-08-10 11:44:00
* @modify date 2021-08-10 11:44:00
* @desc Contains error diagnosis functions
*/
#pragma once
#include<iostream>
#include<cstdlib>
#include<cuda_runtime.h>
//************************************************************
// check last kernel launch for error
//************************************************************
static inline void cuda_check_last_kernel(
    std::string const& err
){
    /**
     * @brief Checks the status of the most recent kernel launch and aborts
     *        the process when it failed.
     * @param err caller-supplied context (typically the kernel name)
     * @return does not return on failure: exits with -1
     */
    auto status = cudaGetLastError();
    if(status != cudaSuccess){
        // Insert a separator: the original concatenated the caller context
        // and the CUDA error string with nothing between them.
        std::cout << "Error: CUDA kernel launch: " << err << ": "
            << cudaGetErrorString(status) << std::endl;
        exit(-1);
    }
}
//************************************************************
// Check CUDA API calls for error
//************************************************************
static inline void cuda_status(
    cudaError_t err, std::string message = "----",
    std::string file = " \" this \" ", int line = -1
){
    /**
     * @brief Checks the status of a CUDA API call and aborts on failure.
     * @param err error code returned by the API call
     * @param message message that needs to be included
     * @param file in which file the error has occurred
     * @param line in which line the error has occurred
     * @return does not return on failure: exits with -1
     */
    if(err != cudaSuccess){
        std::cout << "Error: "<<file<<" in line " << line << std::endl
            << "Message: " << message << std::endl << " CUDA API call: "
            << cudaGetErrorString(err) << std::endl;
        exit(-1);
    }
}
22,717 | #include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include <math.h>
#include <cuda.h>
// Reports the streaming-multiprocessor count of the current CUDA device.
int main (int arg, char* argv[]) {
    int device = 0;
    // Check both API calls: the original ignored their return codes, so a
    // missing driver/device would print garbage from an uninitialized prop.
    cudaError_t err = cudaGetDevice(&device);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDevice failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceProp prop;
    err = cudaGetDeviceProperties(&prop, device);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Trailing newline added (the original left the shell prompt glued on).
    printf("Multi Processor Count: %d\n", prop.multiProcessorCount);
    return 0;
}
|
22,718 | #include<cuda.h>
#include<stdio.h>
// C[i] = A[i] + B[i]. The original indexed with threadIdx.x only, yet main
// launches N/256 blocks -- every block redundantly rewrote the same first
// blockDim.x elements and the rest of C was never computed. The global
// index fixes that; the grid in main exactly covers N, so no guard needed.
// The j-loop repeats the identical computation 1000 times purely to
// lengthen kernel runtime for the stream-overlap demonstration.
__global__ void VecAdd(float *A, float *B, float *C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    for(int j = 0; j < 1000; j++)
        C[i] = A[i] + B[i];
}
// C[i] = A[i] * B[i]. Same indexing fix as VecAdd: threadIdx.x alone made
// all blocks write the same first blockDim.x elements; use the global index
// so a multi-block launch covers the whole vector.
// The j-loop only inflates kernel runtime for the stream-overlap demo.
__global__ void VecMul(float *A, float *B, float *C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    for(int j = 0; j < 1000; j++)
        C[i] = A[i] * B[i];
}
// Demonstrates copy/compute overlap: stream1 runs the VecAdd pipeline and
// stream2 the VecMul pipeline, each with its own async H2D copies, kernel
// launch and D2H copy.
int main()
{
    int N = 1024 * 10;
    dim3 threadPerBlock(256);
    dim3 numBlocks(N / threadPerBlock.x);

    cudaStream_t stream1, stream2;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);

    // Pinned host buffers: required for cudaMemcpyAsync to actually overlap.
    float *hA, *hB, *hC, *hAMul, *hBMul, *hCMul;
    cudaMallocHost((void**)&hA, sizeof(float) * N);
    cudaMallocHost((void**)&hB, sizeof(float) * N);
    cudaMallocHost((void**)&hC, sizeof(float) * N);
    cudaMallocHost((void**)&hAMul, sizeof(float) * N);
    cudaMallocHost((void**)&hBMul, sizeof(float) * N);
    cudaMallocHost((void**)&hCMul, sizeof(float) * N);
    for (int i = 0; i < N; i++) {
        hA[i] = i;
        hB[i] = i + 1;
        hC[i] = 0.0f;
        hAMul[i] = i % 10;
        hBMul[i] = i % 10;
        hCMul[i] = 0.0f;
    }

    // Device memory.
    float *A, *B, *C, *AMul, *BMul, *CMul;
    cudaMalloc((void**)&A, sizeof(float) * N);
    cudaMalloc((void**)&B, sizeof(float) * N);
    cudaMalloc((void**)&C, sizeof(float) * N);
    cudaMalloc((void**)&AMul, sizeof(float) * N);
    cudaMalloc((void**)&BMul, sizeof(float) * N);
    cudaMalloc((void**)&CMul, sizeof(float) * N);

    // Stream 1: add pipeline.
    cudaMemcpyAsync(A, hA, sizeof(float)*N, cudaMemcpyHostToDevice, stream1);
    cudaMemcpyAsync(B, hB, sizeof(float)*N, cudaMemcpyHostToDevice, stream1);
    VecAdd<<<numBlocks, threadPerBlock, 0, stream1>>>(A, B, C);
    // Stream 2: multiply pipeline (independent, may overlap with stream 1).
    cudaMemcpyAsync(AMul, hAMul, sizeof(float)*N, cudaMemcpyHostToDevice, stream2);
    cudaMemcpyAsync(BMul, hBMul, sizeof(float)*N, cudaMemcpyHostToDevice, stream2);
    VecMul<<<numBlocks, threadPerBlock, 0, stream2>>>(AMul, BMul, CMul);
    cudaMemcpyAsync(hC, C, sizeof(float)*N, cudaMemcpyDeviceToHost, stream1);
    cudaMemcpyAsync(hCMul, CMul, sizeof(float)*N, cudaMemcpyDeviceToHost, stream2);
    cudaStreamSynchronize(stream1);
    cudaStreamSynchronize(stream2);

    printf("hC = %f, hCMul = %f \n", hC[10], hCMul[12]);

    // Release device memory, pinned host memory and streams -- all of these
    // leaked in the original.
    cudaFree(A); cudaFree(B); cudaFree(C);
    cudaFree(AMul); cudaFree(BMul); cudaFree(CMul);
    cudaFreeHost(hA); cudaFreeHost(hB); cudaFreeHost(hC);
    cudaFreeHost(hAMul); cudaFreeHost(hBMul); cudaFreeHost(hCMul);
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    return 0;
}
|
22,719 | #include "includes.h"
// One "row" of a tissue-interaction solve: `step` threads cooperate on each
// tissue point itp, each accumulating every step-th term of
//   sum_j d_dtt000[|dx|+|dy|+|dz|] * d_qtp000[j] * d_xt[j]
// where coordinates are packed in d_tisspoints as three consecutive
// nnt-long slabs (x, y, z). Partial results are merged into d_rt one slice
// at a time by the barrier loop at the bottom.
// NOTE(review): every partial slice also adds d_xt[itp] (the "unit
// diagonal"), so that term appears to be counted `step` times when
// step > 1 -- confirm this is intended. The barrier loop also assumes all
// `step` slices of one point live in the same thread block.
__global__ void tissueGPU4Kernel(int *d_tisspoints, float *d_dtt000, float *d_qtp000, float *d_xt, float *d_rt, int nnt, int step, float diff)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int itp = i/step;   // tissue point handled by this thread
    int itp1 = i%step;  // which slice of the j-loop this thread sums
    int jtp,ixyz,ix,iy,iz,nnt2=2*nnt,istep;
    float r = 0.;
    if(itp < nnt){
        ix = d_tisspoints[itp];
        iy = d_tisspoints[itp+nnt];
        iz = d_tisspoints[itp+nnt2];
        // Each cooperating thread sums every step-th interaction term; the
        // coordinate-difference sum indexes the precomputed kernel table.
        for(jtp=itp1; jtp<nnt; jtp+=step){
            ixyz = abs(d_tisspoints[jtp]-ix) + abs(d_tisspoints[jtp+nnt]-iy) + abs(d_tisspoints[jtp+nnt2]-iz);
            r -= d_dtt000[ixyz]*d_qtp000[jtp]*d_xt[jtp];
        }
        r /= diff;
        r += d_xt[itp]; //diagonal of matrix has 1s
        // Slice 0 initialises the output; the other slices add in below.
        if(itp1 == 0) d_rt[itp] = r;
    }
    //The following is apparently needed to assure that d_pt000 is incremented in sequence from the needed threads
    // Barriers are uniform across the block (the loop bounds do not depend
    // on thread-local state), so placing __syncthreads() here is safe.
    for(istep=1; istep<step; istep++){
        __syncthreads();
        if(itp1 == istep && itp < nnt) d_rt[itp] += r;
    }
}
22,720 | //==============================================================
// Copyright � 2019 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#include <time.h>
// Returns (end - start) in nanoseconds as a 64-bit integer.
// Uses integer arithmetic throughout: the original multiplied the second
// difference by the double literal 1e9, silently rounding once the value
// exceeded double's 53-bit mantissa.
static long long timediff(struct timespec &start, struct timespec &end)
{
	return (end.tv_sec - start.tv_sec) * 1000000000LL + (end.tv_nsec - start.tv_nsec);
}
// Element-wise vector add C = A + B; one thread per element, with a guard
// for the partially-filled last block.
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
#define lap clock_gettime(CLOCK_MONOTONIC, &now); \
printf("%lld,", timediff(start, now)); \
start = now;
// Times cudaMalloc, `iteration` VecAdd launches, the D2H copy and cleanup,
// printing each phase's duration via the `lap` macro.
// Note: d_A/d_B are intentionally never initialised -- only timing matters.
int main(int argc, char *argv[])
{
    if(argc != 3) {
        fprintf(stderr, "%s vector_size iteration\n", argv[0]);
        exit(-1);
    }
    size_t vector_size = atoll(argv[1]);
    size_t iteration = atoll(argv[2]);
    struct timespec start;
    struct timespec now;
    float *d_A, *d_B, *d_C;
    clock_gettime(CLOCK_MONOTONIC, &start);
    cudaMalloc(&d_A, vector_size*sizeof(float));
    cudaMalloc(&d_B, vector_size*sizeof(float));
    cudaMalloc(&d_C, vector_size*sizeof(float));
    lap;
    for(size_t i = 0; i < iteration; i++) {
        int threadsPerBlock = 256;
        int blocksPerGrid = (vector_size + threadsPerBlock - 1) / threadsPerBlock;
        VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, vector_size);
        // Launch-configuration errors only surface via cudaGetLastError();
        // the original commented this out and would happily time a loop of
        // failed launches.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "Error: %s\n", cudaGetErrorString(err));
            break;
        }
    }
    lap;
    float *Result = (float*) malloc(sizeof(float) * vector_size);
    cudaMemcpy(Result, d_C, vector_size*sizeof(float), cudaMemcpyDeviceToHost);
    lap;
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(Result);
    lap;
    // %zu is the correct conversion for size_t (the original used %lu).
    printf("%zu\n", vector_size);
    return 0;
}
|
22,721 | /*
* Copyright 2016 Henry Lee
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
// Perfect-hash evaluation over a quinary (base-5-style) digit string q of
// length len: accumulates precomputed table entries dp[digit][position][k],
// decrementing the remaining budget k by each digit and stopping as soon as
// the budget is exhausted. Returns the accumulated hash value.
extern "C" __device__ int hash_quinary(unsigned char q[], int len, int k, int *dp)
{
	int total = 0;
	for (int idx = 0; idx < len; idx++)
	{
		// dp is laid out as [digit][position][k] with strides 14*10 and 10.
		total += dp[q[idx] * 14 * 10 + (len - idx - 1) * 10 + k];
		k -= q[idx];
		if (k <= 0)
		{
			break;
		}
	}
	return total;
}
|
22,722 | #include <iostream>
#include <vector>
//#include <cuda.h>
#include <stdio.h>
using namespace std;
///////////////////////////////////////////////////////////////////////////////
// Dump all elements of vec to stderr, space-separated, then a newline.
void print(std::vector<float> &vec)
{
    std::vector<float>::size_type idx = 0;
    while (idx != vec.size()) {
        cerr << vec[idx] << " ";
        ++idx;
    }
    cerr << endl;
}
///////////////////////////////////////////////////////////////////////////////
// Prints one greeting per launched thread (device-printf demo; output is
// serialized and slow -- debugging only).
__global__
void kernelHelloWorld()
{
    printf("Hello world\n");
}
///////////////////////////////////////////////////////////////////////////////
// Squares the first 10 elements of vec in place, serially in one thread.
// NOTE(review): the element count 10 is hard-coded and there is no thread
// indexing -- launch as <<<1,1>>> and guarantee vec holds >= 10 elements.
__global__
void kernelSquare(float *vec)
{
    for (size_t i = 0; i < 10; ++i) {
        vec[i] = vec[i] * vec[i];
    }
}
///////////////////////////////////////////////////////////////////////////////
// Squares vec[i] in place, one element per thread, bounds-checked for the
// grid tail. The printf traces launch geometry (debugging only).
__global__
void kernelParallelSquare(float *vec, int size)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < size) {
        printf("gridDim.x=%i blockDim.x=%i blockIdx.x=%i threadIdx.x=%i i=%i \n", gridDim.x, blockDim.x, blockIdx.x, threadIdx.x, i);
        vec[i] = vec[i] * vec[i];
    }
}
///////////////////////////////////////////////////////////////////////////////
// Serial sum-reduction of vec[1..size) into vec[0]. Intended for a
// single-thread launch (<<<1,1>>>); any wider launch races on vec[0].
__global__
void kernelReduce(float *vec, int size)
{
    for (int i = 1; i < size; ++i) {
        vec[0] += vec[i];
    }
}
///////////////////////////////////////////////////////////////////////////////
// Sum-reduction into vec[0] using one atomicAdd per element.
// Fix: the original let thread 0 atomicAdd vec[0] into itself, which both
// double-counted element 0 and raced with the other threads' updates to
// vec[0]; starting the contributions at i == 1 yields the plain sum.
__global__
void kernelReduceAtomic(float *vec, int size)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i > 0 && i < size) {
        atomicAdd(&vec[0], vec[i]);
    }
}
///////////////////////////////////////////////////////////////////////////////
// One pass of a tree reduction: folds vec[i+half] into vec[i] for i < half;
// when the range has an odd leftover element (i + half + 2 == size for the
// last active lane) it is folded in too. The caller launches this kernel
// repeatedly with shrinking half -- synchronization between passes comes
// from the kernel-launch boundaries.
__global__
void kernelReduceParallel(float *vec, int size, int half)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < half) {
        vec[i] += vec[i+half];
        if ((i == half - 1) && (i + half + 2 == size)) {
            vec[i] += vec[i+half+1];
        }
    }
}
///////////////////////////////////////////////////////////////////////////////
// Single-launch tree reduction into vec[0]: each pass folds the top half of
// the range onto the bottom half, absorbing the odd leftover element into
// the last pair. Requires a single-block launch (no cross-block sync).
// Fix: the original had no barrier between passes, so a pass could read
// values another warp had not yet written (data race whenever the block
// spans more than one warp). The barrier is uniform -- size/half are the
// same in every thread -- so placing __syncthreads() in the loop is safe.
__global__
void kernelReduceParallel2(float *vec, int size)
{
    int half = size / 2;
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    while (half > 0) {
        if (i < half) {
            vec[i] += vec[i+half];
            if ((i == half - 1) && (i + half + 2 == size)) {
                vec[i] += vec[i+half+1];
            }
        }
        __syncthreads();
        size = half;
        half = size / 2;
    }
}
///////////////////////////////////////////////////////////////////////////////
// Builds [0..9], reduces it on the GPU with kernelReduceParallel2 and prints
// the vector before and after (element 0 ends up holding the sum, 45).
int main()
{
    cerr << "Starting" << endl;
    const int NUM = 10;
    vector<float> h_vec(NUM);
    for (int i = 0; i < NUM; ++i) {
        h_vec[i] = i;
    }
    print(h_vec); cerr << endl;

    float *d_array;
    cudaMalloc(&d_array, sizeof(float) * NUM);
    cudaMemcpy(d_array, h_vec.data(), sizeof(float) * NUM, cudaMemcpyHostToDevice);

    // Single launch; the kernel iterates the halving passes internally.
    kernelReduceParallel2<<<1, 5>>>(d_array, 10);
    if (cudaSuccess != cudaGetLastError()) {
        cerr << "kernel didn't run" << endl;
        abort();
    }
    // Use the proper error type (the original stored the enum in an int).
    cudaError_t ret = cudaDeviceSynchronize();
    if (ret != cudaSuccess) {
        cerr << "kernel ran but produced an error" << endl;
        abort();
    }

    cudaMemcpy(h_vec.data(), d_array, sizeof(float) * NUM, cudaMemcpyDeviceToHost);
    print(h_vec); cerr << endl;

    // The original leaked the device buffer.
    cudaFree(d_array);
    cerr << "Finished" << endl;
    return 0;
}
|
22,723 | #include <stdio.h>
#include <cuda_runtime.h>
//will compute local histogram
//assuming passed pointers are adjusted for the thread
//bitpos is the lsb from which to consider numbits towards msb
//will compute local histogram
//assuming passed pointers are adjusted for the thread
//bitpos is the lsb from which to consider numbits towards msb
// localHisto must hold (1 << numBits) pre-zeroed counters; each element's
// key is the numBits-wide field starting at bit `bitpos`.
// (Removed the unused `numBuckets` local and a redundant int cast.)
__device__ void computeLocalHisto(int *localHisto, int *arrElem, int n,
        int numBits, int bitpos) {
    int mask = (1 << numBits) - 1;
    for (int i = 0; i < n; i++) {
        int key = (arrElem[i] >> bitpos) & mask;
        localHisto[key]++;
    }
}
// Debug helper: thread 0 of the block prints n ints from arr on one line
// (surrounded by newlines); all other threads do nothing.
__device__ void dispArr(int *arr, int n) {
    int i;
    //threadId with in a block, DMat doc to start with
    int thId = threadIdx.x;
    if (thId == 0) {
        printf("\n");
        for (i = 0; i < n; i++) {
            printf(" %d ", arr[i]);
        }
        printf("\n");
    }
}
// Cooperatively builds a histogram of numBits-wide keys (field starting at
// bit `bitpos`) from arrElem into aggHisto: threads stride over the input
// and atomicAdd into the shared counters. aggHisto must be pre-zeroed and
// hold (1 << numBits) slots.
// Fix: the original printed `key` once BEFORE assigning it -- an
// uninitialized read; the key is now computed first and traced once.
// (Also removed the unused numBuckets/j locals.)
__device__ void computeAtomicHisto(int *aggHisto, int *arrElem, int numElem,
        int numBits, int bitpos) {
    int mask = (1 << numBits) - 1;
    //thread id within a block
    int threadId = threadIdx.x;
    //number of threads in block
    int nThreads = blockDim.x;
    if (threadId == 0) {
        printf("\ncompute histo:");
        dispArr(arrElem, numElem);
    }
    for (int i = threadId; i < numElem; i += nThreads) {
        int key = (arrElem[i] >> bitpos) & mask;
        printf("\narrElem[%d]:%d key:%d", i, arrElem[i], key);
        atomicAdd(&(aggHisto[key]), 1);
    }
}
// Serial scatter pass of the radix sort: using the exclusive prefix counts
// in aggHisto, copies each element of fromArr into its sorted slot in toArr
// (stable within a key). Must be executed by a single thread -- aggHisto is
// post-incremented without atomics.
__device__ void writeSortedVals(int *aggHisto, int *fromArr, int *toArr,
        int numBits, int bitpos, int n) {
    int i, key;
    int mask = (1 << numBits) - 1;
    printf("\naggHisto: ");
    dispArr(aggHisto, (1<<numBits));
    for (i = 0; i < n; i++) {
        key = (((int)fromArr[i]) >> bitpos) & mask;
        printf("\nkey = %d, aggHisto[%d] = %d", key, key, aggHisto[key]);
        printf("\ntoArr[%d] = fromArr[%d] = %d", aggHisto[key], i, fromArr[i]);
        toArr[aggHisto[key]++] = fromArr[i];
    }
}
// Cooperatively zero `count` ints in arr: each thread of the block clears a
// blockDim.x-strided subset. No trailing barrier -- callers must
// synchronize before reading.
__device__ void zeroedInt(int *arr, int count) {
    const int stride = blockDim.x;
    for (int idx = threadIdx.x; idx < count; idx += stride) {
        arr[idx] = 0;
    }
}
//scan array arr of size n=nThreads, power of 2
// Blelloch work-efficient exclusive scan of arr[0..n) performed by the whole
// block (up-sweep, clear last, down-sweep), then `prev` is added to every
// element so multi-chunk scans can be chained.
// Fix: the original had no barrier between the down-sweep and the bias-add
// loop, so threads could read elements other threads had not finished
// down-sweeping (data race). Also removed a dead `d = 0` initialisation.
__device__ void preSubScan(int *arr, int n, int prev) {
    int i, d, ai, bi, offset, temp;
    //threadId with in a block, DMat doc to start with
    int thId = threadIdx.x;
    //number of threads in blocks
    int nThreads = blockDim.x;
    offset = 1;
    //build sum in place up the tree
    for (d = n>>1; d > 0; d >>=1) {
        __syncthreads();
        if (thId < d) {
            ai = offset*(2*thId+1) - 1;
            bi = offset*(2*thId+2) - 1;
            arr[bi] += arr[ai];
        }
        offset*=2;
    }
    //clear last element
    if (thId == 0) {
        arr[n-1] = 0;
    }
    //traverse down tree & build scan
    for (d = 1; d < n; d *=2) {
        offset = offset >> 1;
        __syncthreads();
        if (thId < d) {
            ai = offset*(2*thId + 1) - 1;
            bi = offset*(2*thId + 2) - 1;
            temp = arr[ai];
            arr[ai] = arr[bi];
            arr[bi] += temp;
        }
    }
    // Barrier before biasing: every thread must see the completed scan.
    __syncthreads();
    for (i = thId; i < n; i+=nThreads) {
        arr[i] += prev;
    }
    __syncthreads();
}
// Debug helper: thread 0 prints n ints from arr.
// NOTE(review): byte-for-byte duplicate of dispArr above -- consider
// removing one of them.
__device__ void d_dispFArr(int *arr, int n) {
    int i;
    //threadId with in a block, DMat doc to start with
    int thId = threadIdx.x;
    if (thId == 0) {
        printf("\n");
        for (i = 0; i < n; i++) {
            printf(" %d ", arr[i]);
        }
        printf("\n");
    }
}
//works efficiently for power of 2
// Exclusive prefix scan of arr[0..n) by the whole block: full nThreads-sized
// chunks are scanned with preSubScan (chained via `prev`), and a shorter
// trailing chunk is scanned serially by thread 0.
__device__ void scan(int *arr, int n) {
    int i, j, prev, next, temp;
    //threadId with in a block, DMat doc to start with
    int thId = threadIdx.x;
    //number of threads in blocks
    int nThreads = blockDim.x;
    //divide the simpred into nThreads blocks,
    //scan each block in parallel, with next iteration using results from prev blocks
    prev = 0;
    next = 0;
    for (i = 0; i < n; i += nThreads) {
        dispArr(arr, n);
        // NOTE(review): arr[i+nThreads-1] reads past the end of arr whenever
        // the remaining chunk is shorter than nThreads -- confirm callers
        // only pass sizes that avoid this.
        next = arr[i+nThreads-1];
        if (n - i >= nThreads) {
            preSubScan(arr + i, nThreads, (i>0?arr[i-1]:0) + prev);
        } else {
            //not power of 2 perform serial scan for others
            //this will be last iteration of loop
            if (thId == 0) {
                // NOTE(review): when n < nThreads this branch runs with
                // i == 0 and the first iteration reads arr[j-1] at j == 0
                // (out of bounds) -- verify.
                for (j = i; j < n; j++) {
                    temp = prev + arr[j-1];
                    prev = arr[j];
                    arr[j] = temp;
                }
            }
        }//end else
        prev = next;
    }//end for
    __syncthreads();
}
//numbits means bits at a time
// Single-block LSD radix sort of d_InArr[0..n), numBits bits per pass.
// Dynamic shared memory layout: [aggHisto: 1<<numBits ints]
// [fromArr: n ints][toArr: n ints] -- the caller's third launch argument
// must provide exactly this much. Requires a one-block launch: there is no
// cross-block synchronization and all data lives in one block's shared mem.
__global__ void radixSort(int *d_InArr, int n, int numBits) {
    int i, j, elemPerThread;
    int localHistoElemCount;
    //get current block number
    int blockId = blockIdx.x;
    //thread id within a block
    int threadId = threadIdx.x;
    //number of threads in block
    int nThreads = blockDim.x;
    //global thread id
    int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ int s[];
    //shared mem space for aggregated histogram
    int *aggHisto = s;
    //shared mem space to copy array to be sorted
    int *fromArr = (int*) &aggHisto[1<<numBits];
    int *toArr = (int *) &fromArr[n];
    int *tempSwap;
    //bucket size
    int bucketSize = 1 << numBits;
    //initialize arrays in shared mem
    for (i = threadId; i < n; i+=nThreads) {
        fromArr[i] = d_InArr[i];
        toArr[i] = 0;
    }
    // NOTE(review): no __syncthreads() between this initialisation and the
    // first histogram pass -- threads may read fromArr slots another thread
    // has not copied yet; a barrier here looks necessary.
    if (threadId == 0) {
        printf("\n fromArray: ");
        d_dispFArr(fromArr, n);
    }
    //for each numbits chunk do following
    for (i = 0; i < sizeof(int)*8; i+=numBits) {
        //reset histogram
        zeroedInt(aggHisto, bucketSize);
        // NOTE(review): zeroedInt has no trailing barrier, so the atomic
        // increments below may race with the zeroing -- verify.
        if (threadId == 0) {
            printf("\n b4 histo fromArr: ");
            d_dispFArr(fromArr, n);
        }
        //aggregate in histogram in shared mem
        computeAtomicHisto(aggHisto, fromArr, n,
                numBits, i);
        if (threadId == 0) {
            printf("\naggHisto, bitpos:%d:", i);
            dispArr(aggHisto, bucketSize);
        }
        //perform scan on aggHisto (assuming power of 2)
        scan(aggHisto, bucketSize);
        __syncthreads();
        if (threadId == 0) {
            //copy values to correct output by a single thread
            writeSortedVals(aggHisto, fromArr, toArr,
                    numBits, i, n);
        }
        __syncthreads();
        if (threadId == 0) {
            printf("\n sorted: ");
            d_dispFArr(toArr, n);
        }
        //toArr contains the sorted arr, for the next iteration point fromArr to this location
        tempSwap = toArr;
        toArr = fromArr;
        fromArr = tempSwap;
    }
    //at this point fromAr will contain sorted arr in mem
    //write this out to device in parallel
    for (i = threadId; i < n; i+=nThreads) {
        d_InArr[i] = fromArr[i];
    }
}
// Host debug helper: print n ints from arr, space-padded, on one line
// (no trailing newline).
void dispFArr(int *arr, int n) {
    for (int idx = 0; idx < n; ++idx) {
        printf(" %d ", arr[idx]);
    }
}
// Sorts a small fixed array on the GPU with the single-block radix sort
// (numBits bits per pass) and prints it before and after.
int main(int argc, char *argv[]) {
    int h_fArr[] = {1, 0, 5, 8, 0, 7, 8, 13, 0, 25, 1, 0, 2};
    int h_n = 13;
    int *d_fArr;
    int numBits = 2;
    printf("\n");
    dispFArr(h_fArr, h_n);
    //allocate mem on device
    cudaMalloc((void **) &d_fArr, sizeof(int)*h_n);
    //copy to device
    cudaMemcpy((void *) d_fArr, (void *) h_fArr, sizeof(int)*h_n, cudaMemcpyHostToDevice);
    // Dynamic shared memory: histogram (1<<numBits) + double buffer (2*h_n).
    radixSort<<<1, 4, (sizeof(int)*(1<<numBits) + sizeof(int)*h_n*2)>>>(d_fArr, h_n, numBits);
    //copy sorted back to host
    cudaMemcpy((void *)h_fArr , (void *) d_fArr, sizeof(int)*h_n, cudaMemcpyDeviceToHost);
    printf("\n");
    dispFArr(h_fArr, h_n);
    // The original leaked the device buffer and kept unused locals
    // (h_fSortedArr, i) around; also return explicitly from main.
    cudaFree(d_fArr);
    return 0;
}
|
22,724 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
typedef struct
{
float posx;
float posy;
float range;
float temp;
}
heatsrc_t;
typedef struct
{
unsigned maxiter; // maximum number of iterations
unsigned resolution; // spatial resolution
int algorithm; // 0=>Jacobi, 1=>Gauss
unsigned visres; // visualization resolution
float *u, *uhelp;
float *uvis;
unsigned numsrcs; // number of heat sources
heatsrc_t *heatsrcs;
}
algoparam_t;
// function declarations
int read_input( FILE *infile, algoparam_t *param );
void print_params( algoparam_t *param );
int initialize( algoparam_t *param );
int finalize( algoparam_t *param );
void write_image( FILE * f, float *u,
unsigned sizex, unsigned sizey );
int coarsen(float *uold, unsigned oldx, unsigned oldy ,
float *unew, unsigned newx, unsigned newy );
__global__ void gpu_Heat (float *h, float *g, float *d,int N);
__global__ void gpu_HeatReduction(float *res, float *res1);
#define NB 8
#define min(a,b) ( ((a) < (b)) ? (a) : (b) )
// Sum of squared point-wise differences (utmp - u)^2 over the interior of a
// sizex x sizey row-major grid (the one-cell boundary ring is excluded).
float cpu_residual (float *u, float *utmp, unsigned sizex, unsigned sizey)
{
    float total = 0.0;
    for (int row = 1; row < sizex - 1; row++) {
        for (int col = 1; col < sizey - 1; col++) {
            float delta = utmp[row * sizey + col] - u[row * sizey + col];
            total += delta * delta;
        }
    }
    return total;
}
// One blocked Jacobi sweep on the CPU: writes the 4-neighbour average of u
// into utmp for every interior point and returns the accumulated squared
// update sum((utmp - u)^2), which the caller treats as the residual.
// The ii/jj loops tile the grid into NB x NB blocks for cache locality.
float cpu_jacobi (float *u, float *utmp, unsigned sizex, unsigned sizey)
{
    float diff, sum=0.0;
    int nbx, bx, nby, by;
    nbx = NB;
    bx = sizex/nbx;
    nby = NB;
    by = sizey/nby;
    for (int ii=0; ii<nbx; ii++)
        for (int jj=0; jj<nby; jj++)
            // Interior points of the current tile, clipped to the grid.
            for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++)
                for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
                    utmp[i*sizey+j]= 0.25 * (u[ i*sizey + (j-1) ]+ // left
                                             u[ i*sizey + (j+1) ]+ // right
                                             u[ (i-1)*sizey + j ]+ // top
                                             u[ (i+1)*sizey + j ]); // bottom
                    diff = utmp[i*sizey+j] - u[i*sizey + j];
                    sum += diff * diff;
                }
    return(sum);
}
// Print command-line usage for the binary named s to stderr.
void usage( char *s )
{
    fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", s);
    fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
}
// Solves the heat equation twice -- sequentially on the CPU and then on the
// GPU -- timing both with CUDA events and writing the final field as a PPM.
// Fixes vs. the original: six mis-encoded "&param" arguments (garbled to
// "¶m"), leaked host/device scratch buffers, GFlop rate computed from the
// CPU iteration count in the GPU report, a redundant full-matrix D2H copy
// every GPU iteration, and the deprecated cudaThreadSynchronize().
int main( int argc, char *argv[] ) {
    unsigned iter;
    FILE *infile, *resfile;
    char *resfilename;

    // algorithmic parameters
    algoparam_t param;
    int np;

    // check arguments
    if( argc < 4 ) {
        usage( argv[0] );
        return 1;
    }

    // check input file
    if( !(infile=fopen(argv[1], "r")) ) {
        fprintf(stderr, "\nError: Cannot open \"%s\" for reading.\n\n", argv[1]);
        usage(argv[0]);
        return 1;
    }

    // check result file
    resfilename="heat.ppm";
    if( !(resfile=fopen(resfilename, "w")) ) {
        fprintf(stderr, "\nError: Cannot open \"%s\" for writing.\n\n", resfilename);
        usage(argv[0]);
        return 1;
    }

    // check input
    if( !read_input(infile, &param) )
    {
        fprintf(stderr, "\nError: Error parsing input file.\n\n");
        usage(argv[0]);
        return 1;
    }

    // full size (param.resolution are only the inner points)
    np = param.resolution + 2;

    int Grid_Dim, Block_Dim; // Grid and Block structure values
    if (strcmp(argv[2], "-t")==0) {
        Block_Dim = atoi(argv[3]);
        Grid_Dim = np/Block_Dim + ((np%Block_Dim)!=0); // ceil-division
        if ((Block_Dim*Block_Dim) > 512) {
            printf("Error -- too many threads in block, try again\n");
            return 1;
        }
    }
    else {
        fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", argv[0]);
        fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
        return 0;
    }

    fprintf(stderr, "\nSolving Heat equation on the CPU and the GPU\n");
    fprintf(stderr, "--------------------------------------------\n");
    print_params(&param);

    fprintf(stdout, "\nExecution on CPU (sequential)\n-----------------------------\n");
    if( !initialize(&param) )
    {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }

    // CUDA events time both the synchronous CPU phase and the async GPU phase.
    float elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );
    cudaEventSynchronize( start );

    iter = 0;
    float residual;
    while(1) {
        residual = cpu_jacobi(param.u, param.uhelp, np, np);
        // Swap the ping-pong buffers.
        float * tmp = param.u;
        param.u = param.uhelp;
        param.uhelp = tmp;
        iter++;
        // solution good enough ?
        if (residual < 0.00005) break;
        // max. iteration reached ? (no limit with maxiter=0)
        if (iter>=param.maxiter) break;
    }

    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsed_time_ms, start, stop );

    // Flop count after iter iterations
    float flop = iter * 11.0 * param.resolution * param.resolution;
    fprintf(stdout, "Time on CPU in ms.= %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
            flop/1000000000.0,
            flop/elapsed_time_ms/1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);
    finalize( &param );

    fprintf(stdout, "\nExecution on GPU\n----------------\n");
    fprintf(stderr, "Number of threads per block in each dimension = %d\n", Block_Dim);
    fprintf(stderr, "Number of blocks per grid in each dimension = %d\n", Grid_Dim);
    if( !initialize(&param) )
    {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }
    dim3 Grid(Grid_Dim, Grid_Dim);
    dim3 Block(Block_Dim, Block_Dim);

    // starting time
    cudaEventRecord( start, 0 );
    cudaEventSynchronize( start );

    float *dev_u, *dev_uhelp, *dev_res, *res, *result, *dev_result;
    res = (float*)calloc(sizeof(float), np*np);
    result = (float*)calloc(sizeof(float), np);
    // Device buffers: field, scratch field, per-cell residuals, per-row sums.
    cudaMalloc( &dev_u, sizeof(float)*(np*np));
    cudaMalloc( &dev_uhelp, sizeof(float)*(np*np));
    cudaMalloc( &dev_res, sizeof(float)*(np*np));
    cudaMalloc( &dev_result, sizeof(float)*np);
    // Copy initial values from host to GPU.
    cudaMemcpy( dev_u, param.u, sizeof(float)*(np*np), cudaMemcpyHostToDevice);
    cudaMemcpy( dev_uhelp, param.uhelp, sizeof(float)*(np*np), cudaMemcpyHostToDevice);
    cudaMemcpy( dev_res, res, sizeof(float)*(np*np), cudaMemcpyHostToDevice);
    cudaMemcpy( dev_result, result, sizeof(float)*(np), cudaMemcpyHostToDevice);

    iter = 0;
    while(1) {
        gpu_Heat<<<Grid,Block>>>(dev_u, dev_uhelp, dev_res, np);
        cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
        // Reduce per-cell residuals on the device; only the np partial sums
        // cross the bus (the original also copied the full np*np residual
        // matrix to the host every iteration and never used it).
        gpu_HeatReduction<<<np,np,np*sizeof(float)>>>(dev_res, dev_result);
        cudaDeviceSynchronize();
        cudaMemcpy( result, dev_result, sizeof(float)*np, cudaMemcpyDeviceToHost);
        float * tmp = dev_u;
        dev_u = dev_uhelp;
        dev_uhelp = tmp;
        iter++;
        float sum = 0.0;
        for(int i=0;i<np;i++) {
            sum += result[i];
        }
        residual = sum;
        // solution good enough ?
        if (residual < 0.00005) break;
        // max. iteration reached ? (no limit with maxiter=0)
        if (iter>=param.maxiter) break;
    }

    // get result matrix from GPU
    cudaMemcpy( param.u, dev_u, sizeof(float)*(np*np), cudaMemcpyDeviceToHost);
    // Free all GPU and host scratch memory (dev_res, dev_result, res and
    // result leaked in the original).
    cudaFree( dev_u ); cudaFree( dev_uhelp );
    cudaFree( dev_res ); cudaFree( dev_result );
    free(res); free(result);

    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsed_time_ms, start, stop );

    // Recompute the flop count from the GPU iteration count (the original
    // reused the stale CPU-phase value here).
    flop = iter * 11.0 * param.resolution * param.resolution;
    fprintf(stdout, "\nTime on GPU in ms. = %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n",
            flop/1000000000.0,
            flop/elapsed_time_ms/1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // for plot...
    coarsen( param.u, np, np,
             param.uvis, param.visres+2, param.visres+2 );
    write_image( resfile, param.uvis,
                 param.visres+2,
                 param.visres+2 );
    finalize( &param );
    return 0;
}
/*
* Initialize the iterative solver
* - allocate memory for matrices
* - set boundary conditions according to configuration
*/
// Allocates the solver's buffers (calloc => zero-initialised) and applies
// every heat source's contribution to the four boundary rows/columns of u:
// temperature falls off linearly with distance inside each source's range.
// Finally copies u into uhelp. Returns 1 on success, 0 on allocation failure.
int initialize( algoparam_t *param )
{
    int i, j;
    float dist;
    // total number of points (including border)
    const int np = param->resolution + 2;
    //
    // allocate memory
    //
    (param->u) = (float*)calloc( sizeof(float),np*np );
    (param->uhelp) = (float*)calloc( sizeof(float),np*np );
    (param->uvis) = (float*)calloc( sizeof(float),
                                    (param->visres+2) *
                                    (param->visres+2) );
    if( !(param->u) || !(param->uhelp) || !(param->uvis) )
    {
        fprintf(stderr, "Error: Cannot allocate memory\n");
        return 0;
    }
    for( i=0; i<param->numsrcs; i++ )
    {
        /* top row: distance is measured in the unit square (coordinates
           normalised by np-1) from the source position to the boundary
           point */
        for( j=0; j<np; j++ )
        {
            dist = sqrt( pow((float)j/(float)(np-1) -
                             param->heatsrcs[i].posx, 2)+
                         pow(param->heatsrcs[i].posy, 2));
            if( dist <= param->heatsrcs[i].range )
            {
                (param->u)[j] +=
                    (param->heatsrcs[i].range-dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }
        /* bottom row */
        for( j=0; j<np; j++ )
        {
            dist = sqrt( pow((float)j/(float)(np-1) -
                             param->heatsrcs[i].posx, 2)+
                         pow(1-param->heatsrcs[i].posy, 2));
            if( dist <= param->heatsrcs[i].range )
            {
                (param->u)[(np-1)*np+j]+=
                    (param->heatsrcs[i].range-dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }
        /* leftmost column (corners already handled by the rows above) */
        for( j=1; j<np-1; j++ )
        {
            dist = sqrt( pow(param->heatsrcs[i].posx, 2)+
                         pow((float)j/(float)(np-1) -
                             param->heatsrcs[i].posy, 2));
            if( dist <= param->heatsrcs[i].range )
            {
                (param->u)[ j*np ]+=
                    (param->heatsrcs[i].range-dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }
        /* rightmost column */
        for( j=1; j<np-1; j++ )
        {
            dist = sqrt( pow(1-param->heatsrcs[i].posx, 2)+
                         pow((float)j/(float)(np-1) -
                             param->heatsrcs[i].posy, 2));
            if( dist <= param->heatsrcs[i].range )
            {
                (param->u)[ j*np+(np-1) ]+=
                    (param->heatsrcs[i].range-dist) /
                    param->heatsrcs[i].range *
                    param->heatsrcs[i].temp;
            }
        }
    }
    // Copy u into uhelp
    float *putmp, *pu;
    pu = param->u;
    putmp = param->uhelp;
    for( j=0; j<np; j++ )
        for( i=0; i<np; i++ )
            *putmp++ = *pu++;
    return 1;
}
/*
 * free used memory
 */
int finalize( algoparam_t *param )
{
    // Release every heap buffer owned by param and null the pointer so a
    // repeated call is a harmless no-op. Always reports success (1).
    float **buffers[] = { &param->u, &param->uhelp, &param->uvis };
    for (size_t k = 0; k < sizeof(buffers)/sizeof(buffers[0]); ++k) {
        if (*buffers[k]) {
            free(*buffers[k]);
            *buffers[k] = 0;
        }
    }
    return 1;
}
/*
 * write the given temperature u matrix to rgb values
 * and write the resulting image to file f
 */
// Fixes vs. the original: min/max are float but were seeded with DBL_MAX
// (out-of-range conversion), and a constant image produced a division by
// zero followed by out-of-bounds indexing into the 1024-entry RGB tables.
void write_image( FILE * f, float *u,
                  unsigned sizex, unsigned sizey )
{
    // RGB table: four 256-entry ramps, hottest (red) at index 1023.
    unsigned char r[1024], g[1024], b[1024];
    int i, j, k;
    float min, max;
    j=1023;
    // prepare RGB table
    for( i=0; i<256; i++ )
    {
        r[j]=255; g[j]=i; b[j]=0;
        j--;
    }
    for( i=0; i<256; i++ )
    {
        r[j]=255-i; g[j]=255; b[j]=0;
        j--;
    }
    for( i=0; i<256; i++ )
    {
        r[j]=0; g[j]=255; b[j]=i;
        j--;
    }
    for( i=0; i<256; i++ )
    {
        r[j]=0; g[j]=255-i; b[j]=255;
        j--;
    }
    // find minimum and maximum (seed with float limits, not double's)
    min=FLT_MAX;
    max=-FLT_MAX;
    for( i=0; i<sizey; i++ )
    {
        for( j=0; j<sizex; j++ )
        {
            if( u[i*sizex+j]>max )
                max=u[i*sizex+j];
            if( u[i*sizex+j]<min )
                min=u[i*sizex+j];
        }
    }
    // Guard a constant image: map everything to the coldest colour instead
    // of dividing by zero.
    float range = (max > min) ? (max - min) : 1.0f;
    fprintf(f, "P3\n");
    fprintf(f, "%u %u\n", sizex, sizey);
    fprintf(f, "%u\n", 255);
    for( i=0; i<sizey; i++ )
    {
        for( j=0; j<sizex; j++ )
        {
            k=(int)(1023.0*(u[i*sizex+j]-min)/range);
            fprintf(f, "%d %d %d ", r[k], g[k], b[k]);
        }
        fprintf(f, "\n");
    }
}
/*
 * Subsample uold (oldx x oldy) into unew (newx x newy).
 * NOTE: this only takes the top-left sample of each cell; it performs no
 * averaging, i.e. no real coarsening. Always returns 1.
 */
int coarsen( float *uold, unsigned oldx, unsigned oldy ,
             float *unew, unsigned newx, unsigned newy )
{
    /* Step between samples per axis; when the target is not smaller,
       copy 1:1 and clamp the iteration range to the source extent. */
    int stepx = (oldx > newx) ? (int)(oldx / newx) : 1;
    int stepy = (oldy > newy) ? (int)(oldy / newy) : 1;
    int stopx = (oldx > newx) ? (int)newx : (int)oldx;
    int stopy = (oldy > newy) ? (int)newy : (int)oldy;

    for (int row = 0; row < stopy - 1; row++)
    {
        for (int col = 0; col < stopx - 1; col++)
        {
            unew[row * newx + col] = uold[row * oldx * stepy + col * stepx];
        }
    }

    return 1;
}
#define BUFSIZE 100
/*
 * Parse the run configuration from infile: maxiter, resolution, number of
 * heat sources, then one "posx posy range temp" line per source.
 * Returns 1 on success, 0 on any parse/allocation failure.
 */
int read_input( FILE *infile, algoparam_t *param )
{
    int i, n;
    char buf[BUFSIZE];

    /* BUG FIX: fgets results were unchecked — on a truncated file the
       stale buffer was parsed again. Fail cleanly instead. */
    if( !fgets(buf, BUFSIZE, infile) )
        return 0;
    n = sscanf( buf, "%u", &(param->maxiter) );
    if( n!=1 )
        return 0;

    if( !fgets(buf, BUFSIZE, infile) )
        return 0;
    n = sscanf( buf, "%u", &(param->resolution) );
    if( n!=1 )
        return 0;

    /* visualization grid matches the simulation grid */
    param->visres = param->resolution;

    if( !fgets(buf, BUFSIZE, infile) )
        return 0;
    n = sscanf(buf, "%u", &(param->numsrcs) );
    if( n!=1 )
        return 0;

    (param->heatsrcs) =
        (heatsrc_t*) malloc( sizeof(heatsrc_t) * (param->numsrcs) );
    if( !param->heatsrcs )          /* allocation was previously unchecked */
        return 0;

    for( i=0; i<param->numsrcs; i++ )
    {
        if( !fgets(buf, BUFSIZE, infile) )
            return 0;
        n = sscanf( buf, "%f %f %f %f",
                    &(param->heatsrcs[i].posx),
                    &(param->heatsrcs[i].posy),
                    &(param->heatsrcs[i].range),
                    &(param->heatsrcs[i].temp) );
        if( n!=4 )
            return 0;
    }

    return 1;
}
/* Dump the run configuration (iterations, resolution, heat sources)
   to stdout in a human-readable form. */
void print_params( algoparam_t *param )
{
    fprintf(stdout, "Iterations : %u\n", param->maxiter);
    fprintf(stdout, "Resolution : %u\n", param->resolution);
    fprintf(stdout, "Num. Heat sources : %u\n", param->numsrcs);

    for (int k = 0; k < (int)param->numsrcs; k++)
    {
        heatsrc_t *src = &param->heatsrcs[k];
        fprintf(stdout, " %2d: (%2.2f, %2.2f) %2.2f %2.2f \n",
                k + 1, src->posx, src->posy, src->range, src->temp);
    }
}
|
22,725 | #include <cuda.h>
//#include "cuda_runtime.h"
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
#define N 16
// Row-major flattening: linear offset of element (row, col) in a matrix
// that is 'ord' elements wide.
__device__ int index(int col, int row, int ord){
return (row *ord)+col;
}
// One thread per element: writes c = a^T for a 4x4 row-major matrix.
// Expected launch: 2x2 blocks of 2x2 threads (16 threads total).
__global__ void Transpose(int *c, const int *a){
int col = (blockDim.x * blockIdx.x) + threadIdx.x;
int row = (blockDim.y * blockIdx.y) + threadIdx.y;
// NOTE(review): the matrix order 4 is hard-coded even though N (=16
// elements) is defined above — keep in sync if the size changes.
c[index(row,col,4)] = a[index(col, row, 4)] ;
}
/*
 * Transposes a 4x4 matrix on the device and verifies that the first column
 * of the result is 1, 5, 9, 13 (consecutive elements differ by the row
 * length, 4). Returns non-zero on any CUDA failure.
 */
int main()
{
    const int arraySize = 16;
    const int a[arraySize] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
    int c[arraySize] = { 0 };
    int *dev_a = 0;
    int *dev_c = 0;
    cudaError_t err = cudaSuccess;

    // Allocate GPU buffers for the input matrix and its transpose.
    // (API results were previously unchecked — failures surfaced only as
    // confusing assert failures later.)
    err = cudaMalloc((void**)&dev_c, arraySize * sizeof(int));
    if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc(dev_c): %s\n", cudaGetErrorString(err)); return 1; }
    err = cudaMalloc((void**)&dev_a, arraySize * sizeof(int));
    if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc(dev_a): %s\n", cudaGetErrorString(err)); return 1; }

    // Copy input vectors from host memory to GPU buffers.
    cudaMemcpy(dev_a, a, arraySize * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, arraySize * sizeof(int), cudaMemcpyHostToDevice);

    // Launch with one thread per element: 2x2 blocks of 2x2 threads.
    dim3 dimgrid(2, 2);
    dim3 dimblock(2, 2);
    Transpose<<<dimgrid, dimblock>>>(dev_c, dev_a);
    err = cudaGetLastError();      // launch-configuration errors surface here
    if (err != cudaSuccess) { fprintf(stderr, "kernel launch: %s\n", cudaGetErrorString(err)); return 1; }

    // Blocking copy back to the host (also synchronizes with the kernel).
    cudaMemcpy(c, dev_c, arraySize * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_c);
    cudaFree(dev_a);

    for (int i = 0; i < arraySize; i++){
        printf("%d ", c[i]);
        if (i < 3)
            assert(c[i+1] == c[i] + 4);
    }
    return 0;
}
|
22,726 |
#include <stdio.h>
#include <cuda.h>
// Debug kernel: each thread prints its own thread and block coordinates.
// Output order across threads is unspecified.
__global__
void MyKernel()
{
printf("ThreadId(x,y,z)=(%u,%u,%u)blockId(x,y,z)=(%u,%u,%u)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z);
return;
}
/* Demonstrates launch geometry: a 1-D launch (2 blocks x 2 threads) and a
 * 3-D launch (2x2x2 blocks of 2x2x2 threads), synchronizing after each so
 * device printf output is flushed. */
int main()
{
    // 1-D launch: 2 blocks x 2 threads = 4 threads total.
    MyKernel<<<2,2>>>();
    printf("\n\n****Kernel (2x2)=4 threads launched****\n\n");
    cudaDeviceSynchronize();
    printf("\n****Kernel finished****\n\n\n\n");

    // 3-D launch: 2x2x2 blocks of 2x2x2 threads = 64 threads total.
    dim3 gridShape(2, 2, 2);
    dim3 blockShape(2, 2, 2);
    MyKernel<<<gridShape, blockShape>>>();
    printf("\n\n****Kernel (blocks(2x2x2), threads(2,2,2))=64 threads launched****\n\n");
    cudaDeviceSynchronize();
    printf("\n****Kernel finished****\n\n");

    return 0;
}
|
22,727 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Each thread copies one byte of the greeting into odata. hello_str is a
// per-thread local array (every thread initializes its own copy); the
// terminating NUL is copied too when num == 12.
__global__ void
hello_kernel (char *odata, int num)
{
char hello_str[12] = "Hello CUDA!";
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num)
odata[idx] = hello_str[idx];
}
/*
 * Fills a device buffer with "Hello CUDA!" via hello_kernel, copies it back
 * and prints it. Returns non-zero on allocation/copy failure.
 */
int
main (void)
{
  char *h_data, *d_data;
  /* 12 bytes: "Hello CUDA!" plus its terminating NUL. (The local was
     previously named 'strlen', shadowing the libc function.) */
  const int msg_len = 12;
  size_t msg_size = msg_len * sizeof (char);
  cudaError_t err;

  h_data = (char *) malloc (msg_size);
  if (!h_data)                       /* malloc was previously unchecked */
    {
      fprintf (stderr, "host malloc failed\n");
      return 1;
    }
  memset (h_data, 0, msg_len);

  err = cudaMalloc ((void **) &d_data, msg_size);
  if (err != cudaSuccess)
    {
      fprintf (stderr, "cudaMalloc: %s\n", cudaGetErrorString (err));
      free (h_data);
      return 1;
    }

  /* Enough blocks of 'blocksize' threads to cover msg_len (ceil-div). */
  int blocksize = 8;
  int nblock = msg_len / blocksize + (msg_len % blocksize == 0 ? 0 : 1);
  hello_kernel <<< nblock, blocksize >>> (d_data, msg_len);
  err = cudaGetLastError ();
  if (err != cudaSuccess)
    fprintf (stderr, "kernel launch: %s\n", cudaGetErrorString (err));

  /* Blocking copy; synchronizes with the kernel. */
  cudaMemcpy (h_data, d_data, sizeof (char) * msg_len, cudaMemcpyDeviceToHost);
  printf ("string:%s\n", h_data);

  free (h_data);
  cudaFree (d_data);
  return 0;
}
|
22,728 | #include "includes.h"
#define N 64
/*
* This CPU function already works, and will run to create a solution matrix
* against which to verify your work building out the matrixMulGPU kernel.
*/
// Computes one element of the N x N product c = a * b per thread.
// NOTE(review): 'row' comes from the x thread dimension and 'col' from y
// (the transpose of the usual convention); correct as long as the launch
// covers N threads in both dimensions.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
/*
* Build out this kernel.
*/
int val = 0;
int row = threadIdx.x + blockIdx.x * blockDim.x;
int col = threadIdx.y + blockIdx.y * blockDim.y;
// Guard against threads beyond the matrix edge.
if (row < N && col < N)
{
// Dot product of row 'row' of a with column 'col' of b.
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
22,729 | #include "includes.h"
// Tiled matrix multiply c = a * b for n x n row-major matrices, staging
// TILE_WIDTH x TILE_WIDTH tiles of both inputs in shared memory.
// Assumes n is a multiple of TILE_WIDTH and the launch uses
// TILE_WIDTH x TILE_WIDTH blocks covering the whole output — TODO confirm.
__global__ void matmul_partition(const float *a, const float *b, float *c, int n){
const int TILE_WIDTH = 8;
__shared__ float na[TILE_WIDTH][TILE_WIDTH];
__shared__ float nb[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x, tx = threadIdx.x;
int by = blockIdx.y, ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float sum = 0;
// Every thread runs the whole function, so each one works on a distinct
// (ty, tx) position within the tile.
for(int m = 0; m < n / TILE_WIDTH; m++){
na[ty][tx] = a[row * n + m * TILE_WIDTH + tx];
nb[ty][tx] = b[(ty + m * TILE_WIDTH) * n + col];
__syncthreads();
// The whole tile must be loaded before the partial products are computed.
#pragma unroll TILE_WIDTH
for(int k = 0; k < TILE_WIDTH; k++){
sum += na[ty][k] * nb[k][tx];
}
__syncthreads();
// This tile must be fully consumed before the next iteration overwrites it.
}
c[row * n + col] = sum;
}
22,730 | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<time.h>
#define DCTSIZE 8
#define CENTERJSAMPLE 128
#define PASS1_BITS 2
#define CONST_BITS 13
#define ONE ((INT32) 1)
#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */
#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */
#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */
#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */
#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */
#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */
#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */
#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */
#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */
#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */
#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */
#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */
#define RIGHT_SHIFT(x,shft) ((x) >> (shft))
#define GETJSAMPLE(value) ((int) (value))
#define MULTIPLY16C16(var,const) ((var) * (const))
#define MULTIPLY(var,const) MULTIPLY16C16(var,const)
typedef int DCTELEM;
typedef long INT32;
// #define TOTAL_FUNC_CALL 49761
#define TOTAL_FUNC_CALL 4969077
char *str_islow = "islow function";
int *elemd, *dataptrd;
int numOfThreads = 4;
int numOfBlocks = 2;
int batchSize = DCTSIZE*DCTSIZE*sizeof(int);
/*
 * Reads "./data_large.txt" into elem: every "islow function" marker is
 * followed by DCTSIZE lines of 8 comma-separated elemptr values each.
 * Exits the process if the file cannot be opened.
 */
void readFile(int *elem){
    FILE *fp;
    /* BUG FIX: 'line' was uninitialized — getline() requires a NULL or
       malloc'd pointer; passing garbage is undefined behavior. */
    char *line = NULL;
    size_t len = 0;
    int ctr = 0, iter = 0;
    int *elemptr;

    // fp = fopen("./data_small.txt", "r");
    fp = fopen("./data_large.txt", "r");
    if (fp == NULL) {                       /* was unchecked */
        fprintf(stderr, "cannot open ./data_large.txt\n");
        exit(1);
    }
    printf("start reading file...\n");

    elemptr = elem;
    while(getline(&line, &len, fp) != -1){
        if(strstr(line, str_islow) != NULL){
            /* marker line: the next DCTSIZE lines carry data */
            iter++;
            if(iter % 1000 == 0)           /* progress heartbeat */
                printf("%s", line);
            ctr = DCTSIZE;
        }
        else if(ctr != 0){
            ctr--;
            sscanf(line, "elemptr[0]=%d,elemptr[1]=%d,elemptr[2]=%d,elemptr[3]=%d,elemptr[4]=%d,elemptr[5]=%d,elemptr[6]=%d,elemptr[7]=%d\n",
                   &elemptr[0], &elemptr[1], &elemptr[2], &elemptr[3], &elemptr[4], &elemptr[5], &elemptr[6], &elemptr[7]);
            elemptr += DCTSIZE; // jump 8 elements
        }
    }

    free(line);    /* getline buffer was previously leaked */
    fclose(fp);    /* stream was previously leaked */
    printf("done reading file\n");
}
// Dumps every input 8x8 block (row-major via elemptr) and its transformed
// counterpart (column-major walk via dataptr) to stderr, one "islow" record
// per block. With TOTAL_FUNC_CALL ~5M this produces a very large file.
void printFile(int *elemptr, DCTELEM *dataptr){
int *e_p, *d_p;
for(int i = 0 ; i<TOTAL_FUNC_CALL ; i++){
fprintf(stderr, "%dth islow function\n", i+1);
int ctr;
// input block: 8 rows of 8 consecutive values
e_p = elemptr;
for(ctr = 0 ; ctr<DCTSIZE ; ctr++){
fprintf(stderr, "elemptr[0]=%d,", (int)e_p[0]);
fprintf(stderr, "elemptr[1]=%d,", (int)e_p[1]);
fprintf(stderr, "elemptr[2]=%d,", (int)e_p[2]);
fprintf(stderr, "elemptr[3]=%d,", (int)e_p[3]);
fprintf(stderr, "elemptr[4]=%d,", (int)e_p[4]);
fprintf(stderr, "elemptr[5]=%d,", (int)e_p[5]);
fprintf(stderr, "elemptr[6]=%d,", (int)e_p[6]);
fprintf(stderr, "elemptr[7]=%d\n", (int)e_p[7]);
e_p += DCTSIZE;
}
// output block: each printed row strides by DCTSIZE (column walk)
d_p = dataptr;
for(ctr = 0 ; ctr<DCTSIZE ; ctr++){
fprintf(stderr, "dataptr[0]=%d,", d_p[DCTSIZE*0]);
fprintf(stderr, "dataptr[1]=%d,", d_p[DCTSIZE*1]);
fprintf(stderr, "dataptr[2]=%d,", d_p[DCTSIZE*2]);
fprintf(stderr, "dataptr[3]=%d,", d_p[DCTSIZE*3]);
fprintf(stderr, "dataptr[4]=%d,", d_p[DCTSIZE*4]);
fprintf(stderr, "dataptr[5]=%d,", d_p[DCTSIZE*5]);
fprintf(stderr, "dataptr[6]=%d,", d_p[DCTSIZE*6]);
fprintf(stderr, "dataptr[7]=%d\n", d_p[DCTSIZE*7]);
d_p += 1;
}
// advance to the next 8x8 block
elemptr += DCTSIZE*DCTSIZE;
dataptr += DCTSIZE*DCTSIZE;
}
}
// Row pass of the jpeglib "islow" forward DCT: each thread transforms one
// 8-element row of an 8x8 block using fixed-point arithmetic (CONST_BITS
// fractional bits), leaving results scaled up by PASS1_BITS for the column
// pass (islow_cuda_2). Thread ID selects the row; the arithmetic below is
// the standard Loeffler/AAN-style factorization from jpeglib and must keep
// its exact statement order.
__global__ void islow_cuda_1(int *elemptrd, int *dataptrd){
INT32 tmp0, tmp1, tmp2, tmp3;
INT32 tmp10, tmp11, tmp12, tmp13;
INT32 z1;
int ID = blockIdx.x * blockDim.x + threadIdx.x;
// each thread owns one row of the 8x8 block
elemptrd += ID * DCTSIZE;
dataptrd += ID * DCTSIZE;
// even part: sums/differences of mirrored sample pairs
tmp0 = GETJSAMPLE(elemptrd[0]) + GETJSAMPLE(elemptrd[7]);
tmp1 = GETJSAMPLE(elemptrd[1]) + GETJSAMPLE(elemptrd[6]);
tmp2 = GETJSAMPLE(elemptrd[2]) + GETJSAMPLE(elemptrd[5]);
tmp3 = GETJSAMPLE(elemptrd[3]) + GETJSAMPLE(elemptrd[4]);
tmp10 = tmp0 + tmp3;
tmp12 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp13 = tmp1 - tmp2;
tmp0 = GETJSAMPLE(elemptrd[0]) - GETJSAMPLE(elemptrd[7]);
tmp1 = GETJSAMPLE(elemptrd[1]) - GETJSAMPLE(elemptrd[6]);
tmp2 = GETJSAMPLE(elemptrd[2]) - GETJSAMPLE(elemptrd[5]);
tmp3 = GETJSAMPLE(elemptrd[3]) - GETJSAMPLE(elemptrd[4]);
// DC term is level-shifted by 8*CENTERJSAMPLE
dataptrd[0] = (DCTELEM) ((tmp10 + tmp11 - 8 * CENTERJSAMPLE) << PASS1_BITS);
dataptrd[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); /* c6 */
// fudge factor for final descale (round-half-up)
z1 += ONE << (CONST_BITS-PASS1_BITS-1);
dataptrd[2] = (DCTELEM)
RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), /* c2-c6 */
CONST_BITS-PASS1_BITS);
dataptrd[6] = (DCTELEM)
RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), /* c2+c6 */
CONST_BITS-PASS1_BITS);
// odd part
tmp12 = tmp0 + tmp2;
tmp13 = tmp1 + tmp3;
z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */
/* Add fudge factor here for final descale. */
z1 += ONE << (CONST_BITS-PASS1_BITS-1);
tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* -c3+c5 */
tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */
tmp12 += z1;
tmp13 += z1;
z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */
tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */
tmp0 += z1 + tmp12;
tmp3 += z1 + tmp13;
z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */
tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */
tmp1 += z1 + tmp13;
tmp2 += z1 + tmp12;
dataptrd[1] = (DCTELEM) RIGHT_SHIFT(tmp0, CONST_BITS-PASS1_BITS);
dataptrd[3] = (DCTELEM) RIGHT_SHIFT(tmp1, CONST_BITS-PASS1_BITS);
dataptrd[5] = (DCTELEM) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS);
dataptrd[7] = (DCTELEM) RIGHT_SHIFT(tmp3, CONST_BITS-PASS1_BITS);
}
// Column pass of the "islow" forward DCT: operates in place on the output
// of islow_cuda_1. Each thread transforms one column (stride DCTSIZE) and
// removes the PASS1_BITS pre-scaling in the final right shifts. Statement
// order mirrors jpeglib and must be preserved.
__global__ void islow_cuda_2(int *dataptrd){
INT32 tmp0, tmp1, tmp2, tmp3;
INT32 tmp10, tmp11, tmp12, tmp13;
INT32 z1;
int ID = blockIdx.x * blockDim.x + threadIdx.x;
// each thread owns one column of the 8x8 block
dataptrd += ID;
// even part
tmp0 = dataptrd[DCTSIZE*0] + dataptrd[DCTSIZE*7];
tmp1 = dataptrd[DCTSIZE*1] + dataptrd[DCTSIZE*6];
tmp2 = dataptrd[DCTSIZE*2] + dataptrd[DCTSIZE*5];
tmp3 = dataptrd[DCTSIZE*3] + dataptrd[DCTSIZE*4];
/* Add fudge factor here for final descale. */
tmp10 = tmp0 + tmp3 + (ONE << (PASS1_BITS-1));
tmp12 = tmp0 - tmp3;
tmp11 = tmp1 + tmp2;
tmp13 = tmp1 - tmp2;
tmp0 = dataptrd[DCTSIZE*0] - dataptrd[DCTSIZE*7];
tmp1 = dataptrd[DCTSIZE*1] - dataptrd[DCTSIZE*6];
tmp2 = dataptrd[DCTSIZE*2] - dataptrd[DCTSIZE*5];
tmp3 = dataptrd[DCTSIZE*3] - dataptrd[DCTSIZE*4];
dataptrd[DCTSIZE*0] = (DCTELEM) RIGHT_SHIFT(tmp10 + tmp11, PASS1_BITS);
dataptrd[DCTSIZE*4] = (DCTELEM) RIGHT_SHIFT(tmp10 - tmp11, PASS1_BITS);
z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); /* c6 */
/* Add fudge factor here for final descale. */
z1 += ONE << (CONST_BITS+PASS1_BITS-1);
dataptrd[DCTSIZE*2] = (DCTELEM)
RIGHT_SHIFT(z1 + MULTIPLY(tmp12, FIX_0_765366865), /* c2-c6 */
CONST_BITS+PASS1_BITS);
dataptrd[DCTSIZE*6] = (DCTELEM)
RIGHT_SHIFT(z1 - MULTIPLY(tmp13, FIX_1_847759065), /* c2+c6 */
CONST_BITS+PASS1_BITS);
// odd part
tmp12 = tmp0 + tmp2;
tmp13 = tmp1 + tmp3;
z1 = MULTIPLY(tmp12 + tmp13, FIX_1_175875602); /* c3 */
/* Add fudge factor here for final descale. */
z1 += ONE << (CONST_BITS+PASS1_BITS-1);
tmp12 = MULTIPLY(tmp12, - FIX_0_390180644); /* -c3+c5 */
tmp13 = MULTIPLY(tmp13, - FIX_1_961570560); /* -c3-c5 */
tmp12 += z1;
tmp13 += z1;
z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* -c3+c7 */
tmp0 = MULTIPLY(tmp0, FIX_1_501321110); /* c1+c3-c5-c7 */
tmp3 = MULTIPLY(tmp3, FIX_0_298631336); /* -c1+c3+c5-c7 */
tmp0 += z1 + tmp12;
tmp3 += z1 + tmp13;
z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* -c1-c3 */
tmp1 = MULTIPLY(tmp1, FIX_3_072711026); /* c1+c3+c5-c7 */
tmp2 = MULTIPLY(tmp2, FIX_2_053119869); /* c1+c3-c5+c7 */
tmp1 += z1 + tmp13;
tmp2 += z1 + tmp12;
dataptrd[DCTSIZE*1] = (DCTELEM) RIGHT_SHIFT(tmp0, CONST_BITS+PASS1_BITS);
dataptrd[DCTSIZE*3] = (DCTELEM) RIGHT_SHIFT(tmp1, CONST_BITS+PASS1_BITS);
dataptrd[DCTSIZE*5] = (DCTELEM) RIGHT_SHIFT(tmp2, CONST_BITS+PASS1_BITS);
dataptrd[DCTSIZE*7] = (DCTELEM) RIGHT_SHIFT(tmp3, CONST_BITS+PASS1_BITS);
}
// feed 8*8 elements at a time
// Transforms one 8x8 block: copy in, run row pass then column pass (the
// 2x4 launch gives exactly 8 threads, one per row/column), copy result out.
// NOTE(review): one H2D copy + two launches + one D2H copy per block is
// dominated by transfer latency; batching many blocks per call would be
// far faster — confirm before relying on the timing numbers.
void func_islow(int *elemptr, DCTELEM * dataptr){
cudaMemcpy(elemd, elemptr, batchSize, cudaMemcpyHostToDevice);
islow_cuda_1<<<numOfBlocks, numOfThreads>>>(elemd, dataptrd);
islow_cuda_2<<<numOfBlocks, numOfThreads>>>(dataptrd);
cudaMemcpy(dataptr, dataptrd, batchSize, cudaMemcpyDeviceToHost);
}
/*
 * Driver: read all blocks, run the two-pass DCT per block on the GPU,
 * report wall time, then dump inputs/outputs to ./data_self_cuda.txt
 * (stderr is redirected there for printFile).
 */
int main(void){
    if (freopen("./data_self_cuda.txt", "w", stderr) == NULL) {
        printf("cannot redirect stderr\n");
        return 1;
    }

    /* size_t: the total is ~1.27 GB — an int expression is uncomfortably
       close to overflow if TOTAL_FUNC_CALL grows. */
    size_t size = (size_t)TOTAL_FUNC_CALL*DCTSIZE*DCTSIZE*sizeof(int);
    int *elem = (int *)malloc(size);
    int *dataptr = (int *)malloc(size);
    if (elem == NULL || dataptr == NULL) {   /* was unchecked */
        printf("host allocation failed\n");
        return 1;
    }
    int *elem_start, *dataptr_start;
    clock_t start, end;
    double time_used = 0;

    readFile(elem);

    /* device scratch for one 8x8 block at a time */
    cudaMalloc(&elemd, batchSize);
    cudaMalloc(&dataptrd, batchSize);

    elem_start = elem;
    dataptr_start = dataptr;
    printf("start processing data\n");
    start = clock();
    for(int i = 0 ; i<TOTAL_FUNC_CALL ; i++){
        func_islow(elem, dataptr);
        elem += DCTSIZE*DCTSIZE;
        dataptr += DCTSIZE*DCTSIZE;
    }
    end = clock();
    time_used += ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("done processing data\n");
    printf("time used:%lfs\n", time_used);

    printFile(elem_start, dataptr_start);

    /* previously leaked: device scratch and both host buffers */
    cudaFree(elemd);
    cudaFree(dataptrd);
    free(elem_start);
    free(dataptr_start);
    return 0;
}
|
22,731 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include "cuda_helper_funcs.h"
//#include "RGB.h"
//
///**
//* Helper function to calculate the greyscale value based on R, G, and B
//*/
//__device__ int greyscale(BYTE red, BYTE green, BYTE blue)
//{
// int grey = 0.3 * red + 0.59 * green + 0 * 11 * blue; // calculate grey scale
// return min(grey, 255);
//}
//
///**
//* Kernel for executing on GPY
//*/
//__global__ void greyscaleKernel(RGB *d_pixels, int height, int width)
//{
// int x = blockIdx.x * blockDim.x + threadIdx.x; // width index
// int y = blockIdx.y * blockDim.y + threadIdx.y; // height index
//
// if (y >= height || y >= width) // thread is not within image
// return;
//
// int index = y * width + x;
//
// int grey = greyscale(d_pixels[index].red, d_pixels[index].green, d_pixels[index].blue); // calculate grey scale
//
// d_pixels[index].red = grey;
// d_pixels[index].green = grey;
// d_pixels[index].blue = grey;
//}
//
///**
//* Host function for launching greyscale kernel
//*/
//__host__ void d_convert_greyscale(RGB *pixel, int height, int width)
//{
// RGB *d_pixel;
//
// cudaMalloc(&d_pixel, height * width * sizeof(RGB));
// cudaMemcpy(d_pixel, pixel, height * width * sizeof(RGB), cudaMemcpyHostToDevice);
//
// dim3 grid, block;
// block.x = 16;
// block.y = 16;
// grid.x = calcBlockDim(width, block.x);
// grid.y = calcBlockDim(height, block.y);
//
// greyscaleKernel << <grid, block >> >(d_pixel, height, width);
//
// cudaMemcpy(pixel, d_pixel, height * width * sizeof(RGB), cudaMemcpyDeviceToHost);
//} |
22,732 |
#include <iostream>
using namespace std;
// Reports the launch configuration back to the host:
// *b receives the number of blocks in the grid, *t the threads per block.
__global__ void kernel( int* b, int* t)
{
    *t = blockDim.x;   // threads per block
    *b = gridDim.x;    // blocks in the grid
}
/* Launches a single thread that reports its own launch shape, then prints
 * the block and thread counts on the host. */
int main()
{
    int blocks;
    int threads;
    int *dev_blocks;
    int *dev_threads;

    // Device-side slots for the two counters.
    cudaMalloc( (void**)&dev_blocks, sizeof(int));
    cudaMalloc( (void**)&dev_threads, sizeof(int));

    // One block of one thread queries gridDim/blockDim.
    kernel<<<1,1>>>(dev_blocks, dev_threads);

    // Blocking copies; they synchronize with the kernel.
    cudaMemcpy( &blocks, dev_blocks, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy( &threads, dev_threads, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(dev_blocks);
    cudaFree(dev_threads);

    cout << "Num blocks : " << blocks << endl;
    cout << "Num threads per block: " << threads << endl;

    return 0;
}
|
22,733 |
#include <iostream>
#include "cuda.h"
using Real = double;
//Test wrapper to run a function multiple times
template<typename PerfFunc>
float kernel_timer_wrapper(const int n_burn, const int n_perf, PerfFunc perf_func){
//Initialize the timer and test
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for( int i_run = 0; i_run < n_burn + n_perf; i_run++){
if(i_run == n_burn){
//Burn in time is over, start timing
cudaEventRecord(start);
}
//Run the function timing performance
perf_func();
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
return milliseconds/1000.;
}
//A few integers that are needed for indexing on device
__constant__ int c_n_side3;
__constant__ int c_n_side2;
__constant__ int c_n_side;
__constant__ int c_n_side2n_buf;
__constant__ int c_n_buf;
// Packs a z-slab (c_n_buf layers deep, offset by c_n_buf in z) of a 4D
// (var, z, y, x) array into the contiguous 1D buffer, one thread per buffer
// element. No bounds guard: the launch must supply exactly buffer-length
// threads.
__global__ void k_zbuffer_pack_array4d(Real* array4d_in, Real* array1d_buf){
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
// decompose the flat index into (var, k, j, i) using constant-memory extents
const int v_var = idx / c_n_side2n_buf;
const int k_grid = (idx - v_var * c_n_side2n_buf) / c_n_side2;
const int j_grid = (idx - v_var * c_n_side2n_buf - k_grid * c_n_side2) / c_n_side;
const int i_grid = idx - v_var * c_n_side2n_buf - k_grid * c_n_side2 - j_grid * c_n_side;
array1d_buf[idx] = array4d_in[i_grid + j_grid*c_n_side + (k_grid+c_n_buf)*c_n_side2 + v_var*c_n_side3];
}
// Same packing as k_zbuffer_pack_array4d, but the source is an array of
// per-variable 3D arrays (one extra device pointer indirection per load).
__global__ void k_zbuffer_pack_array_of_array3d(Real** array_of_array3d_in, Real* array1d_buf){
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int v_var = idx / c_n_side2n_buf;
const int k_grid = (idx - v_var * c_n_side2n_buf) / c_n_side2;
const int j_grid = (idx - v_var * c_n_side2n_buf - k_grid * c_n_side2) / c_n_side;
const int i_grid = idx - v_var * c_n_side2n_buf - k_grid * c_n_side2 - j_grid * c_n_side;
array1d_buf[idx] = array_of_array3d_in[v_var][i_grid + j_grid*c_n_side + (k_grid+c_n_buf)*c_n_side2];
}
/*
 * Benchmark: compare packing a z-buffer slab from a flat 4D array versus an
 * array of per-variable 3D arrays. Args: n_var n_side n_buf n_run.
 * Prints one line of parameters and timings to stdout.
 */
int main(int argc, char* argv[]) {
  if (argc < 5) {                 // argv was previously dereferenced unchecked
    std::cerr << "usage: " << argv[0] << " n_var n_side n_buf n_run" << std::endl;
    return 1;
  }
  std::size_t pos;
  const int n_var = std::stoi(argv[1],&pos);
  const int n_side = std::stoi(argv[2],&pos);
  const int n_buf = std::stoi(argv[3],&pos);
  const int n_run = std::stoi(argv[4],&pos);

  //Order of iteration, fastest moving to slowest moving:
  // x (full n_side), y (full n_side), z (only n_buf), var (n_var)
  const int buf_size = n_side*n_side*n_buf*n_var;
  const int n_side3 = n_side*n_side*n_side;
  const int n_side2 = n_side*n_side;
  const int n_side2n_buf = n_side*n_side*n_buf;
  const int n_grid = n_side3;

  // NOTE(review): integer division — buf_size must be a multiple of
  // threads_per_block or the tail of the buffer is never packed.
  const int threads_per_block = 128;
  const int cuda_grid = buf_size/threads_per_block;
  const int cuda_block = threads_per_block;

  //Move useful indexing variables into constant memory
  cudaMemcpyToSymbol(c_n_side3, &n_side3, sizeof(n_side3));
  cudaMemcpyToSymbol(c_n_side2, &n_side2, sizeof(n_side2));
  cudaMemcpyToSymbol(c_n_side, &n_side, sizeof(n_side));
  cudaMemcpyToSymbol(c_n_side2n_buf, &n_side2n_buf, sizeof(n_side2n_buf));
  cudaMemcpyToSymbol(c_n_buf, &n_buf, sizeof(n_buf));

  //Setup a 1d view for the buffer
  Real* d_array1d_buf;
  cudaMalloc(&d_array1d_buf, sizeof(Real)*buf_size);

  //Setup a raw 4D view
  Real* d_array4d_in;
  cudaMalloc(&d_array4d_in, sizeof(Real)*n_var*n_grid);

  float time_array4d = kernel_timer_wrapper( n_run, n_run,
    [&] () {
      k_zbuffer_pack_array4d<<< cuda_grid, cuda_block >>>
        (d_array4d_in, d_array1d_buf);
    });

  //Setup an array of arrays
  //Array of arrays on device
  CUdeviceptr* d_array_of_array3d_in;
  cudaMalloc(&d_array_of_array3d_in, sizeof(CUdeviceptr)*n_var);
  //Array of arrays on host
  CUdeviceptr* h_array_of_array3d_in = (CUdeviceptr*) malloc(sizeof(CUdeviceptr)*n_var);
  //Malloc each 3d array
  for(int i = 0; i < n_var; i++) {
    cudaMalloc((void**)(h_array_of_array3d_in+i ), n_grid * sizeof(Real));
  }
  //Move h_array_of_array1d to d_array_of_array3d
  cudaMemcpy(d_array_of_array3d_in, h_array_of_array3d_in, sizeof(CUdeviceptr) * n_var, cudaMemcpyHostToDevice);

  double time_array_of_array3d = kernel_timer_wrapper( n_run, n_run,
    [&] () {
      k_zbuffer_pack_array_of_array3d<<< cuda_grid, cuda_block >>>
        ( (Real**) d_array_of_array3d_in, d_array1d_buf);
    });

  double cell_cycles_per_second_array4d = static_cast<double>(n_side2n_buf)*static_cast<double>(n_run)/time_array4d;
  double cell_cycles_per_second_array_of_array3d = static_cast<double>(n_side2n_buf)*static_cast<double>(n_run)/time_array_of_array3d;
  std::cout<< n_var << " " << n_side << " " << n_run << " " << n_buf << " " << time_array4d << " " << time_array_of_array3d << " "
    << cell_cycles_per_second_array4d << " " << cell_cycles_per_second_array_of_array3d << std::endl;

  //free each 3d array
  for(int i = 0; i < n_var; i++) {
    // BUG FIX: was cudaFree(h_array_of_array3d_in+i), which passed the
    // address of the host slot instead of the device pointer stored in it.
    cudaFree((void*)h_array_of_array3d_in[i]);
  }
  free(h_array_of_array3d_in);
  cudaFree(d_array_of_array3d_in);
  cudaFree(d_array4d_in);
  cudaFree(d_array1d_buf);
  return 0;
}
|
//function kernel
// NOTE(review): despite the name this returns the SQUARED length,
// dot(r, r); every caller below compares it against h*h accordingly.
__device__ float length(float3 r) {
return r.x*r.x + r.y*r.y + r.z*r.z;
}
// Component-wise product of two float3 vectors.
__device__ float3 mul_float3(float3 r1, float3 r2) {
return make_float3(r1.x * r2.x, r1.y * r2.y, r1.z * r2.z);
}
// Component-wise sum.
__device__ float3 add_float3(float3 r1, float3 r2) {
return make_float3(r1.x + r2.x, r1.y + r2.y, r1.z + r2.z);
}
// Component-wise difference r1 - r2.
__device__ float3 dif_float3(float3 r1, float3 r2) {
return make_float3(r1.x - r2.x, r1.y - r2.y, r1.z - r2.z);
}
// Scalar scale s * r (operates on the by-value copy).
__device__ float3 scale_float3(float s, float3 r) {
r.x *= s;
r.y *= s;
r.z *= s;
return r;
}
// SPH poly6 smoothing kernel: 315/(64*pi*h^9) * (h^2 - |r|^2)^3.
// length(r) here is the squared distance (see note on length()).
// Uses double-precision pow on float arguments.
__device__ float Kernel_Poly6(float3 r, float h) {
float PI = 3.14159;
return 315.0f / (64 * PI * pow(h, 9)) * pow(pow(h, 2) - length(r), 3);
}
// Gradient of the poly6 kernel: r * (-945/(32*pi*h^9)) * (h^2 - |r|^2)^2.
// The scalar factor is recomputed identically for each component.
__device__ float3 Gradient_Kernel_Poly6(float3 r, float h) {
float PI = 3.14159;
return make_float3(
r.x * -945.0f / ( 32.0f * PI * pow(h,9) ) * pow(pow(h, 2) - length(r), 2),
r.y * -945.0f / ( 32.0f * PI * pow(h,9) ) * pow(pow(h, 2) - length(r), 2),
r.z * -945.0f / ( 32.0f * PI * pow(h,9) ) * pow(pow(h, 2) - length(r), 2));
}
// Laplacian of the poly6 kernel. length(r) is the squared distance |r|^2.
__device__ float Lap_Kernel_Poly6(float3 r, float h) {
    float PI = 3.14159;
    float h2 = pow(h, 2);
    float r2 = length(r);
    // BUG FIX: '3 / 4' was integer division (== 0), which silently zeroed
    // the second term of the factor. Use the float constant 0.75f.
    return 945.0f / (8 * PI * pow(h, 9)) * (h2 - r2) * (r2 - 0.75f * (h2 - r2));
}
//SPH particle struct
struct pSPH {
float3 pos;   // position
float3 vel;   // velocity
float m;      // mass
float rho;    // density
float _;      // flag: MC() skips particles with _ <= 0.5 — TODO confirm meaning
float col;    // presumably a color/visualization value — confirm with callers
};
// Samples density (rho) and its gradient over a len^3 marching-cubes grid
// from N SPH particles; the result for grid cell idx is written to
// MCBuf[idx] as (grad.x, grad.y, grad.z, rho).
extern "C" __global__ void
MC(pSPH *p, float4 *MCBuf, const int len, const float scale, const float h, const int N, const int M)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// NOTE(review): this guard admits idx == M; if MCBuf holds exactly M
// entries the final write is out of bounds — confirm whether '>=' was meant.
if (idx > M) return;
// decompose the flat cell index into (px, py, pz) grid coordinates
int px = idx%len;
int py = (idx/len)%len;
int pz = (idx/len/len);
float3 pos = make_float3(px*scale,py*scale,pz*scale);
float rho = 0;
float3 grad_rho = make_float3(0,0,0);
int i;
for (i = 0; i < N; ++i)
{
pSPH _p = p[i];
float3 r = dif_float3(pos, _p.pos);
// NOTE(review): comparing a grid-cell index against a particle index
// looks unintended (a cell is not a particle) — confirm.
if (i == idx) continue;
// length() returns squared distance, so this is |r|^2 > h^2
if (length(r) > h*h) continue;
if (_p._ <= 0.5f) continue;
rho += _p.m * Kernel_Poly6(r, h);
grad_rho = add_float3(grad_rho, scale_float3(_p.m * -1.0f, Gradient_Kernel_Poly6(r, h)));
}
float4 _MCBuf = make_float4(grad_rho.x, grad_rho.y, grad_rho.z, rho);
MCBuf[idx] = _MCBuf;
return;
}
|
22,735 | #include <cuda_runtime.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h> //srand()
#include <iostream> //cout
#include <string.h> //memset()
extern "C" void gpuTestAll(float *MatA, float *MatB, float *MatC, int nx, int ny);
// grid 1D block 1D
// grid 2D block 2D
// grid 2D block 1D
// grid 2D block 2D
// Aborts on a non-zero CUDA status.
// BUG FIX: the old define carried a stray ';' after the macro name, so
// CHECK(x) expanded to an empty statement plus a bare block — unusable in
// if/else. Wrapped in do { } while (0) for statement-safe expansion.
#define CHECK(status)                                   \
    do {                                                \
        if ((status) != 0)                              \
        {                                               \
            std::cout << "Cuda failure: " << (status);  \
            abort();                                    \
        }                                               \
    } while (0)
// 2D grid / 2D block: one thread per matrix element, with a guard for
// threads past the ragged right/bottom edges.
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx,
                                 int ny)
{
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (col < nx && row < ny)
    {
        unsigned int offset = row * nx + col;
        MatC[offset] = MatA[offset] + MatB[offset];
    }
}
// grid 1D block 1D
// 1D grid / 1D block: one thread per column; each thread walks all ny rows
// of its column sequentially.
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < nx )
for (int iy = 0; iy < ny; iy++)
{
int idx = iy * nx + ix;
MatC[idx] = MatA[idx] + MatB[idx];
}
}
// grid 2D block 1D
// 2D grid / 1D block: blockIdx.y selects the row directly; threads within
// the 1D block cover the columns.
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
}
////=================================================
////=================================================
// Launches 'kernel' once with the given configuration, synchronizes, and
// prints "<label> elapsed <t> sec" using host clock() time.
static void launchAndTime(const char *label,
                          void (*kernel)(float *, float *, float *, int, int),
                          dim3 grid, dim3 block,
                          float *a, float *b, float *c, int nx, int ny)
{
    clock_t t0 = clock();
    kernel<<<grid, block>>>(a, b, c, nx, ny);
    CHECK(cudaDeviceSynchronize());
    clock_t t1 = clock();
    printf("%s elapsed %f sec\n", label, (double)(t1 - t0) / (CLOCKS_PER_SEC));
}

/*
 * Benchmarks matrix addition under many launch geometries.
 * (Labels quote the grid sizes that result when nx = ny = 16384; the actual
 * grid is computed from nx/ny.) The previous version declared 34 clock_t
 * variables (most unused) and repeated the launch/time/print sequence
 * 14 times; it is now table-driven with identical output.
 */
void gpuTestAll(float *d_MatA, float *d_MatB, float *d_MatC, int nx, int ny)
{
    // 2D grid / 2D block configurations.
    static const struct { int bx, by; const char *label; } cfg2d[] = {
        { 32, 32, "sumMatrixOnGPU2D <<< (512,512), (32,32) >>>" },
        { 16, 32, "sumMatrixOnGPU2D <<< (1024,512), (16,32) >>>" },
        { 32, 16, "sumMatrixOnGPU2D <<< (512,1024), (32,16) >>>" },
        { 16, 16, "sumMatrixOnGPU2D <<< (1024,1024),(16,16) >>>" },
        { 64, 16, "sumMatrixOnGPU2D <<< (256,1024), (64,16) >>>" },
        { 16, 64, "sumMatrixOnGPU2D <<< (1024,256), (16,64) >>>" },
    };
    for (size_t k = 0; k < sizeof(cfg2d) / sizeof(cfg2d[0]); k++) {
        dim3 block(cfg2d[k].bx, cfg2d[k].by);
        dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
        launchAndTime(cfg2d[k].label, sumMatrixOnGPU2D, grid, block,
                      d_MatA, d_MatB, d_MatC, nx, ny);
    }
    printf("\n");

    // 1D grid / 1D block: one thread per column.
    static const struct { int bx; const char *label; } cfg1d[] = {
        {  32, "sumMatrixOnGPU1D <<< (512,1) , (32,1) >>>" },
        {  64, "sumMatrixOnGPU1D <<< (256,1) , (64,1) >>>" },
        { 128, "sumMatrixOnGPU1D <<< (128,1) , (128,1) >>>" },
    };
    for (size_t k = 0; k < sizeof(cfg1d) / sizeof(cfg1d[0]); k++) {
        dim3 block(cfg1d[k].bx, 1);
        dim3 grid((nx + block.x - 1) / block.x, 1);
        launchAndTime(cfg1d[k].label, sumMatrixOnGPU1D, grid, block,
                      d_MatA, d_MatB, d_MatC, nx, ny);
    }
    printf("\n");

    // 2D grid / 1D block: grid.y indexes the row directly.
    static const struct { int bx; const char *label; } cfgMix[] = {
        {  32, "sumMatrixOnGPUMix <<< (512,16384),(32,1) >>>" },
        {  64, "sumMatrixOnGPUMix <<< (256,16384),(64,1) >>>" },
        { 128, "sumMatrixOnGPUMix <<< (128,16384),(128,1) >>>" },
        { 256, "sumMatrixOnGPUMix <<< (64,16384), (256,1) >>>" },
        { 512, "sumMatrixOnGPUMix <<< (32,16384), (512,1) >>>" },
    };
    for (size_t k = 0; k < sizeof(cfgMix) / sizeof(cfgMix[0]); k++) {
        dim3 block(cfgMix[k].bx, 1);
        dim3 grid((nx + block.x - 1) / block.x, ny);
        launchAndTime(cfgMix[k].label, sumMatrixOnGPUMix, grid, block,
                      d_MatA, d_MatB, d_MatC, nx, ny);
    }
}
|
22,736 | #include <stdio.h>
#define DIM 32 // 32 is maximum for now
// Allocates an uninitialized row x col matrix of ints (row-major).
// Caller owns the memory; returns NULL on allocation failure.
int *create_matrix(int row, int col){
    const size_t bytes = sizeof(int) * row * col;
    return (int *) malloc(bytes);
}
// Prints a row x col matrix to stdout, one row per line,
// each value followed by a single space.
void print_matrix(int *mat, int row, int col){
    for(int r = 0; r < row; r++){
        const int *line = mat + r * col;
        for(int c = 0; c < col; c++){
            printf("%d ", line[c]);
        }
        printf("\n");
    }
}
// Sets every element of the row x col matrix to 'fill'.
void fill_matrix(int *mat, int row, int col, int fill){
    const int total = row * col;
    for(int idx = 0; idx < total; idx++){
        mat[idx] = fill;
    }
}
// Kernel that uses a 2D thread structure for squaring the matrix
// Only one block is allowed (shared memory)
// Squares the matrix in place: mat = mat * mat.
// Requires a single block of (dim x dim) threads (shared launch); each
// thread produces one output element.
__global__
void matrix_square(int *mat, int dim){
    const int myRow = threadIdx.y;
    const int myCol = threadIdx.x;
    const int myPos = myRow * dim + myCol;

    // BUG FIX: the original accumulated over a double loop (i, j), i.e.
    // sum_{i,j} mat[row][i] * mat[j][col] = (row sum) * (column sum),
    // which is not a matrix product. A single dot-product loop is correct.
    int sum = 0;
    for(int k = 0; k < dim; k++){
        sum += mat[myRow * dim + k] * mat[k * dim + myCol];
    }

    // All threads must finish READING the input before anyone overwrites it
    // (the product is computed in place).
    __syncthreads();
    mat[myPos] = sum;
}
int main(int argc, char *argv[]){
    // Build a DIM x DIM host matrix filled with 2s.
    int *h_mat = create_matrix(DIM, DIM);
    fill_matrix(h_mat, DIM, DIM, 2);

    // Mirror it on the device.
    const size_t bytes = DIM * DIM * sizeof(int);
    int *d_mat;
    cudaMalloc(&d_mat, bytes);
    cudaMemcpy(d_mat, h_mat, bytes, cudaMemcpyHostToDevice);

    // One block, one thread per output element.
    matrix_square<<<1, dim3(DIM, DIM)>>>(d_mat, DIM);

    // Fetch the result and display it.
    cudaMemcpy(h_mat, d_mat, bytes, cudaMemcpyDeviceToHost);
    print_matrix(h_mat, DIM, DIM);

    cudaFree(d_mat);
    free(h_mat);
    return 0;
}
|
22,737 | // doing 1024 * 1024 element's reducing
// 1024 blocks with 1024 threads -> 1 block with 1024 threads -> result
#include <iostream>
/*
 * Tree reduction performed directly in global memory (destructive: g_in is
 * overwritten with partial sums). Each block writes its total into
 * g_out[blockIdx.x]. Assumes blockDim.x is a power of two.
 */
__global__
void global_reduce_kernel(int* g_in, int* g_out)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    const int tid = threadIdx.x;
    // Halve the active range each pass; the lower half accumulates the
    // upper half (s >>= 1 is the same as s /= 2).
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            g_in[gid] += g_in[gid + stride];
        }
        __syncthreads(); // finish this pass before starting the next
    }
    // Thread 0 of each block holds the block's sum.
    if (tid == 0)
    {
        g_out[blockIdx.x] = g_in[gid];
    }
}
/*
 * Block-level tree reduction staged through dynamic shared memory.
 * Launch with blockDim.x * sizeof(int) bytes of shared memory; each block
 * writes its partial sum to g_out[blockIdx.x]. Assumes blockDim.x is a
 * power of two.
 */
__global__
void share_reduce_kernel(int* g_in, int* g_out)
{
    extern __shared__ int block_memory[]; // per-block scratch buffer
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    const int tid = threadIdx.x;
    // Stage this block's slice of the input into shared memory.
    block_memory[tid] = g_in[gid];
    __syncthreads();
    // Halve the active range each pass; lower half accumulates upper half.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            block_memory[tid] += block_memory[tid + stride];
        }
        __syncthreads();
    }
    // Element 0 now holds the block total.
    if (tid == 0)
    {
        g_out[blockIdx.x] = block_memory[0];
    }
}
// logger: dump `size` ints tab-separated, inserting a line break at every
// index divisible by 8 (including i == 0, matching the original layout).
void trace(const int* array, const size_t size)
{
    for (size_t i = 0; i < size; ++i)
    {
        std::cout << array[i] << "\t";
        if (i % 8 == 0)
        {
            std::cout << std::endl;
        }
    }
}
int main()
{
    // 1 Mi input ints; `static` keeps the ~4 MiB buffer off the stack,
    // where it could overflow the default stack size.
    static int h_array_in[1024*1024];
    static int h_array_out[1024];
    int* d_array_in;  // 1024 * 1024 input values
    int* d_array_out; // 1024 per-block partial sums
    // CUDA events for timing the kernel.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // dummy-values for host array
    for(size_t i = 0; i < 1024*1024; ++i)
    {
        h_array_in[i] = static_cast<int>(i);
    }
    // allocate device memory
    cudaMalloc(&d_array_in, 1024*1024*sizeof(int));
    // Fix: cudaMemset takes the device pointer itself, not its address;
    // passing &d_array_in asked the runtime to clear the host pointer
    // variable's address as device memory.
    cudaMemset(d_array_in, 0, 1024*1024*sizeof(int));
    cudaMalloc(&d_array_out, 1024*sizeof(int));
    cudaMemset(d_array_out, 0, 1024*sizeof(int));
    // copy the host_array into device
    cudaMemcpy(d_array_in, h_array_in, 1024*1024*sizeof(int), cudaMemcpyHostToDevice);
    // launch kernel: 1024 blocks x 1024 threads, one int of shared memory
    // per thread
    dim3 gridSize(1024, 1, 1);
    dim3 blockSize(1024, 1, 1);
    cudaEventRecord(start);
    share_reduce_kernel<<<gridSize, blockSize, 1024*sizeof(int) /*allocated share memory size*/>>>(d_array_in, d_array_out);
    //global_reduce_kernel<<<gridSize, blockSize>>>(d_array_in, d_array_out)
    cudaEventRecord(stop);
    // copy back the result from device to host
    cudaMemcpy(h_array_out, d_array_out, 1024*sizeof(int), cudaMemcpyDeviceToHost);
    // Fix: the stop event must have completed before its timestamp can be
    // read; without this, cudaEventElapsedTime may fail or report garbage.
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "Elapsed time = " << milliseconds << std::endl;
    trace(h_array_out, 1024);
    // release device resources
    cudaFree(d_array_in);
    cudaFree(d_array_out);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
22,738 | //http://stackoverflow.com/questions/36436432/cuda-thrust-zip-iterator-tuple-transform-reduce
//STL
#include <iostream>
#include <stdlib.h>
//Thrust
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/zip_iterator.h>
using std::cout; using std::endl;
typedef thrust::device_vector< float > dvec;
typedef thrust::tuple< float, float > tup;
// Functor for transform_reduce: squared difference of a zipped pair.
struct func
{
    __device__ float operator()( tup t ) //difsq
    {
        float diff = thrust::get<0>(t) - thrust::get<1>(t);
        return diff * diff;
    }
};
int main()
{
    // Two length-4 device vectors: a = {4,4,4,4}, b = {2,2,2,2}.
    dvec a( 4, 4.f );
    dvec b( 4, 2.f );
    // Zip them so the functor sees one (a_i, b_i) tuple per element.
    auto first = thrust::make_zip_iterator( thrust::make_tuple( a.begin(), b.begin() ) );
    auto last  = thrust::make_zip_iterator( thrust::make_tuple( a.end(), b.end() ) );
    // Sum of squared differences: 4 * (4-2)^2 = 16.
    cout << thrust::transform_reduce( first, last, func(), 0.0f, thrust::plus< float >() ) << endl;
    cout << "done" << endl;
    return 0;
}
|
22,739 | #include <cstdio>
#include <cstdlib>
#include <vector>
// Single-block bucket sort of a[0..n) where all keys are in [0, range).
// Requires range * sizeof(int) bytes of dynamic shared memory.
// NOTE(review): the launch in main passes only `range` BYTES of shared
// memory -- confirm the launch uses range*sizeof(int).
// NOTE(review): the early return below precedes __syncthreads(); this is
// only safe when every thread of the single block satisfies i < n (true
// for the <<<1, n>>> launch in main).
__global__ void bucketsort(int *a, int n, int range) {
// init identifier
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i>=n) return;
// init bucket
extern __shared__ int bucket[];
__syncthreads();
// The first `range` threads zero one bucket counter each.
if (threadIdx.x<range)
bucket[threadIdx.x] = 0;
// add to bucket
__syncthreads();
// Histogram: count occurrences of each key.
atomicAdd( &bucket[a[i]], 1 );
__syncthreads();
// prefix sum for each bucket
// Walk the histogram until the running prefix passes this thread's rank i;
// the bucket it stops in is the key that belongs at position i.
int pos = 0;
int prefix = 0;
while(prefix + bucket[pos] <= i) {
prefix += bucket[pos];
pos++;
}
// spread bucket
a[i] = pos;
}
int main() {
    int n = 50;
    int range = 5;
    // Generate n random keys in [0, range) and print them.
    std::vector<int> key(n);
    for (int i=0; i<n; i++) {
        key[i] = rand() % range;
        printf("%d ",key[i]);
    }
    printf("\n");
    // copy to unified memory, leaving original vector intact
    int *a;
    cudaMallocManaged(&a, n*sizeof(int));
    for(int i=0;i<n;i++) a[i] = key[i];
    // call gpu: one block of n threads.
    // Fix: the third launch parameter is the dynamic shared memory size in
    // BYTES; the kernel's bucket[] holds `range` ints, so it needs
    // range * sizeof(int) bytes, not `range` bytes.
    bucketsort<<<1, n, range * sizeof(int)>>>(a, n, range);
    cudaDeviceSynchronize();
    // copy back to vector and print the sorted keys
    for (int i=0; i<n; i++) {
        key[i] = a[i];
        printf("%d ",key[i]);
    }
    printf("\n");
    cudaFree(a); // release the managed allocation (was leaked before)
}
|
22,740 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
using namespace std;
// C[m x k] = A[m x n] * B[n x k]; one thread per output element.
__global__
void gpu_matrix_mult(float *d_a, float *d_b, float *d_c, int m, int n, int k)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= k)
        return; // thread falls outside the output matrix
    // Dot product of row `row` of A with column `col` of B.
    float acc = 0.0;
    for (int t = 0; t < n; t++)
        acc += d_a[row * n + t] * d_b[t * k + col];
    d_c[row * k + col] = acc;
}
// Write an m x n row-major float matrix as CSV (one row per line, values
// comma-separated with no trailing comma). Silently returns if the file
// cannot be opened.
void SaveMatrixToCsvFile(float *matrix, int m, int n, char *fileName)
{
    FILE *f = fopen(fileName, "w");
    // Fix: the original dereferenced f unconditionally and crashed if the
    // file could not be created.
    if (f == NULL)
    {
        return;
    }
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            // Separator printed between values only -- this replaces the
            // original ftell/fseek trick (seeking on a text stream to
            // overwrite the trailing comma) with the same output bytes.
            if (j > 0)
            {
                fprintf(f, ",");
            }
            fprintf(f, "%f", matrix[i * n + j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
}
int main(int argc, char** argv)
{
    /*
    Execute on CPU
    Matrix h_a = host matrix A, size: m*n
    Matrix h_b = host matrix B, size: n*k
    Matrix h_c = host matrix C, size: m*k, is result of h_a * h_b
    Execute on GPU
    Matrix d_a = device matrix A, size: m*n
    Matrix d_b = device matrix B, size: n*k
    Matrix d_c = device matrix C, size: m*k, is result of d_a * d_b
    */
    // Matrices sizes:
    int m = 1024;
    int n = 5012;
    int k = 1024;
    // Host memory allocation (plain malloc -- released with free() below)
    float *h_a, *h_b, *h_c;
    h_a = (float*)malloc(m * n * sizeof(float)); // Host matrix A
    h_b = (float*)malloc(n * k * sizeof(float)); // Host matrix B
    // Random value initialization of host matrix A
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            h_a[i * n + j] = rand() % 1024;
        }
    }
    // Random value initialization of host matrix B
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < k; j++) {
            h_b[i * k + j] = rand() % 1024;
        }
    }
    printf("\nMatrix initalized.");
    // Device memory allocation
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, m * n * sizeof(float)); // Device matrix A
    cudaMalloc((void **) &d_b, n * k * sizeof(float)); // Device matrix B
    cudaMalloc((void **) &d_c, m * k * sizeof(float)); // Device matrix C (result)
    printf("\nMemory allocated in device");
    // Copy matrixes from host to device memory
    cudaMemcpy(d_a, h_a, sizeof(float)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float)*n*k, cudaMemcpyHostToDevice);
    printf("\nData copyed to device");
    // One 32x32 thread block per 32x32 tile of the output.
    int block_size = 32;
    dim3 dimBlock(block_size, block_size, 1);
    dim3 dimGrid(ceil((float)k / block_size), ceil((float)m / block_size), 1);
    printf("\nBlocks asigned");
    // Launch Kernel
    gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    cudaDeviceSynchronize();
    printf("\nKernel launched.");
    // Copy result from device to host
    h_c = (float*)malloc(m * k * sizeof(float)); // Host matrix C (result)
    cudaMemcpy(h_c, d_c, sizeof(float)*m*k, cudaMemcpyDeviceToHost);
    SaveMatrixToCsvFile(h_a, m, n, "matrix_a");
    SaveMatrixToCsvFile(h_b, n, k, "matrix_b");
    SaveMatrixToCsvFile(h_c, m, k, "matrix_c");
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Fix: these buffers came from malloc, so they must be released with
    // free(). cudaFreeHost is only valid for cudaMallocHost/cudaHostAlloc
    // allocations -- calling it on malloc'd memory is undefined behavior.
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
22,741 | /*
* James Jun 2019/12/22
* Fast approximation of knn using binned minimum parallel search
*/
#include <cuda_runtime.h>
#include <math.h>
#define ABS(my_val) ((my_val) < 0) ? -(my_val) : (my_val)
#define NC (45) //3pca x 16 channels max
#define SINGLE_INF (3.402E+38) // equipvalent to NAN. consider -1 value
#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
#define CHUNK (8) // 16
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
* D: Mininum distances (nT x nA, float)
* I: Index (nT x nA, int32)
* F: Feature matrix (nC x nF, single)
* IBB: vector of B offset (nBB x 1, int)
* NBB: vector of B counts (nBB x 1, int)
* const: [nC, nF, nBB, iA0, nA]
*/
// Each block caches CHUNK query points (A) in shared memory; each thread
// then scans its assigned slice of the reference bins (B) and keeps a
// per-thread running minimum distance/index for every cached query.
// Per-thread minima are written to column tx of D and I; the final
// reduction over threads is presumably done by the caller (MATLAB side)
// -- TODO confirm.
__global__ void search_min_drift(float *D, int *I, float const *F, const int *IBB, const int *NBB, const int *vnConst){
int nC = vnConst[0]; // number of feature dimension
int nF = vnConst[1]; // number of channels
// NOTE(review): nF is read but never used in this kernel.
int nBB = vnConst[2]; // number of blocks to read
int iA0 = vnConst[3]; // A offset
int nA = vnConst[4]; // number of A to read
int tx = threadIdx.x;
int nT = blockDim.x; // must be less than NTHREADS
// shared memory
__shared__ float sA[NC][CHUNK];
__shared__ int sI[CHUNK];
// thread memory
float tD[CHUNK]; // t: thread, s: shraed
int tI[CHUNK];
// initialize
// The first CHUNK threads stage this block's query points into shared
// memory; indices wrap modulo nA so out-of-range blocks re-read valid rows.
if (tx < CHUNK){
int iA_ = tx; // loop over CHUNK
int iA = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK + tx;
iA = (iA % nA) + iA0;
for (int iC=0; iC<nC; ++iC){
sA[iC][iA_] = F[iC + iA*nC]; // copy A->sA
}
sI[iA_] = iA;
}
__syncthreads();
//#pragma unroll
for (int iA_=0; iA_<CHUNK; ++iA_){
tD[iA_] = SINGLE_INF; // sD = inf
tI[iA_] = sI[iA_];
}
// find minimum for each bin, stride of 2xnThread to save shared memory
// Threads are partitioned across the nBB bins: tx%nBB selects the bin,
// tx/nBB is the thread's stride offset within that bin.
int iB0 = IBB[tx % nBB];
int nB0 = NBB[tx % nBB];
int iT0 = tx / nBB;
int nT0 = nT / nBB;
for (int iB=iT0+iB0; iB<nB0+iB0; iB+=nT0){
// Initialize distance vector
float dist_[CHUNK]; // #programa unroll?
for (int iA_=0; iA_<CHUNK; ++iA_) dist_[iA_] = 0.0f;
// Accumulate squared Euclidean distance to all CHUNK cached queries.
for (int iC=0; iC<nC; ++iC){
float b_ = F[iC + iB*nC];
for (int iA_=0; iA_<CHUNK; ++iA_){
float d_ = b_ - sA[iC][iA_];
dist_[iA_] += (d_ * d_);
}
}
// Keep the smallest strictly-positive distance (d_ > 0 excludes the
// query point matching itself).
for (int iA_=0; iA_<CHUNK; ++iA_){
float d_ = dist_[iA_];
if (dist_[iA_] < tD[iA_] && d_ > 0.0f){
tD[iA_] = d_;
tI[iA_] = iB;
}
}
} // while
// write the output
for (int iA_=0; iA_<CHUNK; ++iA_){
int iA1 = sI[iA_] - iA0; // guaranteed to be bound due to modulus algebra
if (iA1 >= 0 && iA1 < nA){
D[tx + nT*iA1] = sqrt(ABS(tD[iA_]));
I[tx + nT*iA1] = tI[iA_] + 1; // matlab 1 base
}
} // for
} // func
|
22,742 | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
/* example for atomic function usage
*/
// Every launched thread adds 1.0f to a[0]; atomicAdd serializes the
// read-modify-write so no increment is lost. (n is unused here.)
__global__ void atomic(int n, float *a) {
//a[0] += 1.0f; // gives wrong result
// instead use atomic function
atomicAdd(&a[0], 1.0f);
}
int main() {
    // Host buffer initialized to 0,1,2,... (only element 0 is updated by
    // the kernel).
    const int n = 1024;
    float *data = (float*) malloc(n * sizeof(float));
    for (int i = 0; i < n; i++)
        data[i] = (float)i;

    // Device copy of the buffer.
    float *data_dev;
    cudaMalloc((void**) &data_dev, n * sizeof(float));
    cudaMemcpy(data_dev, data, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaError_t error = cudaGetLastError();
    cout << "copy to device = " << error << " : " << cudaGetErrorString(error) << endl;

    // One block of 1024 threads; each performs one atomicAdd on data_dev[0].
    int nBlocks = 1;
    int nThreads = 1024;
    atomic<<<nBlocks, nThreads>>>(n, data_dev);
    error = cudaGetLastError();
    cout << "run kernel = " << error << " : " << cudaGetErrorString(error) << endl;

    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(data, data_dev, n * sizeof(float), cudaMemcpyDeviceToHost);
    error = cudaGetLastError();
    cout << "copy from device = " << error << " : " << cudaGetErrorString(error) << endl;

    cudaFree(data_dev);
    cout << "data[0] = " << data[0] << endl;
    free(data);
}
|
22,743 | #include <cuda.h>
#include <stdio.h>
// Enumerate all CUDA devices and print a short capability profile for each.
int main(void)
{
    int count;
    cudaGetDeviceCount(&count);
    for (int dev = 0; dev < count; dev++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf ("Device Profile for Device %d\n\n", dev);
        printf ("General Information - \n");
        printf (" Name:\t\t\t %s\n", prop.name);
        printf (" Compute capabilities:\t %d.%d\n", prop.major, prop.minor);
        printf (" Clock rate:\t\t %d\n\n", prop.clockRate);
        printf ("Memory Information - \n");
        printf (" Total global memory: \t %ld\n", prop.totalGlobalMem);
        printf (" Total constant memory: %ld\n\n", prop.totalConstMem);
        printf ("Multiprocessor Information - \n");
        printf (" Multiprocessor count:\t %d\n", prop.multiProcessorCount);
        printf (" Shared mem per mp: \t %ld\n", prop.sharedMemPerBlock);
        printf ("Max threads per block: \t %d\n", prop.maxThreadsPerBlock);
        printf ("\n\n");
    }
    return 0;
}
|
22,744 | #include <stdio.h>
#define BLOCK_SIZE 128
// Compute work[i] = force(i) * distance(i) for i in [0, n), where the force
// profile peaks at `middle` around index `leftMiddle` and decays toward the
// ends, and distance cycles through 1..10.
__global__ void calculateWork(int* work, const unsigned long long int leftMiddle, const unsigned long long int middle, const unsigned long long int n) {
    int i = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Fix: the grid is rounded up to a whole number of blocks, so trailing
    // threads had i >= n and wrote past the end of work[].
    if ((unsigned long long int)i >= n) return;
    int temp = i % n;
    int force;
    if (temp < leftMiddle) {
        // Left of the plateau: force drops with distance from leftMiddle.
        force = middle - (leftMiddle - temp);
    } else if (temp == leftMiddle || temp == middle) {
        force = middle;
    } else {
        // Right of the plateau: force decays toward the far end.
        force = n - temp;
    }
    int distance = i % 10 + 1;
    work[i] = force * distance;
}
// Host wrapper: allocate a device buffer, run calculateWork over n elements
// with BLOCK_SIZE-thread blocks, and copy the results into w. Exits the
// process on any CUDA failure.
extern "C" void gpuCalculate(int* w, const unsigned long long int leftMiddle, const unsigned long long int middle, const unsigned long long int n) {
    cudaError_t result;
    int* work;
    result = cudaMalloc((void**) &work, n * sizeof(int));
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Malloc failed, exiting...\n");
        exit(EXIT_FAILURE);
    }
    dim3 dimBlock(BLOCK_SIZE);
    // Round the grid up so every element is covered.
    unsigned long long int gridSize = n / BLOCK_SIZE;
    if (n % BLOCK_SIZE != 0) {
        gridSize += 1;
    }
    dim3 dimGrid(gridSize);
    calculateWork<<<dimGrid, dimBlock>>>(work, leftMiddle, middle, n);
    // Fix: launch-configuration errors were silently ignored; execution
    // errors still surface through the blocking cudaMemcpy below.
    result = cudaGetLastError();
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA kernel launch failed, exiting...\n");
        exit(EXIT_FAILURE);
    }
    result = cudaMemcpy(w, work, n * sizeof(int), cudaMemcpyDeviceToHost);
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Memcpy failed, exiting...\n");
        exit(EXIT_FAILURE);
    }
    result = cudaFree(work);
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA free failed, exiting...\n");
        exit(EXIT_FAILURE);
    }
}
22,745 | #include "includes.h"
// Demonstration kernel for NON-coalesced access: every thread's index is
// shifted by 7 so a warp's loads straddle alignment boundaries.
// NOTE(review): with this scheme elements 0..6 are only covered by the
// wrap-around branch below, and some elements near `size` appear reachable
// by two threads -- presumably acceptable for the coalescing exercise;
// verify against the assignment spec.
__global__ void task1_NoCoalescing(unsigned const* a, unsigned const* b, unsigned* result, size_t size)
{
auto index = blockIdx.x * blockDim.x + threadIdx.x + 7;
// Threads whose shifted index lands beyond size+6 have nothing to do.
if (index > size + 6) {
return;
}
// The last 7 threads wrap back by 7 so indices stay inside [0, size).
if (index >= size) {
index -= 7;
}
result[index] = a[index] * b[index];
}
22,746 | #include "includes.h"
// Elimination step of a Cholesky-style factorization for iteration k:
// each block updates one row i > k of the MATRIX_SIZE x MATRIX_SIZE
// matrix U, subtracting U[k][i] * U[k][j] from U[i][j].
// NOTE: MATRIX_SIZE is a macro defined elsewhere in the project.
__global__ void chol_kernel_cudaUFMG_elimination(float * U, int k) {
//This call acts as a single K iteration
//Each block does a single i iteration
//Need to consider offset,
int i = (k+1) + blockIdx.x;
//Each thread does some part of j
//Stide in units of 'stride'
//Thread 0 does 0, 16, 32
//Thread 1 does 1, 17, 33
//..etc.
int jstart = i + threadIdx.x;
int jstep = blockDim.x;
// Pre-calculate indexes
int kM = k * MATRIX_SIZE;
int iM = i * MATRIX_SIZE;
int ki = kM + i;
//Do work for this i iteration
//Want to stride across
// Only columns j >= i are touched (upper-triangular update).
for (int j=jstart; j<MATRIX_SIZE; j+=jstep) {
U[iM + j] -= U[ki] * U[kM + j];
}
}
22,747 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 16
// Tiled matrix multiply; assumes blockDim.x == blockDim.y == TILE_SIZE
// (the shared tiles are statically sized TILE_SIZE x TILE_SIZE).
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use shared memory for tiling
*
********************************************************************/
// INSERT KERNEL CODE HERE
int tileL = blockDim.x;
__shared__ float dA[TILE_SIZE][TILE_SIZE]; // a tile in A
__shared__ float dB[TILE_SIZE][TILE_SIZE]; // a tile in B
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float val = 0;
// March the tile window across the k dimension; out-of-range elements
// are zero-padded so partial edge tiles contribute nothing.
for (int p = 0; p < k; p += tileL) {
// load dA
if (row < m && (p + threadIdx.x) < k) dA[threadIdx.y][threadIdx.x] = A[row * k + p + threadIdx.x];
else dA[threadIdx.y][threadIdx.x] = 0;
// load dB
if (col < n && (p + threadIdx.y) < k) dB[threadIdx.y][threadIdx.x] = B[(p + threadIdx.y) * n + col];
else dB[threadIdx.y][threadIdx.x] = 0;
__syncthreads();
// compute
for (int i = 0; i < tileL; i++) val += dA[threadIdx.y][i] * dB[i][threadIdx.x];
__syncthreads();
}
if (row < m && col < n) C[row * n + col] = val;
}
// SGEMM-style entry point; only supports the non-transposed case with
// alpha == 1 and beta == 0, and dispatches to the tiled mysgemm kernel.
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    const bool transaOk = (transa == 'N') || (transa == 'n');
    if (!transaOk) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    const bool transbOk = (transb == 'N') || (transb == 'n');
    if (!transbOk) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }
    // One TILE_SIZE x TILE_SIZE thread block per output tile, grid rounded
    // up to cover the whole m x n output.
    const unsigned int BLOCK_SIZE = TILE_SIZE;
    dim3 blocks((n - 1) / BLOCK_SIZE + 1, (m - 1) / BLOCK_SIZE + 1, 1);
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE, 1);
    mysgemm<<<blocks, threads>>>(m, n, k, A, B, C);
}
|
22,748 | #include <stdio.h>
// Print the capability profile of device 0.
int main() {
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    printf("%24s: %s\n", "Name", props.name);
    // Fix: the memory-size and alignment fields of cudaDeviceProp are
    // size_t; printing them with %d is undefined behavior on 64-bit
    // platforms (garbled/truncated output). Use %zu.
    printf("%24s: %zu\n", "Total global memory", props.totalGlobalMem);
    printf("%24s: %zu\n", "Shared memory per block", props.sharedMemPerBlock);
    printf("%24s: %d\n", "Registers per block", props.regsPerBlock);
    printf("%24s: %d\n", "Warp size", props.warpSize);
    printf("%24s: %zu\n", "memPitch", props.memPitch);
    printf("%24s: %d\n", "Max threads per block", props.maxThreadsPerBlock);
    printf("%24s: %dx%dx%d\n", "Max threads dimensions",
           props.maxThreadsDim[0],
           props.maxThreadsDim[1],
           props.maxThreadsDim[2]);
    printf("%24s: %dx%dx%d\n", "Max grid size",
           props.maxGridSize[0],
           props.maxGridSize[1],
           props.maxGridSize[2]);
    printf("%24s: %d kHz\n", "Clock rate", props.clockRate);
    printf("%24s: %zu\n", "Total const memory", props.totalConstMem);
    printf("%24s: %d\n", "Major", props.major);
    printf("%24s: %d\n", "Minor", props.minor);
    printf("%24s: %zu\n", "Texture alignment", props.textureAlignment);
    return 0;
}
|
22,749 | #include<iostream>
#include<vector>
// Element-wise: b[index] = (b[index] + a[index]) / n. With b zeroed by the
// caller this leaves a[index]/n in each slot, so summing the outputs on the
// host yields the mean of a.
__global__ void averageCal(float *a, float *b, int n){
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Fix: the grid is rounded up to whole blocks, so threads with
    // index >= n used to read and write past the end of both arrays.
    // Each access is guarded individually (instead of an early return) so
    // that every thread still reaches the barrier below.
    if (index < n)
        b[index] += a[index];
    __syncthreads();
    if (index < n)
        b[index] /= n;
}
int main(){
    const int N = 100; // Number of elements in the array
    std::vector<float> num_array(N);
    std::vector<float> averageVal(N, 0.0);
    // Fill the input with 0, 1, ..., N-1.
    for (int i = 0; i < N; i++)
        num_array[i] = i;

    // Device buffers mirroring both host vectors.
    const size_t bytes = N * sizeof(float);
    float *d_num;
    cudaMalloc(&d_num, bytes);
    float *d_average;
    cudaMalloc(&d_average, bytes);
    cudaMemcpy(d_num, num_array.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_average, averageVal.data(), bytes, cudaMemcpyHostToDevice);

    // Invoke kernel: enough 256-thread blocks to cover N elements.
    const int threadperblock = 256;
    const int blockdim = (N + threadperblock - 1) / threadperblock;
    averageCal<<<blockdim, threadperblock>>>(d_num, d_average, N);

    // Copy the per-element results back and print them.
    cudaMemcpy(averageVal.data(), d_average, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        std::cout << averageVal[i] << std::endl;

    cudaFree(d_num);
    cudaFree(d_average);
}
|
22,750 | // filename: gaxpy.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "gaxpy"
{
    // c[i] = a[0] * b[i] + c[i] for i in [0, lengthC): an axpy where the
    // scalar is broadcast from a[0]. (Remember zero indexing in C!)
    __global__ void gaxpy(const int lengthC, const double *a, const double *b, double *c)
    {
        const int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx >= lengthC)
            return; // grid may overshoot the vector length
        c[idx] = a[0] * b[idx] + c[idx];
    }
}
22,751 | /*
* CUDA kernel for geometric mean for calculating response of COSFIRE filter
* Sofie Lovdal 18.6.2018
* The input is a flattened 3D array of all responses obtained from the COSFIRE
* algorithm. The argument output is a buffer for the final response, input is a 1D
* array of dimensions numResponses*rumRows*numCols.
*/
// Pixel-wise geometric mean over a stack of numResponses COSFIRE filter
// response images (flattened numResponses x numRows x numCols input).
// One thread per output pixel.
// NOTE(review): the `threshold` parameter is currently unused -- the
// original left thresholding as an open question.
__global__ void geometricMean(double * output, double * const input,
unsigned int const numRows, unsigned int const numCols,
int const numResponses, double const threshold)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Skip threads that fall outside the image.
    if (col >= numCols || row >= numRows) return;
    // Linear index of this pixel in each response plane.
    const int pixel = row * numCols + col;
    const int planeSize = numRows * numCols;
    // Product of this pixel's value across all response planes.
    double product = 1.0;
    for (int r = 0; r < numResponses; r++) {
        product *= input[pixel + r * planeSize];
    }
    // Geometric mean = numResponses-th root of the product.
    output[pixel] = pow(product, 1.0 / (double)numResponses);
}
|
22,752 | #include "includes.h"
// Reverse d[0..n) in place via a statically sized shared buffer.
// NOTE(review): the buffer holds 64 ints, so this assumes n <= 64 and
// blockDim.x == n -- confirm the launch configuration.
__global__ void staticReverse(int *d, int n)
{
    __shared__ int s[64];
    const int idx = threadIdx.x;
    const int mirrored = n - idx - 1;
    // Stage all values, then barrier so every element is in shared memory
    // before any thread reads its mirrored slot.
    s[idx] = d[idx];
    __syncthreads();
    d[idx] = s[mirrored];
}
22,753 | __global__ void cuda_op_function(const float *in, const int N, float* out){
// Grid-stride loop: each thread handles indices i, i+stride, i+2*stride...
// so any launch configuration covers all N elements.
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
// Default output is 2*i + 1; inputs equal to -1.0f pass through unchanged.
out[i] = (float)(2*i) + 1.0f;
if(in[i] == -1.0f){
out[i] = in[i];
}
}
}
// Host wrapper: launch with a fixed 32-block x 256-thread grid; the
// grid-stride loop inside the kernel covers any N.
void cuda_op_launcher(const float *in, const int N, float* out){
cuda_op_function<<<32,256>>>(in, N, out);
}
|
22,754 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
// Exercise cudaMemcpy2D host-to-host with mismatched pitches and print the
// resulting buffers.
int main(int argc, char* argv[])
{
    cudaError_t iRet;

    // Source: 100 bytes holding values 0..99.
    char* p = (char*) malloc(100);
    assert(p != NULL);
    // Destinations for the two strided copies below.
    char* q = (char*) malloc(20);
    assert(q != NULL);
    char* r = (char*) malloc(40);
    assert(r != NULL);
    for (int i = 0; i < 100; ++i)
    {
        p[i] = i;
    }

    // 20 rows of 1 byte from p (pitch 5) into q (pitch 1): every 5th byte.
    iRet = cudaMemcpy2D(q, 1, p, 5, 1, 20, cudaMemcpyHostToHost);
    printf("**********\niRet = %d\n**********\n", iRet);
    // 20 rows of 2 bytes from p (pitch 5) into r (pitch 2).
    iRet = cudaMemcpy2D(r, 2, p, 5, 2, 20, cudaMemcpyHostToHost);
    printf("**********\niRet = %d\n**********\n", iRet);

    for (int i = 0; i < 100; ++i)
    {
        printf("%d ", p[i]);
    }
    printf("\n**********\n");
    for (int i = 0; i < 20; ++i)
    {
        printf("%d ", q[i]);
    }
    printf("\n**********\n");
    for (int i = 0; i < 40; ++i)
    {
        printf("%d ", r[i]);
    }
    printf("\n");

    free(r);
    free(q);
    free(p);
    return 0;
}
|
22,755 | #include "includes.h"
// c = a * b for square width x width matrices; one thread per element of c.
__global__ void multi(float *a, float *b, float *c, int width) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= width || col >= width)
        return; // thread falls outside the matrix
    // Dot product of row `row` of a with column `col` of b.
    float acc = 0;
    for (int k = 0; k < width; k++)
        acc += a[row * width + k] * b[k * width + col];
    c[row * width + col] = acc;
}
22,756 | #include "includes.h"
// Softmax backward pass: for each frame k (one block per frame),
// gradInput = output * (gradOutput - sum(gradOutput * output)).
// NOTE: SOFTMAX_THREADS is a macro defined elsewhere; buffer[] assumes
// blockDim.x <= SOFTMAX_THREADS.
__global__ void cunn_SoftMax_updateGradInput_kernel(float *gradInput, float *output, float *gradOutput, int nframe, int dim)
{
__shared__ float buffer[SOFTMAX_THREADS];
int k = blockIdx.x;
// Per-frame base pointers (each frame is `dim` floats long).
float *gradInput_k = gradInput + k*dim;
float *output_k = output + k*dim;
float *gradOutput_k = gradOutput + k*dim;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
// sum?
// Phase 1: each thread accumulates its strided share of the dot product
// gradOutput . output into its buffer slot.
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step)
buffer[threadIdx.x] += gradOutput_k[i] * output_k[i];
__syncthreads();
// reduce
// Phase 2: thread 0 serially folds all partial sums into buffer[0].
if (threadIdx.x == 0)
{
float sum_k = 0;
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[0] = sum_k;
}
__syncthreads();
// Phase 3: every thread applies the gradient formula with the shared sum.
float sum_k = buffer[0];
for (int i=i_start; i<i_end; i+=i_step)
gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k);
}
22,757 | #include "includes.h"
// Compute the azimuthal mean of Vtheta for each radial ring i:
// Vmoy[i] = (1/nsec) * sum_j Vtheta[i*nsec + j]. One thread per ring.
// NOTE(review): Rsup, Rinf and Rmed are never used here -- presumably the
// signature is shared with sibling CFL kernels; confirm before removing.
__global__ void ConditionCFLKernel1D (double *Rsup, double *Rinf, double *Rmed, int nrad, int nsec, double *Vtheta, double *Vmoy)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int j;
if (i<nrad){
Vmoy[i] = 0.0;
for (j = 0; j < nsec; j++)
Vmoy[i] += Vtheta[i*nsec + j];
Vmoy[i] /= (double)nsec;
}
}
22,758 | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<sys/time.h>
void usage(int exitStatus, char* programName);
int sumArray(int* array, int arraySize);
void getSeqPrimes(int* array, int arraySize);
__host__ __device__ int isPrime(int value);
// Each thread tests one odd candidate (2*threadId + 1) below N and records
// the 0/1 result at d_array[candidate]. Thread 0 (candidate 1) is skipped;
// even slots are never written (the caller zeroes the array).
__global__ void getPrimes(int* d_array, int N){
    const int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    if (threadId < 1)
        return; // candidate 1 is excluded
    const int candidate = threadId * 2 + 1;
    if (candidate < N)
        d_array[candidate] = isPrime(candidate);
}
/*
 * Trial-division primality test usable from both host and device.
 * Returns 1 if value is prime, 0 otherwise.
 */
__host__ __device__ int isPrime(int value){
    // Fix: 0, 1 and negatives are not prime; the original returned 1 for
    // them because the trial-division loop below never executed. Callers in
    // this file only pass odd values >= 3, so this is backward compatible.
    if (value < 2)
        return 0;
    // Divisors need only be checked up to sqrt(value).
    int limit = (int) sqrt( (float) value ) + 1;
    for (int j = 2; j < limit; j++){
        if (value % j == 0){
            return 0;
        }
    }
    return 1;
}
int main(int argc, char** argv){
    if(argc != 3)
        usage(1, argv[0]);
    // Parse the command line: N = upper bound, blockSize = CUDA block size.
    int N = (int) atoi(argv[1]);
    int blockSize = (int) atoi(argv[2]);
    // Fix: the original test `!(N | blockSize)` only rejected the case
    // where BOTH values were zero; an invalid value for either argument
    // must abort (blockSize == 0 would otherwise divide by zero below).
    if (N <= 0 || blockSize <= 0)
        usage(2, argv[0]);
    int arraySizeInBytes = sizeof(int) * (N + 1);
    // index 0 : start time, index 1: end time
    struct timeval sequentialTimes[2] = {{0,0},{0,0}};
    struct timeval parallelTimes[2] = {{0,0},{0,0}};
    // allocate our arrays (seqArray zeroed: even slots are never written)
    int* h_array = (int*) malloc(arraySizeInBytes);
    int* d_array = NULL;
    int* seqArray = (int*) calloc(sizeof(int), N + 1);
    // calculate the grid size: one thread per odd candidate
    int gridSize = (int)ceil((N + 1) / 2.0 / blockSize);
    // --- parallel version, timed ---
    gettimeofday( &(parallelTimes[0]), NULL);
    // allocate and zero device memory for the result flags
    cudaMalloc(&d_array, arraySizeInBytes);
    cudaMemset(d_array, 0, arraySizeInBytes);
    // run the kernel
    getPrimes<<<gridSize, blockSize>>>(d_array, N);
    // copy the results back to the host array (blocking, so it also
    // synchronizes with the kernel)
    cudaMemcpy(h_array, d_array, arraySizeInBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_array);
    gettimeofday( &(parallelTimes[1]) , NULL);
    // --- sequential version, timed ---
    gettimeofday( &(sequentialTimes[0]), NULL);
    getSeqPrimes(seqArray, N + 1);
    gettimeofday( &(sequentialTimes[1]), NULL);
    // convert timevals to seconds and compute costs / speedup
    double parallelSeconds[2];
    parallelSeconds[0] = parallelTimes[0].tv_sec + ((double)parallelTimes[0].tv_usec / 1000000);
    parallelSeconds[1] = parallelTimes[1].tv_sec + ((double)parallelTimes[1].tv_usec / 1000000);
    double sequentialSeconds[2];
    sequentialSeconds[0] = sequentialTimes[0].tv_sec + ((double)sequentialTimes[0].tv_usec / 1000000);
    sequentialSeconds[1] = sequentialTimes[1].tv_sec + ((double)sequentialTimes[1].tv_usec / 1000000);
    double parallelCost = parallelSeconds[1] - parallelSeconds[0];
    double sequentialCost = sequentialSeconds[1] - sequentialSeconds[0];
    double speedup = sequentialCost / parallelCost;
    // prime counts from both versions should agree
    int seqSum = sumArray(seqArray, N + 1);
    int parSum = sumArray(h_array, N + 1);
    printf(" N: %d\n", N);
    printf(" blockSize: %d\n", blockSize);
    printf(" gridSize: %d\n", gridSize);
    printf("sequential prime count: %d\n", seqSum);
    printf(" parallel prime count: %d\n", parSum);
    printf(" parallel time cost: %lf\n", parallelCost);
    printf(" sequential time cost: %lf\n", sequentialCost);
    printf(" speedup: %lf\n", speedup);
    free(h_array);
    free(seqArray);
    return 0;
}
// Sequential reference: test every odd candidate >= 3 below arraySize.
// Even slots (including index 2) are left untouched -- the caller supplies
// a zeroed array, matching the GPU kernel which also tests only odd values.
void getSeqPrimes(int* array, int arraySize){
    for (int candidate = 3; candidate < arraySize; candidate += 2){
        array[candidate] = isPrime(candidate);
    }
}
// Sum the first arraySize elements of array.
int sumArray(int* array, int arraySize){
    int total = 0;
    for (int i = 0; i < arraySize; ++i){
        total += array[i];
    }
    return total;
}
// Print a usage message to stderr and terminate with the given exit status.
void usage(int exitStatus, char* programName){
fprintf(stderr, "usage: %s N blockSize\n", programName);
exit(exitStatus);
}
|
22,759 | #include<iostream>
#include<cuda_runtime.h>
#include<cmath>
using namespace std;
/*
Per-block sum of squared matrix elements using an atomic add.
(Translated from Croatian: "suma elemenata po blokovima koristenjem
atomic funkcije".)
Each block accumulates the squares of its tile's elements into one shared
slot (indexed by the block's linear id) and mirrors it to fsum.
NOTE(review): the shared array is indexed up to gridDim.x*gridDim.x-1, so
the launch must supply at least that many ints of dynamic shared memory --
the launch in main passes only gridDimension BYTES; verify.
NOTE(review): the final fsum write is not preceded by a barrier after the
atomicAdd, so a thread may publish a partial sum; the commented-out
__syncthreads() below suggests this was noticed -- confirm intent.
*/
__global__ void funkc(int *M, int dim, unsigned int *fsum)
{
unsigned int rez;
extern __shared__ int sum[];
// Every thread of the block zeroes the block's single shared slot.
sum[blockIdx.x*gridDim.x + blockIdx.y] = 0;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Square of this thread's element, or 0 when outside the dim x dim matrix.
if (i < dim && j < dim)
rez = M[dim*i+j]*M[dim*i+j];
else
rez = 0;
atomicAdd(&sum[blockIdx.x*gridDim.x + blockIdx.y], rez);
//__syncthreads();
// Mirror the block's shared accumulator to global memory.
fsum[blockIdx.x*gridDim.x + blockIdx.y] = sum[blockIdx.x*gridDim.x + blockIdx.y] ;
//__syncthreads();
}
/*
Sum of the vector of per-block squared sums produced by funkc.
(Translated from Croatian: "suma elemenata vektora dobivenog kao suma
kvadrata elemenata blokova prethodne funkcije".)
Single-block kernel: launch as <<<1, dim>>> with dim <= 1024.
*/
__global__ void vecAdd(unsigned int *M, int dim, int *r)
{
__shared__ int rez;
// Every thread writes 0 before the barrier -- racy but benign, since all
// threads store the same value.
rez = 0;
__syncthreads();
int i = threadIdx.x;
int value(0);
if(i < dim)
value = M[i];
atomicAdd(&rez, value);
__syncthreads();
// All threads store the same final total; only r[0] is meaningful.
r[0] = rez;
}
// Frobenius norm of an N x N matrix: sqrt of the sum of squared elements,
// computed as per-block partial sums (funkc) followed by a final reduction
// (vecAdd).
int main(int argc, char*argv[])
{
    int N(100);
    size_t size = N*N*sizeof(int);
    int *M_h = (int*)malloc(size);
    // Generate the matrix (every element is less than 3).
    for(int i(0); i < N*N; i++)
        M_h[i] = i%3;
    int *M_d;
    cudaMalloc(&M_d, size);
    cudaMemcpy(M_d, M_h, size, cudaMemcpyHostToDevice);
    // Topology: 32x32 threads per block, enough blocks to cover the matrix.
    dim3 threadsPerBlock(32,32);
    dim3 blocksPerGrid((N/threadsPerBlock.x) + 1, (N/threadsPerBlock.y) + 1);
    int gridDimension = blocksPerGrid.x*blocksPerGrid.y;
    int *result = (int*)malloc(gridDimension*sizeof(int));
    unsigned int *fsum;
    cudaMalloc(&fsum, gridDimension*sizeof(int));
    // Per-block sums of squared elements.
    // Fix: the third launch parameter is the dynamic shared memory size in
    // BYTES; the kernel indexes sum[] as an int array, so it needs
    // gridDimension * sizeof(int) bytes, not gridDimension bytes.
    funkc<<<blocksPerGrid, threadsPerBlock, gridDimension*sizeof(int)>>>(M_d, N, fsum);
    cudaMemcpy(result, fsum, gridDimension*sizeof(int), cudaMemcpyDeviceToHost);
    // Reduce the per-block sums to a single total.
    int *vectorSum;
    cudaMalloc(&vectorSum, sizeof(int));
    vecAdd<<<1, gridDimension>>>(fsum, gridDimension, vectorSum);
    int *sumAll = (int*)malloc(sizeof(int));
    cudaMemcpy(sumAll, vectorSum, sizeof(int), cudaMemcpyDeviceToHost);
    cout<<endl<<"Konacan rezultat je : "<<sqrt(sumAll[0])<<endl;
    free(M_h);
    free(result); // was leaked before
    free(sumAll);
    cudaFree(M_d);
    cudaFree(fsum);
    cudaFree(vectorSum);
    return 0;
}
|
22,760 | #include <stdio.h>
// Hidden Markov model parameters; matrices are flattened row-major floats.
struct model {
int states;      // number of hidden states
int emissions;   // size of the observation alphabet
float* transition; // states x states transition matrix (see trans macro)
float* emission;   // emission probabilities (see emis macro)
float* initial;    // initial state distribution, length states
};
// Accessor macros; they expect locals named transition/emission/initial
// and states to be in scope at the point of use.
// NOTE(review): emis() strides by `states` rather than the emission-alphabet
// size -- verify this matches the layout of the emission matrix.
#define trans(from,to) (transition[from*states+to])
#define emis(state,obs) (emission[state*states+obs])
#define init(state) (initial[state])
// Block-wide tree sum of shared[0..states) driven by one call per thread
// (state = this thread's index). After log2(states) passes shared[0] holds
// the total, which every caller returns. The printf calls are debug
// instrumentation.
// NOTE(review): the __syncthreads() inside the loop requires ALL threads of
// the block to call this function; callers return early for
// state >= states, which is only safe if blockDim.x == states -- confirm.
__device__ float par_sum(int state, float *shared, int states) {
printf("called with state %d of %d\n",state,states);
int step=1;
int depth=2;
do {
printf("Depth %d Thread %d temp %f states %d\n", depth, state, shared[state], states);
// At each pass, threads whose index is a multiple of `depth` absorb the
// partial sum sitting `step` slots to their right.
if (state%depth == 0) {
printf("Checked Depth %d Thread %d temp %f states %d\n", depth, state, shared[state], states);
if (state+step < states) {
// regular case
// to self add depth far right sibiling
printf("Summing %f += %f\n",shared[state],shared[state+step]);
shared[state] += shared[state+step];
} else {
printf("Else clause state %d + step %d < states %d\n",state,step,states);
// loose end case
// do nothing
// same as copy from previous
}
}
__syncthreads();
step = depth;
} while ((depth*=2) < states);
printf("Parralel sum %f\n",shared[0]);
return shared[0];
}
/*
Takes model and observation sequence of length len and produces
alfa part of forward-backward algorithm. Normalized for
numerical stability.
Assumes alpha is pointer to float array of size m->states * len
*/
// call withc alpha_norm<<grid,block,len*sizeof(int)>>
// Forward (alpha) pass of the forward-backward algorithm, normalized per
// time step for numerical stability. One thread per hidden state; shared
// memory (>= states floats) is used for the per-step normalizer.
// NOTE(review): the early return below precedes __syncthreads()/par_sum
// barriers, so the launch must use exactly `states` participating threads.
// NOTE(review): the time loop stops at len-1, so the final observation is
// never processed -- confirm whether that is intended.
__global__ void alpha_norm(float* alpha, int states, int emissions, int len, float *transition, float* emission, float *initial, int* obs) {
int state = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int s[];
float *shared = (float *)s;
if (state >= states) {
return;
}
#if 0
// clear output memory
for (int i = 0; i < len; i++) {
alpha[state*len + i] = 0;
}
#endif
if (obs[0] > emissions) {
printf("Observation %d outside model\n",obs[0]);
}
printf("Before [%f, %f]\n",alpha[0],alpha[1]);
// initialize edge case
// t = 0: alpha_0(state) = pi(state) * b_state(obs[0]).
shared[state] = init(state) * emis(state,obs[0]);
// normalize
alpha[state] = shared[state];
__syncthreads();
float sum = par_sum(state, shared, states);
printf("Sum %f\n",sum);
alpha[state] /= sum;
__syncthreads();
printf("After [%f, %f]\n",alpha[0],alpha[1]);
printf("initial %d state \n", state);
// j - observations
// for each observation sta
for (int j = 1; j < len - 1; j++) {
int idx = j * states + state;
printf("Internal state %d, j %d, idx %d\n",state,j,idx);
// i - previous state
// sum for each previous state * transition from previous to current
sum = 0.f;
for (int i = 0; i < states; i++) {
printf("Internal state %d, j %d, i%d\n",state,j,i);
sum += alpha[states * (j-1) + i] * trans(i,state);
}
// normalize
// alpha_j(state) = (sum over prev) * b_state(obs[j]), then divide by the
// block-wide sum so each time step's alphas sum to 1.
shared[state]= sum * emis(state,obs[j]);
alpha[idx] = shared[state];
__syncthreads();
sum = par_sum(state, shared, states);
alpha[idx] /= sum;
__syncthreads();
}
printf("end %d state \n", state);
__syncthreads();
}
/*
 * Backward (beta) pass of the forward-backward algorithm. One thread per
 * hidden state; beta is laid out as beta[j*states + state] in the main loop.
 * NOTE(review): the clear loop below indexes beta[state*len + i] — a
 * different (transposed) layout from the rest of the kernel; confirm which
 * layout is intended.
 * NOTE(review): the standard backward recursion sums
 * a(state,i) * b_i(obs[j+1]) * beta_{j+1}(i) over i, but this code uses
 * emis(state, obs[j+1]) and beta at the thread's own state for every i —
 * verify against the reference formula.
 * NOTE(review): threads with state >= states return before the
 * __syncthreads() barriers below; divergent barrier if blockDim.x > states.
 */
__global__ void beta_norm(float* beta, int states, int emissions, int len, float *transition, float* emission, float *initial, int* obs) {
int state = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int s[];
float *shared = (float *)s;  // only used by the commented-out normalization below
if (state >= states) {
return;
}
#if 1
// clear output memory
for (int i = 0; i < len; i++) {
beta[state*len + i] = 0;
}
#endif
// edge case: beta at the last observation is uniform
int idx = (len-1) * states + state;
beta[idx] = 1.f/states;
// j - observation
// for each observation from the end
for (int j = len - 2; j >= 0; j--) {
int nidx = (j+1) * states + state;  // this state's beta at time j+1
float sum = 0.f;
for (int i = 0; i < states; i++) {
sum += trans(state,i) * emis(state,obs[j+1]) * beta[nidx];
}
idx = j * states + state;
beta[idx] = sum;
__syncthreads();
/* shared[state] = beta[idx]; */
/* __syncthreads(); */
/* sum = par_sum(state, shared, states); */
/* beta[idx] /= sum; */
__syncthreads();
}
__syncthreads();
}
|
22,761 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define EXPO 7
/*
 * One round of recursive doubling for the scalar recurrence
 * y(i) = m(i)*y(i-1) + y(i): thread tx updates element step+tx using the
 * value `step` positions to its left.
 * deviceY/deviceM are double-buffered: indices [0, variableSize) hold one
 * generation and [variableSize, 2*variableSize) the other; the parity of
 * evenOrOddFlag selects which half is written this round, so reads and
 * writes never touch the same half.
 * blockRow/blockColumn describe the launch shape but are otherwise unused
 * here, as are bx/by/ty.
 * NOTE(review): the trailing __syncthreads() at kernel end has no effect;
 * inter-round ordering is provided by successive kernel launches.
 */
__global__ void RecursiveDoublingKernel(int variableSize, int step,int blockRow, int blockColumn,float* deviceY,float* deviceM,int evenOrOddFlag)
{
//we will do something like y(i+1)=m*y(i)+b
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int processIndex=tx;
/* printf("%d ",tx);
printf("%f,%f,%f \n",deviceY[0],deviceY[1],deviceY[2]);
printf("%f,%f,%f \n",deviceM[0],deviceM[1],deviceM[2]);*/
//so M and Y will be divided into two parts, the first part stores the old value
//the second half part stores the updated value
int halfSize=variableSize;
//the start index of the second part will be halfSize;
//so if evenOrOddFlag is odd, the new value will be stored in the second half,
//otherwise it will be stored in the first half.
int secondhalfHelper=halfSize+step+processIndex;
//printf("second half helper is: %d \n",secondhalfHelper);
//be careful: for indices below `step` the old value still needs to be copied
//to the current half, since new values are only computed from step onward
if(evenOrOddFlag%2==1)
{
//printf("does this ever got run?");
// write into the second half, reading operands from the first half
deviceY[secondhalfHelper]=deviceY[secondhalfHelper-halfSize]+deviceM[secondhalfHelper-halfSize]*deviceY[processIndex];
deviceM[secondhalfHelper]=deviceM[secondhalfHelper-halfSize]*deviceM[processIndex];
//copy the unchanged prefix [0, step) once, done by a single thread
if(tx==0&&ty==0)
{
for(int i=0;i<step;i++)
{
deviceY[i+halfSize]=deviceY[i];
deviceM[i+halfSize]=deviceM[i];
}
}
}
else
{
// write into the first half, reading operands from the second half
deviceY[secondhalfHelper-halfSize]=deviceY[secondhalfHelper]+deviceM[secondhalfHelper]*deviceY[halfSize+processIndex];
deviceM[secondhalfHelper-halfSize]=deviceM[secondhalfHelper]*deviceM[halfSize+processIndex];
if(tx==0&&ty==0) //just need to copy once, so the other processors are allowed to idle at this time
{
for(int i=0;i<step;i++)
{
deviceY[i]=deviceY[i+halfSize];
deviceM[i]=deviceM[i+halfSize];
}
}
}
__syncthreads();
}
/*
 * One round of the reverse ("loop back") recursive doubling sweep:
 * thread tx updates element tx using the value `step` positions to its
 * RIGHT (back-substitution direction), mirroring RecursiveDoublingKernel.
 * Same double-buffering scheme: halves [0, variableSize) and
 * [variableSize, 2*variableSize) alternate between old and new generation
 * based on the parity of evenOrOddFlag. Here the unchanged SUFFIX
 * [variableSize-step, variableSize) is carried over each round.
 * blockRow/blockColumn/bx/by/ty are unused.
 * NOTE(review): the trailing __syncthreads() at kernel end has no effect.
 */
__global__ void LoopingbackRecursiveDoublingKernel(int variableSize, int step,int blockRow, int blockColumn,float* deviceY,float* deviceM,int evenOrOddFlag)
{
//we will do something like y(i+1)=m*y(i)+b
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int processIndex=tx;
/* printf("%d ",tx);
printf("%f,%f,%f \n",deviceY[0],deviceY[1],deviceY[2]);
printf("%f,%f,%f \n",deviceM[0],deviceM[1],deviceM[2]);*/
//so M and Y will be divided into two parts, the first part stores the old value
//the second half part stores the updated value
int halfSize=variableSize;
//the start index of the second part will be halfSize;
//so if evenOrOddFlag is odd, the new value will be stored in the second half,
//otherwise it will be stored in the first half.
//int secondhalfHelper=halfSize+step+processIndex;
int secondhalfHelper=halfSize+processIndex;
//printf("second half helper is: %d \n",secondhalfHelper);
//be careful: the trailing `step` old values still need to be copied to the
//current half, since new values stop being computed near the right edge
if(evenOrOddFlag%2==1)
{
// write into the second half, reading operands from the first half
deviceY[secondhalfHelper]=deviceY[processIndex]+deviceY[processIndex+step]*deviceM[processIndex];
deviceM[secondhalfHelper]=deviceM[processIndex]*deviceM[processIndex+step];
//now the reverse part needs to copy the tail of the old generation:
//indices [variableSize-step, variableSize)
if(tx==0&&ty==0)
{
for(int i=variableSize-step;i<variableSize;i++)
{
deviceY[i+halfSize]=deviceY[i];
deviceM[i+halfSize]=deviceM[i];
}
}
}
else
{
// write into the first half, reading operands from the second half
deviceY[processIndex]=deviceY[halfSize+processIndex]+deviceY[halfSize+step+processIndex]*deviceM[halfSize+processIndex];
//deviceY[secondhalfHelper-halfSize]=deviceY[secondhalfHelper]+deviceM[secondhalfHelper]*deviceY[halfSize+processIndex];
//deviceM[secondhalfHelper-halfSize]=deviceM[secondhalfHelper]*deviceM[halfSize+processIndex];
deviceM[processIndex]=deviceM[processIndex+halfSize]*deviceM[processIndex+halfSize+step];
if(tx==0&&ty==0)
{
for(int i=variableSize-step;i<variableSize;i++)
{
deviceY[i]=deviceY[i+halfSize];
deviceM[i]=deviceM[i+halfSize];
}
}
}
__syncthreads();
}
/*
 * One round of recursive doubling where each recurrence element is a
 * 2-vector Y (2 floats per element) updated by a 2x2 matrix M (4 floats
 * per element): Y_new = Y_old + M * Y_left, M_new = M * M_left.
 * Double-buffered like the scalar kernels: Y halves are variableSize
 * floats apart, M halves are 2*variableSize floats apart, and the parity
 * of evenOrOddFlag selects the destination half.
 * deviceA..deviceD are passed through but unused in this kernel.
 * NOTE(review): unlike the scalar kernels, the prefix copy loop below is
 * NOT guarded by tx==0 — every thread performs the same `step` copies.
 * The values written are identical so this looks benign, but it is
 * redundant work and formally a same-address write race; confirm intent.
 * NOTE(review): the trailing __syncthreads() at kernel end has no effect.
 */
__global__ void MatrixVersionRecursiveDoubling(int variableSize, int step,int blockRow, int blockColumn,float* deviceYForW,float* deviceMForW,int evenOrOddFlag,float* deviceA, float* deviceB, float* deviceC, float* deviceD)
{
//so right now just use grid (1,1); if time allows will implement other grid sizes
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int processId=tx; //this is only for this particular grid and block setup
int halfSizeY=variableSize;    // Y elements are 2 floats, buffer half is variableSize floats
int halfSizeM=2*variableSize;  // M elements are 4 floats, buffer half is 2*variableSize floats
/*
int secondhalfHelper=halfSize+step+2*processIndex; //this need to multiply 2, different from non-matrix version
int secondhalfHelper1=halfSize+step+4*processIndex;*/
int indexHelperY=halfSizeY+2*step+2*processId;   // destination Y element (second half)
int indexHelperM=halfSizeM+4*step+4*processId;   // destination M element (second half)
if(evenOrOddFlag%2==1)
{
// write into the second half, reading operands from the first half:
// Y_new = Y_old + M_old * Y(left neighbor), 2-vector times 2x2 matrix
deviceYForW[indexHelperY]=deviceYForW[indexHelperY-halfSizeY]+deviceMForW[indexHelperM-halfSizeM]*deviceYForW[2*processId]+deviceMForW[indexHelperM-halfSizeM+1]*deviceYForW[2*processId+1];
deviceYForW[indexHelperY+1]=deviceYForW[indexHelperY-halfSizeY+1]+deviceMForW[indexHelperM-halfSizeM+2]*deviceYForW[2*processId]+deviceMForW[indexHelperM-halfSizeM+3]*deviceYForW[2*processId+1];
// M_new = M_old * M(left neighbor), 2x2 matrix product
deviceMForW[indexHelperM]=deviceMForW[4*step+4*processId]*deviceMForW[4*processId]+deviceMForW[4*step+4*processId+1]*deviceMForW[4*processId+2];
deviceMForW[indexHelperM+1]=deviceMForW[4*step+4*processId]*deviceMForW[4*processId+1]+deviceMForW[4*step+4*processId+1]*deviceMForW[4*processId+3];
deviceMForW[indexHelperM+2]=deviceMForW[4*step+4*processId+2]*deviceMForW[4*processId]+deviceMForW[4*step+4*processId+3]*deviceMForW[4*processId+2];
deviceMForW[indexHelperM+3]=deviceMForW[4*step+4*processId+2]*deviceMForW[4*processId+1]+deviceMForW[4*step+4*processId+3]*deviceMForW[4*processId+3];
//now need to copy the unchanged [0, step) prefix to the new half
for(int i=0;i<step;i++)
{
deviceYForW[halfSizeY+2*i]=deviceYForW[2*i];
deviceYForW[halfSizeY+2*i+1]=deviceYForW[2*i+1];
deviceMForW[halfSizeM+4*i]=deviceMForW[4*i];
deviceMForW[halfSizeM+4*i+1]=deviceMForW[4*i+1];
deviceMForW[halfSizeM+4*i+2]=deviceMForW[4*i+2];
deviceMForW[halfSizeM+4*i+3]=deviceMForW[4*i+3];
}
}
else
{
// write into the first half, reading operands from the second half
deviceYForW[indexHelperY-halfSizeY]=deviceYForW[indexHelperY]+deviceMForW[indexHelperM]*deviceYForW[2*processId+halfSizeY]+deviceMForW[indexHelperM+1]*deviceYForW[2*processId+1+halfSizeY];
deviceYForW[indexHelperY-halfSizeY+1]=deviceYForW[indexHelperY+1]+deviceMForW[indexHelperM+2]*deviceYForW[2*processId+halfSizeY]+deviceMForW[indexHelperM+3]*deviceYForW[2*processId+1+halfSizeY];
deviceMForW[indexHelperM-halfSizeM]=deviceMForW[4*step+4*processId+halfSizeM]*deviceMForW[4*processId+halfSizeM]+deviceMForW[4*step+4*processId+1+halfSizeM]*deviceMForW[4*processId+2+halfSizeM];
deviceMForW[indexHelperM+1-halfSizeM]=deviceMForW[4*step+4*processId+halfSizeM]*deviceMForW[4*processId+1+halfSizeM]+deviceMForW[4*step+4*processId+1+halfSizeM]*deviceMForW[4*processId+3+halfSizeM];
deviceMForW[indexHelperM+2-halfSizeM]=deviceMForW[4*step+4*processId+2+halfSizeM]*deviceMForW[4*processId+halfSizeM]+deviceMForW[4*step+4*processId+3+halfSizeM]*deviceMForW[4*processId+2+halfSizeM];
deviceMForW[indexHelperM+3-halfSizeM]=deviceMForW[4*step+4*processId+2+halfSizeM]*deviceMForW[4*processId+1+halfSizeM]+deviceMForW[4*step+4*processId+3+halfSizeM]*deviceMForW[4*processId+3+halfSizeM];
//now need to copy the unchanged [0, step) prefix to the new half
for(int i=0;i<step;i++)
{
deviceYForW[2*i]=deviceYForW[2*i+halfSizeY];
deviceYForW[2*i+1]=deviceYForW[2*i+1+halfSizeY];
deviceMForW[4*i]=deviceMForW[4*i+halfSizeM];
deviceMForW[4*i+1]=deviceMForW[4*i+1+halfSizeM];
deviceMForW[4*i+2]=deviceMForW[4*i+2+halfSizeM];
deviceMForW[4*i+3]=deviceMForW[4*i+3+halfSizeM];
}
}
__syncthreads();
}
/*
 * Solves a tridiagonal system (arising from a finite-difference PDE
 * discretization with m = 2^EXPO - 1 unknowns) via an LU-style
 * factorization computed with recursive doubling on the GPU:
 *   1. W  — off-diagonal elimination ratios, via the 2x2-matrix
 *      recursive-doubling kernel (MatrixVersionRecursiveDoubling);
 *   2. G  — forward-substitution values, via RecursiveDoublingKernel;
 *   3. X  — final solution, via the reverse sweep
 *      (LoopingbackRecursiveDoublingKernel).
 * A/B/C are the sub/main/super diagonals, D the right-hand side.
 *
 * NOTE(review): MforG/YforG and MforX/YforX are malloc'd with
 * m*sizeof(float) but the device buffers and the DeviceToHost copies use
 * forGSize/forXSize = 2*m*sizeof(float) — the copies overflow the host
 * buffers by m floats (heap corruption). The host buffers should be
 * allocated with the same 2*m size as the double-buffered device arrays.
 * NOTE(review): in the G print loop's else-branch, G[i]=YforG[i] is stored
 * while YforG[i+m] is printed — one of the two indices is wrong.
 * NOTE(review): "MforG[m-1]=1.0;" below looks like a copy-paste slip for
 * MforX[m-1] (MforX[m-1] is otherwise left uninitialized).
 * NOTE(review): no cudaDeviceSynchronize/cudaGetLastError after the kernel
 * loops (the blocking cudaMemcpy provides ordering, but errors are never
 * checked), and W/G/MforG/YforG/MforX/YforX plus their device buffers are
 * never freed.
 */
int main()
{
/* float* M;
float* Y;
int variableSize=10;
int variableSpace=2*variableSize*sizeof(float);*/
//make it double size since it run in parallel so you want to keep all the previous version
/* M=(float*)malloc(variableSpace);
Y=(float*)malloc(variableSpace); */
/* M[0]=1;
Y[0]=1;*/
// number of unknowns; NOTE(review): float pow() for an integer power —
// exact here for small EXPO, but (1<<EXPO)-1 would be safer
int m=pow(2,EXPO)-1;
int b=1;
int a=0;
float delta=(b-a)*1.0/(m+1.0);  // grid spacing of the discretization
//store the matrix that is to be LU decomposed
float *A;
float *B;
float *C;
float *D;
float *W;
float *G;
int chunkLength=m;
int chunkSize=chunkLength*sizeof(float);
A=(float*)malloc(chunkSize);
B=(float*)malloc(chunkSize);
C=(float*)malloc(chunkSize);
D=(float*)malloc(chunkSize);
W=(float*)malloc((m-1)*sizeof(float));
G=(float*)malloc((m*sizeof(float)));
// fill the three diagonals and the right-hand side
A[0]=0;
//int vectorLength=EXPO*m;
for(int i=1;i<m;i++)
{
A[i]=1-delta*delta*0.5*(i+1);
}
//else will be 0
/* for(int i=m;i<chunkLength;i++)
{
A[i]=0;
}*/
for(int i=0;i<m;i++)
{
B[i]=-2+delta*delta*1.0;
}
/* for(int i=m;i<chunkLength;i++)
{
B[i]=0;
}*/
C[m-1]=0;
for(int i=0;i<m-1;i++)
{
C[i]=1+0.5*delta*delta*(i+1);
}
/* for(int i=m;i<chunkLength;i++)
{
C[i]=0;
}*/
for(int i=0;i<m-1;i++)
{
D[i]=2*(i+1)*pow(delta,3);
}
D[m-1]=2*m*delta*delta*delta-1+3.5*delta*delta;
/* for(int i=m;i<chunkLength;i++)
{
D[i]=0;
}*/
float *deviceA, *deviceB, *deviceC, *deviceD;
cudaMalloc((void**)&deviceA,chunkSize);
cudaMalloc((void**)&deviceB,chunkSize);
cudaMalloc((void**)&deviceC,chunkSize);
cudaMalloc((void**)&deviceD,chunkSize);
//copy the host vector to device.
cudaMemcpy(deviceA,A,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceD,D,chunkSize,cudaMemcpyHostToDevice);
clock_t begin,end;
begin=clock();
// ---- Stage 1: compute W with the 2x2-matrix recursive doubling ----
//so we need 2*2*(N-1) for both YforW and 2*4*(N-1) for MforW , the size N should be equal to m here
float *MforW, *YforW;
int MforWLength=4*(m-1);
int YforWLength=2*(m-1);
int MforWSize=2*MforWLength*sizeof(float);  // x2: double-buffered on the device
int YforWSize=2*YforWLength*sizeof(float);
MforW=(float*)malloc(MforWSize);
YforW=(float*)malloc(YforWSize);
//the first step of recursive doubling, initialize Y and M;
YforW[0]=1;
YforW[1]=B[0]/(C[0]*1.0);
//the other should be 0 since V(I)=A[I]V[I-1]+0
for(int i=2;i<YforWLength;i++)
{
YforW[i]=0;
}
//the first one for M should be [1,0,0,1] (identity)
MforW[0]=1;
MforW[1]=0;
MforW[2]=0;
MforW[3]=1;
for(int i=4;i<MforWLength;i=i+4)
{
MforW[i]=0;
MforW[i+1]=1;
MforW[i+2]=-1.0*A[i/4]/C[i/4];
MforW[i+3]=1.0*B[i/4]/C[i/4];
}
float *deviceMforW, *deviceYforW;
cudaMalloc((void**)&deviceMforW,MforWSize);
cudaMalloc((void**)&deviceYforW,YforWSize);
cudaMemcpy(deviceMforW,MforW,MforWSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceYforW,YforW,YforWSize,cudaMemcpyHostToDevice);
int step=1;
int evenOrOddFlag=0;
// log2 rounds; each launch doubles the recurrence distance `step`
do {
//each time needs N-Step processors
evenOrOddFlag=evenOrOddFlag+1;
dim3 dimGrid(1,1);
int blockRow=1;
int blockColumn=(m-1)-step;
dim3 dimBlock(blockColumn,blockRow);
//variableSIZE should be half size y
MatrixVersionRecursiveDoubling<<<dimGrid,dimBlock>>>(YforWLength,step,blockRow,blockColumn,deviceYforW,deviceMforW,evenOrOddFlag,deviceA,deviceB,deviceC,deviceD);
step=step+step;
}while( step <= YforWLength/2);
//so if evenOrOddFlag is odd, it means that the latest value will be second half,
//otherwise it will be in the first half
cudaMemcpy(MforW,deviceMforW,MforWSize,cudaMemcpyDeviceToHost);
cudaMemcpy(YforW,deviceYforW,YforWSize,cudaMemcpyDeviceToHost);
printf("The following are w value from recursvie doubling: \n");
if(evenOrOddFlag%2==0)
{
//length of w is m-1 and length of y is s(m-1)
for(int i=0;i<m-1;i++)
{
if(i%16==0)
{
printf("\n");
}
W[i]=YforW[2*i]*1.0/YforW[2*i+1];
printf("%f ",W[i]);
}
}
else
{
for(int i=0;i<m-1;i++)
{
if(i%16==0)
{
printf("\n");
}
W[i]=YforW[2*i+YforWLength]*1.0/YforW[2*i+1+YforWLength];
printf("%f ",W[i]);
}
}
// ---- Stage 2: compute G (forward substitution) ----
//g will have n-1 in values.
//according to the formula 5.3.3.7
float* MforG,*YforG;
// NOTE(review): host buffers are m floats, but forGSize (used for the
// device alloc AND the copies below) is 2*m floats — host overflow.
MforG=(float*)malloc(m*sizeof(float));
YforG=(float*)malloc(m*sizeof(float));
int forGSize=2*m*sizeof(float);
YforG[0]=D[0]*1.0/B[0];
MforG[0]=1.0;
/* printf("\n test start here");*/
for(int i=1;i<m;i++)
{
YforG[i]=D[i]/(B[i]-A[i]*W[i-1]);
MforG[i]=-1*A[i]/(B[i]-A[i]*W[i-1]);
}
float *deviceMforG, *deviceYforG;
cudaMalloc((void**)&deviceMforG,forGSize);
cudaMalloc((void**)&deviceYforG,forGSize);
cudaMemcpy(deviceMforG,MforG,forGSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceYforG,YforG,forGSize,cudaMemcpyHostToDevice);
int stepG=1;
int evenOrOddFlagG=0;
do {
//each time needs N-Step processors
evenOrOddFlagG=evenOrOddFlagG+1;
dim3 dimGrid1(1,1);
int blockRow1=1;
int blockColumn1=m-stepG;
dim3 dimBlock1(blockColumn1,blockRow1);
RecursiveDoublingKernel<<<dimGrid1,dimBlock1>>>(m,stepG,blockRow1,blockColumn1,deviceYforG,deviceMforG,evenOrOddFlagG);
stepG=stepG+stepG;
}while( stepG <= m);
//so if evenOrOddFlag is odd, it means that the latest value will be second half,
//otherwise it will be in the first half
cudaMemcpy(MforG,deviceMforG,forGSize,cudaMemcpyDeviceToHost);
cudaMemcpy(YforG,deviceYforG,forGSize,cudaMemcpyDeviceToHost);
if(evenOrOddFlagG%2==0)
{
for(int i=0;i<m;i++)
{
if(i%16==0)
{
printf("\n");
}
G[i]=YforG[i];
printf("[%d] %f ",i,YforG[i]);
}
}
else
{
for(int i=0;i<m;i++)
{
if(i%16==0)
{
printf("\n");
}
// NOTE(review): stores YforG[i] but prints YforG[i+m] — inconsistent.
G[i]=YforG[i];
printf("[%d] %f ",i,YforG[i+m]);
}
}
// ---- Stage 3: reverse sweep to recover x ----
float* MforX,*YforX;
MforX=(float*)malloc(m*sizeof(float));
YforX=(float*)malloc(m*sizeof(float));
int forXSize=2*m*sizeof(float);
YforX[m-1]=G[m-1];
// NOTE(review): likely meant MforX[m-1]=1.0; MforX[m-1] stays uninitialized.
MforG[m-1]=1.0;
/* printf("\n test start here");*/
for(int i=0;i<m-1;i++)
{
YforX[i]=G[i];
MforX[i]=-1*W[i];
}
float *deviceMforX, *deviceYforX;
cudaMalloc((void**)&deviceMforX,forXSize);
cudaMalloc((void**)&deviceYforX,forXSize);
cudaMemcpy(deviceMforX,MforX,forXSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceYforX,YforX,forXSize,cudaMemcpyHostToDevice);
int stepX=1;
int evenOrOddFlagX=0;
do {
//each time needs N-Step processors
evenOrOddFlagX=evenOrOddFlagX+1;
dim3 dimGrid2(1,1);
int blockRow2=1;
int blockColumn2=m-stepX;
dim3 dimBlock2(blockColumn2,blockRow2);
LoopingbackRecursiveDoublingKernel<<<dimGrid2,dimBlock2>>>(m,stepX,blockRow2,blockColumn2,deviceYforX,deviceMforX,evenOrOddFlagX);
stepX=stepX+stepX;
}while( stepX<= m);
//so if evenOrOddFlag is odd, it means that the latest value will be second half,
//otherwise it will be in the first half
cudaMemcpy(MforX,deviceMforX,forXSize,cudaMemcpyDeviceToHost);
cudaMemcpy(YforX,deviceYforX,forXSize,cudaMemcpyDeviceToHost);
printf("The following is the result for x finally! \n");
if(evenOrOddFlagX%2==0)
{
for(int i=0;i<m;i++)
{
if(i%16==0)
{
printf("\n");
}
printf(" %f ",YforX[i]);
}
}
else
{
for(int i=0;i<m;i++)
{
if(i%16==0)
{
printf("\n");
}
printf("%f ",YforX[i+m]);
}
}
//printf("y for G is %f \n",YforG[444]);
double time_spent;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\n time used to calculate pde with %d varaible recursive doubling is :%f seconds \n",m,time_spent);
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
cudaFree(deviceD);
cudaFree(deviceMforW);
cudaFree(deviceYforW);
free(A);
free(B);
free(C);
free(D);
free(MforW);
free(YforW);
return 0;
}
|
22,762 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * Stream-compacts the edge arrays. edgeMap is a prefix sum over per-edge
 * keep flags: edge e survives when edgeMap[e+1] != edgeMap[e], and
 * edgeMap[e] is then its slot in the packed output arrays.
 * NOTE(review): reads edgeMap[e+1], so edgeMap must have numEdges+1
 * entries — confirm at the call site.
 */
__global__ void packGraph_gpu(int * newSrc, int * oldSrc, int * newDst, int * oldDst, int * newWeight, int * oldWeight, int * edgeMap, int numEdges) {
    int stride = blockDim.x * gridDim.x;            // total thread count
    int first = blockDim.x * blockIdx.x + threadIdx.x;
    // Grid-stride loop: each thread handles edges first, first+stride, ...
    for (int e = first; e < numEdges; e += stride) {
        int pos = edgeMap[e];
        // The prefix sum advances across a kept edge.
        if (edgeMap[e + 1] != pos) {
            newSrc[pos] = oldSrc[e];
            newDst[pos] = oldDst[e];
            newWeight[pos] = oldWeight[e];
        }
    }
}
|
22,763 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define CUDA_SAFE_CALL(func) { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "error [%d] : %s\n", err, cudaGetErrorString(err)); \
exit(err); \
} \
}
// __global__関数はホストから呼び出せるデバイス側関数
// 戻り値は返せない
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Launch with at least n threads; surplus threads are filtered by the guard.
__global__ void addKernel(int *c, const int *a, const int *b, const int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
// Host wrapper: allocates device buffers, copies a/b to the GPU, launches
// addKernel, and copies the n-element result back into c.
// Fixes vs. the original: cudaSetDevice, the kernel launch, and the final
// device-to-host copy are now error-checked like every other call (the
// file already defines CUDA_SAFE_CALL for this), and the grid size uses a
// ceiling division instead of (n/1024)+1, which launched a superfluous
// block whenever n was a multiple of 1024.
void add(int *c, const int *a, const int *b, unsigned int n)
{
    int *dev_a;
    int *dev_b;
    int *dev_c;
    CUDA_SAFE_CALL( cudaSetDevice(0) );
    // Three device arrays: two inputs, one output.
    CUDA_SAFE_CALL( cudaMalloc((void **)&dev_c, sizeof(int) * n) );
    CUDA_SAFE_CALL( cudaMalloc((void **)&dev_a, sizeof(int) * n) );
    CUDA_SAFE_CALL( cudaMalloc((void **)&dev_b, sizeof(int) * n) );
    // Copy the two inputs host -> device.
    CUDA_SAFE_CALL( cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice) );
    CUDA_SAFE_CALL( cudaMemcpy(dev_b, b, n * sizeof(int), cudaMemcpyHostToDevice) );
    // Launch configuration: <<<grid, block>>>, ceil-divide n over the block size.
    int th = 1024;
    int bl = (int)((n + th - 1) / th);
    if (bl == 0) bl = 1;  // n == 0: minimal launch; the kernel guard makes it a no-op
    dim3 blocks(bl, 1, 1);
    dim3 threads(th, 1, 1);
    addKernel<<<blocks, threads>>>(dev_c, dev_a, dev_b, n);
    CUDA_SAFE_CALL( cudaGetLastError() );       // surface bad launch configuration
    // Wait for the kernel to finish (also surfaces async execution errors).
    CUDA_SAFE_CALL( cudaDeviceSynchronize() );
    // Copy the result device -> host.
    CUDA_SAFE_CALL( cudaMemcpy(c, dev_c, n * sizeof(int), cudaMemcpyDeviceToHost) );
    // Release device memory.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return;
}
#ifdef __cplusplus
extern "C" {
#endif
// Builds two n-element input vectors (a[i] = i+1, b[i] = i-1) plus a zeroed
// output, runs the GPU vector addition, then resets the device and frees
// the host buffers.
void cuda_kernel_exec(int n)
{
    int *a = (int *)malloc(sizeof(int) * n);
    int *b = (int *)malloc(sizeof(int) * n);
    int *c = (int *)malloc(sizeof(int) * n);
    for (int k = 0; k < n; k++) {
        a[k] = k + 1;
        b[k] = k - 1;
        c[k] = 0;
    }
    // Add vectors in parallel.
    add(c, a, b, n);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaDeviceReset();
    free(a);
    free(b);
    free(c);
    return;
}
#ifdef __cplusplus
};
#endif
|
22,764 | #include <stdio.h>
#include <stdlib.h>
#define N 256
// In-place per-byte bit reversal: within each 32-bit word, the 8 bits of
// every byte are mirrored (nibble swap, then bit-pair swap, then bit swap —
// all shifts stay inside byte boundaries). One thread per word; launch with
// one thread per array element.
__global__ void bitreverse(unsigned int *data){
    unsigned int x = data[threadIdx.x];
    x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4);
    x = ((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2);
    x = ((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1);
    data[threadIdx.x] = x;
}
// Fills idata with 0..N-1, bit-reverses each byte of every word on the GPU,
// and prints the input/output pairs.
// BUG FIX: the copy of the result used cudaMemcpyHostToDevice, so odata was
// never filled (the printout showed stack garbage). It now copies
// device -> host. Sizes also use sizeof(unsigned int) to match the element
// type (same width as int, but consistent).
int main(void){
    unsigned int *d = NULL; int i;
    unsigned int idata[N], odata[N];
    for (i = 0; i < N; i++)
        idata[i] = (unsigned int)i;
    cudaMalloc((void**)&d, sizeof(unsigned int)*N);
    cudaMemcpy(d, idata, sizeof(unsigned int)*N, cudaMemcpyHostToDevice);
    bitreverse<<<1, N>>>(d);
    // Result must come back device -> host (was HostToDevice in the original).
    cudaMemcpy(odata, d, sizeof(unsigned int)*N, cudaMemcpyDeviceToHost);
    for (i = 0; i < N; i++)
        printf("%u -> %u\n", idata[i], odata[i]);
    cudaFree((void*)d);
    return 0;
}
|
22,765 | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#define BIGNUM 99999999
/**
* init kernel
* @param s_array set array
* @param c_array status array
* @param cu_array status update array
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
/**
 * init kernel: resets per-vertex MIS state. Every vertex starts
 * unprocessed (c_array/cu_array = -1) and outside the set (s_array = 0).
 * @param s_array set array
 * @param c_array status array
 * @param cu_array status update array
 * @param num_nodes number of vertices
 * @param num_edges number of edges (unused here)
 */
__global__ void
init(int *s_array, int *c_array, int *cu_array, int num_nodes, int num_edges)
{
    // One thread per vertex.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes)
        return;
    c_array[tid] = -1;   // -1: not processed
    cu_array[tid] = -1;
    s_array[tid] = 0;    // not (yet) in the independent set
}
/**
* mis1 kernel
* @param row csr pointer array
* @param col csr column index array
* @param node_value node value array
* @param s_array set array
* @param c_array node status array
* @param min_array node value array
* @param stop node value array
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
/**
 * mis1 kernel: for every still-unprocessed vertex, finds the minimum
 * node_value among its unprocessed neighbors (written to min_array) and
 * flags *stop = 1 so the host keeps iterating.
 * @param row csr pointer array
 * @param col csr column index array
 * @param node_value node value array
 * @param s_array set array (unused here)
 * @param c_array node status array (-1 = unprocessed)
 * @param min_array per-vertex neighbor minimum (output)
 * @param stop set to 1 if any vertex is still unprocessed
 * @param num_nodes number of vertices
 * @param num_edges number of edges
 */
__global__ void
mis1(int *row, int *col, int *node_value, int *s_array, int *c_array,
     int *min_array, int *stop, int num_nodes, int num_edges)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes)
        return;
    // Only unprocessed vertices participate in this round.
    if (c_array[tid] != -1)
        return;
    *stop = 1;
    // CSR neighbor range; the final row is terminated by num_edges.
    int start = row[tid];
    int end = (tid + 1 < num_nodes) ? row[tid + 1] : num_edges;
    // Scan the neighbor list for the smallest unprocessed value.
    int min = BIGNUM;
    for (int edge = start; edge < end; edge++) {
        int nbr = col[edge];
        if (c_array[nbr] == -1 && node_value[nbr] < min)
            min = node_value[nbr];
    }
    min_array[tid] = min;
}
/**
* mis2 kernel
* @param row csr pointer array
* @param col csr column index array
* @param node_value node value array
* @param s_array set array
* @param c_array status array
* @param cu_array status update array
* @param min_array node value array
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
/**
 * mis2 kernel: a vertex whose value is <= the minimum among its
 * unprocessed neighbors (from mis1) joins the independent set (s = 2),
 * becomes inactive (c = -2), and marks its unprocessed neighbors for
 * deactivation via cu_array (deferred to mis3 to avoid races on c_array).
 * @param row csr pointer array
 * @param col csr column index array
 * @param node_value node value array
 * @param s_array set array (2 = in the independent set)
 * @param c_array status array (-1 unprocessed, -2 inactive)
 * @param cu_array status update array
 * @param min_array neighbor minimum from mis1
 * @param num_nodes number of vertices
 * @param num_edges number of edges
 */
__global__ void
mis2(int *row, int *col, int *node_value, int *s_array, int *c_array,
     int *cu_array, int *min_array, int num_nodes, int num_edges)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes)
        return;
    // Skip vertices that are already decided or lost the local minimum test.
    if (c_array[tid] != -1 || node_value[tid] > min_array[tid])
        return;
    s_array[tid] = 2;   // admit to the independent set
    // CSR neighbor range; the final row is terminated by num_edges.
    int start = row[tid];
    int end = (tid + 1 < num_nodes) ? row[tid + 1] : num_edges;
    c_array[tid] = -2;  // this vertex is now inactive
    // Defer neighbor deactivation through cu_array to avoid a race on c_array.
    for (int edge = start; edge < end; edge++) {
        int nbr = col[edge];
        if (c_array[nbr] == -1)
            cu_array[nbr] = -2;
    }
}
/**
* mis3 kernel
* @param cu_array status update array
* @param c_array status array
* @param num_nodes number of vertices
*/
/**
 * mis3 kernel: commits the deferred deactivations — every vertex that
 * mis2 marked with -2 in cu_array becomes inactive in c_array.
 * @param cu_array status update array
 * @param c_array status array
 * @param num_nodes number of vertices
 */
__global__ void
mis3(int *cu_array, int *c_array, int num_nodes)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes)
        return;
    if (cu_array[tid] == -2)
        c_array[tid] = -2;
}
|
22,766 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
const int block_size = 1024;
const int n = 4 * (1 << 20);
/*
 * Sequential reference sum: *sum = v[0] + ... + v[n-1].
 * Keeps the Kahan compensated-summation shape of the original; with
 * integer arithmetic the compensation term is always zero, so the result
 * equals a plain running sum. Assumes n >= 1 (v[0] is read unconditionally).
 */
void reduce_cpu(int *v, int n, int *sum)
{
    int total = v[0];   // running sum
    int comp = 0;       // Kahan compensation (stays 0 for exact int math)
    for (int k = 1; k < n; k++) {
        int adj = v[k] - comp;
        int next = total + adj;
        comp = (next - total) - adj;
        total = next;
    }
    *sum = total;
}
/*
 * Per-block tree reduction: per_block_sum[blockIdx.x] receives the sum of
 * this block's slice of v. The host then sums the per-block results.
 *
 * Fixes vs. the original:
 *  - __syncthreads() was inside `if (i < n)`, a divergent barrier
 *    (undefined behavior) whenever n is not a multiple of blockDim.x;
 *  - out-of-range threads left their sdata slot uninitialized while
 *    partner threads still added it. Tail threads now store 0 and every
 *    thread reaches every barrier.
 */
__global__ void reduce_per_block(int *v, int n, int *per_block_sum)
{
    __shared__ int sdata[block_size];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Zero-pad the grid tail so out-of-range threads contribute nothing.
    sdata[tid] = (i < n) ? v[i] : 0;
    __syncthreads();
    // Interleaved-pair tree reduction over the block's shared array.
    for (int s = 1; s < blockDim.x; s *= 2) {
        if (tid % (2 * s) == 0)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        per_block_sum[blockIdx.x] = sdata[0];
}
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
/*
 * Benchmarks a sum reduction of n ints: CPU reference vs. GPU per-block
 * reduction (reduce_per_block) followed by a host-side sum of the
 * per-block partials. Prints both results, timings, and speedups.
 * NOTE(review): srand(0) is called but rand() is never used (v is filled
 * deterministically with i+1).
 * NOTE(review): tmem is reassigned with `tmem = -wtime()` before the D2H
 * copy, discarding the H2D/alloc timing accumulated above — likely meant
 * `tmem -= wtime()`.
 * NOTE(review): the bandwidth line is labeled GiB/s but computed with
 * 1.0e-9 (decimal GB).
 */
int main()
{
double tcpu = 0, tgpu = 0, tmem = 0;
size_t size = sizeof(int) * n;
int *v = (int *)malloc(size);
srand(0);
// deterministic input: v[i] = i + 1
for (size_t i = 0; i < n; i++)
v[i] = i + 1;
int sum;
// time the sequential reference
tcpu = -wtime();
reduce_cpu(v, n, &sum);
tcpu += wtime();
/* Allocate on device */
int threads_per_block = block_size;
int blocks = (n + threads_per_block - 1) / threads_per_block;  // ceil-div
int *dv;
int *per_block_sum;
int *sums = (int *)malloc(sizeof(int) * blocks);
tmem = -wtime();
cudaMalloc((void **)&per_block_sum, sizeof(int) * blocks);
cudaMalloc((void **)&dv, size);
cudaMemcpy(dv, v, size, cudaMemcpyHostToDevice);
tmem += wtime();
printf("CUDA kernel launch with %d blocks of %d threads\n", blocks, threads_per_block);
fflush(stdout);
/* Compute per block sum */
tgpu = -wtime();
reduce_per_block<<<blocks, threads_per_block>>>(dv, n, per_block_sum);
cudaDeviceSynchronize();
tgpu += wtime();
tmem = -wtime();
cudaMemcpy(sums, per_block_sum, sizeof(int) * blocks, cudaMemcpyDeviceToHost);
tmem += wtime();
/* Compute block sum */
tgpu -= wtime();
int sum_gpu = 0;
for (int i = 0; i < blocks; i++)
sum_gpu += sums[i];
tgpu += wtime();
printf("Sum (CPU) = %d\n", sum);
printf("Sum (GPU) = %d\n", sum_gpu);
printf("CPU version (sec.): %.6f\n", tcpu);
printf("GPU version (sec.): %.6f\n", tgpu);
printf("GPU bandwidth (GiB/s): %.2f\n", 1.0e-9 * size / (tgpu + tmem));
printf("Speedup: %.2f\n", tcpu / tgpu);
printf("Speedup (with mem ops.): %.2f\n", tcpu / (tgpu + tmem));
cudaFree(per_block_sum);
cudaFree(dv);
free(sums);
free(v);
cudaDeviceReset();
return 0;
}
|
22,767 | #include "includes.h"
/*
 * Per-block least-absolute-difference reduction: out[blockIdx.x] receives
 * the partial sum of |a[i] - b[i]| over this block's grid-strided slice
 * (each thread loads two elements per stride step). Requires dynamic
 * shared memory of blockDim.x * sizeof(float).
 *
 * Fixes vs. the original:
 *  - out-of-bounds read of a[i + blockDim.x]/b[i + blockDim.x] when
 *    i + blockDim.x >= size; the second element is now guarded;
 *  - __syncthreads() inside the divergent `tid < 32` branch (undefined
 *    behavior — only part of the block reaches the barrier); the warp
 *    tail now uses a volatile view of shared memory instead;
 *  - fabsf() instead of abs() to make the float overload explicit;
 *  - the per-iteration __syncthreads() inside the load loop (threads only
 *    touch their own slot there) was unnecessary and is replaced by a
 *    single barrier before the tree reduction.
 *
 * NOTE(review): like the original, the tail assumes blockDim.x is a power
 * of two and >= 64 (sdata[tid + 32] must exist) — confirm launch config.
 */
__global__ void ladKernel(float *a, float *b, float *out, int size) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    unsigned int stride = blockDim.x * 2 * gridDim.x;
    // Each thread accumulates its grid-strided pairs into its shared slot.
    sdata[tid] = 0;
    while (i < size) {
        sdata[tid] += fabsf(a[i] - b[i]);
        if (i + blockDim.x < size)  // guard the second element at the tail
            sdata[tid] += fabsf(a[i + blockDim.x] - b[i + blockDim.x]);
        i += stride;
    }
    __syncthreads();
    // Tree reduction down to 64 surviving partial sums.
    for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    // Final warp: volatile prevents the compiler from caching shared reads,
    // and no __syncthreads() is legal inside this divergent branch.
    if (tid < 32) {
        volatile float *vs = sdata;
        vs[tid] += vs[tid + 32];
        vs[tid] += vs[tid + 16];
        vs[tid] += vs[tid + 8];
        vs[tid] += vs[tid + 4];
        vs[tid] += vs[tid + 2];
        vs[tid] += vs[tid + 1];
    }
    if (tid == 0) {
        out[blockIdx.x] = sdata[0];
    }
}
22,768 | #include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 32
#define N 321
/*
 * Sum of squares of arr[0..N-1], accumulated into *sum with one atomicAdd
 * per block. Caller must zero *sum before the launch.
 *
 * Fixes vs. the original:
 *  - __syncthreads() was inside `if (index < N)` — a divergent barrier
 *    (undefined behavior) in the final, partially filled block
 *    (N = 321 is not a multiple of BLOCK_SIZE = 32);
 *  - out-of-range threads never wrote temp[], so thread 0 of the last
 *    block summed uninitialized shared memory; they now contribute 0;
 *  - temp was float while the data is int — int avoids the conversions.
 */
__global__ void sumValues(int *arr, int *sum) {
    int index = BLOCK_SIZE * blockIdx.x + threadIdx.x;
    __shared__ int temp[BLOCK_SIZE];
    // Every thread writes its slot (0 past the end of the array).
    temp[threadIdx.x] = (index < N) ? arr[index] * arr[index] : 0;
    __syncthreads();  // barrier reached by the whole block
    // Thread 0 serially reduces the block's partial squares.
    if (threadIdx.x == 0) {
        int s = 0;
        for (int i = 0; i < BLOCK_SIZE; i++) {
            s += temp[i];
        }
        // Fold this block's total into the global sum.
        atomicAdd(sum, s);
    }
}
/*
 * Driver: fills arr with 0..N-1, computes the sum of squares on the GPU,
 * and prints it.
 * Fixes vs. the original: *sum is explicitly zeroed before the launch
 * (cudaMallocManaged does not guarantee zero-initialized memory, and the
 * kernel only accumulates via atomicAdd), and both managed allocations
 * are released before exit.
 */
int main() {
    int *arr;
    int *sum;
    // Allocate Unified Memory -- accessible from CPU or GPU
    cudaMallocManaged(&arr, N * sizeof(int));
    cudaMallocManaged(&sum, sizeof(int));
    for (int i = 0; i < N; i++) {
        arr[i] = i;
    }
    *sum = 0;  // atomicAdd in the kernel accumulates into this
    // Ceil-divide so the final partial block is still launched.
    int block_number = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    sumValues<<<block_number, BLOCK_SIZE>>>(arr, sum);
    cudaDeviceSynchronize();  // wait for the kernel before reading *sum on the host
    printf("sum = %d\n", *sum);
    cudaFree(arr);
    cudaFree(sum);
    return 0;
}
|
22,769 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <float.h>
// Element-wise ReLU: output[i] = input[i] when positive, else 0.0f.
// The tensor dimensions (batch/channel/height/width) are informational only;
// indexing is flat over total_size elements, one thread per element.
__global__ void relu_kernel(float *output, float *input, int batch, int channel, int height, int width, int total_size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= total_size)
        return;
    const float v = input[idx];
    output[idx] = (v > 0.0f) ? v : 0.0f;
}
// Launches relu_kernel over a batch x channel x height x width tensor:
// 256 threads per block, enough blocks to cover every element.
void relu(float *output, float *input, int batch, int channel, int height, int width)
{
    const int total = batch * channel * height * width;
    const int threadsPerBlock = 256;
    const int blocks = (total + threadsPerBlock - 1) / threadsPerBlock;
    relu_kernel <<< blocks, threadsPerBlock >>> (output, input, batch, channel, height, width, total);
}
22,770 | #include <cuda.h>
#include <stdio.h>
#include <stdint.h>
// For comparisons
//#include "seqScan.c"
#define ELTS 64
#define BS 1024
#define N 16384*BS
/* ------------------------------------------------------------------------
Unrolled in-place(shared memory) Scan without syncs (32 threads, 64 elts).
Needs 2*64 elements of shared memory storage (512bytes).
(shared mem is 49152 bytes, but you share it with other blocks on an MP)
--------------------------------------------------------------------- */
// In-place inclusive scan of 64 elements using 32 threads with no barriers,
// apparently a Sklansky-style network (hence "skl") -- verify against the
// file header comment. `i` selects which slot of maxs[] receives the total.
// NOTE(review): correctness relies on the 32 lanes executing in lockstep;
// under independent thread scheduling (Volta+) that is not guaranteed
// without __syncwarp() -- confirm the target architecture before reuse.
__device__ void skl_scan(int i,
float* input,
float *output,
float *s_data, // The shared memory
float *maxs) {
int tid = threadIdx.x;
int tids = tid << 1;  // each thread owns the element pair (tids, tids|1)
// Load data from global memory into shared memory (in two separate load ops)
s_data[tid] = input[tid];
s_data[tid + 32] = input[tid + 32];
// NO SYNC HERE
// Doubling passes over sub-blocks of 2, 4, ..., 64 elements: each pass adds
// the running total of the preceding half-block into the upper half.
s_data[tids | 1] += s_data[tids];
s_data[(tids | 3) - (tid & 1)] += s_data[tids & 0xFFFFFFFC | 1];
s_data[(tids | 7) - (tid & 3)] += s_data[tids & 0xFFFFFFF8 | 3];
s_data[(tids | 15) - (tid & 7)] += s_data[tids & 0xFFFFFFF0 | 7];
s_data[(tids | 31) - (tid & 15)] += s_data[tids & 0xFFFFFFE0 | 15];
s_data[(tids | 63) - (tid & 31)] += s_data[tids & 0xFFFFFFC0 | 31];
// NO Interleaved SYNCS here.
output[tid] = s_data[tid];
output[tid + 32] = s_data[tid + 32];
// Lane 0 records the 64-element total for the later cross-segment fix-up.
if(tid == 0)
maxs[i] = s_data[63];
}
// 16-element variant of skl_scan (8 threads, 4 doubling passes); used to scan
// the per-segment totals. The `i` parameter is unused here.
// NOTE(review): same implicit warp-synchrony assumption as skl_scan above --
// not guaranteed on Volta+ without __syncwarp().
__device__ void skl_scan16(int i,
float* input,
float *output,
float *s_data) {
int tid = threadIdx.x;
int tids = tid << 1;  // each thread owns the element pair (tids, tids|1)
// Load data from global memory into shared memory (in two separate load ops)
s_data[tid] = input[tid];
s_data[tid + 8] = input[tid + 8];
// NO SYNC HERE
s_data[tids | 1] += s_data[tids];
s_data[(tids | 3) - (tid & 1)] += s_data[tids & 0xFFFFFFFC | 1];
s_data[(tids | 7) - (tid & 3)] += s_data[tids & 0xFFFFFFF8 | 3];
s_data[(tids | 15) - (tid & 7)] += s_data[tids & 0xFFFFFFF0 | 7];
//s_data[(tids | 31) - (tid & 15)] += s_data[tids & 0xFFFFFFE0 | 15];
//s_data[(tids | 63) - (tid & 31)] += s_data[tids & 0xFFFFFFC0 | 31];
// NO Interleaved SYNCS here.
output[tid] = s_data[tid];
output[tid + 8] = s_data[tid + 8];
}
/* ------------------------------------------------------------------------
The Scan kernel (Thousand(s) of elements. NO SYNCS AT ALL)
--------------------------------------------------------------------- */
// Scans one BS(=1024)-element segment per block: 16 sequential 64-element
// warp scans, a 16-element scan of their totals, then a fix-up pass that adds
// the preceding segments' totals into sub-segments 1..15.
// Launch: 32 threads/block, dynamic shared memory = (64 + 64) floats
// (64 scan slots + the maxs array starting at s_data[64]).
// NOTE(review): maxout is unused (debug output is commented out).
__global__ void kernel(float* input0,
float* output0,
float* maxout){
// shared data. (two different kinds. warp local and across warps.)
extern __shared__ float s_data[];
float *maxs = &s_data[64];
// Sequentially execute 64 scans
for (int i = 0; i < 16; i ++) {
skl_scan(i,
(input0+blockIdx.x*BS)+i*64,
(output0+blockIdx.x*BS)+i*64,
s_data,maxs);
}
// in parallel scan the maximum array
//float v; //discard this value.
// Prefix-scan the 16 sub-segment totals in place (threads 0..15 only).
if (threadIdx.x < 16)
skl_scan16(0,maxs,maxs,(float *)s_data);
// distribute
// 15 thread pass.
// Add the inclusive total of sub-segments 0..t-1 into sub-segment t.
if (threadIdx.x > 0 && threadIdx.x < 16) {
for (int j = 0; j < 64; j ++) {
output0[(blockIdx.x*BS)+(threadIdx.x*64)+j] += maxs[threadIdx.x-1];
}
}
// 32 thread pass.
//for (int j = 0; j < 64; j ++) {
// output0[(blockIdx.x*BS)+((threadIdx.x+32)*64)+j] += maxs[threadIdx.x+31];
//}
// This is a debug step.
//maxout[threadIdx.x] = maxs[threadIdx.x];
//maxout[threadIdx.x+32] = maxs[threadIdx.x+32];
}
/* ------------------------------------------------------------------------
MAIN
--------------------------------------------------------------------- */
// Host driver: scans 16384 segments of 1024 ones, prints the first 256
// results, the first 64 entries of the maxs debug buffer, and the kernel time.
int main(void) {
    float *v;
    float *r;
    float m[64];
    float *dv;
    float *dr;
    float *dm;
    v = (float*)malloc(sizeof(float) * N);
    r = (float*)malloc(sizeof(float) * N);
    memset(m, 0, 64*sizeof(float));
    for (int i = 0; i < N; i ++) {
        v[i] = 1.0;
        r[i] = 7.0;   // sentinel so unwritten outputs are visible
    }
    cudaMalloc((void**)&dv, N*sizeof(float));
    cudaMalloc((void**)&dr, N*sizeof(float));
    cudaMalloc((void**)&dm, 4096*64*sizeof(float));
    cudaMemcpy(dv, v, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // 32 threads/block, 2*64 floats dynamic shared (64 data + 64 maxima).
    kernel<<<16384, 32, 64*2*(sizeof(float))>>>(dv, dr, dm);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(r, dr, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(m, dm, 64*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 256; i ++) {
        printf("%f ", r[i]);
    }
    printf("\n ------ \n");
    for (int i = 0; i < 64; i ++) {
        printf("%f ", m[i]);
    }
    printf("Elapsed time: %f\n", elapsedTime);
    // Release events and memory (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dv);
    cudaFree(dr);
    cudaFree(dm);
    free(v);
    free(r);
    return 0;
}
|
22,771 | #include "includes.h"
// Copies the leading (maxTaps-1)-sample overlap region of each subsequent
// block of d_input into a compact per-block output layout. Only the first
// maxTaps-1 threads of each block participate; each performs nLoops strided
// (WARP-spaced) copies, guarded against running past nTimesamples.
__global__ void PD_ZC_GPU(float *d_input, float *d_output, int maxTaps, int nTimesamples, int nLoops) {
    const int elementsPerBlock = PD_NTHREADS * PD_NWINDOWS;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Read from the start of the *next* block's data.
    const int readCol = (blockIdx.x + 1) * elementsPerBlock + threadIdx.x;
    const int readBase = row * nTimesamples;
    // Write into the compacted (maxTaps-1)-wide overlap buffer.
    const int writeCol = blockIdx.x * (maxTaps - 1) + threadIdx.x;
    const int writeBase = row * (maxTaps - 1) * gridDim.x;
    if (threadIdx.x < (maxTaps - 1)) {
        for (int f = 0; f < nLoops; f++) {
            if (readCol < nTimesamples) {
                d_output[writeCol + writeBase + f*WARP] = d_input[readCol + readBase + f*WARP];
            }
        }
    }
}
22,772 | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
// Strided element-wise add: y[i] = x[i] + y[i] for i in [0, n).
// Designed for a single block: each thread starts at its lane and hops by
// blockDim.x until the end of the arrays.
void add(int n, float *x, float *y)
{
    for (int i = threadIdx.x; i < n; i += blockDim.x) {
        y[i] = x[i] + y[i];
    }
}
// Adds two 1M-element unified-memory vectors with one block of 256 threads,
// waits for the GPU, and frees the buffers.
int main(void)
{
    const int n = 1 << 20;  // 1M elements
    float *x = 0;
    float *y = 0;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    add<<<1, 256>>>(n, x, y);
    // Wait for the GPU before the host frees the memory.
    cudaDeviceSynchronize();
    cudaFree(x);
    cudaFree(y);
    return 0;
}
22,773 | #include <cuda_fp16.h>
#define p_blockSize 256
// Packs float field data into a half-precision scatter buffer.
// For flat entry s = k*Nscatter + sid (sid = node, k = field), every
// destination in scatterIds[scatterStarts[sid] .. scatterStarts[sid+1])
// receives __float2half(q[s]) at scatterq[id*Nentries + k].
// Launch with p_blockSize threads per block covering Nscatter*Nentries.
extern "C" __global__ void packBuf_half(
const int Nscatter,
const int Nentries,
const int * __restrict__ scatterStarts,
const int * __restrict__ scatterIds,
const float * __restrict__ q,
half * __restrict__ scatterq
)
{
    const int s = p_blockSize * blockIdx.x + threadIdx.x;
    if (s >= Nscatter * Nentries)
        return;
    const float value = q[s];
    const int sid = s % Nscatter;  // node index
    const int k = s / Nscatter;    // field index
    const int first = scatterStarts[sid];
    const int last = scatterStarts[sid + 1];
    for (int n = first; n < last; ++n) {
        scatterq[scatterIds[n] * Nentries + k] = __float2half(value);
    }
}
// Gathers half-precision contributions back into float. For flat entry
// g = k*Ngather + gid, sums __half2float(q[id*Nentries + k]) over the gather
// list of gid and accumulates the total into gatherq[g] (+=, not overwrite).
extern "C" __global__ void unpackBuf_halfAdd(const int Ngather,
const int Nentries,
const int * __restrict__ gatherStarts,
const int * __restrict__ gatherIds,
const half * __restrict__ q,
float * __restrict__ gatherq) {
    const int g = p_blockSize * blockIdx.x + threadIdx.x;
    if (g >= Ngather * Nentries)
        return;
    const int gid = g % Ngather;  // node index
    const int k = g / Ngather;    // field index
    float acc = 0.00000000e+00f;
    for (int n = gatherStarts[gid]; n < gatherStarts[gid + 1]; ++n) {
        acc += __half2float(q[gatherIds[n] * Nentries + k]);
    }
    // contiguously packed
    gatherq[g] += acc;
}
|
22,774 | #include <stdio.h>
#include <iostream>
#define N 64
#define M 32
#define BLOCK_DIM 32
// C = A * B where A is nRows x nCols and B is nCols x nRows (both row-major),
// so C is nRows x nRows. One thread per C element: idx_x = row, idx_y = col.
__global__ void matrixMultiply(int *d_a, int *d_b, int *d_out, int nRows, int nCols){
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;  // row of C
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;  // column of C
    if (idx_x < nRows && idx_y < nRows){
        int sum = 0;
        // BUGFIX: the shared (inner) dimension is nCols, not nRows -- the old
        // bound `k < nRows` read past the end of each row of A and past the
        // last row of B whenever nRows > nCols.
        for (int k = 0; k < nCols; k++){
            sum += d_a[nCols * idx_x + k] * d_b[nRows * k + idx_y];
        }
        // Row-major order with 0-based indices.
        d_out[idx_y + idx_x * nRows] = sum;
    }
}
// Multiplies A (N x M, all 1s) by B (M x N, all 2s) on the GPU and prints the
// sum of the N x N result. Each C element is M*1*2; the total is N*N*M*2.
int main(){
    // Host matrices (stack-allocated; fine for N=64, M=32).
    int h_a[N][M], h_b[M][N], h_out[N][N];
    int *d_a, *d_b, *d_out;
    int sizeA = N * M * sizeof(int);
    int sizeB = M * N * sizeof(int);
    int sizeC = N * N * sizeof(int);
    for (int i=0; i<N; i++){
        for (int j=0; j<M; j++){
            h_a[i][j] = 1; // Matrix A
        }
    }
    for (int i=0; i<M; i++){
        for (int j=0; j<N; j++){
            h_b[i][j] = 2; // Matrix B
        }
    }
    cudaMalloc((void **) &d_a, sizeA);
    cudaMalloc((void **) &d_b, sizeB);
    cudaMalloc((void **) &d_out, sizeC);
    cudaMemcpy(d_a, h_a, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeB, cudaMemcpyHostToDevice);
    dim3 blockSize(BLOCK_DIM, BLOCK_DIM);
    // BUGFIX: integer ceiling division. (int)ceil(N/blockSize.x) truncated
    // the quotient *before* ceil, dropping tail blocks whenever N is not a
    // multiple of BLOCK_DIM.
    dim3 gridSize((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);
    matrixMultiply<<<gridSize, blockSize>>>(d_a, d_b, d_out, N, M);
    // Blocking copy: also synchronises with the kernel.
    cudaMemcpy(h_out, d_out, sizeC, cudaMemcpyDeviceToHost);
    int total = 0;
    for (int i=0; i<N; i++){
        for (int j=0; j<N; j++){
            total += h_out[i][j];
        }
    }
    std::cout << "Total: " << total << std::endl;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    return 0;
}
|
22,775 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/find.h>
#include <cstdio>
#include <iostream>
#include <cstring>
#include <vector>
using namespace std;
// Finds an occurrence of *key in str. Launch with a single block whose
// blockDim.x equals the string length (one thread per character). On return,
// *res holds the highest matching index, or -1 when the key is absent.
// BUGFIX: previously every thread wrote *res = -1 unconditionally, racing
// with the match writes -- a found index could be clobbered by a late -1.
// Now only thread 0 initialises, a barrier orders the phases, and atomicMax
// makes the reported index deterministic.
__global__ void fnSearch(char *str, char *key, int *res)
{
    if (threadIdx.x == 0)
        *res = -1;
    __syncthreads();
    if (str[threadIdx.x] == *key)
        atomicMax(res, (int)threadIdx.x);
}
// Move-to-front transform of `word`, printing "index\tcode" per character.
// Phase 1 builds the final symbol table by repeatedly moving each seen
// character to the front; phase 2 replaces every character in the word by its
// position in that table; phase 3 prints the result.
// NOTE(review): phase 1 updates `list` on the host from `d_list` copies each
// iteration -- verify the host/device copy ordering preserves the intended
// MTF semantics (the per-iteration d_temp_b vector is never used).
void mtf(vector<char> word)
{
//Parallel initialisation of character set
thrust::device_vector<char> d_list(256);
thrust::sequence(d_list.begin(), d_list.begin() + 256);
thrust::host_vector<char> list(256);
thrust::device_vector<char> d_word(word.size());
thrust::device_vector<int> dRes;
int counter;
thrust::device_vector<char>::iterator iter, count;
thrust::host_vector<char> h_word(word.size());
char ch;
h_word = word;
d_word = h_word;
int i;
// Phase 1: move each character of the word to the front of the symbol table.
for (counter = 0; counter < word.size(); counter++)
{
thrust::device_vector<char> d_temp_b(255);
thrust::copy(list.begin(), list.end(), d_list.begin());
//Scan for character on cpu
h_word[0] = d_word[counter];
iter = thrust::find(d_list.begin(), d_list.end(), d_word[counter]);
//Shifting of the character set in parallel
if (d_list[0] != h_word[0])
{
// Shift everything before the found position down by one slot,
// then place the current character at the front.
thrust::copy(d_list.begin(), iter, list.begin()+1);
list[0] = h_word[0];
}
}
// Phase 2: replace each character by its index in the final table.
thrust::copy(list.begin(), list.end(), d_list.begin());
thrust::copy(word.begin(), word.end(), d_word.begin());
for (counter = 0; counter < list.size(); counter++)
{
iter = thrust::find(d_word.begin(), d_word.end(), d_list[counter]);
while (iter != d_word.end())
{
*iter = counter;
iter = thrust::find(d_word.begin(), d_word.end(), d_list[counter]);
}
}
// Phase 3: print "position<TAB>code" for each transformed character.
thrust::copy(d_word.begin(), d_word.end(), h_word.begin());
for (counter = 0; counter < word.size(); counter++)
{
ch = h_word[counter];
cout << counter << "\t" << ch << endl;
}
}
// Entry point: expects exactly one argument, the string to transform.
int main(int argc, char *argv[])
{
    if (argc != 2)
    {
        cout << "Usage: mtf.out STRING_INPUT" << endl;
        exit(1);
    }
    // Copy the argument's characters into a vector and run the transform.
    vector<char> word(argv[1], argv[1] + strlen(argv[1]));
    mtf(word);
    return 0;
}
|
22,776 | #include "includes.h"
// Flattened global thread id for a 3D grid of 3D blocks.
__device__ unsigned int getGid3d3d(){
    unsigned int block = blockIdx.x
                       + blockIdx.y * gridDim.x
                       + blockIdx.z * gridDim.x * gridDim.y;
    unsigned int local = threadIdx.x
                       + threadIdx.y * blockDim.x
                       + threadIdx.z * (blockDim.x * blockDim.y);
    return block * (blockDim.x * blockDim.y * blockDim.z) + local;
}
// Writes each thread's flattened global id into out[gid].
// `in` is accepted for interface compatibility but not used.
__global__ void thread_test(double *in, double *out){
    const unsigned int gid = getGid3d3d();
    out[gid] = gid;
}
22,777 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#include <algorithm>
#define THREADS_PER_BLOCK 1024
#define THREADS_PER_SM 2048
#define BLOCKS_NUM 160
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 16
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its source location to stderr; terminates the
// process with the error code when `abort` is set (the default).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Microbenchmark: times REPEAT_TIMES atomicAdd(+10) operations per thread on
// data1[gid], using the per-SM %%clock counter read via inline PTX. The start
// and stop clocks are bracketed by block-wide bar.sync barriers so all
// threads time the same window. res receives data1[0] to keep the atomics
// from being optimised away.
template <class T>
__global__ void max_flops(uint32_t *startClk, uint32_t *stopClk, T *data1, T *res) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
//register T s1 = data1[gid];
//register T s2 = data2[gid];
//register T result = 0;
// synchronize all threads
asm volatile ("bar.sync 0;");
// start timing
uint32_t start = 0;
// Read the SM clock counter ("memory" clobber stops reordering).
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
for (int j=0 ; j<REPEAT_TIMES ; ++j) {
atomicAdd(&data1[gid], 10);
}
// synchronize all threads
asm volatile("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[gid] = start;
stopClk[gid] = stop;
res[gid] = data1[0];
}
// Host driver for the atomicAdd bandwidth microbenchmark: runs max_flops,
// then reports bytes-per-clock over the widest start/stop clock window.
int main(){
    uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    int32_t *data1 = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
    int32_t *res = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    int32_t *data1_g;
    int32_t *res_g;
    for (uint32_t i=0; i<TOTAL_THREADS; i++) {
        data1[i] = (int32_t)i;
    }
    gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&data1_g, TOTAL_THREADS*sizeof(int32_t)) );
    gpuErrchk( cudaMalloc(&res_g, TOTAL_THREADS*sizeof(int32_t)) );
    gpuErrchk( cudaMemcpy(data1_g, data1, TOTAL_THREADS*sizeof(int32_t), cudaMemcpyHostToDevice) );
    max_flops<int32_t><<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, data1_g, res_g);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(res, res_g, TOTAL_THREADS*sizeof(int32_t), cudaMemcpyDeviceToHost) );
    // BUGFIX: max_element/min_element take past-the-end iterators; the old
    // code passed &arr[TOTAL_THREADS-1] and so ignored the last element.
    uint32_t total_time =
        *std::max_element(stopClk, stopClk + TOTAL_THREADS) -
        *std::min_element(startClk, startClk + TOTAL_THREADS);
    // Bytes touched by the atomics divided by elapsed clocks.
    float bw = ((float)(REPEAT_TIMES*TOTAL_THREADS*4)/(float)(total_time));
    printf("int32 bendwidth = %f (byte/clk)\n", bw);
    printf("Total Clk number = %u \n", total_time);
    // Release device and host memory (previously leaked).
    cudaFree(startClk_g);
    cudaFree(stopClk_g);
    cudaFree(data1_g);
    cudaFree(res_g);
    free(startClk);
    free(stopClk);
    free(data1);
    free(res);
    return 0;
}
|
22,778 | #include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Queries the properties of CUDA device 0. BUGFIX: the returned status was
// silently discarded; report failure and exit non-zero so callers can tell.
int main()
{
    cudaDeviceProp Prop;
    cudaError_t e = cudaGetDeviceProperties(&Prop, 0);
    if (e != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(e));
        return 1;
    }
    return 0;
}
|
22,779 | #include "triangle.cuh"
// custom rounding function to support needed pixel rounding
// Builds a triangle from three vertex pointers, storing them in
// counterclockwise order (positive signed area).
Triangle::Triangle(Point *a, Point *b, Point *c) {
    vertices[0] = a;
    vertices[1] = b;
    vertices[2] = c;
    if (getSignedArea() < 0) {
        // Orientation was clockwise: exchange the last two vertices.
        Point *tmp = vertices[1];
        vertices[1] = vertices[2];
        vertices[2] = tmp;
    }
}
double Triangle::getSignedArea() {
double ax = vertices[0]->getX();
double ay = vertices[0]->getY();
double bx = vertices[1]->getX();
double by = vertices[1]->getY();
double cx = vertices[2]->getX();
double cy = vertices[2]->getY();
// determinant of matrix [bx - ax, cx - ax, by - ay, cy - ay] / 2
return ((bx - ax) * (cy - ay) - (cx - ax) * (by - ay)) / 2;
}
double Triangle::getArea() {
double signedArea = getSignedArea();
if (signedArea < 0) {
return -signedArea;
}
return signedArea;
}
// Rate of change of the signed area when vertex p moves with velocity
// (vx, vy): the negated dot product of the velocity with the scaled normal
// of the opposite edge.
double Triangle::dA(int &p, double vx, double vy) {
// first extract the other two endpoints; note order matters
Point* edgePoints[2];
// retrieve in ccw order
edgePoints[0] = vertices[(p+1)%3];
edgePoints[1] = vertices[(p+2)%3];
// change is -velocity dot edge normal of length |e|/2
Segment opposite(edgePoints[0], edgePoints[1]);
// get normal to segment
double nx, ny;
// NOTE(review): assumes Segment::scaledNormal yields a normal of length
// |e|/2 as the comment above states -- confirm against Segment's contract.
opposite.scaledNormal(&nx, &ny);
// return negative of dot product
return -(vx * nx + vy * ny);
}
// d(area)/dx for vertex p: area change under unit velocity (1, 0).
double Triangle::gradX(int &p) {
return dA(p, 1, 0);
}
// d(area)/dy for vertex p: area change under unit velocity (0, 1).
double Triangle::gradY(int &p) {
return dA(p, 0, 1);
}
// Device-side point-in-triangle test: p is inside (or on the boundary) iff
// every sub-triangle (vertices[i], vertices[i+1], p) is non-clockwise.
// NOTE(review): relies on the three-point getSignedArea overload being
// callable from device code -- confirm its declaration carries __device__.
__device__ bool Triangle::contains(Point &p) {
// p is inside the triangle iff the orientations of the triangles
// with vertices (vertices[i], vertices[i+1], p) are all ccw
for(int i = 0; i < 3; i++) {
if (Triangle::getSignedArea(vertices[i], vertices[(i+1)%3], &p) < 0) {
return false;
}
}
return true;
}
// Returns the index of the vertex opposite the side of median length
// (distances[i] is the length of the side opposite vertex i).
// Host-only: throws runtime_error if no median is found (unreachable for
// valid triangles).
int Triangle::midVertex() {
double distances[3];
for(int i = 0; i < 3; i++) {
// get length of opposite side
distances[i] = vertices[(i+1)%3]->distance(*vertices[(i+2)%3]);
}
// The median side is >= the shorter and <= the longer of the other two.
for(int i = 0; i < 3; i++) {
if(distances[i] >= min(distances[(i+1)%3], distances[(i+2)%3]) &&
distances[i] <= max(distances[(i+1)%3], distances[(i+2)%3])) return i;
}
throw runtime_error("should not get here");
return -1; // to make compiler happy
}
// Length of the triangle's longest side.
double Triangle::maxLength() {
    double longest = 0;
    for (int i = 0; i < 3; i++) {
        // Side opposite vertex i.
        double side = vertices[(i+1)%3]->distance(*vertices[(i+2)%3]);
        if (side > longest) {
            longest = side;
        }
    }
    return longest;
}
// Copies the three vertex *values* (not the pointers) into the out-params.
void Triangle::copyVertices(Point *ptrA, Point *ptrB, Point *ptrC) {
    Point *dst[3] = {ptrA, ptrB, ptrC};
    for (int i = 0; i < 3; i++) {
        *dst[i] = *vertices[i];
    }
}
// Signed area of triangle (a, b, c): half the z-component of the cross
// product (b - a) x (c - a); positive for counterclockwise order.
double Triangle::getSignedArea(Point *a, Point *b, Point *c) {
    const double ux = b->getX() - a->getX();
    const double uy = b->getY() - a->getY();
    const double vx = c->getX() - a->getX();
    const double vy = c->getY() - a->getY();
    return (ux * vy - vx * uy) / 2;
}
// Streams a triangle as "Triangle p0 p1 p2 \n" using Point's operator<<.
ostream& operator<<(ostream& os, const Triangle &t) {
    os << "Triangle ";
    for (int i = 0; i < 3; i++) {
        os << *t.vertices[i] << " ";
    }
    os << "\n";
    return os;
}
|
22,780 | #include <iostream>
#include <fstream>
#include <string>
#include <cstdlib>
#include <limits>
#include <algorithm>
using namespace std;
const int BLOCK_SIZE = 512;
#define idx(i,j,lda) ( (j) + ((i)*(lda)) )
// Fixed-capacity (4000) membership set over vertex indices, used as the
// "unvisited" set in the per-source Dijkstra kernel below. Indices must be
// < 4000; there is no bounds checking.
class mySet
{
private:
int size = 4000;  // fixed capacity
bool N[4000];     // membership flags, indexed by vertex id
int cnt = 4000;   // current number of members
public:
__device__ mySet(){}
// Marks vertices [0, s) as members and sets the count to s.
// NOTE(review): flags at indices >= s are left uninitialised -- callers
// must only ever query indices below the s they passed here.
__device__ void init(int s)
{
this->cnt = s;
for (int i = 0; i < s; i++)
{
N[i] = true;
}
}
// True when x is currently a member (x must be < the initialised range).
__device__ bool contains(int x)
{
return N[x];
}
// Adds x; no-op when already present.
__device__ void insert(int x)
{
if (N[x] == true)
return;
N[x] = true;
cnt++;
}
// Removes x; no-op when absent.
__device__ void erase(int x)
{
if (N[x] == true)
{
N[x] = false;
cnt--;
}
}
// True when no members remain.
__device__ bool empty()
{
return (cnt == 0);
}
// Current member count.
__device__ int getCount()
{
return cnt;
}
};
// Global thread-id helpers for the common grid/block dimensionalities.

// 1D grid of 1D blocks.
__device__ int getGlobalIdx_1D_1D()
{
    return blockIdx.x * blockDim.x + threadIdx.x;
}
// 1D grid of 2D blocks.
__device__ int getGlobalIdx_1D_2D()
{
    return blockIdx.x * blockDim.x * blockDim.y
         + threadIdx.y * blockDim.x + threadIdx.x;
}
// 2D grid of 2D blocks.
__device__ int getGlobalIdx_2D_2D()
{
    const int block = blockIdx.y * gridDim.x + blockIdx.x;
    return block * (blockDim.x * blockDim.y)
         + threadIdx.y * blockDim.x + threadIdx.x;
}
// 2D grid of 1D blocks.
__device__ int getGlobalIdx_2D_1D()
{
    const int block = blockIdx.y * gridDim.x + blockIdx.x;
    return block * blockDim.x + threadIdx.x;
}
// 3D grid of 3D blocks.
__device__ int getGlobalIdx_3D_3D()
{
    const int block = blockIdx.x + blockIdx.y * gridDim.x
                    + blockIdx.z * gridDim.x * gridDim.y;
    return block * (blockDim.x * blockDim.y * blockDim.z)
         + threadIdx.z * (blockDim.x * blockDim.y)
         + threadIdx.y * blockDim.x + threadIdx.x;
}
//zdroj: http://cs.calvin.edu/courses/cs/374/CUDA/CUDA-Thread-Indexing-Cheatsheet.pdf
// Initialises the vertexCnt x vertexCnt distance matrix d to "infinity"
// (INT_MAX / 2, so relaxation sums cannot overflow int). Each thread handles
// one column s per cycle and walks every row of that column.
__global__ void prepareArray(int vertexCnt, int* d)
{
    int threads = gridDim.x * gridDim.y * gridDim.z * blockDim.x * blockDim.y * blockDim.z;
    // BUGFIX: ceiling division. The old truncating `vertexCnt / threads`
    // left the last (vertexCnt % threads) columns uninitialised whenever
    // vertexCnt was not a multiple of the total thread count.
    int cycleCnt = (vertexCnt + threads - 1) / threads;
    for (int cycle = 0; cycle < cycleCnt; cycle++)
    {
        int s = (blockIdx.x * blockDim.x + threadIdx.x) + threads * cycle; // column index
        if (s >= vertexCnt)
            return;
        for (int i = 0; i < vertexCnt; i++)
        {
            d[vertexCnt * i + s] = INT_MAX / 2;
        }
    }
}
// One Dijkstra run per source vertex s (all-pairs shortest paths).
// d is the vertexCnt x vertexCnt distance matrix; column s holds distances
// from source s and must be pre-filled with INT_MAX/2 by prepareArray.
// edgeMatrix[u + i*vertexCnt] > 0 means an edge of that weight from u to i.
__global__ void dijsktra( int* __restrict__ edgeMatrix, int vertexCnt, int* d)
{
    int threads = gridDim.x * gridDim.y * gridDim.z * blockDim.x * blockDim.y * blockDim.z;
    // BUGFIX: ceiling division -- the old truncating form skipped the last
    // (vertexCnt % threads) source vertices entirely.
    int cycleCnt = (vertexCnt + threads - 1) / threads;
    for (int cycle = 0; cycle < cycleCnt; cycle++)
    {
        int s = (blockIdx.x * blockDim.x + threadIdx.x) + threads * cycle; // source vertex
        if (s >= vertexCnt)
            return;
        mySet N;           // unvisited vertices
        N.init(vertexCnt);
        d[s*vertexCnt + s] = 0;  // distance to self
        while (!N.empty())
        {
            // Select the unvisited vertex u with the smallest tentative distance.
            int localMin = INT_MAX;
            int cnt = N.getCount();
            int u = 0;
            int j = 0;
            for (int i = 0; i < vertexCnt && j < cnt; i++)
            {
                if (!N.contains(i)) continue;
                if (localMin > d[vertexCnt *i+s])
                {
                    localMin = d[vertexCnt *i+s];
                    u = i;
                }
                j++;
            }
            N.erase(u);
            // Relax every unvisited neighbour of u. atomicMin keeps the
            // write safe even though each source owns its own column.
            for (int i = 0; i < vertexCnt; i++)
            {
                if (i == u || !N.contains(i)) continue;
                if (edgeMatrix[u + i*vertexCnt] > 0)
                {
                    int alt = d[vertexCnt *u+s] + edgeMatrix[u + i*vertexCnt];
                    atomicMin((d + vertexCnt * i + s), alt);
                }
            }
        }
    }
}
// Reads an adjacency matrix from a text file: the first line is the vertex
// count n, followed by rows of n space-separated integers. Returns a
// heap-allocated n*n array (caller frees with delete[]), or NULL when the
// file cannot be opened. `n` receives the parsed count.
// NOTE(review): stoi throws on malformed input, and the loop trusts the file
// to contain at least n columns per row -- no validation is performed.
int *readMatrix(const char *path, int &n)
{
ifstream iFile;
iFile.open(path);
if (!iFile.is_open())
return NULL;
string line;
getline(iFile, line);
n = atoi(line.c_str());
int *matrix = new int[n * n];
int i;
for (i = 0; getline(iFile, line); i++)
{
// Trim trailing whitespace so the column parser never sees it.
size_t endpos = line.find_last_not_of(" \t\r\n");
if (string::npos != endpos)
line = line.substr(0, endpos + 1);
for (int j = 0; j < n; j++)
{
size_t pos = line.find_first_of(" ");
// stoi sets pos to the first unconsumed character; the remainder of
// the line is kept for the next column.
matrix[idx(i,j,n)] = stoi(line, &pos, 10);
line = line.substr(pos);
}
}
iFile.close();
return matrix;
}
// Writes the n x n matrix to `path` as space-separated rows, one per line,
// truncating any existing file.
void writeMatrix(const char *path, int n, int *matrix)
{
    ofstream oFile;
    oFile.open(path, fstream::trunc | fstream::out);
    if (oFile.is_open())
    {
        for (int row = 0; row < n; row++)
        {
            for (int col = 0; col < n; col++)
            {
                const char *sep = (col + 1 < n) ? " " : "\n";
                oFile << matrix[idx(row, col, n)] << sep;
            }
        }
    }
    oFile.close();
}
// Entry point: reads an adjacency matrix, runs all-pairs Dijkstra on the GPU,
// prints the elapsed time, and optionally writes the distance matrix.
// argv: matrix file, thread count (currently unused), optional output file.
int main(int argc, const char* argv[])
{
    if (argc < 3 || argc > 4)
    {
        cout << "Program takes 2 or 3 parameters (matrix and thread count and optional output file)!\n";
        return 1;
    }
    int threadCnt = atoi(argv[2]);
    int stc = 0;
    int *matrix = readMatrix(argv[1], stc);
    // reading input file
    if (matrix == NULL){
        cout << "File doesn't exists" << endl;
        cout << argv[1] << endl;
        return 1;
    }
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    int *cumatrix;
    int *d;
    cudaMalloc((void **)&cumatrix, stc * stc * sizeof(int));
    cudaMemcpy(cumatrix, matrix, stc * stc * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d, stc * stc * sizeof(int));
    // BUGFIX: ceiling division -- `stc / BLOCK_SIZE` launched zero blocks
    // whenever the graph had fewer than BLOCK_SIZE vertices, and too few
    // blocks for any stc not a multiple of BLOCK_SIZE.
    int blocks = (stc + BLOCK_SIZE - 1) / BLOCK_SIZE;
    prepareArray<<<blocks, BLOCK_SIZE>>>(stc, d);
    // cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize.
    cudaError_t code = cudaDeviceSynchronize();
    if (code != cudaSuccess)
    {
        fprintf(stdout, "GPUassert: %s \n", cudaGetErrorString(code));
    }
    dijsktra<<<blocks, BLOCK_SIZE>>>(cumatrix, stc, d);
    code = cudaDeviceSynchronize();
    if (code != cudaSuccess)
    {
        fprintf(stdout, "GPUassert: %s \n", cudaGetErrorString(code));
    }
    int *outM = new int[stc*stc];
    cudaMemcpy(outM, d, stc * stc * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "Time: " << elapsedTime << endl;
    if (argc == 4)
    {
        cout << "Writing results...\n";
        writeMatrix(argv[3], stc, outM);
    }
    cudaFree(cumatrix);
    cudaFree(d);
    delete [] matrix;
    delete [] outM;
    cout << "\nDone\n";
    return 0;
}
|
22,781 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Raises each element of block-row m to the power (m + 1):
// t[m*blockDim.x + n] = a[m*blockDim.x + n]^(m+1), launched as <<<m, n>>>.
__global__ void square(int*a , int *t)
{
    const int row = blockIdx.x;
    const int i = row * blockDim.x + threadIdx.x;
    int prod = 1;
    for (int j = 0; j <= row; j++) {
        prod *= a[i];
    }
    t[i] = prod;
}
// Reads an m x n matrix from stdin; row i of the printed result holds each
// element of that row raised to the power (i + 1) (computed by `square`).
int main(int argc, char const *argv[])
{
    int *a, *t, m, n, i, j;
    int *d_a, *d_t;
    printf("enter the value of m \n");
    scanf("%d", &m);
    printf("enter the value of n\n");
    scanf("%d", &n);
    int size = sizeof(int) * m * n;
    a = (int*)malloc(m * n * sizeof(int));
    t = (int*)malloc(m * n * sizeof(int));
    printf("enter the input matrix\n");
    for (i = 0; i < m*n; i++)
        scanf("%d", &a[i]);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_t, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    // One block per row, one thread per column.
    square<<<m, n>>>(d_a, d_t);
    // Blocking copy also synchronises with the kernel.
    cudaMemcpy(t, d_t, size, cudaMemcpyDeviceToHost);
    printf("the result vector is :\n");
    for (i = 0; i < m; i++)
    {
        for (j = 0; j < n; j++)
            printf("%d\t", t[i*n+j] );
        printf("\n");
    }
    getchar();
    cudaFree(d_a);
    cudaFree(d_t);
    // BUGFIX: release the host buffers (previously leaked).
    free(a);
    free(t);
    return 0;
}
22,782 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
/*
#include <sys/time.h>
#include <sys/resource.h>
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}*/
// Scales global_array[0..dim) by c directly in global memory, one thread per
// element. tile_width is accepted for interface symmetry but unused.
__global__ void multiply_no_shared( int global_array[] , int dim, const int c, const int tile_width)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < dim) {
        global_array[gid] *= c;
    }
}
// Scales global_array[0..dim) by c, staging each value through dynamic
// shared memory. Requires dynamic shared memory of at least
// blockDim.x * blockDim.y * sizeof(int) -- one slot per thread in the block.
__global__ void multiply( int global_array[] , int dim, const int c, const int tile_width)
{
    extern __shared__ int shared_a[];
    // BUGFIX: shared memory is per-block, so it must be indexed by the
    // thread's slot within its block, not by the global element index
    // (the old shared_a[idx] overran the allocation for every block but
    // the first).
    const int slot = threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < dim) {
        shared_a[slot] = global_array[idx];
        shared_a[slot] *= c;
        global_array[idx] = shared_a[slot];
    }
}
// Benchmarks scaling a vector by 2 with and without a shared-memory staging
// kernel. argv: [size] [tile_width] [optional 'v' for verbose printing].
int main(int argc, char *argv[]){
    clock_t time_begin;
    // pointers to host & device arrays
    int *device_array = 0;
    int *host_array = 0;
    unsigned int size_array = 16;
    bool verbose = false;
    int tile_width = 16;
    if(argc == 3){
        size_array = atoi(argv[1]);
        tile_width = atoi(argv[2]);
    }
    else if(argc == 4){
        size_array = atoi(argv[1]);
        tile_width = atoi(argv[2]);
        verbose = (argv[3][0] == 'v');
    }
    host_array = (int*)malloc(size_array * sizeof(int));
    for(int i = 0; i < size_array; i++){
        host_array[i] = rand() % 10;
        if(verbose) printf("%i\t", host_array[i]);
    }
    if(verbose) printf("\n");
    cudaMalloc(&device_array, size_array * sizeof(int));
    cudaMemcpy(device_array, host_array, sizeof(int)*size_array, cudaMemcpyHostToDevice);
    // BUGFIX: launch a 1D configuration. The old tile_width x tile_width
    // block made every threadIdx.y row compute the same global index, so
    // elements raced and could be scaled several times; the 2D grid
    // duplicated whole blocks the same way. Threads per block is unchanged.
    dim3 bloque(tile_width * tile_width);
    dim3 grid((size_array + bloque.x - 1) / bloque.x);
    printf("%i threads per block, %i vector\n", tile_width*tile_width, size_array);
    // BUGFIX: the shared kernel stages one int per thread, so it needs
    // blockDim.x ints of dynamic shared memory, not a single int.
    int shared_mem = bloque.x * sizeof(int);
    time_begin = clock();
    multiply_no_shared<<<grid, bloque>>>(device_array, size_array , 2, tile_width);
    cudaDeviceSynchronize();
    cudaMemcpy(host_array, device_array, sizeof(int)*size_array, cudaMemcpyDeviceToHost);
    printf("GPU time without shared memory: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    // Undo the scaling so the second kernel starts from identical data.
    for(int i = 0; i < size_array; i++){
        host_array[i] /= 2;
    }
    cudaMemcpy(device_array, host_array, sizeof(int)*size_array, cudaMemcpyHostToDevice);
    time_begin = clock();
    multiply<<<grid, bloque, shared_mem>>>(device_array, size_array , 2, tile_width);
    cudaDeviceSynchronize();
    cudaMemcpy(host_array, device_array, sizeof(int)*size_array, cudaMemcpyDeviceToHost);
    printf("GPU time with shared memory: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    if(verbose){
        for(int i = 0; i < size_array; i++)
            printf("%i\t", host_array[i]);
    }
    // deallocate memory
    free(host_array);
    cudaFree(device_array);
}
|
22,783 | #include "includes.h"
// Explicit finite-difference step on a pitched 2D grid: d_val_npo = weighted
// stencil of d_val_n (alpha = row neighbours, beta = column neighbours), then
// boundary rows/columns are overwritten by mirroring their inner neighbour.
// Launch: one block per row (blockIdx.x = row), one thread per column.
// NOTE(review): boundary cells evaluate the interior stencil first, reading
// row/column index -1 (out of bounds) before being overwritten.
// NOTE(review): the final branch tests `idx == (width - 1)` but mirrors in
// the column direction -- this looks like a typo for `idy`; the left/right
// mirrors also shift the row by (idx - 1), unlike the top/bottom mirrors.
// NOTE(review): the mirror copies read d_val_npo values produced by *other
// blocks* in the same launch with no grid-wide synchronisation -- the result
// for boundary cells is timing-dependent. Confirm intended semantics before
// relying on boundary values.
__global__ void EFD_2dBM( int width, int height, int pitch_n, int pitch_npo, float *d_val_n, float *d_val_npo, float alpha, float beta ){
int idx = blockIdx.x; //row
int idy = threadIdx.x; //column
if ((idx < height) && (idy <width)){
//d_val_npo[i] = Pu * d_val_n[i + 1] + Pm * d_val_n[i] + Pd * d_val_n[i - 1];
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = alpha*(d_val_n[(idx + 1)*(pitch_n / sizeof(float)) + idy]
+ d_val_n[(idx - 1)*(pitch_n / sizeof(float)) + idy])
+ beta*(d_val_n[idx*(pitch_n / sizeof(float)) + idy + 1]
+ d_val_n[idx*(pitch_n / sizeof(float)) + idy - 1])
+ (1.0 - 2.0*alpha - 2.0*beta)*d_val_n[idx*(pitch_n / sizeof(float)) + idy];
//modify the ones on the top
if (idx == 0){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx + 1)*(pitch_npo / sizeof(float)) + idy];
}
//modify the ones on the bottom
if (idx == (height - 1)){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx - 1)*(pitch_npo / sizeof(float)) + idy];
}
//modify the ones on the left
if (idy == 0){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx - 1)*(pitch_npo / sizeof(float)) + idy + 1];
}
//modify the ones on the right
if (idx == (width - 1)){
d_val_npo[idx*(pitch_npo / sizeof(float)) + idy] = d_val_npo[(idx - 1)*(pitch_npo / sizeof(float)) + idy - 1];
}
}
}
22,784 | #include <functional>
#include "auxiliares.cu"
using namespace std;
// Pointers to device global memory shared by the EM kernels below.
double *g_datos;                // observations (read as g_datos[j * n + i] in paso_e)
double *g_resp;                 // per-gaussian log-densities, later responsibilities
double *g_verosimilitud;        // reduced total log-likelihood
double *g_verosimilitudParcial; // per-sample log-likelihood terms
double *g_sumaProbabilidades;   // per-gaussian responsibility sums
double *g_medias;               // component means
double *g_pesos;                // component mixture weights
double *g_covarianzas;          // component covariance matrices
double *g_L;                    // Cholesky factors of the covariances
double *g_logDets;              // log-determinants derived from g_L
// Computes, for gaussian k = blockIdx.z, the lower-triangular Cholesky
// factor L of its covariance matrix (Cholesky-Crout, fully serial per
// component). NOTE(review): every thread of the block executes the identical
// computation and writes the same values -- redundant but race-free only
// because all writers store equal results; launch with minimal threads.
__global__ void paso_e_cholesky(double *g_covarianzas, double *g_L, const size_t numDimensiones)
{
const size_t k = blockIdx.z;
// Zero the factor for this component first.
for (size_t j = 0; j < numDimensiones; j++) {
for (size_t h = 0; h < numDimensiones; h++) {
g_L[k * numDimensiones * numDimensiones + j * numDimensiones + h] = 0.0;
}
}
// Standard Cholesky recurrence: diagonal = sqrt(a_ii - sum), off-diagonal
// = (a_ij - sum) / L_jj.
for (size_t i = 0; i < numDimensiones; i++) {
for (size_t j = 0; j < i + 1; j++) {
double suma = 0.0;
for (size_t h = 0; h < j; h++) {
suma += g_L[k * numDimensiones * numDimensiones + i * numDimensiones + h] * g_L[k * numDimensiones * numDimensiones + j * numDimensiones + h];
}
g_L[k * numDimensiones * numDimensiones + i * numDimensiones + j] = (i == j) ?
sqrt(g_covarianzas[k * numDimensiones * numDimensiones + i * numDimensiones + i] - suma) :
(1.0 / g_L[k * numDimensiones * numDimensiones + j * numDimensiones + j] * (g_covarianzas[k * numDimensiones * numDimensiones + i * numDimensiones + j] - suma));
}
}
}
// E-step, part 1: for gaussian k = blockIdx.z and sample i, writes the
// weighted log-density (up to a constant) into g_resp[k*n + i]:
// -0.5 * (||L^{-1}(x - mu)||^2 + logDet) + log(peso_k), where v = L^{-1}(x-mu)
// is obtained by forward substitution in per-thread shared-memory scratch.
// Requires dynamic shared memory of blockDim.x * numDimensiones doubles.
// NOTE(review): g_logDets[k] is written only by the thread with global index
// 0, but __syncthreads() only orders threads within that one block -- other
// blocks may read g_logDets[k] before it is written. Confirm g_logDets is
// pre-computed, or this is a cross-block race.
__global__ void paso_e(double *g_L, double *g_logDets, double *g_datos, double *g_pesos, double *g_medias, double *g_resp, const size_t n, size_t const numDimensiones)
{
const size_t i = blockIdx.x * BLOCK_SIZE + threadIdx.x;
const size_t k = blockIdx.z;
const size_t knd = k * numDimensiones;
const size_t kndnd = knd * numDimensiones;
if (i == 0 && threadIdx.x == 0) {
g_logDets[k] = logaritmoDeterminante(g_L, k, numDimensiones);
}
__syncthreads();
if (i < n) {
extern __shared__ double sharedData[];
// Per-thread scratch vector v of length numDimensiones.
double *v = (double*) &sharedData[threadIdx.x * numDimensiones];
double suma = 0.0;
double tmp;
// Forward substitution: solve L v = (x - mu), accumulating v.v.
for (size_t j = 0; j < numDimensiones; j++) {
tmp = g_datos[j * n + i] - g_medias[knd + j];
for (size_t h = 0; h < j; h++) {
tmp -= g_L[kndnd + j * numDimensiones + h] * v[h];
}
v[j] = tmp / g_L[kndnd + j * numDimensiones + j];
suma += v[j] * v[j];
}
g_resp[k * n + i] = -0.5 * (suma + g_logDets[k]) + log(g_pesos[k]);
}
}
// E-step, part 2: converts the per-gaussian log-densities in g_resp into
// normalised responsibilities using the log-sum-exp trick (subtracting the
// max contribution for numerical stability), and stores each sample's
// log-likelihood term in g_verosimilitudParcial[i].
__global__ void paso_e2(double *g_resp, double *g_verosimilitudParcial, const size_t n, const size_t numGaussianas)
{
const size_t i = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if (i < n) {
double suma = 0.0;
double verosimilitudParcial;
double maxContribucion = MENOS_INFINITO;
// Max over components for the log-sum-exp shift.
for (size_t k = 0; k < numGaussianas; k++) {
if (g_resp[k * n + i] > maxContribucion) {
maxContribucion = g_resp[k * n + i];
}
}
for (size_t k = 0; k < numGaussianas; k++) {
suma += exp(g_resp[k * n + i] - maxContribucion);
}
// log(sum_k exp(resp_k)) computed stably.
verosimilitudParcial = maxContribucion + log(suma);
// Normalise in place: resp_k <- exp(resp_k - logsumexp).
for (size_t k = 0; k < numGaussianas; k++) {
g_resp[k * n + i] = exp(g_resp[k * n + i] - verosimilitudParcial);
}
g_verosimilitudParcial[i] = verosimilitudParcial;
}
}
// E-step part 3: sum the per-sample partial log-likelihoods into
// g_verosimilitud[0] using the file's reducir/reducirFinal reduction
// helpers (per-block partials first, then a final pass by the last block).
// Requires BLOCK_SIZE doubles of dynamic shared memory.
__global__ void paso_e_verosimilitud(double *g_verosimilitudParcial, double *g_verosimilitud, const size_t n)
{
extern __shared__ double sharedData[];
reducir<BLOCK_SIZE>([&] (size_t i) -> double { return g_verosimilitudParcial[i]; },
[&] () -> double* { return &g_verosimilitud[blockIdx.x]; },
[&] () -> void { reducirFinal<BLOCK_SIZE>([&] (size_t tid) -> double* { return &g_verosimilitud[tid]; }, [&] () -> double* { return &g_verosimilitud[0]; }, sharedData, gridDim.x); },
n, sharedData, gridDim.x * gridDim.y * gridDim.z);
}
// M-step part 1: per Gaussian k (blockIdx.z), reduce the responsibilities
// g_resp[k][0..n) into g_sumaProbabilidades (per-x-block partials, then a
// final combination), and refresh the mixture weights g_pesos[k] = suma/n.
// Requires BLOCK_SIZE doubles of dynamic shared memory.
__global__ void paso_m(double *g_resp, double *g_sumaProbabilidades, double *g_pesos, const size_t n)
{
extern __shared__ double sharedData[];
const size_t k = blockIdx.z;
const size_t numGaussianas = gridDim.z; // one z-slice per Gaussian
reducir<BLOCK_SIZE>([&] (size_t i) -> double { return g_resp[k * n + i]; },
[&] () -> double* { return &g_sumaProbabilidades[k * gridDim.x + blockIdx.x]; },
[&] () -> void {
// Final pass: fold each Gaussian's per-block partials and update weights.
for (size_t a = 0; a < numGaussianas; a++) {
reducirFinal<BLOCK_SIZE>([&] (size_t tid) -> double* { return &g_sumaProbabilidades[a * gridDim.x + tid]; }, [&] () -> double* { return &g_sumaProbabilidades[a]; }, sharedData, gridDim.x);
if (threadIdx.x == 0) g_pesos[a] = g_sumaProbabilidades[a] / n;
}
}, n, sharedData, gridDim.x * gridDim.z);
}
// M-step part 2: per Gaussian k (blockIdx.z) and dimension j (blockIdx.y),
// reduce the responsibility-weighted data sums, then normalize by the
// summed responsibilities to produce the updated means g_medias[k][j].
// Requires BLOCK_SIZE doubles of dynamic shared memory.
__global__ void paso_m2(double *g_resp, double *g_datos, double *g_sumaProbabilidades, double *g_medias, const size_t n)
{
extern __shared__ double sharedData[];
const size_t j = blockIdx.y; // dimension index
const size_t k = blockIdx.z; // Gaussian index
const size_t numGaussianas = gridDim.z;
const size_t numDimensiones = gridDim.y;
reducir<BLOCK_SIZE>([&] (size_t i) -> double { return g_resp[k * n + i] * g_datos[j * n + i]; },
[&] () -> double* { return &g_medias[k * numDimensiones * gridDim.x + j * gridDim.x + blockIdx.x]; },
[&] () -> void {
// Final pass: fold per-block partials and divide by the total weight.
for (size_t a = 0; a < numGaussianas; a++) {
for (size_t b = 0; b < numDimensiones; b++) {
reducirFinal<BLOCK_SIZE>([&] (size_t tid) -> double* { return &g_medias[a * numDimensiones * gridDim.x + b * gridDim.x + tid]; }, [&] () -> double* { return &g_medias[a * numDimensiones + b]; }, sharedData, gridDim.x);
if (threadIdx.x == 0) g_medias[a * numDimensiones + b] /= g_sumaProbabilidades[a];
}
}
}, n, sharedData, gridDim.x * gridDim.y * gridDim.z);
}
// M-step part 3: for Gaussian k (blockIdx.z) and dimension pair (j,h)
// packed into blockIdx.y, reduce the responsibility-weighted centered
// products resp * (x_j - mu_j) * (x_h - mu_h) into per-x-block partial
// sums; paso_m_covarianzas_final combines and normalizes them.
// Loop-invariant indices and means are computed once by thread 0 and
// broadcast to the block through shared memory.
__global__ void paso_m_covarianzas(double *g_resp, double *g_datos, double *g_medias, double *g_covarianzas, const size_t n, const size_t numDimensiones)
{
__shared__ double sharedData[BLOCK_SIZE];
__shared__ size_t numBloques;
__shared__ size_t j;
__shared__ size_t h;
__shared__ size_t k;
__shared__ size_t kn;
__shared__ size_t jn;
__shared__ size_t hn;
__shared__ size_t knd;
__shared__ double medias_j;
__shared__ double medias_h;
if (threadIdx.x == 0) {
numBloques = gridDim.x * gridDim.y * gridDim.z;
j = blockIdx.y / numDimensiones; // row of the covariance entry
h = blockIdx.y % numDimensiones; // column of the covariance entry
k = blockIdx.z;
kn = k * n;
jn = j * n;
hn = h * n;
knd = k * numDimensiones;
medias_j = g_medias[knd + j];
medias_h = g_medias[knd + h];
}
__syncthreads(); // publish the shared scalars before any thread reads them
reducir<BLOCK_SIZE>([&] (size_t i) -> double { return g_resp[kn + i] * (g_datos[jn + i] - medias_j) * (g_datos[hn + i] - medias_h); },
[&] () -> double* { return &g_covarianzas[knd * numDimensiones * gridDim.x + j * numDimensiones * gridDim.x + h * gridDim.x + blockIdx.x]; },
[&] () -> void {
// No final pass here — paso_m_covarianzas_final performs it.
}, n, sharedData, numBloques);
}
// M-step part 4: combine the numTrozos per-block covariance partials for
// entry (j,h) of Gaussian k, then normalize by that Gaussian's summed
// responsibilities. Requires BLOCK_SIZE doubles of dynamic shared memory.
// NOTE(review): the division right after reducirFinal assumes reducirFinal
// leaves thread 0 with a consistent view of the reduced value (i.e. it
// synchronizes internally) — confirm against the helper's implementation.
__global__ void paso_m_covarianzas_final(double *g_sumaProbabilidades, double *g_covarianzas, const size_t numTrozos)
{
extern __shared__ double sharedData[];
const size_t j = blockIdx.x; // covariance row
const size_t h = blockIdx.y; // covariance column
const size_t k = blockIdx.z; // Gaussian index
const size_t numDimensiones = gridDim.y;
reducirFinal<BLOCK_SIZE>([&] (size_t tid) -> double* { return &g_covarianzas[k * numDimensiones * numDimensiones * numTrozos + j * numDimensiones * numTrozos + h * numTrozos + tid]; }, [&] () -> double* { return &g_covarianzas[k * numDimensiones * numDimensiones + j * numDimensiones + h]; }, sharedData, numTrozos);
if (threadIdx.x == 0) g_covarianzas[k * numDimensiones * numDimensiones + j * numDimensiones + h] /= g_sumaProbabilidades[k];
}
|
22,785 | #include "includes.h"
// Adjoint of the forward-difference operator: each interior element gets
// source[idx-1] - source[idx]; the endpoints (0 and len-1) are untouched.
// Grid-stride loop, so any launch configuration covers the full range.
__global__ void forwardDifferenceAdjointKernel(const int len, const float* source, float* target) {
const int stride = blockDim.x * gridDim.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x + 1;
while (idx < len - 1) {
target[idx] = source[idx - 1] - source[idx];
idx += stride;
}
}
22,786 | extern "C"
// Classify terrain type per element from elevation (hostInputA) and a
// secondary attribute (hostInputB — presumably relief/slope, TODO confirm
// with the caller), writing a class code 0..6 into result.
// Fixes vs. original:
//  * every in-range thread looped over the whole array and redundantly
//    wrote all n outputs; each thread now handles exactly one element;
//  * the thread index ignored blockIdx, so only a single block was
//    usable; the index now incorporates blockIdx.x.
__global__ void feilei(int n, float *hostInputA, float *hostInputB,float *result) {
int i = blockIdx.x * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
if (i >= n) return;
float a = hostInputA[i];
float b = hostInputB[i];
if (a == 1.70141E38f) {
// Sentinel for invalid/no-data cells: propagate unchanged.
result[i] = a;
} else if (a >= 4500.0f) {
result[i] = 1.0f; // mountain class 1
} else if (a >= 3500.0f && a < 4500.0f) {
result[i] = 2.0f; // mountain class 2
} else if (a >= 2500.0f && a < 3500.0f) {
result[i] = 3.0f; // mountain class 3
} else if (a >= 1500.0f && a < 2500.0f && b >= 2.0f) {
result[i] = 4.0f; // mountain class 4
} else if (a >= 1000.0f && a < 1500.0f && b >= 5.0f) {
result[i] = 5.0f; // mountain class 5
} else if (a >= 300.0f && a < 1000.0f) {
result[i] = 6.0f; // mountain class 6
} else {
result[i] = 0.0f; // not mountainous
}
}
|
22,787 | #include<stdio.h>
// Minimal kernel used to exercise launch-error handling in main().
__global__ void hello(){
printf("*");
}
// Print diagnostic details for a failed CUDA call (no-op on success).
// Extracted from main(), which previously repeated this block four times.
static void reportCudaError(cudaError_t error_code, int line, const char *file) {
if (error_code != cudaSuccess) {
printf("\n");
printf("line:%d in %s\n", line, file);
printf("Error needs to be handled!\n");
printf("Error code:%d \n", error_code);
printf("Error string:%s \n", cudaGetErrorString(error_code));
}
}
// Demonstrates how CUDA reports errors: invalid launch configurations are
// surfaced by cudaGetLastError(), execution errors at synchronization.
int main() {
cudaError_t error_code;
// Intentionally invalid grid dimension (-1): the launch statement itself
// does not fail, the error is retrieved afterwards.
hello<<<-1, 1>>>();
error_code = cudaGetLastError();
printf("%d\n", error_code);
reportCudaError(error_code, __LINE__, __FILE__);
// Intentionally invalid block size (1025 exceeds the 1024-thread limit).
hello<<<1, 1025>>>();
reportCudaError(cudaGetLastError(), __LINE__, __FILE__);
// Asynchronous execution errors surface at the next synchronizing call.
reportCudaError(cudaDeviceSynchronize(), __LINE__, __FILE__);
reportCudaError(cudaDeviceReset(), __LINE__, __FILE__);
return 0;
}
|
22,788 | #include <time.h>
#include <iostream>
#include <stdio.h>
#define RADIUS 3
#define NUM_ELEMENTS 1000
// Report a failed CUDA call with its source location and abort; used via
// the cudaCheck() macro below.
static void handleError(cudaError_t err, const char *file, int line ) {
if (err == cudaSuccess)
return;
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))
// Circular 1D stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS] with
// wrap-around indexing over NUM_ELEMENTS entries; one element per thread.
// Fixes vs. original:
//  * sizeof(*in)/sizeof(in[0]) is always 1 for a pointer — the length must
//    be the known element count NUM_ELEMENTS;
//  * the wrap-around adjusted the loop variable j itself, corrupting the
//    remaining iterations; a separate wrapped index is used instead;
//  * out[i] was accumulated with += on uninitialized memory; a local sum
//    is written once;
//  * the index now includes blockIdx.x (with a bounds guard) so the kernel
//    also works for multi-block launches.
__global__ void stencil_1d(int *in, int *out)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= NUM_ELEMENTS) return;
int sum = 0;
for (int j = -RADIUS; j <= RADIUS; j++)
{
// + NUM_ELEMENTS keeps the dividend non-negative before the modulo.
int idx = (i + j + NUM_ELEMENTS) % NUM_ELEMENTS;
sum += in[idx];
}
out[i] = sum;
}
void cpu_stencil_1d(int *in, int *out) {
//PUT YOUR CODE HERE
//PUT YOUR CODE HERE
int array_length = sizeof(*in)/sizeof(in[0]);
// Run whole array
for(int i = 0; i < array_length; i++)
{
// Calculate for all neighbours and check
for(int j = -RADIUS; j <= RADIUS; j++)
{
if(i + j < 0)
{
j += array_length;
}
else if (i+j > array_length)
{
j -= array_length;
}
out[i] += in[i+j];
}
}
}
// Runs the stencil on GPU and CPU, times both, and compares the results.
// Fixes vs. original: d_out and h_out were never zeroed even though the
// stencil accumulates with +=, and the device result was never copied back
// (the memcpy was commented out), so the GPU output went unchecked.
int main() {
int h_in[NUM_ELEMENTS],
h_out[NUM_ELEMENTS],
gpu_out[NUM_ELEMENTS],
*d_in,
*d_out;
for(int i = 0; i < NUM_ELEMENTS; i++)
{
h_in[i] = rand() % 50 ;
h_out[i] = 0; // the CPU stencil accumulates, so start from zero
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaCheck(cudaMalloc((void **)&d_in, NUM_ELEMENTS*sizeof(int)));
cudaCheck(cudaMalloc((void **)&d_out, NUM_ELEMENTS*sizeof(int)));
cudaCheck(cudaMemcpy((void *)d_in, (void *) h_in, NUM_ELEMENTS*sizeof(int), cudaMemcpyHostToDevice));
// The kernel accumulates into d_out, so it must start zeroed.
cudaCheck(cudaMemset(d_out, 0, NUM_ELEMENTS*sizeof(int)));
stencil_1d<<<1, 1000>>>(d_in, d_out);
cudaCheck(cudaPeekAtLastError());
std::cout << "Done!" << std::endl;
// Copy the device result back so it can be validated below.
cudaCheck(cudaMemcpy((void *)gpu_out, (void *)d_out, sizeof(int)*NUM_ELEMENTS, cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop);
printf("Total GPU execution time: %3.1f ms\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_in);
cudaFree(d_out);
struct timespec cpu_start, cpu_stop;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start);
cpu_stencil_1d(h_in, h_out);
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop);
double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 + (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6;
printf( "CPU execution time: %3.1f ms\n", result);
// Validate the GPU result against the CPU reference.
int mismatches = 0;
for (int i = 0; i < NUM_ELEMENTS; i++)
if (gpu_out[i] != h_out[i])
mismatches++;
printf("Mismatches: %d\n", mismatches);
return 0;
}
|
22,789 |
#include <cuda_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include "NeuralNet.cuh"
// Cleared by SIGINT; the main loop polls this flag to exit cleanly.
sig_atomic_t volatile g_running = 1;
// Signal handler: only SIGINT stops the run loop; other signals are ignored.
void sig_handler(int signum)
{
if (signum != SIGINT)
return;
g_running = 0;
}
// Placeholder kernels — currently no-ops. Per their names they are meant to
// inject input spikes, propagate spikes, and generate spikes on the device;
// the actual behavior lives elsewhere (NeuralNet::trial) for now.
__global__
void add_input_spikes(NeuralNet *elem) {
return;
}
__global__
void push_spikes(NeuralNet *elem) {
return;
}
__global__
void generate_spikes(NeuralNet *elem) {
return;
}
// Host wrappers: each launches its (currently stub) kernel with a single
// thread and blocks until the device is idle.
void launch_add_input_spikes(NeuralNet *elem) {
add_input_spikes<<< 1, 1 >>>(elem);
cudaDeviceSynchronize();
}
void launch_push_spikes(NeuralNet *elem) {
push_spikes<<< 1, 1 >>>(elem);
cudaDeviceSynchronize();
}
void launch_generate_spikes(NeuralNet *elem) {
generate_spikes<<< 1, 1 >>>(elem);
cudaDeviceSynchronize();
}
// Drives the neural-net simulation: runs trials in a loop until SIGINT
// clears g_running, then tears the network and the device down.
int main(int argc, char **argv)
{
NeuralNet* neuralNet = new NeuralNet(5); // presumably 5 = network size — TODO confirm against NeuralNet.cuh
// ToDo build up neural net here
neuralNet->initThreadBlocks();
signal(SIGINT, &sig_handler); // let Ctrl-C end the loop cleanly
while (g_running) {
// neuralNet->updateActivity();
neuralNet->trial();
// neuralNet->getActivity();
// launch_add_input_spikes(neuralNet);
// launch_push_spikes(neuralNet);
// launch_generate_spikes(neuralNet);
}
printf("exiting safely\n");
// printf("On host (after by-pointer): name=%s, value=%d\n", e->name.c_str(), e->value);
delete neuralNet;
cudaDeviceReset(); // flush device state before process exit
return 0;
}
|
22,790 | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
// Abort with a descriptive message when a CUDA call reports failure; used
// via the checkCudaErrors() macro above.
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err == cudaSuccess) {
return;
}
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
// Write 1 into every in-range element of d_array; one element per thread.
__global__ void fill_one(int* d_array, size_t length) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < length) {
d_array[index] = 1;
}
}
#define BLOCK_SIZE 1024
// Hillis-Steele inclusive prefix sum within one block (one int per thread).
// Fixes vs. original:
//  * out-of-range threads returned early, skipping the __syncthreads()
//    inside the loop — barriers must be reached by all threads of a block;
//  * the loop bound depended on threadIdx.x, so threads executed different
//    numbers of barrier-containing iterations (undefined behavior); the
//    loop is now uniform with a per-thread guard;
//  * cache[tid] += cache[tid - stride] raced reads against writes in the
//    same round; the neighbour value is now read into a register with a
//    barrier before the update;
//  * removed the debug printf (it also used %d for size_t arguments).
__global__ void perfix_sum_simple(int* d_array, size_t length) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const bool active = (index < length);
__shared__ int cache[BLOCK_SIZE];
if (active) {
cache[threadIdx.x] = d_array[index];
}
for ( size_t stride = 1; stride < blockDim.x; stride *= 2 ) {
__syncthreads();
int addend = 0;
if (active && threadIdx.x >= stride) {
addend = cache[threadIdx.x - stride];
}
__syncthreads();
if (active && threadIdx.x >= stride) {
cache[threadIdx.x] += addend;
}
}
// write back
if (active) {
d_array[index] = cache[threadIdx.x];
}
}
// Blocked inclusive prefix sum: each thread owns `block_size` consecutive
// elements (copied into registers/local memory), scans them, and the
// per-thread totals are combined Hillis-Steele style through shared memory.
// Requires block_size <= BLOCK_SIZE.
// Fixes vs. original:
//  * out-of-range threads returned before the loop's __syncthreads(),
//    and the loop bound depended on threadIdx.x, so threads executed
//    different numbers of barrier-containing iterations (undefined
//    behavior); the loop is now uniform with per-thread guards.
__global__ void perfix_sum( int* d_array, size_t block_size, size_t length) {
const int index = threadIdx.x + blockIdx.x * blockDim.x;
const int start = index * block_size;
const bool active = (start < length);
__shared__ int cache[BLOCK_SIZE];
int local_copy[BLOCK_SIZE];
if (active) {
// Local sequential scan of this thread's chunk.
for ( size_t i = 0; i < block_size; ++i ) {
local_copy[i] = d_array[ start + i ];
}
// Publish the chunk total (last element) for the cross-thread scan.
cache[threadIdx.x] = local_copy[block_size-1];
}
for ( size_t stride = 1; stride < blockDim.x; stride *= 2 ) {
__syncthreads();
int operend = 0;
if ( active && threadIdx.x >= stride ) {
operend = cache[threadIdx.x - stride];
}
__syncthreads();
if ( active && threadIdx.x >= stride ) {
for ( size_t i = 0; i < block_size; ++i ) {
local_copy[i] += operend;
}
cache[threadIdx.x] = local_copy[block_size-1];
}
}
// write back
if (active) {
for ( size_t i = 0; i < block_size; ++i ) {
d_array[ start + i ] = local_copy[i];
}
}
}
#define BLOCK_NUM 256
// Fills a device array with ones, runs the blocked scan (chunk size 1),
// and prints the result one block per line.
// Fixes vs. original: kernel launches were unchecked (launch-configuration
// errors need cudaGetLastError, execution errors need a checked sync), and
// the 1 MiB host array lived on the stack (now static).
int main(int argc, char** argv) {
int* d_array = NULL;
checkCudaErrors(cudaMalloc(&d_array, sizeof(int) * BLOCK_SIZE * BLOCK_NUM));
fill_one<<<BLOCK_NUM, BLOCK_SIZE>>>(d_array, BLOCK_SIZE * BLOCK_NUM);
checkCudaErrors(cudaGetLastError()); // launch-configuration errors
perfix_sum<<<BLOCK_NUM, BLOCK_SIZE>>>(d_array, 1, BLOCK_SIZE * BLOCK_NUM);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize()); // surfaces async execution errors
// static: 1 MiB is too large to place on the stack safely.
static int h_array[BLOCK_NUM*BLOCK_SIZE] = {0};
checkCudaErrors(cudaMemcpy(h_array, d_array, sizeof(int) * BLOCK_SIZE * BLOCK_NUM, cudaMemcpyDeviceToHost));
for ( size_t i = 0; i < BLOCK_NUM * BLOCK_SIZE; ++i ) {
std::cout << h_array[i] << " ";
if ( (i % BLOCK_SIZE) == (BLOCK_SIZE-1) ) {
std::cout << std::endl;
}
}
checkCudaErrors(cudaFree(d_array));
}
|
22,791 | #include <stdio.h>
#define NUMOFRASTERRECORDSPERCORE 3 // 160 // defined by num of raster records ~80k divided by num of GPU cores ~512
// rasters are stored in int(4Byte): rasterDd, int(4Byte): minLat, int(4Byte): minLon, int(4Byte): maxLat, int(4Byte): maxLon, int(4Byte): [empty]
#define SIZEOFRASTERRECORD 5 // DWORDS to jump between the records
#define NUMOFADDRESSRECORDSPERBLOCK 4 // 5000 // defined by num of address records ~2.5m divided by num of GPU cores ~512
#define NUMOFADDRESSBLOCKS 3 // 512 // equal to the number of GPU cores
// addresses are stored in int(4Byte): id, int(4Byte): lat, int(4Byte): lon, int(4Byte): [rasterId]
#define SIZEOFADDRESSRECORD 4 // DWORDS to jump between the records
#define ADDLAT 1
#define ADDLON 2
#define MINLAT 1
#define MINLON 2
#define MAXLAT 3
#define MAXLON 4
int globalThreadId = 0;
// SAXPY: y[i] = a*x[i] + y[i] for the first n elements; one element per
// thread, out-of-range threads do nothing.
__global__ void saxpy(int n, float a, float *x, float *y) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= n) return;
y[i] = a*x[i] + y[i];
}
// __global__
// Host-side dry run of the raster-to-address mapping: walks every address
// block (rotated by the simulated thread id so "threads" start on
// different blocks), every raster record owned by the thread, and every
// address record, printing the computed record offsets. The actual
// bounding-box containment test is still commented out.
// NOTE(review): rasterBase/addressRecords are integer offsets into a flat
// record array (record layouts per the #defines above), not pointers —
// confirm before re-enabling the commented lookups.
void mapRasterToAddresses(int rasterBase, int addressRecords) {
int threadId, recordNum, addressBlockNum, currentAddressBlockNum, addressBase, addressNum, currentRasterAddress, currentAddressAddress;
// int rMinLat, rMaxLat, rMinLon, rMaxLon, aLat, aLon;
threadId = globalThreadId; // stand-in for the CUDA thread index
for ( addressBlockNum = 0; addressBlockNum < NUMOFADDRESSBLOCKS; addressBlockNum++ ) {
// Rotate the starting block by threadId to stagger accesses.
currentAddressBlockNum = ( addressBlockNum + threadId ) % NUMOFADDRESSBLOCKS;
addressBase = addressRecords + ( currentAddressBlockNum * NUMOFADDRESSRECORDSPERBLOCK * SIZEOFADDRESSRECORD );
for ( recordNum = 0; recordNum < NUMOFRASTERRECORDSPERCORE; recordNum++ ) {
currentRasterAddress = rasterBase + ( recordNum * SIZEOFRASTERRECORD ) + ( threadId * SIZEOFRASTERRECORD );
for ( addressNum = 0; addressNum < NUMOFADDRESSRECORDSPERBLOCK; addressNum++ ) {
currentAddressAddress = addressBase + ( addressNum * SIZEOFADDRESSRECORD );
printf("threadId:\t%d\n",threadId);
printf("\taddressBlockNum:\t%d\tcurrentAddressBlockNum:\t%d\taddressBase:\t%d\n",addressBlockNum,currentAddressBlockNum,addressBase);
printf("\t\trecordNum:\t%d\tcurrentRasterAddress:\t%d\n",recordNum,currentRasterAddress);
printf("\t\t\taddressNum = %d \tcurrentAddressAddress = %d\n", addressNum, currentAddressAddress);
/*
rMinLat = currentRasterAddress[currentRasterAddress][MINLAT];
rMaxLat = currentRasterAddress[currentRasterAddress][MAXLAT];
rMinLon = currentRasterAddress[currentRasterAddress][MINLON];
rMaxLon = currentRasterAddress[currentRasterAddress][MAXLON];
aLat = currentAddressAddress[currentAddressAddress][ADDLAT];
aLon = currentAddressAddress[currentAddressAddress][ADDLON];
printf("rMinLat > aLat\n");
printf("rMaxLat < aLat\n");
printf("rMinLon > aLon\n");
printf("rMaxLon > aLon\n");
*/
}
}
}
}
|
22,792 |
#include "grid_cell_kernel.cuh"
// A grid index is valid when it is defined and lies inside [0, maxGridNum).
__device__ bool IsGridIdxValid(int idx, int maxGridNum)
{
return idx != GRID_UNDEF && idx >= 0 && idx < maxGridNum;
}
// Map a world-space position to integer grid-cell coordinates, clamped to
// the grid bounds, writing them to gridCell and returning the flattened
// cell index (y-major, then z, then x).
__device__ int GetGridCell(
const float3 & gridVolMin,
const int3 & gridRes,
const float3 & pos,
float cellSize,
int3 & gridCell)
{
// Offset from the grid origin, clamped so negatives land in cell 0.
float px = pos.x - gridVolMin.x;
float py = pos.y - gridVolMin.y;
float pz = pos.z - gridVolMin.z;
px = (px < 0.0) ? 0.0 : px;
py = (py < 0.0) ? 0.0 : py;
pz = (pz < 0.0) ? 0.0 : pz;
gridCell.x = (int)(px / cellSize);
gridCell.y = (int)(py / cellSize);
gridCell.z = (int)(pz / cellSize);
// Clamp to the last cell along each axis.
const int maxX = gridRes.x - 1;
const int maxY = gridRes.y - 1;
const int maxZ = gridRes.z - 1;
if (gridCell.x > maxX) gridCell.x = maxX;
if (gridCell.y > maxY) gridCell.y = maxY;
if (gridCell.z > maxZ) gridCell.z = maxZ;
return gridCell.y * gridRes.x * gridRes.z + gridCell.z * gridRes.x + gridCell.x;
}
// Scan post-pass: add a per-block uniform offset (from uniforms[]) to each
// block's 2*blockDim.x data elements. One thread handles two elements; the
// second store multiplies by a 0/1 predicate instead of branching.
// NOTE(review): the guarded second write still stores to
// data[address + blockDim.x] (adding 0) even when the predicate is false,
// and the first write is unguarded — confirm the buffer is padded to the
// full 2*blockDim.x span per block.
__global__ void UniformAdd(
int * data,
int * uniforms,
int n,
int block_offset,
int base_idx)
{
__shared__ int uniform; // this block's offset, loaded once and broadcast
if (threadIdx.x == 0)
uniform = uniforms[blockIdx.x + block_offset];
int address = threadIdx.x + __mul24(blockIdx.x, (blockDim.x << 1)) + base_idx;
__syncthreads(); // ensure `uniform` is visible to the whole block
data[address] += uniform;
data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uniform;
}
// Up-sweep (reduce) phase of a work-efficient scan over the shared array
// input[], with CONFLICT_FREE_OFFSET index padding to avoid shared-memory
// bank conflicts. Returns the final stride, which ScanRootToLeaves
// consumes for the matching down-sweep.
// NOTE(review): the loop starts at d = blockDim.x rather than the usual
// n/2 — verify the caller launches with blockDim equal to half the
// scanned length.
__device__ int BuildSum(int * input)
{
int idx = threadIdx.x;
int stride = 1;
// Build The Sum In Place Up The Tree
for (int d = blockDim.x; d > 0; d >>= 1)
{
__syncthreads();
if (idx < d)
{
// Pair (ai, bi) are the two children combined at this level.
int i = __mul24(__mul24(2, stride), idx);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
input[bi] += input[ai];
}
stride *= 2;
}
return stride;
}
// Down-sweep phase of the work-efficient scan: starting from the stride
// returned by BuildSum, walks back down the tree swapping/accumulating
// partial sums in place, with the same CONFLICT_FREE_OFFSET padding.
__device__ void ScanRootToLeaves(int * input, int stride)
{
int idx = threadIdx.x;
// Traverse Down The Tree Building The Scan In Place
for (int d = 1; d <= blockDim.x; d *= 2)
{
stride >>= 1;
__syncthreads();
if (idx < d)
{
int i = __mul24(__mul24(2, stride), idx);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
// Classic down-sweep step: left child gets the incoming prefix,
// right child accumulates it.
int temp_data = input[ai];
input[ai] = input[bi];
input[bi] += temp_data;
}
}
}
|
22,793 | //pass
//--gridDim=[4,1,1] --blockDim=[256,1,1]
// Writes each element's warp lane ID (0-31) into d_ptr; one element per
// thread, guarded against overshooting `length`.
__global__ void sequence_gpu(int *d_ptr, int length)
{
int elemID = blockIdx.x * blockDim.x + threadIdx.x;
if (elemID < length)
{
unsigned int laneid;
// Read the lane ID register for the current warp via inline PTX.
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
d_ptr[elemID] = laneid;
}
}
|
22,794 | #include <stdio.h>
#include <stdlib.h>
// 1D correlation of d_in (size_in elements) with d_filter (size_filter
// taps), centered on each output element; out-of-range taps are skipped
// ("same" padding with zeros). One output element per thread.
// Fix vs. original: the boundary test used (i - offset + j) <= size_in,
// which reads one element past the end of d_in — valid indices are
// [0, size_in).
__global__
void gpu_conv1d(float *d_out, float *d_in, float *d_filter, int size_in, int size_filter){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= size_in) return;
float sum = 0.0f;
int offset = size_filter / 2;
for (int j = 0; j < size_filter; j++){
int k = i - offset + j; // input index for this tap
if (k >= 0 && k < size_in)
sum += d_in[k] * d_filter[j];
}
d_out[i] = sum;
}
// Fill the first N entries of arr with val.
void init(float *arr, int N, float val){
for (int i = 0; i < N; ++i)
arr[i] = val;
}
// CPU reference 1D correlation matching gpu_conv1d: "same" padding, taps
// outside [0, size_in) are skipped.
// Fix vs. original: the boundary test (i - offset + j) <= size_in read one
// element past the end of h_in — valid indices are [0, size_in).
void host_conv1d(float *h_out, float *h_in, float *h_filter, int size_in, int size_filter){
int offset = size_filter / 2;
for (int i = 0; i < size_in; i++){
float sum = 0.0f;
for (int j = 0; j < size_filter; j++){
int k = i - offset + j; // input index for this tap
if (k >= 0 && k < size_in)
sum += h_in[k] * h_filter[j];
}
h_out[i] = sum;
}
}
// Runs the GPU and CPU 1D convolutions on all-ones data and compares them.
// Fixes vs. original: the *byte* sizes (size_in etc.) were used as element
// counts everywhere — init() overran every buffer 4x, the kernel received
// byte counts for size_in/size_filter, and the comparison loop walked
// 4x past both output arrays. Element counts (din_size/filter_size) are
// now used for counts and byte sizes only for allocation. Allocations are
// also freed before exit.
int main(){
float *d_in, *d_filter, *d_out;
float *h_in, *h_filter, *h_out; // host-side reference buffers
int din_size = 24; // element counts
int filter_size = 5;
size_t size_in = din_size*sizeof(float); // byte sizes, allocation only
size_t size_filter = filter_size*sizeof(float);
size_t size_out = din_size*sizeof(float);
h_in = (float*) malloc (size_in);
h_filter = (float*) malloc (size_filter);
h_out = (float*) malloc (size_out);
cudaMallocManaged(&d_in, size_in);
cudaMallocManaged(&d_filter, size_filter);
cudaMallocManaged(&d_out, size_out);
init(d_in, din_size, 1.0);
init(d_filter, filter_size, 1.0);
init(d_out, din_size, 0.0);
size_t num_threads = 256;
size_t num_blocks = (din_size - 1)/num_threads + 1;
gpu_conv1d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, din_size, filter_size);
cudaDeviceSynchronize();
init(h_in, din_size, 1.0);
init(h_filter, filter_size, 1.0);
init(h_out, din_size, 0.0);
host_conv1d(h_out, h_in, h_filter, din_size, filter_size);
for (int i = 0; i < din_size; i++)
if (d_out[i] != h_out[i]){
printf("Program failed!! Check the idx: %d", i);
return 0;
}
printf("Success!!\n");
free(h_in);
free(h_filter);
free(h_out);
cudaFree(d_in);
cudaFree(d_filter);
cudaFree(d_out);
}
|
22,795 | #include "includes.h"
// Firm-thresholding (shrinkage) operator applied elementwise:
// y = max(|x| - T, 0) / (max(|x| - T, 0) + T) * x, for the first m
// elements; one element per thread.
__global__ void __soft(float* y, const float* x, float T, int m)
{
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < m)
{
const float v = x[i];
const float shrunk = fmaxf(fabsf(v) - T, 0.f);
y[i] = shrunk / (shrunk + T) * v;
}
}
22,796 | #define COALESCED_NUM 16
#define blockDimX 16
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)
// Generated tiled convolution/correlation: C(idy, idx) = sum over (j, i)
// of A(idy-j+h, idx-i+w-…) * B(j, i), with A rows staged 32 floats at a
// time into shared memory (two 16-wide halves so each inner tap can read
// a shifted window). Geometry is fixed by the WIDTH_A/B/C and blockDim
// macros above.
// NOTE(review): auto-generated addressing — the exact border offsets
// ((idx-i)+w-16 etc.) are assumed correct per the generator; verify
// against the tool that produced this file before editing.
__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
__shared__ float shared_0[32]; // 2 x 16-float staging window of A
int i;
int j;
float sum = 0;
for (j=0; j<h; j=(j+1))
{
for (i=0; i<w; i=(i+16))
{
int it_1;
// Stage two adjacent 16-wide segments of the current A row.
shared_0[(tidx+0)]=A(((idy+(-1*j))+h), (((idx+(-1*i))+w)+-16));
shared_0[(tidx+16)]=A(((idy+(-1*j))+h), ((idx+(-1*i))+w));
__syncthreads();
#pragma unroll
for (it_1=0; it_1<16; it_1=(it_1+1))
{
float a;
float b;
a=shared_0[((tidx+(-1*it_1))+16)];
b=B(j, (it_1+i));
sum+=(a*b);
}
__syncthreads(); // protect shared_0 before the next staging pass
}
}
{
C(idy, idx)=sum;
}
}
|
22,797 | #ifndef _MATRIX_CU_
#define _MATRIX_CU_
#include <cuda_runtime.h>
// Vector-matrix product: c[index] = sum_i a[i] * b[i][index], where b is
// row-major with row stride *n. One output element per thread.
// NOTE(review): there is no bounds check on `index` — assumes the launch
// exactly covers the output length; confirm with the caller.
// NOTE(review): n is passed by device pointer and dereferenced in the loop
// condition; presumably a plain int parameter was intended — verify.
__global__ void cuMatMul(double* a, double* b, double* c, int* n)
// __global__ void cuMatMul(double* a, double* bt, double* c, int* n)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = 0;
for (int i = 0; i < *n; ++i)
{
c[index] += a[i] * b[(*n)*i + index];
// c[index] += a[i] * bt[i + (*n) * index];
}
}
#endif // _MATRIX_CU_
|
22,798 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
// Hillis-Steele inclusive scan in one block, double-buffered between a and
// b so each round reads one buffer and writes the other (no read/write
// race within a round).
// NOTE(review): the pointer swap is per-thread-local but identical across
// threads; the final result lands in a or b depending on the parity of the
// number of rounds, and the host selects the right device buffer via
// log2(n) parity. Works only for power-of-two n launched as one block.
// NOTE(review): __syncthreads() sits at the end of each round; the first
// round reads the untouched input, so no barrier is needed before it.
__global__ void prefixOnDevice(int *a, int *b, int n){
int id = threadIdx.x;
int *s; // swap temporary for the double-buffer pointers
for(int j=1; j<n; j<<=1){
if(id >=j)
b[id] = a[id-j] + a[id];
else
b[id] = a[id];
s = a;
a = b;
b = s;
__syncthreads();
}
}
// In-place inclusive prefix sum over the first n entries of a.
void prefixOnHost(int *a, int n){
int running = 0;
for(int i = 0; i < n; ++i){
running += a[i];
a[i] = running;
}
}
//one value per thread, with power of two number of threads
// Scans 1..n on host and device (single block, power-of-two n) and asserts
// the results match.
// Fix vs. original: the host and device allocations were never released.
int main(int argc, char **argv){
int blockSize = 128;
int nBlocks = 1;
int n = blockSize; // one value per thread, power-of-two size
int size = n*sizeof(int);
int *a = (int*) malloc(size);
int *b = (int*) malloc(size);
int *a_d, *b_d;
cudaMalloc((void**) &a_d, size);
cudaMalloc((void**) &b_d, size);
for(int i=0; i<n; i++)
a[i] = i+1;
cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice);
prefixOnHost(a, n); // CPU reference, in place
prefixOnDevice <<<nBlocks, blockSize>>> (a_d, b_d, n);
// The scan double-buffers between a_d and b_d each round: the result is
// in a_d after an even number of rounds (log2(n) even), else in b_d.
if((int)log2((double)n)%2 == 0)
cudaMemcpy(b, a_d, size, cudaMemcpyDeviceToHost);
else
cudaMemcpy(b, b_d, size, cudaMemcpyDeviceToHost);
for(int i=0; i<n; i++)
assert(a[i] == b[i]);
free(a);
free(b);
cudaFree(a_d);
cudaFree(b_d);
return 0;
}
|
22,799 | #include "includes.h"
// Tiled square matrix multiply C = A * B (ds x ds, row-major) using
// block_size x block_size shared-memory tiles.
// Fixes vs. original:
//  * __syncthreads() sat inside `if ((idx < ds) && (idy < ds))` — a
//    barrier in divergent control flow is undefined behavior when ds is
//    not a multiple of block_size; all threads now reach every barrier,
//    with out-of-range tile elements loaded as zero;
//  * the tile count ds/block_size truncated, dropping the partial last
//    tile; it is now rounded up.
__global__ void mmul(const float *A, const float *B, float *C, int ds) {
// declare cache in shared memory
__shared__ float As[block_size][block_size];
__shared__ float Bs[block_size][block_size];
int idx = threadIdx.x+blockDim.x*blockIdx.x; // output column
int idy = threadIdx.y+blockDim.y*blockIdx.y; // output row
float temp = 0;
int numTiles = (ds + block_size - 1) / block_size; // round up for partial tiles
for (int i = 0; i < numTiles; i++) {
// Load one tile of A and B; pad out-of-range entries with zero so
// they contribute nothing to the dot product.
int aCol = i * block_size + threadIdx.x;
int bRow = i * block_size + threadIdx.y;
As[threadIdx.y][threadIdx.x] = (idy < ds && aCol < ds) ? A[idy * ds + aCol] : 0.0f;
Bs[threadIdx.y][threadIdx.x] = (bRow < ds && idx < ds) ? B[bRow * ds + idx] : 0.0f;
__syncthreads();
// Accumulate the partial dot product for this tile.
for (int k = 0; k < block_size; k++)
temp += As[threadIdx.y][k] * Bs[k][threadIdx.x];
__syncthreads();
}
// Write to global memory (in-range threads only).
if ((idx < ds) && (idy < ds))
C[idy*ds+idx] = temp;
}
22,800 | #include "includes.h"
// Chebyshev (L-infinity) distance between two points whose coordinates are
// strided by `signallength` (structure-of-arrays layout): the maximum
// absolute per-dimension difference.
__device__ float maxMetricPoints(const float* g_uquery, const float* g_vpoint, int pointdim, int signallength){
float maxDiff = 0;
for(int d=0; d<pointdim; d++){
float diff = g_vpoint[d * signallength] - g_uquery[d * signallength];
if (diff < 0) diff = -diff; // abs
if (diff > maxDiff) maxDiff = diff;
}
return maxDiff;
}
// Brute-force range search: one thread per query sample counts how many
// points of the same trial lie within that sample's radius under the
// Chebyshev metric, excluding indices within `exclude` of the query's
// in-trial index. Counts accumulate in shared memory and are copied to
// g_npoints. Dynamic shared memory required: blockDim.x ints.
// NOTE(review): both __syncthreads() calls sit inside `if(tid<signallength)`
// — a barrier in divergent control flow is undefined behavior when
// signallength is not a multiple of blockDim.x; confirm the launch always
// covers an exact multiple, or hoist the barriers.
__global__ void kernelBFRSAllshared(const float* g_uquery, const float* g_vpointset, int *g_npoints, int pointdim, int triallength, int signallength, int exclude, const float* vecradius)
{
// shared memory
extern __shared__ char array[];
int *s_npointsrange; // per-thread neighbour counters
s_npointsrange = (int*)array;
float radius=0;
const unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
const unsigned int itrial = tid / triallength; // indextrial
if(tid<signallength){
s_npointsrange[threadIdx.x] = 0;
__syncthreads();
radius = *(vecradius+tid); // per-query search radius
unsigned int indexi = tid-triallength*itrial; // index within the trial
for(int t=0; t<triallength; t++){
int indexu = tid;
int indexv = (t + itrial*triallength); // candidate in the same trial
// Skip candidates within `exclude` of the query's in-trial index.
int condition1=indexi-exclude;
int condition2=indexi+exclude;
if((t<condition1)||(t>condition2)){
float temp_dist = maxMetricPoints(g_uquery+indexu, g_vpointset+indexv,pointdim, signallength);
if(temp_dist <= radius){
s_npointsrange[threadIdx.x]++;
}
}
}
__syncthreads();
//printf("\ntid:%d npoints: %d\n",tid, s_npointsrange[threadIdx.x]);
//COPY TO GLOBAL MEMORY
g_npoints[tid] = s_npointsrange[threadIdx.x];
}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.