serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,201 | #include <stdlib.h>
#include <string.h>
#include <time.h>
// CPU reference: element-wise vector sum C[i] = A[i] + B[i] for i in [0, N).
// A no-op when N <= 0.
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
    int idx = 0;
    while (idx < N) {
        C[idx] = A[idx] + B[idx];
        ++idx;
    }
}
// Fills ip[0..size-1] with pseudo-random floats in [0.0, 25.5]
// ((rand() & 0xFF) / 10.0f).
// Fix: the original called srand((unsigned)time(...)) on EVERY invocation, so
// two calls within the same second reused the same seed and produced
// identical "random" buffers (e.g. h_A == h_B in the caller). The RNG is now
// seeded exactly once per process.
void initialData(float *ip, int size) {
    static int seeded = 0;   // one-time seed guard
    if (!seeded) {
        srand((unsigned int)time(NULL));
        seeded = 1;
    }
    for (int i = 0; i < size; i++) {
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}
// Driver: allocate three nElem-float host buffers, fill A and B with random
// data, compute C = A + B on the CPU, then release the buffers.
int main(int argc, char **argv) {
    int nElem = 1024;                       // elements per vector
    size_t nBytes = nElem * sizeof(float);  // buffer size in bytes
    float *h_A, *h_B, *h_C;
    // NOTE(review): malloc results are not checked for NULL.
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    // NOTE(review): initialData seeds srand with time() on each call, so two
    // calls in the same second give h_A and h_B identical contents.
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    sumArraysOnHost(h_A, h_B, h_C, nElem);
    free(h_A);
    free(h_B);
    free(h_C);
    return(0);
}
20,202 | #define NUM_DIFF_EQUATIONS 2 // number of differential equations
#define NUM_ITERATIONS 10000
#define TIME_STEP 0.001
#define cuda_get(matrix, row, column, width) (matrix[(row)*(width) + (column)])
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// Differential equations for a circle
// dy1/dt = y2: first equation of the coupled system y1' = y2, y2' = -y1
// ("differential equations for a circle", per the file comment).
__device__ float dy_dt_1(float y1, float y2, float t) {
    return y2;   // y1 and t are unused for this equation
}
// dy2/dt = -y1: second equation of the coupled circular system.
__device__ float dy_dt_2(float y1, float y2, float t) {
    return -y1;  // y2 and t are unused for this equation
}
// Forward ODE solver that moves forward by onestep
// Single forward-Euler step: next_ys = ys + timestep * f(ys, time).
// Does not use thread indices, so every launched thread writes the same two
// slots — intended for a <<<1,1>>> launch. (Unused by main, which calls
// solve() instead.)
__global__ void solve_one(float* ys, float* next_ys, float time, float timestep) {
    next_ys[0] = ys[0] + timestep * dy_dt_1(ys[0], ys[1], time);
    next_ys[1] = ys[1] + timestep * dy_dt_2(ys[0], ys[1], time);
}
// Forward-Euler integration of the full trajectory in a single kernel.
// ys is a NUM_ITERATIONS x 2 row-major table; row 0 holds the initial
// condition and each row i is computed from row i-1 with step TIME_STEP.
// The recurrence is inherently sequential, so this kernel is meant for a
// single-thread launch (<<<1,1>>>); extra threads would redundantly repeat
// (and race on) the same writes.
__global__ void solve(float* ys, float time) {
    for (int i = 1; i < NUM_ITERATIONS; ++i) {
        // cuda_get(matrix, row, column, width): row-major element access
        cuda_get(ys, i, 0, 2) = cuda_get(ys, i-1, 0, 2) + TIME_STEP * dy_dt_1(cuda_get(ys, i-1, 0, 2), cuda_get(ys, i-1, 1, 2), time);
        cuda_get(ys, i, 1, 2) = cuda_get(ys, i-1, 1, 2) + TIME_STEP * dy_dt_2(cuda_get(ys, i-1, 0, 2), cuda_get(ys, i-1, 1, 2), time);
        time += TIME_STEP;
    }
}
// Host driver: set the initial condition (y1, y2) = (1, 0), run the
// single-thread Euler solver on the device, print every 100th trajectory row.
int main(int argc, char* argv[]) {
    float *dev_ys;
    int dev_ys_size = NUM_ITERATIONS * NUM_DIFF_EQUATIONS * sizeof(float);
    // NOTE(review): no CUDA API return codes are checked anywhere below.
    cudaMalloc( (void**)&dev_ys, dev_ys_size);
    // Host trajectory buffer; only row 0 is initialized here — the kernel
    // fills rows 1..NUM_ITERATIONS-1.
    float ys[NUM_ITERATIONS * NUM_DIFF_EQUATIONS];
    cuda_get(ys, 0, 0, NUM_DIFF_EQUATIONS) = 1;   // y1(0)
    cuda_get(ys, 0, 1, NUM_DIFF_EQUATIONS) = 0;   // y2(0)
    cudaMemcpy(dev_ys, ys, dev_ys_size, cudaMemcpyHostToDevice);
    solve<<<1,1>>>(dev_ys, 0);   // sequential solver: one thread on purpose
    // This blocking copy also synchronizes with the kernel above.
    cudaMemcpy(ys, dev_ys, dev_ys_size, cudaMemcpyDeviceToHost);
    for (int row = 0; row < NUM_ITERATIONS; row += 100) {
        cout << "next_ys[" << row << "] = [";
        for (int i = 0; i < NUM_DIFF_EQUATIONS; i++) {
            cout << cuda_get(ys, row, i, NUM_DIFF_EQUATIONS) << ", ";
        }
        cout << "]\n";
    }
    cudaFree(dev_ys);
    return 0;
}
|
20,203 | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
#include <stdio.h>
using namespace std;
/* Concurrent kernel execution
* - compare concurrent execution performance with serial execution
* - effect of (number of blocks) and (number of multiprocessors)
*/
#define TILE_DIM 16
#define BLOCK_ROWS 16
// Naive out-of-place transpose of a width x height row-major matrix.
// Launched with TILE_DIM x TILE_DIM blocks; with TILE_DIM == BLOCK_ROWS
// (both 16) the inner loop body executes exactly once per thread.
// nreps repeats the identical work purely for timing purposes.
// "Naive": writes through index_out are strided by `height` across adjacent
// threads, so they are not coalesced.
__global__ void transposeNaive(double *odata, double* idata, int width, int height, int nreps)
{
    int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;   // input column
    int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;   // input row
    int index_in = xIndex + width * yIndex;    // row-major input offset
    int index_out = yIndex + height * xIndex;  // transposed output offset
    for (int r=0; r < nreps; r++) {
        for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
            odata[index_out+i] = idata[index_in+i*width];
        }
    }
}
// Benchmark harness: reports per-device support for concurrent kernels, then
// times nStream launches of transposeNaive all issued into stream[0]
// (serial execution — change stream[0] to stream[i], per the comment below,
// to measure concurrent execution instead).
// Fix: cudaThreadSynchronize() is deprecated and removed in CUDA 12; the
// supported equivalent cudaDeviceSynchronize() is used instead.
int main() {
    // check if device support concurrent executions
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    int device;
    for (device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printf("Device %d has compute capability %d.%d.\n", device,
               deviceProp.major, deviceProp.minor);
        cout << " concurrent kernel execution = " << deviceProp.concurrentKernels << endl;
    }
    /************************/
    // (*) Repeat for side = 32, 64, 2048
    // (for side = 2048, set nTranspose = 8)
    int side = 32;              // matrix edge length
    int n = side*side;          // elements per matrix
    int nTranspose = 96;        // number of independent transposes
    int nreps = 200;            // in-kernel repetitions (timing only)
    int nStream = nTranspose;   // one stream per transpose
    // define streams
    cudaStream_t stream[nStream];
    for (int i=0; i<nStream; i++)
        cudaStreamCreate(&stream[i]);
    struct timeval tt1, tt2;
    int ms;
    double fms;
    // allocate pinned host memory (required for fast/async transfers)
    double *data;
    cudaMallocHost((void**) &data, nTranspose * n * sizeof(double));
    // data initialization
    for (int j=0; j<nTranspose; j++)
        for (int i=0; i<n; i++) {
            data[i+j*n] = double(i+j*n);
        }
    double *data_dev;
    // device memory: per stream, first n doubles = input, next n = output
    cudaMalloc((void**) &data_dev, nStream * 2 * n * sizeof(double));
    dim3 grid(side/16,side/16,1);
    dim3 threads(16,16,1);
    // send data to device
    for (int i=0; i<nStream; i++) {
        int offset = i * n;
        cudaMemcpy(data_dev + offset*2, data + offset, n * sizeof(double),
                   cudaMemcpyHostToDevice);
    }
    cudaDeviceSynchronize();
    gettimeofday( &tt1, NULL );
    // kernel executions :
    // (*) for concurrent execution : stream[0] --> stream[i]
    for (int i=0; i<nStream; i++) {
        int offset = i * n;
        transposeNaive <<< grid, threads, 0, stream[0] >>>
            (data_dev + offset*2+n, data_dev + offset*2, side, side, nreps);
    }
    cudaDeviceSynchronize();   // launches are async: wait before stopping the clock
    gettimeofday( &tt2, NULL );
    // get data back from device
    for (int i=0; i<nStream; i++) {
        int offset = i * n;
        cudaMemcpy(data + offset, data_dev + offset*2 + n, n * sizeof(double),
                   cudaMemcpyDeviceToHost);
    }
    // timing: elapsed microseconds -> seconds
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = ((double)ms)/1000000.0;
    cout << "Comp time = " << fms << endl;
    // destroy streams
    for (int i=0; i<nStream; i++)
        cudaStreamDestroy(stream[i]);
    cudaFree(data_dev);
    cout << "value check = " << data[n+5467] << endl;
    // free pinned host memory
    cudaFreeHost(data);
}
|
20,204 | #include "includes.h"
#define DEBUG false
#define DEBUG_OUTPUT false
#define DEBUG_DELTA_K false
#define DEBUGNET false
#define DEBUG_TIMEING true
#define index(i,j,ld) (((j)*(ld))+(i))
int numBlocks = 1;
int blockSize = 256;
using namespace std;
/*
* Print Matrix on host
*/
// Adds `constant` to every element of `input` (length num_elements).
// One thread per element; threads past the end return without touching memory.
__global__ void addConstant(float* input, float constant, int num_elements){
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= num_elements) {
        return;
    }
    input[tid] += constant;
}
20,205 | #include "includes.h"
// Row pass of an integral image: writes per-row running sums of a
// channels x h x w input into an output whose channel planes are
// (h+1) x (w+1) with a zeroed first row/column border.
// One thread processes one complete row.
// BLOCK_SIZE comes from includes.h; the indexing assumes blocks of
// BLOCK_SIZE*BLOCK_SIZE threads — TODO confirm against the launch site.
__global__ void accumulateRowsKernel( float *input, float *output, int channels, int h, int w) {
    // view multichannel image as a multiline single-channel image
    int globalRowIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    if (globalRowIdx < channels * h) {
        // Skip this plane's first row (the "+ globalRowIdx / h + 1" term) and
        // first column (the trailing "+ 1").
        float *outputRow = output + (globalRowIdx + globalRowIdx / h + 1) * (w+1) + 1;
        outputRow[-1] = 0;   // zero the border cell to the left of this row
        // accumulate in double to limit float rounding drift across the row
        double sum = 0;
        for (int i = 0; i < w; ++i) {
            sum += input[globalRowIdx * w + i];
            outputRow[i] = static_cast<float>(sum);
        }
        // need to zero the (0,0) corner of the output separately >:(
        output[(globalRowIdx / h) * (w+1) * (h+1)] = 0;
    }
}
20,206 | #include<curand_kernel.h>
// Monte Carlo tally: counts samples falling inside the intersection of four
// radius-2 disks centered at the unit-square corners (0,0),(1,0),(1,1),(0,1).
// Samples are uniform over (-2, 3] x (-2, 3]. Each thread runs
// *numSamples / numThreads trials (any remainder is dropped) and writes its
// inside/outside tallies to inCount[seq] / outCount[seq] for the host to
// reduce afterwards.
extern "C" __global__ void integrate(curandState * states,
unsigned long long * seed,
unsigned long long * numSamples,
unsigned long long * inCount,
unsigned long long * outCount) {
int numThreads = blockDim.x * gridDim.x;
unsigned long long n = *numSamples / numThreads;   // trials per thread
unsigned long long seq = threadIdx.x + blockIdx.x * blockDim.x;  // global thread id
curandState * state = &states[seq];
// same seed for every thread, distinct subsequence per thread
curand_init(*seed, seq, 0, state);
unsigned long long in = 0;
unsigned long long out = 0;
for(unsigned long long i = 0; i < n; ++i) {
// curand_uniform returns a value in (0, 1]; scaled to (-2, 3]
float x = 5 * curand_uniform(state) - 2;
float y = 5 * curand_uniform(state) - 2;
// squared distances to the four unit-square corners
float d1 = x*x + y*y;
float d2 = (x-1)*(x-1) + y*y;
float d3 = (x-1)*(x-1) + (y-1)*(y-1);
float d4 = x*x + (y-1)*(y-1);
int z = d1 < 4 && d2 < 4 && d3 < 4 && d4 < 4 ? 1 : 0;  // inside all four disks?
in += z;
out += 1-z;
}
inCount[seq] = in;
outCount[seq] = out;
}
20,207 | #include<cuda.h>
#include<iostream>
#include<stdio.h>
// Each thread computes and prints (threadIdx.x + 1)! iteratively.
// (The original comment claimed this "adds a value to a variable stored in
// global memory" — it does not; everything here is thread-local.)
// int overflows for n >= 13, so keep the thread count per block small.
__global__ void factorialKernel()
{
    int factorial = 1;
    int n = threadIdx.x+1;   // thread t computes (t+1)!
    for(int i = 1; i <= n; ++i) {
        factorial *= i;
    }
    // device-side printf: output order across threads is not defined
    printf("%d!=%d\n", n, factorial);
}
int main()
{
    // invoke GPU kernel: one block of eight threads -> prints 1! through 8!
    factorialKernel<<<1,8>>>();
    // wait for the kernel so its printf output is flushed before exit
    cudaDeviceSynchronize();
    //bring the result back from the GPU into the hostArray
    // cudaMemcpy(&hostArray, devArray, sizeof(int) * numElems, cudaMemcpyDeviceToHost);
    // print out the result to confirm that things are looking good
    //std::printf("here\n");
    //release the memory allocated on the GPU
    //cudaFree(devArray);
    return 0;
}
|
20,208 | #include<stdio.h>
// Prints a greeting identifying each thread's block and thread index.
__global__ void hello_from_gpu(){
    const int bid = blockIdx.x;   // block index within the grid
    const int tid = threadIdx.x;  // thread index within the block
    printf("hello world from block %d and thread %d\n", bid, tid);
}
// Launch 2 blocks x 4 threads (8 greetings total), then synchronize so the
// device printf output is flushed before the process exits.
int main() {
    hello_from_gpu<<<2, 4>>>();
    cudaDeviceSynchronize();
    return 0;
}
20,209 | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuSearchDoublet.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: argv[1] selects how many entries of
// matrices_ to sweep; for each matrix size and each of the 20 block shapes in
// blocks_, launches cuSearchDoublet once, then 10 warm-up runs, then times
// 1000 launches and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// NOTE(review): every cudaMalloc below requests XSIZE*YSIZE BYTES (no
// sizeof factor) — under-sized if the kernel indexes XSIZE*YSIZE ints/floats.
// Buffers are also never freed, so each loop iteration leaks device memory.
int main(int argc, char **argv) {
cudaSetDevice(0);
// number of matrix sizes to sweep, parsed from the command line (unchecked)
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *nSpM = NULL;
cudaMalloc(&nSpM, XSIZE*YSIZE);
const float *spMmat = NULL;
cudaMalloc(&spMmat, XSIZE*YSIZE);
const int *nSpB = NULL;
cudaMalloc(&nSpB, XSIZE*YSIZE);
const float *spBmat = NULL;
cudaMalloc(&spBmat, XSIZE*YSIZE);
const int *nSpT = NULL;
cudaMalloc(&nSpT, XSIZE*YSIZE);
const float *spTmat = NULL;
cudaMalloc(&spTmat, XSIZE*YSIZE);
const float *deltaRMin = NULL;
cudaMalloc(&deltaRMin, XSIZE*YSIZE);
const float *deltaRMax = NULL;
cudaMalloc(&deltaRMax, XSIZE*YSIZE);
const float *cotThetaMax = NULL;
cudaMalloc(&cotThetaMax, XSIZE*YSIZE);
const float *collisionRegionMin = NULL;
cudaMalloc(&collisionRegionMin, XSIZE*YSIZE);
const float *collisionRegionMax = NULL;
cudaMalloc(&collisionRegionMax, XSIZE*YSIZE);
int *nSpMcomp = NULL;
cudaMalloc(&nSpMcomp, XSIZE*YSIZE);
int *nSpBcompPerSpM_Max = NULL;
cudaMalloc(&nSpBcompPerSpM_Max, XSIZE*YSIZE);
int *nSpTcompPerSpM_Max = NULL;
cudaMalloc(&nSpTcompPerSpM_Max, XSIZE*YSIZE);
int *nSpBcompPerSpM = NULL;
cudaMalloc(&nSpBcompPerSpM, XSIZE*YSIZE);
int *nSpTcompPerSpM = NULL;
cudaMalloc(&nSpTcompPerSpM, XSIZE*YSIZE);
int *McompIndex = NULL;
cudaMalloc(&McompIndex, XSIZE*YSIZE);
int *BcompIndex = NULL;
cudaMalloc(&BcompIndex, XSIZE*YSIZE);
int *tmpBcompIndex = NULL;
cudaMalloc(&tmpBcompIndex, XSIZE*YSIZE);
int *TcompIndex = NULL;
cudaMalloc(&TcompIndex, XSIZE*YSIZE);
int *tmpTcompIndex = NULL;
cudaMalloc(&tmpTcompIndex, XSIZE*YSIZE);
// round the grid up so it covers XSIZE x YSIZE with whole blocks
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);   // forces CUDA context creation before timing
// first launch: untimed "cold" run
cuSearchDoublet<<<gridBlock,threadBlock>>>(nSpM,spMmat,nSpB,spBmat,nSpT,spTmat,deltaRMin,deltaRMax,cotThetaMax,collisionRegionMin,collisionRegionMax,nSpMcomp,nSpBcompPerSpM_Max,nSpTcompPerSpM_Max,nSpBcompPerSpM,nSpTcompPerSpM,McompIndex,BcompIndex,tmpBcompIndex,TcompIndex,tmpTcompIndex);
cudaDeviceSynchronize();
// warm-up runs
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuSearchDoublet<<<gridBlock,threadBlock>>>(nSpM,spMmat,nSpB,spBmat,nSpT,spTmat,deltaRMin,deltaRMax,cotThetaMax,collisionRegionMin,collisionRegionMax,nSpMcomp,nSpBcompPerSpM_Max,nSpTcompPerSpM_Max,nSpBcompPerSpM,nSpTcompPerSpM,McompIndex,BcompIndex,tmpBcompIndex,TcompIndex,tmpTcompIndex);
}
// timed runs; NOTE(review): launches are asynchronous and there is no
// synchronize before `end`, so this times mostly launch overhead.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuSearchDoublet<<<gridBlock,threadBlock>>>(nSpM,spMmat,nSpB,spBmat,nSpT,spTmat,deltaRMin,deltaRMax,cotThetaMax,collisionRegionMin,collisionRegionMax,nSpMcomp,nSpBcompPerSpM_Max,nSpTcompPerSpM_Max,nSpBcompPerSpM,nSpTcompPerSpM,McompIndex,BcompIndex,tmpBcompIndex,TcompIndex,tmpTcompIndex);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
20,210 | #include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/unique.h>
#include <thrust/binary_search.h>
#include <thrust/sort.h>
#include <iostream>
/*
* This example "welds" triangle vertices together by taking as
* input "triangle soup" and eliminating redundant vertex positions
* and shared edges. A connected mesh is the result.
*
*
* Input: 9 vertices representing a mesh with 3 triangles
*
* Mesh Vertices
* ------ (2) (5)--(4) (8)
* | \ 2| \ | \ \ | | \
* | \ | \ <-> | \ \ | | \
* | 0 \| 1 \ | \ \ | | \
* ----------- (0)--(1) (3) (6)--(7)
*
* (vertex 1 equals vertex 3, vertex 2 equals vertex 5, ...)
*
* Output: mesh representation with 5 vertices and 9 indices
*
* Vertices Indices
* (1)--(3) [(0,2,1),
* | \ | \ (2,3,1),
* | \ | \ (2,4,3)]
* | \| \
* (0)--(2)--(4)
*/
// compare two float2s for equality
// Functor: component-wise equality of two float2 vertices (used by
// thrust::unique to collapse duplicate positions).
struct float2_equal_to
{
    __host__ __device__
    bool operator()(float2 a, float2 b)
    {
        return a.x == b.x && a.y == b.y;
    }
};
// compare ordering of two float2s
// Functor: lexicographic (x first, then y) strict weak ordering for float2
// (used by thrust::sort and thrust::lower_bound).
struct float2_less
{
    __host__ __device__
    bool operator()(float2 a, float2 b)
    {
        if (a.x < b.x)
            return true;
        else if (a.x > b.x)
            return false;
        return a.y < b.y;   // x components equal: order by y
    }
};
// Vertex "welding" demo (see the header comment above): build a 9-vertex
// triangle soup, deduplicate positions into a unique vertex list, and
// recover per-input indices into that list. Prints the resulting mesh.
int main(void)
{
    // allocate memory for input mesh representation
    thrust::device_vector<float2> input(9);
    input[0] = make_float2(0,0); // First Triangle
    input[1] = make_float2(1,0);
    input[2] = make_float2(0,1);
    input[3] = make_float2(1,0); // Second Triangle
    input[4] = make_float2(1,1);
    input[5] = make_float2(0,1);
    input[6] = make_float2(1,0); // Third Triangle
    input[7] = make_float2(2,0);
    input[8] = make_float2(1,1);
    // allocate space for output mesh representation
    thrust::device_vector<float2> vertices = input;
    thrust::device_vector<unsigned int> indices(input.size());
    // sort vertices to bring duplicates together
    thrust::sort(vertices.begin(), vertices.end(), float2_less());
    // find unique vertices and erase redundancies
    vertices.erase(thrust::unique(vertices.begin(), vertices.end(), float2_equal_to()), vertices.end());
    // find index of each input vertex in the list of unique vertices
    // (valid because `vertices` is sorted and lower_bound uses the same
    // comparator as the sort)
    thrust::lower_bound(vertices.begin(), vertices.end(),
                        input.begin(), input.end(),
                        indices.begin(),
                        float2_less());
    // print output mesh representation
    std::cout << "Output Representation" << std::endl;
    for(size_t i = 0; i < vertices.size(); i++)
    {
        float2 v = vertices[i];
        std::cout << " vertices[" << i << "] = (" << v.x << "," << v.y << ")" << std::endl;
    }
    for(size_t i = 0; i < indices.size(); i++)
    {
        std::cout << " indices[" << i << "] = " << indices[i] << std::endl;
    }
    return 0;
}
|
20,211 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#define TILE_WIDTH 512
#define index(i, j, N) ((i)*(N+1)) + (j)
// Returns the larger of a and b.
int maximum(int a, int b) {
    if (a > b) {
        return a;
    }
    return b;
}
// One relaxation step of the 0/1-knapsack DP for item k (1-based):
//   output_f[c] = max(input_f[c], input_f[c - weights[k-1]] + profits[k-1])
// for capacities c in [c_min, capacity]; other threads exit immediately
// (the host pre-copies input_f into output_f for the untouched range).
// Fix: the thread index used a hard-coded 512 instead of blockDim.x; it only
// worked because the launch uses TILE_WIDTH (=512) threads per block, and
// would silently break if TILE_WIDTH changed.
__global__ void knapsackKernel(int *profits, int *weights, int *input_f, int *output_f, int capacity, int c_min, int k){
    int c = blockIdx.x * blockDim.x + threadIdx.x;   // capacity handled by this thread
    if(c<c_min || c>capacity){return;}
    int take = input_f[c-weights[k-1]] + profits[k-1];   // value if item k is taken
    if(input_f[c] < take){
        output_f[c] = take;
    }
    else{
        output_f[c] = input_f[c];   // keep the previous best
    }
}
// Host driver for the GPU 0/1-knapsack DP over items 1..n.
// Ping-pongs between dev_f0 and dev_f1: for item k, the current table is
// first copied into the target buffer, then knapsackKernel overwrites only
// the capacities the recurrence can still improve.
// sumW tracks the total weight of the remaining items, so the lower bound
// c = max(capacity - sumW, weights[k-1]) skips provably irrelevant cells.
// On return, both final tables are copied into f0 and f1; the caller reads
// f1 when n is even and f0 when n is odd.
void knapsackCuda(int *profits, int *weights, int capacity, int n, int *f0, int *f1){
    int *dev_profits, *dev_weights, *dev_f0, *dev_f1;
    int sumW = 0;
    int i,c;
    // total weight of all items (shrinks as items are consumed below)
    for(i=0; i<n; i++){
        sumW = sumW + weights[i];
    }
    // NOTE(review): no CUDA return codes are checked in this function.
    cudaMalloc((void**)&dev_f0, (capacity+1)*sizeof(int));
    cudaMalloc((void**)&dev_f1, (capacity+1)*sizeof(int));
    cudaMalloc((void**)&dev_profits, n*sizeof(int));
    cudaMalloc((void**)&dev_weights, n*sizeof(int));
    cudaMemcpy(dev_profits, profits, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_weights, weights, n*sizeof(int), cudaMemcpyHostToDevice);
    // base case: zero profit at every capacity before any item is considered
    cudaMemset(dev_f0, 0, (capacity+1)*sizeof(int));
    cudaMemset(dev_f1, 0, (capacity+1)*sizeof(int));
    /*int p;
    for(p=0; p<=capacity; p++) printf("%d ", dev_f1[p]);
    printf("\n");*/
    int k=1;
    while(k<=n){
        sumW = sumW - weights[k-1];            // weight of items after k
        c = maximum(capacity-sumW, weights[k-1]);  // lowest capacity worth updating
        //printf("k = %d\n", k);
        //printf("%d\n", c);
        dim3 dimGrid(ceil(1.0*(capacity-0+1)/TILE_WIDTH), 1, 1);
        dim3 dimBlock(TILE_WIDTH,1,1);
        if(k%2==0){
            // even k: read dev_f0, write dev_f1 (copy first so skipped cells persist)
            cudaMemcpy(dev_f1, dev_f0, (capacity+1)*sizeof(int), cudaMemcpyDeviceToDevice);
            knapsackKernel<<<dimGrid, dimBlock>>>(dev_profits, dev_weights, dev_f0, dev_f1, capacity, c, k);
            //cudaDeviceSynchronize();
            /*cudaMemcpy(f1, dev_f1, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
            int p;
            for(p=0; p<=capacity; p++) printf("%d ", f1[p]);
            printf("\n");*/
        }
        else{
            // odd k: read dev_f1, write dev_f0
            cudaMemcpy(dev_f0, dev_f1, (capacity+1)*sizeof(int), cudaMemcpyDeviceToDevice);
            knapsackKernel<<<dimGrid, dimBlock>>>(dev_profits, dev_weights, dev_f1, dev_f0, capacity, c, k);
            //cudaDeviceSynchronize();
            /*cudaMemcpy(f0, dev_f0, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
            int p;
            for(p=0; p<=capacity; p++) printf("%d ", f0[p]);
            printf("\n");*/
        }
        k++;
    }
    // blocking copies also synchronize with the last kernel launch
    cudaMemcpy(f0, dev_f0, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(f1, dev_f1, (capacity+1)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_profits);
    cudaFree(dev_weights);
    cudaFree(dev_f0);
    cudaFree(dev_f1);
}
// Reads 10000 (profit, weight) pairs from rand.txt, sets capacity to half the
// total weight, runs the GPU DP, and prints the optimal profit (found in f1
// for even n, f0 for odd n, matching the kernel's ping-pong buffering).
// Fixes: the original dereferenced the fopen() handle without a NULL check,
// never closed the file, and never freed the heap buffers.
int main() {
    int i;
    int n = 10000;
    int *profits = (int*)malloc(n*sizeof(int));
    int *weights = (int*)malloc(n*sizeof(int));
    if(profits == NULL || weights == NULL){
        printf("allocation failed\n");
        return 1;
    }
    FILE *myFile = fopen("rand.txt", "r");
    if(myFile == NULL){
        printf("could not open rand.txt\n");
        free(profits);
        free(weights);
        return 1;
    }
    for (i = 0; i < n; i++)
    {
        fscanf(myFile, "%d %d", &profits[i], &weights[i]);
    }
    fclose(myFile);
    // capacity = half the total weight of all items
    int capacity = 0;
    for(i=0; i<n; i++){
        capacity = capacity + weights[i];
    }
    capacity = capacity/2;
    //capacity = 1000;
    printf("capacity = %d\n", capacity);
    int *f0 = (int *)malloc((capacity+1)*sizeof(int));
    int *f1 = (int *)malloc((capacity+1)*sizeof(int));
    knapsackCuda(profits, weights, capacity, n, f0, f1);
    // the table written by the last (n-th) DP step holds the answer
    if(n%2==0){
        printf("%d\n", f1[capacity]);
    }
    else{
        printf("%d\n", f0[capacity]);
    }
    free(profits);
    free(weights);
    free(f0);
    free(f1);
    return 0;
}
|
20,212 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#define LINE 100000
// Reads up to LINE integers from "number.txt" into num; exits on open failure.
// Fixes: the original `while(!feof(fp))` loop stored the last value twice
// (feof only becomes true AFTER a read fails), and `i` was never bounds
// checked, overflowing num[] if the file held more than LINE values.
void readfile(int num[LINE]){
    FILE *fp = fopen("number.txt", "r");
    if(fp == NULL){
        printf("Error loading file!!\n");
        exit(1);
    }
    int temp;
    int i = 0;
    // stop at EOF/parse failure or once the buffer is full
    while(i < LINE && fscanf(fp, "%d", &temp) == 1){
        num[i] = temp;
        i++;
    }
    fclose(fp);
}
// Writes all LINE integers of num to "update.txt", space separated.
// Fix: the original dereferenced the fopen() handle without a NULL check.
void printfile(int num[LINE]){
    FILE *fp = fopen("update.txt", "w");
    if(fp == NULL){
        printf("Error opening update.txt!!\n");
        return;
    }
    for (int i = 0; i < LINE; i++)
        fprintf(fp, "%d ", num[i]);
    fclose(fp);
}
// Copies all LINE elements of num into num1.
void copyData(int num[LINE], int num1[LINE]){
    for (int idx = 0; idx < LINE; idx++) {
        num1[idx] = num[idx];
    }
}
// Even phase of odd-even transposition sort on the first n elements of dnum:
// thread k compares the pair (2k, 2k+1) and swaps if out of order.
__global__ void even(int *dnum, int n){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    int temp;
    k = k * 2;        // even-indexed left element of this thread's pair
    if(k <= n - 2){   // guard: pair must lie fully inside [0, n)
        if(dnum[k] > dnum[k + 1]){
            temp = dnum[k];
            dnum[k] = dnum[k + 1];
            dnum[k + 1] = temp;
        }
    }
}
// Odd phase of odd-even transposition sort on the first n elements of dnum:
// thread k compares the pair (2k+1, 2k+2) and swaps if out of order.
__global__ void odd(int *dnum, int n){
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    int temp;
    k = k * 2 + 1;    // odd-indexed left element of this thread's pair
    if(k <= n - 2){   // guard: pair must lie fully inside [0, n)
        if(dnum[k] > dnum[k + 1]){
            temp = dnum[k];
            dnum[k] = dnum[k + 1];
            dnum[k + 1] = temp;
        }
    }
}
// Runs `line` rounds of alternating even/odd phases — enough passes for
// odd-even transposition sort to fully order `line` elements of dnum.
// Successive launches on the default stream execute in order, so no explicit
// synchronization is needed between phases.
// NOTE(review): always launches LINE (100000) blocks of 256 threads
// regardless of `line`; surplus threads are filtered by the kernels' bounds
// checks, but the launch is heavily oversized.
void docuda(int *dnum, int line){
    int i;
    for(i = 0; i < line; i++){
        even<<<LINE, 256>>>(dnum, line);
        odd<<<LINE, 256>>>(dnum, line);
    }
}
// Benchmarks the GPU odd-even sort for data sizes 10k..100k (step 10k),
// timing three runs per size and printing a report table.
// Fixes: (1) the original called cudaMalloc inside the 3-run loop on every
// iteration and never freed the buffer, leaking device memory — it is now
// allocated once and released at the end; (2) kernel launches are
// asynchronous, and the original stopped the clock before the sort finished,
// so a cudaDeviceSynchronize() now precedes the second timestamp.
void cuda(int num[LINE], int num1[LINE]){
    int line, i;
    int *dnum;
    struct timeval tv;
    struct timezone tz;
    double start, end, time, time1, time2, average;
    start = 0;
    end = 0;
    time = 0;
    time1 = 0;
    time2 = 0;
    line = 10000;
    average = 0;
    cudaMalloc(&dnum, LINE*sizeof(int));   // allocate once, reuse below
    printf("Time execution for parallel bubble sort using CUDA using 100k block and 256 threads\n");
    printf("================================================================================\n");
    printf(" Number of data 1st time 2nd time 3rd time average \n");
    printf("================================================================================\n");
    while (line <= LINE){
        for (i = 0; i < 3; i++){
            // keep a copy of the current input in num1 before sorting num
            copyData(num, num1);
            cudaMemcpy(dnum, num, LINE*sizeof(int), cudaMemcpyHostToDevice);
            gettimeofday(&tv, &tz);
            start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
            docuda(dnum, line);
            cudaDeviceSynchronize();   // wait for the sort before stopping the clock
            gettimeofday(&tv, &tz);
            end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000;
            cudaMemcpy(num, dnum, LINE*sizeof(int), cudaMemcpyDeviceToHost);
            if (i == 0)
                time = end - start;
            else if (i == 1)
                time1 = end - start;
            else if (i == 2)
                time2 = end - start;
        }
        average = (time + time1 + time2) / 3;
        printf(" %i %fs %fs %fs %fs\n", line, time, time1, time2, average);
        line += 10000;
    }
    cudaFree(dnum);
}
// Load integers from number.txt, benchmark the GPU sort at increasing sizes,
// and write the (sorted) data back out to update.txt.
int main(){
    int num[LINE];    // working buffer (sorted in place by cuda())
    int num1[LINE];   // scratch copy made before each run
    printf("Getting data...\n");
    readfile(num);
    printf("Sorting data...\n\n");
    cuda(num, num1);
    printfile(num);
    printf("\nParallel bubble sort in CUDA sucessfully.\n");
    return 0;
}
20,213 | // 16CO234 Prajval M
// 16CO145 Sumukha PK
#include<stdio.h>
#include<cuda.h>
// Reduction step: thread i folds element n-i-1 into element i, halving the
// active range; the guard leaves the middle element alone when n is odd.
// (Kernel item 7 in the original assignment numbering.)
__global__ void add_vec(float *d_a, int n){ //7.CUDA Kernel that computes sum
    int i = threadIdx.x;
    if((n-i-1)!=i) {
        d_a[i]+=d_a[n-i-1];
    }
}
// Reads an array of floats from stdin and sums it on the GPU by repeatedly
// folding the upper half onto the lower half (add_vec), provided the size
// fits in one thread block. Returns 0 on success, -1 otherwise.
int main(){
    int i, n, deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if(deviceCount > 0){
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, 0); // Use default GPU
        #define MAX_THREAD devProp.maxThreadsPerBlock
        // NOTE(review): scanf return values are not checked below.
        printf("Enter the size of the array: ");
        scanf("%d",&n);
        if(n < MAX_THREAD){ // Do not sum if array size is greater than thread-size per block
            float *h_a;
            h_a = (float *)malloc(sizeof(float)*n);
            printf("Enter the values of the array: ");
            for(i=0;i<n;i++)
                scanf("%f",&h_a[i]);
            float *d_a;
            cudaMalloc((void**)&d_a,n*sizeof(float)); //1.Allocate device memory
            cudaMemcpy(d_a,h_a,n*sizeof(float),cudaMemcpyHostToDevice); //2.Copy host memory to device
            int Block_size = 1, threads_used; //3.Initialise thread block and kernel grid dimensions
            // Halve the active range each pass; odd sizes round up (i/2 + 1)
            // so the untouched middle element stays in range.
            for(i=n;i>1;i= i%2?i/2 + 1:i/2){
                threads_used = i/2;   // one thread per folded pair
                add_vec<<<Block_size,threads_used>>>(d_a,i); //4.Invoke kernel
            }
            float z;
            // blocking copy also synchronizes with the last kernel launch
            cudaMemcpy(&z,&d_a[0],sizeof(float),cudaMemcpyDeviceToHost); //5.Copy results from device to host
            cudaFree(d_a); //6.Free device memory
            free(h_a); // free host memory
            printf("The sum is : %f \n",z);
            return 0;
        }
        else{
            printf("Not Supported :( \n");
        }
    }
    else{
        printf("Nvidia GPU not not found");
    }
    return -1;
}
|
20,214 | #include <memory>
#include <iostream>
#include <cuda_runtime.h>
// Main Program
// Enumerates CUDA devices, prints a device-properties report for device 0,
// then uses cudaChooseDevice to find a device with compute capability >= 1.3
// (double-precision support) and makes it current.
int main(void)
{
    int device_Count = 0;
    cudaGetDeviceCount(&device_Count);
    // This function returns count of number of CUDA enable devices and 0 if there are no CUDA capable devices.
    if (device_Count == 0)
    {
        printf("There are no available device(s) that support CUDA\n");
    }
    else
    {
        printf("Detected %d CUDA Capable device(s)\n", device_Count);
    }
    // NOTE(review): the property dump below unconditionally queries device 0
    // even when device_Count == 0, and "global memory" is printed twice;
    // the "Multiprocessors" line also lacks a trailing newline.
    int device = 0;
    int driver_Version, runtime_Version;
    cudaDeviceProp device_Property;
    cudaGetDeviceProperties(&device_Property, device);
    printf("\nDevice %d: \"%s\"\n", device, device_Property.name);
    cudaDriverGetVersion(&driver_Version);
    cudaRuntimeGetVersion(&runtime_Version);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driver_Version / 1000, (driver_Version % 100) / 10, runtime_Version / 1000, (runtime_Version % 100) / 10);
    printf( " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
    (float)device_Property.totalGlobalMem / 1048576.0f, (unsigned long long) device_Property.totalGlobalMem);
    printf(" (%2d) Multiprocessors", device_Property.multiProcessorCount );
    printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n", device_Property.clockRate * 1e-3f, device_Property.clockRate * 1e-6f);
    printf("\n");
    printf( " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
    (float)device_Property.totalGlobalMem / 1048576.0f, (unsigned long long) device_Property.totalGlobalMem);
    printf(" Memory Clock rate: %.0f Mhz\n", device_Property.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n", device_Property.memoryBusWidth);
    if (device_Property.l2CacheSize)
    {
        printf(" L2 Cache Size: %d bytes\n", device_Property.l2CacheSize);
    }
    printf(" Total amount of constant memory: %lu bytes\n", device_Property.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n", device_Property.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n", device_Property.regsPerBlock);
    printf("\n");
    printf(" Maximum number of threads per multiprocessor: %d\n", device_Property.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n", device_Property.maxThreadsPerBlock);
    printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
    device_Property.maxThreadsDim[0],
    device_Property.maxThreadsDim[1],
    device_Property.maxThreadsDim[2]);
    printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
    device_Property.maxGridSize[0],
    device_Property.maxGridSize[1],
    device_Property.maxGridSize[2]);
    printf("\n");
    // Choose a device matching (at least) compute capability 1.3 — the
    // minimum for double precision — and make it the current device.
    cudaDeviceProp device_property;
    cudaGetDevice(&device);
    printf("ID of device: %d\n", device);
    memset(&device_property, 0, sizeof(cudaDeviceProp));
    device_property.major = 1;
    device_property.minor = 3;
    cudaChooseDevice(&device, &device_property);
    printf("ID of device which supports double precision is: %d\n", device);
    cudaSetDevice(device);
}
20,215 | #include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
// Array access macros
#define im(i,j) A[(i) + (j)*(m)]
#define f(i,j) f[(i) + (j)*(m)]
#define Z(i,j) Z[(i) + (j)*m]
// Non-Local Means, pass 1: for each pixel (x, y) of the m x n image A,
// accumulates the normalizing constant
//   Z(x,y) += exp(-||patch(x,y) - patch(p,l)||^2_H / filtsigma)
// over every interior patch center (p, l), where H holds one weight per
// patch offset. `patch` arrives as the full (odd) patch width and is
// converted to a radius below. Z is accumulated (+=), so the caller must
// zero it first — TODO confirm.
// NOTE(review): patchSigma is unused here; exp() is the double-precision
// overload on float operands.
__global__ void Zev(float const * const A, float *Z,float const * const H, int m, int n,int patch,float patchSigma,float filtsigma){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x<m-(patch-1)/2 && y<n-(patch-1)/2){
        int i,j,p,l,count=0;
        patch=(patch-1) /2;   // reinterpret as patch radius
        float temp=0.0,sum=0;
        for(p=patch;p<m-patch;p++){
            for(l=patch;l<n-patch;l++){
                // weighted squared difference between the two patches
                for(i=-patch;i<=patch;i++){
                    for(j=-patch;j<=patch;j++){
                        temp=(im(x+patch+i,y+patch+j)-im(p+i,l+j))*H[count];
                        sum=sum+temp*temp;
                        count++;
                    }
                }
                Z(x+patch,y+patch)=Z(x+patch,y+patch)+exp(-(sum/(filtsigma)));
                sum=0;
                count=0;
            }
        }
    }
}
// Non-Local Means, pass 2: using the normalizers computed by Zev, each pixel
// accumulates its filtered value as the Z-normalized, similarity-weighted
// average of all interior pixels:
//   f(x,y) += (1/Z(x,y)) * exp(-||patch diff||^2_H / filtsigma) * A(p,l)
// f is accumulated (+=), so the caller must zero it first — TODO confirm.
// NOTE(review): patchSigma is unused here as well.
__global__ void fev(float const * const A,float const * const Z, float *f,float const * const H, int m, int n,int patch,float patchSigma,float filtsigma){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x<m-(patch-1)/2 && y<n-(patch-1)/2){
        patch=(patch-1) /2;   // reinterpret as patch radius
        int i,j;
        float temp,sum=0.0;
        int p,l,count=0;
        for(p=patch;p<m-patch;p++){
            for(l=patch;l<n-patch;l++){
                // same weighted patch distance as in Zev
                for(i=-patch;i<=patch;i++){
                    for(j=-patch;j<=patch;j++){
                        temp=(im(x+patch+i,y+patch+j)-im(p+i,l+j))*H[count];
                        sum=sum+temp*temp;
                        count++;
                    }
                }
                count=0;
                f(x+patch,y+patch)=f(x+patch,y+patch)+((1/Z(x+patch,y+patch))*exp(-(sum/filtsigma)))*im(p,l);
                sum=0;
            }
        }
    }
}
|
20,216 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <fstream>
#include <time.h>
using namespace std;
// Box blur over a flattened lines x cols x channels image: one thread per
// (line, col, channel) sample averages a scale x scale window around it
// (integer division; out-of-frame neighbors are excluded from the average).
__global__ void blur(int* flat, int* result, int lines, int cols, int channels, int scale) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < lines * cols * channels) {
        // decompose the flat index into (line, col, channel)
        int line = tid / (cols * channels);
        int rest = tid % (cols * channels);
        int col = rest / channels;
        int channel = rest % channels;
        int sum = 0;
        int nr = 0;   // number of in-bounds neighbors actually summed
        for (int i = 0 - scale / 2;i <= scale / 2;i++)
            for (int j = 0 - scale / 2;j <= scale / 2;j++) {
                int line2 = line + i;
                int col2 = col + j;
                if (line2 >= 0 && line2 < lines && col2 >= 0 && col2 < cols) { // check if pixel is outside of frame
                    nr++;
                    int index = line2 * cols * channels + col2 * channels + channel;
                    sum += flat[index];
                }
            }
        result[tid] = sum / nr;   // integer mean over in-bounds samples
    }
}
// CPU reference for the `blur` kernel: box-blurs a flattened
// lines x cols x channels image, averaging each sample over a scale x scale
// window (integer division; out-of-frame neighbors excluded).
// Fix: the original reused `i` and `j` for the inner window loops, shadowing
// the outer pixel-index `i` — legal C++, but an accident waiting to happen;
// renamed to di/dj.
void blurSecvential(int* flat, int* result, int lines, int cols, int channels, int scale) {
    for (int i = 0; i < lines * cols * channels; i++) {
        // decompose the flat index into (line, col, channel)
        int line = i / (cols * channels);
        int rest = i % (cols * channels);
        int col = rest / channels;
        int channel = rest % channels;
        int sum = 0;
        int nr = 0;   // number of in-bounds neighbors actually summed
        for (int di = 0 - scale / 2; di <= scale / 2; di++) {
            for (int dj = 0 - scale / 2; dj <= scale / 2; dj++) {
                int line2 = line + di;
                int col2 = col + dj;
                if (line2 >= 0 && line2 < lines && col2 >= 0 && col2 < cols) { // check if pixel is outside of frame
                    nr++;
                    int index = line2 * cols * channels + col2 * channels + channel;
                    sum += flat[index];
                }
            }
        }
        result[i] = sum / nr;   // integer mean over in-bounds samples
    }
}
// Serializes a lines x cols x channels nested image into one contiguous,
// malloc'd array ordered line-major, then column, then channel.
int* flatten(int*** matrix, int lines, int cols, int channels) {
    int* flat = (int*)malloc(lines * cols * channels * sizeof(int));
    int pos = 0;
    for (int line = 0; line < lines; line++)
        for (int col = 0; col < cols; col++)
            for (int ch = 0; ch < channels; ch++)
                flat[pos++] = matrix[line][col][ch];
    return flat;
}
// Inverse of flatten: rebuilds a malloc'd lines x cols x channels nested
// image from a contiguous array in line-major/column/channel order.
int*** unflatten(int* arr, int lines, int cols, int channels) {
    int*** img = (int***)malloc(lines * sizeof(int**));
    int pos = 0;
    for (int line = 0; line < lines; line++) {
        img[line] = (int**)malloc(cols * sizeof(int*));
        for (int col = 0; col < cols; col++) {
            img[line][col] = (int*)malloc(channels * sizeof(int));
            for (int ch = 0; ch < channels; ch++)
                img[line][col][ch] = arr[pos++];
        }
    }
    return img;
}
// Pipeline: use a Python helper to decode <name>.png into pixels.txt, blur
// the image once on the GPU and once on the CPU (timing both), write each
// result to pixels2.txt, and have a Python helper encode it back to a PNG.
// NOTE(review): neither the host buffers nor deviceMatrix/deviceResult are
// ever freed; the GPU timing also includes cudaMalloc and both copies.
int main()
{
    string name = "cake2";
    string readCmd = "python read.py " + name + ".png";
    string writeCmd = "python write.py " + name + "blur.png";
    int scale = 45;   // blur window edge length, in pixels
    const char* command = readCmd.c_str();
    system(command);
    //read matrix
    ifstream fin("pixels.txt");
    int lines, cols, channels;
    fin >> lines >> cols >> channels;
    int*** matrix = (int***)malloc(lines * sizeof(int**));
    for (int i = 0; i < lines; i++) {
        matrix[i] = (int**)malloc(cols * sizeof(int*));
        for (int j = 0; j < cols; j++) {
            int* rgb = (int*)malloc(channels * sizeof(int));
            fin >> rgb[0] >> rgb[1] >> rgb[2];
            matrix[i][j] = rgb;
        }
    }
    fin.close();
    int* flat = flatten(matrix, lines, cols, channels);
    int size = lines * cols * channels;   // total sample count
    int* result = (int*)malloc(size * sizeof(int));
    int* deviceMatrix, * deviceResult;
    clock_t begin = clock();
    cudaMalloc(&deviceMatrix, size * sizeof(int));
    cudaMalloc(&deviceResult, size * sizeof(int));
    cudaMemcpy(
        deviceMatrix, flat,
        size * sizeof(int),
        cudaMemcpyHostToDevice
    );
    // one thread per sample, grid rounded up to cover `size`
    int num_threads = 1 <<10;
    int num_blocks = (size + num_threads - 1) / num_threads;
    blur <<<num_blocks, num_threads >>> (deviceMatrix, deviceResult, lines, cols, channels, scale);
    // this blocking copy also synchronizes with the kernel
    cudaMemcpy(
        result, deviceResult,
        size * sizeof(int),
        cudaMemcpyDeviceToHost
    );
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    int*** resultMatrix = unflatten(result, lines, cols, channels);
    ofstream out("pixels2.txt");
    out << lines << " " << cols << " " << channels << "\n";
    for (int i = 0; i < lines; i++) {
        for (int j = 0; j < cols; j++) {
            for (int k = 0; k < channels; k++) {
                out << resultMatrix[i][j][k] << " ";
            }
            out << "\n";
        }
    }
    out.close();
    command = writeCmd.c_str();
    system(command);
    printf("Paralel: %f seconds", time_spent);
    // CPU pass over the same input for comparison
    begin = clock();
    blurSecvential(flat, result, lines, cols, channels, scale);
    end = clock();
    resultMatrix = unflatten(result, lines, cols, channels);
    // pixels2.txt is overwritten with the CPU result here
    ofstream fout("pixels2.txt");
    fout << lines << " " << cols << " " << channels << "\n";
    for (int i = 0; i < lines; i++) {
        for (int j = 0; j < cols; j++) {
            for (int k = 0; k < channels; k++) {
                fout << resultMatrix[i][j][k] << " ";
            }
            fout << "\n";
        }
    }
    fout.close();
    writeCmd = "python write.py " + name + "blur_secv.png";
    command = writeCmd.c_str();
    system(command);
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Secvential: %f seconds", time_spent);
    return 0;
}
20,217 | #include "includes.h"
// Processes interleaved pairs: for each even idx it overwrites data2[idx]
// with sqrt(data1[idx]*data2[idx] + data1[idx+1]*data2[idx+1]) and zeroes
// data2[idx+1]. MAX_THREADS comes from includes.h and presumably equals the
// launch's threads-per-block — TODO confirm.
// NOTE(review): the guard checks idx < totaltc but idx+1 is also read;
// since idx is always even this is safe only when totaltc is even — verify
// totaltc is the total (even) element count, not the pair count.
__global__ void mcfauto_kernal(const float* data1, float* data2, const int totaltc)
{
    // two elements per thread, hence the factor of 2
    int idx = 2*(threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS);
    if(idx < totaltc){
        data2[idx] = sqrt(data1[idx] * data2[idx] + data1[idx + 1] * data2[idx + 1]);
        data2[idx + 1] = 0;
    }
}
20,218 | // Hilos y Bloques
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define COLUMNAS 10 // Nro de columnas -> eje x
#define FILAS 6 // Nro de filas -> eje y
// Kernel Bidimensional (x, y)
// Two-dimensional (x, y) kernel: transposes a FILAS x COLUMNAS input matrix
// into `salida`, using a single block with one thread per element.
__global__
void MathFinal(int *entrada, int *salida)
{
    // column index: x axis
    int columna = threadIdx.x;
    // row index: y axis
    int fila = threadIdx.y;
    // single-block kernel: linear (row-major) index into the input
    int globalID = columna + fila * COLUMNAS;
    // linear index of the transposed position
    int idTrasp = fila + columna * FILAS;
    // write the element to its transposed slot in the output matrix
    salida[idTrasp] = entrada[globalID];
}
// Demo: transpose a FILAS x COLUMNAS matrix of sequential ints on the GPU
// with a single 2D block, then print both matrices.
// FIX: the original leaked every allocation — host buffers and device
// buffers were never released before returning.
int main(int argc, char** argv) {
    // host and device buffers
    int *hst_Entrada, *hst_Salida;
    int *dev_Entrada, *dev_Salida;
    // host allocations
    hst_Entrada = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
    hst_Salida = (int*)malloc(FILAS*COLUMNAS * sizeof(int));
    // device allocations
    cudaMalloc((void**)&dev_Entrada, FILAS*COLUMNAS * sizeof(int));
    cudaMalloc((void**)&dev_Salida, FILAS*COLUMNAS * sizeof(int));
    // initialization: sequential numbers starting at 1
    for (int i=0; i<(FILAS*COLUMNAS); i++) {
        hst_Entrada[i] = i + 1;
        hst_Salida[i] = 0;
    }
    // kernel dimensions: one block, 2D (x -> COLUMNAS, y -> FILAS)
    dim3 Nbloques(1);
    dim3 hilosB(COLUMNAS, FILAS);
    // copy input to the device
    cudaMemcpy(dev_Entrada, hst_Entrada, FILAS*COLUMNAS * sizeof(int), cudaMemcpyHostToDevice);
    // report the launch configuration
    printf("> KERNEL de 1 BLOQUE con %d HILOS:\n", COLUMNAS*FILAS);
    printf(" eje x -> %2d hilos\n eje y -> %2d hilos\n", COLUMNAS, FILAS);
    // launch the transpose kernel
    MathFinal <<< Nbloques, hilosB >>>(dev_Entrada, dev_Salida);
    // copy the result back to the host
    cudaMemcpy(hst_Salida, dev_Salida, FILAS*COLUMNAS * sizeof(int), cudaMemcpyDeviceToHost);
    // print the original matrix (FILAS rows x COLUMNAS cols)
    printf("> MATRIZ ORIGINAL:\n");
    for (int i = 0; i<FILAS; i++) {
        for (int j = 0; j<COLUMNAS; j++) {
            printf("%3d ", hst_Entrada[j + i*COLUMNAS]);
        }
        printf("\n");
    }
    printf("\n");
    // print the transposed matrix (COLUMNAS rows x FILAS cols)
    printf("> MATRIZ FINAL:\n");
    for (int i = 0; i<COLUMNAS; i++) {
        for (int j = 0; j<FILAS; j++) {
            printf("%3d ", hst_Salida[j + i*FILAS]);
        }
        printf("\n");
    }
    // FIX: release device and host memory (previously leaked)
    cudaFree(dev_Entrada);
    cudaFree(dev_Salida);
    free(hst_Entrada);
    free(hst_Salida);
    // wait for user before exiting
    printf("\n<pulsa [INTRO] para finalizar>\n");
    getchar();
    return 0;
}
|
20,219 | // Computes adjacencies matrix in parallel
// For each row of a CSR graph whose update flag is set, marks the neighbours
// that are inside the component by writing 1 into the dense n x n adjacency
// matrix. Single-block launch assumed: thread x owns row x.
__global__ void compute_adjacent_nodes(int *indptr, int *indices, float *in_component, float *update_values, float *adjacencies, int n)
{
    const int row = threadIdx.x;
    if (update_values[row] == 0)
        return;
    const int rowBase = row * n;
    const int rowEnd = indptr[row + 1];
    for (int k = indptr[row]; k < rowEnd; k++) {
        const int nbr = indices[k];
        if (in_component[nbr] == 1)
            adjacencies[rowBase + nbr] = 1;
    }
}
// Grid-stride loop helper: each thread handles indices i, i+stride, ...
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Linear offset into a contiguous NCHW tensor.
#define INDEX(b,c,h,w,channels,height,width) ((b * channels + c) * height + h) * width+ w
// IRNN forward pass: for every output element, sequentially accumulates a
// ReLU-recurrent scan from each of the four image borders toward that
// element's position, using one scalar weight and bias per channel and
// direction. Each thread recomputes its own full scan (O(width) / O(height)
// work per element) — simple but redundant across threads.
extern "C" __global__ void IRNNForward(
const float* input_feature,
const float* weight_up,
const float* weight_right,
const float* weight_down,
const float* weight_left,
const float* bias_up,
const float* bias_right,
const float* bias_down,
const float* bias_left,
float* output_up,
float* output_right,
float* output_down,
float* output_left,
const int channels,
const int height,
const int width,
const int n){
CUDA_KERNEL_LOOP(index,n){
// decompose the flat NCHW index
int w = index % width;
int h = index / width % height;
int c = index / width / height % channels;
int b = index / width / height / channels;
float temp = 0;
// left: scan from the rightmost column (width-1) leftwards to column w,
// seeding with ReLU of the border pixel
output_left[index] = input_feature[INDEX(b, c, h, width-1, channels, height, width)] > 0 ? input_feature[INDEX(b, c, h, width-1, channels, height, width)] : 0;
for (int i = width-2; i>=w; i--)
{
temp = output_left[index] * weight_left[c] + bias_left[c] + input_feature[INDEX(b, c, h, i, channels, height, width)];
output_left[index] = (temp > 0)? temp : 0;
}
// right: scan from column 0 rightwards to column w
output_right[index] = input_feature[INDEX(b, c, h, 0, channels, height, width)] > 0 ? input_feature[INDEX(b, c, h, 0, channels, height, width)] : 0;
for (int i = 1; i <= w; i++)
{
temp = output_right[index] * weight_right[c] + bias_right[c] + input_feature[INDEX(b, c, h, i, channels, height, width)];
output_right[index] = (temp > 0)? temp : 0;
}
// up: scan from the bottom row (height-1) upwards to row h
output_up[index] = input_feature[INDEX(b,c,height-1,w,channels,height,width)] > 0 ? input_feature[INDEX(b,c,height-1,w,channels,height,width)] : 0;
for (int i = height-2; i >= h; i--)
{
temp = output_up[index] * weight_up[c] + bias_up[c] + input_feature[INDEX(b, c, i, w, channels, height, width)];
output_up[index] = (temp > 0)? temp : 0;
}
// down: scan from row 0 downwards to row h
output_down[index] = input_feature[INDEX(b, c, 0, w, channels, height, width)] > 0 ? input_feature[INDEX(b, c, 0, w, channels, height, width)] : 0;
for (int i = 1; i <= h; i++)
{
temp = output_down[index] * weight_down[c] + bias_down[c] + input_feature[INDEX(b, c, i, w, channels, height, width)];
output_down[index] = (temp > 0)? temp : 0;
}
}
}
20,221 | #include "Vector3.cuh"
#include <cuda_runtime.h>
#ifdef __INTELLISENSE__
//#define __CUDACC__
#include <math_functions.h>
#endif // __INTELLISENSE__
// In-place component-wise addition: dst += src; returns dst for chaining.
Vector3* addVector3(Vector3* dst, Vector3* src) {
    dst->x += src->x;
    dst->y += src->y;
    dst->z += src->z;
    return dst;
}
// In-place uniform scale: dst *= src; returns dst for chaining.
Vector3* scaleVector3(Vector3* dst, float src) {
    dst->x *= src;
    dst->y *= src;
    dst->z *= src;
    return dst;
}
// In-place component-wise subtraction: dst -= src; returns dst for chaining.
// BUG FIX: the previous body was a copy/paste of addVector3 and ADDED the
// components, so "sub" silently behaved as addition.
Vector3* subVector3(Vector3* dst, Vector3* src) {
    dst->x = dst->x - src->x;
    dst->y = dst->y - src->y;
    dst->z = dst->z - src->z;
    return dst;
}
// Normalizes dst in place via the fast reciprocal square root; returns dst.
// NOTE(review): a zero-length vector yields infinite components.
Vector3* normVector3(Vector3* dst) {
    float lenSq = dst->x*dst->x + dst->y*dst->y + dst->z*dst->z;
    return scaleVector3(dst, rsqrtf(lenSq));
}
// Euclidean length (magnitude) of the vector.
float magVector3(Vector3* dst) {
    float lenSq = dst->x*dst->x + dst->y*dst->y + dst->z*dst->z;
    return sqrtf(lenSq);
}
// Copies src's components into dst; returns dst for chaining.
Vector3* copyVector3(Vector3* dst, Vector3* src) {
    dst->z = src->z;
    dst->y = src->y;
    dst->x = src->x;
    return dst;
}
// Copies an {x, y, z} std::array into dst; returns dst for chaining.
Vector3* copyVector3(Vector3* dst, std::array<float, 3>& src) {
    dst->z = src[2];
    dst->y = src[1];
    dst->x = src[0];
    return dst;
}
20,222 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_profiler_api.h>
#include <assert.h>
// Pass-through checker for CUDA runtime API results: wrap any runtime call.
// In debug builds (DEBUG/_DEBUG) it prints the error string and aborts via
// assert; in release builds it is a no-op and just forwards the result.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
    if (cudaSuccess != result) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
#endif
    return result;
}
// Naive dense matrix multiply: Pd = Md * Nd for square Width x Width
// matrices; one thread per output element, Tile_Width = block edge length.
// BUG FIX: added a bounds guard. The host rounds the grid up
// (GridSize = ceil(Width / BlockSize)), so whenever Width is not a multiple
// of the block size the edge threads read and wrote out of bounds.
__global__ void matMul(float* Pd, float* Md, float* Nd, int Width, int Tile_Width) {
    int j = blockIdx.x * Tile_Width + threadIdx.x;
    int i = blockIdx.y * Tile_Width + threadIdx.y;
    if (i < Width && j < Width) {
        float Pvalue = 0.0;
        for (int k = 0; k < Width; ++k) {
            Pvalue += Md[i * Width + k] * Nd[k * Width + j];
        }
        Pd[i * Width + j] = Pvalue;
    }
}
// Fills a length-'size' buffer with pseudo-random floats drawn from
// drand48() (seed with srand48() beforehand for reproducibility).
void randomInit(float* data, int size) {
    for (int idx = 0; idx < size; ++idx)
        data[idx] = (float)drand48();
}
// Benchmarks the naive matMul kernel under a selectable L1 cache preference
// and validates the result diagonal against a precomputed reference file.
// BUG FIX: char fileName[20] was exactly filled by "apps/matMul/matMul_"
// plus its terminator, so the following strcat/sprintf calls wrote past the
// end of the buffer (and bufferWidth[5] overflowed for 5+ digit widths).
// The path is now built with one bounded snprintf into a larger buffer.
int main(int argc, char* argv[])
{
    if (argc != 4) {
        fprintf(stderr, "Syntax: %s <matrix size Width> < Block_size> <CacheConfL1> \n", argv[0]);
        return EXIT_FAILURE;
    }
    int Width = atoi(argv[1]);
    int BlockSize = atoi(argv[2]);
    int devId = 0;
    int CacheConfL1 = atoi(argv[3]);
    checkCuda( cudaSetDevice(devId) );
    cudaDeviceReset();
    // allocate host memory for matrices M and N
    printf("Allocate host memory for matrices M and N...\n");
    float* M = (float*) malloc(Width * Width * sizeof(float));
    float* N = (float*) malloc(Width * Width * sizeof(float));
    float* P = (float*) malloc(Width * Width * sizeof(float));
    // set seed for drand48()
    srand48(42);
    // initialize host matrices
    printf("Initialize host matrices...\n");
    randomInit(M, Width*Width);
    randomInit(N, Width*Width);
    // allocate device matrices (linearized)
    printf("Allocate device matrices (linearized)...\n");
    float* Md = NULL;
    float* Nd = NULL;
    float* Pd = NULL;
    checkCuda( cudaMalloc((void**) &Md, Width * Width * sizeof(float)) );
    checkCuda( cudaMalloc((void**) &Nd, Width * Width * sizeof(float)) );
    checkCuda( cudaMalloc((void**) &Pd, Width * Width * sizeof(float)) );
    // copy host memory to device
    checkCuda( cudaMemcpy(Md, M, Width*Width*sizeof(float), cudaMemcpyHostToDevice) );
    checkCuda( cudaMemcpy(Nd, N, Width*Width*sizeof(float), cudaMemcpyHostToDevice) );
    // execute the kernel under the requested L1 cache configuration
    printf("Execute the kernel...\n");
    if (CacheConfL1 == 1){
        cudaFuncSetCacheConfig(matMul, cudaFuncCachePreferShared);
    }
    else if (CacheConfL1 == 2){
        cudaFuncSetCacheConfig(matMul, cudaFuncCachePreferEqual);
    }
    else if (CacheConfL1 == 3){
        cudaFuncSetCacheConfig(matMul, cudaFuncCachePreferL1);
    }
    else {
        cudaFuncSetCacheConfig(matMul, cudaFuncCachePreferNone);
    }
    // round the grid up so partial tiles at the edges are covered
    int GridSize = (Width + BlockSize-1) / BlockSize;
    dim3 gridDim(GridSize, GridSize);
    dim3 blockDim(BlockSize, BlockSize);
    cudaProfilerStart();
    matMul<<< gridDim, blockDim >>>(Pd, Md, Nd, Width, BlockSize);
    cudaProfilerStop();
    // copy result from device to host
    checkCuda( cudaMemcpy( P, Pd, Width * Width * sizeof(float),cudaMemcpyDeviceToHost) );
    cudaDeviceProp prop;
    checkCuda( cudaGetDeviceProperties(&prop, devId) );
    printf("Device: %s\n", prop.name);
    float* Pt = (float*) malloc(Width * sizeof(float));
    // Assert Process: compare the result diagonal with the reference file
    char fileName[64];
    snprintf(fileName, sizeof(fileName), "apps/matMul/matMul_%d.out", Width);
    FILE *ptr_file;
    ptr_file =fopen(fileName, "r");
    assert(ptr_file);
    for (int i=0; i < Width; i++){
        fscanf(ptr_file, "%f", &Pt[i]);
    }
    fclose(ptr_file);
    printf("Assertion started\n");
    for(int i=0 ;i<Width; i++) {
        assert(fabs(P[i * Width + i] - Pt[i]) < 0.1);
    }
    printf("Assertion Finished");
    // clean up memory
    free(M);
    free(N);
    free(P);
    free(Pt);
    checkCuda( cudaFree(Md) );
    checkCuda( cudaFree(Nd) );
    checkCuda( cudaFree(Pd) );
    return 0;
}
|
20,223 | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "tree23_array_help.cu"
// Next free slot in the flat tree array; each node occupies 7 ints.
int offsetTotal_h = 0;
// Number of nodes allocated so far (used by print_tree).
int numNodes = 0;
// Allocates a new node in the flat array at 'offset' and returns that offset.
// Node layout (7 ints, matching print_tree):
//   [0] own offset  [1] key0 (data)  [2] key1 (-1 = unset)
//   [3] parent (-2 = null)  [4..6] child0..child2 (-2 = null)
// Side effects: advances global offsetTotal_h by 7 and increments numNodes.
__host__ int createNode_arr(int *arr, int offset, int data)
{
arr[offset] = offset;
arr[offset+1] = data;
arr[offset+2] = -1;
arr[offset+3] = -2;
arr[offset+4] = -2;
arr[offset+5] = -2;
arr[offset+6] = -2;
numNodes++;
offsetTotal_h += 7;
return offset;
}
__host__ void insert_help_arr(int *arr, int index, int newChild, int newSmallest);
// Insert newChild into a node ('index') that currently has one child.
// Places newChild as child0 or child1 depending on key order and refreshes
// key0 (which holds the smallest key of the second child's subtree).
// newSmallest is the smallest key of newChild's subtree.
__host__ void insert1Siblings_arr(int* arr, int index, int newChild, int newSmallest)
{
//printf("1sib, index: %d, newchilde: %d, newSmallest: %d\n", index, newChild, newSmallest);
int newKey = getKey0(arr, newChild);
arr[parentIndex(newChild)] = index; //newChild->parent = node;
if (newKey < getKey0(arr, getChild0(arr, index))) { //node->child[0]->key[0]
// newNode is inserted as first child: shift the old child right
arr[child1Index(index)] = getChild0(arr,index); //node->child[1] = node->child[0];
arr[child0Index(index)] = newChild; //node->child[0] = newChild;
arr[key0Index(index)] = getSmallest_arr(arr, getChild1(arr, index)); //node->key[0] = getSmallest_d(node->child[1]);
}
else {
// newNode is inserted as second child
arr[child1Index(index)] = newChild; //node->child[1] = newChild;
arr[key0Index(index)] = newSmallest; //node->key[0] = newSmallest;
}
}
// Insert newChild into a node ('index') that currently has two children.
// Chooses one of three ordered positions, shifting existing children right
// and updating the separator keys; when newChild becomes child0 the parent's
// smallest-key bookkeeping is propagated upward.
__host__ void insert2Siblings_arr(int* arr, int index, int newChild, int newSmallest)
{
//printf("2sibs\n");
int newKey = getKey0(arr, newChild); //newChild->key[0];
arr[parentIndex(newChild)] = index; //newChild->parent = node;
if (newKey < getKey0(arr, getChild0(arr, index)) ) { //node->child[0]->key[0]) {
// becomes child0: shift both children right, fix both keys
arr[child2Index(index)] = getChild1(arr,index); //node->child[2] = node->child[1];
arr[child1Index(index)] = getChild0(arr,index); //node->child[1] = node->child[0];
arr[child0Index(index)] = newChild; //node->child[0] = newChild;
arr[key1Index(index)] = getKey0(arr, index); //node->key[1] = node->key[0];
arr[key0Index(index)] = getSmallest_arr(arr, getChild1(arr, index)); //node->key[0] = getSmallest_d(node->child[1]);
updateParentSmallest_arr(arr, index, newSmallest); //updateParentSmallest_d(node, newSmallest);
}
else if ( newKey < getKey0(arr, getChild1(arr, index)) ) {//(newKey < node->child[1]->key[0]) {
// becomes child1: shift only the old child1
arr[child2Index(index)] = getChild1(arr, index); //node->child[2] = node->child[1];
arr[child1Index(index)] = newChild; //node->child[1] = newChild;
arr[key1Index(index)] = getKey0(arr, index); //node->key[1] = node->key[0];
arr[key0Index(index)] = newSmallest; //node->key[0] = newSmallest;
}
else {
// becomes child2: append at the end
arr[child2Index(index)] = newChild; //node->child[2] = newChild;
arr[key1Index(index)] = newSmallest; //node->key[1] = newSmallest;
}
}
// Insert newChild into a full node ('index', 3 children): splits the node.
// The original node keeps the two smallest children; a freshly allocated
// splitNode takes the two largest; the split then recurses into the parent
// via insert_help_arr. If the parent is the sentinel root, a new root level
// is created first.
__host__ void insert3Siblings_arr(int *arr, int index, int newChild, int newSmallest) {
//printf("3sibs\n");
int newKey = getKey0(arr, newChild); //int newKey = newChild->key[0];
int splitSmallest = -1;
int splitNode = createNode_arr(arr, offsetTotal_h, -1);
arr[parentIndex(splitNode)] = getParent(arr, index); //splitNode->parent = node->parent;
if (newKey < getKey0(arr, getChild0(arr, index)) || newKey < getKey0(arr, getChild1(arr,index)) ) { //(newKey < node->child[0]->key[0] || newKey < node->child[1]->key[0]) {
// newChild is inserted in current node; old child1/child2 move to splitNode
splitSmallest = getKey0(arr,index); //splitSmallest = node->key[0];
arr[child0Index(splitNode)] = getChild1(arr, index); //splitNode->child[0] = node->child[1];
arr[child1Index(splitNode)] = getChild2(arr, index); //splitNode->child[1] = node->child[2];
arr[key0Index(splitNode)] = getKey1(arr, index); //splitNode->key[0] = node->key[1];
arr[parentIndex(getChild1(arr, index))] = splitNode; //node->child[1]->parent = splitNode;
arr[parentIndex(getChild2(arr, index))] = splitNode; //node->child[2]->parent = splitNode;
arr[parentIndex(newChild)] = index; //newChild->parent = node;
if (newKey < getKey0(arr, getChild0(arr, index))) {//(newKey < node->child[0]->key[0]) {
// newChild is inserted as first child
arr[child1Index(index)] = getChild0(arr, index); //node->child[1] = node->child[0];
arr[child0Index(index)] = newChild; //node->child[0] = newChild;
arr[key0Index(index)] = getSmallest_arr(arr, getChild1(arr, index)); //node->key[0] = getSmallest_d(node->child[1]);
updateParentSmallest_arr(arr, index, newSmallest); //updateParentSmallest_d(node, newSmallest);
}
else {
// newChild is inserted as second child
arr[child1Index(index)] = newChild; //node->child[1] = newChild;
arr[key0Index(index)] = newSmallest; //node->key[0] = newSmallest;
}
}
else {
// newChild is inserted in split node, alongside old child2
arr[parentIndex(getChild2(arr, index))] = splitNode; //node->child[2]->parent = splitNode;
arr[parentIndex(newChild)] = splitNode; //newChild->parent = splitNode;
if (newKey < getKey0(arr, getChild2(arr, index)) ) { //(newKey < node->child[2]->key[0]) {
// newChild is inserted as first child of splitNode
splitSmallest = newSmallest;
arr[child0Index(splitNode)] = newChild; //splitNode->child[0] = newChild;
arr[child1Index(splitNode)] = getChild2(arr, index); //splitNode->child[1] = node->child[2];
arr[key0Index(splitNode)] = getKey1(arr, index); //splitNode->key[0] = node->key[1];
}
else {
// newChild is inserted as second child of splitNode
splitSmallest = getKey1(arr, index); //splitSmallest = node->key[1];
arr[child0Index(splitNode)] = getChild2(arr, index); //splitNode->child[0] = node->child[2];
arr[child1Index(splitNode)] = newChild; //splitNode->child[1] = newChild;
arr[key0Index(splitNode)] = newSmallest; //splitNode->key[0] = newSmallest;
}
}
// the original node is left with two children after the split
arr[child2Index(index)] = -2; //node->child[2] = NULL;
arr[key1Index(index)] = -1; //node->key[1] = -1;
if (getParent(arr, getParent(arr, index)) == -2) { //(node->parent->parent == NULL) {
// At root, so new root needs to be created
int newNode = createNode_arr(arr, offsetTotal_h, -1);
//printf("new root: %d", newNode);
//root = newNode;
arr[child0Index(getParent(arr, index))] = newNode; //node->parent->child[0] = newNode;
arr[parentIndex(newNode)] = getParent(arr, index); //newNode->parent = node->parent;
arr[child0Index(newNode)] = index; //newNode->child[0] = node;
arr[parentIndex(index)] = newNode; //node->parent = newNode;
}
insert_help_arr(arr, getParent(arr, index), splitNode, splitSmallest); //insert_help_d(node->parent, splitNode, splitSmallest);
}
// Descends from 'index' toward the leaf position for 'data' and returns the
// offset of the parent under which a new leaf should be attached; returns -2
// when the key already exists (or the start node is null).
// NOTE(review): the count/break pair caps the descent at 10 levels — this
// looks like leftover debug scaffolding and silently stops early on deep
// trees; confirm whether it can be removed.
__host__ int findSpot_arr(int *arr, int index, int data) {
if (arr[index] == -2) return -2;
//printf("findSpot index: %d, data: %d", index, data);
int ind = index;
int count = 0;
while (!isLeaf_arr(arr,ind)) {
//printf("findSpot ind: %d", ind);
count++;
if(count == 10)
break;
if (getKey0(arr, ind) == data || getKey1(arr, ind) == data) //(node->key[0] == data || node->key[1] == data)
return -2;
if (getKey0(arr,ind) == -1 || data < getKey0(arr,ind)) //(node->key[0] == -1 || data < node->key[0])
ind = getChild0(arr, ind);
//arr[index] = getChild0(arr, index); //node = node->child[0];
else if (getKey1(arr,ind) == -1 || data < getKey1(arr,ind)) //(node->key[1] == -1 || data < node->key[1])
ind = getChild1(arr,ind); //node = node->child[1];
else
ind = getChild2(arr,ind); //node = node->child[2];
}
if (getKey0(arr, ind) == data)//(node->key[0] == data)
return -2;
//printf("spot found: %d\n", getParent(arr,ind));
return getParent(arr, ind); //node->parent;
}
// Dispatches an insertion to the routine that matches the target node's
// current number of children (1, 2, or 3 — the last triggers a split).
__host__ void insert_help_arr(int *arr, int index, int newChild, int newSmallest)
{
    if (getChild1(arr, index) == -2) {
        insert1Siblings_arr(arr, index, newChild, newSmallest);
    } else if (getChild2(arr, index) == -2) {
        insert2Siblings_arr(arr, index, newChild, newSmallest);
    } else {
        insert3Siblings_arr(arr, index, newChild, newSmallest);
    }
}
// Inserts 'data' as a new leaf under the sentinel at 'index'.
// Duplicates are ignored (the freshly created node is simply abandoned).
__host__ void insert_arr(int *arr, int index, int data)
{
    int fresh = createNode_arr(arr, offsetTotal_h, data);
    int target = getChild0(arr, index);
    if (getChild0(arr, target) != -2) {
        // normal path: locate the parent that should adopt the new leaf
        target = findSpot_arr(arr, target, data);
        if (target == -2) return;
        insert_help_arr(arr, target, fresh, data);
        return;
    }
    // very first insertion: hang the new leaf directly off the root node
    arr[parentIndex(fresh)] = target;
    arr[child0Index(target)] = fresh;
}
// Same as insert_arr, but also reports where the key landed: returns the
// adopting parent's offset, or -2 when the key was a duplicate.
__host__ int insert_arr_d_index(int *arr, int index, int data)
{
    int fresh = createNode_arr(arr, offsetTotal_h, data);
    int target = getChild0(arr, index);
    if (getChild0(arr, target) == -2) {
        // very first insertion: attach directly beneath the root node
        arr[parentIndex(fresh)] = target;
        arr[child0Index(target)] = fresh;
        return target;
    }
    target = findSpot_arr(arr, target, data);
    if (target != -2)
        insert_help_arr(arr, target, fresh, data);
    return target;
}
// Builds the initial two-node skeleton: a sentinel node whose child0 is the
// (initially empty) working root of the tree.
__host__ void createRoot_arr(int* arr)
{
    int sentinel = createNode_arr(arr, offsetTotal_h, -1);
    int workRoot = createNode_arr(arr, offsetTotal_h, -1);
    arr[parentIndex(workRoot)] = sentinel;
    arr[child0Index(sentinel)] = workRoot;
}
// Dumps every allocated node (7-int records) in allocation order.
void print_tree(int *tree) {
    const int stride = 7;
    for (int base = 0; base < numNodes * stride; base += stride) {
        printf("Index = %d\n\t Key[0] = %d \n\t Key[1] = %d \n\t Parent = %d \n\t Child0 = %d \n\t Child1 = %d \n\t Child2 = %d \n\n",
               tree[base], tree[base + 1], tree[base + 2], tree[base + 3],
               tree[base + 4], tree[base + 5], tree[base + 6]);
    }
}
// Prints the node's one or two keys (key1 == -1 means "no second key").
__host__ void print_keys(int *tree, int index) {
    if (getKey1(tree, index) != -1)
        printf("%d, %d ", getKey0(tree, index), getKey1(tree, index));
    else
        printf("%d, ", getKey0(tree, index));
}
// Recursive in-order traversal: prints leaf keys from left to right.
__host__ void print_inorder(int *tree, int index) {
    if (index == -2)
        return;
    if (isLeaf_arr(tree, index)) {
        print_keys(tree, index);
        return;
    }
    print_inorder(tree, getChild0(tree, index));
    print_inorder(tree, getChild1(tree, index));
    if (getChild2(tree, index) != -2)
        print_inorder(tree, getChild2(tree, index));
}
|
20,224 | #include "includes.h"
// Increments each of the n elements starting at arr[offset_min] by one.
// Grid must supply at least n threads; surplus threads exit at the guard.
__global__ void kernel(int* arr, int offset_min, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < 0 || gid >= n)
        return;
    arr[gid + offset_min] += 1;
}
20,225 | // RUN: %clang_cc1 -Wno-cuda-compat -Werror %s
// RUN: %clang_cc1 -Wcuda-compat -verify %s
// RUN: %clang_cc1 -x c++ -Wcuda-compat -Werror %s
// Note that this puts the expected lines before the directives to work around
// limitations in the -verify mode.
// Exercises the -Wcuda-compat diagnostic for a parenthesized '#pragma unroll'
// argument. The expected-warning marker is consumed by clang's -verify mode
// and must stay on the same physical line as the pragma — do not reformat.
void test(int *List, int Length) {
/* expected-warning {{argument to '#pragma unroll' should not be in parentheses in CUDA C/C++}} */#pragma unroll(4)
for (int i = 0; i < Length; ++i) {
List[i] = i;
}
}
|
20,226 | #include <iostream>
#include <fstream>
#include <cmath>
#define IMAGE_DIMENSION 1000
#define G 9.81
#define PI 3.14159265358979323846
#define ARRAY_LENGTH 4000
#define MAX_TIME 40
// Writes a flattened IMAGE_DIMENSION x IMAGE_DIMENSION int image to
// 'fileName': values comma-separated within a row, each row ended with ';'.
void writeImgArrToFile(int *arr, std::string fileName)
{
    std::ofstream arrayFile(fileName);
    const int total = IMAGE_DIMENSION * IMAGE_DIMENSION;
    for (int i = 0; i < total; i++)
    {
        const bool rowEnd = (i % IMAGE_DIMENSION == IMAGE_DIMENSION - 1);
        arrayFile << arr[i] << (rowEnd ? ';' : ',');
    }
}
// Plain 4-component double vector used as the pendulum state
// (theta1, theta2, omega1, omega2) with the arithmetic RK4 needs.
struct Vector4
{
double d1, d2, d3, d4;
// component-wise constructor
__device__ Vector4(double d1in, double d2in, double d3in, double d4in)
{
d1 = d1in;
d2 = d2in;
d3 = d3in;
d4 = d4in;
};
// default: zero vector
__device__ Vector4()
{
d1 = 0;
d2 = 0;
d3 = 0;
d4 = 0;
};
// component-wise sum (by value; returns a new vector)
__device__ Vector4 operator+(const Vector4 v)
{
return Vector4(d1 + v.d1, d2 + v.d2, d3 + v.d3, d4 + v.d4);
};
// uniform scalar scale (returns a new vector)
__device__ Vector4 operator*(const double d)
{
return Vector4(d1 * d, d2 * d, d3 * d, d4 * d);
};
};
// Simple 2D point used to map pixel indices onto the (theta1, theta2) plane.
struct Coordinate
{
double x, y;
__device__ Coordinate(double xin, double yin)
{
x = xin;
y = yin;
};
};
// Fills arr[0..arrLength-1] with evenly spaced values starting at 'start'
// using step (stop - start) / arrLength.
// NOTE(review): dividing by arrLength (not arrLength - 1) means the final
// element is stop - delta, not stop — unlike an inclusive linspace. The
// caller uses the spacing as the RK4 step size, so confirm before changing.
__device__ void linspace(double *arr, double start, double stop, int arrLength)
{
double delta = (stop - start) / (double) arrLength;
arr[0] = start;
// cumulative addition; small FP drift vs start + i*delta is acceptable here
for (int i = 1; i < arrLength; i += 1)
{
arr[i] = arr[i-1] + delta;
}
}
// Time derivative of the state (theta1, theta2, omega1, omega2) — the
// double-pendulum equations of motion per the variable naming; args packs
// (l1, l2, m1, m2). 't' is accepted for the RK4 interface but unused
// (the system is autonomous).
__device__ Vector4 deriv(Vector4 y0, double t, double *args)
{
double l1 = args[0];
double l2 = args[1];
double m1 = args[2];
double m2 = args[3];
double theta1 = y0.d1;
double theta2 = y0.d2;
double omega1 = y0.d3;
double omega2 = y0.d4;
// shared trig of the angle difference, hoisted out of the expressions
double sinVal = sin(theta1 - theta2);
double cosVal = cos(theta1 - theta2);
double d1 = l1 * (m1 + m2 * sinVal * sinVal);
double d2 = l2 * (m1 + m2 * sinVal * sinVal);
double n1 = (m2 * G * sin(theta2) * cosVal
- m2 * sinVal * (l1 * omega1 * omega1 * cosVal + l2 * omega2 * omega2)
- (m1 + m2) * G * std::sin(theta1));
double n2 = ((m1 + m2) * (l1 * omega1 * omega1 * sinVal
- G * sin(theta2) + G * sin(theta1) * cosVal)
+ m2 * l2 * omega2 * omega2 * sinVal * cosVal);
double omega1Dot = n1 / d1;
double omega2Dot = n2 / d2;
return Vector4(omega1, omega2, omega1Dot, omega2Dot);
}
// Classic fixed-step 4th-order Runge-Kutta integration of deriv() from y0
// over [0, MAX_TIME], writing ARRAY_LENGTH states into resultVector.
// NOTE(review): resultVector[0] = y0 is immediately overwritten by the i=0
// loop iteration, so the stored trajectory starts one step after y0.
__device__ void rk4Solve(Vector4 *resultVector, Vector4 y0, double *args)
{
double tArr[ARRAY_LENGTH] = {0};
linspace(tArr, 0, MAX_TIME, ARRAY_LENGTH);
// uniform step size from the time grid
double h = tArr[1] - tArr[0];
Vector4 previousY = y0;
resultVector[0] = y0;
for (int i = 0; i < ARRAY_LENGTH; i++)
{
// the four standard RK4 slope samples
Vector4 k1 = deriv(previousY, tArr[i], args);
Vector4 k2 = deriv(previousY + k1 * (h / 2.0), tArr[i] + h / 2.0, args);
Vector4 k3 = deriv(previousY + k2 * (h / 2.0), tArr[i] + h / 2.0, args);
Vector4 k4 = deriv(previousY + k3 * h, tArr[i] + h, args);
// weighted average advance: (k1 + 2k2 + 2k3 + k4) * h/6
Vector4 y = previousY + (k1 + k2 * 2.0 + k3 * 2.0 + k4) * (h / 6.0);
resultVector[i] = y;
previousY = y;
}
}
// Integrates the pendulum from y0 and returns the index of the first step
// at which either angle (d1, d2) leaves [-PI, PI]; returns ARRAY_LENGTH if
// neither ever does. Used as the per-pixel "chaos" value.
// NOTE(review): y[] is ARRAY_LENGTH Vector4s (~128 KB) per thread, which
// will spill to slow local memory — works, but is the main perf cost here.
__device__ int getChaosRating(Vector4 y0, double *args)
{
Vector4 y[ARRAY_LENGTH] = {Vector4()};
rk4Solve(y, y0, args);
for (int i = 0; i < ARRAY_LENGTH; i++)
{
if (y[i].d1 > PI || y[i].d1 < -PI || y[i].d2 > PI || y[i].d2 < -PI)
{
return i;
}
}
return ARRAY_LENGTH;
}
// Linearly maps integer grid indices (i, j), each in [0, length-1], onto
// the rectangle [xmin, xmax] x [ymin, ymax] (endpoints inclusive).
__device__ Coordinate mapIdxToRange(int i, int j, double xmin, double xmax,
double ymin, double ymax, int length)
{
    double span = (double)length - 1.0;
    double x = ((double)i / span) * (xmax - xmin) + xmin;
    double y = ((double)j / span) * (ymax - ymin) + ymin;
    return Coordinate(x, y);
}
// One pixel per iteration via a grid-stride loop: maps the pixel position to
// an initial angle pair in [-PI, PI]^2, integrates the pendulum, and stores
// the escape-time "chaos rating" into pixels[i].
__global__ void generateImage(int *pixels)
{
for (int i = blockDim.x * blockIdx.x + threadIdx.x;
i < IMAGE_DIMENSION * IMAGE_DIMENSION;
i += blockDim.x * gridDim.x)
{
int xIdx = i % IMAGE_DIMENSION;
int yIdx = i / IMAGE_DIMENSION;
// pixel -> (theta1, theta2) initial condition; zero initial velocities
Coordinate coord = mapIdxToRange(xIdx, yIdx, -PI, PI, -PI, PI, IMAGE_DIMENSION);
Vector4 y0 = Vector4(coord.x, coord.y, 0, 0);
double args[4] = {1, 1, 1, 1}; // (L1, L2, M1, M2)
pixels[i] = getChaosRating(y0, args);
}
}
// Renders the double-pendulum chaos map into managed memory and writes it
// to img.txt. FIX: the allocation and the kernel launch were previously
// unchecked — kernel failures (e.g. launch config errors) went unnoticed
// and an empty image was silently written.
int main()
{
    int *pixels = nullptr;
    // managed memory so both the kernel and the host-side writer can use it
    cudaError_t err = cudaMallocManaged(&pixels, IMAGE_DIMENSION * IMAGE_DIMENSION * sizeof(int));
    if (err != cudaSuccess)
    {
        std::cerr << "cudaMallocManaged failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    int blockSize = 256;
    int numBlocks = std::ceil(IMAGE_DIMENSION * IMAGE_DIMENSION / (float)blockSize);
    std::cout << "Starting " << numBlocks << " blocks of size " << blockSize << std::endl;
    std::cout << numBlocks * blockSize << " threads in total for ";
    std::cout << IMAGE_DIMENSION * IMAGE_DIMENSION << " pixels" << std::endl;
    generateImage<<<numBlocks, blockSize>>>(pixels);
    // surface both launch-time and asynchronous kernel errors
    err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        std::cerr << "Kernel failed: " << cudaGetErrorString(err) << std::endl;
        cudaFree(pixels);
        return 1;
    }
    std::cout << "Writing to file..." << std::endl;
    writeImgArrToFile(pixels, "img.txt");
    cudaFree(pixels);
    return 0;
}
20,227 | #include "includes.h"
// Per-thread snapshot: copies one cell of the global push/reserve buffer
// (g_push_reser) into its shadow copy (s_push_reser). The single thread at
// linear index 0 additionally clears *g_finish when *g_count_blocks is 0.
// g_block_num is accepted but not used by this kernel.
// NOTE(review): no bounds guard — the grid must exactly cover width1 columns
// and the full buffer height; verify at the launch site.
__global__ void kernel_push_stochastic1(int *g_push_reser, int *s_push_reser, int *g_count_blocks, bool *g_finish, int *g_block_num, int width1)
{
// __umul24: 24-bit multiply, sufficient for these small index products
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
int thid = __umul24(y, width1) + x;
s_push_reser[thid] = g_push_reser[thid];
if (thid == 0)
{
if ((*g_count_blocks) == 0)
(*g_finish) = false;
}
}
20,228 | #include "includes.h"
// Polyphase filter-bank front end: each thread produces one filtered sample
// as the dot product of ntaps input samples spaced nfft apart with the tap
// branch selected by threadIdx.x. blockDim.x serves as the FFT length.
__global__ void pfbFilter(float *filtered, float *unfiltered, float *taps, const int ntaps) {
    const int nfft = blockDim.x;
    const int i = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    // accumulate directly into the output slot, tap 0 first
    filtered[i] = unfiltered[i] * taps[threadIdx.x];
    for (int tap = 1; tap < ntaps; tap++) {
        filtered[i] += unfiltered[i + tap * nfft] * taps[threadIdx.x + tap * nfft];
    }
}
20,229 | #include <stdlib.h>
#include <stdio.h>
double3* r; //array of displacement vectors
double3* rc; //array of displacement vectors in cylindrical coords, used in initcond() only
double3* r1; //array of displacement vectors of one end of particles
double3* r2; //array of displacement vectors of the other end of particles
double3* u; //array of orientation vectors
double2* theta; //array of theta and phi orientation values
double* l; //array of particle lengths
double* sigma; //array of particle diameters
double3* GU1; //array of cartesian gradient vectors (of U)
double2* GU1A; //array of theta and phi gradients (of U)
double3* GU0; //previous-step cartesian gradients — presumably; confirm in update code
double2* GU0A; //previous-step angular gradients — presumably; confirm in update code
double phi; //packing fraction
double3* h_host; //host-side cartesian buffer — TODO confirm what it mirrors
double2* hA_host; //host-side angular buffer — TODO confirm what it mirrors
FILE* fp; //output file handle
FILE* inputs; //parameter input file handle
dim3 blocks, threads; //one kernel launch configuration
dim3 grid, block; //a second kernel launch configuration
//THESE VALUES ARE IMPORTED FROM A FILE AS THE PARAMETERS OF THE SIMULATION//
int npart; //number of particles
double R;
double H;
double ETA;
double DPHI;
double PHI;
double ALPHA;
int CUBE; //flag selecting a cubic box — presumably; confirm against initcond()
double LENGTH;
double WIDTH;
double HEIGHT;
//THESE VALUES ARE SIMPLY COPIES OF SOME OF THE ABOVE VALUES TO BE STORED IN DEVICE MEMORY
double3* r_dev;
double3* u_dev;
double2* theta_dev;
double* l_dev;
double* sigma_dev;
double3* r1_dev;
double3* r2_dev;
double* params; //device copy of the scalar simulation parameters
double* U_dev;
double* U1_dev;
double3* U2_dev;
double2* U2A_dev;
double3* GU1_dev;
double2* GU1A_dev;
double3* GU0_dev;
double2* GU0A_dev;
double* phi_dev;
|
20,230 |
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda.h>
/*
#include "cuPrintf.cu"`
*/
using namespace std;
// Aborts with a diagnostic when a CUDA runtime call returned an error.
// Compiles to a no-op unless CUDA_CHECK_ERROR is defined.
inline void __cudaSafeCall( cudaError err,
const char *file, const int line )
{
#ifdef CUDA_CHECK_ERROR
    if ( cudaSuccess != err )
    {
        fprintf( stderr,
"cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
// CUDA_CHECK_ERROR
}//end function
// Checks for a pending CUDA error, then synchronizes the device to surface
// asynchronous kernel failures. No-op unless CUDA_CHECK_ERROR is defined.
// FIX: replaced the deprecated cudaThreadSynchronize() with
// cudaDeviceSynchronize() and removed a redundant nested error re-check.
inline void __cudaCheckError( const char *file, const int line ) {
#ifdef CUDA_CHECK_ERROR
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err )
    {
        fprintf( stderr,
"cudaCheckError() with sync failed at %s:%i : %s.\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    // More careful checking: block until the device is idle so async kernel
    // errors are reported here. This affects performance; comment if not needed.
    err = cudaDeviceSynchronize();
    if( cudaSuccess != err )
    {
        fprintf( stderr,
"cudaCheckError() failed at %s:%i : %s.\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif // CUDA_CHECK_ERROR
    return;
}
// In-place ascending bubble sort; O(size^2). Kept as the CPU reference for
// the single-thread GPU kernel version below.
void bubble_sort(int * array, int size)
{
    for (int pass = 0; pass <= size - 1; pass++)
    {
        for (int k = 1; k <= size - 1; k++)
        {
            if (array[k] < array[k - 1])
            {
                int tmp = array[k - 1];
                array[k - 1] = array[k];
                array[k] = tmp;
            }
        }
    }
}
// Prints the array to stdout as "a, b, c, " followed by a newline.
void print_array(int * array, int size)
{
    for (int k = 0; k <= size - 1; k++)
        printf("%d, ", array[k]);
    printf("\n");
}
// Allocates (with new[]) and returns 'size' pseudo-random ints in
// [0, 999999], seeding rand() with 'seed'. Caller owns the returned memory.
int * makeRandArray( const int size, const int seed ) {
    srand( seed );
    int * values = new int[ size ];
    for( int k = 0; k < size; k++ )
        values[k] = std::rand() % 1000000;
    return values;
}
/*
 * GPU bubble sort executed by a single thread (launched <<<1, 1>>>):
 * purely sequential — it exists to demonstrate running code on the device,
 * not to exploit parallelism.
 */
__global__ void matavgKernel(int * array, int size ) {
    for (int pass = 0; pass <= size - 1; pass++)
    {
        for (int k = 1; k <= size - 1; k++)
        {
            if (array[k] < array[k - 1])
            {
                int tmp = array[k - 1];
                array[k - 1] = array[k];
                array[k] = tmp;
            }
        }
    }
}
// Generates random ints, sorts them on the GPU with a single-thread bubble
// sort, times the round trip with CUDA events, and prints the result.
// FIXES: hard-coded byte size '4' replaced with sizeof(int); 'array' (new[])
// and 'host_array' (malloc) were previously leaked.
int main( int argc, char* argv[] ) {
    int * array;            // the pointer to the array of rands
    int size, seed;         // values for the size of the array and RNG seed
    bool printSorted = false;
    // check the command line args
    if( argc < 3 ){
        std::cerr << "usage: "
        << argv[0]
        << " [amount of random nums to generate] [seed value for rand]" << " [1 to print sorted array, 0 otherwise]"
        << std::endl;
        exit( -1 ); }
    // convert cstrings to ints
    {
        std::stringstream ss1( argv[1] );
        ss1 >> size;
    } {
        std::stringstream ss1( argv[2] );
        ss1 >> seed; }
    // get the random numbers
    array = makeRandArray( size, seed );
    int * host_array = (int*)malloc(size * sizeof(int));
    for(int i = 0; i <= size - 1; i ++)
    {
        host_array[i] = array[i];
    }
    print_array(array, size);
    printf("host_array\n");
    print_array(host_array, size);
    cudaEvent_t startTotal, stopTotal; float timeTotal; cudaEventCreate(&startTotal); cudaEventCreate(&stopTotal); cudaEventRecord( startTotal, 0 );
    // copy to the device, sort there, and copy back
    int * cuda_array;
    cudaMalloc(&cuda_array, size * sizeof(int));
    cudaMemcpy(cuda_array, host_array, size * sizeof(int), cudaMemcpyHostToDevice);
    matavgKernel <<< 1, 1 >>> (cuda_array, size);
    cudaMemcpy(host_array, cuda_array, size * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(cuda_array);
    /***********************************
    Stop and destroy the cuda timer
    **********************************/
    cudaEventRecord( stopTotal, 0 );
    cudaEventSynchronize( stopTotal );
    cudaEventElapsedTime( &timeTotal, startTotal, stopTotal );
    cudaEventDestroy( startTotal );
    cudaEventDestroy( stopTotal );
    std::cerr << "Total time in seconds: "
    << timeTotal / 1000.0 << std::endl;
    printSorted = true;
    if( printSorted ){
        for(int i = 0; i <= size - 1; i ++)
        {
            printf("%d, ", host_array[i]);
        }
        printf("\n");
    }
    // release host allocations (previously leaked)
    delete[] array;
    free(host_array);
}
|
20,231 | #include "includes.h"
// Possible weight coefficients for tracking cost evaluation :
// Gaussian discretisation
/*
* 1 4 6 4 1
* 4 16 24 16 4
* 6 24 36 24 6
* 4 16 24 16 4
* 1 4 6 4 1
*/
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Low pass gaussian-like filtering before subsampling
// Low pass gaussian-like filtering before subsampling
/*
// Upsample a picture using the "magic" kernel
*/
// Vertical Scharr derivative (dI/dy) of a _w x _h single-channel image,
// normalised by 1/20.  One thread per output pixel; threads outside the
// image return immediately.  Borders are handled by clamping (edge
// replication) of the sampled row/column indices.
__global__ void kernelScharrY( float const *in, int _w, int _h, float *out )
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    if(x >= _w || y >= _h)
        return;
    // Pattern       // Indexes:
    //  -3   0   3   //  a1 b1 c1
    // -10   0  10   //  a2 b2 c2
    //  -3   0   3   //  a3 b3 c3
    int a = max(y-1,0);          // clamped row above
    int c = min((y+1),_h -1);    // clamped row below
    int i1 = max(x-1, 0);        // clamped column left
    int i3 = min(x+1, _w-1);     // clamped column right
    int a1 = a*_w + i1;
    int a2 = a*_w + x;
    int a3 = a*_w + i3;
    int c1 = c*_w + i1;
    int c2 = c*_w + x;
    int c3 = c*_w + i3;
    // FIX: use float literals — the original 3.0/10.0/20.0 promoted the
    // whole expression to double before handing it to the float-only
    // __fdividef intrinsic.
    out[y*_w+x] = __fdividef(3.0f*(- in[a1] -in[a3] +in[c1] +in[c3])
                           + 10.0f*(in[c2] -in[a2]), 20.0f);
    // out[y*_w+x] = -3.0*in[a1] -10.0*in[a2] -3.0*in[a3] + 3.0*in[c1] + 10.0*in[c2] + 3.0*in[c3];
}
20,232 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define MAX_ARRAY_SIZE 1000000
/* -------------------------------------------------------------------------
Algorithm description:
array_in {1, 5, 3, 2, 6, 7, 9, 5, 3, 6}
|
| parallel check odd/even: O(1)
\_/
array_is_odd {1, 1, 1, 0, 0, 1, 1, 1, 1, 0}
|
| inclusive prefix scan: O(logN)
\_/
array_index {1, 2, 3, 3, 3, 4, 5, 6, 7, 7}
num_odd := array_index[N - 1]; -\
array_o[num_odd]; |
for i = 1 : N - 1 in parallel | O(1)
if array_is_odd[i] |
array_o[array_index[i] - 1] = array_in[i]; |
-/
array_o {1, 5, 3, 7, 9, 5, 3}
T(N) = O(logN)
------------------------------------------------------------------------- */
/*
 * Abort the program unless at least one CUDA-capable device is present,
 * then select device 0 for all subsequent CUDA calls.
 */
void check_dev(void) {
    int count = 0;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        printf("!! Error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    cudaSetDevice(0);
}
/*
 * Choose a per-block thread count for `size` elements: the smallest power
 * of two that is >= floor(sqrt(size)).  Designed so a reduction over the
 * array completes in two passes; with size <= 1,000,000 the result stays
 * <= 1024, the device's per-block thread limit.
 */
int calc_num_thread(int size) {
    const int target = (int)sqrt((double)size);
    int threads = 1;
    while (threads < target)
        threads <<= 1;   // round up to the next power of two
    return threads;
}
/*
 * Read comma-separated integers from ./inp.txt.
 * Returns a malloc'd array of the values read; the item count is written
 * through `size`.  Exits on open failure.
 * Fixed: the old `while (!feof(...))` loop was redundant with the fscanf
 * return check, and nothing guarded against overflowing the
 * MAX_ARRAY_SIZE-element buffer on oversized input files.
 */
int * read_data(int * size)
{
    FILE * fptr = fopen("./inp.txt", "r");
    if (!fptr) {
        printf("!! Error in opening data file \n");
        exit(1);
    }
    int * buffer = (int *)malloc(MAX_ARRAY_SIZE * sizeof(int));
    int i = 0;
    // stop at EOF, at a malformed token, or when the buffer is full
    while (i < MAX_ARRAY_SIZE && fscanf(fptr, "%d, ", &buffer[i]) == 1) {
        ++i;
    }
    fclose(fptr);
    *size = i;
    return buffer;
}
/*
 * Write `array_size` integers to `file_name` as a comma-separated list
 * (no trailing separator).  Exits on open failure.
 */
void print_file(int * array, int array_size, const char file_name[]) {
    FILE * out = fopen(file_name, "w");
    if (out == NULL) {
        printf("!! Error in opening output file \n");
        exit(1);
    }
    for (int idx = 0; idx < array_size; ++idx) {
        if (idx > 0)
            fprintf(out, ", ");
        fprintf(out, "%d", array[idx]);
    }
    fclose(out);
}
/*
 * GPU kernel: parallel odd/even check.
 * array_o[i] = 1 if array_i[i] is odd, else 0.
 * Fixed: in C, `x % 2` evaluates to -1 for negative odd x; storing that
 * raw remainder would corrupt the inclusive prefix sum built on top of
 * these flags.  Normalise to exactly 0 or 1.
 */
__global__ void odd_check(int * array_i, int * array_o, int array_size) {
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (myId < array_size) {
        array_o[myId] = (array_i[myId] % 2 != 0);
    }
}
/*
 * GPU kernel: one step of an inclusive (Hillis-Steele) prefix scan.
 * Each in-range element adds the value `dist` positions to its left.
 * The result cannot be written back to the input array because blocks
 * cannot be synchronized within a kernel launch.
 */
__global__ void prefix_scan_step(int * array_i, int * array_o, int array_size, int dist) {
    // shared memory to store intermediate results (one int per thread)
    extern __shared__ int sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int thId = threadIdx.x;
    // load initial values to shared memory
    // FIX: guard the load — the grid is rounded up to whole blocks, so the
    // trailing threads used to read past the end of array_i.
    if (myId < array_size) {
        sdata[thId] = array_i[myId];
    }
    __syncthreads();
    // add the partner element `dist` positions back; read it from global
    // memory because it may belong to another block's range
    if (!(myId < dist) && myId < array_size) {
        sdata[thId] += array_i[myId - dist];
    }
    __syncthreads();
    // publish this step's results
    if (myId < array_size) {
        array_o[myId] = sdata[thId];
    }
}
/*
 * Inclusive prefix scan of array_i into array_o (both device pointers).
 * Runs O(log N) prefix_scan_step launches, doubling the stride each time,
 * ping-ponging between the two buffers to avoid a per-step copy.
 */
void prefix_scan(int * array_i, int * array_o, int array_size) {
    // dynamically calculate the number of threads and blocks
    const int maxThreadsPerBlock = calc_num_thread(array_size);
    int threads = maxThreadsPerBlock;
    int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    int dist = 1, i = 0;
    while (dist < array_size) {
        // each array is alternatively used as the kernel input or output to
        // avoid the overhead of copying the output back to the input on
        // every iteration
        if (i % 2 == 0)
            prefix_scan_step<<<blocks, threads, threads * sizeof(int)>>>(array_i, array_o, array_size, dist);
        else
            prefix_scan_step<<<blocks, threads, threads * sizeof(int)>>>(array_o, array_i, array_size, dist);
        cudaDeviceSynchronize();
        ++i;
        dist *= 2;
    }
    // after the loop, the final values sit in array_i when an even number
    // of steps ran (including the zero-step case array_size <= 1); copy so
    // the caller can always read the result from array_o
    if (i % 2 == 0)
        cudaMemcpy(array_o, array_i, array_size * sizeof(int), cudaMemcpyDeviceToDevice);
}
/*
 * GPU kernel: compact the input array to get the odd numbers.
 * array_index is the inclusive prefix sum of array_is_odd, so for an odd
 * element array_index[myId] is its 1-based rank among the odd values;
 * the element is scattered to rank-1 in the output array.
 */
__global__ void get_odd(int * array_i, int * array_o, int * array_is_odd, int * array_index, int array_size/* , int num_odd */) {
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (myId < array_size) {
        // the alternative method which is commented out causes a lot of overhead on the superscalar cores
        if (array_is_odd[myId] /* (myId == 0 && myId > 0) || (array_index[myId] > array_index[myId - 1]) */) {
            array_o[array_index[myId] - 1] = array_i[myId];
        }
    }
}
/*
 * Compact algorithm: put the odd numbers in the input array into the output array
 * Returns the pointer to the (malloc'd) output array
 * Ouputs the number of odd numbers thru passed-in pointer (int * num_odd)
 * Pipeline: odd flags -> inclusive prefix scan of the flags -> scatter.
 * NOTE(review): assumes array_size >= 1 — with an empty input the read of
 * array_index[array_size - 1] below would be out of bounds; confirm callers
 * never pass an empty array.
 */
int * compact(int * array_i, int * num_odd, int array_size) {
    // dynamically calculate the number of threads and blocks
    const int maxThreadsPerBlock = calc_num_thread(array_size);
    int threads = maxThreadsPerBlock;
    int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    // copy the input array into GPU global (device) memory
    int * array_device;
    cudaMalloc((void **) &array_device, array_size * sizeof(int));
    cudaMemcpy(array_device, array_i, array_size * sizeof(int), cudaMemcpyHostToDevice);
    // allocate GPU memories for array_is_odd and array_index
    int * array_is_odd, * array_index, * array_index_buffer;
    cudaMalloc((void **) &array_is_odd, array_size * sizeof(int));
    cudaMalloc((void **) &array_index, array_size * sizeof(int));
    cudaMalloc((void **) &array_index_buffer, array_size * sizeof(int));
    // compute array_is_odd
    odd_check<<<blocks, threads>>>(array_device, array_is_odd, array_size);
    cudaDeviceSynchronize();
    // populate array_index with initial values
    cudaMemcpy(array_index_buffer, array_is_odd, array_size * sizeof(int), cudaMemcpyDeviceToDevice);
    // compute array_index by prefix scan
    prefix_scan(array_index_buffer, array_index, array_size);
    // get the number of odd numbers (last entry of the inclusive scan)
    cudaMemcpy(num_odd, &array_index[array_size - 1], sizeof(int), cudaMemcpyDeviceToHost);
    // allocate GPU memory for the result array
    int * array_device_out;
    cudaMalloc((void **) &array_device_out, (*num_odd) * sizeof(int));
    // collect the final result in GPU
    get_odd<<<blocks, threads>>>(array_device, array_device_out, array_is_odd, array_index, array_size/* , *num_odd */);
    cudaDeviceSynchronize();
    // allocate CPU memory for the result array (caller frees)
    int * array_o = (int *)malloc((*num_odd) * sizeof(int));
    // copy the result from GPU to CPU
    cudaMemcpy(array_o, array_device_out, (*num_odd) * sizeof(int), cudaMemcpyDeviceToHost);
    // finish
    cudaFree(array_device_out);
    cudaFree(array_index_buffer);
    cudaFree(array_index);
    cudaFree(array_is_odd);
    cudaFree(array_device);
    return array_o;
}
/*
 * CPU main routine: read integers from ./inp.txt, extract the odd values
 * with the GPU compact pipeline, time it with CUDA events, and write the
 * result to ./q3.txt.
 */
int main(void) {
    // check device
    check_dev();
    // data array on host
    int array_size = 0;
    int * array_i = read_data(&array_size);
    // do compact & elapsed time record
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int num_odd = 0;
    cudaEventRecord(start, 0);
    int * array_o = compact(array_i, &num_odd, array_size);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // print to file
    print_file(array_o, num_odd, "./q3.txt");
    // print debug information to stdout
    // printf(">> Number of odd numbers found: %d\n", num_odd);
    // printf(">> Average time elapsed: %f\n", elapsedTime);
    // finish
    // NOTE(review): the start/stop events are never destroyed — harmless
    // here since the process exits, but cudaEventDestroy would be tidier.
    free(array_i);
    free(array_o);
    return 0;
}
|
20,233 | #define SizeT int
#define VertexId int
// Compacts surviving rows of a (from, to) edge list after a join iteration
// and splices in the (iter+1)-th edge for each surviving row.  Rows are
// `edges` entries wide; judging by the indexing, row r "survives" when
// flag[r] >= 1 and flag[r] increments relative to flag[r-1], and flag is
// the inclusive prefix count of survivors (so flag[r]-1 is the compacted
// destination row) — TODO confirm against the code that builds `flag`.
// NOTE(review): the __syncthreads() below sits inside a divergent branch —
// threads failing the outer `if` never reach it, which is undefined
// behaviour; it also cannot order accesses across blocks, so the in-place
// read-then-move of froms/tos races when source and destination rows are
// processed by different blocks/warps.
// NOTE(review): counts[0] is stored unconditionally by every surviving
// thread; all write flag[size-1], so the value is consistent but redundant.
__global__ void Collect(
    const SizeT edges,
    const SizeT iter,
    const SizeT* const flag,
    const VertexId* const froms_data,
    const VertexId* const tos_data,
    VertexId* froms,
    VertexId* tos,
    SizeT* pos,
    SizeT* counts)
{
    SizeT x = blockIdx.x * blockDim.x + threadIdx.x;
    // number of candidate rows for this iteration
    SizeT size = ((iter==0) ? pos[iter]:counts[0]) * (pos[iter+1]-pos[iter]);
    if(x>=0 && x<size*edges)
    {
        //SizeT a = x%(((iter==0)?pos[iter]:counts[0]) * edges);
        // a: flat index of this thread's entry within its source row
        SizeT a = (x/edges%((iter==0)?pos[iter]:counts[0]))*edges+x%edges;
        SizeT b = pos[iter]+x/(edges*((iter==0)?pos[iter]:counts[0])); // edge iter+1 e_id
        if(flag[x/edges]>=1 && (x/edges==0 || flag[x/edges]>flag[x/edges-1]))
        {
            // printf("large group:%d small group: %d iter:%d froms_out[%d]:%d->tos_out[%d]:%d flag[%d]=%d\n",x/edges%(pos[iter+1]-pos[iter]), x/edges/(pos[iter+1]-pos[iter]),iter,a,froms[a],a,tos[a],x/edges,flag[x/edges]);
            // snapshot the source entry before any thread overwrites it
            VertexId from = froms[a];
            VertexId to = tos[a];
            //VertexId from = froms[x];
            //VertexId to = tos[x];
            __syncthreads();
            // move the snapshot to the compacted row flag[row]-1; the slot
            // reserved for the new edge (column iter+1) is filled from the
            // global edge data instead
            if(x%edges!=iter+1){
                froms[(flag[x/edges]-1)*edges+x%edges]=from;
                tos[(flag[x/edges]-1)*edges+x%edges]=to;}
            else{
                froms[(flag[x/edges]-1)*edges+iter+1] = froms_data[b];
                tos[(flag[x/edges]-1)*edges+iter+1] = tos_data[b];}
            //printf("iter:%d froms[%d]:%d -> tos[%d]:%d flag[%d]:%d\n",iter,(flag[x/edges]-1)*edges+x%edges,froms[(flag[x/edges]-1)*edges+x%edges],(flag[x/edges]-1)*edges+x%edges, tos[(flag[x/edges]-1)*edges+x%edges],x/edges,flag[x/edges]);
            //printf("iter:%d froms[%d]:%d -> tos[%d]:%d flag[%d]:%d\n",iter,(flag[x/edges]-1)*edges+x%edges,from,(flag[x/edges]-1)*edges+x%edges, to,x/edges,flag[x/edges]);
            counts[0] = flag[size-1];
        }
    }
}
20,234 | #include "MemoryManagement.cuh"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
ComputationEnvironment glob_Env = ComputationEnvironment::GPU;
ComputationEnvironment trellis_3D_Env = ComputationEnvironment::GPU;
MemoryMovementDuplication glob_Dup = MemoryMovementDuplication::NO;
// Allocate (and optionally zero) a vector of `numberOfElements` ints in the
// environment selected by glob_Env.  On GPU allocation failure the handle
// is freed and nulled.  Returns the CUDA status (cudaSuccess on the CPU path).
__host__ cudaError_t allocateDeviceVector(IntHdl pVector, int numberOfElements, bool cleanAlloc)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMalloc((void**)pVector, numberOfElements * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            cudaFree(*pVector);
            *pVector = NULL;
        }
        if (cleanAlloc)
        {
            // BUGFIX: cudaMemset counts bytes, not elements — the old call
            // zeroed only numberOfElements bytes (a quarter of the buffer).
            cudaStatus = cudaMemset(*pVector, 0, numberOfElements * sizeof(int));
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "cudaMemset failed!");
                cudaFree(*pVector);
                *pVector = NULL;
            }
        }
        break;
    case ComputationEnvironment::CPU:
        if (cleanAlloc)
            *pVector = (IntPtr)calloc(numberOfElements, sizeof(int));
        else
            *pVector = (IntPtr)malloc(numberOfElements * sizeof(int));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// Allocate (and optionally zero) a vector of `numberOfElements` unsigned
// ints in the environment selected by glob_Env.  On GPU allocation failure
// the handle is freed and nulled.
__host__ cudaError_t allocateDeviceVector(UIntHdl pVector, int numberOfElements, bool cleanAlloc)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMalloc((void**)pVector, numberOfElements * sizeof(unsigned int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            cudaFree(*pVector);
            *pVector = NULL;
        }
        if (cleanAlloc)
        {
            // BUGFIX: cudaMemset counts bytes — the old call zeroed only
            // numberOfElements bytes instead of the whole buffer.
            cudaStatus = cudaMemset(*pVector, 0, numberOfElements * sizeof(unsigned int));
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "cudaMemset failed!");
                cudaFree(*pVector);
                *pVector = NULL;
            }
        }
        break;
    case ComputationEnvironment::CPU:
        if (cleanAlloc)
            *pVector = (UIntPtr)calloc(numberOfElements, sizeof(unsigned int));
        else
            *pVector = (UIntPtr)malloc(numberOfElements * sizeof(unsigned int));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// Allocate (and optionally zero) a vector of `numberOfElements` floats in
// the environment selected by glob_Env.  On GPU allocation failure the
// handle is freed and nulled.
__host__ cudaError_t allocateDeviceVector(FloatHdl pVector, int numberOfElements, bool cleanAlloc)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMalloc((void**)pVector, numberOfElements * sizeof(float));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            cudaFree(*pVector);
            *pVector = NULL;
        }
        if (cleanAlloc)
        {
            // BUGFIX: cudaMemset counts bytes; the old
            // `numberOfElements * (sizeof(float)/sizeof(int))` still zeroed
            // only numberOfElements bytes, a quarter of the buffer.
            cudaStatus = cudaMemset(*pVector, 0, numberOfElements * sizeof(float));
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "cudaMemset failed!");
                cudaFree(*pVector);
                *pVector = NULL;
            }
        }
        break;
    case ComputationEnvironment::CPU:
        if (cleanAlloc)
            *pVector = (float *)calloc(numberOfElements, sizeof(float));
        else
            *pVector = (float *)malloc(numberOfElements * sizeof(float));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// Allocate (and optionally zero) a vector of `numberOfElements` doubles in
// the environment selected by glob_Env.  On GPU allocation failure the
// handle is freed and nulled.
__host__ cudaError_t allocateDeviceVector(DoubleHdl pVector, int numberOfElements, bool cleanAlloc)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMalloc((void**)pVector, numberOfElements * sizeof(double));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            cudaFree(*pVector);
            *pVector = NULL;
        }
        if (cleanAlloc)
        {
            // BUGFIX: cudaMemset counts bytes; the old
            // `numberOfElements * (sizeof(double)/sizeof(int))` zeroed only
            // 2*numberOfElements bytes instead of 8*numberOfElements.
            cudaStatus = cudaMemset(*pVector, 0, numberOfElements * sizeof(double));
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "cudaMemset failed!");
                cudaFree(*pVector);
                *pVector = NULL;
            }
        }
        break;
    case ComputationEnvironment::CPU:
        if (cleanAlloc)
            *pVector = (double *)calloc(numberOfElements, sizeof(double));
        else
            *pVector = (double *)malloc(numberOfElements * sizeof(double));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// -------------------------------------------------------------------------------------------------------------------------
// Copy `numberOfElements` ints into the vector referenced by dst.
// GPU: cudaMemcpy with the requested kind.  CPU: a real copy when
// duplication is enabled, otherwise the handle simply aliases src.
__host__ cudaError_t memcpyVector(IntHdl dst, const IntPtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(*dst, src, numberOfElements * sizeof(int), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        switch (glob_Dup)
        {
        case MemoryMovementDuplication::YES:
            // BUGFIX: memccpy(dst, src, c, n) copies at most n bytes and
            // stops at byte value c — the old call moved at most
            // sizeof(int) bytes.  Use a plain full-size memcpy.
            memcpy(*dst, src, numberOfElements * sizeof(int));
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        case MemoryMovementDuplication::NO:
            *dst = src;
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        }
        break;
    }
    return cudaStatus;
}
// Copy `numberOfElements` unsigned ints into the vector referenced by dst.
// GPU: cudaMemcpy with the requested kind.  CPU: a real copy when
// duplication is enabled, otherwise the handle simply aliases src.
__host__ cudaError_t memcpyVector(UIntHdl dst, const UIntPtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(*dst, src, numberOfElements * sizeof(unsigned int), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        switch (glob_Dup)
        {
        case MemoryMovementDuplication::YES:
            // BUGFIX: memccpy copies at most its 4th argument's bytes and
            // stops at the byte value of its 3rd — the old call moved at
            // most sizeof(unsigned int) bytes.
            memcpy(*dst, src, numberOfElements * sizeof(unsigned int));
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        case MemoryMovementDuplication::NO:
            *dst = src;
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        }
        break;
    }
    return cudaStatus;
}
// Copy `numberOfElements` floats into the vector referenced by dst.
// GPU: cudaMemcpy with the requested kind.  CPU: a real copy when
// duplication is enabled, otherwise the handle simply aliases src.
__host__ cudaError_t memcpyVector(FloatHdl dst, const FloatPtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(*dst, src, numberOfElements * sizeof(float), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        switch (glob_Dup)
        {
        case MemoryMovementDuplication::YES:
            // BUGFIX: memccpy copies at most sizeof(float) bytes here and
            // may stop even earlier at byte value `numberOfElements`.
            memcpy(*dst, src, numberOfElements * sizeof(float));
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        case MemoryMovementDuplication::NO:
            *dst = src;
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        }
        break;
    }
    return cudaStatus;
}
// Copy `numberOfElements` doubles into the vector referenced by dst.
// GPU: cudaMemcpy with the requested kind.  CPU: a real copy when
// duplication is enabled, otherwise the handle simply aliases src.
__host__ cudaError_t memcpyVector(DoubleHdl dst, const DoublePtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(*dst, src, numberOfElements * sizeof(double), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        switch (glob_Dup)
        {
        case MemoryMovementDuplication::YES:
            // BUGFIX: memccpy copies at most sizeof(double) bytes here and
            // may stop even earlier at byte value `numberOfElements`.
            memcpy(*dst, src, numberOfElements * sizeof(double));
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        case MemoryMovementDuplication::NO:
            *dst = src;
            cudaStatus = cudaError_t::cudaSuccess;
            break;
        }
        break;
    }
    return cudaStatus;
}
// -------------------------------------------------------------------------------------------------------------------------
// Copy `numberOfElements` ints between raw pointers (no handle indirection).
__host__ cudaError_t memcpyVector(IntPtr dst, const IntPtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(dst, src, numberOfElements * sizeof(int), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        // BUGFIX: memccpy(dst, src, c, n) copies at most n bytes, stopping
        // at byte value c — the old call moved at most sizeof(int) bytes.
        memcpy(dst, src, numberOfElements * sizeof(int));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// Copy `numberOfElements` unsigned ints between raw pointers.
__host__ cudaError_t memcpyVector(UIntPtr dst, const UIntPtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(dst, src, numberOfElements * sizeof(unsigned int), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        // BUGFIX: was memccpy, which copied at most sizeof(unsigned int)
        // bytes; copy the full element count.
        memcpy(dst, src, numberOfElements * sizeof(unsigned int));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// Copy `numberOfElements` floats between raw pointers.
__host__ cudaError_t memcpyVector(FloatPtr dst, const FloatPtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(dst, src, numberOfElements * sizeof(float), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        // BUGFIX: was memccpy, which copied at most sizeof(float) bytes;
        // copy the full element count.
        memcpy(dst, src, numberOfElements * sizeof(float));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// Copy `numberOfElements` doubles between raw pointers.
__host__ cudaError_t memcpyVector(DoublePtr dst, const DoublePtr src, int numberOfElements, enum cudaMemcpyKind kind)
{
    cudaError_t cudaStatus = cudaError_t::cudaErrorIllegalInstruction;
    switch (glob_Env)
    {
    case ComputationEnvironment::GPU:
        cudaStatus = cudaMemcpy(dst, src, numberOfElements * sizeof(double), kind);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
        }
        break;
    case ComputationEnvironment::CPU:
        // BUGFIX: was memccpy, which copied at most sizeof(double) bytes;
        // copy the full element count.
        memcpy(dst, src, numberOfElements * sizeof(double));
        cudaStatus = cudaError_t::cudaSuccess;
        break;
    }
    return cudaStatus;
}
// -------------------------------------------------------------------------------------------------------------------------
// Release a raw allocation in the active environment.  CPU memory is freed
// only when this module duplicated it (glob_Dup == YES); aliased pointers
// are left untouched.
__host__ cudaError_t deviceFree(void *devPtr)
{
    if (glob_Env == ComputationEnvironment::GPU)
        return cudaFree(devPtr);
    if (glob_Env == ComputationEnvironment::CPU) {
        if (glob_Dup == MemoryMovementDuplication::YES)
            free(devPtr);
        return cudaError_t::cudaSuccess;
    }
    return cudaError_t::cudaErrorIllegalInstruction;
}
// Release the int vector behind a handle; see deviceFree(void*) for the
// CPU duplication rules.
__host__ cudaError_t deviceFree(IntHdl devHdl)
{
    if (glob_Env == ComputationEnvironment::GPU)
        return cudaFree(*devHdl);
    if (glob_Env == ComputationEnvironment::CPU) {
        if (glob_Dup == MemoryMovementDuplication::YES)
            free(*devHdl);
        return cudaError_t::cudaSuccess;
    }
    return cudaError_t::cudaErrorIllegalInstruction;
}
// Release the unsigned-int vector behind a handle; see deviceFree(void*)
// for the CPU duplication rules.
__host__ cudaError_t deviceFree(UIntHdl devHdl)
{
    if (glob_Env == ComputationEnvironment::GPU)
        return cudaFree(*devHdl);
    if (glob_Env == ComputationEnvironment::CPU) {
        if (glob_Dup == MemoryMovementDuplication::YES)
            free(*devHdl);
        return cudaError_t::cudaSuccess;
    }
    return cudaError_t::cudaErrorIllegalInstruction;
}
// Release the float vector behind a handle; see deviceFree(void*) for the
// CPU duplication rules.
__host__ cudaError_t deviceFree(FloatHdl devHdl)
{
    if (glob_Env == ComputationEnvironment::GPU)
        return cudaFree(*devHdl);
    if (glob_Env == ComputationEnvironment::CPU) {
        if (glob_Dup == MemoryMovementDuplication::YES)
            free(*devHdl);
        return cudaError_t::cudaSuccess;
    }
    return cudaError_t::cudaErrorIllegalInstruction;
}
// Release the double vector behind a handle; see deviceFree(void*) for the
// CPU duplication rules.
__host__ cudaError_t deviceFree(DoubleHdl devHdl)
{
    if (glob_Env == ComputationEnvironment::GPU)
        return cudaFree(*devHdl);
    if (glob_Env == ComputationEnvironment::CPU) {
        if (glob_Dup == MemoryMovementDuplication::YES)
            free(*devHdl);
        return cudaError_t::cudaSuccess;
    }
    return cudaError_t::cudaErrorIllegalInstruction;
}
// -------------------------------------------------------------------------------------------------------------------------
// Debug helper: copy `amount` doubles from device memory to a temporary
// host buffer and print them space-separated to stdout.
__host__ void printDeviceMemToScreen(double * device_target, int amount){
    double *host_copy = (double *)calloc(amount, sizeof(double));
    cudaMemcpy(host_copy, device_target, amount * sizeof(double), cudaMemcpyDeviceToHost);
    for (int idx = 0; idx < amount; ++idx)
        std::cout << host_copy[idx] << " ";
    free(host_copy);
}
|
20,235 | //Calculate prime numbers within a certain range provided by the user, or use default
//values of 0-1000
#include <iostream>
#include <cstdint> //required for uint64_t
#include <sstream> //convert runtime params into uint64 using istringstream
#include <ctime>
#include <chrono>
#include <cstdlib>
#include <cuda_runtime.h>
const int MAX_THREADS = 1024;
using namespace std::chrono;
/*double inline __declspec (naked) __fastcall sqrt(double n)
{
_asm fld qword ptr[esp + 4]
_asm fsqrt
_asm ret 8
}*/
// Print a labelled duration, converted to whole milliseconds.
void reportTime(const char* msg, steady_clock::duration span) {
    const auto elapsed_ms = duration_cast<milliseconds>(span).count();
    std::cout << msg << " - took - " << elapsed_ms << " millisecs" << std::endl;
}
// Print every prime in [a, b) to stdout (one per line) and return how many
// there are.
// Fixed: the original inner loop `for (j = 2; j*j <= i; ...)` never
// executed for i < 4, so the primes 2 and 3 were neither printed nor
// counted.  Simple trial division now handles every i >= 2.
uint64_t genPrime(uint64_t a, uint64_t b) {
    uint64_t count = 0;
    for (uint64_t i = a; i < b; i++) {
        if (i < 2)
            continue;             // 0 and 1 are not prime
        bool isPrime = true;
        for (uint64_t j = 2; j * j <= i; j++) {
            if (i % j == 0) {
                isPrime = false;
                break;
            }
        }
        if (isPrime) {
            std::cout << std::fixed << i << "\n";
            count++;
        }
    }
    return count; //Return total number of primes generated in the range specified
}
// Entry point: parse an optional [start] end range from argv, then count
// and print the primes in [start, end) on the CPU (the CUDA port is still
// a TODO — see the notes at the end of the file).
int main(int argc, char* argv[]) {
    std::cout << "***Team /dev/null GPU610 PRIME NUMBER GENERATOR v1.2***\n";
    //In case the user didn't provide arguments
    uint64_t start = 0; //orig 21474836470000
    uint64_t end = 1000; //orig 214748364700000
    //Save runtime params into local variables, if provided
    if (argc == 2) {
        std::istringstream ss1(argv[1]);
        if (!(ss1 >> end))
            std::cout << "Bad input for end parameter\n";
    }
    // NOTE(review): the `else` below binds to `if (argc == 3)` only, so a
    // run with a single argument still prints "No range given" even though
    // `end` was parsed successfully above.
    if (argc == 3) {
        std::istringstream ss2(argv[1]);
        if (!(ss2 >> start))
            std::cout << "Bad input for start parameter\n";
        std::istringstream ss3(argv[2]);
        if (!(ss3 >> end))
            std::cout << "Bad input for end parameter\n";
    }
    else std::cout << "No range given (or bad input), using preset values\n";
    // NOTE(review): an invalid range is reported but execution continues
    // anyway (genPrime simply finds nothing when start >= end).
    if (start >= end) {
        std::cerr << "***Invalid input, start must be less than end***\n";
    }
    std::cout << "Generating from range (" << start << "~" << end << ")\n";
    std::cout << "--------------------------------------------------------------------------------\n";
    //Keep track of time spent doing calculations
    steady_clock::time_point ts, te;
    ts = steady_clock::now();
    //Generate primes
    //Starting with the thread calculations
    uint64_t threadStart = 0; //The first portion, where each thread begins
    uint64_t threadEnd, taskLength;
    int threadAmount = MAX_THREADS; //Replace MAX_THREADS with amount of threads once we find the function
    taskLength = (end - start) / threadAmount; //Assigns the length of each portion of the task. This is how much of the total function runs in each thread.
    threadEnd = taskLength;
    //CUDA Allocation (please freaking work)
    // NOTE(review): dead scaffolding for the planned kernel port —
    //  * `double* d_a, d_b;` declares d_b as a plain double, NOT a pointer;
    //  * h_a/h_b are sized by the range endpoints, never used, never freed;
    //  * d_a is cudaMalloc'd but never used or cudaFree'd.
    double* h_a = new double[start];
    double* h_b = new double[end];
    double* d_a, d_b;
    cudaMalloc((void**)&d_a, taskLength * sizeof(double));
    //End cuda allocation
    //Function call (still the serial CPU implementation)
    uint64_t count = genPrime(start, end); //REPLACE WITH CUDA KERNEL CALL
    te = steady_clock::now();
    std::cout << "\n--------------------------------------------------------------------------------\n"
        << "There are " << count << " prime numbers in the calculated range.\n";
    reportTime("Took: {0} seconds", te - ts);
    return 0;
}
/* Original code
int main()
{
for (int i = 2; i<100; i++)
for (int j = 2; j*j <= i; j++)
{
if (i % j == 0)
break;
else if (j + 1 > sqrt(i)) {
std::cout << i << " ";
}
}
return 0;
}
*/
//Changelog
/*
v1 - Generating from simple double loop
v1.0.1 - Command line parameter input
v1.1 - Nicer output format and error feedback
v1.2 - Full 64 bit integer compatibility
*/
/*TODO
- Write kernel function to replace genPrime()
-complete/correct CUDA memory and thread allocation
-Write new function call for genPrime<<<>>> //The <<<>>> mean kernel function
-... Yeah that's it.
*/ |
20,236 | #include <iostream>
using namespace std;
const int N = 16;
const int blocksize = 16;
// Element-wise matrix addition c = a + b over an N x N matrix stored
// row-by-row.  One thread per element; threads falling outside the matrix
// do nothing.
__global__
void add_matrix_gpu( float* a, float *b, float *c, int N )
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if ( col < N && row < N ) {
        const int idx = col + row * N;
        c[idx] = a[idx] + b[idx];
    }
}
// Entry point: add two 16x16 matrices on the GPU and print the result.
int main() {
    float *a = new float[N*N];
    float *b = new float[N*N];
    float *c = new float[N*N];
    // constant inputs: every output element should be 4.5
    for ( int i = 0; i < N*N; ++i ) {
        a[i] = 1.0f; b[i] = 3.5f;
    }
    float *ad, *bd, *cd;
    const int size = N*N*sizeof(float);
    // NOTE(review): no CUDA error checking anywhere in this program — a
    // failed malloc/launch would go unnoticed until the printed garbage.
    cudaMalloc( (void**)&ad, size );
    cudaMalloc( (void**)&bd, size );
    cudaMalloc( (void**)&cd, size );
    cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
    dim3 dimBlock( blocksize, blocksize );
    // N == blocksize here, so this is a 1x1 grid; the division truncates,
    // so it only covers the matrix when N is a multiple of blocksize.
    dim3 dimGrid(N/blocksize,N/blocksize);
    add_matrix_gpu<<<dimGrid, dimBlock>>>( ad, bd, cd, N );
    // the blocking device-to-host copy also synchronizes with the kernel
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    cudaFree( ad ); cudaFree( bd ); cudaFree( cd );
    for ( int i = 0; i < N; ++i ) {
        for ( int j = 0; j < N; ++j ) {
            int index = i + j*N;
            // if ( index%1000==0 )
            cout << c[index] << " ";
        }
        cout << endl;
    }
    delete[] a;
    delete[] b;
    delete[] c;
    return EXIT_SUCCESS;
}
|
20,237 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define n 1024
// Convert characters to upper case by subtracting 32 (ASCII 'a'->'A').
// Launched with a single block of n threads, one thread per character.
__global__ void sc(char *a, char c[n]) {
    const int tid = threadIdx.x;
    c[tid] = (char)((int)a[tid] - 32);
    // printf("%s\n", c[tid]);
}
// Entry point: upper-case a 1024-character buffer of 'a's on the GPU,
// print the per-character mapping, and report the elapsed time.
int main() {
    char a[n], c[n], *pa, *pc;
    for (int i = 0; i < n; i++) {
        a[i] = 'a';
    }
    printf("C == \n");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // NOTE(review): the timed region includes the cudaMallocs and both
    // copies, so "kernel exec time" below overstates the kernel itself.
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&pa, n * sizeof(char));
    cudaMalloc((void**)&pc, n * sizeof(char));
    cudaMemcpy(pa, a, n * sizeof(char), cudaMemcpyHostToDevice);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("Err1: %s\n", cudaGetErrorString(err));
    }
    // one block of n=1024 threads — the per-block limit on most devices
    sc<<<1, n>>>(pa, pc);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("Err2: %s\n", cudaGetErrorString(err));
    }
    cudaMemcpy(c, pc, n * sizeof(char), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float et;
    cudaEventElapsedTime(&et, start, stop);
    for (int i = 0; i < n; i++) {
        printf("%c -> %c\n", a[i], c[i]);
    }
    printf("Kernel exec time on device : %f\n", et);
    // NOTE(review): the two events are never destroyed (harmless at exit).
    cudaFree(pa);
    cudaFree(pc);
}
20,238 | //pass
//--gridDim=[64,1,1] --blockDim=[128,1,1]
// Adds the scalar b to every element of g_a in place.
// NOTE(review): intentionally no bounds guard — per the GPUVerify
// annotation above, the launch is fixed at gridDim=[64,1,1],
// blockDim=[128,1,1], so g_a must hold at least 8192 ints.
__global__ void kernelAddConstant(int *g_a, const int b)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    g_a[idx] += b;
}
|
20,239 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#define PI 3.14159265359
#define grid(i,k,nr) k*nr+i
#define omega 1.5
#define RelativeError 1e-3
#define epsilon 1e-12
#define nMax 256
#define zEvalsPerBlock 16
#define rEvalsPerBlock 16
// Polynomial approximation of the modified Bessel function I0(x) for any
// real x (small-|x| series for |x| < 3.75, asymptotic form otherwise).
// Symmetric in x, since only |x| enters each branch.
double Besseli0(double x){
    const double ax = fabs(x);
    double result;
    if (ax < 3.75) {
        double t = x / 3.75;
        t *= t;
        result = 1.0 + t*(3.5156229 + t*(3.0899424 + t*(1.2067492
               + t*(0.2659732 + t*(0.360768e-1 + t*0.45813e-2)))));
    } else {
        const double t = 3.75 / ax;
        result = (exp(ax)/sqrt(ax)) * (0.39894228 + t*(0.1328592e-1
               + t*(0.225319e-2 + t*(-0.157565e-2 + t*(0.916281e-2
               + t*(-0.2057706e-1 + t*(0.2635537e-1 + t*(-0.1647633e-1
               + t*0.392377e-2))))))));
    }
    return result;
}
// n-th Fourier coefficient of the boundary-potential expansion:
// B_n = 200 / (n*pi * I0(n*pi/2)).
double B(int n){
    const double npi = n*PI;
    return 200.0/(npi*Besseli0(npi/2));
}
// Truncated analytic series for the potential at (r, z) in a cylinder of
// length lz: V = sum_{n=1..nMax} B_n * I0(n*pi*r/lz) * sin(n*pi*z/lz).
double V(double r, double z, double lz){
    double total = 0;
    for (int n = 1; n <= nMax; n++) {
        total += B(n)*Besseli0(n*PI/lz*r)*sin(n*PI/lz*z);
    }
    return total;
}
//-----------------------------------------------------------------------------
// One red/black SOR relaxation sweep for the cylindrical Laplace problem.
// Each block stages its tile of volt_old in shared memory, then threads of
// the current colour (isRed) relax interior, non-halo points into volt_new
// with over-relaxation factor omega.  Each block owns two convergence flags
// (one per colour) at converge[2*(blockIdx.x*blockz + blockIdx.y) + colour].
// Block shape is assumed to be (zEvalsPerBlock+2) x (rEvalsPerBlock+2),
// including halo threads — TODO confirm against the launch configuration.
__global__ void update(double *volt_old, double * volt_new, bool isRed, bool *converge, int nr, int nz, double dr, double dz, int blockz){
    extern __shared__ double volt_s[];
    int i = blockIdx.x * zEvalsPerBlock + threadIdx.x;//x position index
    int k = blockIdx.y * rEvalsPerBlock + threadIdx.y;//y position index
    int sharedPos = threadIdx.y * (rEvalsPerBlock+2) + threadIdx.x;//Making 2D into 1D for the shared memory position in this block
    int blockPos = 2*(blockIdx.x * blockz + blockIdx.y);
    if(isRed){//Could have just as well been !isRed
        blockPos++;//Because each block has two convergence flags, need to only update one of the two
    }
    if(i< nr+2 && k < nz+2 && i != 0){//Within the domain of the grid
        volt_s[sharedPos] = volt_old[grid(i,k,(nr+2))];
    }
    //Because value of center of nozzle is a floating potential copy the value that is above it
    if(i==0){
        volt_s[sharedPos] = volt_old[grid(1,k,(nr+2))];//Zero gradient between r=0 and r=dr
    }
    __syncthreads();//This is to ensure that all the threads have copied values from the previous iteration to shared memory
    if((i%2 == k%2) == isRed){//Red or not Red?
        converge[blockPos] = true;//Default. Then all you need is one 'false' to force another iteration
    }
    __syncthreads();//ensures converge has been set to true for all threads
    if((i%2 == k%2) == isRed){//Red or not Red?
        if(threadIdx.x != 0 && threadIdx.y != 0 && threadIdx.x != zEvalsPerBlock + 1 && threadIdx.y != rEvalsPerBlock + 1){//not halo points
            if(i != 0 && i < nr+1 && k != 0 && k < nz+1){//not boundaries
                volt_new[grid(i,k,(nr+2))] = (1-omega)*volt_s[sharedPos];//copy a weighted fraction of the old
                //Then update with the remaining fraction with the new
                // BUGFIX: the radial metric factors used the integer
                // expression 1/(2*i), which truncates to 0 for every i >= 1
                // and silently erased the cylindrical (1 -/+ 1/(2i))
                // correction; evaluate it in double precision.
                volt_new[grid(i,k,(nr+2))] += omega *(
                    volt_s[sharedPos-(rEvalsPerBlock+2)]*dr*dr/(2*(dr*dr+dz*dz)) + //Bottom
                    volt_s[sharedPos+rEvalsPerBlock+2]*dr*dr/(2*(dr*dr+dz*dz)) + //Top
                    volt_s[sharedPos-1]*dz*dz/(2*(dr*dr+dz*dz))*(1-(1.0/(2.0*i))) + //Left
                    volt_s[sharedPos+1]*dz*dz/(2*(dr*dr+dz*dz))*(1+(1.0/(2.0*i))) //Right
                );
                //Convergence check (relative change against the old value)
                double relChange = fabs((volt_new[grid(i,k,(nr+2))] - volt_s[sharedPos])/(volt_s[sharedPos] + epsilon));
                if(relChange > RelativeError/max(nr,nz)){
                    converge[blockPos] = false;
                }//end converge check
            }//end of not boundaries
        }//end of not halo
    }//end of red/black
}//end of update
// Cell-centered E-field from the node voltages: each thread averages its 2x2
// voltage stencil and takes central differences. Er/Ez are (nr+1)x(nz+1).
// NOTE(review): each thread only reads back shared slots it wrote itself, so no
// __syncthreads() is required, but the shared cache also provides no reuse.
// There is no bounds guard, so the launch grid must exactly cover the output
// -- confirm before re-enabling the (currently commented-out) call site.
__global__ void getEFields(double *Er, double *Ez, double *volt, double dr, double dz, int nr){
  extern __shared__ double volt_s[];
  int i = blockIdx.x * zEvalsPerBlock + threadIdx.x;//x position index
  int k = blockIdx.y * rEvalsPerBlock + threadIdx.y;//y position index
  int sharedPos = threadIdx.y * (rEvalsPerBlock+1) + threadIdx.x;//Making 2D into 1D for the shared memory position in this block
  volt_s[sharedPos] = volt[grid(i,k,(nr+2))];//Bottom Left
  volt_s[sharedPos + 1] = volt[grid((i+1),k,(nr+2))];//Bottom right
  //BUG FIX: both "top" entries previously re-loaded grid(i,k), so vtop always
  //equalled vbot (Ez identically 0) and vright was wrong too. Load the k+1 row.
  volt_s[sharedPos + rEvalsPerBlock + 1] = volt[grid(i,(k+1),(nr+2))];//Top Left
  volt_s[sharedPos + rEvalsPerBlock + 1 + 1] = volt[grid((i+1),(k+1),(nr+2))];//Top right
  double vtop, vbot, vleft, vright;
  vtop = (volt_s[sharedPos + rEvalsPerBlock + 1] + volt_s[sharedPos + rEvalsPerBlock + 1 + 1])/2;
  vbot = (volt_s[sharedPos] + volt_s[sharedPos + 1])/2;
  vleft = (volt_s[sharedPos] + volt_s[sharedPos + rEvalsPerBlock + 1])/2;
  vright = (volt_s[sharedPos + 1] + volt_s[sharedPos + rEvalsPerBlock + 1 + 1])/2;
  Er[grid(i,k,(nr+1))] = -(vright-vleft)/dr;
  Ez[grid(i,k,(nr+1))] = -(vtop-vbot)/dz;
}
//-----------------------------------------------------------------------------
// Driver: reads boundary conditions / grid sizes from stdin, solves the
// cylindrical Laplace equation with red-black SOR on the GPU, then writes the
// voltage map and its error vs. the analytic solution V() to text files.
int main(){
  /*
  NOTE: i index refers to radial direction and it is treated equivalent to x
  k index refers to axial direction
  left and right in code refer to change in r while top and bottom refers
  to change in z
  This changes in plotting where z is plotted on the horizontal axis and
  r is plotted on the vertical axis
  */
  //Get boundary conditions (input format: label value label value ...)
  //NOTE(review): scanf return values are unchecked; malformed input leaves
  //these variables uninitialized -- validate if input may be untrusted.
  double vleft, vright;
  scanf("%*s");
  scanf("%lf", &vleft);
  scanf("%*s");
  scanf("%lf", &vright);
  //Get segmentation info
  int nz, nr;
  scanf("%*s");
  scanf("%d", &nz);
  scanf("%*s");
  scanf("%d", &nr);
  //Get length info
  double lz, lr;
  scanf("%*s");
  scanf("%lf", &lz);
  scanf("%*s");
  scanf("%lf", &lr);
  //Calculate segment length
  double dz = lz/(nz+1);
  double dr = lr/(nr+1);
  //Allocate grid memory on both host and device
  double *volt_h; //for host
  double *volt_d_old, *volt_d_new; //for device
  double *error;  //host-side difference from the analytic solution V()
  size_t gridSize = (nr+2)*(nz+2)*sizeof(double);
  cudaMalloc(&volt_d_new, gridSize);
  cudaMalloc(&volt_d_old, gridSize);
  volt_h = (double*)malloc(gridSize);
  error = (double*)malloc(gridSize);
  //Boundary conditions
  //get change in voltage per change in z
  double VdeltaZ = 100.0/(nz+1);
  //Left and right
  for(int i = 0; i< nr + 2; i++){
    volt_h[grid(i,0,(nr+2))] = vleft;
    volt_h[grid(i,(nz+1),(nr+2))] = vright;
  }
  //Top
  for(int k=0; k< nz+2; k++){
    volt_h[grid((nr+1),k,(nr+2))] = 100 - VdeltaZ*k;
  }
  //initial guess
  for(int i = 1; i< nr + 1; i++){
    for(int k = 1; k< nz + 1; k++){
      volt_h[grid(i,k,(nr+2))] = (vleft + vright)/2;
    }
  }
  //copy memory down to device
  cudaMemcpy(volt_d_new, volt_h, gridSize, cudaMemcpyHostToDevice);
  //Get block dimenions
  dim3 threadDim(zEvalsPerBlock + 2, rEvalsPerBlock + 2);// +2 accounts for halo points
  int blockr = 1 + ((nr-1)/rEvalsPerBlock);//nr is the number of interior r points
  int blockz = 1 + ((nz-1)/zEvalsPerBlock);//nz is the number of interior z points
  dim3 blockDim(blockr, blockz);
  //Allocate converge check memory
  bool *converge_d;
  bool *converge_h;
  size_t convSize = 2*blockr*blockz*sizeof(bool);//Each block has a red conv pos and a black conv pos therefore 2*numBlocks
  converge_h = (bool*)malloc(convSize);
  cudaMalloc(&converge_d, convSize);
  int steps = 0;
  bool didConverge = false;
  printf("Blocks: %d\n", blockr*blockz);
  //Red-black SOR: each half-iteration updates one color from a snapshot of the
  //previous values; the per-block flags decide whether another sweep is needed.
  while(!didConverge){
    cudaMemcpy(volt_d_old, volt_d_new, gridSize, cudaMemcpyDeviceToDevice);
    //Evaluate red blocks
    update<<<blockDim,threadDim,(zEvalsPerBlock+2)*(rEvalsPerBlock+2)*sizeof(double)>>>(volt_d_old, volt_d_new, true, converge_d, nr, nz, dr, dz, blockz);
    cudaMemcpy(volt_d_old, volt_d_new, gridSize, cudaMemcpyDeviceToDevice);
    //Evaluate black blocks
    update<<<blockDim,threadDim,(zEvalsPerBlock+2)*(rEvalsPerBlock+2)*sizeof(double)>>>(volt_d_old, volt_d_new, false, converge_d, nr, nz, dr, dz, blockz);
    //copy back converge check (blocking copy also synchronizes with the kernels)
    cudaMemcpy(converge_h, converge_d, convSize, cudaMemcpyDeviceToHost);
    steps++;
    //all converge must be true
    didConverge = converge_h[0];
    for(int i = 1; i< 2*blockr*blockz; i++){
      didConverge = didConverge && converge_h[i];
    }
  }//converged
  printf("\nconverged in %d steps.\n", steps);
  cudaMemcpy(volt_h, volt_d_new, gridSize, cudaMemcpyDeviceToHost);
  //r=0 row mirrors r=dr (floating potential at the axis, matching the kernel)
  for(int k = 0; k< nz+2; k++){
    volt_h[grid(0,k,(nr+2))] = volt_h[grid(1,k,(nr+2))];
  }
  //---------------------------------------------------------------------------//
  //This section deals with E fields
  /*
  size_t Esize = (nr+1)*(nz+1)*sizeof(double);
  double *Er_d, *Ez_d, *Er_h, *Ez_h;
  cudaMalloc(&Er_d, Esize);
  cudaMalloc(&Ez_d, Esize);
  Er_h = (double*)malloc(Esize);
  Ez_h = (double*)malloc(Esize);
  //Get block dimenions
  dim3 enumthreadDim(zEvalsPerBlock, rEvalsPerBlock);
  int eblockr = 1 + ((nr+1-1)/rEvalsPerBlock);//nr is the number of interior r points
  int eblockz = 1 + ((nz+1-1)/zEvalsPerBlock);//nz is the number of interior z points
  dim3 eblockDim(eblockr, eblockz);
  getEFields<<<eblockDim,enumthreadDim,(rEvalsPerBlock + 1)*(zEvalsPerBlock + 1)*sizeof(double)>>>();
  cudaMemcpy(Er_h, Er_d, Esize, cudaMemcpyDeviceToHost);
  cudaMemcpy(Ez_h, Ez_d, Esize, cudaMemcpyDeviceToHost);
  */
  //----------------------------------------------------------------------------//
  //Output data for contour maps plotly: first the z axis values, then the r
  //axis values, then one row of voltages (and errors) per radial index.
  FILE *myfile, *err;
  myfile = fopen("voltageRes.txt", "w");
  err = fopen("voltageErr.txt", "w");
  int i, j;
  for(i = 0; i< nz+1; i++){
    fprintf(myfile, "%lf ", i*dz);
    fprintf(err, "%lf ", i*dz);
  }
  fprintf(myfile, "%lf\n", i*dz);//This is the nz+1 point.
  fprintf(err, "%lf\n", i*dz);//This is the nz+1 point.
  for(j = 0; j<nr+1; j++){
    fprintf(myfile, "%lf ", j*dr);
    fprintf(err, "%lf ", j*dr);
  }
  fprintf(myfile, "%lf\n", j*dr);//This is the nr+1 point.
  fprintf(err, "%lf\n", j*dr);//This is the nr+1 point.
  for(j = 0; j<nr+2; j++){
    for(i = 0; i< nz+1; i++){
      if(i==0 || j==nr+1){
        error[grid(j,i,(nr+2))] = 0;//boundary rows/columns are exact by construction
      }
      else{
        error[grid(j,i,(nr+2))] =volt_h[grid(j,i,(nr+2))] - V(j*dr, i*dz, lz);
      }
      fprintf(myfile, "%lf ", volt_h[grid(j,i,(nr+2))]);
      fprintf(err, "%lf ", error[grid(j,i,(nr+2))]);
    }
    error[grid(j,i,(nr+2))] = 0;
    fprintf(myfile, "%lf\n", volt_h[grid(j,i,(nr+2))]);//nz+1 point for each j
    fprintf(err, "%lf\n", error[grid(j,i,(nr+2))]);//nz+1 point for each j
  }
  fprintf(myfile, "%lf %lf\n", dz, dr);
  fclose(myfile);
  fclose(err);
  cudaFree(volt_d_new);
  cudaFree(volt_d_old);
  cudaFree(converge_d);
  //cudaFree(Er_d);
  //cudaFree(Ez_d);
  //free(Er_h);
  //free(Ez_h);
  free(volt_h);
  free(converge_h);
  free(error);//BUG FIX: this buffer was previously leaked
}
|
20,240 | /*
#include <iostream>
using namespace std;
__global__
void kernelFunction()
{
return;
}
//extern "C"
void CudaMain()
{
int threads = 32;
dim3 gridSize(1, 1, 1);
dim3 blockSize(threads, 1, 1);
kernelFunction<<<gridSize, blockSize>>>();
}
*/
|
20,241 | #include <stdio.h>
#include<stdlib.h>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#define BLOCK_SIZE 64
// Block-level reduction: each block sums 2*BLOCK_SIZE consecutive elements of
// `input` (zero-padding past `len`) and writes one partial sum to
// output[blockIdx.x]. Host must launch with BLOCK_SIZE threads per block and
// one block per output element, then reduce the partials.
__global__ void
totalKernel(float * input, float * output, int len)
{
    __shared__ float partialSum[2*BLOCK_SIZE];
    unsigned int t = threadIdx.x;
    unsigned int start = blockIdx.x*blockDim.x*2;
    // Each thread stages two elements; out-of-range slots become 0 so the
    // tree reduction below stays correct for ragged tails.
    if((start+t)<len)
    {
        partialSum[t] = input[start+t];
        if(start+t+blockDim.x < len)
            partialSum[blockDim.x+t] = input[start+t+blockDim.x];
        else
            partialSum[blockDim.x+t] =0;
    }
    else
    {
        partialSum[t] = 0;
        partialSum[blockDim.x+t] = 0;
    }
    __syncthreads();
    // Tree reduction; the barrier at the top of each iteration separates the
    // writes of one stride from the reads of the next.
    for(unsigned int stride = blockDim.x; stride >0; stride/=2){
        __syncthreads();
        if(t<stride)
            partialSum[t]+=partialSum[t+stride];
    }
    // BUG FIX: previously every thread wrote output[blockIdx.x]. There is no
    // barrier after the final (stride == 1) iteration, so threads with t > 0
    // could read partialSum[0] while thread 0 was still adding partialSum[1]
    // into it -- a data race. Only thread 0, which owns the final sum, writes.
    if (t == 0)
        output[blockIdx.x] = partialSum[0];
}
// Serial reference reduction: sums the first `len` inputs into output[0].
void totalCPU(float * input, float * output, int len)
{
    float acc = 0.0f;
    for (int k = 0; k < len; ++k)
        acc += input[k];
    output[0] = acc;
}
// Fills a[0..255] with the fixed demo data set. The parameters `b` and `len`
// are unused; they are kept only to preserve the original call signature.
// (The old file-reading path for dataSetRaw0.txt was already disabled.)
void loadVal(float *a, float *b, int len)
{
    static const float kData[256] = {
         7,  9,  1,  4,  2,  8, 10,  5, 10,  7,  5,  7,  8,  6,  4,  6,
         6,  3,  4,  0,  1, 10,  5,  8,  7,  0,  2,  9,  2,  8,  4,  3,
         2,  1,  4, 10,  3,  9,  6,  9,  4,  7,  3,  3,  3,  1,  5,  5,
         0,  7,  7,  2,  5,  7,  9,  5,  8,  5,  0, 10,  3,  9,  5, 10,
         8,  4,  8,  8,  2,  6,  9,  6,  9,  0,  9,  7,  3,  1,  8,  7,
         0, 10,  9,  8,  7, 10,  9,  1,  4,  3,  1,  8,  2,  4,  8,  1,
         1,  2,  7, 10,  6, 10,  0,  0,  2,  2,  5,  1,  6, 10,  2,  2,
         8, 10, 10,  9,  4,  5,  9,  3,  3,  4,  6,  8,  8,  9,  9,  3,
         1,  4, 10,  7,  7,  0,  4,  4,  7,  7,  0,  1,  5,  4,  4,  8,
         9, 10, 10, 10,  3,  4, 10,  6,  9,  7, 10, 10,  2,  8,  5,  5,
         7,  9,  1,  3,  6,  6,  5,  3,  9,  6,  6,  7,  1,  4,  8,  8,
         6,  2,  9,  8,  5,  5,  5,  3,  0,  8,  0,  4,  8,  7,  9, 10,
         0,  5, 10,  8,  3,  1,  8,  3,  1, 10,  5,  8,  2,  6,  1,  7,
        10,  7,  9,  5,  9,  3,  1,  5,  0,  9,  3,  6,  5, 10,  7,  5,
        10,  8,  7,  3,  1,  9,  5,  8,  7,  8,  5,  3,  3,  0,  3,  1,
        10,  6,  8,  8,  7,  7,  1,  5, 10,  1,  8, 10,  1,  9,  1,  1
    };
    for (int idx = 0; idx < 256; ++idx)
        a[idx] = kData[idx];
}
// Prints the reduction result to stdout and then blocks on a Windows-style
// "pause" shell command so the console window stays open.
void dispRes(float arr)
{
    printf("result = ");
    printf("%f ",arr);
    system("pause");
}
// Host driver: loads 256 demo values, reduces them on the GPU (one partial sum
// per block), then finishes the reduction on the CPU and prints the result.
int main(int argc,char*argv[])
{
    int ii;
    float * hostInput; // The input 1D list
    float * hostOutput; // One partial sum per block
    float * deviceInput;
    float * deviceOutput;
    int numInputElements = 256; // number of elements in the input list
    int numOutputElements = 0; // number of elements in the output list
    // Each block reduces 2*BLOCK_SIZE inputs, so round up.
    numOutputElements = numInputElements / (BLOCK_SIZE<<1);
    if (numInputElements % (BLOCK_SIZE<<1)) {
        numOutputElements++;
    }
    hostInput = (float*)malloc(numInputElements*sizeof(float));
    hostOutput = (float*)malloc(numOutputElements*sizeof(float));
    //cuda memory allocation on the device
    cudaMalloc((void**)&deviceInput,numInputElements*sizeof(float));
    cudaMalloc((void**)&deviceOutput,numOutputElements*sizeof(float));
    printf("Loading values to the array...\n");
    loadVal(hostInput,hostOutput,numInputElements);
    //cuda memory copy from host to device
    cudaMemcpy(deviceInput,hostInput,numInputElements*sizeof(float),cudaMemcpyHostToDevice);
    //CPU equivalent
    //totalCPU(hostInput,hostOutput,numInputElements);
    //dispRes(hostOutput[0]);
    printf("Calling CUDA kernel...\n");
    // BUG FIX: the grid was previously sized (numInputElements-1)/BLOCK_SIZE+1
    // (4 blocks here) even though every block consumes 2*BLOCK_SIZE elements
    // and writes one output. The extra blocks wrote zeros past the end of
    // deviceOutput (allocated for numOutputElements floats) -- an out-of-bounds
    // device write. Launch exactly one block per output element.
    dim3 DimGrid(numOutputElements,1,1);
    dim3 DimBlock(BLOCK_SIZE,1,1);
    totalKernel<<<DimGrid,DimBlock>>>(deviceInput,deviceOutput,numInputElements);
    //copy back (this blocking cudaMemcpy also synchronizes with the kernel)
    cudaMemcpy(hostOutput,deviceOutput,numOutputElements*sizeof(float),cudaMemcpyDeviceToHost);
    // Finish the reduction over the per-block partial sums on the host.
    for (ii = 1; ii < numOutputElements; ii++) {
        hostOutput[0] += hostOutput[ii];
    }
    dispRes(hostOutput[0]);
    free(hostInput);
    free(hostOutput);
    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    return 0;
} |
20,242 | #include "includes.h"
// Naive dense NxN matrix multiply: one thread computes one element of C.
// Launch with a 2D grid/block covering at least NxN threads; extra threads
// fall through the guard and do nothing.
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
    int ROW = blockIdx.y*blockDim.y+threadIdx.y;
    int COL = blockIdx.x*blockDim.x+threadIdx.x;
    float tmpSum = 0;
    if (ROW < N && COL < N) {
        // each thread computes one element of the block sub-matrix
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        // BUG FIX: the store was previously outside the bounds check, so
        // threads past the matrix edge performed an out-of-bounds write of 0.
        C[ROW * N + COL] = tmpSum;
    }
} |
20,243 | #include <stdio.h>
#include <time.h>
#define LEN 256
#define TILESZ 16
// Uncomment this line if you want to display //
// the result of the computation. //
// #define DISPLAY 1
static double CLOCK();
__global__ void matInit(float*);
__global__ void stencil(float*, float*);
__global__ void stencil_tiled(float*, float*);
// Benchmarks a 6-point 3D stencil (plain or tiled, chosen at compile time via
// -DTILED) on a LEN^3 grid and reports the kernel wall time in milliseconds.
int main(int argc, char** argv) {
    float *a, *a_host, *b;
    a_host = (float*) malloc(sizeof(float) * LEN*LEN*LEN);
    cudaMalloc(&a, sizeof(float) * LEN*LEN*LEN);
    cudaMalloc(&b, sizeof(float) * LEN*LEN*LEN);
    cudaMemset(a, 0, sizeof(float) * LEN*LEN*LEN);
    cudaMemset(b, 0, sizeof(float) * LEN*LEN*LEN);
    dim3 Grid, Block;
#ifdef TILED
    Grid = dim3(LEN, LEN/TILESZ, LEN/TILESZ);
    // Block = dim3(TILESZ, TILESZ);
    Block = dim3(TILESZ);
#else
    Grid = dim3(LEN);
    Block = dim3(LEN);
#endif // ifdef TILED
    ////////////////////////////
    //  Initialize matrix b   //
    ////////////////////////////
    matInit<<<LEN, LEN>>>(b);
    ////////////////////////////
    //  stencil computation   //
    ////////////////////////////
    double start, end;
    start = CLOCK();
#ifdef TILED
    stencil_tiled<<<Grid, Block>>>(a, b);
#else
    stencil<<<Grid, Block>>>(a, b);
#endif // #ifdef TILED
    // Kernel launches are asynchronous; wait before stopping the clock.
    cudaDeviceSynchronize();
    end = CLOCK();
    /////////////////////////
    //  Display the result //
    /////////////////////////
#ifdef DISPLAY
    cudaMemcpy(a_host, a, sizeof(float) * LEN*LEN*LEN, cudaMemcpyDeviceToHost);
    for (int i=0; i<LEN; ++i)
        for (int j=0; j<LEN; ++j)
            for (int k=0; k<LEN; ++k) {
                printf("(i=%d, j=%d, k=%d) = %.2f\n",
                       i, j, k, a_host[i*LEN*LEN+j*LEN+k]);
            }
#endif // DISPLAY
#ifdef TILED
    printf("stencil-tiled took %.2f ms\n", end-start);
#else
    printf("stencil took %.2f ms\n", end-start);
#endif // #ifdef TILED
    // BUG FIX: a_host and the two device buffers were previously leaked.
    free(a_host);
    cudaFree(a);
    cudaFree(b);
    return 0;
}
// Initializes `mat` so every element equals its own linear index.
// Launched as <<<LEN, LEN>>>: blockIdx.x is the i plane, threadIdx.x is the j
// row, and each thread fills one full k-line.
__global__ void
matInit(float* mat) {
    const int row  = blockIdx.x;
    const int col  = threadIdx.x;
    const int cols = blockDim.x;
    const int L    = LEN;
    const int base = (row * cols + col) * L;   // == i*N*L + j*L
    for (int k = 0; k < L; ++k)
        mat[base + k] = base + k;
}
// 6-point 3D stencil: a[x][y][z] = 0.8 * (sum of b's six face neighbours),
// interior points only (boundary planes of `a` stay as the caller left them;
// main zeroes them with cudaMemset). Launched as <<<LEN, LEN>>>: blockIdx.x
// supplies x, threadIdx.x supplies y, and each thread loops over z.
// NOTE(review): Y is taken from gridDim.x but used as the threadIdx.x extent;
// this is only correct because the launch uses gridDim.x == blockDim.x == LEN.
// Confirm before changing the launch configuration.
__global__ void
stencil(float *a, float *b) {
    int x = blockIdx.x, X = gridDim.x,
        y = threadIdx.x, Y = gridDim.x,
        Z = Y;
    int tId = x*Y + y;
    if ((x > 0 && x < X-1) &&
        (y > 0 && y < Y-1)) {
        for (int z = 1; z<Z-1; ++z) {
            float b1 = b[(x-1)*Y*Z + y*Z + z],
                  b2 = b[(x+1)*Y*Z + y*Z + z],
                  b3 = b[x*Y*Z + (y-1)*Z + z],
                  b4 = b[x*Y*Z + (y+1)*Z + z],
                  b5 = b[x*Y*Z + y*Z + (z-1)],
                  b6 = b[x*Y*Z + y*Z + (z+1)];
            a[tId*Z + z] = 0.8*(b1+b2+b3+b4+b5+b6);
        }
    }
}
// Tiled variant of the 6-point stencil. Grid is (LEN, LEN/TILESZ, LEN/TILESZ)
// with TILESZ threads per block: the logical y index is y*S + s (block y plus
// thread lane) and the logical z index is z*T + t (block z plus the t loop).
// The guards exclude only the global first/last logical y and z slots and the
// global x boundary, matching the interior-only update of `stencil`.
__global__ void
stencil_tiled(float *a, float *b) {
    int x = blockIdx.x, X = gridDim.x,
        y = blockIdx.y, Y = gridDim.y,
        z = blockIdx.z, Z = gridDim.z,
        s = threadIdx.x, S = blockDim.x,
        T = S;
    int tId = x*Y*Z*S + y*Z*S +s*Z + z;
    // (y,s) == (0,0) is logical row 0; (Y-1,S-1) is the last logical row.
    if ((x > 0 && x < X-1) &&
        (y != 0 || s != 0) && (y != Y-1 || s != S-1))
        for (int t=0; t<T; ++t)
            // Same first/last exclusion for the logical z slot (z,t).
            if ((z != 0 || t != 0) && (z != Z-1 || t != T-1)) {
                float b1 = b[(x-1)*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t],
                      b2 = b[(x+1)*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t],
                      b3 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t - (T*Z)],
                      b4 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + t + (T*Z)],
                      b5 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + (t-1)],
                      b6 = b[x*Y*Z*S*T + y*Z*S*T +s*Z*T + z*T + (t+1)];
                a[tId*T+t] = 0.8*(b1+b2+b3+b4+b5+b6);
            }
}
// Monotonic wall-clock time in milliseconds, as a double.
double CLOCK() {
    struct timespec ts = {0, 0};
    clock_gettime(CLOCK_MONOTONIC, &ts);
    double ms = (double)ts.tv_sec * 1.0e3;
    ms += (double)ts.tv_nsec * 1.0e-6;
    return ms;
}
|
20,244 | /*
Game Interface for Tic-Tac-Toe
Rahul Kejriwal
CS14B023
*/
#include <stdio.h>
#include "GameState.cu"
#define BOARD_SIZE 9
#define WIN_SIZE 8
#define ROW_SIZE 3
#define NUM_ROWS 3
#define NUM_COLS 3
#define OFFSET(i,j) ((i)*NUM_COLS + (j))
__device__
int GPU_winning_patterns[WIN_SIZE][ROW_SIZE] = {
{0, 1, 2},
{3, 4, 5},
{6, 7, 8},
{0, 3, 6},
{1, 4, 7},
{2, 5, 8},
{0, 4, 8},
{2, 4, 6},
};
int CPU_winning_patterns[WIN_SIZE][ROW_SIZE] = {
{0, 1, 2},
{3, 4, 5},
{6, 7, 8},
{0, 3, 6},
{1, 4, 7},
{2, 5, 8},
{0, 4, 8},
{2, 4, 6},
};
__device__
int GPU_evaluationTable[NUM_ROWS][NUM_COLS] = {
{3, 2, 3},
{2, 4, 2},
{3, 2, 3}
};
int CPU_evaluationTable[NUM_ROWS][NUM_COLS] = {
{3, 2, 3},
{2, 4, 2},
{3, 2, 3}
};
// Concrete Tic-Tac-Toe game state for the generic GameState search interface.
// Works on both host and device; the winning-pattern / evaluation tables are
// duplicated as GPU_*/CPU_* and selected with __CUDA_ARCH__.
class TicTacToeState : public GameState {
	/*
		2 arrays for game board:
			occupied - whether position is occupied
			owner    - if occupied, which player's piece is it
		Linearized arrays
			i <-> (i/3, i%3)
	*/
	bool occupied[BOARD_SIZE];
	bool owner[BOARD_SIZE];
	/*
		Store if game is over
		If over, store winner
	*/
	bool isOver;
	int winner;
	/*
		Player heuristics
	*/
	int p0_hval;
	int p1_hval;
public:
	/*
		Store pointer to parent GameState
	*/
	TicTacToeState *parent_node;
	/*
		Initialize game state
	*/
	__host__ __device__
	TicTacToeState(){
		for(int i=0; i<BOARD_SIZE; i++)
			occupied[i] = false;
		turn = false;
		p0_hval = p1_hval = 0;
		optimal_move = 0;
		parent_node = NULL;
		// BUG FIX: isOver and winner were previously left uninitialized, so
		// isTerminal()/heuristicEval() on a fresh root state read garbage.
		isOver = false;
		winner = 0;
	}
	/*
		Copy Constructor
	*/
	__host__ __device__
	TicTacToeState(TicTacToeState &other){
		isOver = other.isOver;
		winner = other.winner;
		turn = other.turn;
		moves_length = other.moves_length;
		p0_hval = other.p0_hval;
		p1_hval = other.p1_hval;
		parent_node = other.parent_node;
		optimal_move = other.optimal_move;
		memcpy(owner, other.owner, BOARD_SIZE*sizeof(bool));
		memcpy(occupied, other.occupied, BOARD_SIZE*sizeof(bool));
	}
	/*
		Populates moves of parent with possible moves.
		NOTE(review): allocates a fresh `moves` array on every call and never
		frees the previous one -- callers own the cleanup; confirm upstream.
	*/
	__host__ __device__
	void moveGen(){
		moves_length = BOARD_SIZE;
		moves = new bool[BOARD_SIZE];
		for(int i=0; i<BOARD_SIZE; i++)
			moves[i] = !occupied[i];
	}
	// Variant that also reports how many legal moves exist via *num_moves.
	__host__ __device__
	void moveGen(int *num_moves){
		*num_moves = 0;
		moves_length = BOARD_SIZE;
		moves = new bool[BOARD_SIZE];
		for(int i=0; i<BOARD_SIZE; i++){
			moves[i] = !occupied[i];
			*num_moves += moves[i];
		}
	}
	/*
		Returns if the current game state is a terminal game tree node
	*/
	__host__ __device__
	bool isTerminal(){
		return isOver;
	}
	/*
		Evaluation function: positional-weight difference while the game runs,
		+/-100 (from player 0's perspective) once a winner is known.
	*/
	__host__ __device__
	int heuristicEval(){
		if(!isOver)
			return p0_hval-p1_hval;
		else
			return -winner*100;
	}
	/*
		Updates isOver and winner by scanning the 8 winning lines, then
		checking for a draw (full board). winner: -1 = player 0 (X),
		1 = player 1 (O), 0 = draw or unfinished.
	*/
	__host__ __device__
	void updateIfWinner(){
		isOver = false;
		// Check if somebody won
		for(int i=0; i<WIN_SIZE; i++){
#ifdef __CUDA_ARCH__
			if(
				occupied[GPU_winning_patterns[i][0]] &&
				occupied[GPU_winning_patterns[i][1]] &&
				occupied[GPU_winning_patterns[i][2]] &&
				owner[GPU_winning_patterns[i][0]] == owner[GPU_winning_patterns[i][1]] &&
				owner[GPU_winning_patterns[i][1]] == owner[GPU_winning_patterns[i][2]]
			){
				isOver = true;
				winner = (owner[GPU_winning_patterns[i][0]] == false)? -1 : 1;
				return;
			}
#else
			if(
				occupied[CPU_winning_patterns[i][0]] &&
				occupied[CPU_winning_patterns[i][1]] &&
				occupied[CPU_winning_patterns[i][2]] &&
				owner[CPU_winning_patterns[i][0]] == owner[CPU_winning_patterns[i][1]] &&
				owner[CPU_winning_patterns[i][1]] == owner[CPU_winning_patterns[i][2]]
			){
				isOver = true;
				winner = (owner[CPU_winning_patterns[i][0]] == false)? -1 : 1;
				return;
			}
#endif
		}
		// Check if game was draw
		bool done = true;
		for(int i=0; i<BOARD_SIZE; i++)
			done &= occupied[i];
		if(done){
			isOver = true;
			winner = 0;
			return;
		}
		// Game unfinished
		winner = 0;
		return;
	}
	/*
		Creates new TicTacToeState by making move at loc
		Note: Does not check validity of move, assumes it is correct
	*/
	__host__ __device__
	TicTacToeState* makeMove(int loc){
		TicTacToeState *new_state = new TicTacToeState(*this);
		new_state->turn = !this->turn;
		new_state->occupied[loc] = true;
		new_state->owner[loc] = this->turn;
		new_state->parent_node = this;
		// child_num = rank of this move among the parent's empty squares
		int child_val = 0;
		for(int i=0;i<=loc;i++){
			if(this->occupied[i]==0){
				child_val++;
			}
		}
#ifdef __CUDA_ARCH__
		if(turn == false) new_state->p0_hval += GPU_evaluationTable[loc/NUM_ROWS][loc%NUM_ROWS];
		else new_state->p1_hval += GPU_evaluationTable[loc/NUM_ROWS][loc%NUM_ROWS];
#else
		if(turn == false) new_state->p0_hval += CPU_evaluationTable[loc/NUM_ROWS][loc%NUM_ROWS];
		else new_state->p1_hval += CPU_evaluationTable[loc/NUM_ROWS][loc%NUM_ROWS];
#endif
		new_state->child_num = child_val;
		new_state->updateIfWinner();
		return new_state;
	}
	/*
		Prints the board for DEBUG purposes
	*/
	__host__ __device__
	void printState(){
		for(int i=0; i<NUM_ROWS; i++){
			for(int j=0; j<NUM_ROWS; j++)
				printf(occupied[OFFSET(i,j)]?(owner[OFFSET(i,j)]?"O ":"X "):"- ");
			printf("\n");
		}
	}
	/*
		Returns board piece: 1 = O, -1 = X, 0 = empty
	*/
	__host__ __device__
	int piece(int i){
		return (occupied[i]?((owner[i])?1:-1):0);
	}
	__host__ __device__
	bool getTurn(){
		return turn;
	}
};
|
20,245 | // In this assignment you will write a basic kernel where every thread
// will write out to console string "Hello world!".
// You will also initialize GPU using cudaSetDevice() and also launch
// your "Hello world" kernel.
#include <stdio.h>
#include <stdlib.h>
// we have to include few more things
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
//----------------------------------------------------------------------
// TASK 2: Write a "Hello world" kernel
//
// Remember that kernel is defined by __global__ and inside it looks like
// a serial code for CPU. For printing out to console you can use printf().
// write your kernel here
// TASK 2 kernel: every launched thread prints one "hello world!" line via
// device printf (output is flushed at the next synchronizing call).
__global__ void helloWorld_GPU(void) {
    printf("hello world!\n");
}
//----------------------------------------------------------------------
// TASK 1+3 driver: selects device 0 if present, launches the hello-world
// kernel on 1 block x 5 threads, and resets the device before exiting.
int main(void) {
    //----------------------------------------------------------------------
    // TASK 1: Initiate GPU using cudaSetDevice(), but only if a device with
    // that id actually exists, so the program fails gracefully otherwise.
    int deviceid = 0;
    int devCount;
    cudaGetDeviceCount(&devCount);
    if (deviceid<devCount) {
        cudaSetDevice(deviceid);
    }
    else return(1);
    //----------------------------------------------------------------------
    // TASK 3: execute the "Hello world" kernel on 1 block with 5 threads
    // using the simplified one-dimensional execution configuration syntax.
    int nBlocks = 1;
    int nThreads = 5;
    helloWorld_GPU<<< nBlocks, nThreads >>>();
    // Kernel launches are asynchronous and report no error directly: check
    // for launch-configuration failures and wait for the kernel to finish so
    // its printf output is complete before the device is torn down.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return (1);
    }
    cudaDeviceSynchronize();
    //----------------------------------------------------------------------
    cudaDeviceReset();
    return (0);
}
|
20,246 | #include "includes.h"
// Builds the per-pixel products needed by a Lucas-Kanade-style optical-flow
// step. `derivatives` holds 5 planes of inputWidth*inputHeight floats:
// on entry planes 0/1 contain I_x and I_y; on exit
//   plane 0 = I_x^2, plane 1 = I_y^2, plane 2 = I_x*I_y,
//   plane 3 = I_x*I_t, plane 4 = I_y*I_t.
// Also updates lastInput in place to the current frame (used for the next I_t).
// One thread per pixel over a flattened 2D grid.
__global__ void PrepareDerivativesKernel(float* input, float* lastInput, float* derivatives, int inputWidth, int inputHeight)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    int size = inputWidth * inputHeight;
    if (id < size)
    {
        // Fixed-point style scale factor applied to all derivative terms.
        float mul = 100000;
        //I_x, I_y
        float I_x = mul * derivatives[id];
        float I_y = mul * derivatives[size + id];
        //I_t (temporal difference against the previous frame)
        float input_dt = mul * (input[id] - lastInput[id]);
        lastInput[id] = input[id];
        // I_x * I_y
        derivatives[2 * size + id] = I_x * I_y;
        // I_x * I_t
        derivatives[3 * size + id] = I_x * input_dt;
        // I_y * I_t
        derivatives[4 * size + id] = I_y * input_dt;
        // I_x ^ 2 (overwrites plane 0 only after I_x was read above)
        derivatives[id] = I_x * I_x;
        // I_y ^ 2
        derivatives[size + id] = I_y * I_y;
    }
} |
20,247 | #include "includes.h"
#define ARRAY_SIZE 200
#define ARRAY_BYTES ARRAY_SIZE * sizeof(float)
// Element-wise square: p_out[i] = p_in[i]^2.
// Indexing uses threadIdx.x only (no blockIdx, no length parameter), so this
// must be launched as a single block with one thread per element.
__global__ void CalculateSquare(float* p_out, float* p_in)
{
    const int i = threadIdx.x;
    const float v = p_in[i];
    p_out[i] = v * v;
} |
20,248 | #include "includes.h"
// For every vertex of a spherical mesh, computes the arc distance to each of
// its neighbours: dist = circumference * |acos(cos(angle between vertices))|.
// Adjacency is CSR-style: NEIGHBOR holds neighbour indices, NBOFFSETS the
// per-vertex start offset, nNeighbors the per-vertex count; `dist` is written
// in the same CSR layout. Each block handles 4*BLOCK_SIZE_CVD consecutive
// vertices, staging them in shared memory first.
__global__ void computeSphereVertexDistancesKernel(float4 *V, float *dist, unsigned int *NEIGHBOR, unsigned int *NBOFFSETS, unsigned int *nNeighbors, unsigned int nVertices, float circumference)
{
    int n,N;
    int offset,soffset;
    // since we are using multiple threads per blocks as well as multiple blocks
    int vidxb = 4*(blockIdx.x * blockDim.x) + threadIdx.x;
    int basevert = 4*(blockIdx.x * blockDim.x);
    int vidx,tab;
    float4 nv,tv;
    float dot,n1,n2,norm;
    // create a cache for 4 elements per block (4*BLOCK_SIZE elements)
    __shared__ float4 SI[4*BLOCK_SIZE_CVD];
    int bidx = threadIdx.x;
    // this means we have 128 neighboring vertices cached
    for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_CVD; vidx+=BLOCK_SIZE_CVD)
    {
        if (vidx < nVertices)
        {
            SI[bidx] = V[vidx];
            bidx+=BLOCK_SIZE_CVD;
        }
    }
    __syncthreads();
    bidx = threadIdx.x;
    // preload the current BLOCK_SIZE vertices
    for (vidx=vidxb; vidx<vidxb+4*BLOCK_SIZE_CVD; vidx+=BLOCK_SIZE_CVD)
    {
        if (vidx < nVertices)
        {
            offset = NBOFFSETS[ vidx ];
            N = nNeighbors[ vidx ];
            tv = SI[bidx];
            bidx += BLOCK_SIZE_CVD;
            for (n = 0; n < N; n++)
            {
                soffset = NEIGHBOR[offset+n];
                /* There seems to be little to NO benefit of this local caching,
                   either because we have no hits, or reading from the shared memory
                   is just as slow as reading from global memory
                */
                // NOTE(review): `tab > 0` (not >= 0) means cache slot 0 is never
                // used and falls back to a global read -- harmless, just a miss.
                tab = soffset - basevert;
                if (tab > 0 && tab < 4*BLOCK_SIZE_CVD)
                {
                    nv = SI[tab];
                }
                else
                {
                    nv = V[soffset];
                }
                // avoid FMADS (explicit rounded mul/add for reproducibility)
                //dot = tv.x*nv.x + tv.y*nv.y + tv.z*nv.z;
                dot = __fmul_rn(tv.x,nv.x);
                dot = __fadd_rn(dot,__fmul_rn(tv.y,nv.y));
                dot = __fadd_rn(dot,__fmul_rn(tv.z,nv.z));
                //n1 = tv.x*tv.x + tv.y*tv.y + tv.z*tv.z;
                n1 = __fmul_rn(tv.x,tv.x);
                n1 = __fadd_rn(n1,__fmul_rn(tv.y,tv.y));
                n1 = __fadd_rn(n1,__fmul_rn(tv.z,tv.z));
                //n2 = nv.x*nv.x + nv.y*nv.y + nv.z*nv.z;
                n2 = __fmul_rn(nv.x,nv.x);
                n2 = __fadd_rn(n2,__fmul_rn(nv.y,nv.y));
                n2 = __fadd_rn(n2,__fmul_rn(nv.z,nv.z));
                norm = __fmul_rn(__fsqrt_rn(n1),__fsqrt_rn(n2));
                //norm = __fsqrt_rn(n1) * __fsqrt_rn(n2);
                // this seems to be a quell of numerical error here:
                // degenerate vectors or |cos| > 1 (acos domain error) yield 0
                if (norm < 1.0e-7f)
                {
                    dist[offset+n] = 0.0f;
                }
                else if (fabsf(dot) > norm)
                {
                    dist[offset+n] = 0.0f;
                }
                else
                {
                    dist[offset+n] = __fmul_rn(circumference,fabsf(acosf(dot/norm)));
                }
            }
        }
    }
} |
20,249 | // hello_world.cu
|
20,250 | #include <stdio.h>
// #include <cuda.h>
__global__
void foo (float *farr) {
    // Copies element 1 into element 0 of the device array.
    // NOTE(review): the caller below never initializes farr[1], so farr[0]
    // receives an indeterminate value -- this appears to be a launch demo only.
    farr[0] = farr[1];
}
// Minimal launch demo: allocates two device floats, runs foo once, cleans up.
int main (void) {
    float *d_farr;
    cudaMalloc(&d_farr, sizeof(float)*2);
    foo<<<1, 1>>>(d_farr);
    // The launch is asynchronous: wait for it so any execution error surfaces
    // before exit, and release the allocation instead of leaking it.
    cudaDeviceSynchronize();
    cudaFree(d_farr);
    return 0;
}
|
20,251 | #include "includes.h"
extern "C" {
#ifndef NUMBER
#define NUMBER float
#endif
}
// Strided vector copy: y[offset_y + i*stride_y] = x[offset_x + i*stride_x]
// for i in [0, n). One thread per element; threads past n exit via the guard.
__global__ void vector_copy (const int n, const NUMBER* x, const int offset_x, const int stride_x, NUMBER* y, const int offset_y, const int stride_y) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    y[offset_y + i * stride_y] = x[offset_x + i * stride_x];
} |
20,252 | #include<iostream>
#include<vector>
// Element-wise vector add: c[i] = a[i] + b[i] for the first `num` elements.
// Indexing uses threadIdx.x only, so this expects a single-block launch.
__global__
void vecadd(float *a, float *b, float *c, int num)
{
    int i = threadIdx.x;
    // Guard against launches with more threads than elements: `num` was
    // previously accepted but ignored, allowing out-of-bounds accesses.
    if (i < num)
        c[i] = a[i] + b[i];
}
// Adds two vectors of ones on the GPU and prints the result (expected: 2s).
int main(int argc, char *argv[])
{
    const int num = 16;
    std::vector<float> a(num, 1);
    std::vector<float> b(num, 1);
    std::vector<float> c(num, 0);
    float *d_a;
    float *d_b;
    float *d_c;
    cudaMalloc(&d_a, num * sizeof(float));
    cudaMalloc(&d_b, num * sizeof(float));
    cudaMalloc(&d_c, num * sizeof(float));
    cudaMemcpy(d_a, &a[0], num*sizeof(float), cudaMemcpyHostToDevice);
    // BUG FIX: `b` was previously copied into d_a (overwriting a's data) and
    // d_b was left uninitialized, so the kernel added garbage. Copy b to d_b.
    cudaMemcpy(d_b, &b[0], num*sizeof(float), cudaMemcpyHostToDevice);
    dim3 grid_size = dim3(1, 1, 1);
    dim3 block_size = dim3(num, 1, 1);
    vecadd<<<grid_size, block_size>>>(d_a, d_b, d_c, num);
    // This blocking copy also synchronizes with the kernel.
    cudaMemcpy(&c[0], d_c, num*sizeof(float), cudaMemcpyDeviceToHost);
    for(int i=0; i < num; ++i) std::cout << c[i] << std::endl;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
20,253 | #include "includes.h"
// Composites p_Output's RGB over p_Input using p_Output's alpha channel, then
// forces alpha to 1. When p_Display == 1 the RGB channels are instead replaced
// by the alpha value (matte view). Pixels are interleaved RGBA, 4 floats each;
// one thread per pixel over a 2D grid.
__global__ void ChannelBoxKernelB(const float* p_Input, float* p_Output, int p_Width, int p_Height, int p_Display) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if ((x >= p_Width) || (y >= p_Height))
        return;
    const int idx = (y * p_Width + x) * 4;
    const float alpha = p_Output[idx + 3];
    if (p_Display == 1) {
        p_Output[idx]     = alpha;
        p_Output[idx + 1] = alpha;
        p_Output[idx + 2] = alpha;
    } else {
        const float inv = 1.0f - alpha;
        p_Output[idx]     = p_Output[idx]     * alpha + p_Input[idx]     * inv;
        p_Output[idx + 1] = p_Output[idx + 1] * alpha + p_Input[idx + 1] * inv;
        p_Output[idx + 2] = p_Output[idx + 2] * alpha + p_Input[idx + 2] * inv;
    }
    p_Output[idx + 3] = 1.0f;
} |
20,254 | #include "includes.h"
// Backprop output-layer delta: delta = (target - activation) * activation'.
// One thread per output unit; D_OUTPUT_UNITS bounds the flattened index.
__global__ void OutputDeltaKernel(float *outputDeltas, float *target, float *outputActivations, float *outputActivationDerivatives)
{
    // Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into one unit index.
    const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx >= D_OUTPUT_UNITS)
        return;
    outputDeltas[idx] = (target[idx] - outputActivations[idx]) * outputActivationDerivatives[idx];
} |
20,255 | // g++ -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_OMP -I../../../thrust/ -fopenmp -x c++ exemplo1.cu -o exemplo1 && ./exemplo1 < ../17-intro-gpu/stocks2.csv
// nvcc -arch=sm_70 -std=c++14 exemplo1.cu -o exemplo1 && ./exemplo1 < ../17-intro-gpu/stocks2.csv
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <math.h>
// Functor computing one term of the variance: (x - mean)^2 / N.
// Marked __host__ __device__ so Thrust can invoke it from both GPU (CUDA) and
// CPU (e.g. the OMP backend used in the g++ build line above); only
// __device__-marked code is compiled for the GPU. The original __device__-only
// marking prevented host-side execution.
struct calcular_var
{
    double mean;   // sample mean of the differences
    int N;         // number of samples
    calcular_var(double a, int n): mean(a), N(n) {};
    __host__ __device__
    double operator()(const double& x) {
        return ((x - mean) * (x - mean))/N;
    }
};
// Reads (apple, microsoft) price pairs from stdin, computes the per-day
// difference MSFT - AAPL on the device, and prints the variance of that
// difference series.
int main()
{
    int N = 0;
    thrust::host_vector<double> hostMicrosoft;
    thrust::host_vector<double> hostApple;
    double a, m;
    // BUG FIX: the old loop tested std::cin.fail() *before* reading, so after
    // the last record it still incremented N and pushed a stale duplicate
    // pair, skewing mean and variance. Testing the extraction itself drops
    // exactly the failed read.
    while (std::cin >> a >> m)
    {
        N += 1;
        hostMicrosoft.push_back(m);
        hostApple.push_back(a);
    }
    // Degenerate case: no data means mean/variance are undefined (would
    // otherwise divide by zero below).
    if (N == 0)
    {
        std::cout << "variancia: 0\n";
        return 0;
    }
    thrust::device_vector<double> diferenca(N);
    thrust::device_vector<double> MSFT(hostMicrosoft);
    thrust::device_vector<double> AAPL(hostApple);
    thrust::device_vector<double> var(N);
    // per-element difference MSFT - AAPL
    thrust::transform(MSFT.begin(), MSFT.end(), AAPL.begin(), diferenca.begin(), thrust::minus<double>());
    // mean of the differences
    double mean = thrust::reduce(diferenca.begin(), diferenca.end(), 0.0, thrust::plus<double>()) / N;
    // each element becomes (x - mean)^2 / N; summing them gives the variance
    thrust::transform(diferenca.begin(), diferenca.end(), var.begin(), calcular_var(mean, N));
    double variancia = thrust::reduce(var.begin(), var.end(), 0.0, thrust::plus<double>());
    std::cout << "variancia: " << variancia << "\n";
} |
20,256 | #include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
static const unsigned int NUM_BUYERS = 1 << 10;
static const unsigned int MAX_BUYER_VALUE = 20;
static const unsigned int MAX_SELLER_VALUE = MAX_BUYER_VALUE;
// static const unsigned int MAX_TRADES = 1 << 10;
unsigned int *buyerValues;
unsigned int *sellerValues;
unsigned int *transactionPrice;
curandState *states;
/*
* Replaces the the values in the input array with a prefix sum.
* Adapted from work by Mark Harris, NVIDIA and Stewart Weiss, CUNY.
*/
__device__
int scan(unsigned int *a)
{
    // Inclusive Hillis-Steele prefix sum over one block: a[i] becomes
    // a[0] + ... + a[i]. Cooperative: must be called by all blockDim.x
    // threads, and `a` must be visible to the whole block (callers pass
    // __shared__ arrays). Returns the calling thread's inclusive prefix.
    unsigned int idx = threadIdx.x;
    unsigned int n = blockDim.x;
    unsigned int d;
    for (d = 1; d < n; d *= 2) {
        int tmp;
        // Read, barrier, then write: prevents racing on overlapping elements.
        if (idx >= d)
            tmp = a[idx-d];
        __syncthreads();
        if (idx >= d)
            a[idx] = tmp + a[idx];
        __syncthreads();
    }
    // The loop ends on a barrier, so the full scan is visible on return.
    return a[idx];
}
__device__
void key_split(unsigned int *a, unsigned int bit, unsigned int *key)
{
    // One stable partition step of the LSB radix sort: reorders the (a, key)
    // pairs so that pairs whose key has `bit` clear precede those with it set,
    // preserving relative order within each group. Block-cooperative; `a` and
    // `key` are __shared__ arrays of blockDim.x elements.
    unsigned int idx = threadIdx.x;
    unsigned int N = blockDim.x;
    unsigned int key_idx = key[idx];            // original key, saved before key[] is reused
    unsigned int a_idx = a[idx];                // original payload
    unsigned int b_idx = (key_idx >> bit) & 1;  // this element's bit flag
    key[idx] = b_idx;                           // key[] temporarily holds the flags
    __syncthreads();
    // Inclusive scan of the flags: T_before counts set-bit elements at or
    // before idx, and key[N-1] is then the total number of set-bit elements.
    unsigned int T_before = scan(key);
    unsigned int T_total = key[N-1];
    unsigned int F_total = N - T_total;
    __syncthreads();
    // Scatter: clear-bit elements keep order in [0, F_total); set-bit
    // elements follow in [F_total, N). Restores the original key values.
    if (b_idx) {
        key[T_before-1+F_total] = key_idx;
        a[T_before-1+F_total] = a_idx;
    } else {
        key[idx-T_before] = key_idx;
        a[idx-T_before] = a_idx;
    }
}
__device__
void key_sort(unsigned int *a, unsigned int *key)
{
    // LSB-first radix sort of (a, key) pairs by `key`, one stable split per
    // bit of the key type. Block-cooperative: every thread must participate.
    unsigned int bit;
    size_t n = CHAR_BIT * sizeof(a[0]);  // number of bits to process
    for (bit = 0; bit < n; ++bit) {
        key_split(a, bit, key);
        __syncthreads();
    }
}
/*
 * Initializes the market: one thread per buyer/seller pair.
 * Sets up a per-thread cuRAND stream, draws each buyer's and seller's
 * private value uniformly from [1, MAX], and clears the transaction price
 * (0 == "no trade yet"). Expects a <<<1, NUM_BUYERS>>> launch.
 */
__global__
void init(unsigned int *buyerValues,
unsigned int *sellerValues,
unsigned int *transactionPrice,
curandState *states,
unsigned long seed)
{
int idx = threadIdx.x;
curand_init(seed, idx, 0, &states[idx]);
curandState state = states[idx];
// random value for buyers
buyerValues[idx] = curand(&state) % MAX_BUYER_VALUE + 1;
// random value for traders
sellerValues[idx] = curand(&state) % MAX_SELLER_VALUE + 1;
// zero indicates no trade has taken place between buyer and seller i
transactionPrice[idx] = 0;
// BUG FIX: persist the advanced RNG state. The original never wrote the
// local copy back, so later kernels restarted from the post-curand_init
// state and re-drew the same values drawn here.
states[idx] = state;
}
/*
 * Re-pairs buyers and sellers between trading rounds. Two phases, both
 * implemented as a cooperative key_sort over shared memory:
 *   1. Partition: pairs that have already traded (price != 0) are moved to
 *      the front, untraded pairs to the back (stable, by index).
 *   2. Randomize: sellers of untraded pairs are permuted with random keys so
 *      each untraded buyer faces a new seller next round.
 * Expects a single-block launch with one thread per pair (shared arrays are
 * sized NUM_BUYERS).
 */
__global__
//__device__
void shuffle(unsigned int *buyerValues,
unsigned int *sellerValues,
unsigned int *transactionPrice,
curandState *states)
{
unsigned int b_tmp;
unsigned int s_tmp;
unsigned int p_tmp;
__shared__ unsigned int key[NUM_BUYERS];
__shared__ unsigned int perm[NUM_BUYERS];
int idx = threadIdx.x;
curandState state = states[idx];
// Phase 1: traded pairs keep low keys, untraded pairs get keys >= NUM_BUYERS.
perm[idx] = idx;
if (transactionPrice[idx] == 0) {
key[idx] = idx + NUM_BUYERS;
} else {
key[idx] = idx;
}
__syncthreads();
key_sort(perm, key);
__syncthreads();
// Gather through the permutation, then scatter back in sorted order.
b_tmp = buyerValues[perm[idx]];
s_tmp = sellerValues[perm[idx]];
p_tmp = transactionPrice[perm[idx]];
__syncthreads();
sellerValues[idx] = s_tmp;
buyerValues[idx] = b_tmp;
transactionPrice[idx] = p_tmp;
// Phase 2: random keys for untraded pairs permute their sellers.
perm[idx] = idx;
if (transactionPrice[idx] == 0) {
key[idx] = curand(&state);
} else {
key[idx] = idx;
}
__syncthreads();
key_sort(perm, key);
__syncthreads();
s_tmp = sellerValues[perm[idx]];
__syncthreads();
sellerValues[idx] = s_tmp;
// BUG FIX: persist the advanced RNG state. Without this write-back every
// launch re-drew the same random keys and produced the same permutation.
states[idx] = state;
__syncthreads();
}
/*
 * One round of zero-intelligence trading, one thread per buyer/seller pair.
 * For pairs that have not traded yet (price == 0): the buyer bids uniformly
 * in [1, buyerValue], the seller asks uniformly in [sellerValue, MAX]; if
 * bid >= ask they settle at a uniform price in [ask, bid].
 */
__global__
void trade(unsigned int *buyerValues,
unsigned int *sellerValues,
unsigned int *transactionPrice,
curandState *states)
{
int idx = threadIdx.x;
int bid, ask;
curandState state = states[idx];
// TODO does this cause branch diversion?
if (transactionPrice[idx] == 0) {
bid = curand(&state) % buyerValues[idx] + 1;
ask = sellerValues[idx] + curand(&state) % (MAX_SELLER_VALUE - sellerValues[idx] + 1);
if (bid >= ask) {
transactionPrice[idx] = ask + curand(&state) % (bid - ask + 1);
}
}
// BUG FIX: persist the advanced RNG state. The original never wrote it
// back, so every trade round replayed the identical bid/ask sequence and
// iterations after the first could not produce new outcomes.
states[idx] = state;
__syncthreads();
//shuffle(buyerValues, sellerValues, transactionPrice, states);
}
/*
 * Stub for per-pair trade statistics (currently computes a traded flag and
 * discards it; numTrades/price are not yet used).
 */
__global__
void stats(unsigned int *transactionPrice, unsigned int numTrades, unsigned int price) {
int idx = threadIdx.x;
unsigned int traded = 1;
// BUG FIX: the original tested the pointer itself (transactionPrice == 0),
// which is never true for a valid allocation; test this thread's entry.
if (transactionPrice[idx] == 0) traded = 0;
// Silence unused warnings until the statistics are actually accumulated.
(void)traded;
(void)numTrades;
(void)price;
}
/*
 * Driver: allocates the market in managed memory, initializes it, then runs
 * 128 rounds of trade + shuffle before releasing everything.
 */
int main()
{
unsigned long int seed = 0;
size_t uintSize = NUM_BUYERS*sizeof(unsigned int); // size of market array
size_t stateSize = NUM_BUYERS*sizeof(curandState); // size of state array
//unsigned int trades = 0;
unsigned int buyersRemaining = NUM_BUYERS; // number of buyers left
// allocate managed memory visible to both host and device
// TODO implement error handling
cudaMallocManaged(&buyerValues, uintSize);
cudaMallocManaged(&sellerValues, uintSize);
cudaMallocManaged(&transactionPrice, uintSize);
cudaMallocManaged(&states, stateSize);
init<<<1, NUM_BUYERS>>>(buyerValues, sellerValues, transactionPrice, states, seed);
cudaDeviceSynchronize();
printf("Zero Intelligence Traders\n");
// Alternate trading rounds with re-pairing shuffles.
for (int i = 0; i < 1<<7; ++i) {
trade<<<1, buyersRemaining>>>(buyerValues, sellerValues, transactionPrice, states);
shuffle<<<1, buyersRemaining>>>(buyerValues, sellerValues, transactionPrice, states);
}
cudaDeviceSynchronize();
// free memory
cudaFree(buyerValues);
cudaFree(sellerValues);
cudaFree(transactionPrice);
// BUG FIX: the RNG state array was never released.
cudaFree(states);
return EXIT_SUCCESS;
}
|
20,257 | #include <stdio.h>
//__device__ __managed__ int x, y = 2;
/*
 * Enumerates all CUDA devices and prints their properties, including a
 * derived peak memory bandwidth (clock * bus-width, double data rate).
 */
int main() {
int nDevices;
double *a;
cudaMallocManaged(&a, 10 * sizeof(double));
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
// bandwidth = 2 (DDR) * clock (kHz) * bus width (bytes), scaled to GB/s
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" L2 cache size: %d\n\n", prop.l2CacheSize);
// BUG FIX: totalGlobalMem is size_t; %d truncated/garbled it on 64-bit.
printf(" total Global Memory size: %zu\n\n", prop.totalGlobalMem);
printf(" Unified addressing: %d\n\n", prop.unifiedAddressing);
printf(" memoryBusWidth: %d\n\n", prop.memoryBusWidth);
printf(" memoryClockRate: %d\n\n", prop.memoryClockRate);
printf(" pciBusID: %d\n\n", prop.pciBusID);
printf(" pciDeviceID: %d\n\n", prop.pciDeviceID);
printf(" pciDomainID: %d\n\n", prop.pciDomainID);
}
// BUG FIX: the managed allocation was leaked.
cudaFree(a);
return 0;
}
// Exercises managed allocation from a template context.
// BUG FIX: the original leaked the allocation; release it before returning.
template <typename T>
void test()
{
double *a;
cudaMallocManaged(&a, 10 * sizeof(double));
cudaFree(a);
}
|
20,258 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
using namespace std;
//**************************************************************************
/*
 * Per-block transform using GLOBAL memory for the O(blockDim^2) inner loop:
 * for each element i, C[i] = sum over the block's slice j of
 * A[j]*i +/- B[j] (sign from the parity of ceil(A[j]*i)). Then a shared-mem
 * tree reduction produces the block sum in D[blockIdx] and the block max in
 * mx[blockIdx]. Requires dynamic shared memory of 4*blockDim.x floats and
 * (for the reduction) blockDim.x a power of two — TODO confirm caller
 * guarantees this.
 */
__global__ void transformacion_global(float * A, float * B, float * C, float * D, float * mx)
{
int tid = threadIdx.x;
int i = tid + blockDim.x * blockIdx.x;
float c = 0.0; // valor a calcular
extern __shared__ float sdata[]; // shared memory
float *sdata_A = sdata; // points at the first value of A
float *sdata_B = sdata+blockDim.x; // points at the first value of B
float *sdata_C = sdata+blockDim.x*2; // points at the first value of C
float *sdata_aux = sdata+blockDim.x*3; // points at the first value of a copy of C
// Stage A and B into shared memory (loaded but the loop below still reads
// global A/B on purpose: this is the "global memory" variant).
*(sdata_A+tid) = A[i];
*(sdata_B+tid) = B[i];
__syncthreads();
/***** Compute the C vector from global memory *****/
int jini = blockIdx.x * blockDim.x;
int jfin = jini + blockDim.x;
for (int j = jini; j < jfin; j++){
float resultado = A[j] * i ;
// Sign term: +B[j] when ceil(A[j]*i) is even, -B[j] otherwise.
int s = int(ceil(resultado))%2 == 0 ? 1 : -1;
c += resultado + B[j] * s;
}
C[i] = c;
*(sdata_C+tid) = c;
*(sdata_aux+tid) = c;
__syncthreads();
/***** Tree reduction: sum into D[blockIdx], maximum into mx[blockIdx] *****/
float v1, v2;
for ( unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
*(sdata_C+tid) += *(sdata_C+tid+s);
v1 = *(sdata_aux+tid);
v2 = *(sdata_aux+tid+s);
*(sdata_aux+tid) = (v1 > v2) ? v1 : v2;
}
__syncthreads();
}
// Thread 0 publishes the per-block results.
if (tid == 0)
{
D[blockIdx.x] = *(sdata_C);
mx[blockIdx.x] = *(sdata_aux);
}
}
//**************************************************************************
/*
 * Per-block transform using SHARED memory for the O(blockDim^2) inner loop.
 * Must compute exactly what transformacion_global and the sequential CPU
 * reference compute: C[i] = sum over the block's slice of A[j]*i +/- B[j],
 * sign from the parity of ceil(A[j]*i). Then a tree reduction produces the
 * block sum in D[blockIdx] and the block max in mx[blockIdx].
 * Requires 4*blockDim.x floats of dynamic shared memory and blockDim.x a
 * power of two for the reduction.
 */
__global__ void transformacion_shared(float * A, float * B, float * C, float * D, float * mx)
{
int tid = threadIdx.x;
int i = tid + blockDim.x * blockIdx.x;
float c = 0.0; // accumulator for C[i]
extern __shared__ float sdata[]; // dynamic shared memory
float *sdata_A = sdata; // block's slice of A
float *sdata_B = sdata+blockDim.x; // block's slice of B
float *sdata_C = sdata+blockDim.x*2; // per-thread C values (sum reduction)
float *sdata_aux = sdata+blockDim.x*3; // copy of C values (max reduction)
// Stage this block's slices of A and B into shared memory.
*(sdata_A+tid) = A[i];
*(sdata_B+tid) = B[i];
__syncthreads();
/***** Compute C[i] from shared memory.
 * BUG FIX (two defects in the original):
 *  1. `resultado` wrongly included "+ sdata_B[j]", so both the parity test
 *     and the accumulated term disagreed with the sequential reference and
 *     with transformacion_global.
 *  2. The sign term read global B[j] with the LOCAL index j, which is only
 *     correct for block 0; it must read the shared copy of this block's
 *     slice. *****/
for (int j = 0; j < blockDim.x; j++){
float resultado = *(sdata_A+j) * i;
int s = int(ceil(resultado))%2 == 0 ? 1 : -1;
c += resultado + *(sdata_B+j) * s;
}
C[i] = c;
*(sdata_C+tid) = c;
*(sdata_aux+tid) = c;
__syncthreads();
/***** Tree reduction: sum into D[blockIdx], maximum into mx[blockIdx] *****/
float v1, v2;
for ( unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
*(sdata_C+tid) += *(sdata_C+tid+s);
v1 = *(sdata_aux+tid);
v2 = *(sdata_aux+tid+s);
*(sdata_aux+tid) = (v1 > v2) ? v1 : v2;
}
__syncthreads();
}
if (tid == 0){
D[blockIdx.x] = *(sdata_C);
mx[blockIdx.x] = *(sdata_aux);
}
}
//**************************************************************************
//**************************************************************************
/*
 * Driver: runs the transform three ways (GPU global-memory kernel, GPU
 * shared-memory kernel, sequential CPU reference), reports the maximum of C
 * from each, wall-clock times, and GPU speedups.
 * Usage: transformacion Num_bloques Tam_bloque
 */
int main (int argc, char *argv[]) {
int Bsize, NBlocks;
if (argc != 3)
{ cout << "Uso: transformacion Num_bloques Tam_bloque "<<endl;
return(0);
}
else
{
NBlocks = atoi(argv[1]);
Bsize= atoi(argv[2]);
}
//Get GPU information
int devID;
cudaDeviceProp props;
cudaError_t err;
err = cudaGetDevice(&devID);
if(err != cudaSuccess) {
cout << "ERRORRR" << endl;
}
cudaGetDeviceProperties(&props, devID);
printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
const int N = Bsize * NBlocks;
//* pointers to host memory */
float *h_A, *h_B, *h_C, *h_D, *h_D_global, *h_D_shared, h_mx, *h_mx_global, *h_mx_shared;
float *d_A, *d_B, *d_C, *d_D_global, *d_D_shared, *d_mx_global, *d_mx_shared;
//* Allocate arrays a, b and c on host*/
h_A = new float[N];
h_B = new float[N];
h_C = new float[N];
h_D = new float[NBlocks];
// host buffers for the global-memory kernel's results
h_D_global = new float[NBlocks];
h_mx_global= new float[NBlocks];
// host buffers for the shared-memory kernel's results
h_D_shared = new float[NBlocks];
h_mx_shared = new float[NBlocks];
// allocate device memory
d_A = NULL; d_B = NULL; d_C = NULL;
d_D_global = NULL; d_D_shared = NULL;
d_mx_global = NULL; d_mx_shared = NULL;
err = cudaMalloc((void **) &d_A, N*sizeof(float));
if (err != cudaSuccess) {
cout << "ERROR RESERVA A" << endl;
}
err = cudaMalloc((void **) &d_B, N*sizeof(float));
if (err != cudaSuccess) {
cout << "ERROR RESERVA B" << endl;
}
err = cudaMalloc((void **) &d_C, N*sizeof(float));
if (err != cudaSuccess) {
cout << "ERROR RESERVA C" << endl;
}
err = cudaMalloc((void **) &d_D_global, NBlocks*sizeof(float));
if (err != cudaSuccess) {
cout << "ERROR RESERVA D_global" << endl;
}
err = cudaMalloc((void **) &d_D_shared, NBlocks*sizeof(float));
if (err != cudaSuccess) {
cout << "ERROR RESERVA D_Shared" << endl;
}
err = cudaMalloc((void **) &d_mx_global, NBlocks*sizeof(float));
if (err != cudaSuccess) {
cout << "ERROR RESERVA mx_global" << endl;
}
err = cudaMalloc((void **) &d_mx_shared, NBlocks*sizeof(float));
if (err != cudaSuccess) {
cout << "ERROR RESERVA mx_shared" << endl;
}
//* Initialize arrays A and B */
for (int i=0; i<N;i++)
{
h_A[i]= (float) (1 -(i%100)*0.001);
h_B[i]= (float) (0.5+(i%10) *0.1 );
}
//************************ GPU PHASE TRANSFORMACION GLOBAL **********************************
double t1 = clock();
err = cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
cout << "ERROR COPIA EN A" << endl;
}
err = cudaMemcpy(d_B, h_B, N*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
cout << "ERROR COPIA EN B" << endl;
}
dim3 threadsPerBlock(Bsize,1);
dim3 numBloques(NBlocks,1);
// 4 shared arrays of Bsize floats each (A, B, C, aux)
transformacion_global<<<numBloques, threadsPerBlock, Bsize*4*sizeof(float)>>>(d_A, d_B, d_C, d_D_global, d_mx_global);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch transformacion global kernel!\n");
exit(EXIT_FAILURE);
}
cudaMemcpy(h_D_global, d_D_global, NBlocks*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_mx_global, d_mx_global, NBlocks*sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// reduce the per-block maxima on the host
float mx_global = h_mx_global[0];
for (int k = 1; k<NBlocks; k++)
mx_global = (mx_global > h_mx_global[k]) ? mx_global : h_mx_global[k];
double TGPUTransformacionGlobal = (clock()-t1)/CLOCKS_PER_SEC;
//************************ GPU PHASE TRANSFORMACION SHARED **********************************
t1 = clock();
transformacion_shared<<<numBloques, threadsPerBlock, Bsize*4*sizeof(float)>>>(d_A, d_B, d_C, d_D_shared, d_mx_shared);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch transformacion shared kernel!\n");
exit(EXIT_FAILURE);
}
cudaMemcpy(h_D_shared, d_D_shared, NBlocks*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_mx_shared, d_mx_shared, NBlocks*sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// reduce the per-block maxima on the host
float mx_shared = h_mx_shared[0];
for (int k = 1; k<NBlocks; k++)
mx_shared = (mx_shared > h_mx_shared[k]) ? mx_shared : h_mx_shared[k];
double TGPUTransformacionShared = (clock()-t1)/CLOCKS_PER_SEC;
//************************ CPU PHASE VERSION SECUENCIAL **********************************
// Time measurement
t1=clock();
// Compute C[i], d[K] and mx
for (int k=0; k<NBlocks;k++)
{
int istart=k*Bsize;
int iend =istart+Bsize;
h_D[k]=0.0;
for (int i=istart; i<iend;i++)
{ h_C[i]=0.0;
for (int j=istart; j<iend;j++)
{ float a=h_A[j]*i;
if ((int)ceil(a) % 2 ==0)
h_C[i]+= a + h_B[j];
else
h_C[i]+= a - h_B[j];
}
h_D[k]+=h_C[i];
// BUG FIX: the original read uninitialized h_mx at i==0 and re-seeded it
// with h_C[0] at i==1 (silently dropping h_C[1] from the maximum); seed on
// the very first element instead.
h_mx = (k==0 && i==istart) ? h_C[i] : max(h_C[i],h_mx);
}
}
double t2=clock();
t2=(t2-t1)/CLOCKS_PER_SEC;
cout << "********** Valores máximos obtenidos **********" << endl;
cout<<endl;
cout << "Máximo de C version secuencial = " << h_mx << endl << endl;
cout << "Máximo de C version global = " << mx_global << endl << endl;
cout << "Máximo de C version compartida = " << mx_shared << endl;
cout <<endl;
cout << "********** Tiempos Obtenidos **********" << endl;
cout<<endl;
cout << "N =" << N << " = " << Bsize << " * " << NBlocks << endl;
cout << "Tiempo gastado version secuencial = " << t2 << endl << endl;
cout << "Tiempo gastado version global = " << TGPUTransformacionGlobal << endl << endl;
cout << "Tiempo gastado version compartida = " << TGPUTransformacionShared << endl;
cout << "********** Ganancias **********" << endl;
cout<<endl;
cout << "Ganancia version global = " << t2/TGPUTransformacionGlobal << endl << endl;
cout << "Ganancia version compartida = " << t2/TGPUTransformacionShared << endl << endl;
// BUG FIX: release host and device memory (the original leaked all of it).
delete[] h_A; delete[] h_B; delete[] h_C; delete[] h_D;
delete[] h_D_global; delete[] h_mx_global;
delete[] h_D_shared; delete[] h_mx_shared;
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
cudaFree(d_D_global); cudaFree(d_D_shared);
cudaFree(d_mx_global); cudaFree(d_mx_shared);
return 0;
}
|
20,259 |
#ifndef block_size_x
#define block_size_x 128
#endif
/*
* This kernel removes nodes with degree less than or equal to minimum.
* For the remaining nodes this kernel removes edges to nodes that have been removed.
*
* To remove a node we need to set its degree to zero
* To remove an edge we need to set its col_idx to -1
*
* Beyond setting degrees[i] to zero, this kernel does not update degrees[i] with the new degree
* because it would cause a race condition with other threads reading degrees[i] <= min, which
* would lead to edges being removed prematurely. This happens when the minimum degree is declining
* in consecutive iterations of the purging algorithm.
*/
/*
 * Removes nodes whose degree is <= *minimum and, for surviving nodes, marks
 * edges to removed neighbours by setting their col_idx entry to -1.
 * Removal of a node == setting degrees[node] to 0.
 *
 * Deliberately does NOT recompute the surviving nodes' degrees: other threads
 * concurrently test degrees[node] <= min, and updating degrees here would let
 * them observe partially-updated values and remove edges prematurely when the
 * minimum drops across iterations of the purging algorithm.
 */
__global__ void remove_nodes(int *degrees, int *row_idx, int *col_idx, int *prefix_sum, const int *__restrict__ minimum, int n) {
    int node = blockIdx.x * block_size_x + threadIdx.x;
    if (node >= n) {
        return;
    }
    const int min = minimum[0];
    const int deg = degrees[node];
    if (deg > 0 && deg <= min) {
        // At or below the threshold: delete this node.
        degrees[node] = 0;
    } else if (deg > min) {
        // Survivor: walk its CSR edge range and drop edges to deleted nodes.
        const int start = (node > 0) ? prefix_sum[node - 1] : 0;
        const int end = prefix_sum[node];
        for (int e = start; e < end; e++) {
            const int neighbour = col_idx[e];
            if (neighbour != -1 && degrees[neighbour] <= min) {
                col_idx[e] = -1;
            }
        }
    }
}
|
20,260 | // iircu_cu.txt template file, version: 01_01_01
// GENERATED FILE! MODIFY THIS FILE ONLY AT YOUR OWN RESPONSIBLITY!
// An identical behaviour to the simulation results can be assured only if this file remains unchanged!
// Code file of a general CUDA (R) IIR filter implementation
#include "iircu.cuh"
// Forward pass of the IIR cascade: a single thread walks all biquad stages
// in order, feeding each stage's output into the next; the final stage's
// output is written to *f_output_ptr. f_n selects which half of the
// double-buffered delay lines (m_t1_vec) to read. When f_bitShifter is
// nonzero the result is computed in a wider integer and scaled down by an
// arithmetic right shift (fixed-point variant).
// NOTE(review): launched sequentially (no thread/block indexing here) — the
// caller uses a <<<1,1>>> launch; the loop order is load-bearing, each stage
// depends on the previous one.
template<typename T>
__global__ void fwdFiltering(CIirCu<T>* f_segment_ptr, unsigned short m_numOfStages, unsigned char f_bitShifter, T f_inputVal, unsigned char f_n, T* f_output_ptr)
{
T f_tempVal = f_inputVal;
for (unsigned short l_idx = 0; l_idx < m_numOfStages; l_idx++)
{
f_segment_ptr[l_idx].m_x = f_tempVal;
if (0 == f_bitShifter)
{
f_segment_ptr[l_idx].m_y = (f_segment_ptr[l_idx].m_coeffs.m_b0 * f_segment_ptr[l_idx].m_x + f_segment_ptr[l_idx].m_t1_vec[(f_n + 1) & 1]);
}
else
{
// Fixed-point path: accumulate in 64 bits, then rescale by the shift.
f_segment_ptr[l_idx].m_y = ((long long)(f_segment_ptr[l_idx].m_coeffs.m_b0 * f_segment_ptr[l_idx].m_x + f_segment_ptr[l_idx].m_t1_vec[(f_n + 1) & 1]) >> f_bitShifter);
}
// Cascade: this stage's output is the next stage's input.
f_tempVal = f_segment_ptr[l_idx].m_y;
}
*f_output_ptr = f_tempVal;
}
// Backward (state-update) pass: one thread per stage updates that stage's
// delay-line terms from the x/y values stored by fwdFiltering. f_n selects
// the write half of the double buffers; the (f_n + 1) & 1 half holds the
// previous sample's values and is read. All stages update independently,
// which is why this pass can run with m_numOfStages parallel threads.
template<typename T>
__global__ void rwdFiltering(CIirCu<T>* f_segment_ptr, unsigned char f_n)
{
const unsigned short l_idx = threadIdx.x;
T l_xB1 = f_segment_ptr[l_idx].m_coeffs.m_b1 * f_segment_ptr[l_idx].m_x;
T l_xB2 = f_segment_ptr[l_idx].m_coeffs.m_b2 * f_segment_ptr[l_idx].m_x;
T l_yA1 = f_segment_ptr[l_idx].m_coeffs.m_a1 * f_segment_ptr[l_idx].m_y;
T l_yA2 = f_segment_ptr[l_idx].m_coeffs.m_a2 * f_segment_ptr[l_idx].m_y;
f_segment_ptr[l_idx].m_t1_vec[f_n & 1] = l_xB1 + f_segment_ptr[l_idx].m_t2_vec[(f_n + 1) & 1] + l_yA1;
f_segment_ptr[l_idx].m_t2_vec[f_n & 1] = l_xB2 + l_yA2;
}
// Clears one filter stage per thread: zeroes the stage's input/output
// samples and both halves of the two delay-line double buffers.
template<typename T>
__global__ void resetSegment(CIirCu<T>* f_segment_ptr)
{
const unsigned short l_idx = threadIdx.x;
f_segment_ptr[l_idx].m_x = static_cast<T>(0.0);
f_segment_ptr[l_idx].m_y = static_cast<T>(0.0);
f_segment_ptr[l_idx].m_t1_vec[0] = static_cast<T>(0.0);
f_segment_ptr[l_idx].m_t1_vec[1] = static_cast<T>(0.0);
f_segment_ptr[l_idx].m_t2_vec[0] = static_cast<T>(0.0);
f_segment_ptr[l_idx].m_t2_vec[1] = static_cast<T>(0.0);
}
// Default constructor: zero-initializes the stage's samples and both
// delay-line double buffers (coefficients keep their own default state).
template <typename T>
CIirCu<T>::CIirCu()
{
m_x = static_cast<T>(0.0);
m_y = static_cast<T>(0.0);
m_t1_vec[0] = static_cast<T>(0.0);
m_t1_vec[1] = static_cast<T>(0.0);
m_t2_vec[0] = static_cast<T>(0.0);
m_t2_vec[1] = static_cast<T>(0.0);
}
// Coefficient constructor: zero state as the default constructor, then
// copies in the supplied biquad coefficient set.
template <typename T>
CIirCu<T>::CIirCu(const CIirCoeffs<T>& f_coeffs)
{
m_x = static_cast<T>(0.0);
m_y = static_cast<T>(0.0);
m_t1_vec[0] = static_cast<T>(0.0);
m_t1_vec[1] = static_cast<T>(0.0);
m_t2_vec[0] = static_cast<T>(0.0);
m_t2_vec[1] = static_cast<T>(0.0);
m_coeffs = f_coeffs;
}
// Constructor/destructor are intentionally empty: device resources are
// managed explicitly via destroyFilter(), not tied to object lifetime.
template <typename T>
CIirFilterCu<T>::CIirFilterCu()
{
}
template <typename T>
CIirFilterCu<T>::~CIirFilterCu()
{
}
// Filters one sample: launches the sequential forward pass as a single
// thread (<<<1,1>>> — the stage cascade is inherently serial), then copies
// the result back. The blocking cudaMemcpy also serves as the
// synchronization point for the kernel.
template <typename T>
T CIirFilterCu<T>::doFiltering(T f_inputVal)
{
T l_outputVal;
dim3 l_fwd_grid(1, 1, 1);
dim3 l_fwd_thr(1, 1, 1);
fwdFiltering<T> << < l_fwd_grid, l_fwd_thr >> > (m_iirSegments_vec, m_numOfStages, m_bitShifter, f_inputVal, m_n, m_outputVal_ptr);
cudaMemcpy(&l_outputVal, m_outputVal_ptr, sizeof(T), cudaMemcpyDeviceToHost);
return l_outputVal;
}
// Updates all stages' delay lines in parallel (one thread per stage), then
// flips m_n between 0 and 1 to swap the read/write halves of the double
// buffers for the next sample.
template <typename T>
void CIirFilterCu<T>::doRwdFiltering()
{
dim3 l_rwd_grid(1, 1, 1);
dim3 l_rwd_thr(m_numOfStages, 1, 1);
rwdFiltering<T> << < l_rwd_grid, l_rwd_thr >> > (m_iirSegments_vec, m_n);
m_n++;
m_n &= 1;
}
// Resets the buffer phase and clears every stage's state on the device
// (one thread per stage).
template <typename T>
void CIirFilterCu<T>::resetFilter()
{
m_n = 0;
dim3 l_res_grid(1, 1, 1);
dim3 l_res_thr(m_numOfStages, 1, 1);
resetSegment<T> << < l_res_grid, l_res_thr >> > (m_iirSegments_vec);
}
// Releases the device-side stage array and output scalar; pointers are
// nulled so a double destroy is a harmless no-op (cudaFree(0) is valid).
template <typename T>
void CIirFilterCu<T>::destroyFilter()
{
cudaFree(m_iirSegments_vec);
m_iirSegments_vec = 0;
cudaFree(m_outputVal_ptr);
m_outputVal_ptr = 0;
}
// The TemporaryFunction_* bodies below exist solely to force the compiler to
// instantiate the class and kernel templates for each supported element type
// (short, long, long long, float, double) inside this translation unit, so
// that callers linking against it do not hit unresolved template symbols.
// Do NOT call this function, only to avoid the linker error
void TemporaryFunction_i16()
{
CIirFilterCu<short> l_tempObj;
l_tempObj.doFiltering(0);
l_tempObj.doRwdFiltering();
l_tempObj.resetFilter();
l_tempObj.destroyFilter();
CIirCoeffs<short> l_tempCoeffs;
CIirCu<short> l_tempIir_1;
CIirCu<short> l_tempIir_2(l_tempCoeffs);
}
// Do NOT call this function, only to avoid the linker error
void TemporaryFunction_i32()
{
CIirFilterCu<long> l_tempObj;
l_tempObj.doFiltering(0);
l_tempObj.doRwdFiltering();
l_tempObj.resetFilter();
l_tempObj.destroyFilter();
CIirCoeffs<long> l_tempCoeffs;
CIirCu<long> l_tempIir_1;
CIirCu<long> l_tempIir_2(l_tempCoeffs);
}
// Do NOT call this function, only to avoid the linker error
void TemporaryFunction_i64()
{
CIirFilterCu<long long> l_tempObj;
l_tempObj.doFiltering(0);
l_tempObj.doRwdFiltering();
l_tempObj.resetFilter();
l_tempObj.destroyFilter();
CIirCoeffs<long long> l_tempCoeffs;
CIirCu<long long> l_tempIir_1;
CIirCu<long long> l_tempIir_2(l_tempCoeffs);
}
// Do NOT call this function, only to avoid the linker error
void TemporaryFunction_f32()
{
CIirFilterCu<float> l_tempObj;
l_tempObj.doFiltering(0);
l_tempObj.doRwdFiltering();
l_tempObj.resetFilter();
l_tempObj.destroyFilter();
CIirCoeffs<float> l_tempCoeffs;
CIirCu<float> l_tempIir_1;
CIirCu<float> l_tempIir_2(l_tempCoeffs);
}
// Do NOT call this function, only to avoid the linker error
void TemporaryFunction_f64()
{
CIirFilterCu<double> l_tempObj;
l_tempObj.doFiltering(0);
l_tempObj.doRwdFiltering();
l_tempObj.resetFilter();
l_tempObj.destroyFilter();
CIirCoeffs<double> l_tempCoeffs;
CIirCu<double> l_tempIir_1;
CIirCu<double> l_tempIir_2(l_tempCoeffs);
}
|
20,261 | #define W 500
#define H 500
#define TX 32 // number of threads per block along x-axis
#define TY 32 // number of threads per block along y-axis
// Fills d_out with the Euclidean distance from each pixel (col, row) of a
// w x h image to the reference position `pos`. One thread per pixel; 2D
// launch, out-of-range threads exit early.
__global__
void distanceKernel(float *d_out, int w, int h, float2 pos) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= w || row >= h) return;
    const float dx = col - pos.x;
    const float dy = row - pos.y;
    d_out[row * w + col] = sqrtf(dx * dx + dy * dy);
}
/*
 * Computes a W x H field of distances to `pos` on the GPU and copies it
 * back to the host.
 */
int main() {
float *out = (float*)calloc(W*H, sizeof(float));
float *d_out; // pointer for device array
cudaMalloc(&d_out, W * H * sizeof(float));
const float2 pos = {0.0f, 0.0f}; // set reference position
const dim3 blockSize(TX, TY);
const int bx = (W + TX - 1)/TX;
// BUG FIX: the y-direction block count must be derived from H, not W
// (the bug was masked only because W == H).
const int by = (H + TY - 1)/TY;
const dim3 gridSize = dim3(bx, by);
distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, pos);
// copy the results to host (blocking memcpy also synchronizes the kernel)
cudaMemcpy(out, d_out, W*H*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_out);
free(out);
return 0;
}
|
20,262 | /*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include<opencv2\imgproc.hpp>
#include <iostream>
#define DIM 1024
#define totalThreads 16
#define totalBlocks DIM/16
#define PI 3.1415
__global__ void kernel(unsigned char* d_img, int channels)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int tid = x + y*gridDim.x*blockDim.x;
__shared__ float cache[totalThreads][totalThreads];
const float period=128.0f;
cache[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*PI / period) + 1.0f) *(sinf(y*2.0f*PI / period) + 1.0f) / 4.0f;
__syncthreads();
if (tid < DIM*DIM)//just incase we launch many threads that tid>totalPixelsOnImg
{
d_img[tid * 3 + 0] = 0;
d_img[tid * 3 + 1] = cache[(totalThreads-1) - threadIdx.x][(totalThreads - 1) - threadIdx.y];
d_img[tid * 3 + 2] = 0;
}
}
void main()
{
unsigned char* d_img;// variable for deice data
cv::Mat h_img(DIM, DIM, CV_8UC3, cv::Scalar(255, 255, 255));//OpenCV black image, 3 cannel
cv::Mat img(DIM,DIM,CV_8UC3,cv::Scalar(0,0,255));//BGR, 3 channel image
int channels = img.channels();//getting channel count
size_t imgSize= img.rows*img.cols*channels;//total ImageSize
cudaMalloc((void**)&d_img,imgSize);//allocate memory to GPU
cudaMemcpy(d_img,img.ptr(), imgSize, cudaMemcpyHostToDevice);//copy data to GPU variable
dim3 grid(totalBlocks, totalBlocks);//(64,64)
dim3 threads(totalThreads, totalThreads);//(16,16)
kernel << <grid, threads >> >(d_img,channels);
cudaMemcpy(h_img.ptr(),d_img, imgSize, cudaMemcpyDeviceToHost);
cv::imshow("img", h_img);
cv::waitKey();
cv::destroyAllWindows();
system("pause");
}*/ |
20,263 | #include <cuda.h>
#include <stdio.h>
// Long-running demo kernel: thread (0,0) prints before and after a
// busy-work loop so interleaving with K2's output can be observed.
__global__ void K1() {
    const bool lead = (blockIdx.x == 0 && threadIdx.x == 0);
    if (lead)
        printf("K1 before\n");
    unsigned total = 0;
    for (unsigned k = 0; k < 1000; ++k)
        total += k;   // busy work only; result is intentionally unused
    if (lead)
        printf("K1 after\n");
}
// Trivial kernel: every launched thread prints one line.
__global__ void K2() {
printf("in K2\n");
}
// Launches K1 (10 blocks x 32 threads) and K2 (1 thread) on the default
// stream, so K2 runs after K1; the final sync flushes device printf output.
int main() {
printf("on CPU\n");
K1<<<10, 32, 0, 0>>>();
K2<<<1, 1>>>();
cudaDeviceSynchronize();
printf("on CPU\n");
return 0;
}
20,264 | __device__ float spoc_fadd ( float a, float b ) { return (a + b);}
// Scalar float helpers emitted by the SPOC code generator
// (subtraction, multiplication, division).
__device__ float spoc_fminus ( float a, float b ) { return (a - b);}
__device__ float spoc_fmul ( float a, float b ) { return (a * b);}
__device__ float spoc_fdiv ( float a, float b ) { return (a / b);}
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Generated (SPOC) escape-time kernel over a 1000x1000 grid: one thread per
 * pixel, offset by (spoc_var1, spoc_var2) and scaled by spoc_var3, iterating
 * z <- z^2 + c (vars 10/11 = z, 14/15 = c) for up to 50 steps or until
 * |z|^2 > 4, then storing the iteration count into spoc_var0.
 * Machine-generated nesting/naming is preserved byte-for-byte.
 */
__global__ void spoc_dummy ( int* spoc_var0, int spoc_var1, int spoc_var2, float spoc_var3 ) {
{int spoc_var5;
{int spoc_var6;
{int spoc_var7;
{int spoc_var8;
{int spoc_var9;
{float spoc_var10;
{float spoc_var11;
{float spoc_var12;
{float spoc_var13;
{float spoc_var14;
{float spoc_var15;
{float spoc_var16;
spoc_var5 = (threadIdx.y + (blockIdx.y * blockDim.y)); ;
spoc_var6 = (threadIdx.x + (blockIdx.x * blockDim.x)); ;
if (spoc_var5 >= 1000 || spoc_var6 >= 1000){
return ;}
;
spoc_var7 = (spoc_var6 + spoc_var1); ;
spoc_var8 = (spoc_var5 + spoc_var2); ;
spoc_var9 = 0; ;
spoc_var10 = 0.f; ;
spoc_var11 = 0.f; ;
spoc_var12 = 0.f; ;
spoc_var13 = 0.f; ;
spoc_var14 = ((4.f * (((float) (spoc_var7) / (float) (1000) ) / spoc_var3)) - 2.f); ;
spoc_var15 = ((4.f * (((float) (spoc_var8) / (float) (1000) ) / spoc_var3)) - 2.f); ;
spoc_var16 = ((spoc_var10 * spoc_var10) + (spoc_var11 * spoc_var11)); ;
while (spoc_var9 < 50 && spoc_var16 <= 4.f){
spoc_var9 = (spoc_var9 + 1); ;
spoc_var12 = (((spoc_var10 * spoc_var10) - (spoc_var11 * spoc_var11)) + spoc_var14); ;
spoc_var13 = (((2.f * spoc_var10) * spoc_var11) + spoc_var15); ;
spoc_var10 = spoc_var12; ;
spoc_var11 = spoc_var13; ;
spoc_var16 = ((spoc_var10 * spoc_var10) + (spoc_var11 * spoc_var11));} ;
spoc_var0[((spoc_var5 * 1000) + spoc_var6)] = spoc_var9;;
}
;
}
;
}
;
}
;
}
;
}
;
}
;
}
;
}
;
}
;
}
;
}
}
#ifdef __cplusplus
}
#endif |
20,265 | #define TILE_DIM 32
// Mean of an entire numRows x numColumns matrix, written to result[0].
// Single-block routine: each thread accumulates the matrix cells it covers
// as the TILE_DIM x TILE_DIM window slides over the matrix, then thread
// (0,0) sums the tile and divides.
// Assumes blockDim == (TILE_DIM, TILE_DIM) — TODO confirm launch config.
// NOTE(review): accumulation is in T; for integer T the division truncates
// before the cast to R would help — confirm intended semantics with callers.
template<typename T, typename R>
__device__ void common_mean(const T* matrix, R* result,
const int numRows, const int numColumns) {
__shared__ T tile[TILE_DIM][TILE_DIM];
int tx = threadIdx.x;
int ty = threadIdx.y;
tile[ty][tx] = 0;
#pragma unroll
for (int tr = 0; tr < (numRows - 1) / TILE_DIM + 1; tr++) {
for (int tc = 0; tc < (numColumns - 1) / TILE_DIM + 1; tc++) {
int r = tr * TILE_DIM + ty;
int c = tc * TILE_DIM + tx;
if (r < numRows && c < numColumns) {
tile[ty][tx] += matrix[r * numColumns + c];
} else {
tile[ty][tx] += 0;
}
__syncthreads();
}
}
// Thread (0,0) reduces the shared tile to the final mean.
if (tx == 0 && ty == 0) {
T sum = 0;
#pragma unroll
for (int i = 0; i < TILE_DIM; i++) {
#pragma unroll
for (int j = 0; j < TILE_DIM; j++) {
sum += tile[i][j];
}
}
result[0] = sum / (R) (numRows * numColumns);
}
}
// Single-precision entry point for common_mean.
template<typename T>
__device__ void mean(const T* matrix, float* result,
const int numRows, const int numColumns) {
common_mean<T, float>(matrix, result, numRows, numColumns);
}
// Double-precision entry point for common_mean.
template<typename T>
__device__ void meand(const T* matrix, double* result,
const int numRows, const int numColumns) {
common_mean<T, double>(matrix, result, numRows, numColumns);
}
// Per-row mean: result[row] = mean of matrix[row][*]. Tiles the column
// dimension through shared memory; each thread owns one row of the block's
// tile and accumulates across column tiles.
// Assumes blockDim.y == TILE_DIM (tile is indexed with ty up to TILE_DIM and
// each thread loads a full column of the tile) — TODO confirm launch config.
template<typename T, typename R>
__device__ void common_rowsMean(const T* matrix, R* result,
const int numRows, const int numColumns) {
__shared__ T tile[TILE_DIM][TILE_DIM];
int by = blockIdx.y;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
T sum = 0;
#pragma unroll
for (int t = 0; t < (numColumns - 1) / TILE_DIM + 1; t++) {
// Cooperative load: thread ty loads column ty of the tile, padding
// out-of-range cells with zero so the sum below is unaffected.
#pragma unroll
for (int i = 0; i < TILE_DIM; i++) {
int r = by * TILE_DIM + i;
int c = t * TILE_DIM + ty;
if (r < numRows && c < numColumns) {
tile[i][ty] = matrix[r * numColumns + c];
} else {
tile[i][ty] = 0;
}
}
__syncthreads();
// Accumulate this tile's slice of my row.
#pragma unroll
for (int j = 0; j < TILE_DIM; j++) {
sum += tile[ty][j];
}
__syncthreads();
}
if (row < numRows) {
result[row] = sum / (R) numColumns;
}
}
// Single-precision entry point for common_rowsMean.
template<typename T>
__device__ void rowsMean(const T* matrix, float* result,
const int numRows, const int numColumns) {
common_rowsMean<T, float>(matrix, result, numRows, numColumns);
}
// Double-precision entry point for common_rowsMean.
template<typename T>
__device__ void rowsMeand(const T* matrix, double* result,
const int numRows, const int numColumns) {
common_rowsMean<T, double>(matrix, result, numRows, numColumns);
}
// Per-column mean: result[col] = mean of matrix[*][col]. One thread per
// column walks straight down the rows (consecutive threads read consecutive
// columns, so the global loads coalesce).
template<typename T, typename R>
__device__ void common_columnsMean(const T* matrix, R* result,
const int numRows, const int numColumns) {
const int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= numColumns) {
return;
}
T total = 0;
#pragma unroll
for (int row = 0; row < numRows; row++) {
total += matrix[row * numColumns + col];
}
result[col] = total / (R) numRows;
}
// Single-precision entry point for common_columnsMean.
template<typename T>
__device__ void columnsMean(const T* matrix, float* result,
const int numRows, const int numColumns) {
common_columnsMean<T, float>(matrix, result, numRows, numColumns);
}
// Double-precision entry point for common_columnsMean.
template<typename T>
__device__ void columnsMeand(const T* matrix, double* result,
const int numRows, const int numColumns) {
common_columnsMean<T, double>(matrix, result, numRows, numColumns);
}
|
20,266 |
# include <stdio.h>
# include <stdlib.h>
# include <cuda.h>
# define N (2048)
# define THREADS_PER_BLOCK 512
// Element-wise vector add, one thread per element. No bounds guard: the
// caller launches exactly N threads (N is an exact multiple of
// THREADS_PER_BLOCK), so every index is in range.
__global__ void add(int *a, int *b, int *c)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// Classic vector-add demo: fill two host arrays, add them on the GPU, and
// print every resulting sum.
int main(void)
{
    const int bytes = N * sizeof(int);          // space for N integers
    int *h_a, *h_b, *h_c;                       // host copies
    int *d_a, *d_b, *d_c;                       // device copies
    // Device allocations.
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_c, bytes);
    // Host allocations and input data.
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);
    for (int i = 0; i < N; i++)
    {
        h_a[i] = i;
        h_b[i] = i + 1;
    }
    // Upload inputs.
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    // One thread per element.
    add<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(d_a, d_b, d_c);
    // Download the result (blocking copy also synchronizes the kernel).
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        printf("%5d + %5d = %5d\n", h_a[i], h_b[i], h_c[i]);
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
20,267 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
//__constant__ int datos[1024];
// Copies one element per thread from d_src to d_dst.
__global__ void kernel(int *d_dst, int *d_src) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    d_dst[gid] = d_src[gid];
}
/*
 * Times a 1024-int device-to-device copy kernel with CUDA events and copies
 * the result back to the host.
 */
int main(int argc, char **argv) {
int *d_datos, *h_datos, *d_src;
cudaMalloc((void**)&d_datos, sizeof(int) * 1024);
cudaMalloc((void**)&d_src, sizeof(int) * 1024);
h_datos = (int *)malloc(sizeof(int) * 1024);
int *test = new int[1024];
memset(test, 0, sizeof(int) * 1024);
for (int i = 0; i < 1024; i++) {
test[i] = i;
}
//GPU Time
cudaEvent_t start, stop;
float time;
// BUG FIX: the original copied from the uninitialized device buffer
// d_datos; upload the prepared host buffer `test` instead.
cudaMemcpy(d_src, test, sizeof(int)*1024, cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// Kernel call
kernel<<< 1, 1024 >>>(d_datos, d_src);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// Copying From Device to Host
cudaMemcpy(h_datos, d_datos, sizeof(int)*1024, cudaMemcpyDeviceToHost);
printf("Time : %f ms\n",time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// BUG FIX: `test` came from new[], so it must be delete[]'d (free() was
// undefined behavior); also release h_datos and d_src, which leaked.
delete[] test;
free(h_datos);
cudaFree(d_datos);
cudaFree(d_src);
return 0;
}
|
20,268 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
#include<iostream>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
using namespace std;
// CUDA settings
#define WARP_SIZE 32
#define WARP_COUNT 16
#define BLOCK_COUNT 13
// Axis-aligned rectangle with integer edges, usable from host and device.
class Rectangle {
public:
    int top;
    int bottom;
    int left;
    int right;
    // Construct a degenerate rectangle with every edge at the origin.
    __host__ __device__ Rectangle() : top(0), bottom(0), left(0), right(0) {}
};
void die(const char *error);
void check_error(cudaError e);
__global__ void monteCarloKernel(double *subarea_estimates,
int cell_length,
int grid_dimension,
int points_per_cell);
double *subarea_estimates_CPU, *subarea_estimates_GPU;
/*
 * Monte-Carlo area estimation driver: parses grid/sampling parameters,
 * launches the kernel, and sums the per-cell subarea estimates.
 * Usage: prog <cell_length> <grid_dimension> <points_per_cell>
 */
int main(int argc, char **argv) {
if (argc < 4) {
cout << "You need to provide three arguments\n";
cout << "The length of each square grid cell\n";
cout << "The dimension of the grid in terms of cell count along a dimension\n";
cout << "The number of random samples to be generated per cell\n";
// BUG FIX: the original fell through and dereferenced the missing
// argv[1..3] anyway; bail out on bad usage.
return 1;
}
int cell_length = atoi(argv[1]);
int grid_dimension = atoi(argv[2]);
int points_per_cell = atoi(argv[3]);
cout << "Cell length: " << cell_length << "\n";
cout << "Grid dimension: " << grid_dimension << " by " << grid_dimension << "\n";
cout << "Random samples per cell: " << points_per_cell << "\n";
clock_t start = clock();
long int grid_size = grid_dimension * grid_dimension * sizeof(double);
subarea_estimates_CPU = (double *) malloc(grid_size);
check_error(cudaMalloc((void **) &subarea_estimates_GPU, grid_size));
clock_t end = clock();
double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
cout << "Allocation time: " << elapsed << " seconds \n";
start = clock();
int threadsPerBlock = WARP_SIZE * WARP_COUNT;
monteCarloKernel<<< BLOCK_COUNT, threadsPerBlock >>>
(subarea_estimates_GPU, cell_length, grid_dimension, points_per_cell);
cudaDeviceSynchronize(); // cudaThreadSynchronize is deprecated
check_error(cudaGetLastError());
check_error(cudaMemcpy(subarea_estimates_CPU, subarea_estimates_GPU, grid_size, cudaMemcpyDeviceToHost));
// Sum the per-cell estimates into the total area.
double area_estimate = 0.0;
for (int i = 0; i < grid_dimension; i++) {
for (int j = 0; j < grid_dimension; j++) {
area_estimate += subarea_estimates_CPU[i * grid_dimension + j];
}
}
cout << "Area under the curve is: " << area_estimate << "\n";
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
cout << "Execution time: " << elapsed << " seconds \n";
// BUG FIX: release the host and device buffers (both leaked).
free(subarea_estimates_CPU);
check_error(cudaFree(subarea_estimates_GPU));
return 0;
}
// Report a fatal error message and terminate the program with status 1.
void die(const char *error) {
    fputs(error, stdout);
    exit(1);
}
// Abort with a diagnostic if `e` signals a CUDA failure; no-op on success.
void check_error(cudaError e) {
    if (e == cudaSuccess) return;
    printf("\nCUDA error: %s\n", cudaGetErrorString(e));
    exit(1);
}
// Monte-Carlo estimate of the area where 10*sin(x^2) + 50*cos(y^3) <= 0.
// Cells are distributed block-stride along x and thread-stride along y;
// each (x, y) cell gets points_per_cell uniform samples.
// Launch layout assumed: 1-D grid of BLOCK_COUNT blocks, WARP_SIZE*WARP_COUNT
// threads per block.
__global__ void monteCarloKernel(double *subarea_estimates,
                                 int cell_length,
                                 int grid_dimension,
                                 int points_per_cell) {
    int threadId = threadIdx.x;
    int threads_per_block = WARP_SIZE * WARP_COUNT;
    int blockId = blockIdx.x;
    double cell_size = cell_length * cell_length;
    // Initialize the RNG once per thread. The original re-ran curand_init
    // inside the per-cell loop, which is expensive and needless.
    curandState_t state;
    int threadIndex = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(clock64(), threadIndex, 0, &state);
    // distribute the horizontal axis cells among the SMs
    for (int x = blockId; x < grid_dimension; x += BLOCK_COUNT) {
        // distribute the vertical axis cells among the threads of an SM
        for (int y = threadId; y < grid_dimension; y += threads_per_block) {
            // determine grid cell boundaries
            Rectangle cell;
            cell.left = cell_length * x;
            cell.right = cell_length * (x + 1) - 1;
            cell.bottom = cell_length * y;
            cell.top = cell_length * (y + 1) - 1;
            // perform the sampling trials
            int internal_points = 0;
            for (int trial = 0; trial < points_per_cell; trial++) {
                // BUG FIX: (int)curand_uniform(&state) truncates the (0,1]
                // result to 0 almost always, collapsing every sample onto the
                // cell corner. Draw a raw 32-bit value and reduce it into the
                // cell instead.
                int xp = (int)(curand(&state) % (unsigned int)cell_length) + cell.left;
                int yp = (int)(curand(&state) % (unsigned int)cell_length) + cell.bottom;
                // tested polynomial is 10 sin x^2 + 50 cos y^3
                // (widen to double before multiplying to avoid int overflow)
                double x_2 = (double)xp * xp;
                double y_3 = (double)yp * yp * yp;
                double result = 10 * sin(x_2) + 50 * cos(y_3);
                if (result <= 0.0) {
                    internal_points++;
                }
            }
            // estimate the part of the polynomial within the grid cell
            long int index = y * grid_dimension + x;
            subarea_estimates[index] = cell_size * internal_points / points_per_cell;
        }
    }
}
|
20,269 | #include "cuda.h"
#include "limits.h"
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
#define BLOCK_SIZE 512
#define ELEMS_PER_THREAD 32
// Final warp-level stage of the tree reduction: folds s_data[t+32..t+1]
// into s_data[t] without __syncthreads(), relying on `volatile` to force
// every intermediate store/load to shared memory.
// NOTE(review): this implicit-warp-synchrony idiom predates independent
// thread scheduling (Volta, SM70+); on newer targets __syncwarp() between
// steps, or a shuffle/CUB reduction, is the safe form — confirm the target
// architecture before relying on this.
template <unsigned int blockSize>
__device__ void warpReduce(volatile double* s_data, unsigned int t) {
// Each step is compiled in only when the block is large enough to have
// produced partial sums at that distance.
if (blockSize >= 64) s_data[t] += s_data[t + 32];
if (blockSize >= 32) s_data[t] += s_data[t + 16];
if (blockSize >= 16) s_data[t] += s_data[t + 8];
if (blockSize >= 8) s_data[t] += s_data[t + 4];
if (blockSize >= 4) s_data[t] += s_data[t + 2];
if (blockSize >= 2) s_data[t] += s_data[t + 1];
}
// One level of a multi-pass sum reduction: each block sums a strided slice
// of d_in (ELEMS_PER_THREAD-ish elements per thread) into shared memory,
// tree-reduces it, and writes one partial sum per block to d_out.
// Requires: launch with blockDim.x == blockSize and dynamic shared memory of
// blockSize * sizeof(double).
template <unsigned int blockSize>
__global__ void reductionKernel(double* d_in, double* d_out, unsigned int N) {
extern __shared__ double s_data[];
// Indexing
unsigned int t = threadIdx.x;
unsigned int i = blockIdx.x * (blockSize * 2) + t;
unsigned int gridSize = blockSize * 2 * gridDim.x;
// Load some elements into shared memory: grid-stride loop consuming two
// elements (i and i+blockSize) per step while a full pair remains.
// NOTE(review): the inner offset uses blockDim.x where the rest of the
// kernel uses the template parameter blockSize — identical only when the
// launch honors blockDim.x == blockSize; verify at the call site.
s_data[t] = 0.f;
while (i + blockSize < N) {
s_data[t] += d_in[i] + d_in[i + blockDim.x];
i += gridSize;
}
// Tail: at most one unpaired element per thread.
if (i < N) s_data[t] += d_in[i];
__syncthreads();
// Unroll the loop: compile-time tree reduction, halving active threads
// each step, with a barrier between levels.
if (blockSize >= 512) {
if (t < 256) s_data[t] += s_data[t + 256];
__syncthreads();
}
if (blockSize >= 256) {
if (t < 128) s_data[t] += s_data[t + 128];
__syncthreads();
}
if (blockSize >= 128) {
if (t < 64) s_data[t] += s_data[t + 64];
__syncthreads();
}
// Last 64 -> 1 elements handled warp-synchronously (see warpReduce).
if (t < 32) warpReduce<blockSize>(s_data, t);
// Write the result for each block into d_out
if (t == 0) d_out[blockIdx.x] = s_data[0];
}
// Run the full tree reduction of h_in (N doubles) on the device, leaving the
// scalar sum in *h_out.
// dur_ex receives the kernel-only ("exclusive") time in ms; dur_in the time
// including the H2D upload and final D2H download ("inclusive").
// d_arr holds tree_depth+1 preallocated device buffers sized per `lengths`.
void reductionOnDevice(double* h_in, double* h_out, double** d_arr,
                       unsigned int N, int tree_depth, unsigned int* lengths,
                       dim3* dimBlock, dim3* dimGrid, unsigned int s_size,
                       float& dur_ex, float& dur_in) {
    // Setup timing
    cudaEvent_t start_ex, end_ex, start_in, end_in;
    cudaEventCreate(&start_ex);
    cudaEventCreate(&end_ex);
    cudaEventCreate(&start_in);
    cudaEventCreate(&end_in);
    // Copy host array to device
    cudaEventRecord(start_in, 0);
    cudaMemcpy(d_arr[0], h_in, N * sizeof(double), cudaMemcpyHostToDevice);
    // Perform reduction on device: one launch per tree level, each shrinking
    // the array from lengths[i] partial sums to lengths[i+1].
    cudaEventRecord(start_ex, 0);
    for (int i = 0; i < tree_depth; i++) {
        reductionKernel<BLOCK_SIZE> <<<dimGrid[i], dimBlock[i], s_size>>>
            (d_arr[i], d_arr[i + 1], lengths[i]);
    }
    cudaEventRecord(end_ex, 0);
    cudaEventSynchronize(end_ex);
    // Copy the final scalar back to host
    cudaMemcpy(h_out, d_arr[tree_depth], sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventRecord(end_in, 0);
    cudaEventSynchronize(end_in);
    // Calculate durations
    cudaEventElapsedTime(&dur_ex, start_ex, end_ex);
    cudaEventElapsedTime(&dur_in, start_in, end_in);
    // BUG FIX: destroy the events — the original leaked four events per call,
    // and this function is invoked in a timing loop.
    cudaEventDestroy(start_ex);
    cudaEventDestroy(end_ex);
    cudaEventDestroy(start_in);
    cudaEventDestroy(end_in);
}
// Serial host-side reference sum of h_in (N doubles) into *h_ref, timed with
// CUDA events so the measurement is comparable with the GPU runs.
void reductionOnHost(double* h_in, double* h_ref, unsigned int N,
                     float& dur_cpu) {
    // Setup timing
    cudaEvent_t start_cpu, end_cpu;
    cudaEventCreate(&start_cpu);
    cudaEventCreate(&end_cpu);
    // Perform reduction on host
    cudaEventRecord(start_cpu, 0);
    double result = 0.f;
    for (unsigned int i = 0; i < N; i++) result += h_in[i];
    *h_ref = result;
    cudaEventRecord(end_cpu, 0);
    cudaEventSynchronize(end_cpu);
    // Calculate duration
    cudaEventElapsedTime(&dur_cpu, start_cpu, end_cpu);
    // BUG FIX: destroy the events — the original leaked two events per call,
    // and this function is invoked in a timing loop.
    cudaEventDestroy(start_cpu);
    cudaEventDestroy(end_cpu);
}
// Compare device and host reduction results within absolute tolerance eps.
// Returns true iff |*h_out - *h_ref| <= eps.
bool checkResults(double* h_out, double* h_ref, double eps) {
    // BUG FIX: use fabs. With <math.h> (rather than <cmath>) included, plain
    // abs() can resolve to the C int abs, truncating the double difference
    // to 0 and masking real mismatches.
    double delta = fabs(*h_out - *h_ref);
    return delta <= eps;
}
// Print the command-line usage string and terminate successfully.
void exitUsage() {
    puts("Usage: ./p2 [<M> <N> [<dur_max>]]");
    exit(EXIT_SUCCESS);
}
// Parse command-line arguments into N (element count), M (value half-range)
// and dur_max (timing budget, converted from seconds to ms). No arguments
// selects the defaults; any malformed input prints usage and exits.
void parseInput(int argc, char* argv[], unsigned int& N, unsigned int& M,
                float& dur_max) {
    if (argc == 1) {
        // Defaults: 50M elements, values in [-5, 5], 1 second budget.
        N = 50000000;
        M = 5;
        dur_max = 1000.f;
        return;
    }
    if (argc != 3 && argc != 4) exitUsage();
    if (sscanf(argv[1], "%u", &N) != 1 || sscanf(argv[2], "%u", &M) != 1)
        exitUsage();
    if (argc == 3) {
        dur_max = 1000.f;
    } else {
        if (sscanf(argv[3], "%f", &dur_max) != 1) exitUsage();
        dur_max *= 1000;
    }
}
// Allocate `size` bytes of pinned (page-locked) host memory; aborts the
// program when the allocation fails.
double* allocateHostArray(unsigned int size) {
    double* h_array = NULL;
    if (cudaMallocHost(&h_array, size) != cudaSuccess) {
        printf("Memory allocation on the host was unsuccessful.\n");
        exit(EXIT_FAILURE);
    }
    return h_array;
}
// Allocate `size` bytes of device global memory; aborts the program when the
// allocation fails.
double* allocateDeviceArray(unsigned int size) {
    double* d_arr = NULL;
    if (cudaMalloc(&d_arr, size) != cudaSuccess) {
        printf("Memory allocation on the device was unsuccessful.\n");
        exit(EXIT_FAILURE);
    }
    return d_arr;
}
// Benchmark driver: repeatedly reduces N random doubles on GPU and CPU until
// the time budget dur_max is spent, then reports averages, minima and a
// tolerance-based correctness check.
int main(int argc, char* argv[]) {
    unsigned int N, M;
    float dur_max;
    parseInput(argc, argv, N, M, dur_max);
    // Setup timing
    int nruns_gpu = 0;
    int nruns_cpu = 0;
    float dur_ex, dur_in, dur_cpu;
    float dur_ex_total = 0.f;
    float dur_in_total = 0.f;
    float dur_cpu_total = 0.f;
    float dur_ex_min = 1e99;
    float dur_in_min = 1e99;
    float dur_cpu_min = 1e99;
    // Calculate the tree depth: number of kernel passes needed to reduce N
    // values to 1, shrinking by BLOCK_SIZE*ELEMS_PER_THREAD per pass.
    int tree_depth = 0;
    {
        unsigned int length = N;
        while (length > 1) {
            length = (length + (BLOCK_SIZE * ELEMS_PER_THREAD) - 1) /
                     (BLOCK_SIZE * ELEMS_PER_THREAD);
            tree_depth++;
        }
    }
    // Calculate the lengths of the device arrays
    unsigned int lengths[tree_depth + 1];
    lengths[0] = N;
    for (int i = 1; i < tree_depth + 1; i++)
        lengths[i] = (lengths[i - 1] + (BLOCK_SIZE * ELEMS_PER_THREAD) - 1) /
                     (BLOCK_SIZE * ELEMS_PER_THREAD);
    // Setup grid: one block per output partial sum of each level.
    dim3 dimBlock[tree_depth];
    dim3 dimGrid[tree_depth];
    for (int i = 0; i < tree_depth; i++) {
        dimBlock[i].x = BLOCK_SIZE;
        dimGrid[i].x = lengths[i + 1];
    }
    // Shared memory size
    unsigned int s_size = sizeof(double) * BLOCK_SIZE;
    // Allocate host arrays (pinned memory)
    double* h_in = allocateHostArray(sizeof(double) * N);
    double* h_out = allocateHostArray(sizeof(double));
    double* h_ref = allocateHostArray(sizeof(double));
    // Allocate device arrays
    double* d_arr[tree_depth + 1];
    for (int i = 0; i < tree_depth + 1; i++)
        d_arr[i] = allocateDeviceArray(sizeof(double) * lengths[i]);
    // Fill host array with random numbers in [-M, M]
    srand(73);
    for (unsigned int i = 0; i < N; i++)
        h_in[i] = ((double)rand() / RAND_MAX - 0.5f) * 2 * M;
    // Perform reduction on the device a number of times
    while (dur_in_total < dur_max) {
        nruns_gpu++;
        reductionOnDevice(h_in, h_out, d_arr, N, tree_depth, lengths, dimBlock,
                          dimGrid, s_size, dur_ex, dur_in);
        dur_ex_total += dur_ex;
        dur_in_total += dur_in;
        if (dur_ex < dur_ex_min) dur_ex_min = dur_ex;
        if (dur_in < dur_in_min) dur_in_min = dur_in;
        if (dur_in_total == 0.f) break;  // guard against a stuck timer
    }
    // Perform reduction on the host a number of times
    while (dur_cpu_total < dur_max) {
        nruns_cpu++;
        reductionOnHost(h_in, h_ref, N, dur_cpu);
        dur_cpu_total += dur_cpu;
        if (dur_cpu < dur_cpu_min) dur_cpu_min = dur_cpu;
        if (dur_cpu_total == 0.f) break;
    }
    dur_ex = dur_ex_total / nruns_gpu;
    dur_in = dur_in_total / nruns_gpu;
    dur_cpu = dur_cpu_total / nruns_cpu;
    // Compare device and host results
    double eps = (double)M * 2 * 0.001f;
    bool testPassed = checkResults(h_out, h_ref, eps);
    if (testPassed)
        printf("Test PASSED\n");
    else
        printf("Test FAILED\n");
    // Print stuff
    printf("N: %u\n", N);
    printf("M: %u\n", M);
    printf("Elements per thread: %d\n", ELEMS_PER_THREAD);
    printf("Tree depth: %d\n", tree_depth);
    printf("Block sizes: ");
    for (int i = 0; i < tree_depth - 1; i++) printf("%d, ", dimBlock[i].x);
    printf("%d\n", dimBlock[tree_depth - 1].x);
    printf("Grid sizes: ");
    for (int i = 0; i < tree_depth - 1; i++) printf("%d, ", dimGrid[i].x);
    printf("%d\n", dimGrid[tree_depth - 1].x);
    printf("GPU array lengths: ");
    for (int i = 0; i < tree_depth; i++) printf("%d, ", lengths[i]);
    printf("%d\n", lengths[tree_depth]);
    printf("GPU result: %24.14f\n", *h_out);
    printf("CPU result: %24.14f\n", *h_ref);
    printf("Timing results %12s %12s %8s\n", "Average", "Minimum", "Num_runs");
    printf("GPU exclusive: %12.6f %12.6f %8d\n", dur_ex, dur_ex_min, nruns_gpu);
    printf("GPU inclusive: %12.6f %12.6f %8d\n", dur_in, dur_in_min, nruns_gpu);
    printf("CPU: %12.6f %12.6f %8d\n", dur_cpu, dur_cpu_min, nruns_cpu);
    printf("\n");
    // BUG FIX: these were allocated with cudaMallocHost (pinned memory), so
    // they must be released with cudaFreeHost, not cudaFree.
    cudaFreeHost(h_in);
    cudaFreeHost(h_out);
    cudaFreeHost(h_ref);
    for (int i = 0; i < tree_depth + 1; i++) cudaFree(d_arr[i]);
    return 0;
}
|
20,270 | #include "includes.h"
// 2-D correlation filter: one thread per output pixel, result clamped to
// [0, 255]. Out-of-image taps are skipped (zero contribution).
// Launch: 2-D grid covering src_width x src_height.
__global__ void cuda_filter2D(float *dst, float *src, float *kernel, int src_width, int src_height, int kernel_rows, int kernel_cols)
{
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    if (row < src_height && col < src_width)
    {
        float sum = 0;
        for (int i = 0; i < kernel_rows; i++)
        {
            for (int j = 0; j < kernel_cols; j++)
            {
                int r = row + i - (kernel_rows - 1) / 2;
                // BUG FIX: the column offset must be centered with
                // kernel_cols; the original used kernel_rows in the source
                // index, mis-addressing pixels for non-square kernels.
                int c = col + j - (kernel_cols - 1) / 2;
                if (r >= 0 && r < src_height && c >= 0 && c < src_width)
                {
                    sum += kernel[i * kernel_cols + j] * src[r * src_width + c];
                }
            }
        }
        // Saturate to the 8-bit range.
        dst[row * src_width + col] = (sum < 0) ? 0 : (sum > 255 ? 255 : float(sum));
#ifdef debug
        printf("filter2D: dst[%d] = %f\n", row * src_width + col, dst[row * src_width + col]);
#endif
    }
}
20,271 | // Library imports.
#include <iostream>
using namespace std;
// Main method.
// Entry point: print a greeting (no trailing newline) and exit.
int main() {
    const char* greeting = "Hello world!!";
    cout << greeting;
    return 0;
}
20,272 | #include "Game.cuh"
#include "Screen.cuh"
// Construct the game pointing at its first screen; the screen itself is not
// created until onCreate() runs.
Game::Game(Screen& initialScreen)
    : currentScreen(&initialScreen)
{
}
void
Game::onCreate()
{
currentScreen->onCreate();
currentScreen->isCreated = true;
currentScreen->onResume();
isCreated = true;
}
// Forward the per-frame tick (delta, in the caller's time units) to the
// active screen.
void
Game::update(int delta)
{
    currentScreen->onUpdate(delta);
}
void
Game::setScreen(Screen& screen)
{
    // Before the game itself is created, just remember the screen; creation
    // and resume happen later in onCreate().
    if (isCreated)
    {
        // Lazily create the incoming screen on first use.
        if (!screen.isCreated)
        {
            screen.onCreate();
            screen.isCreated = true;
        }
        // Hand focus over: leave the old screen, resume the new one.
        currentScreen->onLeave();
        screen.onResume();
    }
    currentScreen = &screen;
}
20,273 | #include "includes.h"
// Element-wise difference xx1 = xx0 - xxCP over a dim x nwl walker matrix.
// 2-D launch: x indexes the parameter dimension, y indexes the walker.
__global__ void substractWalkers ( const int dim, const int nwl, const float *xx0, const float *xxCP, float *xx1 ) {
    int p = threadIdx.x + blockDim.x * blockIdx.x;  // parameter index
    int w = threadIdx.y + blockDim.y * blockIdx.y;  // walker index
    if ( p < dim && w < nwl ) {
        int idx = p + w * dim;
        xx1[idx] = xx0[idx] - xxCP[idx];
    }
}
20,274 | #include <iostream>
using namespace std;
#define CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
//calculate<<<(rowsY + 255) / 256, 256>>>(dev_original_matrix, dev_result, elementsQuantity, rowsY, colsX, border_number); <<<кол-во блоков в grid, кол-во потоков>>>
// + 255 для того, чтобы точно уместить все данные
// размер массива ограничен максимальным размером пространства потоков = 256
// Vertical-edge detector, one thread per image row: counts the pixels of the
// row whose horizontal central difference exceeds border_number, writing the
// count to dev_result[row].
// Launch: 1-D grid, one thread per row, e.g. <<<(rowsY + 255) / 256, 256>>>.
__global__ void calculate(int *dev_original_matrix, int *dev_result, int elementsQuantity, int rowsY, int colsX, int border_number)
{
    int current_row_number = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: the grid is rounded up to a multiple of the block size, so the
    // surplus threads must not read/write out of bounds (the original had no
    // guard and relied on rowsY being an exact multiple of 256).
    if (current_row_number >= rowsY) return;
    int count_points = 0;
    // First and last columns are skipped: the central difference needs both
    // horizontal neighbours.
    for (int i = 1; i < colsX - 1; i++)
    {
        // Central difference (cf. the Sobel gradient); the /2 is omitted and
        // absorbed into the threshold.
        int r = dev_original_matrix[current_row_number * colsX + i - 1] -
                dev_original_matrix[current_row_number * colsX + i + 1];
        if (r > border_number)
        {
            count_points++;
        }
    }
    dev_result[current_row_number] = count_points;
}
// Shared-memory (tiled) variant of the vertical-edge detector: each
// 256-thread block stages 32-pixel-wide tiles of a 256-row strip of the
// image into shared memory and differentiates there, accumulating the
// per-row edge count in a register before the single global write at the end.
// NOTE(review): the literals below (32, 8, 33, 30, 256) hard-code the layout
// for colsX = 1502 (= 50*30 + 2) and 256-row strips — confirm before reusing
// on other image sizes. Tiles overlap by 2 columns so every interior pixel
// gets both neighbours.
__global__ void goodCalculation(int *dev_original_matrix, int *dev_result, int elementsQuantity,
int rowsY, int colsX, int border_number)
{
// First image row covered by this block's 256-row strip.
int rowsCountBeforeOrangeLine = blockIdx.x * blockDim.x;
//int bigRowNumber = blockIdx.x * blockDim.x + threadIdx.x;
int cacheWidth = 32; // original
int rectangleHeight = 8; // original
//int rectangleInRowQuantity = colsX / cacheWidth; // original
// Number of 32-wide tiles per row, accounting for the 2-column overlap.
int rectangleInRowQuantity = (colsX - 2) / (cacheWidth - 2);
// 33-wide rows: +1 padding column to avoid shared-memory bank conflicts.
__shared__ int cache[256][33];
int r;
int count_points = 0;
int rowInCache = threadIdx.x / cacheWidth; // row of this thread's first element in the top sub-rectangle
int currentRowInCache = rowInCache;
int columnInCache = threadIdx.x % cacheWidth;
int pixelCountUpperRowInTopGreenRect = (rowsCountBeforeOrangeLine + rowInCache) * colsX;
int indexTopPixelInCurrentFPInsideImage = pixelCountUpperRowInTopGreenRect + columnInCache;
int verticalStep = rectangleHeight * colsX;
for (int stringIteration = 0; stringIteration < rectangleInRowQuantity; stringIteration++)
{
int currentPixelInImage = indexTopPixelInCurrentFPInsideImage;
// Cooperative load: each thread fills one column of 32 rows, stepping one
// 8-row sub-rectangle down per iteration.
for (int levelInCache = 0; levelInCache < cacheWidth; levelInCache++)
{
cache[currentRowInCache][columnInCache] = dev_original_matrix[currentPixelInImage];
currentRowInCache += rectangleHeight;
currentPixelInImage += verticalStep; // one sub-rectangle down
}
indexTopPixelInCurrentFPInsideImage += 30; // advance to the next tile (2-column overlap)
currentRowInCache = rowInCache;
__syncthreads();
r = 0;
// Differentiate the cached tile; edge columns of the tile are skipped and
// covered by the neighbouring (overlapping) tile. (Earlier out-of-bounds
// accesses here were fixed, per the original author's notes.)
for (int i = 1; i < cacheWidth - 1; i++)
{
r = cache[threadIdx.x][i - 1] - cache[threadIdx.x][i + 1];
if (r > border_number)
count_points = count_points + 1;
}
// Barrier before the next iteration overwrites the tile.
__syncthreads();
}
dev_result[rowsCountBeforeOrangeLine + threadIdx.x] = count_points; // single coalesced global write per thread
}
// Print a rowsY x colsX integer matrix to stdout, one tab-separated row per
// line.
void printMatrix(int* matrix, int colsX, int rowsY)
{
    for (int row = 0; row < rowsY; row++)
    {
        for (int col = 0; col < colsX; col++)
            cout << matrix[row * colsX + col] << "\t";
        cout << "\n";
    }
}
// Compare the GPU per-row edge counts against the CPU reference.
// Prints the first (up to) 20 entries of both arrays for eyeballing, then
// returns true iff all rowsY entries match. colsX is unused (kept for
// signature compatibility).
bool checkResult(int* host_result, int* result, int colsX, int rowsY)
{
    // BUG FIX: the original always printed 20 entries, reading out of bounds
    // whenever rowsY < 20.
    int preview = rowsY < 20 ? rowsY : 20;
    for (int i = 0; i < preview; i++)
    {
        cout << "host_result[ " << i << " ] = " << host_result[i] << '\n';
    }
    for (int i = 0; i < preview; i++)
    {
        cout << "result[ " << i << " ] = " << result[i] << '\n';
    }
    for (int i = 0; i < rowsY; i++)
    {
        if (host_result[i] != result[i])
        {
            return false;
        }
    }
    return true;
}
// Benchmark driver for the vertical-edge detector (task 13): for every image
// row, count pixels whose horizontal colour derivative exceeds a threshold.
// Runs the CPU reference, the naive kernel and the shared-memory kernel, and
// compares results and memory throughput. Images are stored row-major.
int main(void)
{
    cudaEvent_t startCUDA, stopCUDA, startOptimalCUDA, stopOptimalCUDA;
    clock_t startCPU;
    float elapsedUsualTimeCUDA, elapsedTimeCPU, elapsedOptimalTime;
    // Sizes chosen to fit the tiled kernel's layout: 30*50 + 2 columns,
    // 256*70 rows.
    int colsX = 1502;
    int rowsY = 17920;
    int elementsQuantity = colsX * rowsY;
    cout << "Size in Mbs = " << elementsQuantity * sizeof(int) / 1048576.0 << "\n";
    int *matrix = new int[elementsQuantity];
    // Fill the image with pseudo-random grey levels.
    for (int i = 0; i < rowsY; i++)
    {
        for (int j = 0; j < colsX; j++)
        {
            matrix[i * colsX + j] = rand() % 255;
        }
    }
    int border_number = 10;
    cout << "border_number = " << border_number << '\n';
    // ---- CPU reference ----
    startCPU = clock();
    int *result = new int[rowsY];
    int r;
    int count_points;
    for (int i = 0; i < rowsY; i++)
    {
        count_points = 0;
        for (int j = 1; j < colsX - 1; j++)
        {
            // Central difference (cf. the Sobel gradient); /2 omitted.
            r = matrix[i * colsX + j - 1] - matrix[i * colsX + j + 1];
            if (r > border_number)
            {
                count_points++;
            }
        }
        result[i] = count_points;
    }
    clock_t end = clock();
    elapsedTimeCPU = (double)(end - startCPU) / CLOCKS_PER_SEC;
    cout << "CPU calculating time = " << elapsedTimeCPU * 1000 << " ms\n";
    cout << "CPU memory throughput = " << elementsQuantity *sizeof(int)/elapsedTimeCPU/1024/1024/1024 << " Gb/s\n";
    cout << "\n";
    // ---- Naive GPU kernel ----
    cudaEventCreate(&startCUDA);
    cudaEventCreate(&stopCUDA);
    int *dev_original_matrix, *dev_result;
    int *host_original_matrix, *host_result;
    host_original_matrix = matrix;
    host_result = new int[rowsY];
    for (int i = 0; i < rowsY; i++)
    {
        host_result[i] = 0;
    }
    CHECK( cudaMalloc(&dev_original_matrix, elementsQuantity * sizeof(int)));
    CHECK( cudaMalloc(&dev_result, rowsY * sizeof(int)));
    CHECK( cudaMemcpy(dev_original_matrix, host_original_matrix, elementsQuantity * sizeof(int), cudaMemcpyHostToDevice));
    CHECK( cudaMemcpy(dev_result, host_result, rowsY * sizeof(int), cudaMemcpyHostToDevice));
    cudaEventRecord(startCUDA, 0);
    calculate<<<(rowsY + 255) / 256, 256>>>(dev_original_matrix, dev_result, elementsQuantity, rowsY, colsX, border_number);
    cudaEventRecord(stopCUDA, 0);
    cout << "FINISH" << '\n';
    cudaEventSynchronize(stopCUDA);
    CHECK(cudaGetLastError());
    cudaEventElapsedTime(&elapsedUsualTimeCUDA, startCUDA, stopCUDA);
    cout << "CUDA sum time = " << elapsedUsualTimeCUDA << " ms\n";
    cout << "CUDA memory throughput = " << elementsQuantity * sizeof(int) / elapsedUsualTimeCUDA/1024/1024/1.024 << " Gb/s\n";
    CHECK( cudaMemcpy(host_result, dev_result, rowsY * sizeof(int),cudaMemcpyDeviceToHost));
    cout << "result was correct " << checkResult(host_result, result, colsX, rowsY) << "\n";
    cout << "Data size = " << (float)4 * elementsQuantity / 1024 / 1024 << "\n";
    CHECK( cudaFree(dev_original_matrix));
    CHECK( cudaFree(dev_result));
    // ---- Tiled (shared-memory) GPU kernel ----
    cudaEventCreate(&startOptimalCUDA);
    cudaEventCreate(&stopOptimalCUDA);
    int* good_host_result = new int[rowsY];
    for (int i = 0; i < rowsY; i++)
    {
        good_host_result[i] = 0;
    }
    int *good_dev_result;
    CHECK( cudaMalloc(&dev_original_matrix, elementsQuantity * sizeof(int)));
    CHECK( cudaMalloc(&good_dev_result,rowsY * sizeof(int)));
    CHECK( cudaMemcpy(dev_original_matrix, host_original_matrix, elementsQuantity * sizeof(int), cudaMemcpyHostToDevice));
    CHECK( cudaMemcpy(good_dev_result, good_host_result, rowsY * sizeof(int), cudaMemcpyHostToDevice));
    cudaEventRecord(startOptimalCUDA, 0);
    goodCalculation<<<(rowsY + 255) / 256, 256>>>(dev_original_matrix, good_dev_result, elementsQuantity, rowsY, colsX, border_number);
    cudaEventRecord(stopOptimalCUDA, 0);
    CHECK( cudaMemcpy(good_host_result, good_dev_result, rowsY * sizeof(int),cudaMemcpyDeviceToHost));
    cout << ("OPTIMAL SUMMATION WAS FINISHED");
    // BUG FIX: make sure the stop event has completed before querying it
    // (the blocking memcpy above likely covers this, but be explicit).
    cudaEventSynchronize(stopOptimalCUDA);
    cudaEventElapsedTime(&elapsedOptimalTime, startOptimalCUDA, stopOptimalCUDA);
    cout << "CUDA GOOD (OPTIMAL) sum time = " << elapsedOptimalTime << " ms\n";
    cout << "CUDA GOOD (OPTIMAL) memory throughput = " << elementsQuantity * sizeof(int) / elapsedOptimalTime/1024/1024/1.024 << " Gb/s\n";
    cout << "result was correct" << checkResult(good_host_result, result, colsX, rowsY) << "\n";
    cout << "Data size = " << (float)4 * elementsQuantity / 1024 / 1024 << "\n";
    CHECK( cudaFree(dev_original_matrix));
    CHECK( cudaFree(good_dev_result));
    // BUG FIX: release host buffers and CUDA events the original leaked.
    cudaEventDestroy(startCUDA);
    cudaEventDestroy(stopCUDA);
    cudaEventDestroy(startOptimalCUDA);
    cudaEventDestroy(stopOptimalCUDA);
    delete[] matrix;
    delete[] result;
    delete[] host_result;
    delete[] good_host_result;
    return 0;
}
//*/
|
20,275 | #include "includes.h"
// Advect particles through a velocity field stored in a pitched 2-D array.
// part: dx*dy particle positions, normalized to [0,1)^2; v: velocity field of
// pitch `pitch` bytes per row; dt: time step; each thread handles `lb`
// consecutive rows (the y grid is collapsed by a factor of lb).
__global__ void advectParticles_OGL(float2 *part, float2 *v, int dx, int dy, float dt, int lb, size_t pitch) {
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
// gtidx is the domain location in x for this thread
float2 pterm, vterm;
if (gtidx < dx) {
for (p = 0; p < lb; p++) {
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy) {
int fj = fi * dx + gtidx;
pterm = part[fj];
// Sample the velocity at the particle's grid cell. The pitched-pointer
// arithmetic below is byte-based, as required for cudaMallocPitch rows.
// NOTE(review): assumes pterm.x/y stay in [0,1) so xvi/yvi are in range;
// the wraparound below maintains that invariant after the first step.
int xvi = ((int)(pterm.x * dx));
int yvi = ((int)(pterm.y * dy));
vterm = *((float2*)((char*)v + yvi * pitch) + xvi);
// Euler step in x, then wrap into [0,1): subtracting the truncated part
// twice with a +1 in between handles small negative excursions.
pterm.x += dt * vterm.x;
pterm.x = pterm.x - (int)pterm.x;
pterm.x += 1.f;
pterm.x = pterm.x - (int)pterm.x;
// Same Euler step + wrap for y.
pterm.y += dt * vterm.y;
pterm.y = pterm.y - (int)pterm.y;
pterm.y += 1.f;
pterm.y = pterm.y - (int)pterm.y;
part[fj] = pterm;
}
} // If this thread is inside the domain in Y
} // If this thread is inside the domain in X
}
20,276 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Tamanho do filtro, como definido na especificação: 5x5.
#define TAM_FILTRO 5
#define GRID 1
#define BLOCK 1024
#define TILE_WIDTH 28
#define BLOCK_WIDTH (TILE_WIDTH + (TAM_FILTRO - 1))
// Kernel de convolução
// Mean (box) filter kernel: averages a TAM_FILTRO x TAM_FILTRO neighbourhood
// per pixel and channel, using a shared-memory tile with a halo.
// Launch: BLOCK_WIDTH x BLOCK_WIDTH threads per block, grid tiled by
// TILE_WIDTH; only the inner TILE_WIDTH x TILE_WIDTH threads produce output.
__global__ void convolucao(int *output, int *input, int width, int height){
    // Output pixel coordinates for this thread.
    int idxY = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int idxX = blockIdx.x * TILE_WIDTH + threadIdx.x;
    // Shared tile (with halo) to cut global-memory traffic.
    __shared__ int sharedMemory[BLOCK_WIDTH][BLOCK_WIDTH];
    int offset = TAM_FILTRO / 2;
    int sharedX = threadIdx.x, sharedY = threadIdx.y;
    // Input coordinates shifted by the halo.
    int row_i = idxY - offset;
    int col_i = idxX - offset;
    // Process each of the 3 colour channels.
    for(int channelIndex = 0; channelIndex < 3; channelIndex++){
        // The image matrix was flattened to an interleaved RGB vector.
        int index = (row_i * width + col_i) * 3 + channelIndex;
        // Stage the pixel (or 0 outside the image) into the tile.
        if((row_i >= 0) && (row_i < height) &&
           (col_i >= 0) && (col_i < width))
            sharedMemory[sharedY][sharedX] = input[index];
        else
            sharedMemory[sharedY][sharedX] = 0;
        // Wait until the whole tile is populated.
        __syncthreads();
        // Only interior threads compute output pixels.
        if(sharedY < TILE_WIDTH && sharedX < TILE_WIDTH){
            int total = 0;
            for(int i = 0; i < TAM_FILTRO; i++)
                for(int j = 0; j < TAM_FILTRO; j++)
                    total += sharedMemory[sharedY + i][sharedX + j];
            // Average of the window.
            if(idxY < height && idxX < width)
                output[(idxY * width + idxX) * 3 + channelIndex] = total / (TAM_FILTRO * TAM_FILTRO);
        }
        // BUG FIX: this barrier was inside the divergent `if` above, which is
        // undefined behavior; it must also run before the next channel
        // iteration overwrites the tile while some threads still read it.
        __syncthreads();
    }
}
// Reads a PPM (P3) image named on the command line, runs the box-filter
// convolution kernel on the GPU, and writes the result to out_cuda_<name>.
int main(int argc, char *argv[]){
    int totalX, totalY, N;
    FILE *arquivo;
    arquivo = fopen(argv[1], "r");
    if(arquivo == NULL){
        printf("Arquivo não pode ser encontrado!\n");
        exit(1);
    }
    // Skip the "P3" magic number.
    fseek(arquivo, 2, SEEK_SET);
    // Image dimensions.
    fscanf(arquivo, "%d %d", &totalX, &totalY);
    N = totalX * totalY * 3;
    // Host input and output buffers (interleaved RGB, one int per sample).
    int *input, *outputFinal;
    input = (int *) malloc(sizeof(int) * N);
    outputFinal = (int *) malloc(sizeof(int) * N);
    // Device buffers.
    int *dev_input, *dev_output;
    cudaMalloc((void**) &dev_input, N * sizeof(int));
    cudaMalloc((void**) &dev_output, N * sizeof(int));
    // Skip the "255" max-value line.
    fseek(arquivo, 4, SEEK_CUR);
    // Load the pixel data.
    for(int i = 0; i < N; i++){
        fscanf(arquivo, "%d", &input[i]);
    }
    // BUG FIX: close the input file — the original reassigned `arquivo` to
    // the output file below and leaked this handle.
    fclose(arquivo);
    cudaMemcpy(dev_input, input, N * sizeof(int), cudaMemcpyHostToDevice);
    // Standard block/grid dimensions: halo-sized blocks tiled by TILE_WIDTH.
    dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
    dim3 dimGrid((totalX-1) / TILE_WIDTH + 1, (totalY-1) / TILE_WIDTH + 1);
    convolucao<<<dimGrid, dimBlock>>>(dev_output, dev_input, totalX, totalY);
    cudaMemcpy(outputFinal, dev_output, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Write the filtered image.
    char str_final[100];
    strcpy(str_final, "out_cuda_");
    strcat(str_final, argv[1]);
    arquivo = fopen(str_final, "w");
    if(arquivo != NULL){
        fprintf(arquivo, "P3\n%d %d\n255\n", totalX, totalY);
        for(int i = 0; i < N; i++)
            fprintf(arquivo, "%d\n", outputFinal[i]);
        fclose(arquivo);
    }
    else
        printf("Arquivo não pode ser criado!\n");
    free(input);
    free(outputFinal);
    cudaFree(dev_input);
    cudaFree(dev_output);
    return 0;
}
|
20,277 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define NUM_THREADS 256
// Select the first CUDA device with compute capability >= 1.0 and make it
// current. Returns false (with a message on stderr) when no usable device
// exists.
bool InitCUDA()
{
    int count = 0;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int chosen = 0;
    for (; chosen < count; chosen++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, chosen) == cudaSuccess &&
            prop.major >= 1)
            break;
    }
    if (chosen == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(chosen);
    return true;
}
// Fill an n x n matrix (leading dimension lda) with uniform pseudo-random
// floats in [0, 1]; uses the C rand() stream, so seed with srand() first for
// reproducibility.
void matgen(float* a, int lda, int n)
{
    for (int row = 0; row < n; row++)
        for (int col = 0; col < n; col++)
            a[row * lda + col] = (float) rand() / RAND_MAX;
}
// Reference triple-loop matrix multiply: c = a * b for n x n row-major
// matrices with leading dimensions lda/ldb/ldc. The inner product is
// accumulated in double for accuracy before narrowing to float.
void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            double acc = 0;
            for (int k = 0; k < n; k++)
                acc += a[row * lda + k] * b[k * ldb + col];
            c[row * ldc + col] = acc;
        }
    }
}
// Print the maximum and mean relative error between two n x n matrices with
// leading dimensions lda/ldb; entries where the reference b is 0 are skipped
// (but still counted in the mean's denominator, matching the original).
void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
{
    float max_err = 0;
    float average_err = 0;
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            float ref = b[row * ldb + col];
            if (ref != 0) {
                float err = fabs((a[row * lda + col] - ref) / ref);
                if (err > max_err) max_err = err;
                average_err += err;
            }
        }
    }
    printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n));
}
// Matrix multiply c = a * b: one block per output row. The block first
// caches row `blockIdx.x` of `a` in dynamic shared memory, then each thread
// computes a strided subset of that row's output columns.
// Requires: dynamic shared memory of n * sizeof(float); lda/ldb/ldc are in
// elements (the caller passes pitch / sizeof(float)).
__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
extern __shared__ float data[];
const int tid = threadIdx.x;
// const int bid = blockIdx.x;
// const int idx = bid * blockDim.x + tid;
const int row = blockIdx.x;
// const int column = idx % n;
int i, j;
// Cooperative load of this block's row of `a` into shared memory.
for(i = tid; i < n; i += blockDim.x) {
data[i] = a[row * lda + i];
}
__syncthreads();
for(j = tid; j < n; j += blockDim.x) {
float t = 0;
float y = 0;
// Compensated (Kahan-style) summation with a negated compensation term:
// `y` tracks the rounding error of each addition so it can be re-applied.
// NOTE(review): the sign convention differs from textbook Kahan — the
// statement order below is load-bearing; do not reorder.
for(i = 0; i < n; i++) {
// t += a[row * lda + i] * b[i * ldb + column];
float r;
y -= data[i] * b[i * ldb + j];
r = t - y;
y = (r - t) + y;
t = r;
}
c[row * ldc + j] = t;
}
}
// Host wrapper: copies a and b to pitched device buffers, launches
// matMultCUDA (one block per row, NUM_THREADS threads, n floats of shared
// memory), copies c back, and returns the elapsed wall-clock ticks
// (including allocation and transfers).
// NOTE(review): no CUDA error checking here — failures surface only through
// wrong results; consider wrapping the calls at the call site.
clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
float *ac, *bc, *cc;
clock_t start, end;
start = clock();
// cudaMalloc((void**) &ac, sizeof(float) * n * n);
// cudaMalloc((void**) &bc, sizeof(float) * n * n);
// cudaMalloc((void**) &cc, sizeof(float) * n * n);
// Pitched allocations keep each row aligned for coalesced access.
size_t pitch_a, pitch_b, pitch_c;
cudaMallocPitch((void**) &ac, &pitch_a, sizeof(float) * n, n);
cudaMallocPitch((void**) &bc, &pitch_b, sizeof(float) * n, n);
cudaMallocPitch((void**) &cc, &pitch_c, sizeof(float) * n, n);
// cudaMemcpy2D(ac, sizeof(float) * n, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
// cudaMemcpy2D(bc, sizeof(float) * n, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
cudaMemcpy2D(ac, pitch_a, a, sizeof(float) * lda, sizeof(float) * n, n, cudaMemcpyHostToDevice);
cudaMemcpy2D(bc, pitch_b, b, sizeof(float) * ldb, sizeof(float) * n, n, cudaMemcpyHostToDevice);
// int blocks = (n + NUM_THREADS - 1) / NUM_THREADS;
// matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, n, bc, n, cc, n, n);
// Leading dimensions are the pitches converted from bytes to elements.
matMultCUDA<<<n, NUM_THREADS, sizeof(float) * n>>> (ac, pitch_a / sizeof(float), bc, pitch_b / sizeof(float), cc, pitch_c / sizeof(float), n);
// cudaMemcpy2D(c, sizeof(float) * ldc, cc, sizeof(float) * n, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
// Blocking D2H copy also synchronizes with the kernel before timing ends.
cudaMemcpy2D(c, sizeof(float) * ldc, cc, pitch_c, sizeof(float) * n, n, cudaMemcpyDeviceToHost);
cudaFree(ac);
cudaFree(bc);
cudaFree(cc);
end = clock();
return end - start;
}
// Benchmark: multiply two random 1000x1000 matrices on the GPU and on the
// CPU, compare the results and report both timings.
int main()
{
    float *a, *b, *c, *d;
    int n = 1000;
    if(!InitCUDA()) return 0;
    a = (float*) malloc(sizeof(float) * n * n);
    b = (float*) malloc(sizeof(float) * n * n);
    c = (float*) malloc(sizeof(float) * n * n);  // GPU result
    d = (float*) malloc(sizeof(float) * n * n);  // CPU reference
    srand(0);
    matgen(a, n, n);
    matgen(b, n, n);
    clock_t time = matmultCUDA(a, n, b, n, c, n, n);
    clock_t startc, timec;
    startc = clock();
    matmult(a, n, b, n, d, n, n);
    timec = clock() - startc;
    compare_mat(c, n, d, n, n);
    printf("GPU time used: %f \n", (double) time / CLOCKS_PER_SEC);
    printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC);
    // BUG FIX: release the four host buffers the original leaked.
    free(a);
    free(b);
    free(c);
    free(d);
    return 0;
}
20,278 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with the given context message if the most recent CUDA call failed;
// no-op otherwise.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess) return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// 27-point 3D Jacobi-style stencil, unrolled 4x along j: each thread
// produces out[k][j..j+3][i], sharing neighbor loads between the four
// outputs via the _t_*_ partial sums. The casts below hard-code a
// 514x514x514 row-major layout (512 interior + 2 halo), so callers must
// pass N == 514 -- TODO confirm against host_code's allocation.
__global__ void j3d27pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
//Determining the block's indices (interior starts at 1 because of the halo)
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 1;
int i = max(i0,1) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 1;
int j = max(j0,1) + 4*(int)(threadIdx.y);
int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 1;
int k = max(k0,1) + (int)(threadIdx.z);
double (*in)[514][514] = (double (*)[514][514])t_in;
double (*out)[514][514] = (double (*)[514][514])t_out;
// fix: the first conjunction used bitwise '&' (a typo). '&&' restores
// short-circuit evaluation; the guard's truth value is unchanged.
if (i<=N-2 && j<=N-2 && k<=N-2) {
double _t_1_ = in[k-1][j-1][i-1];
_t_1_ += in[k-1][j-1][i+1];
_t_1_ += in[k+1][j-1][i-1];
_t_1_ += in[k+1][j-1][i+1];
_t_1_ += in[k-1][j+1][i-1];
double _t_5_ = in[k-1][j+1][i-1];
double _t_7_ = in[k-1][j+1][i-1];
_t_1_ += in[k-1][j+1][i+1];
_t_5_ += in[k-1][j+1][i+1];
_t_7_ += in[k-1][j+1][i+1];
_t_1_ += in[k+1][j+1][i-1];
_t_5_ += in[k+1][j+1][i-1];
_t_7_ += in[k+1][j+1][i-1];
_t_1_ += in[k+1][j+1][i+1];
_t_5_ += in[k+1][j+1][i+1];
_t_7_ += in[k+1][j+1][i+1];
double outkc0jc0ic0 = 0.75 * _t_1_;
double _t_0_ = in[k][j-1][i];
_t_0_ += in[k-1][j][i];
_t_5_ += in[k-1][j][i];
_t_0_ += in[k+1][j][i];
_t_5_ += in[k+1][j][i];
_t_0_ += in[k][j+1][i];
double outkc0jp1ic0 = 0.125 * in[k][j+1][i];
double _t_6_ = in[k][j+1][i];
_t_0_ += in[k][j][i-1];
_t_5_ += in[k][j][i-1];
_t_0_ += in[k][j][i+1];
_t_5_ += in[k][j][i+1];
outkc0jc0ic0 += 1.14 * _t_0_;
double _t_2_ = in[k-1][j-1][i];
_t_2_ += in[k][j-1][i-1];
_t_2_ += in[k][j-1][i+1];
_t_2_ += in[k+1][j-1][i];
_t_2_ += in[k-1][j][i-1];
double _t_4_ = in[k-1][j][i-1];
_t_2_ += in[k-1][j][i+1];
_t_4_ += in[k-1][j][i+1];
_t_2_ += in[k-1][j+1][i];
double _t_3_ = in[k-1][j+1][i];
double _t_8_ = in[k-1][j+1][i];
_t_2_ += in[k][j+1][i-1];
_t_3_ += in[k][j+1][i-1];
_t_8_ += in[k][j+1][i-1];
_t_2_ += in[k][j+1][i+1];
_t_3_ += in[k][j+1][i+1];
_t_8_ += in[k][j+1][i+1];
_t_2_ += in[k+1][j][i-1];
_t_4_ += in[k+1][j][i-1];
_t_2_ += in[k+1][j][i+1];
_t_4_ += in[k+1][j][i+1];
_t_2_ += in[k+1][j+1][i];
_t_3_ += in[k+1][j+1][i];
_t_8_ += in[k+1][j+1][i];
outkc0jc0ic0 += 1.031 * _t_2_;
outkc0jc0ic0 += 0.125 * in[k][j][i];
_t_3_ += in[k][j][i];
_t_3_ += in[k][j+2][i];
double outkc0jp2ic0 = 0.125 * in[k][j+2][i];
double _t_9_ = in[k][j+2][i];
outkc0jp1ic0 += 1.14 * _t_3_;
_t_4_ += in[k-1][j+2][i-1];
_t_8_ += in[k-1][j+2][i-1];
double _t_10_ = in[k-1][j+2][i-1];
_t_4_ += in[k-1][j+2][i+1];
_t_8_ += in[k-1][j+2][i+1];
_t_10_ += in[k-1][j+2][i+1];
_t_4_ += in[k+1][j+2][i-1];
_t_8_ += in[k+1][j+2][i-1];
_t_10_ += in[k+1][j+2][i-1];
_t_4_ += in[k+1][j+2][i+1];
_t_8_ += in[k+1][j+2][i+1];
_t_10_ += in[k+1][j+2][i+1];
outkc0jp1ic0 += 0.75 * _t_4_;
_t_5_ += in[k-1][j+2][i];
_t_6_ += in[k-1][j+2][i];
double _t_11_ = in[k-1][j+2][i];
_t_5_ += in[k][j+2][i-1];
_t_6_ += in[k][j+2][i-1];
_t_11_ += in[k][j+2][i-1];
_t_5_ += in[k][j+2][i+1];
_t_6_ += in[k][j+2][i+1];
_t_11_ += in[k][j+2][i+1];
_t_5_ += in[k+1][j+2][i];
_t_6_ += in[k+1][j+2][i];
_t_11_ += in[k+1][j+2][i];
outkc0jp1ic0 += 1.031 * _t_5_;
_t_6_ += in[k][j+3][i];
_t_10_ += in[k-1][j+4][i-1];
_t_10_ += in[k-1][j+4][i+1];
_t_10_ += in[k+1][j+4][i-1];
_t_10_ += in[k+1][j+4][i+1];
double outkc0jp3ic0 = 0.75 * _t_10_;
outkc0jp3ic0 += 0.125 * in[k][j+3][i];
outkc0jp2ic0 += 1.14 * _t_6_;
_t_7_ += in[k-1][j+3][i-1];
_t_7_ += in[k-1][j+3][i+1];
_t_7_ += in[k+1][j+3][i-1];
_t_7_ += in[k+1][j+3][i+1];
outkc0jp2ic0 += 0.75 * _t_7_;
_t_11_ += in[k-1][j+3][i-1];
_t_11_ += in[k-1][j+3][i+1];
_t_11_ += in[k+1][j+3][i-1];
_t_11_ += in[k+1][j+3][i+1];
_t_11_ += in[k-1][j+4][i];
_t_11_ += in[k][j+4][i-1];
_t_11_ += in[k][j+4][i+1];
_t_11_ += in[k+1][j+4][i];
outkc0jp3ic0 += 1.031 * _t_11_;
_t_8_ += in[k-1][j+3][i];
_t_8_ += in[k][j+3][i-1];
_t_8_ += in[k][j+3][i+1];
_t_8_ += in[k+1][j+3][i];
outkc0jp2ic0 += 1.031 * _t_8_;
_t_9_ += in[k-1][j+3][i];
_t_9_ += in[k+1][j+3][i];
_t_9_ += in[k][j+3][i-1];
_t_9_ += in[k][j+3][i+1];
_t_9_ += in[k][j+4][i];
outkc0jp3ic0 += 1.14 * _t_9_;
out[k][j][i] = outkc0jc0ic0;
out[k][j+1][i] = outkc0jp1ic0;
out[k][j+2][i] = outkc0jp2ic0;
out[k][j+3][i] = outkc0jp3ic0;
}
}
// Host driver: allocate device buffers, copy h_in, run the j3d27pt stencil
// once, and copy the result back to h_out. N is the edge length of the
// N^3 grid (the kernel's casts assume N == 514).
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
cudaMalloc (&in, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
check_error ("Failed to copy in to device\n"); // fix: copy was unchecked
double *out;
cudaMalloc (&out, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for out\n");
// y-dimension is divided by 4*blockDim.y because the kernel is unrolled 4x in j.
dim3 blockconfig (32,4,4);
dim3 gridconfig (ceil(N-2, blockconfig.x), ceil(N-2, 4*blockconfig.y), ceil(N-2, blockconfig.z));
j3d27pt<<<gridconfig, blockconfig>>> (in, out, N);
check_error ("j3d27pt kernel launch failed\n"); // fix: launch was unchecked
cudaMemcpy (h_out, out, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
check_error ("Failed to copy out from device\n"); // fix: copy was unchecked
cudaFree (in);
cudaFree (out);
}
|
20,279 | /* Copyright (C) 2007-2012 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/**
* \file
*
* \author Anoop Saldanha <anoopsaldanha@gmail.com>
*
* The Cuda kernel for MPM AC.
*
* \todo - This is a basic version of the kernel.
* - Support 16 bit state tables.
* - Texture memory.
* - Multiple threads per blocks of threads. Make use of
* shared memory/texture memory.
*/
#define FRAGNUM 4
extern "C"
// Aho-Corasick pattern search over a batch of packets (64-bit build).
// Each packet record in d_buffer starts at o_buffer[pid]-d_buffer_start_offset
// with the layout: [0..7] buflen, [8..15] pointer to the 32-bit state table,
// [16..23] pointer to the state-depth table, [24..] payload bytes.
// NOTE(review): the 8-byte reads via `unsigned long` assume an LP64 host and
// device-visible pointers embedded in the buffer -- confirm with the packer.
// Grid: blockDim.x threads over packets; threadIdx.y selects one of FRAGNUM
// fragments of the packet, processed in parallel.
__global__ void SCACCudaSearch64(unsigned char *d_buffer,
unsigned int d_buffer_start_offset,
unsigned int *o_buffer,
unsigned int *results_buffer,
unsigned int nop,
unsigned char *tolower)
{
unsigned int u = 0;
unsigned int pid = blockIdx.x * blockDim.x + threadIdx.x; //packet id
unsigned int fid = threadIdx.y; //fragment id
if (pid >= nop)
return;
// Unpack the per-packet record (see layout above).
unsigned int buflen = *((unsigned long *)(d_buffer + (o_buffer[pid] - d_buffer_start_offset)));
unsigned int (*state_table_u32)[256] =
(unsigned int (*)[256])*((unsigned long *)(d_buffer + (o_buffer[pid] - d_buffer_start_offset) + 8));
unsigned int *state_depth_table = (unsigned int *)*((unsigned long *)(d_buffer + (o_buffer[pid] - d_buffer_start_offset) + 16));
unsigned char *buf = (d_buffer + (o_buffer[pid] - d_buffer_start_offset) + 24);
unsigned int state = 0;
unsigned int matches = 0;
// Split the payload into FRAGNUM fragments; the last fragment absorbs
// the remainder.
unsigned int fraglen = buflen / FRAGNUM;
unsigned int fragbeg = fraglen * fid;
unsigned int fragend = fragbeg + fraglen;
if(fid == FRAGNUM - 1)
fragend = buflen;
// results[-1] holds the match count; pairs of (offset, state id) follow.
unsigned int *results = (results_buffer + ((o_buffer[pid] - d_buffer_start_offset + fragbeg) * 2) + 1);
// Scan this fragment. High byte of the state word flags "output state";
// low 24 bits are the state id.
for (u = fragbeg; u < fragend; u++) {
state = state_table_u32[state & 0x00FFFFFF][tolower[buf[u]]];
if (state & 0xFF000000) {
results[matches++] = u;
results[matches++] = state & 0x00FFFFFF;
}
}
// Continue past the fragment boundary until the automaton's depth says no
// pattern straddling the boundary can still match (so no hits are lost).
unsigned int distance = 0;
for( ; u < buflen;u++)
{
state = state_table_u32[state & 0x00FFFFFF][tolower[buf[u]]];
distance++;
if(state_depth_table[state & 0x00FFFFFF] <= distance)
break;
if(state & 0xFF000000)
{
results[matches++] = u;
results[matches++] = state & 0x00FFFFFF;
}
}
*(results - 1) = matches;
/* if(fid == FRAGNUM - 1)
{
unsigned int *res = results_buffer + ((o_buffer[pid] - d_buffer_start_offset) * 2);
for(unsigned int i = 0; i < buflen * 2 ; i++)
{
printf("%d ",res[i]);
}
printf("\n");
}
*/
return;
}
extern "C"
// Aho-Corasick pattern search, 32-bit variant: one thread per packet, no
// fragmentation. Packet record layout (relative to o_buffer[tid] -
// d_buffer_start_offset): [0..3] buflen, [4..7] pointer to the state table,
// [8..] payload. NOTE(review): the 4-byte pointer read assumes a 32-bit
// address embedded by the host-side packer -- confirm.
__global__ void SCACCudaSearch32(unsigned char *d_buffer,
unsigned int d_buffer_start_offset,
unsigned int *o_buffer,
unsigned int *results_buffer,
unsigned int nop,
unsigned char *tolower)
{
unsigned int u = 0;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= nop)
return;
unsigned int buflen = *((unsigned int *)(d_buffer + (o_buffer[tid] - d_buffer_start_offset)));
unsigned int (*state_table_u32)[256] =
(unsigned int (*)[256])*((unsigned int *)(d_buffer + (o_buffer[tid] - d_buffer_start_offset) + 4));
unsigned char *buf = (d_buffer + (o_buffer[tid] - d_buffer_start_offset) + 8);
unsigned int state = 0;
unsigned int matches = 0;
// results[-1] receives the match count; (offset, state id) pairs follow.
unsigned int *results = (results_buffer + ((o_buffer[tid] - d_buffer_start_offset) * 2) + 1);
// High byte of the state word flags an output state; low 24 bits are the id.
for (u = 0; u < buflen; u++) {
state = state_table_u32[state & 0x00FFFFFF][tolower[buf[u]]];
if (state & 0xFF000000) {
results[matches++] = u;
results[matches++] = state & 0x00FFFFFF;
}
}
*(results - 1) = matches;
return;
}
|
20,280 | // HelloWorldCUDA.cpp : 이 파일에는 'main' 함수가 포함됩니다. 거기서 프로그램 실행이 시작되고 종료됩니다.
//
#include <iostream>
// CUDA runtime
#include <cuda_runtime.h>
// Host-side greeting, printed via the ordinary C runtime.
void printHelloWorld()
{
printf("Hello World!\n");
}
// Device-side greeting via CUDA's device printf; output is buffered on the
// GPU and only appears after a synchronizing call on the host.
__global__ void printHelloWorldCUDA()
{
printf("Hello World from CUDA!\n");
}
// Print a greeting from the host, then one from the device.
int main()
{
printHelloWorld();
printHelloWorldCUDA <<<1,1>>>();
// fix: kernel launches are asynchronous -- without this sync the process
// can exit before the kernel runs and the device printf buffer is never
// flushed, so "Hello World from CUDA!" would not appear.
cudaDeviceSynchronize();
return 0;
}
// 프로그램 실행: <Ctrl+F5> 또는 [디버그] > [디버깅하지 않고 시작] 메뉴
// 프로그램 디버그: <F5> 키 또는 [디버그] > [디버깅 시작] 메뉴
// 시작을 위한 팁:
// 1. [솔루션 탐색기] 창을 사용하여 파일을 추가/관리합니다.
// 2. [팀 탐색기] 창을 사용하여 소스 제어에 연결합니다.
// 3. [출력] 창을 사용하여 빌드 출력 및 기타 메시지를 확인합니다.
// 4. [오류 목록] 창을 사용하여 오류를 봅니다.
// 5. [프로젝트] > [새 항목 추가]로 이동하여 새 코드 파일을 만들거나, [프로젝트] > [기존 항목 추가]로 이동하여 기존 코드 파일을 프로젝트에 추가합니다.
// 6. 나중에 이 프로젝트를 다시 열려면 [파일] > [열기] > [프로젝트]로 이동하고 .sln 파일을 선택합니다.
|
20,281 | /*
Created based off of Cuda intro tutorial: https://devblogs.nvidia.com/even-easier-introduction-cuda/
Compile with g++: g++ add.cpp -o add
Complie with Cuda nvcc: nvcc add.cu -o add_cuda
* Must rename file to *.cu in order to compile with Cuda
*/
#include <iostream>
#include <string>
#include <math.h>
#include <cuda_profiler_api.h>
using namespace std;
// Single Thread GPU Add
// Serial add on the GPU: a single thread walks the whole array and stores
// y[i] = x[i] + y[i]. Intended for a <<<1, 1>>> launch (baseline case).
__global__
void addCuda(int n, float* x, float* y) {
int i = 0;
while (i < n) {
y[i] = x[i] + y[i];
++i;
}
}
// Parallel GPU Add
__global__
void addCudaParallel(int n, float* x, float* y) {
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
// Parallel Block GPU Add
__global__
void addCudaParallelBlock(int n, float* x, float* y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
// Report how far the n elements of y deviate from `expected`: prints the
// largest absolute error, the signed error sum, and how many elements are
// not exactly equal to `expected`.
void calcError(string label, float *y, float expected, int n) {
float maxError = 0.0f;
float totalError = 0.0f;
int totalOff = 0;
for (int i = 0; i < n; i++) {
float diff = y[i] - expected;
maxError = fmax(maxError, fabs(diff));
totalError += diff;
if (diff != 0.0) totalOff++;
}
std::cout << label << std::endl;
std::cout << "Max Error: " << maxError << std::endl;
std::cout << "Total Error: " << totalError << std::endl;
std::cout << "Total Off: " << totalOff << std::endl;
}
// Re-initialize the operand arrays: x[i] = 1.0f and y[i] = 2.0f for every
// element, so an add should yield 3.0f everywhere.
void reset(float* x, float* y, int numCalcs) {
int i = 0;
while (i < numCalcs) {
x[i] = 1.0f;
y[i] = 2.0f;
++i;
}
}
// Driver: add two 1M-element arrays on the GPU using the multi-block
// kernel, then check every result against the expected 3.0f.
// (The single-thread addCuda and single-block addCudaParallel variants can
// be exercised the same way.)
int main(void) {
int numCalcs = 1 << 20; // 2^20 (~1M) elements
// Managed memory is accessible from both host and device.
float *x = NULL;
float *y = NULL;
cudaMallocManaged(&x, numCalcs * sizeof(float));
cudaMallocManaged(&y, numCalcs * sizeof(float));
// 256 threads per block; enough blocks to cover all elements (ceil-div).
int blockSize = 256;
int numBlocks = (numCalcs + blockSize - 1) / blockSize;
reset(x, y, numCalcs);
addCudaParallelBlock<<<numBlocks, blockSize>>>(numCalcs, x, y);
cudaDeviceSynchronize(); // wait for the GPU before reading y on the host
calcError("addCudaParallelBlock()", y, 3.0f, numCalcs);
// Release the managed allocations.
cudaFree(x);
cudaFree(y);
return 0;
}
|
20,282 | #include "includes.h"
// Convert one chunk ("stream") of a packed RGB image to 8-bit grayscale.
// `offset` is the byte offset of this chunk in the RGB buffer (a multiple
// of 3), `streamSize` the chunk's byte length, `size` the total number of
// gray output pixels. Each thread converts one pixel (3 RGB bytes -> 1 byte).
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size) {
int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
// Tail guard: stop threads past this chunk or past the output buffer.
if (((blockIdx.x * blockDim.x + threadIdx.x)*3)>=streamSize || gray_idx>=size) {
return;
}
// gray_value is declared elsewhere (includes.h) -- presumably the three
// RGB luminance weights; confirm its definition and value range.
gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
}
20,283 | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>
//#include <cuda_runtime_api.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
// CPU reference: 1D convolution along image rows.
// h_Dst[y][x] = sum over k in [-filterR, filterR] of
// h_Src[y][x+k] * h_Filter[filterR-k], with out-of-image taps skipped.
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
}
// fix: the store was inside the k-loop, rewriting the output once per
// tap; the final value was right but the work was redundant.
h_Dst[y * imageW + x] = sum;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
// CPU reference: 1D convolution along image columns.
// h_Dst[y][x] = sum over k in [-filterR, filterR] of
// h_Src[y+k][x] * h_Filter[filterR-k], with out-of-image taps skipped.
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
}
// fix: the store was inside the k-loop (one redundant write per tap).
h_Dst[y * imageW + x] = sum;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// GPU: Row convolution Kernel
////////////////////////////////////////////////////////////////////////////////
// GPU row convolution: one thread per output pixel. Thread (x, y)
// convolves row y around column x; taps outside the image are skipped.
__global__ void kernel_rows(const float *filter, const float *input, float *output,
int imageW, int imageH, int filterR){
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
// Output is addressed with the grid's total width (== imageW here).
int pitch = gridDim.x * blockDim.x;
float acc = 0;
for (int k = -filterR; k <= filterR; k++){
int src = col + k;
if (src >= 0 && src < imageW){
acc += input[row * imageW + src] * filter[filterR - k];
}
}
output[row * pitch + col] = acc;
}
////////////////////////////////////////////////////////////////////////////////
// GPU: Column convolution Kernel
////////////////////////////////////////////////////////////////////////////////
// GPU column convolution: one thread per output pixel. Thread (x, y)
// convolves column x around row y; taps outside the image are skipped.
__global__ void kernel_columns(const float *filter, const float *buffer, float *output,
int imageW, int imageH, int filterR){
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
// Output is addressed with the grid's total width (== imageW here).
int pitch = gridDim.x * blockDim.x;
float acc = 0;
for (int k = -filterR; k <= filterR; k++){
int src = row + k;
if (src >= 0 && src < imageH){
acc += buffer[src * imageW + col] * filter[filterR - k];
}
}
output[row * pitch + col] = acc;
}
// Auxiliary function for CUDA error checking
// Exit with a diagnostic if the CUDA runtime has a pending error.
void cudaCheckForErrors(){
cudaError_t status = cudaGetLastError();
if(status == cudaSuccess){
return;
}
// Report the failure as human-readable text and bail out.
printf("CUDA Error: %s\n", cudaGetErrorString(status));
exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
// Driver: run a separable (row then column) convolution on a square image
// on both the CPU and the GPU and compare the results element-wise.
int main(void) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU;
// GPU buffers and the host-side copy of the GPU result.
float *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU, *h_OutputGPU;
unsigned int imageW;
unsigned int imageH;
unsigned int i;
printf("Enter filter radius : ");
// fix: filter_radius and imageW are unsigned int, so "%d" was the wrong
// conversion specifier (undefined behavior); "%u" matches the type.
scanf("%u", &filter_radius);
// The user supplies one size N and we use imageW = imageH = N:
// for simplicity only square images are handled.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%u", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float));
cudaMalloc( (void **) &d_Filter, FILTER_LENGTH * sizeof(float));
cudaMalloc( (void **) &d_Input, imageW * imageH * sizeof(float));
cudaMalloc( (void **) &d_Buffer, imageW * imageH * sizeof(float));
cudaMalloc( (void **) &d_OutputGPU, imageW * imageH * sizeof(float));
if(!h_Filter || !h_Input || !h_Buffer || !h_OutputCPU || !h_OutputGPU){
printf("error allocating memory for the host\n");
exit(1);
}
if(!d_Filter || !d_Input || !d_Buffer || !d_OutputGPU){
printf("Error allocating memory for the device\n");
exit(1);
}
// Fixed seed so runs are reproducible.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
cudaMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice);
//////////////////////////////// CPU ///////////////////////////////////////
// Reference computation: convolve rows, then columns.
printf("CPU computation...\n");
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);
//////////////////////////////// GPU ///////////////////////////////////////
// One thread per pixel; images smaller than 32x32 fit in a single block,
// larger ones use 32x32 blocks tiling the image exactly.
dim3 block_dim;
dim3 grid_dim;
if(imageW < 32){
block_dim.x = imageW;
block_dim.y = imageH;
grid_dim.x = 1;
grid_dim.y = 1;
} else{
block_dim.x = 32;
block_dim.y = 32;
grid_dim.x = imageW / block_dim.x;
grid_dim.y = imageH / block_dim.y;
}
printf("GPU computation...\n");
kernel_rows<<<grid_dim, block_dim>>>(d_Filter, d_Input, d_Buffer, imageW, imageH, filter_radius);
cudaDeviceSynchronize();
cudaCheckForErrors();
kernel_columns<<<grid_dim, block_dim>>>(d_Filter, d_Buffer, d_OutputGPU, imageW, imageH, filter_radius);
cudaDeviceSynchronize();
cudaCheckForErrors();
cudaMemcpy(h_OutputGPU, d_OutputGPU, imageW * imageH * sizeof(float), cudaMemcpyDeviceToHost);
//////////////////////// RESULT COMPARISON /////////////////////////////////
// Flag the first element whose CPU/GPU difference exceeds `accuracy`.
for(i = 0; i < imageH * imageW; i++){
if(ABS(h_OutputGPU[i] - h_OutputCPU[i]) >= accuracy){
printf("GPU computations are not as accurate as we want.\n");
break;
}
}
////////////////// CPU: free all the allocated memory //////////////////////
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
////////////////// GPU: free all the allocated memory //////////////////////
free(h_OutputGPU);
cudaFree(d_Filter);
cudaFree(d_Input);
cudaFree(d_Buffer);
cudaFree(d_OutputGPU);
cudaDeviceReset();
return 0;
}
|
20,284 | #include <iostream>
#include <stdio.h>
#include <time.h>
//#define LENGTH 100
//#define rowA 4
//#define colA 1
//#define rowB 1
//#define colB 4
#define w 100
#define tw 10
//#define TILE_BLOCKS 10
//#define TILE_WIDTH 100
using namespace std;
// Naive dense matrix multiply: each thread computes one element of c = a*b
// as the dot product of a row of a and a column of b (all matrices w x w).
__global__ void mat_mult_simple(int (*a)[w], int (*b)[w], int (*c)[w]){
int r = blockIdx.y * blockDim.y + threadIdx.y;
int cidx = blockIdx.x * blockDim.x + threadIdx.x;
int acc = 0;
for (int k = 0; k < w; ++k){
acc += a[r][k] * b[k][cidx];
}
c[r][cidx] = acc;
}
// Tiled matrix multiply: each block computes a tw x tw tile of c = a*b,
// streaming tw-wide tiles of a and b through shared memory. Assumes the
// launch uses tw x tw blocks and that w is a multiple of tw.
__global__ void mat_mult_shared(int (*a)[w], int (*b)[w], int (*c)[w]){
__shared__ int s_a[tw][tw];
__shared__ int s_b[tw][tw];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int ty = threadIdx.y;
int tx = threadIdx.x;
int result = 0;
for (int p=0;p<w/tw;p++){
// Stage the p-th tile of each input.
s_a[ty][tx] = a[row][p*tw+tx];
s_b[ty][tx] = b[p*tw+ty][col];
__syncthreads(); // tiles fully loaded before any thread reads them
for (int k=0;k<tw;k++){
result += s_a[ty][k] * s_b[k][tx];
}
// fix: the barrier was inside the k-loop (tw barriers per tile instead
// of one). One barrier after the accumulation is what's needed so no
// thread overwrites the tiles while a neighbor is still reading them.
__syncthreads();
}
// fix: the store sat inside the p-loop, writing a partial result to global
// memory every tile; store once after all tiles are accumulated.
c[row][col] = result;
}
// Driver: multiply two w x w matrices of ones with the shared-memory kernel,
// print the last row (every entry should equal w) and the elapsed GPU time.
int main(){
int (*a)[w]; // host input A
int (*b)[w]; // host input B
int (*c)[w]; // host copy of the result
int (*d_a)[w], (*d_b)[w], (*d_c)[w]; // device matrices
a = (int(*)[w])malloc(w * w *sizeof(int));
b = (int(*)[w])malloc(w * w *sizeof(int));
for(int i=0 ; i< w; i++){
for (int j=0;j<w;j++){
a[i][j] = 1;
b[i][j] = 1;
}
}
cudaMalloc((void**)&d_a, w*w*sizeof(int));
cudaMalloc((void**)&d_b, w*w*sizeof(int));
cudaMalloc((void**)&d_c, w*w*sizeof(int));
c = (int(*)[w])malloc(w*w*sizeof(int));
cudaMemcpy(d_a, a, w*w*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, w*w*sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// One thread per output element: (w/tw)^2 blocks of tw x tw threads.
dim3 grid;
grid.x = w/tw;
grid.y = w/tw;
dim3 block;
block.x = tw;
block.y = tw;
cudaEventRecord(start);
//mat_mult_simple<<<grid,block>>>(d_a, d_b, d_c);
mat_mult_shared<<<grid,block>>>(d_a, d_b, d_c);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaMemcpy(c, d_c, w*w*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Sanity check: print row w-1 of the result.
for(int i=0; i<w ;i++){
std::cout << c[99][i]<<" ";
std::cout<<std::endl;
}
std::cout << "Time taken : " << milliseconds << std::endl;
// fix: host buffers, device buffers, and CUDA events were all leaked.
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
20,285 | //////////////////////////////
//MultiDimKernelLaunch.cpp
//This program is an example posted online concerning the
//launch of multiple processes in a 2D format. The hope
//is that this program also contains information concerning
//the passing and use of 2D arrays using CUDA. This program
//is probably written in C.
/////////////////////////////
////////////////////////////
//Libraries
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
///////////////////////////
//Define Kernel
// Each thread writes its owning block's linear id into its own cell of
// `array`, laid out row-major over the full 2D grid of threads.
__global__ void kernel(int *array){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// Flatten the 2D thread coordinate into a row-major 1D offset.
int pitch = gridDim.x * blockDim.x;
int cell = row * pitch + col;
// Linear id of this thread's block within the grid.
int block_id = blockIdx.y * gridDim.x + blockIdx.x;
array[cell] = block_id;
}
// Driver: launch a 2D grid of 2D blocks over a 16x16 domain, copy back the
// per-cell block ids, and print them as a table.
int main(void){
// Domain extents.
int num_elements_x = 16;
int num_elements_y = 16;
int num_bytes = num_elements_x * num_elements_y * sizeof(int);
int *device_array = 0;
int *host_array = 0;
host_array = (int*)malloc(num_bytes);
cudaMalloc((void**)&device_array, num_bytes);
// 4x4 threads per block => a 4x4 grid of blocks tiles the domain exactly.
dim3 block_size(4, 4);
dim3 grid_size(num_elements_x / block_size.x,
num_elements_y / block_size.y);
kernel <<<grid_size, block_size>>> (device_array);
// Bring the results back for inspection.
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
for(int row = 0; row < num_elements_y; row++){
for(int col = 0; col < num_elements_x; col++){
printf("%2d ", host_array[row*num_elements_x + col]);
}
printf("\n");
}
printf("\n");
// Clean up host and device allocations.
free(host_array);
cudaFree(device_array);
return 0;
}
|
20,286 | #include "rgb2yuv.cuh"
#include <stdio.h>
#include <stdint.h>
// Convert one pixel of a fixed 3840x2160 packed-RGB frame to planar YU12
// (I420): full-resolution Y plane, then 2x2-subsampled U and V planes.
// Launch with one thread per pixel.
__global__ void convert_rgb_to_yu12_kernel(uint8_t *rgb_input, uint8_t *yu12_output)
{
int y_idx = threadIdx.y + blockIdx.y * blockDim.y;
int x_idx = threadIdx.x + blockIdx.x * blockDim.x;
// fix: the old `idx` local was computed and never used; width/height were
// declared but the literals 3840/2160/1920 were hard-coded everywhere.
// Use the named constants so the geometry is stated once.
const int width = 3840, height = 2160;
int R, G, B, Y, U, V;
// Pixels are packed 3 bytes each, R G B order.
R = rgb_input[(width * y_idx + x_idx) * 3];
G = rgb_input[(width * y_idx + x_idx) * 3 + 1];
B = rgb_input[(width * y_idx + x_idx) * 3 + 2];
// Integer RGB -> YUV (BT.601-style fixed-point coefficients).
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// Luma plane, clamped to [0, 255].
yu12_output[width * y_idx + x_idx] = (uint8_t)((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
int start_u_output = width * height;
int start_v_output = width * height * 5 / 4;
// Chroma is subsampled 2x2: only even rows/cols emit U and V.
// (width/2)/2 * y_idx with even y_idx equals (width/2) * (y_idx/2).
if ((y_idx % 2 == 0) && (x_idx % 2 == 0)){
yu12_output[start_u_output + (width / 2) / 2 * y_idx + x_idx / 2] = (uint8_t)((U < 0) ? 0 : ((U > 255) ? 255 : U));
yu12_output[start_v_output + (width / 2) / 2 * y_idx + x_idx / 2] = (uint8_t)((V < 0) ? 0 : ((V > 255) ? 255 : V));
}
}
// Host wrapper: copy a 4K packed-RGB frame to the device, run the
// conversion kernel, and copy the YU12 (1.5 bytes/pixel) result back.
void convert_rgb_to_yu12(uint8_t *input, uint8_t *output)
{
const size_t rgb_bytes = 3840 * 2160 * 3 * sizeof(uint8_t);
const size_t yu12_bytes = 3840 * 2160 * 3 * sizeof(uint8_t) / 2;
uint8_t *dev_input = 0;
uint8_t *dev_output = 0;
cudaMalloc((void**)&dev_input, rgb_bytes);
cudaMalloc((void**)&dev_output, yu12_bytes);
cudaMemcpy(dev_input, input, rgb_bytes, cudaMemcpyHostToDevice);
// 32x16 threads per block, tiling 3840x2160 exactly.
dim3 threads(32, 16);
dim3 blocks(3840 / 32, 2160 / 16);
convert_rgb_to_yu12_kernel<<< blocks, threads >>> (dev_input, dev_output);
// Blocking D2H copy also synchronizes with the kernel.
cudaMemcpy(output, dev_output, yu12_bytes, cudaMemcpyDeviceToHost);
cudaFree(dev_input);
cudaFree(dev_output);
}
20,287 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <random>
#include <chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
// Fill h_vec with uniformly distributed floats in [start, stop), seeding
// the Mersenne Twister from the current wall-clock time.
void randomize_vector_float(thrust::host_vector<float> &h_vec, float start, float stop)
{
const auto seed = std::chrono::system_clock::now().time_since_epoch().count();
std::mt19937 engine(seed);
std::uniform_real_distribution<float> dist(start, stop);
for (auto it = h_vec.begin(); it != h_vec.end(); ++it)
{
*it = dist(engine);
}
}
// Driver: sum n random floats on the GPU with thrust::reduce and report the
// result plus the elapsed GPU time in milliseconds.
int main(int argc, char *argv[])
{
// fix: argv[1] was dereferenced unconditionally; a missing argument
// crashed the program before any useful diagnostic.
if (argc < 2)
{
fprintf(stderr, "usage: %s <num_elements>\n", argv[0]);
return 1;
}
int n = atoi(argv[1]);
thrust::host_vector<float> h_vec(n);
randomize_vector_float(h_vec, -1.0, 1.0);
thrust::device_vector<float> d_vec = h_vec; // H2D copy
// Time just the reduction with CUDA events.
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
float result = thrust::reduce(d_vec.begin(), d_vec.end());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms, start, stop);
printf("%f\n", result);
printf("%f\n", ms);
// fix: events were never destroyed.
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
20,288 | #include "includes.h"
// Single-thread kernel (launch <<<1,1>>>): computes the scalar ratio
// alpha = r_squared[0] / p_sum[0] on the device so no host round-trip is
// needed. Presumably the conjugate-gradient step size -- confirm with the
// calling solver.
__global__ void alpha_calculation(float * r_squared ,float * p_sum,float* alpha)
{
alpha[0] = r_squared[0]/p_sum[0] ;
}
20,289 | #include <iostream>
using namespace std;
// Element-wise square: d_out[i] = d_in[i]^2, one thread per element.
// NOTE(review): there is no `i < n` guard, so the launch must supply exactly
// as many threads as elements or out-of-bounds accesses occur.
__global__ void square(int *d_out, int *d_in){
int idx = blockDim.x*blockIdx.x + threadIdx.x;
int i = d_in[idx];
d_out[idx] = i*i;
}
// Driver: fill a managed array with 0..N-1 (the squaring launch itself is
// currently commented out, preserved below).
int main(){
const int ARRAY_SIZE = 1000;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// fix: `int id = cudaGetDevice(&id)` first read an uninitialized variable
// and then overwrote the fetched device ordinal with the cudaError_t
// return status. Fetch the ordinal properly instead.
int id = 0;
cudaGetDevice(&id);
(void)id; // only used by the commented-out prefetch experiment below
int *in, *out;
cudaMallocManaged((void**)&in, ARRAY_BYTES);
cudaMallocManaged((void**)&out, ARRAY_BYTES);
for(int i=0; i < ARRAY_SIZE; i++){
in[i] = i;
}
// int NUM_THREADS = 128;
// int NUM_BLOCKS = ARRAY_SIZE / NUM_THREADS + 1;
// cudaMemPrefetchAsync(in, ARRAY_BYTES, id);
// square<<<NUM_BLOCKS, NUM_THREADS>>>(out, in);
// cudaDeviceSynchronize();
// cudaMemPrefetchAsync(out, ARRAY_BYTES, cudaCpuDeviceId);
// for(int i=0; i< ARRAY_SIZE; i++){
// cout << out[i];
// if(i%10!=9) cout << "\t";
// else cout << endl;
// }
// fix: the managed allocations were leaked.
cudaFree(in);
cudaFree(out);
return 0;
}
20,290 | #include<stdio.h>
#include<cuda.h>
#define row1 2 /* Number of rows of first matrix */
#define col1 3 /* Number of columns of first matrix */
#define row2 3 /* Number of rows of second matrix */
#define col2 2 /* Number of columns of second matrix */
// One thread block per output element: block (x, y) computes n[y][x] as the
// dot product of row y of l (row1 x col1) with column x of m (row2 x col2).
__global__ void matproduct(int *l,int *m, int *n)
{
int col=blockIdx.x;
int row=blockIdx.y;
// Accumulate directly into the output cell, starting from zero.
n[col2*row+col]=0;
for(int k=0;k<col1;k++)
{
n[col2*row+col] += l[col1*row+k]*m[col2*k+col];
}
}
// Driver: read a 2x3 and a 3x2 matrix from stdin, multiply them on the GPU
// (one block per output element), and print the 2x2 product.
int main()
{
// Host matrices (dimensions fixed by the row1/col1/row2/col2 macros).
int a[row1][col1];
int b[row2][col2];
int c[row1][col2];
// Device copies.
int *d,*e,*f;
printf("\n Enter elements of first matrix of size 2*3\n");
for(int i=0;i<row1;i++)
for(int j=0;j<col1;j++)
scanf("%d",&a[i][j]);
printf("\n Enter elements of second matrix of size 3*2\n");
for(int i=0;i<row2;i++)
for(int j=0;j<col2;j++)
scanf("%d",&b[i][j]);
cudaMalloc((void **)&d,row1*col1*sizeof(int));
cudaMalloc((void **)&e,row2*col2*sizeof(int));
cudaMalloc((void **)&f,row1*col2*sizeof(int));
cudaMemcpy(d,a,row1*col1*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(e,b,row2*col2*sizeof(int),cudaMemcpyHostToDevice);
// 2D grid of single-thread blocks: col2 across, row1 down -- one block
// per element of the product matrix.
dim3 grid(col2,row1);
matproduct<<<grid,1>>>(d,e,f);
cudaMemcpy(c,f,row1*col2*sizeof(int),cudaMemcpyDeviceToHost);
printf("\nProduct of two matrices:\n ");
for(int i=0;i<row1;i++)
{
for(int j=0;j<col2;j++)
printf("%d\t",c[i][j]);
printf("\n");
}
cudaFree(d);
cudaFree(e);
cudaFree(f);
return 0;
}
|
20,291 | #include "includes.h"
// customDllFunctions.cu
//////////////////////////
// Template to write .dlls
//////////////////////////
/* Include the following directories for the program to run appropriately:
///////////////////////
in the VC++ directories:
$(VC_IncludePath);
$(WindowsSDK_IncludePath);
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc;
$(CUDA_INC_PATH)
C:\Program Files\National Instruments\LabVIEW 2015\cintools
////////////////////////
CUDA/C/C++ directories:
./
../../common/inc
$(CudaToolkitDir)/include
////////////////////////////////
Linker/General include libraries:
cudart.lib
//changed the target machine platform from 32 to 64 bit
*/
////////////////////////////////////////////////////////////////////////////////
// Complex operations,
////////////////////////////////////////////////////////////////////////////////
// Overflow/underflow-safe magnitude sqrt(x^2 + y^2) of a complex value,
// computed as v * sqrt(1 + (w/v)^2) with v = max(|x|,|y|), w = min(|x|,|y|)
// so the intermediate square never overflows for representable results.
__device__ static __inline__ float cmagf(float x, float y)
{
float a, b, v, w, t;
a = fabsf(x);
b = fabsf(y);
// Order the operands so v >= w; dividing small by large keeps t <= 1.
if (a > b) {
v = a;
w = b;
}
else {
v = b;
w = a;
}
t = w / v;
t = 1.0f + t * t;
t = v * sqrtf(t);
// Fall back to v + w when v is 0 (avoids 0/0 -> NaN) or either operand
// is at/above ~FLT_MAX (3.402823466e38f), where the sqrt path misbehaves.
if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
t = v + w;
}
return t;
}
// Convert split-complex arrays (inRe, inIm) to polar form: phase[i] via
// atan2f and mag[i] via the overflow-safe cmagf. Grid-stride loop, so any
// launch configuration covers all `size` elements.
__global__ void ConvertCmplx2Polar(float* inRe, float* inIm, float* mag, float* phase, int size) {
const int stride = blockDim.x * gridDim.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < size)
{
phase[i] = atan2f(inIm[i], inRe[i]);
mag[i] = cmagf(inIm[i], inRe[i]);
i += stride;
}
}
20,292 | // kmrocki 1/15/19
__global__ void cudainit(unsigned int *canvas, int imgw) {
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
canvas[y*imgw+x] = (threadIdx.x + threadIdx.y + blockDim.x + blockDim.y ) % 7 == 0 ? 0xffffffff : 0x00000000;
}
// One step of Conway's Game of Life, one thread per cell, updated in place.
// NOTE(review): imgw bounds both x AND y, so the canvas is assumed square;
// in-place update means neighbor reads can see already-updated cells --
// acceptable here only if that race is intended (confirm with caller).
__global__ void gameoflife(unsigned int *canvas, int imgw)
{
// thread indices
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// original value at location y,x
unsigned int in = canvas[y*imgw+x];
unsigned int n=0; // number of neighbors
// counting live neighbors; edge cells simply have fewer neighbors
if (y > 0 && x > 0 ) n += (canvas[(y-1)*imgw+(x-1)] > 0) ? 1 : 0;
if (y > 0 ) n += (canvas[(y-1)*imgw+(x )] > 0) ? 1 : 0;
if (y > 0 && x < (imgw-1) ) n += (canvas[(y-1)*imgw+(x+1)] > 0) ? 1 : 0;
if ( x > 0 ) n += (canvas[(y )*imgw+(x-1)] > 0) ? 1 : 0;
if ( x < (imgw-1) ) n += (canvas[(y )*imgw+(x+1)] > 0) ? 1 : 0;
if (y < (imgw-1) && x > 0 ) n += (canvas[(y+1)*imgw+(x-1)] > 0) ? 1 : 0;
if (y < (imgw-1) ) n += (canvas[(y+1)*imgw+(x )] > 0) ? 1 : 0;
if (y < (imgw-1) && x < (imgw-1) ) n += (canvas[(y+1)*imgw+(x+1)] > 0) ? 1 : 0;
// rules of game of life: <2 or >3 neighbors dies; exactly 3 births a
// dead cell; otherwise the cell keeps its state
unsigned int out;
if (n < 2 || n > 3) out = 0;
else if (n == 3 && canvas[(y)*imgw+(x)] == 0) out = 0xffffffff;
else out = in;
canvas[y*imgw+x] = out;
}
// C-linkage host wrapper so non-CUDA translation units can launch the
// board-initialization kernel. `sbytes` is forwarded as the dynamic
// shared-memory size (the kernel itself declares none).
extern "C" void launch_cudainit(dim3 grid, dim3 block, int sbytes, unsigned int *canvas, int imgw)
{
cudainit<<< grid, block, sbytes >>>(canvas, imgw);
}
// C-linkage host wrapper: advance the Game of Life board by one generation.
// `sbytes` is forwarded as the dynamic shared-memory size (unused by the kernel).
extern "C" void launch_process(dim3 grid, dim3 block, int sbytes, unsigned int *canvas, int imgw)
{
gameoflife<<< grid, block, sbytes >>>(canvas, imgw);
}
|
20,293 | #include "includes.h"
#define DIMENSIONS 2
#define GPU_DEVICE_ZERO 0
// Assign every point to its nearest cluster.
// pointToClusterDistance is cluster-major: entry [cluster*pointsCounter + point]
// holds the distance from `point` to `cluster`. The index of the winning
// cluster is written to minimumPointToCluster[point].
// `threads` must equal the threads-per-block used at launch time.
__global__ void minimumClusterDistance(int threads, double *pointToClusterDistance, int *minimumPointToCluster, int pointsCounter, int clusterCounter)
{
    // Guard the partially filled last block.
    // NOTE(review): when pointsCounter is an exact multiple of blockDim.x,
    // leftThreads == 0 and the whole last block is skipped -- confirm the
    // launcher allocates one extra block in that case.
    int leftThreads = pointsCounter % blockDim.x;
    if ((blockIdx.x + 1 != gridDim.x) || (leftThreads > threadIdx.x))
    {
        int pointIndex = threads * blockIdx.x + threadIdx.x;
        // BUGFIX: smallestIndex was declared (as a double) but never
        // initialized, so when cluster 0 was already the nearest cluster an
        // indeterminate value was written out. Initialize to cluster 0 and
        // keep it as an int, matching the output array's element type.
        int smallestIndex = 0;
        double min = pointToClusterDistance[pointIndex]; // distance to cluster 0
        for (int index = 1; index < clusterCounter; index++)
        {
            double temp = pointToClusterDistance[pointIndex + index * pointsCounter];
            if (temp < min)
            {
                smallestIndex = index;
                min = temp;
            }
        }
        minimumPointToCluster[pointIndex] = smallestIndex;
    }
}
20,294 | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
// ---- Flat-array tree node accessors ----
// Each node appears to occupy 7 consecutive ints starting at `index`:
//   [index+1] key0, [index+2] key1, [index+3] parent index,
//   [index+4..index+6] child0..child2 indices.
// Slot [index+0] is not touched by these helpers -- TODO confirm its meaning
// against the tree-building code.
__host__ __device__ int getKey0(int *arr, int index) { return arr[index + 1]; }
__host__ __device__ int getKey1(int *arr, int index) { return arr[index + 2]; }
__host__ __device__ int getParent(int *arr, int index) { return arr[index + 3]; }
__host__ __device__ int getChild0(int *arr, int index) { return arr[index + 4]; }
__host__ __device__ int getChild1(int *arr, int index) { return arr[index + 5]; }
__host__ __device__ int getChild2(int *arr, int index) { return arr[index + 6]; }
// Slot-position helpers: same layout as above, but return the array position
// of the field rather than the stored value.
__host__ __device__ int key0Index(int index) { return index + 1; }
__host__ __device__ int key1Index(int index) { return index + 2; }
__host__ __device__ int parentIndex(int index) { return index + 3; }
__host__ __device__ int child0Index(int index) { return index + 4; }
__host__ __device__ int child1Index(int index) { return index + 5; }
__host__ __device__ int child2Index(int index) { return index + 6; }
// Check if node is a leaf
// A node is a leaf when its child0 slot holds the sentinel value -2.
__host__ __device__ bool isLeaf_arr(int *arr, int index)
{
    return getChild0(arr, index) == -2;
}
// Walk forward one node (7 ints) at a time until a leaf is reached, then
// return that leaf's child0 slot.
// NOTE(review): per isLeaf_arr a leaf's child0 slot holds the -2 sentinel,
// so either leaves overload that slot with data or this always returns -2 --
// confirm against the tree-building code.
__host__ __device__ int getSmallest_arr(int *arr, int index)
{
//printf("getSmallest\n");
while (!isLeaf_arr(arr, index)) index += 7;
return getChild0(arr,index);
}
// Get which sibling the node is
__host__ __device__ int sibNumber_arr(int *arr, int index)
{
//printf("sibNum\n");
int parent = getParent(arr, index);
int child0 = getChild0(arr, parent);
for (int i = 0; i < 3; ++i) {
int check = arr[child0+i]; //arr[index+3] is index of parent, +4 gives index of first child, i iterates through 2nd/3rd children indicies
if (index == check) return i;
}
return -1;
}
// Update the parent nods efor the smallest child value
// Propagate a new smallest-descendant value into the ancestor keys.
// Which parent key holds the value depends on which sibling `index` is:
// middle child -> key0, right child -> key1; for the leftmost child the
// value belongs even higher up, so recurse toward the root.
__host__ __device__ void updateParentSmallest_arr(int* arr, int index, int data)
{
    int parent = getParent(arr, index);
    switch (sibNumber_arr(arr, index)) {
    case 0: {
        // Stop at the root (its parent slot holds the -2 sentinel).
        int grandparent = getParent(arr, parent);
        if (grandparent != -2) updateParentSmallest_arr(arr, parent, data);
        break;
    }
    case 1:
        arr[key0Index(parent)] = data;
        break;
    case 2:
        arr[key1Index(parent)] = data;
        break;
    }
}
|
20,295 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
// Element-wise vector add, C = A + B, where each thread handles `op_loop`
// consecutive elements.
//   N_op    : total number of thread-sized work items
//   op_loop : elements processed per thread
__global__ void VecAdd(float* A, float* B, float* C, int N_op, int op_loop) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= N_op) return;
    const int base = tid * op_loop;
    for (int k = 0; k < op_loop; k++) {
        C[base + k] = A[base + k] + B[base + k];
    }
}
// Usage: <prog> <threads_per_block> <ops_per_thread> <vector_size> <file_a> <file_b>
// Reads two float vectors from text files, adds them on the GPU, and prints
// the kernel time in milliseconds.
int main(int argc, char **argv) {
    // BUGFIX: argv[5] is read below, so SIX arguments are required (was argc<5,
    // which allowed argv[5] to be read past the argument list).
    if (argc < 6) {
        return 1;
    }
    int threadsPerBlock_op = atoi(argv[1]);
    int op_loop = atoi(argv[2]);
    int N = atoi(argv[3]);
    size_t size = N * sizeof(float);
    float time_spent;

    FILE *arrayfile_a = fopen(argv[4], "r");
    FILE *arrayfile_b = fopen(argv[5], "r");
    // BUGFIX: a missing input file previously crashed inside fscanf; fail cleanly.
    if (arrayfile_a == NULL || arrayfile_b == NULL) {
        if (arrayfile_a) fclose(arrayfile_a);
        if (arrayfile_b) fclose(arrayfile_b);
        return 1;
    }

    // Host buffers.
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);

    // Read the two input vectors.
    for (int loop = 0; loop < N; loop++)
        fscanf(arrayfile_a, "%f", &h_A[loop]);
    for (int loop = 0; loop < N; loop++)
        fscanf(arrayfile_b, "%f", &h_B[loop]);
    fclose(arrayfile_a);   // BUGFIX: input files were never closed
    fclose(arrayfile_b);

    // Device buffers.
    float* d_A; cudaMalloc(&d_A, size);
    float* d_B; cudaMalloc(&d_B, size);
    float* d_C; cudaMalloc(&d_C, size);

    // GPU timing via events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Launch: one thread per op_loop elements (ceil-div both ways).
    int threadsPerBlock = threadsPerBlock_op;
    int N_op = (N + op_loop - 1) / op_loop;
    int blocksPerGrid = (N_op + threadsPerBlock - 1) / threadsPerBlock;
    cudaEventRecord(start, 0);
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N_op, op_loop);
    cudaEventRecord(stop, 0);

    // h_C receives the result (blocking copy also synchronizes the kernel).
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_spent, start, stop);
    printf("%f", time_spent);

    // BUGFIX: the events and h_C were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
20,296 | #include "cudamat_kernels.cuh"
#include "float.h"
const int NUM_THREADS = 32;
// Block-level max reduction over sdata[0..NUM_THREADS-1]; the maximum ends up
// in sdata[0]. Classic tree reduction with a warp-synchronous tail: once only
// one warp remains, `volatile` stops the compiler from caching/reordering the
// shared-memory stores. Since NUM_THREADS is a compile-time constant (32
// here), the >=512/256/128 branches are eliminated at compile time.
// NOTE(review): the barrier-free tail relies on implicit warp lockstep,
// which is unsafe on Volta+ (independent thread scheduling) -- confirm the
// target architecture or add __syncwarp().
__device__ void reduceToMax(float* sdata, unsigned int tid){
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
else
{
// NUM_THREADS > 32: the last 64 values are folded by the first warp.
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); }
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
}
// Block-level sum reduction over sdata[0..NUM_THREADS-1]; the sum ends up in
// sdata[0]. Same structure as reduceToMax (tree reduction + volatile
// warp-synchronous tail); see the caveats noted there.
__device__ void reduceToSumLocal(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
}
/* ------------------------- Random number generation ------------------------- */
// Initialize one multiply-with-carry RNG stream per thread and persist its
// 64-bit state (seed packed with an initial carry of 1) to rndWords.
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Pack the seed and the initial carry of 1 into the 64-bit state word.
    unsigned long long state = ((unsigned long long)seed << 32) + 1;
    const unsigned int mult = rndMults[tid];
    // Burn in the chain so streams with similar multipliers decorrelate
    // before the first value is consumed.
    for (unsigned int step = 0; step < NUM_RND_BURNIN; step++) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
    }
    rndWords[tid] = state;
}
// Fill gData with uniform (0, 1] floats; one RNG stream per thread, state
// reloaded from and persisted back to rndWords.
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        gData[i] = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
    }
    rndWords[tid] = state;
}
// Fill gData with standard-normal samples via Box-Muller: each pair of
// uniforms yields two normals; the second fills element i + NUM_RND_STREAMS.
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += 2 * NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        const float u1 = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        const float u2 = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        const float theta = 2 * PI * u2;
        const float radius = sqrtf(-2 * __logf(u1));
        gData[i] = radius * __cosf(theta);
        if (i + NUM_RND_STREAMS < numElements)
            gData[i + NUM_RND_STREAMS] = radius * __sinf(theta);
    }
    rndWords[tid] = state;
}
// Dropout: keep gData[i] with probability (1 - dropprob), otherwise write val.
__global__ void kRandomDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float dropprob, float val) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        const float u = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        gData[i] = (u > dropprob) ? gData[i] : val;
    }
    rndWords[tid] = state;
}
// Bernoulli sampling: target[i] = 1 with probability gData[i], else 0.
__global__ void kSampleBernoulli(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        const float u = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        target[i] = (u < gData[i]) ? 1 : 0;
    }
    rndWords[tid] = state;
}
// Bernoulli sampling for tanh-range inputs: success probability (1+g)/2
// maps gData in [-1, 1] onto [0, 1].
__global__ void kSampleBernoulliTanh(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        const float u = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        target[i] = (u < (1.0 + gData[i]) / 2.0) ? 1 : 0;
    }
    rndWords[tid] = state;
}
// NOTE(review): despite the name this does NOT draw Poisson samples -- it
// advances the RNG stream and copies gData through unchanged (apparently a
// placeholder in the original code). Behavior preserved exactly.
__global__ void kSamplePoisson(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        target[i] = gData[i];
    }
    rndWords[tid] = state;
}
// target[i] = gData[i] + mult * N(0, 1), via Box-Muller; the second normal of
// each pair fills element i + NUM_RND_STREAMS.
__global__ void kSampleGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements, float mult) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int rmult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += 2 * NUM_RND_STREAMS) {
        state = rmult * LOW_BITS(state) + HIGH_BITS(state);
        const float u1 = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        state = rmult * LOW_BITS(state) + HIGH_BITS(state);
        const float u2 = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        const float theta = 2 * PI * u2;
        const float radius = sqrtf(-2 * __logf(u1));
        target[i] = gData[i] + mult * radius * __cosf(theta);
        if (i + NUM_RND_STREAMS < numElements)
            target[i + NUM_RND_STREAMS] = gData[i + NUM_RND_STREAMS] + mult * radius * __sinf(theta);
    }
    rndWords[tid] = state;
}
// Gumbel perturbation: target = gData - log(-log(u)), u ~ Uniform(0, 1].
__global__ void kPerturbEnergy(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        const float u = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        target[i] = gData[i] - __logf(-__logf(u));
    }
    rndWords[tid] = state;
}
// Exponential-race perturbation: target = -gData / log(u), u ~ Uniform(0, 1].
__global__ void kPerturbProb(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int mult = rndMults[tid];
    unsigned long long state = rndWords[tid];
    for (unsigned int i = tid; i < numElements; i += NUM_RND_STREAMS) {
        state = mult * LOW_BITS(state) + HIGH_BITS(state);
        const float u = (__uint2float_rn(LOW_BITS(state)) + 1.0f) / 4294967296.0f;
        target[i] = -gData[i] / __logf(u);
    }
    rndWords[tid] = state;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
// Copy rows [start, end) of a column-major (height x width) matrix into
// target, which is (end-start) x width. One block per 32x32 chunk:
// blockIdx.x tiles rows, blockIdx.y tiles columns.
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
    const int row = start + blockIdx.x * 32 + threadIdx.x;
    if (row >= end) return;
    const int firstCol = blockIdx.y * 32;
    const int lastCol = (firstCol + 32 < width) ? firstCol + 32 : width;
    const int sliceHeight = end - start;
    for (int col = firstCol; col < lastCol; col++)
        target[col * sliceHeight + (row - start)] = source[col * height + row];
}
// Inverse of kGetRowSlice: scatter the (end-start) x width column-major
// source into rows [start, end) of the height x width target.
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
    const int row = start + blockIdx.x * 32 + threadIdx.x;
    if (row >= end) return;
    const int firstCol = blockIdx.y * 32;
    const int lastCol = (firstCol + 32 < width) ? firstCol + 32 : width;
    const int sourceHeight = end - start;
    for (int col = firstCol; col < lastCol; col++)
        target[col * height + row] = source[col * sourceHeight + (row - start)];
}
// Tiled matrix transpose: stage a COPY_BLOCK_SIZE^2 tile in shared memory,
// then write it back with block x/y swapped so both the global read and the
// global write are coalesced. The +1 column pad avoids shared-memory bank
// conflicts on the transposed (column-wise) accesses.
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// Barrier: the whole tile must be staged before any thread reads it back.
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
/* ------------------------- Mathematical operations ------------------------- */
// Element-wise comparison/clamp kernels. All use a grid-stride loop and
// write results as 0.0f / 1.0f (comparisons) or clamped values (bounds).
// target = (mat1 < mat2)
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat1[i] < mat2[i];
}
// target = (mat1 <= mat2)
__global__ void kLessThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat1[i] <= mat2[i];
}
// target = (mat < val)
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat[i] < val;
}
// target = (mat <= val)
__global__ void kLessThanEqScalar(float* mat, float val, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat[i] <= val;
}
// target = (mat1 > mat2)
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat1[i] > mat2[i];
}
// target = (mat1 >= mat2)
__global__ void kGreaterThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat1[i] >= mat2[i];
}
// target = (mat > val)
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat[i] > val;
}
// target = (mat >= val)
__global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat[i] >= val;
}
// target = element-wise min(mat1, mat2) (ternary kept: preserves NaN behavior)
__global__ void kUpperBound(float* mat1, float* mat2, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat1[i] > mat2[i] ? mat2[i] : mat1[i];
}
// target = element-wise max(mat1, mat2)
__global__ void kLowerBound(float* mat1, float* mat2, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat1[i] < mat2[i] ? mat2[i] : mat1[i];
}
// target = min(mat, val): clamp from above
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat[i] > val ? val : mat[i];
}
// target = max(mat, val): clamp from below
__global__ void kLowerBoundScalar(float* mat, float val, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = mat[i] < val ? val : mat[i];
}
// target (m x n, column-major) = alpha * A * B + beta * target, where A is an
// m x k CSR sparse matrix (data/indptr/indices) and B (dense_data) is a dense
// column-major matrix with k rows. One thread per output element (row, col).
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
// Nonzeros of sparse row `row` occupy data[start..end) / indices[start..end).
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
// beta == 0 avoids reading target, so uninitialized output memory is safe.
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
// target = sign(mat): -1/0/+1 (zero input, including -0, maps to 0).
__global__ void kSign(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) {
        const float v = mat[i];
        target[i] = v ? copysignf(1., v) : 0;
    }
}
// target = sin(mat), fast-math intrinsic (reduced precision).
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = __sinf(mat[i]);
}
// target = cos(mat), fast-math intrinsic (reduced precision).
__global__ void kApplyCos(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = __cosf(mat[i]);
}
// target = logistic sigmoid 1 / (1 + exp(-mat)).
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = 1 / (1 + __expf(-mat[i]));
}
// target = tanh(mat), computed as 1 - 2 / (exp(2x) + 1).
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) {
        const float e2x = __expf(2 * mat[i]);
        target[i] = 1 - 2 / (e2x + 1);
    }
}
// target = |mat| via the sign trick (note: -0 maps to -0, unlike fabsf).
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) {
        const float v = mat[i];
        target[i] = v * ((v > 0) - (v < 0));
    }
}
// Numerically stable softplus: target = log(1 + exp(mat)).
// For positive inputs the identity log(1+e^x) = x + log(1+e^-x) is used so
// the exponential never overflows.
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) {
        const float v = mat[i];
        if (v > 0)
            target[i] = (__logf(1 + __expf(-v)) + v);
        else
            target[i] = __logf(1 + __expf(v));
    }
}
// target = log(mat + tiny); `tiny` guards against log(0).
__global__ void kLog(float* mat, float* target, unsigned int len, float tiny) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = __logf(mat[i] + tiny);
}
// target = exp(mat), fast-math intrinsic.
__global__ void kExp(float* mat, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = __expf(mat[i]);
}
// target = ceil(mat) element-wise.
// FIX: use single-precision ceilf instead of double-precision ceil. The
// result is identical for every float input (a float's ceiling is always
// exactly representable), but the float->double->float round trip and the
// slow double-precision unit are avoided.
__global__ void kCeil(float* mat, float* target, unsigned int len) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < len; i += numThreads) target[i] = ceilf(mat[i]);
}
// target = floor(mat) element-wise. FIX: floorf for the same reason as kCeil.
__global__ void kFloor(float* mat, float* target, unsigned int len) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < len; i += numThreads) target[i] = floorf(mat[i]);
}
// target = sqrt(mat) element-wise.
// FIX: sqrtf instead of double sqrt. sqrtf is correctly rounded, and double
// rounding through sqrt() is benign (53 >= 2*24+2 bits), so results match.
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < len; i += numThreads) target[i] = sqrtf(mat[i]);
}
// target = mat ^ pow (scalar exponent), element-wise.
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = powf(mat[i], pow);
}
// target = mat ^ pow (element-wise exponent).
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = powf(mat[i], pow[i]);
}
// Per-element cross-entropy term: -mat * log(p + tiny).
__global__ void kCrossEntropy(float* mat, float* p, float* target, unsigned int len, float tiny) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride) target[i] = -mat[i] * __logf(p[i] + tiny);
}
// Per-element Bernoulli cross-entropy: -m*log(p) - (1-m)*log(1-p), with tiny
// guarding both logs against zero arguments.
__global__ void kCrossEntropyBernoulli(float* mat, float* p, float* target, unsigned int len, float tiny) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride)
        target[i] = -mat[i] * __logf(p[i] + tiny) - (1 - mat[i]) * __logf(1 - p[i] + tiny);
}
// target = 1 where thresholding p at `cutoff` reproduces the 0/1 label in mat.
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < len; i += stride)
        target[i] = mat[i] * (p[i] >= cutoff) + (1 - mat[i]) * (p[i] < cutoff);
}
// target = 1 / mat, element-wise.
// FIX: the literal was `1.` (double), which promoted every element to double
// and performed a slow double-precision divide plus a float round trip. A
// float literal keeps the math in single precision; results can differ from
// the old code by at most the final ulp (double rounding).
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1.0f / mat[i];
}
// Broadcast/diagonal kernels. Matrices are indexed so that `i % height`
// selects the row and `i / height` the column -- i.e. column-major storage
// with `height` rows and `width` columns.
// tgtMat = mat + vec broadcast down each column (vec has `height` entries).
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] + vec[i % height];
    }
}
// tgtMat's main diagonal = mat's diagonal + val (square width x width matrix).
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width; i += stride) {
        const unsigned int d = width * i + i;
        tgtMat[d] = mat[d] + val;
    }
}
// tgtMat's main diagonal = mat's diagonal + vec.
__global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width; i += stride) {
        const unsigned int d = width * i + i;
        tgtMat[d] = mat[d] + vec[i];
    }
}
// tgtMat's main diagonal = mat's diagonal * val.
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width; i += stride) {
        const unsigned int d = width * i + i;
        tgtMat[d] = mat[d] * val;
    }
}
// tgtMat's main diagonal = mat's diagonal * vec.
__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width; i += stride) {
        const unsigned int d = width * i + i;
        tgtMat[d] = mat[d] * vec[i];
    }
}
// tgtMat = mat + vec broadcast across each row (vec has `width` entries).
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] + vec[i / height];
    }
}
// tgtMat = mat + mult * column-vector broadcast.
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] + mult * vec[i % height];
    }
}
// tgtMat = mat + mult * row-vector broadcast.
__global__ void kAddRowMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] + mult * vec[i / height];
    }
}
// tgtMat = mat * column-vector broadcast.
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] * vec[i % height];
    }
}
// tgtMat = mat / row-vector broadcast.
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] / vec[i / height];
    }
}
// tgtMat = mat / column-vector broadcast.
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] / vec[i % height];
    }
}
// tgtMat = mat * row-vector broadcast.
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = tid; i < width * height; i += stride) {
        tgtMat[i] = mat[i] * vec[i / height];
    }
}
// In-place signed step: a[i] += mult * sign(b[i]); entries where b[i] == 0 are unchanged.
__global__ void kAddMultSign(float* a, float* b, unsigned int numEls, float mult) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        float step = 0;
        if (b[pos] > 0) step = mult;
        else if (b[pos] < 0) step = -mult;
        a[pos] = a[pos] + step;
    }
}
// Elementwise sum: dest[i] = a[i] + b[i] for i in [0, numEls).
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] + b[pos];
    }
}
// Elementwise difference: dest[i] = a[i] - b[i] for i in [0, numEls).
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] - b[pos];
    }
}
// Elementwise quotient: dest[i] = a[i] / b[i] for i in [0, numEls).
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] / b[pos];
    }
}
// Elementwise product: dest[i] = a[i] * b[i] for i in [0, numEls).
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] * b[pos];
    }
}
// Cosine backward pass: dest = -a * sin(b), where `a` is the incoming gradient
// and `b` the forward input. Uses the fast-math __sinf intrinsic like the rest
// of this file (reduced precision, higher throughput).
__global__ void kCosDeriv(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = -a[pos] * __sinf(b[pos]);
    }
}
// Sine backward pass: dest = a * cos(b), where `a` is the incoming gradient
// and `b` the forward input (fast-math __cosf intrinsic).
__global__ void kSinDeriv(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] * __cosf(b[pos]);
    }
}
// Logistic (sigmoid) backward pass: dest = a * b * (1 - b), where `b` holds the
// forward sigmoid output and `a` the incoming gradient. Grid-stride over numEls.
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < numEls; i += numThreads) {
        // FIX: use a float literal; the double literal 1.0 promoted the whole
        // expression to fp64, which is needlessly slow on most GPUs.
        dest[i] = a[i] * b[i] * (1.0f - b[i]);
    }
}
// Backward pass for a scaled-tanh unit: dest = a * (1 + b) * (1 - b) * 0.5,
// where `b` holds the forward activation output and `a` the incoming gradient.
__global__ void kTanhDeriv(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < numEls; i += numThreads) {
        // FIX: float literals; the double literals 1.0/0.5 promoted the whole
        // expression to fp64 for no benefit.
        dest[i] = a[i] * (1.0f + b[i]) * (1.0f - b[i]) * 0.5f;
    }
}
// ReLU backward pass: dest = a where the forward output b > 0, else 0.
// Kept as a multiply (a * {0,1}) to preserve the original's NaN propagation.
__global__ void kRectifiedLinearDeriv(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] * ((b[pos] > 0) ? 1 : 0);
    }
}
// Smooth-ReLU (softplus) backward pass: dest = a * (1 - exp(-b)), where `b` is
// the forward softplus output and `a` the incoming gradient (fast-math __expf).
__global__ void kRectifiedLinearSmoothDeriv(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] * (1 - __expf(-b[pos]));
    }
}
// Scalar scale: dest[i] = alpha * mat[i] for i in [0, len).
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < len; pos += stride) {
        dest[pos] = alpha * mat[pos];
    }
}
// Fills dest[0..len) with the constant alpha.
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < len; pos += stride) {
        dest[pos] = alpha;
    }
}
// Scalar divide: dest[i] = mat[i] / alpha for i in [0, len).
// Division kept per element (not a precomputed reciprocal) to match the
// original rounding exactly.
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < len; pos += stride) {
        dest[pos] = mat[pos] / alpha;
    }
}
// Scalar offset: dest[i] = a[i] + alpha for i in [0, numEls).
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < numEls; pos += stride) {
        dest[pos] = a[pos] + alpha;
    }
}
// Gathers rows: target row t = source row indices[t] (row-major, nCols wide).
// Each block copies 32 consecutive target rows with a 32-thread warp.
// Negative indices are Python-style (counted from the end of source); indices
// still out of range after normalization are marked -1 and produce NaN-filled
// target rows (the (1.0/0.0 - 1.0/0.0) expression evaluates to NaN).
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
// Swaps column contents between two matrices: for each c in [0, cols), swaps
// source column indices1[c] with target column indices2[c] (column-major,
// `height` rows). Indices are carried in float arrays and truncated to int.
// `width` is unused here; bounds of the index values are not checked.
__global__ void kSwapColumns(float* source, float* target, float* indices1, float* indices2, int cols, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp;
unsigned int column, row, source_pos, target_pos;
// one grid-stride iteration per (row, listed-column) pair
for (unsigned int i = idx; i < height * cols; i += numThreads) {
column = i / height;
row = i % height;
source_pos = height * (int)indices1[column] + row;
target_pos = height * (int)indices2[column] + row;
// three-step swap of a single element pair
temp = source[source_pos];
source[source_pos] = target[target_pos];
target[target_pos] = temp;
}
}
// Scatters rows: target row indices[j] = source row j (row-major, nCols wide).
// Each block handles 32 consecutive source rows with a 32-thread warp.
// Negative indices are Python-style (counted from the end of target); indices
// still out of range after normalization are marked -1 and skipped.
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
// BUG FIX: the original evaluated target[targetRowI * nCols + colI] even when
// targetRowI == -1, writing NaN out of bounds (before the start of target).
// An invalid target index now simply skips the row.
if (targetRowI != -1)
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = source[sourceRowI * nCols + colI];
}
}
// Within each block's `numdims`-long segment, replicate the first element of
// every `blocksize`-sized chunk of `source` across that whole chunk in `target`.
// One CUDA block per segment; threads stride across positions in the segment.
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
    const int base = blockIdx.x * numdims;
    for (unsigned int j = threadIdx.x; j < numdims; j += blockDim.x) {
        const int chunkStart = (j / blocksize) * blocksize;
        target[base + j] = source[base + chunkStart];
    }
}
// Crops a (target_w x target_w) window out of each (source_w x source_w) image
// tile, translated by a per-tile offset (off_x_arr/off_y_arr, in pixels, on top
// of the centering pad (source_w - target_w)/2). One CUDA block per tile;
// channels are interleaved (index scaled by num_channels).
// NOTE(review): offsets larger than the pad make source_off reach outside the
// tile — callers appear responsible for keeping off_x/off_y within [-pad, pad].
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
int target_x, target_y;
int pad = (source_w - target_w)/2;
int target_tile_size = target_w * target_w;
int source_tile_size = source_w * source_w;
// per-tile translation, truncated from float to int
int off_x = off_x_arr[blockIdx.x];
int off_y = off_y_arr[blockIdx.x];
int target_off = blockIdx.x * target_tile_size;
int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
target_x = target_ind / target_w;
target_y = target_ind - target_x * target_w;
for (unsigned int ch = 0; ch < num_channels; ch += 1) {
target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
}
}
}
// Softmax gradient with integer class labels (column-major, `height` rows):
// target[r,c] = mat[r,c] - 1 when r == labels[c], else mat[r,c].
__global__ void kSoftMaxGrad(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
    const unsigned int stride = blockDim.x * gridDim.x;
    const unsigned int total = width * height;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < total; pos += stride) {
        const unsigned int col = pos / height;
        const unsigned int row = pos % height;
        target[pos] = mat[pos] - ((int)labels[col] == row ? 1 : 0);
    }
}
// Per-column cross-entropy with integer labels (column-major, `height` rows):
// target[c] = -log(mat[labels[c], c] + tiny). `tiny` guards against log(0).
__global__ void kSoftMaxCrossEntropy(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; col < width; col += stride) {
        target[col] = -__logf(mat[height * col + (int)labels[col]] + tiny);
    }
}
// For each column, writes 1 to target[column] when the row holding the column
// maximum equals the integer label for that column, else 0 (classification
// accuracy indicator). One block per column via a 2-D grid; blockDim.x must be
// <= 32 (shared arrays are sized 32).
__global__ void kSoftMaxCorrect(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
// each thread scans a strided subset of the column for its partial argmax
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
// thread 0 reduces the per-thread partials and writes the 0/1 result
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = (cur_argmax == (int)labels[column]) ? 1 : 0;
}
}
}
// Column-wise softmax: target[:,c] = exp(mat[:,c] - max) / sum(exp(...)),
// using the max-subtraction trick for numerical stability. One block per
// column via a 2-D grid; dynamic shared memory must hold blockDim.x floats.
// Relies on reduceToMax / reduceToSumLocal, defined elsewhere in this file.
__global__ void kSoftMax(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
// pass 1: per-thread partial max over a strided subset of the column
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
// pass 2: per-thread partial sum of shifted exponentials
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val += __expf(cur_data[i]-cur_max);
}
max_vals[threadIdx.x] = val;
reduceToSumLocal(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
// pass 3: write normalized probabilities
float *cur_target = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] = __expf(cur_data[i]-cur_max) / norm ;
}
}
}
// In-place column-wise softmax (same algorithm as kSoftMax, but mat is
// overwritten). One block per column via a 2-D grid; dynamic shared memory
// must hold blockDim.x floats. Relies on reduceToMax / reduceToSumLocal,
// defined elsewhere in this file.
__global__ void kSoftMaxOverwrite(float* mat, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
// pass 1: column max (for numerical stability)
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
// pass 2: overwrite with shifted exponentials while accumulating the sum
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] = __expf(cur_data[i]-cur_max);
val += cur_data[i];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
// pass 3: normalize in place
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] /= norm;
}
}
}
// For each column, increments target at the row holding that column's maximum
// (a histogram of per-column argmax positions; target is NOT cleared first).
// One block per column via a 2-D grid; blockDim.x must be <= 32 (shared arrays
// are sized 32).
__global__ void kChooseMaxAndAccumulate(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
// each thread scans a strided subset of the column for its partial argmax
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
// thread 0 reduces the partials and bumps the winning row's counter
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] += 1;
}
}
}
// For each column of `mat` (column-major, `height` rows), writes a one-hot
// column into `target`: 1 at the row holding the column maximum, 0 elsewhere.
// One block per column via a 2-D grid; blockDim.x must be <= 32 (shared arrays
// are sized 32).
__global__ void kChooseMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
// BUG FIX: the original zeroed target[i] — always column 0 — so other
// columns were never cleared and every block raced on the first column.
// Zero this block's own column instead.
target_data[i] = 0;
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
// thread 0 reduces the per-thread partial maxima and writes the one-hot entry
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] = 1;
}
}
}
// Column-wise max: target[c] = max over rows of mat[:,c] (column-major).
// One block per column via a 2-D grid; dynamic shared memory must hold
// blockDim.x floats. Relies on reduceToMax, defined elsewhere in this file.
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
// per-thread partial max over a strided subset of the column
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
reduceToMax(max_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = max_vals[0];
}
}
// Column-wise argmax: target[c] = row index of the maximum of mat[:,c]
// (column-major), stored as a float. One block per column via a 2-D grid;
// blockDim.x must be <= 32 (shared arrays are sized 32).
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
// each thread scans a strided subset of the column for its partial argmax
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
// thread 0 reduces the partials and writes the winning row index
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = cur_argmax;
}
}
}
// Column-wise sum of squares with accumulation:
// target[c] = p * target[c] + mult * sum(mat[:,c]^2) (column-major).
// One block per column via a 2-D grid; dynamic shared memory must hold
// blockDim.x floats. Relies on reduceToSumLocal, defined elsewhere in this file.
__global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
// per-thread partial sum of squares over a strided subset of the column
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i]*cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
// Column-wise L2-norm clipping: if ||mat[:,c]|| > norm, target[:,c] is mat[:,c]
// rescaled to have L2 norm exactly `norm`; otherwise the column is copied
// unchanged. One block per column via a 2-D grid; dynamic shared memory must
// hold blockDim.x floats. Relies on reduceToSumLocal, defined elsewhere in
// this file.
__global__ void kNormLimitColumnwise(float* mat, float* target, float norm, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
// per-thread partial sum of squares
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i]*cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal(sum_vals, threadIdx.x);
__syncthreads();
// cur_sum becomes the scale factor: 1 if within the limit, norm/||col|| otherwise
cur_sum = sqrt(sum_vals[0]);
cur_sum = (cur_sum < norm) ? 1: (norm / cur_sum);
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] * cur_sum;
}
__syncthreads();
}
}
// Gathers columns: target column j = source column indices[j] (column-major,
// `height` rows). Positions that resolve outside source produce NaN
// (the 1.0/0.0 - 1.0/0.0 sentinel), matching the original behavior.
__global__ void kExpand(float* source, float* indices, float* target, int height, int width, int target_width){
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < target_width*height; pos += stride) {
        const int srcPos = height * (int)indices[pos / height] + pos % height;
        target[pos] = (srcPos < height * width)? source[srcPos] : 1.0/0.0 - 1.0/0.0;
    }
}
// Gather-add: target column j = source column j + mult * mat column indices[j]
// (column-major, `height` rows; mat has width2 columns). Positions resolving
// outside mat produce NaN (the 1.0/0.0 - 1.0/0.0 sentinel), as before.
__global__ void kExpandAndAdd(float* source, float* mat, float* indices, float* target, int width, int height, float mult, int width2){
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < width*height; pos += stride) {
        const int gatherPos = height * (int)indices[pos / height] + pos % height;
        target[pos] = (gatherPos < height * width2)? source[pos] + mult * mat[gatherPos] : 1.0/0.0 - 1.0/0.0;
    }
}
// Scatter-accumulate columns: target[:,c] = mult * sum (or mean, when avg == 1)
// of the columns i of mat with indices[i] == c (column-major, `height` rows).
// Row comes from the 2-D grid, target column from threadIdx.x, so target_width
// must not exceed blockDim.x.
__global__ void kAccumulateColumns(float* mat, float* indices, float* target, int mat_width, int target_width, int height, float mult, int avg){
    const int row = gridDim.x * blockIdx.y + blockIdx.x;
    const int column = threadIdx.x;
    if (row < height && column < target_width) {
        float acc = 0.0;
        unsigned int hits = 0;
        // scan every source column; accumulate the ones mapped to `column`
        for (unsigned int i = 0; i < mat_width; i ++) {
            const bool match = ((int)indices[i] == column);
            hits += match ? 1 : 0;
            acc += match ? mat[row + i * height] : 0;
        }
        target[row + height * column] = mult * acc / ((avg == 1 && hits > 0) ? hits : 1);
    }
}
|
20,297 | #include "rwalk.cuh"
#include <stdio.h>
#include <assert.h>
// Device-side copies of the temporal graph and the output walk buffer.
// The graph is CSR-like: d_p_scan_list gives, per node, the [begin, end)
// range into d_v_list (neighbor ids) and d_w_list (per-edge weights, used as
// timestamps by device_rwalk). Host-side counterparts (p_scan_list, v_list,
// w_list, num_of_nodes, ...) presumably come from rwalk.cuh — TODO confirm.
int64_t * d_p_scan_list = NULL;
int64_t * d_v_list = NULL;
float * d_w_list = NULL;
int64_t *d_global_walk = NULL;
// Kernel launch configuration; tblocksize is overwritten in initializeGPU_rwalk.
int tblocksize = 512;
int nblock;
// Temporal random walk: one thread per start node. Each thread generates
// n_walks_per_node walks of up to m_walk_length steps, always moving to a
// neighbor whose edge timestamp is strictly greater than the previous step's
// timestamp. The next edge is sampled from a softmax over (t - prev)/range,
// using an inline LCG seeded by `rnumber`. Walks shorter than m_walk_length
// are terminated with a -1 marker.
// Output layout (see the index expression): walk batch w_n is a contiguous
// slab of total_num_nodes * m_walk_length entries, node-major within a slab.
// NOTE(review): every thread starts from the same `rnumber`, so all threads
// share one pseudo-random sequence per step index — verify this is intended.
void __global__ device_rwalk(
int m_walk_length,
int n_walks_per_node,
int total_num_nodes,
unsigned long long rnumber,
int64_t * d_p_scan_list, int64_t * d_v_list, float * d_w_list, int64_t *d_global_walk){
int64_t i = (blockIdx.x * blockDim.x) + threadIdx.x;
if(i >= total_num_nodes){
return;
}
long long int w;
for(int w_n = 0; w_n < n_walks_per_node; ++w_n) {
// step 0 of every walk is the start node itself
d_global_walk[( total_num_nodes * w_n * m_walk_length) + ( i * m_walk_length ) + 0] = i;
// d_global_walk[( i * m_walk_length * n_walks_per_node ) + ( w_n * m_walk_length ) + 0] = i;
float prev_time_stamp = 0;
int64_t src_node = i;
int walk_cnt;
for(walk_cnt = 1; walk_cnt < m_walk_length; ++walk_cnt) {
// does the current node have at least one temporally-valid neighbor?
int valid_neighbor_cnt = 0;
for(w = d_p_scan_list[src_node]; w < d_p_scan_list[src_node+1]; w++){
if(d_w_list[w] > prev_time_stamp){
valid_neighbor_cnt++;
break;
}
}
if(valid_neighbor_cnt == 0) {
break;
}
// min/max timestamp over ALL of this node's edges (not only valid ones),
// used to normalize the softmax exponent below
float min_bound = d_w_list[d_p_scan_list[src_node]];
float max_bound = d_w_list[d_p_scan_list[src_node]];
for(w = d_p_scan_list[src_node]; w < d_p_scan_list[src_node+1]; w++){
if(d_w_list[w] < min_bound)
min_bound = d_w_list[w];
if(d_w_list[w] > max_bound)
max_bound = d_w_list[w];
}
float time_boundary_diff = (max_bound - min_bound);
// degenerate range: all timestamps (nearly) equal -> take the first valid neighbor
if(time_boundary_diff < 0.0000001){
for(w = d_p_scan_list[src_node]; w < d_p_scan_list[src_node+1]; w++){ // We randomly pick 1 neighbor, we just pick the first
if(d_w_list[w] > prev_time_stamp){
d_global_walk[( total_num_nodes * w_n * m_walk_length) + ( i * m_walk_length ) + walk_cnt] = d_v_list[w];
src_node = d_v_list[w];
prev_time_stamp = d_w_list[w];
break;
}
}
continue;
}
// softmax normalizer over valid neighbors
double exp_summ = 0;
for(w = d_p_scan_list[src_node]; w < d_p_scan_list[src_node+1]; w++){
if(d_w_list[w] > prev_time_stamp){
exp_summ += exp((float)(d_w_list[w]-prev_time_stamp)/time_boundary_diff);
}
}
// draw u ~ [0,1] from the LCG state, then advance the state
double curCDF = 0, nextCDF = 0;
double random_number = rnumber * 1.0 / ULLONG_MAX;
rnumber = rnumber * (unsigned long long)25214903917 + 11;
bool fall_through = false;
// inverse-CDF sampling over the valid neighbors
for(w = d_p_scan_list[src_node]; w < d_p_scan_list[src_node+1]; w++){
if(d_w_list[w] > prev_time_stamp){
nextCDF += (exp((float)(d_w_list[w]-prev_time_stamp)/time_boundary_diff) * 1.0 / exp_summ);
if(nextCDF >= random_number && curCDF <= random_number) {
d_global_walk[( total_num_nodes * w_n * m_walk_length) + ( i * m_walk_length ) + walk_cnt] = d_v_list[w];
// d_global_walk[( i * m_walk_length * n_walks_per_node ) + ( w_n * m_walk_length ) + walk_cnt] = d_v_list[w];
src_node = d_v_list[w];
prev_time_stamp = d_w_list[w];
fall_through = true;
break;
} else {
curCDF = nextCDF;
}
}
}
// numerical safety net: if rounding made the CDF miss u, take the first valid neighbor
if(!fall_through){
for(w = d_p_scan_list[src_node]; w < d_p_scan_list[src_node+1]; w++){ // This line should not be reached anyway (reaching this line means something is wrong). But just for testing, we randomly pick 1 neighbor, we just pick the first
if(d_w_list[w] > prev_time_stamp){
d_global_walk[( total_num_nodes * w_n * m_walk_length) + ( i * m_walk_length ) + walk_cnt] = d_v_list[w];
// d_global_walk[( i * m_walk_length * n_walks_per_node ) + ( w_n * m_walk_length ) + walk_cnt] = d_v_list[w];
src_node = d_v_list[w];
prev_time_stamp = d_w_list[w];
break;
}
}
}
}
// mark early termination so the host can detect short walks
if (walk_cnt != m_walk_length){
d_global_walk[( total_num_nodes * w_n * m_walk_length) + ( i * m_walk_length ) + walk_cnt] = -1;
}
}
}
// Checks a CUDA runtime return code: on failure, prints the error string with
// file and line, then aborts via assert. Wrap every CUDA API call with this.
#define cudaCheck(err) { \
if (err != cudaSuccess) { \
printf("CUDA error: %s: %s, line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
assert(err == cudaSuccess); \
} \
}
// Releases all device-side buffers allocated by initializeGPU_rwalk and resets
// the global pointers, so a later initialize/cleanup cycle (or an accidental
// double call) cannot double-free.
void cleanUpGPU_rwalk(){
cudaCheck(cudaFree(d_p_scan_list));
cudaCheck(cudaFree(d_v_list));
cudaCheck(cudaFree(d_w_list));
cudaCheck(cudaFree(d_global_walk));
// FIX: null the pointers after freeing (cudaFree(NULL) is a no-op, so a
// repeated cleanup becomes harmless instead of undefined behavior).
d_p_scan_list = NULL;
d_v_list = NULL;
d_w_list = NULL;
d_global_walk = NULL;
}
// Selects device 0, sizes the launch configuration from the device's max
// threads per block, and allocates all device buffers. Depends on globals
// presumably declared in rwalk.cuh (num_of_nodes, num_of_edges,
// MAX_NUM_OF_WALK_PER_NODE) — TODO confirm.
// NOTE(review): the num_walks_per_node parameter is unused here; the walk
// buffer is always sized for MAX_NUM_OF_WALK_PER_NODE walks per node.
void initializeGPU_rwalk(int max_walk_length, int num_walks_per_node){
// Device query
int nDevices;
cudaCheck(cudaGetDeviceCount(&nDevices));
int device = 0;
cudaCheck(cudaSetDevice(device));
cudaDeviceProp prop;
cudaCheck(cudaGetDeviceProperties(&prop, device));
tblocksize = prop.maxThreadsPerBlock;
#if defined(DEBUG)
printf(" Max Threads Per Block %d\n", tblocksize);
#endif
// ceil(num_of_nodes / tblocksize) blocks: one thread per start node
nblock = (num_of_nodes - 1) / tblocksize + 1;
cudaCheck(cudaMalloc((void**) & d_p_scan_list, (num_of_nodes + 1) * sizeof(int64_t)));
cudaCheck(cudaMalloc((void**)&d_v_list, num_of_edges * sizeof(int64_t)));
cudaCheck(cudaMalloc((void**) & d_w_list, num_of_edges * sizeof(float)));
cudaCheck(cudaMalloc((void**) & d_global_walk, num_of_nodes * max_walk_length * MAX_NUM_OF_WALK_PER_NODE * sizeof(int64_t)));
}
// Copies the host-side graph arrays (p_scan_list, v_list, w_list — presumably
// declared in rwalk.cuh; TODO confirm) into the device buffers allocated by
// initializeGPU_rwalk. Synchronous copies; returns when the data is resident.
void TransferDataToGPU_rwalk(){
cudaCheck(cudaMemcpy( d_p_scan_list, p_scan_list,
(num_of_nodes + 1) * sizeof(int64_t) , cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy( d_v_list, v_list,
num_of_edges * sizeof(int64_t) , cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy( d_w_list, w_list,
num_of_edges * sizeof(float) , cudaMemcpyHostToDevice));
}
// Copies one batch of generated walks (num_walks_per_node walks for every
// node) from the device buffer into the host `global_walk` array at `offset`
// elements. Blocking cudaMemcpy, so it also synchronizes with the preceding
// kernel launch on the default stream.
void GetResultData_rwalk(int max_walk_length, int num_walks_per_node, long long int offset){
cudaCheck(cudaMemcpy(global_walk + offset, d_global_walk, num_of_nodes * max_walk_length * num_walks_per_node * sizeof(int64_t), cudaMemcpyDeviceToHost));
}
// Generates num_walks_per_node walks per node in batches of
// MAX_NUM_OF_WALK_PER_NODE (the device buffer only holds one batch), copying
// each batch back into the host buffer at increasing offsets.
void TrainGPU_rwalk(int max_walk_length, int num_walks_per_node, unsigned long long random_number) {
TransferDataToGPU_rwalk();
int i;
for(i = 0; i < num_walks_per_node / MAX_NUM_OF_WALK_PER_NODE; i++){
device_rwalk<<<nblock,tblocksize>>>(
max_walk_length,
MAX_NUM_OF_WALK_PER_NODE,
num_of_nodes,
// FIX: vary the seed per batch; the original passed the same
// random_number to every launch, making all batches identical walks.
random_number + (unsigned long long)i,
d_p_scan_list, d_v_list, d_w_list, d_global_walk);
GetResultData_rwalk(max_walk_length, MAX_NUM_OF_WALK_PER_NODE, (long long int)num_of_nodes * max_walk_length * MAX_NUM_OF_WALK_PER_NODE * i);
}
// FIX: only launch the remainder batch when there is a remainder; the
// original launched (and copied) unconditionally, doing a useless launch
// when num_walks_per_node divides evenly.
if (num_walks_per_node % MAX_NUM_OF_WALK_PER_NODE != 0) {
device_rwalk<<<nblock,tblocksize>>>(
max_walk_length,
num_walks_per_node % MAX_NUM_OF_WALK_PER_NODE,
num_of_nodes,
random_number + (unsigned long long)i,
d_p_scan_list, d_v_list, d_w_list, d_global_walk);
GetResultData_rwalk(max_walk_length, num_walks_per_node % MAX_NUM_OF_WALK_PER_NODE, (long long int)num_of_nodes * max_walk_length * MAX_NUM_OF_WALK_PER_NODE * i);
}
#if defined(DEBUG)
cudaCheck(cudaGetLastError());
cudaCheck(cudaDeviceSynchronize());
#endif
}
|
// For each row j of the h x w row-major matrix B: even rows get vector A added
// elementwise, odd rows get A subtracted. One thread per row.
__global__ void addSubArray0 (int *A, int *B, int w, int h) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: guard against surplus threads when the grid does not divide h
    // evenly; the original wrote past row h-1. (Row index hoisted out of the
    // column loop — it was loop-invariant.)
    if (j >= h) return;
    for (int i = 0; i < w; i++) {
        if (j % 2 == 0) {
            B[j * w + i] += A[i];
        } else {
            B[j * w + i] -= A[i];
        }
    }
}
20,299 |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 30
//typedef long long int ll;
// Needleman-Wunsch-style global alignment of `key` against `num` queries
// packed back-to-back in `s` (each of length n). One thread per query.
// Gap penalty GP = -1, match reward MR = 1; mismatches only extend via gaps
// (no substitution move in the DP fill). Prints the traceback alignment and
// writes the alignment score into scores[index].
// NOTE: n must be <= N — the per-thread DP table is (N+1)^2 ints in local memory.
__global__ void align(char *key , char *s , int *scores , int n , int num)
{
int GP = -1 , MR = 1;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < num)
{
int i , j , k , tmp;
int nm[N + 1][N + 1];
char r1[2*N+2] , r2[2*N+2];
// first row/column: cumulative gap penalties
for (i = 0; i <= n; i++)
{
nm[0][i] = GP * i;
nm[i][0] = GP * i;
}
// fill the DP table
for (i = 1; i <= n; i++)
{
for (j = 1; j <= n; j++)
{
if(key[i-1] == s[n*index + j-1])
nm[i][j] = nm[i-1][j-1] + MR;
else
{
if(nm[i-1][j] <= nm[i][j-1])
nm[i][j] = nm[i][j-1] + GP;
else
nm[i][j] = nm[i-1][j] + GP;
}
}
}
// traceback from (n, n), building the aligned strings in reverse
i = n , j = n , k = 0;
while(i > 0 && j > 0)
{
tmp = nm[i-1][j-1] > nm[i][j-1] ? (nm[i-1][j-1] > nm[i-1][j] ? nm[i-1][j-1] : nm[i-1][j]) : (nm[i][j-1] > nm[i-1][j] ? nm[i][j-1] : nm[i-1][j]);
// BUG FIX: compare key[i-1] (the character that produced nm[i][j]),
// not key[i] — the original was off by one during traceback.
if(tmp == nm[i-1][j-1] || key[i-1] == s[n*index + j-1])
{
r1[k] = key[i-1];
r2[k] = s[n*index + j-1];
i--;
j--;
}
else if(tmp == nm[i][j-1])
{
r1[k] = '-';
r2[k] = s[n*index + j-1];
j--;
}
else if(tmp == nm[i-1][j])
{
r1[k] = key[i-1];
r2[k] = '-';
i--;
}
k++;
}
// reverse the traceback strings in place (swap without a temporary)
for(i = 0; i < k/2; i++)
{
r1[i] = (r1[i] + r1[k-i-1]) - (r1[k-i-1] = r1[i]);
r2[i] = (r2[i] + r2[k-i-1]) - (r2[k-i-1] = r2[i]);
}
r1[k] = '\0';
r2[k] = '\0';
printf("\nAlignment #%d :\n-------------------\nKey:\n%s\nQuery:\n%s\n" , index+1 , r1 , r2);
// rescore the printed alignment: gaps and mismatches cost GP, matches earn MR
int score = 0;
for(i = 0; i < k; i++)
{
// BUG FIX: the original tested r1[k]/r2[k] (the terminator) instead of
// position i, so the gap branch was dead code.
if(r1[i] == '-' || r2[i] == '-')
score += GP;
else if(r1[i] == r2[i])
score += MR;
else
score += GP;
}
scores[index] = score;
}
}
// Reads (from argv[1], redirected to stdin): sequence length n, query count
// num, the key string, then num query strings of length n. Launches one GPU
// thread per query to align it against the key and writes the report to
// argv[2] (redirected stdout).
int main(int argc, char** argv)
{
int size = sizeof(int);
int THREADS = 1024;
freopen(argv[1] , "r", stdin);
freopen(argv[2] , "w", stdout);
int *host_scores , *scores;
int i , num , n;
//printf("Enter size:");
scanf("%d" , &n);
//printf("Enter number of queries:");
scanf("%d" , &num);
int m = num < THREADS ? num : THREADS;
// FIX: +1 for the '\0' that scanf("%s") appends; the original buffers of
// exactly n bytes were overrun by one byte.
char *host_key = (char *)malloc(n + 1);
char *tmp = (char *)malloc(n + 1);
char *host_q = (char *)malloc(num * n + 2);
char *key , *q;
//printf("Enter key:");
scanf("%s" , host_key);
//printf("Enter the queries:");
for(i = 0; i <num; i++)
{
if(i == 0)
scanf("%s" , host_q);
else
{
scanf("%s" , tmp);
strcat(host_q , tmp);
}
}
host_scores = (int *)malloc(size * num);
cudaMalloc((void **)&scores , num * size);
cudaMalloc((void **)&key , n);
cudaMalloc((void **)&q , n * num + 2);
cudaMemcpy(key , host_key , n , cudaMemcpyHostToDevice);
cudaMemcpy(q , host_q , n * num + 2 , cudaMemcpyHostToDevice);
// FIX: size the grid by the number of queries; the original used n (the
// sequence length), leaving queries unprocessed whenever num > n.
align <<<(num + m - 1) / m , m>>> (key , q , scores , n , num);
cudaMemcpy(host_scores , scores , size * num , cudaMemcpyDeviceToHost);
printf("\n\nAlignment Scores:\n----------------------------\n");
for(i = 0; i < num; i++)
printf("Query #%d : %d\n" , i+1 , host_scores[i]);
cudaFree(key);
cudaFree(q);
cudaFree(scores);
// FIX: release host allocations as well (the original leaked all four).
free(host_key);
free(tmp);
free(host_q);
free(host_scores);
return 0;
}
20,300 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
   in CUDA C. It is poor because it achieves this with very little parallelism, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack cuda_crack.cu
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
/****************************************************************************
  Returns 1 if the attempt at cracking the password is identical to the
  plain-text password string stored in the program, otherwise returns 0
  (a device-side strcmp-style comparison).
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
  char plain_password[] = "KB5234";
  int i = 0;
  while (attempt[i] == plain_password[i]) {
    if (attempt[i] == '\0') {
      return 1;  // matched all the way to the terminator
    }
    i++;
  }
  return 0;      // first differing character -> no match
}
/****************************************************************************
  The kernel derives the first two password letters from the thread and block
  indices (one thread per letter pair) and uses nested loops to generate the
  remaining digits, testing whether each candidate matches
*****************************************************************************/
// Each thread fixes the first two uppercase letters of the candidate from its
// thread/block index, then brute-forces the four trailing digits. Launch as
// kernel<<<26, 26>>>() so every letter pair is covered.
__global__ void kernel() {
  int w, a, s, d, g;
  // FIX: the candidate is 6 characters ("LLdddd") plus a terminator, so the
  // buffer must be 7 bytes; the original char[6] left no room for '\0' and
  // is_a_match() read past the end of the array.
  char password[7];
  password[6] = '\0';
  password[0] ='A' + threadIdx.x;
  password[1] ='A' + blockIdx.x;
  for(w = 0; w < 10; w++){
    g = w + '0';
    password[2] =g;
    for(a = 0; a < 10; a++){
      g = a + '0';
      password[3] =g;
      for(s = 0; s < 10; s++){
        g = s + '0';
        password[4] =g;
        for(d = 0; d < 10; d++){
          g = d + '0';
          password[5] =g;
          if(is_a_match(password)) {
            printf("password found: %s\n", password);
          }
        }
      }
    }
  }
}
/****************************************************************************
  Stores finish - start in nanoseconds into *difference.
  Returns 0 when the difference is positive, 1 otherwise (error indicator).
*****************************************************************************/
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int sec = finish->tv_sec - start->tv_sec;
  long long int nsec = finish->tv_nsec - start->tv_nsec;
  // borrow a second when the nanosecond part went negative
  if (nsec < 0) {
    sec -= 1;
    nsec += 1000000000;
  }
  *difference = sec * 1000000000 + nsec;
  return (*difference > 0) ? 0 : 1;
}
// Launches the brute-force search over all 26x26 letter pairs and reports the
// elapsed wall-clock time.
int main() {
  struct timespec start, finish;
  long long int time_elapsed;

  // FIX: time the actual work — the original took both timestamps BEFORE the
  // launch, so it always reported ~0 ns. Kernel launches are asynchronous, so
  // the device must be synchronized before reading the finish time.
  clock_gettime(CLOCK_MONOTONIC, &start);
  kernel <<<26, 26>>>();
  cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
  clock_gettime(CLOCK_MONOTONIC, &finish);

  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed / 1.0e9));
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.