serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
2,501 | /**
* @file utils.cu
* @author Daniel San Martin (dsanmartinreyes@gmail.com)
* @brief Extra CUDA functions
* @version 0.1
* @date 2020-09-01
*
* @copyright Copyright (c) 2020
*
*/
#include <stdlib.h>
#include "include/utils.cuh"
/**
* @brief Fill array
*
* @param v Pointer to fill, space discrete domain \f$ x \f$ or \f$ y \f$
* @param h \f$ \Delta x \f$ or \f$ \Delta y \f$
* @param N Number of nodes
*/
__global__ void fillVectorKernel(double *v, double h, int N) {
    // One thread per node: v[i] = i * h (uniform grid coordinates).
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N)
        return;  // guard the grid tail
    v[idx] = idx * h;
}
2,502 | /*
* streams_kernel.cu
*
* Created on: 14/02/2018
* Author: fernando
*/
#include <pthread.h>
#include <stdio.h>
/**
 * Busy-work kernel: each element gets sum_{j=1..99} sqrt(pi^i) / j.
 * Grid-stride loop, so any <<<blocks, threads>>> configuration is valid.
 *
 * FIX: use the float overloads (sqrtf/powf) and a float literal; the
 * original sqrt(pow(3.14159, i)) promoted everything to double, which is
 * dramatically slower on consumer GPUs for no benefit here.
 */
__global__ void sqrt_streams(float *x, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
        float sum = 0.0f;
        for (int j = 1; j < 100; j++) {
            sum += sqrtf(powf(3.14159f, (float)i)) / j;
        }
        x[i] = sum;
    }
}
/**
 * Launch one worker kernel per stream on its own device buffer.
 *
 * FIX: the original never destroyed the created streams (resource leak
 * until cudaDeviceReset); they are now synchronized and destroyed after use.
 */
void vector_streams() {
    const int num_streams = 8;
    const int N = 1 << 20;
    cudaStream_t streams[num_streams];
    float *data[num_streams];
    for (int i = 0; i < num_streams; i++) {
        cudaStreamCreate(&streams[i]);
        cudaMalloc(&data[i], N * sizeof(float));
        // launch one worker kernel per stream
        sqrt_streams<<<1, 64, 0, streams[i]>>>(data[i], N);
        // Dummy launch on the legacy default stream; NOTE: this synchronizes
        // with every other stream, so it defeats concurrency between the
        // per-stream workers (kept to preserve the original demo's behavior).
        sqrt_streams<<<1, 1>>>(0, 0);
    }
    for (int i = 0; i < num_streams; i++) {
        cudaStreamSynchronize(streams[i]);  // ensure work finished before freeing
        cudaFree(data[i]);
        cudaStreamDestroy(streams[i]);      // original leaked the streams
    }
    cudaDeviceReset();
}
// Entry point: runs the multi-stream demo; command-line args are ignored.
int main(int argc, char **argv) {
    (void)argc;
    (void)argv;
    vector_streams();
    return 0;
}
|
2,503 | #include "includes.h"
// Scale each of the first HalfDataSize elements of HalfData by N.
__global__ void Counting(int* HalfData, int HalfDataSize, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= HalfDataSize)
        return;  // grid tail guard
    HalfData[idx] *= N;
}
2,504 | //
// Created by kindr on 2021/4/28.
//
#include "manualMemory.cuh"
#include "../../common/utils.cuh"
#include "zeroCopyMemory.cuh"
#include <cstdio>
/**
 * Round-trip test of explicit host/device memory management:
 * zero a host buffer, copy it to the device, run addOne on every element,
 * copy back, and report whether every element is exactly 1.0f.
 *
 * FIX: the original ignored malloc failure (memset on NULL is UB) and
 * scanned the whole buffer even after the first mismatch.
 *
 * @param nElement number of floats in the buffer
 * @param nThread  threads per block for the addOne launch
 */
void manualMemory(size_t nElement, size_t nThread) {
    size_t nBytes = nElement * sizeof(float);
    auto *vec = (float *) malloc(nBytes);
    if (vec == nullptr) {
        fprintf(stderr, "manualMemory: host malloc of %zu bytes failed\n", nBytes);
        return;
    }
    memset(vec, 0, nBytes);
    float *d_vec;
    cudaMalloc(&d_vec, nBytes);
    CHECK(cudaGetLastError());
    cudaMemcpy(d_vec, vec, nBytes, cudaMemcpyHostToDevice);
    CHECK(cudaGetLastError());
    // Ceil-divide so the grid covers every element.
    size_t nBlock = (nElement + nThread - 1) / nThread;
    addOne<<<nBlock, nThread>>>(d_vec, nElement);
    cudaDeviceSynchronize();
    CHECK(cudaGetLastError());
    cudaMemcpy(vec, d_vec, nBytes, cudaMemcpyDeviceToHost);
    CHECK(cudaGetLastError());
    bool isSame = true;
    for (size_t i = 0; i < nElement; ++i) {
        if (vec[i] != 1.f) {
            isSame = false;
            break;  // first mismatch settles the answer
        }
    }
    printf("isSame?: %s", isSame ? "true" : "false");
    cudaFree(d_vec);
    free(vec);
}
|
2,505 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
#define D 1 //Please set it to d in texture_main.m manually
#define NG 32 //Please set it to Ng in texture_main.m manually
#define DEFAULT 0 //Please set it to default in texture_main.m manually
#define WINDOWDIM 9 //Please set it to WindowDim in texture_main.m manually
#define BlOCKDIM 16 //Please set it to BlockDim in texture_main.m manually
#define EPS 1e-8
// Row-major flattening: linear index of element (y, x) in a Dim-wide 2-D array.
__device__ int calInd(int Dim, int y, int x)
{
    return x + y * Dim;
}
/**
 * Reduce the n values of X to one statistic.
 * bool_mean == 1: returns the SUM of the values.
 *   NOTE(review): despite the "Avg" in the name, no division by n is
 *   performed -- confirm the MATLAB caller expects the sum, or divide here.
 * otherwise: returns the range max(X) - min(X).
 */
__device__ float calAvgOrRange(float *X, int n, int bool_mean)
{
float ans = 0.0;
if (bool_mean == 1)
{
for (int i = 0; i < n; i++)
ans += X[i];
}
else
{
// range: single pass tracking both extremes
float max = X[0];
float min = max;
for (int i = 0; i < n; i++)
{
if (X[i] > max)
max = X[i];
if (X[i] < min)
min = X[i];
}
ans = max - min;
}
return ans;
}
// Marginal probability p_x(i): column i of the co-occurrence matrix SDM,
// normalized by the total count `sum`.
__device__ float calPx(unsigned char *SDM, float sum, int i)
{
    float acc = 0.0f;
    for (int row = 0; row < NG; ++row)
        acc += (float)SDM[calInd(NG, row, i)] / sum;
    return acc;
}
// Marginal probability p_y(j): row j of the co-occurrence matrix SDM,
// normalized by the total count `sum`.
__device__ float calPy(unsigned char *SDM, float sum, int j)
{
    float acc = 0.0f;
    for (int col = 0; col < NG; ++col)
        acc += (float)SDM[calInd(NG, j, col)] / sum;
    return acc;
}
/**
 * p_{x+y}(k): probability that the two gray levels of a pair sum to k,
 * for the paper's k = 2..2*NG. Internally shifts to 0-based and sums
 * p(j, k-j) over all j for which both indices lie in [0, NG).
 */
__device__ float calPx_plus_y(unsigned char *SDM, float sum, int k) //k is the same as the paper listed
{
k = k - 2; //k = 2, 3, ..., 2NG
float pxy = 0.0;
int i, j;
int lowerlimit, upperlimit;
// Choose the j-range so that i = k - j also stays inside [0, NG).
if (k < NG)
{
lowerlimit = 0;
upperlimit = k + 1;
}
else
{
lowerlimit = k - NG + 1;
upperlimit = NG;
}
for (j = lowerlimit; j < upperlimit; j++)
{
i = k - j;
pxy += (float)SDM[calInd(NG, j, i)] / sum;
}
return pxy;
}
/**
 * p_{x-y}(k): probability that the two gray levels of a pair differ by k
 * (k = 0..NG-1). First loop sums pairs with i - j == k, second loop pairs
 * with j - i == k.
 * NOTE(review): for k == 0 both loops cover the same diagonal, so it is
 * counted twice -- confirm the reference intends the doubled k=0 term (it
 * feeds methods 10 and 11; method 2 multiplies it by n*n = 0, so no effect
 * there).
 */
__device__ float calPx_minus_y(unsigned char *SDM, float sum, int k)
{
float pxy = 0.0;
int i, j;
int lowerlimit, upperlimit;
lowerlimit = 0;
upperlimit = NG - k;
for (j = lowerlimit; j < upperlimit; j++)
{
i = j + k;
pxy += (float)SDM[calInd(NG, j, i)] / sum;
}
lowerlimit = k;
upperlimit = NG;
for (j = lowerlimit; j < upperlimit; j++)
{
i = j - k;
pxy += (float)SDM[calInd(NG, j, i)] / sum;
}
return pxy;
}
/**
 * Mean (ans.x) and population standard deviation (ans.y, only when
 * flag == 1) of the NG values of the marginal p_x.
 * NOTE(review): since the p_x values sum to 1 (when sum > 0), ans.x is
 * always 1/NG; textbook Haralick correlation uses mu_x = sum_i i*p_x(i)
 * instead -- confirm this definition matches the MATLAB reference.
 */
__device__ float2 cal_mu_std_x(unsigned char *SDM, float sum, int flag) //flag = 0 only calculate mean, flag = 1 calculate mean and std
{
float px[NG] = { 0.0 };
float2 ans; //ans.x is mean, ans.y is standard deviation
ans.x = 0; ans.y = 0;
for (int i = 0; i < NG; i++)
{
px[i] = calPx(SDM, sum, i);
}
for (int i = 0; i < NG; i++)
ans.x += px[i];
ans.x = ans.x / NG;
if (flag == 1)
{
for (int i = 0; i < NG; i++)
{
ans.y += (px[i] - ans.x)* (px[i] - ans.x);
}
ans.y = sqrt(ans.y / NG);
}
return ans;
}
/**
 * Mean (ans.x) and population standard deviation (ans.y, only when
 * flag == 1) of the NG values of the marginal p_y. Mirror of
 * cal_mu_std_x over rows instead of columns.
 * NOTE(review): as with p_x, the mean of the p_y values is always 1/NG
 * when sum > 0 -- confirm against the MATLAB reference.
 */
__device__ float2 cal_mu_std_y(unsigned char *SDM, float sum, int flag) //flag = 0 only calculate mean, flag = 1 calculate mean and std
{
float py[NG] = { 0.0 };
float2 ans; //ans.x is mean, ans.y is standard deviation
ans.x = 0; ans.y = 0;
for (int j = 0; j < NG; j++)
{
py[j] = calPy(SDM, sum, j);
}
for (int j = 0; j < NG; j++)
ans.x += py[j];
ans.x = ans.x / NG;
if (flag == 1)
{
for (int j = 0; j < NG; j++)
{
ans.y += (py[j] - ans.x) * (py[j] - ans.x);
}
ans.y = sqrt(ans.y / NG);
}
return ans;
}
/**
 * Mean gray level mu = sum_i i * p_x(i) of the normalized co-occurrence
 * matrix (used by method 4, sum-of-squares variance).
 *
 * FIX: the original wrapped this sum in an extra do-nothing loop over j
 * (0..NG), re-accumulating the identical inner sum NG times -- inflating
 * the result by a factor of NG and doing NG^3 redundant work.
 */
__device__ float calmu(unsigned char *SDM, float sum)
{
float mu = 0.0;
for (int i = 0; i < NG; i++)
{
mu += calPx(SDM, sum, i) * i;
}
return mu;
}
// Joint entropy H(X,Y) = -sum_{i,j} p(i,j) * log(p(i,j)); EPS guards log(0).
__device__ float calHXY(unsigned char *SDM, float sum)
{
    float entropy = 0.0f;
    for (int idx = 0; idx < NG * NG; ++idx)
    {
        float p = (float)SDM[idx] / sum;
        entropy -= p * log(p + EPS);
    }
    return entropy;
}
// HXY1 = -sum_{i,j} p(i,j) * log(p_x(i) * p_y(j)); used by the
// information measures of correlation (method 12).
__device__ float calHXY1(unsigned char *SDM, float sum)
{
    float px[NG];
    float py[NG];
    for (int k = 0; k < NG; ++k)
    {
        px[k] = calPx(SDM, sum, k);
        py[k] = calPy(SDM, sum, k);
    }
    float acc = 0.0f;
    for (int j = 0; j < NG; ++j)
    {
        for (int i = 0; i < NG; ++i)
        {
            float p = (float)SDM[calInd(NG, j, i)] / sum;
            acc -= p * log(px[i] * py[j] + EPS);
        }
    }
    return acc;
}
// HXY2 = -sum_{i,j} p_x(i)*p_y(j) * log(p_x(i)*p_y(j)): entropy of the
// product of the marginals; used by method 13.
__device__ float calHXY2(unsigned char *SDM, float sum)
{
    float px[NG];
    float py[NG];
    for (int k = 0; k < NG; ++k)
    {
        px[k] = calPx(SDM, sum, k);
        py[k] = calPy(SDM, sum, k);
    }
    float acc = 0.0f;
    for (int j = 0; j < NG; ++j)
    {
        for (int i = 0; i < NG; ++i)
        {
            float prod = px[i] * py[j];
            acc -= prod * log(prod + EPS);
        }
    }
    return acc;
}
// Entropy HX of the p_x marginal distribution.
__device__ float calHX(unsigned char *SDM, float sum)
{
    float entropy = 0.0f;
    for (int i = 0; i < NG; ++i)
    {
        float p = calPx(SDM, sum, i);
        entropy -= p * log(p + EPS);
    }
    return entropy;
}
// Entropy HY of the p_y marginal distribution.
__device__ float calHY(unsigned char *SDM, float sum)
{
    float entropy = 0.0f;
    for (int j = 0; j < NG; ++j)
    {
        float p = calPy(SDM, sum, j);
        entropy -= p * log(p + EPS);
    }
    return entropy;
}
/**
 * Compute one Haralick-style texture feature from the co-occurrence matrix
 * SDM (NG x NG counts). `method` selects the feature (labels on each
 * branch); probabilities are counts normalized by `sum`, the total count.
 * Every branch returns 0 for an empty window (sum == 0).
 * Method 14 (maximal correlation coefficient) is stubbed out: it would need
 * an eigen-solve, impractical per-thread on the GPU (see comment below).
 */
__device__ float calTexture(unsigned char *SDM, int method)
{
float texture = 0.0;
float sum = 0.0;
// p is function-scope scratch; method 3 accumulates into it directly.
float p = 0.0;
for (int i = 0; i < NG*NG; i++)
sum += (float)SDM[i];
if (method == 0)
{
// raw total count of co-occurrence pairs
texture = sum;
}
else if (method == 1) //Angular Second Moment
{
if (sum == 0)
texture = 0;
else
{
for (int i = 0; i < NG*NG; i++)
{
p = (float)SDM[i] / sum;
texture += p*p;
}
}
}
else if (method == 2) //Contrast
{
if (sum == 0)
texture = 0;
else
{
// sum_n n^2 * p_{x-y}(n); the n = 0 term contributes nothing
for (int n = 0; n < NG; n++)
{
texture += n*n*calPx_minus_y(SDM, sum, n);
}
}
}
else if (method == 3) //Correlation
{
if (sum == 0)
texture = 0;
else
{
float2 mustd_x = cal_mu_std_x(SDM, sum, 1);
float2 mustd_y = cal_mu_std_y(SDM, sum, 1);
// p accumulates sum_{i,j} i*j*p(i,j) here
for (int j = 0; j < NG; j++)
{
for (int i = 0; i < NG; i++)
{
p += i*j*(float)SDM[calInd(NG, j, i)] / sum;
}
}
// NOTE(review): divides by the product of the marginal std devs with
// no EPS guard -- a constant-gray window gives 0/0 here.
texture = (p - mustd_x.x * mustd_y.x) / (mustd_x.y * mustd_y.y);
}
}
else if (method == 4) //Sum of Squares: Variance
{
if (sum == 0)
texture = 0;
else
{
float mu = calmu(SDM, sum);
for (int j = 0; j < NG; j++)
{
for (int i = 0; i < NG; i++)
{
p = (float)SDM[calInd(NG, j, i)] / sum;
texture += (i - mu)*(i - mu)*p;
}
}
}
}
else if (method == 5) //Inverse Difference Moment
{
if (sum == 0)
texture = 0;
else
{
for (int j = 0; j < NG; j++)
{
for (int i = 0; i < NG; i++)
{
p = (float)SDM[calInd(NG, j, i)] / sum;
texture += p / (1 + (i - j)*(i - j));
}
}
}
}
else if (method == 6) //Sum Average
{
if (sum == 0)
texture = 0;
else
{
for (int k = 2; k <= 2 * NG; k++)
{
texture += k*calPx_plus_y(SDM, sum, k);
}
}
}
else if (method == 7) //Sum Variance
{
if (sum == 0)
texture = 0;
else
{
float pxy[2 * NG - 1];
// f8 is the sum entropy (Haralick's f8), reused as the center of the
// variance per the original paper's definition of f7.
float f8 = 0.0;
for (int k = 2; k <= 2 * NG; k++)
{
p = calPx_plus_y(SDM, sum, k);
pxy[k - 2] = p;
f8 -= p*log(p + EPS);
}
for (int k = 2; k <= 2 * NG; k++)
{
texture += (k - f8)*(k - f8)*pxy[k - 2];
}
}
}
else if (method == 8) //Sum Entropy
{
if (sum == 0)
texture = 0;
else
{
for (int k = 2; k <= 2 * NG; k++)
{
p = calPx_plus_y(SDM, sum, k);
texture -= p*log(p + EPS);
}
}
}
else if (method == 9) //Entropy
{
if (sum == 0)
texture = 0;
else
{
texture = calHXY(SDM, sum);
}
}
else if (method == 10) //Difference Variance
{
if (sum == 0)
texture = 0;
else
{
// population variance of the NG values of p_{x-y}
float pxy[NG];
float mean = 0.0;
for (int k = 0; k < NG; k++)
{
pxy[k] = calPx_minus_y(SDM, sum, k);
mean += pxy[k];
}
mean = mean / NG;
for (int k = 0; k < NG; k++)
{
texture += (pxy[k] - mean)*(pxy[k] - mean);
}
texture = texture / NG;
}
}
else if (method == 11) //Difference Entropy
{
if (sum == 0)
texture = 0;
else
{
for (int k = 0; k < NG; k++)
{
p = calPx_minus_y(SDM, sum, k);
texture -= p*log(p + EPS);
}
}
}
else if (method == 12) //Information Mesures of Correlation
{
if (sum == 0)
texture = 0;
else
{
float HX = calHX(SDM, sum);
float HY = calHY(SDM, sum);
float H;
if (HX >= HY)
H = HX;
else
H = HY;
// NOTE(review): H can be ~0 for a constant-gray window; no guard here.
texture = (calHXY(SDM, sum) - calHXY1(SDM, sum)) / H;
}
}
else if (method == 13) //Information Mesures of Correlation
{
if (sum == 0)
texture = 0;
else
{
texture = 1 - exp(-2.0*(calHXY2(SDM, sum) - calHXY(SDM, sum)));
// clamp before sqrt: the exp argument can make the operand negative
if (texture < 0)
texture = 0;
else
texture = sqrt(texture);
}
}
else if (method == 14) //Maximal Correlation Coefficient
{
/*
if (sum == 0)
texture = 0;
else
{
float Q[NG*NG];
float q, pik, pjk, pxi, pyk;
for (int j = 0; j < NG; j++)
{
for (int i = 0; i < NG; i++)
{
pxi = calPx(SDM, sum, i);
for (int k = 0; k < NG; k++)
{
pik= (float)SDM[calInd(NG, k, i)] / sum;
pjk = (float)SDM[calInd(NG, k, j)] / sum;
pyk = calPy(SDM, sum, k);
q += (pik*pjk) / (pxi*pyk + EPS);
}
Q[calInd(NG, j, i)] = q;
}
}
//Next are solving Q.TQ second largest eigenvalue, supposed to use QR decomposing.
//It hardly available for a single thread in GPU.
}
*/
}
return texture;
}
// Increment the co-occurrence count for the gray-level pair (value1, value2).
// SDM holds unsigned char counts; with the default 9x9 window the per-cell
// count stays far below 256, but if WINDOWDIM grows this counter can wrap --
// NOTE(review): consider a wider counter type for larger windows.
__device__ void updateSDM(unsigned char *SDM, int value1, int value2)
{
SDM[calInd(NG, value1, value2)] += 1;
}
/**
 * Quantize a raw pixel value to a gray-level rank in [0, NG-1] using the
 * ascending threshold table Q.
 *
 * FIX: the original evaluated Q[rank] BEFORE checking rank < NG - 1, so the
 * last iteration always read Q[NG - 1] even when it could not advance;
 * testing the bound first short-circuits that read (safe even if Q holds
 * only NG - 1 thresholds). The returned rank is unchanged for all inputs.
 */
__device__ int convert2scale(float value, float *Q)
{
int rank = 0;
while (rank < NG - 1 && Q[rank] < value)
rank += 1;
return rank;
}
/**
 * Stage a (BlOCKDIM + WINDOWDIM - 1)^2 tile of quantized pixels into shared
 * memory. Each thread converts and stores its own pixel, plus the
 * right / bottom / corner halo pixels when it lies within WINDOWDIM - 1 of
 * the tile edge. (x, y) index the global SplitImage; (ix, iy) are the
 * thread's coordinates within the block.
 * NOTE(review): this ends in __syncthreads(), but the caller
 * (gpuCalculateTexture) returns early for threads with x/y >= TextureDim
 * before calling it -- a divergent barrier whenever TextureDim is not a
 * multiple of BlOCKDIM. Verify the launch geometry guarantees full blocks.
 */
__device__ void copyAndConvertImage(float *SplitImage, int SIDim, int *SubSplitImage, int SSIDim, float *Q,
int x, int y, int ix, int iy)
{
SubSplitImage[calInd(SSIDim, iy, ix)] = convert2scale(SplitImage[calInd(SIDim, x, y)], Q);
if (ix < WINDOWDIM - 1)
SubSplitImage[calInd(SSIDim, iy, ix + BlOCKDIM)] = convert2scale(SplitImage[calInd(SIDim, x + BlOCKDIM, y)], Q);
if (iy < WINDOWDIM - 1)
SubSplitImage[calInd(SSIDim, iy + BlOCKDIM, ix)] = convert2scale(SplitImage[calInd(SIDim, x, y + BlOCKDIM)], Q);
if (ix < WINDOWDIM - 1 && iy < WINDOWDIM - 1)
SubSplitImage[calInd(SSIDim, iy + BlOCKDIM, ix + BlOCKDIM)]
= convert2scale(SplitImage[calInd(SIDim, x + BlOCKDIM, y + BlOCKDIM)], Q);
__syncthreads();
}
/**
 * One thread per output pixel of the TextureDim x TextureDim texture map.
 * Each thread builds four co-occurrence matrices (offsets (D,0), (D,-D),
 * (0,D), (D,D)) over its WINDOWDIM x WINDOWDIM neighborhood, computes the
 * selected Haralick feature for each, and writes the mean-or-range of the
 * four (bool_mean selects which). Expects BlOCKDIM x BlOCKDIM thread blocks.
 */
__global__ void gpuCalculateTexture(float *Texture, int TextureDim, float *SplitImage, int SIDim, float *Q, int method, int bool_mean)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int ix = threadIdx.x;
int iy = threadIdx.y;
// NOTE(review): threads that return here skip the __syncthreads() inside
// copyAndConvertImage and at the end of this kernel -- divergent barrier
// if TextureDim is not a multiple of BlOCKDIM.
if (x >= TextureDim || y >= TextureDim)
return;
__shared__ int SubSplitImage[(BlOCKDIM + WINDOWDIM - 1)*(BlOCKDIM + WINDOWDIM - 1)];
int SSIDim = BlOCKDIM + WINDOWDIM - 1;
copyAndConvertImage(SplitImage, SIDim, SubSplitImage, SSIDim, Q, x, y, ix, iy);
// Window centers on the pixel at offset (WINDOWDIM-1)/2; a DEFAULT-valued
// center marks an invalid pixel and is passed through unchanged.
if (SplitImage[calInd(SIDim, x + (WINDOWDIM - 1) / 2, y + (WINDOWDIM - 1) / 2)] == DEFAULT)
Texture[calInd(TextureDim, x, y)] = DEFAULT;
else
{
int jstart, jstop, istart, istop, xshift, yshift;
int value1, value2;
float texture[4] = { 0.0 };
// One pass per displacement direction t; the loop bounds are trimmed so
// (i + xshift, j + yshift) stays inside the staged tile.
for (int t = 0; t < 4; t++)
{
//X=D, Y=0 shift
if (t == 0)
{
xshift = D; yshift = 0;
jstart = iy; jstop = WINDOWDIM + iy; istart = ix; istop = WINDOWDIM - D + ix;
}
//X=D, Y=-D shift
if (t == 1)
{
xshift = D; yshift = -1 * D;
jstart = D + iy; jstop = WINDOWDIM + iy; istart = ix; istop = WINDOWDIM - D + ix;
}
//X=0, Y=D shift
if (t == 2)
{
xshift = 0; yshift = D;
jstart = iy; jstop = WINDOWDIM - D + iy; istart = ix; istop = WINDOWDIM + ix;
}
//X=D, Y=D shift
if (t == 3)
{
xshift = D; yshift = D;
jstart = iy; jstop = WINDOWDIM - D + iy; istart = ix; istop = WINDOWDIM - D + ix;
}
// fresh symmetric co-occurrence matrix per direction (pair counted both ways)
unsigned char SDM[NG*NG] = { 0 };
for (int j = jstart; j < jstop; j++)
{
for (int i = istart; i < istop; i++)
{
value1 = SubSplitImage[calInd(SSIDim, j, i)];
value2 = SubSplitImage[calInd(SSIDim, j + yshift, i + xshift)];
updateSDM(SDM, value1, value2);
updateSDM(SDM, value2, value1);
}
}
texture[t] = calTexture(SDM, method);
}
Texture[calInd(TextureDim, x, y)] = calAvgOrRange(texture, 4, bool_mean);
}
__syncthreads();
}
2,506 | #include <stdio.h>
#include <cuda.h>
#include <unistd.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/time.h>
#define NUM_THREADS 4
#define MEMPER 0.9
#define SIZE 1024ul
#define REQ_UNDEFINED '-'
#define REQ_IDLE ';'
#define REQ_START 'A'
#define RSP_FINISH 'Z'
#define RSP_UNDEFINED '-'
// Per-worker arguments for the host-side Increment() threads.
struct thread_arg{
int id; // worker index (0..7); selects which slice of `doubles` to scale
int size; // buffer size in bytes (main passes available_mem / 4)
double* doubles; // buffer whose slice this worker doubles in place
double* compare; // unused by Increment(); NOTE(review): never assigned by main()
};
// pthread worker: doubles this worker's contiguous 1/64th slice of the
// buffer (8 workers, each handling size/64 elements starting at id*work).
void *Increment(void *args)
{
    struct thread_arg* params = (struct thread_arg*) args;
    int tid = params->id;
    int size = params->size;
    double* buf = params->doubles;
    int work = size / (8 * 8);
    int base = tid * work;
    for (int i = 0; i < work; i++) {
        buf[base + i] *= 2;
    }
    pthread_exit(NULL);
}
/**
 * Persistent stress kernel driven through the `cmd` mailbox (unsigned ints):
 *   cmd[0] host request (REQ_START/REQ_IDLE), cmd[1] device response,
 *   cmd[2] host "go" flag / thread count, cmd[3..4] low/high words of the
 *   `compare` device pointer, cmd[5..6] low/high words of `input`,
 *   cmd[7] device-internal "job active" flag, cmd[8] run flag (loop exits
 *   when host clears it).
 * Block 0 / thread 0 acts as coordinator; all threads spin-poll, so this
 * kernel runs until the host changes the mailbox.
 * NOTE(review): cmd[8] is read before main() ever assigns cmd_h[8] prior to
 * the initial copy -- whether the loop runs at all is undefined; verify the
 * host-side initialization.
 */
__global__ void
intensive_kernel(unsigned int *cmd){
int threadId = threadIdx.x + (blockDim.x * blockIdx.x);
double* input;
double* compare;
while (cmd[8]) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
if (cmd[0] == REQ_START && cmd[1] != RSP_FINISH) {
// we've got a request for a new job
// initialize
cmd[7] = 1;
__threadfence();
}
else {
cmd[7] = 0;
cmd[1] = RSP_UNDEFINED;
__threadfence();
}
}
__syncthreads();
if (cmd[7] == 1) {
// busy-wait until the host publishes the go flag in cmd[2]
while(cmd[2] == 0);
// reassemble the 64-bit device pointers from two 32-bit mailbox words
input = (double*) ((long long)cmd[6]<<32 | cmd[5]);
compare = (double*) ((long long)cmd[4]<<32 | cmd[3]);
if(input[threadId] - compare[threadId] > 0.01){
input[threadId] = (compare[threadId] * 2) + exp(input[threadId]);
}
if (threadIdx.x == 0 && blockIdx.x == 0) {
// finitto
cmd[0] = REQ_IDLE;
cmd[1] = RSP_FINISH;
cmd[7] = 0;
__threadfence();
// host will set #threads equal to 0 after obtaining the results
while (cmd[2] != 0);
}
}
__syncthreads();
}
}
/**
 * Query and print the device's total/free memory (in MB); returns the free
 * byte count.
 *
 * FIX: cudaMemGetInfo yields size_t values, but the original printed them
 * with %d -- undefined behavior on LP64 where size_t is 64-bit. Use %zu.
 */
size_t
available_memory(){
    size_t mem_total = 0;
    size_t mem_free = 0;
    cudaMemGetInfo(&mem_free, &mem_total);
    printf("Total memory %zuMB\tFree Memory %zuMB\n", mem_total/(1024*1024), mem_free/(1024*1024));
    return mem_free;
}
/**
 * GPU+CPU stress driver: fills ~90% of device memory, launches the
 * persistent intensive_kernel on stream1, forks ./temperature for
 * monitoring, then repeatedly (for `duration` seconds) mutates the host
 * buffers with 8 pthreads, pushes them to the device over stream2, and
 * drives the kernel through the cmd mailbox (see intensive_kernel).
 */
int
main(int argc, char **argv){
size_t available_mem = 0;
double *doubles_host;
double *doubles_device;
double *compare_host;
double *compare_device;
unsigned int* cmd_h;
unsigned int* cmd_d;
int threads;
int blocks;
int timeToRun;
int result;
cudaStream_t stream1;
cudaStream_t stream2;
struct timeval t1;
struct timeval t2;
pthread_t thread[8];
struct thread_arg args;
if(argc < 2){
printf("Usage: stresstest <duration>\n\tduration\tTime stress will run in seconds\n");
exit(EXIT_FAILURE);
}
timeToRun = atoi(argv[1]);
// system("sudo ./gpu_over.sh");
// system("sudo ./cpu_over.sh");
cudaSetDevice(0);
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
available_mem = available_memory() * 0.9;
// NOTE(review): %d with a size_t argument is UB on LP64 -- should be %zu.
printf("Allocating 90%% of the available memory: (%dMB)\n", available_mem/(1024 * 1024));
// Two device buffers of available_mem/4 bytes each, plus the 10-word mailbox;
// host sides are pinned for async copies.
cudaMalloc((void**)&doubles_device, available_mem/4 * sizeof(char));
cudaMalloc((void**)&compare_device, available_mem/4 * sizeof(char));
cudaMalloc((void**)&cmd_d, 10 * sizeof(unsigned int));
cudaMallocHost((void **)&cmd_h, 10 * sizeof(unsigned int));
cudaMallocHost((void**)&doubles_host, available_mem/4 * sizeof(char));
cudaMallocHost((void**)&compare_host, available_mem/4 * sizeof(char));
srand(time(NULL));
printf("Initializing buffers...\n");
// available_mem/32 doubles = available_mem/4 bytes per buffer
for(int i=0; i < available_mem/32; i++){
doubles_host[i] = i * rand() * 1.8643;
compare_host[i] = i * rand() * 1.4903;
}
printf("Finished initialization of buffers!\n\n");
cmd_h[0] = REQ_UNDEFINED;
cmd_h[1] = RSP_UNDEFINED;
cmd_h[9] = 0;
// NOTE(review): cmd_h[2..8] are never initialized before the copy below;
// the kernel's while(cmd[8]) run-flag therefore starts with an
// indeterminate value -- confirm intent (cmd_h[8] = 1 is likely needed here).
cudaMemcpy(doubles_device, doubles_host, available_mem/4 * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(compare_device, compare_host, available_mem/4 * sizeof(char), cudaMemcpyHostToDevice);
// Stash the 64-bit device pointers into mailbox words 3..6 for the kernel
// to reassemble (memcpy of sizeof(double*) spans two unsigned ints).
cudaMemcpy(cmd_h+3,&(compare_device), sizeof(double*),cudaMemcpyHostToHost);
cudaMemcpy(cmd_h+5,&(doubles_device), sizeof(double*),cudaMemcpyHostToHost);
cudaMemcpy(cmd_d, cmd_h, 10 * sizeof(unsigned int), cudaMemcpyHostToDevice);
threads = 1024;
blocks = available_mem/(16 * threads);
gettimeofday(&t1, 0);
printf("Start stressing...\n");
intensive_kernel<<<blocks,threads,0,stream1>>>(cmd_d);
pid_t pid = fork();
if(pid == 0){
//child
if(execv("./temperature", argv) == -1){
printf("Execv failed!\n");
exit(EXIT_FAILURE);
}
}
else if(pid > 0){
//parent
gettimeofday(&t2, 0);
while(t2.tv_sec - t1.tv_sec < timeToRun){
usleep(10);
cmd_h[0] = REQ_START;
cmd_h[1] = RSP_UNDEFINED;
cmd_h[2] = random() % 512;
// NOTE(review): a single `args` struct is shared by all 8 workers while
// they run -- each pthread_create overwrites args.id before the previous
// thread is guaranteed to have read it (data race on the worker index).
for(int i=0; i < 8; i++ ){
args.id = i;
args.size = available_mem/4;
args.doubles = compare_host;
result = pthread_create(&thread[i], NULL,
Increment, (void *)&args);
if (result){
printf("Unable to create thread\n");
exit(-1);
}
}
for(int i = 0; i < 8; i++){
pthread_join(thread[i], NULL);
}
cudaMemcpyAsync(doubles_device, doubles_host, available_mem/4 * sizeof(char), cudaMemcpyHostToDevice, stream2);
cudaMemcpyAsync(compare_device, compare_host, available_mem/4 * sizeof(char), cudaMemcpyHostToDevice, stream2);
// first set #threads
cudaMemcpyAsync(cmd_d+2, cmd_h+2, 1 * sizeof(unsigned int), cudaMemcpyHostToDevice, stream2);
cudaStreamSynchronize(stream2);
// set RSP
cudaMemcpyAsync(cmd_d+1, cmd_h+1, 1 * sizeof(unsigned int), cudaMemcpyHostToDevice, stream2);
cudaStreamSynchronize(stream2);
// set REQ
cudaMemcpyAsync(cmd_d+0, cmd_h+0, 1 * sizeof(unsigned int), cudaMemcpyHostToDevice, stream2);
cudaStreamSynchronize(stream2);
int ready = 0;
// Poll the device response word until the kernel reports RSP_FINISH.
while (((cmd_h[0] == REQ_START) && cmd_h[1] != RSP_FINISH)) {
ready = 1;
// get RSP
cudaMemcpyAsync(&cmd_h[1], &cmd_d[1], 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost, stream2);
cudaStreamSynchronize(stream2);
}
if (ready == 1) {
// get data
cudaMemcpyAsync(doubles_host,doubles_device, available_mem/4 * sizeof(char), cudaMemcpyDeviceToHost, stream2);
cudaStreamSynchronize(stream2);
for(int i=0; i < 8; i++ ){
args.id = i;
args.size = available_mem/4;
args.doubles = doubles_host;
result = pthread_create(&thread[i], NULL,
Increment, (void *)&args);
if (result){
printf("Unable to create thread\n");
exit(-1);
}
}
for(int i = 0; i < 8; i++){
pthread_join(thread[i], NULL);
}
//pthread_exit(NULL);
cmd_h[0] = REQ_UNDEFINED;
cudaMemcpyAsync(&cmd_d[0], &cmd_h[0], 1 * sizeof(unsigned int), cudaMemcpyHostToDevice, stream2);
cudaStreamSynchronize(stream2);
// notify GPU by setting #threads equal to 0
cmd_h[2] = 0;
cudaMemcpyAsync(&cmd_d[2], &cmd_h[2], 1 * sizeof(unsigned int), cudaMemcpyHostToDevice, stream2);
cudaStreamSynchronize(stream2);
}
gettimeofday(&t2, 0);
}
// Shutdown handshake.
// NOTE(review): this looks inverted -- the kernel loops while(cmd[8]) is
// nonzero, so writing 1 here keeps it alive, and nothing in the kernel
// shown ever writes cmd[9], so the wait loop below may never terminate.
// Confirm the intended protocol (cmd[8] = 0 to stop?).
cmd_h[8] = 1;
cudaMemcpyAsync(&cmd_d[8], &cmd_h[8], 1 * sizeof(unsigned int), cudaMemcpyHostToDevice, stream2);
cudaStreamSynchronize(stream2);
while(cmd_h[8] == 1 && cmd_h[9] != 1){
cudaMemcpyAsync(&cmd_h[9], &cmd_d[9], 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost, stream2);
cudaStreamSynchronize(stream2);
}
}
else{
//error
printf("fork() failed!\n");
exit(EXIT_FAILURE);
}
cudaFree(doubles_device);
cudaFree(compare_device);
cudaFree(cmd_d);
cudaFreeHost(doubles_host);
cudaFreeHost(compare_host);
cudaFreeHost(cmd_h);
// system("sudo ./gpu_down.sh");
// system("sudo ./cpu_down.sh");
printf("Finished!\n");
}
|
2,507 | #include <iostream>
// Bucket a 2-D texture offset (tx, ty) into one of 6 concentric "layers":
// round |tx|/radius and |ty|/radius to the nearest integer, then map the
// taxicab distance to a ring index, clamped to 5. The (1,1) diagonal cell
// is pinned to layer 2.
__device__ int compute_layer(float tx, float ty, float radius) {
    int x = int(std::abs(tx) / radius + 0.5);
    int y = int(std::abs(ty) / radius + 0.5);
    if (x == 1 && y == 1)
        return 2;
    int ring = (x + y < 2) ? (x + y) : (x + y + 1);
    return (ring > 5) ? 5 : ring;
}
/**
 * Layered neighborhood pooling. For each group (blockIdx.x*blockDim.x +
 * threadIdx.x over groups, blockIdx.y over batch, blockIdx.z over chunks of
 * num_feat_per_threads feature dims):
 *   1) assign each neighbor to one of 6 radial layers via compute_layer;
 *   2) average the neighbors' features per layer into out (6*num_featdim
 *      per group);
 *   3) fill empty layers by linear interpolation between the nearest
 *      non-empty layers on either side (extrapolating with weight 1 at the
 *      ends).
 * Neighbors with tex x-coordinate > 1e20 are treated as invalid and skipped.
 * NOTE(review): step 2 uses "out_temp[j] +=", so `out` must be zeroed by
 * the caller before launch -- confirm.
 */
__global__ void five_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, float* out, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
const float* points_array = points + (b * num_points * num_featdim);
float* out_array = out + (b * num_groups + group_idx) * (6 * num_featdim);
// pass 1: count how many neighbors fall into each layer
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
// pass 2: accumulate per-layer feature means (counts are >= 1 here)
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
float* out_temp = out_array + layer * num_featdim;
const float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] += point_temp[j] / layers_counts[layer];
}
}
// pass 3: interpolate empty layers from the nearest populated neighbors
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
// both sides populated: inverse-distance linear weights
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
// nothing populated above i: copy from the front side only
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
// nothing populated below i: copy from the rear side only
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
float* out_temp = out_array + i * num_featdim;
float* out_front = out_array + front * num_featdim;
float* out_rear = out_array + rear * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
out_temp[j] = out_front[j] * weight_front + out_rear[j] * weight_rear;
}
}
}
}
/**
 * Backward pass of five_kernel: scatter grad_out back to grad_points.
 * Each neighbor's gradient is its own layer's grad plus the grads of the
 * empty layers that were interpolated FROM its layer in the forward pass
 * (walking down with weights_rear, up with weights_front), divided by the
 * layer's neighbor count (matching the forward per-layer averaging).
 * Writes use atomicAdd since several groups may share a point.
 * Thread/grid layout is identical to five_kernel.
 */
__global__ void fivegrad_kernel(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, int num_feat_per_threads,
const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius)
{
int b = blockIdx.y;
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= num_groups)
return;
int feat_start = num_feat_per_threads * blockIdx.z;
int feat_end = feat_start + num_feat_per_threads;
if (feat_end >= num_featdim)
feat_end = num_featdim;
const int* group_array = idx + (b * num_groups + group_idx) * num_neighbors;
const float* tex_array = tex + 2 * num_points * b;
float* points_array = grad_points + (b * num_points * num_featdim);
const float* out_array = grad_out + (b * num_groups + group_idx) * (6 * num_featdim);
// Recompute the forward pass's per-layer counts.
int layers_counts[6] = {0, 0, 0, 0, 0, 0};
float weights_front[6] = {0, 0, 0, 0, 0, 0};
float weights_rear[6] = {0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
layers_counts[layer] += 1;
}
// Recompute the interpolation weights of every EMPTY layer (same branch
// structure as the forward fill-in pass).
for (int i = 0; i < 6; ++i) {
if (layers_counts[i] == 0) {
int front = i;
int rear = i;
float weight_front = 0.0f;
float weight_rear = 0.0f;
while (front >= 0 && layers_counts[front] == 0)
front -= 1;
while (rear < 6 && layers_counts[rear] == 0)
rear += 1;
if (front >= 0 && rear < 6) {
weight_rear = (i - front) / (rear - front + 0.0f);
weight_front = 1.0f - weight_rear;
}
else if (front >= 0) {
weight_front = 1.0f;
weight_rear = 0.0f;
rear = 5;
}
else {
weight_front = 0.0f;
weight_rear = 1.0f;
front = 0;
}
weights_front[i] = weight_front;
weights_rear[i] = weight_rear;
}
}
// Scatter: each valid neighbor receives its layer's grad plus the grads of
// adjacent empty layers that borrowed from it, scaled by 1/count.
for (int i = 0; i < num_neighbors; ++i) {
int index = group_array[i];
if (tex_array[index * 2] > 1e20)
continue;
int layer = compute_layer(tex_array[index * 2], tex_array[index * 2 + 1], radius);
const float* out_temp = out_array + layer * num_featdim;
float* point_temp = points_array + index * num_featdim;
for (int j = feat_start; j < feat_end; ++j) {
float signal = out_temp[j];
// empty layers below this one used it as their "rear" source
int l = layer - 1;
const float* out_temp_step = out_temp - num_featdim;
while (l >= 0 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_rear[l];
out_temp_step -= num_featdim;
l -= 1;
}
// empty layers above this one used it as their "front" source
l = layer + 1;
out_temp_step = out_temp + num_featdim;
while (l < 6 && layers_counts[l] == 0) {
signal += out_temp_step[j] * weights_front[l];
out_temp_step += num_featdim;
l += 1;
}
atomicAdd(&point_temp[j], signal / layers_counts[layer]);
}
}
}
// Host launcher for five_kernel: grid = (ceil(num_groups/256), batch_size,
// num_threads_for_feat), 256 threads/block; blockIdx.z selects a chunk of
// num_feat_per_threads feature dims.
// NOTE(review): num_threads_for_feat = (num_groups + 255) / num_groups
// depends on num_groups rather than num_featdim -- possibly
// (num_featdim + 255) / 256 was intended. Feature coverage is still
// complete because num_feat_per_threads is ceil-divided against it.
void fivekernel_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, float* out, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
five_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, out, radius);
}
// Host launcher for fivegrad_kernel; identical grid/block derivation as
// fivekernel_gpu (including the suspicious num_threads_for_feat formula --
// NOTE(review): (num_groups + 255) / num_groups depends on num_groups, not
// num_featdim; confirm intent).
void fivekernelgrad_gpu(int batch_size, int num_points, int num_featdim, int num_neighbors, int num_groups, const float* points, const float* tex, const int* idx, const float* grad_out, float* grad_points, float radius) {
int num_threads_for_feat = (num_groups + 255) / num_groups;
int num_feat_per_threads = (num_featdim + num_threads_for_feat - 1) / num_threads_for_feat;
fivegrad_kernel<<<dim3((num_groups + 255) / 256, batch_size, num_threads_for_feat), dim3(256, 1, 1)>>>(
batch_size, num_points, num_featdim, num_neighbors, num_groups, num_feat_per_threads, points, tex, idx, grad_out, grad_points, radius);
}
// Size the grid so blockDim-sized tiles cover an n x n problem: ceiling
// division in each dimension handles n not divisible by the block size.
void setGrid(int n, dim3 &blockDim, dim3 &gridDim)
{
    gridDim.x = (n + blockDim.x - 1) / blockDim.x;
    gridDim.y = (n + blockDim.y - 1) / blockDim.y;
}
|
2,509 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define N 100
#define IT 3
// One Jacobi sweep: x_result[i] = (b[i] - sum_{j != i} A(i,j)*x[j]) / A(i,i).
// A is stored with index a[i + j*n]; grid-stride loop over rows.
__global__ void JacobiIteration(int n, float *a, float *b, float *x, float*x_result){
    int stride = blockDim.x * gridDim.x;
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < n; row += stride){
        float acc = 0;
        for (int col = 0; col < n; col++){
            if (col != row)
                acc += a[row + col * n] * x[col];
        }
        x_result[row] = (b[row] - acc) / a[row + row * n];
    }
}
// Fill A with the tridiagonal 1-D Laplacian stencil: 2 on the diagonal,
// -1 on the sub/super-diagonals, 0 elsewhere (indexing a[j + i*n]).
__global__ void initAMatrix(int n, float*a){
    for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < n; j += blockDim.x * gridDim.x){
        for (int i = 0; i < n; i++){
            float v;
            if (j == i)
                v = 2.0;
            else if (j == i - 1 || j == i + 1)
                v = -1.0;
            else
                v = 0.0;
            a[j+i*n] = v;
        }
    }
}
// Element-wise copy b -> a over n floats, grid-stride style.
__global__ void copy(int n, float*a, float *b){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    while (idx < n) {
        a[idx] = b[idx];
        idx += step;
    }
}
// Exchange two float pointers (used to ping-pong x and x_result).
void swap(float* &a, float* &b){
    float *held = b;
    b = a;
    a = held;
}
/**
 * Solve the tridiagonal system A x = b (1-D Laplacian) with IT Jacobi
 * sweeps on the GPU, ping-ponging x / x_result between iterations, then
 * print the result.
 *
 * FIX: the original never released the four managed allocations; they are
 * now freed before exit.
 */
int main(){
    float *a, *b, *x, *x_result;
    // alloc: unified memory so host init and device kernels share buffers
    cudaMallocManaged(&a, N*N*sizeof(float));
    cudaMallocManaged(&b, N*sizeof(float));
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&x_result, N*sizeof(float));
    // init: RHS is 3 everywhere except the last entry; initial guess x = 0
    for (int i = 0; i < N; i++)
    {
        b[i] = 3.0;
    }
    b[N-1] = ( float ) ( N + 1 );
    for ( int i = 0; i < N; i++ )
    {
        x[i] = 0.0;
    }
    initAMatrix<<<32, 32>>>(N, a);
    for ( int it = 0; it < IT; it++ ){
        JacobiIteration<<<32, 32>>>(N, a, b, x, x_result);
        cudaDeviceSynchronize();  // kernel must finish before host swaps/reads
        swap(x, x_result);
    }
    for(int i = 0; i < N; i++){
        printf("%f ",x[i]);
    }
    // FIX: release the managed allocations (original leaked them)
    cudaFree(a);
    cudaFree(b);
    cudaFree(x);
    cudaFree(x_result);
    return 0;
}
|
2,510 | #include<ostream>
#include<vector>
#include<stdio.h>
#include<cuda_runtime.h>
// reset GPGPUs to be safe.
// reset GPGPUs to be safe: walk the devices from highest index down to 0,
// selecting and resetting each in turn.
void reset_cuda_devs() {
    int dev_count = 0;
    cudaGetDeviceCount(&dev_count);
    for (int dev = dev_count - 1; dev >= 0; --dev)
    {
        printf("Resetting device %i", dev);
        cudaSetDevice(dev);
        cudaDeviceReset();
    }
    printf("\n");
}
/**
 * Print the byte sizes of the fundamental host types.
 *
 * FIX: sizeof yields a size_t, but the original printed it with %u --
 * undefined behavior on LP64 platforms where size_t is 64-bit and
 * unsigned int is 32-bit. Use the %zu length modifier.
 */
void sizes() {
    printf("sizeof(char) = %zu\n", sizeof(char));
    printf("sizeof(int) = %zu\n", sizeof(int));
    printf("sizeof(unsigned int) = %zu\n", sizeof(unsigned int));
    printf("sizeof(float) = %zu\n", sizeof(float));
    printf("sizeof(double) = %zu\n", sizeof(double));
    printf("sizeof(long double) = %zu\n", sizeof(long double));
    printf("sizeof(long) = %zu\n", sizeof(long));
    printf("sizeof(long long int) = %zu\n", sizeof(long long int));
    printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
}
#define LENGTH 1024
// Return a^3 (note: overflows int for |a| greater than ~1290).
__device__ int cube(int a) {
    int squared = a * a;
    return squared * a;
}
/**
 * Cube each of the first `length` ints of dev_array in place, staging
 * through shared memory. Precondition: length <= LENGTH (shared tile size)
 * and the launch covers at least `length` threads.
 *
 * FIX: the original never bounds-checked tidx against `length`, so any
 * launch wider than the data read/wrote out of bounds. Each access is now
 * guarded individually while the barriers stay unconditional (a guarded
 * early return would make __syncthreads() divergent).
 */
__global__ void kernel_cube_array(int * dev_array, int length) {
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ int shared_mem[LENGTH];
    if (tidx < length)
        shared_mem[tidx] = dev_array[tidx];
    __syncthreads();
    if (tidx < length)
        shared_mem[tidx] = cube(shared_mem[tidx]);
    __syncthreads();
    if (tidx < length)
        dev_array[tidx] = shared_mem[tidx];
}
// Demo driver: cube an array of 2s on the device and copy the result back.
__host__ int main() {
    std::vector<int> host_array(LENGTH, 2);
    int *dev_array = NULL;
    cudaMalloc((void **)&dev_array, LENGTH * sizeof(int));
    cudaMemcpy(dev_array, &host_array[0], LENGTH * sizeof(int), cudaMemcpyHostToDevice);
    // Single block of LENGTH threads, matching the kernel's shared tile.
    kernel_cube_array<<<dim3(1, 1, 1), dim3(LENGTH, 1, 1)>>>(dev_array, LENGTH);
    cudaDeviceSynchronize();
    cudaMemcpy(&host_array[0], dev_array, LENGTH * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_array);
}
|
2,511 | /**
* gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 43532.0f
#define BETA 12313.0f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// CPU reference: y = ALPHA * (A x) + BETA * (B x) for row-major N x N
// matrices; tmp receives the intermediate A x.
void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp)
{
	for (int i = 0; i < N; i++)
	{
		tmp[i] = 0;
		y[i] = 0;
		for (int j = 0; j < N; j++)
		{
			tmp[i] += A[i*N + j] * x[j];
			y[i] += B[i*N + j] * x[j];
		}
		y[i] = ALPHA * tmp[i] + BETA * y[i];
	}
}
// Deterministic inputs: x[i] = i/N; A[i][j] = i*j/N (row-major).
void init(DATA_TYPE* A, DATA_TYPE* x)
{
	for (int row = 0; row < N; row++)
	{
		x[row] = ((DATA_TYPE) row) / N;
		for (int col = 0; col < N; col++)
			A[row*N + col] = ((DATA_TYPE) row*col) / N;
	}
}
// Count and report elements whose CPU/GPU percent difference exceeds the
// configured threshold.
void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu)
{
	int fail = 0;
	for (int i = 0; i < N; i++)
	{
		if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
			fail++;
	}
	// Print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Report the target GPU's name and select it as the active device.
void GPU_argv_init()
{
	cudaDeviceProp props;
	cudaGetDeviceProperties(&props, GPU_DEVICE);
	printf("setting device %d with name %s\n", GPU_DEVICE, props.name);
	cudaSetDevice(GPU_DEVICE);
}
/**
 * One thread per row i: tmp[i] = (A x)_i and
 * y[i] = ALPHA * tmp[i] + BETA * (B x)_i.
 *
 * FIX: the original accumulated with "tmp[i] += ..." and "y[i] += ...",
 * so the result included whatever the device buffers already held --
 * and the host buffers copied in (see main/gesummvCuda) are malloc'd and
 * never initialized. The CPU reference zeroes both before accumulating;
 * accumulating in registers matches it and also avoids 2N global
 * read-modify-writes per row.
 */
__global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N)
	{
		DATA_TYPE t = 0;
		DATA_TYPE v = 0;
		for (int j = 0; j < N; j++)
		{
			t += a[i * N + j] * x[j];
			v += b[i * N + j] * x[j];
		}
		tmp[i] = t;
		y[i] = ALPHA * t + BETA * v;
	}
}
/**
 * Upload operands, run gesummv_kernel over all N rows, time the kernel with
 * rtclock, copy y back into y_outputFromGpu, and release device memory.
 *
 * Fixes over the original: device buffers were leaked (no cudaFree), and the
 * deprecated cudaThreadSynchronize() was used.
 */
void gesummvCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu)
{
	double t_start, t_end;
	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;
	DATA_TYPE *x_gpu;
	DATA_TYPE *y_gpu;
	DATA_TYPE *tmp_gpu;
	cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N);
	cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N);
	cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N);
	cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N);
	cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N);
	cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
	cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
	cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1);
	t_start = rtclock();
	gesummv_kernel<<< grid, block>>>(A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu);
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported replacement and also surfaces any kernel execution error.
	cudaDeviceSynchronize();
	t_end = t_start = t_start; // placeholder removed below
	t_end = rtclock();
	cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
	// BUG FIX: free device memory (the original leaked all five buffers).
	cudaFree(A_gpu);
	cudaFree(B_gpu);
	cudaFree(x_gpu);
	cudaFree(y_gpu);
	cudaFree(tmp_gpu);
}
/**
 * Driver: allocate host buffers, initialize inputs, run the GPU and CPU
 * versions of GESUMMV, time both, and compare the outputs.
 */
int main(int argc, char *argv[])
{
	double t_start, t_end;
	DATA_TYPE* A;
	DATA_TYPE* B;
	DATA_TYPE* x;
	DATA_TYPE* y;
	DATA_TYPE* y_outputFromGpu;
	DATA_TYPE* tmp;
	A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
	B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
	x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	y_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
	init(A, x);
	// BUG FIX: B was read by both implementations without ever being
	// initialized (undefined behavior). Give it the same deterministic
	// pattern init() uses for A. Also zero y/tmp, which gesummvCuda uploads
	// to the device as accumulator starting values.
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < N; j++)
		{
			B[i*N + j] = ((DATA_TYPE) i*j) / N;
		}
		y[i] = 0;
		tmp[i] = 0;
	}
	GPU_argv_init();
	gesummvCuda(A, B, x, y, tmp, y_outputFromGpu);
	t_start = rtclock();
	gesummv(A, B, x, y, tmp);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
	compareResults(y, y_outputFromGpu);
	free(A);
	free(B);
	free(x);
	free(y);
	free(y_outputFromGpu);
	free(tmp);
	return 0;
}
|
2,512 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 32
#define KRP_BLOCK_SIZE 8
__global__ void matmul_kernel(int m, int n, int k, const float *X, const float *KRP, float* MTTKRP) {
	/********************************************************************
	 * Tiled matrix multiply: MTTKRP (m x n) = X (m x k) * KRP (k x n).
	 * Each TILE_SIZE x TILE_SIZE thread block computes one output tile,
	 * staging operand tiles through shared memory; out-of-range lanes
	 * load zeros so no bounds checks are needed in the inner product.
	 ********************************************************************/
	__shared__ float shX[TILE_SIZE][TILE_SIZE];
	__shared__ float shKRP[TILE_SIZE][TILE_SIZE];
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int numTiles = (k + TILE_SIZE - 1) / TILE_SIZE;
	float acc = 0;
	for (int tile = 0; tile < numTiles; tile++) {
		const int xCol = tile * TILE_SIZE + threadIdx.x;
		const int krpRow = tile * TILE_SIZE + threadIdx.y;
		shX[threadIdx.y][threadIdx.x] =
			(row < m && xCol < k) ? X[row * k + xCol] : 0.0;
		shKRP[threadIdx.y][threadIdx.x] =
			(col < n && krpRow < k) ? KRP[krpRow * n + col] : 0.0;
		__syncthreads();                    // tiles fully loaded
		for (int t = 0; t < TILE_SIZE; t++)
			acc += shX[threadIdx.y][t] * shKRP[t][threadIdx.x];
		__syncthreads();                    // done reading before next load
	}
	if (row < m && col < n)
		MTTKRP[row * n + col] = acc;
}
void matmul(int d, int c, int k, const float *X, const float *KRP, float *MTTKRP)
{
	// Launch one TILE_SIZE x TILE_SIZE thread block per tile of the
	// d x c output matrix (ceiling division covers the ragged edges).
	const unsigned int BLOCK_SIZE = TILE_SIZE;
	dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
	dim3 dim_grid((c + BLOCK_SIZE - 1) / BLOCK_SIZE,
	              (d + BLOCK_SIZE - 1) / BLOCK_SIZE);
	matmul_kernel<<<dim_grid, dim_block>>>(d, c, k, X, KRP, MTTKRP);
}
// Khatri-Rao product kernel: one thread per element of KRP (m*n*c entries).
// NOTE(review): the index math assumes A has m rows, B has n rows, both with
// c columns, stored column-major (column j of A starts at A[m*j]) — confirm
// against the caller's layout.
__global__ void krp_kernel(int m, int n, int c, const float *A, const float *B, float *KRP)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; //0 to mnc
if(idx < m*n*c){
// each of the c output columns holds m*n entries
int col = int(idx/(m*n));
int start_col = m*n*col;
// position of this element inside its output column
int pos_in_col = idx - (start_col);
// int idx_a = int(pos_in_col/n)%n + (n*col);
// row of A repeats every n output entries; row of B cycles through 0..n-1
int idx_a = int(pos_in_col/n)+m*col;
int idx_b = int(pos_in_col%n+n*col);
// printf("\n bid=%d bdim=%d tid=%d idx=%d col=%d start_col=%d pos_in_col=%d idxA=%d idxB=%d\n",blockIdx.x, blockDim.x, threadIdx.x, idx, col, start_col, pos_in_col, idx_a, idx_b);
KRP[idx] = A[idx_a] * B[idx_b];
}
}
// Shared-memory variant of the Khatri-Rao kernel.
// NOTE(review): this kernel looks unfinished/experimental and should not be
// used as-is:
//   * tile_A/tile_B hold only KRP_BLOCK_SIZE floats, but at the end they are
//     indexed with idx_tileA = idx/n and idx_tileB = idx%n + n*col, which are
//     GLOBAL offsets and exceed KRP_BLOCK_SIZE for any realistic sizes
//     (out-of-bounds shared-memory reads).
//   * the staging loops read A[idx*m*c+w] / B[idx*n*c+w], indexing far past
//     the input arrays for idx > 0.
//   * every thread overwrites the same tile slot each loop iteration, and the
//     debug printf calls serialize execution.
// The plain krp_kernel above computes the same product correctly.
__global__ void krp_kernel_sharedmem(int m, int n, int c, const float *A, const float *B, float *KRP)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; //0 to mnc
__shared__ float tile_A[KRP_BLOCK_SIZE];
__shared__ float tile_B[KRP_BLOCK_SIZE];
int steps_colA = (m+KRP_BLOCK_SIZE-1) / KRP_BLOCK_SIZE;
int steps_colB = (n+KRP_BLOCK_SIZE-1) / KRP_BLOCK_SIZE;
int w;
if(idx < m*n*c){
// stage a chunk of A into shared memory (see NOTE(review) above re: indexing)
for(int i=0; i<steps_colA; i++){
w = i*KRP_BLOCK_SIZE+threadIdx.x;
if(w < m*c){
tile_A[threadIdx.x] = A[idx*m*c+w];
printf("\n IF A: bid=%d bdim=%d tid=%d idx=%d w=%d step=%d, tile_A=%f\n"
,blockIdx.x, blockDim.x, threadIdx.x, idx, w, i,tile_A[threadIdx.x]);
}
else{
tile_A[threadIdx.x] = 0.0;
// printf("\n ELSE A: bid=%d bdim=%d tid=%d idx=%d col=%d start_col=%d pos_in_col=%d idxA=%d idxB=%d w=%d step=%d tile_A=%f\n"
// ,blockIdx.x, blockDim.x, threadIdx.x, idx, col, start_col, pos_in_col, idx_a, idx_b, w, i,tile_A[threadIdx.x]);
}
}
__syncthreads();
// stage a chunk of B into shared memory (same concerns as the A loop)
for(int i=0; i<steps_colB; i++){
w = i*KRP_BLOCK_SIZE+threadIdx.x;
if(w < n*c){
tile_B[threadIdx.x] = B[idx*n*c+w];
printf("\n IF B: bid=%d bdim=%d tid=%d idx=%d w=%d step=%d, tile_B=%f\n"
,blockIdx.x, blockDim.x, threadIdx.x, idx, w, i,tile_B[threadIdx.x]);
}
else{
tile_B[threadIdx.x] = 0.0;
// printf("\n ELSE B: bid=%d bdim=%d tid=%d idx=%d col=%d start_col=%d pos_in_col=%d idxA=%d idxB=%d w=%d step=%d tile_B=%f\n"
// ,blockIdx.x, blockDim.x, threadIdx.x, idx, col, start_col, pos_in_col, idx_a, idx_b, w, i,tile_B[threadIdx.x]);
}
}
__syncthreads();
int col = int(idx/(m*n));
// NOTE(review): these are global offsets used against 8-element shared tiles
int idx_tileA = int(idx/n);
int idx_tileB = int(idx%n + n*col);
// if(blockIdx.x == 1)
printf("\n bid=%d bdim=%d tid=%d idx=%d col=%d tile_A=%f tile_B=%f\n",blockIdx.x, blockDim.x, threadIdx.x, idx, col, tile_A[idx_tileA],tile_B[idx_tileB]);
KRP[idx] = tile_A[idx_tileA] * tile_B[idx_tileB];
}
}
/**
 * Host wrapper: compute the Khatri-Rao product KRP (m*n x c) of A and B,
 * one thread per output element.
 *
 * BUG FIX: the original launched krp_kernel_sharedmem, which indexes its
 * KRP_BLOCK_SIZE-element shared tiles with global offsets (idx/n,
 * idx%n + n*col) — out-of-bounds shared reads for any realistic sizes —
 * and is full of debug printf calls. Launch the plain, correct kernel.
 */
void krp(int m, int n, int c, const float *A, const float *B, float *KRP)
{
	const unsigned int BLOCK_SIZE = KRP_BLOCK_SIZE;
	dim3 dim_block(BLOCK_SIZE, 1,1);
	dim3 dim_grid(((m*n*c)-1)/BLOCK_SIZE+1,1,1);
	krp_kernel<<<dim_grid,dim_block>>>(m,n,c,A,B,KRP);
}
/**
 * Matricized-tensor-times-Khatri-Rao-product:
 * first build KRP = A (kr) B (an (m*n) x c matrix), then
 * MTTKRP (d x c) = X (d x (m*n)) * KRP.
 */
void mttkrp(int m, int n, int c, int d, const float *A, const float *B, const float *X, float *KRP, float *MTTKRP)
{
	krp(m, n, c, A, B, KRP);
	matmul(d, c, m * n, X, KRP, MTTKRP);
}
|
2,513 | //=====================================================================
// MAIN FUNCTION
//=====================================================================
// Right-hand side of the calmodulin (CaM) signaling ODE module (Myocyte /
// Rodinia-style model): decodes 15 state variables from d_initvalu at
// valu_offset, evaluates the CaM / CaMKII / calcineurin (CaN) reaction
// fluxes for the given Ca concentration, and writes the 15 time derivatives
// into d_finavalu at the same offsets, plus one coupling term at com_offset.
// NOTE(review): `timeinst` and `d_com` are accepted but never read here —
// presumably kept for signature compatibility with sibling modules; confirm.
__device__ void kernel_cam(float timeinst, float* d_initvalu, float *d_finavalu, int valu_offset,
float* d_params, int params_offset, float* d_com, int com_offset, float Ca) {
//=====================================================================
// VARIABLES
//=====================================================================
// inputs
// float CaMtot;
float Btot;
float CaMKIItot;
float CaNtot;
float PP1tot;
float K;
float Mg;
// variable references
int offset_1;
int offset_2;
int offset_3;
int offset_4;
int offset_5;
int offset_6;
int offset_7;
int offset_8;
int offset_9;
int offset_10;
int offset_11;
int offset_12;
int offset_13;
int offset_14;
int offset_15;
// decoding input array
float CaM;
float Ca2CaM;
float Ca4CaM;
float CaMB;
float Ca2CaMB;
float Ca4CaMB;
float Pb2;
float Pb;
float Pt;
float Pt2;
float Pa;
float Ca4CaN;
float CaMCa4CaN;
float Ca2CaMCa4CaN;
float Ca4CaMCa4CaN;
// Ca/CaM parameters
float Kd02; // [uM^2]
float Kd24; // [uM^2]
float k20; // [s^-1]
float k02; // [uM^-2 s^-1]
float k42; // [s^-1]
float k24; // [uM^-2 s^-1]
// CaM buffering (B) parameters
float k0Boff; // [s^-1]
float k0Bon; // [uM^-1 s^-1] kon = koff/Kd
float k2Boff; // [s^-1]
float k2Bon; // [uM^-1 s^-1]
float k4Boff; // [s^-1]
float k4Bon; // [uM^-1 s^-1]
// using thermodynamic constraints
float k20B; // [s^-1] thermo constraint on loop 1
float k02B; // [uM^-2 s^-1]
float k42B; // [s^-1] thermo constraint on loop 2
float k24B; // [uM^-2 s^-1]
// Wi Wa Wt Wp
float kbi; // [s^-1] (Ca4CaM dissocation from Wb)
float kib; // [uM^-1 s^-1]
float kpp1; // [s^-1] (PP1-dep dephosphorylation rates)
float Kmpp1; // [uM]
float kib2;
float kb2i;
float kb24;
float kb42;
float kta; // [s^-1] (Ca4CaM dissociation from Wt)
float kat; // [uM^-1 s^-1] (Ca4CaM reassociation with Wa)
float kt42;
float kt24;
float kat2;
float kt2a;
// CaN parameters
float kcanCaoff; // [s^-1]
float kcanCaon; // [uM^-1 s^-1]
float kcanCaM4on; // [uM^-1 s^-1]
float kcanCaM4off; // [s^-1]
float kcanCaM2on;
float kcanCaM2off;
float kcanCaM0on;
float kcanCaM0off;
float k02can;
float k20can;
float k24can;
float k42can;
// CaM Reaction fluxes
float rcn02;
float rcn24;
// CaM buffer fluxes
float B;
float rcn02B;
float rcn24B;
float rcn0B;
float rcn2B;
float rcn4B;
// CaN reaction fluxes
float Ca2CaN;
float rcnCa4CaN;
float rcn02CaN;
float rcn24CaN;
float rcn0CaN;
float rcn2CaN;
float rcn4CaN;
// CaMKII reaction fluxes
float Pix;
float rcnCKib2;
float rcnCKb2b;
float rcnCKib;
float T;
float kbt;
float rcnCKbt;
float rcnCKtt2;
float rcnCKta;
float rcnCKt2a;
float rcnCKt2b2;
float rcnCKai;
// CaM equations
float dCaM;
float dCa2CaM;
float dCa4CaM;
float dCaMB;
float dCa2CaMB;
float dCa4CaMB;
// CaMKII equations
float dPb2; // Pb2
float dPb; // Pb
float dPt; // Pt
float dPt2; // Pt2
float dPa; // Pa
// CaN equations
float dCa4CaN; // Ca4CaN
float dCaMCa4CaN; // CaMCa4CaN
float dCa2CaMCa4CaN; // Ca2CaMCa4CaN
float dCa4CaMCa4CaN; // Ca4CaMCa4CaN
//=====================================================================
// EXECUTION
//=====================================================================
// inputs (total concentrations and ionic conditions from the parameter array)
// CaMtot = d_params[params_offset];
Btot = d_params[params_offset + 1];
CaMKIItot = d_params[params_offset + 2];
CaNtot = d_params[params_offset + 3];
PP1tot = d_params[params_offset + 4];
// NOTE(review): K and Mg are read from fixed slots 16/17, not params_offset —
// presumably shared across all module instances; confirm against the caller.
K = d_params[16];
Mg = d_params[17];
// variable references
offset_1 = valu_offset;
offset_2 = valu_offset + 1;
offset_3 = valu_offset + 2;
offset_4 = valu_offset + 3;
offset_5 = valu_offset + 4;
offset_6 = valu_offset + 5;
offset_7 = valu_offset + 6;
offset_8 = valu_offset + 7;
offset_9 = valu_offset + 8;
offset_10 = valu_offset + 9;
offset_11 = valu_offset + 10;
offset_12 = valu_offset + 11;
offset_13 = valu_offset + 12;
offset_14 = valu_offset + 13;
offset_15 = valu_offset + 14;
// decoding input array
CaM = d_initvalu[offset_1];
Ca2CaM = d_initvalu[offset_2];
Ca4CaM = d_initvalu[offset_3];
CaMB = d_initvalu[offset_4];
Ca2CaMB = d_initvalu[offset_5];
Ca4CaMB = d_initvalu[offset_6];
Pb2 = d_initvalu[offset_7];
Pb = d_initvalu[offset_8];
Pt = d_initvalu[offset_9];
Pt2 = d_initvalu[offset_10];
Pa = d_initvalu[offset_11];
Ca4CaN = d_initvalu[offset_12];
CaMCa4CaN = d_initvalu[offset_13];
Ca2CaMCa4CaN = d_initvalu[offset_14];
Ca4CaMCa4CaN = d_initvalu[offset_15];
// Ca/CaM parameters: dissociation constants depend on ionic strength (K)
// and magnesium, with a different fit above 1 mM Mg
if (Mg <= 1) {
Kd02 = 0.0025 * (1 + K / 0.94 - Mg / 0.012) * (1 + K / 8.1 + Mg / 0.022); // [uM^2]
Kd24 = 0.128 * (1 + K / 0.64 + Mg / 0.0014) * (1 + K / 13.0 - Mg / 0.153); // [uM^2]
} else {
Kd02 = 0.0025 * (1 + K / 0.94 - 1 / 0.012 + (Mg - 1) / 0.060)
* (1 + K / 8.1 + 1 / 0.022 + (Mg - 1) / 0.068); // [uM^2]
Kd24 = 0.128 * (1 + K / 0.64 + 1 / 0.0014 + (Mg - 1) / 0.005)
* (1 + K / 13.0 - 1 / 0.153 + (Mg - 1) / 0.150); // [uM^2]
}
k20 = 10; // [s^-1]
k02 = k20 / Kd02; // [uM^-2 s^-1]
k42 = 500; // [s^-1]
k24 = k42 / Kd24; // [uM^-2 s^-1]
// CaM buffering (B) parameters
k0Boff = 0.0014; // [s^-1]
k0Bon = k0Boff / 0.2; // [uM^-1 s^-1] kon = koff/Kd
k2Boff = k0Boff / 100; // [s^-1]
k2Bon = k0Bon; // [uM^-1 s^-1]
k4Boff = k2Boff; // [s^-1]
k4Bon = k0Bon; // [uM^-1 s^-1]
// using thermodynamic constraints
k20B = k20 / 100; // [s^-1] thermo constraint on loop 1
k02B = k02; // [uM^-2 s^-1]
k42B = k42; // [s^-1] thermo constraint on loop 2
k24B = k24; // [uM^-2 s^-1]
// Wi Wa Wt Wp
kbi = 2.2; // [s^-1] (Ca4CaM dissocation from Wb)
kib = kbi / 33.5e-3; // [uM^-1 s^-1]
kpp1 = 1.72; // [s^-1] (PP1-dep dephosphorylation rates)
Kmpp1 = 11.5; // [uM]
kib2 = kib;
kb2i = kib2 * 5;
kb24 = k24;
kb42 = k42 * 33.5e-3 / 5;
kta = kbi / 1000; // [s^-1] (Ca4CaM dissociation from Wt)
kat = kib; // [uM^-1 s^-1] (Ca4CaM reassociation with Wa)
kt42 = k42 * 33.5e-6 / 5;
kt24 = k24;
kat2 = kib;
kt2a = kib * 5;
// CaN parameters
kcanCaoff = 1; // [s^-1]
kcanCaon = kcanCaoff / 0.5; // [uM^-1 s^-1]
kcanCaM4on = 46; // [uM^-1 s^-1]
kcanCaM4off = 0.0013; // [s^-1]
kcanCaM2on = kcanCaM4on;
kcanCaM2off = 2508 * kcanCaM4off;
kcanCaM0on = kcanCaM4on;
kcanCaM0off = 165 * kcanCaM2off;
k02can = k02;
k20can = k20 / 165;
k24can = k24;
k42can = k20 / 2508;
// CaM Reaction fluxes (mass-action kinetics; Ca binds in pairs, hence Ca^2)
rcn02 = k02 * pow(Ca, 2) * CaM - k20 * Ca2CaM;
rcn24 = k24 * pow(Ca, 2) * Ca2CaM - k42 * Ca4CaM;
// CaM buffer fluxes (B = free buffer by conservation)
B = Btot - CaMB - Ca2CaMB - Ca4CaMB;
rcn02B = k02B * pow(Ca, 2) * CaMB - k20B * Ca2CaMB;
rcn24B = k24B * pow(Ca, 2) * Ca2CaMB - k42B * Ca4CaMB;
rcn0B = k0Bon * CaM * B - k0Boff * CaMB;
rcn2B = k2Bon * Ca2CaM * B - k2Boff * Ca2CaMB;
rcn4B = k4Bon * Ca4CaM * B - k4Boff * Ca4CaMB;
// CaN reaction fluxes (Ca2CaN = free CaN by conservation)
Ca2CaN = CaNtot - Ca4CaN - CaMCa4CaN - Ca2CaMCa4CaN - Ca4CaMCa4CaN;
rcnCa4CaN = kcanCaon * pow(Ca, 2) * Ca2CaN - kcanCaoff * Ca4CaN;
rcn02CaN = k02can * pow(Ca, 2) * CaMCa4CaN - k20can * Ca2CaMCa4CaN;
rcn24CaN = k24can * pow(Ca, 2) * Ca2CaMCa4CaN - k42can * Ca4CaMCa4CaN;
rcn0CaN = kcanCaM0on * CaM * Ca4CaN - kcanCaM0off * CaMCa4CaN;
rcn2CaN = kcanCaM2on * Ca2CaM * Ca4CaN - kcanCaM2off * Ca2CaMCa4CaN;
rcn4CaN = kcanCaM4on * Ca4CaM * Ca4CaN - kcanCaM4off * Ca4CaMCa4CaN;
// CaMKII reaction fluxes (Pix = inactive fraction by conservation;
// states are fractions of CaMKIItot)
Pix = 1 - Pb2 - Pb - Pt - Pt2 - Pa;
rcnCKib2 = kib2 * Ca2CaM * Pix - kb2i * Pb2;
rcnCKb2b = kb24 * pow(Ca, 2) * Pb2 - kb42 * Pb;
rcnCKib = kib * Ca4CaM * Pix - kbi * Pb;
T = Pb + Pt + Pt2 + Pa;
// autophosphorylation rate is a cubic fit in the active fraction T
kbt = 0.055 * T + 0.0074 * pow(T, 2) + 0.015 * pow(T, 3);
rcnCKbt = kbt * Pb - kpp1 * PP1tot * Pt / (Kmpp1 + CaMKIItot * Pt);
rcnCKtt2 = kt42 * Pt - kt24 * pow(Ca, 2) * Pt2;
rcnCKta = kta * Pt - kat * Ca4CaM * Pa;
rcnCKt2a = kt2a * Pt2 - kat2 * Ca2CaM * Pa;
rcnCKt2b2 = kpp1 * PP1tot * Pt2 / (Kmpp1 + CaMKIItot * Pt2);
rcnCKai = kpp1 * PP1tot * Pa / (Kmpp1 + CaMKIItot * Pa);
// CaM equations (1e-3 converts rates from per-second to per-millisecond)
dCaM = 1e-3 * (-rcn02 - rcn0B - rcn0CaN);
dCa2CaM = 1e-3 * (rcn02 - rcn24 - rcn2B - rcn2CaN + CaMKIItot * (-rcnCKib2 + rcnCKt2a));
dCa4CaM = 1e-3 * (rcn24 - rcn4B - rcn4CaN + CaMKIItot * (-rcnCKib + rcnCKta));
dCaMB = 1e-3 * (rcn0B - rcn02B);
dCa2CaMB = 1e-3 * (rcn02B + rcn2B - rcn24B);
dCa4CaMB = 1e-3 * (rcn24B + rcn4B);
// CaMKII equations
dPb2 = 1e-3 * (rcnCKib2 - rcnCKb2b + rcnCKt2b2); // Pb2
dPb = 1e-3 * (rcnCKib + rcnCKb2b - rcnCKbt); // Pb
dPt = 1e-3 * (rcnCKbt - rcnCKta - rcnCKtt2); // Pt
dPt2 = 1e-3 * (rcnCKtt2 - rcnCKt2a - rcnCKt2b2); // Pt2
dPa = 1e-3 * (rcnCKta + rcnCKt2a - rcnCKai); // Pa
// CaN equations
dCa4CaN = 1e-3 * (rcnCa4CaN - rcn0CaN - rcn2CaN - rcn4CaN); // Ca4CaN
dCaMCa4CaN = 1e-3 * (rcn0CaN - rcn02CaN); // CaMCa4CaN
dCa2CaMCa4CaN = 1e-3 * (rcn2CaN + rcn02CaN - rcn24CaN); // Ca2CaMCa4CaN
dCa4CaMCa4CaN = 1e-3 * (rcn4CaN + rcn24CaN); // Ca4CaMCa4CaN
// encode output array
d_finavalu[offset_1] = dCaM;
d_finavalu[offset_2] = dCa2CaM;
d_finavalu[offset_3] = dCa4CaM;
d_finavalu[offset_4] = dCaMB;
d_finavalu[offset_5] = dCa2CaMB;
d_finavalu[offset_6] = dCa4CaMB;
d_finavalu[offset_7] = dPb2;
d_finavalu[offset_8] = dPb;
d_finavalu[offset_9] = dPt;
d_finavalu[offset_10] = dPt2;
d_finavalu[offset_11] = dPa;
d_finavalu[offset_12] = dCa4CaN;
d_finavalu[offset_13] = dCaMCa4CaN;
d_finavalu[offset_14] = dCa2CaMCa4CaN;
d_finavalu[offset_15] = dCa4CaMCa4CaN;
// write to global variables for adjusting Ca buffering in EC coupling model
d_finavalu[com_offset] = 1e-3
* (2 * CaMKIItot * (rcnCKtt2 - rcnCKb2b)
- 2 * (rcn02 + rcn24 + rcn02B + rcn24B + rcnCa4CaN + rcn02CaN + rcn24CaN)); // [uM/msec]
//d_finavalu[JCa] = 1; // [uM/msec]
}
|
2,514 | #include "includes.h"
#define FLOAT_N 3214212.01
// One thread per matrix column j1 (1-based, PolyBench-style indexing).
// Builds the upper triangle of the (M+1)x(M+1) cross-product matrix
// symmat[j1][j2] = sum_i data[i][j1] * data[i][j2] and mirrors each entry
// to the lower triangle.
__global__ void calcsymmat(double* d_data, double* d_symmat, int M, int N)
{
	const int j1 = blockDim.x * blockIdx.x + threadIdx.x + 1;
	if (j1 <= (M+1)) {
		for (int j2 = j1; j2 < (M+1); j2++) {
			double acc = 0.0;
			for (int i = 1; i < N+1; i++) {
				acc += d_data[i*(M+1) + j1] * d_data[i*(M+1) + j2];
			}
			d_symmat[j1*(M+1) + j2] = acc;
			d_symmat[j2*(M+1) + j1] = acc;
		}
	}
}
2,515 | #include <vector>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <math.h>
#include <cstring>
#include <cuda.h>
#define ITER_LIMIT 500
#define MATRIX_MAX 10
using namespace std;
void nmfgpu(float *a, int r, int c, int k, int niters, float *w, float *h);
void matrix_print(float *a, int r, int c);
void nmf_seed(float *out, float *a, int r, int c, int k);
void surface_matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2);
__global__ void kernel(float *a, int r, int c, int k, int niters, float *w,
float *h, float* wt, float* ht, float* wta, float* wtw, float* wtwh, float* hdiv, float* aht,
float* wh, float* whht, float* wdiv, int total);
__device__ void matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2, int total);
__device__ void matrix_trans(float *out, float *a, int r, int c, int total);
__device__ float matrix_findmax(float *a, int r, int c, int total);
__device__ float matrix_distance(float *a, float *b, int r, int c, int total);
__device__ void matrix_elemproduct(float* out, float *a, float *b, int r, int c, int total);
__device__ void matrix_elemdivison(float* out, float *a, float *b, int r ,int c, int total);
/**
 * Host-side (single-threaded) matrix multiply:
 *   out (r1 x c2) = a (r1 x c1) * b (r2 x c2)
 * No-op when the inner dimensions disagree (c1 != r2).
 *
 * BUG FIX: the original computed the loop start as division*total, which for
 * nth == 0, total == 1 equals r1, so the row loop never executed and `out`
 * was never written (main then printed the original A instead of w*h). The
 * start must be division*nth (row 0 for this single-"thread" version).
 */
void surface_matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2){
	if (c1!=r2){
		return;
	}
	int nth = 0;
	int total = 1;
	int division = r1/total;
	// last "thread" absorbs the remainder rows
	for (int row = (division*nth); row < (nth==(total-1) ? r1 : (division*nth+division)); row++) {
		for (int col = 0; col < c2; col++) {
			out[row*c2+col] = 0;
			for (int inner = 0; inner < c1; inner++) {
				out[row*c2+col] += a[row*c1+inner] * b[inner*c2+col]*1.0;
			}
		}
	}
}
/**
 * Run `niters` multiplicative NMF update steps on the GPU:
 *   a (r x c)  ~=  w (r x k) * h (k x c)
 * w and h are seeded on the host, uploaded, updated in place by the kernel,
 * and copied back into the caller's buffers.
 *
 * Fixes over the original:
 *  - the scratch pointers were first `new float[...]`-allocated and then
 *    overwritten by cudaMalloc, leaking every host array;
 *  - h was filled via memcpy(h, w, r*k floats) into a k*c buffer — a buffer
 *    overflow whenever r > c; each factor is now seeded with its own size;
 *  - w and h were never uploaded, so the kernel iterated on uninitialized
 *    device memory.
 */
void nmfgpu(float *a, int r, int c, int k, int niters, float *w, float *h){
	float *dm;
	float *dm_w;
	float *dm_h;
	// device scratch buffers consumed by the update kernel
	float *dm_wt, *dm_ht, *dm_wta, *dm_wtw, *dm_wtwh, *dm_hdiv;
	float *dm_aht, *dm_wh, *dm_whht, *dm_wdiv;
	int msize = r*c*sizeof(float);
	int msize_w = r*k*sizeof(float);
	int msize_h = k*c*sizeof(float);
	nmf_seed(w, a, r, c, k);   // fills the r*k entries of w
	nmf_seed(h, a, c, c, k);   // fills c*k == k*c entries, i.e. exactly h
	cudaMalloc((void**)&dm, msize);
	cudaMalloc((void**)&dm_h, msize_h);
	cudaMalloc((void**)&dm_w, msize_w);
	cudaMalloc((void**)&dm_wt, r*k*sizeof(float));
	cudaMalloc((void**)&dm_ht, c*k*sizeof(float));
	cudaMalloc((void**)&dm_wta, k*c*sizeof(float));
	cudaMalloc((void**)&dm_wtw, k*k*sizeof(float));
	cudaMalloc((void**)&dm_wtwh, k*c*sizeof(float));
	cudaMalloc((void**)&dm_hdiv, k*c*sizeof(float));
	cudaMalloc((void**)&dm_aht, r*k*sizeof(float));
	cudaMalloc((void**)&dm_wh, r*c*sizeof(float));
	cudaMalloc((void**)&dm_whht,r*k*sizeof(float));
	cudaMalloc((void**)&dm_wdiv,r*k*sizeof(float));
	int total = 4;
	dim3 dimGrid(1,1);
	dim3 dimBlock(total, 1, 1);
	cudaMemcpy(dm, a, msize, cudaMemcpyHostToDevice);
	cudaMemcpy(dm_w, w, msize_w, cudaMemcpyHostToDevice);
	cudaMemcpy(dm_h, h, msize_h, cudaMemcpyHostToDevice);
	kernel<<<dimGrid, dimBlock>>>(dm, r, c, k, niters , dm_w, dm_h,
		dm_wt, dm_ht, dm_wta, dm_wtw, dm_wtwh, dm_hdiv, dm_aht,
		dm_wh, dm_whht, dm_wdiv, total);
	cudaMemcpy(w, dm_w, msize_w, cudaMemcpyDeviceToHost);
	cudaMemcpy(h, dm_h, msize_h, cudaMemcpyDeviceToHost);
	cudaFree(dm);
	cudaFree(dm_h);
	cudaFree(dm_w);
	cudaFree(dm_wt);
	cudaFree(dm_ht);
	cudaFree(dm_wta);
	cudaFree(dm_wtw);
	cudaFree(dm_wtwh);
	cudaFree(dm_hdiv);
	cudaFree(dm_aht);
	cudaFree(dm_wh);
	cudaFree(dm_whht);
	cudaFree(dm_wdiv);
}
/**
 * One-block NMF multiplicative-update kernel. Each matrix_* helper
 * partitions its rows/elements across the block's `total` threads, so every
 * producer -> consumer step needs a block-wide barrier.
 *
 * BUG FIX: the original had no __syncthreads() anywhere, so e.g. thread 0
 * could read parts of `wt` that thread 3 had not yet written — a data race
 * that only appeared to work under legacy lockstep warp execution.
 */
__global__ void kernel(float *a, int r, int c, int k, int niters, float *w, float *h,
	float* wt, float* ht, float* wta, float* wtw, float* wtwh, float* hdiv, float* aht,
	float* wh, float* whht, float* wdiv, int total){
	for (int i = 0; i < niters; i++){
		// H update: h .*= (w' a) ./ (w' w h)
		matrix_trans(wt, w, r, k, total);
		__syncthreads();                            // wt complete
		matrix_multi(wta, wt, a, k, r, r, c, total);
		matrix_multi(wtw, wt, w, k, r, r, k, total);
		__syncthreads();                            // wta/wtw complete
		matrix_multi(wtwh, wtw, h, k, k, k, c, total);
		__syncthreads();                            // wtwh complete
		matrix_elemdivison(hdiv, wta, wtwh, k, c, total);
		__syncthreads();                            // hdiv complete
		matrix_elemproduct(h, h, hdiv, k, c, total);
		__syncthreads();                            // new h visible to all
		// W update: w .*= (a h') ./ (w h h')
		matrix_trans(ht, h, k, c, total);
		__syncthreads();                            // ht complete
		matrix_multi(aht, a, ht, r, c, c, k, total);
		matrix_multi(wh, w, h, r, k, k, c, total);
		__syncthreads();                            // aht/wh complete
		matrix_multi(whht, wh, ht, r, c, c, k, total);
		__syncthreads();                            // whht complete
		matrix_elemdivison(wdiv, aht, whht, r, k, total);
		__syncthreads();                            // wdiv complete
		matrix_elemproduct(w, w, wdiv, r, k, total);
		__syncthreads();                            // new w visible to all
	}
}
// out (r1 x c2) = a (r1 x c1) * b (r2 x c2), no-op when c1 != r2.
// Output rows are partitioned across the block's `total` threads; the last
// thread absorbs any remainder rows.
__device__ void matrix_multi(float *out, float *a, float *b, int r1, int c1, int r2, int c2, int total){
	if (c1 != r2)
		return;
	const int nth = threadIdx.x;
	const int chunk = r1 / total;
	const int rowEnd = (nth == (total - 1)) ? r1 : (chunk * nth + chunk);
	for (int row = chunk * nth; row < rowEnd; row++) {
		for (int col = 0; col < c2; col++) {
			out[row*c2+col] = 0;
			for (int inner = 0; inner < c1; inner++) {
				out[row*c2+col] += a[row*c1+inner] * b[inner*c2+col]*1.0;
			}
		}
	}
}
// out = transpose(a), where a is r x c (out is c x r). Columns of `a` are
// partitioned across the block's `total` threads; the last thread takes any
// remainder columns.
__device__ void matrix_trans(float *out, float *a, int r, int c, int total){
	const int nth = threadIdx.x;
	const int chunk = c / total;
	const int colEnd = (nth == (total - 1)) ? c : (chunk * nth + chunk);
	for (int j = chunk * nth; j < colEnd; j++){
		for (int i = 0; i < r; i++){
			out[j*r+i] = a[i*c+j];
		}
	}
}
// Print the r x c matrix row by row, tab-separated, two decimal places.
void matrix_print(float *a, int r, int c){
	for (int row = 0; row < r; row++){
		for (int col = 0; col < c; col++){
			printf("%.2f\t", a[row*c+col]);
		}
		printf("\n");
	}
}
// Return the largest element of the r x c matrix `a` (linear scan),
// or 0 for an empty matrix.
// BUG FIX: the original empty-matrix guard was (r==0 && c==0); with
// r==0 XOR c==0 the matrix is still empty and a[0] was read out of bounds.
__device__ float matrix_findmax(float *a, int r, int c, int total){
	// this function can be boosted by find max in row/col and compare them
	if (r <= 0 || c <= 0){
		return 0;
	}
	float max = a[0];
	for (int i = 1; i < r*c; i++){
		if (a[i] > max){
			max = a[i];
		}
	}
	return max;
}
// Fill `out` (r*k entries) with pseudo-random values in [0, MATRIX_MAX),
// quantized to hundredths. `a` and `c` are unused but kept for signature
// compatibility with callers.
void nmf_seed(float *out, float *a, int r, int c, int k){
	// setup generator
	srand(time(NULL));
	for (int i = 0; i < r*k; i++){
		out[i] = (float)(MATRIX_MAX*((rand()%100/100.0)));
	}
}
// Squared Euclidean (Frobenius) distance between a and b, both r x c.
// FIX: the original computed abs(a[i]-b[i])*abs(a[i]-b[i]); squaring makes
// the abs redundant, and relying on the `abs` overload is fragile (in C mode
// `abs` truncates to int). d*d is numerically identical for floats.
__device__ float matrix_distance(float *a, float *b, int r, int c, int total){
	float distance = 0;
	for (int i = 0; i < r*c; i++){
		float d = a[i] - b[i];
		distance += d * d;
	}
	return distance;
}
// Element-wise (Hadamard) product: out[i] = a[i]*b[i] over this thread's
// slice of the r*c flat array; the last thread absorbs any remainder.
__device__ void matrix_elemproduct(float* out, float *a, float *b, int r, int c, int total){
	const int nth = threadIdx.x;
	const int chunk = r*c/total;
	const int end = (nth == (total - 1)) ? r*c : (chunk*nth + chunk);
	for (int i = chunk*nth; i < end; i++){
		out[i] = 1.0*a[i]*b[i];
	}
}
// Element-wise division: out[i] = a[i]/b[i] over this thread's slice of the
// r*c flat array; elements with a zero divisor are left untouched.
// BUG FIX: the original executed `return` on b[i]==0, silently abandoning
// every remaining element of the thread's slice and leaving stale values in
// `out`; only the offending element should be skipped.
__device__ void matrix_elemdivison(float* out, float *a, float *b, int r ,int c, int total){
	int nth = threadIdx.x;
	int division = r*c/total;
	for (int i=(division*nth); i<(nth==(total-1) ? r*c : (division*nth+division)); i++){
		if (b[i]==0){
			continue;
		}
		out[i]=1.0*a[i]/b[i];
	}
}
// Demo driver: factor a random 5x4 matrix A into w (5x3) * h (3x4) with the
// GPU NMF routine, then overwrite A with the reconstruction w*h and print it.
// NOTE(review): `float A[r*c]` etc. are variable-length arrays — a GCC/NVCC
// extension, not standard C++; confirm the target toolchain accepts them.
int main(int argc, char const *argv[])
{
// omp_set_num_threads(8);
/* code */
// float a[12]={2, 3, 4, 1, 2, 10, 11, 32,
// 3.1, 4.1, 31, 0.2};
// float b[12]={1, 2, 1, 3, 1, 4, 1, 5,
// 1, 6, 1, 7};
// matrix for nmf. rxc (row, col)
// int r = 300;
// int c = 200;
// int k = 150;
int r = 5;
int c = 4;
int k = 3;
// B is an example matrix; it is never used below
float B[20]={1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16,
17, 18, 19, 20};
// matrix generator: fill A with random values in [0, MATRIX_MAX)
srand(time(NULL));
float A[r*c];
for (int i=0;i<r;i++){
for (int j=0;j<c;j++){
A[i*c+j]=(float)(1.0*MATRIX_MAX*((float)(rand()%100/100.0)));
}
}
// nmf using originall
float w[r*k];
float h[k*c];
// double time_begin=get_wall_time();
for (int i=0;i<1;i++){
nmfgpu(A, r, c, k, 500, w, h);
printf("It's run: \t%d\n", i);
}
// double time_end=get_wall_time();
// printf("Matrix w is: \n");
// matrix_print(w, 5, 3);
// printf("Matrix h is: \n");
// matrix_print(h, 3, 4);
printf("Matrix A is: \n");
matrix_print(A, r, c);
// overwrite A with the reconstruction w*h before printing it
surface_matrix_multi(A, w, h, r, k, k, c);
printf("Matrix w*h is: \n");
matrix_print(A, r, c);
return 0;
}
2,516 | //pass
//--gridDim=[4096,1,1] --blockDim=[256,1,1]
// Element-wise vector addition c = a + b; one thread per element, with a
// guard so a grid larger than N does not write out of bounds.
__global__ void vectorAddGPU(float *a, float *b, float *c, int N)
{
	const int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i < N)
		c[i] = a[i] + b[i];
}
|
2,517 | #include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER 65535
// CPU version of the vector add function
// Serial reference: element-wise sum c[i] = a[i] + b[i] for the first n
// elements.
void vector_add_cpu(int *a, int *b, int *c, int n) {
	for (int idx = 0; idx < n; ++idx) {
		c[idx] = a[idx] + b[idx];
	}
}
// GPU version of the vector add function via "__global__" prefix.
// These kind of functions are called kernels in CUDA. When called, they are executed in parallel by N different threads
// as opposed to only once in a regular c++ function
__global__ void vector_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c) {
// Indexes by threadIdx.x only, so this kernel is only correct for a
// single-block launch. NOTE(review): main launches it as <<<1, ITER>>> with
// ITER = 65535, which exceeds the 1024 threads-per-block hardware limit, so
// the launch fails and no elements are added — confirm and fix the config.
int i = threadIdx.x;
// No for loop needed because the CUDA runtime
// will thread this ITER times
gpu_c[i] = gpu_a[i] + gpu_b[i];
}
/**
 * Time the CPU and GPU versions of vector addition over ITER elements.
 *
 * Fixes over the original:
 *  - the managed buffers gpu_a/gpu_b were never initialized, so the kernel
 *    (had it run) would have summed garbage; they are now filled alongside
 *    the host arrays (cudaMallocManaged memory is host-accessible);
 *  - the kernel launch was never checked. <<<1, ITER>>> with ITER = 65535
 *    exceeds the 1024 threads-per-block limit, so the launch fails silently
 *    and the reported "GPU time" measured nothing; the error is now reported.
 */
int main() {
	int *a, *b, *c;
	int *gpu_a, *gpu_b, *gpu_c;
	a = (int *)malloc(ITER * sizeof(int));
	b = (int *)malloc(ITER * sizeof(int));
	c = (int *)malloc(ITER * sizeof(int));
	// We need variables accessible to the GPU,
	// so cudaMallocManaged provides these
	cudaMallocManaged(&gpu_a, ITER * sizeof(int));
	cudaMallocManaged(&gpu_b, ITER * sizeof(int));
	cudaMallocManaged(&gpu_c, ITER * sizeof(int));
	for (int i = 0; i < ITER; ++i) {
		a[i] = i;
		b[i] = i;
		c[i] = i;
		gpu_a[i] = i;
		gpu_b[i] = i;
		gpu_c[i] = 0;
	}
	// Call the CPU function and time it
	auto cpu_start = Clock::now();
	vector_add_cpu(a, b, c, ITER);
	auto cpu_end = Clock::now();
	std::cout << "vector_add_cpu: "
		<< std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
		<< " nanoseconds.\n";
	// Call the GPU function and time it
	auto gpu_start = Clock::now();
	vector_add_gpu <<<1, ITER>>> (gpu_a, gpu_b, gpu_c);
	cudaError_t launch_status = cudaGetLastError();
	if (launch_status != cudaSuccess) {
		// NOTE: 65535 threads per block is over the hardware limit; the
		// kernel must be launched with multiple blocks (and indexed with
		// blockIdx) to actually run over all ITER elements.
		std::cout << "kernel launch failed: "
			<< cudaGetErrorString(launch_status) << "\n";
	}
	cudaError_t sync_status = cudaDeviceSynchronize();
	if (sync_status != cudaSuccess) {
		std::cout << "kernel execution failed: "
			<< cudaGetErrorString(sync_status) << "\n";
	}
	auto gpu_end = Clock::now();
	std::cout << "vector_add_gpu: "
		<< std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
		<< " nanoseconds.\n";
	// Free the GPU-function based memory allocations
	cudaFree(gpu_a);
	cudaFree(gpu_b);
	cudaFree(gpu_c);
	// Free the CPU-function based memory allocations
	free(a);
	free(b);
	free(c);
	return 0;
}
|
2,518 |
#include <stdio.h> //standard library
#include "/usr/include/linux/cuda.h" //cuda library
#include <stdlib.h>
#include <unistd.h>
// Increment the first element of A by one (single-thread counter kernel).
__global__ void kernel(int* A)
{
	A[0] += 1;
}
//~ __global__ void vecAdd(int* A, int* B)
//~ {
//~
//~
//~ }
//~ __global__ void passTheTorch(int* A)
//~ {
//~
//~ int i = threadIdx.x; //this is used as a thread identifier
//~
//~ }
/*more things need to be done right here
* you have to cast malloc in order to pass some stuff
* ridiculous stuff going on
*
*
*
*
*
*
* */
/**
 * Demo: a single int counter lives on the device; each loop iteration the
 * GPU increments it, the host copies it back and prints it.
 *
 * Fixes over the original: the comment claimed a 5-second sleep (it is 2),
 * and the end-of-loop host-to-device copy was redundant — the device already
 * holds exactly the value the host just copied from it.
 */
int main (void)
{
	int* hostMem = (int*)malloc(sizeof(int) * 1); //allocate one element of host memory
	int* cudaMem; //device-side counter
	cudaMalloc(&cudaMem, sizeof(int) * 1); //this will allocate memory inside of the cuda device
	hostMem[0] = 0; //start the counter at zero
	cudaMemcpy(cudaMem,hostMem,sizeof(int) * 1,cudaMemcpyHostToDevice); //copy memory from host to the device
	for(int i = 0; i < 1000; i++) //iterate 1000 times
	{
		printf("Value %d\n",hostMem[0]);
		kernel<<<1,1>>>(cudaMem); //increment the counter on the device
		cudaMemcpy(hostMem,cudaMem,sizeof(int) * 1,cudaMemcpyDeviceToHost); //copy memory from device to the host
		printf("Sleeping for 2 \n"); //sleeping
		sleep(2); //sleep for 2 seconds
		printf("Value %d\n\n",hostMem[0]); //the new value
	}
	free(hostMem); //free the host memory
	cudaFree(cudaMem); //free device memory
	return 0; //return 0
}
|
2,519 | #include <stdio.h>
#include <cuda_runtime.h>
void printValue(float *ip, const int n);
__global__ void modifyGlobalVariable(const int n);
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} \
#define N 32
__device__ float devData[N];
// Demonstrate __device__ global arrays: fill a host buffer, upload it into
// devData with cudaMemcpyToSymbol, scale each element by its index on the
// GPU, and read the result back with cudaMemcpyFromSymbol.
int main(int argc, char **argv) {
	float *value = (float *)malloc(N * sizeof(float));
	for (int i = 0; i < N; i++) {
		value[i] = 3.14f;
	}
	printValue(value, N);
	// upload into the file-scope __device__ array
	CHECK(cudaMemcpyToSymbol(devData, value, N * sizeof(float)));
	// one thread per element
	modifyGlobalVariable<<<1, N>>>(N);
	CHECK(cudaDeviceSynchronize());
	// copy back to host and show the scaled values
	CHECK(cudaMemcpyFromSymbol(value, devData, N * sizeof(float)));
	printValue(value, N);
	cudaDeviceReset();
	return 0;
}
// Scale each element of the file-scope __device__ array devData by its own
// index; one thread per element, guarded against oversized grids.
__global__ void modifyGlobalVariable(const int n) {
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) {
		devData[i] *= i;
	}
}
// Dump the first n floats of ip as a comma-separated line (two decimals).
void printValue(float *ip, const int n) {
	for (int i = 0; i < n; i++) {
		printf("%.2f, ", ip[i]);
	}
	printf("%c", '\n');
}
|
2,520 | #include <stdio.h>
#include <cuda.h>
#define NBIN 1000000
#define NUM_BLOCK 13
#define NUM_THREAD 192
int tid;
float pi = 0;
// Numerical integration of 4/(1+x^2) over [0,1): thread idx accumulates
// every (nthreads*nblocks)-th bin into its own sum[idx] slot; the host
// later adds the partial sums and multiplies by the bin width.
// NOTE(review): the sample point (i - 0.5)*step looks like it was meant to
// be (i + 0.5)*step for a midpoint rule over bins 0..nbin-1 — confirm.
__global__ void cal_pi( float *sum, int nbin, float step, int nthreads, int nblocks ) {
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	for( int i = idx; i < nbin; i += nthreads * nblocks ) {
		float x = ( i - 0.5 ) * step;
		sum[idx] += 4.0 / ( 1.0 + x * x );
	}
}
// Launch one partial sum per thread, copy the partials back, fold them on
// the host, and scale by the bin width to approximate pi.
int main( void ) {
	dim3 dimGrid( NUM_BLOCK, 1, 1 );
	dim3 dimBlock( NUM_THREAD, 1, 1 );
	const float step = 1.0 / NBIN;
	const size_t size = NUM_BLOCK * NUM_THREAD * sizeof( float );
	float *sumHost = (float *) malloc( size );
	float *sumDev;
	cudaMalloc( (void **) &sumDev, size );
	cudaMemset( sumDev, 0, size );   // partial sums must start at zero
	cal_pi <<< dimGrid, dimBlock >>> ( sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK );
	cudaMemcpy( sumHost, sumDev, size, cudaMemcpyDeviceToHost );
	for( tid = 0; tid < NUM_THREAD * NUM_BLOCK; tid++ ) {
		pi += sumHost[tid];
	}
	pi *= step;
	printf( "PI = %f\n", pi );
	free( sumHost );
	cudaFree( sumDev );
	return 0;
}
|
2,521 | #include "includes.h"
#define H 64
// Default values
// Default values (overridable elsewhere): problem size and launch shape
int N = 10000; //Size
int T = 32; //BlockSize (threads per block)
int B = 4; //Blocks
// Host Variables: input data, final histogram, and per-block clock readings
int* HostData;
int* HostHist;
int* HostTimer=NULL;
// Device Variables (mirrors of the host buffers)
int* DeviceData;
int* DeviceHist;
int* DeviceTimer=NULL;
// Timer Variables: wall-clock intervals for each phase of the run
struct timeval CPU_Time_start, CPU_Time_end;
struct timeval GPU_Time_start, GPU_Time_end;
struct timeval DeviceToHost_start, DeviceToHost_end;
struct timeval HostToDevice_start, HostToDevice_end;
struct timeval CPU_Partial_Time_start, CPU_Partial_Time_end;
struct timeval CPU_Cleanup_Time_start, CPU_Cleanup_Time_end;
struct timeval Total_Time_start, Total_Time_end;
// Function Declaration
void Cleanup(void);
void HistogramSequential(int* result, int* data, int size);
// Histogram kernel
/**
 * Per-block histogram with privatized per-thread sub-histograms.
 * Dynamic shared memory must be blockDim.x * H * sizeof(int); each thread
 * owns hist[tid*H .. tid*H+H-1]. The block then reduces the per-thread
 * histograms into BlockHist and writes H partial bins per block into
 * PartialHist[blockIdx.x*H ..]. The indexing of BlockHist[tid] and
 * BlockHist[tid+blockDim.x] requires blockDim.x == H/2 -- TODO confirm
 * against the launch site. timer[2*blockIdx.x .. +1] records thread 0's
 * start/stop clock() for the block.
 */
__global__ void histogram_kernel(int* PartialHist, int* DeviceData, int dataCount,int* timer)
{
unsigned int tid = threadIdx.x;
unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
clock_t start_clock=0;
clock_t stop_clock=0;
if(tid==0)
{
start_clock = clock();
}
__shared__ int BlockHist[H];
extern __shared__ int hist[];
// Each thread zeroes only its own slice, so no barrier is needed yet.
for(int h = 0; h < H; h++)
{
hist[tid * H + h]=0;
}
BlockHist[tid] = 0;
BlockHist[tid + blockDim.x] = 0;
// Grid-stride accumulation into the thread-private slice.
for(int pos = gid; pos < dataCount; pos += stride)
hist[tid * H + DeviceData[pos]]++;
// The reduction below reads every other thread's slice: a barrier is
// required here, otherwise threads race ahead and read partial counts.
__syncthreads();
for(int t_hist = 0; t_hist < blockDim.x; t_hist++)
{
BlockHist[tid] += hist[t_hist * H + tid];
BlockHist[tid+blockDim.x] += hist[(t_hist * H)+(tid + blockDim.x)];
}
// Each thread wrote only its own BlockHist entries above, so it can
// publish them without another barrier.
PartialHist[tid+(blockIdx.x * H)] = BlockHist[tid];
PartialHist[tid+(blockIdx.x * H) + blockDim.x] = BlockHist[tid + blockDim.x];
if(tid==0)
{
stop_clock = clock();
timer[blockIdx.x * 2] = start_clock;
timer[blockIdx.x * 2 + 1] = stop_clock;
}
}
2,522 | // ================================================================================================
// Tim Backus
// CIS 450 - High Performance Computing
// 3D Game of Life - CUDA Version
// ================================================================================================
// Output file for the Java visualizer (see writeGameStep for the format).
#define GOL_IO_FILENAME "gol3DOutput.dat"
// Threads per block along each of the three axes (8x8x8 = 512 threads/block).
#define GOL_CUDA_THREADS_SIZE 8
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <time.h>
#include <stdlib.h>
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather & Map) - Adds up the number of neighbors for a cell in a 3x3x3 cube and
// sets each cell to alive or dead depending on its number of neighbors and the rules for this
// current game.
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather & Map) - Adds up the number of neighbors for a cell in a 3x3x3 cube and
// sets each cell to alive or dead depending on its number of neighbors and the rules for this
// current game. Expects a 3D launch and dynamic shared memory of
// blockDim.x*blockDim.y*blockDim.z bytes (one char per thread).
// Fixes vs. original: torus wrap of -1 now maps to size-1 (was `size`, an
// out-of-bounds read); the in-block shared-memory test now covers the whole
// block (was `< blockDim*blockIdx + 1`); out-of-range threads no longer return
// before __syncthreads() (divergent barrier).
// ------------------------------------------------------------------------------------------------
__global__
void lifeItrKernel(const char* const d_in, char* d_out, const unsigned int xsize,
                   const unsigned int ysize, const unsigned int zsize, const unsigned int alow,
                   const unsigned int ahigh) {
  extern __shared__ char shMem[];
  // Calculate block and thread IDs
  const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
  const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
  const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
  const unsigned int stepX = ysize * zsize;
  const unsigned int arrayPos = threadPosX * stepX + threadPosY * zsize + threadPosZ;
  const unsigned int threadID = threadIdx.x * blockDim.y * blockDim.z +
                                threadIdx.y * blockDim.z + threadIdx.z;
  // In-bounds threads stage their cell in shared memory; ALL threads must
  // reach the barrier, so the bounds check must not return before it.
  const bool inBounds = (threadPosX < xsize) && (threadPosY < ysize) && (threadPosZ < zsize);
  if (inBounds) {
    shMem[threadID] = d_in[arrayPos];
  }
  __syncthreads();
  if (!inBounds) return;
  // Begin adding neighbors
  char sum = 0;
  // X-Axis neighbors
  int xc, xcoord;
  for(xc = threadPosX - 1; xc <= threadPosX + 1; xc++) {
    // Wrap X-Axis (torus): -1 -> xsize-1, xsize -> 0
    xcoord = xc;
    if(xc < 0) xcoord = xsize - 1;
    else if(xc >= xsize) xcoord = 0;
    // Y-Axis neighbors
    int yc, ycoord;
    for(yc = threadPosY - 1; yc <= threadPosY + 1; yc++) {
      // Wrap Y-Axis
      ycoord = yc;
      if(yc < 0) ycoord = ysize - 1;
      else if(yc >= ysize) ycoord = 0;
      // Z-Axis neighbors
      int zc, zcoord;
      for(zc = threadPosZ - 1; zc <= threadPosZ + 1; zc++) {
        // Wrap Z-Axis
        zcoord = zc;
        if(zc < 0) zcoord = zsize - 1;
        else if(zc >= zsize) zcoord = 0;
        // Don't count the cell itself
        if(threadPosX != xcoord || threadPosY != ycoord || threadPosZ != zcoord) {
          // Use shared memory instead of global memory if the current coord is in this
          // thread block: [blockDim*blockIdx, blockDim*(blockIdx+1)) on each axis.
          if((xcoord >= blockDim.x * blockIdx.x && xcoord < blockDim.x * (blockIdx.x + 1)) &&
             (ycoord >= blockDim.y * blockIdx.y && ycoord < blockDim.y * (blockIdx.y + 1)) &&
             (zcoord >= blockDim.z * blockIdx.z && zcoord < blockDim.z * (blockIdx.z + 1))) {
            sum += shMem[(xcoord % blockDim.x) * blockDim.y * blockDim.z + (ycoord % blockDim.y) *
                         blockDim.z + (zcoord % blockDim.z)];
          } else {
            sum += d_in[xcoord * stepX + ycoord * zsize + zcoord];
          }
        }
      }
    }
  }
  // Set the cell's dead or alive status based on its neighbor count
  // (alive iff alow <= sum <= ahigh, inclusive on both ends).
  if (sum < alow || sum > ahigh) {
    d_out[arrayPos] = 0;
  } else {
    d_out[arrayPos] = 1;
  }
}
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather) - Adds up the number of neighbors for a cell in a 3x3x3 cube.
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Gather) - Adds up the number of neighbors for a cell in a 3x3x3 cube
// on a torus (edges wrap). Writes the neighbor count (0..26) into d_out.
// Fix vs. original: wrapping -1 now maps to size-1; the original mapped it to
// `size`, one past the end, causing an out-of-bounds global read.
// ------------------------------------------------------------------------------------------------
__global__
void sumNeighborsKernel(const char* const d_in, char* d_out, const unsigned int xsize,
                        const unsigned int ysize, const unsigned int zsize) {
  // Calculate block and thread IDs
  const int threadPosX = blockIdx.x * blockDim.x + threadIdx.x;
  const int threadPosY = blockIdx.y * blockDim.y + threadIdx.y;
  const int threadPosZ = blockIdx.z * blockDim.z + threadIdx.z;
  const unsigned int stepX = ysize * zsize;
  const unsigned int arrayPos = threadPosX * stepX + threadPosY * zsize + threadPosZ;
  // Ensure thread bounds (grid may overshoot the volume)
  if(threadPosX > xsize - 1) return;
  if(threadPosY > ysize - 1) return;
  if(threadPosZ > zsize - 1) return;
  char sum = 0;
  // X-Axis neighbors
  int xc, xcoord;
  for(xc = threadPosX - 1; xc <= threadPosX + 1; xc++) {
    // Wrap X-Axis: -1 -> xsize-1, xsize -> 0
    xcoord = xc;
    if(xc < 0) xcoord = xsize - 1;
    else if(xc >= xsize) xcoord = 0;
    // Y-Axis neighbors
    int yc, ycoord;
    for(yc = threadPosY - 1; yc <= threadPosY + 1; yc++) {
      // Wrap Y-Axis
      ycoord = yc;
      if(yc < 0) ycoord = ysize - 1;
      else if(yc >= ysize) ycoord = 0;
      // Z-Axis neighbors
      int zc, zcoord;
      for(zc = threadPosZ - 1; zc <= threadPosZ + 1; zc++) {
        // Wrap Z-Axis
        zcoord = zc;
        if(zc < 0) zcoord = zsize - 1;
        else if(zc >= zsize) zcoord = 0;
        // Don't count the cell itself
        if(threadPosX != xcoord || threadPosY != ycoord || threadPosZ != zcoord) {
          sum += d_in[xcoord * stepX + ycoord * zsize + zcoord];
        }
      }
    }
  }
  d_out[arrayPos] = sum;
}
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Map) - Sets each cell to alive or dead depending on its number of neighbors and
// the rules for this current game.
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// CUDA kernel (Map) - Marks a cell alive (1) when its neighbor count from d_nei
// lies in [alow, ahigh] inclusive, dead (0) otherwise.
// ------------------------------------------------------------------------------------------------
__global__
void setAliveDeadKernel(const char* const d_nei, char* d_out, const unsigned int xs,
                        const unsigned int ys, const unsigned int zs, const unsigned int alow,
                        const unsigned int ahigh) {
  // Flattened 3D position of this thread.
  const int px = blockIdx.x * blockDim.x + threadIdx.x;
  const int py = blockIdx.y * blockDim.y + threadIdx.y;
  const int pz = blockIdx.z * blockDim.z + threadIdx.z;
  // Guard the grid tail on every axis.
  if (px >= xs || py >= ys || pz >= zs) return;
  const int cell = px * (ys * zs) + py * zs + pz;
  // The two original branches were exhaustive, so a single write suffices.
  const char neighbors = d_nei[cell];
  d_out[cell] = (neighbors < alow || neighbors > ahigh) ? 0 : 1;
}
// ------------------------------------------------------------------------------------------------
// Returns the 1D position of a simulated 3D array
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Flattens a 3D coordinate (xp, yp, zp) into a 1D index for a ys-by-zs row-major volume.
// ------------------------------------------------------------------------------------------------
int getArrIndex(const unsigned int xp, const unsigned int yp, const unsigned int zp,
                const unsigned int ys, const unsigned int zs) {
  // Horner form of xp*ys*zs + yp*zs + zp.
  return (int)((xp * ys + yp) * zs + zp);
}
// ------------------------------------------------------------------------------------------------
// Prints a 3D array.
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Prints a 3D array: one "Dimension" header per x-slice, then y rows of z values.
// ------------------------------------------------------------------------------------------------
void print3DArray(char* arr, unsigned const int x, unsigned const int y, unsigned const int z) {
  for(int xi = 0; xi < x; ++xi) {
    printf("Dimension %d:\n", xi);
    for(int yi = 0; yi < y; ++yi) {
      for(int zi = 0; zi < z; ++zi) {
        printf("%d ", (char)arr[getArrIndex(xi, yi, zi, y, z)]);
      }
      printf("\n");
    }
    printf("\n");
  }
}
// ------------------------------------------------------------------------------------------------
// Writes cells to alive or dead, randomly.
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Seeds the RNG with the wall clock, then marks each cell alive with roughly
// `chance` percent probability (chance in 0..100).
// ------------------------------------------------------------------------------------------------
void randomizeGrid(char* grid, unsigned const int size, unsigned const int chance) {
  srand(time(NULL));
  for(unsigned int cell = 0; cell < size; ++cell) {
    grid[cell] = (rand() % 100 <= chance) ? (char)1 : (char)0;
  }
}
// ------------------------------------------------------------------------------------------------
// Initializes the game data file.
// Line 1: <iteration count> <x-size> <y-size> <z-size>
// Line 2: Blank
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Creates/truncates the game data file and writes its header:
// Line 1: <iteration count> <x-size> <y-size> <z-size>
// Line 2: Blank
// ------------------------------------------------------------------------------------------------
void initGameFile(const unsigned int itrs, const unsigned int x, const unsigned int y,
                  const unsigned int z) {
  FILE *out = fopen(GOL_IO_FILENAME, "w+");
  fprintf(out, "%d %d %d %d\n\n", itrs, x, y, z);
  fclose(out);
}
// ------------------------------------------------------------------------------------------------
// Writes a game to a file for visualization within Java.
// For every iteration, a block of text is created of the format:
// "<x-coord>:<z-coords for y=0>, <z-coords for y=1>, ..."
// Z-coords are represented by a 0 or 1 for each z-coordinate
// Example: Game with 5 iterations, x=3, y=7, z=4
// 5 3 7 4
//
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
// 0:0000,0000,0000,0000,0000,0000,0000
// 1:0000,0000,0000,0100,0000,0000,0001
// 2:0000,0000,0010,0100,0001,0011,0000
//
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Appends one iteration to the output file, one line per x-slice:
// "<x>:<z-digits for y=0>,<z-digits for y=1>,..."
// ------------------------------------------------------------------------------------------------
void writeGameStep(char* arr, unsigned const int x, unsigned const int y, unsigned const int z) {
  FILE *out = fopen(GOL_IO_FILENAME, "a");
  for(int xi = 0; xi < x; ++xi) {
    fprintf(out, "%d:", xi);
    for(int yi = 0; yi < y; ++yi) {
      // Comma-separate the per-y groups (no comma before the first group).
      if(yi > 0) {
        fprintf(out, ",");
      }
      for(int zi = 0; zi < z; ++zi) {
        fprintf(out, "%d", arr[getArrIndex(xi, yi, zi, y, z)]);
      }
    }
    fprintf(out, "\n");
  }
  fclose(out);
}
// ------------------------------------------------------------------------------------------------
// Runs the Game of Life.
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Runs the Game of Life: randomizes the grid, uploads it, then iterates the
// fused lifeItrKernel `iterations` times, optionally printing and/or writing
// each step to disk. alow/ahigh are the inclusive neighbor-count bounds for a
// cell to be alive on the next step.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are unchecked here; only
// the post-kernel cudaDeviceSynchronize is checked.
// ------------------------------------------------------------------------------------------------
void runLife(const unsigned int iterations, unsigned int xsize, const unsigned int ysize,
const unsigned int zsize, const unsigned int initc, const unsigned int alow,
const unsigned int ahigh, const unsigned int printArr, const unsigned int writeOut) {
// Memory values
const unsigned int arrSize = xsize * ysize * zsize;
const unsigned int arrMem = arrSize * sizeof(char);
// GPU grid dimensions: ceil-divide so partial blocks cover the volume edges
const int gx = ceil((double) xsize / GOL_CUDA_THREADS_SIZE);
const int gy = ceil((double) ysize / GOL_CUDA_THREADS_SIZE);
const int gz = ceil((double) zsize / GOL_CUDA_THREADS_SIZE);
printf("Grid dimension: %d,%d,%d\n", gx, gy, gz);
dim3 gridDim(gx, gy, gz);
// GPU thread dimensions (these host locals shadow the device built-in names,
// which is legal in host code)
const int tx = GOL_CUDA_THREADS_SIZE;
const int ty = GOL_CUDA_THREADS_SIZE;
const int tz = GOL_CUDA_THREADS_SIZE;
printf("Block dimension: %d,%d,%d\n", tx, ty, tz);
dim3 blockDim(tx, ty, tz);
// Initialize game space
char *h_in = (char *) malloc(arrMem);
printf("Randomizing initial game (could take a while)...\n");
randomizeGrid(h_in, arrSize, initc);
// Print the initial array if enabled
if(printArr) {
printf("Initial grid:\n");
print3DArray(h_in, xsize, ysize, zsize);
}
// Initialize the output file if enabled
if(writeOut) {
initGameFile(iterations, xsize, ysize, zsize);
writeGameStep(h_in, xsize, ysize, zsize);
}
// Pointers for GPU game data
char *d_in;
char *d_out;
// Allocate input array on GPU
printf("Allocating %d bytes of memory on the GPU...\n",
(int)(xsize * ysize * zsize * sizeof(char)));
cudaMalloc(&d_in, arrMem);
// Allocate output array on GPU
cudaMalloc(&d_out, arrMem);
// Copy the host data to the GPU
cudaMemcpy(d_in, h_in, arrMem, cudaMemcpyHostToDevice);
// Do Game of Life iterations
int itrNum;
for(itrNum = 0; itrNum < iterations; itrNum++) {
printf("Iteration %d ", itrNum);
// Run the kernel to simulate an iteration of 3D life; dynamic shared memory
// holds one char per thread of the block (see lifeItrKernel)
clock_t start = clock();
lifeItrKernel<<<gridDim, blockDim, (tx * ty * tz * sizeof(char))>>>(d_in, d_out, xsize, ysize, zsize, alow, ahigh);
cudaError_t cerr = cudaDeviceSynchronize();
if(cerr != cudaSuccess) {
printf("Kernel lifeItr failed with error \"%s\".\n", cudaGetErrorString(cerr));
}
clock_t end = clock();
// Copy the memory back to the input array for the next iteration
cudaMemcpy(d_in, d_out, arrMem, cudaMemcpyDeviceToDevice);
// NOTE(review): clock_t difference printed with %d -- relies on clock_t
// fitting in int on this platform
printf("took %d ticks.\n", (end - start));
// Print and write out if enabled
if(printArr || writeOut) {
cudaMemcpy(h_in, d_out, arrMem, cudaMemcpyDeviceToHost);
if(printArr) {
print3DArray(h_in, xsize, ysize, zsize);
}
if(writeOut) {
writeGameStep(h_in, xsize, ysize, zsize);
}
}
}
// Free memory
cudaFree(d_in);
cudaFree(d_out);
free(h_in);
}
// ------------------------------------------------------------------------------------------------
// Prints the usage message if a bad number of runtime arguments are passed.
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Prints the expected command-line arguments when the argument count is wrong.
// ------------------------------------------------------------------------------------------------
void printUsage() {
  printf("Arguments (separated by spaces):\n");
  printf("  MAX_ITERATIONS\n  SIZE_X\n  SIZE_Y\n  SIZE_Z\n  INITIAL_ALIVE_CHANCE (1-100)\n");
  printf("  ALIVE_THRESHOLD_LOW (inclusive)\n  ALIVE_THRESHOLD_HIGH (inclusive)\n");
  printf("  PRINT_ARRAY? (0=no, 1=yes)\n  WRITE_TO_FILE? (0=no, 1=yes)\n");
}
// ------------------------------------------------------------------------------------------------
// Main Method
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Main Method: parses the nine positional arguments, reports the configuration
// and hands off to runLife.
// ------------------------------------------------------------------------------------------------
int main(int argc, char *argv[]) {
  // Ensure proper runtime argument count (program name + 9 parameters)
  if(argc != 10) {
    printUsage();
    return EXIT_SUCCESS;
  }
  // Positional arguments, in order.
  unsigned const int iterations = atoi(argv[1]);  // number of life iterations
  unsigned const int sizeX      = atoi(argv[2]);  // volume X extent
  unsigned const int sizeY      = atoi(argv[3]);  // volume Y extent
  unsigned const int sizeZ      = atoi(argv[4]);  // volume Z extent
  unsigned const int initChance = atoi(argv[5]);  // initial alive chance (1-100)
  unsigned const int aliveLow   = atoi(argv[6]);  // alive threshold low (inclusive)
  unsigned const int aliveHigh  = atoi(argv[7]);  // alive threshold high (inclusive)
  unsigned const int printArray = atoi(argv[8]);  // print grid each step?
  unsigned const int writeOut   = atoi(argv[9]);  // write steps to disk?
  // Print game information to the console
  printf("Starting %d iteration Game of Life (CUDA) with sizes x=%d, y=%d, z=%d\n", iterations,
         sizeX, sizeY, sizeZ);
  printf("  initial alive chance=%d, neighbors for alive=%d to %d\n", initChance,
         aliveLow, aliveHigh);
  if(writeOut) {
    printf("  File output enabled.\n");
  }
  runLife(iterations, sizeX, sizeY, sizeZ, initChance, aliveLow, aliveHigh, printArray, writeOut);
  return EXIT_SUCCESS;
}
2,523 | #include "includes.h"
// Row-sum kernel: thread i accumulates row i of the (*dev_size)-wide matrix
// dev_a into dev_b[i]. dev_b is expected to be zeroed by the caller, since
// this kernel adds into it.
__global__ void addKernel(int * dev_a, int* dev_b ,int* dev_size)
{
    const int row = threadIdx.x;
    const int width = *dev_size;
    for (int col = 0; col < width; ++col)
    {
        dev_b[row] += dev_a[width * row + col];
    }
}
2,524 | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <algorithm>
#include <stdlib.h>
#include <time.h>
using namespace std;
__device__
// Extended Euclidean algorithm: returns gcd(a, b) and writes Bezout
// coefficients so that a*(*x) + b*(*y) == gcd(a, b).
int xgcd(int a, int b, int *x, int *y)
{
    // Base case: gcd(0, b) = b = a*0 + b*1.
    if (a == 0)
    {
        *x = 0;
        *y = 1;
        return b;
    }
    // Recurse on (b mod a, a), then back-substitute the coefficients.
    int subX, subY;
    const int g = xgcd(b % a, a, &subX, &subY);
    *x = subY - (b / a) * subX;
    *y = subX;
    return g;
}
__global__
// For each i, computes the modular inverse of x[i] modulo y[i] (when it
// exists) and stores it back into x[i]. Elements with gcd(x[i], y[i]) != 1
// have no inverse and are left unchanged.
void modinv(int *x, int *y, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
    {
        int a, b;
        int g = xgcd(x[i], y[i], &a, &b);
        // No inverse exists: skip this element. (The original used `return`,
        // which silently abandoned the rest of this thread's grid-stride work.)
        if (g != 1) continue;
        // Normalize the Bezout coefficient into [0, y[i]) and store it.
        // The original computed this value but discarded it, so the kernel
        // had no observable effect.
        x[i] = (a % y[i] + y[i]) % y[i];
    }
}
int main(void)
{
    // Batch modular-inverse demo: fill x with random values in [1,10],
    // use the fixed modulus 11, and run the kernel over all elements.
    const int count = 1 << 22;
    int *x, *y;
    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, count * sizeof(int));
    cudaMallocManaged(&y, count * sizeof(int));
    srand((unsigned)time(NULL));
    // initialize x and y arrays on the host
    for (int i = 0; i < count; ++i)
    {
        x[i] = rand() % 10 + 1;
        y[i] = 11;
    }
    // Launch with enough blocks to cover every element once.
    const int blockSize = 256;
    const int numBlocks = (count + blockSize - 1) / blockSize;
    modinv<<<numBlocks, blockSize>>>(x, y, count);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
2,525 | #include <iostream>
#include <math.h>
#include <unistd.h>
#include <memory>
#include <array>
#include <algorithm>
#include <vector>
// Number of independent CUDA streams (and fixtures) exercised concurrently.
const std::size_t NUMBER_STREAMS = 80;
// Elements per vector (1M floats per buffer).
const std::size_t N = 1 << 20;
// Per-stream test fixture: one CUDA stream, three host-side arrays and three
// buffers used as the kernel's operands, plus a mismatch counter filled in
// by the verification pass in main.
struct test_struct
{
cudaStream_t stream;
std::unique_ptr<std::array<float, N>> h_a;
std::unique_ptr<std::array<float, N>> h_b;
std::unique_ptr<std::array<float, N>> h_c;
float* d_a;
float* d_b;
float* d_c;
std::size_t error_count;
};
typedef struct test_struct test_struct_t;
// Initializes one per-stream fixture: allocates the three host arrays, creates
// the stream, allocates the three device-facing buffers and fills a/b with
// 1.0/2.0. Returns 0 on success, a negative line number on failure.
// NOTE(review): d_a/d_b/d_c are allocated with cudaMallocHost (pinned host
// memory) rather than cudaMalloc; this works through unified addressing, but
// the d_ prefix suggests device memory was intended -- confirm.
int make_test_struct(test_struct_t& t)
{
t.h_a.reset(new std::array<float, N>);
t.h_b.reset(new std::array<float, N>);
t.h_c.reset(new std::array<float, N>);
if (nullptr == t.h_a)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
// Fixed: the original repeated the h_a check three times (copy-paste bug),
// leaving h_b and h_c unchecked.
if (nullptr == t.h_b)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (nullptr == t.h_c)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaStreamCreate(&t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&t.d_a, N * sizeof(t.d_a[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&t.d_b, N * sizeof(t.d_b[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&t.d_c, N * sizeof(t.d_c[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
std::fill(t.h_a->begin(), t.h_a->end(), 1.0);
std::fill(t.h_b->begin(), t.h_b->end(), 2.0);
t.error_count = 0;
return 0;
}
// Releases one fixture's pinned buffers and its stream.
// Returns 0 on success, a negative line number on the first failure.
int destroy_test_struct(test_struct_t& t)
{
    float* const buffers[] = { t.d_a, t.d_b, t.d_c };
    for (float* buf : buffers)
    {
        if (cudaSuccess != cudaFreeHost(buf))
        {
            std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
            return -__LINE__;
        }
    }
    if (cudaSuccess != cudaStreamDestroy(t.stream))
    {
        std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
        return -__LINE__;
    }
    return 0;
}
__global__
// Grid-stride elementwise kernel: c[i] = a[i] + b[i] + 1 for i in [0, n).
void vec_add(float* const c, const float* const a, const float* const b, const std::size_t n)
{
    const std::size_t first = threadIdx.x + (blockIdx.x * blockDim.x);
    const std::size_t step = blockDim.x * gridDim.x;
    std::size_t i = first;
    while (i < n)
    {
        c[i] = a[i] + b[i] + 1;
        i += step;
    }
}
// Drives NUMBER_STREAMS concurrent copy/compute/copy pipelines, one per
// stream, then synchronizes and verifies every result element equals 4
// (1.0 + 2.0 + 1 from vec_add). Returns 0 on success, a negative line
// number on the first CUDA failure.
int main(void)
{
const dim3 grid_size (1, 1, 1);
const dim3 block_size (1024, 1, 1);
std::array<test_struct_t, NUMBER_STREAMS> streams;
// NOTE(review): make_test_struct's return codes are discarded by for_each;
// a failed fixture still proceeds into the launch loop below.
std::for_each(streams.begin(), streams.end(), make_test_struct);
// Enqueue H2D copies, the kernel and the D2H copy on each fixture's own
// stream so the per-stream pipelines can overlap.
for (test_struct_t& t: streams)
{
if (cudaSuccess != cudaMemcpyAsync(t.d_a, t.h_a->data(), N * sizeof(t.d_a[0]), cudaMemcpyHostToDevice, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMemcpyAsync(t.d_b, t.h_b->data(), N * sizeof(t.d_b[0]), cudaMemcpyHostToDevice, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
vec_add<<<grid_size, block_size, 0, t.stream>>>(t.d_c, t.d_a, t.d_b, N);
if (cudaSuccess != cudaMemcpyAsync(t.h_c->data(), t.d_c, N * sizeof(t.d_c[0]), cudaMemcpyDeviceToHost, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
}
// Drain every stream before touching the host-side results.
for (test_struct_t& t: streams)
{
if (cudaSuccess != cudaStreamSynchronize(t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
}
// Verify: every output element must be 1.0 + 2.0 + 1 == 4.
for (test_struct_t& t: streams)
{
for (std::size_t i = 0; i < N; ++ i)
{
if (4 != t.h_c->at(i))
{
++ t.error_count;
}
}
if (0 != t.error_count)
{
std::cout << t.error_count << std::endl << std::flush;
}
}
std::for_each(streams.begin(), streams.end(), destroy_test_struct);
return 0;
}
|
2,526 | #include<stdio.h>
#include<cuda.h>
/*__global__ void device_func(int *a)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
a[tid]+=1;
//int tmp;
for(int i=0;i<6;i++)
{
if(i%2==0)
{
even(a)
/*tmp=a[threadIdx.x*2];
a[threadIdx.x]=a[threadIdx.x*2+1];
a[threadIdx.x*2+1]=tmp;
}
else{
odd(a)
/*tmp=a[threadIdx.x*2+1];
a[threadIdx.x+1]=a[threadIdx.x*2+2];
a[threadIdx.x*2+2]=tmp;
}}*/
// Even phase of odd-even transposition sort: thread t orders the
// pair (a[2t], a[2t+1]).
__global__ void even(int *a)
{
    const int lo = threadIdx.x * 2;
    const int hi = lo + 1;
    if (a[lo] > a[hi])
    {
        const int held = a[lo];
        a[lo] = a[hi];
        a[hi] = held;
    }
    __syncthreads();
}
// Odd phase of odd-even transposition sort: thread t orders the
// pair (a[2t+1], a[2t+2]).
// NOTE(review): for an n-element array this phase needs only (n-1)/2
// comparators -- launching it with n/2 threads (as <<<1,3>>> for 6 elements)
// makes the last thread index one past the end.
__global__ void odd(int *a)
{
    const int lo = threadIdx.x * 2 + 1;
    const int hi = lo + 1;
    if (a[lo] > a[hi])
    {
        const int held = a[lo];
        a[lo] = a[hi];
        a[hi] = held;
    }
    // Barrier moved out of the conditional: __syncthreads() inside a branch
    // taken by only some threads of the block is undefined behavior.
    __syncthreads();
}
// Sorts a fixed 6-element array on the GPU by alternating even/odd
// transposition phases, then copies the result back and prints it.
int main()
{
    int a[]={6,7,5,2,3,1};
    int *devA;
    int c[6];
    printf("hello");
    //c=(int *)malloc(sizeof(int)*6);
    cudaMalloc((void **)&devA,sizeof(int)*6);
    cudaMemcpy(devA,a,sizeof(int)*6,cudaMemcpyHostToDevice);
    // n phases guarantee a sorted result for n elements.
    for(int i=0;i<6;i++)
    {
        if(i%2==0)
        {
            // Even phase: 3 pairs (0,1) (2,3) (4,5).
            even<<<1,3>>>(devA);
        }
        else{
            // Odd phase: only 2 pairs (1,2) (3,4) exist for 6 elements.
            // The original launched 3 threads, making thread 2 read/write
            // a[5]/a[6] -- one past the end of the device buffer.
            odd<<<1,2>>>(devA);
        }
    }
    //device_func<<<1,3>>>(devA);
    cudaMemcpy(c,devA,sizeof(int)*6,cudaMemcpyDeviceToHost);
    printf("After sort--------\n");
    for(int j=0;j<6;j++)
    {
        printf("%d ",c[j]);
    }
    cudaFree(devA);
    return 0;
}
|
2,527 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
// number of threads per block used by every launch in this file
//const int numThreadsPerBlock = 256;
const int numThreadsPerBlock = 1024;
//// device to use in case there is more than one
//static int selectedDevice = 0;
// Grid-stride elementwise product: v3[i] = v1[i] * v2[i] for i in [0, n).
__global__ void kern_Dvxv(const int n, double* v1, double* v2, double* v3)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
    {
        v3[i] = v1[i] * v2[i];
    }
}
extern "C" {
// C-callable wrapper: element-wise vector multiplication, double precision.
// n is passed by pointer for Fortran-style interoperability.
void cuda_Dvxv(const int* n, double* v1, double* v2, double* v3)
{
    // Ceil-divide so every element is covered by at least one thread.
    const int blocks = (*n + numThreadsPerBlock - 1) / numThreadsPerBlock;
    kern_Dvxv<<<blocks, numThreadsPerBlock>>>(*n, v1, v2, v3);
}
}
|
2,528 | #include <iostream>
#include <chrono>
const unsigned max_block_size = 1024;
// Benchmark kernel: EVERY thread performs its own device-heap malloc/free of
// one float and accumulates its cycle counts into the two global counters.
__global__ void kernel_all_threads_malloc(void** ptr /*For side effect*/,
                                          unsigned long long* total_malloc_clock,
                                          unsigned long long* total_free_clock
                                          ) {
    const auto tid = threadIdx.x;
    const auto m0 = clock64();
    void* p = malloc(sizeof(float));
    const auto m1 = clock64();
    // Publish the pointer so the allocation cannot be optimized away.
    ptr[tid] = p;
    const auto f0 = clock64();
    free(p);
    const auto f1 = clock64();
    atomicAdd(total_malloc_clock, m1 - m0);
    atomicAdd(total_free_clock, f1 - f0);
}
// Benchmark kernel: only thread 0 performs a single malloc/free sized for the
// whole block, accumulating its cycle counts into the global counters.
__global__ void kernel_one_thread_malloc(void** ptr /*For side effect*/,
                                         unsigned long long* total_malloc_clock,
                                         unsigned long long* total_free_clock
                                         ) {
    const auto tid = threadIdx.x;
    if (tid != 0) return;
    const auto m0 = clock64();
    void* p = malloc(sizeof(float) * blockDim.x);
    const auto m1 = clock64();
    // Publish the pointer so the allocation cannot be optimized away.
    ptr[tid] = p;
    const auto f0 = clock64();
    free(p);
    const auto f1 = clock64();
    atomicAdd(total_malloc_clock, m1 - m0);
    atomicAdd(total_free_clock, f1 - f0);
}
// Sweeps block sizes 1..max_block_size for both malloc strategies and prints
// a CSV row per configuration: average device cycles per thread for malloc
// and free, plus host wall time for the kernel.
void in_block_test() {
  std::printf("block_size,mode,malloc_clock,free_clock,kernel_time\n");
  float **ptr;
  cudaMalloc(&ptr, sizeof(float*) * max_block_size);
  // Mapped host counters so the kernels can atomicAdd into them directly.
  unsigned long long *total_malloc_clock, *total_free_clock;
  cudaMallocHost(&total_malloc_clock, sizeof(unsigned long long));
  cudaMallocHost(&total_free_clock, sizeof(unsigned long long));
  for (unsigned b = 1; b <= max_block_size; b++) {
    *total_malloc_clock = 0llu;
    *total_free_clock = 0llu;
    cudaDeviceSynchronize();
    const auto t0 = std::chrono::system_clock::now();
    kernel_all_threads_malloc<<<1, b>>>((void**)ptr, total_malloc_clock, total_free_clock);
    // Launches are asynchronous: synchronize BEFORE taking t1, otherwise
    // "kernel_time" measures only launch overhead (the original bug).
    cudaDeviceSynchronize();
    const auto t1 = std::chrono::system_clock::now();
    std::printf("%u,all,%e,%e,%lu\n", b,
                static_cast<double>(*total_malloc_clock) / b,
                static_cast<double>(*total_free_clock) / b,
                // Cast: duration::count() is long long; %lu expects unsigned long.
                static_cast<unsigned long>(
                    std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count()));
  }
  for (unsigned b = 1; b <= max_block_size; b++) {
    *total_malloc_clock = 0llu;
    *total_free_clock = 0llu;
    cudaDeviceSynchronize();
    const auto t0 = std::chrono::system_clock::now();
    kernel_one_thread_malloc<<<1, b>>>((void**)ptr, total_malloc_clock, total_free_clock);
    cudaDeviceSynchronize();
    const auto t1 = std::chrono::system_clock::now();
    std::printf("%u,one,%e,%e,%lu\n", b,
                static_cast<double>(*total_malloc_clock) / b,
                static_cast<double>(*total_free_clock) / b,
                static_cast<unsigned long>(
                    std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count()));
  }
  cudaFreeHost(total_malloc_clock);
  cudaFreeHost(total_free_clock);
  cudaFree(ptr);
}
int main() {
    // Entry point: run the device-heap allocation benchmark.
    in_block_test();
    return 0;
}
|
2,529 | #include <stdio.h>
#include <cuda_runtime.h>
// Each thread prints its own (thread index, block index) pair.
__global__ void kernel() {
    printf("%d, %d\n", threadIdx.x, blockIdx.x);
}
int main() {
    // Launch 16 blocks of 4 threads; each thread prints its IDs.
    kernel <<<16, 4, 0>>>();
    // Kernel launches are asynchronous: without this synchronization the
    // process can exit before the kernel runs and its printf output appears.
    cudaDeviceSynchronize();
    return 0;
}
2,530 | #include <stdio.h>
#include <stdlib.h>
// One step of a Hillis-Steele style exclusive prefix sum over numElements ints.
// The host is expected to call this repeatedly, ping-ponging oldSum/newSum:
// first with distance == 0 (shift right by one, seeding newSum[0] = 0), then
// with distance = 1, 2, 4, ... doubling until distance >= numElements.
// Note: the `i - distance > 0` test (rather than >= 0) is harmless because
// after the shift pass oldSum[0] is always 0, so skipping it adds nothing.
__global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) {
int num_threads = blockDim.x * gridDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(distance == 0){
// Shift pass: newSum[i] = oldSum[i-1], newSum[0] = 0 (grid-stride loop).
for(int i = tid; i < numElements ; i+= num_threads){
if(i == 0){
newSum[i] = 0;
}
else{
newSum[i] = oldSum[i-1];
}
}
}
else{
// Scan pass: add the partial sum `distance` positions to the left.
for(int i = tid; i < numElements ; i+= num_threads){
if(i-distance > 0){
newSum[i] = oldSum[i] + oldSum[i-distance];
}
else{
newSum[i] = oldSum[i];
}
}
}
}
|
2,531 | #include "bitmap.cuh"
void BmpHeader::setDim(int32_t w, int32_t h) {
    // Record the pixel dimensions and recompute the total file size:
    // fixed header plus 3 bytes (24-bit color) per pixel.
    width = w;
    height = h;
    sizeOfBitmapFile = HEADER_SIZE + 3 * w * h;
}
void BmpHeader::setRes(double mmPerPixel) {
    // Convert millimetres-per-pixel into the BMP header's pixels-per-metre
    // resolution, identical on both axes.
    const int32_t pixelsPerMetre = (int32_t)(1000 / mmPerPixel);
    horizontalResolution = pixelsPerMetre;
    verticalResolution = pixelsPerMetre;
}
|
2,532 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
// ---------------------------------
// BEGIN OF USER AREA
// Debug level, possible values: 0 - 5, 5 is highest
// Highest level will cause EXTREMELY detailed output (the whole array will be printed)
// Debug verbosity, 0-5; level 5 dumps the whole array before and after.
__constant__ const int DEBUG_LEVEL = 4;
// Array size for initialization, used only in inputArray function.
__constant__ const int G_ARRAY_SIZE = 8192;
// Number of threads inside of block; each thread handles one element pair.
__constant__ const int BLOCK_SIZE = 8;
// Allocates a G_ARRAY_SIZE-element array via *_arr, fills it with random
// values in [0, G_ARRAY_SIZE), optionally dumps it, and returns its length.
int inputArray(int ** _arr) {
    const int length = G_ARRAY_SIZE;
    *_arr = new int[length];
    for (int idx = 0; idx < length; ++idx) {
        (*_arr)[idx] = rand() % length;
    }
    if (DEBUG_LEVEL >= 5) {
        std::wcout << "Array: ";
        for (int idx = 0; idx < length; ++idx) {
            std::wcout << (*_arr)[idx] << ", ";
        }
        std::wcout << std::endl;
    }
    return length;
}
// Optionally dumps the array, then checks it is in non-decreasing order and
// reports the verdict.
void outputArray(int * _arr, int arr_size) {
    if (DEBUG_LEVEL >= 5) {
        std::wcout << "Array: ";
        for (int idx = 0; idx < arr_size; ++idx) {
            std::wcout << _arr[idx] << ", ";
        }
        std::wcout << std::endl;
    }
    bool sorted = true;
    for (int idx = 1; idx < arr_size; ++idx) {
        if (_arr[idx - 1] > _arr[idx]) {
            sorted = false;
            break;
        }
    }
    if (DEBUG_LEVEL >= 1) std::wcout << "Array sorting check, sorted: " << std::boolalpha << sorted << std::endl;
}
// END OF USER AREA
// ---------------------------------
// Number of blocks: each block covers BLOCK_SIZE element pairs, so the grid
// covers all G_ARRAY_SIZE elements.
__constant__ const int GRID_SIZE = G_ARRAY_SIZE / 2 / BLOCK_SIZE;
// Blocks until the user presses enter (used to keep the console open).
void pause() {
    std::wcout << "Press enter to continue . . . " << std::endl;
    std::cin.ignore();
}
// Returns true (after printing a framed diagnostic) when _cudaStatus is an
// error, false on cudaSuccess.
bool inline cudaErrorOccured(cudaError_t _cudaStatus) {
    if (cudaSuccess == _cudaStatus) {
        return false;
    }
    std::wcout << std::endl << std::endl
        << "------------------------------"
        << "CUDA error: " << _cudaStatus << std::endl;
    if (DEBUG_LEVEL >= 1) std::wcout << cudaGetErrorString(_cudaStatus) << std::endl;
    std::wcout
        << "------------------------------"
        << std::endl << std::endl;
    return true;
}
__device__ bool D_SORTED = false;
// Exchanges arr[i] and arr[j] in place.
__device__ inline void swap(int * arr, int i, int j) {
    const int held = arr[i];
    arr[i] = arr[j];
    arr[j] = held;
}
// One phase of odd-even transposition sort. parity 0 compares pairs starting
// at even offsets, parity 1 at odd offsets. Thread 0 of each block serially
// stages the block's 2*BLOCK_SIZE elements into shared memory, every thread
// then orders its own pair, and thread 0 writes the window back.
__global__ void kernel(int * arr, int parity) {
//get own index: each thread owns the pair at arr[2*idx+parity]
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//array for swapping
__shared__ int shared_arr[BLOCK_SIZE * 2];
//copying forth (done serially by thread 0 of the block)
// last_deduction trims the window by one for the last block in the odd
// phase, whose final pair would run past the end of the array
int last_deduction = 0;
if (threadIdx.x == 0) {
if (parity == 1 && blockIdx.x == GRID_SIZE - 1) last_deduction = 1;
for (int i = 0; i < blockDim.x * 2 - last_deduction; i++) {
shared_arr[i] = arr[2 * idx + i + parity];
}
}
__syncthreads();
// Last kernel shouldn't work in this case: its pair is incomplete.
// NOTE(review): this early return makes the __syncthreads() below divergent
// for the last block in the odd phase -- undefined behavior per the CUDA
// programming model, though it only affects one thread of one block.
if (parity == 1 && idx == BLOCK_SIZE * GRID_SIZE - 1) return;
//swapping: order the thread's own pair, flag that the array was not sorted
if (shared_arr[threadIdx.x * 2] > shared_arr[threadIdx.x * 2 + 1]) {
swap(shared_arr, threadIdx.x * 2, threadIdx.x * 2 + 1);
D_SORTED = false;
}
__syncthreads();
//copying back (again serially by thread 0)
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x * 2 - last_deduction; i++) {
arr[2 * idx + i + parity] = shared_arr[i];
}
}
}
// Host driver: alternates even and odd phases until a full pass performs no
// swaps (kernels clear D_SORTED whenever they swap).
void oddevensort(int * arr, int arr_size) {
    cudaError_t status = cudaSuccess;
    int iterations = 0;
    bool done = false;
    while (!done) {
        // Optimistically mark the pass as sorted; any swap clears the flag.
        done = true;
        status = cudaMemcpyToSymbol(D_SORTED, &done, sizeof(bool));
        if (cudaErrorOccured(status)) return;
        kernel<<<GRID_SIZE, BLOCK_SIZE>>>(arr, 0);
        kernel<<<GRID_SIZE, BLOCK_SIZE>>>(arr, 1);
        // Implicitly synchronizes with the kernels before reading the flag.
        status = cudaMemcpyFromSymbol(&done, D_SORTED, sizeof(bool));
        if (cudaErrorOccured(status)) return;
        ++iterations;
    }
    if (DEBUG_LEVEL >= 1) std::cout << "Sorting finished, iterations: " << iterations << std::endl;
}
// Entry point: prints device information, generates a random array, sorts it
// on the GPU with odd-even transposition sort, and verifies the result.
// Fix vs. original: removed `cudaMalloc((void **)&D_SORTED, sizeof(bool))`.
// D_SORTED is a statically allocated __device__ symbol, not a pointer;
// cudaMalloc through its address wrote an 8-byte device pointer over a 1-byte
// object. cudaMemcpyToSymbol/FromSymbol (used in oddevensort) need no
// explicit allocation.
int main()
{
cudaError_t cudaStatus = cudaSuccess;
int arr_size = 0;
int * arr = 0;
int * d_arr = 0; //GPU copy of array
//0. CUDA device information banner
if (DEBUG_LEVEL >= 1)
{
std::wcout << "CUDA realization of odd-even sorting algorithm" << std::endl;
std::wcout << "Author: Roman Beltyukov" << std::endl << std::endl;
std::wcout << "CUDA information" << std::endl;
int deviceCount = 0;
cudaStatus = cudaGetDeviceCount(&deviceCount);
if (cudaErrorOccured(cudaStatus)) return 1;
std::wcout << "Available CUDA device count: " << deviceCount << std::endl << std::endl;
cudaDeviceProp devProps;
for (int i = 0; i < deviceCount; i++) {
cudaStatus = cudaGetDeviceProperties(&devProps, i);
if (cudaErrorOccured(cudaStatus)) return 1;
std::wcout
<< "Device #" << i << ", CUDA version: " << devProps.major << "." << devProps.minor
<< ", integrated: " << std::boolalpha << devProps.integrated << std::endl
<< "Name: " << devProps.name << std::endl
<< "Clockrate: " << (double)devProps.clockRate / 1024 << "MHz" << std::endl
<< "Total global memory: " << (double)devProps.totalGlobalMem / 1024 / 1024 / 1024 << "GB" << std::endl
<< "Shared memory per block: " << (double)devProps.sharedMemPerBlock / 1024 << "KB" << std::endl
<< "Warp size: " << devProps.warpSize << std::endl
<< "Max threads per block: " << devProps.maxThreadsPerBlock << std::endl
<< "Max threads dimension: ["
<< devProps.maxThreadsDim[0] << ", "
<< devProps.maxThreadsDim[1] << ", "
<< devProps.maxThreadsDim[2] << "]" << std::endl
<< "Max grid size: ["
<< devProps.maxGridSize[0] << ", "
<< devProps.maxGridSize[1] << ", "
<< devProps.maxGridSize[0] << "]" << std::endl
<< std::endl;
}
std::wcout << std::endl;
}
//1. Generate the input array on the host
arr_size = inputArray(&arr);
if (DEBUG_LEVEL >= 1) std::wcout << "Array generated, size: " << arr_size << ", last element: " << arr[arr_size - 1] << std::endl;
//2. Allocate device memory and upload the array
cudaStatus = cudaMalloc((void **)&d_arr, arr_size * sizeof(int));
if (cudaErrorOccured(cudaStatus)) return 1;
cudaStatus = cudaMemcpy(d_arr, arr, arr_size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaErrorOccured(cudaStatus)) return 1;
if (DEBUG_LEVEL >= 1) std::wcout << "Memory allocation and copying host->device finished" << std::endl;
//3. Sort on the device and check for launch/execution errors
oddevensort(d_arr, arr_size);
cudaStatus = cudaGetLastError();
if (cudaErrorOccured(cudaStatus)) return 1;
cudaStatus = cudaDeviceSynchronize();
if (cudaErrorOccured(cudaStatus)) return 1;
//4. Copy the result back and release device resources
cudaStatus = cudaMemcpy(arr, d_arr, arr_size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaErrorOccured(cudaStatus)) return 1;
cudaStatus = cudaFree(d_arr);
if (cudaErrorOccured(cudaStatus)) return 1;
cudaStatus = cudaDeviceReset();
if (cudaErrorOccured(cudaStatus)) return 1;
if (DEBUG_LEVEL >= 1) std::wcout << "Copying device->host and memory releasing finished" << std::endl;
//5. Verify and report
outputArray(arr, arr_size);
delete[] arr;
if (DEBUG_LEVEL >= 1) std::wcout << "Array output finished" << std::endl;
if (DEBUG_LEVEL >= 1) {
std::wcout << "Program finished" << std::endl;
}
if (DEBUG_LEVEL >= 2) pause();
return 0;
}
|
2,533 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
// Plain three-int aggregate used in main() to demonstrate access through a
// typed struct pointer and through an int* reinterpretation of its layout.
struct MyStruct
{
    int a;
    int b;
    int c;
};
// Tutorial program exercising pointer syntax: pointer declarations, pointer
// arithmetic over an array, pointer-to-pointer writes, arrays of strings,
// struct pointers, and function pointers. Output order is significant.
int main(void)
{
    // Pointer declarations of increasing complexity. None are dereferenced
    // uninitialised: only sizeof (which does not evaluate) touches ptr.
    int *ptr;
    char *ptr1;
    int **ptr2;
    int(*ptr3)[3];        // pointer to an array of 3 ints
    int *(*ptr4)[4];      // pointer to an array of 4 int pointers
    int temp = sizeof(*ptr);
    printf("result is :%d\n",temp);
    int array[20];
    int *ptr5 = array;
    for (int i = 0; i < 20; i++)
    {
        array[i] = i;
    }
    // Walk the array once: (*ptr5)++ bumps the current element, ptr5++
    // advances, so after this loop array[i] == i + 1.
    for (int i = 0; i < 20; i++)
    {
        (*ptr5)++;
        ptr5++;
    }
    for (int i = 0; i < 20; i++)
    {
        printf("number %d: value %d\n", i, array[i]);
    }
    // Pointer-to-pointer demo: retarget p1 through ptr6, then write through it.
    int a1 = 12;
    int b1;
    int *p1;
    int **ptr6;
    p1 = &a1;
    *p1 = 24;          // a1 becomes 24
    ptr6 = &p1;
    *ptr6 = &b1;       // p1 now points at b1
    **ptr6 = 34;       // b1 becomes 34
    printf("a1 is %d,b1 is %d,*p1 is %d,**ptr6 is %d\n", a1, b1, *p1, **ptr6);
    // NOTE(review): binding string literals to non-const char* is ill-formed
    // in C++11 and later, and strcpy needs <cstring>/<string.h>, which this
    // file does not include directly — confirm it arrives transitively.
    char *str1[3] = {
        "Hello,this is a sample!\n",
        "Hi,good morning\n",
        "Hello world\n"
    };
    char s[80];
    strcpy(s, str1[0]);
    printf(s);
    cout << **str1 << endl;    // **str1 is a char: prints only 'H'
    strcpy(s, str1[1]);
    printf(s);
    strcpy(s, str1[2]);
    printf(s);
    // Struct access two ways: through a typed pointer and through an int*
    // laid over the struct's three consecutive ints.
    MyStruct ss = { 20,30,40 };
    MyStruct *ptr7 = &ss;
    int *pstr = (int*)&ss;
    cout << sizeof(ptr7) << " and " << sizeof(pstr) << endl;
    cout << ptr7->a << " " << ptr7->b << " " << ptr7->c << endl;
    cout << *pstr << " " << *(pstr + 1) << " " << *(pstr + 2) << endl;
    // Function-pointer demo: declare fun1, store its address, call indirectly.
    int fun1(char*, int);
    int(*pfun1)(char*, int);
    pfun1 = fun1;
    int res = (*pfun1)("abcdefg", 7);
}
// Prints the given string and integer separated by a space; always returns 0.
int fun1(char* x1, int x2){
    std::cout << x1 << " " << x2 << std::endl;
    return 0;
}
|
2,534 | /*
Faz a soma dos elementos de dois vetores
Exemplifica o uso de memoria mapeada com cudaHostAlloc() usando
o parametro cudaHostAllocMapped para alocar memoria
tanto no host quanto no device. Copias entre host e device sao
implicitas, igual aa memoria unificada.
cudaDeviceSynchronize() antes da impressao do resultado se faz
necessaria, caso contrário o resultado deve sair errado.
Para compilar: nvcc 02-soma-vet-mapped.cu -o 02-soma-vet-mapped
Para executar: ./02-soma-vet-mapped
OBS: os valores de tamanho do vetor e o conteudo do vetor
estao fixos no codigo
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Element-wise vector addition: vetorC[i] = vetorA[i] + vetorB[i],
// one thread per element, guarded against the partially-filled tail block.
__global__ void soma(int *vetorA, int *vetorB,int *vetorC,int tam)
{
    int pos = blockDim.x * blockIdx.x + threadIdx.x;
    if (pos >= tam)
        return;     // thread beyond the end of the vectors: nothing to do
    vetorC[pos] = vetorA[pos] + vetorB[pos];
}
// Host driver: fills two mapped (zero-copy) vectors, launches the addition
// kernel, synchronises, and prints the result. Mapped allocations make the
// host<->device copies implicit.
int main(int argc,char **argv)
{
    int i,*vetorA,*vetorB,*vetorC,threadsPerBlock,blocksPerGrid;
    int tam = 16; //5000;
    // Threads per block.
    threadsPerBlock = 256;
    // Allocate the vectors on host and device (memory mapped into the unified virtual address space).
    cudaHostAlloc((void**)&vetorA,tam*(sizeof(int)),cudaHostAllocMapped);
    cudaHostAlloc((void**)&vetorB,tam*(sizeof(int)),cudaHostAllocMapped);
    cudaHostAlloc((void**)&vetorC,tam*(sizeof(int)),cudaHostAllocMapped);
    // Fill the input vectors on the host.
    for(i=0;i<tam;i++)
    {
        vetorA[i]=i;
        vetorB[i]=0; //-i;
    }
    // Blocks per grid: ceiling division so every element gets a thread.
    blocksPerGrid=(tam+threadsPerBlock-1)/threadsPerBlock;
    // Launch the kernel with blocksPerGrid blocks of threadsPerBlock threads.
    soma <<<blocksPerGrid,threadsPerBlock>>> (vetorA,vetorB,vetorC,tam);
    // Required before reading vetorC: the kernel runs asynchronously, and the
    // mapped result is only guaranteed visible after synchronisation.
    cudaDeviceSynchronize();
    // Print the result on the host.
    for(i=0;i<tam;i++)
    {
        printf("%d ",vetorC[i]);
    }
    printf("\n");
    // Release the mapped vectors (frees both the host and device views).
    cudaFreeHost(vetorA);
    cudaFreeHost(vetorB);
    cudaFreeHost(vetorC);
}
2,535 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define THREADS_PER_BLOCK 512
//function declarations
unsigned int getmax(unsigned int *, unsigned int);
__global__ void getmaxcu(unsigned int *num, unsigned int size);
// Fills an array with random values, finds the maximum on the GPU by
// repeated 10-to-1 reduction passes, and prints it.
// Usage: <prog> <array_size>
int main(int argc, char *argv[])
{
    unsigned int size = 0;  // The size of the array
    unsigned int i;         // loop index
    unsigned int * numbers; // pointer to the array
    // The size argument is mandatory; the original dereferenced argv[1]
    // without checking argc.
    if (argc < 2)
    {
        printf("Usage: %s <array_size>\n", argv[0]);
        exit(1);
    }
    size = atol(argv[1]);
    // calculates number of blocks (ceiling division)
    unsigned int NUM_BLOCKS = (size + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
    numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
    if( !numbers )
    {
        printf("Unable to allocate mem for an array of size %u\n", size);
        exit(1);
    }
    srand(time(NULL)); // setting a seed for the random number generator
    // Fill-up the array with random numbers from 0 to size-1
    for( i = 0; i < size; i++){
        numbers[i] = rand() % size;
    }
    // Device copy of the array.
    unsigned int *d_numbers;
    cudaMalloc((void**) &d_numbers, size * sizeof(unsigned int));
    cudaMemcpy(d_numbers, numbers, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
    // sequential reference (disabled):
    //printf(" The maximum number in the array is: %u\n", getmax(numbers, size));
    // Each pass shrinks the candidate range by a factor of ten, leaving the
    // running maximum in num[0..sizea/10); kernels on the default stream are
    // ordered, so no explicit sync is needed between passes.
    unsigned int sizea = size;
    while(sizea > 1){
        getmaxcu<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(d_numbers, sizea);
        sizea = (sizea) / 10;
    }
    cudaMemcpy(numbers, d_numbers, size * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    // %u, not %d: the values are unsigned.
    printf("The max integer in the array is: %u\n", numbers[0]);
    // free device memory
    cudaFree(d_numbers);
    free(numbers);
    exit(0);
}
// One 10-to-1 reduction pass: each of the first size/10 threads folds the
// maximum of its 10-element stride family {index + (size/10)*i : i=0..9} into
// num[index]. Thread 0 additionally folds the remainder elements
// [10*(size/10), size) and, when size < 10, scans the whole range, so no
// element is ever dropped (the original kernel lost the tail whenever size
// was not a multiple of 10 and did nothing at all for size < 10).
__global__ void getmaxcu(unsigned int* num, unsigned int size){
    unsigned int index = threadIdx.x + (blockDim.x * blockIdx.x);
    unsigned int tenPoint = size / 10; // family count for this pass
    if (tenPoint == 0) {
        // Fewer than 10 candidates left: let one thread finish the scan.
        if (index == 0) {
            for (unsigned int i = 1; i < size; i++) {
                if (num[i] > num[0]) {
                    num[0] = num[i];
                }
            }
        }
        return;
    }
    if (index < tenPoint) {
        for (unsigned int i = 1; i < 10; i++) {
            unsigned int temp = num[index + tenPoint * i];
            // compare to the family's "0" slot
            if (temp > num[index]) {
                num[index] = temp;
            }
        }
        if (index == 0) {
            // Fold leftovers that do not fill a complete stride family.
            // No race: only thread 0 touches num[0] and the tail range.
            for (unsigned int i = tenPoint * 10; i < size; i++) {
                if (num[i] > num[0]) {
                    num[0] = num[i];
                }
            }
        }
    }
}
// Sequential reference: linear scan for the maximum of num[0..size-1].
// Precondition: size >= 1 (num[0] seeds the scan).
unsigned int getmax(unsigned int num[], unsigned int size)
{
    unsigned int best = num[0];
    for (unsigned int idx = 1; idx < size; idx++) {
        if (num[idx] > best) {
            best = num[idx];
        }
    }
    return best;
}
|
2,536 | #include <iostream>
#include <vector>
#include <complex>
#include <cuComplex.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Releases every GPU buffer used by the Collins transform: the input and
// output fields, the four coordinate vectors (x, y, u-or-z, v), the transform
// parameters, the dimension vector, and the progress counter.
void freeGPUMemory(cuDoubleComplex* device_pointer_input, cuDoubleComplex* device_pointer_output, double* device_pointer_x1, double* device_pointer_x2, double* device_pointer_x3, double* device_pointer_x4, double* device_pointer_parameters, int* device_pointer_dimension, int* device_pointer_progress) {
    cudaFree(device_pointer_input);
    cudaFree(device_pointer_output);
    cudaFree(device_pointer_x1);
    cudaFree(device_pointer_x2);
    cudaFree(device_pointer_x3);
    cudaFree(device_pointer_x4);
    cudaFree(device_pointer_parameters);
    cudaFree(device_pointer_dimension);
    cudaFree(device_pointer_progress);
}
// Sum of two CUDA double-precision complex numbers.
__device__ cuDoubleComplex operator+(const cuDoubleComplex& left, const cuDoubleComplex& right) {
    return cuCadd(left, right);
}
// Compound assignment: left = left + right; returns the updated value.
__device__ cuDoubleComplex operator+=(cuDoubleComplex& left, const cuDoubleComplex& right) {
    left = left + right;
    return left;
}
// Product of two CUDA double-precision complex numbers.
__device__ cuDoubleComplex operator*(const cuDoubleComplex& left, const cuDoubleComplex& right) {
    return cuCmul(left, right);
}
// Complex * real: the real factor is promoted to a complex with zero imaginary part.
__device__ cuDoubleComplex operator*(const cuDoubleComplex& left, const double& right) {
    return cuCmul(left, make_cuDoubleComplex(right, 0));
}
// Real * complex: mirror of the overload above.
__device__ cuDoubleComplex operator*(const double& left, const cuDoubleComplex& right) {
    return cuCmul(make_cuDoubleComplex(left, 0), right);
}
// Complex exponential: exp(a + bi) = exp(a) * (cos b + i sin b).
__device__ cuDoubleComplex exp(const cuDoubleComplex& value) {
    return exp(value.x) * make_cuDoubleComplex(cos(value.y), sin(value.y));
}
// Picks a per-axis thread count for a square block: the largest value not
// exceeding round(sqrt(N)) that divides N evenly and whose square stays
// within the 1024-threads-per-block limit. (N — samples along one axis.)
int getNumberThreads(int N) {
    int candidate = static_cast<int>(round(sqrt(N)));
    for (;; --candidate) {
        bool divides = (N % candidate) == 0;
        bool fitsBlock = candidate * candidate <= 1024;
        if (divides && fitsBlock) {
            break;
        }
    }
    return candidate;
}
// Prints transform progress as a percentage (now — operations completed,
// max — total operation count). Device-side printf, so output is best-effort
// and interleaved across threads; the fraction is truncated to 2 decimals.
__device__ void processing(int now, int max) {
    double percent;
    if (now == max) {
        percent = 100.;
    }
    else {
        percent = trunc(10000. * (static_cast<double>(now) / max)) / 100;
    }
    // BUGFIX: the format string previously ended in a lone '%', which is an
    // invalid conversion specifier; a literal percent sign requires "%%".
    printf("\rВыполнено %2.2f%%", percent);
}
// Generalised Collins-integral kernel. Arguments: output — result field;
// input — input field; x1/x2 — x and y sample grids; x3 — u or z samples;
// x4 — v samples; parameters — transform parameters; dimension — grid sizes;
// progress — atomic progress counter; transformType — 0: fractional Fourier,
// 1: Fresnel, 2: custom ABCD matrix; OxyCrossSection — true: transverse Ouv
// cross-section, false: longitudinal Ovz cross-section.
// One thread computes one output sample (p, q) by direct double summation
// over the n1 x n1 input grid.
__global__ void collinsKernel(cuDoubleComplex* output, const cuDoubleComplex* input, const double* x1, const double* x2, const double* x3, const double* x4, const double* parameters, const int* dimension, int* progress, int transformType, bool OxyCrossSection)
{
    auto pi = 3.14159265358979323846;
    auto q = blockIdx.x * blockDim.x + threadIdx.x;
    auto p = blockIdx.y * blockDim.y + threadIdx.y;
    auto hx = x1[1] - x1[0];
    // NOTE(review): hy is computed with the opposite sign convention to hx
    // (x2[0] - x2[1]); presumably x2 is stored in descending order — confirm.
    auto hy = x2[0] - x2[1];
    auto wavelength = parameters[0];
    // In the Ouv cross-section z is fixed by parameters[1] and u varies with
    // the thread; in the Ovz cross-section the roles swap.
    auto z = OxyCrossSection ? parameters[1] : x3[q];
    auto u = OxyCrossSection ? x3[q] : parameters[1];
    auto f = !transformType ? parameters[2] : 0;    // focal length (fractional FT only)
    auto k = 2 * pi / wavelength;                   // wavenumber
    auto n1 = dimension[0];
    auto n2 = dimension[1];
    auto n3 = OxyCrossSection ? dimension[1] : dimension[2];
    // ABCD ray-matrix elements for the chosen transform.
    auto A = 0.0;
    auto B = 0.0;
    auto D = 0.0;
    switch (transformType) {
    case 0:
        A = cos(pi * z / (2 * f));
        B = f * sin(pi * z / (2 * f));
        D = cos(pi * z / (2 * f));
        break;
    case 1:
        A = 1.;
        B = z;
        D = 1.;
        break;
    default:
        A = OxyCrossSection ? parameters[1] : parameters[2];
        B = OxyCrossSection ? parameters[2] : parameters[3];
        D = OxyCrossSection ? parameters[3] : parameters[4];
    }
    // Direct evaluation of the Collins integral for this output sample.
    auto value = make_cuDoubleComplex(0, 0);
    for (auto i = 0; i < n1; i++) {
        for (auto j = 0; j < n1; j++) {
            auto arg = (k / (2 * B)) * (A * (x2[i] * x2[i] + x1[j] * x1[j]) - 2 * (x2[i] * x4[p] + x1[j] * u) + D * (x4[p] * x4[p] + u * u));
            value += input[i * n1 + j] * exp(make_cuDoubleComplex(0, arg));
        }
    }
    // Progress reporting (device printf; purely informational).
    atomicAdd(progress, 1);
    processing(*progress, n2 * n3);
    // Leading factor -i*k/(2*pi*B) and the area element hx*hy.
    output[p * n3 + q] = make_cuDoubleComplex(0, -(k / (2 * pi * B))) * value * hx * hy;
}
// Host driver for collinsKernel: uploads the input field, the coordinate
// grids, and the transform parameters, launches one thread per output sample,
// and returns the output field as a nested std::vector.
// Throws std::runtime_error on launch/synchronisation/copy failure.
std::vector<std::vector<std::complex<double>>> calculateCollinsCUDA(const std::vector<std::vector<std::complex<double>>>& input, const std::vector<double>& x1, const std::vector<double>& x2, const std::vector<double>& x3, const std::vector<double>& x4, const std::vector<double>& parameters, const std::vector<int>& dimension, int transformType)
{
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        throw std::runtime_error("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
    }
    // Two dimension entries -> transverse Ouv cross-section, three -> Ovz.
    bool OxyCrossSection = dimension.size() == 3 ? false : true;
    // Allocate GPU buffers for vectors.
    // NOTE(review): the cudaStatus results of the cudaMalloc/cudaMemcpy calls
    // below are assigned but never checked.
    auto n1 = dimension.at(0);
    auto n2 = dimension.at(1);
    auto n3 = OxyCrossSection ? dimension.at(1) : dimension.at(2);
    // NOTE(review): this host buffer is leaked on the exception paths below —
    // it is only released on the success path.
    auto device_output = new cuDoubleComplex[n2 * n3];
    cuDoubleComplex* device_pointer_output = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_output, static_cast<unsigned long long>(n2) * n3 * sizeof(cuDoubleComplex));
    // Flatten the input field into a row-major cuDoubleComplex staging buffer.
    auto device_input = new cuDoubleComplex[input.size() * input.at(0).size()];
    for (auto i = 0; i < input.size(); i++) {
        for (auto j = 0; j < input.at(0).size(); j++) {
            device_input[i * n1 + j] = make_cuDoubleComplex(input.at(i).at(j).real(), input.at(i).at(j).imag());
        }
    }
    cuDoubleComplex* device_pointer_input = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_input, static_cast<unsigned long long>(n1) * n1 * sizeof(cuDoubleComplex));
    cudaStatus = cudaMemcpy(device_pointer_input, device_input, static_cast<unsigned long long>(n1) * n1 * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
    delete[] device_input;
    // Upload the coordinate grids and the transform parameters.
    double* device_pointer_x1 = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_x1, n1 * sizeof(double));
    cudaStatus = cudaMemcpy(device_pointer_x1, x1.data(), n1 * sizeof(double), cudaMemcpyHostToDevice);
    double* device_pointer_x2 = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_x2, n1 * sizeof(double));
    cudaStatus = cudaMemcpy(device_pointer_x2, x2.data(), n1 * sizeof(double), cudaMemcpyHostToDevice);
    double* device_pointer_x3 = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_x3, n3 * sizeof(double));
    cudaStatus = cudaMemcpy(device_pointer_x3, x3.data(), n3 * sizeof(double), cudaMemcpyHostToDevice);
    double* device_pointer_x4 = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_x4, n2 * sizeof(double));
    cudaStatus = cudaMemcpy(device_pointer_x4, x4.data(), n2 * sizeof(double), cudaMemcpyHostToDevice);
    double* device_pointer_parameters = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_parameters, parameters.size() * sizeof(double));
    cudaStatus = cudaMemcpy(device_pointer_parameters, parameters.data(), parameters.size() * sizeof(double), cudaMemcpyHostToDevice);
    int* device_pointer_dimension = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_dimension, dimension.size() * sizeof(int));
    cudaStatus = cudaMemcpy(device_pointer_dimension, dimension.data(), dimension.size() * sizeof(int), cudaMemcpyHostToDevice);
    // NOTE(review): the progress counter is allocated but never zeroed
    // (no cudaMemset), so the kernel's atomicAdd starts from garbage —
    // affects only the printed progress, not the result. Confirm.
    int* device_pointer_progress = 0;
    cudaStatus = cudaMalloc((void**)&device_pointer_progress, sizeof(int));
    // Launch a kernel on the GPU with one thread for each element.
    dim3 threadsPerBlock(getNumberThreads(n3), getNumberThreads(n2));
    dim3 numBlocks(n3 / threadsPerBlock.x, n2 / threadsPerBlock.y);
    collinsKernel<<<numBlocks, threadsPerBlock>>>(device_pointer_output, device_pointer_input, device_pointer_x1, device_pointer_x2, device_pointer_x3, device_pointer_x4, device_pointer_parameters, device_pointer_dimension, device_pointer_progress, transformType, OxyCrossSection);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "\ncollinsKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        freeGPUMemory(device_pointer_input, device_pointer_output, device_pointer_x1, device_pointer_x2, device_pointer_x3, device_pointer_x4, device_pointer_parameters, device_pointer_dimension, device_pointer_progress);
        throw std::runtime_error("Запуск ядра CUDA не удался!");
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "\ncudaDeviceSynchronize returned error code %d after launching collinsKernel!\n", cudaStatus);
        freeGPUMemory(device_pointer_input, device_pointer_output, device_pointer_x1, device_pointer_x2, device_pointer_x3, device_pointer_x4, device_pointer_parameters, device_pointer_dimension, device_pointer_progress);
        throw std::runtime_error("Синхронизация данных между хостом и устройством завершилась неудачей!");
    }
    std::cout << "\rВыполнено 100.00%" << std::endl;
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(device_output, device_pointer_output, static_cast<unsigned long long>(n2) * n3 * sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        freeGPUMemory(device_pointer_input, device_pointer_output, device_pointer_x1, device_pointer_x2, device_pointer_x3, device_pointer_x4, device_pointer_parameters, device_pointer_dimension, device_pointer_progress);
        throw std::runtime_error("Копирование результата в ОЗУ завершилось неудачей!");
    }
    freeGPUMemory(device_pointer_input, device_pointer_output, device_pointer_x1, device_pointer_x2, device_pointer_x3, device_pointer_x4, device_pointer_parameters, device_pointer_dimension, device_pointer_progress);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        throw std::runtime_error("cudaDeviceReset failed!");
    }
    // Repack the flat row-major result into a nested vector (x4 rows, x3 cols).
    std::vector<std::vector<std::complex<double>>> result;
    result.reserve(x4.size());
    for (auto i = 0; i < x4.size(); i++) {
        auto row = std::vector<std::complex<double>>();
        row.reserve(x4.size());
        for (auto j = 0; j < x3.size(); j++) {
            row.emplace_back(std::complex<double>(device_output[i * n3 + j].x, device_output[i * n3 + j].y));
        }
        result.emplace_back(row);
    }
    delete[] device_output;
    return result;
}
2,537 | #include<stdio.h>
//#define NUM_BINS 64 // We are also going to use this for the number of threads in a block.
#define NUM_BINS 1024 // We are also going to use this for the number of threads in a block.
#define NUM_THREADS_PER_BLOCK 16
#define NUM_BLOCKS 16
////////////////////////////////////////////////////////////////////////////////
// Took this code from a Dr. Dobbs example.
////////////////////////////////////////////////////////////////////////////////
// Aborts the program with a labelled message if any prior CUDA call failed.
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess == status)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
////////////////////////////////////////////////////////////////////////////////
// The kernel.
////////////////////////////////////////////////////////////////////////////////
// Each block fills NUM_BINS shared bins (one thread per bin), then thread 0
// flushes them to the block's slice of dev_hist.
// Expects blockDim.x == NUM_BINS. (Unused locals idx/num_threads removed.)
__global__ void kernel (int *dev_hist)
{
    int tid = threadIdx.x;
    // shared memory
    __shared__ int shared_hist[NUM_BINS];
    // Only the first thread of each block clears the shared bins; the barrier
    // below makes the zeros visible to every thread before any write.
    if(threadIdx.x==0)
    {
        for (int i=0;i<NUM_BINS;i++)
            shared_hist[i] = 0;
    }
    __syncthreads();
    // Each thread fills exactly its own bin — no races.
    shared_hist[tid] = blockIdx.x*1000 + tid;
    __syncthreads();
    // Thread 0 copies the block's bins to this block's region of dev_hist.
    if(threadIdx.x==0)
    {
        for(int i=0;i<NUM_BINS;i++)
        {
            dev_hist[i+(blockIdx.x*(NUM_BINS))]=shared_hist[i];
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// The main() program.
////////////////////////////////////////////////////////////////////////////////
// Launches the shared-memory demo kernel, copies the per-block bins back,
// and prints them. Fixes in this revision: h_hist is now checked and freed
// (it was previously leaked), the unused local nbins is gone, and the
// deprecated cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
int main()
{
    int dimx = NUM_BLOCKS; // Number of blocks
    int num_bytes = NUM_BINS*sizeof(int);
    int num_bytes_on_gpu = dimx*NUM_BINS*sizeof(int);
    int *d_a=0, *h_a=0, *h_hist=0; // device and host pointers
    // Allocate memory on host (CPU)
    h_a = (int*)malloc(num_bytes_on_gpu);
    h_hist = (int*)malloc(num_bytes);
    // Allocate memory on device (GPU)
    cudaMalloc((void**)&d_a,num_bytes_on_gpu);
    checkCUDAError("malloc");
    // If an allocation fails the pointer stays 0, which is why all three
    // were initialised to 0 at declaration.
    if (0==h_a || 0==h_hist)
    {
        printf("Couldn't allocate host memory\n");
        return 1;
    }
    if (0==d_a)
    {
        printf("Couldn't allocate device memory\n");
        return 1;
    }
    // Initialize device array to all 0's
    cudaMemset(d_a,0,num_bytes_on_gpu);
    checkCUDAError("memset");
    // Launch NUM_BLOCKS blocks of NUM_BINS threads: one thread per shared bin.
    dim3 grid, block;
    grid.x = NUM_BLOCKS; // Number of blocks
    block.x = NUM_BINS;  // Number of threads per block.
    kernel<<<grid,block>>>(d_a);
    cudaDeviceSynchronize();
    checkCUDAError("kernel");
    // Copy the result back and print one row per block.
    cudaMemcpy(h_a,d_a,num_bytes_on_gpu,cudaMemcpyDeviceToHost);
    for (int i=0;i<dimx;i++)
    {
        for (int j=0;j<NUM_BINS;j++)
        {
            printf("%d ",h_a[i*NUM_BINS + j]);
        }
        printf("\n");
    }
    printf("\n");
    // Release host and device memory.
    free(h_a);
    free(h_hist);
    cudaFree(d_a);
    return 0;
}
|
2,538 | #include<iostream>
#include <cuda.h>
// Dense n x n matrix multiply, one thread per output element addressed by
// flat index row*n + col. The dot product is accumulated in a register and
// stored once: the original read-modify-wrote C[row*n+col] on every loop
// iteration, which both hammered global memory and made the result depend on
// C's prior (never zeroed) contents.
__global__ void matmul_kernel(const float* A, const float* B, float* C, size_t n)
{
    size_t flat = blockIdx.x * blockDim.x + threadIdx.x;
    if (flat < n*n) {
        size_t row = flat / n;
        size_t col = flat % n;
        float sum = 0.0f;
        for (size_t i = 0; i < n; i++) {
            sum += A[row * n + i] * B[i * n + col];
        }
        C[row * n + col] = sum;
    }
}
// Host wrapper: launches matmul_kernel with a ceil-divided 1-D grid so every
// one of the n*n output elements gets a thread, then blocks until it finishes.
void matmul(const float* A, const float* B, float* C, size_t n, unsigned int threads_per_block)
{
    size_t grid = (n * n + threads_per_block - 1) / threads_per_block;
    matmul_kernel<<<grid, threads_per_block>>>(A, B, C, n);
    cudaDeviceSynchronize();
}
|
2,539 | #include<bits/stdc++.h>
using namespace std;
#define MAX_VAL ((int)1e8)
#define cudaCatchError(error) { gpuAssert((error), __FILE__, __LINE__); }
// Catch Cuda errors: prints the error code, message, and source location;
// terminates the process when abort is true.
inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false)
{
    if (error != cudaSuccess)
    {
        // BUGFIX: the original format string had three conversions (%i %s %s)
        // but only two arguments — undefined behavior.
        printf("\n====== Cuda Error Code %i ======\n %s\n", error, cudaGetErrorString(error));
        printf("\nIn file :%s\nOn line: %d", file, line);
        if(abort)
            exit(-1);
    }
}
// Single-block, level-synchronous BFS from vertex 0 over a CSR graph
// (d_r — row offsets, d_c — adjacency targets). Q1/Q2 are the current and
// next frontier queues; max_depth[0] receives the number of BFS levels.
// Must be launched as <<<1, 1024>>>: it synchronises only with
// __syncthreads, so all cooperating threads have to live in one block.
__global__ void compute(int *d_r, int *d_c, int *d_depth, int *max_depth, int *Q1, int *Q2, int nodes){
    int idx = threadIdx.x;
    // len1/len2: current/next frontier sizes; curr_depth: current BFS level.
    __shared__ int len1, len2, curr_depth;
    int i;
    // Strided initialisation: 1024 threads mark every vertex unvisited.
    for(i=idx; i<nodes; i+=1024){
        d_depth[i] = MAX_VAL;
    }
    if(idx == 0){
        // Seed the search at vertex 0.
        d_depth[0] = 0;
        curr_depth = 0;
        len1 = 1;
        len2 = 0;
        Q1[0] = 0;
    }
    __syncthreads();
    while(len1){
        // Expand the current frontier. atomicCAS claims each unvisited
        // vertex exactly once, so it is enqueued by exactly one thread.
        for(i=idx; i<len1; i+=1024){
            for(int j=d_r[Q1[i]]; j<d_r[Q1[i]+1]; j++){
                int v = d_c[j];
                if(atomicCAS(&d_depth[v], MAX_VAL, d_depth[Q1[i]]+1) == MAX_VAL){
                    int t = atomicAdd(&len2,1);
                    Q2[t] = v;
                }
            }
        }
        __syncthreads();
        if(idx==0){
            // Thread 0 swaps the frontiers serially and advances the level.
            for(i=0; i<len2; i++){
                Q1[i] = Q2[i];
            }
            len1 = len2;
            len2 = 0;
            curr_depth++;
        }
        __syncthreads();
    }
    // Every thread stores the same shared value — a benign redundant write.
    max_depth[0] = curr_depth;
}
// Reads a CSR graph, runs the single-block BFS kernel, reports the depth and
// timing, then validates the per-node depths against an expected-output file.
int main(int argc, char *argv[]){
    // Both files are required: the original only checked for argv[1] but
    // dereferenced argv[2] in the validation step below.
    if(argc<3){
        cout << "Usage: " << argv[0] << " <graph_file_name> <expected_output_file>\n";
        return 0;
    }
    ifstream input;
    input.open(argv[1]);
    int nodes, edges, i;
    input >> nodes;
    input >> edges;
    // Host-side CSR arrays: h_r holds nodes+1 row offsets, h_c holds the
    // 2*edges adjacency targets (undirected edges stored both ways).
    int *h_r = (int*)malloc((nodes+1)*sizeof(int));
    int *h_c = (int*)malloc(edges*2*sizeof(int));
    for(i=0; i<nodes+1; i++){
        input >> h_r[i];
    }
    for(i=0; i<edges*2; i++){
        input >> h_c[i];
    }
    // Device buffers: two frontier queues, the CSR graph, per-node depths,
    // and a single int for the maximum depth found.
    int *Q1, *Q2, *d_r, *d_c, *d_depth, *max_depth;
    cudaMalloc((void**)&Q1, nodes*sizeof(int));
    cudaMalloc((void**)&Q2, nodes*sizeof(int));
    cudaMalloc((void**)&d_r, (nodes+1)*sizeof(int));
    cudaMalloc((void**)&d_c, edges*2*sizeof(int));
    cudaMalloc((void**)&d_depth, nodes*sizeof(int));
    cudaMalloc((void**)&max_depth, sizeof(int));
    // copying data to device
    cudaMemcpy(d_r, h_r, (nodes+1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, edges*2*sizeof(int), cudaMemcpyHostToDevice);
    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // The kernel relies on __syncthreads, so it must run as a single block.
    printf("Starting Computation\n");
    compute <<<1, 1024>>> (d_r, d_c, d_depth, max_depth, Q1, Q2, nodes);
    cudaThreadSynchronize();
    printf("Finished Computation\n");
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout<<"Compute time in GPU: "<<milliseconds<<"ms"<<endl;
    // Fetch the computed depth (number of levels; -1 gives the max distance).
    int *result = (int *)malloc(sizeof(int));
    cudaCatchError(cudaMemcpy(result, max_depth, sizeof(int), cudaMemcpyDeviceToHost));
    printf("Depth : %d\n", result[0]-1);
    // Compare the per-node depths against the expected-output file.
    int *h_depth = (int*) malloc(nodes*sizeof(int));
    cudaMemcpy(h_depth, d_depth, nodes*sizeof(int), cudaMemcpyDeviceToHost);
    int *h_check_depth = (int*)malloc(nodes*sizeof(int));
    freopen(argv[2], "r", stdin);
    printf("malloc done\n");
    for(int i = 0; i < nodes; i++) {
        cin>>h_check_depth[i];
    }
    printf("Finished reading output file\n");
    bool flag = true;
    int count = 0;
    printf("Starting checking\n");
    for(int i = 0; i < nodes; i++) {
        if(h_depth[i] != h_check_depth[i]) {
            printf("Found %d, Expected %d\n",h_depth[i], h_check_depth[i]);
            flag = false;
            count++;
        }
    }
    printf("Finished checking\n");
    if(flag) {
        cout<<"Solution is correct!\n";
    }
    else {
        cout<<"Solution is incorrect!"<<endl;
        cout<<count<<" testcases failed.\n";
    }
    return 0;
}
2,540 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
__device__ int d_change;
// One Bellman-Ford relaxation sweep: thread i tries to improve d_d[i] via
// every in-edge (j -> i) of the dense adjacency matrix d_g, setting the
// device-global d_change flag whenever a distance shrinks.
// FIX: the grid is ceil-divided by the host, so threads with i >= n exist;
// they previously read and wrote d_d out of bounds. The guard is applied
// per-access (not via early return) so both __syncthreads barriers remain
// uniform across the block.
__global__ void bellman_ford(int *d_g, int *d_d, int k, int n)
{
    d_change = 0;
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    int active = (i < n);                    // excess tail threads stay idle
    int cur_dis = active ? d_d[i] : 0;
    __syncthreads();
    int j;
    if (active)
    {
        for (j=1; j<n;j++)
        {
            if (d_g[j*n+i]==1 && cur_dis > d_d[j] + d_g[j*n+i])
            {
                cur_dis = d_d[j] + d_g[j*n+i];
                d_change = 1;                // any improvement triggers another sweep
            }
        }
    }
    __syncthreads();
    if (active)
        d_d[i] = cur_dis;
}
int h_graph[9000][9000];
// Loads the wiki-Vote edge list into a dense adjacency matrix, runs GPU
// Bellman-Ford from vertex 3 with the block size given on the command line,
// and writes the distances to bellman.txt. Prints copy ("w") and kernel
// ("e") timings in milliseconds.
int main( int argc, char* argv[] )
{
    // The block size argument is mandatory; the original dereferenced
    // argv[1] without checking argc.
    if (argc < 2)
    {
        printf("Usage: %s <block_size>\n", argv[0]);
        return 1;
    }
    FILE *fp = fopen("wiki-Vote.txt","r");
    if (fp == NULL)
    {
        printf("Could not open wiki-Vote.txt\n");
        return 1;
    }
    int source =0,dest=0, n =9000,i;
    srand(time(NULL));
    // Read the edge list into the dense adjacency matrix.
    // NOTE(review): the feof-controlled loop re-processes the final edge
    // once; harmless here since it only re-sets the same cell to 1.
    while(!feof(fp))
    {
        fscanf(fp,"%d",&source);
        fscanf(fp,"%d",&dest);
        h_graph[source][dest] = 1;
    }
    fclose(fp);
    int *d_g;
    const size_t a_size = sizeof(int) * size_t(n*n);
    int block_size = atoi(argv[1]);
    int n_blocks = n/block_size + (n%block_size==0?0:1);  // ceil(n / block_size)
    int h_s = 3;                                          // source vertex
    int h_d[9000], *d_d, k;
    // All distances start at "infinity" except the source.
    for(i=0; i<n; i++)
        h_d[i] = (int)1e5;
    h_d[h_s] = 0;
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start) ;
    cudaEventCreate(&stop) ;
    cudaEventRecord(start, 0) ;
    cudaMalloc((void **)&d_g, a_size);
    cudaMemcpy(d_g, h_graph, a_size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_d, n*sizeof(int));
    cudaMemcpy(d_d, h_d,n*sizeof(int),cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0) ;
    cudaEventSynchronize(stop) ;
    cudaEventElapsedTime(&time, start, stop) ;
    printf("w %f\n", time);
    cudaEventCreate(&start) ;
    cudaEventCreate(&stop) ;
    cudaEventRecord(start, 0) ;
    // Relax at most n-1 times; stop early once a sweep changes nothing.
    for (k=0;k<n-1;k++)
    {
        bellman_ford<<<n_blocks,block_size>>>(d_g, d_d, k, n);
        int answer;
        cudaMemcpyFromSymbol(&answer, d_change, sizeof(int), 0, cudaMemcpyDeviceToHost);
        if (answer == 0)
            break;
    }
    cudaEventRecord(stop, 0) ;
    cudaEventSynchronize(stop) ;
    cudaEventElapsedTime(&time, start, stop) ;
    printf("e %f\n", time);
    cudaEventCreate(&start) ;
    cudaEventCreate(&stop) ;
    cudaEventRecord(start, 0) ;
    cudaMemcpy(h_d, d_d,n*sizeof(int),cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0) ;
    cudaEventSynchronize(stop) ;
    cudaEventElapsedTime(&time, start, stop) ;
    printf("w %f\n", time);
    // Write the final distances to disk.
    FILE *op = fopen("bellman.txt","w");
    for (i=0;i<n;i++)
    {
        fprintf(op,"%d: %d\n",i,h_d[i]);
    }
    fclose(op);
    return 0;
}
|
2,541 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
// Writes each thread's flattened global index into x[idx] and prints it.
// Flattening order: blocks row-major over (blockIdx.x, blockIdx.y), then
// threads within a block ordered over (threadIdx.x, threadIdx.y, threadIdx.z).
// Requires x to hold gridDim.x*gridDim.y*blockDim.x*blockDim.y*blockDim.z floats.
__global__ void assign(float * x)
{
    int idx = (gridDim.y * blockIdx.x + blockIdx.y )*blockDim.x*blockDim.y*blockDim.z + blockDim.z*blockDim.y*threadIdx.x + blockDim.z*threadIdx.y + threadIdx.z;
    x[idx] = idx;
    printf("%d\n",idx);   // device-side print of the computed index
}
// Parses the launch geometry from the command line, runs the index-assignment
// kernel, and prints the array copied back from the device.
int main(int argc,char ** argv)
{
    // All five geometry parameters are required.
    if (argc != 6) {
        printf("%s usage:\n",argv[0]);
        printf(" %s gridX gridY blockX blockY blockZ\n",argv[0]);
        exit(1);
    }
    int gridX  = atoi(argv[1]);
    int gridY  = atoi(argv[2]);
    int blockX = atoi(argv[3]);
    int blockY = atoi(argv[4]);
    int blockZ = atoi(argv[5]);
    // One float per launched thread.
    int total  = blockX * blockY * blockZ * gridX * gridY;
    int nBytes = sizeof(float) * total;
    float * hostBuf = (float*) malloc(nBytes);
    float * devBuf;
    cudaMalloc((void **) &devBuf, nBytes);
    assign<<<dim3(gridX,gridY),dim3(blockX,blockY,blockZ)>>>(devBuf);
    // The blocking copy also synchronises with the kernel.
    cudaMemcpy(hostBuf,devBuf,nBytes,cudaMemcpyDeviceToHost);
    for (int i = 0; i < total; i++)
    {
        printf("%f\n",hostBuf[i]);
    }
    cudaFree(devBuf);
    free(hostBuf);
    return 0;
}
|
2,542 | #include "includes.h"
extern "C"
{
}
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n),
// one thread per element with a tail guard.
__global__ void vadd(const int n, const double *a, const double *b, double *c)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n)
        return;   // thread past the end of the vectors
    c[tid] = a[tid] + b[tid];
}
2,543 | #include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
 * mm_omp_vs_cuda.cu contains code that implements some commonly used matrix operations in CUDA, and
 * an implementation of matrix multiplication speedup via openmp; this is an exercise to compare the
 * performance of cuda and openmp, as well as a trial of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
 further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
/*
 * Matrix transpose: writes the transpose of mat_in (rows x cols, row-major)
 * into mat_out (cols x rows). Launch with a 2D grid covering cols x rows.
 */
__global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols)
{
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    // guard the grid tail in both dimensions
    if (col < cols && row < rows)
    {
        mat_out[col * rows + row] = mat_in[row * cols + col];
    }
}
2,544 | #include "includes.h"
// Element-wise product with a wrapped second operand:
// sum[i] = a[i] * b[i % n], one thread per output element.
extern "C"
__global__ void multiply(int n, float *a, float *b, float *sum)
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    const int wrapped = gid % n;
    // NOTE(review): wrapped < n holds for every gid when n > 0, so this
    // guard never filters anything -- confirm the intended bounds check.
    if (wrapped < n)
    {
        sum[gid] = a[gid] * b[wrapped];
    }
}
2,545 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include "device_launch_parameters.h"
/*
 * Enumerate CUDA devices and print each device's index and name.
 * Returns a nonzero exit code if the device query itself fails
 * (the original ignored both API return codes).
 */
int main() {
    int nDevices = 0;
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        err = cudaGetDeviceProperties(&prop, i);
        if (err != cudaSuccess) {
            fprintf(stderr, "cudaGetDeviceProperties(%d) failed: %s\n", i, cudaGetErrorString(err));
            return 1;
        }
        printf("Device number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
    }
    return 0;
}
2,546 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// CUDA example: illustrates kernel-allocated shared memory; does
// nothing useful, just copying an array from host to device global,
// then to device shared, doubling it there, then copying back to device
// global then host
// Stage one element per thread from global dv into dynamically allocated
// shared memory, double it there, and write it back to global memory.
// Launch with n threads in a single block and n*sizeof(int) shared bytes.
__global__ void doubleit(int *dv, int n)
{
    extern __shared__ int sv[];
    const int me = threadIdx.x;
    sv[me] = dv[me];      // copy global -> shared, one element per thread
    sv[me] = sv[me] * 2;  // double in shared memory
    dv[me] = sv[me];      // copy the result back to global memory
}
/*
 * Demo driver: fills an n-element host array with 0..n-1, doubles it on the
 * device through kernel-allocated shared memory, and prints the result when
 * n < 10. Usage: prog <n> (n must fit in one thread block).
 * Adds the argument and allocation checks the original omitted.
 */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <n>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);           // number of array elements
    int vsize = n * sizeof(int);     // size of the array in bytes
    int *hv = (int *) malloc(vsize); // host array
    if (hv == NULL) {
        fprintf(stderr, "host allocation of %d bytes failed\n", vsize);
        return 1;
    }
    // fill the test array with consecutive integers
    for (int i = 0; i < n; i++)
        hv[i] = i;
    int *dv; // device array
    if (cudaMalloc((void **)&dv, vsize) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        free(hv);
        return 1;
    }
    // copy host array to device array
    cudaMemcpy(dv, hv, vsize, cudaMemcpyHostToDevice);
    dim3 dimGrid(1, 1);
    dim3 dimBlock(n, 1, 1); // all n threads in the same block
    // third launch argument is the dynamic shared-memory size in bytes
    doubleit<<<dimGrid, dimBlock, vsize>>>(dv, n);
    cudaDeviceSynchronize(); // wait for the kernel to finish
    cudaMemcpy(hv, dv, vsize, cudaMemcpyDeviceToHost);
    // check results
    if (n < 10)
        for (int i = 0; i < n; i++) printf("%d\n", hv[i]);
    // clean up
    free(hv);
    cudaFree(dv);
    return 0;
}
|
2,547 | #include "includes.h"
// Scatter-compact pass over `ecount` edge slots: for every slot whose
// counter C[tid] is non-zero, record the slot index into cv at position
// offset[tid]. offset is presumably an exclusive prefix sum of the
// non-zero flags -- TODO confirm against the host-side setup; cvCount is
// unused here.
__global__ void constructCircuitGraphVertex(unsigned int * C,unsigned int * offset,unsigned int ecount, unsigned int * cv, unsigned int cvCount){
// flat global thread id for a 2D grid of 2D blocks
unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x;
if(tid < ecount){
if(C[tid]!=0){
cv[offset[tid]]=tid;
}
}
}
2,548 | #include "CmplxUtils.cuh"
/**
 * Build the unit complex number e^(i*exp) = cos(exp) + i*sin(exp).
 *
 * @param exp  phase angle in radians
 * @return     cuComplex with real part cos(exp), imaginary part sin(exp)
 */
__device__ __host__ cuComplex cuComplex_exp(float exp)
{
    // single-precision overloads avoid silent double promotion on device
    float re = cosf(exp);
    float im = sinf(exp);
    return make_cuComplex(re, im);
}
2,549 | #include <stdio.h>
__device__ const char *STR = "HELLO WORLD!";
const char STR_LENGTH = 12;
// Debug kernel: each thread prints one character of the device string STR,
// wrapping the thread index modulo STR_LENGTH.
__global__ void hello()
{
    printf("calling kernel\n");
    int pos = threadIdx.x % STR_LENGTH;
    printf("%c\n", STR[pos]);
    printf("kernel called\n");
}
/*
 * Launch the hello kernel with one thread per string character, then report
 * any launch/execution error. The original synchronized twice and discarded
 * the first result; a single checked synchronization is sufficient.
 */
int main(void)
{
    int num_threads = STR_LENGTH;
    int num_blocks = 1;
    printf("before hello\n");
    hello<<<num_blocks,num_threads>>>();
    printf("after hello\n");
    // waits for the kernel and surfaces any asynchronous execution error
    cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    return 0;
}
|
2,550 |
#include "test.cuh"
// Adds b[i] into a[i], one thread per element (single-block launch).
__global__
void hello_kernel(char *a, int *b)
{
    const int i = threadIdx.x;
    a[i] = a[i] + b[i];
}
// Host wrapper: launch hello_kernel on one block of `blocksize` threads
// (blocksize comes from test.cuh).
void hello(char *ad, int *bd)
{
    const dim3 grid(1, 1);
    const dim3 block(blocksize, 1);
    hello_kernel<<<grid, block>>>(ad, bd);
}
|
2,551 | #include "includes.h"
// Fill COO-style (row, col, value) triplets, one thread per entry i of the
// npix*nimages expansion. Presumably builds the transpose expansion of A
// (name suggests A^T) -- the mapping below is pixel-major to image-major.
__global__ void fill_AT_expansion(float* A, int* rowind, int* colind, float* val, int npix, int nimages) {
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= npix*nimages)
        return;
    const int pix = i / nimages; // pixel index
    const int img = i % nimages; // image index
    colind[i] = pix + img*npix;
    rowind[i] = pix;
    val[i] = A[colind[i]];
}
2,552 | /*
Faz a soma dos elementos de dois vetores em uma stream e em outra stream faz a multiplicacao de um vetor
por um valor escalar
Exemplifica o uso de diferentes streams (1 e 2) para computacoes
distintas. Usa cudaMallocHost para alocar memoria nao paginavel
no host e faz copia assincrona com cudaMemcpyAsync. Usa tambem
o cudaStreamSynchronize para aguardar toda a stream terminar.
O algoritmo calcula na stream 01 a soma de dois vetores e na stream 2
ele faz a multiplicacao de um vetor por um valor escalar.
O algoritmo divide "tam" elementos por "streams_nr" e encontra "threadsPerGrid" e "blocksPerGrid"
Os vetores no device tem o tamanho de threadsPerGrid.
Caso o nr de streams provoque uma divisao com resto, a ultima grade da stream deve acertar o resto.
Funcionou para teste com stream 03 e tam 16 ou 17 e TPB 2 ou 3
Caso o nr de threads por bloco provoque uma divisao com resto, o algoritmo deve funcionar
com a fitragem do excesso implementada. Funcionou com alguns testes basicos.
Para compilar: nvcc 05-streams_MIMD.cu -o 05-streams_MIMD
Para executar: ./05-streams_MIMD
OBS: os valores de tamanho do vetor e o conteudo do vetor
estao fixos no codigo
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
//Kernel que faz a soma de vetores
// Element-wise vector addition: vetorC[i] = vetorA[i] + vetorB[i] for
// i < tam. `iter` is only used in the debug trace below.
__global__ void soma(int *vetorA, int *vetorB,int *vetorC,int tam, int iter)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= tam)
        return;
    vetorC[idx] = vetorA[idx] + vetorB[idx];
    printf("Soma: Iter=%d, Blc=%d, Thread=%d, Tam=%d, VetC[%d]=%d \n", iter, blockIdx.x, threadIdx.x, tam, idx, vetorC[idx]);
}
//Kernel que faz a multiplicação de um escalar por um vetor
// In-place scalar multiplication: vetorA[i] *= escalar for i < tam.
// `iter` is only used in the debug trace below.
__global__ void mult_escalar(int *vetorA, int escalar,int tam, int iter)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= tam)
        return;
    vetorA[idx] = vetorA[idx] * escalar;
    printf("Mult: Iter=%d, Blc=%d, Thread=%d, Tam=%d, vet[%d]=%d \n", iter, blockIdx.x, threadIdx.x, tam, idx, vetorA[idx]);
}
// Driver: splits `tam` elements into streams_nr chunks; stream 1 computes a
// vector sum, stream 2 a scalar multiplication, with pinned host memory and
// asynchronous copies. (Comments translated to English; code unchanged.)
int main(int argc,char **argv)
{
//Loop index and threads-per-block count
int i,threadsPerBlock;
//Seed the pseudo-random number generator
srand(time(NULL));
//Host vectors (pinned memory, allocated below)
int *vetorA,*vetorB,*vetorC,*vetorD;
int escalar=10; //rand()%10+1;
//Device vectors used by stream 1 (vector addition)
int *vetorA_d1,*vetorB_d1,*vetorC_d1;
//Device vector used by stream 2 (scalar multiplication)
int *vetorD_d2;
//Stream handle declarations
cudaStream_t stream1,stream2;
//Create the streams
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
//Total number of vector elements
int tam = 17; // 16; // 2048;
//Threads per block
threadsPerBlock = 3; //2; //256;
//Number of chunks ("streams") the work is split into
int streams_nr = 3;
//Threads per grid (one grid at a time per stream)
int threadsPerGrid=tam/streams_nr; // threadsPerGrid=8 (16/2) // =1024 (2048/2)
//Blocks per grid (ceiling division)
int blocksPerGrid=(threadsPerGrid+threadsPerBlock-1)/threadsPerBlock; // blockPerStream=4 (8/2) // =4 (1024/256)
// (8+1)/2 ==> 4 (4,5)
// (1024+255)/256 ==> 4 (4,9960)
//
//Allocate page-locked (pinned) host memory so async copies can be used
cudaMallocHost((void**)&vetorA,tam*(sizeof(int)));
cudaMallocHost((void**)&vetorB,tam*(sizeof(int)));
cudaMallocHost((void**)&vetorC,tam*(sizeof(int)));
cudaMallocHost((void**)&vetorD,tam*(sizeof(int)));
//Allocate the device vectors for stream 1
cudaMalloc((void**)&vetorA_d1,threadsPerGrid*(sizeof(int)));
cudaMalloc((void**)&vetorB_d1,threadsPerGrid*(sizeof(int)));
cudaMalloc((void**)&vetorC_d1,threadsPerGrid*(sizeof(int)));
//Allocate the device vector for stream 2
cudaMalloc((void**)&vetorD_d2,threadsPerGrid*(sizeof(int)));
//Fill the host vectors
for(i=0;i<tam;i++)
{
vetorA[i]=i;
vetorB[i]=0; //i;
vetorD[i]=i; //10;
}
printf("Host: tam=%d, streams_nr=%d, TPG=%d, BPG=%d, TPB=%d \n", tam, streams_nr, threadsPerGrid, blocksPerGrid, threadsPerBlock);
for(i=0;i<tam;i+=threadsPerGrid)
{
// if the split leaves a remainder, the final chunk must shrink:
// it processes only the leftover elements, not a full threadsPerGrid
if((tam-i)< threadsPerGrid)
threadsPerGrid = tam - i;
//async-copy a threadsPerGrid-sized chunk of vector A to the device (stream1)
cudaMemcpyAsync(vetorA_d1,vetorA+i,threadsPerGrid*(sizeof(int)),cudaMemcpyHostToDevice,stream1);
//async-copy a threadsPerGrid-sized chunk of vector B to the device (stream1)
cudaMemcpyAsync(vetorB_d1,vetorB+i,threadsPerGrid*(sizeof(int)),cudaMemcpyHostToDevice,stream1);
//Launch the addition kernel on stream 1
soma <<<blocksPerGrid,threadsPerBlock,0,stream1>>> (vetorA_d1,vetorB_d1,vetorC_d1,threadsPerGrid,i);
//Copy the stream-1 result chunk back to the host
cudaMemcpyAsync(vetorC+i,vetorC_d1,threadsPerGrid*(sizeof(int)),cudaMemcpyDeviceToHost,stream1);
//async-copy a threadsPerGrid-sized chunk of vector D to the device (stream2)
cudaMemcpyAsync(vetorD_d2,vetorD+i,threadsPerGrid*(sizeof(int)),cudaMemcpyHostToDevice,stream2);
//Launch the scalar-multiplication kernel on stream 2
mult_escalar <<<blocksPerGrid,threadsPerBlock,0,stream2>>> (vetorD_d2,escalar,threadsPerGrid,i);
//Copy the stream-2 result chunk back to the host
cudaMemcpyAsync(vetorD+i,vetorD_d2,threadsPerGrid*(sizeof(int)),cudaMemcpyDeviceToHost,stream2);
}
//Wait for both streams to drain
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
printf("Soma dos vetores:\n");
//Print the vector-addition result
for(i=0;i<tam;i++)
{
printf("%d ",vetorC[i]);
}
printf("\n");
printf("Multiplicação pelo escalar:\n");
//Print the scalar-multiplication result
for(i=0;i<tam;i++)
{
printf("%d ",vetorD[i]);
}
printf("\n");
//Release the pinned host buffers
cudaFreeHost(vetorA);
cudaFreeHost(vetorB);
cudaFreeHost(vetorC);
cudaFreeHost(vetorD);
//Release the stream-1 device buffers
cudaFree(vetorA_d1);
cudaFree(vetorB_d1);
cudaFree(vetorC_d1);
//Release the stream-2 device buffer
cudaFree(vetorD_d2);
//Destroy the streams
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
}
2,553 | #include<iostream>
#include<vector>
#include<random>
const int SHARED_MEM = 256;
/*
 * Block-reduced dot product of x and y (length N), accumulated into *dot
 * with one atomicAdd per block. *dot must be zeroed before launch.
 * Assumes blockDim.x is a power of two and <= SHARED_MEM.
 *
 * Fixes over the original:
 *  - cache[threadIdx.x] is initialized before accumulation (shared memory
 *    is uninitialized on entry, so the original read garbage);
 *  - the stride is blockDim.x * gridDim.x, so blocks no longer overlap
 *    and double-count elements;
 *  - __syncthreads() inside the reduction loop removes the data race
 *    between reduction steps.
 */
__global__ void dotProduct(int *x, int *y, int *dot, int N){
    int index = blockDim.x*blockIdx.x+threadIdx.x;
    __shared__ int cache[SHARED_MEM];
    int partial = 0;
    int offset = 0;
    int stride = blockDim.x*gridDim.x; // grid-wide stride
    while(index+offset<N){
        partial += x[index+offset]*y[index+offset];
        offset += stride;
    }
    cache[threadIdx.x] = partial;
    __syncthreads();
    // tree reduction in shared memory
    int i = blockDim.x/2;
    while(i > 0){
        if(threadIdx.x < i){
            cache[threadIdx.x] += cache[threadIdx.x+i];
        }
        __syncthreads();
        i/=2;
    }
    if(threadIdx.x == 0){
        atomicAdd(dot, cache[0]);
    }
}
/*
 * Host driver: builds two random int vectors, computes their dot product on
 * the GPU via dotProduct, and prints the result. Adds a launch-error check
 * the original omitted.
 */
int main(){
    const int N = 10240;
    const size_t size = N*sizeof(int);
    std::vector<int> h_x(N);
    std::vector<int> h_y(N);
    for(auto i = 0; i < N; i++){
        h_x[i] = rand()%10;
        h_y[i] = rand()%10;
    }
    int h_dot = 0; // host-side result
    int *d_x, *d_y, *d_dot;
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_dot, sizeof(int));
    cudaMemset(d_dot, 0, sizeof(int)); // kernel accumulates with atomicAdd
    cudaMemcpy(d_x, h_x.data(), size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y.data(), size, cudaMemcpyHostToDevice);
    auto threadsPerBlock = 32;
    auto blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;
    dotProduct<<<blocksPerGrid,threadsPerBlock>>>(d_x,d_y,d_dot,N);
    cudaError_t err = cudaGetLastError(); // surface launch-configuration errors
    if (err != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    // the blocking copy also synchronizes with the kernel
    cudaMemcpy(&h_dot, d_dot, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << h_dot << std::endl;
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_dot);
    return 0;
}
|
2,554 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define ROW_NUM 10000100
#define COLUMN_NUM 4
#define CONSTRAINT_MAX 100000
/*
 * Grid-stride filter over a column-major table of ROW_NUM rows:
 * result[i] = 1 when the first three columns of row i all lie strictly
 * between 1000 and CONSTRAINT_MAX, else 0. Hoists the repeated column
 * loads into a small loop for readability; behavior is unchanged.
 */
__global__
void request(int *tab, int *result)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int row = first; row < ROW_NUM; row += step)
    {
        bool keep = true;
        for (int col = 0; col < 3; ++col)
        {
            const int v = tab[row + ROW_NUM*col];
            if (v <= 1000 || v >= CONSTRAINT_MAX)
            {
                keep = false;
                break;
            }
        }
        result[row] = keep ? 1 : 0;
    }
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a diagnostic (and optionally exit with the error code) when a CUDA
// API call fails; used through the gpuErrchk macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/*
 * Benchmark driver: fills a ROW_NUM x COLUMN_NUM column-major table with
 * pseudo-random values, times the `request` filter kernel with CUDA events,
 * and prints the elapsed milliseconds.
 *
 * Changes over the original: CUDA calls are checked through the file's
 * gpuErrchk macro, the events are destroyed, the pointless H2D copy of the
 * uninitialized result buffer was dropped (the kernel writes every element),
 * and the large commented-out duplicate of this code was removed.
 */
int main(void)
{
    int *x, *y, *d_x, *d_y;
    x = (int*)malloc(ROW_NUM*COLUMN_NUM*sizeof(int));
    y = (int*)malloc(ROW_NUM*sizeof(int));
    gpuErrchk(cudaMalloc(&d_x, ROW_NUM*COLUMN_NUM*sizeof(int)));
    gpuErrchk(cudaMalloc(&d_y, ROW_NUM*sizeof(int)));
    cudaEvent_t start, stop;
    gpuErrchk(cudaEventCreate(&start));
    gpuErrchk(cudaEventCreate(&stop));
    srand(0);
    // fill the first COLUMN_NUM-1 columns with pseudo-random values
    for(int column=0;column<COLUMN_NUM-1;++column)
    {
        for(int row=0;row<ROW_NUM;++row)
        {
            x[ROW_NUM*column+row] = rand()%1000000;
        }
    }
    gpuErrchk(cudaMemcpy(d_x, x, ROW_NUM*COLUMN_NUM*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaEventRecord(start));
    request<<<(ROW_NUM+255)/256, 256>>>(d_x, d_y);
    gpuErrchk(cudaGetLastError()); // surface launch-configuration errors
    gpuErrchk(cudaEventRecord(stop));
    gpuErrchk(cudaMemcpy(y, d_y, ROW_NUM*sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaEventSynchronize(stop));
    float milliseconds = 0;
    gpuErrchk(cudaEventElapsedTime(&milliseconds, start, stop));
    std::cout << milliseconds;
    gpuErrchk(cudaEventDestroy(start));
    gpuErrchk(cudaEventDestroy(stop));
    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
    return 0;
}
2,555 | #include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
#include <vector>
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t unsigned int
#define uint64_t unsigned long long
using namespace std;
//
// DEFAULt functions for work with cuda
//
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
cudaEvent_t start, stop;
float t;
// Create the file-global timing events `start`/`stop` and record the start
// timestamp on the default stream. Pair with time_end().
void time_start() {
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&stop));
CSC(cudaEventRecord(start, 0));
}
// Record the stop event, wait for it, print the elapsed time in
// milliseconds (into the file-global `t`), and destroy both events.
// The cudaGetLastError call also surfaces any pending launch error.
void time_end() {
CSC(cudaGetLastError());
CSC(cudaEventRecord(stop, 0));
CSC(cudaEventSynchronize(stop));
CSC(cudaEventElapsedTime(&t, start, stop));
printf("time = %f\n", t);
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(stop));
}
// 2x2 minor of a 3x3 matrix: the determinant of the submatrix obtained by
// deleting row i and column j (row-major order is preserved).
double minor3x3(double m[3][3], int i, int j) {
    double sub[4];
    int n = 0;
    for (int r = 0; r < 3; r++) {
        if (r == i) continue;
        for (int c = 0; c < 3; c++) {
            if (c == j) continue;
            sub[n++] = m[r][c];
        }
    }
    return sub[0] * sub[3] - sub[1] * sub[2];
}
// Cofactor of entry (i, j): the minor with sign (-1)^(i+j).
double cof3x3(double m[3][3], int i, int j) {
    const double sign = ((i + j) % 2 == 1) ? -1.0 : 1.0;
    return sign * minor3x3(m, i, j);
}
// Determinant of a 3x3 matrix by cofactor expansion along the first row.
double det3x3(double m[3][3]) {
    double acc = 0;
    for (int col = 0; col < 3; col++)
        acc += m[0][col] * cof3x3(m, 0, col);
    return acc;
}
// Simple binary image container: file layout is width, height (uint32 each)
// followed by w*h uchar4 pixels.
// Fix over the original: the path constructor now initializes its members
// first -- previously a failed fopen returned with w/h/size/data all
// uninitialized, so the destructor deleted a garbage pointer (UB). fread
// results are also checked now.
class IMG {
public:
uint32_t w;      // image width in pixels
uint32_t h;      // image height in pixels
uint64_t size;   // total pixel count (w * h)
uchar4 *data;    // pixel buffer, or NULL when empty
IMG() : w(0), h(0), size(0), data(NULL) {}
// Load an image; on any failure the object is left in the empty state.
IMG(string path) : w(0), h(0), size(0), data(NULL) {
FILE *fin = fopen(path.c_str(), "rb");
if (!fin) {
cout << "Wrong file name " << path << "!\n";
return;
}
if (fread(&w, sizeof(uint32_t), 1, fin) != 1 ||
    fread(&h, sizeof(uint32_t), 1, fin) != 1) {
cout << "Corrupted file " << path << "!\n";
fclose(fin);
w = 0; h = 0;
return;
}
size = (uint64_t)w * h;
data = new uchar4[size];
if (fread(data, sizeof(uchar4), size, fin) != size) {
// truncated pixel payload: drop the partial image
cout << "Corrupted file " << path << "!\n";
delete[] data;
data = NULL;
w = 0; h = 0; size = 0;
}
fclose(fin);
}
~IMG() {
if (data != NULL) {
delete[] data;
}
}
// Write the image back out in the same binary layout.
void toFile(string path) {
FILE *fout = fopen(path.c_str(), "wb");
if (!fout) {
cout << "Wrong file name " << path << "!\n";
return;
}
fwrite(&w, sizeof(uint32_t), 1, fout);
fwrite(&h, sizeof(uint32_t), 1, fout);
fwrite(data, sizeof(uchar4), size, fout);
fclose(fout);
}
};
//
// main programm
//
#define pixel uchar4
#define p(x, y) data[y*w + x]
vector<vector<pixel> > cls;
__constant__ double GPU_AVG[50][3];
__constant__ double GPU_INVERT_COV[50][3][3];
// Score of pixel p against class ci: the NEGATED quadratic form
// -(p - avg)^T * INVERT_COV * (p - avg), using the per-class mean and
// inverse covariance stored in constant memory. Because the value is
// negated, a LARGER score means a closer match (see classify()).
__device__ double mahalanobis(const pixel p, int ci) {
// deviation of the pixel's RGB channels from the class mean
double v[3] = {p.x - GPU_AVG[ci][0],
p.y - GPU_AVG[ci][1],
p.z - GPU_AVG[ci][2]
};
double tmp[3] = {0, 0, 0}, res = 0;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
tmp[i] += v[j] * GPU_INVERT_COV[ci][j][i];
}
res -= tmp[i] * v[i];
}
return res;
}
// Assign pixel p to the class (stored in p.w) with the maximal
// mahalanobis() score among classes 0..cnum-1.
__device__ void classify(pixel &p, int cnum) {
    double best = mahalanobis(p, 0);
    p.w = 0;
    for (char ci = 1; ci < cnum; ++ci) {
        const double score = mahalanobis(p, ci);
        if (score > best) {
            best = score;
            p.w = ci;
        }
    }
}
// 2D grid-stride loop over the w x h image: classify every pixel in place
// (the p(x, y) macro indexes `data` using the local `w`).
__global__ void kernel(uchar4 *data, uint32_t w, uint32_t h, int cnum) {
    const int x0 = blockDim.x * blockIdx.x + threadIdx.x;
    const int y0 = blockDim.y * blockIdx.y + threadIdx.y;
    const int xStep = blockDim.x * gridDim.x;
    const int yStep = blockDim.y * gridDim.y;
    for (int x = x0; x < w; x += xStep)
        for (int y = y0; y < h; y += yStep)
            classify(p(x, y), cnum);
}
// Pipeline: read training samples per class from stdin, compute each
// class's mean and inverse covariance on the host, upload them to constant
// memory, classify every pixel on the GPU, and write the result image.
// Note: cavg/ccov are C variable-length arrays (a GCC extension in C++),
// and the p(x, y) macro below expands to img.data[y*w + x] using the
// local variable `w` -- do not remove it.
int main() {
int cnum = 0;
string in, out;
// input file, output file, number of classes
cin >> in >> out >> cnum;
cls.resize(cnum, vector<pixel>());
double cavg[cnum][3];
double ccov[cnum][3][3];
memset(cavg, 0, sizeof(double) * cnum * 3);
memset(ccov, 0, sizeof(double) * cnum * 9);
IMG img(in);
uint32_t w = img.w;
// accumulate per-class channel sums from the listed sample coordinates
for (int i = 0; i < cnum; ++i) {
int csize, x, y;
cin >> csize;
for (int j = 0; j < csize; ++j) {
cin >> x >> y;
pixel p = img.p(x, y);
cls[i].push_back(p);
cavg[i][0] += p.x;
cavg[i][1] += p.y;
cavg[i][2] += p.z;
}
for (int j = 0; j < 3; ++j) cavg[i][j] /= csize;
}
// per-class sample covariance, then invert it in place
for (int c = 0; c < cnum; ++c) {
int csize = cls[c].size();
for (int i = 0; i < csize; ++i) {
pixel p = cls[c][i];
double coeff[3] = {p.x - cavg[c][0],
p.y - cavg[c][1],
p.z - cavg[c][2]
};
for (int r = 0; r < 3; ++r) {
for (int s = 0; s < 3; ++s) {
ccov[c][r][s] += coeff[r] * coeff[s];
}
}
}
// unbiased estimate: divide by (n - 1)
for (int r = 0; r < 3; ++r) {
for (int s = 0; s < 3; ++s) {
ccov[c][r][s] /= csize - 1;
}
}
double det = det3x3(ccov[c]);
double icov[3][3];
// adjugate / det; a singular matrix falls back to the identity
for (int r = 0; r < 3; ++r) {
for (int s = 0; s < 3; ++s) {
if (det == 0) {
icov[s][r] = (r == s ? 1 : 0);
} else {
icov[s][r] = cof3x3(ccov[c], r, s) / det;
}
}
}
for (int r = 0; r < 3; ++r) {
for (int s = 0; s < 3; ++s) {
ccov[c][r][s] = icov[r][s];
}
}
}
uchar4 *result;
// upload the class statistics to constant memory
CSC(cudaMemcpyToSymbol(GPU_AVG, cavg, sizeof(double) * cnum * 3));
CSC(cudaMemcpyToSymbol(GPU_INVERT_COV, ccov, sizeof(double) * cnum * 9));
CSC(cudaMalloc(&result, sizeof(uchar4) * img.size));
CSC(cudaMemcpy(result, img.data, sizeof(uchar4) * img.size, cudaMemcpyHostToDevice));
kernel<<<dim3(16, 16), dim3(16, 16)>>>(result, img.w, img.h, cnum);
CSC(cudaMemcpy(img.data, result, sizeof(uchar4) * img.size, cudaMemcpyDeviceToHost));
CSC(cudaFree(result));
img.toFile(out);
}
|
2,556 | #include <stdio.h>
#include <chrono>
using namespace std::chrono;
#define N 2048 * 2048 // Number of elements in each vector
/*
* Optimize this already-accelerated codebase. Work iteratively
* and use profiler to check your progress
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 25us.
*
* Some bugs have been placed in this codebase for your edification.
*/
// Grid-stride fill: set every element of a[0..Size) to num.
__global__ void initWith(float num, float *a, int Size)
{
    const int first = threadIdx.x + blockIdx.x * blockDim.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = first; i < Size; i += step)
        a[i] = num;
}
// c[i] = 2 * a[i] + b[i] over the N-element vectors (N is the file macro).
__global__ void saxpy(float * a, float * b, float * c)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;
    c[tid] = a[tid] * 2 + b[tid];
}
/*
 * saxpy demo on managed memory: initializes a and b on the device in
 * separate streams, runs c = 2a + b, and prints boundary values of c.
 *
 * Fixes over the original:
 *  - the byte count uses sizeof(float) (the vectors are float, not int);
 *  - the timed interval now spans a device synchronization, so it measures
 *    kernel execution rather than just the asynchronous launch;
 *  - the chrono duration is printed via .count() (passing the duration
 *    object itself to printf "%d" is undefined behavior);
 *  - proper ceil-division for the saxpy grid size.
 */
int main()
{
    int deviceId;
    int numberOfSMs;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
    float *a, *b, *c;
    size_t size = N * sizeof(float); // total bytes per vector
    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);
    cudaStream_t stream1, stream2, stream3;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    cudaStreamCreate(&stream3);
    // move the managed buffers to the device before the first touch
    cudaMemPrefetchAsync(a, size, deviceId);
    cudaMemPrefetchAsync(b, size, deviceId);
    cudaMemPrefetchAsync(c, size, deviceId);
    int threads_per_block = 256;
    int number_of_blocks = 32 * numberOfSMs;
    int number_of_blocks_kernel = (N + threads_per_block - 1) / threads_per_block;
    initWith<<<number_of_blocks, threads_per_block, 0, stream1>>>(2, a, N);
    initWith<<<number_of_blocks, threads_per_block, 0, stream2>>>(1, b, N);
    // saxpy runs on the legacy default stream, which synchronizes with the
    // init kernels in stream1/stream2
    auto start_fn = high_resolution_clock::now();
    saxpy<<<number_of_blocks_kernel, threads_per_block>>>(a, b, c);
    cudaDeviceSynchronize(); // include kernel execution in the measurement
    auto stop_fn = high_resolution_clock::now();
    auto duration_fn = duration_cast<microseconds>(stop_fn - start_fn);
    cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
    printf("Time: %lld us \n", (long long)duration_fn.count());
    // Print the first and last 5 values of c for a quality check
    for (int i = 0; i < 5; ++i)
        printf("c[%d] = %f, ", i, c[i]);
    printf("\n");
    for (int i = N-5; i < N; ++i)
        printf("c[%d] = %f, ", i, c[i]);
    printf("\n");
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaStreamDestroy(stream3);
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}
|
2,557 | // To compile - gcc -o 3dFDTD FDTD3D.c -lm
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
/*
 * 3D FDTD field update on a (imax+1)x(jmax+1)x(kmax+1) grid with a Gaussian
 * point source at the center; times the update loop with CUDA events (all
 * the work is host-side) and dumps Ez samples to "parlleloutput.dat".
 *
 * Fixes over the original:
 *  - the output loop indexed the triple-pointer Ez with a single flattened
 *    subscript (Ez[(z*imax)+(y*jmax)+x]), which formatted POINTER values
 *    with %e (undefined behavior); it now uses Ez[z][y][x];
 *  - the timing events and all heap allocations are released.
 * NOTE(review): the E/H update stencils mix Hz and Hy in unusual places
 * compared to a standard Yee scheme -- left unchanged; confirm the physics
 * against the reference implementation.
 */
int main() {
    int imax = 100, jmax = 100, nmax = 1000, nhalf = 20, no = nhalf*3, kmax = 100;
    int i, j, n, k;
    double c = 2.99792458e8, pi = 3.141592654, sigma = 0, mu = 4.0 * pi * 1.0e-7, eps = 8.85418782e-12;
    double delta = 1e-3;                   // spatial step
    double dt = delta/(c*1.41421356237);   // time step (CFL-limited)
    double ***Ex, ***Ey, ***Ez, ***Hy, ***Hx, ***Hz;
    cudaEvent_t start_event, stop_event;
    float elapsed_time;
    // allocate six (imax+1) x (jmax+1) x (kmax+1) field arrays
    Ex = (double ***)malloc((imax+1)*sizeof(double **));
    Ey = (double ***)malloc((imax+1)*sizeof(double **));
    Ez = (double ***)malloc((imax+1)*sizeof(double **));
    Hx = (double ***)malloc((imax+1)*sizeof(double **));
    Hy = (double ***)malloc((imax+1)*sizeof(double **));
    Hz = (double ***)malloc((imax+1)*sizeof(double **));
    for (i = 0; i < (imax+1); i++) {
        Ex[i] = (double **)malloc((jmax+1)*sizeof(double *));
        Ey[i] = (double **)malloc((jmax+1)*sizeof(double *));
        Ez[i] = (double **)malloc((jmax+1)*sizeof(double *));
        Hx[i] = (double **)malloc((jmax+1)*sizeof(double *));
        Hy[i] = (double **)malloc((jmax+1)*sizeof(double *));
        Hz[i] = (double **)malloc((jmax+1)*sizeof(double *));
        for (j = 0; j < (jmax+1); j++) {
            Ex[i][j] = (double *)malloc((kmax+1)*sizeof(double));
            Ey[i][j] = (double *)malloc((kmax+1)*sizeof(double));
            Ez[i][j] = (double *)malloc((kmax+1)*sizeof(double));
            Hx[i][j] = (double *)malloc((kmax+1)*sizeof(double));
            Hy[i][j] = (double *)malloc((kmax+1)*sizeof(double));
            Hz[i][j] = (double *)malloc((kmax+1)*sizeof(double));
        }
    }
    // zero-initialize every field component
    for (k = 0; k < (kmax+1); k++) {
        for (j = 0; j < (jmax+1); j++) {
            for (i = 0; i < (imax+1); i++) {
                Ex[i][j][k] = 0.0;
                Ey[i][j][k] = 0.0;
                Ez[i][j][k] = 0.0;
                Hx[i][j][k] = 0.0;
                Hy[i][j][k] = 0.0;
                Hz[i][j][k] = 0.0;
            }
        }
    }
    double Ca, Cb, Da, Db;
    char buf[18];
    // update coefficients for a (possibly lossy) medium
    Ca = (1-((sigma*dt)/(2*eps)))/(1+((sigma*dt)/(2*eps)));
    Cb = (dt/(eps*delta))/(1+((sigma*dt)/(2*eps)));
    Da = (1-((sigma*dt)/(2*mu)))/(1+((sigma*dt)/(2*mu)));
    Db = (dt/(mu*delta))/(1+((sigma*dt)/(2*mu)));
    cudaEventCreate(&start_event);
    cudaEventCreate(&stop_event);
    cudaEventRecord(start_event, 0);
    for (n = 0; n < nmax; n++) {
        for (i = 0; i < imax; i++) {
            for (j = 1; j < jmax; j++) {
                for (k = 1; k < kmax; k++) {
                    Ex[i][j][k] = Ca*Ex[i][j][k] + Cb*((Hz[i][j][k] - Hy[i][j-1][k]) + (Hy[i][j][k-1] - Hy[i][j][k]));
                }
            }
        }
        for (i = 1; i < imax; i++) {
            for (j = 0; j < jmax; j++) {
                for (k = 1; k < kmax; k++) {
                    Ey[i][j][k] = Ca*Ey[i][j][k] + Cb*((Hz[i-1][j][k] - Hy[i][j][k]) + (Hy[i][j][k] - Hy[i][j][k-1]));
                }
            }
        }
        for (i = 1; i < imax; i++) {
            for (j = 1; j < jmax; j++) {
                for (k = 0; k < kmax; k++) {
                    Ez[i][j][k] = Ca*Ez[i][j][k] + Cb*((Hz[i][j][k] - Hy[i-1][j][k]) + (Hy[i][j-1][k] - Hy[i][j][k]));
                }
            }
        }
        // Gaussian pulse hard source at the grid center
        Ez[imax/2][jmax/2][kmax/2] = exp(-(pow(((n-no)/(double)nhalf),2.0)));
        for (i = 1; i < imax; i++) {
            for (j = 0; j < jmax-1; j++) {
                for (k = 0; k < kmax-1; k++) {
                    Hx[i][j][k] = Da*Hx[i][j][k] + Db*((Ez[i][j][k] - Ez[i][j+1][k]) + (Ez[i][j][k+1]-Ez[i][j][k]));
                }
            }
        }
        for (i = 0; i < imax; i++) {
            for (j = 1; j < jmax-1; j++) {
                for (k = 0; k < kmax-1; k++) {
                    Hy[i][j][k] = Da*Hy[i][j][k] + Db*((Ez[i+1][j][k] - Ez[i][j][k]) + (Ez[i][j][k]-Ez[i][j][k+1]));
                }
            }
        }
        for (i = 0; i < imax; i++) {
            for (j = 0; j < jmax-1; j++) {
                for (k = 1; k < kmax-1; k++) {
                    Hz[i][j][k] = Da*Hz[i][j][k] + Db*((Ez[i][j][k] - Ez[i+1][j][k]) + (Ez[i][j+1][k]-Ez[i][j][k]));
                }
            }
        }
    }
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event);
    cudaEventElapsedTime(&elapsed_time, start_event, stop_event);
    cudaEventDestroy(start_event);
    cudaEventDestroy(stop_event);
    FILE * fPointer = fopen("parlleloutput.dat","w");
    if (fPointer != NULL) {
        int x, y, z;
        for (x = 0; x < kmax; x++) {
            for (y = 1; y < jmax; y++) {
                for (z = 1; z < imax; z++) {
                    memset(buf, 0, 18);
                    sprintf(buf, "%e\n", Ez[z][y][x]); // FIX: proper 3D indexing
                    fputs(buf, fPointer);
                }
            }
        }
        fclose(fPointer);
    }
    // calculate run time
    printf("GPU Time: %.2f\n", elapsed_time);
    // release the field storage
    for (i = 0; i < (imax+1); i++) {
        for (j = 0; j < (jmax+1); j++) {
            free(Ex[i][j]); free(Ey[i][j]); free(Ez[i][j]);
            free(Hx[i][j]); free(Hy[i][j]); free(Hz[i][j]);
        }
        free(Ex[i]); free(Ey[i]); free(Ez[i]);
        free(Hx[i]); free(Hy[i]); free(Hz[i]);
    }
    free(Ex); free(Ey); free(Ez); free(Hx); free(Hy); free(Hz);
    return 0;
}
|
2,558 | #include "includes.h"
// ReLU backward pass: zero the gradient dy[i] wherever the forward
// activation y[i] was non-positive. Grid-stride loop over n elements.
__global__ void _reluback(int n, float *y, float *dy) {
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
        if (y[i] <= 0)
            dy[i] = 0;
    }
}
2,559 | #include "includes.h"
#define TB 128
#define GS(x) (((x) - 1) / TB + 1)
// Replicate-pad one side of each length-size3 row of a flat buffer holding
// size23 elements: side 0 copies the value at column pad_size into columns
// [0, pad_size); side 1 copies the value at column size3-pad_size-1 into
// the last pad_size columns. One thread per element.
__global__ void fix_border_(float *input, int pad_size, int side, int size3, int size23)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= size23)
        return;
    const int col = id % size3;
    const int row = id / size3;
    if (side == 0 && col < pad_size) {
        input[id] = input[row * size3 + pad_size];
    } else if (side == 1 && col > size3 - pad_size - 1) {
        input[id] = input[row * size3 + size3 - pad_size - 1];
    }
}
2,560 | /*********************************************************************
* Copyright © 2011-2014,
* Marwan Abdellah: <abdellah.marwan@gmail.com>
*
* This library (cufftShift) is free software; you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
********************************************************************/
#ifndef CUFFTSHIFT_3D_SINGLE_ARRAY_CU
#define CUFFTSHIFT_3D_SINGLE_ARRAY_CU
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cufft.h>
// #include <cutil_inline.h>
// In-place 3D fftshift of one z-slice of a cubic N^3 volume (N even).
// Each thread handles one (x, y) element of slice zIndex and swaps it with
// its diagonally opposite octant using the precomputed offsets sEq1..sEq4.
// Only slices with zIndex < N/2 do work -- each swap moves a pair, so the
// upper half is covered by the swaps initiated from the lower half.
// Launch with a 2D grid/block covering N x N; see cufftShift_3D_kernel.
template <typename T>
__global__
void cufftShift_3D_slice_kernel(T* data, int N, int zIndex)
{
// 3D Volume & 2D Slice & 1D Line
int sLine = N;
int sSlice = N * N;
int sVolume = N * N * N;
// Transformations Equations
int sEq1 = (sVolume + sSlice + sLine) / 2;
int sEq2 = (sVolume + sSlice - sLine) / 2;
int sEq3 = (sVolume - sSlice + sLine) / 2;
int sEq4 = (sVolume - sSlice - sLine) / 2;
// Thread
int xThreadIdx = threadIdx.x;
int yThreadIdx = threadIdx.y;
// Block Width & Height
int blockWidth = blockDim.x;
int blockHeight = blockDim.y;
// Thread Index 2D
int xIndex = blockIdx.x * blockWidth + xThreadIdx;
int yIndex = blockIdx.y * blockHeight + yThreadIdx;
// Thread Index Converted into 1D Index
int index = (zIndex * sSlice) + (yIndex * sLine) + xIndex;
T regTemp;
if (zIndex < N / 2)
{
if (xIndex < N / 2)
{
if (yIndex < N / 2)
{
regTemp = data[index];
// First Quad
data[index] = data[index + sEq1];
// Fourth Quad
data[index + sEq1] = regTemp;
}
else
{
regTemp = data[index];
// Third Quad
data[index] = data[index + sEq3];
// Second Quad
data[index + sEq3] = regTemp;
}
}
else
{
if (yIndex < N / 2)
{
regTemp = data[index];
// Second Quad
data[index] = data[index + sEq2];
// Third Quad
data[index + sEq2] = regTemp;
}
else
{
regTemp = data[index];
// Fourth Quad
data[index] = data[index + sEq4];
// First Quad
data[index + sEq4] = regTemp;
}
}
}
}
// Host-side driver: fftshift a cubic N^3 volume in place, one kernel launch
// per z-slice. The slice kernel is a no-op for zIndex >= N/2 (each launch
// for zIndex < N/2 swaps a PAIR of octant elements), so only the first N/2
// slices need to be launched -- the original launched all N and wasted half
// the launches.
template <typename T>
void cufftShift_3D_kernel(T* data, int N, dim3 block, dim3 grid)
{
    for (int i = 0; i < N / 2; i++)
        cufftShift_3D_slice_kernel <<< grid, block >>> (data, N, i);
}
template
void cufftShift_3D_kernel <cufftReal>
(cufftReal* data, int N, dim3 block, dim3 grid);
template
void cufftShift_3D_kernel <cufftDoubleReal>
(cufftDoubleReal* data, int N, dim3 block, dim3 grid);
template
void cufftShift_3D_kernel <cufftComplex>
(cufftComplex* data, int N, dim3 block, dim3 grid);
template
void cufftShift_3D_kernel <cufftDoubleComplex>
(cufftDoubleComplex* data, int N, dim3 block, dim3 grid);
#endif // CUFFTSHIFT_3D_SINGLE_ARRAY_CU
|
// Canny non-maximum suppression, one thread per pixel of a
// parser_length x offset image (offset = row stride). The gradient
// magnitude mag[idx] is kept in nms[idx] only if it is >= both neighbor
// magnitudes interpolated along the gradient direction (deltaX, deltaY);
// otherwise (and on all image borders) nms[idx] is set to 0.
extern "C" __global__
void cu_suppress_non_max(float* mag, float* deltaX, float* deltaY, float* nms,
long parser_length, long offset)
{
const int SUPPRESSED = 0;
long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= 0 && idx < parser_length * offset)
{
float alpha;
float mag1, mag2;
// put zero all boundaries of image
// TOP edge line of the image
if((idx >= 0) && (idx <offset))
nms[idx] = SUPPRESSED;
// BOTTOM edge line of image
else if((idx >= (parser_length-1)*offset) && (idx < (offset * parser_length)))
nms[idx] = SUPPRESSED;
// LEFT & RIGHT edge line
else if(((idx % offset)==0) || ((idx % offset)==(offset - 1)))
{
nms[idx] = SUPPRESSED;
}
else // not the boundaries
{
// if magnitude = 0, no edge
if(mag[idx] == 0)
nms[idx] = SUPPRESSED;
else{
// eight direction sectors; in each, alpha is the interpolation
// weight between the two neighbors straddling the gradient line
if(deltaX[idx] >= 0)
{
if(deltaY[idx] >= 0) // dx >= 0, dy >= 0
{
if((deltaX[idx] - deltaY[idx]) >= 0) // direction 1 (SEE, South-East-East)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
}
else // direction 2 (SSE)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
}
}
else // dx >= 0, dy < 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 8 (NEE)
{
alpha = (float)-deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
}
else // direction 7 (NNE)
{
alpha = (float)deltaX[idx] / -deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
}
}
else
{
if(deltaY[idx] >= 0) // dx < 0, dy >= 0
{
if((deltaX[idx] + deltaY[idx]) >= 0) // direction 3 (SSW)
{
alpha = (float)-deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
}
else // direction 4 (SWW)
{
alpha = (float)deltaY[idx] / -deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
}
}
else // dx < 0, dy < 0
{
if((-deltaX[idx] + deltaY[idx]) >= 0) // direction 5 (NWW)
{
alpha = (float)deltaY[idx] / deltaX[idx];
mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
}
else // direction 6 (NNW)
{
alpha = (float)deltaX[idx] / deltaY[idx];
mag1 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
mag2 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
}
}
}
// non-maximal suppression
// compare mag1, mag2 and mag[t]
// if mag[t] is smaller than one of the neighbours then suppress it
if((mag[idx] < mag1) || (mag[idx] < mag2))
nms[idx] = SUPPRESSED;
else
{
nms[idx] = mag[idx];
}
} // END OF ELSE (mag != 0)
} // END OF FOR(j)
} // END OF FOR(i)
}
2,562 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include <stdio.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <iostream>
// Standard normal CDF: N(x) = 0.5 * (1 + erf(x / sqrt(2))).
// Uses erff and float literals so the expression stays in single precision;
// the original used double erf/0.5, silently promoting every call to double.
__device__ __host__ __inline__ float N(float x)
{
    return 0.5f + 0.5f * erff(x * (float)M_SQRT1_2);
}
// Black-Scholes European option pricing.
// k: strike, s: spot, t: time to expiry (years), r: risk-free rate,
// v: volatility. Writes the call price to *c and the put price to *p
// (put derived from the call via put-call parity).
__device__ __host__ void price(float k, float s, float t, float r, float v, float *c, float *p)
{
    float srt = v * sqrtf(t);                                  // vol * sqrt(T)
    float d1 = (logf(s/k) + (r + 0.5f*v*v)*t) / srt;           // 0.5f keeps math in float
    float d2 = d1 - srt;
    float kert = k * expf(-r * t);                             // discounted strike
    *c = N(d1)*s - N(d2)*kert;
    *p = kert - s + *c;                                        // put-call parity
}
// One thread per option: gather this thread's scalar inputs and write
// the call/put prices into the corresponding output slots.
__global__ void price(float *k, float *s, float *t, float *r, float *v, float *c, float *p)
{
    const int i = threadIdx.x;
    price(k[i], s[i], t[i], r[i], v[i], c + i, p + i);
}
// Driver: allocate five random input arrays (k, s, t, r, v), price `count`
// options on the GPU, and print the first few call prices.
int main()
{
    const int count = 512;
    size_t size = count * sizeof(float);
    float *args[5];
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
    for (int i = 0; i < 5; ++i)
    {
        cudaMalloc(&args[i], size);
        // BUG FIX: the inputs were allocated but never initialized; fill
        // each argument array with uniform (0,1] pseudo-random values.
        curandGenerateUniform(gen, args[i], count);
    }
    float *dc, *dp;
    //XXX: check cuda error
    cudaMalloc(&dc, size);
    cudaMalloc(&dp, size);
    //linear grid
    //It's a gather pattern, so the layout of the grid/block doesn't matter here.
    price<<<1, count>>> (args[0], args[1], args[2], args[3], args[4], dc, dp);
    float *hc = (float*)malloc(size);
    // BUG FIX: copy the results back BEFORE freeing the device buffer;
    // the original freed dc/dp first and then read from freed device memory.
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; ++i)
    {
        std::cout << hc[i] << std::endl;
    }
    free(hc);
    // Release all device resources and the generator (original leaked args/gen).
    cudaFree(dc);
    cudaFree(dp);
    for (int i = 0; i < 5; ++i)
        cudaFree(args[i]);
    curandDestroyGenerator(gen);
}
2,563 | #include "cuda_kernel_func.cuh"
// Launch element-wise subtraction res = a - b over n device floats.
extern "C" void vecSub(float *a, float *b, float *res, int blockSize, int n) {
    const int numBlocks = (n + blockSize - 1) / blockSize;  // ceil(n / blockSize)
    vecSub_kernel<<<numBlocks, blockSize>>>(a, b, res, n);
}
// Launch element-wise addition res = a + b over n device floats.
extern "C" void vecAdd(float *a, float *b, float *res, int blockSize, int n) {
    const int numBlocks = (n + blockSize - 1) / blockSize;  // ceil(n / blockSize)
    vecAdd_kernel<<<numBlocks, blockSize>>>(a, b, res, n);
}
// Launch in-place scalar multiply of n device floats in a.
extern "C" void vecScalar(float *a, float scalar, int blockSize, int n) {
    const int numBlocks = (n + blockSize - 1) / blockSize;  // ceil(n / blockSize)
    vecScalar_kernel<<<numBlocks, blockSize>>>(a, scalar, n);
}
// Launch device-to-device copy of n floats from src to dest.
extern "C" void vecCopy(float *src, float* dest, int blockSize, int n) {
    const int numBlocks = (n + blockSize - 1) / blockSize;  // ceil(n / blockSize)
    vecCopy_kernel<<<numBlocks, blockSize>>>(src, dest, n);
}
// Copy the upper-triangular part of the m x n device matrix d_src into
// d_dest (exact element semantics live in triu_kernel, not visible here).
// NOTE(review): this launches a single block of m*n threads, so it will
// silently fail for matrices with m*n > 1024 (the per-block thread limit)
// — confirm callers only pass small matrices.
extern "C" void triu(float *d_src,
                     float *d_dest,
                     const int m, const int n)
{
    dim3 Blocks(1,1);
    dim3 Threads(m, n);
    triu_kernel<<<Blocks, Threads>>>(d_src, d_dest, m, n);
}
// Set the single element d_A[idx] = val in device memory via a 1-thread kernel.
extern "C" void update_value(float *d_A, int idx, float val) {
    update_value_kernel<<<1,1>>>(d_A, idx, val);
}
// Read back the single float stored at d_A[idx] in device memory.
// The cudaMemcpy is blocking, so the value is valid on return.
extern "C" float get_value(float *d_A, int idx) {
    float value = 0;
    cudaMemcpy(&value, &d_A[idx], sizeof(float), cudaMemcpyDeviceToHost);
    return value;
}
// Extract/process the diagonal of the w x h device matrix d_A into d_res
// (one thread per column; semantics live in diag_kernel).
extern "C" void diag(float *d_A, float *d_res, int w, int h) {
    diag_kernel<<<1, w>>>(d_A, d_res, w, h);
}
// Off-diagonal counterpart of diag(): one thread per row; semantics live
// in off_diag_kernel.
extern "C" void off_diag(float *d_A, float *d_res, int w, int h) {
    off_diag_kernel<<<1, h>>>(d_A, d_res, w, h);
}
|
2,564 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//
//void CHECK(cudaError_t error)
//{
// if (error != cudaSuccess)
// {
// printf("Error : %s : %d, ", __FILE__, __LINE__);
// printf("code : %d, reason: %s \n", error, cudaGetErrorString(error));
// exit(1);
// }
//}
//
//void checkResult(float *host_ref, float *gpu_ref, const int N)
//{
// double epsilon = 0.0000001;
// bool match = 1;
//
// for (size_t i = 0; i < N; i++)
// {
// if (abs(host_ref[i] - gpu_ref[i]) > epsilon)
// {
// match = 0;
// printf("Arrays do not match! \n");
// printf("host %5.2f gpu %5.2f at current %d\n", host_ref[i], gpu_ref[i], N);
// break;
// }
// }
//
// if (match) printf("Arrays match . \n\n");
//}
//
//void initialize_data_s(float * ip, int size)
//{
// time_t t;
// srand((unsigned)time(&t));
//
// for (size_t i = 0; i < size; i++)
// {
// ip[i] = (float)(rand() & 0xFF) / 10.0f;
// }
//}
//
//void sum_array_cpu(float * a, float * b, float * c, const int N)
//{
// for (size_t i = 0; i < N; i++)
// {
// c[i] = a[i] + b[i];
// }
//}
//
//__global__ void sum_array_gpu(float * a, float * b, float * c)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// printf("a =%f b = %f c = %f \n", a[i], b[i], c[i]);
//}
//
//void run_code()
//{
// int element_Count = 32;
// size_t number_bytes = element_Count * sizeof(float);
//
// float *h_a, *h_b, *host_ref, *gpu_ref;
//
// h_a = (float *)malloc(number_bytes);
// h_b = (float *)malloc(number_bytes);
// host_ref = (float *)malloc(number_bytes);
// gpu_ref = (float *)malloc(number_bytes);
//
// initialize_data_s(h_a, element_Count);
// initialize_data_s(h_b, element_Count);
//
// memset(host_ref, 0, number_bytes);
// memset(gpu_ref, 0, number_bytes);
//
// float *d_a, *d_b, *d_c;
// cudaMalloc((float **)&d_a, number_bytes);
// cudaMalloc((float **)&d_b, number_bytes);
// cudaMalloc((float **)&d_c, number_bytes);
//
// cudaMemcpy(d_a, h_a, number_bytes, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b, number_bytes, cudaMemcpyHostToDevice);
//
// dim3 block(element_Count);
// dim3 grid(element_Count / block.x);
//
// sum_array_gpu << <grid, block >> > (d_a, d_b, d_c);
//
// cudaMemcpy(gpu_ref, d_c, number_bytes, cudaMemcpyDeviceToHost);
//
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c);
//
// free(h_a);
// free(h_b);
// free(host_ref);
// free(gpu_ref);
//}
//
////int main()
////{
//// run_code();
//// system("pause");
//// return 0;
////} |
2,565 | #include <stdio.h>
#include <stdlib.h>
typedef short WORD;
typedef int DWORD;
typedef int LONG;
#pragma pack(push, 1)
typedef struct tagBITMAPFILEHEADER
{
WORD bfType; //specifies the file type
DWORD bfSize; //specifies the size in bytes of the bitmap file
WORD bfReserved1; //reserved; must be 0
WORD bfReserved2; //reserved; must be 0
DWORD bOffBits; //species the offset in bytes from the bitmapfileheader to the bitmap bits
}BITMAPFILEHEADER;
#pragma pack(pop)
#pragma pack(push, 1)
typedef struct tagBITMAPINFOHEADER
{
DWORD biSize; //specifies the number of bytes required by the struct
LONG biWidth; //specifies width in pixels
LONG biHeight; //species height in pixels
WORD biPlanes; //specifies the number of color planes, must be 1
WORD biBitCount; //specifies the number of bit per pixel
DWORD biCompression;//spcifies the type of compression
DWORD biSizeImage; //size of image in bytes
LONG biXPelsPerMeter; //number of pixels per meter in x axis
LONG biYPelsPerMeter; //number of pixels per meter in y axis
DWORD biClrUsed; //number of colors used by th ebitmap
DWORD biClrImportant; //number of colors that are important
}BITMAPINFOHEADER;
#pragma pack(pop)
// Swap the first and third byte of every 3-byte pixel in-place, converting
// between BGR and RGB channel order. `size` is the total byte count of the
// pixel buffer; one thread handles one pixel.
__global__ void RB_Swap(unsigned char *imageData, int size)
{
    const int pixel = threadIdx.x + blockIdx.x * blockDim.x;
    if (pixel < size / 3)
    {
        const int base = pixel * 3;
        const unsigned char firstChannel = imageData[base];
        imageData[base] = imageData[base + 2];
        imageData[base + 2] = firstChannel;
    }
}
// Load a BMP file, fill *bitmapFileHeader / *bitmapInfoHeader from the file,
// and return the pixel data with its B and R channels swapped on the GPU
// (BMP stores BGR). Returns a malloc'd buffer the caller must free(), or
// NULL on any error.
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader, BITMAPFILEHEADER *bitmapFileHeader)
{
    FILE *filePtr;              //our file pointer
    unsigned char *bitmapImage; //store image data

    //open filename in read binary mode
    filePtr = fopen(filename,"rb");
    if (filePtr == NULL)
        return NULL;

    //read the bitmap file header
    fread(bitmapFileHeader, sizeof(BITMAPFILEHEADER),1,filePtr);

    //verify that this is a bmp file by checking the bitmap id ("BM")
    if (bitmapFileHeader->bfType !=0x4D42)
    {
        fclose(filePtr);
        return NULL;
    }

    //read the bitmap info header
    fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER),1,filePtr);

    //move file pointer to the beginning of bitmap data
    fseek(filePtr, bitmapFileHeader->bOffBits, SEEK_SET);

    //allocate enough memory for the bitmap image data
    bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage);
    if (!bitmapImage)
    {
        fclose(filePtr);
        return NULL;
    }

    // BUG FIX: the original checked `bitmapImage == NULL` AFTER fread, which
    // can never trigger; check how many bytes were actually read instead.
    if (fread(bitmapImage, 1, bitmapInfoHeader->biSizeImage, filePtr)
            != (size_t)bitmapInfoHeader->biSizeImage)
    {
        free(bitmapImage);
        fclose(filePtr);
        return NULL;
    }

    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t end;
    cudaEventCreate(&end);
    float swapTime;

    //swap the r and b values to get RGB (bitmap is BGR)
    unsigned char *d_bitmapImage; //device copy of the image
    cudaMalloc((void**)&d_bitmapImage, bitmapInfoHeader->biSizeImage);
    cudaMemcpy(d_bitmapImage, bitmapImage, bitmapInfoHeader->biSizeImage, cudaMemcpyHostToDevice);

    int T = 1024;
    // BUG FIX: proper integer ceiling division; the original applied ceil()
    // to an already-truncated integer quotient, which rounds DOWN.
    int B = (bitmapInfoHeader->biSizeImage + T - 1) / T;

    //Kernel call (timed with events)
    cudaEventRecord(start, 0);
    RB_Swap<<<B, T>>> (d_bitmapImage, bitmapInfoHeader->biSizeImage);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaMemcpy(bitmapImage, d_bitmapImage, bitmapInfoHeader->biSizeImage, cudaMemcpyDeviceToHost);
    cudaEventElapsedTime(&swapTime, start, end);
    printf("Load Swap Time: %fms\n",swapTime);

    // BUG FIX: release the device buffer (the original leaked it) and events.
    cudaFree(d_bitmapImage);
    cudaEventDestroy(start);
    cudaEventDestroy(end);

    //close file and return bitmap image data
    fclose(filePtr);
    return bitmapImage;
}
// Write a BMP file: both headers followed by the pixel data, with the image's
// channels swapped back on the GPU (RGB -> BGR for the file). Note the swap
// is done in the caller's buffer, so bitmapImage is left channel-swapped on
// return (matches the original behaviour). Exits the process if the file
// cannot be opened.
void ReloadBitmapFile(char *filename, unsigned char *bitmapImage, BITMAPFILEHEADER *bitmapFileHeader, BITMAPINFOHEADER *bitmapInfoHeader)
{
    FILE *filePtr; //our file pointer

    //open filename in write binary mode
    filePtr = fopen(filename,"wb");
    if (filePtr == NULL)
    {
        printf("\nERROR: Cannot open file %s", filename);
        exit(1);
    }

    //write the bitmap file header
    fwrite(bitmapFileHeader, sizeof(BITMAPFILEHEADER),1,filePtr);
    //write the bitmap info header
    fwrite(bitmapInfoHeader, sizeof(BITMAPINFOHEADER),1,filePtr);

    //swap the r and b values back for the file (bitmap is BGR)
    unsigned char *d_bitmapImage; //device copy of the image
    cudaMalloc((void**)&d_bitmapImage, bitmapInfoHeader->biSizeImage);
    cudaMemcpy(d_bitmapImage, bitmapImage, bitmapInfoHeader->biSizeImage, cudaMemcpyHostToDevice);

    int T = 1024;
    // BUG FIX: proper integer ceiling division (original ceil() acted on a
    // truncated integer quotient and so rounded down).
    int B = (bitmapInfoHeader->biSizeImage + T - 1) / T;

    //Kernel call
    RB_Swap<<<B, T>>> (d_bitmapImage, bitmapInfoHeader->biSizeImage);
    cudaMemcpy(bitmapImage, d_bitmapImage, bitmapInfoHeader->biSizeImage, cudaMemcpyDeviceToHost);

    // BUG FIX: free the device buffer; the original leaked it on every call.
    cudaFree(d_bitmapImage);

    //write in the bitmap image data
    fwrite(bitmapImage,bitmapInfoHeader->biSizeImage,1,filePtr);

    //close file
    fclose(filePtr);
}
// Scramble the buffer in-place by mirror-swapping bytes inside consecutive
// key-sized segments: each thread picks an element in the first half of a
// segment (offset o) and exchanges it with the mirrored element (offset
// key-1-o) in the second half. A mirror swap is its own inverse, which is
// why `decrypt` below has an identical body.
__global__ void encrypt(unsigned char *bitmapImage, int size, int key)
{
    int threadId = threadIdx.x + blockIdx.x*blockDim.x;
    int half = key/2;
    // index = segment_base + offset-in-first-half (offset o = threadId % half)
    int index = ((threadId/half)*key) + (threadId%half);
    // swap = segment_base + (key - 1 - o): the mirrored position
    int swap = index + (key - (2*(index%half)) - 1);
    if((swap)<size) // guard the tail segment that may extend past the buffer
    {
        unsigned char temp;
        //unsigned mid = bitmapImage[((index/half)*key) + half];
        temp = bitmapImage[index];
        bitmapImage[index] = bitmapImage[swap];
        bitmapImage[swap] = temp;
    }
}
// Undo `encrypt`: the body is byte-for-byte identical because the mirror
// swap is an involution — applying it twice restores the original bytes.
__global__ void decrypt(unsigned char *bitmapImage, int size, int key)
{
    int threadId = threadIdx.x + blockIdx.x*blockDim.x;
    int half = key/2;
    // same index/mirror mapping as encrypt (see comments there)
    int index = ((threadId/half)*key) + (threadId%half);
    int swap = index + (key - (2*(index%half)) - 1);
    if((swap)<size)
    {
        unsigned char temp;
        //unsigned mid = bitmapImage[((index/half)*key) + half];
        temp = bitmapImage[index];
        bitmapImage[index] = bitmapImage[swap];
        bitmapImage[swap] = temp;
    }
}
// Driver: load mona_lisa.bmp, encrypt it on the GPU, write Encrypted.bmp,
// reload it, decrypt, and write Decrypted.bmp — timing each phase with
// CUDA events.
int main()
{
    BITMAPINFOHEADER bitmapInfoHeader;
    BITMAPFILEHEADER bitmapFileHeader;
    unsigned char *bitmapData;

    bitmapData = LoadBitmapFile("mona_lisa.bmp",&bitmapInfoHeader, &bitmapFileHeader);
    // BUG FIX: LoadBitmapFile returns NULL on failure; the original
    // dereferenced it unconditionally.
    if (bitmapData == NULL)
    {
        printf("ERROR: cannot load mona_lisa.bmp\n");
        return 1;
    }
    printf("%d\n",bitmapInfoHeader.biSizeImage);
    /*
    //Print array to file
    FILE *fout = fopen("out.bmp","wb");
    fwrite(bitmapData,bitmapInfoHeader.biSizeImage,1,fout);
    */
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t end;
    cudaEventCreate(&end);
    float encryptionTime, decryptionTime, HostToDevice, DeviceToHost;

    //Encryption
    int key = 8000;
    unsigned char *d_bitmapImage; //store image data in device
    //Allocate size to array in device memory
    cudaMalloc((void**)&d_bitmapImage, bitmapInfoHeader.biSizeImage);
    //Copy data from host to device (timed)
    cudaEventRecord(start, 0);
    cudaMemcpy(d_bitmapImage, bitmapData, bitmapInfoHeader.biSizeImage, cudaMemcpyHostToDevice);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&HostToDevice, start, end);
    printf("Host to Device Time: %fms\n",HostToDevice);

    int T = 1024;
    // BUG FIX: integer ceiling division; ceil() on an integer quotient
    // rounds down instead of up.
    int B = (bitmapInfoHeader.biSizeImage + T - 1) / T;

    //Kernel call (timed)
    cudaEventRecord(start, 0);
    encrypt<<<B, T>>> (d_bitmapImage, bitmapInfoHeader.biSizeImage, key);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&encryptionTime, start, end);
    printf("Encryption Time: %fms\n",encryptionTime);

    //Copy data from device to host (timed)
    cudaEventRecord(start, 0);
    cudaMemcpy(bitmapData, d_bitmapImage, bitmapInfoHeader.biSizeImage, cudaMemcpyDeviceToHost);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&DeviceToHost, start, end);
    printf("Device to Host Time: %fms\n",DeviceToHost);

    ReloadBitmapFile("Encrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader);

    //load encrypted image to array
    // BUG FIX: free the previous buffer before overwriting the pointer
    // (the original leaked the first image).
    free(bitmapData);
    bitmapData = LoadBitmapFile("Encrypted.bmp",&bitmapInfoHeader, &bitmapFileHeader);
    if (bitmapData == NULL)
    {
        printf("ERROR: cannot load Encrypted.bmp\n");
        cudaFree(d_bitmapImage);
        return 1;
    }

    //Decryption
    cudaMemcpy(d_bitmapImage, bitmapData, bitmapInfoHeader.biSizeImage, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    decrypt<<<B, T>>> (d_bitmapImage, bitmapInfoHeader.biSizeImage, key);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&decryptionTime, start, end);
    printf("Decryption Time: %fms\n",decryptionTime);
    cudaMemcpy(bitmapData, d_bitmapImage, bitmapInfoHeader.biSizeImage, cudaMemcpyDeviceToHost);
    ReloadBitmapFile("Decrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader);

    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(d_bitmapImage);
    free(bitmapData); // BUG FIX: the original leaked the host buffer at exit
    return 0;
}
2,566 | #include <stdlib.h>
#include <stdio.h>
// Flat 1D global thread index for a 1D grid of 1D blocks.
__device__ int get_global_index(){
	return threadIdx.x + blockIdx.x * blockDim.x;
}
// Trivial device helper used by kernel1: always returns 7.
__device__ int get_constant(){
	return 7;
}
// Write the constant 7 into each thread's slot of `array`.
__global__ void kernel1(int *array){
	array[get_global_index()] = get_constant();
}
// Write each thread's own global index into its slot of `array`.
__global__ void kernel2(int *array){
	array[get_global_index()] = get_global_index();
}
// Demo driver: run kernel1 (constant fill) and kernel2 (index fill) over a
// 256-element array and print the results of each.
int main(){
	int num_elements = 256;
	int num_bytes = num_elements*sizeof(int);

	int *device_array = 0;
	int *host_array = 0;

	host_array = (int *) malloc(num_bytes);
	cudaMalloc((void**)&device_array, num_bytes);

	int block_size = 128;
	int grid_size = num_elements/block_size; // num_elements is a multiple of block_size

	kernel1<<<grid_size, block_size>>>(device_array);
	cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);

	printf("kernel 1 results: \n");
	int i;
	for(i=0;i<num_elements;i++){
		printf("%d ", host_array[i]);
	}
	printf("\n");

	kernel2<<<grid_size, block_size>>>(device_array);
	cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);

	printf("kernel 2 results: \n");
	for(i= 0; i< num_elements; i++){
		printf("%d ", host_array[i]);
	}
	printf("\n");

	// BUG FIX: the original leaked both the device and the host allocation.
	cudaFree(device_array);
	free(host_array);
	return 0;
}
2,567 | //raytracer.mustafaisik.net//
#include "world.cuh"
#include "cuda_runtime.h"
// Entry point: build a World from the Cornell-box scene description and
// render video output. The inner scope forces World's destructor (and any
// GPU cleanup it performs — TODO confirm) to run before main returns.
int main()
{
    {
        World world;
        world.loadScene("input//cornellbox//scene-realtime.xml");
        world.video();
    }
    return 0;
}
2,568 | #include "includes.h"
// Per-cell total energy: for column `id` and vertical level `lev` (one level
// per blockIdx.y), computes kinetic + internal + gravitational potential
// energy density and multiplies by the cell's control volume, storing the
// result in Etotal_d. Horizontal momentum Mh_d and vertical wind W_d are
// combined in Cartesian components; DeepModel selects a spherical-shell
// volume element vs. a flat (thin-shell) one.
// NOTE(review): GlobalE_d and func_r_d are parameters but unused in this
// body — presumably consumed by a companion reduction kernel; confirm.
__global__ void CalcTotEnergy(double *Etotal_d, double *GlobalE_d, double *Mh_d, double *W_d, double *Rho_d, double *temperature_d, double Gravit, double Cp, double Rd, double A, double *Altitude_d, double *Altitudeh_d, double *lonlat_d, double *areasT, double *func_r_d, int num, bool DeepModel) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int nv = gridDim.y;
    int lev = blockIdx.y;
    if (id < num) {
        double Ek, Eint, Eg;
        double wx, wy, wz;
        double Cv = Cp - Rd; // specific heat at constant volume
        //calculate control volume between the interfaces bounding this level
        double zup, zlow, Vol;
        zup = Altitudeh_d[lev + 1] + A; // radius at upper interface (A = planet radius)
        zlow = Altitudeh_d[lev] + A;    // radius at lower interface
        if (DeepModel) {
            // spherical shell segment: area scaled by r^2 integrated in radius
            Vol = areasT[id] / pow(A, 2) * (pow(zup, 3) - pow(zlow, 3)) / 3;
        }
        else {
            // shallow approximation: area times layer thickness
            Vol = areasT[id] * (zup - zlow);
        }
        //calc cartesian values of vertical wind (lonlat_d stores lon at [2*id], lat at [2*id+1])
        wx = W_d[id * nv + lev] * cos(lonlat_d[id * 2 + 1]) * cos(lonlat_d[id * 2]);
        wy = W_d[id * nv + lev] * cos(lonlat_d[id * 2 + 1]) * sin(lonlat_d[id * 2]);
        wz = W_d[id * nv + lev] * sin(lonlat_d[id * 2 + 1]);
        //kinetic energy density 0.5*rho*v^2 (momenta, hence the division by rho)
        Ek = 0.5
             * ((Mh_d[id * 3 * nv + lev * 3 + 0] + wx) * (Mh_d[id * 3 * nv + lev * 3 + 0] + wx)
                + (Mh_d[id * 3 * nv + lev * 3 + 1] + wy) * (Mh_d[id * 3 * nv + lev * 3 + 1] + wy)
                + (Mh_d[id * 3 * nv + lev * 3 + 2] + wz) * (Mh_d[id * 3 * nv + lev * 3 + 2] + wz))
             / Rho_d[id * nv + lev];
        //internal energy rho*Cv*T
        Eint = Cv * temperature_d[id * nv + lev] * Rho_d[id * nv + lev];
        //gravitation potential energy rho*g*altitude (assuming g = constant)
        Eg = Rho_d[id * nv + lev] * Gravit * Altitude_d[lev];
        //total energy in the control volume
        Etotal_d[id * nv + lev] = (Ek + Eint + Eg) * Vol;
        // printfn("E = %e\n",Etotal_d[id*nv+lev]);
    }
}
2,569 | /***************************************************************************//**
* \file LHS1.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the left hand side for the intermediate velocity solve
*/
#include "LHS1.h"
namespace kernels
{
/*
* calculates the boundary terms for the left hand side matrix for the velocity solve
* param row array storing the row indices for the sparse LHS matrix
* param col array storing the column indices for the sparse LHS matrix
* param val array storing the values for the sparse LHS matrix
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param dt change in time
* param nu viscosity
* param nx number of cells in x direction
* param ny number of cells in y direction
*/
// Boundary rows of the implicit-viscosity LHS matrix for the u-velocity
// solve, emitted as COO triplets (row/col/val). One thread per u-node;
// interior rows are handled by LHS_mid_X_nobody. numE is this row's first
// slot in the triplet arrays: boundary rows hold 4 entries, interior rows 5
// (the `-1` corrections account for the missing WEST entry when I == 0).
// When a neighbour lies outside the domain, its coefficient is folded into
// the diagonal (temp) instead of emitting an off-diagonal entry.
__global__
void LHS_BC_X(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
		return;
	int i = threadIdx.x + blockDim.x * blockIdx.x,
		I = i % (nx-1),   // x index of this u-node
		J = i / (nx-1);   // y index of this u-node
	if (I != 0 && I != nx-2 && J != 0 && J != ny-1)
		return; // interior node: handled by the _mid_ kernel
	double temp = 1; // diagonal accumulator (identity + implicit diffusion terms)
	int numE = 0;    // write offset into the COO triplet arrays
	if (J == 0)
	{
		numE = I*4;
		if (I != 0)
			numE -= 1;
	}
	else if (J == ny-1)
	{
		numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*4;
		if (I != 0)
			numE-=1;
	}
	else
	{
		if (I == 0)
			numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5;
		else
			numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1;
	}
	//EAST
	if(I != nx-2)//check if on east boundary
	{
		row[numE] = i;
		col[numE] = i+1;
		val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5));
		temp += 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5));
		numE++;
	}
	else
	{
		temp += 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5));
	}
	//WEST
	if(I != 0)//check if on west boundary
	{
		row[numE] = i;
		col[numE] = i-1;
		val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5));
		temp += 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5));
		numE++;
	}
	else
	{
		temp += 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5));
	}
	//NORTH
	if(J != ny-1)//check if on north boundary
	{
		row[numE] = i;
		col[numE] = i+(nx-1);
		val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5));
		temp += 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5));
		numE++;
	}
	else
	{
		// NOTE(review): boundary term uses dy[J] twice (ghost cell assumed
		// equal height) — confirm this is the intended BC treatment.
		temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J])*0.5));
	}
	//SOUTH
	if(J != 0)//check if on south boundary
	{
		row[numE] = i;
		col[numE] = i-(nx-1);
		val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5));
		temp += 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5));
		numE++;
	}
	else
	{
		temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J])*0.5));
	}
	//CENTER (diagonal entry, written last for this row)
	row[numE] = i;
	col[numE] = i;
	val[numE] = temp;
	numE++;
}
/*
* calculates the boundary terms for the left hand side matrix for the velocity solve
* param row array storing the row indices for the sparse LHS matrix
* param col array storing the column indices for the sparse LHS matrix
* param val array storing the values for the sparse LHS matrix
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param dt change in time
* param nu viscosity
* param nx number of cells in x direction
* param ny number of cells in y direction
*/
// Boundary rows of the implicit-viscosity LHS matrix for the v-velocity
// solve, emitted as COO triplets. One thread per v-node; the v-block starts
// after all u-rows, so both the matrix row index i and the triplet offset
// numE are shifted by the u-block sizes ((nx-1)*ny unknowns, and
// (nx-1)*ny*5 - 2*ny - 2*(nx-1) triplet entries respectively).
// Off-domain neighbour coefficients are folded into the diagonal (temp).
__global__
void LHS_BC_Y(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
		return;
	int ip = threadIdx.x + blockDim.x * blockIdx.x,
		I = ip % nx,          // x index of this v-node
		J = ip / nx,          // y index of this v-node
		i = ip + (nx-1)*ny;   // global matrix row (after the u-block)
	if (I != 0 && I != nx-1 && J != 0 && J != ny-2)
		return; // interior node: handled by the _mid_ kernel
	// start past the u-block's triplets, then offset within the v-block
	int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1);
	if (J == 0)
	{
		numE += I*4;
		if (I != 0)
			numE -= 1;
	}
	else if (J == ny-2)
	{
		numE += nx*4 - 2 + (J-1)*(5*nx - 2) + I*4;
		if (I != 0)
			numE-=1;
	}
	else
	{
		if (I == 0)
			numE += nx*4 - 2 + (J-1)*(5*nx - 2) + I*5;
		else
			numE += nx*4 - 2 + (J-1)*(5*nx - 2) + I*5 - 1;
	}
	double temp = 1; // diagonal accumulator
	//EAST
	if(I != nx-1)//check if on east boundary
	{
		row[numE] = i;
		col[numE] = i+1;
		val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5));
		temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5));
		numE++;
	}
	else
	{
		// NOTE(review): boundary term uses dx[I] twice (ghost cell assumed
		// equal width) — confirm intended BC treatment.
		temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I])*0.5));
	}
	//WEST
	if(I != 0)//check if on west boundary
	{
		row[numE] = i;
		col[numE] = i-1;
		val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5));
		temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5));
		numE++;
	}
	else
	{
		temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I])*0.5));
	}
	//NORTH
	if(J != ny-2)//check if on north boundary
	{
		row[numE] = i;
		col[numE] = i + nx;
		val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5));
		temp += 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5));
		numE++;
	}
	else
	{
		temp += 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5));
	}
	//SOUTH
	if(J != 0)//check if on south boundary
	{
		row[numE] = i;
		col[numE] = i-nx;
		val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
		temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
		numE++;
	}
	else
	{
		temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
	}
	//CENTER (diagonal entry, written last for this row)
	row[numE] = i;
	col[numE] = i;
	val[numE] = temp;
	numE++;
}
/*
* calculates the middle terms for the left hand side matrix for the velocity solve
* param row array storing the row indices for the sparse LHS matrix
* param col array storing the column indices for the sparse LHS matrix
* param val array storing the values for the sparse LHS matrix
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param dt change in time
* param nu viscosity
* param nx number of cells in x direction
* param ny number of cells in y direction
*/
// Interior rows of the u-velocity LHS matrix (no immersed body). One thread
// per interior u-node; each row always emits 5 COO entries in E/W/N/S/center
// order. numE must match the layout produced by LHS_BC_X for the preceding
// boundary rows (4 entries each) and interior rows (5 entries each).
__global__
void LHS_mid_X_nobody(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
		return;
	int i = threadIdx.x + blockDim.x * blockIdx.x,
		I = i % (nx-1),   // x index of this u-node
		J = i / (nx-1);   // y index of this u-node
	if (I == 0 || I == nx-2 || J == 0 || J == ny-1)
		return; // boundary node: handled by LHS_BC_X
	// top row - corner mid sides current row
	int numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1;
	double temp = 1; // diagonal accumulator (identity + implicit diffusion)
	//EAST
	row[numE] = i;
	col[numE] = i+1;
	val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5));
	temp += 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5));
	numE++;
	//WEST
	row[numE] = i;
	col[numE] = i-1;
	val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5));
	temp += 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5));
	numE++;
	//NORTH
	row[numE] = i;
	col[numE] = i+(nx-1);
	val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5));
	temp += 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5));
	numE++;
	//SOUTH
	row[numE] = i;
	col[numE] = i-(nx-1);
	val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5));
	temp += 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5));
	numE++;
	//CENTER (diagonal, written last for this row)
	row[numE] = i;
	col[numE] = i;
	val[numE] = temp;
	numE++;
}
/*
* calculates the middle terms for the left hand side matrix for the velocity solve
* param row array storing the row indices for the sparse LHS matrix
* param col array storing the column indices for the sparse LHS matrix
* param val array storing the values for the sparse LHS matrix
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param dt change in time
* param nu viscosity
* param nx number of cells in x direction
* param ny number of cells in y direction
*/
// Interior rows of the v-velocity LHS matrix (no immersed body). One thread
// per interior v-node; each row always emits 5 COO entries. Both the matrix
// row i and the triplet offset numE are shifted past the whole u-block
// (see comment inline); layout must match LHS_BC_Y's boundary rows.
__global__
void LHS_mid_Y_nobody(int *row, int *col, double *val, double *dx, double *dy, double dt, double nu, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
		return;
	int ip = threadIdx.x + blockDim.x * blockIdx.x,
		I = ip % nx,          // x index of this v-node
		J = ip / nx,          // y index of this v-node
		i = ip + (nx-1)*ny;   // global matrix row (after the u-block)
	if (I == 0 || I == nx-1 || J == 0 || J == ny-2)
		return; // boundary node: handled by LHS_BC_Y
	// ( numU ) (row1) (rows2-before me) (current row)
	int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1) + nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1;
	double temp = 1; // diagonal accumulator (identity + implicit diffusion)
	//EAST
	row[numE] = i;
	col[numE] = i+1;
	val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5));
	temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5));
	numE++;
	//WEST
	row[numE] = i;
	col[numE] = i-1;
	val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5));
	temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5));
	numE++;
	//NORTH
	row[numE] = i;
	col[numE] = i + nx;
	val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5));
	temp += 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5));
	numE++;
	//SOUTH
	row[numE] = i;
	col[numE] = i-nx;
	val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
	temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
	numE++;
	//CENTER (diagonal, written last for this row)
	row[numE] = i;
	col[numE] = i;
	val[numE] = temp;
	numE++;
}
}
|
2,570 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10
#define num_threads 10000
// DELIBERATELY racy: many threads perform a plain read-modify-write on the
// same d[tid % N] slot, so increments are lost. This kernel exists to
// demonstrate the hazard that increment_atomic below fixes — do not "fix" it.
__global__ void increment_naive(int *d)
{
	int tid = threadIdx.x + blockIdx.x*blockDim.x;
	tid = tid % N; // many threads alias onto the same N counters
	d[tid] += 1;
}
// Race-free counterpart of increment_naive: atomicAdd serializes the
// read-modify-write, so every increment to d[idx] is counted.
__global__ void increment_atomic(int *d)
{
	const int idx = (threadIdx.x + blockIdx.x * blockDim.x) % N;
	atomicAdd(d + idx, 1);
}
// Demo driver: run the racy and the atomic increment kernels over the same
// N counters and print both results (the racy counts come out < 1000).
int main()
{
	int h[N], *d;
	cudaMalloc(&d, sizeof(int)*N);

	cudaMemset(d, 0, sizeof(int)*N);
	increment_naive<<<(num_threads/N), N>>>(d);
	cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
	for(int i=0; i<N; i++)
		std::cout << h[i] << "\n";

	cudaMemset(d, 0, sizeof(int)*N);
	increment_atomic<<<(num_threads/N), N>>>(d);
	cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
	for(int i=0; i<N; i++)
		std::cout << h[i] << "\n";

	// BUG FIX: the original leaked the device allocation.
	cudaFree(d);
	return 0;
}
//12
//12
//12
//12
//12
//12
//12
//12
//12
//12
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
|
2,571 | //#include <cuda.h>
//#include <cuda_runtime.h>
//#include <math.h>
//#include "NativeDeclarations.h"
//#include <memory>
//
//#define PI 3.14159f
//#define BlockSize 16
//#define IterationLimit 10
//#define Planes 3
//
//template<typename T, int BLOCK_SIZE>
//__global__ void MeanFilterKernel(int Rows, int Cols, T* Image, int NumIterations)
//{
// // Thread index
// int tx = threadIdx.x;
// int ty = threadIdx.y;
//
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
//
// // put padding in for the edge (do all calcs in float and then convert back to native format)
// __shared__ float OriginalImageTile[BLOCK_SIZE + 2][BLOCK_SIZE + 2][Planes];
//
// __shared__ float NewImageTile[BLOCK_SIZE][BLOCK_SIZE][Planes];
//
// for (int iPlane = 0; iPlane < Planes; iPlane++)
// {
// int idx = (x + y * blockDim.x * gridDim.x) * Planes + iPlane;
// for (int i = 0; i < NumIterations; i++)
// {
// float val = (float) Image[idx];
// if (idx < Rows * Cols * Planes)
// {
// if (tx == 0)
// OriginalImageTile[0][ty + 1][iPlane] = val;
// if (ty == 0)
// OriginalImageTile[tx + 1][0][iPlane] = val;
// if (tx == blockDim.x)
// OriginalImageTile[tx + 2][ty + 1][iPlane] = val;
// if (ty == blockDim.y)
// OriginalImageTile[tx + 1][ty + 2][iPlane] = val;
//
// OriginalImageTile[tx + 1][ty + 1][iPlane] = val;
// }
// __syncthreads();
// NewImageTile[tx][ty][iPlane] = (
// OriginalImageTile[tx + 1][ty + 1][iPlane] +
// OriginalImageTile[tx][ty + 1][iPlane] +
// OriginalImageTile[tx + 2][ty + 1][iPlane] +
// OriginalImageTile[tx + 1][ty][iPlane] +
// OriginalImageTile[tx + 1][ty + 2][iPlane])/5;
// __syncthreads();
// }
// Image[idx] = (T)NewImageTile[tx + 1][ty + 1][iPlane];
// }
//
//}
//
//// helper routine to convert colorspace from rgb to hsi
//__host__ __device__ void RGB2HSI(float R, float G, float B, float* H, float* S, float* I) {
// *I = (R + G + B)/3;
// float MinColor;
//
// if (R < G)
// MinColor = R;
// else
// MinColor = G;
// if (MinColor > B)
// MinColor = B;
//
// *S = 1 - MinColor / *I;
//
// float Theta;
// if (R == 0 && G == 0 && B == 0)
// Theta = 0;
// else
// Theta = acosf(((R - G) + (R - B))/ 2 / sqrtf((R - G)*(R - G) + (R - B)*(G - B)));
// if (B <= G)
// *H = Theta/(2 * PI);
// else
// *H = ((2 * PI) - Theta)/(2*PI);
//}
//
//template <typename T>
//__global__ void SegmentKernel(T* Image, int Rows, int Cols, float* Colors, int NumColors, int* SelectedColors) {
// float R; // red value (0 to 1)
// float G; // blue value (0 to 1)
// float B; // green value (0 to 1)
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// int idx = (x + y * blockDim.x * gridDim.x);
// R = ((float) Image[idx * Planes]) / 255.0f;
// G = ((float) Image[idx * Planes + 1]) / 255.0f;
// B = ((float) Image[idx * Planes + 2]) / 255.0f;
// float H;
// float S;
// float I;
// RGB2HSI(R, G, B, &H, &S, &I);
// float MinDistance = 3;
// float Distance;
// int ClosestColor = 0;
// for (int iColor = 0; iColor < NumColors; iColor++)
// {
// float DistH = H - Colors[iColor * Planes];
// float DistS = S - Colors[iColor * Planes + 1];
// float DistI = H - Colors[iColor * Planes + 2];
// Distance = DistH * DistH + DistI * DistI + DistS * DistS;
// if (Distance < MinDistance) {
// MinDistance = Distance;
// ClosestColor = iColor;
// }
// }
// SelectedColors[idx] = ClosestColor;
//}
//// RGB values are 0-1 and HSI outputs 0-1
//
//namespace native_library {
// namespace details{
// template<typename T>
// void SegmentColorsCUDA(T* Image, int Rows, int Columns, T* Colors, int NumColors, int* SelectedColors)
// {
// cudaError Status;
// int PixelCount = Rows * Columns;
// //T* dev_Image;
// //float* dev_Colors;
// int* dev_SelectedColors;
// float* Colors_float;
// Colors_float = (float*) malloc(NumColors * Planes * sizeof(float));
// for (int iColor = 0; iColor < NumColors; iColor++)
// {
// float R = ((float) Colors[iColor * Planes]) / 255.0f;
// float G = ((float) Colors[iColor * Planes + 1]) / 255.0f;
// float B = ((float) Colors[iColor * Planes + 2]) / 255.0f;
// RGB2HSI(R, G, B, &Colors_float[iColor * Planes] , &Colors_float[iColor * Planes + 1], &Colors_float[iColor * Planes + 2]);
// printf("H: %f",Colors_float[iColor * Planes]);
// printf(" |S: %f",Colors_float[iColor * Planes + 1]);
// printf(" |I: %f\n",Colors_float[iColor * Planes + 2]);
// }
// //Status = cudaMalloc((void **) &dev_Image, PixelCount * Planes * sizeof(T));
// Status = cudaMalloc((void **) &dev_SelectedColors, PixelCount * sizeof(int));
// //Status = cudaMalloc((void **) &dev_Colors, NumColors * Planes * sizeof(float));
// //Status = cudaMemcpy(dev_Image, Image, PixelCount * Planes * sizeof(T), cudaMemcpyHostToDevice);
// //Status = cudaMemcpy(dev_Colors, Colors_float, NumColors * Planes * sizeof(float), cudaMemcpyHostToDevice);
// dim3 TileSize(BlockSize, BlockSize);
// dim3 GridSize(Rows/BlockSize, Columns/BlockSize);
//// Filter image data on gpu side before processing
// //MeanFilterKernel<T, BlockSize><<<TileSize, GridSize>>>(Rows, Columns, dev_Image, IterationLimit);
// //SegmentKernel<T><<<TileSize, GridSize>>>(dev_Image, Rows, Columns, dev_Colors, NumColors, dev_SelectedColors);
// //Status = cudaMemcpy(SelectedColors, dev_SelectedColors, PixelCount * sizeof(int), cudaMemcpyDeviceToHost);
// free(Colors_float);
// cudaFree(dev_SelectedColors);
// //cudaFree(dev_Image);
//// cudaFree(dev_Colors);
// }
// }
//
// void SegmentColorsCUDA(float* Image, int Rows, int Columns, float* Colors, int NumColors, int* SelectedColors) {
// details::SegmentColorsCUDA<float>(Image, Rows, Columns, Colors, NumColors, SelectedColors);
// }
//
// void SegmentColorsCUDA(int* Image, int Rows, int Columns, int* Colors, int NumColors, int* SelectedColors) {
// details::SegmentColorsCUDA<int>(Image, Rows, Columns, Colors, NumColors, SelectedColors);
// }
//
// void SegmentColorsCUDA(unsigned char* Image, int Rows, int Columns, unsigned char* Colors, int NumColors, int* SelectedColors) {
// details::SegmentColorsCUDA<unsigned char>(Image, Rows, Columns, Colors, NumColors, SelectedColors);
// }
//
//}
|
2,572 | #include <stdio.h> /* printf, scanf, puts, NULL */
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <iostream>
// From: https://a248.e.akamai.net/f/862/5658/3/developer.download.nvidia.com/assets/cuda/files/reduction.pdf
// Final warp-level stage of the block reduction: folds sdata[tid+32..1]
// into sdata[tid] for the first 32 threads, without explicit barriers.
// `sdata` must be volatile so each store is re-read from shared memory by
// the other lanes. NOTE(review): this mask-less pattern relies on implicit
// warp-synchronous execution, which is only safe on pre-Volta GPUs — on
// SM70+ it needs __syncwarp() between steps; confirm target architecture.
template<unsigned int blockSize>
__device__ void warpReduce(volatile double *sdata, unsigned int tid){
// Each step halves the active span; blockSize is a compile-time constant,
// so dead branches are removed entirely.
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32] ; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16] ; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; }
if (blockSize >= 4) {sdata[tid] += sdata[tid + 2]; }
if (blockSize >= 2) {sdata[tid] += sdata[tid + 1]; }
}
/*
 * Sum reduction over g_idata[0..n-1]; one partial sum per block is written
 * to g_odata[blockIdx.x].
 * Requirements: blockDim.x == blockSize (template argument) and dynamic
 * shared memory of blockSize * sizeof(double).
 */
template <unsigned int blockSize> __global__ void reduce6( double *g_idata,
double *g_odata, unsigned int n) {
    extern __shared__ double sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + tid;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;
    // First add during load: each pass accumulates a pair of elements.
    // Fix: the original read g_idata[i + blockSize] unconditionally, which
    // is out of bounds whenever n is not a multiple of the grid stride.
    while (i < n) {
        sdata[tid] += g_idata[i];
        if (i + blockSize < n) sdata[tid] += g_idata[i + blockSize];
        i += gridSize;
    }
    __syncthreads();
    // Shared-memory reduction tree; dead branches compile away.
    if ( blockSize >= 512) { if ( tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if ( blockSize >= 256) { if ( tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if ( blockSize >= 128) { if ( tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
    // Last 64 elements are folded warp-synchronously.
    if (tid < 32) {
        warpReduce<blockSize>(sdata,tid);
    }
    __syncthreads();
    if (tid == 0) {g_odata[blockIdx.x] = sdata[0]; }
}
/*
 * Benchmark driver: sweeps 5 repetitions x 15 input sizes x 3 grid widths,
 * runs the reduce6 kernel, and compares against a host-side reference sum.
 */
int main() {
    for (int t = 0; t <= 4; t++) {
        for (int iter = 1; iter <= 15; iter++) {
            for (int bl = 1; bl <= 4; bl*=2) {
                printf("Iter %d, Blocks %d",t,bl);
                int blocks = 32*bl;
                // Fix: the block size MUST equal the reduce6<> template
                // argument. The original launched reduce6<512> with 1024
                // threads, so the upper half of each block's shared-memory
                // slots was loaded but never folded into the sum.
                int threads = 512;
                int arraySize = 3200*1024*iter;
                int smemSize = threads * sizeof(double);
                int arrayBytes = arraySize * sizeof(double);
                printf("=====\n");
                printf("Input Size = %d\n",arraySize);
                double *h_in, *h_out; // host arrays
                h_in = (double*) malloc(arrayBytes);
                double *d_in, *d_out; // device arrays
                // h_out holds one partial sum per block (blocks <= threads).
                h_out = (double*) malloc(smemSize);
                for (int i = 0; i < threads; i++) h_out[i] = 0;
                // Host reference sum (exact: integers up to ~1.2e15 < 2^53).
                double result = 0;
                for (int i = 0; i < arraySize; i++) {
                    h_in[i] = i;
                    result += i;
                }
                cudaEvent_t start, stop, startT, stopT;
                float time,full;
                cudaEventCreate(&start);
                cudaEventCreate(&startT);
                cudaEventCreate(&stop);
                cudaEventCreate(&stopT);
                // Time the full pipeline (alloc+copy+kernel+copy) and the
                // kernel alone.
                cudaEventRecord(startT,0);
                cudaMalloc((void**)&d_in, arrayBytes);
                cudaMalloc((void**)&d_out, smemSize);
                cudaMemcpy(d_in, h_in, arrayBytes, cudaMemcpyHostToDevice);
                cudaEventRecord(start,0);
                reduce6<512><<<blocks,threads,smemSize>>>(d_in,d_out,arraySize);
                cudaDeviceSynchronize();
                cudaEventRecord(stop, 0);
                cudaEventSynchronize(stop);
                cudaMemcpy(h_out, d_out, smemSize, cudaMemcpyDeviceToHost);
                // Fold the per-block partial sums on the host.
                double res = 0;
                for (int i = 0; i < blocks;i++){
                    res += h_out[i];
                }
                cudaEventRecord(stopT,0);
                cudaEventSynchronize(stopT);
                std::cout << "Device Result is: " << h_out[0] << std::endl;
                std::cout << "Host Result is: " << result << std::endl;
                std::cout << "Result is: " << res << std::endl;
                printf("Result correct? %s\n", res == result ? "true" : "false");
                cudaEventElapsedTime(&time, start, stop);
                cudaEventElapsedTime(&full, startT, stopT);
                printf ("Time for the kernel: %f ms\n", time);
                printf ("Time Full: %f ms\n", full);
                // Fix: release the CUDA events (leaked in the original).
                cudaEventDestroy(start);
                cudaEventDestroy(stop);
                cudaEventDestroy(startT);
                cudaEventDestroy(stopT);
                free(h_in);
                free(h_out);
                cudaFree(d_in);
                cudaFree(d_out);
            }
        }
    }
}
|
2,573 | #include "include/vec_add.cuh"
// Entry point: runs the vector-addition demo declared in include/vec_add.cuh.
// Command-line arguments are accepted but currently unused.
int main(int argc, char *argv[]) {
test_vec_add();
return 0;
}
|
2,574 | //#define num_cuda_stream (5)
// Computes the Euclidean distance from point `index` to every point in the
// (x, y)-interleaved array `input`, writing one distance per point into
// `output`. A grid-stride loop over a 2D grid of 2D blocks covers all
// `size` points regardless of launch shape.
__global__ void Euler_distance(float* input, float* output, int index, int size)
{
    const int stride = gridDim.x * gridDim.y * blockDim.x * blockDim.y;
    int tid = blockIdx.y * gridDim.x * (blockDim.x * blockDim.y)
            + blockIdx.x * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x + threadIdx.x;
    for (; tid < size; tid += stride)
    {
        const float dx = input[tid * 2]     - input[index * 2];
        const float dy = input[tid * 2 + 1] - input[index * 2 + 1];
        output[tid] = sqrtf(dx * dx + dy * dy);
    }
}
// Counts the epsilon-neighbours of point `index`: for every point whose
// precomputed distance input[t] is below `epsi` (excluding the query point
// itself), atomically increments *output. Grid-stride loop over `size`
// points. (Fix: removed the unused locals dx, dy left over from
// Euler_distance.)
__global__ void is_in_epsi(float* input, int* output, int index, float epsi, int size)
{
    int thread = blockIdx.y*gridDim.x*(blockDim.x*blockDim.y) + blockIdx.x*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int total = gridDim.x*gridDim.y*blockDim.x*blockDim.y;
    while (thread < size)
    {
        if (input[thread] < epsi && thread != index)
        {
            atomicAdd(output, 1);
        }
        thread += total;
    }
}
// Overload that both counts and marks epsilon-neighbours of point `index`:
//   output1: running neighbour count (atomicAdd accumulator),
//   output2: per-point membership flag (1 = neighbour, 0 = not).
// Grid-stride loop over `size` points; the query point is excluded.
// (Fix: removed the unused locals dx, dy left over from Euler_distance.)
__global__ void is_in_epsi(float* input, int* output1, int* output2, int index, float epsi, int size)
{
    int thread = blockIdx.y*gridDim.x*(blockDim.x*blockDim.y) + blockIdx.x*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int total = gridDim.x*gridDim.y*blockDim.x*blockDim.y;
    while (thread < size)
    {
        output2[thread] = 0;
        if (input[thread] < epsi && thread != index)
        {
            atomicAdd(output1, 1);
            output2[thread] = 1;
        }
        thread += total;
    }
}
// Compacts the neighbourhood of a point into `output`.
// `input1` appears to be an inclusive prefix sum of 0/1 membership flags,
// so input1[t] is the 1-based output slot of point t when its flag is set
// — NOTE(review): inferred from the boundary tests below; confirm against
// the code that produces input1.
// Output layout: neighbour distances in [0, length[0]), and the source
// point indices (stored as float) in [length[0], 2*length[0]).
__global__ void copy_to_euler_neighbour(int* input1, float* input2, float* output, int size, int* length)
{
// Flattened global thread id over a 2D grid of 2D blocks; grid-stride loop.
int thread = blockIdx.y*gridDim.x*(blockDim.x*blockDim.y) + blockIdx.x*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int total = gridDim.x*gridDim.y*blockDim.x*blockDim.y;
while (thread<size)
{
// Element 0 is a member iff its prefix value is already non-zero.
if (!thread&&input1[thread])
{
output[(input1[thread]-1)] = input2[thread];
output[length[0]+(input1[thread])-1] = thread;
}
// Element t > 0 is a member iff the prefix sum increased at t.
if(thread&&input1[thread]!=input1[thread-1])
{
output[(input1[thread]-1)] = input2[thread];
output[length[0]+(input1[thread])-1] = thread;
}
thread += total;
}
return;
}
// For each distance row (one block per source point, rows offset by
// `index`), gathers the neighbours closer than `epsi` from
// input[thread*size ...] into (index, distance) pairs in
// output[2*thread*size ...], then sorts the pairs by ascending distance
// with an in-place swap sort. Only thread 0 of each block does any work;
// the remaining threads of the block are idle.
// NOTE(review): entries with distance exactly 0 are skipped
// (`&& d_input[i]`), presumably to exclude the point itself — confirm this
// cannot also drop genuine duplicate points at distance 0.
__global__ void set_core_distance(float* input, float* output, int index, float epsi, int size)
{
//int thread = blockIdx.y*gridDim.x*(blockDim.x*blockDim.y) + blockIdx.x*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// One row per block.
int thread = blockIdx.x;
int j = 0;
// Rows past the end of the point set do nothing.
if (thread+index >=size)
{
return;
}
float* d_input = input +thread* size;
float* d_output = output + 2*thread*size;
if (threadIdx.x==0)
{
// Pass 1: collect (point index, distance) pairs within epsi.
for (int i=0;i<size;i++)
{
if (d_input[i]<epsi&&d_input[i])
{
d_output[j*2] = i;
d_output[j*2+1] = d_input[i];
j++;
}
}
// Pass 2: selection-style sort of the j pairs by distance (slot 2k+1).
for (int i=0;i<j;i++)
{
for (int k=i;k<j;k++)
{
if(d_output[i*2+1]>d_output[k*2+1])
{
float a1,a2;
a1 = d_output[i*2];
a2 = d_output[i*2+1];
d_output[i*2] = d_output[k*2];
d_output[i*2+1] = d_output[k*2+1];
d_output[k*2] = a1;
d_output[k*2+1] = a2;
}
}
}
}
return;
}
/*
 * MST cycle-breaking, step 1: for each cycle-member vertex in cyc_list,
 * finds the cheapest edge that could replace its current MST edge and
 * records the cost delta and the replacement edge's offset in cyc_delta:
 *   cyc_delta[2*tid]   = weight delta (negative => no replacement exists)
 *   cyc_delta[2*tid+1] = replacement offset into `edge`, or -1.
 * One thread per entry of cyc_list. NOTE(review): tid is not bounds-checked,
 * so the launch must supply exactly one thread per list entry — confirm.
 */
__global__ void break_cycle_step1(int* cyc_list, int* vertex, int* mst_edge, int* edge_perv, int* group_id, float* weight, int* edge,float* cyc_delta)
{
    int tid = threadIdx.x + threadIdx.y*blockDim.x + blockIdx.x*(blockDim.x*blockDim.y) + blockIdx.y*gridDim.x*(blockDim.x*blockDim.y);
    int cyc_id = cyc_list[tid];
    // Adjacency span of this vertex: edge[present .. end).
    int present = vertex[cyc_id];
    int end = vertex[cyc_id] + edge_perv[cyc_id];
    // Fix: the original wrote `mst_edge[cyc_id != -1]`, indexing mst_edge by
    // the boolean 0/1 instead of comparing the vertex's MST edge with -1.
    if (mst_edge[cyc_id] != -1)
    {
        // Locate the current MST edge inside the adjacency span; candidate
        // replacements are the (weight-sorted, presumably) edges after it.
        while (mst_edge[cyc_id]!=edge[present]){present++;}
        int i=present+1;
        for (;i<end;i++)
        {
            int par_id = edge[i];
            if (group_id[par_id]!=group_id[cyc_id])
            {
                // First edge leaving this vertex's group: record the extra
                // weight and the replacement's offset in `edge`.
                cyc_delta[tid*2] = weight[i] - weight[present];
                cyc_delta[tid*2 + 1] = i;
                break;
            }
        }
        if (i==end)
        {
            // No replacement available: negative delta marks "delete only".
            cyc_delta[tid*2] = -weight[present];
            cyc_delta[tid*2 + 1] = -1;
        }
    }
    return ;
}
/*
 * MST cycle-breaking, step 2: scans the candidate deltas from step 1 and
 * commits a single edge change.
 *   n0min = index of the smallest positive delta (cheapest replacement),
 *           or -1 when every delta is negative (no replacement exists);
 *   n0max = index of the most negative delta (best edge to delete).
 * Intended for a single-thread launch: all threads would perform the same
 * scan and write the same result. (Fix: removed the unused thread id; fixed
 * the scan condition to bound-check `i` BEFORE reading cyc_delta[i*2] —
 * the original read one element past the end when all deltas were negative.)
 */
__global__ void break_cycle_step2(int* cyc_list, int* edge, int* mst_edge, float* cyc_delta, int size)
{
    int n0min = 0;
    int n0max = 0;
    int i = 0;
    // Skip leading negative deltas to seed n0min with the first non-negative.
    while (i < size && cyc_delta[i*2] < 0) { i++; }
    if (i == size)
    {
        n0min = -1;
    }
    else
    {
        n0min = i;
    }
    for (i = 0; i < size; i++)
    {
        // Track the minimum positive delta. When n0min == -1 every delta is
        // negative, so the left operand is false and n0min is never indexed.
        if (cyc_delta[i*2] > 0 && cyc_delta[n0min*2] > cyc_delta[i*2])
        {
            n0min = i;
        }
        // Track the most negative delta.
        if (cyc_delta[i*2] < 0 && cyc_delta[n0max*2] > cyc_delta[i*2])
        {
            n0max = i;
        }
    }
    if (n0min == -1) // no replacement anywhere: delete the cheapest-to-drop edge
    {
        int delete_par_id = cyc_list[n0max];
        mst_edge[delete_par_id] = -1;
    }
    if (n0min != -1) // otherwise swap in the cheapest replacement edge
    {
        int replace_par_id = cyc_delta[2*n0min+1];
        int delete_par_id = cyc_list[n0min];
        mst_edge[delete_par_id] = edge[replace_par_id];
    }
    return;
}
2,575 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/*
 * Stores a + b + c into *resultado. Every launched thread performs the
 * identical write, so the stored value is the same for any launch shape.
 */
__global__ void kernel_sumar(int *a, int *b, int *c, int *resultado)
{
    const int suma_parcial = *a + *b;
    *resultado = suma_parcial + *c;
}
/*
 * Adds a + b + c on the GPU and stores the sum in *resultado.
 * Copies the three operands host->device, runs the kernel, waits for it,
 * and copies the result back.
 */
void sumar_en_cuda(int a, int b, int c, int* resultado)
{
    // Device-side operands.
    int *dev_a;
    int *dev_b;
    int *dev_c;
    // Device-side result.
    int *dev_resultado;
    // Allocate device memory for the four ints.
    cudaMalloc((void **)&dev_a, sizeof(int));
    cudaMalloc((void **)&dev_b, sizeof(int));
    cudaMalloc((void **)&dev_c, sizeof(int));
    cudaMalloc((void **)&dev_resultado, sizeof(int));
    // Copy the operands host -> device (the result has no value to copy yet).
    cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, &c, sizeof(int), cudaMemcpyHostToDevice);
    // Fix: one scalar result needs exactly one thread. The original launch
    // <<<100, 100>>> spawned 10000 threads all racing to write the same
    // device word.
    kernel_sumar <<<1, 1>>> (dev_a, dev_b, dev_c, dev_resultado);
    // Wait for the kernel to finish before reading the result.
    cudaDeviceSynchronize();
    // Copy the result device -> host.
    cudaMemcpy(resultado, dev_resultado, sizeof(int), cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaFree(dev_resultado);
}
// Demo entry point: adds three integers on the GPU and prints the sum.
int main()
{
    int resultado;                        // receives the GPU-computed sum
    sumar_en_cuda(3, 5, 8, &resultado);   // 3 + 5 + 8 = 16
    printf("El resultado es %d", resultado);
    return 0;
}
|
2,576 | #include "includes.h"
#pragma diag_suppress integer_sign_change
static unsigned short* d_in;
static unsigned char* d_out;
static unsigned int h_Width;
static unsigned int h_Height;
static unsigned int h_BlockWidth;
static unsigned int h_BlockHeight;
#define THREAD_TOTAL_X_LEN 12
#define THREAD_AUX_X_LEN 4
#define THREAD_WORKING_X_LEN (THREAD_TOTAL_X_LEN - THREAD_AUX_X_LEN)
#define THREAD_TOTAL_Y_LEN 12
#define THREAD_AUX_Y_LEN 4
#define THREAD_WORKING_Y_LEN (THREAD_TOTAL_Y_LEN - THREAD_AUX_Y_LEN)
#define OFFSET(x,y) sIdx + y * THREAD_TOTAL_X_LEN + x
/*
 * 5x5 extended-Sobel normal-map kernel: reads a 16-bit grayscale image and
 * writes an RGBA normal map (x, y, z, 255) with 4 bytes per pixel.
 * Each block stages a THREAD_TOTAL_X_LEN x THREAD_TOTAL_Y_LEN tile (working
 * area plus a 2-pixel halo) in dynamic shared memory; only interior threads
 * produce output. Requires blockDim == (THREAD_TOTAL_X_LEN,
 * THREAD_TOTAL_Y_LEN) and shared memory for the full tile of floats.
 */
__global__ void Sobel(const unsigned short* in,unsigned char* out, const unsigned int width, const unsigned int height)
{
    extern __shared__ float s[];
    // Shift by the halo radius. For the first blocks the unsigned subtraction
    // wraps to a huge value, which deliberately fails the bounds test below
    // and loads 0 into the halo instead.
    const unsigned int xPos = (blockIdx.x * THREAD_WORKING_X_LEN + threadIdx.x) - (THREAD_AUX_X_LEN / 2);
    const unsigned int yPos = (blockIdx.y * THREAD_WORKING_Y_LEN + threadIdx.y) - (THREAD_AUX_Y_LEN / 2);
    const unsigned int inPos = (xPos + yPos * width);
    const unsigned int sIdx = (threadIdx.x + threadIdx.y * THREAD_TOTAL_X_LEN);
    unsigned int outIt = inPos * 4;   // RGBA output, 4 bytes per pixel
    // Stage the tile (normalized to [0, 1]); out-of-image texels become 0.
    if (xPos < width && yPos < height)
        s[sIdx] = in[inPos] / float(USHRT_MAX);
    else
        s[sIdx] = 0.0f;
    __syncthreads();
    // Only non-halo threads write output. Fix 1: the y test now uses
    // THREAD_AUX_Y_LEN (the original reused THREAD_AUX_X_LEN; the constants
    // are both 4 today but could diverge). Fix 2: also require the pixel to
    // lie inside the image, preventing out-of-bounds writes when the image
    // size is not a multiple of the working tile.
    if ((threadIdx.x - (THREAD_AUX_X_LEN / 2)) < THREAD_WORKING_X_LEN &&
        (threadIdx.y - (THREAD_AUX_Y_LEN / 2)) < THREAD_WORKING_Y_LEN &&
        xPos < width && yPos < height)
    {
        const float sobelX = (
            -1 * s[OFFSET(-2,-2)] -2 * s[OFFSET(-1,-2)] +0 * s[OFFSET(0,-2)] +2 * s[OFFSET(1,-2)] +1 * s[OFFSET(2,-2)]
            -1 * s[OFFSET(-2,-1)] -2 * s[OFFSET(-1,-1)] +0 * s[OFFSET(0,-1)] +2 * s[OFFSET(1,-1)] +1 * s[OFFSET(2,-1)]
            -2 * s[OFFSET(-2, 0)] -4 * s[OFFSET(-1, 0)] +0 * s[OFFSET(0, 0)] +4 * s[OFFSET(1, 0)] +2 * s[OFFSET(2, 0)]
            -1 * s[OFFSET(-2, 1)] -2 * s[OFFSET(-1, 1)] +0 * s[OFFSET(0, 1)] +2 * s[OFFSET(1, 1)] +1 * s[OFFSET(2, 1)]
            -1 * s[OFFSET(-2, 2)] -2 * s[OFFSET(-1, 2)] +0 * s[OFFSET(0, 2)] +2 * s[OFFSET(1, 2)] +1 * s[OFFSET(2, 2)]
            )*(512/8)/18;
        const float sobelY = (
            +1 * s[OFFSET(-2,-2)] +1 * s[OFFSET(-1,-2)] +2 * s[OFFSET(0,-2)] +1 * s[OFFSET(1,-2)] +1 * s[OFFSET(2,-2)]
            +2 * s[OFFSET(-2,-1)] +2 * s[OFFSET(-1,-1)] +4 * s[OFFSET(0,-1)] +2 * s[OFFSET(1,-1)] +2 * s[OFFSET(2,-1)]
            +0 * s[OFFSET(-2, 0)] +0 * s[OFFSET(-1, 0)] +0 * s[OFFSET(0, 0)] +0 * s[OFFSET(1, 0)] +0 * s[OFFSET(2, 0)]
            -2 * s[OFFSET(-2, 1)] -2 * s[OFFSET(-1, 1)] -4 * s[OFFSET(0, 1)] -2 * s[OFFSET(1, 1)] -2 * s[OFFSET(2, 1)]
            -1 * s[OFFSET(-2, 2)] -1 * s[OFFSET(-1, 2)] -2 * s[OFFSET(0, 2)] -1 * s[OFFSET(1, 2)] -1 * s[OFFSET(2, 2)]
            )*(512/8)/18;
        // Normalize the (-gx, -gy, 1) gradient into a byte-encoded normal.
        const float gradientLen = sqrt(sobelX*sobelX + sobelY*sobelY + 1.0f);
        const unsigned char xLen = -(sobelX * 128)/gradientLen + 128;
        const unsigned char yLen = -(sobelY * 128)/gradientLen + 128;
        const unsigned char zLen = (UCHAR_MAX)/gradientLen;
        out[outIt++] = xLen;
        out[outIt++] = yLen;
        out[outIt++] = zLen;
        out[outIt] = 255;
    }
}
2,577 | #include <cuda_runtime.h>
typedef unsigned int uint;
uint max_threads = 1024;
uint max_blocks = 65535;
/*
 * One compare-exchange step of the bitonic sorting network.
 * `i` is the length of the current bitonic subsequence (sets direction),
 * `j` is the partner distance for this step.
 */
__global__
void bitonicSortStep(float *cudaArr, uint i, uint j)
{
    const uint self = threadIdx.x + blockDim.x * blockIdx.x;
    const uint partner = self ^ j;
    // Each pair is handled exactly once, by the lower-indexed member.
    if (partner <= self)
        return;
    const bool ascending = ((self & i) == 0);
    const float lo = cudaArr[self];
    const float hi = cudaArr[partner];
    // Swap when the pair is out of order for this subsequence's direction.
    if (ascending ? (lo > hi) : (lo < hi))
    {
        cudaArr[self] = hi;
        cudaArr[partner] = lo;
    }
}
/*
 * Sorts cudaArr (device memory) ascending with a bitonic network.
 * `len` must be a power of two >= 2: the network pairs tid with tid ^ j.
 * Throws 1 when the required 1D grid would exceed max_blocks.
 */
extern "C" void bitonicSort(float *cudaArr, uint len)
{
    uint threads = max_threads;
    uint blocks = len / threads;        // ceil(len / threads)
    if (len % threads != 0)
        blocks++;
    if (blocks > max_blocks)
        throw 1;
    // Stage i doubles the sorted-subsequence length; step j halves the
    // compare distance. Kernels on the default stream already execute in
    // order, so no per-step synchronization is needed.
    for (uint i = 2; i <= len; i <<= 1)
    {
        for (uint j = i>>1; j > 0; j >>= 1)
        {
            bitonicSortStep<<<blocks, threads>>>(cudaArr, i, j);
        }
    }
    // Fix: replaces the deprecated cudaThreadSynchronize(); a single sync
    // after the final step suffices (the original synchronized every step).
    cudaDeviceSynchronize();
}
|
2,578 | #include "includes.h"
#define IDX2D(a, i, stride, j) ((a)[(i)*(stride) + (j)])
// One explicit time step of a 2D wave simulation over the interior of an
// ny x nx row-major mesh: computes the discrete Laplacian of height z,
// advances velocity v by half the acceleration, then advances z by dt*v.
// One thread per interior point; the +1 offsets skip the boundary.
__global__ void sim_kernel_naive(double *z, double *v, size_t nx, size_t ny, double dx2inv, double dy2inv, double dt) {
    const int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
    const int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
    if (col >= nx-1 || row >= ny-1) return;
    // Flat index of this interior point and its centre value.
    const size_t c = (size_t)row * nx + col;
    const double zc = z[c];
    // Second differences along x (left/right) and y (up/down).
    const double ax = dx2inv*(z[c - 1] + z[c + 1] - 2.0*zc);
    const double ay = dy2inv*(z[c - nx] + z[c + nx] - 2.0*zc);
    // Velocity update uses half the acceleration, then the position update
    // uses the freshly updated velocity.
    const double vn = v[c] + dt*(ax + ay)/2;
    v[c] = vn;
    z[c] += dt*vn;
}
2,579 | #include "includes.h"
// Transposes an ny x nx row-major matrix `in` into `out` using a padded
// shared-memory tile, with each block handling two horizontally adjacent
// BDIMX-wide tiles (2x unroll). The +IPAD padding on the tile's inner
// dimension avoids shared-memory bank conflicts on the column reads.
// Requires blockDim == (BDIMX, BDIMY) and a grid sized for the 2x unroll.
// NOTE(review): the guard checks only the input coordinates; the second
// store (`to + ny * BDIMX`) assumes nx/ny are multiples of the tile size —
// confirm with the launch code.
__global__ void transposeSmemUnrollPad(float *out, float *in, const int nx, const int ny)
{
// static 1D shared memory with padding
__shared__ float tile[BDIMY * (BDIMX * 2 + IPAD)];
// coordinate in original matrix (x covers two tiles per block)
unsigned int ix = 2 * blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// linear global memory index for original matrix
unsigned int ti = iy * nx + ix;
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int ix2 = blockIdx.y * blockDim.y + icol;
unsigned int iy2 = 2 * blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
unsigned int to = iy2 * ny + ix2;
if (ix + blockDim.x < nx && iy < ny)
{
// load two rows from global memory to shared memory
unsigned int row_idx = threadIdx.y * (blockDim.x * 2 + IPAD) +
threadIdx.x;
tile[row_idx] = in[ti];
tile[row_idx + BDIMX] = in[ti + BDIMX];
// thread synchronization
__syncthreads();
// store two rows to global memory from two columns of shared memory
unsigned int col_idx = icol * (blockDim.x * 2 + IPAD) + irow;
out[to] = tile[col_idx];
out[to + ny * BDIMX] = tile[col_idx + BDIMX];
}
}
2,580 |
// Gathers the cell-centred field Ecx onto the particles: particle p at
// position xp[p] receives the field value of cell floor(xp[p] / dx).
// Grid-stride loop over np particles.
// NOTE(review): assumes 0 <= xp[p] < dx * (number of cells) — confirm.
__global__ void fields2part(int np, float dx, float *xp, float *Epx, float *Ecx){
    const int stride = blockDim.x * gridDim.x;
    for (int p = threadIdx.x + blockIdx.x * blockDim.x; p < np; p += stride) {
        const int cell = int(xp[p] / dx);
        Epx[p] = Ecx[cell];
    }
}
|
2,581 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
extern "C"
{
// Stub kernel: computes a flattened thread id over a 2D grid of 1D blocks
// but performs no work — input/output/incrementSize/itemCount are untouched
// placeholders. NOTE(review): threadId is computed and unused; presumably
// the kernel body is still to be written.
__global__ void NewModuleWithSourceDepsKernel(float* input, float* output, float incrementSize, int itemCount)
{
int threadId = blockIdx.y*blockDim.x*gridDim.x
+ blockIdx.x*blockDim.x
+ threadIdx.x;
}
} |
2,582 | #include "includes.h"
// Placeholder convolution kernel: intentionally empty, no work implemented.
__global__ void conv2() {
}
2,583 | //#include"E:\VisualStudio\CUDA\CUDAColorToGray\CUDAColorToGray\bmploader_zhang.h"
//#include"E:\VisualStudio\CUDA\CUDAColorToGray\CUDAColorToGray\color_fading.h"
//#include"E:\VisualStudio\CUDA\cudaDome03\common\cpu_bitmap.h"
//#include <stdio.h>
//#include <stdlib.h>
//#include <cmath>
//#define width 1024
//#define height 683
//#define size width*height
//void kernel(unsigned char *ptr, unsigned char *dst1){
//
// for (int y = 0; y<height; y++) {
// for (int x = 0; x<width; x++) {
// int offset = x + y * width;
// ptr[offset * 4 + 0] = dst1[offset * 4 + 0] * 0.299 + dst1[offset * 4 + 1] * 0.587 + dst1[offset * 4 + 2] * 0.114;
// ptr[offset * 4 + 1] = dst1[offset * 4 + 0] * 0.299 + dst1[offset * 4 + 1] * 0.587 + dst1[offset * 4 + 2] * 0.114;
// ptr[offset * 4 + 2] = dst1[offset * 4 + 0] * 0.299 + dst1[offset * 4 + 1] * 0.587 + dst1[offset * 4 + 2] * 0.114;
// ptr[offset * 4 + 3] = 255;
// }
// }
//
//}
//int main(void)
//{
// unsigned char *dst1;
// dst1 = (unsigned char *)malloc(sizeof(int));
// unsigned char **dst;
// dst = &dst1;
// int w, h;
// w = width; h = height;
// char *name = "un.bmp";
// LoadBMPFile(dst, &w, &h, name);
// CPUBitmap bitmap(w, h);
// unsigned char *ptr = bitmap.get_ptr();
// kernel(ptr, dst1);//ɫ
// unsigned char * buffer;
// buffer = (unsigned char *)malloc(sizeof(unsigned char )*size);
//
// for (int y = 0; y<height; y++) {
// for (int x = 0; x<width; x++) {
// int offset = x + y * width;
// buffer[offset] = ptr[offset * 4 + 0]; //
//
// }
// }
// int histo[256];//ֱͼ
// for (int i = 0; i < 256; i++)
// histo[i] = 0;
// for (long i = 0; i < size; i++)
// histo[buffer[i]]++;
// /*for (int i = 0; i < 256; i++)
// {
// printf("%d:,%d", i, histo[i]);
// printf("\n");
// }*/
// long histoCount[256];
// histoCount[0] = histo[0];
// printf("%d", histoCount[0]); printf("\n");
// for (int i = 1; i < 256; i++)
// {
// histoCount[i] = histo[i] + histoCount[i - 1];
//
// }
//
//
// long new_grey[256];
// for (int i = 0; i < 256; i++) new_grey[i] = 0;
// for (int i = 1; i < 256; i++)
// {
// new_grey[i] =round( (long double)(histoCount[i] - histoCount[0]) / (size - histoCount[0]) * 255);
//
// }
// kernel(ptr, dst1);
// for (int y = 0; y < height; y++) {
// for (int x = 0; x < width; x++) {
// int offset = x + y * width;
// ptr[offset * 4 + 0] = new_grey[ptr[offset * 4 + 0]];
// ptr[offset * 4 + 1] = new_grey[ptr[offset * 4 + 1]];
// ptr[offset * 4 + 2] = new_grey[ptr[offset * 4 + 2]];
// ptr[offset * 4 + 3] = 255;
// }
// }
// bitmap.display_and_exit();
// free(dst1);
// free(buffer);
//
//}
|
2,584 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
//GPU function, execute on GPU(device)
// Empty kernel used only to exercise a device launch in this hello-world
// demo; the interesting part is the error checking in main().
__global__ void myfunction(void)
{
}
//standard C function, execute on CPU(host)
// Launches the empty kernel and verifies both the launch and its execution.
int main(void)
{
    int blockSize = 1, gridSize =1;
    myfunction<<<gridSize, blockSize>>>();
    // Catches launch-configuration errors (bad grid/block, etc.).
    cudaCheckErrors("hello world fail");
    // Fix: a kernel launch is asynchronous — synchronize so any execution-
    // time error surfaces before we report success.
    cudaDeviceSynchronize();
    cudaCheckErrors("kernel execution fail");
    printf("Hello World!\n");
    return 0;
}
2,585 | #include "includes.h"
/*
 * Gathers detection attributes into sorted order: thread tid copies the
 * box features, label, direction, and NMS corners of detection
 * indexes[tid] into slot tid of the sorted_* output arrays.
 * One thread per surviving detection (tid < filter_count).
 */
__global__ void sort_boxes_by_indexes_kernel( float* filtered_box, int* filtered_label, int* filtered_dir, float* box_for_nms, int* indexes, int filter_count, float* sorted_filtered_boxes, int* sorted_filtered_label, int* sorted_filtered_dir, float* sorted_box_for_nms, const int num_box_corners, const int num_output_box_feature) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < filter_count) {
        int sort_index = indexes[tid];
        // Copy the whole feature vector. The original unrolled exactly 7
        // element copies by hand, silently assuming
        // num_output_box_feature == 7; looping over the parameter is
        // identical for that case and also supports other feature counts.
        for (int f = 0; f < num_output_box_feature; ++f) {
            sorted_filtered_boxes[tid * num_output_box_feature + f] =
                filtered_box[sort_index * num_output_box_feature + f];
        }
        sorted_filtered_label[tid] = filtered_label[sort_index];
        sorted_filtered_dir[tid] = filtered_dir[sort_index];
        // Same generalization for the NMS corners (was hard-coded to 4).
        for (int c = 0; c < num_box_corners; ++c) {
            sorted_box_for_nms[tid * num_box_corners + c] =
                box_for_nms[sort_index * num_box_corners + c];
        }
    }
}
2,586 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#define MAX_DIM 20
#define SCALING_FACTOR 256
#define NUM_THREADS 1024
#define MOD_BASE 10007
int NUM_DIM;
int * MAT_DIM;
int ** MAT_LIST;
// Returns the largest element of arr, or 0 when num_elem == 0 or every
// element is negative (the running maximum starts at 0).
int find_max(int * arr, int num_elem)
{
    int best = 0;
    for (int idx = 0; idx < num_elem; ++idx) {
        if (arr[idx] > best)
            best = arr[idx];
    }
    return best;
}
// Returns a heap-allocated array of k random matrix dimensions, each in
// [1, MAX_DIM]. Reseeds the RNG from the wall clock on every call, so two
// calls within the same second yield the same sequence. Caller frees.
int * def_mat_dim(int k)
{
int * dim = (int *) malloc(k * sizeof(int));
int i;
srand(time(NULL));
for (i = 0; i < k; i++)
{
dim[i] = (rand() % MAX_DIM) + 1;
//printf("%d\n", dim[i]);
}
return dim;
}
// Splits indices 0..k-1 into `blocks` contiguous chunks of near-equal size,
// returning blocks+1 boundary positions (partition[0] == 0,
// partition[blocks] == k-1). The fractional part of k/blocks is distributed
// by randomized rounding, so interior boundaries can vary run to run.
// Caller frees the returned array.
int * equipartition(int k, int blocks)
{
float div = float(k) / float(blocks);
int div_int = int(div);
float rem = div - float(div_int);
int * partition = (int *) malloc((blocks + 1) * sizeof(int));
srand(time(NULL));
partition[0] = 0;
partition[blocks] = k-1;
int cur_index = 0;
float round_factor = 0.0;
for (int i = 1; i < blocks; i++) {
// Advance by the ideal chunk width (truncated on the int assignment)...
cur_index += div;
// ...then round up with probability equal to the fractional remainder,
// so chunk widths average out to k/blocks.
round_factor = float(rand()) / float(RAND_MAX);
if (round_factor < rem) {
cur_index += 1;
}
partition[i] = cur_index;
}
return partition;
}
// Returns a heap-allocated dimX x dimY row-major matrix filled with random
// entries in [0, MOD_BASE). Reseeds the RNG from the wall clock on every
// call. Caller frees.
int * creat_mat(int dimX, int dimY)
{
int x;
int * mat = (int *) malloc(dimX * dimY * sizeof(int));
srand(time(NULL));
for (x = 0; x < dimX * dimY; x++) {
mat[x] = rand() % MOD_BASE;
//mat[x] = (rand() % MAX_DIM) * SCALING_FACTOR;
//printf("%d ", mat[x]);
}
return mat;
}
// Element-wise comparison of two rows x cols row-major matrices; reports
// the verdict on stdout and returns at the first mismatch.
void if_mats_equal(int * A, int * B, int rows, int cols)
{
    const int total = rows * cols;
    for (int idx = 0; idx < total; idx++) {
        if (A[idx] != B[idx]) {
            printf("Matrices are not equal\n");
            return;
        }
    }
    printf("Matrices are equal\n");
}
// CPU reference matmul: C[i][j] = (sum_k (A[i][k]*B[k][j] mod MOD_BASE))
// mod MOD_BASE, all matrices row-major. BRows is unused (kept for interface
// compatibility; it must equal ACols).
void cpu_mat_mul(int* A, int* B, int* C, int ARows, int ACols, int BRows, int BCols)
{
    for (int r = 0; r < ARows; r++) {
        for (int c = 0; c < BCols; c++) {
            int acc = 0;
            // Reduce each product term mod MOD_BASE before accumulating,
            // matching the original overflow behaviour exactly.
            for (int k = 0; k < ACols; k++) {
                acc += (A[r * ACols + k] * B[k * BCols + c]) % MOD_BASE;
            }
            C[r * BCols + c] = acc % MOD_BASE;
        }
    }
}
// Prints a dimX x dimY row-major matrix to stdout, one row per line.
void print_mat(int * mat, int dimX, int dimY)
{
    for (int r = 0; r < dimX; r++) {
        const int * row = mat + r * dimY;
        for (int c = 0; c < dimY; c++) {
            printf("%d ", row[c]);
        }
        printf("\n");
    }
}
// CPU reference chain multiplication: multiplies mat_list[start],
// mat_list[start+1], ... left to right, ping-ponging between two max-dim
// scratch buffers, and returns a freshly malloc'd product (caller frees).
// NOTE(review): the final copy is sized with dim_list[0] and
// dim_list[num_dim-1] even when start > 0 — correct for the only visible
// call site (start == 0), but suspect for partial chains; confirm before
// calling with a non-zero start.
int * cpu_multi_mat_mult(int num_dim, int * dim_list, int ** mat_list, int start = 0) {
int max_dim = find_max(dim_list, num_dim);
// Two max-sized scratch buffers alternate as source/destination.
int * output_mat1 = (int *) calloc(max_dim * max_dim, sizeof(int));
int * output_mat2 = (int *) calloc(max_dim * max_dim, sizeof(int));
// First multiplication seeds output_mat1.
cpu_mat_mul(mat_list[start], mat_list[start + 1], output_mat1, dim_list[start], dim_list[start + 1], dim_list[start + 1], dim_list[start + 2]);
int num_rows = dim_list[start];
int num_cols = dim_list[start + 2];
//print_mat(output_mat1, num_rows, num_cols);
int num_mult;
// Each pass multiplies the running product by the next matrix; parity of
// num_mult tells which buffer currently holds the product.
for (num_mult = 1; num_mult < num_dim - 2; num_mult++) {
//printf("multiplied %d matrices\n", num_mult + 1);
if (num_mult % 2 == 1) {
cpu_mat_mul(output_mat1, mat_list[start + num_mult + 1], output_mat2, num_rows, num_cols, dim_list[start + num_mult + 1] , dim_list[start + num_mult + 2]);
}
else {
cpu_mat_mul(output_mat2, mat_list[start + num_mult + 1], output_mat1, num_rows, num_cols, dim_list[start + num_mult + 1] , dim_list[start + num_mult + 2]);
}
num_cols = dim_list[start + num_mult + 2];
}
int * output_mat = (int *) malloc(dim_list[0] * dim_list[num_dim - 1] * sizeof(int));
//printf("%d %d\n", num_rows, num_cols);
// Odd num_mult => final product sits in output_mat1, else in output_mat2.
if (num_mult % 2 == 1) {
memcpy(output_mat, output_mat1, dim_list[0] * dim_list[num_dim - 1] * sizeof(int));
}
else {
memcpy(output_mat, output_mat2, dim_list[0] * dim_list[num_dim - 1] * sizeof(int));
}
free(output_mat1);
free(output_mat2);
return output_mat;
}
/*
 * Block-local dense matmul: C = A * B with per-term reduction mod MOD_BASE
 * (row-major). Each thread of the block computes output elements
 * threadIdx.x, threadIdx.x + NUM_THREADS, ... Must be launched with
 * blockDim.x == NUM_THREADS; BRows is unused (must equal ACols).
 */
__device__
void matmult(int* A, int* B, int* C, int ARows, int ACols, int BRows, int BCols)
{
    int num_elem_output = ARows * BCols;
    for (int n = threadIdx.x; n < num_elem_output; n += NUM_THREADS) {
        int col = n % BCols;
        int row = n / BCols;   // equivalent to the original (n + BCols - col)/BCols - 1
        int sum = 0;
        for (int i = 0; i < ACols; i++) {
            sum += (A[row * ACols + i] * B[i * BCols + col]) % MOD_BASE;
        }
        C[row * BCols + col] = sum % MOD_BASE;
    }
    // Fix: the original also called __syncthreads() INSIDE the strided loop.
    // Threads reach that barrier a different number of times whenever
    // num_elem_output is not a multiple of NUM_THREADS — a divergent barrier
    // (undefined behaviour). A single block-wide barrier after all elements
    // are written is sufficient and safe.
    __syncthreads();
}
// Device-side sequential chain multiplication: one thread block multiplies
// mat_list[0..num_dim-2] left to right, ping-ponging between output_mat1
// and output_mat2 (both sized max_dim^2 by the host). Per the host-side
// readback, the final product is in output_mat1 when num_dim is odd,
// otherwise in output_mat2. Must be launched with a single block of
// NUM_THREADS threads (matmult's barriers are block-wide only).
__global__
void gpu_seq_multi_matmult(int num_dim, int * dim_list, int ** mat_list, int * output_mat1, int * output_mat2)
{
// Seed the running product with mat_list[0] * mat_list[1].
matmult(mat_list[0], mat_list[1], output_mat1, dim_list[0], dim_list[1], dim_list[1], dim_list[2]);
__syncthreads();
int num_mult;
int num_rows = dim_list[0];
int num_cols = dim_list[2];
// Alternate source/destination buffers each multiplication; the barrier
// makes the freshly written product visible before the next pass reads it.
for (num_mult = 1; num_mult < num_dim - 2; num_mult++) {
if (num_mult % 2 == 1) {
matmult(output_mat1, mat_list[num_mult + 1], output_mat2, num_rows, num_cols, dim_list[num_mult + 1], dim_list[num_mult + 2]);
} else {
matmult(output_mat2, mat_list[num_mult + 1], output_mat1, num_rows, num_cols, dim_list[num_mult + 1], dim_list[num_mult + 2]);
}
num_cols = dim_list[num_mult + 2];
__syncthreads();
}
}
// Device-side partial chain multiplication: multiplies the sub-chain of
// matrices covered by dimension indices [start_dim_idx, end_dim_idx),
// ping-ponging between output_mat1 and output_mat2. Intended to be run by
// one block per partition so several sub-chains proceed in parallel; the
// barriers are block-wide only, so blocks must work on disjoint buffers.
__global__
void gpu_par_multi_matmult(int start_dim_idx, int end_dim_idx, int * dim_list, int ** mat_list, int * output_mat1, int * output_mat2)
{
// Seed the running product with the first pair of the sub-chain.
matmult(mat_list[start_dim_idx], mat_list[start_dim_idx + 1], output_mat1, dim_list[start_dim_idx], dim_list[start_dim_idx + 1], dim_list[start_dim_idx + 1], dim_list[start_dim_idx + 2]);
__syncthreads();
//int num_mult;
int num_rows = dim_list[start_dim_idx];
int num_cols = dim_list[start_dim_idx + 2];
// for (num_mult = start_dim_idx + 1; count < end_dim_idx - start_dim_idx - 2; num_mult++) {
// Alternate buffers each pass; parity of `count` tracks which buffer holds
// the current product (odd pass count => product ends in output_mat1).
for (int count = 1; count < (end_dim_idx - start_dim_idx - 2); count++) {
if (count % 2 == 1) {
matmult(output_mat1, mat_list[start_dim_idx + count + 1], output_mat2, num_rows, num_cols, dim_list[start_dim_idx + count + 1], dim_list[start_dim_idx + count + 2]);
} else {
matmult(output_mat2, mat_list[start_dim_idx + count + 1], output_mat1, num_rows, num_cols, dim_list[start_dim_idx + count + 1], dim_list[start_dim_idx + count + 2]);
//matmult(output_mat2, mat_list[num_mult + 1], output_mat1, num_rows, num_cols, dim_list[num_mult + 1], dim_list[num_mult + 2]);
}
num_cols = dim_list[start_dim_idx + count + 2];
__syncthreads();
}
}
// Runs the CPU reference chain multiplication, prints the product matrix,
// and returns it (caller frees).
int * cpu_algo(int num_dim, int * mat_dim, int ** mat_list)
{
    int * product = cpu_multi_mat_mult(num_dim, mat_dim, mat_list);
    printf("row: %d col: %d\n", mat_dim[0], mat_dim[num_dim-1]);
    printf("printing cpu sequential result\n");
    print_mat(product, mat_dim[0], mat_dim[num_dim-1]);
    printf("\n");
    return product;
}
/*
 * Multiplies the chain mat_list[0..num_dim-2] on the GPU using a single
 * thread block (gpu_seq_multi_matmult), prints the product, and returns a
 * freshly malloc'd copy of it (caller frees).
 */
int * gpu_one_block_algo(int num_dim, int * mat_dim, int ** mat_list)
{
    int num_mat = num_dim - 1;
    int max_dim = find_max(mat_dim, num_dim);
    printf("Copying matrix dimensions to device\n");
    int * d_mat_dim;
    cudaMalloc((void **)&d_mat_dim, num_dim * sizeof(int));
    cudaMemcpy(d_mat_dim, mat_dim, num_dim * sizeof(int), cudaMemcpyHostToDevice);
    printf("Allocating space to store output matrix\n");
    // Two max-sized device buffers are ping-ponged between multiplications;
    // both start zeroed from the calloc'd host buffer.
    int * out_mat = (int *) calloc(max_dim * max_dim, sizeof(int));
    int * d_out_mat1, * d_out_mat2;
    cudaMalloc((void **) &d_out_mat1, max_dim * max_dim * sizeof(int));
    cudaMalloc((void **) &d_out_mat2, max_dim * max_dim * sizeof(int));
    cudaMemcpy(d_out_mat1, out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out_mat2, out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);
    printf("Allocating space for each matrix, and storing pointer address of matrices on the host\n");
    int ** int_mat_list = (int **) malloc(num_mat * sizeof(int *));
    for (int k = 0; k < num_mat; k++) {
        cudaMalloc((void **)&int_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(int));
        cudaMemcpy(int_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(int), cudaMemcpyHostToDevice);
    }
    printf("Copying pointer addresses of matrices from host to device\n");
    int ** d_mat_list;
    cudaMalloc(&d_mat_list, num_mat * sizeof(int *));
    cudaMemcpy(d_mat_list, int_mat_list, num_mat * sizeof(int *), cudaMemcpyHostToDevice);
    gpu_seq_multi_matmult<<<1, NUM_THREADS>>>(num_dim, d_mat_dim, d_mat_list, d_out_mat1, d_out_mat2);
    cudaDeviceSynchronize();
    // The kernel alternates result buffers per multiplication; the parity of
    // num_dim tells which buffer holds the final product.
    if (num_dim % 2 == 1) {
        cudaMemcpy(out_mat, d_out_mat1, mat_dim[0] * mat_dim[num_dim-1] * sizeof(int), cudaMemcpyDeviceToHost);
    } else {
        cudaMemcpy(out_mat, d_out_mat2, mat_dim[0] * mat_dim[num_dim-1] * sizeof(int), cudaMemcpyDeviceToHost);
    }
    printf("row: %d col: %d\n", mat_dim[0], mat_dim[num_dim-1]);
    printf("printing gpu (one thread block) result\n");
    print_mat(out_mat, mat_dim[0], mat_dim[num_dim-1]);
    printf("\n");
    // Trim the max-dim scratch copy down to the exact result size.
    int * output_mat = (int *) malloc(mat_dim[0] * mat_dim[num_dim-1] * sizeof(int));
    memcpy(output_mat, out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(int));
    free(out_mat);
    cudaFree(d_mat_dim);
    // Fix: the original also called cudaFree(int_mat_list) here, but
    // int_mat_list is a host malloc'd array — that call is invalid and
    // poisons the CUDA error state. Free the device matrices it points to,
    // the device pointer array, the scratch buffers, then the host array.
    for (int k = 0; k < num_mat; k++) {
        cudaFree(int_mat_list[k]);
    }
    cudaFree(d_mat_list);
    cudaFree(d_out_mat1);
    cudaFree(d_out_mat2);
    free(int_mat_list);
    return output_mat;
}
/*
 * Multi-block GPU matrix-chain multiplication.
 *
 * Splits the chain of num_mat = num_dim-1 matrices into nblocks roughly
 * equal partitions, multiplies each partition with an independent kernel
 * launch, then aggregates the partial products with one final launch.
 *
 * mat_dim has num_dim entries; matrix k is mat_dim[k] x mat_dim[k+1].
 * Returns a freshly malloc'd mat_dim[0] x mat_dim[num_dim-1] result the
 * caller owns.
 *
 * Fixes vs. the original: the cleanup called cudaFree() on the host
 * pointer table int_mat_list, dereferenced that table AFTER freeing it,
 * freed it twice, and leaked most of the intermediate allocations.
 */
int * gpu_multi_block_algo(int num_dim, int * mat_dim, int ** mat_list)
{
    int num_mat = num_dim - 1;
    // ~sqrt(num_mat) matrices per block.
    int nblocks = int(double(num_mat) / double(sqrt(num_mat)));
    int max_dim = find_max(mat_dim, num_dim);
    printf("using %d blocks for %d matrices\n", nblocks, num_mat);

    printf("Copying matrix dimensions to device\n");
    int * d_mat_dim;
    cudaMalloc((void **)&d_mat_dim, num_dim * sizeof(int));
    cudaMemcpy(d_mat_dim, mat_dim, num_dim * sizeof(int), cudaMemcpyHostToDevice);

    printf("Allocating space to store output matrix\n");
    // max_dim x max_dim zeroed scratch, reused to initialize every buffer.
    int * out_mat = (int *) calloc(max_dim * max_dim, sizeof(int));
    int * d_out_mat1, * d_out_mat2;
    cudaMalloc((void **) &d_out_mat1, max_dim * max_dim * sizeof(int));
    cudaMalloc((void **) &d_out_mat2, max_dim * max_dim * sizeof(int));
    cudaMemcpy(d_out_mat1, out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out_mat2, out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);

    printf("Allocating space for each matrix, and storing pointer address of matrices on the host\n");
    int ** int_mat_list = (int **) malloc(num_mat * sizeof(int *));
    for (int k = 0; k < num_mat; k++) {
        cudaMalloc((void **)&int_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(int));
        cudaMemcpy(int_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(int), cudaMemcpyHostToDevice);
    }

    printf("Copying pointer addresses of matrices from host to device\n");
    int ** d_mat_list;
    cudaMalloc(&d_mat_list, num_mat * sizeof(int *));
    cudaMemcpy(d_mat_list, int_mat_list, num_mat * sizeof(int *), cudaMemcpyHostToDevice);

    printf("Allocating a set of intermediate arrays to store partial results\n");
    // Two ping-pong buffers per partition.
    int ** int_mat1, ** int_mat2;
    int_mat1 = (int **) malloc(nblocks * sizeof(int *));
    int_mat2 = (int **) malloc(nblocks * sizeof(int *));
    for (int k = 0; k < nblocks; k++) {
        cudaMalloc((void **)&int_mat1[k], max_dim * max_dim * sizeof(int));
        cudaMalloc((void **)&int_mat2[k], max_dim * max_dim * sizeof(int));
        cudaMemcpy(int_mat1[k], out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(int_mat2[k], out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);
    }

    printf("Copying pointer addresses of intermediate products from host to device\n");
    int ** int_mat_final = (int **) malloc(nblocks * sizeof(int *));
    int ** d_int_mat_final;
    cudaMalloc((void **)&d_int_mat_final, nblocks * sizeof(int *));

    printf("Defining a partition to split work evenly\n");
    // TODO(review): equipartition appears to heap-allocate its result;
    // if confirmed, free(mat_list_partition) before returning.
    int * mat_list_partition = equipartition(num_dim, nblocks);

    printf("Getting the dimensions of the partial products matrices\n");
    int * int_mat_dim = (int *) malloc((nblocks + 1) * sizeof(int));
    for (int i = 0; i < nblocks + 1; i++) {
        int_mat_dim[i] = mat_dim[mat_list_partition[i]];
    }

    printf("Moving the dimensions of partial products from host to device\n");
    int * d_int_mat_dim;
    cudaMalloc((void **)&d_int_mat_dim, (nblocks + 1) * sizeof(int));
    cudaMemcpy(d_int_mat_dim, int_mat_dim, (nblocks + 1) * sizeof(int), cudaMemcpyHostToDevice);

    printf("Defining two output arrays which store so that one array storing partial results can be used as input of another output, which is store in the new output array\n");
    int * d_int_output_mat1, * d_int_output_mat2;
    cudaMalloc((void **)&d_int_output_mat1, max_dim * max_dim * sizeof(int));
    cudaMalloc((void **)&d_int_output_mat2, max_dim * max_dim * sizeof(int));
    cudaMemcpy(d_int_output_mat1, out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_int_output_mat2, out_mat, max_dim * max_dim * sizeof(int), cudaMemcpyHostToDevice);

    printf("Calling kernels to calculate a subset of input matrices asynchronously\n");
    for (int i = 0; i < nblocks; i++) {
        gpu_par_multi_matmult<<<1, NUM_THREADS>>>(mat_list_partition[i], mat_list_partition[i+1] + 1, d_mat_dim, d_mat_list, int_mat1[i], int_mat2[i]);
    }
    cudaDeviceSynchronize();

    printf("output arrays accounted for, will be used to aggregate results\n");
    for (int i = 0; i < nblocks; i++) {
        // NOTE(review): out_mat is overwritten on every iteration here and
        // again after the final launch, so these copies look redundant —
        // kept to preserve the original behavior exactly.
        cudaMemcpy(out_mat, int_mat1[i], mat_dim[mat_list_partition[i]] * mat_dim[mat_list_partition[i+1]] * sizeof(int), cudaMemcpyDeviceToHost);
        // An odd number of multiplications leaves the result in buffer 1,
        // an even number leaves it in buffer 2.
        if ((mat_list_partition[i+1] - mat_list_partition[i]) % 2 == 1) {
            int_mat_final[i] = int_mat1[i];
        } else {
            int_mat_final[i] = int_mat2[i];
        }
    }
    cudaMemcpy(d_int_mat_final, int_mat_final, nblocks * sizeof(int *), cudaMemcpyHostToDevice);

    printf("Calling last kernel to aggregate partial results\n");
    gpu_par_multi_matmult<<<1, NUM_THREADS>>>(0, nblocks + 1, d_int_mat_dim, d_int_mat_final, d_int_output_mat1, d_int_output_mat2);
    cudaDeviceSynchronize();

    // Same ping-pong parity rule for the aggregation step.
    if (nblocks % 2 == 1) {
        cudaMemcpy(out_mat, d_int_output_mat1, mat_dim[0] * mat_dim[num_dim-1] * sizeof(int), cudaMemcpyDeviceToHost);
    } else {
        cudaMemcpy(out_mat, d_int_output_mat2, mat_dim[0] * mat_dim[num_dim-1] * sizeof(int), cudaMemcpyDeviceToHost);
    }
    printf("rows:%d cols: %d\n", mat_dim[mat_list_partition[0]], mat_dim[mat_list_partition[nblocks]]);
    printf("printing gpu (using %d blocks) results\n", nblocks);
    print_mat(out_mat, mat_dim[0], mat_dim[num_dim-1]);
    printf("\n");

    // Copy the (possibly smaller) result out of the max_dim scratch.
    int * output_mat = (int *) malloc(mat_dim[0] * mat_dim[num_dim-1] * sizeof(int));
    memcpy(output_mat, out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(int));
    free(out_mat);

    // ---- cleanup: device buffers first, then the host tables that
    // reference them. int_mat_final entries alias int_mat1/int_mat2
    // entries, so only its host table is freed.
    for (int k = 0; k < num_mat; k++) {
        cudaFree(int_mat_list[k]);
    }
    free(int_mat_list);
    for (int k = 0; k < nblocks; k++) {
        cudaFree(int_mat1[k]);
        cudaFree(int_mat2[k]);
    }
    free(int_mat1);
    free(int_mat2);
    free(int_mat_final);
    free(int_mat_dim);
    cudaFree(d_mat_dim);
    cudaFree(d_mat_list);
    cudaFree(d_out_mat1);
    cudaFree(d_out_mat2);
    cudaFree(d_int_mat_final);
    cudaFree(d_int_mat_dim);
    cudaFree(d_int_output_mat1);
    cudaFree(d_int_output_mat2);
    return output_mat;
}
/*
 * Loads a matrix chain into the globals NUM_DIM, MAT_DIM and MAT_LIST.
 * File format: the number of dimensions, the dimension list, then each
 * of the NUM_DIM-1 matrices' entries in row-major order
 * (matrix k is MAT_DIM[k] x MAT_DIM[k+1]).
 *
 * Fixes: fail loudly on a missing file instead of passing NULL to
 * fscanf, and close the file (the original leaked the FILE handle).
 */
void read_file(char * filename)
{
    FILE * fp = fopen(filename, "r");
    if (fp == NULL) {
        printf("could not open file %s\n", filename);
        exit(1);
    }
    fscanf(fp, "%d", &NUM_DIM);
    MAT_DIM = (int *) malloc(NUM_DIM * sizeof(int));
    for (int i = 0; i < NUM_DIM; i++) {
        fscanf(fp, "%d", &MAT_DIM[i]);
    }
    MAT_LIST = (int **) malloc((NUM_DIM - 1) * sizeof(int *));
    for (int arr_i = 0; arr_i < (NUM_DIM - 1); arr_i++) {
        MAT_LIST[arr_i] = (int *) malloc(MAT_DIM[arr_i] * MAT_DIM[arr_i + 1] * sizeof(int));
        for (int i = 0; i < (MAT_DIM[arr_i] * MAT_DIM[arr_i + 1]); i++) {
            fscanf(fp, "%d", &MAT_LIST[arr_i][i]);
        }
    }
    fclose(fp);
}
/* Prints command-line usage for the matrix-chain multiplier. */
void usage() {
    printf("usage\n ./multi_matmult <filename> <algorithm>\n");
    printf("algorithm:\n\t 0 - cpu (sequential), 1 - gpu (parallel, one thread block), 2 - (parallel, multiple thread blocks)\n");
}
/*
 * Entry point: parses <filename> <algorithm>, loads the chain into the
 * globals via read_file, and dispatches to the selected implementation.
 * Algorithm 0 = CPU, 1 = single-block GPU, 2 = multi-block GPU.
 *
 * Fixes: bound the filename copy (the original strcpy could overflow
 * the 256-byte buffer on a long argv[1]) and free the returned product.
 */
int main(int argc, char ** argv)
{
    int algorithm = -1;
    int * product_mat = NULL;
    char filename[256];
    if (argc == 3) {
        // snprintf truncates instead of overflowing filename[].
        snprintf(filename, sizeof(filename), "%s", argv[1]);
        int choice = atoi(argv[2]);
        if (choice >= 0 && choice < 3) {
            algorithm = choice;
        } else {
            usage();
            return 0;
        }
    } else {
        usage();
        return 0;
    }
    read_file(filename);
    if (algorithm == 0) {
        product_mat = cpu_algo(NUM_DIM, MAT_DIM, MAT_LIST);
    }
    else if (algorithm == 1) {
        product_mat = gpu_one_block_algo(NUM_DIM, MAT_DIM, MAT_LIST);
    } else {
        product_mat = gpu_multi_block_algo(NUM_DIM, MAT_DIM, MAT_LIST);
    }
    // The result is not used further; release it before exiting.
    free(product_mat);
    return 0;
}
|
2,587 | //xfail:ASSERTION_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
/* Bundles a thread's built-in identification values so they can be
 * passed by value into device helpers. */
struct wrapped {
unsigned int bidx; // blockIdx.x of the owning thread
unsigned int bdim; // blockDim.x of the launch
unsigned int tidx; // threadIdx.x of the owning thread
};
// Returns twice the element of v owned by the thread described by tw
// (global index = bidx * bdim + tidx).
__device__ float multiplyByTwo(float *v, wrapped tw)
{
    unsigned int idx = tw.bidx * tw.bdim + tw.tidx;
    return 2.0f * v[idx];
}
// Returns half the element of v owned by the thread described by tw,
// computed as a multiply by 0.5f (matches the original exactly).
__device__ float divideByTwo(float *v, wrapped tw)
{
    unsigned int idx = tw.bidx * tw.bdim + tw.tidx;
    return 0.5f * v[idx];
}
typedef float(*funcType)(float*, wrapped);
// GPUVerify test kernel: invokes the function pointer f through a
// struct-wrapped thread id. The result x is intentionally unused — this
// file is a verifier test case marked xfail:ASSERTION_ERROR (see the
// header), so the code is left exactly as written.
__global__ void foo(float *v, funcType f, unsigned int size)
{
wrapped tid = {blockIdx.x, blockDim.x, threadIdx.x};
if ((tid.bidx * tid.bdim + tid.tidx) < size)
{
float x = (*f)(v, tid);
}
}
|
2,588 | #include "includes.h"
/*
 * C = A * B for row-major matrices: A is n x m, B is m x l, C is n x l.
 * One thread computes one element c[i][j].
 *
 * Fix: the original had no bounds check, so when the launch grid was
 * larger than n x l the extra threads wrote past the end of c.
 */
__global__ void multiplicacion( int *a, int *b, int *c, int n, int m, int l ) {
    int i = threadIdx.x + blockIdx.x*blockDim.x; // row of C
    int j = threadIdx.y + blockIdx.y*blockDim.y; // column of C
    if (i >= n || j >= l)
        return; // out-of-range threads must not touch memory
    // Accumulate in a register and write once at the end.
    int acc = 0;
    for (int k = 0; k < m; k++) {
        acc += a[k+i*m] * b[j+k*l];
    }
    c[j+i*l] = acc;
}
2,589 | /*
* Copyright (C) 2018 Philip Langdale <philipl@overt.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
// Edge-directed spatial interpolation (yadif): given 7 pixels from the
// line above (a..g) and 7 from the line below (h..n), starts from the
// vertical average (d+k)/2 and walks outward in both diagonal
// directions, keeping the averaged pair whose 3-pixel absolute
// difference score is smallest. The two if-ladders are order-sensitive:
// each step is only tried if the previous one improved the score.
template<typename T>
__inline__ __device__ T spatial_predictor(T a, T b, T c, T d, T e, T f, T g,
T h, T i, T j, T k, T l, T m, T n)
{
int spatial_pred = (d + k)/2;
int spatial_score = abs(c - j) + abs(d - k) + abs(e - l);
// Try the left-leaning diagonals.
int score = abs(b - k) + abs(c - l) + abs(d - m);
if (score < spatial_score) {
spatial_pred = (c + l)/2;
spatial_score = score;
score = abs(a - l) + abs(b - m) + abs(c - n);
if (score < spatial_score) {
spatial_pred = (b + m)/2;
spatial_score = score;
}
}
// Try the right-leaning diagonals.
score = abs(d - i) + abs(e - j) + abs(f - k);
if (score < spatial_score) {
spatial_pred = (e + j)/2;
spatial_score = score;
score = abs(e - h) + abs(f - i) + abs(g - j);
if (score < spatial_score) {
spatial_pred = (f + i)/2;
spatial_score = score;
}
}
return spatial_pred;
}
// Largest of three ints.
__inline__ __device__ int max3(int a, int b, int c)
{
    return max(max(a, b), c);
}
// Smallest of three ints.
__inline__ __device__ int min3(int a, int b, int c)
{
    return min(min(a, b), c);
}
// Temporal clamp (yadif): limits the spatial prediction by how much the
// pixel changes across the previous/current/next fields. A..L are
// samples from the surrounding fields; diff is the largest of three
// temporal differences, optionally tightened by the spatial min/max
// check, and the spatial prediction is clamped to p2 +/- diff.
template<typename T>
__inline__ __device__ T temporal_predictor(T A, T B, T C, T D, T E, T F,
T G, T H, T I, T J, T K, T L,
T spatial_pred, bool skip_check)
{
int p0 = (C + H) / 2;
int p1 = F;
int p2 = (D + I) / 2;
int p3 = G;
int p4 = (E + J) / 2;
int tdiff0 = abs(D - I);
int tdiff1 = (abs(A - F) + abs(B - G)) / 2;
int tdiff2 = (abs(K - F) + abs(G - L)) / 2;
int diff = max3(tdiff0, tdiff1, tdiff2);
if (!skip_check) {
// Widen diff using the spatial neighbours so the clamp never
// undershoots a genuine vertical edge.
int maxi = max3(p2 - p3, p2 - p1, min(p0 - p1, p4 - p3));
int mini = min3(p2 - p3, p2 - p1, max(p0 - p1, p4 - p3));
diff = max3(diff, mini, -maxi);
}
// Clamp the spatial prediction into [p2 - diff, p2 + diff].
if (spatial_pred > p2 + diff) {
spatial_pred = p2 + diff;
}
if (spatial_pred < p2 - diff) {
spatial_pred = p2 - diff;
}
return spatial_pred;
}
// Deinterlaces one single-component pixel (one thread per output pixel,
// 2D launch covering dst_width x dst_height). Lines belonging to the
// primary field (yo % 2 == parity) are copied through; the others are
// rebuilt by spatial prediction clamped by the temporal predictor.
// prev/cur/next are texture objects for the three source frames.
template<typename T>
__inline__ __device__ void yadif_single(T *dst,
cudaTextureObject_t prev,
cudaTextureObject_t cur,
cudaTextureObject_t next,
int dst_width, int dst_height, int dst_pitch,
int src_width, int src_height,
int parity, int tff, bool skip_spatial_check)
{
// Identify location
int xo = blockIdx.x * blockDim.x + threadIdx.x;
int yo = blockIdx.y * blockDim.y + threadIdx.y;
if (xo >= dst_width || yo >= dst_height) {
return;
}
// Don't modify the primary field
if (yo % 2 == parity) {
dst[yo*dst_pitch+xo] = tex2D<T>(cur, xo, yo);
return;
}
// Calculate spatial prediction: 7 taps from the line above (a..g)
// and 7 from the line below (h..n).
T a = tex2D<T>(cur, xo - 3, yo - 1);
T b = tex2D<T>(cur, xo - 2, yo - 1);
T c = tex2D<T>(cur, xo - 1, yo - 1);
T d = tex2D<T>(cur, xo - 0, yo - 1);
T e = tex2D<T>(cur, xo + 1, yo - 1);
T f = tex2D<T>(cur, xo + 2, yo - 1);
T g = tex2D<T>(cur, xo + 3, yo - 1);
T h = tex2D<T>(cur, xo - 3, yo + 1);
T i = tex2D<T>(cur, xo - 2, yo + 1);
T j = tex2D<T>(cur, xo - 1, yo + 1);
T k = tex2D<T>(cur, xo - 0, yo + 1);
T l = tex2D<T>(cur, xo + 1, yo + 1);
T m = tex2D<T>(cur, xo + 2, yo + 1);
T n = tex2D<T>(cur, xo + 3, yo + 1);
T spatial_pred =
spatial_predictor(a, b, c, d, e, f, g, h, i, j, k, l, m, n);
// Calculate temporal prediction: which frames act as prev1/next1
// depends on whether this is the second field of the frame.
int is_second_field = !(parity ^ tff);
cudaTextureObject_t prev2 = prev;
cudaTextureObject_t prev1 = is_second_field ? cur : prev;
cudaTextureObject_t next1 = is_second_field ? next : cur;
cudaTextureObject_t next2 = next;
T A = tex2D<T>(prev2, xo, yo - 1);
T B = tex2D<T>(prev2, xo, yo + 1);
T C = tex2D<T>(prev1, xo, yo - 2);
T D = tex2D<T>(prev1, xo, yo + 0);
T E = tex2D<T>(prev1, xo, yo + 2);
T F = tex2D<T>(cur, xo, yo - 1);
T G = tex2D<T>(cur, xo, yo + 1);
T H = tex2D<T>(next1, xo, yo - 2);
T I = tex2D<T>(next1, xo, yo + 0);
T J = tex2D<T>(next1, xo, yo + 2);
T K = tex2D<T>(next2, xo, yo - 1);
T L = tex2D<T>(next2, xo, yo + 1);
spatial_pred = temporal_predictor(A, B, C, D, E, F, G, H, I, J, K, L,
spatial_pred, skip_spatial_check);
dst[yo*dst_pitch+xo] = spatial_pred;
}
// Same as yadif_single but for two-component vector pixels (uchar2 /
// ushort2): the spatial and temporal predictors are applied to the .x
// and .y channels independently.
template <typename T>
__inline__ __device__ void yadif_double(T *dst,
cudaTextureObject_t prev,
cudaTextureObject_t cur,
cudaTextureObject_t next,
int dst_width, int dst_height, int dst_pitch,
int src_width, int src_height,
int parity, int tff, bool skip_spatial_check)
{
int xo = blockIdx.x * blockDim.x + threadIdx.x;
int yo = blockIdx.y * blockDim.y + threadIdx.y;
if (xo >= dst_width || yo >= dst_height) {
return;
}
if (yo % 2 == parity) {
// Don't modify the primary field
dst[yo*dst_pitch+xo] = tex2D<T>(cur, xo, yo);
return;
}
// Spatial taps: 7 from the line above, 7 from the line below.
T a = tex2D<T>(cur, xo - 3, yo - 1);
T b = tex2D<T>(cur, xo - 2, yo - 1);
T c = tex2D<T>(cur, xo - 1, yo - 1);
T d = tex2D<T>(cur, xo - 0, yo - 1);
T e = tex2D<T>(cur, xo + 1, yo - 1);
T f = tex2D<T>(cur, xo + 2, yo - 1);
T g = tex2D<T>(cur, xo + 3, yo - 1);
T h = tex2D<T>(cur, xo - 3, yo + 1);
T i = tex2D<T>(cur, xo - 2, yo + 1);
T j = tex2D<T>(cur, xo - 1, yo + 1);
T k = tex2D<T>(cur, xo - 0, yo + 1);
T l = tex2D<T>(cur, xo + 1, yo + 1);
T m = tex2D<T>(cur, xo + 2, yo + 1);
T n = tex2D<T>(cur, xo + 3, yo + 1);
T spatial_pred;
spatial_pred.x =
spatial_predictor(a.x, b.x, c.x, d.x, e.x, f.x, g.x, h.x, i.x, j.x, k.x, l.x, m.x, n.x);
spatial_pred.y =
spatial_predictor(a.y, b.y, c.y, d.y, e.y, f.y, g.y, h.y, i.y, j.y, k.y, l.y, m.y, n.y);
// Calculate temporal prediction; field roles depend on parity/tff.
int is_second_field = !(parity ^ tff);
cudaTextureObject_t prev2 = prev;
cudaTextureObject_t prev1 = is_second_field ? cur : prev;
cudaTextureObject_t next1 = is_second_field ? next : cur;
cudaTextureObject_t next2 = next;
T A = tex2D<T>(prev2, xo, yo - 1);
T B = tex2D<T>(prev2, xo, yo + 1);
T C = tex2D<T>(prev1, xo, yo - 2);
T D = tex2D<T>(prev1, xo, yo + 0);
T E = tex2D<T>(prev1, xo, yo + 2);
T F = tex2D<T>(cur, xo, yo - 1);
T G = tex2D<T>(cur, xo, yo + 1);
T H = tex2D<T>(next1, xo, yo - 2);
T I = tex2D<T>(next1, xo, yo + 0);
T J = tex2D<T>(next1, xo, yo + 2);
T K = tex2D<T>(next2, xo, yo - 1);
T L = tex2D<T>(next2, xo, yo + 1);
spatial_pred.x =
temporal_predictor(A.x, B.x, C.x, D.x, E.x, F.x, G.x, H.x, I.x, J.x, K.x, L.x,
spatial_pred.x, skip_spatial_check);
spatial_pred.y =
temporal_predictor(A.y, B.y, C.y, D.y, E.y, F.y, G.y, H.y, I.y, J.y, K.y, L.y,
spatial_pred.y, skip_spatial_check);
dst[yo*dst_pitch+xo] = spatial_pred;
}
// Externally visible kernel entry points with C linkage (one per pixel
// format); each simply forwards to the templated implementation.
extern "C" {
// 8-bit single-plane (e.g. luma).
__global__ void yadif_uchar(unsigned char *dst,
cudaTextureObject_t prev,
cudaTextureObject_t cur,
cudaTextureObject_t next,
int dst_width, int dst_height, int dst_pitch,
int src_width, int src_height,
int parity, int tff, bool skip_spatial_check)
{
yadif_single(dst, prev, cur, next,
dst_width, dst_height, dst_pitch,
src_width, src_height,
parity, tff, skip_spatial_check);
}
// 16-bit single-plane.
__global__ void yadif_ushort(unsigned short *dst,
cudaTextureObject_t prev,
cudaTextureObject_t cur,
cudaTextureObject_t next,
int dst_width, int dst_height, int dst_pitch,
int src_width, int src_height,
int parity, int tff, bool skip_spatial_check)
{
yadif_single(dst, prev, cur, next,
dst_width, dst_height, dst_pitch,
src_width, src_height,
parity, tff, skip_spatial_check);
}
// 8-bit two-component plane (e.g. interleaved chroma).
__global__ void yadif_uchar2(uchar2 *dst,
cudaTextureObject_t prev,
cudaTextureObject_t cur,
cudaTextureObject_t next,
int dst_width, int dst_height, int dst_pitch,
int src_width, int src_height,
int parity, int tff, bool skip_spatial_check)
{
yadif_double(dst, prev, cur, next,
dst_width, dst_height, dst_pitch,
src_width, src_height,
parity, tff, skip_spatial_check);
}
// 16-bit two-component plane.
__global__ void yadif_ushort2(ushort2 *dst,
cudaTextureObject_t prev,
cudaTextureObject_t cur,
cudaTextureObject_t next,
int dst_width, int dst_height, int dst_pitch,
int src_width, int src_height,
int parity, int tff, bool skip_spatial_check)
{
yadif_double(dst, prev, cur, next,
dst_width, dst_height, dst_pitch,
src_width, src_height,
parity, tff, skip_spatial_check);
}
} /* extern "C" */
|
2,590 | #include<time.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
/* Macro for mapping three dimensional index (ix,iy,iz) to
* linear index. The vertical index (z) is running fastest so
* that vertical columns are always kept together in memory.
*/
#define LINIDX(n, ix,iy,iz) ((n.z)*(n.y)*(ix) + (n.z)*(iy) + (iz))
#define GLINIDX(n, ix,iy,iz) ((n->z)*(n->y)*(ix) + (n->z)*(iy) + (iz))
#define len n.x*n.y*n.z
#define BLOCK_SIZE 5
/* Structure for three dimensional grid size */
struct N {
int x; // Number of grid points in x-direction
int y; // Number of grid points in y-direction
int z; // Number of grid points in z-direction
};
/* Number of gridpoints */
struct N n;
/* Relative residual reduction target */
//const float resreduction = 1.0e-5;
/* parameters of PDE */
const float lambda2 = 1e4;
const float omega2 = 1.0;
const float delta = 0.0;
/* *********************************************************** *
* Block-Jacobi Preconditioner
* y = M^{-1}.b
* *********************************************************** */
/* *********************************************************** *
 * Block-Jacobi Preconditioner (host reference implementation)
 * y = M^{-1}.b
 * Solves an independent vertical tridiagonal system with the
 * Thomas algorithm for every horizontal grid point (ix,iy).
 *
 * Fix: the scratch arrays come from cudaMallocHost (pinned host
 * memory) and must be released with cudaFreeHost — the original
 * passed them to cudaFree, which is for device allocations.
 * *********************************************************** */
void prec_BJ(const struct N n,
             const float* b,
             float* y) {
    int ix, iy, iz;
    // Grid spacings in all directions
    float hx = 1./n.x;
    float hy = 1./n.y;
    float hz = 1./n.z;
    float hx_inv2 = 1./(hx*hx);
    float hy_inv2 = 1./(hy*hy);
    float hz_inv2 = 1./(hz*hz);
    float *c,*d;
    // Temporary arrays for the Thomas algorithm (pinned host memory)
    cudaMallocHost((void**)&c,n.z*sizeof(float));
    cudaMallocHost((void**)&d,n.z*sizeof(float));
    // Loop over all vertical columns
    for (ix = 0; ix<n.x; ix++) {
        for (iy = 0; iy<n.y; iy++) {
            // Do a tridiagonal solve in the vertical direction
            // STEP 1: Calculate modified coefficients
            c[0] = (-omega2*lambda2*hz_inv2)
                / (delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
            d[0] = b[LINIDX(n, ix,iy,0)]
                / (delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
            for (iz = 1; iz<n.z; iz++) {
                c[iz] = (-omega2*lambda2*hz_inv2)
                    / ( (delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))
                        - (-omega2*lambda2*hz_inv2) * c[iz-1]);
                d[iz] = (b[LINIDX(n, ix,iy,iz)]
                         - (-omega2*lambda2*hz_inv2)*d[iz-1])
                    / ( (delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))
                        - (-omega2*lambda2*hz_inv2)*c[iz-1]);
            }
            // STEP 2: Back-substitution.
            y[LINIDX(n, ix,iy,n.z-1)] = d[n.z-1];
            for (iz = n.z-2; iz>=0; iz--) {
                y[LINIDX(n, ix,iy,iz)]
                    = d[iz] - c[iz]*y[LINIDX(n, ix,iy,iz+1)];
            }
        }
    }
    // Pinned host memory: release with cudaFreeHost, not cudaFree.
    cudaFreeHost(c);
    cudaFreeHost(d);
}
/*
 * GPU Block-Jacobi preconditioner: y = M^{-1}.b
 * One thread owns one vertical column (ix,iy) and runs the Thomas
 * algorithm along z. Expects a BLOCK_SIZE x BLOCK_SIZE 2D launch
 * covering the n->x by n->y horizontal grid.
 *
 * NOTE: the per-thread scratch arrays cap the vertical extent at
 * n->z <= 1000; larger grids would overflow c[]/d[].
 * Fix: guard against threads mapped outside the horizontal grid —
 * the original indexed b[] and y[] out of bounds when the launch
 * grid did not divide the domain evenly.
 */
__global__ void gpu_bj(const struct N *n,
                       const float* b,
                       float* y) {
    int iz;
    // Grid spacings in all directions
    float hx = 1./n->x;
    float hy = 1./n->y;
    float hz = 1./n->z;
    float hx_inv2 = 1./(hx*hx);
    float hy_inv2 = 1./(hy*hy);
    float hz_inv2 = 1./(hz*hz);
    // Per-thread scratch for the Thomas algorithm
    float c[1000],d[1000];
    int ix = blockIdx.x*BLOCK_SIZE+threadIdx.x;
    int iy = blockIdx.y*BLOCK_SIZE+threadIdx.y;
    if (ix >= n->x || iy >= n->y)
        return; // out-of-range threads must not touch memory
    // STEP 1: Calculate modified coefficients (forward sweep)
    c[0] = (-omega2*lambda2*hz_inv2)/(delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
    d[0] = b[GLINIDX(n, ix,iy,0)]/(delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2));
    for (iz = 1; iz<n->z; iz++) {
        c[iz] = (-omega2*lambda2*hz_inv2)/( (delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))- (-omega2*lambda2*hz_inv2) * c[iz-1]);
        d[iz] = (b[GLINIDX(n, ix,iy,iz)]-(-omega2*lambda2*hz_inv2)*d[iz-1])/((delta+2.*omega2*(hx_inv2+hy_inv2+lambda2*hz_inv2))- (-omega2*lambda2*hz_inv2)*c[iz-1]);
    }
    // STEP 2: Back-substitution.
    y[GLINIDX(n, ix,iy,n->z-1)] = d[n->z-1];
    for (iz = n->z-2; iz>=0; iz--) {
        y[GLINIDX(n, ix,iy,iz)]=d[iz] - c[iz]*y[GLINIDX(n, ix,iy,iz+1)];
    }
}
/*
 * Driver: times 1000 runs of the CPU preconditioner, then performs a
 * single GPU launch of gpu_bj on the same input.
 *
 * Fixes: the three field arrays (n.x*n.y*n.z floats each, ~480 KB
 * total here) were runtime-sized stack arrays — moved to the heap —
 * and h_n and the field buffers were leaked.
 */
int main(){
    n.x = 20;
    n.y = 100;
    n.z = 20;
    int i;
    printf(" parameters\n");
    printf(" ==========\n");
    printf(" nx = %10d\n",n.x);
    printf(" ny = %10d\n",n.y);
    printf(" nz = %10d\n",n.z);
    printf(" omega2 = %12.6e\n",omega2);
    printf(" lambda2 = %12.6e\n",lambda2);
    printf(" delta = %12.6e\n",delta);
    // Heap-allocate the fields (len expands to n.x*n.y*n.z).
    float *x = (float *)malloc(len*sizeof(float));
    float *y = (float *)malloc(len*sizeof(float));
    float *gpu_y = (float *)malloc(len*sizeof(float));
    for(i=0;i<len;i++){
        x[i]=1.2;
    }
    double start1=clock();
    for(i=0;i<1000;i++)
        prec_BJ(n,x,y);
    double end1=clock();
    // Mirror the grid-size struct onto the device.
    N *h_n,*dev_n;
    h_n=(struct N*)malloc(sizeof(N));
    h_n->x=n.x;
    h_n->y=n.y;
    h_n->z=n.z;
    float *dev_b, *dev_y;
    cudaMalloc((void**)&dev_n,sizeof(N));
    cudaMalloc((void**)&dev_b,len*sizeof(float));
    cudaMalloc((void**)&dev_y,len*sizeof(float));
    cudaMemcpy(dev_n,h_n,sizeof(N),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,x,len*sizeof(float),cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(n.x/BLOCK_SIZE,n.y/BLOCK_SIZE);
    gpu_bj<<<dimGrid,dimBlock>>>(dev_n,dev_b,dev_y);
    cudaMemcpy(gpu_y,dev_y,len*sizeof(float),cudaMemcpyDeviceToHost);
    cudaFree(dev_n);
    cudaFree(dev_b);
    cudaFree(dev_y);
    // Release host allocations the original leaked.
    free(h_n);
    free(x);
    free(y);
    free(gpu_y);
    printf("cpu time = %f\n",(double)(end1-start1)/CLOCKS_PER_SEC*1000000);
    return(0);
}
|
2,591 | #include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <math.h>
#include <stdint.h>
/*
 * Copies `size` bytes from host_var into a fresh device allocation and
 * returns the device pointer (caller releases via cuda_download_var).
 *
 * Fix: the original allocated a hard-coded 4 bytes regardless of
 * `size`, so any upload larger than 4 bytes wrote past the allocation.
 */
void *cuda_upload_var(void *host_var, int size)
{
    void *cuda_var;
    cudaMalloc(&cuda_var, size);
    cudaMemcpy(cuda_var, host_var, size, cudaMemcpyHostToDevice);
    return cuda_var;
}
/* Copies `size` bytes back from the device buffer into host_var and
 * frees the device allocation (pairs with cuda_upload_var). */
void cuda_download_var(void *cuda_var, void *host_var, int size)
{
cudaMemcpy(host_var, cuda_var, size, cudaMemcpyDeviceToHost);
cudaFree(cuda_var);
}
/* 2x2 integer matrix, row-major in m[4]: m[2*row + col]. */
typedef struct intmat2x2
{
    int m[4];
} intmat2x2;

/* Returns the row-major 2x2 product lhs * rhs. */
intmat2x2 intmat2x2_mul(intmat2x2 lhs, intmat2x2 rhs)
{
    intmat2x2 out;
    for (int r = 0; r < 2; r++) {
        for (int c = 0; c < 2; c++) {
            out.m[2*r + c] = lhs.m[2*r + 0] * rhs.m[0 + c]
                           + lhs.m[2*r + 1] * rhs.m[2 + c];
        }
    }
    return out;
}
/* 3x3 float matrix, row-major in m[9]: m[3*row + col]. */
typedef struct floatmat3x3
{
    float m[9];
} floatmat3x3;

/* Returns the row-major 3x3 product lhs * rhs. The inner sum runs in
 * ascending k, matching the original term order exactly. */
floatmat3x3 floatmat3x3_mul(floatmat3x3 lhs, floatmat3x3 rhs)
{
    floatmat3x3 out;
    for (int r = 0; r < 3; r++) {
        for (int c = 0; c < 3; c++) {
            float acc = 0.0f;
            for (int k = 0; k < 3; k++)
                acc += lhs.m[3*r + k] * rhs.m[3*k + c];
            out.m[3*r + c] = acc;
        }
    }
    return out;
}
/* Exercises the 2x2 integer and 3x3 float matrix products. */
int main(int argc, char **argv)
{
    intmat2x2 mat1;
    intmat2x2 mat2;
    floatmat3x3 mat3;
    floatmat3x3 mat4;

    // Same cells as the original's m[1*col + 2*row] arithmetic:
    // m[0]=0, m[1]=1, m[3]=2, m[2]=3 in both 2x2 matrices.
    mat1.m[0] = 0; mat1.m[1] = 1; mat1.m[3] = 2; mat1.m[2] = 3;
    mat2.m[0] = 0; mat2.m[1] = 1; mat2.m[3] = 2; mat2.m[2] = 3;

    // Fill the 3x3 matrices: mat3 with ones, mat4 with twos.
    for (int idx = 0; idx < 9; idx++) {
        mat3.m[idx] = 1;
        mat4.m[idx] = 2;
    }

    mat1 = intmat2x2_mul(mat1, mat2);
    mat3 = floatmat3x3_mul(floatmat3x3_mul(floatmat3x3_mul(mat4, mat3), mat3), mat3);
    return 0;
}
|
2,592 | #include "includes.h"
/*
* lanczos computes the smallest n_eigs eigenvalues for dev_L and the
* corresponding eigenvectors using the Lanczos algorithm.
*
* F: an array (n_patch by n_eigs) to store the eigenvectors
* Es: an array (1 by n_eigs) to store the eigenvalues
* dev_L: an array (n_patch by n_patch) representing the Laplacian matrix
* n_patch: the dimension of dev_L
*/
static double norm2(double *v, int length);
// Element-wise dest[i] = src[i] / divisor over `length` elements,
// implemented (as before) by multiplying with the precomputed
// reciprocal. Grid-stride loop: any launch geometry covers the array.
__global__ void divide_copy(double *dest, const double *src, int length, const double divisor)
{
    const double factor = 1.0 / divisor;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x;
         i < length;
         i += blockDim.x * gridDim.x) {
        dest[i] = src[i] * factor;
    }
}
} |
2,593 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
/*
 * Zero-fills `bytes` bytes of keep_mem sequentially; intended for a
 * <<<1,1>>> launch (every launched thread writes the whole range, so a
 * wider launch merely repeats the same stores).
 *
 * Fix: the loop index was a 32-bit unsigned while `bytes` is size_t,
 * so byte counts >= 2^32 would never terminate the loop.
 */
__global__ void mk_kernel(char* keep_mem, size_t bytes)
{
    for (size_t i = 0; i < bytes; ++i)
    {
        keep_mem[i] = 0;
    }
}
/*
 * Holds a 1 GiB device allocation for 5 seconds (e.g. to occupy GPU
 * memory while observing it), then releases it.
 *
 * Fix: the 1 GiB cudaMalloc was unchecked; on a busy device it can
 * fail and the program would then free/reset a null allocation while
 * claiming success.
 */
int main()
{
    size_t bytes = 1024u * 1024u * 1024u; // 1 GiB
    printf("I will sleep for 5 seconds. \n");
    char *keep_mem = NULL;
    if (cudaMalloc(&keep_mem, sizeof(char)*bytes) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc of %zu bytes failed\n", bytes);
        return 1;
    }
    //mk_kernel<<<1, 1>>>(keep_mem, bytes);
    sleep(5);
    printf("Done. \n");
    cudaFree(keep_mem);
    cudaDeviceReset();
    return 0;
}
2,594 | #include <cuda_runtime.h>
#include <stdio.h>
/*
 * Host<->device round-trip demo: allocates a 16 MB float buffer on
 * both sides, fills the host copy, and copies it to the device and
 * back.
 *
 * Fixes: the host malloc was unchecked, and the original returned
 * EXIT_SUCCESS without including <stdlib.h> (only <stdio.h> is
 * included here) — return 0 directly instead.
 */
int main(int argc, char **argv) {
    // set up device
    int dev = 0;
    cudaSetDevice(dev);

    // memory size: 4M floats = 16 MB
    unsigned int isize = 1<<22;
    unsigned int nbytes = isize * sizeof(float);

    // get device information
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("%s starting at ", argv[0]);
    printf("device %d: %s memory size %d nbyte %5.2fMB\n", dev,
           deviceProp.name,isize,nbytes/(1024.0f*1024.0f));

    // allocate the host memory (check: a 16 MB request can fail)
    float *h_a = (float *)malloc(nbytes);
    if (h_a == NULL) {
        fprintf(stderr, "host malloc of %u bytes failed\n", nbytes);
        return 1;
    }

    // allocate the device memory
    float *d_a = NULL;
    if (cudaMalloc((float **)&d_a, nbytes) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc of %u bytes failed\n", nbytes);
        free(h_a);
        return 1;
    }

    // initialize the host memory
    for(unsigned int i=0;i<isize;i++) h_a[i] = 0.5f;

    // transfer data from the host to the device
    cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice);
    // transfer data from the device to the host
    cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost);

    // free memory
    cudaFree(d_a);
    free(h_a);

    // reset device
    cudaDeviceReset();
    return 0;
}
2,595 | #include <stdio.h>
#include <stdlib.h>
#include <cmath>
__device__ int position; //index of the largest value
__device__ int largest; //value of the largest value
int lenString = 593;
int maxNumStrings = 1000000;
int threshold = 2;
// Clears *remaining (to 0) if any in-range tuple is still unmerged
// (d_c[i] == 0); the host loops while the flag stays 0.
// Fix: test my_id < size BEFORE indexing d_c — the original read
// d_c[my_id] first, so threads in the padded final block read past
// the end of the array.
__global__ void anyLeft(int *d_c, int *remaining, int size) {
    int my_id = blockDim.x * blockIdx.x + threadIdx.x;
    if((my_id < size) && (d_c[my_id] == 0)) {
        *remaining = 0;
    }
}
// Records in `position` the index of an unmerged tuple whose count
// equals the device global `largest`. If several match, the writes
// race and an arbitrary one wins — unchanged from the original.
// Fix: test my_id < size BEFORE indexing d_c/d_b; the original
// dereferenced both arrays out of bounds in the padded final block.
__global__ void search(int *d_b, int *d_c, int size) {
    int my_id = blockDim.x * blockIdx.x + threadIdx.x;
    if((my_id < size) && (d_c[my_id] == 0) && (d_b[my_id] == largest)) {
        position = my_id;
    }
}
// Builds copy_db from d_b with merged tuples (d_c != 0) zeroed out,
// so the subsequent max-search only sees unmerged counts. Also resets
// *left to 1 (true) so anyLeft can clear it again this round; every
// thread stores the same value, as in the original.
__global__ void populate (int *d_b, int *copy_db, int *d_c, int size, int *left) {
    int my_id = blockDim.x * blockIdx.x + threadIdx.x;
    *left = 1; // reinitialized to check if all strings are merged
    if (my_id < size) {
        // 1 for unmerged tuples, 0 for merged ones — same result as
        // the original abs((bool)d_c[i] - 1) trick.
        int unmerged = (d_c[my_id] == 0) ? 1 : 0;
        copy_db[my_id] = d_b[my_id] * unmerged;
    }
}
// Reduction-type tree implementation to find largest count (Parallelized)
// One halving step: thread t replaces db[t] with max(db[2t], db[2t+1]).
// NOTE(review): threads read slots that other threads may be writing in
// the same step, and nothing synchronizes across blocks, so successive
// steps can observe partially updated data — verify the caller's launch
// geometry tolerates this before relying on the result.
__device__ void cuda_select(int *db, int size) {
int my_id = blockDim.x * blockIdx.x + threadIdx.x;
if(my_id < size) {
if(db[2 * my_id] > db[2 * my_id + 1])
db[my_id] = db[2 * my_id];
else
db[my_id] = db[2 * my_id + 1];
}
}
// Loops cuda_select function until largest value is at index 0
// then publishes it into the device global `largest`.
// NOTE(review): there is no barrier between halving steps (and
// __syncthreads() would not help across the multiple blocks this is
// launched with), so steps from different warps/blocks can interleave;
// every thread also runs the full loop and the final store. Confirm
// this is acceptable for the intended launch before reuse.
__global__ void select(int *db, int size) {
int height = (int)ceil(log2((double)size));
int i = 0;
for(i = 0; i < height; i++) {
size = (int)ceil((double) size/2);
cuda_select(db, size);
}
largest = db[0];
}
// Compares target string to all other unmerged strings with lesser count
// The thread owning `position` marks its tuple as a cluster head (2).
// Every other unmerged thread computes a character-wise Hamming
// distance (XOR of bytes, counted via bool) against the head string,
// bailing out early once `threshold` is exceeded; within threshold it
// folds its count into the head and marks itself merged (1).
// NOTE(review): the `d_b[position] +=` below is a non-atomic
// read-modify-write performed by many threads — updates can be lost.
// Confirm whether approximate totals are acceptable here.
__global__ void compare(char *d_a, int *d_b, int *d_c, int size, int lenString, int threshold) {
int my_id = blockDim.x * blockIdx.x + threadIdx.x;
if (my_id == position)
d_c[my_id] = 2;
if ((my_id < size) && (d_c[my_id] == 0) && (my_id != position)) {
int x, diffs = 0;
for (x = 0; x < lenString; x++) {
diffs += (bool)(d_a[(lenString*position)+x]^d_a[(my_id*lenString)+x]);
if (diffs > threshold)
break;
}
if (diffs <= threshold) {
d_b[position] += d_b[my_id];
d_c[my_id] = 1;
}
}
}
/*
 * Driver: loads up to 1000 (string, count) tuples from a FASTA-derived
 * file, clusters them on the GPU by Hamming distance <= threshold, and
 * writes the merged tuples to output2.txt (printing the cluster count).
 *
 * Fixes vs. the original:
 *  - `cudaMalloc(&copy_db, ...)` — the '&copy' had been mangled into
 *    the '(c)' character, which does not compile;
 *  - `merged` is now calloc'd so every tuple starts unmerged (the
 *    original uploaded uninitialized malloc memory into d_c);
 *  - the __device__ globals position/largest are initialized with
 *    cudaMemcpyToSymbol (taking their address on the host, as the
 *    original cudaMemset did, is not valid);
 *  - fopen is checked, and the strncpy result is NUL-terminated.
 */
int main(int argc, char** argv) {
    char *strings, *d_a; // host and device copy of strings
    int *counts, *d_b;   // host and device copy of counts
    int *merged, *d_c;   // host and device copy of merged flags
    int *copy_db;        // device copy of counts (counts of merged are 0)
    char copy[lenString+1]; // scratch for one string read/written per record
    int numbers;            // scratch for one count
    int *any_left, *left;   // host and device copies of the "all merged" flag
    int size = 0;           // number of tuples actually loaded
    int i = 0;
    int size_string = maxNumStrings*sizeof(char)*(lenString+1);
    int size_int = maxNumStrings*sizeof(int);

    // Open the file
    FILE *fp = fopen("/cluster/home/charliep/courses/cs360/single-linkage-clustering/Iceland2014.trim.contigs.good.unique.good.filter.unique.count.fasta", "r");
    if (fp == NULL) {
        fprintf(stderr, "fopen() FAILED (Block)\n");
        exit(0);
    }

    // Allocate space for arrays on the host
    if (!(strings = (char *)malloc(size_string))) {
        fprintf(stderr, "malloc() FAILED (Block)\n");
        exit(0);
    }
    if (!(counts = (int*)malloc(size_int))) {
        fprintf(stderr, "malloc() FAILED (Block)\n");
        exit(0);
    }
    // calloc: all tuples start unmerged (0).
    if (!(merged = (int*)calloc(maxNumStrings, sizeof(int)))) {
        fprintf(stderr, "malloc() FAILED (Block)\n");
        exit(0);
    }
    any_left = (int *)malloc(sizeof(int));

    // Initialize the __device__ globals by symbol.
    int zero = 0;
    cudaMemcpyToSymbol(position, &zero, sizeof(int));
    cudaMemcpyToSymbol(largest, &zero, sizeof(int));

    // Load strings and counts into the packed arrays (capped at 1000).
    while(fscanf(fp, "%s %d", copy, &numbers) != EOF && size < 1000){
        strcpy(&strings[i], copy);
        counts[size] = numbers;
        i = i + lenString;
        size++;
    }
    fclose(fp);

    // Allocate space for arrays on the device
    cudaMalloc(&d_a, size_string);
    cudaMalloc(&d_b, size_int);
    cudaMalloc(&d_c, size_int);
    cudaMalloc(&copy_db, size_int);
    cudaMalloc(&left, sizeof(int));

    // Copy arrays from host to device
    cudaMemcpy(d_a, strings, size_string, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, counts, size_int, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, merged, size_int, cudaMemcpyHostToDevice);

    // Determine number of threads and blocks needed
    int threads_num = 512, blocks_num;
    blocks_num = (int)ceil((float)size/threads_num);

    // Cluster the strings: repeat until no unmerged tuple remains.
    do {
        populate<<<blocks_num, threads_num>>>(d_b, copy_db, d_c, size, left);
        select<<<blocks_num, threads_num>>>(copy_db, size);
        search<<<blocks_num, threads_num>>>(d_b, d_c, size);
        compare<<<blocks_num, threads_num>>>(d_a, d_b, d_c, size, lenString, threshold);
        anyLeft<<<blocks_num, threads_num>>>(d_c, left, size);
        cudaMemcpy(any_left, left, sizeof(int), cudaMemcpyDeviceToHost);
    } while (*any_left == 0);

    // Copy results back from device to host
    cudaMemcpy(strings, d_a, size_string, cudaMemcpyDeviceToHost);
    cudaMemcpy(counts, d_b, size_int, cudaMemcpyDeviceToHost);
    cudaMemcpy(merged, d_c, size_int, cudaMemcpyDeviceToHost);

    // Emit every tuple; count the cluster heads (merged == 2).
    int counter = 0;
    FILE *output = fopen("output2.txt", "w+");
    for(i = 0; i < size; i++) {
        strncpy(copy, &strings[i*lenString], lenString);
        copy[lenString] = '\0'; // strncpy does not guarantee termination
        fprintf(output, "%s %d\n", copy, counts[i]);
        if (merged[i] == 2)
            counter++;
    }
    fclose(output);
    printf("%d\n", counter);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(copy_db);
    cudaFree(left);
    free(strings);
    free(counts);
    free(merged);
    free(any_left);
}
|
2,596 | #include<stdio.h>
#include<stdlib.h>
#define N 2048
#define BLOCK_SIZE 32
// Naive transpose: each thread gathers its transposed source element
// directly from global memory (the gather reads are uncoalesced).
__global__ void matrix_transpose_naive(int *in, int *out) {
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    // out[row][col] = in[col][row], same linearization as the original.
    out[col + row * N] = in[col * N + row];
}
// Shared-memory tiled transpose.
//
// Fix: the original swapped the tile indices on the read AND kept the
// thread's own (x*N + y) output index without swapping the *block*
// indices. Within a diagonal block the two swaps cancel, and for
// off-diagonal blocks the result is wrong (e.g. with N=4, BLOCK_SIZE=2,
// block (1,0), thread (1,0) stored in[6] where the transpose requires
// in[3]). The canonical form below reads a tile coalesced, then writes
// the transposed tile coalesced using swapped block indices.
// The +1 padding keeps the column reads free of shared-memory bank
// conflicts.
__global__ void matrix_transpose_shared(int *in, int *out) {
    __shared__ int tile[BLOCK_SIZE][BLOCK_SIZE+1];
    // Coordinates of the element this thread loads.
    int in_x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int in_y = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    tile[threadIdx.y][threadIdx.x] = in[in_x + in_y * N];
    __syncthreads();
    // Write the transposed tile: block indices swapped so both the
    // load above and this store are coalesced along x.
    int out_x = blockIdx.y * BLOCK_SIZE + threadIdx.x;
    int out_y = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    out[out_x + out_y * N] = tile[threadIdx.x][threadIdx.y];
}
// Initializes each of the N*N entries to its own linear index.
void fill_array(int *data) {
    int idx = 0;
    while (idx < N * N) {
        data[idx] = idx;
        idx++;
    }
}
/* Print the original matrix (a) followed by the transposed matrix (b),
 * starting a new line every N elements. */
void print_output(int *a, int *b) {
    printf("\n Original Matrix::\n");
    for (int i = 0; i < N * N; ++i) {
        if (i % N == 0)
            printf("\n");
        printf(" %d ", a[i]);
    }
    printf("\n Transposed Matrix::\n");
    for (int i = 0; i < N * N; ++i) {
        if (i % N == 0)
            printf("\n");
        printf(" %d ", b[i]);
    }
}
/**
 * Driver: fills an N x N matrix on the host, transposes it on the GPU with
 * the shared-memory kernel, and copies the result back.
 */
int main(void) {
    int size = N * N * sizeof(int);

    // host buffers
    int *a = (int *) malloc(size);   // input matrix
    int *b = (int *) malloc(size);   // transposed result
    fill_array(a);

    // device buffers
    int *d_a;
    int *d_b;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);

    // BUG FIX: the original called cudaMemcpy((void **)&d_a, a, ...), passing
    // the ADDRESS of the host pointer variable as the destination instead of
    // the device pointer's value — so no data ever reached the GPU (and host
    // stack memory was targeted). cudaMemcpy takes the device pointer itself.
    // The copy of `b` (an uninitialized output buffer) was also dropped: the
    // kernel overwrites every element of d_b anyway.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);

    // launch configuration: one BLOCK_SIZE x BLOCK_SIZE thread block per tile
    dim3 blocksize(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 gridsize(N / BLOCK_SIZE, N / BLOCK_SIZE, 1);
    matrix_transpose_shared<<<gridsize, blocksize>>>(d_a, d_b);

    // copy the transposed matrix back to the host
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);

    free(a); free(b);
    cudaFree(d_a); cudaFree(d_b);
    return 0;
}
|
2,597 | #define BLOCK_SIZE 32
#define BLOCK_DEPTH 3
#define POW2(x) ((x) * (x))
/*
 * Bilateral filter over a 3-plane (planar RGB) float image stored as
 * i_arr1d[plane * rows * cols + y * cols + x]; the filtered image is written
 * to o_arr1d in the same layout, one thread per pixel.
 *
 * gauss_color_coeff scales the squared color distance and gauss_space_coeff
 * the squared spatial distance inside expf(). NOTE(review): both are
 * presumably negative (e.g. -1/(2*sigma^2)), otherwise the weights would grow
 * with distance — confirm against the caller.
 *
 * Launch assumptions (no bounds checks are performed):
 *   - blockDim == (BLOCK_SIZE, BLOCK_SIZE)
 *   - rows and cols are multiples of BLOCK_SIZE — TODO confirm with caller
 *
 * Approximation: each block stages only its own BLOCK_SIZE x BLOCK_SIZE tile
 * (no halo), so pixels near a tile edge average over a truncated neighborhood
 * rather than the full (2*radius+1)^2 window.
 */
__global__ void bilateralFilterKernel(float* o_arr1d, float* i_arr1d, int rows, int cols, int radius, float gauss_color_coeff, float gauss_space_coeff) {
// one shared tile per color plane for fast repeated neighborhood reads
__shared__ float tile[BLOCK_DEPTH][BLOCK_SIZE][BLOCK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
// global pixel coordinates of this thread
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
// stage the R, G and B planes of this block's tile into shared memory
tile[0][ty][tx] = i_arr1d[yidx * cols + xidx];
tile[1][ty][tx] = i_arr1d[rows * cols + yidx * cols + xidx];
tile[2][ty][tx] = i_arr1d[2 * rows * cols + yidx * cols + xidx];
__syncthreads();
// center-pixel color: the reference for the range (intensity) weight
float Iir = tile[0][ty][tx];
float Iig = tile[1][ty][tx];
float Iib = tile[2][ty][tx];
// weighted color accumulators and the weight normalizer
float Ior = 0.0f;
float Iog = 0.0f;
float Iob = 0.0f;
float accumWeight = 0.0f;
for(int dy = -radius; dy <= radius; dy++) {
for(int dx = -radius; dx <= radius; dx++) {
// test boundary of tile: skip neighbors that fall outside this block's tile
if(tx + dx >= 0 && tx + dx <= BLOCK_SIZE - 1 && ty + dy >= 0 && ty + dy <= BLOCK_SIZE - 1) {
float Ir = tile[0][ty + dy][tx + dx];
float Ig = tile[1][ty + dy][tx + dx];
float Ib = tile[2][ty + dy][tx + dx];
// intensity weight exponent: squared RGB distance to the center pixel
float expi = gauss_color_coeff * (POW2(Ir - Iir) + POW2(Ig - Iig) + POW2(Ib - Iib));
// spatial weight exponent: squared pixel distance
float exps = gauss_space_coeff * (POW2(dx) + POW2(dy));
float weight = expf(expi + exps);
Ior += Ir * weight;
Iog += Ig * weight;
Iob += Ib * weight;
accumWeight += weight;
}
}
}
// normalize weight so the kernel sums to 1 over the (possibly truncated) window
Ior /= accumWeight;
Iog /= accumWeight;
Iob /= accumWeight;
// write result to global memory, plane by plane
o_arr1d[yidx * cols + xidx] = Ior;
o_arr1d[rows * cols + yidx * cols + xidx] = Iog;
o_arr1d[2 * rows * cols + yidx * cols + xidx] = Iob;
}
|
2,598 |
extern "C"
/**
 * Render one pixel of a circular sine wave radiating from (centerX, centerY).
 *
 * tab[index] receives the wave displacement mapped to an integer intensity
 * around 127 (range roughly [127-127*A, 127+127*A]).
 *
 * @param rowSize  number of pixels per row of the image buffer
 * @param A        wave amplitude
 * @param lambda   wavelength (propagation speed is fixed at 1)
 * @param time     current simulation time
 * @param fi       phase offset
 * @param N        total number of pixels (bounds guard)
 */
__global__ void wavee(int* tab, unsigned int rowSize, unsigned int centerX, unsigned int centerY,
float A, float lambda, float time, float fi, unsigned int N)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    // row/column of this pixel; note w*rowSize + h == index, so the guard
    // below is simply "index < N"
    int w = int(index/rowSize);
    int h = index%rowSize;
    if ( w*rowSize+h < N ) {
        // absolute distance components; the branches keep the unsigned
        // subtraction from wrapping when the center lies past this pixel
        float dx = 0;
        if(centerX > w) {
            dx = centerX - w;
        } else {
            dx = w - centerX;
        }
        float dy = 0;
        if(centerY > h) {
            dy = centerY - h;
        } else {
            dy = h - centerY;
        }
        // FIX (perf/idiom): the original used double-precision pow()/sqrt()/sin()
        // inside a float kernel, forcing costly double math on every thread.
        // Plain multiplication and the float intrinsics compute the same wave.
        float distance = sqrtf(dx*dx + dy*dy);
        const float pi = 3.1415f;
        const float v = 1.0f;          // propagation speed
        float T = lambda/v;            // period
        float ww = 2.0f*pi/T;          // angular frequency
        float k = 2.0f*pi/lambda;      // wave number
        // displacement in [-A, A] ...
        float f = A * sinf( ww*time - k*distance + fi );
        // ... mapped to an intensity centered at 127
        float res = f * 127 + 127;
        tab[index] = int(res);
    }
}
2,599 | #include <stdio.h>
#include <cuda.h>
//Code writen by Alan Fleming
/* CPU reference implementation: element-wise sum of two N x N matrices,
 * c[i][j] = a[i][j] + b[i][j], all stored row-major. */
void add_matrix_cpu(int *a, int *b, int *c, int N){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            int idx = row * N + col;
            c[idx] = a[idx] + b[idx];
        }
    }
}
/* GPU kernel: one thread per matrix element, c = a + b for an N x N
 * row-major matrix. The guard handles partial blocks at the grid edge. */
__global__ void add_matrix_gpu(int *a, int *b, int *c, int N){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < N && y < N) {
        int idx = y * N + x;
        c[idx] = a[idx] + b[idx];
    }
}
/* Print an N x N row-major matrix, one row per line, entries space-separated. */
void printMatrix(int *m, int N){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++)
            printf("%d ", m[row * N + col]);
        printf("\n");
    }
}
/* Compare two N x N matrices element-wise. Prints a PASSED/FAILED verdict
 * and returns 0 on a full match, 1 on the first mismatch. */
int verifyMatrix(int *a, int *b, int N){
    const int total = N * N;
    for (int idx = 0; idx < total; idx++) {
        if (a[idx] != b[idx]) {
            printf("TEST FAILED\n");
            return 1;
        }
    }
    printf("TEST PASSED\n");
    return 0;
}
/**
 * Driver: adds two MATRIXSIZE x MATRIXSIZE integer matrices on both the CPU
 * and the GPU, prints all matrices, and verifies the two results agree.
 * Usage: prog <matrix size> <block size>
 */
int main(int argc, char *argv[]){
    // check number of arguments (program name + 2 parameters required)
    if(argc <= 2) {
        printf("Please supply matrix size and block size");
        return 1;
    }
    // assign matrix and block size from the command line
    const int MATRIXSIZE = atoi(argv[1]);
    const int BLOCKSIZE = atoi(argv[2]);
    const size_t BYTES = sizeof(int) * MATRIXSIZE * MATRIXSIZE;

    // host buffers
    int *a = (int *)malloc(BYTES); // first operand
    int *b = (int *)malloc(BYTES); // second operand
    int *c = (int *)malloc(BYTES); // CPU result (reference)
    int *d = (int *)malloc(BYTES); // GPU result

    // deterministic pseudo-random initialization of a and b
    int init = 1325;
    for (int i = 0; i < MATRIXSIZE; i++){
        for (int j = 0; j < MATRIXSIZE; j++){
            init = 3125 * init % 65536;
            a[i * MATRIXSIZE + j] = (init - 32768) / 6553;
            b[i * MATRIXSIZE + j] = init % 1000;
        }
    }

    // print the inputs
    printf("a \n --------------------- \n");
    printMatrix(a, MATRIXSIZE);
    printf("b \n --------------------- \n");
    printMatrix(b, MATRIXSIZE);

    // CPU reference result
    add_matrix_cpu(a, b, c, MATRIXSIZE);
    printf("c \n --------------------- \n");
    printMatrix(c, MATRIXSIZE);

    // device buffers
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, BYTES);
    cudaMalloc((void **)&dev_b, BYTES);
    cudaMalloc((void **)&dev_c, BYTES);
    // copy inputs to the device
    cudaMemcpy(dev_a, a, BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, BYTES, cudaMemcpyHostToDevice);

    // launch configuration
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
    // BUG FIX: the original computed ceil((MATRIXSIZE-1)/BLOCKSIZE), which
    // launches one block too few whenever MATRIXSIZE is one more than a
    // multiple of BLOCKSIZE (e.g. size 33, block 32 gave a 1x1 grid, leaving
    // the last row and column uncomputed). Standard integer ceiling division
    // covers the whole matrix; the kernel's bounds check handles partial
    // edge blocks.
    int gridWidth = (MATRIXSIZE + BLOCKSIZE - 1) / BLOCKSIZE;
    dim3 dimGrid(gridWidth, gridWidth, 1);

    // add matrix using gpu
    add_matrix_gpu<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, MATRIXSIZE);
    // copy the GPU result back to the host
    cudaMemcpy(d, dev_c, BYTES, cudaMemcpyDeviceToHost);

    printf("d \n --------------------- \n");
    printMatrix(d, MATRIXSIZE);

    // compare CPU and GPU results
    verifyMatrix(c, d, MATRIXSIZE);

    // free memory
    free(a);
    free(b);
    free(c);
    free(d);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
2,600 | /*
nvcc gpu_info.cu -o gpu_info.x
Michael Pohoreski
Copyleft {c} 2013
*/
#include <stdio.h>
#include <cuda.h>
/*
GeForce GTX Titan @ 928 MHz
SM: 14 * 192 sm/core = 2688 Cores
384-bit @ 3004 MHz = 288 GB/s
GeForce GT 750M @ 925 MHz
2 * 192 Cores/SM = 384 Cores
128-bit @ 2508 MHz = 80 GB/s
GeForce GT 330M @ 1100 MHz
SM: 6 * 8 sm/core = 48 Cores
128-bit @ 790 MHz = 25 GB/s
*/
/**
 * Look up the number of CUDA cores per SM for a compute capability.
 *
 * @param major  compute capability major version (rows of the table, 1..4)
 * @param minor  compute capability minor version (columns, 0..5)
 * @return cores per SM, or 0 for any capability not in the table
 */
int CudaGetCores( int major, int minor )
{
    static const int cores[] = {
        8, 8, 8, 8, 0, 0, // 1.0 1.1 1.2 1.3 -.- -.-
        32, 48, 0, 0, 0, 0, // 2.0 2.1
        192, 0, 0, 0, 0, 192, // 3.0 3.5
        256, 0, 0, 0, 0, 0 // 4.0
    };
    const int entries = (int)(sizeof(cores) / sizeof(cores[0]));
    int idx = 6 * (major - 1) + minor;
    // BUG FIX: the original indexed the table unconditionally, reading out of
    // bounds (garbage core counts) for any capability not listed — e.g.
    // major >= 5 (Maxwell and newer) or minor > 5. Return 0 for unknowns.
    if (major < 1 || minor < 0 || minor > 5 || idx >= entries)
        return 0;
    return cores[idx];
}
// cudaDeviceProp()
// Reference: http://developer.download.nvidia.com/compute/cuda/4_1/rel/toolkit/docs/online/group__CUDART__DEVICE_g5aa4f47938af8276f08074d09b7d520c.html
// https://devblogs.nvidia.com/parallelforall/how-query-device-properties-and-handle-errors-cuda-cc/
// https://devblogs.nvidia.com/parallelforall/how-implement-performance-metrics-cuda-cc/
// Enumerate every CUDA device on the system and print its clock, core count,
// memory, thread-limit and texture-limit characteristics.
int main()
{
int devices;
cudaError_t error = cudaGetDeviceCount( &devices );
if( error != cudaSuccess )
return printf( "ERROR: Couldn't find any CUDA devices.\n" ); // printf's return (chars written) doubles as a non-zero exit code
for( int device = 0; device < devices; device++ )
{
cudaDeviceProp prop;
// NOTE(review): return value not checked; on failure the fields printed
// below would be uninitialized — consider checking like the call above.
cudaGetDeviceProperties( &prop, device );
// header: 1-based device index, name, core clock in MHz
printf( "\nGPU #%d: \'%s\' @ %d MHz\n", (device+1), prop.name, (prop.clockRate/1000) );
printf( " Compute: %d.%d\n", prop.major, prop.minor );
// cores = SM count * cores-per-SM looked up from the capability table
printf( " Multi Processors: %d * %d Cores/SM = %d Cores\n"
, prop.multiProcessorCount
, CudaGetCores( prop.major, prop.minor )
, prop.multiProcessorCount * CudaGetCores( prop.major, prop.minor )
);
printf( "\n=== Memory ===\n" );
printf( " Total Memory : %lu MB (%lu bytes)\n", (prop.totalGlobalMem/1024)/1024, (size_t)prop.totalGlobalMem );
printf( " Bus Width : %u-bit @ %d MHz ==> ", prop. memoryBusWidth, prop.memoryClockRate/1000 );
// theoretical peak = clock(MHz) * bus-width-in-bytes * 2 (double data rate)
printf( " Max Bandwidth: %u GB/s\n" , (prop.memoryClockRate/1000 * ((prop. memoryBusWidth/8)*2))/1000 ); // DDR2/3/4/5 = *2
printf( " Const memory : %lu (bytes)\n" , prop.totalConstMem );
printf( " Memory/Block : %lu\n" , prop.sharedMemPerBlock );
printf( " Unified mem : %d\n" , prop.unifiedAddressing );
printf( "\n=== Threads ===\n" );
printf( " Max Threads/SM : %d \n" , prop.maxThreadsPerMultiProcessor );
printf( " Threads / Block: %d\n" , prop.maxThreadsPerBlock );
printf( " Max Thread Size: %d, %d, %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
printf( " Max Grid size : %u, %u, %u\n", prop.maxGridSize [0], prop.maxGridSize [1], prop.maxGridSize [2] );
printf( " Registers/Block: %d\n" , prop.regsPerBlock );
printf( "\n=== Texture ===\n" );
printf( " Texture Size 1D: %d \n", prop.maxTexture1D );
printf( " Texture Size 2D: %d x %d \n", prop.maxTexture2D[0], prop.maxTexture2D[1] );
printf( " Texture Size 3D: %d x %d x %d\n", prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2] );
printf( "\n" );
}
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.