serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
23,201 | #include <iostream>
using namespace std;
// Primary template: prints any streamable value tagged as "generic".
template<class T> void f(T x) {
    std::cout << "generic : " << x << std::endl;
}
// Full specialization for int: chosen over the primary template for f(1).
template<> void f(int x){
    std::cout << "int : ";
    std::cout << x << std::endl;
}
// Demo driver: f(1) resolves to the int specialization, f(2.3) to the primary template.
int main () {
    f(1);
    f(2.3);
    return 0;
}
|
23,202 | #include "hmac_sha512.cuh"
// Initialize an HMAC-SHA512 context from a password (RFC 2104 key setup).
// The key is the password zero-padded to the 128-byte SHA-512 block size;
// passwords longer than one block are first reduced to their 64-byte digest.
// After this call, ctx->ctx_inside has absorbed (key XOR ipad) and
// ctx->ctx_outside has absorbed (key XOR opad).
__device__ void hmac_sha512_init(HmacSha512Context *ctx, const uint8_t password[], size_t password_len) {
    uint8_t key[128];
    if (password_len <= 128) {
        memcpy(key, password, password_len);
        memset(key + password_len, 0, 128 - password_len);
    } else {
        Sha512Context ctx_key{};
        sha512_init(&ctx_key);
        // BUG FIX: hash the password itself — the original hashed the
        // uninitialized `key` buffer instead of `password`.
        sha512_update(&ctx_key, password, password_len);
        sha512_final(&ctx_key);
        sha512_write_output(&ctx_key, key);
        memset(key + 64, 0, 64);
    }
    // Outer pad: key XOR 0x5c, absorbed into the outer hash state.
    #pragma unroll 128
    for (uint8_t &byte : key) {
        byte ^= 0x5c;
    }
    sha512_init(&ctx->ctx_outside);
    sha512_update(&ctx->ctx_outside, key, 128);
    // Inner pad: undo 0x5c and apply 0x36 in one XOR (0x5c ^ 0x5c ^ 0x36 == 0x36).
    #pragma unroll 128
    for (uint8_t &byte : key) {
        byte ^= 0x5c ^ 0x36;
    }
    sha512_init(&ctx->ctx_inside);
    sha512_update(&ctx->ctx_inside, key, 128);
}
// Feed message bytes into the inner hash; may be called repeatedly.
__device__ void hmac_sha512_update(HmacSha512Context *ctx, const uint8_t message[], size_t message_len) {
    Sha512Context *inner = &ctx->ctx_inside;
    sha512_update(inner, message, message_len);
}
// Finish the HMAC: complete the inner hash, then absorb its 64-byte digest
// into the outer hash and finalize that too.
__device__ void hmac_sha512_final(HmacSha512Context *ctx) {
    uint8_t inner_digest[64];
    sha512_final(&ctx->ctx_inside);
    sha512_write_output(&ctx->ctx_inside, inner_digest);
    sha512_update(&ctx->ctx_outside, inner_digest, sizeof inner_digest);
    sha512_final(&ctx->ctx_outside);
}
// Copy the finished 64-byte HMAC tag into `hash`.
__device__ void hmac_sha512_write_output(HmacSha512Context *ctx, uint8_t hash[]) {
    Sha512Context *outer = &ctx->ctx_outside;
    sha512_write_output(outer, hash);
}
|
23,203 | #include "includes.h"
// Zero the off-diagonal entries of U; each thread clears elem_per_thr elements.
// Upper-triangle coordinates are mirrored into the lower triangle before the
// write; diagonal entries are left untouched.
__global__ void chol_kernel_cudaUFMG_zero(float * U, int elem_per_thr) {
    // Flat thread number over a 2-D grid of 2-D blocks.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int tid = row * blockDim.x * gridDim.x + col;
    for (unsigned e = 0; e < elem_per_thr; ++e) {
        const int idx = tid * elem_per_thr + e;
        int x = idx % MATRIX_SIZE;
        int y = idx / MATRIX_SIZE;
        if (x == y)
            continue;               // keep the diagonal
        if (y < x) {                // mirror upper-triangle coords into the lower triangle
            x = MATRIX_SIZE - x - 1;
            y = MATRIX_SIZE - y - 1;
        }
        U[x + y * MATRIX_SIZE] = 0;
    }
} |
23,204 | #include "includes.h"
// Flattened global thread id for a 3-D grid of 3-D blocks.
__device__ unsigned int getGid3d3d(){
    const int blocksPerSlice = gridDim.x * gridDim.y;
    const int block = blockIdx.z * blocksPerSlice
                    + blockIdx.y * gridDim.x
                    + blockIdx.x;
    const int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    const int local = threadIdx.z * (blockDim.x * blockDim.y)
                    + threadIdx.y * blockDim.x
                    + threadIdx.x;
    return block * threadsPerBlock + local;
}
// Element-wise Euclidean norm: out[i] = sqrt(in1[i]^2 + in2[i]^2).
// NOTE(review): no bounds guard — the launch must cover exactly the array length.
__global__ void l2_norm(double *in1, double *in2, double *out){
    const int gid = getGid3d3d();
    const double a = in1[gid];
    const double b = in2[gid];
    out[gid] = sqrt(a*a + b*b);
} |
23,205 | #include "includes.h"
// Accumulate rows of inB (selected through the index table `sub`) into inA.
// blockIdx.y selects the row; idz indexes the snno elements within it.
__global__ void sneladd(float * inA, float * inB, int *sub, int Nprj, int snno)
{
    const int idz = threadIdx.x + blockDim.x * blockIdx.x;
    if (blockIdx.y >= Nprj || idz >= snno)
        return;
    inA[snno*blockIdx.y + idz] += inB[snno*sub[blockIdx.y] + idz];
} |
23,206 | #include <stdio.h>
#include <stdlib.h>
// Per-element squared difference between the new and old y vectors.
// NOTE(review): no bounds guard — the launch must cover exactly the array length.
__global__
void calc_meanshift2(float* y_new, float* y_old, float* meanshift)
{
    const int i = blockDim.x*blockIdx.x + threadIdx.x;
    const float diff = y_new[i] - y_old[i];
    meanshift[i] = diff * diff;
}
// Truncated exponential kernel: returns exp(-x / (2*sigma2)) for x <= sigma2,
// and 0 beyond the cutoff. x is a squared distance, sigma2 the squared bandwidth.
__device__
float kernel_fun(float x, float sigma2)
{
    if( x > sigma2)
        return 0;
    // expf keeps the computation in single precision; the original exp()
    // silently promoted to double on the device.
    return expf(-x/2/sigma2);
}
// Fill K[i][j] = kernel_fun(||y_i - x_j||^2, sigma2) over a 2-D thread grid.
// NOTE(review): no bounds guard — grid must match N exactly; sigma2 is an int
// promoted to float at the call.
__global__
void calc_Kernel_Matrix(int N, int D, float *x, float *y, float *K, int sigma2)
{
    const int i = blockDim.x*blockIdx.x + threadIdx.x;
    const int j = blockDim.y*blockIdx.y + threadIdx.y;
    // Squared Euclidean distance between y-row i and x-row j.
    float dist = 0;
    for(int d=0; d<D; d++){
        const float diff = y[i*D+d] - x[j*D+d];
        dist += diff * diff;
    }
    K[i*N+j] = kernel_fun(dist, sigma2);
}
// Normalize each D-dimensional row of y_new by its kernel sum K_sum[i].
__global__
void kernel_sum_div(int D, float* y_new, float* K_sum)
{
    const int i = blockDim.x*blockIdx.x + threadIdx.x;
    const float denom = K_sum[i];
    for(int d=0; d<D; d++)
        y_new[i*D+d] = y_new[i*D+d]/denom;
}
// Kx[i][j] = K[i][j] * x[j][d]: weight column d of x by the kernel matrix.
__global__ void kernel_Dvec_mult(int N, int D, float* K, float* x, float* Kx, int d)
{
    const int row = blockDim.x*blockIdx.x + threadIdx.x;
    const int col = blockDim.y*blockIdx.y + threadIdx.y;
    const int idx = row*N + col;
    Kx[idx] = K[idx] * x[col*D + d];
}
// Scatter the reduced column sums into dimension d of d_y_new.
__global__ void copy_to_y(int D, float* d_y_new, float* kernelXsum, int d)
{
    const int row = blockDim.x*blockIdx.x + threadIdx.x;
    d_y_new[row*D + d] = kernelXsum[row];
}
// Partial reduction of the squared mean-shift sum_i (y_new[i]-y_old[i])^2.
// A grid-stride loop accumulates a per-thread partial sum, then a shared-memory
// tree reduction combines the block's partials; each block writes ONE partial
// result to reducted_vec[blockIdx.x] (the final sum over blocks happens later).
// Launch with dynamic shared memory of blockDim.x * sizeof(float); blockDim.x
// should be a power of two so the halving loop covers every cache entry.
__global__ void calc_reduce_meanshift(int N, float* y_new, float* y_old, float* reducted_vec)
{
extern __shared__ float reduction_cache[] ;
//thread ID on each row of blocks
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int cache_i = threadIdx.x;
float temp=0;
float tempY_new; // locals keep it to a single global-memory read per vector element
float tempY_old;
// Grid-stride loop: also makes the kernel correct for any grid size.
while (tid < N)
{
tempY_new = y_new[tid];
tempY_old= y_old[tid];
temp += (tempY_new-tempY_old)*(tempY_new-tempY_old);
tid += blockDim.x * gridDim.x;
}
reduction_cache[cache_i] = temp;
__syncthreads();
// Tree reduction within the block's shared-memory cache.
// The barrier stays outside the divergent `if` so every thread reaches it.
for(int i=blockDim.x/2; i>0; i>>=1)
{
if(cache_i < i)
reduction_cache[cache_i] += reduction_cache[cache_i+i];
__syncthreads();
}
// Thread 0 publishes this block's partial sum.
if(cache_i==0)
reducted_vec[blockIdx.x] = reduction_cache[0];
}
// __global__
// void kernelX_dot_product(int N, int D, int d, float* K, float* x, float* reducted_vec)
// {
// extern __shared__ float reduction_cache[] ;
// //thread ID on each row of blocks
// int tid = blockDim.x * blockIdx.x + threadIdx.x;
// int cache_i = threadIdx.x;
// /* This UNROLLS the elements of x, "outside" the grid's index range.
// In the case of N=600, threadsPerBlock=256 and 2 blocks in total,
// we have 600-256*2=88 additions done in parallel, before the reduction of the 512 threads.
// incase the index-range > N, the reduction scheme will simply add some zeros to the vector.
// This allows as to oversubscribe in terms of threads and blocks.
// */
// int offset = N*blockIdx.y;
// float temp=0;
// while (tid < N)
// {
// temp += K[tid+offset]*x[tid*D+d];
// tid += blockDim.x * gridDim.x;
// }
// /* Load x-data into local shared memory.
// As mentioned before, some entries are small sums of
// x's outside the grid's range */
// reduction_cache[cache_i] = temp;
// __syncthreads();
// // Begin the reduction per shared-memory-block
// for(int i=blockDim.x/2; i>0; i>>=1)
// {
// if(cache_i < i)
// reduction_cache[cache_i] += reduction_cache[cache_i+i];
// __syncthreads();
// }
// // Final Sum is stored in global array, with stride d, to match the NxD dimensionality of the input dataset.
// if(cache_i==0)
// reducted_vec[blockIdx.y*gridDim.x + blockIdx.x + d] = reduction_cache[cache_i];
// }
// void WR_kernelX_dow_product(int N, float* d_K, float* d_x, /*out*/ ReductionCache* rc )
// {
// dim3 blockDim2(4, 1, 1);
// dim3 gridDim2(N/4,N,1);
// size_t cache_size = 4*N*sizeof(float);
// kernelX_dot_product<<<gridDim2, blockDim2, cache_size>>>(N,D,0, d_KernelMatrix, d_x, d_y_new);
// kernelX_dot_product<<<gridDim2, blockDim2, cache_size>>>(N,D,1, d_KernelMatrix, d_x, d_y_new);
// //reduction_sum<<<L/256, 256, 256*sizeof(float) >>>(N/4, d_y_new, d_y_new);
// if(rc->blocksNum == 1)
// {
// kernelX_dot_product<<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N,D,0, d_K,d_x, rc->d_sum);
// kernelX_dot_product<<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N,D,1, d_K,d_x, rc->d_sum);
// }
// else
// {
// // We need multiple reduction calls!
// reduction_sum <<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N, d_A, rc->d_reduced_vec);
// /* Reduct the final reduction vector! */
// // Ideally we would like threads_num==length(reduced_vec)/numRow.
// However threads_num2 must be a power of 2. Thus:
// int threads_num2 = exp2f(floor(log2f(rc->reduced_vec_length/rc->rowNum)));
// if(threads_num2>512)
// threads_num2=512;
// //printf("THREADS: %d RED_VEC %d\n", threads_num2, rc->reduced_vec_length/rc->rowNum );
// dim3 gridDim2(1,rc->rowNum,1);
// dim3 blockDim2(threads_num2,1,1);
// reduction_sum<<<gridDim2, blockDim2, threads_num2*sizeof(float)>>>\
// (rc->gridDim.x, rc->d_reduced_vec, rc->d_sum); //
// // WARNING: launching with original thread_num might be too much.
// // SOLUTION: Find power-of-2 nearest to block_num
// }
// } |
23,207 | #include <fstream>
#include <vector>
#include <string>
#include <sstream>
#include <iostream>
#include <cuda.h>
using namespace std;
// One explicit 5-point heat-diffusion step on a width x height plate.
// Layout: index = w*height + h. Boundary cells reuse themselves as neighbors
// (clamped stencil); cells with d_tem_fix != -1 keep their fixed temperature.
__global__ void Dim2_Calculation(float * __restrict__ d_tem_res, float * __restrict__ d_tem_meo,
const float * __restrict__ d_tem_fix, const int width, const int height, const float k)
{
    const int curId = blockIdx.x * blockDim.x + threadIdx.x;
    const int w = curId/height;
    const int h = curId - w*height;
    // h is always < height by construction, so inBounds <=> curId < width*height.
    const bool inBounds = (w < width) && (h < height);
    if (inBounds) {
        // Clamped neighbor coordinates.
        const int left  = (w > 0)          ? w - 1 : w;
        const int right = (w < width - 1)  ? w + 1 : w;
        const int top   = (h > 0)          ? h - 1 : h;
        const int down  = (h < height - 1) ? h + 1 : h;
        d_tem_res[curId] = d_tem_meo[curId]+k*(d_tem_meo[left*height+h]+d_tem_meo[right*height+h]
            + d_tem_meo[w*height+top] + d_tem_meo[w*height+down] - 4*d_tem_meo[curId]);
        // Fixed-temperature cells override the computed value.
        // BUG FIX: this check (and the copy below) were previously unguarded and
        // read/wrote out of bounds for tail threads with curId >= width*height.
        if ( d_tem_fix[curId] != -1){
            d_tem_res[curId] = d_tem_fix[curId];
        }
    }
    // Barrier must stay outside the divergent branch so all threads reach it.
    // NOTE(review): __syncthreads() is block-wide only; the copy below can still
    // race with OTHER blocks reading d_tem_meo. A robust fix is to swap the
    // res/meo pointers on the host between timesteps instead of copying here.
    __syncthreads();
    if (inBounds) {
        d_tem_meo[curId] = d_tem_res[curId];
    }
}
// One explicit 7-point heat-diffusion step on a width x height x depth volume.
// Layout: index = d*(height*width) + w*height + h. Boundary cells clamp to
// themselves; cells with d_tem_fix != -1 keep their fixed temperature.
__global__ void Dim3_Calculation(float * __restrict__ d_tem_res, float * __restrict__ d_tem_meo,
const float * __restrict__ d_tem_fix, const int width, const int height, const int depth, const float k)
{
    const int curId = blockIdx.x * blockDim.x + threadIdx.x;
    const int slice = height*width;
    const int d = curId/slice;
    const int w = (curId - d*slice)/height;
    const int h = curId - d*slice - w*height;
    // w and h are in range by construction, so inBounds <=> curId < w*h*depth.
    const bool inBounds = (w < width) && (h < height) && (d < depth);
    if (inBounds) {
        // Clamped neighbor coordinates.
        const int left  = (w > 0)          ? w - 1 : w;
        const int right = (w < width - 1)  ? w + 1 : w;
        const int top   = (h > 0)          ? h - 1 : h;
        const int down  = (h < height - 1) ? h + 1 : h;
        const int front = (d > 0)          ? d - 1 : d;
        const int back  = (d < depth - 1)  ? d + 1 : d;
        d_tem_res[curId] = d_tem_meo[curId]+k*(d_tem_meo[d*slice+left*height+h]+d_tem_meo[d*slice+right*height+h]
            + d_tem_meo[d*slice+w*height+top] + d_tem_meo[d*slice+w*height+down]
            + d_tem_meo[front*slice+w*height+h] + d_tem_meo[back*slice+w*height+h] - 6*d_tem_meo[curId]);
        // BUG FIX: the fixed-cell check and the copy below were previously
        // unguarded — out-of-bounds for tail threads past the volume.
        if ( d_tem_fix[curId] != -1){
            d_tem_res[curId] = d_tem_fix[curId];
        }
    }
    // Block-wide barrier; see the 2-D kernel for the cross-block race caveat.
    __syncthreads();
    if (inBounds) {
        d_tem_meo[curId] = d_tem_res[curId];
    }
}
int main(int argc,char**argv)
{
// ------------------------------------ initial parameter --------------------------------------
string Dimension;
string path = argv[1];
ifstream cfile(path.c_str());
int timestep, width, height, depth, totalLength;
int location_x, location_y, location_z, fix_width, fix_height, fix_depth;
float init_temp, ftemp, k;
float *tem_res, *tem_meo, *tem_fix;
float *d_tem_res, *d_tem_meo, *d_tem_fix;
// ------------------------------------Reading the config file ---------------------------------------
string l;
vector<string> fileContent;
while(getline(cfile, l))
{
if(l[0] != '#' && !l.empty())
{fileContent.push_back(l);}
}
// -------------------------Dimension, k, timestep, init_temp---------------------------
Dimension = fileContent[0];
if(fileContent[1][0]=='.')
{fileContent[1].insert(fileContent[1].begin(), '0');}
k = (float)atof(fileContent[1].c_str());
timestep = atoi(fileContent[2].c_str());
init_temp = (float)atof(fileContent[4].c_str());
cout << "k=" << k << " timestep=" << timestep << " BeginTemperture=" << init_temp << endl;
// --------------------begin calculation based on Dimension----------------------
// read width and height, then build the matrix.
if(Dimension=="2D")
{
string::size_type pos = fileContent[3].find(",");
width = atoi(fileContent[3].substr(0, pos).c_str());
height = atoi(fileContent[3].substr(pos+1).c_str());
cout << "Width=" << width << " Height=" << height << endl;
totalLength = width*height;
}
else{
string::size_type pos = fileContent[3].find(",");
string::size_type pos2 = fileContent[3].find_last_of(",");
width = atoi(fileContent[3].substr(0, pos).c_str());
height = atoi(fileContent[3].substr(pos+1,pos2-pos-1).c_str());
depth = atoi(fileContent[3].substr(pos2+1).c_str());
cout << "Width=" << width << " Height=" << height << " Depth="<< depth << endl;
totalLength = width*height*depth;
}
tem_res = (float *)malloc(totalLength * sizeof(float));
tem_meo = (float *)malloc(totalLength * sizeof(float));
tem_fix = (float *)malloc(totalLength * sizeof(float));
for (int i = 0; i < totalLength; ++i)
{
tem_res[i] = init_temp;
tem_meo[i] = init_temp;
tem_fix[i] = -1;
}
// ------------------------ initialize matrix ----------------------------------
if(Dimension=="2D")
{
for(int i=5; i<fileContent.size(); i++)
{
string s = fileContent[i];
location_x = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
location_y = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
fix_width = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
fix_height = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
ftemp = (float)atof(s.c_str());
for(int w=location_x; w<location_x+fix_width; w++)
{
for(int h=location_y; h<location_y+fix_height; h++)
{
tem_res[w*height+h] = ftemp;
tem_meo[w*height+h] = ftemp;
tem_fix[w*height+h] = ftemp;
}
}
}
//for (int i=0; i<totalLength; i++){cout << tem_fix[i] << endl;}
}
else{
for(int i=5; i<fileContent.size(); i++)
{
string s = fileContent[i];
location_x = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
location_y = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
location_z = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
fix_width = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
fix_height = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
fix_depth = atoi(s.substr(0,s.find(",")).c_str());
s.erase(0,s.find(",")+1);
ftemp = (float)atof(s.c_str());
for(int w=location_x; w<location_x+fix_width; w++)
{
for(int h=location_y; h<location_y+fix_height; h++)
{
for(int d=location_z; d<location_z+fix_depth; d++)
{
tem_res[d*width*height + w*height + h] = ftemp;
tem_meo[d*width*height + w*height + h] = ftemp;
tem_fix[d*width*height + w*height + h] = ftemp;
}
}
}
}
//for (int i=0; i<totalLength; i++){cout << tem_fix[i] << endl;}
}
cudaMalloc((void **)&d_tem_res, totalLength * sizeof(float));
cudaMalloc((void **)&d_tem_meo, totalLength * sizeof(float));
cudaMalloc((void **)&d_tem_fix, totalLength * sizeof(float));
cudaMemcpy(d_tem_res, tem_res, totalLength * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_tem_meo, tem_meo, totalLength * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_tem_fix, tem_fix, totalLength * sizeof(float), cudaMemcpyHostToDevice);
// ----------------------- call Cuda Calculation -----------------------------------
cout << "You begin Cuda Calculation" << endl;
int bknum = 128;
if(Dimension=="2D")
{
for (int i = 0; i < timestep; i++)
{
//cout << "Cuda Calculation " << i <<endl;
Dim2_Calculation <<<(totalLength+bknum-1)/bknum, bknum>>>(d_tem_res, d_tem_meo, d_tem_fix, width, height, k);
}
cudaMemcpy(tem_res, d_tem_res, totalLength*sizeof(float), cudaMemcpyDeviceToHost);
}
else
{
for (int i = 0; i < timestep; i++)
{
Dim3_Calculation <<<(totalLength+bknum-1)/bknum, bknum>>>(d_tem_res, d_tem_meo, d_tem_fix, width, height, depth, k);
}
cudaMemcpy(tem_res, d_tem_res, totalLength*sizeof(float), cudaMemcpyDeviceToHost);
}
// ------------------------- Writing into output.csv -------------------------------
ofstream result;
result.open("heatOutput.csv");
if(Dimension=="2D")
{
for (int w = 0; w < width; w++)
{
for (int h = 0; h < height; h++)
{
result << tem_res[w*height+h];
if(h<height-1)
{result << ",";}
}
result << '\n';
}
}
else
{
for(int d=0; d<depth; d++)
{
for (int w = 0; w < width; w++)
{
for (int h = 0; h < height; h++)
{
result << tem_res[d*width*height+w*height+h];
if(h<height-1)
{result << ",";}
}
result << '\n';
}
result << '\n';
}
}
result.close();
// -------------------- free meomery -------------------
cudaFree(d_tem_res);
cudaFree(d_tem_meo);
cudaFree(d_tem_fix);
free(tem_res);
free(tem_meo);
free(tem_fix);
return 0;
} |
23,208 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
#define ISLAND 10
#define POPULATION 50
#define FACILITY 20
#define GENERATION 10
#define CROSSOVER 0.6
#define MUTATION 0.03
#define MIGRATION 15
#define INDIVIDUAL 5
#define H 15 // BAY height
#define W 10 // BAY width
void shuffle(int* facility);
// Compute (x, y) centroid coordinates for every facility of each individual.
// One thread per individual (grid = islands, block = population members).
// `bay` splits the facility sequence into vertical bays: a 1 bit (or the last
// facility) closes the current bay; facilities in a bay share one x column and
// are stacked evenly over height H, each bay occupying a W-wide column.
__global__ void calPosition(int *data, short int *bay, float *position){
    int b=blockIdx.x;   // block index == island
    int t=threadIdx.x;  // thread index == population member
    int n=blockDim.x;   // threads per block == population size
    int x=b*n+t;        // flat individual index
    int posit = x * FACILITY;
    int bayposit = x * (FACILITY-1);
    // BUG FIX: each thread used to zero the ENTIRE position array, racing with
    // every other thread's writes; now it clears only its own slice.
    for(int i=0;i<FACILITY*2;i++){
        position[posit*2 + i] = 0;
    }
    int len = 1;    // facilities accumulated in the current bay
    int next = 0;   // x offset of the current bay column
    for(int f=0;f<FACILITY;f++){
        // BUG FIX: the last facility always closes a bay. Reading bay[bayposit+f]
        // at f == FACILITY-1 was one element past this individual's slice; the
        // original's len++/len-- pair canceled out there, so treating the end as
        // a break bit (1) preserves the result without the out-of-range read.
        const int brk = (f == FACILITY - 1) ? 1 : bay[bayposit+f];
        if(brk == 0){
            len = len + 1;
        }
        else{
            // Close the bay: place its `len` facilities in one column.
            const float cx = W / 2.0 + next;   // renamed from `x` (shadowed the thread index)
            for(int j=0;j<len;j++){
                position[posit*2+(f+j-len+1)*2] = cx;
                const float cy = H / (len * 2.0) * ( (j * 2) + 1) ;
                position[posit*2+(f+j-len+1)*2+1] = cy;
            }
            len = 1;
            next = next + W;
        }
    }
}
// Fill the symmetric FACILITY x FACILITY Euclidean distance matrix of each
// individual. The matrix is indexed by facility IDs (the values in `data`),
// not by sequence positions. One thread per individual.
__global__ void calDistance(int *data, float *position, float *distance){
    int b=blockIdx.x;   // block index == island
    int t=threadIdx.x;  // thread index == population member
    int n=blockDim.x;   // threads per block == population size
    int x=b*n+t;        // flat individual index
    int posit = x * FACILITY;
    // BUG FIX: each thread used to zero the ENTIRE distance array, racing with
    // other threads' writes; now it clears only its own FACILITY^2 slice.
    for(int i=0;i<FACILITY*FACILITY;i++){
        distance[posit*FACILITY + i] = 0;
    }
    for(int f=0;f<FACILITY;f++){
        for(int j=f+1;j<FACILITY;j++){
            float x1 = position[ (posit + f)*2 ];
            float y1 = position[ (posit + f)*2 + 1];
            float x2 = position[ (posit + j)*2 ];
            float y2 = position[ (posit + j)*2 + 1];
            int fa = data[ posit + f ];   // facility id at sequence slot f
            int fb = data[ posit + j ];   // facility id at sequence slot j
            // (y2-y1)^2 == (y1-y2)^2, so the original y2>y1 branch computed the
            // same value on both paths; collapsed into a single expression.
            float dst = sqrt( (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) );
            distance[ (posit + fa)*FACILITY + fb] = dst;
            distance[ (posit + fb)*FACILITY + fa] = dst;
        }
    }
}
// Element-wise weighted cost per individual:
// totalCost[f][j] = cost[f][j] * distance[f][j]. One thread per individual.
__global__ void calTotalcost(float *distance, int *cost, float *totalCost){
    const int island = blockIdx.x;    // block index == island
    const int member = threadIdx.x;   // thread index == population member
    const int ind = island * blockDim.x + member;
    const int posit = ind * FACILITY;
    for(int f=0;f<FACILITY;f++){
        for(int j=0;j<FACILITY;j++){
            const int ij = (posit + f)*FACILITY + j;
            totalCost[ij] = cost[f*FACILITY + j] * distance[ij];
        }
    }
}
// Objective function: sum each individual's totalCost matrix into sumCost[x],
// and track the island minimum in minCost[island*2] (value) and
// minCost[island*2+1] (member index).
// NOTE(review): the min tracking is still racy — every thread of an island
// writes minCost with no atomics or ordering, so the recorded minimum is not
// reliable; a proper fix needs a reduction. Only the index bugs are fixed here.
__global__ void calOF(float *sumCost, float *minCost, float *totalCost){
    int b=blockIdx.x;   // block index == island
    int t=threadIdx.x;  // thread index == population member
    int n=blockDim.x;   // threads per block == population size
    int x=b*n+t;
    int posit = x * FACILITY;
    // calculate OF
    sumCost[x] = 0.0;
    minCost[x/POPULATION * 2] = 0.0;
    for(int f=0;f<FACILITY;f++){
        for(int j=0;j<FACILITY;j++){
            sumCost[x] += totalCost[ (posit + f)*FACILITY + j];
        }
    }
    if(x % POPULATION==0){
        // BUG FIX: was sumCost[x*FACILITY + 0], which reads far past the
        // ISLAND*POPULATION-sized sumCost array.
        minCost[(x/POPULATION)*2] = sumCost[x];
        minCost[(x/POPULATION)*2 + 1] = 0;
    }else if(minCost[(x/POPULATION)*2] > sumCost[x]){
        // BUG FIX: the comparison read minCost[x/POPULATION] (wrong slot);
        // the stored minimum lives at index (x/POPULATION)*2.
        minCost[(x/POPULATION)*2] = sumCost[x];
        minCost[(x/POPULATION)*2 + 1] = x % POPULATION;
    }
}
// Fitness-proportional selection probability: the inverse of an individual's
// cost divided by the island's total inverse cost (totalPro).
__global__ void calProbability(float *probability, float *totalPro, float *sumCost){
    const int ind = blockIdx.x * blockDim.x + threadIdx.x;
    const float invCost = 1.0 / sumCost[ind];
    probability[ind] = invCost / (totalPro[ ind / POPULATION ]) ;
}
// Roulette-wheel selection plus a two-slot partial crossover.
// Each thread x builds TWO children (2x and 2x+1) in data2/bay2 from two
// parents picked on island x/POPULATION via the cumulative wheel probability2.
// tem/tem2 carry host-generated random numbers (used mod 100 as percentages),
// Gyes decides whether to cross, Gsss picks the crossover slot;
// Gcount/GGgetP2/test are debug outputs (GGgetP is unused).
__global__ void crossOver(float *probability2, int *data, short int *bay, int *data2, short int *bay2, int *tem, int *tem2, int *Gyes, int *Gsss, int *Gcount, int *GGgetP, int *GGgetP2, float *test){
    int b=blockIdx.x;   // block index == island
    int t=threadIdx.x;  // thread index == population member
    int n=blockDim.x;   // threads per block == population size
    int x=b*n+t;
    int posit = x * 2 * FACILITY;              // facility slice of child 2x
    int posit2 = (2*x+1) * FACILITY;           // facility slice of child 2x+1
    int bayposit = x * 2 * (FACILITY-1);       // bay slice of child 2x
    int bayposit2 = (2*x+1) * (FACILITY-1);    // bay slice of child 2x+1
    float get = (tem[x] % 100) * 0.01;         // roulette roll for parent 1
    test[x] = 0.0;
    tem[x] = tem[x] % 100;
    int getP = 0;
    float get2 = tem2[x] % 100 * 0.01;         // roulette roll for parent 2
    tem2[x] = tem2[x] % 100;
    int getP2 = 0;
    GGgetP2[x] = -1;
    // Select parent 1 on the island's cumulative probability wheel.
    for(int p=0;p<POPULATION-1;p++){
        if(get >= probability2[ (x/POPULATION)*POPULATION + p ] && get < probability2[ (x/POPULATION)*POPULATION + p+1 ]){
            getP = p+1;
            GGgetP2[x] = (x/POPULATION)*POPULATION + p;
            break;
        }
        else if(p==POPULATION-2){
            getP = p+1;
            break;
        }
    }
    test[x] = probability2[ (x/POPULATION)*POPULATION + 1];  // debug output
    // Select parent 2 the same way.
    for(int p=0;p<POPULATION-1;p++){
        if(get2 >= probability2[x/POPULATION*POPULATION + p] && get2 < probability2[x/POPULATION*POPULATION + p+1]){
            getP2 = p+1;
            break;
        }
        else if(p==POPULATION-2){
            getP2 = p+1;
            break;
        }
    }
    // Copy parent 1 into child 2x and parent 2 into child 2x+1.
    for(int f=0;f<FACILITY;f++){
        data2[ posit + f] = data[ x/POPULATION*POPULATION*FACILITY + getP*FACILITY + f];
    }
    for(int f=0;f<FACILITY;f++){
        data2[ posit2 + f ] = data[x/POPULATION*POPULATION*FACILITY + getP2*FACILITY + f];
    }
    // BUG FIX: bay slices hold only FACILITY-1 entries; the original loops copied
    // FACILITY entries, reading/writing one element past each slice — the spill
    // landed in the neighboring thread's slice (a cross-thread data race).
    for(int f=0;f<FACILITY-1;f++){
        bay2[ (2 * x)*(FACILITY-1) + f] = bay[ x/POPULATION*POPULATION*(FACILITY-1) + getP*(FACILITY-1) + f];
        bay2[ (2 * x + 1)*(FACILITY-1) + f] = bay[x/POPULATION*POPULATION*(FACILITY-1) + getP2*(FACILITY-1) + f];
    }
    int tt = Gyes[x] % 100;
    float yes = tt * 0.01;
    Gyes[x] = tt;
    if(yes <= CROSSOVER){
        int sss = FACILITY - 1;
        int seq = Gsss[x] % sss;   // crossover window: slots seq and seq+1
        Gsss[x] = seq;
        // The four (child0, child1) gene pairings involved in the 2-slot swap.
        int cross[4][2];
        cross[0][0] = data2[ posit + seq];
        cross[0][1] = data2[ posit2 + seq];
        cross[1][0] = data2[ posit + seq];
        cross[1][1] = data2[ posit2 + seq+1];
        cross[2][0] = data2[ posit + seq+1];
        cross[2][1] = data2[ posit2 + seq];
        cross[3][0] = data2[ posit+ seq+1];
        cross[3][1] = data2[ posit2 + seq+1];
        // Swap the two-slot window between the children.
        int temp = data2[ posit2 + seq];
        int temp2 = data2[posit2 + seq+1];
        data2[ posit2 + seq] = data2[ posit + seq];
        data2[ posit2 + seq+1] = data2[posit + seq+1];
        data2[posit + seq] = temp;
        data2[posit + seq+1] = temp2;
        // Count identical pairings to decide how to repair duplicates (PMX-style).
        int count = 0;
        for(int c=0;c<4;c++){
            if(cross[c][0] == cross[c][1]){
                count++;
            }
        }
        Gcount[x] = count;
        switch (count) {
        case 0:
            // No shared genes: remap both incoming genes onto the outgoing ones.
            for(int c=0;c<FACILITY;c++){
                if(c != seq){
                    if(data2[posit + c] == cross[0][1]){
                        data2[ posit + c] = cross[0][0];
                    }
                    if(data2[posit + c] == cross[3][1]){
                        data2[ posit + c] = cross[3][0];
                    }
                }
                else{
                    c++;  // skip the swapped window (slots seq and seq+1)
                }
            }
            for(int c=0;c<FACILITY;c++){
                if(c != seq){
                    if(data2[posit2 + c] == cross[0][0]){
                        data2[ posit2 + c] = cross[0][1];
                    }
                    if(data2[posit2 + c] == cross[3][0]){
                        data2[ posit2 + c] = cross[3][1];
                    }
                }
                else{
                    c++;  // skip the swapped window
                }
            }
            break;
        case 1:
            // One gene is shared between the windows: repair only the
            // non-shared pairing.
            temp = 99;
            for(int c=0;c<4;c++){
                if(cross[c][0] == cross[c][1]){
                    temp = cross[c][0];
                }
            }
            for(int c=0;c<4;c++){
                if(cross[c][0] != temp && cross[c][1] != temp){
                    for(int f=0;f<FACILITY;f++){
                        if(f != seq){
                            if(data2[posit + f] == cross[c][1]){
                                data2[ posit + f] = cross[c][0];
                            }
                        }
                        else{
                            f++;  // skip the swapped window
                        }
                    }
                }
            }
            for(int c=0;c<4;c++){
                if(cross[c][0] != temp && cross[c][1] != temp){
                    for(int f=0;f<FACILITY;f++){
                        if(f != seq){
                            if(data2[posit2 + f] == cross[c][0]){
                                data2[ posit2 + f] = cross[c][1];
                            }
                        }
                        else{
                            f++;  // skip the swapped window
                        }
                    }
                }
            }
            break;
        case 2:
            // Both windows hold the same two genes: nothing to repair.
            break;
        }
        // Swap the bay window between the children as well.
        // NOTE(review): when seq == FACILITY-2, index seq+1 reaches the last
        // valid entry of the FACILITY-1 slice for child 2x, but bayposit+seq+1
        // then aliases child 2x+1's first bit — verify the intended window.
        temp = bay2[bayposit2 + seq];
        temp2 = bay2[bayposit2 + seq+1];
        bay2[bayposit2 + seq] = bay2[bayposit + seq];
        bay2[bayposit2 + seq+1] = bay2[bayposit + seq+1];
        // BUG FIX: the original read back the just-overwritten bayposit2 slots,
        // so child 2x never received child 2x+1's bay bits; use the saved values.
        bay2[bayposit + seq] = temp;
        bay2[bayposit + seq+1] = temp2;
    }
}
// With probability MUTATION, swap two random facility slots of the individual.
// mutaYes/mutaTem/mutaTem2 carry host-generated random numbers.
__global__ void mutation(int *data2, int *mutaYes, int *mutaTem, int *mutaTem2){
    const int ind = blockIdx.x * blockDim.x + threadIdx.x;  // flat individual index
    const int posit = ind * FACILITY;
    const float roll = (mutaYes[ind] % 100) * 0.01;
    if(roll < MUTATION){
        const int slotA = mutaTem[ind] % FACILITY;
        const int slotB = mutaTem2[ind] % FACILITY;
        const int saved = data2[posit + slotA];
        data2[posit + slotA] = data2[posit + slotB];
        data2[posit + slotB] = saved;
    }
}
// With probability MUTATION, flip one random bay-break bit of the individual.
__global__ void mutationBay(short int *bay2, int *mutaBayYes, int *mutaBayTem){
    const int ind = blockIdx.x * blockDim.x + threadIdx.x;  // flat individual index
    const int posit = ind * (FACILITY - 1);
    const float roll = mutaBayYes[ind] % 100 * 0.01 ;
    if(roll < MUTATION){
        const int slot = mutaBayTem[ind] % (FACILITY - 1);
        bay2[posit + slot] = (bay2[posit + slot] == 0) ? 1 : 0;
    }
}
int main(){
double START,END;
START = clock();
srand(time(NULL));
int data[ISLAND][POPULATION][FACILITY];
short int bay[ISLAND][POPULATION][FACILITY-1]; //bay
int facility[FACILITY];
for(int i=0;i<ISLAND;i++){ // shuffle the sorted facility
printf("new island%d\n", i);
for(int p=0;p<POPULATION;p++){
for(int t=0;t<FACILITY;t++){
facility[t] = t;
}
shuffle(facility);
// for(int t=0;t<FACILITY;t++){
// printf("%d ", facility[t]);
// }
for(int f=0;f<FACILITY;f++){
data[i][p][f] = facility[f];
printf("%d ", data[i][p][f]);
}
printf("\n");
for(int b=0;b<FACILITY-1;b++){
int j = rand() % 2;
bay[i][p][b] = j;
}
}
}
printf("data\n");
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
for(int f=0;f<FACILITY;f++){
printf("%d ", data[i][p][f]);
}
printf("\n");
}
printf("\n");
}
printf("bay\n");
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
for(int f=0;f<FACILITY-1;f++){
printf("%d ", bay[i][p][f]);
}
printf("\n");
}
printf("\n");
}
// int *GA;
// short int *GB;
// cudaMalloc((void**)&GA, ISLAND*POPULATION*FACILITY*sizeof(int));
// cudaMemcpy(GA, data, ISLAND*POPULATION*FACILITY*sizeof(int), cudaMemcpyHostToDevice);
//
// cudaMalloc((void**)&GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(short int));
// cudaMemcpy(GB, bay, ISLAND*POPULATION*(FACILITY-1)*sizeof(short int), cudaMemcpyHostToDevice);
// read ther cost
FILE *fPtr;
int ttt = FACILITY * (FACILITY-1);
fPtr=fopen("cost.txt","r");
int cost[FACILITY][FACILITY] = {0};
int temp[ttt][3]; // cost
for(int i=0;i<ttt;i++){
fscanf(fPtr , "%d %d %d" , &temp[i][0], &temp[i][1], &temp[i][2]);
}
fclose(fPtr);
for(int i=0;i<ttt;i++){ // 2 dimention cost
cost[ temp[i][0]-1 ][ temp[i][1]-1] = temp[i][2];
}
printf("cost: \n");
for(int i=0;i<FACILITY;i++){ // 2 dimention cost
for(int j=0;j<FACILITY;j++){
printf("%d ", cost[i][j]);
}
printf("\n");
}
int *Gcost;
cudaMalloc((void**)&Gcost, FACILITY*FACILITY*sizeof(int));
cudaMemcpy(Gcost, cost, FACILITY*FACILITY*sizeof(int), cudaMemcpyHostToDevice);
for(int gggggg=0;gggggg<GENERATION;gggggg++){ // generation
printf("\n*****%d的generation*****\n", gggggg);
int *GA;
short int *GB;
cudaMalloc((void**)&GA, ISLAND*POPULATION*FACILITY*sizeof(int));
cudaMemcpy(GA, data, ISLAND*POPULATION*FACILITY*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(short int));
cudaMemcpy(GB, bay, ISLAND*POPULATION*(FACILITY-1)*sizeof(short int), cudaMemcpyHostToDevice);
float *Gposition;
cudaMalloc((void**)&Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float));
// int *Gposition2;
// cudaMalloc((void**)&Gposition2, ISLAND*POPULATION*FACILITY*2*sizeof(int));
int g=ISLAND, b=POPULATION;
// int m=g*b;
calPosition<<<g, b>>>(GA, GB, Gposition);
float position[ISLAND*POPULATION*FACILITY*2];
// int position2[ISLAND*POPULATION*FACILITY*2];
// int data2[ISLAND*POPULATION*FACILITY];
// short int bay2[ISLAND*POPULATION*(FACILITY-1)]; //bay
//
// cudaMemcpy(data2, GA, ISLAND*POPULATION*FACILITY*sizeof(int), cudaMemcpyDeviceToHost);
//
// printf("data2\n");
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// for(int f=0;f<FACILITY;f++){
// printf("%d ", data[i*POPULATION*FACILITY+p*FACILITY+f]);
// }
// printf("\n");
// }
// printf("\n");
// }
// cudaMemcpy(bay2, GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(short int), cudaMemcpyDeviceToHost);
// printf("bay2\n");
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// for(int f=0;f<FACILITY-1;f++){
// printf("%d ", bay2[i*POPULATION*FACILITY+p*(FACILITY-1)+f]);
// }
// printf("\n");
// }
// printf("\n");
// }
cudaMemcpy(position, Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float), cudaMemcpyDeviceToHost);
// print position
// for(int i=0;i<ISLAND;i++){
// printf("island%d \n", i);
// for(int p=0;p<POPULATION;p++){
// printf("po%d = \n",p);
// for(int f=0;f<FACILITY;f++){
// for(int k=0;k<2;k++){
// printf("%f ", position[i*POPULATION*FACILITY*2+p*FACILITY*2+f*2+k]);
// }
// printf("\n");
// }
// }
// }
// for(int i=0;i<ISLAND*POPULATION*FACILITY*2;i++){
// printf("%f ", position[i]);
// }
printf("\n");
float distance[ISLAND*POPULATION*FACILITY*FACILITY] = {0};
float *Gdistance;
cudaMalloc((void**)&Gdistance, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float));
calDistance<<<g, b>>>(GA, Gposition, Gdistance);
cudaMemcpy(distance, Gdistance, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float), cudaMemcpyDeviceToHost);
printf("\ncalculate distance end\n");
// print distance
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("po%d: \n", p);
// for(int f=0;f<FACILITY;f++){
// for(int j=0;j<FACILITY;j++){
// printf("%f ", distance[ i*POPULATION*FACILITY*FACILITY + p*FACILITY*FACILITY + f*FACILITY + j ]);
// }
// printf("\n");
// }
// }
// }
float totalCost[ISLAND][POPULATION][FACILITY][FACILITY] = {0.0};
float *GtotalCost;
cudaMalloc((void**)&GtotalCost, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float));
calTotalcost<<<g, b>>>(Gdistance, Gcost, GtotalCost);
cudaMemcpy(totalCost, GtotalCost, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float), cudaMemcpyDeviceToHost);
// print totalCost
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("po%d: \n", p);
// for(int f=0;f<FACILITY;f++){
// for(int j=0;j<FACILITY;j++){
// printf("%f ", totalCost[i][p][f][j]);
// }
// printf("\n");
// }
// }
// }
float *GsumCost;
float sumCost[ISLAND][POPULATION]={0.0};
cudaMalloc((void**)&GsumCost, ISLAND*POPULATION*sizeof(float));
float *GminCost;
float minCost[ISLAND][2];
cudaMalloc((void**)&GminCost, ISLAND*2*sizeof(float));
calOF<<<g, b>>>(GsumCost, GminCost, GtotalCost);
cudaMemcpy(sumCost, GsumCost, ISLAND*POPULATION*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(minCost, GminCost, ISLAND*2*sizeof(float), cudaMemcpyDeviceToHost);
// printf("\n");
// for(int i=0;i<ISLAND;i++){
// printf("第%d島嶼: \n", i);
// for(int p=0;p<POPULATION;p++){
// printf("%d: ", p);
// printf("sum = %f", sumCost[i][p]);
// printf("\n");
// }
// }
int data2[ISLAND][POPULATION][FACILITY]; // facility
int *Gdata2;
cudaMalloc((void**)&Gdata2, ISLAND*POPULATION*FACILITY*sizeof(int));
short int bay2[ISLAND][POPULATION][FACILITY]; //bay
short int *Gbay2;
cudaMalloc((void**)&Gbay2, ISLAND*POPULATION*FACILITY*sizeof(short int));
float probability[ISLAND][POPULATION] = {0.0}; // �U�Ӿ��v
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("pro%f \n", probability[i][p]);
// }
// }
float *Gprobability;
cudaMalloc((void**)&Gprobability, ISLAND*POPULATION*sizeof(float));
float totalPro[ISLAND] = {0.0}; // �`(�����˼�)
float *GtotalPro;
cudaMalloc((void**)&GtotalPro, ISLAND*sizeof(float));
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
totalPro[i] = totalPro[i] + (1.0 / sumCost[i][p]);
// printf("%f %f\n", totalPro[i], (1.0 / sumCost[i][p]));
}
}
cudaMemcpy(GtotalPro, totalPro, ISLAND*sizeof(float), cudaMemcpyHostToDevice);
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("%f %f\n", totalPro[i], (1.0 / sumCost[i][p]));
// }
// }
calProbability<<<ISLAND, POPULATION>>>(Gprobability, GtotalPro, GsumCost);
cudaMemcpy(probability, Gprobability, ISLAND*POPULATION*sizeof(float), cudaMemcpyDeviceToHost);
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("%f %f %f \n", probability[i][p], (1.0 / sumCost[i][p]), totalPro[i]);
// }
// }
float probability2[ISLAND][POPULATION] = {0.0};
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
for(int j=0;j<=p;j++){
probability2[i][p] += probability[i][j];
}
}
}
float *Gprobability2;
cudaMalloc((void**)&Gprobability2, ISLAND*POPULATION*sizeof(float));
cudaMemcpy(Gprobability2, probability2, ISLAND*POPULATION*sizeof(float), cudaMemcpyHostToDevice);
// print probability2 (Roulette)
// printf("probability2\n");
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("%f ", probability2[i][p]);
// }
// }
int *Gtem, *Gtem2, *Gyes, *Gsss;// choose two to crossover and if yes or not and choose area
int tem[ISLAND*POPULATION], tem2[ISLAND*POPULATION], yes[ISLAND*POPULATION], sss[ISLAND*POPULATION];
cudaMalloc((void**)&Gtem, ISLAND*POPULATION*sizeof(int));
cudaMalloc((void**)&Gtem2, ISLAND*POPULATION*sizeof(int));
cudaMalloc((void**)&Gyes, ISLAND*POPULATION*sizeof(int));
cudaMalloc((void**)&Gsss, ISLAND*POPULATION*sizeof(int));
int *GmutaYes, *GmutaTem, *GmutaTem2;
int mutaYes[ISLAND*POPULATION], mutaTem[ISLAND*POPULATION], mutaTem2[ISLAND*POPULATION];
cudaMalloc((void**)&GmutaYes, ISLAND*POPULATION*sizeof(int));
cudaMalloc((void**)&GmutaTem, ISLAND*POPULATION*sizeof(int));
cudaMalloc((void**)&GmutaTem2, ISLAND*POPULATION*sizeof(int));
for(int i=0;i<ISLAND*POPULATION;i++){
tem[i] = rand(); // first change
tem2[i] = rand(); // second change
yes[i] = rand(); // crossover or not
sss[i] = rand(); // bay to crossover
mutaYes[i] = rand(); // mutation or not
mutaTem[i] = rand(); // first to change
mutaTem2[i] = rand(); // second to change
// printf("%d %d %d %d %d %d %d\n", tem[i], tem2[i], yes[i], sss[i], mutaYes[i], mutaTem[i], mutaTem2[i]);
}
cudaMemcpy(Gtem, tem, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(Gtem2, tem2, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(Gyes, yes, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(Gsss, sss, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(GmutaYes, mutaYes, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(GmutaTem, mutaTem, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(GmutaTem2, mutaTem2, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
int *Gcount;
cudaMalloc((void**)&Gcount, 10*sizeof(int));
int *GetP, *GetP2;
cudaMalloc((void**)&GetP, 10*sizeof(int));
cudaMalloc((void**)&GetP2, 10*sizeof(int));
int getP[10], getP2[10];
float *Gtest;
cudaMalloc((void**)&Gtest, 10*sizeof(float));
float test[10] = {0.0};
crossOver<<<ISLAND, POPULATION / 2>>>(Gprobability2, GA, GB, Gdata2, Gbay2, Gtem, Gtem2, Gyes, Gsss, Gcount, GetP, GetP2, Gtest);
cudaMemcpy(data2, Gdata2, ISLAND*POPULATION*FACILITY*sizeof(int), cudaMemcpyDeviceToHost);
int count[10] = {0};
cudaMemcpy(tem, Gtem, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(tem2, Gtem2, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(yes, Gyes, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sss, Gsss, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(count, Gcount, 10*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(getP, GetP, 10*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(getP2, GetP2, 10*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(test, Gtest, 10*sizeof(int), cudaMemcpyDeviceToHost);
// mutation facility
printf("\nready to mutation\n");
mutation<<<ISLAND, POPULATION>>>(Gdata2, GmutaYes, GmutaTem, GmutaTem2);
cudaMemcpy(mutaYes, GmutaYes, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mutaTem, GmutaTem, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mutaTem2, GmutaTem2, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
int *GmutaBayYes, *GmutaBayTem;
int mutaBayYes[ISLAND*POPULATION], mutaBayTem[ISLAND*POPULATION];
cudaMalloc((void**)&GmutaBayYes, ISLAND*POPULATION*sizeof(int));
cudaMalloc((void**)&GmutaBayTem, ISLAND*POPULATION*sizeof(int));
cudaMemcpy(GmutaBayYes, mutaBayYes, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(GmutaBayTem, mutaBayTem, ISLAND*POPULATION*sizeof(int), cudaMemcpyHostToDevice);
mutationBay<<<ISLAND, POPULATION>>>(Gbay2, GmutaBayYes, GmutaBayTem);
cudaMemcpy(mutaBayYes, GmutaBayYes, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mutaBayTem, GmutaBayTem, ISLAND*POPULATION*sizeof(int), cudaMemcpyDeviceToHost);
// migration
if( (gggggg+1) % MIGRATION == 0 && (gggggg+1) != 0 && ISLAND > 1){
printf("***migration***\n");
int temp3[ISLAND][POPULATION/2][FACILITY];
short temp4[ISLAND][POPULATION/2][FACILITY-1];
int indexCost[ISLAND][POPULATION];
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
indexCost[i][p] = p;
}
}
// bubble sort
float temp;
for(int k=0;k<ISLAND;k++){
for(int i=POPULATION-1; i>=1; i--){
for(int j=0; j<=i-1; j++){
if(sumCost[k][j] > sumCost[k][j+1]){
int temp2 = indexCost[k][j];
indexCost[k][j] = indexCost[k][j+1];
indexCost[k][j+1] = temp2;
}
}
}
}
// print sorted index
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("%d ", indexCost[i][p]);
// }
// printf("\n");
// }
int countP = 0;
for(int i=0;i<ISLAND;i++){
while(countP < INDIVIDUAL){
for(int p=0;p<POPULATION;p++){
if(p == indexCost[i][countP]){
for(int f=0;f<FACILITY;f++){
temp3[i][countP][f] = data2[i][p][f];
}
for(int f=0;f<FACILITY-1;f++){
temp4[i][countP][f] = bay2[i][p][f];
}
countP++;
break;
}
} // population end
}
countP = 0;
} // island end
for(int i=0;i<ISLAND;i++){
if(i==0){
for(int k=0;k<POPULATION/2;k++){
int backP = indexCost[ISLAND-1][k];
int frontP = indexCost[i][k];
for(int f=0;f<FACILITY;f++){
data2[i][frontP][f] = temp3[ISLAND-1][backP][f];
}
for(int f=0;f<FACILITY-1;f++){
bay2[i][frontP][f] = temp4[ISLAND-1][backP][f];
}
}
}else {
for(int k=0;k<POPULATION/2;k++){
int backP = indexCost[i-1][k];
int frontP = indexCost[i][k];
int p = indexCost[i][k];
for(int f=0;f<FACILITY;f++){
data2[i][frontP][f] = temp3[ISLAND-1][backP][f];
}
for(int f=0;f<FACILITY-1;f++){
bay2[i][frontP][f] = temp4[ISLAND-1][backP][f];
}
}
} // else end
} // for end
} // if migration end
// printf("count: \n");
// for(int i=0;i<10;i++){
// printf("%d ", count[i]);
// }
// printf("\nget: \n");
// for(int i=0;i<10;i++){
// printf("%d %d\n", getP[i], getP2[i]);
// }
// printf("\ntest: \n");
// for(int i=0;i<10;i++){
// printf("%f\n", test[i]);
// }
// printf("\nTEM: \n");
// for(int i=0;i<20;i++){
// printf("%d %d %d %d\n", tem[i], tem2[i], yes[i], sss[i]);
// }
//
// printf("\nmutation: \n");
// for(int i=0;i<20;i++){
// printf("%d %d %d\n", mutaYes[i], mutaTem[i], mutaTem2[i]);
// }
float answer[3];
answer[0] = 0;
answer[1] = 0;
answer[2] = sumCost[0][0];
for(int i=0;i<ISLAND;i++){
// printf("第%d島嶼(OF): \n", i);
for(int p=0;p<POPULATION;p++){
// printf("%f ", sumCost[i][p]);
if(sumCost[i][p] < answer[2]){
answer[0] = i;
answer[1] = p;
answer[2] = sumCost[i][p];
}
// printf("\n");
}
}
printf("最小: %.0f %.0f = %f\n", answer[0], answer[1], answer[2]);
// for(int i=0;i<ISLAND;i++){
// for(int p=0;p<POPULATION;p++){
// printf("\n交配結果(data2)%d\n", p);
// for(int f=0;f<FACILITY;f++){
// printf("%d ", data2[i][p][f]);
// }
// printf("\n");
// }
// }
// parent to child
// printf("***chile to parent!!!***\n");
for(int i=0;i<ISLAND;i++){
// printf("island%d\n", i);
for(int p=0;p<POPULATION;p++){
for(int f=0;f<FACILITY;f++){
data[i][p][f] = data2[i][p][f];
// printf("%d ", data[i][p][f]);
}
// printf("\n");
}
}
// 子代BAY
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
for(int f=0;f<FACILITY-1;f++){
bay[i][p][f] = bay2[i][p][f];
}
}
}
cudaFree(GA);
cudaFree(GB);
cudaFree(Gdata2);
cudaFree(Gbay2);
cudaFree(GsumCost);
cudaFree(GminCost);
cudaFree(GtotalCost);
cudaFree(Gtem);
cudaFree(Gtem2);
cudaFree(GetP);
cudaFree(GetP2);
cudaFree(Gtest);
cudaFree(Gprobability);
cudaFree(Gprobability2);
cudaFree(Gyes);
cudaFree(Gsss);
cudaFree(GmutaYes);
cudaFree(GmutaTem);
cudaFree(GmutaTem2);
cudaFree(Gdistance);
cudaFree(Gposition);
} // GENERATION 結束
END = clock();
printf("程式執行所花費: %lf S\n", (double)clock()/CLOCKS_PER_SEC);
printf("進行運算所花費的時間: %lf S\n", (END - START) / CLOCKS_PER_SEC);
// cout << endl << "程式執行所花費:" << (double)clock()/CLOCKS_PER_SEC << " S" ;
// cout << endl << "進行運算所花費的時間:" << (END - START) / CLOCKS_PER_SEC << " S" << endl;
return 0;
}
// Randomly permute the facility array in place.
// NOTE(review): this is the naive swap-with-random-index shuffle, not
// Fisher-Yates, so the resulting permutations are slightly biased.
void shuffle(int* facility) {
    for (int i = 0; i < FACILITY; i++) {
        int j = rand() % FACILITY;
        int swap = facility[i];
        facility[i] = facility[j];
        facility[j] = swap;
    }
}
|
23,209 | #include <stdio.h>
#include <time.h>
#define ADIABATIC_GAMMA (5.0 / 3.0)
#define min2(a, b) (a) < (b) ? (a) : (b)
#define max2(a, b) (a) > (b) ? (a) : (b)
typedef double real;
/* Recover primitive variables (rho, u1, u2, p) from conserved variables
 * (D, S1, S2, tau) for special-relativistic hydrodynamics via a Newton
 * iteration on the pressure. prim[3] is used as the initial pressure
 * guess, so prim must hold a valid previous state on entry.
 * NOTE(review): newton_iter_max is declared `real` but compared against
 * an int counter; this works (50.0 == 50) but is fragile. */
__host__ __device__ void conserved_to_primitive(const real *cons, real *prim)
{
    const real newton_iter_max = 50;
    const real error_tolerance = 1e-12 * cons[0]; /* tolerance scaled by mass density */
    const real gm = ADIABATIC_GAMMA;
    const real m = cons[0];                                /* lab-frame density D */
    const real tau = cons[3];                              /* energy minus rest mass */
    const real ss = cons[1] * cons[1] + cons[2] * cons[2]; /* |S|^2 */
    int iteration = 0;
    real p = prim[3]; /* initial guess: previous pressure */
    real w0;
    while (1) {
        const real et = tau + p + m;                     /* total energy + pressure */
        const real b2 = min2(ss / et / et, 1.0 - 1e-10); /* beta^2, clamped below 1 */
        const real w2 = 1.0 / (1.0 - b2);                /* Lorentz factor squared */
        const real w = sqrt(w2);
        const real e = (tau + m * (1.0 - w) + p * (1.0 - w2)) / (m * w); /* specific internal energy */
        const real d = m / w;           /* rest-frame density */
        const real h = 1.0 + e + p / d; /* specific enthalpy */
        const real a2 = gm * p / (d * h); /* sound speed squared */
        const real f = d * e * (gm - 1.0) - p; /* residual of p = (gm-1) rho e */
        const real g = b2 * a2 - 1.0;          /* approximate df/dp */
        p -= f / g;                            /* Newton step */
        if (fabs(f) < error_tolerance || iteration == newton_iter_max) {
            w0 = w;
            break;
        }
        iteration += 1;
    }
    prim[0] = m / w0;
    prim[1] = w0 * cons[1] / (tau + m + p); /* gamma * beta components */
    prim[2] = w0 * cons[2] / (tau + m + p);
    prim[3] = p;
}
/* Squared magnitude of the spatial four-velocity components (gamma*beta). */
__host__ __device__ real primitive_to_gamma_beta_squared(const real *prim)
{
    const real ux = prim[1];
    const real uy = prim[2];
    return ux * ux + uy * uy;
}
/* Lorentz factor W = sqrt(1 + u.u), with u.u computed inline. */
__host__ __device__ real primitive_to_lorentz_factor(const real *prim)
{
    const real uu = prim[1] * prim[1] + prim[2] * prim[2];
    return sqrt(1.0 + uu);
}
/* Gamma-beta component along `direction` (0 = x, 1 = y; else 0). */
__host__ __device__ real primitive_to_gamma_beta_component(const real *prim, int direction)
{
    if (direction == 0)
        return prim[1];
    if (direction == 1)
        return prim[2];
    return 0.0;
}
/* Coordinate velocity component along `direction`: (gamma*beta)/gamma. */
__host__ __device__ real primitive_to_beta_component(const real *prim, int direction)
{
    const real lorentz = primitive_to_lorentz_factor(prim);
    if (direction == 0)
        return prim[1] / lorentz;
    if (direction == 1)
        return prim[2] / lorentz;
    return 0.0;
}
/* Enthalpy density rho*h = rho + p * gamma / (gamma - 1). */
__host__ __device__ real primitive_to_enthalpy_density(const real* prim)
{
    return prim[0] + prim[3] * (1.0 + 1.0 / (ADIABATIC_GAMMA - 1.0));
}
/* Convert primitive (rho, u1, u2, p) to conserved (D, S1, S2, tau).
 * Fix: the original qualifier list was `__host__ __device__ __host__`;
 * the duplicated __host__ is at best redundant and rejected by some
 * compiler versions. */
__host__ __device__ void primitive_to_conserved(const real *prim, real *cons)
{
    const real rho = prim[0];
    const real u1 = prim[1];
    const real u2 = prim[2];
    const real pre = prim[3];
    const real w = primitive_to_lorentz_factor(prim);
    const real h = primitive_to_enthalpy_density(prim) / rho; /* specific enthalpy */
    const real m = rho * w;  /* lab-frame density D */
    cons[0] = m;
    cons[1] = m * h * u1;
    cons[2] = m * h * u2;
    cons[3] = m * (h * w - 1.0) - pre; /* tau: energy minus rest mass */
}
/* Flux of each conserved quantity through a face whose normal is along
 * `direction` (0 = x, 1 = y). */
__host__ __device__ void primitive_to_flux_vector(const real *prim, real *flux, int direction)
{
    const real vn = primitive_to_beta_component(prim, direction); /* normal coordinate velocity */
    const real pre = prim[3];
    real cons[4];
    primitive_to_conserved(prim, cons);
    flux[0] = vn * cons[0];
    flux[1] = vn * cons[1] + pre * (direction == 0); /* pressure only on the normal component */
    flux[2] = vn * cons[2] + pre * (direction == 1);
    flux[3] = vn * cons[3] + pre * vn;
}
/* Relativistic sound speed squared: gamma * p / (rho h). */
__host__ __device__ real primitive_to_sound_speed_squared(const real *prim)
{
    const real rho_h = primitive_to_enthalpy_density(prim);
    return ADIABATIC_GAMMA * prim[3] / rho_h;
}
/* Outermost (left/right) characteristic speeds for relativistic hydro
 * along `direction`, written to wavespeeds[0] / wavespeeds[1].
 * NOTE(review): vn = sqrt(v2) discards the sign of the normal velocity,
 * so these formulas implicitly assume vn >= 0 — confirm this is
 * intended for the HLLE min/max that consumes them. */
__host__ __device__ void primitive_to_outer_wavespeeds(const real *prim, real *wavespeeds, int direction)
{
    const real a2 = primitive_to_sound_speed_squared(prim);
    const real un = primitive_to_gamma_beta_component(prim, direction);
    const real uu = primitive_to_gamma_beta_squared(prim);
    const real vv = uu / (1.0 + uu);      /* total velocity squared */
    const real v2 = un * un / (1.0 + uu); /* normal velocity squared */
    const real vn = sqrt(v2);
    const real k0 = sqrt(a2 * (1.0 - vv) * (1.0 - vv * a2 - v2 * (1.0 - a2)));
    wavespeeds[0] = (vn * (1.0 - a2) - k0) / (1.0 - vv * a2);
    wavespeeds[1] = (vn * (1.0 - a2) + k0) / (1.0 - vv * a2);
}
/* HLLE approximate Riemann solver: compute the interface flux from the
 * left/right primitive states, bounded by the outermost wavespeeds.
 * am <= 0 <= ap by construction, so ap - am > 0 unless both vanish
 * (degenerate state) — presumably excluded by the physics; verify. */
__device__ void riemann_hlle(const real *pl, const real *pr, real *flux, int direction)
{
    real ul[4];
    real ur[4];
    real fl[4];
    real fr[4];
    real al[2];
    real ar[2];
    primitive_to_conserved(pl, ul);
    primitive_to_conserved(pr, ur);
    primitive_to_flux_vector(pl, fl, direction);
    primitive_to_flux_vector(pr, fr, direction);
    primitive_to_outer_wavespeeds(pl, al, direction);
    primitive_to_outer_wavespeeds(pr, ar, direction);
    const real am = min2(0.0, min2(al[0], ar[0])); /* leftmost speed, clamped <= 0 */
    const real ap = max2(0.0, max2(al[1], ar[1])); /* rightmost speed, clamped >= 0 */
    for (int i = 0; i < 4; ++i)
    {
        flux[i] = (fl[i] * ap - fr[i] * am - (ul[i] - ur[i]) * ap * am) / (ap - am);
    }
}
/* Fill a Sod-like shock-tube initial condition on [x0, x1]:
 * (rho, p) = (1.0, 1.0) left of the domain midpoint, (0.1, 0.125) right.
 * Fix: zone centers are now x0 + (i + 0.5) * dx; the original used
 * (i + 0.5) * dx, which misplaces the interface whenever x0 != 0.
 * (The in-file caller uses x0 = 0, so its output is unchanged.) */
void initial_primitive(real *primitive, int num_zones, real x0, real x1)
{
    real dx = (x1 - x0) / num_zones;
    for (int i = 0; i < num_zones; ++i)
    {
        real x = x0 + (i + 0.5) * dx; /* zone center in domain coordinates */
        real *prim = &primitive[i * 4];
        if (x < 0.5 * (x0 + x1))
        {
            prim[0] = 1.0;
            prim[1] = 0.0;
            prim[2] = 0.0;
            prim[3] = 1.0;
        }
        else
        {
            prim[0] = 0.1;
            prim[1] = 0.0;
            prim[2] = 0.0;
            prim[3] = 0.125;
        }
    }
}
/* Solver state: zone count, domain bounds, and device arrays holding
 * 4 fields per zone (primitive, conserved) and 4 per face (flux). */
struct UpdateStruct
{
    int num_zones;
    real x0;          /* left domain edge */
    real x1;          /* right domain edge */
    real *primitive;  /* device: num_zones * 4 */
    real *conserved;  /* device: num_zones * 4 */
    real *flux;       /* device: (num_zones + 1) * 4 (one per face) */
};
/* Allocate device storage for num_zones zones (4 fields each) plus
 * num_zones + 1 face fluxes.
 * NOTE(review): cudaMalloc return values are not checked; a failed
 * allocation will only surface later as a kernel fault. */
struct UpdateStruct update_struct_new(int num_zones, real x0, real x1)
{
    struct UpdateStruct update;
    update.num_zones = num_zones;
    update.x0 = x0;
    update.x1 = x1;
    cudaMalloc(&update.primitive, num_zones * 4 * sizeof(real));
    cudaMalloc(&update.conserved, num_zones * 4 * sizeof(real));
    cudaMalloc(&update.flux, (num_zones + 1) * 4 * sizeof(real));
    return update;
}
/* Release the device arrays owned by an UpdateStruct. */
void update_struct_del(struct UpdateStruct update)
{
    cudaFree(update.primitive);
    cudaFree(update.conserved);
    cudaFree(update.flux);
}
/* Upload a host primitive array to the device, deriving and uploading
 * the matching conserved array at the same time so both device buffers
 * start consistent. */
void update_struct_set_primitive(struct UpdateStruct update, const real *primitive_host)
{
    real *conserved_host = (real*) malloc(update.num_zones * 4 * sizeof(real));
    for (int i = 0; i < update.num_zones; ++i)
    {
        const real *prim = &primitive_host[4 * i];
        /* */ real *cons = &conserved_host[4 * i];
        primitive_to_conserved(prim, cons);
    }
    cudaMemcpy(
        update.primitive,
        primitive_host,
        update.num_zones * 4 * sizeof(real),
        cudaMemcpyHostToDevice
    );
    cudaMemcpy(
        update.conserved,
        conserved_host,
        update.num_zones * 4 * sizeof(real),
        cudaMemcpyHostToDevice
    );
    free(conserved_host);
}
/* Download the device primitive array into a caller-provided host
 * buffer (blocking copy, so no extra synchronization is needed). */
void update_struct_get_primitive(struct UpdateStruct update, real *primitive_host)
{
    cudaMemcpy(primitive_host,
        update.primitive,
        update.num_zones * 4 * sizeof(real),
        cudaMemcpyDeviceToHost
    );
}
/* One thread per face (num_zones + 1 faces). Boundary faces clamp the
 * missing neighbor to the edge zone, i.e. zero-gradient boundaries. */
__global__ void update_struct_do_compute_flux(UpdateStruct update)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= update.num_zones + 1)
        return; /* guard the grid tail */
    int il = i - 1; /* zone left of face i */
    int ir = i;     /* zone right of face i */
    if (il == -1)
        il += 1;    /* clamp at left boundary */
    if (ir == update.num_zones)
        ir -= 1;    /* clamp at right boundary */
    const real *pl = &update.primitive[4 * il];
    const real *pr = &update.primitive[4 * ir];
    real *flux = &update.flux[4 * i];
    riemann_hlle(pl, pr, flux, 0);
}
/* One thread per zone: first-order finite-volume update of the
 * conserved state from the face fluxes, then recover primitives
 * (conserved_to_primitive uses the zone's previous pressure as the
 * Newton guess, which is why prim is passed in-place). */
__global__ void update_struct_do_advance_cons(UpdateStruct update, real dt)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= update.num_zones)
        return; /* guard the grid tail */
    const real dx = (update.x1 - update.x0) / update.num_zones;
    const real *fl = &update.flux[4 * (i + 0)]; /* left face */
    const real *fr = &update.flux[4 * (i + 1)]; /* right face */
    real *cons = &update.conserved[4 * i];
    real *prim = &update.primitive[4 * i];
    for (int q = 0; q < 4; ++q)
    {
        cons[q] -= (fr[q] - fl[q]) * dt / dx;
    }
    conserved_to_primitive(cons, prim);
}
/* Driver: evolve a 1-D relativistic shock tube to t = 0.1 and dump the
 * result to sr1d.dat.
 * Fixes over the original:
 *  - cudaDeviceSynchronize() before stopping the timer: kernel launches
 *    are asynchronous, so the original Mzps figure measured only launch
 *    overhead, not kernel execution;
 *  - fopen() result is checked before writing. */
int main()
{
    const int num_zones = 1 << 16;
    const int block_size = 32;
    const int fold = 100; /* iterations per timing sample */
    const real x0 = 0.0;
    const real x1 = 1.0;
    const real dx = (x1 - x0) / num_zones;
    real *primitive = (real*) malloc(num_zones * 4 * sizeof(real));
    struct UpdateStruct update = update_struct_new(num_zones, x0, x1);
    initial_primitive(primitive, num_zones, x0, x1);
    update_struct_set_primitive(update, primitive);
    int iteration = 0;
    real time = 0.0;
    real dt = dx * 0.1;
    while (time < 0.1)
    {
        clock_t start = clock();
        for (int i = 0; i < fold; ++i)
        {
            update_struct_do_compute_flux<<<num_zones / block_size + 1, block_size>>>(update);
            update_struct_do_advance_cons<<<num_zones / block_size + 0, block_size>>>(update, dt);
            time += dt;
            iteration += 1;
        }
        /* launches above are asynchronous: wait so the timer covers
         * actual kernel execution, not just launch overhead */
        cudaDeviceSynchronize();
        clock_t end = clock();
        real seconds = ((real) (end - start)) / CLOCKS_PER_SEC;
        real mzps = (num_zones / 1e6) / seconds * fold; /* million zone-updates per second */
        printf("[%d] t=%.3e Mzps=%.2f\n", iteration, time, mzps);
    }
    update_struct_get_primitive(update, primitive);
    update_struct_del(update);
    FILE* outfile = fopen("sr1d.dat", "w");
    if (outfile == NULL)
    {
        printf("could not open sr1d.dat\n");
        free(primitive);
        return 1;
    }
    for (int i = 0; i < num_zones; ++i)
    {
        real *prim = &primitive[i * 4];
        real x = (i + 0.5) * dx;
        fprintf(outfile, "%f %f %f %f\n", x, prim[0], prim[1], prim[3]);
    }
    fclose(outfile);
    free(primitive);
    cudaError_t error = cudaGetLastError();
    if (error)
    {
        printf("%s\n", cudaGetErrorString(error));
    }
    return 0;
}
|
23,210 | #include <cuda_runtime.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
// Error-checking wrapper for CUDA runtime calls: print the failure
// location and reason, then exit.
// Fixes: `cudaSucess` -> `cudaSuccess` and `__FIEL__` -> `__FILE__`
// (neither original identifier exists, so any expansion of this macro
// failed to compile). The stray trailing backslash after the closing
// brace is also removed — it silently spliced the following source
// line into the macro body.
#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if(error != cudaSuccess) \
    { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code:%d, reason: %s\n", error, \
                cudaGetErrorString(error)); \
        exit(1); \
    } \
}
// ホストで計算した値とGPUで計算した値が同一かチェックする
// Compare host and GPU result arrays element-wise within a small
// tolerance, printing the first mismatch (or a success message).
// Fix: uses fabs() instead of abs() — abs() resolves to the integer
// overload in C (and, depending on included headers, in C++ too),
// truncating any sub-1.0 difference to zero and silently reporting
// mismatched arrays as equal.
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0E-8; // absolute tolerance
    bool match = 1;
    for(int i = 0; i < N; i++){
        if(fabs(hostRef[i] - gpuRef[i]) > epsilon){
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i], gpuRef[i],i);
            break;
        }
    }
    if(match){
        printf("Arrays match \n\n");
    }
}
// データ初期化
// 入力された配列要素を初期化する
// Fill ip[0..size-1] with pseudo-random floats in [0, 25.5], seeding
// the generator from the current time on every call.
void initialData(float *ip, int size){
    time_t seed;
    srand((unsigned int)time(&seed));
    for(int idx = 0; idx < size; idx++){
        ip[idx] = (float)(rand() & 0xFF) / 10.0f;
    }
}
// ホスト側でベクトルの和を計算
// Reference CPU implementation: C[i] = A[i] + B[i] for i in [0, N).
void sumArraysOnHost(float *A, float *B, float *C, const int N){
    for(int i = 0; i < N; ++i)
        C[i] = A[i] + B[i];
}
// GPU 側でベクトルの和を計算
// カーネル関数の定義
// sumArraysOnHost に対して配列を操作するloopが存在しない!!
// 配列のループの代わりに複数のthread で計算する
// GPU vector add: one thread per element. blockIdx is ignored, so this
// is only valid for a single-block launch with exactly N threads.
__global__ void sumArrayOnGPU(float *A, float *B, float *C){
    int tid = threadIdx.x;
    C[tid] = A[tid] + B[tid];
}
// Host driver: allocate two 32-element vectors, add them on the GPU
// with a single block and on the CPU, then verify the results agree.
int main(int argc, char** argv){
    printf("%s Starting...\n", argv[0]);
    int dev = 0; // device setup
    cudaSetDevice(dev); // use device (GPU card) number 0 (the first one)
    // set the vector data size
    int nElem = 32;
    printf("Vector size %d\n", nElem);
    // allocate host-side memory
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);
    // initialize the arrays on the host side
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    // obtain global memory on the device side
    float *d_A, *d_B, *d_C;
    // form: cudaMalloc( void **devPtr, size_t size)
    // the input is a double pointer to void
    // (the reference book is inconsistent about this notation)
    cudaMalloc((void**)&d_A, nBytes);
    cudaMalloc((void**)&d_B, nBytes);
    cudaMalloc((void**)&d_C, nBytes);
    // transfer data from host to device
    // CPU -> GPU
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice);
    // invoke the kernel from the host side
    dim3 block(nElem);
    dim3 grid(1);
    sumArrayOnGPU<<<grid, block>>>(d_A, d_B, d_C);
    printf("Execution configure << %d, %d >>\n", grid.x, block.x);
    // copy the kernel result back to the host
    // (cudaMemcpy blocks, so it also synchronizes with the kernel)
    cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
    // add the vectors on the host to verify the result
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    // check the device result against the host result
    checkResult(hostRef, gpuRef, nElem);
    // free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    cudaDeviceReset();
    return(0);
}
|
23,211 | //
// Created by root on 2020/11/23.
//
#include "stdio.h"
#include "cuda_runtime.h"
#define NSTREAM 4
#define n_repeat 32
// Element-wise vector add with a bounds guard. The n_repeat loop
// recomputes the same sum repeatedly; it changes nothing about the
// result and only lengthens kernel runtime — presumably so the
// copy/compute overlap is visible in a profiler timeline.
__global__ void sumArrays(float *A, float *B, float *C, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        for (int i = 0; i < n_repeat; i++) {
            C[idx] = A[idx] + B[idx];
        }
    }
}
// Demonstrates breadth-first copy/compute overlap across NSTREAM
// streams on a 256-element vector add.
// Fixes over the original:
//  - the host printed host_ref with the D2H cudaMemcpyAsync calls
//    still in flight (a data race); every stream is now synchronized
//    before the results are read;
//  - streams, device memory, and pinned host memory are released.
int main() {
    int nElem = 256;
    int nBytes = nElem * sizeof(float);
    float *gpu_ref;
    float *host_ref;
    float *d_A;
    float *d_B;
    float *h_A;
    float *h_B;
    cudaHostAlloc((void **) &host_ref, nBytes, cudaHostAllocDefault); // Async copy needs pinned host memory
    cudaMalloc(&gpu_ref, nBytes);
    cudaMalloc(&d_A, nBytes);
    cudaMalloc(&d_B, nBytes);
    cudaHostAlloc((void **) &h_A, nBytes, cudaHostAllocDefault);
    cudaHostAlloc((void **) &h_B, nBytes, cudaHostAllocDefault);
    for (int i = 0; i < nElem; i++) {
        h_A[i] = i;
        h_B[i] = i + 1;
    }
    int iElem = nElem / NSTREAM; // elements handled per stream
    int iBytes = iElem * sizeof(float);
    cudaStream_t *streams = (cudaStream_t *) malloc(NSTREAM * sizeof(cudaStream_t));
    for (int i = 0; i < NSTREAM; i++) {
        cudaStreamCreate(&streams[i]);
    }
    dim3 block(1);
    dim3 grid(iElem);
    // Breadth first: issue each stage across all streams before the next stage
    for (int i = 0; i < NSTREAM; i++) {
        int ioffset = i * iElem;
        cudaMemcpyAsync(&d_A[ioffset], &h_A[ioffset], iBytes, cudaMemcpyHostToDevice, streams[i]);
    }
    for (int i = 0; i < NSTREAM; i++) {
        int ioffset = i * iElem;
        cudaMemcpyAsync(&d_B[ioffset], &h_B[ioffset], iBytes, cudaMemcpyHostToDevice, streams[i]);
    }
    for (int i = 0; i < NSTREAM; i++) {
        int ioffset = i * iElem;
        sumArrays<<<grid, block, 0, streams[i]>>>(&d_A[ioffset], &d_B[ioffset], &gpu_ref[ioffset], iElem);
    }
    for (int i = 0; i < NSTREAM; i++) {
        int ioffset = i * iElem;
        cudaMemcpyAsync(&host_ref[ioffset], &gpu_ref[ioffset], iBytes, cudaMemcpyDeviceToHost, streams[i]);
    }
    // Wait for all in-flight work before reading host_ref on the host.
    for (int i = 0; i < NSTREAM; i++) {
        cudaStreamSynchronize(streams[i]);
    }
    for (int i = 0; i < nElem; i++) {
        printf("%.2f\t", host_ref[i]);
    }
    // Release streams and memory (the original leaked all of these).
    for (int i = 0; i < NSTREAM; i++) {
        cudaStreamDestroy(streams[i]);
    }
    free(streams);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(gpu_ref);
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(host_ref);
    return 0;
}
|
23,212 | #include "shared.cuh"
// Per-particle accessor bundle: component handles for position and
// direction, plus the distance to advance this step.
// NOTE(review): move_impl dereferences pos.x / dir.x, so Point's
// members are pointers into particle storage — confirm in shared.cuh.
struct ParticleRef {
    Point pos;
    Point dir;
    double nextdist;
};
// Build a ParticleRef for particle i via the ParticleView accessors.
inline __device__ ParticleRef make_ref(const ParticleView &view, int i) {
    return {view.get_pos(i), view.get_dir(i), view.get_nextdist(i)};
}
// Advance one particle by nextdist along its direction vector.
// All six inputs are read before any position component is written,
// matching the original load-then-store ordering.
__device__ inline void move_impl(const ParticleRef ref) {
    const double px = *ref.pos.x;
    const double py = *ref.pos.y;
    const double pz = *ref.pos.z;
    const double dx = *ref.dir.x;
    const double dy = *ref.dir.y;
    const double dz = *ref.dir.z;
    const double s = ref.nextdist;
    *ref.pos.x = px + dx * s;
    *ref.pos.y = py + dy * s;
    *ref.pos.z = pz + dz * s;
}
// One thread per particle: advance each particle along its direction.
__global__ void move(ParticleView view) {
    int i = thread_id();
    if (i >= view.size) return; // guard the grid tail
    move_impl(make_ref(view, i));
}
|
23,213 | #include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
// Naive Hillis-Steele inclusive scan operating directly on global
// memory; single-block launches only (indexes by threadIdx.x).
// Fixes over the original:
//  - the loop bound was `sizeof(d_in)`, i.e. the size of the POINTER
//    (8 on 64-bit hosts), not the element count; it only worked
//    because the caller scanned exactly 8 elements. The count is now
//    an explicit parameter whose default reproduces the old behavior
//    for existing two-argument call sites;
//  - __syncthreads() added after the initial copy and after each
//    write phase, closing write->read races between iterations.
__global__ void global_scan_kernel(float* d_out, float* d_in, int n = (int)sizeof(float*))
{
    int idx = threadIdx.x;
    d_out[idx] = d_in[idx];
    __syncthreads(); // all elements copied before neighbors are read
    float out = 0.00f;
    for (int stride = 1; stride < n; stride *= 2)
    {
        if (idx - stride >= 0){
            out = d_out[idx] + d_out[idx - stride];
        }
        __syncthreads(); // all reads done before any write
        if (idx - stride >= 0){
            d_out[idx] = out;
            out = 0.00f;
        }
        __syncthreads(); // all writes done before the next iteration reads
    }
}
// Hillis-Steele inclusive scan staged through dynamic shared memory
// (launch with ARRAY_SIZE * sizeof(float) of dynamic shared memory);
// single-block launches only.
// Fix: the loop bound was `sizeof(d_in)` — the size of the POINTER
// (8 on 64-bit hosts), not the element count — which only worked
// because the caller scanned exactly 8 elements. The count is now an
// explicit parameter whose default reproduces the old behavior for
// existing two-argument call sites.
__global__ void shmem_scan_kernel(float* d_out, float* d_in, int n = (int)sizeof(float*))
{
    extern __shared__ float d_temp[];
    int idx = threadIdx.x;
    float out = 0.00f;
    d_temp[idx] = d_in[idx];
    __syncthreads();
    for (int i = 1; i < n; i *= 2){
        if (idx - i >= 0){
            out = d_temp[idx] + d_temp[idx - i];
        }
        __syncthreads(); // all reads done before any write
        if (idx - i >= 0){
            d_temp[idx] = out;
            out = 0.00f;
        }
        __syncthreads(); // all writes done before the next iteration
    }
    d_out[idx] = d_temp[idx];
}
// Driver: run the shared-memory scan on an 8-element array and print
// the result.
// NOTE(review): the scan kernels bound their loops with
// sizeof(pointer) == 8, which matches ARRAY_SIZE only by coincidence;
// changing ARRAY_SIZE without fixing the kernels silently breaks the
// scan.
int main(int argc, char** argv)
{
    const int ARRAY_SIZE = 8;
    const int SIZE = ARRAY_SIZE * sizeof(float);
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++){
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];
    float* d_in;
    float* d_out;
    cudaMalloc((void**) &d_in, SIZE);
    cudaMalloc((void**) &d_out, SIZE);
    cudaMemcpy(d_in, h_in, SIZE, cudaMemcpyHostToDevice);
    //int threads = ARRAY_SIZE;
    //int blocks = int(threads / 1024) + 1;
    //printf("Num of blocks: %d\n", blocks);
    //global_scan_kernel<<<blocks, threads>>>(d_out, d_in);
    // third launch argument: dynamic shared memory for d_temp[]
    shmem_scan_kernel<<<1, ARRAY_SIZE, ARRAY_SIZE * sizeof(float)>>>(d_out, d_in);
    cudaMemcpy(h_out, d_out, SIZE, cudaMemcpyDeviceToHost);
    for (int i = 0; i < ARRAY_SIZE; i++){
        printf("%f", h_out[i]);
        printf(((i%4) != 3) ? "\t" : "\n");
    }
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
23,214 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <algorithm>
#include <stdlib.h>
#define N 40
#define GAP -2
#define MATCH 1
#define MISMATCH -1
//#include "kernels.h"
using namespace std ;
__device__ volatile int g_mutex;
__device__ volatile int g_mutex_sync;
// Software grid-wide barrier: one representative thread per block
// atomically bumps a global counter and spins until it reaches
// goalVal; the rest of the block then joins via __syncthreads().
// NOTE(review): g_mutex_sync is never reset (the reset is commented
// out), so this barrier works at most once per process; if more
// increments occur than goalVal, late spinners never observe equality
// and deadlock. The commented-out __threadfence() also means prior
// global writes are not guaranteed visible to other blocks.
__device__ void __gpu_sync(int goalVal)
{
    //thread ID in a block
    int tid_in_block = threadIdx.x * blockDim.y + threadIdx.y;
    // only thread 0 is used for synchronization
    if (tid_in_block == 0)
    {
        atomicAdd((int *)&g_mutex_sync, 1);
        while(g_mutex_sync != goalVal)
        {
            //Do nothing here
        }
        //g_mutex = 0 ;
    }
    //__threadfence();
    __syncthreads();
}
// After a software grid barrier, write gridDim.x into the cell this
// thread owns. l1/l2 are currently unused.
// NOTE(review): __gpu_sync(1) is only correct for a launch with
// exactly one block (one counter increment); larger grids deadlock.
__global__ void calc_matrix(int *d_mat , int l1 , int l2)
{
    int row = (blockIdx.y * blockDim.y) + threadIdx.y ;
    int col = (blockIdx.x * blockDim.x) + threadIdx.x ;
    int offset = col + row * blockDim.x * gridDim.x ; // row-major flat index
    __gpu_sync(1);
    d_mat[offset] = gridDim.x ;
}
void cuda_error_check(cudaError_t err , const char *msg );
// Host driver for the grid-sync demo. Fixes over the original:
//  - the launch arguments were swapped (<<<dimBlock, dimGrid>>>): the
//    first execution-configuration parameter is the GRID, the second
//    the BLOCK. With the swapped order, 256 one-thread blocks all
//    entered __gpu_sync(1), whose counter passes 1 and can never
//    release them all;
//  - d_mat is zero-initialized so unwritten entries print as 0
//    instead of uninitialized memory;
//  - the heap-allocated matrix is released before exit.
int main()
{
    const int NN = 64 ;
    const int L1 = NN ;
    const int L2 = NN ;
    int *mat = new int[L1*L2] ;
    // ################################# Parallel #####################################################
    int *d_mat ;
    cuda_error_check(cudaSetDevice(0) , "cudaSetDevice failed!" );
    cuda_error_check(cudaMalloc((void **)&d_mat , L1 * L2 * sizeof(int)),"cudaMalloc Failed!");
    cuda_error_check(cudaMemset(d_mat , 0 , L1 * L2 * sizeof(int)),"cudaMemset Failed!");
    int BLOCK_SIZE = 16;
    int GRID_SIZE = 1 ;
    dim3 dimBlock(BLOCK_SIZE , BLOCK_SIZE );
    dim3 dimGrid(GRID_SIZE , 1);
    calc_matrix<<< dimGrid , dimBlock >>> ( d_mat , L1 , L2 ) ; // grid first, block second
    printf("\nKernel Complete Size : %d %d : %d ", L1 ,L2 , L1*L2 );
    cuda_error_check(cudaMemcpy(mat , d_mat ,L1* L2 * sizeof(int) , cudaMemcpyDeviceToHost),"cudaMemcpy D-H failed! 1");
    printf("\nCudamemcpy D-H Complete");
    cudaFree(d_mat);
    // #################################################################################################
    printf("\nMatrix: \n");
    cout << " ";
    for( int j = 0; j < L1; j++ )
    {
        //cout << seq_2[ j ] << " ";
    }
    cout << "\n";
    for( int i = 0; i < L2; i++ )
    {
        if( i > 0 )
        {
            //cout << seq_1[ i-1 ] << " ";
        }
        for( int j = 0; j < L1; j++ )
        {
            cout.width(3);
            cout << mat[i*L1 + j] << " ";
        }
        cout << endl;
    }
    delete [] mat;
    return 0;
    // #################################################################################################
}
/* Abort with a diagnostic message when a CUDA call returned an error;
 * no-op on cudaSuccess. */
void cuda_error_check(cudaError_t err , const char *msg )
{
    if(err == cudaSuccess)
        return;
    printf("The error is %s, %s \n", cudaGetErrorString(err), msg );
    exit(1);
}
|
23,215 | #define _NTHREAD 512
#define _NBLOCK 65535
#include<cuda.h>
__global__ void _AFFINE_KERNEL(int* ,int ,int* ,int ,int ,int ,int ,int ,int );
#include<stdio.h>
#include<stdlib.h>
// Host driver: copy two 20-element arrays to the device, run the
// generated affine kernel over its tile loop, and copy `block` back.
// Fixes over the original:
//  - _DEV_quadrant was allocated but never freed (leak);
//  - unused locals j, k removed;
//  - explicit return 0.
int main()
{
    int block[20],quadrant[20],i;
    for(i=0;i<20;i++)
    {
        block[i]=2*i;
        quadrant[i]=3*i;
    }
    int _SZ_quadrant_1 = 20;
    int _SZ_block_1 = 20;
    int *_DEV_quadrant;
    cudaMalloc((void**) &_DEV_quadrant, sizeof(int)*_SZ_quadrant_1);
    cudaMemcpy(_DEV_quadrant, quadrant, sizeof(int)*_SZ_quadrant_1, cudaMemcpyHostToDevice);
    int *_DEV_block;
    cudaMalloc((void**) &_DEV_block, sizeof(int)*_SZ_block_1);
    cudaMemcpy(_DEV_block, block, sizeof(int)*_SZ_block_1, cudaMemcpyHostToDevice);
    int _NUM_THREADS = 20;
    float _NUM_BLOCKS=1;
    int _NUM_TILE=1;
    dim3 _THREADS(512);
    dim3 _BLOCKS(1);
    // Derive launch geometry: cap threads per block at _NTHREAD, then
    // blocks per grid at _NBLOCK, tiling whatever remains.
    if(_NUM_THREADS < _NTHREAD)
    {
        _THREADS.x=_NUM_THREADS;
    }
    else {
        _THREADS.x=_NTHREAD;
        _NUM_BLOCKS=(_NUM_THREADS % _NTHREAD == 0)?(_NUM_THREADS/_NTHREAD):((_NUM_THREADS/_NTHREAD)+1);
        if(_NUM_BLOCKS<_NBLOCK)
            _BLOCKS.x=_NUM_BLOCKS;
        else {
            _BLOCKS.x=_NBLOCK;
            int temp=_NUM_BLOCKS;
            _NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1);
        }
    }
    int _CUDA_TILE;
    // Outer loop advances the affine window by 15; inner loop covers tiles.
    for(i=0;i<20;i+=15)
        for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++)
        {
            _AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_quadrant, _SZ_quadrant_1, _DEV_block, _SZ_block_1, 1, i, 0, 20, _CUDA_TILE);
            cudaDeviceSynchronize();
        }
    cudaMemcpy(block, _DEV_block, sizeof(int)*_SZ_block_1, cudaMemcpyDeviceToHost);
    cudaFree(_DEV_block);
    cudaFree(_DEV_quadrant);
    return 0;
}
// Generated affine-loop kernel: for each thread index i inside the
// current 15-wide window, copy block[i] forward by 15 and clear the
// shifted quadrant entry.
// Fix: the original wrote block[15+i] / quadrant[15+i] without any
// upper-bound check, so with 20-element arrays indices up to 34 were
// written out of bounds (undefined behavior / device memory
// corruption). Writes are now guarded by the array sizes.
__global__ void _AFFINE_KERNEL(int* quadrant,int _SZ_quadrant_1,int* block,int _SZ_block_1,int phi_count, int CUDA_i, int CUDA_L_i,int CUDA_U_i, int _CUDA_TILE)
{
    int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x;
    if((CUDA_i<=i)&&(i<(CUDA_i+15))&&(i<CUDA_U_i)
       &&((15+i) < _SZ_block_1)&&((15+i) < _SZ_quadrant_1)){
        block[15+i]=block[i];
        quadrant[15+i]=0;
    }
}
|
23,216 | #include<iostream>
#include<time.h>
using namespace std;
// Element-wise matrix addition. Expected launch layout: grid.y indexes
// rows, threadIdx.x indexes columns, so blockDim.x is the row width.
__global__ void Matrix_Add(int* d_A, int* d_B, int* d_Sum)
{
    int row = blockIdx.y;
    int col = threadIdx.x;
    int idx = row * blockDim.x + col;
    d_Sum[idx] = d_A[idx] + d_B[idx];
}
// Add two 4x4 matrices on the GPU and the CPU and time both.
// Fixes over the original:
//  - cudaDeviceSynchronize() before stopping the GPU timer: kernel
//    launches are asynchronous, so the original measured only launch
//    overhead, not execution;
//  - the report strings claimed "500 by 500" while the matrices are
//    Rows x Cols = 4 x 4, and now state the unit (clock ticks).
int main()
{
    const int Rows = 4;
    const int Cols = 4;
    const int Size = Rows * Cols * sizeof(int);
    int h_A[Rows][Cols], h_B[Rows][Cols], h_Sum[Rows][Cols];
    for(int i=0; i<Rows; i++)
    {
        for(int j=0; j<Cols; j++)
        {
            h_A[i][j] = (i * Cols) + j + 1;
            h_B[i][j] = (i * Cols) + j + 1;
        }
    }
    int *d_A, *d_B, *d_Sum;
    clock_t start, end;
    cudaMalloc((void**)&d_A, Size);
    cudaMalloc((void**)&d_B, Size);
    cudaMalloc((void**)&d_Sum, Size);
    cudaMemcpy(d_A, h_A, Size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, Size, cudaMemcpyHostToDevice);
    start = clock();
    Matrix_Add<<<dim3(1, Rows, 1), dim3(Cols, 1, 1)>>>(d_A, d_B, d_Sum);
    cudaDeviceSynchronize(); // launch is async: wait so the timer covers execution
    end = clock();
    cudaMemcpy(h_Sum, d_Sum, Size, cudaMemcpyDeviceToHost);
    cout << "\nGPU time required for addition of two 4 by 4 matrices (clock ticks) : " << (double)(end - start) << endl;
    for(int i=0; i<Rows; i++)
    {
        for(int j=0; j<Cols; j++)
            cout << h_Sum[i][j] << "\t";
        cout << endl;
    }
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Sum);
    int h_Sum_CPU[Rows][Cols];
    start = clock();
    for(int i=0; i<Rows; i++)
    {
        for(int j=0; j<Cols; j++)
            h_Sum_CPU[i][j] = h_A[i][j] + h_B[i][j];
    }
    end = clock();
    cout << "\nCPU time required for addition of two 4 by 4 matrices (clock ticks) : " << (double)(end - start) << endl;
}
23,217 | #include "includes.h"
// Horizontal 1-D "valid" convolution: one output element per
// grid-stride iteration over the n outputs.
// Assumes y is zero-initialized by the caller, since results are
// accumulated with += — TODO confirm against the caller.
// Index map: oW = iW - kL + 1 is the output row width; x_offset maps
// output index i to the start of its window in the input row; w_offset
// selects a kL-long filter per plane of oW*iH outputs — presumably one
// filter per image/channel plane; verify against the caller.
__global__ void conv_horizontal_naive_output(const int n, float *y, const float *x, const float *w, const int iH, const int iW, const int kL)
{
    for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < n; i += blockDim.x*gridDim.x) {
        int oW = iW - kL + 1;
        int x_offset = (i/oW)*iW + i%oW;
        int w_offset = (i/(oW*iH))*kL;
        for (int k = 0; k < kL; k++) {
            y[i] += w[w_offset + k]*x[x_offset + k];
        }
    }
}
23,218 | /*--------------------------------------------------------------------*/
/* CUDA special utility Library */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
static cudaError_t crc;
/*--------------------------------------------------------------------*/
/* allocate nsize floats of GPU global memory; the device pointer is
   returned in *g_f, and *irc is set to 1 on failure */
extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) {
   void *devptr;
   crc = cudaMalloc(&devptr,sizeof(float)*nsize);
   if (crc != cudaSuccess) {
      printf("cudaMalloc float Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *g_f = (float *)devptr;
}
/*--------------------------------------------------------------------*/
/* allocate nsize ints of GPU global memory; the device pointer is
   returned in *g_i, and *irc is set to 1 on failure */
extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) {
   void *devptr;
   crc = cudaMalloc(&devptr,sizeof(int)*nsize);
   if (crc != cudaSuccess) {
      printf("cudaMalloc int Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *g_i = (int *)devptr;
}
/*--------------------------------------------------------------------*/
/* allocate nsize float2 elements of GPU global memory; the device
   pointer is returned in *g_c, and *irc is set to 1 on failure */
extern "C" void gpu_callocate(float2 **g_c, int nsize, int *irc) {
   void *devptr;
   crc = cudaMalloc(&devptr,sizeof(float2)*nsize);
   if (crc != cudaSuccess) {
      printf("cudaMalloc float2 Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *g_c = (float2 *)devptr;
}
/*--------------------------------------------------------------------*/
/* Release GPU global memory previously obtained from gpu_*allocate.
   *irc becomes 1 if cudaFree reports an error. */
extern "C" void gpu_deallocate(void *g_d, int *irc) {
   crc = cudaFree(g_d);
   if (crc != cudaSuccess) {
      printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc));
      *irc = 1;
   }
}
/*--------------------------------------------------------------------*/
/* Allocate nsize floats of page-locked (pinned) host memory; pointer
   returned via *h_f.  *irc becomes 1 on failure. */
extern "C" void hpl_fallocate(float **h_f, int nsize, int *irc) {
   void *hptr = NULL;
   crc = cudaMallocHost(&hptr,sizeof(float)*nsize);
   if (crc != cudaSuccess) {
      printf("cudaMallocHost float Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *h_f = (float *)hptr;
}
/*--------------------------------------------------------------------*/
/* Allocate nsize float2 elements of page-locked (pinned) host memory;
   pointer returned via *h_c.  *irc becomes 1 on failure. */
extern "C" void hpl_callocate(float2 **h_c, int nsize, int *irc) {
   void *hptr = NULL;
   crc = cudaMallocHost(&hptr,sizeof(float2)*nsize);
   if (crc != cudaSuccess) {
      printf("cudaMallocHost float2 Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *h_c = (float2 *)hptr;
}
/*--------------------------------------------------------------------*/
/* Release page-locked host memory obtained from hpl_*allocate.
   *irc becomes 1 if cudaFreeHost reports an error. */
extern "C" void hpl_deallocate(void *h_d, int *irc) {
   crc = cudaFreeHost(h_d);
   if (crc != cudaSuccess) {
      printf("cudaFreeHost Error=%d:%s\n",crc,cudaGetErrorString(crc));
      *irc = 1;
   }
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
/* Fortran interface: allocate global float memory on the GPU and return
   the device address packed into an unsigned long. */
extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize,
                               int *irc) {
   float *fptr;
   gpu_fallocate(&fptr,*nsize,irc);
/* FIX: cast to the declared type of *gp_f.  The old (long) cast was
   signed and can change/truncate the address pattern on platforms where
   long is narrower than a pointer. */
   *gp_f = (unsigned long )fptr;
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface: allocate global integer memory on the GPU and return
   the device address packed into an unsigned long. */
extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize,
                               int *irc) {
   int *iptr;
   gpu_iallocate(&iptr,*nsize,irc);
/* FIX: unsigned cast matching *gp_i (was a signed (long) cast). */
   *gp_i = (unsigned long )iptr;
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface: allocate global float2 memory on the GPU and return
   the device address packed into an unsigned long. */
extern "C" void gpu_callocate_(unsigned long *gp_f, int *nsize,
                               int *irc) {
   float2 *fptr;
   gpu_callocate(&fptr,*nsize,irc);
/* FIX: unsigned cast matching *gp_f (was a signed (long) cast). */
   *gp_f = (unsigned long )fptr;
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface: free the GPU allocation whose address is stored in
   *gp_d, then null the handle so Fortran cannot reuse it. */
extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) {
   void *dptr = (void *)*gp_d;
   gpu_deallocate(dptr,irc);
   *gp_d = 0;
}
/*--------------------------------------------------------------------*/
/* Fortran interface: free page-locked host memory.  The Fortran-side
   pointer should be nullified by the caller afterwards. */
extern "C" void hpl_deallocate_(void *h_d, int *irc) {
   hpl_deallocate(h_d,irc);
}
|
23,219 | #include "includes.h"
__global__ void __transpose(float *in, int instride, float *out, int outstride, int nrows, int ncols) {
/* Tiled out-of-place matrix transpose.  Input element (row, col) is read
   at in[row + col*instride] and written to out[col + row*outstride], so
   both the global read and the global write run along the fastest-varying
   index.  The grid walks the matrix in BLOCKDIM x BLOCKDIM tiles staged
   through shared memory.  BLOCKDIM comes from the including header;
   blockDim.x is expected to be BLOCKDIM — TODO confirm against launchers.
   NOTE(review): the name uses a reserved double-underscore prefix. */
int nx = BLOCKDIM * gridDim.x;
int ny = BLOCKDIM * gridDim.y;
int ix = BLOCKDIM * blockIdx.x;
int iy = BLOCKDIM * blockIdx.y;
/* +1 column of padding staggers the banks for the transposed (column)
   reads in the write-back phase. */
__shared__ float tile[BLOCKDIM][BLOCKDIM+1];
for (int yb = iy; yb < ncols; yb += ny) {
for (int xb = ix; xb < nrows; xb += nx) {
/* Stage one tile: threadIdx.x indexes the row within the tile,
   threadIdx.y strides across its columns. */
if (xb + threadIdx.x < nrows) {
int ylim = min(ncols, yb + BLOCKDIM);
for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) {
tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride];
}
}
/* Barrier is outside the divergent if: loop bounds are uniform across
   the block, so every thread reaches it. */
__syncthreads();
/* Write the tile back transposed: threadIdx.x now indexes the output's
   fast (column-of-input) dimension. */
if (yb + threadIdx.x < ncols) {
int xlim = min(nrows, xb + BLOCKDIM);
for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) {
out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x];
}
}
/* Second barrier: tile must be fully consumed before the next
   iteration overwrites it. */
__syncthreads();
}
}
} |
23,220 | // CUDA programming
// Exercise n. 06
#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#define BLOCKS 4
#define THREADS 4
// Prototype
__global__ void saxpy(float a, float *x, float *y, float *z, int N);
__host__ void ints(float *m, int N);
__host__ void print_saxpy(float a, float *x, float *y, float *z, int N);
int main(void)
{
    /* CUDA exercise 06: compute z = a*x + y over BLOCKS*THREADS elements. */
    const int N = BLOCKS * THREADS;
    const int bytes = N * sizeof(float);
    float a;                      // scalar coefficient
    float *h_x, *h_y, *h_z;       // host buffers
    float *d_x, *d_y, *d_z;       // device buffers
    // Host allocations and input setup
    h_x = (float *)malloc(bytes);
    h_y = (float *)malloc(bytes);
    h_z = (float *)malloc(bytes);
    ints(h_x, N);
    ints(h_y, N);
    a = 3.0/2.5;
    // Device allocations
    cudaMalloc((void **)&d_x, bytes);
    cudaMalloc((void **)&d_y, bytes);
    cudaMalloc((void **)&d_z, bytes);
    // Inputs to the device
    cudaMemcpy(d_x, h_x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, bytes, cudaMemcpyHostToDevice);
    // Launch the kernel and fetch the result (the blocking memcpy also
    // synchronizes with the kernel)
    saxpy<<< BLOCKS, THREADS >>>(a, d_x, d_y, d_z, N);
    cudaMemcpy(h_z, d_z, bytes, cudaMemcpyDeviceToHost);
    print_saxpy(a, h_x, h_y, h_z, N);
    // Cleanup
    free(h_x);
    free(h_y);
    free(h_z);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    return(EXIT_SUCCESS);
}
// Single-precision A*X Plus Y (on device)
__global__ void saxpy(float a, float *x, float *y, float *z, int N)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Avoid accessing beyond the end of the arrays
if(index < N)
{
z[index] = a * x[index] + y[index];
}
}
// Initialisation
__host__ void ints(float *m, int N)
{
int i;
for(i = 0; i < N; i++)
m[i] = i/(i + 1.0);
}
// Print the elements of the equation
__host__ void print_saxpy(float a, float *x, float *y, float *z, int N)
{
for(int i = 0; i < N; i++)
{
printf("%5.2f = %5.2f x %5.2f + %5.2f\n", z[i], a, x[i], y[i]);
}
printf("\n");
}
|
23,221 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
/*Kernel*/
__global__ void vectorAdd(float a[], float b[], float c[], int N) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (blockIdx.x < N && threadIdx.x < N)
c[index] = a[index] + b[index];
}
/* Host wrapper: copy A and B to the device, launch the addition kernel,
   and copy the result back into C (the blocking memcpy synchronizes). */
void vecAdd(float* A, float* B, float* C, int N)
{
    int size = N * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, size);
    cudaMalloc((void **) &d_B, size);
    cudaMalloc((void **) &d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    /* FIX: the original launched <<<N, N>>> — N*N threads (quadratic
       oversubscription) and an invalid configuration once N exceeds the
       1024-threads-per-block limit.  Use a ceil-div grid of fixed-size
       blocks; out-of-range threads must be masked by the kernel's bounds
       check. */
    int threads = 256;
    int blocks = (N + threads - 1) / threads;
    vectorAdd<<<blocks, threads>>> (d_A, d_B, d_C, N);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    // Free device memory for A, B, C
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree (d_C);
}
/* Print a label followed by the vector's elements, one per line,
   formatted with a single decimal place. */
void Print_vector(char name[], float A[], int s) {
    printf("%s\n", name);
    for (int k = 0; k < s; k++) {
        printf("%.1f ", A[k]);
        printf("\n");
    }
}
/* Read s floats from stdin into A (whitespace-separated). */
void Read_vector(float A[], int s) {
    for (int k = 0; k < s; k++)
        scanf("%f", &A[k]);
}
/* Read the vector size from argv[1] and 2*N floats from stdin, add the
   vectors on the GPU, print operands and result. */
int main(int argc, char* argv[]) {
    int N;
    int size_vector;
    float *a, *b, *c;
    /* FIX: argv[1] was dereferenced without checking argc */
    if (argc < 2) {
        printf("usage: %s <vector size>\n", argv[0]);
        return 1;
    }
    N = strtol(argv[1], NULL, 10);
    printf("size = %d", N);
    size_vector = N*sizeof(float);
    /* (the original's unused dev_a/dev_b/dev_c locals were removed) */
    a = (float*) malloc(size_vector);
    b = (float*) malloc(size_vector);
    c = (float*) malloc(size_vector);
    printf("vector A: \n");
    Read_vector(a, N);
    printf("vector B: \n");
    Read_vector(b, N);
    Print_vector("A =", a, N);
    Print_vector("B =", b, N);
    vecAdd(a,b,c,N);
    Print_vector("Result: ",c,N);
    free(a);
    free(b);
    free(c);
    return 0;
}
23,222 | extern "C" {
/* Animated sine-wave height field: one vertex per (x, y) grid point.
   Expects a 2-D launch covering width x height. */
__global__ void kernel1(
float4* pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    /* FIX: guard the grid tail — a launch whose grid overshoots the mesh
       would otherwise write past the end of pos. */
    if (x >= width || y >= height)
        return;
    // calculate uv coordinates, remapped to [-1, 1]
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;
    // calculate simple sine wave pattern
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
    // write output vertex (y-up: height in the second component)
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
/* Variant wave pattern with an exponential envelope.  Same layout
   contract as kernel1: 2-D launch covering width x height. */
__global__ void kernel2(
float4* pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    /* FIX: bounds guard against grids that overshoot the mesh. */
    if (x >= width || y >= height)
        return;
    // calculate uv coordinates, remapped to [-1, 1]
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;
    // calculate simple sine wave pattern
    float freq = 4.0f;
    /* FIX: use the single-precision expf/sinf — the double-precision
       exp/sin silently promoted the whole expression to double in a
       float kernel. */
    float efac = .5f*expf(.5f*sinf((u+v)*freq+time));
    float w = (sinf(u*freq + time) + cosf(v*freq + time)) * efac;
    // write output vertex
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
}
|
23,223 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ int IP[64];
__device__ int FP[64];
__device__ int E[48];
__device__ int P[32];
__device__ int SBox[8][64];
// Initial Permutation
int host_IP[64] = {
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
};
// Final Permutation
int host_FP[64] = {
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
};
// Expansion Function: from 32 bit to 48 bit
int host_E[48] = {
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
};
// Permutation
int host_P[32] = {
15, 6, 19, 20, 28, 11, 27, 16,
0, 14, 22, 25, 4, 17, 30, 9,
1, 7, 23, 13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10, 3, 24
};
// PC-1 (key schedule): bit positions selected from the 64-bit key to
// form the 28-bit left half C0 (0-based, MSB-first indexing).
int PC1_LEFT[28] = {
56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
};
// PC-1: bit positions forming the 28-bit right half D0.
int PC1_RIGHT[28] = {
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
};
// PC-2: compresses the rotated 56-bit (C, D) pair to a 48-bit round key.
int PC2[48] = {
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
};
// Per-round left-rotation amounts for the 16 key-schedule rounds.
int Rotations[16] = {1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1};
// Substitution Boxes
int host_SBox[8][64] = {
// S1
{
14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13
},
// S2
{
15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9
},
// S3
{
10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12
},
// S4
{
7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14
},
// S5
{
2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3
},
// S6
{
12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13
},
// S7
{
4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12
},
// S8
{
13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11
}
};
__device__
__host__
/* Generic DES bit permutation.  `data` is treated as a data_size-bit value
   (MSB first); output bit i is input bit table[i].  Returns the
   table_size-bit result right-aligned in a long long. */
long long int permutation(long long int data, int data_size, int *table, int table_size) {
    long long int result = 0;
    for (int i = 0; i < table_size; i++) {
        long long int bit = (data >> (data_size - 1 - table[i])) & 0x1;
        result = (result << 1) | bit;
    }
    return result;
}
/* Build the 16 48-bit DES round keys from a 64-bit key: PC-1 split into
   28-bit halves, per-round circular left rotations, PC-2 compression.
   For decryption the schedule is stored in reverse order.  Caller frees
   the returned malloc'd array. */
long long int *generate_sub_keys(long long int key, int decrypt) {
    const int n_keys = 16;
    const int half = 28;
    long long int *sub_keys = (long long int *) malloc(sizeof(long long int) * n_keys);
    long long int left  = permutation(key, 64, PC1_LEFT, half);
    long long int right = permutation(key, 64, PC1_RIGHT, half);
    for (int i = 0; i < n_keys; i++) {
        int rot = Rotations[i];
        /* 28-bit circular left rotate, masked back to 28 bits */
        left  = ((left  << rot) | (left  >> (half - rot))) & 0xFFFFFFF;
        right = ((right << rot) | (right >> (half - rot))) & 0xFFFFFFF;
        long long int combined = (left << half) | right;
        sub_keys[decrypt ? 15 - i : i] = permutation(combined, half * 2, PC2, 48);
    }
    return sub_keys;
}
__device__
/* Apply the eight DES S-boxes: 48-bit input -> 32-bit output.  Each 6-bit
   chunk selects a row (outer two bits) and column (inner four bits) of the
   corresponding S-box table. */
long long int substitution(long long int data) {
    long long int result = 0;
    for (int i = 0; i < 8; i++) {
        unsigned int chunk = (unsigned int)((data >> (6 * (7 - i))) & 0x3F);
        int row = ((chunk & 0x20) >> 4) | (chunk & 0x1);  // bits 5 and 0
        int col = (chunk & 0x1E) >> 1;                    // bits 4..1
        result = (result << 4) | SBox[i][(row << 4) + col];
    }
    return result;
}
__device__
/* DES round function: expand the 32-bit half (E), XOR with the round key,
   substitute through the S-boxes, then apply the P permutation. */
long int F(unsigned int c, long long int key) {
    long long int expanded = permutation((long long int) c, 32, E, 48);
    long long int s_out = substitution(expanded ^ key);
    return permutation(s_out, 32, P, 32);
}
__device__
/* Run the 16-round DES Feistel network on the 64-bit block MD[index],
   in place.  keys[] holds the 16 round keys (already reversed for
   decryption by generate_sub_keys). */
void DES(int index, long long int *MD, long long int *keys) {
long long int data = permutation(MD[index], 64, IP, 64);
unsigned int left = data >> 32;
unsigned int right = (int) data;
int i = 0;
for (; i < 16; i++) {
unsigned int buf = left ^ F(right, keys[i]);
left = right;
right = buf;
}
/* Standard DES swaps the halves before the final permutation: the
   block is reassembled as (right, left). */
data = right;
data = (data << 32) + left;
MD[index] = permutation(data, 64, FP, 64);
}
__global__
/* Each thread processes a contiguous run of `quota` 64-bit blocks,
   stopping early at the end of the buffer. */
void kernel_DES(unsigned int quota, unsigned int n_blocks, long long int *MD, long long int *keys) {
    int first = (blockIdx.x * blockDim.x + threadIdx.x) * quota;
    int i = first;
    while (i < first + quota) {
        if (i >= n_blocks) {
            return;   // past the last block: this thread is done
        }
        DES(i, MD, keys);
        i++;
    }
}
/* Copy the DES tables and data to the GPU, launch kernel_DES, and copy
   the processed blocks back into host_MD.  Passing -1 for both
   n_cuda_blocks and n_cuda_threads selects an automatic configuration. */
void runDESCuda(unsigned int n_blocks, long long int *host_MD, long long int *host_sub_keys, int n_cuda_blocks, int n_cuda_threads) {
unsigned int max_n_threads = 512;
if (n_cuda_blocks == -1 && n_cuda_threads == -1) {
n_cuda_threads = max_n_threads;
n_cuda_blocks = (n_blocks + max_n_threads - 1) / max_n_threads;
}
if (n_cuda_threads > max_n_threads) {
printf("Maximum value of the number of threads is 512. You entered : %d\n", n_cuda_threads);
return;
}
/* Permutation/S-box tables live in __device__ symbols */
cudaMemcpyToSymbol(IP, host_IP, sizeof(host_IP));
cudaMemcpyToSymbol(FP, host_FP, sizeof(host_FP));
cudaMemcpyToSymbol(E, host_E, sizeof(host_E));
cudaMemcpyToSymbol(P, host_P, sizeof(host_P));
cudaMemcpyToSymbol(SBox, host_SBox, sizeof(host_SBox));
long long int *MD, *sub_keys;
cudaMalloc((void **) &MD, sizeof(long long int) * n_blocks);
cudaMemcpy(MD, host_MD, sizeof(long long int) * n_blocks, cudaMemcpyHostToDevice);
cudaMalloc((void **) &sub_keys, sizeof(long long int) * 16);
cudaMemcpy(sub_keys, host_sub_keys, sizeof(long long int) * 16, cudaMemcpyHostToDevice);
unsigned int quota = n_blocks / (n_cuda_blocks * n_cuda_threads) + 1;
/* FIX: one DES block is 8 bytes; the original printed quota*64, which
   is the bit count, under a "bytes" label. */
printf("%u bytes per threads.\n", quota * 8);
kernel_DES<<<n_cuda_blocks, n_cuda_threads>>>(quota, n_blocks, MD, sub_keys);
cudaMemcpy(host_MD, MD, sizeof(long long int) * n_blocks, cudaMemcpyDeviceToHost);
/* FIX: IP/FP/E/P/SBox are statically declared __device__ symbols, not
   cudaMalloc allocations — passing them to cudaFree is invalid, so those
   five calls were removed.  Only the cudaMalloc'd buffers are freed. */
cudaFree(MD);
cudaFree(sub_keys);
}
unsigned int n_blocks = 0;
/* Read 8*n_blocks bytes from `in`, DES-process them on the GPU with the
   schedule derived from the 8-byte `key`, and write the result to `out`.
   Relies on the global n_blocks having been set beforehand. */
void des_with_file(int decrypt, char *in, char *out, char *key, int n_cuda_blocks, int n_cuda_threads) {
int buf_size = 8 * n_blocks;
char *buf = (char *) malloc(sizeof(char) * buf_size);
FILE *in_fp = fopen(in, "rb");
if (in_fp == NULL) {
printf("Can't open the in file :%s\n", in);
free(buf);   /* FIX: don't leak the buffer on the error path */
return;
}
/* FIX: the fread result was ignored; a short file silently processed
   uninitialized bytes. */
if (fread(buf, buf_size, 1, in_fp) != 1) {
printf("Can't read %d bytes from the in file :%s\n", buf_size, in);
fclose(in_fp);
free(buf);
return;
}
fclose(in_fp);
long long int *MD = (long long int *) malloc(sizeof(long long int) * n_blocks);
int i = 0;
int j = 0;
/* pack each 8 input bytes into one big-endian 64-bit block */
for (i = 0; i < n_blocks; i++) {
long long int block = 0;
for (j = 0; j < 8; j++) {
block = (block << 8) + (buf[(i * 8) + j] & 0xFF);
}
MD[i] = block;
}
long long int binary_key = 0;
for (i = 0; i < 8; i++) {
binary_key = (binary_key << 8) + (key[i] & 0xFF);
}
long long int *sub_keys = generate_sub_keys(binary_key, decrypt);
clock_t begin = clock();
runDESCuda(n_blocks, MD, sub_keys, n_cuda_blocks, n_cuda_threads);
clock_t end = clock();
double time_spent = (double) (end - begin) / CLOCKS_PER_SEC;
printf("CUDA time: %f\n", time_spent);
free(sub_keys);
/* unpack the processed blocks back into the byte buffer (big-endian) */
for (i = 0; i < n_blocks; i++) {
for (j = 0; j < 8; j++) {
buf[(i * 8) + (7 - j)] = ((MD[i] >> (j * 8)) & 0xFF);
}
}
FILE *out_fp = fopen(out, "wb");
if (out_fp == NULL) {
printf("Can't open the out file :%s\n", out);
free(buf);   /* FIX: release both buffers on this error path too */
free(MD);
return;
}
fwrite(buf, buf_size, 1, out_fp);
fclose(out_fp);
free(buf);
free(MD);
}
/* Encrypt `in` into `out` with the 8-byte key (forward key schedule). */
void encryption(char *in, char *out, char *key, int n_cuda_blocks, int n_cuda_threads) {
const int decrypt_mode = 0;
des_with_file(decrypt_mode, in, out, key, n_cuda_blocks, n_cuda_threads);
}
/* Decrypt `in` into `out` with the 8-byte key (reversed key schedule). */
void decryption(char *in, char *out, char *key, int n_cuda_blocks, int n_cuda_threads) {
const int decrypt_mode = 1;
des_with_file(decrypt_mode, in, out, key, n_cuda_blocks, n_cuda_threads);
}
/* argv layout: [1]=mode e|d, [2]=input, [3]=output, [4]=8-byte key,
   [5]=number of DES blocks, [6]=CUDA blocks, [7]=CUDA threads. */
int main(int argc, char** argv) {
/* FIX: the original checked argc < 6 but read argv[6] and argv[7]
   below; the usage text also omitted the key argument. */
if (argc < 8) {
printf("usage) ./des.out [e|d] <input_file> <output_file> <key(8 bytes)> <n_des_blocks> <n_cuda_blocks> <n_cuda_threads>\n");
printf("example) ./des.out e in.txt out.txt 12345678 1 -1 -1\n");
return -1;
}
int n_cuda_blocks;
int n_cuda_threads;
/* FIX: n_blocks is an unsigned int, so scan with %u (not %d) */
sscanf(argv[5], "%u", &n_blocks);
sscanf(argv[6], "%d", &n_cuda_blocks);
sscanf(argv[7], "%d", &n_cuda_threads);
switch(argv[1][0]) {
case 'e':
printf("encryption\n");
encryption(argv[2], argv[3], argv[4], n_cuda_blocks, n_cuda_threads);
break;
case 'd':
printf("decryption\n");
decryption(argv[2], argv[3], argv[4], n_cuda_blocks, n_cuda_threads);
break;
default:
printf("mode must be 'e' or 'd'\n");
}
return 0;
}
|
23,224 | //Minimal CUDA program
/* Thread 0 of every block writes its block index to r[0].  With a
   multi-block launch the blocks race on the single location and the
   surviving value is whichever block stored last (nondeterministic). */
__global__ void foo(int* r) {
if(threadIdx.x == 0) {
r[0] = blockIdx.x;
}
}
int main() {
    int* r;
    cudaMalloc(&r, sizeof(int));
    foo<<<128, 128>>>(r);
    /* FIX: kernel launches are asynchronous — without a sync the process
       could exit while the kernel runs; also release the allocation. */
    cudaDeviceSynchronize();
    cudaFree(r);
    return 0;
}
|
23,225 | /********************************************************************
sequential.cu the sequential version of NN
Input:
/usr/local/cuda-10.1/bin/nvcc -arch=compute_52 -o sequential.out sequential.cu
./sequential.out block_size activationtype // block_size = 0; activationtype=1 means sigomid and 2 means ReLU
Output:
elapsed_time - the elapsed time to perform the multiplication.
accuracy on training set and test set.
********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include<random>
using namespace std;
#define X_trn(x, y) X_trn[x * size_train + y] // 196 * 964
#define X_tst(x, y) X_tst[x * size_test + y] // 196 * 414
#define Y_trn(x, y) Y_trn[x * size_train + y] // 1 * 964
#define Y_tst(x, y) Y_tst[x * size_test + y] // 1 * 414
#define X(x, y) X[x * size_batch + y] // 196 * 964
#define Y(x, y) Y[x * size_batch + y] // 1 * 414
#define W1(x, y) W1[x * size_input + y] // 20 * 196
#define b1(x, y) b1[x * 1 + y] // 20 * 1
#define W2(x, y) W2[x * size_hidden + y] // 2 * 20
#define b2(x, y) b2[x * 1 + y] // 2 * 1
#define dW1(x, y) dW1[x * size_input + y] // 20 * 196
#define db1(x, y) db1[x * 1 + y] // 20 * 1
#define dW2(x, y) dW2[x * size_hidden + y] // 2 * 20
#define db2(x, y) db2[x * 1 + y] // 2 * 1
#define Z1(x, y) Z1[x * size_batch + y] // 20 * 964
#define A1(x, y) A1[x * size_batch + y] // 20 * 964
#define Z2(x, y) Z2[x * size_batch + y] // 2 * 964
#define A2(x, y) A2[x * size_batch + y] // 2 * 964
#define dZ1(x, y) dZ1[x * size_batch + y] // 20 * 964
#define dA1(x, y) dA1[x * size_batch + y] // 20 * 964
#define dZ2(x, y) dZ2[x * size_batch + y] // 2 * 964
#define dA2(x, y) dA2[x * size_batch + y] // 2 * 964
#define dev_X_trn(x, y) dev_X_trn[x * size_train + y] // 196 * 964
#define dev_X_tst(x, y) dev_X_tst[x * size_test + y] // 196 * 414
#define dev_Y_trn(x, y) dev_Y_trn[x * size_train + y] // 1 * 964
#define dev_Y_tst(x, y) dev_Y_tst[x * size_test + y] // 1 * 414
#define dev_X(x, y) dev_X[x * size_batch + y] // 196 * 964
#define dev_Y(x, y) dev_Y[x * size_batch + y] // 1 * 414
#define dev_W1(x, y) dev_W1[x * size_input + y] // 20 * 196
#define dev_b1(x, y) dev_b1[x * 1 + y] // 20 * 1
#define dev_W2(x, y) dev_W2[x * size_hidden + y] // 2 * 20
#define dev_b2(x, y) dev_b2[x * 1 + y] // 2 * 1
#define dev_dW1(x, y) dev_dW1[x * size_input + y] // 20 * 196
#define dev_db1(x, y) dev_db1[x * 1 + y] // 20 * 1
#define dev_dW2(x, y) dev_dW2[x * size_hidden + y] // 2 * 20
#define dev_db2(x, y) dev_db2[x * 1 + y] // 2 * 1
#define dev_Z1(x, y) dev_Z1[x * size_batch + y] // 20 * 964
#define dev_A1(x, y) dev_A1[x * size_batch + y] // 20 * 964
#define dev_Z2(x, y) dev_Z2[x * size_batch + y] // 2 * 964
#define dev_A2(x, y) dev_A2[x * size_batch + y] // 2 * 964
#define dev_dZ1(x, y) dev_dZ1[x * size_batch + y] // 20 * 964
#define dev_dA1(x, y) dev_dA1[x * size_batch + y] // 20 * 964
#define dev_dZ2(x, y) dev_dZ2[x * size_batch + y] // 2 * 964
#define dev_dA2(x, y) dev_dA2[x * size_batch + y] // 2 * 964
#define max_index(x, y) max_index[y] // 1 * 964
int size_train = 964;
int size_test = 414;
int size_batch = 0;
int size_input = 196;
int size_hidden = 20;
int size_output = 2;
int size_X_trn = 196*964;
int size_Y_trn = 1*964;
int size_X_tst = 196*414;
int size_Y_tst = 1*414;
int size_Xbatch = 0;
int size_Ybatch = 0;
int size_W1 = size_hidden*size_input;
int size_b1 = size_hidden*1;
int size_W2 = size_output*size_hidden;
int size_b2 = size_output*1;
int size_dW1 = size_hidden*size_input;
int size_db1 = size_hidden*1;
int size_dW2 = size_output*size_hidden;
int size_db2 = size_output*1;
#define size_Z1 size_hidden*size_batch
#define size_A1 size_hidden*size_batch
#define size_Z2 size_output*size_batch
#define size_A2 size_output*size_batch
#define size_dZ1 size_hidden*size_batch
#define size_dA1 size_hidden*size_batch
#define size_dZ2 size_output*size_batch
#define size_dA2 size_output*size_batch
#define size_max_index 1*size_batch
double *X_trn, *X_tst;
int *Y_trn, *Y_tst;
double *W1, *b1, *W2, *b2;
double *dW1, *db1, *dW2, *db2;
double *Z1, *A1, *Z2, *A2;
double *dZ1, *dA1, *dZ2, *dA2;
int *max_index;
/* Hidden-layer forward pass: Z1 = W1 * X + b1 (bias broadcast across the
   batch), then A1 = activation(Z1).  acti_type: 1 = Sigmoid, 2 = ReLU.
   max_row/max_col are the output dimensions (size_hidden x size_batch).
   The dev_*(r,c) accessors are file-level index-flattening macros, so
   these parameter names must not be changed. */
void HiddenLayer(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int size_input, int size_batch, int acti_type, int max_row, int max_col)
{
for(int i = 0; i < max_row; i++) {
for(int j = 0; j < max_col; j++) {
// dot product of W1 row i with X column j
double partial = 0.0;
for (int k = 0; k < size_input; k++){
partial += dev_W1(i,k) * dev_X(k,j);
}
dev_Z1(i,j) = partial + dev_b1(i,0);
// Sigmoid
if (acti_type == 1)
dev_A1(i,j) = 1 / (1 + exp(0 - dev_Z1(i,j)));
// ReLU
if (acti_type == 2) {
if (dev_Z1(i,j) < 0)
dev_A1(i,j) = 0;
if (dev_Z1(i,j) >= 0)
dev_A1(i,j) = dev_Z1(i,j);
}
}
}
}
/* Output-layer forward pass: Z2 = W2 * A1 + b2.  No activation here;
   Softmax is applied by the caller.  max_row/max_col are the output
   dimensions (size_output x size_batch).  dev_*(r,c) are file-level
   index-flattening macros, so parameter names must match them. */
void OutputLayer(double* dev_A1, double* dev_W2, double* dev_b2, double* dev_Z2, int size_hidden, int size_batch, int max_row, int max_col)
{
for(int i = 0; i < max_row; i++) {
for(int j = 0; j < max_col; j++) {
// dot product of W2 row i with A1 column j
double partial = 0.0;
for (int k = 0; k < size_hidden; k++){
partial += dev_W2(i,k) * dev_A1(k,j);
}
dev_Z2(i,j) = partial + dev_b2(i,0);
}
}
}
/* Column-wise softmax of Z2 (row x col) into A2, recording a prediction
   per column in max_index.  Encoding: max_index[c] is 1 when row 0 holds
   the column maximum and 0 otherwise — i.e. label 1 maps to output row 0
   (this matches how Back_dZ2 and cross_entropy_loss read Y;
   NOTE(review): confirm against the label encoding in the CSV data). */
void Softmax(double* Z2, int row, int col, double* A2, int* max_index)
{
int c, r;
double max = 0, sum = 0;
for (c = 0; c < col; c++) {
max = Z2(0, c);
max_index[c] = 1;
for (r = 1; r < row; r++) {
if (Z2(r, c) > max){
max = Z2(r, c);
max_index[c] = 0;
}
}
// normalize the column: A2(r,c) = exp(Z2(r,c)) / sum_r exp(Z2(r,c))
sum = 0;
for (r = 0; r < row; r++)
sum += exp(Z2(r, c));
for (r = 0; r < row; r++)
A2(r, c) = exp(Z2(r, c)) / sum;
}
return;
}
/* Mean binary cross-entropy over `col` batch columns.  A2(0,c) is read as
   the predicted probability of label 1 and A2(1,c) of label 0; Y(0,c) is
   the 0/1 target.  Note the Y/A2 accessor macros index with the global
   size_batch, so `col` is expected to equal the current batch size —
   TODO confirm at new call sites. */
double cross_entropy_loss(int* Y, double* A2, int col)
{
int c;
double loss = 0;
for(c = 0; c < col; c++) {
loss += -log(A2(0, c)) * Y(0, c) - log(A2(1, c)) * (1-Y(0, c));
}
return loss/col;
}
/* init Z and A in the host */
void initialize_ZA(int size_batch) {
Z1 = (double *) malloc(size_Z1*sizeof(double)); // 20*964
A1 = (double *) malloc(size_A1*sizeof(double)); // 20*964
Z2 = (double *) malloc(size_Z2*sizeof(double)); // 2*964
A2 = (double *) malloc(size_A2*sizeof(double)); // 2*964
dZ1 = (double *) malloc(size_dZ1*sizeof(double)); // 20*964
dA1 = (double *) malloc(size_dA1*sizeof(double)); // 20*964
dZ2 = (double *) malloc(size_dZ2*sizeof(double)); // 2*964
dA2 = (double *) malloc(size_dA2*sizeof(double)); // 2*964
max_index = (int *) malloc(size_max_index*sizeof(int)); // 1*964
memset (Z1,0, size_Z1);
memset (A1,0, size_A1);
memset (Z2,0, size_Z2);
memset (A2,0, size_A2);
memset (dZ1,0, size_dZ1);
memset (dA1,0, size_dA1);
memset (dZ2,0, size_dZ2);
memset (dA2,0, size_dA2);
memset (max_index,0,size_max_index);
}
/* Full forward pass over the selected split ("train", anything else =
   test).  Side effects: rebinds the size_batch/size_*batch globals,
   reallocates the activation buffers via initialize_ZA, and fills the
   globals Z1/A1/Z2/A2/max_index.  Y and block_size are accepted but not
   used in this sequential build. */
void forward(double* X, int* Y, string type, int acti_type, int block_size){
if(type == "train"){
size_batch = size_train;
size_Xbatch = size_X_trn;
size_Ybatch = size_Y_trn;
}
else{
size_batch = size_test;
size_Xbatch = size_X_tst;
size_Ybatch = size_Y_tst;
}
// init Z and A in the host
initialize_ZA(size_batch);
// hidden layer and activation function to get Z1 and A1
HiddenLayer(X, W1, b1, A1, Z1, size_input, size_batch, acti_type, size_hidden, size_batch);
// output layer to get Z2
OutputLayer(A1, W2, b2, Z2, size_hidden, size_batch, size_output, size_batch);
// softmax layer to get A2
Softmax(Z2, size_output, size_batch, A2, max_index);
}
/* Gradient at the output pre-activation for the 2-class softmax + CE
   loss: dZ2(0,j) = (A2(0,j) - Y(0,j)) / size_train, and dZ2(1,j) is its
   negation (equivalent to A2(1,j) - (1-Y(0,j)), since softmax columns
   sum to 1).  Only j matters — the outer i loop rewrites the same two
   rows max_row times.  dev_* are index-flattening macros. */
void Back_dZ2 (double* dev_A2, int* dev_Y_trn, double* dev_dZ2, int size_train, int size_batch, int max_row, int max_col)
{
for(int i = 0; i < max_row; i++) {
for(int j = 0; j < max_col; j++) {
// int c = threadIdx.x; // column of Z2
dev_dZ2(0, j) = (dev_A2(0, j) - dev_Y_trn(0, j)) / size_train;
dev_dZ2(1, j) = (dev_Y_trn(0, j) - dev_A2(0, j)) / size_train;
}
}
}
// dW1(20*196) = dZ1(20*964) * X(196*964)
// dW2(2*20) = dZ2(2*964) * A1(20*964)
/* Weight gradient: dev_dW[i][j] = sum_k dev_dZ[i][k] * dev_A[j][k]
   (i.e. dW = dZ · A^T over the batch dimension).  size_batch is the row
   stride of both inputs, W_col the row stride of the output. */
void Back_dW (double* dev_A, double* dev_dZ, double* dev_dW, int size_batch, int W_col, int max_row, int max_col)
{
    for (int r = 0; r < max_row; r++) {
        const double *dz_row = dev_dZ + r * size_batch;
        for (int c = 0; c < max_col; c++) {
            const double *a_row = dev_A + c * size_batch;
            double acc = 0.0;
            for (int k = 0; k < size_batch; k++)
                acc += dz_row[k] * a_row[k];
            dev_dW[r * W_col + c] = acc;
        }
    }
}
// db1(20*1) is from dZ1(20*964)
// db2(2*1) is from dZ1(2*964)
/* Bias gradient: db[r] = sum over the first `col` columns of row r of dZ.
   size_batch is dZ's row stride (col <= size_batch). */
void Back_db(double* dZ, double* db, int row, int col, int size_batch)
{
    for (int r = 0; r < row; r++) {
        const double *src = dZ + r * size_batch;
        double acc = 0;
        for (int c = 0; c < col; c++)
            acc += src[c];
        db[r] = acc;
    }
}
/* Backpropagate the output-layer gradient through W2:
   dA1(i,j) = sum_k W2(k,i) * dZ2(k,j), i.e. dA1 = W2^T · dZ2,
   with i < size_hidden and j < size_batch.  dev_* are index-flattening
   macros, so parameter names must match them. */
void Back_dA1 (double* dev_W2, double* dev_dZ2, double* dev_dA1, int size_batch, int size_hidden, int size_output, int max_row, int max_col)
{
// dA1(20*964) = dZ2(2*964) * W2(2*20)
for(int i = 0; i < max_row; i++) {
for(int j = 0; j < max_col; j++) {
double partial = 0.0;
for (int k = 0; k < size_output; k++) {
partial += dev_W2(k,i) * dev_dZ2(k,j);
}
dev_dA1(i,j) = partial;
}
}
}
/* Backprop through the hidden activation: dZ1 = dA1 * act'(Z1).
   Sigmoid (acti_type 1): act'(z) = A1*(1-A1).
   ReLU (acti_type 2): pass dA1 through where Z1 >= 0, zero where Z1 < 0.
   dev_* are index-flattening macros. */
void Back_dZ1 (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int acti_type, int max_row, int max_col)
{
for(int i = 0; i < max_row; i++) {
for(int j = 0; j < max_col; j++) {
if(acti_type == 1){ // Sigmoid
dev_dZ1(i, j) = dev_dA1(i, j) * dev_A1(i, j) * (1-dev_A1(i, j)); // dZ1 = dA1*A1*(1-A1)
}
else if(acti_type == 2) { // ReLU
if(dev_Z1(i, j) < 0)
dev_dZ1(i, j) = 0;
else
dev_dZ1(i, j) = dev_dA1(i, j); //dZ1 = dA1*Z1_mask
}
}
}
}
/* Full-batch backward pass over the training set.  Reads the globals
   filled by forward() (A1, A2, Z1, X_trn, Y_trn, W2) and fills the
   gradient globals dW1/db1/dW2/db2.  acti_type: 1 = Sigmoid, 2 = ReLU.
   block_size is unused in this sequential build. */
void backprop(int acti_type, int block_size) { // type = 1 is Sigmoid
// get dZ2
Back_dZ2(A2, Y_trn, dZ2, size_train, size_train, 1, size_train);
// get dw2  (dW2 = dZ2 · A1^T)
Back_dW(A1, dZ2, dW2, size_train, size_hidden, size_output, size_hidden);
// get db2  (row sums of dZ2)
Back_db(dZ2, db2, size_output, size_train, size_train);
// get dA1  (dA1 = W2^T · dZ2)
Back_dA1(W2, dZ2, dA1, size_train, size_hidden, size_output, size_hidden, size_train);
// get dZ1  (through the hidden activation)
Back_dZ1(dA1, A1, Z1, dZ1, size_train, acti_type, size_hidden, size_train);
// get dW1  (dW1 = dZ1 · X^T)
Back_dW(X_trn, dZ1, dW1, size_train, size_input, size_hidden, size_input);
// get b1   (row sums of dZ1)
Back_db(dZ1, db1, size_hidden, size_train, size_train);
}
/* In-place SGD step: Wb -= learn_rate * dWb, element-wise over a
   max_row x max_col matrix stored with row stride `col`. */
void update_Wb(double* dev_dWb, double* dev_Wb, int col, double learn_rate, int max_row, int max_col)
{
    for (int r = 0; r < max_row; r++) {
        for (int c = 0; c < max_col; c++) {
            const int idx = r * col + c;
            dev_Wb[idx] = dev_Wb[idx] - learn_rate * dev_dWb[idx];
        }
    }
}
/* Apply one SGD update to all four parameter tensors.  block_size is
   unused in this sequential build. */
void updateParameter(double learn_rate, int block_size)
{
    update_Wb(dW1, W1, size_input, learn_rate, size_hidden, size_input);   // W1 (20x196)
    update_Wb(db1, b1, 1, learn_rate, size_hidden, 1);                     // b1 (20x1)
    update_Wb(dW2, W2, size_hidden, learn_rate, size_output, size_hidden); // W2 (2x20)
    update_Wb(db2, b2, 1, learn_rate, size_output, 1);                     // b2 (2x1)
}
/* Parse a CSV file of doubles into `array` in row-major reading order.
   The caller must size `array` for the full file. */
void read_X(string data_path, double* array)
{
    ifstream inFile(data_path);
    string line, cell;
    int p = 0;
    while (getline(inFile, line)) {
        stringstream fields(line);
        while (getline(fields, cell, ',')) {
            array[p++] = stod(cell);
        }
    }
}
/* Parse a CSV file of labels into `array`.  Values are parsed as doubles
   and narrowed to int (so "1.0" and "1" both yield 1, as before). */
void read_Y(string data_path, int* array)
{
    ifstream inFile(data_path);
    string line, cell;
    int p = 0;
    while (getline(inFile, line)) {
        stringstream fields(line);
        while (getline(fields, cell, ',')) {
            array[p++] = stod(cell);
        }
    }
}
/* Set the value and reading data */
void read_data()
{
X_trn = (double *) malloc(size_X_trn * sizeof(double)); // 196*964
Y_trn = (int *) malloc(size_Y_trn * sizeof(int)); // 1*964
X_tst = (double *) malloc(size_X_tst * sizeof(double)); // 196*414
Y_tst = (int *) malloc(size_Y_tst * sizeof(int)); // 1*414
string X_trn_path = "X_trn.csv"; // Defined the name of cvs file
string Y_trn_path = "Y_trn.csv";
string X_tst_path = "X_tst.csv";
string Y_tst_path = "Y_tst.csv";
read_X(X_trn_path, X_trn); //Execution
read_Y(Y_trn_path, Y_trn);
read_X(X_tst_path, X_tst);
read_Y(Y_tst_path, Y_tst);
}
/* Allocate and initialize the parameters: W1, W2 ~ Uniform(-1, 1) from a
   default-seeded engine (deterministic across runs), b1, b2 = 0, and the
   gradient buffers zeroed.
   FIX: the original memsets were doubly wrong — they passed element
   counts instead of byte counts, and memset(ptr, 0.5, n) fills bytes
   with (int)0.5 == 0, not the double 0.5.  The weight/bias arrays are
   fully initialized by the loops below anyway; the gradient arrays are
   now zeroed with correct byte counts. */
void initialize_Wb() {
W1 = (double *) malloc(size_W1*sizeof(double)); // 20*196
b1 = (double *) malloc(size_b1*sizeof(double)); // 20*1
W2 = (double *) malloc(size_W2*sizeof(double)); // 2*20
b2 = (double *) malloc(size_b2*sizeof(double)); // 2*1
dW1 = (double *) malloc(size_dW1*sizeof(double)); // 20*196
db1 = (double *) malloc(size_db1*sizeof(double)); // 20*1
dW2 = (double *) malloc(size_dW2*sizeof(double)); // 2*20
db2 = (double *) malloc(size_db2*sizeof(double)); // 2*1
memset (dW1, 0, size_dW1*sizeof(double));
memset (db1, 0, size_db1*sizeof(double));
memset (dW2, 0, size_dW2*sizeof(double));
memset (db2, 0, size_db2*sizeof(double));
default_random_engine e;
uniform_real_distribution<double> u(-1,1);
for (int i = 0; i < size_W1; i++) {
W1[i] = u(e);
}
for (int i = 0; i < size_W2; i++) {
W2[i] = u(e);
}
for (int i = 0; i < size_b1; i++) {
b1[i] = 0;
}
for (int i = 0; i < size_b2; i++) {
b2[i] = 0;
}
}
/* Fraction of columns whose stored prediction equals the label Y(0,i).
   Relies on the label encoding written by Softmax into max_index
   (1 = output row 0 was the maximum).  Y/max_index are file-level
   index-flattening macros, so the parameter names must match. */
double accuracy(int* max_index, int* Y, int size_batch)
{
int i;
double count = 0;
for(i = 0; i < size_batch; i++) {
if(Y(0, i) == max_index(0, i))
count += 1;
}
return count/double(size_batch);
}
/* One full-batch training iteration: forward pass on the training split,
   backprop, SGD update; returns the cross-entropy loss on the batch. */
double train(double* X_trn, int* Y_trn, int acti_type, int block_size) {
    forward(X_trn, Y_trn, "train", acti_type, block_size);
    backprop(acti_type, block_size);    // acti_type: 1 Sigmoid, 2 ReLU
    updateParameter(0.01, block_size);  // fixed learning rate 0.01
    return cross_entropy_loss(Y_trn, A2, size_train);
}
/* Forward pass on the given split and return its classification accuracy. */
double test(double* X, int* Y, string type, int acti_type, int block_size) {
    forward(X, Y, type, acti_type, block_size);
    const int n = (type == "train") ? size_train : size_test;
    return accuracy(max_index, Y, n);
}
/* Train the 196-20-2 network for 20000 full-batch epochs, then report
   train/test accuracy.
   argv[1] = block size (threaded through but unused in this sequential
   build — kept for interface parity with the CUDA version),
   argv[2] = activation type: 1 = Sigmoid, 2 = ReLU. */
int main(int argc, char *argv[])
{
int block_size;
double loss;
double acc_trn, acc_tst;
int e;
int epochs = 20000;
int acti_type = 1;
if ( argc < 3 ){
printf(" Usage: first argument: block size \n");
printf(" second argument: activation type \n");
return -1;
} else if ( argc > 3 ) {
printf("\n Too many arguments. \n");
return -1;
} else {
block_size = atoi(argv[1]);
acti_type = atoi(argv[2]);
}
initialize_Wb();
read_data();
float elapsed_time = 0.0;
/* CUDA events are used purely as a wall-clock timer around the (CPU)
   training loop. */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(e = 0; e < epochs; e++) {
loss = train(X_trn, Y_trn, acti_type, block_size);
// printf("%f \n", loss);
// printf("the %d epoch, the training loss is: %f \n", e, loss);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf( "Elapsed Time: %.4e msec. \n", elapsed_time );
acc_trn = test(X_trn, Y_trn, "train", acti_type, block_size);
acc_tst = test(X_tst, Y_tst, "test", acti_type, block_size);
printf("the %d epoch, the training accuracy is: %f, the test accuracy is: %f\n", e, acc_trn, acc_tst);
}
|
23,226 | #include "includes.h"
__global__ void pi_optimized(float* x, float* y, int* global_count) {
__shared__ int counts[nthreads];
//int globalId = blockIdx.x * blockDim.x + nitemsperthread * threadIdx.x;
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
int thread_count=0;
for (int i=0; i<nitemsperthread; i++) {
int idx = globalId+(i*nthreads*nblocks);
if (idx < nsamples) {
if (x[idx]*x[idx] + y[idx]*y[idx] < 1.0) {
thread_count++;
}
}
}
counts[threadIdx.x] = thread_count;
__syncthreads();
if (threadIdx.x == 0) {
int block_count = 0;
for (int i=0; i<nthreads; i++) {
block_count += counts[i];
}
global_count[blockIdx.x] = block_count;
}
} |
23,227 | //Determinante de una matriz
#include<iostream>
#include<time.h>
using namespace std;
__global__
void Det1_CU(int *M, int filas, int columnas, int &suma){
//int i = blockIdx.y*blockDim.y+threadIdx.y;//filas
int j = blockIdx.x*blockDim.x+threadIdx.x;//columnas
if(j < columnas){
int k = j, aux = columnas, l = 0, mult = 1;
while(aux--){
if(k == columnas) k = 0;
mult *= M[(l*columnas)+k];
__syncthreads();//espera a que todos los hilos hagan la misma multiplicacion
k++; l++;
}
suma += mult;
}
}
__global__
void Det2_CU(int *M, int filas, int columnas, int &suma){
//int i = blockIdx.y*blockDim.y+threadIdx.y;//filas
int j = blockIdx.x*blockDim.x+threadIdx.x;//columnas
if(j < columnas){
int k = j, aux = columnas, l = filas - 1, mult = 1;
while(aux--){
if(k == columnas) k = 0;
mult *= M[(l*columnas)+k];
__syncthreads();//espera a que todos los hilos hagan la misma multiplicacion
k++; l--;
}
suma += mult;
}
}
__host__
int Det1(int *M, int filas, int columnas){
int suma = 0;
for(int j = 0; j < columnas; j++){
int k = j, aux = columnas, l = 0, mult = 1;
while(aux--){
if(k == columnas) k = 0;
mult *= M[(l*columnas)+k];
k++; l++;
}
suma += mult;
}
return suma;
}
__host__
int Det2(int *M, int filas, int columnas){
int suma = 0;
for(int j = 0; j < columnas; j++){
int k = j, aux = columnas, l = filas - 1, mult = 1;
while(aux--){
if(k == columnas) k = 0;
mult *= M[(l*columnas)+k];
k++; l--;
}
suma += mult;
}
return suma;
}
__host__
void imprime(int* A,int filas, int columnas){//imprime como si fuera una matriz
for(int i = 0; i < filas; i++){
for(int j = 0; j < columnas; j++){
cout<<A[(i*columnas)+j]<<" ";
}
cout<<endl;
}
}
__host__
void inicializa(int *A,int filas, int columnas){//inicializa arreglos
/* initialize random seed: */
srand (time(NULL));
int valor;
for(int i=0;i<filas*columnas;i++){
/* generate secret number between 1 and 10: */
valor = rand() % 10 + 1;
A[i] = valor;
}
}
__host__
void llena(int *A, int filas, int columnas){
int valor;
for(int i = 0; i < filas; i++){
for(int j = 0; j < columnas; j++){
cin >> valor;
A[(i*columnas)+j] = valor;
}
}
}
int main(void){
cudaError_t error = cudaSuccess;//Para controlar errores
int *d_matriz, *matriz;
int filas = 3, columnas = 3, h_det, d_det;
int size = filas*columnas*sizeof(int);
//----------------------CPU-------------------------
//Separamos memoria para el host
matriz = (int*)malloc(size);
//Inicializamos los valores de la matriz
//llena(matriz, filas, columnas);
inicializa(matriz, filas, columnas);
clock_t t = clock();//Iniciamos la cuenta de reloj
//imprime(matriz, filas, columnas);
//Calculamos el determinante
h_det = Det1(matriz, filas, columnas) - Det2(matriz, filas, columnas);
//cout << endl << "Determinante = " << h_det << endl;
t = clock() - t;//Terminamos la cuenta de reloj
double time_CPU = ((double)t) / CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la CPU fue: "<<time_CPU<<endl;
//---------------------GPU------------------------------
t = clock();//Iniciamos la cuenta de reloj
//Separamos memoria para el device
error = cudaMalloc((void**)&d_matriz,size);
if(error != cudaSuccess){
cout << "Error reservando memoria para d_matriz" << endl;
//return -1;
}
//Copiamos datos del host al device
error = cudaMemcpy(d_matriz,matriz,size,cudaMemcpyHostToDevice);//destino d_matriz y origen matriz
if(error != cudaSuccess){
cout << "Error copiando los datos de matriz a d_matriz" << endl;
//exit(-1);
}
//Lanzamos el kernel
dim3 dimblock(3,1,1);//solo necesitamos los hilos de las columnas
dim3 dimGrid(1,1,1);
//dim3 dimGrid(ceil((double)(columnas/32)),ceil((double)(filas/32)),1);
int ans1 = 0, ans2 = 0;
//Det1_CU<<<dimGrid,dimblock>>>(d_matriz, filas, columnas, ans1);
//Det2_CU<<<dimGrid,dimblock>>>(d_matriz, filas, columnas, ans2);
cudaDeviceSynchronize();
d_det = ans1 - ans2;
t = clock() - t;//Terminamos la cuenta de reloj
double time_GPU = ((double)t) / CLOCKS_PER_SEC;
cout<<"El tiempo transcurrido en la GPU fue: "<<time_GPU<<endl;
//------------------------------------------------------------
cout<<"El tiempo de aceleramiento fue: "<<time_CPU/time_GPU<<endl;
if(h_det == d_det) cout << "Buen cálculo" << endl;
else cout << "Mal cálculo" << endl;
//Liberamos memoria
free(matriz);
cudaFree(d_matriz);
return 0;
}
|
23,228 | #include "includes.h"
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_int8[idx] = input_f32[idx] * multipler; // 7-bit (1-bit sign)
} |
23,229 | #include "includes.h"
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void shmem_scan(float* d_out, float* d_in) {
extern __shared__ float sdata[];
int idx = threadIdx.x;
float out = 0.00f;
sdata[idx] = d_in[idx];
__syncthreads();
for (int interpre = 1; interpre < sizeof(d_in); interpre *= 2) {
if (idx - interpre >= 0) {
out = sdata[idx] + sdata[idx - interpre];
}
__syncthreads();
if (idx - interpre >= 0) {
sdata[idx] = out;
out = 0.00f;
}
}
d_out[idx] = sdata[idx];
} |
23,230 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
__global__ void TournamentKernel ( float* pVector, int stride )
{
unsigned index = 2 * ( blockIdx.x * blockDim.x + threadIdx.x) * stride ;
unsigned offset = threadIdx.x * stride;
float tmpfloat;
for ( unsigned i=1; i<=blockDim.x; i*=2 )
{
if ( offset % (stride*i) == 0 )
{
if ( pVector[index] < pVector[index+stride*i] )
{
tmpfloat = pVector[index];
pVector[index] = pVector[index+stride*i];
pVector[index+stride*i] = tmpfloat;
}
}
__syncthreads();
}
}
/*
*
* N is the number of elements in the tournament.
* we are going to choose as flat a tournament as possible,
* which means grouping 512 elements at each lowest level.
* and start by limiting ourselves to two levels.
*
* This subroutine only runs for power of 2 elements per block
* and the number of elements must either be 4 blkSize^2 or
* 8 blkSize^3.
*
*/
bool Tournament ( unsigned N, unsigned blkSize, float* pHostVector )
{
unsigned stride = 1;
unsigned BlockCount= N / 2 / blkSize;
unsigned VectorSize= N * sizeof(float);
float* pDeviceVector= 0;
assert (( N == 8 * blkSize * blkSize * blkSize ) || ( N == 4 * blkSize * blkSize ));
assert ( (N & ( N-1)) == 0 );
cudaMalloc((void**)&pDeviceVector, VectorSize);
cudaMemcpy(pDeviceVector, pHostVector, VectorSize, cudaMemcpyHostToDevice);
// OK let's start with a one-level invocation of a tournament
while ( BlockCount > 0 )
{
printf ( "BlockCount %d, BlockSize %d, Stride %d\n", BlockCount, blkSize, stride );
TournamentKernel <<<BlockCount,blkSize>>> ( pDeviceVector, stride );
stride *= blkSize * 2;
BlockCount /= blkSize *2;
}
cudaMemcpy(pHostVector, pDeviceVector, VectorSize, cudaMemcpyDeviceToHost);
return true;
}
int main ()
{
int i;
const int numels = 8 * 64;
const int blocksize = 4;
float hostvector[numels];
// Initialize the input vectors
for ( i=0; i<numels; i++ )
{
hostvector[i] = i;
}
// Call the kernel
Tournament ( numels, blocksize, hostvector );
// Check the answer
for ( i=0; i< numels; i++ )
{
printf ("Index/Value: %d/%4.4f\n", i, hostvector[i]);
}
}
|
23,231 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define N 512
__global__ void add(int *a, int *b, int *c){
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; // Use threadIdx.x for multiple threads
}
// Atribute random values to elements of a[n]
void random_ints(int *a, int n){
for(int i = 0; i < n; ++i)
a[i] = rand() %5000;
}
// Print all elements of a[n]
void printv(int *a, int n){
printf("[ ");
for(int i = 0; i < N; ++i){
printf("%d ", a[i]);
}
printf("]\n");
}
int main(){
int *a, *b, *c; // Host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // Device copies of a, b, c
int size = N * sizeof(int);
// Allocate space for device copies a, b, c
cudaMalloc((void **) &dev_a, size);
cudaMalloc((void **) &dev_b, size);
cudaMalloc((void **) &dev_c, size);
// Allocate space for host copies of a, b, c and setup input values
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
random_ints(a, N);
random_ints(b, N);
printv(a, N);
printv(b, N);
// Copy inputs to device
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// Launch add kernel on GPU
add<<<N,1>>>(dev_a, dev_b, dev_c); // N blocks, 1 thread
// Copy result back to host
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
printv(c, N);
// Clean up
free(a); free(b); free(c);
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
return 0;
} |
23,232 | #include<cuda_runtime.h>
#include<stdio.h>
__global__ void addSleep(int *v, int *r){
float v1 =(float) *v;
int ret =0;
while(ret <v1){
ret = ret+1;
}
*r=ret;
}
void sleep(int v){
int * d_v, *d_r;
cudaMalloc(&d_v, sizeof(int));
cudaMalloc(&d_r, sizeof(int));
cudaMemcpy(d_v, &v, sizeof(int), cudaMemcpyHostToDevice);
dim3 threads(32, 1);
dim3 grid(1, 1);
addSleep<<<grid, threads, 0>>>(d_v, d_r);
int r;
cudaMemcpy(&r, d_r, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n", r);
}
|
23,233 | /*
* Tiled Matrix Multiplication
* (MP2, Fall 2014, GPU Programming/Auburn University)
*
* Compile with -DTILE_WIDTH=16 (for example) to change the tile size.
* Compile with -DSEED=12 (for example) to seed the random number generator.
*/
#include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <math.h>
/* Usage message displayed when invalid command line arguments are supplied */
#define USAGE \
"MP2 generates a random (m x k) matrix M and (k x n) matrix N\n" \
"and multiplies M by N using tiled matrix multiplication.\n" \
"The values of m, k, and n must be >= 1.\n" \
"\n" \
"Usage: mp2 m k n\n"
/* Tile size -- define here if not defined using the -D compiler flag */
#ifndef TILE_WIDTH
# define TILE_WIDTH 16
#endif
/* Seed for the random number generator -- define here if not using -D */
#ifndef SEED
# define SEED 1
#endif
/* Maximum difference allowed between the GPU and CPU result matrices */
#define EPSILON 1e-2
/* If a CUDA call fails, display an error message and exit */
#define CUDA_CHECK(e) { \
cudaError_t err = (e); \
if (err != cudaSuccess) \
{ \
fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \
__FILE__, __LINE__, #e, cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
/* assert() is only supported on devices of compute capability >= 2.0 */
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
# undef assert
# define assert(arg)
#endif
/* Tiled matrix multiplication kernel */
__global__ static void matMul(float *d_M, float *d_N, float *d_P,
int m, int k, int n)
{
assert(blockDim.x == TILE_WIDTH && blockDim.y == TILE_WIDTH);
/*
*
*
* TODO: IMPLEMENT TILED MATRIX MULTIPLICATION
*
* Multiply matrix d_M by d_N, storing product in d_P.
* Use tiled matrix multiplication with shared memory.
*
*
*/
//__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
//__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x, ty = threadIdx.y;
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col= blockIdx.x * TILE_WIDTH + threadIdx.x;
float sum = 0;
//double numberoftiles =ceil(m/TILE_WIDTH);
if (m == k== n) {
for (int l=0;l<m/TILE_WIDTH ; ++l) { //iterate through tiles
for (int j=0; j< TILE_WIDTH ; ++j) { //iterate through elements in the tile
sum = sum + d_M[(row*m) + ( l*TILE_WIDTH+j)] * d_N[(l*TILE_WIDTH+j)*m + col ];
}
__syncthreads();
}
d_P[row*m +col] = sum;
} else {
for (int l=0;l<ceil((float) k/TILE_WIDTH) ; ++l) { //iterate through tiles
if (row < m && l * TILE_WIDTH + tx < k)
ds_A[ty][tx] = d_M[row *k + l * TILE_WIDTH + tx];
else
ds_A[ty][tx] = 0.0;
if (l * TILE_WIDTH + ty < k && col < n)
ds_B[ty][tx] = d_N[(l * TILE_WIDTH + ty) *n + col];
else
ds_B[ty][tx] = 0.0;
__syncthreads();
for (int j=0; j< TILE_WIDTH && j < k ; ++j) { //iterate through elements in the tile
sum = sum + ds_A[ty][j] * ds_B[j][tx];
}
__syncthreads();
}
if (row < m && col < n)
d_P[row*n+col] =sum;
}
}
/* Displays one row of the given matrix */
static void printRow(int row, float *matrix, int cols)
{
printf("[");
if (cols >= 1) printf(" %3.3f", matrix[row*cols+0]);
if (cols >= 2) printf(" %3.3f", matrix[row*cols+1]);
if (cols >= 3) printf(" %3.3f", matrix[row*cols+2]);
if (cols >= 6) printf(" ...");
if (cols >= 5) printf(" %3.3f", matrix[row*cols+(cols-2)]);
if (cols >= 4) printf(" %3.3f", matrix[row*cols+(cols-1)]);
printf(" ]\n");
}
/* Displays the given matrix */
static void printMatrix(float *matrix, int rows, int cols)
{
if (rows >= 1) printRow(0, matrix, cols);
if (rows >= 2) printRow(1, matrix, cols);
if (rows >= 3) printRow(2, matrix, cols);
if (rows >= 6) printf(" ...\n");
if (rows >= 5) printRow(rows-2, matrix, cols);
if (rows >= 4) printRow(rows-1, matrix, cols);
}
/* Program entrypoint. Invoke with three command line arguments: m k n */
int main(int argc, char **argv)
{
/* Get command line arguments; save as m, k, and n */
if (argc != 4)
{
fprintf(stderr, USAGE);
fprintf(stderr, "Expected 3 arguments; received %d.\n", argc-1);
return EXIT_FAILURE;
}
int m = atoi(argv[1]);
int k = atoi(argv[2]);
int n = atoi(argv[3]);
if (m < 1 || k < 1 || n < 1)
{
fprintf(stderr, USAGE);
fprintf(stderr, "Invalid value for m, k, or n (%d, %d, %d)\n",
m, k, n);
return EXIT_FAILURE;
}
printf("Multiplying MN = P. M is (%d x %d); N is (%d x %d); ",
m, k, k, n);
printf("using (%d x %d) tiles.\n", TILE_WIDTH, TILE_WIDTH);
/********************************************/
/* M is (m x k), N is (k x n), P is (m x n) */
/********************************************/
/* Compute number of bytes needed to stores matrices M, N, and P */
size_t bytesForM = m * k * sizeof(float);
size_t bytesForN = k * n * sizeof(float);
size_t bytesForP = m * n * sizeof(float);
/* Allocate host memory for matrices */
float *h_M, *h_N, *h_P;
h_M = (float *)malloc(bytesForM);
h_N = (float *)malloc(bytesForN);
h_P = (float *)malloc(bytesForP);
if (h_M == NULL || h_N == NULL || h_P == NULL)
{
fprintf(stderr, "Unable to allocate host memory\n");
return EXIT_FAILURE;
}
/* Allocate device memory for matrices */
float *d_M, *d_N, *d_P;
CUDA_CHECK(cudaMalloc((void **)&d_M, bytesForM));
CUDA_CHECK(cudaMalloc((void **)&d_N, bytesForN));
CUDA_CHECK(cudaMalloc((void **)&d_P, bytesForP));
/* Fill M and N with random numbers (on host) */
srand(SEED);
for (int i = 0; i < m*k; ++i)
h_M[i] = rand()/(float)RAND_MAX*10.0;
for (int i = 0; i < k*n; ++i)
h_N[i] = rand()/(float)RAND_MAX*10.0;
if (m <= 5 && k <= 5 && n <= 5)
{
printf("M =\n"); printMatrix(h_M, m, k);
printf("N =\n"); printMatrix(h_N, k, n);
}
/* Copy M and N to device global memory */
CUDA_CHECK(cudaMemcpy(d_M, h_M, bytesForM, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_N, h_N, bytesForN, cudaMemcpyHostToDevice));
/* Launch the CUDA kernel */
dim3 dimGrid((n+TILE_WIDTH-1)/TILE_WIDTH, (m+TILE_WIDTH-1)/TILE_WIDTH);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
printf("matMul called from host");
matMul<<<dimGrid, dimBlock>>>(d_M, d_N, d_P, m, k, n);
CUDA_CHECK(cudaDeviceSynchronize());
/* Copy result matrix from device global memory back to host memory */
CUDA_CHECK(cudaMemcpy(h_P, d_P, bytesForP, cudaMemcpyDeviceToHost));
printf(" product received from host");
if (m <= 5 && k <= 5 && n <= 5)
{
printf("P =\n"); printMatrix(h_P, m, n);
}
/* Verify that the result matrix is correct */
for (int row = 0; row < m; row++)
{
for (int col = 0; col < n; col++)
{
float expected = 0.0;
for (int i = 0; i < k; i++)
{
expected += h_M[row*k+i] * h_N[i*n+col];
}
float actual = h_P[row*n+col];
if (fabs(expected - actual) > EPSILON)
{
fprintf(stderr, "d_P[%d, %d] is incorrect\n",
row, col);
fprintf(stderr, " Expected: %f\n", expected);
fprintf(stderr, " Computed: %f\n", actual);
return EXIT_FAILURE;
}
}
}
/* Free device global memory */
CUDA_CHECK(cudaFree(d_M));
CUDA_CHECK(cudaFree(d_N));
CUDA_CHECK(cudaFree(d_P));
/* Free host memory */
free(h_M);
free(h_N);
free(h_P);
/* Reset the device (unnecessary if not profiling, but good practice) */
CUDA_CHECK(cudaDeviceReset());
printf("Done\n");
return EXIT_SUCCESS;
}
|
23,234 | #include <iostream>
using namespace std;
//Test
// Device code: Computes Z = aX + Y
__global__
void daxpy(double a, const double* X, const double* Y,
int arraySize, double* Z)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < arraySize)
Z[i] = a * X[i] + Y[i];
}
// Host code
void doTheKernelLaunch(double h_a, double* h_X, double* h_Y,
int arraySize, double* h_Z)
{
// Now on with the show...
size_t size = arraySize * sizeof(double);
// Allocate vectors in device memory
double* d_X;
cudaMalloc((void**)&d_X, size);
double* d_Y;
cudaMalloc((void**)&d_Y, size);
double* d_Z;
cudaMalloc((void**)&d_Z, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_X, h_X, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, h_Y, size, cudaMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid =
(arraySize + threadsPerBlock - 1) / threadsPerBlock;
daxpy<<<blocksPerGrid, threadsPerBlock>>>(h_a, d_X, d_Y, arraySize, d_Z);
// Copy result from device memory to host memory
// h_Z will contain the result in host memory
cudaMemcpy(h_Z, d_Z, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_Z);
}
double* do_daxpy(int arraySize)
{
double a = 2.0;
double* X = new double[arraySize];
double* Y = new double[arraySize];
double* Z = new double[arraySize];
for (int i=0 ; i<arraySize ; i++)
{
X[i] = 1000.0;
Y[i] = 10.0;
}
doTheKernelLaunch(a, X, Y, arraySize, Z);
for (int i=0 ; i<arraySize ; i++)
cout << Z[i] << " = " << a << " * " << X[i] << " + " << Y[i] << '\n';
delete [] X;
delete [] Y;
return Z;
}
int main()
{
// report versions
int driverVersion, runtimeVersion;
cudaError_t dv = cudaDriverGetVersion(&driverVersion);
cudaError_t rv = cudaRuntimeGetVersion(&runtimeVersion);
cout << "Driver version: " << driverVersion << "; Runtime version: "
<< runtimeVersion << "\n\n";
double* Z = do_daxpy(20);
// ...
delete [] Z;
return 0;
}
|
23,235 | __global__ void selection_k_radius_gpu(int b, int m, int k, float radius, const int* idx, const float* val, int* idx_out, float* val_out){
int batch_index = blockIdx.x;
int stride = batch_index * m * k;
idx += stride;
val += stride;
idx_out += stride;
val_out += stride;
for(int i = threadIdx.x; i < m;i += blockDim.x) {
for(int j = 0;j < k;j ++) {
if(val[i * k + j] < radius) {
idx_out[i * k + j] = idx[i * k + j];
val_out[i * k + j] = val[i * k + j];
} else {
idx_out[i * k + j] = idx[i * k ];
val_out[i * k + j] = val[i * k ];
}
}
}
}
__global__ void cube_select(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
temp_dist[j] = 1e8;
idx_out[i * 8 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _z = (tz > z);
int temp_idx = _x * 4 + _y * 2 + _z;
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
__global__ void cube_select_two(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 16;
float temp_dist[16];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 16;j ++) {
temp_dist[j] = judge_dist;
idx_out[i * 16 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _z = (tz > z);
int temp_idx = _x * 8 + _y * 4 + _z * 2;
bool flag = false;
for(int k = 0;k < 2;k ++) {
if (dist < temp_dist[temp_idx + k]) {
flag = true;
}
if (flag) {
for (int kk = 1; kk >= k + 1; kk --) {
idx_out[i * 16 + temp_idx + kk] = idx_out[i * 16 + temp_idx + kk - 1];
temp_dist[temp_idx + kk] = temp_dist[temp_idx + kk - 1];
}
idx_out[i * 16 + temp_idx + k] = j;
temp_dist[temp_idx + k] = dist;
break;
}
}
}
}
}
__global__ void cube_select_four(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 32;
float temp_dist[32];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 32;j ++) {
temp_dist[j] = judge_dist;
idx_out[i * 32 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _z = (tz > z);
int temp_idx = _x * 16 + _y * 8 + _z * 4;
bool flag = false;
for(int k = 0;k < 4;k ++) {
if (dist < temp_dist[temp_idx + k]) {
flag = true;
}
if (flag) {
for (int kk = 3; kk >= k + 1; kk --) {
idx_out[i * 32 + temp_idx + kk] = idx_out[i * 32 + temp_idx + kk - 1];
temp_dist[temp_idx + kk] = temp_dist[temp_idx + kk - 1];
}
idx_out[i * 32 + temp_idx + k] = j;
temp_dist[temp_idx + k] = dist;
break;
}
}
}
}
}
void selectionKRadiusLauncher(int b, int m, int k, float radius, const int* idx, const float* val, int* idx_out, float* val_out){
selection_k_radius_gpu<<<b,256>>>(b, m, k, radius, idx, val, idx_out, val_out);
}
void cubeSelectLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cube_select<<<b, 512>>>(b, n, radius, xyz, idx_out);
}
void cubeSelectTwoLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cube_select_two<<<b, 512>>>(b, n, radius, xyz, idx_out);
}
void cubeSelectFourLauncher(int b, int n, float radius, const float* xyz, int* idx_out) {
cube_select_four<<<b, 512>>>(b, n, radius, xyz, idx_out);
}
|
23,236 | #include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <driver_types.h>
#define M 32
__global__
void Calcu(float **od, float **ev, size_t oddpitch, size_t evenpitch)
{
//pitchの使い道がわからん。詰んだ。
int i=blockIdx.x * blockDim.x + threadIdx.x;
int j=blockIdx.y * blockDim.y + threadIdx.y;
int count;
float r=0.2;
if(i<M && j<M){
for(count=0;count<100;count+=2){
od[i][j]=(1-4*r)*ev[i][j]+r*ev[i+1][j]+r*ev[i-1][j]+r*ev[i][j+1]+r*ev[i][j-1];
__syncthreads();
ev[i][j]=(1-4*r)*od[i][j]+r*od[i+1][j]+r*od[i-1][j]+r*od[i][j+1]+r*od[i][j-1];
__syncthreads();
}
}}
int main(void)
{
struct timeval t0,t1;
int i,j;
float dx=1/M;
float r=0.2;
float even[M+1][M+1], odd[M+1][M+1];
float time;
size_t oddpitch;
size_t evenpitch;
float **ev;
float **od;
for(i=0;i<M+1;i++)
for(j=0;j<M+1;j++)
even[i][j]=1;
for(i=0;i<M+1;i++){
even[0][i]=0;
even[M][i]=0;
even[i][0]=0;
even[i][M]=0;
}
for(i=0;i<M+1;i++){
odd[0][i]=0;
odd[M][i]=0;
odd[i][0]=0;
odd[i][M]=0;
}
cudaMallocPitch(ev, &evenpitch, (M+1)*sizeof(float), M+1);
cudaMallocPitch(od, &oddpitch, (M+1)*sizeof(float), M+1);
cudaMemcpy2D(ev ,evenpitch ,even ,(M+1)*sizeof(float), (M+1)*sizeof(float), M+1, cudaMemcpyHostToDevice);
cudaMemcpy2D(od ,oddpitch ,odd ,(M+1)*sizeof(float), (M+1)*sizeof(float), M+1, cudaMemcpyHostToDevice);
dim3 threadPerBlock(32,32);
dim3 numBlocks(M/threadPerBlock.x, M/threadPerBlock.y);
gettimeofday(&t0, NULL);
Calcu<<<numBlocks, threadPerBlock>>>(od , ev, oddpitch, evenpitch);
cudaDeviceSynchronize();
cudaMemcpy2D(even ,(M+1)*sizeof(float) ,ev ,evenpitch, (M+1)*sizeof(float), M+1, cudaMemcpyDeviceToHost);
cudaMemcpy2D(odd ,(M+1)*sizeof(float) ,od ,oddpitch , (M+1)*sizeof(float), M+1, cudaMemcpyDeviceToHost);
gettimeofday(&t1, NULL);
time = t1.tv_sec-t0.tv_sec + (t1.tv_usec - t0.tv_usec)*1.0e-6;
printf("Elapsed time = %lf\n", time);
printf("FLOPS = %lf\n", (M-1)*(M-1)*100*6/time);
for(j=10;j>=0;j--){
for(i=0;i<11;i++)
printf("%f ",even[i][j]);
printf("\n");
}
return 0;
}
|
23,237 | #include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <iostream>
struct Fragment
{
int index[3];
Fragment() = default;
};
struct functor
{
__device__ __host__
Fragment operator() (const int &i) const {
Fragment f;
f.index[0] = i; f.index[1] = i+1; f.index[2] = i+2;
return f;
}
};
int main()
{
const int N = 10;
thrust::device_vector<Fragment> dvFragment(N);
thrust::tabulate(dvFragment.begin(), dvFragment.end(), functor());
for(auto p : dvFragment) {
Fragment f = p;
std::cout << f.index[0] << " " << f.index[1] << " " << f.index[2] << std::endl;
}
return 0;
} |
23,238 | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#define SIZE 100000000
#define THREADS_PER_BLOCK 1024
// Convert and mod
__global__ void add_kernel(uint32_t *d_c, uint32_t *d_a, uint32_t *d_b) {
// compute index = thread index in a block + block index * number of threads per block
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
// if SIZE mod THREADS PER BLOCK != 0 --> some index will not be available as one more block is used
if(idx <= SIZE) d_c[idx] = d_a[idx] + d_b[idx];
}
int main(int argc, char* argv[]) {
int i;
uint32_t *h_a, *h_b, *h_c; // host pointer
uint32_t *d_a, *d_b, *d_c; // device pointer
cudaMallocHost((void**)&h_a, SIZE * sizeof(uint32_t));
cudaMallocHost((void**)&h_b, SIZE * sizeof(uint32_t));
cudaMallocHost((void**)&h_c, SIZE * sizeof(uint32_t));
cudaMalloc((void**)&d_a, SIZE * sizeof(uint32_t));
cudaMalloc((void**)&d_b, SIZE * sizeof(uint32_t));
cudaMalloc((void**)&d_c, SIZE * sizeof(uint32_t));
for(i=0; i<SIZE; i++)
{
h_a[i] = i;
h_b[i] = i;
}
cudaMemcpy(d_a, h_a, SIZE * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, SIZE * sizeof(uint32_t), cudaMemcpyHostToDevice);
// <<< [Number of blocks], [Number of threads per block] >>>
add_kernel<<<(SIZE / THREADS_PER_BLOCK) + 1, THREADS_PER_BLOCK>>>(d_c, d_a, d_b);
cudaMemcpy(h_c, d_c, SIZE * sizeof(uint32_t), cudaMemcpyDeviceToHost);
printf("\n----------\nResults CPU:\n");
for(i=0; i<SIZE; i++) printf("%u: %u ",i , h_c[i]);
cudaFree(h_a); cudaFree(h_b); cudaFree(h_c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
cudaDeviceReset();
return 0;
}
|
23,239 | #include "includes.h"
__global__ void MNKernel(int count, long * Md, long *Nd, long *Pd, int width) {
// 2D thread ID
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
// Pvalue stores the Pd element that is computed by the thread
long Pvalue = 0;
for (int k=0; k < width; k++)
Pvalue += Md[row * width + k] * Nd[k * width + col];
Pd[row * width + col] = Pvalue;
} |
23,240 |
/*
Based off work by Nelson, et al.
Brigham Young University (2010)
Adapted by Kevin Yuh (2015)
*/
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cufft.h>
#define PI 3.14159265358979
/* Check errors on CUDA runtime functions */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
/* Check errors on cuFFT functions */
void gpuFFTchk(int errval){
if (errval != CUFFT_SUCCESS){
printf("Failed FFT call, error code %d\n", errval);
}
}
/* Check errors on CUDA kernel calls */
void checkCUDAKernelError()
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
} else {
fprintf(stderr, "No kernel error detected\n");
}
}
/* Basic ramp filter. Scale all frequencies linearly. */
__global__ void cudaFrequencyKernal(cufftComplex *out_data, int length) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < length) {
float scaleFactor;
// We need to account for the fact that the highest amplitude is at
// length / 2
if (index < (length / 2)) {
scaleFactor = ((float) index) / (length / 2);
}
else {
scaleFactor = ((float) (length - index)) / (length / 2);
}
cufftComplex temp = out_data[index];
temp.x = temp.x * scaleFactor;
temp.y = temp.y * scaleFactor;
out_data[index] = temp;
index += blockDim.x * gridDim.x;
}
}
/* Convert an array of complex values to an array of real values. */
__global__ void cudaComplexToRealKernal(cufftComplex *in_data,
float *out_data,
int length) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < length) {
cufftComplex in = in_data[index];
out_data[index] = in.x;
index += blockDim.x * gridDim.x;
}
}
/* Backproject the sinogram to an image. */
__global__ void cudaBackprojectionKernal(float *in_data, float *out_data,
int nAngles, int sin_width,
int image_dim) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < (image_dim * image_dim)) {
// Get the pixel (x,y) coordinate from the index value
int y_image = index / image_dim;
int x_image = index % image_dim;
// Get the geometric (x,y) coordinate from the pixel coordinate
int x_geo = x_image - (image_dim / 2);
int y_geo = (image_dim / 2) - y_image;
// For all theta in the sinogram...
for (int i = 0; i < nAngles; i++) {
float d;
// Handle the edges cases of theta = 0 and theta = PI/2
if(i == 0) {
d = (float) x_geo;
}
else if (i == nAngles / 2) {
d = (float) y_geo;
}
else {
float theta = PI * (((float) i) / ((float) nAngles));
float m = -1 * cos(theta) / sin(theta);
float x_i = ((float) (y_geo - m * x_geo)) / ((-1 / m) - m);
float y_i = (-1 / m) * x_i;
d = sqrt((x_i * x_i) + (y_i * y_i));
// Center the index
if (((-1 / m) > 0 && x_i < 0) || ((-1 / m) < 0 && x_i > 0)) {
d *= -1;
}
}
// d is the distance from the center line, so we need to offset d by
// this much
d += sin_width / 2.0;
d = truncf(d);
// Now that we have d, add the right value to the image array
out_data[y_image * image_dim + x_image] += in_data[i * sin_width + (int)d];
}
index += blockDim.x * gridDim.x;
}
}
/* Filtered backprojection driver: read a text sinogram, ramp-filter it
 * with cuFFT on the GPU, backproject it into a square image, and write
 * the image out as text.
 * argv: sinogram file, image dim, nAngles, threads/block, blocks, output. */
int main(int argc, char** argv){
    if (argc != 7){
        fprintf(stderr, "Incorrect number of arguments.\n\n");
        fprintf(stderr, "\nArguments: \n \
< Sinogram filename > \n \
< Width or height of original image, whichever is larger > \n \
< Number of angles in sinogram >\n \
< threads per block >\n \
< number of blocks >\n \
< output filename >\n");
        exit(EXIT_FAILURE);
    }
    /********** Parameters **********/
    int width = atoi(argv[2]);
    int height = width;
    // The image diagonal bounds the sinogram row width.
    int sinogram_width = (int)ceilf( height * sqrt(2) );
    int nAngles = atoi(argv[3]);
    int threadsPerBlock = atoi(argv[4]);
    int nBlocks = atoi(argv[5]);
    /********** Data storage *********/
    // GPU DATA STORAGE
    cufftComplex *dev_sinogram_cmplx;
    float *dev_sinogram_float;
    float* output_dev; // Image storage
    cufftComplex *sinogram_host;
    size_t size_result = width*height*sizeof(float);
    float *output_host = (float *)malloc(size_result);
    /*********** Set up IO, Read in data ************/
    sinogram_host = (cufftComplex *)malloc( sinogram_width*nAngles*sizeof(cufftComplex) );
    FILE *dataFile = fopen(argv[1],"r");
    if (dataFile == NULL){
        fprintf(stderr, "Sinogram file missing\n");
        exit(EXIT_FAILURE);
    }
    FILE *outputFile = fopen(argv[6], "w");
    if (outputFile == NULL){
        fprintf(stderr, "Output file cannot be written\n");
        exit(EXIT_FAILURE);
    }
    int j, i;
    // Sinogram samples are real; the imaginary parts start at zero.
    for(i = 0; i < nAngles * sinogram_width; i++){
        fscanf(dataFile,"%f",&sinogram_host[i].x);
        sinogram_host[i].y = 0;
    }
    fclose(dataFile);
    /* Upload the complex sinogram to the device. */
    int sinogram_size = nAngles * sinogram_width;
    cudaMalloc((void **) &dev_sinogram_cmplx, sizeof(cufftComplex) * sinogram_size);
    cudaMalloc((void **) &dev_sinogram_float, sizeof(float) * sinogram_size);
    cudaMemcpy(dev_sinogram_cmplx, sinogram_host,
               sizeof(cufftComplex) * sinogram_size,
               cudaMemcpyHostToDevice);
    /* High-pass (ramp) filter: forward FFT, frequency-domain scaling,
     * inverse FFT, then extract the real components. */
    cufftHandle plan;
    int batch = 1;
    cufftPlan1d(&plan, sinogram_size, CUFFT_C2C, batch);
    // Run the forward DFT
    cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_FORWARD);
    // Apply basic ramp filter
    cudaFrequencyKernal<<<nBlocks, threadsPerBlock>>>
        (dev_sinogram_cmplx, sinogram_size);
    // Run the inverse DFT
    cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_INVERSE);
    // Extract the real components to floats
    cudaComplexToRealKernal<<<nBlocks, threadsPerBlock>>>
        (dev_sinogram_cmplx, dev_sinogram_float, sinogram_size);
    // Bug fix: the cuFFT plan was never destroyed (resource leak).
    cufftDestroy(plan);
    // Free the original sinogram
    cudaFree(dev_sinogram_cmplx);
    /* Backprojection: accumulate each angle's contribution into a
     * zeroed image (the kernel only does +=, so the memset matters). */
    cudaMalloc((void **) &output_dev, sizeof(float) * width * height);
    cudaMemset(output_dev, 0, sizeof(float) * width * height);
    cudaBackprojectionKernal<<<nBlocks, threadsPerBlock>>>(dev_sinogram_float,
                                                          output_dev,
                                                          nAngles,
                                                          sinogram_width,
                                                          width);
    // Copy the reconstructed image back to host; this blocking copy also
    // synchronizes with the kernel above.
    cudaMemcpy(output_host, output_dev, sizeof(float) * width * height, cudaMemcpyDeviceToHost);
    // Free the remaining GPU memory
    cudaFree(dev_sinogram_float);
    cudaFree(output_dev);
    /* Export image data. */
    for(j = 0; j < width; j++){
        for(i = 0; i < height; i++){
            fprintf(outputFile, "%e ",output_host[j*width + i]);
        }
        fprintf(outputFile, "\n");
    }
    /* Cleanup: Free host memory, close files. */
    free(sinogram_host);
    free(output_host);
    fclose(outputFile);
    return 0;
}
|
23,241 | // 20181010
// Yuqiong Li
// Matrix multiplication with CUDA
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <stdio.h>
#define index(i, j, n) ((i) * (n) + (j))
// declare global kernel function
__global__ void matrixMulKernel(float * a, float * b, float * c, unsigned int m, unsigned int n, unsigned int r);
/* Multiply two dense matrices on CPU and GPU and compare checksums.
 * a is m x n, b is n x r, c (CPU) and temp (GPU) are m x r, row-major. */
int main(){
    unsigned int m = 2000, n = 2000, r = 1000; // dimensions
    float * a, * b, * c, *temp ; // declare matrices
    a = (float *) malloc(m * n * sizeof(float)); // a is m by n
    b = (float *) malloc(n * r * sizeof(float)); // b is n by r
    c = (float *) calloc(m * r, sizeof(float)); // c is m by r : the result matrix
    temp = (float *) calloc(m * r, sizeof(float)); // to store GPU results
    int i = 0, j = 0;
    // initializing a
    for (i = 0; i < m; i++){
        for (j = 0; j < n; j++)
            a[index(i, j, n)] = i + j;
    }
    // initializing b
    for (i = 0; i < n; i++){
        for (j = 0; j < r; j++)
            b[index(i, j, r)] = i + j + 1;
    }
    double time_taken;
    clock_t start, end;
    // CPU version (naive triple loop reference)
    start = clock();
    int k = 0;
    for (i = 0; i < m; i++){
        for (j = 0; j < r; j++){
            for (k = 0; k < n; k++)
                c[index(i, j, r)] += a[index(i, k, n)] * b[index(k, j, r)];
        }
    }
    end = clock();
    time_taken = (double) (end - start) / CLOCKS_PER_SEC;
    printf("Time taken for CPU is %.2f.\n", time_taken);
    float val = 0.0;
    for (i = 0; i < m; i++){
        for (j = 0; j < r; j++){
            val += c[index(i, j, r)];
        }
    }
    // Bug fix: the period used to be printed after the newline ("\n.").
    printf("Check value for CPU: sum is %.2f.\n", val);
    // 1. allocate device memory for cuda variables
    float * d_a, * d_b, * d_c;
    cudaMalloc((void **) &d_a, m * n * sizeof(float));
    cudaMalloc((void **) &d_b, n * r * sizeof(float));
    cudaMalloc((void **) &d_c, m * r * sizeof(float));
    // copy memory to device
    cudaMemcpy(d_a, a, m * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, n * r * sizeof(float), cudaMemcpyHostToDevice);
    // 2. invoke kernel function
    // Bug fix: the kernel maps x -> column (r wide) and y -> row (m tall),
    // so the grid must be ceil(r/16) x ceil(m/16). It used to be swapped,
    // which left rows beyond ceil(r/16)*16 of c uncomputed when m > r.
    dim3 blocksPerGrid(ceil(r/16.0), ceil(m/16.0), 1);
    dim3 threadsPerBlock(16, 16, 1);
    start = clock();
    matrixMulKernel<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, m, n, r);
    // Bug fix: kernel launches are asynchronous; without this sync the
    // "GPU time" only measured launch overhead, not the computation.
    cudaDeviceSynchronize();
    end = clock();
    time_taken = (double) (end - start)/ CLOCKS_PER_SEC;
    printf("Time taken for GPU is %.2f\n", time_taken);
    // 3. copy results back to the host
    cudaMemcpy(temp, d_c, m * r * sizeof(float), cudaMemcpyDeviceToHost);
    val = 0;
    for (i = 0; i < m; i++){
        for (j = 0; j < r; j++){
            val += temp[index(i, j, r)];
        }
    }
    printf("Check value for GPU: sum is %.2f\n", val);
    free(a);
    free(b);
    free(c);
    free(temp);
    cudaFree(d_c);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
// Dense matrix multiply: c = a * b where a is m x n, b is n x r and
// c is m x r, all row-major. One thread computes one element of c;
// threads that fall outside c exit immediately.
__global__ void matrixMulKernel(float * a, float * b, float * c, unsigned int m, unsigned int n, unsigned int r){
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;  // row of c
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;  // column of c
    if (row >= m || col >= r)
        return;  // this thread has no output element
    float acc = 0;
    for (unsigned int t = 0; t < n; ++t)
        acc += a[index(row, t, n)] * b[index(t, col, r)];
    c[index(row, col, r)] = acc;
}
|
23,242 | #include "thrust/device_vector.h"
#include "thrust/host_vector.h"
#include "thrust/tuple.h"
#include "thrust/complex.h"
#include <iostream>
#include <iomanip>
#include <string>
#include <fstream>
using namespace std;
typedef thrust::complex<float> th_complex;
typedef thrust::device_vector<th_complex> th_dev_cplx_vec;
typedef thrust::device_vector<int> th_dev_bool_vec;
// Mandelbrot escape-time functor: for a point c, iterate z <- z^2 + c
// from z = 0 and return how many iterations ran before |z| reached 2
// (capped at nb_ite).
struct z_functor
{
    int nb_ite;  // iteration cap
    z_functor(int _nb_ite) : nb_ite(_nb_ite) {}
    __host__ __device__
    int operator () (th_complex c)
    {
        th_complex z = 0;
        int count = 0;
        for (; count < nb_ite && abs(z) < 2; ++count)
            z = z*z + c;
        return count;
    }
};
// Render Mandelbrot escape-iteration counts over a grid of complex
// points (via thrust::transform on the device) and dump them as a text
// matrix to the file "data".
int main(int argc, char * argv[])
{
    if (argc == 1)
    {
        cout << "Help" << endl
             << " 8 args :" << endl << endl
             << "arg1 = min real part | arg2 = min imaginary part " << endl
             << "arg3 = max real part | arg4 = max imaginary part " << endl
             << "arg5 = number of points on the real axe | arg6 = number of points on the imaginary axe " << endl
             << "arg7 = nb of iterations | arg8 = limit convergence"
             << endl << endl
             << " 4 args :" << endl << endl
             << "arg1 = number of points on the real axe | arg2 = number of points on the imaginary axe " << endl
             << "arg3 = nb of iterations "
             << endl << endl ;
        return 1;
    }
    // Bug fix: any argument count other than 4 or 8 used to leave every
    // parameter below uninitialized and still ran the computation (UB).
    if (argc != 8 && argc != 4)
    {
        cout << "Bad Args : see help (type nameofprogram without args)" << endl << endl;
        return 1;
    }
    float max_re, max_im, min_re, min_im;
    int nb_pts_re, nb_pts_im, nb_ite;
    if (argc == 8)
    {
        try
        {
            min_re = stod(argv[1]);
            min_im = stod(argv[2]);
            max_re = stod(argv[3]);
            max_im = stod(argv[4]);
            nb_pts_re = stoi(argv[5]);
            nb_pts_im = stoi(argv[6]);
            nb_ite = stoi(argv[7]);
        }
        catch (...)
        {
            cout << "Bad Args : see help (type nameofprogram without args)" << endl << endl;
            return 1;
        }
    }
    if (argc == 4 )
    {
        // Default viewport for the classic Mandelbrot rendering.
        min_re = -2;
        min_im = -1;
        max_re = 1;
        max_im = 1;
        try
        {
            nb_pts_re = stoi(argv[1]);
            nb_pts_im = stoi(argv[2]);
            nb_ite = stoi(argv[3]);
        }
        catch (...)
        {
            cout << "Bad Args : see help (type nameofprogram without args)" << endl << endl;
            return 1;
        }
    }
    // Fill the device grid with the complex coordinate of each pixel,
    // sweeping from (max_re, max_im) downwards on both axes.
    th_dev_cplx_vec mat (nb_pts_re*nb_pts_im) ;
    float re, im;
    for (int i=0; i<nb_pts_im; i++)
    {
        im = max_im - (max_im-min_im)/nb_pts_im*i;
        for (int j=0; j<nb_pts_re; j++)
        {
            re = max_re - (max_re-min_re)/nb_pts_re*j;
            th_complex cplx (re,im);
            // NOTE(review): element-wise assignment into a device_vector
            // issues one transfer per pixel; build on the host and copy
            // once if this ever becomes a bottleneck.
            mat[i*nb_pts_re+j] = cplx;
        }
    }
    // One escape count per pixel, computed on the device.
    th_dev_bool_vec img (nb_pts_re* nb_pts_im);
    thrust::transform(mat.begin(), mat.end(), img.begin(), z_functor(nb_ite));
    ofstream file; file.open("data");
    for (int i=0; i<nb_pts_im; i++)
    {
        for (int j=0; j<nb_pts_re; j++)
        {
            file << setw(15) << img[i*nb_pts_re+j] ;
        }
        file << endl ;
    }
    file.close();
    return 0;
}
|
23,243 | //#include "CDebug.cuh"
//#include "CMatrixFunctions.cuh"
//#include "CAABBFunctions.cuh"
//#include "CVoxelFunctions.cuh"
//#include "CSVOTypes.h"
//#include "CSVOFunctions.cuh"
//#include <cassert>
//#include <limits>
//#include <cstdio>
//#include "COpenglTypes.h"
//
//__global__ void DebugCheckNodeId(const CSVONode* gSVODense,
// const CSVONode* gSVOSparse,
// const unsigned int* gNodeIds,
// const unsigned int* gSVOLevelOffsets,
// const unsigned int& gSVOLevelOffset,
// const unsigned int levelNodeCount,
// const unsigned int currentLevel,
// const CSVOConstants& svoConstants)
//{
// const unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
//
// // Cull if out of range
// if(globalId > levelNodeCount) return;
//
// // Read Sibling Materials
// const CSVONode node = gSVOSparse[gSVOLevelOffset + globalId];
//
// // Cull if there is no children
// if(node == 0xFFFFFFFF) return;
//
// // Read Node Adress (we will compare this)
// const unsigned int nodeId = gNodeIds[gSVOLevelOffset + globalId];
//
// uint3 voxNode = UnpackNodeId(nodeId, currentLevel,
// svoConstants.numCascades,
// svoConstants.totalDepth);
//
// voxNode.x <<= (svoConstants.totalDepth - currentLevel);
// voxNode.y <<= (svoConstants.totalDepth - currentLevel);
// voxNode.z <<= (svoConstants.totalDepth - currentLevel);
//
// // Traverse SVO
// uint3 levelVoxId = CalculateLevelVoxId(voxNode, svoConstants.denseDepth, svoConstants.totalDepth);
// CSVONode location = gSVODense[svoConstants.denseDim * svoConstants.denseDim * levelVoxId.z +
// svoConstants.denseDim * levelVoxId.y +
// levelVoxId.x];
// assert(location != 0xFFFFFFFF);
// location += CalculateLevelChildId(voxNode, svoConstants.denseDepth + 1, svoConstants.totalDepth);
// for(unsigned int i = svoConstants.denseDepth + 1; i <= currentLevel; i++)
// {
// unsigned int levelIndex = i - svoConstants.denseDepth;
// const CSVONode node = gSVOSparse[gSVOLevelOffsets[levelIndex] + location];
// assert(node != 0xFFFFFFFF);
//
// // Offset child
// unsigned int childId = CalculateLevelChildId(voxNode, i + 1, svoConstants.totalDepth);
// location = node + childId;
// }
//}
//
//__global__ void DebugCheckUniqueAlloc(ushort2* gObjectAllocLocations,
// unsigned int segmentCount)
//{
// unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// if(globalId >= segmentCount) return;
//
// ushort2 myAllocLoc = gObjectAllocLocations[globalId];
// for(unsigned int i = 0; i < segmentCount; i++)
// {
// ushort2 otherSegment = gObjectAllocLocations[i];
// if(i != globalId &&
// myAllocLoc.x != 0xFFFF &&
// myAllocLoc.y != 0xFFFF &&
// myAllocLoc.x == otherSegment.x &&
// myAllocLoc.y == otherSegment.y)
// {
// assert(false);
// }
// }
//}
//
//__global__ void DebugCheckSegmentAlloc(const CVoxelGrid& gGridInfo,
//
// const ushort2* gObjectAllocLocations,
// const unsigned int* gSegmentObjectId,
// unsigned int segmentCount,
//
// const CObjectAABB* gObjectAABB,
// const CObjectTransform* gObjTransforms)
//{
// //unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// //if(globalId >= segmentCount) return;
//
// //unsigned int objectId = gSegmentObjectId[globalId];
// //bool intersects = CheckGridVoxIntersect(gGridInfo,
// // gObjectAABB[objectId],
// // gObjTransforms[objectId].transform);
// //ushort2 myAllocLoc = gObjectAllocLocations[globalId];
// //
// //if(intersects &&
// // (myAllocLoc.x == 0xFFFF ||
// // myAllocLoc.y == 0xFFFF))
// //{
// // assert(false);
// //}
//
// //if((!intersects) &&
// // (myAllocLoc.x != 0xFFFF ||
// // myAllocLoc.y != 0xFFFF))
// //{
// // assert(false);
// //}
//
//}
//
//
|
23,244 | #include <stdio.h>
#define NUM_BLOCKS 1
#define BLOCK_WIDTH 256
// Each thread prints one greeting with its thread index; the ordering
// of device printf output across threads is unspecified.
__global__ void hello()
{
printf("Hello world! I'm thread %d\n", threadIdx.x );
}
int main(int argc, char **argv)
{
// Launch NUM_BLOCKS x BLOCK_WIDTH threads; the synchronize below also
// flushes the device-side printf buffer before the host message prints.
hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
cudaDeviceSynchronize();
printf("That is all!\n");
return 0;
} |
// Shared declarations for the GPU random-forest kernels: the working
// precision, tuning constants, constant-buffer layouts, and one
// parameter-pack struct per kernel family (raw device-pointer bundles
// passed at launch time).
#pragma once
// Working precision for split points, probabilities and sample data.
typedef float c_precision;
// Threads per GPU thread group.
#define thread_group_size 64
// Upper bound on distinct values of a nominal attribute.
#define max_nominal 20
// Constant buffer strucs
// Per-launch configuration shared by the tree-building kernels.
struct SharedBuffer{
unsigned int cb_numTrees;
unsigned int cb_numFeatures;
unsigned int cb_maxDepth;
unsigned int cb_currentDepth;
unsigned int cb_availableNodes;
unsigned int cb_nodeBufferStart;
unsigned int cb_nodeBufferEnd;
unsigned int cb_maxInstInNodes;
unsigned int cb_instanceCount;
unsigned int cb_attributeCount;
unsigned int cb_nodeIdFlip;
};
// Configuration for the bootstrap-aggregation (bagging) pass.
struct ConstantBufferBagging{
unsigned int cb_treeOffset;
unsigned int cb_instanceCount;
unsigned int cb_nodeBufferEnd;
bool cb_baggingActivated;
};
// Configuration for the classification pass.
struct ConstantBufferClassify{
unsigned int cb_numTrees;
unsigned int cb_treeOffset;
unsigned int cb_nodeBufferEnd;
unsigned int cb_majorityClass;
unsigned int cb_instanceCount;
unsigned int cb_attributeCount;
};
// Kernel parameter package structs
// Extremely-randomized-trees: split-search kernel arguments.
namespace ExtremeFindSplit{
struct paramPack_Kernel{
unsigned int
*treeIds,
*rngStates,
*nodeIndices,
*distBuffer,
*classValues,
*bagWeights,
*nodeIndicesLimits,
*attributeTypes,
*innerNodeIds,
*childIds;
int
*attributes;
c_precision
*splitVals,
*splitPoints,
*dataset;
};
}
// Extremely-randomized-trees: split-application kernel arguments.
namespace ExtremeMakeSplit{
struct paramPack_Kernel{
int
*attributes;
unsigned int
*nodeIndices,
*nodeIndicesMirror,
*nodeIndicesLimits,
*attributeTypes,
*distBuffer,
*innerNodeIds,
*childIds,
*classValues,
*check;
c_precision
*splitPoints,
*dataset;
};
}
// Extremely-randomized-trees: node-creation kernel arguments.
namespace ExtremeCreateNodes{
struct paramPack_Kernel{
int
*attributes;
unsigned int
*nodeIndices,
*nodeIndicesMirror,
*distBuffer,
*childIds,
*nodeIndicesLimits,
*check,
*attributeValCounts,
*innerNodeIds,
*treeIds;
c_precision
*classProbs,
*splitPoints;
};
}
// Bootstrap-sampling kernel arguments.
namespace Bagging{
struct paramPack_Kernel{
unsigned int
*stateBuffer,
*treeIds,
*bagWeights,
*nodeIndicesLimits,
*nodeIndices,
*nodeIndicesMirror;
int
*classValues;
c_precision
*classProbs;
};
}
// Instance-index sorting kernel arguments.
namespace Sort{
struct paramPack_Kernel{
unsigned int
*nodeIndicesLimits,
*nodeIndices,
*nodeIndicesMirror,
*inputInds,
*stateBuffer,
*attributeNumValues,
*innerNodeIds;
};
}
// Classic random-forest split-search kernel arguments.
namespace FindSplit{
struct paramPack_Kernel{
unsigned int
*nodeIndicesLimits,
*nodeIndices,
*treeIds,
*bagWeights,
*distBuffer,
*stateBuffer,
*inputInds,
*attributeNumValues,
*innerNodeIds;
int
*classValues,
*attributeBuffer;
c_precision
*splitPoints,
*inputData,
*splitVal,
*classProbs;
};
}
// Split-evaluation kernel arguments.
namespace EvaluateSplit{
struct paramPack_Kernel{
unsigned int
*inputInds,
*bagWeights,
*stateBuffer,
*nodeIndicesLimits,
*nodeIndices,
*treeIds,
*childIds,
*distBuffer,
*nodeIndicesMirror,
*attributeNumValues,
*innerNodeIds;
int
*classValues,
*attributeBuffer,
*check;
c_precision
*splitPoints,
*inputData,
*classProbs;
};
};
// Forest-classification kernel arguments.
namespace RandomForest_Kernel_Classify{
struct paramPack_Kernel{
unsigned int
*attributeType,
*childIds,
*votes;
int
*attributeBuffer;
c_precision
*testData,
*classProbs,
*splitPoints;
};
}
// Data-partitioning kernel arguments.
namespace RandomForest_SplitData_Kernel{
struct paramPack_Kernel{
unsigned int
*inputInds,
*nodeIndicesLimits,
*nodeIndices,
*nodeIndicesMirror,
*attributeNumValues,
*innerNodeIds,
*distBuffer;
int
*attributeBuffer;
c_precision
*inputData,
*splitPoints;
};
}
// Tree-building kernel arguments.
namespace Build{
struct paramPack_Kernel{
unsigned int
*inputInds,
*bagWeights,
*stateBuffer,
*nodeIndicesLimits,
*nodeIndices,
*treeIds,
*childIds;
int
*classValues,
*attributeBuffer,
*check;
c_precision
*splitPoints,
*inputData,
*classProbs;
};
}
// Kepler-generation tree-building kernel arguments.
namespace KeplerBuild{
struct paramPack_Kernel{
unsigned int
*inputInds,
*bagWeights,
*stateBuffer,
*nodeIndicesLimits,
*nodeIndices,
*nodeIndicesMirror,
*treeIds,
*childIds,
*distBuffer;
int
*classValues,
*attributeBuffer,
*check;
c_precision
*splitVal,
*inputData,
*classProbs,
*splitPoints;
};
} |
23,246 | #include "includes.h"
//!!nvcc -c test.cu --compiler-options -fPIC
//!g++ -o program -L/usr/local/cuda/lib64 main.cpp test.o -lcuda -lcudart
// Single-element kernel: writes e raised to *a into *c using the float
// intrinsic expf. Intended to be launched with one thread.
// NOTE(review): the kernel name 'exp' shadows the device math function
// exp(); consider renaming before reuse.
__global__ void exp(float *a,float *c)
{
*c = expf(*a);
} |
23,247 | /*
* CUDA blur
* Kevin Yuh, 2014
* Revised by Nailen Matschke, 2016
*/
#include <cstdio>
#include <cuda_runtime.h>
#include "blur_device.cuh"
__global__
void cudaBlurKernel(const float *raw_data, const float *blur_v, float *out_data,
                    int n_frames, int blur_v_size) {
    /* GPU-accelerated causal convolution: each output frame is the dot
     * product of the impulse response blur_v with the preceding input
     * frames. Grid-stride loop covers all n_frames outputs. */
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    const uint stride = blockDim.x * gridDim.x;
    for (; idx < n_frames; idx += stride) {
        /* Near the start of the signal only idx+1 taps are available. */
        int taps = (idx < blur_v_size) ? (int)idx + 1 : blur_v_size;
        float acc = 0;
        for (int j = 0; j < taps; j++)
            acc += raw_data[idx - j] * blur_v[j];
        out_data[idx] = acc;
    }
}
// Host-side wrapper: launches the blur kernel on the default stream with
// the caller's grid configuration. No error checking or synchronization
// is performed here; callers must sync before reading out_data.
void cudaCallBlurKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const float *raw_data,
const float *blur_v,
float *out_data,
const unsigned int n_frames,
const unsigned int blur_v_size) {
/* Call the kernel above this function. */
cudaBlurKernel<<<blocks, threadsPerBlock>>>(raw_data, blur_v, out_data,
n_frames, blur_v_size);
}
|
23,248 | #include <cuda.h>
#define DIVERGENCE_HERE \
if(arr[id] %2 == 0) \
arr[id] = arr[id-1]; \
else \
arr[id] = arr[id+1];
// Applies the intentionally divergent DIVERGENCE_HERE update to arr[id].
__global__ void kernel_one(int *arr, int N) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    // Bug fix: the guard used to end in a stray ';', so out-of-range
    // threads fell through and wrote past the array.
    if (id >= N)
        return;
    // NOTE(review): DIVERGENCE_HERE still reads arr[id-1] or arr[id+1],
    // so ids 0 and N-1 index out of bounds -- confirm intent.
    DIVERGENCE_HERE;
}
// Shared helper applying the divergent update; expects the caller to have
// range-checked id (the macro reads arr[id-1] or arr[id+1], so the array
// edges are also unsafe).
__device__ void aux(int *arr, int id, int N) {
DIVERGENCE_HERE;
}
// Same divergent update as kernel_one, but routed through the aux helper.
__global__ void kernel_two(int *arr, int N) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    // Bug fix: the guard used to end in a stray ';', so out-of-range
    // threads still called aux and touched memory past the array.
    if (id >= N)
        return;
    aux(arr, id, N);
}
|
23,249 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <string.h>
int size_n;
unsigned int SEED;
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
// One folding step of a parallel XOR reduction over a[0..sz-1]:
// each thread i < sz/2 computes a[i] ^= a[i+sz/2]. When sz is odd, the
// leftover element a[sz-1] is moved to a[sz/2] so the next pass (which
// the host launches with size sz/2+1) still includes it.
__global__ void XOR_Sum(int *a, int sz) {
long long int i = (blockDim.x * blockIdx.x) + threadIdx.x;
int n = sz/2;
if(i >= n)
return;
a[i] = (a[i] ^ a[i+n]);
// Every surviving thread repeats this store with the same value --
// redundant, but benign.
if(sz %2 == 1)
a[n] = a[2*n];
}
// Fills an array with rand() values seeded from argv[2], reduces it to a
// single XOR checksum on the GPU by repeated halving, and prints it.
int main(int argc, char **argv) {
struct timeval start, end, t_start, t_end;
int *arr, *gpu_arr;
int X_sum, blocks; // array whose xor we need to calculate
if(argc != 3) {
printf("Invalid arguments!!\n");
exit(-1);
}
else {
size_n = atoi(argv[1]);
SEED = atoi(argv[2]);
if(size_n <= 0) {
printf("Invalid value of number of terms!\n");
exit(-1);
}
}
/* Allocate host (CPU) memory and initialize*/
arr = (int *)malloc(size_n * sizeof(int));
if(!arr) {
printf("Cannot declare required memory\n");
exit(-1);
}
srand(SEED); // srand sets the seed which is used for generating random numbers
for(int i = 0;i < size_n;i++)
arr[i] = rand();
gettimeofday(&t_start, NULL); // for total time
/* Allocate GPU memory and copy from CPU --> GPU*/
cudaMalloc(&gpu_arr, size_n * sizeof(int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_arr, arr, size_n * sizeof(int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL); // when actual processing starts (call to global)
// Enough 1024-thread blocks to cover half the array (one thread per pair).
blocks = (size_n / 2) / 1024;
if((size_n / 2) % 1024)
++blocks;
// Repeatedly fold the array in half; the odd leftover is preserved by
// the kernel, so an odd size shrinks to n+1 rather than n.
int sz = size_n, nn = sz/2;
while(nn != 0) {
XOR_Sum<<<blocks, 1024>>> (gpu_arr, sz);
if(sz%2 == 1)
sz = nn + 1;
else
sz = nn;
nn = sz/2;
}
gettimeofday(&end, NULL);
// Only element 0 is needed: it holds the full XOR after the last fold.
// (This blocking copy also synchronizes with the kernels above.)
cudaMemcpy(arr, gpu_arr, sizeof(int) , cudaMemcpyDeviceToHost);
gettimeofday(&t_end, NULL);
X_sum = arr[0];
// printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(gpu_arr);
printf("XOR Sum: %d\n", X_sum);
return 0;
} |
23,250 | #include <fstream>
#include <iostream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/binary_search.h>
#include <thrust/pair.h>
#define IGNORE_FIRST_N 1 //ignore the first n halo? host?
#define IGNORE_LAST_N 100000000 //ignore the halo after this number?
#define GPU_MEM 100000000
using namespace std;
string index_file = "/home/lyang/data/vl2b.00400.r200.index";
string ahf_part_file = "/home/lyang/halodata/vl_400_rhovesc.z0.000.AHF_particles";
string output_file = "vl2b.00400.r200.ahf.haloflags";
int * haloParticles_;
int * searchParts_;
int * searchIndex_;
bool * searchResult_;
char * flags_;
int * particles_;
//bool verbose = false;
int numParts_ = 0;
// Runs one GPU binary-search batch: uploads the first
// requiredSearchPartNum entries of searchParts_, tests each against the
// sorted halo-particle ids in dev_val[0..numPartsRead_), and marks
// flags_[searchIndex_[l]] = 1 for every hit.
// Precondition: dev_val has been sorted by doSearch for this batch.
void getSearchRes(int requiredSearchPartNum, int numPartsRead_,
thrust::device_vector<int> &dev_searchParts_,
thrust::device_vector<int> &dev_searchResult_,
thrust::device_vector<int> &dev_val){
//do the search
thrust::copy(searchParts_, searchParts_ + requiredSearchPartNum, dev_searchParts_.begin());
thrust::binary_search(dev_val.begin(), dev_val.begin() + numPartsRead_,
dev_searchParts_.begin(),
dev_searchParts_.begin() + requiredSearchPartNum,
dev_searchResult_.begin());
// Bring the hit/miss results back and fold them into the flag array.
thrust::copy(dev_searchResult_.begin(), dev_searchResult_.begin() + requiredSearchPartNum, searchResult_);
for(int l = 0; l < requiredSearchPartNum; l++){
if(searchResult_[l]){
flags_[searchIndex_[l]] = 1;
}
}
}
// Processes one batch of numPartsRead_ halo-particle ids: sorts them on
// the GPU, then streams every still-unflagged particle from particles_
// through getSearchRes in GPU_MEM-sized chunks.
void doSearch(int numPartsRead_, thrust::device_vector<int> &dev_searchParts_,
thrust::device_vector<int> &dev_searchResult_,
thrust::device_vector<int> &dev_val){
printf("Start testing %d halo particles...\n", numPartsRead_);
//start filling the tags
//step 1: sorting (binary_search in getSearchRes requires sorted input)
printf("Sorting ...\n");
thrust::copy(haloParticles_, haloParticles_ + numPartsRead_, dev_val.begin());
thrust::sort(dev_val.begin(), dev_val.begin() + numPartsRead_);
//step 2: testing
printf("Searching ...\n");
//test every particle whether it's in the array
int requiredSearchPartNum = 0;
for(int k = 0; k < numParts_; k ++){
if(flags_[k] == 0){
searchParts_[requiredSearchPartNum] = particles_[k];
searchIndex_[requiredSearchPartNum] = k;
requiredSearchPartNum ++;
}
// Flush a full chunk before the staging buffers overflow.
if(requiredSearchPartNum >= GPU_MEM){
getSearchRes(requiredSearchPartNum, numPartsRead_,
dev_searchParts_, dev_searchResult_, dev_val);
requiredSearchPartNum = 0;
}
}
// Flush the final partial chunk, if any.
if(requiredSearchPartNum > 0){
getSearchRes(requiredSearchPartNum, numPartsRead_,
dev_searchParts_, dev_searchResult_, dev_val);
requiredSearchPartNum = 0;
}
}
//get flags
// Streams the AHF halo-particle file in GPU_MEM-sized batches and sets
// flags_[k] = 1 for every particle in particles_ that belongs to a halo
// (skipping the first IGNORE_FIRST_N halos and anything past
// IGNORE_LAST_N). Allocates and releases the global staging buffers.
void getFlag(){
    thrust::device_vector<int> dev_searchParts_(GPU_MEM);
    thrust::device_vector<int> dev_searchResult_(GPU_MEM);
    thrust::device_vector<int> dev_val(GPU_MEM);
    int numHalos = 0;
    for(int i = 0; i < numParts_; i++){
        flags_[i] = 0;
    }
    haloParticles_ = new int[GPU_MEM];
    searchParts_ = new int[GPU_MEM];
    searchIndex_ = new int[GPU_MEM];
    searchResult_ = new bool[GPU_MEM];
    ifstream haloInputFile_(ahf_part_file.c_str());
    haloInputFile_ >> numHalos;
    int numPartsRead_ = 0;
    printf("Number halos: %d.\nStart reading halo particles...\n", numHalos);
    for(int i = 0; (i < numHalos) && (i <= IGNORE_LAST_N); i ++){
        int numHaloParts;
        haloInputFile_ >> numHaloParts;
        printf("Halo: %d, Particles: %d.\n", i, numHaloParts);
        for(int j = 0; j < numHaloParts; j++){
            int partindex;
            int ch;
            // Each record is "particle-index value"; the second field is
            // read and discarded.
            haloInputFile_ >> partindex;
            haloInputFile_ >> ch;
            if(i >= IGNORE_FIRST_N){
                haloParticles_[numPartsRead_] = partindex;
                numPartsRead_ ++;
            }
            // Flush a full batch to the GPU before the buffer overflows.
            if(numPartsRead_ >= GPU_MEM){
                doSearch(numPartsRead_, dev_searchParts_, dev_searchResult_, dev_val);
                numPartsRead_ = 0;
            }
        }
    }
    // Flush the final partial batch, if any.
    if(numPartsRead_ > 0){
        doSearch(numPartsRead_, dev_searchParts_, dev_searchResult_, dev_val);
        numPartsRead_ = 0;
    }
    printf("\n");
    haloInputFile_.close();
    // Bug fix: these buffers come from new[], so they require delete[]
    // (plain delete on an array is undefined behavior).
    delete[] haloParticles_;
    delete[] searchParts_;
    delete[] searchIndex_;
    delete[] searchResult_;
}
// Reads a binary particle-index file, flags the particles that belong to
// AHF halos (getFlag), and writes the flag array back out in binary.
// Flags: -index <file> -ahf <file> -output <file>.
int main(int argc, const char **argv){
    int m=1;
    while (m<argc)
    {
        string arg = argv[m];
        // Bug fix: these tests used to be three independent 'if's with a
        // trailing 'else', so any recognized flag other than "-output"
        // still fell into the usage branch and the program exited.
        if (arg == "-index") { index_file = argv[m+1]; m+=1;}
        else if (arg == "-ahf") { ahf_part_file = argv[m+1]; m+=1;}
        else if (arg == "-output") { output_file = argv[m+1]; m+=1;}
        //else if (arg == "-verbose") { verbose = true;}
        else{
            cout << "Usage:" << endl;
            exit(0);
        }
        m++;
    }
    ifstream dataInputFile_;
    dataInputFile_.open(index_file.c_str(), ios::binary);
    if(!dataInputFile_.good()){
        printf("Datafile error: %s !\n", index_file.c_str());
        exit(1);
    }
    // Binary layout: int particle count followed by that many int ids.
    dataInputFile_.read((char*)&numParts_, sizeof(int));
    cout << "Particles: " << numParts_ << endl;
    particles_ = new int[numParts_];
    flags_ = new char[numParts_];
    dataInputFile_.read((char *) particles_, sizeof(int) * numParts_);
    dataInputFile_.close();
    getFlag();
    //output: same layout, one flag byte per particle
    printf("Output the result...\n");
    ofstream dataOutputStream_(output_file.c_str(), ios::binary);
    dataOutputStream_.write((char *) &numParts_, sizeof(int));
    dataOutputStream_.write((char *) flags_, sizeof(char) * numParts_);
    dataOutputStream_.close();
    printf("Finished...\n");
    // Bug fix: new[] allocations require delete[].
    delete[] particles_;
    delete[] flags_;
}
|
23,251 | #include <cstdio>
#define N 100000
#define blocksPerGrid 256
#define threadsPerBlock 128
// Block-level dot product: each block accumulates its grid-stride share
// of a[i]*b[i] into shared memory, tree-reduces it, and writes one
// partial sum to partial_c[blockIdx.x]; the host sums the partials.
// Requires threadsPerBlock to be a power of two for the reduction.
__global__ void dot(float* a, float* b, float* partial_c)
{
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    float temp = 0;
    while (tid < N)
    {
        // Bug fix: a dot product multiplies the elements; this used to
        // read a[tid] + b[tid], which computed an element sum instead
        // (the kernel name and the "dotmul" output label expect a*b).
        temp += a[tid] * b[tid];
        tid += blockDim.x*gridDim.x;
    }
    cache[threadIdx.x] = temp;
    __syncthreads();
    // Reduction: halve the active range each round; the barrier is
    // outside the divergent branch so every thread reaches it.
    int stride = blockDim.x/2;
    while (stride != 0)
    {
        if (threadIdx.x < stride)
        {
            cache[threadIdx.x] += cache[threadIdx.x+stride];
        }
        __syncthreads();
        stride /= 2;
    }
    if (threadIdx.x == 0)
    {
        partial_c[blockIdx.x] = cache[0];
    }
}
// Host driver: builds a[i]=i, b[i]=2i, launches the per-block reduction
// kernel, then finishes the reduction over the block partials on the CPU.
int main()
{
float *a, *b, *partial_c;
a = (float*)malloc(N*sizeof(float));
b = (float*)malloc(N*sizeof(float));
partial_c = (float*)malloc(blocksPerGrid*sizeof(float));
for (int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i*2;
}
float *dev_a, *dev_b, *dev_partial_c;
cudaMalloc((void**)&dev_a, N*sizeof(float));
cudaMalloc((void**)&dev_b, N*sizeof(float));
cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float));
cudaMemcpy(dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice);
dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);
// Blocking copy: also synchronizes with the kernel above.
cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
// Final host-side reduction over the per-block partial sums.
// NOTE(review): no CUDA error checking anywhere; a failed launch would
// surface only as a garbage sum.
float c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("dotmul result: %.2f\n", c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
return 0;
}
|
23,252 | //imports
#include <iostream>
#include <math.h>
#include <cstdlib>
#include <ctime>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
//constants for dimensions of matrices
#define A_HEIGHT 8192
#define A_WIDTH 8192
#define THREADSIZEX 32
#define THREADSIZEY 32
#define THREADSPERBLOCK 256
//init matrix: initialize A and B with value from 0.0 to 1.0
// Each thread draws ONE uniform sample and writes that same value to
// every element of its grid-stride range, so X and Y receive identical
// contents -- presumably intentional for the later add/verify; confirm.
// NOTE(review): state is indexed by threadIdx.x, so the caller must
// allocate at least blockDim.x curandState slots, and concurrent blocks
// share (and race on) the same slots.
__global__ void initMatrixGPU(float *X, float *Y, int N, curandState *state){
int i, seed=1337;
int threadID=blockDim.x*blockIdx.x+threadIdx.x;
int gridStride=gridDim.x*blockDim.x;
curand_init(seed, threadID, 0, &state[threadIdx.x]);
float RANDOM = curand_uniform(&state[threadIdx.x]);
for(i=threadID;i<N;i+=gridStride){
X[i] = RANDOM;
Y[i] = RANDOM;
}
}
//matrix addition, non threaded
// Sequential host reference: element-wise D = A + B for an nY x nX
// row-major matrix (used to validate the GPU result).
void matrixAddNonThreaded(float* A, float* B, float* D, int nX, int nY){
    int total = nX * nY;
    for (int idx = 0; idx < total; idx++) {
        D[idx] = A[idx] + B[idx];
    }
}
//threaded across cuda enabled GPU for matrix addition
__global__ void matrixAddGPU(float* A, float* B, float* C, int nX, int nY)
{
int i, j;
int xLoc=blockDim.x*blockIdx.x+threadIdx.x;
int yLoc=blockDim.y*blockIdx.y+threadIdx.y;
int gridStrideX=blockDim.x*gridDim.x;
int gridStrideY=blockDim.y*gridDim.y;
for(i=xLoc;i<nX;i+=gridStrideX){
for(j=yLoc;j<nY;j+=gridStrideY){
C[i*nX+j]=A[i*nX+j]+B[i*nX+j];
}
}
}
// Allocates managed matrices, initializes them on the GPU, adds them on
// the GPU, and validates against a sequential CPU addition.
int main(void)
{
    //memory allocation
    float* A;
    float* B;
    float* C;   // GPU result
    float* D;   // CPU reference result
    int nX;
    int nY;
    nX=A_WIDTH;
    nY=A_HEIGHT;
    int deviceID;
    int N=nX*nY;
    curandState* state;
    // GPU specific variables
    cudaDeviceProp gpuProps;
    // Get GPU properties
    cudaGetDevice(&deviceID);
    cudaGetDeviceProperties(&gpuProps, deviceID);
    int numSM=gpuProps.multiProcessorCount;
    int maxThreadsPerBlock=gpuProps.maxThreadsPerBlock;
    int maxThreadsPerMultiProcessor=gpuProps.maxThreadsPerMultiProcessor;
    int maxGridSize=gpuProps.maxGridSize[0];
    int maxThreadsDim=gpuProps.maxThreadsDim[0];
    const dim3 blockSize(THREADSIZEX, THREADSIZEY, 1);
    const dim3 gridSize(((A_WIDTH-1)/THREADSIZEX)+1,((A_HEIGHT-1)/THREADSIZEY)+1);
    // Allocate memory on unified heap and host memory
    cudaMallocManaged(&A, nX*nY*sizeof(float));
    cudaMallocManaged(&B, nX*nY*sizeof(float));
    // Bug fix: cudaMemAdvise / cudaMemPrefetchAsync take the managed
    // pointer itself, not its address (&A referred to a stack variable,
    // so every one of these calls failed or advised the wrong range).
    cudaMemAdvise(A, N*sizeof(float), cudaMemAdviseSetReadMostly, deviceID);
    cudaMemAdvise(B, N*sizeof(float), cudaMemAdviseSetReadMostly, deviceID);
    cudaMallocManaged(&C, nX*nY*sizeof(float));
    // Bug fix: the init kernel indexes state[threadIdx.x] with
    // blockDim.x == THREADSPERBLOCK threads, so the previous allocation
    // of THREADSIZEX (32) states was written out of bounds.
    cudaMalloc(&state, THREADSPERBLOCK*sizeof(curandState));
    D = (float*)malloc(N*sizeof(float));
    //Prefetch A, B, and C onto device
    cudaMemPrefetchAsync(A, N*sizeof(float), deviceID);
    cudaMemPrefetchAsync(B, N*sizeof(float), deviceID);
    cudaMemPrefetchAsync(C, N*sizeof(float), deviceID);
    // Launch init kernel
    initMatrixGPU<<<2*numSM, THREADSPERBLOCK>>>(A,B,nX*nY,state);
    cudaDeviceSynchronize();
    // Print GPU info
    std::cout<<"SM's "<<numSM<<", maxThreadsPerBlock "<<maxThreadsPerBlock<<", maxThreadsPerMultiProcessor "<<maxThreadsPerMultiProcessor<<" maxGridSize "<<maxGridSize<<" maxThreadsDim "<<maxThreadsDim<<'\n';
    // Launch add kernel
    matrixAddGPU<<<gridSize, blockSize>>>(A,B,C,nX,nY);
    cudaDeviceSynchronize();
    // Prefetch A, B, C back to the host for the CPU reference pass
    cudaMemPrefetchAsync(A, N*sizeof(float), cudaCpuDeviceId);
    cudaMemPrefetchAsync(B, N*sizeof(float), cudaCpuDeviceId);
    cudaMemPrefetchAsync(C, N*sizeof(float), cudaCpuDeviceId);
    // Sequential matrix addition
    matrixAddNonThreaded(A,B,D,nX,nY);
    //sanity check: accumulated absolute difference between GPU and CPU
    int row, col;
    float dif=0;
    for (row=0; row<nY; row++){
        for(col=0; col<nX; col++)
            dif+=abs(C[row*nX+col]-D[row*nX+col]);
    }
    if(dif < 0.1) printf("SUCCESS\n");
    else printf("FAIL\n");
    printf("%f\n",dif);
    // Free memory (state was previously leaked)
    cudaFree(state);
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    free(D);
    return 0;
}
|
23,253 | // compute.cu
//
// driver and kernel call
#include <stdio.h>
#define THREADS_PER_BLOCK 512
// Fills a_d with a repeating 1..10 cycle, b_d with a ramp that counts up
// through the first half (including the middle element when n is odd)
// and back down through the rest, and c_d with their product.
__global__ void compute_d (int *a_d, int *b_d, int *c_d, int n)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= n)
        return;
    a_d[x] = x % 10 + 1;
    // 2*x < n covers both x < n/2 and the odd-n middle element x == n/2,
    // so the original three-way branch collapses to a single ternary.
    b_d[x] = (2 * x < n) ? (x + 1) : (n - x);
    c_d[x] = a_d[x] * b_d[x];
}
extern "C" void computeArr (int *c, int arraySize)
{
int *a_d, *b_d, *c_d;
cudaMalloc ((void**) &a_d, sizeof(int) * arraySize);
cudaMalloc ((void**) &b_d, sizeof(int) * arraySize);
cudaMalloc ((void**) &c_d, sizeof(int) * arraySize);
compute_d <<< ceil((float) arraySize/THREADS_PER_BLOCK), THREADS_PER_BLOCK >>> (a_d, b_d, c_d, arraySize);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf ("CUDA error: %s\n", cudaGetErrorString(err));
cudaMemcpy (c, c_d, sizeof(int) * arraySize, cudaMemcpyDeviceToHost);
cudaFree (a_d);
cudaFree (b_d);
cudaFree (c_d);
}
|
23,254 | #include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define SIZE 10
// In-place parallel minimum reduction over input[0 .. 2*blockDim.x-1];
// the result ends up in input[0]. Intended for a single-block launch.
__global__ void min(int *input){
    int tid = threadIdx.x;
    int step_size=1;
    int numberofthreads = blockDim.x;
    while(numberofthreads>0){
        if(tid<numberofthreads){
            int first = tid*step_size*2;
            int second = first+step_size;
            if(input[second]<input[first])
                input[first]=input[second];
        }
        // Bug fix: the next round reads winners written by other threads
        // in this round, so a barrier is required; without it the
        // reduction races. It sits outside the divergent branch, and
        // every thread runs the same number of loop iterations, so all
        // threads reach it together.
        __syncthreads();
        step_size*=2;
        numberofthreads/=2;
    }
}
// Fills an array with random values, prints it, and reduces it to its
// minimum on the GPU.
// BUG FIX: the original print loop executed `printf("%d ", arr[i]=rand())`,
// silently overwriting the `rand() % 100` initialisation it had just done;
// it also leaked the device buffer.
int main()
{
int arr[SIZE];
int byte_size = SIZE*sizeof(int);
for(int i=0;i<SIZE;i++){
arr[i] = rand()% 100;
}
printf("The array is:\n");
for(int i=0;i<SIZE;i++){
printf("%d ",arr[i]);   // print only — no assignment
}
printf("\n");
int *arr_min,result;
cudaMalloc(&arr_min,byte_size);
cudaMemcpy(arr_min,arr,byte_size,cudaMemcpyHostToDevice);
// SIZE/2 threads: each reduction leaf covers a pair of elements.
min<<<1,SIZE/2>>>(arr_min);
// The result lands in element 0; the copy synchronizes with the kernel.
cudaMemcpy(&result,arr_min,sizeof(int),cudaMemcpyDeviceToHost);
printf("Minimum: %d", result);
cudaFree(arr_min);   // was leaked in the original
return 0;
}
23,255 | #include <iostream>
#include <sstream>
#include <cmath>
#include <algorithm>
#include <vector>
#include <chrono>
#include <type_traits>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
// RAII wrapper around a pair of CUDA events for millisecond-resolution
// GPU timing on the default stream.
class GpuTimer
{
cudaEvent_t evBegin;   // recorded by Start()
cudaEvent_t evEnd;     // recorded by Stop()
public:
GpuTimer()
{
cudaEventCreate(&evBegin);
cudaEventCreate(&evEnd);
}
~GpuTimer()
{
cudaEventDestroy(evBegin);
cudaEventDestroy(evEnd);
}
// Marks the start of the timed region.
void Start()
{
cudaEventRecord(evBegin, 0);
}
// Marks the end of the timed region.
void Stop()
{
cudaEventRecord(evEnd, 0);
}
// Blocks until the end event completes, then returns elapsed milliseconds.
float Elapsed()
{
float ms;
cudaEventSynchronize(evEnd);
cudaEventElapsedTime(&ms, evBegin, evEnd);
return ms;
}
};
// Wall-clock timer that prints the time since construction (in Resolution
// units) each time stop() is called.
template <class Resolution = std::chrono::microseconds>
class CPUTimer {
public:
// Prefer the high-resolution clock when it is steady; otherwise fall back
// to the guaranteed-steady clock.
using Clock = std::conditional_t<std::chrono::high_resolution_clock::is_steady,
                                 std::chrono::high_resolution_clock,
                                 std::chrono::steady_clock>;
private:
// Timestamp taken at construction; stop() measures against it.
const Clock::time_point mStart = Clock::now();
public:
CPUTimer() = default;
// BUG FIX: the original destructor sampled the clock and built an
// ostringstream whose every use was commented out — pure dead code
// (and an unused-variable warning). A defaulted destructor is equivalent.
~CPUTimer() = default;
// Prints "Stop Elapsed <label> : <count>" for the time since construction.
void stop(std::string const& label) {
    const auto end = Clock::now();
    std::ostringstream strStream;
    strStream << "Stop Elapsed " << label << " : "
              << std::chrono::duration_cast<Resolution>(end - mStart).count()
              << std::endl;
    std::cout << strStream.str() << std::endl;
}
};
constexpr unsigned NUM_BINS = 256;
// Serial reference histogram: increments res[input[i]] for each of the n
// input elements. Caller guarantees res has a slot for every input value.
void histogramCPU(unsigned int* input, unsigned int* res, unsigned int n)
{
    unsigned int idx = 0;
    while (idx < n) {
        ++res[input[idx]];
        ++idx;
    }
}
// Histogram accumulated directly in global memory: every thread walks the
// input with a grid-stride loop and atomically bumps the matching bin.
__global__ void histogramGPUGlobalMem(unsigned *input, unsigned *res, unsigned n)
{
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
        atomicAdd(&res[input[idx]], 1);
}
// Histogram staged per-block in shared memory, then flushed to the global
// result with one atomic per bin. Requires blockDim.x >= NUM_BINS for the
// init/flush phases to touch every bin.
__global__ void histogramGPUSharedMem(unsigned *input, unsigned *res, unsigned n)
{
    __shared__ int s_res[NUM_BINS];   // per-block partial histogram
    // Zero the block-local bins (shared memory is uninitialized).
    if (threadIdx.x < NUM_BINS)
        s_res[threadIdx.x] = 0;
    __syncthreads();   // bins must be zeroed before any accumulation
    // Grid-stride accumulation into the block-local histogram.
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
        atomicAdd(&s_res[input[idx]], 1);
    __syncthreads();   // all local counts finished before the flush
    // Merge this block's partial result into the global histogram.
    if (threadIdx.x < NUM_BINS)
        atomicAdd(&res[threadIdx.x], s_res[threadIdx.x]);
}
// Builds a random input, histograms it on CPU and GPU (global-memory and
// shared-memory variants), and cross-checks the results.
// BUG FIX: all three histogram calls passed the BIN count
// (h_result.size() / d_result.size() == 256) as the element count instead
// of the input size N (65536), so only the first 256 elements were ever
// histogrammed. Also fixes the copy-pasted timer label for the shared-mem
// run.
int main()
{
    unsigned N = 1 << 16;
    std::vector<unsigned> h_input(N);
    std::vector<unsigned> h_result(NUM_BINS);
    thrust::host_vector<unsigned> h_result_copy(NUM_BINS);
    std::srand(42);
    std::generate(std::begin(h_input), std::end(h_input), [] { return (unsigned)rand() % NUM_BINS; } );
    CPUTimer<> cpu_timer;
    histogramCPU(h_input.data(), h_result.data(), N);
    cpu_timer.stop("histogramCPU");
    thrust::device_vector<unsigned> d_input(h_input);
    thrust::device_vector<unsigned> d_result(h_result_copy);
    thrust::device_vector<unsigned> d_result2(h_result_copy);
    // Number of threads per threadblock
    size_t THREADS = 512;
    // Calculate the number of threadblocks
    size_t BLOCKS = N / THREADS;
    GpuTimer timer;
    timer.Start();
    CPUTimer<> cpu_timer1;
    histogramGPUGlobalMem<<<BLOCKS, THREADS>>>(thrust::raw_pointer_cast(d_input.data()),
                                               thrust::raw_pointer_cast(d_result.data()),
                                               N);
    timer.Stop();
    cpu_timer1.stop("histogramGPUGlobalMem");
    std::cout << "elapsed for histogramGPUGlobalMem: " << timer.Elapsed() << " millisecs\n";
    timer.Start();
    CPUTimer<> cpu_timer2;
    histogramGPUSharedMem<<<BLOCKS, THREADS>>>(thrust::raw_pointer_cast(d_input.data()),
                                               thrust::raw_pointer_cast(d_result2.data()),
                                               N);
    timer.Stop();
    cpu_timer2.stop("histogramGPUSharedMem");
    std::cout << "elapsed for histogramGPUSharedMem: " << timer.Elapsed() << " millisecs\n";
    // Reports every index where the two histograms disagree.
    auto compare_vals = [](std::string const& label,
                           auto const& arr1,
                           auto const& arr2) {
        std::cout << "checking values: " << label << std::endl;
        size_t mismatches = 0;
        for (size_t i = 0; i < arr1.size(); ++i) {
            if (arr1[i] != arr2[i]) {
                ++mismatches;
                std::cout << "mismatch for index: " << i << " got:" << arr1[i] << " expected: " << arr2[i] << std::endl;
            }
        }
        std::cout << mismatches << " mismatches for " << label << std::endl;
    };
    // copy back
    h_result_copy = d_result;
    compare_vals("h_result vs. d_result", h_result, h_result_copy);
    // copy back
    h_result_copy = d_result2;
    compare_vals("h_result vs. d_result2", h_result, h_result_copy);
}
|
23,256 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <cuda.h>
#define RANDOM(x) (rand() % x)
#define MAX 100000
#define BLOCKSIZE 16
// Naive dense n x n matrix multiply: one thread computes one output cell
// c[row][col] as the dot product of row of a and column of b.
__global__ void multiply(const int *a, const int *b, int *c, int n) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int ccol = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= n || ccol >= n)
        return;   // grid may overhang the matrix edge
    int acc = 0;
    for (int t = 0; t < n; ++t)
        acc += a[r * n + t] * b[t * n + ccol];
    c[r * n + ccol] = acc;
}
// Multiplies two random n x n matrices on GPU and CPU, times both, and
// verifies the results match.
// BUG FIXES: (1) the original overwrote `error` with each cudaMalloc and
// only inspected the final value, so a failed first/second allocation went
// unnoticed; (2) cudaThreadSynchronize() is deprecated — replaced by the
// equivalent cudaDeviceSynchronize().
int main(int argc, char **argv) {
    int n = 512;
    int i, j, k;
    timeval start, finish;
    if (argc == 2) {
        n = atoi(argv[1]);
    }
    int *host_a = (int *)malloc(sizeof(int) * n * n);
    int *host_b = (int *)malloc(sizeof(int) * n * n);
    int *host_c = (int *)malloc(sizeof(int) * n * n);
    int *host_c2 = (int *)malloc(sizeof(int) * n * n);
    srand(time(NULL));
    for (i = 0; i < n * n; i++) {
        host_a[i] = RANDOM(MAX);
        host_b[i] = RANDOM(MAX);
    }
    cudaError_t error = cudaSuccess;
    int *device_a, *device_b, *device_c;
    // Check every allocation individually.
    error = cudaMalloc((void **)&device_a, sizeof(int) * n * n);
    if (error != cudaSuccess) {
        printf("Fail to cudaMalloc on GPU");
        return 1;
    }
    error = cudaMalloc((void **)&device_b, sizeof(int) * n * n);
    if (error != cudaSuccess) {
        printf("Fail to cudaMalloc on GPU");
        return 1;
    }
    error = cudaMalloc((void **)&device_c, sizeof(int) * n * n);
    if (error != cudaSuccess) {
        printf("Fail to cudaMalloc on GPU");
        return 1;
    }
    //GPU parallel start
    gettimeofday(&start, 0);
    cudaMemcpy(device_a, host_a, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, host_b, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    // Square grid just large enough to cover n*n cells with 16x16 blocks.
    double num = ceil(pow((double)n,2) / pow((double)BLOCKSIZE, 2));
    int gridsize = (int)ceil(sqrt(num));
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
    dim3 dimGrid(gridsize, gridsize, 1);
    multiply<<<dimGrid, dimBlock>>>(device_a, device_b, device_c, n);
    cudaDeviceSynchronize();
    cudaMemcpy(host_c, device_c, sizeof(int) * n * n, cudaMemcpyDeviceToHost);
    gettimeofday(&finish, 0);
    double t1 = 1000000 * (finish.tv_sec - start.tv_sec) + finish.tv_usec - start.tv_usec;
    printf("%lf ms\n", t1 / 1000);
    //GPU parallel finish
    //CPU serial start
    gettimeofday(&start, 0);
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            host_c2[i * n + j] = 0;
            for (k = 0; k < n; k++) {
                host_c2[i * n + j] += host_a[i * n + k] * host_b[k * n + j];
            }
        }
    }
    gettimeofday(&finish, 0);
    double t2 = 1000000 * (finish.tv_sec - start.tv_sec) + finish.tv_usec - start.tv_usec;
    printf("%lf ms\n", t2 / 1000);
    //CPU serial finish
    printf("加速比:%lf\n", t2 / t1);
    //check
    int errorNum = 0;
    for (int i = 0; i < n * n; i++) {
        if (host_c[i] != host_c2[i]) {
            errorNum ++;
            printf("Error occurs at index: %d: c = %d, c2 = %d\n", i, host_c[i], host_c2[i]);
        }
    }
    if (errorNum == 0) {
        printf("Successfully run on GPU and CPU!\n");
    } else {
        printf("%d error(s) occurs!\n", errorNum);
    }
    free(host_a);
    free(host_b);
    free(host_c);
    free(host_c2);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
23,257 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
// Component-wise sum of two 2-d vectors.
__host__ __device__ double2 d2add(double2 a, double2 b) {
    double2 sum;
    sum.x = a.x + b.x;
    sum.y = a.y + b.y;
    return sum;
}
// Component-wise difference a - b of two 2-d vectors.
__host__ __device__ double2 d2sub(double2 a, double2 b) {
    double2 diff;
    diff.x = a.x - b.x;
    diff.y = a.y - b.y;
    return diff;
}
// Scales a 2-d vector by the scalar b.
__host__ __device__ double2 d2multscal(double2 a, double b) {
    double2 scaled;
    scaled.x = a.x * b;
    scaled.y = a.y * b;
    return scaled;
}
// Scales a 2-d vector by the square of the scalar b.
__host__ __device__ double2 d2multscal2(double2 a, double b) {
    double factor = b * b;
    double2 scaled;
    scaled.x = a.x * factor;
    scaled.y = a.y * factor;
    return scaled;
}
// Divides each component of a 2-d vector by the scalar b.
// NOTE(review): b == 0 divides by zero, matching the original behavior.
__host__ __device__ double2 d2divscal(double2 a, double b) {
    double2 quot;
    quot.x = a.x / b;
    quot.y = a.y / b;
    return quot;
}
// Dot (inner) product of two 2-d vectors.
__host__ __device__ double d2dotp(double2 a, double2 b) {
    double acc = a.x * b.x;
    acc += a.y * b.y;
    return acc;
}
// Euclidean length of a 2-d vector: sqrt(x^2 + y^2).
__host__ __device__ double d2mag(double2 a) {
    double sq = a.x * a.x + a.y * a.y;
    return sqrt(sq);
}
// Euclidean distance between two points given as 2-d vectors.
__host__ __device__ double d2dist(double2 a, double2 b) {
    double2 delta = d2sub(a, b);
    return d2mag(delta);
}
// Unit vector in the direction of a.
// NOTE(review): a zero-length input divides by zero, matching the original.
__host__ __device__ double2 d2unit(double2 a) {
    double len = d2mag(a);
    return d2divscal(a, len);
}
// Returns the 2-d zero vector.
__host__ __device__ double2 d2null() {
    double2 zero;
    zero.x = 0.0;
    zero.y = 0.0;
    return zero;
}
|
23,258 | #include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its source location and (by default) aborts.
// Invoked through the gpuErrchk macro, which supplies __FILE__/__LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
#define imin(a,b) (a<b?a:b)
const int N=33000*1024;
const int tpb = 256; // threads per block
const int bpg = imin(32, (N+tpb-1) / tpb);
// this kernel computes a local sum by calculating the product of certain
// element pairs and maintaining a local sum of the results, which are
// subsequently reduced.
// Dot product: each thread accumulates a grid-stride partial sum of
// a[i]*b[i] (coalesced reads — neighbouring threads touch neighbouring
// elements), then the block reduces its partials in shared memory and
// writes one value per block to c. Assumes blockDim.x == tpb, a power of 2.
__global__ void dot(float *a, float* b, float* c)
{
    __shared__ float cache[tpb];   // one partial per thread in the block
    float partial = 0;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N;
         i += blockDim.x * gridDim.x)
    {
        partial += a[i] * b[i];
    }
    cache[threadIdx.x] = partial;
    // All partials must be in shared memory before the tree reduction.
    __syncthreads();
    // Halving tree reduction; barrier after every round since threads read
    // slots written by other threads.
    for (int half = blockDim.x / 2; half != 0; half /= 2)
    {
        if (threadIdx.x < half)
            cache[threadIdx.x] += cache[threadIdx.x + half];
        __syncthreads();
    }
    // Thread 0 publishes the block's reduced value.
    if (threadIdx.x == 0)
        c[blockIdx.x] = cache[0];
}
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
// Computes the dot product of two large vectors on the GPU, times the
// kernel with CUDA events, and checks against the closed-form solution.
// BUG FIXES: the original created the `stop` event only after the kernel
// had already run (event creation does not belong inside the timed region)
// and never destroyed either event (resource leak).
int main(int argc, char *argv[])
{
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    a = (float*)malloc(N*sizeof(float));
    b = (float*)malloc(N*sizeof(float));
    c = (float*)malloc(bpg*sizeof(float));   // one partial sum per block
    gpuErrchk(cudaMalloc(&d_a, N*sizeof(float)));
    gpuErrchk(cudaMalloc(&d_b, N*sizeof(float)));
    gpuErrchk(cudaMalloc(&d_c, bpg*sizeof(float)));
    for (int i=0; i<N; i++)
    {
        a[i] = i;
        b[i] = i*2;
    }
    gpuErrchk(cudaMemcpy(d_a, a, N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, b, N*sizeof(float), cudaMemcpyHostToDevice));
    cudaEvent_t start, stop;
    float elapsed;
    // Create both events up front, before timing begins.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    dot<<<bpg, tpb>>>(d_a, d_b, d_c);
    gpuErrchk( cudaDeviceSynchronize() );
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    printf("cuda kernel: %.2f ms\n", elapsed);
    // Host-side reduction of the per-block partial sums. This could also
    // be done on the GPU with a second reduction kernel.
    gpuErrchk(cudaMemcpy(c, d_c, bpg*sizeof(float), cudaMemcpyDeviceToHost));
    float temp=0;
    for (int i=0; i<bpg; i++)
    {
        temp += c[i];
    }
    // The manual result should match the closed-form sum (up to float
    // rounding at this magnitude).
    printf ("solved: %.6g\nclosed: %.6g\n", temp, 2*sum_squares((float)(N-1)));
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
23,259 | /*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#include <stdio.h>
/** This is for parallel reduction and is modified from NVIDIA's example code */
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
// Utility that exposes the kernel's dynamic (extern) shared memory as a
// typed pointer. A single int-typed extern array is declared so that all
// instantiations alias the same allocation without linker conflicts.
template<class T>
struct SharedMemory
{
// Implicit conversion to a mutable T* over the dynamic shared buffer.
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
// Const-qualified variant of the same conversion.
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
// Specialization for double: declares the extern array as double so the
// buffer carries 8-byte alignment, avoiding the unaligned-access compile
// errors the int-typed declaration would cause.
template<>
struct SharedMemory<double>
{
// Conversion to a mutable double* over the dynamic shared buffer.
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
// Const-qualified variant of the same conversion.
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
// The following defines are set during runtime compilation, see reduction.cpp
// #define T float
// #define blockSize 128
// #define nIsPow2 1
/*
This version is completely unrolled. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time.
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
// Block-wide sum reduction (NVIDIA "reduction 5" variant): each thread
// loads up to two elements from global memory, then the block reduces in
// shared memory; thread 0 writes one partial sum per block to g_odata.
// Requires dynamic shared memory of at least max(blockSize, 64)*sizeof(T)
// and blockSize to be a power of two.
// NOTE(review): the warp-synchronous tail (tid < 32, no barriers) relies
// on pre-Volta implicit warp lockstep; on Volta+ independent thread
// scheduling this pattern needs __syncwarp() — confirm target architecture.
template <class T>
__device__ void reduce5(T *g_idata, T *g_odata, unsigned int n, unsigned int blockSize)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
// Each block covers a 2*blockSize window; the second load below folds
// the upper half into the lower.
unsigned int i = blockIdx.x * (blockSize * 2) + threadIdx.x;
T mySum = (i < n) ? g_idata[i] : 0;
if (i + blockSize < n)
mySum += g_idata[i+blockSize];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T *smem = sdata;
if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
// Block-wide sum reduction (NVIDIA "reduction 6" variant): like reduce5
// but each thread first accumulates multiple elements with a grid-sized
// stride (Brent's theorem optimization), so fewer blocks cover large n.
// nIsPow2 lets the bounds check on the second load be skipped for
// power-of-two n. Same shared-memory and power-of-two blockSize
// requirements as reduce5.
// NOTE(review): the barrier-free tid<32 tail assumes pre-Volta warp
// lockstep — confirm target architecture.
template <class T>
__device__ void reduce6(T *g_idata, T *g_odata, unsigned int n, unsigned int blockSize, bool nIsPow2)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockSize * 2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T *smem = sdata;
if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
// Emits an extern "C" __global__ wrapper named _exportname_ that forwards
// to reduce5<T>; the unmangled name is needed by the runtime-compilation
// driver (see reduction.cpp).
#define MAKE_reduce5_TEMPLATE_IMPL(_exportname_, T) \
extern "C" __global__ void _exportname_(T* g_idata, T *g_odata, unsigned int n, unsigned int blockSize)\
{\
reduce5<T>(g_idata, g_odata, n, blockSize);\
}
// Same wrapper generator for reduce6<T>, which also takes the nIsPow2 flag.
#define MAKE_reduce6_TEMPLATE_IMPL(_exportname_, T) \
extern "C" __global__ void _exportname_(T* g_idata, T *g_odata, unsigned int n, unsigned int blockSize, bool nIsPow2)\
{\
reduce6<T>(g_idata, g_odata, n, blockSize, nIsPow2);\
}
// Instantiates one wrapper per supported element type, suffixing the
// export name: _c char, _uc unsigned char, _s short, _i int,
// _ui unsigned int, _f float, _d double.
#define MAKE_IMPL(_func_)\
MAKE_##_func_##_TEMPLATE_IMPL(_func_##_c, char);\
MAKE_##_func_##_TEMPLATE_IMPL(_func_##_uc, unsigned char);\
MAKE_##_func_##_TEMPLATE_IMPL(_func_##_s, short);\
MAKE_##_func_##_TEMPLATE_IMPL(_func_##_i, int);\
MAKE_##_func_##_TEMPLATE_IMPL(_func_##_ui, unsigned int);\
MAKE_##_func_##_TEMPLATE_IMPL(_func_##_f, float);\
MAKE_##_func_##_TEMPLATE_IMPL(_func_##_d, double);
// Export the template implementations
MAKE_IMPL(reduce5);
MAKE_IMPL(reduce6);
|
23,260 | //15co154 Yeshwanth R
//15co118 Goutham M
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
// Element-wise vector addition: d_out[i] = da_in[i] + db_in[i].
// GENERALIZATION: the original hard-coded the block width (blockIdx.x*100),
// which silently breaks for any launch other than 100 threads per block;
// using blockDim.x is identical for the existing <<<160, 100>>> launch and
// correct for every other configuration.
__global__ void func(float *da_in, float *db_in, float *d_out)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    d_out[idx] = da_in[idx] + db_in[idx];
}
// Adds a ramp vector and a random vector on the GPU and prints the result.
// FIX: removed the never-used locals t1, t2 and time (dead code / unused
// variable warnings). Behavior is otherwise unchanged.
int main()
{
    const int array_size = 16000;
    const int array_bytes = array_size * sizeof(float);
    float a_in[array_size], b_in[array_size];
    // a is the ramp 0..N-1, b is random in [0, 16000).
    for (int i = 0; i < array_size; i++)
    {
        a_in[i] = float(i);
    }
    for (int i = 0; i < array_size; i++)
    {
        b_in[i] = rand() % 16000;
    }
    float h_out[array_size];
    float *da_in;
    float *db_in;
    float *d_out;
    cudaMalloc((void **)&da_in, array_bytes);
    cudaMalloc((void **)&db_in, array_bytes);
    cudaMalloc((void **)&d_out, array_bytes);
    cudaMemcpy(da_in, a_in, array_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(db_in, b_in, array_bytes, cudaMemcpyHostToDevice);
    // kernel: 160 blocks x 100 threads covers all 16000 elements exactly.
    func<<<dim3(160,1,1), dim3(100,1,1)>>>(da_in, db_in, d_out);
    // copying back (the blocking memcpy synchronizes with the kernel)
    cudaMemcpy(h_out, d_out, array_bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < array_size; i++)
    {
        printf("%f", h_out[i]);
        printf(((i%12) != 3) ? "\t" : "\n");
    }
    cudaFree(da_in);
    cudaFree(d_out);
    cudaFree(db_in);
    printf("\n\n\n\n");
}
|
23,261 | // HEADERS
#include <iostream>
#include <iomanip>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
using namespace std;
// DEFINITIONS
#define NX 201
#define NY 201
#define NT 401
#define NS 640
__constant__ float hx = 0.001f;
__constant__ float hy = 0.001f;
__constant__ float h = 0.001f;
__constant__ float T = 1.3333e-04f; // 0.2f / 1500.0f;
__constant__ float dt = 3.3333e-07f; // T / 400.0f;
__constant__ float fre = 125000.0f;
__constant__ float omegac = 7.8540e+05f; // 2.0f * pi * fre;
__constant__ float tao = 4.0000e-06f; // pi / omegac;
__constant__ float tt = 8.1573e-06f; // sqrtf(6.0f * logf(2.0f)) * tao;
// FUNCTIONS DECLARATION
void Ultrasonic_Tomography(int group_size, float target_epsilon, int max_iterations, int ti);
void IO_Files(float*, float*, float*, float*);
float norm(float*, int);
__global__ void field_setup(float*, float*, float*, float*, float*, float*, float*);
__global__ void propagation(int, int, int, int, float*, float*, float* , float*, float*, int);
__global__ void propagation_at_corners(float*);
__global__ void initial_signal(float*, float*, float*, float*, float*, int);
__global__ void difference_signal(float*, float*, float*, float*, float*, float*, float*, float*, float*, int);
__global__ void backpropagation1(float*, float*, int);
__global__ void backpropagation2(float*, float*, float*, float*, float*, int);
__global__ void laplace1(float*, float*, int);
__global__ void laplace2(float*, float*, int);
__global__ void init_differential(float*, float*, float*, float*);
__global__ void update_differential(float*, float*, float*, float*, int);
__global__ void update_field(float*, float*, float*, float*, float*);
__global__ void reset(float*, float*, float*, float*, float*);
// MAIN PROGRAM
// Parses the three command-line parameters, runs the tomography driver,
// and reports wall-clock usage.
int main(int argc, char **argv)
{
    // Exactly three arguments are required.
    if (argc != 4) {
        cerr << "Usage: " << argv[0] << " <sensor group size> <target epsilon> <max iterations>\n\n";
        exit(1);
    }
    int group_size = stoi(argv[1]);
    float target_epsilon = stof(argv[2]);
    int max_iterations = stoi(argv[3]);
    // -1 means "no iteration cap".
    if (max_iterations == -1)
        max_iterations = numeric_limits<int>::max();
    // Fixed-point output with ten decimals on both streams.
    cout << fixed << setprecision(10);
    cerr << fixed << setprecision(10);
    cout << "Ultrasonic Tomography Running:\n\n";
    // Clock ticks at start; also handed to the driver for per-iteration timing.
    int ti = clock();
    cout << "ti = " << ti << "\n";
    Ultrasonic_Tomography(group_size, target_epsilon, max_iterations, ti);
    int tf = clock();
    cout << "tf = " << tf << "\n"
         << "tt = " << tf - ti << "\n"
         << "Total Seconds = " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n";
    // End of the program
    return 0;
}
// FUNCTIONS DEFINITION
// Full tomography driver: builds the phantom and propagation fields,
// simulates the forward wave field for each transducer group to record the
// reference boundary signals (g1..g4), then iterates a Kaczmarz-style
// inversion (forward propagate, difference against reference, backpropagate,
// Laplacian correction, field update) until the relative error epsilon
// reaches target_epsilon, diverges, or max_iterations is hit.
// `ti` is the clock() value at program start, used for per-iteration timing.
void Ultrasonic_Tomography(int group_size, float target_epsilon, int max_iterations, int ti)
{
// Simulation Variables
// NOTE(review): these locals shadow the __constant__ hx/hy used by the
// kernels; values are identical (0.001f) but the shadowing is fragile.
float hx = 0.001f;
float hy = 0.001f;
int Nx_Ny = NX * NY;
int Nx_Ny_Nt = NX * NY * NT;
int Nx_Nt = NX * NT;
float* x = new float[NX];
float* y = new float[NY];
float* fo = new float[Nx_Ny];
// Kernel Preparation
/*dim3 Grid_Size(13, 26);
dim3 Block_Size(16, 8);*/
/*dim3 Grid_Size(7, 51);
dim3 Block_Size(32, 4);*/
/*dim3 Grid_Size(7, 26);
dim3 Block_Size(32, 8);*/
// 13*16 = 208 threads per axis covers the 201-wide grid.
dim3 Grid_Size(13, 13);
dim3 Block_Size(16, 16);
// Variables of allocation
float* dev_x;
int size_x = NX * sizeof(float);
float* dev_y;
// NOTE(review): uses NX — presumably NY was intended; benign only because
// NX == NY == 201. TODO confirm.
int size_y = NX * sizeof(float);
float* dev_fo;
int size_fo = Nx_Ny * sizeof(float);
float* dev_v;
int size_v = Nx_Ny * sizeof(float);
float* dev_r;
int size_r = Nx_Ny * sizeof(float);
float* dev_r2;
int size_r2 = Nx_Ny * sizeof(float);
float* dev_s;
int size_s = Nx_Ny * sizeof(float);
float* dev_u;
int size_u = Nx_Ny_Nt * sizeof(float);
// One recorded signal set per transducer group.
int Ng = NS / group_size;
float* dev_g1;
int size_g1 = Nx_Nt * Ng * sizeof(float);
float* dev_g2;
int size_g2 = Nx_Nt * Ng * sizeof(float);
float* dev_g3;
int size_g3 = Nx_Nt * Ng * sizeof(float);
float* dev_g4;
int size_g4 = Nx_Nt * Ng * sizeof(float);
cudaMalloc((void**) &dev_x, size_x);
cudaMalloc((void**) &dev_y, size_y);
cudaMalloc((void**) &dev_fo, size_fo);
cudaMalloc((void**) &dev_v, size_v);
cudaMalloc((void**) &dev_r, size_r);
cudaMalloc((void**) &dev_r2, size_r2);
cudaMalloc((void**) &dev_s, size_s);
cudaMalloc((void**) &dev_u, size_u);
cudaMalloc((void**) &dev_g1, size_g1);
cudaMalloc((void**) &dev_g2, size_g2);
cudaMalloc((void**) &dev_g3, size_g3);
cudaMalloc((void**) &dev_g4, size_g4);
// cudaMemset takes an int byte value; 0.0 converts to 0, which is correct
// for zero-filling floats.
cudaMemset(dev_u, 0.0, size_u);
cudaMemset(dev_g1, 0.0, size_g1);
cudaMemset(dev_g2, 0.0, size_g2);
cudaMemset(dev_g3, 0.0, size_g3);
cudaMemset(dev_g4, 0.0, size_g4);
// Environment Initialization: physical coordinates of the grid axes.
for (int i = 0; i < NX; i++)
{
x[i] = -0.1f + i * hx;
}
for (int j = 0; j < NY; j++)
{
y[j] = -0.1f + j * hy;
}
cudaMemcpy(dev_x, x, size_x, cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, y, size_y, cudaMemcpyHostToDevice);
// Build the phantom fo and derived propagation fields v, r, r2, s.
field_setup<<<Grid_Size, Block_Size>>>(dev_x, dev_y, dev_fo, dev_v, dev_r, dev_r2, dev_s);
cudaMemcpy(fo, dev_fo, size_fo, cudaMemcpyDeviceToHost);
// Position of the transducers: 160 per side of a square ring.
int* jj = new int[NS];
int* ii = new int[NS];
for (int p = 0; p < 160; p++)
{
jj[p] = 181;
ii[p] = 21 + (p + 1);
}
for (int p = 160; p < 320; p++)
{
ii[p] = 181;
jj[p] = 181 - ((p + 1) - 160);
}
for (int p = 320; p < 480; p++)
{
jj[p] = 21;
ii[p] = 181 - ((p + 1) - 320);
}
for (int p = 480; p < NS; p++)
{
ii[p] = 21;
jj[p] = 21 + ((p + 1) - 480);
}
// Forward simulation per transducer group: record the reference boundary
// signals g1..g4 against the true phantom.
for (int p = 0; p < NS; p += group_size)
{
cudaMemset(dev_u, 0.0, size_u);
int jp1 = jj[p];
int jp2 = jj[p + group_size - 1];
int ip1 = ii[p];
int ip2 = ii[p + group_size - 1];
// Normalize so (jp1,ip1) <= (jp2,ip2) regardless of traversal direction.
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Boundary
for (int k = 1; k < NT - 1; k++)
{
propagation<<<Grid_Size, Block_Size>>>(jp1, jp2, ip1, ip2, dev_v, dev_r, dev_r2 , dev_s, dev_u, k);
}
// Four corners
propagation_at_corners<<<1, NT>>>(dev_u);
initial_signal<<<dim3(NT - 2, 1) , dim3(159, 1)>>>(dev_u, dev_g1, dev_g2, dev_g3, dev_g4, p / group_size);
}
// Kaczmarz method
// propagation
float* dev_rr1;
int size_rr1 = Nx_Nt * sizeof(float);
float* dev_rr2;
int size_rr2 = Nx_Nt * sizeof(float);
float* dev_rr3;
int size_rr3 = Nx_Nt * sizeof(float);
float* dev_rr4;
int size_rr4 = Nx_Nt * sizeof(float);
float* dev_z;
int size_z = Nx_Ny * (NT + 1) * sizeof(float);
float* dev_Lu;
int size_Lu = Nx_Ny_Nt * sizeof(float);
float* dev_f;
int size_f = Nx_Ny * sizeof(float);
float* dev_df;
int size_df = Nx_Ny * sizeof(float);
float* dev_alpha;
int size_alpha = Nx_Ny * sizeof(float);
float* dev_f_minus_fo;
int size_f_minus_fo = Nx_Ny * sizeof(float);
// Allocation
cudaMalloc((void**) &dev_rr1, size_rr1);
cudaMalloc((void**) &dev_rr2, size_rr2);
cudaMalloc((void**) &dev_rr3, size_rr3);
cudaMalloc((void**) &dev_rr4, size_rr4);
cudaMalloc((void**) &dev_z, size_z);
cudaMalloc((void**) &dev_Lu, size_Lu);
cudaMalloc((void**) &dev_f, size_f);
cudaMalloc((void**) &dev_df, size_df);
cudaMalloc((void**) &dev_alpha, size_alpha);
cudaMalloc((void**) &dev_f_minus_fo, size_f_minus_fo);
cudaMemset(dev_rr1, 0.0, size_rr1);
cudaMemset(dev_rr2, 0.0, size_rr2);
cudaMemset(dev_rr3, 0.0, size_rr3);
cudaMemset(dev_rr4, 0.0, size_rr4);
cudaMemset(dev_f, 0.0, size_f);
cudaMemset(dev_Lu, 0.0, size_Lu);
float* f = new float[Nx_Ny];
float* f_minus_fo = new float[Nx_Ny];
// initialize epsilon values
float prev_epsilon = std::numeric_limits<float>::infinity();
float curr_epsilon = -std::numeric_limits<float>::infinity();
ofstream convergence_file("art_convergence0.txt");
ofstream time_file("art_time0.txt");
// Inversion loop: one pass over all transducer groups per iteration.
for (int iter = 0; iter < max_iterations; iter++)
{
cout << "\nIter: " << iter << "\n";
cudaMemset(dev_u, 0.f, size_u);
for (int p = 0; p < NS; p += group_size)
{
int jp1 = jj[p];
int jp2 = jj[p + group_size - 1];
int ip1 = ii[p];
int ip2 = ii[p + group_size - 1];
if (jp2 < jp1)
{
int jp = jp1;
jp1 = jp2;
jp2 = jp;
}
if (ip2 < ip1)
{
int ip = ip1;
ip1 = ip2;
ip2 = ip;
}
// Rebuild v, r, r2, s from the current estimate f.
reset<<<Grid_Size, Block_Size>>>(dev_f, dev_v, dev_r, dev_r2, dev_s);
// Boundary
for (int k = 1; k < NT - 1; k++)
{
propagation<<<Grid_Size, Block_Size>>>(jp1, jp2, ip1, ip2, dev_v, dev_r, dev_r2 , dev_s, dev_u, k);
}
// Four corners
propagation_at_corners<<<1, NT>>>(dev_u);
// Residual between simulated and recorded boundary signals.
difference_signal<<<dim3(NT - 2, 1), dim3(159, 1)>>>(dev_u, dev_g1, dev_g2, dev_g3, dev_g4, dev_rr1, dev_rr2, dev_rr3, dev_rr4, p / group_size);
cudaMemset(dev_z, 0.0, size_z);
// Backpropagate the residual in reverse time.
for (int k = NT - 2; k > 0; k--)
{
backpropagation1<<<Grid_Size, Block_Size>>>(dev_z, dev_f, k);
backpropagation2<<<1, NX>>>(dev_z, dev_rr1, dev_rr2, dev_rr3, dev_rr4, k);
}
// Laplacian of the forward field (interior, then boundary rows).
for (int k = 1; k < NT; k++)
{
laplace1<<<Grid_Size, Block_Size>>>(dev_u, dev_Lu, k);
laplace2<<<1, NX>>>(dev_u, dev_Lu, k);
}
// Accumulate the gradient df over all time steps.
init_differential<<<Grid_Size, Block_Size>>>(dev_df, dev_z, dev_Lu, dev_f);
for (int k = 2; k < NT; k++)
{
update_differential<<<Grid_Size, Block_Size>>>(dev_df, dev_z, dev_Lu, dev_f, k);
}
// Apply the update and compute f - fo for the error metric.
update_field<<<Grid_Size, Block_Size>>>(dev_alpha, dev_f, dev_df, dev_f_minus_fo, dev_fo);
}
cudaMemcpy(f_minus_fo, dev_f_minus_fo, size_f_minus_fo, cudaMemcpyDeviceToHost);
// Relative error in percent.
curr_epsilon = norm(f_minus_fo, Nx_Ny) / norm(fo, Nx_Ny) * 100.0f;
float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC;
convergence_file << curr_epsilon << " ";
time_file << (current_t)<<" ";
cout << "epsilon = " << curr_epsilon << "\n";
// stop if reached target epsilon
if (curr_epsilon <= target_epsilon) {
break;
}
// stop if epsilon diverges
if (curr_epsilon > prev_epsilon ||
std::isnan(curr_epsilon)) {
break;
}
// update prev_epsilon
prev_epsilon = curr_epsilon;
}
cudaMemcpy(f, dev_f, size_f, cudaMemcpyDeviceToHost);
IO_Files(x, y, fo, f);
// Free Variables
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_fo);
cudaFree(dev_v);
cudaFree(dev_r);
cudaFree(dev_r2);
cudaFree(dev_s);
cudaFree(dev_u);
cudaFree(dev_g1);
cudaFree(dev_g2);
cudaFree(dev_g3);
cudaFree(dev_g4);
cudaFree(dev_rr1);
cudaFree(dev_rr2);
cudaFree(dev_rr3);
cudaFree(dev_rr4);
cudaFree(dev_z);
cudaFree(dev_Lu);
cudaFree(dev_f);
cudaFree(dev_df);
cudaFree(dev_alpha);
cudaFree(dev_f_minus_fo);
delete [] x;
delete [] y;
delete [] fo;
delete [] ii;
delete [] jj;
delete [] f;
delete [] f_minus_fo;
cudaDeviceReset();
}
// Builds the phantom fo (a disc with three small circular inclusions) and
// the derived propagation fields: sound speed v, Courant ratio r = v*dt/hx,
// its square r2, and the stencil coefficient s = 2 - 4*r2.
// One thread per grid cell; launch must cover NX x NY.
// BUG FIX: the original assigned fo/v/r/r2/s twice with identical values
// (the second copy sat after a large commented-out alternative phantom);
// the redundant recomputation and the dead commented-out variants were
// removed. Active behavior is unchanged.
// NOTE(review): cos/sin below are the double-precision overloads inside a
// float kernel; cosf/sinf would avoid the promotion but change results by
// a few ulps, so they are left as-is.
__global__ void field_setup(float* x, float* y, float* fo, float* v, float* r, float* r2, float* s)
{
    // Map from threadIdx / BlockIdx to pixel position
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i < NX && j < NY)
    {
        int offset = i + NX * j;
        float value = 0.0f;
        float rc = 0.015f;   // radius of the main disc
        float rp = 0.005f;   // radius of each inclusion
        float sc = 0.03f;    // contrast inside the main disc
        float sp = 0.05f;    // contrast inside the inclusions
        // Main disc centered at the origin.
        if (powf(x[i], 2) + powf(y[j], 2) <= powf(rc, 2))
        {
            value = sc;
        }
        // Two inclusions at +/-30 degrees on the disc rim, one at the bottom.
        if (powf(x[i] - rc * cos(-30 * (3.14159265f / 180)), 2) + powf(y[j] - rc * sin(30 * (3.14159265f / 180)), 2) <= powf(rp, 2))
        {
            value = sp;
        }
        if (powf(x[i] + rc * cos(-30 * (3.14159265f / 180)), 2) + powf(y[j] - rc * sin(30 * (3.14159265f / 180)), 2) <= powf(rp, 2))
        {
            value = sp;
        }
        if (powf(x[i], 2) + powf(y[j] + rc, 2) <= powf(rp, 2))
        {
            value = sp;
        }
        // Derived propagation fields for the finite-difference stencil.
        fo[offset] = value;
        v[offset] = 1500.0f * sqrtf(1.0f + value);
        r[offset] = v[offset] * dt / hx;
        r2[offset] = powf(r[offset], 2.0f);
        s[offset] = 2.0f - 4.0f * r2[offset];
    }
}
// Advances the wave field u by one time step (slice k -> k+1) with an
// explicit two-level finite-difference scheme, then overwrites the four
// grid edges with one-sided absorbing-style updates. One thread per (i, j).
// u is laid out as consecutive NX*NY time slices; v/r/r2/s are the per-cell
// coefficients produced by field_setup/reset.
//   jp1..jp2, ip1..ip2 : 1-based bounds of the rectangular source region
//   k                  : current time-slice index
__global__ void propagation(int jp1, int jp2, int ip1, int ip2, float* v, float* r, float* r2 , float* s, float* u, int k)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int Nx_Ny = NX * NY;
int Nx_Ny_k = Nx_Ny * k;
if((i < NX) && (j < NY))
{
int offset = i + NX * j;
// Inside the source rectangle and for the first 24 steps, inject a
// Gaussian-modulated tone burst (carrier omegac, envelope width tao) on
// top of the standard stencil.
if (((j + 1) >= jp1) && ((j + 1) <= jp2) && ((i + 1) >= ip1) && ((i + 1) <= ip2) && ((k + 1) <= 24))
{
u[offset + Nx_Ny * (k + 1)] = powf(v[offset], 2.0f) * powf(dt, 2.0f) * cosf(omegac * (k * dt - tt)) * expf(-powf(k * dt - tt, 2.0f) / (2.0f * powf(tao, 2.0f))) + r2[offset] * (u[(i + 1) + NX * j + Nx_Ny_k] + u[(i - 1) + NX * j + Nx_Ny_k] + u[i + NX * (j - 1) + Nx_Ny_k] + u[i + NX * (j + 1) + Nx_Ny_k]) + s[offset] * u[offset + Nx_Ny_k] - u[offset + Nx_Ny * (k - 1)];
}
else
{
// Interior update: r2 * (4-neighbor sum at slice k) + s*u_k - u_{k-1}.
// NOTE(review): neighbor indices are not clamped, so edge threads read
// outside their row/column; those edge cells are overwritten below by the
// boundary updates — confirm this is benign for the launch configuration.
u[offset + Nx_Ny * (k + 1)] = r2[offset] * (u[(i + 1) + NX * j + Nx_Ny_k] + u[(i - 1) + NX * j + Nx_Ny_k] + u[i + NX * (j - 1) + Nx_Ny_k] + u[i + NX * (j + 1) + Nx_Ny_k]) + s[offset] * u[offset + Nx_Ny_k] - u[offset + Nx_Ny * (k - 1)];
}
// One-sided boundary update, left edge (i == 0): extrapolates inward using
// the two interior neighbors at slice k and slice k-1.
if ((i == 0) && (j > 0) && (j < (NY - 1)))
{
u[offset + Nx_Ny * (k + 1)] = (2.0f - 2.0f * r[offset] - r2[offset]) * u[offset + Nx_Ny_k] + 2.0f * r[offset] * (1.0f + r[offset]) * u[(offset + 1) + Nx_Ny_k] - r2[offset] * u[(offset + 2) + Nx_Ny_k] + (2.0f * r[offset] - 1.0f) * u[offset + Nx_Ny * (k - 1)] - 2.0f * r[offset] * u[(offset + 1) + Nx_Ny * (k - 1)];
}
// Right edge (i == NX-1): mirror of the left-edge formula.
if ((i == NX - 1) && (j > 0) && (j < (NY - 1)))
{
u[offset + Nx_Ny * (k + 1)] = (2.0f - 2.0f * r[offset] - r2[offset]) * u[offset + Nx_Ny_k] + 2.0f * r[offset] * (1.0f + r[offset]) * u[(offset - 1) + Nx_Ny_k] - r2[offset] * u[(offset - 2) + Nx_Ny_k] + (2.0f * r[offset] - 1.0f) * u[offset + Nx_Ny * (k - 1)] - 2.0f * r[offset] * u[(offset - 1) + Nx_Ny * (k - 1)];
}
// Bottom edge (j == 0): same scheme along the row direction.
if ((j == 0) && (i > 0) && (i < (NX - 1)))
{
u[offset + Nx_Ny * (k + 1)] = (2.0f - 2.0f * r[offset] - r2[offset]) * u[offset + Nx_Ny_k] + 2.0f * r[offset] * (1.0f + r[offset]) * u[(i + (j + 1) * NX) + Nx_Ny_k] - r2[offset] * u[(i + (j + 2) * NX) + Nx_Ny_k] + (2.0f * r[offset] - 1.0f) * u[offset + Nx_Ny * (k - 1)] - 2.0f * r[offset] * u[(i + (j + 1) * NX) + Nx_Ny * (k - 1)];
}
// Top edge (j == NY-1): mirror of the bottom-edge formula.
if ((j == NY - 1) && (i > 0) && (i < (NX - 1)))
{
u[offset + Nx_Ny * (k + 1)] = (2.0f - 2.0f * r[offset] - r2[offset]) * u[offset + Nx_Ny_k] + 2.0f * r[offset] * (1.0f + r[offset]) * u[(i + (j - 1) * NX) + Nx_Ny_k] - r2[offset] * u[(i + (j - 2) * NX) + Nx_Ny_k] + (2.0f * r[offset] - 1.0f) * u[offset + Nx_Ny * (k - 1)] - 2.0f * r[offset] * u[(i + (j - 1) * NX) + Nx_Ny * (k - 1)];
}
}
}
// Fills the four corners of every time slice of u with the average of their
// two edge neighbors. Launch: one thread per time slice (k = threadIdx.x).
__global__ void propagation_at_corners(float* u)
{
    int k = threadIdx.x;
    int Nx_Ny = NX * NY;
    int Nx_Ny_k = Nx_Ny * k;
    // BUG FIX: the (0,0) corner read u[NX + k] and u[1 + k] — slice-0
    // addresses shifted by k — instead of its neighbors within slice k,
    // unlike the other three corners (and the matching corner handling in
    // backpropagation2).
    u[Nx_Ny_k] = 1.0f / 2.0f * (u[NX + Nx_Ny_k] + u[1 + Nx_Ny_k]);
    u[(NX - 1) + Nx_Ny_k] = 1.0f / 2.0f * (u[(NX - 2) + Nx_Ny_k] + u[(NX - 1) + NX + Nx_Ny_k]);
    u[(NY - 1) * NX + Nx_Ny_k] = 1.0f / 2.0f * (u[(NY - 2) * NX + Nx_Ny_k] + u[1 +(NY - 1) * NX + Nx_Ny_k]);
    u[(NX - 1) + (NY - 1) * NX + Nx_Ny_k] = 1.0f / 2.0f * (u[(NX - 2) + (NY - 1) * NX + Nx_Ny_k] + u[(NX - 1) + (NY - 2) * NX + Nx_Ny_k]);
}
// Records the simulated field u on the four receiver lines (rows 180 and 20,
// columns 180 and 20) into g1..g4 for source position p.
// Launch mapping: blockIdx.x spans time steps (step = blockIdx.x + 2),
// threadIdx.x spans receiver elements (elem = threadIdx.x + 21).
__global__ void initial_signal(float* u, float* g1, float* g2, float* g3, float* g4, int p)
{
    const int elem = threadIdx.x + 21;       // receiver element index
    const int step = blockIdx.x + 2;         // time-step index
    const int sliceBase = NX * NY * step;    // start of time slice `step` in u
    const int recSlot = elem + NX * step + NX * NT * p;  // output slot in g1..g4
    g1[recSlot] = u[elem + NX * 180 + sliceBase];  // row 180
    g3[recSlot] = u[elem + NX * 20 + sliceBase];   // row 20
    g2[recSlot] = u[180 + NX * elem + sliceBase];  // column 180
    g4[recSlot] = u[20 + NX * elem + sliceBase];   // column 20
}
// Residuals between the recorded signals g1..g4 (source p) and the signals
// simulated from the current field estimate u; one thread per
// (receiver element, time step) pair, same launch mapping as initial_signal.
__global__ void difference_signal(float* u, float* g1, float* g2, float* g3, float* g4, float* rr1, float* rr2, float* rr3, float* rr4, int p)
{
    const int elem = threadIdx.x + 21;       // receiver element index
    const int step = blockIdx.x + 2;         // time-step index
    const int sliceBase = NX * NY * step;    // start of time slice `step` in u
    const int recSlot = elem + step * NX + NX * NT * p;  // slot in g1..g4
    const int resSlot = elem + NX * step;                // slot in rr1..rr4
    rr1[resSlot] = g1[recSlot] - u[elem + NX * 180 + sliceBase];
    rr3[resSlot] = g3[recSlot] - u[elem + NX * 20 + sliceBase];
    rr2[resSlot] = g2[recSlot] - u[180 + NX * elem + sliceBase];
    rr4[resSlot] = g4[recSlot] - u[20 + NX * elem + sliceBase];
}
// One backward-in-time step of the adjoint field z (slice k computed from
// slices k+1 and k+2), interior grid points only; boundaries are handled by
// backpropagation2. One thread per (col, row).
__global__ void backpropagation1(float* z, float* f, int k)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int slice = NX * NY;
    const int next = slice * (k + 1);   // offset of time slice k+1
    if ((col >= 1) && (col < (NX - 1)) && (row >= 1) && (row < (NY - 1)))
    {
        const int c = col + NX * row;
        const int n = col + NX * (row - 1);
        const int s = col + NX * (row + 1);
        const int w = (col - 1) + NX * row;
        const int e = (col + 1) + NX * row;
        // Variable-coefficient Laplacian of z at slice k+1 weighted by
        // (1 + f), plus the standard two-level time recursion.
        z[c + slice * k] = powf(1500.0f, 2.0f) * (dt * dt)
            * ((1.0f + f[n]) * z[n + next]
             + (1.0f + f[s]) * z[s + next]
             + (1.0f + f[w]) * z[w + next]
             + (1.0f + f[e]) * z[e + next]
             - 4.0f * (1.0f + f[c]) * z[c + next]) / (h * h)
            + 2.0f * z[c + next] - z[c + slice * (k + 2)];
    }
}
// Boundary treatment for the adjoint field z at time slice k:
//  1) injects the scaled residuals rr1..rr4 along the four receiver lines,
//  2) copies the adjacent interior value onto the outer edges,
//  3) thread 0 averages the four corners from their edge neighbors.
// Launched over threadIdx.x only (one block assumed from the indexing).
// NOTE(review): step 3 (and partly step 2) reads z values written by OTHER
// threads in the same launch with no __syncthreads() in between — this
// looks like a cross-thread race; confirm whether the launch/warp layout
// makes it benign or whether barriers are needed between the sections.
__global__ void backpropagation2(float* z, float* rr1, float* rr2, float* rr3, float* rr4, int k)
{
int i = threadIdx.x;
int Nx_Ny_k = NX * NY * k;
int i_Nx_k = i + NX * k;
// Residual injection on the receiver lines (rows/cols 180 and 20).
if((i >= 21) && (i < 180))
{
z[i + NX * 180 + Nx_Ny_k] = z[i + NX * 179 + Nx_Ny_k] + rr1[i_Nx_k] * h * 1000.0f;
z[i + NX * 20 + Nx_Ny_k] = z[i + NX * 21 + Nx_Ny_k] + rr3[i_Nx_k] * h * 1000.0f;
z[180 + NX * i + Nx_Ny_k] = z[179 + NX * i + Nx_Ny_k] + rr2[i_Nx_k] * h * 1000.0f;
z[20 + NX * i + Nx_Ny_k] = z[21 + NX * i + Nx_Ny_k] + rr4[i_Nx_k] * h * 1000.0f;
}
// Zero-gradient (copy) condition on the four outer edges.
if((i >= 1) && (i < (NX - 1)))
{
z[i + Nx_Ny_k] = z[i + NX + Nx_Ny_k];
z[i + NX * (NY - 1) + Nx_Ny_k] = z[i + NX * (NY - 2) + Nx_Ny_k];
z[NX * i + Nx_Ny_k] = z[1 + NX * i + Nx_Ny_k];
z[(NX - 1) + NX * i + Nx_Ny_k] = z[(NX - 2) + NX * i + Nx_Ny_k];
}
// Corners: average of the two adjacent edge values (thread 0 only).
if(i == 0)
{
z[Nx_Ny_k] = (z[1 + Nx_Ny_k] + z[NX + Nx_Ny_k]) / 2.0f;
z[(NX - 1) + Nx_Ny_k] = (z[(NX - 2) + Nx_Ny_k] + z[(NX - 1) + NX + Nx_Ny_k]) / 2.0f;
z[NX * (NY - 1) + Nx_Ny_k] = (z[1 + NX * (NY - 1) + Nx_Ny_k] + z[NX * (NY - 2) + Nx_Ny_k]) / 2.0f;
z[(NX - 1) + NX * (NY - 1) + Nx_Ny_k] = (z[(NX - 2) + NX * (NY - 1) + Nx_Ny_k] + z[(NX - 1) + NX * (NY - 2) + Nx_Ny_k]) / 2.0f;
}
}
// 5-point Laplacian of time slice k of u, written into the same slice of Lu.
// Interior grid points only; edges/corners are handled by laplace2.
__global__ void laplace1(float* u, float* Lu, int k)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const int base = NX * NY * k;   // start of time slice k
    const bool interior = (ix >= 1) && (ix < (NX - 1)) && (iy >= 1) && (iy < (NY - 1));
    if (interior)
    {
        const int c = ix + NX * iy;
        Lu[c + base] = (u[ix + NX * (iy - 1) + base]
                      + u[ix + NX * (iy + 1) + base]
                      + u[(ix - 1) + NX * iy + base]
                      + u[(ix + 1) + NX * iy + base]
                      - 4.0f * u[c + base]) / (h * h);
    }
}
// Boundary Laplacian for time slice k: computes Lu on the four outer edges
// using the cell itself in place of the missing outside neighbor, then
// thread 0 averages the corners from the edge results.
// Launched over threadIdx.x only (one block assumed from the indexing).
// NOTE(review): the i == 0 corner section reads Lu values written by OTHER
// threads in the same launch with no __syncthreads() in between — likely a
// cross-thread race; confirm or add a barrier between the two sections.
__global__ void laplace2(float* u, float* Lu, int k)
{
int i = threadIdx.x;
int Nx_Ny_k = NX * NY * k;
if((i >= 1) && (i < (NX - 1)))
{
// Bottom row (j = 0): the out-of-grid neighbor is replaced by u itself.
Lu[i + Nx_Ny_k] = (u[i + Nx_Ny_k] + u[i + NX + Nx_Ny_k] + u[(i - 1) + Nx_Ny_k] + u[(i + 1) + Nx_Ny_k] - 4.0f * u[i + Nx_Ny_k]) / (h * h);
// Top row (j = NY-1).
Lu[i + NX * (NY - 1) + Nx_Ny_k] = (u[i + NX * (NY - 1) + Nx_Ny_k] + u[i + NX * (NY - 2) + Nx_Ny_k] + u[(i - 1) + NX * (NY - 1) + Nx_Ny_k] + u[(i + 1) + NX * (NY - 1) + Nx_Ny_k] - 4.0f * u[i + NX * (NY - 1) + Nx_Ny_k]) / (h * h);
// Left column (i = 0 of row `i`).
Lu[NX * i + Nx_Ny_k] = (u[NX * i + Nx_Ny_k] + u[1 + NX * i + Nx_Ny_k] + u[NX * (i - 1) + Nx_Ny_k] + u[NX * (i + 1) + Nx_Ny_k] - 4.0f * u[NX * i + Nx_Ny_k]) / (h * h);
// Right column (i = NX-1 of row `i`).
Lu[(NX - 1) + NX * i + Nx_Ny_k] = (u[(NX - 1) + NX * i + Nx_Ny_k] + u[(NX - 2) + NX * i + Nx_Ny_k] + u[(NX - 1) + NX * (i - 1) + Nx_Ny_k] + u[(NX - 1) + NX * (i + 1) + Nx_Ny_k] - 4.0f * u[(NX - 1) + NX * i + Nx_Ny_k]) / (h * h);
}
// Corners: average of the two adjacent edge Laplacians (thread 0 only).
if(i == 0)
{
Lu[Nx_Ny_k] = (Lu[1 + Nx_Ny_k] + Lu[NX + Nx_Ny_k]) / 2.0f;
Lu[(NX - 1) + Nx_Ny_k] = (Lu[(NX - 2) + Nx_Ny_k] + Lu[(NX - 1) + NX + Nx_Ny_k]) / 2.0f;
Lu[NX * (NY - 1) + Nx_Ny_k] = (Lu[1 + NX * (NY - 1) + Nx_Ny_k] + Lu[NX * (NY - 2) + Nx_Ny_k]) / 2.0f;
Lu[(NX - 1) + NX * (NY - 1) + Nx_Ny_k] = (Lu[(NX - 2) + NX * (NY - 1) + Nx_Ny_k] + Lu[(NX - 1) + NX * (NY - 2) + Nx_Ny_k]) / 2.0f;
}
}
// Initializes the gradient accumulator from time slice 1:
// df = z(:,:,1) .* Lu(:,:,1) ./ (1 + f), element-wise over the grid.
__global__ void init_differential(float* df, float* z, float* Lu, float* f)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= NX || iy >= NY)
        return;
    const int cell = ix + NX * iy;
    const int slot = cell + NX * NY;   // same cell within time slice 1
    df[cell] = z[slot] * Lu[slot] / (1.0f + f[cell]);
}
// Accumulates the gradient contribution of time slice k:
// df += z(:,:,k) .* Lu(:,:,k) ./ (1 + f), element-wise over the grid.
__global__ void update_differential(float* df, float* z, float* Lu, float* f, int k)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= NX || iy >= NY)
        return;
    const int cell = ix + NX * iy;
    const int slot = cell + NX * NY * k;   // same cell within time slice k
    df[cell] += z[slot] * Lu[slot] / (1.0f + f[cell]);
}
// Gradient step on the field estimate f, restricted to the imaging window
// [21,180) x [21,180) via the mask alpha; also stores the residual f - fo.
__global__ void update_field(float* alpha, float* f, float* df, float* f_minus_fo, float* fo)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= NX || iy >= NY)
        return;
    const int cell = ix + NX * iy;
    // Mask: 1 inside the imaging window, 0 outside.
    const bool inWindow = (ix >= 21) && (ix < 180) && (iy >= 21) && (iy < 180);
    alpha[cell] = inWindow ? 1.0f : 0.0f;
    f[cell] += 20000.0f * alpha[cell] * df[cell];   // fixed step size 20000
    f_minus_fo[cell] = f[cell] - fo[cell];          // residual vs. ground truth
}
// Recomputes the FDTD coefficients v/r/r2/s from the current field
// estimate f (same formulas as field_setup), one thread per grid point.
__global__ void reset(float* f, float* v, float* r, float* r2, float* s)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= NX || iy >= NY)
        return;
    const int cell = ix + NX * iy;
    v[cell] = 1500.0f * sqrtf(1.0f + f[cell]);   // local wave speed
    r[cell] = v[cell] * dt / hx;                 // CFL ratio
    r2[cell] = powf(r[cell], 2.0f);
    s[cell] = 2.0f - 4.0f * r2[cell];
}
// Dumps the grid coordinates, the ground-truth field fo, and the
// reconstructed field f to text files for post-processing:
//   dev_x.txt / dev_y.txt : one coordinate per line
//   dev_f0.txt            : fo, tab-separated, one grid row per line
//   dev_f.txt             : f, space-separated, one grid row per line
void IO_Files(float *x, float *y, float *fo, float *f)
{
    ofstream x_file, y_file;
    ofstream fo_file;
    ofstream f_file;
    x_file.open("dev_x.txt");
    y_file.open("dev_y.txt");
    fo_file.open("dev_f0.txt");
    f_file.open("dev_f.txt");
    for (int i = 0; i < NX; i++)
    {
        x_file << x[i];
        x_file << "\n";
    }
    // BUG FIX: this loop ran to NX; y is indexed with j < NY elsewhere
    // (see field_setup), so iterate to NY.
    for (int j = 0; j < NY; j++)
    {
        y_file << y[j];
        y_file << "\n";
    }
    for (int j = 0; j < NY; j++)
    {
        for (int i = 0; i < NX; i++)
        {
            fo_file << fo[i + NX * j];
            fo_file << "\t";
        }
        fo_file << "\n";
    }
    for (int j = 0; j < NY; j++)
    {
        for (int i = 0; i < NX; i++)
        {
            f_file << f[i + NX * j];
            f_file << " ";
        }
        f_file << "\n";
    }
    x_file.close();
    y_file.close();
    fo_file.close();
    f_file.close();
}
// Euclidean (L2) norm of the first `length` entries of A.
float norm(float* A, int length)
{
    float acc = 0;
    for (int idx = 0; idx < length; idx++)
    {
        acc += A[idx] * A[idx];
    }
    return sqrtf(acc);
}
|
23,262 | #include <cufft.h>
#include <iostream>
#include <complex>
// #define DATA_LEN 1024
// #define ITERATION 100000
// Benchmarks ITERATION forward 1-D complex-to-complex FFTs of length
// DATA_LEN with cuFFT and prints the elapsed GPU time.
// Usage: prog DATA_LEN ITERATION
int main(int argc, char **argv)
{
    if (argc != 3)
    {
        std::cout << "Usage: " << argv[0] << " [DATA_LEN] [ITERATION]" << std::endl;
        return 1;
    }
    int DATA_LEN = atoi(argv[1]);
    int ITERATION = atoi(argv[2]);
    // Pinned host buffer for the input signal (faster async transfers).
    cufftComplex *t_HostInputData;
    cudaError_t status = cudaMallocHost((void**)&t_HostInputData, sizeof(cufftComplex)*DATA_LEN*1);
    // BUG FIX: `status` was stored but never checked.
    if (status != cudaSuccess)
    {
        std::cout << "Cuda error: Failed to allocate" << std::endl;
        return 1;
    }
    for(int i=0; i < DATA_LEN; i++)
    {
        t_HostInputData[i].x = 1.0;
        t_HostInputData[i].y = 1.0;
    }
    // Device buffers for input and output of the transform.
    cufftComplex *t_InputData;
    cufftComplex *t_OutputData;
    cudaMalloc((void**)&t_InputData, sizeof(cufftComplex)*DATA_LEN*1);
    cudaMalloc((void**)&t_OutputData, sizeof(cufftComplex)*DATA_LEN*1);
    if (cudaGetLastError() != cudaSuccess)
    {
        std::cout << "Cuda error: Failed to allocate" << std::endl;
        return 1;
    }
    // BUG FIX: cudaMemset counts bytes — the original cleared only DATA_LEN
    // bytes instead of the full sizeof(cufftComplex)*DATA_LEN buffers.
    cudaMemset(t_InputData, 0, sizeof(cufftComplex)*DATA_LEN*1);
    cudaMemcpy(t_InputData, t_HostInputData, sizeof(cufftComplex)*DATA_LEN*1, cudaMemcpyHostToDevice);
    cudaMemset(t_OutputData, 0, sizeof(cufftComplex)*DATA_LEN*1);
    // 1-D C2C FFT plan, batch of 1.
    cufftHandle t_Plan;
    if (cufftPlan1d(&t_Plan, DATA_LEN, CUFFT_C2C, 1) != CUFFT_SUCCESS)
    {
        std::cout << "CUFFT error: Plan creation failed" << std::endl;
        return 1;
    }
    // Time the forward transforms with CUDA events.
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    float elapsedTime;
    cudaEventRecord(start, 0);
    for(int i=0; i < ITERATION; i++)
    {
        if (cufftExecC2C(t_Plan, t_InputData, t_OutputData, CUFFT_FORWARD) != CUFFT_SUCCESS)
        {
            std::cout << "CUFFT error: ExecC2C Forward failed" << std::endl;
            return 1;
        }
    }
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsedTime, start, end);
    printf("%d times for the FFT: %fms\n", ITERATION, elapsedTime);
    if (cudaDeviceSynchronize() != cudaSuccess)
    {
        std::cout << "Cuda error: Failed to synchronize" << std::endl;
        return 1;
    }
    // Copy the last transform result back to the host.
    cufftComplex *t_HostData = (cufftComplex*)malloc(sizeof(cufftComplex)*DATA_LEN*1);
    cudaMemcpy(t_HostData, t_OutputData, sizeof(cufftComplex)*DATA_LEN*1, cudaMemcpyDeviceToHost);
    // Cleanup. BUG FIX: t_HostData was leaked and the events were never
    // destroyed in the original.
    free(t_HostData);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cufftDestroy(t_Plan);
    cudaFree(t_InputData);
    cudaFree(t_OutputData);
    cudaFreeHost(t_HostInputData);
    return 0;
}
|
23,263 | #include<stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
const long int INF = 99999999;
const int N = 4;
// One full relaxation sweep for all-pairs shortest paths: each thread owns
// cell (row, col) of the N x N distance matrix and relaxes it through every
// intermediate vertex j.
// NOTE(review): threads read cells that other threads are concurrently
// updating, so a single launch is NOT a correct Floyd-Warshall step; the
// host relies on re-launching this kernel N times (monotone min-plus
// relaxation) to converge — confirm that N rounds suffice for the intended
// inputs, and that the unused implicit `k` from the host loop is deliberate.
__global__ void ComputeMinPath(int *d_Matrix) {
int row = blockIdx.x;
int col = threadIdx.x;
for (int j = 0; j < N; j++) {
// Relax (row, col) through intermediate vertex j if that path is shorter.
d_Matrix[row * N + col] = d_Matrix[row * N + j] + d_Matrix[j * N + col] < d_Matrix[row * N + col] ?
d_Matrix[row * N + j] + d_Matrix[j * N + col]
: d_Matrix[row * N + col];
}
}
// Runs N relaxation rounds of ComputeMinPath on the GPU over the N x N
// row-major matrix h_Matrix, copying it to the device and back in place.
void RoyFloyd(int *h_Matrix, int N) {
    const size_t bytes = N * N * sizeof(int);
    int *d_Matrix;
    cudaMalloc(&d_Matrix, bytes);
    cudaMemcpy(d_Matrix, h_Matrix, bytes, cudaMemcpyHostToDevice);
    // One block row per matrix row, one thread per column.
    for (int round = 0; round < N; round++) {
        ComputeMinPath<<<N, N>>>(d_Matrix);
    }
    cudaMemcpy(h_Matrix, d_Matrix, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_Matrix);
}
// Builds a small 4-vertex adjacency matrix (INF = no edge), runs the GPU
// shortest-path solver, and prints the resulting distance matrix.
int main() {
    int matrix[N*N] = {
        0, 5, INF, 10,
        INF, 0, 3, INF,
        INF, INF, 0, 1 ,
        INF, INF, INF, 0
    };
    RoyFloyd(matrix, N);
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            const int val = matrix[row * N + col];
            if (val == INF) {
                printf("INF ");
            } else {
                printf("%d ", val);
            }
        }
        printf("\n");
    }
    getchar();
    return 0;
}
23,264 | /*
Demo for the following:
cudaError_t
cudaGetErrorString
*/
#include <stdio.h>
#include <cuda_runtime.h>
// Device-side hello: each thread prints its own index within the block.
__global__ void helloFromGPU(void){
    const int tid = threadIdx.x;
    printf("Hello from GPU! %d\n", tid);
}
// Launches the hello kernel, then demonstrates cudaError_t /
// cudaGetErrorString by checking the result of cudaDeviceReset.
int main(void){
    printf("Hello from CPU!\n");
    helloFromGPU <<< 1,10 >>>();
    // cudaDeviceReset waits for outstanding work and reports any error.
    cudaError_t res = cudaDeviceReset();
    if(res == cudaSuccess){
        printf("Success!\n");
    }else{
        printf("%s\n", cudaGetErrorString(res)); // human-readable description
    }
}
|
23,265 | #include <stdio.h>
#include <stdlib.h>
// Reports this thread's (thread, block) coordinates from the device.
__global__ void devicePrint(){
    const int t = threadIdx.x;
    const int b = blockIdx.x;
    printf("Hello from device! Thread %d,%d\n", t, b);
}
// Prints from the host, launches a single device thread that prints, and
// waits so the device printf buffer is flushed before exit.
int main(int argc, char** argv){
    printf("Hello from host!\n");
    devicePrint<<<1, 1>>>();      // one block, one thread
    cudaDeviceSynchronize();      // flush device-side printf
    return 0;
}
|
23,266 | #include "includes.h"
// Per-filter mean of x over the batch and spatial dimensions.
// Launch: one block per filter (blockIdx.x), BLOCK threads per block.
// x layout: [batch][filters][spatial]; mean has one entry per filter.
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;
    int i, j;
    // Each thread accumulates a strided partial sum for this filter.
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? x[index] : 0;
        }
    }
    // BUG FIX: thread 0 reads every thread's partial below, so all partials
    // must be published first; without this barrier the final sum raced
    // with the accumulation loops of the other threads.
    __syncthreads();
    if(id == 0){
        // Serial reduction of the per-thread partials, then normalize.
        mean[filter] = 0;
        for(i = 0; i < threads; ++i){
            mean[filter] += local[i];
        }
        mean[filter] /= spatial * batch;
    }
}
23,267 |
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: inicializarMatrizRandom
descripcion: inicializa aleatoriamente los elementos de una matriz
parametros:
- M: puntero a la matriz a inicializar
- m: numero de filas de A
- n: numero de columnas de A
*********************************************************************
*/
/*
*********************************************************************
function name: inicializarMatrizRandom
description: fills an m x n matrix with pseudo-random integer values
             in [0, 10), stored as floats (row-major, m*n entries)
parameters:
        - M: pointer to the matrix to initialize
        - m: number of rows
        - n: number of columns
*********************************************************************
*/
void inicializarMatrizRandom (float *M, int m, int n){
    const int total = m * n;
    for (int idx = 0; idx < total; idx++) {
        M[idx] = rand() % 10;
    }
}
/*
*********************************************************************
funcion: gpu_matrix_mult
descripcion: producto de dos matrices sin caches en GPU (no necesariamente cuadradas)
parametros:
- a,b,c: punteros a las matrices con las que operar en GPU
- m: numero de filas de A
- n: numero de columnas de A
- k: numero de columnas de B
*********************************************************************
*/
/*
*********************************************************************
function: gpu_matrix_mult
description: C = A * B on the GPU without shared-memory tiling, for
             general (m x n) * (n x k) row-major matrices; one output
             element per thread, out-of-range threads exit early.
parameters:
        - a,b,c: device pointers to the operand/result matrices
        - m: rows of A, n: columns of A, k: columns of B
*********************************************************************
*/
__global__ void gpu_matrix_mult(float *a,float *b, float *c, int m, int n, int k)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= k)
        return;
    float sum = 0.0;
    for (int t = 0; t < n; t++)
    {
        sum += a[row * n + t] * b[t * k + col];
    }
    c[row * k + col] = sum;
}
/*
*********************************************************************
funcion: gpu_square_matrix_mult
descripcion: producto de dos matrices utilizando caches en GPU (matriz cuadrada)
parametros:
- d_a,d_b,d_result: punteros a las matrices device con las que operar en GPU
- n: numero de columnas de A (se presupone que la matriz sera cuadrada)
*********************************************************************
*/
/*
*********************************************************************
function: gpu_square_matrix_mult
description: tiled (shared-memory) product of two n x n matrices; each
             block computes a BLOCK_SIZE x BLOCK_SIZE output tile,
             staging operand tiles through shared memory.
parameters:
        - d_a,d_b,d_result: device pointers to the matrices
        - n: matrix dimension (square)
NOTE(review): the out-of-range test `idx >= n*n` checks the linear
index only; when n is not a multiple of BLOCK_SIZE a wrapped linear
index can still fall inside [0, n*n) while addressing the wrong row —
confirm n is always a multiple of BLOCK_SIZE for callers of this path.
*********************************************************************
*/
__global__ void gpu_square_matrix_mult(float *d_a, float *d_b, float *d_result, int n)
{
// Shared-memory staging tiles for the A and B operands.
__shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
float tmp = 0.0;
int idx;
// March across the tile diagonal; each iteration loads one tile pair,
// synchronizes, and accumulates its partial product.
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n)
{
// Zero-pad loads that fall past the end of the matrix.
tile_a[threadIdx.y][threadIdx.x] = 0.0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n)
{
tile_b[threadIdx.y][threadIdx.x] = 0.0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
// Barrier: tiles must be fully loaded before any thread consumes them.
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
// Barrier: all reads must finish before the next iteration overwrites.
__syncthreads();
}
if(row < n && col < n)
{
d_result[row * n + col] = tmp;
}
}
/*
*********************************************************************
funcion: cpu_matrix_mult
descripcion: producto de dos matrices (no necesariamente cuadradas) en CPU.
parametros:
- h_a,h_b,h_result: punteros a las matrices host con las que operar en CPU
- m: numero de filas de A
- n: numero de columnas de A
- k: numero de columnas de B
*********************************************************************
*/
/*
*********************************************************************
function: cpu_matrix_mult
description: CPU reference product h_result = h_a * h_b for general
             (m x n) * (n x k) row-major matrices, using the
             cache-friendly IKJ loop order.
parameters:
        - h_a,h_b,h_result: host pointers to the matrices
        - m: rows of A, n: columns of A, k: columns of B
BUG FIX: the original computed the product twice — first with the IKJ
order, then again with IJK, overwriting the first result — doubling
the work for an identical outcome. Only one pass is kept.
*********************************************************************
*/
void cpu_matrix_mult(float *h_a, float *h_b, float *h_result, int m, int n, int k) {
    // Zero the result, then accumulate row-wise (IKJ order keeps the
    // innermost loop streaming over contiguous memory of B and C).
    for (int i = 0; i < m; i++)
        for (int j = 0; j < k; j++)
            h_result[i*k + j] = 0;
    for (int i = 0; i < m; i++) {
        for (int h = 0; h < n; h++) {
            float r = h_a[i*n + h];
            for (int j = 0; j < k; j++)
                h_result[i*k + j] += r * h_b[h*k + j];
        }
    }
}
// Multiplies two random matrices on the GPU (tiled kernel for square sizes,
// naive kernel otherwise) and on the CPU, times both with CUDA events,
// verifies the results agree, and reports the speedup.
// Usage: prog <rows of A> <cols of A> <cols of B>
int main(int argc, char const *argv[])
{
    int m, n, k;
    if(argc < 4 || argc > 4){
        fprintf(stderr,"Uso: filas matriz A, columnas matriz A, columnas matriz B\n");
        exit(-1);
    }
    // Matrix dimensions from the command line.
    m = atoi(argv[1]);
    n = atoi(argv[2]);
    k = atoi(argv[3]);
    // Host buffers: operands, GPU result, CPU reference result.
    float *h_a, *h_b, *h_c, *h_cc;
    h_a=(float*)malloc(sizeof(float)*m*n);
    h_b=(float*)malloc(sizeof(float)*n*k);
    h_c=(float*)malloc(sizeof(float)*m*k);
    h_cc=(float*)malloc(sizeof(float)*m*k);
    // Random integer-valued entries keep the float comparison below exact.
    inicializarMatrizRandom (h_a, m, n);
    inicializarMatrizRandom (h_b, n, k);
    // CUDA events for timing both the GPU and the CPU paths.
    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Device buffers.
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(float)*m*n);
    cudaMalloc((void **) &d_b, sizeof(float)*n*k);
    cudaMalloc((void **) &d_c, sizeof(float)*m*k);
    cudaMemcpy(d_a, h_a, sizeof(float)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float)*n*k, cudaMemcpyHostToDevice);
    // Grid/block layout: one BLOCK_SIZE x BLOCK_SIZE tile per block.
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Shared-memory kernel only applies to square matrices.
    if(m == n && n == k){
        gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    }
    else{
        gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    }
    cudaMemcpy(h_c, d_c, sizeof(float)*m*k, cudaMemcpyDeviceToHost);
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // supported equivalent.
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // GPU elapsed time.
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Tiempo empleado en la Mult de Matrices %dx%d . %dx%d en GPU: %fs.\n\n", m, n, n, k, gpu_elapsed_time_ms/1000);
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // CPU elapsed time.
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Tiempo empleado en la Mult de Matrices %dx%d . %dx%d en CPU: %fs.\n\n", m, n, n, k, cpu_elapsed_time_ms/1000);
    // Compare the GPU result against the CPU reference (exact compare is
    // valid because the inputs are small integers).
    int all_ok = 1;
    for (int i = 0; i < m; ++i){
        for (int j = 0; j < k; ++j){
            if(h_cc[i*k + j] != h_c[i*k + j])
            {
                all_ok = 0;
            }
        }
    }
    if(all_ok){
        printf("CORRECTO!, Speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms);
    }
    else{
        printf("INCORRECTO\n");
    }
    // Cleanup. BUG FIX: the host buffers were allocated with malloc but
    // released with cudaFreeHost (undefined behavior — cudaFreeHost is only
    // valid for cudaMallocHost/cudaHostAlloc memory); use free().
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_cc);
    return 0;
}
|
23,268 | #include "includes.h"
// Letter histogram with block-private (shared-memory) bins: each block
// accumulates counts of 'a'..'z' locally, grouping the 26 letters into
// num_bins buckets of bin_size letters each, then merges its private
// histogram into the global `bins` with atomics.
// Shared memory: num_bins * sizeof(unsigned int), passed as the 3rd launch arg.
// NOTE(review): bin_size is derived from num_elements, not from 26 — confirm
// callers pass num_elements/num_bins consistent with the intended grouping.
__global__ void histogram_privatized_kernel(unsigned char *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins) {
const int bx = blockIdx.x;
const int bdx = blockDim.x;
const int tx = threadIdx.x;
const int gdx = gridDim.x;
unsigned int tid = bx * bdx + tx;
extern __shared__ unsigned int histo_s[]; // size is 3rd arg in <<< >>> of kernel
// Cooperatively zero the private histogram (shared memory is uninitialized).
for (unsigned int bin_idx = tx; bin_idx < num_bins; bin_idx += bdx) {
histo_s[bin_idx] = 0u;
}
__syncthreads();
const int bin_size = (num_elements - 1) / num_bins + 1;
// Grid-stride loop over the input; non-letter bytes are ignored.
for (unsigned int i = tid; i < num_elements; i += bdx * gdx) {
int c = input[i] - 'a';
if (c >= 0 && c < 26)
atomicAdd(&(histo_s[c / bin_size]), 1);
}
// All private counts must be final before merging into global memory.
__syncthreads();
for (unsigned int bin_idx = tx; bin_idx < num_bins; bin_idx += bdx) {
atomicAdd(&(bins[bin_idx]), histo_s[bin_idx]);
}
}
23,269 | #include <cuda_runtime.h>
#include<iostream>
using namespace std;
#include <device_launch_parameters.h>
#define N (1024 * 1024)
// Element-wise c = a + b over N elements using a grid-stride loop, so any
// grid/block configuration covers the whole vector: each thread starts at
// its unique global index and hops by the total thread count.
__global__ void add(int *a, int *b, int *c)
{
    const int stride = blockDim.x * gridDim.x;   // total threads in the grid
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride)
    {
        c[i] = a[i] + b[i];
    }
}
// Fills two N-element vectors, adds them on the GPU with a fixed 128x128
// launch (the grid-stride kernel covers all N elements regardless), and
// verifies the result on the host.
int main(void)
{
    // BUG FIX: the original declared int a[N], b[N], c[N] on the stack;
    // with N = 1M ints each (~4 MB apiece) that overflows the default
    // stack on most platforms. Allocate the host vectors on the heap.
    int *a = new int[N];
    int *b = new int[N];
    int *c = new int[N];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, sizeof(int) * N);
    cudaMalloc((void**)&dev_b, sizeof(int) * N);
    cudaMalloc((void**)&dev_c, sizeof(int) * N);
    for (int i = 0; i < N; i++)
    {
        a[i] = i;
        b[i] = i * i;
    }
    cudaMemcpy(dev_a, a, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(int) * N, cudaMemcpyHostToDevice);
    // Arbitrary launch shape is safe: the kernel uses a grid-stride loop.
    add<<<128, 128 >>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
    bool success = true;
    for (int i = 0; i < N; i++)
    {
        if ((a[i] + b[i]) != c[i])
        {
            cout << "Error " << c[i] << " != " << a[i] << " + " << b[i] << endl;
            success = false;
        }
    }
    if (success) cout << "Last element is: " << c[N-1] << endl;
    // BUG FIX: the original never released the device buffers.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete [] a;
    delete [] b;
    delete [] c;
}
23,270 | #include <stdio.h>
// One thread per element: c[i] = a[i] + b[i]. Expects a single-block launch
// with at least as many threads as elements.
__global__ void vec_add(int *a, int *b, int *c) {
    const int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Adds two 8-element vectors on the GPU and prints the result.
int main() {
    int n = 8;
    // Host buffers.
    int *a_h, *b_h, *c_h;
    a_h = (int *) malloc(sizeof(int)*n);
    b_h = (int *) malloc(sizeof(int)*n);
    c_h = (int *) malloc(sizeof(int)*n);
    for (int i = 0; i < n; i++) {
        a_h[i] = i;
        b_h[i] = i;
    }
    // Device buffers.
    int *a_d, *b_d, *c_d;
    cudaMalloc((void **)&a_d, sizeof(int)*n);
    cudaMalloc((void **)&b_d, sizeof(int)*n);
    cudaMalloc((void **)&c_d, sizeof(int)*n);
    cudaMemcpy(a_d, a_h, sizeof(int)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, sizeof(int)*n, cudaMemcpyHostToDevice);
    // One block, one thread per element.
    vec_add<<<1, n>>>(a_d, b_d, c_d);
    cudaDeviceSynchronize();
    cudaMemcpy(c_h, c_d, sizeof(int)*n, cudaMemcpyDeviceToHost);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    for (int i = 0; i < n; i++) {
        printf("v%d: %d\n", i, c_h[i]);
    }
    // BUG FIX: the original leaked the three host buffers.
    free(a_h);
    free(b_h);
    free(c_h);
    return 0;
}
23,271 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
using namespace std;
// Element-wise product d_out[i] = d_in1[i] * d_in2[i]; one thread per
// element, single-block launch expected.
__global__ void mul(int *d_in1,int *d_in2,int *d_out){
    const int i = threadIdx.x;
    d_out[i] = d_in1[i] * d_in2[i];
}
// Inclusive (Hillis-Steele) prefix-sum of d_in[start, end) in shared
// memory; the last thread writes the section total through the device
// reference d_out. Launch with (end - start) threads in a single block and
// enough dynamic shared memory for (end - start) ints.
__global__ void reduce_section(int *d_in,int &d_out,const int start,const int end){
    int idx = threadIdx.x;
    extern __shared__ int s_out[];
    s_out[idx] = d_in[start+idx];
    __syncthreads();
    int out;
    for(int step=1;step<end-start;step*=2){
        if(idx-step>=0){
            // BUG FIX: the scan partner at distance `step` is s_out[idx-step],
            // not s_out[idx-1]; the original only ever added the immediate
            // neighbor, producing wrong sums for sections longer than 2.
            out = s_out[idx]+s_out[idx-step];
        }
        // Double-barrier pattern: everyone reads before anyone overwrites.
        __syncthreads();
        if(idx-step>=0)
            s_out[idx] = out;
        __syncthreads();
    }
    // The highest thread holds the inclusive sum of the whole section.
    if(idx == end-start-1)
        d_out = s_out[idx];
}
// Sparse matrix-vector product demo using a CSR-like layout:
// `value`/`cols` hold the nonzeros and their column indices, `rows` holds
// the row start offsets. The nonzeros are multiplied element-wise by the
// gathered vector entries on the GPU, then each row segment is summed by
// reduce_section.
int main(){
const int size = 6;
int value[size] = {1,2,3,4,5,6};
int cols[size] = {0,2,1,0,1,0};
int rows[5] = {0,2,3,5,size};// the last element records the nonzero count
int mul_val[3] = {1,2,3};
int mul_valn[size];// vector entry paired with each nonzero for the product
printf("左矩阵:\n");
// Print the dense form of the sparse left matrix (4 x 3).
int flag = 0;
for(int i=0;i<4;i++){
for(int i=0;i<3;i++){
if(i == cols[flag])
printf("%d ",value[flag++]);
else
printf("0 ");
}
printf("\n");
}
printf("\n右矩阵:\n");
for(int i=0;i<3;i++){
printf("%d\n",mul_val[i]);
}
printf("\n");
// Gather: pair every nonzero with the vector entry of its column.
for(int i=0;i<size;i++){
mul_valn[i] = mul_val[cols[i]];
}
int *h_in1 = value;
int *h_in2 = mul_valn;
int *h_out;
int *d_in1;
int *d_in2;
int *d_out_mid;
int *d_out;
h_out = (int *)malloc(4*sizeof(int));
cudaMalloc((int **)&d_in1,size*sizeof(int));
cudaMalloc((int **)&d_in2,size*sizeof(int));
cudaMalloc((int **)&d_out,4*sizeof(int));
cudaMalloc((int **)&d_out_mid,size*sizeof(int));
cudaMemcpy(d_in1,h_in1,size*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,h_in2,size*sizeof(int),cudaMemcpyHostToDevice);
dim3 thread(size);
// Element-wise products of nonzeros and gathered vector entries.
mul<<<1,thread>>>(d_in1,d_in2,d_out_mid);
// Sum each row segment [rows[i-1], rows[i]) into d_out[i-1].
// NOTE(review): the 3rd launch argument is `sizenew` BYTES of dynamic
// shared memory, but the kernel indexes `sizenew` ints — this looks like
// it should be sizenew*sizeof(int); confirm.
// NOTE(review): `d_out[i-1]` binds a reference to device memory — the host
// only takes the address, never dereferences, so this compiles, but it is
// fragile; confirm it is intentional.
for(int i=1;i<5;i++){
int sizenew = rows[i]-rows[i-1];
dim3 threadnew(sizenew);
reduce_section<<<1,threadnew,sizenew>>>(d_out_mid,d_out[i-1],rows[i-1],rows[i]);
}
cudaMemcpy(h_out,d_out,4*sizeof(int),cudaMemcpyDeviceToHost);
printf("结果:\n");
for(int i=0;i<4;i++){
printf("%d\n",h_out[i]);
}
printf("\n");
free(h_out);
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out_mid);
cudaFree(d_out);
return 0;
}
|
23,272 | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
//#include<cublas.h>
//#include<R.h>
#define NTHREADS 512
//computes u = constant * t(X) %*% v
// Computes u = con * X^T v, where X is rows x cols with leading
// dimension ldX (column-major) and u has cols entries.
__device__ void cXtv(float con, int rows, int cols, float * X, int ldX, float * v,
float * u){
    for(int c = 0; c < cols; c++){
        float acc = 0;
        for(int r = 0; r < rows; r++){
            acc += X[r + c*ldX] * v[r];
        }
        u[c] = con*acc;
    }
}
//computes X = X + v %*% u'
// Rank-1 update X = X + v * u^T, where X is rows x cols with leading
// dimension ldX (column-major).
__device__ void Xpvut(int rows, int cols, float * X, int ldX, float *v, float *u){
    for(int r = 0; r < rows; r++){
        for(int c = 0; c < cols; c++){
            X[r + c*ldX] += v[r] * u[c];
        }
    }
}
//calculates T = V'V where V is upper triangular
//calculates T = V'V where V is upper triangular
// V is rows x cols (column-major, leading dimension rows); T is cols x cols.
// NOTE(review): the inner sum runs k < min(i,j), which EXCLUDES the
// k = min(i,j) term; for T = V'V with V upper triangular the diagonal
// element V[min(i,j)][...] would normally contribute — confirm whether V is
// strictly upper triangular here or whether this bound drops a term.
__device__ void uVtV(int rows, int cols, float * V, float * T){
int i, j, k;
float sum;
for(i=0; i<cols; i++){
for(j=0; j<cols; j++){
sum = 0;
// Only the first min(i,j) entries of each column can be nonzero.
for(k=0; k<min(i,j); k++)
sum += V[k + i*rows] * V[k + j*rows];
T[i + j*cols] = sum;
}
}
}
//Rounds "length" up the the next multiple of block length
// Rounds `length` up to the next multiple of 2^blockExp.
__device__ int alignBlock(int length, unsigned blockExp){
    const int blockSize = 1 << blockExp;
    const unsigned mask = ((unsigned) - 1) << blockExp;  // clears the low blockExp bits
    return (length + blockSize - 1) & mask;
}
// Swaps columns i and j of the n x p column-major matrix X (leading
// dimension n) in place; does nothing when an index is out of range.
__device__ void colswap(float *X, int n, int p, int i, int j){
    float tmp;
    int k;
    float * Xi = X + i*n;
    // BUG FIX: the original computed Xj as X + i*j — an arbitrary interior
    // address — instead of the start of column j at X + j*n.
    float * Xj = X + j*n;
    // NOTE(review): keeps the original `<= p` bound; with 0-based column
    // indices `< p` looks intended — confirm against the callers.
    if(i <= p && j <= p){
        for(k = 0; k < n; k++){
            tmp = Xi[k];
            Xi[k] = Xj[k];
            Xj[k] = tmp;
        }
    }
}
// Returns the index of the largest entry of ColNorms among the columns
// whose bit is set in the bitmask `id`. Column 0 always seeds the running
// maximum (as in the original), regardless of its mask bit.
__device__ int getMaxIdx(int p, float * ColNorms, int id){
    int maxIdx = 0, i, bit;
    float max = ColNorms[maxIdx], tmp;
    for(i = 1; i < p; i++){
        // BUG FIX: the original shifted the masked bit right by the constant
        // 1 (`>> 1`), so the test was wrong for every column except i == 1;
        // shift by `i` to extract column i's participation flag.
        bit = ( id & (1 << i) ) >> i;
        if( bit == 1){
            tmp = ColNorms[i];
            if(tmp > max){
                max = tmp;
                maxIdx = i;
            }
        }
    }
    return maxIdx;
}
// Maps the i-th set bit (counting from the low end) of the p-bit mask `id`
// to its bit position: returns the j at which the running count of set
// bits below j first equals i.
// NOTE(review): if fewer than i+1 bits of `id` are set, `out` is returned
// uninitialized — confirm callers guarantee i is always in range.
__device__ int idx(int id, int p, int i){
int j, colcount=0, out;
for(j=0; j<p; j++){
// Capture the first position where exactly i set bits precede it.
if(colcount == i)
out = j;
if( ( id & ( 1 << j ) ) >> j == 1)
colcount += 1;
}
return out;
}
// One thread per column: writes the Euclidean norm of column `colIndex`
// of the rows x cols column-major matrix dX into ColNorms[colIndex].
__global__ void getColNorms(int rows, int cols, float * dX, float * ColNorms){
    const int colIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if(colIndex >= cols)
        return;
    float * col = dX + colIndex * rows;   // start of this thread's column
    float sumSq = 0.f;
    for(int i = 0; i < rows; i++) {
        const float term = col[i];
        sumSq += term * term;             // accumulate squared entries
    }
    ColNorms[colIndex] = sqrtf(sumSq);
}
// Blocked Householder QR factorization with column pivoting, run as a
// single device thread (launched <<<1,1>>> from main). On exit X holds R
// in its upper triangle with scaled Householder vectors below the
// diagonal; qrAux holds the adjusted diagonal of the Householder vectors,
// pivot records column swaps, and *rank receives the detected rank.
// scales, Beta, WtR, V, W, T and u are device workspace buffers.
__global__ void GetQRDecompBlocked(int n, int p, float * X, float * ColNorms,
		       unsigned int blockSize, int stride, float * scales,
		       float * Beta, float * qrAux, int * pivot,
		       float * WtR, int * rank, float * V, float * W,
		       float * T, float * u){
  float tol = 0.0001; //float tolerance
  // NOTE(review): `kp` (columns processed so far) is never initialized
  // before its first use in the pivot loop below -- it almost certainly
  // should start at 0; confirm and fix upstream.
  int blockCount, blockEnd = 0, bc, i, kp; //kp is # cols processed
  //unsigned int id = threadIDx.x + blockIDx.x * blockDim.x;
  int varcount = 0, maxIdx, pm = 1;
  //unsigned int * binid;
  float * Xm; //model id's model matrix; will eventually contain R in QR decomp
  //float * pXcol = X; //pointer to column of X
  float * pXmcol; //pointer to column of Xm
  float maxNorm;
  float * pXmBlock; //pointer to block of Xm
  size_t fbytes = sizeof(float);
  int rk = 0; // Local value of rank;
  int maxRank; //maximum possible rank of the model matrix
  float minElt; // Lowest acceptable norm under given tolerance.
  int maxCol; //this will be initialized to pm - 1, highest candidate column, not fixed
  int maxRow = n - 1;
  int rowskp = stride; // # unprocessed rows = stride - kp
  int colskp; // # unprocessed cols = cols - kp
  float *pVcol; //pointer to column of V
  float *pVdiag; //pointer to diagonal element of V
  float *pXmdiag; //pointer to diagonal element of dQRBlock
  int colIdx;
  int a,b;
  int l;
  //if(id > M)
  //  return;
  //binid = (unsigned int *)calloc(p-1, sizeof(unsigned int));
  /*
  //copies the relevant columns of X into Xm
  do{
    int jump = 0;
    unsigned int bit = 0;
    memcpy(pXmcol, pXcol, n*fbytes); //copy the current column
    //any time we copy any column but the first, set binid[col] = 1
    if(varcount > 0){
      binid[p - varcount] = 1; //actual index is (p - 1) - (varcount - 1)
      pm += 1;
    }
    pXmcol += n; //update pointer on Xm to the next column
    //bitwise operations on id to find proper columns
    //look for the next used column of X
    do{
      jump += 1; //increment number of columns of X to move forward
      varcount += 1; //increment number of total vars of X checked
      //note: each column of X but the first represents a variable
      //set bit as the k'th bit of id (1st bit is far right)
      bit = ( id & ( 1 << varcount ) ) >> varcount;
      //alternative code: declare tmpid above first.
      ////bit = tmpid & 1; //bit = 1 if last bit of tmpid is 1, 0 otherwise
      ////tmpid = tmpid >> 1; //bitshift tmpid right - lopping off last bit
    }
    while(bit == 0 && varcount < p);
    pXcol += n*jump; //update pointer on X to the next used column
  }
  while(varcount < p );
  //if varcount = k ( = p - 1 ), we still have to copy this last column, so the
  //loop must iterate again
  */
  pm = p;
  //initialize variables that depend on pm and Xm
  //Xm = (float *) calloc(stride*pm,fbytes); //allocate memory for model matrix
  Xm = X;
  pXmcol = Xm;
  pXmBlock = Xm;
  maxCol = pm - 1; //maximum possible column
  colskp = pm;
  maxIdx = getMaxIdx(p, ColNorms, (1 << pm) - 1);
  //id);
  maxNorm = ColNorms[maxIdx];
  if (maxNorm < tol){
    maxRank = 0; // Short-circuits the main pivot loop
  }
  else{
    minElt = (1.0 + maxNorm) * tol;
    maxRank = n > pm ? pm : n; //min(n,pm)
  }
  blockCount = (pm + blockSize - 1) / blockSize;
  for(bc = 0; bc < blockCount; bc++){
    // Determines "blockEnd", which counts the number of columns remaining
    // in the upcoming block.  Swaps trivial columns with the rightmost
    // unvisited column until either a nontrivial column is found or all
    // columns have been visited.  Note that 'blockEnd <= blockSize', with
    // inequality possible only in the rightmost processed block.
    //
    // This pivoting scheme does not attempt to order columns by norm, nor
    // does it recompute norms altered by the rank-one update within the
    // upcoming block.  A higher-fidelity scheme is implemented in the non-
    // blocked form of this function.  Sapienti sat.
    blockEnd = 0;
    for(i = kp; i < kp + blockSize && i < maxRank && i <= maxCol; i++){
      float colNorm = ColNorms[i];
      //ColNorms[idx(id, p, i)];
      while( (colNorm < minElt) && (maxCol > i) ){
        //while colNorm is smaller than the lowest acceptable norm, given tolerance
        //and while the maximum column possible is larger than the current column
        //keep track of column swap in pivot vector
        int tempIdx = pivot[maxCol];
        //exchange columns of Xm: current column and maxCol
        colswap(Xm, stride, pm, i, maxCol);
        pivot[maxCol] = pivot[i];
        pivot[i] = tempIdx;
        maxCol--;
        //set new colNorm as norm of max column, now in i'th spot
        colNorm = ColNorms[i];
        //ColNorms[idx(id, p, i)];
      }
      if (colNorm >= minElt)
        blockEnd++;
    }
    rk += blockEnd;
    //set V to 0 everywhere
    for(a=0; a<blockSize; a++)
      for(b=0; b<rowskp; b++)
        *(V + b + a*rowskp) = 0;
    pVcol = V; //pointer to column of V
    pVdiag = V; //pointer to diagonal element of V
    pXmdiag = pXmBlock; //pointer to diagonal element of XmBlock
    for(colIdx = 0; colIdx < blockEnd; colIdx++){
      float v1;
      float v1Abs;
      memcpy(pVdiag, pXmdiag, (rowskp - colIdx)*fbytes);
      // Builds Householder vector from maximal column just copied.
      // For now, uses slow memory transfers to modify leading element:
      // V_1 += sign(V_1) * normV.
      v1 = *pVdiag;
      v1Abs = fabs(v1);
      if(kp == maxRow)//botom row is not scaled
        qrAux[kp] = v1Abs;
      else{ // zero-valued "normV" should already have been ruled out.
        int d;
        float normV = 0;
        float recipNormV;
        float fac;
        for(d=0; d < rowskp - colIdx; d++){
          float diag = pVdiag[d];
          normV += diag*diag;
        }
        normV = sqrtf(normV);
        recipNormV = 1.0 / normV;
        qrAux[kp] = 1.0 + v1Abs * recipNormV;
        scales[colIdx] = (v1 >= 0.f ? 1.f : -1.f) * recipNormV;
        // Scales leading nonzero element of vector.
        fac = 1.0 + normV / v1Abs;
        *pVdiag *= fac;
        Beta[colIdx] = -2.0 / (normV * normV + v1Abs * (-1.0 + fac * fac));
        // u = Beta[colIdx] * t(pXmBlock) %*% pVcol
        // rows of pXmBlock = rowskp
        // cols of pXmBlock = min(blockSize, colskp)
        // leading dimension of pXmBlock = stride
        // elements of pVcol = rowskp
        // elements of u = min(blockSize, colskp)
        cXtv(Beta[colIdx], rowskp, min(blockSize, colskp), pXmBlock, stride, pVcol, u);
        //pXmBlock = pXmBlock + pVcol %*% u'
        //rows of pXmBlock = rowskp
        //cols of pXmBlock = min(blockSize, colsk)
        //elements of pVcol = rowskp
        //elements of u = min(blockSize, colsk)
        //leading dim of pXmBlock = stride
        Xpvut(rowskp, min(blockSize, colskp), pXmBlock, stride, pVcol, u);
      }
      pVcol += rowskp;
      pVdiag += (rowskp + 1);
      pXmdiag += (stride + 1);
      kp++;
    }
    // If more unseen columns remain, updates the remainder of QR lying to
    // the right of the block just updated.  This must be done unless we
    // happen to have exited the inner loop without having applied any
    // Householder transformations (i.e., blockEnd == 0).
    if (bc < blockCount - 1 && blockEnd > 0) {
      // w_m = Beta (I + W V^t) v_m, where the unsubscripted matrices
      // refer to those built at step 'i-1', having 'i' columns.
      //
      // w_i = Beta v_i
      //
      float *pTcol = T; //pointer to columns of dT
      float *pWcol = W; //pointer to columns of dW
      int m, a, b, c;
      pVcol = V; //pointer to columns of dV
      // T = V^t V
      //// T = dT
      //// V = dV
      //rows of V' = blockSize
      //cols of V' = rowsk
      //leading dim of V = rowsk
      //leading dim of T = rowsk
      //rows/cols of T = blockSize
      uVtV(rowskp, blockSize, V, T);
      for (m = 0; m < blockSize; m++, pWcol += rowskp, pVcol += rowskp,
             pTcol += blockSize) {
        //for m=0,1,..., blockSize - 1
        //pdWcol += rowsk
        //pdVcol += rowsk
        //pdTcol += blockSize
        int a;
        for(a = 0; a<rowskp; a++)
          pWcol[a] = Beta[m]*pVcol[a];
        // w_m = w_m + Beta W T(.,m)
        //// w_m = pWcol
        //// Beta = Beta[m]
        //// T(.,m) = pTcol
        //// W = dW
        if (m > 0) {
          int a, b;
          // NOTE(review): W is read here as W[b + a*m] (leading dimension m)
          // but was filled column-wise above with leading dimension rowskp;
          // verify the intended layout of W before relying on this path.
          for(a = 0; a < rowskp; a++){
            float sum = 0;
            for(b = 0; b < m; b++){
              sum += W[b + a*m] * pTcol[b];
            }
            pWcol[a] += sum;
          }
        }
      }
      // Updates R, beginning at current diagonal by:
      // R = (I_m + V W^t) R = R + V (W^t R)
      //
      // WtR = W^t R
      //// WtR = dWtR
      //// W = dW
      //W is rowskp by blockSize
      //pXmBlock + blockSize*stride is rowskp by colskp - blockSize
      //WtR is blockSize by colsk - blockSize
      for(a = 0; a < blockSize; a++){
        for(b = 0; b < colskp - blockSize; b++){
          float sum = 0;
          for(c = 0; c < rowskp; c++)
            sum += W[c + rowskp*a] * *(pXmBlock + blockSize*stride + c + b*rowskp);
          WtR[a + b*blockSize] = sum;
        }
      }
      // R = V WtR + R
      //// V = dV
      //// WtR = dWtR
      //// R = pdQRBlock + blockSize*stride
      // V is rowskp by blockSize
      //WtR is blockSize by colskp - blockSize
      //pXmBlock + blockSize*stride is rowskp by colskp - blockSize
      for(a = 0; a < rowskp; a++){
        for(b = 0; b < colskp - blockSize; b++){
          float sum = 0;
          for(c = 0; c < blockSize; c++){
            sum += V[a + c*rowskp] * WtR[c + b*blockSize];
          }
          *(pXmBlock + blockSize*stride + a + b*rowskp) += sum;
        }
      }
    }
    // Flushes scaled Householder vectors to the subdiagonals of dQR,
    // 'blockSize'-many at a time.  The only time a smaller number are
    // sent occurs when a partial block remains at the right end.
    //
    pVdiag = V; //pointer to diagonal of V
    pXmdiag = pXmBlock; //pointer to diagonal of pXmBlock / XmBlock
    for (l = 0; l < blockEnd; l++, pVdiag += (rowskp + 1),
           pXmdiag += (stride + 1)) {
      //for l = 0, 1, ..., blockEnd - 1
      //// pVdiag += (rowskp + 1)
      //// pXmdiag += (stride +1)
      int a;
      //pVdiag + 1 = scales[l]* (pVdiag + 1)
      //pXmdiag + 1 = pVdiag + 1
      for(a = 0; a < rowskp - l - 1; a++){
        *(pVdiag + 1 + a) *= scales[l];
        *(pXmdiag + 1 + a) = *(pVdiag + 1 + a);
      }
    }
    pXmBlock += blockSize * (stride + 1);
    colskp -= blockSize;
    rowskp -= blockSize;
    //end main loop
  }
  //set rank
  *rank = rk;
  // Xm now contains the upper-triangular portion of the factorization,
  // R.
  // V is lower-triangular, and contains the Householder vectors, from
  // which the Q portion can be derived.  An adjusted form of the
  // diagonal is saved in qrAux, while the sub-diagonal portion is
  // written onto QR.
}
// Host driver: builds a small n x p test matrix, computes per-column norms
// and the blocked, pivoted QR factorization on the device, and prints X
// before and R after. Returns 0 on completion.
int main(void){
  int n = 10, p = 5;
  //, k = p - 1;
  int i, j;
  size_t fbytes = sizeof(float);
  float * X = (float *)malloc(n*p*fbytes);
  float * dX, * dColNorms;
  int nblocks, nthreads = NTHREADS;
  const unsigned blockExp = 7;
  unsigned int blockSize = 1 << blockExp;
  int stride = (n + blockSize - 1) & (((unsigned) -1) << blockExp);
  float * dscales;
  float * dBeta;
  float * dqrAux;
  int * dpivot;
  float * dWtR;
  int * drank;
  float * dV;
  float * dW;
  float * dT;
  float * du;
  // Fix: blockSize (128) exceeds p (5), so the original allocation size
  // blockSize*(p - blockSize)*fbytes mixed int with unsigned and wrapped
  // around to a huge value, making the cudaMalloc fail. Clamp the column
  // count to at least 1 (WtR is only used when p > blockSize).
  int wtrCols = p > (int)blockSize ? p - (int)blockSize : 1;
  for(i=0; i<n; i++)
    for(j=0; j<p; j++)
      X[i + j*n] = i + j;
  printf("\n X \n");
  for(i=0; i<n; i++){
    for(j=0; j<p; j++){
      printf("%.f ", X[i + j*n]);
    }
    printf("\n");
  }
  cudaMalloc( (void **)&dX, n*p*fbytes );
  cudaMalloc( (void **)&dColNorms, p*fbytes);
  cudaMalloc( (void **)&dscales, blockSize*fbytes);
  cudaMalloc( (void **)&dBeta, blockSize*fbytes);
  cudaMalloc( (void **)&dqrAux, p*fbytes);
  cudaMalloc( (void **)&dpivot, p*sizeof(int));
  cudaMalloc( (void **)&dWtR, blockSize*wtrCols*fbytes);
  cudaMalloc( (void **)&drank, sizeof(int));
  cudaMalloc( (void **)&dV, blockSize*stride*fbytes);
  cudaMalloc( (void **)&dW, blockSize*stride*fbytes);
  cudaMalloc( (void **)&dT, blockSize*blockSize*fbytes);
  cudaMalloc( (void **)&du, stride*fbytes);
  cudaMemcpy( dX, X, n*p*fbytes, cudaMemcpyHostToDevice);
  nblocks = p / nthreads;
  if(nblocks * nthreads < p)
    nblocks++;
  getColNorms<<<nblocks, nthreads>>>(n, p, dX, dColNorms);
  GetQRDecompBlocked<<<1,1>>>(n, p, dX, dColNorms, blockSize, stride, dscales, dBeta,
                              dqrAux, dpivot, dWtR, drank, dV, dW, dT, du);
  // Surface launch/config errors instead of silently printing garbage.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("CUDA error: %s\n", cudaGetErrorString(err));
  cudaMemcpy( X, dX, n*p*fbytes, cudaMemcpyDeviceToHost);
  printf("\n R \n");
  for(i=0; i<n; i++){
    for(j=0; j<p; j++){
      printf("%.f ", X[i + j*n]);
    }
    printf("\n");
  }
  cudaFree(dX);
  cudaFree(dColNorms);
  free(X);
  cudaFree(dscales);
  cudaFree(dBeta);
  cudaFree(dqrAux);
  cudaFree(dpivot);
  cudaFree(dWtR);
  cudaFree(drank);
  cudaFree(dV);
  cudaFree(dW);
  cudaFree(dT);
  cudaFree(du);
  return 0;
}
|
23,273 | #include "includes.h"
using namespace std;
// function generate random numbers and assign it to array
// Elementwise vector add: c[i] = a[i] + b[i], one element per thread.
// NOTE(review): there is no bounds check, so the launch configuration
// must cover exactly the array length -- confirm at the call site.
__global__ void add(int *a, int *b, int *c) {
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	c[index] = a[index] + b[index];
} |
23,274 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define N 2
// Every thread in the block writes its own threadIdx.x to A[0] -- an
// apparently deliberate shared-memory data race (this file looks like a
// verifier benchmark; see the commented ESBMC_verify_kernel call in main).
__global__ void foo()
{
  __shared__ int A[8];
  A[0] = threadIdx.x;
}
// Launches foo with 1 block of N threads and waits for it to finish.
int main(){
  foo<<<1, N>>>();
  //ESBMC_verify_kernel(foo,1, N);
  // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
  // is the supported equivalent.
  cudaDeviceSynchronize();
  return 0;
}
|
23,275 | #include "includes.h"
// Half-kick velocity update: v += a * dt/2 for each of numBodies bodies,
// one body per thread.
__global__ void vel_step( float4 *__restrict__ deviceVel, float3 *__restrict__ accels, unsigned int numBodies, float dt)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // Fix: the original guard was `index > numBodies`, letting the thread
  // with index == numBodies read/write one element past both arrays.
  // Also use 0.5f to avoid promoting the arithmetic to double.
  if (index >= numBodies) {return;};
  deviceVel[index].x += accels[index].x * 0.5f * dt;
  deviceVel[index].y += accels[index].y * 0.5f * dt;
  deviceVel[index].z += accels[index].z * 0.5f * dt;
} |
23,276 | /*written by Cheng Chen
parallel computing second project part 1 12/4/2016*/
#include <iostream>
#include <stdlib.h>
#include <algorithm>
#include <ctime>
#include "kernels.cuh"
using namespace std;
// Draws one value from rand(); the intermediate double is truncated back
// to int by the declared return type (preserving the original behavior).
int randNum() {
	return (int)((double)rand());
}
// double timeTransfer(struct timeval start, struct timeval end){
// double startInuSec = start.tv_sec * 1000 * 1000 + start.tv_usec;
// double endInuSec = end.tv_sec * 1000 * 1000 + end.tv_usec;
// return (endInuSec - startInuSec)/1000000.0;
// }
// Computes max, min, mean and standard deviation of `array` (length n) on
// the GPU (one kernel per statistic), then recomputes them on the CPU for
// comparison, printing results and timings for both.
// NOTE: frees `array` before returning -- callers must not use it after.
void concurrent_all(double* array, int n){
	double *h_max = (double*)malloc(sizeof(double));
	double *h_std = (double*)malloc(sizeof(double));
	double *h_mean = (double*)malloc(sizeof(double));
	double *h_min = (double*)malloc(sizeof(double));
	dim3 gridSize = 256;
	dim3 blockSize = 256;
	double *d_max;
	double *d_min;
	double *d_mean;
	double *d_std;
	double *d_array;
	int *d_mutex,*d_mutex1, *d_mutex2;
	cudaMalloc((void**)&d_max, sizeof(double));
	cudaMalloc((void**)&d_min, sizeof(double));
	cudaMalloc((void**)&d_mean, sizeof(double));
	cudaMalloc((void**)&d_std, sizeof(double));
	cudaMalloc((void**)&d_array, n*sizeof(double));
	cudaMemset(d_max, 0, sizeof(double));
	cudaMemset(d_min, 0, sizeof(double));
	cudaMemset(d_std, 0, sizeof(double));
	cudaMemset(d_mean, 0, sizeof(double));
	cudaMalloc((void**)&d_mutex, sizeof(int));
	cudaMemset(d_mutex, 0, sizeof(int));
	cudaMalloc((void**)&d_mutex1, sizeof(int));
	cudaMemset(d_mutex1, 0, sizeof(int));
	cudaMalloc((void**)&d_mutex2, sizeof(int));
	cudaMemset(d_mutex2, 0, sizeof(int));
	cudaMemcpy(d_array, array, n*sizeof(double), cudaMemcpyHostToDevice);
	float gpu_elapsed_time;
	cudaEvent_t gpu_start, gpu_stop;
	cudaEventCreate(&gpu_start);
	cudaEventCreate(&gpu_stop);
	cudaEventRecord(gpu_start, 0);
	cout<<"--------GPU version concurrent------------"<<endl;
	find_maximum_kernel<<<gridSize, blockSize>>>(d_array, d_max, d_mutex, n);
	find_minimum_kernel<<<gridSize, blockSize>>>(d_array, d_min, d_mutex1, n);
	mean_kernel<<<gridSize, blockSize>>>(d_array, d_mean, d_mutex2, n);
	cudaMemcpy(h_mean, d_mean, sizeof(double), cudaMemcpyDeviceToHost);
	*h_mean = *h_mean / n;
	std_kernel<<<gridSize, blockSize>>>(d_array, d_std, d_mutex2, n, *h_mean);
	cudaMemcpy(h_std, d_std, sizeof(double), cudaMemcpyDeviceToHost);
	*h_std = sqrt(*h_std/n);
	cudaEventRecord(gpu_stop, 0);
	cudaEventSynchronize(gpu_stop);
	cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
	cudaEventDestroy(gpu_start);
	cudaEventDestroy(gpu_stop);
	cudaMemcpy(h_max, d_max, sizeof(double), cudaMemcpyDeviceToHost);
	cudaMemcpy(h_min, d_min, sizeof(double), cudaMemcpyDeviceToHost);
	// Fix: the original streamed the POINTERS (addresses), not the
	// computed statistics -- dereference the host result buffers.
	std::cout<<"max "<<*h_max<<" min "<<*h_min<<" mean "<<*h_mean << " Stand_d: "<< *h_std<<endl;
	std::cout<<"array size "<< n <<" time "<<gpu_elapsed_time<<endl;
	//cpu version
	cout<<"-----------cpu Version concurrent---------------------"<<endl;
	double max=array[0], min=array[0], mean = 0, standDeviation = 0;
	for(int i = 0; i < n; ++i){
		if(max < array[i]){
			max = array[i];
		}
		// Fix: the original tested `min < array[i]`, which recomputes the
		// maximum; the minimum needs the opposite comparison.
		if(min > array[i]){
			min = array[i];
		}
		mean += array[i];
	}
	mean = mean / n;
	clock_t cpu_start = clock();
	for(int i = 0; i < n; i++){
		standDeviation+=(array[i] - mean)*(array[i] - mean);
	}
	standDeviation =sqrt( standDeviation/n);
	clock_t cpu_stop = clock();
	double cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
	std::cout<<"max "<<max<<" min "<<min<<" mean "<<mean << " Stand d "<<standDeviation<<endl;
	std::cout<<"array size "<< n<<" time "<<cpu_elapsed_time<<endl;
	// Fix: release every allocation (the original leaked d_min, d_mean,
	// d_std, d_mutex1, d_mutex2 and all four h_* host buffers).
	cudaFree(d_array);
	cudaFree(d_max);
	cudaFree(d_min);
	cudaFree(d_mean);
	cudaFree(d_std);
	cudaFree(d_mutex);
	cudaFree(d_mutex1);
	cudaFree(d_mutex2);
	free(h_max);
	free(h_min);
	free(h_mean);
	free(h_std);
	free(array); // documented side effect: the caller's buffer is released
}
// Benchmarks GPU vs CPU computation of max, min, mean and standard
// deviation over `totalNum` random doubles (count from argv[1], default
// 50M), printing results and timings per statistic, then repeats the
// whole comparison via concurrent_all() (which frees `nums`).
int main(int argc, char *argv[]) {
	//set the total number
	int totalNum = 0;
	if(argc < 2){
		totalNum = 50 * 1000 * 1000;
	}else {
		totalNum =atoi(argv[1]);
	}
	double *d_array;
	double *d_max;
	int *d_mutex;
	double *h_max;
	//allocate memory space for the random numbers
	double* nums = (double*)malloc(totalNum * sizeof(double));
	h_max = (double*)malloc(sizeof(double));
	cudaMalloc((void**)&d_array, totalNum * sizeof(double));
	cudaMalloc((void**)&d_max, sizeof(double));
	cudaMalloc((void**)&d_mutex, sizeof(int));
	// Fix: the original cleared only sizeof(float) (4 bytes) of each
	// 8-byte double accumulator, leaving the upper half uninitialized.
	cudaMemset(d_max, 0, sizeof(double));
	cudaMemset(d_mutex, 0, sizeof(int));
	//fill array with data
	srand(time(NULL));
	for (int i = 0; i < totalNum; ++i) {
		nums[i] = randNum();
	}
	//set up timing variables
	float gpu_elapased_time;
	cudaEvent_t gpu_start, gpu_stop;
	cudaEventCreate(&gpu_start);
	cudaEventCreate(&gpu_stop);
	//copy from host to device
	cudaMemcpy(d_array, nums, totalNum * sizeof(double), cudaMemcpyHostToDevice);
	dim3 gridSize = 256;
	dim3 blockSize = 256;
	//call kernel
	printf("---------find max-------------------\n");
	cudaEventRecord(gpu_start, 0);
	find_maximum_kernel <<<gridSize, blockSize>>>(d_array, d_max, d_mutex, totalNum);
	//copy from GPU to host
	cudaMemcpy(h_max, d_max, sizeof(double), cudaMemcpyDeviceToHost);
	cudaEventRecord(gpu_stop, 0);
	cudaEventSynchronize(gpu_stop);
	cudaEventElapsedTime(&gpu_elapased_time, gpu_start, gpu_stop);
	cudaEventDestroy(gpu_start);
	cudaEventDestroy(gpu_stop);
	std::cout << "size of arrays " << totalNum <<endl;
	std::cout << "Maximum number on gpu was: " << *h_max << std::endl;
	cout<<"GPU time " << gpu_elapased_time << endl;
	//cpu version
	clock_t cpu_start = clock();
	double maxNum = nums[0];
	for (int i = 0; i < totalNum; i++) {
		if (maxNum < nums[i]) {
			maxNum = nums[i];
		}
	}
	clock_t cpu_stop = clock();
	clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
	cout << "CPU time " << cpu_elapsed_time << endl;
	cout << "the cpu max number is " << maxNum << endl;
	printf("---------find min-------------------\n");
	//allocate memory space for the random numbers
	double* h_min;
	h_min = (double*)malloc(sizeof(double));
	double* d_min;
	cudaMalloc((void**)&d_min, sizeof(double));
	cudaMemset(d_min, 0, sizeof(double)); // fix: was sizeof(float)
	cudaMemset(d_mutex, 0, sizeof(int));
	cudaEventCreate(&gpu_start);
	cudaEventCreate(&gpu_stop);
	cudaEventRecord(gpu_start, 0);
	find_minimum_kernel <<<gridSize, blockSize>>>(d_array, d_min, d_mutex, totalNum);
	//copy from GPU to host
	cudaMemcpy(h_min, d_min, sizeof(double), cudaMemcpyDeviceToHost);
	cudaEventRecord(gpu_stop, 0);
	cudaEventSynchronize(gpu_stop);
	cudaEventElapsedTime(&gpu_elapased_time, gpu_start, gpu_stop);
	std::cout << "size of arrays " << totalNum << endl;
	std::cout << "the gpu min number was: " << *h_min << std::endl;
	cout<<" gpu time " << gpu_elapased_time << endl;
	//cpu version
	double minimum = nums[0];
	cpu_start = clock();
	for (int i = 0; i < totalNum; i++) {
		// Fix: the original tested `minimum < nums[i]`, which computes
		// the maximum again; the minimum needs the opposite comparison.
		if (minimum > nums[i]) {
			minimum = nums[i];
		}
	}
	cpu_stop = clock();
	cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
	cout << "cpu time " << cpu_elapsed_time << endl;
	cout << "the cpu min number is " << minimum << endl;
	printf("--------- arithmetic mean-------------------\n");
	//allocate memory space for the random numbers
	double* h_mean;
	h_mean = (double*)malloc(sizeof(double));
	double* d_mean;
	cudaMalloc((void**)&d_mean, sizeof(double));
	cudaMemset(d_mean, 0, sizeof(double)); // fix: was sizeof(float)
	cudaMemset(d_mutex, 0, sizeof(int));
	//set up timing variables
	cudaEventCreate(&gpu_start);
	cudaEventCreate(&gpu_stop);
	cudaEventRecord(gpu_start, 0);
	mean_kernel <<<gridSize, blockSize>>> (d_array, d_mean, d_mutex, totalNum);
	//copy from GPU to host
	cudaMemcpy(h_mean, d_mean, sizeof(double), cudaMemcpyDeviceToHost);
	cudaEventRecord(gpu_stop, 0);
	cudaEventSynchronize(gpu_stop);
	cudaEventElapsedTime(&gpu_elapased_time, gpu_start, gpu_stop);
	cudaEventDestroy(gpu_start);
	cudaEventDestroy(gpu_stop);
	*h_mean = *h_mean/totalNum;
	std::cout << "mean of the array calculate by gpu was: " << *h_mean<< std::endl;
	std::cout << "size of arrays " << totalNum << endl;
	cout<<"Gpu time " << gpu_elapased_time << endl;
	//cpu version
	double mean = nums[0];
	cpu_start = clock();
	for (int i = 1; i < totalNum; i++) {
		mean += nums[i];
	}
	mean = mean/totalNum;
	cpu_stop = clock();
	cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
	cout << "cpu time " << cpu_elapsed_time << endl;
	cout << "the mean is " << mean << endl;
	//GPU version std
	printf("-----------------GPU version STD--------------------\n");
	double *h_std = (double*)malloc(sizeof(double));
	double *d_std;
	cudaMalloc((void**)&d_std, sizeof(double));
	cudaMemset(d_std, 0, sizeof(double));
	cudaEventCreate(&gpu_start);
	cudaEventCreate(&gpu_stop);
	cudaEventRecord(gpu_start, 0);
	std_kernel<<<gridSize, blockSize>>>(d_array, d_std, d_mutex, totalNum, *h_mean);
	cudaEventRecord(gpu_stop,0);
	cudaEventSynchronize(gpu_stop);
	cudaEventElapsedTime(&gpu_elapased_time, gpu_start, gpu_stop);
	cudaMemcpy(h_std, d_std, sizeof(double), cudaMemcpyDeviceToHost);
	cudaEventDestroy(gpu_start);
	cudaEventDestroy(gpu_stop);
	*h_std = sqrt(*h_std/totalNum);
	std::cout<<"GPU std is " << *h_std<<endl;
	std::cout<<"GPU time "<<gpu_elapased_time<<endl;
	//cpu version std
	double stand_d= 0;
	cpu_start = clock();
	for (int i = 0; i < totalNum; i++) {
		stand_d += (nums[i]-mean)*(nums[i]-mean);
	}
	stand_d = stand_d/totalNum;
	stand_d = sqrt(stand_d);
	cpu_stop = clock();
	cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
	cout << "CPU time " << cpu_elapsed_time << endl;
	cout << "the stand deviation is " << stand_d << endl;
	//free the host result buffers
	free(h_max);
	free(h_min);
	free(h_std);
	free(h_mean);
	//free gpu (fix: d_mutex was never freed)
	cudaFree(d_max);
	cudaFree(d_min);
	cudaFree(d_std);
	cudaFree(d_mean);
	cudaFree(d_array);
	cudaFree(d_mutex);
	// concurrent_all frees `nums` internally.
	concurrent_all(nums, totalNum);
}
|
23,277 |
extern __device__ int file1_func(int);
// Routes positive inputs through file1_func (defined in another
// translation unit) with the sign flipped; non-positive inputs pass
// through unchanged.
int __device__ file3_func(int x)
{
  return (x > 0) ? file1_func(-x) : x;
}
|
23,278 | #include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
#define K 32
#define N 32
// Writes threadIdx.x squared into N/K consecutive ints starting at `start`.
// NOTE(review): with the launch fun<<<K, N>>> in main, nthreads = K*N =
// 1024 > N, so N / nthreads == 0 and every thread computes start == 0;
// all threads then race on a[0 .. N/K). The program only times the kernel
// and never checks `a`, so this may be intentional -- confirm.
__global__ void fun(int *a) {
	int i;
	unsigned nthreads = blockDim.x * gridDim.x;
	unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned start = N / nthreads * id;
	for (i = 0; i < N/K; ++i)
		a[start + i] = threadIdx.x * threadIdx.x;
}
// Returns the current wall-clock time in seconds (gettimeofday-based,
// microsecond resolution). Prints a diagnostic on failure.
double rtclock() {
	struct timeval tv;
	struct timezone tz;
	int rc = gettimeofday(&tv, &tz);
	if (rc != 0)
		printf("Error return from gettimeofday: %d", rc);
	return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
// Prints "<str><elapsed> seconds" for the interval [starttime, endtime].
void printtime(const char *str, double starttime, double endtime) {
	double elapsed = endtime - starttime;
	printf("%s%3f seconds\n", str, elapsed);
}
// Times 1000 synchronous launches of `fun` over device array `da`
// (K blocks of N threads each), prints the total wall time, then copies
// the result back to host (unused apart from the commented-out print).
int main() {
	int a[N], *da;
	int i;
	cudaMalloc(&da, N * sizeof(int));
	double start = rtclock();
	for (i = 0; i < 1000; ++i) {
		fun<<<K, N>>>(da);
		// synchronize so each launch is fully timed
		cudaDeviceSynchronize();
	}
	double end = rtclock();
	// NOTE(review): the label says "Single block" but K (=32) blocks are
	// launched -- confirm which configuration was meant to be measured.
	printtime("Single block: ", start, end);
	cudaMemcpy(a, da, N * sizeof(int), cudaMemcpyDeviceToHost);
	//for (i = 0; i < N; ++i)
	//	printf("%d\n", a[i]);
	return 0;
}
|
23,279 | #include <stdio.h>
//constant for architecture
int SEED = 12; //seed for rand //old was 15
int DIM_LIM = 300; //max size of a matrix
double INIT_VAL = 0.1; //initial value of matrix
int MAT_COUNT = 10000; //
/* file format
ndicate the size of array A)
n_1 n_2 n_3... n_k (k numbers in a single line indicate the dimensions of matrices)
n_1 by n_2 matrix 1 (n_1 rows, each row contains n_2 doubles)
n_2 by n_3 matrix 2 (n_2 rows, each row contains n_3 doubles)
...
n_{k-1} by n_k matrix k-1
*/
// Returns a pseudo-random double in [M, N], derived from a single rand()
// draw scaled into the requested range.
double randMToN(double M, double N)
{
	double scale = RAND_MAX / (N - M);
	return M + rand() / scale;
}
// Writes a rows x cols matrix to fp: every element is the global
// INIT_VAL, space-separated, one matrix row per output line.
void gen_matrix(int rows, int cols, FILE *fp){
	for(int r = 0; r < rows; r++){
		for(int col = 0; col < cols; col++)
			fprintf(fp, "%f ", INIT_VAL);
		fprintf(fp, "\n");
	}
}
// Generates "input.txt": a count line, a line of MAT_COUNT+1 random
// dimensions, then MAT_COUNT matrices of INIT_VAL in the chained sizes
// dim[i] x dim[i+1]. Usage: <init_val> <matrix_count>.
int main(int argc, char *argv[]){
	if(argc == 3){
		INIT_VAL = atof(argv[1]);
		MAT_COUNT = atoi(argv[2]);
		printf("main: %d matrices of initial value is %f\n", MAT_COUNT, INIT_VAL);
	} else {
		printf("incorrect input values, must be max initial value, and number of matrices");
		return -1;
	}
	//initialize random number gen, get array sizes
	srand(SEED); //init random gen
	int dim[MAT_COUNT + 1]; //stores matrix sizes (VLA sized by user input)
	for(int z = 0; z <= MAT_COUNT; z++){
		dim[z] = rand()%DIM_LIM + 1;//random between 1 and limit
	}
	const char* filename = "input.txt";
	FILE *fp = fopen(filename, "w");
	// Fix: report open failure instead of crashing inside fprintf.
	if(fp == NULL){
		printf("could not open %s for writing\n", filename);
		return -1;
	}
	fprintf(fp, "%d\n", MAT_COUNT + 1); //count of arrays
	for(int i = 0; i < MAT_COUNT + 1; i++){
		fprintf(fp, "%d ", dim[i]);
	}
	fprintf(fp, "\n");
	for(int i = 0; i < MAT_COUNT; i++){
		gen_matrix(dim[i], dim[i+1], fp);
	}
	// Fix: the original never closed fp, leaking the handle and risking
	// loss of buffered output at exit.
	fclose(fp);
	return 0;
}
|
23,280 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
* Paint a 3D texture with a gradient in X (blue) and Z (green), and have every
* other Z slice have full red.
*/
// Paints an RGBA8 3D surface: red/green are cos/sin of a radial phase
// animated by t, blue is 0, alpha is opaque. One thread per (x,y) column,
// looping over all Z slices (noted in the original as non-optimal access).
__global__ void cuda_kernel_texture_3d(unsigned char *surface, int width, int height, int depth, size_t pitch, size_t pitchSlice, float t)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    // in the case where, due to quantization into grids, we have
    // more threads than pixels, skip the threads which don't
    // correspond to valid pixels
    if (x >= width || y >= height) return;
    for (int z = 0; z < depth; ++z)
    {
        // get a pointer to this pixel (byte-pitched rows and slices)
        unsigned char *pixel = surface + z*pitchSlice + y*pitch + 4*x;
        // Fix: hoist the shared phase and use single-precision cosf/sinf;
        // the original called cos/sin, promoting the math to double on
        // every pixel for no visual benefit at 8-bit quantization.
        float phase = t + (x*x + y*y + z*z)*0.0001f*3.14f;
        pixel[0] = (unsigned char)(255.f * (0.5f + 0.5f*cosf(phase))); // red
        pixel[1] = (unsigned char)(255.f * (0.5f + 0.5f*sinf(phase))); // green
        pixel[2] = (unsigned char)0;                                   // blue
        pixel[3] = 255;                                                // alpha
    }
}
extern "C"
// Host wrapper: launches cuda_kernel_texture_3d with 16x16 (256-thread)
// blocks covering width x height, and reports any launch error.
void cuda_texture_3d(void *surface, int width, int height, int depth, size_t pitch, size_t pitchSlice, float t)
{
    dim3 block = dim3(16, 16); // block dimensions are fixed to be 256 threads
    dim3 grid = dim3((width + block.x - 1)/block.x, (height + block.y - 1)/block.y);
    cuda_kernel_texture_3d<<<grid, block>>>((unsigned char *)surface, width, height, depth, pitch, pitchSlice, t);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("cuda_kernel_texture_3d() failed to launch error = %d\n", error);
    }
}
|
23,281 | #include <stdio.h>
// Minimal demo kernel: each launched thread prints one line via device
// printf (output is flushed at the next host synchronization).
__global__ void emptyKernel()
{
	printf("empty kernel call\n");
}
// Launches emptyKernel with a single 1-thread block and waits so that the
// device printf output is flushed before the process exits.
int main()
{
	dim3 threadsPerBlock(1);
	dim3 blocksPerGrid(1);
	emptyKernel<<<blocksPerGrid, threadsPerBlock>>>();
	// Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
	// is the supported equivalent.
	cudaDeviceSynchronize();
	return 0;
}
|
23,282 | // ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Paralel Systems
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:petr.olivka@vsb.cz
//
// Example of CUDA Technology Usage
// Multiplication of elements in float array
//
// ***********************************************************************
#include <cuda_runtime.h>
#include <stdio.h>
// Demo kernel for array elements multiplication.
// Every thread selects one element and multiply it.
// Multiplies each of the L elements of `pole` by Mult, one thread per
// element; threads past the end of the array do nothing.
__global__ void kernel_mult( float *pole, int L, float Mult )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx < L )
        pole[ idx ] *= Mult;
}
// Host wrapper: copies P (Length floats) to the device, multiplies every
// element by Mult via kernel_mult, and copies the result back into P.
// Errors are printed to stdout but do not abort the sequence.
void run_mult( float *P, int Length, float Mult )
{
    const int threadsPerBlock = 128;
    int blockCount = ( Length + threadsPerBlock - 1 ) / threadsPerBlock;
    size_t bytes = Length * sizeof( float );
    cudaError_t status;

    // Memory allocation in GPU device
    float *devP;
    status = cudaMalloc( &devP, bytes );
    if ( status != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( status ) );

    // Copy data from PC to GPU device
    status = cudaMemcpy( devP, P, bytes, cudaMemcpyHostToDevice );
    if ( status != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( status ) );

    // Grid creation and launch
    kernel_mult<<< blockCount, threadsPerBlock >>>( devP, Length, Mult );
    status = cudaGetLastError();
    if ( status != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( status ) );

    // Copy data from GPU device back to PC
    status = cudaMemcpy( P, devP, bytes, cudaMemcpyDeviceToHost );
    if ( status != cudaSuccess )
        printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( status ) );

    cudaFree( devP );
}
|
23,283 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 3
* of the programming guide with some additions like error checking.
*
*/
// Device code
// Per-column blocked argmax over a rows x cols column-major pair of
// arrays: each block reduces its tile of in_values (carrying in_indexes
// along) in shared memory and writes one partial (index, value) per
// (blockIdx.x, row-of-threads) to the output arrays.
// Shared layout: blockDim.x*blockDim.y ints followed by as many doubles.
// NOTE(review): the reduction reads maxvalues[i + s] without checking
// tidx + s < blockDim.x, so blockDim.x should be a power of two -- confirm
// the launch configuration. Also note the loop variable `s` shadows the
// shared-memory array `s` declared above.
extern "C" __global__ void argmax(const int *in_indexes, const double *in_values, int *out_indexes, double *out_values, int rows, int cols) {
	extern __shared__ int s[];
	int *maxindexes = s;
	double *maxvalues = (double*)&maxindexes[blockDim.x*blockDim.y];
	unsigned int tidx = threadIdx.x;
	unsigned int tidy = threadIdx.y;
	unsigned int i = tidx + tidy*blockDim.x;
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
	// out-of-range threads contribute neutral (0, 0) entries
	maxindexes[i] = (x < rows && y < cols) ? in_indexes[x + rows*y] : 0;
	maxvalues[i] = (x < rows && y < cols) ? in_values[x + rows*y] : 0;
	__syncthreads();
	// do reduction in shared mem
	for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
	{
		if (tidx < s && x + s < rows && y < cols)
		{
			if (maxvalues[i + s] > maxvalues[i]) {
				maxvalues[i] = maxvalues[i + s];
				maxindexes[i] = maxindexes[i + s];
			}
		}
		__syncthreads();
	}
	// lane 0 of each row of threads publishes its block's partial result
	if (tidx == 0 && y < cols) {
		out_indexes[gridDim.x*(tidy+ (blockIdx.y*blockDim.y)) + blockIdx.x] = maxindexes[blockDim.x * tidy];
		out_values[gridDim.x*(tidy + (blockIdx.y*blockDim.y)) + blockIdx.x] = maxvalues[blockDim.x * tidy];
	}
}
|
23,284 | #include <cuda_runtime_api.h>
#include <stddef.h>
// Crops a CHW float image: output pixel (u, v, c) reads input pixel
// (u + x_offset, v + y_offset, c); out-of-bounds reads produce 0.
// One thread per output element.
__global__ void image2d_crop(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    int x_offset,
    int y_offset,
    float *out_pixels,
    int crop_width,
    int crop_height)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int total = crop_width * crop_height * channels;
  if (idx >= total) {
    return;
  }
  int u = idx % crop_width;
  int v = (idx / crop_width) % crop_height;
  int c = idx / (crop_width * crop_height);
  int x = u + x_offset;
  int y = v + y_offset;
  bool inside = (x >= 0) && (x < in_width) && (y >= 0) && (y < in_height) && (c < channels);
  out_pixels[idx] = inside
      ? in_pixels[x + y * in_width + c * in_width * in_height]
      : 0.0f;
}
// Host wrapper: launches image2d_crop on `stream` with 1024-thread blocks
// covering every output element.
extern "C" void neuralops_cuda_image2d_crop(
    const float *in_pixels,
    size_t in_width,
    size_t in_height,
    size_t channels,
    ptrdiff_t x_offset,
    ptrdiff_t y_offset,
    float *out_pixels,
    size_t crop_width,
    size_t crop_height,
    cudaStream_t stream)
{
  int total = crop_width * crop_height * channels;
  const int threads = 1024;
  int blocks = (total + threads - 1) / threads;
  image2d_crop<<<blocks, threads, 0, stream>>>(
      in_pixels,
      in_width,
      in_height,
      channels,
      x_offset,
      y_offset,
      out_pixels,
      crop_width,
      crop_height);
}
// Mirrors an image horizontally (flip across the vertical axis), channel by
// channel, in planar layout. One thread per output element.
__global__ void image2d_flip(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    float *out_pixels)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  int total = in_width * in_height * channels;
  if (tid < total) {
    // (column, row, channel) of this output element.
    int col = tid % in_width;
    int row = (tid / in_width) % in_height;
    int ch = tid / (in_width * in_height);
    // Read from the mirrored column of the same row/channel.
    int mirrored = (in_width - col - 1) + row * in_width + ch * in_width * in_height;
    out_pixels[tid] = in_pixels[mirrored];
  }
}
// Host launcher for image2d_flip: one thread per element, 1024 threads per
// block, enqueued asynchronously on the caller's stream. Sizes are narrowed
// to int, so in_width*in_height*channels must fit in a signed int.
extern "C" void neuralops_cuda_image2d_flip(
    const float *in_pixels,
    size_t in_width,
    size_t in_height,
    size_t channels,
    float *out_pixels,
    cudaStream_t stream)
{
  int n = in_width * in_height * channels;
  image2d_flip<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      in_pixels,
      in_width,
      in_height,
      channels,
      out_pixels);
}
|
23,285 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
#define a 3
#define b 5
#define c 4
// Fills an li x lj row-major matrix with the sequence 0, 1, 2, ...
// in row-major order.
void llenarMatriz(double *w, int li, int lj){
    double next = 0;
    for (int row = 0; row < li; ++row) {
        for (int col = 0; col < lj; ++col) {
            w[row * lj + col] = next;
            next += 1.0;
        }
    }
}
// Prints an li x lj row-major matrix, one row per line, each value with
// four decimal places.
void print(double *w, int li, int lj){
    for (int row = 0; row < li; ++row) {
        for (int col = 0; col < lj; ++col)
            printf("%.4lf ", w[row * lj + col]);
        printf("\n");
    }
}
// Stub kernel — body intentionally empty; the matrix product it was meant
// to compute is unimplemented (see the commented-out "product<<<,>>>"
// launch in main).
__global__
void add(double *d_x, double *d_y, double *d_z){
}
// Allocates host and device matrices, uploads x (a x b) and y (b x c), and
// prints x; the product kernel launch is still TODO.
// Fixes over the original: the device pointers were malloc'd on the host and
// then immediately overwritten by cudaMalloc (leaking the host blocks), and
// no allocation — host or device — was ever released.
int main(int argc, char const *argv[])
{
    int size1 = a*b*sizeof(double);
    int size2 = b*c*sizeof(double);
    int size3 = a*c*sizeof(double);
    double *x = (double*)malloc(size1);
    double *y = (double*)malloc(size2);
    double *z = (double*)malloc(size3);
    llenarMatriz(x,a,b);
    llenarMatriz(y,b,c);
    clock_t begin, end;
    double time_spent;
    begin = clock();
    // Device buffers: allocated only by cudaMalloc (no host malloc needed).
    double *d_x = NULL;
    double *d_y = NULL;
    double *d_z = NULL;
    cudaMalloc((void**)&d_x, size1);
    cudaMalloc((void**)&d_y, size2);
    cudaMalloc((void**)&d_z, size3);
    cudaMemcpy(d_x, x, size1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size2, cudaMemcpyHostToDevice);
    // product<<<,>>>
    for(int i=0; i<a*b; i++){
        printf("%.4lf\n", x[i]);
    }
    print(x,a,b);
    end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    //printf("%lf\n", time_spent);
    // Release everything before exit.
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    free(x);
    free(y);
    free(z);
    return 0;
}
|
23,286 | /*
** Originally copied from
** https://github.com/CodedK/CUDA-by-Example-source-code-for-the-book-s-examples-/blob/master/chapter06/ray_noconst.cu
** With a few bugs fixed
*/
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <fstream>
#include <iostream>
using namespace std;
#define INF 2e10f
#define rnd(x) (x * rand() / RAND_MAX)
#define SPHERES 20
#define DIM 1024
#define uchar unsigned char
#define uint32_t unsigned int
#define uint16_t unsigned short
#define int32_t int
#pragma pack(push, 1)
// 14-byte BMP file header; byte-exact on disk thanks to the surrounding
// #pragma pack(push, 1). The constructor presets the "BM" magic and a
// 54-byte pixel-data offset (14-byte file header + 40-byte info header).
struct BMPFileHeader {
    uint16_t file_type; // File type always BM which is 0x4D42
    uint32_t file_size; // Size of the file (in bytes)
    uint16_t reserved1; // Reserved, always 0
    uint16_t reserved2; // Reserved, always 0
    uint32_t offset_data; // Start position of pixel data (bytes from the beginning of the file)
    BMPFileHeader() {
        file_type = 0x4D42;
        file_size = 0;
        reserved1 = 0;
        reserved2 = 0;
        offset_data = 54;
    };
};
// 40-byte BITMAPINFOHEADER, also packed. The constructor presets an
// uncompressed 24-bit DIM x DIM image (bottom-up row order, since height
// is positive).
struct BMPInfoHeader {
    uint32_t size; // Size of this header (in bytes)
    int32_t width; // width of bitmap in pixels
    int32_t height; // height of bitmap in pixels
    // (if positive, bottom-up, with origin in lower left corner)
    // (if negative, top-down, with origin in upper left corner)
    uint16_t planes; // No. of planes for the target device, this is always 1
    uint16_t bit_count; // No. of bits per pixel
    uint32_t compression; // 0 or 3 - uncompressed. THIS PROGRAM CONSIDERS ONLY UNCOMPRESSED BMP images
    uint32_t size_image; // 0 - for uncompressed images
    int32_t x_pixels_per_meter;
    int32_t y_pixels_per_meter;
    uint32_t colors_used; // No. color indexes in the color table. Use 0 for the max number of colors allowed by bit_count
    uint32_t colors_important; // No. of colors used for displaying the bitmap. If 0 all colors are required
    BMPInfoHeader() {
        size = 40;
        width = DIM;
        height = DIM;
        planes = 1;
        bit_count = 24;
        compression = 0;
        size_image = 0;
        x_pixels_per_meter = 0;
        y_pixels_per_meter = 0;
        colors_used = 0;
        colors_important = 0;
    }
};
// Complete 54-byte BMP header (file header + info header, packed
// back-to-back); the constructor fills in the total file size for a
// 24-bit (3 bytes/pixel) DIM x DIM image.
struct Header {
    BMPFileHeader fileheader;
    BMPInfoHeader infoheader;
    Header() {
        fileheader.file_size = 54 + 3 * DIM * DIM;
    }
};
#pragma pack(pop)
// A colored sphere. hit() intersects the z-axis-aligned ray through pixel
// (ox, oy) with the sphere: on a hit it returns the z of the nearer surface
// point and writes a shading factor in [0, 1] (dz/radius) to *n; on a miss
// it returns -INF and leaves *n untouched.
struct Sphere {
    float r, b, g;          // color components
    float radius;
    float x, y, z;          // center position
    __device__ float hit(float ox, float oy, float *n) {
        float dx = ox - x;
        float dy = oy - y;
        if (dx * dx + dy * dy < radius * radius) {
            float dz = sqrtf(radius * radius - dx * dx - dy * dy);
            *n = dz / sqrtf(radius * radius);
            return dz + z;
        }
        return -INF;
    }
};
__global__ void kernel(Sphere *, uchar *);
// Host driver: generates SPHERES random spheres, renders one DIM x DIM frame
// on the GPU, times the render (including transfers) with CUDA events, and
// writes the image as a 24-bit BMP.
int main() {
    cudaEvent_t start, stop; // Capture the start time
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    uchar *bitmap = new uchar[3 * DIM * DIM];   // host BGR frame buffer
    uchar *dev_bitmap;
    Sphere *s;
    cudaMalloc((void**)&dev_bitmap, sizeof(uchar) * 3 * DIM * DIM);
    cudaMalloc((void**)&s, sizeof(Sphere) * SPHERES);
    // Build the random scene on the host, then upload it once.
    Sphere *temp_s = new Sphere[SPHERES];
    for (int i = 0; i < SPHERES; i++) {
        temp_s[i].r = rnd(1.f);
        temp_s[i].g = rnd(1.f);
        temp_s[i].b = rnd(1.f);
        temp_s[i].x = rnd(1000.f);
        temp_s[i].y = rnd(1000.f);
        temp_s[i].z = rnd(1000.f);
        temp_s[i].radius = rnd(100.f) + 20;
    }
    cudaMemcpy(s, temp_s, sizeof(Sphere) * SPHERES, cudaMemcpyHostToDevice);
    delete[] temp_s;
    // One thread per pixel: 16x16 blocks tiling the DIM x DIM image.
    dim3 grids(DIM / 16, DIM / 16);
    dim3 threads(16, 16);
    kernel<<<grids, threads>>>(s, dev_bitmap);
    // Blocking copy also synchronizes with the kernel before stop is recorded.
    cudaMemcpy(bitmap, dev_bitmap, sizeof(uchar) * 3 * DIM * DIM, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "Time to generate: " << elapsedTime << " ms\n";
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // draw: raw 54-byte header followed by the BGR pixel rows.
    // NOTE(review): the output directory must already exist; ofstream fails
    // silently here if it does not.
    Header header;
    ofstream out;
    out.open("with_pure_CUDA/nonconst.bmp", ios::binary);
    out.write((char*)&header, sizeof(Header));
    out.write((char*)bitmap, 3 * DIM * DIM);
    cudaFree(dev_bitmap);
    cudaFree(s);
    delete[] bitmap;
}
// Renders one pixel per thread: intersects the ray through (x, y) with every
// sphere and keeps the color of the nearest hit (largest returned z), scaled
// by the shading factor from Sphere::hit. Expects a DIM x DIM launch and a
// BGR byte buffer in ptr.
__global__ void kernel(Sphere *s, uchar *ptr) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    // float ox = x - DIM / 2;
    // float oy = y - DIM / 2;
    float ox = x, oy = y;
    float red = 0, green = 0, blue = 0;
    float closest = -INF;
    for (int i = 0; i < SPHERES; i++) {
        float shade;
        float depth = s[i].hit(ox, oy, &shade);
        if (depth > closest) {
            red = s[i].r * shade;
            green = s[i].g * shade;
            blue = s[i].b * shade;
            closest = depth;
        }
    }
    // BMP stores pixels in BGR order.
    ptr[offset * 3 + 0] = (int)(blue * 255);
    ptr[offset * 3 + 1] = (int)(green * 255);
    ptr[offset * 3 + 2] = (int)(red * 255);
}
23,287 | #include <cuda_runtime.h>
static __device__ float E = 2.718281828;
// Copies a slice of extent ddim (starting at `start`) out of a tensor
// dimension of extent sdim: blockIdx.x enumerates block_size-wide chunks of
// the destination, and the matching source chunk is found by re-expanding
// the dimension index and adding `start`. No bounds guard — the launch must
// provide exactly (dst elements / block_size) blocks of block_size threads,
// with block_size dividing the stride below the sliced dimension.
__global__ void sliceTensorKernel(float *src, float *dst, int sdim, int ddim, int start, int block_size)
{
    int di = blockIdx.x * block_size + threadIdx.x;
    // Map chunk index from dst space (dim extent ddim) to src space (extent sdim).
    int si = (blockIdx.x / ddim * sdim + blockIdx.x % ddim + start) * block_size + threadIdx.x;
    dst[di] = src[si];
}
// Per-thread argmax over contiguous runs: each thread scans dim_size
// consecutive values of src and writes the maximum to dst and its index
// (stored as a float) to arg. No bounds guard — the launch must cover
// exactly the number of output elements.
__global__ void reduceArgMaxKernel(float *src, float *dst, float *arg, int dim_size, int block_size)
{
    int out = blockIdx.x * block_size + threadIdx.x;
    int base = out * dim_size;
    float best = src[base];
    int best_i = 0;
    for (int k = 1; k < dim_size; k++) {
        float candidate = src[base + k];
        if (candidate > best) {
            best = candidate;
            best_i = k;
        }
    }
    dst[out] = best;
    arg[out] = best_i;
}
// Element-wise product dst[i] = src1[i] * src2[i]; one thread per element,
// no bounds guard (launch must match the element count exactly).
__global__ void multiplyElementKernel(float *src1, float *src2, float *dst, int block_size)
{
    int idx = blockIdx.x * block_size + threadIdx.x;
    dst[idx] = src1[idx] * src2[idx];
}
// Decodes SqueezeDet-style box deltas against center-form anchors
// (cx, cy, w, h), producing corner-form boxes (x1, y1, x2, y2) in res.
// One thread per 4-float box. The exponential is linearized above 1
// (expf(d) -> d * e) to avoid overflow — the "safe exp" trick.
// Fix over the original: the 0.5 corner offsets are now 0.5f float
// literals; the double literals forced a double-precision round trip
// per multiply inside an otherwise all-float kernel.
__global__ void transformBboxSQDKernel(float *delta, float *anchor, float *res, int block_size)
{
    int di = (blockIdx.x * block_size + threadIdx.x) * 4;
    float d[4] = {delta[di], delta[di+1], delta[di+2], delta[di+3]};
    float a[4] = {anchor[di], anchor[di+1], anchor[di+2], anchor[di+3]};
    // Shift the center by the delta scaled by the anchor size.
    float cx = a[0] + d[0] * a[2];
    float cy = a[1] + d[1] * a[3];
    // Scale width/height; linear continuation of expf above d = 1.
    float w = a[2] * (d[2] < 1 ? expf(d[2]) : d[2] * E);
    float h = a[3] * (d[3] < 1 ? expf(d[3]) : d[3] * E);
    res[di] = cx - w * 0.5f;
    res[di+1] = cy - h * 0.5f;
    res[di+2] = cx + w * 0.5f;
    res[di+3] = cy + h * 0.5f;
}
|
23,288 | #include <iostream>
#include <cuda.h>
#include <cmath>
#include <ctime>
// #include "common/book.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
/* 全局线程id get thread id: 1D block and 2D grid <<<(32,32),32>>>*/
#define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x) // 2D grid,1D block
// #define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x+threadIdx.y*blockDim.x) // 2D grid,2D block
/* get block id: 2D grid */
#define get_bid() (blockIdx.x + blockIdx.y * gridDim.x)
/* 每个block的线程id*/
// #define get_tid_per_block() (threadIdx.x+threadIdx.y*blockDim.x) // 2D block
#define get_tid_per_block() (threadIdx.x)
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
using namespace std;
typedef float FLOAT;
// Adds 2 to each of the first N elements of A. Thread index comes from the
// file's get_tid() macro (2D grid, 1D block); surplus threads fall through
// the guard and do nothing.
__global__ void vec_add(FLOAT *A,const int N)
{
    int idx = get_tid();
    if (idx < N) {
        A[idx] += 2;
    }
}
// Demo of CUDA managed (unified) memory: a single allocation is written by
// the host, updated by a kernel, and read back by the host with no explicit
// cudaMemcpy.
int main()
{
    mycout<<"虚拟统一内存使用(CPU与GPU都能访问)\n"<<
    "1.使用cudaMallocManaged分配内存\n"<<
    "2.第3个参数cudaMemAttachGlobal or cudaMemAttachHost\n"<<
    "3.(defaults to cudaMemAttachGlobal)"<<endl;
    int N=10;
    int nBytes=N*sizeof(FLOAT);
    FLOAT *host_and_dev_data=NULL;
    // Allocate managed memory, accessible from both CPU and GPU.
    CHECK(cudaMallocManaged((void**)&host_and_dev_data,nBytes));
    // Initialize on the host.
    for(int i=0;i<N;++i)
    {
        host_and_dev_data[i]=1;
    }
    // Launch the kernel: 32 threads cover N=10; vec_add guards the excess.
    vec_add<<<1,32>>>(host_and_dev_data,N);
    cudaDeviceSynchronize(); // wait for the GPU before the host reads (one of several ways to sync)
    // Print the results directly from the host — valid because the memory is managed.
    for(int i=0;i<N;++i)
    {
        cout<<host_and_dev_data[i]<<" ";
    }
    cout<<endl;
    CHECK(cudaFree(host_and_dev_data));
    return 0;
}
|
23,289 | #include <iostream>
#include <math.h>
#include<stdio.h>
#include <algorithm>
#define BLOCK_SIZE 16
//int const Nx = 30, Nz = 20;
// Second-order finite-difference Laplacian of P (Nz x Nx, row-major) into
// Lapla, over the interior region [ordem/2, Nz-ordem/2) x [ordem/2,
// Nx-ordem/2), using grid-stride loops in both dimensions.
// NOTE(review): dz and dx are declared int but the host passes floats —
// any spacing below 1 truncates to 0 and divides by zero; confirm whether
// these should be float parameters.
// The printf calls are debug output and will flood stdout on real grids.
__global__
void laplacian_GPU (int ordem, int Nz, int Nx,int dz, int dx, float *P, float *Lapla)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int colStride = blockDim.x * gridDim.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int rowStride = blockDim.y * gridDim.y;
    int in_n = ordem / 2;          // half-stencil width (interior margin)
    int lim_nx = Nx - ordem / 2;
    int lim_nz = Nz - ordem / 2;
    float Pxx,Pzz;
    // printf("in_n = %d",in_n);
    printf("lim_nx = %d",lim_nx);
    for (int i = col; i < lim_nz && i >= in_n; i += colStride){
        for (int j = row; j < lim_nx && j >= in_n; j += rowStride){
            // Central differences in z (rows) and x (columns).
            Pzz = P[(i+1) * Nx + j] + P[(i-1) * Nx + j] - P[i * Nx + j] * 2.0;
            Pxx = P[i * Nx + j+1] + P[i * Nx + j-1] - P[i * Nx + j] * 2.0;
            Lapla[i * Nx + j] = Pxx/(dx*dx) + Pzz/(dz*dz);
            printf("Pxx %.3f, Pzz %.3f,i %d, j %d\n",Pxx,Pzz,i,j);
        }
    }
}
// Builds an Nz x Nx test field P, runs the GPU Laplacian, and prints both P
// and the result. Fixes over the original:
//  - the row-pointer tables were sized new float*[Nx] but indexed by Nz rows
//    (out of bounds whenever Nz > Nx) — now sized by Nz;
//  - the kernel was launched <<<dimBlock, dimGrid>>> with grid and block
//    swapped — the execution configuration is <<<grid, block>>>;
//  - host memory allocated with new[] was handed to cudaFree — now released
//    with delete[], and the device buffers dP/dLapla are cudaFree'd.
// Note: dz/dx are still narrowed to the kernel's int parameters, as before.
void laplacian(int Nx,int Nz,float dx,float dz)
{
    // Row-pointer view over one contiguous Nz*Nx block (rows = Nz).
    float **P = new float*[Nz];
    P[0] = new float[Nz * Nx];
    for (int i = 1; i < Nz; ++i)
        P[i] = P[i-1] + Nx;
    float **Lapla = new float*[Nz];
    Lapla[0] = new float[Nz * Nx];
    for (int i = 1; i < Nz; ++i)
        Lapla[i] = Lapla[i-1] + Nx;
    float *dP; cudaMalloc((void **) &dP, sizeof(float) * Nz * Nx);
    float *dLapla; cudaMalloc((void **) &dLapla, sizeof(float) * Nz * Nx);
    // Polynomial test field.
    for (int i = 0; i < Nz; i++){
        for (int j = 0; j < Nx; j++){
            P[i][j] = i*j + i*i*i;
        };
    };
    cudaMemcpy(dP, P[0],sizeof(float) * Nz * Nx, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    int Gridx = (Nx + dimBlock.x) / dimBlock.x;
    int Gridy = (Nz + dimBlock.y) / dimBlock.y;
    dim3 dimGrid(Gridx,Gridy);
    std::cout<<"dimBlock.x "<<dimBlock.x<<" dimBlock.y "<<dimBlock.y<<std::endl;
    std::cout<<"dimGrid.x "<<dimGrid.x<<" dimGrid.y "<<dimGrid.y<<std::endl;
    int ordem=2;
    // <<<grid, block>>> — the original had these two arguments reversed.
    laplacian_GPU<<<dimGrid,dimBlock>>>(ordem,Nz,Nx,dz,dx,dP,dLapla);
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess)
        std::cout<<"Error: "<<cudaGetErrorString(err)<<std::endl;
    cudaDeviceSynchronize();
    cudaMemcpy(Lapla[0], dLapla,sizeof(float) * Nz * Nx, cudaMemcpyDeviceToHost);
    for(int i=0; i<Nz; i++){
        for(int j=0; j<Nx; j++){
            std::cout << P[i][j] << " ";
        }
        std::cout << std::endl;
    }
    for(int i=0; i<Nz; i++){
        for(int j=0; j<Nx; j++){
            std::cout << Lapla[i][j] << " ";
        }
        std::cout << std::endl;
    }
    // Device buffers are freed with cudaFree, host buffers with delete[].
    cudaFree(dP);
    cudaFree(dLapla);
    delete[] P[0];
    delete[] P;
    delete[] Lapla[0];
    delete[] Lapla;
}
// Driver: run the Laplacian demo on a 10x10 grid with unit spacing.
int main()
{
    const int nx = 10;
    const int nz = 10;
    const float spacing_z = 1;
    const float spacing_x = 1;
    laplacian(nx, nz, spacing_x, spacing_z);
    return 0;
}
|
23,290 | #include <iostream>
#include <math.h>
#include <algorithm>
#include <stdio.h>
#include<float.h>
#define THREADS_PER_BLOCK 1024 //max of the threads in one block is 1024
// Kernel function to add the elements of two arrays
__global__
// One update step on an n x n grid stored row-major in d_A, written to d_B
// (the caller ping-pongs the buffers). Border cells are copied unchanged;
// each interior cell adds the second-smallest of its four diagonal
// neighbours to its own value.
void iteration(double *d_A,double *d_B,int n)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    if(i<n*n){
        // Border test: top row, left column, right column, bottom row
        // (corners are caught by the column tests).
        if((0<i&&i<n)||i%n==0||(i+1)%n==0||(n*n-n<i&&i<n*n-1)){
            d_B[i]=d_A[i];
        }
        else{
            double local[4];
            double temp;            // unused; kept from the original
            double first_small;     // smallest of the four neighbours
            double secnd_small;     // second smallest
            // The four diagonal neighbours (below-left, below-right,
            // above-left, above-right in row-major terms).
            local[0]=d_A[i+n-1];
            local[1]=d_A[i+n+1];
            local[2]=d_A[i-n-1];
            local[3]=d_A[i-n+1];
            // Partial sort: track the two smallest of local[0..3].
            if (local[0]>local[1]){
                first_small = local[1];
                secnd_small = local[0];
            }
            else{
                first_small = local[0];
                secnd_small = local[1];
            }
            if(local[2]<first_small){
                secnd_small = first_small;
                first_small = local[2];
            }
            else if(local[2]<secnd_small){
                secnd_small = local[2];
            }
            if(local[3]<first_small){
                secnd_small = first_small;
                first_small = local[3];
            }
            else if(local[3]<secnd_small){
                secnd_small = local[3];
            }
            d_B[i]=secnd_small+d_A[i];
        }
    }
}
__global__
// Block-level sum reduction: each block sums its slice of d_A in shared
// memory and writes one partial sum to sum_temp[blockIdx.x]. Requires
// dynamic shared memory of blockDim.x * sizeof(double); the caller runs
// this in multiple passes until a single value remains.
void sumblock(double *d_A, int size, double*sum_temp)
{
    extern __shared__ double sum_block[];
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    int k;
    int bound;
    double temp=0;
    // Out-of-range lanes contribute 0 so the tree below is always full.
    if(i<size){
        temp=d_A[i]+temp;
    }
    sum_block[threadIdx.x]=temp;
    __syncthreads();
    // Pairwise tree reduction; the odd-k branch folds the extra element.
    k = blockDim.x;
    while(k>1){
        if(k%2==0){
            bound=k/2;
            if (threadIdx.x<bound){
                sum_block[threadIdx.x]=sum_block[threadIdx.x]+sum_block[threadIdx.x+bound];
            }
            __syncthreads();
            k=k/2;
        }
        else{
            bound=k/2;
            // NOTE(review): this predicate is true for every thread except
            // thread 0 when bound==0, so upper-half threads also add
            // sum_block[threadIdx.x+bound]; `&&` rather than `||` looks like
            // the intent — verify the total against a CPU reference sum.
            if (threadIdx.x<=bound||threadIdx.x!=0){
                sum_block[threadIdx.x]=sum_block[threadIdx.x]+sum_block[threadIdx.x+bound];
            }
            __syncthreads();
            k=k/2+1;
        }
    }
    // Thread 0 publishes this block's partial sum.
    if(threadIdx.x==0){
        sum_temp[blockIdx.x] =sum_block[0];
    }
}
// Driver: 10 iteration steps on an n x n grid (n from argv[1]), then a
// multi-pass block-sum reduction of the result, timed with CUDA events.
// Fix over the original: the cudaMemcpy for `center` had its "&center"
// argument mangled by encoding damage ("¢er") and did not compile; the
// stale "A(17,31)" comment is also corrected to match the copied element.
int main(int argc, char **argv)
{
    int n = atoi(argv[1]); //The size of the matrix
    double *A; //The definition of matrix A
    double *d_A; //The definition of matrix A in gpu
    double *d_B; //The definition of iterated matrix A in gpu
    cudaEvent_t start; // the start time of gpu calculation
    cudaEvent_t end; // the end time of gpu calculation
    float elapsedTime=0; // the elapsed time of gpu calculation
    double sum=0; //The sum of the matrix A after 10 iterations
    double *sum_temp; //Per-block partial sums on the gpu
    double center=0; //The center of the matrix A after 10 iterations
    double verification=0; //The A(37,47) of the matrix A after 10 iterations
    int count=0; //To count the number of the iterations
    int grid; //The dimension of the grid on the gpu
    int block; //The dimension of the block on the gpu
    int size; //The size of elements in each iteration for sum
    double* sum_ptr; //The pointer of the first element in each iteration for sum
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    // Allocate the matrix on the CPU
    A = (double*)malloc(n*n*sizeof(double));
    //initialize the matrix A on the host
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            A[i*n+j]=(1+cos(2*i)+sin(j))*(1+cos(2*i)+sin(j));
        }
    }
    // Allocate Memory on the GPU
    cudaMalloc(&d_A, n*n*sizeof(double));
    cudaMalloc(&d_B, n*n*sizeof(double));
    //copy the data to the gpu from cpu
    cudaMemcpy(d_A, A, n*n*sizeof(double), cudaMemcpyHostToDevice);
    block=THREADS_PER_BLOCK;
    grid=(n*n%block==0)?n*n/block:(n*n/block+1); // ceil(n*n / block)
    cudaMalloc(&sum_temp, grid*sizeof(double));
    cudaEventRecord(start);
    while(count<10){
        // Run one step on the GPU, then ping-pong the two buffers.
        iteration<<<grid,block>>>(d_A,d_B,n);
        cudaDeviceSynchronize();
        double *temp;
        temp=d_B;
        d_B=d_A;
        d_A=temp;
        count++;
        std::cout<<"The numbers of iterations is "<<count<<std::endl;
    }
    // Multi-pass reduction: each pass shrinks `size` elements to `grid`
    // partial sums until a single value remains in sum_temp[0].
    size=n*n;
    sum_ptr=d_A;
    while(grid!=0){
        sumblock<<<grid,block,block*sizeof(double)>>>(sum_ptr,size,sum_temp);
        cudaDeviceSynchronize();
        size=grid;
        if(grid==1){
            grid=0;
        }
        else{
            grid=(grid%block==0)?grid/block:grid/block+1;
        }
        sum_ptr=sum_temp;
    }
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsedTime, start, end);
    cudaMemcpy(&sum, sum_temp, sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(&center, (d_A+n/2*n+n/2), sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(&verification, (d_A+37*n+47), sizeof(double), cudaMemcpyDeviceToHost);
    std::cout <<"The elapsed_time of the cuda program is "<< elapsedTime << std::endl;
    std::cout <<"The sum of the matrix A after 10 iterations is "<< sum << std::endl;
    std::cout <<"The center of the matrix A after 10 iterations is "<< center << std::endl;
    std::cout <<"The A(37,47) of the matrix A after 10 iterations is "<< verification << std::endl;
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(sum_temp);
    free(A);
    return 0;
}
|
23,291 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (33 * 1024)
// Grid-stride element-wise vector add: c = a + b over N elements.
// Each thread starts at its global index and strides by the total thread
// count (blockDim.x * gridDim.x), so any launch configuration covers all N.
__global__ void add(int *a, int *b, int * c)
{
    int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride)
    {
        c[idx] = a[idx] + b[idx];
    }
}
// Fills a and b on the host, adds them on the GPU with a grid-stride kernel,
// and verifies the result element-by-element on the host.
// Fix over the original: the uninitialized `c` buffer was copied host->device
// before the kernel — dev_c is write-only for the kernel, so that transfer
// was pure wasted work (and read indeterminate bytes) and is removed.
int main()
{
    // ~396 KB of host stack across the three arrays (N = 33 * 1024 ints each).
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, N*sizeof(int));
    cudaMalloc((void**)&dev_b, N*sizeof(int));
    cudaMalloc((void**)&dev_c, N*sizeof(int));
    for(int i = 0; i < N; ++i)
    {
        a[i] = -i;
        b[i] = i*i;
    }
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    // 128 blocks x 128 threads; the grid-stride loop in add() covers all N.
    add<<<128, 128>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    // Verify on the host.
    bool success = true;
    for(int i = 0; i < N; ++i)
    {
        if((a[i] + b[i]) != c[i]){
            printf("Error: %d + %d != %d\n", a[i], b[i], c[i]);
            success = false;
        }
    }
    if(success)
        printf("We did it!\n");
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
23,292 | #include "includes.h"
// Backward pass for a two-level (cluster + within-cluster) log-softmax:
// blockIdx.x selects the sample, blockIdx.y selects which level (0 =
// cluster scores, 1 = class scores within the target's cluster). Each
// thread strides over that level's scores, converting log-probabilities to
// probabilities (exp(score - logsum)), then the target entry is decremented
// by 1 — the softmax-with-NLL gradient. Targets/mappings/counts arrive as
// floats holding 1-based indices; the -0.5f/+0.5f offsets round them to
// 0-based ints.
__global__ void updateGradInputLSM(const float* target, const float* mapping, const float* n_class_in_cluster, float* class_score, float* class_logsum, float* cluster_score, float* cluster_logsum, const long class_score_stride0, const long cluster_score_stride0, int n_clusters) {
    const int tidx = threadIdx.x;
    const int nthreads = blockDim.x;
    // Decode the (1-based, float-encoded) target and its cluster mapping.
    const int itarget = (int)(target[blockIdx.x] - 0.5f);
    const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
    const int idx_in_cluster_target = (int)(mapping[2*itarget+1] - 0.5f);
    const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
    float *score, logsum_k, *target_score;
    int N;
    if (blockIdx.y == 0) {
        // Level 0: gradient over the cluster scores.
        score = cluster_score + blockIdx.x * cluster_score_stride0;
        logsum_k = cluster_logsum[blockIdx.x];
        N = n_clusters;
        target_score = score + cluster_target;
    } else {
        // Level 1: gradient over the class scores inside the target cluster.
        score = class_score + blockIdx.x * class_score_stride0;
        logsum_k = class_logsum[blockIdx.x];
        N = cluster_size;
        target_score = score + idx_in_cluster_target;
    }
    // softmax: p_i = exp(score_i - logsum).
    for (int i = tidx; i < N; i += nthreads)
        score[i] = expf(score[i] - logsum_k);
    __syncthreads(); //TODO : not exactly needed
    // Gradient of NLL w.r.t. the target's probability: p_target - 1.
    if (tidx == 0)
        *target_score -= 1.f;
}
23,293 | // compute.cu
//
// driver and kernel call
#include <stdio.h>
#define THREADS_PER_BLOCK 128
// __global__ void compute_2d (int secondArrSize, float *arr[])
// Work-in-progress heat-diffusion kernel: currently only prints a greeting
// from thread (0, 0) — the `x == 0 && ... && y == 0` condition limits output
// to a single thread. The commented-out body below is an earlier 1D stencil
// update kept for reference.
// NOTE(review): the trailing __syncthreads() is currently reached by all
// threads uniformly (fine), but if the commented-out early-return logic is
// restored it must not become divergent.
__global__ void compute_2d ( int firstArrSize, int secondArrSize, float **arr)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Only thread (0, 0) passes this (x == 0 and y == 0 dominate the ranges).
    if (x == 0 && x < firstArrSize && y == 0 && y < firstArrSize)
    {
        printf("Hello. I'm a thread %d in block %d \n", threadIdx.x, blockIdx.x);
        // printf("%lf \n", arr[x][y]);
    }
    // if (x <= arrSize) {
    // if (x % 2 == timeStep % 2 && x <= timeStep)
    // {
    // if (timeStep > timeSteps && x <= (timeStep - timeSteps - arrSize))
    // {
    // } else
    // {
    // if (x == 0)
    // {
    // c_d[x] = (100.0 + c_d[x + 1]) / 2.0;
    // } else if (x == arrSize - 1)
    // {
    // c_d[x] = (c_d[x - 1] + c_d[x]) / 2.0;
    // } else
    // {
    // c_d[x] = (c_d[x - 1] + c_d[x + 1]) / 2.0;
    // }
    // }
    // }
    __syncthreads();
    // }
}
// Host driver for compute_2d. Allocates a 2D host grid, copies it to a
// pitched device allocation, launches the kernel, and copies back.
// NOTE(review): several serious problems to resolve before this can work:
//  - cudaMemcpy2D is given `cell` (the row-POINTER table) as its source and
//    `cell2` as its destination; it needs the contiguous pixel data, and the
//    rows here are separately malloc'd, so they are not even contiguous.
//  - the kernel takes float** but receives the pitched linear buffer d_cell;
//    dereferencing arr[x][y] on the device would be invalid.
//  - printf(cudaGetErrorString(tmp)) passes a non-literal as the format
//    string; use printf("%s", ...).
//  - `tmp` is re-checked after the kernel launch without being reassigned,
//    so launch errors are never observed (use cudaGetLastError()).
//  - `malloc(size * 2)` for the pointer tables over-allocates by row data
//    size rather than firstArrSize * sizeof(float*).
extern "C" void compute2DArr (int firstArrSize, int secondArrSize, float *metalRod, int timeSteps)
{
    int i = 0, j = 0;
    int size=firstArrSize*secondArrSize*sizeof(float);
    //allocate resources
    float **cell=(float**)malloc(size * 2);
    float **cell2=(float**)malloc(size * 2);
    for (i = 0; i < firstArrSize; i ++)
    {
        cell[i] = (float*)malloc(size);
        cell2[i] = (float*)malloc(size);
        for (j = 0; j < secondArrSize; j ++)
        {
            cell[i][j] = 23.0;
        }
    }
    // Pitched device allocation: rows are padded for alignment.
    size_t pitch;
    float **d_cell;
    cudaMallocPitch((void**) &d_cell, &pitch, secondArrSize * sizeof(float), firstArrSize);
    cudaError_t tmp = cudaMemcpy2D(d_cell, pitch, cell, secondArrSize * sizeof(float), secondArrSize * sizeof(float), firstArrSize, cudaMemcpyHostToDevice);
    if (cudaSuccess != tmp)
    {
        printf("\n copy to GPU \n");
        printf(cudaGetErrorString(tmp));
    }
    dim3 dimBlock(8,8);
    dim3 dimGrid(1,1);
    compute_2d<<<dimGrid, dimBlock>>>( firstArrSize, secondArrSize, d_cell);
    if (cudaSuccess != tmp)
    {
        printf("\n compute \n");
        printf(cudaGetErrorString(tmp));
    }
    tmp = cudaMemcpy2D(cell2, secondArrSize * sizeof(float), d_cell, pitch, secondArrSize * sizeof(float), firstArrSize, cudaMemcpyDeviceToHost);
    if (cudaSuccess != tmp)
    {
        printf("\n copy to CPU \n");
        printf(cudaGetErrorString(tmp));
    }
    for (i = 0; i < firstArrSize; i++)
    {
        for (j = 0; j < secondArrSize; j ++)
        {
            printf("\n %lf ", cell2[i][j]);
        }
    }
    // for (i = 0; i < (2*(timeSteps - 1)) + secondArrSize; i ++)
    // {
    // //compute_2d <<< ceil((float) secondArrSize/THREADS_PER_BLOCK), THREADS_PER_BLOCK >>> (c_d, secondArrSize, i, timeSteps);
    // }
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf ("CUDA error: %s\n", cudaGetErrorString(err));
}
|
23,294 | #include <iostream>
#include <string>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
using namespace std;
// Prints a rows x cols row-major matrix, one row per line, values
// space-separated.
void print_matrix(float* matrix, int rows, int cols) {
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            cout << matrix[r * cols + c] << " ";
        }
        cout << endl;
    }
}
// CPU reference transpose: writes the cols x rows transpose of the
// rows x cols row-major input into `transpose`.
void cpu(float* matrix, float* transpose, int rows, int cols)
{
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            transpose[c * rows + r] = matrix[r * cols + c];
        }
    }
}
// Naive transpose: one thread per element. Global reads are coalesced;
// the transposed writes are strided (hence "naive").
__global__ void naive_gpu(float* matrix, float* transpose, int rows, int cols) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < rows && col < cols) {
        transpose[col * rows + row] = matrix[row * cols + col];
    }
}
const int BLOCK_SIZE_X = 32;
const int BLOCK_SIZE_Y = 32;
// Tiled transpose via a 32x32 shared-memory tile: the tile is loaded with
// one indexing and stored with the transposed indexing so both global
// accesses are coalesced.
// NOTE(review): at partial edge tiles the read guard (tj/ti) differs from
// the write guard (i/j), so some reads may touch tile slots no thread wrote
// (uninitialized shared memory) — verify with compute-sanitizer initcheck.
// Column accesses to `mat` also incur 32-way bank conflicts; see
// no_bank_conflict below for the padded variant.
__global__ void shared_memory(float* matrix, float* transpose, int rows, int cols) {
    __shared__ float mat[BLOCK_SIZE_X][BLOCK_SIZE_Y];
    int bx = blockIdx.x * BLOCK_SIZE_X;
    int by = blockIdx.y * BLOCK_SIZE_Y;
    int i = by + threadIdx.y; int j = bx +threadIdx.x;   // source coords
    int ti = bx + threadIdx.y; int tj = by + threadIdx.x; // transposed coords
    if ((i < rows) && (j < cols))
        mat[threadIdx.x][threadIdx.y] = matrix[i * cols + j];
    __syncthreads();
    if ((tj < rows) && (ti < cols))
        transpose[ti * rows + tj] = mat[threadIdx.y][threadIdx.x];
}
// Same tiled transpose as shared_memory, but the tile's inner dimension is
// padded by one float so column accesses land in different shared-memory
// banks, eliminating the 32-way bank conflicts.
// NOTE(review): inherits the edge-tile guard mismatch described on
// shared_memory (possible reads of unwritten tile slots at boundaries).
__global__ void no_bank_conflict(float * matrix, float * transpose, int rows, int cols) {
    __shared__ float mat[BLOCK_SIZE_X][BLOCK_SIZE_Y + 1];  // +1 breaks bank conflicts
    int bx = blockIdx.x * BLOCK_SIZE_X;
    int by = blockIdx.y * BLOCK_SIZE_Y;
    int i = by + threadIdx.y; int j = bx +threadIdx.x;
    int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
    if ((i < rows) && (j < cols))
        mat[threadIdx.x][threadIdx.y] = matrix[i * cols + j];
    __syncthreads();
    if ((tj < rows) && (ti < cols))
        transpose[ti * rows + tj] = mat[threadIdx.y][threadIdx.x];
}
const int TILE = 32;
const int SIDE = 8;
// Transpose with 32x32 tiles processed by 32x8 thread blocks: each thread
// moves TILE/SIDE = 4 elements, and #pragma unroll flattens the small fixed
// loops. The +1 padding on the tile's inner dimension avoids shared-memory
// bank conflicts on the transposed read.
// NOTE(review): main launches this kernel with (cols, rows) swapped relative
// to the other kernels, and the index math here uses `rows` as the source
// row pitch — validate the output against the CPU transpose for
// non-square matrices.
__global__ void loop_unrolled(float * matrix, float * transpose, int rows, int cols) {
    __shared__ float mat[TILE][TILE + 1];
    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    // Load SIDE rows of the tile per iteration.
    #pragma unroll
    for(int k = 0; k < TILE ; k += SIDE) {
        if(x < rows && y + k < cols)
            mat[threadIdx.y+k][threadIdx.x] = matrix[(y + k)*rows + x];
    }
    __syncthreads();
    // Swap block roles for the transposed store.
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    #pragma unroll
    for(int k = 0; k < TILE; k += SIDE) {
        if(x < cols && y + k < rows)
            transpose[(y + k) * cols + x] = mat[threadIdx.x][threadIdx.y+k];
    }
}
// Benchmark driver: builds a random rows x cols matrix and times one of the
// transpose implementations selected on the command line
// (cpu | naive_gpu | shared_memory | no_bank_conflict | loop_unrolled).
// Fix over the original: `matrix` and `transpose` are allocated with new[]
// but were released with free() — undefined behavior; they are now released
// with delete[].
int main(int argc, char *argv[])
{
    int rows = 4000;
    int cols = 4000;
    string ver = "cpu";
    // Optional args: [rows cols] [version]
    if (argc == 1) {
        ;
    } else if (argc == 3) {
        string str1 = argv[1];
        rows = atoi(str1.c_str());
        string str2 = argv[2];
        cols = atoi(str2.c_str());
    } else if (argc == 4) {
        string str1 = argv[1];
        rows = atoi(str1.c_str());
        string str2 = argv[2];
        cols = atoi(str2.c_str());
        ver = argv[3];
        if ((ver != "cpu") && (ver != "naive_gpu") && (ver != "shared_memory")\
            && (ver != "no_bank_conflict") && (ver != "loop_unrolled")) {
            cout<<"Wrong parameters of program version, you can only choose from following:";
            cout<<" cpu, naive_gpu, shared_memory, no_bank_conflict, loop_unrolled."<<endl;
            return 1;
        }
    } else {
        cout<<"Wrong number of parameters."<<endl;
        return 1;
    }
    if ((rows <= 0) || (cols <= 0)) {
        cout<<"Wrong parameters of the the matrix dimension, should be two positive numbers.";
        cout<<endl;
        return 1;
    }
    cout<<"Matrix Dimension: ("<<rows<<", "<<cols<<")"<<endl;
    cout<<"Program Version: "<<ver<<endl;
    float* matrix = new float[rows*cols];
    float* transpose = new float[rows*cols];
    srand((unsigned)time(NULL));
    for (int i = 0; i < rows; ++i)
        for (int j = 0; j < cols; ++j)
            matrix[i*cols+j] = rand();
    if (ver == "cpu") {
        // CPU path: 100 repetitions, timed with CUDA events for consistency.
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        for (int iter = 0; iter < 100; ++iter)
            cpu(matrix, transpose, rows, cols);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime=0;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        printf("Elapsed time:%f <ms>\n", elapsedTime);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    else {
        float *dev_matrix, *dev_transpose;
        cudaMalloc((void**)&dev_matrix, rows*cols*sizeof(float));
        cudaMalloc((void**)&dev_transpose, rows*cols*sizeof(float));
        cudaMemcpy(dev_matrix, matrix, rows*cols*sizeof(float), cudaMemcpyHostToDevice);
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        dim3 dimGrad(cols/32+1,rows/32+1,1);
        dim3 dimBlock(32,32,1);
        if (ver == "naive_gpu") {
            naive_gpu<<<dimGrad, dimBlock>>>(dev_matrix, dev_transpose, rows, cols);
        } else if (ver == "shared_memory") {
            shared_memory<<<dimGrad, dimBlock>>>(dev_matrix, dev_transpose, rows, cols);
        } else if (ver == "no_bank_conflict") {
            no_bank_conflict<<<dimGrad, dimBlock>>>(dev_matrix, dev_transpose, rows, cols);
        } else {
            // loop_unrolled uses 32x8 blocks (4 elements per thread) and is
            // invoked with (cols, rows) — see the NOTE on the kernel.
            dim3 dimGrad(cols/32+1,rows/32+1,1);
            dim3 dimBlock(32,8,1);
            loop_unrolled<<<dimGrad, dimBlock>>>(dev_matrix, dev_transpose, cols, rows);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime=0;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        printf("Elapsed time:%f <ms>\n", elapsedTime);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaMemcpy(transpose, dev_transpose, rows*cols*sizeof(float), cudaMemcpyDeviceToHost);
        //print_matrix(matrix, rows, cols);
        //printf("==========================\n");
        //print_matrix(transpose, cols, rows);
        cudaFree(dev_matrix);
        cudaFree(dev_transpose);
    }
    // new[] allocations must be released with delete[] (was free()).
    delete[] matrix;
    delete[] transpose;
    return 0;
}
|
23,295 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// In-place element-wise add: val1[i] += val2[i]. One thread per element,
// single-block launch assumed (no bounds guard; num_elem is unused here).
__global__ void add3(float *val1, float *val2, int *num_elem)
{
    int idx = threadIdx.x;
    val1[idx] = val1[idx] + val2[idx];
}
// NOTE(review): despite the name, this ADDS (val2[i] + 1) into val1[i] —
// there is no subtraction; confirm the intended operation before use.
// One thread per element, no bounds guard; num_elem is unused.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
    int i = threadIdx.x;
    val1[i] += val2[i]+1;
}
// No host-side driver yet: the kernels above are compiled but never launched.
int main()
{
    return 0;
}
|
23,296 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
// Auto-generated floating-point stress kernel: chains arithmetic and libm
// calls over its scalar arguments and prints the final value of `comp` with
// round-trippable precision (%.17g). Intended for a <<<1,1>>> launch; with
// more threads every thread would print.
void compute(float comp, float var_1,float var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29) {
    if (comp == (var_1 - -1.2209E-37f - var_2)) {
        comp = var_4 - (+1.1637E-41f * var_5);
        for (int i=0; i < var_3; ++i) {
            comp = -1.6746E-35f + (var_6 / (-1.6653E-42f + var_7 / (-1.8159E-18f + var_8)));
            comp = var_9 + var_10 * (-1.3638E-36f - +1.9566E-6f);
            comp += +1.9819E-37f + var_11 * log10f(var_12 * (+1.4948E34f * var_13 + var_14 + powf((var_15 * (var_16 / var_17 / var_18)), var_19 * var_20 + atan2f(-1.5268E-43f, +1.8931E23f - coshf(-1.2911E-36f / var_21 - +1.9380E35f)))));
        }
        if (comp == var_22 + (var_23 * var_24)) {
            comp += (var_25 - sinf(var_26 + (var_27 / asinf(var_28 - -1.9587E-36f - log10f((var_29 / (-1.4373E34f / +1.1303E34f * (-0.0f - -0.0f))))))));
        }
    }
    printf("%.17g\n", comp);
}
// Allocates a 10-float buffer with every slot set to v; the caller owns
// (and must free) the returned pointer.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    for (int k = 0; k < 10; ++k) {
        buf[k] = v;
    }
    return buf;
}
// Auto-generated driver: parses 30 scalar arguments from the command line
// and forwards them to the single-thread compute kernel.
// NOTE(review): argv[1..30] are read without an argc check; running with
// fewer than 30 arguments dereferences out-of-range argv entries.
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    int tmp_4 = atoi(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    float tmp_28 = atof(argv[28]);
    float tmp_29 = atof(argv[29]);
    float tmp_30 = atof(argv[30]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30);
    // Kernel printf output is only flushed once the device synchronizes.
    cudaDeviceSynchronize();
    return 0;
}
|
23,297 | #include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <iostream>
// Functor for thrust::transform: reads element i through a raw device
// pointer and returns ptr[i] + 1. Marked __device__ __host__ so thrust can
// dispatch it on either backend; the raw pointer must outlive the functor's
// use (it aliases a device_vector's storage in main).
struct raw_access {
    double *ptr;
    raw_access (double *ptr) : ptr(ptr) {};
    __device__ __host__
    double operator()(const int &i) {
        return ptr[i] + 1;
    }
};
// Demo: maps index i -> vec[i] + 1 over a device vector of ten 1s,
// writing back into the same vector (each output slot depends only on its
// own input slot, so the in-place aliasing is benign here). Prints ten 2s.
int main() {
    thrust::device_vector<double> vec(10, 1);
    thrust::counting_iterator<int> iter(0);
    // The functor captures the raw device pointer behind vec's storage.
    raw_access ra(thrust::raw_pointer_cast(vec.data()));
    thrust::transform(iter, iter+10, vec.begin(), ra);
    // Element access on a device_vector performs an implicit D2H copy.
    for (const double &d : vec) {
        std::cout << d << "\n";
    }
    return 0;
}
23,298 | #include <iostream>
#include <chrono>
//Host Code
// Evaluates the polynomial with coefficients poly[0..degree] (constant term
// first) at x = array[index], writing the result back over array[index].
// One thread per input value; extra threads exit on the bounds guard.
__global__ void polynomial_expansion (float* poly, int degree, int n, float* array) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    // Accumulate sum of poly[i] * x^i by tracking the running power of x.
    float acc = 0.0f;
    float xpow = 1.0f;
    for (int i = 0; i <= degree; ++i) {
        acc += xpow * poly[i];
        xpow *= array[index];
    }
    array[index] = acc;
}
//
//Referred Github Code
//
// Host driver: evaluates a (degree+1)-coefficient all-ones polynomial at
// x = 1 for n inputs on the GPU, checks every result equals degree+1, and
// reports the elapsed time divided by nbiter.
// Fix over the original: argv[3] (nbiter) was read while the usage check
// only required 2 arguments, so running with exactly 2 dereferenced a
// missing argv entry; the check and usage string now require all three.
int main (int argc, char* argv[]) {
    if (argc < 4) {
        std::cerr<<"usage: "<<argv[0]<<" n degree nbiter"<<std::endl;
        return -1;
    }
    int n = atoi(argv[1]);
    int degree = atoi(argv[2]);
    int nbiter = atoi(argv[3]);
    float* array = new float[n];
    float* poly = new float[degree+1];
    for (int i=0; i<n; ++i) array[i] = 1.;
    for (int i=0; i<degree+1; ++i) poly[i] = 1.;
    float *dev_array, *dev_polynomial;
    std::chrono::time_point<std::chrono::system_clock> startTime, endTime;
    startTime = std::chrono::system_clock::now();
    cudaMallocManaged(&dev_array, n*sizeof(float));
    cudaMallocManaged(&dev_polynomial, (degree+1)*sizeof(float));
    cudaMemcpy(dev_array, array, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_polynomial, poly, (degree+1)*sizeof(float), cudaMemcpyHostToDevice);
    // 256 threads per block, ceil-divided grid.
    polynomial_expansion<<<(n+255)/256, 256>>>(dev_polynomial, degree, n, dev_array);
    // Blocking copy synchronizes with the kernel before results are read.
    cudaMemcpy(array, dev_array, n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dev_array);
    cudaFree(dev_polynomial);
    cudaDeviceSynchronize();
    endTime = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_time = (endTime-startTime)/nbiter;
    {
        // Every output should be degree+1 (the all-ones polynomial at x = 1).
        bool finish = true;
        for (int i=0; i< n; ++i) {
            if (fabs(array[i]-(degree+1))>0.01) {
                finish = false;
            }
        }
        if (!finish) std::cerr<<"Incorrect Result."<<std::endl;
    }
    std::cerr<<array[0]<<std::endl;
    std::cout<<n<<" "<<degree<<" "<<elapsed_time.count()<<std::endl;
    delete[] array;
    delete[] poly;
    return 0;
}
23,299 | #include "includes.h"
// Single-thread scalar helper: computes beta = (*rho) * (*yDotZ) and writes
// (*alpha) - beta to the output. All operands are one-element device scalars.
__global__ void update2(float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha)
{
    const float scaled = (*rho) * (*yDotZ);
    *alphaMinusBeta_out = (*alpha) - scaled;
}
23,300 | //3x3 mask
// Directional smoothing masks stored in constant memory (broadcast reads
// when all lanes in a warp access the same element). The center weight of
// each mask is 0, so the masks combine only neighbour contributions.
// 3x3 isotropic mask
__constant__
double mask0[3][3] = { {0.1036,0.1464,0.1036},
{0.1464,0,0.1464},
{0.1036,0.1464,0.1036}};
//horizontal 5x5 mask
__constant__
double mask1[5][5] = { {0,0,0,0,0},
{0.0465,0.0735,0.1040,0.0735,0.0465},
{0.0520,0.1040,0,0.1040,0.0520},
{0.0465,0.0735,0.1040,0.0735,0.0465},
{0,0,0,0,0}};
//vertical 5x5 mask
__constant__
double mask2[5][5] = { {0,0.0465,0.0520,0.0465,0},
{0,0.0735,0.1040,0.0735,0},
{0,0.1040,0,0.1040,0},
{0,0.0735,0.1040,0.0735,0},
{0,0.0465,0.0520,0.0465,0}};
//45 degree 7x7 mask
__constant__
double mask3[7][7] = { {0,0,0,0,0.0251,0,0},
{0,0,0,0.0397,0.0355,0.0281,0},
{0,0,0.0562,0.0794,0.0562,0.0355,0.0251},
{0,0.0397,0.0794,0,0.0794,0.0397,0},
{0.0251,0.0355,0.0562,0.0794,0.0562,0,0},
{0,0.0281,0.0355,0.0397,0,0,0},
{0,0,0.0251,0,0,0,0}};
//135 degree 7x7 mask (mask3 mirrored vertically)
__constant__
double mask4[7][7] = { {0,0,0.0251,0,0,0,0},
{0,0.0281,0.0355,0.0397,0,0,0},
{0.0251,0.0355,0.0562,0.0794,0.0562,0,0},
{0,0.0397,0.0794,0,0.0794,0.0397,0},
{0,0,0.0562,0.0794,0.0562,0.0355,0.0251},
{0,0,0,0.0397,0.0355,0.0281,0},
{0,0,0,0,0.0251,0,0}};
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.