serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
3,901 | /*
Generic Parent Class for all game interfaces
Rahul Kejriwal
CS14B023
*/
/*
Abstract Class for abstracting actual game interface from game-playing algorithms
*/
/*
Abstract game-state interface: concrete games implement move generation,
evaluation and state transitions, so game-playing algorithms (e.g. an
iterative alpha-beta, per the "stack vars" below) can run on any game.
NOTE(review): stateReset uses INT_MIN/INT_MAX -- requires <climits>,
presumably provided by the including translation unit; confirm.
*/
class GameState {
public:
/*
Possible moves from this GameState (filled by moveGen); can be used to
generate children. moves_length is the array length.
*/
bool *moves;
int moves_length;
/*
Best known next move, if computed by a search algorithm.
*/
int optimal_move;
/*
Store turn of player
false = Player 0
true = Player 1
*/
bool turn;
/*
Game-tree links: parent node, children array, and child_num = which
child of the parent produced this node.
*/
GameState *parent;
GameState **children;
int child_num;
/*
Per-node "stack frame" for Alpha Beta: search window [alpha, beta],
remaining depth, MAX/MIN flag, move currently being expanded, best value
so far, whether the child loop started, the value returned by the last
child, and prev/next links (presumably the traversal order -- confirm
against the search implementation).
*/
int alpha;
int beta;
int depth;
bool isMax;
int next_move;
int val;
bool started_loop;
int last_returned_val;
GameState *prev;
GameState *next;
/*
Heuristic evaluation of this state; defined by the concrete game interface.
*/
__host__ __device__
virtual int heuristicEval() = 0;
/*
Returns true if the current game state is a terminal game tree node.
*/
__host__ __device__
virtual bool isTerminal() = 0;
/*
Fills the moves array with the moves possible from this state.
*/
__host__ __device__
virtual void moveGen() = 0;
/*
Returns the new game state after making the given move.
DANGER: No validity check for move # [Exercise Caution]
*/
__host__ __device__
virtual GameState* makeMove(int) = 0;
/*
Prints Game Board for DEBUG purposes.
*/
__host__ __device__
virtual void printState() = 0;
// Returns the piece at the given position -- encoding defined by the
// concrete game; TODO confirm semantics.
__host__ __device__
virtual int piece(int) = 0;
/*
Resets the alpha-beta stack variables for a fresh visit: window [a, b],
depth d, node type iM. val / last_returned_val start at the identity for
the node type (INT_MIN for MAX nodes, INT_MAX for MIN nodes).
*/
__host__ __device__
void stateReset(int a, int b, int d, bool iM){
alpha = a;
beta = b;
depth = d;
isMax = iM;
next_move = -1;
prev = next = NULL;
if(iM) last_returned_val = val = INT_MIN;
else last_returned_val = val = INT_MAX;
started_loop = false;
}
};
|
3,902 | #include "includes.h"
// Parenthesized min/max macros.
// BUG FIX: the original expansions were unparenthesized (a > b ? a : b),
// which mis-associates inside larger expressions, e.g. 2 * max(1, 2)
// expanded to (2*1 > 2 ? 1 : 2) == 2 instead of 4.
// NOTE: arguments are still evaluated twice -- avoid side effects.
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
// Edge record; x carries the edge payload as a single 64-bit value
// (presumably packed endpoints/weight -- TODO confirm against the
// producer of edge_list). x == 0 is treated as "empty" by accumulate().
struct Edge{
long long int x;
};
///*
//*/
// Compaction-style kernel over e edges: if cross_edges[id] is set, edge
// edge_list[id] is moved (in place) to slot indices[id] -- indices is
// presumably a prefix sum over cross_edges; TODO confirm.
// NOTE(review): edges whose payload x == 0 are treated as empty and never
// written, so a genuine cross edge with payload 0 would be dropped.
// NOTE(review): __syncthreads() orders the read-before-write only within
// one block; if indices[id] can target a slot read by another block, the
// in-place move races across blocks.
__global__ void accumulate(Edge* edge_list, bool* cross_edges, int* indices, int e){
int bid = blockIdx.x;
// flat global thread index
int id = bid*blockDim.x + threadIdx.x;
Edge temp;
temp.x = 0;
if(id < e)
if(cross_edges[id])
temp = edge_list[id];
// all reads of edge_list in this block complete before any write below
__syncthreads();
if(temp.x)
edge_list[indices[id]] = temp;
return;
}
3,903 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// C = A * B where A is m x n, B is n x k, C is m x k (row-major).
// One thread computes one element of C; launch a 2-D grid covering
// at least k columns (x) by m rows (y).
__global__ void gpu_matrix_mult(float *a, float *b, float *c,
                                int m, int n, int k)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    // grid-tail guard
    if (row >= m || col >= k)
        return;
    float acc = 0;
    for (int t = 0; t < n; ++t)
        acc += a[row * n + t] * b[t * k + col];
    c[row * k + col] = acc;
}
// Transposes a into b: b[col][row] = a[row][col] (row-major).
// BUG FIX: the original computed rows = sizeof(a) and cols = sizeof(a[0]),
// which are the byte sizes of a pointer and a float (e.g. 8 and 4), not
// the matrix dimensions. The signature carries no dimensions, so derive
// them from the launch geometry: the caller must launch a grid whose
// total thread extent is exactly rows (y) by cols (x).
__global__ void gpu_matrix_transpose(float *a, float *b)
{
    int rows = gridDim.y * blockDim.y;  // matrix height, from launch config
    int cols = gridDim.x * blockDim.x;  // matrix width, from launch config
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < cols && idy < rows)
    {
        b[idx * rows + idy] = a[idy * cols + idx];
    }
}
3,904 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>
#include <ctime>
// Profiling demo: the branch condition depends only on the warp index,
// so all 32 lanes of a warp agree and neither path diverges. The results
// are intentionally discarded.
__global__ void code_without_divergence()
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const int warp_id = gid / 32;   // 32 threads per warp
    float a = 0;
    float b = 0;
    if ((warp_id & 1) == 0) {
        a = 100.0;
        b = 50.0;
    } else {
        a = 200.0;
        b = 75.0;
    }
}
// Profiling demo: the branch condition alternates per thread (gid parity),
// so lanes within a warp disagree and both sides execute with masking.
// The results are intentionally discarded.
__global__ void divergence()
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    float a = 0.0;
    float b = 0.0;
    if ((gid & 1) == 0) {
        a = 100.0;
        b = 50.0;
    } else {
        a = 200.0;
        b = 75.0;
    }
}
// Launches the two demo kernels back to back so their warp-divergence
// behavior can be compared in a profiler.
// FIX: the original never checked for launch errors; a bad configuration
// would fail silently. cudaGetLastError() after each launch catches that.
int main(void)
{
    int size = 1 << 22;                 // 4M logical work items
    dim3 block_size(128);
    dim3 grid_size((size + block_size.x - 1) / block_size.x);  // ceil-div
    code_without_divergence<<<grid_size, block_size>>>();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceSynchronize();
    divergence<<<grid_size, block_size>>>();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
3,905 | #include "includes.h"
// Splits packed short2 point locations into separate x/y float arrays,
// scaling each coordinate by `scale` (AoS -> SoA conversion).
__global__ void mergeLocation(const short2* loc_, float* x, float* y, const int npoints, float scale)
{
    const int ptidx = blockIdx.x * blockDim.x + threadIdx.x;
    if (ptidx >= npoints)
        return;  // grid-tail guard
    const short2 pt = loc_[ptidx];
    x[ptidx] = pt.x * scale;
    y[ptidx] = pt.y * scale;
}
3,906 | #include <cuda.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include <stdlib.h>
#include <fstream>
#include <sstream>
double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets);
double* two_dim_index(double* vector, int i, int j, double m, int b);
// Address of element (i, j) in a row-major matrix with row length b.
// `m` is unused here; kept for signature compatibility with callers.
__device__ double* two_dim_indexME(double* vector, int i, int j, double m, int b){
    return &vector[i * b + j];
}
// Address of element (i, j, k) in a row-major 3-D array with inner
// extents b and num_assets. `m` is unused; kept for signature compatibility.
__device__ double* three_dim_indexME(double* matrix, int i, int j, int k, double m, int b, int num_assets){
    return &matrix[(i * b + j) * num_assets + k];
}
//this function returns the payoff of a geometric average call option.
// Payoff of a geometric-average call: max(geoMean - Strike, 0), where the
// geometric mean is taken over the num_assets log-prices stored at (i, j)
// of X (so each factor is exp(log-price)).
__device__ double GeometricPayOffCallM(double* X, int i, int j, double m, int b, int num_assets, double Strike){
    double prod = 1;
    for (int l = 0; l < num_assets; l++)
        prod *= exp(*three_dim_indexME(X, i, j, l, m, b, num_assets));
    const double geo = pow(prod, 1.0/(num_assets));
    return (geo - Strike > 0) ? (geo - Strike) : 0;
}
//this function returns the payoff of a geometric average put option.
// Payoff of a geometric-average put: max(Strike - geoMean, 0), where the
// geometric mean is taken over the num_assets log-prices stored at (i, j).
__device__ double GeometricPayOffPutM(double* X, int i, int j, double m, int b, int num_assets, double Strike){
    double prod = 1;
    for (int l = 0; l < num_assets; l++)
        prod *= exp(*three_dim_indexME(X, i, j, l, m, b, num_assets));
    const double geo = pow(prod, 1.0/(num_assets));
    return (Strike - geo > 0) ? (Strike - geo) : 0;
}
//this function returns the continuation value given by the inner control.
// Continuation value at mesh node (i, j), corrected by an inner control
// variate: the weighted average of the next slice's option values V is
// adjusted by beta * (simulated - exact) expectation of the first asset's
// price. W holds mesh weights, X log-prices, V the previous slice's values.
// NOTE(review): m arrives as double and is used as an index base via
// m_int = (int)m -- assumes m is integral.
__device__ double inner_control_meshME(int i, int j, int b, double r, double delta_t, double m, double* W_device, double* X_device, double* V_device, int num_assets ){
int m_int= (int)m;
double ControlMeshContinVal=0;
double sum=0, ContinVal=0, stock_expectation=0, true_stock_expectation=0, numerator=0, denominator=0, beta=0;
// weighted average of the previous slice's option values
for(int k=0; k<b; k++){
sum+= (*three_dim_indexME(W_device, (m_int-i), k, j, m, b, b)) * (*two_dim_indexME(V_device, (i-1), k, m, b));
}
ContinVal=(1/((double)b))*sum;
sum=0;
// weighted average of the first asset's price over the same slice
for(int l=0; l<b; l++){
sum+=(*three_dim_indexME(W_device, (m_int-i), l, j, m, b, b)) * exp((*three_dim_indexME(X_device, (m_int-i), l,0 , m, b, num_assets)));
}
stock_expectation=(1/((double)b))*sum;
// exact expectation of the asset price one step ahead (risk-neutral growth)
true_stock_expectation=exp(*three_dim_indexME(X_device, (m_int-i-1), j, 0, m, b, num_assets)) * exp(r*delta_t);//old
// regression coefficient of the value estimate on the price estimate
for(int p=0; p<b; p++){
numerator += ( (*three_dim_indexME(W_device, (m_int-i), p, j, m, b, b)) * exp( *three_dim_indexME(X_device, (m_int-i), p, 0, m, b, num_assets) ) - stock_expectation ) * ( (*three_dim_indexME(W_device, (m_int-i), p, j, m, b, b)) * (*two_dim_indexME(V_device, (i-1), p, m, b)) - ContinVal );
denominator += pow( ( (*three_dim_indexME(W_device, (m_int-i), p, j, m, b, b)) * exp( *three_dim_indexME(X_device, (m_int-i), p, 0, m, b, num_assets) ) - stock_expectation ) , 2 );
}
beta=numerator/denominator;
// control-variate adjusted continuation value
ControlMeshContinVal= ContinVal-beta*(stock_expectation-true_stock_expectation);
return ControlMeshContinVal;
}
//this kernel updates the V matrix of high bias mesh prices
// Fills time-slice `ker` of the value matrix V for the high-bias mesh
// estimator: at the first processed slice (ker == 0) V is just the
// discounted payoff; otherwise V = max(payoff, control-variate
// continuation value) -- the American exercise decision.
// One thread per mesh node; launch with at least b threads.
// NOTE(review): GeometricPayOffCallM takes int i but receives the double
// expression m-1-ker, relying on implicit truncation; assumes m integral.
__global__ void MeshEstimatorKernel(double strike, double r, double delta_t, int b, double m, double* X_device, double* W_device, double* V_device, double* asset_amount_device, int num_assets, int ker){
double H; //payoff variable
double C; //continuation value variable
int idx =blockDim.x*blockIdx.x + threadIdx.x;
if(idx<b){
if(ker==0){
// first slice: discounted payoff only
H=GeometricPayOffCallM( X_device, m-1-ker, idx, m, b, num_assets, strike)*exp(-r*delta_t*(m-ker));
*two_dim_indexME(V_device, ker, idx, m, b)=H;
}
else{
C=inner_control_meshME(ker, idx, b, r, delta_t, m, W_device, X_device, V_device, num_assets);
H=GeometricPayOffCallM( X_device, m-1-ker, idx, m, b, num_assets, strike)*exp(-r*delta_t*(m-ker));
// exercise if the immediate payoff beats the continuation value
if(H>=C){
*two_dim_indexME(V_device, ker, idx, m, b)=H;
}
else{
*two_dim_indexME(V_device, ker, idx, m, b)=C;
}
}
}
}
//This function allocates memory on the device and reutrns the high bias estimate to the main function.
// Allocates device buffers, runs the backward-induction mesh estimator on
// the GPU one time-slice per launch, and returns the high-bias option
// value at time 0 (the average of the final slice of V).
// Fixes vs. original:
//  - V is no longer round-tripped host<->device on every loop iteration;
//    the device copy is authoritative and is copied back once after the
//    loop (the host never modified it in between).
//  - blockDim is built from an int, not the double literal 512.0.
double MeshEstimator(double strike, double r, double delta_t, int b, double m, double* X, double* W, double* V, double asset_amount[], int num_assets){
    int m_int = (int)m;
    // element counts of the flattened device buffers
    int X_N = m_int * b * num_assets;
    int asset_amount_N = num_assets;
    int W_N = m_int * b * b;
    int V_N = m_int * b;
    double* X_device;
    double* V_device;
    double* asset_amount_device;
    double* W_device;
    cudaMalloc((void**) &X_device, X_N*sizeof(double) );
    cudaMemcpy(X_device, X, X_N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) );
    cudaMemcpy(asset_amount_device, asset_amount, asset_amount_N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMalloc((void**) &V_device, V_N*sizeof(double) );
    cudaMemcpy(V_device, V, V_N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMalloc((void**) &W_device, W_N*sizeof(double) );
    cudaMemcpy(W_device, W, W_N*sizeof(double), cudaMemcpyHostToDevice);
    dim3 gridDim((int)ceil(b/512.0));   // ceil(b / 512) blocks
    dim3 blockDim(512);
    cudaError_t error = cudaGetLastError();
    if( error != cudaSuccess )
    {
        std::cout << cudaGetErrorString(error) << std::endl;
        printf("found at line %d\n", __LINE__);
        exit(1);
    }
    // backward induction: slice ker of V is computed from slice ker-1
    for(int ker=0; ker<m; ker++){
        MeshEstimatorKernel<<<gridDim, blockDim>>>(strike, r, delta_t, b, m, X_device, W_device, V_device, asset_amount_device, num_assets, ker);
        cudaDeviceSynchronize();
        error = cudaGetLastError();
        if( error != cudaSuccess )
        {
            std::cout << cudaGetErrorString(error) << std::endl;
            printf("found at line %d\n", __LINE__);
            exit(1);
        }
    }
    // single copy-back of the finished value matrix
    cudaMemcpy(V, V_device, sizeof(double)*V_N, cudaMemcpyDeviceToHost);
    cudaFree(X_device);
    cudaFree(asset_amount_device);
    cudaFree(V_device);
    cudaFree(W_device);
    // average the final time slice: high-bias option value at time 0
    double sum = 0;
    for(int k=0; k<b; k++){
        sum += *two_dim_index(V, (m_int-1), k, m, b);
    }
    return (1/((double)b))*sum;
}
|
3,907 | #include "includes.h"
// Block-level sum reduction: each thread accumulates up to 4 grid-strided
// elements of a, the block tree-reduces the partials in shared memory,
// and thread 0 writes the block's sum to result[block].
// REQUIRES blockDim.x == 256 exactly: s_sum has 256 slots and the first
// step reads s_sum[thread+128] unconditionally.
// NOTE(review): the caller must finish by summing result[0..gridDim.x-1].
// The shared buffer is volatile, but below warp size this code still
// relies on __syncthreads() between steps rather than implicit warp
// synchrony, which is the safe choice on Volta+.
__global__ void reduction(const int N, float *a, float *result) {
int thread = threadIdx.x;
int block = blockIdx.x;
int blockSize = blockDim.x;
int gridSize = gridDim.x;
//unique global thread ID
int id = thread + block*blockSize;
__volatile__ __shared__ float s_sum[256];
float sum = 0;
// each thread sums up to 4 elements spaced one full grid apart
for (int i=0; i<4; i++){
if(id+i*blockSize*gridSize<N){
sum += a[id+i*blockSize*gridSize]; //add the thread's id to start
}
}
s_sum[thread] = sum;
__syncthreads(); //make sure the write to shared is finished
if (thread<128) {//first half
s_sum[thread] += s_sum[thread+128];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<64) {//next half
s_sum[thread] += s_sum[thread+64];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<32) {//next half
s_sum[thread] += s_sum[thread+32];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<16) {//next half
s_sum[thread] += s_sum[thread+16];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<8) {//next half
s_sum[thread] += s_sum[thread+8];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<4) {//next half
s_sum[thread] += s_sum[thread+4];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<2) {//next half
s_sum[thread] += s_sum[thread+2];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<1) {//final piece
s_sum[thread] += s_sum[thread+1];
result[block] = s_sum[thread];
}
}
3,908 | #include <cuda.h>
#include <iostream>
#include <time.h>
__global__
void addKernel(int* A_d, int* B_d, int*C_d); //vector addition(device code)
void arrayAdd(int* A, int* B, int* C, int n); //vector addition(serial code)
void vecAdd(int* A, int* B, int* C, int n); //loading, transfer, execution(host code)
void printArray(int * array,int n); // print Array elements
// Reads a vector size, fills two input vectors, and times the serial vs.
// GPU vector addition, printing the result array afterwards.
// FIXES vs. original: clock() returns ticks, not milliseconds -- the
// printed values are now converted via CLOCKS_PER_SEC; scanf and malloc
// results are checked before use.
int main(void){
    int SIZE;
    int i;
    int *a, *b, *c;
    printf("Insert size: ");
    if (scanf("%d", &SIZE) != 1 || SIZE <= 0) {   // reject absent/invalid input
        printf("Invalid size\n");
        return 1;
    }
    a = (int*)malloc(sizeof(int)*SIZE);
    b = (int*)malloc(sizeof(int)*SIZE);
    c = (int*)malloc(sizeof(int)*SIZE);
    if (!a || !b || !c) {
        printf("Out of memory\n");
        free(a); free(b); free(c);
        return 1;
    }
    for (i = 0; i < SIZE; i++){
        a[i] = i;
        b[i] = 10*i;
    }
    clock_t start;
    start = clock();
    arrayAdd(a, b, c, SIZE);
    // convert elapsed ticks to milliseconds (original printed raw ticks)
    printf("Vector addition execution time(Serial): %fms\n", (double)(clock()-start)*1000.0/CLOCKS_PER_SEC);
    start = clock();
    vecAdd(a, b, c, SIZE);
    printf("Vector addition execution time(Parallel): %fms\n", (double)(clock()-start)*1000.0/CLOCKS_PER_SEC);
    printArray(c, SIZE);
    free(a);
    free(b);
    free(c);
    return 0;
}
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
// Compute vector sum C = A+B
// Each thread performs one pairwise addition
// NOTE(review): indexes with threadIdx.x only and has no bounds guard, so
// it must be launched as a single block of exactly n threads (n <= 1024).
// The vecAdd() wrapper in this file launches <<<1, size>>> where size is
// the BYTE count n*sizeof(int) -- that over-launches 4x and writes out of
// bounds; the launch should be <<<1, n>>>.
__global__
void addKernel(int* A_d, int* B_d, int*C_d)
{
// each thread knows its own index
int i = threadIdx.x;
C_d[i] = A_d[i] + B_d[i];
}
// Serial reference implementation: C[i] = A[i] + B[i] for i in [0, n).
void arrayAdd(int* A, int* B, int* C, int n){
    for (int idx = 0; idx < n; ++idx) {
        C[idx] = A[idx] + B[idx];
    }
}
// Host wrapper: copies A and B to the device, launches addKernel, and
// copies the result back into C.
// BUG FIX: the original launched addKernel<<<1, size>>> where size is the
// BYTE count n*sizeof(int); addKernel has no bounds guard, so the extra
// threads wrote past the end of the device buffers. Launch exactly n
// threads (addKernel uses threadIdx.x only, so n must be <= 1024).
void vecAdd(int* A, int* B, int* C, int n)
{
    int size = n * sizeof(int);   // bytes per vector
    int* A_d = 0;
    int* B_d = 0;
    int* C_d = 0;
    // Allocate device memory
    cudaMalloc((void **) &A_d, size);
    cudaMalloc((void **) &B_d, size);
    cudaMalloc((void **) &C_d, size);
    // Transfer A and B to device memory
    cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
    // one block, one thread per element
    addKernel<<<1, n>>>(A_d, B_d, C_d);
    // Transfer C from device to host
    cudaMemcpy(C, C_d, size, cudaMemcpyDeviceToHost);
    // Free device memory for A, B, C
    cudaFree(A_d); cudaFree(B_d); cudaFree(C_d);
}
// Prints the n elements of array on one line, then their sum.
void printArray(int * array,int n){
    int total = 0;
    printf("Elements: ");
    for (int k = 0; k < n; ++k){
        printf("%d ", array[k]);
        total += array[k];
    }
    printf("\n");
    printf("Sum: %d\n", total);
}
3,909 | /*
Implement your CUDA kernel in this file
*/
#define TILE_DIM 32
// Copies interior values into the ghost ring of the padded (m+2) x (n+2)
// grid so the boundaries act as mirrors: column 0 <- column 2, column
// n+1 <- column n-1, row 0 <- row 2, row m+1 <- row m-1. (n+2) is the
// padded row pitch; the +1 offsets map threads to 1-based interior cells.
// NOTE(review): threads with row > m or col > n are not masked out; the
// caller presumably launches an exact m x n grid -- confirm, otherwise
// the col==1 / row==1 writes at out-of-range row/col touch memory outside
// the padded domain.
__global__ void mirror_boundaries(double *E_prev, const int n, const int m)
{
int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
if (col == 1) {
E_prev[row*(n+2)] = E_prev[row*(n+2) + 2];
E_prev[row*(n+2) + n + 1] = E_prev[row*(n+2) + n - 1];
}
if (row == 1) {
E_prev[col] = E_prev[2*(n+2) + col];
E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
}
}
// One explicit time step of a reaction-diffusion (Aliev-Panfilov-style)
// model on a padded (m+2) x (n+2) grid: 5-point Laplacian diffusion of
// E_prev into E, then pointwise reaction updates of E and R. Uses a
// (TILE_DIM+2)^2 shared tile of E_prev with a halo of width 1; launch
// with TILE_DIM x TILE_DIM blocks over the m x n interior.
// NOTE(review): the __syncthreads() below sits inside the
// `if ((row - 1 < m) && (col - 1 < n))` branch -- if the grid overhangs
// the domain, out-of-domain threads skip the barrier (undefined
// behavior). The halo loads are done only by the ty==1 / tx==1 rows and
// read fixed offsets of +/-2 and +TILE_DIM-1; verify the launch geometry
// guarantees these stay inside the padded arrays.
__global__ void simulate (double *E, double *E_prev, double *R, const double alpha,
const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
// __shared__ double E_Block[TILE_DIM][TILE_DIM];
// __shared__ double R_Block[TILE_DIM][TILE_DIM];
// int ty = threadIdx.y;
// int tx = threadIdx.x;
// int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
// int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
// if ((row - 1 < m) && (col - 1 < n)) {
// E_Block[ty][tx] = E[row*(n+2) + col];
// R_Block[ty][tx] = R[row*(n+2) + col];
// E[row*(n+2)+col] = E_prev[row*(n+2)+col] + alpha*(E_prev[row*(n+2)+col+1] + E_prev[row*(n+2)+col-1] - 4*E_prev[row*(n+2)+col] + E_prev[(row+1)*(n+2)+col] + E_prev[(row-1)*(n+2)+col]);
// E[row*(n+2)+col] = E_Block[ty][tx] = E_Block[ty][tx] - dt*(kk*E_Block[ty][tx]*(E_Block[ty][tx] - a)*(E_Block[ty][tx] - 1) + E_Block[ty][tx]*R_Block[ty][tx]);
// R[row*(n+2)+col] = R_Block[ty][tx] + dt*(epsilon + M1*R_Block[ty][tx]/(E_Block[ty][tx] + M2))*(-R_Block[ty][tx] - kk*E_Block[ty][tx]*(E_Block[ty][tx] - b - 1));
// }
__shared__ double E_Block[TILE_DIM][TILE_DIM];
__shared__ double R_Block[TILE_DIM][TILE_DIM];
__shared__ double E_prev_Block[TILE_DIM+2][TILE_DIM+2];
int ty = threadIdx.y;
int tx = threadIdx.x;
// 1-based interior coordinates into the padded grid
int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
if ((row - 1 < m) && (col - 1 < n)) {
// E_prev_Block[ty][tx] = E_prev[row*(n+2) + col];
// if (ty == 2) {
// E_prev_Block[0][tx] = E_prev[(row-2)*(n+2) + col];
// E_prev_Block[TILE_DIM+1][tx] = E_prev[(row+TILE_DIM-1)*(n+2) + col];
// }
// if (tx == 2) {
// E_prev_Block[ty][0] = E_prev[row*(n+2) + col-2];
// E_prev_Block[ty][TILE_DIM+1] = E_prev[row*(n+2) + col+TILE_DIM-1];
// }
// E[row*(n+2)+col] = E_prev_Block[ty][tx] + alpha*(E_prev_Block[ty][tx+1] + E_prev_Block[ty][tx-1] - 4*E_prev_Block[ty][tx] + E_prev_Block[ty+1][tx] + E_prev_Block[ty-1][tx]);
// load the tile body; the ty==1 / tx==1 threads also load the halo rows/cols
E_prev_Block[ty+1][tx+1] = E_prev[row*(n+2) + col];
if (ty == 1) {
E_prev_Block[0][tx+1] = E_prev[(row-2)*(n+2) + col];
E_prev_Block[TILE_DIM+1][tx+1] = E_prev[(row+TILE_DIM-1)*(n+2) + col];
}
if (tx == 1) {
E_prev_Block[ty+1][0] = E_prev[row*(n+2) + col-2];
E_prev_Block[ty+1][TILE_DIM+1] = E_prev[row*(n+2) + col+TILE_DIM-1];
}
__syncthreads();
// diffusion: 5-point Laplacian from the shared tile
E[row*(n+2)+col] = E_prev_Block[ty+1][tx+1] + alpha*(E_prev_Block[ty+1][tx+2] + E_prev_Block[ty+1][tx] - 4*E_prev_Block[ty+1][tx+1] + E_prev_Block[ty+2][tx+1] + E_prev_Block[ty][tx+1]);
// E[row*(n+2)+col] = E_prev[row*(n+2)+col] + alpha*(E_prev[row*(n+2)+col+1] + E_prev[row*(n+2)+col-1] - 4*E_prev[row*(n+2)+col] + E_prev[(row+1)*(n+2)+col] + E_prev[(row-1)*(n+2)+col]);
E_Block[ty][tx] = E[row*(n+2) + col];
R_Block[ty][tx] = R[row*(n+2) + col];
// reaction updates of E and R (pointwise, no neighbour access)
E[row*(n+2)+col] = E_Block[ty][tx] = E_Block[ty][tx] - dt*(kk*E_Block[ty][tx]*(E_Block[ty][tx] - a)*(E_Block[ty][tx] - 1) + E_Block[ty][tx]*R_Block[ty][tx]);
R[row*(n+2)+col] = R_Block[ty][tx] + dt*(epsilon + M1*R_Block[ty][tx]/(E_Block[ty][tx] + M2))*(-R_Block[ty][tx] - kk*E_Block[ty][tx]*(E_Block[ty][tx] - b - 1));
}
}
|
3,910 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <random>
//#include <conio.h>
//
//
//#define N 2048
//#define NB_THREADS 1024
//
//__global__ void multVect(int* result, int* a, int* b) {
// int idx = threadIdx.x + blockIdx.x * blockDim.x;
// if(idx < N)
// result[idx] = a[idx] * b[idx];
//}
//
//
//int main() {
// int *h_vect1, *h_vect2, *h_vect3;
// int *d_vect1, *d_vect2, *d_vect3;
//
// const int size = N * sizeof(int);
//
// cudaMalloc(&d_vect1, size);
// cudaMalloc(&d_vect2, size);
// cudaMalloc(&d_vect3, size);
//
// h_vect1 = (int *)malloc(size);
// h_vect2 = (int *)malloc(size);
// h_vect3 = (int *)malloc(size);
//
// for(int i = 0; i < N; ++i)
// {
// h_vect1[i]= rand() % (int)floor(sqrt(INT_MAX));
// h_vect2[i]= rand() % (int)floor(sqrt(INT_MAX));
// }
//
// cudaMemcpy(d_vect1, h_vect1, size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_vect2, h_vect2, size, cudaMemcpyHostToDevice);
//
// multVect <<<N / NB_THREADS, NB_THREADS >> >(d_vect3, d_vect1, d_vect2);
//
// cudaMemcpy(h_vect3, d_vect3, size, cudaMemcpyDeviceToHost);
//
// for (int i = 0; i < N; ++i)
// {
// printf("%d*%d=%d", h_vect1[i], h_vect2[i], h_vect3[i]);
// }
//
// free(h_vect1);
// free(h_vect2);
// free(h_vect3);
// cudaFree(d_vect1);
// cudaFree(d_vect2);
// cudaFree(d_vect3);
//
// _getch();
// return 0;
//} |
3,911 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
// Prints the board row by row: '-' for a dead cell (0), '0' for a live one.
void printBoard(unsigned char *board, int rows, int cols)
{
    int idx = 0;
    for (int r = 0; r < rows; r++)
    {
        for (int c = 0; c < cols; c++)
        {
            if (board[idx] == 0)
                printf("-");
            else
                printf("0");
            idx++;
        }
        printf("\n");
    }
}
// Runs `iterations` generations of Conway's Game of Life in a single
// launch. One thread per cell; the board dimensions come from the launch
// geometry (gridDim * blockDim), and cells beyond the edges count as dead.
// A live cell with exactly 2 neighbours survives because `state` carries
// over unchanged through the rule chain below.
// NOTE(review): this kernel is racy for iterations > 1. __syncthreads()
// only synchronizes one block, yet each cell reads neighbours owned by
// other blocks; and even within a block there is no barrier between the
// write of generation g and the neighbour reads of generation g+1.
// Correct results need double buffering with one launch per generation.
__global__ void life (unsigned char *d_board,int iterations) {
int i,row,col,rows,cols;
unsigned char state,neighbors;
row = blockIdx.y * blockDim.y + threadIdx.y;
col = blockIdx.x * blockDim.x + threadIdx.x;
rows = gridDim.y * blockDim.y;
cols = gridDim.x * blockDim.x;
state = d_board[(row)*cols+(col)];
for (i=0;i<iterations;i++) {
neighbors=0;
// count the up-to-8 live neighbours, clamping at the board edges
if (row!=0) {
if (col!=0) if (d_board[(row-1)*cols+(col-1)]==1) neighbors++;
if (d_board[(row-1)*cols+(col)]==1) neighbors++;
if (col!=(cols-1)) if (d_board[(row-1)*cols+(col+1)]==1) neighbors++;
}
if (col!=0) if (d_board[(row)*cols+(col-1)]==1) neighbors++;
if (col!=(cols-1)) if (d_board[(row)*cols+(col+1)]==1) neighbors++;
if (row!=(rows-1)) {
if (col!=0) if (d_board[(row+1)*cols+(col-1)]==1) neighbors++;
if (d_board[(row+1)*cols+(col)]==1) neighbors++;
if (col!=(cols-1)) if (d_board[(row+1)*cols+(col+1)]==1) neighbors++;
}
// apply the rules; ==2 intentionally falls through (survival)
if (neighbors<2) state = 0;
else if (neighbors==3) state = 1;
else if (neighbors>3) state = 0;
__syncthreads();
d_board[(row)*cols+(col)]=state;
}
}
// Seeds a random 256x256 board, prints it, runs 100 generations of `life`
// on the GPU, and prints the final board.
int main () {
    const int iterations = 100;
    const int cells = 256 * 256;
    dim3 bDim(32, 16, 1);   // 32x16 threads per block
    dim3 gDim(8, 16, 1);    // 8x16 blocks -> 256x256 cells total
    unsigned char *h_board = (unsigned char *)malloc(sizeof(unsigned char) * cells);
    unsigned char *d_board;
    cudaMalloc((void **)&d_board, sizeof(unsigned char) * cells);
    srand(0);   // deterministic board
    for (int i = 0; i < cells; i++)
        h_board[i] = rand() % 2;
    printf("Starting state\n");
    printBoard(h_board, 256, 256);
    cudaMemcpy(d_board, h_board, sizeof(unsigned char) * cells, cudaMemcpyHostToDevice);
    life<<<gDim, bDim>>>(d_board, iterations);
    cudaMemcpy(h_board, d_board, sizeof(unsigned char) * cells, cudaMemcpyDeviceToHost);
    printf("Ending state\n");
    printBoard(h_board, 256, 256);
    free(h_board);
    cudaFree(d_board);
}
|
3,912 | #include "includes.h"
// Copies the first n ints of vec_from into vec_to, one element per thread.
__global__ void int_copy(int *vec_to, int *vec_from, const int n)
{
    const unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (xIndex >= n)
        return;  // grid-tail guard
    vec_to[xIndex] = vec_from[xIndex];
}
3,913 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Returns the num-th (1-based) comma-separated field of line, or NULL if
// the line has fewer fields. Mutates line in place via strtok.
const char* getfield(char* line, int num){
    for (const char* tok = strtok(line, ","); tok && *tok; tok = strtok(NULL, ",\n")) {
        if (--num == 0)
            return tok;
    }
    return NULL;
}
// One LIF (leaky integrate-and-fire) time step for one neuron per thread
// on an N x N lattice with wrap-around (torus) coupling over the
// (2R+1)^2 square neighbourhood. All scalar parameters arrive as
// single-element device buffers and are dereferenced into locals first.
// Launch with exactly N*N threads: myId is not bounds-checked.
__global__ void calcPot(double *u, double *unext, int *currRefracIter, double *currTime, int *d_it, double *w, double *lastTime, double *d_refracTime,
int *d_maxRefracIter, int *d_N, int *d_R, double *d_uth, int *d_minMPVIter, double *d_dt, double *d_mi, double *d_sumCoeff){
double pi=3.14159265359;
int it = *d_it;
double refracTime = *d_refracTime;
int maxRefracIter = *d_maxRefracIter;
int N = *d_N;
int R = *d_R;
double uth = *d_uth;
int minMPVIter = *d_minMPVIter;
double dt = *d_dt;
double mi = *d_mi;
double sumCoeff = *d_sumCoeff;
int myId = blockDim.x * blockIdx.x + threadIdx.x;
/*******Refractory Period*******/
// a neuron that just fired (u == 0) sits out maxRefracIter iterations
if (*(u+myId)==0 && *(currRefracIter+myId)<maxRefracIter){
(*(currRefracIter+myId))++;
return;
}
else{
*(currRefracIter+myId)=0;
}
/*******Sum Calculation*******/
// coupling sum over the (2R+1)^2 square; the +N offsets keep the
// pre-modulo indices non-negative so %N wraps correctly (torus)
double sumVar=0.0;
int k,l;
int iLeftCorner=N+myId/N-R;
int jLeftCorner=N+myId%N-R;
for (k=iLeftCorner; k<iLeftCorner+2*R+1; k++){
for (l=jLeftCorner; l<jLeftCorner+2*R+1; l++){
sumVar+=*(u+myId)-*(u+(k%N)*N+l%N);
}
}
// explicit Euler step of the leaky integrator with coupling
*(unext+myId)=*(u+myId)+dt*(mi-*(u+myId)+sumCoeff*sumVar);
*(currTime+myId)+=dt;
if(*(unext+myId)>=uth){ //Threshold crossed
*(unext+myId)=0.0;
// after the warm-up, fold this spike into the mean phase velocity w
if (it>=minMPVIter){
*(w+myId)=((*(w+myId))*(*(lastTime+myId))+2*pi)/((*(lastTime+myId))+(*(currTime+myId))+refracTime);
*(lastTime+myId)+=(*(currTime+myId))+refracTime;
}
*(currTime+myId)=0.0;
}
return;
}
/*
Driver for the 2-D LIF lattice simulation: reads the initial potential
grid from a CSV (argv[1]), iterates calcPot on the GPU, and periodically
writes potential (POT) and mean-phase-velocity (MPV) snapshots under
ResultsCUDA<argv[2]>/.
Fixes vs. original: argc and fopen are checked before use; w[] is
initialized (it was copied to the device and later read uninitialized);
deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize();
the four device scalar buffers that were never freed are now released.
*/
int main(int argc, char** argv){
FILE *file1;
char filename[100];
// FIX: argv[1] (input CSV) and argv[2] (results suffix) are required
if(argc<3){
fprintf(stderr,"Usage: %s <input_csv> <results_suffix>\n",argv[0]);
return(1);
}
/*******Parameter Declarations*******/
int N=100; //Grid dimension
double dt=0.001; //0.001
int totalTime=10000; //Simulation time
int it=0;
int totalIter=totalTime/dt; //Total iterations
int R=22; //Square radius
double sigma=0.7; //Coupling strength
double sumCoeff=sigma/((2*R+1)*(2*R+1)-1); //Potential sum coefficient
double mi=1.0; //Integrator floor
double uth=0.98;
double Ts=log(mi/(mi-uth));
double refracTime=0.22*Ts; //Refractory period time
int maxRefracIter=(int)ceil(refracTime/dt); //Refractory period iterations
int i,j;
double u[N*N];
double unext[N*N];
int currRefracIter[N*N]; //Current iterations already in refractory period
int maxMPVIter=30000;
int minMPVIter=2000000; //iteration after which the neuron frequency starts being accumulated
double currTime[N*N];
double lastTime[N*N];
double w[N*N];
double t=0.0;
for (i=0; i<N; i++){
for (j=0; j<N; j++){
(*(unext+i*N+j))=0.0;
(*(currTime+i*N+j))=0.0;
(*(lastTime+i*N+j))=0.0;
(*(currRefracIter+i*N+j))=0;
(*(w+i*N+j))=0.0; //FIX: w was copied to the device uninitialized
}
}
file1=fopen(argv[1],"r"); //argv[1]
if(file1==NULL){ //FIX: fail cleanly on a missing input file
fprintf(stderr,"Cannot open %s\n",argv[1]);
return(1);
}
char line[2048];
i=0;
while(fgets(line, 2048, file1)){
for(j=1;j<=N;j++){
char* tmp = strdup(line);
(*(u+N*i+j-1))=atof(getfield(tmp,j));
free(tmp);
}
i++;
}
fclose(file1);
double *d_u, *d_unext, *d_currTime, *d_w, *d_lastTime, *d_refracTime, *d_uth, *d_dt, *d_mi, *d_sumCoeff;
int *d_currRefracIter, *d_it, *d_maxRefracIter, *d_N, *d_R, *d_minMPVIter;
cudaMalloc(&d_u, N*N*sizeof(double));
cudaMalloc(&d_unext, N*N*sizeof(double));
cudaMalloc(&d_currRefracIter, N*N*sizeof(int));
cudaMalloc(&d_currTime, N*N*sizeof(double));
cudaMalloc(&d_it, sizeof(int));
cudaMalloc(&d_w, N*N*sizeof(double));
cudaMalloc(&d_lastTime, N*N*sizeof(double));
cudaMalloc(&d_refracTime, sizeof(double));
cudaMalloc(&d_maxRefracIter, sizeof(int));
cudaMalloc(&d_N, sizeof(int));
cudaMalloc(&d_R, sizeof(int));
cudaMalloc(&d_uth, sizeof(double));
cudaMalloc(&d_minMPVIter, sizeof(int));
cudaMalloc(&d_dt, sizeof(double));
cudaMalloc(&d_mi, sizeof(double));
cudaMalloc(&d_sumCoeff, sizeof(double));
// constant scalar parameters: copied once, read-only on the device
cudaMemcpy(d_refracTime, &refracTime, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_maxRefracIter, &maxRefracIter, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_R, &R, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_uth, &uth, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_minMPVIter, &minMPVIter, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_dt, &dt, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_mi, &mi, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_sumCoeff, &sumCoeff, sizeof(double), cudaMemcpyHostToDevice);
time_t benchBegin = time(NULL);
/*******Simulation*******/
while (it<totalIter){
if (it%10000==0) printf("Iteration %d of %d\n", it, totalIter);
// push the current state, run one step, pull the results back
cudaMemcpy(d_u, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_unext, unext, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_currRefracIter, currRefracIter, N*N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_currTime, currTime, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_it, &it, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_w, w, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_lastTime, lastTime, N*N*sizeof(double), cudaMemcpyHostToDevice);
calcPot<<<100,100>>>(d_u, d_unext, d_currRefracIter, d_currTime, d_it, d_w, d_lastTime, d_refracTime, d_maxRefracIter, d_N, d_R, d_uth, d_minMPVIter, d_dt, d_mi, d_sumCoeff);
cudaDeviceSynchronize(); //FIX: cudaThreadSynchronize is deprecated
cudaMemcpy(unext, d_unext, N*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(currRefracIter, d_currRefracIter, N*N*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(currTime, d_currTime, N*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(w, d_w, N*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(lastTime, d_lastTime, N*N*sizeof(double), cudaMemcpyDeviceToHost);
// periodic potential snapshot
if(it%10000==0){
sprintf(filename, "ResultsCUDA%s/Results_POT_LIF_2D_Classic_sigma_%lf_R_%d_time_%lf_.dat",argv[2],sigma,R,t);
file1=fopen(filename,"w");
if(file1!=NULL){ //FIX: skip the dump if the results dir is missing
for(i=0;i<N;i++){
for(j=0;j<N;j++){
fprintf(file1, "%lf,",*(unext+N*i+j));
}
fprintf(file1,"\n");
}
fclose(file1);
}
}
// periodic mean-phase-velocity snapshot, once past the warm-up
if (it>minMPVIter){
if ((it-minMPVIter)%maxMPVIter==0){
sprintf(filename, "ResultsCUDA%s/Results_MPV_LIF_2D_Classic_sigma_%lf_R_%d_time_%lf_.dat",argv[2],sigma,R,t);
file1=fopen(filename,"w");
if(file1!=NULL){
for(i=0;i<N;i++){
for(j=0;j<N;j++){
fprintf(file1,"%lf,",*(w+N*i+j));
}
fprintf(file1,"\n");
}
fclose(file1);
}
}
}
if (it == 2000000){
time_t benchEnd = time(NULL);
sprintf(filename, "ResultsCUDA%s/execTime.dat",argv[2]);
file1=fopen(filename,"w");
if(file1!=NULL){
fprintf(file1,"Execution time for 2000 time units: %ld seconds\n",benchEnd-benchBegin);
fclose(file1);
}
}
// advance: the new potentials become the current ones
for (i=0; i<N; i++){
for (j=0; j<N; j++){
(*(u+N*i+j))=*(unext+N*i+j);
}
}
t+=dt;
it++;
} //end of simulation while-loop
cudaFree(d_u);
cudaFree(d_unext);
cudaFree(d_currRefracIter);
cudaFree(d_currTime);
cudaFree(d_it);
cudaFree(d_w);
cudaFree(d_lastTime);
cudaFree(d_refracTime);
cudaFree(d_maxRefracIter);
cudaFree(d_N);
cudaFree(d_R);
cudaFree(d_uth);
cudaFree(d_minMPVIter); //FIX: these four were leaked in the original
cudaFree(d_dt);
cudaFree(d_mi);
cudaFree(d_sumCoeff);
return(0);
}
|
3,914 | #include "includes.h"
// Linear cross-fade of two interleaved stereo buffers, in place in out1:
// out1 = out1*(1-fn) + out2*fn, where fn ramps 0 -> 1 over numFrames.
// Frame i occupies samples 2i (left) and 2i+1 (right).
// BUG FIX: added the grid-tail bounds guard; the original indexed out1 and
// out2 for every launched thread, writing out of bounds whenever the grid
// was rounded up past numFrames.
// NOTE(review): numFrames == 1 makes fn = 0/0 -- confirm callers pass >= 2.
__global__ void crossFade(float* out1, float* out2, int numFrames){
	const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
	if (threadID < numFrames) {
		float fn = float(threadID) / (numFrames - 1.0f);
		out1[threadID * 2] = out1[threadID * 2] * (1.0f - fn) + out2[threadID * 2] * fn;
		out1[threadID * 2 + 1] = out1[threadID * 2 + 1] * (1.0f - fn) + out2[threadID * 2 + 1] * fn;
	}
}
3,915 | #include <stdio.h>
#include <cuda_runtime.h>
// Queries device 0 and prints a summary of its compute resources.
int main( ) {
    const int device_id = 0;
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, device_id);
    printf("device id %d, name %s\n", device_id, props.name);
    printf("number of multi-processors = %d\n",
           props.multiProcessorCount);
    printf("Total constant memory: %4.2f kb\n",
           props.totalConstMem/1024.0);
    printf("Shared memory per block: %4.2f kb\n",
           props.sharedMemPerBlock/1024.0);
    printf("Total registers per block: %d\n",
           props.regsPerBlock);
    printf("Maximum threads per block: %d\n",
           props.maxThreadsPerBlock);
    printf("Maximum threads per multi-processor: %d\n",
           props.maxThreadsPerMultiProcessor);
    // 32 threads per warp
    printf("Maximum number of warps per multi-processor %d\n",
           props.maxThreadsPerMultiProcessor/32);
    return 0;
}
3,916 | #include <stdio.h>
#include <stdlib.h>
#define N 256
// Bit-reverses each byte of every 32-bit word in data, in place: nibbles,
// then bit-pairs, then single bits are swapped. One thread per word,
// staged through dynamic shared memory (launch with N*sizeof(int) bytes).
__global__ void bitreverse(void *data){
	unsigned int *idata = (unsigned int *)data;
	extern __shared__ int array[];
	const int t = threadIdx.x;
	unsigned int v = idata[t];
	v = ((0xf0f0f0f0 & v) >> 4) | ((0x0f0f0f0f & v) << 4);  // swap nibbles
	v = ((0xcccccccc & v) >> 2) | ((0x33333333 & v) << 2);  // swap bit pairs
	v = ((0xaaaaaaaa & v) >> 1) | ((0x55555555 & v) << 1);  // swap bits
	array[t] = v;
	idata[t] = array[t];
}
// Fills N words with 0..N-1, bit-reverses them on the GPU, and prints
// each input/output pair.
int main(void) {
	unsigned int idata[N], odata[N];
	for (int i = 0; i < N; i++)
		idata[i] = (unsigned int)i;
	void *d = NULL;
	cudaMalloc((void**)&d, sizeof(int) * N);
	cudaMemcpy(d, idata, sizeof(int) * N, cudaMemcpyHostToDevice);
	// one block of N threads, N ints of dynamic shared memory
	bitreverse<<<1, N, N * sizeof(int)>>>(d);
	cudaMemcpy(odata, d, sizeof(int) * N, cudaMemcpyDeviceToHost);
	for (int i = 0; i < N; i++)
		printf("%u -> %u\n", idata[i], odata[i]);
	cudaFree((void*)d);
	return 0;
}
3,917 | #include "includes.h"
/*
Autor: Munesh Singh
Date: 08 March 2010
Vector addition using cudaMallocPitch
*/
const int width = 567;
const int height = 985;
// Element-wise addition P = M + N on a height x width grid of floats whose
// rows are padded to `pitch` bytes (cudaMallocPitch layout): row pointers
// are stepped by the byte pitch, not by width.
// BUG FIX: the original row bound was `row < width`; with width=567 and
// height=985 the last 418 rows were never processed. Rows are bounded by
// `height`, columns by `width`.
__global__ void testKernel2D(float* M, float* N, float* P, size_t pitch) {
	int col = threadIdx.x + blockIdx.x * blockDim.x;
	int row = threadIdx.y + blockIdx.y * blockDim.y;
	if (row < height && col < width) {
		float* row_M = (float*)((char*)M + row * pitch);
		float* row_N = (float*)((char*)N + row * pitch);
		float* row_P = (float*)((char*)P + row * pitch);
		row_P[col] = row_M[col] + row_N[col];
	}
}
3,918 | #include <stdio.h>
#include <cuda_runtime.h>
#define N 10
#ifndef checkCudaErrors
// Wraps a CUDA runtime call: on failure, prints the error code, its
// human-readable string, and the call site, then terminates the process.
#define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__)
void __checkCudaErrors(cudaError_t err, const char *file, const int line)
{
if(cudaSuccess != err)
{
// %04d prints the numeric error code alongside cudaGetErrorString
fprintf(stderr, "checkCudaErrors() Driver API error = %04d \"%s\" from file <%s>, line %i.\n", err, cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#endif
// One block per element: the block index selects which pair to add.
// N is the file-level problem size; the guard protects over-sized grids.
__global__ void add(int *a, int *b, int *c)
{
	const int tid = blockIdx.x;
	if (tid >= N)
		return;
	c[tid] = a[tid] + b[tid];
}
// Adds two N-element vectors on the GPU and prints each sum.
// BUG FIX: the original allocated 10000000000000 * sizeof(int) (~40 TB)
// for dev_a -- a guaranteed failure that then cascaded through every
// later call; dev_a now gets N ints like the other buffers, and every
// CUDA call is routed through checkCudaErrors.
int main(void)
{
	int a[N], b[N], c[N];
	int *dev_a, *dev_b, *dev_c;
	checkCudaErrors(cudaMalloc((void**)&dev_a, N * sizeof(int)));
	checkCudaErrors(cudaMalloc((void**)&dev_b, N * sizeof(int)));
	checkCudaErrors(cudaMalloc((void**)&dev_c, N * sizeof(int)));
	for (int i = 0; i < N; ++i)
	{
		a[i] = -i;
		b[i] = i * i;
	}
	checkCudaErrors(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
	add<<<N,1>>>(dev_a, dev_b, dev_c);
	checkCudaErrors(cudaGetLastError());   // catch launch errors
	checkCudaErrors(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));
	for (int i = 0; i < N; ++i)
	{
		printf("%d + %d = %d\n",a[i],b[i],c[i]);
	}
	checkCudaErrors(cudaFree(dev_a));
	checkCudaErrors(cudaFree(dev_b));
	checkCudaErrors(cudaFree(dev_c));
	return 0;
}
|
3,919 | #include <stdio.h>
#include <stdlib.h>
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CUDASAFECALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDACHECKERROR() __cudaCheckError( __FILE__, __LINE__ )
// Aborts (exit -1) with file/line context if a CUDA runtime call failed,
// reporting to both stderr and stdout. No-op when CUDA_ERROR_CHECK is
// not defined.
inline void __cudaSafeCall(cudaError err, const char* file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
        file, line, cudaGetErrorString(err));
    fprintf(stdout, "cudaSafeCall() failed at %s:%i : %s\n",
        file, line, cudaGetErrorString(err));
    exit(-1);
#endif
    return;
}
// Check the sticky CUDA error state (e.g. after a kernel launch); active
// only with CUDA_ERROR_CHECK. Reports to both streams and terminates.
inline void __cudaCheckError(const char* file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if (cudaSuccess == err)
        return;
    fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
    fprintf(stdout, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
    exit(-1);
#endif
    return;
}
|
3,920 | #include <stdio.h>
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
// Functions
void Cleanup(bool);
void RandomInit(float*, int);
void ParseArguments(int, char**);
// Device code
// C[i] = A[i] + B[i] for i in [0, N); threads past the tail do nothing.
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// Host code
// Reads N and two N-element float vectors from stdin, sums them on the GPU
// and prints every element-wise result.
// Fixes: input length is validated, every CUDA call's status is checked
// (previously `error` was assigned and ignored), all host and device memory
// is released, and main returns a status code.
int main(int argc, char** argv)
{
    int N;
    if (scanf("%d", &N) != 1 || N <= 0)
    {
        fprintf(stderr, "invalid vector length\n");
        return 1;
    }
    size_t size = N * sizeof(float);
    // Allocate input vectors h_A and h_B in host memory
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    cudaError_t error;
    // Initialize input vectors from stdin
    for (int i = 0; i < N; i++)
    {
        scanf("%f", h_A + i);
        scanf("%f", h_B + i);
    }
    // Allocate vectors in device memory, checking each call.
    error = cudaMalloc((void**)&d_A, size);
    if (error != cudaSuccess) { fprintf(stderr, "cudaMalloc d_A: %s\n", cudaGetErrorString(error)); return 1; }
    error = cudaMalloc((void**)&d_B, size);
    if (error != cudaSuccess) { fprintf(stderr, "cudaMalloc d_B: %s\n", cudaGetErrorString(error)); return 1; }
    error = cudaMalloc((void**)&d_C, size);
    if (error != cudaSuccess) { fprintf(stderr, "cudaMalloc d_C: %s\n", cudaGetErrorString(error)); return 1; }
    // Copy vectors from host memory to device memory
    error = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) { fprintf(stderr, "copy A: %s\n", cudaGetErrorString(error)); return 1; }
    error = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) { fprintf(stderr, "copy B: %s\n", cudaGetErrorString(error)); return 1; }
    // Invoke kernel: ceil-divide N by the block size so the tail is covered.
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    error = cudaGetLastError();   // launch-configuration errors
    if (error != cudaSuccess) { fprintf(stderr, "launch: %s\n", cudaGetErrorString(error)); return 1; }
    // Copy result back; this blocking copy also synchronizes with the kernel.
    error = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) { fprintf(stderr, "copy C: %s\n", cudaGetErrorString(error)); return 1; }
    // Print result
    for (int i = 0; i < N; ++i)
        printf("%f+%f=%f\n", h_A[i], h_B[i], h_C[i]);
    // Release device and host memory (previously leaked).
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(h_A); free(h_B); free(h_C);
    return 0;
}
|
3,921 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
/* Problem size */
#define NI 8192 // height
#define NJ 8192 // width
// 3x3 stencil: B_d[i][j] = sum of the fixed coefficients times the 3x3
// neighbourhood of A_d. One thread per pixel; the one-cell border is left
// untouched. A_d is indexed with the compile-time row stride NJ.
__global__ void convolutionKernel(double *A_d, double *B_d, int width, int height)
{
    const int j = blockIdx.x * blockDim.x + threadIdx.x;   // column
    const int i = blockIdx.y * blockDim.y + threadIdx.y;   // row
    // Filter coefficients, named c<col><row> as in the CPU reference.
    const double c11 = +0.2, c21 = +0.5, c31 = -0.8;
    const double c12 = -0.3, c22 = +0.6, c32 = -0.9;
    const double c13 = +0.4, c23 = +0.7, c33 = +0.10;
    // Interior-only predicate folds the original nested range checks into one.
    if (i >= 1 && i < height - 1 && j >= 1 && j < width - 1)
    {
        B_d[i*width + j] = c11 * A_d[(i - 1)*NJ + (j - 1)] + c12 * A_d[(i + 0)*NJ + (j - 1)] + c13 * A_d[(i + 1)*NJ + (j - 1)]
                         + c21 * A_d[(i - 1)*NJ + (j + 0)] + c22 * A_d[(i + 0)*NJ + (j + 0)] + c23 * A_d[(i + 1)*NJ + (j + 0)]
                         + c31 * A_d[(i - 1)*NJ + (j + 1)] + c32 * A_d[(i + 0)*NJ + (j + 1)] + c33 * A_d[(i + 1)*NJ + (j + 1)];
    }
}
// Round num up to the next integer when it has a fractional part.
// NOTE(review): for negative fractional inputs this returns trunc(num)+1,
// which differs from ceilf; preserved as-is (callers only pass positives).
int my_ceil(float num)
{
    const int truncated = (int)num;
    return ((float)truncated == num) ? truncated : truncated + 1;
}
// Dump a height x width row-major matrix to stdout, one row per line,
// bracketed by blank-ish separator lines (matching the original format).
void print_matrix(double* C, int width, int height)
{
    printf("%s\n", " ");
    for (int r = 0; r < height; r++)
    {
        for (int c = 0; c < width; c++)
        {
            printf("%- 3.2f ", C[r*width+c]);
        }
        printf("%s\n", " ");
    }
}
// CPU reference for convolutionKernel: same 3x3 stencil over the fixed
// NI x NJ grid, skipping the one-cell border.
void Convolution(double* A, double* B)
{
    const double c11 = +0.2, c21 = +0.5, c31 = -0.8;
    const double c12 = -0.3, c22 = +0.6, c32 = -0.9;
    const double c13 = +0.4, c23 = +0.7, c33 = +0.10;
    for (int i = 1; i < NI - 1; ++i) {
        for (int j = 1; j < NJ - 1; ++j) {
            B[i*NJ + j] = c11 * A[(i - 1)*NJ + (j - 1)] + c12 * A[(i + 0)*NJ + (j - 1)] + c13 * A[(i + 1)*NJ + (j - 1)]
                        + c21 * A[(i - 1)*NJ + (j + 0)] + c22 * A[(i + 0)*NJ + (j + 0)] + c23 * A[(i + 1)*NJ + (j + 0)]
                        + c31 * A[(i - 1)*NJ + (j + 1)] + c32 * A[(i + 0)*NJ + (j + 1)] + c33 * A[(i + 1)*NJ + (j + 1)];
        }
    }
}
// Fill the NI x NJ matrix with pseudo-random doubles in [0, 1].
void init(double* A)
{
    for (int r = 0; r < NI; ++r)
        for (int c = 0; c < NJ; ++c)
            A[r*NJ + c] = (double)rand()/RAND_MAX;
}
// Driver: random NI x NJ matrix -> GPU 3x3 convolution -> timing + file dump.
// Fix: the grid was constructed as dim3(grid_dim_x, grid_dim_x) — the y
// dimension now uses grid_dim_y so non-square problem sizes are fully covered.
int main(int argc, char *argv[])
{
    // open output file up front so a failure aborts before any GPU work
    FILE *output = fopen("gpu.out", "w");
    if (output == NULL) {
        printf("Could not open file");
        exit(1);
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // calloc zero-initializes, so B_h starts cleared (no explicit zeroing loop needed)
    double *A_h = (double*)calloc(NI*NJ, sizeof(double));
    double *B_h = (double*)calloc(NI*NJ, sizeof(double));
    // device matrices
    double *A_d;
    double *B_d;
    long size = NI*NJ*sizeof(double);
    cudaMalloc((void**) &A_d, size);
    cudaMalloc((void**) &B_d, size);
    // initialize the input array
    init(A_h);
    // transfer matrices to device
    cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
    // 16x16 blocks; ceil-divide each matrix dimension to size the grid
    int block_dim_x = 16;
    int block_dim_y = 16;
    int grid_dim_x = my_ceil( (float) NJ / block_dim_x);
    int grid_dim_y = my_ceil( (float) NI / block_dim_y);
    printf("Block Size = %dx%d\n", block_dim_x, block_dim_y);
    printf("Calculated Grid Size = %dx%d\n", grid_dim_x, grid_dim_y);
    dim3 dimGrid(grid_dim_x, grid_dim_y);   // was (grid_dim_x, grid_dim_x)
    dim3 dimBlock(block_dim_x, block_dim_y);
    // call GPU kernel, timed with events
    cudaEventRecord(start);
    convolutionKernel<<<dimGrid, dimBlock>>>(A_d, B_d, NJ, NI);
    cudaEventRecord(stop);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(B_h, B_d, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU Runtime: %0.6lf sec\n", milliseconds/1000.0);
    // Write a subsample of the interior result to file.
    // NOTE(review): i%NJ/2 parses as (i%NJ)/2, which is zero only for i==1
    // in this loop; preserved so the output file stays identical — confirm intent.
    for (int i = 1; i < NI - 1; ++i) {
        for (int j = 1; j < NJ - 1; ++j) {
            if(i%NJ/2==0)
                fprintf(output, "%19.15f\n", B_h[i*NJ + j]);
        }
    }
    free(A_h);
    free(B_h);
    fclose(output);
    // free memory from device
    cudaFree(A_d);
    cudaFree(B_d);
    return 0;
}
|
3,922 | #include <cstdio>
#include <iostream>
#include <chrono>
#include <algorithm>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// dy[id] += |dx[id]| for every launched thread.
// NOTE: no bounds guard — the launch must cover exactly the array length
// (main launches with N a multiple of the block size).
__global__ void add_gpu(float *dx, float *dy)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // fabsf is the unambiguous single-precision device function; std::abs in
    // device code depends on host-header overload resolution and can pick the
    // integer overload on some toolchains.
    dy[id] += fabsf(dx[id]);
}
// CPU reference: hy[i] += |hx[i]| for the first N elements.
void add_cpu(int N, float *hx, float *hy)
{
    for (int idx = 0; idx < N; ++idx)
        hy[idx] += std::abs(hx[idx]);
}
// Benchmarks add_cpu vs add_gpu on 2^20 elements and verifies the result.
// Fixes: the GPU timing now includes cudaDeviceSynchronize() inside the timed
// region (a kernel launch is asynchronous, so the old code measured only
// launch overhead), and the verification constant is 3.0f — hy starts at 2.0
// and gains |1.0| exactly once on each path, not 4.0f.
int main()
{
    int N = 1 << 20;
    std::cout<<"N:"<<N<<std::endl;
    float *hx, *hy, *dx, *dy;
    hx = new float[N];
    hy = new float[N];
    cudaMalloc(&dx, N*sizeof(float));
    cudaMalloc(&dy, N*sizeof(float));
    for(int i = 0; i < N; i++){
        hx[i] = 1.0f;
        hy[i] = 2.0f;
    }
    cudaMemcpy(dx, hx, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy, N*sizeof(float), cudaMemcpyHostToDevice);
    // N is divisible by 256, matching add_gpu's guard-free indexing.
    int threadNums = 256;
    int blockNums = (N + threadNums -1)/threadNums;
    std::chrono::time_point<std::chrono::system_clock> begin;
    std::chrono::time_point<std::chrono::system_clock> end;
    std::chrono::duration<double> elapsedTime;
    // call add_cpu
    begin = std::chrono::system_clock::now();
    add_cpu(N, hx, hy);
    end = std::chrono::system_clock::now();
    elapsedTime = end - begin;
    printf("Call add_cpu, Time: %.6lfs\n", elapsedTime.count());
    // call add_gpu: synchronize before taking the end timestamp, otherwise
    // only the asynchronous launch call is measured.
    begin = std::chrono::system_clock::now();
    add_gpu<<< blockNums, threadNums>>>(dx, dy);
    cudaDeviceSynchronize();
    end = std::chrono::system_clock::now();
    elapsedTime = end - begin;
    printf("Call add_gpu, Time: %.6lfs\n", elapsedTime.count());
    cudaMemcpy(hy, dy, N*sizeof(float), cudaMemcpyDeviceToHost);
    // Both paths compute 2.0 + |1.0| = 3.0 exactly in float.
    float maxError = 0.0f;
    for (int i=0; i<N; i++){
        maxError = std::max(maxError, std::abs(hy[i] - 3.0f));
    }
    printf("Max error: %.6f\n", maxError);
    delete[] hx;
    delete[] hy;
    cudaFree(dx);
    cudaFree(dy);
    return 0;
}
|
3,923 | /*
Copyright (C) Muaaz Gul Awan and Fahad Saeed
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include<iostream>
#include<vector>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<ctime>
#include<algorithm>
#include<utility>
#include <curand.h>
#include <curand_kernel.h>
#include<random>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <string>
#include <fstream>
using namespace std;
#define elesPerBucket 20
#define sampleRate 10
//#define totArrays 10000
#define maxSize2 10000
//#define minSize 10000
#define SAMPLED (sampleRate*maxSize)/100
#define maxBuckets (maxSize/elesPerBucket)
// Pass-through wrapper: in debug builds (DEBUG/_DEBUG) it reports and traps
// any CUDA failure; in release builds it just returns the status so call
// sites can stay unconditional.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
#endif
  return result;
}
//swap function for Insertion sort
// Exchange the two referenced values via a temporary (used by insertionSort).
template <class type>
__device__ void swapD (type &a, type &b)
{
    const type saved = a;
    a = b;
    b = saved;
}
//insertion sort
// In-place ascending insertion sort of input[begin, end).
template <class type>
__device__ void insertionSort(type *input, int begin, int end){
    for (int i = begin + 1; i < end; i++) {
        // Sink input[i] leftwards until the prefix is ordered.
        int j = i;
        while (j > begin && input[j - 1] > input[j]) {
            swapD(input[j], input[j - 1]);
            j--;
        }
    }
}
// Scan input[beginPtr, endPtr) and write {min, max} into ret[0], ret[1].
// Fix: max was initialized to 0, which is wrong when every element is
// negative; both extrema now start from the first element.
__device__ void getMinMax(int input[], int beginPtr, int endPtr, int *ret){
int min = input[beginPtr];
int max = input[beginPtr];
for(int i = beginPtr + 1; i < endPtr; i++){
if(min > input[i])
min = input[i];
if (max < input[i])
max = input[i];
}
ret[0] = min;
ret[1] = max;
}
//data generation
// Flattened collection of variable-length arrays: dataList holds all element
// values back to back, and prefixArray[i] is the starting offset of array i
// (prefix sums, with prefixArray[numOfArrays] = total element count — see
// dataGen/readFile, which populate it).
template <typename mType>
struct dataArrays{
vector<mType> dataList;
int *prefixArray;
};
// Generate numOfArrays arrays whose sizes are uniform in [minArraySize,
// maxArraySize] and whose elements are uniform in [0, 5000], flattened into
// a dataArrays with a prefix-sum offset table.
// Fix: the local was declared dataArrays<int>, which fails to convert to the
// declared dataArrays<type> return type for any instantiation other than int.
template <typename type>
dataArrays<type> dataGen (int numOfArrays, int maxArraySize, int minArraySize){
dataArrays<type> data;
data.prefixArray = new int[numOfArrays+1]; //exclusive prefix scan
const int range_from = 0;
const unsigned int range_to = 5000;//2147483647; //2^31 - 1
random_device rand_dev;
mt19937 generator(rand_dev());
uniform_int_distribution<int> distr(range_from, range_to);
int prefixSum = 0;
// rand()/srand() drive only the per-array sizes; values come from the
// mersenne-twister distribution above.
srand(time(0));
for( int i = 0; i < numOfArrays; i++){
int size = rand()%(maxArraySize-minArraySize + 1) + minArraySize;
data.prefixArray[i] = prefixSum;
for(int j = prefixSum; j < prefixSum + size; j++){
data.dataList.push_back(distr(generator));
}
prefixSum += size;
}
data.prefixArray[numOfArrays] = prefixSum;
return data;
}
//kernel for obtaining num of buckets for each array
// One block per array: numOfBucketsArray[i] = length(array i) / elesPerBucket.
__global__ void getNumOfBuckets(int *prefixSumArray, int *numOfBucketsArray, int totArrays){
    const int arrayId = blockIdx.x;
    if (arrayId >= totArrays)
        return;
    const int length = prefixSumArray[arrayId+1] - prefixSumArray[arrayId];
    numOfBucketsArray[arrayId] = length/elesPerBucket;
}
// Per-array splitter selection for sample sort. Sorts this array's samples
// (indices supplied in `sample`) and writes every (numOfSamples/numOfBuckets)-th
// one into the global splitter table at this array's offset, bracketed by
// type-min/type-max sentinels so every element lands in some bucket.
// NOTE(review): caller launches one thread per block, so no intra-block
// synchronization is needed; mySamples is shared only for capacity.
template <typename mType, int maxSize>
__device__ void getSplitters (mType *data, mType *splittersArray, int sample[], int beginPtr, int endPtr, int arraySize, int *prefixBucketsArray){
__shared__ mType mySamples[SAMPLED];
//int *ret = new int[2];
//int arraySize = endPtr - beginPtr;
// calculating samples for this array
int numOfSamples = ((float)sampleRate/100)*(arraySize);
//calculating the number of buckets for this array
int numOfBuckets = (blockIdx.x == 0) ? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]);
// Gather the sampled elements and sort them so quantiles can be read off.
for(int i = 0; i < numOfSamples; i++)
mySamples[i] = data[beginPtr+sample[i]];
insertionSort(mySamples, 0, numOfSamples);
//calculate splitter index for this array
int splitterIndex = ((blockIdx.x == 0)? 1 : (prefixBucketsArray[blockIdx.x-1]+1))+1; //the other plus one is for leaving space for smallest splitter(added later)
int splittersSize=0;
// Emit numOfBuckets-1 interior splitters at even strides through the samples.
for(int i = (numOfSamples)/(numOfBuckets); splittersSize < numOfBuckets-1; i +=(numOfSamples)/(numOfBuckets)){
splittersArray[splitterIndex] = mySamples[i];
splitterIndex++;
splittersSize++;
}
//getMinMax(data, beginPtr, endPtr, ret);
// Sentinels: smallest/largest representable value of mType.
// NOTE(review): `1 << (bits-1)` is signed overflow (UB) for 32-bit mType;
// numeric_limits would be the safe formulation — confirm before relying on
// non-int instantiations.
int bits = 8*sizeof(mType);
mType min = -(1 << (bits-1));
mType max = (1 << (bits - 1)) - 1;//int max = (1 << (bits-1)) - 1;
splittersArray[((blockIdx.x == 0)? 0 : (prefixBucketsArray[blockIdx.x-1]+1))] = min;//ret[0]-2;//to accommodate the smallest
splittersArray[((blockIdx.x == 0)? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x]))] = max;//ret[1]+2;
//delete [] ret;
}
//kernel for obtaining splitters
// Kernel: one block (launched with a single thread) per array. Builds an
// evenly-strided set of sample indices in shared memory, then delegates to
// getSplitters to sort the samples and emit this array's bucket boundaries.
template <typename mType, int maxSize>
__global__ void splitterKer(mType *data, mType *splittersArray, int *prefixSizeArray, int *prefixBucketsArray, int totArrays){
if(blockIdx.x < totArrays){
int id = blockIdx.x;
__shared__ int sampleSh[SAMPLED];
int arraySize = prefixSizeArray[id+1] - prefixSizeArray[id];
// calculating samples for this array
int numOfSamples = ((float)sampleRate/100)*(arraySize);
//int *h_sample = new int[SAMPLED];
int max = arraySize;
int sam = numOfSamples;
int stride = max/sam;
// Sample indices 0, stride, 2*stride, ... relative to this array's start.
int sampleVal = 0;
for( int i = 0; i < numOfSamples; i++)
{
sampleSh[i] = sampleVal;
sampleVal += stride;
}
//for(int i = 0; i < numOfSamples; i++)
// sampleSh[i] = mySample[i];
getSplitters<mType, maxSize>(data, splittersArray, sampleSh, prefixSizeArray[id], prefixSizeArray[id+1], prefixSizeArray[id+1] - prefixSizeArray[id], prefixBucketsArray);
}
}
// Scatter phase: each thread owns one bucket of the block's array. It scans
// the whole array (already staged in shared myInput), collects the elements
// falling between its two splitters into a thread-local buffer, records the
// bucket size, then writes the bucket back into `input` at the position given
// by the running sum of the preceding buckets' sizes.
// NOTE(review): __syncthreads() here sits inside a branch taken only by
// threads with threadIdx.x < numOfBuckets — divergent barrier if the launch
// uses more threads (bucketKernel launches maxBuckets threads); confirm
// numOfBuckets == maxBuckets for all arrays or restructure the guard.
// NOTE(review): myBucket[maxSize] is a per-thread local array and will live
// in spilled local memory for large maxSize.
template <typename mType, int maxSize>
__device__ void getBuckets(mType *input, mType *splitters, int beginPtr, int endPtr, int *bucketsSize, mType *myInput, int *prefixBucketsArray, int totArrays){
volatile int numOfBuckets = (blockIdx.x == 0) ? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]);
if(blockIdx.x < totArrays && threadIdx.x < numOfBuckets){
int id = threadIdx.x;
// Per-bucket slot in the global size table; +1 leaves bucketsSize[0] as 0.
int sizeOffset = (blockIdx.x == 0) ? (0+threadIdx.x) : (prefixBucketsArray[blockIdx.x-1] + threadIdx.x); //blockIdx.x*BUCKETS+threadIdx.x;
int sizeOffsetBlock = (blockIdx.x == 0) ? (0) : (prefixBucketsArray[blockIdx.x-1]);
int bucketSizeOff = sizeOffset+1;
mType myBucket[maxSize]; //make it shared as well
// int bucketIndexOffset;
int indexSum=0;
bucketsSize[bucketSizeOff] = 0;
// Elements belong to bucket id when splitters[id] < x <= splitters[id+1].
for(int i = 0; i < (endPtr - beginPtr); i++){
if(myInput[i] > splitters[id] && myInput[i] <= splitters[id+1]){
myBucket[bucketsSize[bucketSizeOff]] = myInput[i];
bucketsSize[bucketSizeOff]++;
}
}
__syncthreads();
//prefix sum for bucket sizes of current array
for(int j = 0; j < threadIdx.x; j++)
indexSum += bucketsSize[sizeOffsetBlock+j+1];
//writing back current bucket back to the input memory
for(int i = 0; i < bucketsSize[bucketSizeOff]; i++)
input[indexSum+beginPtr+i] = myBucket[i];
}
}
//kernel for obtaining buckets
// Bucketing kernel: one block per array, one thread per bucket. Threads
// cooperatively stage the array and this array's splitters into shared
// memory, then getBuckets partitions the elements back into `data` in
// bucket order and records bucket sizes.
// NOTE(review): myInput is declared int regardless of mType (compiles only
// because the code is instantiated with int); bucketSizes[0]=0 is written by
// every participating thread (benign same-value race). The __syncthreads()
// sits inside a guard that can diverge when numOfBuckets < blockDim.x —
// confirm with compute-sanitizer synccheck.
template <typename mType, int maxSize>
__global__ void bucketKernel(mType *data, mType *splittersArray, int *prefixSizeArray, int *prefixBucketsArray, int *bucketSizes, int totArrays){
int numOfBuckets = (blockIdx.x == 0) ? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]);
if(blockIdx.x < totArrays && threadIdx.x < numOfBuckets){
bucketSizes[0] = 0;
int bid = blockIdx.x;
int tid = threadIdx.x;
// Split the copy work evenly; the last thread also takes the remainder.
int arraySize = prefixSizeArray[blockIdx.x+1] - prefixSizeArray[blockIdx.x];
int leftOvers = arraySize%numOfBuckets;
int jmpFac = arraySize/numOfBuckets;
int gArrayStart = prefixSizeArray[blockIdx.x] + tid*jmpFac;
int gArrayEnd = (tid==(numOfBuckets-1))?(gArrayStart + jmpFac+leftOvers):(gArrayStart + jmpFac);
int lArrayStart = tid*jmpFac;
__shared__ int myInput [maxSize];
int arrBegin = prefixSizeArray[bid];
int arrEnd = prefixSizeArray[bid+1];
// This array's slice of the global splitter table (numOfBuckets+1 entries).
int splitterIndexSt = ((blockIdx.x == 0)? 0 : (prefixBucketsArray[blockIdx.x-1]+1));//blockIdx.x*(BUCKETS+1);
int splitterIndexEd = splitterIndexSt + numOfBuckets+1;
__shared__ mType splitters[maxBuckets+2];
//copy my array in shared memory in parallel
for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){
myInput[i] = data[j];
}
__syncthreads();
// Every thread redundantly copies the splitters (same values, benign).
int j = 0;
for(int i = splitterIndexSt; i < splitterIndexEd; i++){
splitters[j] = splittersArray[i];
j++;
}
getBuckets<mType, maxSize>(data, splitters, arrBegin, arrEnd, bucketSizes, myInput, prefixBucketsArray, totArrays);
}
}
//sorting kernel
// Final phase: one block per array, one thread per bucket. The bucketed
// array is staged into shared memory, each thread insertion-sorts its own
// bucket in place (bucket start = running sum of preceding bucket sizes),
// and the sorted array is written back to global memory.
// NOTE(review): same divergent-__syncthreads() caveat as getBuckets when
// numOfBuckets < blockDim.x; staging is split by equal jmpFac chunks while
// sorting is split by bucket boundaries — correct only because both covers
// the same [0, arraySize) range and barriers separate the phases.
template <typename mType, int maxSize>
__global__ void sortBuckets(mType *buckets, int *bucketsSize, int *prefixBucketsArray, int *prefixSizeArray, int totArrays){
int numOfBuckets = (blockIdx.x == 0) ? prefixBucketsArray[0] : (prefixBucketsArray[blockIdx.x] - prefixBucketsArray[blockIdx.x-1]);
if(blockIdx.x < totArrays && threadIdx.x < numOfBuckets){
int sizeOffset = (blockIdx.x == 0) ? (0+threadIdx.x) : (prefixBucketsArray[blockIdx.x-1] + threadIdx.x);
int sizeOffsetBlock = (blockIdx.x == 0) ? (0) : (prefixBucketsArray[blockIdx.x-1]);
// int bid = blockIdx.x;
int tid = threadIdx.x;
int arraySize = prefixSizeArray[blockIdx.x+1] - prefixSizeArray[blockIdx.x];
int leftOvers = arraySize%numOfBuckets;
int jmpFac = arraySize/numOfBuckets;
int gArrayStart = prefixSizeArray[blockIdx.x] + tid*jmpFac;
int gArrayEnd = (tid==(numOfBuckets-1))?(gArrayStart + jmpFac+leftOvers):(gArrayStart + jmpFac);
int lArrayStart = tid*jmpFac;
//int lArrayEnd = (tid==(BUCKETS-1))?(lArrayStart + jmpFac+leftOvers):(lArrayStart + jmpFac);
__shared__ mType myArray [maxSize];
int indexSum = 0;
// Stage the whole array into shared memory in equal chunks.
for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){
myArray[i] = buckets[j];
}
__syncthreads();
//prefix sum for bucket sizes of current array
for(int j = 0; j < threadIdx.x; j++)
indexSum += bucketsSize[sizeOffsetBlock+j+1];
insertionSort(myArray, indexSum,indexSum + bucketsSize[sizeOffset+1]);
__syncthreads();
// Write the fully sorted array back in the same equal chunks.
for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){
buckets[j] = myArray[i];
}
__syncthreads();
}
}
// Host orchestrator for GPU-ArraySort: uploads the flattened arrays, counts
// buckets per array (prefix-scanned with thrust), generates splitters,
// partitions elements into buckets, sorts each bucket, and copies the sorted
// data back, reporting event- and clock-based timings.
// NOTE(review): when flag==1 the scanned d_prefixSumDo is assigned to
// d_prefixSum but the later unconditional cudaMalloc/cudaMemcpy overwrite it
// (and leak the first allocation) — confirm the flag path is dead.
// NOTE(review): d_prefixSum, d_numOfBuckets, d_splitters, d_bucketSizes,
// h_totalBuckets and h_bucketedData are never freed; events are never
// destroyed.
template <typename mType, int maxSize>
void gpuArraySort(dataArrays<mType> newData, int *prefixSum, int flag, int totArrays){
int *d_prefixSum, *d_numOfBuckets;
if(flag == 1){
int *d_prefixSumDo;
cudaMalloc((void**) &d_prefixSumDo, (totArrays+1)*sizeof(int));
cudaMemcpy(d_prefixSumDo, prefixSum, sizeof(int)*(1+totArrays), cudaMemcpyHostToDevice);
//casting device ptr to thrust dev_ptr
thrust::device_ptr<int> prefixDo = thrust::device_pointer_cast(d_prefixSumDo);
//performing prefixSum using thrust
thrust::exclusive_scan(prefixDo, prefixDo + totArrays, prefixDo);
prefixDo[totArrays] = prefixDo[totArrays-1] + prefixSum[totArrays-1];
d_prefixSum = d_prefixSumDo;
}
mType *d_inputData, *d_splitters, *d_bucketSizes;
//creating events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int *h_totalBuckets = new int[1];
size_t size_heap, size_stack;
//setting stack size limit (per-thread local arrays in the kernels need it)
cudaDeviceSetLimit(cudaLimitStackSize,10240);
cudaDeviceGetLimit(&size_heap, cudaLimitMallocHeapSize);
cudaDeviceGetLimit(&size_stack, cudaLimitStackSize);
// allocating device memory for prefixSum, num Of buckets, splitters, bucketSizes
cudaMalloc((void**) &d_prefixSum, (totArrays+1)*sizeof(int));
//copying prefixSums to Device
cudaMemcpy(d_prefixSum, prefixSum, sizeof(int)*(1+totArrays), cudaMemcpyHostToDevice);
//allocating device memory for inputData
thrust::device_vector<mType> inData (newData.dataList.size());
thrust::copy(newData.dataList.begin(), newData.dataList.end(), inData.begin());
cudaDeviceSynchronize();
clock_t start2, end2;
double cpu_time_used;
cudaEventRecord(start);
start2 = clock();
// allocating device memory for prefixSum, num Of buckets, splitters, bucketSizes
cudaMalloc((void**) &d_numOfBuckets, (totArrays)*sizeof(int));
//calculating buckets on GPU
getNumOfBuckets<<<totArrays,1>>>(d_prefixSum, d_numOfBuckets, totArrays);
//casting device ptr to thrust dev_ptr
thrust::device_ptr<int> prefixNumBuckets = thrust::device_pointer_cast(d_numOfBuckets);
//performing prefixSum using thrust (inclusive: last entry = total buckets)
thrust::inclusive_scan(prefixNumBuckets, prefixNumBuckets + totArrays, prefixNumBuckets);
//copying total number of buckets back
checkCuda(cudaMemcpy(h_totalBuckets, d_numOfBuckets+(totArrays-1), sizeof(int), cudaMemcpyDeviceToHost));
//allocating device memory for splitters (numOfBuckets+1 per array)
cudaMalloc((void**) &d_splitters, (totArrays+h_totalBuckets[0])*sizeof(mType));
cudaMalloc((void**) &d_bucketSizes, (1+h_totalBuckets[0])*sizeof(int));
d_inputData = thrust::raw_pointer_cast(&inData[0]);
cout<< "**** Generating Splitters ****" << endl;
splitterKer<mType, maxSize><<<totArrays, 1>>>(d_inputData, d_splitters, d_prefixSum, d_numOfBuckets, totArrays);
cout<< "**** Splitters Generated****" << endl;
cout<< "**** Generating Buckets ****" << endl;
bucketKernel<mType, maxSize><<<totArrays, maxBuckets>>>(d_inputData, d_splitters, d_prefixSum, d_numOfBuckets, d_bucketSizes, totArrays);
cout<< "**** Buckets Generated ****" << endl;
cout<< "**** Sorting Buckets ****" << endl;
sortBuckets<mType, maxSize><<<totArrays, maxBuckets>>>(d_inputData, d_bucketSizes,d_numOfBuckets, d_prefixSum, totArrays);
cout<< "**** Writing Back ****" << endl;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaDeviceSynchronize();
end2 = clock();
cpu_time_used = ((double) (end2 - start2)) * 1000 / CLOCKS_PER_SEC;
mType *h_bucketedData = new mType[newData.prefixArray[totArrays]];
checkCuda(cudaMemcpy(h_bucketedData, d_inputData, (newData.prefixArray[totArrays])*sizeof(mType), cudaMemcpyDeviceToHost));
// cout<<"printing bucketed array:";
// for(int i = newData.prefixArray[totArrays-2]; i < newData.prefixArray[totArrays-1]; i++)
// cout<< i<<":"<<h_bucketedData[i]<<endl;
cout<< "**** Arrays Sorted, Time Taken : "<< milliseconds<<"****" << endl;
cout<< "**** Arrays Sorted, Time Taken (on CPU) : "<< cpu_time_used<<"****" << endl;
}
// Load totArrays fixed-length arrays of ints from the Futhark-generated
// input file, skipping any non-digit separator characters, and build a
// uniform prefix-offset table (all arrays share the same length).
// Throws a const char* message when the file cannot be read.
dataArrays<int> readFile(int size , int totArrays){
    string fn = "../Futhark/list_" + to_string(size) + "_" + to_string(totArrays) + ".in";
    ifstream file(fn);
    if(!file.good()){
        cout<< "Error reading file " << fn << "\n";
        throw "Can't read file";
    }
    dataArrays<int> data;
    // Consume exactly totArrays*size integers; any non-digit byte is skipped
    // without advancing the element counter.
    int value;
    for(int a = 0; a < totArrays*size && file.good(); a++){
        if(isdigit(file.peek())){
            file >> value;
            data.dataList.push_back(value);
        }
        else {
            file.get();
            a--;
        }
    }
    if(!file.good()){
        cout<< "Error reading file " << fn << "\n";
        throw "Can't read file";
    }
    // Offsets are simple multiples of size since every array has equal length.
    data.prefixArray = new int[totArrays+1];
    int offset = 0;
    for(int a = 0; a <= totArrays; a++)
    {
        data.prefixArray[a] = offset;
        offset += size;
    }
    file.close();
    return data;
}
// Entry point: parse <totArrays> <size>, load the input file, and dispatch
// to the gpuArraySort instantiation matching the compile-time max array size
// (the template parameter sizes the kernels' shared-memory buffers).
int main(int argc, char *argv[]){
    if(argc != 3){
        printf("Need 2 arguments\n");
        return 0;
    }
    int totArrays = stoi(argv[1]);
    int size = stoi(argv[2]);
    printf("%i arrays of length %i\n", totArrays, size);
    // Read pre-generated data (dataGen remains available as an alternative).
    dataArrays<int> newData;
    try {
        newData = readFile(size, totArrays);
    } catch (const char* msg) {
        cerr << msg << endl;
        return 0;
    }
    int *prefixSum = newData.prefixArray;
    // Only the compiled-in sizes can be dispatched.
    switch (size) {
    case 100:
        gpuArraySort<int, 100>(newData, prefixSum, 0, totArrays);
        break;
    case 1000:
        gpuArraySort<int, 1000>(newData, prefixSum, 0, totArrays);
        break;
    case 10000:
        gpuArraySort<int, 10000>(newData, prefixSum, 0, totArrays);
        break;
    default:
        printf("Incorrect arraysizes, we only support 100, 1000 and 10000\n");
    }
}
|
3,924 |
// dst[i] = floor(src[i]) for every i < elements; tail threads are guarded.
template<typename Destination, typename Data>
__global__ void floorArrays(size_t elements, Destination *dst, Data *src) {
    const size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= elements) return;
    dst[gid] = floor(src[gid]);
}
// dst[i] = ceil(src[i]) for every i < elements; tail threads are guarded.
template<typename Destination, typename Data>
__global__ void ceilArrays(size_t elements, Destination *dst, Data *src) {
    const size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= elements) return;
    dst[gid] = ceil(src[gid]);
}
|
3,925 | #include <cuda_runtime.h>
#include <iostream>
#include <string>
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__, deviceID)
#define CudaSyncAndCheckError() __cudaSyncAndCheckError(__FILE__, __LINE__, deviceID)
// Format a CUDA error as a device-tagged message string; returns the empty
// string on success (or always, when CUDA_ERROR_CHECK is disabled). file and
// line are accepted for macro parity but not included in the message.
__host__ inline std::string __cudaSafeCall(cudaError err, const char *file, const int line, const int deviceID)
{
#ifdef CUDA_ERROR_CHECK
    if (err != cudaSuccess)
    {
        return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaGetErrorString(err);
    }
#endif //CUDA_ERROR_CHECK
    return "";
}
// Drain all pending CUDA errors (launch errors first, then any execution
// error surfaced by cudaDeviceSynchronize) into one " <- "-joined message
// string tagged with the device ID; returns "" when everything succeeded.
// cudaGetLastError() both reads and clears the sticky error, so each loop
// terminates once the queue is drained.
__host__ inline std::string __cudaSyncAndCheckError(const char *file, const int line, const int deviceID)
{
cudaError_t response{ cudaSuccess };
std::string cudaErrors{ "" };
#ifdef CUDA_ERROR_CHECK
// Phase 1: errors already recorded (e.g. bad launch configuration).
response = cudaGetLastError();
if (response != cudaSuccess)
{
while (response != cudaSuccess)
{
if (!cudaErrors.empty()) cudaErrors += " <- ";
cudaErrors += cudaGetErrorString(response);
response = cudaGetLastError();
}
return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors;
}
#endif //CUDA_ERROR_CHECK
// Phase 2: synchronize so asynchronous kernel failures surface here.
response = cudaDeviceSynchronize();
if (response != cudaSuccess)
{
while (response != cudaSuccess)
{
if (!cudaErrors.empty()) cudaErrors += " <- ";
cudaErrors += cudaGetErrorString(response);
response = cudaGetLastError();
}
return "CUDA device ID [" + std::to_string(deviceID) + "] encountered an error: " + cudaErrors;
}
return "";
}
|
3,926 | #include <math.h>
#include <cstdio>
#include <cstdlib>
#include <time.h>
#define GIG 1000000000
#define NANO_TO_MILLI 1000000
#define CPG 2.8 // Cycles per GHz -- Adjust to your computer
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with file/line context; exits with the error
// code unless abort is false.
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
const int MATRIX_SIZE = 2000;
const int NUM_BLOCKS = 1;
const int THREADS_PER_BLOCK_X = 16;
const int THREADS_PER_BLOCK_Y = 16;
const int SOR_ITERATIONS = 2000;
const int OMEGA = 1;
#define ALLOCATE_AND_INIT
#define TRANSFER_TO_GPU
#define LAUNCH_KERNEL
#define TRANSFER_RESULTS
#define COMPUTE_CPU_RESULTS
#define COMPARE_RESULTS
#define FREE_MEMORY
#define GPU_TIMING
#define CPU_TIMING
#define DEBUG_PRINT
#define WRITE_2D_ARRAYS
void initialize_array_2D(float **A, int len, int seed);
// One in-place SOR sweep: each thread updates its tile of the matrix.
// Fixes: yy was derived from blockIdx.x (wrong axis — previously benign only
// because the grid is launched as 1x1), and abs() is replaced by fabsf() so
// the single-precision overload is used unambiguously in device code.
// NOTE(review): the update is in place with no synchronization across tile
// boundaries, so neighbouring tiles race; results will not match the
// sequential CPU sweep exactly — confirm this relaxation is intended.
__global__ void kernel_SOR_internal(float *A, int omega, int N_x, int N_y){
    int i, j = 0;
    int xx = blockIdx.x * blockDim.x + threadIdx.x;
    int yy = blockIdx.y * blockDim.y + threadIdx.y;   // was blockIdx.x
    float phi;
    // Each thread sweeps a (MATRIX_SIZE/THREADS_PER_BLOCK) strip in each axis.
    for(i = MATRIX_SIZE/THREADS_PER_BLOCK_X*xx; i < MATRIX_SIZE/THREADS_PER_BLOCK_X*(xx+1); i++){
        for(j = MATRIX_SIZE/THREADS_PER_BLOCK_Y*yy; j < MATRIX_SIZE/THREADS_PER_BLOCK_Y*(yy+1); j++){
            if(i > 0 && i < (N_x-1) && j > 0 && j < (N_y-1)){
                phi = A[i*MATRIX_SIZE + j] - .25*((A[(i-1)*MATRIX_SIZE + j] + A[(i+1)*MATRIX_SIZE+j]) + (A[i*MATRIX_SIZE + (j-1)] + A[i*MATRIX_SIZE+(j+1)]));
                A[i*MATRIX_SIZE+j] = fabsf(A[i*MATRIX_SIZE+j] - (phi*omega));
            }
        }
    }
}
// CPU reference for one SOR update at (xx, yy); interior points only, border
// cells are untouched.
// Fix: abs() can resolve to the integer overload here (truncating the float
// residual to a whole number); fabsf() keeps full single precision and
// mirrors the GPU kernel.
void SOR_internal_sequential(float **A, int omega, int xx, int yy, int N_x, int N_y){
    float phi;
    if(xx > 0 && xx < N_x-1 && yy > 0 && yy < N_y-1){
        phi = A[xx][yy] - .25*((A[xx-1][yy] + A[xx+1][yy]) + (A[xx][yy-1] + A[xx][yy+1]));
        A[xx][yy] = fabsf(A[xx][yy] - (phi*omega));
    }
}
// Write A (N_x rows by N_y columns) to filename as comma-separated rows.
// NOTE(review): fopen's result is not checked, matching the original.
void write_2d_array_to_file(float **A, int N_x, int N_y, char *filename){
    FILE *f = fopen(filename, "w");
    for(int r = 0; r < N_x; r++){
        // Comma-space after every value except the last in the row.
        for(int c = 0; c < N_y-1; c++){
            fprintf(f,"%f, ",A[r][c]);
        }
        fprintf(f, "%f\n", A[r][N_y-1]);
    }
    fclose(f);
}
// Host driver: allocate a MATRIX_SIZE^2 grid, run SOR_ITERATIONS kernel
// sweeps on the GPU, compare against a sequential CPU reference, and dump
// both results. Feature sections are gated by the #define switches above.
// Fixes: both per-row cudaMemcpy calls passed MATRIX_SIZE as the BYTE count
// (copying only a quarter of each float row, leaving the rest of device/host
// rows stale); they now pass MATRIX_SIZE*sizeof(float). main also returns a
// plain int instead of (float)0.
int main(int argc, char **argv){
//loop variables
int i, j, k, errors = 0;
//timing variables
cudaEvent_t start, stop;
float elapsed_gpu;
struct timespec diff(struct timespec start, struct timespec end);
struct timespec time1, time2, elapsed_cpu;
//array dimensions
dim3 dimGrid(NUM_BLOCKS,1,1);
dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y,1);
//Arrays on GPU global memory
float *g_A;
//Arrays on host memory
float **h_A;
float **h_A_test;
//error file init
FILE *f = fopen("mismatches.txt", "w");
if(f == NULL){
printf("Error opening file!\n");
exit(1);
}
#ifdef DEBUG_PRINT
printf("init done\n");
#endif
//Allocate arrays on GPU memory
#ifdef ALLOCATE_AND_INIT
CUDA_SAFE_CALL(cudaMalloc((void **) &g_A, MATRIX_SIZE*MATRIX_SIZE*sizeof(float)));
//Allocate arrays on host memory (both seeded identically for comparison)
h_A = (float**) malloc(MATRIX_SIZE * sizeof(float*));
h_A_test = (float**) malloc(MATRIX_SIZE * sizeof(float*));
for(i = 0; i < MATRIX_SIZE; i++){
h_A[i] = (float*) malloc(MATRIX_SIZE * sizeof(float));
h_A_test[i] = (float*) malloc(MATRIX_SIZE * sizeof(float));
}
//initialize host arrays
initialize_array_2D(h_A, MATRIX_SIZE, 2453);
initialize_array_2D(h_A_test, MATRIX_SIZE, 2453);
#endif
#ifdef GPU_TIMING
//create cuda events and record the start on the default stream
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
#ifdef DEBUG_PRINT
printf("all init done\n");
#endif
//transfer array to GPU memory, one full row at a time
#ifdef TRANSFER_TO_GPU
for(i = 0; i < MATRIX_SIZE; i++){
// was: byte count of MATRIX_SIZE — copied only MATRIX_SIZE bytes per row
CUDA_SAFE_CALL(cudaMemcpy(&g_A[i*MATRIX_SIZE], h_A[i], MATRIX_SIZE*sizeof(float), cudaMemcpyHostToDevice));
}
#endif
//launch the kernel once per SOR iteration
#ifdef LAUNCH_KERNEL
for(i = 0; i < SOR_ITERATIONS; i++){
kernel_SOR_internal<<<dimGrid, dimBlock>>>(g_A, OMEGA, MATRIX_SIZE, MATRIX_SIZE);
}
#endif
//check for errors during launch (peek keeps the sticky error intact)
CUDA_SAFE_CALL(cudaPeekAtLastError());
#ifdef DEBUG_PRINT
printf("kernel run\n");
#endif
//transfer results back to host, full rows again
#ifdef TRANSFER_RESULTS
for(i = 0; i < MATRIX_SIZE; i++){
// was: byte count of MATRIX_SIZE — see fix note above
CUDA_SAFE_CALL(cudaMemcpy(h_A[i], &g_A[i*MATRIX_SIZE], MATRIX_SIZE*sizeof(float), cudaMemcpyDeviceToHost));
}
#endif
//stop and destroy the timer
#ifdef GPU_TIMING
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
cudaEventDestroy(start);
cudaEventDestroy(stop);
#endif
#ifdef DEBUG_PRINT
printf("results transfered\n");
#endif
//compute results on host
#ifdef CPU_TIMING
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
#endif
#ifdef COMPUTE_CPU_RESULTS
for(i = 0; i < SOR_ITERATIONS; i++){
for(j = 0; j < MATRIX_SIZE; j++){
for(k = 0; k < MATRIX_SIZE; k++){
SOR_internal_sequential(h_A_test, OMEGA, j, k, MATRIX_SIZE, MATRIX_SIZE);
}
}
}
#endif
#ifdef CPU_TIMING
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
elapsed_cpu = diff(time1,time2);
printf("\nCPU time: %f(msec)\n", (float)(((double)GIG*elapsed_cpu.tv_sec + elapsed_cpu.tv_nsec)/(double)NANO_TO_MILLI));
#endif
#ifdef DEBUG_PRINT
printf("results computed on CPU\n");
#endif
//compare results element by element, logging mismatches to file
#ifdef COMPARE_RESULTS
for(i = 0; i < MATRIX_SIZE; i++){
for(j = 0; j < MATRIX_SIZE; j++){
if(h_A[i][j] != h_A_test[i][j]){
errors++;
fprintf(f, "Mismatch at [%d,%d] GPU = %f CPU = %f\n", i, j, h_A[i][j], h_A_test[i][j]);
}
}
}
#endif
#ifdef DEBUG_PRINT
printf("results checked\n");
#endif
#ifdef WRITE_2D_ARRAYS
write_2d_array_to_file(h_A, MATRIX_SIZE, MATRIX_SIZE, "GPU_output.txt");
write_2d_array_to_file(h_A_test, MATRIX_SIZE, MATRIX_SIZE, "CPU_output.txt");
#endif
//errors
printf("Found %d errors\n", errors);
fclose(f);
//free up memory
#ifdef FREE_MEMORY
CUDA_SAFE_CALL(cudaFree(g_A));
for(i = 0; i < MATRIX_SIZE; i++){
free(h_A[i]);
free(h_A_test[i]);
}
free(h_A);
free(h_A_test);
#ifdef DEBUG_PRINT
printf("arrays freed\n");
#endif
#endif
return 0;
}
/* Fill a len x len matrix with pseudo-random float values.
 * The PRNG is seeded once, so a given seed reproduces the same matrix. */
void initialize_array_2D(float **A, int len, int seed){
    srand(seed);  /* one seed per fill => deterministic contents */
    for (int row = 0; row < len; row++) {
        float *r = A[row];
        for (int col = 0; col < len; col++) {
            r[col] = (float) rand();
        }
    }
}
/* Elapsed time end - start, normalized so tv_nsec lies in [0, 1e9). */
struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec delta;
    delta.tv_sec  = end.tv_sec  - start.tv_sec;
    delta.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (delta.tv_nsec < 0) {      /* borrow one second */
        delta.tv_sec  -= 1;
        delta.tv_nsec += 1000000000;
    }
    return delta;
}
|
3,927 | //
// main.cpp
//
//
// Created by Elijah Afanasiev on 25.09.2018.
//
//
// System includes
#include <assert.h>
#include <stdio.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// Element-wise c = a + b over an N-element sub-range beginning at `offset`.
// One thread per element; threads with index >= N do nothing.
__global__ void vectorAddGPU(float* a, float* b, float* c, int N, int offset) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N) return;
    const int k = offset + tid;
    c[k] = a[k] + b[k];
}
// Single-stream baseline: two H2D copies plus one vector-add kernel, timed
// with CUDA events. Prints the elapsed GPU time; the result is not copied back
// (this function is a timing demo only).
// Fixes vs. original: byte count computed with sizeof(float) instead of
// sizeof(int) (accidentally the same size, but wrong type), host buffers are
// now freed, and the timing events are destroyed.
void sample_vec_add(int size = 1048576) {
    int n = size;
    int nBytes = n * sizeof(float);  // was sizeof(int)
    float *a, *b, *c;
    a = (float*)malloc(nBytes);
    b = (float*)malloc(nBytes);
    c = (float*)malloc(nBytes);
    float *d_A, *d_B, *d_C;
    dim3 block(256);
    dim3 grid((unsigned int)ceil(n / (float)block.x));  // ceil-div so the tail is covered
    for (int i = 0; i < n; i++) {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
        c[i] = 0;
    }
    cudaMalloc((void**)&d_A, nBytes);
    cudaMalloc((void**)&d_B, nBytes);
    cudaMalloc((void**)&d_C, nBytes);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Timed region covers the copies and the kernel launch.
    cudaEventRecord(start);
    cudaMemcpy(d_A, a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, b, nBytes, cudaMemcpyHostToDevice);
    vectorAddGPU<<<grid, block>>>(d_A, d_B, d_C, n, 0);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Streams Used: 0\nGPU Elapsed time: %f ms\n", milliseconds);
    cudaDeviceSynchronize();
    // Release all resources (the original leaked the host buffers and events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(a);
    free(b);
    free(c);
}
// Multi-stream variant: the vectors are split into `stream_count` chunks and
// each chunk's H2D copies, kernel, and D2H copies are issued on its own stream
// so transfers and compute can overlap. Host buffers are pinned
// (cudaHostAlloc) to allow truly asynchronous copies.
// Fixes vs. original: the streams and timing events are now destroyed.
// NOTE(review): assumes n is divisible by stream_count; a non-divisible n
// leaves a tail unprocessed — confirm callers only pass multiples of 4.
void streams_vec_add(int size = 1048576) {
    int n = size;
    int nBytes = n * sizeof(float);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float *a, *b, *c;
    cudaHostAlloc((void**)&a, nBytes, cudaHostAllocDefault);
    cudaHostAlloc((void**)&b, nBytes, cudaHostAllocDefault);
    cudaHostAlloc((void**)&c, nBytes, cudaHostAllocDefault);
    float *d_A, *d_B, *d_C;
    for (int i = 0; i < n; i++) {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
        c[i] = 0;
    }
    cudaMalloc((void**)&d_A, nBytes);
    cudaMalloc((void**)&d_B, nBytes);
    cudaMalloc((void**)&d_C, nBytes);
    cudaEventRecord(start);
    const int stream_count = 4;
    const int stream_size = n / stream_count;
    cudaStream_t Stream[stream_count];
    for (int i = 0; i < stream_count; i++)
        cudaStreamCreate(&Stream[i]);
    dim3 block(1024);
    dim3 grid((stream_size - 1) / 1024 + 1);  // ceil-div over one chunk
    for (int i = 0; i < stream_count; i++) {
        int offset = i * stream_size;
        cudaMemcpyAsync(&d_A[offset], &a[offset], stream_size * sizeof(float),
                        cudaMemcpyHostToDevice, Stream[i]);
        cudaMemcpyAsync(&d_B[offset], &b[offset], stream_size * sizeof(float),
                        cudaMemcpyHostToDevice, Stream[i]);
        cudaMemcpyAsync(&d_C[offset], &c[offset], stream_size * sizeof(float),
                        cudaMemcpyHostToDevice, Stream[i]);
        // Kernel on stream 0's default? No: launch lacks a stream arg in the
        // original, so it runs on the default stream — preserved as-is.
        vectorAddGPU<<<grid, block>>>(d_A, d_B, d_C, stream_size, offset);
        cudaMemcpyAsync(&a[offset], &d_A[offset], stream_size * sizeof(float),
                        cudaMemcpyDeviceToHost, Stream[i]);
        cudaMemcpyAsync(&b[offset], &d_B[offset], stream_size * sizeof(float),
                        cudaMemcpyDeviceToHost, Stream[i]);
        cudaMemcpyAsync(&c[offset], &d_C[offset], stream_size * sizeof(float),
                        cudaMemcpyDeviceToHost, Stream[i]);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float msecs = 0;
    cudaEventElapsedTime(&msecs, start, stop);
    std::cout << "Streams Used: " << stream_count
              << "\nGPU Elapsed Time : " << msecs << " ms.\n";
    cudaDeviceSynchronize();
    // Release all resources (the original leaked the streams and events).
    for (int i = 0; i < stream_count; i++)
        cudaStreamDestroy(Stream[i]);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
}
// Runs the single-stream and multi-stream benchmarks back to back.
// Fixed: the original dereferenced argv[1] unconditionally and crashed when
// launched without an argument; a default size is now used instead.
int main(int argc, char** argv) {
    int size = (argc > 1) ? atoi(argv[1]) : 1048576;  // default matches the functions' defaults
    sample_vec_add(size);
    std::cout << "---------------------\n---------------------\n";
    streams_vec_add(size);
    return 0;
}
|
3,928 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// NOTE(review): despite its name, this kernel does NOT transpose. For the
// element at row v, column threadIdx.x of a blockDim.x-wide matrix it computes
// t[n] = a[n]^(v+1) via powf — i.e. row r is raised to the (r+1)-th power.
// Expected launch: one block of (n, n) threads (see the caller below).
__global__ void transpose(int *a,int *t)
{
int v = threadIdx.y;                 // row index within the block
int n = v*blockDim.x+threadIdx.x;    // flat element index
int ta = (int)powf(a[n],v+1);        // float pow truncated back to int — may lose precision for large values
t[n] = ta;
}
// Reads an n x n integer matrix from stdin, raises each row r to the power
// r+1 on the GPU (see `transpose` above), and prints the result.
// NOTE(review): the whole matrix is launched as ONE block of (n, n) threads,
// so n*n must not exceed the device's maxThreadsPerBlock (typically 1024,
// i.e. n <= 32); larger n silently fails since the launch error is not checked.
int main(void)
{
int *a,*t,n,i,j;
int *d_a,*d_t;
printf("Enter the value of n: ");
scanf("%d",&n);
int size = sizeof(int)*n*n;
a = (int*)malloc(n*n*sizeof(int));
t = (int*)malloc(n*n*sizeof(int));
printf("Enter input matrix: \n");
for(i = 0;i<n*n;i++)
scanf("%d",&a[i]);
cudaMalloc((void**)&d_a,size);
cudaMalloc((void**)&d_t,size);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
dim3 threadsPerBlock(n,n);   // single block holds the entire matrix
dim3 numBlocks(1,1);
transpose<<<numBlocks,threadsPerBlock>>>(d_a,d_t);
// Blocking copy also synchronizes with the kernel before reading results.
cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
printf("Result vector is :\n");
for(i = 0;i<n;i++)
{
for(j = 0;j<n;j++)
printf("%d ",t[i*n+j]);
printf("\n");
}
getchar();
cudaFree(d_a);
cudaFree(d_t);
return 0;
}
3,929 | #include "shared.cuh"
// Zeroes one element of `all_thread_ids` per thread.
// NOTE(review): `thread_id()` comes from shared.cuh (not visible here) —
// presumably a flat global thread index; confirm it stays within the array
// bounds for the launch configuration used by callers.
__global__ void memset_zero(int* all_thread_ids) {
  int i = thread_id();
  all_thread_ids[i] = 0;
}
|
3,930 | /**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 4096
#define NJ 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* CPU reference: 3x3 convolution of A into B over the interior of an NI x NJ
 * grid (the one-cell border of B is left untouched). Coefficient layout and
 * summation order match the GPU kernel's math exactly. */
void conv2D(DATA_TYPE* A, DATA_TYPE* B)
{
	/* Fixed 3x3 stencil coefficients (cRC = row R, column C). */
	const DATA_TYPE c11 = +0.2, c21 = +0.5, c31 = -0.8;
	const DATA_TYPE c12 = -0.3, c22 = +0.6, c32 = -0.9;
	const DATA_TYPE c13 = +0.4, c23 = +0.7, c33 = +0.10;
	for (int i = 1; i < NI - 1; ++i)
	{
		for (int j = 1; j < NJ - 1; ++j)
		{
			B[i*NJ + j] = c11 * A[(i - 1)*NJ + (j - 1)] + c12 * A[(i + 0)*NJ + (j - 1)] + c13 * A[(i + 1)*NJ + (j - 1)]
			            + c21 * A[(i - 1)*NJ + (j + 0)] + c22 * A[(i + 0)*NJ + (j + 0)] + c23 * A[(i + 1)*NJ + (j + 0)]
			            + c31 * A[(i - 1)*NJ + (j + 1)] + c32 * A[(i + 0)*NJ + (j + 1)] + c33 * A[(i + 1)*NJ + (j + 1)];
		}
	}
}
/* Fill the NI x NJ input array with pseudo-random values in [0, 1].
 * Uses the C PRNG without reseeding, so the sequence depends on prior
 * rand()/srand() calls in the process. */
void init(DATA_TYPE* A)
{
	for (int i = 0; i < NI; ++i)
	{
		DATA_TYPE* row = &A[i*NJ];
		for (int j = 0; j < NJ; ++j)
		{
			row[j] = (float)rand()/RAND_MAX;
		}
	}
}
// Counts interior cells where CPU and GPU results differ by more than
// PERCENT_DIFF_ERROR_THRESHOLD percent and prints the count. The one-cell
// border is skipped because neither implementation writes it.
// `percentDiff` is provided by ../../common/polybenchUtilFuncts.h.
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
	int i, j, fail;
	fail = 0;
	// Compare a and b
	for (i=1; i < (NI-1); i++)
	{
		for (j=1; j < (NJ-1); j++)
		{
			if (percentDiff(B[i*NJ + j], B_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
			{
				fail++;
			}
		}
	}
	// Print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Selects GPU_DEVICE as the active CUDA device for all subsequent calls and
// announces its name on stdout.
void GPU_argv_init()
{
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
	printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
	cudaSetDevice( GPU_DEVICE );
}
// 3x3 convolution kernel: one thread per output cell; j maps to the x
// (column) dimension so adjacent threads access adjacent columns — coalesced
// global loads. The guard restricts writes to the interior of the NI x NJ
// grid, matching the CPU reference conv2D.
__global__ void Convolution2D_kernel(DATA_TYPE *A, DATA_TYPE *B)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;   // column
	int i = blockIdx.y * blockDim.y + threadIdx.y;   // row
	DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
	c11 = +0.2;  c21 = +0.5;  c31 = -0.8;
	c12 = -0.3;  c22 = +0.6;  c32 = -0.9;
	c13 = +0.4;  c23 = +0.7;  c33 = +0.10;
	if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0)) // interior cells only
	{
		B[i * NJ + j] =  c11 * A[(i - 1) * NJ + (j - 1)]  + c21 * A[(i - 1) * NJ + (j + 0)] + c31 * A[(i - 1) * NJ + (j + 1)]
			+ c12 * A[(i + 0) * NJ + (j - 1)]  + c22 * A[(i + 0) * NJ + (j + 0)] + c32 * A[(i + 0) * NJ + (j + 1)]
			+ c13 * A[(i + 1) * NJ + (j - 1)]  + c23 * A[(i + 1) * NJ + (j + 0)] + c33 * A[(i + 1) * NJ + (j + 1)];
	}
}
// Allocates device buffers, runs the 2D convolution kernel once, and copies
// the result into B_outputFromGpu. Prints the kernel wall-clock time.
// Fixed: cudaThreadSynchronize() is deprecated since CUDA 4.0 — replaced by
// the equivalent cudaDeviceSynchronize().
void convolution2DCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
	double t_start, t_end;
	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;
	cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ);
	cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ);
	cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	// Ceil-div so a grid of blocks fully covers the NI x NJ domain.
	dim3 grid((size_t)ceil( ((float)NI) / ((float)block.x) ), (size_t)ceil( ((float)NJ) / ((float)block.y)) );
	t_start = rtclock();
	Convolution2D_kernel<<<grid,block>>>(A_gpu,B_gpu);
	cudaDeviceSynchronize();  // wait for the kernel so the timing is meaningful
	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
	// Blocking copy: safe to read B_outputFromGpu afterwards.
	cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyDeviceToHost);
	cudaFree(A_gpu);
	cudaFree(B_gpu);
}
// Benchmark driver: runs the convolution on the GPU, then on the CPU, and
// compares the two outputs within a percentage tolerance.
// NOTE(review): malloc results are not checked; with NI=NJ=4096 each buffer
// is 64 MB, so a failed allocation would crash in init().
int main(int argc, char *argv[])
{
	double t_start, t_end;
	DATA_TYPE* A;
	DATA_TYPE* B;
	DATA_TYPE* B_outputFromGpu;
	A = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
	B = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
	B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
	//initialize the arrays
	init(A);
	GPU_argv_init();
	convolution2DCuda(A, B, B_outputFromGpu);
	t_start = rtclock();
	conv2D(A, B);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);//);
	compareResults(B, B_outputFromGpu);
	free(A);
	free(B);
	free(B_outputFromGpu);
	return 0;
}
|
3,931 | /// Convolution 1D Parallel.
///
/// Implementation of a 1-dimensional convolution in CUDA, with a placeholding
/// mask and shared memory usage.
///
/// Authors:
/// Lucas Oliveira David.
/// Paulo Finardi.
///
/// Note (in Brazilian Portuguese):
/// Como nosso trabalho final e' relacionado `a redes convolucionais,
/// possuindo um operador convolucao implementado em CUDA, ambos os alunos
/// fizeram esta ultima tarefa juntos.
///
/// ___________________________________________________________________________
/// | Performance Analysis |
/// ___________________________________________________________________________
/// | input | CPU_Serial | GPU_NOShared | GPU_Shared | Speedup (CPU/GPUSM) |
/// ___________________________________________________________________________
/// | arq1.in | 0.080165 | 0.110547 | 0.104868 | 0.7644371972384331 |
/// ___________________________________________________________________________
/// | arq2.in | 9.934354 | 8.289333 | 8.235395 | 1.2062996371151598 |
/// ___________________________________________________________________________
/// | arq3.in | 100.419526 | 96.001373 | 91.386422 | 1.0988451435378443 |
/// ___________________________________________________________________________
///
/// License: MIT (c) 2016
///
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include <math.h>
#include<cuda_runtime_api.h>
#define MASK_WIDTH 101
#define OUT_TILE_WIDTH 512
// Tiled 1D convolution: out[i] = sum_j N[i - (MASK_WIDTH-1)/2 + j] * M[j],
// with zero padding outside [0, n). Each block computes OUT_TILE_WIDTH
// outputs. Both the input halo tile and the mask are staged in shared memory;
// the while-loops let OUT_TILE_WIDTH threads cooperatively load the
// OUT_TILE_WIDTH + MASK_WIDTH - 1 input elements.
__global__ void _k_conv1d(int *N, int *M, int *out, int n)
{
  __shared__ int mask_s[MASK_WIDTH];
  __shared__ int N_s[OUT_TILE_WIDTH + MASK_WIDTH - 1];
  int i_out    = blockIdx.x*OUT_TILE_WIDTH + threadIdx.x,
      i_in     = i_out - (MASK_WIDTH -1) / 2,   // left edge of this thread's halo load
      i_shared = threadIdx.x;
  while (i_shared < OUT_TILE_WIDTH + MASK_WIDTH -1)
  {
    // Loads N into the shared memory (zero-pad outside the array).
    if (i_in > -1 && i_in < n)
      N_s[i_shared] = N[i_in];
    else
      N_s[i_shared] = 0;
    i_in     += OUT_TILE_WIDTH;
    i_shared += OUT_TILE_WIDTH;
  }
  // Stage the mask as well; strided so it works for MASK_WIDTH != blockDim.x.
  i_shared = threadIdx.x;
  while (i_shared < MASK_WIDTH)
  {
    mask_s[i_shared] = M[i_shared];
    i_shared += OUT_TILE_WIDTH;
  }
  __syncthreads();   // all shared-memory writes visible before any reads
  if (i_out < n)
  {
    int output = 0;
    for(int j = 0; j < MASK_WIDTH; j++)
      output += N_s[threadIdx.x + j] * mask_s[j];
    out[i_out] = output;
  }
}
// Reads n and an n-element vector from stdin, convolves it with a 0..100 ramp
// mask on the GPU, and prints the result.
// Fixed: d_output was never freed (device memory leak); cudaFree(d_output)
// added alongside the other frees.
int main()
{
  int n;
  scanf("%d",&n);
  int size = n*sizeof(int);
  int *input  = (int *)malloc(size);
  int *mask   = (int *)malloc(sizeof(int)*MASK_WIDTH);
  int *output = (int *)malloc(size);
  int *d_input;
  int *d_mask;
  int *d_output;
  cudaMalloc((void **)&d_input, size);
  cudaMalloc((void **)&d_mask, sizeof(int)*MASK_WIDTH);
  cudaMalloc((void **)&d_output, size);
  for(int i = 0; i < n; i++)
    scanf("%d", &input[i]);
  for(int i = 0; i < MASK_WIDTH; i++)
    mask[i] = i;   // placeholder mask: 0, 1, ..., MASK_WIDTH-1
  cudaMemcpy(d_input, input, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_mask, mask, sizeof(int)*MASK_WIDTH, cudaMemcpyHostToDevice);
  dim3 dimGrid((n-1) / OUT_TILE_WIDTH + 1, 1, 1);  // ceil-div over output tiles
  dim3 dimBlock(OUT_TILE_WIDTH, 1, 1);
  _k_conv1d<<<dimGrid, dimBlock>>>(d_input, d_mask, d_output, n);
  cudaDeviceSynchronize();
  cudaMemcpy(output, d_output, size, cudaMemcpyDeviceToHost);
  for(int i = 0; i < n; i++)
    printf("%d ", output[i]);
  printf("\n");
  cudaFree(d_input);
  cudaFree(d_mask);
  cudaFree(d_output);  // was missing: device memory leak
  free(input);
  free(output);
  free(mask);
  return 0;
}
|
3,932 | #include <iostream>
#include <complex>
#include <math.h>
#include <thrust/complex.h>
#include <sys/time.h>
#include <cassert>
using namespace std;
__constant__ const int block_1 = 16;
__constant__ const int block_2 = 8;
// Report both launch-time ("sync") and execution-time ("async") CUDA errors.
// cudaGetLastError catches bad launch configurations; cudaDeviceSynchronize
// surfaces faults that occurred while the kernel was running.
void checkError(){
    const cudaError_t launchErr = cudaGetLastError();
    const cudaError_t execErr   = cudaDeviceSynchronize();
    if (launchErr != cudaSuccess)
        printf("Sync kernel error: %s\n", cudaGetErrorString(launchErr));
    if (execErr != cudaSuccess)
        printf("Async kernel error: %s\n", cudaGetErrorString(execErr));
}
// One radix-16 FFT stage. Each block of 16 threads gathers 16 strided samples
// (stride l/16) of a length-l sub-transform into double-buffered shared
// memory, bit-reverses the lane order, runs log2(16) butterfly passes, and
// writes back with the twiddle factor e^(-2*pi*i*thid*blidy/l) applied.
// Launch: grid (n/l, l/16), block (16), dynamic smem = 2*16 complex doubles.
__global__
void fft_16(thrust::complex<double> *x, int l){
const int n = block_1;
int blidx = blockIdx.x, blidy = blockIdx.y;
int k = blidx*l+blidy;      // base offset of this sub-transform
int step = l/n;             // stride between the 16 samples this block owns
extern __shared__ thrust::complex<double> temp[];  // double buffer: [2][n]
int thid = threadIdx.x;
int pout=0, pin=1;          // ping-pong buffer indices
temp[pout*n + thid] = x[k+thid*step];
__syncthreads();
// Bit-reverse the lane index (b = log2(n); the +1 guards float truncation).
int thid1;
thid1 = 0;
int b = __log2f(n+1);
for(int i=0; i<b;i++){
if(thid & (1<<i))
thid1 |= (1<<(b-1-i));
}
pout = 1 - pout;
pin = 1 - pin;
temp[pout*n + thid] = temp[pin*n + thid1];
__syncthreads();
// log2(n) butterfly passes; partner lane is thid ^ i at span i.
for(int i=1; i<n; i*=2){
pout = 1 - pout;
pin = 1 - pin;
thid1 = thid ^ i;
thrust::complex<double> factor(cos(-M_PI*thid/i), sin(-M_PI*thid/i));
if(thid1 > thid){
temp[pout*n + thid] = temp[pin*n + thid] + factor * temp[pin*n + thid1];
}
else{
temp[pout*n + thid] = temp[pin*n + thid1] + factor * temp[pin*n + thid];
}
__syncthreads();
}
// Apply the inter-stage twiddle before writing back to global memory.
thrust::complex<double> factor = thrust::complex<double>(cos(-M_PI*2*thid*blidy/l), sin(-M_PI*2*thid*blidy/l));
x[k+thid*step] = factor * temp[pout*n + thid];
}
// One radix-8 FFT stage; identical structure to fft_16 but with block_2 (= 8)
// lanes per sub-transform. See fft_16 for the algorithm commentary.
// Launch: grid (n/l, l/8), block (8), dynamic smem = 2*8 complex doubles.
__global__
void fft_8(thrust::complex<double> *x, int l){
const int n = block_2;
int blidx = blockIdx.x, blidy = blockIdx.y;
int k = blidx*l+blidy;
int step = l/n;
extern __shared__ thrust::complex<double> temp[];  // double buffer: [2][n]
int thid = threadIdx.x;
int pout=0, pin=1;
temp[pout*n + thid] = x[k+thid*step];
__syncthreads();
// Bit-reverse the lane index.
int thid1;
thid1 = 0;
int b = __log2f(n+1);
for(int i=0; i<b;i++){
if(thid & (1<<i))
thid1 |= (1<<(b-1-i));
}
pout = 1 - pout;
pin = 1 - pin;
temp[pout*n + thid] = temp[pin*n + thid1];
__syncthreads();
// log2(n) butterfly passes.
for(int i=1; i<n; i*=2){
pout = 1 - pout;
pin = 1 - pin;
thid1 = thid ^ i;
thrust::complex<double> factor(cos(-M_PI*thid/i), sin(-M_PI*thid/i));
if(thid1 > thid){
temp[pout*n + thid] = temp[pin*n + thid] + factor * temp[pin*n + thid1];
}
else{
temp[pout*n + thid] = temp[pin*n + thid1] + factor * temp[pin*n + thid];
}
__syncthreads();
}
// Inter-stage twiddle, then write back.
thrust::complex<double> factor = thrust::complex<double>(cos(-M_PI*2*thid*blidy/l), sin(-M_PI*2*thid*blidy/l));
x[k+thid*step] = factor * temp[pout*n + thid];
}
// Final radix-16 stage: performs the last length-16 transform (no inter-stage
// twiddle needed, so blidy is fixed at 0) and scatters the results from x into
// y in digit-reversed output order. The digit reversal mixes x2 base-8 digits
// and x1-1 base-16 digits, matching the stage sequence driven by fft() below.
__global__
void fft_last(thrust::complex<double> *x, thrust::complex<double> *y, int x1, int x2){
const int n = block_1;
int blidx = blockIdx.x;
int l=block_1, blidy=0;
int k = blidx*l+blidy;
int step = l/n;
extern __shared__ thrust::complex<double> temp[];  // double buffer: [2][n]
int thid = threadIdx.x;
int pout=0, pin=1;
temp[pout*n + thid] = x[k+thid*step];
__syncthreads();
// Bit-reverse the lane index.
int thid1;
thid1 = 0;
int b = __log2f(n+1);
for(int i=0; i<b;i++){
if(thid & (1<<i))
thid1 |= (1<<(b-1-i));
}
pout = 1 - pout;
pin = 1 - pin;
temp[pout*n + thid] = temp[pin*n + thid1];
__syncthreads();
// log2(n) butterfly passes.
for(int i=1; i<n; i*=2){
pout = 1 - pout;
pin = 1 - pin;
thid1 = thid ^ i;
thrust::complex<double> factor(cos(-M_PI*thid/i), sin(-M_PI*thid/i));
if(thid1 > thid){
temp[pout*n + thid] = temp[pin*n + thid] + factor * temp[pin*n + thid1];
}
else{
temp[pout*n + thid] = temp[pin*n + thid1] + factor * temp[pin*n + thid];
}
__syncthreads();
}
// Compute the digit-reversed destination index: x2 base-8 digits followed by
// x1-1 base-16 digits of the block index, appended after the lane index.
int p = blidx;
int j = thid;
int loc = j;
for(int k=0; k<x2; k++){
int t = p&(block_2-1);
loc = loc*block_2+t;
p = p/block_2;
}
for(int k=0; k<x1-1; k++){
int t = p&(block_1-1);
loc = loc*block_1+t;
p = p/block_1;
}
y[loc] = temp[pout*n + thid];
}
// Host driver: factors the transform length n into x1 radix-16 stages and x2
// radix-8 stages (m = log2(n), with +1 guarding float truncation in log2),
// launches the stage kernels in sequence, and finishes with fft_last which
// also reorders the output into y.
// NOTE(review): if no (x1, x2) decomposition exists for this n, x1/x2 stay
// uninitialized and the launch loops read garbage — confirm callers only
// pass n of the form 16^a * 8^b with a >= 1.
void fft(thrust::complex<double> *x, thrust::complex<double> *y, int n){
int m = log2(n+1);
int x1,x2, log_block_1=log2(block_1+1), log_block_2=log2(block_2+1);
// Find a decomposition m = x1*log2(16) + x2*log2(8).
for(int i=0; i<log_block_1; i++){
if((m-log_block_2*i)%log_block_1 == 0){
x1 = (m-log_block_2*i)/log_block_1;
x2=i;
}
}
int l = n;
// x1-1 radix-16 stages, each shrinking the sub-transform length by 16x.
for(int i=0; i<x1-1; i++){
dim3 grid(n/l, l/block_1, 1);
dim3 block(block_1,1,1);
fft_16<<<grid, block, 2*block_1*sizeof(thrust::complex<double>)>>>(x, l);
checkError();
l/=block_1;
}
// x2 radix-8 stages.
for(int i=0; i<x2; i++){
dim3 grid(n/l, l/block_2, 1);
dim3 block(block_2,1,1);
fft_8<<<grid, block, 2*block_2*sizeof(thrust::complex<double>)>>>(x, l);
checkError();
l/=block_2;
}
assert(l==block_1);  // exactly one length-16 transform remains per group
dim3 grid(n/l, 1, 1);
dim3 block(block_1,1,1);
fft_last<<<grid, block, 2*block_1*sizeof(thrust::complex<double>)>>>(x, y, x1, x2);
}
// Reads n complex samples (as integer pairs) from stdin into unified memory,
// times the GPU FFT with CUDA events, and prints the elapsed milliseconds.
// The result-printing loop at the bottom is intentionally commented out.
// NOTE(review): x and y are never cudaFree'd and the events are never
// destroyed — acceptable for a one-shot benchmark, but worth fixing.
int main(){
int n;
cin>>n;
thrust::complex<double> *x, *y;
cudaMallocManaged(&x, n*sizeof(thrust::complex<double>));
cudaMallocManaged(&y, n*sizeof(thrust::complex<double>));
for(int i=0; i<n; i++){
int t,u; cin>>t>>u;
x[i] = thrust::complex<double>(t, u);
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
fft(x, y, n);
checkError();  // also synchronizes, so the stop event brackets the full FFT
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout<<milliseconds;
for(int i=0; i<n; i++){
// if(i%(n/block_1)==0) cout<<endl;
// cout<<y[i]<<"\n";
}
}
3,933 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 2048
#define THREADS_PER_BLOCK 50
// Element-wise c = a + b, one thread per element, with per-thread debug
// printf (very slow — debugging only).
// NOTE(review): there is no `index < N` bounds guard and no length parameter,
// so the launch configuration alone determines how many elements are touched;
// see the caller below, whose grid does NOT cover all N elements.
__global__ void add(int *a, int *b, int *c)
{
printf("threadid No : %d\n",threadIdx.x);
printf("blockid No : %d\n",blockIdx.x);
printf("blockdim No : %d\n",blockDim.x);
int index = threadIdx.x + blockIdx.x * blockDim.x;
printf("Index No : %d\n",index);
c[index] = a[index] + b[index];
}
// Adds two N-element vectors on the GPU and prints the first 10 sums.
// NOTE(review): N/THREADS_PER_BLOCK = 2048/50 truncates to 40 blocks, i.e.
// only 2000 of the 2048 elements are processed; also only the first 10
// elements of a and b are initialized, so most of the input is garbage.
// Printing is limited to the first 10 elements, which masks both issues.
int main()
{
int a[N],b[N],c[N];
int *dev_a, *dev_b, *dev_c;
int size = N * sizeof(int);
cudaMalloc((void **)&dev_a,size);
cudaMalloc((void **)&dev_b,size);
cudaMalloc((void **)&dev_c,size);
//FILL data (only the first 10 entries)
for(int i=0;i<10;i++)
{
a[i]=i;
b[i]=i;
}
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
add<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(dev_a,dev_b,dev_c);
// Blocking copy also synchronizes with the kernel.
cudaMemcpy(c,dev_c,size,cudaMemcpyDeviceToHost);
for(int i=0;i<10;i++)
{
printf("\n %d + %d --> addition is :%d \n\n",a[i],b[i],c[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
3,934 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 100000000
float hArray[N];
float *dArray;
int blocks;
clock_t begin1,begin2,begin3,begin4,end1,end2,end3,end4;
// Initializes the global host array with 1..N, allocates the device buffer,
// and copies host -> device, timing the copy with begin2/end2.
void prologue(void)
{
memset(hArray, 0, sizeof(hArray));
for(int i = 0; i < N; i++)
{
hArray[i] = i + 1;
}
cudaMalloc((void**)&dArray, sizeof(hArray));   // sizeof a true array: N*sizeof(float)
begin2 = clock();
cudaMemcpy(dArray, hArray, sizeof(hArray), cudaMemcpyHostToDevice);
end2 = clock();
}
// Copies the device results back into the global host array and releases the
// device buffer.
void epilogue(void)
{
cudaMemcpy(hArray, dArray, sizeof(hArray), cudaMemcpyDeviceToHost);
cudaFree(dArray);
}
// Kernel
// 100 iterations of A[x] += A[x]^3 + A[x]^2 + A[x] per element. The bare
// `#pragma unroll` asks the compiler to fully unroll the loop — this kernel
// is the "unrolled" half of the benchmark; pow4 below is the control.
__global__ void pow3(float *A)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < N)   // guard: grid is ceil-div'ed over N, last block may overhang
{
#pragma unroll
for(int i =0; i< 100; i++)
{
A[x] += A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
}
}
}
// Same computation as pow3, but `#pragma unroll 1` disables unrolling so the
// two kernels' timings isolate the effect of loop unrolling.
__global__ void pow4(float *A)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < N)
{
#pragma unroll 1
for(int i =0; i< 100; i++)
{
A[x] += A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
}
}
}
//CPU
// Host reference: raise every element of the N-element array to the 6th
// power, in place (left-to-right multiplication order preserved).
void cpu(float *A)
{
    for (int idx = 0; idx < N; idx++)
    {
        const float v = A[idx];
        A[idx] = v * v * v * v * v * v;
    }
}
// Benchmark driver: times the unrolled (pow3) and non-unrolled (pow4) kernels
// plus a CPU pass, then prints both GPU timings.
// Fixes vs. original: the malloc'd cudaDeviceProp is now freed, and the
// deprecated cudaThreadSynchronize() calls are replaced with
// cudaDeviceSynchronize() (equivalent behavior, current API).
int main(int argc, char** argv)
{
	int devCnt;
	cudaGetDeviceCount(&devCnt);
	if(devCnt == 0) {
		perror("No CUDA devices available -- exiting.");
		return 1;
	}
	struct cudaDeviceProp *prop;
	prop = (cudaDeviceProp*)malloc(sizeof(struct cudaDeviceProp));
	cudaGetDeviceProperties(prop,0);
	printf("Ilosc watkow: %d\n", prop->maxThreadsPerBlock);
	//GPU
	prologue();
	// Ceil-div: one extra block when N is not a multiple of the block size.
	blocks = N / prop->maxThreadsPerBlock;
	if(N % prop->maxThreadsPerBlock)
		blocks++;
	begin4 = clock();
	pow3<<<blocks, prop->maxThreadsPerBlock>>>(dArray);
	cudaDeviceSynchronize();   // wait so end4 brackets the kernel
	end4 = clock();
	begin1 = clock();
	pow4<<<blocks, prop->maxThreadsPerBlock>>>(dArray);
	cudaDeviceSynchronize();
	end1 = clock();
	epilogue();
	//CPU
	begin3 = clock();
	cpu(hArray);
	end3 = clock();
	double time_spent1 = (double)(end1 - begin1) / CLOCKS_PER_SEC;
	double time_spent4 = (double)(end4 - begin4) / CLOCKS_PER_SEC;
	printf("Nie odwiniete: %f\n", time_spent1);
	printf("Odwiniete: %f\n", time_spent4);
	free(prop);   // was leaked in the original
	return 0;
}
|
3,935 | #include <stdio.h>
// Greets from the GPU, but only from the first 20 threads of the first 20
// blocks so the output stays bounded on huge launches.
__global__ void helloFromGPU() {
    const bool shouldPrint = (threadIdx.x < 20) && (blockIdx.x < 20);
    if (shouldPrint)
        printf("Hello World from GPU! %d %d\n", threadIdx.x, blockIdx.x);
}
// Launches an extremely large grid (2M blocks x 1024 threads); the kernel
// itself limits which threads actually print. cudaDeviceReset() implicitly
// synchronizes before tearing the context down.
int main(int argc, char**argv) {
  printf("Hello World from CPU!\n");
  // 2 million blocks of 1024 threads each
  long long int blocks = 2 * 1e6;
  long long int threads = 1024; // maximum supported by the GPU this was run on
  helloFromGPU<<<blocks, threads>>>();
  cudaDeviceReset();
  return 0;
}
|
3,936 | #include "particleSolver.cuh"
// Stub: CPU forward-Euler integration is not implemented yet.
// NOTE(review): `v` and `vp` are taken by value, so a future implementation
// mutating them would not affect the caller — confirm the intended signature
// (likely vector<float>&) before filling this in.
void SolverFunctions::forwardEulerCPU(vector<float> v, vector<float> vp, float dt){
}
3,937 | #include <cuda_runtime.h>
#include <stdexcept>
#include <algorithm>
constexpr int CUDA_NUM_THREADS = 128;
constexpr int MAXIMUM_NUM_BLOCKS = 4096;

// Number of blocks needed to cover N elements at CUDA_NUM_THREADS threads per
// block, clamped to [1, MAXIMUM_NUM_BLOCKS] (never launch zero blocks).
inline int GET_BLOCKS(const int N) {
  const int needed = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;  // ceil-div
  return std::max(std::min(needed, MAXIMUM_NUM_BLOCKS), 1);
}
// define the kernel function:
// Element-wise c = a + b over N elements, one thread per element.
// Fixed off-by-one: the guard was `i <= N`, so the thread with i == N read and
// wrote one element past the end of all three arrays (out-of-bounds access).
template <typename T>
__global__ void sum(T *a, T *b, T *c, int N) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < N) {
    c[i] = a[i] + b[i];
  }
}
// define the kernel calling code:
// Launches the element-wise sum kernel on `stream` (asynchronous: the caller
// must synchronize the stream before reading out_c) and throws if the launch
// itself failed. All three pointers must be device pointers.
template <typename T>
void AddGPUKernel(T *in_a, T *in_b, T *out_c, int N, cudaStream_t stream) {
  sum<T>
      <<<GET_BLOCKS(N), CUDA_NUM_THREADS, 0, stream>>>(in_a, in_b, out_c, N);
  // cudaGetLastError catches launch-configuration errors only; execution
  // errors surface at the next synchronizing call on `stream`.
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess != err)
    throw std::runtime_error("CUDA kernel failed : " + std::to_string(err));
}
// instantiate the kernel template for T=float:
template void AddGPUKernel<float>(float *in_a, float *in_b, float *out_c, int N, cudaStream_t stream); |
3,938 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#define TAM 5
/* Populate the TAM-element vector with pseudo-random integers (PRNG is not
 * reseeded here, so repeated calls continue the same sequence). */
void llenarVector(int *A) {
  for (int idx = 0; idx < TAM; idx++)
    A[idx] = rand();
}
// Element-wise C = A + B over TAM elements; the guard lets the grid safely
// overshoot TAM.
__global__ void sumaVectores(int *A, int *B, int *C) {
  int i = threadIdx.x+blockDim.x * blockIdx.x;
  if(i<TAM)
    C[i] = A[i]+B[i];
}
// Prints the TAM-element vector as "(a , b , c)".
// NOTE(review): each element is printed with a trailing space AND a ", "
// separator, producing "1 , 2 " — likely unintended but preserved here.
void printVector(int *A) {
  printf("(");
  for(int i=0; i<TAM; i++) {
    printf("%d ", A[i]);
    if(i!=TAM-1) {
      printf(", ");
    }
  }
  printf(")\n");
}
// Fills two TAM-element vectors with random values, adds them on the GPU, and
// prints the result. Every CUDA allocation/free is error-checked.
// Fixed: the launch used ceil(TAM/64) where TAM/64 is INTEGER division —
// for TAM=5 that is ceil(0) == 0 blocks, so the kernel never ran and C held
// uninitialized device data. Replaced with integer ceil-div (TAM+63)/64.
int main(){
  int size = TAM*sizeof(int);
  int *A = (int *) malloc(size);
  int *B = (int *) malloc(size);
  int *C = (int *) malloc(size);
  int *d_A, *d_B, *d_C;
  cudaError_t err = cudaMalloc((void**)&d_A,size);
  if (err != cudaSuccess) {
    printf("Error %s", cudaGetErrorString( err));
    exit(EXIT_FAILURE);
  }
  err = cudaMalloc((void**)&d_B,size);
  if (err != cudaSuccess) {
    printf("Error %s", cudaGetErrorString( err));
    exit(EXIT_FAILURE);
  }
  err = cudaMalloc((void**)&d_C,size);
  if (err != cudaSuccess) {
    printf("Error %s", cudaGetErrorString( err));
    exit(EXIT_FAILURE);
  }
  llenarVector(A);
  llenarVector(B);
  cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
  cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
  // Integer ceil-div: enough 64-thread blocks to cover all TAM elements.
  sumaVectores<<<(TAM + 63) / 64, 64>>>(d_A,d_B,d_C);
  // Blocking copy also synchronizes with the kernel before reading C.
  cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost );
  printVector(C);
  err = cudaFree(d_A);
  if (err != cudaSuccess) {
    printf("Error %s", cudaGetErrorString( err));
    exit(EXIT_FAILURE);
  }
  err = cudaFree(d_B);
  if (err != cudaSuccess) {
    printf("Error %s", cudaGetErrorString( err));
    exit(EXIT_FAILURE);
  }
  err = cudaFree(d_C);
  if (err != cudaSuccess) {
    printf("Error %s", cudaGetErrorString( err));
    exit(EXIT_FAILURE);
  }
  free(A);
  free(B);
  free(C);
  return 0;
}
|
3,939 | #include<stdio.h>
// Enumerates all CUDA devices and prints their key properties.
// Fixed: `clockRate` is an int field of cudaDeviceProp but was printed with
// %f — mismatched printf format specifiers are undefined behavior and printed
// garbage; it now uses %d like the other integer fields.
int main()
{
	int num_devices,i;
	cudaGetDeviceCount(&num_devices);
	for(i=0;i<num_devices;i++)
	{
		cudaDeviceProp prop;
		cudaGetDeviceProperties(&prop,i);
		printf("Device Number: %d\n",i);
		printf("Device Name: %s\n",prop.name);
		printf("Compute Capability: Major - %d Minor - %d\n",prop.major,prop.minor);
		printf("Memory Clock Rate(kHz): %d\n",prop.memoryClockRate);
		printf("Memory Bus Width:(bits): %d\n",prop.memoryBusWidth);
		printf("Multiprocessor Count: %d\n",prop.multiProcessorCount);
		printf("Total Global Memory Size(MB): %zu\n",prop.totalGlobalMem/(1024*1024));
		printf("Total Shared Memory Size(KB): %zu\n",prop.sharedMemPerBlock/(1024));
		printf("Total Constant Memory Size(KB): %zu\n",prop.totalConstMem/(1024));
		printf("Number of Registers Per Block: %d\n",prop.regsPerBlock);
		printf("Warp Size: %d\n",prop.warpSize);
		printf("Maximum Number of Threads Per Block: %d\n",prop.maxThreadsPerBlock);
		printf("Maximum Size of a Block: %d %d %d\n",prop.maxThreadsDim[0],prop.maxThreadsDim[1],prop.maxThreadsDim[2]);
		printf("Maximum Size of Grid: %d %d %d\n",prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
		printf("Clock Rate(kHz): %d\n",prop.clockRate);  // int field: %d, not %f
	}
	return 0;
}
3,940 | //nvcc -ptx cuda_kernel.cu --gpu-architecture=compute_30 --gpu-code=compute_30 --optimize 2
#include "cuComplex.h"
// CUDA runtime
#include "cuda_runtime.h"
#include "stdint.h"
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
// #define IDX3(k1,k2,k3,kcut) (((((k1)-1)*(kcut))+((k2)-1))*(kcut) + ((k3)-1))
extern "C"
{
//Multiplies the coefficients according to indices list
	//Multiplies the coefficients according to indices list
	// One thread per combo row i (1-based, Fortran-style IDX2F indexing).
	// For each combo it accumulates As = sum over its m-combos of
	// wigner * Re(ck1 * ck2 * ck3), then scales the combo's PA rows by As.
	__global__ void calculate_coefficient_matrix(const cuFloatComplex* coeff, const int numcoeff, const float* wignerlist, const int* indices, const int indiceslength, const int* PAcombos, const int combolength, float *PA, const int klength)
	{
		const int i = threadIdx.x + blockIdx.x * blockDim.x + 1;   // 1-based combo index
		if(i<=combolength)
		{
			// Unpack this combo's k-indices and its slice of the indices table.
			const int k1 = PAcombos[IDX2F(1, i, 9)];
			const int k2 = PAcombos[IDX2F(2, i, 9)];
			const int k3 = PAcombos[IDX2F(3, i, 9)];
			const int ki = PAcombos[IDX2F(4, i, 9)];
			const int jstart = PAcombos[IDX2F(8, i, 9)];
			const int mcombos = PAcombos[IDX2F(9, i, 9)];
			// Coefficient rows for the three k values.
			const cuFloatComplex* ck1 = &coeff[(k1-1)*numcoeff];
			const cuFloatComplex* ck2 = &coeff[(k2-1)*numcoeff];
			const cuFloatComplex* ck3 = &coeff[(k3-1)*numcoeff];
			float As = 0.0f;
			for(int n=0; n < mcombos; n++){
				const int j = jstart + n;
				const int k1i = indices[IDX2F(1, j, 9)];
				const int k2i = indices[IDX2F(2, j, 9)];
				const int k3i = indices[IDX2F(3, j, 9)];
				As += wignerlist[j-1]*cuCrealf(cuCmulf(ck1[k1i-1], cuCmulf(ck2[k2i-1],ck3[k3i-1])) );
			}
			// Scale this combo's PA entries by the accumulated amplitude.
			for(int n=0; n <mcombos; n++){
				PA[IDX2F(jstart+n,ki, indiceslength)] *= As;
			}
		}
	}
__device__ float tripletFactor(const int k1, const int k2, const int k3){
if(k1 == k2 && k2 == k3){
return 1.0;
}
else if ( (k1 == k2 != k3) || (k1 == k3 != k2) || (k2 == k3 != k1) ){
return 3.0;
}
else if ( (k1 != k2) && (k1 != k3) && (k2 != k3) ){
return 6.0;
}
return 0.0;
}
//Multiplies the coefficients according to indices list
	//Multiplies the coefficients according to indices list
	// One thread per K-combo column i (1-based): computes a cross-entropy-like
	// score between the normalized reference column c3ref and log of c3, scaled
	// by tripletFactor, and stores it into result[i-1].
	__global__ void energy(const float *c3, const float *c3ref, const int* Kcombos, const int combolength, const int slab_size, float *result)
	{
		const int i = threadIdx.x + blockIdx.x * blockDim.x + 1;   // 1-based combo index
		if(i<=combolength)
		{
			const int k1 = Kcombos[IDX2F(1, i, 3)];
			const int k2 = Kcombos[IDX2F(2, i, 3)];
			const int k3 = Kcombos[IDX2F(3, i, 3)];
			// Column sums used to normalize both distributions.
			float sum_c3 = 0.0;
			float sum_c3ref = 0.0;
			for(int j=1; j <= slab_size; j++){
				sum_c3 += abs(c3[IDX2F(j,i, slab_size)]);
				sum_c3ref += abs(c3ref[IDX2F(j,i, slab_size)]);
			}
			// res = sum_j p_ref(j) * log(c3(j) / sum_c3); weighted by triplet multiplicity.
			float res = 0.0;
			for(int j=1; j <= slab_size; j++){
				res += abs(c3ref[IDX2F(j,i, slab_size)])/sum_c3ref * (logf(c3[IDX2F(j,i, slab_size)])-logf(sum_c3));
			}
			result[i-1] = tripletFactor(k1,k2,k3)*res;
		}
	}
}
|
3,941 | //Генерация псевдослучайных чисел с использованием CuRand
#include <iostream>
#include <curand.h>
#include <curand_kernel.h>
#define MAX 100
/* This GPU kernel computes one random number and stores it in device memory. */
__global__ void random(unsigned int seed, int* result) {
  /* The CUDA random number library uses curandState_t to track the generator
     state; normally one state is kept per thread (here there is only one). */
  curandState_t state;
  /* Initialize the state. */
  curand_init(seed, /* the seed controls the sequence of values that are
                       generated */
              0,    /* the sequence number only matters with multiple cores */
              0,    /* offset into the sequence */
              &state);
  /* curand works like rand(), except it takes the state as a parameter. */
  *result = curand(&state) % MAX;
}
int main( ) {
  /* Allocate space for one int on the GPU. */
  int* gpu_x;
  cudaMalloc((void**) &gpu_x, sizeof(int));
  /* Launch one thread to generate a random number, seeded with the time. */
  random<<<1, 1>>>(time(NULL), gpu_x);
  /* Copy the random number back to the CPU (blocking copy also synchronizes
     with the kernel). */
  int x;
  cudaMemcpy(&x, gpu_x, sizeof(int), cudaMemcpyDeviceToHost);
  printf("Random number = %d.\n", x);
  /* Free the device memory. */
  cudaFree(gpu_x);
  return 0;
}
3,942 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
#include <unistd.h>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
#define ISLAND 10
#define POPULATION 50
#define FACILITY 20
#define GENERATION 8
#define CROSSOVER 0.6
#define MUTATION 0.03
#define MIGRATION 15
#define INDIVIDUAL 5
#define H 15 // BAY height
#define W 10 // BAY width
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, curandState_t* states) {
  short b=blockIdx.x;   // block index (one block per ISLAND)
  short t=threadIdx.x;  // thread index (one thread per POPULATION member)
  short n=blockDim.x;   // threads per block (presumably POPULATION — confirm launch config)
  short x=b*n+t;        // flat global index: one curand state per individual
  /* we have to initialize the state */
  curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
              x, /* the sequence number should be different for each core (unless you want all
                    cores to get the same sequence of numbers for some reason - use thread id! */
              0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
              &states[x]);
}
// Initializes each individual's chromosome as a random permutation of
// facilities 0..FACILITY-1 (identity fill followed by a random swap shuffle).
__global__ void randomData(curandState_t* states, short* GA){
  short b=blockIdx.x;   // block index (ISLAND)
  short t=threadIdx.x;  // thread index (POPULATION member)
  short n=blockDim.x;   // threads per block
  short x=b*n+t;        // flat individual index
  for(int j=0;j<FACILITY;j++){ // setup: identity permutation
    GA[x*FACILITY + j] = j;
  }
  int i; // shuffle: swap each slot with a uniformly random slot
  for(i = 0; i < FACILITY; i++) {
    short k = curand(&states[x]) % FACILITY;
    int tmp = GA[x*FACILITY + i];
    GA[x*FACILITY + i] = GA[x*FACILITY + k];
    GA[x*FACILITY + k] = tmp;
  }
}
// Initializes each individual's bay-break bit string: FACILITY-1 random
// booleans marking where one bay ends and the next begins.
__global__ void randomBay(curandState_t* states, bool* GB){
  short b=blockIdx.x;   // block index (ISLAND)
  short t=threadIdx.x;  // thread index (POPULATION member)
  short n=blockDim.x;   // threads per block
  short x=b*n+t;        // flat individual index
  int i;
  for(i = 0; i < FACILITY-1; i++) {
    GB[x*(FACILITY-1) + i] = curand(&states[x]) % 2;   // fair coin per break slot
  }
}
// Computes the (x, y) center of every facility slot from the bay layout:
// facilities between bay breaks share one vertical bay of width W; the bay's
// H is divided evenly among its `len` facilities.
// NOTE(review): the first loop has EVERY thread zero the ENTIRE global
// `position` array — a benign same-value race, but massively redundant; each
// thread only needs to clear its own slice.
__global__ void calPosition(short *data, bool *bay, float *position){
  short b=blockIdx.x;   // block index (ISLAND)
  short t=threadIdx.x;  // thread index (POPULATION member)
  short n=blockDim.x;   // threads per block
  short x=b*n+t;        // flat individual index
  short posit = x * FACILITY;         // base into per-individual arrays
  short bayposit = x * (FACILITY-1);  // base into this individual's bay bits
  for(int i=0;i<ISLAND*POPULATION*FACILITY*2;i++){
    position[i] = 0;
  }
  short len = 1;    // facilities accumulated in the current bay
  short next = 0;   // x offset of the current bay's left edge
  for(short f=0;f<FACILITY;f++){
    if(bay[bayposit+f] == 0){
      len = len + 1;
    }
    if(bay[bayposit+f] == 1 || f == FACILITY - 1 ){
      if(f == FACILITY - 1 && bay[bayposit+f] == 0){
        len = len - 1;   // last slot closed the bay without a break bit
      }
      float x = W / 2.0 + next;   // bay center x (shadows the thread index x)
      for(short j=0;j<len;j++){
        position[posit*2+(f+j-len+1)*2] = x;
        float y = H / (len * 2.0) * ( (j * 2) + 1) ;   // evenly spaced centers
        position[posit*2+(f+j-len+1)*2+1] = y;
      }
      len = 1;
      next = next + W;   // next bay starts one width further right
    }
  }
}
// Fill an individual's FACILITY x FACILITY matrix of Euclidean distances
// between facility centres, indexed by facility id (looked up from the
// sequence in `data`). The matrix is symmetric; both halves are written.
__global__ void calDistance(short *data, float *position, float *distance){
	short b=blockIdx.x;   // block index == island
	short t=threadIdx.x;  // thread index == individual
	short n=blockDim.x;   // threads per block
	short x=b*n+t;        // global individual index
	short posit = x * FACILITY;  // base slot offset of this individual
	// BUG FIX: clear only THIS individual's FACILITY*FACILITY tile. The
	// original zeroed the entire distance array from every thread, racing
	// with the distances other threads were writing concurrently.
	for(int i=0;i<FACILITY*FACILITY;i++){
		distance[posit*FACILITY + i] = 0;
	}
	for(short f=0;f<FACILITY;f++){
		for(short j=f+1;j<FACILITY;j++){
			float x1 = position[ (posit + f)*2 ];
			float y1 = position[ (posit + f)*2 + 1];
			short fa = data[ posit + f ];  // facility id at slot f
			float x2 = position[ (posit + j)*2 ];
			float y2 = position[ (posit + j)*2 + 1];
			short fb = data[ posit + j ];  // facility id at slot j
			// Single-precision Euclidean distance. (The original branched on
			// y2 > y1, but both branches computed the same value because
			// (y2-y1)^2 == (y1-y2)^2.)
			float d = sqrtf( (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) );
			distance[ (posit + fa)*FACILITY + fb] = d;
			distance[ (posit + fb)*FACILITY + fa] = d;
		}
	}
}
// Weighted cost matrix for one individual:
// totalCost[f][j] = flow-cost[f][j] * distance[f][j].
// The cost matrix is shared by all individuals; distance/totalCost are
// per-individual FACILITY x FACILITY tiles.
__global__ void calTotalcost(float *distance, int *cost, float *totalCost){
	const short x = blockIdx.x * blockDim.x + threadIdx.x; // global individual index
	const short base = x * FACILITY;                        // this individual's tile offset
	for (short i = 0; i < FACILITY; i++) {
		for (short j = 0; j < FACILITY; j++) {
			totalCost[(base + i) * FACILITY + j] =
				cost[i * FACILITY + j] * distance[(base + i) * FACILITY + j];
		}
	}
}
// Objective function: sum an individual's FACILITY x FACILITY weighted-cost
// tile into sumCost[x]. One thread per individual.
__global__ void calOF(float *sumCost, float *totalCost){
	const short x = blockIdx.x * blockDim.x + threadIdx.x; // global individual index
	const short base = x * FACILITY;
	float acc = 0.0;
	for (short f = 0; f < FACILITY; f++) {
		for (short j = 0; j < FACILITY; j++) {
			acc += totalCost[(base + f) * FACILITY + j];
		}
	}
	sumCost[x] = acc;
}
// Per-island total fitness: totalPro[island] = sum over the island's
// population of 1 / cost. Launched <<<1, ISLAND>>>, so x is the island index.
__global__ void calTotalPro(float *sumCost, float *totalPro){
	short b=blockIdx.x;
	short t=threadIdx.x;
	short n=blockDim.x;
	short x=b*n+t;  // island index
	// BUG FIX: start from zero — the buffer comes from cudaMalloc and is
	// reused every generation, so accumulating into stale contents is wrong.
	totalPro[x] = 0.0f;
	for(short p=0;p<POPULATION;p++){
		// BUG FIX: sumCost is laid out [ISLAND][POPULATION], so the island
		// stride is POPULATION; the original used ISLAND, which is only
		// correct when ISLAND == POPULATION.
		totalPro[x] = totalPro[x] + (1.0f / sumCost[x*POPULATION + p]);
	}
}
// Roulette-wheel selection probability of each individual: fitness is the
// reciprocal of its layout cost, normalised by the island's total reciprocal
// cost (totalPro). One thread per individual.
__global__ void calProbability(float *probability, float *totalPro, float *sumCost){
	short b=blockIdx.x;  // block index == island
	short t=threadIdx.x; // thread index == individual
	short n=blockDim.x;  // threads per block
	short x=b*n+t;       // global individual index
	probability[x] = (1.0 / sumCost[x]) / (totalPro[ x / POPULATION ]) ;
}
// Roulette-wheel parent selection followed by a partially-matched crossover.
// Launched <<<ISLAND, POPULATION/2>>>: thread x selects two parents from its
// island and produces children 2x and 2x+1 in data2/bay2.
__global__ void crossOver(curandState_t* states,float *probability2, short *data, bool *bay, short *data2, bool *bay2){
	short b=blockIdx.x;   // island index
	short t=threadIdx.x;  // child-pair index within the island
	short n=blockDim.x;   // POPULATION/2 threads per block
	short x=b*n+t;        // global child-pair index
	short posit = x * 2 * FACILITY;            // child 2x   facility sequence
	short posit2 = (2*x+1) * FACILITY;         // child 2x+1 facility sequence
	short bayposit = x * 2 * (FACILITY-1);     // child 2x   bay flags
	short bayposit2 = (2*x+1) * (FACILITY-1);  // child 2x+1 bay flags
	// BUG FIX: the island index is blockIdx.x. The original derived it as
	// x/POPULATION, but x advances by only POPULATION/2 per island (blockDim
	// is POPULATION/2), so most blocks read the wrong island's parents.
	short island = b;
	// Two roulette spins in [0,1); probability2 holds the island's cumulative
	// selection probabilities.
	float get = ( curand(&states[x]) % 10000) * 0.0001;
	short getP = 0;
	float get2 = curand(&states[x]) % 10000 * 0.0001;
	short getP2 = 0;
	for(short p=0;p<POPULATION-1;p++){
		if(get >= probability2[ island*POPULATION + p ] && get < probability2[ island*POPULATION + p+1 ]){
			getP = p+1;
			break;
		}
		else if(p==POPULATION-2){
			getP = p+1;
			break;
		}
	}
	for(short p=0;p<POPULATION-1;p++){
		if(get2 >= probability2[ island*POPULATION + p ] && get2 < probability2[ island*POPULATION + p+1 ]){
			getP2 = p+1;
			break;
		}
		else if(p==POPULATION-2){
			getP2 = p+1;
			break;
		}
	}
	// Copy the two selected parents into the child slots.
	// BUG FIX: the bay copies previously looped to FACILITY, one past each
	// individual's FACILITY-1 flags, overwriting the first flag of the next
	// child — a data race against the neighbouring thread.
	for(short f=0;f<FACILITY;f++){
		data2[ posit + f] = data[ island*POPULATION*FACILITY + getP*FACILITY + f];
	}
	for(short f=0;f<FACILITY-1;f++){
		bay2[ bayposit + f] = bay[ island*POPULATION*(FACILITY-1) + getP*(FACILITY-1) + f];
	}
	for(short f=0;f<FACILITY;f++){
		data2[ posit2 + f ] = data[ island*POPULATION*FACILITY + getP2*FACILITY + f];
	}
	for(short f=0;f<FACILITY-1;f++){
		bay2[ bayposit2 + f] = bay[ island*POPULATION*(FACILITY-1) + getP2*(FACILITY-1) + f];
	}
	// With probability CROSSOVER, swap genes seq and seq+1 between the two
	// children, then repair duplicated facility ids (PMX-style).
	int tt = curand(&states[x]) % 10000;
	float yes = tt * 0.0001;
	if(yes <= CROSSOVER){
		short sss = FACILITY - 1;
		int seq = curand(&states[x]) % sss;  // swap window: positions seq, seq+1
		// cross[c] = {value entering a child, value it displaces} pairings.
		int cross[4][2];
		cross[0][0] = data2[ posit + seq];
		cross[0][1] = data2[ posit2 + seq];
		cross[1][0] = data2[ posit + seq];
		cross[1][1] = data2[ posit2 + seq+1];
		cross[2][0] = data2[ posit + seq+1];
		cross[2][1] = data2[ posit2 + seq];
		cross[3][0] = data2[ posit+ seq+1];
		cross[3][1] = data2[ posit2 + seq+1];
		short temp = data2[ posit2 + seq];
		short temp2 = data2[posit2 + seq+1];
		data2[ posit2 + seq] = data2[ posit + seq];
		data2[ posit2 + seq+1] = data2[posit + seq+1];
		data2[posit + seq] = temp;
		data2[posit + seq+1] = temp2;
		// Count identical pairings: determines how much repair is needed.
		short count = 0;
		for(short c=0;c<4;c++){
			if(cross[c][0] == cross[c][1]){
				count++;
			}
		}
		switch (count) {
		case 0:
			// All four values distinct: map each duplicated value back to its
			// counterpart everywhere outside the swapped window.
			for(short c=0;c<FACILITY;c++){
				if(c != seq){
					if(data2[posit + c] == cross[0][1]){
						data2[ posit + c] = cross[0][0];
					}
					if(data2[posit + c] == cross[3][1]){
						data2[ posit + c] = cross[3][0];
					}
				}
				else{
					c++;  // skip both swapped positions (seq and seq+1)
				}
			}
			for(short c=0;c<FACILITY;c++){
				if(c != seq){
					if(data2[posit2 + c] == cross[0][0]){
						data2[ posit2 + c] = cross[0][1];
					}
					if(data2[posit2 + c] == cross[3][0]){
						data2[ posit2 + c] = cross[3][1];
					}
				}
				else{
					c++;
				}
			}
			break;
		case 1:
			// One value shared between the windows: only repair the pairing
			// that does not involve the shared value.
			temp = 99;
			for(short c=0;c<4;c++){
				if(cross[c][0] == cross[c][1]){
					temp = cross[c][0];
				}
			}
			for(short c=0;c<4;c++){
				if(cross[c][0] != temp && cross[c][1] != temp){
					for(short f=0;f<FACILITY;f++){
						if(f != seq){
							if(data2[posit + f] == cross[c][1]){
								data2[ posit + f] = cross[c][0];
							}
						}
						else{
							f++;
						}
					}
				}
			}
			for(short c=0;c<4;c++){
				if(cross[c][0] != temp && cross[c][1] != temp){
					for(short f=0;f<FACILITY;f++){
						if(f != seq){
							if(data2[posit2 + f] == cross[c][0]){
								data2[ posit2 + f] = cross[c][1];
							}
						}
						else{
							f++;
						}
					}
				}
			}
			break;
		case 2:
			// Both windows held the same two values: the swap is a no-op on
			// the permutation — nothing to repair.
			break;
		}
		// Swap the corresponding bay flags between the children.
		// BUG FIX: the original wrote bay2[bayposit + seq] back from
		// bay2[bayposit2 + seq] AFTER overwriting it, so the "swap" just
		// copied the same value both ways; use the saved temporaries.
		// NOTE(review): seq+1 can equal FACILITY-1, which indexes one past
		// child 2x's FACILITY-1 flags (into its sibling's first flag) —
		// confirm whether the bay swap should draw seq from FACILITY-2.
		temp = bay2[bayposit2 + seq];
		temp2 = bay2[bayposit2 + seq+1];
		bay2[bayposit2 + seq] = bay2[bayposit + seq];
		bay2[bayposit2 + seq+1] = bay2[bayposit + seq+1];
		bay2[bayposit + seq] = temp;
		bay2[bayposit + seq+1] = temp2;
	}
}
// With probability MUTATION, swap two randomly chosen facility positions in
// this child's sequence. One thread per child.
__global__ void mutation(curandState_t *states, short *data2){
	const int x = blockIdx.x * blockDim.x + threadIdx.x; // global child index
	const int base = x * FACILITY;
	const float roll = (curand(&states[x]) % 10000) * 0.0001;
	if (roll < MUTATION) {
		const short i = curand(&states[x]) % FACILITY;
		const short j = curand(&states[x]) % FACILITY;
		const short tmp = data2[base + i];
		data2[base + i] = data2[base + j];
		data2[base + j] = tmp;
	}
}
// With probability MUTATION, flip one randomly chosen bay-break flag of this
// child. One thread per child.
__global__ void mutationBay(curandState_t *states, bool *bay2){
	const short x = blockIdx.x * blockDim.x + threadIdx.x; // global child index
	const short base = x * (FACILITY - 1);
	const float roll = curand(&states[x]) % 10000 * 0.0001 ;
	if (roll < MUTATION) {
		const short g = curand(&states[x]) % (FACILITY - 1);
		bay2[base + g] = !bay2[base + g];
	}
}
// __global__ void migration(short *data2, bool *bay2, short *temp3, bool *temp4, int *indexCost,float *sumCost){
// short b=blockIdx.x; //區塊索引 == ISLAND
// short t=threadIdx.x; //執行緒索引 == POPULATION
// short n=blockDim.x; //區塊中包含的執行緒數目 == num of ISLAND
// short x=b*n+t;
// short posit = x * FACILITY ; // island
// short positBay = x * (FACILITY - 1);
// for(int f=0;f<FACILITY;f++){
// temp3[posit + f] = data2[posit + indexCost[x]*FACILITY + f] ;
// }
// for(int f=0;f<FACILITY-1;f++){
// temp4[positBay + f] = bay2[positBay + indexCost[x]*(FACILITY-1) + f];
// }
// __syncthreads();
// if(posit == 0){
// int backP = indexCost[(ISLAND-1)*POPULATION + x];
// int frontP = indexCost[posit + x];
// for(int f=0;f<FACILITY;f++){
// data2[posit + frontP*FACILITY + f] = temp3[(x/POPULATION -1)*POPULATION*FACILITY * + backP*FACILITY + f];
// }
// for(int f=0;f<FACILITY-1;f++){
// bay2[posit + frontP*FACILITY + f] = temp4[(ISLAND-1)*POPULATION*FACILITY + backP*FACILITY + f];
// }
// }else {
// int backP = indexCost[i-1 + x];
// int frontP = indexCost[i + x];
// for(int f=0;f<FACILITY;f++){
// data2[i*POPULATION*FACILITY + frontP*FACILITY + f] = temp3[(ISLAND-1)*POPULATION*FACILITY + backP*FACILITY + f];
// }
// for(int f=0;f<FACILITY-1;f++){
// bay2[i*POPULATION*FACILITY + frontP*FACILITY + f] = temp4[(ISLAND-1)*POPULATION*FACILITY + backP*FACILITY + f];
// }
// } // else end
// }
// Promote the children to parents for the next generation: copy this
// individual's facility sequence and bay flags from data2/bay2 into data/bay.
__global__ void parent_to_child(short *data, short *data2, bool *bay, bool *bay2){
	const short x = blockIdx.x * blockDim.x + threadIdx.x; // global individual index
	const short seqBase = x * FACILITY;
	const short bayBase = x * (FACILITY - 1);
	for (int f = 0; f < FACILITY; f++) {
		data[seqBase + f] = data2[seqBase + f];
	}
	for (int f = 0; f < FACILITY - 1; f++) {
		bay[bayBase + f] = bay2[bayBase + f];
	}
}
// Island-model genetic algorithm for facility layout: evaluate layouts on the
// GPU each generation (position -> distance -> weighted cost -> objective),
// select/crossover/mutate, periodically migrate the best members between
// islands, and report the best individual found per generation.
int main(){
	double START, END;
	START = clock();
	/* curand keeps one RNG state per thread (one per individual). */
	curandState_t* states;
	cudaMalloc((void**) &states, ISLAND * POPULATION * sizeof(curandState_t));
	/* Seed every state on the GPU. */
	init<<<ISLAND, POPULATION>>>(time(NULL), states);
	// Random initial population: facility permutations (GA) and bay flags (GB).
	short *GA;
	cudaMalloc((void**)&GA, ISLAND*POPULATION*FACILITY*sizeof(short));
	bool *GB;
	cudaMalloc((void**)&GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool));
	randomData<<<ISLAND, POPULATION>>>(states, GA);
	randomBay<<<ISLAND, POPULATION>>>(states, GB);
	short data[ISLAND][POPULATION][FACILITY];
	cudaMemcpy(data, GA, ISLAND*POPULATION*FACILITY*sizeof(short), cudaMemcpyDeviceToHost);
	bool bay[ISLAND][POPULATION][FACILITY-1];
	cudaMemcpy(bay, GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool), cudaMemcpyDeviceToHost);
	// Read the pairwise flow-cost list ("from to cost", 1-based ids) and
	// expand it into a dense FACILITY x FACILITY matrix.
	FILE *fPtr;
	int ttt = FACILITY * (FACILITY-1) ;
	fPtr=fopen("cost.txt","r");
	int cost[FACILITY][FACILITY] = {0};
	int temp[ttt][3];
	for(int i=0;i<ttt;i++){
		fscanf(fPtr , "%d %d %d" , &temp[i][0], &temp[i][1], &temp[i][2]);
	}
	fclose(fPtr);
	for(int i=0;i<ttt;i++){
		cost[ temp[i][0]-1 ][ temp[i][1]-1] = temp[i][2];
	}
	int *Gcost;
	cudaMalloc((void**)&Gcost, FACILITY*FACILITY*sizeof(int));
	cudaMemcpy(Gcost, cost, FACILITY*FACILITY*sizeof(int), cudaMemcpyHostToDevice);
	for(short gggggg=0;gggggg<GENERATION;gggggg++){ // generation loop
		float *Gposition;
		cudaMalloc((void**)&Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float));
		// (x, y) centre of every facility implied by sequence + bay flags.
		calPosition<<<ISLAND, POPULATION>>>(GA, GB, Gposition);
		float position[ISLAND*POPULATION*FACILITY*2];
		cudaMemcpy(position, Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float), cudaMemcpyDeviceToHost);
		float distance[ISLAND*POPULATION*FACILITY*FACILITY] = {0};
		float *Gdistance;
		cudaMalloc((void**)&Gdistance, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float));
		// Pairwise facility distances per individual.
		calDistance<<<ISLAND, POPULATION>>>(GA, Gposition, Gdistance);
		cudaMemcpy(distance, Gdistance, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float), cudaMemcpyDeviceToHost);
		float totalCost[ISLAND][POPULATION][FACILITY][FACILITY] = {0.0};
		float *GtotalCost;
		cudaMalloc((void**)&GtotalCost, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float));
		// Flow-cost-weighted distances.
		calTotalcost<<<ISLAND, POPULATION>>>(Gdistance, Gcost, GtotalCost);
		cudaMemcpy(totalCost, GtotalCost, ISLAND*POPULATION*FACILITY*FACILITY*sizeof(float), cudaMemcpyDeviceToHost);
		float *GsumCost;
		float sumCost[ISLAND][POPULATION]={0.0};
		cudaMalloc((void**)&GsumCost, ISLAND*POPULATION*sizeof(float));
		// Objective function per individual.
		calOF<<<ISLAND, POPULATION>>>(GsumCost, GtotalCost);
		cudaMemcpy(sumCost, GsumCost, ISLAND*POPULATION*sizeof(float), cudaMemcpyDeviceToHost);
		short data2[ISLAND][POPULATION][FACILITY]; // children: facility sequences
		short *Gdata2;
		cudaMalloc((void**)&Gdata2, ISLAND*POPULATION*FACILITY*sizeof(short));
		bool bay2[ISLAND][POPULATION][FACILITY-1]; // children: bay flags
		bool *Gbay2;
		cudaMalloc((void**)&Gbay2, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool));
		float probability[ISLAND][POPULATION] = {0.0};
		float *Gprobability;
		cudaMalloc((void**)&Gprobability, ISLAND*POPULATION*sizeof(float));
		float *GtotalPro;
		cudaMalloc((void**)&GtotalPro, ISLAND*sizeof(float));
		// BUG FIX: zero the per-island accumulator (cudaMalloc does not
		// initialize it) and pass GtotalPro to calTotalPro — the original
		// passed GtotalCost here, leaving GtotalPro uninitialized.
		cudaMemset(GtotalPro, 0, ISLAND*sizeof(float));
		calTotalPro<<<1, ISLAND>>>(GsumCost, GtotalPro);
		calProbability<<<ISLAND, POPULATION>>>(Gprobability, GtotalPro, GsumCost);
		cudaMemcpy(probability, Gprobability, ISLAND*POPULATION*sizeof(float), cudaMemcpyDeviceToHost);
		// Cumulative probabilities for roulette-wheel selection.
		float probability2[ISLAND][POPULATION] = {0.0};
		for(short i=0;i<ISLAND;i++){
			for(short p=0;p<POPULATION;p++){
				for(short j=0;j<=p;j++){
					probability2[i][p] += probability[i][j];
				}
			}
		}
		float *Gprobability2;
		cudaMalloc((void**)&Gprobability2, ISLAND*POPULATION*sizeof(float));
		cudaMemcpy(Gprobability2, probability2, ISLAND*POPULATION*sizeof(float), cudaMemcpyHostToDevice);
		crossOver<<<ISLAND, POPULATION / 2>>>(states, Gprobability2, GA, GB, Gdata2, Gbay2);
		mutation<<<ISLAND, POPULATION>>>(states, Gdata2);
		// NOTE(review): mutationBay is never launched, so children's bay
		// flags are never mutated — confirm whether that is intended.
		// Migration: every MIGRATION generations, ship each island's best
		// members to the next island (ring topology, done on the host).
		if( (gggggg+1) % MIGRATION == 0 && (gggggg+1) != 0 && ISLAND > 1){
			cudaMemcpy(data2, Gdata2, ISLAND*POPULATION*FACILITY*sizeof(short), cudaMemcpyDeviceToHost);
			cudaMemcpy(bay2, Gbay2, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool), cudaMemcpyDeviceToHost);
			int temp3[ISLAND][POPULATION/2][FACILITY];
			short temp4[ISLAND][POPULATION/2][FACILITY-1];
			int indexCost[ISLAND][POPULATION];
			for(int i=0;i<ISLAND;i++){
				for(int p=0;p<POPULATION;p++){
					indexCost[i][p] = p;
				}
			}
			// Sort population indices by cost (bubble sort, ascending).
			for(int k=0;k<ISLAND;k++){
				for(int i=POPULATION-1; i>=1; i--){
					for(int j=0; j<=i-1; j++){
						if(sumCost[k][j] > sumCost[k][j+1]){
							int temp2 = indexCost[k][j];
							indexCost[k][j] = indexCost[k][j+1];
							indexCost[k][j+1] = temp2;
						}
					}
				}
			}
			// Snapshot each island's INDIVIDUAL best members.
			int countP = 0;
			for(int i=0;i<ISLAND;i++){
				while(countP < INDIVIDUAL){
					for(int p=0;p<POPULATION;p++){
						if(p == indexCost[i][countP]){
							for(int f=0;f<FACILITY;f++){
								temp3[i][countP][f] = data2[i][p][f];
							}
							for(int f=0;f<FACILITY-1;f++){
								temp4[i][countP][f] = bay2[i][p][f];
							}
							countP++;
							break;
						}
					}
				}
				countP = 0;
			}
			for(int i=0;i<ISLAND;i++){
				if(i==0){
					for(int k=0;k<POPULATION/2;k++){
						int backP = indexCost[ISLAND-1][k];
						int frontP = indexCost[i][k];
						for(int f=0;f<FACILITY;f++){
							data2[i][frontP][f] = temp3[ISLAND-1][backP][f];
						}
						for(int f=0;f<FACILITY-1;f++){
							bay2[i][frontP][f] = temp4[ISLAND-1][backP][f];
						}
					}
				}else {
					for(int k=0;k<POPULATION/2;k++){
						int backP = indexCost[i-1][k];
						int frontP = indexCost[i][k];
						// NOTE(review): reads temp3/temp4[ISLAND-1] even though
						// backP is taken from island i-1 — confirm the source
						// island; looks like it should be [i-1].
						for(int f=0;f<FACILITY;f++){
							data2[i][frontP][f] = temp3[ISLAND-1][backP][f];
						}
						for(int f=0;f<FACILITY-1;f++){
							bay2[i][frontP][f] = temp4[ISLAND-1][backP][f];
						}
					}
				}
			}
			// BUG FIX: push the migrated population back to the device — the
			// original modified only the host copies, so parent_to_child
			// discarded the migration entirely.
			cudaMemcpy(Gdata2, data2, ISLAND*POPULATION*FACILITY*sizeof(short), cudaMemcpyHostToDevice);
			cudaMemcpy(Gbay2, bay2, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool), cudaMemcpyHostToDevice);
		} // migration end
		// Report the best individual of this generation (was: if(1==1)).
		if(1==1){
			cudaMemcpy(data2, Gdata2, ISLAND*POPULATION*FACILITY*sizeof(short), cudaMemcpyDeviceToHost);
			cudaMemcpy(bay2, Gbay2, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool), cudaMemcpyDeviceToHost);
			int answerPos[2];
			float answer;
			answerPos[0] = 0;
			answerPos[1] = 0;
			answer = sumCost[0][0];
			for(int i=0;i<ISLAND;i++){
				for(int p=0;p<POPULATION;p++){
					if(sumCost[i][p] < answer && sumCost[i][p] != 0){
						answerPos[0] = i;
						answerPos[1] = p;
						answer = sumCost[i][p];
					}
				}
			}
			for(int i=0;i<FACILITY;i++){
				printf("%d ", data2[ answerPos[0] ][ answerPos[1] ][i]);
			}
			printf("\n");
			for(int i=0;i<FACILITY-1;i++){
				printf("%d ", bay2[ answerPos[0] ][ answerPos[1] ][i]);
			}
			printf("最小: %d %d = %f\n", answerPos[0], answerPos[1], answer);
		}
		// Children become the next generation's parents.
		parent_to_child<<<ISLAND, POPULATION>>>(GA, Gdata2, GB, Gbay2);
		cudaFree(Gposition);
		cudaFree(Gdistance);
		cudaFree(GtotalCost);
		cudaFree(GsumCost);
		cudaFree(Gdata2);
		cudaFree(Gbay2);
		cudaFree(Gprobability);
		// BUG FIX: Gprobability2 was allocated every generation but never
		// freed, leaking device memory across the whole run.
		cudaFree(Gprobability2);
		cudaFree(GtotalPro);
	}
	cudaFree(GA);
	cudaFree(GB);
	cudaFree(Gcost);
	cudaFree(states); // was leaked
	END = clock();
	printf("程式執行所花費: %lf S\n", (double)clock()/CLOCKS_PER_SEC);
	printf("進行運算所花費的時間: %lf S\n", (END - START) / CLOCKS_PER_SEC);
	return 0;
}
|
3,943 | //
// Created by igor on 28.03.2021.
//
#include "Camera.cuh"
// Construct a camera for an x-by-y image: the camera sits at the origin with
// identity orientation; rays pass through an image plane at z = -1.
Camera::Camera(float fov, const unsigned int x, const unsigned int y) : fov(fov), x(x), y(y) {
    position = Matrix4::IDENTITY;
    origin = Vector3{0, 0, 0};
    // Side length of one (square) pixel on the plane, derived from the
    // horizontal field of view and the image width.
    // NOTE(review): assumes `fov` is in radians — confirm at call sites.
    float pixelDxLen = tan(fov/2)/x*2;
    pixelDx = {pixelDxLen, 0, 0};
    pixelDy = {0, -pixelDxLen, 0};  // y grows downward in image space
    // Offset from the image centre to the top-left corner, on the z = -1 plane.
    Vector3 left = x / -2.0 * pixelDx;
    Vector3 top = y / -2.0 * pixelDy;
    topLeft = left + top + Vector3{0, 0, -1};
}
|
3,944 | #include <cuda.h>
#include <vector>
#include <cstdio>
#include <cstdlib>
// Fixed-capacity concurrent queue for device code. Slots are reserved with a
// single atomic increment of `size`, so concurrent inserts never collide.
// Note: `size` may exceed `capacity` after failed inserts; it counts
// reservation attempts, not stored elements.
template <typename T, std::size_t capacity>
struct queue {
    int size = 0;      // next free slot / number of reservation attempts
    T data[capacity];  // element storage
    // Insert `value`; returns true on success, false if the queue was full.
    // (The original left this a TODO stub with no return value — UB.)
    __device__ bool insert(const T& value) {
        int slot = atomicAdd(&size, 1);  // atomically reserve a slot
        if (slot < (int)capacity) {
            data[slot] = value;          // slot is exclusively ours
            return true;
        }
        return false;                    // overran the end of the queue
    }
};
constexpr size_t queue_size = 1000;
// Each of the n threads inserts its global TID into every one of the n
// queues (implements the original TODO).
__global__ void kernel(queue<int, queue_size>* queues, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;  // grid may be padded past n
    for (int q = 0; q < n; ++q) {
        queues[q].insert(tid);
    }
}
// Driver: run the kernel so each of n threads inserts its TID into each of n
// queues, then verify every queue holds a permutation of 0..n-1.
int main(int argc, char** argv) {
    constexpr size_t n = queue_size;
    std::vector<queue<int, n>> queues(n);
    queue<int, n>* d_queues;
    cudaMalloc(&d_queues, sizeof(queue<int, n>)*n);
    cudaMemcpy(d_queues, queues.data(), sizeof(queue<int, n>)*n, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    size_t block_size = 256;
    // ceil(n / block_size) blocks so every TID in [0, n) is covered
    dim3 grid((n + block_size - 1) / block_size);
    dim3 block(block_size);
    kernel<<<grid, block>>>(d_queues, n);
    cudaMemcpy(queues.data(), d_queues, sizeof(queue<int, n>)*n, cudaMemcpyDeviceToHost);
    bool success = true;
    for (size_t i = 0; i < n; i++) {
        // Renamed from `queue`, which shadowed the template name.
        queue<int, n>& q = queues[i];
        if (q.size != n) {
            success = false;
            break;
        }
        // Every value must be in range and appear exactly once.
        std::vector<size_t> histogram(n, 0);
        for (size_t j = 0; j < n; j++) {  // renamed: inner loop shadowed `i`
            // BUG FIX: the original tested `< 0 && >= n`, which can never be
            // true; a value is out of range if EITHER bound is violated.
            if (q.data[j] < 0 || q.data[j] >= (int)n) {
                success = false;
                break;
            }
            histogram[q.data[j]] += 1;
            if (histogram[q.data[j]] != 1) {
                success = false;
                break;
            }
        }
    }
    if (success) {
        printf("OK!\n");
    } else {
        printf("FAILED.\n");
    }
    cudaDeviceSynchronize();
    cudaFree(d_queues);
    return 0;
}
|
3,945 | #include <stdio.h>
const int max_threads_per_block = 512;
#define HANDLE_CUDA_ERROR(err) if (err) { printf("%s", cudaGetErrorString(err)); return; }
/*
 * Forward-reduction phase of cyclic reduction for a tridiagonal system of
 * size `dim` stored band-wise in lower/diagonal/upper with RHS in `equal`
 * (shared memory, one entry per equation). Starting at stride `step`, each
 * pass eliminates the neighbours of every (2*step)-th equation, doubling the
 * stride, until the guard (step * to * 3 <= dim) stops it. Returns the stride
 * reached, which the matching back substitution must start from.
 * Must be called by ALL threads of the block (contains __syncthreads()).
 */
__device__ int cyclic_reduction_forward_reduction(float *lower, float *diagonal, float *upper, float *equal, const int dim, int step, const int to)
{
	/* Forward reduction */
	for (; (step * to * 3) <= dim; step <<= 1)
	{
		/* Equation this thread reduces at the current stride. */
		const int addr = (threadIdx.x * (step << 1)) + (step << 1) - 1;
		if (addr < dim)
		{
			if (addr - step >= 0)
			{
				/* Eliminate the coupling to the equation `step` rows above. */
				const float alpha = -lower[addr] / diagonal[addr - step];
				equal[addr] += (alpha * equal[addr - step]);
				diagonal[addr] += (alpha * upper[addr - step]);
				lower[addr] = alpha * lower[addr - step];
			}
			if (addr + step < dim)
			{
				/* Eliminate the coupling to the equation `step` rows below. */
				const float gamma = -upper[addr] / diagonal[addr + step];
				equal[addr] += (gamma * equal[addr + step]);
				diagonal[addr] += (gamma * lower[addr + step]);
				upper[addr] = gamma * upper[addr + step];
			}
		}
		__syncthreads();
	}
	return step;
}
/*
 * Backward-substitution phase of cyclic reduction: starting at stride `step`
 * (the value the forward phase returned, halved by the caller) and halving
 * down to `to`, solve the odd-indexed unknowns at each level from the
 * already-known neighbours. On exit `equal` holds the solution.
 * Must be called by ALL threads of the block (contains __syncthreads()).
 */
__device__ void cyclic_reduction_back_substitution(float *lower, float *diagonal, float *upper, float *equal, const int dim, int step, const int to)
{
	/* Backward substitution */
	for (; step > to; step >>= 1)
	{
		/* Unknown this thread resolves at the current stride. */
		const int addr = (threadIdx.x * (step << 1)) + step - 1;
		if (addr < dim)
		{
			if (addr - step >= 0)
			{
				/* Subtract the known solution `step` rows above. */
				equal[addr] -= (lower[addr] * equal[addr - step]);
			}
			if (addr + step < dim)
			{
				/* Subtract the known solution `step` rows below. */
				equal[addr] -= (upper[addr] * equal[addr + step]);
			}
			equal[addr] = equal[addr] / diagonal[addr];
		}
		__syncthreads();
	}
}
/*
 * Single-block cyclic-reduction tridiagonal solver. Stages the four bands
 * into shared memory (dim <= 512, one thread per equation), reduces the
 * system down to 1 or 2 equations, solves that base case directly, then back
 * substitutes. The solution is written back to equal_glb.
 */
__global__ void cyclic_reduction_device(float *lower_glb, float *diagonal_glb, float *upper_glb, float *equal_glb, const int dim)
{
	__shared__ float lower[512];
	__shared__ float diagonal[512];
	__shared__ float upper[512];
	__shared__ float equal[512];
	lower[threadIdx.x] = lower_glb[threadIdx.x];
	diagonal[threadIdx.x] = diagonal_glb[threadIdx.x];
	upper[threadIdx.x] = upper_glb[threadIdx.x];
	equal[threadIdx.x] = equal_glb[threadIdx.x];
	__syncthreads();
	/* Forward reduction */
	int step = cyclic_reduction_forward_reduction(lower, diagonal, upper, equal, dim, 1, 1);
	/* Solve base system (sequentially, by thread 0) */
	if (threadIdx.x == 0)
	{
		if ((dim / step) == 2) /* Solve simultaneous equations */
		{
			/* Two coupled equations remain: solve the 2x2 system by
			   elimination (Cramer-style). */
			const int equal_addr = (step << 1) - 1;
			const float a0 = diagonal[equal_addr - step];
			const float a1 = lower[equal_addr];
			const float b0 = upper[equal_addr - step];
			const float b1 = diagonal[equal_addr];
			const float c0 = equal[equal_addr - step];
			const float c1 = equal[equal_addr];
			equal[equal_addr] = (c0 * a1 - a0 * c1) / (a1 * b0 - a0 * b1);
			equal[equal_addr - step] = (c0 - b0 * equal[equal_addr]) / a0;
		}
		else /* blk_size == 1, equations are already solved */
		{
			const int equal_addr = step - 1;
			equal[equal_addr] = equal[equal_addr] / diagonal[equal_addr];
		}
	}
	__syncthreads();
	step >>= 1;
	/* Backward substitution */
	cyclic_reduction_back_substitution(lower, diagonal, upper, equal, dim, step, 0);
	equal_glb[threadIdx.x] = equal[threadIdx.x];
}
/*
 * Host wrapper: solve a tridiagonal system of size `dim` on the GPU via
 * cyclic reduction. lower/diagonal/upper are the three bands, equal the RHS;
 * the solution is written back into `equal`. dim must fit in one block.
 */
void cyclic_reduction(float *lower, float *diagonal, float *upper, float *equal, const int dim)
{
	/* BUG FIX: validate BEFORE allocating — the original checked after the
	   cudaMallocs and leaked all four device buffers on the early return.
	   (Also fixes the "Thead" typo in the message.) */
	if (dim > max_threads_per_block)
	{
		printf("Thread count (%i) exceeds maximum", dim);
		return;
	}
	const int log_dim = static_cast<int>(ceil(log(static_cast<float>(dim)) / log(2.0f)));
	/* Get device */
	HANDLE_CUDA_ERROR(cudaSetDevice(0));
	/* Allocate and copy memory (only the first dim entries are populated) */
	float *dev_equal;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_equal, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_equal, equal, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_lower;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_lower, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_lower, lower, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_diagonal;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_diagonal, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_diagonal, diagonal, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_upper;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_upper, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_upper, upper, dim * sizeof(float), cudaMemcpyHostToDevice));
	/* Run kernel: one block, one thread per equation */
	cyclic_reduction_device<<<1, dim>>>(dev_lower, dev_diagonal, dev_upper, dev_equal, dim);
	/* Copy data back (the synchronize also surfaces kernel errors) */
	HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
	HANDLE_CUDA_ERROR(cudaMemcpy(equal, dev_equal, dim * sizeof(float), cudaMemcpyDeviceToHost));
	/* Clean up */
	HANDLE_CUDA_ERROR(cudaFree(dev_equal));
	HANDLE_CUDA_ERROR(cudaFree(dev_lower));
	HANDLE_CUDA_ERROR(cudaFree(dev_diagonal));
	HANDLE_CUDA_ERROR(cudaFree(dev_upper));
	/* Flush profiling info */
	HANDLE_CUDA_ERROR(cudaDeviceReset());
}
/*
 * Parallel cyclic reduction: every equation is reduced at every span, so the
 * whole solve completes in log2(dim) barrier steps. Single block, dim <= 512,
 * bands staged in shared memory; the solution is written to equal_glb.
 */
__global__ void parallel_cyclic_reduction_device(float *lower_glb, float *diagonal_glb, float *upper_glb, float *equal_glb, const int dim)
{
	const int rank = threadIdx.x;
	__shared__ float lower[512];
	__shared__ float diagonal[512];
	__shared__ float upper[512];
	__shared__ float equal[512];
	lower[rank] = lower_glb[rank];
	diagonal[rank] = diagonal_glb[rank];
	upper[rank] = upper_glb[rank];
	equal[rank] = equal_glb[rank];
	__syncthreads();
	for (int span = 1 ; span < dim; span <<= 1)
	{
		/* BUG FIX: the off-diagonal couplings default to ZERO when the
		   neighbour falls outside the system. The original declared the
		   temporaries outside the loop and left lower_tmp/upper_tmp
		   uninitialized for boundary rows, then stored that garbage back
		   into shared memory. */
		float lower_tmp = 0.0f;
		float upper_tmp = 0.0f;
		float result_tmp = 0.0f;
		float diag_tmp = 0.0f;
		if (rank < dim)
		{
			result_tmp = equal[rank];
			diag_tmp = diagonal[rank];
			if (rank - span >= 0)
			{
				/* Fold in the equation `span` rows above. */
				lower_tmp = -lower[rank] / diagonal[rank - span];
				diag_tmp += lower_tmp * upper[rank - span];
				result_tmp += lower_tmp * equal[rank - span];
				lower_tmp *= lower[rank - span];
			}
			if (rank + span < dim)
			{
				/* Fold in the equation `span` rows below. */
				upper_tmp = -upper[rank] / diagonal[rank + span];
				diag_tmp += upper_tmp * lower[rank + span];
				result_tmp += upper_tmp * equal[rank + span];
				upper_tmp *= upper[rank + span];
			}
		}
		__syncthreads();  /* everyone done reading before anyone writes */
		if (rank < dim)
		{
			lower[rank] = lower_tmp;
			upper[rank] = upper_tmp;
			equal[rank] = result_tmp;
			diagonal[rank] = diag_tmp;
		}
		__syncthreads();
	}
	if (rank < dim)
	{
		/* System is fully decoupled: each equation solves independently. */
		equal_glb[rank] = equal[rank] / diagonal[rank];
	}
}
/*
 * Host wrapper: solve a tridiagonal system of size `dim` on the GPU via
 * parallel cyclic reduction. The solution is written back into `equal`.
 */
void parallel_cyclic_reduction(float *lower, float *diagonal, float *upper, float *equal, const int dim)
{
	/* BUG FIX: validate BEFORE allocating — the original checked after the
	   cudaMallocs and leaked all four device buffers on the early return.
	   (Also fixes the "Thead" typo in the message.) */
	if (dim > max_threads_per_block)
	{
		printf("Thread count (%i) exceeds maximum", dim);
		return;
	}
	/* Get device */
	HANDLE_CUDA_ERROR(cudaSetDevice(0));
	/* Allocate and copy memory */
	float *dev_equal;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_equal, dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_equal, equal, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_lower;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_lower, dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_lower, lower, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_diagonal;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_diagonal, dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_diagonal, diagonal, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_upper;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_upper, dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_upper, upper, dim * sizeof(float), cudaMemcpyHostToDevice));
	/* Run kernel: one block, one thread per equation */
	parallel_cyclic_reduction_device<<<1, dim>>>(dev_lower, dev_diagonal, dev_upper, dev_equal, dim);
	/* Copy data back (the synchronize also surfaces kernel errors) */
	HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
	HANDLE_CUDA_ERROR(cudaMemcpy(equal, dev_equal, dim * sizeof(float), cudaMemcpyDeviceToHost));
	/* Clean up */
	HANDLE_CUDA_ERROR(cudaFree(dev_equal));
	HANDLE_CUDA_ERROR(cudaFree(dev_lower));
	HANDLE_CUDA_ERROR(cudaFree(dev_diagonal));
	HANDLE_CUDA_ERROR(cudaFree(dev_upper));
	/* Flush profiling info */
	HANDLE_CUDA_ERROR(cudaDeviceReset());
}
/*
 * Hybrid solver: cyclic reduction down to 128-equation granularity, then
 * parallel cyclic reduction on the reduced system, then cyclic back
 * substitution. Single block, dim <= 512; solution written to equal_glb.
 */
__global__ void hybrid_cyclic_reduction_device(float *lower_glb, float *diagonal_glb, float *upper_glb, float *equal_glb, const int dim)
{
	__shared__ float lower[512];
	__shared__ float diagonal[512];
	__shared__ float upper[512];
	__shared__ float equal[512];
	lower[threadIdx.x] = lower_glb[threadIdx.x];
	diagonal[threadIdx.x] = diagonal_glb[threadIdx.x];
	upper[threadIdx.x] = upper_glb[threadIdx.x];
	equal[threadIdx.x] = equal_glb[threadIdx.x];
	__syncthreads();
	/* Cyclic forward reduction until 128 triples remain */
	int step = cyclic_reduction_forward_reduction(lower, diagonal, upper, equal, dim, 1, 128);
	/* Parallel cyclic reduction on the remaining every-step-th equations */
	const int rank = (threadIdx.x * step) + step - 1;
	for (int span = step; span < dim; span <<= 1)
	{
		/* BUG FIX: off-diagonal couplings default to ZERO when the neighbour
		   lies outside the system; the original left lower_tmp/upper_tmp
		   uninitialized for boundary rows and stored the garbage back to
		   shared memory. */
		float lower_tmp = 0.0f;
		float upper_tmp = 0.0f;
		float result_tmp = 0.0f;
		float diag_tmp = 0.0f;
		if (rank < dim)
		{
			result_tmp = equal[rank];
			diag_tmp = diagonal[rank];
			if (rank - span >= 0)
			{
				lower_tmp = -lower[rank] / diagonal[rank - span];
				diag_tmp += lower_tmp * upper[rank - span];
				result_tmp += lower_tmp * equal[rank - span];
				lower_tmp *= lower[rank - span];
			}
			if (rank + span < dim)
			{
				upper_tmp = -upper[rank] / diagonal[rank + span];
				diag_tmp += upper_tmp * lower[rank + span];
				result_tmp += upper_tmp * equal[rank + span];
				upper_tmp *= upper[rank + span];
			}
		}
		__syncthreads();  /* everyone done reading before anyone writes */
		if (rank < dim)
		{
			lower[rank] = lower_tmp;
			upper[rank] = upper_tmp;
			equal[rank] = result_tmp;
			diagonal[rank] = diag_tmp;
		}
		__syncthreads();
	}
	if (rank < dim)
	{
		equal[rank] /= diagonal[rank];
	}
	__syncthreads();
	/* Cyclic backward substitution */
	cyclic_reduction_back_substitution(lower, diagonal, upper, equal, dim, step >> 1, 0);
	equal_glb[threadIdx.x] = equal[threadIdx.x];
}
/*
 * Host wrapper: solve a tridiagonal system of size `dim` on the GPU with the
 * hybrid CR/PCR solver. The solution is written back into `equal`.
 */
void hybrid_cyclic_reduction(float *lower, float *diagonal, float *upper, float *equal, const int dim)
{
	/* BUG FIX: validate BEFORE allocating — the original checked after the
	   cudaMallocs and leaked all four device buffers on the early return.
	   (Also fixes the "Thead" typo in the message.) */
	if (dim > max_threads_per_block)
	{
		printf("Thread count (%i) exceeds maximum", dim);
		return;
	}
	const int log_dim = static_cast<int>(ceil(log(static_cast<float>(dim)) / log(2.0f)));
	/* Get device */
	HANDLE_CUDA_ERROR(cudaSetDevice(0));
	/* Allocate and copy memory (only the first dim entries are populated) */
	float *dev_equal;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_equal, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_equal, equal, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_lower;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_lower, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_lower, lower, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_diagonal;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_diagonal, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_diagonal, diagonal, dim * sizeof(float), cudaMemcpyHostToDevice));
	float *dev_upper;
	HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_upper, dim * log_dim * sizeof(float)));
	HANDLE_CUDA_ERROR(cudaMemcpy(dev_upper, upper, dim * sizeof(float), cudaMemcpyHostToDevice));
	/* Run kernel: one block, one thread per equation */
	hybrid_cyclic_reduction_device<<<1, dim>>>(dev_lower, dev_diagonal, dev_upper, dev_equal, dim);
	/* Copy data back (the synchronize also surfaces kernel errors) */
	HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
	HANDLE_CUDA_ERROR(cudaMemcpy(equal, dev_equal, dim * sizeof(float), cudaMemcpyDeviceToHost));
	/* Clean up */
	HANDLE_CUDA_ERROR(cudaFree(dev_equal));
	HANDLE_CUDA_ERROR(cudaFree(dev_lower));
	HANDLE_CUDA_ERROR(cudaFree(dev_diagonal));
	HANDLE_CUDA_ERROR(cudaFree(dev_upper));
	/* Flush profiling info */
	HANDLE_CUDA_ERROR(cudaDeviceReset());
}
|
3,946 | #include "includes.h"
// Element-wise power: v[i] <- v[i]^e for each of the n elements, one thread
// per element (threads past n do nothing).
__global__ void pow_kerneld(double *v, int n, double e) {
	const int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx < n) {
		v[idx] = ::pow(v[idx], e);
	}
} |
// Task 1: for every element i < n, increment A1[i] and copy A2[i] into A3[i].
// Grid-stride loop, so any launch configuration covers all n elements.
__global__ void kernel_task1_cuda(int n, int *A1, int *A2, int *A3) {
	const int start = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = blockDim.x * gridDim.x;
	for (int i = start; i < n; i += step) {
		A1[i] = A1[i] + 1;
		A3[i] = A2[i];
	}
}
// Task 2: accumulate A1 into A2 element-wise (A2[i] += A1[i]) for i < n.
// Grid-stride loop, so any launch configuration covers all n elements.
__global__ void kernel_task2_cuda(int n, int *A1, int *A2) {
	const int start = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = blockDim.x * gridDim.x;
	for (int i = start; i < n; i += step) {
		A2[i] = A2[i] + A1[i];
	}
}
// C-linkage launcher for task 1: 256 threads per block, enough blocks to
// cover n elements. Returns 0 (the launch itself is asynchronous).
extern "C" int task1_cuda(int n, int *A1, int *A2, int *A3) {
	const int threadsPerBlock = 256;
	const int numBlocks = (n + threadsPerBlock - 1) / threadsPerBlock;
	kernel_task1_cuda<<<numBlocks, threadsPerBlock>>>(n, A1, A2, A3);
	return 0;
}
// C-linkage launcher for task 2: 256 threads per block, enough blocks to
// cover n elements. Returns 0 (the launch itself is asynchronous).
extern "C" int task2_cuda(int n, int *A1, int *A2) {
	const int threadsPerBlock = 256;
	const int numBlocks = (n + threadsPerBlock - 1) / threadsPerBlock;
	kernel_task2_cuda<<<numBlocks, threadsPerBlock>>>(n, A1, A2);
	return 0;
}
|
3,948 | #include "includes.h"
// Block-level sum reduction with a q-way unrolled pre-accumulation step.
// Each block first folds q consecutive blockDim.x-sized chunks of g_idata
// into its own chunk (in place), then tree-reduces that chunk and writes the
// block total to g_odata[blockIdx.x].
// NOTE(review): the tree reduction does not bounds-check idx+stride against
// n, and the pre-accumulation is skipped wholesale for a partial tail block —
// this appears to assume n is a multiple of blockDim.x * q. TODO confirm at
// the call site.
__global__ void reduceUnrolling (double *g_idata, double *g_odata, unsigned int n, unsigned int q) //added int q
{
	// set thread ID
	unsigned int tid = threadIdx.x;
	unsigned int idx = blockIdx.x * blockDim.x * q + threadIdx.x; // q adapted idx
	// unroll analogous q: fold chunks 1..q-1 of this block's region into chunk 0
	if (idx + blockDim.x*(q-1) < n)
	{
		for (int i=1; i<q; i++)
		{
			g_idata[idx] += g_idata[idx + blockDim.x*i];
		}
	}
	__syncthreads();
	// in-place tree reduction in global memory (halve active threads each pass)
	for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
	{
		if (tid < stride)
		{
			g_idata[idx] += g_idata[idx + stride];
		}
		// synchronize within threadblock
		__syncthreads();
	}
	// write result for this block to global mem
	if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
}
3,949 | #include "stdio.h"
#define COLUMNS 3
#define ROWS 2
// Element-wise sum of two ROWS x COLUMNS matrices stored row-major.
// Launched with a (COLUMNS, ROWS) grid of single-thread blocks:
// blockIdx.x selects the column, blockIdx.y the row.
__global__ void add(int *a, int *b, int *c)
{
    const int col = blockIdx.x;
    const int row = blockIdx.y;
    const int idx = COLUMNS * row + col;
    c[idx] = a[idx] + b[idx];
}
// Demo: fill two small matrices on the host, add them on the GPU with one
// block per element, and print the result.
int main()
{
    int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, ROWS * COLUMNS * sizeof(int));
    cudaMalloc((void **)&dev_b, ROWS * COLUMNS * sizeof(int));
    cudaMalloc((void **)&dev_c, ROWS * COLUMNS * sizeof(int));
    for (int y = 0; y < ROWS; y++) // Fill Arrays
        for (int x = 0; x < COLUMNS; x++)
        {
            a[y][x] = x;
            b[y][x] = y;
        }
    cudaMemcpy(dev_a, a, ROWS * COLUMNS * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, ROWS * COLUMNS * sizeof(int), cudaMemcpyHostToDevice);
    // One single-thread block per matrix element.
    dim3 grid(COLUMNS, ROWS);
    add<<<grid, 1>>>(dev_a, dev_b, dev_c);
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(c, dev_c, ROWS * COLUMNS * sizeof(int), cudaMemcpyDeviceToHost);
    for (int y = 0; y < ROWS; y++)
    {
        for (int x = 0; x < COLUMNS; x++)
        {
            printf("[%d][%d]=%d ", y, x, c[y][x]);
        }
        printf("\n");
    }
    // Fix: device buffers were previously leaked.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
3,950 | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
/*
 * Matrix-matrix product C = A * B with one thread per row of C.
 * A is n_row x n_comm, B is n_comm x n_col, C is n_row x n_col, all row-major.
 * Threads cover the rows with a grid-stride loop, so any launch size works.
 */
__global__ void matrix_matrix_mul_old(int *a, int *b, int *c, int n_row, int n_col, int n_comm)
{
    const int stride = blockDim.x * gridDim.x;
    for (int row = threadIdx.x + blockIdx.x * blockDim.x; row < n_row; row += stride)
    {
        for (int col = 0; col < n_col; col++)
        {
            int acc = 0;
            for (int j = 0; j < n_comm; j++)
            {
                acc += a[n_comm * row + j] * b[j * n_col + col];
            }
            c[row * n_col + col] = acc;
        }
    }
} |
3,951 | /*
initialize all parameter for System model and cost function
*/
#include "../include/init.cuh"
// Fill `a` (7 floats) with the physical parameters of the cart-and-pole model:
// cart mass, pole mass, pole length, pole inertia, gain, friction, gravity.
// (Commented constants for the simple nonlinear system were: a[0]=a[1]=1.)
void init_params( float *a )
{
    a[0] = 0.1f;                        // cart mass
    a[1] = 0.024f;                      // pole mass
    a[2] = 0.2f;                        // pole length
    a[3] = a[1] * powf(a[2], 2) / 3;    // pole moment of inertia (m*l^2/3)
    a[4] = 1.265f;
    a[5] = 0.0000001;
    a[6] = 9.81f;                       // gravity
}

// Fill `a` (4 floats) with the initial state [x, theta, dx, dtheta].
// theta starts slightly past the upright (pi) position.
// (Simple nonlinear system alternative was: a[0]=2, a[1]=0.)
void init_state( float *a )
{
    a[0] = 0.0f;           // x
    a[1] = M_PI + 0.005;   // theta
    a[2] = 0.0f;           // dx
    a[3] = 0.0f;           // dth
}

// Fill `a` (4 floats) with the box constraints for the cart-and-pole
// controller (quadric-fitting superior-constraint parameters).
// (MC superior alternative was: -1, 1, -0.5, 0.5.)
void init_constraint( float *a )
{
    a[0] = -3.0f;
    a[1] = 3.0f;
    a[2] = -0.5f;
    a[3] = 0.5f;
}

// Fill `a` (5 floats) with the cost-matrix weights
// (MC superior parameter set; the quadric-fitting set is kept commented:
// 3.0, 3.5, 0.0, 0.0, 1.0).
void init_matrix( float *a )
{
    a[0] = 3.0f;
    a[1] = 10.0f;
    a[2] = 0.05f;
    a[3] = 0.01f;
    a[4] = 0.5f;
}

// Convenience wrapper: populate all four host-side parameter vectors at once.
void initialize_host_vector(float *get_params, float *get_state, float *get_constraints, float *get_matrix)
{
    init_params( get_params );
    init_state( get_state );
    init_constraint( get_constraints );
    init_matrix( get_matrix );
}
|
3,952 | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdint>
#include <thrust/reduce.h>
#include <cuda.h>
using namespace std;
// Iterative binary search over the ascending slice arr[l..r] (inclusive).
// Returns the index of x within that slice, or -1 when x is absent.
__device__ int binarySearch(int* arr, int l, int r, int x)
{
    int lo = l;
    int hi = r;
    while (lo <= hi)
    {
        const int mid = lo + (hi - lo) / 2;  // overflow-safe midpoint
        if (arr[mid] == x)
            return mid;
        if (arr[mid] < x)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return -1;
}
// Triangle counting: one thread per adjacency-list entry. Thread `adjindex`
// owns the directed edge (adjver[adjindex] -> adjlist[adjindex]) and scans all
// higher-numbered vertices for a wedge that closes back to `vertex`,
// incrementing its private counter slot d_counts[adjindex] per triangle.
// NOTE(review): d_counts is read-modify-written without being initialized in
// this kernel — the caller must zero it before launch; confirm.
__global__ void Tricount(int* beginposition , int* adjlist ,int* d_counts,int* adjver ,int vertices , int entries)
{
int adjindex = blockIdx.x * blockDim.x + threadIdx.x;
int vertex =0 ;
// INDENTIFY WHICH VERTEX THE THREAD IS
if( adjindex < entries )
{
vertex = adjver[adjindex];
int initial_find = 0;
//FIND ITSELF IN ADJLIST
for(int a = vertex + 1 ; a < vertices ; a++)
{
// NOTE(review): beginposition[a+1] is read before the a+1==vertices override
// below, so the final iteration reads index `vertices` — the buffer needs
// vertices+1 slots (or a sentinel) to avoid an out-of-bounds access.
int sizeofarray1 = beginposition[a+1]-beginposition[a];
if( a+1 == vertices)
sizeofarray1 = entries-beginposition[a];
initial_find = binarySearch(adjlist , beginposition[a] , beginposition[a] + sizeofarray1 -1 , adjlist[adjindex]);
if(initial_find != -1)// IF FOUND, FIND VERTEX IN VERTEX2 ADJ
{
int vertex2 = adjver[initial_find];
// Same vertices+1 caveat applies to beginposition[vertex2+1] here.
int sizeofarray = beginposition[vertex2+1]-beginposition[vertex2];
if(vertex2+1 == vertices)
sizeofarray = entries-beginposition[vertex2];
int last_connection = binarySearch(adjlist,beginposition[vertex2],beginposition[vertex2] + sizeofarray -1,vertex);
if(last_connection != -1)//FOUND TRIANGLE
{
//atomicAdd(&d_counts[0],1);
//printf(" %d ",d_counts[0]);
d_counts[adjindex] = d_counts[adjindex] + 1;
}
}
}
}
}
// Parse "email-EuAll_adj.tsv" into CSR-like arrays: `adjlist` receives the
// neighbor of each edge (first column), `beginposition[v]` receives the index
// in adjlist where vertex v's neighbors start (second column drives this;
// a third column per line is read and discarded). Vertices with no edges are
// gap-filled so beginposition stays monotone. Always returns 1; prints an
// error if the file cannot be opened.
// NOTE(review): buffer sizes are the caller's responsibility — this function
// writes up to one beginposition slot per distinct vertex id seen; confirm the
// caller allocates enough (ids are assumed dense and ascending).
int mmioread(int* adjlist , int* beginposition) {
string line;
ifstream myfile ("email-EuAll_adj.tsv");
long linecount =0;
// 0 - adjlist 1 - vertex 2 - N/A
beginposition[0] = 0;
long adjlistpos = 0;
long beginlistpos = 1;
long prevnum = 0;
if (myfile.is_open())
{
while ( getline (myfile,line) )
{
istringstream buf(line);
long type =0;
// Columns cycle through: 0 = neighbor id, 1 = source vertex id, 2 = ignored.
for(string word; buf >> word; )
{
if( type == 0 ) // add adjlist
{
adjlist[adjlistpos] = stoi(word);
adjlistpos++;
type++;
}
else if( type == 1 ) // add begin pos
{
if(prevnum != stoi(word) )
{
// Vertex id jumped by more than one: emit begin positions for the
// skipped (edge-less) vertices so the table stays dense.
if (prevnum+1 != stoi(word) )
{
//printf("now is %d but before was %d\n",stoi(word),prevnum );
for(int a = 0 ; a <stoi(word)-prevnum-1 ; a++)
{
beginposition[beginlistpos] = adjlistpos-1;
//printf("IN \n" );
//printf("putting %d at beginpos %d\n",int(adjlistpos-1),int(beginlistpos));
beginlistpos++;
}
}
beginposition[beginlistpos] = adjlistpos-1;
beginlistpos++;
prevnum = stoi(word);
}
type++;
}
else if (type == 2)
type++;
//forcount++;
}
linecount++;
}
myfile.close();
}
else cout << "Unable to open file";
return 1;
};
// Driver: load the email-EuAll adjacency file, expand it into a per-edge
// source-vertex array, count triangles on the GPU, and reduce on the host.
int main(){
    int vertices = 265215;
    int entries = 728962;
    // Fix: allocate one extra slot so h_beginposition[x+1] is a valid read for
    // x == vertices-1 (both the expansion loop below and the kernel index one
    // past the last vertex).
    int* h_beginposition= new int[vertices + 1];
    int* h_adjlist= new int[entries];
    int* h_adjvertex= new int[entries];
    int* h_count = new int [entries];
    int* d_begin;
    int* d_adj;
    int* d_counts;
    int* d_adjvertex;
    cout <<"Converting MMIO to array form..." <<endl;
    clock_t startTime = clock();
    mmioread(h_adjlist,h_beginposition);
    // Sentinel consistent with the "last vertex" special case below.
    h_beginposition[vertices] = entries;
    // Expand begin positions into a per-edge source-vertex array.
    int pos =0;
    for(int x = 1 ; x < vertices ; x++)
    {
        int size = h_beginposition[x+1] - h_beginposition[x];
        if(x+1 == vertices)
            size = entries-h_beginposition[x];
        for(int y = 0 ; y < size ; y++)
        {
            h_adjvertex[pos] = x;
            pos++;
        }
    }
    double secondsPassed = (clock() - startTime) / CLOCKS_PER_SEC;
    cout <<"Transform complete : "<< secondsPassed << " seconds have passed" << endl;
    cout <<"Allocating space on GPU and transfer data..."<< endl;
    cudaMalloc(&d_begin, (vertices + 1)*sizeof(int));
    cudaMalloc(&d_adj, entries*sizeof(int));
    cudaMalloc(&d_adjvertex, entries*sizeof(int));
    cudaMalloc((void**)&d_counts, entries*sizeof(int));
    // Fix: Tricount increments d_counts in place, so it must start zeroed;
    // previously it accumulated onto uninitialized device memory.
    cudaMemset(d_counts, 0, entries*sizeof(int));
    cudaMemcpy(d_begin, h_beginposition, (vertices + 1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_adj, h_adjlist, entries*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_adjvertex, h_adjvertex, entries*sizeof(int), cudaMemcpyHostToDevice);
    int blocks = (entries/1024)+1;
    cout << "Now counting Triangles" <<endl;
    Tricount<<<blocks, 1024>>>(d_begin,d_adj,d_counts,d_adjvertex,vertices,entries);
    cout << "Done..." <<endl;
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(h_count,d_counts,entries*sizeof(int),cudaMemcpyDeviceToHost);
    cout << "Done with MEMCOPY...Now counting" <<endl;
    // Each triangle is found once per participating edge, hence /3 below.
    int result = thrust::reduce(h_count, h_count+ entries);
    printf("answer : %d \n",result/3);
    cudaFree(d_begin);
    cudaFree(d_adj);
    cudaFree(d_counts);
    cudaFree(d_adjvertex);    // fix: was leaked
    delete[] h_beginposition; // fix: host buffers were leaked
    delete[] h_adjlist;
    delete[] h_adjvertex;
    delete[] h_count;
    //3686467
}
|
3,953 | //pass
//--gridDim=64 --blockDim=256
template <class T> __global__ void reduce3(T *g_idata, T *g_odata, unsigned int n);
template __global__ void reduce3<int>(int *g_idata, int *g_odata, unsigned int n);
#include "common.h"
// Block-level sum reduction: each thread loads up to two elements from global
// memory into shared memory, then the block tree-reduces in shared memory and
// writes one partial sum per block to g_odata[blockIdx.x].
// Requires dynamic shared memory of blockDim.x * sizeof(T) (via SharedMemory<T>).
template <class T>
__global__ void
reduce3(T *g_idata, T *g_odata, unsigned int n)
{
    T *sdata = SharedMemory<T>();
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    T mySum = (i < n) ? g_idata[i] : 0;
    if (i + blockDim.x < n){
        mySum += g_idata[i+blockDim.x];
    }
    sdata[tid] = mySum;
    // Fix: barrier re-enabled — every thread must have published its element
    // before any thread reads a neighbor's slot, otherwise the tree reduction
    // below races on shared memory.
    __syncthreads();
    // do reduction in shared mem
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s)
        {
            sdata[tid] = mySum = mySum + sdata[tid + s];
        }
        // Fix: per-level barrier re-enabled for the same reason.
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0){
        g_odata[blockIdx.x] = sdata[0];
    }
}
|
3,954 | #include <stdio.h>
// Element-wise sum c = a + b over a flattened buffer of `length` ints.
// The 2-D launch is collapsed to a flat index (r * width + q); the guard
// protects against the grid over-covering the data.
__global__ void vecMatSum(int *a, int *b, int *c, int width, int length){
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int q = blockIdx.y * blockDim.y + threadIdx.y;
    const int flat = r * width + q;
    if (flat < length) {
        c[flat] = a[flat] + b[flat];
    }
}
// Benchmark driver: adds two width x width int matrices on the GPU ten times
// and reports per-run and average kernel times via CUDA events.
// argv[1] = threads per block dimension (default 16),
// argv[2] = matrix width (default 300).
int main(int argc, char* argv[]){
//initialization code
int width,size,threads,blocks,totalSize;
float total_time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(argv[2])
width = atoi(argv[2]);
else
width = 300;
size = width*width;
if(argv[1])
threads = atoi(argv[1]);
else
threads = 16;
dim3 ThreadsInBlock(threads,threads); //will provide threads * threads threads
// Start from a square grid that roughly covers `size` threads, then grow the
// y-dimension until total thread count is at least `size`.
// NOTE(review): only y grows here, while the kernel's flat index depends on
// both grid dimensions — confirm every element is reached for all widths.
blocks = (int) sqrt((float) size / (float) (threads*threads));
dim3 BlockDim(blocks,blocks);
while(BlockDim.x*BlockDim.y*threads*threads < size)
BlockDim.y += 1;
totalSize = size*sizeof(int);
printf("\n%ix%i blocks of %ix%i threads = %i threads total \n",BlockDim.x,BlockDim.y,ThreadsInBlock.x,ThreadsInBlock.y,BlockDim.x*BlockDim.y*ThreadsInBlock.x*ThreadsInBlock.y);
//end init
//start mallocs
int *a,*dev_a,*b,*dev_b,*c,*dev_c;
cudaMalloc((void**)&dev_a,totalSize);
cudaMalloc((void**)&dev_b,totalSize);
cudaMalloc((void**)&dev_c,totalSize);
a = (int*) malloc(totalSize);
b = (int*) malloc(totalSize);
c = (int*) malloc(totalSize);
//end mallocs
//problem specific
int idx;
for(idx=0;idx<size;idx++){
a[idx] = idx;
b[idx] = idx*2;
}
//copy to dev
cudaMemcpy(dev_a,a,totalSize,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,totalSize,cudaMemcpyHostToDevice);
//end copy
int iteration = 0;
float avg_time = 0;
// Time the kernel ten times with CUDA events and accumulate the average.
for(iteration=0;iteration<10;iteration++){
//call kernel and measure times
cudaEventRecord(start,0);
vecMatSum<<<BlockDim,ThreadsInBlock>>>(dev_a,dev_b,dev_c,width,size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&total_time,start,stop);
printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
avg_time+=total_time;
}
avg_time/=10.0;
printf("average time for %ix%i matrix sum is %f ",width,width,avg_time);
//copy back and prints
cudaMemcpy(c,dev_c,totalSize,cudaMemcpyDeviceToHost);
for(idx=0;idx<size;idx+=size/5)
printf("\n a[%i]=%i\n",idx,c[idx]);
//free
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
3,955 | #include "includes.h"
// this is how cuda knows that this code is a kernel by calling __global__
// Cube every input element: d_out[i] = d_in[i]^3.
// Indexing uses threadIdx.x only, so this assumes a single-block launch.
__global__ void cube(float * d_out, float * d_in) {
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v * v;
} |
3,956 | /*
Parallel Tile Coding Software version 3.0beta translated to C
by Jaden Travnik based on Rich Sutton's Python implementation
*/
// PARALLEL TILECODING
// A gpu kernal function which completes the hashing function started in calCoordAndHashFloat and stores it in d_hashArray on the gpu
// Finish the hash begun in calcCoordAndHashFloat: apply the avalanche
// xor-shifts in place and map each tile's hash into [0, size).
// Single-block launch: threadIdx.x indexes the tile.
__global__ void shiftHash(unsigned * d_hashArray, int size){
    unsigned v = d_hashArray[threadIdx.x];
    v ^= (v >> 20) ^ (v >> 12);
    v = v ^ (v >> 7) ^ (v >> 4);
    d_hashArray[threadIdx.x] = v % size;
}
// A gpu kernel function which takes the floats, and integers and finds hashes their value
// blockIdx.x is the tile index, threadIdx.x is the index of the coordinate
// d_hashArray is an array of length numTiles, it will contain the hashed index
// A gpu kernel function which takes the floats, and integers and finds hashes their value
// blockIdx.x is the tile index, threadIdx.x is the index of the coordinate
// d_hashArray is an array of length numTiles, it will contain the hashed index
// Each thread hashes one coordinate of its tile and atomically folds the
// contribution into d_hashArray[blockIdx.x]; shiftHash finishes the hash.
// Launch shape (set by parallel_tiles): <<<numtilings, 2 + lenFloats + lenInts>>>.
// NOTE(review): d_hashArray must be zeroed before launch (parallel_tiles does
// a cudaMemset) since contributions are accumulated with atomicAdd.
__global__ void calcCoordAndHashFloat(unsigned * d_hashArray, int numtilings, float * d_floats, int lenFloats, int * d_ints, int lenInts){
float coord = 1;
unsigned floatAsInt = 1;
int offset = threadIdx.x - 2; // these offsets are used to get the same hash results from the serial hashFloatArray function
if (threadIdx.x > 0){
coord = (float) blockIdx.x;
if (threadIdx.x > 1 && threadIdx.x <= lenFloats + 1){
// this is the tile mapping function of floating point numbers
coord = floor((floor(d_floats[offset]*numtilings) + blockIdx.x + offset*blockIdx.x*2) / numtilings);
} else if (threadIdx.x > lenFloats + 1) {
// "append" the integers to the coordinate array
coord = (float) d_ints[offset - lenFloats];
}
// This is where the hashing function starts. Another hashing function could be used instead and here is where it would be used.
// Reinterpret the float's bit pattern as an integer (type-punning).
floatAsInt = *(int*)(&coord);
}
// Weight each coordinate by 31^position, relying on unsigned wraparound.
for (int i = 0; i < lenFloats + lenInts + 1 - threadIdx.x; i++){
floatAsInt *= 31; // cant use pow on the gpu as we want an unsigned overflow
}
atomicAdd(& d_hashArray[blockIdx.x], floatAsInt);
}
// returns tile indicies corresponding to the floats and ints
void parallel_tiles(int size, unsigned * d_hashArray, int numtilings, float * d_floats, float *h_floats, int lenFloats, int * d_ints, int *h_ints, int lenInts, unsigned *Tiles) {
// reset the d_hashArray to be zero so we can add to it in calcCoordAndHashFloat
cudaMemset(d_hashArray, 0, numtilings*sizeof(unsigned));
// Copy the data from the cpu over to the gpu
cudaMemcpy(d_floats, h_floats, lenFloats * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ints, h_ints, lenInts * sizeof(int), cudaMemcpyHostToDevice);
// call kernel to compute the coordinates and being the hashing function
calcCoordAndHashFloat<<<numtilings, 2 + lenFloats + lenInts>>>(d_hashArray, numtilings, d_floats, lenFloats, d_ints, lenInts);
// call a kernel to complete the hashing function
shiftHash<<<1,numtilings>>>(d_hashArray, size);
// copy the memory from the gpu to the cpu
cudaMemcpy(Tiles, d_hashArray, numtilings * sizeof(int), cudaMemcpyDeviceToHost);
} |
3,957 | #include <iostream>
#include <stdlib.h>
#include <math.h>
using namespace std;
// Per-block maximum of a 256-element chunk of `a`; result in b[blockIdx.x].
// Fix: seed with INT_MIN instead of 0, so all-negative chunks reduce correctly.
__global__ void maximum(int *a, int *b, int n)
{
int max = -2147483647 - 1; // INT_MIN without needing a host header on device
int index = 256 * blockIdx.x;
for(int i=index;i<min(256+index,n);i++)
{
if(a[i]>max)
{
max=a[i];
}
}
b[blockIdx.x]=max;
}
// Per-block minimum of a 256-element chunk of `a`; result in b[blockIdx.x].
// Fix: seed with INT_MAX instead of 999999, so chunks whose smallest value
// exceeds 999999 reduce correctly.
__global__ void minimum(int *a, int *b, int n)
{
int mini = 2147483647; // INT_MAX
int index = 256 * blockIdx.x;
for(int i=index;i<min(256+index,n);i++)
{
if(a[i]<mini)
{
mini=a[i];
}
}
b[blockIdx.x]=mini;
}
// Per-block sum of a 256-element chunk of `a`; result in b[blockIdx.x].
__global__ void sum(int *a, int *b, int n)
{
int sum=0;
int index = 256 * blockIdx.x;
for(int i=index;i<min(256+index,n);i++)
{
sum+=a[i];
}
b[blockIdx.x]=sum;
}
// Per-block sum of squared deviations from `mean` for a 256-element chunk.
// NOTE(review): the double accumulator is stored into an int output slot, so
// the fractional part is truncated per block — acceptable only if callers
// tolerate integer variance; the int* interface is kept for compatibility.
__global__ void standard_deviation(int *a, int *b, int n,double mean)
{
double sum=0;
int index = 256 * blockIdx.x;
for(int i=index;i<min(256+index,n);i++)
{
sum+=(a[i]-mean)*(a[i]-mean);
}
b[blockIdx.x]=sum;
}
// Driver: fills an array with random ints, computes max/min/sum/mean/stddev
// both sequentially on the CPU and via repeated per-block GPU reductions
// (each pass shrinks n by 256x until one value remains), then compares
// results and timings.
// NOTE(review): the grid size is computed once for the initial n and reused
// while n shrinks — surplus blocks write values that are never copied back,
// so results stay correct but the launches are oversized.
int main()
{
int n,nmin,nsum,nmean,nsd;
cout<<"\nEnter n:";
cin>>n;
int *hostA=(int*)malloc(sizeof(int)*n);
// Separate copies of n: each reduction loop below destroys its counter.
nmin=n;
nsum=n;
nmean=n;
nsd=n;
float timeS=0.0,timeP=0.0;
cudaEvent_t startS,endS,startP,endP;
cudaEventCreate(&startS);
cudaEventCreate(&endS);
cudaEventCreate(&startP);
cudaEventCreate(&endP);
cudaEventRecord(startS);
// Sequential reference pass: max, min, and sum in one sweep.
int max=0,min=9999999,ssum=0;
double sd_sum=0.0f;
for(int i=0;i<n;i++)
{
hostA[i]=rand()%n;
cout<<hostA[i]<<"\t";
if(hostA[i]>max)
{
max=hostA[i];
}
if(hostA[i]<min)
{
min=hostA[i];
}
ssum+=hostA[i];
}
double means=(ssum*1.0f)/(nmean*1.0f);
for(int i=0;i<n;i++)
{
sd_sum+=(hostA[i]-means)*(hostA[i]-means);
}
cudaEventRecord(endS);
cudaEventSynchronize(endS);
cudaEventElapsedTime(&timeS,startS,endS);
cudaEventRecord(startP);
int *deviceA;
cudaMalloc(&deviceA, sizeof(int)*n);
cudaMemcpy(deviceA,hostA,sizeof(int)*n,cudaMemcpyHostToDevice);
int *deviceB;
int grids=ceil(n*1.0f/256*1.0f);
cudaMalloc(&deviceB, sizeof(int)*grids);
dim3 grid(grids,1);
dim3 block(1,1);
// Repeated reduction: each pass collapses 256 elements per block, then the
// (smaller) partial-result array becomes the next pass's input.
while(n>1)
{
maximum<<<grid,block>>>(deviceA,deviceB,n);
n=ceil(n*1.0f/256*1.0f);
cudaMemcpy(deviceA,deviceB,n*sizeof(int),cudaMemcpyDeviceToDevice);
}
int maxi[2];
// Copies 4 bytes, i.e. just the final reduced value into maxi[0].
cudaMemcpy(maxi,deviceA,4,cudaMemcpyDeviceToHost);
cout<<"\nParallel Max="<<maxi[0]<<endl<<"Sequential Max="<<max<<endl;
//----------------------------------min----------------------------------------------------------
cudaMemcpy(deviceA,hostA,sizeof(int)*nmin,cudaMemcpyHostToDevice);
while(nmin>1)
{
minimum<<<grid,block>>>(deviceA,deviceB,nmin);
nmin=ceil(nmin*1.0f/256*1.0f);
cudaMemcpy(deviceA,deviceB,nmin*sizeof(int),cudaMemcpyDeviceToDevice);
}
int mini[2];
cudaMemcpy(mini,deviceA,4,cudaMemcpyDeviceToHost);
cout<<"\nParallel Min="<<mini[0]<<endl<<"Sequential Min="<<min<<endl;
//--------------------------------sum------------------------------------------------------------
cudaMemcpy(deviceA,hostA,sizeof(int)*nsum,cudaMemcpyHostToDevice);
while(nsum>1)
{
sum<<<grid,block>>>(deviceA,deviceB,nsum);
nsum=ceil(nsum*1.0f/256*1.0f);
cudaMemcpy(deviceA,deviceB,nsum*sizeof(int),cudaMemcpyDeviceToDevice);
}
int sums[2];
cudaMemcpy(sums,deviceA,4,cudaMemcpyDeviceToHost);
cout<<"\nParallel Sum="<<sums[0]<<endl<<"Sequential sum="<<ssum<<endl;
double mean=(double)(sums[0]*1.0f/nmean*1.0f);
cout<<"\nParallel Mean="<<mean<<endl<<"Sequential mean="<<means<<endl;
//--------------------------------sd-------------------------------------------------------------
cudaMemcpy(deviceA,hostA,sizeof(int)*nsd,cudaMemcpyHostToDevice);
while(nsd>1)
{
standard_deviation<<<grid,block>>>(deviceA,deviceB,nsd,mean);
nsd=ceil(nsd*1.0f/256*1.0f);
cudaMemcpy(deviceA,deviceB,nsd*sizeof(int),cudaMemcpyDeviceToDevice);
}
int sdp[2];
cudaMemcpy(sdp,deviceA,4,cudaMemcpyDeviceToHost);
cout<<"\nParallel SD="<<(double)sqrt((sdp[0]*1.0f)/(nmean*1.0f))<<endl<<"Sequential SD="<<(double)sqrt((sd_sum*1.0f)/(nmean*1.0f))<<endl;
cudaEventRecord(endP);
cudaEventSynchronize(endP);
cudaEventElapsedTime(&timeP,startP,endP);
cout<<"\nSequential Time="<<timeS;
cout<<"\nParallel Time="<<timeP<<endl;
return cudaDeviceSynchronize();
}
|
3,958 | #include "includes.h"
// greatest_row: device-global pivot-row index consumed by swapRow.
// NOTE(review): presumably written by a preceding pivot-search kernel before
// swapRow launches — confirm against the caller.
// swap: exchange arr[ind_a] and arr[ind_b] (device helper).
__device__ int greatest_row; __device__ void swap(float* arr, int ind_a, int ind_b)
{
float tmp = arr[ind_a];
arr[ind_a] = arr[ind_b];
arr[ind_b] = tmp;
}
// Swap row k of `mat` (rows x cols, row-major) with the row stored in the
// device global `greatest_row`, swap the matching entries of vector b, and
// snapshot column k into column_k. One thread per column; thread i == cols
// handles the b swap; threads i < rows also record column_k[i].
// NOTE(review): looks like the row-interchange step of Gaussian elimination
// with partial pivoting — confirm against the surrounding solver.
__global__ void swapRow(float* mat, float* b, float* column_k, int rows, int cols, int k)
{
int row_i = greatest_row;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (k != row_i) //If the same row don't swap.
{
if (i < cols) //Ensure bounds
{
//Swap:
float tmp = mat[k*cols + i];
mat[k*cols + i] = mat[row_i*cols + i];
mat[row_i*cols + i] = tmp;
}
//Swap vector b:
else if (i == cols)
{
float tmp = b[k];
b[k] = b[row_i];
b[row_i] = tmp;
}
}
//Store column k in a separate array: (A[k,k] is updated since the same warp swaps it).
// NOTE(review): the comment above assumes the swapping thread and the
// column_k-recording thread sit in the same warp; across warps there is no
// barrier, so column_k may capture pre-swap values — verify launch shape.
if (i < rows)
column_k[i] = mat[i*cols + k];
} |
3,959 | // Copyright 2013
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Copy `height`-long strips from data_im into data_col, one thread per strip.
// n is the total strip count; index is decomposed into (image_in, column).
// NOTE(review): the exact source/destination layouts depend on how the caller
// packs data_im (strides involve both `imagenum` and `height`) — the pointer
// arithmetic below is the ground truth; confirm intended layout with callers.
__global__ void im2col(const int n, const float * data_im,
const int height, const int width, const int imagenum,
float * data_col) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
// Split the flat thread index into an image index and a within-image offset.
int image_in = index/width;
index -=image_in * width;
// Advance both pointers to this thread's strip.
data_col += height * width * image_in + index * height ;
data_im += height * image_in + imagenum * height * index;
// Copy the strip element by element (source stride 1, dest stride 1).
for (int i = 0; i < height; ++i) {
*data_col = data_im[i];
data_col += 1;
}
}
}
|
3,960 | #include "relu-grad.hh"
#include "graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
    // ReluGrad op: gradient of ReLU given the forward input z and the
    // upstream gradient dout. Output shape matches z.
    ReluGrad::ReluGrad(Op* z, Op* dout)
        : Op("relu_grad", z->shape_get(), {z, dout})
    {}
    // Compile this op into the runtime graph: look up the compiled
    // predecessors (z at index 0, dout at index 1), allocate an output tensor
    // of the same total size, and register an op_relu_grad runtime node that
    // depends on both predecessor nodes.
    void ReluGrad::compile()
    {
        auto& g = Graph::instance();
        auto& cz_out = g.compiled(preds()[0]);
        auto& cdout = g.compiled(preds()[1]);
        std::size_t len = cz_out.out_shape.total();
        Shape out_shape = cz_out.out_shape;
        // tensor_alloc takes an element count; ownership semantics are
        // handled by the graph/runtime (NOTE(review): confirm who frees).
        dbl_t* out_data = tensor_alloc(len);
        auto out_node = rt::Node::op_relu_grad(cz_out.out_data, cdout.out_data, out_data,
                                               len,
                                               {cz_out.out_node, cdout.out_node});
        g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
    }
}
|
3,961 | #include "includes.h"
// ReLU backward pass: dZ = dA wherever the forward input Z was positive,
// zero elsewhere. One thread per element over the Z_x_dim * Z_y_dim buffer.
__global__ void reluActivationBackprop(float* Z, float* dA, float* dZ, int Z_x_dim, int Z_y_dim) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = Z_x_dim * Z_y_dim;
    if (i < total) {
        dZ[i] = (Z[i] > 0) ? dA[i] : 0.0f;
    }
} |
3,962 | #include "includes.h"
// Digamma (psi) function, single precision: uses the recurrence
// psi(x) = psi(x+1) - 1/x to push x above 7, then an asymptotic series.
// NOTE(review): no guard for x <= 0 — the recurrence loop assumes positive x;
// confirm callers only pass positive arguments.
__device__ float digamma_fl(float x) {
float result = 0.0f, xx, xx2, xx4;
for ( ; x < 7.0f; ++x) { /* reduce x till x<7 */
result -= 1.0f/x;
}
x -= 1.0f/2.0f;
xx = 1.0f/x;
xx2 = xx*xx;
xx4 = xx2*xx2;
// Asymptotic expansion in inverse even powers of (x - 1/2).
result += logf(x)+(1.0f/24.0f)*xx2-(7.0f/960.0f)*xx4+(31.0f/8064.0f)*xx4*xx2-(127.0f/30720.0f)*xx4*xx4;
return result;
}
// Digamma (psi) function, double precision; same algorithm as digamma_fl.
__device__ double digamma(double x) {
double result = 0.0, xx, xx2, xx4;
for ( ; x < 7.0; ++x) { /* reduce x till x<7 */
result -= 1.0/x;
}
x -= 1.0/2.0;
xx = 1.0/x;
xx2 = xx*xx;
xx4 = xx2*xx2;
result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4;
return result;
}
// Evaluate, for Nd equally spaced candidate nu values starting at nulow with
// step deltanu, the objective q[tid] used to choose the Student-t degrees of
// freedom; qsum folds in the data-dependent term. One thread per candidate.
__global__ void kernel_evaluatenu_fl(int Nd, float qsum, float *q, float deltanu,float nulow) {
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid<Nd) {
float thisnu=(nulow+((float)tid)*deltanu);
float dgm=digamma_fl(thisnu*0.5f+0.5f);
q[tid]=dgm-logf((thisnu+1.0f)*0.5f); /* psi((nu+1)/2)-log((nu+1)/2) */
dgm=digamma_fl(thisnu*0.5f);
q[tid]+=-dgm+logf((thisnu)*0.5f); /* -psi((nu)/2)+log((nu)/2) */
q[tid]+=-qsum+1.0f; /* -(-sum(ln(w_i))/N+sum(w_i)/N)+1 */
}
} |
3,963 |
// Backward-difference divergence of the dual field (pz, py, px) at voxel p of
// a shape.z x shape.y x shape.x volume (row-major, size2d = y*x plane size).
// At the low boundary of each axis the one-sided value is used.
__device__
float divergence(const float* pz, const float* py, const float* px,
long idx, const int3& p, long size2d, const int3& shape)
{
float _div = 0.0f;
long _idx;
if ( p.z - 1 >= 0 ) {
_idx = (p.z - 1) * size2d + p.y * shape.x + p.x;
_div += (pz[idx] - pz[_idx]);
} else {
_div += pz[idx];
}
if ( p.y - 1 >= 0 ) {
_idx = p.z * size2d + (p.y - 1) * shape.x + p.x;
_div += (py[idx] - py[_idx]);
} else {
_div += py[idx];
}
if ( p.x - 1 >= 0 ) {
_idx = p.z * size2d + p.y * shape.x + (p.x - 1);
_div += (px[idx] - px[_idx]);
} else {
_div += px[idx];
}
return _div;
}
// Forward-difference gradient of scalar field u at voxel p; components at the
// high boundary of each axis are zeroed (Neumann-style).
__device__
void gradient(const float* u, float3& grad, long idx,
const int3& p, long size2d, const int3& shape)
{
float uidx = u[idx];
if ( p.z + 1 < shape.z ) {
grad.z = (u[(p.z+1)*size2d + p.y*shape.x + p.x] - uidx);
} else {
grad.z = 0;
}
if ( p.y + 1 < shape.y ) {
grad.y = (u[p.z*size2d + (p.y+1)*shape.x + p.x] - uidx);
} else {
grad.y = 0;
}
if ( p.x + 1 < shape.x ) {
grad.x = (u[p.z*size2d + p.y*shape.x + (p.x+1)] - uidx);
} else {
grad.x = 0;
}
}
// Primal update: u <- (1-tau)*u + tau*(f + lambda*div(p)), one thread per
// voxel of the flattened volume. NOTE(review): together with update_p this
// appears to be a primal-dual total-variation denoising iteration — confirm
// against the calling solver.
__global__
void update_u(const float* f, const float* pz, const float* py, const float* px,
float* u, float tau, float lambda, const int3 shape)
{
long idx = blockDim.x * blockIdx.x + threadIdx.x;
long plane = shape.y * shape.x;
if ( idx >= plane * shape.z )
return;
// Unflatten idx into (z, y, x).
long t = idx % plane;
int3 pos;
pos.z = idx / plane;
pos.y = t / shape.x;
pos.x = t % shape.x;
float _div = divergence(pz, py, px, idx, pos, plane, shape);
float r = u[idx] * (1.0f - tau) + tau * (f[idx] + lambda * _div);
u[idx] = r;
}
// Dual update: p <- (p + tau*grad(u)) projected onto the unit ball
// (component-wise division by max(1, |q|)), one thread per voxel.
__global__
void update_p(const float* u, float* pz, float* py, float* px,
float tau, const int3 shape)
{
long idx = blockDim.x * blockIdx.x + threadIdx.x;
long plane = shape.y * shape.x;
if ( idx >= plane * shape.z )
return;
long t = idx % plane;
int3 pos;
pos.z = idx / plane;
pos.y = t / shape.x;
pos.x = t % shape.x;
float3 grad, q;
gradient(u, grad, idx, pos, plane, shape);
q.z = pz[idx] + tau * grad.z;
q.y = py[idx] + tau * grad.y;
q.x = px[idx] + tau * grad.x;
float n = q.z * q.z + q.y * q.y + q.x * q.x;
float norm = fmaxf(1.0f, sqrtf(fmaxf(0, n)));
pz[idx] = q.z / norm;
py[idx] = q.y / norm;
px[idx] = q.x / norm;
} |
3,964 | #include "includes.h"
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
#define BLOCK_SIZE 512
void check_param(void);
void printfinal (void);
/**********************************************************************
* Initialize points on line
*********************************************************************/
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
// Initialize one point of the vibrating string to sin(2*pi*x) and advance it
// nsteps time steps of the discretized wave equation, writing only the final
// value to values_d[idx]. One thread per point.
// NOTE(review): indices 0, 1 and >= tpoints return early, so values_d[0] and
// values_d[1] are never written by this kernel — confirm the host initializes
// the fixed endpoints separately.
__global__ void init_and_update (float *values_d, int tpoints, int nsteps){
int idx = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if(idx <= 1 || idx >= tpoints)
return;
float old_v, v, new_v;
float x, tmp;
tmp = tpoints - 1;
// Normalized position along the string in [0, 1].
x = idx / tmp;
v = sin(2.0 * PI * x);
old_v = v;
// Leapfrog-style update; the constant 0.09 folds in (c*dt/dx)^2.
for (int i = 1; i <= nsteps; i++){
new_v = (2.0 * v) - old_v + (0.09 * (-2.0 * v));
old_v = v;
v = new_v;
}
values_d[idx] = v;
} |
3,965 | #include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <iostream>
// Thrust walkthrough: elementary transforms on three small device vectors,
// ending with Y printed one value per line.
int main(void)
{
    // Three 10-element vectors living in device memory.
    thrust::device_vector<int> X(10);
    thrust::device_vector<int> Y(10);
    thrust::device_vector<int> Z(10);
    // X = 0, 1, 2, ..., 9
    thrust::sequence(X.begin(), X.end());
    // Y = -X (unary transform)
    thrust::transform(X.begin(), X.end(), Y.begin(), thrust::negate<int>());
    // Z = 2, 2, ..., 2
    thrust::fill(Z.begin(), Z.end(), 2);
    // Y = X mod Z, i.e. X mod 2 (binary transform)
    thrust::transform(X.begin(), X.end(), Z.begin(), Y.begin(), thrust::modulus<int>());
    // Every 1 in Y becomes 10.
    thrust::replace(Y.begin(), Y.end(), 1, 10);
    // Stream Y to stdout, newline-separated.
    thrust::copy(Y.begin(), Y.end(), std::ostream_iterator<int>(std::cout, "\n"));
    return 0;
} |
3,966 | #define ELEMENT_SIZE 64
#define BLOCK_SIZE 16
extern "C"
__global__ void int8pack_kernel(long *ret, const unsigned char *input, const int ret0, const int ret1, const int input1) {
const int y = blockIdx.x * blockDim.x + threadIdx.x;
const int x = blockIdx.y * blockDim.y + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int cache1 = ELEMENT_SIZE / 8 * blockDim.x;
const int square_size = BLOCK_SIZE * BLOCK_SIZE;
const int offset_y = blockIdx.x * blockDim.x * ELEMENT_SIZE / 8;
const int offset = blockIdx.y * blockDim.y * input1 + offset_y;
__shared__ unsigned char cache[BLOCK_SIZE * BLOCK_SIZE * ELEMENT_SIZE / 8];
for (int i = 0; i < ELEMENT_SIZE / 8; i++){
const int idx = i * square_size + tid;
const int x_sub = idx / cache1;
const int y_sub = idx - x_sub * cache1;
cache[x_sub * cache1 + y_sub] = y_sub + offset_y < input1 ? input[x_sub * input1 + y_sub + offset] : 0;
}
__syncthreads();
const int y_input = threadIdx.x * ELEMENT_SIZE / 8;
if (x < ret0 && y < ret1) {
long tmp = 0;
#pragma unroll
for (int i = 0; i < ELEMENT_SIZE / 8; i++) {
tmp |= ((long) cache[threadIdx.y * cache1 + y_input + i] ) << (8 * i);
}
ret[x * ret1 + y] = tmp;
}
} |
3,967 | #include<iostream>
#include<thrust/reduce.h>
#include<thrust/sequence.h>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
using namespace std;
// Sanity check for thrust::reduce: sum 0..N-1 on the device and compare
// against a serial host-side sum.
int main(){
    const int N = 5000;
    thrust::device_vector<int> a(N);
    // Fill a with the sequence 0, 1, ..., N-1.
    thrust::sequence(a.begin(), a.end(), 0);
    // Device-side sum of all elements.
    const int SUM = thrust::reduce(a.begin(), a.end(), 0);
    // Serial reference sum on the host.
    int expected = 0;
    for (int i = 0; i < N; ++i)
        expected += i;
    if (expected != SUM) {
        cerr << "Test failed!" << endl;
        return -1;
    }
    cout << "hello world;" << endl;
    return 0;
} |
3,968 | // *----------------------------------------------
// Author Contact Information:
// Hao Gao
// hao.gao@emory.edu || hao.gao.2012@gmail.com
// Department of Mathematics and Computer Science, Emory University
// Department of Radiology and Imaging Sciences, Emory University
//
// Copyright (c) Hao Gao 2012
// ----------------------------------------------*/
//
// If you find this code useful, you may cite the following reference:
// H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012).
// The full source codes are available at https://sites.google.com/site/fastxraytransform
#include <math.h>
#include <malloc.h>
#define ABS(a) (a>0?a:-(a))
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 16
extern "C" void Ax_cone_mf_gpu_new(float *X,float *y,float *yNorm,float *sd_phi,float *sd_z,float *y_det,float *z_det,int *id_Y,int *Nv,
float SO,float OD,float scale,float dz,int nx,int ny,int nz,int nt,int na,int nb,int nv,int tmp_size,int nv_block);
__global__ void Ax_cone_mf_gpu_kernel_new(float *x,float *y,float *yNorm,float *sd_phi,float *sd_z,float *y_det,float *z_det,int *id_Y,
float SO,float OD,float scale,float dz,int nx,int ny,int nz,int na,int nb,int nv2)
// Please note that this version has O(Nx) per thread, since GPU threads are already saturated.
// O(1) per thread can be achieved by parallelizing the "for" loop here, given sufficient number of GPU threads.
//
// Cone-beam forward projection for one chunk of nv2 views.
// Thread mapping: ia = detector column, ib2 = (view-in-chunk)*nb + detector row.
// Each thread integrates one source->detector ray through the volume x,
// accumulating intersection-length-weighted voxel values into y[id];
// yNorm[id] is y[id] divided by the (scaled) source-to-detector distance.
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx0=threadIdx.x;
int ty0=threadIdx.y;
int ia=bx*BLOCK_SIZE_x+tx0;
int ib2=by*BLOCK_SIZE_y+ty0;
if(ia<na&&ib2<nb*nv2)
{
int nd,nx2,ny2,nz2,iv0,iv,ib,id,ix,iy,iz,cx1,cx2,cy1,cy2,cz1,cz2;
float norm,cos_phi,sin_phi,x1,y1,x2,y2,z1,z2,xx1,yy1,zz1,xx2,yy2,zz2,slope1,slope2,l,d,tmp,rx,ry,rz;
// half-dimensions: the volume is centered on the rotation axis
nx2=nx/2;ny2=ny/2;nz2=nz/2;
nd=na*nb;
// split ib2 into view-in-chunk (iv0) and detector row (ib); iv is the global view
iv0=(int)floor((float)ib2/(float)nb);
ib=ib2-iv0*nb;
iv=id_Y[iv0];
id=iv0*nd+ib*na+ia;
// per-view geometry: source at (x1,y1,z1), detector pixel at (x2,y2,z2)
cos_phi=cosf(sd_phi[iv]);sin_phi=sinf(sd_phi[iv]);
x1=cos_phi*(-SO);
y1=sin_phi*(-SO);
z1=sd_z[iv];
x2=cos_phi*OD-sin_phi*y_det[ia];
y2=sin_phi*OD+cos_phi*y_det[ia];
z2=z_det[ib]+sd_z[iv];
// source-to-detector distance in physical units, used for yNorm
norm = sqrt(pow(x1-x2,2)+pow(y1-y2,2)+pow(z1-z2,2))*scale;
y[id]=0;
// assuming z1-z2 is small
// Branch 1: ray more x-aligned -> traverse the volume slice by slice in x.
if(ABS(x1-x2)>ABS(y1-y2))
{ slope1=(y2-y1)/(x2-x1);
slope2=(z2-z1)/(x2-x1);
// In each slice the ray segment spans (yy1..yy2) x (zz1..zz2); the cases
// tagged 11..44 split the segment length l over the up-to-4 neighbouring
// cells, enumerating interior/boundary combinations in y and z.
for(ix=0;ix<nx;ix++)
{ xx1=(float)(ix-nx2);xx2=xx1+1;
if(slope1>=0)
{ yy1=y1+slope1*(xx1-x1)+ny2;
yy2=y1+slope1*(xx2-x1)+ny2;
}
else
{ yy1=y1+slope1*(xx2-x1)+ny2;
yy2=y1+slope1*(xx1-x1)+ny2;
}
cy1=(int)floor(yy1);
cy2=(int)floor(yy2);
if(slope2>=0)
{ zz1=(z1+slope2*(xx1-x1))/dz+nz2;
zz2=(z1+slope2*(xx2-x1))/dz+nz2;
}
else
{ zz1=(z1+slope2*(xx2-x1))/dz+nz2;
zz2=(z1+slope2*(xx1-x1))/dz+nz2;
}
cz1=(int)floor(zz1);
cz2=(int)floor(zz2);
if(cy2==cy1)
{ if(cy1>=0&&cy1<=ny-1)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 11
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
iy=cy1;iz=cz1;y[id]+=l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 12
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
iy=cy1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ if(cz2==0)// 13
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
iy=cy1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
if(cz2==nz)// 14
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
else
{ if(cy2>0&&cy2<ny)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 21
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;
iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];
iy=cy2;iz=cz1;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 22
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];
iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];
iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];
iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2==0)// 23
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];
iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
if(cz2==nz)// 24
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];
iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
}
else
{ if(cy2==0)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 31
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;
iy=cy2;iz=cz1;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 32
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];
iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2==0)// 33
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
if(cz2==nz)// 34
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{
}
else
{ iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
}
if(cy2==ny)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 41
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;
iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 42
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2==0)// 43
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{
}
}
if(cz2==nz)// 44
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
}
}
}
}
// Branch 2: ray more y-aligned -> traverse y-slices (mirror image of branch 1
// with the roles of x and y exchanged).
else
{ slope1=(x2-x1)/(y2-y1);
slope2=(z2-z1)/(y2-y1);
for(iy=0;iy<ny;iy++)
{ yy1=(float)(iy-ny2);yy2=yy1+1;
if(slope1>=0)
{ xx1=x1+slope1*(yy1-y1)+nx2;
xx2=x1+slope1*(yy2-y1)+nx2;
}
else
{ xx1=x1+slope1*(yy2-y1)+nx2;
xx2=x1+slope1*(yy1-y1)+nx2;
}
cx1=(int)floor(xx1);
cx2=(int)floor(xx2);
if(slope2>=0)
{ zz1=(z1+slope2*(yy1-y1))/dz+nz2;
zz2=(z1+slope2*(yy2-y1))/dz+nz2;
}
else
{ zz1=(z1+slope2*(yy2-y1))/dz+nz2;
zz2=(z1+slope2*(yy1-y1))/dz+nz2;
}
cz1=(int)floor(zz1);
cz2=(int)floor(zz2);
if(cx2==cx1)
{ if(cx1>=0&&cx1<=nx-1)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 11
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ix=cx1;iz=cz1;y[id]+=l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 12
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
ix=cx1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ if(cz2==0)// 13
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
ix=cx1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
if(cz2==nz)// 14
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
}
else
{ if(cx2>0&&cx2<nx)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 21
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;
ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];
ix=cx2;iz=cz1;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 22
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];
ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];
ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];
ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2==0)// 23
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];
ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
if(cz2==nz)// 24
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];
ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
}
else
{ if(cx2==0)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 31
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;
ix=cx2;iz=cz1;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 32
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];
ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2==0)// 33
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
}
if(cz2==nz)// 34
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{
}
else
{ ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
}
if(cx2==nx)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 41
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;
ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2>0&&cz2<nz)// 42
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];
}
}
else
{ if(cz2==0)// 43
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];
}
else
{
}
}
if(cz2==nz)// 44
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];
}
}
}
}
}
}
}
}
// restore the physical voxel size; yNorm is the length-normalized ray sum
y[id]*=scale;
yNorm[id] = y[id]/norm;
}
}
void Ax_cone_mf_gpu_new(float *X,float *y,float *yNorm,float *sd_phi,float *sd_z,float *y_det,float *z_det,int *id_Y,int *Nv,
float SO,float OD,float scale,float dz,int nx,int ny,int nz,int nt,int na,int nb,int nv,int tmp_size,int nv_block)
// Host driver for the cone-beam forward projection y = Ax (plus the
// ray-length-normalized yNorm).
// Assumes dx=dy; internally dx=dy=1 and "scale" restores the physical size.
// The input X may contain nt 3D images (e.g. temporally resolved 4DCT).
// The caller supplies:
//   id_Y     -- per image, the list of its projection-view indices (nt x tmp_size)
//   Nv       -- number of views belonging to each image
//   nv_block -- maximum views per kernel launch (works around GPU time limits)
{
    float *y_d, *yNorm_d, *x_d, *sd_phi_d, *sd_z_d, *y_det_d, *z_det_d;
    int *id_Y_d, nd, N, id, v0, it, i, n, nv2;
    N = nx * ny * nz;   // voxels per image
    nd = na * nb;       // detector pixels per view
    // Device buffers: one projection chunk, one image, and the view geometry.
    cudaMalloc(&y_d, nv_block * nd * sizeof(float));
    cudaMalloc(&yNorm_d, nv_block * nd * sizeof(float));
    cudaMalloc(&x_d, N * sizeof(float));
    cudaMalloc(&sd_phi_d, nv * sizeof(float));
    cudaMemcpy(sd_phi_d, sd_phi, nv * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc(&sd_z_d, nv * sizeof(float));
    cudaMemcpy(sd_z_d, sd_z, nv * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc(&y_det_d, na * sizeof(float));
    cudaMemcpy(y_det_d, y_det, na * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc(&z_det_d, nb * sizeof(float));
    cudaMemcpy(z_det_d, z_det, nb * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc(&id_Y_d, nt * tmp_size * sizeof(int));
    cudaMemcpy(id_Y_d, id_Y, nt * tmp_size * sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE_x, BLOCK_SIZE_y);
    id = 0;   // running view counter across all images
    for (it = 0; it < nt; it++)
    {   // upload the it-th image, then project its views chunk by chunk
        cudaMemcpy(x_d, &X[it * N], N * sizeof(float), cudaMemcpyHostToDevice);
        n = (Nv[it] + nv_block - 1) / nv_block;   // number of chunks
        v0 = 0;                                   // offset into this image's view list
        for (i = 0; i < n; i++)
        {   // the last chunk may be smaller than nv_block
            if (i < n - 1)
                { nv2 = nv_block; }
            else
                { nv2 = Nv[it] - nv_block * (n - 1); }
            dim3 dimGrid_t((na + dimBlock.x - 1) / dimBlock.x, (nv2 * nb + dimBlock.y - 1) / dimBlock.y);
            Ax_cone_mf_gpu_kernel_new<<<dimGrid_t, dimBlock>>>(x_d, y_d, yNorm_d, sd_phi_d, sd_z_d, y_det_d, z_det_d, &id_Y_d[it * tmp_size + v0],
                SO, OD, scale, dz, nx, ny, nz, na, nb, nv2);
            // FIX: cudaThreadSynchronize() is deprecated; use the supported
            // cudaDeviceSynchronize() equivalent.
            cudaDeviceSynchronize();
            cudaMemcpy(&y[id * nd], y_d, nv2 * nd * sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(&yNorm[id * nd], yNorm_d, nv2 * nd * sizeof(float), cudaMemcpyDeviceToHost);
            v0 += nv2; id += nv2;
        }
    }
    cudaFree(y_d); cudaFree(yNorm_d); cudaFree(x_d); cudaFree(sd_phi_d); cudaFree(sd_z_d); cudaFree(y_det_d); cudaFree(z_det_d); cudaFree(id_Y_d);
}
|
3,969 |
/*
 * Convert HSV (h in degrees [0, 360], s and v in [0, 1]) to RGB in [0, 1].
 * A negative hue is treated as achromatic: returns grey (r = g = b = v).
 * Uses the hexcone algorithm with the fractional part mirrored on even
 * sextants so a single pair of intermediates (m, n) covers all cases.
 */
__device__ void HSV2RGB(float h, float s, float v, float &r, float &g, float &b) {
    if(h < 0) {          // achromatic fallback
        r=v;
        g=v;
        b=v;
        return;
    }
    h *= .0166666666666667; // convert from 360 to 0-6;
    int i = (int) floor(h);
    float f = h - i;
    f = (!(i&1)) ? 1-f : f; // mirror the fraction on even sextants
    float m = v * (1-s);
    float n = v * (1-s * f);
    switch(i) {
    case 6:              // h == 360 wraps around to the red sextant
    case 0:
        r = v;
        g= n;
        b = m;
        return;
    case 1:
        r = n;
        g= v;
        b = m;
        return;
    case 2:
        r = m;
        g= v;
        b = n;
        return;
    case 3:
        r = m;
        g= n;
        b = v;
        return;
    case 4:
        r = n;
        g= m;
        b = v;
        return;
    case 5:
        // BUG FIX: sextant 5 (magenta -> red, h in [300, 360)) is
        // r=v, g=m, b=n; the old code returned g=n, b=m, which produced
        // wrong colors for this hue band.
        r = v;
        g= m;
        b = n;
        return;
    }
}
// Maps each 3-D point's z coordinate to an RGB color, either by linear RGB
// interpolation or (hsv==true) by interpolating in HSV and converting.
// Writes one RGB triple per point into colors, or four identical triples
// per point when quads is set (one per quad vertex).
// pixels holds interleaved xyz triples; z == 0.0 marks an invalid point.
// NOTE(review): the validity test uses (i > 0), so point 0 is never
// colored -- confirm whether index 0 is reserved or this should be i >= 0.
__global__ void gpu_calcColor_kernel(int pointCnt, float* pixels, float minZ, float diffZ, float minC1, float minC2, float minC3, float diffC1, float diffC2, float diffC3, bool hsv, bool quads, float* colors) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
bool validPoint = ((i > 0) && (i < pointCnt))? true : false;
i *=3;
float z = validPoint ? pixels[i+2] : 0.0;
if(z != 0.0) { // only equal to 0.0 if set as invalid in cloud contructor
// normalized position of z inside [minZ, minZ + diffZ], clamped to [0.01, 1]
float distZ = z - minZ;
float percent = distZ/diffZ;
percent = percent<=0.0 ? 0.01 : percent;
percent = percent>=1.0 ? 1.0 : percent;
float r;
float g;
float b;
if(hsv) {
// NOTE(review): unreachable branch -- percent was clamped to at least
// 0.01 above, so it can never equal 0.0 here.
if(percent == 0.0) {
HSV2RGB(minC1 , minC2, minC3 , r,g,b);
} else {
HSV2RGB(minC1 + percent * diffC1, minC2 + percent * diffC2, minC3 + percent * diffC3, r,g,b);
}
} else {
// direct linear interpolation in RGB space
r= minC1 + percent * diffC1;
g= minC2 + percent * diffC2;
b= minC3 + percent * diffC3;
}
if(quads) {
// four vertices per point: replicate the triple (i was already *3, now *12)
i*=4;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i++] = b;
colors[i++] = r;
colors[i++] = g;
colors[i] = b;
} else {
colors[i++] = r;
colors[i++] = g;
colors[i] = b;
}
}
}
// Host launcher: one thread per point.
// FIX: use exact integer ceil-division for the grid size instead of
// ceilf(), whose float rounding can miscount for large point counts;
// also fixes the "theadsPerBlock" typo and the stray trailing semicolon.
extern "C" void gpu_calcColor(int pointCnt, float* pixels, float minZ, float diffZ, float minC1, float minC2, float minC3, float diffC1, float diffC2, float diffC3, bool hsv, bool quads, float* colors)
{
    int threadsPerBlock = 256;
    int blocks = (pointCnt + threadsPerBlock - 1) / threadsPerBlock;
    gpu_calcColor_kernel <<<blocks,threadsPerBlock>>> (pointCnt, pixels, minZ, diffZ, minC1, minC2, minC3, diffC1, diffC2, diffC3, hsv, quads, colors);
}
|
3,970 | /*
Author: Vedanta Pawar
NetID: vp273
Class: M.Eng ECE, Cornell University
Email: vp273@cornell.edu
Instructions for Compiling and Executing Code:
Compile: /usr/local/cuda-10.1/bin/nvcc -o vp273_hw5_2.out vp273_hw5_2.cu
Run: ./vp273_hw5_2.out "Enter the dimension of the matrix:" "Enter the Block Size:"
Example: ./vp273_hw5_2.out 4096 8
*/
#include <stdio.h> // For printf()
#include <stdint.h> // For uint64
#include <stdlib.h> // For srand48() and drand48()
#include <time.h> // For clock_gettime()
#define BILLION 1000000000L // To convert clock time in floating point seconds to nanoseconds/
// Tiled matrix multiply C = A * B over row-pointer matrices (double**).
// Launch with a tile_size x tile_size block layout and dynamic shared
// memory of 2 * tile_size * tile_size * sizeof(double) bytes.
// Assumes ndim is a multiple of tile_size (== blockDim.x == blockDim.y).
__global__ void matrixMul( double** dev_matA , double** dev_matB , double** dev_matC , int ndim , int tile_size )
{
    extern __shared__ double A_B_shared[];
    double *A_tile = &A_B_shared[ 0 ] ;
    // BUG FIX: the offset is in ELEMENTS, not bytes. The previous
    // "tile_size*tile_size*sizeof(double)" indexed 8x past the A tile,
    // placing B_tile outside the allocated shared memory.
    double *B_tile = &A_B_shared[ tile_size * tile_size ] ;
    double partial = 0.0;
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int row = by * blockDim.y + ty;
    int col = bx * blockDim.x + tx;
    for (int m = 0; m < ndim / blockDim.x ; ++m)
    {
        A_tile[ ty * tile_size + tx ] = dev_matA[row][m*blockDim.x + tx]; /* load coalesced */
        B_tile[ ty * tile_size + tx ] = dev_matB[( m * blockDim.y + ty )][col]; /* not load coalesced */
        __syncthreads();   // tiles fully loaded before any thread reads them
        for (int k = 0; k < blockDim.x; ++k)
        {
            partial += A_tile[ ty *tile_size + k ] * B_tile[ k *tile_size + tx ]; /* bank conflicts */
        }
        __syncthreads();   // all reads done before tiles are overwritten
    }
    // BUG FIX: store once after accumulating over every tile; the old code
    // wrote dev_matC inside the loop (redundant global stores per tile).
    dev_matC[row][col] = partial;
}
int main( int argc, char *argv[] )
{
    uint64_t diff;               // elapsed time in nanoseconds
    struct timespec start, end;  // high-resolution timer endpoints
    int ndim = atoi( argv[1] );  // dimension of the square matrices
    FILE *ptr_file;              // csv log of (ndim, block_size, time)
    int block_size;

    // Host matrices: one malloc'ed row of ndim doubles per row pointer.
    double *matA[ndim];
    double *matB[ndim];
    double *matC[ndim];
    for ( int i = 0 ; i < ndim ; i++ )
    {
        matA[i] = ( double * )malloc( ndim * sizeof( double ) );
        matB[i] = ( double * )malloc( ndim * sizeof( double ) );
        matC[i] = ( double * )malloc( ndim * sizeof( double ) );
    }

    // Fill A and B with pseudo-random values, reseeding from elapsed time.
    clock_gettime( CLOCK_MONOTONIC , &start );
    for ( int i = 0 ; i < ndim ; i++ )
    {
        for ( int j = 0 ; j < ndim ; j++ )
        {
            clock_gettime( CLOCK_MONOTONIC , &end );
            diff = BILLION * ( end.tv_sec - start.tv_sec ) + end.tv_nsec - start.tv_nsec;
            srand48( diff ) ;
            matA[i][j] = drand48() ;
            matB[i][j] = drand48() ;
        }
    }

    // Per-row device buffers.
    double *devRowsA[ndim] , *devRowsB[ndim] , *devRowsC[ndim];
    for ( int i = 0 ; i < ndim ; i++ )
    {
        cudaMalloc( ( void** )&devRowsA[i], ndim * sizeof( double ) );
        cudaMalloc( ( void** )&devRowsB[i], ndim * sizeof( double ) );
        cudaMalloc( ( void** )&devRowsC[i], ndim * sizeof( double ) );
    }
    // BUG FIX: the kernel dereferences double** on the DEVICE, so the row
    // pointer tables themselves must live in device memory. Previously the
    // host stack arrays were passed straight to the kernel, making it read
    // host addresses (illegal device access).
    double **dev_matA , **dev_matB , **dev_matC;
    cudaMalloc( ( void** )&dev_matA, ndim * sizeof( double* ) );
    cudaMalloc( ( void** )&dev_matB, ndim * sizeof( double* ) );
    cudaMalloc( ( void** )&dev_matC, ndim * sizeof( double* ) );
    cudaMemcpy( dev_matA , devRowsA , ndim * sizeof( double* ) , cudaMemcpyHostToDevice );
    cudaMemcpy( dev_matB , devRowsB , ndim * sizeof( double* ) , cudaMemcpyHostToDevice );
    cudaMemcpy( dev_matC , devRowsC , ndim * sizeof( double* ) , cudaMemcpyHostToDevice );

    // Start high resolution clock timer
    clock_gettime( CLOCK_MONOTONIC , &start );
    for ( int i = 0 ; i < ndim ; i++ )
    {
        cudaMemcpy( devRowsA[i] , matA[i] , ndim * sizeof( double ) , cudaMemcpyHostToDevice );
        cudaMemcpy( devRowsB[i] , matB[i] , ndim * sizeof( double ) , cudaMemcpyHostToDevice );
    }
    block_size = atoi( argv[2] ) ;
    int shared_mem_size = 2 * block_size * block_size * sizeof( double ) ;
    dim3 Block( block_size , block_size ) ;
    dim3 Grid( ndim / Block.x , ndim / Block.y ) ;
    matrixMul<<< Grid, Block , shared_mem_size >>>( dev_matA , dev_matB , dev_matC , ndim , block_size );
    cudaDeviceSynchronize( );
    // BUG FIX: copy the result back row by row; the old single cudaMemcpy
    // treated the host array of device row pointers as one contiguous
    // ndim*ndim device buffer.
    for ( int i = 0 ; i < ndim ; i++ )
    {
        cudaMemcpy( matC[i] , devRowsC[i] , ndim * sizeof( double ) , cudaMemcpyDeviceToHost );
    }
    clock_gettime( CLOCK_MONOTONIC , &end ); // End clock timer.
    // Convert the elapsed time to nanoseconds.
    diff = BILLION * ( end.tv_sec - start.tv_sec ) + end.tv_nsec - start.tv_nsec;
    printf( "elapsed time = %llu nanoseconds\n", ( long long unsigned int ) diff );
    ptr_file = fopen("output_hw5_2.csv", "a"); // Save time vs. matrix dim. in a csv file
    fprintf( ptr_file ,"%d , %d , %llu\n", ndim , block_size , ( long long unsigned int ) diff );
    fclose( ptr_file );  // BUG FIX: the csv file handle was never closed

    // Release all host and device memory.
    for ( int i = 0 ; i < ndim ; i++ )
    {
        free ( matA[i] ) ;
        free ( matB[i] ) ;
        free ( matC[i] ) ;
        cudaFree( devRowsA[i] ) ;
        cudaFree( devRowsB[i] ) ;
        cudaFree( devRowsC[i] ) ;
    }
    cudaFree( dev_matA ) ;
    cudaFree( dev_matB ) ;
    cudaFree( dev_matC ) ;
    exit( 0 ) ;
}
3,971 | #include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <chrono>
using namespace std;
__global__ void convolution_1d(int *array, int *mask, int *result, int n,int m);
void verify_result(int *array, int *mask, int *result, int n, int m);
auto get_time() { return chrono::high_resolution_clock::now(); }
// Driver: 1-D convolution of a random n-element signal with a random
// 7-tap mask on the GPU, verified against a CPU reference.
int main() {
int n = 1000 << 16;
int bytes_n = n * sizeof(int);
int m = 7;
int bytes_m = m * sizeof(int);
// CPU
vector<int> h_array(n);
vector<int> h_mask(m);
vector<int> h_result(n);
generate(begin(h_array), end(h_array), [](){ return rand() % 100; });
generate(begin(h_mask), end(h_mask), [](){ return rand() % 10; });
// GPU
int *d_array, *d_mask, *d_result;
cudaMalloc(&d_array, bytes_n);
cudaMalloc(&d_mask, bytes_m);
cudaMalloc(&d_result, bytes_n);
// CPU --> GPU
cudaMemcpy(d_array, h_array.data(), bytes_n, cudaMemcpyHostToDevice);
cudaMemcpy(d_mask, h_mask.data(), bytes_m, cudaMemcpyHostToDevice);
// kernel: one thread per output element, ceil-division grid
int THREADS = 256;
int GRID = (n + THREADS - 1) / THREADS;
auto start = get_time();
convolution_1d<<<GRID, THREADS>>>(d_array, d_mask, d_result, n, m);
// GPU --> CPU
// NOTE: this blocking cudaMemcpy also synchronizes the asynchronous kernel
// launch, so the measured time includes both kernel and transfer.
cudaMemcpy(h_result.data(), d_result, bytes_n, cudaMemcpyDeviceToHost);
auto finish = get_time();
auto duration =
chrono::duration_cast<std::chrono::milliseconds>(finish - start);
cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
verify_result(h_array.data(), h_mask.data(), h_result.data(), n, m);
cout << "Terminé avec succès"<<endl;
cudaFree(d_result);
cudaFree(d_mask);
cudaFree(d_array);
return 0;
}
// 1-D convolution: result[i] = sum_j array[i - m/2 + j] * mask[j],
// with zero padding outside [0, n). One thread per output element.
__global__ void convolution_1d(int *array, int *mask, int *result, int n,int m)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: guard the grid tail -- without this, whenever n is not a
    // multiple of the block size the last block writes past result[n-1].
    if (tid >= n) return;
    int r = m / 2;        // mask radius
    int start = tid - r;  // leftmost input index touched by this output
    int temp = 0;
    for (int j = 0; j < m; j++)
        if (((start + j) >= 0) && (start + j < n))
            temp += array[start + j] * mask[j];
    result[tid] = temp;
}
// CPU reference check: recompute the zero-padded 1-D convolution on the
// host and assert it matches the device result element by element.
void verify_result(int *array, int *mask, int *result, int n, int m)
{
    const int radius = m / 2;
    for (int i = 0; i < n; ++i)
    {
        const int first = i - radius;
        int expected = 0;
        for (int j = 0; j < m; ++j)
        {
            const int idx = first + j;
            if (idx >= 0 && idx < n)
                expected += array[idx] * mask[j];
        }
        assert(expected == result[i]);
    }
}
3,972 |
#include <cuda.h>
#ifdef _WIN32
# define IMPORT __declspec(dllimport)
#else
# define IMPORT
#endif
IMPORT int simplelib();
// Entry point: delegate to the imported library routine and propagate its
// status as the process exit code.
int main(void)
{
    const int status = simplelib();
    return status;
}
|
3,973 | #include "includes.h"
// For every (proposal, instance) pair, computes the IoU between the
// proposal's point set and the ground-truth instance:
//   |intersection| / (|proposal| + |instance| - |intersection| + 1e-5)
// (the epsilon guards against division by zero).
// Work distribution: proposals stride over blockIdx.x, instances over
// threadIdx.x; results go to proposals_iou[proposal * nInstance + instance].
// proposals_offset[p]..proposals_offset[p+1] delimits proposal p's indices
// in proposals_idx; instance_labels maps each point to its instance id.
__global__ void get_iou_cuda_(int nInstance, int nProposal, int *proposals_idx, int *proposals_offset, long *instance_labels, int *instance_pointnum, float *proposals_iou){
for(int proposal_id = blockIdx.x; proposal_id < nProposal; proposal_id += gridDim.x){
int start = proposals_offset[proposal_id];
int end = proposals_offset[proposal_id + 1];
int proposal_total = end - start;
for(int instance_id = threadIdx.x; instance_id < nInstance; instance_id += blockDim.x){
int instance_total = instance_pointnum[instance_id];
// count proposal points labeled with this instance
int intersection = 0;
for(int i = start; i < end; i++){
int idx = proposals_idx[i];
if((int)instance_labels[idx] == instance_id){
intersection += 1;
}
}
proposals_iou[proposal_id * nInstance + instance_id] = (float)intersection / ((float)(proposal_total + instance_total - intersection) + 1e-5);
}
}
} |
3,974 | #include "includes.h"
/* https://zxi.mytechroad.com/blog/dynamic-programming/leetcode-730-count-different-palindromic-subsequences/ */
// Modulus for the palindromic-subsequence DP (10^9 + 7).
long kMod = 1000000007;
// One diagonal of the DP table for counting distinct palindromic
// subsequences: dp[i*n+j] covers the substring S[i..j]. For a fixed gap
// `len` (so j = i + len), each thread handles one start index i; all
// entries it reads belong to shorter substrings, which must already be
// filled in by earlier launches with smaller `len`.
__global__ void helperKernel(char *S, int *dp, int n, long kMod, int len) {
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < n - len; i += blockDim.x * gridDim.x) {
int j = i + len; // jth element is the end of current string
if(S[i] == S[j]) { // if front and rear are the same
// every inner palindrome can be extended by the matching pair
dp[i * n + j] = dp[(i + 1) * n + (j - 1)] * 2;
// locate the innermost occurrences of S[i] to correct for duplicates
int left = i + 1;
int right = j - 1;
while(left <= right && S[left] != S[i]) {
left++;
}
while(left <= right && S[right] != S[i]) {
right--;
}
if(left == right) {
// exactly one inner copy of S[i]: adds only the length-2 palindrome
dp[i * n + j] += 1;
} else if(left > right) {
// no inner copy: adds the length-1 and length-2 palindromes
dp[i * n + j] += 2;
} else {
// two or more inner copies: subtract the double-counted middle part
dp[i * n + j] -= dp[(left + 1) * n + (right - 1)];
}
} else {
// inclusion-exclusion over dropping either endpoint
dp[i * n + j] = dp[i * n + (j - 1)] + dp[((i + 1) * n) + j] - dp[(i + 1) * n + (j - 1)];
}
dp[i * n + j] = (dp[i * n + j] + kMod) % kMod; // perform positive modulo
}
//__syncthreads();
} |
3,975 | #include <stdio.h>
#include <cuda.h>
#include <string>
//using namespace std;
// Writes 2 into c[t] for each of the first N threads and prints a greeting.
// NOTE(review): the guard compares threadIdx.x only, so a single-block
// launch is assumed (main() uses <<<1,10>>>).
__global__ void myKernel(int* c,int N )
{ if(threadIdx.x <N){
c[threadIdx.x] = 2;
printf("Hello, world from the device! \n");
//__syncthreads();
}
}
// Fills a 10-element host array with 1s, has the kernel overwrite the
// device copy with 2s, copies the result back and prints it.
int main()
{
    int* dayName = (int*)malloc(10*sizeof(int));
    for(int i=0; i<10; i++){
        dayName[i]= 1;
    }
    int* d_c;
    cudaMalloc((void **)&d_c,10*sizeof(int));
    cudaMemcpy(d_c, dayName, 10*sizeof(int), cudaMemcpyHostToDevice);
    myKernel<<<1,10>>>(d_c, 10);
    cudaDeviceSynchronize();
    // BUG FIX: the copy source is the device buffer d_c itself, not &d_c
    // (the address of the host pointer variable), and the copy must happen
    // BEFORE cudaFree -- the old code freed d_c first (use after free).
    cudaMemcpy(dayName, d_c, 10*sizeof(int), cudaMemcpyDeviceToHost );
    cudaFree(d_c);
    for(int i=0; i<10; i++){
        printf("dayName spot %d , %d \n", i, dayName[i]);
    }
    printf("I am saine \n");
    free(dayName);  // release the host buffer (was leaked)
}
|
3,976 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
#include <math.h>
// Reads 2518 (AAPL, MSFT) price pairs from stdin, computes the element-wise
// differences on the GPU with Thrust, and prints the absolute mean difference.
int main() {
thrust::device_vector<double> AAPL;
thrust::device_vector<double> MSFT;
// pre-sized output vector for the element-wise differences
thrust::device_vector<double> MEAN_DIF(2518,0);
double stocks_AAPL, stocks_MSFT, mean;
// NOTE: push_back onto a device_vector triggers a host->device transfer
// per element; bulk-loading a host_vector first would be faster.
for(int i =0; i < 2518; i++){
std::cin >> stocks_AAPL >> stocks_MSFT;
AAPL.push_back(stocks_AAPL);
MSFT.push_back(stocks_MSFT);
}
// MEAN_DIF[i] = AAPL[i] - MSFT[i]
thrust::transform(AAPL.begin(), AAPL.end(), MSFT.begin(), MEAN_DIF.begin(), thrust::minus<double>());
double val = thrust::reduce(MEAN_DIF.begin(), MEAN_DIF.end());
// NOTE(review): divides by 2517 although 2518 elements were summed --
// confirm whether an (n-1) divisor was intended here.
mean = val/2517;
std::cout << "Média: " << fabs(mean) << "\n";
// printf("V1: ");
// for (thrust::device_vector<double>::iterator i = MEAN_DIF.begin(); i != MEAN_DIF.end(); i++) {
// std::cout << *i << " ";
// }
// printf("\n");
// printf("V2: ");
// for (thrust::device_vector<double>::iterator i = V2.begin(); i != V2.end(); i++) {
// std::cout << *i << " ";
// }
// printf("\n");
// printf("V3: ");
// for (thrust::device_vector<double>::iterator i = V3.begin(); i != V3.end(); i++) {
// std::cout << *i << " ";
// }
// printf("\n");
// printf("V4: ");
// for (thrust::device_vector<double>::iterator i = V4.begin(); i != V4.end(); i++) {
// std::cout << *i << " ";
// }
// printf("\n");
}
|
3,977 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
// Necessary for random numbers in CUDA
#include <curand_kernel.h>
#include <curand.h>
#define NUM_ITER 1000000000
#define TPB 128 // Threads PER block
#define NUM_THREADS 10000 // Total number of threads to execute
/**
* Function which, for each instance of the kernel, generates a random point
* and calculates whether or not it is within a circle.
*
* @param counts Array for each thread to store the total number of
* randomly generate points that were within a circle
* @param numIter Number of iterations / points each thread should make
* @param numThreads Number of threads that should be doing work
* @param curandState Array for each thread to store its own curandState
* structure
*/
// Monte-Carlo pi kernel: each of numThreads threads generates numIter
// random points in the unit square and counts those inside the unit
// quarter circle into counts[threadId].
// counts and randState must both hold at least numThreads elements.
__global__ void estimatePiKernel(unsigned int *counts, unsigned int numIter,
                                 unsigned int numThreads,
                                 curandState *randState) {
    double x, y, distance;
    // Unique ID of the current thread to determine what work to compute
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the guard must be >=, not >. With >, thread numThreads
    // slipped through and wrote counts[numThreads] / randState[numThreads],
    // one element past the end of both arrays.
    if (threadId >= numThreads) return;
    // Use threadId as the seed so every thread draws a different stream
    int seed = threadId;
    curand_init(threadId, seed, 0, &randState[threadId]);
    for (int iter = 0; iter < numIter; iter++) {
        // Random x, y coordinates in (0, 1] for a point
        x = (double) curand_uniform(&randState[threadId]);
        y = (double) curand_uniform(&randState[threadId]);
        // Distance from the origin of the circle
        distance = sqrt((x * x) + (y * y));
        // Points within distance 1 of the origin lie inside the
        // quarter circle of radius 1: count a hit.
        if (distance <= 1.0) counts[threadId]++;
    }
}
/**
* Tally up the counts in an array indicating the number of randomly generated
* points that were inside a circle and estimate pi.
*
* @param counts Array of counts of points generate inside a circle
*/
void estimatePi(unsigned int *counts) {
    // Sum the per-thread hit counts into one total.
    unsigned int totalCount = 0;
    for (int i = 0; i < NUM_THREADS; ++i) {
        totalCount += counts[i];
    }
    printf("total count: %d\n", totalCount);
    // pi ~= 4 * P(point in circle) = 4 * hits / samples
    double piEstimation = 4.0 * ((double) totalCount / (double) NUM_ITER);
    printf("The result is %f\n", piEstimation);
}
/**
* Return a timestamp with double percision.
*/
// Wall-clock timestamp in seconds with microsecond precision.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Host driver: allocates per-thread RNG state and hit counters, launches
// the Monte-Carlo kernel, and reports the pi estimate and elapsed time.
int main() {
// Allocate space for curandState for each thread
// (states are initialized inside the kernel via curand_init)
curandState *randState;
cudaMalloc(&randState, NUM_THREADS * sizeof(curandState));
// Allocate space to keep track of counts of points generated in the circle
unsigned int *deviceCounts;
cudaMalloc(&deviceCounts, NUM_THREADS * sizeof(unsigned int));
// Allocate space to copy the GPU result back to the CPU
unsigned int *hostCounts = (unsigned int*) malloc(
NUM_THREADS * sizeof(unsigned int));
// Set all of the memory to 0
cudaMemset(deviceCounts, 0, NUM_THREADS * sizeof(unsigned int));
double startTime = cpuSecond();
// Launch the kernel: ceil-division grid, NUM_ITER split across threads
estimatePiKernel <<<(NUM_THREADS + TPB - 1) / TPB, TPB>>> (
deviceCounts, NUM_ITER / NUM_THREADS, NUM_THREADS, randState);
// Watch for the kernel to finish
cudaDeviceSynchronize();
printf("Total time: %f\n", cpuSecond() - startTime);
// Copy GPU counts to the CPU
cudaMemcpy(
hostCounts, deviceCounts,
NUM_THREADS * sizeof(unsigned int), cudaMemcpyDeviceToHost
);
// Print pi estimation
// NOTE(review): randState, deviceCounts and hostCounts are never freed;
// harmless at process exit but worth cleaning up.
estimatePi(hostCounts);
return 0;
}
|
3,978 | #include <cuda.h>
#include <assert.h>
#define N 2//(64*64)//(2048*2048)
#define THREADS_PER_BLOCK 2//512
// Element-wise vector sum: c[i] = a[i] + b[i].
// No bounds guard -- the caller must launch exactly one thread per element.
__global__ void Asum(int *a, int *b, int *c){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
|
3,979 | #include "includes.h"
// Sets temperature[1] .. temperature[block_size] to 1.0 (fixed boundary
// values, offset past the first element).
__global__ void topBoundaryKernel(double* temperature, int block_size) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= block_size) {
        return;
    }
    temperature[idx + 1] = 1.0;
}
3,980 | #include "includes.h"
// Casts d_output (float) element-wise to unsigned short and stores it into
// d_input at the same (channel, sample) position; __ldg reads d_output
// through the read-only data cache.
// NOTE(review): despite the parameter names, data flows d_output -> d_input,
// and there is no bounds guard -- the launch grid must tile nchans x nsamp
// exactly; confirm against the caller.
__global__ void swap(unsigned short *d_input, float *d_output, int nchans, int nsamp) {
size_t t = blockIdx.x * blockDim.x + threadIdx.x;
size_t c = blockIdx.y * blockDim.y + threadIdx.y;
d_input[(size_t)(c * nsamp) + t] = (unsigned short) __ldg(&d_output[(size_t)(c * nsamp) + t]);
} |
3,981 | /*
152096 - William Matheus
Friendly Numbers
Programacao Paralela e Distribuida
CUDA - 2019/2 - UPF
Programa 2
*/
#include <stdio.h>
#include <cuda.h>
// Euclid's algorithm: writes gcd(a, b) into *result.
__device__ void gcd ( int a, int b, int *result){
    while ( a != 0 ) {
        const int remainder = b % a;
        b = a;
        a = remainder;
    }
    *result = b;
}
// For each integer i in [start, end]: computes the divisor sum sigma(i)
// into device_num and i itself into device_den, then reduces the fraction
// sigma(i)/i by their gcd. `inc` offsets the thread index so successive
// launches cover successive chunks of the range.
// NOTE(review): gcd() takes int parameters while the values are long int --
// possible truncation for sums exceeding INT_MAX; confirm the input range.
__global__ void numDem(long int *device_num, long int *device_den, long int start, long int end, int size, int inc)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + inc;
int result;
long int factor, ii, sum, done, n;
if (i < size) {
ii = i - start;   // index into the output arrays
// divisor sum: 1 and i, plus each factor/cofactor pair found by trial
// division (the loop bound `done` shrinks to the current cofactor)
sum = 1 + i;
done = i;
factor = 2;
while (factor < done) {
if ((i % factor) == 0) {
sum += (factor + (i / factor));
//doneFactor(i, factor, &done);
if ((done = i / factor) == factor)
sum -= factor;   // perfect square: don't count sqrt(i) twice
}
factor++;
}
device_num[ii] = sum;
device_den[ii] = i;
// reduce sum/i to lowest terms
gcd(device_num[ii], device_den[ii], &result);
n = result;
device_num[ii] /= n;
device_den[ii] /= n;
}
}
// For element i, counts the later elements j > i that hold the same
// reduced fraction num/den; the count is accumulated in device_vet[i].
// `x` offsets the thread index so successive launches cover later chunks.
__global__ void sum(long int* device_num, long int* device_den, long int* device_vet, int size, int x)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x + x;
    if (i >= size) {
        return;
    }
    for (int j = i + 1; j < size; ++j) {
        const bool sameFraction =
            (device_num[i] == device_num[j]) && (device_den[i] == device_den[j]);
        if (sameFraction) {
            device_vet[i]++;
        }
    }
}
// Counts pairs of "friendly" numbers in [start, end]: numbers i whose
// reduced fraction sigma(i)/i matches another number's. Work is split into
// number_grid launches, each covering nBlocks*threads indices, with the
// chunk offset passed as the kernels' last argument.
void friendly_numbers(long int start, long int end) {
cudaSetDevice(0);
int deviceId;
int number_grid, c=0, i;
long int *device_num, *device_den, *device_vet;
struct cudaDeviceProp prop;
cudaGetDevice(&deviceId);
cudaGetDeviceProperties(&prop, deviceId);
long int last = end - start + 1;   // count of numbers in the range
size_t size = last * sizeof(long int);
// launch shape derived from the device's occupancy limits
int nBlocks = 4*((prop.maxThreadsPerMultiProcessor * prop.multiProcessorCount)/prop.maxThreadsPerBlock);
int threads = prop.maxThreadsPerBlock;
number_grid = last / (nBlocks * threads)+1;
long int *num;
long int *den;
long int *vet;
// NOTE(review): num and den are copied to the device uninitialized; the
// numDem kernel overwrites every element it uses, so this is harmless but
// the two uploads (and the host buffers) are unnecessary.
num = (long int*) malloc(size);
den = (long int*) malloc(size);
vet = (long int*) malloc(size);
cudaMalloc((void**)&device_num, size);
cudaMalloc((void**)&device_den, size);
cudaMalloc((void**)&device_vet, size);
for (i = 0; i < last; i++) {
vet[i] = 0;
}
cudaMemcpy(device_num, num, size, cudaMemcpyHostToDevice);
cudaMemcpy(device_den, den, size, cudaMemcpyHostToDevice);
cudaMemcpy(device_vet, vet, size, cudaMemcpyHostToDevice);
// phase 1: compute each number's reduced fraction sigma(i)/i
int x = 0;
for (i = 0; i < number_grid; i++) {
numDem<<<nBlocks, threads>>>(device_num, device_den, start, end, last, x);
x += nBlocks * threads;
}
// phase 2: count matching fractions among later elements
x = 0;
for (i = 0; i < number_grid; i++) {
sum<<<nBlocks, threads>>>(device_num, device_den, device_vet, last, x);
x += nBlocks * threads;;
}
// blocking copy: also synchronizes the kernels launched above
cudaMemcpy(vet, device_vet, size, cudaMemcpyDeviceToHost);
for (i = 0; i < last; i++) {
c += vet[i];
}
printf("Found %d pairs of mutually friendly numbers\n", c);
free(num);
free(den);
free(vet);
cudaFree(device_num);
cudaFree(device_den);
cudaFree(device_vet);
}
/* Entry point: parses the [start, end] range and runs the search. */
int main(int argc, char **argv) {
long int start;
long int end;
if (argc != 3){
printf("Wrong number of arguments\n");
return EXIT_FAILURE;
}
// atol (not atoi) so the full range of long int is parsed correctly.
start = atol(argv[1]);
end = atol(argv[2]);
printf("Number %ld to %ld\n", start, end);
friendly_numbers(start, end);
return EXIT_SUCCESS;
}
|
3,982 | #include "includes.h"
// One thread per symbol: computes the dot product of the symbol's vector
// with the input vector and stores it in distance[symbolId]. Supports 2D
// grids; blocks are linearized as (blockIdx.y * gridDim.x + blockIdx.x).
__global__ void ComputeDistanceKernel( float *symbolVectors, float *inputVector, float *distance, int symbolSize, int symbols )
{
int linearBlock = blockIdx.y * gridDim.x + blockIdx.x;
int symbolId = blockDim.x * linearBlock + threadIdx.x;
if (symbolId >= symbols)
return;
const float *vec = symbolVectors + symbolId * symbolSize;
float dot = 0.0f;
for (int i = 0; i < symbolSize; i++)
{
dot += vec[i] * inputVector[i];
}
distance[symbolId] = dot;
}
3,983 | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* CUDA accelerated coulombic potential grid test code
* John E. Stone and Chris Rodrigues
* http://www.ks.uiuc.edu/~johns/
*/
#include <stdio.h>
#include <stdlib.h>
#define CUERR { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; }}
// max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
#define MAXATOMS 4000
__constant__ float4 atominfo[MAXATOMS];
#define UNROLLX 8
#define UNROLLY 1
#define BLOCKSIZEX 16
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
//
// This kernel was written by Chris Rodrigues of Wen-mei's group
//
// Accumulates the coulombic potential of the constant-memory atoms into
// energygrid. Each thread handles UNROLLX grid points along x, spaced
// BLOCKSIZEX apart, so a block covers a BLOCKSIZEX*UNROLLX-wide strip.
// atominfo[i].z holds the precomputed (z - zplane)^2 term (see
// copyatomstoconstbuf), so only dx/dy are computed here.
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
float coory = gridspacing * yindex;
float coorx = gridspacing * xindex;
// One accumulator per unrolled x sample.
float energyvalx1=0.0f;
float energyvalx2=0.0f;
float energyvalx3=0.0f;
float energyvalx4=0.0f;
#if UNROLLX == 8
float energyvalx5=0.0f;
float energyvalx6=0.0f;
float energyvalx7=0.0f;
float energyvalx8=0.0f;
#endif
// Distance between the unrolled samples handled by one thread.
float gridspacing_u = gridspacing * BLOCKSIZEX;
//
// XXX 59/8 FLOPS per atom
//
int atomid;
for (atomid=0; atomid<numatoms; atomid++) {
float dy = coory - atominfo[atomid].y;
// dy^2 + (dz^2 precomputed on the host) — shared by all x samples.
float dyz2 = (dy * dy) + atominfo[atomid].z;
float atomq=atominfo[atomid].w;
float dx1 = coorx - atominfo[atomid].x;
float dx2 = dx1 + gridspacing_u;
float dx3 = dx2 + gridspacing_u;
float dx4 = dx3 + gridspacing_u;
#if UNROLLX == 8
float dx5 = dx4 + gridspacing_u;
float dx6 = dx5 + gridspacing_u;
float dx7 = dx6 + gridspacing_u;
float dx8 = dx7 + gridspacing_u;
#endif
// q / r via rsqrtf for each unrolled sample.
energyvalx1 += atomq * rsqrtf(dx1*dx1 + dyz2);
energyvalx2 += atomq * rsqrtf(dx2*dx2 + dyz2);
energyvalx3 += atomq * rsqrtf(dx3*dx3 + dyz2);
energyvalx4 += atomq * rsqrtf(dx4*dx4 + dyz2);
#if UNROLLX == 8
energyvalx5 += atomq * rsqrtf(dx5*dx5 + dyz2);
energyvalx6 += atomq * rsqrtf(dx6*dx6 + dyz2);
energyvalx7 += atomq * rsqrtf(dx7*dx7 + dyz2);
energyvalx8 += atomq * rsqrtf(dx8*dx8 + dyz2);
#endif
}
// Accumulate (+=) so successive z-slices can reuse the same grid.
energygrid[outaddr ] += energyvalx1;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3;
energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4;
#if UNROLLX == 8
energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5;
energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6;
energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7;
energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8;
#endif
}
// Precomputes (x, y, (zplane - z)^2, charge) for each atom and uploads the
// result to the constant-memory array atominfo.
// Returns 0 on success, -1 on CUDA error or when count exceeds MAXATOMS.
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
CUERR // check and clear any existing errors
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
// Squared z-distance to the plane; saves a multiply per atom per thread
// inside the cenergy kernel.
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
cudaMemcpyToSymbol(atominfo, atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
// Allocates and fills an atom array with random coordinates inside the grid
// volume and random charges in [-1, 1]. Layout: 4 floats per atom
// (x, y, z, charge). Returns 0 on success, -1 on allocation failure.
int initatoms(float **atombuf, int count, dim3 volsize, float gridspacing) {
dim3 size;
int i;
float *atoms;
atoms = (float *) malloc(count * 4 * sizeof(float));
if (atoms == NULL)
return -1;
*atombuf = atoms;
// compute grid dimensions in angstroms
// BUG FIX: the cast previously bound to gridspacing alone, truncating it
// to an unsigned int BEFORE the multiply (e.g. gridspacing = 0.5 gave a
// zero-sized volume); cast the product instead.
size.x = (unsigned int) (gridspacing * volsize.x);
size.y = (unsigned int) (gridspacing * volsize.y);
size.z = (unsigned int) (gridspacing * volsize.z);
for (i=0; i<count; i++) {
int addr = i * 4;
atoms[addr    ] = (rand() / (float) RAND_MAX) * size.x;
atoms[addr + 1] = (rand() / (float) RAND_MAX) * size.y;
atoms[addr + 2] = (rand() / (float) RAND_MAX) * size.z;
atoms[addr + 3] = ((rand() / (float) RAND_MAX) * 2.0f) - 1.0f; // charge
}
return 0;
}
|
3,984 | #include <cassert>
#include <cstdlib>
#include <iostream>
#include <chrono>
using namespace std;
#define MASK_DIM 7
#define MASK_OFFSET (MASK_DIM / 2)
// allocation in constant memory
__constant__ int mask[7 * 7];
__global__ void convolution_2d(int *matrix, int *result, int N);
void verify_result(int *m, int *mask, int *result, int N) ;
void init_matrix(int *m, int n);
// Returns a high-resolution wall-clock sample used for coarse timing.
auto get_time() { return chrono::high_resolution_clock::now(); }
// Driver: builds a random 1024x1024 matrix and 7x7 mask, runs the GPU
// convolution, times kernel + copy-back, and checks against a CPU
// reference.
int main()
{
int N = 1 << 10;
size_t bytes_n = N * N * sizeof(int);
size_t bytes_m = MASK_DIM * MASK_DIM * sizeof(int);
// CPU
int *matrix = new int[N * N];
int *result = new int[N * N];
int *h_mask = new int[MASK_DIM * MASK_DIM];
init_matrix(matrix, N);
init_matrix(h_mask, MASK_DIM);
// GPU
int *d_matrix;
int *d_result;
cudaMalloc(&d_matrix, bytes_n);
cudaMalloc(&d_result, bytes_n);
// CPU ---> GPU; the mask goes to constant memory.
cudaMemcpy(d_matrix, matrix, bytes_n, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(mask, h_mask, bytes_m);
int THREADS = 16;
int BLOCKS = (N + THREADS - 1) / THREADS;
dim3 block_dim(THREADS, THREADS);
dim3 grid_dim(BLOCKS, BLOCKS);
auto start = get_time();
convolution_2d<<<grid_dim, block_dim>>>(d_matrix, d_result, N);
// GPU --> CPU; the blocking copy also synchronizes with the kernel, so
// the measured interval covers launch + kernel + copy.
cudaMemcpy(result, d_result, bytes_n, cudaMemcpyDeviceToHost);
auto finish = get_time();
auto duration =
chrono::duration_cast<std::chrono::milliseconds>(finish - start);
cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
// Reference check (asserts on mismatch).
verify_result(matrix, h_mask, result, N);
cout << "terminé avec succès"<<endl;
cudaFree(d_matrix);
cudaFree(d_result);
delete[] matrix;
delete[] result;
delete[] h_mask;
return 0;
}
// 2D convolution of an N x N matrix with the 7x7 constant-memory mask;
// out-of-range neighbours count as zero (zero padding).
// Expected launch: 2D grid/block covering at least N x N threads.
__global__ void convolution_2d(int *matrix, int *result, int N)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard: required whenever the grid overshoots N; the original
// wrote out of bounds when N was not a multiple of the block size.
if (row >= N || col >= N)
return;
int start_r = row - MASK_OFFSET;
int start_c = col - MASK_OFFSET;
int temp = 0;
for (int i = 0; i < MASK_DIM; i++)
for (int j = 0; j < MASK_DIM; j++)
if ((start_r + i) >= 0 && (start_r + i) < N &&
(start_c + j) >= 0 && (start_c + j) < N)
temp += matrix[(start_r + i) * N + (start_c + j)] * mask[i * MASK_DIM + j];
result[row * N + col] = temp;
}
// Fills an n x n row-major matrix with pseudo-random values in [0, 100).
void init_matrix(int *m, int n)
{
int total = n * n;
for (int idx = 0; idx < total; idx++)
m[idx] = rand() % 100;
}
// CPU reference: recomputes the zero-padded 7x7 convolution for every
// output cell and asserts it matches the GPU result.
void verify_result(int *m, int *mask, int *result, int N)
{
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
int expected = 0;
for (int k = 0; k < MASK_DIM; k++)
{
int r = i - MASK_OFFSET + k;
if (r < 0 || r >= N)
continue;
for (int l = 0; l < MASK_DIM; l++)
{
int c = j - MASK_OFFSET + l;
if (c >= 0 && c < N)
expected += m[r * N + c] * mask[k * MASK_DIM + l];
}
}
assert(result[i * N + j] == expected);
}
}
}
3,985 | #include "includes.h"
// Grid-stride loop: writes out[i] = 2 * randomValues[i] for all N inputs.
__global__ void host_api_kernel(float *randomValues, float *out, int N)
{
int stride = gridDim.x * blockDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
{
out[i] = randomValues[i] * 2;
}
}
3,986 | /*
*
* Accessing out of bound memory from GPU
* Vector addition
*
*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
// Wraps a CUDA call so failures are reported with file/line context.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the CUDA error string for a failed call and (by default) exits
// the process with the error code as status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code == cudaSuccess)
return;
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
// Element-wise vector addition: e_z[i] = e_x[i] + e_y[i] for i in [0, n).
__global__
void vector_addition(int n, int *e_x, int *e_y, int *e_z) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Guard must be strict: the original `id <= n` (the deliberately
// "Inserted Error") read and wrote one element past the end.
if (id < n) {
e_z[id] = e_x[id] + e_y[id];
}
}
// Driver: adds two 1M-element vectors on the GPU. Only the final D2H copy
// is wrapped in gpuErrchk, which is where an illegal access from the
// kernel would surface.
int main() {
int no_el = 1048576;
int block_size = 512;
// +1 block so the grid covers no_el even when it is not a multiple of
// block_size (here it is, so the last block is fully idle-guarded).
int grid_size = (no_el/block_size) + 1; //ceil doesn't give correct grid size
int *h_x, *d_x, *h_y, *d_y, *h_z, *d_z;
h_x = (int*)malloc(no_el*sizeof(int));
h_y = (int*)malloc(no_el*sizeof(int));
h_z = (int*)malloc(no_el*sizeof(int));
for (int i = 0; i < no_el; i++) {
h_x[i] = i;
h_y[i] = i + 2;
h_z[i] = 0;
}
cudaMalloc(&d_x, no_el*sizeof(int));
cudaMalloc(&d_y, no_el*sizeof(int));
cudaMalloc(&d_z, no_el*sizeof(int));
cudaMemcpy(d_x, h_x, no_el*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, no_el*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_z, h_z, no_el*sizeof(int), cudaMemcpyHostToDevice);
dim3 block(block_size);
dim3 grid(grid_size);
vector_addition<<<grid, block>>>(no_el, d_x, d_y, d_z);
// Blocking copy: synchronizes with the kernel and reports its errors.
gpuErrchk(cudaMemcpy(h_z, d_z, no_el*sizeof(int), cudaMemcpyDeviceToHost));
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
free(h_x);
free(h_y);
free(h_z);
}
|
3,987 | // Copyright (c) Meta Platforms, Inc. and its affiliates.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "RedwoodNoiseModel.cuh"
#include <algorithm>
#include <cmath>
#include <cuda_runtime.h>
#include <curand_kernel.h>
namespace {
// Rounds num to the nearest multiple of base (ties follow std::round,
// i.e. away from zero).
int roundToNearestMultiple(int num, int base) {
  const float ratio = static_cast<float>(num) / static_cast<float>(base);
  return static_cast<int>(std::round(ratio)) * base;
}
const int MODEL_N_DIMS = 5;
const int MODEL_N_COLS = 80;
// Read about the noise model here: http://www.alexteichman.com/octo/clams/
// Original source code: http://redwood-data.org/indoor/data/simdepth.py
// Redwood distortion lookup: interpolates a correction factor from the
// 80x5-column model table and applies it to depth z.
// (_x, _y) are pixel coordinates in 640x480 space (downsampled by 8x6 to
// index the table); returns the undistorted depth, or 0 when the factor
// is effectively zero.
__device__ float undistort(const int _x,
const int _y,
const float z,
const float* __restrict__ model) {
// Bracket z between two of the 5 depth bins (2m spacing), with `a` the
// interpolation weight inside the bin.
const int i2 = (z + 1) / 2;
const int i1 = i2 - 1;
const float a = (z - (i1 * 2.0f + 1.0f)) / 2.0f;
const int x = _x / 8;
const int y = _y / 6;
const float f =
(1.0f - a) *
model[(y * MODEL_N_COLS + x) * MODEL_N_DIMS + min(max(i1, 0), 4)] +
a * model[(y * MODEL_N_COLS + x) * MODEL_N_DIMS + min(i2, 4)];
if (f < 1e-5f)
return 0.0f;
else
return z / f;
}
// Applies the Redwood RGB-D noise model to a depth image via a grid-stride
// loop: pixel shuffle, 2x2 downsample, distortion lookup, then
// quantization + high-frequency noise. Each thread owns one persistent
// curand state, restored to `states` on exit.
__global__ void redwoodNoiseModelKernel(const float* __restrict__ depth,
const int H,
const int W,
curandState_t* __restrict__ states,
const float* __restrict__ model,
const float noiseMultiplier,
float* __restrict__ noisyDepth) {
const int ID = blockIdx.x * blockDim.x + threadIdx.x;
const int STRIDE = gridDim.x * blockDim.x;
// Work on a register copy of the RNG state; written back at the end.
curandState_t curandState = states[ID];
const float ymax = H - 1;
const float xmax = W - 1;
for (int idx = ID; idx < H * W; idx += STRIDE) {
// Shuffle pixels: jitter the source coordinate with gaussian noise,
// clamped to the image; +0.5f rounds to nearest on int conversion.
const int y = min(max((idx / W) + curand_normal(&curandState) * 0.25f *
noiseMultiplier,
0.0f),
ymax) +
0.5f;
const int x = min(max((idx % W) + curand_normal(&curandState) * 0.25f *
noiseMultiplier,
0.0f),
xmax) +
0.5f;
// downsample: snap to the even 2x2 block corner
const float d = depth[(y - y % 2) * W + x - x % 2];
// If depth is greater than 10m, the sensor will just return a zero
if (d >= 10.0f) {
noisyDepth[idx] = 0.0f;
} else {
// Distortion
// The noise model was originally made for a 640x480 sensor,
// so re-map our arbitrarily sized sensor to that size!
const float undistorted_d =
undistort(static_cast<float>(x) / xmax * 639.0f + 0.5f,
static_cast<float>(y) / ymax * 479.0f + 0.5f, d, model);
// quantization and high freq noise (35.130 is the disparity model
// constant; /8 steps emulate the sensor's subpixel quantization)
if (undistorted_d == 0.0f) {
noisyDepth[idx] = 0.0f;
} else {
const float denom =
round((35.130f / static_cast<double>(undistorted_d) +
curand_normal(&curandState) * 0.027778f * noiseMultiplier) *
8.0f);
noisyDepth[idx] = denom > 1e-5 ? (35.130f * 8.0f / denom) : 0.0f;
}
}
}
states[ID] = curandState;
}
// One thread per state: seeds states[id] on subsequence id + 1 so the
// per-thread random streams are decorrelated.
__global__ void curandStatesSetupKernel(curandState_t* states,
                                        int seed,
                                        int n) {
  const int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id >= n)
    return;
  curand_init(seed, id + 1, 0, &states[id]);
}
} // namespace
namespace esp {
namespace sensor {
namespace impl {
// Lazily grown pool of curand states resident in device memory.
struct CurandStates {
// Ensures at least nStates initialised states exist. Grows (and
// re-seeds with host rand()) only when the request exceeds the current
// pool; never shrinks.
void alloc(const int nStates, const int maxThreadsPerBlock) {
if (nStates > nStates_) {
release();
cudaMalloc(&devStates, nStates * sizeof(curandState_t));
const int nBlocks =
std::ceil(static_cast<float>(nStates) / maxThreadsPerBlock);
curandStatesSetupKernel<<<nBlocks, maxThreadsPerBlock>>>(devStates,
rand(), nStates);
nStates_ = nStates;
}
}
// Frees the device pool; safe to call repeatedly.
void release() {
if (devStates != 0) {
cudaFree(devStates);
devStates = 0;
nStates_ = 0;
}
}
~CurandStates() { release(); }
// Device pointer to the state array (0 when unallocated).
curandState_t* devStates = 0;
private:
// Number of states currently allocated/initialised.
int nStates_ = 0;
};
// Factory: heap-allocates an empty CurandStates pool; the caller owns it
// and releases it via freeCurandStates.
CurandStates* getCurandStates() {
  CurandStates* pool = new CurandStates();
  return pool;
}
// Destroys a pool created by getCurandStates; a null pointer is a no-op.
void freeCurandStates(CurandStates* curandStates) {
  if (curandStates == 0)
    return;
  delete curandStates;
}
// Launches the noise-model kernel on depth data already on the device.
// Geometry targets ~4 pixels per thread, rounded to a warp multiple and
// clamped to the block limit; the curand pool is grown to one state per
// launched thread.
void simulateFromGPU(const int maxThreadsPerBlock,
const int warpSize,
const float* __restrict__ devDepth,
const int H,
const int W,
const float* __restrict__ devModel,
CurandStates* curandStates,
const float noiseMultiplier,
float* __restrict__ devNoisyDepth) {
const int totalConcurrency = std::ceil(static_cast<float>(H * W) / 4.0f);
const int nThreads =
std::min(std::max(roundToNearestMultiple(totalConcurrency, warpSize), 1),
maxThreadsPerBlock);
const int nBlocks =
std::ceil(static_cast<float>(totalConcurrency) / nThreads);
curandStates->alloc(nBlocks * nThreads, maxThreadsPerBlock);
// Asynchronous launch; callers copying the result back synchronize then.
redwoodNoiseModelKernel<<<nBlocks, nThreads>>>(
devDepth, H, W, curandStates->devStates, devModel, noiseMultiplier,
devNoisyDepth);
}
// Host-data convenience wrapper: stages the depth image on the device,
// runs simulateFromGPU, and copies the noisy result back.
// NOTE(review): allocates/frees device buffers on every call — fine for
// occasional use, costly in a tight loop.
void simulateFromCPU(const int maxThreadsPerBlock,
const int warpSize,
const float* __restrict__ depth,
const int H,
const int W,
const float* __restrict__ devModel,
CurandStates* curandStates,
const float noiseMultiplier,
float* __restrict__ noisyDepth) {
float *devDepth, *devNoisyDepth;
cudaMalloc(&devDepth, H * W * sizeof(float));
cudaMalloc(&devNoisyDepth, H * W * sizeof(float));
cudaMemcpy(devDepth, depth, H * W * sizeof(float), cudaMemcpyHostToDevice);
simulateFromGPU(maxThreadsPerBlock, warpSize, devDepth, H, W, devModel,
curandStates, noiseMultiplier, devNoisyDepth);
// Blocking copy also synchronizes with the kernel launch above.
cudaMemcpy(noisyDepth, devNoisyDepth, H * W * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(devNoisyDepth);
cudaFree(devDepth);
}
} // namespace impl
} // namespace sensor
} // namespace esp
|
3,988 | /*
compile
$ nvcc -o matrix_transpose_dot_product matrix_transpose_dot_product.cu
elementwise multiplication and subtraction
numpy version
import numpy as np
m1 = np.array(((0, 1, 2), (3, 4, 5), (6, 7, 8)))
m2 = np.array(((8, 7, 6), (5, 4, 3), (2, 1, 0)))
m1.dot(m2.T) # m1 dot m2_transpose (m1_m2T)
m1.T.dot(m2) # m1_transpose dot m2 (m1T_m2)
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
__global__
void kDot_m1_m2T(const int nThreads, const float *m1, const float *m2, float *output, const int m1_columns, const int m2_rows ){
/* Writes output = m1 * transpose(m2).
Inputs:
m1: left matrix, m1_rows x m1_columns
m2: right matrix, m2_rows x m1_columns (its transpose is m1_columns x m2_rows)
output: receives the m1_rows x m2_rows product
m1_columns: number of columns of m1 (shared inner dimension)
m2_rows: number of rows of m2
One output element per logical index; grid-stride loop covers nThreads
(= m1_rows * m2_rows) elements.
*/
int stride = blockDim.x * gridDim.x;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nThreads; idx += stride)
{
int row = idx / m2_rows;
int col = idx % m2_rows;
float acc = 0.0f;
for (int k = 0; k < m1_columns; ++k)
{
// m2 row `col` plays the role of m2T column `col`.
acc += m1[row * m1_columns + k] * m2[col * m1_columns + k];
}
output[idx] = acc;
}
}
// Host wrapper: one block per m1 row, one thread per m2 row; blocks until
// the kernel finishes, then returns the (device) output pointer.
float* dDot_m1_m2T(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_rows )
{
const int total = m1_rows * m2_rows;
kDot_m1_m2T <<< m1_rows, m2_rows >>> ( total, m1, m2, output, m1_columns, m2_rows );
cudaDeviceSynchronize();
return output;
}
__global__
void kDot_m1T_m2(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows,
const int m1_columns, const int m2_columns ){
/* Increments the output matrix with the product of two matrices: m1 transposed and m2.
NOTE: unlike kDot_m1_m2T this ACCUMULATES (output[i] += ...), so output
must be initialised by the caller before the first launch.
Inputs:
m1: array, left matrix of size m1_rows x m1_columns (m1 transposed will be of size m1_columns x m1_rows)
m2: array, right matrix of size m1_rows x m2_columns
output: array, the results of the computation are to be stored here:
m1 * m2, product of two arrays m1 and m2, a matrix of size m1_columns x m2_columns
m1_rows: int, number of rows in the left matrix m1
m1_columns: int, number of columns in the left matrix m1
m2_rows: int, number of rows in the left matrix m2
*/
// Grid-stride loop: one output element per logical index i.
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_columns;
int c = i % m2_columns;
int id_T;
float t_output = 0.0;
for( int k = 0; k < m1_rows; ++k ) {
// Column r of m1 stands in for row r of m1 transposed.
id_T = k * m1_columns + r;
t_output += m1[ id_T ] * m2[ k * m2_columns + c ];
}
output[i] += t_output;
}
}
// Host wrapper: one block per m1 column, one thread per m2 column; blocks
// until the accumulation kernel finishes.
void dDot_m1T_m2(const float *m1, const float *m2, float *output, const int m1_height , const int m1_width, const int m2_width )
{
const int total = m1_width * m2_width;
kDot_m1T_m2 <<< m1_width, m2_width >>> (total, m1, m2, output, m1_height, m1_width, m2_width );
cudaDeviceSynchronize();
}
// Demo driver: builds two 3x3 matrices, computes m1T.m2 and m1.m2T on the
// GPU, and prints both results.
// NOTE(review): d_out is NOT re-zeroed between the two products; this is
// harmless for the m1.m2T case only because kDot_m1_m2T overwrites (=)
// rather than accumulates.
int main(void)
{
// host initialization
const int M_SIZE = 9; // 3x3 matrix
const int M_BYTES = M_SIZE * sizeof(float);
float h_m1[M_SIZE], h_m2[M_SIZE], h_out[M_SIZE];
for (int i = 0; i < M_SIZE; i++)
{
h_m1[i] = float(i); // 0, 1, 2, 3, 4, 5, 6, 7, 8
h_m2[i] = float(M_SIZE - 1 - i); // 8, 7, 6, 5, 4, 3, 2, 1, 0
}
// GPU memory allocation and initialization
float *d_m1, *d_m2, *d_out;
cudaMalloc((void**) &d_m1, M_BYTES);
cudaMalloc((void**) &d_m2, M_BYTES);
cudaMalloc((void**) &d_out, M_BYTES);
cudaMemcpy(d_m1, h_m1, M_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_m2, h_m2, M_BYTES, cudaMemcpyHostToDevice);
// m1_transpose dot m2 (accumulates into d_out, which is uninitialised —
// see the NOTE above; verify against expected output)
dDot_m1T_m2(d_m1, d_m2, d_out, 3, 3, 3);
cudaMemcpy(h_out, d_out, M_BYTES, cudaMemcpyDeviceToHost);
// print result
printf("m1_transpose dot m2\n");
for (int i = 0; i < M_SIZE; i++)
{
printf("h_out[%d] = %f\n", i, h_out[i]);
}
// m1 dot m2_transpose
dDot_m1_m2T(d_m1, d_m2, d_out, 3, 3, 3);
cudaMemcpy(h_out, d_out, M_BYTES, cudaMemcpyDeviceToHost);
// print result
printf("m1 dot m2_transpose\n");
for (int i = 0; i < M_SIZE; i++)
{
printf("h_out[%d] = %f\n", i, h_out[i]);
}
// free memory
cudaFree(d_m1);
cudaFree(d_m2);
cudaFree(d_out);
// free(h_m1);
// free(h_m2);
// free(h_out);
}
3,989 | #include "includes.h"
// Row-major linear index of element (row, col) in a matrix `width` wide.
__device__ inline unsigned int RM_Index(unsigned int row, unsigned int col, unsigned int width) {
unsigned int rowStart = row * width;
return rowStart + col;
}
// Gaussian Naive Bayes: per-class, per-feature variance.
// One thread handles one feature column across all samples.
// Assumes feature_vars_ is zero-initialised on entry and feature_means_
// already holds the per-class means.
__global__ void GaussianNBVarKernel(const float *d_data, const int *d_labels, const float *feature_means_, float *feature_vars_, const int *class_count_, const unsigned int n_samples_, const unsigned int n_classes_, const unsigned int n_features_) {
// Each thread will take care of one feature for all training samples
unsigned int tidx = threadIdx.x;
unsigned int feat_col = tidx + (blockIdx.x * blockDim.x);
unsigned int i = 0, row = 0;
// Calculate variances
if (feat_col < n_features_) { /* End condition check */
for (i = 0; i < n_samples_; ++i) { /* For each sample */
row = d_labels[i];
// diff*diff replaces the original double-precision pow(x, 2):
// squaring needs no transcendental call and stays in float.
float diff = d_data[RM_Index(i, feat_col, n_features_)] -
feature_means_[RM_Index(row, feat_col, n_features_)];
feature_vars_[RM_Index(row, feat_col, n_features_)] += diff * diff;
}
// Normalise each class's accumulated squared deviations by its count.
for (i = 0; i < n_classes_; ++i) { /* For each class */
feature_vars_[RM_Index(i, feat_col, n_features_)] /= class_count_[i];
}
}
}
3,990 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define M 32
void desplegar(int *matriz, int m, int n);
// Applies a p x p weighted-average mask over an m x n image (zero padding
// at the borders). One thread per output pixel.
__global__ void calcularGPU2D(int *mask, int *imagen, int *res, int p, int m, int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Guard against grid overshoot.
if (i >= m || j >= n)
return;
res[i*n+j] = 0;
float div = (float)1/(p*p);
int offset = p/2; // mask half-width
// walk the mask window...
for (int k=0; k<p; k++)
{
for (int l=0; l<p; l++)
{
int r = i - offset + k;
int c = j - offset + l;
// BUG FIX: the original tested (r*n >= 0) instead of (r >= 0) and had
// no upper bounds at all, reading out of range along the image edges.
if (r >= 0 && r < m && c >= 0 && c < n)
res[i*n+j] += round(imagen[r*n + c] * div * mask[k*p+l]);
}
}
}
// Runs the 2D convolution on the GPU and returns the elapsed time in ms
// (measured with CUDA events, including allocation and transfers).
// Assumes m and n are multiples of M (the grid has no remainder blocks).
float multiplicarGPU(int *mask, int *imagen, int *res, int p, int m, int n)
{
int *dev_mask, *dev_imagen, *dev_res;
cudaEvent_t gpuI, gpuF;
float gpuT;
cudaEventCreate( &gpuI );
cudaEventCreate( &gpuF );
cudaEventRecord( gpuI, 0 );
cudaMalloc( (void**)&dev_mask, p*p*sizeof(int) );
cudaMalloc( (void**)&dev_imagen, m*n*sizeof(int) );
cudaMalloc( (void**)&dev_res, m*n*sizeof(int) );
cudaMemcpy( dev_mask, mask, p*p*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_imagen, imagen, m*n*sizeof(int), cudaMemcpyHostToDevice );
// MxM threads per block; the grid exactly tiles the m x n image.
dim3 bloques( m/M, n/M );
dim3 threads( M, M );
calcularGPU2D<<<bloques, threads>>>( dev_mask, dev_imagen, dev_res, p, m, n );
cudaDeviceSynchronize();
cudaMemcpy( res, dev_res, m*n*sizeof(int), cudaMemcpyDeviceToHost );
cudaEventRecord( gpuF, 0 );
cudaEventSynchronize( gpuF );
cudaEventElapsedTime( &gpuT, gpuI, gpuF );
cudaFree( dev_mask );
cudaFree( dev_imagen );
cudaFree( dev_res );
return gpuT;
}
// Prints an m x n matrix row by row, followed by a blank line.
void desplegar(int *matriz, int m, int n)
{
for (int fila=0; fila<m; fila++)
{
int *inicio = matriz + fila*n;
for (int col=0; col<n; col++)
printf("%d ", inicio[col]);
printf("\n");
}
printf("\n");
}
// Fills the image with a per-row gradient (255 down to 0, wrapping every
// 256 rows), zeroes the result buffer, fills the mask with (row index + 1),
// then prints the initial image.
void inicializar(int *mask, int *imagen, int *res, int p, int m, int n)
{
int total = m*n;
for(int i = 0; i<total; i++)
{
int fila = i/n;
imagen[i] = 255 - (fila%256);
res[i] = 0;
}
int maskTotal = p*p;
for(int i = 0; i<maskTotal; i++)
mask[i] = (i/p) + 1;
desplegar(imagen, m, n);
}
// Allocates host buffers, initialises the test image/mask, runs the GPU
// convolution (printing its elapsed time), and prints the result matrix.
void sumaEuler( int p, int m, int n)
{
int *mask, *imagen, *res;
mask = (int*) malloc(p*p*sizeof(int));
imagen = (int*) malloc(m*n*sizeof(int));
res = (int*) malloc(m*n*sizeof(int));
inicializar( mask, imagen, res, p, m, n );
printf("Tiempo (GPU): %f ms\n", multiplicarGPU( mask, imagen, res, p, m, n ) );
desplegar(res, m, n);
free( mask );
free( imagen );
free( res );
}
// Entry point: validates the mask size p (3/5/7) and the image dimensions
// (m = 2n, m % 512 == 0, n % 256 == 0), then runs the benchmark.
int main (int argc, char *argv[] )
{
if ( argc != 4 )
{
printf("%s P M N\n", argv[0]);
exit(0);
}
int p = atoi(argv[1]);
int m = atoi(argv[2]);
int n = atoi(argv[3]);
// BUG FIX: in the original the dangling `else` bound to the inner `if`,
// so an invalid p silently did nothing while invalid dimensions printed
// the p error (with a stray printf argument). Braces make each failure
// report its own cause.
if ( p == 3 || p == 5 || p == 7 )
{
if (m>n && m/n==2 && m%512 == 0 && n%256==0)
sumaEuler (p, m, n);
else
{
printf("valores incorrectos para m y n\n");
exit(0);
}
}
else
{
printf("valor incorrecto para p\n");
exit(0);
}
return 1;
}
|
3,991 | #include <cstdio>
// No-op kernel: exists only to exercise a device launch.
__global__ void kernel()
{
}
// Launches the no-op kernel and prints a greeting.
int main()
{
kernel<<<1, 1>>>();
// Kernel launches are asynchronous: without this sync the process can
// exit before the launch executes, and any launch error goes unnoticed.
cudaDeviceSynchronize();
printf ("Hello, CUDA!\n");
return 0;
}
3,992 | #include <iostream>
#include <vector>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Graph.cuh"
#include "Solver.cuh"
using namespace std;
namespace atspSolver
{
// Pretty-prints the tour as "a -> b -> ... | cost: c" on stdout.
void fullCycle::display()
{
	std::stringstream stream;
	const int pathSize = path.size();
	for (int i = 0; i + 1 < pathSize; i++)
	{
		stream << path[i] << " -> ";
	}
	if (pathSize > 0)
	{
		stream << path[pathSize - 1];
	}
	stream << " | cost: " << totalCost;
	std::cout << stream.str() << std::endl;
}
// Sums the edge weights along consecutive vertices of v, reading the
// graph's row-major adjacency matrix (row = from, column = to).
double calculateCost(const std::vector<int> &v, const Graph &graph)
{
	const int nodes = graph.getNumberOfNodes();
	const int lastEdge = (int)v.size() - 1;
	double total = 0;
	for (int i = 0; i < lastEdge; i++)
	{
		total += graph[v[i + 1] + v[i] * nodes];
	}
	return total;
}
std::vector<fullCycle> fullCycles;
// Recursively enumerates every permutation of a[l..r] via swap
// backtracking. At each complete permutation the tour is closed by
// appending a[0], costed, and pushed onto the global fullCycles vector.
// NOTE(review): `a` is passed by value, so each recursion level copies
// the vector — correct but O(n) extra work per call.
void permute(vector<int> a, int l, int r, const Graph& graph)
{
if (l == r) {
a.push_back(a[0]);
double cost = calculateCost(a, graph);
fullCycles.push_back(fullCycle(a, cost));
}
else
{
for (int i = l; i <= r; i++)
{
swap(a[l], a[i]);
// Recursion
permute(a, l + 1, r, graph);
// Backtrack
swap(a[l], a[i]);
}
}
}
// Brute-force ATSP: enumerates all tours starting at node 0 and returns
// the cheapest closed cycle.
fullCycle findOptimalPath(const Graph &graph)
{
	// BUG FIX: fullCycles is a global accumulator — clear it so a second
	// call does not mix tours from a previously solved graph.
	fullCycles.clear();
	std::vector<int> v;
	for (int i = 0; i < graph.getNumberOfNodes(); i++)
	{
		v.push_back(i);
	}
	permute(v, 0, v.size() - 1, graph);
	fullCycle result = fullCycles[0];
	for (size_t i = 1; i < fullCycles.size(); i++)
	{
		if (fullCycles[i].totalCost < result.totalCost)
		{
			result = fullCycles[i];
		}
	}
	return result;
}
} |
3,993 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
//Function that verify cuda calls and return cuda error if any
// Wraps a CUDA API call so failures are reported with file/line context.
#define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the error string for a failed CUDA call; by default exits the
// process with the error code as status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//Initialise ascendant array with random values in
// Initialises an ascending array: each element adds a random increment in
// [0, adder) to its predecessor. Requires size >= 1.
void init_array(int* array, int size, int const adder=10)
{
array[0] = rand()%adder;
// BUG FIX: the loop previously started at i = 0, reading array[-1]
// (out of bounds) and clobbering the seed element just written above.
for(int i = 1; i < size;i++)
{
array[i] = array[i-1] + rand()%adder;
}
}
//Function that initialise array with random values
// Fills the array with independent random values in [0, adder).
void init_array_no_order(int* array, int size, int const adder=10)
{
// The original also wrote array[0] before the loop, only for the loop to
// overwrite it immediately; a single pass suffices.
for(int i = 0; i < size;i++)
{
array[i] = rand()%adder;
}
}
//Function that copy array in another
// Duplicates the first n elements of a into a_copy.
void copy_array(int* a, int* a_copy, int n){
int idx = 0;
while (idx < n) {
a_copy[idx] = a[idx];
++idx;
}
}
//Function that print an array of size size
// Displays the array as "[v0 v1 ... ]" on stdout.
void print_array(int* a, int size)
{
printf("[");
int idx = 0;
while (idx < size)
{
printf("%d " , a[idx]);
++idx;
}
printf("]\n");
}
//Globall version of parallel merge of a and b in m with |m|<1024
// Merge-path merge of sorted d_a (n_a) and sorted d_b (n_b) into d_m
// (n_m = n_a + n_b), for n_m <= 1024. Launch with a single block; thread
// i binary-searches its own crossing point on anti-diagonal i of the
// (a, b) merge grid and writes exactly d_m[i].
__global__ void mergeSmall_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){
int i = threadIdx.x;
if(i < n_m)
{
// K and P are the two endpoints of diagonal i; Q is the probe point.
// Convention here: .y indexes into d_a, .x indexes into d_b.
int2 K;
int2 P;
int2 Q;
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
// Binary search along the diagonal for the merge-path crossing.
while(1)
{
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
// Probe is at-or-below the path...
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
// ...and at-or-above it: crossing found, emit the smaller head.
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x]))
{
d_m[i] = d_a[Q.y];
}
else
{
d_m[i] = d_b[Q.x];
}
break;
}
else
{
// Path is further toward b: move the K endpoint.
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
// Path is further toward a: move the P endpoint.
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
}
}
//Device version of parallel merge of a and b in m with |m|<1024
// Device-side variant of mergeSmall_k, called from mergeBig_k with
// shared-memory partitions: same merge-path binary search, one output
// element per thread (requires n_m <= blockDim.x).
// .y indexes into d_a, .x indexes into d_b.
__device__ void mergeParallel(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){
int i = threadIdx.x;
if(i < n_m)
{
int2 K;
int2 P;
int2 Q;
// Diagonal endpoints for thread i's anti-diagonal.
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
// Binary search along the diagonal for the merge-path crossing.
while(1)
{
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
// Crossing found: take the smaller of the two heads.
if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x]))
{
d_m[i] = d_a[Q.y];
}
else
{
d_m[i] = d_b[Q.x];
}
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
}
}
//Giving a path ( from pathBig_k ) each block merge (with mergeParallel) each piece a_k and b_k in m_k of a and b. Then it replace elements in m
// Given the partition points computed by pathBig_k, each block stages its
// slice of a and b into shared memory, merges them with mergeParallel,
// and writes the merged slice back to m. Each thread writes only its own
// m_k[threadId], so no barrier is needed after mergeParallel.
// NOTE(review): the guard `blockId <= nbPartitions` is one wider than the
// nbPartitions blocks actually launched — harmless as launched, but
// `< nbPartitions` would match intent; verify.
__global__ void mergeBig_k(int *m, int n_m, int *a, int n_a, int *b, int n_b, int2 *path, int n_path, int nbPartitions)
{
int blockId = blockIdx.x;
int threadId = threadIdx.x;
int i = blockId * blockDim.x + threadId;
if (blockId <= nbPartitions)//One block per partition
{
// Partition bounds: (x0, y0)..(x1, y1) with x indexing a, y indexing b.
int x0, y0, x1, y1;
x0 = path[blockId].x;
y0 = path[blockId].y;
x1 = path[blockId+1].x;
y1 = path[blockId+1].y;
const int dimx=x1-x0;
const int dimy = y1-y0;
// Fixed 1024-entry staging buffers (could be sized dimx/dimy/dimx+dimy).
__shared__ int a_k[1024];
__shared__ int b_k[1024];
__shared__ int m_k[1024];
if (threadId < dimx) //Threads [0, dimx) stage a_k
{
a_k[threadId] = a[x0+threadId];
}
else if (threadId < dimy+dimx)//Threads [dimx, dimx+dimy) stage b_k
{
b_k[threadId-dimx] = b[y0+threadId-dimx];
}
// All staging must finish before any thread merges.
__syncthreads();
mergeParallel(a_k, b_k, m_k, dimx, dimy, dimx+dimy);
m[i] = m_k[threadId];
}
}
//Function that generate a path to break down m into pieces that could be merge without conflict
//On appelle |m|/TPB blocks avec chacun un seul thread. Chaque thread s'occupe de la diagonale thread
// Computes the merge path split points so m can be merged in independent
// `pas`-sized pieces. One thread per diagonal multiple of `pas`: each
// binary-searches the merge-path crossing on its diagonal and stores it
// in path[thread_i]. Thread 0 additionally appends (n_a, n_b) when
// n_a + n_b is not a multiple of pas.
__global__ void pathBig_k(int pas, int2* path, int n_path , int* d_a, int n_a ,int* d_b, int n_b)
{
int thread_i = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_i <= (n_a + n_b)/pas) //<------------//Only threads owning a diagonal (a multiple of pas within the output) participate
{
int i = thread_i*pas;
// K/P: endpoints of diagonal i; Q: binary-search probe.
// Here .y indexes into d_a and .x into d_b.
int2 K;
int2 P;
int2 Q;
if(i > n_a)
{
K.x = i - n_a;
K.y = n_a;
P.x = n_a;
P.y = i - n_a;
}
else
{
K.x = 0;
K.y = i;
P.x = i;
P.y = 0;
}
int offset = 0;
while(1)
{
//Probe the midpoint between P and K
offset = abs(K.y - P.y)/2;
Q.x = K.x + offset;
Q.y = K.y - offset;
// Probe at-or-below the merge path...
if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
{
// ...and at-or-above it: crossing found.
if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
{
break;
}
else
{
K.x = Q.x + 1;
K.y = Q.y - 1;
}
}
else
{
P.x = Q.x - 1;
P.y = Q.y + 1;
}
}
//printf("thread : %d => (%d, %d)\n", thread_i, Q.y, Q.x);
// Careful: stored as (a-index, b-index), i.e. path.x = Q.y, path.y = Q.x.
path[thread_i].x=Q.y;
path[thread_i].y=Q.x;
}
//When |m| is not a multiple of pas, thread 0 appends (n_a, n_b) as the final split point
if (thread_i==0 && (n_a+n_b)%pas!=0)
{
//printf("thread : %d => (%d, %d)\n", thread_i, n_a, n_b);
path[n_path-1].x=n_a;
path[n_path-1].y=n_b;
}
}
//Function that decide wether to use mergeSmall_k or mergeBig_k giving the size of the pieces
// Merges sorted arrays a (n_a) and b (n_b) into m (n_m = n_a + n_b) on the
// GPU: mergeSmall_k when the output fits in one block (<= 1024 elements),
// otherwise the partitioned pathBig_k + mergeBig_k pipeline.
void mergeAll(int* m, int n_m, int* a, int n_a, int* b, int n_b){
int pas = 1024; // partition width; must be <= 1024 (one block)
int nbPartitions = n_m/pas+(n_m%pas!=0); // +1 when n_m is not a multiple of pas
int n_path = (1 + nbPartitions); // (0,0) plus one split point per partition
int *mGPU, *aGPU, *bGPU;
gpuCheck(cudaMalloc(&mGPU, n_m*sizeof(int)));
gpuCheck(cudaMalloc(&aGPU, n_a*sizeof(int)));
gpuCheck(cudaMalloc(&bGPU, n_b*sizeof(int)));
int2 *pathGPU;
gpuCheck(cudaMalloc(&pathGPU, n_path*sizeof(int2)));
gpuCheck(cudaMemcpy(aGPU, a, n_a*sizeof(int), cudaMemcpyHostToDevice));
gpuCheck(cudaMemcpy(bGPU, b, n_b*sizeof(int), cudaMemcpyHostToDevice));
if(n_m <= 1024){
// Single-block case: one thread per output element.
mergeSmall_k<<<1, 1024>>>(aGPU, bGPU, mGPU, n_a, n_b, n_m);
gpuCheck( cudaPeekAtLastError() );
gpuCheck(cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost));
}
else{
// Large case: compute the partition path, then merge each partition
// in its own block.
pathBig_k<<<nbPartitions/1024+1, 1024>>>(pas, pathGPU, n_path, aGPU, n_a, bGPU, n_b);
gpuCheck( cudaPeekAtLastError() );
gpuCheck( cudaDeviceSynchronize() );
mergeBig_k<<<nbPartitions, pas>>>(mGPU, n_m, aGPU, n_a, bGPU, n_b, pathGPU, n_path, nbPartitions);
gpuCheck( cudaPeekAtLastError() );
gpuCheck(cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost));
}
// BUG FIX: mGPU was leaked on every call (only a, b and path were freed).
gpuCheck(cudaFree(mGPU));
gpuCheck(cudaFree(aGPU));
gpuCheck(cudaFree(bGPU));
gpuCheck(cudaFree(pathGPU));
}
//Pre-processing kernel: sorts every contiguous pair of elements of mGPU in place,
//one thread per pair.  The first parameter is unused but kept for interface
//compatibility with existing call sites.
__global__ void pretraitementFusionSort(int* m, int* mGPU, int n){
	int pair = blockIdx.x * blockDim.x + threadIdx.x;  // one thread <-> one pair
	if (pair >= n/2)
		return;
	int left = 2 * pair;
	// Swap the pair when it is out of order.
	if (mGPU[left] > mGPU[left + 1]) {
		int swap = mGPU[left];
		mGPU[left] = mGPU[left + 1];
		mGPU[left + 1] = swap;
	}
}
//Function that sorts any array with a bottom-up (iterative) merge sort,
//delegating each merge step to the GPU through mergeAll.
// m   : host array to sort in place
// n_m : number of elements in m
void fusionSort(int *m, int n_m)
{
	// Arrays of size 0 or 1 are already sorted (also avoids malloc(0)).
	if (n_m <= 1)
		return;
	// Scratch buffers sized for the worst case (the whole array) are
	// allocated ONCE; the original malloc'd and free'd three buffers on
	// every inner iteration and never checked the allocations.
	int *m_12 = (int*)malloc(n_m*sizeof(int));
	int *m_1  = (int*)malloc(n_m*sizeof(int));
	int *m_2  = (int*)malloc(n_m*sizeof(int));
	if (m_12 == NULL || m_1 == NULL || m_2 == NULL)
	{
		free(m_12);
		free(m_1);
		free(m_2);
		printf("fusionSort: allocation failure\n");
		return;
	}
	//L1/R1 : first/last index of the left run, L2/R2 : same for the right run
	int size = 1;
	while (size <= n_m)
	{
		int i = 0;
		while (i < n_m)
		{
			int L1 = i;
			int R1 = i + size - 1;
			int L2 = i + size;
			int R2 = i + 2*size - 1;
			if (L2 >= n_m)        // no right run left to merge with
				break;
			if (R2 >= n_m)        // clamp the last (possibly short) run
				R2 = n_m - 1;
			int size_1 = R1 - L1 + 1;
			int size_2 = R2 - L2 + 1;
			int size_12 = size_1 + size_2;
			// Copy both runs out, merge them on the GPU, copy back.
			for (int j = 0; j < size_1; j++)
				m_1[j] = m[L1 + j];
			for (int j = 0; j < size_2; j++)
				m_2[j] = m[L2 + j];
			mergeAll(m_12, size_12, m_1, size_1, m_2, size_2);
			for (int j = 0; j < size_12; j++)
				m[L1 + j] = m_12[j];
			i = i + 2*size;
		}
		size *= 2;
	}
	free(m_12);
	free(m_1);
	free(m_2);
}
// Sequentially merges two sorted, contiguous ranges of A into tmp.
// A[L1..R1] and A[L2..R2] must each be sorted ascending; the merged
// result ((R1-L1+1) + (R2-L2+1) elements) is written to tmp[0..].
void fusionMergeSeq(int* A, int* tmp, int L1, int R1, int L2, int R2){
    int out = 0;
    int lo = L1, hi = L2;
    // Take the smaller head element while both ranges are non-empty
    // (<= keeps the merge stable, left range wins ties).
    while (lo <= R1 && hi <= R2)
        tmp[out++] = (A[lo] <= A[hi]) ? A[lo++] : A[hi++];
    // Drain whichever range still has elements.
    while (lo <= R1)
        tmp[out++] = A[lo++];
    while (hi <= R2)
        tmp[out++] = A[hi++];
}
// Sequential bottom-up merge sort of A[0..n-1], in place.
// Used as the CPU baseline for the GPU sort benchmark.
void fusionSortSeq(int* A, int n){
    // Scratch buffer sized once for the worst case.  The original
    // realloc'd it on every merge step — unchecked (leak + UB on
    // failure) and pointless, since no merge ever exceeds n elements.
    int* tmp = (int*)malloc(n*sizeof(int));
    if (tmp == NULL)
        return;
    int len = 1;   // current run length being merged
    while (len < n) {
        int i = 0;
        while (i < n) {
            // Left run A[L1..R1], right run A[L2..R2].
            int L1 = i;
            int R1 = i + len - 1;
            int L2 = i + len;
            int R2 = i + 2*len - 1;
            if (L2 >= n)      // no right run to merge with
                break;
            if (R2 >= n)      // clamp the trailing short run
                R2 = n - 1;
            fusionMergeSeq(A, tmp, L1, R1, L2, R2);
            for (int j = 0; j < R2-L1+1; j++) {
                A[i+j] = tmp[j];
            }
            i = i + 2*len;
        }
        len *= 2;
    }
    free(tmp);
}
//Function that sorts an array M in parallel with an iterative merge sort (question 3)
//Verification helper functions
//Checks that tab[0..size-1] is sorted in non-decreasing order.
//Returns 1 when sorted; otherwise prints the first offending index and returns 0.
int assertOrder(int *tab, int size){
    int i = 0;
    while (i + 1 < size) {
        if (tab[i] > tab[i+1]) {
            printf("WARNING : Unsuccessful merge or sort ... : unordered array on indice %d ...\n", i);
            printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]);
            return 0;
        }
        ++i;
    }
    return 1;
}
//Checks that every element of m[0..size-1] occurs somewhere in tab[0..n1-1]
//or tab2[0..n2-1].  Returns 1 on success, 0 (with a warning) otherwise.
//NOTE(review): this only checks membership, not multiplicity — a duplicate
//in m can be "validated" by a single source element; confirm that is
//acceptable for this verification helper.
int assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size)
{
    // verif[i] == 1 once m[i] has been matched against tab or tab2.
    // Heap allocation instead of the original runtime-sized VLA: VLAs are
    // not standard C++ and overflow the stack for large arrays.
    int *verif = (int*)calloc(size, sizeof(int));
    if (verif == NULL) {
        printf("\nWARNING : allocation failure in assertMergeAllValuesPresent...\n");
        return 0;
    }
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < n1; j++) {
            if (tab[j] == m[i]) {
                verif[i] = 1;
            }
        }
    }
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < n2; j++) {
            if (tab2[j] == m[i]) {
                verif[i] = 1;
            }
        }
    }
    for (int i = 0; i < size; i++) {
        if (verif[i] != 1) {
            printf("\nWARNING : Unsuccessful merge : incorrect elements...\n");
            free(verif);
            return 0;
        }
    }
    free(verif);
    return 1;
}
//Checks that every element of m[0..size-1] also appears in m_sorted[0..size-1].
//Returns 1 on success, 0 (with a warning) otherwise.
//NOTE(review): membership only, not multiplicity — confirm that is enough here.
int assertSortAllValuesPresent(int* m, int* m_sorted, int size){
    // verif[i] == 1 once m[i] has been located in m_sorted.
    // Heap allocation instead of the original runtime-sized VLA (non-standard
    // C++, stack-overflow risk for large arrays).
    int *verif = (int*)calloc(size, sizeof(int));
    if (verif == NULL) {
        printf("\nWARNING : allocation failure in assertSortAllValuesPresent...\n");
        return 0;
    }
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            if (m_sorted[j] == m[i]) {
                verif[i] = 1;
            }
        }
    }
    for (int i = 0; i < size; i++) {
        if (verif[i] != 1) {
            printf("i : %d\n", i);
            printf("\nWARNING : Unsuccessful sort : incorrect elements...\n");
            free(verif);
            return 0;
        }
    }
    free(verif);
    return 1;
}
//Validates a merge result: m must be sorted AND contain every element of
//tab and tab2.  Prints a verdict and returns 1 on success, 0 otherwise.
//tab, tab2 : the two source arrays that were merged
//m         : the claimed sorted merge of tab and tab2 (size elements)
int assertMerge(int *tab, int n1, int *tab2, int n2, int* m, int size){
    int ordered  = assertOrder(m, size);
    int complete = assertMergeAllValuesPresent(tab, n1, tab2, n2, m, size);
    if (!(ordered && complete)) {
        printf("\nUnsuccessful merge !\n");
        return 0;
    }
    printf("\nSuccessful merge !\n");
    return 1;
}
//Validates a sort result.
//m        : the original, unsorted array
//m_sorted : the array claimed to be m sorted (checked here)
//size     : number of elements
//Prints a verdict and returns 1 on success, 0 otherwise.
int assertSorted(int* m, int* m_sorted, int size)
{
    int ordered  = assertOrder(m_sorted, size);             // elements in order?
    int complete = assertSortAllValuesPresent(m, m_sorted, size); // all values kept?
    if (!(ordered && complete)) {
        printf("\nUnsuccessful sort !\n");
        return 0;
    }
    printf("\nSuccessful sort !\n");
    return 1;
}
// Benchmark driver: sorts a random array once on the GPU (fusionSort) and
// once on the CPU (fusionSortSeq), and reports both timings.
// Optional argv[1] overrides the array size (default 100000).
int main(int argc, char *argv[])
{
	std::clock_t startS, endS;
	float seqMergeTime, parMergeTime;
	srand(time(NULL));
	int n_m = 100000;                 // default problem size
	int *m, *mseq, *mref;
	if(argc==2)
	{
		n_m = atoi(argv[1]);
	}
	printf("========== Path Sort : =========\n");
	printf("* Size of array : %d\n\n", n_m);
	m = (int*)malloc(n_m*sizeof(int));
	init_array_no_order(m, n_m, n_m*10);
	mseq = (int*)malloc(n_m*sizeof(int)); // copy of m for the sequential sort
	copy_array(m, mseq, n_m);
	mref = (int*)malloc(n_m*sizeof(int)); // untouched reference copy of m
	copy_array(m, mref, n_m);
	//================ Parallel sort : =======================
	startS = std::clock();
	fusionSort(m, n_m);
	gpuCheck(cudaDeviceSynchronize());
	endS = std::clock();
	parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
	//================ Sequential sort : =====================
	startS = std::clock();
	fusionSortSeq(mseq, n_m);
	endS = std::clock();
	seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
	printf("========= Parallel sort : =============\n");
	printf("Total time elapsed : %f s\n", parMergeTime);
	// assertSorted(mref, m, n_m);
	// Fix: the speed-up line was printed twice (with a "Parrallel" typo);
	// report it once.
	printf("Parallel sort is %f times faster than sequential sort !\n", seqMergeTime/parMergeTime);
	printf("========= Sequential sort : =============\n");
	printf("Total time elapsed : %f s\n", seqMergeTime);
	// assertSorted(mref, mseq, n_m);
	// Fix: the original leaked all three host buffers.
	free(m);
	free(mseq);
	free(mref);
	return 0;
}
|
3,994 | #include "includes.h"
// One thread per chunk of `chunk` rows: writes into cl[i] the maximum
// value of nnz_num within chunk i (the widest row of that chunk).
__global__ void set_cl(int *nnz_num, int *cl, int chunk, int pad_M)
{
	int chunks = pad_M / chunk;
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= chunks)
		return;
	int base = chunk * tid;
	int best = 0;
	for (int j = 0; j < chunk; j++) {
		int len = nnz_num[base + j];
		best = (len > best) ? len : best;
	}
	cl[tid] = best;
}
3,995 | #include <stdio.h>
#include <iostream>
#define NB_COLS 1000 // Nombre de colonnes de la matrice.
#define NB_ROWS 500 // Nombre de lignes de la matrice.
#define NB_THREADS 16 // Nombre de threads par bloc dans 1 dimension
void matrixInit(int *mat); // Initialisation d'une matrice.
void checkRes(int *mat); // Vérification des résultats.
// CUDA kernel: element-wise sum c = a + b over a NB_ROWS x NB_COLS
// row-major matrix; one thread per element, guarded so partial blocks
// at the right/bottom edges stay in bounds.
__global__ void MatrixAdd(int *a, int *b, int *c)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= NB_COLS || row >= NB_ROWS)
		return;
	const int idx = row * NB_COLS + col;
	c[idx] = a[idx] + b[idx];
}
// Host code: builds matrices A and B, sums them on the GPU and verifies C.
int main(void)
{
	int *a, *b, *c;              // host matrices A, B, C
	int *dev_a, *dev_b, *dev_c;  // device matrices A, B, C
	int nbElements = NB_COLS * NB_ROWS;
	int matrixSize = nbElements * sizeof(int);
	dim3 threadsPerBlock(NB_THREADS, NB_THREADS);
	dim3 blocksPerGrid ((NB_COLS + NB_THREADS - 1) / NB_THREADS, (NB_ROWS + NB_THREADS - 1) / NB_THREADS);
	std::cout << threadsPerBlock.x << '\t' << threadsPerBlock.y << '\t' << threadsPerBlock.z << std::endl;
	std::cout << blocksPerGrid.x << '\t' << blocksPerGrid.y << '\t' << blocksPerGrid.z << std::endl;
	// Host allocations.
	a = (int *)malloc(matrixSize);
	if (a == NULL) { printf("Allocation failure\n"); abort(); }
	b = (int *)malloc(matrixSize);
	if (b == NULL) { printf("Allocation failure\n"); abort(); }
	c = (int *)malloc(matrixSize);
	if (c == NULL) { printf("Allocation failure\n"); abort(); }
	// Device allocations.
	cudaMalloc((void **)&dev_a, matrixSize);
	cudaMalloc((void **)&dev_b, matrixSize);
	cudaMalloc((void **)&dev_c, matrixSize);
	auto cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "addKernel memory failed: %s\n", cudaGetErrorString(cudaStatus));
		free(a); free(b); free(c);   // fix: host buffers were leaked on this path
		return -1;
	}
	// Initialise A and B.
	matrixInit(a);
	matrixInit(b);
	// Copy A and B to the GPU.
	cudaMemcpy(dev_a, a, matrixSize, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, matrixSize, cudaMemcpyHostToDevice);
	// Kernel launch.
	MatrixAdd <<<blocksPerGrid, threadsPerBlock>>> (dev_a, dev_b, dev_c);
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
		free(a); free(b); free(c);                       // fix: leaks on error path
		cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
		return -1;
	}
	// Copy C back to the host (this memcpy synchronises with the kernel).
	cudaStatus = cudaMemcpy(c, dev_c, matrixSize, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {                     // fix: copy status was ignored
		fprintf(stderr, "addKernel copy failed: %s\n", cudaGetErrorString(cudaStatus));
		free(a); free(b); free(c);
		cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
		return -1;
	}
	checkRes(c);
	// Release host and device memory.
	free(a);
	free(b);
	free(c);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	getchar();
	return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////
//
// Fonctions outils. Rien à modifier.
//
/////////////////////////////////////////////////////////////////////////////////////////////
// Fills a NB_ROWS x NB_COLS row-major matrix with mat[l][c] = l + c.
void matrixInit(int *mat)
{
	for (int l = 0; l < NB_ROWS; ++l)
		for (int c = 0; c < NB_COLS; ++c)
			mat[l * NB_COLS + c] = l + c;
}
// Verifies that every element of mat equals 2*(row+col) — the expected
// value when A and B were both filled by matrixInit.  Exits with a
// diagnostic on the first mismatch.
void checkRes(int *mat)
{
	for (int l = 0; l < NB_ROWS; l++) {
		for (int c = 0; c < NB_COLS; c++) {
			const int got = mat[l * NB_COLS + c];
			const int expected = 2 * (c + l);
			if (got != expected) {
				printf("Erreur de calcul sur l'element %d:%d :\n", l, c);
				printf(" - Valeur calculee : %d\n", got);
				printf(" - Valeur attendue : %d\n", expected);
				exit(0);
			}
		}
	}
	printf("LEVEL 2: Done\n");
	printf("Good job!\n");
}
|
3,996 |
// listPrimes - shows the prime numbers between a fixed range.
// this is a CUDA version that uses 1 thread in 1 block just using
// a simple serial approach
// Eric McCreath 2019 - GPL
// based on https://en.wikipedia.org/wiki/Integer_square_root
// assumes a positive number
#include<stdio.h>
#include<cuda.h>
// this macro checks for errors in cuda calls
#define Err(ans) { gpucheck((ans), __FILE__, __LINE__); }
// Prints the CUDA error with its source location and exits; no-op on success.
// Intended to be called through the Err(...) macro above.
inline void gpucheck(cudaError_t code, const char *file, int line)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPU Err: %s %s %d\n", cudaGetErrorString(code), file, line);
    exit(code);
}
// Integer square root: the largest r such that r*r <= n.
// Based on https://en.wikipedia.org/wiki/Integer_square_root
// Assumes n >= 0.
long intsquroot(long n) {
    // Find the largest even shift that still strictly reduces n
    // (locates the magnitude of n, two bits at a time).
    long shift = 2;
    while ((n >> shift) != 0 && (n >> shift) != n)
        shift += 2;
    shift -= 2;
    // Build the root most-significant-bit first.
    long result = 0;
    for (; shift >= 0; shift -= 2) {
        result <<= 1;
        long candidate = result + 1;
        if (candidate * candidate <= (n >> shift))
            result = candidate;
    }
    return result;
}
// Grid-stride trial division: clears *isprime if any divisor of v in
// [2, sqrt] exists.  Threads stop early once some thread found a divisor.
__global__ void divides(long v, long sqrt, int *isprime) {
    long stride = (long)blockDim.x * gridDim.x;
    for (long d = 2 + blockIdx.x * blockDim.x + threadIdx.x;
         (d <= sqrt) && (*isprime == 1);
         d += stride) {
        if (v % d == 0)
            *isprime = 0;
    }
}
// Returns 1 when v is prime, 0 otherwise.  Trial division over
// [2, isqrt(v)] is parallelised on the GPU by the `divides` kernel.
int isPrime(long v) {
    long srt = intsquroot(v);
    int isprime = 1;
    int *isprime_d;
    Err(cudaMalloc( &isprime_d, sizeof(int)));
    Err(cudaMemcpy(isprime_d, &isprime, sizeof(int), cudaMemcpyHostToDevice));
    // Fix: the original launched a fixed 100000 blocks (~100M threads) for
    // every candidate, regardless of how many divisors need testing.  Size
    // the grid to the work; the kernel's grid-stride loop keeps it correct
    // for any grid size.
    long candidates = (srt >= 2) ? (srt - 1) : 1;   // divisors in [2, srt]
    int blocks = (int)((candidates + 1023) / 1024);
    if (blocks < 1) blocks = 1;
    if (blocks > 100000) blocks = 100000;           // keep the original upper bound
    divides<<<blocks,1024>>>(v,srt,isprime_d);
    Err(cudaMemcpy(&isprime,isprime_d,sizeof(int),cudaMemcpyDeviceToHost));
    Err(cudaFree(isprime_d));
    return isprime;
}
// Prints, for every number in [start, end), whether it is prime.
void listPrimes(long start, long end) {
    long num = start;
    while (num < end) {
        printf ("%ld : %s\n", num, (isPrime(num)? "yes" : "no"));
        num++;
    }
}
// Checks the 100 largest positive longs for primality.
int main() {
    const long largestlong = 0x7FFFFFFFFFFFFFFFL;
    listPrimes(largestlong - 100L, largestlong);
    return 0;
}
|
3,997 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
using namespace std;
// Naive single-block matrix multiply: c (m x q) = a (m x n) * b (n x q),
// row-major.  Thread (x, y) computes c[y][x]; the launch must supply one
// (q, m) thread block, so m*q may not exceed the per-block thread limit.
__global__ void multiply(int *a, int *b, int *c, int m, int n, int q)
{
    const int col = threadIdx.x;
    const int row = threadIdx.y;
    int acc = 0;
    // Dot product of row `row` of a with column `col` of b.
    for (int k = 0; k < n; k++)
        acc += a[(row * n) + k] * b[(k * q) + col];
    c[(row * q) + col] = acc;
}
// Interactive driver: reads two integer matrices from stdin, multiplies
// them on the GPU (single block, one thread per output element) and
// prints the product.  Trailing `cin>>i` reads pause the console window.
int main()
{
    int i;
    int m,n,x,y;
    cout<<"Enter the size of first matrix:\n";
    cin>>m>>n;
    cout<<"Enter the size of second matrix:\n";
    cin>>x>>y;
    // Inner dimensions must agree for A(m x n) * B(x x y).
    if(n != x)
    {
        cout<<"matrix size is not valid for multiplication";
        cin>>i;
        return 0;
    }
    int size_a = m*n;
    int size_b = x*y;
    int size_c = m*y;
    int *h_a=new int[size_a];
    int *h_b=new int[size_b];
    int *h_c=new int[size_c];
    cout<<"Enter the first matrix\n";
    for(i=0;i<size_a;i++)
    {
        cin>>h_a[i];
    }
    cout<<"Enter the second array:\n";
    for(i=0;i<size_b;i++)
    {
        cin>>h_b[i];
    }
    int*d_a,*d_b,*d_c;
    if(cudaMalloc(&d_a, sizeof(int)*size_a) != cudaSuccess)
    {
        cout<<"Memory allocation failed\n";
        delete[] h_a;     // fix: host buffers were leaked on every error path
        delete[] h_b;
        delete[] h_c;
        cin>>i;
        return 0;
    }
    if(cudaMalloc(&d_b, sizeof(int)*size_b) != cudaSuccess)
    {
        cout<<"Memory allocation failed\n";
        cudaFree(d_a);
        delete[] h_a;
        delete[] h_b;
        delete[] h_c;
        cin>>i;
        return 0;
    }
    if(cudaMalloc(&d_c, sizeof(int)*size_c) != cudaSuccess)
    {
        cout<<"Memory allocation failed\n";
        cudaFree(d_a);
        cudaFree(d_b);
        delete[] h_a;
        delete[] h_b;
        delete[] h_c;
        cin>>i;
        return 0;
    }
    if(cudaMemcpy(d_a, h_a, sizeof(int)*size_a, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout<<"Failed to copy data from host to device";
        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);
        delete[] h_a;
        delete[] h_b;
        delete[] h_c;
        cin>>i;
        return 0;
    }
    if(cudaMemcpy(d_b, h_b, sizeof(int)*size_b, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout<<"Failed to copy data from host to device";
        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);
        delete[] h_a;
        delete[] h_b;
        delete[] h_c;
        cin>>i;
        return 0;
    }
    // One block of (y, m) threads: each thread computes one element of C.
    // NOTE(review): m*y must not exceed the per-block thread limit (1024)
    // or the launch fails silently — confirm expected input sizes.
    dim3 block(1,1,1);
    dim3 thread(y,m,1);
    multiply<<<block, thread>>>(d_a, d_b, d_c, m, n, y);
    if(cudaMemcpy(h_c, d_c, size_c * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
    {
        cout<<"Failed to copy data from device to host";
        delete[] h_a;
        delete[] h_b;
        delete[] h_c;   // fix: h_c and d_c were leaked on this path
        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);
        cin>>i;
        return 0;
    }
    cout<<"Multiplied Matrix is:\n";
    for(i=0;i<size_c;i++)
    {
        cout<<h_c[i]<<" ";
        if((i+1)%y == 0) cout<<endl;   // newline after each row of y values
    }
    delete[] h_a;
    delete[] h_b;
    delete[] h_c;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cin>>i;
    return 0;
}
3,998 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "stdio.h"
#include <stdlib.h>
#include <sys/time.h>
// CPU reference: element-wise sum MatC = MatA + MatB over an nx*ny
// row-major float matrix.
void sumMatrixOnHost(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny)
{
    const unsigned total = nx * ny;
    for (unsigned i = 0; i < total; ++i)
        MatC[i] = MatA[i] + MatB[i];
}
// GPU kernel: 2D grid / 2D block element-wise matrix sum, one thread per
// element, guarded at the right/bottom edges.
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny)
{
    const unsigned col = threadIdx.x + blockIdx.x * blockDim.x;
    const unsigned row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < nx && row < ny)
    {
        const unsigned idx = row * nx + col;
        MatC[idx] = MatA[idx] + MatB[idx];
    }
}
// GPU kernel: 1D layout — each thread owns one column and walks down its
// rows with a stride of nx.
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny)
{
    const unsigned col = threadIdx.x + blockIdx.x * blockDim.x;
    if (col >= nx)
        return;
    const unsigned end = nx * ny;
    for (unsigned idx = col; idx < end; idx += nx)
    {
        MatC[idx] = MatA[idx] + MatB[idx];
    }
}
// GPU kernel: 2D grid of 1D blocks — blockIdx.y selects the row directly,
// threads within the row cover the columns.
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny)
{
    const unsigned col = threadIdx.x + blockIdx.x * blockDim.x;
    const unsigned row = blockIdx.y;
    if (col < nx && row < ny)
    {
        const unsigned idx = row * nx + col;
        MatC[idx] = MatA[idx] + MatB[idx];
    }
}
double cpuSecond();
char are_equal(float *a, float *b, unsigned n);
// Benchmark: compares three grid/block layouts (plus deliberately bad
// custom configurations) for an element-wise matrix sum against a CPU
// reference.
int main()
{
    /*
    CUDA task2
    block/thread speed comparison
    */
    const unsigned nx = 1 << 14;
    const unsigned ny = 1 << 14;
    const unsigned N = nx * ny;
    float *MatA = (float *)malloc(N * sizeof(float));
    float *MatB = (float *)malloc(N * sizeof(float));
    float *MatC = (float *)malloc(N * sizeof(float));
    float *Res = (float *)malloc(N * sizeof(float));
    if (!MatA || !MatB || !MatC || !Res)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // Fix: the original summed uninitialized malloc'd memory (undefined
    // behaviour) and then used it as the correctness reference.  Fill the
    // inputs with deterministic values first.
    for (unsigned i = 0; i < N; ++i)
    {
        MatA[i] = (float)(i % 101);
        MatB[i] = (float)(i % 53);
    }
    sumMatrixOnHost(MatA, MatB, Res, nx, ny);
    dim3 block;
    dim3 grid;
    double time;
    float *dev_MatA, *dev_MatB, *dev_MatC;
    cudaMalloc(&dev_MatA, N * sizeof(float));
    cudaMalloc(&dev_MatB, N * sizeof(float));
    cudaMalloc(&dev_MatC, N * sizeof(float));
    cudaMemcpy(dev_MatA, MatA, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_MatB, MatB, N * sizeof(float), cudaMemcpyHostToDevice);
    // 2D grid - 2D block
    block = {32, 16};
    grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y};
    time = cpuSecond();
    sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny);
    cudaDeviceSynchronize();
    time = cpuSecond() - time;
    for (unsigned i = 0; i < N; ++i)
    {
        MatC[i] = -1;   // poison so a failed copy/kernel cannot look correct
    }
    cudaMemcpy(MatC, dev_MatC, N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y,
           time, are_equal(MatC, Res, N) ? "correct":"BAD");
    // 1D grid - 1D block
    block = {128, 1};
    grid = {(nx + block.x - 1) / block.x, 1};
    time = cpuSecond();
    sumMatrixOnGPU1D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny);
    cudaDeviceSynchronize();
    time = cpuSecond() - time;
    for (unsigned i = 0; i < N; ++i)
    {
        MatC[i] = -1;
    }
    cudaMemcpy(MatC, dev_MatC, N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y,
           time, are_equal(MatC, Res, N) ? "correct":"BAD");
    // 2D grid - 1D block
    block = {256, 1};
    grid = {(nx + block.x - 1) / block.x, ny};
    time = cpuSecond();
    sumMatrixOnGPUMix<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny);
    cudaDeviceSynchronize();
    time = cpuSecond() - time;
    for (unsigned i = 0; i < N; ++i)
    {
        MatC[i] = -1;
    }
    cudaMemcpy(MatC, dev_MatC, N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y,
           time, are_equal(MatC, Res, N) ? "correct":"BAD");
    // custom configurations
    block = {1, 1};
    grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y};
    time = cpuSecond();
    sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny);
    cudaDeviceSynchronize();
    time = cpuSecond() - time;
    printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time);
    // Fix: {128,128} and {nx,ny} exceed the per-block thread limit (1024);
    // the original silently timed launches that never ran.  Report errors.
    block = {128, 128};
    grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y};
    time = cpuSecond();
    sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    time = cpuSecond() - time;
    printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time);
    block = {nx, ny};
    grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y};
    time = cpuSecond();
    sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    time = cpuSecond() - time;
    printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time);
    // Fix: release device and host memory (the original leaked everything).
    cudaFree(dev_MatA);
    cudaFree(dev_MatB);
    cudaFree(dev_MatC);
    free(MatA);
    free(MatB);
    free(MatC);
    free(Res);
    return 0;
}
// Wall-clock time in seconds (microsecond resolution), for timing kernels.
double cpuSecond()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + 1.e-6 * (double)tv.tv_usec;
}
// Returns 1 when the two float arrays match exactly element-wise
// (bit-for-bit equality), 0 otherwise.
char are_equal(float *a, float *b, unsigned n)
{
    unsigned i = 0;
    while (i < n)
    {
        if (a[i] != b[i])
            return 0;
        ++i;
    }
    return 1;
}
3,999 | #include "includes.h"
// Squares each element of src into dst; one thread per element with no
// bounds guard, so the launch must cover exactly the array length.
__global__ void simpleKernel(float *dst, float *src)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float v = src[i];
    dst[i] = v * v;
}
4,000 | /*
STEPS
1. Allocate host memory and initialized host data e.g. malloc
2. Allocate device memory e.g cudaMalloc
3. Transfer input data from host to device memory e.g cudaMemcpy
4. Execute kernels
5. Transfer output from device memory to host
6. Free Host & CUDA memory e.g. free & cudaFree
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10000000
#define MAX_ERR 1e-6
// Element-wise out = a + b over n floats; the guard handles the grid tail.
__global__ void vector_add(float *out, float *a, float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    out[i] = a[i] + b[i];
}
// Host driver: adds two N-element vectors on the GPU and verifies the
// result against a+b element-wise within MAX_ERR.
int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    // 1. Allocate host memory
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    if (a == NULL || b == NULL || out == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // 1. Initialize input vectors
    for(int i = 0; i < N; i++){
        a[i] = 1.0f; b[i] = 2.0f;
    }
    // 2. Allocate device memory
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);
    // 3. Transfer input data from host to device
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // 4. Kernel launch: 256 threads per block, ceil(N/256) blocks so the
    //    guarded kernel covers every index.
    int block_size = 256;
    int grid_size = (N + block_size - 1) / block_size;
    vector_add<<<grid_size,block_size>>>(d_out, d_a, d_b, N);
    // Fix: launch errors were never checked — a bad configuration would
    // leave `out` stale and the asserts comparing garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // 5. Transfer output from device to host (blocking copy, syncs the kernel)
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // Verification
    for(int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("PASSED\n");
    // 6. Free cuda memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    // 6. Deallocate host memory
    free(a);
    free(b);
    free(out);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.