serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
14,501 | /* ECGR 6090 Heterogeneous Computing Homework0
Problem 1 - Vector add on gpu
Written by Bhavin Thakar - 801151488
*/
// To execute the program type: ./vectorAddGPU
#include<stdio.h>
#include <sys/time.h>
struct timeval stop, start,start1,stop1,start2,stop2;
#define N (10000*1024) //Defining N
#define THREADS_PER_BLOCK 1024
//Kernel Function
// Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, n).
// One thread per element; threads past the end exit via the guard.
__global__ void add(int *a, int *b, int *c, int n){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return; // guard the grid tail
    c[i] = a[i] + b[i];
}
// function to generate random number and adding it to an array
// Fills a[0..n-1] with pseudo-random integers (uses the C library rand()).
void random(int *a, int n ){
int i;
for (i = 0; i < n; ++i)
a[i] = rand()%100; // Generate random integer values from 0 to 99 inclusive (rand()%100 never yields 100)
}
// Driver: fills two N-element vectors with random ints, adds them on the GPU,
// and reports kernel time and host<->device transfer time in microseconds.
int main(){
int *a, *b, *c; // Host arrays
size_t size = (size_t)N * sizeof(int); // Bytes per array (size_t avoids 32-bit overflow for large N)
// Allocating memory on the CPU
a=(int*)malloc(size);
b=(int*)malloc(size);
c=(int*)malloc(size);
// Device arrays
int *d_a, *d_b, *d_c;
// Allocating memory on the GPU
cudaMalloc((void ** )&d_a,size);
cudaMalloc((void **)&d_b,size);
cudaMalloc((void **)&d_c,size);
gettimeofday(&start1, NULL);
// Fill the inputs with random values
random(a,N);
random(b,N);
// Copying the CPU arrays to the GPU device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
gettimeofday(&stop1, NULL);
// Launch add() kernel on GPU and time it
gettimeofday(&start, NULL);
add<<<(N + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a, d_b, d_c,N);
// BUG FIX: kernel launches are asynchronous. The synchronize must happen
// BEFORE reading the stop clock, otherwise only launch overhead is measured.
cudaDeviceSynchronize();
gettimeofday(&stop, NULL);
// FIX: use a matching 64-bit format/operand pair for the elapsed time
printf("Execution time for kernel: %llu us\n",
       (unsigned long long)((stop.tv_sec - start.tv_sec) * 1000000LL + stop.tv_usec - start.tv_usec));
// Copy result back to host (cudaMemcpy device->host is a blocking call)
gettimeofday(&start2, NULL);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
gettimeofday(&stop2, NULL);
// Total host->device plus device->host transfer time
unsigned long long i;
i=(stop1.tv_sec - start1.tv_sec )* 1000000ULL + (stop1.tv_usec - start1.tv_usec);
i=i+((stop2.tv_sec - start2.tv_sec )* 1000000ULL + (stop2.tv_usec - start2.tv_usec));
printf("Execution time for data transfer: %llu us\n", i);
// Freeing up the resources
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
14,502 | /*
* 2D flow around a cylinder
*
* compile with nvcc -O2 LBM.cu -o LBMcuda
*
*/
#include <math.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <string>
#include <cstring>
#define NBLOCKS_X 4
#define NBLOCKS_Y 4
#define NTHREADS_X 32
#define NTHREADS_Y 16
#define PI2 6.28318530718
//====== Flow parameters definition ==============================================
#define MAXITER 500 // Total number of time iterations.
#define OUTSTEP 10
#define Re 220.0 // Reynolds number.
#define NX 520 // Lattice dimensions and populations.
#define NY 180
#define LY (NY-1.0)
#define Q 9
#define Q0 3
#define CX (NX/4) // Coordinates and radius of the cylinder.
#define CY (NY/2)
#define R (NY/9)
#define ULB 0.04 // Velocity in lattice units.
typedef float real_t;
typedef unsigned int uint;
//----- Lattice Constants -------------------------------
__device__ __constant__ real_t C[Q][2] = {
{ 0., 0.},
{ 0.,-1.},
{ 0., 1.},
{-1., 0.},
{-1.,-1.},
{-1., 1.},
{ 1., 0.},
{ 1.,-1.},
{ 1., 1.}
};
__device__ __constant__ int iC[Q][2] = {
{ 0, 0},
{ 0,-1},
{ 0, 1},
{-1, 0},
{-1,-1},
{-1, 1},
{ 1, 0},
{ 1,-1},
{ 1, 1}
};
real_t C_h[Q][2]; //C on host
//noslip = [c.tolist().index((-c[i]).tolist()) for i in range(q)]
__device__ __constant__ int NOSLIP[Q]={0, 2, 1, 6, 8, 7, 3, 5, 4}; //noslip indexes for C
//i1 = arange(Q)[asarray([ci[0]<0 for ci in c])] # Unknown on right wall.
__device__ __constant__ int I1[Q0] = {3, 4, 5}; // Pops unknown on right wall.
//i2 = arange(Q)[asarray([ci[0]==0 for ci in c])] # Vertical middle.
__device__ __constant__ int I2[Q0] = {0,1,2}; // Vertical middle.
//i3 = arange(Q)[asarray([ci[0]>0 for ci in c])] # Unknown on left wall.
__device__ __constant__ int I3[Q0] = {6, 7, 8}; //Pops Unknown on left wall.
const real_t NULB_h = ULB*(real_t)(R)/(real_t)(Re);
const real_t OMEGA_h = 1.0 / (3.*NULB_h+0.5); // Relaxation parameter.
__device__ __constant__ real_t NULB;
__device__ __constant__ real_t OMEGA; // Relaxation parameter.
//----------------------------------------------------------------------
//========= Functions declaration =================
__host__ __device__ real_t norm2(const real_t * v);
__device__ bool isObstacle(int x, int y);
//convert 2D to 1D array address
// A[i][j]
__device__ __forceinline__ uint i2D(uint i, uint j) {return (NY*i+j);}
//convert 3D to 1D array address
// A[m][i][j]
__device__ __forceinline__ uint i3D(uint m, uint i, uint j) {return (m*NX*NY+NY*i+j);}
__global__ void initialize(real_t * vel, real_t * rho);
__global__ void getEquilibrium(const real_t* rho,
const real_t* u,
const real_t* t,
real_t* feq
);
__global__ void getHvars(real_t* rho,
real_t* u,
const real_t * f
);
// Right wall: apply outflow condition.
__global__ void outflow(real_t *fin);
// Left wall: compute density from known populations.
__global__ void leftwall(real_t* rho, real_t *u, const real_t* vel, const real_t* fin);
// Left wall: Zou/He boundary condition.
__global__ void zouhe(const real_t* feq, real_t* fin);
// Collision step.
__global__ void collision(const real_t* fin, real_t* fout, const real_t* feq);
//Wall rebound
__global__ void rebound(const real_t * fin, real_t* fout);
// Streaming step.
__global__ void streaming(real_t* fin, const real_t* fout);
//--------------------------------------------------
// LBM driver: sets up D2Q9 weights and fields, runs MAXITER time steps of
// outflow -> macroscopic update -> inlet -> equilibrium -> Zou/He -> collision
// -> rebound -> streaming, writing CSV snapshots every OUTSTEP steps.
int main()
{
// Push precomputed host constants into device __constant__ memory.
cudaMemcpyToSymbol(NULB, &NULB_h, sizeof(real_t), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(OMEGA, &OMEGA_h, sizeof(real_t), 0, cudaMemcpyHostToDevice);
// Copy the lattice velocity table back to the host for the weight setup below.
cudaMemcpyFromSymbol(&C_h, C, 2*Q*sizeof(real_t), 0, cudaMemcpyDeviceToHost);
// Lattice weights (host array + device copy).
real_t t[Q];
real_t* t_d;
cudaMalloc((void **)&t_d, Q * sizeof(real_t));
// Macroscopic density: host copy for CSV output, device working buffer.
real_t rho[NX][NY];
real_t* rho_d;
cudaMalloc((void **)&rho_d, NX*NY * sizeof(real_t));
// Prescribed inflow velocity field (device only).
real_t* vel_d;
cudaMalloc((void **)&vel_d, 2*NX*NY * sizeof(real_t));
// Macroscopic velocity: host copy for CSV output, device working buffer.
real_t u[2][NX][NY];
real_t* u_d;
cudaMalloc((void **)&u_d, 2*NX*NY * sizeof(real_t));
// Equilibrium, incoming and post-collision population buffers (device only).
real_t* feq_d;
cudaMalloc((void **)&feq_d, Q*NX*NY * sizeof(real_t));
real_t* fin_d;
cudaMalloc((void **)&fin_d, Q*NX*NY * sizeof(real_t));
real_t* fout_d;
cudaMalloc((void **)&fout_d, Q*NX*NY * sizeof(real_t));
// D2Q9 weights: 4/9 for the rest population, 1/9 for axis neighbours
// (|c|^2 < 2), 1/36 for diagonals.
t[0]=4./9.;
for (int iq=1; iq<Q; iq++)
{
if (norm2(&C_h[iq][0])<2.)
{
t[iq]=1./9.;
}
else
{
t[iq]=1./36.;
}
}
cudaMemcpy((real_t *)t_d,t,Q*sizeof(real_t),cudaMemcpyHostToDevice);
//initial velocity and density setup
{
dim3 grid(NBLOCKS_X,NBLOCKS_Y,1);
dim3 threads(NTHREADS_X,NTHREADS_Y,1);
initialize<<<grid,threads>>>(vel_d,rho_d);
}
//equilibrium DF setup
{
dim3 grid(NBLOCKS_X,NBLOCKS_Y,1);
dim3 threads(NTHREADS_X,NTHREADS_Y,1);
getEquilibrium<<<grid,threads>>>(rho_d,vel_d,t_d,feq_d);
}
//start from the equilibrium state: copy feq to fin
cudaMemcpy( fin_d, feq_d, sizeof(real_t)*Q*NX*NY,cudaMemcpyDeviceToDevice );
//################################################################################
//###### Main time loop ##########################################################
for (int time=0; time < MAXITER; time++)
{
// Right wall: apply outflow condition.
{
dim3 grid(1,NBLOCKS_Y,1);
dim3 threads(1,NTHREADS_Y,1);
outflow<<<grid,threads>>>(fin_d);
}
// Calculate macroscopic density and velocity.
{
dim3 grid(NBLOCKS_X,NBLOCKS_Y,1);
dim3 threads(NTHREADS_X,NTHREADS_Y,1);
getHvars<<<grid,threads>>>(rho_d,u_d,fin_d);
}
// Left wall: compute density from known populations.
{
dim3 grid(1,NBLOCKS_Y,1);
dim3 threads(1,NTHREADS_Y,1);
leftwall<<<grid,threads>>>(rho_d, u_d, vel_d, fin_d);
}
// Recompute equilibrium from the updated macroscopic fields.
{
dim3 grid(NBLOCKS_X,NBLOCKS_Y,1);
dim3 threads(NTHREADS_X,NTHREADS_Y,1);
getEquilibrium<<<grid,threads>>>(rho_d,u_d,t_d,feq_d);
}
// Left wall: Zou/He boundary condition.
{
dim3 grid(1,NBLOCKS_Y,1);
dim3 threads(1,NTHREADS_Y,1);
zouhe<<<grid,threads>>>(feq_d,fin_d);
}
// Collision step.
{
dim3 grid(NBLOCKS_X,NBLOCKS_Y,1);
dim3 threads(NTHREADS_X,NTHREADS_Y,1);
collision<<<grid,threads>>>(fin_d,fout_d,feq_d);
}
// Wall "rebound" step.
{
dim3 grid(NBLOCKS_X,NBLOCKS_Y,1);
dim3 threads(NTHREADS_X,NTHREADS_Y,1);
rebound<<<grid,threads>>>(fin_d,fout_d);
}
// Streaming step.
{
dim3 grid(NBLOCKS_X,NBLOCKS_Y,1);
dim3 threads(NTHREADS_X,NTHREADS_Y,1);
streaming<<<grid,threads>>>(fin_d, fout_d);
}
// Output.
if (time % OUTSTEP==0)
{
// Blocking copies: also synchronize with the kernels queued above.
cudaMemcpy(rho,rho_d,NX*NY*sizeof(real_t),cudaMemcpyDeviceToHost);
cudaMemcpy(u,u_d,2*NX*NY*sizeof(real_t),cudaMemcpyDeviceToHost);
std::ofstream fout;
std::stringstream filename("");
filename <<"out"<<std::setw(5)<<std::setfill('0')<<time<<".csv";
fout.open(filename.str().c_str(),std::ofstream::out);
fout << "x,y,rho,ux,uy,|u|" << std::endl;
for (int x=0; x<NX;x++)
{
for (int y=0; y<NY;y++)
{
// BUG FIX: norm2 expects the two vector components at v[0] and v[1],
// but in the host layout u[2][NX][NY] the element after u[0][x][y] is
// u[0][x][y+1], not u[1][x][y]. Build a contiguous 2-vector instead.
real_t uvec[2] = { u[0][x][y], u[1][x][y] };
fout << x << "," << y << "," <<rho[x][y] <<"," <<u[0][x][y] << "," << u[1][x][y] << ","
<< sqrt(norm2(uvec)) << std::endl;
}
}
fout.close();
std::cout << "Step "<<time<<" done.\n";
}
}
// FIX: wait for outstanding device work and release device memory,
// which was previously leaked.
cudaDeviceSynchronize();
cudaFree(t_d);
cudaFree(rho_d);
cudaFree(vel_d);
cudaFree(u_d);
cudaFree(feq_d);
cudaFree(fin_d);
cudaFree(fout_d);
return 0;
}
//============================================================
//====== functions definition ================================
// Squared Euclidean norm of the 2-component vector {v[0], v[1]}.
__host__ __device__ real_t norm2(const real_t * v)
{
    const real_t vx = v[0], vy = v[1];
    return vx*vx + vy*vy;
}
// True when lattice node (x, y) lies strictly inside the cylinder of
// radius R centred at (CX, CY).
__device__ bool isObstacle(int x, int y)
{
    real_t d[2];
    d[0] = x - CX;
    d[1] = y - CY;
    return norm2(d) < R*R;
}
// recurrent declarations of threads indexes inside kernels
#define CUDAHEADER_X \
const uint num_threads_x = gridDim.x*blockDim.x; \
const uint tstart_x = blockDim.x*blockIdx.x+threadIdx.x;
#define CUDAHEADER_Y \
const uint num_threads_y = gridDim.y*blockDim.y; \
const uint tstart_y = blockDim.y*blockIdx.y+threadIdx.y;
#define CUDAHEADER \
CUDAHEADER_X \
CUDAHEADER_Y
//-------------------------------------------------------
// Initial macroscopic fields: density 1 everywhere; velocity zero except a
// horizontal inflow ULB on the band of columns 10..19. Grid-stride loops
// cover the full NX x NY lattice regardless of launch configuration.
__global__ void initialize(real_t * vel, real_t * rho)
{
CUDAHEADER
for (uint x = tstart_x; x < NX; x += num_threads_x)
{
for (uint y = tstart_y; y < NY; y += num_threads_y)
{
const bool inflowBand = (x > 9 && x < 20);
vel[i3D(0,x,y)] = inflowBand ? (real_t)ULB : (real_t)0.;
vel[i3D(1,x,y)] = 0.;
rho[i2D(x,y)] = 1.;
}
}
}
// Equilibrium distribution function.
// Computes the D2Q9 equilibrium distribution at every node:
//   feq[iq] = rho * t[iq] * (1 + cu + 0.5*cu^2 - 1.5*|u|^2), cu = 3*(c_iq . u)
// (the factor 3 is folded into cu, so 0.5*cu*cu is the usual 4.5*(c.u)^2 term).
__global__ void getEquilibrium(const real_t* rho,
const real_t* u,
const real_t* t,
real_t* feq
)
{
CUDAHEADER
real_t cu; // 3 * (c_iq . u) at the current node
real_t uxy[2]; // local copy of the velocity at (x, y)
for (uint x= tstart_x; x<NX; x += num_threads_x)
{
for (uint y= tstart_y; y<NY; y += num_threads_y)
{
uxy[0]=u[i3D(0,x,y)]; uxy[1]=u[i3D(1,x,y)];
for (uint iq=0; iq<Q;iq++)
{
cu = 3.0*(C[iq][0]*uxy[0]+C[iq][1]*uxy[1]);
feq[i3D(iq,x,y)] = rho[i2D(x,y)]*t[iq]*(1.+cu+0.5*cu*cu-1.5*norm2(uxy));
}
}
}
}
// Macroscopic (hydrodynamic) variables from the populations f:
//   rho = sum_iq f[iq],  u = (sum_iq c_iq * f[iq]) / rho
// at every lattice node (grid-stride over the full NX x NY domain).
__global__ void getHvars(real_t* rho,
real_t* u,
const real_t * f
)
{
CUDAHEADER
real_t ff; // current population value being accumulated
for (uint x= tstart_x; x<NX; x += num_threads_x)
{
for (uint y= tstart_y; y<NY; y += num_threads_y)
{
real_t& rhxy=rho[i2D(x,y)]; // accumulate density in place
rhxy=0.;
u[i3D(0,x,y)]=u[i3D(1,x,y)]=0.;
for (uint iq=0; iq<Q;iq++)
{
ff=f[i3D(iq,x,y)];
rhxy += ff;
u[i3D(0,x,y)] += C[iq][0]*ff;
u[i3D(1,x,y)] += C[iq][1]*ff;
}
// momentum -> velocity
u[i3D(0,x,y)] /= rhxy; u[i3D(1,x,y)] /= rhxy;
}
}
}
// Right wall: apply outflow condition.
// Right wall outflow: the populations that would enter from outside the
// domain (index set I1, those with c_x < 0) are copied from the
// next-to-last column — a zero-gradient outlet condition.
__global__ void outflow(real_t *fin)
{
CUDAHEADER_Y
for (uint y= tstart_y; y<NY; y += num_threads_y)
{
for (uint iq=0; iq<Q0;iq++)
{
fin[i3D(I1[iq],NX-1,y)] = fin[i3D(I1[iq],NX-2,y)];
}
}
}
// Left wall: compute density from known populations.
// Left wall inlet: imposes the prescribed velocity vel at x = 0 and
// reconstructs the wall density from the known populations,
//   rho = (sum_{I2} f + 2 * sum_{I1} f) / (1 - u_x)
// (I2: c_x == 0, I1: c_x < 0) — the standard Zou/He inlet relation.
__global__ void leftwall(real_t* rho, real_t *u, const real_t* vel, const real_t* fin)
{
CUDAHEADER_Y
for (uint y= tstart_y; y<NY; y += num_threads_y)
{
u[i3D(0,0,y)] =vel[i3D(0,0,y)]; u[i3D(1,0,y)] =vel[i3D(1,0,y)];
real_t &rh0y = rho[i2D(0,y)]; // accumulate wall density in place
rh0y = 0.;
for (uint iq=0; iq<Q0;iq++)
{
rh0y += fin[i3D(I2[iq],0,y)] + 2.*fin[i3D(I1[iq],0,y)];
}
rh0y /= (1.-u[i3D(0,0,y)]);
}
}
// Left wall: Zou/He boundary condition.
// Left wall Zou/He condition: rebuild the unknown populations at x = 0
// (index set I3, c_x > 0) from their opposite counterparts:
//   f_I3 = f_I1 + feq_I3 - feq_I1.
__global__ void zouhe(const real_t* feq, real_t* fin)
{
CUDAHEADER_Y
for (uint y= tstart_y; y<NY; y += num_threads_y)
{
for (uint iq=0; iq<Q0;iq++)
{
fin[i3D(I3[iq],0,y)] = fin[i3D(I1[iq],0,y)] + feq[i3D(I3[iq],0,y)] - feq[i3D(I1[iq],0,y)];
}
}
}
// Collision step.
// BGK collision: relax each population toward its equilibrium value with
// relaxation rate OMEGA, writing the result into fout.
__global__ void collision(const real_t* fin, real_t* fout, const real_t* feq)
{
CUDAHEADER
for (uint x = tstart_x; x < NX; x += num_threads_x)
{
for (uint y = tstart_y; y < NY; y += num_threads_y)
{
for (uint iq = 0; iq < Q; iq++)
{
const uint idx = i3D(iq, x, y);
fout[idx] = fin[idx] - OMEGA * (fin[idx] - feq[idx]);
}
}
}
}
//Wall rebound
// Bounce-back no-slip rule inside the cylinder obstacle: every outgoing
// population is replaced by the incoming population travelling in the
// opposite lattice direction (NOSLIP maps each iq to its mirror index).
__global__ void rebound(const real_t * fin, real_t* fout)
{
CUDAHEADER
for (uint x= tstart_x; x<NX; x += num_threads_x)
{
for (uint y= tstart_y; y<NY; y += num_threads_y)
{
if (isObstacle(x,y))
{
for (uint iq=0; iq<Q;iq++)
{
fout[i3D(iq,x,y)] = fin[i3D(NOSLIP[iq],x,y)];
}
}
}
}
}
// Streaming step.
// Streaming step: propagate each post-collision population fout along its
// lattice velocity iC[iq] into fin, with periodic wrap-around at the domain
// edges (the +NX / +NY terms keep the modulo argument non-negative).
__global__ void streaming(real_t* fin, const real_t* fout)
{
CUDAHEADER
int xout,yout; // destination node of the streamed population
for (int x= tstart_x; x<NX; x += num_threads_x)
{
for (int y= tstart_y; y<NY; y += num_threads_y)
{
// iq == 0 is the rest population: it stays on its node.
fin[i3D(0,x,y)]=fout[i3D(0,x,y)];
for (int iq=1; iq<Q;iq++)
{
//handle periodic conditions
xout = ((x + iC[iq][0])+NX) % NX;
yout = ((y + iC[iq][1])+NY) % NY;
fin[i3D(iq,xout,yout)]=fout[i3D(iq,x,y)];
}
}
}
}
|
// We use the __device__ prefix to mark functions as callable from threads running on the device.
// device function (using __device__ ) are called from the __global__ functions
/*
__device__ float device_function(float x){
return x + 5;
}
*/
//__device__ functions can call other device functions but not themselves
//OK
/*
__device__ float device_function_two(float y){
return device_function(y)/ 2;
}
*/
//NOT (currently not supported in CUDA)
/*
__device__ float device_function_two(float z){
return z * device_function_two(z-1);
}
*/
#include <stdio.h>
#include <stdlib.h>
// Flat global thread index for a 1D grid of 1D blocks.
__device__ int get_thread_index(void){
    return blockDim.x * blockIdx.x + threadIdx.x;
}
// Demo device helper: always yields the constant 10.
__device__ int get_fixedValue(void){
    const int fixed = 10;
    return fixed;
}
// Writes the constant 10 into a[] at each thread's global index.
// NOTE(review): no bounds guard — the launch must supply exactly as many
// threads as `a` has elements (true for the 256-element demo in main).
__global__ void gpuone(int *a){
int index = get_thread_index();
a[index] = get_fixedValue();
}
// Intentionally swapped demo of gpuone: every thread writes its own global
// index into the single slot b[10] (get_fixedValue() == 10). All launched
// threads race on that one element, so b[10] ends up holding the index of
// whichever thread wrote last; the rest of b is untouched.
__global__ void gputwo(int *b){
int index = get_fixedValue();
b[index] = get_thread_index();
}
// Prints `elements` ints, space-separated, under a "GPU array:" header.
void printArray(int* host, int elements){
    printf("GPU array: \n");
    for (int i = 0; i < elements; ++i)
        printf("%d ", host[i]);
    printf("\n\n");
}
// Demo driver: fills a 256-int device array via gpuone (every element
// becomes 10) and prints it, then runs gputwo (which races all thread ids
// into slot 10) and prints again.
int main(void){
int elements = 256; // array length
int bytes = elements * sizeof(int);
int *host = 0, *device = 0;
host = (int*) malloc(bytes);
cudaMalloc((void**)&device, bytes);
int blockSize = 128;
int gridSize = elements / blockSize; // exact: 256 is a multiple of 128
gpuone<<<gridSize, blockSize>>>(device);
// Blocking device->host copy — also waits for the kernel to finish.
cudaMemcpy(host, device, bytes, cudaMemcpyDeviceToHost);
printArray(host, elements);
gputwo<<<gridSize, blockSize>>>(device);
cudaMemcpy(host, device, bytes, cudaMemcpyDeviceToHost);
printArray(host, elements);
free(host);
cudaFree(device);
return 0;
}
|
14,504 | #include <iostream>
#include "../ginkgo/GOrder.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
// Single-thread smoke test of gpu_ginkgo::Order: exercises construction,
// ack, queue updates against trades, a fill, queue updates against book
// updates, and cancellation, printing the order state after each step.
// Launch with <<<1, 1>>> — the kernel has no per-thread parallelism.
__global__ void test(){
gpu_ginkgo::Order o(1024, 10, 180, 30);
printf("\n\n===== Checking basic member functions =====\n\n");
o.showOrderInfo();
o.getAcked(150);
printf("\nThe trade is acked!\n");
o.showOrderInfo();
o.qUpdateAgainstTrade(75, 15);
o.showOrderInfo();
o.qUpdateAgainstTrade(15, 15);
o.showOrderInfo();
// Outputs filled by filledAgainstTrade below.
int trade_size = 32;
int dq = 0;
int dqs = 0;
int filled_qty = 0;
int filled_pnl = 0;
int bz = 0;
o.filledAgainstTrade(trade_size, filled_qty, filled_pnl, dq, dqs, 1024, bz);
o.showOrderInfo();
printf("====Updating Info====\n");
printf("dq = %d, dqs = %d, filled_qty = %d, filled_pnl = %d, trade_size = %d\n",
dq, dqs, filled_qty, filled_pnl, trade_size);
printf("\n\n===== Checking Q update =====\n\n");
o.qUpdateAgainstBU(25);
printf("Book Volume = 25\n");
o.showOrderInfo();
o.qUpdateAgainstBU(100);
printf("\nBook Volume = 100\n");
o.showOrderInfo();
o.qUpdateAgainstBU(40);
printf("\nBook Volume = 40\n");
o.showOrderInfo();
printf("\n\n===== Canceling this order =====\n\n");
o.cancel(225. + 100);
o.showOrderInfo();
}
// Launches the single-thread Order smoke-test kernel.
int main(){
    // Device vector retained from the original code (not used by the kernel).
    def_dvec(float) dev_out(1, 0);
    test<<<1, 1>>>();
    // BUG FIX: kernel launches are asynchronous; without this the process can
    // exit before the kernel runs and its printf output is flushed.
    cudaDeviceSynchronize();
    return 0;
}
|
14,505 | // RUN: %clang_cc1 -dM -E -x hip %s | FileCheck -check-prefix=CXX14 %s
// RUN: %clang_cc1 -dM -E %s | FileCheck -check-prefix=CXX14 %s
// RUN: %clang_cc1 -dM -E -std=c++98 -x hip %s | FileCheck -check-prefix=CXX98 %s
// RUN: %clang_cc1 -dM -E -std=c++98 %s | FileCheck -check-prefix=CXX98 %s
// CXX98: #define __cplusplus 199711L
// CXX14: #define __cplusplus 201402L
|
14,506 |
#include <stdio.h>
#include <cuda_runtime.h>
// Naive dense matrix multiply C = A * B for square N x N row-major matrices.
// One thread computes one output element C[i][j].
__global__ void MatrixMut(int *A, int *B, int *C, int N)
{
    int i = blockDim.x*blockIdx.x+threadIdx.x; // row
    int j = blockDim.y*blockIdx.y+threadIdx.y; // column
    // FIX: bounds guard — without it, any launch whose grid over-covers N
    // reads and writes out of bounds. (The current caller covers N exactly,
    // but the kernel should not depend on that.)
    if (i >= N || j >= N) return;
    int temp = 0;
    for (int k = 0; k < N; k++)
        temp += A[i*N+k] * B[k*N+j];
    C[i*N+j] = temp;
}
// Multiplies two 1024x1024 constant matrices (A=1s, B=2s) on the GPU, times
// the kernel with CUDA events, and verifies every element equals 2*dim.
int main()
{
const int dim=1<<10; // matrix edge length (1024)
const int size=dim*dim; // elements per matrix
// Host buffers, filled with known constants so the result is checkable.
int *A= (int *)malloc(size*sizeof(int));
int *B= (int *)malloc(size*sizeof(int));
int *C= (int *)malloc(size*sizeof(int));
for (int i = 0; i < size; ++i)
{
A[i]=1;
B[i]=2;
}
int *d_A=NULL;
int *d_B=NULL;
int *d_C=NULL;
cudaMalloc((void**) &d_A,size*sizeof(int));
cudaMalloc((void**) &d_B,size*sizeof(int));
cudaMalloc((void**) &d_C,size*sizeof(int));
cudaMemcpy(d_A,A,size*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,size*sizeof(int),cudaMemcpyHostToDevice);
// FIX: derive the grid from dim (ceil-div) instead of hard-coding 64x64,
// so a change to dim cannot silently leave part of the matrix uncomputed.
dim3 block_size(16,16);
dim3 grid_size((dim + block_size.x - 1) / block_size.x,
               (dim + block_size.y - 1) / block_size.y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
MatrixMut<<<grid_size,block_size>>>(d_A,d_B,d_C,dim);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop); // wait for the kernel; elapsed time is GPU time
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
printf("Elapsed time is %f ms \n", msecTotal); // FIX: typo "Eclapsed"
cudaMemcpy(C,d_C,size*sizeof(int),cudaMemcpyDeviceToHost);
// Every element of C must be dim * (1 * 2).
for (int i = 0; i < size; ++i)
{
if(C[i]!=dim*2)
{
printf("Test Failed!");
exit(-1);
}
}
printf("Test Passed \n");
// FIX: destroy the timing events as well.
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(A);
free(B);
free(C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
14,507 | #include <cuda_runtime_api.h>
// Clamps each of the `dim` elements of y into [clamp_lo, clamp_hi], in place.
// One thread per element; threads past `dim` return immediately.
__global__ void clamp_kernel(
    float *y,
    int dim,
    float clamp_lo,
    float clamp_hi)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= dim) {
    return;
  }
  float v = y[idx];
  // Only write back when the value actually falls outside the range.
  if (v < clamp_lo) {
    y[idx] = clamp_lo;
  } else if (v > clamp_hi) {
    y[idx] = clamp_hi;
  }
}
// C-linkage host wrapper: clamps dim floats of y into [clamp_lo, clamp_hi]
// on `stream` using 1024-thread blocks.
extern "C" void neuralops_cuda_clamp(
    float *y,
    size_t dim,
    float clamp_lo,
    float clamp_hi,
    cudaStream_t stream)
{
  // FIX: dim == 0 would produce a 0-block grid, which is an invalid launch
  // configuration — there is nothing to do, so return early.
  if (dim == 0) {
    return;
  }
  // NOTE(review): the kernel takes `int dim`; dim > INT_MAX would truncate —
  // confirm callers never pass that much.
  clamp_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
    y, dim, clamp_lo, clamp_hi);
}
|
14,508 | #include "stdio.h"
#include "stdlib.h"
#define N 8000000
#define T 8
// Each of the T threads scans its contiguous chunk of a[] and writes the
// chunk minimum into c[rank]. Launch with one block of exactly T threads.
__global__ void find_min(int *a, int *c)
{
    int rank = threadIdx.x;
    int numToSort = (8000000) / T; // chunk length (mirrors the N macro)
    int low = rank * numToSort;
    int high = low + numToSort - 1; // inclusive last index of this chunk
    int min = a[low];
    // BUG FIX: the loop previously ran `i < high`, which skipped the final
    // element a[high] of every chunk; use an inclusive upper bound.
    for (int i = low + 1; i <= high; ++i)
    {
        if (min > a[i])
        {
            min = a[i];
        }
    }
    c[rank] = min;
}
// Fills N random ints, computes the minimum on the GPU (T partial minima,
// reduced on the host) and sequentially on the CPU, and prints both.
int main()
{
    int *a;
    a = (int *)malloc(sizeof(int) * N); // host input
    int *dev_a;
    int c[T]; // per-thread partial minima
    int *dev_c;
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_c, T * sizeof(int));
    // Random values in [0, 1e9)
    for (int i = 0; i < N; ++i)
    {
        a[i] = rand() % 1000000000;
    }
    // Sentinel larger than any generated value
    for(int i = 0; i < T; ++i){
        c[i] = 1000000001;
    }
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, T * sizeof(int), cudaMemcpyHostToDevice);
    dim3 grid(1);
    find_min <<<grid, T >>> (dev_a, dev_c);
    // Blocking device->host copy: also waits for the kernel to finish, so the
    // explicit cudaDeviceSynchronize() the original placed AFTER it is redundant.
    cudaMemcpy(c, dev_c, T * sizeof(int), cudaMemcpyDeviceToHost);
    // Reduce the T partial minima on the host.
    int min = c[0];
    for(int i = 1; i < T; ++i){
        if(min > c[i]){
            min = c[i];
        }
    }
    printf("Minimal value parallel with cuda is: %d\n", min);
    // Sequential reference for comparison.
    min = a[0];
    for(int i = 1; i < N; ++i){
        if(min > a[i]){
            min = a[i];
        }
    }
    printf("Minimal value sequential: %d\n", min);
    // FIX: free the host buffer (previously leaked) along with device memory.
    free(a);
    cudaFree(dev_a);
    cudaFree(dev_c);
    return 0;
}
|
14,509 | #include "includes.h"
// Writes the owning block's x-index into every element of the dimx x dimy
// row-major matrix a (row = iy, column = ix) — visualizes the block layout.
__global__ void kernel6( int *a, int dimx, int dimy )
{
int ix = blockIdx.x * blockDim.x + threadIdx.x; // global column
int iy = blockIdx.y * blockDim.y + threadIdx.y; // global row
int idx = iy*dimx + ix; // flattened row-major index
// Bounds guard for the grid tail
if(ix < dimx && iy < dimy)
a[idx] = blockIdx.x;
} |
14,510 | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
// CUDA event-based GPU timer. Start()/Stop() record events on the default
// stream; Elapsed() waits for the stop event and returns milliseconds.
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
// Create both events.
GpuTimer ()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
// Destroy both events.
~GpuTimer ()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Record the start event on stream 0.
void Start ()
{
cudaEventRecord(start, 0);
}
// Record the stop event on stream 0.
void Stop ()
{
cudaEventRecord(stop, 0);
}
// Wait for the stop event, then return the start->stop time in ms.
float Elapsed ()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Scope timer: starts the GpuTimer on construction and, on destruction,
// stops it and prints the elapsed milliseconds. Must be instantiated as a
// NAMED object — an unnamed temporary (`GpuTimerWrapper();`) is destroyed
// immediately and times nothing.
struct GpuTimerWrapper
{
GpuTimer timer;
GpuTimerWrapper ()
{
timer.Start();
}
~GpuTimerWrapper ()
{
timer.Stop();
printf("Time elapsed = %g ms\n", timer.Elapsed());
}
};
// Prints `size` ints as "{a b c }" followed by a newline.
void print_array (int *array, int size)
{
    printf("{");
    for (int i = 0; i < size; ++i)
        printf("%d ", array[i]);
    printf("}\n");
}
// Every launched thread atomically increments one slot of g, wrapping the
// global thread id modulo ARRAY_SIZE so many threads contend per element.
__global__ void increment_atomic (int *g)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    atomicAdd(&g[tid % ARRAY_SIZE], 1);
}
// Launches NUM_THREADS threads that atomically increment a small array,
// times the launch scope, and prints the resulting counters.
int main ()
{
    printf("%d total threads in %d blocks writing into %d array elements\n",
           NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
    // declare and allocate host memory
    int h_array[ARRAY_SIZE];
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    // declare, allocate, and zero out GPU memory
    int * d_array = NULL;
    // FIX: cudaMalloc reports failure via its return value; the pointer is
    // not guaranteed to be set to NULL on error.
    cudaError_t err = cudaMalloc((void **) &d_array, ARRAY_BYTES);
    if (err != cudaSuccess || d_array == NULL) {
        std::cout << "Failed to alloc GPU mem\n";
        return -1;
    }
    cudaMemset((void *) d_array, 0, ARRAY_BYTES);
    {
        // BUG FIX: `GpuTimerWrapper();` constructed an unnamed temporary that
        // was destroyed immediately, so nothing was timed. A named object
        // lives to the end of this scope and brackets the kernel launch.
        GpuTimerWrapper scoped_timer;
        increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
    }
    // copy back the array of sums from GPU and print
    // (blocking copy — also waits for the kernel to finish)
    cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    print_array(h_array, ARRAY_SIZE);
    // free GPU memory allocation and exit
    cudaFree(d_array);
    return 0;
}
|
14,511 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#define RG 10
#define USECPSEC 1000000ULL
#define nTPB 256
// #define DSIZE (32768*1024)
#define FSIZE 17
//cuda error checking macros
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
typedef float mytype;
// host function to compute convolution reference results
// Host reference 1-D convolution: for every interior index i
// (radius <= i < N - radius) accumulates A[i-radius..i+radius] weighted by
// the P taps of B into out[i]. The caller must zero-initialize out.
// P is assumed odd and greater than 1.
void conv(const mytype *A, const mytype *B, mytype* out, int N, int P) {
  const int radius = (P - 1) / 2;
  for (int i = radius; i < N - radius; ++i) {
    mytype acc = out[i];
    for (int j = -radius; j <= radius; ++j)
      acc += A[i + j] * B[j + radius];
    out[i] = acc;
  }
}
// timing measurement function
// Microsecond wall-clock helper: returns (current time in us) - prev.
// Call with 0 to obtain a timestamp; call again with that timestamp to
// obtain an elapsed duration.
unsigned long long dtime_usec(unsigned long long prev){
  timeval now;
  gettimeofday(&now, 0);
  const unsigned long long us = (unsigned long long)now.tv_sec * USECPSEC + now.tv_usec;
  return us - prev;
}
// convolution GPU kernel - not using shared memory
// Task 2
// __global__ void conv_Kernel(const mytype * __restrict__ A, const mytype * __restrict__ B, mytype *C, const int N, const int P){
// int idx = threadIdx.x+blockDim.x*blockIdx.x;
// int radius = (P-1)/2;
// if ((idx < (N-radius)) && (idx >= radius)){
// mytype my_sum = 0;
// for (int j = -radius; j <= radius; j++)
// my_sum += A[idx+j]*B[j+radius];
// C[idx] = my_sum;
// }
// }
// Shared-memory 1-D convolution: C[i] = sum_j A[i+j] * B[j+radius] for
// interior i. Dynamic shared memory layout (launch with
// (nTPB + 2*FSIZE) * sizeof(mytype) bytes):
//   sA[0 .. blockDim.x + P - 1]            : A tile plus halo of `radius` cells
//   sA[blockDim.x + P .. blockDim.x + 2P-1]: staged copy of the filter B
__global__ void conv_shared_Kernel(const mytype * __restrict__ A, const mytype * __restrict__ B, mytype *C, const int N, const int P)
{
extern __shared__ mytype sA[];//[nTPB+FSIZE];
// extern __shared__ T sB[];//[FSIZE];
// T *sA = (T*)byte_sA;
int idx = threadIdx.x+blockDim.x*blockIdx.x;
int radius = (P-1)/2;
int lidx = threadIdx.x + radius; // position of A[idx] inside the shared tile
// The first P threads also stage the filter into shared memory.
if (threadIdx.x < P) sA[blockDim.x + P + threadIdx.x] = B[threadIdx.x];
if (idx < N)
{
sA[lidx] = A[idx];
// Low-numbered threads load the left and right halo cells.
if (threadIdx.x < radius)
{
if (idx >= radius)
sA[threadIdx.x] = A[idx - radius];
if ((idx + blockDim.x)< N)
sA[blockDim.x + lidx] = A[idx + blockDim.x];
}
}
__syncthreads(); // tile and filter fully staged before any thread reads them
if ((idx < (N-radius)) && (idx >= radius))
{
mytype my_sum = 0;
for (int j = -radius; j <= radius; j++)
my_sum += sA[lidx+j] * sA[blockDim.x + P + j+radius];
C[idx] = my_sum;
}
}
// Benchmark driver: computes a reference convolution on the host, then runs
// 100 iterations of (H2D copy + shared-memory kernel + D2H copy) timed with
// CUDA events, and validates GPU results against the CPU reference.
// Optional argv[1] overrides the input length DSIZE.
int main(int argc, char *argv[])
{
int DSIZE=321024;
if(argc ==2)
{
DSIZE = atoi(argv[1]);
}
mytype *d_A, *A, *d_B, *B, *d_C, *C, *h_C;
int my_N = DSIZE; // signal length
int my_P = FSIZE; // filter length (odd)
// allocate host data
A = (mytype *)malloc(my_N*sizeof(mytype));
B = (mytype *)malloc(my_P*sizeof(mytype));
C = (mytype *)malloc(my_N*sizeof(mytype));
h_C = (mytype *)malloc(my_N*sizeof(mytype));
// allocate device data
CUDA_CALL(cudaMalloc(&d_A, my_N*sizeof(mytype)));
CUDA_CALL(cudaMalloc(&d_B, my_P*sizeof(mytype)));
CUDA_CALL(cudaMalloc(&d_C, my_N*sizeof(mytype)));
//initialize host input data: small random integers and an all-ones filter
for (int i=0; i < my_N; i++)
A[i] = rand()%RG;
for (int i=0; i < my_P; i++)
B[i] = 1;
//zero out host result data
for (int i=0; i < my_N; i++){
C[i] = 0;
h_C[i] = 0;}
//begin timing for host reference function
unsigned long long cpu_time = dtime_usec(0);
conv(A, B, C, my_N, my_P);
cpu_time = dtime_usec(cpu_time);
//initialize device result data
CUDA_CALL(cudaMemset(d_C, 0, my_N*sizeof(mytype)));
//begin timing for device function
unsigned long long gpu_time = dtime_usec(0);
//copy host input data to device
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(int i = 0; i<100; i++ )
{
CUDA_CALL(cudaMemcpy(d_A, A, my_N*sizeof(mytype), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_B, B, my_P*sizeof(mytype), cudaMemcpyHostToDevice));
//run convolution kernel on GPU (dynamic shared mem: tile + halo + filter)
conv_shared_Kernel<<<(my_N+nTPB-1)/nTPB,nTPB,(nTPB + 2*FSIZE)*sizeof(mytype)>>>(d_A, d_B, d_C, my_N, my_P);
// CUDA_CHECK();
//copy results from device to host
CUDA_CALL(cudaMemcpy(h_C, d_C, my_N*sizeof(mytype), cudaMemcpyDeviceToHost));
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time_in_ms;
cudaEventElapsedTime(&time_in_ms,start,stop);
printf("Native Convolve = %f\n",time_in_ms);
gpu_time = dtime_usec(gpu_time);
//check validity of results
// NOTE(review): exact float equality works here because inputs are small
// integers summed identically on both sides — it would be fragile for
// general float data; a tolerance compare would be safer.
for (int i = 0; i < my_N; i++) if (C[i] != h_C[i]) {printf("FAIL at %d, cpu: %f, gpu %f\n", i, C[i], h_C[i]); return 1;}
//print timing and speed comparison
// printf("PASS. cpu time: %ldus, gpu time: %ldus\n", cpu_time, gpu_time);
// printf("Speedup: cpu/gpu = %f\n", cpu_time/(float)gpu_time);
//all host and device allocated data will be implicitly freed at program termination
return 0;
}
|
14,512 | #include <iostream>
#define BLOCKSIZE 1024
// Each thread sums a fixed 1024-element slice of `array` into result[tid]
// (slice tid covers indices [tid*1024, (tid+1)*1024)).
// NOTE(review): the slice length 1024 is hard-coded, so the launch must use
// exactly N/1024 total threads — true for the <<<1, 1024>>>, N = 2^20 caller.
// The repeated read-modify-write of result[tid] in global memory is what
// makes this "stupid"; a register accumulator would be the usual form.
__global__ void StupidSumArray(int* array, int* result) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
result[tid] = 0;
for (int i = tid * 1024; i < (tid + 1) * 1024; ++i) {
result[tid] += array[i];
}
}
// Sums 2^20 ones on the GPU into 1024 per-thread partial sums, times the
// kernel with CUDA events, and prints each partial sum (expected: 1024).
int main() {
  int N = 1 << 20; // input length
  int *h_x = new int[N];
  for (int i = 0; i < N; ++i) {
    h_x[i] = 1;
  }
  int *d_x;
  int size = sizeof(int) * N; // bytes of the INPUT array only
  cudaMalloc(&d_x, size);
  // One partial sum per thread (a single block of 1024 threads).
  int* h_result = new int[1024];
  for (int i = 0; i < BLOCKSIZE; ++i) {
    h_result[i] = 0;
  }
  int *d_result;
  cudaMalloc(&d_result, sizeof(int) * 1024);
  cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
  // BUG FIX: the original copied `size` (N ints = 4 MB) into d_result, which
  // holds only 1024 ints — an out-of-bounds write on the device.
  cudaMemcpy(d_result, h_result, sizeof(int) * 1024, cudaMemcpyHostToDevice);
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  StupidSumArray<<<1, 1024>>>(d_x, d_result);
  cudaEventRecord(stop);
  // Blocking copy: also waits for the kernel (and the stop event) to complete.
  cudaMemcpy(h_result, d_result, sizeof(int) * 1024, cudaMemcpyDeviceToHost);
  cudaEventSynchronize(stop);
  float ms;
  cudaEventElapsedTime(&ms, start, stop);
  for (int i = 0; i < 1024; ++i) {
    std::cout << i << " " << h_result[i] << std::endl;
  }
  std::cout << ms << std::endl;
  cudaFree(d_x);
  cudaFree(d_result);
  delete[] h_result;
  delete[] h_x;
}
|
14,513 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
// Element-wise vector addition C[i] = A[i] + B[i]; threads whose global
// index falls past n do nothing.
__global__
void vecAddKernel(float* A, float* B, float* C, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    C[i] = A[i] + B[i];
}
// Host wrapper: copies the n-float inputs A and B to the device, launches
// the vector-add kernel with 256-thread blocks (ceil-div grid), and copies
// the result back into C. All device buffers are freed before returning.
void vecAdd(float* A, float* B, float* C, int n)
{
    const int bytes = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, bytes);
    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_B, bytes);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_C, bytes);
    const int threadsPerBlock = 256;
    vecAddKernel <<< ceil(n / (double)threadsPerBlock), threadsPerBlock >>> (d_A, d_B, d_C, n);
    // Blocking copy — also waits for the kernel to finish.
    cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Demo: adds two 5-element vectors on the GPU via vecAdd and prints A, B, C.
int main()
{
float A[5] = { 1, 2, 3, 4, 5 };
float B[5] = { 10, 20, 30, 40, 50 };
float C[5]; // filled by vecAdd
cout << "A:";
for (int i = 0; i < 5; i++)
{
cout << A[i] << " ";
}
cout << endl;
cout << "B:";
for (int i = 0; i < 5; i++)
{
cout << B[i] << " ";
}
cout << endl;
vecAdd(A, B, C, 5);
cout << "C:";
for (int i = 0; i < 5; i++)
{
cout << C[i] << " ";
}
cout << endl;
// cout << "Hello" << endl;
} |
14,514 | #include "includes.h"
// Template for Assignment 1: CUDA
// Use "icc -O -openmp" to compile
#define threshold 1e-4
#define n (2048)
void init(void);
void ref(void);
void test(void);
void compare(int N, double *wref, double *w);
// Forward substitution: each thread solves one lower-triangular system
// A * x_k = b_k, where k is the thread's flattened grid-wide index and the
// right-hand sides / solutions are stored column-per-k (B[i*N+k], X[i*N+k]),
// so adjacent threads access adjacent (coalesced) addresses.
__global__ void test_kernel(int N, double *A, double *B, double *X)
{
int i,j,k;
double temp;
// Template version uses only one thread, which does all the work
// This must be changed (and the launch parameters) to exploit GPU parallelism
// You can make any changes; only requirement is that correctness test passes
k = (blockIdx.y*gridDim.x+blockIdx.x)*(blockDim.x*blockDim.y)+(threadIdx.y*blockDim.x+threadIdx.x);
//if(threadIdx.x == 0) {
//for(k=0;k<n;k++){
/*
if(k<n){
for (i=0;i<n;i++){
temp = B[k*N+i]; // temp = b[k][i];
for (j=0;j<i;j++) temp = temp - A[i*N+j] * X[k*N+j]; // temp = temp - a[i][j]*x[k][j];
X[k*N+i] = temp/A[i*N+i]; //x[k][i] = temp/a[i][i];
}
}
*/
// Active (transposed-storage) version: B and X indexed [i*N+k] for coalescing.
if(k<n){
for (i=0;i<n;i++){
temp = B[i*N+k]; // temp = b[k][i];
for (j=0;j<i;j++) temp = temp - A[j*N+i] * X[j*N+k]; // temp = temp - a[i][j]*x[k][j];
X[i*N+k] = temp/A[i*N+i]; //x[k][i] = temp/a[i][i];
}
}
// }
// }
} |
14,515 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#define THREADS 5
#define BLOCKS 1
// Debug kernel: only thread 0 prints dev_a[0], *dev_b and the by-value
// parameter dev_c, then squares its local copy of dev_c and prints it again.
// The squaring is NOT visible on the host — dev_c is passed by value, so
// the kernel mutates a private copy.
__global__ void testFunction(float *dev_a, float *dev_b, float dev_c)
{
int thread = threadIdx.x;
if(thread == 0)
{
printf("dev[%d] = %.2f;\n", thread, dev_a[thread]);
printf("b = %.2f;\n", *dev_b);
printf("c 1 = %.2f;\n", dev_c);
dev_c = dev_c*dev_c; // local copy only; host `c` is unchanged
printf("c 2 = %.2f;\n", dev_c);
}
}
// Demonstrates parameter passing to a kernel: an array pointer, a pointer
// to a scalar, and a scalar by value (whose in-kernel mutation does not
// propagate back to the host).
int main()
{
    float a[THREADS] = { 1, 2, 3, 4, 5 };
    printf("BEFORE START\n");
    for(int i = 0; i<THREADS; i++)
    {
        printf("a[%d] = %.2f; ", i, a[i]);
    }
    printf("\nBEFORE END\n");
    float *dev_a;
    cudaMalloc((void**)&dev_a, THREADS*sizeof(float));
    cudaMemcpy(dev_a, a, THREADS*sizeof(float), cudaMemcpyHostToDevice);
    float b = 25;
    float *dev_b;
    cudaMalloc((void**)&dev_b, sizeof(float));
    cudaMemcpy(dev_b, &b, sizeof(float), cudaMemcpyHostToDevice);
    float c = 77; // passed BY VALUE: the kernel's squaring cannot change it here
    testFunction<<<BLOCKS, THREADS>>>(dev_a, dev_b, c);
    // BUG FIX: the launch is asynchronous — without synchronizing, the process
    // could free the buffers and exit before the kernel runs and its printf
    // output is flushed.
    cudaDeviceSynchronize();
    cudaFree(dev_a);
    cudaFree(dev_b);
    printf("after kernel free: c = %.2f;\n", c);
    return 0;
}
14,516 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t RellenaMatriz(int *x, int *y, int *m, unsigned int sizeX, unsigned int sizeY);
// Prints the n ints starting at v as "{a, b, c}" (no trailing newline).
void imprimeVector(int *v, int n) {
    printf("{");
    for (int i = 1; i <= n; i++) {
        printf("%d", *v);
        if (i != n) printf(", ");
        ++v;
    }
    printf("}");
}
// Prints an m x n matrix stored row-major in v: a leading blank line, then
// one tab-separated row per line.
void imprimeMatriz(int *v, int m, int n) {
    printf("\n");
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
            printf("%d\t", v[i * n + j]);
        printf("\n");
    }
}
// Fills x[threadIdx.x] = column index, y[blockIdx.x] = row index, and the
// flattened matrix m[id] = row*10 + column.
// NOTE(review): every block writes the same x[] slots and every same-ranked
// thread writes the same y[] slot — benign here because all writers store
// identical values.
__global__ void rmKernel(int *x, int *y, int *m) {
int idx = threadIdx.x; // column (thread within the block)
int idy = blockIdx.x; // row (block index)
int id = blockIdx.x * blockDim.x + threadIdx.x; // flattened element index
x[idx] = idx;
y[idy] = idy;
m[id] = idy*10 + idx;
}
int main()
{
    const int sizeX = 5;
    const int sizeY = 6;
    // Host buffers, zero-initialized; the kernel overwrites every element.
    int x[sizeX] = { 0 };
    int y[sizeY] = { 0 };
    int m[sizeY * sizeX] = { 0 };
    // Fill the vectors and the matrix in parallel on the GPU.
    cudaError_t cudaStatus = RellenaMatriz(x, y, m, sizeX, sizeY);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Fallo en RellenaMatriz");
        return 1;
    }
    imprimeVector(x, sizeX);
    imprimeVector(y, sizeY);
    imprimeMatriz(m, sizeY, sizeX);
    // cudaDeviceReset must be called before exiting in order for profiling
    // and tracing tools such as Nsight and Visual Profiler to show complete
    // traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper: fills x (sizeX entries) with 0..sizeX-1, y (sizeY entries) with
// 0..sizeY-1, and the sizeY x sizeX matrix m with m[r][c] = r*10 + c on the
// GPU, then copies the results back into the host buffers.
// Returns cudaSuccess, or the first CUDA error encountered.
cudaError_t RellenaMatriz(int *x, int *y, int *m, unsigned int sizeX, unsigned int sizeY)
{
    int *dev_x = 0;
    int *dev_y = 0;
    int *dev_m = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for the two index vectors and the matrix.
    cudaStatus = cudaMalloc((void**)&dev_x, sizeX * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_y, sizeY * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // BUG FIX: the matrix holds ints, not pointers; the old
    // sizeof(int *) silently over-allocated on 64-bit hosts.
    cudaStatus = cudaMalloc((void**)&dev_m, sizeY * sizeX * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // No host-to-device copies are needed: the kernel overwrites every
    // element of all three buffers.
    // Launch one block per row (sizeY) with one thread per column (sizeX).
    rmKernel <<<sizeY, sizeX>>>(dev_x, dev_y, dev_m);
    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "rmKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching rmKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy the results from GPU buffers back to host memory.
    cudaStatus = cudaMemcpy(x, dev_x, sizeX * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(y, dev_y, sizeY * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(m, dev_m, sizeY * sizeX * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Freeing a null pointer is a no-op, so this is safe on every path.
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_m);
    return cudaStatus;
}
|
14,517 | /*Se trabajo en un computador personal, por lo que compilación sería: nvcc
* -std=c++11 -D_MWAITXINTRIN_H_INCLUDED MultTiles.cu -o mul $(pkg-config --libs
* opencv) */
#include <chrono>
#include <ctime>
#include <cuda.h>
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <time.h>
using namespace std;
#define TILE_WIDTH 16
// Tiled matrix multiply on the GPU: C = A * B, with A (numARows x
// numAColumns), B (numBRows x numBColumns) and C (numCRows x numCColumns),
// all row-major. Launch with TILE_WIDTH x TILE_WIDTH thread blocks.
// Out-of-range tile elements are staged as zero, so edge tiles are handled.
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
                               int numAColumns, int numBRows, int numBColumns,
                               int numCRows, int numCColumns) {
  __shared__ float tileA[TILE_WIDTH][TILE_WIDTH];
  __shared__ float tileB[TILE_WIDTH][TILE_WIDTH];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int row = blockIdx.y * TILE_WIDTH + ty;
  const int col = blockIdx.x * TILE_WIDTH + tx;
  float acc = 0;
  // Walk the tiles along the shared dimension.
  const int numTiles = (numAColumns - 1) / TILE_WIDTH + 1;
  for (int t = 0; t < numTiles; ++t) {
    const int aCol = t * TILE_WIDTH + tx;
    const int bRow = t * TILE_WIDTH + ty;
    // Stage one tile of A and one of B into shared memory, padding with
    // zeros where the tile hangs past the matrix edge.
    tileA[ty][tx] =
        (row < numARows && aCol < numAColumns) ? A[row * numAColumns + aCol]
                                               : 0.0f;
    tileB[ty][tx] =
        (col < numBColumns && bRow < numBRows) ? B[bRow * numBColumns + col]
                                               : 0.0f;
    __syncthreads(); // tiles fully staged before anyone reads them
    for (int k = 0; k < TILE_WIDTH; ++k)
      acc += tileA[ty][k] * tileB[k][tx];
    __syncthreads(); // done reading before the next iteration overwrites
  }
  // Only threads that map to a valid element of C write a result.
  if (row < numCRows && col < numCColumns) {
    C[row * numCColumns + col] = acc;
  }
}
// Reference CPU matrix multiply: C = A * B, all matrices row-major.
// numAColumns is the shared dimension; C is numARows x numBColumns.
// Used to validate the GPU result.
void matMultiplyOnHost(float *A, float *B, float *C, int numARows,
                       int numAColumns, int numBRows, int numBColumns,
                       int numCRows, int numCColumns) {
  for (int row = 0; row < numARows; ++row) {
    for (int col = 0; col < numBColumns; ++col) {
      float acc = 0.0;
      for (int k = 0; k < numAColumns; ++k)
        acc += A[row * numAColumns + k] * B[k * numBColumns + col];
      C[row * numBColumns + col] = acc;
    }
  }
}
// Compares the host and device results element-by-element and reports
// whether they match exactly.
// BUG FIX: the original used `break` after printing "Iqual: False", then
// fell through and printed "Iqual: True" as well; return instead so exactly
// one verdict is printed.
void Check(float *m_h, float *m_d, int numCRows, int numCColumns) {
  for (int i = 0; i < numCRows * numCColumns; i++) {
    if (m_h[i] != m_d[i]) {
      cout << "Iqual: False" << endl;
      return;
    }
  }
  cout << "Iqual: True" << endl;
}
int main() {
  // BUG FIX: pointers are initialized to NULL so the cleanup at the end is
  // safe even when the dimension check fails and nothing was allocated
  // (free()/cudaFree() of an uninitialized pointer is undefined behavior).
  float *hostA = NULL;       // The A matrix
  float *hostB = NULL;       // The B matrix
  float *hostC = NULL;       // The output C matrix (GPU result)
  float *hostResultC = NULL; // CPU reference result
  float *deviceA = NULL;
  float *deviceB = NULL;
  float *deviceC = NULL;
  int numArows = 5000;    // number of rows in the matrix A
  int numAColumns = 2500; // number of columns in the matrix A
  int numBrows = 2500;    // number of rows in the matrix B
  int numBColumns = 5000; // number of columns in the matrix B
  int numCrows;
  int numCColumns;
  if (numAColumns == numBrows) {
    numCrows = numArows;
    numCColumns = numBColumns;
    // BUG FIX: byte counts were stored in float variables, which is the
    // wrong type for allocation sizes and loses exactness for large
    // matrices; size_t is what the allocation/copy APIs expect.
    size_t sizeA = sizeof(float) * (size_t)numArows * numAColumns;
    size_t sizeB = sizeof(float) * (size_t)numBrows * numBColumns;
    size_t sizeC = sizeof(float) * (size_t)numCrows * numCColumns;
    // Host allocations: rows x columns x element size.
    hostA = (float *)malloc(sizeA);
    hostB = (float *)malloc(sizeB);
    hostC = (float *)malloc(sizeC);
    hostResultC = (float *)malloc(sizeC);
    // Fill the input matrices with constants.
    for (int i = 0; i < numArows * numAColumns; i++) {
      hostA[i] = 3;
    }
    for (int i = 0; i < numBrows * numBColumns; i++) {
      hostB[i] = 2;
    }
    // Device allocations (timed).
    std::chrono::time_point<std::chrono::system_clock> start, end;
    std::chrono::duration<double> elapsed_seconds;
    start = std::chrono::system_clock::now();
    cudaMalloc((void **)&deviceA, sizeA);
    cudaMalloc((void **)&deviceB, sizeB);
    cudaMalloc((void **)&deviceC, sizeC);
    end = std::chrono::system_clock::now();
    elapsed_seconds = end - start;
    cout << "Cuda Malloc Time: " << elapsed_seconds.count() << "s\n";
    // Host to Device transfer (timed).
    start = std::chrono::system_clock::now();
    cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice);
    end = std::chrono::system_clock::now();
    elapsed_seconds = end - start;
    cout << "Cuda Memcpy Host to Device Time: " << elapsed_seconds.count()
         << "s\n";
    start = std::chrono::system_clock::now();
    // Grid: one TILE_WIDTH x TILE_WIDTH block per output tile of C.
    dim3 dimGrid((numCColumns - 1) / TILE_WIDTH + 1,
                 (numCrows - 1) / TILE_WIDTH + 1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    end = std::chrono::system_clock::now();
    elapsed_seconds = end - start;
    cout << "Dims Time: " << elapsed_seconds.count() << "s\n";
    // Tiled matrix multiplication on the device (timed).
    start = std::chrono::system_clock::now();
    matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numArows,
                                          numAColumns, numBrows, numBColumns,
                                          numCrows, numCColumns);
    // BUG FIX: kernel launches are asynchronous; without this synchronize
    // the timer only measured launch overhead, not the multiplication.
    cudaDeviceSynchronize();
    end = std::chrono::system_clock::now();
    elapsed_seconds = end - start;
    cout << "Multplication Device Time: " << elapsed_seconds.count() << "s\n";
    // Device to Host transfer (timed).
    start = std::chrono::system_clock::now();
    cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost);
    end = std::chrono::system_clock::now();
    elapsed_seconds = end - start;
    cout << "Cuda Memcmpy Device to Host Time: " << elapsed_seconds.count()
         << "s\n";
    // Reference multiplication on the host (timed).
    start = std::chrono::system_clock::now();
    matMultiplyOnHost(hostA, hostB, hostResultC, numArows, numAColumns,
                      numBrows, numBColumns, numCrows, numCColumns);
    end = std::chrono::system_clock::now();
    elapsed_seconds = end - start;
    cout << "Matrix Multiplication Host Time: " << elapsed_seconds.count()
         << "s\n";
    Check(hostC, hostResultC, numCrows, numCColumns);
  } else {
    cout << "Las matrices no se pueden multiplicar " << endl;
  }
  cudaFree(deviceA);
  cudaFree(deviceB);
  cudaFree(deviceC);
  free(hostA);
  free(hostB);
  free(hostC);
  free(hostResultC); // BUG FIX: this buffer was previously leaked
  return 0;
}
|
14,518 |
// float2 addition
// float2 addition: component-wise sum of two complex numbers.
__device__ inline float2 ComplexAdd(float2 a, float2 b) {
  float2 r = a;
  r.x += b.x;
  r.y += b.y;
  return r;
}
// float2 scale
// float2 scale: multiplies both components of a by the real scalar s.
__device__ inline float2 ComplexScale(float2 a, float s) {
  float2 r;
  r.x = a.x * s;
  r.y = a.y * s;
  return r;
}
// float2 multiplication
// float2 multiplication: (a.x + i*a.y) * (b.x + i*b.y).
__device__ inline float2 ComplexMul(float2 a, float2 b) {
  float2 r;
  r.x = a.x * b.x - a.y * b.y; // real part
  r.y = a.x * b.y + a.y * b.x; // imaginary part
  return r;
}
// float2 pointwise multiplication
// Pointwise complex multiply-and-scale: a[i] = (a[i] * b[i]) * scale.
// Grid-stride loop: any launch configuration covers all `size` elements.
extern "C" __global__ void ComplexPointwiseMulAndScale(float2* a, const float2* b, int size, float scale) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride) {
    a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
  }
}
|
14,519 | #include "includes.h"
// Single-threaded Cholesky row step on the MATRIX_SIZE x MATRIX_SIZE
// row-major matrix U: takes the square root of the diagonal element (k,k)
// and divides the rest of row k by it. Only global thread 0 does the work;
// `stride` is unused but kept for signature compatibility with callers.
// No guard against a non-positive diagonal — the caller must supply a
// positive-definite matrix.
__global__ void chol_kernel_optimized_div_old(float * U, int k, int stride) {
    //General thread id, used only to elect a single worker thread
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    //Iterators
    unsigned int j;
    unsigned int num_rows = MATRIX_SIZE;
    //Only let one thread do this
    if (tx == 0) {
        // FIX: use the float overload. sqrt() promoted the operand to
        // double and back, which is slower on device; sqrtf produces the
        // same correctly-rounded single-precision result.
        U[k * num_rows + k] = sqrtf(U[k * num_rows + k]);
        // Division step over the remainder of row k.
        for (j = (k + 1); j < num_rows; j++) {
            U[k * num_rows + j] /= U[k * num_rows + k]; // Division step
        }
    }
}
14,520 | #include <stdio.h>
#include "cuda.h"
// Larger / smaller of two values (arguments fully parenthesized).
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
// Ceiling division: number of b-sized chunks needed to cover a.
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Aborts the process with a diagnostic if the most recent CUDA API call or
// kernel launch left an error pending (cudaGetLastError also clears it).
void check_error (const char* message) {
  cudaError_t error = cudaGetLastError ();
  if (error == cudaSuccess)
    return;
  printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
  exit(-1);
}
// Generated curvilinear-grid finite-difference stencil kernel updating r1
// from u1/u2/u3 with material fields mu/la, metric terms met1..met4 and
// grid-stretching vectors strx/stry. One thread per (j,k) column; each
// thread sweeps i serially over the interior (2..N-3 in every dimension,
// i.e. a 2-deep halo). The flat buffers are reinterpreted as
// [*][304][304] slabs, so correctness assumes N == 304 — TODO confirm
// with the caller. The _t_* scalars are generator-produced common
// subexpressions; their exact evaluation order fixes the floating-point
// result, so do not reorder.
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Reinterpret the flat device buffers as 3-D [*][304][304] arrays.
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
// Interior points only (note: bitwise & on bools — equivalent to && here).
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
// Generator-produced scalar temporaries (common subexpressions).
double _t_26_;
double _t_46_;
double _t_65_;
double _t_7_;
double _t_23_;
double _t_176_;
double _t_35_;
double _t_29_;
double _t_171_;
double _t_62_;
double _t_201_;
double _t_74_;
double _t_68_;
double _t_196_;
double _t_43_;
double _t_189_;
double _t_55_;
double _t_49_;
double _t_184_;
double _t_4_;
double _t_164_;
double _t_16_;
double _t_10_;
double _t_159_;
double _t_50_;
double _t_146_;
double _t_128_;
double _t_69_;
double _t_109_;
double _t_0_;
double _t_91_;
double _t_11_;
double _t_30_;
double _t_17_;
double _t_114_;
double _t_96_;
double _t_151_;
double _t_15_;
double _t_133_;
double _t_36_;
double _t_34_;
double _t_56_;
double _t_54_;
double _t_75_;
double _t_73_;
double _t_24_;
double _t_102_;
double _t_84_;
double _t_139_;
double _t_121_;
double _t_21_;
double _t_5_;
double _t_2_;
double _t_44_;
double _t_41_;
double _t_63_;
double _t_60_;
// Accumulator for r1[i][j][k]; written back once at the end of the body.
double r1ic0jc0kc0 = r1[i][j][k];
double _t_104_;
double _t_123_;
double _t_141_;
double _t_86_;
double _t_108_;
double _t_81_;
double _t_113_;
double _t_101_;
double _t_90_;
double _t_95_;
double _t_83_;
double _t_79_;
double _t_127_;
double _t_118_;
double _t_132_;
double _t_120_;
double _t_145_;
double _t_150_;
double _t_138_;
double _t_165_;
double _t_155_;
double _t_177_;
double _t_190_;
double _t_202_;
double _t_172_;
double _t_169_;
double _t_197_;
double _t_194_;
double _t_160_;
double _t_157_;
double _t_185_;
double _t_182_;
// Metric/material coefficient products at the k-neighbors.
_t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
_t_46_ = 2.0 * mu[i][j][k+1];
_t_46_ += la[i][j][k+1];
_t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
_t_7_ = 2.0 * mu[i][j][k+2];
_t_7_ += la[i][j][k+2];
_t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
_t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
_t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
_t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_171_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
_t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
_t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
_t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_196_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
_t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
_t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
_t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_184_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
_t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
_t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
_t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_159_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
// First contribution group: i-differences of u2/u3/u1 at the k-neighbors.
_t_50_ = -c1 * u2[i-1][j][k+1];
_t_146_ = c1 * u2[i-1][j][k+1];
_t_50_ += c1 * u2[i+1][j][k+1];
_t_128_ = c1 * u2[i+1][j][k+1];
_t_69_ = -c1 * u2[i-1][j][k-1];
_t_146_ -= c1 * u2[i-1][j][k-1];
_t_69_ += c1 * u2[i+1][j][k-1];
_t_128_ -= c1 * u2[i+1][j][k-1];
_t_69_ -= c2 * u2[i-2][j][k-1];
_t_109_ = -c1 * u2[i-2][j][k-1];
_t_50_ -= c2 * u2[i-2][j][k+1];
_t_109_ += c1 * u2[i-2][j][k+1];
_t_69_ += c2 * u2[i+2][j][k-1];
_t_0_ = c1 * _t_68_ * _t_69_;
_t_91_ = -c1 * u2[i+2][j][k-1];
_t_50_ += c2 * u2[i+2][j][k+1];
_t_0_ += c1 * _t_49_ * _t_50_;
_t_91_ += c1 * u2[i+2][j][k+1];
_t_11_ = -c2 * u2[i-2][j][k+2];
_t_109_ += c2 * u2[i-2][j][k+2];
_t_11_ -= c1 * u2[i-1][j][k+2];
_t_146_ += c2 * u2[i-1][j][k+2];
_t_11_ += c1 * u2[i+1][j][k+2];
_t_128_ += c2 * u2[i+1][j][k+2];
_t_11_ += c2 * u2[i+2][j][k+2];
_t_0_ += c2 * _t_10_ * _t_11_;
_t_91_ += c2 * u2[i+2][j][k+2];
_t_30_ = -c2 * u2[i-2][j][k-2];
_t_109_ -= c2 * u2[i-2][j][k-2];
_t_30_ -= c1 * u2[i-1][j][k-2];
_t_146_ -= c2 * u2[i-1][j][k-2];
_t_30_ += c1 * u2[i+1][j][k-2];
_t_128_ -= c2 * u2[i+1][j][k-2];
_t_30_ += c2 * u2[i+2][j][k-2];
_t_0_ += c2 * _t_29_ * _t_30_;
_t_91_ -= c2 * u2[i+2][j][k-2];
_t_17_ = -c2 * u3[i-2][j][k+2];
_t_114_ = c2 * u3[i-2][j][k+2];
_t_17_ += c2 * u3[i+2][j][k+2];
_t_96_ = c2 * u3[i+2][j][k+2];
_t_17_ -= c1 * u3[i-1][j][k+2];
_t_151_ = c2 * u3[i-1][j][k+2];
_t_17_ += c1 * u3[i+1][j][k+2];
_t_15_ = _t_16_ * _t_17_;
_t_0_ += c2 * _t_15_ * stry[j];
_t_133_ = c2 * u3[i+1][j][k+2];
_t_36_ = -c2 * u3[i-2][j][k-2];
_t_114_ -= c2 * u3[i-2][j][k-2];
_t_36_ -= c1 * u3[i-1][j][k-2];
_t_151_ -= c2 * u3[i-1][j][k-2];
_t_36_ += c1 * u3[i+1][j][k-2];
_t_133_ -= c2 * u3[i+1][j][k-2];
_t_36_ += c2 * u3[i+2][j][k-2];
_t_96_ -= c2 * u3[i+2][j][k-2];
_t_34_ = _t_35_ * _t_36_;
_t_0_ += c2 * _t_34_ * stry[j];
_t_56_ = -c2 * u3[i-2][j][k+1];
_t_114_ += c1 * u3[i-2][j][k+1];
_t_56_ -= c1 * u3[i-1][j][k+1];
_t_151_ += c1 * u3[i-1][j][k+1];
_t_56_ += c1 * u3[i+1][j][k+1];
_t_133_ += c1 * u3[i+1][j][k+1];
_t_56_ += c2 * u3[i+2][j][k+1];
_t_96_ += c1 * u3[i+2][j][k+1];
_t_54_ = _t_55_ * _t_56_;
_t_0_ += c1 * _t_54_ * stry[j];
_t_75_ = -c2 * u3[i-2][j][k-1];
_t_114_ -= c1 * u3[i-2][j][k-1];
_t_75_ -= c1 * u3[i-1][j][k-1];
_t_151_ -= c1 * u3[i-1][j][k-1];
_t_75_ += c1 * u3[i+1][j][k-1];
_t_133_ -= c1 * u3[i+1][j][k-1];
_t_75_ += c2 * u3[i+2][j][k-1];
_t_96_ -= c1 * u3[i+2][j][k-1];
_t_73_ = _t_74_ * _t_75_;
_t_0_ += c1 * _t_73_ * stry[j];
_t_24_ = -c2 * u1[i-2][j][k-2];
_t_102_ = -c2 * u1[i-2][j][k-2];
_t_24_ += c2 * u1[i+2][j][k-2];
_t_84_ = -c2 * u1[i+2][j][k-2];
_t_24_ -= c1 * u1[i-1][j][k-2];
_t_139_ = -c2 * u1[i-1][j][k-2];
_t_24_ += c1 * u1[i+1][j][k-2];
_t_121_ = -c2 * u1[i+1][j][k-2];
_t_21_ = strx[i] * _t_23_ * _t_24_;
_t_0_ += c2 * _t_21_ * stry[j];
_t_5_ = -c2 * u1[i-2][j][k+2];
_t_102_ += c2 * u1[i-2][j][k+2];
_t_5_ -= c1 * u1[i-1][j][k+2];
_t_139_ += c2 * u1[i-1][j][k+2];
_t_5_ += c1 * u1[i+1][j][k+2];
_t_121_ += c2 * u1[i+1][j][k+2];
_t_5_ += c2 * u1[i+2][j][k+2];
_t_84_ += c2 * u1[i+2][j][k+2];
_t_2_ = strx[i] * _t_4_ * _t_5_;
_t_0_ += c2 * _t_2_ * stry[j];
_t_44_ = -c2 * u1[i-2][j][k+1];
_t_102_ += c1 * u1[i-2][j][k+1];
_t_44_ -= c1 * u1[i-1][j][k+1];
_t_139_ += c1 * u1[i-1][j][k+1];
_t_44_ += c1 * u1[i+1][j][k+1];
_t_121_ += c1 * u1[i+1][j][k+1];
_t_44_ += c2 * u1[i+2][j][k+1];
_t_84_ += c1 * u1[i+2][j][k+1];
_t_41_ = strx[i+2] * _t_43_ * _t_44_;
_t_0_ += c1 * _t_41_ * stry[j];
_t_63_ = -c2 * u1[i-2][j][k-1];
_t_102_ -= c1 * u1[i-2][j][k-1];
_t_63_ -= c1 * u1[i-1][j][k-1];
_t_139_ -= c1 * u1[i-1][j][k-1];
_t_63_ += c1 * u1[i+1][j][k-1];
_t_121_ -= c1 * u1[i+1][j][k-1];
_t_63_ += c2 * u1[i+2][j][k-1];
_t_84_ -= c1 * u1[i+2][j][k-1];
_t_60_ = strx[i-2] * _t_62_ * _t_63_;
_t_0_ += c1 * _t_60_ * stry[j];
r1ic0jc0kc0 += _t_0_;
// Second contribution group: k-differences at the i-neighbors.
_t_104_ = 2.0 * mu[i-2][j][k];
_t_123_ = 2.0 * mu[i+1][j][k];
_t_141_ = 2.0 * mu[i-1][j][k];
_t_86_ = 2.0 * mu[i+2][j][k];
_t_104_ += la[i-2][j][k];
_t_141_ += la[i-1][j][k];
_t_123_ += la[i+1][j][k];
_t_86_ += la[i+2][j][k];
_t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
_t_81_ = stry[j] * _t_108_ * _t_109_;
_t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
_t_81_ += _t_113_ * _t_114_;
_t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
_t_81_ += strx[i] * _t_101_ * _t_102_;
_t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
_t_81_ += stry[j] * _t_90_ * _t_91_;
_t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
_t_81_ += _t_95_ * _t_96_;
_t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
_t_81_ += strx[i] * _t_83_ * _t_84_;
_t_79_ = stry[j] * c2 * _t_81_;
_t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
_t_118_ = stry[j] * _t_127_ * _t_128_;
_t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
_t_118_ += _t_132_ * _t_133_;
_t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
_t_118_ += strx[i] * _t_120_ * _t_121_;
_t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
_t_118_ += stry[j] * _t_145_ * _t_146_;
_t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
_t_118_ += _t_150_ * _t_151_;
_t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
_t_118_ += strx[i] * _t_138_ * _t_139_;
_t_79_ += stry[j] * c1 * _t_118_;
r1ic0jc0kc0 += _t_79_;
// Third contribution group: j-differences at the k-neighbors.
_t_165_ = -c2 * u2[i][j-2][k+2];
_t_165_ -= c1 * u2[i][j-1][k+2];
_t_165_ += c1 * u2[i][j+1][k+2];
_t_165_ += c2 * u2[i][j+2][k+2];
_t_155_ = c2 * _t_164_ * _t_165_;
_t_177_ = -c2 * u2[i][j-2][k-2];
_t_177_ -= c1 * u2[i][j-1][k-2];
_t_177_ += c1 * u2[i][j+1][k-2];
_t_177_ += c2 * u2[i][j+2][k-2];
_t_155_ += c2 * _t_176_ * _t_177_;
_t_190_ = -c2 * u2[i][j-2][k+1];
_t_190_ -= c1 * u2[i][j-1][k+1];
_t_190_ += c1 * u2[i][j+1][k+1];
_t_190_ += c2 * u2[i][j+2][k+1];
_t_155_ += c1 * _t_189_ * _t_190_;
_t_202_ = -c2 * u2[i][j-2][k-1];
_t_202_ -= c1 * u2[i][j-1][k-1];
_t_202_ += c1 * u2[i][j+1][k-1];
_t_202_ += c2 * u2[i][j+2][k-1];
_t_155_ += c1 * _t_201_ * _t_202_;
_t_172_ = -c2 * u1[i][j-2][k-2];
_t_172_ -= c1 * u1[i][j-1][k-2];
_t_172_ += c1 * u1[i][j+1][k-2];
_t_172_ += c2 * u1[i][j+2][k-2];
_t_169_ = stry[j] * _t_171_ * _t_172_;
_t_155_ += c2 * _t_169_ * strx[i];
_t_197_ = -c2 * u1[i][j-2][k-1];
_t_197_ -= c1 * u1[i][j-1][k-1];
_t_197_ += c1 * u1[i][j+1][k-1];
_t_197_ += c2 * u1[i][j+2][k-1];
_t_194_ = stry[j] * _t_196_ * _t_197_;
_t_155_ += c1 * _t_194_ * strx[i];
_t_160_ = -c2 * u1[i][j-2][k+2];
_t_160_ -= c1 * u1[i][j-1][k+2];
_t_160_ += c1 * u1[i][j+1][k+2];
_t_160_ += c2 * u1[i][j+2][k+2];
_t_157_ = stry[j+2] * _t_159_ * _t_160_;
_t_155_ += c2 * _t_157_ * strx[i];
_t_185_ = -c2 * u1[i][j-2][k+1];
_t_185_ += c2 * u1[i][j+2][k+1];
_t_185_ -= c1 * u1[i][j-1][k+1];
_t_185_ += c1 * u1[i][j+1][k+1];
_t_182_ = stry[j-2] * _t_184_ * _t_185_;
_t_155_ += c1 * _t_182_ * strx[i];
r1ic0jc0kc0 += _t_155_;
// Commit the accumulated contributions, then add the remaining mixed
// j/k and i/j stencil terms directly to global memory.
r1[i][j][k] = r1ic0jc0kc0;
r1[i][j][k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i][j][k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
}
}
}
// Allocates device copies of all fields, copies the inputs to the GPU, runs
// the curvi stencil kernel over the N^3 domain, and copies r1 back.
// FIXES: the kernel launch is now error-checked, and every device buffer is
// freed before returning (previously all twelve allocations leaked on each
// call).
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
  double *r1;
  cudaMalloc (&r1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for r1\n");
  cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u1;
  cudaMalloc (&u1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u1\n");
  cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u2;
  cudaMalloc (&u2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u2\n");
  cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u3;
  cudaMalloc (&u3, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u3\n");
  cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *mu;
  cudaMalloc (&mu, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for mu\n");
  cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *la;
  cudaMalloc (&la, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for la\n");
  cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met1;
  cudaMalloc (&met1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met1\n");
  cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met2;
  cudaMalloc (&met2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met2\n");
  cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met3;
  cudaMalloc (&met3, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met3\n");
  cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *met4;
  cudaMalloc (&met4, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for met4\n");
  cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *strx;
  cudaMalloc (&strx, sizeof(double)*N);
  check_error ("Failed to allocate device memory for strx\n");
  cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
  double *stry;
  cudaMalloc (&stry, sizeof(double)*N);
  check_error ("Failed to allocate device memory for stry\n");
  cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
  // One thread per (k, j) column; the kernel sweeps i internally.
  dim3 blockconfig (16, 8);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
  curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
  // FIX: surface launch errors instead of silently copying back garbage.
  check_error ("Failed to launch curvi kernel\n");
  // Blocking copy: also synchronizes with the kernel before reading r1.
  cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  // FIX: release all device buffers (previously leaked on every call).
  cudaFree (r1);
  cudaFree (u1);
  cudaFree (u2);
  cudaFree (u3);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (met1);
  cudaFree (met2);
  cudaFree (met3);
  cudaFree (met4);
  cudaFree (strx);
  cudaFree (stry);
}
|
14,521 | #include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define THREADS_PER_BLOCK 1024
using namespace std;
// Replaces each element with its squared deviation from avg:
// input[i] = (input[i] - avg)^2. One thread per element.
__global__ void subAvg(int* input,int count,int avg)
{
    int index = blockDim.x*blockIdx.x + threadIdx.x;
    if(index<count)
    {
        // FIX: pow() squares in floating point and the result is truncated
        // back to int, which can be off by one for exact integer squares;
        // plain integer multiplication is exact and faster.
        int diff = input[index] - avg;
        input[index] = diff * diff;
    }
}
// In-place block-wise max reduction: each block reduces a segment of
// 2*THREADS_PER_BLOCK elements by pairwise comparison, leaving the segment
// maximum in the segment's first element (assumes blockDim.x ==
// THREADS_PER_BLOCK, as launched from main).
__global__ void max(int* input,int count)
{
    const int segmentBase = blockDim.x*blockIdx.x;
    const int lane = threadIdx.x;
    for (int span = 1; span <= THREADS_PER_BLOCK; span *= 2)
    {
        // Each doubling of span halves the number of active lanes.
        if (lane < THREADS_PER_BLOCK/span)
        {
            const int first = segmentBase*2 + lane*span*2;
            const int second = first + span;
            if (first < count && second < count && input[second] > input[first])
                input[first] = input[second];
        }
        __syncthreads(); // barrier is outside the divergent branch
    }
}
// Sequentially folds the per-segment maxima (one every 2048 elements,
// assuming 1024-thread reduction blocks) into input[0]. Run with <<<1,1>>>.
__global__ void maxFinalize(int* input,int count)
{
    int best = input[0];
    for (int i = 2048; i < count; i += 2048)
        best = input[i] > best ? input[i] : best;
    input[0] = best;
}
// In-place block-wise min reduction: each block reduces a segment of
// 2*THREADS_PER_BLOCK elements by pairwise comparison, leaving the segment
// minimum in the segment's first element (assumes blockDim.x ==
// THREADS_PER_BLOCK, as launched from main).
__global__ void min(int* input,int count)
{
    const int segmentBase = blockDim.x*blockIdx.x;
    const int lane = threadIdx.x;
    for (int span = 1; span <= THREADS_PER_BLOCK; span *= 2)
    {
        // Each doubling of span halves the number of active lanes.
        if (lane < THREADS_PER_BLOCK/span)
        {
            const int first = segmentBase*2 + lane*span*2;
            const int second = first + span;
            if (first < count && second < count && input[second] < input[first])
                input[first] = input[second];
        }
        __syncthreads(); // barrier is outside the divergent branch
    }
}
// Sequentially folds the per-segment minima (one every 2048 elements,
// assuming 1024-thread reduction blocks) into input[0]. Run with <<<1,1>>>.
__global__ void minFinalize(int* input,int count)
{
    int best = input[0];
    for (int i = 2048; i < count; i += 2048)
        best = input[i] < best ? input[i] : best;
    input[0] = best;
}
// In-place block-wise tree reduction by addition: each block sums a
// segment of 2*THREADS_PER_BLOCK elements into the segment's first element
// (assumes blockDim.x == THREADS_PER_BLOCK, as launched from main).
__global__ void sum(int* input,int count)
{
    const int segmentBase = blockDim.x*blockIdx.x;
    const int lane = threadIdx.x;
    for (int span = 1; span <= THREADS_PER_BLOCK; span *= 2)
    {
        // Each doubling of span halves the number of active lanes.
        if (lane < THREADS_PER_BLOCK/span)
        {
            const int first = segmentBase*2 + lane*span*2;
            const int second = first + span;
            if (first < count && second < count)
                input[first] += input[second];
        }
        __syncthreads(); // barrier is outside the divergent branch
    }
}
// Identical to sum(): in-place block-wise tree reduction by addition over
// 2*THREADS_PER_BLOCK-element segments. Kept as a separate kernel; the two
// bodies are duplicates.
__global__ void sum2(int* input,int count)
{
    const int segmentBase = blockDim.x*blockIdx.x;
    const int lane = threadIdx.x;
    for (int span = 1; span <= THREADS_PER_BLOCK; span *= 2)
    {
        // Each doubling of span halves the number of active lanes.
        if (lane < THREADS_PER_BLOCK/span)
        {
            const int first = segmentBase*2 + lane*span*2;
            const int second = first + span;
            if (first < count && second < count)
                input[first] += input[second];
        }
        __syncthreads(); // barrier is outside the divergent branch
    }
}
// Adds the per-segment partial sums (one every 2048 elements, assuming
// 1024-thread reduction blocks) into input[0]. Run with <<<1,1>>>.
__global__ void sumUp(int* input,int count)
{
    int total = input[0];
    for (int i = 2048; i < count; i += 2048)
        total += input[i];
    input[0] = total;
}
int main(int argc, char const *argv[])
{
    srand(3);
    //common part
    int count = 0;
    cout<<"Enter the number of elements:";
    cin>>count;
    // Robustness: everything below (allocation, the average) needs count >= 1.
    if (count <= 0)
        return 1;
    int size = count * sizeof(int);
    // FIX: a variable-length array (int h[count]) is non-standard C++ and
    // overflows the stack for large counts; allocate the host buffer on the
    // heap instead.
    int *h = new int[count];
    int *d;
    cudaMalloc(&d,size); //allocating device memory
    int blockSize = 1024;//initializing the max block size
    // FIX: grid dimensions are integers; the old float floor/ceil dance
    // risked precision loss for large counts. Each reduction block covers
    // 2*blockSize elements, so this is ceil(ceil(count/blockSize)/2).
    int numBlocks = ((count + blockSize - 1) / blockSize + 1) / 2;
    cout<<"Elements are:"<<endl;
    for (int i = 0; i < count; i++)
    {
        h[i] = i + 1;
        cout<<h[i]<<"\t";
    }
    //calculating minimum
    cudaMemcpy(d,h,size,cudaMemcpyHostToDevice);
    min<<<numBlocks,blockSize>>> (d,count);
    minFinalize<<<1,1>>> (d,count);
    int result;
    cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
    cout<<"Minimum Element:"<<result<<endl;
    //calculating maximum
    cudaMemcpy(d,h,size,cudaMemcpyHostToDevice);
    max<<<numBlocks,blockSize>>> (d,count);
    maxFinalize<<<1,1>>> (d,count);
    cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
    cout<<"Maximum Element:"<<result<<endl;
    //calculating sum
    cudaMemcpy(d,h,size,cudaMemcpyHostToDevice);
    sum<<<numBlocks,blockSize>>> (d,count);
    sumUp<<<1,1>>> (d,count);
    cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
    cout<<"Sum is "<<result<<endl;
    cout<<"Correct sum(by formula)*ONLY IF INPUT IS 1...n* is:"<<count*(2+(count-1))/2<<endl;
    int sum = result;
    // Integer average truncates, so the deviations below are approximate.
    int average = sum/count;
    cout<<"Average is:"<<average<<endl;
    //calculating variance and standard deviation
    cudaMemcpy(d,h,size,cudaMemcpyHostToDevice);
    int subAvgnumBlocks = (count+blockSize-1)/blockSize;
    subAvg<<<subAvgnumBlocks,blockSize>>>(d,count,average);
    sum2<<<numBlocks,blockSize>>> (d,count);
    sumUp<<<1,1>>>(d,count);
    cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
    // NOTE(review): the value printed as "Variance" is the sum of squared
    // deviations; it is not divided by count — confirm whether that is
    // intended before relying on these figures.
    cout<<"Variance is "<<result<<endl;
    cout<<"Standard Deviation is "<<sqrt(result)<<endl;
    getchar();
    cudaFree(d);
    delete[] h; // FIX: the host buffer is now released
    return 0;
}
|
14,522 | #include <iostream>
#include <cuda_runtime.h>
#include <stdint.h>
#include <math.h>
#include <curand.h>
using namespace std;
// Abort with file/line context when a CUDA runtime call fails.
#define CUDA_CALL(x) do { \
cudaError_t _m_cudaStat = x; \
if((_m_cudaStat) != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1);}} while(0)
// Abort with file/line context when a cuRAND call fails.
#define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \
printf("CURAND rrror at %s:%d\n",__FILE__,__LINE__); \
exit(1);}} while(0)
// Compress each float into one byte: bit 7 = sign, bits 0-6 encode a biased
// exponent. The byte in `output` is expected to already hold random bits
// (written by curandGenerate in cnat_compress) used for stochastic rounding.
__global__ void cnat_compress_cuda_kernel(
float* __restrict__ input,
uint8_t* __restrict__ output,
int len) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < len) {
if (input[index] == 0)
// Exact zero gets the reserved code 0.
output[index] = 0;
else {
// Align the 8 random bits with the top-8-mantissa-bit mask below
// (bits 15..22 of the IEEE-754 single-precision representation).
int rand_bits = output[index];
rand_bits <<= 15;
int exp;
int prob = reinterpret_cast<int &>(input[index]) & 0b00000000011111111000000000000000;
frexpf(input[index], &exp);
// Stochastic rounding: round the exponent down with probability
// proportional to the mantissa's top 8 bits.
if (rand_bits >= prob) exp -= 1;
exp += 127;
// Clamp the biased exponent into the 7-bit payload range [0, 127];
// bias 17 leaves code 0 for zero/underflow.
uint8_t encode;
if (exp<=17) encode = 0;
else if (exp<=143) encode = uint8_t(exp-17);
else encode = 127;
if (input[index] < 0) encode += 128;
output[index] = encode;
}
}
}
/*
generate random bits using the output buffer (8 bits for each element)
and compare with the 8 most significant bits of mantissa
*/
// Host launcher: seed `output` with random bytes, then encode `count` floats
// from `input` into `output` on `stream`.
void cnat_compress(float* input, uint8_t* output, int count, cudaStream_t stream, curandGenerator_t* gen) {
const int threads = 1024;
// ceil(count / threads), with a minimum of one block.
auto blocks = count/threads;
if (count%threads || !blocks) blocks++;
CURAND_CALL(curandSetStream(*gen, stream));
// The kernel reads each output byte as its dither before overwriting it.
// NOTE(review): count/4 truncates — when count is not a multiple of 4 the
// final count%4 bytes keep stale contents; curandGenerate also writes
// 32-bit words, so `output` must be 4-byte aligned. Confirm callers
// guarantee both.
CURAND_CALL(curandGenerate(*gen, (uint32_t*)output, count/4)); // (0, 1]
cnat_compress_cuda_kernel<<<blocks, threads, 0, stream>>>(
input,
output,
count);
}
// Decode one cnat byte back to a float: bit 7 is the sign, bits 0-6 the
// biased exponent (bias 17); magnitude code 0 decodes to (signed) zero.
// The reconstructed value is a pure power of two (mantissa bits all zero).
__global__ void cnat_decompress_cuda_kernel(
uint8_t* __restrict__ input,
float* __restrict__ output,
int len) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < len) {
        uint32_t decode;
        // Positive codes map to exponent code+17; negative codes (>=128) add
        // 256 so the sign lands in bit 31 after the shift below.
        if (input[index] <= 127) decode = input[index] + 17;
        else decode = input[index] + 145;
        // FIX: the original test was `!input[index] % 128`, which parses as
        // `(!input[index]) % 128` and only matched code 0. Code 128
        // (negative zero) must also decode with a zero exponent field.
        if ((input[index] % 128) == 0) decode -= 17;
        uint32_t sign_and_exp = decode << 23;
        output[index] = reinterpret_cast<float &>(sign_and_exp);
    }
}
// Host launcher: decode `count` cnat bytes from `input` into floats in
// `output`, asynchronously on `stream`.
void cnat_decompress(uint8_t* input, float* output, int count, cudaStream_t stream) {
    const int threads = 1024;
    // Ceiling division; force at least one block so count == 0 still launches.
    int blocks = (count + threads - 1) / threads;
    if (blocks == 0) blocks = 1;
    cnat_decompress_cuda_kernel<<<blocks, threads, 0, stream>>>(input, output, count);
}
// Round-trip smoke test: compress eight 3.0f values into 8-bit codes, then
// decompress them and print codes and reconstructed floats.
int main(){
    curandGenerator_t gen;
    cudaStream_t stream;
    CUDA_CALL(cudaStreamCreate(&stream));
    CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
    CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, time(NULL)));
    float host[] = {3., 3., 3., 3., 3., 3., 3., 3.};
    float* ptr;
    uint8_t* output;
    uint8_t host_output[8];
    for(int i=0; i<8; ++i) cout<<host[i]<<", ";
    cout<<endl;
    CUDA_CALL(cudaMalloc((void **)&ptr, 8*sizeof(float)));
    CUDA_CALL(cudaMalloc((void **)&output, 8));
    // FIX: memcpys are now error-checked (previously unchecked).
    CUDA_CALL(cudaMemcpy(ptr, host, 8*sizeof(float), cudaMemcpyHostToDevice));
    cnat_compress(ptr, output, 8, stream, &gen);
    cnat_decompress(output, ptr, 8, stream);
    CUDA_CALL(cudaStreamSynchronize(stream));
    CUDA_CALL(cudaMemcpy(host_output, output, 8, cudaMemcpyDeviceToHost));
    // FIX: 8*sizeof(float) instead of the magic constant 32.
    CUDA_CALL(cudaMemcpy(host, ptr, 8*sizeof(float), cudaMemcpyDeviceToHost));
    for(int i=0; i<8; ++i) cout<<int(host_output[i])<<", ";
    cout<<endl;
    for(int i=0; i<8; ++i) cout<<(host[i])<<", ";
    cout<<endl;
    // FIX: release device buffers and the stream (the original leaked all
    // three and only destroyed the generator).
    CURAND_CALL(curandDestroyGenerator(gen));
    CUDA_CALL(cudaFree(ptr));
    CUDA_CALL(cudaFree(output));
    CUDA_CALL(cudaStreamDestroy(stream));
    return 0;
}
|
14,523 | #include "includes.h"
// One thread per column: average the nsamples per-sample bin scores into a
// probability per bin, then write the Shannon entropy (base 2, negated sum
// of p*log2 p) of that column's distribution to entropies[col].
// Columns are spaced pitch_bin_scores floats apart in bin_scores.
__global__ void get_entropy(int nbins, int nsamples, int nx, float * bin_scores, int pitch_bin_scores, float * entropies)
{
int col_x = blockDim.x * blockIdx.x + threadIdx.x;
if(col_x >= nx)
return;
const float * in_col = bin_scores + col_x * pitch_bin_scores;
float entropy = 0.f;
for(int i = 0; i < nbins; i++) {
float prob = 0.f;
for(int j = 0; j < nsamples; j++)
prob += in_col[j * nbins + i];
// FIX: divide in single precision; the original cast to double, forcing
// a float->double->float round trip on every bin inside the kernel.
prob /= (float) nsamples;
// log2(0) is undefined; a zero-probability bin contributes nothing.
float logp = (prob <= 0.f) ? 0.f : __log2f(prob);
entropy += prob * logp;
}
entropies[col_x] = -entropy;
} |
14,524 | #define BLOCK_DIM 4
#define TILE_DIM BLOCK_DIM
#include <stdio.h>
#include <stdlib.h>
#include<time.h>
void PrintMatrixToText(int* mtxArray, int height, int width, const char* fileName);
// Matrix Mult Kernel
// Tiled integer matrix multiply C = A*B, where A is n x m, B is m x k and
// C is n x k. Each block computes one TILE_DIM x TILE_DIM tile of C,
// staging A and B tiles in shared memory over numPhase phases.
__global__ void matrixMult( int* A, int* B, int* C, const int n, const int m, const int k, const int numPhase)
{
__shared__ int Ads[TILE_DIM][TILE_DIM];
__shared__ int Bds[TILE_DIM][TILE_DIM];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
int Pvalue = 0;
// Loop over the A and B tiles required to compute the C element
for ( int ph=0 ; ph<numPhase ; ++ph )
{
// Collaborative loading of A and B tiles into shared memory;
// out-of-range entries are zero-filled so they do not affect the sum.
if( Row < n && ph*TILE_DIM + tx < m ) Ads[ty][tx] = A[ Row*m + ph*TILE_DIM + tx ];
else Ads[ty][tx] = 0;
if( ph*TILE_DIM + ty < m && Col < k ) Bds[ty][tx] = B[ (ph*TILE_DIM + ty)*k + Col ];
else Bds[ty][tx] = 0;
__syncthreads();
if( Col < k && Row < n )
{
for ( int i=0 ; i<TILE_DIM ; ++i )
{
Pvalue += (Ads[ ty ][ i ] * Bds[ i ][ tx ]); // accumulate the dot product
}
}
// FIX: this barrier was inside the bounds check above, so out-of-range
// threads skipped it while in-range threads waited — undefined
// behaviour/deadlock. Every thread in the block must reach it before
// the next phase overwrites the shared tiles.
__syncthreads();
}
if( Col < k && Row < n ) C[Row*k + Col] = Pvalue;
}
// Read n, m, k from stdin, multiply random n x m and m x k integer matrices
// on the GPU, and dump A, B and C to text files.
int main(){
int *h_a, *h_b, *h_c; // Host Variables
int *d_a, *d_b, *d_c; // Device Variables
int n,m,k;
// Receive Parameters
// NOTE(review): scanf return values are unchecked; garbage input leaves
// n/m/k uninitialized. Consider validating before allocating.
printf("Input the Value of n : ");
scanf("%d",&n);
printf("Input the Value of m : ");
scanf("%d",&m);
printf("Input the Value of k : ");
scanf("%d",&k);
// Alloc & Initialize Host Input Matrices
h_a = (int*)malloc(n*m*sizeof(int));
h_b = (int*)malloc(m*k*sizeof(int));
h_c = (int*)malloc(n*k*sizeof(int));
srand(time(NULL));
// Initialize A and B with random values in [-10, 10].
for(int i = 0 ; i < n*m ; i++)
{
h_a[i] = rand() % 21 - 10;
}
for(int i = 0 ; i < m*k ; i++)
{
h_b[i] = rand() % 21 - 10;
}
// Allocate Device Memory
cudaMalloc((void **) &d_a, n*m*sizeof(int));
cudaMalloc((void **) &d_b, m*k*sizeof(int));
cudaMalloc((void **) &d_c, n*k*sizeof(int));
// Copy Input Matrices to Device Memory
cudaMemcpy(d_a, h_a, n*m*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, m*k*sizeof(int), cudaMemcpyHostToDevice);
// Set Grid/Block Dimensions: one thread per C element, ceil-div grid.
int dimxG = (int)ceil((float)k/(float)BLOCK_DIM);
int dimyG = (int)ceil((float)n/(float)BLOCK_DIM);
dim3 dimGrid( dimxG , dimyG);
dim3 dimBlock(BLOCK_DIM,BLOCK_DIM);
// Number of shared-memory tile phases along the m dimension.
int numPhase = (int)ceil((float)m/(float)BLOCK_DIM);
// Launch Kernel
matrixMult<<<dimGrid ,dimBlock >>>(d_a, d_b, d_c, n, m, k, numPhase);
// Copy Result to Host (blocking memcpy also synchronizes with the kernel).
cudaMemcpy( h_c, d_c, n*k*sizeof(int) , cudaMemcpyDeviceToHost );
// Free Device Memory
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
// Print Results
PrintMatrixToText(h_a,n,m,"matrixA.txt");
PrintMatrixToText(h_b,m,k,"matrixB.txt");
PrintMatrixToText(h_c,n,k,"matrixC.txt");
// Free Host Memory
free(h_a); free(h_b); free(h_c);
return 0;
}
// Utilities
// Write a height x width row-major integer matrix to `fileName` as
// tab-separated text, one matrix row per line.
void PrintMatrixToText(int* mtxArray, int height, int width, const char* fileName){
    FILE *f = fopen(fileName, "w");
    // FIX: the original dereferenced a NULL FILE* when the open failed.
    if (f == NULL) {
        fprintf(stderr, "Cannot open %s for writing\n", fileName);
        return;
    }
    for ( int i=0 ; i<height ; i++ )
    {
        for ( int j=0 ; j<width ; j++ )
        {
            fprintf(f, "%d\t" , mtxArray[i*width+j] );
        }
        fprintf(f,"\n" );
    }
    fclose(f);
}
|
14,525 | #include "includes.h"
// One thread per sample: power = (re^2 + im^2) * norm. blockIdx.y selects
// which row of length nTimesamples is processed.
__global__ void GPU_simple_power_and_interbin_kernel(float2 *d_input_complex, float *d_output_power, int nTimesamples, float norm){
    int sample = blockIdx.x*blockDim.x + threadIdx.x;
    int row_offset = blockIdx.y*nTimesamples;
    if( sample < nTimesamples ) {
        float2 v = d_input_complex[row_offset + sample];
        d_output_power[row_offset + sample] = (v.x*v.x + v.y*v.y)*norm;
    }
} |
14,526 | #include <thrust/host_vector.h>
#include <iostream>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/binary_search.h>
#include <thrust/adjacent_difference.h>
int main (void){
    // Histogram via binary search: v0 holds sorted cut points; for each
    // integer in [0, 6) count how many cut points are <= it with
    // upper_bound, then adjacent_difference turns cumulative counts into
    // per-interval counts.
    thrust::host_vector<int> v0(4);
    int cuts[] = {0, 2, 3, 5};
    for (int i = 0; i < 4; ++i) v0[i] = cuts[i];
    for (int i = 0; i < 4; ++i){std::cout << v0[i] << " ";}
    std::cout << std::endl;
    thrust::host_vector<int> v1(6, 0);
    thrust::counting_iterator<int> first(0);
    thrust::upper_bound(v0.begin(), v0.end(), first, first + 6, v1.begin());
    thrust::adjacent_difference(v1.begin(), v1.end(), v1.begin());
    for (int i = 0; i < 6; ++i){std::cout << v1[i] << " ";}
    std::cout << std::endl;
}
|
14,527 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define GAMMA 0.5
double r2();
// One JOR (Jacobi over-relaxation) sweep: each thread updates one component
// of x in place, using x[i] <- (1-GAMMA)*x[i] - GAMMA/M[i][i] * sum_{j!=i} M[i][j]*x[j].
// Assumes the launch shape chosen in main: grid (1, gridsize), 16x16 blocks.
__global__ void JORkernel(double *cudaM,double *cudaX, int dim)
{
//int idx = blockIdx.x * blockDim.x + threadIdx.x; // Finds the thread_id
int blocksize=16;
int gridsize=gridDim.x; //this will always be 1
// int gridsize2=gridDim.x;
// printf("gridsize: %d %d\n", gridsize,gridsize2);
// __device__ bool myGlobalFlag=true;
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int blocktotal=blocksize*blocksize;
// Flatten (block y, block x, thread y, thread x) into one linear index.
int temp1=by*gridsize+bx;
int temp2=ty*blocksize+tx;
int j;
//bool localFlag; // idea is to "and" all the flag and see any false
// printf("bx: %d \n",bx);
// printf("by: %d \n",by);
// printf("tx: %d \n",tx);
// printf("ty: %d \n",ty);
//double temp; //this temp is going to check with the old x and see if closer enough if not it will be copied into cudaX;
int ind=temp1*blocktotal+temp2;
if(ind<dim)
{
//printf("%d \n",ind);
//put JOR formula here
//b is 0 all the time so we don't need to consider about it so far
// NOTE(review): for ind < dim, tempi == ind; the modulo is redundant here.
int tempi=ind%dim;
double sum=0;
// Off-diagonal row sum: sum_{j != i} M[i][j] * x[j].
for(j=0;j<dim;j++)
{
if(j!=tempi)
{
sum=sum+cudaM[tempi*dim+j]*cudaX[j];
}
}
// NOTE(review): other threads may read cudaX[j] concurrently while this
// thread writes cudaX[ind]; the sweep is therefore not a strict Jacobi
// update (reads can see mixed old/new values). Confirm this is intended.
cudaX[ind]=(1-GAMMA)*cudaX[ind]-GAMMA/cudaM[tempi*dim+tempi]*sum; //temp is the updated x
//now comapre
/* if(temp-cudaX[ind]>=0.01||cudaX[ind]-temp>=0.01)
{
localFlag=false;
}
else
{
localFlag=true;
}
myGlobalFlag=myGlobalFlag && localFlag;*/
}
__syncthreads();
//wait for all the threads to finish, this is not going to work because it only snyc threads inside of one block.
}
// Solve M x = 0 by JOR iteration on a random diagonally dominant matrix,
// iterating kernel sweeps until every component changes by < tolerance.
int main(int argc, char *argv[])
{
// FIX: the original printed a warning when argc < 2 but then called
// atoi(argv[1]) anyway, dereferencing a missing argument.
if( argc < 2 ) {
printf("One argument expected.\n");
return 1;
}
if( argc > 2 ) {
printf("Too many arguments supplied.\n");
}
printf("The matrix dimension is %s\n", argv[1]);
int i;
int j;
int dim=atoi(argv[1]);
double *matrix;
double *x;
double *previousx; // previous iterate, kept for the convergence test
int loopCount=0; // number of JOR sweeps performed
double tolerance=0.001;
matrix=(double*) malloc(dim*dim*sizeof(double));
x=(double*) malloc(dim*sizeof(double));
previousx=(double*) malloc(dim*sizeof(double));
// Random diagonally dominant matrix: diagonal = sum of the row, so the
// matrix is automatically nonsingular.
for(i=0;i<dim;i++)
{
double rowSum=0.0;
for(j=0;j<dim;j++)
{
matrix[i*dim+j]=r2();
rowSum=matrix[i*dim+j]+rowSum;
}
matrix[i*dim+i]=rowSum;
}
// Initial guess: all ones (with b = 0 the iterates should decay to 0).
for(i=0;i<dim;i++)
{
x[i]=1.0;
previousx[i]=1.0;
}
double *cudaM; // device copies of the matrix and the iterate
double *cudaX;
int xsize=dim*sizeof(double);
int msize=dim*dim*sizeof(double);
cudaMalloc((void**)&cudaM,msize);
cudaMalloc((void**)&cudaX,xsize);
// Start timing (includes the host-to-device copies).
clock_t begin,end;
begin=clock();
cudaMemcpy(cudaM,matrix,msize,cudaMemcpyHostToDevice);
cudaMemcpy(cudaX,x,xsize,cudaMemcpyHostToDevice);
// Launch shape: grid (1, gridsize) of 16x16 blocks (256 threads each),
// matching the flattening hard-coded inside JORkernel.
int blocksize=16;
int gridsize= dim/256+1;
dim3 Grid( 1, gridsize);
dim3 Block( blocksize,blocksize);
bool stopFlag=false;
while (!stopFlag)
{
loopCount=loopCount+1;
JORkernel<<<Grid, Block>>>(cudaM,cudaX,dim);
cudaMemcpy( x, cudaX, xsize, cudaMemcpyDeviceToHost);
// Converged only when every component moved less than `tolerance`.
for(i=0;i<dim;i++)
{
if((x[i]-previousx[i]>=tolerance)||(previousx[i]-x[i])>=tolerance)
{
for(j=0;j<dim;j++)
{
previousx[j]=x[j];
}
stopFlag=false;
// cudaX already holds the latest iterate; no copy back needed.
break;
}
if(i==dim-1) // no component exceeded the tolerance
{
stopFlag=true;
}
}
}
double time_spent;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("matrix size: %d-iteration times: %d-error tolerance set to:%lf \n",dim,loopCount,tolerance);
printf("time spent:%lf seconds \n",time_spent);
for(i=0;i<dim;i++)
{
if(i%10==0)
{
printf("\n");
}
printf("[%d]:%lf ",i,x[i]);
}
cudaFree(cudaX);
cudaFree(cudaM);
// FIX: release the host buffers (the original leaked all three).
free(matrix);
free(x);
free(previousx);
return 0;
}
// Uniform pseudo-random double in [0, 1], driven by rand().
double r2()
{
    double scale = (double)RAND_MAX;
    return (double)rand() / scale;
}
|
14,528 | // Copyright (c) 2020 Saurabh Yadav
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define TOTAL_ROWS 1000U
#define TOTAL_COLS 2000U
__global__
// Grid-stride fill: every thread writes elements spaced one whole grid
// apart, so any launch configuration covers all width*height entries.
void init_matrix(float *matrix, int width, int height, float val) {
    int total = width * height;
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += stride) {
        matrix[i] = val;
    }
}
__global__
// Element-wise C = A + B over a 2D launch: one thread per matrix element,
// guarded against the grid overhang on both axes.
void add_matrices(float * mat_A_arr, float * mat_B_arr, float * mat_C_arr,
                  int num_cols, int num_rows) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= num_rows || col >= num_cols)
        return;
    int idx = row * num_cols + col;
    mat_C_arr[idx] = mat_A_arr[idx] + mat_B_arr[idx];
}
// Add two TOTAL_ROWS x TOTAL_COLS matrices (filled with 1s and 2s) in
// unified memory and verify every output element is 3.0f.
int main() {
    cudaError_t err = cudaSuccess;
    float *mat_A, *mat_B, *mat_C;
    size_t memsize = TOTAL_COLS * TOTAL_ROWS * sizeof(float);
    /* Allocate managed (unified) memory for the matrices. */
    err = cudaMallocManaged(&mat_A, memsize);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate memory for matrix A (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMallocManaged(&mat_B, memsize);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate memory for matrix B (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMallocManaged(&mat_C, memsize);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate memory for matrix C (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Initialize A with 1s and B with 2s via grid-stride kernels. */
    int blocksize_for_init = 256;
    int blocks_for_init = (TOTAL_ROWS*TOTAL_COLS + blocksize_for_init - 1)
                          / (blocksize_for_init);
    init_matrix<<<blocks_for_init, blocksize_for_init>>>(mat_A, TOTAL_COLS, TOTAL_ROWS, 1);
    init_matrix<<<blocks_for_init, blocksize_for_init>>>(mat_B, TOTAL_COLS, TOTAL_ROWS, 2);
    err = cudaGetLastError();
    if( err != cudaSuccess) {
        fprintf(stderr, "Failed to initialize matrix (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Do the matrix addition: one thread per element over a 2D grid. */
    size_t blocksizeX = 16;
    size_t blocksizeY = 16;
    dim3 DimGrid( (TOTAL_COLS-1)/blocksizeX + 1, (TOTAL_ROWS-1)/blocksizeY + 1);
    dim3 DimBlock( blocksizeX, blocksizeY);
    add_matrices<<<DimGrid, DimBlock>>>(mat_A, mat_B, mat_C, TOTAL_COLS, TOTAL_ROWS);
    err = cudaGetLastError();
    if( err != cudaSuccess) {
        fprintf(stderr, "Failed to perform matrix addition (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Managed memory: synchronize before the host touches mat_C. */
    cudaDeviceSynchronize();
    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < (TOTAL_ROWS*TOTAL_COLS); i++)
        maxError = fmax(maxError, fabs(mat_C[i]-3.0f));
    printf("Max error: %f\n", maxError);
    /* FIX: release the managed allocations (the original leaked all three). */
    cudaFree(mat_A);
    cudaFree(mat_B);
    cudaFree(mat_C);
    return EXIT_SUCCESS;
} |
14,529 | #include "includes.h"
// Turn a buffer of uniform random values into a 0/1 dropout mask, in place:
// element i becomes 1.0f (keep) when its random value is below `dropout`,
// else 0.0f (drop). Expects a 2D grid of 1D blocks.
__global__ void DropoutMaskKernel( float *dropoutMaskPtr, float dropout, int inputSize )
{
int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < inputSize)
{
// The bool result of > is implicitly converted to 1.0f / 0.0f.
dropoutMaskPtr[i] = dropout > dropoutMaskPtr[i];
/*if (dropoutMaskPtr[i] > dropout)
dropoutMaskPtr[i] = 0.0f;
else
dropoutMaskPtr[i] = 1.0f;*/
}
} |
14,530 | //#include <cudaDefs.h>
//#include <time.h>
//#include <math.h>
//#include <random>
//
//using namespace std;
//
//
//cudaError_t error = cudaSuccess;
//cudaDeviceProp deviceProp = cudaDeviceProp();
//
//
//__constant__ __device__ int hodnota;
//
//typedef struct { int x; int y;} Point;
//__constant__ __device__ Point dPoint;
//__constant__ __device__ int dPole[5];
//
//
//
//void Cv1()
//{
// initializeCUDA(deviceProp);
//
//
// int hA = 100;
// int hB = 0;
// cudaMemcpyToSymbol(static_cast<const void*>(&hodnota), static_cast<const void*>(&hA), sizeof(hodnota));
// cudaMemcpyFromSymbol(static_cast<void*>(&hB), static_cast<const void*>(&hodnota), sizeof(hodnota));
//
// cout << hB << endl;
//
//
//
// Point hL;
// hL.x = 1;
// hL.y = 2;
//
// Point hL2;
// cudaMemcpyToSymbol(static_cast<const void*>(&dPoint), static_cast<const void*>(&hL), sizeof(hL));
// cudaMemcpyFromSymbol(static_cast<void*>(&hL2), static_cast<const void*>(&dPoint), sizeof(hL));
//
// cout << hL2.x << " " << hL2.y << endl;
//
// int Pole[5] = { 1,2,3,4,5 };
// int hPole2[5];
// cudaMemcpyToSymbol(dPole, Pole, sizeof(Pole));
// cudaMemcpyFromSymbol(hPole2, dPole, sizeof(Pole));
//
// cout << hPole2[0] << " " << hPole2[1];
//}
//
//
//int main(int argc, char *argv[])
//{
// initializeCUDA(deviceProp);
//
// //Cv1();
// Cv2();
// system("pause");
//
//}
|
14,531 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
// In-place pairwise tree reduction: after the kernel, input[0] holds the
// minimum of the first n elements. Designed for a single-block launch with
// ceil(n/2) threads (each thread compares one pair per round).
__global__ void min(int* input, int n)
{
const int tid = threadIdx.x; //Index of the thread within the block
int step_size = 1;
int number_of_threads = blockDim.x; //Number of threads in thread block
while (number_of_threads > 0)
{
if (tid < number_of_threads)
{
// Each active thread compares elements 2*step apart and keeps the
// smaller in the left slot.
const int fst = tid * step_size * 2;
const int snd = fst + step_size;
if(snd < n)
{
if(input[snd] < input[fst])
input[fst] = input[snd];
}
}
step_size <<= 1; //1 -> 2, 2 -> 4, 4 -> 8; shift operator
// number_of_threads is uniform across the block, so all threads break
// together and the barrier below is never reached divergently.
if(number_of_threads == 1)
break;
number_of_threads = (int)ceil((float)number_of_threads/2.0); // divide number of threads by 2
// Barrier before the next round reads the values written above.
__syncthreads();
}
}
// Read `count` integers from stdin, reduce them to their minimum on the GPU
// (result left in d[0]), and print it.
int main()
{
int count;
int result;
int* d;
cout<<"\nEnter the number of elements : ";
cin>>count;
const int size = count * sizeof(int);
//when sizeof() is used with data types it simply returns the
// the amount of memory allocated to that data types
int *h;
h = new int[count];
cout<<"\nEnter the elements : \n";
for(int i=0;i<count;i++)
cin>>h[i];
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
// One block with ceil(count/2) threads; each thread handles one pair.
// NOTE(review): the float launch argument is converted to an integer, and
// for count > 2048 this exceeds the 1024-threads-per-block limit, so the
// launch would fail silently (no cudaGetLastError check). Confirm the
// intended input range.
min <<<1, ceil((float)count/2.0) >>>(d , count);
// Blocking copy of d[0] — also synchronizes with the kernel.
cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
cout << "Min is " << result << endl;
getchar();
cudaFree(d);
delete[] h;
return 0;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\1-HPC> nvcc ParRedMin.cu -o ParRedMin
ParRedMin.cu
Creating library ParRedMin.lib and object ParRedMin.exp
PS D:\MyFiles\Projects\LP1-LabAsg\1-HPC> nvprof ./ParRedMin
Enter the number of elements : 4
Enter the elements :
1
2
3
67
==1876== NVPROF is profiling process 1876, command: ./ParRedMin
Min is 1
==1876== Profiling application: ./ParRedMin
==1876== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 59.68% 2.3680us 1 2.3680us 2.3680us 2.3680us min(int*, int)
25.81% 1.0240us 1 1.0240us 1.0240us 1.0240us [CUDA memcpy HtoD]
14.52% 576ns 1 576ns 576ns 576ns [CUDA memcpy DtoH]
API calls: 79.80% 172.77ms 1 172.77ms 172.77ms 172.77ms cudaMalloc
19.78% 42.826ms 1 42.826ms 42.826ms 42.826ms cuDevicePrimaryCtxRelease
0.13% 271.80us 97 2.8020us 100ns 172.50us cuDeviceGetAttribute
0.11% 245.30us 1 245.30us 245.30us 245.30us cudaLaunchKernel
0.07% 144.50us 1 144.50us 144.50us 144.50us cudaFree
0.05% 106.90us 2 53.450us 25.500us 81.400us cudaMemcpy
0.04% 89.600us 1 89.600us 89.600us 89.600us cuModuleUnload
0.01% 21.400us 1 21.400us 21.400us 21.400us cuDeviceTotalMem
0.00% 9.7000us 1 9.7000us 9.7000us 9.7000us cuDeviceGetPCIBusId
0.00% 1.9000us 3 633ns 200ns 900ns cuDeviceGetCount
0.00% 1.7000us 2 850ns 300ns 1.4000us cuDeviceGet
0.00% 900ns 1 900ns 900ns 900ns cuDeviceGetName
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetUuid
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetLuid
*/ |
14,532 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Hand-rolled atomic add built on atomicCAS: retry until the compare-and-
// swap observes the value we based our sum on. Returns the value that was
// stored at *address immediately before the successful add (atomicAdd
// semantics).
__device__ int myAtomicAdd(int *address, int incr)
{
    int assumed;
    int old = *address;
    do {
        assumed = old;
        old = atomicCAS(address, assumed, assumed + incr);
    } while (old != assumed); // another thread won the race; retry with its value
    return old;
}
// Stress kernel for myAtomicAdd: every launched thread increments *ptr once,
// so after the kernel *ptr has grown by the total thread count.
__global__ void new_atomic_add_test(int *ptr)
{
myAtomicAdd(ptr,1);
}
//int main()
//{
// int value = 23;
// int SIZE = sizeof(int);
// int ref = -1;
//
// int *d_val;
// cudaMalloc((void**)&d_val, SIZE);
// cudaMemcpy(d_val, &value, SIZE, cudaMemcpyHostToDevice);
// new_atomic_add_test << <1, 32 >> > (d_val);
// cudaDeviceSynchronize();
// cudaMemcpy(&ref, d_val, SIZE, cudaMemcpyDeviceToHost);
//
// printf("Updated value : %d \n", ref);
//
// cudaDeviceReset();
// return 0;
//} |
14,533 | #include "includes.h"
// Logarithmic tone-mapping of one channel value:
//   log10(1 + q*v) / log10(1 + k*maxLum)
// `maxLum` comes from includes.h — presumably the image's maximum luminance;
// confirm against its definition.
// FIX: use single-precision log10f; the original called the double-precision
// log10 overload inside a float device function.
__device__ float logarithmic_mapping(float k, float q, float val_pixel){
    return (log10f(1.0f + q * val_pixel))/(log10f(1.0f + k * maxLum));
}
// Apply logarithmic tone mapping per pixel: one thread per (Row, Col),
// mapping each of the three interleaved channels independently.
// BLUE/GREEN/RED channel offsets come from includes.h.
// NOTE(review): `channels` and `depth` are unused, and the stride is
// hard-coded to 3 interleaved channels — confirm that matches the callers.
__global__ void tonemap_logarithmic(float* imageIn, float* imageOut, int width, int height, int channels, int depth, float q, float k){
//printf("maxLum : %f\n", maxLum);
int Row = blockDim.y * blockIdx.y + threadIdx.y;
int Col = blockDim.x * blockIdx.x + threadIdx.x;
if(Row < height && Col < width) {
imageOut[(Row*width+Col)*3+BLUE] = logarithmic_mapping(k, q, imageIn[(Row*width+Col)*3+BLUE]);
imageOut[(Row*width+Col)*3+GREEN] = logarithmic_mapping(k, q, imageIn[(Row*width+Col)*3+GREEN]);
imageOut[(Row*width+Col)*3+RED] = logarithmic_mapping(k, q, imageIn[(Row*width+Col)*3+RED]);
}
} |
14,534 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
/*
Topic :: Blocks
GPU computing is about massive parallelism add<<<N,1>>> - execute N times in parallel
Terminology: each parallel invocation of add()is referred to as a block
- The set of blocks is referred to as a grid.
- Each invocation can refer to its block index using blockIdx.x
By using blockIdx.xto index into the array, each block handles a different element of the array
*/
#define N 512
// One block per element: block i computes c[i] = a[i] + b[i].
__global__ void add1(int *a,int *b,int *c) {
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Demo driver for add1: allocates N-element vectors on host and device,
// launches one block per element, and copies the sum back.
// NOTE(review): a and b are never initialized (random_ints is commented
// out), so the device receives indeterminate values — intentional scaffold?
void cudamain1()
{
int *a, *b, *c; // host copies of a,b,c
int *d_a, *d_b,*d_c; // device copies of a,b,c
int size = N * sizeof(int);
// Allocate space for device copies of a,b,c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); //random_ints(a, N);
b = (int *)malloc(size); //random_ints(b, N);
c = (int *)malloc(size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU: N blocks of one thread each
add1<<<N,1>>>(d_a,d_b,d_c);
// Copy result back to host (blocking, so it also syncs with the kernel)
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
//printf("c : %d\n",c);
// Cleanup
free(a); free(b); free(c);
cudaFree(d_a);cudaFree(d_b);cudaFree(d_c);
return;
}
|
14,535 | #include<chrono>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<string>
#include<sstream>
#include<fstream>
#include<vector>
#include<malloc.h>
#define LENGTH_DICTIONARY 57664
#define LENGTH_DOCS 10000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
using namespace std;
// Print a decoded CUDA error with its source location; optionally abort the
// process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Cosine similarity between document pairs: blockIdx.x selects doc1,
// threadIdx.x selects doc2, so the launch must use blockDim.x == docsize
// threads per block. A document's own row gets -1 so it never wins the
// "most similar" search.
// NOTE(review): main launches this with LENGTH_DOCS (10000) threads per
// block, which exceeds the 1024-thread hardware limit — confirm, the launch
// as written cannot succeed.
__global__ void cosine_similarity(int *sparsemat, double *cosinematrix, int dicsize,int docsize)
{
double mul = 0.0, d_a = 0.0, d_b = 0.0 ;
int doc1index = blockIdx.x;
int doc2index = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(doc1index == doc2index)
cosinematrix[index] = -1;
else {
// Row pointers into the docsize x dicsize term-count matrix.
int *A = &sparsemat[doc1index*dicsize];
int *B = &sparsemat[doc2index*dicsize] ;
// Dot product and both squared norms in a single pass.
for(unsigned int i = 0; i < dicsize; ++i)
{
mul += A[i] * B[i] ;
d_a += A[i] * A[i] ;
d_b += B[i] * B[i] ;
}
cosinematrix[index] = mul / (sqrt(d_a) * sqrt(d_b)) ;
}
}
// Split `line` on single spaces and append every piece to `tokens`.
void getTokens(string line,vector<string>&tokens)
{
    istringstream stream(line);
    string piece;
    while (getline(stream, piece, ' '))
        tokens.push_back(piece);
}
// Debug helper: print the vector's size, then its elements space-separated.
void printVector(vector<string> v)
{
    cout << "size: " << v.size() << endl;
    for (size_t i = 0; i < v.size(); ++i)
        cout << v[i] << " ";
    cout << endl;
}
// Fill one document's term-count row: zero all LENGTH_DICTIONARY counters,
// then tally each token. tokens[0] is the document label; tokens[1..] are
// dictionary word indices.
void feedTheMatrix(vector<string> tokens, int * mat)
{
    for (int i = 0; i < LENGTH_DICTIONARY; ++i)
        mat[i] = 0;
    for (size_t i = 1; i < tokens.size(); ++i)
        mat[stoi(tokens[i])] += 1;
}
// Print `row` x `col` entries of the sparse term matrix; rows are spaced
// LENGTH_DICTIONARY apart regardless of `col`.
void printTheMatrix(int *mat,int row,int col)
{
    for (int r = 0; r < row; ++r) {
        for (int c = 0; c < col; ++c)
            cout << mat[r*LENGTH_DICTIONARY + c] << " ";
        cout << endl;
    }
}
// Print `row` x `col` entries of the similarity matrix; rows are spaced
// LENGTH_DOCS apart regardless of `col`.
void printTheCosineMatrix(double *mat,int row,int col)
{
    for (int r = 0; r < row; ++r) {
        for (int c = 0; c < col; ++c)
            cout << mat[r*LENGTH_DOCS + c] << " ";
        cout << endl;
    }
}
// Return the index of the largest value among the first LENGTH_DOCS entries
// of one similarity row (the diagonal is pre-set to -1, so a document never
// matches itself).
int findIndexofHighestSimilarity(double *cosinematrix)
{
    int best = 0;
    double bestValue = -1;
    for (int i = 0; i < LENGTH_DOCS; ++i) {
        if (cosinematrix[i] > bestValue) {
            bestValue = cosinematrix[i];
            best = i;
        }
    }
    return best;
}
// Compute pairwise cosine similarity between LENGTH_DOCS bag-of-words
// documents, splitting the output rows into 8 chunks across 2 GPUs with
// 4 streams each, then report the most similar document for each document.
int main()
{
// Pinned host buffer: one LENGTH_DICTIONARY-wide term-count row per document.
int *sparsemat = NULL;
cudaMallocHost((void**)&sparsemat,LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int));
// Read the corpus: one document per line, "<label> <word-id> <word-id> ...".
ifstream inFile;
inFile.open("./sample10000.txt");
if (!inFile) {
// FIX: error message previously named the wrong file (sample100.txt).
cerr << "Unable to open file sample10000.txt";
return -1;
}
string line;
int linenum = 0;
while (getline(inFile,line)) {
vector<string> tokens;
getTokens(line,tokens);
feedTheMatrix(tokens,&(sparsemat[linenum*LENGTH_DICTIONARY]));
linenum++;
}
inFile.close();
// Pinned result matrix: cosinematrix[i*LENGTH_DOCS+j] = sim(doc i, doc j).
double *cosinematrix=NULL;
cudaMallocHost((void**)&cosinematrix,LENGTH_DOCS*LENGTH_DOCS*sizeof(double));
// 8 streams over 2 GPUs; each GPU gets a full copy of the sparse matrix,
// only the output rows are chunked.
int *d_sparsemat1,*d_sparsemat2;
double *d_cosinematrix1,*d_cosinematrix2,*d_cosinematrix3,*d_cosinematrix4,*d_cosinematrix5,*d_cosinematrix6,*d_cosinematrix7,*d_cosinematrix8;
int chunkSize = LENGTH_DOCS/8;
gpuErrchk( cudaMalloc((void **)&d_sparsemat1,LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int)));
gpuErrchk( cudaMalloc((void **)&d_sparsemat2,LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix1,chunkSize*LENGTH_DOCS*sizeof(double)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix2,chunkSize*LENGTH_DOCS*sizeof(double)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix3,chunkSize*LENGTH_DOCS*sizeof(double)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix4,chunkSize*LENGTH_DOCS*sizeof(double)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix5,chunkSize*LENGTH_DOCS*sizeof(double)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix6,chunkSize*LENGTH_DOCS*sizeof(double)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix7,chunkSize*LENGTH_DOCS*sizeof(double)));
gpuErrchk(cudaMalloc((void **)&d_cosinematrix8,chunkSize*LENGTH_DOCS*sizeof(double)));
cudaStream_t stream1,stream2,stream3,stream4,stream5,stream6,stream7,stream8;
cudaSetDevice(0);
gpuErrchk(cudaStreamCreate(&stream1));
gpuErrchk(cudaStreamCreate(&stream2));
gpuErrchk(cudaStreamCreate(&stream3));
gpuErrchk(cudaStreamCreate(&stream4));
gpuErrchk(cudaMemcpy(d_sparsemat1,sparsemat,LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int),cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix1,&cosinematrix[0*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream1));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix2,&cosinematrix[1*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream2));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix3,&cosinematrix[2*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream3));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix4,&cosinematrix[3*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream4));
gpuErrchk(cudaStreamSynchronize(stream1));
gpuErrchk(cudaStreamSynchronize(stream2));
gpuErrchk(cudaStreamSynchronize(stream3));
gpuErrchk(cudaStreamSynchronize(stream4));
cudaSetDevice(1);
gpuErrchk(cudaStreamCreate(&stream5));
gpuErrchk(cudaStreamCreate(&stream6));
gpuErrchk(cudaStreamCreate(&stream7));
gpuErrchk(cudaStreamCreate(&stream8));
gpuErrchk(cudaMemcpy(d_sparsemat2,sparsemat,LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int),cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix5,&cosinematrix[4*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream5));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix6,&cosinematrix[5*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream6));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix7,&cosinematrix[6*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream7));
gpuErrchk(cudaMemcpyAsync(d_cosinematrix8,&cosinematrix[7*chunkSize*LENGTH_DOCS],chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice,stream8));
gpuErrchk(cudaStreamSynchronize(stream5));
gpuErrchk(cudaStreamSynchronize(stream6));
gpuErrchk(cudaStreamSynchronize(stream7));
gpuErrchk(cudaStreamSynchronize(stream8));
chrono::time_point<chrono::system_clock> start = chrono::system_clock::now();
// NOTE(review): each launch requests LENGTH_DOCS (10000) threads per block,
// above the 1024-thread limit, so these launches fail; fixing that needs the
// kernel's doc2index mapping reworked, which is out of scope here.
cudaSetDevice(0);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream1>>>(d_sparsemat1,d_cosinematrix1,LENGTH_DICTIONARY,LENGTH_DOCS);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream2>>>(d_sparsemat1,d_cosinematrix2,LENGTH_DICTIONARY,LENGTH_DOCS);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream3>>>(d_sparsemat1,d_cosinematrix3,LENGTH_DICTIONARY,LENGTH_DOCS);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream4>>>(d_sparsemat1,d_cosinematrix4,LENGTH_DICTIONARY,LENGTH_DOCS);
// FIX: these result copies are device-to-host; the original passed
// cudaMemcpyHostToDevice for all eight of them.
gpuErrchk(cudaMemcpyAsync(&cosinematrix[0*chunkSize*LENGTH_DOCS],d_cosinematrix1,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream1));
gpuErrchk(cudaMemcpyAsync(&cosinematrix[1*chunkSize*LENGTH_DOCS],d_cosinematrix2,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream2));
gpuErrchk(cudaMemcpyAsync(&cosinematrix[2*chunkSize*LENGTH_DOCS],d_cosinematrix3,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream3));
gpuErrchk(cudaMemcpyAsync(&cosinematrix[3*chunkSize*LENGTH_DOCS],d_cosinematrix4,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream4));
cudaSetDevice(1);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream5>>>(d_sparsemat2,d_cosinematrix5,LENGTH_DICTIONARY,LENGTH_DOCS);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream6>>>(d_sparsemat2,d_cosinematrix6,LENGTH_DICTIONARY,LENGTH_DOCS);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream7>>>(d_sparsemat2,d_cosinematrix7,LENGTH_DICTIONARY,LENGTH_DOCS);
cosine_similarity<<<chunkSize,LENGTH_DOCS,0,stream8>>>(d_sparsemat2,d_cosinematrix8,LENGTH_DICTIONARY,LENGTH_DOCS);
gpuErrchk(cudaMemcpyAsync(&cosinematrix[4*chunkSize*LENGTH_DOCS],d_cosinematrix5,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream5));
gpuErrchk(cudaMemcpyAsync(&cosinematrix[5*chunkSize*LENGTH_DOCS],d_cosinematrix6,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream6));
gpuErrchk(cudaMemcpyAsync(&cosinematrix[6*chunkSize*LENGTH_DOCS],d_cosinematrix7,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream7));
gpuErrchk(cudaMemcpyAsync(&cosinematrix[7*chunkSize*LENGTH_DOCS],d_cosinematrix8,chunkSize*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost,stream8));
cudaSetDevice(0);
gpuErrchk(cudaStreamSynchronize(stream1));
gpuErrchk(cudaStreamSynchronize(stream2));
gpuErrchk(cudaStreamSynchronize(stream3));
gpuErrchk(cudaStreamSynchronize(stream4));
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaStreamDestroy(stream4);
cudaSetDevice(1);
gpuErrchk(cudaStreamSynchronize(stream5));
gpuErrchk(cudaStreamSynchronize(stream6));
gpuErrchk(cudaStreamSynchronize(stream7));
gpuErrchk(cudaStreamSynchronize(stream8));
cudaStreamDestroy(stream5);
cudaStreamDestroy(stream6);
cudaStreamDestroy(stream7);
cudaStreamDestroy(stream8);
chrono::time_point<chrono::system_clock> end = chrono::system_clock::now();
chrono::duration<double> elapsed_sec = end - start;
double count_sec = elapsed_sec.count();
gpuErrchk( cudaDeviceSynchronize());
cout<<"Time(cosine_similarity_calculations/sec): "<<(LENGTH_DOCS*LENGTH_DOCS)/count_sec<<endl;
// For each document, report its nearest neighbour by cosine similarity.
for(int i=0;i<LENGTH_DOCS;i++)
{
int similardoc = findIndexofHighestSimilarity(&cosinematrix[i*LENGTH_DOCS]);
cout<<"doc "<<i<<" is similart to doc "<<similardoc<<endl;
}
// FIX: pinned memory from cudaMallocHost must be released with
// cudaFreeHost; the original called free(), which is undefined behaviour.
cudaFreeHost(sparsemat);
cudaFreeHost(cosinematrix);
cudaFree(d_sparsemat1);
cudaFree(d_sparsemat2);
cudaFree(d_cosinematrix1);
cudaFree(d_cosinematrix2);
cudaFree(d_cosinematrix3);
cudaFree(d_cosinematrix4);
cudaFree(d_cosinematrix5);
cudaFree(d_cosinematrix6);
cudaFree(d_cosinematrix7);
cudaFree(d_cosinematrix8);
}
|
14,536 | #include<stdio.h>
// Device-side demo kernel: every launched thread prints its full 3-D
// thread and block coordinates via device printf.
__global__ void hello_world() {
    printf("threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d <-> blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d \n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z);
}
// Launches hello_world on 4 blocks of 2 threads and waits for the
// device printf output to flush before exiting.
int main() {
    dim3 grid(4, 1, 1);    // number of blocks in the grid
    dim3 block(2, 1, 1);   // threads per block
    // BUGFIX: the launch configuration order is <<<gridDim, blockDim>>>.
    // The original passed <<<block, grid>>>, silently launching 2 blocks
    // of 4 threads instead of the intended 4 blocks of 2 threads.
    hello_world<<<grid, block>>>();
    cudaDeviceSynchronize();   // required so buffered device printf output appears
    cudaDeviceReset();
    return 0;
}
// Per-thread carry pass for building leveled bitmaps over a JSON-like
// buffer: each thread scans its 64-character-aligned slice of `file`,
// accumulating the net change in {}/[] nesting depth while skipping any
// character the per-64-char bitmaps in `string_index` mark as inside a
// string literal. The per-thread depth delta is written to
// level_carry_index[thread]; a later pass can prefix-sum these carries
// into absolute nesting levels.
14,537 | __global__ void create_leveled_bitmaps_carry_index(char *file, long n, long *string_index, char *level_carry_index) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// We want to always calculate on 64-character boundaries, to match what
// we did in previous steps.
long normal_chars_per_thread = (n+stride-1) / stride;
long chars_per_thread = ((normal_chars_per_thread + 64 - 1) / 64) * 64;
long start = index * chars_per_thread;
long end = start + chars_per_thread;
// Temporary variable for storing the current string index
long strings = 0;
// Net depth change over this slice; may be negative, hence signed char.
signed char level = 0;
for (long i = start; i < end && i < n; i += 1) {
long offsetInBlock = i % 64;
// At the start of each boundary (including the first), set the string characters
if (offsetInBlock == 0) {
strings = string_index[i / 64];
}
// Do not process the character if we're in a string
if ((strings & (1L << offsetInBlock)) != 0) {
continue;
}
char value = file[i];
if (value == '{' || value == '[') {
level++;
} else if (value == '}' || value == ']') {
level--;
}
}
level_carry_index[index] = level;
}
|
14,538 | #include <iostream>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include<fstream>
#include<sstream>
#include<vector>
using namespace std;
// Reads a comma-separated list of integers from file `fname`.
// On return, *A points to a new[]-allocated array holding the values and
// *n holds the count (caller owns and must delete[] the array).
// BUGFIX: the original stored std::string::npos in an `int`
// (`int nextidx = string::npos + 1;`) and compared the int against npos;
// that only worked by accident of two's-complement conversion. Indices
// are now kept as std::string::size_type throughout.
void cdf_intreader(char* fname,int** A, int* n){
    std::ifstream F(fname);
    std::stringstream buf;
    buf << F.rdbuf();
    std::string S(buf.str());
    std::vector<int> Av;
    std::string::size_type startpos = 0;
    // Split on ',' — the final token (no trailing comma) ends the loop.
    while (startpos <= S.length()) {
        std::string::size_type comma = S.find(',', startpos);
        std::string token = (comma == std::string::npos)
            ? S.substr(startpos)
            : S.substr(startpos, comma - startpos);
        Av.push_back(atoi(token.c_str()));
        if (comma == std::string::npos) break;
        startpos = comma + 1;
    }
    *n = (int)Av.size();
    *A = new int[Av.size()];
    for (size_t i = 0; i < Av.size(); i++) {
        (*A)[i] = Av[i];
    }
}
// Writes seq (length n) to fname as comma-separated values followed by a
// newline. Also shamelessly copied from Dr. Eicholz...
// BUGFIX: guards n <= 0 — the original unconditionally wrote seq[n-1],
// which is out-of-bounds for an empty sequence.
void cdf_write_seq(char* fname, int* seq, int n){
    std::ofstream F(fname);
    if (n <= 0) return;   // nothing to write; still truncates/creates the file
    for (int i = 0; i < n - 1; i++) {
        F << seq[i] << ",";
    }
    F << seq[n - 1] << std::endl;
}
//This section runs on the GPUs
// One thread per adjacent pair: `length` rounds of odd-even transposition
// sort, where thread `id` conditionally swaps arr[id] and arr[id+1] on
// alternating parity phases.
// NOTE(review): there is no __syncthreads() between phases and no
// synchronization at all across blocks, so concurrent phases can race
// whenever the launch uses more than one block (as main() does).
// Confirm results against a reference sort before trusting large inputs.
__global__ void kernel(int* arr, int length){
//What is my ID?
int id = blockIdx.x * blockDim.x + threadIdx.x;
//If we're going to access something outside the array, exit
if(id >= length-1) return;
//Odd/even transpose elements in the list, in parallel (avoiding accessing the same memory)
for(int j = 0; j < length; j++){
int temp;
//If I'm going to perform a swap this round, swap!
if((j % 2 == 0 && id % 2 == 0) || (j % 2 != 0 && id % 2 != 0)){
if(arr[id] > arr[id+1]){
temp = arr[id];
arr[id] = arr[id+1];
arr[id+1] = temp;
}
}
}
}
//Main program
// Entry point: reads a comma-separated integer list from argv[1], sorts
// it on the GPU with the odd-even transposition kernel above, and writes
// the result to argv[2]. Lists shorter than 20 entries are also echoed
// to stdout before and after sorting.
int main(int argc, char** argv){
//Process input files
if(argc < 3){
cerr << "Please provide input and output filenames\n";
abort();
}
char* in_file=argv[1];
char* out_file=argv[2];
int* A; //list of integers to be sorted
int n; //size of the list of integers
//Bring data in from txt file
cdf_intreader(in_file,&A,&n);
//Print out initial data if small enough
if(n < 20){
cout << "Input list is: ";
for(int i = 0; i < n-1; i++){
cout << A[i] << ",";
}
cout << A[n-1] << endl;
}
//How much data is in each thread?
int bytes = n * sizeof(int);
//Create pointer to device array
int *deviceArray;
//Create the array on the GPU and copy data to it's memory
cudaMalloc((void**)&deviceArray, bytes);
cudaMemcpy(deviceArray, A, bytes, cudaMemcpyHostToDevice);
//How many threads and blocks per thread will we have?
// NOTE(review): threads == n/2 is 0 when n < 2, which makes the
// n/threads division in the launch below divide by zero — guard tiny
// inputs before shipping.
int threads = n/2;
//Launch kernel on the GPU
kernel<<<n/threads+1,threads>>>(deviceArray,n);
//Gather data back from processors
// NOTE(review): no error check after the launch; the blocking cudaMemcpy
// below does synchronize, but launch-config errors are silently dropped.
cudaMemcpy(A, deviceArray, bytes, cudaMemcpyDeviceToHost);
//Print output
if(n < 20){
cout << "Sorted list is: ";
for(int i = 0; i < n-1; i++){
cout << A[i] << ",";
}
cout << A[n-1] << endl;
}
//Write to output file
cdf_write_seq(out_file,A,n);
//Deallocate the two arrays
cudaFree(deviceArray);
//Exit from the calling program
return 0;
}
14,539 | #include "includes.h"
// Box blur: each output pixel becomes the mean of the
// (2*BLUR_SIZE+1)^2 neighborhood around it, clipped at image borders.
// One thread per pixel over a 2-D grid.
// NOTE(review): the accumulator is an `int` although `in` is float*, so
// fractional pixel values are truncated on every add; the final
// (unsigned char) cast likewise assumes pixel values fit 0..255 even
// though `out` is float*. Presumably the buffers hold byte-valued image
// data — confirm against the caller.
__global__ void blurKernel(float *out, float *in, int width, int height) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height) {
int pixVal = 0;
int pixels = 0;
// Get the average of the surrounding BLUR_SIZE x BLUR_SIZE box
for (int blurrow = -BLUR_SIZE; blurrow < BLUR_SIZE + 1; ++blurrow) {
for (int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE + 1; ++blurcol) {
int currow = row + blurrow;
int curcol = col + blurcol;
// Verify we have a valid image pixel
if (currow > -1 && currow < height && curcol > -1 &&
curcol < width) {
pixVal += in[currow * width + curcol];
pixels++; // Keep track of number of pixels in the avg
}
}
}
// Write our new pixel value out
out[row * width + col] = (unsigned char)(pixVal / pixels);
}
}
14,540 | /*
============================================================================
Name : sobel1D.cu
Author : Taru Doodi
Version : v1
Contact : tarudoodi@ufl.edu / tarudoodi@gmail.com
Copyright : Your copyright notice
Description : CUDA code for GPU K20x, specifically coded for [3x3] gradient masks.
runs for image sizes 2x2 to 2048x2048 and stores the timing data in .csv file.
============================================================================
*/
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
// 3x3 Sobel edge detector over an N x N image stored row-major in a
// flat array; one thread per pixel. Interior rows (the N<=pixel<N*(N-1)
// guard excludes the first and last rows) get the clamped gradient
// magnitude; excluded pixels are copied through unchanged.
// NOTE(review): the guard only excludes the top and bottom ROWS; pixels
// in the first/last COLUMN still read pixel-1 / pixel+1, which wrap into
// the neighboring rows, so edge-column outputs are slightly wrong.
__global__ void sobel(int *d_input, int *d_output, long int N)
{
//__shared__ int smBytes[];
int idx = threadIdx.x;
int bidx = blockIdx.x;
float sobel_x,sobel_y;
int pixel = bidx*blockDim.x + idx;
if(N<=pixel && pixel<N*(N-1))
{
// the gradient in x direction
sobel_x = (float) (1*(d_input[pixel -1 -N]) + 2*(d_input[pixel-N]) +1*(d_input[pixel-N+1]) - 1*(d_input[pixel +N-1]) - 2*(d_input[pixel+N]) - 1*(d_input[pixel+N+1]));
// the gradient in y direction
sobel_y = (float) (1*(d_input[pixel -1 -N]) - 1*(d_input[pixel -N+1]) + 2*(d_input[pixel-1]) - 2*(d_input[pixel +1]) + 1*(d_input[pixel -1 +N]) - 1*(d_input[pixel +1 +N]));
/*
// the gradient in x direction
sobel_x = (double)( 1*(d_input[(idx-1)*N + idy-1]) +2*(d_input[idy-1 +idx*N]) +1*(d_input[idy-1 +N*(idx+1)]) - 1*(d_input[idy+1+N*(idx-1)]) - 2*(d_input[idy+1 + N*idx]) - 1*(d_input[idy+1 + N*(idx+1)]));
// the gradient in y direction
sobel_y = (double) (1*(d_input[idy-1 + N*(idx-1)]) - 1*(d_input[idy-1 +N*(idx+1)]) + 2*(d_input[idy + N*(idx-1)]) - 2*(d_input[idy +N*(idx+1)]) + 1*(d_input[idy+1 +N*(idx-1)]) - 1*(d_input[idy+1 + N*(idx+1)]));
*/
d_output[pixel] = (int) sqrt((sobel_x*sobel_x) + (sobel_y*sobel_y));
//d_output[pixel] =(int) hypot(sobel_x, sobel_y);// doesnt make any difference
// Clamp the magnitude to the 8-bit pixel range.
if (d_output[pixel] > 255)
{
(d_output[pixel]) = 255;
}
}
else
{
d_output[pixel] = d_input[pixel];
}
}
/* Initializing the image matrix */
/* Fills the N*N image buffer with pseudo-random pixel intensities in
   the range [0, 254] (rand() % 255). */
void initializeImage(int *d_input,int N)
{
	const int numPixels = N * N;
	for (int idx = 0; idx < numPixels; ++idx)
	{
		d_input[idx] = (int)(rand() % 255);
	}
}
/* Returns the current wall-clock time in seconds, at gettimeofday
   (microsecond) resolution. */
double timerval ()
{
	struct timeval now;
	gettimeofday(&now, NULL);
	return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
// Benchmarks the Sobel kernel for image sizes N = 2^2 .. 2^12, running
// each size 1000 times, and appends the per-size timings (seconds) to
// sobelParallelOutputfile.csv.
// BUGFIXES vs. the original:
//  * cudaEventSynchronize(start) was called BEFORE the event was ever
//    recorded (a no-op at best);
//  * cudaEventRecord(end) was issued AFTER cudaEventSynchronize(end), so
//    the elapsed time did not reliably cover the kernel executions.
//    Events are now recorded first, then synchronized.
//  * the per-iteration cudaThreadSynchronize() inside the timing loop was
//    removed (same-stream kernels already serialize; the deprecated call
//    only added launch latency to the measurement).
int main()
{
	int i;                         // kernel-repetition loop counter
	int m = 0;                     // number of timing samples collected
	int N = 8;                     // rows/columns of the (square) image
	int *h_input, *h_output;       // host image buffers
	int *d_output, *d_input;       // device image buffers
	int blockNum, threadNum;
	cudaEvent_t start, end;        // GPU timing events
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float time;
	const int kStart = 2;
	const int kStop = 12;
	double seconds[kStop - kStart + 1];  // one slot per image size
	int k = 0;
	for (k = kStart; k <= kStop; k++)
	{
		N = pow(2, k);
		h_input = (int*)malloc(N * N * sizeof(int));
		h_output = (int*)malloc(N * N * sizeof(int));
		// allocate GPU memory
		cudaMalloc((void**)&d_input, (N) * (N) * sizeof(int));
		cudaMalloc((void**)&d_output, (N) * (N) * sizeof(int));
		// launch configuration: one block for tiny images, otherwise
		// 32 threads per block covering all N*N pixels
		if (N <= 4)
		{
			blockNum = 1;
			threadNum = N * N;
		}
		else
		{
			blockNum = N * N / 32;
			threadNum = 32;
		}
		initializeImage(h_input, N);
		// transfer the array to the GPU
		cudaMemcpy(d_input, h_input, N * N * sizeof(int), cudaMemcpyHostToDevice);
		cudaDeviceSynchronize();
		cudaEventRecord(start, 0);
		// launch the kernel 1000 times so the average is measurable
		for (i = 0; i < 1000; i++)
		{
			sobel<<<blockNum, threadNum>>>(d_input, d_output, N);
		}
		cudaEventRecord(end, 0);
		cudaEventSynchronize(end);
		// copy back the result array to the CPU
		cudaMemcpy(h_output, d_output, N * N * sizeof(int), cudaMemcpyDeviceToHost);
		cudaEventElapsedTime(&time, start, end);
		seconds[m++] = time / 1000;   // ms -> s
		cudaFree(d_input);
		cudaFree(d_output);
		free(h_input);
		free(h_output);
	}
	// append the timings to the CSV file
	FILE *sobelParallelOutputfile;
	sobelParallelOutputfile = fopen("sobelParallelOutputfile.csv","a+");
	if(sobelParallelOutputfile == NULL)
	{
		printf("Could not open file\n");
		return EXIT_FAILURE;
	}
	fprintf(sobelParallelOutputfile,"N, Time taken \n");
	for(i=0;i<m;i++)
	{
		fprintf(sobelParallelOutputfile,"%lf,%f\n", pow(2,(i+kStart)), seconds[i]);
	}
	fclose(sobelParallelOutputfile); //Closing the file
	cudaEventDestroy(start);
	cudaEventDestroy(end);
	return EXIT_SUCCESS;
}
|
14,541 | // tests cuEventCreate
#include <iostream>
#include <memory>
using namespace std;
#include <cuda.h>
// Busy-work kernel used to keep the GPU occupied for the event tests:
// every launched thread walks the whole array adding `value` to each
// element (the concurrent unsynchronized updates are intentional — only
// the elapsed time matters, not the resulting data).
__global__ void longKernel(float *data, int N, float value) {
    for (int idx = 0; idx < N; ++idx) {
        data[idx] += value;
    }
}
// Exercises driver-API events (cuEventCreate/Record/Synchronize) around
// runtime-API kernel launches: queue a long kernel, then repeatedly
// launch + record + wait on an event.
int main(int argc, char *argv[]) {
    int N = 202400; // * 1024;
    float *hostfloats = new float[N];   // host-side scratch (unused by the kernels)
    float *gpufloats;
    cudaMalloc((void **)&gpufloats, N * sizeof(float));
    longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1)>>>(gpufloats, N, 3.0f);
    cout << "queued kernel x" << endl;
    CUevent event;
    cuEventCreate(&event, CU_EVENT_DISABLE_TIMING);
    CUstream stream;
    cuStreamCreate(&stream, 0);
    cout << "created event" << endl;
    for (int i = 0; i < 5; i++) {
        if (i % 10 == 0) {
            cout << "i " << i << endl;
        }
        longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1)>>>(gpufloats, N, 3.0f);
        // Record into the user stream and block until the event completes.
        // NOTE(review): the kernel runs on the legacy default stream, which
        // is synchronizing with `stream`, so the wait does cover it — but
        // only because of legacy default-stream semantics; confirm if the
        // build ever switches to per-thread default streams.
        cuEventRecord(event, stream);
        cuEventSynchronize(event);
    }
    cuStreamDestroy(stream);
    cuEventDestroy(event);
    cudaFree(gpufloats);
    delete[] hostfloats;   // BUGFIX: the original leaked this allocation
    cout << "finished" << endl;
    return 0;
}
|
14,542 | #include "includes.h"
// Reduces dMatrix (dSize doubles) into *dMean: each block computes a
// shared-memory tree reduction of its grid-stride partial sums, then
// thread 0 adds the block total into *dMean under a spin-lock (d_mutex)
// and finally divides the running total by dSize.
// NOTE(review): the final `*dMean = dMean[0]/dSize;` runs OUTSIDE the
// lock and once PER BLOCK, so with gridDim.x > 1 one block can divide
// the sum before other blocks have contributed (and later blocks scale
// an already-divided value). Safe only for single-block launches —
// otherwise accumulate the sum here and divide on the host.
__global__ void meanMatrix(double *dMatrix, double *dMean, int dSize, int *d_mutex){
__shared__ double cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
double temp = 0;
// Grid-stride accumulation of this thread's elements.
while (tid < dSize) {
temp += dMatrix[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// Shared-memory tree reduction (assumes blockDim.x is a power of two).
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if(cacheIndex == 0){
while(atomicCAS(d_mutex,0,1) != 0); //lock
*dMean += cache[0];
atomicExch(d_mutex, 0); //unlock
*dMean = dMean[0]/dSize;
}
}
14,543 | #include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include "time.h"
#include "gpu_track.cuh"
#include "cufft.h"
#include "device_functions.h"
using namespace std;
#define THREADS_NUM 64
#define BLOCKS_NUM 1
__global__ void cu_compHist(int h, int w, int sBin, int oBin, int oGran, float *G, int *O, float *hist);
__global__ void cu_compnorm(int oBin, int nb, float* hist, float* norm);
__global__ void cu_compmxH(float* mxH, float* norm, int* out,float* hist, int hb, int nb,int oBin, const int outp);
extern "C" void cu_compGradImg1( float *I,float* I_tmp, float *G, int *O, int h, int w, int nCh, int oBin );
extern "C" __global__ void SaXPY(float a, float* X_d, float* Y_d, int n)
{
if (threadIdx.x < n)
Y_d[threadIdx.x] = a * X_d[threadIdx.x] + Y_d[threadIdx.x];
}
extern "C" void print_hello_world()
{
cout << "hello world version 2" << endl;
}
//__global__ void cu_compHist(int h, int w, int sBin, int oBin, int oGran, double *G, int *O, double *hist)
// Builds the HOG gradient histogram: each processed pixel spreads its
// gradient magnitude G, tri-linearly interpolated in (x, y, orientation),
// over the two nearest orientation bins and four nearest cells. Launch
// shape is hard-coded: 792 threads per block, 11 pixels per thread, and
// a shared histogram of 34*34 cells x 9 orientation bins.
// NOTE(review): the `+=` scatters into hist_cache are NOT atomic, so
// threads whose pixels land in the same cell/bin race — confirm the
// accuracy loss is acceptable for this tracker.
__global__ void cu_compHist(int h, int w, int sBin, int oBin, int oGran, float *G, int *O, float *hist)
{
// hb/wb: cell counts along height/width; w0: usable pixel width; nb:
// total number of cells. (The original comment here was mojibake; this
// is a best-effort translation.)
// const int hb=h/sBin, wb=w/sBin, h0=hb*sBin-4, w0=wb*sBin-4, nb=wb*hb;
const int hb=h/sBin, wb=w/sBin, w0=wb*sBin-4, nb=wb*hb;
__shared__ float hist_cache[34*34*9];
const int tid = blockIdx.x*792+threadIdx.x;
// The first 612 threads zero the shared histogram (612*17 == 34*34*9).
if(tid<612)
{
for (int qq = 0; qq<17; qq++)
{
hist_cache [tid*17+qq] = 0;
}
}
// hist_cache[0] = 0.0;
__syncthreads();
// if(hist_cache[0]==0){
for (int p=0; p<11; p++){
int xy = tid+p*792*2;
// int xy = tid*11 + p;
int x = xy/w0+2;
int y = xy%w0+2;
// Gradient magnitude and fractional orientation bin of this pixel.
float v=float(*(G+x*h+y)); float o = float(*(O+x*h+y))/float(oGran);
int o0=int(o); int o1=(o0+1)%oBin; float od0=o-o0, od1=1.0-od0;
float xb = (float(x)+.5)/float(sBin)-0.5;
int xb0=int(xb);
float yb = (float(y)+.5)/float(sBin)-0.5;
int yb0=int(yb);
float xd0=xb-xb0, xd1=1.0-xd0; float yd0=yb-yb0, yd1=1.0-yd0;
float *dst = hist_cache + xb0*hb + yb0;
// Tri-linear scatter into the 2x2 neighboring cells for both bins.
*(dst+o0*nb) += od1*xd1*yd1*v;
*(dst+hb+o0*nb) += od1*xd0*yd1*v;
*(dst+1+o0*nb) += od1*xd1*yd0*v;
*(dst+hb+1+o0*nb) += od1*xd0*yd0*v;
*(dst+o1*nb) += od0*xd1*yd1*v;
*(dst+hb+o1*nb) += od0*xd0*yd1*v;
*(dst+1+o1*nb) += od0*xd1*yd0*v;
*(dst+hb+1+o1*nb) += od0*xd0*yd0*v;
}
// }
__syncthreads();
// Flush the shared histogram to global memory.
// if (blockIdx.x == 0){
if(tid<612)
{
for (int qq = 0; qq<17; qq++)
{
*(hist + tid*17+qq) = hist_cache[tid*17+qq];
// *(hist + tid*17+qq) = *(hist_cache+tid*17+qq);
}
}
// }
/*
__syncthreads();
if (blockIdx.x == 1){
if((tid-792)<612)
{
for (int qq = 0; qq<17; qq++)
{
*(hist + (tid-792)*17+qq) += hist_cache[(tid-792)*17+qq];
}
}
}
*/
}
// oBin = 9
// nb = 34*34
// Per-cell squared L2 energy of the HOG histogram:
//   norm[cell] += sum over orientation bins o of hist[cell + o*nb]^2.
// Expected launch: a single block (cu_hog launches <<<1, 612>>>); the
// grid-stride-by-blockDim loop keeps it correct for any thread count.
// BUGFIX: the original iterated oi over all oBin*nb entries and wrote
// norm[oi], but cu_hog() allocates `norm` with only nb floats — writes
// past the buffer and the per-cell sum over orientations (shown in the
// commented-out CPU reference that accompanied it) never happened. This
// version matches what the downstream cu_compmxH consumer expects:
// norm indexed per cell with stride hb.
__global__ void cu_compnorm(int oBin, int nb, float* hist, float* norm)
{
	const int tid = threadIdx.x;
	for (int cell = tid; cell < nb; cell += blockDim.x)
	{
		float acc = norm[cell];
		for (int o = 0; o < oBin; o++)
		{
			float hv = hist[cell + o * nb];
			acc += hv * hv;
		}
		norm[cell] = acc;
	}
}
// Normalizes the cell histograms into the final HOG feature block: for
// each interior cell (x, y), computes an L2 normalizer over its 2x2 cell
// neighborhood (norm is per-cell energy, stride hb) and writes the
// clipped (<= 0.2) normalized value for each of the oBin orientation
// bins into mxH, one plane (stride outp) per bin.
// NOTE(review): x = tid/32, y = tid%32 hard-assumes a 32x32 cell grid
// and a 1024-thread launch; threads mapping outside out[0] x out[1] are
// not guarded. `eps` comes from an included header.
__global__ void cu_compmxH(float* mxH, float* norm, int* out,float* hist, int hb, int nb,int oBin, const int outp)
{
// double *H = mxH;
const int tid = threadIdx.x;
// const int block_size = (out[1]*out[0]) / (THREADS_NUM-1);
// for( int x=0; x<out[1]; x++ )
// for( int y=0; y<out[0]; y++ ) {
// for( int xy=tid*block_size; xy<((tid==(THREADS_NUM-1)) ? out[1]*out[0]:(tid+1)*block_size); xy++ ) {
// int x = xy/out[1];
// int y = xy%out[1];
int x = tid/32;
int y = tid%32;
float *dst=mxH+x*out[0]+y;
float *src, *p, n;
p = norm + (x)*hb + (y);
n = 1.0/sqrt(*p + *(p+1) + *(p+hb) + *(p+hb+1) + eps);
src = hist + (x+1)*hb + (y+1);
for( int o=0; o<oBin; o++ ) {
*dst=min(float(*src*n), 0.2); dst+=outp; src+=nb;
}
// }
}
// Computes per-pixel gradient magnitude G and quantized orientation O
// (90 bins over [0, PI)) for the input image I, processed in two 68-row
// slabs staged through shared memory with mirrored border rows/columns.
// Hard-coded for a single block of 782 threads and a 136x136 image.
// NOTE(review): the border-mirroring code mixes h_tmp and w_tmp as the
// row stride (the `mem_block` branch uses h_tmp where its `else` twin
// uses w_tmp) — the two coincide only for square inputs; verify before
// using non-square images.
__global__ void cu_compGradImg_part(int oBin, int nCh, float* I, float* G, int* O, int h, int w)
{
const int tid = blockIdx.x * 782 + threadIdx.x;
int h_tmp = h+2;
int w_tmp = w+2;
int x_tmp, y_tmp;
// Padded image slab (interior + 1-pixel border on each side).
__shared__ float I_tmp[138*70];
int mem_block_count = 2;
for ( int mem_block =0; mem_block<mem_block_count; mem_block++)
{
// Stage this slab's interior pixels into shared memory.
int tmp_block = 136*69/(782);
int start_x = mem_block*68;
for (int xy=(tid*tmp_block); xy<((tid+1)*tmp_block); xy++)
{
int x = 1 + xy/136;
int y = 1 + xy%136;
I_tmp[x*w_tmp+y] = I[(x-1+start_x)*w+y-1];
}
__syncthreads();
// Fill the top/bottom border row by linear extrapolation.
if (tid <136)
{
x_tmp = mem_block?69:0;
int y = tid+1;
if(mem_block)
I_tmp[x_tmp*h_tmp+y] = 2*I_tmp[(x_tmp-1)*h_tmp+y]-I_tmp[(x_tmp-2)*h_tmp+y];
else
I_tmp[x_tmp*w_tmp+y] = 2*I_tmp[(x_tmp+1)*w_tmp+y]-I_tmp[(x_tmp+2)*w_tmp+y];
}
// Fill the left border column.
if((tid>=136)&&(tid<205)){
y_tmp =0;
int x=tid-136+1;
I_tmp[x*w_tmp+y_tmp] = 2*I_tmp[x*w_tmp+y_tmp+1] - I_tmp[x*w_tmp+y_tmp+2];
}
// Fill the right border column.
if((tid>=205)&&(tid<274)){
y_tmp =w+1;
int x = tid-205+1;
I_tmp[x*w_tmp+y_tmp] = 2*I_tmp[x*w_tmp+y_tmp-1] - I_tmp[x*w_tmp+y_tmp-2];
}
__syncthreads();
// const int block_size = ((w-2)*(h-2))/(1024-1);
const int block_size = ((w)*(68))/(544);
if(tid<544){
// compute gradients for each channel, pick strongest gradient
int y, x;
float *I1, v, dx, dy, dx1, dy1, v1;
// centered differences on interior points
for (int xy = tid*block_size; xy<(tid+1)*block_size; xy++)
{
x = xy/(w);
y = xy%(w);
I1 = I_tmp + (x+1)*(w+2) + y+1;
dy1 = (*(I1+1)-*(I1-1));
dx1 = (*(I1+w+2)-*(I1-w-2));
v1=dx1*dx1+dy1*dy1;
v=v1;
dx=dx1;
dy=dy1;
*(G+ (start_x+x)*w+y)=sqrt(v);
// Quantize |atan2| into one of 90 orientation bins.
float o = fabs(atan2(dy,dx));
int index = (int)((float)(o)/(PI/90)+0.5);
index %= 90;
*(O+(start_x+x)*w+y)=index;
}
}
__syncthreads();
}
}
// compute HOG features
extern "C" float* cu_hog( float *I, int h, int w, int nCh, int sBin, int oBin, int oGran ) {
// compute gradient magnitude (*2) and orientation for each location in I
long begin=clock();
// const int hb=h/sBin, wb=w/sBin, h0=hb*sBin, w0=wb*sBin, nb=wb*hb;
const int hb=h/sBin, wb=w/sBin, nb=wb*hb;
float *G;
cudaMalloc( &G, h * w * sizeof( float ) );
cudaMemset(G,0,sizeof(float)*h*w);
int *O;
cudaMalloc( &O, h*w*sizeof( int ));
cudaMemset(O,0,sizeof(int)*h*w);
//IͼƬnch̶
cu_compGradImg_part<<<1,782>>>(oBin, nCh, I, G, O, h, w);
begin = clock();
float *hist;
cudaMalloc(&hist,nb*oBin*sizeof(float));
cudaMemset(hist,0,sizeof(float)*nb*oBin);
cu_compHist<<<1,792>>>(h,w, sBin,oBin,oGran, G, O, hist);
cudaFree(G);
cudaFree(O);
float *norm;
cudaMalloc(&norm,nb*sizeof(float));
cudaMemset(norm,0,sizeof(float)*nb);
cu_compnorm<<<BLOCKS_NUM,612>>>(oBin, nb, hist, norm);
// compute normalized values (4 different normalizations per block)
const int out[3] = { max(hb-2, 0), max(wb-2, 0), oBin*4 }; const int outp=out[0]*out[1];
int * gpu_out;
cudaMalloc(&gpu_out,3*sizeof(int));
cudaMemset(gpu_out,0,sizeof(int)*3);
cudaMemcpy(gpu_out, out, sizeof(int)*3,cudaMemcpyHostToDevice);
float *mxH;
cudaMalloc(&mxH,out[0]*out[1]*oBin*4*sizeof(float));
cudaMemset(mxH,0,sizeof(float)*out[0]*out[1]*oBin*4);
cu_compmxH<<<BLOCKS_NUM,1024>>>(mxH,norm, gpu_out, hist, hb, nb,oBin, outp);
cudaFree(hist);
cudaFree(norm);
cudaFree(gpu_out);
return mxH;
}
// Expands a real-valued buffer into interleaved complex layout
// (re, im) with every imaginary part zeroed, ready for cuFFT C2C input.
// Each thread fills 16 consecutive complex elements; the launch must
// cover width*height/16 threads. (width/height are unused here — the
// element count is implied by the launch shape.)
__global__ void cu_memcpy(float* cu_raw, float* odata, int width, int height)
{
	const int tid = blockIdx.x * 1024 + threadIdx.x;
	const int first = tid * 16;
	for (int km = first; km < first + 16; km++)
	{
		odata[2 * km] = cu_raw[km];
		odata[2 * km + 1] = 0;
	}
}
extern "C" void cu_compFFT_forward(float* raw_data, float* dst_data, int width, int height, int flag, cufftComplex *odata, cufftHandle plan, float* cu_raw)
{
// clock_t start = clock();
// cufftComplex * odata;
// long t1 = clock();
// float* return_data = new float[height*width*2];
/*
if( odata == NULL)
{
cout<<"odata is NULL"<<endl;
cout<<" mem size is "<< sizeof(cufftComplex)*height*width<<endl;
exit(0);
}
cout<<" mem size is "<< sizeof(cufftComplex)*height*width<<endl;
// cudaMalloc((void**)&odata,sizeof(cufftComplex)*height*width);
// cufftHandle plan;
int rc = cufftPlan2d(&plan,height,width,CUFFT_C2C);
if (rc != CUFFT_SUCCESS)
{
cout<< "rc from plan is "<<rc<<endl;
exit(0);
}
*/
// cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_FFTW_PADDING);
// cout<<"------------------------fft_prepare"<<clock()-t1<<endl;
// t1 = clock();
// cudaMemcpy (odata,raw_data,sizeof(cufftComplex)*height*width,cudaMemcpyHostToDevice);
// cufftExecC2C(plan,odata,odata,CUFFT_INVERSE );
if (flag==0)
{
// float* cu_raw;
// cudaMalloc((void**)&cu_raw,sizeof(float)*height*width);
cudaMemcpy (cu_raw,raw_data,sizeof(float)*height*width,cudaMemcpyHostToDevice);
cu_memcpy<<<1,1024>>>((float*)cu_raw,(float*)odata,width,height);
int rc = cufftExecC2C(plan,(cufftComplex*)odata,(cufftComplex*)odata,CUFFT_FORWARD );
if(rc!=CUFFT_SUCCESS)
{
cout<<"forward*****************************************************"<<endl;
cout<<"RC is "<<rc<<endl;
// exit(0);
}
// cudaFree(cu_raw);
// else
// cout<<"forward.........................."<<endl;
}
else if(flag == 1)
{
cudaMemcpy (odata,raw_data,sizeof(cufftComplex)*height*width,cudaMemcpyHostToDevice);
int rc = cufftExecC2C(plan,(cufftComplex*)odata,(cufftComplex*)odata,CUFFT_INVERSE );
if(rc!=CUFFT_SUCCESS)
{
cout<<"inverse*****************************************************"<<endl;
cout<<"RC is "<<rc<<endl;
// exit(0);
}
// else
// cout<<"inverse.........................."<<endl;
}
cudaMemcpy (dst_data,odata,sizeof(cufftComplex)*height*width,cudaMemcpyDeviceToHost);
// cout<<"_-----------------------fft exec"<<clock()-t1<<endl;
// t1 = clock();
// cudaDeviceSynchronize();
// cufftDestroy(plan);
// cudaFree(odata);
// cout<<"------------------------fft end"<<clock()-t1<<endl;
// cout<< " FFT_forward..............................."<< clock()-start <<endl;
}
// Gaussian correlation response: for every element of `xy`,
//   output[i] = exp( -max(0, (xx + yy - 2*xy[i]) / (w*h)) / sigma^2 ).
// Expected launch shape: <<<1, 1024>>>; each thread handles a contiguous
// chunk of (w*h)/1024 elements.
__global__ void cu_comp_gaussianKernel( float xx , float yy , float *xy , double sigma , float *output, int nWidth, int nHeigth ) {
	const int tid = blockIdx.x * 1024 + threadIdx.x;
	float sigma2 = sigma*sigma;
	float N = nWidth*nHeigth;
	int block_size = N/1024;
	const int first = tid * block_size;
	const int last = first + block_size;
	for( int index = first; index < last; index++ ) {
		float value = (xx + yy - 2*xy[index])/N;
		output[index] = exp(-max(0.0, value) / sigma2);
	}
}
extern "C" void cu_gaussianKernel( float xx , float yy , float *xy , double sigma , float *output, int nWidth, int nHeight )
{
float* cu_xy;
float* cu_output;
cudaMalloc((void**)&cu_xy,sizeof(float)*nHeight*nWidth);
cudaMalloc((void**)&cu_output,sizeof(float)*nHeight*nWidth);
cudaMemcpy (cu_xy,xy,sizeof(float)*nHeight*nWidth,cudaMemcpyHostToDevice);
cu_comp_gaussianKernel<<<1,1024>>>(xx,yy,cu_xy,sigma,cu_output,nWidth,nHeight);
cudaMemcpy (output,cu_output,sizeof(float)*nHeight*nWidth,cudaMemcpyDeviceToHost);
cudaFree(cu_xy);
cudaFree(cu_output);
}
|
14,544 | #include <limits>
/**
* Configuration indexes.
*/
#define KERNEL_0 conf[0]
#define KERNEL_1 conf[1]
#define X_IMAGE_SIZE conf[2]
#define X_FEATURE_SIZE conf[3]
#define X_ROW_SIZE conf[4]
#define N conf[5]
#define Y_IMAGE_SIZE conf[6]
#define Y_FEATURE_SIZE conf[7]
#define Y_ROW_SIZE conf[8]
/**
* Compute the kernel offset that correspond to the general index i.
* @param conf is the kernel configuration.
* @param i is the general index.
* @return the kernel offset.
*/
__device__ int compute_offset(int *conf, int i) {
// NOTE(review): parameter `i` is unused — the offset is derived solely
// from threadIdx/blockIdx, so the result only corresponds to the
// caller's `index` when the launch shape mirrors the output layout
// exactly (which activation()/inputs_gradients() assume).
return
threadIdx.x * X_IMAGE_SIZE +
blockIdx.x * X_FEATURE_SIZE +
blockIdx.y * X_ROW_SIZE * KERNEL_0 +
blockIdx.z * KERNEL_0;
}
/**
 * Compute the average value of the kernel window at the given offset.
 * (Despite the surrounding file's pooling terminology, this function
 * computes a mean, not a maximum.)
 * @param conf the kernel's configuration.
 * @param x the input activation.
 * @param offset the kernel offset.
 * @return the average value over the kernel window.
 */
__device__ float avg(int *conf, float *x, int offset) {
// Sum the KERNEL_0 x KERNEL_1 window starting at `offset`, then divide
// by the window size.
// NOTE(review): the window is indexed as `offset + j + k * X_ROW_SIZE`
// here, while inputs_gradients() uses `offset + j * X_ROW_SIZE + k`;
// for non-square kernels one of the two transposes the window — verify
// which orientation is intended.
float sum = 0;
for (int j = 0; j < KERNEL_0; j++) {
for (int k = 0; k < KERNEL_1; k++) {
int index = offset + j + k * X_ROW_SIZE;
sum += x[index];
}
}
return sum / (KERNEL_0 * KERNEL_1);
}
/**
 * Compute the activation of the average pooling layer.
 * @param conf the kernel's configuration.
 * @param x the input activation.
 * @param r the layer output activation, i.e. output buffer.
 * @return nothing.
 */
extern "C"
__global__ void activation(int *conf, float *x, float *r)
{
int index = threadIdx.x * Y_IMAGE_SIZE + blockIdx.x * Y_FEATURE_SIZE + blockIdx.y * Y_ROW_SIZE + blockIdx.z;
if (index < N) {
r[index] = avg(conf, x, compute_offset(conf, index));
}
}
/**
* Compute the gradient with respect to the inputs.
* @param conf the kernel's configuration.
* @param g the gradient with respect to the outputs.
* @param y the gradient with respect to the inputs, i.e. output buffer.
* @return nothing.
*/
extern "C"
__global__ void inputs_gradients(int *conf, float *g, float *y)
{
int index = threadIdx.x * Y_IMAGE_SIZE + blockIdx.x * Y_FEATURE_SIZE + blockIdx.y * Y_ROW_SIZE + blockIdx.z;
if (index < N) {
int offset = compute_offset(conf, index);
for (int j = 0; j < KERNEL_0; j++) {
for (int k = 0; k < KERNEL_1; k++) {
int i = offset + j * X_ROW_SIZE + k;
y[i] = g[index] / (KERNEL_0 * KERNEL_1);
}
}
}
}
|
14,545 | /* Block size X: 32 */
// Flux-corrected-transport horizontal limiter: one block per edge.
// Scales each vertical level's antidiffusive horizontal flux fct_adf_h
// by the most restrictive admissible fraction taken from the edge's two
// nodes — fct_plus limits for positive fluxes, fct_minus for negative.
// Levels are processed 32 at a time by the block's threads (blockDim.x
// is assumed to be 32, per the header comment above).
__global__ void fct_ale_b3_horizontal(const int maxLevels, const int * __restrict__ nLevels, const int * __restrict__ nodesPerEdge, const int * __restrict__ elementsPerEdge, double * __restrict__ fct_adf_h, const double * __restrict__ fct_plus, const double * __restrict__ fct_minus)
{
int levelBound = 0;
// Node/element indices are 1-based (Fortran-style), hence the -1.
const int nodeOne = (nodesPerEdge[blockIdx.x * 2] - 1) * maxLevels;
const int nodeTwo = (nodesPerEdge[blockIdx.x * 2 + 1] - 1) * maxLevels;
/* Compute the upper bound for the level */
const int elemOne = elementsPerEdge[blockIdx.x * 2];
const int elemTwo = elementsPerEdge[blockIdx.x * 2 + 1];
// elemTwo <= 0 marks a boundary edge with only one adjacent element.
if ( elemTwo > 0 )
{
levelBound = max(nLevels[elemOne - 1] - 1, nLevels[elemTwo - 1] - 1);
}
else
{
levelBound = max(nLevels[elemOne - 1] - 1, 0);
}
for ( int level = threadIdx.x; level < levelBound; level += 32 )
{
double flux = 0.0;
double ae_plus = 0.0;
double ae_minus = 0.0;
flux = fct_adf_h[(blockIdx.x * maxLevels) + level];
ae_plus = 1.0;
ae_minus = 1.0;
// Most restrictive limiter over the edge's two end nodes.
ae_plus = fmin(ae_plus, fct_plus[nodeOne + (level)]);
ae_minus = fmin(ae_minus, fct_plus[nodeTwo + (level)]);
ae_minus = fmin(ae_minus, fct_minus[nodeOne + (level)]);
ae_plus = fmin(ae_plus, fct_minus[nodeTwo + (level)]);
// signbit distinguishes positive (ae_plus) from negative (ae_minus)
// fluxes, including signed zeros.
if ( signbit(flux) == 0 )
{
flux *= ae_plus;
}
else
{
flux *= ae_minus;
}
fct_adf_h[(blockIdx.x * maxLevels) + level] = flux;
}
}
|
14,546 | //pass
//--gridDim=[1,1,1] --blockDim=[32,1,1]
// Stages one element per thread into dynamically-sized shared memory,
// scales it by the block's thread count, and writes it back out.
// Expected launch (per the GPUVerify annotations above): one block of
// 32 threads with blockDim.x * sizeof(float) bytes of dynamic shared
// memory supplied by the host.
__global__ void
testKernel(float *g_idata, float *g_odata)
{
    extern __shared__ float sdata[];   // sized by the host application
    const unsigned int tid = threadIdx.x;

    sdata[tid] = g_idata[tid];         // stage input in shared memory
    __syncthreads();

    sdata[tid] *= (float) blockDim.x;  // the "computation"
    __syncthreads();

    g_odata[tid] = sdata[tid];         // publish result to global memory
}
|
14,547 | //------------------------------------------------------------------------------
//
// A script that contains some useful functions for general purposes
//
// (c) P W Huang, Seagate Technology (2016). All rights reserved.
//
//------------------------------------------------------------------------------
#include <cstdlib>
// Only valid for 32-bit integer variables
// Ideal to be "unsigned int"
// Rounds a non-negative 32-bit integer up to the next power of two
// (0 -> 0, 1 -> 1, 5 -> 8, 8 -> 8); returns -1 for negative input.
// BUGFIX: the bit-smear is now done in unsigned arithmetic — the
// original right-shifted a negative intermediate (v-- makes 0 become
// -1), which is implementation-defined in C++.
// Results for v > 2^30 still overflow the int return, as before.
int Rnd_upto_pow2(int v){
    if (v < 0) return -1;
    unsigned int x = (unsigned int)v;
    x--;                 // for v == 0 this wraps; the smear + increment yields 0
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x++;
    return (int)x;
}
14,548 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<cuda.h>
#define BLOCK_DIM 16
__global__ void matrixMult(int *a,int *b,int *c,int width);
// Benchmarks GPU vs CPU square matrix multiplication for sizes
// N = 2*BLOCK_DIM .. 16*BLOCK_DIM, timing H2D copy, kernel, and D2H copy
// with CUDA events and verifying the GPU result against a CPU reference.
// NOTE(review): a/b/gpu_mul/cpu_mul are stack VLAs — at N = 256 the four
// arrays total about 1 MB of stack; larger BLOCK_DIM multiples risk
// stack overflow. Heap allocation would be safer.
int main(){
int curr=2;
int N=BLOCK_DIM*curr;
printf("------------------------------------------\n");
while(N<=BLOCK_DIM*16){
int a[N][N], b[N][N], gpu_mul[N][N],cpu_mul[N][N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
// Deterministic test pattern: a[i][j] = i+j, b[i][j] = i*j.
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
a[i][j]=i+j;
b[i][j]=i*j;
}
}
int size=N*N*sizeof(int);
cudaMalloc((void**) &dev_a,size);
cudaMalloc((void**) &dev_b,size);
cudaMalloc((void**) &dev_c,size);
// Time the host-to-device transfers.
cudaEvent_t startinit,endinit;
cudaEventCreate(&startinit);
cudaEventCreate(&endinit);
cudaEventRecord(startinit, 0);
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(endinit, 0);
cudaEventSynchronize(endinit);
cudaEventElapsedTime(&timeinit, startinit, endinit);
// Time the kernel itself.
cudaEvent_t gpu_start,gpu_end;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
dim3 dimBlock(BLOCK_DIM,BLOCK_DIM);
// NOTE(review): ceil() applied AFTER integer division is a no-op; it is
// exact here only because N is always a multiple of BLOCK_DIM.
dim3 dimGrid((int)ceil(N/dimBlock.x),(int)ceil(N/dimBlock.y));
matrixMult<<<dimGrid,dimBlock>>>(dev_a,dev_b,dev_c,N);
cudaDeviceSynchronize();
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end);
cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
// Time the device-to-host transfer of the result.
cudaEvent_t startindex,endindex;
cudaEventCreate(&startindex);
cudaEventCreate(&endindex);
cudaEventRecord(startindex, 0);
cudaMemcpy(gpu_mul,dev_c,size,cudaMemcpyDeviceToHost);
cudaEventRecord(endindex, 0);
cudaEventSynchronize(endindex);
cudaEventElapsedTime(&timeindex, startindex, endindex);
// CPU reference multiply, timed with clock().
clock_t cpu_start,cpu_end;
cpu_start=clock();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
int sum=0;
for(int k=0;k<N;k++){
sum+=a[i][k]*b[k][j];
}
cpu_mul[i][j]=sum;
}
}
cpu_end=clock();
// Convert event timings from milliseconds to seconds.
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with %d blocks and %d threads\n",(int)ceil(N/dimBlock.x)*(int)ceil(N/dimBlock.y),BLOCK_DIM*BLOCK_DIM);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
// Element-wise comparison of GPU result against the CPU reference.
int flag=1;
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
if(gpu_mul[i][j]!=cpu_mul[i][j]){
flag=0;
break;
}
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
printf("------------------------------------------\n");
curr++;
N=BLOCK_DIM*curr;
}
}
// Naive dense matrix multiply c = a * b for square width x width
// matrices in row-major layout; one thread computes one output element.
__global__ void matrixMult(int *a,int *b,int *c,int width){
	int col=threadIdx.x+blockDim.x*blockIdx.x;
	int row=threadIdx.y+blockDim.y*blockIdx.y;
	if(row>=width || col>=width)
		return;                   // guard threads mapped outside the matrix
	int acc=0;
	for(int k=0;k<width;k++){
		acc+=a[row*width+k]*b[k*width+col];
	}
	c[row*width+col]=acc;
}
|
14,549 |
#if defined(USE_THREADS_POSIX) && defined(HAVE_PTHREAD_H)
# include <pthread.h>
// Build probe: referencing pthread_self() forces the linker to resolve
// against the pthread library, verifying CUDA objects link with it.
// NOTE(review): pthread_t is an opaque type; static_cast<int> compiles
// only where pthread_t is an integral type — confirm for ported targets.
static int verify_linking_to_pthread_cuda()
{
return static_cast<int>(pthread_self());
}
#endif
|
14,550 | #include "includes.h"
// Exclusive (Hillis-Steele) prefix sum over one block of `numItems`
// luminance-histogram bins. Assumes a single-block launch with
// blockDim.x == numItems and numItems * sizeof(unsigned int) bytes of
// dynamic shared memory.
// BUGFIX: the original called __syncthreads() INSIDE the divergent
// `if ((tid - offset) >= 0)` branch — undefined behavior, since not all
// threads of the block reach the barrier — and wrote s_exScan[tid]
// while other threads could still be reading it. Fixed by the standard
// read / barrier / write / barrier pattern, with every thread reaching
// both barriers. (The loop bound is also tightened from <= numItems to
// < numItems; the extra doubling pass was a no-op.)
__global__ void lumHistExclusiveScan_kernel(unsigned int *d_out, unsigned int *d_in, int numItems)
{
	extern __shared__ unsigned int s_exScan[];
	int tid = threadIdx.x;
	// Shift the input right by one slot so the inclusive scan below
	// produces an exclusive scan.
	s_exScan[tid] = (tid > 0) ? d_in[tid - 1] : 0;
	__syncthreads();
	for (int offset = 1; offset < numItems; offset *= 2){
		// Read the neighbor BEFORE the barrier, write AFTER it.
		unsigned int addend = (tid >= offset) ? s_exScan[tid - offset] : 0u;
		__syncthreads();
		s_exScan[tid] += addend;
		__syncthreads();
	}
	d_out[tid] = s_exScan[tid];
}
14,551 | /*
This file shows how to use the "gather" pattern in CUDA. This specific example is using the
Black-Scholes equation, where we parallelize over securities, but gather parameters for each
security.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h" // CUDA random number generators
#include <math.h> // standard mathematical operations (log, erf, sqrt)
#define _USE_MATH_DEFINES // common mathematical constants (`M_SQRT1_2`, used below, is equal to sqrt(1/2))
#include <cstdio>
// cumulative distribution function (CDF) of a standard normal distribution
// note the `__inline__`, which tells the compiler to just inline the function at compile time for
// performance (at the cost of larger binary size)
// Cumulative distribution function (CDF) of a standard normal distribution.
// __inline__ asks the compiler to inline this small helper for performance.
__device__ __host__ __inline__ float N(float x) {
    // Float literals and a float cast keep the whole expression in single
    // precision; the original `0.5 + ...` silently promoted to double.
    return 0.5f + 0.5f * erff(x * (float)M_SQRT1_2);
}
// options are a right to buy (call) or sell (put) an asset at a specific price/date
// k = strike price, s = underlying asset price, t = time until option expires,
// r = rate at which money can be borrowed, v = volatility of option
// c = call price, p = put price
// this kernel actually does all the calculations for each security
// Black-Scholes closed-form price of a European option.
// k = strike, s = spot price, t = time to expiry, r = risk-free rate,
// v = volatility. Outputs: *c = call price, *p = put price.
__device__ __host__ void price(float k, float s, float t, float r, float v, float* c, float* p) {
    float srt = v * sqrtf(t);
    float d1 = (logf(s / k) + (r + 0.5f * v * v) * t) / srt;
    float d2 = d1 - srt;
    float kert = k * expf(-r * t);
    // Black-Scholes requires the standard normal CDF N(), defined above.
    // The original called erff() directly, which ranges over [-1, 1] and
    // produced incorrect prices.
    *c = N(d1) * s - N(d2) * kert;
    *p = kert - s + *c;   // put-call parity
}
// intermediate kernel which selects the index of the appropriate security and passes on
// computation to the function defined above
// Gather kernel: one thread per security picks out its parameters and
// delegates to the scalar price() above.
__global__ void price(float* k, float* s, float* t, float* r, float* v, float* c, float* p) {
    const int i = threadIdx.x;
    price(k[i], s[i], t[i], r[i], v[i], &c[i], &p[i]);
}
// Driver: generate random parameters for `count` securities on the device,
// price them all in one kernel launch, and print the first few results.
int main() {
    const int count = 512;                 // number of securities to analyze
    const int size = count * sizeof(float);
    float *args[5];                        // device parameter arrays (k, s, t, r, v)
    // Pseudo-random generator producing parameter values directly on device.
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
    for (int i = 0; i < 5; i++) {
        cudaMalloc(&args[i], size);
        curandGenerateUniform(gen, args[i], count);
    }
    float *dc, *dp;                        // device call/put result arrays
    cudaMalloc(&dc, size);
    cudaMalloc(&dp, size);
    // Price one security per thread.
    price<<<1,count>>>(args[0], args[1], args[2], args[3], args[4], dc, dp);
    // Copy results to the host before freeing device memory (blocking copies
    // also synchronize with the kernel).
    float hc[count], hp[count];
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(hp, dp, size, cudaMemcpyDeviceToHost);
    // cudaFree takes the device pointer itself; the original passed &dc/&dp
    // (host addresses of the pointer variables) and freed nothing.
    cudaFree(dc);
    cudaFree(dp);
    dc = NULL; dp = NULL;                  // prevent reuse / double free
    for (int i = 0; i < 5; i++) {
        cudaFree(args[i]);
        args[i] = NULL;
    }
    curandDestroyGenerator(gen);           // release the RNG handle
    // Print the first 10 securities. The original indexed with the inner
    // counter and printed the same five values ten times.
    for (int s = 0; s < 10; s++) {
        printf("Call price: $%.2f, put price: $%.2f\n", hc[s], hp[s]);
    }
    return 0;
}
14,552 | #include "includes.h"
// Element-wise C = A + B, processing two adjacent floats (one float2) per
// thread. Each block covers a 64-column x 8-row tile; widthA/B/C are the
// row pitches of the three matrices.
__global__ void add (float *d_A, float *d_B, float *d_C, int widthA, int widthB, int widthC)
{
    const int x = blockIdx.x * 64 + threadIdx.x * 2;   // column of this pair
    const int y = blockIdx.y * 8 + threadIdx.y;        // row
    float2 va = *(float2 *)(d_A + x + y * widthA);
    float2 vb = *(float2 *)(d_B + x + y * widthB);
    va.x += vb.x;
    va.y += vb.y;
    *(float2 *)(d_C + x + y * widthC) = va;
}
14,553 | //git hello 0
//biyao test
#include<stdio.h>
//#include<stdlib.h>
//#include<cuda.h>
#include<cuda_runtime.h> // provides the CUDA runtime API functions
// Device kernel: every launched thread prints one greeting line.
__global__ void helloFromGPU(void)
{
printf("Hello World from GPU!\n");
}
int main()
{
printf("Hello world from CPU!\n");
// Launch 2 blocks of 10 threads: 20 device-side prints in total.
helloFromGPU <<<2,10>>>();
// cudaDeviceReset() implicitly synchronizes, flushing the device printf
// buffer before the process exits.
cudaDeviceReset();
return 0;
}
14,554 |
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
int n = 5;
// Append n copies (n is the file-level global) of the value i to each of the
// arr_size host vectors in arr_of_vec.
void fill_arr_of_vector(thrust::host_vector<float>* arr_of_vec, int arr_size, int i){
    thrust::host_vector<float> chunk(n, i);
    // Distinct loop-variable name avoids shadowing the fill value `i`.
    for (int idx = 0; idx < arr_size; ++idx) {
        arr_of_vec[idx].insert(arr_of_vec[idx].end(), chunk.begin(), chunk.end());
    }
}
// Demo driver: create 8 empty host vectors, append nt runs of the values
// 0..nt-1 to each, then print every vector on its own line.
int main(){
    // const bounds make the array size a constant expression; the original
    // non-const int produced a variable-length array, a non-standard C++
    // extension.
    const int num_actions = 8;
    const int nt = 4;
    thrust::host_vector<float> arr_of_vec[num_actions];
    for (int i = 0; i < num_actions; i++)
        arr_of_vec[i] = thrust::host_vector<float>(0);
    for (int i = 0; i < nt; i++)
        fill_arr_of_vector(arr_of_vec, num_actions, i);
    for (int i = 0; i < num_actions; i++){
        for (int j = 0; j < (int)arr_of_vec[i].size(); j++)
            std::cout << arr_of_vec[i][j] << " ";
        std::cout << std::endl;
    }
    return 0;
}
14,555 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <fstream>
#include <functional>
#include <vector>
#include <cmath>
using std::string;
using std::cout;
using std::endl;
using std::map;
using std::ifstream;
using std::getline;
using std::vector;
using std::stoi;
using std::pair;
using std::mem_fun_ref;
using std::ofstream;
using std::pow;
#define P2_FILE "P2"
#define MY_PATH "C:\\lena.ascii.pgm"
#define OUTPUT_PATH "updated_lena.pgm"
void remove_empty_strings(vector<string>& strings);
size_t split(const string& text, vector<string>& parameters, char separator);
struct coordinates;
struct pixel;
// In-memory representation of an ASCII (P2) PGM image plus a set of
// per-pixel filters. Pixels are heap-allocated and owned by this class.
class Photo
{
public:
Photo(string file_path);
void filter_negative();
void filter_threshold(int level);
void filter_black_threshold(int level);
void filter_white_threshold(int level);
void filter_gamma(double level);
void save_file(string file_path);
~Photo();
private:
// Flat list of all pixels, in row-major read order.
vector<pixel*>* image_pixels_;
int height_;
int width_;
int max_gray_value_;
// Second header line of the source file, echoed back on save.
string image_comment_;
// Loading image
void load_image(const string file_path);
void load_image_size(ifstream* input);
void load_max_gray(ifstream* input);
void add_pixel(int row_counter, int column_counter, vector<std::basic_string<char>>* image_row, int i) const;
void load_pixels(ifstream* input, int row_counter, int column_counter, string current_line) const;
// Pixel filter methods
int negative(int value) const;
int threshold(int value, int level) const;
int black_threshold(int value, int level) const;
int white_threshold(int value, int level) const;
int gamma(int value, double level) const;
};
int main()
{
string my_path = MY_PATH;
auto photo = new Photo(my_path);
photo->filter_threshold(90);
photo->save_file(OUTPUT_PATH);
delete photo;
return 0;
}
// Erase-remove idiom: drop every empty string from the vector in place.
void remove_empty_strings(vector<string>& strings)
{
	// std::mem_fun_ref was deprecated in C++11 and removed in C++17;
	// a lambda predicate is the portable modern equivalent.
	auto it = remove_if(strings.begin(), strings.end(),
	                    [](const string& s) { return s.empty(); });
	// erase the removed elements
	strings.erase(it, strings.end());
}
// Tokenize `text` on `separator` into `parameters` (empty tokens are
// dropped). Returns the number of tokens produced.
size_t split(const string& text, vector<string>& parameters, const char separator)
{
	parameters.clear();
	size_t start = 0;
	size_t hit = text.find(separator);
	while (hit != string::npos)
	{
		parameters.push_back(text.substr(start, hit - start));
		start = hit + 1;
		hit = text.find(separator, start);
	}
	// Final token; the +1 mirrors the original bound and is clamped by substr.
	parameters.push_back(text.substr(start, std::min(hit, text.size()) - start + 1));
	remove_empty_strings(parameters);
	return parameters.size();
}
// Integer (column, row) position of one pixel within the image.
struct coordinates
{
int x;
int y;
};
// One grayscale sample together with its heap-allocated position.
struct pixel
{
int value;
coordinates* coordinates;
};
// Construct the photo by parsing the PGM file at file_path.
Photo::Photo(string file_path)
{
image_pixels_ = new vector<pixel*>();
load_image(file_path);
}
// Replace every sample with its photographic negative.
void Photo::filter_negative()
{
	for (auto px : *image_pixels_)
	{
		px->value = negative(px->value);
	}
}
// Binarize the image: white above `level`, black otherwise.
void Photo::filter_threshold(int level)
{
	for (auto px : *image_pixels_)
	{
		px->value = threshold(px->value, level);
	}
}
// Force samples at or below `level` to black; keep the rest unchanged.
void Photo::filter_black_threshold(int level)
{
	for (auto px : *image_pixels_)
	{
		px->value = black_threshold(px->value, level);
	}
}
// Force samples above `level` to full white; keep the rest unchanged.
void Photo::filter_white_threshold(int level)
{
	for (auto px : *image_pixels_)
	{
		px->value = white_threshold(px->value, level);
	}
}
// Apply gamma correction with exponent 1/level to every sample.
void Photo::filter_gamma(double level)
{
	for (auto px : *image_pixels_)
	{
		px->value = gamma(px->value, level);
	}
}
// Write the image back out as an ASCII P2 PGM file: magic line, original
// comment, dimensions, max gray level, then the samples row by row.
void Photo::save_file(string file_path)
{
ofstream processed_file(file_path);
processed_file << P2_FILE << "\n";
processed_file << image_comment_ << "\n";
processed_file << width_ << " " << height_ << "\n";
processed_file << max_gray_value_ << "\n";
// Track the row of the previous pixel so a newline is emitted whenever
// the stored y coordinate advances to the next row.
int current_row = 0;
for (int i = 0; i < image_pixels_->size(); i++)
{
auto current_pixel = image_pixels_->at(i);
if (current_pixel->coordinates->y != current_row)
{
current_row++;
processed_file << "\n";
} else
{
processed_file << " ";
}
processed_file << current_pixel->value << " ";
}
processed_file << "\n";
processed_file.close();
}
// Parse the "<width> <height>" header line of the PGM file.
void Photo::load_image_size(ifstream* input)
{
	string size_line;
	getline(*input, size_line);
	vector<string> parts;
	split(size_line, parts, ' ');
	width_ = stoi(parts.at(0));
	height_ = stoi(parts.at(1));
}
// Read the maximum gray value line (e.g. 255) from the PGM header.
void Photo::load_max_gray(ifstream* input)
{
	string line;
	getline(*input, line);
	max_gray_value_ = stoi(line);
}
// Create one pixel from token i of the current row and append it to
// image_pixels_ (both the pixel and its coordinates are heap-allocated
// and owned by this object).
void Photo::add_pixel(int row_counter, int column_counter, vector<std::basic_string<char>>* image_row, int i) const
{
const auto current_pixel = new pixel();
const auto coords = new coordinates();
coords->x = column_counter;
coords->y = row_counter;
current_pixel->coordinates = coords;
current_pixel->value = stoi(image_row->at(i));
image_pixels_->push_back(current_pixel);
}
// Read the remaining file lines; every whitespace-separated token becomes
// one pixel, tracking (column, row) positions as we go.
void Photo::load_pixels(ifstream* input, int row_counter, int column_counter, string current_line) const
{
	while (getline(*input, current_line))
	{
		vector<string> row_tokens;
		split(current_line, row_tokens, ' ');
		for (int i = 0; i < (int)row_tokens.size(); i++)
		{
			add_pixel(row_counter, column_counter, &row_tokens, i);
			column_counter++;
		}
		column_counter = 0;
		row_counter++;
	}
}
// Invert one sample relative to the maximum gray level.
int Photo::negative(int value) const
{
return max_gray_value_ - value;
}
// Binary threshold: full white above `level`, black otherwise.
int Photo::threshold(int value, int level) const
{
	return value > level ? max_gray_value_ : 0;
}
// Keep samples above `level`; clamp the rest to black.
int Photo::black_threshold(int value, int level) const
{
	return value > level ? value : 0;
}
// Clamp samples above `level` to full white; keep the rest.
int Photo::white_threshold(int value, int level) const
{
	return value > level ? max_gray_value_ : value;
}
// Gamma-correct one sample: normalize to [0,1], raise to 1/level, rescale.
int Photo::gamma(int value, double level) const
{
return pow( (static_cast<double>(value) / static_cast<double>(max_gray_value_)), 1.0 / level) * max_gray_value_;
}
// Parse the PGM file: verify the P2 magic line, read the comment line,
// dimensions, max gray value, and then all pixel data.
// NOTE(review): silently does nothing if the file cannot be opened or is
// not a P2 file - the object is then left with uninitialized dimensions.
void Photo::load_image(const string file_path)
{
ifstream input(file_path);
if (input.is_open())
{
string file_type;
getline(input, file_type);
if (file_type == P2_FILE)
{
getline(input, image_comment_);
load_image_size(&input);
load_max_gray(&input);
int row_counter = 0;
int column_counter = 0;
string current_line;
load_pixels(&input, row_counter, column_counter, current_line);
}
}
}
// Release all owned memory. The original deleted only the vector itself,
// leaking every pixel and its coordinates object.
Photo::~Photo()
{
	for (auto px : *image_pixels_)
	{
		delete px->coordinates;
		delete px;
	}
	delete image_pixels_;
}
|
14,556 | /******************************************************************************
* PROGRAM: copyStruture
* PURPOSE: This program is a test which test the ability to transfer multilevel
* C++ structured data from host to device, modify them and transfer back.
*
*
* NAME: Vuong Pham-Duy.
* College student.
* Faculty of Computer Science and Technology.
* Ho Chi Minh University of Technology, Viet Nam.
* vuongpd95@gmail.com
*
* DATE: 5/10/2017
*
******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
// Wrap every CUDA API call in gpuErrchk(...) to report failures with
// file/line context.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable message for a failed CUDA call; exits by default.
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", \
cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
/************************** Test 1 lv nested structure ************************/
// Per-sequence annotation record (BWA-style layout).
typedef struct {
int64_t offset;
int32_t len;
int32_t n_ambs;
uint32_t gi;
int32_t is_alt;
} bntann1_t;
// Container with a nested host array - the structure being deep-copied to
// the device in this test.
typedef struct {
int64_t l_pac;
int32_t n_seqs;
bntann1_t *anns; // n_seqs elements
} bntseq_t;
void bns_to_device(const bntseq_t *bns, bntseq_t **d_bns);
// Reads a field of the device-side anns array to prove the nested copy
// worked. NOTE(review): int32 + int64 is stored into an int - fine for this
// test's small values, but it would truncate large offsets.
__global__ void func0(bntann1_t *anns, int64_t l_pac, int32_t n_seqs, \
int *d_b) {
*d_b = n_seqs + anns[0].offset;
}
/************************** Test 1 lv nested structure ************************/
/************************** Test __constant__ *********************************/
// Simple POD used to exercise __constant__ memory.
typedef struct {
int b1;
int b2;
int b3;
} burge;
// Device-resident constant copy, filled via cudaMemcpyToSymbol in main().
__constant__ burge opt;
// Sums the three fields of the __constant__ struct; with the values set in
// main() the expected result is 0 + 1 + 2 = 3.
__global__ void func(int *d_a) {
*d_a = opt.b1 + opt.b2 + opt.b3;
}
/************************** Test __constant__ *********************************/
// Two small transfer tests: (1) copy a struct into __constant__ memory and
// read it in a kernel; (2) deep-copy a struct containing a nested host
// array and read it in a kernel.
int main(int argc, char *argv[])
{
	/************************** Test __constant__ *********************************/
	burge bu;
	bu.b1 = 0;
	bu.b2 = 1;
	bu.b3 = 2;
	const burge *pb = &bu;
	int a = 1;
	int *d_a;
	gpuErrchk(cudaMalloc(&d_a, sizeof(int)));
	// Constant memory is set by symbol, not by pointer.
	gpuErrchk(cudaMemcpyToSymbol(opt, pb, sizeof(burge), 0, \
		cudaMemcpyHostToDevice));
	func<<<1, 1>>>(d_a);
	gpuErrchk(cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost));
	printf("a = %d\n", a);
	gpuErrchk(cudaPeekAtLastError());
	gpuErrchk(cudaDeviceSynchronize());
	/************************** Test 1 lv nested structure ************************/
	bntseq_t *bns = (bntseq_t*)malloc(sizeof(bntseq_t));
	bns->l_pac = 1;
	bns->n_seqs = 2500;
	bns->anns = (bntann1_t*)malloc(bns->n_seqs * sizeof(bntann1_t));
	bns->anns[0].offset = 10;
	const bntseq_t *cbns = bns;
	int b;
	int *d_b;
	gpuErrchk(cudaMalloc(&d_b, sizeof(int)));
	// Only the nested array needs its own device buffer; scalar members are
	// passed as kernel arguments.
	bntann1_t *d_anns;
	gpuErrchk(cudaMalloc(&d_anns, cbns->n_seqs * sizeof(bntann1_t)));
	gpuErrchk(cudaMemcpy(d_anns, cbns->anns, \
		cbns->n_seqs * sizeof(bntann1_t), cudaMemcpyHostToDevice));
	func0<<<1, 1>>>(d_anns, cbns->l_pac, cbns->n_seqs, d_b);
	gpuErrchk(cudaPeekAtLastError());
	gpuErrchk(cudaDeviceSynchronize());
	gpuErrchk(cudaMemcpy(&b, d_b, sizeof(int), cudaMemcpyDeviceToHost));
	printf("b = %d\n", b);
	printf("b = %ld\n", bns->n_seqs + bns->anns[0].offset);
	// Release everything; the original leaked all allocations.
	gpuErrchk(cudaFree(d_a));
	gpuErrchk(cudaFree(d_b));
	gpuErrchk(cudaFree(d_anns));
	free(bns->anns);
	free(bns);
	return 0;
}
|
// ret[i] = a * x[i] + y[i] for i in [0, n); one element per thread.
__global__ void daxpy(int n, double *ret, double a, double *x, double *y) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        ret[idx] = a * x[idx] + y[idx];
    }
}
// Classic RK4 combination step, per element:
// ret += (dt/6) * (k1 + 2*k2 + 2*k3 + k4).
__global__ void rk4sum(int n, double dt, double *k1, double *k2, double *k3, double *k4, double *ret) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        double sixth = dt / 6.0;
        ret[idx] += sixth * (k1[idx] + 2.0 * k2[idx] + 2.0 * k3[idx] + k4[idx]);
    }
}
|
14,558 | #include "includes.h"
// Leaky ReLU forward pass: dst = src > 0 ? src : 0.01 * src.
// The loop strides by the total thread count so any launch configuration
// covers all data_size elements.
__global__ void LreluForward(float* srcData, float* dstData, int data_size)
{
    int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
    int num_threads = blockDim.x * gridDim.x;
    for (int i = 0; i < data_size; i += num_threads)
    {
        int index = i + thread_index;
        if (index < data_size)
        {
            // 0.01f keeps the math in single precision; the double literal
            // 0.01 forced a float->double->float round trip per element.
            dstData[index] = srcData[index] > 0 ? srcData[index] : srcData[index] * 0.01f;
        }
    }
}
14,559 | #include "includes.h"
// Grayscale conversion of an h x w RGBA image stored as 4 bytes per pixel.
// Zeroes the R, G, B channels and stores the luma in the 4th byte.
// NOTE(review): the weights 0.21/0.71/0.07 look like rounded Rec.709 luma
// coefficients, and writing the result into the alpha slot is unusual -
// confirm the consumer expects this layout.
__global__ void imageBNKernel(unsigned char* d_image, int h, int w)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
int r, g, b;
if((Row < h) && (Col < w)){
r = d_image[4 * w * Row + 4 * Col + 0];
g = d_image[4 * w * Row + 4 * Col + 1];
b = d_image[4 * w * Row + 4 * Col + 2];
d_image[4 * w * Row + 4 * Col + 0] = 0;
d_image[4 * w * Row + 4 * Col + 1] = 0;
d_image[4 * w * Row + 4 * Col + 2] = 0;
d_image[4 * w * Row + 4 * Col + 3] = (int)(r*0.21 + g*0.71 + b*0.07);
}
}
14,560 | #include <stdio.h>
#include <stdlib.h>
// Minimal smoke test: allocate 1024 floats on the device and print the
// cudaMalloc status code.
int main(int argc, char** argv)
{
	float *device_data = NULL;
	size_t size = 1024*sizeof(float);
	cudaError_t err;
	err = cudaMalloc((void **)&device_data, size);
	printf("err = %d\n",err);
	// Release the allocation (the original leaked it until process exit).
	cudaFree(device_data);
	return 0;
}
|
14,561 | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
// Kernel whose only effect is a device-wide memory fence; the header above
// (--blockDim=32 --gridDim=2) marks this as a verifier pass case.
__global__ void foo() {
__threadfence();
}
|
14,562 | #include "includes.h"
// Element-wise C = A - B over an nx x ny matrix addressed as ix*ny + iy.
__global__ void MatSub(float *A, float *B, float *C, int nx, int ny){
int ix = threadIdx.x+ blockIdx.x*blockDim.x;
int iy = threadIdx.y+ blockIdx.y*blockDim.y;
// NOTE(review): ix (the fast-varying thread coordinate) is multiplied by
// ny, so adjacent threads in a warp touch memory ny elements apart and the
// accesses are uncoalesced; idx = iy*nx + ix would coalesce. Confirm
// against the host-side data layout before changing.
int idx = ix*ny + iy;
if((ix<nx)&&(iy<ny)){
C[idx]=A[idx]-B[idx];
}
}
14,563 | #include "includes.h"
// Kernel Average with Depth
extern "C"
//Converting 2D coordinates into one 1D coordinate
// For each cell (tidX, tidY) of an envSizeX x envSizeY grid in envData,
// replace the cell with the mean of its (2*depth+1)^2 neighborhood, with
// toroidal (wrap-around) indexing at the edges; cells equal to the
// sentinel -1 are excluded from the average.
// NOTE(review): indexing mixes dimensions (envSizeX * row + col with col
// bounded by envSizeY) - only consistent if the grid is square; confirm.
// NOTE(review): the kernel updates envData in place while neighboring
// threads may still be reading it, so the result depends on scheduling;
// the trailing __syncthreads() does not prevent this race.
__global__ void AVERAGE_DEPTH_1D(int envSizeX, int envSizeY, float* envData, int depth){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int tidY = blockIdx.y * blockDim.y + threadIdx.y;
float moyenne = 0;
int nbNombre = 0;
if(tidX < envSizeX && tidY < envSizeY){
// Row index l wraps below 0 / above envSizeX - 1.
for(int l = tidX - depth; l <= tidX + depth; l++){
if(l < 0){
int ltemp = l;
ltemp += envSizeX;
for(int k = tidY - depth; k <= tidY + depth; k++){
if(k < 0){
int ktemp = k;
ktemp += envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else if(k > envSizeY - 1){
int ktemp = k;
ktemp -= envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else{
if(envData[envSizeX * ltemp + k] != -1){
moyenne += envData[envSizeX * ltemp + k];
nbNombre++;
}
}
}
}
else if(l > envSizeX - 1){
int ltemp = l;
ltemp -= envSizeX;
for(int k = tidY - depth; k <= tidY + depth; k++){
if(k < 0){
int ktemp = k;
ktemp += envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else if(k > envSizeY - 1){
int ktemp = k;
ktemp -= envSizeY;
if(envData[envSizeX * ltemp + ktemp] != -1){
moyenne += envData[envSizeX * ltemp + ktemp];
nbNombre++;
}
}
else{
if(envData[envSizeX * ltemp + k] != -1){
moyenne += envData[envSizeX * ltemp + k];
nbNombre++;
}
}
}
}
else{
// Row in range; only the column index k may need wrapping.
for(int k = tidY - depth; k <= tidY + depth; k++){
if(k < 0){
int ktemp = k;
ktemp += envSizeY;
if(envData[envSizeX * l + ktemp] != -1){
moyenne += envData[envSizeX * l + ktemp];
nbNombre++;
}
}
else if(k > envSizeY - 1){
int ktemp = k;
ktemp -= envSizeY;
if(envData[envSizeX * l + ktemp] != -1){
moyenne += envData[envSizeX * l + ktemp];
nbNombre++;
}
}
else{
if(envData[envSizeX * l + k] != -1){
moyenne += envData[envSizeX * l + k];
nbNombre++;
}
}
}
}
}
// Write back the mean only if at least one valid neighbor was found.
if(nbNombre != 0){
envData[envSizeX * tidX + tidY] = moyenne / nbNombre;
}
}
__syncthreads();
}
14,564 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#define NUM_THREADS 1024
#define NUM_BLOCKS 1024
#define NUM_VALUES NUM_THREADS*NUM_BLOCKS
//Macro per a swap
#define SWAP(_i, _ixj){\
int aux = vector[_i];\
vector[_i] = vector[_ixj];\
vector[_ixj] = aux;}
//Kernel per a bitonic sort
// One bitonic-sort compare/exchange step: thread i pairs with i XOR j and
// swaps so every k-sized run keeps its bitonic order. (The SWAP macro above
// captures the `vector` name.)
__global__ void bitonicSortKernel(int *vector, int j, int k){
int i, ixj;
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
if((ixj) > i){
// (i & k) selects whether this pair belongs to an ascending or a
// descending half of the current k-block.
if((i & k) == 0 && vector[i] > vector[ixj])
SWAP(i, ixj);
if((i & k) != 0 && vector[i] < vector[ixj])
SWAP(i, ixj);
}
}
//Funcio iterativa de bitonic sort
void bitonicSort(int length, int *vector){
int j, k;
dim3 numBlocks(NUM_BLOCKS, 1);
dim3 numThreads(NUM_THREADS, 1);
for(k = 2; k <= length; k = 2*k){
//Els shifts son equivalents de dividir entre 2
for(j = k >> 1; j > 0; j = j >> 1){
bitonicSortKernel<<<numBlocks, numThreads>>>(vector, j, k);
}
}
}
//Funcio de testeig per mirar que el vector esta ordenat
int testOrdenacio(int length, int *vector){
int ordenat = 1;
int i;
for(i = 0; i < length -1 && ordenat; ++i){
if(vector[i] > vector[i+1]) ordenat = 0;
}
return ordenat;
}
// Driver: fill a pinned host buffer with random ints, bitonic-sort it on
// the GPU, verify the result, and report total and kernel-only times.
int main(int argc, char **argv){
    int n = NUM_VALUES;
    // NOTE(review): the command-line value is parsed but the program always
    // sorts NUM_VALUES elements (n is reset before printing); kept as-is.
    if(argc > 1) n = atoi(argv[1]);
    int *host_v, *dev_v;
    // Events: E0..E1 bracket the whole program, E2..E3 the kernel phase.
    cudaEvent_t E0, E1, E2, E3;
    cudaEventCreate(&E0);
    cudaEventCreate(&E1);
    cudaEventCreate(&E2);
    cudaEventCreate(&E3);
    unsigned int numBytes = NUM_VALUES * sizeof(int);
    cudaEventRecord(E0, 0);
    cudaEventSynchronize(E0);
    // Pinned host memory for fast transfers.
    cudaMallocHost( &host_v, numBytes);
    // Fill with random values.
    int i;
    srand(time(NULL));
    for(i = 0; i < NUM_VALUES; ++i){
        host_v[i] = rand();
    }
    cudaMalloc((int**)&dev_v, numBytes);
    cudaMemcpy(dev_v, host_v, numBytes, cudaMemcpyHostToDevice);
    cudaEventRecord(E2, 0);
    cudaEventSynchronize(E2);
    bitonicSort(NUM_VALUES ,dev_v);
    cudaEventRecord(E3, 0);
    cudaEventSynchronize(E3);
    cudaMemcpy( host_v, dev_v, numBytes, cudaMemcpyDeviceToHost);
    if(testOrdenacio(NUM_VALUES, host_v)) printf("TEST CORRECTO\n");
    else printf("TEST FALLADO\n\n");
    cudaFree(dev_v);
    // host_v came from cudaMallocHost, so it must be released with
    // cudaFreeHost; the original cudaFree on a pinned host pointer was an
    // invalid-value error.
    cudaFreeHost(host_v);
    cudaDeviceSynchronize();
    cudaEventRecord(E1, 0);
    cudaEventSynchronize(E1);
    float tempsTotal, tempsKernel;
    cudaEventElapsedTime(&tempsTotal, E0, E1);
    cudaEventElapsedTime(&tempsKernel, E2, E3);
    n = NUM_VALUES;
    printf("Numero de threads: %d\n", NUM_THREADS);
    printf("Numero de blocks: %d\n", NUM_BLOCKS);
    printf("Numero de valores de entrada: %d\n", n);
    printf("Tiempo total de programa: %f ms\n", tempsTotal);
    printf("Tiempo de kernel: %f ms\n", tempsKernel);
    cudaEventDestroy(E0);
    cudaEventDestroy(E1);
    cudaEventDestroy(E2);
    cudaEventDestroy(E3);
}
|
14,565 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#define point_size 30000
// Device buffers; the real allocations happen in CopyRMapCUDA/CopyVMapCUDA
// via cudaMalloc. The original initialized several of these with host
// `new[]` blocks that leaked the moment cudaMalloc overwrote the pointers.
static int *d_VMap = NULL;
static int *d_RMap = NULL;
static float *d_point_x = NULL;
static float *d_point_y = NULL;
static int *d_point_i = NULL;
static double *d_w = NULL;
static float *d_transed_point_x = NULL;
static float *d_transed_point_y = NULL;
// For each point (one per block, indexed by blockIdx.x), look up its cell
// in the road occupancy grid and emit weight 1 if the cell value (mapped
// to 0..255) exceeds 100, else 0. Out-of-map points get weight 0.
__global__ void MeasInRMap(float *d_point_x, float *d_point_y, int *d_point_i, int *d_Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, double *d_w)
{
int tid = blockIdx.x;
int xIndex, yIndex;
int mapmeas;
double resolutionInverse = 1/Map_resolution;
if(tid < point_size){
// World coordinates -> integer grid cell.
xIndex = (int)((d_point_x[tid] - Map_origin_x)*resolutionInverse);
yIndex = (int)((d_point_y[tid] - Map_origin_y)*resolutionInverse);
if(xIndex < Map_width && yIndex < Map_height){
int mapIndex = Map_width*yIndex+xIndex;
mapmeas = d_Map[mapIndex];
// Map stores signed bytes; fold negatives back into 0..255.
if(mapmeas <0)
mapmeas +=256;
if(mapmeas > 100)
d_w[tid] = 1;
else
d_w[tid] = 0;
if(d_w[tid]>100)
printf("road weight : %f\n",d_w[tid]);
}
else{
d_w[tid]=0;
printf("Out of RMap size!!!!!!!!!\n");
}
}
else
printf("Out of Road Point size!!!!!!!\n");
}
// For each point (one per block), look up its cell in the vertical-feature
// bitmask map and set the weight to the popcount of (cell & point bits),
// i.e. the number of matching feature bits.
__global__ void MeasInVMap(float *d_point_x, float *d_point_y, int *d_point_i, int *d_Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, double *d_w)
{
int tid = blockIdx.x;
int xIndex, yIndex;
int mapmeas;
double resolutionInverse = 1/Map_resolution;
if(tid<point_size){
xIndex = (int)((d_point_x[tid] - Map_origin_x)*resolutionInverse);
yIndex = (int)((d_point_y[tid] - Map_origin_y)*resolutionInverse);
if(xIndex < Map_width && yIndex < Map_height){
int mapIndex = Map_width*yIndex+xIndex;
mapmeas = d_Map[mapIndex];
// Fold signed byte values back into 0..255.
if(mapmeas < 0)
mapmeas +=256;
// Kernighan popcount of the overlapping bits. (b2 is a leftover from
// the commented-out squared weighting below.)
int b1=0, b2=0;
b1 = mapmeas&d_point_i[tid];
int shBits;
for (shBits=0; b1!=0;shBits++){
b1 = b1 & (b1 -1);
}
// b2 = shBits*shBits;
// if(mapmeas == d_point_i[tid] && b2!=1)
// b2=b2*2;
// d_w[tid] = b2;
d_w[tid] = shBits;
}
else{
printf("VMap%d\t%d\t%d\t%d\n",xIndex,Map_width,yIndex,Map_height);
d_w[tid] = 0;
printf("Out of VMap size!!!!!!!!!\n");
}
}
else
printf("Out of Vertical Point size!!!!!!!!!\n");
}
// Rigid 2D transform: rotate each point by theta then translate by (Tx, Ty).
// One point per block (launched <<<N,1>>>), indexed by blockIdx.x; callers
// must launch with N <= the allocated point count.
__global__ void Transformcuda(float *d_trans_point_x, float *d_trans_point_y, float *d_transed_point_x, float *d_transed_point_y, float Tx, float Ty, float theta)
{
    int tid = blockIdx.x;
    // Compute sin/cos once in single precision; the original evaluated the
    // double-precision cos()/sin() four times per point.
    float s, c;
    sincosf(theta, &s, &c);
    float x = d_trans_point_x[tid];
    float y = d_trans_point_y[tid];
    d_transed_point_x[tid] = x * c - y * s + Tx;
    d_transed_point_y[tid] = x * s + y * c + Ty;
}
// Host entry point: upload N points, transform them by (Tx, Ty, theta), and
// score each against the previously-uploaded map selected by `type`
// ("vertical" -> d_VMap, "road" -> d_RMap). Results are copied into `w`,
// which is also returned.
// NOTE(review): the Map parameter is unused here - the maps must already
// have been uploaded via CopyVMapCUDA/CopyRMapCUDA; confirm callers do so.
double *MeasInMapCUDA(int N, float *point_x, float *point_y , int *point_i, int *Map, float Map_resolution,
unsigned int Map_width, unsigned int Map_height, double Map_origin_x, double Map_origin_y, float Tx, float Ty, float theta, double *w, std::string type)
{
// Device copies of three inputs and output, size of allocated memory, num of threads and blocks
cudaMemcpy(d_point_x,point_x,N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_point_y,point_y,N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_point_i,point_i,N*sizeof(int), cudaMemcpyHostToDevice);
Transformcuda<<<N,1>>>(d_point_x, d_point_y, d_transed_point_x, d_transed_point_y, Tx, Ty, theta);
if(type == "vertical")
MeasInVMap<<<N,1>>>(d_transed_point_x, d_transed_point_y, d_point_i, d_VMap, Map_resolution, Map_width, Map_height, Map_origin_x, Map_origin_y, d_w);
else if(type == "road")
MeasInRMap<<<N,1>>>(d_transed_point_x, d_transed_point_y, d_point_i, d_RMap, Map_resolution, Map_width, Map_height, Map_origin_x, Map_origin_y, d_w);
// Blocking copy also synchronizes with the kernels above.
cudaMemcpy(w, d_w, N*sizeof(double), cudaMemcpyDeviceToHost);
return w;
}
// Allocate the vertical-feature map on the device and upload it.
void CopyVMapCUDA(int *Map, unsigned int Map_width, unsigned int Map_height){
	// The original first did `d_VMap = new int[...]`, a host allocation that
	// leaked as soon as cudaMalloc overwrote the pointer; allocate on the
	// device only.
	cudaMalloc((void **)&d_VMap, Map_width*Map_height*sizeof(int));
	cudaMemcpy(d_VMap, Map, Map_width*Map_height*sizeof(int), cudaMemcpyHostToDevice);
}
// Allocate and upload the road map, plus all per-point working buffers.
void CopyRMapCUDA(int *Map, unsigned int Map_width, unsigned int Map_height) {
	// As in CopyVMapCUDA, the original's `d_RMap = new int[...]` leaked a
	// host block the moment cudaMalloc overwrote the pointer.
	cudaMalloc((void **)&d_RMap, Map_width * Map_height * sizeof(int));
	cudaMemcpy(d_RMap, Map, Map_width * Map_height * sizeof(int), cudaMemcpyHostToDevice);
	cudaMalloc((void **)&d_point_x, point_size*sizeof(float));
	cudaMalloc((void **)&d_point_y, point_size*sizeof(float));
	cudaMalloc((void **)&d_point_i, point_size*sizeof(int));
	cudaMalloc((void **)&d_transed_point_x, point_size*sizeof(float));
	cudaMalloc((void **)&d_transed_point_y, point_size*sizeof(float));
	cudaMalloc((void **)&d_w, point_size*sizeof(double));
}
// Release every device allocation. cudaFree must receive the device pointer
// itself; the original passed &ptr (the host address of the pointer
// variable), which is an invalid-value error and freed nothing.
void CUDAFree(){
	cudaFree(d_VMap); cudaFree(d_RMap);
	cudaFree(d_point_x); cudaFree(d_point_y); cudaFree(d_point_i); cudaFree(d_w);
	cudaFree(d_transed_point_x); cudaFree(d_transed_point_y);
}
14,566 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define TILE_WIDTH 16
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
//funcao que le o imagem ppm
static PPMImage *readPPM(const char *filename);
// 64-bin color histogram of an *l-row x *c-column image whose channels have
// already been quantized to 0..3 on the host. Each block accumulates into a
// shared-memory private histogram, then merges into the global one with the
// 1/n normalization applied.
__global__ void device_histogram(PPMPixel *image ,float *h, int *l, int *c){
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int n = *l * *c;
// Unique 0..255 id within the 16x16 block; the first 64 ids own one bin each.
int j = threadIdx.x*blockDim.x + threadIdx.y;
int x, y, z;
__shared__ float h_private[64];
// Zero this block's private copy of the histogram.
if(j < 64){
h_private[j] = 0;
}
__syncthreads();
if(row < *l && col < *c){
for (x = 0; x <= 3; x++) {
for (y = 0; y <= 3; y++) {
for (z = 0; z <= 3; z++) {
if (image[*c*row + col].red == x && image[*c*row + col].green == y
&& image[*c*row + col].blue == z) {
atomicAdd(&(h_private[x*16+y*4+z]), 1.0f);
}
}
}
}
}
__syncthreads();
// Merge into the global histogram, normalized by the pixel count.
if(j < 64){
atomicAdd(&(h[j]), h_private[j]/n);
}
}
// Host wrapper: quantize the image channels to 0..3 in place, copy image and
// histogram to the device, launch device_histogram over 16x16 tiles, and
// copy the normalized 64-bin result back into h.
void Histogram(PPMImage *image, float *h) {
int rows, cols, i;
int *d_r, *d_c;
float *d_h;
PPMPixel *d_image;
float n = image->y * image->x;
cols = image->x;
rows = image->y;
size_t bytes = sizeof(float)*64;
// Quantize each 0..255 channel to the 0..3 range the kernel matches on.
for (i = 0; i < n; i++) {
image->data[i].red = floor((image->data[i].red * 4) / 256);
image->data[i].blue = floor((image->data[i].blue * 4) / 256);
image->data[i].green = floor((image->data[i].green * 4) / 256);
}
cudaMalloc((void**)&d_r, sizeof(int));
cudaMalloc((void**)&d_c, sizeof(int));
cudaMalloc((void**)&d_h, bytes);
cudaMalloc((void**)&d_image, sizeof(PPMPixel)*cols*rows);
dim3 dimGrid(ceil((float) cols/TILE_WIDTH), ceil((float) rows/TILE_WIDTH ), 1);// number of thread blocks
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); // threads per block
cudaMemcpy(d_h, h, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_image, image->data, sizeof(PPMPixel)*cols*rows, cudaMemcpyHostToDevice);
cudaMemcpy(d_r, &rows, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, &cols, sizeof(int), cudaMemcpyHostToDevice);
device_histogram<<<dimGrid , dimBlock>>>(d_image, d_h, d_r, d_c);
cudaMemcpy(h, d_h, bytes, cudaMemcpyDeviceToHost);
cudaFree(d_c);
cudaFree(d_r);
cudaFree(d_h);
cudaFree(d_image);
}
// Entry point: read a P6 PPM, compute its normalized 64-bin histogram on
// the GPU, print the bins and the elapsed time.
int main(int argc, char *argv[]) {
	if( argc != 2 ) {
		printf("Too many or no one arguments supplied.\n");
		// Without an input path we cannot continue; the original fell
		// through and dereferenced argv[1].
		return 1;
	}
	double t_start, t_end;
	int i;
	char *filename = argv[1];   // input file path
	PPMImage *image = readPPM(filename);
	float *h = (float*)malloc(sizeof(float) * 64);
	// Initialize the histogram bins.
	for(i=0; i < 64; i++) h[i] = 0.0;
	t_start = rtclock();
	Histogram(image, h);
	t_end = rtclock();
	for (i = 0; i < 64; i++){
		printf("%0.3f ", h[i]);
	}
	printf("\n");
	fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
	free(h);
	// readPPM mallocs both the struct and the pixel buffer.
	free(image->data);
	free(image);
	return 0;
}
// Read a binary (P6) PPM file into a freshly-allocated PPMImage.
// Exits with a diagnostic on any open/format/allocation error.
static PPMImage *readPPM(const char *filename) {
	char buff[16];
	PPMImage *img;
	FILE *fp;
	int c, rgb_comp_color;
	fp = fopen(filename, "rb");
	if (!fp) {
		fprintf(stderr, "Unable to open file '%s'\n", filename);
		exit(1);
	}
	// Magic-number line: must be "P6".
	if (!fgets(buff, sizeof(buff), fp)) {
		perror(filename);
		exit(1);
	}
	if (buff[0] != 'P' || buff[1] != '6') {
		fprintf(stderr, "Invalid image format (must be 'P6')\n");
		exit(1);
	}
	img = (PPMImage *) malloc(sizeof(PPMImage));
	if (!img) {
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	// Skip '#' comment lines before the dimensions.
	c = getc(fp);
	while (c == '#') {
		while (getc(fp) != '\n')
			;
		c = getc(fp);
	}
	ungetc(c, fp);
	if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
		fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
		exit(1);
	}
	if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
		fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
				filename);
		exit(1);
	}
	if (rgb_comp_color != RGB_COMPONENT_COLOR) {
		fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
		exit(1);
	}
	while (fgetc(fp) != '\n');
	img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
	// Bug fix: the original re-checked `img` here instead of the fresh
	// `img->data` allocation, so a failed pixel-buffer malloc went unnoticed.
	if (!img->data) {
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
		fprintf(stderr, "Error loading image '%s'\n", filename);
		exit(1);
	}
	fclose(fp);
	return img;
}
/*
entrada, tempo_serial, tempo_GPU_criar_buffer, tempo_GPU_offload_enviar, tempo_kernel, tempo_GPU_offload_receber, GPU_total, speedup
arq1.ppm, 0.218407, 0.112063, 0.008347, 0.000029, 0.003583, 0.121043, 1.804375305
arq2.ppm, 0.410912, 0.146193, 0.018426, 0.000035, 0.013859, 0.139981, 2.935484101
arq3.ppm, 1.532259, 0.110749, 0.072426, 0.000042, 0.055907, 0.239100, 6.408444166
*/ |
14,567 | #include "aabb_tree.cuh"
#include <stdio.h>
// Trivial constructor; the tree is built lazily by receive_world().
AABBTree::AABBTree() {}
// Entry point: build the AABB tree over `amount` blocks.
void AABBTree::receive_world(Block* blocks, int amount) {
construct_tree(blocks, amount);
}
// Incrementally insert blocks[1..amount-1] into a tree rooted at blocks[0].
// Each insertion descends toward the child whose bounding box grows the
// least (surface-area heuristic), enlarging every visited node's box.
void AABBTree::construct_tree(Block* blocks, int amount) {
this->root = new AABBTreeNode(&blocks[0]);
for (int i = 1; i < amount; i++) {
int depth = 0;
AABBTreeNode* current_node = root;
while (!current_node->is_leaf()) {
depth += 1;
// Grow this node's box to cover the new block on the way down.
current_node->bounding_box = current_node->bounding_box.get_union(&blocks[i].aabb);
// Surface-area increase if the block were added to each child.
float surface1_growth = current_node->get_c1()->bounding_box.get_union(&blocks[i].aabb).surface() - current_node->get_c1()->bounding_box.surface();
float surface2_growth = current_node->get_c2()->bounding_box.get_union(&blocks[i].aabb).surface() - current_node->get_c2()->bounding_box.surface();
if (surface1_growth < surface2_growth) {
// insert into c1
current_node = current_node->get_c1();
} else {
current_node = current_node->get_c2();
}
}
// Progress logging every 10 blocks.
if (i % 10 == 0) {
printf("adding block: %i at depth: %i\n", i, depth);
}
current_node->insert_block(&blocks[i]);
}
}
14,568 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// Kernel launch benchmark which will launch one empty kernel and record the cost in event mode and wall mode.
// event mode: using cuda event to record the elapsed time of kernel launch on device.
// wall mode: using host timer to record the elapsed time kernel launch on both host and device.
#include <algorithm>
#include <chrono>
#include <stdio.h>
#include <string>
#include <sys/time.h>
#include <thread>
#include "cuda_runtime.h"
__global__ void EmptyKernel() {}
// Measures per-launch overhead of an empty kernel using CUDA events, which
// time only the device-side portion of each launch.
// Returns the TOTAL elapsed time over num_steps launches, in milliseconds
// (cudaEventElapsedTime reports ms); the caller divides by num_steps.
double test_cuda_kernel_launch_event_time(int num_warmups, int num_steps) {
float time = 0.f;
double total_time = 0.0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Warm-up launches: prime driver/caches; these timings are discarded.
for (int i = 0; i < num_warmups; i++) {
cudaEventRecord(start, 0);
EmptyKernel<<<1, 1>>>();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
}
// Timed launches: bracket each launch with start/stop events and accumulate.
for (int i = 0; i < num_steps; i++) {
cudaEventRecord(start, 0);
EmptyKernel<<<1, 1>>>();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
return total_time;
}
// Measures kernel launch + completion cost with a host-side wall clock
// (gettimeofday), including host overhead of the launch and the sync.
// Returns the TOTAL elapsed time over num_steps launches, in milliseconds;
// the caller divides by num_steps.
double test_cuda_kernel_launch_wall_time(int num_warmups, int num_steps) {
    double total_time = 0.0;
    // Warm-up launches: prime driver/caches; not timed.
    for (int i = 0; i < num_warmups; i++) {
        EmptyKernel<<<1, 1>>>();
        cudaDeviceSynchronize();
    }
    struct timeval begin_tv, end_tv;
    for (int i = 0; i < num_steps; i++) {
        gettimeofday(&begin_tv, NULL);
        EmptyKernel<<<1, 1>>>();
        cudaDeviceSynchronize();
        gettimeofday(&end_tv, NULL);
        // Accumulate in microseconds, then convert to ms. The original
        // truncated each sample to whole milliseconds with integer math
        // (tv_usec / 1000), so a ~10 us launch rounded down to 0 and the
        // reported overhead was always ~0 ms.
        double elapsed_us =
            (double)(end_tv.tv_sec - begin_tv.tv_sec) * 1000000.0 +
            (double)(end_tv.tv_usec - begin_tv.tv_usec);
        total_time += elapsed_us / 1000.0;
    }
    return total_time;
}
// Scan the [begin, end) argv range for `option` and return the argument
// that immediately follows it, or a null pointer when the option is absent
// or is the last entry (no value).
char *getCmdOption(char **begin, char **end, const std::string &option) {
    char **pos = std::find(begin, end, option);
    if (pos == end) {
        return 0;
    }
    ++pos;
    return (pos != end) ? *pos : 0;
}
// Parses -w (warmup launches), -n (timed launches) and -i (sleep in ms
// between the two tests), then reports the average kernel-launch overhead
// measured with CUDA events and with a host wall clock.
int main(int argc, char *argv[]) {
int num_warmups = 100;
int num_steps = 2000000;
int interval = 2000;
if (char *value = getCmdOption(argv, argv + argc, "-w")) {
num_warmups = std::stoi(value);
}
if (char *value = getCmdOption(argv, argv + argc, "-n")) {
num_steps = std::stoi(value);
}
if (char *value = getCmdOption(argv, argv + argc, "-i")) {
interval = std::stoi(value);
}
// Test the kernel launch event time.
double event_total_time = test_cuda_kernel_launch_event_time(num_warmups, num_steps);
printf("Kernel launch overhead - event time: %3.5f ms \n", event_total_time / num_steps);
// Sleep for interval milliseconds to let the device settle, then run the next test.
std::this_thread::sleep_for(std::chrono::milliseconds(interval));
// Test the kernel launch wall time.
double wall_total_time = test_cuda_kernel_launch_wall_time(num_warmups, num_steps);
printf("Kernel launch overhead - wall time: %3.5f ms \n", wall_total_time / num_steps);
return 0;
}
|
14,569 | __global__ void count_sort(int *a, int *s_a, int n) {
// Enumeration ("count") sort: for each element, count how many elements
// must precede it, then write it directly to that slot of s_a.
// O(n^2) total comparisons; the `j < i` tie-break keeps equal keys stable
// and makes each destination index unique, so no two threads write the
// same s_a slot.
int index = threadIdx.x + blockIdx.x * blockDim.x;
int total_threads = gridDim.x * blockDim.x;
int i, j, count;
// Grid-stride loop: each thread places elements index, index+total_threads, ...
for (i = index; i < n; i+=total_threads) {
count = 0;
for (j = 0; j < n; ++j)
if (a[j] < a[i])
++count;
else if (a[j] == a[i] && j < i)
++count;
s_a[count] = a[i];
}
} |
14,570 | // Created by Alex Getz
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cooperative_groups.h>
#include <assert.h>
#include <iostream>
namespace cg = cooperative_groups;
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if(result!=cudaSuccess){
fprintf(stderr,"CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result==cudaSuccess);
}
#endif
return result;
}
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
///// Host Functions
///// Device Functions
// Group-wide sum reduction over `value`, using `temp` (caller-provided
// shared memory of at least tg.size() ints) as scratch.
// The fully reduced sum is valid on thread_rank() == 0; other ranks return
// partial sums.
// NOTE(review): the halving loop assumes tg.size() is a power of two --
// confirm callers only pass power-of-two groups.
__device__ int ReduceSum(cg::thread_group tg, int *temp, int value){
int lane = tg.thread_rank();
for(int i=tg.size()/2;i>0;i/=2){
temp[lane]=value;
tg.sync();  // publish every partial before any thread reads
if(lane<i){ value += temp[lane+i]; }
tg.sync();  // keep next iteration's writes from racing this read
}
return value;
}
// Per-thread partial sum over `input`, reading four ints at a time via
// int4 vector loads with a grid-stride loop.
// NOTE(review): the loop bound is n/4 int4s, so when n is not a multiple
// of 4 the trailing n%4 elements are silently skipped, and the int4 cast
// requires `input` to be 16-byte aligned -- confirm callers guarantee both.
__device__ int ThreadSum(int *input, int n){
int sum=0;
for(int i=threadIdx.x+(blockIdx.x*blockDim.x);i<n/4;i+=blockDim.x*gridDim.x)
{
int4 in = ((int4*)input)[i];
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
///// Device Kernels
// Sums input[0..n) into *output.
// Launch contract: dynamic shared memory of blockDim.x * sizeof(int)
// (consumed by ReduceSum), and *output zero-initialized by the caller,
// since every block contributes via atomicAdd.
__global__ void SumKernel(int *input, int *output, int n){
// unsigned int idx = threadIdx.x + (blockIdx.x*blockDim.x);
int my_sum = ThreadSum(input, n);
extern __shared__ int temp[];
auto g = cg::this_thread_block();
int block_sum = ReduceSum(g, temp, my_sum);
// One atomic per block keeps contention on *output low.
if (g.thread_rank() == 0) atomicAdd(output, block_sum);
// output[idx]=input[idx];
}
/////
// Sums 2^24 ones on the GPU with SumKernel and verifies the result on the
// host. Returns 0 on success (non-zero CUDA failures are reported by
// checkCuda in debug builds).
int main(int argc, char const *argv[]) {
    // Should expand to do some command line parsing of arrays in future
    int n = 1 << 24;
    int blockSize = 256;
    int nBlocks = (n + blockSize - 1) / blockSize;   // ceil-div grid
    int sharedBytes = blockSize * sizeof(int);       // scratch for ReduceSum
    int *sum, *data;
    checkCuda(cudaMallocManaged(&sum, sizeof(int)));
    checkCuda(cudaMallocManaged(&data, n * sizeof(int)));
    std::fill_n(data, n, 1); // initialize data: every element contributes 1
    checkCuda(cudaMemset(sum, 0, sizeof(int)));      // blocks atomicAdd into *sum
    SumKernel<<<nBlocks, blockSize, sharedBytes>>>(data, sum, n);
    // The original returned without synchronizing: the kernel result was
    // never awaited or checked, and the managed allocations were leaked.
    checkCuda(cudaGetLastError());        // catch launch-config errors
    checkCuda(cudaDeviceSynchronize());   // wait for the kernel, surface faults
    if (*sum == n) {
        printf("sum = %d (expected %d): PASS\n", *sum, n);
    } else {
        printf("sum = %d (expected %d): FAIL\n", *sum, n);
    }
    cudaFree(sum);
    cudaFree(data);
    return 0;
}
|
14,571 | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
// Element-wise vector add c = a + b over n doubles.
// Grid-stride loop: each thread handles indices x, x + stride, ...
// The original used `while (x < n)` without ever advancing x, so every
// in-range thread looped forever (spamming printf) and the kernel never
// terminated; the debug printf is dropped along with the fix.
__global__ void addVectorsKernel(const double* a,
                                 const double* b,
                                 double* c,
                                 int n) {
  int stride = blockDim.x * gridDim.x;
  for (int x = blockDim.x * blockIdx.x + threadIdx.x; x < n; x += stride) {
    c[x] = a[x] + b[x];
  }
}
int main(int argc, char** argv) {
if (argc == 2) {
int n = atoi(argv[1]);
size_t size = n * sizeof(double);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
double* h_A = (double*)malloc(size);
double* h_B = (double*)malloc(size);
double* h_C = (double*)malloc(size);
for (int i = 0; i < n; i++) {
h_A[i] = rand() % 10;
h_B[i] = rand() % 10;
}
double* d_A = NULL;
double* d_B = NULL;
double* d_C = NULL;
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
double Blocks = 1024; // threads per block
double Grids = (n - 1) / Blocks + 1; // blocks per grid
cudaEventRecord(start);
addVectorsKernel<<<Grids, Blocks>>>(d_A, d_B, d_C, n);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float msecs = 0;
cudaEventElapsedTime(&msecs, start, stop);
std::cout << "GPU Elapsed Time: " << msecs << " ms.\n";
for (int i = 0; i < n; i++) {
if (h_C[i] != h_A[i] + h_B[i]) {
std::cerr << "TEST FAILED...\n";
return 1;
}
}
std::cout << "TEST PASSED!\n";
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
return 0;
} |
14,572 | #include "includes.h"
// First phase of a parallel counting sort: each block builds a private
// histogram (in shared memory) of its slice of A, then writes that
// histogram to B at offset block_id * MAX_VALUE. A[i] values must lie in
// [0, MAX_VALUE). A later step presumably merges the per-block histograms.
__global__ void count(int *A, int *B, int n) {
int b_id = blockIdx.x,
b_num = gridDim.x,
b_size,
b_offset,
t_id = threadIdx.x,
t_num = blockDim.x,
t_size,
t_offset,
offset;
// initialize a shared memory array to store the count for each block.
__shared__ int count[MAX_VALUE];
// set initial values to zeros. Each thread sets its own share to zero.
t_size = (t_num > MAX_VALUE ? 1 : MAX_VALUE / t_num);
offset = t_id * t_size;
for (int i = offset; i < offset + t_size && i < MAX_VALUE; ++i)
count[i] = 0;
// wait until all threads have completed the initialization process.
__syncthreads();
// accumulate the counts of each value. Each thread counts a certain portion
// of the unsorted array.
b_size = (b_num > n ? 1 : n / b_num);
b_offset = b_id * b_size;
t_size = (t_num > b_size ? 1 : b_size / t_num);
offset = b_offset + t_id * t_size;
for (int i = offset; i < offset + t_size && i < b_offset + b_size && i < n; ++i)
atomicAdd(&count[A[i]], 1);
// wait until all threads have completed the counting phase.
__syncthreads();
// copy the block count into global memory. Each thread copies its portion to
// the global memory.
t_size = (t_num > MAX_VALUE ? 1 : MAX_VALUE / t_num);
t_offset = t_id * t_size;
offset = b_id * MAX_VALUE + t_offset;
if (offset + t_size <= (b_id + 1) * MAX_VALUE)
memcpy(&B[offset], &count[t_offset], sizeof(int) * t_size);
} |
14,573 | #define z11 0
#define z12 6
#define z13 12
#define z21 13
#define z22 19
#define z23 25
#include <iostream>
void update(double R[6][6], double Rt[6][6], double zeta1[], double zeta2[], int n, int bodynum, int zetanum);
void MSsetup(double Minv[6][6],double S[6][6],double m, double I[3][3]);
void makeSt(double S[6][6], double r[]);
void savez3(double Minv[6][6],double S[6][6] ,double Fa[], double nZetas[], int n, int bodynum, int zetanum);
void makeR(double R[6][6], double Rt[6][6], double angle);
void printm(double A[6][6]);
void printa(double A[], int len);
// Rotates each body's zeta vectors into the current configuration and
// rebuilds the z13/z23 force-dependent terms from the body's state
// (accumulated joint angle and angular rate), mass, length and inertia.
// Results are written into nZetas (layout: 26 columns per body, n bodies
// per row of length n*26).
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[])
{
    double R[6][6];      // rotation matrix for the current body angle
    double rv[3];        // joint position vector relative to the body
    double Fa[6];        // applied force vector (gravity + centripetal terms)
    double Rt[6][6];     // transpose of R
    double Minv[6][6];   // inverse mass/inertia matrix
    double S[6][6];      // shifter matrix
    double Inertia[3][3];
    double g = 9.81; //Set the gravitational constant
    for(int r =0; r<6; r++)
    {
        Fa[r]=0;
    }
    long index1;
    long rowlen;
    double angle;
    double w;
    angle=0;
    for(int i =0; i<n; i++)
    {
        // Absolute angle / angular rate of body i: sum of the joint
        // coordinates (and their rates, stored n entries later) up to i.
        angle=0;
        w=0;
        for(int c =0; c<=i; c++)
        {
            angle += state[c];
            w += state[c+n];
        }
        index1=i*26;
        rowlen=n*26;
        // Extract body i's 3x3 inertia block from the packed II array.
        for(int r =0; r<3; r++)
        {
            for(int c =0; c<3; c++)
            {
                Inertia[r][c]=II[c+r*n*3+i*3];
            }
        }
        makeR(R, Rt, angle);
        update(R,Rt,bodyZetas,nZetas,n,i,z11);
        update(R,Rt,bodyZetas,nZetas,n,i,z12);
        update(R,Rt,bodyZetas,nZetas,n,i,z21);
        update(R,Rt,bodyZetas,nZetas,n,i,z22);
        // Zero the z13/z23 columns before recomputing them below.
        // (The original wrapped this in a second, redundant r-loop that
        // re-ran the same idempotent zeroing six times; removed.)
        for(int r =0; r<6 ; r++)
        {
            nZetas[r*rowlen+index1+z13]=0;
            nZetas[r*rowlen+index1+z23]=0;
        }
        // rv is now r01: inboard joint position (half-length, rotated).
        rv[0]=-1*l[i]*sin(angle)/2;
        rv[1]=l[i]*cos(angle)/2;
        rv[2]=0;
        // Centripetal force components plus gravity on the y axis.
        Fa[3]=-1*m[i]*w*w*rv[0];
        Fa[4]=-1*g*m[i]-m[i]*w*w*rv[1];
        MSsetup(Minv,S,m[i],Inertia);
        makeSt(S,rv);
        savez3(Minv,S,Fa,nZetas,n,i,z13);
        // rv is now r02: outboard joint position (opposite direction).
        rv[0]=l[i]*sin(angle)/2;
        rv[1]=-1*l[i]*cos(angle)/2;
        rv[2]=0;
        Fa[3]=-1*m[i]*w*w*rv[0];
        Fa[4]=-1*g*m[i]-m[i]*w*w*rv[1];
        makeSt(S,rv);
        savez3(Minv,S,Fa,nZetas,n,i,z23);
    }
}
|
14,574 | #include <cuda.h>
#include <iostream>
using namespace std;
// macros
#define printCudaProperty(PROPERTY) { \
cout << "\t" #PROPERTY "=" << dev_prop.PROPERTY << endl; \
}
// print cuda device properties
void printCudaDeviceProperties()
{
int dev_count;
cudaGetDeviceCount(&dev_count);
cout << "Number of devices=" << dev_count << endl << endl;
cudaDeviceProp dev_prop;
for (int i = 0; i < dev_count; i++)
{
cout << "Device " << i << ":" << endl;
cudaGetDeviceProperties(&dev_prop, i);
printCudaProperty(name);
printCudaProperty(totalGlobalMem);
printCudaProperty(sharedMemPerBlock);
printCudaProperty(regsPerBlock);
printCudaProperty(warpSize);
printCudaProperty(memPitch);
printCudaProperty(maxThreadsPerBlock);
printCudaProperty(maxThreadsDim[0]);
printCudaProperty(maxThreadsDim[1]);
printCudaProperty(maxThreadsDim[2]);
printCudaProperty(maxGridSize[0]);
printCudaProperty(maxGridSize[1]);
printCudaProperty(maxGridSize[2]);
printCudaProperty(clockRate);
printCudaProperty(totalConstMem);
printCudaProperty(major);
printCudaProperty(minor);
printCudaProperty(deviceOverlap);
printCudaProperty(multiProcessorCount);
printCudaProperty(kernelExecTimeoutEnabled);
printCudaProperty(integrated);
printCudaProperty(canMapHostMemory);
printCudaProperty(computeMode);
printCudaProperty(concurrentKernels);
printCudaProperty(ECCEnabled);
printCudaProperty(pciBusID);
printCudaProperty(pciDeviceID);
printCudaProperty(pciDomainID);
printCudaProperty(tccDriver);
printCudaProperty(asyncEngineCount);
printCudaProperty(unifiedAddressing);
printCudaProperty(memoryClockRate);
printCudaProperty(memoryBusWidth);
printCudaProperty(l2CacheSize);
printCudaProperty(maxThreadsPerMultiProcessor);
}
}
// This program prints out the CUDA device properties.
int main(int argc, char* argv[])
{
printCudaDeviceProperties();
return 0;
} |
14,575 | #include<stdlib.h>
#include<stdio.h>
// Adds the two device-resident scalars *a and *b and stores the sum in *c.
// Intended for a single-thread launch (<<<1,1>>>).
__global__ void add(int* a , int *b,int *c)
{
*c = *a + *b;
}
// Adds two integers (3 + 5) on the GPU and prints the result.
int main(void)
{
    // Host operands, host result slot, and their device counterparts.
    int host_a = 3;
    int host_b = 5;
    int result;
    int *dev_a, *dev_b, *dev_c;
    const int size = sizeof(int);
    // One int per operand/result on the device.
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);
    // Upload operands, run a single-thread add, copy the sum back
    // (the blocking cudaMemcpy also waits for the kernel).
    cudaMemcpy(dev_a, &host_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, size, cudaMemcpyHostToDevice);
    add<<<1,1>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(&result, dev_c, size, cudaMemcpyDeviceToHost);
    printf("Result :%d \n", result);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
14,576 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 512
#define BLOCK_SIZE 16
// GPU kernel: element-wise addition C = A + B over an N x N matrix stored
// in row-major linear memory. One thread per element.
__global__ void MatAdd(float *A, float *B, float *C)
{
// Derive this thread's (x, y) coordinate from the CUDA grid/block layout.
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Convert to a linear (row-major) index.
int idx = j * N + i;
if (i < N && j < N)
{
C[idx] = A[idx] + B[idx];
}
}
// Adds two N x N matrices of constants (1.0 + 2.0) on the GPU and checks
// that every output element equals 3.0.
int main()
{
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
int i;
// Allocate host memory (linear layout).
h_A = (float *)malloc(N * N * sizeof(float));
h_B = (float *)malloc(N * N * sizeof(float));
h_C = (float *)malloc(N * N * sizeof(float));
// Initialize host values.
for (i = 0; i < (N * N); i++)
{
h_A[i] = 1.0;
h_B[i] = 2.0;
h_C[i] = 0.0;
}
// Allocate device (GPU) memory.
cudaMalloc((void **)&d_A, N * N * sizeof(float));
cudaMalloc((void **)&d_B, N * N * sizeof(float));
cudaMalloc((void **)&d_C, N * N * sizeof(float));
// Copy the inputs to the device.
cudaMemcpy(d_A, h_A, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, N * N * sizeof(float), cudaMemcpyHostToDevice);
// N must divide evenly by BLOCK_SIZE for this grid to cover the matrix.
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
dim3 numBlock(N / BLOCK_SIZE, N / BLOCK_SIZE);
// Launch the MatAdd kernel.
MatAdd<<<numBlock, blockSize>>>(d_A, d_B, d_C);
// Wait for all GPU threads to finish.
cudaDeviceSynchronize();
// Copy the result back to the host.
cudaMemcpy(h_C, d_C, N * N * sizeof(float), cudaMemcpyDeviceToHost);
// Verify correctness: every element should be 1.0 + 2.0 = 3.0.
for (i = 0; i < (N * N); i++)
{
if (h_C[i] != 3.0)
{
printf("Error:%f, idx:%d\n", h_C[i], i);
break;
}
}
printf("PASS\n");
// free memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
14,577 | #include "includes.h"
// 1D antisymmetric stencil of radius RADIUS over `in`, written to `out`,
// staging each block's tile plus halo cells in shared memory.
// Assumes BDIM == blockDim.x and that `coef` (coefficient array provided
// by includes.h) has at least RADIUS+1 entries.
// NOTE(review): halo loads read in[idx - RADIUS] and in[idx + BDIM] with
// no bounds checks, so the first and last blocks read out of range unless
// the caller pads `in` by RADIUS on both ends -- confirm the launch contract.
__global__ void stencil_1d(float *in, float *out)
{
// shared memory: BDIM interior elements plus RADIUS halo cells per side
__shared__ float smem[BDIM + 2 * RADIUS];
// index to global memory
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// index to shared memory for stencil calculation
int sidx = threadIdx.x + RADIUS;
// Read data from global memory into shared memory
smem[sidx] = in[idx];
// read halo part to shared memory (first RADIUS threads load both halos)
if (threadIdx.x < RADIUS)
{
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BDIM] = in[idx + BDIM];
}
// Synchronize (ensure all the data is available)
__syncthreads();
// Apply the stencil: coef[i]-weighted forward/backward differences
float tmp = 0.0f;
#pragma unroll
for (int i = 1; i <= RADIUS; i++)
{
tmp += coef[i] * (smem[sidx + i] - smem[sidx - i]);
}
// Store the result
out[idx] = tmp;
}
14,578 | #include "includes.h"
// Multiplies a tridiagonal matrix (diagonals p_d = lower, p_m = main,
// p_u = upper, each length n) by vectors packed into `u`, in place:
// u <- T * u. Each group of n consecutive threads in a block handles one
// vector; dynamic shared memory of 4 * blockDim.x * sizeof(float) is
// expected.
// NOTE(review): the per-group slice base uses Qt, but the staging stores
// index with the block-wide threadIdx.x while the compute reads use the
// group-local tidx -- for Qt > 0 these address different slots. Verify the
// intended indexing (it is only consistent when one group per block, Qt==0).
__global__ void tridiag_x_matrix_k(float* p_d, float* p_m, float* p_u, float* u, int n)
{
// Identifies the thread working within a group
int tidx = threadIdx.x % n;
// Identifies the data concerned by the computations
int Qt = (threadIdx.x - tidx) / n;
extern __shared__ float sAds[];
float* su, * sp_d, * sp_m, * sp_u;
// Carve per-group slices out of the dynamic buffer:
// [vector | lower diag | main diag | upper diag].
su = (float*)&sAds[4 * Qt * n];
sp_d = (float*)&su[n];
sp_m = (float*)&sp_d[n];
sp_u = (float*)&sp_m[n];
su[threadIdx.x] = u[blockIdx.x * blockDim.x + threadIdx.x];
sp_d[threadIdx.x] = p_d[tidx];
sp_m[threadIdx.x] = p_m[tidx];;
sp_u[threadIdx.x] = p_u[tidx];;
__syncthreads();
float temp;
// Interior rows use all three diagonals; the first and last rows drop the
// term that falls outside the matrix.
if (tidx > 0 && tidx < n - 1)
temp = sp_d[tidx] * su[tidx - 1] + sp_m[tidx] * su[tidx] + sp_u[tidx] * su[tidx + 1];
else if (tidx == 0)
temp = sp_m[tidx] * su[tidx] + sp_u[tidx] * su[tidx + 1];
else
temp = sp_d[tidx] * su[tidx - 1] + sp_m[tidx] * su[tidx];
u[blockIdx.x * blockDim.x + threadIdx.x] = temp;
}
14,579 | // All done in the .cpp file |
14,580 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__, __LINE__)
void cuda_check(string file, int line) {
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess) {
cout << endl
<< file << ", line " << line << ": " << cudaGetErrorString(e) << " ("
<< e << ")" << endl;
exit(1);
}
}
__device__ void square(float *a, int n) {
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if (ind < n)
a[ind] = a[ind] * a[ind];
}
__global__ void vecSq(float *a, int n) { square(a, n); }
// Squares an array on the CPU, then repeats the computation on the GPU and
// prints both results for comparison. Every CUDA call is now followed by
// CUDA_CHECK, as the exercise's own TODO requires -- the original omitted
// all error checks.
int main(int argc, char **argv) {
  // alloc and init input arrays on host (CPU)
  int n = 10;
  float *a = new float[n];
  for (int i = 0; i < n; i++)
    a[i] = i;
  // CPU reference computation: square each element in place
  for (int i = 0; i < n; i++) {
    float val = a[i];
    val = val * val;
    a[i] = val;
  }
  // print result
  cout << "CPU:" << endl;
  for (int i = 0; i < n; i++)
    cout << i << ": " << a[i] << endl;
  cout << endl;
  // GPU computation
  // reinit data
  for (int i = 0; i < n; i++)
    a[i] = i;
  // Allocate device memory and copy the input up, checking each call.
  float *d_a = NULL;
  cudaMalloc(&d_a, n * sizeof(float));
  CUDA_CHECK;
  cudaMemcpy(d_a, a, n * sizeof(float), cudaMemcpyHostToDevice);
  CUDA_CHECK;
  // 64 threads per block; enough blocks to cover all n elements.
  dim3 block = dim3(64, 1, 1);
  dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
  vecSq<<<grid, block>>>(d_a, n);
  CUDA_CHECK; // surfaces launch-configuration errors
  cudaMemcpy(a, d_a, n * sizeof(float), cudaMemcpyDeviceToHost);
  CUDA_CHECK; // blocking copy also waits for the kernel
  cudaFree(d_a);
  CUDA_CHECK;
  // print result
  cout << "GPU:" << endl;
  for (int i = 0; i < n; i++)
    cout << i << ": " << a[i] << endl;
  cout << endl;
  // free CPU arrays
  delete[] a;
}
|
14,581 | extern "C"
__global__ void add_kernel(double *vals, double *ans, int N, double mu)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N)
ans[idx] = vals[idx] + mu;
}
extern "C"
// NOTE(review): despite the "add" name and the `mu` parameter, this kernel
// ASSIGNS mu to vals[idx] rather than adding it (compare add_kernel above,
// which computes vals[idx] + mu). Callers may rely on the set semantics, so
// the behavior is documented rather than changed -- confirm intent.
__global__ void fadd_kernel(float *vals, int N, float mu)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N)
vals[idx] = mu;
}
extern "C"
__global__ void dset_kernel(double *vals, int N, double mu)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N)
vals[idx] = mu;
}
extern "C"
__global__ void dset_both_kernel(double *vals, int N, double mu, float sd)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N)
vals[idx] = mu + sd;
}
extern "C"
__global__ void dset_array_kernel(double *vals, int N, double *mu)
{
// Taken from geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pd
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
/* how big is each block within a grid */
int blocksize = blockDim.x * blockDim.y * blockDim.z;
/* get thread within a block */
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N)
vals[idx] = mu[0];
}
|
14,582 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define imin(a,b) (a<b?a:b)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock);
// Computes partial dot products of a and b (both length N): each thread
// accumulates a grid-strided share, each block reduces its threads'
// partials in shared memory, and one value per block is written to c.
// The host sums the per-block results to finish the reduction.
__global__ void dotProduct(float* a, float* b, float* c) {
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
// Grid-stride accumulation of this thread's share of the dot product.
float temp = 0;
while(tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
//set the cache values.
cache[cacheIndex] = temp;
//synchronize the threads in this block.
__syncthreads();
//for reductions, threadsPerBlock must be power of 2 because of the following code.
int i = blockDim.x/2;
while(i != 0) {
if(cacheIndex < i) {
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads();
i /= 2;
}
// Thread 0 publishes the block's partial sum.
if(cacheIndex == 0) {
c[blockIdx.x] = cache[0];
}
}
int main()
{
float *a, *b, *partial_c;
float *dev_a, *dev_b, *partial_dev_c;
//allocate memory on the CPU side.
a = (float*)malloc(N * sizeof(float));
b = (float*)malloc(N * sizeof(float));
partial_c = (float*)malloc(blocksPerGrid * sizeof(float));
//allocate the memory on the GPU.
cudaMalloc((void**)&dev_a, N*sizeof(float));
cudaMalloc((void**)&dev_b, N*sizeof(float));
cudaMalloc((void**)&partial_dev_c, blocksPerGrid * sizeof(float));
//fill in the host memory with data.
for(int i = 0; i < N; i++) {
a[i] = i;
b[i] = 2 * i;
}
//copy the arrays 'a' and 'b' to the GPU.
cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
dotProduct<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, partial_dev_c);
//copy the array c back from the GPU to the CPU.
cudaMemcpy(partial_c, partial_dev_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
//finish up on the CPU side.
float c = 0;
for(int i = 0; i < blocksPerGrid; i++) {
c += partial_c[i];
}
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
std::cout<<"Does GPU value "<<c<<" = "<<2*sum_squares((float)(N - 1))<<std::endl;
//free memory on the GPU side.
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(partial_dev_c);
//free memory on the CPU side.
free(a);
free(b);
free(partial_c);
return 0;
}
|
14,583 | #include <fstream>
#include <iostream>
#include <string>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda.h>
#define TILE_WIDTH 16
// Charge une matrice disponible dans les repertoires exemples
// Calcul C = A * B
// Tiled matrix multiply C = A * B using TILE_WIDTH x TILE_WIDTH shared
// memory tiles. Out-of-range tile elements are zero-padded so partial edge
// tiles contribute correctly. All threads return early (no-op) when the
// inner dimensions disagree.
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
__shared__ float sharedA[TILE_WIDTH][TILE_WIDTH];
__shared__ float sharedB[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Output coordinate this thread accumulates.
int Row = by*TILE_WIDTH + ty;
int Col = bx*TILE_WIDTH + tx;
float Cvalue = 0.0;
if (numAColumns != numBRows) return ;
// March tiles across A's columns / B's rows.
for (int i = 0; i < (int)(ceil((float)numAColumns/TILE_WIDTH)); i++)
{
// Stage one tile of A and one of B, zero-padding outside the matrices.
if (i*TILE_WIDTH + tx < numAColumns && Row < numARows){
sharedA[ty][tx] = A[Row*numAColumns + i*TILE_WIDTH + tx];
}else{
sharedA[ty][tx] = 0.0;
}
if (i*TILE_WIDTH + ty < numBRows && Col < numBColumns){
sharedB[ty][tx] = B[(i*TILE_WIDTH + ty)*numBColumns + Col];
}else{
sharedB[ty][tx] = 0.0;
}
__syncthreads();
if(Row < numARows && Col < numBColumns){
for(int j = 0; j < TILE_WIDTH; j++)
Cvalue += sharedA[ty][j] * sharedB[j][tx];
}
// Keep fast threads from overwriting tiles still being read.
__syncthreads();
}
if (Row < numCRows && Col < numCColumns)
C[Row*numCColumns + Col] = Cvalue;
}
bool load_matrix(char * filename, float * &matrix, int &nx, int &ny){
std::string line;
std::ifstream infile(filename);
if (!infile.is_open()) {
std::cout << "Fichier introuvable: "<< filename << std::endl;
return 0;
}
// Charge la taile de la matrice
infile >> nx >> ny;
// Alloue le tableau correspondant
matrix = new float[nx*ny];
// Charge la matrice
for (int i=0; i< nx*ny; i++){
infile >> matrix[i];
}
infile.close();
return 1;
}
// Usage: prog A.txt B.txt expected.txt
// Multiplies A*B on the GPU with the tiled kernel and reports the mean
// squared error against the expected matrix.
int main(int argc, char ** argv) {
    float * hostA;
    float * hostB;
    float * hostC;
    float * hostExpectedOutput;
    float * deviceA;
    float * deviceB;
    float * deviceC;
    int numARows;
    int numAColumns;
    int numBRows;
    int numBColumns;
    int numCRows;
    int numCColumns;
    // Guard against missing arguments before indexing argv[1..3].
    if (argc < 4) {
        std::cout << "Usage: " << argv[0] << " A B expected" << std::endl;
        return 1;
    }
    /// Load the input matrices (allocated with new[] inside load_matrix).
    if (!load_matrix(argv[1], hostA, numARows, numAColumns)) return 1;
    if (!load_matrix(argv[2], hostB, numBRows, numBColumns)) return 1;
    /// C takes A's row count and B's column count.
    numCRows = numARows;
    numCColumns = numBColumns;
    /// Allocate hostC (malloc here, so it is released with free below).
    hostC = (float*) malloc(sizeof(float)*numCRows*numCColumns);
    /// Allocate device memory.
    cudaMalloc((void**)&deviceA , sizeof(float)*numARows*numAColumns );
    cudaMalloc((void**)&deviceB , sizeof(float)*numBRows*numBColumns);
    cudaMalloc((void**)&deviceC , sizeof(float)*numCRows*numCColumns);
    /// Copy the inputs to the GPU.
    cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
    /// One TILE_WIDTH x TILE_WIDTH block per output tile.
    int dimX = (int)(ceil((float)numCColumns / TILE_WIDTH));
    int dimY = (int)(ceil((float)numCRows / TILE_WIDTH));
    dim3 DimGrid(dimX, dimY);
    dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
    /// Run the kernel and wait for it (cudaThreadSynchronize is deprecated).
    matrixMultiplyShared<<<DimGrid , DimBlock>>>(deviceA , deviceB , deviceC , numARows , numAColumns, numBRows ,numBColumns , numCRows , numCColumns);
    cudaDeviceSynchronize();
    /// Fetch the result back to host memory.
    cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns , cudaMemcpyDeviceToHost);
    /// Free device memory exactly once (the original cudaFree'd each pointer twice).
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    int numORows, numOColumns;
    if (!load_matrix(argv[3], hostExpectedOutput, numORows, numOColumns)) return 1;
    float error = 0;
    for (int i = 0; i < numCColumns * numCRows; i++) {
        error += (hostExpectedOutput[i] - hostC[i]) * (hostExpectedOutput[i] - hostC[i]);
    }
    error /= (float)(numCColumns * numCRows);
    std::cout << "SQM: " << error << std::endl;
    /// new[] allocations take delete[]; hostC came from malloc, so free().
    /// (The original used scalar delete on new[] memory and then free()'d
    /// the same pointers again -- mismatched deallocation and double free.)
    delete[] hostExpectedOutput;
    delete[] hostA;
    delete[] hostB;
    free(hostC);
    return 0;
}
|
14,584 | #include <stdio.h>
#define BLOCK_SIZE 64
// Naive matrix multiply c = a * b, where a is row_a x col_a and b is
// row_b x col_b, all row-major; one output element per thread.
// Requires col_a == row_b; threads past row_a*col_b elements do nothing.
__global__ void Mul(int *a, int *b, int row_a, int col_a, int row_b, int col_b, int *c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < row_a * col_b) {
int value = 0;
// Map the flat thread id to an output (row, col) coordinate.
int row = tid / col_b, col = tid % col_b;
for (int k = 0; k < col_a; k++)
value += a[row * col_a + k] * b[k * col_b + col];
c[tid] = value;
//c[tid] = 1;
}
}
int main() {
int data_a[3][3] = {0}, data_b[3][3] = {0}, data_c[3][3];
int *dev_a, *dev_b, *dev_c;
data_a[0][0] = 1, data_a[1][1] = 1, data_a[2][2] = 1;
data_b[0][0] = 2, data_b[1][1] = 1, data_b[2][2] = 1;
cudaMalloc((void **)&dev_a, 3 * 3 * sizeof(int));
cudaMemcpy(dev_a, data_a, 3 * 3 * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&dev_b, 3 * 3 * sizeof(int));
cudaMemcpy(dev_b, data_b, 3 * 3 * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&dev_c, 3 * 3 * sizeof(int));
Mul<<<10, 10>>>(dev_a, dev_b, 3, 3, 3, 3, dev_c);
cudaMemcpy(data_c, dev_c, 3 * 3 * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
printf("%d ", data_c[i][j]);
}
printf("\n");
}
}
|
14,585 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
// For each query point in xyz2, gathers up to nsample neighbor indices from
// xyz1 lying strictly inside `radius`. One block per batch element; threads
// stride over the m query points. Slots past the found neighbors keep the
// first neighbor's index (repeated), so idx is always fully valid.
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
// Advance all pointers to this batch element's slice.
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
// Clamp the distance away from exact zero before comparing.
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
// One block per batch element; threads stride over the m rows. Each thread
// copies its row, then partially selection-sorts it so the k smallest
// distances (with their original column indices) land in the first k slots.
__global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
    const int batch = blockIdx.x;
    const float *src = dist + m*n*batch;
    int *oi = outi + m*n*batch;
    float *o = out + m*n*batch;
    // Stage 1: copy the distances and initialise indices 0..n-1.
    for (int r = threadIdx.x; r < m; r += blockDim.x) {
        for (int col = 0; col < n; ++col) {
            o[r*n+col] = src[r*n+col];
            oi[r*n+col] = col;
        }
    }
    // Stage 2: partial selection sort of the first k entries of each row.
    // Each thread owns the same rows as in stage 1, so no barrier is needed.
    for (int r = threadIdx.x; r < m; r += blockDim.x) {
        float *row = o + r*n;
        int *row_i = oi + r*n;
        for (int s = 0; s < k; ++s) {
            int best = s;
            for (int t = s+1; t < n; ++t) {
                if (row[t] < row[best])
                    best = t;
            }
            // Swap the found minimum into slot s (value and index together).
            if (best != s) {
                float tv = row[best];
                row[best] = row[s];
                row[s] = tv;
                int ti = row_i[best];
                row_i[best] = row_i[s];
                row_i[s] = ti;
            }
        }
    }
}
// Host wrapper: one block per batch element, 256 threads striding over the
// query points. No device sync here — the caller controls synchronization.
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    const int threads = 256;
    query_ball_point_gpu<<<b, threads>>>(b, n, m, radius, nsample, xyz1, xyz2, idx, pts_cnt);
}
// Host wrapper: one block per batch element, 256 threads striding over the
// m rows. No device sync here — the caller controls synchronization.
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
    const int threads = 256;
    selection_sort_gpu<<<b, threads>>>(b, n, m, k, dist, outi, out);
}
// Host wrapper: one block per batch element, 256 threads striding over the
// query points. No device sync here — the caller controls synchronization.
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
    const int threads = 256;
    group_point_gpu<<<b, threads>>>(b, n, c, m, nsample, points, idx, out);
}
// Host wrapper for the gradient scatter kernel: one block per batch element,
// 256 threads striding over the query points. No device sync here — the
// caller controls synchronization.
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){
    const int threads = 256;
    group_point_grad_gpu<<<b, threads>>>(b, n, c, m, nsample, grad_out, idx, grad_points);
}
|
14,586 | // compile -> nvcc lab_2.cu -o lab_2
// execute -> lab_2.exe | lab_2.out
// Bruno Maglioni A01700879
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 4
#define ThreadsPerBlock N * N
#define NumBlocks ceil((ThreadsPerBlock + (N * N)) / ThreadsPerBlock)
// Naive dense matrix multiply on the GPU: c = a * b for n x n row-major
// matrices of doubles. One thread computes one output element.
// Fix: the store to c was previously OUTSIDE the bounds guard, so every
// thread beyond the matrix (grids are rounded up) wrote out of bounds of c.
// The store now happens only for (row, col) inside the matrix.
__global__ void multi(double *a, double *b, double *c, int n){
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    if(row < n && col < n){
        double sum = 0.0;
        for(int i = 0; i < n; i++){
            sum += a[row * n + i] * b[i * n + col];
        }
        c[row * n + col] = sum;
    }
}
// Matrix multiplication on the CPU: c = a * b, all matrices row-major.
// a is m x p, b is p x q, c is m x q.
// m = rows of first matrix, p = rows of second matrix, q = columns of second matrix
// Fix: the original indexed all three matrices with stride m (the row count
// of a), which is only correct when m == p == q; rectangular inputs were
// read and written at the wrong offsets. Each matrix now uses its own
// column count as the row stride (a: p, b: q, c: q).
void mat_multi(double *a, double *b, double *c, int m, int p, int q){
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < q; j++) {
            double sum = 0.0;
            for (int k = 0; k < p; k++) {
                sum += a[i * p + k] * b[k * q + j];
            }
            c[i * q + j] = sum;
        }
    }
}
// Fill an N x N matrix with pseudo-random integers in [0, 50).
// Values are drawn in row-major order, preserving the rand() call sequence.
void fill_mat(double* mat){
    for(int idx = 0; idx < N * N; idx++){
        mat[idx] = rand() % 50;
    }
}
// Print an N x N matrix: tab-separated values, one row per line, followed by
// a trailing blank line.
void print_mat(double* mat){
    for(int r = 0; r < N; r++){
        for(int c = 0; c < N; c++){
            printf("%f\t", mat[r * N + c]);
        }
        printf("\n");
    }
    printf("\n");
}
// Host driver: build two random N x N matrices, multiply them on the GPU,
// and print inputs and the result.
int main(){
    double *mat_1, *mat_2, *res;        // host matrices
    double *d_mat_1, *d_mat_2, *d_res;  // device matrices
    // Seed the RNG so each run produces different matrices.
    time_t t;
    srand((unsigned) time(&t));
    // Allocate memory on CPU
    mat_1 = (double*) malloc(sizeof(double) * N * N); // Matrix 1
    mat_2 = (double*) malloc(sizeof(double) * N * N); // Matrix 2
    res = (double*) malloc(sizeof(double) * N * N);   // Result Matrix
    // Allocate memory on GPU
    cudaMalloc((void**)&d_mat_1, sizeof(double) * N * N);
    cudaMalloc((void**)&d_mat_2, sizeof(double) * N * N);
    cudaMalloc((void**)&d_res, sizeof(double) * N * N);
    fill_mat(mat_1);
    fill_mat(mat_2);
    printf("Matrix 1:\n");
    print_mat(mat_1);
    printf("\nMatrix 2:\n");
    print_mat(mat_2);
    // Copy CPU variables to GPU
    cudaMemcpy(d_mat_1, mat_1, sizeof(double) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat_2, mat_2, sizeof(double) * N * N, cudaMemcpyHostToDevice);
    // Launch configuration.
    // Fix: the original used dim3(ThreadsPerBlock, ThreadsPerBlock) thread
    // blocks, i.e. (N*N, N*N) threads per block — this exceeds CUDA's
    // 1024-threads-per-block limit as soon as N > 5 and over-provisioned
    // threads even for N = 4. Use fixed 16x16 tiles with a ceil-divided grid.
    const int tile = 16;
    dim3 threads(tile, tile);
    dim3 blocks((N + tile - 1) / tile, (N + tile - 1) / tile);
    multi<<<blocks, threads>>>(d_mat_1, d_mat_2, d_res, N);
    // Report launch-configuration errors instead of failing silently.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
    // Copy result matrix from GPU to CPU (blocking copy also syncs the kernel)
    cudaMemcpy(res, d_res, sizeof(double) * N * N, cudaMemcpyDeviceToHost);
    printf("\nResult matrix:\n");
    print_mat(res);
    // Free CPU memory
    free(mat_1);
    free(mat_2);
    free(res);
    // Free GPU memory
    cudaFree(d_mat_1);
    cudaFree(d_mat_2);
    cudaFree(d_res);
    return 0;
}
|
14,587 | #include "includes.h"
// Backward pass of a shifted saturation activation.
// Grid-stride loop: correct for any launch configuration.
// The incoming gradient dx is rescaled by 2^(-shifting), then zeroed outside
// the open interval (-threshold, threshold) of the forward input x.
__global__ void cudaSSaturation_backPropagate_kernel(float* x, float* dx, unsigned int size, int shifting, float threshold)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = first; i < size; i += step) {
        // Undo the forward shift: divide or multiply by 2^|shifting|.
        float grad = dx[i];
        if (shifting > 0) {
            grad /= (1 << shifting);
        } else if (shifting < 0) {
            grad *= (1 << (-shifting));
        }
        dx[i] = grad;
        // Multiply by the saturation mask (1 inside the band, 0 outside).
        if (threshold != 0.0f) {
            dx[i] *= (x[i] > -threshold && x[i] < threshold) ? 1.0f : 0.0f;
        }
    }
}
14,588 | #include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime_api.h>
#define BASE_TYPE float
// Midpoint-rule partial sums: each thread scales one sample by the step h
// into dynamic shared memory; thread 0 then serially accumulates the block's
// samples and writes one partial sum per block.
// Requires blockDim.x * sizeof(BASE_TYPE) bytes of dynamic shared memory and
// a launch that exactly covers the input (there is no bounds guard).
__global__ void map(const BASE_TYPE *points, BASE_TYPE *result, const BASE_TYPE h)
{
    extern __shared__ BASE_TYPE cache[];
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    cache[threadIdx.x] = points[gid] * h;
    __syncthreads();
    if (threadIdx.x != 0)
        return;
    // Serial accumulation keeps the floating-point addition order fixed.
    BASE_TYPE acc = cache[0];
    for (int i = 1; i < blockDim.x; i++)
        acc += cache[i];
    result[blockIdx.x] = acc;
}
// Copy the per-block partial sums back to the host and add them up.
// Fix: the temporary host buffer allocated with new[] was leaked; it is now
// released before returning.
BASE_TYPE reduce(const BASE_TYPE *dev_map, const int map_count)
{
    BASE_TYPE *host_map = new BASE_TYPE[map_count];
    BASE_TYPE result = 0;
    cudaMemcpy(host_map, dev_map, map_count * sizeof(BASE_TYPE), cudaMemcpyDeviceToHost);
    for (int i = 0; i < map_count; i++)
        result += host_map[i];
    delete[] host_map; // was leaked in the original
    return result;
}
// Integrand f(x) = x; replace the body to integrate a different function.
BASE_TYPE func(BASE_TYPE x)
{
    return x;
}
// Sample the integrand at the N midpoints of [a, b] (midpoint rule).
// The caller owns — and must delete[] — the returned buffer.
BASE_TYPE* points(const BASE_TYPE a, const BASE_TYPE b, const int N)
{
    BASE_TYPE *samples = new BASE_TYPE[N];
    const BASE_TYPE h = (b - a) / N;
    for (int i = 0; i < N; i++)
        samples[i] = func(a + (i + 0.5) * h);
    return samples;
}
// Allocate `size` bytes on the device and, when `host` is non-NULL, copy
// that many bytes from it. Throws the failing cudaError_t on any error.
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
    cudaError_t status = cudaMalloc((void **)dev, size);
    if (status != cudaSuccess)
        throw status;
    if (host == NULL)
        return;
    status = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
    if (status != cudaSuccess)
        throw status;
}
// Build a 1-D launch configuration: `grid` gets N / threads_per_block
// blocks, `block` gets threads_per_block threads; both are echoed to stdout.
// NOTE(review): the division truncates, so any trailing
// N % threads_per_block elements would receive no thread — and the `map`
// kernel has no bounds guard, so rounding the grid up here alone would read
// out of bounds. Confirm callers only pass divisible sizes (main uses
// N = 20 with threads_per_block = 5).
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int threads_per_block, const int N)
{
    *grid = dim3(N / threads_per_block);
    *block = dim3(threads_per_block);
    printf("Block (%d, %d, %d)\n", block->x, block->y, block->z);
    printf("Grid (%d, %d, %d)\n", grid->x, grid->y, grid->z);
}
// Integrate func over [a, b] with N midpoint samples: per-block partial sums
// on the GPU (map), final accumulation on the host (reduce).
int main()
{
    const int N = 20;
    const int threads_per_block = 5;
    const int block_count = N / threads_per_block;
    const size_t in_size = N * sizeof(BASE_TYPE);
    const size_t out_size = block_count * sizeof(BASE_TYPE);
    BASE_TYPE a = 0, b = 5;
    // Renamed from blockDim/gridDim: the original names were swapped
    // relative to their contents and shadowed the CUDA built-in names.
    // grid_size receives the grid dimensions, block_size the block dimensions.
    dim3 grid_size, block_size;
    cuda_init_grid_and_block(&grid_size, &block_size, threads_per_block, N);
    BASE_TYPE *host_a = points(a, b, N), result;
    BASE_TYPE *dev_a, *dev_result;
    try
    {
        cuda_init_array(&dev_a, host_a, in_size);
        cuda_init_array(&dev_result, NULL, out_size);
    }
    catch (cudaError_t err)
    {
        fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Dynamic shared memory: one BASE_TYPE slot per thread.
    map<<<grid_size, block_size, threads_per_block * sizeof(BASE_TYPE)>>>(dev_a, dev_result, (b - a) / N);
    result = reduce(dev_result, block_count);
    printf("%3.2f\n", result);
    cudaFree(dev_a);
    cudaFree(dev_result);
    delete[] host_a;
    return 0;
}
14,589 | // fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* typedef struct __attribute__ ((packed)) { */
/* float real; */
/* float imag; */
/* } Complex; */
extern "C" {
__global__ void crossCorrelateKernel(const int n, float* out, const float* x, const float* y);
}
// Element-wise product of two interleaved complex float arrays
// (re, im pairs): out[i] = x[i] * y[i].
// NOTE(review): a cross-correlation in the frequency domain conjugates one
// operand; this kernel multiplies without conjugation — confirm the caller
// conjugates beforehand or that plain multiplication is intended.
// Thread layout: blockDim.y "warp tier" slots of blockDim.x lanes each,
// flattened to at most 512 elements per block.
__global__ void crossCorrelateKernel(const int n, float* out, const float* x, const float* y) {
    const int bi = blockIdx.x;           // block index over 512-element chunks
    const int wti = threadIdx.y;         // warp-tier index within the block
    const int tti = threadIdx.x;         // lane index within the tier
    const int nrThreadsN = min(512, n);  // elements handled per block
    const int nrThreadsNrThreadsN = min(32, nrThreadsN); // lanes per tier
    const int ti = wti * (1 * nrThreadsNrThreadsN) + tti; // flat thread id
    if (ti < nrThreadsN) {
        const int i = bi * (1 * nrThreadsN) + ti; // global element index
        if (i < n) {
            const float x_real = x[i * 2];
            const float x_imag = x[i * 2 + 1];
            const float y_real = y[i * 2];
            const float y_imag = y[i * 2 + 1];
            // (a+bi)(c+di) = (ac - bd) + (ad + bc)i
            out[i * 2] = x_real * y_real - x_imag * y_imag;
            out[i*2+1] = x_real * y_imag + x_imag * y_real;
        }
    }
}
|
14,590 | #include <stdio.h>
#define Width 32 // size of Width x Width matrix
#define TILE_WIDTH 16
// Naive square matrix multiply: Pd = Md * Nd, ncols x ncols, row-major.
// One thread computes one output element.
// Fix: added a bounds guard — the original read and wrote unconditionally,
// so any launch whose (rounded-up) grid overshoots the matrix accessed
// memory out of bounds.
__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= ncols || col >= ncols)
        return;
    // Pvalue accumulates the dot product of row `row` of Md with
    // column `col` of Nd.
    float Pvalue = 0;
    for (int k = 0; k < ncols; ++k){
        float Melement = Md[row*ncols+k];
        float Nelement = Nd[k*ncols+col];
        Pvalue += Melement * Nelement;
    }
    Pd[row*ncols+col] = Pvalue;
}
// Host driver: fills M with 1s and N with 2s, multiplies on the GPU using
// TILE_WIDTH x TILE_WIDTH thread tiles, and prints the result (every entry
// should equal 2 * Width).
int main (int argc, char *argv[]){
    int size = Width * Width * sizeof(float);
    float M[Width][Width], N[Width][Width], P[Width][Width];
    float *Md, *Nd, *Pd;
    // Constant inputs make the expected output easy to verify by eye.
    for (int r = 0; r < Width; r++){
        for (int c = 0; c < Width; c++){
            M[r][c] = 1;
            N[r][c] = 2;
        }
    }
    cudaMalloc( (void**)&Md, size);
    cudaMalloc( (void**)&Nd, size);
    cudaMalloc( (void**)&Pd, size);
    cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice);
    cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice);
    // Launch configuration: Width is an exact multiple of TILE_WIDTH here,
    // so the grid covers the matrix with no remainder.
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy( P, Pd, size, cudaMemcpyDeviceToHost);
    cudaFree( Md);
    cudaFree( Nd);
    cudaFree( Pd);
    for (int r = 0; r < Width; r++){
        for (int c = 0; c < Width; c++){
            printf("%.2f ",P[r][c]);
        }
        printf("\n");
    }
}
14,591 | #include <stdio.h>
// Kernel: each launched thread prints one greeting line with its thread and
// block index. Print ordering across threads is unspecified; device printf
// output is buffered until the next host-side synchronization point.
__global__ void helloCUDA(void)
{
    printf("Hello thread %d in block %d\n", threadIdx.x, blockIdx.x);
}
// Launch 3 blocks of 4 threads of helloCUDA, then reset the device.
// Fix: kernel launches are asynchronous — synchronize explicitly so the
// in-kernel printf buffer is guaranteed to be flushed before the process
// tears down, instead of relying on cudaDeviceReset() alone.
int main()
{
    helloCUDA<<<3, 4>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
14,592 | #include "includes.h"
// Per-element training error for a float vector against an ideal vector.
// When the target is "on" (ideal > threshold): error = 1 - v, additionally
// scaled by scaleFoff if the output fell below threshold. When the target is
// "off": error = v^2, additionally scaled by scaleFon if the output rose
// above threshold.
// Fix: `1.0 - vectorValue` promoted the subtraction to double inside a
// float kernel; use the float literal 1.0f to stay in single precision.
__global__ void customErrorCalc_f32 (float* vector, float* ideal_vector, float threshold, float scaleFoff, float scaleFon, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        float vectorValue = vector[idx];
        if (ideal_vector[idx] > threshold) {
            // Target "on": distance to 1, penalised when wrongly off.
            output[idx] = 1.0f - vectorValue;
            if (vectorValue < threshold) {
                output[idx] *= scaleFoff;
            }
        } else {
            // Target "off": squared magnitude, penalised when wrongly on.
            output[idx] = vectorValue * vectorValue;
            if (vectorValue > threshold) {
                output[idx] *= scaleFon;
            }
        }
    }
}
14,593 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BILLION 1000000000.0F
// Pour pouvoir experimenter les performances avec les différents types
// FMT Permet d'avoir un % adapté pour le printf et donc de pas avoir de warning
#define TYPE int
#define FMT "d"
typedef struct
{
int x ;
int y ;
} Point ;
// Pretty-print NB values of ptBuffer, five per line, each entry tagged with
// tabMsg and its index.
void Affiche (char * tabMsg, TYPE * ptBuffer, int NB)
{
    for (int k = 0; k < NB; k++)
    {
        printf(" - %s[%03d] = %6" FMT, tabMsg, k, ptBuffer[k]);
        // Flush a newline after every fifth entry.
        if ((k % 5) == (4))
        {
            printf("\n");
            fflush(stdout);
        }
    }
    printf("\n");
    fflush(stdout);
}
// Verify that the Nb values at pto are in non-decreasing order.
// On the first inversion the position is reported and the process exits
// with code 25; otherwise a success line is printed and 0 is returned.
int check(char * msg, int Nb, TYPE * pto)
{
    for (int i = 0; i < Nb - 1; i++)
    {
        if (pto[i] > pto[i + 1])
        {
            printf("Check %s pour %d - Erreur en position %d %" FMT " > %" FMT " \n", msg, Nb, i, pto[i], pto[i + 1]);
            exit(25);
        }
    }
    printf("Check %s pour %d est OK \n", msg, Nb);
    return 0;
}
// Batched merge of many small sorted pairs via the GPU "merge path"
// diagonal binary search. M holds concatenated pairs [A_0,B_0,A_1,B_1,...],
// each A_i and B_i sorted and of length d/2; the thread group gbx merges its
// pair into N[gbx*d .. gbx*d + d). Each thread produces exactly one output
// element: the one of rank i within its pair.
// Assumes d divides blockDim.x (so blockDim.x/d whole pairs fit per block).
__global__ void MergeSmallBatch_k(TYPE *M, int sizeM_tot, TYPE* N, int d)
{
    int i = threadIdx.x%d;                       // output rank within the pair
    int Qt = (threadIdx.x-i)/d;                  // pair slot within this block
    int gbx = Qt + blockIdx.x*(blockDim.x/d);    // global pair index
    if (threadIdx.x + blockIdx.x*blockDim.x >= sizeM_tot) return; // guard against overrun
    int t = d/2;
    int sizeA = t;                               // |A| == |B| == d/2
    int sizeB = t;
    M=M+gbx*d;                                   // slide to this pair's data
    TYPE* A=M;
    TYPE* B=A+sizeA;
    Point K, P, Q;                               // diagonal endpoints K, P and probe Q
    int offset ;
    // Initial endpoints of the i-th anti-diagonal of the merge matrix.
    if (i > sizeA)
    {
        K.x = i - sizeA ; K.y = sizeA ;
        P.x = sizeA ; P.y = i - sizeA ;
    }
    else // x ~ horizontal
    {
        K.x = 0 ; K.y = i ;
        P.x = i ; P.y = 0 ;
    }
    // Binary search along the diagonal for the merge-path crossing point.
    while (1)
    {
        offset = abs(K.y - P.y) / 2 ;
        Q.x = K.x + offset ; Q.y = K.y - offset ;
        if ( (Q.y >= 0) && (Q.x <= sizeB) &&
             ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
        {
            if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
            {
                // Crossing found: emit the smaller of the two heads at Q.
                if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
                { N[i+gbx*d] = A[Q.y] ; }
                else
                { N[i+gbx*d] = B[Q.x] ; }
                break ;
            }
            else
            { K.x = Q.x + 1 ; K.y = Q.y - 1 ; }  // crossing lies lower: move K
        }
        else
        { P.x = Q.x -1 ; P.y = Q.y + 1 ; }       // crossing lies higher: move P
    }
}
// Benchmark driver: for each pair size d in {4, 8, ..., 1024}, build N
// sorted (A,B) pairs, merge them on the GPU, and report the kernel time.
int main(int argc, char ** argv)
{
    // Number of (A,B) pairs merged for each size d.
    int N = 10000;
    cudaError_t errCuda;
    TYPE* ABAB;   // [A_0,B_0,A_1,B_1,...]
    TYPE* MM;     // [M_0,M_1,...], the merges of the respective [A_i,B_i]
    TYPE* cudaABAB;
    TYPE* cudaMM;
    for (int d=4; d<=1024; d=d*2)
    {
        float m1;
        cudaEvent_t Start; cudaEvent_t Stop; cudaEventCreate(&Start) ; cudaEventCreate(&Stop) ;
        int size_total=d*N;
        // Host allocation
        if ((ABAB = (TYPE *) malloc(size_total * sizeof(TYPE))) == NULL)
        { printf("PB allocation Vecteur Ori\n") ; exit (1) ; }
        if ((MM = (TYPE *) malloc(size_total * sizeof(TYPE))) == NULL)
        { printf("PB allocation Vecteur Dest\n") ; exit (1) ; }
        // Initialisation: every A_i and B_i (blocks of d/2 elements) is
        // non-decreasing, as the merge kernel requires.
        srand(5);
        for (int i =0; i<size_total; i++)
        {
            if (i%(d/2)==0) ABAB[i] = rand()%100;
            else ABAB[i]=ABAB[i-1]+rand()%100;
        }
        // Device allocation and upload
        if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaABAB, size_total * sizeof(TYPE))))
        { printf("PB allocation CudaVecteurABAB - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; }
        if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaMM, size_total * sizeof(TYPE))))
        { printf("PB allocation CudaVecteurMM - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; }
        if (cudaSuccess != (errCuda = cudaMemcpy(cudaABAB, ABAB, size_total * sizeof(TYPE), cudaMemcpyHostToDevice)))
        { printf("PB Copie ABAB -> cudaABAB - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; }
        cudaEventRecord(Start);
        // Fix: the grid was hard-coded to 1024 blocks (at most ~1.05M
        // threads), but the kernel needs one thread per output element and
        // size_total reaches d*N = 10,240,000 for d = 1024 — most pairs were
        // never merged. Launch a ceil-divided grid instead.
        int nbBlocks = (size_total + 1023) / 1024;
        MergeSmallBatch_k<<<nbBlocks,1024>>>(cudaABAB,size_total,cudaMM,d);
        cudaEventRecord(Stop);
        if (cudaSuccess != cudaMemcpy(MM, cudaMM, size_total * sizeof(TYPE), cudaMemcpyDeviceToHost))
        { printf("PB copie cudaMM -> MM \n") ; fflush(stdout); exit(2) ; }
        // Make sure the Stop event has completed before reading the timer.
        cudaEventSynchronize(Stop);
        cudaEventElapsedTime(&m1, Start, Stop) ;
        printf("Duree pour d = %4d : %f ms\n",d,m1) ;
        // Cleanup.
        free(MM);
        free(ABAB);
        if (cudaABAB != NULL) { cudaFree(cudaABAB) ; cudaABAB = NULL ; }
        if (cudaMM != NULL) { cudaFree(cudaMM) ; cudaMM = NULL ; }
        // Fix: the events were created every iteration but never destroyed.
        cudaEventDestroy(Start);
        cudaEventDestroy(Stop);
    }
    return 0 ;
}
|
14,594 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
*/
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <cuda.h>
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 256
typedef unsigned int uint;
#define T int
#define T4 int4
#ifndef IDENTITY
#define IDENTITY 0
#endif
enum {ADD_OP=1,MUL_OP,MAX_OP,MIN_OP};
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
// Binary addition operator for the scan primitives.
inline __device__ T scan_sum(T t1,T t2)
{
    return t1+t2;
}
// Binary maximum operator for the scan primitives; on a tie the first
// operand is returned.
inline __device__ T scan_max(T t1,T t2)
{
    if (t1 >= t2)
        return t1;
    return t2;
}
// Apply the binary operator selected by `oper` (ADD_OP / MUL_OP / MAX_OP /
// MIN_OP) to t1 and t2.
// Fix: `res` was left uninitialized when `oper` held none of the four known
// values, so the function could return an indeterminate value (UB); unknown
// operators now pass t1 through unchanged.
inline __device__ T scan_op(T t1,T t2, uint oper)
{
    T res;
    switch(oper)
    {
    case ADD_OP: res = t1 + t2;    break;
    case MUL_OP: res = t1 * t2;    break;
    case MAX_OP: res = max(t1,t2); break;
    case MIN_OP: res = min(t1,t2); break;
    default:     res = t1;         break; // unknown operator: identity on t1
    }
    return res;
}
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE
// Warp-level scan over `size` lanes (size must be <= WARP_SIZE, power of
// two). Each lane writes Identity into a pad slot below its data slot so
// reads at pos - offset never leave initialized memory, then performs a
// Kogge-Stone style accumulation.
// Writes this lane's inclusive result to *warpEndResult and RETURNS
// s_Data[pos-1], i.e. the previous lane's inclusive value (an exclusive-
// style result for this lane).
// NOTE(review): relies on implicit warp-synchronous execution between loop
// steps; on Volta+ independent thread scheduling this pattern needs
// __syncwarp() — confirm the target architecture.
inline __device__ T warpScanInclusive(T idata, T *s_Data, uint size,T* warpEndResult, uint oper, T Identity){
    int pos = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE -1 ));
    s_Data[pos] = Identity;     // pad slot below this lane's data
    pos += WARP_SIZE;
    s_Data[pos] = idata;
    for(uint offset = 1; offset < size; offset <<= 1)
        s_Data[pos] = scan_op(s_Data[pos],s_Data[pos - offset], oper);
    *warpEndResult=s_Data[pos]; // this lane's inclusive result
    return s_Data[pos-1];       // previous lane's inclusive result
}
// Block-level hierarchical scan of `size` elements (one per thread): warp
// scans first, then a scan of the per-warp totals, then recombination via
// scan_op. `size` must be a power of two, <= THREADBLOCK_SIZE.
// NOTE(review): warpScanInclusive returns the PREVIOUS lane's inclusive
// value (see its comment), so the quantity combined here is effectively an
// exclusive-style partial — confirm against the calling kernels before
// renaming or refactoring.
inline __device__ T scan1Inclusive(T idata, T *s_Data, uint size, uint oper, T Identity){
    T warpEndResult;
    if(size > WARP_SIZE){
        //Bottom-level inclusive warp scan
        T warpResult = warpScanInclusive(idata, s_Data, WARP_SIZE,&warpEndResult,oper,Identity);
        //Save top elements of each warp for exclusive warp scan
        //sync to wait for warp scans to complete (because s_Data is being overwritten)
        __syncthreads();
        if( (threadIdx.x & (WARP_SIZE - 1)) == (WARP_SIZE - 1) )
            s_Data[threadIdx.x >> LOG2_WARP_SIZE] = warpEndResult;
        //wait for warp scans to complete
        __syncthreads();
        if( threadIdx.x < (THREADBLOCK_SIZE / WARP_SIZE) ){
            //grab top warp elements
            T val = s_Data[threadIdx.x];
            //calculate exclusive scan and write back to shared memory
            s_Data[threadIdx.x] = warpScanInclusive(val, s_Data, size >> LOG2_WARP_SIZE,&warpEndResult,oper,Identity);
        }
        //return updated warp scans with exclusive scan results
        __syncthreads();
        return scan_op(warpResult , s_Data[threadIdx.x >> LOG2_WARP_SIZE], oper);
    }else{
        return warpScanInclusive(idata, s_Data, size,&warpEndResult,oper,Identity);
    }
}
// Scan a 4-element vector per thread: first a serial in-register scan inside
// the T4, then a block-level scan over the per-thread totals (idata4.w), and
// finally the returned base value is folded into all four lanes.
// NOTE(review): scan1Inclusive returns the preceding thread's running value
// (see warpScanInclusive), which is why folding it into the already-scanned
// lanes produces the intended result — verify against the host wrapper
// before changing anything here.
inline __device__ T4 scan4Inclusive(T4 idata4, T *s_Data, uint size,uint oper, T Identity){
    //Level-0 inclusive scan
    idata4.y =scan_op(idata4.y, idata4.x,oper);
    idata4.z =scan_op(idata4.z, idata4.y,oper);
    idata4.w = scan_op(idata4.w,idata4.z,oper);
    //Level-1 exclusive scan
    T oval = scan1Inclusive(idata4.w, s_Data, size / 4,oper,Identity);
    idata4.x = scan_op(idata4.x,oval,oper);
    idata4.y = scan_op(idata4.y,oval,oper);
    idata4.z = scan_op(idata4.z,oval,oper);
    idata4.w = scan_op(idata4.w,oval,oper);
    return idata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
// Bottom-level scan kernel: each thread loads one T4 (4 elements), the block
// cooperatively scans `size` elements through shared memory, and the result
// vector is written back to d_Dst.
// Shared memory: 2 * THREADBLOCK_SIZE elements (padding + data, see
// warpScanInclusive).
__global__ void scanExclusiveShared(
    T4 *d_Dst,
    T4 *d_Src,
    uint size,
    uint oper, T Identity
){
    __shared__ T s_Data[2 * THREADBLOCK_SIZE];
    const uint pos = blockIdx.x * blockDim.x + threadIdx.x;
    // Load, scan, store — one T4 per thread.
    T4 data = d_Src[pos];
    data = scan4Inclusive(data, s_Data, size, oper, Identity);
    d_Dst[pos] = data;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
// Second-level kernel: gathers the LAST element of each bottom-level scanned
// chunk (its running total) from d_Dst, scans those N totals, and stores the
// per-chunk base offsets into d_Buf for uniformUpdate to apply.
// NOTE(review): d_Src is unused here (totals are read from d_Dst), and
// inactive threads contribute 0 rather than `Identity` — for non-ADD
// operators that looks suspicious; confirm intent.
__global__ void scanExclusiveShared2(
    T *d_Buf,
    T *d_Dst,
    T *d_Src,
    uint N,
    uint arrayLength,
    uint oper, T Identity
){
    __shared__ T s_Data[2 * THREADBLOCK_SIZE];
    //Skip loads and stores for inactive threads of last threadblock (pos >= N)
    uint pos = blockIdx.x * blockDim.x + threadIdx.x;
    //Load top elements
    //Convert results of bottom-level scan back to inclusive
    T idata = 0;
    if(pos < N)
        idata =
        d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] ;
    //Compute
    T odata = scan1Inclusive(idata, s_Data, arrayLength,oper,Identity);
    //Avoid out-of-bound access
    if(pos < N)
        d_Buf[pos] = odata;
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
// One block per bottom-level chunk: thread 0 fetches the chunk's scanned
// base offset, the barrier publishes it, and every thread folds it into all
// four lanes of its T4 element. (`Identity` is unused here but kept for a
// uniform kernel signature.)
__global__ void uniformUpdate(
    T4 *d_Data,
    T *d_Buffer,
    uint oper, T Identity
){
    __shared__ T base;
    const uint pos = blockIdx.x * blockDim.x + threadIdx.x;
    if(threadIdx.x == 0)
        base = d_Buffer[blockIdx.x];
    // Publish `base` to the whole block before anyone reads it.
    __syncthreads();
    T4 v = d_Data[pos];
    v.x = scan_op(v.x, base, oper);
    v.y = scan_op(v.y, base, oper);
    v.z = scan_op(v.z, base, oper);
    v.w = scan_op(v.w, base, oper);
    d_Data[pos] = v;
}
|
14,595 | #include <limits>
/**
* Configuration indexes.
*/
#define X_IMAGE_SIZE conf[0]
#define X_FEATURE_SIZE conf[1]
#define X_ROW_SIZE conf[2]
#define N conf[3]
/**
 * Find the index of the largest activation across the feature maps at a
 * given spatial position, skipping positions already selected (y != 0).
 * @param conf the kernel's configuration.
 * @param x the initial layer activation.
 * @param y the kWTA layer activation, i.e. output buffer.
 * @param i the offset of the position within the first feature map.
 * @return the index of the maximal not-yet-selected value.
 */
__device__ int arg_max(int *conf, float *x, float *y, int i) {
    float best_val = -INFINITY;
    int best_idx = i;
    // Step through the same position in every feature map.
    for (int j = 0; j < X_IMAGE_SIZE; j += X_FEATURE_SIZE) {
        const int candidate = i + j;
        if (y[candidate] == 0 && x[candidate] > best_val) {
            best_val = x[candidate];
            best_idx = candidate;
        }
    }
    return best_idx;
}
/**
 * Compute the kWTA (k-winners-take-all) activation of the layer.
 * @param conf the kernel's configuration.
 * @param k the number of winners.
 * @param x the initial layer activation.
 * @param y the kWTA layer activation, i.e. output buffer.
 * @param m the mask (1 for winners, 0 elsewhere).
 * @return nothing.
 */
extern "C"
__global__ void activation(int *conf, int k, float *x, float *y, float *m)
{
    const int base = threadIdx.x * X_IMAGE_SIZE + blockIdx.x * X_ROW_SIZE + blockIdx.y;
    if (base >= N)
        return;
    // Clear the output and mask at this position across all feature maps.
    for (int j = 0; j < X_IMAGE_SIZE; j += X_FEATURE_SIZE) {
        y[base + j] = 0;
        m[base + j] = 0;
    }
    // Greedily pick the k strongest activations; arg_max skips indices whose
    // y entry is already non-zero, so each winner is selected once.
    for (int w = 0; w < k; w++) {
        const int winner = arg_max(conf, x, y, base);
        y[winner] = x[winner];
        m[winner] = 1;
    }
}
|
14,596 | #include "includes.h"
// Write each element's own index into the array: array[i] = i
// for every i in [0, size). One thread per element, tail guarded.
__global__ void fillWithIndexKernel(int size, int *array)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    array[i] = i;
}
14,597 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with a message if the most recent CUDA API call or kernel launch
// recorded an error; no-op on success.
void check_error (const char* message) {
    cudaError_t error = cudaGetLastError ();
    if (error == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
    exit(-1);
}
__global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) {
mux1 = mu[k][j][i-1] * strx[i-1];
mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2];
mux2 += mu[k][j][i+1] * strx[i+1];
mux2 += 3.0 * mu[k][j][i] * strx[i];
mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1];
mux3 += mu[k][j][i+2] * strx[i+2];
mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
mux3 += 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1];
mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1];
muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2];
muy2 += mu[k][j+1][i] * stry[j+1];
muy2 += 3.0 * mu[k][j][i] * stry[j];
muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1];
muy3 += mu[k][j+2][i] * stry[j+2];
muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
muy3 += 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1];
muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1];
muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2];
muz2 += mu[k+1][j][i] * strz[k+1];
muz2 += 3.0 * mu[k][j][i] * strz[k];
muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1];
muz3 += mu[k+2][j][i] * strz[k+2];
muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
muz3 += 3.0 * mu[k][j][i] * strz[k];
muz4 = -(3.0 / 4.0 * mu[k][j][i] * strz[k]);
muz4 += mu[k+1][j][i] * strz[k+1];
muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
double _t_3_ = u_0[k][j][i-2];
_t_3_ -= u_0[k][j][i];
double _v_18_ = la[k][j][i-1] * strx[i-1];
double _t_2_ = _v_18_;
_t_2_ += 2.0 * mux1;
_t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i];
_t_2_ -= 3.0 / 4.0 * la[k][j][i-2] * strx[i-2];
double _t_1_ = _t_2_ * _t_3_;
double _t_4_ = la[k][j][i-2] * strx[i-2];
double _v_21_ = la[k][j][i+1] * strx[i+1];
_t_4_ += _v_21_;
_t_4_ += 2.0 * mux2;
_t_4_ += 3.0 * la[k][j][i] * strx[i];
_t_4_ += 3.0 * la[k][j][i-1] * strx[i-1];
double _t_5_ = u_0[k][j][i-1];
_t_5_ -= u_0[k][j][i];
_t_1_ += _t_4_ * _t_5_;
double _t_6_ = _v_18_;
_t_6_ += la[k][j][i+2] * strx[i+2];
_t_6_ += 2.0 * mux3;
_t_6_ += 3.0 * la[k][j][i+1] * strx[i+1];
_t_6_ += 3.0 * la[k][j][i] * strx[i];
double _t_8_ = -(3.0 / 4.0 * la[k][j][i] * strx[i]);
_t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2];
_t_8_ += 2.0 * mux4;
double _t_7_ = u_0[k][j][i+1];
_t_7_ -= u_0[k][j][i];
_t_1_ += _t_6_ * _t_7_;
_t_8_ += _v_21_;
double _t_9_ = u_0[k][j][i+2];
_t_9_ -= u_0[k][j][i];
_t_1_ += _t_8_ * _t_9_;
double _t_0_ = strx[i] * _t_1_;
double _t_11_ = u_0[k][j-2][i];
_t_11_ -= u_0[k][j][i];
double _t_10_ = muy1 * _t_11_;
double _t_12_ = u_0[k][j-1][i];
_t_12_ -= u_0[k][j][i];
_t_10_ += muy2 * _t_12_;
double _t_13_ = u_0[k][j+1][i];
_t_13_ -= u_0[k][j][i];
_t_10_ += muy3 * _t_13_;
double _t_14_ = u_0[k][j+2][i];
_t_14_ -= u_0[k][j][i];
_t_10_ += muy4 * _t_14_;
_t_0_ += stry[j] * _t_10_;
double _t_16_ = u_0[k-2][j][i];
_t_16_ -= u_0[k][j][i];
double _t_15_ = muz1 * _t_16_;
double _t_17_ = u_0[k-1][j][i];
_t_17_ -= u_0[k][j][i];
_t_15_ += muz2 * _t_17_;
double _t_18_ = u_0[k+1][j][i];
_t_18_ -= u_0[k][j][i];
_t_15_ += muz3 * _t_18_;
double _t_19_ = -(u_0[k][j][i]);
_t_19_ += u_0[k+2][j][i];
_t_15_ += muz4 * _t_19_;
_t_0_ += strz[k] * _t_15_;
r1 = 1.0 / 6.0 * _t_0_;
double _t_22_ = u_1[k][j][i-2];
_t_22_ -= u_1[k][j][i];
double _t_21_ = mux1 * _t_22_;
double _t_23_ = u_1[k][j][i-1];
_t_23_ -= u_1[k][j][i];
_t_21_ += mux2 * _t_23_;
double _t_24_ = u_1[k][j][i+1];
_t_24_ -= u_1[k][j][i];
_t_21_ += mux3 * _t_24_;
double _t_25_ = u_1[k][j][i+2];
_t_25_ -= u_1[k][j][i];
_t_21_ += mux4 * _t_25_;
double _t_20_ = strx[i] * _t_21_;
double _t_28_ = u_1[k][j-2][i];
_t_28_ -= u_1[k][j][i];
double _v_44_ = la[k][j-1][i] * stry[j-1];
double _t_27_ = _v_44_;
_t_27_ += 2.0 * muy1;
_t_27_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
double _t_26_ = _t_27_ * _t_28_;
double _t_29_ = la[k][j-2][i] * stry[j-2];
double _v_47_ = la[k][j+1][i] * stry[j+1];
_t_29_ += _v_47_;
_t_29_ += 2.0 * muy2;
_t_29_ += 3.0 * la[k][j][i] * stry[j];
_t_29_ += 3.0 * la[k][j-1][i] * stry[j-1];
double _t_30_ = u_1[k][j-1][i];
_t_30_ -= u_1[k][j][i];
_t_26_ += _t_29_ * _t_30_;
double _t_31_ = _v_44_;
_t_31_ += la[k][j+2][i] * stry[j+2];
_t_31_ += 2.0 * muy3;
_t_31_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_31_ += 3.0 * la[k][j][i] * stry[j];
double _t_33_ = -(3.0 / 4.0 * la[k][j][i] * stry[j]);
_t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_33_ += 2.0 * muy4;
double _t_32_ = u_1[k][j+1][i];
_t_32_ -= u_1[k][j][i];
_t_26_ += _t_31_ * _t_32_;
_t_33_ += _v_47_;
double _t_34_ = u_1[k][j+2][i];
_t_34_ -= u_1[k][j][i];
_t_26_ += _t_33_ * _t_34_;
_t_20_ += stry[j] * _t_26_;
double _t_36_ = u_1[k-2][j][i];
_t_36_ -= u_1[k][j][i];
double _t_35_ = muz1 * _t_36_;
double _t_37_ = u_1[k-1][j][i];
_t_37_ -= u_1[k][j][i];
_t_35_ += muz2 * _t_37_;
double _t_38_ = u_1[k+1][j][i];
_t_38_ -= u_1[k][j][i];
_t_35_ += muz3 * _t_38_;
double _t_39_ = -(u_1[k][j][i]);
_t_39_ += u_1[k+2][j][i];
_t_35_ += muz4 * _t_39_;
_t_20_ += strz[k] * _t_35_;
r2 = 1.0 / 6.0 * _t_20_;
double _t_42_ = u_2[k][j][i-2];
_t_42_ -= u_2[k][j][i];
double _t_41_ = mux1 * _t_42_;
double _t_43_ = u_2[k][j][i-1];
_t_43_ -= u_2[k][j][i];
_t_41_ += mux2 * _t_43_;
double _t_44_ = u_2[k][j][i+1];
_t_44_ -= u_2[k][j][i];
_t_41_ += mux3 * _t_44_;
double _t_45_ = u_2[k][j][i+2];
_t_45_ -= u_2[k][j][i];
_t_41_ += mux4 * _t_45_;
double _t_40_ = strx[i] * _t_41_;
double _t_47_ = u_2[k][j-2][i];
_t_47_ -= u_2[k][j][i];
double _t_46_ = muy1 * _t_47_;
double _t_48_ = u_2[k][j-1][i];
_t_48_ -= u_2[k][j][i];
_t_46_ += muy2 * _t_48_;
double _t_49_ = u_2[k][j+1][i];
_t_49_ -= u_2[k][j][i];
_t_46_ += muy3 * _t_49_;
double _t_50_ = u_2[k][j+2][i];
_t_50_ -= u_2[k][j][i];
_t_46_ += muy4 * _t_50_;
_t_40_ += stry[j] * _t_46_;
double _t_53_ = u_2[k-2][j][i];
_t_53_ -= u_2[k][j][i];
double _t_52_ = 2.0 * muz1;
double _v_70_ = la[k-1][j][i] * strz[k-1];
double _t_54_ = 3.0 * la[k-1][j][i] * strz[k-1];
_t_54_ += 2.0 * muz2;
_t_54_ += 3.0 * la[k][j][i] * strz[k];
double _t_56_ = 3.0 * la[k][j][i] * strz[k];
_t_56_ += 2.0 * muz3;
double _t_58_ = -(3.0 / 4.0 * la[k][j][i] * strz[k]);
_t_58_ += 2.0 * muz4;
_t_52_ -= 3.0 / 4.0 * la[k][j][i] * strz[k];
_t_52_ += _v_70_;
_t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2];
double _t_51_ = _t_52_ * _t_53_;
_t_54_ += la[k-2][j][i] * strz[k-2];
double _v_73_ = la[k+1][j][i] * strz[k+1];
_t_56_ += 3.0 * la[k+1][j][i] * strz[k+1];
_t_54_ += _v_73_;
double _t_55_ = u_2[k-1][j][i];
_t_55_ -= u_2[k][j][i];
_t_51_ += _t_54_ * _t_55_;
_t_56_ += _v_70_;
_t_56_ += la[k+2][j][i] * strz[k+2];
_t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2];
double _t_57_ = u_2[k+1][j][i];
_t_57_ -= u_2[k][j][i];
_t_51_ += _t_56_ * _t_57_;
_t_58_ += _v_73_;
double _t_59_ = -(u_2[k][j][i]);
_t_59_ += u_2[k+2][j][i];
_t_51_ += _t_58_ * _t_59_;
_t_40_ += strz[k] * _t_51_;
r3 = 1.0 / 6.0 * _t_40_;
double _t_65_ = -u_0[k-1][j][i-2];
_t_65_ += u_0[k+1][j][i-2];
double _t_93_ = u_0[k-1][j][i-2];
_t_93_ -= u_0[k-1][j][i+2];
double _t_96_ = u_0[k+1][j][i-2];
_t_96_ -= u_0[k+1][j][i+2];
double _t_73_ = -u_0[k-1][j][i+2];
_t_73_ += u_0[k+1][j][i+2];
double _t_64_ = 8.0 * _t_65_;
_t_64_ += u_0[k-2][j][i-2];
_t_64_ -= u_0[k+2][j][i-2];
double _t_90_ = u_0[k-2][j][i-2];
_t_90_ -= u_0[k-2][j][i+2];
double _t_98_ = u_0[k+2][j][i-2];
_t_98_ -= u_0[k+2][j][i+2];
double _t_72_ = u_0[k-2][j][i+2];
_t_72_ -= u_0[k+2][j][i+2];
_t_72_ += 8.0 * _t_73_;
double _t_62_ = mu[k][j][i-2] * _t_64_;
double _t_68_ = -u_0[k-1][j][i-1];
_t_68_ += u_0[k+1][j][i-1];
double _t_94_ = -u_0[k-1][j][i-1];
_t_94_ += u_0[k-1][j][i+1];
double _t_97_ = -u_0[k+1][j][i-1];
_t_97_ += u_0[k+1][j][i+1];
double _t_71_ = -u_0[k-1][j][i+1];
_t_71_ += u_0[k+1][j][i+1];
double _t_67_ = 8.0 * _t_68_;
_t_67_ += u_0[k-2][j][i-1];
_t_67_ -= u_0[k+2][j][i-1];
double _t_91_ = -u_0[k-2][j][i-1];
_t_91_ += u_0[k-2][j][i+1];
double _t_99_ = -u_0[k+2][j][i-1];
_t_99_ += u_0[k+2][j][i+1];
double _t_70_ = u_0[k-2][j][i+1];
_t_70_ -= u_0[k+2][j][i+1];
_t_70_ += 8.0 * _t_71_;
_t_62_ -= mu[k][j][i-1] * _t_67_;
_t_62_ += mu[k][j][i+1] * _t_70_;
_t_62_ -= mu[k][j][i+2] * _t_72_;
double _t_61_ = strx[i] * strz[k];
double _t_60_ = _t_61_ * _t_62_;
_t_90_ += 8.0 * _t_91_;
double _t_88_ = la[k-2][j][i] * _t_90_;
_t_93_ += 8.0 * _t_94_;
_t_88_ -= la[k-1][j][i] * _t_93_;
_t_96_ += 8.0 * _t_97_;
_t_88_ += la[k+1][j][i] * _t_96_;
_t_98_ += 8.0 * _t_99_;
_t_88_ -= la[k+2][j][i] * _t_98_;
double _t_87_ = strx[i] * strz[k];
_t_60_ += _t_87_ * _t_88_;
double _t_78_ = -u_1[k-1][j-2][i];
_t_78_ += u_1[k+1][j-2][i];
double _t_106_ = u_1[k-1][j-2][i];
_t_106_ -= u_1[k-1][j+2][i];
double _t_109_ = u_1[k+1][j-2][i];
_t_109_ -= u_1[k+1][j+2][i];
double _t_86_ = -u_1[k-1][j+2][i];
_t_86_ += u_1[k+1][j+2][i];
double _t_77_ = 8.0 * _t_78_;
_t_77_ += u_1[k-2][j-2][i];
_t_77_ -= u_1[k+2][j-2][i];
double _t_103_ = u_1[k-2][j-2][i];
_t_103_ -= u_1[k-2][j+2][i];
double _t_111_ = u_1[k+2][j-2][i];
_t_111_ -= u_1[k+2][j+2][i];
double _t_85_ = u_1[k-2][j+2][i];
_t_85_ -= u_1[k+2][j+2][i];
_t_85_ += 8.0 * _t_86_;
double _t_75_ = mu[k][j-2][i] * _t_77_;
double _t_81_ = -u_1[k-1][j-1][i];
_t_81_ += u_1[k+1][j-1][i];
double _t_107_ = -u_1[k-1][j-1][i];
_t_107_ += u_1[k-1][j+1][i];
double _t_110_ = -u_1[k+1][j-1][i];
_t_110_ += u_1[k+1][j+1][i];
double _t_84_ = -u_1[k-1][j+1][i];
_t_84_ += u_1[k+1][j+1][i];
double _t_80_ = 8.0 * _t_81_;
_t_80_ += u_1[k-2][j-1][i];
_t_80_ -= u_1[k+2][j-1][i];
double _t_104_ = -u_1[k-2][j-1][i];
_t_104_ += u_1[k-2][j+1][i];
double _t_112_ = -u_1[k+2][j-1][i];
_t_112_ += u_1[k+2][j+1][i];
double _t_83_ = u_1[k-2][j+1][i];
_t_83_ -= u_1[k+2][j+1][i];
_t_83_ += 8.0 * _t_84_;
_t_75_ -= mu[k][j-1][i] * _t_80_;
_t_75_ += mu[k][j+1][i] * _t_83_;
_t_75_ -= mu[k][j+2][i] * _t_85_;
double _t_74_ = stry[j] * strz[k];
_t_60_ += _t_74_ * _t_75_;
_t_103_ += 8.0 * _t_104_;
double _t_101_ = la[k-2][j][i] * _t_103_;
_t_106_ += 8.0 * _t_107_;
_t_101_ -= la[k-1][j][i] * _t_106_;
_t_109_ += 8.0 * _t_110_;
_t_101_ += la[k+1][j][i] * _t_109_;
_t_111_ += 8.0 * _t_112_;
_t_101_ -= la[k+2][j][i] * _t_111_;
double _t_100_ = stry[j] * strz[k];
_t_60_ += _t_100_ * _t_101_;
r3 += _t_60_;
double _t_144_ = -u_1[k][j-2][i-1];
_t_144_ += u_1[k][j-2][i+1];
double _t_120_ = u_1[k][j-2][i-1];
_t_120_ -= u_1[k][j+2][i-1];
double _t_123_ = u_1[k][j-2][i+1];
_t_123_ -= u_1[k][j+2][i+1];
double _t_152_ = -u_1[k][j+2][i-1];
_t_152_ += u_1[k][j+2][i+1];
double _t_143_ = 8.0 * _t_144_;
_t_143_ += u_1[k][j-2][i-2];
_t_143_ -= u_1[k][j-2][i+2];
double _t_117_ = u_1[k][j-2][i-2];
_t_117_ -= u_1[k][j+2][i-2];
double _t_125_ = u_1[k][j-2][i+2];
_t_125_ -= u_1[k][j+2][i+2];
double _t_151_ = u_1[k][j+2][i-2];
_t_151_ -= u_1[k][j+2][i+2];
_t_151_ += 8.0 * _t_152_;
double _t_141_ = mu[k][j-2][i] * _t_143_;
double _t_147_ = -u_1[k][j-1][i-1];
_t_147_ += u_1[k][j-1][i+1];
double _t_121_ = -u_1[k][j-1][i-1];
_t_121_ += u_1[k][j+1][i-1];
double _t_124_ = -u_1[k][j-1][i+1];
_t_124_ += u_1[k][j+1][i+1];
double _t_150_ = -u_1[k][j+1][i-1];
_t_150_ += u_1[k][j+1][i+1];
double _t_146_ = 8.0 * _t_147_;
_t_146_ += u_1[k][j-1][i-2];
_t_146_ -= u_1[k][j-1][i+2];
double _t_118_ = -u_1[k][j-1][i-2];
_t_118_ += u_1[k][j+1][i-2];
double _t_126_ = -u_1[k][j-1][i+2];
_t_126_ += u_1[k][j+1][i+2];
double _t_149_ = u_1[k][j+1][i-2];
_t_149_ -= u_1[k][j+1][i+2];
_t_149_ += 8.0 * _t_150_;
_t_141_ -= mu[k][j-1][i] * _t_146_;
_t_141_ += mu[k][j+1][i] * _t_149_;
_t_141_ -= mu[k][j+2][i] * _t_151_;
double _t_142_ = strx[i] * stry[j];
double _t_140_ = _t_142_ * 1.0 / 144.0;
double _t_113_ = _t_140_ * _t_141_;
_t_117_ += 8.0 * _t_118_;
double _t_115_ = la[k][j][i-2] * _t_117_;
_t_120_ += 8.0 * _t_121_;
_t_115_ -= la[k][j][i-1] * _t_120_;
_t_123_ += 8.0 * _t_124_;
_t_115_ += la[k][j][i+1] * _t_123_;
_t_125_ += 8.0 * _t_126_;
_t_115_ -= la[k][j][i+2] * _t_125_;
double _t_114_ = strx[i] * stry[j];
_t_113_ += _t_114_ * _t_115_;
double _t_131_ = -u_2[k-1][j][i-2];
_t_131_ += u_2[k+1][j][i-2];
double _t_159_ = u_2[k-1][j][i-2];
_t_159_ -= u_2[k-1][j][i+2];
double _t_162_ = u_2[k+1][j][i-2];
_t_162_ -= u_2[k+1][j][i+2];
double _t_139_ = -u_2[k-1][j][i+2];
_t_139_ += u_2[k+1][j][i+2];
double _t_130_ = 8.0 * _t_131_;
_t_130_ += u_2[k-2][j][i-2];
_t_130_ -= u_2[k+2][j][i-2];
double _t_156_ = u_2[k-2][j][i-2];
_t_156_ -= u_2[k-2][j][i+2];
double _t_164_ = u_2[k+2][j][i-2];
_t_164_ -= u_2[k+2][j][i+2];
double _t_138_ = u_2[k-2][j][i+2];
_t_138_ -= u_2[k+2][j][i+2];
_t_138_ += 8.0 * _t_139_;
double _t_128_ = la[k][j][i-2] * _t_130_;
double _t_134_ = -u_2[k-1][j][i-1];
_t_134_ += u_2[k+1][j][i-1];
double _t_160_ = -u_2[k-1][j][i-1];
_t_160_ += u_2[k-1][j][i+1];
double _t_163_ = -u_2[k+1][j][i-1];
_t_163_ += u_2[k+1][j][i+1];
double _t_137_ = -u_2[k-1][j][i+1];
_t_137_ += u_2[k+1][j][i+1];
double _t_133_ = 8.0 * _t_134_;
_t_133_ += u_2[k-2][j][i-1];
_t_133_ -= u_2[k+2][j][i-1];
double _t_157_ = -u_2[k-2][j][i-1];
_t_157_ += u_2[k-2][j][i+1];
double _t_165_ = -u_2[k+2][j][i-1];
_t_165_ += u_2[k+2][j][i+1];
double _t_136_ = u_2[k-2][j][i+1];
_t_136_ -= u_2[k+2][j][i+1];
_t_136_ += 8.0 * _t_137_;
_t_128_ -= la[k][j][i-1] * _t_133_;
_t_128_ += la[k][j][i+1] * _t_136_;
_t_128_ -= la[k][j][i+2] * _t_138_;
double _t_129_ = strx[i] * strz[k];
double _t_127_ = _t_129_ * 1.0 / 144.0;
_t_113_ += _t_127_ * _t_128_;
_t_156_ += 8.0 * _t_157_;
double _t_154_ = mu[k-2][j][i] * _t_156_;
_t_159_ += 8.0 * _t_160_;
_t_154_ -= mu[k-1][j][i] * _t_159_;
_t_162_ += 8.0 * _t_163_;
_t_154_ += mu[k+1][j][i] * _t_162_;
_t_164_ += 8.0 * _t_165_;
_t_154_ -= mu[k+2][j][i] * _t_164_;
double _t_153_ = _t_129_;
_t_113_ += _t_153_ * _t_154_;
r1 += _t_113_;
double _t_195_ = stry[j] * strz[k];
double _t_171_ = -u_0[k][j-1][i-2];
_t_171_ += u_0[k][j+1][i-2];
double _t_186_ = u_0[k][j-1][i-2];
_t_186_ -= u_0[k][j-1][i+2];
double _t_189_ = u_0[k][j+1][i-2];
_t_189_ -= u_0[k][j+1][i+2];
double _t_179_ = -u_0[k][j-1][i+2];
_t_179_ += u_0[k][j+1][i+2];
double _t_170_ = 8.0 * _t_171_;
_t_170_ += u_0[k][j-2][i-2];
_t_170_ -= u_0[k][j+2][i-2];
double _t_183_ = u_0[k][j-2][i-2];
_t_183_ -= u_0[k][j-2][i+2];
double _t_191_ = u_0[k][j+2][i-2];
_t_191_ -= u_0[k][j+2][i+2];
double _t_178_ = u_0[k][j-2][i+2];
_t_178_ -= u_0[k][j+2][i+2];
_t_178_ += 8.0 * _t_179_;
double _t_168_ = mu[k][j][i-2] * _t_170_;
double _t_174_ = -u_0[k][j-1][i-1];
_t_174_ += u_0[k][j+1][i-1];
double _t_187_ = -u_0[k][j-1][i-1];
_t_187_ += u_0[k][j-1][i+1];
double _t_190_ = -u_0[k][j+1][i-1];
_t_190_ += u_0[k][j+1][i+1];
double _t_177_ = -u_0[k][j-1][i+1];
_t_177_ += u_0[k][j+1][i+1];
double _t_173_ = 8.0 * _t_174_;
_t_173_ += u_0[k][j-2][i-1];
_t_173_ -= u_0[k][j+2][i-1];
double _t_184_ = -u_0[k][j-2][i-1];
_t_184_ += u_0[k][j-2][i+1];
double _t_192_ = -u_0[k][j+2][i-1];
_t_192_ += u_0[k][j+2][i+1];
double _t_176_ = u_0[k][j-2][i+1];
_t_176_ -= u_0[k][j+2][i+1];
_t_176_ += 8.0 * _t_177_;
_t_168_ -= mu[k][j][i-1] * _t_173_;
_t_168_ += mu[k][j][i+1] * _t_176_;
_t_168_ -= mu[k][j][i+2] * _t_178_;
double _t_169_ = _t_142_;
double _t_167_ = _t_169_ * 1.0 / 144.0;
double _t_166_ = _t_167_ * _t_168_;
_t_183_ += 8.0 * _t_184_;
double _t_181_ = la[k][j-2][i] * _t_183_;
_t_186_ += 8.0 * _t_187_;
_t_181_ -= la[k][j-1][i] * _t_186_;
_t_189_ += 8.0 * _t_190_;
_t_181_ += la[k][j+1][i] * _t_189_;
_t_191_ += 8.0 * _t_192_;
_t_181_ -= la[k][j+2][i] * _t_191_;
double _t_180_ = _t_169_;
_t_166_ += _t_180_ * _t_181_;
double _t_197_ = -u_2[k-1][j-2][i];
_t_197_ += u_2[k+1][j-2][i];
double _t_212_ = u_2[k-1][j-2][i];
_t_212_ -= u_2[k-1][j+2][i];
double _t_215_ = u_2[k+1][j-2][i];
_t_215_ -= u_2[k+1][j+2][i];
double _t_205_ = -u_2[k-1][j+2][i];
_t_205_ += u_2[k+1][j+2][i];
double _t_196_ = 8.0 * _t_197_;
_t_196_ += u_2[k-2][j-2][i];
_t_196_ -= u_2[k+2][j-2][i];
double _t_209_ = u_2[k-2][j-2][i];
_t_209_ -= u_2[k-2][j+2][i];
double _t_217_ = u_2[k+2][j-2][i];
_t_217_ -= u_2[k+2][j+2][i];
double _t_204_ = u_2[k-2][j+2][i];
_t_204_ -= u_2[k+2][j+2][i];
_t_204_ += 8.0 * _t_205_;
double _t_194_ = la[k][j-2][i] * _t_196_;
double _t_200_ = -u_2[k-1][j-1][i];
_t_200_ += u_2[k+1][j-1][i];
double _t_213_ = -u_2[k-1][j-1][i];
_t_213_ += u_2[k-1][j+1][i];
double _t_216_ = -u_2[k+1][j-1][i];
_t_216_ += u_2[k+1][j+1][i];
double _t_203_ = -u_2[k-1][j+1][i];
_t_203_ += u_2[k+1][j+1][i];
double _t_199_ = 8.0 * _t_200_;
_t_199_ += u_2[k-2][j-1][i];
_t_199_ -= u_2[k+2][j-1][i];
double _t_210_ = -u_2[k-2][j-1][i];
_t_210_ += u_2[k-2][j+1][i];
double _t_218_ = -u_2[k+2][j-1][i];
_t_218_ += u_2[k+2][j+1][i];
double _t_202_ = u_2[k-2][j+1][i];
_t_202_ -= u_2[k+2][j+1][i];
_t_202_ += 8.0 * _t_203_;
_t_194_ -= la[k][j-1][i] * _t_199_;
_t_194_ += la[k][j+1][i] * _t_202_;
_t_194_ -= la[k][j+2][i] * _t_204_;
double _t_193_ = _t_195_ * 1.0 / 144.0;
_t_166_ += _t_193_ * _t_194_;
_t_209_ += 8.0 * _t_210_;
double _t_207_ = mu[k-2][j][i] * _t_209_;
_t_212_ += 8.0 * _t_213_;
_t_207_ -= mu[k-1][j][i] * _t_212_;
_t_215_ += 8.0 * _t_216_;
_t_207_ += mu[k+1][j][i] * _t_215_;
_t_217_ += 8.0 * _t_218_;
_t_207_ -= mu[k+2][j][i] * _t_217_;
double _t_206_ = _t_195_;
_t_166_ += _t_206_ * _t_207_;
r2 += _t_166_;
double uacc_0kc0jc0ic0 = cof * r1;
uacc_0kc0jc0ic0 += a1 * uacc_0[k][j][i];
double _v_120_ = cof * r2;
double _v_122_ = cof * r3;
double uacc_1kc0jc0ic0 = _v_120_;
uacc_1kc0jc0ic0 += a1 * uacc_1[k][j][i];
double uacc_2kc0jc0ic0 = _v_122_;
uacc_2kc0jc0ic0 += a1 * uacc_2[k][j][i];
uacc_0[k][j][i] = uacc_0kc0jc0ic0;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_2[k][j][i] = uacc_2kc0jc0ic0;
}
}
/* Host-side driver for the sw4 stencil kernel.
 * Copies the displacement fields (u_0..u_2), material parameters (mu, la)
 * and the 1-D stretching arrays (strx/stry/strz) to the device, launches
 * sw4 over an N^3 grid, and copies the updated acceleration fields
 * (uacc_0..uacc_2) back to the host, freeing all device buffers.
 * All volume arrays are dense N*N*N doubles; strx/stry/strz have length N.
 * NOTE(review): `ceil` is assumed to be a project-provided integer
 * ceil-division helper (ceil(a, b) == (a + b - 1) / b) — confirm against
 * the project headers.
 * Fix: the original checked errors only after cudaMalloc; every
 * cudaMemcpy and the kernel launch now go through check_error as well,
 * so transfer and launch/execution failures are no longer silent. */
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
	size_t vol_bytes = sizeof(double)*N*N*N;  /* one N^3 volume */
	size_t vec_bytes = sizeof(double)*N;      /* one stretching vector */
	double *uacc_0;
	cudaMalloc (&uacc_0, vol_bytes);
	check_error ("Failed to allocate device memory for uacc_0\n");
	cudaMemcpy (uacc_0, h_uacc_0, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy uacc_0 to device\n");
	double *uacc_1;
	cudaMalloc (&uacc_1, vol_bytes);
	check_error ("Failed to allocate device memory for uacc_1\n");
	cudaMemcpy (uacc_1, h_uacc_1, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy uacc_1 to device\n");
	double *uacc_2;
	cudaMalloc (&uacc_2, vol_bytes);
	check_error ("Failed to allocate device memory for uacc_2\n");
	cudaMemcpy (uacc_2, h_uacc_2, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy uacc_2 to device\n");
	double *u_0;
	cudaMalloc (&u_0, vol_bytes);
	check_error ("Failed to allocate device memory for u_0\n");
	cudaMemcpy (u_0, h_u_0, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy u_0 to device\n");
	double *u_1;
	cudaMalloc (&u_1, vol_bytes);
	check_error ("Failed to allocate device memory for u_1\n");
	cudaMemcpy (u_1, h_u_1, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy u_1 to device\n");
	double *u_2;
	cudaMalloc (&u_2, vol_bytes);
	check_error ("Failed to allocate device memory for u_2\n");
	cudaMemcpy (u_2, h_u_2, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy u_2 to device\n");
	double *mu;
	cudaMalloc (&mu, vol_bytes);
	check_error ("Failed to allocate device memory for mu\n");
	cudaMemcpy (mu, h_mu, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy mu to device\n");
	double *la;
	cudaMalloc (&la, vol_bytes);
	check_error ("Failed to allocate device memory for la\n");
	cudaMemcpy (la, h_la, vol_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy la to device\n");
	double *strx;
	cudaMalloc (&strx, vec_bytes);
	check_error ("Failed to allocate device memory for strx\n");
	cudaMemcpy (strx, h_strx, vec_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy strx to device\n");
	double *stry;
	cudaMalloc (&stry, vec_bytes);
	check_error ("Failed to allocate device memory for stry\n");
	cudaMemcpy (stry, h_stry, vec_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy stry to device\n");
	double *strz;
	cudaMalloc (&strz, vec_bytes);
	check_error ("Failed to allocate device memory for strz\n");
	cudaMemcpy (strz, h_strz, vec_bytes, cudaMemcpyHostToDevice);
	check_error ("Failed to copy strz to device\n");
	/* Thread block chosen by the code generator; grid covers the N^3 domain. */
	dim3 blockconfig (16, 2, 2);
	dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
	sw4 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
	check_error ("sw4 kernel launch failed\n");
	/* Blocking copies: also surface any asynchronous kernel-execution error. */
	cudaMemcpy (h_uacc_0, uacc_0, vol_bytes, cudaMemcpyDeviceToHost);
	check_error ("Failed to copy uacc_0 back to host\n");
	cudaMemcpy (h_uacc_1, uacc_1, vol_bytes, cudaMemcpyDeviceToHost);
	check_error ("Failed to copy uacc_1 back to host\n");
	cudaMemcpy (h_uacc_2, uacc_2, vol_bytes, cudaMemcpyDeviceToHost);
	check_error ("Failed to copy uacc_2 back to host\n");
	cudaFree (uacc_0);
	cudaFree (uacc_1);
	cudaFree (uacc_2);
	cudaFree (u_0);
	cudaFree (u_1);
	cudaFree (u_2);
	cudaFree (mu);
	cudaFree (la);
	cudaFree (strx);
	cudaFree (stry);
	cudaFree (strz);
}
|
14,598 | #include <iostream>
#include <cmath>
#include <cstdlib>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
// Orders doubles by absolute value; passed to thrust::max_element to
// locate the pivot with the largest magnitude in a column.
struct comparator {
	__host__ __device__ bool operator()(double lhs, double rhs) {
		double mag_l = std::fabs(lhs);
		double mag_r = std::fabs(rhs);
		return mag_l < mag_r;
	}
};
/* CSC ("CUDA Safe Call"): wraps a CUDA runtime call; on failure prints the
 * file, line and human-readable error string to stderr and terminates.
 * (Comments cannot be placed inside the macro body: a // comment would
 * swallow the trailing line-continuation backslash.) */
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
// Swaps rows row_idx_1 and row_idx_2 of an n-row, m-column matrix stored
// column-major (element (r, c) lives at matrix[c * n + r]).
// 1-D launch; each thread walks the columns with a grid-stride loop.
__global__ void kernel_swap_rows(double *matrix, int row_idx_1, int row_idx_2, int n, int m) {
	int first = blockDim.x * blockIdx.x + threadIdx.x;
	int stride = blockDim.x * gridDim.x;
	for (int col = first; col < m; col += stride) {
		double saved = matrix[col * n + row_idx_1];
		matrix[col * n + row_idx_1] = matrix[col * n + row_idx_2];
		matrix[col * n + row_idx_2] = saved;
	}
}
// One step of Gaussian forward elimination on a column-major n x m matrix:
// for every element strictly below row `row` and strictly right of column
// `column`, subtracts the appropriately scaled pivot contribution.
// 2-D launch with grid-stride loops in both dimensions.
// (Evaluation order of the update expression matches the original so the
// floating-point results are bit-identical.)
__global__ void kernel(double *matrix, int row, int column, int n, int m) {
	int gx = blockDim.x * blockIdx.x + threadIdx.x;
	int gy = blockDim.y * blockIdx.y + threadIdx.y;
	int step_x = blockDim.x * gridDim.x;
	int step_y = blockDim.y * gridDim.y;
	for (int c = gy + column + 1; c < m; c += step_y) {
		for (int r = gx + row + 1; r < n; r += step_x) {
			// matrix[column*n + r] : pivot-column entry of row r
			// matrix[column*n + row] : the pivot itself
			// matrix[c*n + row] : pivot-row entry of column c
			matrix[c * n + r] -= (matrix[column * n + r] / matrix[column * n + row]) * matrix[c * n + row];
		}
	}
}
// Reads an n x m matrix from stdin, computes its rank on the GPU via
// Gaussian forward elimination with partial (largest-magnitude) pivoting,
// and prints the rank.
// The matrix is stored column-major (element (r, c) at matrix[c*n + r]) so
// each column is contiguous for the thrust::max_element pivot search.
// Fix: the pivot row index was previously held in a `double`
// (`max_element_row`), silently narrowed when passed to the int kernel
// parameter and compared against the int `row`; it is now a proper int.
int main() {
	std::ios_base::sync_with_stdio(false);
	int n, m;
	int i, j, column, row = 0, rank = 0;
	// Pivots with magnitude <= eps are treated as numerically zero.
	double eps = 1e-7;
	double *matrix, *dev_matrix;
	comparator comp;
	std::cin >> n >> m;
	matrix = (double*)malloc(n * m * sizeof(double));
	// Input arrives row by row; transpose into column-major storage.
	for (i = 0; i < n; ++i)
		for (j = 0; j < m; ++j)
			std::cin >> matrix[j * n + i];
	CSC(cudaMalloc(&dev_matrix, n * m * sizeof(double)));
	CSC(cudaMemcpy(dev_matrix, matrix, n * m * sizeof(double), cudaMemcpyHostToDevice));
	free(matrix);
	thrust::device_ptr<double> matrix_p = thrust::device_pointer_cast(dev_matrix);
	for (column = 0; column < m && row < n; ++column) {
		// Largest-magnitude entry in this column at or below the pivot row.
		thrust::device_ptr<double> max_p = thrust::max_element(matrix_p + n * column + row, matrix_p + n * (column + 1), comp);
		if (std::fabs(*max_p) <= eps)
			continue;  // column is (numerically) zero below the pivot row
		// Row index of the pivot within this column (pointer difference).
		int max_element_row = (int)(max_p - (matrix_p + n * column));
		if (row != max_element_row) {
			kernel_swap_rows <<<256, 256>>> (dev_matrix, row, max_element_row, n, m);
			CSC(cudaGetLastError());
		}
		kernel <<<dim3(8, 8), dim3(32, 32)>>> (dev_matrix, row, column, n, m);
		CSC(cudaGetLastError());
		++rank;
		++row;
	}
	CSC(cudaFree(dev_matrix));
	std::cout << rank << "\n";
	return 0;
}
14,599 | #include "includes.h"
// Elementwise weighted accumulate over an interleaved image:
// in_out_put[p] += other[p] * weight for every sample p of a
// width x height image with nChannels interleaved channels.
// 2-D launch, one thread per pixel; out-of-range threads exit early.
__global__ void Addwith_Kernel(float* in_out_put, const float* other, const float weight, const int width, const int height, const int nChannels)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;
	if (col >= width || row >= height)
		return;
	// First sample of this pixel in the interleaved layout.
	const int base = (row * width + col) * nChannels;
	for (int c = 0; c < nChannels; ++c)
		in_out_put[base + c] += other[base + c] * weight;
}
14,600 | #include "includes.h"
// Computes the dominant gradient orientation(s) of a 15x15 image patch
// around each keypoint (SIFT-style). One block per keypoint, and the
// indexing below assumes blockDim.x == 32 — TODO confirm at launch site.
// d_Ptrs[bx] is a flat pixel index into the w x h image g_Data.
// Writes the primary orientation (degrees, 11.25 deg/bin) to d_Orient[bx];
// a secondary peak within 80% of the primary goes to d_Orient[bx+maxPts],
// otherwise -1 is stored there as a sentinel.
__global__ void ComputeOrientations(float *g_Data, int *d_Ptrs, float *d_Orient, int maxPts, int w, int h)
{
// data: 15 rows of the patch, 16 floats per row (one pad column).
__shared__ float data[16*15];
// hist: 13 partial 32-bin orientation histograms (one per interior column),
// later tree-reduced into hist[0..31]; hist[32..63] holds the smoothed copy.
__shared__ float hist[32*13];
// gauss: 1-D Gaussian weights, sigma = 3, centered at index 7.
__shared__ float gauss[16];
const int tx = threadIdx.x;
const int bx = blockIdx.x;
// Zero all partial histograms (each thread clears one bin per row).
for (int i=0;i<13;i++)
hist[i*32+tx] = 0.0f;
__syncthreads();
float i2sigma2 = -1.0f/(2.0f*3.0f*3.0f);
// NOTE(review): `exp` resolves to an overload on a float argument here;
// `expf` would make the single-precision intent explicit.
if (tx<15)
gauss[tx] = exp(i2sigma2*(tx-7)*(tx-7));
int p = d_Ptrs[bx];
// Top-left corner of the 15x15 patch centered on the keypoint.
int yp = p/w - 7;
int xp = p%w - 7;
// Align loads to 16-pixel boundaries: px is the patch's offset within its
// 16-wide segment, x the thread's column within the patch (may be <0 or >14).
int px = xp & 15;
int x = tx - px;
// Load the patch row by row with clamp-to-edge boundary handling.
for (int y=0;y<15;y++) {
int memPos = 16*y + x;
int xi = xp + x;
int yi = yp + y;
if (xi<0) xi = 0;
if (xi>=w) xi = w-1;
if (yi<0) yi = 0;
if (yi>=h) yi = h-1;
if (x>=0 && x<15)
data[memPos] = g_Data[yi*w+xi];
}
__syncthreads();
// Central-difference gradients over the 13x13 interior; each thread owns
// one column (x), so its partial histogram hist[32*(x-1)+...] is private
// and needs no atomics.
for (int y=1;y<14;y++) {
int memPos = 16*y + x;
if (x>=1 && x<14) {
float dy = data[memPos+16] - data[memPos-16];
float dx = data[memPos+1] - data[memPos-1];
// Quantize atan2 in (-pi, pi] to 32 bins; +16.5 recenters and rounds.
int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f;
if (bin==32)
bin = 0;
float grad = sqrtf(dx*dx + dy*dy);
// Gradient magnitude weighted by the separable Gaussian window.
hist[32*(x-1)+bin] += grad*gauss[x]*gauss[y];
}
}
__syncthreads();
// Tree-reduce the 13 partial histograms into hist[0..31].
for (int y=0;y<5;y++)
hist[y*32+tx] += hist[(y+8)*32+tx];
__syncthreads();
for (int y=0;y<4;y++)
hist[y*32+tx] += hist[(y+4)*32+tx];
__syncthreads();
for (int y=0;y<2;y++)
hist[y*32+tx] += hist[(y+2)*32+tx];
__syncthreads();
hist[tx] += hist[32+tx];
__syncthreads();
// Circularly smooth the histogram with kernel (1,4,6,4,1)/16 (the /16 is
// folded into the later peak interpolation); result goes to hist[32..63],
// reading only hist[0..31], so no aliasing with the writes.
if (tx==0)
hist[32] = 6*hist[0] + 4*(hist[1]+hist[31]) + (hist[2]+hist[30]);
if (tx==1)
hist[33] = 6*hist[1] + 4*(hist[2]+hist[0]) + (hist[3]+hist[31]);
if (tx>=2 && tx<=29)
hist[tx+32] = 6*hist[tx] + 4*(hist[tx+1]+hist[tx-1]) +
(hist[tx+2]+hist[tx-2]);
if (tx==30)
hist[62] = 6*hist[30] + 4*(hist[31]+hist[29]) + (hist[0]+hist[28]);
if (tx==31)
hist[63] = 6*hist[31] + 4*(hist[0]+hist[30]) + (hist[1]+hist[29]);
__syncthreads();
// Keep only circular local maxima of the smoothed histogram in hist[0..31].
float v = hist[32+tx];
hist[tx] = (v>hist[32+((tx+1)&31)] && v>=hist[32+((tx+31)&31)] ? v : 0.0f);
__syncthreads();
// Single thread scans for the two strongest peaks and refines each with
// parabolic (three-point) interpolation.
if (tx==0) {
float maxval1 = 0.0;
float maxval2 = 0.0;
int i1 = -1;
int i2 = -1;
for (int i=0;i<32;i++) {
float v = hist[i];
if (v>maxval1) {
maxval2 = maxval1;
maxval1 = v;
i2 = i1;
i1 = i;
} else if (v>maxval2) {
maxval2 = v;
i2 = i;
}
}
float val1 = hist[32+((i1+1)&31)];
float val2 = hist[32+((i1+31)&31)];
float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2);
// 11.25 = 360/32 degrees per bin; wrap negative peaks into [0, 32).
d_Orient[bx] = 11.25f*(peak<0.0f ? peak+32.0f : peak);
// Secondary orientation only if within 80% of the primary peak.
if (maxval2<0.8f*maxval1)
i2 = -1;
if (i2>=0) {
float val1 = hist[32+((i2+1)&31)];
float val2 = hist[32+((i2+31)&31)];
float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2);
d_Orient[bx+maxPts] = 11.25f*(peak<0.0f ? peak+32.0f : peak);;
} else
// Sentinel: i2 == -1 stored as -1.0f means "no secondary orientation".
d_Orient[bx+maxPts] = i2;
}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.