serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
3,201 | #include <cuda_runtime.h>
#include <iostream>
#include <memory>
#include <string>
#include <cuda.h>
#include <stdio.h>
#ifndef BLOCK_SIZE
# define BLOCK_SIZE 16
#endif
#ifndef _M
# define _M 10000
#endif
#ifndef _N
# define _N 10000
#endif
#if !defined(CUDA) && !defined(CPU) && !defined(CHECK)
# define CUDA
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Abort-on-error helper behind the gpuErrchk macro: prints the CUDA error
// string with the call site (file/line) to stderr; exits with the error code
// unless abort == false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;                      // fast path: nothing to report
    fprintf(stderr,"gpuAssert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Pairwise squared Euclidean distance between the rows of an m x n matrix.
//   m_in  : m x n row-major input (each row is one n-dimensional point)
//   m_out : m x m row-major output, m_out[i][j] = ||row_i - row_j||^2
// Launch with a 2D grid covering m x m threads; out-of-range threads exit.
__global__ void mx_dist(float *m_in, float *m_out, int m, int n)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < m && j < m) {
        float s = 0, sum = 0;
        for(int k = 0; k < n; ++k) {
            // Row stride of the m x n input is n. The original indexed with
            // i*m + k, which is only correct when m == n.
            s = m_in[i*n + k] - m_in[j*n + k];
            sum += s*s;
        }
        m_out[i*m + j] = sum;  // output matrix is m x m, so stride m is correct here
    }
}
// CPU reference for mx_dist: pairwise squared Euclidean distance between the
// rows of the m x n matrix m_in, written into the m x m matrix m_out.
void mx_dist_cpu(float *m_in, float *m_out, int m, int n)
{
    float s, sum;
    for(int i = 0; i < m; ++i)
        for(int j = 0; j < m; ++j) {
            sum = 0;
            for(int k = 0; k < n; ++k) {
                // Input row stride is n (the original used m, which only
                // works for square inputs).
                s = m_in[i*n + k] - m_in[j*n + k];
                sum += s*s;
            }
            m_out[i*m + j] = sum;  // output is m x m
        }
}
// Fill the m x n row-major matrix A with a deterministic pattern
// sin(linear_index)*10 + 1, so runs are reproducible without an RNG.
void init_mx(float *A, size_t m, size_t n)
{
    for(int i = 0; i < m; ++i) {
        for(int j = 0; j < n; ++j) {
            // Row stride of an m x n matrix is n; the original i*m + j is
            // only correct when m == n.
            float t = sin(i*n + j) * 10 + 1;
            A[i*n + j] = t;
        }
    }
}
// Debug dump: print every element of the m x n row-major matrix A
// as "row col value", one element per line.
void print_mx(float *A, size_t m, size_t n)
{
    for(int i = 0; i < m; ++i) {
        for(int j = 0; j < n; ++j) {
            // Row stride is n for an m x n matrix (original used m).
            printf("%d %d %f\n", i, j, A[i*n + j]);
        }
    }
}
// Compare two m x n row-major matrices element-wise with an absolute
// tolerance of 0.01. Prints the first mismatching pair and stops, or prints
// "Equal" once when all elements agree.
void cmp_mx(float *A, float *B, size_t m, size_t n)
{
    for(int i = 0; i < m; ++i) {
        for(int j = 0; j < n; ++j) {
            // fabsf, not abs: integer abs() truncates any difference < 1.0
            // to zero, so the original check could never fire on sub-unit
            // errors. Row stride is n (original used m).
            if( fabsf(A[i*n + j] - B[i*n + j]) > 0.01f) {
                printf("not equal %f %f\n", A[i*n + j], B[i*n + j]);
                return;
            }
        }
    }
    // Report success once instead of once per element.
    printf("Equal\n");
}
// GPU driver: uploads the m x n matrix A, launches mx_dist to compute the
// m x m pairwise squared-distance matrix, and returns a freshly malloc'd
// host copy of the result (caller frees).
float *run_cuda(float *A, size_t m, size_t n)
{
    cudaError_t e;
    float *A_d;
    float *B, *B_d;
    B = (float*) malloc(m*m*sizeof(float));
    if (B == NULL) {
        fprintf(stderr, "run_cuda: host malloc failed\n");
        exit(1);
    }
    e = cudaMalloc(&A_d, m*n*sizeof(float));
    gpuErrchk(e);
    e = cudaMalloc(&B_d, m*m*sizeof(float));
    gpuErrchk(e);
    e = cudaMemcpy(A_d, A, m*n*sizeof(float),
                   cudaMemcpyHostToDevice);
    gpuErrchk(e);
    // The output is m x m, so BOTH grid dimensions must cover m threads.
    // The original sized the x-dimension from n, which only covered the full
    // output because m == n by default.
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    mx_dist<<<dimGrid, dimBlock>>>(A_d, B_d, m, n);
    gpuErrchk(cudaGetLastError());   // surface launch-configuration errors
    e = cudaMemcpy(B, B_d, m*m*sizeof(float),
                   cudaMemcpyDeviceToHost);  // blocking copy also syncs the kernel
    gpuErrchk(e);
    cudaFree(A_d);
    cudaFree(B_d);
    return B;
}
// CPU driver: allocates the m x m result matrix and fills it with the
// pairwise squared distances of A's rows. Caller owns the returned buffer.
float *run_cpu(float *A, size_t m, size_t n)
{
    float *result = (float*) malloc(m*m*sizeof(float));
    mx_dist_cpu(A, result, m, n);
    return result;
}
// Build-time selection: -DCUDA runs only the GPU path, -DCPU only the CPU
// path, -DCHECK runs both and compares them (default is CUDA, set above).
int main()
{
    int m = _M, n = _N;
    float *A;
    A = (float*) malloc(m*n*sizeof(float));
    if (A == NULL) {
        fprintf(stderr, "main: host malloc failed\n");
        return 1;
    }
    init_mx(A, m, n);
    // Use logical ||, not bitwise |, in preprocessor conditions.
#if defined(CUDA) || defined(CHECK)
    float *gpu = run_cuda(A, m, n);
#endif
#if defined(CPU) || defined(CHECK)
    float *cpu = run_cpu(A, m, n);
#endif
#if defined(CHECK)
    cmp_mx(gpu, cpu, m, m);
#endif
    // Release everything that was allocated (the original leaked all three).
#if defined(CUDA) || defined(CHECK)
    free(gpu);
#endif
#if defined(CPU) || defined(CHECK)
    free(cpu);
#endif
    free(A);
    return 0;
}
3,202 | extern "C"
// For each candidate pixel position, correlate an L x L image patch against a
// bank of N directional kernels (at one scale) using zero-mean normalized
// cross-correlation (ZNCC), and keep the best response seen so far.
//   kernels_hat       : zero-mean kernel bank, indexed [scale][didx][pidx]
//   kernels_hat_sum_2 : per-kernel sum of squares, indexed [scale][didx]
//   inimg             : dimsx x dimsy image, row-major (x + y*dimsx)
//   pos2zncc/sigma/vx/vy : per-position best score, scale, and direction vector
//   pos               : interleaved (x, y) pairs of the NN candidate positions
// Threads walk the position list with stride N_threads (grid-stride style).
__global__ void applyKernels( float* kernels_hat,
float* kernels_hat_sum_2,
float* inimg,
float* pos2zncc,
float* pos2sigma,
float* pos2vx,
float* pos2vy,
int* pos,
int L,
int N,
int scale,
int dimsx,
int dimsy,
int N_threads,
int NN)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = index;
while (tid < NN)
{
int x = pos[2 * tid];
int y = pos[2 * tid + 1];
int L2 = L / 2;
float imgVal;
float imgValsAvg = 0;
// First pass over the patch: mean intensity (out-of-bounds samples read as 0).
for (int pidx = 0; pidx < L*L; pidx++) {
int xx = x + (pidx % L) - L2;
int yy = y + (pidx / L) - L2;
imgVal = (xx>=0 && xx<dimsx && yy>=0 && yy<dimsy)?inimg[xx + yy * dimsx]:0;
imgValsAvg += imgVal;
}
imgValsAvg /= (float) (L*L);
// Second pass: ZNCC of the mean-subtracted patch against each of the N kernels.
for (int didx = 0; didx < N; didx++) {
float num = 0;
float den = 0;
float zncc;
for (int pidx = 0; pidx < L*L; pidx++) {
// sample from the image
int xx = x + (pidx % L) - L2;
int yy = y + (pidx / L) - L2;
imgVal = (xx>=0 && xx<dimsx && yy>=0 && yy<dimsy)?inimg[xx + yy * dimsx]:0;
num += (imgVal - imgValsAvg) * kernels_hat[scale*N*L*L + didx*L*L + pidx];
den += (imgVal - imgValsAvg) * (imgVal - imgValsAvg);
}
zncc = num / (float) sqrtf(den * kernels_hat_sum_2[didx + scale*N]);
// Keep the best-scoring direction/scale per position. Safe without atomics
// only if no two threads share a tid, which the stride-N_threads walk ensures.
if (zncc > pos2zncc[tid]) {
pos2zncc[tid] = zncc;
pos2sigma[tid] = scale;
// Kernel index -> orientation angle in [0, pi); store its unit normal.
float ang = didx * (3.141592654 / N);
float vx = -sinf(ang);
float vy = cosf(ang);
pos2vx[tid] = vx;
pos2vy[tid] = vy;
}
}
tid += N_threads;
}
}
3,203 | // CUDA kernels for embedding shortest path metric into normed vector space
// Calculate all pairs shortest path.
// after Okuyama, Ino, and Hagihara 2008.
// One relaxation pass of multi-source Bellman-Ford (Okuyama et al. 2008).
// Layout: one source per thread (threadIdx.x selects the cost/modify slice of
// length nv), one vertex per block (blockIdx.x). vertex[] is a CSR row-pointer
// array into edge[]/weight[]. cost[] and modify[] are nv * num_sources arrays.
// Repeat launches until no modify[] flag is set by any pass.
__global__ void scatter (int nv, int *vertex, int *edge, int *weight, int *cost, int *modify) {
// Note: the kernel does not need to know the origin vertices - their costs are simply set to 0
int fromv_rel_index = blockIdx.x + nv * threadIdx.x;
if ( !modify[fromv_rel_index] ) return; // kill thread if this vertex was not changed in the last pass
int fromv_cost = cost[fromv_rel_index]; // get current cost for this vertex
modify[fromv_rel_index] = 0;
int edge_index_low = vertex[blockIdx.x]; // block number is vertex number (one vertex per block)
int edge_index_high = vertex[blockIdx.x + 1]; // edges out of a vertex are contiguous
for (int edge_index = edge_index_low; edge_index < edge_index_high; edge_index++) {
int new_cost = fromv_cost + weight[edge_index];
int tov_rel_index = edge[edge_index] + nv * threadIdx.x;
// atomicMin both relaxes the neighbor and tells us whether we improved it.
if (new_cost < atomicMin(cost + tov_rel_index, new_cost)) { // atomicMin returns old value
modify[tov_rel_index] = 1; // enqueue the modified vertex for the next round
}
}
}
/*
// accumulate forces proportional to embedding error
// (each block should work on blockdim.x different origins, randomly)
__global__ void force (float *coord, float *force, int *cost) {
int tindex = blockIdx.x + blockDim.x * threadIdx.x; // damn fortran ordering
int tdindex = tindex * D;
float dist = 0;
float vector[D];
for (int d = 0; d < D; d++) {
vector[d] = (coord[tdindex + d] - something);
dist += abs(vector[d]); // l1 norm
}
if (dist == 0) return; // avoid division by zero when points are superimposed
float adjust = cost[tindex] / dist - 1;
for (int d = 0; d < D; d++) force[tdindex + d] += adjust * vector[d];
}
// shift embedded points according to forces, then reset forces
__global__ void integrate (float *coord, float *force) {
int tdindex = D * (blockIdx.x + blockDim.x * threadIdx.x); // damn fortran ordering
for (int i = tdindex; i < tdindex + D; i++) {
coord[i] += force[i] / blockDim.x; // push points around
force[i] = 0; // reset force to zero
}
}
*/
|
// SAXPY (Z = A*X + Y) for one slice of a larger problem: blockOff_x shifts
// blockIdx.x so several launches can cover disjoint block ranges. The caller
// must ensure every computed id is in range - there is no bounds guard.
extern "C" __global__ void saxpy(float* Z, float A, float* X, float* Y, size_t blockOff_x) {
    const size_t globalBlock = blockOff_x + blockIdx.x;
    const size_t id = globalBlock * blockDim.x + threadIdx.x;
    Z[id] = A * X[id] + Y[id];
}
|
3,205 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#define THREADS 1024
// Per-element update: increment d_1[id] by one, then accumulate the new
// d_1[id] into d_2[id]. After T launches starting from zeros, d_1[id] == T
// and d_2[id] == T*(T+1)/2. No bounds guard: the launch must cover the
// buffers exactly.
__global__ void kernel(float* d_1,float* d_2){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    const float bumped = d_1[id] + 1.0f;
    d_1[id] = bumped;
    d_2[id] += bumped;
}
// Pitched-memory demo: allocate two size x size float matrices with
// cudaMallocPitch, run `kernel` 1000 times over the first `size` elements,
// and verify the arithmetic-series result on the host.
int main(){
    cudaError_t res;
    float *d_1,*d_2,*h;
    size_t pitch;
    size_t size = THREADS * 10;
    int i,ite = 1000;
    h = (float*)malloc(sizeof(float)*size*size);
    if(h == NULL){
        printf("Oops ...\n");
        exit(-1);
    }
    for(i = 0 ; i < size*size ; i ++)
        h[i] = 0.0f;
    res = cudaMallocPitch(&d_1,&pitch,sizeof(float)*size,size);
    if(res != cudaSuccess){
        printf("Oops ...\n");
        exit(-1);
    }
    res = cudaMallocPitch(&d_2,&pitch,sizeof(float)*size,size);
    if(res != cudaSuccess){
        printf("Oops ...\n");
        exit(-1);
    }
    printf("pitch : %lu\n",pitch);
    printf("height : %lu\n",size);
    printf("region : %lu[MB]\n",pitch*size >> 20);
    res = cudaMemcpy2D(d_1,pitch,h,sizeof(float)*size,sizeof(float)*size,size,cudaMemcpyHostToDevice);
    if(res != cudaSuccess){
        printf("Oops ...\n");
        exit(-1);
    }
    res = cudaMemcpy2D(d_2,pitch,h,sizeof(float)*size,sizeof(float)*size,size,cudaMemcpyHostToDevice);
    if(res != cudaSuccess){
        printf("Oops ...\n");
        exit(-1);
    }
    dim3 threads(THREADS,1,1);
    // size/THREADS blocks x THREADS threads = exactly `size` threads,
    // i.e. the kernel touches only the first row of each matrix.
    dim3 blocks(size/THREADS,1,1);
    for(i = 0 ; i < ite ; i ++){
        kernel<<<blocks,threads>>>(d_1,d_2);
    }
    // The device buffers are PITCHED, so the copy back must be pitch-aware.
    // The original flat cudaMemcpy only works by luck when pitch happens to
    // equal the row width; cudaMemcpy2D is correct for any pitch.
    res = cudaMemcpy2D(h,sizeof(float)*size,d_2,pitch,sizeof(float)*size,size,cudaMemcpyDeviceToHost);
    if(res != cudaSuccess){
        printf("Oops ...\n");
        exit(-1);
    }
    // Each of the first `size` elements accumulated 1+2+...+ite.
    for(i = 0 ; i < size ; i ++){
        if(h[i] != ((ite+1)*ite)/2.0f ){
            printf("h[%d] == %f\n",i,h[i]);
            exit(-1);
        }
    }
    sleep(10);  // keep the process (and its GPU allocations) alive for inspection
    // Release host and device memory (the original leaked all three).
    cudaFree(d_1);
    cudaFree(d_2);
    free(h);
    return 0;
}
|
3,206 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Poll the sticky CUDA error state; on any pending error, print the caller's
// context message plus the CUDA error string and terminate the process.
void check_error (const char* message) {
    cudaError_t error = cudaGetLastError ();
    if (error == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
    exit(-1);
}
// Curvilinear-grid finite-difference RHS kernel (auto-generated stencil code).
// Each thread owns one (j, k) column of the 304^3 arrays and marches i through
// [2, N-3], accumulating three groups of 4th-order cross-derivative terms into
// r1. c1/c2 are the stencil weights for the +/-1 and +/-2 offsets; strx/stry
// are 1D grid-stretching factors. This rewrite keeps every arithmetic
// statement in the original order (bit-identical floating-point results) and
// only merges the machine-generated declaration preamble into
// declare-at-first-use.
__global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
  // Thread -> grid mapping: x covers k, y covers j.
  int blockdim_k = (int)(blockDim.x);
  int k0 = (int)(blockIdx.x) * blockdim_k;
  int k = max (k0, 0) + (int)(threadIdx.x);
  int blockdim_j = (int)(blockDim.y);
  int j0 = (int)(blockIdx.y) * blockdim_j;
  int j = max (j0, 0) + (int)(threadIdx.y);
  // Reinterpret the flat inputs as 304 x 304 x 304 volumes.
  double (*u1)[304][304] = (double (*)[304][304])in_u1;
  double (*u2)[304][304] = (double (*)[304][304])in_u2;
  double (*u3)[304][304] = (double (*)[304][304])in_u3;
  double (*mu)[304][304] = (double (*)[304][304])in_mu;
  double (*la)[304][304] = (double (*)[304][304])in_la;
  double (*r1)[304][304] = (double (*)[304][304])in_r1;
  double (*met1)[304][304] = (double (*)[304][304])in_met1;
  double (*met2)[304][304] = (double (*)[304][304])in_met2;
  double (*met3)[304][304] = (double (*)[304][304])in_met3;
  double (*met4)[304][304] = (double (*)[304][304])in_met4;
  // Interior guard (2-wide halo on every side).
  if (j >= 2 && k >= 2 && j <= N-3 && k <= N-3) {
    for (int i = 2; i <= N-3; i++) {
      // ---- Group 1: d/dk of (mu*met3, la*met2) times d/dj differences ----
      double _t_6_ = mu[i][j][k+2] * met3[i][j][k+2];
      double _t_4_ = _t_6_ * met1[i][j][k+2];
      double _t_7_ = u1[i][j+2][k+2];
      _t_7_ -= u1[i][j-2][k+2];
      double _t_5_ = c2 * _t_7_;
      double _t_8_ = u1[i][j+1][k+2];
      _t_8_ -= u1[i][j-1][k+2];
      _t_5_ += c1 * _t_8_;
      double _t_3_ = _t_4_ * _t_5_;
      // NOTE(review): stry[j+2] here, but the symmetric k-2/k+1/k-1 terms use
      // stry[j] / stry[j-2] - looks like a transcription artifact of the
      // generator; confirm against the reference stencil before changing.
      double _t_2_ = _t_3_ * stry[j+2];
      double _t_1_ = _t_2_ * strx[i];
      double _t_11_ = la[i][j][k+2] * met2[i][j][k+2];
      double _t_9_ = _t_11_ * met1[i][j][k+2];
      double _t_12_ = u2[i][j+2][k+2];
      _t_12_ -= u2[i][j-2][k+2];
      double _t_10_ = c2 * _t_12_;
      double _t_13_ = u2[i][j+1][k+2];
      _t_13_ -= u2[i][j-1][k+2];
      _t_10_ += c1 * _t_13_;
      _t_1_ += _t_9_ * _t_10_;
      double _t_18_ = mu[i][j][k-2] * met3[i][j][k-2];
      double _t_16_ = _t_18_ * met1[i][j][k-2];
      double _t_19_ = u1[i][j+2][k-2];
      _t_19_ -= u1[i][j-2][k-2];
      double _t_17_ = c2 * _t_19_;
      double _t_20_ = u1[i][j+1][k-2];
      _t_20_ -= u1[i][j-1][k-2];
      _t_17_ += c1 * _t_20_;
      double _t_15_ = _t_16_ * _t_17_;
      double _t_14_ = _t_15_ * stry[j];
      _t_1_ += _t_14_ * strx[i];
      double _t_23_ = la[i][j][k-2] * met2[i][j][k-2];
      double _t_21_ = _t_23_ * met1[i][j][k-2];
      double _t_24_ = u2[i][j+2][k-2];
      _t_24_ -= u2[i][j-2][k-2];
      double _t_22_ = c2 * _t_24_;
      double _t_25_ = u2[i][j+1][k-2];
      _t_25_ -= u2[i][j-1][k-2];
      _t_22_ += c1 * _t_25_;
      _t_1_ += _t_21_ * _t_22_;
      double _t_0_ = c2 * _t_1_;
      double _t_31_ = mu[i][j][k+1] * met3[i][j][k+1];
      double _t_29_ = _t_31_ * met1[i][j][k+1];
      double _t_32_ = u1[i][j+2][k+1];
      _t_32_ -= u1[i][j-2][k+1];
      double _t_30_ = c2 * _t_32_;
      double _t_33_ = u1[i][j+1][k+1];
      _t_33_ -= u1[i][j-1][k+1];
      _t_30_ += c1 * _t_33_;
      double _t_28_ = _t_29_ * _t_30_;
      // NOTE(review): stry[j-2] - see the stry[j+2] note above.
      double _t_27_ = _t_28_ * stry[j-2];
      double _t_26_ = _t_27_ * strx[i];
      double _t_36_ = la[i][j][k+1] * met2[i][j][k+1];
      double _t_34_ = _t_36_ * met1[i][j][k+1];
      double _t_37_ = u2[i][j+2][k+1];
      _t_37_ -= u2[i][j-2][k+1];
      double _t_35_ = c2 * _t_37_;
      double _t_38_ = u2[i][j+1][k+1];
      _t_38_ -= u2[i][j-1][k+1];
      _t_35_ += c1 * _t_38_;
      _t_26_ += _t_34_ * _t_35_;
      double _t_43_ = mu[i][j][k-1] * met3[i][j][k-1];
      double _t_41_ = _t_43_ * met1[i][j][k-1];
      double _t_44_ = u1[i][j+2][k-1];
      _t_44_ -= u1[i][j-2][k-1];
      double _t_42_ = c2 * _t_44_;
      double _t_45_ = u1[i][j+1][k-1];
      _t_45_ -= u1[i][j-1][k-1];
      _t_42_ += c1 * _t_45_;
      double _t_40_ = _t_41_ * _t_42_;
      double _t_39_ = _t_40_ * stry[j];
      _t_26_ += _t_39_ * strx[i];
      double _t_48_ = la[i][j][k-1] * met2[i][j][k-1];
      double _t_46_ = _t_48_ * met1[i][j][k-1];
      double _t_49_ = u2[i][j+2][k-1];
      _t_49_ -= u2[i][j-2][k-1];
      double _t_47_ = c2 * _t_49_;
      double _t_50_ = u2[i][j+1][k-1];
      _t_50_ -= u2[i][j-1][k-1];
      _t_47_ += c1 * _t_50_;
      _t_26_ += _t_46_ * _t_47_;
      _t_0_ += c1 * _t_26_;
      double r1ic0jc0kc0 = r1[i][j][k];
      r1ic0jc0kc0 += _t_0_;
      // ---- Group 2: d/di of (2mu+la)*met2 etc. times d/dk differences ----
      double _t_58_ = 2.0 * mu[i+2][j][k];
      _t_58_ += la[i+2][j][k];
      double _t_57_ = _t_58_ * met2[i+2][j][k];
      double _t_55_ = _t_57_ * met1[i+2][j][k];
      double _t_59_ = u1[i+2][j][k+2];
      _t_59_ -= u1[i+2][j][k-2];
      double _t_56_ = c2 * _t_59_;
      double _t_60_ = u1[i+2][j][k+1];
      _t_60_ -= u1[i+2][j][k-1];
      _t_56_ += c1 * _t_60_;
      double _t_54_ = _t_55_ * _t_56_;
      double _t_53_ = _t_54_ * strx[i];
      double _t_64_ = la[i+2][j][k] * met3[i+2][j][k];
      double _t_62_ = _t_64_ * met1[i+2][j][k];
      double _t_65_ = u2[i+2][j][k+2];
      _t_65_ -= u2[i+2][j][k-2];
      double _t_63_ = c2 * _t_65_;
      double _t_66_ = u2[i+2][j][k+1];
      _t_66_ -= u2[i+2][j][k-1];
      _t_63_ += c1 * _t_66_;
      double _t_61_ = _t_62_ * _t_63_;
      _t_53_ += _t_61_ * stry[j];
      double _t_69_ = la[i+2][j][k] * met4[i+2][j][k];
      double _t_67_ = _t_69_ * met1[i+2][j][k];
      double _t_70_ = u3[i+2][j][k+2];
      _t_70_ -= u3[i+2][j][k-2];
      double _t_68_ = c2 * _t_70_;
      double _t_71_ = u3[i+2][j][k+1];
      _t_71_ -= u3[i+2][j][k-1];
      _t_68_ += c1 * _t_71_;
      _t_53_ += _t_67_ * _t_68_;
      double _t_76_ = 2.0 * mu[i-2][j][k];
      _t_76_ += la[i-2][j][k];
      double _t_75_ = _t_76_ * met2[i-2][j][k];
      double _t_73_ = _t_75_ * met1[i-2][j][k];
      double _t_77_ = u1[i-2][j][k+2];
      _t_77_ -= u1[i-2][j][k-2];
      double _t_74_ = c2 * _t_77_;
      double _t_78_ = u1[i-2][j][k+1];
      _t_78_ -= u1[i-2][j][k-1];
      _t_74_ += c1 * _t_78_;
      double _t_72_ = _t_73_ * _t_74_;
      _t_53_ += _t_72_ * strx[i];
      double _t_82_ = la[i-2][j][k] * met3[i-2][j][k];
      double _t_80_ = _t_82_ * met1[i-2][j][k];
      double _t_83_ = u2[i-2][j][k+2];
      _t_83_ -= u2[i-2][j][k-2];
      double _t_81_ = c2 * _t_83_;
      double _t_84_ = u2[i-2][j][k+1];
      _t_84_ -= u2[i-2][j][k-1];
      _t_81_ += c1 * _t_84_;
      double _t_79_ = _t_80_ * _t_81_;
      _t_53_ += _t_79_ * stry[j];
      double _t_87_ = la[i-2][j][k] * met4[i-2][j][k];
      double _t_85_ = _t_87_ * met1[i-2][j][k];
      double _t_88_ = u3[i-2][j][k+2];
      _t_88_ -= u3[i-2][j][k-2];
      double _t_86_ = c2 * _t_88_;
      double _t_89_ = u3[i-2][j][k+1];
      _t_89_ -= u3[i-2][j][k-1];
      _t_86_ += c1 * _t_89_;
      _t_53_ += _t_85_ * _t_86_;
      double _t_52_ = c2 * _t_53_;
      double _t_95_ = 2.0 * mu[i+1][j][k];
      _t_95_ += la[i+1][j][k];
      double _t_94_ = _t_95_ * met2[i+1][j][k];
      double _t_92_ = _t_94_ * met1[i+1][j][k];
      double _t_96_ = u1[i+1][j][k+2];
      _t_96_ -= u1[i+1][j][k-2];
      double _t_93_ = c2 * _t_96_;
      double _t_97_ = u1[i+1][j][k+1];
      _t_97_ -= u1[i+1][j][k-1];
      _t_93_ += c1 * _t_97_;
      double _t_91_ = _t_92_ * _t_93_;
      double _t_90_ = _t_91_ * strx[i];
      double _t_101_ = la[i+1][j][k] * met3[i+1][j][k];
      double _t_99_ = _t_101_ * met1[i+1][j][k];
      double _t_102_ = u2[i+1][j][k+2];
      _t_102_ -= u2[i+1][j][k-2];
      double _t_100_ = c2 * _t_102_;
      double _t_103_ = u2[i+1][j][k+1];
      _t_103_ -= u2[i+1][j][k-1];
      _t_100_ += c1 * _t_103_;
      double _t_98_ = _t_99_ * _t_100_;
      _t_90_ += _t_98_ * stry[j];
      double _t_106_ = la[i+1][j][k] * met4[i+1][j][k];
      double _t_104_ = _t_106_ * met1[i+1][j][k];
      double _t_107_ = u3[i+1][j][k+2];
      _t_107_ -= u3[i+1][j][k-2];
      double _t_105_ = c2 * _t_107_;
      double _t_108_ = u3[i+1][j][k+1];
      _t_108_ -= u3[i+1][j][k-1];
      _t_105_ += c1 * _t_108_;
      _t_90_ += _t_104_ * _t_105_;
      double _t_113_ = 2.0 * mu[i-1][j][k];
      _t_113_ += la[i-1][j][k];
      double _t_112_ = _t_113_ * met2[i-1][j][k];
      double _t_110_ = _t_112_ * met1[i-1][j][k];
      double _t_114_ = u1[i-1][j][k+2];
      _t_114_ -= u1[i-1][j][k-2];
      double _t_111_ = c2 * _t_114_;
      double _t_115_ = u1[i-1][j][k+1];
      _t_115_ -= u1[i-1][j][k-1];
      _t_111_ += c1 * _t_115_;
      double _t_109_ = _t_110_ * _t_111_;
      _t_90_ += _t_109_ * strx[i];
      double _t_119_ = la[i-1][j][k] * met3[i-1][j][k];
      double _t_117_ = _t_119_ * met1[i-1][j][k];
      double _t_120_ = u2[i-1][j][k+2];
      _t_120_ -= u2[i-1][j][k-2];
      double _t_118_ = c2 * _t_120_;
      double _t_121_ = u2[i-1][j][k+1];
      _t_121_ -= u2[i-1][j][k-1];
      _t_118_ += c1 * _t_121_;
      double _t_116_ = _t_117_ * _t_118_;
      _t_90_ += _t_116_ * stry[j];
      double _t_124_ = la[i-1][j][k] * met4[i-1][j][k];
      double _t_122_ = _t_124_ * met1[i-1][j][k];
      double _t_125_ = u3[i-1][j][k+2];
      _t_125_ -= u3[i-1][j][k-2];
      double _t_123_ = c2 * _t_125_;
      double _t_126_ = u3[i-1][j][k+1];
      _t_126_ -= u3[i-1][j][k-1];
      _t_123_ += c1 * _t_126_;
      _t_90_ += _t_122_ * _t_123_;
      _t_52_ += c1 * _t_90_;
      double _t_51_ = _t_52_ * stry[j];
      r1ic0jc0kc0 += _t_51_;
      // ---- Group 3: d/dk of (2mu+la)*met2 etc. times d/di differences ----
      double _t_134_ = 2.0 * mu[i][j][k+2];
      _t_134_ += la[i][j][k+2];
      double _t_133_ = _t_134_ * met2[i][j][k+2];
      double _t_131_ = _t_133_ * met1[i][j][k+2];
      double _t_135_ = u1[i+2][j][k+2];
      _t_135_ -= u1[i-2][j][k+2];
      double _t_132_ = c2 * _t_135_;
      double _t_136_ = u1[i+1][j][k+2];
      _t_136_ -= u1[i-1][j][k+2];
      _t_132_ += c1 * _t_136_;
      double _t_130_ = _t_131_ * _t_132_;
      double _t_129_ = _t_130_ * strx[i];
      double _t_128_ = _t_129_ * stry[j];
      double _t_139_ = mu[i][j][k+2] * met3[i][j][k+2];
      double _t_137_ = _t_139_ * met1[i][j][k+2];
      double _t_140_ = u2[i+2][j][k+2];
      _t_140_ -= u2[i-2][j][k+2];
      double _t_138_ = c2 * _t_140_;
      double _t_141_ = u2[i+1][j][k+2];
      _t_141_ -= u2[i-1][j][k+2];
      _t_138_ += c1 * _t_141_;
      _t_128_ += _t_137_ * _t_138_;
      double _t_145_ = mu[i][j][k+2] * met4[i][j][k+2];
      double _t_143_ = _t_145_ * met1[i][j][k+2];
      double _t_146_ = u3[i+2][j][k+2];
      _t_146_ -= u3[i-2][j][k+2];
      double _t_144_ = c2 * _t_146_;
      double _t_147_ = u3[i+1][j][k+2];
      _t_147_ -= u3[i-1][j][k+2];
      _t_144_ += c1 * _t_147_;
      double _t_142_ = _t_143_ * _t_144_;
      _t_128_ += _t_142_ * stry[j];
      double _t_153_ = 2.0 * mu[i][j][k-2];
      _t_153_ += la[i][j][k-2];
      double _t_152_ = _t_153_ * met2[i][j][k-2];
      double _t_150_ = _t_152_ * met1[i][j][k-2];
      double _t_154_ = u1[i+2][j][k-2];
      _t_154_ -= u1[i-2][j][k-2];
      double _t_151_ = c2 * _t_154_;
      double _t_155_ = u1[i+1][j][k-2];
      _t_155_ -= u1[i-1][j][k-2];
      _t_151_ += c1 * _t_155_;
      double _t_149_ = _t_150_ * _t_151_;
      double _t_148_ = _t_149_ * strx[i];
      _t_128_ += _t_148_ * stry[j];
      double _t_158_ = mu[i][j][k-2] * met3[i][j][k-2];
      double _t_156_ = _t_158_ * met1[i][j][k-2];
      double _t_159_ = u2[i+2][j][k-2];
      _t_159_ -= u2[i-2][j][k-2];
      double _t_157_ = c2 * _t_159_;
      double _t_160_ = u2[i+1][j][k-2];
      _t_160_ -= u2[i-1][j][k-2];
      _t_157_ += c1 * _t_160_;
      _t_128_ += _t_156_ * _t_157_;
      double _t_164_ = mu[i][j][k-2] * met4[i][j][k-2];
      double _t_162_ = _t_164_ * met1[i][j][k-2];
      double _t_165_ = u3[i+2][j][k-2];
      _t_165_ -= u3[i-2][j][k-2];
      double _t_163_ = c2 * _t_165_;
      double _t_166_ = u3[i+1][j][k-2];
      _t_166_ -= u3[i-1][j][k-2];
      _t_163_ += c1 * _t_166_;
      double _t_161_ = _t_162_ * _t_163_;
      _t_128_ += _t_161_ * stry[j];
      double _t_127_ = c2 * _t_128_;
      double _t_173_ = 2.0 * mu[i][j][k+1];
      _t_173_ += la[i][j][k+1];
      double _t_172_ = _t_173_ * met2[i][j][k+1];
      double _t_170_ = _t_172_ * met1[i][j][k+1];
      double _t_174_ = u1[i+2][j][k+1];
      _t_174_ -= u1[i-2][j][k+1];
      double _t_171_ = c2 * _t_174_;
      double _t_175_ = u1[i+1][j][k+1];
      _t_175_ -= u1[i-1][j][k+1];
      _t_171_ += c1 * _t_175_;
      double _t_169_ = _t_170_ * _t_171_;
      // NOTE(review): strx[i+2] here (and strx[i-2] below) where the k+/-2
      // terms use strx[i] - same suspected generator artifact as the stry
      // notes above; verify before changing.
      double _t_168_ = _t_169_ * strx[i+2];
      double _t_167_ = _t_168_ * stry[j];
      double _t_178_ = mu[i][j][k+1] * met3[i][j][k+1];
      double _t_176_ = _t_178_ * met1[i][j][k+1];
      double _t_179_ = u2[i+2][j][k+1];
      _t_179_ -= u2[i-2][j][k+1];
      double _t_177_ = c2 * _t_179_;
      double _t_180_ = u2[i+1][j][k+1];
      _t_180_ -= u2[i-1][j][k+1];
      _t_177_ += c1 * _t_180_;
      _t_167_ += _t_176_ * _t_177_;
      double _t_184_ = mu[i][j][k+1] * met4[i][j][k+1];
      double _t_182_ = _t_184_ * met1[i][j][k+1];
      double _t_185_ = u3[i+2][j][k+1];
      _t_185_ -= u3[i-2][j][k+1];
      double _t_183_ = c2 * _t_185_;
      double _t_186_ = u3[i+1][j][k+1];
      _t_186_ -= u3[i-1][j][k+1];
      _t_183_ += c1 * _t_186_;
      double _t_181_ = _t_182_ * _t_183_;
      _t_167_ += _t_181_ * stry[j];
      double _t_192_ = 2.0 * mu[i][j][k-1];
      _t_192_ += la[i][j][k-1];
      double _t_191_ = _t_192_ * met2[i][j][k-1];
      double _t_189_ = _t_191_ * met1[i][j][k-1];
      double _t_193_ = u1[i+2][j][k-1];
      _t_193_ -= u1[i-2][j][k-1];
      double _t_190_ = c2 * _t_193_;
      double _t_194_ = u1[i+1][j][k-1];
      _t_194_ -= u1[i-1][j][k-1];
      _t_190_ += c1 * _t_194_;
      double _t_188_ = _t_189_ * _t_190_;
      double _t_187_ = _t_188_ * strx[i-2];
      _t_167_ += _t_187_ * stry[j];
      double _t_197_ = mu[i][j][k-1] * met3[i][j][k-1];
      double _t_195_ = _t_197_ * met1[i][j][k-1];
      double _t_198_ = u2[i+2][j][k-1];
      _t_198_ -= u2[i-2][j][k-1];
      double _t_196_ = c2 * _t_198_;
      double _t_199_ = u2[i+1][j][k-1];
      _t_199_ -= u2[i-1][j][k-1];
      _t_196_ += c1 * _t_199_;
      _t_167_ += _t_195_ * _t_196_;
      double _t_203_ = mu[i][j][k-1] * met4[i][j][k-1];
      double _t_201_ = _t_203_ * met1[i][j][k-1];
      double _t_204_ = u3[i+2][j][k-1];
      _t_204_ -= u3[i-2][j][k-1];
      double _t_202_ = c2 * _t_204_;
      double _t_205_ = u3[i+1][j][k-1];
      _t_205_ -= u3[i-1][j][k-1];
      _t_202_ += c1 * _t_205_;
      double _t_200_ = _t_201_ * _t_202_;
      _t_167_ += _t_200_ * stry[j];
      _t_127_ += c1 * _t_167_;
      r1ic0jc0kc0 += _t_127_;
      r1[i][j][k] = r1ic0jc0kc0;
    }
  }
}
// Companion to curvi_1: mixed j/k and i/j cross-derivative contributions to
// r1 on the curvilinear grid, one thread per interior (i, j, k) point (3D
// launch, no i-march). Statement order of the original generated code is kept
// exactly so floating-point results are bit-identical; only the declaration
// preamble has been folded into declare-at-first-use.
__global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
  // Thread -> grid mapping: x covers k, y covers j, z covers i.
  int blockdim_k = (int)(blockDim.x);
  int k0 = (int)(blockIdx.x) * blockdim_k;
  int k = max (k0, 0) + (int)(threadIdx.x);
  int blockdim_j = (int)(blockDim.y);
  int j0 = (int)(blockIdx.y) * blockdim_j;
  int j = max (j0, 0) + (int)(threadIdx.y);
  int blockdim_i = (int)(blockDim.z);
  int i0 = (int)(blockIdx.z) * blockdim_i;
  int i = max (i0, 0) + (int)(threadIdx.z);
  // Reinterpret the flat inputs as 304 x 304 x 304 volumes.
  double (*u1)[304][304] = (double (*)[304][304])in_u1;
  double (*u2)[304][304] = (double (*)[304][304])in_u2;
  double (*u3)[304][304] = (double (*)[304][304])in_u3;
  double (*mu)[304][304] = (double (*)[304][304])in_mu;
  double (*la)[304][304] = (double (*)[304][304])in_la;
  double (*r1)[304][304] = (double (*)[304][304])in_r1;
  double (*met1)[304][304] = (double (*)[304][304])in_met1;
  double (*met2)[304][304] = (double (*)[304][304])in_met2;
  double (*met3)[304][304] = (double (*)[304][304])in_met3;
  double (*met4)[304][304] = (double (*)[304][304])in_met4;
  // Interior guard (2-wide halo on every side). u3/met3/met4 are unused in
  // this kernel but kept in the signature for launch-site symmetry.
  if (j >= 2 && k >= 2 && j <= N-3 && k <= N-3 && i >= 2 && i <= N-3) {
    // ---- Group 1: d/dj of (mu*met3, mu*met2) times d/dk differences ----
    double _t_6_ = mu[i][j+2][k] * met3[i][j+2][k];
    double _t_4_ = _t_6_ * met1[i][j+2][k];
    double _t_7_ = u1[i][j+2][k+2];
    _t_7_ -= u1[i][j+2][k-2];
    double _t_5_ = c2 * _t_7_;
    double _t_8_ = u1[i][j+2][k+1];
    _t_8_ -= u1[i][j+2][k-1];
    _t_5_ += c1 * _t_8_;
    double _t_3_ = _t_4_ * _t_5_;
    // NOTE(review): stry[j+1] here (and stry[j-1] in the j+1 term below)
    // while the j-2/j-1 terms use stry[j] - suspected generator artifact;
    // verify against the reference stencil before changing.
    double _t_2_ = _t_3_ * stry[j+1];
    double _t_1_ = _t_2_ * strx[i];
    double _t_11_ = mu[i][j+2][k] * met2[i][j+2][k];
    double _t_9_ = _t_11_ * met1[i][j+2][k];
    double _t_12_ = u2[i][j+2][k+2];
    _t_12_ -= u2[i][j+2][k-2];
    double _t_10_ = c2 * _t_12_;
    double _t_13_ = u2[i][j+2][k+1];
    _t_13_ -= u2[i][j+2][k-1];
    _t_10_ += c1 * _t_13_;
    _t_1_ += _t_9_ * _t_10_;
    double _t_18_ = mu[i][j-2][k] * met3[i][j-2][k];
    double _t_16_ = _t_18_ * met1[i][j-2][k];
    double _t_19_ = u1[i][j-2][k+2];
    _t_19_ -= u1[i][j-2][k-2];
    double _t_17_ = c2 * _t_19_;
    double _t_20_ = u1[i][j-2][k+1];
    _t_20_ -= u1[i][j-2][k-1];
    _t_17_ += c1 * _t_20_;
    double _t_15_ = _t_16_ * _t_17_;
    double _t_14_ = _t_15_ * stry[j];
    _t_1_ += _t_14_ * strx[i];
    double _t_23_ = mu[i][j-2][k] * met2[i][j-2][k];
    double _t_21_ = _t_23_ * met1[i][j-2][k];
    double _t_24_ = u2[i][j-2][k+2];
    _t_24_ -= u2[i][j-2][k-2];
    double _t_22_ = c2 * _t_24_;
    double _t_25_ = u2[i][j-2][k+1];
    _t_25_ -= u2[i][j-2][k-1];
    _t_22_ += c1 * _t_25_;
    _t_1_ += _t_21_ * _t_22_;
    double _t_0_ = c2 * _t_1_;
    double _t_31_ = mu[i][j+1][k] * met3[i][j+1][k];
    double _t_29_ = _t_31_ * met1[i][j+1][k];
    double _t_32_ = u1[i][j+1][k+2];
    _t_32_ -= u1[i][j+1][k-2];
    double _t_30_ = c2 * _t_32_;
    double _t_33_ = u1[i][j+1][k+1];
    _t_33_ -= u1[i][j+1][k-1];
    _t_30_ += c1 * _t_33_;
    double _t_28_ = _t_29_ * _t_30_;
    double _t_27_ = _t_28_ * stry[j-1];
    double _t_26_ = _t_27_ * strx[i];
    double _t_36_ = mu[i][j+1][k] * met2[i][j+1][k];
    double _t_34_ = _t_36_ * met1[i][j+1][k];
    double _t_37_ = u2[i][j+1][k+2];
    _t_37_ -= u2[i][j+1][k-2];
    double _t_35_ = c2 * _t_37_;
    double _t_38_ = u2[i][j+1][k+1];
    _t_38_ -= u2[i][j+1][k-1];
    _t_35_ += c1 * _t_38_;
    _t_26_ += _t_34_ * _t_35_;
    double _t_43_ = mu[i][j-1][k] * met3[i][j-1][k];
    double _t_41_ = _t_43_ * met1[i][j-1][k];
    double _t_44_ = u1[i][j-1][k+2];
    _t_44_ -= u1[i][j-1][k-2];
    double _t_42_ = c2 * _t_44_;
    double _t_45_ = u1[i][j-1][k+1];
    _t_45_ -= u1[i][j-1][k-1];
    _t_42_ += c1 * _t_45_;
    double _t_40_ = _t_41_ * _t_42_;
    double _t_39_ = _t_40_ * stry[j];
    _t_26_ += _t_39_ * strx[i];
    double _t_48_ = mu[i][j-1][k] * met2[i][j-1][k];
    double _t_46_ = _t_48_ * met1[i][j-1][k];
    double _t_49_ = u2[i][j-1][k+2];
    _t_49_ -= u2[i][j-1][k-2];
    double _t_47_ = c2 * _t_49_;
    double _t_50_ = u2[i][j-1][k+1];
    _t_50_ -= u2[i][j-1][k-1];
    _t_47_ += c1 * _t_50_;
    _t_26_ += _t_46_ * _t_47_;
    _t_0_ += c1 * _t_26_;
    double r1ic0jc0kc0 = r1[i][j][k];
    r1ic0jc0kc0 += _t_0_;
    // ---- Group 2: mixed i/j second differences of u2 (mu- and la-weighted) ----
    double _t_55_ = mu[i][j+2][k] * met1[i][j+2][k];
    double _t_53_ = _t_55_ * met1[i][j+2][k];
    double _t_56_ = u2[i+2][j+2][k];
    _t_56_ -= u2[i-2][j+2][k];
    double _t_54_ = c2 * _t_56_;
    double _t_57_ = u2[i+1][j+2][k];
    _t_57_ -= u2[i-1][j+2][k];
    _t_54_ += c1 * _t_57_;
    double _t_52_ = _t_53_ * _t_54_;
    double _t_60_ = mu[i][j-2][k] * met1[i][j-2][k];
    double _t_58_ = _t_60_ * met1[i][j-2][k];
    double _t_61_ = u2[i+2][j-2][k];
    _t_61_ -= u2[i-2][j-2][k];
    double _t_59_ = c2 * _t_61_;
    double _t_62_ = u2[i+1][j-2][k];
    _t_62_ -= u2[i-1][j-2][k];
    _t_59_ += c1 * _t_62_;
    _t_52_ += _t_58_ * _t_59_;
    double _t_51_ = c2 * _t_52_;
    double _t_66_ = mu[i][j+1][k] * met1[i][j+1][k];
    double _t_64_ = _t_66_ * met1[i][j+1][k];
    double _t_67_ = u2[i+2][j+1][k];
    _t_67_ -= u2[i-2][j+1][k];
    double _t_65_ = c2 * _t_67_;
    double _t_68_ = u2[i+1][j+1][k];
    _t_68_ -= u2[i-1][j+1][k];
    _t_65_ += c1 * _t_68_;
    double _t_63_ = _t_64_ * _t_65_;
    double _t_71_ = mu[i][j-1][k] * met1[i][j-1][k];
    double _t_69_ = _t_71_ * met1[i][j-1][k];
    double _t_72_ = u2[i+2][j-1][k];
    _t_72_ -= u2[i-2][j-1][k];
    double _t_70_ = c2 * _t_72_;
    double _t_73_ = u2[i+1][j-1][k];
    _t_73_ -= u2[i-1][j-1][k];
    _t_70_ += c1 * _t_73_;
    _t_63_ += _t_69_ * _t_70_;
    _t_51_ += c1 * _t_63_;
    double _t_77_ = la[i+2][j][k] * met1[i+2][j][k];
    double _t_75_ = _t_77_ * met1[i+2][j][k];
    double _t_78_ = u2[i+2][j+2][k];
    _t_78_ -= u2[i+2][j-2][k];
    double _t_76_ = c2 * _t_78_;
    double _t_79_ = u2[i+2][j+1][k];
    _t_79_ -= u2[i+2][j-1][k];
    _t_76_ += c1 * _t_79_;
    double _t_74_ = _t_75_ * _t_76_;
    double _t_82_ = la[i-2][j][k] * met1[i-2][j][k];
    double _t_80_ = _t_82_ * met1[i-2][j][k];
    double _t_83_ = u2[i-2][j+2][k];
    _t_83_ -= u2[i-2][j-2][k];
    double _t_81_ = c2 * _t_83_;
    double _t_84_ = u2[i-2][j+1][k];
    _t_84_ -= u2[i-2][j-1][k];
    _t_81_ += c1 * _t_84_;
    _t_74_ += _t_80_ * _t_81_;
    _t_51_ += c2 * _t_74_;
    double _t_88_ = la[i+1][j][k] * met1[i+1][j][k];
    double _t_86_ = _t_88_ * met1[i+1][j][k];
    double _t_89_ = u2[i+1][j+2][k];
    _t_89_ -= u2[i+1][j-2][k];
    double _t_87_ = c2 * _t_89_;
    double _t_90_ = u2[i+1][j+1][k];
    _t_90_ -= u2[i+1][j-1][k];
    _t_87_ += c1 * _t_90_;
    double _t_85_ = _t_86_ * _t_87_;
    double _t_93_ = la[i-1][j][k] * met1[i-1][j][k];
    double _t_91_ = _t_93_ * met1[i-1][j][k];
    double _t_94_ = u2[i-1][j+2][k];
    _t_94_ -= u2[i-1][j-2][k];
    double _t_92_ = c2 * _t_94_;
    double _t_95_ = u2[i-1][j+1][k];
    _t_95_ -= u2[i-1][j-1][k];
    _t_92_ += c1 * _t_95_;
    _t_85_ += _t_91_ * _t_92_;
    _t_51_ += c1 * _t_85_;
    r1ic0jc0kc0 += _t_51_;
    r1[i][j][k] = r1ic0jc0kc0;
  }
}
// Upload one N*N*N host array into a freshly allocated device buffer.
// `errmsg` is the context string reported if the allocation (or an earlier
// sticky error) is pending.
static double *device_copy_volume (const double *host, int N, const char *errmsg) {
  double *dev;
  cudaMalloc (&dev, sizeof(double)*N*N*N);
  check_error (errmsg);
  cudaMemcpy (dev, host, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  check_error ("Failed to copy array to device\n");
  return dev;
}
// Host driver: copies all inputs to the device, runs curvi_1 then curvi_2 on
// r1, copies r1 back into h_r1, and releases every device buffer (the
// original leaked all twelve allocations on every call and never checked the
// kernel launches).
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
  double *r1 = device_copy_volume (h_r1, N, "Failed to allocate device memory for r1\n");
  double *u1 = device_copy_volume (h_u1, N, "Failed to allocate device memory for u1\n");
  double *u2 = device_copy_volume (h_u2, N, "Failed to allocate device memory for u2\n");
  double *u3 = device_copy_volume (h_u3, N, "Failed to allocate device memory for u3\n");
  double *mu = device_copy_volume (h_mu, N, "Failed to allocate device memory for mu\n");
  double *la = device_copy_volume (h_la, N, "Failed to allocate device memory for la\n");
  double *met1 = device_copy_volume (h_met1, N, "Failed to allocate device memory for met1\n");
  double *met2 = device_copy_volume (h_met2, N, "Failed to allocate device memory for met2\n");
  double *met3 = device_copy_volume (h_met3, N, "Failed to allocate device memory for met3\n");
  double *met4 = device_copy_volume (h_met4, N, "Failed to allocate device memory for met4\n");
  // strx/stry are 1D stretching factors of length N.
  double *strx;
  cudaMalloc (&strx, sizeof(double)*N);
  check_error ("Failed to allocate device memory for strx\n");
  cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
  double *stry;
  cudaMalloc (&stry, sizeof(double)*N);
  check_error ("Failed to allocate device memory for stry\n");
  cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
  // curvi_1: 2D launch (k, j); each thread marches over i internally.
  dim3 blockconfig (16, 8);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
  curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
  check_error ("curvi_1 kernel launch failed\n");
  // curvi_2: full 3D launch (k, j, i).
  dim3 blockconfig_1 (16, 2, 2);
  dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
  curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
  check_error ("curvi_2 kernel launch failed\n");
  cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);  // blocking: also syncs the kernels
  check_error ("Failed to copy r1 back to host\n");
  // Release all device buffers.
  cudaFree (r1);
  cudaFree (u1);
  cudaFree (u2);
  cudaFree (u3);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (met1);
  cudaFree (met2);
  cudaFree (met3);
  cudaFree (met4);
  cudaFree (strx);
  cudaFree (stry);
}
|
3,207 | #include "includes.h"
static unsigned int GRID_SIZE_N;
static unsigned int GRID_SIZE_4N;
static unsigned int MAX_STATE_VALUE;
// One thread per site: accumulates the likelihood term over 4 gamma-rate
// categories x 4 states and stores wptr[i] * log(0.25*|term|) in output[i].
// limit = number of valid sites.
__global__ static void cudaEvaluateRightGammaKernel(int *wptr, double *x1, double *x2, double *diagptable, double *output, const int limit) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
// FIX: the bounds check must precede any store. The original executed
// output[i] = 0.0 unconditionally, an out-of-bounds write for threads
// with i >= limit when output holds exactly 'limit' entries.
if (i >= limit) {
return;
}
output[i] = 0.0;
int j;
double term = 0.0;
// Each site owns 16 consecutive doubles (4 categories x 4 states).
x1 += 16 * i;
x2 += 16 * i;
#pragma unroll
for (j = 0; j < 4; j++) {
term += x1[0] * x2[0] * diagptable[0];
term += x1[1] * x2[1] * diagptable[1];
term += x1[2] * x2[2] * diagptable[2];
term += x1[3] * x2[3] * diagptable[3];
x1 += 4;
x2 += 4;
diagptable += 4;
}
// 0.25 = uniform weight of the four rate categories.
term = log(0.25 * fabs(term));
output[i] += wptr[i] * term;
}
3,208 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Computes A[idx] = (*a)*A[idx] + B[idx], one element per block.
// Launched as add<<<M, 1>>>, so blockIdx.x indexes the element directly.
__global__ void add (int *A, int *B,int *a) {
const int elem = blockIdx.x;
printf("idx = %d\n", elem);
A[elem] = B[elem] + (*a) * A[elem];
}
// Interactive SAXPY-style demo: reads M pairs (X, Y) and a scalar alpha,
// computes A = alpha*A + B on the GPU (one block per element), and prints
// the result.
int main () {
int M;
int i, j;
printf("Enter the size : ");
scanf("%d",&M);
// VLAs on the host stack; large M may overflow the stack.
int A[M], B[M];
printf("Enter the X and Y values : \n");
for (i = 0; i < M; ++i) {
scanf("%d%d",&A[i],&B[i]);
}
printf("Enter alpha : ");
int a;
scanf("%d",&a);
int *d_a, *d_b, *d_c;
int size = sizeof(int) * M;
// d_c holds the single scalar alpha on the device.
cudaMalloc((void**)&d_c,sizeof(int));
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
// &A decays to the same address as A for the array; both forms work.
cudaMemcpy(d_a, &A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, &a, sizeof(int), cudaMemcpyHostToDevice);
// M blocks of 1 thread; result written in place into d_a.
add<<<M, 1>>>(d_a, d_b, d_c);
// Blocking copy-back also synchronizes with the kernel.
cudaMemcpy(&A, d_a, size, cudaMemcpyDeviceToHost);
printf("\n");
printf("aA + B:\n");
for (j = 0; j < M; ++j) {
printf("%d\t", A[j]);
}
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
3,209 | #include "includes.h"
// One pass of odd-even transposition sort over arr[0..arr_len): each
// thread compare-swaps adjacent pairs starting at an even (oddEven==0)
// or odd (oddEven==1) offset, striding by 2*blockDim.x within its
// block's chunk of 2*blockDim.x*num_elem elements.
// NOTE(review): the block base uses blockIdx.x * blockDim.x * num_elem,
// while iterEnd adds 2*blockDim.x*num_elem to i — presumably the chunk
// size per block; confirm the intended chunk partitioning against the
// launch configuration.
__global__ void sortKernelMulti(int *arr, int arr_len, int num_elem, int oddEven)
{
int i = 2 * (blockIdx.x * blockDim.x * num_elem) + oddEven;
// Clamp so the pair (i, i+1) never reads past arr_len-1.
int iterEnd = min(arr_len - 1, i + 2 * blockDim.x *num_elem);
// Increment to thread start index:
i += 2 * threadIdx.x;
// Every thread in block (warp) step by num_elem
for (; i < iterEnd; i += 2 * blockDim.x)
{
//Even
// Compare-swap the adjacent pair so the smaller lands first.
int a = arr[i];
int b = arr[i + 1];
if (a > b)
{
arr[i] = b;
arr[i + 1] = a;
}
}
}
3,210 | #include <stdio.h>
#include <stdlib.h>
// Element-wise product c[i] = a[i] * b[i] for i in [0, n).
// Single-block stride pattern: each of blockDim.x threads starts at its
// own index and advances by the block width until n is covered.
__global__ void dot_prod(int n, int *a, int *b, int *c)
{
for (int i = threadIdx.x; i < n; i += blockDim.x) {
c[i] = a[i] * b[i];
}
}
// Computes the dot product of two all-ones vectors of length N on the
// GPU (element-wise multiply on device, sum on host) and times the
// copy+kernel+copy sequence with CUDA events.
// FIX: c was never freed and the events were never destroyed.
int main(int argc, char **argv){
int sum = 0;
int N = 100;
int size = N * sizeof(int);
// Create counter
cudaEvent_t start, stop;
float elapsedTime;
int *a, *b, *c; // Host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // Device copies of a, b, c
// Allocate space for device copies a, b, c
cudaMalloc((void **) &dev_a, size);
cudaMalloc((void **) &dev_b, size);
cudaMalloc((void **) &dev_c, size);
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
// initialization of the arrays
for(int i = 0; i < N; i++){
a[i] = b[i] = 1;
}
// Start counter
cudaEventCreate(&start);
cudaEventRecord(start,0);
// Copy inputs to device
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
//dot_prod<<<1,N>>>(dev_a, dev_b, dev_c); // 1 block, N threads (block size)
// 1 block of 64 threads; the kernel strides to cover all N elements.
dot_prod<<<1,64>>>(N, dev_a, dev_b, dev_c);
// Copy result back to host (blocking copy also syncs with the kernel)
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
for( int i = 0; i < N; i++ ){
sum += c[i];
}
// Stop counter (timing includes both memcpys and the host reduction)
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
for( int i = 0; i < N; i++ ){
printf("%d ", c[i]);
}
printf("Dot Product: %d\n", sum);
printf("Elapsed time: %f ms\n", elapsedTime);
// Clean up (FIX: free(c) was missing; events were leaked)
free(a); free(b); free(c);
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
3,211 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
// Binary functor intended to sum population counts: accumulator x plus
// popcount of the next element y.
// NOTE(review): thrust::reduce requires an associative, commutative
// operator. This one treats its arguments asymmetrically (popcount is
// applied only to y), so the reduction result depends on the reduction
// tree — the author's comment below acknowledges it is broken.
struct bitCount : public thrust::binary_function<int, int, int> {
// doesn't work because it's not associative
__device__ int operator()(int x, int y) {
return x + __popc(y);
}
};
// Demo: fills a vector with 0..9, mirrors it on the device, then folds
// it with bitCount via thrust::reduce (known-broken: see the functor's
// non-associativity note) and prints the result.
int main() {
thrust::host_vector<int> values_h;
for (int v = 0; v < 10; v++) {
values_h.push_back(v);
}
thrust::device_vector<int> values_d;
values_d = values_h;
int bit_sum = thrust::reduce(values_d.begin(), values_d.end(), 0, bitCount());
printf("%d\n", bit_sum);
}
3,212 | extern "C" {
// One region-growing step: an interior pixel whose intensity lies in
// (0, 0.8) joins the region (and is flagged in d_conv) when any of its
// four direct neighbours is already in the region.
__global__ void fullgrow_kernel(double* d_image, double* d_region, double* d_conv, int h, int w)
{
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
// Skip border pixels and any threads outside the image.
if (row <= 0 || row >= h - 1 || col <= 0 || col >= w - 1)
return;
int idx = row * w + col;
if (d_image[idx] <= 0.0 || d_image[idx] >= .8)
return;
bool neighbour_in_region =
d_region[idx + 1] == 1. || d_region[idx - 1] == 1. ||
d_region[idx + w] == 1. || d_region[idx - w] == 1.;
if (neighbour_in_region) {
d_region[idx] = 1.;
d_conv[idx] = 1.;
}
}
// Seeds the region mask: every interior pixel with intensity in
// (0, 0.8) is marked as part of the region.
__global__ void findseeds_kernel(double* d_image, double* d_region, int h, int w) {
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
// Borders and out-of-range threads are excluded.
if (row <= 0 || row >= h - 1 || col <= 0 || col >= w - 1)
return;
int idx = row * w + col;
if (d_image[idx] > 0.0 && d_image[idx] < .8) {
d_region[idx] = 1;
}
}
} |
3,213 | // RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -x hip | FileCheck -check-prefix=ASAN %s
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -x hip \
// RUN: | FileCheck %s
// REQUIRES: amdgpu-registered-target
// ASAN-DAG: declare void @__amdgpu_device_library_preserve_asan_functions()
// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant void ()* @__amdgpu_device_library_preserve_asan_functions
// ASAN-DAG: @llvm.compiler.used = {{.*}}@__amdgpu_device_library_preserve_asan_functions_ptr
// CHECK-NOT: @__amdgpu_device_library_preserve_asan_functions_ptr
|
3,214 | #include <stdio.h>
// Writes in[] into out[] in reverse order. Launch with one block of
// blockDim.x threads, one element per thread.
__global__ void reverse (int* in, int* out){
const int src = threadIdx.x;
const int dst = blockDim.x - 1 - src;
out[dst] = in[src];
}
// Demo: reverses a 5-element array on the GPU and prints the original
// followed by the reversed copy.
int main() {
int host_in[]={100,110,200,220,300};  // input (lives on the host)
int nbytes = 5* sizeof( int );
int* host_out=(int*)malloc(nbytes);   // receives the reversed result
int *dev_in, *dev_out; // device copies
int i;
// device buffers for input and output
cudaMalloc( (void**)&dev_in, nbytes );
cudaMalloc( (void**)&dev_out, nbytes);
cudaMemcpy( dev_in, host_in, nbytes, cudaMemcpyHostToDevice ); //send data to device
// one block of 5 threads, one element each
reverse<<< 1, 5 >>>(dev_in, dev_out);
// blocking copy-back also synchronizes with the kernel
cudaMemcpy( host_out, dev_out, nbytes, cudaMemcpyDeviceToHost );
for(i=0;i<5;i++){
printf(" %d ",host_in[i]);
}
printf("\n");
for(i=0;i<5;i++){
printf(" %d ",host_out[i]);
}
printf("\n");
free( host_out );
cudaFree( dev_in );
cudaFree( dev_out );
return 0;
}
|
3,215 | // Device code
// Describes the geometry of a device buffer passed by value to kernels.
typedef struct BUFFERDIMS_t {
unsigned int X;       // extent in X (elements)
unsigned int Y;       // extent in Y (elements)
unsigned int Z;       // extent in Z (elements)
unsigned int stride;  // element stride — units not shown here; confirm with producers
unsigned int pitch;   // row pitch — units not shown here; confirm with producers
} BUFFERDIMS;
// B[i] = A[i] * scalar over the flattened dims.X * dims.Y plane.
extern "C" __global__ void
scale(
    float * A,
    float * B,
    float scalar,
    BUFFERDIMS dims
)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    const int count = dims.X * dims.Y;
    // Guard the grid tail.
    if (idx >= count)
        return;
    B[idx] = scalar * A[idx];
}
|
3,216 | /*
A simple CUDA test program that adds two vectors
*/
#include <iostream>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
// One thread per element; the guard handles the grid tail.
__global__ void vAdd(int* a, int* b, int* c, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Adds two N-element vectors on the GPU, timing the kernel with CUDA
// events, and prints the result 15 values per line.
// FIX: the original leaked the three new[] arrays, the three device
// buffers, and both events.
int main(int argc, char** argv)
{
    if(argc != 2) {
        std::cout << "Usage: " << argv[0] << " <SIZE>" << std::endl;
        exit(1);
    }
    const int N = atoi(argv[1]);
    //Allocate on host
    int* h_a = new int[N];
    int* h_b = new int[N];
    int* h_c = new int[N];
    //Allocate on device
    int* d_a = NULL;
    int* d_b = NULL;
    int* d_c = NULL;
    cudaMalloc(&d_a, sizeof(int) * N);
    cudaMalloc(&d_b, sizeof(int) * N);
    cudaMalloc(&d_c, sizeof(int) * N);
    //Fill input vectors
    for(int i = 0; i < N; ++i)
    {
        h_a[i] = i;
        h_b[i] = i;
        h_c[i] = 0;
    }
    //Copy vectors to device
    cudaMemcpy(d_a, h_a, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int) * N, cudaMemcpyHostToDevice);
    //Call kernel
    int threadsPerBlock = 256;
    int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock; //ceil(n/threadsPerBlock)
    cudaEvent_t start, stop;
    //record start event
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    vAdd<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c, N);
    //record stop event
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, stop);
    //Copy output vector back to host
    cudaMemcpy(h_c, d_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
    //Print the output vector
    for(int i = 0; i < N; ++i)
    {
        std::cout << h_c[i] << " ";
        if((i + 1) % 15 == 0)
            std::cout << std::endl;
    }
    std::cout << std::endl << "Compute time: " << elapsed << "ms" << std::endl;
    //Release all resources (previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] h_a;
    delete[] h_b;
    delete[] h_c;
    return 0;
}
|
3,217 | /*
module load cudatoolkit
qsub -q gpu -l nodes=1:ppn=1,walltime=00:20:00 -I
nvcc matrixTranspose.cu
*/
#include <stdio.h>
#define DIM 32
// Naive transpose: odata = transpose(idata) for a square matrix of
// gridDim.x*DIM columns. Each DIM x DIM tile is handled by a block of
// DIM x BLOCK_ROWS threads, each thread covering DIM/BLOCK_ROWS rows.
// Reads are coalesced; writes are strided (hence "naive").
__global__ void transposeNaive(double *odata, const double *idata,int BLOCK_ROWS)
{
const int col = blockIdx.x * DIM + threadIdx.x;
const int row = blockIdx.y * DIM + threadIdx.y;
const int width = gridDim.x * DIM;
for (int r = 0; r < DIM; r += BLOCK_ROWS) {
odata[col * width + (row + r)] = idata[(row + r) * width + col];
}
}
// Tiled transpose through shared memory: each block stages a DIM x DIM
// tile of idata, then writes it back transposed so both global reads
// and writes are coalesced. Block shape is DIM x BLOCK_ROWS, so each
// thread handles DIM/BLOCK_ROWS rows of the tile.
// NOTE(review): the tile is unpadded ([DIM][DIM]); the column read
// tile[threadIdx.x][...] likely causes shared-memory bank conflicts —
// padding the inner dimension by 1 is the usual remedy.
__global__ void transposeFast(double *odata, double *idata, int size_x, int size_y, int BLOCK_ROWS)
{
__shared__ double tile[DIM][DIM];
// Source coordinates for the coalesced read.
int xIndex = blockIdx.x * DIM + threadIdx.x;
int yIndex = blockIdx.y * DIM + threadIdx.y;
int index_in = xIndex + (yIndex) * size_x;
// Destination coordinates: block indices swapped for the transpose.
xIndex = blockIdx.y * DIM + threadIdx.x;
yIndex = blockIdx.x * DIM + threadIdx.y;
int index_out = xIndex + (yIndex)* size_y;
for (int i = 0; i < DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*size_x];
}
// Barrier between the tile write and the transposed read below.
__syncthreads();
for (int i = 0; i < DIM; i+=BLOCK_ROWS) {
odata[index_out+i*size_y] = tile[threadIdx.x][threadIdx.y+i];
}
}
// Benchmarks the naive and shared-memory transposes on an 8192x8192
// matrix and reports effective bandwidth.
// FIX: both kernel launches passed (d_idata, d_odata, ...) although the
// kernels' first parameter is the *output* (odata). The original
// therefore overwrote the input with a transpose of uninitialized data
// and benchmarked garbage. Arguments are now passed as (output, input).
// Also destroys the CUDA events, which were leaked.
int main(int argc, char const *argv[]) {
const int size_x = 8192;
const int size_y = 8192;
int BLOCK_ROWS = argc>=2 ? atoi(argv[1]) : 2; // default case: 2 --> 64 threads
//execution configuration parameters
dim3 grid(size_x/DIM, size_y/DIM);
dim3 block (DIM, BLOCK_ROWS);
//size of memory required to store the matrix
const int mem_size = sizeof(double) * size_x*size_y;
//allocate host memory
double *h_idata = (double*) malloc(mem_size);
double *h_odata = (double*) malloc(mem_size);
//allocate device memory
double *d_idata;
double *d_odata;
cudaMalloc((void**) &d_idata, mem_size);
cudaMalloc((void**) &d_odata, mem_size);
// objects to timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//initialize host data
for (int i = 0; i < (size_x*size_y); i++)
h_idata[i] = (double) i;
//copy host data to device
cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice);
printf("\nMatrix size: %dx%d, block: %dx%d, nthreads: %d\n",size_x,size_y, DIM, BLOCK_ROWS, BLOCK_ROWS*DIM );
/****** Naive transpose ******/
cudaEventRecord(start, 0);
// output first, input second (was swapped in the original)
transposeNaive<<<grid,block>>>(d_odata, d_idata, BLOCK_ROWS);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsTimeNaive;
cudaEventElapsedTime(&elapsTimeNaive, start, stop);
cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost);
//the bandwidth is twice the size of the matrix divided by the time execution
float bandNaive = (2 * mem_size) / elapsTimeNaive/1e6;
printf("Naive bandwidth = %f, time = %f\n",bandNaive,elapsTimeNaive );
/****** Fast transpose ******/
cudaEventRecord(start, 0);
// output first, input second (was swapped in the original)
transposeFast<<<grid,block>>>(d_odata, d_idata, size_x,size_y,BLOCK_ROWS);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsTimeFast;
cudaEventElapsedTime(&elapsTimeFast, start, stop);
cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost);
//the bandwidth is twice the size of the matrix divided by the time execution
float bandFast = (2 * mem_size) / elapsTimeFast/1e6;
printf("Fast bandwidth = %f, time = %f\n",bandFast,elapsTimeFast );
//free memory
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(h_idata);
free(h_odata);
cudaFree(d_idata);
cudaFree(d_odata);
return 0;
}
|
3,218 | #include <stdio.h>
// #include <cuda.h>
#include <iostream>
#include <random>
#include <chrono>
#define DIM 2048
#define N (DIM*DIM)
#define THREAD_PER_BLOCK 512
// c[i] = a[i] + b[i], one thread per element. The launch configuration
// (N / THREAD_PER_BLOCK blocks) covers exactly N elements, so no tail
// guard is required here.
__global__ void add(int* a, int* b, int* c)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// Fills a[0..size) with nondeterministic values in [0, 1000),
// drawn directly from the hardware entropy source.
void randomInts(int* a, int size)
{
    std::random_device entropy;
    for (int idx = 0; idx < size; ++idx) {
        a[idx] = entropy() % 1000;
    }
}
// Prints the top-left 3x3 corner of a column-major dim x dim matrix,
// followed by ellipses, for a quick visual check.
void printMatrix(int* m, int dim)
{
    printf("matrix %p :\n", m);
    for(int row = 0 ; row < 3 ; ++row)
    {
        // first two columns tab-separated, third column with trailing "..."
        printf("%d\t", m[row + 0*dim]);
        printf("%d\t", m[row + 1*dim]);
        printf("%d...\n", m[row + 2*dim]);
    }
    printf("...\n\n");
}
// nvcc -o bin/exo2 src/exo2.cu
// nvcc -o bin/exo2 src/exo2.cu
// Adds two DIM x DIM matrices of random ints on the GPU and prints the
// corners of the operands and the result, plus the kernel time.
// FIX: the kernel launch is asynchronous, so the original timed only
// the launch call; a cudaDeviceSynchronize() before t2 makes the
// measurement cover the kernel execution itself.
int main(void)
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
a = (int*)malloc(size); randomInts(a, N);
b = (int*)malloc(size); randomInts(b, N);
c = (int*)malloc(size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
auto t1 = std::chrono::high_resolution_clock::now();
add<<<N/THREAD_PER_BLOCK,THREAD_PER_BLOCK>>>(d_a, d_b, d_c);
// wait for the kernel so t2 - t1 measures execution, not just launch
cudaDeviceSynchronize();
auto t2 = std::chrono::high_resolution_clock::now();
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
// j'affiche les 3 premières lignes/col de la mat1, mat2
// et mat résultat
printMatrix(a, DIM); printMatrix(b, DIM); printMatrix(c, DIM);
auto int_us = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);
std::cout<<"done in "<<int_us.count()<<" μs"<<std::endl;
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
3,219 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
// Compute vector sum C = A+B
//CUDA kernel. Each thread performes one pair-wise addition
// Pair-wise vector addition C[i] = A[i] + B[i] for i in [0, n).
// Single-block launch assumed: threadIdx.x is the global index.
__global__ void vecAddKernel(float *A, float *B, float *C, int n)
{
    const int idx = threadIdx.x;
    if (idx >= n)
        return;
    C[idx] = A[idx] + B[idx];
}
// Adds two 100-element float vectors on the GPU and prints both the
// result and one input for comparison.
// FIX: cudaThreadSynchronize() is deprecated (removed in recent CUDA
// toolkits); replaced with cudaDeviceSynchronize().
int main(int argc, char* argv[])
{
//Size of vectors
int n = 100;
int size = n * sizeof(float);
//Host input vectors
float *h_A, *h_B;
//Host output vector
float *h_C;
//Device input vectors
float *d_A, *d_B;
//Device output vector
float *d_C;
//Allocate memory for each vector on host
h_A = (float*)malloc((size));
h_B = (float*)malloc((size));
h_C = (float*)malloc((size));
for (int i=0; i<n; ++i) h_A[i]=h_B[i]=i;
//Allocate memory for each vector on GPU
cudaMalloc( (void **) &d_A, size);
cudaMalloc( (void **) &d_B, size);
cudaMalloc( (void **) &d_C, size);
//Copy host vectors to device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//One block of 100 threads, one element per thread
vecAddKernel<<<1,100>>>(d_A, d_B, d_C, n);
//Synchronize threads (cudaThreadSynchronize is deprecated)
cudaDeviceSynchronize();
//Copy array back to host
cudaMemcpy( h_C, d_C, size, cudaMemcpyDeviceToHost );
for (int i=0; i<n; ++i) {
printf( "c[%d] = %f\n",i, h_C[i] );
printf( "a[%d] = %f\n",i, h_A[i] );
}
//Release device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//Release host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
3,220 | #include "includes.h"
// C = A * B for row-major matrices: A is rA x cA, B is cA x cB,
// C is rA x cB. Row index i comes from the block, column index j from
// the thread within the block.
// FIX (performance, behavior-identical): the original zeroed C[i*cB+j]
// and then accumulated into global memory on every inner iteration;
// accumulating in a register and storing once removes cA redundant
// global read-modify-writes. The floating-point summation order is
// unchanged, so results are bit-identical.
__global__ void matmulKernel(float *A, float *B, float *C, int rA, int cA, int cB){
int i = blockIdx.y*gridDim.x + blockIdx.x, j = threadIdx.y*blockDim.x + threadIdx.x;
if(i < rA && j < cB){
float acc = 0.f;
for(int k=0;k<cA;++k) acc += A[i*cA + k] * B[k*cB + j];
C[i*cB + j] = acc;
}
return;
}
3,221 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda_runtime.h>
extern "C" void saxpy(int,float,float*,float*);
extern "C" void set(int,float,float*);
extern "C" void map(float*, float*, int);
// Driver for externally-defined saxpy/set/map routines: allocates a
// 1M-element pair of float arrays on host and device, runs
// y = 2*x + y with x=1, y=0, and prints the first device result.
// NOTE(review): map()/set()/saxpy() are defined elsewhere; presumably
// map() associates the host buffer with the device buffer (the saxpy
// result is read back from dy) — confirm against their definitions.
// Host/device buffers and dx/dy are never freed; whether that matters
// depends on map()'s ownership semantics.
int main(int argc, char **argv)
{
float *x, *y, *dx, *dy, tmp;
int n = 1<<20;  // 1M elements
x = (float*) malloc(n*sizeof(float));
y = (float*) malloc(n*sizeof(float));
cudaMalloc((void**)&dx,(size_t)n*sizeof(float));
cudaMalloc((void**)&dy,(size_t)n*sizeof(float));
map(x, dx, n*sizeof(float));
map(y, dy, n*sizeof(float));
set(n,1.0f,x);
set(n,0.0f,y);
saxpy(n, 2.0, x, y);
// Read back a single float (the first element of the device y).
cudaMemcpy(&tmp,dy,(size_t)sizeof(float),cudaMemcpyDeviceToHost);
printf("%f\n",tmp);
return 0;
}
|
3,222 | #define __rose_lt(x,y) ((x)<(y)?(x):(y))
#define __rose_gt(x,y) ((x)>(y)?(x):(y))
//this is only used for cuda-chill
//heavy simplification
#define NsolventMolecules_ 1024
#define NsolventAtoms_ 1024
// Per-solvent-molecule record used by the cuda-chill simplification:
// fixed-size 1024 x 3 coordinate table instead of the original AtomMask.
struct MolDist
{
///< Original solvent molecule number (starts from 1).
int mol;
///< Closest distance of solvent molecule to atoms in distanceMask.
double D;
//AtomMask mask; ///< Original topology solvent molecule atom mask.
///< Actual solvent atom #s to loop over.
double solventAtoms[1024][3];
}
;
//using dist for no image
// and kernel for when we use solute molecule center
//extracting pulling out arrays out from struct
//using dist for no image
// and kernel for when we use solute molecule center
//extracting pulling out arrays out from struct
// For each of the 1024 solvent molecules, scans its 1024 atoms and
// records in SolventMols_[m].D the minimum of the per-atom value
// (initialized to maxD).
// NOTE(review): the live code computes the DOT PRODUCT
// maskCenter . atom, whereas the commented-out reference computes the
// squared distance (x*x + y*y + z*z of coordinate differences).
// Presumably a deliberate simplification for the cuda-chill benchmark —
// confirm before treating D as a distance.
void Action_NoImage_Center(struct MolDist SolventMols_[1024],double maskCenter[3],double maxD)
{
double Dist;
int solventMol;
int solventAtom;
//Vec3 maskCenter = frmIn.VGeometricCenter( distanceMask_ );
//standard loop
for (solventMol = 0; solventMol < 1024; solventMol++) {
// Start each molecule at the upper bound maxD.
SolventMols_[solventMol].D = maxD;
for (solventAtom = 0; solventAtom < 1024; solventAtom++) {
//main dist2_noImage code
//double *a1 = maskCenter.Dptr(); //center of solute molecule
//double *a2 = frmIn.XYZ(*solvent_atom);
//double *a1 = maskCenter; //center of solute molecule
//double *a2 = SolventMols_[solventMol][solventAtom];
//double x = a1[0] - a2[0];
//double y = a1[1] - a2[1];
//double z = a1[2] - a2[2];
//Dist = (x*x + y*y + z*z);
Dist = maskCenter[0] * SolventMols_[solventMol].solventAtoms[solventAtom][0] + maskCenter[1] * SolventMols_[solventMol].solventAtoms[solventAtom][1] + maskCenter[2] * SolventMols_[solventMol].solventAtoms[solventAtom][2];
//D_[solventMol] = Dist < D_[solventMol] ? Dist : D_[solventMol];
// Keep the running minimum per molecule.
if (Dist < SolventMols_[solventMol].D)
SolventMols_[solventMol].D = Dist;
}
}
}
|
3,223 | /**
* Author: Kapil Gupta <kpgupta98@gmail.com>
* Organization: XantheLabs
* Created: January 2017
*/
#pragma once
#ifndef HOUGH_LINES_DRAW_H_
#define HOUGH_LINES_DRAW_H_
#endif // HOUGH_LINES_DRAW_H_
|
3,224 | #include "includes.h"
// Accumulates weight and bias gradients for one sample per block (k =
// blockIdx.x). The sample's input/output windows start at offsets taken
// from inputIndice/outputIndice (1-based, hence the -1).
// gradWeight += scale * gradOutput (outer) input, restricted to the
// window; gradBias += scale * gradOutput.
// WINDOWSPARSE_THREADS is defined elsewhere; blockDim.x must not
// exceed it. atomicAdd is used because windows of different samples
// may overlap in gradWeight/gradBias.
__global__ void cunnx_WindowSparse_accGradParameters_kernel( float *gradWeight, float* gradBias, float *gradOutput, float *input, float *inputIndice, float *outputIndice, int inputWindowSize, int outputWindowSize, int inputSize, int outputSize, float scale)
{
__shared__ float buffer[WINDOWSPARSE_THREADS];
int tx = threadIdx.x;
int i_step = blockDim.x;
int k = blockIdx.x;
// 1-based indices from the host side.
int inputIdx = (int)inputIndice[k] - 1;
int outputIdx = (int)outputIndice[k] - 1;
float *input_k = input + k*inputWindowSize;
float *gradOutput_k = gradOutput + k*outputWindowSize;
float *gradWeight_k = gradWeight + outputIdx*inputSize + inputIdx;
float *gradBias_k = gradBias + outputIdx;
// addr weights (scalar-products)
for (int i=tx; i<inputWindowSize; i+=i_step)
{
// copy input to buffer
// (buffer[tx] is only read back by the same thread — it caches
// input_k[i]*scale across the inner j loop)
buffer[tx] = input_k[i]*scale;
// multiply accumulate weights
for (int j=0; j<outputWindowSize; j++)
atomicAdd(&(gradWeight_k[j*inputSize + i]), gradOutput_k[j]*buffer[tx]);
}
// cadd bias i.e. multiply accumulate biases
for (int j=tx; j<outputWindowSize; j+=i_step)
atomicAdd(&(gradBias_k[j]), gradOutput_k[j]*scale);
}
3,225 | #include "includes.h"
// Accumulates weighted spikes into the port input array:
// port_input += spike * port_weight, computed in double and stored back
// as float. 2-D launch: x covers targets, y covers ports.
__global__ void GetSpikes(double *spike_array, int array_size, int n_port, int n_var, float *port_weight_arr, int port_weight_arr_step, int port_weight_port_step, float *port_input_arr, int port_input_arr_step, int port_input_port_step)
{
int i_target = blockIdx.x*blockDim.x+threadIdx.x;
int port = blockIdx.y*blockDim.y+threadIdx.y;
// Guard both grid dimensions.
if (i_target >= array_size || port >= n_port) {
return;
}
// Flat index into spike_array (port-major).
int i_array = port*array_size + i_target;
// Strided indices into the weight and input arrays.
int port_input = i_target*port_input_arr_step + port_input_port_step*port;
int port_weight = i_target*port_weight_arr_step + port_weight_port_step*port;
// Accumulate in double precision, then narrow to float on store.
double d_val = (double)port_input_arr[port_input]
+ spike_array[i_array] * port_weight_arr[port_weight];
port_input_arr[port_input] = (float)d_val;
}
3,226 | #include "includes.h"
// res[i] = a[i]*x + b[i], staged through statically-sized shared memory.
// 'size' is a compile-time constant defined elsewhere in the file; it
// must be >= blockDim.x for the shared arrays to hold one block's slice.
// FIX: the shared arrays were indexed with the *global* thread id, which
// runs out of the per-block shared arrays for every block after the
// first; shared memory is per-block and must be indexed by threadIdx.x.
__global__ void callOperationSharedStatic(int *a, int *b, int x, int *res, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n)
{
return;
}
int lid = threadIdx.x; // index within this block's shared arrays
__shared__ int s_a[size], s_b[size], s_res[size];
__shared__ int s_x;
// Benign race: every thread writes the same value of x.
s_x = x;
s_a[lid] = a[tid];
s_b[lid] = b[tid];
// No __syncthreads() needed: each thread reads only its own slots.
s_res[lid] = ((s_a[lid] * s_x) + s_b[lid]);
res[tid] = s_res[lid];
}
3,227 | #include "includes.h"
// Tiled matrix transpose with coalesced global reads AND writes: each
// block stages a TILE_DIM x TILE_DIM tile in shared memory, then writes
// it out with block indices swapped. TILE_DIM and BLOCK_ROWS are
// defined elsewhere; the block shape is TILE_DIM x BLOCK_ROWS, so each
// thread copies TILE_DIM/BLOCK_ROWS rows. Assumes a square matrix of
// width gridDim.x * TILE_DIM.
__global__ void transposeCoalesced(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
// Coalesced load of the tile.
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
// Barrier between the tile write above and the transposed read below.
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
// Coalesced store; the transpose happens via the swapped tile indices.
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
3,228 | #include <cuda_runtime.h>
// Up-sweep (reduce) phase of a work-efficient scan over a shared-memory
// array. The indexing (bi = 2*stride*thid + 2*stride - 1) touches
// elements up to index 2*blockDim.x - 1, so s_data must hold
// 2*blockDim.x ints. Returns the final stride (2*blockDim.x rounded via
// the loop) for use by scanRootToLeaves.
// NOTE(review): the loop starts at d = blockDim.x (not n/2 as in the
// classic formulation) — consistent with 2 elements per thread; confirm
// against the caller's shared-memory layout.
__device__ unsigned int buildSum(int *s_data)
{
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1)
{
// Barrier before each level: reads below depend on the previous
// level's writes. All threads reach this (the guard is below it).
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
//ai += CONFLICT_FREE_OFFSET(ai);
//bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
// Down-sweep phase of the work-efficient scan: starting from the stride
// produced by buildSum, walks back down the tree swapping/accumulating
// partial sums in place to produce the scan.
// NOTE(review): unlike the classic formulation there is no explicit
// zeroing of the root element here — presumably handled by the caller;
// confirm.
__device__ void scanRootToLeaves(int *s_data, unsigned int stride)
{
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2)
{
stride >>= 1;
// Barrier before each level; reached by all threads (guard below).
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
//ai += CONFLICT_FREE_OFFSET(ai);
//bi += CONFLICT_FREE_OFFSET(bi);
// Swap-and-accumulate: left child gets right's old value,
// right child gets the sum.
int t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
// Adds a per-block uniform value (the scanned block sums) to each
// block's 2*blockDim.x output elements — the final step of a multi-block
// scan. baseIndex/blockOffset let one launch handle a sub-range.
__global__ void uniformAdd(int *g_data,
int *uniforms,
int n,
int blockOffset,
int baseIndex)
{
__shared__ int uni;
// One thread loads the block's uniform; broadcast via shared memory.
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
// Barrier so all threads see 'uni' before using it.
__syncthreads();
// note two adds per thread
g_data[address] += uni;
// Branchless guard: the bool multiply zeroes the addend past n.
// NOTE(review): the store itself still happens even when the guard is
// 0 — presumably g_data is padded to a full block; confirm.
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
3,229 | #include <iostream>
#define N 10
// __global__ qualifier indicates that
// this function is a kernel function of CUDA.
// In-place reversal of da[0..N): block b swaps element b with element
// N-1-b. Launched with N/2 single-thread blocks so each pair is
// exchanged exactly once.
__global__
void reverse(int *da){
const int lo = blockIdx.x; // which block handling the data
if (lo >= N) return;
const int hi = N - 1 - lo;
const int tmp = da[lo];
da[lo] = da[hi];
da[hi] = tmp;
}
// Reverses a 10-element array on the GPU (N/2 single-thread blocks,
// one swap each) and prints the result.
// FIX: cudaThreadSynchronize() is deprecated (removed in recent CUDA
// toolkits); replaced with cudaDeviceSynchronize().
// NOTE(review): printf is used but only <iostream> is included —
// compiles where <iostream> pulls in <cstdio>; consider including it.
int main(int argc, char *argv[])
{
std::cout << "Press any button to continue...";
std::cin.get();
// Define the arrays to be stored on host.
int A[N], Arev[N];
// Define the array (pointer) to be stored on device (GPU)
int *da;
// Fill the array with some values.
for(int i=0; i<N; i++){
A[i] = i;//rand() % 100;
Arev[i] = -1;
}
// Allocate memory on device for N-item
cudaMalloc((void**)&da, N*sizeof(int));
// Copy the values on host (A) to device (da)
// "da" is the memory location to copy A
cudaMemcpy(da, A, N*sizeof(int), cudaMemcpyHostToDevice);
// Call kernel with N/2 block (grid).
reverse<<<N / 2, 1>>>(da);
// Wait for all thread to finish
cudaDeviceSynchronize();
// Copy "da" from device to host (Arev)
cudaMemcpy(Arev, da, N*sizeof(int), cudaMemcpyDeviceToHost);
// Print them
for(int i=0; i<N; i++){
printf("%d \n", Arev[i]);
}
// Free the allocated memory on device
cudaFree(da);
return 0;
}
|
3,230 | #include <stdio.h>
#include "cs_motion_report.h"
// Writes the CSV report header: video geometry (sizes and U/V ratios)
// followed by the column names and column type codes (I = int,
// F = float) for the per-block records emitted by ma_report_record.
void
ma_report_header ( FILE *ofd, int y, int x, int t, int vr, int hr, int tr )
{
fprintf( ofd, "****==== video info (1) ====****\n") ;
fprintf( ofd, "vid_size_v,vid_size_h,vid_size_t,uv_ratio_v,uv_ratio_h,uv_ratio_t\n") ;
fprintf( ofd, "I,I,I,I,I,I\n") ;
fprintf( ofd, "%d,%d,%d,%d,%d,%d\n", y, x, t, vr, hr, tr ) ;
fprintf( ofd, "****==== encoder measurements analysis ====****\n") ;
// Column names for the records written by ma_report_record (must stay
// in sync with its fprintf argument order).
fprintf( ofd, "indx_v,indx_h,indx_t,"
"ofst_v,ofst_h,ofst_t,"
"len_v,len_h,len_t,"
"ovlp_b_v,ovlp_b_h,ovlp_b_t,"
"ovlp_f_v,ovlp_f_h,ovlp_f_t,"
"w_blk,w_type,"
"mxv,mxp_v,mxp_h,mxp_t,"
"mdv,mdp_v,mdp_h,mdp_t,"
"vlc_v,vlc_h\n") ;
fprintf( ofd, "I,I,I,I,I,I,I,I,I,I,I,I,I,I,I,I,I,F,I,I,I,F,I,I,I,F,F\n") ;
}
void
ma_report_record ( FILE *ofd, int *dp, int cnt, int xs, int ys, int zs,
int blk_in_x, int blk_in_y, int ovh, int ovv, int ovt, int wblk, int wtype )
{
int t, v, h, va, ot, ov, oh, ova ;
int xss, yss, j, k ;
int off_h, off_v, off_t ;
int fovh, fovv, fovt, bovh, bovv, bovt ;
off_t = cnt * ( zs - ovt ) ;
if ( cnt == 1 )
{
bovt = 0 ;
fovt = ovt ;
} else
{
bovt = ovt ;
fovt = ovt ;
}
for ( j = 1 ; j <= blk_in_y ; j++ )
{
if ( j == 1 )
off_v = 0 ;
else
off_v = ( j - 2 ) * ovv ;
if (( j == 1 ) || ( j == blk_in_y ))
yss = ys >> 1 ;
else
yss = ys ;
if ( j == 1 )
{
fovv = ovv ;
bovv = 0 ;
} else if ( j == blk_in_y )
{
fovv = 0 ;
bovv = ovv ;
} else
{
fovv = ovv ;
bovv = ovv ;
}
for ( k = 1 ; k <= blk_in_x ; k++ )
{
t = *dp++ ;
v = *dp++ ;
h = *dp++ ;
va = *dp++ ;
ot = *dp++ ;
ov = *dp++ ;
oh = *dp++ ;
ova = *dp++ ;
if ( k == 1 )
off_h = 0 ;
else
off_h = ( k - 2 ) * ovh ;
if (( k == 1 ) || ( k == blk_in_x ))
xss = xs >> 1 ;
else
xss = xs ;
if ( k == 1 )
{
fovh = ovh ;
bovh = 0 ;
} else if ( k == blk_in_x )
{
fovh = 0 ;
bovh = ovh ;
} else
{
fovh = ovh ;
bovh = ovh ;
}
fprintf( ofd, "%d,%d,%d,%d,%d,%d,"
"%d,%d,%d,%d,%d,%d,%d,%d,%d,"
"%d,%d,"
"%f,%d,%d,%d,%f,%d,%d,%d,"
"%f,%f\n",
j,k,cnt, // index
off_v, off_h, off_t,
yss, xss, zs,
bovv, bovh, bovt,
fovv, fovh, fovt,
wblk, wtype,
(float)va / 1000, v, h, t,
(float)ova / 1000, ov, oh, ot,
(( float )ov -( float )v ) / (float)t,
(( float )oh -( float )h ) / (float)t ) ;
}
}
}
|
3,231 | #include<stdio.h>
#include<cuda.h>
// One phase of odd-even transposition sort: with I==0 compare-swap the
// even pairs (2b, 2b+1); with I==1 the odd pairs (2b+1, 2b+2), where
// b = blockIdx.x. Guards keep the upper index below n.
__global__ void oddeven(int* x,int I,int n)
{
int b = blockIdx.x;
int lo = (I == 0) ? 2*b : 2*b + 1;
int hi = lo + 1;
if (hi >= n) return;
if (x[lo] > x[hi]) {
int tmp = x[lo];
x[lo] = x[hi];
x[hi] = tmp;
}
}
// Interactive odd-even transposition sort: reads n ints (n <= 100),
// runs n alternating even/odd phases on the GPU, and prints the sorted
// result.
int main()
{
int a[100],n,c[100],i;
int *d;
printf("Enter how many elements of first array:");
scanf("%d",&n);
printf("Enter No.\n");
for(i=0; i<n; i++)
{
scanf("%d",&a[i]);
}
cudaMalloc((void**)&d, n*sizeof(int));
cudaMemcpy(d,a,n*sizeof(int),cudaMemcpyHostToDevice);
// n phases suffice for odd-even transposition sort of n elements;
// phases alternate even (i%2==0) and odd pairs.
for(i=0;i<n;i++){
//int size=n/2;
oddeven<<<n/2,1>>>(d,i%2,n);
}
printf("\n");
// Blocking copy-back also synchronizes with the kernels.
cudaMemcpy(c,d,n*sizeof(int), cudaMemcpyDeviceToHost);
printf("Sorted Array is:\t");
for(i=0; i<n; i++)
{
printf("%d\t",c[i]);
}
cudaFree(d);
return 0;
}
|
3,232 | extern "C" {
// Grayscale histogram: each thread increments the bin of its pixel's
// 8-bit value. 2-D grid of 2-D blocks flattened to a linear pixel index.
__global__ void histogram(unsigned char *dataIn, int *hist)
{
const int threadInBlock = threadIdx.x + threadIdx.y * blockDim.x;
const int blockInGrid = blockIdx.x + blockIdx.y * gridDim.x;
const int pixel = threadInBlock + blockInGrid * blockDim.x * blockDim.y;
atomicAdd(&hist[dataIn[pixel]], 1);
}
// Grayscale image histogram (optimized): each block accumulates into a
// private shared-memory histogram, then merges it into the global one,
// reducing global atomic contention. Requires blockDim.x >= 256.
__global__ void histogram_optimized(unsigned char *buffer, long size, unsigned int *histo){
__shared__ unsigned int private_histo[256];
if(threadIdx.x < 256) // initialize the shared histogram
private_histo[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
// the stride is the total number of threads in the grid
int stride = blockDim.x * gridDim.x;
while(i < size) {
atomicAdd(&(private_histo[buffer[i]]), 1);
i += stride;
}
// wait until all threads in the block have finished accumulating
__syncthreads();
if(threadIdx.x < 256){
atomicAdd(&(histo[threadIdx.x]), private_histo[threadIdx.x]);
}
}
} |
3,233 | /*__global__ void Rotate3D(float* Destination, float* Source, int sizeX, int sizeY, float deg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;// Kernel definition
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
int xc = sizeX - sizeX/2;
int yc = sizeY - sizeY/2;
int newx = ((float)i-xc)*cos(deg) + ((float)j-yc)*sin(deg) + xc;
int newy = -((float)i-xc)*sin(deg) + ((float)j-yc)*cos(deg) + yc;
if (newx >= 0 && newx < sizeX && newy >= 0 && newy < sizeY)
{
// putPixVal(Destination, sizeX, i , j, readPixVal(Source, sizeX, newx, newy));
Destination[k* sizeX*sizeY + j*sizeX+i] = Source[k* sizeX*sizeY + newy*sizeX + newx];
}
} */
// rotate a 3D array (2D rotate, with the third dim simultaenously processed) with bilinear interpolation
# define pi 3.141592654f
// Rotates each XY slice of a 3D volume by 'deg' degrees about the slice
// center, sampling the source with bilinear interpolation; the Z
// dimension (k) selects the slice. Destination pixels whose source
// footprint falls outside the slice are left untouched.
// NOTE(review): there is no guard on i/j/k against the volume extents —
// presumably the launch grid matches the volume exactly; confirm, or
// out-of-range threads will write past the buffer.
__global__ void Rotate3D_bilinear(float* Destination, float* Source, int sizeX, int sizeY, float deg)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;// Kernel definition
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
int z_offset = k*sizeX*sizeY;
// Rotation center of each slice.
int xc = sizeX - sizeX/2;
int yc = sizeY - sizeY/2;
float cos_rad = cosf(deg/180*pi);
float sin_rad = sinf(deg/180*pi);
// Inverse-rotate the destination coordinate into the source slice.
float newx = +((float)i-xc)*cos_rad + ((float)j-yc)*sin_rad + xc ;
float newy = -((float)i-xc)*sin_rad + ((float)j-yc)*cos_rad + yc ;
// Integer corners of the sampling cell.
int newx_0 = floorf(newx);
int newy_0 = floorf(newy);
int newx_1 = newx_0 + 1;
int newy_1 = newy_0 + 1;
// put the pixel coo. in unit square
float x = newx - float(newx_0);
float y = newy - float(newy_0);
float val_00 = 0.0, val_01 = 0.0, val_10 = 0.0, val_11 = 0.0, val_intp = 0.0;
if (newx_0 >= 0 && newx_1 < sizeX && newy_0 >= 0 && newy_1 < sizeY)
{
// perform bilinear interpolation
val_00 = Source[z_offset + newy_0*sizeX + newx_0];
val_01 = Source[z_offset + newy_1*sizeX + newx_0];
val_10 = Source[z_offset + newy_0*sizeX + newx_1];
val_11 = Source[z_offset + newy_1*sizeX + newx_1];
val_intp = val_00 + (val_01-val_00)*y + (val_10-val_00)*x + (val_11 + val_00 - val_10 - val_01)*x*y;
Destination[z_offset + j*sizeX+i] = val_intp;
}
}
3,234 | #include<stdio.h>
#include<iostream>
#include<stdlib.h>
#include<string.h>
#define NUM_THREADS 256
#define IMG_SIZE 1048576
// Coefficients with Structure of Array
// Structure-of-Arrays layout for per-pixel channels: each member points
// to its own IMG_SIZE-element device array, so a warp reading one
// channel touches contiguous memory (coalesced access).
struct Coefficients_SOA {
int* r;          // red channel
int* b;          // blue channel
int* g;          // green channel
int* hue;        // hue channel
int* saturation; // saturation channel
int* maxVal;     // per-pixel maximum (used as grayscale divisor)
int* minVal;     // per-pixel minimum (used as hue/sat divisor)
int* finalVal;   // output: grayscale * hue_sat
};
// Per-pixel combine on the SoA channels:
// finalVal[i] = ((r+g+b)/maxVal) * (hue*saturation/minVal).
// The launch grid covers IMG_SIZE exactly, so no tail guard is needed.
__global__
void complicatedCalculation(Coefficients_SOA data)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int grayscale = (data.r[i] + data.g[i] + data.b[i])/data.maxVal[i];
const int hue_sat = data.hue[i] * data.saturation[i] / data.minVal[i];
data.finalVal[i] = grayscale*hue_sat;
}
// Host driver: allocates the eight SoA channel buffers on the device,
// runs the kernel once over IMG_SIZE pixels, and frees the buffers.
// (No host<->device copies — presumably a memory-layout benchmark
// operating on uninitialized data; TODO confirm.)
// FIX: the original called cudaFree(d_x.maxVal) twice (a double free);
// each buffer is now freed exactly once.
void complicatedCalculation()
{
Coefficients_SOA d_x;
cudaMalloc(&d_x.r, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.g, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.b, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.hue, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.saturation, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.maxVal, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.minVal, IMG_SIZE*sizeof(int));
cudaMalloc(&d_x.finalVal, IMG_SIZE*sizeof(int));
int num_blocks = IMG_SIZE/NUM_THREADS;
complicatedCalculation<<<num_blocks,NUM_THREADS>>>(d_x);
cudaFree(d_x.r);
cudaFree(d_x.g);
cudaFree(d_x.b);
cudaFree(d_x.hue);
cudaFree(d_x.saturation);
cudaFree(d_x.maxVal);
cudaFree(d_x.minVal);
cudaFree(d_x.finalVal);
}
int main(int argc, char*argv[])
{
// Entry point: run the SOA demo once; command-line arguments are unused.
complicatedCalculation();
return 0;
}
|
3,235 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
__global__ void grayscale(unsigned char *R_input, unsigned char *G_input,
                          unsigned char *B_input, size_t i_size,
                          unsigned int *hist) {
  // Converts an RGB image (one channel per buffer, row pitch i_size) to
  // grayscale in place and accumulates a 256-bin histogram of the result.
  // NOTE(review): no bounds guard — the launch grid must match the image
  // dimensions exactly.
  int x = threadIdx.x + (blockIdx.x * blockDim.x);
  int y = threadIdx.y + (blockIdx.y * blockDim.y);
  int offset = x + y * i_size;
  // Compute the luminance ONCE from the original channel values. The
  // original code overwrote R first and then used the already-modified R
  // (and G) when computing G and B, so the three output channels disagreed.
  unsigned char gray = 0.2989 * R_input[offset] + 0.587 * G_input[offset] + 0.1140 * B_input[offset];
  R_input[offset] = gray;
  G_input[offset] = gray;
  B_input[offset] = gray;
  // atomicAdd because many pixels share a histogram bin.
  atomicAdd( &(hist[gray]), 1);
}
__global__ void binary(unsigned char *R_input, unsigned char *G_input,
                       unsigned char *B_input, size_t i_size,
                       int um) {
  // Threshold the red channel against `um` and write the resulting
  // black/white value to all three channels (binarization).
  // NOTE(review): like the grayscale kernel, assumes the grid exactly
  // covers the image — no bounds guard.
  const int col = threadIdx.x + (blockIdx.x * blockDim.x);
  const int row = threadIdx.y + (blockIdx.y * blockDim.y);
  const int idx = col + row * i_size;
  const unsigned char bw = (R_input[idx] > um) ? 255 : 0;
  R_input[idx] = bw;
  G_input[idx] = bw;
  B_input[idx] = bw;
}
|
3,236 | #include "cuda_runtime_api.h"
#include <vector>
namespace CudaHelpers {
	// Allocates device memory for `vec` and copies its contents over.
	// `gpu_mem` is taken BY REFERENCE: the original took the pointer by
	// value, so the address produced by cudaMalloc never reached the caller
	// (device memory leaked and the caller kept its old, unrelated pointer).
	// Returns the cudaError_t converted to bool, preserving the original
	// convention (cudaSuccess == 0 -> false; any error -> true).
	template<typename T>
	bool copy_vector_to_gpu(T*& gpu_mem, const std::vector<T>& vec){
		cudaError_t err = cudaMalloc((void**)&gpu_mem, vec.size() * (size_t)sizeof(T));
		if (err != cudaSuccess)
			return err;
		// Also surface copy failures instead of silently discarding them.
		err = cudaMemcpy((void*)gpu_mem, (void*)vec.data(), vec.size() * (size_t)sizeof(T), cudaMemcpyHostToDevice);
		return err;
	}
	// Copies vec.size() elements back from device memory into `vec`'s
	// existing storage (the vector must already be sized; the const is cast
	// away exactly as in the original). Same bool convention as above.
	template<typename T>
	bool retrieve_vector_from_gpu(T* gpu_mem, const std::vector<T>& vec) {
		return cudaMemcpy((void*)vec.data(), gpu_mem, vec.size() * (size_t)sizeof(T), cudaMemcpyDeviceToHost);
	}
}
|
3,237 | #include <chrono>
#include <iostream>
//Kernel Definition
__global__ void emptyKernel() {
	// Intentionally empty: used below solely to measure kernel-launch overhead.
}
int main () {
	using namespace std::chrono;
	// Warm-up launch so one-time CUDA context creation cost is excluded
	// from the measurements below.
	emptyKernel<<<1,1>>>();
	// Measure the wall time of 10^0 .. 10^6 synchronous empty launches.
	for (int exponent = 0; exponent <= 6; exponent++){
		high_resolution_clock::time_point t0 = high_resolution_clock::now();
		unsigned launches = pow(10, exponent);
		for (unsigned k = 0; k < launches; k++) {
			emptyKernel<<<1,1>>>();
			cudaDeviceSynchronize();
		}
		high_resolution_clock::time_point t1 = high_resolution_clock::now();
		duration<double> elapsed = duration_cast<duration<double>>(t1 - t0);
		std::cout << "Time for 10^" << exponent << " kernels started: " << elapsed.count() << " seconds";
		std::cout << std:: endl;
	}
	return 0;
}
|
3,238 | #include "includes.h"
// Tree-reduces one double per thread into sdata[0].
// Preconditions (from the call site): blockDim.x == 256 and sdata holds at
// least 256 elements; all threads of the block must call this together.
__device__ void sumByReduction( volatile double* sdata, double mySum, const unsigned int tid )
{
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads();
if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads();
if (tid < 32)
{
// Final steps run inside a single warp. `volatile` alone guaranteed the
// ordering on pre-Volta GPUs, but with independent thread scheduling
// (SM70+) an explicit __syncwarp() is required between dependent steps.
sdata[tid] = mySum = mySum + sdata[tid + 32]; __syncwarp();
sdata[tid] = mySum = mySum + sdata[tid + 16]; __syncwarp();
sdata[tid] = mySum = mySum + sdata[tid + 8]; __syncwarp();
sdata[tid] = mySum = mySum + sdata[tid + 4]; __syncwarp();
sdata[tid] = mySum = mySum + sdata[tid + 2]; __syncwarp();
sdata[tid] = mySum = mySum + sdata[tid + 1]; __syncwarp();
}
__syncthreads() ;
}
// Averages `particle_pd` in groups of `particles_per_feature`, writing one
// mean per feature into `feature_pd`. One block handles one feature at a
// time (features striped across blocks via gridDim.x).
// NOTE(review): sumByReduction's upper tree step reads sdata[tid + 128], so
// this kernel appears to require blockDim.x == 256 — confirm at the launch site.
__global__ void computePdKernel(double* particle_pd, int particles_per_feature, int n_features, double* feature_pd)
{
__shared__ double shmem[256] ;
for ( int n = blockIdx.x ; n < n_features ;n+= gridDim.x ){
int offset = n*particles_per_feature ;
// Each thread accumulates a strided partial sum over this feature's slice.
double val = 0 ;
for ( int i = offset+threadIdx.x ; i < offset + particles_per_feature ; i+= blockDim.x ){
val += particle_pd[i] ;
}
// Block-wide reduction of the partials into shmem[0].
sumByReduction(shmem,val,threadIdx.x);
if ( threadIdx.x == 0)
feature_pd[n] = shmem[0]/particles_per_feature ;
// Barrier before shmem is reused for the next feature iteration.
__syncthreads() ;
}
}
3,239 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (see header comment: do not
// modify). Launched single-threaded; mutates `comp` through a nest of loops
// and prints the final value from the device.
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float* var_8,float var_9,float var_10,float var_11,float var_12,float var_13) {
if (comp == (+1.1195E-36f - +1.9660E-43f * fmodf((+1.8591E-42f / var_4), +1.6926E36f))) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = +1.5888E-16f;
float tmp_2 = -1.4103E-42f / log10f(ceilf(var_5 + var_6));
comp = tmp_2 - tmp_1 / var_7 * +1.8376E35f;
// NOTE(review): var_8 is a 10-element host-malloc'd buffer passed to a
// kernel in main — only valid under unified addressing; inner loops
// shadow the outer `i`, which is intentional in this generated code.
for (int i=0; i < var_2; ++i) {
var_8[i] = +0.0f;
comp = var_8[i] * (+1.0901E-35f + var_9);
}
for (int i=0; i < var_3; ++i) {
float tmp_3 = (var_10 * +1.8910E-43f * (var_11 - var_12));
comp += tmp_3 - -1.0091E-37f / var_13;
}
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array on the host and fills every slot
// with `v`. Caller owns (and should free) the returned buffer.
float* initPointer(float v) {
  float *buf = (float*) malloc(sizeof(float)*10);
  for(int k = 0; k < 10; ++k) {
    buf[k] = v;
  }
  return buf;
}
int main(int argc, char** argv) {
/* Program variables */
  // The kernel consumes 14 positional arguments; fail fast with a usage
  // message instead of dereferencing missing argv entries (the original
  // read argv[1]..argv[14] unconditionally and crashed on short command
  // lines).
  if (argc < 15) {
    fprintf(stderr, "usage: %s <14 numeric arguments>\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  // 10-element host buffer handed to the kernel (relies on unified addressing).
  float* tmp_9 = initPointer( atof(argv[9]) );
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14);
  // Block until the kernel's device-side printf output is flushed.
  cudaDeviceSynchronize();
  return 0;
}
|
3,240 | /*
* sgemm.cu:
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <cuda_runtime.h>
enum {
BLOCK_SIZE = 32,
N = 1024
};
// Naive n x n SGEMM: one thread computes one output element as the dot
// product of a row of `a` with a column of `b`.
__global__ void sgemm_naive(const float *a, const float *b, float *c, int n)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n)
        return; // grid tail guard
    float acc = 0.0;
    for (int k = 0; k < n; k++)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
// Tiled SGEMM: the block walks matching BLOCK_SIZE-wide tiles of `a` (along
// a row band) and `b` (down a column band), staging each pair in shared
// memory. NOTE(review): there is no tail guard, so n must be an exact
// multiple of blockDim.x (here 32) and blockDim must be square — confirm at
// the launch site (true for N = 1024 below).
__global__ void sgemm_tailed(const float *a, const float *b, float *c, int n)
{
__shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
// "tail" here means tile: the per-block sub-matrix edge length.
int tail_size = blockDim.x;
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
// Result for c[i, j]
float sum = 0.0;
// Index of first tail (sub-matrix) in A
int Astart = by * n * tail_size;
int Aend = Astart + n - 1;
int Astep = tail_size;
// Index of first tail (sub-matrix) in B
int Bstart = bx * tail_size;
int Bstep = n * tail_size;
int ai = Astart;
int bi = Bstart;
while (ai <= Aend) {
// Load tail to shared memory - each thread load one item
as[ty][tx] = a[ai + ty * n + tx];
bs[ty][tx] = b[bi + ty * n + tx];
// Wait all threads
__syncthreads();
// Compute partial result
for (int k = 0; k < tail_size; k++)
sum += as[ty][k] * bs[k][tx];
// Wait for all threads before overwriting of as and bs
__syncthreads();
ai += Astep;
bi += Bstep;
}
// Write the finished dot product for this thread's output element.
int Cstart = by * n * tail_size + bx * tail_size;
c[Cstart + ty * n + tx] = sum;
}
// Reference CPU SGEMM: c = a * b for square n x n row-major matrices.
void sgemm_host(float *a, float *b, float *c, int n)
{
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            float acc = 0.0;
            for (int k = 0; k < n; k++)
                acc += a[row * n + k] * b[k * n + col];
            c[row * n + col] = acc;
        }
    }
}
// Wall-clock time in seconds (microsecond resolution via gettimeofday).
double wtime()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1E-6;
}
// Driver: runs SGEMM on CPU and GPU, verifies both against the known
// closed-form result (all A=2, B=3 so every C element is 6*N), and reports
// timings, bandwidth and speedups.
int main()
{
double tcpu, tgpu, tmem;
cudaError_t err;
/* Allocate memory on host */
size_t size = sizeof(float) * N * N;
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
float *h_C = (float *)malloc(size);
if (h_A == NULL || h_B == NULL || h_C == NULL) {
fprintf(stderr, "Allocation error.\n");
exit(EXIT_FAILURE);
}
// Constant inputs make the expected product trivially 6*N everywhere.
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
h_A[i * N + j] = 2.0;
h_B[i * N + j] = 3.0;
}
}
// -wtime()/+wtime() pattern accumulates the elapsed interval into tcpu.
tcpu = -wtime();
sgemm_host(h_A, h_B, h_C, N);
tcpu += wtime();
// Verify that the result vector is correct
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
if (fabs(6.0 * N - h_C[i * N + j]) > 1e-5) {
fprintf(stderr, "CPU results verification failed at element %d %d!\n", i, j);
exit(EXIT_FAILURE);
}
}
}
/* Allocate vectors on device */
float *d_A = NULL, *d_B = NULL, *d_C = NULL;
if (cudaMalloc((void **)&d_A, size) != cudaSuccess) {
fprintf(stderr, "Allocation error\n");
exit(EXIT_FAILURE);
}
if (cudaMalloc((void **)&d_B, size) != cudaSuccess) {
fprintf(stderr, "Allocation error\n");
exit(EXIT_FAILURE);
}
if (cudaMalloc((void **)&d_C, size) != cudaSuccess) {
fprintf(stderr, "Allocation error\n");
exit(EXIT_FAILURE);
}
/* Copy the host vectors to device */
tmem = -wtime();
if (cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Host to device copying failed\n");
exit(EXIT_FAILURE);
}
if (cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Host to device copying failed\n");
exit(EXIT_FAILURE);
}
tmem += wtime();
/* Launch the kernel */
int threadsPerBlockDim = BLOCK_SIZE;
dim3 blockDim(threadsPerBlockDim, threadsPerBlockDim, 1);
// Ceil-division grid; N = 1024 divides BLOCK_SIZE exactly, which the
// tiled kernel requires (it has no tail guard).
int blocksPerGridDimX = ceilf(N / (float)threadsPerBlockDim);
int blocksPerGridDimY = ceilf(N / (float)threadsPerBlockDim);
dim3 gridDim(blocksPerGridDimX, blocksPerGridDimY, 1);
printf("CUDA kernel launch with %d (%d %d) blocks of %d (%d %d) threads\n",
blocksPerGridDimX * blocksPerGridDimY, blocksPerGridDimX, blocksPerGridDimY,
threadsPerBlockDim * threadsPerBlockDim, threadsPerBlockDim, threadsPerBlockDim);
tgpu = -wtime();
//sgemm_naive<<<gridDim, blockDim>>>(d_A, d_B, d_C, N);
sgemm_tailed<<<gridDim, blockDim>>>(d_A, d_B, d_C, N);
// Synchronize so tgpu measures kernel execution, not just the launch.
cudaDeviceSynchronize();
tgpu += wtime();
if ( (err = cudaGetLastError()) != cudaSuccess) {
fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Copy the device vectors to host */
tmem -= wtime();
if (cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) != cudaSuccess) {
fprintf(stderr, "Device to host copying failed\n");
exit(EXIT_FAILURE);
}
tmem += wtime();
// Verify that the result vector is correct
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
if (fabs(6.0 * N - h_C[i * N + j]) > 1e-5) {
fprintf(stderr, "GPU results verification failed at element %d %d!\n", i, j);
exit(EXIT_FAILURE);
}
}
}
printf("CPU version (sec.): %.6f\n", tcpu);
printf("GPU version (sec.): %.6f\n", tgpu);
printf("Memory ops. (sec.): %.6f\n", tmem);
// Bandwidth counts both H2D copies plus the D2H copy (3 * size bytes).
printf("Memory bw. (MiB/sec.): %.2f\n", ((3 * size) >> 20) / tmem);
printf("CPU GFLOPS: %.2f\n", 2.0 * N * N * N * 1.0E-9F / tcpu);
printf("GPU GFLOPS: %.2f\n", 2.0 * N * N * N * 1.0E-9F / tgpu);
printf("Speedup: %.2f\n", tcpu / tgpu);
printf("Speedup (with mem ops.): %.2f\n", tcpu / (tgpu + tmem));
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
cudaDeviceReset();
return 0;
}
|
3,241 | #include "includes.h"
// Element-wise c = a + b over `length` elements using a grid-stride loop,
// so any grid size covers the whole range.
__global__ void kernel(const uint *__restrict__ a, const uint *__restrict__ b, const uint length, uint *c)
{
	const uint stride = blockDim.x * gridDim.x;
	for (uint i = (blockIdx.x * blockDim.x) + threadIdx.x; i < length; i += stride)
	{
		c[i] = a[i] + b[i];
	}
}
3,242 | #include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>
#include <iterator>
#include <cstring>
#define N 1000000
#define SIZE 100
__constant__ int factor = 1;
//
// NOTE: while loop is for the case when number of elements in the array exceeds the
// number of blocks possible total in device to be launched
//
__global__
void vectorAdd(int *a, int *b, int *c) {
	// Grid-stride loop over the fixed-size (N) vectors: correct even when
	// the grid launches fewer than N threads. Each sum is scaled by the
	// __constant__ `factor`.
	const int stride = blockDim.x * gridDim.x;
	for (int idx = blockIdx.x*blockDim.x + threadIdx.x; idx < N; idx += stride) {
		c[idx] = factor*(a[idx] + b[idx]);
	}
}
__global__
void matrixAdd(int **a,int **b, int**c) {
	// Element-wise matrix sum through pointer-to-pointer rows.
	// NOTE(review): no bounds guard, so the 2-D launch must exactly match
	// the matrix dimensions, and a/b/c must be device arrays of device row
	// pointers — unused in this file, so neither is verifiable here.
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	int j = blockIdx.y*blockDim.y + threadIdx.y;
	c[i][j] = a[i][j] + b[i][j];
}
#define PRINT(x) \
std::cout << #x " = " << x << std::endl
// Prints a SIZE x SIZE character image row by row; each row is copied into
// a NUL-terminated scratch buffer so printf can emit it as a string.
void printImage(char* ptr) {
	char row[SIZE+1];
	row[SIZE] = '\0';
	for (int r = 0; r < SIZE; r++) {
		std::memcpy((void*)row, (void*)(ptr + SIZE*r), SIZE);
		printf("%s\n", row);
	}
	printf("\n");
}
int main(int argc, char** argv) {
	// start time
	auto startTime = std::chrono::high_resolution_clock::now();
	printf("Hello World\n");
	// Enumerate devices and dump their capability fields.
	int numDevices;
	cudaGetDeviceCount(&numDevices);
	PRINT(numDevices);
	cudaDeviceProp prop;
	for (auto i=0 ; i<numDevices; i++) {
		cudaGetDeviceProperties(&prop, i);
		PRINT(prop.name);
		PRINT(prop.totalGlobalMem);
		PRINT(prop.sharedMemPerBlock);
		PRINT(prop.regsPerBlock);
		PRINT(prop.warpSize);
		PRINT(prop.memPitch);
		PRINT(prop.maxThreadsPerBlock);
		PRINT(prop.maxThreadsDim[0]);
		PRINT(prop.maxThreadsDim[1]);
		PRINT(prop.maxThreadsDim[2]);
		PRINT(prop.maxGridSize[0]);
		PRINT(prop.maxGridSize[1]);
		PRINT(prop.maxGridSize[2]);
		PRINT(prop.totalConstMem);
		PRINT(prop.major);
		PRINT(prop.minor);
		PRINT(prop.clockRate);
		PRINT(prop.textureAlignment);
		PRINT(prop.deviceOverlap);
		PRINT(prop.multiProcessorCount);
		PRINT(prop.kernelExecTimeoutEnabled);
		PRINT(prop.integrated);
		PRINT(prop.canMapHostMemory);
		PRINT(prop.computeMode);
		PRINT(prop.maxTexture1D);
		PRINT(prop.maxTexture2D[0]);
		PRINT(prop.maxTexture2D[1]);
		PRINT(prop.maxTexture3D[0]);
		PRINT(prop.maxTexture3D[1]);
		PRINT(prop.maxTexture3D[2]);
		// PRINT(prop.maxTexture2DArray[0]);
		// PRINT(prop.maxTexture2DArray[1]);
		// PRINT(prop.maxTexture2DArray[2]);
		PRINT(prop.concurrentKernels);
	}
	// The three N-element arrays total ~12 MB. The original declared them as
	// stack arrays (int h_a[N]...), which overflows the typical 8 MB stack
	// limit; allocate them on the heap instead.
	int *h_a = new int[N];
	int *h_b = new int[N];
	int *h_c = new int[N];
	int *d_a, *d_b, *d_c;
	cudaMalloc(&d_a, N*sizeof(int));
	cudaMalloc(&d_b, N*sizeof(int));
	cudaMalloc(&d_c, N*sizeof(int));
	for (auto i=0; i<N; i++) {
		h_a[i] = i;
		h_b[i] = i*i;
	}
	cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, N*sizeof(int), cudaMemcpyHostToDevice);
	// Grid-stride kernel: ceil-div launch covers all N elements.
	vectorAdd<<<(N+127)/128, 128>>>(d_a, d_b, d_c);
	// Blocking copy also synchronizes with the kernel.
	cudaMemcpy(h_c, d_c, N*sizeof(int), cudaMemcpyDeviceToHost);
	for (auto i=0; i<N; i++) {
		if (i < 10)
			printf("c[%d] = %d\n", i, h_c[i]);
	}
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	delete[] h_a;
	delete[] h_b;
	delete[] h_c;
	// stop time
	auto stopTime = std::chrono::high_resolution_clock::now();
	PRINT((stopTime - startTime).count());
	printf("Goodbye World\n");
}
|
3,243 | #include "includes.h"
// In place: x[i] <- (x[i] - y[i])^2, guarded against the grid tail.
__global__ void squared_difference(float *x, float *y, int len) {
	const int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx >= len)
		return;
	const float diff = x[idx] - y[idx];
	x[idx] = diff * diff;
}
3,244 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Records, per thread, its block index, thread index, warp index and flat
// global index into the four output arrays (one slot per thread).
__global__ void what_is_my_id(unsigned int * const block,
unsigned int * const thread,
unsigned int * const warp,
unsigned int * const calc_thread)
{
	// Flat global thread id: block_index * block_size + thread_index.
	const unsigned int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
	block[gid] = blockIdx.x;
	thread[gid] = threadIdx.x;
	// warpSize is the built-in (32 on current hardware).
	warp[gid] = threadIdx.x / warpSize;
	calc_thread[gid] = gid;
}
#define ARRAY_SIZE 128
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
unsigned int cpu_block[ARRAY_SIZE];
unsigned int cpu_thread[ARRAY_SIZE];
unsigned int cpu_warp[ARRAY_SIZE];
unsigned int cpu_calc_thread[ARRAY_SIZE];
int main(void){
	// 2 blocks x 64 threads = 128 threads, one per element of each
	// ARRAY_SIZE-element output array.
	const unsigned int num_blocks = 2;
	const unsigned int num_threads = 64;
	// Device-side output buffers.
	unsigned int * gpu_block;
	unsigned int * gpu_thread;
	unsigned int * gpu_warp;
	unsigned int * gpu_calc_thread;
	cudaMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES);
	cudaMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES);
	cudaMalloc((void **)&gpu_warp, ARRAY_SIZE_IN_BYTES);
	cudaMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
	// Fill the four arrays on the device.
	what_is_my_id<<<num_blocks, num_threads>>>(gpu_block, gpu_thread, gpu_warp, gpu_calc_thread);
	// Blocking copies back to the host (these also synchronize with the kernel).
	cudaMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
	cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
	cudaMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
	cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
	cudaFree(gpu_block);
	cudaFree(gpu_thread);
	cudaFree(gpu_warp);
	cudaFree(gpu_calc_thread);
	for(unsigned int i = 0; i < ARRAY_SIZE; i++){
		printf("Calculcated thread: %3u | Block: %3u | Warp: %3u | Thread: %3u\n",
		cpu_calc_thread[i], cpu_block[i], cpu_warp[i], cpu_thread[i]);
	}
}
|
3,245 | using namespace std;
#include <iostream>
#include <fstream>
#include <string>
#include <cstdlib>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Debug kernel: each thread prints its global index and the element it sees.
// (No bounds guard — the launch must not exceed d_in's length.)
__global__ void is_odd( int * d_in){
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    printf("hello from thread %d, data is %d\n", gid, d_in[gid]);
}
int main(int argc, char ** argv) {
  // Print per-device properties, then demonstrate strided (interleaving)
  // copies with cudaMemcpy2D: input1/input2 land in the even/odd slots of
  // d_in, and are later de-interleaved back out.
  int nDevices;
  cudaGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, i);
    printf("Device Number: %d\n", i);
    printf(" Device name: %s\n", prop.name);
    printf(" Memory Clock Rate (KHz): %d\n",
    prop.memoryClockRate);
    printf(" Memory Bus Width (bits): %d\n",
    prop.memoryBusWidth);
    // totalGlobalMem and sharedMemPerBlock are size_t; the original passed
    // them through "%d", which is undefined behavior — use %zu.
    printf(" Memory Global Mem: %zu\n", prop.totalGlobalMem);
    printf(" Memory shared per block: %zu\n", prop.sharedMemPerBlock);
    printf(" can map host memory: %d\n", prop.canMapHostMemory);
    printf(" device overlap: %d\n", prop.deviceOverlap);
    printf(" max threads per block: %d\n", prop.maxThreadsPerBlock);
    printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
    2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
  }
  printf("entering\n");
  int input1[1024];
  //input1 = (int*) malloc(sizeof(int)*1024);
  int input2[1024];
  //input2 = (int*) malloc(sizeof(int)*1024);
  int output_a[1024];// = malloc(sizeof(int)*2048);
  int output_b[1024];
  int * d_in;
  int * d_out;
  cudaMalloc((void**) &d_in, sizeof(int)*2048);
  cudaMalloc((void**) &d_out, sizeof(int)*2048);
  for(int i=0; i< 1024; i++){
    input1[i] = i*2;
    input2[i] = i*2+1;
  }
  // Destination pitch 2*sizeof(int), width sizeof(int), 1024 rows:
  // writes input1 into d_in[0], d_in[2], ... (even slots) ...
  cudaMemcpy2D(d_in, 2*sizeof(int), input1, sizeof(int), sizeof(int), 1024, cudaMemcpyHostToDevice);
  // ... and input2 into the odd slots.
  cudaMemcpy2D(d_in + 1, 2*sizeof(int), input2, sizeof(int), sizeof(int), 1024, cudaMemcpyHostToDevice);
  is_odd<<<2, 1024>>>(d_in);
  cudaDeviceSynchronize();
  // De-interleave the even/odd slots back into contiguous host arrays.
  cudaMemcpy2D(output_a, sizeof(int), d_in, 2*sizeof(int), sizeof(int), 1024, cudaMemcpyDeviceToHost);
  cudaMemcpy2D(output_b, sizeof(int), d_in+1, 2*sizeof(int), sizeof(int), 1024, cudaMemcpyDeviceToHost);
  printf("index at 10: a is %d, b is %d\n", output_a[10], output_b[10]);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
|
3,246 | #include <stdio.h>
#include <stdlib.h>
#include "mesh.cuh"
#include "material.cuh"
void print_mesh_details(struct mesh *m);
void print_material_details(struct material *mat);
// Prints the full mesh and material property dump (see helpers below).
void mesh_material_print(struct mesh *me, struct material *ma) {
	printf("MESH PROPERTIES: \n");
	print_mesh_details(me);
	printf("MATERIAL PROPERTIES: \n");
	print_material_details(ma);
}
// Dumps every field of `struct mesh` (declared in mesh.cuh) to stdout.
void print_mesh_details(struct mesh *m) {
	printf("lx:%f ly:%f vmax:%f\n", m->lx, m->ly, m->vmax);
	printf("nelx:%d nely:%d volfrac:%f\n", m->nelx, m->nely, m->volfrac);
	printf("ax:%f by:%f area:%f\n", m->ax, m->by, m->area);
	printf("penal:%f prho:%f rmin:%f ft:%d\n", m->penal, m->prho, m->rmin, m->ft);
	printf("alpha:%f beta:%f ninp:%d nout:%d\n", m->alpha, m->beta, m->ninp, m->nout);
}
// Dumps every field of `struct material` (declared in material.cuh) to stdout.
void print_material_details(struct material *mat) {
	printf("e0:%f emin:%f rho0:%f rhomin:%f nu:%f\n", mat->e0, mat->emin, mat->rho0, mat->rhomin, mat->nu);
}
// Prints a row-major nrow x ncol int matrix, one row per line.
void print_mat_int(int *mat, int nrow, int ncol) {
	for (int r = 0; r < nrow; r++) {
		for (int c = 0; c < ncol; c++)
			printf("%d ", mat[r*ncol + c]);
		printf("\n");
	}
}
// Prints a row-major nrow x ncol double matrix, one row per line.
void print_mat_double(double *mat, int nrow, int ncol) {
	for (int r = 0; r < nrow; r++) {
		for (int c = 0; c < ncol; c++)
			printf("%f ", mat[r*ncol + c]);
		printf("\n");
	}
}
// Prints a double vector, one element per line at 15 decimal places.
// (The "\n " format — newline then a space — is preserved from the original.)
void print_vec_double(double *vec, int nelem) {
	for (int k = 0; k < nelem; k++)
		printf("%.15f\n ",vec[k]);
	printf("\n");
}
// Prints an int vector, one element per line.
// (The "\n " format — newline then a space — is preserved from the original.)
void print_vec_int(int *vec, int nelem) {
	for (int k = 0; k < nelem; k++)
		printf("%d\n ", vec[k]);
	printf("\n");
}
|
3,247 | #include "cuda_runtime.h"
#include "cudafile2.cuh"
#include <stdio.h>
#include <iostream>
#include "device_launch_parameters.h"
using namespace std;
#define N 10
// NOTE(review): `data` is an N x N grid of int** entries, each dereferenced
// twice below — so every data[i][j], and the int* it points to, must already
// reference valid storage; nothing is allocated here. The stored value
// depends only on the column (j * 4); confirm whether the row index was
// meant to participate. This function is not called in this file.
void fillalldata(int ** data[N][N]) {
	for (int i = 0; i < N;i++) {
		for (int j = 0; j < N; j++) {
			**data[i][j] = j * 4;
		}
	}
}
// Element-wise N x N matrix sum, one thread per element; expects a single
// block launched with an N x N thread shape (indices come from threadIdx only).
__global__ void AddMatrix(int a[][N],int b[][N],int c[][N]) {
	const int row = threadIdx.x;
	const int col = threadIdx.y;
	c[row][col] = a[row][col] + b[row][col];
}
// Prints corresponding elements of a, b and c side by side, one matrix row
// per output line.
void PrintEndValue(int a[N][N], int b[N][N], int c[N][N]) {
	for (int row = 0; row < N; row++) {
		for (int col = 0; col < N; col++)
			printf("%d %d %d", a[row][col], b[row][col], c[row][col]);
		printf("\n");
	}
}
void RunCudaFile2()
{
	// Intentionally empty placeholder (declared in cudafile2.cuh); nothing
	// in this translation unit is executed through it yet.
}
3,248 | #include <stdio.h>
#include <cufft.h>
cufftHandle plan;
cufftResult result;
// 1D FFT single precision ====================================================
// Creates the shared 1-D single-precision C2C plan and binds it to `stream`.
// On failure prints a diagnostic and leaves the error in the global `result`.
void sPlan1dCUFFT(int n, void *stream) {
  if ((result = cufftPlan1d(&plan, n, CUFFT_C2C, 1)) != CUFFT_SUCCESS) {
    printf ("Error: cufftPlan1d failed: code = %i\n",result);
    return;
  }
  if ((result = cufftSetStream(plan, (cudaStream_t)stream)) != CUFFT_SUCCESS) {
    printf ("Error: cufftSetStream failed: code = %i\n",result);
  }
}
// 2D FFT single precision ====================================================
// Creates the shared 2-D single-precision C2C plan and binds it to `stream`.
void sPlan2dCUFFT(int nx, int ny, void *stream) {
  if ((result = cufftPlan2d(&plan, nx, ny, CUFFT_C2C)) != CUFFT_SUCCESS) {
    printf ("Error: cufftPlan2d failed: code = %i\n",result);
    return;
  }
  if ((result = cufftSetStream(plan, (cudaStream_t)stream)) != CUFFT_SUCCESS) {
    printf ("Error: cufftSetStream failed: code = %i\n",result);
  }
}
// single precision execute & destroy =========================================
// Runs the shared plan as an in-place forward single-precision transform.
void sExecCUFFT(float *sdata) {
  if ((result = cufftExecC2C(plan, (cufftComplex*)sdata, (cufftComplex*)sdata, CUFFT_FORWARD)) != CUFFT_SUCCESS) {
    printf ("Error: cufftExecC2C failed: code = %i\n",result);
  }
}
// Destroys the shared single-precision plan.
void sDestroyCUFFT() {
  if ((result = cufftDestroy(plan)) != CUFFT_SUCCESS) {
    printf ("Error: cufftDestroy failed: code = %i\n",result);
  }
}
// 1D FFT double precision ====================================================
// Creates the shared 1-D double-precision Z2Z plan and binds it to `stream`.
void dPlan1dCUFFT(int n, void *stream) {
  if ((result = cufftPlan1d(&plan, n, CUFFT_Z2Z, 1)) != CUFFT_SUCCESS) {
    printf ("Error: cufftPlan1d failed: code = %i\n",result);
    return;
  }
  if ((result = cufftSetStream(plan, (cudaStream_t)stream)) != CUFFT_SUCCESS) {
    printf ("Error: cufftSetStream failed: code = %i\n",result);
  }
}
// 2D FFT double precision ====================================================
// Creates the shared 2-D double-precision Z2Z plan and binds it to `stream`.
void dPlan2dCUFFT(int nx, int ny, void *stream) {
  if ((result = cufftPlan2d(&plan, nx, ny, CUFFT_Z2Z)) != CUFFT_SUCCESS) {
    printf ("Error: cufftPlan2d failed: code = %i\n",result);
    return;
  }
  if ((result = cufftSetStream(plan, (cudaStream_t)stream)) != CUFFT_SUCCESS) {
    printf ("Error: cufftSetStream failed: code = %i\n",result);
  }
}
// double precision execute & destroy =========================================
// Runs the shared plan as an in-place forward double-precision transform.
void dExecCUFFT(double *ddata) {
  if ((result = cufftExecZ2Z(plan, (cufftDoubleComplex*)ddata, (cufftDoubleComplex*)ddata, CUFFT_FORWARD)) != CUFFT_SUCCESS) {
    printf ("Error: cufftExecZ2Z failed: code = %i\n",result);
  }
}
// Destroys the shared double-precision plan.
void dDestroyCUFFT() {
  if ((result = cufftDestroy(plan)) != CUFFT_SUCCESS) {
    printf ("Error: cufftDestroy failed: code = %i\n",result);
  }
}
/*
// Original versions from the OLCF website
// https://www.olcf.ornl.gov/tutorials/mixing-openacc-with-gpu-libraries/
//
void sLaunchCUFFT(float *sdata, int n, void *stream) {
cufftHandle plan;
cufftPlan1d(&plan, n, CUFFT_C2C, 1);
cufftSetStream(plan, (cudaStream_t)stream);
cufftExecC2C(plan, (cufftComplex*)sdata, (cufftComplex*)sdata, CUFFT_FORWARD);
cufftDestroy(plan);
}
void dLaunchCUFFT(double *ddata, int n, void *stream) {
cufftHandle plan;
cufftPlan1d(&plan, n, CUFFT_Z2Z, 1);
cufftSetStream(plan, (cudaStream_t)stream);
cufftExecZ2Z(plan, (cufftDoubleComplex*)ddata, (cufftDoubleComplex*)ddata, CUFFT_FORWARD);
cufftDestroy(plan);
}
*/
|
3,249 | #include <stdio.h>
// Increments one element of dA per thread (no bounds guard — the caller
// launches exactly 128 threads over a buffer of at least that length).
__global__ void myKernel(int64_t *dA) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    dA[gid] += 1;
}
extern "C" {
    // C-linkage entry point: bumps the first 128 elements of `ptr` (device
    // memory) by one and blocks until the kernel has finished.
    void kernel(int64_t *ptr) {
        myKernel<<<1,128>>>(ptr);
        cudaDeviceSynchronize();
    }
}
3,250 | #include "includes.h"
// Clamps each x[i] to [-threshold, threshold] and writes the result to y,
// using a grid-stride loop so any launch configuration covers `size`.
__global__ void cudaDSaturation_propagate_kernel(double* x, double* y, unsigned int size, double threshold)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride) {
double value = x[i];
// NOTE(review): when threshold == 0.0 nothing is written to y[i], leaving
// its previous contents — confirm the caller pre-fills y (a plain
// pass-through y[i] = value may have been intended for that case).
if (threshold != 0.0) {
y[i] = (value < -threshold) ? -threshold
: (value > threshold) ? threshold
: value;
}
}
}
3,251 | /*
* CUDA program to multiply matrices (fills in matrices itself)
*
* compile with:
* nvcc -o matrix_multiply matrix_multiply.cu
*
* run with:
* ./matrix_multiply
*/
#include <stdio.h>
#include <cassert>
#include <cstdlib>
//constants to control the program:
#define NTESTS 1 /* # of tests to run */
#define TILE_WIDTH 32 /* # of threads in each dimension per block */
/* #threads per block = TILE_WIDTH * TILE_WIDTH */
#define WIDTH 1024 /* matrix dimensions (assumes square matrix) */
// One thread per output element: Pd[row][col] = dot(row of Md, column of Nd).
__global__ void kernel(float* Md, float* Nd, float* Pd, int width) {
	const int row = blockIdx.y*TILE_WIDTH + threadIdx.y;
	const int col = blockIdx.x*TILE_WIDTH + threadIdx.x;
	// Guard the grid tail.
	if(row >= width || col >= width)
		return;
	float acc = 0;
	for(int k = 0; k < width; ++k)
		acc += Md[row*width + k] * Nd[k*width + col];
	Pd[row*width + col] = acc;
}
// Recomputes the matrix product on the CPU and asserts exact agreement
// with c (epsilon 0 is safe here per the caller: all values are small ints).
void verify_solution(float *a, float *b, float *c, int N) {
  float epsilon = 0;
  for (int row = 0; row < N; row++) {
    for (int col = 0; col < N; col++) {
      float expected = 0;
      for (int k = 0; k < N; k++)
        expected += a[row * N + k] * b[k * N + col];
      // Abort on the first mismatch against the GPU result.
      assert(fabs(c[row * N + col] - expected) <= epsilon);
    }
  }
}
// Aborts the program with a diagnostic if a CUDA call returned an error.
void check(cudaError_t retVal) {
  if(retVal == cudaSuccess)
    return;
  if (retVal==cudaErrorInvalidConfiguration)
    printf("Number of Threads per block is not valid");
  fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(retVal));
  exit(1);
}
// Copies M and N to the device, runs the matmul kernel, copies P back, and
// returns the elapsed time in ms (measured with CUDA events, so it includes
// both transfers as well as the kernel).
float runTest (float* M, float* N, float* P, float* Md, float* Nd, float* Pd, int size) {
//allocate timers
cudaEvent_t start;
check(cudaEventCreate(&start));
cudaEvent_t stop;
check(cudaEventCreate(&stop));
//start timer
check(cudaEventRecord(start,0));
//transfer a and b to the GPU
check(cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice));
check(cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice));
//call the kernel (ceil-div grid; kernel guards the tail itself)
int gridsize = (WIDTH+TILE_WIDTH-1)/TILE_WIDTH;
dim3 dimGrid(gridsize, gridsize);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
kernel<<<dimGrid,dimBlock>>>(Md, Nd, Pd, WIDTH);
//check if kernel encountered an error due to invalid configurations
cudaError_t err = cudaGetLastError();
check(err);
//transfer result matrix to the host (blocking; also syncs with the kernel)
check(cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost));
//stop timer and store it
check(cudaEventRecord(stop,0));
check(cudaEventSynchronize(stop));
float diff;
check(cudaEventElapsedTime(&diff, start, stop));
//deallocate timers
check(cudaEventDestroy(start));
check(cudaEventDestroy(stop));
//print and return time
printf("Time: %f ms\n", diff);
return diff;
}
// Driver: fills the input matrices, times NTESTS GPU multiplications, then
// verifies the last result against a CPU recomputation.
int main() {
float* M; //input arrays (on host)
float* N;
float* P; //output array (on host)
float* Md; //input arrays (on device)
float* Nd;
float* Pd; //output array (on device)
int size = WIDTH * WIDTH * sizeof(float); //size of matrix in bytes
//allocate memory
M = (float*) malloc(size);
N = (float*) malloc(size);
P = (float*) malloc(size);
check(cudaMalloc((void**) &Md, size));
check(cudaMalloc((void**) &Nd, size));
check(cudaMalloc((void**) &Pd, size));
//fill M and N arrays (all elements <= 2048 so results stay small)
// `cor` resets the ramp every 2048 elements, keeping values bounded so the
// epsilon-0 float comparison in verify_solution stays exact.
int cor=0;
for(int i=0; i < WIDTH * WIDTH; i++){
M[i] = N[i] = i-cor;
if(i % 2048 == 0)
cor=i;
}
float total_time = 0; //accumultate execution times for averaging
for(int i=0; i < NTESTS; i++)
total_time += runTest(M, N, P, Md, Nd, Pd, size);
printf("Avg for %d tests: %f ms and size of matrix %d\n",
NTESTS, total_time/(float)NTESTS, WIDTH);
verify_solution(M,N,P,WIDTH); //verify result
//free all memory:
free(M);
free(N);
free(P);
check(cudaFree(Md));
check(cudaFree(Nd));
check(cudaFree(Pd));
}
|
3,252 | /*****************************************************
* This file tests cuda memory management APIs.
*****************************************************/
#include <cuda_runtime.h>
#include <stdio.h>
// C = A + B, one element per thread.
// NOTE(review): no `i < n` guard — callers must launch exactly one thread
// per element. In this file nElem is a power of two divisible by the
// 1024-thread blocks, so the grid has no tail; confirm before reusing.
__global__ void vecAdd(float* A, float* B, float* C) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
//printf("From GPU %f.\n", C[i]);
}
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#define CHECK_CUDA_RESULT(N) { \
cudaError_t result = N; \
if (result != 0) { \
printf("CUDA call on line %d returned error %d\n", __LINE__, \
result); \
printf("Error: %s.\n", cudaGetErrorString(result)); \
exit(1); \
} }
// Fills the first n entries of h with the constant `data`.
void initialData(float *h, long long n, float data) {
    for (long long k = 0; k < n; k++)
        h[k] = data;
}
// Thin pass-through wrapper; the attribute value is stored in *value.
// (Return code is intentionally ignored here.)
void test_cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device) {
	cudaDeviceGetAttribute(value, attr, device);
}
// Stress-tests cudaMallocManaged: allocates `totalLoop` triples of managed
// buffers of 2^ipower floats, runs vecAdd on each, prints samples of the
// results, and releases every allocation.
void test_cudaMallocManaged(int dev, int ipower) {
	int val;
	// Check if supports managed memory
	CHECK_CUDA_RESULT(cudaDeviceGetAttribute(&val, cudaDevAttrManagedMemory, dev));
	// Check concurrent managed access, for cuda 8.0
	cudaDeviceGetAttribute(&val, cudaDevAttrConcurrentManagedAccess, dev);
	if (!val) {
		printf("*** Warn: Concurrent managed access is not supported!\n");
	}
	// Calculate number of elements and bytes
	long long nElem = ((long long) 1) << ipower;
	long long nBytes = nElem * sizeof(float);
	// One managed pointer per loop iteration; the arrays are oversized
	// (64K slots) relative to totalLoop.
	float *g_A[64*1024], *g_B[64*1024], *g_C[64*1024];
	int totalLoop = 1024*2;
	//int totalLoop = 10;
	for (int loop=0; loop<totalLoop; loop++) {
		printf("==== ==== ==== ==== Loop: %d.\n", loop);
		if (ipower < 18) {
			printf("Vector size is %lld, nbytes is %f KB\n", nElem,
			(float) nBytes / (1024.0f));
		} else {
			printf("Vector size is %lld, nbytes is %f MB\n", nElem,
			(float) nBytes / (1024.0f * 1024.0f));
		}
		// unsigned int flags = cudaMemAttachHost;
		unsigned int flags = cudaMemAttachGlobal;
		CHECK_CUDA_RESULT(cudaMallocManaged(&g_A[loop], nBytes, flags));
		CHECK_CUDA_RESULT(cudaMallocManaged(&g_B[loop], nBytes, flags));
		CHECK_CUDA_RESULT(cudaMallocManaged(&g_C[loop], nBytes, flags));
		printf("===== inital data begins...\n");
		initialData(g_A[loop], nElem, 2.0f);
		initialData(g_B[loop], nElem, 2.0f);
		printf("===== synchronize begins...\n");
		cudaDeviceSynchronize();
		printf("===== add data begins...\n");
		dim3 threadsPerBlock(1024);
		dim3 numBlocks((nElem+threadsPerBlock.x-1) / threadsPerBlock.x);
		printf("===== numBlocks is %d, threadsPerBlock is %d\n", numBlocks.x, threadsPerBlock.x);
		// Kernel invocation with N threads
		vecAdd<<<numBlocks, threadsPerBlock>>>(g_A[loop], g_B[loop], g_C[loop]);
		//cudaMemcpy(g_C[loop], g_A[loop], nElem, cudaMemcpyDeviceToDevice);
		cudaDeviceSynchronize();
	}
	printf("===== Check the results...\n");
	float ans = 4.0f;
	printf("===== ans is %f\n", ans);
	for (int i = 0; i < totalLoop; i++) {
		printf("\n======================================================\n");
		for (int j = 0; j < 8; j++) {
			//if ((g_C[i])[j] != ans)
			{
				printf("%3.0f ", (g_C[i])[j]);
			}
		}
	}
	printf("\n");
	// Free every managed allocation individually. The original called
	// cudaFree(g_A) etc., which passes the address of the HOST pointer
	// array — an invalid device pointer — and leaked all 3 * totalLoop
	// managed buffers.
	for (int loop = 0; loop < totalLoop; loop++) {
		cudaFree(g_A[loop]);
		cudaFree(g_B[loop]);
		cudaFree(g_C[loop]);
	}
	cudaDeviceReset();
}
int main(int argc, char* argv[]) {
	// Select device 0 and require unified virtual addressing before
	// exercising managed memory.
	int dev = 0;
	cudaSetDevice(dev);
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, dev);
	if (!deviceProp.unifiedAddressing) {
		printf("Device %d does not support uva memory!\n", dev);
		exit(EXIT_SUCCESS);
	}
	printf("Device %d supports uva memory!\n", dev);
	// Vector size exponent: default 2^24 elements, overridable via argv[1].
	int ipower = 20+4;
	if (argc > 1)
		ipower = atoi(argv[1]);
	test_cudaMallocManaged(dev, ipower);
}
|
3,253 | #include "includes.h"
__global__ void cuConvertHSVToRGBKernel(const float4* src, float4* dst, size_t stride, int width, int height, bool denormalize)
{
    // Per-pixel HSV -> RGB conversion; alpha (w) is passed through.
    // NOTE: `denormalize` is unused by the active code path.
    const int px = blockIdx.x*blockDim.x + threadIdx.x;
    const int py = blockIdx.y*blockDim.y + threadIdx.y;
    const int idx = py*stride + px;   // row-major, stride in elements
    if (px >= width || py >= height)
        return;

    const float4 hsv = src[idx];
    const float H = hsv.x;
    const float S = hsv.y;
    const float V = hsv.z;

    // Zero saturation: grey pixel, all channels equal V.
    if (S == 0)
    {
        dst[idx] = make_float4(V, V, V, hsv.w);
        return;
    }

    // Classic sextant-based conversion (H expected in degrees).
    const float sector = H/60.0f;
    const int sext = floor(sector);
    const float f = sector - sext;
    const float p = V*(1.0f - S);
    const float q = V*(1.0f - S*f);
    const float t = V*(1.0f - S*(1.0f-f));

    float4 rgb = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    switch (sext) {
    case 0: rgb = make_float4(V, t, p, hsv.w); break;
    case 1: rgb = make_float4(q, V, p, hsv.w); break;
    case 2: rgb = make_float4(p, V, t, hsv.w); break;
    case 3: rgb = make_float4(p, q, V, hsv.w); break;
    case 4: rgb = make_float4(t, p, V, hsv.w); break;
    case 5: rgb = make_float4(V, p, q, hsv.w); break;
    default: break;  // out-of-range hue leaves the pixel black, as before
    }

    // Write back, preserving the source alpha.
    rgb.w = hsv.w;
    dst[idx] = rgb;
}
3,254 | #include "includes.h"
__global__ void cu_kron(const float *a, const float* b, float* dst, const int rowsa, const int colsa, const int rowsdst, const int colsdst, const int n){
    // Kronecker product dst = kron(a, b); one output element per loop step,
    // grid-stride over the n = rowsdst*colsdst destination entries.
    const int step = blockDim.x * gridDim.x;
    const int colsb = colsdst / colsa;   // b's width
    const int rowsb = rowsdst / rowsa;   // b's height
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n; idx += step) {
        const int dc = idx % colsdst;    // destination column
        const int dr = idx / colsdst;    // destination row
        // Split the destination coordinate into an (a, b) index pair.
        dst[idx] = a[(dr / rowsb) * colsa + (dc / colsb)]
                 * b[(dr % rowsb) * colsb + (dc % colsb)];
    }
}
3,255 | // /usr/local/cuda/bin/nvcc task1.cu -o task1
// nvcc task1.cu -o task1
//./task1
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void solver(double *T_new, const double *T_old, int cols, int rows)
{
    // One Jacobi relaxation step: each interior cell becomes the average of
    // its four von Neumann neighbours; boundary cells are left untouched.
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const bool interior = (r > 0) && (r < rows-1) && (c > 0) && (c < cols-1);
    if (interior)
    {
        const int idx = r*cols + c;
        T_new[idx] = 0.25*(T_old[idx+cols] + T_old[idx-cols]
                         + T_old[idx-1] + T_old[idx+1]);
    }
}
int main()
{
    // Jacobi heat-diffusion solver on an M x N grid, ping-ponging between
    // two device buffers; convergence is sampled every 10 iterations.
    int M = 200; //rows
    int N = 200; //columns
    int arrSize = M * N * sizeof(double);
    int maxiter = 100;
    double err;
    int iter;
    double *T_new = (double *) malloc(arrSize);
    double *T_old = (double *) malloc(arrSize);
    int i, j;
    double temp = 1.0;

    // Interior starts at zero.
    for(i = 0; i < M; i++)
    {
        for (j = 0; j < N; j++)
        {
            T_old[i*N + j] = 0.0;
            T_new[i*N + j] = 0.0;
        }
    }
    // Top row held at `temp` (the heat source).
    for (j = 0; j < N; j++)
    {
        T_old[0*N + j] = temp;
        T_new[0*N + j] = temp;
    }
    // Bottom row held at zero.
    // BUG FIX: the last row index is M-1 (row count), not N-1 (column
    // count); the original only worked because M == N here.
    for (j = 0; j < N; j++)
    {
        T_old[(M-1)*N + j] = 0.0;
        T_new[(M-1)*N + j] = 0.0;
    }
    // Left and right columns held at zero.
    for (i = 0; i < M; i++)
    {
        T_old[i*N + 0] = 0.0;
        T_new[i*N + 0] = 0.0;
    }
    for (i = 0; i < M; i++)
    {
        T_old[i*N + N-1] = 0.0;
        T_new[i*N + N-1] = 0.0;
    }

    double *T_new_d, *T_old_d;
    cudaMalloc(&T_new_d, arrSize);
    cudaMalloc(&T_old_d, arrSize);
    printf("Allocate Device memory for matrices\n");
    cudaMemcpy(T_new_d, T_new, arrSize, cudaMemcpyHostToDevice);
    cudaMemcpy(T_old_d, T_old, arrSize, cudaMemcpyHostToDevice);
    printf("Copy matrices from the host memory to the CUDA device\n");

    const dim3 BLOCK_DIM(32, 32); // 1024 threads
    const dim3 GRID_DIM( (N-1)/BLOCK_DIM.x+1, (M-1)/BLOCK_DIM.y+1);
    printf("CUDA kernel launch with BLOCK_DIM[%d %d] GRID_DIM[%d %d]\n", BLOCK_DIM.x, BLOCK_DIM.y, GRID_DIM.x, GRID_DIM.y);

    // Two half-steps per iteration: new <- old, then old <- new.
    for (iter = 0; iter < maxiter; iter++)
    {
        solver<<<GRID_DIM, BLOCK_DIM>>>(T_new_d, T_old_d, N, M);
        solver<<<GRID_DIM, BLOCK_DIM>>>(T_old_d, T_new_d, N, M);
        if (iter%10 == 0)
        {
            // Periodic convergence check: max |T_old - T_new| on the host.
            cudaMemcpy(T_new, T_new_d, arrSize, cudaMemcpyDeviceToHost);
            cudaMemcpy(T_old, T_old_d, arrSize, cudaMemcpyDeviceToHost);
            err = 0.0;
            for(i = 1; i < (M-1); i++)
            {
                for(j = 1; j < (N-1); j++)
                {
                    if (fabs(T_old[i*N + j]-T_new[i*N + j]) > err) err = fabs(T_old[i*N + j]-T_new[i*N + j]);
                }
            }
            printf("|%d| %f\n", iter, err);
        }
    }
    cudaDeviceSynchronize();
    printf("Done solving\n");

    cudaMemcpy(T_new, T_new_d, arrSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(T_old, T_old_d, arrSize, cudaMemcpyDeviceToHost);
    printf("Copy matrices from the CUDA device to the host memory\n");

    cudaFree(T_new_d);
    cudaFree(T_old_d);
    free(T_new);
    free(T_old);
    printf("Free device and device memory\n");
    return 0;
}
|
3,256 | #include <stdio.h>
typedef struct NET_T {
int nb_layers;
int* size0;//size of output
int* size1;//size of input
double** layers;
double** biases;
} net_t;
typedef struct IMG_T{
int l;
int ll;
double* pixels;
} img_t;
net_t load_coeffs(char* file_address){
    /* Parse a network dump: a "<tag>:<nb_layers>" header, then per layer a
     * "L <idx> : <size0> <size1>" weight block (size0*size1 doubles)
     * followed by an "L <idx> : <size0>" bias block (size0 doubles).
     * The caller owns every malloc'd array in the returned net_t. */
    FILE* f = fopen(file_address, "r");
    if (f == NULL) {
        /* Fail loudly instead of crashing inside fscanf(NULL, ...). */
        fprintf(stderr, "Error: Could not open %s\n", file_address);
        exit(1);
    }
    net_t network;
    int current_layer;
    int size0, size1;
    char garbage;
    fscanf(f, "%c:", &garbage);
    fscanf(f, "%d", &(network.nb_layers));
    double** n = (double**) malloc(network.nb_layers*sizeof(double*));
    int* s0 = (int*) malloc(network.nb_layers*sizeof(int));
    int* s1 = (int*) malloc(network.nb_layers*sizeof(int));
    double** b = (double**) malloc(network.nb_layers*sizeof(double*));
    network.layers = n;
    network.biases = b;
    network.size0 = s0;
    network.size1 = s1;
    int old_layer = -1;
    while(!feof(f)){
        fscanf(f, " %c", &garbage); /* "L" */
        /* BUG FIX: the source contained the mojibake `¤t_layer`, an
         * HTML-entity corruption ("&curren" -> U+00A4) of `&current_layer`. */
        fscanf(f, " %d", &current_layer);
        /* Re-reading the same layer index means EOF re-delivered the last
         * record; stop. */
        if(current_layer == old_layer)
            break;
        old_layer = current_layer;
        fscanf(f, " %c", &garbage); /* ":" */
        fscanf(f, " %d", &size0);
        s0[current_layer] = size0;
        fscanf(f, " %d", &size1);
        s1[current_layer] = size1;
        /* Weight matrix, stored row-major. */
        double* tab = (double*) malloc(size0 * size1 * sizeof(double));
        network.layers[current_layer] = tab;
        for(int i = 0; i < size0*size1; i++){
            fscanf(f, "%lf", &(tab[i]));
        }
        fscanf(f, " %c", &garbage); /* "L" */
        fscanf(f, " %d", &current_layer);
        fscanf(f, " %c", &garbage); /* ":" */
        fscanf(f, " %d", &size0);
        /* Bias vector for this layer. */
        double* tab2 = (double*) malloc(size0 * sizeof(double));
        network.biases[current_layer] = tab2;
        for(int i = 0; i < size0; i++){
            fscanf(f, " %lf", &(tab2[i]));
        }
    }
    fclose(f);
    return network;
}
img_t* load_image(FILE* f){
// Read one 28x28 image from the already-open stream `f`.
// Expected layout: one separator character, then 784 whitespace-separated
// doubles. The caller owns the returned struct and its pixel buffer.
img_t* image = (img_t*)malloc(sizeof(img_t));
image->l = 28;
image->ll = 28;
double* img = (double*) malloc(image->l*image->ll*sizeof(double));
image->pixels = img;
char garbage;
// Consume the single character that precedes the pixel block.
fscanf(f, " %c", &garbage);
for(int i=0; i<image->l*image->ll; i++){
fscanf(f, " %lf", &(img[i]));
}
return image;
}
double relu(double in){
    // Rectified linear unit: clamp negative inputs to zero.
    return (in > 0) ? in : 0;
}
double* forward(net_t net, img_t img){
// Run the image through every fully-connected layer with ReLU activations.
// Returns a malloc'd array of net.size0[nb_layers-1] outputs; caller frees.
double* in, *out;
// Copy the pixels into the first layer's input buffer.
in = (double*)malloc(net.size1[0]*sizeof(double));
for(int i = 0; i<net.size1[0]; i++){
in[i]= img.pixels[i];
//printf("in[%d]=%lf\n", i, in[i]);
}
//for(int i=0;i<28;i++)
// for(int j =0; j<28; j++)
// in[i*8+j] = img.pixels[j*8+i];
for(int l = 0; l <net.nb_layers; l++){
int size_o = net.size0[l];
int size_i = net.size1[l];
out = (double*) malloc(size_o*sizeof(double));
// Dense layer: out = relu(W*in + bias), with W indexed row-major here
// (ligne*size_i+col) — note the commented-out line used the transposed
// layout; presumably row-major matches the file format. TODO confirm.
for(int ligne = 0; ligne < size_o; ligne++){
out[ligne] = 0;
for(int col = 0; col < size_i; col++){
//printf("net.layers[l][ligne+col*]=%lf\n",net.layers[l][ligne+col*size_o]);
out[ligne]+=in[col]*net.layers[l][ligne*size_i+col];
}
out[ligne] = relu(out[ligne]+net.biases[l][ligne]);
}
// Free the consumed input and feed this layer's output forward.
free(in);
in = out;
}
return in;
}
void print_image(img_t* im){
    // Debug helper: dump all 28*28 pixel values on one line.
    for (int px = 0; px < 28*28; px++)
        printf( "%lf ", im->pixels[px]);
    printf("\n\n");
}
int main(){
    // Load the network and 64 test images, run inference on each, and
    // report the fraction of correctly classified digits.
    char address[256] = "test.txt";
    net_t test = load_coeffs(address);
    char garbage;
    int res[64];
    printf("Reseau ok\n");
    FILE* f = fopen("images3.txt", "r");
    if (f == NULL) {
        // Fail loudly instead of crashing inside load_image.
        fprintf(stderr, "Error: Could not open images3.txt\n");
        return 1;
    }
    img_t* images[64];
    for(int i=0; i<64; i++){
        images[i]=load_image(f);
        //print_image(images[i]);
    }
    printf("Images chargees\n");
    // Expected labels follow the images, after one separator character.
    fscanf(f, " %c", &garbage);
    for(int i=0; i<64; i++){
        fscanf(f, "%d", &res[i]);
    }
    fclose(f);
    double* result;
    /* BUG FIX: `max` was an int, so `max = result[i]` truncated each
     * probability and the argmax comparison used the truncated value. */
    double max = -1;
    int i_max = -1;
    int found = 0;
    for(int k =0; k<64; k++){
        result = forward(test, *images[k]);
        max = -1;
        i_max = -1;
        // Argmax over the 10 class scores.
        for(int i =0; i < 10; i++){
            printf("proba %d: %lf\n", i, result[i]);
            if(result[i]>max){
                i_max=i;
                max=result[i];
            }
        }
        if(i_max == res[k]){
            found+=1;
        }
        free(result);  // was leaked for every image except the last
    }
    printf("Percentage = %lf\n", (float)found/64);
    return 0;
}
|
3,257 | // Invocar como: './practico_sol nombre_archivo, ejercicio'. En donde ejercicio es 1, 2 o 3.
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Macro para wrappear funciones de cuda e interceptar errores
#define CUDA_CHK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void read_file(const char*, int*);
int get_text_length(const char * fname);
// Cifrado: E(x) = (Ax + B) mod M
// Descifrado: D(x) = A^{-1}(x - B) mod M
// A y B son las claves del cifrado. A y M son co-primos.
#define A 15
#define B 27
#define M 256 // Cantidad de caracteres en la tabla ASCII extendida
#define A_MMI_M -17 // A^{-1}. Inverso multiplicativo de A modulo M
// El operador mod no es el mismo que (%) para números negativos, por lo que se provee una función módulo en el código.
__device__ int modulo(int a, int b) {
    // Mathematical modulo: maps C's remainder into [0, b) for positive b.
    const int r = a % b;
    if (r < 0)
        return r + b;
    return r;
}
// Kernel para el ejercicio 1 y 2. Cada hilo procesa un solo caracter.
__global__ void decrypt_kernel(int *device_message, int length) {
    // One thread per character: D(x) = A^{-1}(x - B) mod M, in place.
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= length)
        return;
    device_message[idx] = modulo(A_MMI_M*(device_message[idx] - B), M);
}
// Kernel para el ejercicio 3. Cada hilo procesa char_per_block caracteres.
// Para mantener el coalesced memory access cada cada warp de hilos procesa caracteres secuenciales.
// Es por esto que un hilo modifica caracteres no contiguos (separados por una distancia block_span).
// En lugar de su index sea index*char_per_block y cada hilo modifique char_per_block caracteres contiguos (lo cual no mantendría el coalesced memory access).
__global__ void decrypt_kernel_ej3(int *device_message, int char_per_block, int length) {
// Each thread decrypts char_per_block characters spaced block_span apart,
// so consecutive threads always touch consecutive addresses (coalesced).
int index = (blockIdx.x*blockDim.x) + threadIdx.x;
int block_span = blockDim.x*gridDim.x;
for(int i = 0; i < char_per_block; ++i) {
// Guard the tail: the final stripe may run past the end of the text.
if((index + i*block_span) < length) {
device_message[index + i*block_span] = modulo(A_MMI_M*(device_message[index + i*block_span] - B), M);
}
}
}
/*
. Normalmente se resolvería con un for recorriendo todo el texto usando D(x)
. Como cada caracter puede ser encriptado y desencriptado de forma independiente podemos utilizar la GPU para desencriptar el texto en paralelo.
. Para esto debemos lanzar un kernel que realice el desencriptado, asignando un thread por caracter.
*/
int main(int argc, char *argv[]) {
    // Decrypt an affine-cipher text file on the GPU using the launch
    // configuration selected by the `ejercicio` argument (1, 2 or 3).
    int *host_message;
    int *device_message;
    unsigned int size;
    const char * fname;
    int algorithm;

    if (argc < 3) {
        printf("Invocar como: './practico_sol nombre_archivo, ejercicio'. En donde ejercicio es 1, 2 o 3.\n");
        // BUG FIX: the original fell through and read argv[1]/argv[2]
        // anyway, using an uninitialized `fname` (undefined behavior).
        return 1;
    }
    fname = argv[1];
    algorithm = atoi(argv[2]);

    int length = get_text_length(fname);
    size = length * sizeof(int);

    // Host buffer: one int per character of the input file.
    host_message = (int *)malloc(size);
    read_file(fname, host_message);

    // Reservo memoria en la GPU y copio el mensaje
    CUDA_CHK(cudaMalloc((void**)& device_message, size));
    CUDA_CHK(cudaMemcpy(device_message, host_message, size, cudaMemcpyHostToDevice));

    int block_size = 1024;
    int char_per_block, bloques;
    switch(algorithm) {
    // Ej1: 1 bloque de 1024 threads (No procesa más de 1024 caracteres).
    case 1:
        bloques = 1;
        char_per_block = 1;
        break;
    // Ej2: Múltiples bloques, los necesarios para procesar todo el texto.
    case 2:
        bloques = length/block_size + (length % block_size != 0); // ceiling division
        char_per_block = 1;
        break;
    // Ej3: 128 bloques; cada thread procesa varios caracteres.
    case 3:
        bloques = 128;
        // BUG FIX: `length % bloques*block_size` parsed as
        // `(length % bloques) * block_size`; the ceiling term needs the
        // remainder modulo the *total* thread count.
        char_per_block = length/(bloques*block_size) + (length % (bloques*block_size) != 0);
        break;
    default:
        printf("Algoritmo seleccionado invalido.\n");
        printf("Invocar como: './practico_sol nombre_archivo, ejercicio'. En donde ejercicio es 1, 2 o 3.\n");
        // BUG FIX: the original continued and launched a kernel with an
        // uninitialized `bloques`/`char_per_block`.
        CUDA_CHK(cudaFree(device_message));
        free(host_message);
        return 1;
    }

    dim3 tamGrid(bloques); // Grid dimension
    dim3 tamBlock(block_size); // Block dimension

    // Ejecuto el kernel (algorithm is guaranteed to be 1, 2 or 3 here).
    if(algorithm == 1 || algorithm == 2) {
        decrypt_kernel<<<tamGrid, tamBlock>>>(device_message, length);
    } else {
        decrypt_kernel_ej3<<<tamGrid, tamBlock>>>(device_message, char_per_block, length);
    }

    // Copio los datos nuevamente a la memoria de la CPU
    CUDA_CHK(cudaMemcpy(host_message, device_message, size, cudaMemcpyDeviceToHost));

    // Despliego el mensaje
    for (int i = 0; i < length; i++) {
        printf("%c", (char)host_message[i]);
    }
    printf("\n");

    CUDA_CHK(cudaFree(device_message));
    free(host_message);
    return 0;
}
int get_text_length(const char * fname) {
    // Return the size of the file in bytes by seeking to its end.
    FILE *f = NULL;
    f = fopen(fname, "r");
    if (f == NULL) {
        // BUG FIX: the original called ftell(NULL) and crashed when the
        // file was missing; fail with the same message style as read_file.
        fprintf(stderr, "Error: Could not find %s file \n", fname);
        exit(1);
    }
    size_t pos = ftell(f);        // current offset (0 on a fresh stream)
    fseek(f, 0, SEEK_END);
    size_t length = ftell(f);     // offset at EOF == byte count
    fseek(f, pos, SEEK_SET);      // restore the original position
    fclose(f);
    return length;
}
void read_file(const char * fname, int* input) {
    // Read the whole text file, widening each byte into the int buffer.
    // The caller must size `input` via get_text_length(fname).
    FILE *f = fopen(fname, "r");
    if (f == NULL){
        fprintf(stderr, "Error: Could not find %s file \n", fname);
        exit(1);
    }
    for (int c = getc(f); c != EOF; c = getc(f)) {
        *input = c;
        ++input;
    }
    fclose(f);
}
|
3,258 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cstdlib>
#include <ctime>
#define max 20
#define min 0
#define dato 10
using namespace std;
__global__ void busqueda_bin(int* x, int *a, int* flag)
{
    // One thread per element: flag[i] = 1 where a[i] equals the key *x.
    const int i = threadIdx.x;
    flag[i] = (a[i] == *x) ? 1 : 0;
}
int main()
{
    // Fill an array with random values and mark, on the GPU, every
    // position that matches the searched value `dato`.
    int* a;
    int* x;
    int* flag;
    int* d_flag = 0;
    int* dev_a = 0;
    int* b = 0;
    x = new int[1];
    a = new int[max];
    flag = new int[max];
    *x = dato;
    cout << "Busqueda Binaria" << endl << "Dato: " << *x << endl << "Datos:\t";
    srand(time(0));
    for (int i = min; i < max; i++)
        * (a + i) = rand()%20;
    for (int i = min; i < max; i++)
        cout << *(a + i) << "\t";
    cudaError_t cudaStatus;
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudasetdevice failed! do you have a cuda-capable gpu installed?");
        goto Error;
    }
    //Reservar memoria en GPU
    cudaStatus = cudaMalloc((void**)& d_flag, max * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)& dev_a, max * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)& b, sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, max * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(b, x, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    //Launch a kernel on the GPU with one thread for each element.
    busqueda_bin<<<1, max>>>(b, dev_a, d_flag);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(flag, d_flag, max * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(b);
    cudaFree(dev_a);
    cudaFree(d_flag);
    // NOTE(review): on an early goto, `flag` is printed uninitialized.
    cout << endl << "Flag:\t";
    for (int i = min; i < max; i++)
        cout << *(flag + i) << "\t";
    // BUG FIX: `delete[] a, flag;` used the comma operator, so only `a`
    // was freed; `flag` and `x` leaked.
    delete[] a;
    delete[] flag;
    delete[] x;
    return 0;
}
|
3,259 | #include <stdio.h>
#include <stdlib.h>
#include<algorithm>
using namespace std;
#define BLOCKSIZE 256
#define DATASIZE 101
//INSERT CODE HERE---------------------------------
//Counting Sort
__global__ void countingData(int * pSource_d,int *offsetArray,int input_size){
// Per-block histogram of values 0..DATASIZE-1 built in shared memory,
// then each block's partial counts are merged into the global offsetArray.
// NOTE(review): offsetArray must be zeroed by the caller before launch —
// cudaMalloc does not zero-initialize device memory.
//Shared memory for saving data counts
__shared__ int dataCounter_s[DATASIZE];
int tx=threadIdx.x;
int gx=blockIdx.x*blockDim.x + tx;
//set initial value of array elements to 0
if(tx<DATASIZE)
dataCounter_s[tx]=0;
__syncthreads();
if(gx<input_size)
//atomically counts data
atomicAdd(&(dataCounter_s[pSource_d[gx]]),1);
__syncthreads();
//add all shared memory values
if(tx<DATASIZE)
atomicAdd(&(offsetArray[tx]),dataCounter_s[tx]);
}
//Prefix Sum(Double-Buffered Kogge-Stone Parallel Scan Algorithm)
__global__ void prefixSum(int * pResult_d, int * offsetArray){
    // Double-buffered Kogge-Stone inclusive scan over the DATASIZE counts,
    // then each thread emits its value's run into the sorted output.
    // Must be launched as <<<1, DATASIZE>>>.
    __shared__ int source[DATASIZE];
    __shared__ int destination[DATASIZE];
    int tx=threadIdx.x;
    int temp;
    int stride=1;
    int index,i;
    source[tx]=offsetArray[tx];
    __syncthreads();
    while(1){
        index=DATASIZE-tx-1;
        if(index>=stride)
            destination[index]=source[index]+source[index-stride];
        else
            destination[index]=source[index];
        __syncthreads();
        stride*=2;
        if(stride>DATASIZE)
            break;
        //Swap between arrays
        temp=source[tx];
        source[tx]=destination[tx];
        destination[tx]=temp;
        // BUG FIX: barrier after the swap. Each thread swaps element `tx`
        // but the next round reads element `DATASIZE-tx-1-stride`, written
        // by a *different* thread — without this barrier that is a
        // shared-memory race (the swap could still be in flight).
        __syncthreads();
    }
    // destination[tx] now holds the inclusive prefix sum: thread tx writes
    // count[tx] copies of the value tx into its output slice.
    if(tx==0){
        for(i=0;i<destination[tx];i++)
            pResult_d[i]=tx;
    }
    else{
        for(i=destination[tx-1];i<destination[tx];i++)
            pResult_d[i]=tx;
    }
}
void verify(int* src, int*result, int input_size){
    // Sort the reference data in place, then count element-wise matches
    // against the GPU-sorted result and report pass/fail.
    sort(src, src+input_size);
    long long matches = 0;
    for (int i = 0; i < input_size; ++i)
        matches += (src[i] == result[i]) ? 1 : 0;
    if (matches == input_size)
        printf("TEST PASSED\n\n");
    else
        printf("TEST FAILED\n\n");
}
void genData(int* ptr, unsigned int size) {
    // Fill the buffer with `size` random values in [0, 100].
    for (unsigned int i = 0; i < size; ++i)
        ptr[i] = (int)(rand() % 101);
}
int main(int argc, char* argv[]) {
    // Counting sort on the GPU: histogram kernel + single-block scan that
    // expands the counts into the sorted output, then host verification.
    int* pSource = NULL;
    int* pResult = NULL;
    int input_size=0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (argc == 2)
        input_size=atoi(argv[1]);
    else
    {
        printf("\n Invalid input parameters!"
               "\n Usage: ./sort <input_size>"
               "\n");
        exit(0);
    }
    //allocate host memory
    pSource=(int*)malloc(input_size*sizeof(int));
    pResult=(int*)malloc(input_size*sizeof(int));
    // generate source data
    genData(pSource, input_size);
    // start timer
    cudaEventRecord(start, 0);
    //Device Memory
    int *pSource_d;
    int *pResult_d;
    int *offsetArray;
    //Device memory allocation
    cudaMalloc((void**)&pSource_d,input_size*sizeof(int));
    cudaMalloc((void**)&pResult_d,input_size*sizeof(int));
    cudaMalloc((void**)&offsetArray,DATASIZE*sizeof(int));
    // BUG FIX: cudaMalloc does not zero device memory, but countingData
    // atomically accumulates into offsetArray — it must start at zero.
    cudaMemset(offsetArray, 0, DATASIZE*sizeof(int));
    //Copy Host to Device
    cudaMemcpy(pSource_d,pSource,input_size*sizeof(int),cudaMemcpyHostToDevice);
    //launch kernels
    dim3 dimGrid(ceil((double)input_size/BLOCKSIZE),1,1);
    dim3 dimBlock(BLOCKSIZE,1,1);
    countingData<<< dimGrid, dimBlock>>>(pSource_d,offsetArray,input_size);
    cudaDeviceSynchronize();
    prefixSum<<<1,DATASIZE>>>(pResult_d,offsetArray);
    cudaDeviceSynchronize();
    //Copy Device to Host
    cudaMemcpy(pResult,pResult_d,input_size*sizeof(int),cudaMemcpyDeviceToHost);
    //Free Device Memory
    cudaFree(pSource_d);
    cudaFree(pResult_d);
    cudaFree(offsetArray);
    // end timer
    float time;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("elapsed time = %f msec\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Verifying results...");
    fflush(stdout);
    verify(pSource, pResult, input_size);
    fflush(stdout);
    // free host buffers (previously leaked)
    free(pSource);
    free(pResult);
    return 0;
}
|
3,260 | #include "includes.h"
/** Modifed version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* added feature extraction with bilinear interpolation
*
* Last modified by Christopher B. Choy <chrischoy@ai.stanford.edu> 12/23/2016
*/
// Includes
// Constants used by the program
#define BLOCK_DIM 16
#define DEBUG 0
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
/**
* Gathers k-th smallest distances for each column of the distance matrix in the top.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
/**
* Computes the square root of the first line (width-th first element)
* of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_nb number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_nb number of query points ; width of the matrix
* @param dim dimension of points ; height of the matrices
* @param k number of neighbor to consider
* @param dist_host distances to k nearest neighbors ; pointer to linear matrix
* @param dist_host indexes of the k nearest neighbors ; pointer to linear matrix
*
*/
__global__ void cuParallelSqrt(float *dist, int width, int k){
    // In-place square root over the k x width block of squared distances.
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= (unsigned int)width || row >= (unsigned int)k)
        return;
    const unsigned int idx = row*width + col;
    dist[idx] = sqrt(dist[idx]);
}
3,261 | #include <cuda.h>
#include <cuda_profiler_api.h>
#include <iostream>
#define N 1024
using namespace std;
__global__ void transpose(int A[][N])//,int B[][N],int C[][N])
{
    // In-place transpose: thread `row` swaps its sub-diagonal entries with
    // the mirrored column. Launched as a single block of N threads.
    const int row = threadIdx.x;
    for (int col = 0; col < row; ++col)
    {
        const int tmp = A[row][col];
        A[row][col] = A[col][row];
        A[col][row] = tmp;
    }
}
int A[N][N];

int main(int argc, char *argv[])
{
    // Fill A with a deterministic pattern.
    for (int r = 0; r < N; ++r)
        for (int c = 0; c < N; ++c)
            A[r][c] = 2*r + c;

    // Device copy of the matrix.
    int (*A_D)[N];
    cudaMalloc((void**)&A_D, (N*N)*sizeof(int));
    cudaMemcpy(A_D, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);

    // In-place transpose with a single block of N threads.
    transpose<<<1,N>>>(A_D);//,B_D,C_D);

    cudaMemcpy(A, A_D, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(A_D);
    return 0;
}
3,262 | /**
* The memory shared between the threads of each block.
*/
extern __shared__ float sdata[];
/**
* Arg max function along a row.
* @param n the number of column.
* @param i the row index.
* @param a the array (data).
* @param r the output buffer.
* @return nothing.
*/
extern "C"
__global__ void arg_max_row(int n, int i, float *a, float *r)
{
    // Serial scan of row i of a row-major (rows x n) matrix: write the
    // column index of the row maximum, as a float, into r[0].
    const int base = n * i;
    float best = a[base];
    float bestIdx = 0;
    for (int col = 1; col < n; col++) {
        const float v = a[base + col];
        if (v > best) {
            best = v;
            bestIdx = (float)col;
        }
    }
    r[0] = bestIdx;
}
/**
* Sum the elements of the array.
* @param n the number of elements.
* @param a the array (data).
* @param r the output buffer.
* @return nothing.
*/
extern "C"
__global__ void sum(int n, float *a, float *r)
{
// Block-stride partial sums into dynamic shared memory — one slot per
// thread, so the launch must supply blockDim.x * sizeof(float) bytes —
// then thread 0 serially combines the partials and stores the total.
sdata[threadIdx.x] = 0;
for (int i = threadIdx.x; i < n; i += blockDim.x)
sdata[threadIdx.x] += a[i];
__syncthreads();
if (threadIdx.x == 0) {
for (int i = 1; i < blockDim.x; i++)
sdata[0] += sdata[i];
r[0] = sdata[0];
}
}
/**
* Array power element wise.
* @param n the number of elements.
* @param a the array (output buffer).
* @param s the power.
* @return nothing.
*/
extern "C"
__global__ void pow_array(int n, float *a, int s)
{
    // In-place element-wise power with a block-stride loop.
    // NOTE(review): strides by blockIdx.x only — every thread of a block
    // repeats the same update; presumably launched with 1 thread/block.
    for (int i = blockIdx.x; i < n; i += gridDim.x) {
        // Use powf explicitly: `pow(float, int)` can resolve through the
        // double-precision path depending on toolkit, which is needless
        // cost in a float kernel.
        a[i] = powf(a[i], (float)s);
    }
}
/**
* Array subtraction element wise.
* @param n the number of elements.
* @param a1 the first array (output buffer).
* @param a2 the second array.
* @return nothing.
*/
extern "C"
__global__ void sub_array(int n, float *a1, float *a2)
{
// Element-wise a1[i] -= a2[i], striding by the number of blocks.
// NOTE(review): the loop indexes by blockIdx.x only, so every thread of a
// block repeats the same read-modify-write; this is only safe/efficient
// when launched with one thread per block — TODO confirm launch config.
for (int i = blockIdx.x; i < n; i += gridDim.x) {
a1[i] -= a2[i];
}
}
/**
* Array multiplication element wise.
* @param n the number of elements.
* @param a1 the first array (output buffer).
* @param a2 the second array.
* @return nothing.
*/
extern "C"
__global__ void mul_array(int n, float *a1, float *a2)
{
// Element-wise a1[i] *= a2[i], striding by the number of blocks.
// NOTE(review): indexes by blockIdx.x only, so every thread of a block
// repeats the same read-modify-write; presumably launched with one
// thread per block — TODO confirm launch config.
for (int i = blockIdx.x; i < n; i += gridDim.x) {
a1[i] *= a2[i];
}
}
/**
* Multiplication element wise with a scalar.
* @param n the number of elements.
* @param a the array (output buffer).
* @param s the scalar.
* @return nothing.
*/
extern "C"
__global__ void mul_float(int n, float *a, float s)
{
    // Scale every element of `a` by the scalar `s`, one block per stride.
    // NOTE(review): indexes by blockIdx.x only — every thread of a block
    // repeats the same update; presumably launched with 1 thread/block.
    for (int idx = blockIdx.x; idx < n; idx += gridDim.x)
        a[idx] *= s;
}
/**
* Array addition element wise.
* @param n the number of elements.
* @param a1 the first array (output buffer).
* @param a2 the second array.
* @return nothing.
*/
extern "C"
__global__ void add_array(int n, float *a1, float *a2)
{
    // Element-wise a1 += a2, one block per stride step.
    // NOTE(review): indexes by blockIdx.x only — every thread of a block
    // repeats the same update; presumably launched with 1 thread/block.
    for (int idx = blockIdx.x; idx < n; idx += gridDim.x)
        a1[idx] += a2[idx];
}
|
3,263 | /*
Authors: Erkin Verbeek, Prabhat Bhootra
Date: 12/2/2019
* *** with 3 x 3 patch ***
*
M = 10000
N = 10000
The elapsed time: 71.6 ms
M = 30000
N = 10000
The elapsed time: 215.1 ms
M = 20000
N = 20000
The elapsed time: 227.6 ms
* *** with 7 x 7 patch ***
M = 10000
N = 10000
The elapsed time: 225.5 ms
M = 30000
N = 10000
The elapsed time: 766.0 ms
M = 20000
N = 20000
The elapsed time: 964.6 ms
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_VAL 255 // the max value for each pixel
#define M 10000
#define N 10000
__global__ void blurKernel(unsigned char * in_img,
    unsigned char * out_img, int patchWidth) {
    // Box blur: patchWidth is the *radius*, so each output pixel is the
    // mean of a (2*patchWidth+1)^2 window clipped at the image borders.
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= N || row >= M)
        return;

    int total = 0;
    int count = 0;
    for (int dr = -patchWidth; dr <= patchWidth; dr++) {
        for (int dc = -patchWidth; dc <= patchWidth; dc++) {
            const int r = row + dr;
            const int c = col + dc;
            // Skip neighbours that fall outside the image.
            if (r >= 0 && r < M && c >= 0 && c < N) {
                total += in_img[r*N + c];
                count++;
            }
        }
    }
    out_img[row*N + col] = (unsigned char)(total / count);
}
int main(int argc, char* argv[]) {
// Generate a random M x N greyscale image, blur it on the GPU, and time
// the kernel. The launch argument 3 below is the blur *radius*, so the
// averaged window is 7 x 7 (matching the "7 x 7 patch" timings above).
int i;
unsigned char * img; // original IMG
unsigned char * dev_img; // original IMG
unsigned char * dev_newImg; // new IMG
int size = M * N * sizeof(unsigned char);
// dimensions of the grid and the block (16x16 threads per block)
dim3 DimGrid((N-1)/16 + 1, (M-1)/16+1, 1);
dim3 DimBlock(16, 16, 1);
// allocates memory on device
cudaMalloc((void **)&dev_img, size); // device space for original img
cudaMalloc((void **)&dev_newImg, size); // device space for new img
// allocate memory on host
img = (unsigned char * ) malloc(size);
srand(time(0)); // seed the random number generator
// we iterate through img and fill with random values
for (i = 0; i < M * N; i++) {
img[i] = rand() % (MAX_VAL + 1);
//printf("img[%d] = %d\n", i, img[i]);
}
printf("M = %d\nN = %d\n", M, N);
cudaMemcpy(dev_img, img, size, cudaMemcpyHostToDevice); // copy image from host to device
float elapsedTime; // timing stuff
cudaEvent_t start,end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start,0);
// launch blurKernel over the whole image with radius 3 (7x7 window)
blurKernel<<<DimGrid, DimBlock>>>(dev_img, dev_newImg, 3);
cudaEventRecord(end, 0); // stop timer
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsedTime,start,end);
// copy the blurred image back to the host (reuses the input buffer)
cudaMemcpy(img, dev_newImg, size, cudaMemcpyDeviceToHost); // copy new image back to host
/*
// write out new image so we can see if we did anything
for (i = 0; i < M * N; i++)
printf("newImg[%d] = %d\n", i, img[i]);
*/
// print elapsed time
printf("The elapsed time: %3.1f ms\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(end);
// free up heap space variables
free(img);
cudaFree(dev_img);
cudaFree(dev_newImg);
return 0;
}
|
3,264 | #include <stdio.h>
__device__
int geti() {
    // Flatten the 3-D grid and 3-D block coordinates (z-major, x-minor)
    // into a single linear thread index.
    int idx = (blockIdx.z*gridDim.y + blockIdx.y)*gridDim.x + blockIdx.x;
    idx = (idx*blockDim.z + threadIdx.z)*blockDim.y + threadIdx.y;
    idx = idx*blockDim.x + threadIdx.x;
    return idx;
}
__global__
void process_kernel1(const float *A, const float *B, float *C, const int numElements)
{
    // C[i] = sin(A[i]) + cos(B[i]) for every valid flattened thread index.
    const int idx = geti();
    if (idx >= numElements)
        return;
    C[idx] = sin(A[idx]) + cos(B[idx]);
}
__global__
void process_kernel2(const float *A, float *C, const int numElements)
{
    // C[i] = log(A[i]) for every valid flattened thread index.
    const int idx = geti();
    if (idx >= numElements)
        return;
    C[idx] = log(A[idx]);
}
__global__
void process_kernel3(const float *A, float *C, const int numElements)
{
    // C[i] = sqrt(A[i]) for every valid flattened thread index.
    const int idx = geti();
    if (idx >= numElements)
        return;
    C[idx] = sqrt(A[idx]);
}
|
3,265 | /*
* Copyright (c) 2020 Yaroslav Pogrebnyak <yyyaroslav@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
extern "C" {

// Alpha-blend a single plane of `overlay` (overlay_w x overlay_h, placed
// with its top-left corner at (x_position, y_position)) onto `main`.
// One thread per main-image pixel; threads outside the overlay rectangle
// return immediately. When alpha_linesize is 0 the overlay is opaque;
// otherwise per-pixel alpha is sampled via the alpha_adj_x/alpha_adj_y
// scale factors (which presumably account for chroma subsampling —
// verify against the caller).
__global__ void Overlay_Cuda(
int x_position, int y_position,
unsigned char* main, int main_linesize,
unsigned char* overlay, int overlay_linesize,
int overlay_w, int overlay_h,
unsigned char* overlay_alpha, int alpha_linesize,
int alpha_adj_x, int alpha_adj_y)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Outside the overlay rectangle: leave the main image untouched.
if (x >= overlay_w + x_position ||
y >= overlay_h + y_position ||
x < x_position ||
y < y_position ) {
return;
}
// Coordinates within the overlay image.
int overlay_x = x - x_position;
int overlay_y = y - y_position;
float alpha = 1.0;
if (alpha_linesize) {
// Per-pixel alpha in [0, 1] from the (possibly subsampled) alpha plane.
alpha = overlay_alpha[alpha_adj_x * overlay_x + alpha_adj_y * overlay_y * alpha_linesize] / 255.0f;
}
// Standard "over" blend: out = a*overlay + (1-a)*main.
main[x + y*main_linesize] = alpha * overlay[overlay_x + overlay_y * overlay_linesize] + (1.0f - alpha) * main[x + y*main_linesize];
}

}
|
3,266 | #include "includes.h"
__global__ void sum(int *a, int *b, int *c, int N) {
    // Element-wise vector addition: c = a + b, one thread per element.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
3,267 | #include "includes.h"
// Map each scalar in `data` to an RGBA color (blue->green below the split
// point, blue->red above it) using the precomputed [min, max] range in
// `minmax`. Writes one uchar4 per thread into `colors`.
// NOTE(review): `median` is (mx - mn)/2, i.e. half the range, not the midpoint
// (mn + mx)/2 — for mn != 0 these differ; confirm which was intended.
// NOTE(review): the data index divides by yy twice and the color index does
// not — presumably `data` and `colors` have different layouts; verify against
// the launch configuration.
__global__ void BuildColorFieldDev(float* data, uchar4* colors, float* minmax, uint xx, uint yy)
{
float mn = minmax[0];
float mx = minmax[1];
float median = (mx - mn)/2.0f;
const uint idx = threadIdx.x*gridDim.x/yy/yy + blockIdx.x/xx;
float val = data[idx];
uchar4 col;
#if 1
// Two-segment gradient: green->blue for val below the split, red fading in above.
if(val < median)
{
float alpha = (val - mn)/(median - mn);
col.x = 0;
col.y = 255*(1-alpha);
col.z = 255*alpha;
}else
{
float alpha = (val - median)/(mx - median);
col.x = 255*alpha;
col.y = 0;
col.z = 255*(1-alpha);
}
#else
// Alternative grayscale mapping (disabled).
float alpha = 1;
if(!(val < 0.1 || mn == mx || mx < 0.1))
alpha = val/(mx-mn);
col.x = 255*(1-alpha);
col.y = 255*(1-alpha);
col.z = 255*(1-alpha);
#endif
col.w = 255;
const uint col_idx = threadIdx.x*gridDim.x + blockIdx.x;
colors[col_idx] = col;
}
3,268 |
// In-place Gauss-Jordan inversion: applies the same row operations to I (which
// must start as the identity) as are used to reduce A, so on exit I = A^-1.
// Each thread owns element (x, y); n[0] is the matrix order.
// NOTE: __syncthreads() only synchronizes threads within one block, so this
// kernel is only correct when a single block covers the whole n x n matrix.
__global__ void invert(double * I, double * A, const int * n){
    for (int i = 0; i<n[0]; i++){
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        int y = blockIdx.y * blockDim.y + threadIdx.y;
        // Normalize the off-diagonal entries of pivot row i by the pivot value.
        if (x < n[0] && y < n[0])
            if (x == i && x!=y){
                I[x*n[0] + y] /= A[i*n[0] + i];
                A[x*n[0] + y] /= A[i*n[0] + i];
            }
        __syncthreads();
        // Normalize the pivot element itself (done after, so the pivot is
        // still unscaled while the rest of the row divides by it).
        if (x < n[0] && y < n[0])
            if (x == y && x == i){
                I[x*n[0] + y] /= A[i*n[0] + i];
                A[x*n[0] + y] /= A[i*n[0] + i];
            }
        __syncthreads();
        // Eliminate column i from every row except the pivot row.
        if (x < n[0] && y < n[0]){
            if (x != i){
                I[x*n[0] + y] -= I[i*n[0] + y] * A[x*n[0] + i];
                if (y != i){
                    A[x*n[0] + y] -= A[i*n[0] + y] * A[x*n[0] + i];
                }
            }
        }
        __syncthreads();
        // Explicitly zero the eliminated column entries.
        if (x < n[0] && y < n[0]){
            if (x != i){
                if (y == i){
                    A[x*n[0] + y] = 0;
                }
            }
        }
    }
    // Removed: a trailing loop in which EVERY thread overwrote the entire A
    // matrix with 5.0 (leftover debug code). It destroyed the reduced A and
    // performed n^2 redundant writes per thread.
}
|
3,269 | //matrix_mult.cu
//template provided by Prof. Andrew Grimshaw
//implementation by Jerry Sun(ys7va) 2017.05.08
//the program will take 4 parameters to specify the size of two matrices
//if only provided 1 value N, it will calculate the multiplication of two N * N matrices
#include<stdio.h>
#include<sys/time.h>
#include<stdlib.h>
#include<iostream>
#include<cuda_runtime.h>
using namespace std;
//Macro to specify block size
#define T_block 32
//----------------------------------- Structures and Globals---------------------------------------------
//store dimension of a matrix
// Row/column extents of a 2D matrix stored in a flat row-major array.
typedef struct {
int dimension1;
int dimension2;
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
// *_CPU is for CPU calculation
// C_GPU_result is for storing GPU calculation result
float *A_CPU, *B_CPU, *C_CPU, *C_GPU_result;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeHost(); //allocate and initialize all necessary memory on host machine
void computeCpuMMM(); //matrix multiplication on CPU
void computeGpuMMM(); //matrix multiplication on GPU, may use different kernel method
void copyMatricesToGPU(); //copy value in A_CPU & B_CPU to A_GPU & B_GPU respectively
void copyResultFromGPU(); //copy calculated value in C_GPU back into C_GPU_result
void compareHostAndGpuOutput(); //check if the result in C_GPU_result and C_CPU is identical
void die(const char *error); //end the program
void check_error(cudaError e); //check memory allocation on cuda
long long start_timer(); //timer for measurement
long long stop_timer(long long start_time, const char *name); //timer for measurement
//----------------------------------- CUDA function definitions -----------------------------------------
//baseline approach for kernel method, each thread is responsible for one cell in final result
__global__ void mult_matrix_baseline(float *A, float *B, float *C, int dim_1, int dim_2, int dim_3);
//shared memory version for kernel method, a block of threads read data from DRAM together into shared
//memory and then do calculation block-wise
__global__ void mult_matrix_shared(float *A, float *B, float *C, int dim_1, int dim_2, int dim_3);
//-------------------------------------------------------------------------------------------------------
// Entry point: parses up to four dimension arguments (defaults produce square
// 100x100 matrices), validates that A's columns match B's rows, then runs and
// times the GPU multiply. The CPU path and result comparison are commented
// out for large problem sizes.
int main(int argc, char **argv) {
//parse the command-line argument
A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
C_MD.dimension1 = A_MD.dimension1;
C_MD.dimension2 = B_MD.dimension2;
printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);
//if dim2 of A and dim1 of B is different then they can't be multiplied
if (A_MD.dimension2 != B_MD.dimension1) die("Dimension inconsistent for two matrices");
//allocate all necessary memory on host
allocateAndInitializeHost();
// matrix multiplication in the CPU, commented for large-scale
// long long CPU_start_time = start_timer();
// computeCpuMMM();
// long long CPU_time = stop_timer(CPU_start_time, "\nCPU");
// matrix multiplication on the GPU
long long GPU_start_time = start_timer();
computeGpuMMM();
long long GPU_time = stop_timer(GPU_start_time, "\tTotal");
//check the final result
//commented when CPU result is not available
//compareHostAndGpuOutput();
return 0;
}
// Baseline MM kernel: one thread computes one output cell C[r][c].
// A is dim_1 x dim_2 (row stride dim_2), B is dim_2 x dim_3 (row stride dim_3),
// C is dim_1 x dim_3 (row stride dim_3).
__global__ void mult_matrix_baseline(float *A, float *B, float *C, int dim_1, int dim_2, int dim_3) {
    // retrieve the corresponding row & col in the final output matrix
    int r = blockIdx.x * T_block + threadIdx.x;
    int c = blockIdx.y * T_block + threadIdx.y;
    // check if index is in bound
    if (r < dim_1 && c < dim_3) {
        float sum = 0;
        // inner product of row r of A with column c of B
        for (int i = 0; i < dim_2; i++) {
            // FIX: A's row stride is dim_2 (was dim_1) and B's is dim_3 (was
            // dim_2); the old indexing was only correct for square matrices.
            sum += A[r * dim_2 + i] * B[i * dim_3 + c];
        }
        C[r * dim_3 + c] = sum;
    }
}
// Compute C = A * B
// Tiled MM kernel: C = A * B. Each T_block x T_block thread block stages one
// tile of A and one tile of B in shared memory per iteration, accumulating the
// partial inner product in a register.
// A is dim_1 x dim_2 (row stride dim_2), B is dim_2 x dim_3 (stride dim_3).
__global__ void mult_matrix_shared(float *A, float *B, float *C, int dim_1, int dim_2, int dim_3) {
    int b_x = blockIdx.x;
    int b_y = blockIdx.y;
    int t_x = threadIdx.x;
    int t_y = threadIdx.y;
    // row & col of the output cell owned by this thread
    int r = b_y * T_block + t_y;
    int c = b_x * T_block + t_x;
    float s = 0;
    __shared__ float block_A[T_block][T_block];
    __shared__ float block_B[T_block][T_block];
    // whether this thread's output cell is inside C
    bool inplace = r < dim_1 && c < dim_3;
    // iterate over tiles along the shared dimension (ceiling division)
    for (int m = 0; m < (dim_2 - 1) / T_block + 1; m++) {
        // column of the A element this thread loads; zero-pad out-of-range
        int col = m * T_block + t_x;
        // FIX: A's row stride is dim_2, not dim_1 — the old index was only
        // correct for square matrices.
        block_A[t_y][t_x] = (r < dim_1 && col < dim_2) ? A[r * dim_2 + col] : 0.0f;
        // row of the B element this thread loads; zero-pad out-of-range
        int row = m * T_block + t_y;
        block_B[t_y][t_x] = (row < dim_2 && c < dim_3) ? B[row * dim_3 + c] : 0.0f;
        // wait until the whole tile is staged
        __syncthreads();
        if (inplace)
            for (int i = 0; i < T_block; i++)
                s += block_A[t_y][i] * block_B[i][t_x];
        // wait until everyone is done reading before the next tile overwrites
        __syncthreads();
    }
    if (inplace)
        C[r * dim_3 + c] = s;
}
// GPU version MM
// GPU matrix multiply driver: uploads A and B, launches the tiled kernel on a
// grid covering C (ceiling division), and downloads the result.
void computeGpuMMM() {
    copyMatricesToGPU();
    // only three distinct dimensions matter: rows of C, the shared dimension,
    // and columns of C
    int dim_1 = A_MD.dimension1;
    int dim_2 = A_MD.dimension2;
    int dim_3 = B_MD.dimension2;
    dim3 thread(T_block, T_block);
    // ceiling function handles dimensions not divisible by T_block
    dim3 grid((dim_1 - 1) / T_block + 1, (dim_3 - 1) / T_block + 1);
    long long exec_start_time = start_timer();
    mult_matrix_shared <<<grid, thread>>> (A_GPU, B_GPU, C_GPU, dim_1, dim_2, dim_3);
    // a kernel launch returns no error code: pick up launch failures here
    check_error(cudaGetLastError());
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() also
    // surfaces asynchronous execution errors
    check_error(cudaDeviceSynchronize());
    stop_timer(exec_start_time, "\tkernal excution time");
    copyResultFromGPU();
}
// allocate and initialize A and B using a random number generator,
// also initialize C_CPU and C_GPU_resul
// Allocate the host-side matrices and fill A and B with pseudo-random values
// in [0, 0.999]; C_CPU and C_GPU_result are allocated but left uninitialized.
void allocateAndInitializeHost() {
    size_t bytesA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
    A_CPU = (float*) malloc(bytesA);
    srand(time(NULL));
    for (int row = 0; row < A_MD.dimension1; row++)
        for (int col = 0; col < A_MD.dimension2; col++)
            A_CPU[row * A_MD.dimension2 + col] = (rand() % 1000) * 0.001;

    size_t bytesB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
    B_CPU = (float*) malloc(bytesB);
    for (int row = 0; row < B_MD.dimension1; row++)
        for (int col = 0; col < B_MD.dimension2; col++)
            B_CPU[row * B_MD.dimension2 + col] = (rand() % 1000) * 0.001;

    size_t bytesC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
    C_GPU_result = (float*) malloc(bytesC);
    C_CPU = (float*) malloc(bytesC);
}
// allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
// allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
// (C_GPU is allocated but not initialized — the kernel overwrites every cell).
// The whole transfer is timed and reported.
void copyMatricesToGPU() {
long long memory_start_time = start_timer();
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &A_GPU, sizeofA));
check_error(cudaMemcpy(A_GPU, A_CPU, sizeofA, cudaMemcpyHostToDevice));
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &B_GPU, sizeofB));
check_error(cudaMemcpy(B_GPU, B_CPU, sizeofB, cudaMemcpyHostToDevice));
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &C_GPU, sizeofC));
stop_timer(memory_start_time, "\nGPU:\tTransfer to GPU");
}
// copy results from C_GPU which is in GPU card memory to C_CPU_result which is in the host CPU for result comparison
// copy results from C_GPU which is in GPU card memory to C_GPU_result which is in the host CPU for result comparison
// (the blocking cudaMemcpy also synchronizes with the preceding kernel).
void copyResultFromGPU() {
long long memory_start_time = start_timer();
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
check_error(cudaMemcpy(C_GPU_result, C_GPU, sizeofC, cudaMemcpyDeviceToHost));
stop_timer(memory_start_time, "\tTransfer from GPU");
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
// Straightforward CPU reference multiply: C[i][j] = sum_k A[i][k] * B[k][j].
// Deliberately naive (no cache blocking); used only to validate the GPU path.
void computeCpuMMM() {
    const int inner = B_MD.dimension1;   // shared dimension (== A_MD.dimension2)
    for (int i = 0; i < A_MD.dimension1; ++i) {
        const int aBase = i * A_MD.dimension2;
        const int cBase = i * C_MD.dimension2;
        for (int j = 0; j < B_MD.dimension2; ++j) {
            const int cIdx = cBase + j;
            C_CPU[cIdx] = 0;
            for (int k = 0; k < inner; ++k)
                C_CPU[cIdx] += A_CPU[aBase + k] * B_CPU[k * B_MD.dimension2 + j];
        }
    }
}
// function to determine if the GPU computation is done correctly by comparing the output from the GPU with that
// Compare C_GPU_result against the CPU reference C_CPU element-wise with an
// absolute tolerance of 0.01, printing every mismatch and a final verdict.
void compareHostAndGpuOutput() {
    const int total = C_MD.dimension1 * C_MD.dimension2;
    int mismatches = 0;
    for (int idx = 0; idx < total; ++idx) {
        const bool differs = fabs(C_GPU_result[idx] - C_CPU[idx]) > 0.01;
        if (!differs)
            continue;
        ++mismatches;
        printf("mismatch at index %i: %f\t%f\n", idx, C_CPU[idx], C_GPU_result[idx]);
    }
    if (mismatches > 0)
        printf("Computation is incorrect: outputs do not match in %d indexes\n", mismatches);
    else
        printf("Computation is correct: CPU and GPU outputs match\n");
}
// Prints the specified error message and then exits
void die(const char *error) {
printf("%s", error);  // message is printed verbatim (caller supplies any newline)
exit(1);  // non-zero status: failure; never returns
}
// If the specified error code refers to a real error, report it and quit the program
// If the specified error code refers to a real error, report it and quit the program
// (used on every CUDA API return value in this file).
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
// Returns the current time in microseconds
// Current wall-clock time in microseconds since the Unix epoch.
long long start_timer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (long long)now.tv_sec * 1000000 + now.tv_usec;
}
// Prints the time elapsed since the specified time
// Print the time elapsed since start_time (in seconds, 5 decimals) with the
// given label and return the elapsed microseconds.
long long stop_timer(long long start_time, const char *label) {
    struct timeval now;
    gettimeofday(&now, NULL);
    const long long elapsed =
        ((long long)now.tv_sec * 1000000 + now.tv_usec) - start_time;
    printf("%s: %.5f sec\n", label, ((float) elapsed) / (1000 * 1000));
    return elapsed;
}
|
3,270 | #include <cuda.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Fully parenthesized so the macro evaluates correctly inside larger
// expressions (the old form expanded `2 * max(a, b)` as `2*a > b ? a : b`).
// Note the arguments are still evaluated twice — fine for the plain
// variables/pure calls used in this file.
#define max(x, y) (((x) > (y)) ? (x) : (y))
// Time expression f and report it with msg; relies on clock_t locals
// _begin and _end declared by the caller.
#define TIME(f, msg) \
_begin = clock(); \
(f); \
_end = clock(); \
printf("%s done in %f\n", (msg), (float)(_end - _begin) / CLOCKS_PER_SEC);
// Fill a[0..n) with pseudo-random values in [0, max).
void testRand(int *a, int n, int max) {
    int *end = a + n;
    for (int *p = a; p != end; ++p)
        *p = rand() % max;
}
// Print the n elements of a on one line, space-separated, preceded by pref.
void print(char *pref, int *a, int n) {
    printf("%s", pref);
    for (int idx = 0; idx < n; ++idx)
        printf("%d ", a[idx]);
    printf("\n");
}
// Largest absolute element-wise difference between x and y over n elements
// (returns -INT_MAX for n == 0).
int getMaxError(int *x, int *y, int n) {
    int worst = -INT_MAX;
    for (int i = 0; i < n; i++) {
        const int d = abs(x[i] - y[i]);
        worst = max(worst, d);
    }
    return worst;
}
// Sequential CPU reference: h_c = h_a + h_b, element-wise over n items.
void vectorAddSeq(int* h_a, int *h_b, int *h_c, int n) {
    for (int idx = n; idx-- > 0; )
        h_c[idx] = h_a[idx] + h_b[idx];
}
// One thread per element: d_c[i] = d_a[i] + d_b[i] for i < n.
__global__
void vectorAddKernel(int* d_a, int *d_b, int *d_c, int n) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= n)
        return;  // grid may overshoot n
    d_c[gid] = d_a[gid] + d_b[gid];
}
// Abort with a diagnostic if err is not cudaSuccess.
// NOTE: __FILE__/__LINE__ expand at this definition site, not at the caller,
// so the reported line is always this function's.
void checkError(cudaError_t &err) {
if (err != cudaSuccess) {
printf("ERROR: %s in %s, line %d\n",cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
// GPU vector add driver: copies h_a/h_b to the device, launches the kernel
// with blocks of num_thread threads, and copies the result into h_c.
void vectorAdd(int* h_a, int *h_b, int *h_c, int n, int num_thread) {
    int size = n * sizeof(int);
    int *d_a, *d_b, *d_c;
    cudaError_t err;
    err = cudaMalloc((void **) &d_a, size); checkError(err);
    err = cudaMalloc((void **) &d_b, size); checkError(err);
    err = cudaMalloc((void **) &d_c, size); checkError(err);
    err = cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice); checkError(err);
    err = cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice); checkError(err);
    int num_blocks = ceil((double)n / num_thread);  // ceiling division
    vectorAddKernel<<<num_blocks, num_thread>>>(d_a, d_b, d_c, n);
    // A kernel launch does not return an error code: pick up launch-config
    // failures here (this check was previously missing).
    err = cudaGetLastError(); checkError(err);
    // The blocking memcpy below also synchronizes with the kernel, so the
    // former explicit cudaDeviceSynchronize() after it was redundant.
    err = cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost); checkError(err);
    err = cudaFree(d_a); checkError(err);
    err = cudaFree(d_b); checkError(err);
    err = cudaFree(d_c); checkError(err);
}
// Benchmark driver: adds two 10M-element random vectors sequentially and on
// the GPU, times both, prints them for tiny n, and reports the max deviation.
int main(int argc, char const *argv[]) {
int n = 10000000;
int num_threads = 256;
int *a, *b, *c_seq, *c_par;
clock_t _begin, _end;  // used by the TIME macro
a = (int *) malloc(n * sizeof(int));
b = (int *) malloc(n * sizeof(int));
c_seq = (int *) malloc(n * sizeof(int));
c_par = (int *) malloc(n * sizeof(int));
testRand(a, n, 10);
testRand(b, n, 10);
TIME(vectorAddSeq(a, b, c_seq, n), "Sequential");
TIME(vectorAdd(a, b, c_par, n, num_threads), "Parallel");
if (n < 20) {
print("Seq ", c_seq, n);
print("Par ", c_par, n);
}
// integer addition is exact, so any nonzero error indicates a GPU-path bug
printf("Max error: %d\n", getMaxError(c_seq, c_par, n));
free(a);
free(b);
free(c_seq);
free(c_par);
return 0;
}
|
3,271 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
// Unary functor usable on host and device: returns its argument times 10.
struct TenX {
__host__ __device__ int operator() (int x) const {
return x*10;
}
} myFunctor;  // file-scope instance passed to every thrust::transform below
// Demo: thrust::transform over host_vectors — out[i] = 10 * in[i], then print
// each (input, output) pair.
void hostVectors() {
    thrust::host_vector<int> in(4), out(4);
    printf("Host\n");
    for (size_t k = 0; k < in.size(); ++k)
        in[k] = k;
    thrust::transform(in.begin(), in.end(), out.begin(), myFunctor);
    for (size_t k = 0; k < in.size(); ++k)
        printf("%d\t%d\n", in[k], out[k]);
}
// Demo: same transform executed on device_vectors; data moves host -> device,
// is transformed on the GPU, then moves back for printing.
void deviceVectors() {
thrust::host_vector<int> vec1(4), vec2(4);
thrust::device_vector<int> vec1_dev(4), vec2_dev(4);
printf("Device\n");
for (size_t i=0; i < vec1.size(); i++) vec1[i] = i;
vec1_dev = vec1;  // H2D copy via assignment
thrust::transform(vec1_dev.begin(), vec1_dev.end(), vec2_dev.begin(),
myFunctor);
vec2 = vec2_dev;  // D2H copy via assignment
for (size_t i=0; i < vec1.size(); i++) {
printf("%d\t%d\n", vec1[i], vec2[i]);
}
}
// Demo: same device transform, but driven through thrust::device_ptr wrappers
// built from the raw device addresses (requires the explicit thrust::device
// execution policy).
void deviceVectorDevPointers() {
thrust::host_vector<int> vec1(4), vec2(4);
thrust::device_vector<int> vec1_dev(4), vec2_dev(4);
thrust::device_ptr<int> vec1_dev_start(thrust::raw_pointer_cast(&vec1_dev[0]));
thrust::device_ptr<int> vec2_dev_start(thrust::raw_pointer_cast(&vec2_dev[0]));
printf("Device device pointers\n");
for (size_t i=0; i < vec1.size(); i++) vec1[i] = i;
vec1_dev = vec1;
thrust::transform(thrust::device, vec1_dev_start, vec1_dev_start + 4,
vec2_dev_start, myFunctor);
vec2 = vec2_dev;
for (size_t i=0; i < vec1.size(); i++) {
printf("%d\t%d\n", vec1[i], vec2[i]);
}
}
// Demo: same device transform driven through raw int* device pointers; the
// thrust::device policy tells Thrust the pointers are device memory.
void deviceVectorPointers() {
thrust::host_vector<int> vec1(4), vec2(4);
thrust::device_vector<int> vec1_dev(4), vec2_dev(4);
int *vec1_dev_start = thrust::raw_pointer_cast(&vec1_dev[0]);
int *vec2_dev_start = thrust::raw_pointer_cast(&vec2_dev[0]);
printf("Device pointers\n");
for (size_t i=0; i < vec1.size(); i++) vec1[i] = i;
vec1_dev = vec1;
thrust::transform(thrust::device, vec1_dev_start, vec1_dev_start + 4,
vec2_dev_start, myFunctor);
vec2 = vec2_dev;
for (size_t i=0; i < vec1.size(); i++) {
printf("%d\t%d\n", vec1[i], vec2[i]);
}
}
// Demo: round-trip vector -> raw pointer -> device_ptr, then transform through
// the rebuilt device_ptr wrappers.
void deviceVecToPointerToDevPointer() {
thrust::host_vector<int> vec1(4), vec2(4);
thrust::device_vector<int> vec1_dev(4), vec2_dev(4);
int *vec1_dev_start = thrust::raw_pointer_cast(&vec1_dev[0]);
int *vec2_dev_start = thrust::raw_pointer_cast(&vec2_dev[0]);
thrust::device_ptr<int> vec1_dev_ptr(vec1_dev_start);
thrust::device_ptr<int> vec2_dev_ptr(vec2_dev_start);
printf("Device vec->ptr->devptr\n");
for (size_t i=0; i < vec1.size(); i++) vec1[i] = i;
vec1_dev = vec1;
thrust::transform(thrust::device, vec1_dev_ptr, vec1_dev_ptr + 4,
vec2_dev_ptr, myFunctor);
vec2 = vec2_dev;
for (size_t i=0; i < vec1.size(); i++) {
printf("%d\t%d\n", vec1[i], vec2[i]);
}
}
// Run every Thrust demo variant in order.
int main() {
    void (*demos[])() = {
        hostVectors,
        deviceVectors,
        deviceVectorDevPointers,
        deviceVectorPointers,
        deviceVecToPointerToDevPointer,
    };
    for (size_t k = 0; k < sizeof(demos) / sizeof(demos[0]); ++k)
        demos[k]();
    return 0;
}
|
3,272 | #include <stdio.h>
// Single-precision a*x plus y: y[i] = a*x[i] + y[i], one thread per element.
__global__ void saxpy(uint n, float a, float *x, float *y) {
uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
if(i < n)
y[i] = a*x[i] + y[i];
}
// Enumerate the CUDA devices and print each one's name and max threads/block.
void misc(void) {
int ndev;
cudaDeviceProp prop;
cudaGetDeviceCount(&ndev);
printf("This machine has %d CUDA devices.\n", ndev);
for(int i = 0; i < ndev; i++) {
// NOTE(review): inside this loop ndev > 0 always holds, so indent is always
// "  " and the header always prints; presumably `ndev == 1` was intended for
// the unindented single-device layout — confirm.
const char *indent = (ndev == 0) ? "" : "  ";
cudaGetDeviceProperties(&prop, i);
if(ndev > 0)
printf("Device %d:\n", i);
printf("%sdevice.name = %s\n", indent, prop.name);
printf("%sdevice.maxThreadsPerBlock = %d\n", indent, prop.maxThreadsPerBlock);
}
}
// saxpy driver: builds x[i]=i, y[i]=i*i on the host, runs y = 3*x + y on the
// GPU, and verifies every element against the host formula.
int main(int argc, char **argv) {
    // Guard the required length argument (argv[1] was previously read unchecked).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <n>\n", argv[0]);
        exit(1);
    }
    uint n = atoi(argv[1]);
    int size = n*sizeof(float);
    float *x, *y, *yy;
    float *dev_x, *dev_y;
    misc();
    x = (float *)malloc(size);
    y = (float *)malloc(size);
    yy = (float *)malloc(size);
    for(int i = 0; i < n; i++) {
        x[i] = i;
        y[i] = i*i;
    }
    cudaMalloc((void**)(&dev_x), size);
    cudaMalloc((void**)(&dev_y), size);
    cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice);
    float a = 3.0;
    saxpy<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
    cudaMemcpy(yy, dev_y, size, cudaMemcpyDeviceToHost);  // blocking: syncs with kernel
    for(int i = 0; i < n; i++) { // check the result
        if(yy[i] != a*x[i] + y[i]) {
            // FIX: 'a' is a float, so it must be printed with %f; the old %s
            // was undefined behavior (a float passed where a string was read).
            fprintf(stderr, "ERROR: i=%d, a = %f, x[i]=%f, y[i]=%f, yy[i]=%f\n",
                    i, a, x[i], y[i], yy[i]);
            exit(-1);
        }
    }
    printf("The results match!\n");
    free(x);
    free(y);
    free(yy);
    cudaFree(dev_x);
    cudaFree(dev_y);
    exit(0);
}
|
3,273 | #include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include <stdlib.h>
#include <fstream>
#include <sstream>
#define PI 3.14159265358979323846
//this function returns the transition densities between nodes
__device__ double densityW(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
double f=0, x=0;
x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t);
f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x);
return f;
}
//this function provides a gpu index interface for 2-dim matrices stored as arrays
// Address of element (i, j) of a 2D matrix stored row-major with row stride b.
// The `m` parameter (row count) is accepted but unused here.
__device__ double* two_dim_indexW(double* vector, int i, int j, double m, int b){
double* p;
//specify index layout here
p=&vector[b*(i)+(j)];
return p;
}
//this function provides a gpu index interface for 3-dim matrices stored as arrays
// Address of element (i, j, k) of a 3D array stored with strides
// (b*num_assets, num_assets, 1). The `m` parameter is accepted but unused.
__device__ double* three_dim_indexW(double* matrix, int i, int j, int k, double m, int b, int num_assets){
double* p;
//specify index layout here
p=&matrix[i*b*num_assets+j*num_assets+k];
return p;
}
//this kernel calculates the numerator values of the weight equation
// Numerators of the weight equation: for each time step i, source node k and
// target node j, the product over assets of the per-asset transition density
// from X[i][k] to X[i+1][j]. One thread per (i, j, k) triple; results land in
// tempW_device laid out as [(m-1), b(source k), b(target j)].
__global__ void valuesKernel(double* tempW_device ,double m,int b, double* sigma_device,double* delta_device,double r, double delta_t,double* X_device,int num_assets){
int idx =blockDim.x*blockIdx.x + threadIdx.x;
int m_int=(int)m;
if(idx<(m_int-1)*b*b){
double w;
// decompose the flat index into (time step i, target j, source k)
int i=idx/(b*b);
int j=idx/b;
if(j>(b-1)){
j=j%b;
}
int k=idx%b;
w=1;
// product over assets of the univariate transition densities
for(int jjj=0; jjj<num_assets; jjj++){
w = w * densityW(*three_dim_indexW(X_device, (i), k, jjj, m, b, num_assets), *three_dim_indexW(X_device, i+1, j, jjj, m, b, num_assets), sigma_device[jjj], r, delta_device[jjj], delta_t);
}
tempW_device[idx]=w;
}
}
//this kernel calculates the denominator values in the weight equation
// Denominators of the weight equation: one thread sums the b numerator values
// of its (time step, node) slot using Kahan compensated summation (c carries
// the running rounding error) to limit floating-point drift.
__global__ void sumweightsKernel(double* tempW_device , int b, double* weight_denominator_device, double m){
int idx =blockDim.x*blockIdx.x + threadIdx.x;
int m_int=(int)m;
if(idx<(m_int-1)*b){
double sum=0, c=0, y, t;
int start=idx*b;
for(int i=start; i<start+b; i++){
y=tempW_device[i]-c;
t=sum+y;
c=(t-sum)-y;
sum=t;
}
weight_denominator_device[idx]=sum;
}
}
//this kernel calculates the mesh weights
// Final mesh weights: layer 0 gets weight 1 from the single starting node
// (0 elsewhere); later layers get b * numerator / denominator, i.e. the
// density normalized by the average density into node k. One thread per
// (i, k, j) entry of W.
__global__ void meshweightsKernel(double* W_device, double m, int b, double* sigma_device, double* delta_device, double r, double delta_t, double* X_device, int num_assets, double* weight_denominator_device, double* tempW_device){
double wdenominator;
int idx =blockDim.x*blockIdx.x + threadIdx.x;
int m_int=(int)m;
if(idx<b*b*m_int){
// decompose the flat index into (time step i, node k, node j)
int i=idx/(b*b);
int k=idx/b;
if(k>(b-1)){
k=k%b;
}
int j=idx%b;
if(i==0){
if(j==0){
*three_dim_indexW(W_device, i, k, j, m, b, b)=1;
}// all weights from the starting node are equal to 1
else{
*three_dim_indexW(W_device, i, k, j, m, b, b)=0;
}
}
if(i>0){
wdenominator= *two_dim_indexW(weight_denominator_device, i-1, k, m-1, b);
*three_dim_indexW(W_device, (i), k, j, m, b, b)=(((double)b) * (*three_dim_indexW(tempW_device, i-1, k, j, m-1, b, b)))/wdenominator;
}
}
}
//this function updates the weights matrix. it allocates memory on the device and initialises all the weights related kernels.
// Shared error handler for meshweights(): replaces the dozen identical inline
// cudaGetLastError blocks; reports the failing line and exits.
static void meshweights_check(cudaError_t err, int line){
    if( err != cudaSuccess )
    {
        std::cout << cudaGetErrorString(err) << std::endl;
        printf("found at line %d\n", line);
        exit(1);
    }
}
// Host driver for the weight computation: uploads the mesh state X and the
// model parameters, runs valuesKernel -> sumweightsKernel -> meshweightsKernel,
// and downloads W (mesh weights) and weight_denominator. m is the number of
// time steps (passed as double, used as an int), b the nodes per step.
void meshweights(double* W, double m, int b, double sigma[], double delta[], double r, double delta_t, double* X, int num_assets, double* weight_denominator){
    int m_int=(int)m;
    int temp_N=(m_int-1) * b*b;   // numerators: one per (step, source, target)
    int X_N=(m_int) * b * (num_assets);
    int W_N=(m_int) * b*b;
    int w_N=(m_int-1)*b;          // denominators: one per (step, node)

    double* X_device;
    double* W_device;
    double* weight_denominator_device;
    double* sigma_device;
    double* delta_device;
    double* tempW_device;

    // Allocate and upload the inputs. cudaMalloc/cudaMemcpy return their own
    // status, so we check the return values directly instead of polling
    // cudaGetLastError afterwards.
    meshweights_check(cudaMalloc((void**) &X_device, X_N*sizeof(double)), __LINE__);
    meshweights_check(cudaMemcpy(X_device, X, X_N*sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMalloc((void**) &W_device, W_N*sizeof(double)), __LINE__);
    meshweights_check(cudaMemcpy(W_device, W, W_N*sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMalloc((void**) &sigma_device, num_assets*sizeof(double)), __LINE__);
    meshweights_check(cudaMemcpy(sigma_device, sigma, num_assets*sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMalloc((void**) &delta_device, num_assets*sizeof(double)), __LINE__);
    meshweights_check(cudaMemcpy(delta_device, delta, num_assets*sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMalloc((void**) &weight_denominator_device, w_N*sizeof(double)), __LINE__);
    meshweights_check(cudaMemcpy(weight_denominator_device, weight_denominator, w_N*sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    // tempW is device-only scratch: valuesKernel writes every one of its
    // temp_N entries, so no host-side initialization copy is needed.
    meshweights_check(cudaMalloc((void**) &tempW_device, temp_N*sizeof(double)), __LINE__);

    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 80000000*sizeof(double));

    dim3 threadsPerBlock(512);
    // 1) numerators of the weight equation
    valuesKernel<<<(int)ceil(temp_N/512.0), threadsPerBlock>>>(tempW_device , m, b, sigma_device, delta_device, r, delta_t, X_device, num_assets);
    meshweights_check(cudaGetLastError(), __LINE__);
    meshweights_check(cudaDeviceSynchronize(), __LINE__);
    // 2) per-node denominators (Kahan sums of the numerators).
    // The original code round-tripped tempW and the denominators through host
    // memory between kernels; the data is already resident on the device, so
    // those copies were redundant and have been removed.
    sumweightsKernel<<<(int)ceil(w_N/512.0), threadsPerBlock>>>(tempW_device , b, weight_denominator_device, m);
    meshweights_check(cudaGetLastError(), __LINE__);
    meshweights_check(cudaDeviceSynchronize(), __LINE__);
    // 3) normalized mesh weights
    meshweightsKernel<<<(int)ceil(W_N/512.0), threadsPerBlock>>>(W_device , m, b, sigma_device, delta_device, r, delta_t, X_device, num_assets, weight_denominator_device, tempW_device);
    meshweights_check(cudaGetLastError(), __LINE__);
    meshweights_check(cudaDeviceSynchronize(), __LINE__);

    // Download the outputs the caller reads.
    meshweights_check(cudaMemcpy(W, W_device, sizeof(double)*W_N, cudaMemcpyDeviceToHost), __LINE__);
    meshweights_check(cudaMemcpy(weight_denominator, weight_denominator_device, sizeof(double)*w_N, cudaMemcpyDeviceToHost), __LINE__);

    cudaFree(X_device);
    cudaFree(sigma_device);
    cudaFree(delta_device);
    cudaFree(W_device);
    cudaFree(weight_denominator_device);
    cudaFree(tempW_device);
}
|
3,274 | // clang-format off
#include <cstdio>
#include <cassert>
// Compiler-output test kernel (this file inspects generated LLVM IR/PTX, per
// the comments below): exercises device printf, atomics on hard-coded — and
// deliberately invalid — addresses, all three fence scopes, and device assert.
// Do not run it expecting meaningful results; the atomics target garbage
// pointers on purpose.
__global__ void init_random_numbers(unsigned int seed) {
printf("seed = %d\n", seed);
atomicAdd((int *)(12312433432), 123);
atomicAdd((float *)(12312433432), 123.0f);
__threadfence_block(); // membar.cta
__threadfence(); // membar.gl
__threadfence_system(); // membar.sys
assert(seed != 0);
}
// How LLVM deals with CUDA kernels with huge structs as parameters:
// Deliberately large (1 KB) kernel parameter struct, used below to observe how
// the compiler passes big by-value arguments (byval, per the quoted IR).
struct Arg {
float x[128];
int y[128];
};
/* llvm IR of function below:
; Function Attrs: convergent noinline nounwind optnone
define dso_local void @_Z20test_struct_argument3Arg(%struct.Arg* byval align 4) #0 {
%2 = alloca %printf_args.0
%3 = getelementptr inbounds %struct.Arg, %struct.Arg* %0, i32 0, i32 0
%4 = getelementptr inbounds [128 x float], [128 x float]* %3, i64 0, i64 123
%5 = load float, float* %4, align 4
%6 = fpext float %5 to double
%7 = getelementptr inbounds %struct.Arg, %struct.Arg* %0, i32 0, i32 1
%8 = getelementptr inbounds [128 x i32], [128 x i32]* %7, i64 0, i64 53
%9 = load i32, i32* %8, align 4
%10 = getelementptr inbounds %printf_args.0, %printf_args.0* %2, i32 0, i32 0
store double %6, double* %10, align 8
%11 = getelementptr inbounds %printf_args.0, %printf_args.0* %2, i32 0, i32 1
store i32 %9, i32* %11, align 4
%12 = bitcast %printf_args.0* %2 to i8*
%13 = call i32 @vprintf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), i8* %12)
ret void
}
*/
// Takes the 1 KB struct BY VALUE; prints two fixed elements so the loads
// are not optimized away (see the quoted LLVM IR above).
__global__ void test_struct_argument(Arg arg) {
printf("%f %d\n", arg.x[123], arg.y[53]);
}
/* llvm IR of function below:
; Function Attrs: convergent noinline nounwind optnone
define dso_local void @_Z24test_struct_argument_ptrP3Arg(%struct.Arg*) #0 {
%2 = alloca %struct.Arg*, align 8
%3 = alloca %printf_args.1
store %struct.Arg* %0, %struct.Arg** %2, align 8
%4 = load %struct.Arg*, %struct.Arg** %2, align 8
%5 = getelementptr inbounds %struct.Arg, %struct.Arg* %4, i32 0, i32 0
%6 = getelementptr inbounds [128 x float], [128 x float]* %5, i64 0, i64 123
%7 = load float, float* %6, align 4
%8 = fpext float %7 to double
%9 = load %struct.Arg*, %struct.Arg** %2, align 8
%10 = getelementptr inbounds %struct.Arg, %struct.Arg* %9, i32 0, i32 1
%11 = getelementptr inbounds [128 x i32], [128 x i32]* %10, i64 0, i64 53
%12 = load i32, i32* %11, align 4
%13 = getelementptr inbounds %printf_args.1, %printf_args.1* %3, i32 0, i32 0
store double %8, double* %13, align 8
%14 = getelementptr inbounds %printf_args.1, %printf_args.1* %3, i32 0, i32 1
store i32 %12, i32* %14, align 4
%15 = bitcast %printf_args.1* %3 to i8*
%16 = call i32 @vprintf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), i8* %15)
ret void
}
*/
// Pointer variant of the same experiment: the struct is passed by address
// instead of byval (see the quoted LLVM IR above).
__global__ void test_struct_argument_ptr(Arg *arg) {
printf("%f %d\n", arg->x[123], arg->y[53]);
}
// Launch driver for the codegen experiments. Note `arg` is passed with
// uninitialized fields and there is no device synchronization before exit —
// acceptable here because only the generated code is of interest, not output.
int main() {
init_random_numbers<<<1024, 1024>>>(1);
Arg arg;
test_struct_argument<<<1, 1>>>(arg);
return 0;
}
// clang-format on
|
3,275 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Stub kernel: intentionally empty (never launched; main below only pauses).
__global__ void addKernel(int *c, const int *a, const int *b)
{
}
// Skeleton entry point: performs no CUDA work.
// NOTE: system("PAUSE") is Windows-only (keeps the console window open);
// on other platforms it prints an error and continues.
int main()
{
system("PAUSE");
return 0;
}
|
3,276 | #include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
const int BLOCK = 256;
// Work-efficient (Brent-Kung style) inclusive prefix sum over one 2*BLOCK
// segment per block: an up-sweep reduction followed by a down-sweep that
// distributes partial sums. Each thread loads/stores two elements; positions
// past l are zero-padded. Per-block results are stitched together on the host.
__global__
void ScanListK(float *I, float *O, int l)
{
    int b = blockIdx.x;
    int t = threadIdx.x;
    __shared__ float tSum[BLOCK*2];
    int start = 2*blockDim.x*b;
    // Stage this block's 2*BLOCK inputs into shared memory (zero-padded tail).
    if(start+t < l){
        tSum[t] = I[start+t];
    }
    else{
        tSum[t] = 0.0;//First half
    }
    if(start+blockDim.x+t < l){
        tSum[t+blockDim.x] = I[start+blockDim.x+t];
    }
    else{
        tSum[t+blockDim.x] = 0.0;//Second half
    }
    __syncthreads();
    // Up-sweep: build partial sums at stride-doubling indices.
    for(int s = 1; s <= BLOCK; s*=2){
        int i = (t+1)*s*2-1;
        if (i < BLOCK*2) tSum[i] += tSum[i-s];
        __syncthreads();
    }
    // Down-sweep: propagate the partial sums to the remaining positions.
    for(int s = BLOCK/2; s > 0; s /= 2){
        __syncthreads();
        int j = (t+1)*s*2-1; // Same as other index
        if(j+s < 2*BLOCK)tSum[j+s] += tSum[j];
    }
    // FIX: barrier was missing here — the down-sweep's final (s == 1) writes
    // happen after the last in-loop __syncthreads(), so without this barrier
    // a thread could read a tSum slot still being written by another thread.
    __syncthreads();
    if(start+t < l){
        O[start+t] = tSum[t];
    }
    if(t+start+blockDim.x < l)
        O[t+start+blockDim.x] = tSum[t+blockDim.x];
}
// Host driver: runs the per-block inclusive scan on the GPU, then serially
// adds each preceding block's total on the host so h_O becomes the full
// prefix sum of h_I (h_l elements).
__host__
void scanList(float *h_I, float * h_O, int h_l){
float *d_I, *d_O;
int olen;
// number of blocks: each block scans a segment of 2*BLOCK elements
olen = h_l / (BLOCK*2); //The output length equals twice the total of the length divided by width
if ((h_l - olen*BLOCK*2) > 0) {
olen++;
}
printf("%d blocks\n", olen);
cudaMalloc((void **) &d_I, sizeof(float)*h_l);
cudaMalloc((void **) &d_O, sizeof(float)*h_l);
cudaMemcpy(d_I, h_I, sizeof(float)*h_l, cudaMemcpyHostToDevice);
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess){
fprintf(stderr,"ERROR1: %s\n", cudaGetErrorString(error) );
}
dim3 dimGrid(olen, 1, 1);
dim3 dimBlock(BLOCK, 1, 1);
ScanListK<<<dimGrid, dimBlock>>>(d_I, d_O, h_l);
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error!=cudaSuccess){
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
}
cudaMemcpy(h_O, d_O, sizeof(float)*h_l, cudaMemcpyDeviceToHost);
cudaFree(d_I);cudaFree(d_O);
// Sequential fix-up: add the last element of each earlier segment (its
// running total) to every element of the following segment.
if(olen>1){
for(int i = 1; i < olen; i++){
float preSum = h_O[(BLOCK*i*2)-1];
for(int j = 0; j < 2*BLOCK; j++){
int idx = (BLOCK*i*2)+j;
if(idx < h_l){
h_O[idx]+=preSum;
}
}
}
}
}
// Fill a[0..l-1] with random values: a[0] in [0,100), the rest in [0,10),
// with no two consecutive equal values. Seeds drand48 from the clock, so
// contents differ run to run.
void populateArray(float a[], int l){
srand48(time(NULL));
float prev = drand48()*100;
float nxt;
// BUG FIX: a[0] was never written, so the caller summed an
// uninitialized element.
if (l > 0) a[0] = prev;
for(int i = 1; i < l; i++){
do{
nxt = drand48()*10;
}while(nxt==prev);
a[i] = nxt;
prev = nxt;
}
}
// Absolute difference |a - b| of two floats.
float absDif(float a, float b){
    return (a > b) ? (a - b) : (b - a);
}
// Driver: builds a random array, scans it on the GPU, then checks the
// result against a sequential prefix sum computed on the host.
int main(){
    int lengths[5] = {128, 256, 200, 1500, 1100};
    //for(int x=0; x < 5; x++){
    //int ilen = lengths[x];
    int ilen = 1500;
    (void)lengths; // kept for the commented-out multi-length loop above
    float *I = new float[ilen];
    populateArray(I, ilen);
    printf("%d items\n", ilen);
    // BUG FIX: was a variable-length array (non-standard in C++); allocate
    // on the heap instead.
    float *gtotal = new float[ilen];
    scanList(I, gtotal, ilen);
    // Sequential inclusive prefix sum as the reference, written in place.
    float rtotal = 0.0;
    for(int i = 0; i < ilen; i++){
        rtotal += I[i];
        I[i] = rtotal;
    }
    // Compare with a loose tolerance; float summation order differs.
    for(int i = 0; i < ilen; i++){
        float dif = absDif(I[i], gtotal[i]);
        if(dif > 1.0) printf("Mistatake @%d %f\n", i, dif);
    }
    delete [] I;
    delete [] gtotal;
    //}
    return 0;
}
|
3,277 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
// Element-wise vector sum: c[i] = a[i] + b[i] for i in [0, n).
__global__ void vectorAdd(int* a, int* b, int* c, int n){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return; // grid may overhang the array
    c[idx] = a[idx] + b[idx];
}
// Fill a[0..n-1] with pseudo-random values in [0, 100).
void matrix_init(int* a, int n){
    for (int* p = a; p != a + n; ++p)
        *p = rand() % 100;
}
// Assert that c is the element-wise sum of a and b.
void check_error(int* a, int* b, int* c, int n){
    int i = 0;
    while (i < n) {
        assert(a[i] + b[i] == c[i]);
        ++i;
    }
}
// Unified-memory vector addition: allocate managed buffers, prefetch them
// to the GPU, add on the device, verify on the host.
int main(){
    // BUG FIX: the original wrote cudaGetDevice's cudaError_t return value
    // into 'id', clobbering the device ordinal it had just retrieved.
    int id = 0;
    cudaGetDevice(&id);
    // 2^16 elements
    int n = 1 << 16;
    size_t bytes = sizeof(int) * n;
    // unified memory vector pointers
    int *a, *b, *c;
    // Allocation for unified memory vectors
    cudaMallocManaged(&a, bytes);
    cudaMallocManaged(&b, bytes);
    cudaMallocManaged(&c, bytes);
    // Initialize vectors with random values
    matrix_init(a, n);
    matrix_init(b, n);
    int NUM_THREADS = 256;
    // BUG FIX: ceil-divide in integer arithmetic — the old
    // (int)ceil(n / NUM_THREADS) truncated before ceil() ever ran.
    int NUM_BLOCKS = (n + NUM_THREADS - 1) / NUM_THREADS;
    // Prefetch inputs to the device before launching
    cudaMemPrefetchAsync(a, bytes, id);
    cudaMemPrefetchAsync(b, bytes, id);
    vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(a, b, c, n);
    cudaDeviceSynchronize();
    // Bring the result back to the CPU before verifying
    cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);
    check_error(a, b, c, n);
    // BUG FIX: the managed allocations were never released.
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    printf("Completed.\n");
    return 0;
} |
3,278 | #include "includes.h"
// Per-row scaling of a width x height image:
// data_out[y][x] = data_in[y][x] * scale[y].
__global__ void multi_scale_kernel(const float *data_in, const float *scale, float *data_out, int width, int height) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= width || row >= height)
        return;
    const int idx = row * width + col;
    data_out[idx] = scale[row] * data_in[idx];
} |
3,279 | //
// Created by yevhen on 8/1/21.
//
#include "iostream"
#include "cassert"
#include "../mmul.cuh"
// C = A * B for row-major int matrices: A is Arows x Acols, B is
// Acols x Bcols, ID == Acols is the shared inner dimension.
// One thread computes one element of C.
__global__ void mmul_bl(const int* a, const size_t Arows, const size_t Acols,
                        const int* b, const size_t Bcols,
                        int* c, const size_t ID) {
    // get thread ids
    const unsigned int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    const unsigned int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    // check boundaries
    if((row < Arows) && (col < Bcols)) {
        // FIX: accumulate in a register instead of read-modify-writing the
        // global result element on every inner-loop iteration.
        int sum = 0;
        for(size_t k = 0; k < ID; ++k) {
            sum += a[row*Acols + k] * b[k*Bcols + col];
        }
        c[row * Bcols + col] = sum;
    }
}
// Host-side check: recompute every element of C = A * B sequentially and
// assert it matches the value the GPU produced.
void verify_results_mm_bl(const int* a, const size_t Arows, const size_t Acols,
                          const int* b, const size_t Bcols,
                          const int* c, const size_t ID) {
    for(size_t r = 0; r < Arows; ++r) {
        for(size_t col = 0; col < Bcols; ++col) {
            int expected = 0;
            for(size_t k = 0; k < ID; ++k)
                expected += a[r*Acols + k] * b[k*Bcols + col];
            assert(expected == c[r*Bcols + col]);
        }
    }
}
// Baseline GPU matrix multiply: A[1024][512] * B[512][256] -> C[1024][256],
// verified against a CPU reference. One thread per output element.
void mmul_baseline() {
    // matrix size
    // A[1024][512] * B[512, 256] -> C[1024][256]
    const int AROWS = 1024;
    const int ACOLS = 512;
    constexpr size_t ASIZE = AROWS * ACOLS;
    const int BROWS = ACOLS;
    const int BCOLS = 256;
    constexpr size_t BSIZE = BROWS * BCOLS;
    const int CROWS = AROWS;
    const int CCOLS = BCOLS;
    constexpr size_t CSIZE = CROWS * CCOLS;
    // the same as const size_t INNER_DIMENSION = BROWS
    const size_t INNER_DIMENSION = ACOLS;
    constexpr size_t ABYTES = sizeof(int) * ASIZE;
    constexpr size_t BBYTES = sizeof(int) * BSIZE;
    constexpr size_t CBYTES = sizeof(int) * CSIZE;
    // allocate host mem (FIX: the original comment mislabeled this block
    // as device memory)
    int *a = (int*) malloc(ABYTES);
    int *b = (int*) malloc(BBYTES);
    int *c = (int*) malloc(CBYTES);
    // populate input matrices
    for(size_t i = 0; i < ASIZE; ++i) a[i] = rand() % 100;
    for(size_t i = 0; i < BSIZE; ++i) b[i] = rand() % 100;
    // allocate device mem
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, ABYTES);
    cudaMalloc(&d_b, BBYTES);
    cudaMalloc(&d_c, CBYTES);
    // copy inputs to device
    cudaMemcpy(d_a, a, ABYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, BBYTES, cudaMemcpyHostToDevice);
    // 32x32 threads per block, one thread per element of C
    const int THREADS_PER_BLOCK = 32;
    // FIX: size the grid exactly to C's dimensions (x covers columns,
    // y covers rows) instead of a square ceil(sqrt(CSIZE/32)) grid that
    // over-launched roughly 30x more blocks than needed; the kernel's
    // bounds check makes the results identical either way.
    const dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
    const dim3 blocks((BCOLS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
                      (AROWS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
    // call matrix multiplication kernel
    mmul_bl<<<blocks, threads>>>(d_a, AROWS, ACOLS,
                                 d_b, BCOLS,
                                 d_c, INNER_DIMENSION);
    // get results back to CPU (blocking copy also synchronizes with the kernel)
    cudaMemcpy(c, d_c, CBYTES, cudaMemcpyDeviceToHost);
    // verify results
    verify_results_mm_bl(a, AROWS, ACOLS,
                         b, BCOLS,
                         c, INNER_DIMENSION);
    // free cuda mem
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // free host mem
    free(a);
    free(b);
    free(c);
    std::cout << "MMUL_BASELINE COMPLETED" << std::endl;
} |
3,280 | #include "includes.h"
// Identity copy over a batch of data points: thread ix copies row ix of the
// int matrix inputX (each row is *length elements) into outputX, and its
// scalar label inputY[ix] into outputY.
__global__ void IntDataPointIdentity(int *size, const int *inputX, const int *inputY, int *outputX, int *outputY, int *length) {
    const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
    if (ix >= *size)
        return;
    // copy the int array row
    const long rowLen = *length;
    const int *src = inputX + ix * rowLen;
    int *dst = outputX + ix * rowLen;
    for (long i = 0; i < rowLen; ++i)
        dst[i] = src[i];
    // copy the int scalar value alongside the row
    outputY[ix] = inputY[ix];
} |
3,281 | #include "includes.h"
// Tiled matrix multiply dC = dA * dB with 32x32 shared-memory tiles.
// Launch contract: blockDim == (32,32) and DIM a multiple of 32. Only
// elements whose flat index i*DIM+j is below GPUN are written (the grid
// may overhang).
__global__ void mm_tiled(float *dA, float *dB, float *dC, int DIM, int N, int GPUN) {
    __shared__ float sA[32][32], sB[32][32];
    // (it, jt) => origin of this block's output tile
    int it = blockIdx.y * 32;
    int jt = blockIdx.x * 32;
    // (i, j) => this thread's output element
    int i = it + threadIdx.y;
    int j = jt + threadIdx.x;
    float sum = 0.0f;
    // FIX: every thread in the block runs this loop and its barriers. The
    // original guarded the whole loop with the output-range test, which put
    // __syncthreads() inside divergent control flow — undefined behavior
    // whenever part of a block falls outside the range.
    for (int kt = 0; kt < DIM; kt += 32) {
        // stage one tile of each operand in shared memory
        sA[threadIdx.y][threadIdx.x] = dA[(it + threadIdx.y) * DIM + kt + threadIdx.x];
        sB[threadIdx.y][threadIdx.x] = dB[(kt + threadIdx.y) * DIM + jt + threadIdx.x];
        __syncthreads();
        for (int k = 0; k < 32; k++) {
            sum += sA[threadIdx.y][k] * sB[k][threadIdx.x];
        }
        // barrier before the next iteration overwrites the tiles
        __syncthreads();
    }
    // FIX: strict '<' — with '<=' the thread at flat index GPUN wrote one
    // element past the intended range.
    if (i * DIM + j < GPUN) {
        dC[i * DIM + j] = sum;
    }
} |
3,282 |
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
// CPU reference implementation: c = a + b element-wise over N doubles.
void serialAddVectors(int N, double *a, double *b, double *c){
    for(int n = 0; n < N; ++n)
        c[n] = a[n] + b[n];
}
// code to be executed by each CUDA "thread"
// code to be executed by each CUDA "thread": one element of c = a + b
__global__ void addVectorsKernel(int N, double *a, double *b, double *c){
    // global index = rank within the block + block offset
    int n = blockIdx.x * blockDim.x + threadIdx.x;
    if(n >= N) return; // surplus threads in the last block do nothing
    c[n] = a[n] + b[n];
}
// Driver: fill two vectors on the host, add them on the GPU, print the
// first few results, and release all memory.
int main(int argc, char **argv){
    int N = 1000;
    double *h_a = (double*) malloc(N*sizeof(double));
    double *h_b = (double*) malloc(N*sizeof(double));
    double *h_c = (double*) malloc(N*sizeof(double));
    int n;
    // h_a[n] + h_b[n] == 2 for every n, which makes eyeballing output easy
    for(n=0;n<N;++n){
        h_a[n] = 1+n;
        h_b[n] = 1-n;
    }
    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, N*sizeof(double));
    cudaMalloc(&d_b, N*sizeof(double));
    cudaMalloc(&d_c, N*sizeof(double));
    cudaMemcpy(d_a, h_a, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N*sizeof(double), cudaMemcpyHostToDevice);
    int TPB = 100;                // threads per block
    int B = (N + TPB -1)/TPB;     // ceil-divide so every element is covered
    // execute the kernel code with TPB threads per block and B thread-blocks
    // (total of B*TPB threads)
    addVectorsKernel <<< B, TPB >>> (N, d_a, d_b, d_c);
    // blocking copy: also synchronizes with the kernel
    cudaMemcpy(h_c, d_c, N*sizeof(double), cudaMemcpyDeviceToHost);
    for(n=0;n<5;++n){
        printf("h_c[%d] = %g\n", n, h_c[n]);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // BUG FIX: the host buffers were never released.
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
3,283 | #include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 64
#define N 1024
// Double every element of 'numbers'.
// FIX: guard against out-of-range threads using 'length' (the parameter
// was accepted but ignored), and derive the index from blockDim.x instead
// of assuming it equals BLOCK_SIZE.
__global__ void doubleValues(int* numbers, int length) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) numbers[i] *= 2;
}
// Fill an array with 0..N-1, double it on the GPU three times (so each
// value is multiplied by 8), print the result, and clean up.
int main() {
    int* cpu_arr = (int*)malloc(N * sizeof(int));
    if(!cpu_arr) {
        perror("malloc");
        exit(1);
    }
    for(int i = 0; i < N; i++) {
        cpu_arr[i] = i;
    }
    int* gpu_arr;
    if(cudaMalloc(&gpu_arr, sizeof(int) * N) != cudaSuccess) {
        fprintf(stderr, "Failed to allocate array on GPU\n");
        exit(2);
    }
    if(cudaMemcpy(gpu_arr, cpu_arr, sizeof(int) * N, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Failed to copy array to the GPU\n");
    }
    // Three back-to-back launches on the same stream execute in order.
    doubleValues<<<N/BLOCK_SIZE, BLOCK_SIZE>>>(gpu_arr, N);
    doubleValues<<<N/BLOCK_SIZE, BLOCK_SIZE>>>(gpu_arr, N);
    doubleValues<<<N/BLOCK_SIZE, BLOCK_SIZE>>>(gpu_arr, N);
    cudaDeviceSynchronize();
    if(cudaMemcpy(cpu_arr, gpu_arr, sizeof(int) * N, cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "Failed to copy array to the CPU\n");
    }
    for(int i = 0; i < N; i++) {
        printf("%d\n", cpu_arr[i]);
    }
    // BUG FIX: both the device and host arrays were leaked.
    cudaFree(gpu_arr);
    free(cpu_arr);
    return 0;
}
|
3,284 | #include "includes.h"
// Matrix product kernel over a dim x dim matrix stored in d_a.
// NOTE(review): BOTH factors are read from d_a (columns i and j), so this
// computes d_b = d_a^T * d_a (dot products of column pairs) rather than a
// product of two distinct matrices — confirm that is intended.
// NOTE(review): output rows are indexed by threadIdx.x only, so rows at or
// beyond blockDim.x are never written — the launch presumably uses
// blockDim.x == dim; verify against the caller.
__global__ void multiply_device (double *d_a, double *d_b,int dim) {
//Declaration of required variables.
double a, b, sum;
//Retrive the thread and block specific information.
int i = threadIdx.x,j,k;
// Begine Matrix Computation.
// Each (thread i, block-strided column j) pair computes one dot product
// of column i with column j of d_a.
for (j = blockIdx.x; j < dim; j += gridDim.x) {
sum = 0;
for(k=0; k<dim; k++) {
a =d_a[k *dim+i];
b =d_a[k*dim+j];
sum = sum + a * b;
}
d_b[ i * dim + j ] = sum;
}
} |
// Matrix-multiply kernel a = b * c with pitched (column-strided) layouts:
// element (row, col) of each matrix lives at [row + pitch*col].
// Each thread accumulates TWO output elements of column j, at rows i and
// i+bx; cb caches a blockDim.x-long strip of column j of c.
// NOTE(review): there are no bounds checks — the code only works when
// blockDim.x <= 512 (size of cb), p is a multiple of blockDim.x, and the
// row range covered by the grid matches the matrix height; confirm the
// launch configuration guarantees this.
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
int pitch_a, int pitch_b, int pitch_c,
int n, int m, int p )
{
int tx = threadIdx.x;
int bx = blockDim.x;
int i = blockIdx.x * bx * 2 + tx;
int j = blockIdx.y;
__shared__ float cb[512];
float sum0 = 0.0, sum1 = 0.0;
// Walk the inner dimension in blockDim.x-wide strips.
for( int ks = 0; ks < p; ks += bx ){
cb[tx] = c[ks+tx+pitch_c*j];
__syncthreads();
for( int k = ks; k < ks+bx; ++k ){
sum0 += b[i+pitch_b*k] * cb[k-ks];
sum1 += b[i+bx+pitch_b*k] * cb[k-ks];
}
// barrier before the next strip overwrites cb
__syncthreads();
}
a[i+pitch_a*j] = sum0;
a[i+bx+pitch_a*j] = sum1;
}
|
3,286 | #include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <iostream>
template <typename T, typename C>
__global__
void awkward_listarray_compact_offsets(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
extern __shared__ T temp[];
int pout = 0, pin = 1;
if (thid < length) {
temp[thid] = fromstops[stopsoffset + thid] - fromstarts[startsoffset + thid];
__syncthreads();
for (int offset = 1; offset < length; offset *=2) {
pout = 1 - pout;
pin = 1 - pout;
if (thid >= offset)
temp[pout*length + thid] = temp[pin*length + thid - offset] + temp[pin*length + thid];
else
temp[pout*length + thid] = temp[pin*length + thid];
__syncthreads();
}
tooffsets[thid] = temp[pout*length + thid];
}
}
// Host wrapper: copies the starts/stops arrays to the device, launches the
// compact-offsets scan with 2*length*sizeof(T) bytes of dynamic shared
// memory, and copies the result back.
// NOTE(review): tooffsets[0..length-1] is an INCLUSIVE scan of the sizes,
// and the host line below adds the last list's size on top of that
// inclusive total — verify this matches the intended offsets contract
// (exclusive offsets would start at 0).
template <typename T, typename C>
void offload(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
T* d_tooffsets;
C* d_fromstarts, * d_fromstops;
cudaMalloc((void**)&d_tooffsets, (length+1) * sizeof(T));
cudaMalloc((void**)&d_fromstarts, length * sizeof(C));
cudaMemcpy(d_fromstarts, fromstarts, length * sizeof(C), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_fromstops, length * sizeof(C));
cudaMemcpy(d_fromstops, fromstops, length * sizeof(C), cudaMemcpyHostToDevice);
// One block of `length` threads when it fits; otherwise multiple blocks
// of 1024 (see the kernel-side NOTE about multi-block correctness).
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
awkward_listarray_compact_offsets<T, C><<<block, thread, length*2*sizeof(T)>>>(d_tooffsets, d_fromstarts, d_fromstops, startsoffset, stopsoffset, length);
cudaDeviceSynchronize();
cudaMemcpy(tooffsets, d_tooffsets, (length + 1) * sizeof(T), cudaMemcpyDeviceToHost);
// Final element appended on the host from the last list's extent.
tooffsets[length] = tooffsets[length - 1] + fromstops[length - 1 + stopsoffset] - fromstarts[length - 1 + startsoffset];
cudaFree(d_tooffsets);
cudaFree(d_fromstarts);
cudaFree(d_fromstops);
}
// Demo driver: 1000 lists, each of size 10, scanned via offload().
int main() {
    const int size = 1000;
    int tooffsets[size + 1], fromstarts[size], fromstops[size];
    // List i spans [i, i + 10): every list has exactly 10 elements.
    for (int i = 0; i < size; ++i) {
        fromstarts[i] = i;
        fromstops[i] = i + 10;
    }
    offload<int, int>(tooffsets, fromstarts, fromstops, 0, 0, size);
    for (int i = 0; i <= size; ++i)
        std::cout << tooffsets[i] << "\n";
    return 0;
} |
3,287 | #include <stdio.h>
#include <limits.h>
/* GPU */
/* GPU: grid-stride loop; B[i] = A[i] when A[i] is odd and positive-remainder,
   otherwise 0. */
__global__ void find_odd(int n, int *A, int *B) {
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = start; i < n; i += step)
        B[i] = (A[i] % 2 > 0) ? A[i] : 0;
}
/* Read integers from inp.txt, keep the odd ones (via the GPU kernel),
 * then print them and write them to q3.txt. */
int main() {
    /* Open File */
    printf("Opening File!\n");
    FILE *fp;
    fp = fopen("inp.txt", "r");
    if (!fp) { perror("inp.txt"); return 1; }
    printf("Init Arrays and Such!\n");
    char buff[256];
    const int M = 1<<20;
    /* BUG FIX: A and B were allocated with new[] and then immediately
       overwritten by cudaMallocManaged, leaking the host arrays. Only D
       lives on the host heap. */
    int *A, *B;
    int *D = new int[M];
    int i, count = 0;
    /* Allocate managed memory visible to both CPU and GPU */
    printf("Copying to GPU Memory!\n");
    cudaMallocManaged(&A, M * sizeof(int));
    cudaMallocManaged(&B, M * sizeof(int));
    /* Read numbers as integers one by one */
    printf("Scanning File!\n");
    while (fscanf(fp, "%d", &i) != EOF) {
        A[count++] = i; // Add number to array
        fscanf(fp, "%s", buff); // Read until whitespace
    }
    /* Close File */
    printf("Closing File!\n");
    fclose(fp);
    /* Kernel */
    printf("Accessing GPU!\n");
    int blockSize = 256;
    int numBlocks = (count + blockSize - 1) / blockSize;
    find_odd<<<numBlocks, blockSize>>>(count, A, B);
    /* Wait for GPU */
    cudaDeviceSynchronize();
    /* Compact out the zeros.
       BUG FIX: the original later walked D until it hit a zero, reading
       uninitialized memory past the filled prefix; keep an explicit count. */
    printf("Removing Zeros!\n");
    int oddCount = 0;
    for (int i = 0; i < count; i++) {
        if (B[i] != 0) { D[oddCount++] = B[i]; }
    }
    /* Print D */
    printf("Printing Array!\n");
    for (int i = 0; i < oddCount; i++) {
        printf("%d", D[i]);
        if (i + 1 < oddCount) printf(", ");
    }
    printf("\n");
    /* Write D */
    printf("Writing File!\n");
    FILE *f = fopen("q3.txt", "w");
    if (f) {
        for (int i = 0; i < oddCount; i++) {
            fprintf(f, "%d", D[i]);
            if (i + 1 < oddCount) fprintf(f, ", ");
        }
        fclose(f);
    }
    /* Free Memory */
    printf("Freeing Memory!\n");
    cudaFree(A);
    cudaFree(B);
    delete [] D; /* BUG FIX: D was leaked */
    return 0;
}
|
3,288 | /******************************
* Tisma Miroslav 2006/0395
* Multiprocesorski sistemi
* domaci zadatak 6 - 4. zadatak
*******************************/
/**
* 4. Sastaviti program koji menja znak svim elementima niza celih brojeva. Po zavrsenoj obradi niza, treba
* ispisati izmenjeni niz, ukupan broj pozitivnih i ukupan broj negativnih elemenata rezultujuceg niza.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUM_OF_GPU_THREADS 256
// Negate every element of array1 and count the signs of the result.
// Launch contract: a single block of exactly NUM_OF_GPU_THREADS threads —
// the kernel partitions [0, n) by threadIdx.x alone and sizes its shared
// reduction buffer to NUM_OF_GPU_THREADS.
// NOTE(review): elements that negate to 0 are counted as "negative"
// (negative = n - positive); confirm that is the intended convention.
__global__ void changeSignsInArray(int *array1, int *positive, int *negative, int n) {
int i, counter = 0;
int idx = threadIdx.x;
__shared__ int numbers[NUM_OF_GPU_THREADS];
// Static partition: each thread negates one contiguous slice; the last
// thread also takes the remainder.
int slice = n / NUM_OF_GPU_THREADS;
int start = idx * slice;
if (idx == NUM_OF_GPU_THREADS - 1)
slice += n % NUM_OF_GPU_THREADS;
int end = start + slice;
for (i = start; i < end; i++) {
array1[i] = -array1[i];
if (array1[i] > 0)
counter++;
}
numbers[idx] = counter;
__syncthreads();
// Tree reduction of the per-thread positive counts into numbers[0].
int half = NUM_OF_GPU_THREADS;
do {
__syncthreads();
half >>= 1;
if (idx < half)
numbers[idx] += numbers[idx + half];
} while(half != 1);
if (idx == 0) {
*positive = numbers[0];
*negative = n - *positive;
}
}
// Generate a random array, flip the sign of every element on the GPU, and
// write the original array, the modified array, and the positive/negative
// counts to a file.
int main(int argc, char *argv[]) {
    int i, n, h_positives, h_negatives;
    int *h_array;
    int *d_array, *d_positives, *d_negatives;
    FILE *out;
    printf("Nizovi ce biti upisani u fajl dz6_4_izl1.in\n");
    printf("Unesite velicinu niza:\n");
    scanf("%d", &n);
    h_array = (int*)malloc(n*sizeof(int));
    out = fopen("dz6_4_izl1.in", "w");
    if (out == NULL) {
        printf("Greska pri otvaranju fajla!");
        exit(EXIT_FAILURE);
    }
    fprintf(out, "ORIGINALNI NIZ\n");
    srand(time(0));
    // Random values in [-100, 99]
    for (i = 0; i < n; i++) {
        h_array[i] = -100 + rand() % 200;
        fprintf(out, "%3d ", h_array[i]);
    }
    fprintf(out, "\n");
    cudaMalloc((void**)&d_array, n*sizeof(int));
    cudaMalloc((void**)&d_positives, sizeof(int));
    cudaMalloc((void**)&d_negatives, sizeof(int));
    cudaMemcpy(d_array, h_array, n*sizeof(int), cudaMemcpyHostToDevice);
    // Single block: the kernel requires exactly NUM_OF_GPU_THREADS threads.
    changeSignsInArray<<<1, NUM_OF_GPU_THREADS>>>(d_array, d_positives, d_negatives, n);
    // FIX: cudaThreadSynchronize() is long deprecated; use
    // cudaDeviceSynchronize() instead.
    cudaDeviceSynchronize();
    cudaMemcpy(h_array, d_array, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&h_positives, d_positives, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&h_negatives, d_negatives, sizeof(int), cudaMemcpyDeviceToHost);
    fprintf(out, "IZMENJENI NIZ:\n");
    for (i = 0; i < n; i++)
        fprintf(out, "%3.2d ", h_array[i]);
    fprintf(out, "\n");
    fprintf(out, "Broj pozitivnih elemenata u nizu je: %d\n", h_positives);
    fprintf(out, "Broj negativnih elemenata u nizu je: %d\n", h_negatives);
    cudaFree(d_array);
    cudaFree(d_positives);
    cudaFree(d_negatives);
    free(h_array);
    fclose(out);
    return EXIT_SUCCESS;
} |
3,289 | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
// Machine-generated (TVM-style) fused CP-decomposed conv2d kernel:
// Output = Data convolved with the rank-factorized kernels K0/K1/K2.
// All index arithmetic below is emitted by the code generator; do not edit
// by hand. Expected launch: grid (1,1,16), block (8,2,2) — see the
// launcher. NOTE(review): tensor extents (32x32 spatial, 16 channels, rank
// 6, 3x3 window) are hard-coded into the generated offsets; confirm they
// match the Python schedule before reuse.
extern "C" __global__ void default_function_kernel0(const float* __restrict__ Data,
const float* __restrict__ K0,
const float* __restrict__ K1,
const float* __restrict__ K2,
float* __restrict__ Output) {
float Output_local[16];
__shared__ float pad_temp_shared[4096];
__shared__ float K0_shared[16];
__shared__ float K1_shared[1];
__shared__ float K2_shared[2];
// Outer tiling over batch (nn), height (hh) and width (ww) chunks.
for (int nn_inner_outer = 0; nn_inner_outer < 8; ++nn_inner_outer) {
for (int hh_inner_outer = 0; hh_inner_outer < 8; ++hh_inner_outer) {
for (int ww_inner_outer = 0; ww_inner_outer < 2; ++ww_inner_outer) {
// Zero the per-thread accumulator tile.
for (int nn_c_init = 0; nn_c_init < 4; ++nn_c_init) {
for (int hh_c_init = 0; hh_c_init < 2; ++hh_c_init) {
for (int ww_c_init = 0; ww_c_init < 2; ++ww_c_init) {
Output_local[(((nn_c_init * 4) + (hh_c_init * 2)) + ww_c_init)] = 0.000000e+00f;
}
}
}
// Reduction over rank (rr) and the 3x3 window (rh, rw).
for (int rr_outer = 0; rr_outer < 6; ++rr_outer) {
for (int rh_outer = 0; rh_outer < 3; ++rh_outer) {
for (int rw_outer = 0; rw_outer < 3; ++rw_outer) {
__syncthreads();
// Stage the zero-padded input window into shared memory.
for (int ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner < 128; ++ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) {
pad_temp_shared[((((((int)threadIdx.z) * 2048) + (((int)threadIdx.y) * 1024)) + (((int)threadIdx.x) * 128)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner)] = (((((((1 - ((ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner % 64) / 16)) - rh_outer) <= (hh_inner_outer * 4)) && ((hh_inner_outer * 4) < ((33 - ((ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner % 64) / 16)) - rh_outer))) && (((1 - rw_outer) - (ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner % 16)) <= (ww_inner_outer * 16))) && ((ww_inner_outer * 16) < ((33 - rw_outer) - (ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner % 16)))) ? Data[((((((((((((((((int)blockIdx.z) / 8) * 524288) + (nn_inner_outer * 65536)) + (((int)threadIdx.z) * 32768)) + (((int)threadIdx.y) * 16384)) + (((int)threadIdx.x) * 2048)) + ((ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner / 64) * 1024)) + (hh_inner_outer * 128)) + (((ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner % 64) / 16) * 32)) + (rh_outer * 32)) + (ww_inner_outer * 16)) + rw_outer) + (ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner % 16)) - 33)] : 0.000000e+00f);
}
// Stage the K0 factor slice for this rank index.
if (((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 4)) < (16 - ((int)threadIdx.x))) {
if ((((int)threadIdx.y) * 4) < (8 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 4) {
K0_shared[(((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 4)) + ((int)threadIdx.x))] = K0[((((((int)threadIdx.z) * 48) + (((int)threadIdx.y) * 24)) + (((int)threadIdx.x) * 6)) + rr_outer)];
}
}
}
// Stage the single K1 (spatial) coefficient.
if (((int)threadIdx.x) < ((1 - ((int)threadIdx.z)) - ((int)threadIdx.y))) {
if (((int)threadIdx.x) < (1 - ((int)threadIdx.y))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < (((3 - rh_outer) - ((int)threadIdx.z)) - ((int)threadIdx.y))) {
K1_shared[((((int)threadIdx.x) + ((int)threadIdx.y)) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) * 18) + (((int)threadIdx.y) * 18)) + (((int)threadIdx.z) * 18)) + (rh_outer * 18)) + (rw_outer * 6)) + rr_outer)];
}
}
}
}
// Stage the two K2 (output-channel) coefficients.
if (((int)threadIdx.x) < ((2 - ((int)threadIdx.z)) - ((int)threadIdx.y))) {
if (((int)threadIdx.x) < (1 - ((int)threadIdx.y))) {
if (((int)threadIdx.x) < 1) {
if ((((((int)threadIdx.x) + ((int)threadIdx.y)) + ((int)threadIdx.z)) / 2) < (6 - rr_outer)) {
K2_shared[((((int)threadIdx.x) + ((int)threadIdx.y)) + ((int)threadIdx.z))] = K2[(((((((((int)threadIdx.x) + ((int)threadIdx.y)) + ((int)threadIdx.z)) / 2) * 16) + (rr_outer * 16)) + ((((int)blockIdx.z) % 8) * 2)) + (((((int)threadIdx.x) + ((int)threadIdx.y)) + ((int)threadIdx.z)) % 2))];
}
}
}
}
__syncthreads();
// Accumulate the factorized contraction over input channels.
for (int rc_inner = 0; rc_inner < 16; ++rc_inner) {
for (int nn_c = 0; nn_c < 4; ++nn_c) {
for (int hh_c = 0; hh_c < 2; ++hh_c) {
for (int ww_c = 0; ww_c < 2; ++ww_c) {
Output_local[(((nn_c * 4) + (hh_c * 2)) + ww_c)] = (Output_local[(((nn_c * 4) + (hh_c * 2)) + ww_c)] + (((pad_temp_shared[((((((nn_c * 1024) + (rc_inner * 64)) + (((int)threadIdx.y) * 32)) + (hh_c * 16)) + (((int)threadIdx.x) * 2)) + ww_c)] * K0_shared[rc_inner]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
}
}
}
}
}
}
}
// Flush the accumulator tile to global memory.
for (int nn_inner_inner_inner = 0; nn_inner_inner_inner < 4; ++nn_inner_inner_inner) {
for (int hh_inner_inner_inner = 0; hh_inner_inner_inner < 2; ++hh_inner_inner_inner) {
for (int ww_inner_inner_inner = 0; ww_inner_inner_inner < 2; ++ww_inner_inner_inner) {
Output[((((((((((((((int)blockIdx.z) / 8) * 524288) + (nn_inner_outer * 65536)) + (nn_inner_inner_inner * 16384)) + ((((int)blockIdx.z) % 8) * 2048)) + (((int)threadIdx.z) * 1024)) + (hh_inner_outer * 128)) + (((int)threadIdx.y) * 64)) + (hh_inner_inner_inner * 32)) + (ww_inner_outer * 16)) + (((int)threadIdx.x) * 2)) + ww_inner_inner_inner)] = Output_local[(((nn_inner_inner_inner * 4) + (hh_inner_inner_inner * 2)) + ww_inner_inner_inner)];
}
}
}
}
}
}
}
// Host launcher: runs the fused CP-decomposed conv2d kernel on a
// (1,1,16) grid of (8,2,2) blocks and blocks until it completes.
void Conv2dCpFusedNchwKernelLauncher(const float* U, const float* K0,
                                     const float* K1, const float* K2, float* V){
    const dim3 grid(1, 1, 16);
    const dim3 block(8, 2, 2);
    default_function_kernel0<<<grid, block>>>(U, K0, K1, K2, V);
    cudaDeviceSynchronize();
}
#endif
|
3,290 | /*
Single Author info:
hmajety Hari Krishna Majety
Group info:
hmajety Hari Krishna Majety
srout Sweta Rout
mreddy2 Harshavardhan Reddy Muppidi
*/
#include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#include<curand_kernel.h>
#define SEED 35791246
// Initialize one curand state per element: every thread uses the same
// seed but a distinct sequence number, so streams are independent.
__global__ void setup_kernel(curandState *state, int numElements)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= numElements) return;
    curand_init(SEED, id, 0, &state[id]);
}
// Draw one uniform point (x, y) in the unit square per element and record
// whether it lands inside the quarter circle (dZ = 1) or outside (dZ = 0).
__global__ void getMonteCarloVal(float *dX,float *dY, float *dZ, curandState *state, int numElements){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Ignore additional threads spawned beyond the sample count
    if (idx >= numElements) return;
    curandState rng = state[idx];
    float x = curand_uniform(&rng);
    float y = curand_uniform(&rng);
    dX[idx] = x;
    dY[idx] = y;
    dZ[idx] = ((x*x + y*y) <= 1 ? 1 : 0);
    // Persist the advanced generator state for future launches
    state[idx] = rng;
}
// Monte Carlo estimate of Pi. Usage: prog <numElements>.
int main(int argc, char** argv){
    // BUG FIX: validate the command line before dereferencing argv[1].
    if (argc < 2) {
        fprintf(stderr, "usage: %s <numElements>\n", argv[0]);
        return 1;
    }
    int numElements = atoi(argv[1]);
    size_t size = numElements * sizeof(float);
    float *dX, *dY, *dZ, *hZ;
    cudaMalloc((void **)&dX, size);
    cudaMalloc((void **)&dY, size);
    cudaMalloc((void **)&dZ, size);
    hZ = (float *)malloc(size);
    curandState *devStates;
    cudaMalloc((void **)&devStates, numElements * sizeof(curandState));
    // Launch configuration
    int threadsPerBlock = 256;
    int nBlocks = (numElements+threadsPerBlock-1)/threadsPerBlock;
    printf("%d\n", nBlocks);
    // Set up one curand generator state per sample
    setup_kernel<<<nBlocks,threadsPerBlock>>>(devStates,numElements);
    // Run the Monte Carlo trial for every sample
    getMonteCarloVal<<<nBlocks,threadsPerBlock>>>(dX, dY, dZ, devStates, numElements);
    cudaMemcpy(hZ, dZ, numElements*sizeof(float), cudaMemcpyDeviceToHost);
    int count = 0;
    // Aggregate the inside/outside flags received from the GPU
    for (int i = 0; i < numElements; ++i)
    {
        count=count+hZ[i];
    }
    printf("The approximate value of Pi is %f\n", ((float)count/numElements)*4 );
    // BUG FIX: dY, dZ and devStates were never freed (the frees were
    // commented out or missing).
    cudaFree(dX);
    cudaFree(dY);
    cudaFree(dZ);
    cudaFree(devStates);
    free(hZ);
    cudaDeviceReset();
    return 0;
} |
3,291 | #include <cuda_runtime.h>
#include <vector>
#include <iostream>
// c[i] = a[i] + b[i] for each of num_elements floats.
__global__ void vector_add(const float *a, const float *b, float *c, int num_elements)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_elements)
        return; // the grid may overhang the array
    c[idx] = a[idx] + b[idx];
}
// Add two large random vectors on the GPU and verify against the host sum.
int main(void)
{
    const size_t count = 5000000;
    const size_t bytes = count * sizeof(float);
    // host-side buffers
    std::vector<float> a(count), b(count), c(count);
    for (size_t i = 0; i < count; ++i)
    {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
    }
    // device-side buffers
    float *d_a = nullptr, *d_b = nullptr, *d_c = nullptr;
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);
    // ship the inputs to the device
    cudaMemcpy(d_a, &a[0], bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b[0], bytes, cudaMemcpyHostToDevice);
    // launch with a ceil-divided grid
    const int threads = 256;
    const int blocks = (count + threads - 1) / threads;
    vector_add <<< blocks, threads >>>(d_a, d_b, d_c, count);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        std::cerr << "Failed to launch vectorAdd kernel" << cudaGetErrorString(err);
        exit(1);
    }
    // fetch the result (blocking copy also synchronizes)
    cudaMemcpy(&c[0], d_c, bytes, cudaMemcpyDeviceToHost);
    // compare against the host-side sum
    for (size_t i = 0; i < count; ++i)
    {
        if (fabs(a[i] + b[i] - c[i]) > 1e-5)
        {
            std::cerr << "Failure at " << i << std::endl;
            exit(1);
        }
    }
    // release device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    std::cout << "Done" << std::endl;
    return 0;
}
|
3,292 | #include <iostream>
#include <string>
#include "program/program.cuh"
#include "program/image_program.cuh"
#include "program/video_program.cuh"
using namespace std;
// Print the usage/help text of both the image and the video sub-program.
void usage()
{
ImageProgram().usage();
VideoProgram().usage();
}
int main(int argc, char *argv[])
{
if (argc < 2)
{
cout << "This program should be run with parameters (-h for more information)" << endl;
return - 1;
}
else
{
string mode = argv[1];
if (mode == "-i")
return ImageProgram().main(argc, argv);
else if (mode == "-v")
return VideoProgram().main(argc, argv);
else if (mode == "-h")
usage();
else
{
cout << "Invalid parameter: " << mode << endl << "Usage:" << endl;
usage();
}
}
return 0;
} |
3,293 | #include <stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<time.h>
#include<stdlib.h>
#define BLOCK_NUM 32 //块数量
#define THREAD_NUM 32 // 每个块中的线程数
#define R_SIZE 1024//BLOCK_NUM * THREAD_NUM
#define M_SIZE R_SIZE * R_SIZE
// One thread computes one full row of result = mat1 * mat2
// (both R_SIZE x R_SIZE, row-major).
// BUG FIX: the original did `result[...] +=` against device memory that
// was never zeroed (the host's zeroed buffer was never copied to the GPU),
// so it accumulated into garbage. Accumulate in a register and assign.
__global__ void mat_mul(int *mat1, int *mat2, int *result) {
    const int row = blockIdx.x * THREAD_NUM + threadIdx.x;
    for (int c = 0; c < R_SIZE; c++) {
        int acc = 0;
        for (int n = 0; n < R_SIZE; n++) {
            acc += mat1[row*R_SIZE+n] * mat2[n*R_SIZE+c];
        }
        result[row*R_SIZE+c] = acc;
    }
}
// Multiply two random R_SIZE x R_SIZE matrices on the GPU and time it.
int main(int argc, char *argv[]) {
    int *mat1, *mat2, *result;
    int *g_mat1, *g_mat2, *g_mat_result;
    // Represent each square matrix as a flat 1-D array.
    mat1 = (int*) malloc(M_SIZE * sizeof(int));
    mat2 = (int*) malloc(M_SIZE * sizeof(int));
    result = (int*) malloc(M_SIZE * sizeof(int));
    // initialize
    for (int i = 0; i < M_SIZE; i++) {
        mat1[i] = rand()/1000000;
        mat2[i] = rand()/1000000;
        result[i] = 0;
    }
    clock_t start, finish;
    start = clock();
    cudaMalloc((void **)&g_mat1, sizeof(int) * M_SIZE);
    cudaMalloc((void **)&g_mat2, sizeof(int) * M_SIZE);
    cudaMalloc((void **)&g_mat_result, sizeof(int) * M_SIZE);
    cudaMemcpy(g_mat1, mat1, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(g_mat2, mat2, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
    // BUG FIX: the kernel accumulates with '+=', so the device result
    // buffer must start zeroed — the original never copied or cleared it,
    // leaving the kernel to sum into uninitialized device memory.
    cudaMemcpy(g_mat_result, result, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
    mat_mul<<<BLOCK_NUM, THREAD_NUM>>>(g_mat1, g_mat2, g_mat_result);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(result, g_mat_result, sizeof(int) * M_SIZE, cudaMemcpyDeviceToHost);
    finish = clock();
    printf("total times: %.3f\n",(double)(finish-start)/CLOCKS_PER_SEC);
    for(int i=0;i<10;i++)
        printf("%d ",result[i]);
    printf("\n");
    cudaFree(g_mat1);cudaFree(g_mat2);cudaFree(g_mat_result);
    free(mat1); free(mat2); free(result);
    return 0;
}
|
3,294 | #include <cmath>
#include <cstdio>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_atomic_functions.h>
#include "CudaSudoku_cuda.cuh"
/**
* This function takes in a bitmap and clears them all to false.
*/
/**
 * Reset all `size` entries of a boolean bitmap to false.
 */
__device__
void clearBitmap(bool *map, int size) {
    int i = size;
    while (i-- > 0)
        map[i] = false;
}
/**
* This device checks the entire board to see if it is valid.
*
* board: this is a N * N sized array that stores the board to check. Rows are stored contiguously,
* so to access row r and col c, use board[r * N + c]
*/
__device__
// Full-board validity check: every row, column, and n x n sub-board must
// contain no repeated non-zero value (0 means an empty cell).
// N (board dimension) and n (sub-board dimension) come from the included
// header — presumably n*n == N; verify in CudaSudoku_cuda.cuh.
bool validBoard(const int *board) {
bool seen[N];
clearBitmap(seen, N);
// check if rows are valid
for (int i = 0; i < N; i++) {
clearBitmap(seen, N);
for (int j = 0; j < N; j++) {
int val = board[i * N + j];
if (val != 0) {
if (seen[val - 1]) {
return false;
} else {
seen[val - 1] = true;
}
}
}
}
// check if columns are valid
for (int j = 0; j < N; j++) {
clearBitmap(seen, N);
for (int i = 0; i < N; i++) {
int val = board[i * N + j];
if (val != 0) {
if (seen[val - 1]) {
return false;
} else {
seen[val - 1] = true;
}
}
}
}
// finally check if the sub-boards are valid
for (int ridx = 0; ridx < n; ridx++) {
for (int cidx = 0; cidx < n; cidx++) {
clearBitmap(seen, N);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int val = board[(ridx * n + i) * N + (cidx * n + j)];
if (val != 0) {
if (seen[val - 1]) {
return false;
} else {
seen[val-1] = true;
}
}
}
}
}
}
// if we get here, then the board is valid
return true;
}
/**
* This function takes a board and an index between 0 and N * N - 1. This function assumes the board
* without the value at changed is valid and checks for validity given the new change.
*
* board: this is a N * N sized array that stores the board to check. Rows are stored
* contiguously, so to access row r and col c, use board[r * N + c]
*
* changed: this is an integer that stores the index of the board that was changed
*/
__device__
// Incremental validity check: assumes the board was valid before the cell
// at index `changed` was modified, and only re-checks that cell's row,
// column, and sub-board. Falls back to the full check when changed < 0.
bool validBoard(const int *board, int changed) {
    // if changed is less than 0, then just default case
    if (changed < 0) {
        return validBoard(board);
    }
    // CONSISTENCY FIX: derive row/col and the legal value range from N,
    // matching the general validBoard overload above, instead of the
    // hard-coded 9s that silently assumed a 9x9 board.
    int r = changed / N;
    int c = changed % N;
    if ((board[changed] < 1) || (board[changed] > N)) {
        return false;
    }
    bool seen[N];
    clearBitmap(seen, N);
    // check if row is valid
    for (int i = 0; i < N; i++) {
        int val = board[r * N + i];
        if (val != 0) {
            if (seen[val - 1]) {
                return false;
            } else {
                seen[val - 1] = true;
            }
        }
    }
    // check if column is valid
    clearBitmap(seen, N);
    for (int j = 0; j < N; j++) {
        int val = board[j * N + c];
        if (val != 0) {
            if (seen[val - 1]) {
                return false;
            } else {
                seen[val - 1] = true;
            }
        }
    }
    // finally check if the sub-board is valid
    int ridx = r / n;
    int cidx = c / n;
    clearBitmap(seen, N);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            int val = board[(ridx * n + i) * N + (cidx * n + j)];
            if (val != 0) {
                if (seen[val - 1]) {
                    return false;
                } else {
                    seen[val - 1] = true;
                }
            }
        }
    }
    // if we get here, then the board is valid
    return true;
}
/**
* This kernel has each thread try to solve a different board in the input array using the
* backtracking algorithm.
*
* boards: This is an array of size numBoards * N * N. Each board is stored contiguously,
* and rows are contiguous within the board. So, to access board x, row r, and col c,
* use boards[x * N * N + r * N + c]
*
* numBoards: The total number of boards in the boards array.
*
* emptySpaces: This is an array of size numBoards * N * N. board is stored contiguously, and stores
* the indices of the empty spaces in that board. Note that this N * N pieces may not
* be filled.
*
* numEmptySpaces: This is an array of size numBoards. Each value stores the number of empty spaces
* in the corresponding board.
*
* finished: This is a flag that determines if a solution has been found. This is a stopping
* condition for the kernel.
*
* solved: This is an output array of size N * N where the solved board is stored.
*/
__global__
void sudokuBacktrack(int *boards,
const int numBoards,
int *emptySpaces,
int *numEmptySpaces,
int *finished,
int *solved) {
    // Each thread backtracks on a different candidate board. A grid-stride
    // loop over boards makes any launch configuration cover all of them.
    // `finished` doubles as an early-exit flag once any thread solves a board.
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int *currentBoard;
    int *currentEmptySpaces;
    int currentNumEmptySpaces;
    while ((*finished == 0) && (index < numBoards)) {
        int emptyIndex = 0;
        // CONSISTENCY FIX: use N * N and N instead of the hard-coded 81 / 9;
        // the documented layout is N * N cells per board.
        currentBoard = boards + index * N * N;
        currentEmptySpaces = emptySpaces + index * N * N;
        currentNumEmptySpaces = numEmptySpaces[index];
        while ((emptyIndex >= 0) && (emptyIndex < currentNumEmptySpaces)) {
            // try the next candidate value in the current empty cell
            currentBoard[currentEmptySpaces[emptyIndex]]++;
            if (!validBoard(currentBoard, currentEmptySpaces[emptyIndex])) {
                // invalid: if every value was tried, reset the cell and
                // backtrack; otherwise the next iteration tries value + 1
                if (currentBoard[currentEmptySpaces[emptyIndex]] >= N) {
                    currentBoard[currentEmptySpaces[emptyIndex]] = 0;
                    emptyIndex--;
                }
            }
            else {
                // valid so far: advance to the next empty cell
                emptyIndex++;
            }
        }
        if (emptyIndex == currentNumEmptySpaces) {
            // solved board found; publish it for the host
            *finished = 1;
            for (int i = 0; i < N * N; i++) {
                solved[i] = currentBoard[i];
            }
        }
        index += gridDim.x * blockDim.x;
    }
}
// Host-side wrapper: launches the backtracking kernel on the default stream
// with the given launch configuration. All pointer arguments must be device
// pointers. No synchronization or error check is performed here, so the
// caller must use cudaGetLastError()/cudaDeviceSynchronize() to observe
// launch or execution failures.
void cudaSudokuBacktrack(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *boards,
const int numBoards,
int *emptySpaces,
int *numEmptySpaces,
int *finished,
int *solved) {
sudokuBacktrack<<<blocks, threadsPerBlock>>>
(boards, numBoards, emptySpaces, numEmptySpaces, finished, solved);
}
/**
* This kernel takes a set of old boards and finds all possible next boards by filling in the next
* empty space.
*
* old_boards: This is an array of size sk. Each N * N section is another board. The rows
* are contiguous within the board. This array stores the previous set of boards.
*
* new_boards: This is an array of size sk. Each N * N section is another board. The rows
* are contiguous within the board. This array stores the next set of boards.
*
* total_boards: Number of old boards.
*
* board_index: Index specifying the index of the next opening in new_boards.
*
* empty_spaces: This is an array of size sk. Each N * N section is another board, storing the
* indices of empty spaces in new_boards.
*
* empty_space_count: This is an array of size sk / N / N + 1 which stores the number of empty
* spaces in the corresponding board.
*/
__global__
void
cudaBFSKernel(int *old_boards,
int *new_boards,
int total_boards,
int *board_index,
int *empty_spaces,
int *empty_space_count) {
    // BFS expansion: each thread takes one old board, finds its first empty
    // cell, and appends one new board per legal value for that cell.
    // *board_index must be zero on entry; slots are claimed via atomicAdd.
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    while (index < total_boards) {
        // find the first empty spot in this board
        int found = 0;
        for (int i = (index * N * N); (i < (index * N * N) + N * N) && (found == 0); i++) {
            if (old_boards[i] == 0) {
                found = 1;
                // recover the row and column of the empty cell
                int temp = i - N * N * index;
                int row = temp / N;
                int col = temp % N;
                // try every candidate value for this cell
                for (int attempt = 1; attempt <= N; attempt++) {
                    int works = 1;
                    // row constraint: scan every column of this row
                    for (int c = 0; c < N; c++) {
                        if (old_boards[row * N + c + N * N * index] == attempt) {
                            works = 0;
                        }
                    }
                    // column constraint: scan every row of this column
                    for (int r = 0; r < N; r++) {
                        if (old_boards[r * N + col + N * N * index] == attempt) {
                            works = 0;
                        }
                    }
                    // box constraint
                    // BUG FIX: the original loop bounds were "r < n" /
                    // "c < n" instead of "r < boxRow + n" / "c < boxCol + n",
                    // so only the top-left sub-box was ever checked.
                    int boxRow = n * (row / n);
                    int boxCol = n * (col / n);
                    for (int r = boxRow; r < boxRow + n; r++) {
                        for (int c = boxCol; c < boxCol + n; c++) {
                            if (old_boards[r * N + c + N * N * index] == attempt) {
                                works = 0;
                            }
                        }
                    }
                    if (works == 1) {
                        // claim an output slot, copy the whole board into it,
                        // and record the remaining empty cells as we go
                        // (CONSISTENCY FIX: N * N / N instead of 81 / 9)
                        int next_board_index = atomicAdd(board_index, 1);
                        int empty_index = 0;
                        for (int r = 0; r < N; r++) {
                            for (int c = 0; c < N; c++) {
                                new_boards[next_board_index * N * N + r * N + c] = old_boards[index * N * N + r * N + c];
                                if (old_boards[index * N * N + r * N + c] == 0 && (r != row || c != col)) {
                                    empty_spaces[empty_index + N * N * next_board_index] = r * N + c;
                                    empty_index++;
                                }
                            }
                        }
                        empty_space_count[next_board_index] = empty_index;
                        new_boards[next_board_index * N * N + row * N + col] = attempt;
                    }
                }
            }
        }
        index += blockDim.x * gridDim.x;
    }
}
// Host-side wrapper: launches the BFS expansion kernel on the default stream.
// All pointer arguments must be device pointers; *board_index must be zeroed
// by the caller before the launch. No error check is performed here, so the
// caller must use cudaGetLastError()/cudaDeviceSynchronize() to observe
// failures.
void callBFSKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *old_boards,
int *new_boards,
int total_boards,
int *board_index,
int *empty_spaces,
int *empty_space_count) {
cudaBFSKernel<<<blocks, threadsPerBlock>>>
(old_boards, new_boards, total_boards, board_index, empty_spaces, empty_space_count);
}
|
3,295 | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include "cuda.h"
// GPUVerify test fixture (see the "//pass --blockDim=64 --gridDim=64" header):
// straight-line arithmetic on thread-local variables only, expected to verify
// cleanly with no data races. Intentionally has no observable effect, so the
// code is left byte-for-byte unchanged.
__global__ void foo() {
int a, b, c;
a = 2;
b = 3;
c = a + b;
}
|
3,296 | // System includes
#include <stdio.h>
#include <cuda_runtime.h>
#include<device_launch_parameters.h>
#include<curand.h>
#define _USE_MATH_DEFINES
#include<math.h>
__global__ void sumSingleBlock(int *d)
{
    // Single-block tree reduction over 2 * blockDim.x consecutive ints in `d`;
    // the total lands in d[0]. Launch with <<<1, count/2>>> for `count` items.
    //
    // BUG FIX: the original kept myIdx = 2*tid fixed while doubling the
    // stride, so from the second round on neighboring threads read elements
    // (e.g. d[2t+2]) that other threads were concurrently overwriting — a
    // data race producing wrong sums. It also placed __syncthreads() inside
    // a while-loop whose condition diverges across threads, which is
    // undefined behavior. This version activates only the segment-head
    // threads each round and keeps the barrier uniform for the whole block.
    int tid = threadIdx.x;
    for (int stride = 1; stride < 2 * blockDim.x; stride <<= 1) {
        int idx = 2 * stride * tid;               // head of this thread's segment
        if (idx + stride < 2 * blockDim.x) {
            d[idx] += d[idx + stride];
        }
        __syncthreads();                          // reached by ALL threads every round
    }
}
int main()
{
    // Sums the integers 1..512 on the GPU with a single block and prints the
    // result (expected: 512 * 513 / 2 = 131328).
    const int count = 512;
    const int size = count * sizeof(int);
    int h[count];
    for (int i = 0; i < count; i++) {
        h[i] = 1 + i;
    }
    int *d;
    // ROBUSTNESS: the original ignored every CUDA return code; a failed
    // allocation would have produced a silent garbage result.
    cudaError_t err = cudaMalloc(&d, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    // count/2 threads: the reduction pairs elements, so half as many threads
    sumSingleBlock<<<1, count / 2>>>(d);
    err = cudaGetLastError();       // catches launch-configuration errors
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        cudaFree(d);
        return 1;
    }
    int result;
    // blocking copy also synchronizes with the kernel before reading d[0]
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Result: %d\n", result);
    cudaFree(d);
    return 0;
}
|
3,297 | #include <iostream>
#include <cuda.h>
#include <chrono>
#include <stdlib.h>
#include <ctime>
#include <cmath>
#include <limits>
#define BLOCK_SIZE 1024
__global__ void gpu_transposition(double *a, double *b, int m, int n) {
    // One thread per element: reads element (row, col) of the m x n row-major
    // input `a` and writes it to (col, row) of the n x m row-major output `b`.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= m * n) {
        return;                     // guard the grid tail
    }
    int row = tid / n;
    int col = tid % n;
    b[col * m + row] = a[tid];
}
void cpu_transposition(double *a, double *b, int m, int n)
{
    // CPU reference: transposes the m x n row-major matrix `a` into the
    // n x m row-major matrix `b` (b[j][i] = a[i][j]).
    for (int row = 0; row < m; ++row)
        for (int col = 0; col < n; ++col)
            b[col * m + row] = a[row * n + col];
}
void print_matrix(double *a, int m, int n) {
    // Prints n rows of m columns, indexing a[row * m + col], followed by a
    // blank line. NOTE(review): the parameter order (m = row stride, n = row
    // count) is unusual — callers pass dimensions accordingly.
    for (int row = 0; row < n; ++row)
    {
        for (int col = 0; col < m; ++col)
        {
            std::cout << a[row * m + col] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
int main(int argc, char const **argv)
{
    // Usage: <prog> <m> <n>. Transposes an m x n matrix on the GPU and the
    // CPU, compares the results, and reports the timings/speedup.
    // ROBUSTNESS: the original dereferenced argv[1]/argv[2] unconditionally
    // and crashed when arguments were missing.
    if (argc < 3)
    {
        fprintf(stderr, "usage: %s <m> <n>\n", argv[0]);
        return 1;
    }
    srand(3333);
    int m = atoi(argv[1]);
    int n = atoi(argv[2]);
    size_t bytes = m * n * sizeof(double);
    double *h_a = (double *) malloc(bytes);
    double *h_b = (double *) malloc(bytes);
    double *h_c = (double *) malloc(bytes);
    // randomly initialize the matrix (small integers for readability, and so
    // the exact-equality comparison below is well-defined)
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            h_a[i * n + j] = (double)(rand() % 1024);
        }
    }
    // print_matrix(h_a, n, m);
    double *d_a, *d_b;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    // one thread per element, rounded up to a whole block
    int gridSize = (m * n - 1) / BLOCK_SIZE + 1;
    float gpu_elapsed_time, cpu_elapsed_time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // start the GPU version
    cudaEventRecord(start, 0);
    gpu_transposition<<<gridSize, BLOCK_SIZE>>>(d_a, d_b, m, n);
    // ROBUSTNESS: surface launch errors instead of silently comparing garbage
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // time elapsed on GPU
    cudaEventElapsedTime(&gpu_elapsed_time, start, stop);
    printf("Time elapsed on matrix transposition of %dx%d on GPU: %f ms.\n\n", m, n, gpu_elapsed_time);
    cudaMemcpy(h_b, d_b, bytes, cudaMemcpyDeviceToHost);
    // print_matrix(h_b, m, n);
    // start the CPU version
    cudaEventRecord(start, 0);
    cpu_transposition(h_a, h_c, m, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time, start, stop);
    printf("Time elapsed on matrix transposition of %dx%d on CPU: %f ms.\n\n", m, n, cpu_elapsed_time);
    // print_matrix(h_c, m, n);
    // validate results computed by GPU (values are integral, so exact
    // equality is well-defined here)
    int all_ok = 1;
    for (int i = 0; i < n; ++i)
    {
        for (int j = 0; j < m; ++j)
        {
            if (h_b[i * m + j] != h_c[i * m + j])
            {
                all_ok = 0;
            }
        }
    }
    // roughly compute speedup
    if (all_ok)
    {
        printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time / gpu_elapsed_time);
    }
    else
    {
        printf("incorrect results\n");
    }
    // free resources (the original leaked the CUDA events)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
3,298 | __global__ void
matmultvec(int m, double *A, double *b, double *c){
    // Accumulates c += A * b.
    // BUG FIX: the original indexed A[i+j], which collapses the matrix onto a
    // single diagonal band; element (row i, col j) of a row-major m x m
    // matrix is A[i*m + j].
    // NOTE(review): this assumes A is a square m x m row-major matrix and the
    // launch supplies at least m threads — confirm against callers (a
    // column-major layout would instead need A[i + j*lda]).
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < m) {                      // guard the grid tail (missing originally)
        double sum = c[i];            // preserve the += accumulation semantics
        for (int j = 0; j < m; j++) {
            sum += A[i * m + j] * b[j];
        }
        c[i] = sum;
    }
}
|
3,299 | #include <iostream>
#include <cmath>
#include <vector>
#include <fstream>
#include <curand.h>
#include <curand_kernel.h>
#define Ndmsq 1000
#define Nssq2th 1000
#define Ngrid Ndmsq*Nssq2th
#define Nfexp 500
#define Bkgd 100
__global__ void fakeexps(unsigned int, double*, double*, double*);
__device__ double oscprob(double, double, double);
__device__ double chisquare(double*, double*, double*, double*);
__global__ void fakeexps(unsigned int seed, double* dmsq, double* ssq2th, double* chisq) {
    // One block per (dmsq, ssq2th) grid point; one thread per fake experiment.
    // dmsq is indexed by blockIdx.y, ssq2th by blockIdx.x; chisq has one slot
    // per grid point.
    unsigned int blockID = blockIdx.y * gridDim.x + blockIdx.x;
    // BUG FIX: the original called curand_init(seed, 0, 0, ...) in every
    // thread, so all fake experiments drew the identical random stream.
    // Give each thread its own subsequence.
    curandState_t state;
    curand_init(seed, blockID * blockDim.x + threadIdx.x, 0, &state);
    // per-experiment chi-square values for this grid point
    __shared__ double cacheChisq[Nfexp];
    // energy-bin centers, filled once per block by thread 0
    __shared__ double ebin[5];
    if (threadIdx.x == 0) {
        for (int k = 0; k < 5; k++) {
            ebin[k] = 15 + k * 10;
        }
    }
    // BUG FIX: barrier so no thread reads ebin before thread 0 has written it
    // (the original raced on this shared array).
    __syncthreads();
    double en[5];
    double bkgd[5];
    double mu[5];
    double n[5];
    double mubf[5];
    for (int i = 0; i < 5; i++) {
        en[i] = ebin[i] + (curand_uniform(&state) - 0.5) * 10;   // smear within the bin
        bkgd[i] = (double) Bkgd;
        mu[i] = oscprob(dmsq[blockIdx.y], ssq2th[blockIdx.x], en[i]) * 10000.;
        n[i] = (double) curand_poisson(&state, mu[i] + bkgd[i]); // pseudo-data
        mubf[i] = fmax(0., n[i] - bkgd[i]);                      // best-fit signal
    }
    cacheChisq[threadIdx.x] = chisquare(bkgd, mu, n, mubf);
    __syncthreads();
    if (threadIdx.x == 1) {
        // NOTE(review): publishes only experiment 90's chi-square — this looks
        // like a placeholder for a proper reduction/quantile over cacheChisq;
        // behavior preserved, confirm the intended statistic.
        chisq[blockID] = cacheChisq[90];
    }
}
__device__ double oscprob(double dmsq, double ssq2th, double en) {
    // Oscillation-probability parameterization evaluated at mass-squared
    // splitting `dmsq`, mixing amplitude `ssq2th`, and energy `en`.
    double ratio = dmsq / en;
    double shape = 0.2 + 0.197 * (en / dmsq) * (sin(1.5228 * ratio) - sin(2.538 * ratio));
    return 2 * ssq2th * shape;
}
__device__ double chisquare(double* b, double* mu, double* n, double* mubf) {
    // Likelihood-ratio chi-square accumulated over the 5 energy bins, given
    // background b, expected signal mu, observed counts n, and best-fit
    // signal mubf.
    double chisq = 0;
    for (int bin = 0; bin < 5; bin++) {
        double ratio = (mubf[bin] + b[bin]) / (mu[bin] + b[bin]);
        chisq += 2 * (mu[bin] - mubf[bin] + n[bin] * log(ratio));
    }
    return chisq;
}
int main() {
    // Builds log-spaced (dmsq, ssq2th) grids, runs the fake-experiment kernel
    // over the full grid, and writes the resulting chi-square map to chi.dat.
    // chisq array holds the chi-square value at every point on the grid
    double* chisq = new double[Ngrid];
    // device pointers
    double* dev_dmsq;
    double* dev_ssq2th;
    double* dev_chisq;
    cudaMalloc((void**)&dev_dmsq, Ndmsq*sizeof(double));
    cudaMalloc((void**)&dev_ssq2th, Nssq2th*sizeof(double));
    cudaMalloc((void**)&dev_chisq, Nfexp*Ngrid*sizeof(double));
    // physics parameter ranges for the grid
    double dmsqmin = 1;
    double dmsqmax = 1000;
    double ssq2thmin = 0.0001;
    double ssq2thmax = 1;
    double* dmsq = new double[Ndmsq];
    double* ssq2th = new double[Nssq2th];
    // BUG FIX: the original applied std::exp() to base-10 logarithms, which
    // does not invert log10, so the grid was not log-spaced over the intended
    // range; pow(10, x) is the correct inverse of log10.
    for (unsigned int i = 0; i < Ndmsq; i++) {
        dmsq[i] = std::pow(10., std::log10(dmsqmin) + i*(std::log10(dmsqmax) - std::log10(dmsqmin))/Ndmsq);
    }
    for (unsigned int i = 0; i < Nssq2th; i++) {
        ssq2th[i] = std::pow(10., std::log10(ssq2thmin) + i*(std::log10(ssq2thmax) - std::log10(ssq2thmin))/Nssq2th);
    }
    cudaMemcpy(dev_dmsq, dmsq, Ndmsq*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_ssq2th, ssq2th, Nssq2th*sizeof(double), cudaMemcpyHostToDevice);
    // launch one block per grid point, one thread per fake experiment
    dim3 grid(Nssq2th, Ndmsq);
    fakeexps<<<grid,Nfexp>>>(time(NULL), dev_dmsq, dev_ssq2th, dev_chisq);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(chisq, dev_chisq, Ngrid*sizeof(double), cudaMemcpyDeviceToHost);
    // output chisq to file: one line per (dmsq, ssq2th) point
    std::ofstream ofs;
    ofs.open ("chi.dat", std::ofstream::out | std::ofstream::app);
    for (int i = 0; i < Ndmsq; i++) {
        for (int j = 0; j < Nssq2th; j++) {
            // CONSISTENCY FIX: the kernel writes blockID = i*Nssq2th + j
            // (i = dmsq index), so the row stride is Nssq2th, not Ndmsq.
            ofs << dmsq[i] << "\t\t"<< ssq2th[j] << "\t\t" << chisq[j + i*Nssq2th] << std::endl;
        }
    }
    ofs.close();
    // release device and host memory (leaked in the original)
    cudaFree(dev_dmsq);
    cudaFree(dev_ssq2th);
    cudaFree(dev_chisq);
    delete[] chisq;
    delete[] dmsq;
    delete[] ssq2th;
    return 0;
}
|
3,300 | #include "includes.h"
__global__ void cuMult(int *a, int *b, int *c, int wA, int wB, int hA)
{
    // Naive matrix multiply C = A * B, all row-major:
    // A is hA x wA, B is wA x wB, C is hA x wB. One thread per output element.
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= wB || row >= hA)
    {
        return;                       // guard the grid tail in both dimensions
    }
    int acc = 0;
    // dot product of row `row` of A with column `col` of B
    for (int k = 0; k < wA; ++k)
    {
        acc += a[row * wA + k] * b[k * wB + col];
    }
    c[row * wB + col] = acc;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.