serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,401 | #include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,5}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
// Allocates per-GPU source/destination buffers, timing events and a stream
// on every GPU in the global list `g`, and enables bidirectional peer access
// between every capable pair.
//
// Fix: the inner loop previously ran over ALL j != i, so each (i, j) pair was
// peer-enabled twice (once at i, once at j); the second enable returns
// cudaErrorPeerAccessAlreadyEnabled. Iterating j > i handles each unordered
// pair exactly once.
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d,
               vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop,
               cudaStream_t stream[])
{
    for (int i = 0; i < g.size(); i++)
    {
        cudaSetDevice(g[i]);
        cudaMalloc(&buffer_s[i], size);
        cudaMalloc(&buffer_d[i], size);
        cudaEventCreate(&start[i]);
        cudaEventCreate(&stop[i]);
        cudaStreamCreate(&stream[i]);
        for (int j = i + 1; j < g.size(); j++)
        {
            int access;
            cudaDeviceCanAccessPeer(&access, g[i], g[j]);
            if (access)
            {
                // Enable both directions for the pair.
                cudaSetDevice(g[i]);
                cudaDeviceEnablePeerAccess(g[j], 0);
                cudaDeviceSynchronize();
                cudaSetDevice(g[j]);
                cudaDeviceEnablePeerAccess(g[i], 0);
                cudaDeviceSynchronize();
            }
        }
    }
}
// Releases everything configure() created: buffers, events, streams, and
// peer-access mappings for each GPU in `g`.
//
// Fix: mirrors the configure() fix — each unordered pair is now disabled
// exactly once (j > i); previously every pair was disabled twice and the
// second cudaDeviceDisablePeerAccess returned an error.
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d,
           vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop,
           cudaStream_t stream[])
{
    for (int i = 0; i < g.size(); i++)
    {
        cudaSetDevice(g[i]);
        cudaFree(buffer_s[i]);
        cudaFree(buffer_d[i]);
        cudaEventDestroy(start[i]);
        cudaEventDestroy(stop[i]);
        cudaStreamDestroy(stream[i]);
        for (int j = i + 1; j < g.size(); j++)
        {
            int access;
            cudaDeviceCanAccessPeer(&access, g[i], g[j]);
            if (access)
            {
                cudaSetDevice(g[i]);
                cudaDeviceDisablePeerAccess(g[j]);
                cudaDeviceSynchronize();
                cudaSetDevice(g[j]);
                cudaDeviceDisablePeerAccess(g[i]);
                cudaDeviceSynchronize();
            }
        }
    }
}
/*
 * Copies `count` bytes from src (on srcDevice) to dst (on dstDevice) staged
 * through an intermediate GPU (`route`), double-buffering `chunk`-byte pieces
 * through two staging buffers/streams that live on the routing device.
 *
 * Fixes over the previous revision:
 *  - the two streams were created three times in a row (on src, dst, and
 *    route devices), leaking the first two pairs of handles and destroying
 *    the surviving pair three times; they are now created exactly once;
 *  - offsets now advance in BYTES to match the byte counts passed to
 *    cudaMemcpyPeerAsync (src/dst are int*, so `&src[i]` with i += chunk
 *    advanced 4x too far);
 *  - the final chunk is clamped so the copy never runs past `count`;
 *  - both streams are synchronized before resources are torn down.
 */
void cudaMemcpyPoolAsync(
    int* &dst, int dstDevice, int* &src, int srcDevice,
    size_t count, int route, size_t chunk)
{
    void* rbuff[2];
    cudaStream_t rstream[2];
    // Peer mappings: src -> route for the inbound hop, route -> dst outbound.
    cudaSetDevice(srcDevice);
    cudaDeviceEnablePeerAccess(route, 0);
    cudaSetDevice(route);
    cudaDeviceEnablePeerAccess(dstDevice, 0);
    cudaMalloc(&rbuff[0], chunk);
    cudaMalloc(&rbuff[1], chunk);
    cudaStreamCreate(&rstream[0]);
    cudaStreamCreate(&rstream[1]);
    int strm = 1;
    for (size_t off = 0; off < count; off += chunk)
    {
        strm ^= 1;  // ping-pong between the two staging buffers/streams
        size_t bytes = (count - off < chunk) ? (count - off) : chunk;
        char* s = reinterpret_cast<char*>(src) + off;
        char* d = reinterpret_cast<char*>(dst) + off;
        // Same-stream ordering guarantees the staging buffer is filled before
        // it is drained, and fully drained before it is reused two loop
        // iterations later.
        cudaMemcpyPeerAsync(rbuff[strm], route, s, srcDevice, bytes, rstream[strm]);
        cudaMemcpyPeerAsync(d, dstDevice, rbuff[strm], route, bytes, rstream[strm]);
    }
    cudaStreamSynchronize(rstream[0]);
    cudaStreamSynchronize(rstream[1]);
    cudaStreamDestroy(rstream[0]);
    cudaStreamDestroy(rstream[1]);
    cudaFree(rbuff[0]);
    cudaFree(rbuff[1]);
    cudaDeviceDisablePeerAccess(dstDevice);
    cudaSetDevice(srcDevice);
    cudaDeviceDisablePeerAccess(route);
}
// Measures pairwise device-to-device transfer time between every GPU pair in
// `g`, one transfer at a time, then prints a time matrix and a bandwidth
// matrix.
// NOTE(review): the actual copy (cudaMemcpyPeerAsync / cudaMemcpyPoolAsync)
// is commented out below, so the timings currently capture only the
// event-record overhead — one of the calls must be re-enabled to benchmark
// anything real.
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d,
vector<cudaEvent_t> &start, vector<cudaEvent_t> &stop,
cudaStream_t stream[])
{
// Flattened i*n+j matrices. Variable-length arrays are a compiler extension
// accepted by nvcc/gcc.
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
// Allocates buffers/events/streams and enables peer access.
configure(size, buffer_s, buffer_d, start, stop, stream);
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
cudaEventRecord(start[i]);
//cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
//cudaMemcpyPoolAsync(buffer_s[i],g[i],buffer_d[j],g[j], size, 1,
//size);
cudaEventRecord(stop[i]);
cudaEventSynchronize(stop[i]);
cudaDeviceSynchronize();
float time_ms;
cudaEventElapsedTime(&time_ms,start[i],stop[i]);
// NOTE(review): time_ms*1e3 stores MICROseconds, but the table header
// below says "Time(ms)" — one of the two should be adjusted.
time_taken[i*g.size()+j] = time_ms*1e3;
// NOTE(review): dividing by (1<<30) yields GiB/s, not Gbps as the second
// table header claims (bytes vs bits, a factor of 8 apart).
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
// Print the (source device) x (destination device) timing table.
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.2f", 0.0);
else
printf("%12.2f", time_taken[i*g.size()+j]);
}
printf("\n");
}
// Print the matching bandwidth table (diagonal forced to zero).
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.2f", 0.0);
else
printf("%12.2f", bw[i*g.size()+j]);
printf("\n");
}
}
// Drives one benchmark run: allocates per-GPU resource vectors, runs the
// blocked-copy measurement, and tears everything down.
//
// Fix: blocked_copy() calls configure() itself, so the extra configure()
// that used to run here double-allocated every buffer/event/stream and
// leaked the first set; the redundant call has been removed.
void perf_analyze(size_t size)
{
    vector<int*> buffer_s(g.size());
    vector<int*> buffer_d(g.size());
    vector<cudaEvent_t> start(g.size());
    vector<cudaEvent_t> stop(g.size());
    cudaStream_t stream[g.size()];
    blocked_copy(size, buffer_s, buffer_d, start, stop, stream);
    reset(size, buffer_s, buffer_d, start, stop, stream);
}
// Entry point: benchmarks NVLink device<->device bandwidth with a 1 GiB
// transfer size. If the global GPU list `g` was left empty, every visible
// device is used.
int main(int argc, char** argv)
{
    size_t size = (1 << 30);  // 1 GiB per transfer
    if (g.empty())
    {
        int n = 0;
        cudaGetDeviceCount(&n);
        // Fix: the message previously hard-coded "8 GPUs" regardless of the
        // actual device count; report what cudaGetDeviceCount returned.
        printf("Using all %d GPUs\n", n);
        for (int i = 0; i < n; i++)
            g.push_back(i);
    }
    perf_analyze(size);
    return 0;
}
|
// Baseline FMA-style kernel WITHOUT __restrict__: the compiler must assume
// the four pointers may alias each other.
__global__
void norec(float const * x, float const * y, float const * z, float * q) {
    unsigned int const i = threadIdx.x;
    q[i] = x[i] + y[i] * z[i];
}
// Same computation as norec(), but with __restrict__ so the compiler may
// assume no aliasing between the buffers.
__global__
void rec(float const * __restrict__ x, float const * __restrict__ y, float const * __restrict__ z, float * __restrict__ q) {
    unsigned int const i = threadIdx.x;
    q[i] = x[i] + y[i] * z[i];
}
// Device helper without __restrict__; inlined into calling kernels.
inline
__device__
void norecf(float const * x, float const * y, float const * z, float * q) {
    unsigned int const i = threadIdx.x;
    q[i] = x[i] + y[i] * z[i];
}
// Device helper WITH __restrict__; inlined into calling kernels.
inline
__device__
void recf(float const * __restrict__ x, float const * __restrict__ y, float const * __restrict__ z, float * __restrict__ q) {
    unsigned int const i = threadIdx.x;
    q[i] = x[i] + y[i] * z[i];
}
// Kernel with __restrict__ parameters that calls the NON-restrict helper
// (part of an aliasing/codegen comparison experiment).
// Consistency fix: `q` was declared `__restrict` (single underscore pair, a
// compiler-specific alias) while every other parameter in this file uses the
// standard CUDA spelling `__restrict__`.
__global__
void recg(float const * __restrict__ x, float const * __restrict__ y, float const * __restrict__ z, float * __restrict__ q) {
    norecf(x, y, z, q);
}
// Kernel WITHOUT __restrict__ parameters that calls the restrict-qualified
// helper (counterpart of recg in the aliasing experiment).
__global__
void norecg(float const * x, float const * y, float const * z, float * q) {
    recf(x, y, z, q);
}
// Holder struct used to compare different ways of exposing read-only,
// restrict-qualified views of plain pointers to device code.
struct H {
float * x;
float * y;
float * z;
// Device-side accessor returning a __restrict__ view of x.
__device__
__forceinline__
float const * __restrict__ xg() const { return x;}
// NOTE(review): this accessor is constexpr but NOT marked __device__ —
// calling it from device code presumably relies on --expt-relaxed-constexpr;
// confirm the build flags.
constexpr
__forceinline__
float const * __restrict__ yg() const { return y;}
// Element accessor forcing a read-only-cache load via __ldg.
__device__ __forceinline__
float zg(int i) const { return __ldg(z+i);}
};
// Kernel reading its operands through the H accessor struct (pointer passed
// __restrict__), mixing accessor styles: xg() view, raw member copy, __ldg.
__global__
void rech(H const * __restrict__ ph, float * __restrict__ q) {
    H const & h = *ph;
    float const * __restrict__ yview = h.y;
    unsigned int const i = threadIdx.x;
    q[i] = h.xg()[i] + yview[i] * h.zg(i);
}
// Aggregate of three restrict-qualified read-only pointers, passed to the
// kernel BY VALUE so the restrict qualification survives into the kernel.
struct AC {
float const * __restrict__ x;
float const * __restrict__ y;
float const * __restrict__ z;
};
// Kernel taking the restrict-pointer aggregate by value; writes two outputs
// (sum and difference forms) so each input is read more than once.
__global__
void rechc(AC const h, float * __restrict__ q, float * __restrict__ w) {
    unsigned int const i = threadIdx.x;
    float const prod = h.y[i] * h.z[i];
    q[i] = h.x[i] + prod;
    w[i] = h.x[i] - prod;
}
|
22,403 | #include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
// Computes, in place, the scaled-tanh activation
//   z <- 1.7159 * tanh((2/3) * z)
// and its derivative into tanh_grad_z, for an N x M matrix stored with
// index = j*N + i (one thread per element, 2D launch).
// tanh is evaluated with a rational polynomial approximation; |2z/3| > 4.97
// saturates to +/-1.7159 with zero gradient.
//
// Fix: the constants were double literals inside a float intrinsic kernel,
// forcing implicit double->float conversions; all literals now carry the
// `f` suffix.
__global__ void tanhGradient(int N, int M, float *z, float *tanh_grad_z) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = j*N + i;
    float c1 = __fdividef(2.0f, 3.0f);  // input scale 2/3
    if (i < N && j < M) {
        float el = __fmul_rn(z[index], c1);
        if (el > 4.97f) {
            // Saturated positive tail.
            z[index] = 1.7159f;
            tanh_grad_z[index] = 0.0f;
        }
        else if (el < -4.97f) {
            // Saturated negative tail.
            z[index] = -1.7159f;
            tanh_grad_z[index] = 0.0f;
        }
        else {
            // Rational approximation:
            //   tanh(x) ~= x*(135135 + x^2*(17235 + x^2*(378 + x^2)))
            //            / (135135 + x^2*(62370 + x^2*(3150 + 28*x^2)))
            float x2 = __fmul_rn(el, el);
            float a = __fmul_rn(el, __fmaf_rn(x2, __fmaf_rn(x2, __fadd_rn(378.0f, x2), 17235.0f), 135135.0f));
            float b = __fmaf_rn(x2, __fmaf_rn(x2, __fmaf_rn(x2, 28.0f, 3150.0f), 62370.0f), 135135.0f);
            float tanh = __fdividef(a, b);
            z[index] = __fmul_rn(1.7159f, tanh);
            // d/dz [1.7159*tanh(c1*z)] = 1.7159 * (1 - tanh^2) * c1
            tanh_grad_z[index] = __fmul_rn(1.7159f, __fmul_rn(__fmaf_rn(-tanh, tanh, 1.0f), c1));
        }
    }
}
22,404 | #include "includes.h"
// Elementwise vector addition d_c = d_a + d_b with a bounds guard for the
// grid tail.
__global__ void Add(float* d_a, float* d_b, float* d_c, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N)
        d_c[idx] = d_a[idx] + d_b[idx];
}
22,405 | #include <stdio.h>
// Function that catches the error
// Checks a CUDA runtime status code; on failure prints the error and its
// location, then aborts. Always call through the testCUDA(err) macro below
// so __FILE__/__LINE__ point at the call site.
void testCUDA(cudaError_t error, const char *file, int line) {
    if (error != cudaSuccess) {
        // Fix: report WHAT failed (cudaGetErrorString), not just where, and
        // write diagnostics to stderr rather than stdout.
        fprintf(stderr, "CUDA error '%s' in file %s at line %d\n",
                cudaGetErrorString(error), file, line);
        exit(EXIT_FAILURE);
    }
}
// Has to be defined in the compilation in order to get the correct value of the
// macros __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Debug kernel: each thread prints its thread/block coordinates via device
// printf (serialized and slow — demonstration only).
__global__ void empty_k(void){
    printf("thread idx %d, block idx %d\n", threadIdx.x, blockIdx.x);
}
// Demo driver: launches a printf kernel, then queries and prints properties
// of the last visible device.
int main (void){
// threads are synchronized by group of 32
empty_k<<<8,2>>>(); // <number of block, number of thread per block>
// Block until the kernel's device printf output has been flushed.
cudaDeviceSynchronize();
int deviceCount;
cudaGetDeviceCount(&deviceCount);
printf("Number of GPUs: %d\n", deviceCount);
cudaDeviceProp deviceProp;
// testCUDA(cudaGetDeviceProperties(&deviceProp, deviceCount)); // will return error
// Valid ordinals are 0..deviceCount-1, so query the last device.
cudaGetDeviceProperties(&deviceProp, deviceCount-1);
printf("Device %d has compute capability %d.%d.\n",
deviceCount-1, deviceProp.major, deviceProp.minor);
printf("Name: %s\n", deviceProp.name);
// NOTE(review): the 128 cores-per-SM factor is hard-coded and only holds for
// some architectures — confirm against the target GPU.
printf("Number of processors: %d\n", 128*deviceProp.multiProcessorCount);
printf("GPU RAM size in bytes: %zd\n", deviceProp.totalGlobalMem);
printf("Shared memory per block in bytes: %zd\n", deviceProp.sharedMemPerBlock);
/*************************************************************
Once requested, replace this comment by the appropriate code
*************************************************************/
return 0;
}
22,406 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <functional>
#include <curand_kernel.h>
// Threads per CUDA block used by the simulated-annealing kernel below.
#define threadsPerBlock 256
// A tour and its total cost (used on both host and device).
typedef struct path_struct_t
{
double cost; // path cost.
int *path; // best order of city visits
} path_t;
#define DEBUG
#ifdef DEBUG
// Wraps a CUDA call; in DEBUG builds reports the error string with file/line
// and exits. In non-DEBUG builds the call passes through unchecked.
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
// Enumerates the visible CUDA devices and prints name, SM count, global
// memory size, and compute capability for each.
void printCudaInfo()
{
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    printf("---------------------------------------------------------\n");
    printf("Found %d CUDA devices\n", deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        float globalMB = static_cast<float>(props.totalGlobalMem) / (1024 * 1024);
        printf("Device %d: %s\n", dev, props.name);
        printf(" SMs: %d\n", props.multiProcessorCount);
        printf(" Global mem: %.0f MB\n", globalMB);
        printf(" CUDA Cap: %d.%d\n", props.major, props.minor);
    }
    printf("---------------------------------------------------------\n");
}
// Writes `value` at row i, column j of the row-major n_cities x n_cities
// distance matrix.
__device__ void set_dist_cu(double *dist, int n_cities, int i, int j, double value)
{
    dist[i * n_cities + j] = value;
}
// Reads the (i, j) entry of the row-major n_cities x n_cities distance
// matrix.
__device__ double get_dist_cu(double *dist, int n_cities, int i, int j)
{
    return dist[i * n_cities + j];
}
// Builds an initial random tour for one SA thread: city `first` is pinned at
// position 0 (search-space decomposition — each thread starts from a
// different city), the remaining positions are shuffled, and the tour cost
// is computed from the distance matrix.
__device__ void init_path_cu(path_t *cost_path, int n_cities, double *dist, curandState_t *state, int first)
{
cost_path->cost = 0.0;
// initialize path in 0->1->2->3 ... ->n
for (int i = 0; i < n_cities; i++)
{
int city = i;
cost_path->path[i] = city;
}
// Move `first` to the front; it stays fixed through the shuffle below.
int temp = cost_path->path[first];
cost_path->path[first] = cost_path->path[0];
cost_path->path[0] = temp;
// create a random permutation of the path
// Fisher-Yates over positions 1..n-1 only (j is drawn from [1, i], so
// position 0 is never touched).
for (int i = n_cities - 1; i >= 1; --i)
{
int j = curand(state) % (i) + 1;
int temp = cost_path->path[i];
cost_path->path[i] = cost_path->path[j];
cost_path->path[j] = temp;
}
// compute the cost after permutation
for (int i = 0; i < n_cities - 1; ++i)
{
cost_path->cost += get_dist_cu(dist, n_cities, cost_path->path[i], cost_path->path[i + 1]);
}
}
// Sum of the distances of the edges incident to tour position
// *rand_position: the predecessor edge (unless at the start) plus the
// successor edge (unless at the end).
__device__ double edge_dist_cu(double *dist, int n_cities, int *cost_path, int *rand_position)
{
    int pos = *rand_position;
    double total = 0;
    if (pos != 0)
        total += get_dist_cu(dist, n_cities, cost_path[pos - 1], cost_path[pos]);
    if (pos != n_cities - 1)
        total += get_dist_cu(dist, n_cities, cost_path[pos], cost_path[pos + 1]);
    return total;
}
// Exchanges the cities stored at the two given tour positions.
__device__ void swap_city_cu(int *cost_path_path, int *rand_position_1, int *rand_position_2)
{
    int a = *rand_position_1;
    int b = *rand_position_2;
    int held = cost_path_path[a];
    cost_path_path[a] = cost_path_path[b];
    cost_path_path[b] = held;
}
// Proposes an SA move: swaps two random tour positions (never position 0,
// which is pinned per-thread) and returns the incrementally-updated cost.
// The swap is applied in place; the caller undoes it via swap_city_cu if the
// move is rejected.
// NOTE(review): when the two positions are adjacent, the edge between them is
// subtracted/added with stale endpoints, so the incremental cost is slightly
// wrong for that case — confirm whether this is tolerated by the annealer.
__device__ double random_swap_city_cost_cu(path_t *cost_path, int n_cities, double *dist, int *rand_position_1, int *rand_position_2, curandState_t *state)
{
double cost = cost_path->cost;
// randomly select to cities. Make sure two cities are different.
// also, because of search space decomposition, the first city cannot be choosen.
*rand_position_1 = (curand(state) % (n_cities - 1)) + 1;
*rand_position_2 = (curand(state) % (n_cities - 1)) + 1;
// Only position 1 is redrawn until the two positions differ.
while (*rand_position_1 == *rand_position_2)
{
*rand_position_1 = curand(state) % (n_cities - 1) + 1;
}
// minus the cost when taking out two cities from path
cost -= edge_dist_cu(dist, n_cities, cost_path->path, rand_position_1);
cost -= edge_dist_cu(dist, n_cities, cost_path->path, rand_position_2);
// swap the city
swap_city_cu(cost_path->path, rand_position_1, rand_position_2);
// add the cost when adding two cities to the path
cost += edge_dist_cu(dist, n_cities, cost_path->path, rand_position_1);
cost += edge_dist_cu(dist, n_cities, cost_path->path, rand_position_2);
return cost;
}
// Simulated-annealing TSP kernel: each thread anneals an independent tour
// whose first city is its global index, then a block-wide min-reduction
// selects the best tour of the block, which thread idata[0] writes out.
// NOTE(review): threads with idx >= n_cities return BEFORE the
// __syncthreads() barriers and leave their sdata slot uninitialized, so in a
// partially-filled block the reduction both reads garbage and violates the
// all-threads-reach-the-barrier requirement — restructure with an `active`
// flag instead of an early return.
// NOTE(review): the device-heap allocations (malloc / new) are never freed.
__global__ void wsp_sa_kernel(double *dev_all_cost, int *dev_all_path, double* dist, int n_cities)
{
__shared__ double sdata[threadsPerBlock];
__shared__ int idata[threadsPerBlock];
int tid = threadIdx.x;
idata[tid] = tid;
curandState state;
double temperature = 20.0;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Per-thread RNG stream seeded by global index.
curand_init(idx, 0, 0, &state);
if (idx >= n_cities) return;
// Per-thread working tour on the device heap.
path_t *cost_path = (path_t *)malloc(sizeof(path_t));
cost_path->cost = 0.0;
cost_path->path = new int[n_cities];
init_path_cu(cost_path, n_cities, dist, &state, idx);
int *rand_position_1 = new int(1);
int *rand_position_2 = new int(2);
// cnt counts consecutive rejected moves; 2000 rejections in a row ends the
// anneal.
int cnt = 0;
while(cnt < 2000)
{
double original_cost = cost_path->cost;
double new_cost = random_swap_city_cost_cu(cost_path, n_cities, dist, rand_position_1, rand_position_2, &state);
// if new cost is smaller, accept
if (new_cost < original_cost)
{
cost_path->cost = new_cost;
cnt = 0;
}
else
{
// if new cost is bigger, accept with probability
double diff = static_cast<double>(original_cost - new_cost);
double prob;
if (temperature < 1e-12)
{
prob = 0.0;
}
else
{
// Metropolis acceptance probability exp(-delta/T); diff <= 0 here.
prob = exp(diff / temperature);
}
// obtain a random number in (0,1) to decision
double rand_number = curand_uniform_double(&state);
if (rand_number < prob)
{
cost_path->cost = new_cost;
cnt = 0;
}
else
{
// if not accepted, recover the state
swap_city_cu(cost_path->path, rand_position_1, rand_position_2);
cnt++;
}
}
// annealing step (i.e. reduce temperature)
temperature *= 0.999999;
}
sdata[tid] = cost_path->cost;
__syncthreads();
// reduction
// Min-reduction over (cost, owner-tid) pairs; idx1/idx2 guard against
// lanes that map past n_cities.
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
int idx1 = idx;
int idx2 = idx1 + s;
if (tid < s && idx1 < n_cities && idx2 < n_cities) {
double s1 = sdata[tid];
double s2 = sdata[tid + s];
if(s1 > s2)
{
sdata[tid] = sdata[tid + s];
idata[tid] = idata[tid + s];
}
}
__syncthreads();
}
// now sdata[0] is the min cost, this cost is from thread minId
if(tid == idata[0])
{
dev_all_cost[blockIdx.x] = sdata[0];
memcpy(dev_all_path + blockIdx.x * n_cities, cost_path->path, sizeof(int)*n_cities);
}
}
/*
 * Launches the simulated-annealing kernel (one thread per start city),
 * gathers the per-block best tours, and copies the overall best into
 * `solution`. Elapsed GPU time (kernel + transfers) is returned in *msec.
 *
 * Fixes: cudaFree() was being passed the ADDRESS of the device pointers
 * (freeing nothing and handing cudaFree a stack address); dev_all_cost,
 * host_all_cost and both events were never released; the stop event is now
 * recorded before the host-side scan so *msec measures only the CUDA work.
 */
void wsp_simulate_annealing_cuda(path_t *solution, int n_cities, double *dist, float *msec)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int blocks = n_cities / threadsPerBlock + 1;
    int *dev_all_path;
    double *dev_all_cost;
    int *host_all_path = new int[n_cities * blocks];
    double *host_all_cost = new double[blocks];
    double *dist_cu;
    cudaMalloc(&dev_all_path, sizeof(int)*n_cities*blocks);
    cudaMalloc(&dev_all_cost, sizeof(double)*blocks);
    cudaMalloc(&dist_cu, sizeof(double)*n_cities*n_cities);
    cudaMemcpy(dist_cu, dist, sizeof(double)*n_cities*n_cities, cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    wsp_sa_kernel<<<blocks, threadsPerBlock>>>(dev_all_cost, dev_all_path, dist_cu, n_cities);
    cudaDeviceSynchronize();
    cudaMemcpy(host_all_path, dev_all_path, sizeof(int)*n_cities*blocks, cudaMemcpyDeviceToHost);
    cudaMemcpy(host_all_cost, dev_all_cost, sizeof(double)*blocks, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(msec, start, stop);
    // Host-side scan for the cheapest tour among the per-block winners.
    int index = 0;
    double min = host_all_cost[0];
    for (int i = 1; i < blocks; i++)
    {
        if (host_all_cost[i] < min)
        {
            index = i;
            min = host_all_cost[i];
        }
    }
    solution->cost = host_all_cost[index];
    memcpy(solution->path, host_all_path + index * n_cities, sizeof(int)*n_cities);
    delete[] host_all_path;
    delete[] host_all_cost;
    cudaFree(dev_all_path);   // was cudaFree(&dev_all_path): freed nothing
    cudaFree(dev_all_cost);
    cudaFree(dist_cu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
22,407 | #include "includes.h"
// Elementwise product dest[i] = a[i] * b[i]; index comes from threadIdx.x
// only, so a single-block launch is assumed.
__global__ void multiply(float *dest, float *a, float *b)
{
    const int idx = threadIdx.x;
    dest[idx] = a[idx] * b[idx];
}
22,408 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
// K-means on point clouds, one CUDA block per batch element: 3 Lloyd
// iterations assigning each of n points to the nearest of num centroids,
// then recomputing centroids. Dynamic shared memory layout (set by the
// host wrapper): temp_dist[n*num] | ct_idx_old[n] | ct_xyz[num*3] |
// temp_ct[num*3] | ct_cnt[num].
// NOTE(review): there is no __syncthreads() between the per-point assignment
// loop and the tid==0 aggregation below it, so thread 0 may read result[]
// entries other threads have not written yet — likely a race.
// NOTE(review): the per-iteration `temp_ct[tid*3] += xyz[tid*3]` block seeds
// the centroid sums with the first `num` points; unclear if intentional.
// NOTE(review): an empty cluster gives ct_cnt[tid] == 0 and a division by
// zero when updating ct_xyz.
__global__ void k_means_gpu(int b, int n, int num, const float *xyz, const float *init_xyz, int *result) { //xyz(b,n,3) result(b,n) init_xyz(b,num,3)
int batch_idx = blockIdx.x;
// Advance all pointers to this batch element.
xyz += batch_idx*n*3;
init_xyz += batch_idx*num*3;
result += batch_idx*n;
float x_c,y_c,z_c,x,y,z;
int inte = 0;
extern __shared__ float s[];
// Carve the dynamic shared-memory arena into the five arrays.
float *temp_dist = s;
int *ct_idx_old = (int*)&temp_dist[n*num];
float *ct_xyz =(float*)&ct_idx_old[n];
float *temp_ct = (float*)&ct_xyz[num*3];
int *ct_cnt = (int*)&temp_ct[num*3];
int tid = threadIdx.x; //test: num=2 n=1024
float min_dist = 1e8;
// Zero the accumulators (one thread per centroid / per point).
if(tid<num)
{
ct_cnt[tid]=0;
temp_ct[tid*3]=0;
temp_ct[tid*3+1]=0;
temp_ct[tid*3+2]=0;
}
if(tid<n)
{
ct_idx_old[tid]=0;
}
__syncthreads();
// Fixed number of Lloyd iterations.
while(inte<3)
{
if(tid<num)
{
temp_ct[tid*3] += xyz[tid*3];
temp_ct[tid*3+1] += xyz[tid*3+1];
temp_ct[tid*3+2] += xyz[tid*3+2];
}
for (int j=threadIdx.x;j<n;j+=blockDim.x) // grid-stride over points, one point at a time
{
x = xyz[j*3];
y = xyz[j*3+1];
z = xyz[j*3+2];
for(int i=0;i<num;i+=1) // squared distance from point j to center i
{
// First iteration uses the caller-provided seeds; later iterations
// use the recomputed centroids.
if(inte == 0)
{
x_c = init_xyz[i*3];
y_c = init_xyz[i*3+1];
z_c = init_xyz[i*3+2];
}
else
{
x_c = ct_xyz[i*3];
y_c = ct_xyz[i*3+1];
z_c = ct_xyz[i*3+2];
}
temp_dist[j*num+i] = (x-x_c)*(x-x_c)+(y-y_c)*(y-y_c)+(z-z_c)*(z-z_c);
// Track the nearest centroid seen so far for point j.
if(temp_dist[j*num+i]<min_dist)
{
result[j] = i;
min_dist = temp_dist[j*num+i];
}
} //
min_dist=1e8;
}
// Serial aggregation by thread 0: per-cluster counts and coordinate sums.
if(tid==0)
{
for(int ct=0;ct<n;ct++)
{
ct_cnt[result[ct]]++;
temp_ct[result[ct]*3] += xyz[ct*3];
temp_ct[result[ct]*3+1] += xyz[ct*3+1];
temp_ct[result[ct]*3+2] += xyz[ct*3+2];
}
}
__syncthreads();
// New centroid = mean of assigned points; then reset the accumulators.
if(tid<num)
{
ct_xyz[tid*3] = temp_ct[tid*3]/ct_cnt[tid];
ct_xyz[tid*3+1] = temp_ct[tid*3+1]/ct_cnt[tid];
ct_xyz[tid*3+2] = temp_ct[tid*3+2]/ct_cnt[tid];
temp_ct[tid*3] =0;
temp_ct[tid*3+1] =0;
temp_ct[tid*3+2] =0;
ct_cnt[tid] = 0;
}
/*
for (int j=threadIdx.x;j<n;j+=blockDim.x) // iterate all points; check whether another iteration is needed (dead code)
{
if(result[j]==ct_idx_old[j])
{
cnt=cnt++;
__syncthreads();
result[j]=1;
}
else
{
result[j]=0;
}
}*/
for (int j=threadIdx.x;j<n;j+=blockDim.x) // remember this iteration's assignments
{
ct_idx_old[j]=result[j];
}
inte++;
}
}
/*
for(int i=0;i<num;i+=1) //num个类
{
while(p_num!=1)
{
if(p_num>512)
{
if(temp_dist[i*n+j]>temp_dist[i*m+j+cnt*512])
{
temp_dist[i*n+j] = temp_dist[i*m+j+cnt*512];
idx_dist[j] = i*m+j+cnt*512;
cnt++;
p_num -=512;
}
}
if(p_num<=512)
{
if(j<(p_num/2)
{
int stride = p_num/2;
if(temp_dist[i*n+j]>temp_dist[i*m+j+stride])
{
idx_dist[j] = i*m+j+stride;
temp_dist[i*n+j]= = temp_dist[i*m+j+stride];
p_num = p_num/2;
}
}
}
}
}
*/
// Host wrapper: one block per batch element, 512 threads per block, with
// dynamic shared memory sized for temp_dist (n*num floats), ct_idx_old
// (n ints), ct_xyz and temp_ct (num*3 floats each) and ct_cnt (num ints).
void kmeans(int b, int n,int num, const float *xyz, const float *init_xyz, int *result)
{
    size_t smem = n * num * sizeof(float)
                + n * sizeof(int)
                + num * 3 * sizeof(float)
                + num * 3 * sizeof(float)
                + num * sizeof(int);
    k_means_gpu<<<b, 512, smem>>>(b, n, num, xyz, init_xyz, result);
}
22,409 | #include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
// Array access macros
#define f(i,j) A[(i) + (j)*(m)]
#define B(i,j) B[(i) + (j)*(m)]
#define Z(x,y) Z[(x) + (y)*(m)]
#define f_(x,y) f_[(x) + (y)*(m)]
// Non-local-means normalization pass: for each pixel (x, y) of the m x n
// image A (column-major, accessed via the f/Z macros above), accumulates
// into Z the sum over all pixels (i, j) of exp(-||patch difference||^2 /
// fltSigma), where patches are Gaussian-weighted by H. O(m*n*patch^2) work
// per thread.
// NOTE(review): the bounds guard uses the ORIGINAL patchSize; the local
// reassignment to (patchSize-1)/2 happens afterwards — confirm the guard
// matches the interior region the loops assume.
__global__ void Zcalc(float const * const A, float *Z,float const * const H,int patchSize,float patchSigma,float fltSigma, int m, int n) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x<m-(2*patchSize-1)/2 && y<n-(2*patchSize-1)/2){
int i,j,k,l,counter=0;
float FNij=0.0;
float temp=0.0;
// From here on patchSize holds the half-width (radius) of the patch.
patchSize=(patchSize-1)/2;
for(i=patchSize;i<m-patchSize;i++){
for(j=patchSize;j<n-patchSize;j++){
// Weighted squared difference between the patch around (x,y) and the
// patch around (i,j); H holds the per-offset weights, walked by counter.
for(k=-patchSize;k<=patchSize;k++){
for(l=-patchSize;l<=patchSize;l++){
temp=(f(x+patchSize+k,y+patchSize+l)-f(i+k,j+l))*H[counter];
temp=temp*temp;
FNij=FNij+(temp);
counter++;
}
}
Z(x+patchSize,y+patchSize)=Z(x+patchSize,y+patchSize)+expf(-(FNij/(fltSigma)));
FNij=0.0;
counter=0;
}
}
}
}
// Non-local-means filtering pass: recomputes the same patch weights as
// Zcalc and accumulates the weighted pixel values, normalized by the
// precomputed Z, into the output image f_.
// NOTE(review): as in Zcalc, the bounds guard uses the pre-reassignment
// patchSize — verify the two kernels cover exactly the same pixels.
__global__ void fCalc(float const * const A,float const * const Z,float const * const H, float *f_,int patchSize,float patchSigma,float fltSigma, int m, int n){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x<m-(2*patchSize-1)/2 && y<n-(2*patchSize-1)/2){
int i,j,k,l,counter=0;
// From here on patchSize holds the half-width (radius) of the patch.
patchSize=(patchSize-1)/2;
float FNij=0.0;
float temp=0.0;
// Normalization constant for this pixel, computed earlier by Zcalc.
float Z_local=Z(x+patchSize,y+patchSize);
for(i=patchSize;i<m-patchSize;i++){
for(j=patchSize;j<n-patchSize;j++){
for(k=-patchSize;k<=patchSize;k++){
for(l=-patchSize;l<=patchSize;l++){
temp=(f(x+patchSize+k,y+patchSize+l)-f(i+k,j+l))*H[counter];
temp=temp*temp;
FNij=FNij+(temp);
counter++;
}
}
// Weighted contribution of pixel (i, j) to the denoised output pixel.
f_(x+patchSize,y+patchSize)=f_(x+patchSize,y+patchSize)+((1/Z_local)*expf(-(FNij/(fltSigma))))*f(i,j);
FNij=0.0;
counter=0;
}
}
}
}
|
22,410 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
#include <math.h>
//for boolean functionality
#include <stdbool.h>
extern "C"
{
// Samples n truncated-normal variates vals[i] ~ N(mu[i], sigma[i]) restricted
// to (lo[i], hi[i]]: first naive rejection (up to max_tries draws), then
// Robert's one-sided exponential rejection sampler for heavy tail truncation.
// Fixes: mu_minus was declared `int`, truncating the standardized bound
// ((lo - mu) / sigma) to an integer before it entered the continuous
// formulas; and log/exp were the double-precision versions inside an
// otherwise single-precision path (now logf/expf).
__global__ void
rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int max_tries,
int rng_a, int rng_b, int rng_c)
{
    /* Usual block/thread indexing. Note this code, according to Prof. Baines,
       is only robust when the grid and block dims have the structure like;
       (x, 1,1)
    */
    int myblock = blockIdx.x + blockIdx.y * gridDim.x;
    int blocksize = blockDim.x * blockDim.y * blockDim.z;
    int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int idx = myblock * blocksize + subthread;
    // make sure index is not overrunning the number of threads
    if (idx < n) {
        // Setup the RNG:
        curandState rng_state;
        curand_init(rng_a + idx*rng_b, rng_c, 0, &rng_state);
        /*
        Rejection Sampling:
        (i) First try two-sided truncation naive approach. It works for cases
            where: |high - low| >> 0. which will be true for this homework
            assignment. This code is not robust to really tricky two-sided
            truncation. because the probability of sampling from that region
            is high.
        (ii) When that fails for the one-sided tail regions, that have a small
            probability of being sampled, then apply the Robert approach.
        */
        int accepted = 0;
        int iter_count = 0;
        while (accepted == 0 && iter_count < max_tries) {
            iter_count = iter_count + 1;
            vals[idx] = mu[idx] + sigma[idx] * curand_normal(&rng_state);
            // accepted or not?
            if (vals[idx] > lo[idx] && vals[idx] <= hi[idx]) {
                accepted = 1;
                return;
            }
        }
        /* If it never accepted, then for this assignment we can assume that
           we have a case of heavy right or left truncation where we are
           trying to sample from only one of the tails.
        */
        if (accepted == 0) {
            /* right truncation requires adaptation because the Robert-rejection
               sampling for one-sided truncation defaults to left truncation */
            // indicate whether it is right truncated to flip the sign of the
            // sampled value if right_trunc = 1.
            int right_trunc;
            float mu_tmp = mu[idx];
            float lo_tmp = lo[idx];
            if (hi[idx] < mu_tmp) {
                right_trunc = 1;
                mu_tmp = -1 * mu_tmp;
                lo_tmp = -1 * hi[idx];
            } else {
                // left truncation
                right_trunc = 0;
            }
            // see Appendix A below
            // FIX: must be float — as an int this truncated the standardized
            // truncation point toward zero.
            float mu_minus = ( lo_tmp - mu_tmp ) / sigma[idx];
            /****************************************/
            /* left truncation, right tail sampling */
            /* step 0: set the optimal rate parameter for the exponential
               distribution */
            float alpha = ( mu_minus + sqrtf(mu_minus*mu_minus + 4) ) / 2;
            while (accepted == 0) {
                /* step 1: generate z ~ Expo(\alpha, \mu_minus)
                   by the inv-cdf transform (since z is continous)
                */
                float z = mu_minus - logf(curand_uniform(&rng_state)) / alpha;
                /* step 2: compute ratio h(z) / ( M * g(z) ) */
                float psi;
                float offset1 = alpha - z;
                float offset2;
                if (mu_minus < alpha) {
                    psi = expf( -0.5f * offset1*offset1 );
                } else {
                    offset2 = mu_minus - alpha;
                    psi = expf( -0.5f * ( offset1*offset1 + offset2*offset2 ) );
                }
                // accept the sample
                if (curand_uniform(&rng_state) <= psi) {
                    if (right_trunc == 1) {
                        vals[idx] = -1 * (mu_tmp + sigma[idx]*z);
                    } else {
                        vals[idx] = mu_tmp + sigma[idx]*z;
                    }
                    accepted = 1;
                    return;
                }
            } // END while loop
        } // END Robert rejection-sampling.
    }
    return;
} // END rtruncnorm_kernel
} // END extern "C"
/*Appendix A*/
/*Need to adjust the truncation boundary of X, which is
lo[idx] to the truncation boundary of a standard normal (Z):
Std Normal (left truncation):
Z ~ N(\mu = 0, \sigma = 1, low_z = \mu_minus, hi_z = \Inf)
Standardized location-scale relation:
Z = (X - \mu_x) / \sigma_x
X = \mu_x + \sigma_x * Z
X ~ N(\mu = \mu_x,
\sigma = \sigma_x,
lo[idx] = \sigma_x*\mu_minus + \mu_x,
\Inf)
Then:
\mu_minus = ( lo[idx] - \mu_x ) / sigma_x;
*/
|
// Assignment template (body intentionally left empty for the student).
// Expected contract once implemented: approximate Ux = dU/dx for N grid
// points using the finite-difference stencil coefficients in `stencils`
// (half-width alpha), reading only global memory and automatic variables.
__global__ void FlexFDM1D_naive(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Naive version where only global memory and automatic variables are accessed.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// Insert code below this line.
}
//
// Kernel v2
//
// Assignment template (body intentionally left empty for the student).
// Same contract as FlexFDM1D_naive, but U tiles are expected to be staged
// through shared memory to cut redundant global loads.
__global__ void FlexFDM1D_v2(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// Insert code below this line.
}
//
// Kernel v3
//
// Assignment template (body intentionally left empty for the student).
// Same contract as FlexFDM1D_v2, additionally keeping the stencil
// coefficients in constant memory.
__global__ void FlexFDM1D_v3(float* U, float* Ux, int N, int alpha, float* stencils)
//
// Improved version where shared memory is used to reduce global memory accesses.
//
{
// YOUR TASKS:
// - Write body of kernel for computing Finite Difference Approksimations for
// threads in the grid.
// - Arbitrary sizes of N should be allowed (N can be larger than total threads).
// - Utilize shared memory
// - Utilize constant memory for stencils coefficients
// Insert code below this line.
}
|
22,412 | #include <stdio.h>
#include <cstdio>
#include <stdlib.h>
#define WIDTH 10000
typedef struct input{
int x;
}input;
// One-to-one element kernel: c[i].x = a[i].x + 1.
// Fix: the global index was computed with a hard-coded stride of 100, which
// only matched a <<<*,100>>> launch; blockDim.x makes it correct for any
// block size.
__global__ void inputkernel(input *c, const input *a)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i].x = a[i].x + 1;
}
// Demo driver: fills a host array of `input` structs with zeros, runs
// inputkernel over WIDTH elements, and prints a few results.
// Fixes: the host buffer was previously passed straight to the kernel as
// the input (device dereference of a host pointer — illegal access) and the
// zeroed data was never copied to the device; a device input buffer and the
// H2D copy are added, unused locals removed, host buffer freed.
int main(void)
{
    input *d_in = 0;
    input *d_out = 0;
    input *h_buf = (input *)malloc(sizeof(input) * WIDTH);
    cudaMalloc((void **)&d_in,  WIDTH * sizeof(input));
    cudaMalloc((void **)&d_out, WIDTH * sizeof(input));
    for (int i = 0; i < WIDTH; i++)
    {
        h_buf[i].x = 0;
    }
    cudaMemcpy(d_in, h_buf, sizeof(input) * WIDTH, cudaMemcpyHostToDevice);
    inputkernel<<<10, 100>>>(d_out, d_in);
    printf("kernel exit\n");
    cudaMemcpy(h_buf, d_out, sizeof(input) * WIDTH, cudaMemcpyDeviceToHost);
    printf("final result : %d %d %d", h_buf[0].x, h_buf[1].x, h_buf[9999].x);
    cudaFree(d_in);
    cudaFree(d_out);
    free(h_buf);
    return 0;
}
|
22,413 | #include <stdio.h>
#include <assert.h>
#define N 1000000
// Elementwise vector addition c = a + b, guarded against the grid
// overshooting N at the tail.
__global__ void vecadd(int *a, int *b, int *c){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        c[i] = a[i] + b[i];
}
// Vector-add driver: fills two N-element host arrays, adds them on the GPU,
// and asserts the result.
// Fix: the three int[1000000] host arrays (~12 MB total) were stack
// variables, which overflows the common 8 MB stack limit; they are now
// heap-allocated and freed.
int main (int argc, char **argv){
    int *a_host = (int*)malloc(N*sizeof(int));
    int *b_host = (int*)malloc(N*sizeof(int));
    int *c_host = (int*)malloc(N*sizeof(int));
    int *a_device, *b_device, *c_device;
    int i;
    int blocksize = 256;
    dim3 dimBlock(blocksize);
    dim3 dimGrid(ceil(N/(float)blocksize));  // ceil-div so the tail is covered
    for (i = 0; i < N; i++) a_host[i] = i;
    for (i = 0; i < N; i++) b_host[i] = i;
    cudaMalloc((void**)&a_device, N*sizeof(int));
    cudaMalloc((void**)&b_device, N*sizeof(int));
    cudaMalloc((void**)&c_device, N*sizeof(int));
    cudaMemcpy(a_device, a_host, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_device, b_host, N*sizeof(int), cudaMemcpyHostToDevice);
    vecadd<<<dimGrid,dimBlock>>>(a_device, b_device, c_device);
    // Blocking D2H copy also synchronizes with the kernel.
    cudaMemcpy(c_host, c_device, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (i = 0; i < N; i++) assert (c_host[i] == a_host[i] + b_host[i]);
    cudaFree(a_device);
    cudaFree(b_device);
    cudaFree(c_device);
    free(a_host);
    free(b_host);
    free(c_host);
    return 0;
}
|
22,414 | #include <stdio.h>
// Beginning of GPU Architecture definitions
// Maps a compute capability (major.minor) to the number of CUDA cores per
// SM, using a lookup table keyed on 0xMm. Returns 0 for unknown
// architectures so callers can detect a missing entry.
// Fix: added the SM 8.7 (Orin), 8.9 (Ada) and 9.0 (Hopper) entries from the
// current NVIDIA helper_cuda table; previously those returned 0.
inline int _ConvertSMVer2Cores(int major, int minor) {
  // Defines for GPU Architecture types (using the SM version to determine
  // the # of cores per SM
  typedef struct {
    int SM;  // 0xMm (hexidecimal notation), M = SM Major version,
             // and m = SM minor version
    int Cores;
  } sSMtoCores;
  sSMtoCores nGpuArchCoresPerSM[] = {
      {0x30, 192},
      {0x32, 192},
      {0x35, 192},
      {0x37, 192},
      {0x50, 128},
      {0x52, 128},
      {0x53, 128},
      {0x60, 64},
      {0x61, 128},
      {0x62, 128},
      {0x70, 64},
      {0x72, 64},
      {0x75, 64},
      {0x80, 64},
      {0x86, 128},
      {0x87, 128},
      {0x89, 128},
      {0x90, 128},
      {-1, -1}};  // sentinel
  int index = 0;
  while (nGpuArchCoresPerSM[index].SM != -1) {
    if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
      return nGpuArchCoresPerSM[index].Cores;
    }
    index++;
  }
  // Unknown SM version.
  return 0;
}
// Prints CUDA driver/runtime versions and a one-line summary (name, compute
// capability, core counts, clock, memory) for each visible device.
int main()
{
int driverVersion = 0;
cudaDriverGetVersion(&driverVersion);
printf("CUDA driver: %d\n", driverVersion);
int runtimeVersion = 0;
cudaRuntimeGetVersion(&runtimeVersion);
printf("CUDA runtime: %d\n", runtimeVersion);
int numDevices;
// NOTE(review): `stat` is captured but never checked; a failed query leaves
// `prop` uninitialized below.
cudaError_t stat = cudaGetDeviceCount(&numDevices);
for (int i = 0; i < numDevices; i++)
{
cudaDeviceProp prop;
stat = cudaGetDeviceProperties(&prop, i);
// cores/SM comes from _ConvertSMVer2Cores; clockRate is in kHz, hence /1000
// for MHz; totalGlobalMem is bytes, hence /1024/1024 for MiB.
printf("%d: %s, CC %d.%d, %dx%d=%d@%dMHz CUDA cores, %luMB\n", i, prop.name,
prop.major, prop.minor,
prop.multiProcessorCount, _ConvertSMVer2Cores(prop.major, prop.minor),
prop.multiProcessorCount*_ConvertSMVer2Cores(prop.major, prop.minor), prop.clockRate/1000,
prop.totalGlobalMem/1024/1024);
}
return 0;
}
|
22,415 | #include <stdio.h>
#include <time.h>
// One step of a 256-way parallel search: the current range [l[0], r[0]] of
// the sorted array `a` is split into up to 256 equal chunks; the thread whose
// chunk brackets searchValue narrows [l[0], r[0]] to that chunk. The host
// relaunches until the range collapses.
// NOTE(review): `e` and `num_proc` are unused. Assumes `a` is sorted
// ascending; if searchValue falls between chunks' values or is absent, no
// thread updates l/r and the host loop never terminates — confirm inputs.
__global__
void bin_search(int* a, int* l, int* r, int* e, int* searchValue) {
int idx = threadIdx.x;
int lm = l[0];
int rm = r[0];
// Chunk width so that 256 threads cover [lm, rm].
int gap = (int)ceil((float)(rm-lm+1)/(float)(256));
int num_proc = (int)ceil((float)(rm - lm + 1)/(float)gap);
int currl = idx*gap + lm;
if(currl > rm) return;
// Last chunk is clamped to the right edge of the range.
int currr = min((idx+1)*gap + lm,rm+1) - 1;
// At most one thread's [currl, currr] brackets the value, so this
// unsynchronized write to l/r is single-writer.
if(searchValue[0] >= a[currl] && searchValue[0] <= a[currr]) {
l[0] = currl;
r[0] = currr;
}
}
// Reads n, a search value and n sorted integers into unified memory, then
// repeatedly launches bin_search until the candidate range collapses to a
// single index, which is printed.
// NOTE(review): if the value is not present in `a`, bin_search leaves l/r
// unchanged and this loop spins forever — consider an iteration cap.
int main(int argc, char* argv[]) {
int n;
scanf("%d",&n);
int *a;
int *searchValue;
// Unified memory so host and kernel share the array and the l/r bounds.
cudaMallocManaged(&a, n*sizeof(int));
cudaMallocManaged(&searchValue, sizeof(int));
scanf("%d",&searchValue[0]);
for(int i=0;i<n;i++) scanf("%d",&a[i]);
int *l, *r;
int *e;
cudaMallocManaged(&l, sizeof(int));
cudaMallocManaged(&r, sizeof(int));
cudaMallocManaged(&e, sizeof(int));
l[0] = 0; r[0] = (n-1);
// Each launch narrows [l, r] by roughly a factor of 256.
while(l[0] < r[0]) {
bin_search<<<1,256>>>(a,l,r,e,searchValue);
// Required before the host reads the managed l/r values.
cudaDeviceSynchronize();
}
printf("%d\n",l[0]);
cudaFree(a);
cudaFree(l);
cudaFree(r);
cudaFree(e);
cudaFree(searchValue);
return 0;
}
} |
22,416 | #include <stdio.h>
#define BLOCKDIM 512
// Per-block pass of "distance since last space" labeling: buf holds, per
// thread, the index of the most recent space at or before it within the
// block (via an inclusive max-scan); cuPos[gid] = tid - buf[tid] is then the
// distance, or tid+1 when no space was seen in this block (fixed up by the
// `fix` kernel).
//
// Fixes over the previous revision:
//  - out-of-range threads used `gid > strLen` (off by one, reading one char
//    past the string) and returned early, skipping __syncthreads() — a
//    barrier-divergence violation; they now stay in the kernel with a
//    neutral value;
//  - the scan loop bound depended on tid, putting __syncthreads() inside
//    divergent control flow; the loop now runs a uniform log2(BLOCKDIM)
//    iterations for every thread.
__global__ void partial (const char *cuStr, int *cuPos, int strLen) {
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ int buf[BLOCKDIM];
    bool active = (gid < strLen);
    // -1 = "no space seen"; inactive lanes keep -1 so the scan stays defined.
    buf[tid] = (active && cuStr[gid] == ' ') ? tid : -1;
    // Inclusive max-scan over buf (Hillis-Steele), all threads participating
    // in every barrier.
    for (int i = 1; i < BLOCKDIM; i <<= 1) {
        __syncthreads();
        int prev = (tid >= i) ? buf[tid - i] : -1;
        __syncthreads();
        if (prev > buf[tid]) buf[tid] = prev;
    }
    __syncthreads();
    if (active) {
        cuPos[gid] = tid - buf[tid];
    }
}
// Cross-block fix-up: a thread whose partial count equals tid+1 saw no space
// anywhere in its block, so its run extends into the previous block; add the
// count carried by the previous block's last element.
__global__ void fix (int *cuPos, int strLen) {
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + tid;
    bool spansBlockStart = (gid < strLen) && (cuPos[gid] == tid + 1);
    if (spansBlockStart) {
        cuPos[gid] += cuPos[gid - tid - 1];
    }
}
// Host driver: runs the per-block labeling pass, then a fix-up pass that
// propagates run lengths across block boundaries (offset by one block, since
// block 0 needs no fix-up).
// Fix: when strLen <= BLOCKDIM, block_dim is 0 and the old unconditional
// fix<<<0, ...>>> launch was an invalid configuration; it is now skipped.
void labeling (const char *cuStr, int *cuPos, int strLen) {
    int block_dim = strLen / BLOCKDIM;
    partial <<<block_dim + 1, BLOCKDIM>>> (cuStr, cuPos, strLen);
    if (block_dim > 0) {
        fix <<<block_dim, BLOCKDIM>>> (cuPos + BLOCKDIM, strLen - BLOCKDIM);
    }
}
|
22,417 | /* This program finds the count of Odd numbers in an input integer array.
 * The program uses shared memory to count the occurrence of odd numbers in each block.
* The shared memory counter is then added using parallel reduction algorithm.
* Bank conflicts are avoided using padding in the shared memory.
* Output of each block is passed back to the CPU, where all of them are added to get the final count.
* Implemented in CUDA
*
*
*
* coded by Anand Goyal. Dated: 12/13/2014
*/
#include<stdio.h>
#include<cuda.h>
#include<time.h>
#include<sys/time.h>
#define SIZE 2000000
#define THREAD_NUM 4
/* Counts odd values per block: each thread flags its element in shared
 * memory, a tree reduction sums the flags, and thread 0 writes the block's
 * partial count to count[blockIdx.x].
 * Bug fix: threads with tid >= size used to leave their shared slot
 * uninitialized, so the tail block's reduction summed garbage. Every
 * thread now writes its slot. Assumes blockDim.x is a power of two. */
__global__ void countOddKernel(int *inData, long size, int *count)
{
	__shared__ int temp[THREAD_NUM + 1];
	int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if(tid < size && inData[tid] % 2 != 0)
		temp[threadIdx.x] = 1;
	else
		temp[threadIdx.x] = 0;   // also zeroes out-of-range slots
	__syncthreads();
	// Tree reduction over the block's flags.
	int i = blockDim.x/2;
	while(i != 0) {
		if(threadIdx.x < i)
			temp[threadIdx.x] += temp[threadIdx.x + i];
		__syncthreads();
		i /= 2;
	}
	if(threadIdx.x == 0)
		count[blockIdx.x] = temp[0];
}
/* Host driver: fills SIZE random ints, launches countOddKernel, sums the
 * per-block partial counts on the CPU and reports the elapsed GPU time
 * (copy in + kernel + copy out). */
int main()
{
	int *data, *dev_data, *count, *dev_c;
	int i, total_count = 0;
	int numOfBlocks = (SIZE + THREAD_NUM - 1)/THREAD_NUM; // ceil-div
	float elapsedTime;
	cudaEvent_t start, stop;
	cudaEventCreate(&start, 0);
	cudaEventCreate(&stop, 0);
	data = (int *)malloc(sizeof(int) * SIZE);
	count = (int *)malloc(sizeof(int) * numOfBlocks );
	cudaMalloc((void **)&dev_data, sizeof(int) * SIZE);
	cudaMalloc((void **)&dev_c, sizeof(int) * numOfBlocks);
	srand(time(NULL));
	for(i = 0; i < SIZE; i++) {
		data[i] = rand()%100 + 1;
	}
	cudaEventRecord(start, 0);
	cudaMemcpy(dev_data, data, sizeof(int) * SIZE, cudaMemcpyHostToDevice);
	countOddKernel<<<numOfBlocks, THREAD_NUM>>>(dev_data, SIZE, dev_c);
	cudaMemcpy(count, dev_c, sizeof(int) * numOfBlocks, cudaMemcpyDeviceToHost);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	// Final reduction of the per-block partial counts on the host.
	for(i = 0; i < numOfBlocks; i++) {
		total_count += count[i];
	}
	// printf("Number of Odd numbers = %d\n", total_count);
	printf("Time : %3.1f ms \n", elapsedTime);
	// Bug fix: the CUDA events were never destroyed.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	free(data);
	free(count);
	cudaFree(dev_data);
	cudaFree(dev_c);
	return 0;
}
|
22,418 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define MAX_TRIES 100
#define N_LIMIT 20
#define MAX_TEMP_STEPS 500
#define TEMP_START 20
#define COOLING 0.95
#define THREADS 256
#define MAX_CITY 512
#define BOLTZMANN_COEFF 0.1
/* Diagnostic helper for the HANDLE_ERROR macro: on failure, prints the
 * runtime error string plus the call site and aborts the process. */
static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
using namespace std;
// A 2-D city coordinate (TSPLIB style).
struct city {
double x;
double y;
};
// One candidate tour: total length, the visiting order, and the number of
// moves accepted while the tour was last annealed.
struct permutation {
int cost;
int order[MAX_CITY];
int nSucc;
};
// Parameters shared by all kernels, uploaded once into constant memory.
struct GlobalConstants {
int CITY_N;
city* cities;
curandState* devStates;
};
//global variables
struct city *cities;
int CITY_N;
//global variables on GPU (pointers/sizes live in constant memory)
__constant__ GlobalConstants cuTspParam;
/* rounding function, but at .5 rounds to the lower int. Due to the TSPLIB
* standard library.
*/
/* TSPLIB-style rounding: truncate after adding one half. */
__device__ __host__ __inline__ int nint(float x)
{
    return static_cast<int>(x + 0.5);
}
/* Randomisation helpers built on cuRAND; each thread owns its own
 * curandState initialized by initCurand below.
 */
/* Uniform random unsigned int in [0, max) via curand. The modulo
 * introduces a slight bias when max does not divide 2^32. */
__device__ unsigned __inline__ int randomInt(curandState *state, unsigned int max) {
return curand(state) % max;
}
/* Uniform random value in (0, 1]; curand_uniform yields a float that is
 * simply widened to double. */
__device__ __inline__ double randomDouble(curandState *state)
{
return (double) curand_uniform(state);
}
/* Fair coin flip: draw a value in [0, 256) and test bit 7. */
__device__ __inline__ bool randomBool(curandState *state)
{
    return ((randomInt(state, 256) >> 7) & 0x00000001) != 0;
}
/* One-time setup kernel: gives each thread its own curand sequence derived
 * from the shared seed and the thread's global index. */
__global__ void initCurand(curandState *state, unsigned long seed) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, idx, 0, &state[idx]);
}
/* Rounded Euclidean distance between two cities (TSPLIB EUC_2D metric).
 * Coordinates are double but the arithmetic is deliberately float. */
__device__ __host__ __inline__ int euclideanDistance(struct city *a, struct city *b)
{
float dx = b->x - a->x;
float dy = b->y - a->y;
return nint((sqrt(dx * dx + dy * dy)));
}
/* Calcuates the delta of the costs given by a new order using reverse
*/
__device__ int reverseCost(struct city *cities, int *order, int *n)
{
// n[0]/n[1]: ends of the segment to be reversed; n[2]/n[3]: their outside
// neighbours. Two tour edges are removed, two added; returns the delta.
int cost;
cost = -euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]);
cost -= euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[2]]]);
return cost;
}
/* The order of the city is changed by swapping the
* order between n[0] and n[1].
* The swapping is done beginning from the outer end
* going into the middle
*/
__device__ void reverse(int *order, int *n)
{
int CITY_N = cuTspParam.CITY_N;
// The segment is circular (indices wrap), so swap from both ends inward.
int swaps = (1 + ((n[1] - n[0] + CITY_N) % CITY_N)) / 2; // this many elements have to be swapped to have a complete reversal
for (int j = 0; j < swaps; ++j) {
int k = (n[0] + j) % CITY_N;   // advances from the segment start
int l = (n[1] - j + CITY_N) % CITY_N; // retreats from the segment end
int tmp = order[k];
order[k] = order[l];
order[l] = tmp;
}
}
/* Calculates the delta of the costs of the city order if
* the transportation of this segments (given by n) are actually
* done.
*/
__device__ int transportCost(struct city *cities, int *order, int *n)
{
// Segment [n[0], n[1]] is cut out (neighbours n[4]/n[5]) and spliced
// between the adjacent cities n[2]/n[3]; three edges are removed and three
// added. Returns the cost delta only -- the move itself is `transport`.
int cost;
cost = -euclideanDistance(&cities[order[n[1]]], &cities[order[n[5]]]);
cost -= euclideanDistance(&cities[order[n[0]]], &cities[order[n[4]]]);
cost -= euclideanDistance(&cities[order[n[2]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]);
cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[4]]], &cities[order[n[5]]]);
return cost;
}
/* Transport the path segment (consisting of the start n[0] and end at n[1]
* to the path given by n[2] and n[3], which are adjacent and the segment is
* to be placed in between. n[4] is the city preceding n[0] and n[5] succeeds
* n[1].
* Transportation should only be done if the metroplis algorithm agrees.
*
*/
__device__ void transport(int *order, int *n)
{
int CITY_N = cuTspParam.CITY_N;
// Per-thread scratch copy of the whole tour (MAX_CITY ints; large enough
// that it likely lives in local memory rather than registers).
int newOrder[MAX_CITY];
int m1 = (n[1] - n[0] + CITY_N) % CITY_N; // length-1 of the moved segment
int m2 = (n[4] - n[3] + CITY_N) % CITY_N; // span from insertion point back to n[4]
int m3 = (n[2] - n[5] + CITY_N) % CITY_N; // remaining span
int i = 0;
// Rebuild the tour: moved segment first, then the two remaining arcs.
for (int j = 0; j <= m1; ++j) {
newOrder[i++] = order[(j + n[0]) % CITY_N];
}
for (int j = 0; j <= m2; ++j) {
newOrder[i++] = order[(j + n[3]) % CITY_N];
}
for (int j = 0; j <= m3; ++j) {
newOrder[i++] = order[(j + n[5]) % CITY_N];
}
for (int j = 0; j < CITY_N; ++j) {
order[j] = newOrder[j];
}
}
/* Metroplis algorithm: Always take the downhill path and
* sometime take the uphill path to avoid local minima
*/
__device__ __inline__ bool metropolis(const int cost, const double t, curandState *state)
{
// Accept any improvement; accept an uphill move of +cost with probability
// exp(-BOLTZMANN_COEFF * cost / t), which shrinks as t cools.
return cost < 0 || randomDouble(state) < exp((double) (BOLTZMANN_COEFF * -cost / t));
}
/* Main kernel function */
/* Annealing kernel: each thread mutates its own tour (permutations[id]) at
 * temperature t with up to MAX_TRIES*CITY_N random reverse/transport moves,
 * accepted by the Metropolis criterion, and stops early after
 * N_LIMIT*CITY_N successes.
 * Bug fix: the curand state is now written back to global memory at the
 * end; previously every launch replayed the exact same random sequence. */
__global__ void solve(struct permutation *permutations, const float t)
{
struct city* cities = cuTspParam.cities;
int CITY_N = cuTspParam.CITY_N;
int notSeg; // number of cities not on the segment
int maxChangeTries = MAX_TRIES * CITY_N;
int succLimit = N_LIMIT * CITY_N;
int dCost;
bool ans;
int n[6];
int id = blockDim.x * blockIdx.x + threadIdx.x;
struct permutation *perm = &(permutations[id]);
curandState localState = cuTspParam.devStates[id];
perm->nSucc = 0;
for (int j = 0; j < maxChangeTries; ++j) {
// Pick a random circular segment [n[0], n[1]] leaving at least two
// cities outside it.
do {
n[0] = randomInt(&localState, CITY_N);
n[1] = randomInt(&localState, CITY_N - 1);
if (n[1] >= n[0])
++n[1];
notSeg = (n[0] - n[1] + CITY_N - 1) % CITY_N;
} while (notSeg < 2);
/* It is randomly chosen whether a transportation or a reversion is done */
if (randomBool(&localState)) {
// Transport: random insertion point n[2]/n[3] outside the segment,
// plus the segment's current neighbours n[4]/n[5].
n[2] = (n[1] + randomInt(&localState, abs(notSeg - 1)) + 1) % CITY_N;
n[3] = (n[2] + 1) % CITY_N;
n[4] = (n[0] + CITY_N- 1) % CITY_N;
n[5] = (n[1] + 1) % CITY_N;
dCost = transportCost(cities, perm->order, n);
ans = metropolis(dCost, t, &localState);
if (ans) {
++perm->nSucc;
perm->cost += dCost;
transport(perm->order, n);
}
} else {
// Reversal: n[2]/n[3] are the cities adjacent to the segment ends.
n[2] = (n[0] + CITY_N - 1) % CITY_N;
n[3] = (n[1] + 1) % CITY_N;
dCost = reverseCost(cities, perm->order, n);
ans = metropolis(dCost, t, &localState);
if (ans) {
++perm->nSucc;
perm->cost += dCost;
reverse(perm->order, n);
}
}
/* Finish early if there are enough successful changes */
if (perm->nSucc > succLimit)
break;
}
// Persist the RNG state so the next launch continues the sequence.
cuTspParam.devStates[id] = localState;
}
/* Host-side simulated-annealing driver. Each temperature step broadcasts
 * the current best tour to THREADS device permutations, runs `solve` on all
 * of them, and keeps the cheapest result across all steps.
 * Fixes relative to the original: the cudaMemcpyToSymbol argument was
 * HTML-mangled ("¶ms"); the initial currPerm allocation and devStates
 * were leaked; the broadcast loop shadowed the outer loop variable and
 * could memcpy a region onto itself. */
class Anneal {
private:
/* Computes the length of the given initial tour, including the closing
 * edge back to the first city. O(n). */
void initialPath(struct permutation *perm, struct city *cities)
{
int i, i1, i2;
perm->cost= 0;
for (i = 0; i < CITY_N - 1; i++) {
i1 = perm->order[i];
i2 = perm->order[i+1];
perm->cost += euclideanDistance(&cities[i1], &cities[i2]);
}
i1 = perm->order[CITY_N - 1];
i2 = perm->order[0];
perm->cost += euclideanDistance(&cities[i1], &cities[i2]);
cout << "Initial path length: " << perm->cost << endl;
}
/* Prints the tour cost, accepted-move count and (optionally) the order. */
void printInformation(struct permutation *currPerm, bool showOrder = true)
{
cout << "Path Length = " << currPerm->cost << endl;
cout << "Successful Moves: " << currPerm->nSucc << endl;
if (showOrder) {
cout << "Order: ";
for (int j = 0; j < CITY_N; j++) {
cout << currPerm->order[j] << " ";
}
}
cout << endl;
}
public:
double runtime;    // overall wall-clock time of order(), in ms
int resultCost;    // cost of the best tour found
Anneal() {}
/* Runs the full annealing schedule starting from `order`; results are
 * reported on stdout and stored in runtime/resultCost. */
void order(struct city *cities, int *order)
{
double t = TEMP_START;
struct permutation *dPermutation;
struct permutation *hPermutation = (struct permutation *) malloc(THREADS * sizeof(struct permutation));
struct city *dCities;
struct permutation *currPerm = (struct permutation *) malloc(sizeof(struct permutation));
// currPerm gets re-pointed into hPermutation below; keep the original
// allocation so it can still be freed (bug fix: it was leaked).
struct permutation *currPermAlloc = currPerm;
struct permutation *allMinPerm= (struct permutation *) malloc(sizeof(struct permutation));
int oldCost = 2147483647;
int repeatCost = 0;
clock_t startAll, endAll; // timer to measure the overall run time
double runtimeAll;
clock_t startCuda, endCuda; //timer to measure the run time of cuda
double cudaRuntime = 0.0f;
curandState *devStates;
startAll = clock();
// Kernel invocation
int threadsPerBlock = 256;
int blocksPerGrid = (THREADS + threadsPerBlock - 1) / threadsPerBlock;
cout << "Threads: " << THREADS << ", Blocks: " << blocksPerGrid << endl;
memcpy(currPerm->order, order, CITY_N * sizeof(int));
initialPath(currPerm, cities);
memcpy(allMinPerm, currPerm, sizeof(struct permutation));
HANDLE_ERROR(cudaMalloc(&dPermutation, THREADS * sizeof(struct permutation)));
HANDLE_ERROR(cudaMalloc(&dCities, CITY_N * sizeof(struct city)));
HANDLE_ERROR(cudaMemcpy(dCities, cities, CITY_N * sizeof(struct city), cudaMemcpyHostToDevice));
// for generating random numbers directly on the device
HANDLE_ERROR(cudaMalloc((void **)&devStates, THREADS * sizeof(curandState)));
initCurand<<<blocksPerGrid, threadsPerBlock>>>(devStates, 1234);
// put global constants into constant memory
GlobalConstants params;
params.cities = dCities;
params.CITY_N = CITY_N;
params.devStates = devStates;
// Bug fix: the second argument was garbled as "¶ms" (HTML entity).
HANDLE_ERROR(cudaMemcpyToSymbol(cuTspParam, &params, sizeof(GlobalConstants)));
/* Try up to MAX_TEMP_STEPS temperature steps. It could stop before if no kernel
 * showed any succesful change or if the solution did not change 5 times
 */
for (int i = 0; i < MAX_TEMP_STEPS; ++i) {
cudaThreadSynchronize(); // NOTE: deprecated alias of cudaDeviceSynchronize
startCuda = clock();
//Copies the current best permutation to each result permutation
for (int k = 0; k < THREADS; ++k) {
if (&hPermutation[k] == currPerm) continue; // avoid self-overlapping memcpy
memcpy(hPermutation[k].order, currPerm->order, CITY_N * sizeof(int));
hPermutation[k].cost = currPerm->cost;
}
HANDLE_ERROR(cudaMemcpy(dPermutation, hPermutation, THREADS * sizeof(struct permutation), cudaMemcpyHostToDevice));
//invoke cuda
solve<<<blocksPerGrid, threadsPerBlock>>>(dPermutation, t);
HANDLE_ERROR(cudaThreadSynchronize());
endCuda = clock();
cudaRuntime += (endCuda - startCuda) * 1000 / CLOCKS_PER_SEC;
HANDLE_ERROR(cudaMemcpy(hPermutation, dPermutation, THREADS * sizeof(struct permutation), cudaMemcpyDeviceToHost));
/* Loops through all resulting permutations and store the one with minimal length but
 * at least one swap.
 * If all threads didn't swap, exit the program.
 * Takes O(n) time.
 */
int minCost = 2147483647;
bool swap = false;
for (int j = 0; j < THREADS; ++j) {
if (minCost >= hPermutation[j].cost && hPermutation[j].nSucc != 0) {
currPerm = &(hPermutation[j]);
minCost = currPerm->cost;
swap = true;
if (minCost < allMinPerm->cost)
memcpy(allMinPerm, currPerm, sizeof(struct permutation));
}
}
if (!swap) {
cout << "No swaps occured. Exit" << endl;
break;
}
if (oldCost == minCost) {
if (++repeatCost == 5) {
cout << "Cost did not change 5 times in a row. Exit" << endl;
break;
}
} else
repeatCost = 0;
cout << endl << "T = " << t << endl;
printInformation(currPerm, false);
oldCost = minCost;
t *= COOLING;
}
endAll = clock();
runtimeAll = (endAll - startAll) / (1.0f * CLOCKS_PER_SEC) * 1000;
cout << endl << "Final Result:" << endl;
cout << "=============" << endl;
printInformation(allMinPerm);
runtime = runtimeAll;
resultCost = allMinPerm->cost;
printf("\nThe program needed an overall time of %.2lf ms.\n", runtimeAll);
printf("%.2lf ms were spent at the CUDA part.\n", cudaRuntime);
printf("So %.2lf ms were spent at the host.", runtimeAll - cudaRuntime);
cudaFree(dPermutation);
cudaFree(dCities);
cudaFree(devStates);   // bug fix: device RNG states were leaked
free(allMinPerm);
free(currPermAlloc);   // bug fix: initial permutation was leaked
free(hPermutation);
}
};
|
22,419 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
// Applies the affine transform stored row-major in transform[0..11] (the
// top 3x4 of a 4x4 matrix) in place to point i of the x/y/z arrays.
// NOTE(review): there is no bounds guard, so the launch configuration must
// provide exactly one thread per point; the caller hard-codes 640*480.
__global__ void transformKernel(float *x ,float *y,float *z,float *transform)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
/*if (_finite(x[i])||
_finite(y[i])||
_finite(z[i]))
return;*/
float x_,y_,z_;
x_ = static_cast<float> (transform [0] * x[i] + transform [1] * y[i] + transform [2] * z[i] + transform [3]);
y_ = static_cast<float> (transform [4] * x[i] + transform [5] * y[i] + transform [6] * z[i] + transform [7]);
z_ = static_cast<float> (transform [8] * x[i] + transform [9] * y[i] + transform [10] * z[i] + transform [11]);
x[i]=x_;
y[i]=y_;
z[i]=z_;
}
/* Copies the x/y/z point arrays and a 4x4 transform to the device, applies
 * transformKernel in place and copies the points back.
 * NOTE(review): the launch is hard-coded to 640*480 threads and the kernel
 * has no bounds guard, so `size` must be exactly 307200 -- confirm callers.
 * Bug fix: every early-return path used to leak the device buffers; all
 * paths now funnel through one cleanup block (cudaFree(NULL) is a no-op).
 * Bug fix: the launch/sync error messages referred to "addKernel". */
void transformCloudGPU_CU(float *x ,float*y ,float *z,float *transform,int size)
{
	float *dev_x = NULL, *dev_y = NULL, *dev_z = NULL, *dev_transform = NULL;
	cudaError_t cudaStatus;
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
		return;
	}
	cudaStatus = cudaMalloc((void**)&dev_x, size * sizeof(float));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Cleanup; }
	cudaStatus = cudaMalloc((void**)&dev_y, size * sizeof(float));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Cleanup; }
	cudaStatus = cudaMalloc((void**)&dev_z, size * sizeof(float));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Cleanup; }
	cudaStatus = cudaMalloc((void**)&dev_transform, 16 * sizeof(float));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Cleanup; }
	cudaStatus = cudaMemcpy(dev_x, x, size * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Cleanup; }
	cudaStatus = cudaMemcpy(dev_y, y, size * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Cleanup; }
	cudaStatus = cudaMemcpy(dev_z, z, size * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Cleanup; }
	cudaStatus = cudaMemcpy(dev_transform, transform, 16 * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Cleanup; }
	transformKernel<<<640,480>>>(dev_x,dev_y,dev_z,dev_transform);
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "transformKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
		goto Cleanup;
	}
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching transformKernel!\n", cudaStatus);
		goto Cleanup;
	}
	// Copy the transformed cloud back over the caller's arrays.
	cudaStatus = cudaMemcpy(x,dev_x, size * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Cleanup; }
	cudaStatus = cudaMemcpy( y,dev_y, size * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Cleanup; }
	cudaStatus = cudaMemcpy( z, dev_z,size * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Cleanup; }
Cleanup:
	cudaFree(dev_x);
	cudaFree(dev_y);
	cudaFree(dev_z);
	cudaFree(dev_transform);
}
|
22,420 | #include <stdio.h>
// Report (but do not abort on) a failed CUDA runtime call.
inline void checkCuda(cudaError_t result) {
  if (result == cudaSuccess) return;
  printf("CUDA Error: %s\n", cudaGetErrorString(result));
}
// Fill the first N entries of a with the value num.
void initWith(float num, float *a, int N)
{
  for (int idx = 0; idx < N; ++idx)
    a[idx] = num;
}
// Grid-stride elementwise sum: result[i] = a[i] + b[i] for every i in [0, N).
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  int step = blockDim.x * gridDim.x;
  while (idx < N)
  {
    result[idx] = a[idx] + b[idx];
    idx += step;
  }
}
// Verify every element equals target; abort the process on the first
// mismatch, otherwise print a success message.
void checkElementsAre(float target, float *array, int N)
{
  for (int idx = 0; idx < N; ++idx)
  {
    if (array[idx] == target) continue;
    printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", idx, array[idx], target);
    exit(1);
  }
  printf("SUCCESS! All values added correctly.\n");
}
/* Allocates three managed vectors (a = 3, b = 4, c = 0), launches the
 * grid-stride addition kernel and verifies c == 7 everywhere. */
int main()
{
  const int N = 2<<20; // 2 * 2^20 = 2,097,152 elements
  size_t size = N * sizeof(float);
  float *a;
  float *b;
  float *c;
  checkCuda(cudaMallocManaged(&a, size));
  checkCuda(cudaMallocManaged(&b, size));
  checkCuda(cudaMallocManaged(&c, size));
  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);
  size_t threads_per_block = 256;
  size_t blocks = (N + threads_per_block - 1) / threads_per_block; // ceil-div
  addVectorsInto<<<blocks, threads_per_block>>>(c, a, b, N);
  checkCuda(cudaGetLastError()); // launch-configuration errors
  checkCuda(cudaDeviceSynchronize()); // execution errors; also makes c visible on host
  checkElementsAre(7, c, N);
  checkCuda(cudaFree(a));
  checkCuda(cudaFree(b));
  checkCuda(cudaFree(c));
}
|
22,421 | /**
*
*
*
*
* Designed and Developed By:
Tahir Mustafa - tahir.mustafa53@gmail.com / k132162@nu.edu.pk
Akhtar Zaman - k132168@nu.edu.pk
Jazib ul Hassan - k132138@nu.edu.pk
Mishal Gohar - k132184@nu.edu.pk
*
* For BS(CS) Final Year Project 2017, NUCES-FAST
* Under the supervision of:
Dr Jawwad Shamsi (HOD CS Department)
Miss Nausheen Shoaib
* With due gratitude to NVIDIA Research Lab, NUCES-FAST
* This code is the intellectual property of the authors,
* available for use under Academic and Educational purposes
* only.
* The Authors reserve the rights to this code
* and related material.
*
* Copyrights 2017
*
*
*
*
*/
#include<sys/socket.h>
#include<arpa/inet.h> // for inet_ntoa()
#include<net/ethernet.h>
#include<netinet/ip_icmp.h> //Provides declarations for icmp header
#include<netinet/udp.h> //Provides declarations for udp header
#include<netinet/tcp.h> //Provides declarations for tcp header
#include<netinet/ip.h> //Provides declarations for ip header
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "kmp_gpu.cuh"
#include <stdio.h>
#include <iostream>
using namespace std;
/*********************************************************/
// GPU Vars
__device__ __constant__ char * dPatterns = NULL; // The Patterns on GPU. Used As String[] / Char[][]
__device__ __constant__ short * dJumps = NULL; // The jump arrays for each pattern, on the GPU memory
__device__ __constant__ short * dPatternLen = NULL; // The patterns lengths, on GPU memory
__device__ __constant__ int dNumPatterns = 0;
char * d_patterns_ptr = NULL;
short * d_jumps_ptr = NULL;
short * d_pattern_len_ptr = NULL;
/*********************************************************/
/*********************************************************/
// Host Vars
int nPatterns = 0; // The number of patterns, on Host RAM
short * hPatternLen = NULL; // Length of patterns, on Host RAM
char ** hPatternText = NULL; // Patterns to scan, on Host RAM
/*********************************************************/
/*********************************************************
Allocates the variables on Host
*/
/*********************************************************
 Allocates the host bookkeeping arrays and the three device buffers used
 for pattern matching.
 patterns: number of patterns; size: total entry budget of the column-major
 pattern and jump matrices.
 Returns false on failure, with every allocation released and every global
 reset (bug fix: the old code left stale, dangling globals behind and
 caught exceptions by value).
*/
bool allocate_GPU_pattern_vars(int patterns, int size)
{
	try {
		hPatternLen = new short[patterns];
		hPatternText = new char*[patterns];
		// Null the per-pattern slots so cleanup never deletes garbage.
		for(int i=0; i<patterns; i++) hPatternText[i] = NULL;
		nPatterns = patterns;
		cudaError_t res;
		res = cudaMalloc((void**)&d_patterns_ptr, sizeof(char)*size);
		if(res != cudaSuccess) goto fail;
		res = cudaMalloc((void**)&d_jumps_ptr, sizeof(short)*size);
		if(res != cudaSuccess) goto fail;
		res = cudaMalloc((void**)&d_pattern_len_ptr, sizeof(short)*patterns);
		if(res != cudaSuccess) goto fail;
		return true;
fail:
		cudaFree(d_patterns_ptr);      d_patterns_ptr = NULL;
		cudaFree(d_jumps_ptr);         d_jumps_ptr = NULL;
		cudaFree(d_pattern_len_ptr);   d_pattern_len_ptr = NULL;
		delete[] hPatternLen;          hPatternLen = NULL;
		delete[] hPatternText;         hPatternText = NULL;
		nPatterns = 0;
		return false;
	} catch(const exception &e) {
		cout << e.what() << endl;
		delete[] hPatternLen;          hPatternLen = NULL;
		delete[] hPatternText;         hPatternText = NULL;
		nPatterns = 0;
		return false;
	}
}
/*********************************************************/
/*********************************************************
Free the allocated GPU and host memories
*/
/*********************************************************
 Free the allocated GPU and host memories.
 Bug fix: every pointer is now reset to NULL after release, so calling
 free_memory() twice (or re-allocating later) cannot double-free.
*/
void free_memory()
{
	int i;
	if(d_patterns_ptr != NULL)
	{
		cudaFree(d_patterns_ptr);
		d_patterns_ptr = NULL;
	}
	if(hPatternText != NULL)
	{
		for(i=0; i<nPatterns; i++)
		{
			if(hPatternText[i] != NULL)
				delete[] hPatternText[i];
		}
		delete[] hPatternText;
		hPatternText = NULL;
	}
	if(d_jumps_ptr != NULL)
	{
		cudaFree(d_jumps_ptr);
		d_jumps_ptr = NULL;
	}
	if(hPatternLen != NULL)
	{
		delete[] hPatternLen;
		hPatternLen = NULL;
	}
	if(d_pattern_len_ptr != NULL)
	{
		cudaFree(d_pattern_len_ptr);
		d_pattern_len_ptr = NULL;
	}
	nPatterns = 0;
}
/*********************************************************/
/********************************************************
A general function which allocates a GPU buffer,
copies the given host data into it, and returns the
device pointer (NULL on failure).
*/
/* Allocates `size` bytes on the device, copies `data` into it, and returns
 * the device pointer. Returns NULL on failure; nothing is leaked. */
void* copy_to_gpu(void *data, int size)
{
	char *dVar;
	cudaError_t cudaRes;
	if((cudaRes = cudaMalloc((void **)&dVar, size)) != cudaSuccess ) {
		printf("Memory Allocation Failed!\n%s\n", cudaGetErrorString(cudaRes));
		return NULL;
	}
	if((cudaRes = cudaMemcpy(dVar, data, size, cudaMemcpyHostToDevice)) != cudaSuccess)
	{
		// Bug fix: this used to report "Memory Allocation Failed!" for a
		// copy failure, hiding the real cause.
		printf("Memory Copy Failed!\n%s\n", cudaGetErrorString(cudaRes));
		cudaFree(dVar);
		return NULL;
	}
	return (void*) dVar;
}
/*********************************************************/
/********************************************************
Given a vector of string patterns
Preprocess them and create jump tables,
Then copy them to GPU
*/
/* Preprocesses the patterns (KMP jump tables), stores pattern characters
 * and jump values column-major across pattern.size() columns, and uploads
 * everything -- plus the constant-memory pointers and pattern count -- to
 * the GPU. Returns 1 on success, -1 on allocation failure, -2 on a CUDA
 * copy failure, -3 on a host-side preprocessing failure.
 * Bug fix: the ptrn/jmps staging buffers leaked on the success and -3
 * paths; exceptions were caught by value. */
int load_patterns_to_gpu(vector<string> pattern, int size)
{
	if(!allocate_GPU_pattern_vars(pattern.size(), size)) {
		return -1;
	}
	char* ptrn = new char[size];
	short* jmps = new short[size];
	memset(ptrn, 0, sizeof(char)*size);
	memset(jmps, 0, sizeof(short)*size);
	int i, j;
	for(i=0; i<pattern.size(); i++) {
		try
		{
			short n = hPatternLen[i] = (short) pattern[i].length();
			hPatternText[i] = new char[n+1];
			strncpy(hPatternText[i], pattern[i].c_str(), n+1);
			hPatternText[i][n] = '\0';
			// Pre-process the pattern (KMP failure table)
			short *jump = new short[n];
			preProcess(pattern[i].c_str(), n, jump);
			// Column-major store: entry j of pattern i lives at column i
			// of row j, matching the access pattern in kmpSearch.
			for(j=0; j<n; j++) {
				ptrn[(j*pattern.size())+i] = pattern[i][j];
				jmps[(j*pattern.size())+i] = jump[j];
			}
			delete[] jump;
		}
		catch(const exception &e)
		{
			cout << e.what() << endl;
			delete[] jmps;
			delete[] ptrn;
			return -3;
		}
	}
	cudaError_t res;
	res = cudaMemcpy(d_patterns_ptr, ptrn, sizeof(char)*size, cudaMemcpyHostToDevice);
	if(res != cudaSuccess) {
		cout << 1 << " " << cudaGetErrorString(res) << endl;
		delete[] jmps;
		delete[] ptrn;
		return -2;
	}
	res = cudaMemcpy(d_jumps_ptr, jmps, sizeof(short)*size, cudaMemcpyHostToDevice);
	if(res != cudaSuccess) {
		cout << 2 << " " << cudaGetErrorString(res) << endl;
		delete[] jmps;
		delete[] ptrn;
		return -2;
	}
	res = cudaMemcpy(d_pattern_len_ptr, hPatternLen, sizeof(short)*nPatterns, cudaMemcpyHostToDevice);
	if(res != cudaSuccess) {
		cout << 3 << " " << cudaGetErrorString(res) << endl;
		delete[] jmps;
		delete[] ptrn;
		return -2;
	}
	// Publish the device pointers / pattern count through constant memory.
	res = cudaMemcpyToSymbol(dPatterns, &d_patterns_ptr, sizeof(char*), 0, cudaMemcpyHostToDevice);
	if(res != cudaSuccess) {
		cout << 4 << " " << cudaGetErrorString(res) << endl;
		delete[] jmps;
		delete[] ptrn;
		return -2;
	}
	res = cudaMemcpyToSymbol(dJumps, &d_jumps_ptr, sizeof(short*), 0, cudaMemcpyHostToDevice);
	if(res != cudaSuccess) {
		cout << 5 << " " << cudaGetErrorString(res) << endl;
		delete[] jmps;
		delete[] ptrn;
		return -2;
	}
	res = cudaMemcpyToSymbol(dPatternLen, &d_pattern_len_ptr, sizeof(short*), 0, cudaMemcpyHostToDevice);
	if(res != cudaSuccess) {
		cout << 6 << " " << cudaGetErrorString(res) << endl;
		delete[] jmps;
		delete[] ptrn;
		return -2;
	}
	res = cudaMemcpyToSymbol(dNumPatterns, &nPatterns, sizeof(int), 0, cudaMemcpyHostToDevice);
	if(res != cudaSuccess) {
		cout << 7 << " " << cudaGetErrorString(res) << endl;
		delete[] jmps;
		delete[] ptrn;
		return -2;
	}
	delete[] jmps;
	delete[] ptrn;
	return 1;
}
/*********************************************************/
/********************************************************
KMP Preprocess function
Builds the failure (jump) table: jump[i] is the length of the longest
proper prefix of pat[0..i] that is also a suffix of it.
*/
/* Builds the KMP failure table for `pat` (length m): jump[i] is the length
 * of the longest proper prefix of pat[0..i] that is also its suffix. */
void preProcess(const char* pat, int m, short* jump)
{
	jump[0] = 0;
	short len = 0;   // length of the currently matched prefix
	short pos = 1;
	while (pos < m) {
		if (pat[pos] == pat[len]) {
			jump[pos++] = ++len;
		} else if (len != 0) {
			len = jump[len - 1];  // fall back without consuming pat[pos]
		} else {
			jump[pos++] = 0;
		}
	}
}
/********************************************************
The GPU packet processor kernel.
Launches the string matcher for part of the data
respective of the thread.
********************************************************/
__global__ void process_packet_gpu(char* buffer, int len, int* results)
{
// Stage the packet into dynamic shared memory (the launch must pass at
// least `len` bytes as the third <<<>>> argument) so the repeated pattern
// scans read fast memory.
extern __shared__ char localBuf[];
int i, stride;
for(i = threadIdx.x; i < len; i += blockDim.x) {
localBuf[i] = buffer[i];
}
__syncthreads();
// Each thread scans the whole packet against a different pattern, strided
// across the entire grid.
i = (blockIdx.x * blockDim.x + threadIdx.x);
stride = (blockDim.x * blockDim.y * blockDim.z * gridDim.x);
while(i < dNumPatterns)
{
kmpSearch((char*) localBuf, len, i, results);
i += stride;
}
}
/********************************************************
GPU based KMP Search
same as host based.
*/
/* Device-side KMP scan of `text` (length n) for pattern `patternId`.
 * Patterns and jump tables are stored column-major across dNumPatterns
 * columns, so `ptr` advances by dNumPatterns per matched character.
 * On a match, the offset and pattern id are written to results[0..1].
 * NOTE(review): every match -- from any thread -- overwrites the same two
 * slots without synchronization, so only one (arbitrary) match survives;
 * confirm that is the intended contract. */
__device__ void kmpSearch(
char* text,
int n,
int patternId,
int * results
)
{
int j=0, i=0, ptr = patternId, m = dPatternLen[patternId];
while (i < n) {
if (text[i] == dPatterns[ptr]) {
i++;
j++;
ptr += dNumPatterns;
} else if (j > 0) {
// Mismatch: fall back along the failure table for this pattern.
ptr -= dNumPatterns;
j = dJumps[ptr];
ptr = (dNumPatterns * j) + patternId;
} else {
i++;
}
if (j == m) {
results[0] = i-j;
results[1] = patternId;
}
}
}
|
22,422 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
__global__ void reduce_kernel(float *in, float *out, int ntot)
{
// In-place pairwise tree reduction within each block: after round k,
// every element whose global index is a multiple of 2^(k+1) holds the sum
// of a 2^(k+1)-wide slice of `in`. Requires blockDim.x to be a power of
// two, otherwise `nthreads` never equals `totthreads` and the loop spins
// forever. `ntot` guards the ragged tail of the last block.
int nthreads = 1;
int totthreads = blockDim.x;
int test = 2;
int index = blockIdx.x * blockDim.x + threadIdx.x;
while(nthreads!=totthreads)
{
if((index % test == 0) && (index + nthreads < ntot))
{
in[index] = in[index] + in[index + nthreads];
}
__syncthreads();
nthreads = nthreads * 2;
test = test * 2;
}
// Every thread of the block stores the same value -- redundant but benign.
out[blockIdx.x] = in[blockIdx.x * blockDim.x];
}
// Deterministic test pattern: h_in[i] = sin(i) for i in [0, ntot).
__host__ void init_vec(float *h_in, int ntot)
{
    int i = 0;
    while (i < ntot)
    {
        h_in[i] = sinf(float(i));
        ++i;
    }
}
// Recompute the reduction on the CPU and report PASS/FAIL based on the
// relative error between the GPU sum and the reference.
__host__ void verif(float sum, float *h_in, int ntot)
{
    float cpu_sum = 0.;
    for(int i = 0 ; i < ntot ; i++)
        cpu_sum += h_in[i];
    float err = fabsf((sum - cpu_sum)/sum);
    printf("GPU sum : %.4e\n", sum);
    printf("CPU sum : %.4e\n", cpu_sum);
    if (err < 1.e-4)
        printf("TEST PASSED (err %.4e < 1.e-4).\n", err);
    else
        printf("TEST FAILED (err %.4e > 1.e-4).\n", err);
}
/* Two-stage GPU sum of sin(i), i in [0, ntot): per-block partial sums into
 * d_bl, then a single block reduces the partials into d_sum. nthreads
 * (and, for the second stage, nblocks) must be powers of two -- see
 * reduce_kernel. */
int main(int argc, char **argv)
{
	float sum;
	int nthreads, nblocks, ntot;
	nthreads = 128;
	// Robustness fix: don't dereference argv[1] when it is missing.
	if (argc < 2)
	{
		printf("usage: %s <ntot>\n", argv[0]);
		return 1;
	}
	ntot = atoi(argv[1]);
	nblocks = (ntot + nthreads - 1) / nthreads;
	printf("Ntot : %d\n", ntot);
	printf("nthreads : %d\n", nthreads);
	printf("nblocks : %d\n", nblocks);
	float *d_sum, *d_bl, *d_in, *h_in;
	h_in = (float*)malloc(ntot*sizeof(float));
	cudaMalloc((void**)&d_sum, sizeof(float));
	cudaMalloc((void**)&d_bl, nblocks*sizeof(float));
	cudaMalloc((void**)&d_in, ntot*sizeof(float));
	init_vec(h_in, ntot);
	cudaMemcpy(d_in, h_in, ntot*sizeof(float), cudaMemcpyHostToDevice);
	dim3 block(nblocks,1,1);
	dim3 thread(nthreads,1,1);
	dim3 unique(1,1,1);
	reduce_kernel<<<block, thread>>>(d_in, d_bl, ntot);
	// Bug fix: the second pass reduces d_bl, which holds nblocks elements,
	// not ntot; passing ntot allowed reads past the end of d_bl.
	reduce_kernel<<<unique,block>>>(d_bl, d_sum, nblocks);
	cudaMemcpy(&sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
	verif(sum, h_in, ntot);
	cudaFree(d_sum);
	cudaFree(d_bl);
	cudaFree(d_in);
	free(h_in);
	return 0;
}
|
22,423 | #include "includes.h"
using namespace std;
// Long-multiplication helper: thread (row, col) multiplies digit `row` of
// str2 by digit `col` of str1 and stores the product in the partial-product
// matrix. No bounds guard: the launch must cover exactly
// str2_len x str1_len threads.
__global__ void multiplyDigits(char* d_str1, char* d_str2, int* d_matrix, int str1_len, int str2_len) {
int row = blockDim.y * blockIdx.x + threadIdx.y;
int col = blockDim.x * blockIdx.y + threadIdx.x;
// Flat index with a row-dependent skew -- presumably each row of d_matrix
// is (str1_len + str2_len + 1) wide with the products shifted one place
// per row, long-multiplication style. TODO(review): confirm against the
// (unseen) host-side layout.
int idx = row * str1_len + (col + (str2_len * row)) + 1 + (row);
d_matrix[idx] = (d_str2[row] - '0') * (d_str1[col] - '0');
} |
22,424 | #include <iostream>
#include <ctime>
#include <time.h>
using namespace std;
/* C = A*B for NxN row-major matrices; one thread computes one element of C.
 * Generalization: a bounds guard now makes the kernel safe for launches
 * whose grid overshoots N (the original relied on an exact grid). */
__global__ void GPU_MatMul(float *A, float *B, float *C, int N)
{
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	if (row >= N || col >= N) return;
	float sum = 0.f;
	for (int n = 0; n < N; ++n)
		sum += A[row*N+n]*B[n*N+col];
	C[row*N+col] = sum;
}
/* Benchmarks GPU vs CPU NxN matrix multiplication for block sizes 1..10
 * (N = 100 * BLOCK_SIZE) and cross-checks the results element-wise.
 * Bug fix: every loop iteration leaked all four host buffers and all three
 * device buffers; they are now released at the end of each iteration. */
int main(int argc, char *argv[])
{
	cout << "Executing Matrix Multiplication" << endl;
	for(int BLOCK_SIZE =1; BLOCK_SIZE<=10; BLOCK_SIZE++){
	// Perform matrix multiplication C = A*B where A, B and C are NxN;
	// restricted to N = K*BLOCK_SIZE so the grid tiles exactly.
	int N,K;
	K = 100;
	N = K*BLOCK_SIZE;
	clock_t t;
	t= clock();
	cout << "Matrix size: " << N << "x" << N << endl;
	// Allocate and initialize the matrices on the host
	float *hA,*hB,*hC;
	hA = new float[N*N];
	hB = new float[N*N];
	hC = new float[N*N];
	for (int j=0; j<N; j++){
	for (int i=0; i<N; i++){
	hA[j*N+i] = 2.f*(j+i);
	hB[j*N+i] = 1.f*(j-i);
	}
	}
	// Allocate memory on the device
	int size = N*N*sizeof(float); // Size of the memory in bytes
	float *dA,*dB,*dC;
	cudaMalloc(&dA,size);
	cudaMalloc(&dB,size);
	cudaMalloc(&dC,size);
	dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE);
	dim3 grid(K,K);
	// Copy inputs to the device and run the kernel
	cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice);
	cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice);
	GPU_MatMul<<<grid,threadBlock>>>(dA,dB,dC,N);
	// CPU reference multiplication
	float sum;
	for (int row=0; row<N; row++){
	for (int col=0; col<N; col++){
	sum = 0.f;
	for (int n=0; n<N; n++){
	sum += hA[row*N+n]*hB[n*N+col];
	}
	hC[row*N+col] = sum;
	}
	}
	// Copy the GPU result back (cudaMemcpy synchronizes with the kernel)
	float *C;
	C = new float[N*N];
	cudaMemcpy(C,dC,size,cudaMemcpyDeviceToHost);
	// Cross-check; setting row = col = N exits both loops on first mismatch
	for (int row=0; row<N; row++){
	for (int col=0; col<N; col++){
	if ( C[row*N+col] != hC[row*N+col] ){
	cout << "Wrong answer!" << endl;
	row = col = N;
	}
	}
	}
	t = clock() - t;
	cout<<"Time taken is: "<<((float)t)/CLOCKS_PER_SEC<<endl;
	cout << "Finished." << endl;
	// Release this iteration's buffers (previously leaked).
	delete[] hA;
	delete[] hB;
	delete[] hC;
	delete[] C;
	cudaFree(dA);
	cudaFree(dB);
	cudaFree(dC);
	}
	getchar();
} |
22,425 | #include <cuda_runtime.h>
#include <math.h> // for truncf
#include <stdio.h>
#include <curand_kernel.h>
/****************************************
** Helper functions for CUDA encoding **
** Written by Julieta Martinez, 2016 **
** jltmtzc@gmail.com **
** https://www.cs.ubc.ca/~julm/ **
****************************************/
// Create a global cuda random state. Used for perturbations.
__device__ void _setup_kernel(
int n, // number of codes
curandState *state) { // memory where the curand state will be initialized
// NOTE(review): declared __device__ yet it computes a grid-wide index --
// presumably invoked from a thin __global__ wrapper elsewhere; confirm.
int x = threadIdx.x + blockIdx.x * blockDim.x;
if (x < n) {
// Seed from the device clock() (non-reproducible across runs, and the
// value may differ between threads/SMs); sequence number = thread index,
// no offset.
curand_init((unsigned long long)clock(), x, 0, &state[x]);
}
}
// Perturb codes using reservoir sampling.
__device__ void _perturb(
curandState *state, // random state
unsigned char *codes, // codes to perturb
int n, // number of codes (NOTE(review): unused in this function)
int m, // number of codebooks
int n_to_perturb ) { // how many codes we want to perturb
// Layout: block x handles code-vector x; thread y handles codebook entry y.
// Requires blockDim.y == m and m <= 16 (shared buffer below is fixed-size).
int x = blockIdx.x;
int y = threadIdx.y;
// Copy the codes to local memory
__shared__ unsigned char local_codes [16];
local_codes[y] = codes[(x*m) + y];
__syncthreads();
// Thread 0 of the block does the sequential reservoir-sampling pass.
if (y == 0) {
// Get the rand generator state from global memory
curandState localState = state[x];
// reservoir sampling loop: each position i is replaced with probability
// n_needed / n_left, so exactly up to n_to_perturb entries change.
float n_needed = n_to_perturb;
float n_left = m;
float a_rand;
for (int i=0; i<m; i++) {
a_rand = curand_uniform( &localState );
if (a_rand < (n_needed / n_left)) {
// FIXME hard-coding 256 entries in each codebook
a_rand = curand_uniform( &localState )*256;
local_codes[i] = (unsigned char) truncf(a_rand);
n_needed -= 1.0f;
if (n_needed <= 0.0f) {
// Then we have all the numbers we want
break;
}
}
// Otherwise decrease the number of codes we can change
n_left -= 1.0f;
}
// Save the state back
state[x] = localState;
}
__syncthreads();
// Save the (possibly perturbed) codes back to global memory
codes[(x*m)+y] = local_codes[y];
}
// Compute cost of a certain quantization. Optimized according to
// http://developer.download.nvidia.com/assets/cuda/files/reduction.pdf. This is
// the method described in the eccvw paper.
// Compute the quantization cost ||x - sum_i C_i[b_i]||^2 of each vector,
// following the classic shared-memory reduction scheme.
// Fix over the original: the tail of the reduction relied on implicit
// warp-synchronous execution (no barrier between sub-warp steps) and on
// threads returning before __syncthreads(), which is undefined behaviour
// under Volta+ independent thread scheduling. The same 64/32/.../1 reduction
// tree is kept (so the floating-point result is bit-identical), but every
// step is guarded by `y < s` and followed by a full-block barrier.
// NOTE(review): the early return on x >= n assumes blockDim.x == 1 (one
// vector per block), otherwise the barriers diverge -- confirm launch config.
__device__ void _veccost(
float *d_rx, // data to use (X)
float *d_codebooks, // codebooks (C)
unsigned char *d_codes, // the codes (B)
float *d_veccost, // where to save the cost
int m, // number of codebooks
int n) { // number of vectors in X
// FIXME hard-coding 256 entries in each codebook, and 128 dimensions
const int H = 256; // size of each codebook
const int D = 128; // dimensionality of each vector
int x = threadIdx.x + blockIdx.x * blockDim.x; // 1-to-n
int y = threadIdx.y; // 1-to-128
if ( x >= n ) { return; }
/** Copy rx to shared memory **/
__shared__ float local_rx[ D ];
local_rx[ y ] = d_rx[ x*D + y ];
__syncthreads();
// Subtract the selected codeword of each codebook
for (int i=0; i<m; i++) {
local_rx[ y ] -= d_codebooks[ H*D*i + d_codes[ n*i + x ]*D + y ];
}
// Square the residual entry owned by this thread
local_rx[ y ] = local_rx[ y ]*local_rx[ y ];
__syncthreads();
// Tree reduction along D with a barrier after every step
for (int s = D/2; s > 0; s >>= 1) {
if ( y < s ) {
local_rx[ y ] += local_rx[ y+s ];
}
__syncthreads();
}
if ( y == 0 ) {
d_veccost[ x ] = local_rx[0];
}
}
// Compute cost of a certain quantization -- maximize thread workload
// This implementation is preferred to _veccost1 (above) as it is almost as fast
// but does not hard-code the vector dimensionality.
// Quantization cost per vector, one thread per dimension; the per-thread
// residuals are summed serially by thread 0. Slightly slower than _veccost
// but works for any dimensionality d (shared memory is sized at launch).
__device__ void _veccost2(
float *d_rx, // data to use (X)
float *d_codebooks, // codebooks (C)
unsigned char *d_codes, // the codes (B)
float *d_veccost, // where to save the cost
int d, // dimensionality of the data
int m, // number of codebooks
int n) { // number of vectors in X
// FIXME hard-coding 256 entries in each codebook
const int H = 256; // size of each codebook
const int vec = threadIdx.x + blockIdx.x * blockDim.x; // which vector
const int dim = threadIdx.y; // which dimension
if ( vec >= n ) { return; }
// Stage this vector in dynamically sized shared memory.
extern __shared__ float local_rx[];
local_rx[ dim ] = d_rx[ vec*d + dim ];
__syncthreads();
// Subtract the selected codeword of every codebook.
for (int cb=0; cb<m; cb++) {
local_rx[ dim ] -= d_codebooks[ H*d*cb + d_codes[ n*cb + vec ]*d + dim ];
}
// Square the entry owned by this thread.
local_rx[ dim ] = local_rx[ dim ]*local_rx[ dim ];
__syncthreads();
// Thread 0 accumulates the squared residuals serially.
if ( dim > 0 ) { return; }
for ( int k=1; k<d; k++ ) {
local_rx[0] += local_rx[k];
}
d_veccost[ vec ] = local_rx[0];
}
// Adds a vector to all the columns of a matrix
// Adds vector `vec` (length h) to every row of `matrix` (n rows of h floats).
// 2-D launch: x indexes rows (bounds-checked), threadIdx.y indexes columns.
__device__ void _vec_add( float *matrix, float *vec, int n, int h) {
const int row = threadIdx.x + blockIdx.x * blockDim.x;
const int col = threadIdx.y;
if (row >= n) {
return;
}
matrix[ row*h + col ] += vec[ col ];
}
// Adds the hth column of d_binaries to matrix, and finds the minimum value and index in each
// column and puts the output in d_minv and d_mini
// Viterbi forward step: adds column j of d_binaries to each point's row of
// `matrix`, then takes the per-point minimum over the 256 conditioned values.
// The minimum value goes to d_minv[H*x + j]; its index goes to d_mini[x]
// (overwritten on every call -- it only holds the argmin for the current j).
// Fix over the original: the min-reduction returned threads before
// __syncthreads() and relied on implicit warp synchrony below 64 lanes,
// which is undefined behaviour on Volta+ independent thread scheduling.
// The identical 128/64/.../1 tree is kept, each step guarded by `y < s` and
// followed by a full-block barrier, so the selected minima are unchanged.
// NOTE(review): assumes blockDim.y == 256 (one thread per codebook entry)
// and blockDim.x == 1 -- confirm against the launch site.
__device__ void _viterbi_forward(
float *matrix,
float *d_binaries,
float *d_minv,
int *d_mini,
int n,
int j) {
// Hard-coding 256 entries in each codebook
const int H = 256;
int x = threadIdx.x + blockIdx.x * blockDim.x; // 1-to-n
int y = threadIdx.y; // 1-to-256
if (x < n) {
// Shared memory to find the min
__shared__ float values[H];
__shared__ unsigned char indices[H];
// Add matrix + vec (conditioning on column j)
values[y] = matrix[x*H + y] + d_binaries[H*j + y];
__syncthreads();
// First reduction step also seeds the index array.
if ( y < 128 ) {
if ( values[ y ] > values[ y+128 ] ) {
values[ y ] = values[ y+128 ];
indices[ y ] = (unsigned char)(y+128);
} else {
indices[ y ] = (unsigned char) y;
}
}
__syncthreads();
// Remaining steps of the min tree, barrier after each.
for ( int s = 64; s > 0; s >>= 1 ) {
if ( y < s && values[ y ] > values[ y+s ] ) {
values[ y ] = values[ y+s ];
indices[ y ] = indices[ y+s ];
}
__syncthreads();
}
// Thread 0 publishes the (min value, argmin) pair.
if ( y == 0 ) {
d_minv[H*x + j] = values[0];
d_mini[x] = indices[0];
}
}
}
/****************************
** Encoding ICM functions **
*****************************/
/** Per-column version. Very non-coalesced **/
/** Per-column version of ICM conditioning: one thread per point walks its
whole row of h unaries, adding the binary-term row selected by the point's
current code. Very non-coalesced -- kept for reference/benchmarking. **/
__device__ void _condition_icm(
float *d_ub,
float *d_bb,
unsigned char *d_codek,
int n, int h) {
const int point = threadIdx.x + blockIdx.x * blockDim.x;
if ( point >= n ) {
return;
}
const unsigned char code = d_codek[point];
for( int k=0; k<h; k++) {
d_ub[ point*h + k ] += d_bb[ code*h + k ];
}
}
/** per-element version. More coalesced **/
/** Per-element version of ICM conditioning: thread (x, y) updates a single
unary entry, which keeps the d_ub accesses coalesced along y. **/
__device__ void _condition_icm2(
float *d_ub,
float *d_bb,
unsigned char *d_codek,
int n, int h) {
const int point = threadIdx.x + blockIdx.x * blockDim.x;
const int entry = threadIdx.y;
if ( point >= n ) {
return;
}
d_ub[ point*h + entry ] += d_bb[ d_codek[ point ]*h + entry ];
}
// Version used in final code. Does conditioning and minimization.
// Version used in the final code: does the conditioning and the minimization
// steps of ICM in one pass for codebook `conditioning`, writing the argmin
// entry back as the point's new code.
// Fix over the original: the min-reduction relied on implicit warp-synchronous
// execution (no barrier between sub-warp steps) and on threads returning
// before __syncthreads(), which is undefined behaviour under Volta+
// independent thread scheduling; there was also no full-block barrier between
// the conditioning writes and the first cross-half read. The same
// 128/64/.../1 tree is kept, each step guarded by `y < s` and followed by a
// barrier, so the numeric result is unchanged.
// NOTE(review): assumes blockDim.y == 256 and blockDim.x == 1 -- confirm.
__device__ void _condition_icm3(
float *d_ub, // unary terms
float *d_bb, // binary terms
unsigned char *d_codek, // codes
int conditioning, // which codebook we are minimizing in ICM
int m, // number of codebooks
int n) { // number of vectors
// FIXME hard-coding 256 entries in each codebook
const int H = 256;
int i_idx = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y;
if ( i_idx < n ) {
// Copy unaries to shared memory
__shared__ float s_ub[ H ];
__shared__ unsigned char s_ib[ H ];
s_ub[ y ] = d_ub[ i_idx*H + y ];
__syncthreads();
// Conditioning step of ICM: add the binary terms selected by every other
// codebook. j is the compacted binary-table index (conditioned cb skipped).
int j = 0;
for (int i=0; i<m; i++) {
if ( i == conditioning ) {
continue;
}
s_ub[ y ] += d_bb[ H*H*j + d_codek[ i_idx + n*i ]*H + y ];
j++;
}
__syncthreads();
// Minimization step of ICM: argmin tree over the 256 conditioned unaries.
// First step also seeds the index array.
if ( y < 128 ) {
if ( s_ub[ y ] > s_ub[ y+128 ] ) {
s_ub[ y ] = s_ub[ y+128 ];
s_ib[ y ] = (unsigned char)(y+128);
} else {
s_ib[ y ] = (unsigned char) y;
}
}
__syncthreads();
for ( int s = 64; s > 0; s >>= 1 ) {
if ( y < s && s_ub[ y ] > s_ub[ y+s ] ) {
s_ub[ y ] = s_ub[ y+s ];
s_ib[ y ] = s_ib[ y+s ];
}
__syncthreads();
}
// Copy the new code back to GPU global memory
if ( y == 0 ) {
d_codek[ i_idx + n*conditioning ] = s_ib[ 0 ];
}
}
}
// C interface that we can call from Julia
// C-linkage kernel entry points (unmangled names so they can be looked up and
// launched from Julia). Each kernel simply forwards to the corresponding
// __device__ implementation above.
extern "C"
{
// Initializes the curand state
__global__ void setup_kernel( int n, void* state ) {
_setup_kernel( n, (curandState*) state );
}
// Perturbs the solution using reservoir sampling
__global__ void perturb( void* state, unsigned char *codes, int n, int m, int k ) {
_perturb( (curandState*) state, codes, n, m, k );
}
// Reduction-based quantization cost; hard-codes 128-d vectors. See _veccost.
__global__ void veccost(
float *d_rx,
float *d_codebooks,
unsigned char *d_codes,
float *d_veccost,
int m,
int n
) {
_veccost(d_rx, d_codebooks, d_codes, d_veccost, m, n);
}
// Dimension-agnostic quantization cost. See _veccost2.
__global__ void veccost2(
float *d_rx,
float *d_codebooks,
unsigned char *d_codes,
float *d_veccost,
int d,
int m,
int n
) {
_veccost2(d_rx, d_codebooks, d_codes, d_veccost, d, m, n);
}
// Adds a vector to each column of a matrix. Used to add unary terms.
__global__ void vec_add(float *matrix, float *vec, int n, int h) {
_vec_add( matrix, vec, n, h );
}
// Viterbi forward pass: condition on column j and take per-point minima.
__global__ void viterbi_forward(float *matrix, float *vec, float *d_minv, int *d_mini, int n, int j) {
_viterbi_forward(matrix, vec, d_minv, d_mini, n, j);
}
// ICM conditioning, per-column variant
__global__ void condition_icm(float *d_ub, float *d_bb, unsigned char *d_codek, int n, int h) {
_condition_icm( d_ub, d_bb, d_codek, n, h );
}
// ICM conditioning, per-element variant
__global__ void condition_icm2(float *d_ub, float *d_bb, unsigned char *d_codek, int n, int h) {
_condition_icm2( d_ub, d_bb, d_codek, n, h );
}
// ICM conditioning and minimization in one kernel. See _condition_icm3.
__global__ void condition_icm3(float *d_ub, float *d_bb, unsigned char *d_codek, int conditioning, int m, int n) {
_condition_icm3( d_ub, d_bb, d_codek, conditioning, m, n );
}
}
|
22,426 | #include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
// Compares two strided 5-D tensors element-wise; eq_flag[0] accumulates the
// number of mismatching elements (0 after the kernel means "equal").
// Each thread owns one (n, c, d) coordinate and scans its h x w plane using
// the per-axis strides (n_x..w_x, n_y..w_y) plus the base offsets.
__global__ void tensor_5d_equals (const int n, const int c, const int d, const int h, const int w, const DTYPE* x, const int offset_x, const int n_x, const int c_x, const int d_x, const int h_x, const int w_x, const DTYPE* y, const int offset_y, const int n_y, const int c_y, const int d_y, const int h_y, const int w_y, int* eq_flag) {
const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
const int gid_d = blockIdx.z * blockDim.z + threadIdx.z;
const bool valid = (gid_n < n) && (gid_c < c) && (gid_d < d);
if (valid) {
// Base index of this thread's plane within each tensor.
const int ix = offset_x + gid_n * n_x + gid_c * c_x + gid_d * d_x;
const int iy = offset_y + gid_n * n_y + gid_c * c_y + gid_d * d_y;
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
if (x[ix + i * h_x + j * w_x] != y[iy + i * h_y + j * w_y]){
// Fix: the original used a plain eq_flag[0]++ here, which races when
// multiple threads find mismatches and can under-count; atomicAdd
// makes the counter exact.
atomicAdd(eq_flag, 1);
}
}
};
}
}
22,427 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated float stress kernel: evaluates a fixed expression built from
// denormal constants, divisions by huge values and by -0.0f, then prints the
// accumulated result with full precision.
__global__
void compute(float comp, float var_1,float var_2,float var_3) {
// The sub-expressions and their evaluation order are preserved exactly --
// the bit-level float behaviour is the point of this generated test.
float ratio = +1.8785E-20f / atan2f((+1.7303E-35f / acosf(-1.3519E-35f / var_1)), var_2 / +1.9360E35f);
float hyper = coshf(+1.8422E-44f);
float scale = +1.8333E35f;
comp += scale / hyper - ratio * +1.8329E-25f / (-0.0f * var_3);
printf("%.17g\n", comp);
}
// Allocates a 10-element float array on the host and fills every slot with v.
// The caller owns the buffer and must release it with free().
float* initPointer(float v) {
float *buf = (float*) malloc(sizeof(float)*10);
int i = 0;
while (i < 10) {
buf[i] = v;
++i;
}
return buf;
}
// Reads four floats from the command line, launches the generated kernel
// on a single thread, and waits for its printf output.
int main(int argc, char** argv) {
/* Program variables */
// Fix: the original dereferenced argv[1..4] unconditionally; a missing
// argument was undefined behaviour. Print usage and fail instead.
if (argc < 5) {
fprintf(stderr, "usage: %s v1 v2 v3 v4\n", argv[0]);
return 1;
}
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4);
// Block until the kernel (and its device-side printf) has completed.
cudaDeviceSynchronize();
return 0;
}
|
22,428 | // Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -o matrix_gpu
#include <stdio.h>
const int blocksize = 16;
const int N = 256;
const int gridsize = N / blocksize;
// Element-wise matrix addition c = a + b.
// This variant deliberately swaps the roles of the x/y launch dimensions so
// consecutive threads touch memory a full row apart (NON-coalesced); it is
// the slow counterpart used for the coalescing benchmark.
// NOTE: parameter N is unused -- the extent comes from the launch grid.
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
const int row = blockIdx.x * blockDim.x + threadIdx.x; // x-dim walks rows
const int col = blockIdx.y * blockDim.y + threadIdx.y; // y-dim walks cols
const int pitch = gridDim.x * blockDim.x;
const int idx = row * pitch + col;
c[idx] = a[idx] + b[idx];
}
// Fills two N x N matrices, adds them on the GPU, times the kernel with CUDA
// events, and prints the bottom-right 16x16 corner plus the elapsed time.
int main()
{
float *a = new float[N*N];
float *b = new float[N*N];
float *c = new float[N*N];
unsigned long size = N*N*sizeof(float);
float *gpu_a;
float *gpu_b;
float *gpu_c;
// Note the i + j*N indexing: column j, row i.
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
a[i+j*N] = 10 + i;
b[i+j*N] = (float)j / N;
}
cudaEvent_t begin;
cudaEvent_t end;
float elapsed;
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaMalloc( (void**)&gpu_a, size);
cudaMalloc( (void**)&gpu_b, size);
cudaMalloc( (void**)&gpu_c, size);
dim3 dimBlock( blocksize,blocksize,1);
dim3 dimGrid( gridsize, gridsize );
cudaMemcpy( gpu_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( gpu_b, b, size, cudaMemcpyHostToDevice );
cudaEventRecord(begin, 0);
add_matrix<<<dimGrid, dimBlock>>>(gpu_a,gpu_b,gpu_c,N);
// Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
// its drop-in replacement.
cudaDeviceSynchronize();
cudaEventRecord(end, 0);
cudaMemcpy( c, gpu_c, size, cudaMemcpyDeviceToHost );
cudaFree( gpu_a );
cudaFree( gpu_b );
cudaFree( gpu_c );
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed, begin, end);
// Print the bottom-right 16x16 corner as a sanity check.
for (int i = N-16; i < N; i++)
{
for (int j = N-16; j < N; j++)
{
printf("%0.2f ", c[i+j*N]);
}
printf("\n");
}
printf("\n");
//printf("Blocksize = %i\tN = %i Time : %f\n",blocksize,N,elapsed*1000);
printf("%f\n",elapsed*1000);
// Fix: destroy the timing events and free the host matrices (the original
// leaked all of them).
cudaEventDestroy(begin);
cudaEventDestroy(end);
delete[] a;
delete[] b;
delete[] c;
return 0;
}
|
22,429 | /*
Copyright 2013--2018 James E. McClure, Virginia Polytechnic & State University
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
// Basic cuda functions callable from C/C++ code
#include <cuda.h>
// Allocate `size` bytes of device memory at *address and zero-fill it.
// NOTE(review): the cudaMalloc/cudaMemset return codes are ignored; on
// allocation failure *address is left in an error state -- confirm callers
// can tolerate that.
extern "C" void dvc_AllocateDeviceMemory(void** address, size_t size){
cudaMalloc(address,size);
cudaMemset(*address,0,size);
}
// Blocking host-to-device copy of `size` bytes.
extern "C" void dvc_CopyToDevice(void* dest, void* source, size_t size){
cudaMemcpy(dest,source,size,cudaMemcpyHostToDevice);
}
// Blocking device-to-host copy of `size` bytes.
extern "C" void dvc_CopyToHost(void* dest, void* source, size_t size){
cudaMemcpy(dest,source,size,cudaMemcpyDeviceToHost);
}
// Wait for all previously launched device work to complete.
extern "C" void dvc_Barrier(){
cudaDeviceSynchronize();
}
/*
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
}
while (assumed != old); return __longlong_as_double(old);
}
#endif
*/ |
22,430 | #include <iostream>
#include <math.h>
#include <algorithm>
#include <map>
#include <random>
#include <time.h>
#include <cuda_runtime.h>
using namespace std;
// Maps `key` into [0, tablesize) using one of three integer hash functions.
// hash_num selects the mixer: 0 = add/xor/shift mixing chain,
// 1 = shift-xor mixing with a multiplicative constant, 2 = simple modular
// hash. Any other hash_num yields 0. Callable from both host and device.
__host__ __device__
unsigned hash_func(unsigned key, int hash_num, unsigned tablesize){
const int c2=0x27d4eb2d; // multiplicative constant for variant 1
if (hash_num == 0) {
key = (key+0x7ed55d16) + (key<<12);
key = (key^0xc761c23c) ^ (key>>19);
key = (key+0x165667b1) + (key<<5);
key = (key+0xd3a2646c) ^ (key<<9);
key = (key+0xfd7046c5) + (key<<3);
key = (key^0xb55a4f09) ^ (key>>16);
return key%tablesize;
}
if (hash_num == 1) {
key = (key^61)^(key>>16);
key = key+(key<<3);
key = key^(key>>4);
key = key*c2;
key = key^(key>>15);
return key%tablesize;
}
if (hash_num == 2) {
return ((66*key+32)%537539573)%tablesize;
}
// Unknown variant: degenerate hash.
return 0;
}
// Inserts `key` into the overflow table with linear probing. Each probe
// atomically swaps `key` into the slot; if the slot was occupied (non-zero),
// the displaced occupant becomes the key to place and probing continues at
// the next slot. Gives up after 200 probes, printing "Failed." and dropping
// the carried key. Zero marks an empty slot, so key 0 itself cannot be stored.
// NOTE(review): `key!=NULL` compares an unsigned to the NULL macro (0) --
// it works, but `key != 0` is what is meant.
__device__
void secondtableInsertion(unsigned key, unsigned* secondtable){
// Fixed-size overflow table (2^18); the hash is masked into range.
unsigned secondtablesize = pow(2,18);
unsigned location = ((33*key+87)%116743349)&(secondtablesize-1);
for(unsigned i = 0; i < 200; ++i) {
key = atomicExch(&secondtable[location],key);
if(key!=NULL){
location++;
// NOTE(review): wraps at secondtablesize-1, so the last slot is never
// probed -- looks like an off-by-one; confirm intent.
if(location == secondtablesize-1){
location = 0;
}
continue;
}
return;
}
printf("Failed.\n");
return;
}
// Looks up every key in the cuckoo table (one thread per key), falling back
// to a linear probe of the overflow table. The lookup result is not recorded;
// this kernel exists for timing.
__global__ void lookupHash(unsigned* keys, unsigned* table,unsigned keysize,unsigned tablesize, unsigned* secondtable,int hash_num){
int index = blockDim.x * blockIdx.x + threadIdx.x;
// Fix: the original guard was `index > keysize`, letting the thread with
// index == keysize read one element past the end of `keys`.
if(index >= keysize){
return;
}
unsigned key = keys[index];
// Candidate locations, one per hash function (hash_num <= 3).
unsigned location[3];
for(unsigned j = 0; j < hash_num; ++j) {
location[j] = hash_func(key,j,tablesize);
}
for(unsigned i = 0; i < hash_num; ++i) {
if(atomicCAS(&table[location[i]], key, key) == key){
return; // found in the main table
}
}
// Probe the overflow table with the same hash/probe scheme as insertion.
unsigned secondtablesize = pow(2,18);
unsigned location1 = ((33*key+87)%116743349)&(secondtablesize-1);
unsigned key1;
for(unsigned i = 0; i < 200; ++i) {
// Fix: the original probed `table[location1]` here instead of
// `secondtable[location1]`, so keys parked in the overflow table could
// never be found.
key1 = atomicCAS(&secondtable[location1],key,key);
if(key1 == key || key1 == 0){
return; // found, or hit an empty slot (key is absent)
}
location1++;
if(location1 == secondtablesize-1){
location1 = 0;
}
}
return;
}
// Inserts cuda_keys into the cuckoo table, one thread per key. A displaced
// occupant is rehashed and reinserted; after hash_num*M evictions the
// still-homeless key is parked in the overflow table.
__global__ void cuckooHash(unsigned* cuda_tables, unsigned* cuda_keys, unsigned keysize, int M,int hash_num, unsigned tablesize, unsigned* secondtable){
int index = blockDim.x*blockIdx.x + threadIdx.x;
if(index >= keysize) {
return;
}
unsigned key = cuda_keys[index];
unsigned location[3];
location[0] = hash_func(key,0,tablesize);
for(unsigned i = 0; i <= hash_num*M; ++i) {
if(i==hash_num*M) {
// Eviction budget exhausted: move the carried key to the overflow table.
secondtableInsertion(key,secondtable);
// Fix: the original fell through after this call and performed one more
// atomicExch, leaving the key in BOTH tables and losing whatever key
// that final exchange evicted.
return;
}
key = atomicExch(&cuda_tables[location[i%hash_num]],key);
if(key==NULL) {
return; // landed in an empty slot
}
// Rehash the displaced key to get its candidate slots for the next round.
for(unsigned j = 0; j < hash_num; ++j) {
location[j] = hash_func(key,j,tablesize);
}
}
}
// Benchmark driver: for key-set sizes 2^10..2^24 (five trials each), builds a
// fresh cuckoo + overflow table, inserts random distinct keys on the GPU, and
// prints the insertion time.
int main() {
for(unsigned t = 0; t < 5; ++t) {
for(unsigned s = 10; s < 25; ++s) {
int hash_num = 3;
unsigned tablesize = pow(2,25);
unsigned secondtablesize = pow(2,18);
unsigned *tables = (unsigned *)malloc(tablesize*sizeof(unsigned));
unsigned *secondtable = (unsigned *)malloc(secondtablesize*sizeof(unsigned));
unsigned keysize = pow(2,s);
unsigned *keys = (unsigned *)malloc(keysize*sizeof(unsigned));
// Zero is the "empty slot" sentinel in both tables.
for(unsigned i = 0; i < tablesize; ++i) {
tables[i] = 0;
}
for(unsigned i = 0; i < secondtablesize; ++i) {
secondtable[i] = 0;
}
// Draw keysize distinct random keys in [1, 2^32-1].
std::map<unsigned ,bool> randommap;
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<unsigned> dis(1, pow(2,32)-1);
for(unsigned i = 0; i < keysize; ++i) {
unsigned rand = dis(gen);
while(randommap.find(rand) != randommap.end()) {
rand = dis(gen);
}
randommap[rand] = true;
keys[i] = rand;
}
unsigned* cuda_keys;
unsigned* cuda_tables;
unsigned* cuda_secondtable;
int blockSize;
int minGridSize;
int gridSize;
cudaDeviceReset();
cudaMalloc(&cuda_tables, tablesize*sizeof(unsigned));
cudaMalloc(&cuda_keys, keysize*sizeof(unsigned));
cudaMalloc(&cuda_secondtable, secondtablesize*sizeof(unsigned));
// Fix: the original passed element counts as the cudaMemcpy byte count,
// so only a quarter of each buffer was actually copied; the count must
// be in bytes.
cudaMemcpy(cuda_tables, tables, tablesize*sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_keys, keys, keysize*sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_secondtable, secondtable, secondtablesize*sizeof(unsigned), cudaMemcpyHostToDevice);
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cuckooHash, 0, 1000000);
gridSize = (keysize + blockSize - 1) / blockSize;
// Eviction budget per key: 4 * log2(keysize) rounds.
int M = (int)4*ceil(log2((double)keysize));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cuckooHash<<<gridSize,blockSize>>>(cuda_tables, cuda_keys, keysize, M,hash_num,tablesize,cuda_secondtable);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float kernelTime;
cudaEventElapsedTime(&kernelTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("s = %d,time: %.2f ms\n",s,kernelTime);
// Fix: release host and device buffers -- the original leaked all six
// allocations on every inner iteration.
cudaFree(cuda_tables);
cudaFree(cuda_keys);
cudaFree(cuda_secondtable);
free(tables);
free(secondtable);
free(keys);
}
}
return 0;
}
22,431 | #include "includes.h"
// Appears to backpropagate through a conv layer: scatters weight * nd_preact
// contributions into d_output via atomicAdd, with flipped kernel indices
// (kernel_size-1 - ...) as in a transposed convolution -- TODO confirm
// against the forward pass.
// Work partition: thread `pos` of `totalPos` handles the contiguous slice
// [N*pos/totalPos, N*(pos+1)/totalPos) of the flattened index space.
// NOTE(review): the CONV flag is unused in this body -- confirm whether a
// CONV-specific path was intended.
__global__ void bp_output_conv(float *d_output, float *weight, float *nd_preact, const int size, const int kernel_size, const int n_size, const int in_channel, const int out_channel, bool CONV, bool SAME)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int totalPos = blockDim.x * gridDim.x;
// One work item per (channel pair, kernel cell, output cell).
const int N = kernel_size * kernel_size * size * size * in_channel * out_channel;
const int weight_channel = out_channel * in_channel;
const int padding = (kernel_size - 1) / 2; // must be int
for (int n = N * pos / totalPos; n < N * (pos+1) / totalPos; ++n) {
// Decode the flat index into (channel pair, kernel row/col, row, col).
int idx = n;
int bpinput_row;
int bpinput_col;
const int i_channel = ((idx /= 1 ) % weight_channel);
const int i_kernel_row = ((idx /= weight_channel) % kernel_size);
const int i_kernel_col = ((idx /= kernel_size) % kernel_size);
const int i_row = ((idx /= kernel_size ) % size);
const int i_col = ((idx /= size) % size);
if (SAME){ // SAME padding scheme implemented
bpinput_row = i_kernel_row + i_row - padding;
bpinput_col = i_kernel_col + i_col - padding;
}
else{
bpinput_row = i_kernel_row + i_row - 2 * padding;
bpinput_col = i_kernel_col + i_col - 2 * padding;
}
// Accumulate only where the upstream-gradient position is in bounds.
if(bpinput_row >= 0 && bpinput_row < n_size && bpinput_col >=0 && bpinput_col < n_size){
atomicAdd(&d_output[((i_channel % in_channel) * size + i_col) * size + i_row],
weight[(i_channel * kernel_size + (kernel_size - 1 - i_kernel_col)) * kernel_size + kernel_size - 1 - i_kernel_row]
* nd_preact[((i_channel % out_channel) * n_size + bpinput_col) * n_size + bpinput_row]);
}
}
}
22,432 | #include <stdio.h>
// Enumerates all CUDA devices and prints their thread/block limits, memory
// characteristics, and compute-capability information.
// NOTE(review): return codes of cudaGetDeviceCount/cudaGetDeviceProperties
// are ignored; on a machine without a working driver `count` may be
// unreliable -- consider checking the cudaError_t results.
int main(void){
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
printf("\nNumber of Devices: %d\n", count);
for(int i=0; i<count; i++){
cudaGetDeviceProperties(&prop, i);
printf("\n ---Device %d Information---\n", i);
printf("Name: %s\n", prop.name);
//Thread/Blocks
printf("Shared Mem Per Block: %lu\n", prop.sharedMemPerBlock);
printf("Registers Per Block: %d\n", prop.regsPerBlock);
printf("Registers Per MultiProcessor: %d\n", prop.regsPerMultiprocessor);
printf("Max Threads per Block: %d\n", prop.maxThreadsPerBlock);
printf("Max Threads per MultiProcessor: %d\n\n", prop.maxThreadsPerMultiProcessor);
printf("Warp Size: %d\n", prop.warpSize);
printf( "Max thread dimensions:(%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions:(%d, %d, %d)\n\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
//Memory
printf("Total Global Mem: %lu\n", prop.totalGlobalMem);
printf("Total Constant Memory: %lu\n", prop.totalConstMem);
printf("Total Managed Memory: %d\n", prop.managedMemory);
printf("Shared Memory Per Block: %lu\n", prop.sharedMemPerBlock);
printf("Shared Memory Per MultiProcessor: %lu\n", prop.sharedMemPerMultiprocessor);
printf("Device can Map Host Memory: %s\n", prop.canMapHostMemory?"Enabled":"Disabled");
printf("Error Correcting code Mem: %s\n", prop.ECCEnabled?"Enabled":"Disabled");
printf("Memory Bus Width: %d\n", prop.memoryBusWidth);
printf("Memory Pitch: %lu\n\n", prop.memPitch);
//Computational Info
printf("Major Compute Capability: %d\n", prop.major);
printf("Minor Compute Capability: %d\n", prop.minor);
printf("ClockRate: %d\n", prop.clockRate);
printf("MultiProcessor Count: %d\n", prop.multiProcessorCount);
printf("Device Overlap: %d\n", prop.deviceOverlap);
printf("Kernel Execution Timeout: %s\n", prop.kernelExecTimeoutEnabled?"Enabled":"Disabled");
printf("Concurrent Kernels: %d\n", prop.concurrentKernels);
}
return 0;
}
|
22,433 |
// Empty no-op kernel -- presumably a launch-overhead probe or a template;
// TODO confirm intended use.
__global__
void main_kernel()
{
}
|
22,434 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
extern "C" {
// C-callable wrapper around thrust::inclusive_scan: computes the inclusive
// prefix sum of N ints. data_in/data_out are raw DEVICE pointers.
void scan_int_wrapper( int *data_in, int N, int *data_out)
{
thrust::device_ptr<int> dev_ptr_in(data_in);
thrust::device_ptr<int> dev_ptr_out(data_out);
thrust::inclusive_scan(dev_ptr_in, dev_ptr_in+N, dev_ptr_out);
}
// Same as scan_int_wrapper, for float buffers.
void scan_float_wrapper( float *data_in, int N, float *data_out)
{
thrust::device_ptr<float> dev_ptr_in(data_in);
thrust::device_ptr<float> dev_ptr_out(data_out);
thrust::inclusive_scan(dev_ptr_in, dev_ptr_in+N, dev_ptr_out);
}
// Same as scan_int_wrapper, for double buffers.
void scan_double_wrapper( double *data_in, int N, double *data_out)
{
thrust::device_ptr<double> dev_ptr_in(data_in);
thrust::device_ptr<double> dev_ptr_out(data_out);
thrust::inclusive_scan(dev_ptr_in, dev_ptr_in+N, dev_ptr_out);
}
} |
22,435 | // N-S equation demonstration
// High Performance Scitific Computation
// Cavity Lid Driven Flow
// CUDA version
// 19M18085 Lian Tongda
#include <iostream>
#include <cmath>
#include <cstdlib>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <string>
#include <algorithm>
using namespace std;
// Initialization of variables
// Zero-initializes all fields on the nx x ny grid (row j, column i, flat
// index j*nx+i) and sets the lid velocity u = un = 1 on the top row.
// Fix: the lid row was addressed as (nx-1)*nx+i, which is the top row only
// when nx == ny (true for this 41x41 case) and writes out of bounds when
// ny < nx; it now uses (ny-1)*nx+i -- identical behaviour for square grids.
void init(double *u, double *un, double *v, double *vn, double *p, double *pn, double *b, int nx, int ny)
{
int i, j;
for(i = 0; i < nx ; i++){
for(j = 0; j < ny ; j++){
u[j*nx+i] = 0.0;
un[j*nx+i] = 0.0;
v[j*nx+i] = 0.0;
vn[j*nx+i] = 0.0;
p[j*nx+i] = 0.0;
pn[j*nx+i] = 0.0;
b[j*nx+i] = 0.0;
}
}
// Velocity of Lid (top row, j = ny-1)
for(i = 0 ; i < nx ;i++)
{
u[(ny-1)*nx + i] = 1.0;
un[(ny-1)*nx + i] = 1.0;
}
std::cout<<"DATA INITIALIZED"<<std::endl;
}
// Assembles the source term b of the pressure Poisson equation from central
// differences of (u, v): the divergence term scaled by 1/dt minus the
// velocity-gradient products. Interior points only; boundary rows/columns
// are left untouched.
__global__
void build_up_b(double *b, double *u, double *v, double rho, double dt, double dx, double dy, int nx, int ny)
{
int i = threadIdx.x + blockIdx.x * blockDim.x ;
int j = threadIdx.y + blockDim.y * blockIdx.y ;
// double term1, term2, term3, term4;
if(i > 0 && i < nx - 1 && j > 0 && j < ny - 1)
{
// term1 = (u[j*nx + i+1] - u[j*nx + i-1]) / (2 * dx) +
// (v[(j+1)*nx+i] - v[(j-1)*nx+i]) / (2 * dy);
//
// term2 = (u[j*nx + i+1] - u[j*nx + i-1]) / (2 * dx);
//
// term3 = (u[(j+1)*nx+i] - u[(j-1)*nx+i]) *
// (v[j*nx + i+1] - v[j*nx + i-1]) / (2*2*dx*dy);
//
// term4 = (v[(j+1)*nx+i] - v[(j-1)*nx+i]) / (2 * dy);
//
// b[j*nx+i] = (term1) / (dt) - (term2) * (term2) - 2 * (term3) - (term4) * (term4);
b[j*nx+i] =
(rho * ( 1.0/dt *
((u[j*nx + i+1] - u[j*nx + i-1]) / (2 * dx)
+ (v[(j+1)*nx+i] - v[(j-1)*nx+i]) / (2 * dy)) -
((u[j*nx+i+1] - u[j*nx+i-1]) / (2*dx)) * ((u[j*nx+i+1] - u[j*nx+i-1]) / (2*dx)) -
2 * ((u[(j+1)*nx+i] - u[(j-1)*nx+i]) / (2*dy) *
(v[j*nx+i+1] - v[j*nx + i-1]) / (2*dx)) -
((v[(j+1)*nx+i] - v[(j-1)*nx+i]) / (2*dy)) * ((v[(j+1)*nx+i] - v[(j-1)*nx+i]) / (2*dy)) ));
}
// Barrier is outside the interior-point branch, so all threads reach it.
__syncthreads();
}
// One Jacobi sweep of the pressure Poisson equation: reads the previous
// iterate pn and the source term b, writes the new iterate p at interior
// points. The caller ping-pongs the p/pn buffers between sweeps.
__global__
void pressure_poisson(double *p, double *pn, double *b, double dx, double dy, int nx, int ny, double rho)
{
int i = threadIdx.x + blockIdx.x * blockDim.x ;
int j = threadIdx.y + blockDim.y * blockIdx.y ;
if(i > 0 && i < nx - 1 && j > 0 && j < ny - 1)
{
p[j*nx+i] =
(((pn[j*nx+i+1] + pn[j*nx+i-1]) * dy * dy +
(pn[(j+1)*nx+i] + pn[(j-1)*nx+i]) * dx * dx)/
(2 * (dx * dx + dy * dy)) -
dx * dx * dy * dy * b[j*nx+i] * rho / (2 * (dx *dx + dy * dy)));
}
__syncthreads();
}
// Applies the pressure boundary conditions: zero-gradient (dp/dn = 0) on the
// left, right, and bottom walls by copying the adjacent interior value, and
// p = 0 on the top (lid) boundary.
__global__
void pressure_boundary(double *p, int nx, int ny)
{
int i = threadIdx.x + blockIdx.x * blockDim.x ;
int j = threadIdx.y + blockDim.y * blockIdx.y ;
if(i == nx - 1 && j > 0 && j < ny - 1)
{
p[j*nx+i] = p[j*nx+i-1]; // dp/dx = 0 at x = 2
}
else if(i == 0 && j > 0 && j < ny - 1)
{
p[j*nx+i] = p[j*nx+i+1]; // dp/dx = 0 at x = 0
}
else if(j == 0 && i < nx )
{
p[j*nx+i] = p[(j+1)*nx+i]; // dp/dy = 0 at y = 0
}
else if(j == ny - 1 && i < nx)
{
p[j*nx+i] = 0.0; // p = 0 at y = 2
}
__syncthreads();
}
// Momentum update for one time step: advances (u, v) at interior points from
// the previous fields (un, vn) using upwind convection, the central pressure
// gradient, and central-difference diffusion.
// NOTE(review): parameters pn and b are unused in this body.
__global__
void cavity_flow(double *u, double *un, double *v, double *vn, double *p, double *pn, double *b, int nx , int ny, double dx, double dy, double dt, double nu, double rho)
{
int i = threadIdx.x + blockIdx.x * blockDim.x ;
int j = threadIdx.y + blockDim.y * blockIdx.y ;
if(i > 0 && i < nx - 1 && j > 0 && j < ny - 1)
{
u[j*nx+i] = (
un[j*nx+i] - un[j*nx+i] * dt / dx * (un[j*nx+i] - un[j*nx+i-1]) -
vn[j*nx+i] * dt / dy * (un[j*nx+i] - un[(j-1)*nx+i]) -
dt / (2 * rho * dx) * (p[j*nx+i+1] - p[j*nx+i-1]) +
nu * (dt / (dx * dx) * (un[j*nx+i+1] - 2*un[j*nx+i] + un[j*nx+i-1]) +
dt / (dy * dy) * (un[(j+1)*nx+i] - 2*un[j*nx+i] + un[(j-1)*nx+i])));
v[j*nx+i] = (
vn[j*nx+i] - un[j*nx+i] * dt / dx * (vn[j*nx+i] - vn[j*nx+i-1]) -
vn[j*nx+i] * dt / dy * (vn[j*nx+i] - vn[(j-1)*nx+i]) -
dt / (2 * rho * dy) * (p[(j+1)*nx+i] - p[(j-1)*nx+i]) +
nu * (dt / (dx * dx) * (vn[j*nx+i+1] - 2*vn[j*nx+i] + vn[j*nx+i-1]) +
dt / (dy * dy) * (vn[(j+1)*nx+i] - 2*vn[j*nx+i] + vn[(j-1)*nx+i])));
}
__syncthreads();
}
// Applies the velocity boundary conditions: no-slip (u = v = 0) on the left,
// right, and bottom walls, and the moving lid (u = 1, v = 0) on the top.
__global__
void velocity_boundary(double *u, double *v, int nx , int ny)
{
int i = threadIdx.x + blockIdx.x * blockDim.x ;
int j = threadIdx.y + blockDim.y * blockIdx.y ;
if(i == nx - 1 && j > 0 && j < ny - 1)
{
u[j*nx+i] = 0.0; // u = 0 at x = 2
v[j*nx+i] = 0.0; // v = 0 at x = 2
}
else if(i == 0 && j > 0 && j < ny - 1)
{
u[j*nx+i] = 0.0; // u = 0 at x = 0
v[j*nx+i] = 0.0; // v = 0 at x = 0
}
else if(j == 0 && i < nx )
{
u[j*nx+i] = 0.0; // u = 0 at y = 0
v[j*nx+i] = 0.0; // v = 0 at y = 0
}
else if(j == ny - 1 && i < nx)
{
u[j*nx+i] = 1.0; // u = 1.0 at y = 2
v[j*nx+i] = 0.0; // v = 0 at y = 2
}
__syncthreads();
}
// Relative change of (u, v) between adjacent time steps:
// ||(u-un, v-vn)||_2 / (||(u, v)||_2 + 1e-30); the epsilon guards against
// division by zero on an all-zero field.
double Error(double *u, double *un, double *v, double *vn, int nx , int ny)
{
double num = 0.0, den = 0.0;
for(int col = 0; col < nx; col++){
for(int row = 0; row < ny; row++){
const int idx = row*nx + col;
const double du = u[idx] - un[idx];
const double dv = v[idx] - vn[idx];
num += du*du + dv*dv;
den += (u[idx] * u[idx]) + (v[idx] * v[idx]);
}
}
return sqrt(num) / (sqrt(den) + 1e-30);
}
// Output u and v along the central line.
// Writes u along the vertical center line and v along the horizontal center
// line to "u_<m>.txt", one (u, v) pair per row.
// NOTE(review): uses nx as both the row stride and the row count, so this
// assumes a square nx x nx grid -- confirm callers pass nx == ny.
void output_u(int m, double *u, double *v, int nx)
{
ostringstream name;
name << "u_" << m << ".txt";
ofstream out(name.str().c_str());
for(int i = 0; i < nx ; i++){
out << u[i * nx + (nx+1)/2] <<" "<< v[ nx * (nx + 1) / 2 + i] << endl;
}
out.close();
}
// Driver for the lid-driven cavity solver: allocates host/device fields,
// time-steps the pressure Poisson + momentum kernels, and every 100 steps
// reports the relative change and dumps the center-line profiles.
int main(void)
{
const int nx = 41; // Mesh points in X direction
const int ny = 41; // Mesh points in Y direction
const int nt = 10000; //Total time steps
const int nit = 50; // Poisson equation iterations
//const double c = 1;
const double dx = 2.0 / (nx - 1); // LX = 2.0 , dx = 0.05
const double dy = 2.0 / (ny - 1); // LY = 2.0 , dy = 0.05
// Re = U_lid * L / nu = 1.0 * 2.0 / 0.1 = 20.0
const double rho = 1.0; // rho is considered constant in this case
const double nu = 0.1; // Kinematic viscosity
const double dt = 0.001; // length of time step
double *u, *un, *v, *vn, *p, *pn, *b, error;
int size = nx * ny * sizeof(double);
u = (double*)malloc(size);
un = (double*)malloc(size);
v = (double*)malloc(size);
vn = (double*)malloc(size);
p = (double*)malloc(size);
pn = (double*)malloc(size);
b = (double*)malloc(size);
init(u, un, v, vn, p, pn, b, nx, ny);
double *u_g, *un_g, *v_g, *vn_g, *p_g, *pn_g, *b_g;
cudaMalloc((void**)&u_g , size);
cudaMalloc((void**)&un_g , size);
cudaMalloc((void**)&v_g , size);
cudaMalloc((void**)&vn_g , size);
cudaMalloc((void**)&p_g , size);
cudaMalloc((void**)&pn_g , size);
cudaMalloc((void**)&b_g , size);
cudaMemcpy(u_g, u, size, cudaMemcpyHostToDevice);
cudaMemcpy(un_g, un, size, cudaMemcpyHostToDevice);
cudaMemcpy(v_g, v, size, cudaMemcpyHostToDevice);
cudaMemcpy(vn_g, vn, size, cudaMemcpyHostToDevice);
cudaMemcpy(p_g, p, size, cudaMemcpyHostToDevice);
cudaMemcpy(pn_g, pn, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_g, b, size, cudaMemcpyHostToDevice);
dim3 threadsPerBlock( 128, 1 );
dim3 blockNumber( (nx + threadsPerBlock.x - 1) / threadsPerBlock.x,
(ny + threadsPerBlock.y - 1) / threadsPerBlock.y);
double *p_temp, *u_temp, *v_temp;
for(int loops = 1; loops <= nt ; loops++)
{
build_up_b<<<blockNumber,threadsPerBlock>>>(b_g, u_g, v_g, rho, dt, dx, dy, nx ,ny);
cudaDeviceSynchronize();
// Jacobi ping-pong: each sweep writes p_g from pn_g, then swaps.
// NOTE(review): after the final swap the newest iterate sits in pn_g,
// yet cavity_flow reads p_g (one sweep behind). With nit = 50 the
// pressure has converged so this is harmless, but confirm the intent.
for(int k = 0; k < nit ; k++){
pressure_poisson<<<blockNumber,threadsPerBlock>>>(p_g, pn_g, b_g, dx, dy, nx ,ny, rho);
pressure_boundary<<<blockNumber,threadsPerBlock>>>(p_g, nx, ny);
p_temp = pn_g; pn_g = p_g; p_g = p_temp;
}
cudaDeviceSynchronize();
cavity_flow<<<blockNumber,threadsPerBlock>>>(u_g, un_g, v_g, vn_g, p_g, pn_g, b_g, nx, ny, dx, dy, dt, nu, rho);
velocity_boundary<<<blockNumber,threadsPerBlock>>>(u_g, v_g, nx, ny);
u_temp = un_g; un_g = u_g ; u_g = u_temp;
v_temp = vn_g; vn_g = v_g ; v_g = v_temp;
cudaDeviceSynchronize();
if (loops % 100 == 0)
{
cudaMemcpy(u , u_g , size, cudaMemcpyDeviceToHost);
cudaMemcpy(v , v_g , size, cudaMemcpyDeviceToHost);
cudaMemcpy(un , un_g, size, cudaMemcpyDeviceToHost);
cudaMemcpy(vn , vn_g, size, cudaMemcpyDeviceToHost);
cudaMemcpy(p , p_g, size, cudaMemcpyDeviceToHost);
error = Error(u, un, v, vn, nx, ny);
cout << loops << " loops has been operated." <<endl;
cout << "The relative error is : " << error <<endl;
// output_paraview_format(loops, nx, ny, 1, u, v, v, p);
output_u(loops, u, v, nx);
}
}
cout << "Computation ends." << endl;
// Fix: release host and device memory -- the original leaked every buffer.
free(u); free(un); free(v); free(vn); free(p); free(pn); free(b);
cudaFree(u_g); cudaFree(un_g); cudaFree(v_g); cudaFree(vn_g);
cudaFree(p_g); cudaFree(pn_g); cudaFree(b_g);
return 0;
}
// Lian Tongda
// Jun 19 2020
|
22,436 | #include "includes.h"
// Computes tmp = A * x for an NX-by-NY row-major matrix A, one thread per
// output row. y is accepted for interface compatibility but not used here.
// fix: the original never used NX and had no bounds guard, so threads past
// the last row wrote tmp[i] out of bounds whenever the grid over-covered NX.
__global__ void trans_norm_vector(double* A, double* x, double* y, double* tmp, int NX, int NY)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < NX) {
        // Accumulate in a register and store once, instead of the original
        // read-modify-write of tmp[i] in global memory on every iteration.
        double acc = 0.0;
        for (int j = 0; j < NY; j++) {
            acc += A[i * NY + j] * x[j];
        }
        tmp[i] = acc;
    }
}
22,437 | #include <iostream>
#include <math.h>
using namespace std;
#define W 500
#define H 500
#define TPB 32
// Device-side helper: squares its argument.
__device__ float square(float x)
{
    return x * x;
}
// One thread per pixel: store the Euclidean distance from pixel (c, r)
// to the reference point pos into the row-major w-by-h output dout.
__global__ void distKernel(float *dout, int w, int h, float2 pos)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if ((col >= w) || (row >= h))
        return;  // grid tail guard: the grid may over-cover the image
    const float dx = col - pos.x;
    const float dy = row - pos.y;
    dout[row * w + col] = sqrt(square(dx) + square(dy));
}
// Host driver: compute a W x H field of distances to the origin on the GPU
// and copy it back.
int main()
{
    float *out = new float [W*H];        // host result buffer
    // fix: the original did `float *dout = new float;` and then let
    // cudaMalloc overwrite the pointer, leaking the host allocation.
    float *dout = nullptr;
    const int size = W*H*sizeof(float);
    cudaMalloc(&dout, size);
    const float2 pos = {0.0, 0.0};
    const dim3 tpb(TPB, TPB);
    const dim3 bpg((W+TPB-1)/TPB, (H+TPB-1)/TPB);  // ceil-div grid
    distKernel<<<bpg,tpb>>>(dout, W, H, pos);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(out, dout, size, cudaMemcpyDeviceToHost);
    cudaFree(dout);
    delete [] out;
}
|
22,438 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define RAD_CONV_FAC (1.0f/60.0f)*(M_PI/180.0f)
#define DEG_CONV_FAC 180.0f/M_PI
/*
Compile with: nvcc -O3 -Xptxas="-v" -arch=sm_30 galaxy_distribution.cu
Run with: time ./a.out real.txt sim.txt
*/
/* ---------- Device code ---------- */
/*
N = number of galaxies (in one list)
a1 = ascension (alpha) for first galaxy list
d1 = declination (delta) for first galaxy list
a2 = ascension for second glaxy list
d2 = declination for second galaxy list
*/
/* One thread per galaxy i of list 1: accumulate the angular-separation
 * histogram (0.25-degree bins) of galaxy i against every galaxy j of
 * list 2. a1/a2 are right ascensions, d1/d2 declinations, in radians. */
__global__ void theta_calc(int N, float* a1, float* d1, float* a2, float* d2, int* hist) {
    // Thread index
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        float angle, temp;
        int j, bin_nr;
        for (j = 0; j < N; j++) {
            // cos(theta) via the spherical law of cosines
            temp = sinf(d1[i])*sinf(d2[j])+cosf(d1[i])*cosf(d2[j])*cosf(a1[i]-a2[j]);
            // fix: clamp on BOTH sides. The original only capped at +1;
            // rounding can also push temp below -1, making acosf return NaN
            // so that bin_nr is garbage and the atomicAdd goes out of bounds.
            angle = acosf(fminf(fmaxf(temp, -1.0f), 1.0f));
            // Convert to degrees and determine the 0.25-degree bin.
            // NOTE(review): an exact 180-degree separation maps to bin 720,
            // one past a 180*4-entry histogram — confirm antipodal pairs
            // cannot occur in the catalogues.
            bin_nr = floor((angle*DEG_CONV_FAC)/0.25);
            // Increment histogram
            atomicAdd(&hist[bin_nr], 1);
        }
    }
}
/* Dump the DD/DR/RR and omega histograms to four text files (one value per
 * line) and print the histogram totals to stdout as a sanity check. */
void debug (int* dd, int* dr, int* rr, float* omega_hist, int bins) {
    FILE *out_file = fopen("output.txt", "w");
    FILE *out_file2 = fopen("output2.txt", "w");
    FILE *out_file3 = fopen("output3.txt", "w");
    FILE *out_file4 = fopen("output4.txt", "w");
    long int dd_tot = 0;
    long int dr_tot = 0;
    long int rr_tot = 0;
    for(int k = 0; k < bins; k++) {
        fprintf(out_file, "%d\n", dd[k]);
        fprintf(out_file2, "%d\n", dr[k]);
        fprintf(out_file3, "%d\n", rr[k]);
        fprintf(out_file4, "%f\n", omega_hist[k]);
        dd_tot += dd[k];
        dr_tot += dr[k];
        rr_tot += rr[k];
    }
    /* fix: the four streams were never closed — a handle leak and a risk of
     * unflushed data if the process exits abnormally. */
    fclose(out_file);
    fclose(out_file2);
    fclose(out_file3);
    fclose(out_file4);
    printf("Total entries in histogram dd: %ld\n", dd_tot);
    printf("Total entries in histogram dr: %ld\n", dr_tot);
    printf("Total entries in histogram rr: %ld\n", rr_tot);
}
/* Landy-Szalay-style estimator: omega[m] = (DD - 2*DR + RR) / RR per bin,
 * for two equally sized catalogues. The first 15 bins are echoed to stdout.
 * NOTE(review): an empty RR bin divides by zero (yields inf/nan) — confirm
 * callers only consume bins where RR > 0. */
void omega_calc(int* dd, int* dr, int* rr, float* omega_hist, int bins) {
    for (int m = 0; m < bins; m++) {
        const float diff = (float)dd[m] - 2.0f*(float)dr[m] + (float)rr[m];
        omega_hist[m] = diff / (float)rr[m];
        if (m < 15) {
            printf("Omega %d: %f\n", m, omega_hist[m]);
        }
    }
}
/* ---------- Host code ---------- */
/* Host driver: read two galaxy catalogues (alternating RA / dec values in
 * arc-minutes), convert to radians, build the DD, DR and RR angular
 * histograms on the GPU, and compute the omega estimator. */
int main (int argc, char *argv[]) { /* fix: parameter was misspelled "agrc" */
    /* fix: argv[1]/argv[2] were dereferenced without checking argc */
    if (argc < 3) {
        printf("Usage: %s real_file sim_file\n", argv[0]);
        exit(-1);
    }
    // Allocate value arrays in unified memory (visible to host and device)
    int N = 100000;
    float* real_values_asc;
    float* real_values_dec;
    float* sim_values_asc;
    float* sim_values_dec;
    cudaMallocManaged(&real_values_asc, N*sizeof(float));
    cudaMallocManaged(&real_values_dec, N*sizeof(float));
    cudaMallocManaged(&sim_values_asc, N*sizeof(float));
    cudaMallocManaged(&sim_values_dec, N*sizeof(float));
    // Allocate the histograms: 0.25-degree bins spanning 180 degrees
    int bins = 180*4;
    int* dd;
    int* dr;
    int* rr;
    float* omega_hist = (float*)malloc(bins*sizeof(float));
    cudaMallocManaged(&dd, bins*sizeof(int));
    cudaMallocManaged(&dr, bins*sizeof(int));
    cudaMallocManaged(&rr, bins*sizeof(int));
    // Read values from the files
    FILE * file_real = fopen(argv[1], "r");
    FILE * file_sim = fopen(argv[2], "r");
    if(!file_real || !file_sim) {
        printf("Something went wrong with the file reading...\n");
        exit(-1);
    }
    /* Values alternate RA, dec, so iterate 2*N times reading one per pass.
     * NOTE(review): fscanf return values are unchecked — a short file
     * silently leaves trailing entries uninitialized. */
    for (int i = 0; i < 2*N; i++) {
        if(i%2 == 0) { // First column, right ascension
            fscanf(file_real, "%f", &real_values_asc[i/2]);
            real_values_asc[i/2] *= RAD_CONV_FAC;   // arc-minutes -> radians
            fscanf(file_sim, "%f", &sim_values_asc[i/2]);
            sim_values_asc[i/2] *= RAD_CONV_FAC;
        }
        else { // Second column, declination
            fscanf(file_real, "%f", &real_values_dec[i/2]);
            real_values_dec[i/2] *= RAD_CONV_FAC;
            fscanf(file_sim, "%f", &sim_values_dec[i/2]);
            sim_values_dec[i/2] *= RAD_CONV_FAC;
        }
        // Initialize the histograms (bins < 2*N, so this loop covers them)
        if(i < bins) {
            dd[i] = 0;
            dr[i] = 0;
            rr[i] = 0;
        }
    }
    fclose(file_real);
    fclose(file_sim);
    int threads_in_block = 512;
    int blocks_in_grid = (N+threads_in_block-1)/threads_in_block;  // ceil-div
    // DD: real vs real, DR: real vs sim, RR: sim vs sim
    theta_calc<<<blocks_in_grid, threads_in_block>>>(N, real_values_asc, real_values_dec, real_values_asc, real_values_dec, dd);
    theta_calc<<<blocks_in_grid, threads_in_block>>>(N, real_values_asc, real_values_dec, sim_values_asc, sim_values_dec, dr);
    theta_calc<<<blocks_in_grid, threads_in_block>>>(N, sim_values_asc, sim_values_dec, sim_values_asc, sim_values_dec, rr);
    // Wait for the GPU before touching managed memory on the host
    cudaDeviceSynchronize();
    // Calculate omega values
    omega_calc(dd, dr, rr, omega_hist, bins);
    //debug(dd, dr, rr, omega_hist, bins);
    // Free all the things
    cudaFree(real_values_asc);
    cudaFree(real_values_dec);
    cudaFree(sim_values_asc);
    cudaFree(sim_values_dec);
    cudaFree(dd);
    cudaFree(dr);
    cudaFree(rr);
    free(omega_hist);
    return 0;
}
22,439 | /**
* @file coeff.cu
* @brief Filter coefficients
* @author John Melton, G0ORX/N6LYT
*/
/* Copyright (C)
* 2015 - John Melton, G0ORX/N6LYT
*
* Based on code from WDSP written by Warren Pratt, NR0V
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Zeroth-order modified Bessel function I0(x), evaluated via its power
 * series. Terms are accumulated until the next term drops below the
 * running sum times 1e-6 (relative truncation).
 */
static double Ino(double x) {
    double sum = 1.0;     /* running series value (first term is 1)      */
    double term = 1.0;    /* value of the current series term            */
    double two_k = 0.0;   /* 2*k for the k-th term's recurrence          */
    do
    {
        two_k += 2.0;
        term *= (x * x) / (two_k * two_k);
        sum += term;
    }
    while (term > sum * 1e-6);
    return sum;
}
// Design an M-tap Kaiser-windowed band-pass FIR filter.
//   Fs  - sample rate; Fa/Fb - lower/upper band edges (same units as Fs)
//   M   - filter length; assumes M is odd (symmetric around tap Np)
//   Att - desired stop-band attenuation in dB
// Returns a malloc'd array of M coefficients; the caller owns/frees it.
// NOTE(review): `double A[Np+1]` is a variable-length array — a GCC/nvcc
// extension, not standard C++; malloc's result is also unchecked.
double *calcFilter(double Fs, double Fa, double Fb, int M, double Att) {
int Np = (M-1)/2;
double A[Np+1];
double Alpha;
int j;
double pi = 3.1415926535897932;
double Inoalpha;
double *H;
H=(double*)malloc(M*sizeof(double));
// Calculate the impulse response of the ideal filter
A[0] = 2.0*(Fb-Fa)/Fs;
for(j=1; j<=Np; j++)
{
A[j] = (sin(2.0*(double)j*pi*Fb/Fs)-sin(2.0*(double)j*pi*Fa/Fs))/((double)j*pi);
}
// Calculate the desired shape factor for the Kaiser-Bessel window
// (standard Kaiser formula: 0 below 21 dB, empirical fits above)
if (Att<21.0)
{
Alpha = 0.0;
}
else if (Att>50.0)
{
Alpha = 0.1102*(Att-8.7);
}
else
{
Alpha = 0.5842*pow((Att-21.0), 0.4)+0.07886*(Att-21.0);
}
// Window the ideal response with the Kaiser-Bessel window,
// filling the upper half H[Np..M-1]
Inoalpha = Ino(Alpha);
for (j=0; j<=Np; j++)
{
H[Np+j] = A[j]*Ino(Alpha*sqrt(1.0-((double)j*(double)j/((double)Np*(double)Np))))/Inoalpha;
}
// Mirror the upper half into H[0..Np-1] (linear-phase symmetry)
for (j=0; j<Np; j++)
{
H[j] = H[M-1-j];
}
return H;
}
|
22,440 | //This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the maximum read bandwidth of L1 cache for 64 bit read
//This code have been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define SHARED_MEM_SIZE_BYTE (48*1024) //size in bytes, max 96KB for v100
#define SHARED_MEM_SIZE (SHARED_MEM_SIZE_BYTE/4)
#define ITERS (SHARED_MEM_SIZE/2)
#define BLOCKS_NUM 1
#define THREADS_PER_BLOCK 1024
#define WARP_SIZE 32
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable "file:line: message" diagnostic for a failed CUDA call,
// and (by default) terminate the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;  // fast path: nothing to report
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Shared-memory read-bandwidth microbenchmark: every thread streams float4-
// sized groups from a statically allocated shared array while the elapsed
// clock cycles are measured with the %clock register. Per-thread start/stop
// times and an anti-DCE accumulator are written back to global memory.
__global__ void shared_bw(uint32_t *startClk, uint32_t *stopClk, float *dsink){
// thread index
uint32_t tid = threadIdx.x;
uint32_t bid = blockIdx.x;
uint32_t uid = bid*blockDim.x+tid;
uint32_t n_threads = blockDim.x * gridDim.x;
__shared__ float s[SHARED_MEM_SIZE]; //static shared memory
// all threads cooperatively initialize the shared array
// (comment below says "one thread" but the loop is grid-strided)
for (uint32_t i=uid; i<(SHARED_MEM_SIZE); i+=n_threads)
s[i] = (float)i;
// four independent accumulators: avoids compiler optimization AND gives
// instruction-level parallelism between the four loads
float sink0 = 0;
float sink1 = 0;
float sink2 = 0;
float sink3 = 0;
// synchronize all threads (also orders the shared writes above)
asm volatile ("bar.sync 0;");
// start timing: read the per-SM clock register
uint32_t start = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// load data from shared memory and accumulate
// (header comment says "l1 cache"; the loads target __shared__ s[])
for(uint32_t i=0; i<ITERS; ++i){
for(uint32_t j=uid*4; j<(SHARED_MEM_SIZE-ITERS); j+=(4*n_threads)){
sink0 += s[j+0+i];
sink1 += s[j+1+i];
sink2 += s[j+2+i];
sink3 += s[j+3+i];
}
}
// synchronize all threads so the slowest thread bounds the measurement
asm volatile("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory; the dsink store keeps the
// accumulation loop alive under optimization
startClk[uid] = start;
stopClk[uid] = stop;
dsink[uid] = sink0+sink1+sink2+sink3;
}
// Host driver: run the shared-memory bandwidth kernel once and report
// bytes per clock per SM using thread 0's start/stop timestamps.
int main(){
    // Host result buffers, one slot per launched thread.
    uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    float *dsink = (float*) malloc(TOTAL_THREADS*sizeof(float));
    // Device mirrors of the buffers above.
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    float *dsink_g;
    gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&dsink_g, TOTAL_THREADS*sizeof(float)) );
    shared_bw<<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, dsink_g);
    gpuErrchk( cudaPeekAtLastError() );
    // Blocking copies: these also synchronize with the kernel.
    gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(dsink, dsink_g, TOTAL_THREADS*sizeof(float), cudaMemcpyDeviceToHost) );
    // ITERS outer iterations * (SHARED_MEM_SIZE-ITERS) accesses * 4 floats
    // * 4 bytes, divided by thread 0's elapsed clocks.
    float bw;
    bw = (float)(ITERS*(SHARED_MEM_SIZE-ITERS)*4*4)/((float)(stopClk[0]-startClk[0]));
    printf("Shared Memory Bandwidth = %f (byte/clk/SM)\n", bw);
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
    // fix: release device and host buffers (the original leaked all six).
    gpuErrchk( cudaFree(startClk_g) );
    gpuErrchk( cudaFree(stopClk_g) );
    gpuErrchk( cudaFree(dsink_g) );
    free(startClk);
    free(stopClk);
    free(dsink);
    return 0;
}
22,441 | __device__ float g_a = 0;
// Fused multiply-add over flat arrays: a[i] += b[i] * c.
// NOTE(review): no bounds guard — the launch configuration must cover the
// array length exactly; confirm callers guarantee this.
extern "C" __global__ void test(float *a, float *b, const float c) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    a[idx] = a[idx] + b[idx] * c;
}
|
22,442 | #include <iostream>
// Enumerate every visible CUDA device and report its compute capability.
int main(int argc, char* argv[])
{
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    for (int dev = 0; dev < device_count; ++dev)
    {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        std::cout << "Device : " << dev << " has compute capability "
                  << props.major << "." << props.minor << std::endl;
    }
    return 0;
}
|
22,443 |
// Fill the game-of-life domain with pseudo-random cell states in {0, 1, 2}
// using an LCG-style hash of the thread coordinates.
// NOTE(review): no bounds guard — the launch must cover the domain exactly.
// The seed mixes only blockIdx.x with threadIdx.x/y, so every block row
// repeats the same pattern — confirm that is intentional "dummy" behavior.
__global__ void init_kernel(int * domain, int domain_x)
{
// Dummy initialization
/*domain[blockIdx.y * domain_x + blockIdx.x * blockDim.x + threadIdx.x]
= (1664525ul * (blockIdx.x + threadIdx.y + threadIdx.x) + 1013904223ul) % 3; */
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int idx = iy * domain_x + ix;
domain[idx] = (1664525ul * (blockIdx.x + threadIdx.y + threadIdx.x) + 1013904223ul) % 3;
// NOTE(review): this barrier is unnecessary — no thread reads another
// thread's write after this point in the kernel.
__syncthreads();
}
// Reads a cell at (x+dx, y+dy)
// Fetch the cell at (x+dx, y+dy) from a row-major domain, with toroidal
// wrap-around on both axes (unsigned modulo handles negative offsets).
__device__ int read_cell(int * source_domain, int x, int y, int dx, int dy,
unsigned int domain_x, unsigned int domain_y)
{
    unsigned int wrapped_x = (unsigned int)(x + dx) % domain_x;
    unsigned int wrapped_y = (unsigned int)(y + dy) % domain_y;
    return source_domain[wrapped_y * domain_x + wrapped_x];
}
// Compute kernel
__global__ void life_kernel(int * source_domain, int * dest_domain,
int domain_x, int domain_y)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;// computing the y-dimension
//Shared Memory used by all the threads inside the block
extern __shared__ int shared_source_domain[];
for (int i=tx; i<tx+8; i++)
{
for (int j=ty; j<ty+8; j++)
{
shared_source_domain[i * (domain_x/16) + j] = source_domain[i * domain_x + j];
}
}
__syncthreads();
// Read cell
int myself = read_cell(shared_source_domain, tx, ty, 0, 0,
domain_x, domain_y);
// TODO: Read the 8 neighbors and count number of blue and red
int neighbors=0;
int red=0, blue=0, blank=0;
for (int i=-1; i<2; i++)
{
for (int j=-1; j<2; j++)
{
if ((i !=0) || (j !=0))
{
neighbors = read_cell(shared_source_domain, tx, ty, i, j, domain_x, domain_y);
if (neighbors == 1)
{
red++;
}
else if (neighbors == 2)
{
blue++;
}
else if (neighbors == 0)
{
blank++;
}
}
}
}
__syncthreads();
// TODO: Compute new value
int all_neighbors = red + blue;
//control flow divergence
if ((all_neighbors < 2) || (all_neighbors > 3))
{
myself = 0;
}
else if ((all_neighbors == 2) || (all_neighbors == 3))
{
if ( blue >= 2)
{
myself = 2;
}
else
{
myself = 1;
}
}
__syncthreads();
// TODO: Write it in dest_domain
dest_domain[(ty * domain_x) + tx] = myself;
}
|
22,444 |
#include <cuda.h>
#include <stdint.h>
// Elementwise C = A + B over N elements, one thread per element.
// 64-bit indexing supports arrays longer than 2^31 elements.
extern "C" __global__ void vectorAdd(int *A, int *B, int *C, uint64_t N)
{
    const uint64_t idx = (uint64_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;  // grid tail guard
    C[idx] = A[idx] + B[idx];
}
|
22,445 | // RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
// RUN: -fcuda-is-device -verify -fsyntax-only %s
#define __device__ __attribute__((device))
__int128 h_glb;
__device__ __int128 d_unused;
// expected-note@+1 {{'d_glb' defined here}}
__device__ __int128 d_glb;
__device__ __int128 bar() {
// expected-error@+1 {{'d_glb' requires 128 bit size '__int128' type support, but target 'spirv64' does not support it}}
return d_glb;
}
|
22,446 | //This code is a modification of L1 cache benchmark from
//"Dissecting the NVIDIA Volta GPU Architecture via Microbenchmarking": https://arxiv.org/pdf/1804.06826.pdf
//This benchmark measures the latency of L1 cache
//This code have been tested on Volta V100 architecture
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <fstream>
//Define constants
#define L1_SIZE_BYTE (128*1024) //L1 size in bytes
#define L2_SIZE_BYTE (6144*1024) //L2 size in bytes
#define WARP_SIZE 32
//Varibales
#define L1_SIZE (L1_SIZE_BYTE/4) //L1 size in 32 bit words
#define L2_SIZE (L2_SIZE_BYTE/4) //L2 size in 32 bit words
#define SHARED_MEM_SIZE_BYTE (48*1024) //size in bytes, max 96KB for v100
#define SHARED_MEM_SIZE (SHARED_MEM_SIZE_BYTE/4)
#define ARRAY_SIZE (4*L2_SIZE)
#define BLOCK_SIZE 32 //Launch only one thread to calcaulte the latency using a pointer-chasing array technique
#define GRID_SIZE 1
#define TOTAL_THREADS (BLOCK_SIZE*GRID_SIZE)
#define STRIDE_SIZE 1
#define WARMUP_ITER L2_SIZE/STRIDE_SIZE
#define TEST_ITER SHARED_MEM_SIZE
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call as "GPUassert: <message> <file> <line>" on
// stderr and, unless abort is false, terminate with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Cache-latency probe: all threads build a pointer-chasing array with the
// given stride, then thread 0 alone warms the cache and times individual
// chased loads with clock(), storing per-access cycle counts in `results`.
// dsink receives the chased values so the loads cannot be optimized away.
__global__ void l1_associativity(uint32_t *results, uint32_t *dsink, uint32_t stride, uint32_t *array){
// thread index
uint32_t tid = threadIdx.x;
uint32_t bid = blockIdx.x;
uint32_t uid = bid*blockDim.x+tid;
uint32_t n_threads = blockDim.x * gridDim.x;
uint32_t start;
uint32_t stop;
// per-access timings staged in shared memory to keep stores off the
// timed path
__shared__ uint32_t time[SHARED_MEM_SIZE];
//uint32_t *array = new uint32_t(ARRAY_SIZE);
//__shared__ uint32_t start[SHARED_MEM_SIZE]; //static shared memory
//__shared__ uint32_t stop[SHARED_MEM_SIZE]; //static shared memory
// all threads cooperatively initialize the pointer-chasing array:
// array[i] points stride elements ahead, wrapping at ARRAY_SIZE
for (uint32_t i=uid; i<(ARRAY_SIZE); i+=n_threads)
array[i] = (i+stride)%ARRAY_SIZE;
__syncthreads();
// only thread 0 measures, so timings are not perturbed by other threads
if(uid == 0){
//initalize pointer chaser
uint32_t p_chaser = 0;
// warm-up pass so the measured pass hits whatever cache level fits
for(uint32_t i=0; i<WARMUP_ITER; ++i) {
// chase pointer
p_chaser = array[p_chaser];
}
for(uint32_t i=0; i<TEST_ITER; ++i) {
__syncthreads();
// start timing
start = clock();
__syncthreads();
// chase pointer (the single timed load)
p_chaser = array[p_chaser];
__syncthreads();
// store the value so the load has an observable use
dsink[i] = p_chaser;
__syncthreads();
// stop timing
stop = clock();
time[i] = stop - start;
}
__syncthreads();
// write time and data back to memory
for (uint32_t i=0; i<SHARED_MEM_SIZE; i++){
results[i] = time[i];
}
}
}
// Host driver: run the latency probe once and append the per-access cycle
// counts to a CSV file.
int main(){
    // Host result buffers.
    uint32_t *results = (uint32_t*) malloc(SHARED_MEM_SIZE*sizeof(uint32_t));
    uint32_t *dsink = (uint32_t*) malloc(TEST_ITER*sizeof(uint32_t));
    // NOTE(review): this host `array` is never copied to or from the device
    // — the kernel initializes its own copy. Kept so behavior is unchanged.
    uint32_t *array = (uint32_t*) malloc(ARRAY_SIZE*sizeof(uint32_t));
    uint32_t *results_g;
    uint32_t *dsink_g;
    uint32_t *array_g;
    gpuErrchk( cudaMalloc(&results_g, SHARED_MEM_SIZE*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&dsink_g, TEST_ITER*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&array_g, ARRAY_SIZE*sizeof(uint32_t)) );
    l1_associativity<<<GRID_SIZE, BLOCK_SIZE>>>(results_g, dsink_g,STRIDE_SIZE, array_g);
    gpuErrchk( cudaPeekAtLastError() );
    // Blocking copies: these also synchronize with the kernel.
    gpuErrchk( cudaMemcpy(results, results_g, SHARED_MEM_SIZE*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(dsink, dsink_g, TEST_ITER*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    // write results to file (append mode, one latency sample per line)
    std::ofstream myfile;
    myfile.open ("output_l1_associativity.csv", std::ios::app);
    for (uint32_t i=0; i<SHARED_MEM_SIZE; i++)
        myfile << results[i] << "\n";
    myfile.close();
    // fix: release device and host buffers (the original leaked all six).
    gpuErrchk( cudaFree(results_g) );
    gpuErrchk( cudaFree(dsink_g) );
    gpuErrchk( cudaFree(array_g) );
    free(results);
    free(dsink);
    free(array);
    return 0;
}
|
22,447 | /////////////////////////////////////////////////////////////////////////////////
////
//// The MIT License
////
//// Copyright (c) 2006 Scientific Computing and Imaging Institute,
//// University of Utah (USA)
////
//// License for the specific language governing rights and limitations under
//// Permission is hereby granted, free of charge, to any person obtaining a
//// copy of this software and associated documentation files (the "Software"),
//// to deal in the Software without restriction, including without limitation
//// the rights to use, copy, modify, merge, publish, distribute, sublicense,
//// and/or sell copies of the Software, and to permit persons to whom the
//// Software is furnished to do so, subject to the following conditions:
////
//// The above copyright notice and this permission notice shall be included
//// in all copies or substantial portions of the Software.
////
//// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
//// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
//// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
//// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
//// DEALINGS IN THE SOFTWARE.
////
/////////////////////////////////////////////////////////////////////////////////
//
//#include <optix_cuda.h>
//#include <optix_math.h>
//#include <optixu/optixu_matrix.h>
//#include <optixu/optixu_aabb.h>
//#include <ElVis/Float.cu>
//
// rtDeclareVariable(float3, center, , );
// rtDeclareVariable(float, radius, , );
// rtDeclareVariable(optix::Ray, ray, rtCurrentRay, );
// rtDeclareVariable(ElVisFloat3, normal, attribute normal_vec, );
//
// RT_PROGRAM void SphereIntersection( int primIdx )
//{
// float3 d = ray.direction;
// float3 o = ray.origin;
//
// float3 oc = o-center;
//
// float A = dot(d,d);
// float B = 2*(dot(oc,d));
// float C = dot(oc,oc) - radius*radius;
//
// float D = B*B - 4*A*C;
//
// if( D < 0 )
// {
// return;
// }
//
// // In this case we know that there is at least 1 intersection.
// float denom = 2.0f * A;
// float square_D = sqrt(D);
//
// // Of the two roots, this is the one which is closest to the viewer.
// float t1 = (-B - square_D)/denom;
//
// if( t1 > 0.0f )
// {
// if( rtPotentialIntersection( t1 ) )
// {
// const float3 intersectionPoint = ray.origin + t1 * ray.direction;
// normal = intersectionPoint - center;
// normalize(normal);
// rtReportIntersection(0);
// }
// }
// else
// {
// float t2 = (-B + square_D)/denom;
//
// if( t2 > 0.0f )
// {
// if( rtPotentialIntersection( t2 ) )
// {
// const float3 intersectionPoint = ray.origin + t2 *
// ray.direction;
// normal = intersectionPoint - center;
// normalize(normal);
// rtReportIntersection(0);
// }
// }
// }
//}
//
// RT_PROGRAM void SphereBounding (int, float result[6])
//{
// optix::Aabb* aabb = (optix::Aabb*)result;
// aabb->m_min = center - make_float3(radius);
// aabb->m_max = center + make_float3(radius);
//}
//
|
22,448 | // Create a sample address sanitizer bitcode library.
// RUN: %clang_cc1 -x ir -fcuda-is-device -triple amdgcn-amd-amdhsa -emit-llvm-bc \
// RUN: -disable-llvm-passes -o %t.asanrtl.bc %S/Inputs/amdgpu-asanrtl.ll
// Check sanitizer runtime library functions survive
// optimizations without being removed or parameters altered.
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -mlink-bitcode-file %t.asanrtl.bc -x hip \
// RUN: | FileCheck -check-prefixes=ASAN,MFCHECK %s
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -O3 -mlink-bitcode-file %t.asanrtl.bc -x hip \
// RUN: | FileCheck -check-prefixes=ASAN,MFCHECK %s
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -x hip \
// RUN: | FileCheck %s
// REQUIRES: amdgpu-registered-target
// ASAN-DAG: define weak void @__amdgpu_device_library_preserve_asan_functions()
// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant void ()* @__amdgpu_device_library_preserve_asan_functions
// ASAN-DAG: @llvm.compiler.used = {{.*}}@__amdgpu_device_library_preserve_asan_functions_ptr
// ASAN-DAG: define weak void @__asan_report_load1(i64 %{{.*}})
// MFCHECK: !llvm.module.flags = !{![[FLAG1:[0-9]+]], ![[FLAG2:[0-9]+]]}
// MFCHECK: ![[FLAG1]] = !{i32 4, !"amdgpu_hostcall", i32 1}
// CHECK-NOT: @__amdgpu_device_library_preserve_asan_functions
// CHECK-NOT: @__asan_report_load1
|
22,449 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
int SIZE = 50;
const float BLOCK_WIDTH = 8;
void add_matrix(int* , int* , int* , int, char); //Just work with same sizes matrices.
__global__ void add_matrix_kernel(int*, int*, int*, int);
__global__ void add_matrix_row_kernel(int*, int*, int*, int);
__global__ void add_matrix_col_kernel(int*, int*, int*, int);
void mul_matrix_vec(int*, int*, int*, int);
__global__ void mul_matrix_vec_kernel(int*, int*, int*, int);
//void BlockTranspose(int*, int*, int, int);
//__global__ void BlockTransposeKernel(int*, int, int);
void print_matrix(int*, int, int);
void print_vec(int*, int);
// Host driver: fill two SIZE x SIZE matrices, then time three GPU addition
// strategies ('n' one thread per element, 'r' one thread per row,
// 'c' one thread per column) and print the elapsed microseconds of each.
int main()
{
    int* m_a = new int[SIZE * SIZE];
    int* m_b = new int[SIZE * SIZE];
    int* m_c = new int[SIZE * SIZE];
    int* m_c2 = new int[SIZE * SIZE];
    int* m_c3 = new int[SIZE * SIZE];
    int* vec1 = new int[SIZE];
    int* vec2 = new int[SIZE];
    // Deterministic fill: a[i][j] = i+j, b[i][j] = i*j, results zeroed.
    for (int i = 0; i < SIZE; i++) {
        for (int j = 0; j < SIZE; j++) {
            int pos = (i * SIZE) + j;
            m_a[pos] = i + j;
            m_b[pos] = i * j;
            m_c[pos] = 0;
            m_c2[pos] = 0;
            m_c3[pos] = 0;
        }
    }
    for (int j = 0; j < SIZE; j++) {
        vec1[j] = j;
        vec2[j] = 0;
    }
    /*
    cout << "Vector" << endl;
    print_vec(vec1, SIZE); cout << endl;
    cout << "Matriz" << endl;
    print_matrix(m_a, SIZE, SIZE); cout << endl;
    cout << "Resultado" << endl;
    mul_matrix_vec(m_a, vec1, vec2, SIZE);
    print_vec(vec2, SIZE);
    */
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    add_matrix(m_a, m_b, m_c, SIZE, 'n');
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    auto duration1 = duration_cast<microseconds>(t2 - t1).count();
    t1 = high_resolution_clock::now();
    add_matrix(m_a, m_b, m_c2, SIZE, 'r');
    t2 = high_resolution_clock::now();
    auto duration2 = duration_cast<microseconds>(t2 - t1).count();
    t1 = high_resolution_clock::now();
    add_matrix(m_a, m_b, m_c3, SIZE, 'c');
    t2 = high_resolution_clock::now();
    auto duration3 = duration_cast<microseconds>(t2 - t1).count();
    cout << "Normal 1: " << duration1 << endl;
    cout << "Filas 2: " << duration2 << endl;
    cout << "Columnas 3: " << duration3 << endl;
    // fix: release the seven heap buffers (the original leaked them all).
    delete [] m_a;
    delete [] m_b;
    delete [] m_c;
    delete [] m_c2;
    delete [] m_c3;
    delete [] vec1;
    delete [] vec2;
    return 0;
}
// Host wrapper: copy size x size matrices a and b to the device, launch the
// addition kernel selected by ans ('n' per-element, 'r' per-row, 'c'
// per-column), and copy the result back into c.
// NOTE(review): CUDA API return codes and launch errors are not checked;
// an unrecognized ans launches nothing and copies back uninitialized data.
void add_matrix(int* a, int* b, int* c, int size, char ans) {
int total_size = size * size * sizeof(int);
int* d_a;
int* d_b;
int* d_c;
cudaMalloc( (void**) &d_a, total_size);
cudaMalloc( (void**) &d_b, total_size);
cudaMalloc( (void**) &d_c, total_size);
cudaMemcpy(d_a, a, total_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, total_size, cudaMemcpyHostToDevice);
// 16x16 thread blocks; float division makes ceil() a true ceil-div,
// so the grid can over-cover the matrix (kernels must bounds-check).
float block = 16;
dim3 grid_size(ceil(size/block), ceil(size/block), 1);
dim3 block_size(block, block, 1);
switch (ans)
{
case 'n':
add_matrix_kernel <<< grid_size, block_size >>> (d_a, d_b, d_c, size);
break;
case 'r':
add_matrix_row_kernel <<< grid_size, block_size >>> (d_a, d_b, d_c, size);
break;
case 'c':
add_matrix_col_kernel <<< grid_size, block_size >>> (d_a, d_b, d_c, size);
break;
default:
break;
}
// Blocking copy doubles as synchronization with the kernel.
cudaMemcpy(c, d_c, total_size, cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
//normal addition;
// Elementwise matrix addition, one thread per element of the size x size
// row-major matrices; the guard handles a grid that over-covers the matrix.
__global__ void add_matrix_kernel(int* a, int* b, int* c, int size) {
    const int col = (blockDim.x * blockIdx.x) + threadIdx.x;
    const int row = (blockDim.y * blockIdx.y) + threadIdx.y;
    if (col >= size || row >= size)
        return;
    const int idx = row * size + col;
    c[idx] = a[idx] + b[idx];
}
//column kernel: one thread per column walks down all its rows;
// Column-strategy addition: each thread in grid row 0 owns one column and
// iterates down all size rows of it.
__global__ void add_matrix_col_kernel(int* a, int* b, int* c, int size) {
    int gpu_columna = (blockDim.x * blockIdx.x) + threadIdx.x;
    int gpu_fila = (blockDim.y * blockIdx.y) + threadIdx.y;
    // fix: also require gpu_columna < size — the host uses a ceil-div grid
    // (e.g. 64 threads for 50 columns), so the surplus threads wrote an
    // entire column out of bounds.
    if (gpu_fila == 0 && gpu_columna < size){
        for (int i = 0; i < size; i++) {
            int pos = (i * size + gpu_columna);
            c[pos] = a[pos] + b[pos];
        }
    }
}
//row kernel: one thread per row walks across all its columns;
// Row-strategy addition: each thread in grid column 0 owns one row and
// iterates across all size columns of it.
__global__ void add_matrix_row_kernel(int* a, int* b, int* c, int size) {
    int gpu_columna = (blockDim.x * blockIdx.x) + threadIdx.x;
    int gpu_fila = (blockDim.y * blockIdx.y) + threadIdx.y;
    // fix: also require gpu_fila < size — the host uses a ceil-div grid, so
    // the surplus threads wrote an entire row out of bounds.
    if (gpu_columna == 0 && gpu_fila < size) {
        for (int i = 0; i < size; i++) {
            int pos = (gpu_fila * size + i);
            c[pos] = a[pos] + b[pos];
        }
    }
}
/*
void BlockTranspose(int* h_mat, int* out_mat, int columnas, int filas) {
int* d_mat;
int m_size = columnas * filas * sizeof(int);
cudaMalloc((void**) &d_mat, m_size);
cudaMemcpy(d_mat, h_mat, m_size, cudaMemcpyHostToDevice);
dim3 gridDim(ceil(columnas/BLOCK_WIDTH), ceil(filas/BLOCK_WIDTH), 1);
dim3 blockDim(BLOCK_WIDTH, BLOCK_WIDTH, 1);
BlockTransposeKernel<<< gridDim, blockDim >>>(d_mat, columnas, filas);
cudaMemcpy(out_mat, d_mat, m_size, cudaMemcpyDeviceToHost);
cudaFree(d_mat);
}
*/
// Host wrapper: copy the size x size matrix and input vector to the device,
// launch the vector-matrix product kernel, and copy the result vector back.
// NOTE(review): CUDA API return codes and launch errors are unchecked, and
// BLOCK_WIDTH is a float so the ceil-div grid may over-cover the data.
void mul_matrix_vec(int* h_m, int* h_vec_1, int* h_vec_2, int size) {
int* d_m;
int* d_vec_1;
int* d_vec_2;
int size_mat = size * size * sizeof(int);
int size_vec = size * sizeof(int);
cudaMalloc((void**) &d_m, size_mat);
cudaMalloc((void**) &d_vec_1, size_vec);
cudaMalloc((void**) &d_vec_2, size_vec);
cudaMemcpy(d_m, h_m, size_mat, cudaMemcpyHostToDevice);
cudaMemcpy(d_vec_1, h_vec_1, size_vec, cudaMemcpyHostToDevice);
dim3 gridDim(ceil(size / BLOCK_WIDTH), ceil(size / BLOCK_WIDTH), 1);
dim3 blockDim(BLOCK_WIDTH, BLOCK_WIDTH, 1);
mul_matrix_vec_kernel <<< gridDim, blockDim >>> (d_m, d_vec_1, d_vec_2, size);
// Blocking copy doubles as synchronization with the kernel.
cudaMemcpy(h_vec_2, d_vec_2, size_vec, cudaMemcpyDeviceToHost);
cudaFree(d_m);
cudaFree(d_vec_1);
cudaFree(d_vec_2);
}
//execution order vect(x_1, y_1) x mat(x_2, y_2); y_1 == x_2
// Row-vector x matrix product: thread gpu_columna (in grid row 0) computes
// h_vec_2[col] = sum_i h_m[i][col] * h_vec_1[i].
__global__ void mul_matrix_vec_kernel(int* h_m, int* h_vec_1, int* h_vec_2, int size) {
    int gpu_columna = (blockIdx.x * blockDim.x) + threadIdx.x;
    int gpu_fila = (blockIdx.y * blockDim.y) + threadIdx.y;
    // fix: also require gpu_columna < size — the host launches a ceil-div
    // grid, so the surplus threads wrote past the end of h_vec_2.
    if (gpu_fila == 0 && gpu_columna < size) {
        // Accumulate in a register; one global store instead of a
        // read-modify-write of h_vec_2 per row.
        int acc = 0;
        for (int i = 0; i < size; i++) { // walk down the rows of the column
            acc += h_m[i*size + gpu_columna] * h_vec_1[i];
        }
        h_vec_2[gpu_columna] = acc;
    }
}
// Print an s_1 x s_2 row-major matrix: values space-separated, one row per
// output line.
void print_matrix(int* a, int s_1, int s_2) {
    for (int row = 0; row < s_1; row++) {
        for (int col = 0; col < s_2; col++)
            std::cout << a[row * s_2 + col] << " ";
        std::cout << std::endl;
    }
}
// Print the s_1 elements of a vector on one line, each followed by a space
// (no trailing newline).
void print_vec(int* a, int s_1) {
    for (int idx = 0; idx < s_1; idx++)
        std::cout << a[idx] << " ";
}
|
22,450 | #include <cstdio>
#include <cstdlib>
#include <iostream>
#define Width 32
#define Element 1024
using namespace std;
__global__
// Naive Width x Width matrix product, one thread per output element; the
// launch uses a single Width x Width block, so threadIdx alone indexes the
// matrices.
void MatrixMulKernel(int* Md, int* Nd, int* Pd)
{
    //Thread Index
    int ty = threadIdx.y; //Row
    int tx = threadIdx.x; //Col
    if((ty<Width) && (tx<Width))
    {
        //Pvalue accumulates the dot product of row ty of Md with column tx of Nd
        int Pvalue = 0;
        for(int k=0 ; k<Width ; ++k)
            Pvalue += Md[ty*Width+k]*Nd[k*Width+tx];
        // fix: the store was outside the bounds guard, so with a block
        // larger than Width x Width the out-of-range threads wrote zeros
        // past the end of Pd.
        Pd[ty*Width + tx] = Pvalue;
    }
}
// Demo: times a Width x Width integer matrix multiply on the CPU and on the GPU
// (single block), verifies the results match, and reports which was faster.
// Timing uses CUDA events throughout, including for the CPU section.
int main()
{
cout << "----------------------------------------------Start" << endl;
cout << "This is CPU faster than GPU Version" << endl;
cout << "---------------------------------------------------" << endl;
cout << "Grid Dimension : " << "1" << endl;
cout << "Block Dimension : " << Width << endl;
cout << "Dimension : " << Width << endl;
cout << "Total Elements : " << Element << endl;
cout << "---------------------------------------------------" << endl;
//Variables for Time
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
// size in bytes of each Element-entry int matrix (host and device)
int size = Element*sizeof(int);
int* M = (int*)malloc(size);
int* N = (int*)malloc(size);
int* P = (int*)malloc(size);
int* Temp_sum_array = (int*)malloc(size);
int* Md;
int* Nd;
int* Pd;
// random operands in [0, 100); P cleared to hold the GPU result
srand(time(0));
for(int i=0 ; i<Element ; i++)
{
M[i] = rand()%100;
N[i] = rand()%100;
P[i] = 0;
}
cudaEventRecord(start, 0);
//CPU Matrix Multiplication (triple loop reference implementation)
int Temp_sum = 0;
for(int row=0 ; row<Width ; row++)
{
for(int col=0 ; col<Width ; col++)
{
Temp_sum = 0;
for(int n=0 ; n<Width ; n++)
{
Temp_sum += M[row*Width+n]*N[n*Width+col];
}
Temp_sum_array[row*Width+col] = Temp_sum;
}
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float CPU_time;
cudaEventElapsedTime(&CPU_time, start, end);
// cudaEventElapsedTime returns milliseconds; divide by 1000 for seconds
cout << "Matrix Multiplication by CPU : " << CPU_time/1000 << 's' << endl;
//Finish
/////////////////////////////////////////////////
//////// CUDA //////////
/////////////////////////////////////////////////
// device allocation + host-to-device copies, timed separately from the kernel
cudaEventRecord(start, 0);
cudaMalloc((void**)&Md, size);
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Nd, size);
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Pd, size);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float Memory_time;
cudaEventElapsedTime(&Memory_time, start, end);
cout << "Time of Processing Memory : " << Memory_time/1000 << 's' << endl;
// kernel launch: one block of Width x Width threads, one thread per output element.
// NOTE(review): GPU_time covers the kernel only, not the transfers timed above.
cudaEventRecord(start, 0);
dim3 dimGrid(1, 1);
dim3 dimBlock(Width, Width);
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float GPU_time;
cudaEventElapsedTime(&GPU_time, start, end);
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
cout << "Matrix Multiplication by GPU : " << GPU_time/1000 << 's' << endl;
cout << "---------------------------------------------------" << endl;
//Print CPU Result
//cout << "CPU Result :" << endl;
//for(int i=0 ; i<Element ; i++)
// cout << Temp_sum_array[i] << ", ";
//cout << endl;
//Print GPU Result
//cout << "GPU Result :" << endl;
//for(int i=0 ; i<Element ; i++)
// cout << P[i] << ", ";
//cout << endl;
//Check Multiplication Result: element-wise compare, stop at first mismatch
int check_flag = 0;
for(int i=0 ; i<Element ; i++)
if(Temp_sum_array[i] != P[i])
{
cout << "Wrong Point at : " << i << endl;
cout << "CPU Results is : " << Temp_sum_array[i] << endl;
cout << "GPU Results is : " << P[i] << endl;
check_flag = 1;
break;
}
if(check_flag == 1)
cout << "Wrong Result" << endl;
else if(check_flag == 0)
cout << "Correct Result" << endl;
//Finish
//Compare CPU_time and GPU_time (larger elapsed time means slower)
if(CPU_time > GPU_time)
cout << "GPU is faster" << endl;
else
cout << "CPU is faster" << endl;
//Finish
cout << "------------------------------------------------End" << endl;
// release host and device resources
free(M);
free(N);
free(P);
free(Temp_sum_array);
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
cudaEventDestroy(start);
cudaEventDestroy(end);
return EXIT_SUCCESS;
}
|
22,451 | #include "includes.h"
// Element-wise vector add: C[i] = A[i] + B[i] for every i in [0, ds).
// A grid-stride loop lets any grid/block configuration cover the full range.
__global__ void vadd(const float *A, const float *B, float *C, int ds){
    int stride = gridDim.x * blockDim.x;
    int first = threadIdx.x + blockDim.x * blockIdx.x;
    for (int i = first; i < ds; i += stride) {
        C[i] = A[i] + B[i];
    }
}
22,452 |
/*
1. so vsi proteini enako dolgi ?
2. mutacija
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <random>
#include <algorithm>
// dolzina proteina 5 - 256
#define maxLenProtein 256
#define minLenProtein 5
#define numProtein 210
#define Cr 6
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//nikamor je namjnen zgolj prvemu clenu
enum smer{nikamor, naprej, nazaj, gor, dol, levo, desno};
//enum smerobratno{nikamor, nazaj, naprej, dol, gor, desno, levo};
enum vrsta{H, P};
// A 3D lattice point (coordinates of one protein monomer), origin-initialized.
struct tocka {
int x = 0;
int y = 0;
int z = 0;
};
// One candidate protein fold in the genetic-algorithm population.
struct protein
{
    // Number of monomers actually used (minLenProtein..maxLenProtein);
    // trailing array slots are padding.
    int dolzinaProteina = maxLenProtein;
    // Per-monomer fold direction (enum smer); zero-initialized to `nikamor`.
    int proteinSmer[maxLenProtein] = {0};
    // Per-monomer kind (enum vrsta: H or P). Fix: the original left this array
    // uninitialized, yet doloci_hevristiko() scans it over the full
    // maxLenProtein range, reading indeterminate values past dolzinaProteina.
    bool proteinVrsta[maxLenProtein] = {false};
    // Lattice coordinates derived from proteinSmer by tvori_mrezo().
    struct tocka tocke [maxLenProtein];
    // Fitness: +1 per H-H lattice contact, minus dolzinaProteina per collision.
    int hevristika = 0;
};
// Debug/sanity kernel: thread i copies the second direction entry of protein i
// into c[i]. Launch with one block of as many threads as entries in c/Arr.
__global__ void addKernel(int *c, struct protein* Arr)
{
int i = threadIdx.x;
c[i] = Arr[i].proteinSmer[1];
}
// Return true when two proteins have identical direction arrays over the full
// maxLenProtein range. Fix: takes const references instead of copying the ~2 KB
// structs by value on every call, and drops the unused local `enak`.
bool enako(const struct protein &ena, const struct protein &dva) {
    for (int i = 0; i < maxLenProtein; i++) {
        if (ena.proteinSmer[i] != dva.proteinSmer[i]) {
            return false;
        }
    }
    return true;
}
// Rebuild the lattice coordinates (prot.tocke) from the direction chain
// (prot.proteinSmer): each monomer sits one unit step away from the previous one.
void tvori_mrezo(struct protein &prot) {
    for (int i = 0; i < maxLenProtein; i++) {
        struct tocka t;
        if (i != 0) {
            t = prot.tocke[i - 1];   // start from the previous monomer's position
        }
        switch (prot.proteinSmer[i]) {
            case naprej: t.x++; break;  // forward
            case nazaj:  t.x--; break;  // backward
            case gor:    t.z++; break;  // up
            case dol:    t.z--; break;  // down
            case levo:   t.y++; break;  // left
            case desno:  t.y--; break;  // right
            default:     break;         // nikamor: stay in place
        }
        prot.tocke[i] = t;
    }
}
// True when two lattice points coincide (a self-collision in the fold).
bool cikelj(struct tocka a, struct tocka b) {
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
// True when points a and b are unit-distance neighbours along exactly one axis
// (a candidate H-H contact). Backward chain links need no special handling:
// callers only compare points at least 3 positions apart in the chain.
bool hevristicna_povezava(struct tocka a, struct tocka b) {
    int dx = a.x - b.x;
    int dy = a.y - b.y;
    int dz = a.z - b.z;
    if (dx == 0 && dy == 0 && (dz == 1 || dz == -1)) return true;
    if (dx == 0 && dz == 0 && (dy == 1 || dy == -1)) return true;
    if (dy == 0 && dz == 0 && (dx == 1 || dx == -1)) return true;
    return false;
}
// Score a fold: +1 for every H-H pair that are lattice neighbours, minus
// dolzinaProteina for every self-collision. Pairs closer than 3 chain positions
// are skipped (trivially adjacent). Fixes: the unused local `br` is removed,
// and the scan is limited to the used prefix [0, dolzinaProteina) -- the
// original iterated over all maxLenProtein slots, reading proteinVrsta entries
// past dolzinaProteina that are never assigned.
void doloci_hevristiko(struct protein &prot) {
    prot.hevristika = 0;
    int len = prot.dolzinaProteina;
    for (int i = 0; i < len - 3; i++) {
        for (int j = i + 3; j < len; j++) {
            if (cikelj(prot.tocke[i], prot.tocke[j])) {
                // collision: penalise by the protein length
                prot.hevristika -= prot.dolzinaProteina;
            }
            if (prot.proteinVrsta[i] == H && prot.proteinVrsta[j] == H) {
                if (hevristicna_povezava(prot.tocke[i], prot.tocke[j])) {
                    prot.hevristika++;
                }
            }
        }
    }
}
// Mutate one position of a protein: assign a fresh random direction and kind at
// index t, re-rolling the *previous* direction while it is the exact opposite of
// the new one (this preserves the original code's re-roll target). Requires t >= 1.
static void mutiraj_tocko(struct protein &p, int t) {
    p.proteinSmer[t] = rand() % 6 + 1;
    int premik = (p.proteinSmer[t - 1] % 2 == 0) ? -1 : 1;
    while (p.proteinSmer[t - 1] + premik == p.proteinSmer[t]) {
        p.proteinSmer[t - 1] = rand() % 6 + 1;
    }
    p.proteinVrsta[t] = rand() % 2;
}
// Genetic-algorithm driver: evolves a population of protein folds via crossover
// and mutation, then runs a small sanity-check kernel on the GPU.
// Fixes vs. original: mutation indices are drawn from [1, len) so the
// `proteinSmer[t - 1]` access can never hit index -1; the invalid
// `cudaFree(c)` on a host stack array is removed; the six duplicated
// mutation chunks are folded into mutiraj_tocko().
int main()
{
    int nfes = 0;
    std::cin >> nfes;          // number of fitness evaluations, in thousands
    nfes *= 1000;
    int count = 0;
    struct protein arr1[numProtein];
    srand(5);                  // fixed seed for reproducibility
    // --- initial population -------------------------------------------------
    for (int i = 0; i < numProtein; i++) {
        struct protein generiranProtein ;
        // random protein length
        generiranProtein.dolzinaProteina = rand() %(maxLenProtein - minLenProtein) + minLenProtein;
        int prev = 20;         // sentinel: never matches a real direction
        int premik = 0;
        // build the direction chain, rejecting the exact opposite of the previous step
        for ( int j = 0; j < generiranProtein.dolzinaProteina; j++) {
            generiranProtein.proteinSmer[j] = rand() % 6+1;
            premik = (prev % 2 == 0) ? -1 : 1;
            while (prev + premik == generiranProtein.proteinSmer[j]) {
                generiranProtein.proteinSmer[j] = rand() % 6+1;
            }
            prev = generiranProtein.proteinSmer[j];
            generiranProtein.proteinVrsta[j] = rand() % 2;
        }
        arr1[i] = generiranProtein;
    }
    // --- evolution loop -----------------------------------------------------
    while (count < nfes) {
        for (int i = 0; i < numProtein; i++) {
            // CROSSOVER: pick a distinct random partner r1
            int r1 = rand() % numProtein;
            while (r1 == i)
            {
                r1 = rand() % numProtein;
            }
            struct protein generiranProtein1;
            struct protein generiranProtein2;
            struct protein najdenI = arr1[i];
            struct protein najdenR1 = arr1[r1];
            // crossover point in [3, min(len)) so both prefixes are non-trivial
            int crospoint = (najdenI.dolzinaProteina < najdenR1.dolzinaProteina)
                              ? najdenI.dolzinaProteina : najdenR1.dolzinaProteina;
            crospoint -= 3;
            int crosoverPoint = rand() % crospoint + 3;
            // child 1: prefix of I, suffix of R1, length of R1
            std::copy(najdenI.proteinSmer, najdenI.proteinSmer + crosoverPoint, generiranProtein1.proteinSmer);
            std::copy(najdenR1.proteinSmer + crosoverPoint, najdenR1.proteinSmer + najdenR1.dolzinaProteina, generiranProtein1.proteinSmer + crosoverPoint);
            std::copy(najdenI.proteinVrsta, najdenI.proteinVrsta + crosoverPoint, generiranProtein1.proteinVrsta);
            std::copy(najdenR1.proteinVrsta + crosoverPoint, najdenR1.proteinVrsta + najdenR1.dolzinaProteina, generiranProtein1.proteinVrsta + crosoverPoint);
            generiranProtein1.dolzinaProteina = najdenR1.dolzinaProteina;
            // child 2: prefix of R1, suffix of I, length of I
            std::copy(najdenR1.proteinSmer, najdenR1.proteinSmer + crosoverPoint, generiranProtein2.proteinSmer);
            std::copy(najdenI.proteinSmer + crosoverPoint, najdenI.proteinSmer + najdenI.dolzinaProteina, generiranProtein2.proteinSmer + crosoverPoint);
            std::copy(najdenR1.proteinVrsta, najdenR1.proteinVrsta + crosoverPoint, generiranProtein2.proteinVrsta);
            std::copy(najdenI.proteinVrsta + crosoverPoint, najdenI.proteinVrsta + najdenI.dolzinaProteina, generiranProtein2.proteinVrsta + crosoverPoint);
            generiranProtein2.dolzinaProteina = najdenI.dolzinaProteina;
            // MUTATION: mutate a child when it duplicates a parent, or with a
            // small random probability. Indices drawn from [1, len) -- index 0
            // has no predecessor to compare against.
            if (enako(generiranProtein1, najdenI) || enako(generiranProtein1, najdenR1) || rand() % 100 < 2) {
                mutiraj_tocko(generiranProtein1, rand() % (generiranProtein1.dolzinaProteina - 1) + 1);
                mutiraj_tocko(generiranProtein1, rand() % (generiranProtein1.dolzinaProteina - 1) + 1);
                mutiraj_tocko(generiranProtein1, rand() % (generiranProtein1.dolzinaProteina - 1) + 1);
            }
            if (enako(generiranProtein2, najdenI) || enako(generiranProtein2, najdenR1) || rand() % 100 < 1) {
                mutiraj_tocko(generiranProtein2, rand() % (generiranProtein2.dolzinaProteina - 1) + 1);
                mutiraj_tocko(generiranProtein2, rand() % (generiranProtein2.dolzinaProteina - 1) + 1);
                mutiraj_tocko(generiranProtein2, rand() % (generiranProtein2.dolzinaProteina - 1) + 1);
            }
            // SELECTION: evaluate both children and keep the better one if it
            // beats the current parent
            tvori_mrezo(generiranProtein1);
            tvori_mrezo(generiranProtein2);
            tvori_mrezo(najdenI);
            doloci_hevristiko(generiranProtein1);
            doloci_hevristiko(generiranProtein2);
            doloci_hevristiko(najdenI);
            if ( generiranProtein1.hevristika > generiranProtein2.hevristika) {
                if (najdenI.hevristika < generiranProtein1.hevristika) {
                    arr1[i] = generiranProtein1;
                }
            }
            else {
                if (najdenI.hevristika < generiranProtein2.hevristika) {
                    arr1[i] = generiranProtein2;
                }
            }
            count++;
        }
    }
    int c[10] = {0};
    printf(" {%d,%d,%d,%d,%d,%d,%d}\n",
        arr1[1].proteinSmer[1], arr1[2].proteinSmer[1], arr1[3].proteinSmer[1], arr1[4].proteinSmer[1], arr1[5].proteinSmer[1], arr1[6].proteinSmer[1], arr1[7].proteinSmer[1]);
    printf(" {%d,%d,%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4], c[5], c[6]);
    // --- GPU sanity check: copy first 10 proteins over and read back one
    // direction entry per protein via addKernel ------------------------------
    int* dev_b = 0;
    struct protein* arrr = 0;
    cudaSetDevice(0);
    cudaMalloc((void**)&dev_b, 10 * sizeof(int));
    cudaMalloc((void**)&arrr, 10 * sizeof(struct protein));
    cudaMemcpy(arrr, arr1, 10 * sizeof(struct protein), cudaMemcpyHostToDevice);
    addKernel <<<1, 10 >>> (dev_b, arrr);
    cudaMemcpy(c, dev_b, 10 * sizeof(int), cudaMemcpyDeviceToHost);
    printf(" {%d,%d,%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4], c[5], c[6]);
    // Fix: the original also called cudaFree(c) on the host stack array `c`,
    // which is invalid; only the device allocations are freed.
    cudaFree(arrr);
    cudaFree(dev_b);
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
|
22,453 | #include "includes.h"
// Extract one outNX x outNY window per image from a batch of inNX x inNY images,
// shifting each window by a per-image (x, y) offset.
// Launch layout: (x, y) threads cover the output tile; blockIdx.z selects the image.
// Precondition (not checked): every offset must keep the window inside the input,
// i.e. 0 <= offsets[k].x <= inNX-outNX and 0 <= offsets[k].y <= inNY-outNY.
__global__ void cuArraysCopyExtractVaryingOffset(const float *imageIn, const int inNX, const int inNY, float *imageOut, const int outNX, const int outNY, const int nImages, const int2 *offsets)
{
int outx = threadIdx.x + blockDim.x*blockIdx.x;
int outy = threadIdx.y + blockDim.y*blockIdx.y;
if(outx < outNX && outy < outNY)
{
int idxImage = blockIdx.z;
// row-major indexing: images are stacked along the leading dimension
int idxOut = (blockIdx.z * outNX + outx)*outNY+outy;
int idxIn = (blockIdx.z*inNX + outx + offsets[idxImage].x)*inNY + outy + offsets[idxImage].y;
imageOut[idxOut] = imageIn[idxIn];
}
}
22,454 |
/*
compile using :
nvcc -std=c++11 -arch=sm_35 -DnumOfArrays=<number of arrays> -DmaxElements=<maximum number of elements per array> GPU-ArraySort.cu -o out
*/
/*
Copyright (C) Muaaz Gul Awan and Fahad Saeed
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include<iostream>
#include<vector>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<ctime>
#include<algorithm>
#include<utility>
#include <curand.h>
#include <curand_kernel.h>
#include<random>
using namespace std;
#define numOfArrays 50000
#define maxElements 1000
// #define numOfArrays 5
// #define maxElements 20
#define tempo 2
#define m 20
#define BUCKETS (maxElements/m)
#define sampleRate 10
#define SAMPLED (sampleRate*maxElements)/100
#define BLOCK_SIZE 1024
//data generation
template <typename mType>
struct dataArrays{
vector<mType> dataList; // all arrays' elements, concatenated back to back
int *prefixArray; // exclusive prefix sums: start offset of each array in dataList
};
/* template <typename type>
dataArrays<type> dataGen (int numOfArrays, int maxArraySize, int minArraySize){
dataArrays<int> data;
data.prefixArray = new int[numOfArrays+1]; //exclusive prefix scan
const int range_from = 0;
const unsigned int range_to = 30;//2147483647; //2^31 - 1
random_device rand_dev;
mt19937 generator(rand_dev());
uniform_int_distribution<int> distr(range_from, range_to);
int prefixSum = 0;
srand(time(0));
for( int i = 0; i < numOfArrays; i++){
int size = rand()%(maxArraySize-minArraySize + 1) + minArraySize;
data.prefixArray[i] = prefixSum;
for(int j = prefixSum; j < prefixSum + size; j++){
data.dataList.push_back(distr(generator));
}
prefixSum += size;
}
data.prefixArray[numOfArrays] = prefixSum;
return data;
} */
//swap function for Insertion sort
// Exchange two values through a temporary (device-side helper for the sorts below).
template <class type>
__device__ void swapD (type &a, type &b)
{
    type held = a;
    a = b;
    b = held;
}
//insertion sort
// In-place ascending insertion sort of input[begin, end), device-side.
template <class type>
__device__ void insertionSort(type *input, int begin, int end){
    for (int i = begin + 1; i < end; i++) {
        // sink element i leftwards until its predecessor is not larger
        for (int j = i; j > begin && input[j - 1] > input[j]; j--) {
            swapD(input[j], input[j - 1]);
        }
    }
}
// Index of the left child of heap node `index` in a 0-based array layout.
__device__ int left(int index) {
    return 2 * index + 1;
}
/*
template <typename type>
__device__ void maxSiftDown(type *array,int fromIndex, int toIndex, int index) {
int leftChildIndex = left(index);
// Right child index is one position from left child index towards
// larger indices.
int rightChildIndex = leftChildIndex + 1;
int maxChildIndex = index;
// Save the array component we want to sift down.
int target = array[fromIndex + index];
for (;;) {
if (fromIndex + leftChildIndex < toIndex
&& array[fromIndex + leftChildIndex] > target) {
maxChildIndex = leftChildIndex;
}
if (maxChildIndex == index) {
if (fromIndex + rightChildIndex < toIndex
&& array[fromIndex + rightChildIndex] > target) {
maxChildIndex = rightChildIndex;
}
} else {
if (fromIndex + rightChildIndex < toIndex
&& array[fromIndex + rightChildIndex] >
array[fromIndex + leftChildIndex]) {
maxChildIndex = rightChildIndex;
}
}
if (maxChildIndex == index) {
// No swap. Just insert the sifted element.
array[fromIndex + maxChildIndex] = target;
return;
}
// No swap here neither.
// Just move up the maximum to current position.
array[fromIndex + index] = array[fromIndex + maxChildIndex];
index = maxChildIndex;
leftChildIndex = left(index);
rightChildIndex = leftChildIndex + 1;
}
}
template <class type>
__device__ void buildMaxHeap(type *array, int fromIndex, int toIndex) {
int rangeLength = toIndex - fromIndex;
for (int i = rangeLength / 2; i >= 0; --i) {
maxSiftDown(array, fromIndex, toIndex, i);
}
}
// main function to do heap sort
template <class type>
__device__ void heapSort(type *array, int fromIndex, int toIndex) {
if (toIndex - fromIndex < 2) {
return;
}
// CLRS says 'BUILD-MAX-HEAP' is O(n).
buildMaxHeap(array, fromIndex, toIndex);
// And this is O(n log n).
for (int i = toIndex - 1; i > fromIndex; --i) {
int tmp = array[i];
array[i] = array[fromIndex];
array[fromIndex] = tmp;
maxSiftDown(array, fromIndex, i, 0);
}
}*/
// Sift-down for a max-heap stored in arr[ini, n): node indices are absolute
// array positions, and the temporary `i -= ini` / `i += ini` shift maps the
// absolute index into a 0-based heap before computing child positions.
// NOTE(review): recursion depth grows with heap size; the host raises
// cudaLimitStackSize before launching kernels that reach this.
template <class type>
__device__ void heapify(type *arr, int n, int i, int ini)
{
int largest = i; // Initialize largest as root
i -= ini;
int l = 2*i + 1 + ini; // left = 2*i + 1
int r = 2*i + 2 + ini; // right = 2*i + 2
i += ini;
// If left child is larger than root
if (l < n && arr[l] > arr[largest]){
largest = l;
}
// If right child is larger than largest so far
if (r < n && arr[r] > arr[largest]){
largest = r;
}
// If largest is not root
if (largest != i){
swapD(arr[i], arr[largest]);
// Recursively heapify the affected sub-tree
heapify(arr, n, largest, ini);
}
}
// main function to do heap sort
// Heap sort of arr[ini, fin] (bounds inclusive) using the offset-aware heapify.
// NOTE(review): the trailing parameter n is unused here -- presumably kept for
// the commented-out printArray debug calls; confirm before removing.
template <class type>
__device__ void heapSort(type *arr, int ini, int fin, int n)
{
// Build heap (rearrange array)
for (int i = ((fin+1-ini)/ 2) - 1+ini; i >= ini; i--){
heapify(arr, fin+1, i, ini);
}
//printArray(arr, ini, n);
// One by one extract an element from heap
for (int i=fin; i>=ini; i--){
// Move current root to en
swapD(arr[ini], arr[i]);
// call max heapify on the reduced heap
heapify(arr, i, ini, ini);
//printArray(arr, ini, n);
}
}
// Linear membership scan: return 2 if `key` occurs in input[0, size), else 0.
int findArr(float input[], int size, int key){
    int pos = 0;
    while (pos < size) {
        if (input[pos] == key) {
            return 2;
        }
        ++pos;
    }
    return 0;
}
// Scan input[beginPtr, endPtr) and write its minimum to ret[0] and maximum to
// ret[1]. Requires a non-empty range.
// Fix: the original initialised max to 0, which reports a wrong maximum
// whenever every element in the range is negative; both extremes now start
// from the first element of the range.
__device__ void getMinMax(float input[], int beginPtr, int endPtr, float *ret){
    float min = input[beginPtr];
    float max = input[beginPtr];
    for(int i = beginPtr + 1; i < endPtr; i++){
        if(min > input[i])
            min = input[i];
        if (max < input[i])
            max = input[i];
    }
    ret[0] = min;
    ret[1] = max;
}
// Per-block: sort a regular sample of this block's array and write BUCKETS+1
// splitter values (lower fence, BUCKETS-1 sampled pivots, max) into the global
// splitters array at this block's offset.
// Fix: the two-element min/max scratch buffer was allocated with device-side
// new/delete on every call; a local array avoids the device-heap round trip
// (and the failure mode where device new returns NULL on heap exhaustion).
__device__ void getSplitters (float input[], float splitters[], int sample[], int beginPtr, int endPtr){
    __shared__ float mySamples[SAMPLED];
    float ret[2];                         // [0] = min, [1] = max of the range
    for(int i = 0; i < SAMPLED; i++)
        mySamples[i] = input[beginPtr+sample[i]];
    insertionSort(mySamples, 0, SAMPLED);
    int splitterIndex = blockIdx.x*(BUCKETS+1)+1;
    int splittersSize=0;
    // every (SAMPLED/BUCKETS)-th sorted sample becomes an interior splitter
    for(int i = (SAMPLED)/(BUCKETS);splittersSize < BUCKETS-1; i +=SAMPLED/(BUCKETS)){
        splitters[splitterIndex] = mySamples[i];
        splitterIndex++;
        splittersSize++;
    }
    getMinMax(input, beginPtr, endPtr, ret);
    splitters[blockIdx.x*(BUCKETS+1)] = ret[0]-2;  // lower fence below the minimum
    splitters[blockIdx.x*(BUCKETS+1)+BUCKETS] = ret[1];
}
// One thread per bucket: collect the elements of the shared-memory copy
// (myInput) that fall in this thread's splitter interval, record the bucket
// size in bucketsSize[block*BUCKETS + tid + 1], then write the bucket back to
// the array's segment of global `input` at its prefix-sum offset.
// NOTE(review): myBucket[maxElements] lives in per-thread local memory -- large
// for a thread-local array; confirm register/local-memory pressure is acceptable.
__device__ void getBuckets2(float input[], float splitters[], int beginPtr, int endPtr, int bucketsSize[], float myInput[]){
int id = threadIdx.x;
int sizeOffset = blockIdx.x*BUCKETS+threadIdx.x;
int bucketSizeOff = sizeOffset+1;
float myBucket[maxElements];
int indexSum=0;
bucketsSize[bucketSizeOff] = 0;
// gather elements in (splitters[id], splitters[id+1]] into this thread's bucket
for(int i = 0; i < maxElements; i++){
if(myInput[i] > splitters[id] && myInput[i] <= splitters[id+1]){
myBucket[bucketsSize[bucketSizeOff]] = myInput[i];
bucketsSize[bucketSizeOff]++;
}
}
__syncthreads();
// serial prefix sum over the preceding buckets' sizes for this array
for(int j = 0; j < threadIdx.x; j++)
indexSum += bucketsSize[blockIdx.x*BUCKETS+j+1];
// write the bucket back into the input array at its offset
for(int i = 0; i < bucketsSize[bucketSizeOff]; i++)
input[indexSum+beginPtr+i] = myBucket[i];
}
// Variant of getBuckets2 that reads the array's segment directly from global
// memory instead of a shared-memory copy. One thread per bucket: gather
// elements in (splitters[id], splitters[id+1]], record the bucket size, then
// write the bucket back in splitter order at its prefix-sum offset. The
// __syncthreads() ensures all reads of `input` complete before any write-back.
__device__ void getBuckets(float input[], float splitters[], int beginPtr, int endPtr, int bucketsSize[]){
int id = threadIdx.x;
int sizeOffset = blockIdx.x*BUCKETS+threadIdx.x;
int bucketSizeOff = sizeOffset+1;
float myBucket[maxElements];
int indexSum=0;
bucketsSize[bucketSizeOff] = 0;
// gather this thread's bucket from global memory
for(int i = 0; i < maxElements; i++){
if(input[beginPtr+i] > splitters[id] && input[beginPtr+i] <= splitters[id+1]){
myBucket[bucketsSize[bucketSizeOff]] = input[beginPtr+i];
bucketsSize[bucketSizeOff]++;
}
}
__syncthreads();
// serial prefix sum over the preceding buckets' sizes for this array
for(int j = 0; j < threadIdx.x; j++)
indexSum += bucketsSize[blockIdx.x*BUCKETS+j+1];
// write the bucket back into the input array at its offset
for(int i = 0; i < bucketsSize[bucketSizeOff]; i++)
input[indexSum+beginPtr+i] = myBucket[i];
}
// Single-thread-per-block bucketing (integer variant): turn the sampled indices
// into sorted splitter values, then partition input[beginPtr, endPtr) into
// BUCKETS contiguous buckets in `output`, recording each bucket's size.
// NOTE(review): mutates `sample` in place (indices become values), so the
// sample array cannot be shared across blocks calling this concurrently.
__device__ void bucketer(int input[], int bucketsSize[], int sample[], int beginPtr, int endPtr, int output[]){
int id = blockIdx.x;
const int toBeSampled = SAMPLED;
const int buckets = BUCKETS;
__shared__ int splitters[buckets-1];
//converting samples into unsorted-unselected-splitters
for(int i = 0; i < toBeSampled; i ++)
sample[i] = input[beginPtr+sample[i]];
insertionSort(sample, 0, toBeSampled);
//taking splitters out
int splittersSize=0;
for(int i = (toBeSampled)/(buckets);splittersSize < buckets-1; i +=toBeSampled/(buckets)){
splitters[splittersSize] = sample[i];
splittersSize++;
}
// one pass over the data per bucket: append matching elements at the running
// offset sumBsize and count them in bucketsSize[i]
int sumBsize=0;
int sIndex = 0;
for(int i = id*BUCKETS; i < (id*BUCKETS+BUCKETS); i++){
bucketsSize[i] = 0;
for(int j = 0; j <maxElements ; j++){
//for bucket 0
if(sIndex == 0){
if( input[beginPtr+j] <= splitters[0]){
output[beginPtr+sumBsize+bucketsSize[i]]=input[beginPtr+j];
bucketsSize[i]++;
}
}
//for last bucket
else if(sIndex == buckets-1){
if( input[beginPtr+j] > splitters[splittersSize-1]){
output[beginPtr+sumBsize+bucketsSize[i]] = input[beginPtr+j];
bucketsSize[i]++;
}
}
else{
if( input[beginPtr+j] > splitters[sIndex-1] && input[beginPtr+j] <= splitters[sIndex]) {
output[beginPtr+sumBsize+bucketsSize[i]] = input[beginPtr+j];
bucketsSize[i]++;
}
}
}
sumBsize += bucketsSize[i];
sIndex++;
}
}
// One block per array: stage the shared sample indices in shared memory and
// compute that array's splitters. Launched as <<<numOfArrays, 1>>>.
__global__ void splitterKer(float *data, float *splitters, int *mySample){
    if(blockIdx.x < numOfArrays){
        int id = blockIdx.x;
        int arrBegin = id*maxElements;
        int arrEnd = arrBegin + maxElements;
        __shared__ int sampleSh[SAMPLED];
        for(int i = 0; i < SAMPLED; i++)
            sampleSh[i] = mySample[i];
        getSplitters(data, splitters, sampleSh, arrBegin, arrEnd);
    }
    // Fix: the original ended with an unconditional `data[0] = 9999;` executed
    // by every block -- a debugging leftover that overwrote the first input
    // element (racing across all blocks) before the data was bucketed/sorted.
}
// One block per array, one thread per bucket: stage the array and its splitters
// in shared memory, then bucket the array in place via getBuckets2.
// Launched as <<<numOfArrays, BUCKETS>>>.
// NOTE(review): every thread writes the full shared `splitters` array and
// bucketsSize[0] concurrently (all with identical values) and no barrier
// separates that copy from its use inside getBuckets2 -- benign only because
// the values written are identical; confirm under racecheck.
__global__ void bucketEM2(float *data, int *bucketSizes, float *splittersGlob){
if(blockIdx.x < numOfArrays){
bucketSizes[0] = 0;
int bid = blockIdx.x;
int tid = threadIdx.x;
int leftOvers = maxElements%BUCKETS;
int jmpFac = maxElements/BUCKETS;
// this thread's slice of the array copy (last thread takes the remainder)
int gArrayStart = bid*maxElements+tid*jmpFac;
int gArrayEnd = (tid==(BUCKETS-1))?(gArrayStart + jmpFac+leftOvers):(gArrayStart + jmpFac);
int lArrayStart = tid*jmpFac;
__shared__ float myInput [maxElements];
int arrBegin = bid*maxElements;
int arrEnd = arrBegin + maxElements;
int splitterIndexSt = blockIdx.x*(BUCKETS+1);
int splitterIndexEd = splitterIndexSt + BUCKETS+1;
__shared__ float splitters[BUCKETS+1];
//copy my array in shared memory in parallel
for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){
myInput[i] = data[j];
}
__syncthreads();
// every thread copies this array's splitters to shared memory
int j = 0;
for(int i = splitterIndexSt; i < splitterIndexEd; i++){
splitters[j] = splittersGlob[i];
j++;
}
getBuckets2(data, splitters, arrBegin, arrEnd, bucketSizes, myInput);
}
}
// One block per array, one thread per bucket: stage the bucketed array in
// shared memory, heap-sort each thread's bucket, and write the result back.
// Launched as <<<numOfArrays, BUCKETS>>>.
// NOTE(review): heapSort treats its `fin` bound as inclusive, while the
// commented-out insertionSort call used the same value as an exclusive end --
// looks like an off-by-one that sorts one element past the bucket; verify
// against the bucket-size semantics before changing.
__global__ void sortEM2(float *buckets, int *bucketSizes){
if(blockIdx.x < numOfArrays && threadIdx.x < BUCKETS){
int bid = blockIdx.x;
int tid = threadIdx.x;
int leftOvers = maxElements%BUCKETS;
int jmpFac = maxElements/BUCKETS;
// this thread's slice of the array copy (last thread takes the remainder)
int gArrayStart = bid*maxElements+tid*jmpFac;
int gArrayEnd = (tid==(BUCKETS-1))?(gArrayStart + jmpFac+leftOvers):(gArrayStart + jmpFac);
int lArrayStart = tid*jmpFac;
__shared__ float myArray [maxElements];
int indexSum = 0;
for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){
myArray[i] = buckets[j];
}
__syncthreads();
// serial prefix sum: start offset of this thread's bucket
for(int j = 0; j < threadIdx.x; j++)
indexSum += bucketSizes[blockIdx.x*BUCKETS+j+1];
// insertionSort(myArray, indexSum,indexSum+ bucketSizes[blockIdx.x*BUCKETS+threadIdx.x+1]);
//heapSort(myArray, indexSum, indexSum + bucketSizes[blockIdx.x*BUCKETS+threadIdx.x+1]);
heapSort(myArray, indexSum, indexSum + bucketSizes[blockIdx.x*BUCKETS+threadIdx.x+1], maxElements);
__syncthreads();
// write the sorted array back to global memory
for(int i=lArrayStart,j=gArrayStart;j<gArrayEnd;i++,j++){
buckets[j] = myArray[i];
}
__syncthreads();
}
}
// One thread per bucket: insertion-sort bucket [prefixSum[lo], prefixSum[hi])
// of this block's array directly in global memory.
__global__ void sortEM(int *buckets, int *prefixSum){
    if(blockIdx.x < numOfArrays && threadIdx.x < BUCKETS){
        int base = blockIdx.x * BUCKETS;
        int lo = prefixSum[base + threadIdx.x];
        int hi = prefixSum[base + threadIdx.x + 1];
        insertionSort(buckets, lo, hi);
    }
}
// Host driver: generate numOfArrays random arrays, then sample splitters,
// bucket, and sort every array on the GPU, timing the three kernels.
// Fixes vs. original: new[]-allocated host buffers were released with free()
// (undefined behaviour -> delete[]); deprecated cudaThreadSynchronize() replaced
// with cudaDeviceSynchronize(); the int sample buffer was sized/copied with
// sizeof(float); h_splitters was over-allocated by a sizeof(float) factor.
int main ()
{
    const int range_from = 0;
    // const unsigned int range_to = 2147483647; //2^31 - 1
    const unsigned int range_to = 1024;
    random_device rand_dev;
    mt19937 generator(rand_dev());
    uniform_int_distribution<int> distr(range_from, range_to);
    size_t f,t;
    int *d_bucketSizes , *h_bucketSizes;
    float *d_data, *h_buckets, *d_splitters, *h_splitters;
    int numBlocks = ceil((float)(BUCKETS*numOfArrays+1)/(BLOCK_SIZE<<1));
    dim3 dimGrid(numBlocks, 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    float *h_data = new float[numOfArrays*maxElements];
    h_buckets = new float[numOfArrays*maxElements];
    h_bucketSizes = new int[BUCKETS*numOfArrays+1];
    // fix: element count, not bytes (was (BUCKETS+1)*sizeof(float)*numOfArrays)
    h_splitters = new float[(BUCKETS+1)*numOfArrays];
    size_t size_heap, size_stack;
    int *h_sample = new int[SAMPLED];
    int *d_sample;
    cudaMemGetInfo(&f, &t);
    // raise the device stack limit: heapify recurses on the device
    cudaDeviceSetLimit(cudaLimitStackSize,10240);
    cudaDeviceGetLimit(&size_heap, cudaLimitMallocHeapSize);
    cudaDeviceGetLimit(&size_stack, cudaLimitStackSize);
    // regular sampling: every stride-th index of each array
    int max = maxElements;
    int sam = SAMPLED;
    int stride = max/sam;
    int sampleVal = 0;
    for( int i = 0; i < SAMPLED; i++){
        h_sample[i] = sampleVal;
        sampleVal += stride;
    }
    // device memory for data, sampled indices and bucket sizes
    cudaMalloc((void**) &d_sample, SAMPLED*sizeof(int));   // fix: was sizeof(float)
    cudaMalloc((void**) &d_data, numOfArrays*maxElements*sizeof(float));
    cudaMalloc((void**) &d_bucketSizes, numOfArrays*sizeof(int)*BUCKETS+sizeof(int));
    cudaMalloc((void**) &d_splitters, (BUCKETS+1)*sizeof(float)*numOfArrays);
    srand(time(NULL));
    cudaMemGetInfo(&f,&t);
    // fill host data with uniform random values in [range_from, range_to]
    for(int i = 0; i < numOfArrays; i++){
        for(int j = 0; j < maxElements; j++){
            h_data [j+i*maxElements] = distr(generator) ;
        }
    }
    // copy data and samples to the GPU
    cudaMemcpy(d_data, h_data, numOfArrays*maxElements*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_sample, h_sample, SAMPLED*sizeof(int), cudaMemcpyHostToDevice);
    clock_t firstKrTime = clock();
    splitterKer<<<numOfArrays,1>>>(d_data, d_splitters, d_sample);
    cudaDeviceSynchronize();   // fix: cudaThreadSynchronize() is deprecated
    firstKrTime = clock() - firstKrTime;
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess){
        printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
    }
    if (errAsync != cudaSuccess){
        printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
    }
    cudaMemcpy(h_splitters, d_splitters, (BUCKETS+1)*sizeof(float)*numOfArrays, cudaMemcpyDeviceToHost);
    clock_t secondKrTime = clock();
    bucketEM2<<<numOfArrays,BUCKETS>>>(d_data, d_bucketSizes, d_splitters);
    cudaDeviceSynchronize();
    secondKrTime = clock()-secondKrTime;
    cudaMemGetInfo(&f,&t);
    // bucket sizes back to the CPU (prefix summation happens in the sort kernel)
    cudaMemcpy(h_bucketSizes, d_bucketSizes, sizeof(int)*(BUCKETS*numOfArrays+1), cudaMemcpyDeviceToHost);
    // sample indices and splitters are no longer needed on the device
    cudaFree(d_sample);
    cudaFree(d_splitters);
    clock_t fourKrTime = clock();
    sortEM2<<<numOfArrays, BUCKETS>>>(d_data, d_bucketSizes);
    cudaDeviceSynchronize();
    fourKrTime = clock()-fourKrTime;
    // total kernel time in milliseconds
    cout<<(firstKrTime+secondKrTime+fourKrTime)/double(CLOCKS_PER_SEC)*1000<<endl;
    // copy the sorted data back
    cudaMemcpy(h_buckets, d_data, numOfArrays*maxElements*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    cudaFree(d_bucketSizes);
    // fix: these were allocated with new[] but released with free() -- UB
    delete [] h_bucketSizes;
    delete [] h_buckets;
    delete [] h_data;
    delete [] h_sample;
    delete [] h_splitters;
    return 0;
}
|
22,455 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
/* square root of number of threads in a block (the number of threads in a block is NT^2) */
#define NT 32
/* length of the target domain */
#define L 10.0
/* number of division for the discretization of the target domain */
#define N 256
/* dimensionless time step size (theta = D * dt / dx^2) */
#define THETA 0.1
/* number of iterations */
#define M 2000
/* constants on a GPU */
__device__ __constant__ int n;
__device__ __constant__ double theta;
//GPU functions-----------------------------------------------------------------
// One explicit diffusion step on an n x n periodic grid, reading and writing
// global memory only. Threads cover the i (x) dimension via blockDim.x * gridDim.x;
// each thread walks the j (y) dimension in strides of NT. n and theta are
// __constant__ symbols set by the host.
__global__ void diffusion_global(double *field_device, double *field_device_new) {
int i_global;
int j_global;
int i_left, i_right;
int j_top, j_bottom;
i_global = blockDim.x * blockIdx.x + threadIdx.x;
if(i_global < n) {
// periodic (wrap-around) neighbours in the i direction
i_right = (i_global + 1) % n;
i_left = (i_global - 1 + n) % n;
for(j_global = threadIdx.y; j_global < n; j_global += NT) {
j_top = (j_global + 1) % n;
j_bottom = (j_global - 1 + n) % n;
// 5-point stencil: new = old + theta * (sum of 4 neighbours - 4*old)
field_device_new[i_global * n + j_global] = field_device[i_global * n + j_global]
+ theta * (field_device[i_right * n + j_global] + field_device[i_left * n + j_global]
+ field_device[i_global * n + j_top] + field_device[i_global * n + j_bottom]
- 4.0 * field_device[i_global * n + j_global]);
}
}
}
// Shared-memory variant of the diffusion step: each (NT x NT) block stages its
// tile plus a one-cell halo in field_shared, then applies the 5-point stencil.
// NOTE(review): the __syncthreads() sits inside `if(i_global < n)` -- if n is
// not a multiple of blockDim.x some threads skip the barrier, which is
// undefined behaviour; there is also no barrier at the bottom of the j loop,
// so a fast thread can overwrite shared tiles a neighbour is still reading.
// Confirm n % NT == 0 is guaranteed by the host (N=256, NT=32 satisfies it).
__global__ void diffusion_shared(double *field_device, double *field_device_new) {
int i_global;
int j_global;
int i_shared;
int j_shared;
int i_left, i_right;
int j_top, j_bottom;
double field_register;
// (NT+2)^2 tile: interior plus one-cell halo on each side
__shared__ double field_shared[(NT + 2) * (NT + 2)];
i_global = blockDim.x * blockIdx.x + threadIdx.x;
i_shared = threadIdx.x + 1;
j_shared = threadIdx.y + 1;
if(i_global < n) {
for(j_global = threadIdx.y; j_global < n; j_global += NT) {
//copy field from global to shared----------------------
field_register = field_device[i_global * n + j_global];
field_shared[i_shared * (NT + 2) + j_shared] = field_register;
// edge threads also load the periodic halo cells
if(i_shared == 1) {
i_left = (i_global - 1 + n) % n;
field_shared[0 * (NT + 2) + j_shared] = field_device[i_left * n + j_global];
} else if(i_shared == NT) {
i_right = (i_global + 1) % n;
field_shared[(NT + 1) * (NT + 2) + j_shared] = field_device[i_right * n + j_global];
}
if(j_shared == 1) {
j_bottom = (j_global - 1 + n) % n;
field_shared[i_shared * (NT + 2) + 0] = field_device[i_global * n + j_bottom];
} else if(j_shared == NT) {
j_top = (j_global + 1) % n;
field_shared[i_shared * (NT + 2) + (NT + 1)] = field_device[i_global * n + j_top];
}
__syncthreads();
//calculate field evolution-----------------------------
field_device_new[i_global * n + j_global] = field_register
+ theta * (field_shared[(i_shared + 1) * (NT + 2) + j_shared] + field_shared[(i_shared - 1) * (NT + 2) + j_shared]
+ field_shared[i_shared * (NT + 2) + (j_shared + 1)] + field_shared[i_shared * (NT + 2) + (j_shared - 1)]
- 4.0 * field_register);
}
}
}
//Host functions----------------------------------------------------------------
// Initialise the field with two diagonal quadrants set to 1.0 (lower-left and
// upper-right of the l x l domain) and the rest to 0.0.  The array is written
// as field[n*j + i]; the pattern is symmetric under i<->j, so this matches the
// kernels' i*n + j layout as well.
void init_field(double *field_host, int n_host, int l_host) {
const double step = l_host / (double)n_host;  // cell size (square grid)
const double midst = l_host * 0.5;
for (int i = 0; i < n_host; i++) {
const double x = (double)i * step;
for (int j = 0; j < n_host; j++) {
const double y = (double)j * step;
const int same_side = (x > midst && y > midst) || (x < midst && y < midst);
field_host[n_host * j + i] = same_side ? 1.0 : 0.0;
}
}
}
// Swap the two ping-pong buffer indices in place.
void flip_ij(int *i, int *j) {
const int saved = *i;
*i = *j;
*j = saved;
}
// Write the field to file_write as "x y value" triples, one grid point per
// line, row-major (i outer, j inner).  Fixes from review:
//  * `y` was computed from `j` before `j` was ever initialised (undefined
//    behaviour), and the x/y updates were attached to the wrong loops;
//  * the loops ran to the compile-time macro N instead of the `n` argument.
void print_field(FILE *file_write, double *field, int n, double l) {
const double d = l / (double)n;
for (int i = 0; i < n; i += 1) {
const double x = (double)i * d;
for (int j = 0; j < n; j += 1) {
const double y = (double)j * d;
fprintf(file_write, "%f %f %f\n", x, y, field[i * n + j]);
}
}
}
// CPU reference implementation of one explicit diffusion step with periodic
// boundaries: new = old + theta * (4-neighbour sum - 4*old).
void diffusion_host(double *field_host, double *field_host_new, int n_host, double theta_host) {
for (int i = 0; i < n_host; i++) {
const int i_next = (i + 1) % n_host;
const int i_prev = (i - 1 + n_host) % n_host;
for (int j = 0; j < n_host; j++) {
const int j_next = (j + 1) % n_host;
const int j_prev = (j - 1 + n_host) % n_host;
const double center = field_host[i * n_host + j];
field_host_new[i * n_host + j] = center
+ theta_host * (field_host[i_next * n_host + j] + field_host[i_prev * n_host + j]
+ field_host[i * n_host + j_next] + field_host[i * n_host + j_prev]
- 4.0 * center);
}
}
}
// Sum of squared element-wise differences between two n x n fields
// (used to compare GPU results against the CPU reference).
double check_residue(double *field_host, double *field_device, int n_host) {
double residue = 0.0;
const int total = n_host * n_host;
for (int i = 0; i < total; i++) {
const double diff = field_host[i] - field_device[i];
residue += diff * diff;
}
return residue;
}
// Benchmark driver: runs `iteration` diffusion steps of an n x n periodic
// field three times -- on the CPU, on the GPU with global memory only, and on
// the GPU with shared-memory tiling -- timing each variant with clock() and
// reporting the squared residue of each GPU result against the CPU reference.
int main(void) {
//declare variables--------------------------------------------------------------
int i;
int j;
int k;
int n_host;
int n_square;
int iteration;
int n_blocks;
double l_host;
double theta_host;
dim3 dim_threads;
double *field_host[2];        // pinned host ping-pong buffers (CPU run)
double *field_device[2];      // device ping-pong buffers (GPU runs)
double *result_host;          // CPU reference result
double *result_global_host;   // GPU result, global-memory kernel
double *result_shared_host;   // GPU result, shared-memory kernel
FILE *file_write;
char filename_write[256];
clock_t start, end;
//initialize--------------------------------------------------------------------
//set variables---------------------------------------------------------
// N, L, THETA, NT, M are compile-time macros defined at the top of the file.
n_host = N;
n_square = N * N;
l_host = L;
theta_host = THETA;
dim_threads.x = NT;
dim_threads.y = NT;
dim_threads.z = 1;
n_blocks = (int)(ceil((double)n_host / NT));   // 1D grid over rows
iteration = M;
//allocate memories-----------------------------------------------------
// Publish n and theta to device __constant__ memory; kernels read them there.
cudaMemcpyToSymbol(n, &n_host, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(theta, &theta_host, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaHostAlloc((void **)&field_host[0], n_square * sizeof(double), cudaHostAllocMapped);
cudaHostAlloc((void **)&field_host[1], n_square * sizeof(double), cudaHostAllocMapped);
cudaHostAlloc((void **)&result_global_host, n_square * sizeof(double), cudaHostAllocMapped);
cudaHostAlloc((void **)&result_shared_host, n_square * sizeof(double), cudaHostAllocMapped);
cudaMalloc((void **)&field_device[0], n_square * sizeof(double));
cudaMalloc((void **)&field_device[1], n_square * sizeof(double));
result_host = (double *)malloc(n_square * sizeof(double));
//calculate on CPU--------------------------------------------------------------
//initialize field------------------------------------------------------
init_field(field_host[0], n_host, l_host);
start = clock();
//iteration-------------------------------------------------------------
// i/j ping-pong: read buffer i, write buffer j, then swap.
i = 0;
j = 1;
for(k = 0; k < iteration; k += 1) {
diffusion_host(field_host[i], field_host[j], n_host, theta_host);
flip_ij(&i, &j);
}
//save and print out----------------------------------------------------
memcpy(result_host, field_host[i], n_square * sizeof(double));
end = clock();
printf("host:%ld\n", end - start);
/*sprintf(filename_write, "result_host.txt");
file_write = fopen(filename_write, "w");
print_field(file_write, result_host, n_host, l_host);
fclose(file_write);*/
//calculate using only global memory--------------------------------------------
//initialize field------------------------------------------------------
init_field(field_host[0], n_host, l_host);
start = clock();
cudaMemcpy(field_device[0], field_host[0], n_square * sizeof(double), cudaMemcpyHostToDevice);
//iteration-------------------------------------------------------------
i = 0;
j = 1;
for(k = 0; k < iteration; k += 1) {
diffusion_global<<<n_blocks, dim_threads>>>(field_device[i], field_device[j]);
// sync each step so clock() timing covers the kernel work
cudaDeviceSynchronize();
flip_ij(&i, &j);
}
//copy to host and print out--------------------------------------------
cudaMemcpy(result_global_host, field_device[i], n_square * sizeof(double), cudaMemcpyDeviceToHost);
end = clock();
printf("global:%ld\n", end - start);
/*sprintf(filename_write, "result_global.txt");
file_write = fopen(filename_write, "w");
print_field(file_write, result_global_host, n_host, l_host);
fclose(file_write);*/
//calculate using shared memory-------------------------------------------------
//initialize field------------------------------------------------------
init_field(field_host[0], n_host, l_host);
start = clock();
cudaMemcpy(field_device[0], field_host[0], n_square * sizeof(double), cudaMemcpyHostToDevice);
//iteration-------------------------------------------------------------
i = 0;
j = 1;
for(k = 0; k < iteration; k += 1) {
diffusion_shared<<<n_blocks, dim_threads>>>(field_device[i], field_device[j]);
cudaDeviceSynchronize();
flip_ij(&i, &j);
}
//copy to host and print out--------------------------------------------
cudaMemcpy(result_shared_host, field_device[i], n_square * sizeof(double), cudaMemcpyDeviceToHost);
end = clock();
printf("shared:%ld\n", end - start);
/*sprintf(filename_write, "result_shared.txt");
file_write = fopen(filename_write, "w");
print_field(file_write, result_shared_host, n_host, l_host);
fclose(file_write);*/
//check answers-----------------------------------------------------------------
// Residues should be ~0 (up to floating-point noise) if the kernels agree
// with the CPU reference.
printf("answers\n");
printf("global:%f\n", check_residue(result_host, result_global_host, n_host));
printf("shared:%f\n", check_residue(result_host, result_shared_host, n_host));
//finalize----------------------------------------------------------------------
cudaFreeHost(field_host[0]);
cudaFreeHost(field_host[1]);
cudaFreeHost(result_global_host);
cudaFreeHost(result_shared_host);
cudaFree(field_device[0]);
cudaFree(field_device[1]);
free(result_host);
return 0;
}
|
22,456 | #include "includes.h"
// In-place sine of row (idx-1) of a row-major buffer with N columns, scaled
// by angular frequency W0: x[row][i] = sin(W0 * x[row][i]).  Grid-stride
// loop, so any launch configuration is valid.
// Fixes from review: double-precision sin() was applied to float data
// (forcing a float->double->float round trip); the loop index was a plain
// int compared against a size_t N (overflow for N > INT_MAX); and the
// element address was computed twice per iteration.
__global__ void Sin( float * x, size_t idx, size_t N, float W0)
{
const size_t row_base = (idx - 1) * N;
const size_t stride = (size_t)blockDim.x * gridDim.x;
for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
{
x[row_base + i] = sinf(W0 * x[row_base + i]);
}
return;
}
22,457 | #include <stdio.h>
#include <stdlib.h>
// Report a CUDA runtime error with file/line context and abort; no-op when
// the status is cudaSuccess.
void cudaHandleError( cudaError_t err,const char *file,int line ) {
if (err == cudaSuccess) {
return;
}
printf( "CUDA Error\n%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
exit( EXIT_FAILURE );
}
// Ceiling division: the number of groups of `blocks` needed to cover `size`
// (for non-negative sizes); usable from both host and device code.
__host__ __device__ int threads_ceildiv(int size,int blocks){
const int rounded_up = size + blocks - 1;
return rounded_up / blocks;
}
|
22,458 | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda_runtime_api.h>
#include <stdint.h>
#include <stdlib.h>
// Element-wise widening cast u8 -> f32; one thread per element, guarded
// against the partial final block.
__global__ void cast_u8_to_f32(
    uint32_t dim,
    const uint8_t *x,
    float *y)
{
  uint32_t i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i >= dim) {
    return;
  }
  y[i] = (float)x[i];
}
// C-linkage launcher: one thread per element, 1024 threads per block,
// ceil-div for the grid, enqueued on the caller's stream.
extern "C" void arraydiff_cuda_kernel_cast_u8_to_f32(
    size_t dim,
    const uint8_t *x,
    float *y,
    cudaStream_t stream)
{
  const size_t block = 1024;
  const size_t grid = (dim + block - 1) / block;
  cast_u8_to_f32<<<grid, block, 0, stream>>>(dim, x, y);
}
// Widening cast u8 -> f32, four elements at a time via uchar4/float4 vector
// accesses.  NOTE(review): `idx` is still a per-ELEMENT index (the paired
// launcher spawns ~dim threads), so the four threads of each quad all compute
// the same i = idx >> 2 and store an identical float4 -- redundant but
// consistent writes.  In the last partial quad the scalar fallback can also
// rewrite bytes already covered by a neighbour's vector store, again with the
// same value.  Assumes x and y are suitably aligned for uchar4/float4
// accesses -- TODO confirm at the allocation sites.
__global__ void cast_u8x4_to_f32x4(
    uint32_t dim,
    const uint8_t *x,
    float *y)
{
  uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx + 4 <= dim) {
    // Full quad: reinterpret both buffers as vectors of four.
    uint32_t i = idx >> 2;
    uchar4 vx_i = ((const uchar4 *)x)[i];
    float4 vy_i = { (float)vx_i.x, (float)vx_i.y, (float)vx_i.z, (float)vx_i.w };
    ((float4 *)y)[i] = vy_i;
  } else if (idx < dim) {
    // Tail: scalar cast for the last dim % 4 elements.
    y[idx] = (float)(x[idx]);
  }
}
// C-linkage launcher for the vectorised cast.  Same per-element launch
// geometry as the scalar variant; the kernel itself groups elements into
// float4 stores.
extern "C" void arraydiff_cuda_kernel_cast_u8x4_to_f32x4(
    size_t dim,
    const uint8_t *x,
    float *y,
    cudaStream_t stream)
{
  const size_t block = 1024;
  cast_u8x4_to_f32x4<<<(dim + block - 1) / block, block, 0, stream>>>(dim, x, y);
}
|
22,459 | #include <stdio.h>
#include <stdlib.h>
// Element-wise vector add: d_c[i] = d_a[i] + d_b[i].
// NOTE(review): no bounds guard -- the launch configuration must cover the
// arrays exactly, as main()'s <<<N/M, M>>> does for N a multiple of M.
__global__ void add(int* d_a, int* d_b, int* d_c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    d_c[i] = d_a[i] + d_b[i];
}
// Element-wise add of two N-vectors on the GPU, printed on the host.
// Fix from review: the original leaked every host and device allocation.
int main( void) {
const int N = 512;
const int M = 64;               // threads per block; N is a multiple of M
int size = N*sizeof(int);
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
for(int i = 0; i<N; i++){
a[i] = i;
b[i] = 2*i;
}
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
add<<<N/M,M>>>(d_a, d_b, d_c);
// The blocking device-to-host copy also synchronises with the kernel.
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(int i = 0; i<N; i++){
printf ("%d \t",c[i]);
}
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(a);
free(b);
free(c);
return 0;
}
|
22,460 | #include<vector>
#include<iostream>
#include<algorithm>
using namespace std;
const int know_stop_size = 100000 + 10;
vector<int > know_stop_num[know_stop_size], know_stop_len[know_stop_size];
// Number of leading zeros in a 32-bit value (binary-search formulation,
// Hacker's Delight style).  nlz(0) == 32, nlz(1) == 31, nlz(2^31) == 0.
int nlz(unsigned x){
if (x == 0) return 32;
int count = 1;
// Narrow down the position of the highest set bit 16/8/4/2 bits at a time.
if ((x & 0xFFFF0000u) == 0) { count += 16; x <<= 16; }
if ((x & 0xFF000000u) == 0) { count += 8; x <<= 8; }
if ((x & 0xF0000000u) == 0) { count += 4; x <<= 4; }
if ((x & 0xC0000000u) == 0) { count += 2; x <<= 2; }
return count - (int)(x >> 31);
}
// Demo driver: buckets N random values into N/early_size sorted, deduplicated
// vectors (each padded with sentinels 0 and 2^31-1), then records, for each
// consecutive pair, the number of leading zeros of their XOR (i.e. the length
// of their common binary prefix), and prints everything.
// NOTE(review): relies on srand/rand/time/printf being visible via the
// transitive includes of <iostream>/<algorithm> -- <cstdio>/<cstdlib>/<ctime>
// are not included explicitly; confirm this compiles on stricter toolchains.
int main(){
int N = 100,early_size = 10;
srand(time(0));
// Fill bucket i/early_size with random values in [0, N).
for (int i = 0; i < N; i++) {
know_stop_num[i/early_size].push_back (rand()%N);
}
for (int i = 0;i < N/ early_size; i++){
// Sentinels so every real value has a neighbour on both sides.
know_stop_num[i].push_back(0);
know_stop_num[i].push_back((1U<<31)-1);
sort(know_stop_num[i].begin(),know_stop_num[i].end());
know_stop_num[i].erase(unique(know_stop_num[i].begin(), know_stop_num[i].end()), know_stop_num[i].end());
// know_stop_len[i][j] = common-prefix length of entries j and j-1.
know_stop_len[i].push_back(0);
for(int j=1;j < know_stop_num[i].size();j++)
know_stop_len[i].push_back(nlz(know_stop_num[i][j] ^ know_stop_num[i][j-1]));
}
for (int i = 0;i < N/ early_size; i++){
for(int j=0;j < know_stop_num[i].size();j++)
printf("%d %d\n",know_stop_num[i][j],know_stop_len[i][j]);
}
}
|
22,461 | /*
* Example of using reducing (tree) type algorithms to parallelize finding the sum of
* a set of numbers. On a GF 8600 GT the two parallel algorithms (sumControl = 0 or 1)
* are about 35 times faster than the serial algorithm also running on the GPU but using
* global memory (sumControl=2), for an array of 512 floats. This is both because
* the parallel algorithms scale as ln N while the serial algorithm scales as N, and because the
* parallel algorithms use the shared memory while the serial one uses the (generally slower)
* global memory in these tests. If we do the serial algorithm on the same computer but use
* shared memory (set sumControl=3), the parallel algorithms are only 3.5 times faster. Thus a
* factor of 10 in the speed up is because of using the shared memory in this example. Note,
* however, that shared memory can only be shared among the threads within a single block.
*
*/
#include <stdio.h>
#define BLOCKSIZE 512
// Define some GPU timing utilities. These are invoked from the host program. Usage:
// START_GPU;
// kernelFunction <<< numBlocks, threadsPerBlock >>> (args)
// STOP_GPU;
// PRINT_GPU
// in the host code. This estimates the time for the kernel kernelFunction to run on the GPU.
// For a more extensive discusion, see Section 5.1.2 of the CUDA Best Practices Guide at
// http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Best_Practices_Guide.pdf
float timeGPU;
cudaEvent_t start, stop;
#define START_GPU cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0);
#define STOP_GPU cudaEventRecord(stop, 0); cudaEventSynchronize(stop);\
cudaEventElapsedTime(&timeGPU, start, stop);\
cudaEventDestroy(start);cudaEventDestroy(stop);
#define PRINT_GPU printf("\n\nTime to compute on GPU: %f ms \n", timeGPU);
// Define a utility to check for CUDA errors. Place it immediately after a CUDA kernel
// call in the host code. The initial cudaDeviceSynchronize() command ensures that the device
// has completed all preceding requested tasks.
#define CUDA_ERROR_CHECK cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError();\
if(error != cudaSuccess){printf("***CUDA error: %s\n", cudaGetErrorString(error)); exit(-1);}\
else{printf("\nNo CUDA errors detected\n" );}
// Device code. Sums the elements of the array Array and puts the result in Sum
// Forward declaration; the reduction helper is defined below.
__device__ float reductionSum(int, float *);

// Device code. Sums the elements of Array into *Sum (single-block launch).
__global__ void SumKernel(float* Array, float* Sum, int arraySize)
{
    *Sum = reductionSum(arraySize, Array);
}
/*
Function to do parallel reduction sum. This should scale as ln N. The parallel butterfly
algorithm taken from the literature works generally. My homegrown parallel version
works as written for even number of entries in the array, so this algorithm can be used
for an odd number by padding the array with an added zero entry. Note that this version
assumes that all summations are within one block, so a max of 512 threads on 1.1 devices
(presently blocksize is set to 256). One option for larger sums is to break the array up
onto multiple blocks, use this algorithm on each block to get a block sum, and then sum
the block sums.
*/
// Single-block reduction sum with four selectable strategies (sumControl):
// 0 = parallel butterfly on shared memory, 1 = home-made parallel tree,
// 2 = serial over global memory, 3 = serial over shared memory.
// NOTE(review): the __syncthreads() calls in paths 0 and 1 sit inside
// divergent conditionals (i < length, i < length-inc); this is only uniform
// when length >= BLOCKSIZE and all BLOCKSIZE threads are launched, as main()
// does here (512 threads, length 512) -- confirm before reusing elsewhere.
__device__ float reductionSum(int length, float *array)
{
float sum = 0.0f;
// = 0 or 1 for parallel with shared memory, 2 for serial with global, 3 for serial with shared
int sumControl = 0;
// Copy the array to be summed into shared memory and initialize.
// Zero-padding to BLOCKSIZE keeps the butterfly's i^bit partner reads safe.
__shared__ float sarray[BLOCKSIZE];
int i = threadIdx.x;
sarray[i] = 0.0f;
if(i<length) sarray[i] = array[i];
__syncthreads();
if(sumControl == 0)
{
// Parallel butterfly sum
// see http://cs.anu.edu.au/files/systems/GPUWksp/PDFs/02_CUDAParallelProgrammingModel.pdf
for(int bit=BLOCKSIZE/2; bit>0; bit /= 2)
{
if(i<length)
{
float t=sarray[i] + sarray[i^bit];
__syncthreads();
sarray[i] = t;
__syncthreads();
}
}
// The array entries sarray[i] with i<length/2 now all contain the sum
sum = sarray[0];
}
else if(sumControl == 1)
{
// Another home-made parallel version of a reduction sum. As written, this requires an even
// number of entries in the array to be summed, so pad with a zero to handle odd number
// (or rewrite to deal with odd number).
// NOTE(review): threads with i >= length-inc store a stale `t` (0 on the
// first pass) back into sarray[i]; looks fragile -- verify against a
// serial sum before relying on this path.
int L=length;
int steps = 0;
int inc = 1;
float t = 0;
while(L > 2 )
{
steps ++;
if(i < (length-inc))
t = sarray[i] + sarray[i+inc];
__syncthreads();
sarray[i] = t;
__syncthreads();
inc *= 2;
L /= 2;
}
sum = sarray[0] + sarray[inc]; // This contains the sum
}
else if(sumControl == 2)
{
// Serial version of sum accessing global (not shared) memory
sum = 0.0f;
for(int i=0; i<length; i++)
{
sum += array[i];
}
}
else
{
// Serial version of sum accessing shared memory
sum = 0.0f;
for(int i=0; i<length; i++)
{
sum += sarray[i];
}
}
return sum;
}
// Host code
// Host code: fills an array with 0,2,4,..., sums it with a single-block
// kernel launch, times the kernel with the START_GPU/STOP_GPU event macros,
// and prints the result.  Expected sum = 2 * (0+1+...+511) = 261632.
int main(void)
{
int arraySize = 512;
float* Array;
Array = (float*) malloc(sizeof(float) * arraySize);
// Fill array with some numbers
for(int i=0; i<arraySize; i++)
{
Array[i] = 2.0f * (float) i;
}
// Set up device pointers
float *devPtrArray;
float Sum;
float* devPtrSum;
cudaMalloc((void**)&devPtrSum, sizeof(float));
// Allocate device memory
cudaMalloc((void**)&devPtrArray, arraySize * sizeof(float));
// Copy array to device
cudaMemcpy(devPtrArray, Array, arraySize * sizeof(float), cudaMemcpyHostToDevice);
// Launch the kernel. Single block of 512 threads: the reduction assumes
// everything fits in one block's shared memory (see reductionSum).
START_GPU; // Start timer for device code
SumKernel<<<1, 512>>>(devPtrArray, devPtrSum, arraySize);
STOP_GPU; // Stop timer for device code
PRINT_GPU; // Print timing for device code
CUDA_ERROR_CHECK
// Copy the sum back from the GPU to the host
cudaMemcpy(&Sum, devPtrSum, sizeof(float), cudaMemcpyDeviceToHost);
printf("\nSum array elements multiplied by 2 (GPU) = %6.2f\n\n", Sum);
// Free the memory allocated on the device
cudaFree(devPtrSum);
cudaFree(devPtrArray);
// Free the memory allocated on the CPU
free(Array);
return 0;
}
|
22,462 | #pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
// Heap-allocate a dim3 with the given extents (y and z default to 1).
// The caller owns the returned pointer and must free() it.
dim3 *dim3Ctr(int x, int y = 1, int z = 1)
{
    dim3 *d = (dim3 *)malloc(sizeof(dim3));
    d->x = x;
    d->y = y;
    d->z = z;
    return d;
}
// Heap-allocate the unit dim3 {1, 1, 1}.  Caller must free() the result.
dim3 *dim3Unit()
{
    dim3 *d = (dim3 *)malloc(sizeof(dim3));
    d->x = 1;
    d->y = 1;
    d->z = 1;
    return d;
}
// Total number of threads/elements spanned by a dim3 (x * y * z).
int dim3Vol(dim3 *a)
{
    const int plane = a->x * a->y;
    return plane * a->z;
}
// Print a dim3's extents.  Fix from review: dim3 members are unsigned int,
// so the format specifier is %u (the original %d would misprint values
// above INT_MAX and is undefined for unsigned arguments).
void printDim3(dim3 *yow)
{
    printf("yow: {%u, %u, %u}", yow->x, yow->y, yow->z);
}
// Pick a per-dimension thread-block edge for a 2D launch: wider problems get
// bigger power-of-two tiles (16 for >=256, 8 for >=64, 4 for >=16, 2 for >=4,
// else 1).
int ThreadChop2d(int width)
{
    if (width >= 256)
        return 16;
    if (width >= 64)
        return 8;
    if (width >= 16)
        return 4;
    if (width >= 4)
        return 2;
    return 1;
}
|
22,463 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
/* Problem size */
#define M 1024
#define N 1024
#define BDIMX 16
#define BDIMY 16
#define FLOAT_N 3214212.01
// Fill the (M+1)x(N+1) data matrix (1-based rows/columns, polybench style)
// with data[i][j] = i*j / M.  Row 0 and column 0 are left untouched.
void init_arrays(double* data)
{
	for (int i = 1; i <= M; i++) {
		for (int j = 1; j <= N; j++) {
			data[i*(N+1) + j] = ((double) i*j) / M;
		}
	}
}
// CPU reference: column means, in-place centering of data, then the symmetric
// (M+1)x(M+1) covariance-style matrix -- all 1-based, leading dimension M+1.
// NOTE(review): means are divided by FLOAT_N (a polybench-style constant),
// not by N; also the GPU kernels below index data with leading dimension M,
// not M+1, so CPU and GPU paths operate on different layouts -- verify before
// comparing their outputs.
void covariance(double* data, double* symmat, double* mean)
{
	int i, j, j1,j2;
	/* Determine mean of column vectors of input data matrix */
	for (j = 1; j < (M+1); j++) {
		mean[j] = 0.0;
		for (i = 1; i < (N+1); i++) {
			mean[j] += data[i*(M+1) + j];
		}
		mean[j] /= FLOAT_N;
	}
	/* Center the column vectors (mutates data in place). */
	for (i = 1; i < (N+1); i++) {
		for (j = 1; j < (M+1); j++) {
			data[i*(M+1) + j] -= mean[j];
		}
	}
	/* Calculate the m * m covariance matrix: fill the upper triangle and
	   mirror each entry into the lower triangle. */
	for (j1 = 1; j1 < (M+1); j1++) {
		for (j2 = j1; j2 < (M+1); j2++) {
			symmat[j1*(M+1) + j2] = 0.0;
			for (i = 1; i < N+1; i++) {
				symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
			}
			symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
		}
	}
}
// One thread per column j: column sum of the n x m data matrix (row-major,
// leading dimension m), divided by FLOAT_N, stored into mean[j].
// Fixes from review: the kernel ignored its m/n parameters and hard-coded
// the compile-time M/N (callers pass m = n = 1024, so behaviour is
// unchanged); the accumulation now runs in a register instead of
// read-modify-writing global memory every iteration.
__global__ void kernel1(int m, int n, double *mean, double *data)
{
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < (unsigned int)m)
	{
		double acc = 0.0;
		for (int i = 0; i < n; i++)
		{
			acc += data[i * m + j];
		}
		mean[j] = acc / FLOAT_N;
	}
}
// Centering step: subtract the column mean from every element of the n x m
// data matrix (one thread per element, 2D launch).
// Fix from review: the kernel ignored its m/n parameters and hard-coded the
// compile-time M/N (callers pass m = n = 1024, so behaviour is unchanged).
__global__ void kernel2(int m, int n,double *mean, double *data)
{
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < (unsigned int)n) && (j < (unsigned int)m))
	{
		data[i * m + j] -= mean[j];
	}
}
// Thread j1 fills row j1 of the upper triangle of the m x m covariance
// matrix (and mirrors it into the lower triangle), accumulating over the n
// centred rows of data.
// Fixes from review: the kernel ignored its m/n parameters and hard-coded
// the compile-time M/N (callers pass m = n = 1024, so behaviour is
// unchanged); the inner sum now accumulates in a register.
__global__ void kernel3(int m, int n,double *symmat,double *data)
{
	unsigned int j1 = blockIdx.x * blockDim.x + threadIdx.x;
	if (j1 < (unsigned int)m)
	{
		for (int j2 = j1; j2 < m; j2++)
		{
			double acc = 0.0;
			for (int i = 0; i < n; i++)
			{
				acc += data[i * m + j1] * data[i * m + j2];
			}
			symmat[j1 * m + j2] = acc;
			symmat[j2 * m + j1] = acc;
		}
	}
}
// Driver: times the CPU covariance reference, then the three GPU kernels.
// Fixes from review: the original launched every kernel with grid(1,1) and a
// 16x16 block, so only 256 of the 1024 columns/rows were ever processed; in
// addition kernel1/kernel3 are 1D kernels (they flatten only the x index), so
// launching them with a 16x16 block made all 16 threadIdx.y lanes compute the
// same j and race on the output.  kernel1/kernel3 now get a 1D launch that
// covers all m columns, and kernel2 a 2D grid covering the full m x n matrix.
int main(int argc, char *argv[])
{
	double *data;
	double *symmat;
	double *mean;
	unsigned int n=N,m=N;
	printf("covariance starting...\n");
	// set up device
	int dev = 0;
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, dev);
	printf("Device properties %d: %s\n", dev, deviceProp.name);
	cudaSetDevice(dev);
	struct timeval cpu_start, cpu_end;
	data = (double*)malloc((M+1)*(N+1)*sizeof(double));
	symmat = (double*)malloc((M+1)*(M+1)*sizeof(double));
	mean = (double*)malloc((M+1)*sizeof(double));
	init_arrays(data);
	gettimeofday(&cpu_start, NULL);
	covariance(data, symmat, mean);
	gettimeofday(&cpu_end, NULL);
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 + (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);
	double *dataOnGPU;
	double *meanOnGPU;
	double *symmatOnGPU;
	// malloc device global memory
	cudaMalloc((void **)&dataOnGPU,(M+1)*(N+1)*sizeof(double));
	cudaMalloc((void **)&symmatOnGPU,(M+1)*(M+1)*sizeof(double));
	cudaMalloc((void **)&meanOnGPU,(M+1)*sizeof(double));
	// transfer data from host to device
	cudaMemcpy(dataOnGPU,data,(M+1)*(N+1)*sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy(symmatOnGPU,symmat,(M+1)*(M+1)*sizeof(double),cudaMemcpyHostToDevice);
	cudaMemcpy(meanOnGPU, mean,(M+1)*sizeof(double),cudaMemcpyHostToDevice);
	// invoke kernels at host side
	// 1D launch for the per-column kernels (kernel1, kernel3)
	dim3 block1d (BDIMX * BDIMY, 1);
	dim3 grid1d ((m + block1d.x - 1) / block1d.x, 1);
	// 2D launch covering the full m x n matrix (kernel2)
	dim3 block (BDIMX, BDIMY);
	dim3 grid ((m + BDIMX - 1) / BDIMX, (n + BDIMY - 1) / BDIMY);
	printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
			block.y);
	gettimeofday(&cpu_start, NULL);
	kernel1<<<grid1d, block1d>>>(m,n,meanOnGPU,dataOnGPU);
	cudaDeviceSynchronize();
	kernel2<<<grid, block>>>(m,n,meanOnGPU,dataOnGPU);
	cudaDeviceSynchronize();
	kernel3<<<grid1d, block1d>>>(m,n,symmatOnGPU,dataOnGPU);
	cudaDeviceSynchronize();
	gettimeofday(&cpu_end, NULL);
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 + (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);
	// check kernel error
	cudaGetLastError();
	// free device global memory
	cudaFree(dataOnGPU);
	cudaFree(symmatOnGPU);
	cudaFree(meanOnGPU);
	// free host memory
	free(data);
	free(symmat);
	free(mean);
	// reset device
	cudaDeviceReset();
	return (0);
}
|
22,464 | #include "imageprocessing.cuh"
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <sstream>
#include <cuda_runtime.h>
// TODO: read about the CUDA programming model: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#programming-model
// If everything is setup correctly, this file is compiled by the CUDA/C++ compiler (that is different from the C++ compiler).
// The CUDA/C++ compiler understands certain things that your C++ compiler doesn't understand - like '__global__', 'threadIdx', and function calls with triple-angle brackets, e.g., testArray<<<...>>>();
#define RIDX(X, Y, H, W) (X*W+Y)
#define GIDX(X, Y, H, W) ((H+X)*W+Y)
#define BIDX(X, Y, H, W) ((H*2+X)*W+Y)
#define KERNEL_SIZE 9
__constant__ float KERNEL[KERNEL_SIZE*KERNEL_SIZE]; // define a constant memory
__global__
void _brighten(unsigned char *out_image, unsigned char *in_image, float brightnessfactor, int height, int width) {
    // Scale R, G, B by brightnessfactor, saturating at 255.  The image is
    // three height x width planes stacked vertically; pos_x is the row index
    // and pos_y the column index (see the pos_r/g/b computations below).
    int pos_x = threadIdx.x + blockIdx.x * blockDim.x;
    int pos_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Fix from review: the guard compared pos_x (a row index, multiplied by
    // width below and checked against height in the convolution kernels)
    // against width, and pos_y against height -- swapped bounds that only
    // worked for square images and wrote out of plane otherwise.
    if (pos_x >= height || pos_y >= width) return;
    int pos_r = pos_x * width + pos_y;                  // R plane
    int pos_g = (height + pos_x) * width + pos_y;       // G plane
    int pos_b = (height * 2 + pos_x) * width + pos_y;   // B plane
    float val_r, val_g, val_b;
    // change brightness
    val_r = in_image[pos_r] * brightnessfactor;
    val_g = in_image[pos_g] * brightnessfactor;
    val_b = in_image[pos_b] * brightnessfactor;
    // clamp to the 8-bit range
    if (val_r > 255)
        val_r = 255;
    if (val_g > 255)
        val_g = 255;
    if (val_b > 255)
        val_b = 255;
    out_image[pos_r] = val_r;
    out_image[pos_g] = val_g;
    out_image[pos_b] = val_b;
}
__global__
void _contrast(unsigned char *out_image, unsigned char *in_image, float contrastfactor, int height, int width) {
    // Linear contrast: v' = v * f + (1 - f) * 255, saturating at 255.
    // Layout: three height x width planes stacked vertically; pos_x = row,
    // pos_y = column.
    int pos_x = threadIdx.x + blockIdx.x * blockDim.x;
    int pos_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Fix from review: bounds guard had width/height swapped (pos_x is the
    // row index -- see pos_x * width below); only correct for square images.
    if (pos_x >= height || pos_y >= width) return;
    int pos_r = pos_x * width + pos_y;
    int pos_g = (height + pos_x) * width + pos_y;
    int pos_b = (height * 2 + pos_x) * width + pos_y;
    float val_r, val_g, val_b;
    val_r = in_image[pos_r] * contrastfactor + (1 - contrastfactor) * 255;
    val_g = in_image[pos_g] * contrastfactor + (1 - contrastfactor) * 255;
    val_b = in_image[pos_b] * contrastfactor + (1 - contrastfactor) * 255;
    // clamp to the 8-bit range
    if (val_r > 255)
        val_r = 255;
    if (val_g > 255)
        val_g = 255;
    if (val_b > 255)
        val_b = 255;
    out_image[pos_r] = val_r;
    out_image[pos_g] = val_g;
    out_image[pos_b] = val_b;
}
__global__ void
_saturation(unsigned char *out_image, unsigned char *in_image, float saturationfactor, int height, int width) {
    // Blend each channel with the pixel's luminance:
    // v' = v * f + (1 - f) * intensity, saturating at 255.
    int pos_x = threadIdx.x + blockIdx.x * blockDim.x;
    int pos_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Fix from review: bounds guard had width/height swapped (pos_x is the
    // row index -- see pos_x * width below); only correct for square images.
    if (pos_x >= height || pos_y >= width) return;
    int pos_r = pos_x * width + pos_y;
    int pos_g = (height + pos_x) * width + pos_y;
    int pos_b = (height * 2 + pos_x) * width + pos_y;
    // luminance with coefficients 0.2125 / 0.7154 / 0.0721
    float intensity = (0.2125 * in_image[pos_r] + 0.7154 * in_image[pos_g] + 0.0721 * in_image[pos_b]);
    float val_r, val_g, val_b;
    val_r = in_image[pos_r] * saturationfactor + (1 - saturationfactor) * intensity;
    val_g = in_image[pos_g] * saturationfactor + (1 - saturationfactor) * intensity;
    val_b = in_image[pos_b] * saturationfactor + (1 - saturationfactor) * intensity;
    // clamp to the 8-bit range
    if (val_r > 255)
        val_r = 255;
    if (val_g > 255)
        val_g = 255;
    if (val_b > 255)
        val_b = 255;
    out_image[pos_r] = val_r;
    out_image[pos_g] = val_g;
    out_image[pos_b] = val_b;
}
__global__ void
_sharpen(unsigned char *out_image, unsigned char *in_image, float sharpenfactor, float *conv_kernel, int length,
         int height, int width) {
    // Unsharp-style sharpening: out = original + sharpenfactor * conv(in),
    // with a (2*length+1)^2 kernel; out-of-bounds taps reuse the centre pixel.
    int pos_x = threadIdx.x + blockIdx.x * blockDim.x;
    int pos_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Fix from review: bounds guard had width/height swapped (pos_x is the
    // row index -- the in-loop checks below compare pos_x + i to height).
    if (pos_x >= height || pos_y >= width) return;
    int l = 2 * length + 1;
    float tmpr, tmpg, tmpb, originr, origing, originb;
    originr = ((float) in_image[RIDX(pos_x, pos_y, height, width)]) / 255.0f;
    origing = ((float) in_image[GIDX(pos_x, pos_y, height, width)]) / 255.0f;
    originb = ((float) in_image[BIDX(pos_x, pos_y, height, width)]) / 255.0f;
    tmpr = tmpg = tmpb = 0.0f;
    for (int i = (-length); i <= length; i++) {
        for (int j = (-length); j <= length; j++) {
            int convidx = (i + length) * l + j + length;
            if (pos_x + i < 0 || pos_x + i >= height || pos_y + j < 0 || pos_y + j >= width) {
                // border policy: substitute the centre pixel
                tmpr += conv_kernel[convidx] * originr;
                tmpg += conv_kernel[convidx] * origing;
                tmpb += conv_kernel[convidx] * originb;
            } else {
                tmpr += conv_kernel[convidx] * ((float) in_image[RIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
                tmpg += conv_kernel[convidx] * ((float) in_image[GIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
                tmpb += conv_kernel[convidx] * ((float) in_image[BIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
            }
        }
    }
    tmpr = (originr + sharpenfactor * tmpr)*255;
    tmpg = (origing + sharpenfactor * tmpg)*255;
    tmpb = (originb + sharpenfactor * tmpb)*255;
    // clamp to the 8-bit range
    if (tmpr > 255)
        tmpr = 255;
    if (tmpg > 255)
        tmpg = 255;
    if (tmpb > 255)
        tmpb = 255;
    out_image[RIDX(pos_x, pos_y, height, width)] = tmpr;
    out_image[GIDX(pos_x, pos_y, height, width)] = tmpg;
    out_image[BIDX(pos_x, pos_y, height, width)] = tmpb;
}
__global__ void
_conv2d(unsigned char *out_image, unsigned char *in_image, float *conv_kernel, int length, int height, int width) {
    // 2D convolution with a (2*length+1)^2 kernel per channel; out-of-bounds
    // taps reuse the centre pixel.
    int pos_x = threadIdx.x + blockIdx.x * blockDim.x;
    int pos_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Fix from review: bounds guard had width/height swapped (pos_x is the
    // row index -- the in-loop checks below compare pos_x + i to height).
    if (pos_x >= height || pos_y >= width) return;
    int l = 2 * length + 1;
    float tmpr, tmpg, tmpb, originr, origing, originb;
    originr = ((float) in_image[RIDX(pos_x, pos_y, height, width)]) / 255.0f;
    origing = ((float) in_image[GIDX(pos_x, pos_y, height, width)]) / 255.0f;
    originb = ((float) in_image[BIDX(pos_x, pos_y, height, width)]) / 255.0f;
    tmpr = tmpg = tmpb = 0.0f;
    for (int i = (-length); i <= length; i++) {
        for (int j = (-length); j <= length; j++) {
            int convidx = (i + length) * l + j + length;
            if (pos_x + i < 0 || pos_x + i >= height || pos_y + j < 0 || pos_y + j >= width) {
                // border policy: substitute the centre pixel
                tmpr += conv_kernel[convidx] * originr;
                tmpg += conv_kernel[convidx] * origing;
                tmpb += conv_kernel[convidx] * originb;
            } else {
                tmpr += conv_kernel[convidx] * ((float) in_image[RIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
                tmpg += conv_kernel[convidx] * ((float) in_image[GIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
                tmpb += conv_kernel[convidx] * ((float) in_image[BIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
            }
        }
    }
    tmpr = tmpr*255;
    tmpg = tmpg*255;
    tmpb = tmpb*255;
    // clamp to the 8-bit range
    if (tmpr > 255)
        tmpr = 255;
    if (tmpg > 255)
        tmpg = 255;
    if (tmpb > 255)
        tmpb = 255;
    out_image[RIDX(pos_x, pos_y, height, width)] = tmpr;
    out_image[GIDX(pos_x, pos_y, height, width)] = tmpg;
    out_image[BIDX(pos_x, pos_y, height, width)] = tmpb;
}
__global__ void
_conv2d_constant_memory(unsigned char *out_image, unsigned char *in_image, int height, int width) {
    // Same convolution as _conv2d, but the KERNEL_SIZE x KERNEL_SIZE weights
    // come from __constant__ memory (KERNEL), so they are dropped from the
    // signature; all threads read the same weight per step (broadcast).
    int pos_x = threadIdx.x + blockIdx.x * blockDim.x;
    int pos_y = threadIdx.y + blockIdx.y * blockDim.y;
    // Fix from review: bounds guard had width/height swapped (pos_x is the
    // row index -- the in-loop checks below compare pos_x + i to height).
    if (pos_x >= height || pos_y >= width) return;
    int length = KERNEL_SIZE/2;
    int l = KERNEL_SIZE;
    float tmpr, tmpg, tmpb, originr, origing, originb;
    originr = ((float) in_image[RIDX(pos_x, pos_y, height, width)]) / 255.0f;
    origing = ((float) in_image[GIDX(pos_x, pos_y, height, width)]) / 255.0f;
    originb = ((float) in_image[BIDX(pos_x, pos_y, height, width)]) / 255.0f;
    tmpr = tmpg = tmpb = 0.0f;
    for (int i = (-length); i <= length; i++) {
        for (int j = (-length); j <= length; j++) {
            int convidx = (i + length) * l + j + length;
            if (pos_x + i < 0 || pos_x + i >= height || pos_y + j < 0 || pos_y + j >= width) {
                // border policy: substitute the centre pixel
                tmpr += KERNEL[convidx] * originr;
                tmpg += KERNEL[convidx] * origing;
                tmpb += KERNEL[convidx] * originb;
            } else {
                tmpr += KERNEL[convidx] * ((float) in_image[RIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
                tmpg += KERNEL[convidx] * ((float) in_image[GIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
                tmpb += KERNEL[convidx] * ((float) in_image[BIDX((pos_x + i), (pos_y + j), height, width)]) /
                        255.0f;
            }
        }
    }
    tmpr = tmpr*255;
    tmpg = tmpg*255;
    tmpb = tmpb*255;
    // clamp to the 8-bit range
    if (tmpr > 255)
        tmpr = 255;
    if (tmpg > 255)
        tmpg = 255;
    if (tmpb > 255)
        tmpb = 255;
    out_image[RIDX(pos_x, pos_y, height, width)] = tmpr;
    out_image[GIDX(pos_x, pos_y, height, width)] = tmpg;
    out_image[BIDX(pos_x, pos_y, height, width)] = tmpb;
}
__global__ void
_conv2d_shared_mem(unsigned char *out_image, unsigned char *in_image, float *conv_kernel,
                   int length, int height, int width) {
    // Convolution variant that stages the block's 16x16 pixel tile (three
    // channels) and the weights in shared memory, falling back to global
    // loads for taps outside the tile.
    // NOTE(review) -- several latent problems to verify before reuse:
    //  * the early return happens BEFORE __syncthreads(), so edge blocks hit
    //    the barrier with divergent participation;
    //  * bounds guard compares pos_x to width and pos_y to height, although
    //    pos_x is used as the row index below (same swap as the sibling
    //    kernels -- only correct for square images);
    //  * the shared-tile reads in the innermost else branch index
    //    sdata[(threadIdx.x + i) * 16 + threadIdx.y ...] without adding j,
    //    so the column offset of the tap is ignored;
    //  * sconv_kernel is filled with a thread-id-based index that does not
    //    match the convidx layout used to read it.
    int pos_x = threadIdx.x + blockIdx.x * blockDim.x;
    int pos_y = threadIdx.y + blockIdx.y * blockDim.y;
    if (pos_x >= width || pos_y >= height) return;
    const int share_size=16;
    __shared__ float sdata[share_size * share_size * 3];
    __shared__ float sconv_kernel[share_size * share_size * 3];
    if (threadIdx.x * share_size + threadIdx.y < length * length * 3)
        sconv_kernel[threadIdx.x * share_size + threadIdx.y] = conv_kernel[threadIdx.x * share_size + threadIdx.y];
    // Stage this thread's pixel, one plane per channel.
    sdata[threadIdx.x * share_size + threadIdx.y] = ((float) in_image[RIDX(pos_x, pos_y, height, width)]) / 255.0f;
    sdata[threadIdx.x * share_size + threadIdx.y + share_size * share_size] = ((float) in_image[GIDX(pos_x, pos_y, height, width)]) / 255.0f;
    sdata[threadIdx.x * share_size + threadIdx.y + share_size * share_size * 2] =
            ((float) in_image[BIDX(pos_x, pos_y, height, width)]) / 255.0f;
    __syncthreads(); // then other process will be free using the shared memory based on cache mechanism.
    int l = 2 * length + 1;
    float tmpr, tmpg, tmpb, originr, origing, originb;
    originr = sdata[threadIdx.x * share_size + threadIdx.y];
    origing = sdata[threadIdx.x * share_size + threadIdx.y + share_size * share_size];
    originb = sdata[threadIdx.x * share_size + threadIdx.y + share_size * share_size * 2];
    tmpr = tmpg = tmpb = 0.0f;
    for (int i = (-length); i <= length; i++) {
        for (int j = (-length); j <= length; j++) {
            int convidx = (i + length) * l + j + length;
            if (pos_x + i < 0 || pos_x + i >= height || pos_y + j < 0 || pos_y + j >= width) {
                // border policy: substitute the centre pixel
                tmpr += sconv_kernel[convidx] * originr;
                tmpg += sconv_kernel[convidx] * origing;
                tmpb += sconv_kernel[convidx] * originb;
            } else {
                int idx = (int) threadIdx.x;
                int idy = (int) threadIdx.y;
                if (idx + i < 0 || idx + i >= 16 || idy + j < 0 || idy + j >= 16) {
                    // tap falls outside this block's tile: read global memory
                    tmpr += sconv_kernel[convidx] * ((float) in_image[RIDX((pos_x + i), (pos_y + j), height, width)]) /
                            255.0f;
                    tmpg += sconv_kernel[convidx] * ((float) in_image[GIDX((pos_x + i), (pos_y + j), height, width)]) /
                            255.0f;
                    tmpb += sconv_kernel[convidx] * ((float) in_image[BIDX((pos_x + i), (pos_y + j), height, width)]) /
                            255.0f;
                } else {
                    // tap inside the tile: read shared memory (see NOTE above
                    // about the missing + j column offset)
                    tmpr += sconv_kernel[convidx] * sdata[(threadIdx.x + i) * 16 + threadIdx.y];
                    tmpg += sconv_kernel[convidx] * sdata[(threadIdx.x + i) * 16 + threadIdx.y + 16 * 16];
                    tmpb += sconv_kernel[convidx] * sdata[(threadIdx.x + i) * 16 + threadIdx.y + 16 * 16 * 2];
                }
            }
        }
    }
    if (tmpr >= 1.0f) tmpr = 1.0f;
    if (tmpg >= 1.0f) tmpg = 1.0f;
    if (tmpb >= 1.0f) tmpb = 1.0f;
    out_image[RIDX(pos_x, pos_y, height, width)] = (unsigned char) (tmpr * 255.0f);
    out_image[GIDX(pos_x, pos_y, height, width)] = (unsigned char) (tmpg * 255.0f);
    out_image[BIDX(pos_x, pos_y, height, width)] = (unsigned char) (tmpb * 255.0f);
}
void brighten_func(unsigned char *out_image, unsigned char *in_image,
                   float brightness_factor, int height, int width) {
    // Host wrapper: copy the RGB image to the device, run _brighten, copy the
    // result back and report the kernel time.
    dim3 grid(1, 512);
    dim3 block(512, 1);
    int imgproduct = height * width;
    size_t bytes = imgproduct * 3 * sizeof(unsigned char);
    unsigned char *cuda_out, *cuda_in;
    cudaMalloc((void **) &cuda_in, bytes);
    cudaMalloc((void **) &cuda_out, bytes);
    // copy the original_image into cuda
    cudaMemcpy(cuda_in, in_image, bytes, cudaMemcpyHostToDevice);
    // events bracket only the kernel execution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    _brighten <<< grid, block >>> (cuda_out, cuda_in, brightness_factor, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // copy the output into the image array
    cudaMemcpy(out_image, cuda_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_out);
    cudaFree(cuda_in);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // BUGFIX: the message previously said "contrast" in the brighten wrapper.
    printf("brighten takes time: %f ms \n", milliseconds);
    // BUGFIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void contrast_func(unsigned char *out_image, unsigned char *in_image,
                   float contrastfactor, int height, int width) {
    // Host wrapper: copy the RGB image to the device, run _contrast, copy the
    // result back and report the kernel time.
    dim3 grid(1, 512);
    dim3 block(512, 1);
    int imgproduct = height * width;
    size_t bytes = imgproduct * 3 * sizeof(unsigned char);
    unsigned char *cuda_out, *cuda_in;
    cudaMalloc((void **) &cuda_in, bytes);
    cudaMalloc((void **) &cuda_out, bytes);
    // copy the original_image into cuda
    cudaMemcpy(cuda_in, in_image, bytes, cudaMemcpyHostToDevice);
    // events bracket only the kernel execution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    _contrast <<< grid, block >>> (cuda_out, cuda_in, contrastfactor, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // copy the output into the image array
    cudaMemcpy(out_image, cuda_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_out);
    cudaFree(cuda_in);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("contrast takes time: %f ms \n", milliseconds);
    // BUGFIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void saturation_func(unsigned char *out_image, unsigned char *in_image,
                     float saturationfactor, int height, int width) {
    // Host wrapper: copy the RGB image to the device, run _saturation, copy
    // the result back and report the kernel time.
    dim3 grid(1, 512);
    dim3 block(512, 1);
    int imgproduct = height * width;
    size_t bytes = imgproduct * 3 * sizeof(unsigned char);
    unsigned char *cuda_out, *cuda_in;
    cudaMalloc((void **) &cuda_in, bytes);
    cudaMalloc((void **) &cuda_out, bytes);
    // copy the original_image into cuda
    cudaMemcpy(cuda_in, in_image, bytes, cudaMemcpyHostToDevice);
    // events bracket only the kernel execution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    _saturation <<< grid, block >>> (cuda_out, cuda_in, saturationfactor, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // copy the output into the image array
    cudaMemcpy(out_image, cuda_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_out);
    cudaFree(cuda_in);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("saturation takes time: %f ms \n", milliseconds);
    // BUGFIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void sharpen_func(unsigned char *out_image, unsigned char *in_image, float sharpenfactor,
                  float *conv_kernel, int length, int height, int width) {
    // Host wrapper for the sharpening kernel. `length` is the full (odd)
    // kernel width; the device kernel receives the half-width.
    assert(length % 2 == 1);
    int halflen = length / 2;
    dim3 grid(1, 512);
    dim3 block(512, 1);
    int imgproduct = height * width;
    size_t bytes = imgproduct * 3 * sizeof(unsigned char);
    unsigned char *cuda_out, *cuda_in;
    float *cuda_conv_kernel;
    cudaMalloc((void **) &cuda_in, bytes);
    cudaMalloc((void **) &cuda_out, bytes);
    cudaMalloc((void **) &cuda_conv_kernel, length * length * sizeof(float));
    // copy the original_image and the kernel taps into cuda
    cudaMemcpy(cuda_in, in_image, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_conv_kernel, conv_kernel, length * length * sizeof(float), cudaMemcpyHostToDevice);
    // events bracket only the kernel execution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    _sharpen <<< grid, block >>> (cuda_out, cuda_in, sharpenfactor, cuda_conv_kernel, halflen, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // copy the output into the image array
    cudaMemcpy(out_image, cuda_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_in);
    cudaFree(cuda_out);
    cudaFree(cuda_conv_kernel);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("sharpening takes time: %f ms \n", milliseconds);
    // BUGFIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void conv2d_func(unsigned char *out_image, unsigned char *in_image, float *conv_kernel, int length,
                 int height, int width) {
    // Host wrapper for the naive convolution kernel. `length` is the full
    // (odd) kernel width; the device kernel receives the half-width.
    assert(length % 2 == 1);
    int halflen = length / 2;
    // NOTE(review): block(1,1) means one thread per block -- extremely low
    // occupancy, but the _conv2d kernel's indexing is not visible here, so
    // the launch shape is preserved.
    dim3 grid(512, 512);
    dim3 block(1, 1);
    int imgproduct = height * width;
    size_t bytes = imgproduct * 3 * sizeof(unsigned char);
    unsigned char *cuda_out, *cuda_in;
    float *cuda_conv_kernel;
    cudaMalloc((void **) &cuda_in, bytes);
    cudaMalloc((void **) &cuda_out, bytes);
    cudaMalloc((void **) &cuda_conv_kernel, length * length * sizeof(float));
    // copy the original_image and the kernel taps into cuda
    cudaMemcpy(cuda_in, in_image, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_conv_kernel, conv_kernel, length * length * sizeof(float), cudaMemcpyHostToDevice);
    // events bracket only the kernel execution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    _conv2d <<< grid, block >>> (cuda_out, cuda_in, cuda_conv_kernel, halflen, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // copy the output into the image array
    cudaMemcpy(out_image, cuda_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_in);
    cudaFree(cuda_out);
    cudaFree(cuda_conv_kernel);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("conv takes time: %f ms \n", milliseconds);
    // BUGFIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void conv2d_const_mem_func(unsigned char *out_image, unsigned char *in_image, float *conv_kernel,
                           int height, int width) {
    // Host wrapper for the constant-memory convolution: uploads the taps into
    // the __constant__ KERNEL symbol, then launches the kernel.
    assert(KERNEL_SIZE % 2 == 1);
    dim3 grid(1, 512);
    dim3 block(512, 1);
    int imgproduct = height * width;
    size_t bytes = imgproduct * 3 * sizeof(unsigned char);
    unsigned char *cuda_out, *cuda_in;
    cudaMalloc((void **) &cuda_in, bytes);
    cudaMalloc((void **) &cuda_out, bytes);
    // copy the original_image into cuda
    cudaMemcpy(cuda_in, in_image, bytes, cudaMemcpyHostToDevice);
    // BUGFIX: cudaMemcpyToSymbol's 4th positional parameter is the byte
    // OFFSET, not the copy kind; passing cudaMemcpyHostToDevice (== 1) there
    // shifted the whole copy by one byte. Pass offset 0 explicitly.
    cudaError_t mem_err = cudaMemcpyToSymbol(KERNEL, conv_kernel,
        KERNEL_SIZE * KERNEL_SIZE * sizeof(float), 0, cudaMemcpyHostToDevice);
    if (mem_err != cudaSuccess)
        printf("Mem copy Error: %s\n", cudaGetErrorString(mem_err));
    // events bracket only the kernel execution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    _conv2d_constant_memory <<< grid, block >>> (cuda_out, cuda_in, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // copy the output into the image array
    cudaMemcpy(out_image, cuda_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_in);
    cudaFree(cuda_out);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("conv takes time: %f ms \n", milliseconds);
    // BUGFIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
void conv2d_shared_mem_func(unsigned char *out_image, unsigned char *in_image, float *conv_kernel, int share_size,
                            int length, int height, int width) {
    // Host wrapper for the shared-memory tiled convolution kernel.
    // `share_size` is kept for interface compatibility; the kernel uses a
    // fixed 16x16 tile.
    assert(length % 2 == 1);
    int halflen = length / 2;
    // BUGFIX: the grid was (height*width/16, height*width/16), launching
    // vastly more blocks than image tiles. One 16x16 block per tile matches
    // the kernel's pos_x/pos_y indexing (x covers width, y covers height,
    // out-of-range threads are guarded inside the kernel).
    dim3 block(16, 16);
    dim3 grid((width + 15) / 16, (height + 15) / 16);
    int imgproduct = height * width;
    size_t bytes = imgproduct * 3 * sizeof(unsigned char);
    unsigned char *cuda_out, *cuda_in;
    float *cuda_conv_kernel;
    cudaMalloc((void **) &cuda_in, bytes);
    cudaMalloc((void **) &cuda_out, bytes);
    cudaMalloc((void **) &cuda_conv_kernel, length * length * sizeof(float));
    // copy the original_image and the kernel taps into cuda
    cudaMemcpy(cuda_in, in_image, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_conv_kernel, conv_kernel, length * length * sizeof(float), cudaMemcpyHostToDevice);
    // events bracket only the kernel execution
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    _conv2d_shared_mem <<< grid, block >>> (cuda_out, cuda_in, cuda_conv_kernel, halflen, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // copy the output into the image array
    cudaMemcpy(out_image, cuda_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_in);
    cudaFree(cuda_out);
    cudaFree(cuda_conv_kernel);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("conv takes time: %f ms \n", milliseconds);
    // BUGFIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// do not use this method for anything else than verifying cuda compiled, linked and executed
__global__ void testArray(float *dst, float value) {
    // Smoke-test kernel: each thread stores the constant into its own slot.
    const unsigned int slot = threadIdx.x;
    dst[slot] = value;
}
void testCudaCall() {
    // Smoke test of the CUDA setup: fill a device array with -0.5f and print
    // the first, second and last element read back on the host.
    const unsigned int N = 1024;
    float *device_array = NULL;
    cudaMalloc(&device_array, N * sizeof(float));
    testArray <<< 1, N >>> (device_array, -0.5f);
    float host_copy[N];
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(host_copy, device_array, N * sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "quick and dirty test of CUDA setup: " << host_copy[0] << " " << host_copy[1] << " " << host_copy[1023] << std::endl;
    cudaFree(device_array);
    // initialize global
    // cudaMalloc((void **) &KERNEL, KERNEL_SIZE * KERNEL_SIZE * sizeof(float));
}
|
22,465 | #include <iostream>
#include <vector>
#include <random>
#include <time.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/copy.h>
using std::vector;
using std::random_device;
using std::mt19937;
using std::uniform_real_distribution;
#define SIZE 100000
int main()
{
    // Element-wise product output[i] = w[i] * input[i] on the GPU via
    // thrust::transform, with host-side wall-clock timing.
    clock_t start = clock();
    random_device rnd;
    mt19937 mt;
    mt.seed(rnd());
    uniform_real_distribution<double> real_rnd(0.0, 1.0);
    vector<double> host_w(SIZE, 10.0);
    vector<double> host_input(SIZE);
    vector<double> host_output(SIZE);
    for(int i = 0; i < SIZE; ++i) host_input[i] = real_rnd(mt);
    // allocate device side vectors
    thrust::device_vector<double> device_w(SIZE);
    thrust::device_vector<double> device_input(SIZE);
    thrust::device_vector<double> device_output(SIZE);
    // copy host to device
    thrust::copy(host_w.begin(), host_w.end(), device_w.begin());
    thrust::copy(host_input.begin(), host_input.end(), device_input.begin());
    // device_output = device_w * device_input
    clock_t transform_start = clock();
    thrust::transform(device_w.begin(), device_w.end(), device_input.begin(), device_output.begin(), thrust::multiplies<double>());
    // BUGFIX: ensure the device work has finished before reading the clock,
    // otherwise "transform time" can measure only the asynchronous launch.
    cudaDeviceSynchronize();
    clock_t transform_end = clock();
    // copy device to host
    thrust::copy(device_output.begin(), device_output.end(), host_output.begin());
    clock_t end = clock();
    std::cout << "transform time: " << (double)(transform_end - transform_start) / CLOCKS_PER_SEC << std::endl;
    std::cout << "program time: " << (double)(end - start) / CLOCKS_PER_SEC << std::endl;
    return 0;
}
|
22,466 | /*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
#include <iostream>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
__global__ void myFirstKernel(int* d_a)
{
    // Encode the launch coordinates into each element:
    // d_a[global index] = 1000 * block + thread.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    d_a[gid] = 1000 * blockIdx.x + threadIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
    // Fill an 8x8 (blocks x threads) int array on the device with
    // 1000*block + thread, copy it back and verify every element on the host.
    int *h_a;   // host buffer
    int *d_a;   // device buffer
    // define grid and block size
    int numBlocks = 8;
    int numThreadsPerBlock = 8;
    // Part 1 of 5: allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc((void**)&d_a, memSize);
    checkCUDAError("cudaMalloc");
    // Part 2 of 5: configure and launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    myFirstKernel<<<dimGrid, dimBlock, 0>>>(d_a);
    // Block until the device has completed.
    // BUGFIX: cudaThreadSynchronize() has been deprecated for years; use the
    // equivalent modern API.
    cudaDeviceSynchronize();
    // Part 3 of 5: check whether kernel execution generated an error
    checkCUDAError("kernel execution");
    // Part 4 of 5: device to host copy
    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    // Part 5 of 5: verify the data returned to the host is correct
    for (int i = 0; i < numBlocks; i++)
    {
        for (int j = 0; j < numThreadsPerBlock; j++)
        {
            assert(h_a[i * numThreadsPerBlock + j] == 1000 * i + j);
        }
    }
    // free device memory
    cudaFree(d_a);
    // free host memory
    free(h_a);
    // If the program makes it this far, the results are correct and there
    // were no run-time errors. Good work!
    printf("Correct!\n");
    return 0;
}
void checkCUDAError(const char *msg)
{
    // Abort with a labelled message if any prior CUDA call left a sticky
    // error in the runtime.
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(-1);
}
|
22,467 | #include <limits.h>
#include <stdio.h>
#define ALLOC_SIZE 1024
// Kernel that declares a large per-thread array and reads an element of it.
// NOTE(review): devMem is never written, so `i` is an indeterminate value --
// this looks intentional (the kernel only exercises per-thread local-memory
// allocation), but confirm the result is never relied on.
__global__ void simple_kernel() {
int devMem[ALLOC_SIZE];
int i = devMem[0];
i = i*i; // for unreferenced warning
}
int main() {
    // Single-thread, single-block launch followed by full device teardown.
    simple_kernel<<<1, 1>>>();
    cudaDeviceReset();
    return 0;
}
|
22,468 |
#include <iostream>
#include <ctime>
#include <stdio.h>
#define N 500000
__global__
void add_kernel(int *a, int *b, int *c) {
    // One block per element: blockIdx.x selects which element this block
    // adds (blockIdx is a built-in variable supplied by CUDA).
    const int idx = blockIdx.x;
    printf("Index: %d\n", idx);
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}
__host__
void add_cpu(int *a, int *b, int *c) {
    // CPU reference: element-wise sum over all N elements, printing each
    // result as it is computed.
    for (int idx = 0; idx < N; ++idx) {
        const int sum = a[idx] + b[idx];
        c[idx] = sum;
        printf("%d\n", sum);
    }
}
__host__
void deviceBasics() {
    // Print the number of CUDA devices, then one property of device 0.
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    std::cout << "Device #" << deviceCount << "\n";
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    std::cout << props.maxTexture3D[2] * 3 << "\n";
}
int main(int argc, char *argv[]) {
    // Adds two N-element vectors on the GPU (one block per element) and times
    // the whole round trip with std::clock.
    deviceBasics();
    std::clock_t start = std::clock();
    // BUGFIX: three 500000-int arrays (~6 MB) previously lived on the stack,
    // which can overflow the default stack size; allocate them on the heap.
    int *a = new int[N];
    int *b = new int[N];
    int *c = new int[N];
    int *dev_a;
    int *dev_b;
    int *dev_c;
    // allocate memory on GPU
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));
    for (int i = 0; i < N; i++) {
        a[i] = -1;
        b[i] = i * i;
    }
    // copy the two input arrays to device memory
    // BUGFIX: sizeof(N) happened to equal sizeof(int) but was semantically
    // wrong (N is a value, not a type); spell the element type explicitly.
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    // <<<blocks, threads-per-block>>>: one block per element
    add_kernel<<<N, 1>>>(dev_a, dev_b, dev_c);
    // copy from device to host (blocking copy synchronizes with the kernel)
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "DONE" << "\n";
    double duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
    std::cout<<"printf: "<< duration <<'\n';
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    // BUGFIX: host buffers are now released as well.
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
22,469 | #include <stdio.h>
// Each of the launched threads announces its own index from device code.
__global__ void helloFromGPU(void) {
    printf("Hello World from GPU %d!\n",threadIdx.x);
}
int main(void) {
    printf("Hello World from CPU\n");
    // One block of 100 threads; synchronize so the device-side printf output
    // is flushed before the process exits.
    helloFromGPU <<<1, 100>>>();
    cudaDeviceSynchronize();
    return 0;
} |
22,470 | #include "includes.h"
__global__ void init(){} |
22,471 | #include "includes.h"
// Each 256-thread block reduces a 512-int slice of devBufIn down to a single
// sum written to devBufOut[blockIdx.x]. `lastBlockSize` is the number of
// valid entries in the final (possibly partial) slice; excess entries are
// zeroed in global memory before the reduction.
__global__ void Image_SumReduceStep_Kernel( int* devBufIn, int* devBufOut, int lastBlockSize)
{
// ONLY USE THIS FUNCTION WITH BLOCK SIZE = (256,1,1);
// NOTE: This method was originally written to use exactly the amt
// of shared memory available for each block, but I believe
// I later was told that cmd args use shared mem, which would
// result in this method spilling over. Need to check on that.
// NOTE(review): shmBuf2 starts at byte offset 512 (= int index 128), so it
// OVERLAPS shmBuf1's 512-int span. The ping-pong indexing below seems to
// read and write only per-thread-consistent addresses despite the aliasing,
// but this deserves a careful audit -- a non-overlapping layout would put
// shmBuf2 at &sharedMem[2048].
__shared__ char sharedMem[4096];
int* shmBuf1 = (int*)sharedMem;
int* shmBuf2 = (int*)&sharedMem[512];
int globalIdx = 512 * blockIdx.x + threadIdx.x;
int localIdx = threadIdx.x;
// Clear both staging buffers (each thread clears its two slots).
shmBuf1[localIdx] = 0;
shmBuf1[localIdx+256] = 0;
shmBuf2[localIdx] = 0;
shmBuf2[localIdx+256] = 0;
// Zero the tail of the last slice so out-of-range entries do not pollute the
// sum. Each thread writes only the two global slots it later reads, so no
// cross-thread synchronization is needed here.
if(blockIdx.x == gridDim.x-1)
{
if(localIdx+256 >= lastBlockSize) devBufIn[globalIdx+256] = 0;
if(localIdx >= lastBlockSize) devBufIn[globalIdx] = 0;
}
// Now we reduce each block of 512 values (256 threads) to a single number.
// The barriers are placed AFTER each guarded assignment, outside the `if`,
// so every thread in the block reaches them (no divergent __syncthreads).
shmBuf1[localIdx] = devBufIn[globalIdx] + devBufIn[globalIdx + 256]; __syncthreads();
if(localIdx < 128) shmBuf2[localIdx] = shmBuf1[localIdx]+shmBuf1[localIdx+128]; __syncthreads();
if(localIdx < 64) shmBuf1[localIdx] = shmBuf2[localIdx]+shmBuf2[localIdx+64]; __syncthreads();
if(localIdx < 32) shmBuf2[localIdx] = shmBuf1[localIdx]+shmBuf1[localIdx+32]; __syncthreads();
if(localIdx < 16) shmBuf1[localIdx] = shmBuf2[localIdx]+shmBuf2[localIdx+16]; __syncthreads();
if(localIdx < 8) shmBuf2[localIdx] = shmBuf1[localIdx]+shmBuf1[localIdx+8]; __syncthreads();
if(localIdx < 4) shmBuf1[localIdx] = shmBuf2[localIdx]+shmBuf2[localIdx+4]; __syncthreads();
if(localIdx < 2) shmBuf2[localIdx] = shmBuf1[localIdx]+shmBuf1[localIdx+2]; __syncthreads();
// 2 -> 1: thread 0 writes the block's final sum.
if(localIdx < 1)
devBufOut[blockIdx.x] = shmBuf2[localIdx] + shmBuf2[localIdx + 1];
__syncthreads();
} |
22,472 | /*
* ARQUITECTURA DE COMPUTADORES
* 2º Grado en Ingenieria Informatica
*
* PRACTICA 2: "Suma De Matrices Paralela"
* >> Arreglar for en __global__
* >> Pasar numElem como argumento
*
* AUTOR: Ivanes
*/
///////////////////////////////////////////////////////////////////////////
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defines
#define RAN_MIN 1
#define RAN_MAX 9
// Declaracion de funciones
int numHilos()
{
    // Queries device 0's max-threads-per-block limit, then prompts the user
    // (in Spanish) for an element count in (0, max]; returns the chosen size.
    cudaDeviceProp propiedades;
    cudaGetDeviceProperties(&propiedades, 0);
    const int maxHilos = propiedades.maxThreadsPerBlock;
    printf("\nEl numero maximo de elementos del array es: %d \n", maxHilos);
    int elegido;
    do {
        printf("\n\nCuantos elementos quieres que tenga los vectores: ");
        scanf("%d", &elegido);
        getchar();  // consume the trailing newline
    } while ((elegido > maxHilos) || (elegido <= 0));
    return elegido;
}
__global__
void reverseMatriz(int *dev_matriz, int *dev_matriz_reverse, int *dev_matriz_resultado, int numElem)
{
    // Thread id == element id. Build the reversed copy, then add the original
    // and the mirrored element; each thread only reads its own prior write,
    // so no synchronization is required.
    const int id = threadIdx.x;
    const int espejo = numElem - 1 - id;
    dev_matriz_reverse[id] = dev_matriz[espejo];
    dev_matriz_resultado[id] = dev_matriz[id] + dev_matriz_reverse[id];
}
// MAIN: main routine, executed on the host.
// Fills a user-sized vector with random values, reverses it on the GPU, adds
// the original and reversed vectors element-wise, and prints all three.
int main(int argc, char** argv)
{
// Host/device buffer declarations
int *hst_matriz;
int *hst_matriz_reverse;
int *hst_matriz_resultado;
int *dev_matriz;
int *dev_matriz_reverse;
int *dev_matriz_resultado;
// Query the device thread limit and ask the user how many elements the
// vectors should have; a single block is used.
int numElem = numHilos();
int numBlock = 1;
// Host allocations
hst_matriz = (int*)malloc(numElem * sizeof(int));
hst_matriz_reverse = (int*)malloc(numElem * sizeof(int));
hst_matriz_resultado = (int*)malloc(numElem * sizeof(int));
// Device allocations
cudaMalloc( &dev_matriz, numElem * sizeof(int));
cudaMalloc( &dev_matriz_reverse, numElem * sizeof(int));
cudaMalloc( &dev_matriz_resultado, numElem * sizeof(int));
// Fill the matrix with random values in [RAN_MIN, RAN_MIN + RAN_MAX - 1]
srand((int)time(NULL));
for (int i = 0; i < numElem; i++)
{
hst_matriz[i] = RAN_MIN + rand() % RAN_MAX;
}
// Copy the array to the device, reverse it there and compute the sums
cudaMemcpy(dev_matriz, hst_matriz, numElem * sizeof(int), cudaMemcpyHostToDevice);
reverseMatriz <<< numBlock, numElem>>>(dev_matriz, dev_matriz_reverse, dev_matriz_resultado, numElem);
// Error check: wait for the kernel and inspect the last error
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(error));
exit(-1);
}
// Copy the reversed array back to the CPU
cudaMemcpy(hst_matriz_reverse, dev_matriz_reverse, numElem * sizeof(int), cudaMemcpyDeviceToHost);
// Copy the result back to the CPU
cudaMemcpy(hst_matriz_resultado, dev_matriz_resultado, numElem * sizeof(int), cudaMemcpyDeviceToHost);
// Print the arrays and the result
printf("\n\nMatriz: \n");
for (int i = 0; i < numElem; i++)
printf("%d ", hst_matriz[i]);
printf("\n\nMatriz Inversa: \n");
for (int i = 0; i < numElem; i++)
printf("%d ", hst_matriz_reverse[i]);
printf("\n\nMatriz Resultado: \n");
for (int i = 0; i < numElem; i++)
printf("%d ", hst_matriz_resultado[i]);
free(hst_matriz);
free(hst_matriz_reverse);
free(hst_matriz_resultado);
cudaFree(dev_matriz);
cudaFree(dev_matriz_reverse);
cudaFree(dev_matriz_resultado);
// Exit: print the execution timestamp and wait for ENTER
time_t fecha;
time(&fecha);
printf("\n\n***************************************************\n");
printf("Programa ejecutado el: %s\n", ctime(&fecha));
printf("<pulsa [INTRO] para finalizar>");
getchar();
return 0;
}
|
22,473 | #include <cuda_runtime.h>
#include <stdio.h>
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(1);\
}\
}
void initialInt(int *ip, int size)
{
    // Fill ip[0..size) with its own index: ip[k] = k.
    for (int idx = 0; idx != size; ++idx)
        ip[idx] = idx;
}
void printMatrix(int *C, const int nx, const int ny)
{
    // Dump an ny-by-nx row-major int matrix to stdout, one row per line.
    printf("\nMatrix: (%d.%d)\n", nx, ny);
    for (int iy = 0; iy < ny; iy++)
    {
        int *row = C + iy * nx;
        for (int ix = 0; ix < nx; ix++)
            printf("%3d", row[ix]);
        printf("\n");
    }
    printf("\n");
}
__global__ void printThreadIndex(int *A, const int nx, const int ny)
{
    // Print each thread's (thread, block, coordinate) mapping plus the matrix
    // value at its flattened index. NOTE(review): no bounds guard -- assumes
    // the grid exactly covers nx*ny elements.
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const unsigned int idx = iy * nx + ix;
    printf("thread_id: (%d, %d), block_id: (%d, %d), coordinate: (%d, %d) global index %2d ival %2d\n",
           threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]);
}
int main(int argc, char *argv[])
{
    // Prints an 8x6 index matrix from the host, then has each GPU thread
    // report its coordinates and the matrix value it maps to.
    printf("%s Starting...\n", argv[0]);
    // get device information.
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // set matrix dimension.
    int nx = 8;
    int ny = 6;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(int);
    // malloc host memory and initialize the host matrix.
    int *h_A = (int *)malloc(nBytes);
    initialInt(h_A, nxy);
    printMatrix(h_A, nx, ny);
    // malloc gpu memory and transfer data from host to gpu.
    // CONSISTENCY: the file defines CHECK but only used it on two calls;
    // wrap every CUDA API call so errors are not silently dropped.
    int *d_matA;
    CHECK(cudaMalloc((void **)&d_matA, nBytes));
    CHECK(cudaMemcpy(d_matA, h_A, nBytes, cudaMemcpyHostToDevice));
    // set up execution configuration (ceil-division grid).
    dim3 block(4, 2);
    dim3 grid( (nx + block.x - 1)/block.x, (ny + block.y - 1)/block.y);
    // invoke the kernel and wait for its printf output.
    printThreadIndex <<<grid, block>>> (d_matA, nx, ny);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    // free the memory.
    CHECK(cudaFree(d_matA));
    free(h_A);
    // reset the device.
    CHECK(cudaDeviceReset());
    return 0;
}
|
22,474 | extern "C"
__global__ void updateCenters(float *centers, float *images, int *updates, int noClusters)
{
    // For each image (one per thread) find the nearest of `noClusters`
    // centers by squared Euclidean distance and record its index in
    // updates[gid].
    const int imageSize = 784;   // assumes 28x28 images -- TODO confirm
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Per-thread copy of the image (784 floats will spill to local memory,
    // but keeps the inner distance loop free of repeated global reads).
    float pImage[784];
    int imagesOffset = gid * imageSize;
    for (int index = 0; index < imageSize; index++) {
        pImage[index] = images[imagesOffset + index];
    }
    float min = 100000000;
    int minCenterIndex = -1;
    // BUGFIX: the cluster loop was hard-coded to 100 and ignored the
    // noClusters parameter entirely.
    for (int centersIndex = 0; centersIndex < noClusters; centersIndex++)
    {
        float sum = 0;
        for (int index = 0; index < imageSize; index++)
        {
            float weight = centers[centersIndex * imageSize + index] - pImage[index];
            sum = sum + weight * weight;
        }
        if (sum < min)
        {
            min = sum;
            minCenterIndex = centersIndex;
        }
    }
    updates[gid] = minCenterIndex;
}
|
22,475 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
// Each block's single thread announces its block index.
__global__ void hello()
{
    printf("I am the thread of block %d\n", blockIdx.x);
}
int main()
{
    // Launch NUM_BLOCKS blocks of BLOCK_WIDTH threads, then wait so all
    // device-side printf output is flushed before the final message.
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
    cudaDeviceSynchronize();
    printf("This is the END!");
    return 0;
}
|
22,476 | #include "includes.h"
extern "C" {
}
__global__ void reverse_conv_filter(const float* x, float beta, float* y, unsigned int filter_len, unsigned int len) {
    // One thread per filter: write the reversed taps of x's filter into y,
    // optionally blended with beta * (existing y contents).
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid >= len) return;
    const int base = tid * filter_len;
    if (beta == 0.0f) {
        // overwrite: y[i] = flipped x
        for (int i = 0; i < filter_len; ++i)
            y[base + i] = x[base + ((filter_len-1) - i)];
    } else {
        // accumulate: y[i] = flipped x + beta * y[i]
        for (int i = 0; i < filter_len; ++i)
            y[base + i] = x[base + ((filter_len-1) - i)] + beta * y[base + i];
    }
} |
22,477 | /*
Demo: CUDA program to compute the squares of the first N natural numners
*/
#include <stdio.h>
typedef float data_t; // makes it easy to change type later
__global__ void square(data_t *d_in, data_t *d_out); // kernel function
// note the use of __global__
void printArray( data_t a[], int num_elements );
int main(){
    // Square the integers 0..63 on the device and print the results.
    const int num_elements = 64;
    const int block_size = sizeof(data_t) * num_elements;
    // sample input to be processed: h_in[i] = i
    data_t h_in[num_elements];
    for (int idx = 0; idx < num_elements; ++idx)
        h_in[idx] = idx;
    // device buffers
    data_t *d_in = NULL;
    data_t *d_out = NULL;
    cudaMalloc( (void **) &d_in, block_size );
    cudaMalloc( (void **) &d_out, block_size );
    // transfer input to device memory
    cudaMemcpy( d_in, h_in, block_size, cudaMemcpyHostToDevice );
    // launch: one block, one thread per element
    square <<< 1, num_elements >>> (d_out, d_in);
    // blocking copy retrieves the results (and synchronizes with the kernel)
    data_t h_out[num_elements];
    cudaMemcpy( h_out, d_out, block_size, cudaMemcpyDeviceToHost );
    // free device memory
    cudaFree( d_in );
    cudaFree( d_out );
    // display results
    printArray( h_out, num_elements );
}
// kernel definition
__global__ void square(data_t *d_out, data_t *d_in){
    // One thread per element: store the square of its own input slot.
    const int i = threadIdx.x;
    const data_t v = d_in[i];
    d_out[i] = v * v;
}
void printArray( data_t a[], int num_elements ){
    // Print the array 4 values per line, tab-separated within a line.
    for (int idx = 0; idx < num_elements; ++idx) {
        const char sep = ((idx % 4) != 3) ? '\t' : '\n';
        printf("%5.1f%c", a[idx], sep);
    }
    printf("\n");
}
|
22,478 | #include "includes.h"
__global__ void calcReluForwardGPU(float *in, float *out, int elements)
{
    // ReLU forward pass: out[i] = max(in[i], 0) for every element covered by
    // the (2D-grid, 1D-block) launch.
    const int gid = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (gid >= elements) return;
    const float v = in[gid];
    out[gid] = (v < 0.0f) ? 0.0f : v;
    /* original CPU loop this replaces:
    for( unsigned i = 0; i < data_size; ++i ){
        float v = in.data[i];
        if ( v < 0 ) v = 0;
        out.data[i] = v;
    }
    */
} |
22,479 | //Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define N 256
#define BLOCK_SIZE 16
#define NUM_BLOCKS N/BLOCK_SIZE
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(int) * (ARRAY_SIZE))
///generate data//
__host__ void generateData(int * host_data_ptr, int arrayNum)
{
    // Fill N ints according to arrayNum: 1 -> ramp 0..N-1, 2 -> all ones,
    // anything else -> pseudo-random values in [0, 2].
    for (unsigned int idx = 0; idx < N; ++idx)
    {
        int value;
        if (arrayNum == 1)
            value = idx;
        else if (arrayNum == 2)
            value = 1;
        else
            value = rand() % 3;
        host_data_ptr[idx] = value;
    }
}
//REGISTER MEMORY
//REGISTER MEMORY
__global__ void operations_reg(int* array1,int* array2,int* array3, int opNum)
{
    // Element-wise op selected by opNum (1:+, 2:-, 3:*, default:%), staged
    // through per-thread registers.
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    const int lhs = array1[gid];
    const int rhs = array2[gid];
    int result;
    switch (opNum) {
        case 1:  result = lhs + rhs; break;
        case 2:  result = lhs - rhs; break;
        case 3:  result = lhs * rhs; break;
        default: result = lhs % rhs; break;
    }
    array3[gid] = result;
}
//SHARED MEMORY
//SHARED MEMORY
__global__ void operations_shared(int * array1, int * array2, int *array3, int opNum)
{
    // Same element-wise op as operations_reg (1:+, 2:-, 3:*, default:%) but
    // staged through shared memory. Each thread only ever touches its own
    // slot, so no cross-thread hazard exists.
    const int tid = threadIdx.x;
    __shared__ int s1[ARRAY_SIZE];
    __shared__ int s2[ARRAY_SIZE];
    __shared__ int s3[ARRAY_SIZE];
    s1[tid] = array1[tid];
    s2[tid] = array2[tid];
    s3[tid] = array3[tid];
    if (opNum == 1)
        s3[tid] = s1[tid] + s2[tid];
    else if (opNum == 2)
        s3[tid] = s1[tid] - s2[tid];
    else if (opNum == 3)
        s3[tid] = s1[tid] * s2[tid];
    else
        s3[tid] = s1[tid] % s2[tid];
    __syncthreads();
    array1[tid] = s1[tid];
    array2[tid] = s2[tid];
    array3[tid] = s3[tid];
}
//****************************************************************************
void main_register(int opNum)
{
    // Runs operations_reg over three N-int arrays, times the transfer+kernel
    // with CUDA events, and prints the result array.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // host arrays
    int* host_array1 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
    int* host_array2 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
    int* host_array3 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
    // device arrays
    int *gpu_array1;
    int *gpu_array2;
    int *gpu_array3;
    cudaMalloc((void**)&gpu_array1, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void**)&gpu_array2, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void**)&gpu_array3, ARRAY_SIZE_IN_BYTES);
    // fill host arrays (1: index ramp, 2: all ones, 3: random 0..2)
    generateData(host_array1, 1);
    generateData(host_array2, 2);
    generateData(host_array3, 3);
    cudaEventRecord(start);
    // copy over memory
    cudaMemcpy( gpu_array1,host_array1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
    cudaMemcpy( gpu_array2,host_array2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
    cudaMemcpy( gpu_array3,host_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
    // execute operation
    operations_reg<<<NUM_BLOCKS,BLOCK_SIZE>>>(gpu_array1,gpu_array2,gpu_array3,opNum);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time elapsed: %f\n", milliseconds);
    // copy memory back
    cudaMemcpy(host_array3, gpu_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); //only need array 3
    cudaFree(gpu_array1);
    cudaFree(gpu_array2);
    cudaFree(gpu_array3);
    printf("\n/////////////////REGISTER MEMORY RESULTS//////////////////\n");
    for( int k=0; k<ARRAY_SIZE; k++)
    {
        printf("\nINDEX: %i\tVALUE:%i\n",k, host_array3[k]);
    }
    // BUGFIX: host buffers and CUDA events were previously leaked.
    free(host_array1);
    free(host_array2);
    free(host_array3);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
//****************************************************************************
/////////////////////////USE SHARED MEMORY/////////////////////////
// Times H2D copies + the shared-memory kernel for the selected operation
// (1=add, 2=sub, 3=mul, else=mod), then prints the result array.
// Fix: the original leaked all three host arrays and both CUDA events.
void main_shared( int opNum)
{
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	///////////Declare Arrays/////////
	int* host_array1 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
	int* host_array2 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
	int* host_array3 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
	/* Declare pointers for GPU based params */
	int *gpu_array1;
	int *gpu_array2;
	int *gpu_array3;
	cudaMalloc((void**)&gpu_array1, ARRAY_SIZE_IN_BYTES);
	cudaMalloc((void**)&gpu_array2, ARRAY_SIZE_IN_BYTES);
	cudaMalloc((void**)&gpu_array3, ARRAY_SIZE_IN_BYTES);
	///////////Fill host arrays with values/////////
	generateData(host_array1, 1);
	generateData(host_array2, 2);
	generateData(host_array3, 3);
	cudaEventRecord(start);
	///////////copy over memory /////////
	cudaMemcpy( gpu_array1,host_array1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
	cudaMemcpy( gpu_array2,host_array2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
	cudaMemcpy( gpu_array3,host_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
	///////////execute operation/////////
	operations_shared<<<NUM_BLOCKS,BLOCK_SIZE>>>(gpu_array1,gpu_array2,gpu_array3,opNum);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);
	printf("Time elapsed: %f\n", milliseconds);
	///////////copy memory back /////////
	cudaMemcpy(host_array3, gpu_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); //only need array 3
	cudaFree(gpu_array1);
	cudaFree(gpu_array2);
	cudaFree(gpu_array3);
	printf("\n/////////////////SHARED MEMORY RESULTS//////////////////\n");
	for( int k=0; k<ARRAY_SIZE; k++)
	{
		printf("\nINDEX: %i\tVALUE:%i\n",k, host_array3[k]);
	}
	// Release host buffers and timing events (the original leaked these).
	free(host_array1);
	free(host_array2);
	free(host_array3);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}
// Entry point: the operation selector defaults to 1 (addition); the first
// command-line argument, if present, overrides it. Runs the shared-memory
// variant first, then the register variant.
int main(int argc, char** argv){
	int opNum = (argc >= 2) ? atoi(argv[1]) : 1;
	main_shared(opNum);
	main_register(opNum);
	return 0;
}
|
22,480 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// For an m x n matrix (m = blockDim.x, one thread per row), replace each
// interior element with its "binary complement read back as decimal": the
// element's binary digits are flipped and re-assembled as base-10 digits.
// Border rows and border columns are copied through unchanged.
__global__ void complement(int* a , int* b,int n)
{
	int id = threadIdx.x;
	int m = blockDim.x;
	int j = 0;
	if(id!=0 && id!=(m-1))
	{
		for(j=1;j<n-1;j++)
		{
			// Row-major stride is n (the column count); the original used m,
			// which is only correct for square matrices.
			int d = a[id*n+j];
			int result = 0;
			for(int p=1; d>0; p = p*10)
			{
				int rem = d % 2;
				result += p * (rem == 0 ? 1 : 0); // flipped bit as a decimal digit
				d = d/2;
			}
			// Store once instead of "+=": b comes straight from cudaMalloc and
			// is not zero-initialized, so accumulating read garbage.
			b[id*n+j] = result;
		}
		b[id*n+0] = a[id*n+0];
		b[id*n+n-1] = a[id*n+n-1];
	}
	else
	{
		for(j=0;j<n;j++)
			b[id*n+j] = a[id*n+j];
	}
}
// Reads an m x n matrix from stdin, computes the "binary complement" of its
// interior elements on the GPU (one thread per row), and prints the result.
int main(void)
{
	int *a,*t,n,m,i,j;
	int *d_a,*d_t;
	printf("Enter the value of m and n ");
	scanf("%d",&m);
	scanf("%d",&n);
	int size = sizeof(int)*m*n;
	a = (int*)malloc(size);
	t = (int*)malloc(size);
	printf("Enter input matrix: \n");
	for(i = 0;i<m*n;i++)
		scanf("%d",&a[i]);
	cudaMalloc((void**)&d_a,size);
	cudaMalloc((void**)&d_t,size);
	// Zero the output buffer: cudaMalloc memory is uninitialized and the
	// original kernel accumulated into it with "+=".
	cudaMemset(d_t,0,size);
	cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
	dim3 threadsPerBlock(m,1);
	dim3 numBlocks(1,1);
	complement<<<numBlocks,threadsPerBlock>>>(d_a,d_t,n);
	cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
	printf("Result vector is :\n");
	// Print m rows of n columns; the original looped i < n over rows, which
	// is wrong (and reads out of bounds) whenever m != n.
	for(i = 0;i<m;i++)
	{
		for(j = 0;j<n;j++)
			printf("%d ",t[i*n+j]);
		printf("\n");
	}
	getchar();
	cudaFree(d_a);
	cudaFree(d_t);
	// Release host buffers (leaked in the original).
	free(a);
	free(t);
	return 0;
}
22,481 | // the subroutine for GPU code can be found in several separated text file from the Brightspace.
// You can add these subroutines to this main code.
////////////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "cuda.h"
const int BLOCK_SIZE =32; // number of threads per block
// Input Array Variables
float* h_MatA = NULL;
float* d_MatA = NULL;
// Output Array
float* h_VecV = NULL;
float* d_VecV = NULL;
float* h_VecW = NULL;
float* d_VecW = NULL;
float* h_NormW = NULL;
float* d_NormW = NULL;
// Variables to change
int GlobalSize = 5000; // this is the dimension of the matrix, GlobalSize*GlobalSize
int BlockSize = 32; // number of threads in each block
// const float EPS = 0.000005; // tolerence of the error
// int max_iteration = 100; // the maximum iteration steps
const float EPS = -1;
int max_iteration = 10;
// Functions
void Cleanup(void);
void InitOne(float*, int);
void UploadArray(float*, int);
float CPUReduce(float*, int);
void ParseArguments(int, char**);
void checkCardVersion(void);
// Kernels
__global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N);
__global__ void FindNormW(float* g_VecW, float* g_tempV, float * g_NormW, int N);
__global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N);
__global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N);
// Host reference: W = A * V, where A is the dense N x N row-major matrix
// h_MatA and V / W are the file-scope vectors h_VecV / h_VecW.
void CPU_AvProduct()
{
int N = GlobalSize;
int matIndex =0;
for(int i=0;i<N;i++)
{
h_VecW[i] = 0;
for(int j=0;j<N;j++)
{
matIndex = i*N + j;
h_VecW[i] += h_MatA[matIndex] * h_VecV[j];
}
}
}
// Host reference: V = W / ||W||_2.
// NOTE(review): divides by zero if W is the zero vector — assumed not to
// occur for the random nonnegative matrices produced by UploadArray.
void CPU_NormalizeW()
{
int N = GlobalSize;
float normW=0;
for(int i=0;i<N;i++)
normW += h_VecW[i] * h_VecW[i];
normW = sqrt(normW);
for(int i=0;i<N;i++)
h_VecV[i] = h_VecW[i]/normW;
}
// Returns dot(V, W). Called right after V has been normalized and W = A*V
// recomputed, so this is the power method's eigenvalue estimate.
float CPU_ComputeLamda()
{
int N = GlobalSize;
float lamda =0;
for(int i=0;i<N;i++)
lamda += h_VecV[i] * h_VecW[i];
return lamda;
}
// CPU power iteration: repeatedly normalize V and multiply by A, printing
// the eigenvalue estimate at each step.
// With the current EPS = -1, abs(oldLamda - lamda) < EPS is never true, so
// the early exit is disabled and the loop always runs max_iteration steps.
void RunCPUPowerMethod()
{
printf("*************************************\n");
float oldLamda =0;
float lamda=0;
//AvProduct
CPU_AvProduct();
//power loop
for (int i=0;i<max_iteration;i++)
{
CPU_NormalizeW();
// for(int j=0;j<10;j++) {
// printf("%.5f ", h_VecV[i]);
// }
// printf("\n");
CPU_AvProduct();
lamda= CPU_ComputeLamda();
printf("CPU lamda at %d: %f \n", i, lamda);
// If residual is less than epsilon break
if(abs(oldLamda - lamda) < EPS)
break;
oldLamda = lamda;
}
printf("*************************************\n");
}
// Record the current wall-clock time into *timer.
void start_timer(struct timespec *timer) {
clock_gettime(CLOCK_REALTIME, timer);
}
// Return the number of seconds elapsed since start_timer() filled *timer.
double stop_timer(struct timespec *timer) {
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
double secs = (double)(now.tv_sec - timer->tv_sec);
secs += (now.tv_nsec - timer->tv_nsec) * 1e-9;
return secs;
}
// Host code
// Power-method driver: runs the CPU reference first, then the GPU version,
// timing each. Note that with EPS = -1 (set at file top) the convergence
// break below can never fire, so both versions run all max_iteration steps.
int main(int argc, char** argv)
{
srand(time(0));
struct timespec timer;
double runtime;
ParseArguments(argc, argv);
int N = GlobalSize;
printf("Matrix size %d X %d \n", N, N);
printf("BlockSize: %d\n\n", BLOCK_SIZE);
size_t vec_size = N * sizeof(float);
size_t mat_size = N * N * sizeof(float);
size_t norm_size = sizeof(float);
// Allocate normalized value in host memory
h_NormW = (float*)malloc(norm_size);
// Allocate input matrix in host memory
h_MatA = (float*)malloc(mat_size);
// Allocate initial vector V in host memory
h_VecV = (float*)malloc(vec_size);
// Allocate W vector for computations
h_VecW = (float*)malloc(vec_size);
// Initialize input matrix
UploadArray(h_MatA, N);
InitOne(h_VecV,N);
printf("Power method in CPU starts\n");
start_timer(&timer);
RunCPUPowerMethod(); // the lamda is already solved here
runtime = stop_timer(&timer);
printf("CPU: run time = %f secs.\n",runtime);
printf("Power method in CPU is finished\n\n\n");
/////////////////////////////////////////////////
// This is the starting points of GPU
printf("Power method in GPU starts\n");
checkCardVersion();
// Reset V to e0 so the GPU run starts from the same state as the CPU run.
InitOne(h_VecV,N);
start_timer(&timer); // Here I start to count
double gpu_time=0.0;
// Set the kernel arguments
int threadsPerBlock = BlockSize;
int sharedMemSize = threadsPerBlock * threadsPerBlock * sizeof(float); // in per block, the memory is shared
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
// Allocate matrix and vectors in device memory
cudaMalloc((void**)&d_MatA, mat_size);
cudaMalloc((void**)&d_VecV, vec_size);
cudaMalloc((void**)&d_VecW, vec_size); // This vector is only used by the device
cudaMalloc((void**)&d_NormW, norm_size);
//Copy from host memory to device memory
cudaMemcpy(d_MatA, h_MatA, mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_VecV, h_VecV, vec_size, cudaMemcpyHostToDevice);
// cutilCheckError(cutStopTimer(timer_mem));
//Power method loops
printf("*************************************\n");
float lamda = 0;
float OldLamda =0;
// start_timer(&timer);
// First multiplication outside the loop: W = A * V.
// NOTE(review): cudaThreadSynchronize() is deprecated; the modern
// equivalent is cudaDeviceSynchronize().
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
cudaThreadSynchronize(); //Needed, kind of barrier to sychronize all threads
// gpu_time += stop_timer(&timer);
// This part is the main code of the iteration process for the Power Method in GPU.
// Please finish this part based on the given code. Do not forget the command line
// cudaThreadSynchronize() after calling the function every time in CUDA to synchoronize the threads
//power loop
float zero = 0.0;
for (int i=0;i<max_iteration;i++)
{
// Zero the accumulator, reduce ||W||^2 on the device, take sqrt on host.
cudaMemcpy(d_NormW, &zero, sizeof(float), cudaMemcpyHostToDevice);
// start_timer(&timer);
FindNormW<<<blocksPerGrid, threadsPerBlock, threadsPerBlock * sizeof(float)>>>(d_VecW, d_VecV, d_NormW, N);
// gpu_time += stop_timer(&timer);
cudaMemcpy(h_NormW, d_NormW, sizeof(float), cudaMemcpyDeviceToHost);
h_NormW[0] = sqrt(h_NormW[0]);
cudaMemcpy(d_NormW, h_NormW, sizeof(float), cudaMemcpyHostToDevice);
// start_timer(&timer);
// V = W / ||W||, then W = A * V for the next iteration.
NormalizeW<<<blocksPerGrid, threadsPerBlock, sizeof(float)>>>(d_VecW, d_NormW, d_VecV, N);
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
// gpu_time += stop_timer(&timer);
cudaMemcpy(d_NormW, &zero, sizeof(float), cudaMemcpyHostToDevice);
// start_timer(&timer);
// lamda = dot(V, W); note ComputeLamda clobbers d_VecV with its reduction
// (V is rebuilt from W by NormalizeW on the next iteration).
ComputeLamda<<<blocksPerGrid, threadsPerBlock, threadsPerBlock * sizeof(float)>>>(d_VecV, d_VecW, d_NormW, N);
// gpu_time += stop_timer(&timer);
cudaMemcpy(&lamda, d_NormW, sizeof(float), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
printf("GPU lamda at %d: %f \n", i, lamda);
// If residual is less than epsilon break
if(abs(OldLamda - lamda) < EPS)
break;
OldLamda = lamda;
}
printf("*************************************\n");
gpu_time = stop_timer(&timer);
printf("GPU: run time = %f secs.\n\n\n\n\n",gpu_time);
// printf("Overall CPU Execution Time: %f (ms) \n", cutGetTimerValue(timer_CPU));
Cleanup();
}
// Release every device and host allocation, then terminate the process.
// Note: this calls exit(0) and therefore never returns to its caller.
void Cleanup(void)
{
// Free device memory
if (d_MatA)  cudaFree(d_MatA);
if (d_VecV)  cudaFree(d_VecV);
if (d_VecW)  cudaFree(d_VecW);
if (d_NormW) cudaFree(d_NormW);
// Free host memory
if (h_MatA)  free(h_MatA);
if (h_VecV)  free(h_VecV);
if (h_VecW)  free(h_VecW);
if (h_NormW) free(h_NormW);
exit(0);
}
// Initialize data to the unit basis vector e0: data[0] = 1, all other
// entries 0. (The old comment claimed "zero value", which was inaccurate.)
// Fix: the original wrote data[0] unconditionally, an out-of-bounds store
// when n <= 0.
void InitOne(float* data, int n)
{
for (int i = 0; i < n; i++)
data[i] = 0;
if (n > 0)
data[0]=1;
}
// Fill the n x n matrix with pseudo-random integer values in [0, 100],
// stored as floats.
void UploadArray(float* data, int n)
{
const int total = n * n;
for (int i = 0; i < total; ++i)
data[i] = (float)(rand() % 101);
}
// Obtain program arguments.
// Recognizes --size/-size N, --max_iteration/-max_iteration N, and
// --seed/-seed N (the last reseeds rand()).
// Fix: the original read argv[i+1] without checking it exists, indexing
// past the argument vector when a flag was the last argument.
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
{
if ((strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) && i + 1 < argc)
{
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
else if ((strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0) && i + 1 < argc)
{
max_iteration = atoi(argv[i+1]);
i = i + 1;
}
else if ((strcmp(argv[i], "--seed") == 0 || strcmp(argv[i], "-seed") == 0) && i + 1 < argc)
{
int seed = atoi(argv[i+1]);
srand(seed);
}
}
}
// Print device 0's compute capability and abort unless it is at least 2.x
// (the kernels below use atomicAdd on float, which needs CC >= 2.0).
void checkCardVersion()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("This GPU has major architecture %d, minor %d \n",prop.major,prop.minor);
if(prop.major < 2)
{
fprintf(stderr,"Need compute capability 2 or higher.\n");
exit(1);
}
}
/*****************************************************************************
This function finds the product of Matrix A and vector V
*****************************************************************************/
// ****************************************************************************************************************************************************/
// parallelization method for the Matrix-vector multiplication as follows:
// each thread handle a multiplication of each row of Matrix A and vector V;
// The share memory is limited for a block, instead of reading an entire row of matrix A or vector V from global memory to share memory,
// a square submatrix of A is shared by a block, the size of square submatrix is BLOCK_SIZE*BLOCK_SIZE; Thus, a for-loop is used to
// handle a multiplication of each row of Matrix A and vector V step by step. In eacg step, two subvectors with size BLOCK_SIZE is multiplied.
//*****************************************************************************************************************************************************/
// W = A * V: one thread per matrix row; each thread computes the full dot
// product of its row with V directly from global memory.
// NOTE(review): despite the banner comment above, this version does NOT
// stage tiles in shared memory — the dynamic shared allocation passed at
// launch (sharedMemSize) goes unused.
__global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
float Csub = 0;
int row = tx + bx * BLOCK_SIZE;
int row_offset = N * row;
if(row < N) {
for (int j = 0; j < N; j++) {
Csub += g_MatA[j + row_offset] * g_VecV[j];
}
}
// Uniform barrier (outside the guard); harmless here since no shared
// memory is exchanged.
__syncthreads();
if(row < N) {
g_VecW[ row ] = Csub;
}
}
// Accumulates dot(V, W) into *g_Lamda (the caller must zero it first).
// WARNING: the per-block tree reduction is done in-place on g_VecV in
// GLOBAL memory (not shared memory, despite the comments), so g_VecV is
// clobbered by this kernel.
__global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N)
{
// shared memory size declared at kernel launch
unsigned int tid = threadIdx.x;
unsigned int offset = blockIdx.x * blockDim.x;
unsigned int globalid = offset + tid;
// Elementwise product V[i] * W[i], stored back into V.
if (globalid < N) {
g_VecV[globalid] = g_VecV[globalid] * g_VecW[globalid];
}
// each thread loads one element from global to shared mem
__syncthreads();
// Tree reduction over this block's slice; lanes whose partner index falls
// past N are skipped by the guard.
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if ((tid < s) && (globalid + s < N)) {
g_VecV[globalid] = g_VecV[globalid] + g_VecV[globalid + s];
}
__syncthreads();
}
// One atomic per block folds the block's partial sum into the result.
if (tid == 0) atomicAdd(g_Lamda,g_VecV[offset]);
}
/****************************************************
Accumulates ||W||^2 into *g_NormW (the caller zeroes it beforehand and
takes the square root on the host). g_tempV is scratch storage for the
reduction and is overwritten.
****************************************************/
__global__ void FindNormW(float* g_VecW, float* g_tempV, float * g_NormW, int N)
{
// shared memory size declared at kernel launch
// NOTE(review): despite the comment, the reduction below operates on
// g_tempV in GLOBAL memory, not shared memory.
unsigned int tid = threadIdx.x;
unsigned int offset = blockIdx.x*blockDim.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// Square each element of W into the scratch vector.
if (globalid < N) {
g_tempV[globalid] = g_VecW[globalid] * g_VecW[globalid];
}
__syncthreads();
// Per-block tree reduction over this block's slice of g_tempV.
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if ((tid < s) && (globalid + s < N)) {
g_tempV[globalid] = g_tempV[globalid] + g_tempV[globalid + s];
}
__syncthreads();
}
// One atomic per block folds the block's partial sum into the result.
if (tid == 0) atomicAdd(g_NormW,g_tempV[offset]);
}
// V = W / ||W||, where g_NormW[0] holds the norm (computed on the host).
__global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N)
{
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// NOTE(review): this barrier has no observable effect — there is no prior
// shared-memory traffic in this kernel.
__syncthreads();
// For thread ids greater than data space
if (globalid < N) {
g_VecV[globalid] = g_VecW[globalid] / g_NormW[0];
}
}
|
22,482 | // g++ -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_OMP -I../../../thrust/ -fopenmp -x c++ exemplo2.cu -o exemplo2 && ./exemplo2 < ../17-intro-gpu/stocks2.csv
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <math.h>
#include <thrust/iterator/constant_iterator.h>
// Reads "apple microsoft" price pairs from stdin until EOF/parse failure,
// then computes the population variance of the per-sample difference
// MSFT - AAPL on the device with Thrust.
int main()
{
    int N = 0;
    thrust::host_vector<double> hostMicrosoft;
    thrust::host_vector<double> hostApple;
    double a, m;
    // Only keep a sample when BOTH reads succeeded. The original tested
    // cin.fail() at the top of the loop, so after the stream failed it still
    // pushed the stale a/m values once more (one garbage trailing element).
    while (std::cin >> a >> m)
    {
        N += 1;
        hostMicrosoft.push_back(m);
        hostApple.push_back(a);
    }
    if (N == 0)
    {
        std::cerr << "no input\n";
        return 1;
    }
    thrust::device_vector<double> diferenca(N);
    thrust::device_vector<double> MSFT(hostMicrosoft);
    thrust::device_vector<double> AAPL(hostApple);
    thrust::device_vector<double> var(N);
    thrust::device_vector<double> var_double(N);
    // difference
    thrust::transform(MSFT.begin(), MSFT.end(), AAPL.begin(), diferenca.begin(), thrust::minus<double>());
    // mean — the init value must be 0.0 (double): thrust::reduce deduces its
    // accumulator type from the init argument, so the original int 0 made
    // every partial sum truncate to int.
    double mean = thrust::reduce(diferenca.begin(), diferenca.end(), 0.0, thrust::plus<double>()) / N;
    // deviation from the mean, then squared deviation
    thrust::transform(diferenca.begin(), diferenca.end(), thrust::constant_iterator<double>(mean), var.begin(), thrust::minus<double>());
    thrust::transform(var.begin(), var.end(), var.begin(),
                      var_double.begin(), thrust::multiplies<double>());
    double variancia = thrust::reduce(var_double.begin(), var_double.end(), 0.0, thrust::plus<double>()) / N;
    std::cout << "variancia: " << variancia << "\n";
}
22,483 | #include <stdio.h>
// Multiply each element of nf by 10, one thread per element. There is no
// tail guard: the launch configuration is assumed to cover the array exactly.
__global__ void initFun(int *nf) {
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	nf[idx] = nf[idx] * 10;
}
// Multi-GPU demo: splits an N-element array across the devices listed on
// the command line, scales each chunk by 10 via initFun on a per-device
// stream, and prints the elapsed time measured with CUDA events.
// NOTE(review): N is read from argv[4] below, so the code effectively
// assumes an invocation like `main <num_devs> <dev0> <dev1> <N>`; the
// argc < 2 guard alone does not protect that access — confirm intended CLI.
// NOTE(review): assumes N is divisible by num_devs and the chunk size by
// 32 (the kernel launch has no tail handling) — confirm.
int main(int argc, char* argv[]) {
if (argc < 2) {
fprintf(stderr, "USAGE: main <num_of_devices> "
"<device_indices>\n");
return -1;
}
// info_devs[0] = device count; info_devs[1..] = device indices.
int *info_devs = (int *) calloc(argc - 2, sizeof(int));
info_devs[0] = atoi(argv[1]);
for (int i = 1; i < argc - 2; i++) {
info_devs[i] = atoi(argv[i + 1]);
}
int N = atoi(argv[4]);
float elapsed_time;
//printf("%d\n", N);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t* streams;
int **nfd = (int **) calloc(info_devs[0], sizeof(int *)); // per-device device buffers
int **nfh = (int **) calloc(info_devs[0], sizeof(int *)); // per-device pinned host buffers
streams = (cudaStream_t *) calloc(info_devs[0], sizeof(cudaStream_t));
cudaEventRecord(start, 0);
// For each device: allocate its chunk, fill the host side, and enqueue
// H2D copy -> kernel -> D2H copy on that device's stream so the
// per-device pipelines can overlap.
for (int i = 0; i < info_devs[0]; i++) {
cudaSetDevice(info_devs[i + 1]);
cudaStreamCreate(&streams[i]);
cudaMalloc((void **) &nfd[i], (N / info_devs[0]) * sizeof(int));
cudaMallocHost((void **) &nfh[i], (N / info_devs[0]) * sizeof(int));
for (int n = 0; n < N / info_devs[0]; n++)
nfh[i][n] = n + i * N / info_devs[0];
cudaMemcpyAsync(nfd[i], nfh[i], (N / info_devs[0]) * sizeof(int),
cudaMemcpyHostToDevice, streams[i]);
initFun <<< N / info_devs[0] / 32, 32, 0, streams[i] >>>(nfd[i]);
cudaMemcpyAsync(nfh[i], nfd[i], (N / info_devs[0]) * sizeof(int),
cudaMemcpyDeviceToHost, streams[i]);
}
// Wait for every device's pipeline to drain.
for (int i = 0; i < info_devs[0]; i++) {
cudaSetDevice(info_devs[i + 1]);
cudaStreamSynchronize(streams[i]);
//for (int n = 0; n < N / info_devs[0]; n++)
//fprintf(stderr, "nfh[%d][%d] = %d\n", i, n, nfh[i][n]);
}
// Stop timing on the first listed device (where `start` was recorded).
cudaSetDevice(info_devs[1]);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("%f\n", elapsed_time);
for (int i = 0; i < info_devs[0]; i++) {
cudaStreamDestroy(streams[i]);
cudaFree(nfd[i]);
cudaFreeHost(nfh[i]);
cudaDeviceReset();
}
return 0;
}
22,484 | #include <iostream>
#include <numeric>
#include <random>
#include <vector>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 1
#define BLOCK_SIZE 512
#define BLOCKS_NUMBER 512
// Part 1 of 6: implement the kernel
// Tree reduction: each block sums one BLOCK_SIZE-wide tile of `input` and
// thread 0 writes the block's partial sum to per_block_results[blockIdx.x].
// Requires blockDim.x == BLOCK_SIZE.
__global__ void block_sum(const int* input, int* per_block_results,
                          const size_t n)
{
    __shared__ int shared_mem[BLOCK_SIZE];
    uint block_id = blockIdx.x;
    uint thread_id = threadIdx.x;
    uint idx = block_id * BLOCK_SIZE + thread_id;
    // Guard the tail: pad out-of-range lanes with the additive identity
    // instead of reading past the input (the original ignored n here).
    shared_mem[thread_id] = (idx < n) ? input[idx] : 0;
    __syncthreads();
    // The original reduction was `while (thread_id < stride) { ...;
    // __syncthreads(); }`, which places the barrier in thread-id-dependent
    // control flow — threads failing the condition never reach it, which is
    // undefined behavior. Keep the loop condition uniform and branch only
    // around the work.
    for (uint stride = BLOCK_SIZE / 2; stride > 0; stride >>= 1) {
        if (thread_id < stride) {
            shared_mem[thread_id] += shared_mem[thread_id + stride];
        }
        __syncthreads();
    }
    if (thread_id == 0) {
        per_block_results[block_id] = shared_mem[0];
    }
}
// Host-side emulation of one GPU thread's share of the block_sum tree
// reduction, operating in place on a copy of the input.
// NOTE(review): correctness depends on the caller (cpu_reduction) replaying
// thread ids in DESCENDING order within each block, so that higher-id
// "threads" have folded their partials before lower ids consume them.
void cpu_block_sum(int block_id, int thread_id, int* global_inoutput, int* per_block_results) {
// BLOCK_SIZE is a const si no : const size_t n
uint istart = block_id * BLOCK_SIZE; // start of this block's tile
/*if (thread_id == 0) {
int sum = 0;
for (uint i = 0; i < BLOCK_SIZE; ++i) {
sum += shared_mem[i];
}
per_block_results[block_id] = sum;
}*/
// Replay every reduction step this thread id would perform on the GPU.
uint max_allowed_id = BLOCK_SIZE / 2;
while (thread_id < max_allowed_id) {
global_inoutput[istart + thread_id] += global_inoutput[istart + thread_id + max_allowed_id];
max_allowed_id = max_allowed_id / 2;
}
// Thread 0 holds the block's total after its final step.
if (thread_id == 0) {
per_block_results[block_id] = global_inoutput[istart + thread_id];
}
}
// CPU emulation of the block_sum kernel, used to verify GPU results:
// copies `input`, then replays each block's threads in descending order so
// a "thread" never consumes a partial a lower id has already folded.
// Fix: the original declared `int global_inoutput[input_size]`, a
// runtime-sized stack array (a non-standard VLA in C++, ~1 MB for the
// default 2^18-element input — a stack-overflow risk); use heap storage.
void cpu_reduction(const int* input, int* per_block_results, const size_t input_size, uint blocks_number) {
    std::vector<int> global_inoutput(input, input + input_size);
    // For each block
    for (uint ib = 0; ib < blocks_number; ++ib) {
        // For each thread, descending order (see note above)
        for (int ith = BLOCK_SIZE - 1; ith >= 0; --ith) {
            cpu_block_sum(ib, ith, global_inoutput.data(), per_block_results);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Sums 2^18 random ints three ways — std::accumulate, the CPU emulation of
// the GPU reduction, and the block_sum kernel (two passes) — then
// cross-checks the per-block partial sums and totals.
int main(void)
{
std::random_device
rd; // Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); // Standard mersenne_twister_engine seeded with rd()
std::uniform_int_distribution<> distrib(-10, 10);
// create array of 262144 elements
const int num_elements = 1 << 18;
uint num_blocks = num_elements / BLOCK_SIZE;
// generate random input on the host
std::vector<int> h_input(num_elements);
for (auto& elt : h_input) {
elt = distrib(gen);
}
// Compute the sum on the host
const int host_result = std::accumulate(h_input.begin(), h_input.end(), 0);
std::cerr << "Host sum: " << host_result << std::endl;
// Input size
uint in_size = num_elements * sizeof(int);
// === CPU verification ===
// NOTE(review): runtime-sized stack array (VLA) — non-standard C++.
int host_partial_sums_and_total[num_blocks];
// The second-pass launch and the copy below assume num_blocks == BLOCK_SIZE.
if (num_blocks == BLOCK_SIZE) {
std::cout << "OK, right num_blocks and BLOCK_SIZE\n";
} else {
std::cout << "ERROR - num_blocks(" << num_blocks << ") != BLOCK_SIZE(" << BLOCK_SIZE << ")\n";
return 1;
}
cpu_reduction(h_input.data(), host_partial_sums_and_total, num_elements, num_blocks);
int h_result;
cpu_reduction(host_partial_sums_and_total, &h_result, BLOCK_SIZE, 1);
std::cout << "Host reduce sum: " << h_result << std::endl;
// === CUDA kernels ===
// Move input to device memory
int* d_input;
cudaMalloc(&d_input, in_size);
cudaMemcpy(d_input, h_input.data(), in_size, cudaMemcpyHostToDevice);
// Allocate the partial sums: How much space does it need?
// NOTE(review): only num_blocks * sizeof(int) is needed; in_size
// over-allocates by a factor of BLOCK_SIZE.
int* d_partial_sums_and_total;
cudaMalloc(&d_partial_sums_and_total, in_size);
// Launch one kernel to compute, per-block, a partial sum. How
// much shared memory does it need?
block_sum<<<num_blocks, BLOCK_SIZE>>>(d_input, d_partial_sums_and_total,
num_elements);
// 1) Sum the partial sums on the CPU
// 2) Compare the partial sums read back from the GPU with the CPU ones
int* d_result;
cudaMalloc(&d_result, sizeof(int));
// Compute the sum of the partial sums (single block over num_blocks values)
block_sum<<<1, BLOCKS_NUMBER>>>(d_partial_sums_and_total, d_result, BLOCKS_NUMBER);
// Copy the result back to the host
int device_result = 0;
cudaMemcpy(&device_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
int h_partial_sums_and_total[num_blocks];
// Copy size relies on num_blocks == BLOCK_SIZE (verified above).
cudaMemcpy(h_partial_sums_and_total, d_partial_sums_and_total, BLOCK_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
int hsum = 0;
for (uint ib = 0; ib < num_blocks; ++ib) {
hsum += h_partial_sums_and_total[ib];
//std::cout << "b(" << ib << ") = " << h_partial_sums_and_total[ib] << std::endl;
}
std::cout << "Device sum: " << device_result << std::endl;
std::cout << "Host sum of partial sums: " << hsum << std::endl;
std::cout << "\nPartial sums comparison: " << std::endl;
for (uint ib = 0; ib < num_blocks; ++ib) {
int gpu_v = h_partial_sums_and_total[ib];
int cpu_v = host_partial_sums_and_total[ib];
if (gpu_v == cpu_v) {
std::cout << "-\n";
} else {
std::cout << "e[" << ib << ": " << gpu_v << "!=" << cpu_v << "]\n";
}
//std::cout << "b(" << ib << ") = " << h_partial_sums_and_total[ib] << std::endl;
}
// // Part 1 of 6: deallocate device memory
// NOTE(review): d_input, d_partial_sums_and_total, and d_result are never
// cudaFree'd.
return 0;
}
|
22,485 | #include <stdio.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <vector>
#include <set>
#include <iterator>
#include <algorithm>
using namespace std;
// Training image file name
const string training_image_fn = "train-images.idx3-ubyte";
// Training label file name
const string training_label_fn = "train-labels.idx1-ubyte";
// Grid-stride elementwise update: w[i] = w[i] * x[i] + a for i in [0, n).
// NOTE(review): despite the name, this is not the BLAS saxpy (a*x + y);
// also, n is declared float but used as an integer loop bound — confirm.
__global__
void saxpy(float n, float a, float *x, float *w)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
w[i] = w[i]*x[i] + a;
}
// NOTE(review): this body is a verbatim copy of saxpy above and does NOT
// compute a softmax — it appears to be an unfinished stub. Nothing in the
// visible code calls it.
__global__
void softMax(float n, float a, float *x, float *w)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
w[i] = w[i]*x[i] + a;
}
// Software: Training Artificial Neural Network for MNIST database
// Author: Hy Truong Son
// Major: BSc. Computer Science
// Class: 2013 - 2016
// Institution: Eotvos Lorand University
// Email: sonpascal93@gmail.com
// Website: http://people.inf.elte.hu/hytruongson/
// Copyright 2015 (c). All rights reserved.
// File stream to read data (image, label) and write down a report
ifstream image;
ifstream label;
ofstream report;
// Number of training samples
const int nTraining = 1;
// Image size in MNIST database
const int width = 28;
const int height = 28;
// Image. In MNIST: 28x28 gray scale images.
int d[width][height];
char inputNum;
int classes = 1;
// Read 10 (image, label) pairs from the already-open MNIST streams. Each
// 28x28 image is binarized into the global d[][] buffer (byte 0 -> 0,
// anything else -> 1) and each label is printed.
// NOTE(review): every pass overwrites d and inputNum, so only the 10th
// sample is left for the caller — confirm this is intentional.
void input() {
// Reading image
for(int i = 0; i < 10; i++ ) {
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) { // NOTE(review): shadows the outer loop's i
image.read(&inputNum, sizeof(char));
if (inputNum == 0) {
d[i][j] = 0;
} else {
d[i][j] = 1;
}
}
}
label.read(&inputNum, sizeof(char));
cout << "Label:" << (int)inputNum << endl;
}
}
// Loads MNIST training samples, initializes per-class weight vectors, runs
// the saxpy kernel on class 0's weights on the GPU, and prints the results.
// Fixes: (1) d_w (the host-side array of device pointers) was never
// allocated, so cudaMalloc(&d_w[i], ...) wrote through a wild pointer;
// (2) the weight-init loop wrote w[j] for j < 10 although only `classes`
// rows exist (heap overflow for classes == 1); (3) the result print
// indexed w[i] instead of w[0], reading past the single allocated row.
int main(void)
{
	float *x, *d_x;
	float **d_w;
	float **w;
	int N = width * height;
	cout << "Starting code......." << endl;
	x = (float *)malloc( N *sizeof(float));
	w = (float **)malloc( classes *sizeof(float*));
	d_w = (float **)malloc( classes *sizeof(float*)); // host array of device pointers
	for(int i = 0; i < classes; i++) {
		w[i] = (float *)malloc( N *sizeof(float));
	}
	cudaMalloc(&d_x, N *sizeof(float));
	for(int i = 0; i < classes; i++) {
		cudaMalloc(&d_w[i], N *sizeof(float));
	}
	image.open(training_image_fn.c_str(), ios::in | ios::binary); // Binary image file
	label.open(training_label_fn.c_str(), ios::in | ios::binary ); // Binary label file
	// Reading file headers
	char number;
	for (int i = 1; i <= 16; ++i) {
		image.read(&number, sizeof(char));
	}
	for (int i = 1; i <= 8; ++i) {
		label.read(&number, sizeof(char));
	}
	// Neural Network Initialization
	for (int sample = 1; sample <= nTraining; ++sample) {
		cout << "Sample ---------- **************" << sample << endl;
		// Getting (image, label)
		input();
	}
	report.close();
	image.close();
	label.close();
	// Flatten the binarized image into x; randomize each class's weights.
	for (int i = 0; i < width * height; i++) {
		x[i] = (float)d[i % width][i / width];
		for(int j = 0; j < classes; j++)
			w[j][i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
	}
	cout << "Image:" << endl;
	for (int j = 0; j < height; ++j) {
		for (int i = 0; i < width; ++i) {
			cout << x[ (j ) * height + (i )];
		}
		cout << endl;
	}
	cout << "Label:" << (int)inputNum << endl;
	cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
	for(int i = 0; i < classes; i++)
		cudaMemcpy(d_w[i], w[i], N * sizeof(float), cudaMemcpyHostToDevice);
	// Run w0 = w0 * x + 2 elementwise on the device.
	int blockSize = 256;
	int numBlocks = (N + blockSize - 1) / blockSize;
	saxpy<<<numBlocks, blockSize>>>(N, 2.0f, d_x, d_w[0]);
	cudaMemcpy(w[0], d_w[0], N*sizeof(float), cudaMemcpyDeviceToHost);
	// Print the updated class-0 weights.
	for (int j = 0; j < height; ++j) {
		for (int i = 0; i < width; ++i) {
			cout << (float)w[0][(j) * width + (i)] << " ";
		}
		cout << endl;
	}
	cout << "Label:" << (int)inputNum << endl;
	cudaFree(d_x);
	for(int i = 0; i < classes; i++)
		cudaFree(d_w[i]);
	free(x);
	for(int i = 0; i < classes; i++)
		free(w[i]);
	free(w);
	free(d_w);
}
|
22,486 | #include <stdio.h>
#include <stdlib.h>
#define N 250000
// A candidate trading strategy: its running profit/loss plus a pointer to
// the per-tick backtest routine. The pointer is assigned on the device in
// initializeStrategies, so it is only meaningful in device code.
struct Strategy {
double profitLoss;
void (*backtest)(struct Strategy *, struct Tick *);
};
// One market-data bar: timestamp, OHLC prices, and precomputed RSI /
// stochastic oscillator values at several periods.
struct Tick {
long timestamp;
double open;
double high;
double low;
double close;
double rsi2;
double rsi5;
double rsi7;
double rsi9;
double rsi14;
double stochastic5K;
double stochastic5D;
double stochastic10K;
double stochastic10D;
double stochastic14K;
double stochastic14D;
};
// Placeholder backtesting step: burns a few iterations so the kernel has
// measurable work; ignores both arguments for now.
// TODO: Actually do something useful.
__device__ void backtest(struct Strategy *self, struct Tick *tick) {
	int counter = 0;
	for (int iter = 0; iter < 50; ++iter) {
		counter++;
	}
}
// One thread per strategy: seed its P&L and point its callback at the
// __device__ backtest routine (the function address is taken in device
// code, so the pointer is valid for later device-side calls).
__global__ void initializeStrategies(struct Strategy *strategies) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
strategies[i].profitLoss = 10000 + i;
strategies[i].backtest = backtest;
}
}
// One thread per strategy: invoke the strategy's stored backtest callback
// on the given tick.
__global__ void backtestStrategies(struct Strategy *strategies, struct Tick *tick) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
strategies[i].backtest(&strategies[i], tick);
}
}
// Initializes N strategies on the GPU and replays tickCount ticks through
// each strategy's backtest callback, kFoldCount times over.
// Fixes: (1) devTicks was allocated/copied with N elements but indexed up
// to tickCount (1,000,000 > N = 250,000), so most launches read device
// memory out of bounds — size it by tickCount; (2) `struct Strategy
// strategies[N]` (~4 MB) was a stack array — move it to the heap;
// (3) release host allocations.
int main() {
	int threadsPerBlock = 1000;
	int blockCount = N / threadsPerBlock;
	struct Strategy *strategies =
	    (struct Strategy *) malloc(N * sizeof(struct Strategy));
	struct Strategy *devStrategies;
	int i;
	int j;
	int tickCount = 1000000;
	struct Tick *ticks = (Tick*) malloc(tickCount * sizeof(Tick));
	struct Tick *devTicks;
	int kFoldCount = 10;
	void (*backtester)(struct Strategy*, struct Tick*);
	backtester = &backtestStrategies;
	// Fill every tick with the same fixture bar.
	for (i=0; i<tickCount; i++) {
		ticks[i].timestamp = 1460611103;
		ticks[i].open = 89.5;
		ticks[i].high = 89.9;
		ticks[i].low = 89.2;
		ticks[i].close = 89.4;
		ticks[i].rsi2 = 89.7;
		ticks[i].rsi5 = 89.75;
		ticks[i].rsi7 = 89.72;
		ticks[i].rsi9 = 89.76;
		ticks[i].rsi14 = 89.9;
		ticks[i].stochastic5K = 89.2;
		ticks[i].stochastic5D = 89.4;
		ticks[i].stochastic10K = 89.7;
		ticks[i].stochastic10D = 89.75;
		ticks[i].stochastic14K = 89.72;
		ticks[i].stochastic14D = 89.76;
	}
	cudaSetDevice(0);
	// Allocate memory on the GPU for the strategies.
	cudaMalloc((void**)&devStrategies, N * sizeof(Strategy));
	// Copy ALL tick data to the GPU (sized by tickCount, not N).
	cudaMalloc((void**)&devTicks, tickCount * sizeof(Tick));
	cudaMemcpy(devTicks, ticks, tickCount * sizeof(Tick), cudaMemcpyHostToDevice);
	free(ticks); // host copy no longer needed
	// Initialize strategies on the GPU.
	initializeStrategies<<<blockCount, threadsPerBlock>>>(devStrategies);
	for (i=0; i<kFoldCount; i++) {
		for (j=0; j<tickCount; j++) {
			// Run backtests for all strategies.
			(*backtester)<<<blockCount, threadsPerBlock>>>(devStrategies, &devTicks[j]);
		}
	}
	// Free memory for the tick data from the GPU.
	cudaFree(devTicks);
	// Copy strategies from the GPU (a blocking memcpy also synchronizes
	// with the queued kernels, so no explicit cudaDeviceSynchronize needed).
	cudaMemcpy(strategies, devStrategies, N * sizeof(Strategy), cudaMemcpyDeviceToHost);
	// Display results.
	for (i=0; i<N; i++) {
		printf("%f\n", strategies[i].profitLoss);
	}
	// Free memory for the strategies on the GPU and the host.
	cudaFree(devStrategies);
	free(strategies);
	return 0;
}
|
22,487 | #include <stdlib.h>
#include <stdio.h>
#include <limits>
#include <algorithm>
using namespace std;
#define BLOCK_SIZE 512
// Block-level max reduction. Each block stages a 2*BLOCK_SIZE segment of `in`
// into shared memory (padding the tail with `smallest`, the caller-supplied
// identity) and reduces it with a binary tree, writing one partial maximum
// per block to out[blockIdx.x].
// NOTE(review): the caller must pass an identity no larger than any possible
// input; a positive "smallest" corrupts the result for all-negative data.
__global__ void reduce_max(float * in, float * out, int numel, float smallest) {
    //@@ Load a segment of the input vector into shared memory
    __shared__ float s[2 * BLOCK_SIZE];
    unsigned int t = threadIdx.x;
    unsigned int start = 2 * blockIdx.x * BLOCK_SIZE;
    // Each thread stages two elements, BLOCK_SIZE apart.
    if (start + t < numel)
        s[t] = in[start + t];
    else
        s[t] = smallest;  // out-of-range slot gets the identity value
    if (start + BLOCK_SIZE + t < numel)
        s[BLOCK_SIZE + t] = in[start + BLOCK_SIZE + t];
    else
        s[BLOCK_SIZE + t] = smallest;
    //@@ Traverse the reduction tree
    for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
        // Barrier before each level so the previous level's writes are visible.
        __syncthreads();
        if (t < stride)
            s[t] = fmax(s[t], s[t+stride]);
    }
    //@@ Write the computed maximum of the block to the output vector at the
    //@@ correct index
    if (t == 0)
        out[blockIdx.x] = s[0];
}
// Populate `in` with `size` pseudo-random values in [0, 100).
// The RNG is reseeded with a fixed constant, so runs are reproducible.
void fillarray(float* in, int size){
    srand(3);
    int idx = 0;
    while (idx < size) {
        in[idx] = rand() % 100;
        ++idx;
    }
}
// Fill `in` with the deterministic ramp 0, 1, 2, ..., size-1 (test data).
void fillTest(float* in, int size){
    for (int k = 0; k != size; ++k)
        in[k] = (float)k;
}
// Driver: reads the element count from argv, fills the input with test data,
// reduces it to per-block maxima on the GPU, finishes the max on the host.
int main(int argc, char ** argv) {
    int ii;
    float * hostInput;   // The input 1D list
    float * hostOutput;  // per-block partial maxima
    float * deviceInput;
    float * deviceOutput;
    int numInputElements;   // number of elements in the input list
    int numOutputElements;  // number of elements in the output list
    // BUG FIX: numeric_limits<float>::min() is the smallest POSITIVE float,
    // not the most negative value, so it is the wrong identity for a max
    // reduction once inputs can be negative. Use -infinity instead.
    float smallest = -numeric_limits<float>::infinity();
    if (argc != 2){
        printf("wrong arguments\n");
        return 0;
    }
    numInputElements = atoi(argv[1]);
    if (numInputElements <= 0){
        printf("element count must be positive\n");
        return 0;
    }
    hostInput = (float*) malloc(numInputElements * sizeof(float));
    fillTest (hostInput, numInputElements);
    // One output element per block; each block reduces 2*BLOCK_SIZE inputs.
    numOutputElements = numInputElements / (BLOCK_SIZE<<1);
    if (numInputElements % (BLOCK_SIZE<<1)) {
        numOutputElements++;
    }
    hostOutput = (float*) malloc(numOutputElements * sizeof(float));
    cudaMalloc(&deviceInput, sizeof(float) * numInputElements);
    cudaMalloc(&deviceOutput, sizeof(float) * numOutputElements);
    cudaMemcpy(deviceInput, hostInput, sizeof(float) * numInputElements, cudaMemcpyHostToDevice);
    dim3 dimGrid(numOutputElements, 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    reduce_max<<<dimGrid, dimBlock>>>(deviceInput, deviceOutput, numInputElements, smallest);
    cudaDeviceSynchronize();
    cudaMemcpy(hostOutput, deviceOutput, sizeof(float) * numOutputElements, cudaMemcpyDeviceToHost);
    // Final reduction of the per-block maxima on the host.
    for (ii = 1; ii < numOutputElements; ii++) {
        hostOutput[0] = max(hostOutput[ii], hostOutput[0]);
    }
    printf("Final max %f\n", hostOutput[0]);
    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    free(hostInput);
    free(hostOutput);
    return 0;
}
22,488 | #include <ctime>
#include <cuda.h>
#include <iomanip>
#include <iostream>
using namespace std;
#define MASK_WIDTH 5
#define WIDTH 7
// Secuencial
// Sequential reference: 2D convolution of the WIDTH x WIDTH matrix `m` with
// the MASK_WIDTH x MASK_WIDTH kernel `mask`, zero-padded at the borders.
void convolution_2D(double *m, double *mask, double *result) {
    const int halo = MASK_WIDTH / 2;
    for (int row = 0; row < WIDTH; row++) {
        for (int col = 0; col < WIDTH; col++) {
            double acc = 0;
            for (int mi = 0; mi < MASK_WIDTH; mi++) {
                for (int mj = 0; mj < MASK_WIDTH; mj++) {
                    const int r = row - halo + mi;
                    const int c = col - halo + mj;
                    if (r >= 0 && r < WIDTH && c >= 0 && c < WIDTH)
                        acc += m[WIDTH * r + c] * mask[MASK_WIDTH * mi + mj];
                }
            }
            result[WIDTH * row + col] = acc;
        }
    }
}
// GPU version of convolution_2D: one thread per output pixel.
// Expects a 2D launch covering at least WIDTH x WIDTH threads; extra
// threads return immediately.
__global__
void convolution_2D_kernel(double *m, double *mask, double *result) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= WIDTH || col >= WIDTH) return;
    const int halo = MASK_WIDTH / 2;
    double acc = 0;
    for (int mi = 0; mi < MASK_WIDTH; mi++) {
        const int r = row - halo + mi;
        if (r < 0 || r >= WIDTH) continue;  // zero padding: skip out-of-range rows
        for (int mj = 0; mj < MASK_WIDTH; mj++) {
            const int c = col - halo + mj;
            if (c >= 0 && c < WIDTH)
                acc += m[WIDTH * r + c] * mask[MASK_WIDTH * mi + mj];
        }
    }
    result[WIDTH * row + col] = acc;
}
// Print a WIDTH x WIDTH matrix to stdout, one bracketed row per line,
// followed by a blank line.
void printMatrix(double *v) {
    for (int r = 0; r < WIDTH; r++) {
        cout << "[";
        for (int c = 0; c < WIDTH; c++) {
            if (c > 0) cout << ", ";
            cout << v[WIDTH * r + c];
        }
        cout << "]" << endl;
    }
    cout << endl;
}
// Fill each row of the WIDTH x WIDTH matrix with 1..WIDTH
// (value depends only on the column index).
void fillMatrix(double *v) {
    for (int idx = 0; idx < WIDTH * WIDTH; idx++)
        v[idx] = idx % WIDTH + 1;
}
// Driver: runs the CPU reference convolution and the GPU kernel on the same
// fixed 7x7 matrix / 5x5 mask, printing both results and rough timings.
int main() {
    // Host
    double h_mask[] = {1, 2, 3, 2, 1, \
                       2, 3, 4, 3, 2, \
                       3, 4, 5, 4, 3, \
                       2, 3, 4, 3, 2, \
                       1, 2, 3, 2, 1};
    double h_m[] = {1, 2, 3, 4, 5, 6, 7, \
                    2, 3, 4, 5, 6, 7, 8, \
                    3, 4, 5, 6, 7, 8, 9, \
                    4, 5, 6, 7, 8, 5, 6, \
                    5, 6, 7, 8, 5, 6, 7, \
                    6, 7, 8, 9, 0, 1, 2, \
                    7, 8, 9, 0, 1, 2, 3};
    double *h_result = new double[WIDTH * WIDTH];
    double *ans = new double[WIDTH * WIDTH];
    // CPU reference pass (timing includes the prints, as before).
    {
        clock_t start = clock();
        convolution_2D(h_m, h_mask, h_result);
        printMatrix(h_m);
        printMatrix(h_result);
        clock_t end = clock();
        double time_used = double(end - start) / CLOCKS_PER_SEC;
        cout << "Tiempo invertido CPU = " << setprecision(10) << time_used << "s" << endl << endl;
    }
    // Device
    double *d_mask, *d_m, *d_result;
    int blockSize = 4;
    dim3 dimBlock(blockSize, blockSize, 1);
    // Round the grid up so the whole matrix is covered.
    dim3 dimGrid(ceil(WIDTH / float(blockSize)), ceil(WIDTH / float(blockSize)), 1);
    cudaMalloc(&d_mask, sizeof(double) * MASK_WIDTH * MASK_WIDTH);
    cudaMalloc(&d_m, sizeof(double) * WIDTH * WIDTH);
    cudaMalloc(&d_result, sizeof(double) * WIDTH * WIDTH);
    // GPU pass
    {
        clock_t start = clock();
        cudaMemcpy(d_m, h_m, sizeof(double) * WIDTH * WIDTH, cudaMemcpyHostToDevice);
        cudaMemcpy(d_mask, h_mask, sizeof(double) * MASK_WIDTH * MASK_WIDTH, cudaMemcpyHostToDevice);
        convolution_2D_kernel<<< dimGrid, dimBlock >>>(d_m, d_mask, d_result);
        // The blocking copy-back also synchronizes with the kernel.
        cudaMemcpy(ans, d_result, sizeof(double) * WIDTH * WIDTH, cudaMemcpyDeviceToHost);
        printMatrix(h_m);
        printMatrix(ans);
        clock_t end = clock();
        double time_used = double(end - start) / CLOCKS_PER_SEC;
        cout << "Tiempo invertido GPU = " << setprecision(10) << time_used << "s" << endl << endl;
    }
    // BUG FIX: the heap buffers and all three device allocations were leaked.
    cudaFree(d_mask);
    cudaFree(d_m);
    cudaFree(d_result);
    delete[] h_result;
    delete[] ans;
    return 0;
}
|
22,489 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define WIDTH 512 // 64 ~ 512
#define TILE_WIDTH 16
#define ARYTYPE float
ARYTYPE M[WIDTH][WIDTH] = {0};
ARYTYPE N[WIDTH][WIDTH] = {0};
ARYTYPE P[WIDTH][WIDTH] = {0};
ARYTYPE MxN[WIDTH][WIDTH] = {0};
__device__ ARYTYPE GetElement(ARYTYPE *matrix, int row, int col, int width);
__device__ void SetElement(ARYTYPE *matrix, int row, int col, int width, ARYTYPE value);
__device__ ARYTYPE *GetSubMatrix(ARYTYPE *matrix, int blockrow, int blockcol, int width);
__global__ void MatMulKernel(ARYTYPE *Md, ARYTYPE *Nd, ARYTYPE *Pd, int width);
void MatMul(ARYTYPE *M, ARYTYPE *N, ARYTYPE *P, int width);
// Driver: fills two WIDTH x WIDTH matrices with small random integers,
// computes M x N on the CPU (timed triple loop), then on the GPU via
// MatMul, and compares the two products element by element.
int main(int argc, char *argv[])
{
    int width = WIDTH;
    int pass = 1;  // flips to 0 on the first mismatch
    // rand() is never seeded, so the matrices (and results) are reproducible.
    for (int i = 0; i < width; ++i) {
        for (int j = 0; j < width; ++j) {
            M[i][j] = rand() % 30;
            N[i][j] = rand() % 30;
        }
    }
    struct timeval starttime, endtime;
    gettimeofday(&starttime, NULL);
    // CPU reference product, accumulated into the global MxN (zero-initialized).
    for (int i = 0; i < width; ++i) {
        for (int j = 0; j < width; ++j) {
            for (int k = 0; k < width; ++k) {
                MxN[i][j] += M[i][k] * N[k][j];
            }
        }
    }
    gettimeofday(&endtime, NULL);
    double executime;
    executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
    executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
    printf("CPU time: %13lf msec\n", executime);
    MatMul((ARYTYPE *)M, (ARYTYPE *)N, (ARYTYPE *)P, width);
    // Exact float equality works here because entries are ints < 30, so every
    // product/sum (max ~ 29*29*512 < 2^24) is exactly representable in float.
    // NOTE(review): a tolerance would be needed if the value range grew.
    for (int i = 0; i < width; ++i) {
        for (int j = 0; j < width; ++j) {
            if(MxN[i][j] != P[i][j]) {
                printf("MxN[%d][%d] = %2.0f P[%d][%d] = %2.0f\n", i, j, (float)MxN[i][j], i, j, (float)P[i][j]);
                pass = 0;
            }
        }
    }
    printf("Test %s\n", (pass)?"PASSED":"FAILED");
    return 0;
}
// Read matrix[row][col] from a row-major, `width`-wide matrix.
__device__ ARYTYPE GetElement(ARYTYPE *matrix, int row, int col, int width)
{
    return matrix[row * width + col];
}
// Write `value` into matrix[row][col] of a row-major, `width`-wide matrix.
__device__ void SetElement(ARYTYPE *matrix, int row, int col, int width, ARYTYPE value)
{
    matrix[row * width + col] = value;
}
// Return a pointer to the TILE_WIDTH x TILE_WIDTH sub-matrix located
// `blockcol` tiles right and `blockrow` tiles down from the top-left corner
// of a row-major, `width`-wide matrix.
__device__ ARYTYPE *GetSubMatrix(ARYTYPE *matrix, int blockrow, int blockcol, int width)
{
    return &matrix[(blockrow * width + blockcol) * TILE_WIDTH];
}
// Matrix multiplication kernel called by MatMul().
// Tiled shared-memory matmul: each block computes one TILE_WIDTH x TILE_WIDTH
// sub-matrix of P, streaming matching tiles of M and N through shared memory.
// Preconditions: blockDim == (TILE_WIDTH, TILE_WIDTH) and width is a multiple
// of TILE_WIDTH (the loop below has no tail handling).
__global__ void MatMulKernel(ARYTYPE *Md, ARYTYPE *Nd, ARYTYPE *Pd, int width)
{
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Each thread block computes one sub-matrix Psub of P
    ARYTYPE *Pd_sub = GetSubMatrix(Pd, blockRow, blockCol, width);
    // Thread row and column within sub-matrix
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Each thread computes one element of Psub
    // by accumulating results into Pvalue
    ARYTYPE Pvalue = 0;
    // Shared memory used to store Msub and Nsub respectively
    __shared__ ARYTYPE Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ ARYTYPE Nds[TILE_WIDTH][TILE_WIDTH];
    // Walk the tiles of M (rightwards) and N (downwards) needed for Psub,
    // multiplying each pair of tiles and accumulating the results.
    for (int m = 0; m < (width / TILE_WIDTH); ++m) {
        // Get sub-matrix Msub of M
        ARYTYPE *Md_sub = GetSubMatrix(Md, blockRow, m, width);
        // Get sub-matrix Nsub of N
        ARYTYPE *Nd_sub = GetSubMatrix(Nd, m, blockCol, width);
        // Each thread loads one element of each tile into shared memory.
        Mds[row][col] = GetElement(Md_sub, row, col, width);
        Nds[row][col] = GetElement(Nd_sub, row, col, width);
        // Barrier: both tiles must be fully populated before anyone reads them.
        __syncthreads();
        // Multiply Msub and Nsub together
        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[row][k] * Nds[k][col];
            //Pvalue += __mul24(Mds[row][k],Nds[k][col]);
        }
        // Barrier: finish computing on this tile before it is overwritten
        // by the next iteration's loads.
        __syncthreads();
    }
    // Write Psub to device memory
    // Each thread writes one element
    SetElement(Pd_sub, row, col, width, Pvalue);
}
// Matrix multiplication - Host code.
// Copies M and N to the device, launches the tiled kernel, times it with CUDA
// events, and copies the product back into P.
// Matrix dimensions are assumed to be multiples of TILE_WIDTH.
void MatMul(ARYTYPE *M, ARYTYPE *N, ARYTYPE *P, int width)
{
    size_t size = width * width * sizeof(ARYTYPE);
    ARYTYPE *Md, *Nd, *Pd;
    // Allocate and Load M, N to device memory
    cudaMalloc((void **)&Md, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
    // Allocate P on the device
    cudaMalloc((void **)&Pd, size);
    // Setup the execution configuration
    dim3 dimGrid(width/TILE_WIDTH, width/TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    // Get start time event
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Invoke kernel
    MatMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);
    // cudaGetLastError() reports launch-configuration failures.
    // BUG FIX: the old message claimed "before kernel call", but this check
    // runs after the launch; the text was misleading.
    cudaError_t cuda_err = cudaGetLastError();
    if ( cudaSuccess != cuda_err ){
        printf("kernel launch failed: error = %s\n", cudaGetErrorString (cuda_err));
        exit(1) ;
    }
    // Get stop time event
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Compute execution time
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Read P from device memory (blocking copy — also syncs with the kernel)
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}
|
22,490 | #include <stdio.h>
#include <stdlib.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Gather: out[b][c][j] = points[b][c][ idx[b][j] ].
// Launch layout (see launcher): grid = (ceil(M/threads), C, B), 1D blocks,
// so blockIdx.z selects the batch and blockIdx.y the channel.
__global__ void gather_points_kernel(int b, int c, int n, int m,
                                     const float *__restrict__ points,
                                     const int *__restrict__ idx,
                                     float *__restrict__ out) {
    // points: (B, C, N)
    // idx: (B, M)
    // output:
    //      out: (B, C, M)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
    // Advance each pointer to this thread's (batch, channel, point) slot so
    // the final copy is a plain [0] access.
    out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    points += bs_idx * c * n + c_idx * n;
    // NOTE(review): idx values are assumed to lie in [0, n) — not validated here.
    out[0] = points[idx[0]];
}
// Host launcher for gather_points_kernel.
// points: (B, C, N), idx: (B, npoints) -> out: (B, C, npoints).
// One thread per gathered point; grid y/z walk the channel and batch dims.
// Aborts the process if the launch fails.
void gather_points_kernel_launcher(int b, int c, int n, int npoints,
                                   const float *points, const int *idx,
                                   float *out, cudaStream_t stream) {
    dim3 grid(DIVUP(npoints, THREADS_PER_BLOCK), c, b);
    dim3 block(THREADS_PER_BLOCK);
    gather_points_kernel<<<grid, block, 0, stream>>>(b, c, n, npoints, points,
                                                     idx, out);
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
// Backward pass of gather: scatter each grad_out[b][c][j] back to
// grad_points[b][c][ idx[b][j] ]. atomicAdd is required because several
// output positions j may reference the same source index.
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
                                          const float *__restrict__ grad_out,
                                          const int *__restrict__ idx,
                                          float *__restrict__ grad_points) {
    // grad_out: (B, C, M)
    // idx: (B, M)
    // output:
    //      grad_points: (B, C, N)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;
    // Advance each pointer to this thread's (batch, channel, point) slot.
    grad_out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    grad_points += bs_idx * c * n + c_idx * n;
    atomicAdd(grad_points + idx[0], grad_out[0]);
}
// Host launcher for gather_points_grad_kernel.
// grad_out: (B, C, npoints), idx: (B, npoints) -> grad_points: (B, C, N).
// Same launch geometry as the forward pass: x covers points, y channels,
// z batches. Aborts the process if the launch fails.
void gather_points_grad_kernel_launcher(int b, int c, int n, int npoints,
                                        const float *grad_out, const int *idx,
                                        float *grad_points,
                                        cudaStream_t stream) {
    dim3 grid(DIVUP(npoints, THREADS_PER_BLOCK), c, b);
    dim3 block(THREADS_PER_BLOCK);
    gather_points_grad_kernel<<<grid, block, 0, stream>>>(
        b, c, n, npoints, grad_out, idx, grad_points);
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(status));
        exit(-1);
    }
}
|
22,491 | #include "includes.h"
// Gaussian pdf N(mu, sigma) evaluated at x.
// FIX: the original mixed float intermediates with double library calls
// (exp, sqrt) and a double pi literal, forcing a needless float->double
// round-trip on every call; all math is now single precision. The double
// return type is kept for interface compatibility.
__device__ double dnorm(float x, float mu, float sigma)
{
    const float inv_sqrt_2pi = 0.3989422804014327f;  // 1 / sqrt(2*pi)
    float z = (x - mu) / sigma;
    return (double)(inv_sqrt_2pi * expf(-0.5f * z * z) / sigma);
}
// Per-element transform: out[i] = log(u[i]) * phi(u[i]) / (Phi(1) - Phi(0)),
// where phi is the standard normal pdf and the divisor 0.3413447460685 is
// Pr(0 <= Z <= 1), i.e. the normalizing constant of a normal truncated to [0,1].
// NOTE(review): the name suggests the log-density of a truncated normal, which
// would be log(phi(u) / 0.3413...) rather than log(u) * phi(u) / 0.3413...;
// confirm the intended formula with the caller.
__global__ void log_truncNorm(float *out, float *unifVals, int N)
{
    // Flatten a (possibly 2D) grid and (possibly 3D) block into a linear index.
    int myblock = blockIdx.x + blockIdx.y * gridDim.x;
    /* how big is each block within a grid */
    int blocksize = blockDim.x * blockDim.y * blockDim.z;
    /* get thread within a block */
    int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int idx = myblock * blocksize + subthread;
    if(idx < N) {
        // 0.3413447460685 is pnorm(1) - pnorm(0), i.e. Pr( 0 <= Z <= 1) for Z ~ N(0, 1)
        out[idx] = log(unifVals[idx]) * dnorm(unifVals[idx], 0, 1)/0.3413447460685;
    }
}
22,492 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// Query and print properties of the current CUDA device.
int main()
{
    int device;
    cudaDeviceProp properties;
    // FIX: the original overwrote the status of cudaGetDevice with that of
    // cudaGetDeviceProperties and printed `properties` even when the queries
    // had failed (i.e. while the struct was uninitialized).
    cudaError_t err = cudaGetDevice(&device);
    if (err == cudaSuccess)
        err = cudaGetDeviceProperties(&properties, device);
    if (err == cudaSuccess)
    {
        std::cout << "processor count" << properties.multiProcessorCount << std::endl;
        std::cout << "warp size " << properties.warpSize << std::endl;
        std::cout << "name=" << properties.name << std::endl;
        std::cout << "Compute capability " << properties.major << "." << properties.minor << "\n";
        std::cout << "shared Memory/SM " << properties.sharedMemPerMultiprocessor
                  << std::endl;
        printf("device =%d\n", device);
    }
    else
        printf("error getting device\n");  // typo "deivce" fixed
    return 0;
}
|
22,493 | #include<cuda_runtime.h>
#include<stdio.h>
#include<iostream>
// Elementwise cube: d_out[i] = d_in[i]^3.
// Indexing uses threadIdx.x only, so the caller must launch a single block
// with one thread per element.
__global__ void cube(float * d_out, float * d_in){
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v * v;
}
// Driver: fills a 96-element array, cubes it on the GPU (one block, one
// thread per element) and prints the array before and after.
int main(int argc,char **argv){
    //initilize array specs
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    //initalize array values
    float h_in[ARRAY_SIZE];
    for(int i=0;i<ARRAY_SIZE;i++){
        h_in[i]=float(i);
    }
    //print array, four values per line
    std::cout<<"Before: \n";
    for(int i=0;i<ARRAY_SIZE;i++){
        printf("%f", h_in[i]);
        printf(((i%4)!=3) ? "\t" : "\n");
    }
    std::cout<<"\n";
    //output array of the same size as the input
    float h_out[ARRAY_SIZE];
    float * d_in;
    float * d_out;
    //allocate device buffers
    cudaMalloc((void**) &d_in,ARRAY_BYTES);
    cudaMalloc((void**) &d_out,ARRAY_BYTES);
    //copy input to device and run the kernel
    cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice);
    cube<<<1,ARRAY_SIZE>>>(d_out,d_in);
    // FIX: the original never checked for errors; a failed launch would
    // silently leave h_out full of garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cout<<"kernel launch failed: "<<cudaGetErrorString(err)<<std::endl;
    }
    //copy result back (blocking, so it also waits for the kernel)
    cudaMemcpy(h_out,d_out,ARRAY_BYTES,cudaMemcpyDeviceToHost);
    //print result array
    std::cout<<"\nAfter: \n";
    for(int i=0;i<ARRAY_SIZE;i++){
        printf("%f", h_out[i]);
        printf(((i%4)!=3) ? "\t" : "\n");
    }
    //free memory
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
22,494 | //xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
// In CUDA providing static and __attribute__((always_inline)) SHOUD NOT
// keep a copy of inlined function around.
//ps: the values from A[N-1-offset] to A[N-1] always will receive unpredictable values,
//because they acess values because they access memory positions that were not initiated
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define tid threadIdx.x
#define N 2//32
// Deliberately racy helper for a GPU verification test case (see the xfail
// notes at the top of this file): each thread reads A[tid + offset] while
// neighbours write A[tid], so a read-write race is expected whenever
// offset < blockDim.x. `static` + always_inline is used to check that the
// compiler folds this into the caller without keeping a separate copy.
__device__ static __attribute__((always_inline))
void inlined(int *A, int offset)
{
    int temp = A[tid + offset];  // `tid` is a macro for threadIdx.x
    A[tid] += temp;
}
// Kernel entry point: the verification tools analyze the behavior of
// `inlined` through this wrapper.
__global__ void inline_test(int *A, int offset) {
    inlined(A, offset);
}
// Host driver for the race test: initializes N ints, runs inline_test with a
// fixed offset, and prints the array before and after.
int main(){
    int *a;
    int *dev_a;
    int size = N*sizeof(int);
    cudaMalloc((void**)&dev_a, size);
    // BUG FIX: the host buffer only needs `size` bytes (N ints); the original
    // allocated N*size (N*N ints).
    a = (int*)malloc(size);
    for (int i = 0; i < N; i++)
        a[i] = i;
    cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
    printf("a: ");
    for (int i = 0; i < N; i++)
        printf("%d ", a[i]);
    // Offset 2 with N == 2 reads past the initialized region on purpose —
    // this file is an out-of-bounds/data-race test case (see header comments).
    inline_test<<<1,N>>>(dev_a, 2); //you can change this offset for tests
    cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
    printf("\nFunction Results:\n ");
    for (int i = 0; i < N; i++)
        printf("%d ", a[i]);
    free(a);
    cudaFree(dev_a);
    return 0;
}
22,495 | #include<stdio.h>
#include<cuda.h>
#include<string.h>
#include<stdlib.h>
// Print a rows x cols row-major matrix to stdout, one row per line.
void print_matrix(int* mat, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++)
            printf("%d ", mat[r * cols + c]);
        printf("\n");
    }
}
// Write a rows x cols row-major matrix to stream `f`, one row per line,
// with a trailing space after each value (same format as print_matrix).
void print_matrix_file(FILE* f, int* mat, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++)
            fprintf(f, "%d ", mat[r * cols + c]);
        fprintf(f, "\n");
    }
}
// Run query on the specified row of the n-column table `data`.
// Query layout (ints): [col, x, p, then p triples of (column, amount, op)],
// where op == -1 means subtract and anything else means add. Columns are
// 1-based in the query, hence the -1 when indexing.
// Atomics are required because multiple query threads may update the same
// row concurrently.
__device__ void runQuery(int *data, int n, int *query, int row) {
    int n_queries = query[2];
    int len = n_queries*3 + 3;
    for(int i=3; i<len; i+=3) {
        int op = query[i+2];
        if (op == -1)
            atomicSub(&data[row*n+query[i]-1], query[i+1]);
        else
            atomicAdd(&data[row*n+query[i]-1], query[i+1]);
    }
}
// One thread per table row: apply `query`'s updates to every row whose
// (col-1)-th field equals x. (Intended for a nested per-query launch.)
__global__ void searchQuery(int* data, int n, int m, int* query, int col, int x) {
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= m) return;
    if (data[row*n + (col-1)] == x)
        runQuery(data, n, query, row);
}
// One thread per query. Each thread linearly scans all m rows of the table
// and applies its query (through the atomic updates in runQuery) to every
// row whose match column equals the query key. The commented-out code is an
// alternative that launched a nested kernel per query (dynamic parallelism).
__global__ void runQueries(int* data, int m, int n, int** queries, int q) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < q) {
        int *query = queries[tid];
        int col = query[0];  // 1-based column to match on
        int x = query[1];    // value to match
        // Search in database
        for(int row=0; row<m; row++) {
            if(data[row*n + col-1] == x)
                runQuery(data, n, query, row);
        }
        // Try kernel in kernel
        // int n_blocks = ceil((float)m / 1024);
        // searchQuery<<<n_blocks, 1024>>>(data, n, m, query, col, x);
    }
}
// Driver: reads an m x n integer table plus q update queries from the input
// file, runs all queries in parallel on the GPU (one thread per query), and
// writes the updated table to the output file.
int main(int argc, char *argv[]) {
    if (argc < 3) {
        printf("Usage: ./a.out <input-file-name> <output-file-name>\n");
        return 0;
    }
    FILE *in = fopen(argv[1], "r");
    if (in == NULL) {
        printf("Error opening input file!\n");
        return -1;
    }
    int m,n;
    fscanf(in, "%d %d", &m, &n);
    int *data, *ddata;
    data = (int*)malloc(m*n*sizeof(int));
    cudaMalloc(&ddata, m*n*sizeof(int));
    for(int i=0; i<m; i++) {
        for(int j=0; j<n; j++) {
            fscanf(in, "%d", &data[i*n+j]);
        }
    }
    int q;
    // BUG FIX: numbers can be up to 10 digits (plus sign and NUL), so the old
    // char s[10] could overflow on the largest allowed tokens.
    char s[16];
    fscanf(in, "%d", &q);
    int* queries[q];  // Storage on CPU
    int* dqueries[q]; // Storage on GPU
    for(int i=0; i<q; i++) {
        fscanf(in, "%s", s);
        if (strcmp(s, "U")!=0) {
            // First char is not "U", some problem
            printf("Incorrect input, first character in query must be U, exiting!\n");
            return -1;
        }
        fscanf(in, "%s", s);
        int col = atoi(&s[1]); // Skip first char C
        fscanf(in, "%s", s);
        int x = atoi(s); // Value to be matched against column
        fscanf(in, "%s", s);
        int p = atoi(s); // No of update ops
        int len = p*3 + 3; // +1 for column, +1 for key, +1 for no. of updates
        queries[i] = (int*)malloc(len*sizeof(int));
        // For copying query to GPU
        int *dquery;
        cudaMalloc(&dquery, len*sizeof(int));
        queries[i][0] = col;
        queries[i][1] = x;
        queries[i][2] = p;
        for(int j=3; j<len; j+=3) {
            fscanf(in, "%s", s);
            queries[i][j] = atoi(&s[1]);   // target column (skip leading C)
            fscanf(in, "%s", s);
            queries[i][j+1] = atoi(s);     // amount
            fscanf(in, "%s", s);
            queries[i][j+2] = (strcmp(s,"+") ? -1 : 1); // -1 if -, 1 for +
        }
        cudaMemcpy(dquery, queries[i], len*sizeof(int), cudaMemcpyHostToDevice);
        dqueries[i] = dquery;
    }
    // Copy Database to GPU
    cudaMemcpy(ddata, data, m*n*sizeof(int), cudaMemcpyHostToDevice);
    // Copy array of pointers(pointing to queries) to GPU
    int** dquerieslist;
    cudaMalloc(&dquerieslist, q*sizeof(int*));
    cudaMemcpy(dquerieslist, dqueries, q*sizeof(int*), cudaMemcpyHostToDevice);
    // One query per thread
    int n_blocks = ceil((float)q / 1024);
    runQueries<<<n_blocks, 1024>>>(ddata, m, n, dquerieslist, q);
    // Blocking copy-back; also synchronizes with the kernel.
    cudaMemcpy(data, ddata, m*n*sizeof(int), cudaMemcpyDeviceToHost);
    // Output to file
    FILE *out = fopen(argv[2], "w");
    if (out == NULL) {
        printf("Error opening output file!");
        return -1;
    }
    print_matrix_file(out, data, m, n);
    fclose(in);
    fclose(out);
    // Free allocated memory
    free(data);
    cudaFree(ddata);
    for(int i=0; i<q; i++) {
        free(queries[i]);
        cudaFree(dqueries[i]);
    }
    // BUG FIX: the device array of query pointers was never freed.
    cudaFree(dquerieslist);
    return 0;
}
|
22,496 | #include "date.hh"
#include <chrono>
namespace date
{
    // Milliseconds elapsed since the Unix epoch, per the system clock.
    long now()
    {
        using namespace std::chrono;
        auto since_epoch = system_clock::now().time_since_epoch();
        return duration_cast<milliseconds>(since_epoch).count();
    }
}
|
22,497 | #include<stdio.h>
#include<stdlib.h>
// This value is the largest subsets we must unrank.
#define LARGEST_SUBSET 5
// This is value is the largest number a graph could have.
// It is 2^(LARGEST_SUBSET*LARGEST_SUBSET)-1 (i.e. the LARGEST_SUBSET x LARGEST_SUBSET matrix of all 1's).
#define SMALLEST_GRAPH 33554431
// Device code to compute the binomial coefficient "n choose k" using the
// multiplicative formula. Each partial product binom(n, i+1) * (i+1) is
// exactly divisible, so integer division loses nothing.
__device__ int binom_d(int n, int k){
    if (k > n)
        return 0;
    if (k == n)
        return 1;
    int acc = 1;
    for (int i = 0; i < k; i++)
        acc = acc * (n - i) / (i + 1);
    return acc;
}
// Unrank: map initial_value (0 <= value < binom(n,k)) to the corresponding
// k-combination of {0..n-1} in lexicographic order; the chosen elements are
// written to Kset in ascending order. Kset must have room for k ints.
__device__ void unrank_combination(int n, int k, int initial_value, int* Kset) {
    int cash_on_hand = initial_value;
    int digit;
    int cost_to_increment;
    Kset[0] = 0; //Initialize the first element.
    //Each of the following elements will start off one bigger than the previous element.
    //Use the cash_on_hand value to "pay" for incrementing each digit.
    //Pay 1-unit for each combination that is "skipped" over.
    //E.g. To increment the 0 in 0, 1, 2, ..., k-1 to a 1 (and force the others to increment to 2, 3, ..., k)
    //it would cost binom(n-1, k-1) since we skipped over each combination of the form
    // 0 * * * ... * and there are binom(n-1, k-1) of those combinations
    for(digit=0; digit<k-1; digit++){
        //There are n-1-Kset[digit] elements left to choose from.
        //Those elements must be used to fill k-1-digit places.
        cost_to_increment = binom_d( n-1-Kset[digit], k-1-digit );
        while(cost_to_increment <= cash_on_hand){
            Kset[digit]++;
            cash_on_hand = cash_on_hand - cost_to_increment;
            cost_to_increment = binom_d( n-1-Kset[digit], k-1-digit );
        }
        Kset[digit+1] = Kset[digit]+1; //Initialize the next element of Kset making sure the elements
                                       //come in sorted order.
    }
    //Kset[k-1] has been initialized to Kset[k-2]+1 (last step).
    //Now, if there is anything left to pay, we simply increment Kset[k-1] by this amount.
    Kset[k-1] += cash_on_hand;
}
// The Myrvold/Ruskey linear time algorithm for unranking permutations.
// See: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.9916&rep=rep1&type=pdf [4 pages]
// Perm must hold the identity permutation [0..k-1] on entry; on exit it holds
// the permutation of rank `initial_value` (0 <= initial_value < k!).
__device__ void unrank_permutation(int k, int initial_value, int* Perm){
    int r = initial_value;
    int tmp;
    int n, index;
    for(n=k; n>0; n--){
        index = r % n;
        //swap Perm[n-1] with Perm[index]
        tmp = Perm[n-1];
        Perm[n-1] = Perm[index];
        Perm[index] = tmp;
        // BUG FIX: Myrvold/Ruskey divides the rank by n (the number of
        // choices consumed at this step), not by (index + 1). The old
        // r = r / (index + 1) made the map non-bijective — e.g. for k = 3,
        // ranks 0 and 4 both produced the same permutation.
        r = r / n;
    }
}
// IMPORTANT: we must make sure that the number of subsets (N choose K) is a value that fits in an integer type (probably 4 bytes).
// Otherwise, "my_subset" will overflow and not be able to write to its location in the array.
// We don't allow bigger than 4 byte values of "my_subset" because we don't want to assume that we have more than 4 GB of memory on the card.
//
// One thread per k-subset of the n vertices: unrank the subset, extract the
// induced k x k adjacency submatrix from A, then try all KFAC (= k!) vertex
// permutations and record the numerically smallest bitmask encoding of the
// induced subgraph (a canonical form) into Results[my_subset - offset].
__global__ void examine_subsets(int n, int k, long offset, long MAX, int KFAC, short* A, int* Results){
    const long my_subset = threadIdx.x + blockIdx.x*blockDim.x + offset;
    if( my_subset < MAX ){
        int i, j;
        int Kset[LARGEST_SUBSET];
        for(i=0; i<k; i++){
            Kset[i] = 0;
        }
        unrank_combination(n, k, my_subset, Kset); //unrank should modify Kset to be the selection of k vertices to examine.
        short local_A[LARGEST_SUBSET*LARGEST_SUBSET];
        //short local_B[LARGEST_SUBSET*LARGEST_SUBSET];
        for(i=0; i<k; i++){
            for(j=0; j<k; j++){
                local_A[i*k + j] = A[ Kset[i]*n + Kset[j] ]; //A is n by n (in 1D form) but local_A is k by k (in 1D form)
            }
        }
        //Apply permutations to the vertices of local_A to try to create the "smallest" representative graph.
        int permutation_number;
        int Perm[LARGEST_SUBSET];
        int smallest_graph = SMALLEST_GRAPH; //Default "maximum" graph.
        int this_graph;
        for(permutation_number=0; permutation_number < KFAC; permutation_number++){
            //Apply the Myrvold/Rusky Linear time unranking algorithm to determine the permutation.
            for(i=0; i<k; i++){
                Perm[i] = i; //Initialize the permutation to the identity
            }
            unrank_permutation(k, permutation_number, Perm);
            this_graph = 0;
            for(i=0; i<k; i++){
                for(j=0; j<k; j++){
                    //Applying Perm to local_A sends vertex i to Perm[i], vertex j to Perm[j].
                    //local_B[ i*k + j] = local_A[ Perm[i]*k + Perm[j] ];
                    if( local_A[Perm[i]*k + Perm[j]] == 1 ){
                        this_graph = this_graph | (1 << (i*k + j));  // set bit (i,j) of the encoding
                    }
                }
            }
            if(this_graph < smallest_graph){
                smallest_graph = this_graph;
            }
        }
        Results[my_subset-offset] = smallest_graph;
    }
}
// Host-side "n choose k" via the multiplicative formula; every intermediate
// product is exactly divisible, so integer arithmetic is exact.
int binom_h(int n, int k){
    int acc = 1;
    for (int term = 0; term < k; term++)
        acc = acc * (n - term) / (term + 1);
    return acc;
}
// Driver: builds a dummy adjacency matrix, launches examine_subsets over all
// binom(N, K) vertex subsets, and prints each subset's canonical subgraph code.
int main(int argc, char** argv){
    // FIX: argv was dereferenced without checking argc.
    if (argc < 4) {
        printf("usage: %s N K threads_per_block\n", argv[0]);
        return 1;
    }
    int N = atoi( argv[1] ); //Number of vertices in the graph
    int K = atoi( argv[2] ); //Size of the subsets to examine (upper bounded by LARGEST_SUBSET)
    int threads_per_block = atoi( argv[3] );
    short* h_A;
    short* d_A;
    int* h_Results;
    int* d_Results;
    int number_of_subsets = binom_h(N, K);
    int size_of_subsets = number_of_subsets*sizeof(int);
    int size_of_A = N*N*sizeof(short);
    h_A = (short *) malloc( size_of_A );
    h_Results = (int *) malloc( size_of_subsets );
    cudaSetDevice(1);
    cudaMalloc((void **) &d_A, size_of_A);
    cudaMalloc((void **) &d_Results, size_of_subsets);
    int i;
    int KFAC = 1;  // K! — permutations tried per subset
    for(i=2; i<=K; i++){
        KFAC *= i;
    }
    // h_A is the adjacency matrix (dummy example: edges 0-1 and 0-2).
    // MAKE SURE THIS IS THE ADJACENCY MATRIX YOU WANT TO EXAMINE.
    for(i=0; i<N*N; i++){
        h_A[i] = 0;
    }
    h_A[ 0*N+1 ] = 1; //h_A[0][1] = 1
    h_A[ 0*N+2 ] = 1; //h_A[0][2] = 1
    // h_Results[i] will hold the integer encoding the graph found at subset i.
    // BUG FIX: the array has number_of_subsets entries; the old loop only
    // initialized the first N of them.
    for(i=0; i<number_of_subsets; i++){
        h_Results[i]=-1;
    }
    //Copy adjacency matrix and results array to the device
    cudaMemcpy(d_A, h_A, size_of_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Results, h_Results, size_of_subsets, cudaMemcpyHostToDevice);
    examine_subsets<<<(number_of_subsets+threads_per_block-1)/threads_per_block, threads_per_block>>>(N, K, 0, number_of_subsets, KFAC, d_A, d_Results);
    printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
    // The blocking memcpy below synchronizes with the kernel, so the old
    // post-copy cudaDeviceSynchronize() was redundant and has been removed.
    cudaMemcpy(h_Results, d_Results, size_of_subsets, cudaMemcpyDeviceToHost);
    for(i=0; i<number_of_subsets; i++){
        if(h_Results[i] != 0){
            printf("%d\t%d\n", i, h_Results[i]);
        }
    }
    free(h_A);
    free(h_Results);
    cudaFree(d_A);
    cudaFree(d_Results);
    return 0;
}
|
22,498 | #include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#define MAX_VALUE ((1UL << 24) + 1U)
#define BLOCK_DIM (16U)
#define GRID_DIM (16U)
typedef unsigned uint;
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
// Histogram phase of counting sort: grid-stride loop, one atomicAdd per
// element into counts[value]. `counts` must be zeroed by the caller.
__global__ static void hist(
    const int * __restrict__ const input,
    int * __restrict__ const counts,
    const uint count
) {
    const uint stride = blockDim.x * gridDim.x;
    uint i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < count) {
        atomicAdd(&counts[input[i]], 1);
        i += stride;
    }
}
// Scatter phase of counting sort. On entry `counts` holds the inclusive
// prefix sums of the histogram, so atomicAdd(..., -1) hands out positions
// from the top of each value's bucket downwards; the extra -1 converts the
// 1-based running count into a 0-based output index. Equal keys are
// identical ints, so the reversed in-bucket order is unobservable.
__global__ static void sort(
    const int * __restrict__ const input,
    int * __restrict__ const counts,
    int * __restrict__ const output,
    const uint count
) {
    // Grid-stride loop over the input.
    const uint offset = blockDim.x * gridDim.x;
    uint idx = threadIdx.x + blockIdx.x * blockDim.x;
    for (; idx < count; idx += offset) {
        const int i = atomicAdd(counts + input[idx], -1) - 1;
        output[i] = input[idx];
    }
}
// Counting sort driver. Binary protocol: stdin carries a 4-byte element
// count followed by that many ints; the sorted ints are written to stdout.
int main(void) {
    uint count;
    // FIX: fread results were ignored; truncated input silently produced
    // garbage.
    if (fread(&count, sizeof(uint), 1U, stdin) != 1U) {
        fprintf(stderr, "ERROR: failed to read element count\n");
        return 0;
    }
    const uint size = sizeof(int) * count;
    int *buffer = (int *) malloc(size);
    if (fread(buffer, sizeof(int), count, stdin) != count) {
        fprintf(stderr, "ERROR: failed to read input data\n");
        free(buffer);
        return 0;
    }
    int *deviceInput;
    CSC(cudaMalloc(&deviceInput, size));
    CSC(cudaMemcpy(deviceInput, buffer, size, cudaMemcpyHostToDevice));
    // One counter per possible key value, zero-initialized.
    int *deviceCounts;
    CSC(cudaMalloc(&deviceCounts, sizeof(int) * MAX_VALUE));
    CSC(cudaMemset(deviceCounts, 0, sizeof(int) * MAX_VALUE));
    int *deviceOutput;
    CSC(cudaMalloc(&deviceOutput, size));
    cudaEvent_t startTime, endTime;
    CSC(cudaEventCreate(&startTime));
    CSC(cudaEventCreate(&endTime));
    CSC(cudaEventRecord(startTime));
    // Counting sort: histogram -> inclusive scan -> scatter.
    hist<<<GRID_DIM, BLOCK_DIM>>>(deviceInput, deviceCounts, count);
    thrust::inclusive_scan(thrust::device,
        deviceCounts, deviceCounts + MAX_VALUE, deviceCounts
    );
    sort<<<GRID_DIM, BLOCK_DIM>>>(deviceInput, deviceCounts, deviceOutput, count);
    CSC(cudaGetLastError());
    CSC(cudaEventRecord(endTime));
    CSC(cudaEventSynchronize(endTime));
    float t;
    CSC(cudaEventElapsedTime(&t, startTime, endTime));
    CSC(cudaEventDestroy(startTime));
    CSC(cudaEventDestroy(endTime));
    CSC(cudaMemcpy(buffer, deviceOutput, sizeof(int) * count,
        cudaMemcpyDeviceToHost
    ));
    CSC(cudaFree(deviceInput));
    // BUG FIX: deviceCounts (64 MB) was never freed.
    CSC(cudaFree(deviceCounts));
    CSC(cudaFree(deviceOutput));
    fwrite(buffer, sizeof(int), count, stdout);
    free(buffer);
    return 0;
}
|
22,499 | #include <string.h>
#include <stdint.h>
#include <sys/types.h>
#include "seq_sha1.cuh"
#ifdef HMAC_SHA1_DATA_PROBLEMS
unsigned int sha1_data_problems = 1;
#endif
/*
 * HMAC-SHA1 (RFC 2104): digest = SHA1(K XOR opad || SHA1(K XOR ipad || text)).
 *   text/text_len : message to authenticate
 *   key/key_len   : secret key; keys longer than 64 bytes are first reduced
 *                   to SHA1(key) (20 bytes)
 *   digest        : out parameter, receives the 20-byte MAC
 * The HMAC_SHA1_DATA_PROBLEMS blocks are debug tracing only.
 */
void lrad_hmac_sha1(const unsigned char *text, int text_len,
                    const unsigned char *key, int key_len,
                    unsigned char *digest)
{
    SHA1_CTX context;
    unsigned char k_ipad[65]; /* inner padding -
                               * key XORd with ipad
                               */
    unsigned char k_opad[65]; /* outer padding -
                               * key XORd with opad
                               */
    unsigned char tk[20];
    int i;
    /* if key is longer than 64 bytes reset it to key=SHA1(key) */
    if (key_len > 64) {
        SHA1_CTX tctx;
        SHA1Init(&tctx);
        SHA1Update(&tctx, key, key_len);
        SHA1Final(tk, &tctx);
        key = tk;
        key_len = 20;
    }
#ifdef HMAC_SHA1_DATA_PROBLEMS
    /* Debug dump of key and message, 4-byte groups separated by '_'. */
    if(sha1_data_problems)
    {
        int j,k;
        printf("\nhmac-sha1 key(%d): ", key_len);
        j=0; k=0;
        for (i = 0; i < key_len; i++) {
            if(j==4) {
                printf("_");
                j=0;
            }
            j++;
            printf("%02x", key[i]);
        }
        printf("\nDATA: (%d)     ",text_len);
        j=0; k=0;
        for (i = 0; i < text_len; i++) {
            if(k==20) {
                printf("\n            ");
                k=0;
                j=0;
            }
            if(j==4) {
                printf("_");
                j=0;
            }
            k++;
            j++;
            printf("%02x", text[i]);
        }
        printf("\n");
    }
#endif
    /*
     * the HMAC_SHA1 transform looks like:
     *
     * SHA1(K XOR opad, SHA1(K XOR ipad, text))
     *
     * where K is an n byte key
     * ipad is the byte 0x36 repeated 64 times
     * opad is the byte 0x5c repeated 64 times
     * and text is the data being protected
     */
    /* start out by storing key in pads (zero padded to 64 bytes) */
    memset( k_ipad, 0, sizeof(k_ipad));
    memset( k_opad, 0, sizeof(k_opad));
    memcpy( k_ipad, key, key_len);
    memcpy( k_opad, key, key_len);
    /* XOR key with ipad and opad values */
    for (i = 0; i < 64; i++) {
        k_ipad[i] ^= 0x36;
        k_opad[i] ^= 0x5c;
    }
    /*
     * perform inner SHA1
     */
    SHA1Init(&context);                   /* init context for 1st
                                           * pass */
    SHA1Update(&context, k_ipad, 64);     /* start with inner pad */
    SHA1Update(&context, text, text_len); /* then text of datagram */
    SHA1Final(digest, &context);          /* finish up 1st pass */
    /*
     * perform outer SHA1 (comment previously said MD5)
     */
    SHA1Init(&context);                   /* init context for 2nd
                                           * pass */
    SHA1Update(&context, k_opad, 64);     /* start with outer pad */
    SHA1Update(&context, digest, 20);     /* then results of 1st
                                           * hash */
    SHA1Final(digest, &context);          /* finish up 2nd pass */
#ifdef HMAC_SHA1_DATA_PROBLEMS
    /* Debug dump of the resulting MAC. */
    if(sha1_data_problems)
    {
        int j;
        printf("\nhmac-sha1 mac(20): ");
        j=0;
        for (i = 0; i < 20; i++) {
            if(j==4) {
                printf("_");
                j=0;
            }
            j++;
            printf("%02x", digest[i]);
        }
        printf("\n");
    }
#endif
}
/*
Test Vectors from RFC 2202 (trailing '\0' of a character string not included in test):
key = "Jefe"
key_len = 4 bytes
data = "what do ya want for nothing?"
data_len = 28 bytes
digest = 0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
key = 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
key_len = 20 bytes
data = 0xDD repeated 50 times
data_len = 50 bytes
digest = 0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
(Note: the previously listed 0x56be34521d144c88dbb8c733f0e8b3f6 is the
HMAC-MD5 result for a 16-byte 0xAA key from RFC 2202, not an HMAC-SHA1
digest.)
*/
#ifdef TESTING
/*
* cc -DTESTING -I ../include/ hmac.c md5.c -o hmac
*
* ./hmac Jefe "what do ya want for nothing?"
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * Standalone test driver (built with -DTESTING):
 *   ./hmac <key> <text>
 * prints the HMAC-SHA1 of <text> under <key> as 40 hex digits.
 */
int main(int argc, char **argv)
{
unsigned char digest[20];
char *key;
int key_len;
char *text;
int text_len;
int i;
/* Guard against missing arguments; the original dereferenced
 * argv[1]/argv[2] unconditionally (NULL deref / crash). */
if (argc < 3) {
fprintf(stderr, "usage: %s <key> <text>\n", argv[0]);
return 1;
}
key = argv[1];
key_len = strlen(key);
text = argv[2];
text_len = strlen(text);
/* Explicit casts: lrad_hmac_sha1 takes const unsigned char*, and
 * char* does not implicitly convert to it under C++/nvcc. */
lrad_hmac_sha1((const unsigned char *)text, text_len,
(const unsigned char *)key, key_len, digest);
for (i = 0; i < 20; i++) {
printf("%02x", digest[i]);
}
printf("\n");
/* return instead of exit(0) followed by unreachable return 0 */
return 0;
}
#endif
|
22,500 | /*
* Title: prefixScan.cu
* Author: 陈志韬
* Student ID: SA12011089
*/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
/*#include<c*/
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
/*
 * Shared-memory padding to avoid bank conflicts: one extra slot for
 * every NUM_BANKS elements.
 *
 * The GPU Gems 3 listing "((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))"
 * is a known erratum: '+' binds tighter than '>>', so it parsed as
 * (n) >> (NUM_BANKS + (n)) >> 8 — an undefined shift for n >= 16 and a
 * zero/garbage offset otherwise.  Use the corrected CUDA SDK form.
 */
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#define DATA_SIZE 32
#define DEFAULT_BLOCK_SIZE 256
#define RAND_INT 50
//Only generate integers with value 0 or 1
#define MAX_INT_SIZE 2
typedef float calcType;
/*
 * Fill array[0..m-1] with pseudo-random values in [0, RAND_INT].
 * random() is deliberately left unseeded (the srand call is commented
 * out) so successive runs are repeatable.
 */
void randomInit(calcType *array, int m)
{
    /* srand(time(0)); */
    int idx = 0;
    while (idx < m) {
        array[idx] = ((calcType)random() / RAND_MAX) * RAND_INT;
        ++idx;
    }
}
/*
 * Fill array[0..m-1] with pseudo-random integers in [0, MAX_INT_SIZE).
 * Like the float overload, left unseeded on purpose for repeatability.
 */
void randomInit(int *array, int m)
{
    /* srand(time(0)); */
    int idx = 0;
    while (idx < m) {
        array[idx] = random() % MAX_INT_SIZE;
        ++idx;
    }
}
/* Print the first m elements of array, space-separated, ending with a
 * newline. */
void printArray(calcType * array, int m)
{
    for (int idx = 0; idx < m; ++idx)
        printf("%f ", array[idx]);
    printf("\n");
}
/*
 * CPU reference: exclusive prefix sum of in[] into out[], both of
 * length m (out[0] = 0, out[j] = in[0] + ... + in[j-1]; in[m-1] never
 * contributes).  Note: out[0] is written even when m == 0, matching
 * the original behavior.
 */
void cpuCalc(calcType *out, const calcType *in, int m)
{
    out[0] = 0;
    int j = 1;
    while (j < m) {
        out[j] = out[j - 1] + in[j - 1];
        ++j;
    }
}
/*
 * Naive (Hillis-Steele style) exclusive prefix scan using two
 * ping-pong buffers in shared memory.  Launch expectations (from the
 * call in main): a single block with blockDim.x == n and
 * 2*n*sizeof(calcType) bytes of dynamic shared memory.
 */
__global__ void prescanEasy(calcType *g_odata, calcType *g_idata, int n)
{
extern __shared__ calcType temp[]; // two n-element ping-pong buffers
int thid = threadIdx.x;
int pout = 0, pin = 1; // which half of temp is output / input
// Load input shifted right by one (exclusive scan): slot 0 becomes 0.
/*g_odata[thid] = g_idata[thid];*/
temp[pout*n + thid] = (thid > 0) ? g_idata[thid - 1] : 0;
// The next line is not in GPU Gems 3; it was added while hunting the
// bug, but after fixing the line in the loop below it has no effect
// on the result.
//temp[pin*n + thid] = (thid > 0) ? g_idata[thid - 1] : 0;
__syncthreads();
/*for(int offset = 1; offset < n; offset *= 2)*/
for(int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout; // swap the double-buffer indices
pin = 1 - pin;
if( thid >= offset)
{
// The GPU Gems 3 sample reads and writes the SAME buffer here:
//temp[pout*n+thid] += temp[pin*n+thid - offset];
// Fixed form: read only from the pin buffer, write only to pout.
temp[pout*n + thid] = temp[pin*n + thid] + temp[pin*n + thid -offset];
}
else
temp[pout*n + thid] = temp[pin*n + thid]; // below offset: copy through
__syncthreads();
}
g_odata[thid] = temp[pout*n + thid]; // final scan value to global memory
}
/*
 * Work-efficient Blelloch exclusive scan (GPU Gems 3, ch. 39), single
 * block, without bank-conflict padding.  Each thread loads and stores
 * TWO elements, so this kernel expects blockDim.x == n/2, n a power of
 * two, and n*sizeof(calcType) bytes of dynamic shared memory.
 * NOTE(review): main() launches it with blockDim.x == DATA_SIZE, which
 * would make the upper half of the block index g_idata/g_odata out of
 * bounds — confirm the intended launch uses n/2 threads.
 */
__global__ void prescanBank(calcType *g_odata, calcType *g_idata, int n)
{
extern __shared__ calcType temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1; // stride between the two tree nodes combined per level
int d, ai, bi;
calcType t;
//Below is A: each thread loads its two input elements
temp[2*thid] = g_idata[2*thid]; // load input into shared memory
temp[2*thid+1] = g_idata[2*thid+1];
//A end
for (d = n>>1; d > 0; d >>= 1) // build sum in place up the tree (up-sweep)
{
__syncthreads();
if (thid < d)
{
//Below is B: left/right child indices at this tree level
ai = offset*(2*thid+1)-1;
bi = offset*(2*thid+2)-1;
//B end
temp[bi] += temp[ai]; // right node accumulates the left subtree sum
}
offset *= 2;
}
//Below is C
if (thid == 0) { temp[n - 1] = 0; } // clear the last element (exclusive-scan identity)
//C end
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan (down-sweep)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
//Below is D: same index pattern as the up-sweep
ai = offset*(2*thid+1)-1;
bi = offset*(2*thid+2)-1;
//D end
t = temp[ai]; // swap-and-add: left child takes the parent's
temp[ai] = temp[bi]; // partial sum; right child adds the old
temp[bi] += t; // left value on top of it
}
}
__syncthreads();
//Below is E: write both results back to global memory
g_odata[2*thid] = temp[2*thid]; // write results to device memory
g_odata[2*thid+1] = temp[2*thid+1];
//E end
}
/*
 * Same Blelloch exclusive scan as prescanBank, plus bank-conflict
 * avoidance: every shared-memory index is displaced by
 * CONFLICT_FREE_OFFSET(i).  Expects blockDim.x == n/2, n a power of
 * two, and dynamic shared memory large enough for n elements plus the
 * padding.
 * NOTE(review): as with prescanBank, main() launches this with
 * blockDim.x == DATA_SIZE — confirm the launch uses n/2 threads.
 */
__global__ void prescan(calcType *g_odata, calcType *g_idata, int n)
{
extern __shared__ calcType temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
//Below is A: load two elements per thread at padded positions
/*temp[2*thid] = g_idata[2*thid]; // load input into shared memory*/
/*temp[2*thid+1] = g_idata[2*thid+1];*/
int ai = thid; // first element: lower half of the array
int bi = thid + (n/2); // second element: upper half
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
temp[ai + bankOffsetA] = g_idata[ai];
temp[bi + bankOffsetB] = g_idata[bi];
//A end
for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
//Below is B: child indices plus their padding offsets
/*int ai = offset*(2*thid+1)-1;*/
/*int bi = offset*(2*thid+2)-1;*/
int ai = offset * (2*thid + 1) - 1; // shadows the outer ai/bi on purpose
int bi = offset * (2*thid + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
//B end
temp[bi] += temp[ai];
}
offset *= 2;
}
//Below is C: clear the (padded) last element before the down-sweep
/*if (thid == 0) { temp[n - 1] = 0; } // clear the last element*/
if(thid == 0) { temp[n - 1 + CONFLICT_FREE_OFFSET(n -1)] = 0;}
//C end
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
//Below is D: same padded index pattern as the up-sweep
/*int ai = offset*(2*thid+1)-1;*/
/*int bi = offset*(2*thid+2)-1;*/
int ai = offset * (2*thid + 1) - 1;
int bi = offset * (2*thid + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
//D end
calcType t = temp[ai]; // swap-and-add as in prescanBank
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
//Below is E: write back from the padded positions (uses the OUTER ai/bi)
/*g_odata[2*thid] = temp[2*thid]; // write results to device memory*/
/*g_odata[2*thid+1] = temp[2*thid+1];*/
g_odata[ai] = temp[ai + bankOffsetA];
g_odata[bi] = temp[bi + bankOffsetB];
//E end
}
/*
 * Host driver: generates DATA_SIZE random inputs, computes a CPU
 * reference exclusive scan, then times the three GPU scan kernels in
 * turn.  Only the result of the LAST kernel (prescan) is copied back
 * and printed, as in the original program flow.
 */
int main(int argc, char *argv[])
{
    int data_bytes = DATA_SIZE * sizeof(calcType);
    calcType *h_out = 0, *d_out = 0;
    calcType *h_in = 0, *d_in = 0;
    calcType *cpu = 0;
    h_out = (calcType*)malloc(data_bytes);
    h_in = (calcType*)malloc(data_bytes);
    cpu = (calcType*)malloc(data_bytes);
    cudaMalloc((void**)&d_out, data_bytes);
    cudaMalloc((void**)&d_in, data_bytes);
    /* Check `cpu` too — the original test forgot it. */
    if (0 == h_out || 0 == h_in || 0 == cpu || 0 == d_out || 0 == d_in)
    {
        printf("Couldn't allocate memory\n");
        return 1;
    }
    randomInit(h_in, DATA_SIZE);
    printArray(h_in, DATA_SIZE);
    cudaMemcpy(d_in, h_in, data_bytes, cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, data_bytes);
    /* CPU reference result */
    cpuCalc(cpu, h_in, DATA_SIZE);
    printArray(cpu, DATA_SIZE);
    /* GPU execution: all three kernels assume a single block. */
    dim3 grid, block;
    float time;
    grid.x = 1;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);   /* was a comma-operator typo */
    cudaEventCreate(&stop);
    /* prescanEasy processes ONE element per thread. */
    block.x = DATA_SIZE;
    cudaEventRecord(start, 0);
    prescanEasy<<<grid, block, data_bytes * 2>>>(d_out, d_in, DATA_SIZE);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    if (cudaGetLastError() != cudaSuccess)
        fprintf(stderr, "prescanEasy launch failed\n");
    printf("PrescanEasy Time elapsed: %fms\n", time);
    /*
     * prescanBank and prescan each process TWO elements per thread, so
     * they need DATA_SIZE/2 threads.  The original launched them with
     * DATA_SIZE threads, making the upper half of the block read and
     * write g_idata/g_odata out of bounds.
     */
    block.x = DATA_SIZE / 2;
    cudaEventRecord(start, 0);
    prescanBank<<<grid, block, data_bytes * 2>>>(d_out, d_in, DATA_SIZE);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    if (cudaGetLastError() != cudaSuccess)
        fprintf(stderr, "prescanBank launch failed\n");
    printf("PrescanBank Time elapsed: %fms\n", time);
    cudaEventRecord(start, 0);
    prescan<<<grid, block, data_bytes * 2>>>(d_out, d_in, DATA_SIZE);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    if (cudaGetLastError() != cudaSuccess)
        fprintf(stderr, "prescan launch failed\n");
    printf("Prescan Time elapsed: %fms\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* Blocking memcpy also synchronizes before reading results. */
    cudaMemcpy(h_out, d_out, data_bytes, cudaMemcpyDeviceToHost);
    printArray(h_out, DATA_SIZE);
    free(h_in);
    free(h_out);
    free(cpu);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.