serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
2,801 | // P2P Test by Greg Gutmann
// https://codingbyexample.com/2020/09/14/p2p-memcpy-with-nvlink/
#include "stdio.h"
#include "stdint.h"
int main()
{
    // Devices participating in the peer-to-peer copy test.
    int gpuid_0 = 0;
    int gpuid_1 = 1;
    // Copy size: 2^26 bytes (64 MiB, ~67 MB). A shift avoids the
    // double-precision pow() the original used (which also relied on
    // <math.h> without including it).
    uint32_t size = 1u << 26;
    // One buffer on each device.
    uint32_t* dev_0;
    cudaSetDevice(gpuid_0);
    cudaMalloc((void**)&dev_0, size);
    uint32_t* dev_1;
    cudaSetDevice(gpuid_1);
    cudaMalloc((void**)&dev_1, size);
    // Check for peer access between participating GPUs (both directions).
    int can_access_peer_0_1;
    int can_access_peer_1_0;
    cudaDeviceCanAccessPeer(&can_access_peer_0_1, gpuid_0, gpuid_1);
    cudaDeviceCanAccessPeer(&can_access_peer_1_0, gpuid_1, gpuid_0);
    printf("cudaDeviceCanAccessPeer(%d->%d): %d\n", gpuid_0, gpuid_1, can_access_peer_0_1);
    printf("cudaDeviceCanAccessPeer(%d->%d): %d\n", gpuid_1, gpuid_0, can_access_peer_1_0);
    if (can_access_peer_0_1 && can_access_peer_1_0) {
        // Enable direct P2P access in both directions; when disabled the
        // copies below are staged through host memory by the driver.
        cudaSetDevice(gpuid_0);
        cudaDeviceEnablePeerAccess(gpuid_1, 0);
        cudaSetDevice(gpuid_1);
        cudaDeviceEnablePeerAccess(gpuid_0, 0);
    }
    // Timing events and a non-blocking stream so all copies queue
    // asynchronously between the two event records.
    uint32_t repeat = 10;
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaStream_t stream;
    cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
    // ~~ Start Test ~~
    cudaEventRecord(start, stream);
    // Repeated device-to-device copies; unsigned loop index matches
    // `repeat` (the original compared signed int against uint32_t).
    for (uint32_t i = 0; i < repeat; ++i) {
        cudaMemcpyAsync(dev_0, dev_1, size, cudaMemcpyDeviceToDevice, stream);
    }
    cudaEventRecord(stop, stream);
    // Stream sync guarantees the stop event completed before we read it.
    cudaStreamSynchronize(stream);
    // ~~ End of Test ~~
    // Check timing and compute effective bandwidth.
    float time_ms;
    cudaEventElapsedTime(&time_ms, start, stop);
    double time_s = time_ms / 1e3;
    double gb = size * repeat / (double)1e9; // total bytes moved, in GB
    double bandwidth = gb / time_s;
    printf("Seconds: %f\n", time_s);
    printf("Unidirectional Bandwidth: %f (GB/s)\n", bandwidth);
    if (can_access_peer_0_1 && can_access_peer_1_0) {
        // Shutdown P2P settings.
        cudaSetDevice(gpuid_0);
        cudaDeviceDisablePeerAccess(gpuid_1);
        cudaSetDevice(gpuid_1);
        cudaDeviceDisablePeerAccess(gpuid_0);
    }
    // Clean up.
    cudaFree(dev_0);
    cudaFree(dev_1);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaStreamDestroy(stream);
}
|
2,802 | //Parallel Programming Final Project (CUDA)
//Team: 22
//ver 2.4 2018/12/16 21:15
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <vector>
#include <time.h>
#include <cuda.h>
using namespace std;
int NUM_STEPS;
int NUM_DATA;
double *C_gpu,
*P_gpu,
*C_price_gpu,
*P_price_gpu;
struct data {
double K;
double S;
double r;
double v;
double T;
double dt;
double vdt;
double u;
double d;
double p;
double* last_C;
double* last_P;
double C_price;
double P_price;
};
typedef struct data option_data;
// Populate shared market parameters and the derived CRR binomial-tree
// quantities for one option, and allocate its terminal-payoff arrays
// (NUM_STEPS+1 nodes each). K, S and the payoff values are filled in by
// the caller afterwards.
void init(option_data *in){
in->r = 0.005 ; // Risk-free rate: 0.005 = 0.5% (original comment said 5%, which contradicts the code)
in->v = 0.3 ; // Volatility of the underlying: 0.3 = 30% (original comment said 20%)
in->T = 1.0; // One year until expiry
in->dt = in->T/NUM_STEPS; // Length of one time step
in->vdt = in->v*sqrt(in->dt); // Volatility scaled to one step
in->u = exp(in->vdt); // Up-move factor of the binomial tree
in->d = 1/in->u; // Down-move factor (reciprocal of u)
in->p = (exp(in->r*in->dt)-in->d)/(in->u-in->d); // Risk-neutral up probability
in->last_C = new double [NUM_STEPS+1]; // Terminal call payoffs (never deleted -- leaks; see main)
in->last_P = new double [NUM_STEPS+1]; // Terminal put payoffs (never deleted -- leaks; see main)
}
// Call option payoff at terminal tree node i: the underlying price at node
// i is S * exp(vDt * (2*i - NUM_STEPS)); the call pays its positive excess
// over the strike K.
double CallOption(const double& S,const double& K,const double& vDt,const int& i){
    const double payoff = S * exp(vDt * (2.0 * i - NUM_STEPS)) - K;
    return payoff > 0.0 ? payoff : 0.0;
}
// Put option payoff at terminal tree node i: the underlying price at node
// i is S * exp(vDt * (2*i - NUM_STEPS)); the put pays the strike's positive
// excess over that price.
double PutOption(const double& S,const double& K,const double& vDt,const int& i){
    const double payoff = K - S * exp(vDt * (2.0 * i - NUM_STEPS));
    return payoff > 0.0 ? payoff : 0.0;
}
// Fill temp_data->last_C / last_P with the option payoffs at every terminal
// node of the binomial tree. Node k corresponds to (NUM_STEPS - k) up-moves,
// so index 0 holds the highest terminal price and index NUM_STEPS the lowest.
// The parameter NUM_STEPS shadows the global of the same name.
void last_step_price(option_data* temp_data,int NUM_STEPS){
for(int k = 0; k <=NUM_STEPS; k++){
// Terminal asset price: S * exp(vdt * (#up - #down)), #up - #down = 2*(NUM_STEPS-k) - NUM_STEPS.
double sd = temp_data->S * exp(temp_data->vdt * (2.0 * (NUM_STEPS-k) - NUM_STEPS)) ;
temp_data->last_C[k] = max(sd-temp_data->K,(double)0); // call payoff: max(price - strike, 0)
temp_data->last_P[k] = max(temp_data->K-sd,(double)0); // put payoff: max(strike - price, 0)
}
return;
}
/*
__global__ void trace_back_gpu(double* C_option, double* P_option, double* C_price, double* P_price, double r, double dt, double p, int NUM_DATA, int NUM_STEPS){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ double data_c[2000];
__shared__ double data_p[2000];
for(int i=0; i<(NUM_STEPS+1); i++){
data_c[i] = C_option[x*(NUM_STEPS+1)+i];
data_p[i] = P_option[x*(NUM_STEPS+1)+i];
}
__syncthreads();
if(x<NUM_DATA && y<NUM_STEPS){
for(int t = 0; t < NUM_STEPS; ++t){
data_c[y] = exp(-r*dt) * (p*data_c[y] + (1-p)*data_c[y+1]);
data_p[y] = exp(-r*dt) * (p*data_p[y] + (1-p)*data_p[y+1]);
__syncthreads();
}
}
C_price[x] = data_c[0];
P_price[x] = data_p[0];
return;
}
*/
// Backward induction ("trace back") through the binomial tree on the GPU.
// Launch configuration (from main): grid (NUM_DATA,1), block (1,NUM_STEPS),
// so every thread in a block shares the same x (one option per block) and y
// indexes one tree level. Shared layout: data[0..NUM_STEPS] holds call
// values, data[NUM_STEPS+1 .. 2*(NUM_STEPS+1)-1] holds put values.
// NOTE(review): data[] is a fixed 4000-double array; any launch with
// 2*(NUM_STEPS+1) > 4000 silently corrupts shared memory -- confirm the
// NUM_STEPS bound at the call site.
// NOTE(review): within one induction step, thread y reads data[y+1] while
// thread y+1 overwrites it, with the only barrier placed AFTER both
// accesses -- this is a read/write race unless warp-lockstep execution is
// assumed. The __syncthreads() also sits inside the divergent
// `if (x<NUM_DATA && y<NUM_STEPS)` branch, which is undefined behavior if
// any thread of the block fails the condition.
__global__ void trace_back_gpu(double* C_option, double* P_option, double* C_price, double* P_price, double r, double dt, double p, int NUM_DATA, int NUM_STEPS){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ double data[4000];
// Every thread redundantly copies the full call and put arrays for its option.
for(int i=0; i<(NUM_STEPS+1); i++){
data[i] = C_option[x*(NUM_STEPS+1)+i];
}
for(int i=0; i<(NUM_STEPS+1); i++){
data[i+(NUM_STEPS+1)] = P_option[x*(NUM_STEPS+1)+i];
}
__syncthreads();
if(x<NUM_DATA && y<NUM_STEPS){
// One iteration per tree level: discounted risk-neutral expectation of
// the two child nodes, written in place.
for(int t = 0; t < NUM_STEPS; ++t){
data[y] = exp(-r*dt) * (p*data[y] + (1-p)*data[y+1]);
data[y+(NUM_STEPS+1)] = exp(-r*dt) * (p*data[y+(NUM_STEPS+1)] + (1-p)*data[y+1+(NUM_STEPS+1)]);
__syncthreads();
}
}
// Root node values are the option prices.
C_price[x] = data[0];
P_price[x] = data[NUM_STEPS+1];
return;
}
// Read option data, price each option with the GPU binomial backward
// induction, and print call/put prices.
// Fixes vs. original: non-standard VLAs replaced with heap arrays, unused
// local removed, and all host/device allocations are now released (the
// original leaked every buffer and the per-option structs).
int main(int argc, char **argv){
    if(argc!=3){
        cout << "Input error!\n";
        cout << "./project <NUM_STEPS> <DATA_FILE>\n";
        return 1;
    }
    sscanf(argv[1],"%d",&NUM_STEPS);
    ifstream infile(argv[2]);
    if(!infile) {
        cout << "Can not open input file!\n";
        return 1;
    }
    infile >> NUM_DATA;
    cout << "num_data = " << NUM_DATA << endl;
    cout << "num_step = " << NUM_STEPS << endl;
    // Flattened (NUM_DATA x (NUM_STEPS+1)) layout shared by host and device.
    size_t size = ((NUM_STEPS+1)*NUM_DATA)*sizeof(double);
    vector<option_data*> option_vector;
    for(int i = 0; i < NUM_DATA; ++i){
        double K, S;
        infile >> K >> S;
        option_data *temp_data = new option_data();
        init(temp_data);
        temp_data->K = K;
        temp_data->S = S;
        last_step_price(temp_data,NUM_STEPS);
        option_vector.push_back(temp_data);
    }
    // Pack each option's terminal payoffs into contiguous host arrays.
    double* C_array = new double[(NUM_STEPS+1)*NUM_DATA];
    double* P_array = new double[(NUM_STEPS+1)*NUM_DATA];
    for(int i = 0; i < NUM_DATA; ++i){
        for(int j = 0; j < NUM_STEPS+1; ++j){
            C_array[i*(NUM_STEPS+1)+j] = option_vector[i]->last_C[j];
            P_array[i*(NUM_STEPS+1)+j] = option_vector[i]->last_P[j];
        }
    }
    // All options share r/dt/p because init() assigns identical market data.
    double r = option_vector[0]->r;
    double dt = option_vector[0]->dt;
    double p = option_vector[0]->p;
    double* C_price = new double[NUM_DATA];
    double* P_price = new double[NUM_DATA];
    // One block per option, one thread per tree level.
    // NOTE: requires NUM_STEPS <= 1024 (max threads per block) and
    // 2*(NUM_STEPS+1) <= 4000 (static shared array in trace_back_gpu).
    dim3 dimBlock(1,NUM_STEPS);
    dim3 dimGrid(NUM_DATA,1);
    cudaMalloc((void**)&C_gpu, size);
    cudaMalloc((void**)&P_gpu, size);
    cudaMalloc((void**)&C_price_gpu, NUM_DATA*sizeof(double));
    cudaMalloc((void**)&P_price_gpu, NUM_DATA*sizeof(double));
    cudaMemcpy(C_gpu, C_array, size, cudaMemcpyHostToDevice);
    cudaMemcpy(P_gpu, P_array, size, cudaMemcpyHostToDevice);
    // The original passed a dynamic shared-memory size here, but the kernel
    // only uses a static __shared__ array, so it is omitted.
    trace_back_gpu<<<dimGrid,dimBlock>>>(C_gpu, P_gpu, C_price_gpu, P_price_gpu, r, dt, p, NUM_DATA, NUM_STEPS);
    cudaMemcpy(C_price, C_price_gpu, NUM_DATA*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(P_price, P_price_gpu, NUM_DATA*sizeof(double), cudaMemcpyDeviceToHost);
    // Print results.
    for(int i =0;i<NUM_DATA;i++){
        printf("Data %d: Call Price: %.5f\tPut Price: %.5f\n", i, C_price[i], P_price[i]);
    }
    // Release device and host memory.
    cudaFree(C_gpu);
    cudaFree(P_gpu);
    cudaFree(C_price_gpu);
    cudaFree(P_price_gpu);
    for(int i = 0; i < NUM_DATA; ++i){
        delete[] option_vector[i]->last_C;
        delete[] option_vector[i]->last_P;
        delete option_vector[i];
    }
    delete[] C_array;
    delete[] P_array;
    delete[] C_price;
    delete[] P_price;
    return 0;
}
|
2,803 | // Copyright 2018-2019 Tsinghua University, Author: Hongyu Xiang
// Apache 2.0.
// This file contains functions for calculating the denominator gradients in log domain.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
// for each state
// start_weight
// end_weight
// Transition: float weight, int input_label, int state
// alpha_transition_index
// beta_transition_index
#define CHECK_CUDA(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
#define ATOMIC_CONST 32
#define CU_BLOCK_DIM 1024
// Numerically stable log(exp(a) + exp(b)). -infinity acts as the additive
// identity of the log-semiring, so it is handled explicitly up front.
__host__ __device__
inline float log_plus(float a, float b) {
    const float neg_inf = -float(INFINITY);
    if (a == neg_inf) return b;
    if (b == neg_inf) return a;
    const float hi = a > b ? a : b;
    const float lo = a > b ? b : a;
    // lo - hi == -|a - b|, so exp() never overflows.
    return hi + log1pf(expf(lo - hi));
}
// Atomically perform *addr_f = log_plus(*addr_f, value) using a CAS loop on
// the float's bit pattern (there is no native float log-add atomic).
// Returns the previous value observed by the final successful CAS,
// reinterpreted as float.
__device__ float atomic_log_plus(float *addr_f, float value) {
int *addr = (int*)addr_f; // alias the float as int for atomicCAS
float expected = *addr_f;
float sum = log_plus(expected, value);
int old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum));
// Retry until no other thread modified *addr between our read and the CAS.
while (old_value != __float_as_int(expected)) {
expected = __int_as_float(old_value);
sum = log_plus(expected, value);
old_value = atomicCAS(addr, __float_as_int(expected), __float_as_int(sum));
}
return __int_as_float(old_value);
}
struct Transition {
float weight = -float(INFINITY);
int label = 0;
int state = 0;
};
struct IntPair {
int first = 1;
int second = 0;
};
// <<<batch_size, CU_BLOCK_CONST>>>
// Initialize alpha at t=0: one block per utterance; threads stride over the
// alpha_size states, copying the FST start weights into the first frame of
// that utterance's (T+1) x alpha_size alpha matrix.
__global__ void alpha_first_kernel(float *alpha,
                                   const int alpha_size,
                                   const int batch_size,
                                   const int T,
                                   const float * const start_weight) {
    const int base = (int)blockIdx.x * alpha_size * (T + 1);
    for (int s = threadIdx.x; s < alpha_size; s += blockDim.x) {
        alpha[base + s] = start_weight[s];
    }
}
// One forward-recursion step: compute alpha(t) from alpha(t-1) for every
// state. One block per utterance; threads stride over destination states.
// For state idx, alpha_transition_index[idx] gives the [first, second]
// range of incoming arcs in alpha_transition (first > second encodes "no
// arcs", so the inner loop is skipped and the state stays -inf).
__global__ void alpha_kernel(float *alpha,
const float* const logits,
const int batch_size,
const int T,
const int t,
const int * const input_lengths,
const int alpha_size,
const int logits_size,
const IntPair * const alpha_transition_index,
const Transition * const alpha_transition) {
int mini_batch_idx = blockIdx.x;
int tid = threadIdx.x;
// Skip frames past this utterance's actual length.
if (t > input_lengths[mini_batch_idx]) return;
int idx1 = mini_batch_idx * alpha_size * (T+1) + alpha_size * t; // alpha(t) row
int idx2 = mini_batch_idx * alpha_size * (T+1) + alpha_size * (t-1); // alpha(t-1) row
int idx3 = mini_batch_idx * logits_size * T + logits_size * (t-1); // logits for frame t-1
for (int idx = tid; idx < alpha_size; idx += blockDim.x) {
int start = alpha_transition_index[idx].first;
int end = alpha_transition_index[idx].second;
float result = -float(INFINITY);
// Log-sum over incoming arcs: source alpha + arc weight + emission score.
for (int k = start; k <= end; k++) {
result = log_plus(alpha[idx2+alpha_transition[k].state] +
alpha_transition[k].weight + logits[idx3+alpha_transition[k].label], result);
}
alpha[idx1+idx] = result;
}
}
// Add the FST final (end) weights to the alpha frame at each utterance's
// actual last time step cT = input_lengths[b] -- sequences may be shorter
// than the padded length T. One block per utterance.
__global__ void alpha_last_kernel(float *alpha,
                                  const int alpha_size,
                                  const int batch_size,
                                  const int T,
                                  const int * const input_lengths,
                                  const float * const end_weight) {
    const int b = blockIdx.x;
    const int offset = b * alpha_size * (T + 1) + input_lengths[b] * alpha_size;
    for (int s = threadIdx.x; s < alpha_size; s += blockDim.x) {
        alpha[offset + s] += end_weight[s];
    }
}
// <<< minibatch, N = 32,64,128...>>>
// Reduce the final alpha frame of each utterance to a single log-likelihood
// via a shared-memory log-sum tree reduction. One block per utterance;
// launched with blockDim.x = a power of two (128 in compute_alpha) and
// blockDim.x floats of dynamic shared memory.
// NOTE(review): the final `idx < warpSize` loop has no volatile qualifier
// and no __syncwarp(), so it relies on implicit warp-lockstep execution --
// unsafe on Volta+ independent thread scheduling; confirm target arch.
// It also reads sdata[idx+32], so blockDim.x must be >= 64.
__global__ void alpha_lld_kernal(const float * const alpha,
const int alpha_size,
const int T,
const int * const input_lengths,
float * loglikelihood) {
int mini_batch_idx = blockIdx.x;
int idx = threadIdx.x;
int block_dim = blockDim.x;
int cT = input_lengths[mini_batch_idx]; // actual last frame of this utterance
int last_idx = alpha_size * (T+1) * mini_batch_idx + cT*alpha_size;
// printf("enter alpha_lld_kernal, block.x: %d, thread.x: %d\n", blockIdx.x, threadIdx.x);
extern __shared__ float sdata[];
// Each thread log-sums a strided subset of the final frame's states.
float temp = -float(INFINITY);
for (int i = idx; i < alpha_size; i += block_dim) {
temp = log_plus(temp, alpha[last_idx+i]);
}
sdata[idx] = temp;
__syncthreads();
// Tree reduction down to warp size, with a full barrier per level.
for (int shift = block_dim / 2; shift > warpSize; shift >>= 1) {
if (idx < shift) {
sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]);
}
__syncthreads();
}
// Final warp finishes the reduction without barriers (see NOTE above).
if (idx < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[idx] = log_plus(sdata[idx], sdata[idx+shift]);
}
}
__syncthreads();
if (idx == 0) {
loglikelihood[mini_batch_idx] = sdata[0];
// printf("alpha loglikelihod: %f mini_batch %d\n", loglikelihood[mini_batch_idx], mini_batch_idx);
}
}
// Seed the backward recursion: write the FST end weights into the beta
// frame for each utterance's last time step. beta is double-buffered (two
// frames per utterance); the parity cT % 2 selects the frame that holds
// time step cT. One block per utterance.
__global__ void beta_last_kernel(float *beta,
                                 const int beta_size,
                                 const int batch_size,
                                 const int * const input_lengths,
                                 const float * const end_weight) {
    const int b = blockIdx.x;
    float *frame = beta + b * 2 * beta_size + (input_lengths[b] % 2) * beta_size;
    for (int s = threadIdx.x; s < beta_size; s += blockDim.x) {
        frame[s] = end_weight[s];
    }
}
// Finish the backward recursion: add the FST start weights into each
// utterance's beta frame for t=0 (buffer parity 0 of the double-buffered
// beta). One block per utterance.
__global__ void beta_first_kernel(float *beta,
                                  const int beta_size,
                                  const int batch_size,
                                  const float * const start_weight) {
    float *frame0 = beta + (int)blockIdx.x * 2 * beta_size;
    for (int s = threadIdx.x; s < beta_size; s += blockDim.x) {
        frame0[s] += start_weight[s];
    }
}
// One backward-recursion step at time t, fused with gradient accumulation.
// beta is double-buffered: frame (t+1)%2 holds beta(t+1), frame t%2 receives
// beta(t). For every outgoing arc, the state-occupation term
// alpha(t,state) + [arc contribution of beta(t+1)] is log-added into
// grad_storage. To reduce atomic contention, each label owns ATOMIC_CONST
// accumulator slots and a thread picks slot threadIdx.x % ATOMIC_CONST;
// copy_grad later folds the slots together.
// NOTE(review): idx1 indexes alpha with beta_size strides -- this assumes
// alpha_size == beta_size (same FST state count); confirm at the call site.
__global__ void beta_kernel(float *beta,
const float* const alpha,
const float* const logits,
float *grad_storage,
const int batch_size,
const int T,
const int t,
const int *input_lengths,
const int beta_size,
const int logits_size,
const IntPair * const beta_transition_index,
const Transition * const beta_transition) {
int mini_batch_idx = blockIdx.x;
int tid = threadIdx.x;
// Skip frames at or past this utterance's actual length.
if (t >= input_lengths[mini_batch_idx]) return;
int idx1 = mini_batch_idx * beta_size * (T+1) + beta_size * t; // alpha(t) row
int idx2 = mini_batch_idx * beta_size * 2 + beta_size * ((t+1) % 2); // beta(t+1) buffer
int idx3 = mini_batch_idx * beta_size * 2 + beta_size * (t % 2); // beta(t) buffer (written)
int idx4 = mini_batch_idx * logits_size * T + logits_size * t; // logits for frame t
int idx5 = mini_batch_idx * logits_size * ATOMIC_CONST; // this utterance's grad slots
for (int idx = tid; idx < beta_size; idx += blockDim.x) {
int start = beta_transition_index[idx].first;
int end = beta_transition_index[idx].second;
float beta_result = -float(INFINITY);
float temp_value = -float(INFINITY);
for (int k = start; k <= end; k++) {
// Arc contribution: destination beta + arc weight + emission score.
temp_value = beta[idx2+beta_transition[k].state] + beta_transition[k].weight +
logits[idx4+beta_transition[k].label];
beta_result = log_plus(temp_value, beta_result);
// Unnormalized log occupation probability of this arc's label at time t.
float partial_grad = alpha[idx1+idx] + temp_value;
float *grad_position = grad_storage + idx5 + beta_transition[k].label * ATOMIC_CONST + threadIdx.x % ATOMIC_CONST;
atomic_log_plus(grad_position, partial_grad);
}
beta[idx3+idx] = beta_result;
}
}
// Fold the ATOMIC_CONST per-label accumulator slots of grad_storage into a
// single gradient value per logit, normalize by the utterance
// log-likelihood (exp(grad - lld) = posterior occupation probability), and
// write it into grad_net for frame t. Each slot is reset to -inf so the
// next beta_kernel step can reuse the storage. One block per utterance;
// threads stride over the logits dimension.
__global__ void copy_grad(float *grad_storage,
float *grad_net,
const float * const alpha_lld,
const int * const input_lengths,
const int batch_size,
const int logits_size,
const int T,
const int t) {
int mini_batch_idx = blockIdx.x;
int tid = threadIdx.x;
// Skip frames at or past this utterance's actual length.
if (t >= input_lengths[mini_batch_idx]) return;
float lld = alpha_lld[mini_batch_idx]; // total log-likelihood (normalizer)
for (int idx = tid; idx < logits_size; idx += blockDim.x) {
float *grad_position = grad_net + mini_batch_idx*logits_size*T + t*logits_size + idx;
int idx_storage = mini_batch_idx*logits_size*ATOMIC_CONST+idx*ATOMIC_CONST;
float grad = -float(INFINITY);
// Log-sum the ATOMIC_CONST partial accumulators, clearing each one.
for (int i = 0; i < ATOMIC_CONST; i++) {
grad = log_plus(grad_storage[idx_storage+i], grad);
grad_storage[idx_storage+i] = -float(INFINITY);
}
*grad_position = expf(grad - lld);
}
}
// One thread per utterance (launched <<<1, batch_size>>>): after the
// backward pass, the total log-likelihood sits at state 0 of beta buffer 0.
__global__ void beta_lld_kernal(const float * const beta,
                                const int beta_size,
                                float * loglikelihood) {
    const int b = threadIdx.x;
    loglikelihood[b] = beta[b * 2 * beta_size];
}
Transition ** TRANSITION_ALPHA = NULL;
Transition ** TRANSITION_BETA = NULL;
IntPair ** TRANSITION_INDEX_ALPHA = NULL;
IntPair ** TRANSITION_INDEX_BETA = NULL;
float ** START_WEIGHT = NULL;
float ** END_WEIGHT = NULL;
int DEN_NUM_ARCS = 0;
int DEN_NUM_STATES = 0;
int *DEVICE_HASH = NULL;
void ReadFst(const char * fst_name,
std::vector<std::vector<int> > &alpha_next,
std::vector<std::vector<int> > &beta_next,
std::vector<std::vector<int> > &alpha_ilabel,
std::vector<std::vector<int> > &beta_ilabel,
std::vector<std::vector<float> > &alpha_weight,
std::vector<std::vector<float> > &beta_weight,
std::vector<float> &start_weight,
std::vector<float> &end_weight,
int &num_states,
int &num_arcs);
extern "C" {
// Load the denominator FST from fst_name, flatten its arcs into CSR-style
// (index, transition) arrays for the forward (alpha) and backward (beta)
// recursions, and upload one copy of every table to each GPU in `gpus`.
// Also builds DEVICE_HASH, mapping a CUDA device id to its slot in the
// per-GPU pointer arrays. Restores the caller's current device on exit.
void Init(const char * fst_name, int n_gpus, int * gpus) {
std::vector<std::vector<int> > alpha_next;
std::vector<std::vector<int> > beta_next;
std::vector<std::vector<int> > alpha_ilabel;
std::vector<std::vector<int> > beta_ilabel;
std::vector<std::vector<float> > alpha_weight;
std::vector<std::vector<float> > beta_weight;
std::vector<float> start_weight;
std::vector<float> end_weight;
// const char * fst_name = "test_lm.fst";
int num_states = 0;
int num_arcs = 0;
ReadFst(fst_name, alpha_next, beta_next, alpha_ilabel, beta_ilabel,
alpha_weight, beta_weight, start_weight, end_weight, num_states, num_arcs);
DEN_NUM_ARCS = num_arcs;
DEN_NUM_STATES = num_states;
std::vector<Transition> transition_alpha(num_arcs);
std::vector<Transition> transition_beta(num_arcs);
std::vector<IntPair> transition_index_alpha(num_states);
std::vector<IntPair> transition_index_beta(num_states);
// Pack per-state arc lists contiguously; transition_index[i] holds the
// inclusive [first, second] arc range of state i. States with no arcs get
// the sentinel first=1 > second=0, so consumer loops are skipped.
int count = 0;
for (int i = 0; i < num_states; i++) {
if (alpha_next[i].empty()) {
transition_index_alpha[i].first = 1;
transition_index_alpha[i].second = 0;
} else {
transition_index_alpha[i].first = count;
for (int j = 0; j < alpha_next[i].size(); j++) {
transition_alpha[count].state = alpha_next[i][j];
transition_alpha[count].label = alpha_ilabel[i][j];
transition_alpha[count].weight = alpha_weight[i][j];
count++;
}
transition_index_alpha[i].second = count-1;
}
}
// Sanity check: every arc must have been packed exactly once.
if (count != num_arcs) {
fprintf(stderr, "count does not equal to num_arcs\n");
exit(-1);
}
count = 0;
for (int i = 0; i < num_states; i++) {
if (beta_next[i].empty()) {
transition_index_beta[i].first = 1;
transition_index_beta[i].second = 0;
} else {
transition_index_beta[i].first = count;
for (int j = 0; j < beta_next[i].size(); j++) {
transition_beta[count].state = beta_next[i][j];
transition_beta[count].label = beta_ilabel[i][j];
transition_beta[count].weight = beta_weight[i][j];
count++;
}
transition_index_beta[i].second = count-1;
}
}
if (count != num_arcs) {
fprintf(stderr, "count does not equal to num_arcs\n");
exit(-1);
}
// DEVICE_HASH[device_id] -> index into the per-GPU pointer arrays below.
int max_gpu = 0;
for (int i = 0; i < n_gpus; i++) {
if (gpus[i] > max_gpu) max_gpu = gpus[i];
}
DEVICE_HASH = new int[max_gpu+1];
memset(DEVICE_HASH, 0, sizeof(int)*(max_gpu+1));
for (int i = 0; i < n_gpus; i++) DEVICE_HASH[gpus[i]] = i;
TRANSITION_ALPHA = new Transition*[n_gpus];
TRANSITION_BETA= new Transition*[n_gpus];
TRANSITION_INDEX_ALPHA = new IntPair*[n_gpus];
TRANSITION_INDEX_BETA= new IntPair*[n_gpus];
START_WEIGHT = new float*[n_gpus];
END_WEIGHT = new float*[n_gpus];
int prev_device = 0;
CHECK_CUDA(cudaGetDevice(&prev_device));
// Replicate all FST tables onto every participating GPU.
for (int i = 0; i < n_gpus; i++) {
CHECK_CUDA(cudaSetDevice(gpus[i]));
CHECK_CUDA(cudaMalloc((void**)&TRANSITION_ALPHA[i], sizeof(Transition)*num_arcs));
CHECK_CUDA(cudaMalloc((void**)&TRANSITION_BETA[i], sizeof(Transition)*num_arcs));
CHECK_CUDA(cudaMalloc((void**)&TRANSITION_INDEX_ALPHA[i], sizeof(IntPair)*num_states));
CHECK_CUDA(cudaMalloc((void**)&TRANSITION_INDEX_BETA[i], sizeof(IntPair)*num_states));
CHECK_CUDA(cudaMalloc((void**)&START_WEIGHT[i], sizeof(float)*num_states));
CHECK_CUDA(cudaMalloc((void**)&END_WEIGHT[i], sizeof(float)*num_states));
CHECK_CUDA(cudaMemcpy(TRANSITION_ALPHA[i], transition_alpha.data(), sizeof(Transition)*num_arcs, cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(TRANSITION_BETA[i], transition_beta.data(), sizeof(Transition)*num_arcs, cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(TRANSITION_INDEX_ALPHA[i], transition_index_alpha.data(), sizeof(IntPair)*num_states, cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(TRANSITION_INDEX_BETA[i], transition_index_beta.data(), sizeof(IntPair)*num_states, cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(START_WEIGHT[i], start_weight.data(), sizeof(float)*num_states, cudaMemcpyHostToDevice));
CHECK_CUDA(cudaMemcpy(END_WEIGHT[i], end_weight.data(), sizeof(float)*num_states, cudaMemcpyHostToDevice));
}
CHECK_CUDA(cudaSetDevice(prev_device));
}
// Free every per-GPU device buffer allocated by Init() and reset the
// module-level pointers; restores the caller's current device afterwards.
void Release(int n_gpus, int *gpus) {
    int prev_device = 0;
    CHECK_CUDA(cudaGetDevice(&prev_device));
    for (int g = 0; g < n_gpus; g++) {
        CHECK_CUDA(cudaSetDevice(gpus[g]));
        CHECK_CUDA(cudaFree(TRANSITION_ALPHA[g]));
        CHECK_CUDA(cudaFree(TRANSITION_BETA[g]));
        CHECK_CUDA(cudaFree(TRANSITION_INDEX_ALPHA[g]));
        CHECK_CUDA(cudaFree(TRANSITION_INDEX_BETA[g]));
        CHECK_CUDA(cudaFree(START_WEIGHT[g]));
        CHECK_CUDA(cudaFree(END_WEIGHT[g]));
    }
    CHECK_CUDA(cudaSetDevice(prev_device));
    // Release the host-side pointer tables and null them so a stale call
    // cannot reuse freed memory.
    delete[] TRANSITION_ALPHA;
    delete[] TRANSITION_BETA;
    delete[] TRANSITION_INDEX_ALPHA;
    delete[] TRANSITION_INDEX_BETA;
    delete[] START_WEIGHT;
    delete[] END_WEIGHT;
    TRANSITION_ALPHA = NULL;
    TRANSITION_BETA = NULL;
    TRANSITION_INDEX_ALPHA = NULL;
    TRANSITION_INDEX_BETA = NULL;
    START_WEIGHT = NULL;
    END_WEIGHT = NULL;
    delete[] DEVICE_HASH;
    DEVICE_HASH = NULL;
}
// Host-side driver: run the full forward (alpha) recursion for one batch on
// the current device's stream and write one log-likelihood per utterance.
// alpha must hold batch_size * alpha_size * (T+1) floats; all launches are
// asynchronous on `stream`.
void compute_alpha(float *alpha,
float *logits,
const int batch_size,
int T,
const int alpha_size,
int logits_size,
int *input_lengths,
float * loglikelihood,
cudaStream_t stream) {
int device = 0;
CHECK_CUDA(cudaGetDevice(&device));
// Map the current CUDA device id to the buffer slot populated by Init().
int gid = DEVICE_HASH[device];
int alpha_lld_dim = 128; // threads (and shared floats) for the final reduction
alpha_first_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(alpha, alpha_size, batch_size, T, START_WEIGHT[gid]);
// The recursion is sequential in time: one launch per frame t.
for (int t = 1; t <= T; t++) {
alpha_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(alpha, logits, batch_size, T, t, input_lengths,
alpha_size, logits_size, TRANSITION_INDEX_ALPHA[gid], TRANSITION_ALPHA[gid]);
}
alpha_last_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(alpha, alpha_size, batch_size, T, input_lengths, END_WEIGHT[gid]);
alpha_lld_kernal<<<batch_size, alpha_lld_dim, sizeof(float)*alpha_lld_dim, stream>>>(alpha, alpha_size, T, input_lengths, loglikelihood);
// cudaDeviceSynchronize();
}
// Host-side driver: run the backward (beta) recursion and accumulate the
// per-logit occupation gradients into grad_net, normalized by the alpha
// log-likelihoods. beta is double-buffered (batch_size * 2 * beta_size);
// grad_storage holds batch_size * logits_size * ATOMIC_CONST accumulators.
// All launches are asynchronous on `stream`.
void compute_beta_and_grad(float *beta,
const float * const alpha,
const float * const logits,
const float * const alpha_lld,
float *grad_storage,
float *grad_net,
const int batch_size,
const int T,
const int beta_size,
const int logits_size,
const int * const input_lengths,
float * loglikelihood,
cudaStream_t stream) {
int device = 0;
CHECK_CUDA(cudaGetDevice(&device));
int gid= DEVICE_HASH[device];
// set grad_storage
// NOTE(review): this initial copy_grad call resets every grad_storage slot
// to -inf; it also writes grad_net[t=0] from whatever grad_storage held
// beforehand (overwritten again by the t=0 call in the loop below) --
// presumably grad_storage is pre-initialized elsewhere; confirm.
copy_grad<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, 0);
beta_last_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(beta, beta_size, batch_size, input_lengths, END_WEIGHT[gid]);
// Walk backwards in time; after each beta step, fold the atomic partial
// gradients for frame t into grad_net and clear the storage.
for (int t = T-1; t >= 0; t--) {
beta_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(beta, alpha, logits, grad_storage, batch_size, T, t, input_lengths, beta_size, logits_size,
TRANSITION_INDEX_BETA[gid], TRANSITION_BETA[gid]);
copy_grad<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(grad_storage, grad_net, alpha_lld, input_lengths, batch_size, logits_size, T, t);
}
beta_first_kernel<<<batch_size, CU_BLOCK_DIM, 0, stream>>>(beta, beta_size, batch_size, START_WEIGHT[gid]);
// One thread per utterance reads the beta-pass total log-likelihood.
beta_lld_kernal<<<1, batch_size, 0, stream>>>(beta, beta_size, loglikelihood);
}
}
|
2,804 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
// Matrix dimension
int N;
// Cuda variables
int n_blocks = 16;
int n_threads_per_block = 32;
/* Initialize A and B*/
/* Parse command-line arguments, seed the RNG, allocate the N x N matrices,
 * and fill A with uniform random values in [0, 1) while zeroing B.
 * Usage: prog <N> [blocks] [threads per block] [seed]
 * Fixes vs. original: N is validated, malloc results are checked, and the
 * hard-coded 32768.0 divisor (which produces values far above 1 wherever
 * RAND_MAX > 32767, i.e. on virtually all modern platforms) is replaced
 * with RAND_MAX + 1. */
void initialize_inputs(int argc, char** argv, float*& A, float*& B) {
    // Require at least the matrix dimension.
    if (argc <= 1) {
        printf("USAGE: %s <N> <blocks> <threads per block> <seed>\n", argv[0]);
        exit(1);
    }
    // Set dimension (atoi returns 0 on junk, so validate).
    N = atoi(argv[1]);
    if (N <= 0) {
        printf("Invalid matrix dimension: %s\n", argv[1]);
        exit(1);
    }
    // Optional launch configuration overrides.
    if (argc > 2) {
        n_blocks = atoi(argv[2]);
    }
    if (argc > 3) {
        n_threads_per_block = atoi(argv[3]);
    }
    // Optional seed; otherwise seed from the wall clock.
    if (argc > 4)
        srand(atoi(argv[4]));
    else
        srand((unsigned)time(NULL));
    // Allocate space for the matrices.
    A = (float*) malloc((size_t)N * N * sizeof(float));
    B = (float*) malloc((size_t)N * N * sizeof(float));
    if (A == NULL || B == NULL) {
        printf("Out of memory allocating %d x %d matrices\n", N, N);
        exit(1);
    }
    // Initialize A with uniform values in [0, 1); B starts at zero.
    for (int row = 0; row < N; row++)
        for (int col = 0; col < N; col++) {
            A[row * N + col] = (float)((double)rand() / ((double)RAND_MAX + 1.0));
            B[row * N + col] = 0.0;
        }
}
/// Print a matrix's content for debugging
// Dump an N x N matrix (row-major) to stdout in bracketed, tab-separated
// form for debugging small problem sizes.
void print_matrix(float* m) {
    printf("[");
    for (int r = 0; r < N; r++) {
        printf("\n\t");
        for (int c = 0; c < N; c++) {
            printf("%10.5f\t", m[r * N + c]);
        }
    }
    printf("]\n");
}
/* Kernel function */
__global__ void matrixNorm(float* A, float* B, int N) {
// Calculate column number
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Maybe some wasted threads
if (col < N) {
int row; // Row index for loops
float mu, sigma; // Mean and Standard Deviation
// Calculate mean for column
mu = 0.0;
for (row = 0; row < N; ++row)
mu += A[row * N + col];
mu /= N;
__syncthreads();
// Calculate standard deviation for the column
sigma = 0.0;
for (row = 0; row < N; ++row)
sigma += powf(A[col * N + row] - mu, 2.0);
sigma /= N;
sigma = sqrt(sigma);
__syncthreads();
// Normalize column
for (row = 0; row < N; ++row)
if (sigma == 0.0)
B[row * N + col] = 0.0;
else
B[row * N + col] = (A[col * N + row] - mu) / sigma;
}
}
// Drive the column-normalization: parse args, copy A to the device, run
// matrixNorm, copy B back, and report timings.
// Fix vs. original: kernel launches are asynchronous, so the normalization
// stop timestamp was taken right after launch and measured only launch
// overhead; a cudaDeviceSynchronize() now precedes it.
int main(int argc, char** argv) {
    /* Timing variables */
    struct timeval start, stop; /* Elapsed times using gettimeofday() */
    struct timeval norm_start, norm_stop; // time values for the normalization algorithm
    struct timezone tzdummy;
    unsigned long long runtime;
    /* Initialize A and B (also sets N, n_blocks, n_threads_per_block). */
    float* A, * B;
    initialize_inputs(argc, argv, A, B);
    /* Start Clock */
    gettimeofday(&start, &tzdummy);
    // Create buffers on device.
    float* gpu_A, * gpu_B;
    cudaMalloc((void**) &gpu_A, N * N * sizeof(float));
    cudaMalloc((void**) &gpu_B, N * N * sizeof(float));
    // Send problem to device.
    cudaMemcpy((void*) gpu_A, A, N * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy((void*) gpu_B, B, N * N * sizeof(float), cudaMemcpyHostToDevice);
    /* Matrix Normalization */
    gettimeofday(&norm_start, &tzdummy);
    matrixNorm<<<n_blocks, n_threads_per_block>>>(gpu_A, gpu_B, N);
    // Wait for the kernel to finish so the timing below is meaningful.
    cudaDeviceSynchronize();
    gettimeofday(&norm_stop, &tzdummy);
    // Pull result from the device (cudaMemcpy is itself blocking).
    cudaMemcpy((void*) B, gpu_B, N * N * sizeof(float), cudaMemcpyDeviceToHost);
    /* Calculate runtimes */
    gettimeofday(&stop, &tzdummy);
    runtime = (unsigned long long)(stop.tv_sec - start.tv_sec) * 1000000 + (stop.tv_usec - start.tv_usec);
    unsigned long long norm_time = (unsigned long long)
        (norm_stop.tv_sec - norm_start.tv_sec) * 1000000 + (norm_stop.tv_usec - norm_start.tv_usec);
    /* Display timing results */
    printf("Runtime = %g ms.\n", (float)runtime/(float)1000);
    printf("Normalization time = %g ms.\n", (float)norm_time/(float)1000);
    // Debug print for small N.
    if (N <= 20) {
        print_matrix(A);
        print_matrix(B);
    }
    // Cleanup and exit.
    free(A);
    free(B);
    cudaFree(gpu_A);
    cudaFree(gpu_B);
    return 0;
}
|
2,805 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
/*Dados HOST*/
struct data{
double c_x_min;
double c_x_max;
double c_y_min;
double c_y_max;
double pixel_width;
double pixel_height;
int i_x_max;
int i_y_max;
int n_blocks;
int n_threads;
};
struct data * global_data;
int colors[17][3] = {
{66, 30, 15},
{25, 7, 26},
{9, 1, 47},
{4, 4, 73},
{0, 7, 100},
{12, 44, 138},
{24, 82, 177},
{57, 125, 209},
{134, 181, 229},
{211, 236, 248},
{241, 233, 191},
{248, 201, 95},
{255, 170, 0},
{204, 128, 0},
{153, 87, 0},
{106, 52, 3},
{16, 16, 16},
};
int gradient_size = 16;
unsigned char **image_buffer;
int image_size;
int image_buffer_size;
//////////////////////////////////////////
/*Timer info*/
struct timer_info {
clock_t c_start;
clock_t c_end;
struct timespec t_start;
struct timespec t_end;
struct timeval v_start;
struct timeval v_end;
};
struct timer_info timer;
//////////////////////////////////////////
// Allocate image_buffer as image_buffer_size pixel rows of 3 bytes each
// (R, G, B). image_buffer_size must already be set by init().
void allocate_image_buffer(){
    const int rgb_size = 3;
    image_buffer = (unsigned char **) malloc(sizeof(unsigned char *) * image_buffer_size);
    for(int i = 0; i < image_buffer_size; i++)
        image_buffer[i] = (unsigned char *) malloc(sizeof(unsigned char) * rgb_size);
}
// Parse command-line arguments into global_data and the image globals.
// Fix vs. original: the thread count was read from argv[6] (the block-count
// argument) instead of argv[7], so the <threads> argument was silently
// ignored and n_threads always equaled n_blocks.
void init(int argc, char *argv[]){
    global_data = (struct data * )malloc(sizeof(struct data));
    if(argc < 8){
        printf("usage: ./mandelbrot_cuda c_x_min c_x_max c_y_min c_y_max image_size blocks threads\n");
        printf("examples with image_size = 11500:\n");
        printf(" Full Picture: ./mandelbrot_cuda -2.5 1.5 -2.0 2.0 11500 4 256\n");
        printf(" Seahorse Valley: ./mandelbrot_cuda -0.8 -0.7 0.05 0.15 11500 4 256\n");
        printf(" Elephant Valley: ./mandelbrot_cuda 0.175 0.375 -0.1 0.1 11500 4 256\n");
        printf(" Triple Spiral Valley: ./mandelbrot_cuda -0.188 -0.012 0.554 0.754 11500 4 256\n");
        exit(0);
    }
    else{
        // Complex-plane window.
        sscanf(argv[1], "%lf", &global_data->c_x_min);
        sscanf(argv[2], "%lf", &global_data->c_x_max);
        sscanf(argv[3], "%lf", &global_data->c_y_min);
        sscanf(argv[4], "%lf", &global_data->c_y_max);
        sscanf(argv[5], "%d", &image_size);
        sscanf(argv[6], "%d", &global_data->n_blocks);
        sscanf(argv[7], "%d", &global_data->n_threads); // was argv[6] in the original
        global_data->i_x_max = image_size;
        global_data->i_y_max = image_size;
        image_buffer_size = image_size * image_size;
        // Size of one pixel in complex-plane coordinates.
        global_data->pixel_width = (global_data->c_x_max - global_data->c_x_min) / global_data->i_x_max;
        global_data->pixel_height = (global_data->c_y_max - global_data->c_y_min) / global_data->i_y_max;
    };
};
// Map an escape-iteration count to an RGB color and store it at pixel
// (x, y). Pixels that reached iteration_max (in the set) get the dedicated
// last palette entry colors[gradient_size] (= colors[16], valid since the
// palette has 17 entries); others cycle through the first 16 entries.
// NOTE(review): the linear index uses i_y_max as the row stride, which only
// matches the buffer layout because the image is square (i_x_max == i_y_max
// in init()).
void update_rgb_buffer(int iteration, int x, int y){
int color;
int i_y_max = global_data->i_y_max;
int iteration_max = 200; // must match the kernel's iteration_max
if(iteration == iteration_max){
image_buffer[(i_y_max * y) + x][0] = colors[gradient_size][0];
image_buffer[(i_y_max * y) + x][1] = colors[gradient_size][1];
image_buffer[(i_y_max * y) + x][2] = colors[gradient_size][2];
}
else{
color = iteration % gradient_size;
image_buffer[(i_y_max * y) + x][0] = colors[color][0];
image_buffer[(i_y_max * y) + x][1] = colors[color][1];
image_buffer[(i_y_max * y) + x][2] = colors[color][2];
};
};
// Write image_buffer to Output/cuda.ppm as a binary PPM (P6) file.
// Fix vs. original: fopen's result was never checked, so a missing Output/
// directory crashed the program on the first fprintf(NULL, ...).
void write_to_file(){
    const char * filename = "Output/cuda.ppm";
    const char * comment = "# ";
    int max_color_component_value = 255;
    FILE * file = fopen(filename,"wb");
    if(file == NULL){
        perror("fopen Output/cuda.ppm");
        return;
    }
    // PPM header: magic, comment, width, height, max channel value.
    fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment,
            global_data->i_x_max, global_data->i_y_max, max_color_component_value);
    // One 3-byte RGB triple per pixel.
    for(int i = 0; i < image_buffer_size; i++){
        fwrite(image_buffer[i], 1 , 3, file);
    }
    fclose(file);
};
// Compute escape-iteration counts for the Mandelbrot set. The i_y rows are
// split into n_blocks * n_threads contiguous chunks of chunkSize rows each;
// thread 0 additionally absorbs the `leftover` rows, and every later thread's
// range is shifted past them, so the chunks tile [0, i_y_max) exactly.
// Output layout: out[i_x * i_y_max + i_y] (decoded by main as
// linha = idx / i_y_max, coluna = idx % i_y_max).
__global__
void compute_mandelbrot_thread(struct data * gpu_data, int * out){
int i_y_max = gpu_data->i_y_max;
int n_threads = gpu_data->n_threads * gpu_data->n_blocks; //Total thread count across the grid
int ind = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
int chunkSize = i_y_max / n_threads;
int leftover = i_y_max % n_threads;
// Thread 0 gets [0, chunkSize+leftover); thread k>0 gets the next chunkSize rows.
int begin = ind * chunkSize + ((ind != 0) ? leftover : 0);
int end = begin + chunkSize + (ind == 0 ? leftover : 0);
double z_x;
double z_y;
double z_x_squared;
double z_y_squared;
double escape_radius_squared = 4; // |z| > 2 means escape
int iteration;
int i_x;
int i_y;
double c_x;
double c_y;
double c_y_min = gpu_data->c_y_min;
double c_x_min = gpu_data->c_x_min;
int i_x_max = gpu_data->i_x_max;
int iteration_max = 200; // must match update_rgb_buffer's iteration_max
double pixel_height = gpu_data->pixel_height;
double pixel_width = gpu_data->pixel_width;
for(i_y = begin; i_y < end; i_y++){
c_y = c_y_min + i_y * pixel_height;
// Snap values straddling the real axis exactly onto it.
if(fabs(c_y) < pixel_height / 2){
c_y = 0.0;
};
for(i_x = 0; i_x < i_x_max; i_x++){
c_x = c_x_min + i_x * pixel_width;
z_x = 0.0;
z_y = 0.0;
z_x_squared = 0.0;
z_y_squared = 0.0;
// Standard z <- z^2 + c escape-time iteration.
for(iteration = 0;
iteration < iteration_max && \
((z_x_squared + z_y_squared) < escape_radius_squared);
iteration++){
z_y = 2 * z_x * z_y + c_y;
z_x = z_x_squared - z_y_squared + c_x;
z_x_squared = z_x * z_x;
z_y_squared = z_y * z_y;
};
out[ (i_x)*i_y_max + i_y ] = iteration;
};
};
};
// Drive the Mandelbrot render: parse args, launch the kernel, colorize the
// iteration counts on the host, time the whole compute+colorize phase, and
// write the PPM. Fixes vs. original: the host result buffer, global_data,
// and the per-pixel image rows were all leaked.
int main(int argc, char *argv[]){
    int * d_out;
    int * out;
    struct data * gpu_data;
    init(argc, argv);
    allocate_image_buffer();
    cudaSetDevice(0);
    size_t n_pixels = (size_t)global_data->i_x_max * global_data->i_y_max;
    cudaMalloc((void **)&d_out, n_pixels * sizeof(int));
    cudaMalloc((void **)&gpu_data, sizeof(struct data));
    cudaMemcpy(gpu_data, global_data, sizeof(struct data), cudaMemcpyHostToDevice);
    /* GPU launch */
    int n_threads = global_data->n_threads;
    int n_blocks = global_data->n_blocks;
    clock_gettime(CLOCK_MONOTONIC, &timer.t_start);
    compute_mandelbrot_thread<<<n_blocks, n_threads>>>(gpu_data, d_out);
    cudaDeviceSynchronize();
    /* Copy iteration counts back and colorize on the CPU. */
    out = (int *) malloc(n_pixels * sizeof(int));
    cudaMemcpy(out, d_out, n_pixels * sizeof(int), cudaMemcpyDeviceToHost);
    for(size_t i = 0; i < n_pixels; i++){
        int linha = (int)(i / global_data->i_y_max);
        int coluna = (int)(i % global_data->i_y_max);
        update_rgb_buffer(out[i], linha, coluna);
    }
    // Timed region intentionally includes the copy-back and colorization,
    // matching the original measurement.
    clock_gettime(CLOCK_MONOTONIC, &timer.t_end);
    printf("%f\n",
        (double) (timer.t_end.tv_sec - timer.t_start.tv_sec) +
        (double) (timer.t_end.tv_nsec - timer.t_start.tv_nsec) / 1000000000.0);
    write_to_file();
    // Release host and device resources (original leaked these).
    for(int i = 0; i < image_buffer_size; i++)
        free(image_buffer[i]);
    free(image_buffer);
    free(out);
    free(global_data);
    cudaFree(d_out);
    cudaFree(gpu_data);
    return 0;
};
|
2,806 | #include <stdio.h>
#define BLOCK_SIZE_X 128
// Warm-up kernel: touches one element per block so that timed kernels that
// follow do not pay first-launch overheads.  `count` is accepted only for
// signature parity with the sum kernels; it is unused here.
__global__
void warmUp(float* out, float* in, int count) {
    float* block_base = in + (blockIdx.x * blockDim.x);
    if (threadIdx.x == 0) { out[blockIdx.x] = block_base[0]; }
}
// In-place unrolled tree reduction: each block sums its blockDim.x-long
// segment of `in` (destroying the segment) and writes the block total to
// out[blockIdx.x].  Requires blockDim.x to be a power of two >= 64 and
// `count` to be a multiple of blockDim.x: threads that exit at the guard
// below skip the __syncthreads() calls, so partial trailing blocks are
// unsupported.
__global__
void sumUnrollGlobal(float* out, float* in, int count) {
    // Fix: the original guard used `> count`, which let global index ==
    // count through -- a one-element out-of-bounds read/write.
    if ((blockIdx.x * blockDim.x) + threadIdx.x >= count) return;
    float* local_array = in + (blockIdx.x * blockDim.x);
    if (blockDim.x >= 1024 && threadIdx.x < 512) { local_array[threadIdx.x] += local_array[threadIdx.x + 512]; }
    __syncthreads();
    if (blockDim.x >= 512 && threadIdx.x < 256) { local_array[threadIdx.x] += local_array[threadIdx.x + 256]; }
    __syncthreads();
    if (blockDim.x >= 256 && threadIdx.x < 128) { local_array[threadIdx.x] += local_array[threadIdx.x + 128]; }
    __syncthreads();
    if (blockDim.x >= 128 && threadIdx.x < 64) { local_array[threadIdx.x] += local_array[threadIdx.x + 64]; }
    __syncthreads();
    if (threadIdx.x < 32) {
        // Final warp: volatile forces every step through memory so the
        // classic implicit-warp-sync reduction works on pre-Volta parts.
        // NOTE(review): Volta+ independent thread scheduling needs
        // __syncwarp() between steps -- confirm the target architecture.
        volatile float* v_local_array = local_array;
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 32];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 16];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 8];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 4];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 2];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 1];
    }
    if (threadIdx.x == 0) { out[blockIdx.x] = local_array[0]; }
}
// Same reduction as sumUnrollGlobal but staged through shared memory, so
// the input is left untouched.  Same preconditions: blockDim.x a power of
// two >= 64, count a multiple of blockDim.x (early-exiting threads skip
// the shared-memory load and the barriers, so partial blocks would read
// uninitialized shared memory and diverge at __syncthreads()).
__global__
void sumUnrollShared(float* out, float* in, int count) {
    // Fix: original used `> count`, letting global index == count through
    // (one-element out-of-bounds access on the load below).
    if ((blockIdx.x * blockDim.x) + threadIdx.x >= count) return;
    float* local_array = in + (blockIdx.x * blockDim.x);
    __shared__ float shared_array[BLOCK_SIZE_X];
    shared_array[threadIdx.x] = local_array[threadIdx.x];  // stage the segment
    __syncthreads();
    if (blockDim.x >= 1024 && threadIdx.x < 512) { shared_array[threadIdx.x] += shared_array[threadIdx.x + 512]; }
    __syncthreads();
    if (blockDim.x >= 512 && threadIdx.x < 256) { shared_array[threadIdx.x] += shared_array[threadIdx.x + 256]; }
    __syncthreads();
    if (blockDim.x >= 256 && threadIdx.x < 128) { shared_array[threadIdx.x] += shared_array[threadIdx.x + 128]; }
    __syncthreads();
    if (blockDim.x >= 128 && threadIdx.x < 64) { shared_array[threadIdx.x] += shared_array[threadIdx.x + 64]; }
    __syncthreads();
    if (threadIdx.x < 32) {
        // Volatile final-warp unroll; see NOTE in sumUnrollGlobal regarding
        // Volta+ independent thread scheduling.
        volatile float* v_local_array = shared_array;
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 32];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 16];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 8];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 4];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 2];
        v_local_array[threadIdx.x] += v_local_array[threadIdx.x + 1];
    }
    if (threadIdx.x == 0) { out[blockIdx.x] = shared_array[0]; }
}
// Driver: builds a 2^16-element ramp, runs the global-memory and
// shared-memory unrolled reductions, and copies the per-block partial
// sums back to the host after each.
int main(void) {
    int count = 1 << 16;
    dim3 block(BLOCK_SIZE_X);
    dim3 grid((count + block.x - 1) / block.x);    // ceil-div launch
    size_t in_bytes  = count * sizeof(float);
    size_t out_bytes = grid.x * sizeof(float);
    float* host_array = (float*)malloc(in_bytes);
    for (int i = 0; i < count; ++i) host_array[i] = i;   // 0,1,2,...
    float* host_result_array = (float*)malloc(out_bytes);
    float* device_array;
    float* device_result_array;
    cudaMalloc((float**)&device_array, in_bytes);
    cudaMemcpy(device_array, host_array, in_bytes, cudaMemcpyHostToDevice);
    cudaMalloc((float**)&device_result_array, out_bytes);
    warmUp<<<grid, block>>>(device_result_array, device_array, count);
    // Global-memory variant (reduces device_array in place).
    sumUnrollGlobal<<<grid, block>>>(device_result_array, device_array, count);
    cudaDeviceSynchronize();
    cudaMemcpy(host_result_array, device_result_array, out_bytes, cudaMemcpyDeviceToHost);
    // Shared-memory variant, reusing the (already reduced) input buffer.
    sumUnrollShared<<<grid, block>>>(device_result_array, device_array, count);
    cudaDeviceSynchronize();
    cudaMemcpy(host_result_array, device_result_array, out_bytes, cudaMemcpyDeviceToHost);
    free(host_array);
    free(host_result_array);
    cudaFree(device_array);
    cudaFree(device_result_array);
    cudaDeviceReset();
    return 0;
}
2,807 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
// Builds a scrambled variant of the hard-coded password and returns it as
// a device-heap string.  The caller owns the returned buffer; the current
// kernel never frees it, so repeated calls leak device heap.
// The original `attempt` comparison was already commented out; `attempt`
// is currently unused.
// Fix: the original printed single chars with "%s", which treats the char
// value as a pointer -- undefined behavior (crash).  Use "%c".  Also
// guard the device-heap malloc, which returns NULL when the heap is
// exhausted.
__device__ char* is_a_match(char * attempt) {
    char password1[] = "OKNXRT3171";
    char * newPassword = (char *) malloc(sizeof(char) * 11);
    if (newPassword == NULL) return NULL;   // device heap can run out
    newPassword[0] = password1[0] - 2;
    newPassword[1] = password1[0] + 2;
    newPassword[2] = password1[0] - 1;
    newPassword[3] = password1[1] - 3;
    newPassword[4] = password1[1] + 3;
    newPassword[5] = password1[1] + 1;
    newPassword[6] = password1[2] - 2;
    newPassword[7] = password1[2] + 2;
    newPassword[8] = password1[3] - 4;
    newPassword[9] = password1[3] + 4;
    newPassword[10] = '\0';
    printf("------");
    for(int i = 0; i < 10; i++){
        printf("%c\n", newPassword[i]);      // was %s: UB on a char value
        if(i >= 0 && i < 6){ // wrap positions 0-5 back into 'a'..'z'
            printf("%c", newPassword[i]);    // was %s: UB on a char value
            if(newPassword[i] > 122){
                newPassword[i] = (newPassword[i] - 122) + 97;
            }else if(newPassword[i] < 97){
                newPassword[i] = (97 - newPassword[i]) + 97;
            }
        }else{ // wrap positions 6-9 back into '0'..'9'
            if(newPassword[i] > 57){
                newPassword[i] = (newPassword[i] - 57) + 48;
            }else if(newPassword[i] < 48){
                newPassword[i] = (48 - newPassword[i]) + 48;
            }
        }
    }
    return newPassword;
}
// Brute-force driver: each (blockIdx, threadIdx) pair fixes two uppercase
// letters, then the nested loops try every two-digit combination, calling
// is_a_match on each candidate.
// NOTE(review): password[4] and password[5] are never assigned, so bytes
// 4-5 of every candidate are uninitialized stack data -- confirm whether
// a 4-char prefix + terminator was intended.
// NOTE(review): the device-heap string returned by is_a_match is
// discarded without free(), leaking 11 bytes per call.
__global__ void kernel() {
char i1, i2;
char password[7];
password[6] = '\0';
password[0] = blockIdx.x + 65;   // 'A' + block index
password[1] = threadIdx.x + 65;  // 'A' + thread index
for (i1 = '0'; i1 <= '9'; i1++) {
for (i2 = '0'; i2 <= '9'; i2++) {
password[2] = i1;
password[3] = i2;
// if (is_a_match(password)) {
// printf("%s \n Encrypted\n", is_a_match(password));
// } else {
// //printf("tried: %s\n",password);
// }
is_a_match(password);
}
}
}
/* Computes (finish - start) in nanoseconds and stores it in *difference.
 * Returns 0 when the interval is strictly positive, 1 otherwise. */
int time_difference(struct timespec * start, struct timespec * finish, long long int * difference) {
    long long int sec  = finish->tv_sec  - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    if (nsec < 0) {            /* borrow one second */
        sec -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return !(*difference > 0);
}
// Launches the 26x26 brute-force grid (two uppercase letters from the
// block/thread indices, two digits iterated in-kernel) and reports the
// wall-clock time.
// Fix: cudaThreadSynchronize() is deprecated (removed in modern CUDA);
// cudaDeviceSynchronize() is the supported equivalent.
int main() {
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, & start);
    kernel <<< 26, 26 >>> ();
    cudaDeviceSynchronize();   // wait for the brute-force kernel to finish
    clock_gettime(CLOCK_MONOTONIC, & finish);
    time_difference( & start, & finish, & time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
        (time_elapsed / 1.0e9));
    return 0;
}
2,808 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define SIZE 8192
#define BLOCKSIZE 32
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
printf("Failed to run stmt %s", #stmt); \
return -1; \
} \
} while(0)
// Compute P = N * M
// Naive dense GEMM: P = N * M for square size x size row-major matrices.
// One thread computes one output element; guarded for overhanging grids.
__global__ void matrixMultiply(double * N, double * M, double * P, int size) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= size || c >= size) return;
    double acc = 0;
    for (int k = 0; k < size; ++k)
        acc += N[r * size + k] * M[k * size + c];
    P[r * size + c] = acc;
}
// Tiled GEMM: C = A * B using BLOCKSIZE x BLOCKSIZE shared-memory tiles.
// Each block computes one tile of C; each iteration of the m-loop stages
// one tile of A and one of B, synchronizes, accumulates the partial dot
// products, and synchronizes again before the tiles are overwritten.
// Preconditions (not checked): blockDim == (BLOCKSIZE, BLOCKSIZE) and
// size is a multiple of BLOCKSIZE -- there is no bounds guard on the
// loads or the final store, so other sizes would access out of bounds.
__global__ void matrixMultiplyShared(double * A, double * B, double * C,
int size) {
//@@ Insert code to implement matrix multiplication here
//@@ You have to use shared memory for this MP
__shared__ double s_A[BLOCKSIZE][BLOCKSIZE];
__shared__ double s_B[BLOCKSIZE][BLOCKSIZE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * BLOCKSIZE + ty;
int col = bx * BLOCKSIZE + tx;
double Cvalue = 0;
for(int m = 0; m < size / BLOCKSIZE; m++) {
s_A[ty][tx] = A[row * size + m * BLOCKSIZE + tx];
s_B[ty][tx] = B[(m * BLOCKSIZE + ty) * size + col];
__syncthreads();  // tiles fully staged before use
for(int k = 0; k < BLOCKSIZE; k++)
Cvalue += s_A[ty][k] * s_B[k][tx];
__syncthreads();  // all reads done before the next stage overwrites
}
C[row * size + col] = Cvalue;
}
// Driver: multiplies two SIZE x SIZE matrices of 2.0 on the GPU with the
// shared-memory kernel and reports the kernel wall-clock time.
// Fixes vs. original: deprecated cudaThreadSynchronize() replaced by
// cudaDeviceSynchronize(); host and device buffers are now released
// (all six were leaked).
int main() {
    struct timeval start, end;
    double *h_N = (double *) malloc(SIZE * SIZE * sizeof(double));
    double *h_M = (double *) malloc(SIZE * SIZE * sizeof(double));
    double *h_P = (double *) malloc(SIZE * SIZE * sizeof(double));
    long i;
    for(i = 0; i < SIZE * SIZE; i++) {
        h_N[i] = 2.0;
        h_M[i] = 2.0;
        h_P[i] = 0.0;
    }
    double *d_N;
    double *d_M;
    double *d_P;
    wbCheck(cudaMalloc((void **) &d_N, SIZE * SIZE * sizeof(double)));
    wbCheck(cudaMalloc((void **) &d_M, SIZE * SIZE * sizeof(double)));
    wbCheck(cudaMalloc((void **) &d_P, SIZE * SIZE * sizeof(double)));
    wbCheck(cudaMemcpy(d_N, h_N, SIZE * SIZE * sizeof(double), cudaMemcpyHostToDevice));
    wbCheck(cudaMemcpy(d_M, h_M, SIZE * SIZE * sizeof(double), cudaMemcpyHostToDevice));
    /* SIZE (8192) is a multiple of BLOCKSIZE (32), as the kernel requires. */
    dim3 dimGrid(SIZE / BLOCKSIZE, SIZE / BLOCKSIZE,1);
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
    gettimeofday(&start, NULL);
    //matrixMultiply<<<dimGrid,dimBlock>>>(d_N, d_M, d_P, SIZE);
    matrixMultiplyShared<<<dimGrid,dimBlock>>>(d_N, d_M, d_P, SIZE);
    cudaError_t cudaResult = cudaGetLastError();
    if (cudaResult != cudaSuccess) {
        printf("Failed to call CUDA's kernel function: %s\n",
               cudaGetErrorString(cudaResult));
        return 1;
    }
    cudaDeviceSynchronize();   /* was deprecated cudaThreadSynchronize() */
    gettimeofday(&end, NULL);
    wbCheck(cudaMemcpy(h_P, d_P, SIZE * SIZE * sizeof(double), cudaMemcpyDeviceToHost));
    /* time calculation */
    if(end.tv_sec < start.tv_sec) {
        printf("You are very unlucky, please, run me again\n");
        return 1;
    }
    double usec_diff = (end.tv_sec - start.tv_sec) +
        (double)(end.tv_usec - start.tv_usec) / 1000 / 1000;
    double time_spent = (double)(usec_diff);
    printf("Multiplication finished, wallclock: %f sec, %f\n", time_spent, h_P[0]);
    free(h_N); free(h_M); free(h_P);            /* leaked in the original */
    cudaFree(d_N); cudaFree(d_M); cudaFree(d_P);
    return 0;
}
|
2,809 | /************************************************
FILENAME: example_paddedpencil.cu
AUTHOR: Anuva K
DESCRIPTION: Test code to perform 3d FFTs on CUDA
according to the proposed pruned framework. FFTs of a small
non-zero subvolume of a larger volume of zeros are to be
computed pencil by pencil without storing the large 3d array
This script tests X dimension FFT of k x k x k signal to N x k x k
*/
#include <cufft.h>
#include <cuComplex.h>
#include <stdio.h>
#define N_SIGS 1
#define IN_SIG_LEN 8
#define OUT_SIG_LEN 8
/* Forward C2C FFT of N_SIGS signals of length IN_SIG_LEN via cufftPlanMany.
 * Only the first half of each input signal carries data; the second half is
 * meant to be zero padding.
 * Fixes vs. original: the input was malloc'd and the pad region never
 * cleared, so the FFT consumed uninitialized memory -- calloc zero-fills it
 * (all-zero bytes are 0.0f floats).  The plan and all buffers are now
 * released before exit. */
int main(){
    cuFloatComplex *h_signal, *d_signal, *h_result, *d_result;
    h_signal = (cuFloatComplex *)calloc(N_SIGS*IN_SIG_LEN, sizeof(cuFloatComplex)); /* zeroed pad */
    h_result = (cuFloatComplex *)malloc(N_SIGS*OUT_SIG_LEN*sizeof(cuFloatComplex));
    for (int i = 0; i < N_SIGS; i ++)
        for (int j = 0; j < IN_SIG_LEN/2; j++) /* second half stays zero (padding) */
            h_signal[(i*IN_SIG_LEN) + j] = make_cuFloatComplex(100*sin((i+1)*6.283*j/IN_SIG_LEN), 0);
    cudaMalloc(&d_signal, N_SIGS*IN_SIG_LEN*sizeof(cuFloatComplex));
    cudaMalloc(&d_result, N_SIGS*OUT_SIG_LEN*sizeof(cuFloatComplex));
    cudaMemcpy(d_signal, h_signal, N_SIGS*IN_SIG_LEN*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
    cufftHandle plan;
    int n[1] = {IN_SIG_LEN};
    cufftResult res = cufftPlanMany(&plan, 1, n,
        NULL, 1, IN_SIG_LEN,  /* advanced data layout off; idist = IN_SIG_LEN */
        NULL, 1, OUT_SIG_LEN, /* advanced data layout off; odist = OUT_SIG_LEN */
        CUFFT_C2C, N_SIGS);
    if (res != CUFFT_SUCCESS) {printf("plan create fail\n"); return 1;}
    res = cufftExecC2C(plan, d_signal, d_result, CUFFT_FORWARD);
    if (res != CUFFT_SUCCESS) {printf("forward transform fail\n"); return 1;}
    cudaMemcpy(h_result, d_result, N_SIGS*OUT_SIG_LEN*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N_SIGS; i++){
        for (int j = 0; j < IN_SIG_LEN; j++)
            printf("%.3f ", cuCrealf(h_signal[(i*IN_SIG_LEN)+j]));
        printf("\n"); }
    printf("result:\n");
    for (int i = 0; i < N_SIGS; i++){
        for (int j = 0; j < OUT_SIG_LEN; j++)
            printf("%.3f ", cuCrealf(h_result[(i*OUT_SIG_LEN)+j]));
        printf("\n"); }
    cufftDestroy(plan);             /* leaked in the original */
    cudaFree(d_signal);
    cudaFree(d_result);
    free(h_signal);
    free(h_result);
    return 0;
}
|
2,810 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// 3x3 convolution of the I x J row-major image A into B with a fixed
// hard-coded kernel.  Border pixels (first/last row and column) are
// skipped, exactly as in the original index arithmetic.
__global__ void Convolution(double* A, double* B, int I, int J)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    const double c11 = +0.2, c21 = +0.5, c31 = -0.8;
    const double c12 = -0.3, c22 = +0.6, c32 = -0.9;
    const double c13 = +0.4, c23 = +0.7, c33 = +0.1;
    int row = i / J;
    int col = i % J;
    // Interior pixels only: equivalent to the original guard
    // (i>J && i<I*J-J && i%J!=0 && (i+1)%J!=0).
    if (row < 1 || row > I - 2 || col < 1 || col > J - 2) return;
    B[i] = c11 * A[i-J-1] + c12 * A[i-1] + c13 * A[i+J-1]
         + c21 * A[i-J]   + c22 * A[i]   + c23 * A[i+J]
         + c31 * A[i-J+1] + c32 * A[i+1] + c33 * A[i+J+1];
}
/* Fills the I x J row-major matrix A with uniform pseudo-random values in
 * [0, 1], consuming rand() in row-major order. */
void init(double* A, int I, int J)
{
    for (int r = 0; r < I; ++r)
        for (int c = 0; c < J; ++c)
            A[r*J + c] = (double)rand()/RAND_MAX;
}
/* Driver: random I x J image -> GPU 3x3 convolution -> convgpu.out, with
 * event-based kernel timing.
 * Fixes vs. original: argv[1]/argv[2] were dereferenced without checking
 * argc (crash when run with no arguments); CUDA events were never
 * destroyed. */
int main(int argc, char *argv[])
{
    FILE *output1;
    double *A;
    double *B;
    cudaEvent_t start, stop;
    float elapsedTime;
    if (argc < 3) {   /* fix: guard before touching argv */
        fprintf(stderr, "usage: %s <rows> <cols>\n", argv[0]);
        return 1;
    }
    output1 = fopen("convgpu.out", "w");
    int I = atoi(argv[1]), J = atoi(argv[2]);
    int size = I*J*sizeof(double);
    A = (double*)malloc(size);
    B = (double*)malloc(size);
    cudaError_t err = cudaSuccess;
    double *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    double *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* initialize the arrays */
    init(A, I, J);
    /* host to device */
    err = cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int threadsPerBlock = 128;
    int blocksPerGrid;   /* ceil(I*J / threadsPerBlock) */
    if (I*J%threadsPerBlock != 0){
        blocksPerGrid = I*J/threadsPerBlock+1;
    }else {
        blocksPerGrid = I*J/threadsPerBlock;
    }
    printf("blocksPerGrid: %d\n", blocksPerGrid);
    printf("threadsPerBlock: %d\n", threadsPerBlock);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    Convolution<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, I, J);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start,stop);
    err = cudaMemcpy(B, d_B, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "error code %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < I; i++) {
        for (int j = 0; j < J; j++) {
            fprintf(output1, "%f ", B[i*J+j]);
        }
        fprintf(output1, "\n");
    }
    printf("Elapsed time : %f s\n" ,elapsedTime/1000);
    free(A);
    free(B);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaEventDestroy(start);   /* fix: events were leaked */
    cudaEventDestroy(stop);
    fclose(output1);
    return 0;
}
|
2,811 | // https://stackoverflow.com/questions/57187912/how-to-differentiate-gpu-threads-in-a-single-gpu-for-different-host-cpu-thread
// nvcc cuda_std_thread.cu -o cuda_std_thread -std=c++11
#include <iostream>
#include <math.h>
#include <thread>
#include <vector>
#include <cuda.h>
using namespace std;
const unsigned NUM_THREADS = std::thread::hardware_concurrency(); //.. no. of core in the machine
// Kernel function to add the elements of two arrays
// Elementwise y[i] += x[i] over n floats: one thread per element, with an
// early-exit guard for threads past the end of the slice.
__global__
void add_2(int n, float *x, float *y)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n) return;
    y[idx] = x[idx] + y[idx];
}
//
// Host-thread worker: launches add_2 on this thread's contiguous slice of
// x/y and waits for it.
// Fix: the original always passed `workSize` elements starting at
// idx_thread*workSize, so whenever NUM_THREADS does not divide N the last
// thread's kernel read and wrote past the end of the arrays.  The slice
// is now clamped to N (and skipped entirely if it starts past the end).
void thread_func(int N, float *x, float *y, int idx_thread)
{
    cudaSetDevice(0); //.. single-GPU machine; explicit for clarity
    int blockSize;    // launch-configurator suggested block size
    int minGridSize;  // minimum grid size for full-device occupancy
    int gridSize;     // actual grid size for this slice
    cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, add_2, 0, N);
    // Per-host-thread slice size (ceil-div so all N elements are covered).
    int workSize = (N + NUM_THREADS - 1)/NUM_THREADS;
    int offset = idx_thread * workSize;
    if (offset >= N) return;                  // nothing left for this thread
    int chunk = workSize;
    if (offset + chunk > N) chunk = N - offset;   // clamp the tail slice
    gridSize = (chunk + blockSize - 1)/blockSize;
    cout<<"blockSize: "<<blockSize<<" minGridSize: "<<minGridSize<<" gridSize: "<<gridSize<<endl;
    add_2<<<gridSize, blockSize>>>(chunk, x+offset, y+offset);
    // Wait for this slice's kernel before the host thread returns.
    cudaDeviceSynchronize();
}
//
// Splits a 2^30-element vector addition across one host thread per CPU
// core; each worker launches its own kernel on GPU 0 (see thread_func).
int main()
{
    int N = 1<<30;
    float *x, *y;
    // Unified memory: visible to the host threads and the GPU alike.
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    //.. fan out one worker per hardware thread ..
    vector<std::thread> workers;
    for (int w = 0; w < NUM_THREADS; w++)
        workers.emplace_back(thread_func, N, x, y, w);
    for (auto &w : workers)
        w.join();
    // Verification retained from the original, disabled (all values 3.0f):
    //float maxError = 0.0f;
    //for (int i = 0; i < N; i++) {
    // if(!(i%100000000))
    // std::cout<<i<<" "<<y[i]<<std::endl;
    // maxError = fmax(maxError, fabs(y[i]-3.0f));
    //}
    //std::cout << "Max error: " << maxError << std::endl;
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
2,812 | #include<iostream>
#include<cuda.h>
// Device code
// Device code: C[i] = A[i] + B[i], one thread per element, guarded so
// grids that overhang N do nothing extra.
__global__ void VecAdd(float* A, float* B, float* C, int N){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= N) return;
    C[idx] = A[idx] + B[idx];
}
// Host code
// Host code: pinned-memory vector add of N elements on the GPU.
// Fix: the pinned host buffers were allocated with cudaMallocHost but
// released with cudaFree -- pinned allocations must be released with
// cudaFreeHost (cudaFree is for device memory).
int main(){
    int N = 10;
    size_t size = N*sizeof(float);
    // Pinned (page-locked) host memory: faster transfers, required for
    // true async copies.
    float *h_A, *h_B, *h_C;
    cudaMallocHost(&h_A, size);
    cudaMallocHost(&h_B, size);
    cudaMallocHost(&h_C, size);
    // Initialize the input vectors
    for(auto i = 0; i < N; i++){
        h_A[i] = i;
        h_B[i] = 2*i;
        h_C[i] = 0;
    }
    // Allocate memory for the device
    float* d_A;
    cudaMalloc(&d_A, size);
    float* d_B;
    cudaMalloc(&d_B, size);
    float* d_C;
    cudaMalloc(&d_C, size);
    // Copy contents of host to device
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
    // Invoke the kernel to do the computation
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock -1)/threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    // Copy results from device to host (blocking copy also synchronizes)
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    // Free the device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Print values
    for(auto i = 0; i < N; i++){
        std::cout << h_C[i] << std::endl;
    }
    // Fix: pinned memory must be released with cudaFreeHost, not cudaFree.
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(h_C);
    return 0;
}
|
2,813 | #include "vector.cuh"
|
2,814 | # include <stdio.h>
# include <math.h>
// Computes C[steps*idx + i] = (S1^3 - 3*B[...]) / (A[...] + S2) over each
// thread's block of `steps` contiguous elements (thread idx owns
// [steps*idx, steps*(idx+1))).
// Fix: S1^3 is loop-invariant -- the original called powf on every
// iteration; it is now computed once per thread.  Identical results.
__global__ void Add( int n, float *A, float *B, float *C, float S1, float S2, int steps) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < n){
        float s1_cubed = powf(S1, 3.0f);   // hoisted loop invariant
        for (int i = 0; i < steps; i++){
            C[steps*idx+i] = (s1_cubed - 3*B[steps*idx+i])/(A[steps*idx+i] + S2);
        }
    }
}
// Usage: prog <n> <grid> <block>.  Computes Y = (S1^3 - 3B)/(A + S2) on
// the GPU with `steps` elements per thread and prints the tail of the
// result plus total elapsed time (timing deliberately spans allocation,
// transfer, kernel and frees, as in the original).
// NOTE(review): steps = n/(grid*block) truncates -- when grid*block does
// not divide n the tail of devC is never computed and the printed hC
// values for those slots are uninitialized; size arguments accordingly.
// Fixes vs. original: CUDA events are now destroyed; usage-message typo.
int main ( int argc, char * argv [] ) {
    if (argc != 4){
        printf("Too few args\n");   /* fix: was "To few args" */
        return 1;
    }
    int n = atoi(argv[1]);
    printf("n = %d\n", n);
    int steps;
    float *hA, *hB, *hC;
    float *devA, *devB, *devC;
    float devS1, devS2, elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    elapsedTime = 0.0f;
    cudaEventRecord(start, 0);
    hA = (float *) malloc(n*sizeof(float));
    hB = (float *) malloc(n*sizeof(float));
    hC = (float *) malloc(n*sizeof(float));
    cudaMalloc ( &devA, n*sizeof(float));
    cudaMalloc ( &devB, n*sizeof(float));
    cudaMalloc ( &devC, n*sizeof(float));
    for (int i = 0; i < n; i++) {
        hA[i] = i;
        hB[i] = n-i;
    }
    devS1 = 15.4;
    devS2 = 4.4;
    dim3 block(atoi(argv[3]));
    dim3 grid(atoi(argv[2]));
    /* Elements per thread; truncating division -- see NOTE above. */
    steps=(int)n/(atoi(argv[3])*atoi(argv[2]));
    printf("block = %d, grid = %d, threads = %d, steps = %d\n", block.x, grid.x, block.x*grid.x, steps);
    cudaMemcpy ( devA, hA, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy ( devB, hB, n*sizeof(float), cudaMemcpyHostToDevice);
    Add<<<grid, block>>> ( n, devA, devB, devC, devS1, devS2, steps);
    cudaMemcpy ( hC, devC, n*sizeof(float), cudaMemcpyDeviceToHost );
    for (int i = n-10; i < n; i++) printf("devC[%d] = %.3f\n", i, hC[i]);
    cudaFree (devC);
    cudaFree (devA);
    cudaFree (devB);
    free(hA);
    free(hB);
    free(hC);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("\n\ntime = %.5f millisec\n", elapsedTime);
    cudaEventDestroy(start);   /* fix: events were leaked */
    cudaEventDestroy(stop);
    return 0;
}
// Yi =(S1^m - 3Bi)/(Ai + S2) |
2,815 | //Submitted by GAutham M 15co118 and yashwanth 15co154
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include <time.h>
// Pairwise add: d_out[i] = da_in[i] + db_in[i], one thread per element.
// Fix/generalization: the original hard-coded the 100-thread block width
// (blockIdx.x*100); blockDim.x keeps the kernel correct for any block
// size and produces identical indices under the file's <<<160,100>>>
// launch.  Note there is no bounds guard, as in the original: the caller
// must size the buffers to at least gridDim.x * blockDim.x elements.
__global__ void func(float *da_in,float *db_in,float *d_out)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    d_out[idx] = da_in[idx] + db_in[idx];
}
// Iterative pairwise GPU reduction: repeatedly halves the array by adding
// its top half onto its bottom half with `func`, until one element holds
// the total.  Odd lengths are padded with a trailing zero first.
// NOTE(review): array_bytes1 += 8 when padding assumes sizeof(float)==8;
// sizeof(float) is 4, so the byte count drifts -- verify.
// NOTE(review): the kernel is always launched with 160*100 = 16000
// threads even as temp (and the buffers) shrink, so later iterations
// write far past the end of d_out -- verify against compute-sanitizer.
// NOTE(review): cudaMalloc/cudaFree inside the loop is costly; buffers
// could be allocated once at full size and reused.
int main()
{
const int array_size = 16000;
const int array_bytes = array_size* sizeof(float);
float a_in[array_size],b_in[array_size];
for(int i=0;i<array_size;i++)
{
a_in[i] = float(i);
b_in[i]=float(i);
}
/*for(int i=0;i<array_size;i++)
{
b_in[i]=rand()%16000;
}*/
float h_out[array_size];
float *da_in;
float *db_in;
float *d_out;
int temp=array_size;          // current logical length of a_in
int array_bytes1=array_bytes; // byte size tracking temp (see NOTE above)
time_t t,t1;
srand((unsigned) time(&t));
t1=clock();
while(temp>1)
{
//printf("abc");
if((temp)%2==1)
{
// Odd length: append a zero so the halves pair up evenly.
a_in[temp]=0;
//printf("con fail\n");
temp++;
array_bytes1+=8;
}
temp=temp/2;
array_bytes1/=2;
cudaMalloc((void **)&da_in,array_bytes1);
cudaMalloc((void **)&db_in,array_bytes1);
cudaMalloc((void **)&d_out,array_bytes1);
// Bottom half -> da_in, top half -> db_in; func adds them pairwise.
cudaMemcpy(da_in,a_in,array_bytes1,cudaMemcpyHostToDevice);
cudaMemcpy(db_in,a_in+(temp),array_bytes1,cudaMemcpyHostToDevice);
//kernel
func<<<dim3(160,1,1),dim3(100,1,1)>>>(da_in,db_in,d_out);
//copying back
cudaMemcpy(h_out,d_out,array_bytes1,cudaMemcpyDeviceToHost);
for(int i=0;i<temp;i++)
{
// a_in[i]=h_out[i];
// printf("%d=%f",i+1,h_out[i]);
// printf(((i%4)!=3)? "\t":"\n");
a_in[i]=h_out[i];
}
cudaFree(da_in);
cudaFree(d_out);
cudaFree(db_in);
//printf("\n");
}
t1=clock()-t1;
double time_taken = ((double)t1)/CLOCKS_PER_SEC;
printf("parallel execution gave answer as%f- time taken as %f\n",a_in[0],time_taken);
}
2,816 | /* Computes quadrature rules (i.e. circumference) for unit circle in 2D */
/* Adapted from: https://people.sc.fsu.edu/~jburkardt/c_src/circle_rule/circle_rule.html */
#include <stdio.h>
#define NUM_ANGLES 100000
#define PI 3.14159265358
#define F(x,y) x*y
#define CUDA_BLOCK_X 128
#define CUDA_BLOCK_Y 1
#define CUDA_BLOCK_Z 1
// Elementwise quadrature term: Q[k] = w[k] * x[k] * y[k] for
// k = 0..99999.  The auto-generated indexing is 1-based: thread IDs
// 1..100000 map to element id-1 (thread 0 idles).
__global__ void _auto_kernel_1(float w[100000],float Q[100000],float x[100000],float y[100000])
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= 1 && id <= 100000) {
        int k = id - 1;
        Q[k] = w[k] * x[k] * y[k];
    }
}
// Initializes the quadrature weights (uniform 1/100000) and angles
// (2*pi*k/100000, using the generator's 6.28319 constant) for
// k = 0..99999.  Thread IDs are 1-based, as in _auto_kernel_1.
__global__ void _auto_kernel_0(float w[100000],float a[100000])
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= 1 && id <= 100000) {
        int k = id - 1;
        w[k] = (float)(1.0 / (double)100000.0f);
        a[k] = (float)(6.28319 * (double)(float)k / (double)100000.0f);
    }
}
/* Circle-rule quadrature: builds weights/angles on the GPU, cos/sin on
 * the host, multiplies w*x*y on the GPU, then reduces on the host and
 * scales by the circumference 2*pi.
 * Fixes vs. original: device buffers are now freed after each region
 * (d_w/d_a and d_w/d_Q/d_x/d_y were all leaked); redundant copy-backs of
 * arrays the second kernel never writes were dropped; unused locals
 * removed. */
int main()
{
    int i_nom_3;
    int i_nom_1;
    /* Weights */
    float w[100000];
    /* Angles */
    float a[100000];
    /* Result */
    float Q[100000];
    {
        /* Auto-generated code for call to _auto_kernel_0 */
        float *d_w;
        cudaMalloc((void **) &d_w, sizeof(float ) * 100000);
        cudaMemcpy(d_w, w, sizeof(float ) * 100000, cudaMemcpyHostToDevice);
        float *d_a;
        cudaMalloc((void **) &d_a, sizeof(float ) * 100000);
        cudaMemcpy(d_a, a, sizeof(float ) * 100000, cudaMemcpyHostToDevice);
        int CUDA_GRID_X = (100000 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
        int CUDA_GRID_Y = (1 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
        int CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
        const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
        const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
        _auto_kernel_0<<<CUDA_gridSize,CUDA_blockSize>>>(d_w, d_a);
        cudaMemcpy(w, d_w, sizeof(float ) * 100000, cudaMemcpyDeviceToHost);
        cudaMemcpy(a, d_a, sizeof(float ) * 100000, cudaMemcpyDeviceToHost);
        cudaFree(d_w);   /* fix: leaked in the original */
        cudaFree(d_a);
    }
    /* Useful sin/cos values */
    float x[100000];
    float y[100000];
    for (i_nom_1 = 1; i_nom_1 <= 100000; i_nom_1 += 1) {
        x[1 * i_nom_1 + -1] = (cos(a[1 * i_nom_1 + -1]));
        y[1 * i_nom_1 + -1] = (sin(a[1 * i_nom_1 + -1]));
    }
    {
        /* Auto-generated code for call to _auto_kernel_1 */
        float *d_w;
        cudaMalloc((void **) &d_w, sizeof(float ) * 100000);
        cudaMemcpy(d_w, w, sizeof(float ) * 100000, cudaMemcpyHostToDevice);
        float *d_Q;
        cudaMalloc((void **) &d_Q, sizeof(float ) * 100000);
        cudaMemcpy(d_Q, Q, sizeof(float ) * 100000, cudaMemcpyHostToDevice);
        float *d_x;
        cudaMalloc((void **) &d_x, sizeof(float ) * 100000);
        cudaMemcpy(d_x, x, sizeof(float ) * 100000, cudaMemcpyHostToDevice);
        float *d_y;
        cudaMalloc((void **) &d_y, sizeof(float ) * 100000);
        cudaMemcpy(d_y, y, sizeof(float ) * 100000, cudaMemcpyHostToDevice);
        int CUDA_GRID_X = (100000 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
        int CUDA_GRID_Y = (1 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
        int CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
        const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
        const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
        _auto_kernel_1<<<CUDA_gridSize,CUDA_blockSize>>>(d_w, d_Q, d_x, d_y);
        /* Only Q is written by the kernel; copy just that back. */
        cudaMemcpy(Q, d_Q, sizeof(float ) * 100000, cudaMemcpyDeviceToHost);
        cudaFree(d_w);   /* fix: leaked in the original */
        cudaFree(d_Q);
        cudaFree(d_x);
        cudaFree(d_y);
    }
    double sum = 0;
    for (i_nom_3 = 1; i_nom_3 <= 100000; i_nom_3 += 1) {
        sum += Q[1 * i_nom_3 + -1];
    }
    double result = 2 * 3.14159265358 * sum;
    /* Report the result */
    printf("Result: %f\n",result);
    return 0;
}
|
extern "C"
// Fills A[0..numElements) with the scalar alpha; one thread per element.
__global__ void vectorScalarSet(float* A, float alpha, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    A[idx] = alpha;
}
extern "C"
// B[i] = A[i] + alpha over numElements floats.
__global__ void vectorScalarAdd(const float* __restrict__ A, float* B, float alpha, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    B[idx] = A[idx] + alpha;
}
extern "C"
// B[i] = natural log of A[i] (single-precision overload).
__global__ void vectorLog(const float* __restrict__ A, float* B, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    B[idx] = log(A[idx]);
}
extern "C"
// B[i] = e raised to A[i] (single-precision overload).
__global__ void vectorExp(const float* __restrict__ A, float* B, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    B[idx] = exp(A[idx]);
}
extern "C"
// B[i] = +1 when A[i] > 0, -1 otherwise (zero maps to -1).
// Fix: the original used double literals (0.0, 1.0, -1.0) in an all-float
// kernel, forcing double-precision promotion; float literals produce the
// same values without the promotion.
__global__ void vectorSign(const float* __restrict__ A, float* B, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        B[i] = (A[i] > 0.0f ? 1.0f : -1.0f);
    }
}
extern "C"
// B[i] = |A[i]|.
// Fix: unqualified abs() on a float can bind to the integer overload
// depending on headers/compilation mode, silently truncating; fabsf is
// the unambiguous single-precision absolute value.
__global__ void vectorAbs(const float* __restrict__ A, float* B, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        B[i] = fabsf(A[i]);
    }
}
extern "C"
// C[i] = A[i] / B[i]; no guard against zero divisors (IEEE inf/nan result).
__global__ void vectorDiv(const float* __restrict__ A, const float* __restrict__ B, float* C, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    C[idx] = A[idx] / B[idx];
}
extern "C"
// C[i] = A[i] * B[i], elementwise.
__global__ void vectorMul(const float* __restrict__ A, const float* __restrict__ B, float* C, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    C[idx] = A[idx] * B[idx];
}
extern "C"
// B[i] = max(A[i], val): clamps each element from below by val.
__global__ void vectorMax(const float* __restrict__ A, float* B, float val, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    B[idx] = max(A[idx], val);
}
extern "C"
// B[i] = min(A[i], val): clamps each element from above by val.
__global__ void vectorMin(const float* __restrict__ A, float* B, float val, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    B[idx] = min(A[idx], val);
}
extern "C"
// B[i] = A[i] raised to val, computed in double precision (as in the
// original) and truncated back to float on store.
__global__ void vectorPow(const float* __restrict__ A, float* B, float val, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    double base = (double) A[idx];
    double expo = (double) val;
    B[idx] = pow(base, expo);
}
extern "C"
// B[i] = A[i]^2 via a single load and one multiply.
__global__ void vectorSqr(const float* __restrict__ A, float* B, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    float a = A[idx];
    B[idx] = a * a;
}
extern "C"
// B[i] = square root of A[i] (negative inputs yield NaN, as IEEE sqrt).
__global__ void vectorSqrt(const float* __restrict__ A, float* B, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    B[idx] = sqrt(A[idx]);
}
|
2,818 | /*
simple wrapper to utility cuda routines
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Returns the number of CUDA-capable devices.  Stays at -1 if the query
// fails before writing the output (the runtime error is not checked).
extern "C" int CountDevices()
{
    int device_count = -1;
    cudaGetDeviceCount(&device_count);
    return device_count;
}
// Binds the calling host thread to the given device ordinal.
extern "C" void SetDevice(int gpu_id)
{
    cudaSetDevice(gpu_id);
}
// Returns the device ordinal currently bound to the calling host thread,
// or -1 if the query fails before writing the output.
extern "C" int GetDevice()
{
    int current_gpu = -1;
    cudaGetDevice(&current_gpu);
    return current_gpu;
}
|
2,819 | #include "includes.h"
// Mean of the k x k window centered near (x, y), computed in O(1) from
// what appears to be a summed-area (integral) image in input_img
// (rowsize = row length dimx, colsize = number of rows dimy).  The window
// is clamped to the image and the divisor is the clamped pixel count.
// German locals: unten=bottom-right corner sum, links=left, oben=top,
// obenlinks=top-left.
// NOTE(review): a standard integral-image mean over
// [starty..stopy] x [startx..stopx] would fetch S(stopy,stopx),
// S(stopy,startx-1), S(starty-1,stopx), S(starty-1,startx-1); here the
// `unten` and `oben` fetches use column startx rather than stopx -- this
// only covers the intended window if input_img's layout differs from the
// usual convention.  Verify against the producer of input_img.
__device__ double efficientLocalMean_dev (const long x,const long y,const long k, double * input_img, int rowsize, int colsize) {
long k2 = k/2;
long dimx = rowsize;
long dimy = colsize;
//wanting average over area: (y-k2,x-k2) ... (y+k2-1, x+k2-1)
long starty = y-k2;
long startx = x-k2;
long stopy = y+k2-1;
long stopx = x+k2-1;
// Clamp the window to the image bounds.
if (starty < 0) starty = 0;
if (startx < 0) startx = 0;
if (stopx > dimx-1) stopx = dimx-1;
if (stopy > dimy-1) stopy = dimy-1;
double unten, links, oben, obenlinks;
// Corner fetches; out-of-image corners contribute 0.
if (startx-1 < 0) links = 0;
else links = *(input_img+(stopy * dimx + startx-1));
if (starty-1 < 0) oben = 0;
else oben = *(input_img+((stopy-1) * dimx + startx));
if ((starty-1 < 0) || (startx-1 <0)) obenlinks = 0;
else obenlinks = *(input_img+((stopy-1) * dimx + startx-1));
unten = *(input_img+(stopy * dimx + startx));
// Number of pixels actually inside the clamped window.
long counter = (stopy-starty+1)*(stopx-startx+1);
return (unten-links-oben+obenlinks)/counter;
}
// Computes the lenOf_ak-sized local mean at every pixel of the
// colsize x rowsize image and stores it in output_ak (row-major with
// rowsize as the row stride, matching the index below).
// Note the axis convention: y is derived from the x grid dimension and x
// from the y grid dimension; configure the launch accordingly.
__global__ void process_coarseness_ak_pix(double * output_ak,double * input_img,int colsize, int rowsize,long lenOf_ak)
{
    int y = threadIdx.x + blockIdx.x * blockDim.x;
    int x = threadIdx.y + blockIdx.y * blockDim.y;
    if (y >= colsize || x >= rowsize) return;   // guard partial blocks
    output_ak[y * rowsize + x] = efficientLocalMean_dev(x, y, lenOf_ak, input_img, rowsize, colsize);
}
2,820 | #include <iostream>
#include <math.h>
#include <functional>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#define ROW_TILE_WIDTH 32
#define COL_TILE_WIDTH 32
// One thread computes one element of C = A * B.
// A: C_rows x width, B: width x C_cols, C: C_rows x C_cols (row-major).
// Launch with a 2D grid covering C; overhanging threads exit early.
template<typename T>
__global__
void naive_matrix_multiply(T *A, T *B, T* C, int width, int C_rows, int C_cols)
{
  int r = blockIdx.y * blockDim.y + threadIdx.y;
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (r >= C_rows || c >= C_cols) return;
  T acc = 0;
  for (int k = 0; k < width; ++k)
    acc += A[r * width + k] * B[k * C_cols + c];
  C[r * C_cols + c] = acc;
}
// Fills the rows x cols row-major matrix M by invoking the generator F
// once per element, in row-major order (so generator state advances
// deterministically).
template<typename T>
void initialize_matrix(T* M, int rows, int cols, std::function<float()> F) {
  for(int r = 0; r < rows; ++r)
    for(int c = 0; c < cols; ++c)
      M[r * cols + c] = F();
}
// CPU reference GEMM: C = A * B, where A is C_rows x width and B is
// width x C_cols (all row-major).  Accumulation order matches the GPU
// kernel so results are comparable.
template<typename T>
void naive_matrix_multiply_cpu(T *A, T *B, T* C, int width, int C_rows, int C_cols){
  for(int r = 0; r < C_rows; ++r){
    for(int c = 0; c < C_cols; ++c){
      T acc = 0.0f;
      for(int k = 0; k < width; ++k)
        acc += A[r * width + k] * B[k * C_cols + c];
      C[r * C_cols + c] = acc;
    }
  }
}
// Returns true iff A1 and A2 agree elementwise within an absolute
// tolerance of 1e-5, over a rows x cols row-major layout.
template<typename T>
bool check_equal(T* A1, T* A2, int rows, int cols){
  const int total = rows * cols;
  for(int k = 0; k < total; ++k){
    if(abs(A1[k] - A2[k]) > 0.00001)
      return false;
  }
  return true;
}
// Driver: random A (256x1024) and B (1024x4096), GPU naive GEMM checked
// against a CPU reference.
// Fixes vs. original: the managed buffer C_cpu was never freed; a launch
// error check was added after the kernel call.
int main(void)
{
  int A_rows = 1 << 8;
  int A_cols = 1 << 10;
  int B_rows = A_cols;
  int B_cols = 1 << 12;
  int C_rows = A_rows;
  int C_cols = B_cols;
  int A_size = A_rows * A_cols;
  int B_size = B_rows * B_cols;
  int C_size = C_rows * C_cols;
  float *A, *B, *C, *C_cpu;
  // Allocate Unified Memory – accessible from CPU or GPU
  cudaMallocManaged(&A, A_size*sizeof(float));
  cudaMallocManaged(&B, B_size*sizeof(float));
  cudaMallocManaged(&C, C_size*sizeof(float));
  cudaMallocManaged(&C_cpu, C_size*sizeof(float));
  // initialize A and B matrices
  auto all_ones = []() -> float {
    return 1.0f;
  };
  srand (time(NULL));
  // Random small whole numbers (0..999) keep float sums exact enough for
  // the equality check against the CPU reference.
  auto rand_numbers = []() -> float {
    auto f = static_cast<float>(rand())/(static_cast<float>(RAND_MAX/1000));
    int n = static_cast<int>(f);
    return static_cast<float>(n);
  };
  initialize_matrix<float>(A, A_rows, A_cols, rand_numbers);
  initialize_matrix<float>(B, B_rows, B_cols, rand_numbers);
  // C dimensions are powers of two, so the tile division is exact.
  dim3 dim_grid(C_cols/COL_TILE_WIDTH, C_rows/ROW_TILE_WIDTH, 1);
  dim3 dim_block(COL_TILE_WIDTH, ROW_TILE_WIDTH, 1);
  naive_matrix_multiply<float><<<dim_grid, dim_block>>>(A, B, C, A_cols, C_rows, C_cols);
  if (cudaGetLastError() != cudaSuccess) {   // fix: surface launch failures
    std::cout << "kernel launch failed" << std::endl;
    return 1;
  }
  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();
  // check results
  naive_matrix_multiply_cpu<float>(A, B, C_cpu, A_cols, C_rows, C_cols);
  if(check_equal<float>(C, C_cpu, C_rows, C_cols))
    std::cout << "PASS" << std::endl;
  else
    std::cout << "FAIL" << std::endl;
  // Free memory
  cudaFree(A);
  cudaFree(B);
  cudaFree(C);
  cudaFree(C_cpu);   // fix: leaked in the original
  return 0;
}
|
2,821 | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
using namespace std;
#define SUBMATRIX_SIZE 50000
#define NUM_BIN 100
#define HIST_MIN 0.0
#define HIST_MAX 3e9
////////////////////////////////////////////////////////////////////////
// Pair-distance histogram kernel for one SUBMATRIX_SIZE x SUBMATRIX_SIZE tile.
// Each thread owns particle (xind + global thread id) and scans the tile's
// column range [yind, yind + SUBMATRIX_SIZE), binning each pair distance into
// the thread's OWN (NUM_BIN+2)-wide row of dev_hist, so no atomics are needed.
// Bin 0 is underflow, bin NUM_BIN+1 is overflow, bins 1..NUM_BIN are the data.
// NOTE(review): there is no bounds check of idx or i against the particle
// count — correctness relies on NUM_PARTICLES being a multiple of
// SUBMATRIX_SIZE at the call site; confirm before reuse.
__global__ void distance(float *x, float *y, float *z, int xind, int yind, int *dev_hist)
{
//int idx = xind * blockDim.x + yind;
// Local row inside the tile; shifted by xind to a global particle index.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int thread_idx = idx;
idx += xind;
// Cache this thread's particle coordinates in registers.
float x_idx = x[idx], y_idx =y[idx], z_idx = z[idx];
float dist_x, dist_y, dist_z, dist;
//int max = SUBMATRIX_SIZE*
int ymax = yind + SUBMATRIX_SIZE;
int bin_index;
int offset = 0;
for(int i=yind; i<ymax; i++)
{
//if(idx != i)
// Only the upper triangle (idx > i): each unordered pair is counted once.
if(idx > i)
{
dist_x = x_idx - x[i];
dist_y = y_idx - y[i];
dist_z = z_idx - z[i];
dist = sqrt(dist_x * dist_x + dist_y * dist_y + dist_z * dist_z);
if(dist < HIST_MIN)
bin_index = 0;
else if(dist >= HIST_MAX)
bin_index = NUM_BIN + 1;
else
// +1 skips the underflow slot. (HIST_MIN is 0.0, so dividing by HIST_MAX
// rather than the range is equivalent here.)
bin_index = int(((dist - HIST_MIN) * NUM_BIN / HIST_MAX) +1);
//bin_index = 5;
//dev_hist[bin_index]++;
// Offset into this thread's private histogram row.
offset = ((NUM_BIN+2)*thread_idx);
bin_index += offset;
//dev_hist[i] = xind;
//dev_hist[i+idx] = idx;
dev_hist[bin_index]++;
//dev_hist[0+offset] = blockDim.x;
//dev_hist[1+offset] = blockIdx.x;
//dev_hist[2+offset] = threadIdx.x;
//dev_hist[3+offset] = thread_idx;
//dev_hist[4+offset] = idx;
//dev_hist[5+offset] = yind;
//dev_hist[6+offset] = ymax;
}
}
//dev_hist[0 + (threadIdx.x*12)] = threadIdx.x;
/*
for (int i=0;i<10;i++)
{
offset = i*12;
//offset = 0.0;
dev_hist[threadIdx.x+offset] = threadIdx.x;
//dev_hist[offset] = threadIdx.x;
//dev_hist[offset] = 999;
}
*/
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Host driver: reads particle positions from the input file, tiles the
// pair-distance computation over SUBMATRIX_SIZE x SUBMATRIX_SIZE submatrices,
// folds the per-thread histograms into one global histogram, and writes it
// out as "bin_midpoint , count" lines.
int main(int argc, char **argv)
{
    float *dev_pos_x, *dev_pos_y, *dev_pos_z;
    float *pos_x, *pos_y, *pos_z;
    int NUM_PARTICLES;
    if (argc < 3)
    {
        printf("\nMust pass in cluster_data file on command line!\n");
        // BUG FIX: the original passed argv[0] with no %s conversion, so the
        // program name was silently dropped (and the extra argument is UB).
        printf("\nUsage: %s", argv[0]);
        printf(" <cluster_data file> <distances file> \n\n");
        exit(1);
    }
    FILE *infile, *outfile;
    infile = fopen(argv[1], "r");
    outfile = fopen(argv[2], "w");
    // Fail early instead of crashing in fscanf/fprintf on a bad path.
    if (infile == NULL || outfile == NULL)
    {
        printf("couldn't open input or output file\n");
        exit(1);
    }
    ////////////////////////////////////////////////////////////////////////
    // Read in the cluster_data file: a count line followed by
    // "x , y , z ," style rows (the %s fields swallow the separators).
    ////////////////////////////////////////////////////////////////////////
    char dummy[256];
    fscanf(infile, "%d", &NUM_PARTICLES);
    int size = NUM_PARTICLES * sizeof(float);
    printf("# particles: %d\n", NUM_PARTICLES);
    pos_x = (float*)malloc(size);
    pos_y = (float*)malloc(size);
    pos_z = (float*)malloc(size);
    for (int i = 0; i < NUM_PARTICLES; i++)
    {
        // BUG FIX: pass the array itself for %s, not &dummy — &dummy has
        // type char(*)[256], which does not match what %s expects.
        fscanf(infile, "%e %s %e %s %e %s", &pos_x[i], dummy, &pos_y[i], dummy, &pos_z[i], dummy);
    }
    ////////////////////////////////////////////////////////////////////////
    // Histogram allocation: one private (NUM_BIN+2)-wide row per kernel
    // thread so threads never contend on the same counter.
    ////////////////////////////////////////////////////////////////////////
    int *hist, *dev_hist;
    int size_hist = SUBMATRIX_SIZE * (NUM_BIN + 2);
    int size_hist_bytes = size_hist * sizeof(int);
    hist = (int*)malloc(size_hist_bytes);
    memset(hist, 0, size_hist_bytes);
    printf("size_hist: %d\n", size_hist_bytes);
    cudaMalloc((void **) &dev_hist, (size_hist_bytes));
    cudaMemset(dev_hist, 0, size_hist_bytes);
    // Global accumulator, wide enough to hold counts from every tile.
    unsigned long *hist_array;
    hist_array = (unsigned long*)malloc((NUM_BIN + 2) * sizeof(unsigned long));
    memset(hist_array, 0, (NUM_BIN + 2) * sizeof(unsigned long));
    // One thread per row of a submatrix tile, split across 100 blocks.
    dim3 grid, block;
    grid.x = 100;
    block.x = SUBMATRIX_SIZE / grid.x;
    cudaMalloc((void **) &dev_pos_x, size);
    cudaMalloc((void **) &dev_pos_y, size);
    cudaMalloc((void **) &dev_pos_z, size);
    // Check to see if we allocated enough memory.
    if (0 == dev_pos_z || 0 == dev_pos_y || 0 == dev_pos_x || 0 == dev_hist)
    {
        printf("couldn't allocate memory\n");
        return 1;
    }
    // Initialize arrays to all 0's, then upload the positions.
    cudaMemset(dev_pos_x, 0, size);
    cudaMemset(dev_pos_z, 0, size);
    cudaMemset(dev_pos_y, 0, size);
    cudaMemcpy(dev_pos_x, pos_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_pos_y, pos_y, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_pos_z, pos_z, size, cudaMemcpyHostToDevice);
    int x, y;
    int num_submatrices = NUM_PARTICLES / SUBMATRIX_SIZE;
    int bin_index = 0;
    // Tile over all (row, column) submatrix pairs; the kernel itself counts
    // only pairs with idx > i, so each unordered pair is counted once.
    for (int k = 0; k < num_submatrices; k++)
    {
        y = k * SUBMATRIX_SIZE;
        for (int j = 0; j < num_submatrices; j++)
        {
            x = j * SUBMATRIX_SIZE;
            cudaMemset(dev_hist, 0, size_hist_bytes);
            distance<<<grid, block>>>(dev_pos_x, dev_pos_y, dev_pos_z, x, y, dev_hist);
            cudaMemcpy(hist, dev_hist, size_hist_bytes, cudaMemcpyDeviceToHost);
            // Fold this tile's per-thread rows into the global histogram.
            for (int m = 0; m < size_hist; m++)
            {
                bin_index = m % (NUM_BIN + 2);
                hist_array[bin_index] += hist[m];
            }
        }
    }
    unsigned long total = 0;
    float bin_width = (HIST_MAX - HIST_MIN) / NUM_BIN;
    float bins_mid = 0;
    for (int k = 0; k < NUM_BIN + 2; k++)
    {
        if (k > 0)
            bins_mid = bin_width * (k - 0.5);
        else
            bins_mid = -1.;  // the underflow bin has no real midpoint
        fprintf(outfile, "%.3e %s %lu \n", bins_mid, ",", hist_array[k]);
        printf("hist: %lu \n", hist_array[k]);
        total += hist_array[k];
        printf("total: %lu \n", total);
    }
    printf("total: %lu \n", total);
    fclose(infile);
    fclose(outfile);
    free(pos_x);
    free(pos_y);
    free(pos_z);
    free(hist);
    free(hist_array);  // BUG FIX: hist_array was leaked in the original
    cudaFree(dev_pos_x);
    cudaFree(dev_pos_y);
    cudaFree(dev_pos_z);
    cudaFree(dev_hist);
    return 0;
}
//////////////////////////////////////////////////////////////////////
|
2,822 | // Copyright (c) 2017 Madhavan Seshadri
// 2018 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Mandelbrot kernel: one thread per pixel, writing an R,G,B byte triple per
// pixel into out. yStart offsets the row for chunked rendering; n is the
// length of out in bytes.
extern "C" { __global__ void kernel(char *out, int *width, int *height, int *yStart, int* n){
    unsigned int xDim = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yDim = blockIdx.y * blockDim.y + threadIdx.y;
    //index of the output array, multiplied by 3 for R,G,B values
    int arrayIndex = 3 * (*width) * yDim + xDim*3;
    // map the pixel onto the complex plane: re in [-2, 1.25), im in [-1.25, 1.25)
    float xPoint = ((float) (xDim)/(*width)) * 3.25f - 2.0f;
    float yPoint = ((float) (yDim+*yStart)/(*height)) * 2.5f - 1.25f;
    //for calculation of complex number
    float x = 0.0;
    float y = 0.0;
    int iterationCount = 0;
    int numIterations = 256;
    //terminating condition x^2+y^2 < 4 or iterations >= numIterations
    while(y*y+x*x<=4 && iterationCount<(numIterations)){
        float xTemp = x*x-y*y + xPoint;
        y = 2*x*y + yPoint;
        x = xTemp;
        iterationCount++;
    }
    // BUG FIX: the original guarded only arrayIndex < *n but then wrote to
    // arrayIndex+1 and arrayIndex+2, overrunning the buffer by up to two
    // bytes at the tail. Guard the whole triple.
    if (arrayIndex + 2 < *n)
    {
        if(iterationCount == (numIterations)){
            // point stayed bounded: treated as inside the set
            out[arrayIndex] = iterationCount;
            out[arrayIndex+1]=1;
            out[arrayIndex+2]=iterationCount;
        }else{
            // escaped: the iteration count drives the green channel
            out[arrayIndex] = 0;
            out[arrayIndex+1]=iterationCount;
            out[arrayIndex+2]=0;
        }
    }
}
};
|
2,823 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define GRID 1
#define THRDS 256
// Each thread writes its own index into its slot of buf.
// Assumes a single-block launch (only threadIdx.x is used).
__global__ void assign(int * buf)
{
    const int tid = threadIdx.x;
    buf[tid] = tid;
}
// Print size ints from buf, tab-separated, followed by a newline.
void print_buf(int * buf,int size)
{
    for (int i = 0; i < size; ++i)
        printf("%d\t", buf[i]);
    printf("\n");
}
// Minimal peer-to-peer demo: fill a buffer on GPU 0, copy it device-to-device
// to GPU 1 with cudaMemcpyPeer, then pull it back to the host and print it.
// NOTE(review): no error checking anywhere — on a single-GPU machine the
// cudaSetDevice(1)/cudaMalloc calls fail silently; confirm two devices exist.
int main()
{
// host staging buffer
int * buf_h;
// device buffers on GPU 0 and GPU 1 respectively
int * buf_d0;
int * buf_d1;
int size = sizeof(int) * GRID * THRDS;
buf_h = (int*)malloc(size);
cudaSetDevice(0);
cudaMalloc((void **) &buf_d0,size);
assign<<<GRID,THRDS>>> (buf_d0);
cudaSetDevice(1);
cudaMalloc((void **) &buf_d1,size);
// cudaMemcpyPeer(dst, dstDevice, src, srcDevice, bytes)
cudaMemcpyPeer(buf_d1,1,buf_d0,0,size);
// blocking copy back to the host (device 1 is current)
cudaMemcpy(buf_h,buf_d1,size,cudaMemcpyDeviceToHost);
print_buf(buf_h,GRID * THRDS);
cudaFree(buf_d1);
cudaFree(buf_d0);
free(buf_h);
return 0;
}
|
2,824 | #include <stdio.h>
#include <sys/time.h>
#include <time.h>
#define GO_EMPTY 0
#define GO_BLACK 1
#define GO_WHITE 2
#define GO_BORDER 3
const int boardSize = 21;
const int totalSize = boardSize * boardSize;
// State of one intersection of the boardSize x boardSize board
// (including the one-cell border ring around the playable area).
struct BoardPoint{
int color;          // GO_EMPTY / GO_BLACK / GO_WHITE / GO_BORDER
int groupID;        // index identifying the connected group this stone belongs to
int libertyNumber;  // liberty count of that group (written by playBoard)
bool isBlackLegal;  // set by updateLegleMove: black may legally play here
bool isWhiteLegal;  // set by updateLegleMove: white may legally play here
};
// Per-point scratch values copied back to the host for debugging/tracing.
struct DebugFlag{
int counter;           // unused in the live code paths visible here
int changeFlag;        // unused in the live code paths visible here
int targetGroupID[4];  // unused in the live code paths visible here
int libertyCount;      // liberty total computed in updateLegleMove
};
// One thread per board point: mark the outer ring as border, everything
// else empty. CONSISTENCY FIX: uses the named GO_BORDER / GO_EMPTY constants
// instead of the magic numbers 3 and 0 used by the original.
__global__
void initBoard(BoardPoint *boardDevice){
int index = threadIdx.y * boardSize + threadIdx.x;
if (threadIdx.x == 0 || threadIdx.x == boardSize-1 || threadIdx.y == 0 || threadIdx.y == boardSize-1){
boardDevice[index].color = GO_BORDER;
} else {
boardDevice[index].color = GO_EMPTY;
}
//boardDevice[index].groupID = totalSize; // all the initial group ID was set to none group id.
}
// For an EMPTY point, credit one liberty (via atomicAdd into globalLiberty)
// to each DISTINCT group among its four neighbors. The chain of groupID
// comparisons ensures a group touching this empty point through several
// neighbors is counted only once per empty point.
// NOTE(review): assumes index is an interior point so index-1, index+1,
// index-boardSize and index+boardSize stay in range — border points rely on
// the surrounding GO_BORDER ring to make this safe; confirm callers enforce it.
__device__
inline void updateLiberty(BoardPoint *boardDevice, int index, int *globalLiberty){
if (boardDevice[index].color == GO_EMPTY){
// left neighbor's group always receives the liberty
atomicAdd(&globalLiberty[boardDevice[index-1].groupID], 1);
// up neighbor: only if it is a different group than the left one
if (boardDevice[index+boardSize].groupID != boardDevice[index-1].groupID){
atomicAdd(&globalLiberty[boardDevice[index+boardSize].groupID], 1);
}
// right neighbor: only if distinct from left and up
if (boardDevice[index+1].groupID != boardDevice[index-1].groupID &&
boardDevice[index+1].groupID != boardDevice[index+boardSize].groupID){
atomicAdd(&globalLiberty[boardDevice[index+1].groupID], 1);
}
// down neighbor: only if distinct from all three others
if (boardDevice[index-boardSize].groupID != boardDevice[index-1].groupID &&
boardDevice[index-boardSize].groupID != boardDevice[index+1].groupID &&
boardDevice[index-boardSize].groupID != boardDevice[index+boardSize].groupID){
atomicAdd(&globalLiberty[boardDevice[index-boardSize].groupID], 1);
}
}
}
// Play a stone of `color` at (row, col). One thread per board point,
// single-block launch. Phases, separated by block-wide barriers:
//   1. the playing thread writes the stone and records neighbor group IDs;
//   2. every stone in a touching same-color group is merged into group
//      `playPoint`;
//   3. liberties are recounted into shared globalLiberty;
//   4. groups left with zero liberties are removed, and if anything was
//      removed liberties are recounted once more.
// NOTE(review): the border test uses `== boardSize`, but threadIdx never
// reaches boardSize — the actual border ring is at boardSize-1, so border
// threads do NOT take the early return. If they did, the __syncthreads()
// calls below would be reached by only part of the block (classic divergent-
// barrier hazard). Confirm which behavior is intended before changing it.
__global__
void playBoard(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int row, int col, int color){
int index = threadIdx.y*boardSize + threadIdx.x;
int playPoint = row*boardSize + col;
__shared__ int globalLiberty[totalSize]; // shared array to count the liberty of each group.
__shared__ int targetGroupID[4] ;
__shared__ bool hasStoneRemoved;
if (threadIdx.y == 0 || threadIdx.y == boardSize || threadIdx.x == 0 || threadIdx.x == boardSize){
globalLiberty[0] = 0;
return;
}
// Phase 1: only the thread owning the played point runs this. It places the
// stone and snapshots the group IDs of same-colored neighbors (-1 = none).
if (index == playPoint){
boardDevice[index].color = color;
boardDevice[index].groupID = index;
if (boardDevice[index+1].color == color){
targetGroupID[0] = boardDevice[index+1].groupID;
}else{
targetGroupID[0] = -1;
}
if (boardDevice[index-1].color == color){
targetGroupID[1] = boardDevice[index-1].groupID;
}else{
targetGroupID[1] = -1;
}
if (boardDevice[index+boardSize].color == color){
targetGroupID[2] = boardDevice[index+boardSize].groupID;
}else{
targetGroupID[2] = -1;
}
if (boardDevice[index-boardSize].color == color){
targetGroupID[3] = boardDevice[index-boardSize].groupID;
}else{
targetGroupID[3] = -1;
}
}
__syncthreads();
//@todo , check whether this fence is necessory.
__threadfence_block();
// Phase 2: every stone belonging to a touching same-color group is merged
// into the new group anchored at the played point.
if (boardDevice[index].groupID == targetGroupID[0] ||
boardDevice[index].groupID == targetGroupID[1] ||
boardDevice[index].groupID == targetGroupID[2] ||
boardDevice[index].groupID == targetGroupID[3] ){
boardDevice[index].groupID = playPoint;
}
// Phase 3: reset the shared liberty counters and recount.
globalLiberty[index] = 0;
hasStoneRemoved = false;
__syncthreads();
__threadfence_block();
updateLiberty(boardDevice, index, globalLiberty);
__syncthreads();
__threadfence_block();
int libertyNumber = globalLiberty[boardDevice[index].groupID];
// Phase 4: capture — any stone whose group ended with zero liberties is
// removed from the board.
if ( libertyNumber == 0 ){
boardDevice[index].color = GO_EMPTY;
boardDevice[index].groupID = 0;
boardDevice[index].libertyNumber = 0;
hasStoneRemoved = true;
} else {
boardDevice[index].libertyNumber = libertyNumber;
}
__syncthreads();
__threadfence_block();
// Removing stones frees liberties for neighbors, so recount once more.
if (hasStoneRemoved){
globalLiberty[index] = 0;
__syncthreads();
__threadfence_block();
updateLiberty(boardDevice, index, globalLiberty);
__syncthreads();
__threadfence_block();
libertyNumber = globalLiberty[boardDevice[index].groupID];
boardDevice[index].libertyNumber = libertyNumber;
}
//
//
//
// if (boardDevice[index].pointGroup != NULL){
// debugFlagDevice[index].changeFlag = boardDevice[index].pointGroup.numberOfLiberty;
//
// }
//
//
// debugFlagDevice[index].counter++;
// }
//
}
// Swap black and white; anything else (empty/border) maps to GO_EMPTY.
__device__
inline int inverseColor(int color){
switch (color) {
case GO_BLACK: return GO_WHITE;
case GO_WHITE: return GO_BLACK;
default:       return GO_EMPTY;
}
}
// Mark each empty point as a legal/illegal move for `color`. One thread per
// point. A move is considered legal when the placed stone would have at
// least one liberty: each empty neighbor adds 1, and each same-colored
// neighbor contributes its group's liberties minus the one this move fills.
// NOTE(review): same-colored groups touching through multiple neighbors are
// double-counted, and opponent captures are not considered — this is an
// approximation of Go legality; confirm that is acceptable to callers.
__global__
void updateLegleMove(BoardPoint *boardDevice, DebugFlag *debugFlagDevice, int color){
int index = threadIdx.y*boardSize + threadIdx.x;
if (boardDevice[index].color == GO_EMPTY){
int totalLiberty = 0;
// left neighbor
if (boardDevice[index - 1].color == color){
totalLiberty = totalLiberty + boardDevice[index - 1].libertyNumber - 1;
}else if(boardDevice[index - 1].color == GO_EMPTY){
totalLiberty++;
}
// right neighbor
if (boardDevice[index + 1].color == color){
totalLiberty = totalLiberty + boardDevice[index + 1].libertyNumber - 1;
}else if(boardDevice[index + 1].color == GO_EMPTY){
totalLiberty++;
}
// neighbor below
if (boardDevice[index - boardSize].color == color){
totalLiberty = totalLiberty + boardDevice[index - boardSize].libertyNumber - 1;
}else if(boardDevice[index - boardSize].color == GO_EMPTY){
totalLiberty++;
}
// neighbor above
if (boardDevice[index + boardSize].color == color){
totalLiberty = totalLiberty + boardDevice[index + boardSize].libertyNumber - 1;
}else if(boardDevice[index + boardSize].color == GO_EMPTY){
totalLiberty++;
}
debugFlagDevice[index].libertyCount = totalLiberty;
// Legal iff the resulting stone would keep at least one liberty.
if (totalLiberty > 0){
if (color == GO_BLACK){
boardDevice[index].isBlackLegal = true;
}else if (color == GO_WHITE){
boardDevice[index].isWhiteLegal = true;
}
}else{
if (color == GO_BLACK){
boardDevice[index].isBlackLegal = false;
}else if (color == GO_WHITE){
boardDevice[index].isWhiteLegal = false;
}
}
} else {
// Occupied or border points are never legal moves.
if (color == GO_BLACK){
boardDevice[index].isBlackLegal = false;
}else if (color == GO_WHITE){
boardDevice[index].isWhiteLegal = false;
}
}
}
// Driver: initialize the board on the GPU, replay a scripted move sequence,
// copy the final position back and render it as ASCII, then print the
// legal-move maps and the elapsed wall-clock time.
// Cleanup: removed the large blocks of dead commented-out code from the
// original; live statements are unchanged.
int main()
{
BoardPoint boardHost[totalSize];
BoardPoint *boardDevice;
DebugFlag debugFlagHost[totalSize];
DebugFlag *debugFlagDevice;
const int valueSizeDevice = totalSize*sizeof(BoardPoint);
const int debugFlagSize = totalSize*sizeof(DebugFlag);
cudaMalloc( (void**)&boardDevice, valueSizeDevice );
cudaMalloc( (void**)&debugFlagDevice, debugFlagSize );
struct timeval start_tv;
gettimeofday(&start_tv,NULL);
// One block covering the whole board, one thread per point.
dim3 threadShape( boardSize, boardSize );
int numberOfBlock = 1;
initBoard<<<numberOfBlock, threadShape>>>(boardDevice);
// Scripted move sequence (row, col, color). Launches on the default stream
// execute in order, so each move sees the previous board state.
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 10, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 10, 11, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 10, 12, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 11, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 12, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 13, 10, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 13, 9, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 13, 11, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 14, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 1, 1, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 2, 1, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 1, 2, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 5, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 5, 11, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 5, 12, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 6, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 7, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 8, 10, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 8, 9, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 8, 11, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 9, 10, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 19, 19, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 18, 19, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 19, 18, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 10, 4, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 10, 5, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 10, 6, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 11, 4, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 12, 4, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 13, 4, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 13, 3, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 13, 5, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 14, 4, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 17, 16, 2);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 17, 17, 1);
playBoard<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, 15, 12, 1);
//updateLegleMove<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, GO_BLACK);
//updateLegleMove<<<numberOfBlock, threadShape>>>(boardDevice, debugFlagDevice, GO_WHITE);
cudaDeviceSynchronize();
cudaMemcpy( boardHost, boardDevice, valueSizeDevice, cudaMemcpyDeviceToHost );
cudaMemcpy( debugFlagHost, debugFlagDevice, debugFlagSize, cudaMemcpyDeviceToHost );
cudaFree( boardDevice );
cudaFree( debugFlagDevice );
cudaDeviceSynchronize();
struct timeval end_tv;
gettimeofday(&end_tv,NULL);
// Render the final position: o = black, x = white, H = border, . = empty.
for (int i=boardSize-1; i>=0; i--){
for (int j=0; j<boardSize; j++){
int index = i*boardSize + j;
if (boardHost[index].color == 0){
printf(".");
}else if (boardHost[index].color == GO_BLACK){
printf("o");
}else if (boardHost[index].color == GO_WHITE){
printf("x");
}else if (boardHost[index].color == GO_BORDER){
printf("H");
}
}
printf("\n");
}
// Side-by-side legal-move maps: black (left) and white (right).
for (int i=boardSize-1; i>=0; i--){
for (int j=0; j<boardSize; j++){
int index = i*boardSize + j;
if (boardHost[index].color == GO_BORDER){
printf("H");
}else{
if (boardHost[index].isBlackLegal){
printf("o");
}else {
printf(".");
}
}
}
printf(" ");
for (int j=0; j<boardSize; j++){
int index = i*boardSize + j;
if (boardHost[index].color == GO_BORDER){
printf("H");
}else{
if (boardHost[index].isWhiteLegal){
printf("x");
}else {
printf(".");
}
}
}
printf("\n");
}
printf("\n");
// Elapsed wall-clock time, with a manual microsecond borrow when needed.
if(end_tv.tv_usec >= start_tv.tv_usec){
printf("time %lu:%lu\n",end_tv.tv_sec - start_tv.tv_sec, end_tv.tv_usec - start_tv.tv_usec);
}else{
printf("time %lu:%lu\n",end_tv.tv_sec - start_tv.tv_sec - 1, 1000000 - start_tv.tv_usec + end_tv.tv_usec);
}
return EXIT_SUCCESS;
}
|
2,825 | #define N 16
// Every thread with index below N stores 0 into in[0]. The concurrent
// writes all store the same value, matching the original's behavior.
__global__ void k(int* in)
{
    if (threadIdx.x >= N)
        return;
    in[0] = 0;
}
// Allocate a device buffer, launch the kernel once, and clean up.
int main()
{
    int* din;
    cudaMalloc((void**) &din, N*sizeof(int));
    k<<<1,N>>>(din);
    // Surface any launch/execution error before the process exits.
    cudaDeviceSynchronize();
    cudaFree(din);  // BUG FIX: the device allocation was leaked
    return 0;
}
2,826 | #include "includes.h"
// Naive square GEMM: C = A * B for N x N row-major matrices,
// one thread per output element.
__global__ void matrixMul_kernel(float * A, float * B, float * C, int N)
{
    int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    int COL = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original stored C[ROW * N + COL] unconditionally, so
    // grid-tail threads with ROW >= N or COL >= N wrote out of bounds.
    // Keep the store inside the bounds check.
    if (ROW < N && COL < N)
    {
        // each thread computes one element of the block sub-matrix
        float tmpSum = 0;
        for (int i = 0; i < N; i++)
        {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        C[ROW * N + COL] = tmpSum;
    }
}
2,827 | #include<iostream>
#include <cuda.h>
// 1D stencil of radius R over image[0..n), zero-padded at both ends.
// Dynamic shared memory layout (set up by the host wrapper):
//   [mask: 2R+1 floats][image tile with R-wide halos: blockDim.x + 2R + 1]
//   [per-thread outputs: blockDim.x]
// Requires blockDim.x >= 2R+1 so the mask load covers every tap.
__global__ void stencil_kernel(const float* image, const float* mask, float* output, unsigned int n, unsigned int R)
{
    extern __shared__ float shared[];
    float opsum = 0;
    int flag = (int)R;                      // signed copy of R for boundary arithmetic
    float* mk = &shared[0];                 // mask taps
    float* ip = &mk[2*R+1];                 // image tile with halos
    float* op = &ip[blockDim.x+2*R + 1];    // per-thread results
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Center element (zero past the end of the image).
    if (id < n)
    {
        ip[threadIdx.x+R] = image[id];
    }
    else
    {
        ip[threadIdx.x + R] = 0;
    }
    // The first 2R+1 threads stage the mask.
    if (threadIdx.x < 2*R+1)
    {
        mk[threadIdx.x] = mask[threadIdx.x];
    }
    // The first R threads stage the left and right halos.
    if (threadIdx.x < R)
    {
        // BUG FIX: the original tested (id - flag > 0), which zero-padded the
        // element at global index 0 instead of loading image[0].
        if (id - flag >= 0)
        {
            ip[threadIdx.x] = image[id - flag];
        }
        else
        {
            ip[threadIdx.x] = 0;
        }
        if (id + blockDim.x < n)
        {
            ip[blockDim.x + threadIdx.x + R] = image[blockDim.x + id];
        }
        else
        {
            ip[blockDim.x + threadIdx.x + R] = 0;
        }
    }
    // Tile and mask must be fully staged before any thread convolves.
    __syncthreads();
    for (int k = 0; k < (2*R+1); k++)
    {
        opsum += ip[threadIdx.x + k] * mk[k];
    }
    op[threadIdx.x] = opsum;
    if (id < n)
        output[id] = op[threadIdx.x];
}
// Host launcher for stencil_kernel: grid sized by ceil(n / threads_per_block);
// dynamic shared memory covers the mask (2R+1), the padded image tile
// (threads_per_block + 2R + 1) and the per-thread output tile.
__host__ void stencil(const float* image,const float* mask,float* output,unsigned int n,unsigned int R,unsigned int threads_per_block)
{
    const int num_blocks = (n + threads_per_block - 1) / threads_per_block;
    const int shared_bytes =
        (2 * threads_per_block + 2 * R + (2 * R + 1)) * sizeof(float);
    stencil_kernel<<<num_blocks, threads_per_block, shared_bytes>>>(image, mask, output, n, R);
    cudaDeviceSynchronize();
}
|
2,828 | #include <cuda.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
#define PI 3.14159265358979323846
double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets);
// Address of element (i, j) of a row-major 2D array with row length b.
// Parameter m is unused but kept for interface parity with callers.
__device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){
    return &vector[i * b + j];
}
// Address of element (i, j, k) of a row-major 3D array with dimensions
// (?, b, num_assets). Parameter m is unused but kept for interface parity.
__device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b, int num_assets){
    return &matrix[(i * b + j) * num_assets + k];
}
//this function returns transition densities between nodes
// Transition density of the log-price moving from Xold to Xnew over delta_t
// under drift (r - delta - sigma^2/2): the normal pdf of the standardized
// step, scaled by 1/(sigma*sqrt(delta_t)).
__device__ double densityGPU(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
    const double inv_sd = 1 / (sigma * sqrt(delta_t));
    const double z = inv_sd * (Xnew - Xold - (r - delta - 0.5 * sigma * sigma) * delta_t);
    return inv_sd * (1 / sqrt(2 * PI)) * exp(-0.5 * z * z);
}
//this is the payoff function for a geometric call option
// Payoff of a geometric-average call: max(geomMean(exp(X)) - Strike, 0),
// where the geometric mean runs over num_assets log-prices in X.
// Parameters m and b are unused but kept for interface parity with callers.
__device__ double GeometricPayOffCallV(double* X, double m, int b, int num_assets, double Strike){
    double prod = 1;
    for (int l = 0; l < num_assets; ++l) {
        prod *= exp(X[l]);
    }
    const double mean = pow(prod, 1.0 / num_assets);
    return (mean - Strike > 0) ? (mean - Strike) : 0;
}
//this is the payoff function for a geometric put option
// Payoff of a geometric-average put: max(Strike - geomMean(exp(X)), 0),
// where the geometric mean runs over num_assets log-prices in X.
// Parameters m and b are unused but kept for interface parity with callers.
__device__ double GeometricPayOffPutV(double* X, double m, int b, int num_assets, double Strike){
    double prod = 1;
    for (int l = 0; l < num_assets; ++l) {
        prod *= exp(X[l]);
    }
    const double mean = pow(prod, 1.0 / num_assets);
    return (Strike - mean > 0) ? (Strike - mean) : 0;
}
//this function updates the weights for the suboptimal stopping routine
__device__ void S_weights(double* S_Weights, double* X_device, double* S_new, int m, int b, double* sigma_device, double* delta_device, double delta_t, int num_assets, double r , int i, double* weight_denominator_device ){
double sum, w_s;
for(int h=0; h<b; h++){
sum=0;
w_s=1;
for(int kk=0; kk<num_assets; kk++){
w_s*=densityGPU(S_new[kk], *three_dim_indexGPU(X_device, (i+1), h, kk, m, b, num_assets), sigma_device[kk], r, delta_device[kk], delta_t);
}
sum = *two_dim_indexGPU(weight_denominator_device, i, h, m-1, b);
if(sum==0){printf("division by zero in weights function of path estimator\n");}
w_s = (((double)b)*w_s)/sum;
S_Weights[h]=w_s;
}
}
//this kernel function performs the sub optimal stopping rule for the low bias estimate
__global__ void PathEstimatorKernel(double* X_device, double* weight_denominator_device, double* V_device, double* delta_device, double* sigma_device, double* X0_device, int N, double strike, double r, double delta_t, int b, int m, int num_assets, curandState_t* states, double* results_dev, double* asset_amount_device){
int idx =blockDim.x*blockIdx.x + threadIdx.x;
if(idx<N){
double v_0, S_i, Z, C, H, sum, weight; //, w_s, sum_Z;
const int S_N= num_assets;
const int S_W_N= b;
double* S_new;
S_new= new double[S_N];
double* S_Weights;
S_Weights=new double[S_W_N];
int i=0;
do {
if(i==0){
for(int ll=0; ll<num_assets; ll++){
Z=curand_normal_double(&states[idx]);
S_i=X0_device[ll] + (r-delta_device[ll]-0.5*pow(sigma_device[ll], 2))*delta_t + sigma_device[ll]*sqrt(delta_t)*Z;
S_new[ll]=S_i;
}
}
else{
for(int jj=0; jj<num_assets; jj++){
Z=curand_normal_double(&states[idx]);
S_i=S_new[jj] + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
S_new[jj]=S_i;
}
}
if(i<m-1){
S_weights(S_Weights, X_device, S_new, m, b, sigma_device, delta_device, delta_t, num_assets, r, i, weight_denominator_device);
}
double con_val=0; //continuation value variable
sum=0;
if(i==m-1){
C=0;//continuation value at the last time step
}
else{
for(int k=0; k<b; k++){
weight= S_Weights[k];
con_val= *two_dim_indexGPU(V_device, (m-1-i-1), k, m, b);
sum+=(weight) * (con_val);
}
C=(1/(double)b)*sum; //continuation value
}
H= GeometricPayOffCallV(S_new, m, num_assets, num_assets, strike)*exp(-r*delta_t*((i+1)));
i=i+1;
}while(H<C);//this will stop once H is less then the continuation value. at m-1, c=0 therefore m-1 is the max amount of loops.
v_0=H;
results_dev[idx]=v_0;
delete[] S_new;
delete[] S_Weights;
}
}
//this function returns the low bias estimate to the main function. it also allocates memory on the device and initialises the low bias kernel.
//this function returns the low bias estimate to the main function. it also allocates memory on the device and initialises the low bias kernel.
// Uploads the mesh (X), continuation values (V), weight denominators and
// model parameters, runs PathEstimatorKernel over N paths, and returns the
// Monte Carlo average of the stopped payoffs. The curand states in `states`
// persist across calls and are copied back to `States`; they are freed only
// on the final iteration.
double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations, int iterator, int Final_iteration, curandState_t* States, curandState_t* states, int threads ){
	// Report-and-exit on any pending CUDA error (factored out of the nine
	// copy-pasted error blocks in the original).
	auto check = [](int line){
		cudaError_t error = cudaGetLastError();
		if( error != cudaSuccess )
		{
			std::cout << cudaGetErrorString(error) << std::endl;
			printf("found at line %d\n", line);
			exit(1);
		}
	};
	check(__LINE__);
	const int N = Path_estimator_iterations;
	const int m_int = (int)m;
	// Flattened extents of the host arrays mirrored on the device.
	const int X_N = m_int * b * num_assets;   // mesh node coordinates
	const int W_N = (m_int - 1) * b;          // weight denominators per step
	const int V_N = m_int * b;                // continuation values
	double *X_device, *V_device, *weight_denominator_device;
	double *sigma_device, *delta_device, *X0_device, *asset_amount_device;
	cudaMalloc((void**) &X_device, X_N*sizeof(double) );
	cudaMemcpy(X_device, X, X_N*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void**) &V_device, V_N*sizeof(double) );
	cudaMemcpy(V_device, V, V_N*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void**) &weight_denominator_device, W_N*sizeof(double) );
	cudaMemcpy(weight_denominator_device, weight_denominator, W_N*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void**) &X0_device, num_assets*sizeof(double) );
	cudaMemcpy(X0_device, X0, num_assets*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void**) &sigma_device, num_assets*sizeof(double) );
	cudaMemcpy(sigma_device, sigma, num_assets*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void**) &delta_device, num_assets*sizeof(double) );
	cudaMemcpy(delta_device, delta, num_assets*sizeof(double), cudaMemcpyHostToDevice);
	cudaMalloc((void**) &asset_amount_device, num_assets*sizeof(double) );
	cudaMemcpy(asset_amount_device, asset_amount, num_assets*sizeof(double), cudaMemcpyHostToDevice);
	// BUG FIX: the original copied threads*sizeof(curandState_t*) bytes —
	// the size of a POINTER — truncating the RNG state upload. Copy the full
	// curandState_t records (the copy-back below already did this correctly).
	cudaMemcpy(states, States, threads*sizeof(curandState_t), cudaMemcpyHostToDevice);
	// One thread per simulated path, 512 threads per block.
	dim3 gridDim((int)ceil(N/512.0));
	dim3 blockDim(512);
	check(__LINE__);
	double* results = new double[N];
	double* results_dev;
	cudaMalloc((void**) &results_dev, N*sizeof(double) );
	check(__LINE__);
	PathEstimatorKernel<<<gridDim, blockDim>>>(X_device, weight_denominator_device, V_device, delta_device, sigma_device, X0_device, N, strike, r, delta_t, b, m_int, num_assets, states, results_dev, asset_amount_device);
	cudaDeviceSynchronize();
	check(__LINE__);
	cudaMemcpy(results, results_dev, sizeof(double)*N, cudaMemcpyDeviceToHost);
	check(__LINE__);
	// Persist the advanced RNG states back to the host for the next call.
	cudaMemcpy(States, states, sizeof(curandState_t)*threads, cudaMemcpyDeviceToHost);
	check(__LINE__);
	// Monte Carlo average over all simulated paths.
	double result = 0;
	for(int f=0; f<Path_estimator_iterations; f++){
		result += results[f];
	}
	result = (1/double(N))*result;
	delete[] results;
	check(__LINE__);
	cudaFree(X_device);
	cudaFree(V_device);
	cudaFree(weight_denominator_device);
	cudaFree(sigma_device);
	cudaFree(delta_device);
	cudaFree(X0_device);
	cudaFree(results_dev);
	cudaFree(asset_amount_device);
	// The RNG state buffer is shared across calls; free it only at the end.
	if(iterator==Final_iteration-1){
		cudaFree(states);
	}
	return result;
}
|
2,829 |
#include <stdio.h>
#include <cuda.h>
//#include <cudaMalloc.h>
// Single-thread kernel: *c = *a + *b. All three arguments are device pointers.
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
// Compute 7 + 8 on the GPU with a single-thread kernel and print the result.
int main(void)
{
    const int bytes = sizeof(int);
    int host_a = 7, host_b = 8, host_c;
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, bytes);
    cudaMalloc((void **)&dev_b, bytes);
    cudaMalloc((void **)&dev_c, bytes);
    cudaMemcpy(dev_a, &host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, bytes, cudaMemcpyHostToDevice);
    add<<<1,1>>>(dev_a, dev_b, dev_c);
    // blocking copy; also synchronizes with the kernel above
    cudaMemcpy(&host_c, dev_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    printf("GPU computed value of c (a+b): %d\n", host_c);
    return 0;
}
|
2,830 | #include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define SIZE 900
#define HIDDINLAYERS 2
#define POINTS 583
#define TEST 100
#define ATTRIBUTES 10
// Aborts the program with a diagnostic when a CUDA call reports an error.
// 'file' and 'line' identify the call site (supplied by HANDLE_ERROR).
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err == cudaSuccess) return;   // fast path: nothing to report
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// various squashing functions
__device__ __host__ float g(float value) {
return 1.0/(1.0+exp(-value));
}
// Hard threshold at 0.5: squashes an activation to a binary 0.0 / 1.0 label.
__device__ __host__ float gSquash(float value) {
return (value < 0.5) ? 0.0 : 1.0;
}
// Derivative of the logistic sigmoid: g'(x) = g(x) * (1 - g(x)).
// FIX: the previous version returned g(x) itself (the activation), not its
// derivative, so the back-propagated deltas in gLayerBack/deltaInit were
// scaled incorrectly.
__device__ __host__ float gPrime(float value){
float a = g(value);
return a * (1.0f - a);
}
// used for testing before and after training
// preforms forward propagation
// Host-side forward propagation through one fully connected layer:
// in[i] = bias[i] + sum_j values[j] * weights[j + SIZE*i]; outputs[i] = g(in[i]).
// Used only for the pre/post-training evaluation passes.
__host__ void gLayer2(float *weights, float *values, float *outputs, float *in, float *bias, int weightsLen, int outputsLen){
for (int i = 0; i < outputsLen; ++i)
{
float acc = 0;
for (int j = 0; j < weightsLen; ++j)
{
acc += values[j]*weights[j+SIZE*i];
}
in[i] = acc + bias[i];
outputs[i] = g(in[i]);
}
}
// preforms back propagation on gpu
// Back-propagates error deltas through one layer on the GPU.
// delta holds the upper layer's deltas; outputs receives this layer's:
// outputs[i] = gPrime(in[i]) * sum_j delta[j] * weights[j + SIZE*i].
// Grid-stride loop: any launch shape covers all outputsLen nodes.
__global__ void gLayerBack(float *weights, float *delta, float *outputs, float *in, int inputLen, int outputsLen){
const int stride = gridDim.x*blockDim.x;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < outputsLen; i += stride)
{
float acc = 0.0;
for (int j = 0; j < inputLen; ++j)
{
acc += delta[j]*weights[j+SIZE*i];
}
outputs[i] = gPrime(in[i])*acc;
}
}
// preforms forward propagation on gpu
// Forward propagation through one layer on the GPU:
// in[i] = bias[i] + sum_j values[j] * weights[j + SIZE*i]; outputs[i] = g(in[i]).
// Grid-stride loop over the outputsLen nodes of this layer.
__global__ void gLayer(float *weights, float *values, float *outputs, float *in, float *bias, int weightsLen, int outputsLen){
const int stride = gridDim.x*blockDim.x;
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < outputsLen; i += stride) {
float acc = 0;
for (int j = 0; j < weightsLen; ++j)
{
acc += values[j]*weights[j+SIZE*i];
}
in[i] = acc + bias[i];
outputs[i] = g(in[i]);
}
}
// updates weights once deltas have been calculated
// Applies the gradient step to one layer's weight matrix once the deltas
// are known. Launched on a 2D grid; both dimensions use grid-stride loops,
// so any launch shape covers the full (inputLen x outputLen) matrix.
__global__ void updateWeight(float *weights, float *values, float *delta, float learningRate, int inputLen, int outputLen){
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
while(i < outputLen)
{
while (j < inputLen)
{
// NOTE(review): gLayer reads weights[j + SIZE*i] as "input j -> node i",
// but the step here multiplies values[i] (indexed by the *output* stride)
// with delta[j]. That looks like the two indices are swapped relative to
// the usual gradient w[j + SIZE*i] += lr * values[j] * delta[i] — verify
// against the inputLen/outputLen arguments passed by main's launch.
weights[j+SIZE*i]+= learningRate*values[i]*delta[j];
j += gridDim.y*blockDim.y;
}
// reset the inner (y) index before advancing the outer (x) stride
j = threadIdx.y + blockDim.y * blockIdx.y;
i += gridDim.x*blockDim.x;
}
}
// used for prepping for back propagation
// Seeds back propagation: the delta at the single output node is
// g'(in[0]) * (target - prediction), where dataSet[ATTRIBUTES-1] is the
// class label and values[0] is the network's output activation.
// Intended for a <<<1,1>>> launch (only index 0 is written).
__global__ void deltaInit(float *delta, float *in, float *dataSet, float *values){
delta[0] = gPrime(in[0])*(dataSet[ATTRIBUTES-1]-values[0]);
}
// used for prepping for forward propagation
// Copies one attribute of the current training point into the input layer.
// Launched once per attribute with a <<<1,1>>> grid; i selects the element.
__global__ void valueInit(float *values, float *dataSet, int i){
values[i] = dataSet[i];
}
// Trains a small fully connected network (sigmoid units, 2 hidden layers)
// on the Wisconsin breast-cancer data set: evaluates accuracy on the host
// before training, runs gradient-descent training on the GPU for
// learningTime epochs over POINTS samples, then re-evaluates on the host
// using the copied-back weights. Training wall time is appended to
// data/parallel.dat.
int main(int argc, char const *argv[])
{
FILE *fp=NULL, *inFile=NULL;
clock_t t;
srand (time(NULL));
float weights[(HIDDINLAYERS+1)][SIZE*SIZE]; // each node in a layer conects to all nodes in the previous layer
float bias[(HIDDINLAYERS+1)][SIZE]; // all nodes other than the input layer
float values[(HIDDINLAYERS+2)][SIZE]; // holds the results of the last input
float in[(HIDDINLAYERS+1)][SIZE]; // values before squashing
float dataSet[(POINTS+TEST)][ATTRIBUTES]; // holds datafile
float learningRate = 0.3;
float learningTime = 50;
// device-side mirrors of the arrays above, one pointer per layer/sample
float *weights_d[(HIDDINLAYERS+1)], *bias_d[(HIDDINLAYERS+1)], *values_d[(HIDDINLAYERS+2)], *in_d[(HIDDINLAYERS+1)], *delta_d[(HIDDINLAYERS+1)], *dataSet_d[(POINTS+TEST)];
// read in data
// NOTE(review): fopen/fscanf results are unchecked — a missing or short
// data file crashes or leaves dataSet partially uninitialized.
inFile=fopen("data/breast-cancer-wisconsin.data","r");
for (int i = 0; i < POINTS+TEST; ++i)
{
for (int j = 0; j < ATTRIBUTES; ++j)
{
// update indexes
fscanf(inFile,"%f",&(dataSet[i][j]));
// last column is the class label: remap 4 (malignant) -> 1, else -> 0
if (j==ATTRIBUTES-1){
if (dataSet[i][j]==4.0) dataSet[i][j]=1.0;
else dataSet[i][j] = 0.0;
}
}
}
fclose(inFile);
// initialize weights
// uniform random values in [-1, 1) for every weight and bias
for(int i=0; i<HIDDINLAYERS+1; i++) {
for (int j = 0; j < SIZE; ++j)
{
for (int l = 0; l < SIZE; ++l)
{
weights[i][l+SIZE*j] = ((float) rand() / (RAND_MAX/2))-1;
}
bias[i][j] = ((float) rand() / (RAND_MAX/2))-1;
}
}
// check results pre training
// forward-propagate the TEST holdout samples on the host and count
// how many thresholded outputs match the label
int outputLen=SIZE;
int inputLen=SIZE;
float correct = 0.0;
for (int j=POINTS; j<TEST+POINTS; j++){
for (int i=0; i<ATTRIBUTES-1; i++) {
values[0][i] = dataSet[j][i];
}
for (int i = 0; i < HIDDINLAYERS+1; ++i)
{
outputLen=SIZE;
inputLen=SIZE;
if (i == HIDDINLAYERS) outputLen = 1; // result layer
if (i == 0) inputLen = (ATTRIBUTES-1); // data set layer
gLayer2(weights[i], values[i], values[i+1], in[i], bias[i], inputLen, outputLen);
}
if (dataSet[j][ATTRIBUTES-1]==gSquash(values[HIDDINLAYERS+1][0])) correct += 1;
}
correct = ((float) correct/TEST);
printf("%f\n", correct);
// allocate/mem copy space on device
for (int i = 0; i < (HIDDINLAYERS+1); ++i)
{
HANDLE_ERROR(cudaMalloc((void **) &weights_d[i], sizeof(float)*SIZE*SIZE));
HANDLE_ERROR(cudaMemcpy(weights_d[i], weights[i], sizeof(float)*SIZE*SIZE, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **) &bias_d[i], sizeof(float)*SIZE));
HANDLE_ERROR(cudaMemcpy(bias_d[i], bias[i], sizeof(float)*SIZE, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **) &in_d[i], sizeof(float)*SIZE));
HANDLE_ERROR(cudaMalloc((void **) &delta_d[i], sizeof(float)*SIZE));
}
for (int i = 0; i < (HIDDINLAYERS+2); ++i)
{
HANDLE_ERROR(cudaMalloc((void **) &values_d[i], sizeof(float)*SIZE));
}
for (int i = 0; i < (POINTS+TEST); ++i)
{
HANDLE_ERROR(cudaMalloc((void **) &dataSet_d[i], sizeof(float)*ATTRIBUTES));
HANDLE_ERROR(cudaMemcpy(dataSet_d[i], dataSet[i], sizeof(float)*ATTRIBUTES, cudaMemcpyHostToDevice));
}
// initialize kernel launches
dim3 dimBlock1(32,1);
dim3 dimGrid1(SIZE/32+1,1);
dim3 dimBlock2(1,1);
dim3 dimGrid2(1,1);
dim3 dimBlock3(16,16);
dim3 dimGrid3(SIZE/16+1,SIZE/16+1);
// time training
// (kernels in the same stream run in order, so no explicit sync is
// needed between the forward, backward and update launches)
t = clock();
for (int timeStep=0; timeStep<learningTime; timeStep++) {
for (int point=0; point<POINTS; point++) {
// get current data set
for (int i=0; i<ATTRIBUTES-1; i++) {
valueInit<<<dimGrid2,dimBlock2>>>(values_d[0], dataSet_d[point], i);
HANDLE_ERROR(cudaGetLastError());
}
// forward prop
for (int i = 0; i < HIDDINLAYERS+1; ++i)
{
outputLen=SIZE;
inputLen=SIZE;
if (i == HIDDINLAYERS) outputLen = 1; // result layer
if (i == 0) inputLen = (ATTRIBUTES-1); // data set layer
gLayer<<<dimGrid1,dimBlock1>>>(weights_d[i], values_d[i], values_d[i+1], in_d[i], bias_d[i], inputLen, outputLen);
HANDLE_ERROR(cudaGetLastError());
}
// back prop
deltaInit<<<dimGrid2,dimBlock2>>>(delta_d[HIDDINLAYERS], in_d[HIDDINLAYERS], dataSet_d[point], values_d[HIDDINLAYERS+1]);
HANDLE_ERROR(cudaGetLastError());
// error in previous layers
for (int i = HIDDINLAYERS-1; i > -1; i--)
{
outputLen=SIZE;
inputLen=SIZE;
if (i == HIDDINLAYERS-1) outputLen = (ATTRIBUTES-1); // data set layer
if (i == 0) inputLen = 1; // result layer
gLayerBack<<<dimGrid1,dimBlock1>>>(weights_d[i+1], delta_d[i+1], delta_d[i], in_d[i], inputLen, outputLen);
HANDLE_ERROR(cudaGetLastError());
}
// update weights
for (int i = 0; i < HIDDINLAYERS+1; ++i)
{
outputLen=SIZE;
inputLen=SIZE;
if (i == HIDDINLAYERS) outputLen = 1; // result layer
if (i == 0) inputLen = (ATTRIBUTES-1); // data set layer
updateWeight<<<dimGrid3,dimBlock3>>>(weights_d[i], values_d[i], delta_d[i], learningRate, inputLen, outputLen);
HANDLE_ERROR(cudaGetLastError());
}
}
}
t = clock() - t;
// mem copy
// pull the trained weights and biases back to the host for evaluation
for (int i = 0; i < (HIDDINLAYERS+1); ++i)
{
HANDLE_ERROR(cudaMemcpy(weights[i], weights_d[i], sizeof(float)*SIZE*SIZE, cudaMemcpyDeviceToHost));
// HANDLE_ERROR(cudaMemcpy(weights[i], weights_d[i], sizeof(float)*SIZE*SIZE, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(bias[i], bias_d[i], sizeof(float)*SIZE, cudaMemcpyDeviceToHost));
}
// free pointers
for (int i = 0; i < (HIDDINLAYERS+1); ++i)
{
HANDLE_ERROR(cudaFree(weights_d[i]));
HANDLE_ERROR(cudaFree(bias_d[i]));
HANDLE_ERROR(cudaFree(in_d[i]));
HANDLE_ERROR(cudaFree(delta_d[i]));
}
for (int i = 0; i < (HIDDINLAYERS+2); ++i)
{
HANDLE_ERROR(cudaFree(values_d[i]));
}
for (int i = 0; i < (POINTS+TEST); ++i)
{
HANDLE_ERROR(cudaFree(dataSet_d[i]));
}
// save runtime
fp=fopen("data/parallel.dat", "a");
fprintf (fp, "%d\t%f\n", SIZE,((float)t)/CLOCKS_PER_SEC);
fclose(fp);
// post training results
// same host-side evaluation as before training, with updated weights
correct = 0.0;
for (int j=POINTS; j<TEST+POINTS; j++){
for (int i=0; i<ATTRIBUTES-1; i++) {
values[0][i] = dataSet[j][i];
}
for (int i = 0; i < HIDDINLAYERS+1; ++i)
{
outputLen=SIZE;
inputLen=SIZE;
if (i == HIDDINLAYERS) outputLen = 1; // result layer
if (i == 0) inputLen = (ATTRIBUTES-1); // data set layer
gLayer2(weights[i], values[i], values[i+1], in[i], bias[i], inputLen, outputLen);
}
if (dataSet[j][ATTRIBUTES-1]==gSquash(values[HIDDINLAYERS+1][0])) correct += 1;
}
correct = ((float) correct/TEST);
printf("%f\n", correct);
return 0;
}
2,831 | #include "includes.h"
// For each mine sweeper (one per blockIdx.y), find the mine with the
// smallest distance via a parallel min-reduction over its segment of
// distances_v, then publish that mine's (x, y) position into the
// sweeper's input slots.
//
// Assumes one block in x covers all of a sweeper's mines (the reduction
// relies on __syncthreads, which only synchronizes within a block) and
// that num_mines is a power of two — TODO confirm against the launch code.
__global__ void find_closest_mine(float * mine_pos_v, float * distances_v, int * mineIdx_v, int num_sweeprs, int num_mines, float * inputs)
{
#define sweeperIdx blockIdx.y
#define first_item blockIdx.y*num_mines
// FIX: the global thread index is blockIdx.x * blockDim.x + threadIdx.x;
// the original multiplied by gridDim.x (the block count, not block width).
int my_index = (blockIdx.x * blockDim.x) + threadIdx.x;
mineIdx_v[sweeperIdx * num_mines + my_index] = my_index;
// FIX: reduce down to stride 1 — the original stopped at stride > 1 and
// never merged the final pair of candidates.
for (int stride = num_mines / 2; stride > 0; stride /= 2)
{
__syncthreads();
if (my_index < stride)
{
// FIX: keep the *smaller* distance; this kernel finds the closest
// mine, but the original overwrote the smaller entry with the larger.
if (distances_v[my_index + first_item] > distances_v[my_index + first_item + stride])
{
distances_v[my_index + first_item] = distances_v[my_index + first_item + stride];
mineIdx_v[my_index + first_item] = mineIdx_v[my_index + first_item + stride];
}
}
}
__syncthreads(); // make the reduction result visible before reading it
// FIX: the winning index lives at the start of this sweeper's segment
// (first_item), not at mineIdx_v[sweeperIdx]; one thread publishes it.
if (my_index == 0)
{
int closest = mineIdx_v[first_item];
inputs[sweeperIdx * 4] = mine_pos_v[closest * 2];
inputs[sweeperIdx * 4 + 1] = mine_pos_v[closest * 2 + 1];
}
#undef sweeperIdx
#undef first_item
}
2,832 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define lim 99
#define threads 10
// Dump all lim elements of w to stdout, one per line.
void print(int *w){
    for(int idx = 0; idx < lim; ++idx)
        printf("%d\n", w[idx]);
}
// Initialize w with the ramp 0, 1, ..., lim-1.
void fillVector(int *w){
    int v = 0;
    while (v < lim) {
        w[v] = v;
        ++v;
    }
}
// Element-wise vector addition: d_z[i] = d_x[i] + d_y[i] for i < lim.
// The bounds guard covers the partial last block.
__global__
void add(int *d_x, int *d_y, int *d_z){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= lim) return;
    d_z[idx] = d_x[idx] + d_y[idx];
}
// Adds two lim-length vectors on the GPU and prints the elapsed wall time
// (allocation + copies + kernel + frees) in seconds.
int main(int argc, char const *argv[])
{
    size_t bytes = lim * sizeof(int);
    int *x = (int*)malloc(bytes);
    int *y = (int*)malloc(bytes);
    int *z = (int*)malloc(bytes);
    fillVector(x);
    fillVector(y);

    int *d_x, *d_y, *d_z;
    clock_t begin = clock();
    cudaMalloc((void**)&d_x, bytes);
    cudaMalloc((void**)&d_y, bytes);
    cudaMalloc((void**)&d_z, bytes);
    cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, bytes, cudaMemcpyHostToDevice);

    // ceil-div so a partial final block still covers every element
    int blocks = ceil((float)lim/threads);
    add<<<blocks,threads>>>(d_x, d_y, d_z);

    // this blocking copy also synchronizes with the kernel
    cudaMemcpy(z, d_z, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    clock_t end = clock();

    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("%lf\n", time_spent);

    free(x);
    free(y);
    free(z);
    return 0;
}
2,833 | #include "includes.h"
// Doubles every pixel of an n-wide, m-tall 8-bit image: d_Pout = 2 * d_Pin.
// One thread per pixel; the guard handles partial edge blocks.
__global__ void PictureKernell(unsigned char * d_Pin, unsigned char * d_Pout, int n, int m ){
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= m || col >= n) return;
    int at = row*n + col;
    d_Pout[at] = 2*d_Pin[at];
}
2,834 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
// Prints the key properties of every CUDA device on the system.
void cudaDevicesInfo ()
{
int deviceCount;
cudaDeviceProp deviceProp;
cudaGetDeviceCount (&deviceCount);
for (int device = 0; device < deviceCount; ++device) {
printf ("Device #%d:\n\n", device);
cudaGetDeviceProperties (&deviceProp, device);
printf ("Device name : %s\n", deviceProp.name);
// FIX: totalGlobalMem/totalConstMem/sharedMemPerBlock are size_t; pass
// them as unsigned long long so the "%llu" conversion is well-defined
// on every platform (size_t and unsigned long long can differ).
printf ("Total global memory : %llu MB\n", (unsigned long long)(deviceProp.totalGlobalMem / 1024 / 1024));
printf ("Total constant memory: %llu\n", (unsigned long long)deviceProp.totalConstMem);
printf ("Shared memory per block : %llu\n", (unsigned long long)deviceProp.sharedMemPerBlock);
printf ("Registers per block : %d\n", deviceProp.regsPerBlock);
printf ("Warp size : %d\n", deviceProp.warpSize);
printf ("Max threads per block : %d\n", deviceProp.maxThreadsPerBlock);
printf ("Compute capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf ("Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
printf ("Clock rate: %d\n", deviceProp.clockRate);
printf ("Memory clock rate: %d\n", deviceProp.memoryClockRate);
printf ("L2 cache size: %d\n", deviceProp.l2CacheSize);
printf ("Memory bus width: %d\n", deviceProp.memoryBusWidth);
printf ("Max dimension of a block in grid: %d x %d x %d\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf ("Max dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
}
}
// Entry point: just dump the device information.
int main () {
    cudaDevicesInfo ();
    return 0;
}
2,835 | #include <iostream>
// Lists shared- and global-memory sizes for every CUDA device present.
int main(){
    int dev_count;
    cudaGetDeviceCount(&dev_count);
    cudaDeviceProp dev_prop;
    for (int d = 0; d < dev_count; ++d){
        cudaGetDeviceProperties(&dev_prop, d);
        std::cout << "Device number: " << d << "\n"
                  << "Shared memory per block:" << dev_prop.sharedMemPerBlock << "bytes \n"
                  << "Global Memory:" << dev_prop.totalGlobalMem << "bytes";
    }
    std::cout << std::flush;   // device loop above never flushes
}
|
2,836 | #include "phong_implement.h"
#include "brdf_common.h"
// Evaluates a Phong specular lobe at each vertex of a width-wide grid of
// positions and scales the light vector L by the resulting intensity,
// storing it back into pos. When divideByNdotL > 0 the intensity is
// normalized by N.L (clamped away from zero).
// NOTE(review): there is no bounds guard on x/y, so the launch in
// phong_brdf must cover exactly width*height threads — confirm callers.
// NOTE(review): pow and the 10e-3 literal are double precision inside an
// otherwise float kernel (powf/fmaxf with float literals would avoid the
// promotion) — left as-is to preserve numerics.
__global__ void
phong_kernel(float3* pos, unsigned int width, float3 V, float3 N, float exposure, int divideByNdotL)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float3 L = calculateL(pos, width, x, y);
float3 R = reflect(L, N);
// specular term: (max(0, R.V))^exposure
float val = pow(max(0.f, dot(R,V)), exposure);
if(divideByNdotL > 0)
val = val / max(10e-3, dot(N,L)); // clamp denominator away from zero
pos[y*width+x] = scale(L, val);
}
// Host wrapper: launches phong_kernel over a width x (numVertices/width)
// grid of 8x8 thread blocks.
// NOTE(review): the grid dimensions use integer division, so any remainder
// when width or height is not a multiple of 8 is silently dropped (those
// vertices are never processed) — confirm inputs are multiples of 8.
extern "C" void phong_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float exposure, int divideByNdotL)
{
dim3 block(8, 8, 1);
unsigned height = numVertices / width;
dim3 grid(width / block.x, height / block.y, 1);
phong_kernel<<< grid, block>>>(pos, width, V, N, exposure, divideByNdotL);
}
|
2,837 | #include <stdio.h>
#include <stdlib.h>
// Sets *dev_a to 1 from the single thread identified by (blockid, threadid).
// FIX: the original had every non-matching thread also write 0, racing
// with the matching thread's write of 1 — the final value depended on
// scheduling order. Now only the matching thread writes; the caller is
// expected to zero *dev_a before the launch.
__global__ void isExecuted(int *dev_a, int blockid, int threadid){
if(blockIdx.x == blockid && threadIdx.x == threadid)
*dev_a = 1;
}
// Launches a 100x100 grid and checks that thread (block 2, thread 4) ran,
// printing 1 on success.
int main(){
// Declare variables and allocate memory on the GPU.
int a[1], *dev_a;
cudaMalloc((void**) &dev_a, sizeof(int));
// FIX: zero the flag before the launch so the result does not depend on
// racing writes inside the kernel (only the matching thread needs to
// write 1).
cudaMemset(dev_a, 0, sizeof(int));
// Execute kernel and copy the result to CPU memory.
isExecuted<<<100,100>>>(dev_a, 2, 4); // NOTE: INDEXING OF THREADS AND BLOCKS STARTS FROM 0.
cudaMemcpy(a, dev_a, sizeof(int), cudaMemcpyDeviceToHost);
// Print result and free dynamically allocated memory.
printf("a[0] = %d\n", a[0]); // REMEMBER: INDEXING IN C STARTS FROM 0.
cudaFree(dev_a);
return 0;
}
2,838 | #include "includes.h"
const int Nthreads = 1024, maxFR = 10000, NrankMax = 3, nt0max=81, NchanMax = 17;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// Spike detection: for every time sample tid0, scan all Nchan channels and
// flag those whose filtered value (data) is a local maximum among their
// NchanNear neighbours (per the iC neighbour table) and exceeds spkTh.
// A hit additionally requires the raw trace (dataraw) at tid0+nt0min-1 to
// be at least as large (within 1e-6). Hits are appended to st/id through
// an atomic counter, capped at maxFR entries.
// Params layout used here: [0]=NT samples, [1]=Nchan, [2]=NchanNear,
// [3]=nt0, [4]=nt0min, [5]=spkTh.
__global__ void maxChannels(const double *Params, const float *dataraw, const float *data, const int *iC, int *st, int *id, int *counter){
int nt0, indx, tid, tid0, i, bid, NT, Nchan, NchanNear,j,iChan, nt0min;
double Cf, d;
float spkTh;
bool flag;
NT = (int) Params[0];
Nchan = (int) Params[1];
NchanNear = (int) Params[2];
nt0 = (int) Params[3];
nt0min = (int) Params[4];
spkTh = (float) Params[5];
tid = threadIdx.x;
bid = blockIdx.x;
tid0 = tid + bid * blockDim.x;
// grid-stride over time samples, leaving nt0+nt0min samples of headroom
while (tid0<NT-nt0-nt0min){
for (i=0; i<Nchan;i++){
// candidate value on the channel itself (first neighbour is self)
iChan = iC[0 + NchanNear * i];
Cf = (double) data[tid0 + NT * iChan];
flag = true;
// reject if any neighbouring channel is larger at this sample
for(j=1; j<NchanNear; j++){
iChan = iC[j+ NchanNear * i];
if (data[tid0 + NT * iChan] > Cf){
flag = false;
break;
}
}
if (flag){
iChan = iC[NchanNear * i];
if (Cf>spkTh){
d = (double) dataraw[tid0+nt0min-1 + NT*iChan]; //
if (d > Cf-1e-6){
// this is a hit, atomicAdd and return spikes
indx = atomicAdd(&counter[0], 1);
if (indx<maxFR){
st[indx] = tid0;
id[indx] = iChan;
}
}
}
}
}
tid0 += blockDim.x * gridDim.x;
}
}
2,839 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sstream>
#include <string>
#include <iostream>
#include <stdlib.h>
#include <time.h>
#define X 40
#define Y 40
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 8
#define GETCOORDS(row, col) (row) * (Y) + (col)
#define CEIL(x,y) (((x)-1) / (y)) + 1
// Reference (CPU) matrix multiply: P = M * N for width x width matrices
// stored row-major through the GETCOORDS macro.
void MatrixMultHost(float* M, float* N, float* P, int width){
    for(int r = 0; r < width; ++r){
        for(int c = 0; c < width; ++c){
            float acc = 0;
            for(int k = 0; k < width; ++k)
                acc += M[ GETCOORDS(r,k) ] * N[ GETCOORDS(k,c) ];
            P[ GETCOORDS(r,c) ] = acc;
        }
    }
}
// Render the X x Y matrix M as a whitespace-separated string, one row per line.
std::string sprint(float *M){
    std::ostringstream oss;
    for( int r = 0; r < X; ++r) {
        for( int c = 0; c < Y; ++c)
            oss << M[ GETCOORDS(r,c) ] << " " ;
        oss << std::endl;
    }
    return oss.str();
}
// One thread computes one element of Pd = Md * Nd (width x width, row-major).
// The guard handles partial blocks at the right/bottom edges.
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int width){
    int row = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
    if( row >= width || col >= width ) return;
    float sum = 0.0f;
    for(int k = 0; k < width; ++k)
        sum += Md[row*width + k] * Nd[k*width + col];
    Pd[row*width + col] = sum;
}
// GPU matrix multiply driver: copies M and N to the device, launches
// MatrixMulKernel over a 2D ceil-div grid, and copies the product into P.
void MatrixMultDevice(float* M, float* N, float* P, int width){
    size_t bytes = width*width*sizeof(float);
    float *Md, *Nd, *Pd;
    // device buffers
    cudaMalloc( (void**)&Md, bytes );
    cudaMalloc( (void**)&Nd, bytes );
    cudaMalloc( (void**)&Pd, bytes );
    // upload operands, clear the result
    cudaMemcpy(Md, M, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(Nd, N, bytes, cudaMemcpyHostToDevice);
    cudaMemset(Pd, 0, bytes);
    // one thread per output element
    dim3 dimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    dim3 dimGrid(CEIL(width,BLOCK_SIZE_X), CEIL(width,BLOCK_SIZE_Y));
    printf("Using (%d, %d) blocks of (%d, %d) threads each\n", dimGrid.x, dimGrid.y, dimBlock.x, dimBlock.y);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(P, Pd, bytes, cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}
// Fills two matrices with small random integers (stored as floats), then
// multiplies them on the CPU and on the GPU and compares the printed
// results; integer-valued operands make the string comparison exact.
int main(){
srand(time(NULL));
float *M, *N, *P;
// allocate M, N, P
M = (float*)malloc( sizeof(float)*X*Y );
N = (float*)malloc( sizeof(float)*X*Y );
P = (float*)malloc( sizeof(float)*X*Y );
// random entries in {0..4} so products stay exactly representable
for( int i = 0; i < X; i++) {
for( int j = 0; j < Y; j++) {
M[ GETCOORDS(i,j) ] = rand() % 5;
}
}
for( int i = 0; i < X; i++) {
for( int j = 0; j < Y; j++) {
N[ GETCOORDS(i,j) ] = rand() % 5;
}
}
// M*N on host
MatrixMultHost(M,N,P, Y);
const std::string Phost(sprint(P));
std::cout << Phost << std::endl;
// clear P so a broken device path cannot accidentally "pass"
memset(P, 0, sizeof(float)*X*Y );
// M*N on device
MatrixMultDevice(M,N,P, Y);
const std::string Pdev(sprint(P));
std::cout << Pdev << std::endl;
// compare the two results via their printed forms
if( Phost != Pdev ){
std::cout << "FAIL" << std::endl;
} else{
std::cout << "WIN" << std::endl;
}
// Free M, N, P
free(M);
free(N);
free(P);
return 0;
}
|
2,840 | #include <cuda_runtime.h>
#include<iostream>
using namespace std;
#include <device_launch_parameters.h>
// Enumerates all CUDA devices, printing a few properties of each, then
// uses cudaChooseDevice to pick the device best matching compute
// capability 1.3 and makes it the current device.
int main(void) {
//struct containing info such as name, threads/block, etc.
cudaDeviceProp devProp;
int count;
//pass addr of var, get method populates
cudaGetDeviceCount(&count);
for (int i = 0; i < count; i++)
{
cudaGetDeviceProperties(&devProp, i);
cout << "Name: " << devProp.name << endl;
cout << "Clock rate: " << devProp.clockRate << endl;
cout << "Total global memory: " << devProp.totalGlobalMem << endl;
printf("Max thread dimensions: (%d, %d, %d)\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
}
//Check for GPUs capable of double-precision floating-point math, only available on those w computer capability >= 1.3
//Fill block of mem w particular struct that is threshold, then use cudaChooseDevice to find one with best criteria
//Lower overhead, instead of iterating through all in for loop
int deviceID;
cudaGetDevice(&deviceID);
//copy obj for first 0 iterations and allocate mem
// zero all fields so only major/minor act as selection criteria
memset(&devProp, 0, sizeof(cudaDeviceProp));
devProp.major = 1;
devProp.minor = 3;
// NOTE(review): cudaChooseDevice returns the *closest* match, not a
// guaranteed >= 1.3 device — verify capability before relying on doubles.
cudaChooseDevice(&deviceID, &devProp);
cout << "Device ID with closest capability: " << deviceID << endl;
//host comms w this device now
cudaSetDevice(deviceID);
}
2,841 | /*
Compile using nvcc cuda_heat.cu
Author: Romit Maulik - romit.maulik@okstate.edu
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
const double PI = 3.1415926535;
const double lx = 2.0*PI, ly = 2.0*PI;
const int nx = 254, ny = 254;
const double ALPHA = 0.8, STAB_PARAM = 0.8;
const double dx = lx/nx, dy = ly/ny;
/*
Host Functions
*/
void initialize_array(double *u);
void write_array(double *u);
/*
Device Functions
*/
__global__ void update_solution(double *_u, double *_utemp, double *_const_mult);
__global__ void update_periodic_boundaries(double *_u);
__global__ void copy_arrays(double *_u, double *_utemp);
// FTCS integration of the 2D heat equation with periodic boundaries on
// the GPU: initializes sin(x+y) on the host, time-steps on the device
// (update -> boundaries -> copy), then writes the final field to disk.
// FIXES vs. original: new[] arrays released with delete[] (plain delete
// was undefined behavior), deprecated cudaThreadSynchronize() replaced by
// cudaDeviceSynchronize(), and _const_mult/const_mult are no longer leaked.
int main (int argc, char** argv)
{
double *u = new double [(nx+2)*(ny+2)];
double *_u, *_utemp; //Pointer to device memory
double *const_mult = new double[1];
double *_const_mult;
initialize_array(u); //Initialize solution on host
// allocate storage space on the GPU
cudaMalloc((void **)&_u, (nx+2) * (ny+2) * sizeof(double));
cudaMalloc((void **)&_utemp, (nx+2) * (ny+2) * sizeof(double));
cudaMalloc((void **)&_const_mult,sizeof(double));
//Copy data to device
cudaMemcpy(_u,u,(nx+2)*(ny+2)*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(_utemp,u,(nx+2)*(ny+2)*sizeof(double),cudaMemcpyHostToDevice);
// assign a 2D distribution of CUDA "threads" within each CUDA "block"
int ThreadsPerBlock=16;
dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock );
// calculate number of blocks along X and Y in a 2D CUDA "grid"
dim3 dimGrid( ceil(float(nx+2)/float(dimBlock.x)), ceil(float(ny+2)/float(dimBlock.y)), 1 );
double t,dt;
const double ft=1.0;
// stability-limited time step and the FTCS multiplier alpha*dt/dx^2
dt = STAB_PARAM*dx*dx/(4.0*ALPHA);
((*const_mult)) = ALPHA*dt/(dx*dx);
//Copy constant to device
cudaMemcpy(_const_mult,const_mult,sizeof(double),cudaMemcpyHostToDevice);
clock_t start, end;
double cpu_time_used;
start = clock();
//FTCS integration - CUDA
//Boundary conditions
update_periodic_boundaries<<<dimGrid, dimBlock>>>(_u);
cudaDeviceSynchronize();
update_periodic_boundaries<<<dimGrid, dimBlock>>>(_utemp);
cudaDeviceSynchronize();
t = 0.0;
do{
//Update solution
update_solution<<<dimGrid, dimBlock>>>(_u,_utemp,_const_mult); // update T1 using data stored in T2
cudaDeviceSynchronize();
//Boundary conditions
update_periodic_boundaries<<<dimGrid, dimBlock>>>(_u);
cudaDeviceSynchronize();
//Copy arrays
copy_arrays<<<dimGrid, dimBlock>>>(_u,_utemp);
cudaDeviceSynchronize();
t = t + dt;
}while(t<ft);
// copy final array to the CPU from the GPU
cudaMemcpy(u,_u,(nx+2)*(ny+2)*sizeof(double),cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("CPU time used = %f\n", cpu_time_used);
//Write temperature to disk
write_array(u);
// release memory on the host (FIX: delete[] for new[] allocations)
delete [] u;
delete [] const_mult;
// release memory on the device (FIX: _const_mult was leaked)
cudaFree(_u);
cudaFree(_utemp);
cudaFree(_const_mult);
return 0;
}
// Fill the interior (1..nx, 1..ny) of the padded solution array with
// sin(x+y) sampled on [0, 2*pi)^2. The one-cell halo is left untouched;
// it is populated later by the periodic-boundary kernel.
void initialize_array(double *u)
{
for (int i = 1; i < nx+1; i++)
{
for (int j = 1; j < ny+1; j++)
{
double x = (double) (i-1)/nx * 2.0 * PI;
double y = (double) (j-1)/ny * 2.0 * PI;
u[(ny+2)*i + j] = sin(x+y);
}
}
}
// Refreshes the one-cell halo of the padded (nx+2) x (ny+2) field so the
// domain wraps periodically: each ghost row/column is copied from the
// opposite interior row/column. Every branch bounds i and j, so the
// ceil-rounded launch grid cannot write out of range here.
__global__ void update_periodic_boundaries(double *_u)
{
// compute the "i" and "j" location of the node point
// handled by this thread
int i = blockIdx.x * blockDim.x + threadIdx.x ;
int j = blockIdx.y * blockDim.y + threadIdx.y ;
// Left boundary
if(j>0 && j<ny+1 && i == 0)
{
(*(_u + j)) = (*(_u + (ny+2)*nx + j));//Correct
}
// Right boundary
if(j>0 && j<ny+1 && i == nx + 1)
{
(*(_u + (ny+2)*(nx+1) + j)) = (*(_u + (ny+2) + j));//Correct
}
// Bottom boundary
if(i>=0 && i<nx+2 && j == 0)
{
(*(_u + (ny+2)*i)) = (*(_u + (ny+2)*i + ny)); //Correct
}
// top boundary
if(i>=0 && i<nx+2 && j == ny + 1)
{
(*(_u + (ny+2)*i + ny + 1)) = (*(_u + (ny+2)*i + 1)); //Correct
}
}
// One FTCS update of the interior nodes:
//   u[p] = utemp[p] + c * (-4*utemp[p] + utemp[n] + utemp[s] + utemp[w] + utemp[e])
// where c = alpha*dt/dx^2 is passed in through _const_mult.
__global__ void update_solution(double *_u, double *_utemp, double *_const_mult)
{
// node handled by this thread
int i = blockIdx.x * blockDim.x + threadIdx.x ;
int j = blockIdx.y * blockDim.y + threadIdx.y ;
// only update "interior" node points
if(i>0 && i<nx+1 && j>0 && j<ny+1)
{
// flat indices of this node and its four neighbours
int p = (ny+2)*i + j;
int n = p + 1;
int s = p - 1;
int w = p - (ny+2);
int e = p + (ny+2);
_u[p] = _utemp[p] + (*_const_mult)*(-4.0*_utemp[p] + _utemp[n] + _utemp[s] + _utemp[w] + _utemp[e]);
}
}
// Copy the updated solution _u back into the work array _utemp.
// FIX: the launch grid is rounded up with ceil(), so threads beyond the
// padded (nx+2) x (ny+2) domain exist; the original had no bounds check
// and those threads read/wrote out of bounds.
__global__ void copy_arrays(double *_u, double *_utemp)
{
// compute the "i" and "j" location of the node point
// handled by this thread
int i = blockIdx.x * blockDim.x + threadIdx.x ;
int j = blockIdx.y * blockDim.y + threadIdx.y ;
if (i < nx+2 && j < ny+2)
{
(*(_utemp + (ny+2)*i + j)) = (*(_u + (ny+2)*i + j));
}
}
// Write the full padded temperature field to "Temperature.txt", one
// space-separated row per line.
// FIX: check the fopen result and close the file when done — the original
// leaked the FILE handle, so buffered output could be lost on exit.
void write_array(double *u)
{
FILE *fp;
fp = fopen("Temperature.txt","wb");
if (fp == NULL) {
printf("Could not open Temperature.txt for writing\n");
return;
}
for(int i=0;i<nx+2;i++) {
for(int j=0;j<ny+2;j++) {
double value = (*(u + (ny+2)*i + j));
fprintf(fp,"%f ",value);
}
fprintf(fp,"\n");
}
fclose(fp);
}
|
2,842 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
#define USAGE_EXIT(s) do{ \
printf("Usage: %s <# of elements> <# of threads> \n %s\n", argv[0], s); \
exit(-1);\
}while(0);
// Pairwise XOR reduction intended to fold a[0..num_elements-1] into a[0]:
// each pass XORs element i with its mirror a[num-i-1], halving the active
// range. The host prints a[0] as the checksum.
// NOTE(review): there is no synchronization between passes — threads in
// different blocks (and even different warps) can read a[num-i-1] before
// or after it was folded, so results are racy whenever num_elements
// exceeds one warp. A correct version needs __syncthreads per pass within
// a block plus a multi-launch (or grid sync) across blocks.
// NOTE(review): 'k' is computed from the log2 argument 'l' but never used.
__global__ void xorsum(int num_elements,int *a,double l)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i >= num_elements)
return;
int k=(int)l+1;
int p=num_elements/2;
int num=num_elements;
while(p)
{
// fold the mirrored upper half onto the lower half
if(i<p)
{
a[i]=a[i]^a[num-i-1];
}
num=(num+1)/2;
p=num/2;
}
}
// Fills an array with seeded random ints, prints them, runs the xorsum
// kernel on the GPU and reports the checksum plus total/processing time.
// NOTE(review): the USAGE_EXIT text says "<# of threads>" but argv[2] is
// actually used as the RNG seed.
int main(int argc, char const *argv[])
{
struct timeval start, end, t_start, t_end;
int num_elements;
int SEED,ctr,blocks;
int *ptr;
int *gpu_mem,*a;
if(argc==3)
{
num_elements=atoi(argv[1]);
SEED=atoi(argv[2]);
}
else
{
printf("Wrong command line arguments\n" );
exit(-1);
}
a = (int *)malloc(num_elements * sizeof(int));
if(!a){
USAGE_EXIT("invalid num elements, not enough memory");
}
srand(SEED);
for(ctr=0; ctr<num_elements; ++ctr)
{
a[ctr] = random();
}
// echo the inputs so the checksum can be verified externally
for (size_t i = 0; i < num_elements; i++)
{
printf("%d\n",a[i] );
if (i==num_elements-1)
{
printf("\n" );
}
}
gettimeofday(&t_start,NULL);
cudaMalloc(&gpu_mem, num_elements*sizeof(int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_mem,a, num_elements*sizeof(int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL);
// ceil-div: one 1024-thread block per 1024 elements
blocks = num_elements /1024;
if(num_elements % 1024)
{
++blocks;
}
// l = log2(num_elements), passed to the kernel (currently unused there)
double l=log(num_elements)/log(2);
xorsum<<<blocks, 1024>>>(num_elements,gpu_mem,l);
CUDA_ERROR_EXIT("kernel invocation");
gettimeofday(&end, NULL);
/* Copy back result*/
cudaMemcpy(a, gpu_mem,num_elements*sizeof(int),cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Xor based checksum is %d\n",a[0]);
printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(gpu_mem);
return 0;
}
|
2,843 | #pragma once
#include "Matrix.cuh"
// Construct a Matrix with the given dimensions.
Matrix::Matrix(long width, long height)
    : width(width), height(height) {
}
// Returns the matrix width set at construction.
long Matrix::getWidth() {
return width;
}
// Returns the matrix height set at construction.
long Matrix::getHeight() {
return height;
}
2,844 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2014, September 8 - October 10
// ###
// ###
// ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff
// ###
// ###
// ### Dennis Mack, dennis.mack@tum.de, p060
// ### Adrian Haarbach, haarbach@in.tum.de, p077
// ### Markus Schlaffer, markus.schlaffer@in.tum.de, p070
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Aborts with a diagnostic if the most recent CUDA API call or kernel
// launch left an error code (normally invoked through CUDA_CHECK).
void cuda_check(string file, int line)
{
    cudaError_t e = cudaGetLastError();
    if (e == cudaSuccess) return;
    cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
    exit(1);
}
// Squares element tid of d_a in place; threads at or past n do nothing.
__device__ void square_arr(float* d_a, size_t n) {
    size_t tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= n) return;
    float v = d_a[tid];
    d_a[tid] = v * v;
}
// Thin __global__ entry point: __device__ functions cannot be launched
// directly, so this forwards to square_arr.
__global__ void kernel_call(float *d_a, size_t n) {
square_arr(d_a, n);
}
// Exercise for the GPU-programming course: squares a 10-element array on
// the CPU, then repeats the computation on the GPU (alloc, copy, kernel,
// copy back, free) and prints both results for comparison.
int main(int argc,char **argv)
{
// alloc and init input arrays on host (CPU)
int n = 10;
float *h_a = new float[n];
for(int i=0; i<n; i++) h_a[i] = i;
// CPU computation
for(int i=0; i<n; i++)
{
float val = h_a[i];
val = val*val;
h_a[i] = val;
}
// print result
cout << "CPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << h_a[i] << endl;
cout << endl;
// GPU computation
// reinit data
for(int i=0; i<n; i++) h_a[i] = i;
// ###
// ### TODO: Implement the "square array" operation on the GPU and store the result in "a"
// ###
// ### Notes:
// ### 1. Remember to free all GPU arrays after the computation
// ### 2. Always use the macro CUDA_CHECK after each CUDA call, e.g. "cudaMalloc(...); CUDA_CHECK;"
// ### For convenience this macro is defined directly in this file, later we will only include "helper.h"
float *d_a;
size_t size_of_array = n * sizeof(float);
// allocate data for array on GPU
cudaMalloc(&d_a, size_of_array);
CUDA_CHECK;
// copy the data onto GPU
cudaMemcpy(d_a, h_a, size_of_array, cudaMemcpyHostToDevice);
CUDA_CHECK;
// compute the appropiate dimensions for the grid/block
// ceil-div grid so a partial last block still covers all n elements
dim3 block_size = dim3(128, 1, 1);
dim3 grid_size = dim3((n + block_size.x - 1) / block_size.x, 1, 1);
// launch the kernel - for the first stuff only 10 threads
kernel_call<<< grid_size, block_size>>>(d_a, n);
CUDA_CHECK;
// copy the stuff back
// (this blocking copy also synchronizes with the kernel)
cudaMemcpy(h_a, d_a, size_of_array, cudaMemcpyDeviceToHost);
CUDA_CHECK;
// free the stuff
cudaFree(d_a);
CUDA_CHECK;
// print result
cout << "GPU:" << endl;
for(int i=0; i<n; i++) cout << i << ": " << h_a[i] << endl;
cout << endl;
// free CPU arrays
delete[] h_a;
}
|
2,845 | #include <cuda.h>
#include <iostream>
#include <vector>
// Print the first n elements of x to stdout as "(a, b, c, )".
void printArray(const float* x, int n)
{
    std::cout << "(";
    for (int k = 0; k < n; ++k)
        std::cout << x[k] << ", ";
    std::cout << ")" << std::endl;
}
// My attempt at using shared mem among blocks. Runs slightly slower than my naïve
// algorithm did but I like this more as it is at least an attempt at optimization
// even though it runs much slower than it should had it.
// Gaussian kernel-density estimate, tiled through shared memory:
//   y[idx] = 1/(n*h) * (2*pi)^(-1/2) * sum_k exp(-((x[idx]-x[k])/h)^2 / 2)
// x_reg stages memSize samples per tile so every thread of the block
// reuses them; requires memSize*sizeof(float) dynamic shared memory.
//
// FIX 1: the original read x[idx] before any bounds check — an
// out-of-bounds load for threads in the partial last block.
// FIX 2: threads with idx >= n returned from inside the tile loop, so the
// remaining threads hit later __syncthreads() calls alone (undefined
// behavior). Guard the work instead of returning.
// FIX 3: there was no barrier at the end of a tile iteration, so fast
// threads could overwrite x_reg while slow ones were still reading it.
__global__
void f_h(const int n, const float h, const float *x, float *y, int memSize)
{
    extern __shared__ float x_reg[];
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const float coef = 1 / (n * h) * .3989422804;
    float sum = 0;
    const float x_val = (idx < n) ? x[idx] : 0.0f;   // FIX 1: guarded load
    for (int i = 0; i < n; i += memSize)
    {
        // cooperatively stage the next tile of samples into shared memory
        for (int j = 0; j < memSize; j += blockDim.x)
        {
            if(i + j + threadIdx.x < n)
            {
                x_reg[j + threadIdx.x] = x[j + i + threadIdx.x];
            }
        }
        __syncthreads();
        if (idx < n)   // FIX 2: keep all threads in the loop for the barriers
        {
            for (int k = 0; k < memSize && k+i < n; k++)
            {
                float val = (x_val-x_reg[k]) / h;
                float k_x = exp(-(val * val) / 2);
                sum = sum + k_x;
            }
        }
        __syncthreads();   // FIX 3: tile fully consumed before it is reused
    }
    if (idx < n)
    {
        y[idx] = coef * sum;
    }
}
// Host driver for the kernel-density estimate: copies x_v (and y_v) to
// the device, runs f_h with a shared-memory tile of 4*blockSize floats,
// and copies the result back into y_v.
void gpuCall(int n, float h, const float *x_v, float *y_v)
{
    const int bytes = n * sizeof(float);
    float *x, *y;
    cudaMalloc(&x, bytes);
    cudaMalloc(&y, bytes);
    cudaMemcpy(x, x_v, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(y, y_v, bytes, cudaMemcpyHostToDevice);

    const int blockSize = 256;
    const int numBlocks = (n + blockSize - 1) / blockSize;  // ceil-div
    const int memSize = blockSize * 4;   // shared-tile size in elements
    f_h<<<numBlocks, blockSize, memSize * sizeof(float)>>>(n, h, x, y, memSize);
    cudaDeviceSynchronize();

    cudaMemcpy(y_v, y, bytes, cudaMemcpyDeviceToHost);
    cudaFree(x);
    cudaFree(y);
}
|
2,846 | #include <stdio.h>
#include <stdlib.h>
// Placeholder kernel — the vector-add body has not been implemented yet,
// so launching this is a no-op.
__global__ void gpu_add_two_vectors(void)
{
}
// Entry point: currently only prints a banner; the kernel above is still
// an empty stub.
int main()
{
printf("Adding Vectors: \n");
return 0;
}
2,847 | #include "includes.h"
// Single-block non-maximum suppression over num_detections boxes, each
// thread owning num_per_thread consecutive entries of the score-ordered
// 'indices' array (assumed sorted by descending score — TODO confirm
// against the caller). A detection i is suppressed (its score zeroed)
// when a higher-ranked, still-alive detection m of the same class overlaps
// it with IoU > threshold. The m-loop bounds are uniform across the block,
// so the trailing __syncthreads() is reached by every thread.
__global__ void nms_kernel( const int num_per_thread, const float threshold, const int num_detections, const int *indices, float *scores, const float *classes, const float4 *boxes) {
// Go through detections by descending score
for (int m = 0; m < num_detections; m++) {
for (int n = 0; n < num_per_thread; n++) {
int i = threadIdx.x * num_per_thread + n;
// only compare against higher-ranked (m < i) surviving detections
if (i < num_detections && m < i && scores[m] > 0.0f) {
int idx = indices[i];
int max_idx = indices[m];
int icls = classes[idx];
int mcls = classes[max_idx];
// suppression only applies within the same class
if (mcls == icls) {
float4 ibox = boxes[idx];
float4 mbox = boxes[max_idx];
// intersection rectangle (pixel-inclusive "+1" convention)
float x1 = max(ibox.x, mbox.x);
float y1 = max(ibox.y, mbox.y);
float x2 = min(ibox.z, mbox.z);
float y2 = min(ibox.w, mbox.w);
float w = max(0.0f, x2 - x1 + 1);
float h = max(0.0f, y2 - y1 + 1);
float iarea = (ibox.z - ibox.x + 1) * (ibox.w - ibox.y + 1);
float marea = (mbox.z - mbox.x + 1) * (mbox.w - mbox.y + 1);
float inter = w * h;
// intersection-over-union
float overlap = inter / (iarea + marea - inter);
if (overlap > threshold) {
scores[i] = 0.0f;
}
}
}
}
// Sync discarded detections
__syncthreads();
}
}
2,848 | /*******************************************************************
* Sparse Auto-Encoder
* by
* David Klaus and Alex Welles
* EC527 Final Project
*
* Serial Implementation With Timing Code
*
* Compile with:
*
* nvcc -Xcompiler -fopenmp -lgomp -o sparseAutoencoder sparseAutoencoder.cu
*
*******************************************************************/
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include <math.h>
#include <string>
#include <sstream>
#include <fstream>
#include <iostream>
#include <string>
#define GIG 1000000000
#define CPG 2.527
#define OPTIONS 1
//Parameters necessary to set up network
#define PATCHES_PATH "c_patches.csv"
#define W1_PATH "W1.csv"
#define W2_PATH "W2.csv"
#define IMAGE_DIM 512 //pixels in 1 dimension (assumes square)
#define SAMPLE_SIZE 10000 //number of input patches
#define VISIBLE_SIZE 64 //number of input/output nodes
#define HIDDEN_SIZE 25 //number hidden nodes
#define HIDDEN_LAYERS 1 //number hidden layers
#define NUM_SAMPLE_ELEMENTS SAMPLE_SIZE * VISIBLE_SIZE
//desired average activation of hidden nodes
#define SPARSITY_PARAM 0.01
#define SPARSITY_COMPLEMENT 1-SPARSITY_PARAM
//weight decay paramater
#define LAMBDA 0.0001
//weight of sparsity penalty term
#define BETA 3.0
using namespace std;
/* VECTOR FUNCTIONS */
void initializeMatrixWeightsRand(float *arr, int rows, int cols, int seed);
void initializeMatrixWeightsZero(float *arr, int rows, int cols);
void initializeVectorWeightsZero(float *arr, int numElements);
void mmm_kij(float* src1, float* src2, float* dest, int row1, int col1, int row2,int col2);
void mmm_ijk(float* src1, float* src2, float* dest, int row1, int col1, int row2,int col2);
void dotPdt(float* src1,float* src2, float* dest, int length);
float altDotPdt(float* A, float* B, int length);//BROKEN DONT USE
void readCSV(float* array, int numElements, string filename);
void addVectors(float* src1, float* src2, float* dest, int length);
void subVectors(float* src1, float* src2, float* dest, int length);
void vectElemSigmoid(float* src,float* dest,int length);
void vectElemIntDiv(float* src, float* dest,int length,int divisor);
void vectElemFloatDiv(float* src, float* dest,int length,float divisor);
void vectElemVectDiv(float* src1,float* src2,float* dest,int length);
void initializeVector(float *array, int length, float val);
void vectElemVectMult(float* src1, float* src2, float* dest, int length);
void vectElemFloatMult(float* src, float* dest, int length,float multiplicand);
void matrixTranspose(float* src,float* dest,int rows, int cols);
float normVector(float* src,int length);
void vectElemLog(float* src,float* dest,int length);
float sumVector(float* src,int length);
/* PRINTOUT, DEBUG, AND TIMING FUNCTIONS */
void printVector(float* A, int length);
void printMatrix(float* A, int rows, int cols);
void printTiming(struct timespec* time_stamp,int numTimings);
// Serial reference implementation of one cost/gradient evaluation of a
// sparse autoencoder (64 visible units, 25 hidden units, 10000 training
// patches). Mirrors the MATLAB expressions quoted in the comments below:
// forward pass to estimate the mean hidden activation (rho-hat), then a
// combined forward/backward pass accumulating weight/bias gradients and
// the reconstruction cost, finally the regularization and sparsity terms.
// Timing of the whole computation is printed at the end.
int main(int argc, char *argv[]){
/***********************************
TIMING STUFF
***********************************/
struct timespec diff(struct timespec start, struct timespec end);
struct timespec time1, time2;
struct timespec time_stamp[OPTIONS];//Can be increased if necessary.
/***********************************
ALLOCATE MEMORY
***********************************/
//Arrays on host memory (CPU)
//input patches to train the autoencoder
float *h_inputs;// 64 x 10000 [visible x sample]
//sparsity vector
float *h_rhoHat;//hidden x 1 [25 x 1]
//weight matrices
float *h_W1;//hidden X visible [25 x 64]
float *h_W2;//visible X hidden [64 x 25]
//weight vectors
float *h_b1;//hidden X 1 [25 x 1]
float *h_b2;//visible X 1 [64 x 1]
//weight gradient matrices
float *h_W1grad;//hidden x visible [25 x 64]
float *h_W2grad;//visible x hidden [64 x 25]
//weight gradient vectors
float *h_b1grad;//hidden x 1 [25 x 1]
float *h_b2grad;//visible x 1 [64 x 1]
//z product vectors
float *h_z2;//hidden x 1 [25 x 1]
float *h_z3;//visible x 1 [64 x 1]
//a product vectors
float *h_a2;//hidden x 1 [25 x 1]
float *h_a3;//visible x 1 [64 x 1]
//partial derivatives for back prop
float *h_d2;//hidden x 1 [25 x 1]
float *h_d3;//visible x 1 [64 x 1]
//temp vectors: both are 64 elements but will not always be used
float *h_temp1;//64 x 1
float *h_temp2;//64 x1
//temp matrix
float *h_Wtemp1;//64 x 25 or 25 x 64
float *h_Wtemp2;//25 x 64 or 64 x 25
//sparsity penalty
float *h_sparsePen;//25x1
//Allocate input patches on host memory (CPU)
size_t allocSize = VISIBLE_SIZE * SAMPLE_SIZE * sizeof(float);
h_inputs = (float *) malloc(allocSize);
//Allocate sparsity vector on host memory (CPU)
allocSize = HIDDEN_SIZE * sizeof(float);
h_rhoHat = (float *) malloc(allocSize);
//Alocate weight arrays on host memory (CPU)
allocSize = VISIBLE_SIZE * HIDDEN_SIZE * sizeof(float);
h_W1 = (float *) malloc(allocSize);
h_W2 = (float *) malloc(allocSize);
//Alocate gradient arrays on host memory (CPU)
allocSize = VISIBLE_SIZE * HIDDEN_SIZE * sizeof(float);
h_W1grad = (float *) malloc(allocSize);
h_W2grad = (float *) malloc(allocSize);
//Allocate weight vectors on host memory (CPU)
allocSize = HIDDEN_SIZE * sizeof(float);
h_b1 = (float *) malloc(allocSize);
allocSize = VISIBLE_SIZE * sizeof(float);
h_b2 = (float *) malloc(allocSize);
//Allocate weight vectors on host memory (CPU)
// NOTE(review): h_b1grad/h_b2grad are never zero-initialized, yet the loop
// below accumulates into them with addVectors — they start as garbage.
allocSize = HIDDEN_SIZE * sizeof(float);
h_b1grad = (float *) malloc(allocSize);
allocSize = VISIBLE_SIZE * sizeof(float);
h_b2grad = (float *) malloc(allocSize);
//Allocate z product vectors (CPU)
allocSize = HIDDEN_SIZE * sizeof(float);
h_z2 = (float *) malloc(allocSize);
allocSize = VISIBLE_SIZE * sizeof(float);
h_z3 = (float *) malloc(allocSize);
//Allocate a product vectors (CPU)
allocSize = HIDDEN_SIZE * sizeof(float);
h_a2 = (float *) malloc(allocSize);
allocSize = VISIBLE_SIZE * sizeof(float);
h_a3 = (float *) malloc(allocSize);
//Allocate partial vectors (CPU)
allocSize = HIDDEN_SIZE * sizeof(float);
h_d2 = (float *) malloc(allocSize);
allocSize = VISIBLE_SIZE * sizeof(float);
h_d3 = (float *) malloc(allocSize);
//Allocate temp vectors (CPU)
allocSize = VISIBLE_SIZE * sizeof(float);
h_temp1 = (float *) malloc(allocSize);
h_temp2 = (float *) malloc(allocSize);
//Allocate temp matrix (CPU)
allocSize = VISIBLE_SIZE * HIDDEN_SIZE * sizeof(float);
h_Wtemp1 = (float *) malloc(allocSize);
h_Wtemp2 = (float *) malloc(allocSize);
//Allocate sparsity penalty vector (CPU)
allocSize = HIDDEN_SIZE * sizeof(float);
h_sparsePen = (float *) malloc(allocSize);
/***********************************
VARIABLES
***********************************/
float cost = 0;
/***********************************
INITIALIZE NETWORK WEIGHTS
***********************************/
//Initialize the weight matrices to random values
initializeMatrixWeightsRand(h_W1, HIDDEN_SIZE, VISIBLE_SIZE, 2254);
initializeMatrixWeightsRand(h_W2, VISIBLE_SIZE, HIDDEN_SIZE, 1345);
initializeMatrixWeightsZero(h_W2grad,VISIBLE_SIZE,HIDDEN_SIZE);
initializeMatrixWeightsZero(h_W1grad,HIDDEN_SIZE,VISIBLE_SIZE);
initializeVectorWeightsZero(h_b1, HIDDEN_SIZE);
initializeVectorWeightsZero(h_b2, VISIBLE_SIZE);
initializeVectorWeightsZero(h_rhoHat, HIDDEN_SIZE);
initializeVectorWeightsZero(h_z2, HIDDEN_SIZE);
initializeVectorWeightsZero(h_a2, HIDDEN_SIZE);
initializeVectorWeightsZero(h_z3, VISIBLE_SIZE);
initializeVectorWeightsZero(h_a3, VISIBLE_SIZE);
/***********************************
READ IN SAMPLE PATCHES
***********************************/
readCSV(h_inputs, NUM_SAMPLE_ELEMENTS, PATCHES_PATH);
//the following are for debug only
readCSV(h_W1, HIDDEN_SIZE*VISIBLE_SIZE, W1_PATH);
readCSV(h_W2, HIDDEN_SIZE*VISIBLE_SIZE, W2_PATH);
/***************************************
BEGIN SERIAL TIMING
****************************************/
printf("\nTesting Baseline Sparse Autoencoder");
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
int OPTION = 0;//default is zero. Placeholder in case.
/***************************************
CALCULATE SPARSITY CONSTRAINT RHO-HAT
****************************************/
/*
This involves forward propagation through 10000 64 pixel values
(note values are stored as floats). These are stored as a 64000
element vector. Another forward propagation is performed later.
*/
for(int i = 0; i < NUM_SAMPLE_ELEMENTS; i+=VISIBLE_SIZE){//for number of input patches
//we are performing the following matrix op
//[hsize x vsize] * [vsize x 1] = [vsize x 1]
//[25x64] * [64x1] = [64x1]
//using 25 dot product calls
for(int j = 0;j < HIDDEN_SIZE;j++ ){
dotPdt(&h_inputs[i],&h_W1[j*VISIBLE_SIZE],&h_z2[j],VISIBLE_SIZE);
//h_z2[j] = altDotPdt(&h_inputs[i],&h_W1[j*VISIBLE_SIZE],VISIBLE_SIZE);
// &h_inputs[i] --> pointer to current input patch
// &h_W1[j*VISIBLE_SIZE] --> pointer to current row of the W1 weights
// &h_z2[j] --> pointer to current output element of z1
// VISIBLE_SIZE --> patch size, should be 64
}
//[25x1] + [25x1] = [25x1]
//[hidden x 1] + [hidden x1] = [hidden x 1]
addVectors(h_z2,h_b1,h_z2, HIDDEN_SIZE);
//sigma([25x1]) = [25x1]
vectElemSigmoid(h_z2,h_a2,HIDDEN_SIZE);
//calc the sparsity constraint
addVectors(h_rhoHat,h_a2,h_rhoHat,HIDDEN_SIZE);
}
// rhoHat = (sum of a2 over all samples) / SAMPLE_SIZE
vectElemIntDiv(h_rhoHat,h_rhoHat,HIDDEN_SIZE,SAMPLE_SIZE);
/*
cout << "z2" << endl;//DEBUG
printVector(h_z2,HIDDEN_SIZE);//DEBUG
cout << "a2" << endl;//DEBUG
printVector(h_a2,HIDDEN_SIZE);//DEBUG
cout << "rhoHat" << endl;//DEBUG
printVector(h_rhoHat,HIDDEN_SIZE);//DEBUG
*/
/***************************************
FORWARD AND BACKWARD PROPAGATION
****************************************/
for(int i = 0; i < NUM_SAMPLE_ELEMENTS; i+=VISIBLE_SIZE){
/***************************************
FORWARD PROPAGATION a(1) --> a(2) //DEBUG PASSED
****************************************/
/*
%forward propagate
%from a1 = input to a2
xM = data(:,i);%current input
z2 = W1 * xM + b1;
a2 = 1./ ( 1 + exp(-z2));
*/
//we are performing the following matrix op
//[hsize x vsize] * [vsize x 1] = [vsize x 1]
//[25x64] * [64x1] = [64x1]
//using 25 dot product calls
for(int j = 0;j < HIDDEN_SIZE;j++ ){
dotPdt(&h_inputs[i],&h_W1[j*VISIBLE_SIZE],&h_z2[j],VISIBLE_SIZE);
//h_z2[j] = altDotPdt(&h_inputs[i],&h_W1[j*VISIBLE_SIZE],VISIBLE_SIZE);
// &h_inputs[i] --> pointer to current input patch
// &h_W1[j*VISIBLE_SIZE] --> pointer to current row of the W1 weights
// &h_z2[j] --> pointer to current output element of z1
// VISIBLE_SIZE --> patch size, should be 64
}
//[25x1] + [25x1] = [25x1]
//[hidden x 1] + [hidden x1] = [hidden x 1]
addVectors(h_z2,h_b1,h_z2, HIDDEN_SIZE);
//sigma([25x1]) = [25x1]
vectElemSigmoid(h_z2,h_a2,HIDDEN_SIZE);
/***************************************
FORWARD PROPAGATION a(2) --> a(3) //DEBUG FAILED
****************************************/
/*
%from a2 to a3 = output
z3 = W2 * a2 + b2;
a3 = 1./ (1 + exp(-z3));
*/
//we are performing the following matrix op
//[visible x hidden] * [hidden x 1] = [visible x 1]
//[64x25] * [25x1] = [64x1]
//using 64 dot product calls
for(int j = 0;j < VISIBLE_SIZE;j++ ){
//the key difference here is that we are using h_a2 as the input
// NOTE(review): the length argument should be HIDDEN_SIZE (25) — h_a2
// only has 25 elements, so passing VISIBLE_SIZE (64) reads past h_a2
// and past row j of W2. Likely why the "DEBUG FAILED" note above.
dotPdt(h_a2,&h_W2[j*HIDDEN_SIZE],&h_z3[j],VISIBLE_SIZE);// W2 * a2 --> h_z3
//h_z2[j] = altDotPdt(&h_inputs[i],&h_W1[j*VISIBLE_SIZE],VISIBLE_SIZE);
// h_a2 --> pointer to current input vector
// &h_W1[j*VISIBLE_SIZE] --> pointer to current row of the W1 weights
// &h_z2[j] --> pointer to current output element of z1
}
//[64x1] + [64x1] = [64x1]
//[visible x 1] + [visible x1] = [visible x 1]
addVectors(h_z3,h_b2,h_z3,VISIBLE_SIZE);
//sigma([64x1]) = [64x1]
vectElemSigmoid(h_z3,h_a3,VISIBLE_SIZE);
/***************************************
BACK PROPAGATION
****************************************/
/*
%back propagate
%a3 -> a2
d3 = -(xM - a3) .* (a3 .* (1 - a3));
%d2 = (transpose(W2) * d3) .* (a2 .* (1 - a2));
d2 = ((W2' * d3) + beta .* (-(sparsityParam./rhoHat)
+ (1-sparsityParam)./(1-rhoHat))).* (a2 .* (1 - a2));
%compute partial derivatives
W2grad = W2grad + d3 * a2';
b2grad = b2grad + d3;
W1grad = W1grad + d2 * xM';
b1grad = b1grad + d2;
%for calculating cost
%equiv. to (HwbXi-y)^2
cost = cost + norm(a3 - xM)^2;
*/
//d3 = -(xM - a3) .* (a3 .* (1 - a3));
initializeVector(h_temp1, VISIBLE_SIZE, 1.0);//vec(1) --> h_temp1
subVectors(h_temp1,h_a3,h_temp1,VISIBLE_SIZE);//(1 - a3) --> h_temp1
vectElemVectMult(h_a3,h_temp1,h_temp1,VISIBLE_SIZE);//(a3 .* (1 - a3)) --> h_temp2
subVectors(h_a3,&h_inputs[i],h_temp2,VISIBLE_SIZE);//-(xM - a3) = (a3 - xM) --> h_temp2
vectElemVectMult(h_temp1,h_temp2,h_d3,VISIBLE_SIZE);//d3 = -(xM - a3) .* (a3 .* (1 - a3)); --> h_d3
//initializeVector(h_temp1,VISIBLE_SIZE,-1);
//vectElemVectMult(h_temp1,h_d3,h_d3,VISIBLE_SIZE);
//d2 = ((W2' * d3) + beta .* (-(sparsityParam./rhoHat) + (1-sparsityParam)./(1-rhoHat))) .* (a2 .* (1 - a2));
//Note the h_temp1 & 2 vectors are actually VISIBLE_SIZE long
//but we are only using HIDDEN_SIZE values
initializeVector(h_temp2, HIDDEN_SIZE, 1.0);//(1)
subVectors(h_temp2,h_rhoHat,h_temp2,HIDDEN_SIZE);//(1-rhoHat) --> h_temp2
initializeVector(h_temp1, HIDDEN_SIZE, SPARSITY_COMPLEMENT);//(1-sparsityParam) --> h_temp1
vectElemVectDiv(h_temp1,h_temp2,h_temp2,HIDDEN_SIZE);//(1-sparsityParam)./(1-rhoHat) --> h_temp2
initializeVector(h_temp1,HIDDEN_SIZE,-1.0*SPARSITY_PARAM);//-(sparsityParam) --> h_temp1
vectElemVectDiv(h_temp1,h_rhoHat,h_temp1,HIDDEN_SIZE);//-(sparsityParam./rhoHat) --> h_temp1
addVectors(h_temp1,h_temp2,h_temp1,HIDDEN_SIZE);//(-(sparsityParam./rhoHat) + (1-sparsityParam)./(1-rhoHat)) --> h_temp1
vectElemFloatMult(h_temp1,h_temp1,HIDDEN_SIZE,BETA);//beta .* (-(sparsityParam./rhoHat) + (1-sparsityParam)./(1-rhoHat))) --> h_temp1
matrixTranspose(h_W2,h_Wtemp1,VISIBLE_SIZE,HIDDEN_SIZE);//W2' --> h_Wtemp1
//Parallelize with OpenMP
// NOTE(review): W2' is [HIDDEN_SIZE x VISIBLE_SIZE], so (W2' * d3) has
// HIDDEN_SIZE rows of length VISIBLE_SIZE — the loop bound should be
// HIDDEN_SIZE, the row offset j*VISIBLE_SIZE, and the dot length
// VISIBLE_SIZE. As written it reads the wrong rows and overruns h_Wtemp1.
for(int j = 0;j < VISIBLE_SIZE;j++){//(W2' * d3) --> h_temp2
dotPdt(h_d3,&h_Wtemp1[j*HIDDEN_SIZE],&h_temp2[j],VISIBLE_SIZE);
}
//((W2' * d3) + beta .* (-(sparsityParam./rhoHat) + (1-sparsityParam)./(1-rhoHat))) --> h_temp2
addVectors(h_temp2,h_temp1,h_temp2,HIDDEN_SIZE);
initializeVector(h_temp1, HIDDEN_SIZE, 1);//1-a2 --> h_temp1
subVectors(h_temp1,h_a2,h_temp1,HIDDEN_SIZE);//(1-a2) --> h_temp1
vectElemVectMult(h_a2,h_temp1,h_temp1,HIDDEN_SIZE);//(a2 .* (1 - a2)) --> h_temp1
//d2 = ((W2' * d3) + beta .* (-(sparsityParam./rhoHat) + (1-sparsityParam)./(1-rhoHat))) .* (a2 .* (1 - a2));--> h_d2
vectElemVectMult(h_temp1,h_temp2,h_d2,HIDDEN_SIZE);
/*
%compute partial derivatives
W2grad = W2grad + d3 * a2';
b2grad = b2grad + d3;
W1grad = W1grad + d2 * xM';
b1grad = b1grad + d2;
*/
/*
int ii, jj;
for(ii = 0; i < HIDDEN_SIZE; ii++) {
for(jj = 0; jj < VISIBLE_SIZE; jj++) {
h_Wtemp1[ii*HIDDEN_SIZE+jj]= h_d3[jj]*h_a2[ii];
}
}*/
// NOTE(review): h_Wtemp1 is not zeroed before this call, and mmm_kij
// accumulates with += — the outer product lands on stale data.
mmm_kij(h_d3,h_a2,h_Wtemp1,VISIBLE_SIZE,1,1,HIDDEN_SIZE);//d3 * a2' --> h_Wtemp1
addVectors(h_W2grad,h_Wtemp1,h_W2grad,HIDDEN_SIZE*VISIBLE_SIZE);//W2grad + d3 * a2'; --> h_W2grad
addVectors(h_b2grad,h_d3,h_b2grad,VISIBLE_SIZE);//b2grad = b2grad + d3;
/*
for(ii = 0; ii < HIDDEN_SIZE; ii++) {
for(jj = 0; jj < VISIBLE_SIZE; jj++) {
h_Wtemp1[ii*HIDDEN_SIZE+jj]= h_d2[jj]*h_inputs[i+ii];
}
}*/
mmm_kij(h_d2,&h_inputs[i],h_Wtemp1,HIDDEN_SIZE,1,1,VISIBLE_SIZE);//d2 * xM' --> h_Wtemp1
addVectors(h_W1grad,h_Wtemp1,h_W1grad,HIDDEN_SIZE*VISIBLE_SIZE);//W1grad = W1grad + d2 * xM'; --> h_W1grad
addVectors(h_b1grad,h_d2,h_b1grad,HIDDEN_SIZE);//b1grad = b1grad + d2; --> h_b1grad
/*
%for calculating cost
%equiv. to (HwbXi-y)^2
cost = cost + norm(a3 - xM)^2;
*/
subVectors(h_a3,&h_inputs[i],h_temp1,VISIBLE_SIZE);//(a3 - xM) --> h_temp1
float temp = 0;
temp = normVector(h_temp1,VISIBLE_SIZE);//norm(a3 - xM)^2; --> temp
cost += temp*temp;//cost = cost + norm(a3 - xM)^2; --> cost
}
/*
%W1grad = [(1/m) \Delta W^{(1)} + \lambda W^{(1)}]
W2grad = W2grad ./ M + lambda .* W2;
b2grad = b2grad ./ M;
W1grad = W1grad ./ M + lambda .* W1;
b1grad = b1grad ./ M;
*/
//NOTE: M = SAMPLE_SIZE = 10000 by default
//W2grad = W2grad ./ M + lambda .* W2;
vectElemFloatDiv(h_W2grad,h_W2grad,VISIBLE_SIZE*HIDDEN_SIZE,SAMPLE_SIZE);//W2grad ./ M --> h_W2grad
vectElemFloatMult(h_W2,h_Wtemp1,VISIBLE_SIZE*HIDDEN_SIZE,LAMBDA);// lambda .* W2; --> h_Wtemp1
addVectors(h_W2grad,h_Wtemp1,h_W2grad,VISIBLE_SIZE*HIDDEN_SIZE);//W2grad = W2grad ./ M + lambda .* W2; --> h_W2grad
//b2grad = b2grad ./ M;
vectElemFloatDiv(h_b2grad,h_b2grad,VISIBLE_SIZE,SAMPLE_SIZE);//b2grad = b2grad ./ M; --> h_b2grad
//W1grad = W1grad ./ M + lambda .* W1;
// NOTE(review): copy-paste bug — these three lines operate on h_W2grad and
// h_W2 again (dividing W2grad by M a second time) instead of h_W1grad and
// h_W1; h_W1grad is never scaled or regularized.
vectElemFloatDiv(h_W2grad,h_W2grad,VISIBLE_SIZE*HIDDEN_SIZE,SAMPLE_SIZE);//W1grad ./ M --> h_W2grad
vectElemFloatMult(h_W2,h_Wtemp1,VISIBLE_SIZE*HIDDEN_SIZE,LAMBDA);// lambda .* W1; --> h_Wtemp1
addVectors(h_W2grad,h_Wtemp1,h_W2grad,VISIBLE_SIZE*HIDDEN_SIZE);//W1grad = W1grad ./ M + lambda .* W1; --> h_W1grad
//b1grad = b1grad ./ M;
vectElemFloatDiv(h_b1grad,h_b1grad,HIDDEN_SIZE,SAMPLE_SIZE);//b1grad = b1grad ./ M; --> h_b1grad
/*
%rho
sparsePen = sparsityParam .* log(sparsityParam./rhoHat) + (1-sparsityParam).*log((1-sparsityParam)./(1-rhoHat));
*/
initializeVector(h_temp1,HIDDEN_SIZE,1.0);//1--> h_temp1
subVectors(h_temp1,h_rhoHat,h_temp2,HIDDEN_SIZE);//(1-rhoHat) --> h_temp2
initializeVector(h_temp1, HIDDEN_SIZE, SPARSITY_COMPLEMENT);//(1-sparsityParam) --> h_temp1
vectElemVectDiv(h_temp1,h_temp2,h_temp2,HIDDEN_SIZE);//(1-sparsityParam)./(1-rhoHat) --> h_temp2
vectElemLog(h_temp2,h_temp2,HIDDEN_SIZE);//log((1-sparsityParam)./(1-rhoHat)) --> h_temp2
vectElemVectMult(h_temp1,h_temp2,h_temp2,HIDDEN_SIZE);//(1-sparsityParam).*log((1-sparsityParam)./(1-rhoHat)) --> h_temp2
initializeVector(h_temp1,HIDDEN_SIZE,SPARSITY_PARAM);//sparsityParam --> h_temp1
vectElemVectDiv(h_temp1,h_rhoHat,h_temp1,HIDDEN_SIZE);//(sparsityParam./rhoHat) --> h_temp1
vectElemLog(h_temp1,h_temp1,HIDDEN_SIZE);//log(sparsityParam./rhoHat) --> h_temp1
vectElemFloatMult(h_temp1,h_temp1,HIDDEN_SIZE,SPARSITY_PARAM);//sparsityParam .* log(sparsityParam./rhoHat)--> h_temp1
addVectors(h_temp1,h_temp2,h_sparsePen,HIDDEN_SIZE);//sparsePen = --> h_sparsePen
/*
cost = (cost / (2 * M)) + (lambda / 2 ) * (sum(sum(W1.^2)) + (sum(sum(W2.^2)))) + beta * sum(sparsePen);
*/
vectElemVectMult(h_W1,h_W1,h_Wtemp1,VISIBLE_SIZE*HIDDEN_SIZE);//W1.^2 --> h_Wtemp1
float sumW1 = sumVector(h_Wtemp1,VISIBLE_SIZE*HIDDEN_SIZE);//sum(sum(W1.^2)) --> sumW1
vectElemVectMult(h_W2,h_W2,h_Wtemp2,VISIBLE_SIZE*HIDDEN_SIZE);//W2.^2 --> h_Wtemp2
float sumW2 = sumVector(h_Wtemp2,VISIBLE_SIZE*HIDDEN_SIZE);// (sum(sum(W2.^2))) --> sumW2
cost = (cost/(2*SAMPLE_SIZE)) + (LAMBDA/2)*(sumW1 + sumW2) + BETA*sumVector(h_sparsePen,HIDDEN_SIZE);
/***************************************
END AND PRINT SERIAL TIMING
****************************************/
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
time_stamp[OPTION] = diff(time1,time2);
printTiming(time_stamp,OPTIONS);
/***************************************
DEBUG OUTPUTS
****************************************/
cout << "z2" << endl;//DEBUG
printVector(h_z2,HIDDEN_SIZE);//DEBUG
cout << "a2" << endl;//DEBUG
printVector(h_a2,HIDDEN_SIZE);//DEBUG
cout << "rhoHat" << endl;//DEBUG
printVector(h_rhoHat,HIDDEN_SIZE);//DEBUG
cout << "z3" << endl;//DEBUG
printVector(h_z3,VISIBLE_SIZE);//DEBUG
cout << "a3" << endl;//DEBUG
printVector(h_a3,VISIBLE_SIZE);//DEBUG
cout << "Cost: " << cost << endl;//DEBUG
cout << "sparsePen" << endl;//DEBUGs
printVector(h_sparsePen,HIDDEN_SIZE);//DEBUG
/*
cout << "Wtemp1" << endl;//DEBUG
printVector(h_Wtemp1,VISIBLE_SIZE*HIDDEN_SIZE);//DEBUG
cout << "W2" << endl;//DEBUG
printVector(h_W2, HIDDEN_SIZE);//DEBUG
mmm_kij(h_W2,h_a2,h_z3,VISIBLE_SIZE,HIDDEN_SIZE,HIDDEN_SIZE,1);//DEBUG
cout<< "Z3_kij" << endl;//DEBUG
printVector(h_z3, VISIBLE_SIZE);//DEBUG
mmm_kij(h_W2,h_a2,h_z3,VISIBLE_SIZE,HIDDEN_SIZE,HIDDEN_SIZE,1);//DEBUG
cout << "Z3_ijk" << endl;//DEBUG
printVector(h_z3,VISIBLE_SIZE);//DEBUG
cout << "d2" << endl;//DEBUG
printVector(h_d2,HIDDEN_SIZE);//DEBUG
cout << "d3" << endl;//DEBUG
printVector(h_d3,VISIBLE_SIZE);//DEBUG
cout << "b1" << endl;//DEBUG
printVector(h_b1,HIDDEN_SIZE);//DEBUG
cout << "b2" << endl;//DEBUG
printVector(h_b2,VISIBLE_SIZE);//DEBUG
*/
/***************************************
FREEING MEMORY
****************************************/
free(h_inputs);
free(h_rhoHat);
free(h_W1);
free(h_W2);
free(h_b1);
free(h_b2);
free(h_W1grad);
free(h_W2grad);
free(h_b1grad);
free(h_b2grad);
free(h_z2);
free(h_z3);
free(h_a2);
free(h_a3);
free(h_d2);
free(h_d3);
free(h_temp1);
free(h_temp2);
free(h_Wtemp1);
free(h_Wtemp2);
free(h_sparsePen);
return 0;
}
/***********************************************
TIMING FUNCTIONS AND STRUCTS
***********************************************/
// Elapsed time end - start as a normalized timespec (tv_nsec in [0, 1e9)).
struct timespec diff(struct timespec start, struct timespec end)
{
struct timespec elapsed;
elapsed.tv_sec  = end.tv_sec  - start.tv_sec;
elapsed.tv_nsec = end.tv_nsec - start.tv_nsec;
// Borrow one second when the nanosecond difference went negative.
if (elapsed.tv_nsec < 0) {
elapsed.tv_sec  -= 1;
elapsed.tv_nsec += 1000000000;
}
return elapsed;
}
// Print each recorded interval, scaled by the CPG (clock-rate) constant.
// NOTE(review): the label says "(nsec)" but the value printed is
// nanoseconds * CPG, i.e. estimated CPU cycles — confirm the intent.
void printTiming(struct timespec* time_stamp,int numTimings){
for (int j = 0; j < numTimings; j++) {
if (j != 0) printf(", ");
printf("\nCPU time: %ld (nsec)", (long int)((double)(CPG)*(double)
(GIG * time_stamp[j].tv_sec + time_stamp[j].tv_nsec)));
}
printf("\n");
}
/***********************************************
NAIVE VECTOR OPERATIONS
***********************************************/
// Sum of all `length` entries of src (0 for an empty vector).
float sumVector(float* src,int length){
float total = 0;
for(int idx = 0; idx < length; ++idx){
total += src[idx];
}
return total;
}
// Element-wise natural logarithm: dest[i] = log(src[i]).
void vectElemLog(float* src,float* dest,int length){
for(int idx = 0; idx < length; ++idx){
dest[idx] = log(src[idx]);
}
}
// Euclidean (L2) norm of a `length`-element vector.
float normVector(float* src,int length){
float sumSq = 0;
for(int idx = 0; idx < length; ++idx){
const float v = src[idx];
sumSq += v * v;
}
return sqrt(sumSq);
}
// Element-wise logistic sigmoid: dest[i] = 1 / (1 + e^-src[i]).
void vectElemSigmoid(float* src,float* dest,int length){
int idx = 0;
while (idx < length) {
dest[idx] = float(1/(1+exp(-src[idx])));
++idx;
}
}
// Element-wise (Hadamard) product: dest[i] = src1[i] * src2[i].
void vectElemVectMult(float* src1, float* src2, float* dest, int length){
int idx = 0;
while (idx < length) {
dest[idx] = src1[idx] * src2[idx];
++idx;
}
}
//faster if float is used instead?
// Element-wise division by an integer divisor (promoted to float).
void vectElemIntDiv(float* src, float* dest,int length,int divisor){
const float d = (float)divisor;
for(int idx = 0; idx < length; ++idx){
dest[idx] = src[idx] / d;
}
}
// Element-wise division by a float divisor: dest[i] = src[i] / divisor.
void vectElemFloatDiv(float* src, float* dest,int length,float divisor){
int idx = 0;
while (idx < length) {
dest[idx] = src[idx] / divisor;
++idx;
}
}
// Element-wise scaling: dest[i] = src[i] * multiplicand.
void vectElemFloatMult(float* src, float* dest, int length,float multiplicand){
for(int idx = 0; idx < length; ++idx){
dest[idx] = multiplicand * src[idx];
}
}
// Element-wise quotient: dest[i] = src1[i] / src2[i] (no zero check).
void vectElemVectDiv(float* src1,float* src2,float* dest,int length){
int idx = 0;
while (idx < length) {
dest[idx] = src1[idx] / src2[idx];
++idx;
}
}
//Just for debugging eh?
// Print every element of A on its own line.
void printVector(float* A, int length){
int idx = 0;
while (idx < length) {
cout << A[idx] << endl;
++idx;
}
}
// Fill all `length` entries of array with val.
void initializeVector(float *array, int length, float val){
int idx = 0;
while(idx < length){
array[idx++] = val;
}
}
//Just for debugging eh?
// Print a rows x cols matrix (row-major, flat storage), one row per line,
// tab-separated.
void printMatrix(float* A, int rows, int cols){
for(int i = 0;i < rows; i++){
for(int j = 0;j < cols;j++){
// fixed: row i starts at i*cols (was i*rows, which misreads every
// non-square matrix)
cout << A[i*cols+j] << "\t";
}
cout << endl;
}
}
// Element-wise sum: dest[i] = src1[i] + src2[i]; dest may alias an input
// (callers pass the same array as src and dest).
void addVectors(float* src1, float* src2, float* dest, int length){
int idx = 0;
while (idx < length) {
dest[idx] = src1[idx] + src2[idx];
++idx;
}
}
// Element-wise difference: dest[i] = src1[i] - src2[i]; dest may alias an input.
void subVectors(float* src1, float* src2, float* dest, int length){
int idx = 0;
while (idx < length) {
dest[idx] = src1[idx] - src2[idx];
++idx;
}
}
// Dot product of two `length`-element vectors, written to *dest.
void dotPdt(float* src1,float* src2, float *dest, int length){
float total = 0;
int idx = 0;
while (idx < length) {
total += src1[idx] * src2[idx];
++idx;
}
*dest = total;
}
// Dot product of two `length`-element vectors (returns the scalar result).
// Fixed: the accumulator previously ADDED the paired elements (A[i] + B[i])
// instead of multiplying them, which is why this helper was marked broken.
float altDotPdt(float* A, float* B, int length){
float accum = 0;
for(int i = 0; i < length; i++){
accum += A[i] * B[i];
}
return accum;
}
// Transpose a rows x cols matrix: dest (cols x rows, row-major) receives
// dest[j][i] = src[i][j].
void matrixTranspose(float* src,float* dest,int rows,int cols){
for(int j = 0;j < cols;j++){
for(int i = 0;i < rows;i++){
dest[j*rows+i] = src[i*cols+j];
}
}
}
// Fill arr (rows*cols entries, flat) with weights drawn uniformly from
// [-r, r], where r = sqrt(6)/sqrt(rows+cols+1), using rand() seeded with
// `seed` (so results are reproducible for a given seed).
void initializeMatrixWeightsRand(float *arr, int rows, int cols, int seed) {
srand(seed);
const float r = sqrt(6) / sqrt(rows + cols + 1);  // loop-invariant bound
const int numElements = rows * cols;
for (int idx = 0; idx < numElements; idx++) {
float u = float(rand() % 10000) / 10000;  // quantized uniform in [0, 1)
arr[idx] = u * 2 * r - r;                 // map [0,1) onto [-r, r)
}
}
// Zero out a rows x cols weight matrix stored as a flat array.
void initializeMatrixWeightsZero(float *arr, int rows, int cols) {
const int total = rows * cols;
for (int idx = 0; idx < total; ++idx) {
arr[idx] = 0.0;
}
}
//initialize the vector weights to 0
void initializeVectorWeightsZero(float *arr, int numElements){
int idx = 0;
while (idx < numElements) {
arr[idx++] = 0;
}
}
// Matrix-matrix multiply-accumulate in kij loop order: dest += src1 * src2.
// src1 is row1 x col1, src2 is row2 x col2 (col1 must equal row2), and dest
// is row1 x col2. dest is ACCUMULATED into — callers must zero it first.
void mmm_kij(float* src1, float* src2, float* dest, int row1, int col1, int row2, int col2){
for(int k = 0; k < row1; k++){
for(int i = 0; i < col1; i++){//or row2
for(int j = 0; j < col2; j++){
// fixed: src1's row index is k (was j), which produced wrong results
// for any product with row1 != col2 — including the d3*a2' and d2*xM'
// outer products in main().
dest[k*col2+j] += src1[k*col1+i] * src2[i*col2+j];
}
}
}
}
//http://www.cplusplus.com/forum/general/13087/
//http://www.cplusplus.com/forum/general/17771/
//http://www.cplusplus.com/forum/beginner/24906/
// Parse a comma-separated file of floats into `array`, in row-major file
// order. Silently does nothing if the file cannot be opened.
// NOTE(review): `numElements` is never used to bound `index`, so a file
// containing more values than expected overruns the buffer — confirm the
// input files always match the declared sizes.
void readCSV(float* array, int numElements, string filename){
ifstream infile(filename.c_str());
int index = 0;
if(infile){
string line;
while(getline(infile,line)){
istringstream sep(line);
string result;
while(getline(sep, result,',')){
// atof returns 0.0 both for a true zero and for a malformed token,
// hence the debug trace below on every zero value.
array[index] = atof(result.c_str());
if(array[index] == 0){
cout << index << endl;//DEBUG
}
index++;
}
}
}
//cout << "COUNT WAS " << index << endl;//DEBUG
//cout << "Last val was " << array[index-1] << endl;//DEBUG
}
|
2,849 | // David Ramirez A01206423
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#define RECTS 1e9
#define BLOCKS 1000
#define THREADS 512
// long num_rects = 100000, i;
// double mid, height, width, area;
// sum = 0.0;
// width = 1.0 / (double) num_rects;
// for (i = 0; i < num_rects; i++){
// mid = (i + 0.5) * width;
// height = 4.0 / (1.0 + mid * mid);
// sum += height;
// }
// area = width * sum;
//This function is for the GPU:
// Midpoint-rule integration of 4/(1+x^2) over [0,1] (= pi). Each thread
// grid-strides over the `rects` rectangles and accumulates rectangle
// heights into its own slot area[id]; the host multiplies the reduced sum
// by `width` afterwards.
// NOTE(review): area[id] is accumulated with +=, so the buffer MUST be
// zeroed before launch.
__global__ void piCalc(double *area, double width, int rects) {
double mid, height;
// Get our index
int index = threadIdx.x + (blockIdx.x * blockDim.x);
// Pos in array
int id = index;
// do while we are inside our array
while(index<rects){
//Original pi algo
mid = (index + 0.5) * width;
height = 4.0 / (1.0 + mid * mid);
area[id] += height;
// Move our index
index += (blockDim.x*gridDim.x);
}
}
// Host driver: allocates one partial-sum slot per launched thread, runs the
// grid-stride pi kernel, reduces the partials on the CPU, and prints pi.
int main(){
// Host copy of the per-thread partial sums
double *pi;
// Device copy
double *d_pi;
// One accumulator slot per launched thread
int size=(BLOCKS*THREADS);
// Result var
double area=0;
// Width of each rectangle
double width=1.0/(double) RECTS;
// calloc (not malloc): piCalc accumulates with +=, so every slot must
// start at zero. The original code copied an UNINITIALIZED malloc'd
// buffer to the device, so the result contained garbage.
pi = (double*) calloc(size, sizeof(double));
cudaMalloc((void **)&d_pi, size * sizeof(double));//Device memory (GPU)
// Send the zeroed accumulators to the GPU
cudaMemcpy(d_pi, pi, size * sizeof(double), cudaMemcpyHostToDevice);
// Do the operation in the GPU
// (RECTS = 1e9 fits in the kernel's int parameter; see #define above)
piCalc<<<BLOCKS, THREADS>>>(d_pi, width, RECTS); // Launch GPU with its corresponding inputs
// Retrieve the results (blocking copy also synchronizes with the kernel)
cudaMemcpy(pi, d_pi, size * sizeof(double), cudaMemcpyDeviceToHost); // Copy output array from GPU back to CPU (Device to host)
// Reduce the per-thread partial sums on the host
for(int i = 0; i<size; ++i){
area += pi[i];
}
// area = width * sum(heights)
area=width*area;
printf("Pi = %lf\n", area);
// Free our CPUs and GPUs
free(pi);
cudaFree(d_pi);
return 0;
}
|
2,850 | /*
* Alexandre Maros - 2016
*
* Cuda Matrix Multiplication with Global Memory.
*
* nvcc cuda_matrix_global.cu -o cg.o
*
* Implemented by Alexandre Maros for learning purposes.
* A version of this code using Shared Memory is in here:
* https://github.com/alepmaros/cuda_matrix_multiplication
*
* Distributed under the MIT Lincese.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
//32x32
#define NTHREADS_X 32
#define NTHREADS_Y 32
#define THREADS_PER_BLOCK NTHREADS_X * NTHREADS_Y
/* A macro used for error checking in CUDA function calls
* Credit to: http://stackoverflow.com/a/14038590 for the gpuErrchk macro.
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error-check helper behind the gpuErrchk macro: on any CUDA failure,
// print a readable message (error string, file, line) and, unless
// abort=false, terminate the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Naive global-memory matrix multiply: one thread computes one cell of
// C = A * B, where A is (c_nlines x a_ncolumns), B is (a_ncolumns x
// c_ncolumns) and C is (c_nlines x c_ncolumns), all row-major.
// Launch with a 2D grid covering C (x = columns, y = lines).
__global__ void matrix_mul(int *a, int *b, int *c, int a_ncolumns, int c_nlines, int c_ncolumns)
{
int column = blockIdx.x * blockDim.x + threadIdx.x;
int line = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the ragged edge of the grid.
if (column >= c_ncolumns || line >= c_nlines)
return;
int acc = 0;
for (int k = 0; k < a_ncolumns; k++)
{
acc += a[a_ncolumns * line + k] * b[k * c_ncolumns + column];
}
c[line * c_ncolumns + column] = acc;
}
// Driver: reads two matrices from stdin (dimensions first, then row-major
// values), multiplies them on the GPU with matrix_mul, and prints the
// result (plus kernel time when compiled with -D__TIME).
// NOTE(review): scanf return values are never checked — malformed input
// silently proceeds with indeterminate dimensions.
int main(){
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int a_nlines, a_ncolumns;
int b_nlines, b_ncolumns;
int c_nlines, c_ncolumns;
size_t a_size, b_size, c_size;
int i, j;
cudaEvent_t start, stop;
gpuErrchk( cudaEventCreate(&start) );
gpuErrchk( cudaEventCreate(&stop) );
scanf("%d", &a_nlines);
scanf("%d", &a_ncolumns);
scanf("%d", &b_nlines);
scanf("%d", &b_ncolumns);
// C inherits A's line count and B's column count.
c_nlines = a_nlines;
c_ncolumns = b_ncolumns;
#ifdef __DEBUG
printf("a_nlines: %d\na_ncolumns: %d\nb_nlines: %d\nb_ncolumns: %d\nc_nlines: %d\nc_ncolumns: %d\n", a_nlines, a_ncolumns, b_nlines, b_ncolumns, c_nlines, c_ncolumns);
#endif
if ( a_ncolumns != b_nlines )
{
printf("Number of columns in Matrix A should be equals to number of lines in Matrix B\n");
return EXIT_FAILURE;
}
a_size = a_nlines * a_ncolumns * sizeof(int);
b_size = b_nlines * b_ncolumns * sizeof(int);
c_size = c_nlines * c_ncolumns * sizeof(int);
gpuErrchk( cudaMalloc((void **) &d_a, a_size) );
gpuErrchk( cudaMalloc((void **) &d_b, b_size) );
gpuErrchk( cudaMalloc((void **) &d_c, c_size) );
a = (int *)malloc(a_size);
b = (int *)malloc(b_size);
c = (int *)malloc(c_size);
memset(c, 0, c_nlines*c_ncolumns*sizeof(int));
for (i = 0; i < a_nlines; i++)
{
for (j = 0; j < a_ncolumns; j++)
{
scanf("%d", &a[i * a_ncolumns + j]);
}
}
for (i = 0; i < b_nlines; i++)
{
for (j = 0; j < b_ncolumns; j++)
{
scanf("%d", &b[i * b_ncolumns + j]);
}
}
gpuErrchk( cudaMemcpy(d_a, a, a_size, cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_b, b, b_size, cudaMemcpyHostToDevice) );
// Grid: ceil(c_ncolumns/32) x ceil(c_nlines/32) blocks of 32x32 threads,
// enough to cover every cell of C.
dim3 tbloco = dim3(
(int) std::ceil( (double) c_ncolumns / NTHREADS_X ),
(int) std::ceil ( (double) c_nlines / NTHREADS_Y ),
1
);
dim3 tthreads = dim3(
NTHREADS_X,
NTHREADS_Y,
1
);
#ifdef __DEBUG
printf("tbloco.x: %d tbloco.y: %d tbloco.z: %d\n", tbloco.x, tbloco.y, tbloco.z);
printf("tthreads.x: %d tthreads.y: %d\n", tthreads.x, tthreads.y);
#endif
cudaEventRecord(start);
// kernel call
matrix_mul<<<tbloco,tthreads>>>(d_a, d_b, d_c, a_ncolumns, c_nlines, c_ncolumns);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaEventRecord(stop) );
// Blocking copy: also waits for the kernel (and the stop event) to finish.
gpuErrchk( cudaMemcpy(c, d_c, c_size, cudaMemcpyDeviceToHost) );
gpuErrchk( cudaEventSynchronize(stop) );
#ifndef __NO_OUTPUT
// print Matrix
for (i = 0; i < c_nlines; i++)
{
for (j = 0; j < c_ncolumns; j++)
{
printf("%d ", c[i * c_ncolumns + j]);
}
printf("\n");
}
printf("\n");
#endif
#ifdef __TIME
float milliseconds = 0;
gpuErrchk( cudaEventElapsedTime(&milliseconds, start, stop) );
printf("%.5f\n", milliseconds);
#endif
free(a); free(b); free(c);
gpuErrchk( cudaFree(d_a) );
gpuErrchk( cudaFree(d_b) );
gpuErrchk( cudaFree(d_c) );
return 0;
}
|
2,851 | #include "includes.h"
// One thread per input element: histogram d_in into BIN_COUNT bins, binning
// each value by (value % BIN_COUNT), with atomicAdd to resolve collisions.
// NOTE(review): there is no bounds guard, so the launch must supply exactly
// one thread per input element or d_in is read out of bounds. Negative
// inputs also yield a negative myBin (C++ % truncates toward zero) and an
// out-of-range write — confirm inputs are non-negative.
__global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
atomicAdd(&(d_bins[myBin]), 1);
}
2,852 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
#include <cstdio>
#define N 300
#define NSTREAM 4
// Four identical dummy kernels used only to keep the GPU busy so stream
// concurrency can be observed in a profiler. The local accumulator is never
// written back, so the work has no observable side effects.
__global__ void kernel_1()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; iter++)
    {
        acc += tan(0.1) * tan(0.1);
    }
}
__global__ void kernel_2()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; iter++)
    {
        acc += tan(0.1) * tan(0.1);
    }
}
__global__ void kernel_3()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; iter++)
    {
        acc += tan(0.1) * tan(0.1);
    }
}
__global__ void kernel_4()
{
    double acc = 0.0;
    for (int iter = 0; iter < N; iter++)
    {
        acc += tan(0.1) * tan(0.1);
    }
}
// Launches 4 dummy kernels on each of NSTREAM streams (depth-first order)
// and reports the wall time measured with CUDA events.
// Fix: the per-stream events were created but never destroyed (leak).
int main(int argc, char **argv)
{
    int n_streams = NSTREAM;
    int isize = 1;
    int iblock = 1;
    float elapsed_time;
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name,n_streams);
    cudaSetDevice(dev);
    // Hyper-Q needs SM 3.5+; older parts serialize or limit concurrency
    if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
    {
        if (deviceProp.concurrentKernels == 0)
        {
            printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
            printf("> CUDA kernel runs will be serialized\n");
        }
        else
        {
            printf("> GPU does not support HyperQ\n");
            printf("> CUDA kernel runs will have limited concurrency\n");
        }
    }
    printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
    // Allocate and initialize stream handles and per-stream ordering events
    cudaStream_t *streams = (cudaStream_t *)malloc(n_streams * sizeof(cudaStream_t));
    cudaEvent_t *events = (cudaEvent_t *)malloc(n_streams * sizeof(cudaEvent_t));
    for (int i = 0; i < n_streams; i++)
    {
        cudaStreamCreate(&(streams[i]));
    }
    for (int i = 0; i < n_streams; ++i)
    {
        // timing disabled: these events are used only for ordering
        cudaEventCreateWithFlags(&events[i], cudaEventDisableTiming);
    }
    // execution configuration: a single thread per kernel
    dim3 block(iblock);
    dim3 grid(isize / iblock);
    printf("> grid %d block %d\n", grid.x, block.x);
    // timing events on the default stream
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // dispatch with depth-first ordering: all 4 kernels of one stream are
    // enqueued before moving on to the next stream
    for (int i = 0; i < n_streams; i++)
    {
        kernel_1 <<<grid, block, 0, streams[i] >>>();
        kernel_2 <<<grid, block, 0, streams[i] >>>();
        kernel_3 <<<grid, block, 0, streams[i] >>>();
        kernel_4 <<<grid, block, 0, streams[i] >>>();
        // NOTE: recording and immediately waiting on the SAME stream adds no
        // ordering; kept to preserve the original dispatch pattern.
        cudaEventRecord(events[i], streams[i]);
        cudaStreamWaitEvent(streams[i], events[i], 0);
    }
    // stop event on the default stream; synchronize to drain all work
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // cudaEventElapsedTime reports milliseconds
    cudaEventElapsedTime(&elapsed_time, start, stop);
    printf("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f);
    // release all streams AND their events (events were leaked before)
    for (int i = 0; i < n_streams; i++)
    {
        cudaStreamDestroy(streams[i]);
        cudaEventDestroy(events[i]);
    }
    free(streams);
    free(events);
    // destroy timing events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // reset device before exit
    cudaDeviceReset();
    system("Pause");
    return 0;
}
|
2,853 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
// Sweeps (width, height) shapes through cudaMallocPitch and verifies the
// returned pitch against a simple "round rows up to 512 bytes" model.
// Fixes vs. original: the API result is checked BEFORE `pitch` is read
// (pitch is indeterminate on failure), every allocation is freed (the
// original freed only once per outer iteration, leaking the other 99),
// size_t values are printed with %lu instead of %d, and the dead duplicate
// mismatch branch was folded into the first one.
int main(int argc,char* argv[]){
    cudaError_t res;
    float* d;
    int i, j;
    size_t pitch, width, height;
    for(i = 0; i < 1000; i++){
        for(j = 0; j < 100; j++){
            width = 5 * i;    // requested row width in bytes
            height = 100 * j; // number of rows
            res = cudaMallocPitch(&d, &pitch, width, height);
            if(res != cudaSuccess){
                printf("What ... ?(%d)\n", res);
                exit(1);
            }
            // model: each row rounded up to the next multiple of 512 bytes,
            // zero when nothing was requested
            size_t expected_pitch;
            expected_pitch = width*height != 0 ? (width/512 + 1)*512 : 0;
            if(pitch != expected_pitch){
                printf("width : %lu\n", width);
                printf("height : %lu\n", height);
                printf("pitch == %lu\n", pitch);
                printf("expected pitch == %lu\n", expected_pitch);
                printf("Total device memory usage : %lu\n", width*height);
                exit(1);
            }
            // free every allocation inside the inner loop
            cudaFree(d);
        }
    }
    return 0;
}
|
2,854 | #include "includes.h"
// Minimal empty kernel: demonstrates declaring and launching a __global__
// function that performs no work.
__global__ void myfirstkernel(void) {
// intentionally empty
} |
2,855 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
//#define DEBUG
#define L1 1024
#define L2 1024
#define L3 1024
/* ========== Multiple block, Multiple threads ========== */
/* ========== Can change different matrix length and width ========== */
/* ========== B matrix doen't transposed ========== */
/* ========== fixed block dimension as 32 * 32 ========== */
/* ========== Max array length: 1024 due to MaxThread per side is 1024 ========== */
__global__ void MatMulKernel(float *Ad, float *Bd, float *Cd);
void MatMul(float *A, float *B, float *C);
// Driver: builds two random integer-valued matrices, multiplies them on the
// CPU (reference) and on the GPU (MatMul), times both, and compares results.
// Fix: the four calloc results were dereferenced without a NULL check.
int main(int argc, char *argv[])
{
    float *A, *B, *C, *AxB;
    int pass = 1;
    // A: L1 x L2, B: L2 x L3, C: GPU result, AxB: CPU reference (both L1 x L3);
    // calloc zero-initializes the accumulator matrices.
    A = (float *)calloc(L1 * L2, sizeof(float));
    B = (float *)calloc(L2 * L3, sizeof(float));
    C = (float *)calloc(L1 * L3, sizeof(float));
    AxB = (float *)calloc(L1 * L3, sizeof(float));
    if (A == NULL || B == NULL || C == NULL || AxB == NULL) {
        // fail cleanly instead of dereferencing NULL below
        fprintf(stderr, "host allocation failed\n");
        free(A); free(B); free(C); free(AxB);
        return 1;
    }
    /* ========== Assign values to arrays A and B ========== */
    for (int i = 0; i < L1; ++i) {
        for (int j = 0; j < L2; ++j) {
            A[i * L2 + j] = rand() % 30;
        }
    }
    for (int i = 0; i < L2; ++i) {
        for (int j = 0; j < L3; ++j) {
            B[i * L3 + j] = rand() % 30;
        }
    }
#ifdef DEBUG
    printf("Matrix A:\n");
    for (int i = 0; i < L1; i++) {
        for (int j = 0; j < L2; j++) {
            printf("%3.0f", A[i * L2 + j]);
        }
        printf("\n");
    }
    printf("Matrix B:\n");
    for (int i = 0; i < L2; i++) {
        for (int j = 0; j < L3; j++) {
            printf("%3.0f", B[i * L3 + j]);
        }
        printf("\n");
    }
#endif
    /* ========== Calculate reference answers on the CPU ========== */
    struct timeval starttime, endtime;
    gettimeofday(&starttime, NULL);
    for (int i = 0; i < L1; ++i) {
        for (int j = 0; j < L3; ++j) {
            for (int k = 0; k < L2; ++k) {
                AxB[i * L3 + j] += A[i * L2 + k] * B[k * L3 + j];
            }
        }
    }
    gettimeofday(&endtime, NULL);
    double executime;
    executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
    executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
    printf("CPU time: %13lf msec\n", executime);
#ifdef DEBUG
    printf("Matrix AxB:\n");
    for (int i = 0; i < L1; i++) {
        for (int j = 0; j < L3; j++) {
            printf("%5.0f", AxB[i * L3 + j]);
        }
        printf("\n");
    }
#endif
    /* ========== Calculate answers on the GPU ========== */
    MatMul((float *)A, (float *)B, (float *)C);
#ifdef DEBUG
    printf("Matrix C:\n");
    for (int i = 0; i < L1; i++) {
        for (int j = 0; j < L3; j++) {
            printf("%5.0f", C[i * L3 + j]);
        }
        printf("\n");
    }
#endif
    /* ========== Verify: exact float compare is acceptable here because both
       sides accumulate the same small integers in the same k-order ========== */
    for (int i = 0; i < L1; ++i) {
        for (int j = 0; j < L3; ++j) {
            if(AxB[i * L3 + j] != C[i * L3 + j]) {
                printf("AxB[%d][%d] = %2.0f C[%d][%d] = %2.0f\n", i, j, AxB[i * L3 + j], i, j, C[i * L3 + j]);
                pass = 0;
            }
        }
    }
    printf("Test %s\n", (pass)?"PASSED":"FAILED");
    free(A);
    free(B);
    free(C);
    free(AxB);
    return 0;
}
// Matrix multiplication kernel called by MatMul()
// One thread computes one element of C = A x B.
// Expects a 2D launch whose (block * blockDim + thread) coordinates cover
// the L1 x L3 output; out-of-range threads return immediately.
__global__ void MatMulKernel(float *Ad, float *Bd, float *Cd)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y; // output row
    const int c = blockIdx.x * blockDim.x + threadIdx.x; // output column
    if (r >= L1 || c >= L3)
        return;
    float acc = 0;
    for (int k = 0; k < L2; k++)
        acc += Ad[r * L2 + k] * Bd[k * L3 + c];
    Cd[r * L3 + c] = acc;
}
/* ========== Matrix multiplication - Host code ========== */
void MatMul(float *A, float *B, float *C)
{
size_t size_1 = L1 * L2 * sizeof(float);
size_t size_2 = L2 * L3 * sizeof(float);
size_t size_3 = L1 * L3 * sizeof(float);
float *Ad, *Bd, *Cd;
/* ========== Allocate and Load A, B to device memory ========== */
cudaMalloc((void **)&Ad, size_1);
cudaMemcpy(Ad, A, size_1, cudaMemcpyHostToDevice);
cudaMalloc((void **)&Bd, size_2);
cudaMemcpy(Bd, B, size_2, cudaMemcpyHostToDevice);
/* ========== Allocate C on the device ========== */
cudaMalloc((void **)&Cd, size_3);
/* ========== Setup the execution configuration ========== */
int GridDim_x = (L3 + 31) / 32, GridDim_y = (L1 + 31) / 32;
dim3 dimGrid(GridDim_x, GridDim_y);
dim3 dimBlock(32, 32);
/* ========== Get start time event ========== */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/* ========== Invoke kernel ========== */
MatMulKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Cd);
cudaError_t cuda_err = cudaGetLastError();
if ( cudaSuccess != cuda_err ){
printf("before kernel call: error = %s\n", cudaGetErrorString (cuda_err));
exit(1) ;
}
/* ========== Get stop time event ========== */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
/* ========== Compute execution time ========== */
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
/* ========== Read C from device memory ========== */
cudaMemcpy(C, Cd, size_3, cudaMemcpyDeviceToHost);
/* ========== Free device memory ========== */
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
}
|
2,856 | //imports
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <stdlib.h>
// Debug kernel: prints its integer argument via device-side printf.
__global__ void printSome(int i){
printf("%d",i);
}
// Launches printSome once on each of five streams.
// Fix: the original destroyed the streams and returned without any
// synchronization, so the process could exit before the kernels ran and
// before the device printf buffer was flushed.
int main(){
    cudaStream_t streams[5];
    int i;
    for(i=0;i<5;i++){
        cudaStreamCreate(&streams[i]);
    }
    // one asynchronous single-thread launch per stream
    for(i=0;i<5;i++){
        printSome<<<1,1,0,streams[i]>>>(i);
    }
    // wait for all kernels so their printf output is flushed before exit
    cudaDeviceSynchronize();
    for(i=0;i<5;i++){
        cudaStreamDestroy(streams[i]);
    }
    return 0;
}
|
2,857 | #include <stdio.h>
// SpMV for CSR storage: one thread computes the dot product of one matrix
// row with inVector. csrRowPtr[row]..csrRowPtr[row+1] delimit the row's
// nonzeros in csrData/csrColIdx.
__global__ void spmv_csr_kernel(unsigned int dim, unsigned int *csrRowPtr,
        unsigned int *csrColIdx, float *csrData, float *inVector,
        float *outVector) {
    const unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= dim)
        return;
    float acc = 0.0f;
    const unsigned int first = csrRowPtr[row];
    const unsigned int last = csrRowPtr[row + 1];
    for (unsigned int e = first; e < last; ++e)
        acc += csrData[e] * inVector[csrColIdx[e]];
    outVector[row] = acc;
}
// SpMV for JDS (jagged diagonal storage): one thread per permuted row.
// The i-th nonzero of permuted row rowIdx lives at jdsColStartIdx[i] + rowIdx
// because JDS stores the matrix one jagged diagonal at a time over rows
// sorted by nonzero count; the result is scattered back through jdsRowPerm
// into original row order.
// NOTE(review): assumes the launch covers `dim` rows and that jdsRowNNZ never
// exceeds the number of jagged diagonals — relies on well-formed input.
__global__ void spmv_jds_kernel(unsigned int dim, unsigned int *jdsRowPerm,
unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx,
unsigned int *jdsColIdx, float *jdsData, float* inVector,
float *outVector) {
// one permuted row per thread
int rowIdx = blockIdx.x*blockDim.x + threadIdx.x;
if(rowIdx<dim){
float dotP = 0.0f;
for(int i=0; i<jdsRowNNZ[rowIdx]; i++){
dotP += jdsData[rowIdx+jdsColStartIdx[i]]*inVector[jdsColIdx[rowIdx+jdsColStartIdx[i]]];
}
outVector[jdsRowPerm[rowIdx]] = dotP;
}
}
// Host launcher for spmv_csr_kernel: uses the device's maximum block size
// and enough blocks to cover all `dim` rows.
void spmv_csr(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx,
        float *csrData, float *inVector, float *outVector) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    const int blockSize = prop.maxThreadsPerBlock;
    spmv_csr_kernel<<<ceil(dim/(float)blockSize), blockSize>>>(dim, csrRowPtr,
            csrColIdx, csrData, inVector, outVector);
}
// Host launcher for spmv_jds_kernel: uses the device's maximum block size
// and enough blocks to cover all `dim` rows.
void spmv_jds(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ,
        unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData,
        float* inVector, float *outVector) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    const int blockSize = prop.maxThreadsPerBlock;
    spmv_jds_kernel<<<ceil(dim/(float)blockSize), blockSize>>>(dim, jdsRowPerm,
            jdsRowNNZ, jdsColStartIdx, jdsColIdx, jdsData, inVector, outVector);
}
|
2,858 | #include <stdint.h>
// Shifts the hue of an interleaved HWC RGB8 image by hue_delta (fraction of
// a full hue turn). One thread converts one pixel RGB -> HSV, adds hue_delta
// to H with wrap-around, and converts back.
// Fixes: the bounds check used `>` instead of `>=`, letting
// idx == height*width*3 through and touching 3 bytes past the end of both
// buffers; the double-precision fabs/fmod/1.0 were replaced with float
// fabsf/fmodf/1.0f. (Removed: a large commented-out experimental
// murmurhash-based random hue shift.)
__global__ void adjust_hue_hwc(const int height, const int width,
uint8_t * const __restrict__ input, uint8_t * const __restrict__ output, const float hue_delta) {
    // multiply by 3 since we're dealing with contiguous RGB bytes for each pixel
    const int idx = (blockDim.x * blockIdx.x + threadIdx.x) * 3;
    // bounds check (>=, not >)
    if (idx >= height * width * 3) {
        return;
    }
    // RGB to HSV
    const float r = input[idx];
    const float g = input[idx + 1];
    const float b = input[idx + 2];
    const float M = fmaxf(r, fmaxf(g, b));
    const float m = fminf(r, fminf(g, b));
    const float chroma = M - m;
    // v (value) is the same as M
    float h = 0.0f, s = 0.0f;
    // hue
    if (chroma > 0.0f) {
        if (M == r) {
            // branch-free positive modulo of (g - b) / chroma
            const float num = (g - b) / chroma;
            const float sgn = num < 0.0f;
            const float sign = powf(-1.0f, sgn);
            h = (sgn * 6.0f + sign * fmodf(sign * num, 6.0f)) / 6.0f;
        } else if (M == g) {
            h = ((b - r) / chroma + 2.0f) / 6.0f;
        } else {
            h = ((r - g) / chroma + 4.0f) / 6.0f;
        }
    } else {
        h = 0.0f;
    }
    // saturation
    if (M > 0.0f) {
        s = chroma / M;
    } else {
        s = 0.0f;
    }
    // hue adjustment, wrapped to [0, 1)
    h = fmodf(h + hue_delta, 1.0f);
    // HSV to RGB; new_chroma == chroma whenever M > 0 since s = chroma / M
    const float new_h = h * 6.0f;
    const float new_chroma = M * s;
    const float x = chroma * (1.0f - fabsf(fmodf(new_h, 2.0f) - 1.0f));
    const float new_m = M - chroma;
    const bool between_0_and_1 = new_h >= 0.0f && new_h < 1.0f;
    const bool between_1_and_2 = new_h >= 1.0f && new_h < 2.0f;
    const bool between_2_and_3 = new_h >= 2.0f && new_h < 3.0f;
    const bool between_3_and_4 = new_h >= 3.0f && new_h < 4.0f;
    const bool between_4_and_5 = new_h >= 4.0f && new_h < 5.0f;
    const bool between_5_and_6 = new_h >= 5.0f && new_h < 6.0f;
    // pick each channel's contribution by hue sextant (bools act as 0/1)
    output[idx] = roundf(new_chroma * (between_0_and_1 || between_5_and_6) +
        x * (between_1_and_2 || between_4_and_5) + new_m);
    output[idx + 1] = roundf(new_chroma * (between_1_and_2 || between_2_and_3) +
        x * (between_0_and_1 || between_3_and_4) + new_m);
    output[idx + 2] = roundf(new_chroma * (between_3_and_4 || between_4_and_5) +
        x * (between_2_and_3 || between_5_and_6) + new_m);
}
|
2,859 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
//implement one grid with 4 blocks and 256 threads in total, 8x8 threads for each block
// Debug kernel: every thread prints its block coordinates plus the block and
// grid dimensions (one line per thread, serialized by device printf).
__global__ void print_threadIds()
{
printf("blockIdx,x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Teaching demo: derive a "unique" index from threadIdx.x plus a hard-coded
// block offset.
// NOTE(review): the offset (blockIdx.x > 0 ? 4 : 0) is only valid for a
// launch of exactly two blocks of four threads — it is not the general
// blockIdx.x * blockDim.x; confirm the intended launch before reusing.
__global__ void unique_idx_calc_threadIdx(int * input)
{
int tid = threadIdx.x;
int offset = (blockIdx.x>0)? 4:0;
printf("blockIdx : %d, threadIdx : %d, value : %d\n", blockIdx.x, tid, input[tid+offset]);
}
// Teaching demo: compute a global linear index for a 2D grid of 2D blocks as
// (thread index within the block) + (threads in all full grid rows above)
// + (threads in the blocks to the left within this row), then print the
// element of `input` at that index.
__global__ void unique_gid_calculation(int * input){
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int offset = blockIdx.y * gridDim.x * (blockDim.x * blockDim.y) + blockIdx.x * (blockDim.x * blockDim.y);
//number of threads in one row = gridDim.x * blockDim.x
//row offset: gridDim.x * blockDim.x * blockIdx.y
//int offset = blockIdx.x * (blockDim.x * blockDim.y) + blockIdx.y * (blockDim.x * blockDim.y);
int gid = tid + offset;
printf("gid: %d, input[gid]: %d \n",gid, input[gid]);
printf("threadIdx.x : %d, blockIdx.x : %d, blockIdx.y : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d gid : %d value : %d\n",
threadIdx.x, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gid, input[gid]);
}
// Copies 16 ints to the device and prints each element's global id from a
// 2x2 grid of 2x2-thread blocks (16 threads, one per element).
// Fixes: the initializer listed 17 values for array_size = 16 (the stray
// trailing element was never read and is dropped), and gpu_data is now
// freed explicitly instead of relying on cudaDeviceReset.
int main()
{
    int array_size = 16;
    int array_byte_size = sizeof(int) * array_size;
    int cpu_data[] = {23,9,4,53,65,12,1,33,34,51,3,100,2,22,15,99};
    // print the source data from host memory
    for(int i=0;i<array_size;i++){
        printf("the %d th element is: %d\n", i, cpu_data[i]);
    }
    printf("\n\n");
    // copy the data to the device
    int *gpu_data;
    cudaMalloc((void**)&gpu_data, array_byte_size);
    cudaMemcpy(gpu_data, cpu_data, array_byte_size, cudaMemcpyHostToDevice);
    // 2x2 grid of 2x2-thread blocks
    dim3 block(2,2);
    dim3 grid(2,2);
    //unique_idx_calc_threadIdx <<< grid, block >>> (gpu_data);
    unique_gid_calculation <<<grid,block>>>(gpu_data);
    cudaDeviceSynchronize();
    // free device memory before resetting the device
    cudaFree(gpu_data);
    cudaDeviceReset();
    return 0;
}
|
2,860 | #include <stdio.h>
#include <cuda_runtime.h>
// #include <helper_cuda.h>
#define N 1000000
// Doubles each of the N elements of `a` in place using a grid-stride loop,
// so any launch configuration covers the whole array.
__global__ void doubleElements(int *a){
    const int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride)
        a[idx] *= 2;
}
// Fills a managed array with 0..N-1, doubles it on the GPU, and verifies.
// NOTE(review): "Done" is printed even when the check found a mismatch, and
// the cudaMallocManaged result is never checked — consider tightening both.
int main(void){
cudaError_t err = cudaSuccess;
size_t size = N * sizeof(int);
int *a;
cudaMallocManaged(&a, size); // Use `a` on the CPU and/or on any GPU in the accelerated system.
for(int i = 0; i < N; i++)
a[i] = i;
size_t threads_per_block = 256;
// ceil-div so the grid covers all N elements
size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
doubleElements<<<number_of_blocks, threads_per_block>>>(a);
// launch-configuration errors surface via cudaGetLastError
if ((err = cudaGetLastError()) != cudaSuccess){
fprintf(stderr, "Failed to launch kernel: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// wait for the kernel before the host reads the managed array
cudaDeviceSynchronize();
for(int i = 0; i < N; i++)
if(a[i] != i * 2){
printf("Failed\n");
break;
}
printf("Done\n");
cudaFree(a);
return 0;
} |
2,861 | #include <stdio.h>
#include <curand_kernel.h>
#include <chrono>
#define host_t float
#define device_t float*
#define t_size sizeof(host_t)
#define size_t unsigned long
#define time_point_t std::chrono::time_point<std::chrono::high_resolution_clock>
// Minimal std::pair-like aggregate; used for compare()'s (x, y) result.
template<typename A, typename B>
struct pair_t { A first; B second; };
// Matrix fill callbacks for fillMatrix: constant zero, and the element's
// row-major linear index.
host_t fZero(size_t x, size_t y, size_t w) { (void)x; (void)y; (void)w; return 0; }
host_t fIndex(size_t x, size_t y, size_t w) { return (host_t)(y * w + x); }
// Prints a width x height matrix to `file`: zero-padded 3-digit values, one
// row per line, followed by a trailing blank line.
void printMatrix(FILE* file, host_t* matrix, size_t width, size_t height) {
    for (size_t row = 0; row < height; row++) {
        const host_t* line = matrix + row * width;
        for (size_t col = 0; col < width; col++)
            fprintf(file, "%03.0f ", line[col]);
        fprintf(file, "\n");
    }
    fprintf(file, "\n");
}
// Fills matrix[y * width + x] with function(x, y, width) for every element,
// walking the storage in linear (row-major) order.
void fillMatrix(host_t* matrix, size_t width, size_t height, host_t(*function)(size_t, size_t, size_t)) {
    const size_t total = width * height;
    for (size_t idx = 0; idx < total; idx++)
        matrix[idx] = function(idx % width, idx / width, width);
}
// Element-wise comparison of two width x height matrices. Returns the
// coordinates of the first mismatch (and logs it), or {width, height} when
// the matrices are identical.
pair_t<size_t, size_t> compare(host_t* a, host_t* b, size_t width, size_t height) {
    for (size_t row = 0; row < height; row++) {
        for (size_t col = 0; col < width; col++) {
            const size_t at = row * width + col;
            if (a[at] != b[at]) {
                printf("missmatch %f %f\n", a[at], b[at]);
                return pair_t<size_t, size_t>{col, row};
            }
        }
    }
    return pair_t<size_t, size_t>{width, height};
}
// Prints the sub-window [w_start, w_width) x [h_start, w_height) of a matrix
// whose rows have stride `width`. Note: w_width/w_height act as exclusive
// END coordinates here, not extents.
// Fix: the original iterated y from w_start and x from h_start — the two
// start parameters were swapped. Harmless for the existing (0, 0) call
// sites, wrong for any non-zero origin.
void printWindow(FILE* file, host_t* matrix, size_t width, size_t w_start, size_t h_start, size_t w_width, size_t w_height) {
    for (size_t y = h_start; y < w_height; y++) {
        for (size_t x = w_start; x < w_width; x++)
            fprintf(file, "%03.0f ", matrix[y * width + x]);
        fprintf(file, "\n");
    }
    fprintf(file, "\n");
}
/* Owning pair of host + device buffers of `count` host_t elements. The host
 * side is always malloc'd; the device side only when constructed with
 * cuda == true.
 * NOTE(review): not copy-safe (raw pointers, freeing destructor) — keep
 * instances on the stack and pass by reference, as main() does. */
struct shared_t {
    device_t device_ptr; // device allocation (nullptr when cuda == false)
    host_t *host_ptr;    // host allocation
    size_t count;        // number of elements
    size_t bytes;        // allocation size in bytes
    shared_t(size_t element_count, bool cuda):
        host_ptr{(host_t*) malloc(element_count * t_size)},
        device_ptr{nullptr},
        count{element_count},
        bytes{element_count * t_size} {
        if (cuda) {
            cudaMalloc(&device_ptr, bytes);
        }
        printf("[shared_t] host:%p device:%p count:%lu bytes:%lu\n",
            host_ptr, device_ptr, element_count, bytes);
    }
    ~shared_t() {
        printf("[shared_t] dispose %p %p\n", host_ptr, device_ptr);
        if (host_ptr != nullptr)
            free(host_ptr);
        if (device_ptr != nullptr)
            cudaFree(device_ptr);
    }
    /* Copies between the two buffers in the direction given by `kind`.
     * Returns 1/2 for a missing pointer, otherwise the cudaMemcpy status
     * (0 == cudaSuccess). */
    int sync(cudaMemcpyKind kind) {
        if (host_ptr == nullptr) {
            printf("[shared_t] host_ptr is nullptr (host:%p, device:%p)\n", host_ptr, device_ptr);
            return 1;
        }
        if (device_ptr == nullptr) {
            printf("[shared_t] device_ptr is nullptr (host:%p, device:%p)\n", host_ptr, device_ptr);
            return 2;
        }
        if (kind == cudaMemcpyDeviceToHost)
            return cudaMemcpy(host_ptr, device_ptr, bytes, kind);
        else
            return cudaMemcpy(device_ptr, host_ptr, bytes, kind);
    }
    // Host -> device copy; 0 on success, non-zero (logged) on failure.
    int upload() {
        auto result = sync(cudaMemcpyHostToDevice);
        if (result == cudaSuccess)
            return 0;
        printf("[shared_t] upload of (%p, %p, %lu) failed %d\n",
            device_ptr, host_ptr, bytes, result);
        return result;
    }
    // Device -> host copy; 0 on success, non-zero (logged) on failure.
    int download() {
        auto result = sync(cudaMemcpyDeviceToHost);
        if (result == cudaSuccess)
            return 0;
        printf("[shared_t] download of (%p, %p, %lu) failed %d\n",
            device_ptr, host_ptr, bytes, result);
        return result;
    }
    /* Fills the device buffer with normally distributed values (mean 40,
     * stddev 20). Returns 0 on success.
     * Fix: a curandGenerateNormal failure used to be printed but then
     * silently reported as success; it is now propagated to the caller.
     * NOTE(review): curandGenerateNormal requires an even element count —
     * confirm `count` is even at the call sites. */
    int randomize() {
        if (device_ptr == nullptr) {
            printf("[shared_t] device_ptr is nullptr for randomize (host:%p, device:%p)\n",
                host_ptr, device_ptr);
            return 1;
        }
        curandGenerator_t generator;
        auto r_gen_create = curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
        if (r_gen_create != CURAND_STATUS_SUCCESS) {
            printf("[shared_t] create generator failed %d\n", r_gen_create);
            return r_gen_create;
        }
        auto r_gen = curandGenerateNormal(generator, device_ptr, count, 40, 20);
        if (r_gen != CURAND_STATUS_SUCCESS) {
            printf("[shared_t] generate failed %d\n", r_gen);
            curandDestroyGenerator(generator);
            return r_gen;
        }
        auto r_destroy = curandDestroyGenerator(generator);
        if (r_destroy != CURAND_STATUS_SUCCESS) {
            printf("[shared_t] destroy generator failed %d\n", r_destroy);
            return r_destroy;
        }
        return 0;
    }
    // Accessor; warns (but still returns) when the device side is absent.
    device_t device() const {
        if (device_ptr == nullptr)
            printf("[shared_t] device() has to return nullptr (host:%p, device:%p)\n",
                host_ptr, device_ptr);
        return device_ptr;
    }
    // Accessor; warns (but still returns) when the host allocation failed.
    host_t* host() const {
        if (host_ptr == nullptr)
            printf("[shared_t] host() has to return nullptr (host:%p, device:%p)\n",
                host_ptr, device_ptr);
        return host_ptr;
    }
};
/* Dual timer: wall-clock (std::chrono) always, plus CUDA-event timing when
 * constructed with cuda == true and event creation succeeds.
 * NOTE(review): mcs_value / cuda_mcs_value stay uninitialized until end()
 * runs (and cuda_mcs_value stays so forever when use_cuda is false) —
 * callers must call start()/end() before reading them. */
struct measure_t {
bool use_cuda;
time_point_t tp_start;
time_point_t tp_end;
cudaEvent_t e_start;
cudaEvent_t e_end;
float mcs_value;
float cuda_mcs_value;
measure_t(bool cuda):
use_cuda{cuda} {
if (use_cuda) {
auto r_start = cudaEventCreate(&e_start);
auto r_end = cudaEventCreate(&e_end);
printf("[measure_t] create cuda events start:%d end:%d\n", r_start, r_end);
if (r_start != cudaSuccess || r_end != cudaSuccess) {
// destroy whichever event was created before falling back to chrono-only
if (r_start != cudaSuccess && r_end == cudaSuccess)
cudaEventDestroy(e_end);
if (r_start == cudaSuccess && r_end != cudaSuccess)
cudaEventDestroy(e_start);
printf("[measure_t] disabling cuda events\n");
use_cuda = false;
}
}
}
~measure_t() {
if (use_cuda) {
auto r_end = cudaEventDestroy(e_end);
auto r_start = cudaEventDestroy(e_start);
printf("[measure_t] dispose cuda events start:%d end:%d\n", r_start, r_end);
}
}
// Marks the start point (and records the start event on the default stream).
void start() {
tp_start = std::chrono::high_resolution_clock::now();
if (use_cuda)
cudaEventRecord(e_start, 0);
}
// Marks the end point and computes both elapsed times in microseconds.
void end() {
tp_end = std::chrono::high_resolution_clock::now();
mcs_value = std::chrono::duration_cast<std::chrono::microseconds>(tp_end - tp_start).count() * 1.0;
if (use_cuda) {
auto r_record = cudaEventRecord(e_end, 0);
auto r_sync = cudaEventSynchronize(e_end);
float elapsed = 0;
auto r_time = cudaEventElapsedTime(&elapsed, e_start, e_end);
if (r_record != cudaSuccess || r_sync != cudaSuccess || r_time != cudaSuccess) {
printf("[measure_t] cuda stop event is failed record:%d sync:%d time:%d\n",
r_record, r_sync, r_time);
return;
}
// cudaEventElapsedTime reports milliseconds; convert to microseconds
cuda_mcs_value = elapsed * 1000.0;
}
}
float mcs() const { return mcs_value; }
float cuda_mcs() const { return cuda_mcs_value; }
float ms() const { return mcs_value / 1000.0; }
float cuda_ms() { return cuda_mcs_value / 1000.0; }
};
// Device transform: thread i takes one group of 4 consecutive input values
// and writes them as a 2x2 block of a half-width / double-height output.
// `threads` must equal blockDim.x (it is used to rebuild the flat index).
// NOTE(review): all of the /2 and /4 arithmetic assumes width is a multiple
// of 4 — confirm against the launch in main (width = 4213 is odd there).
__global__ void transformKernel(const device_t input, device_t output, size_t width, size_t threads) {
const size_t i = blockIdx.x * threads + threadIdx.x;
/* *block* vector index is equal to linear index */
/* but *block* index is 4 linear indecices */
const size_t y = i * 4 / width;
/* every *block* index is 4 linear indices */
const size_t x = 4 * (i % (width / 4));
/* every input vector is 2 output vectors */
const size_t oy = y * 2;
/* every *block* index is 2 linear indices */
const size_t ox = i * 2 % (width / 2);
/* aligned view into *block* */
output[(1 + oy) * width / 2 + ox + 0] = input[y * width + x + 0];
output[(1 + oy) * width / 2 + ox + 1] = input[y * width + x + 1];
output[(0 + oy) * width / 2 + ox + 0] = input[y * width + x + 2];
output[(0 + oy) * width / 2 + ox + 1] = input[y * width + x + 3];
}
// CPU reference for transformKernel: each group of 4 consecutive input
// values becomes a 2x2 block in a half-width / double-height output.
// The (1 + oy) * width / 2 integer arithmetic is kept exactly as in the
// kernel (it does not distribute over the division for odd widths).
__host__ void transformCpu(const host_t* input, host_t* output, size_t width, size_t height) {
    const size_t groups = width * height / 4;
    for (size_t g = 0; g < groups; g++) {
        const size_t y = g * 4 / width;         // source row
        const size_t x = 4 * (g % (width / 4)); // first of the 4 source columns
        const size_t oy = y * 2;                // base destination row
        const size_t ox = g * 2 % (width / 2);  // destination column pair
        output[(1 + oy) * width / 2 + ox] = input[y * width + x];
        output[(1 + oy) * width / 2 + ox + 1] = input[y * width + x + 1];
        output[oy * width / 2 + ox] = input[y * width + x + 2];
        output[oy * width / 2 + ox + 1] = input[y * width + x + 3];
    }
}
// Driver: fills a matrix with cuRAND normals, runs the GPU and CPU
// transforms, compares the two results, and prints timings plus small
// windows of each matrix.
// NOTE(review): width = 4213 is odd while the transforms divide width by 2
// and 4 — verify the shape assumptions before trusting the comparison.
int main() {
const size_t height = 8444;
const size_t width = 4213;
const size_t count = height * width;
const size_t o_width = width / 2;
const size_t o_height = height * 2;
FILE* matrix_file = stdout;
if (width > 32 || height > 32) {
printf("[warning] matrix will not be printed because sizes are too big\n");
matrix_file = fopen("/dev/null", "w");
}
shared_t input{count, true};
shared_t output{count, true};
shared_t check{count, false};
measure_t measure{true};
measure_t cpu_measure{false};
/* generate random numbers in device memory */
if (input.randomize() != 0)
return 4;
/* copy it to host memory */
if (input.download() != 0)
return 3;
fillMatrix(output.host(), width, height, fZero);
fillMatrix(check.host(), width, height, fZero);
/* copy data into the device */
if (input.upload() != 0)
return 1;
/* output copy isn't necessary */
if (output.upload() != 0)
return 1;
/* pick the largest thread count <= 1024 that divides the iteration count */
const size_t iter_count = count / 4;
size_t thread_count = iter_count < 1024 ? iter_count : 1024;
for (; thread_count >= 2; thread_count--)
if (iter_count % thread_count == 0)
break;
printf("thread count: %lu\n", thread_count);
/* execute kernel on the device */
measure.start();
transformKernel<<<iter_count / thread_count, thread_count>>>(
input.device(),
output.device(),
width,
thread_count
);
measure.end();
cpu_measure.start();
transformCpu(input.host(), check.host(), width, height);
cpu_measure.end();
/* copy data from the device */
if (output.download() != 0)
return 2;
/* input copy isn't necessary */
if (input.download() != 0)
return 2;
fprintf(matrix_file, "input: \n");
printMatrix(matrix_file, input.host(), width, height);
fprintf(matrix_file, "output: \n");
printMatrix(matrix_file, output.host(), o_width, o_height);
fprintf(matrix_file, "check: \n");
printMatrix(matrix_file, check.host(), o_width, o_height);
printf("measure device: %f mcs\n", measure.cuda_mcs());
printf("measure host: %f mcs\n", cpu_measure.mcs());
auto result = compare(output.host(), check.host(), o_width, o_height);
if (result.first < o_width || result.second < o_height) {
printf("compare missmatch on %lu %lu\n", result.first, result.second);
} else {
printf("compare check passed!\n");
}
printf("input window: \n");
// NOTE(review): the input window is printed with stride o_width although
// input rows have stride `width` — confirm this is intentional.
printWindow(stdout, input.host(), o_width, 0, 0, 4, 1);
printf("output window: \n");
printWindow(stdout, output.host(), o_width, 0, 0, 2, 2);
printf("check window: \n");
printWindow(stdout, check.host(), o_width, 0, 0, 2, 2);
/* all shared_t objects is allocated on the stack */
/* so, destructor will automatically free all memory */
return 0;
}
|
2,862 | // Сложение векторов и сравнение с количеством тредов в памяти
#include <iostream>
#include <cuda.h>
using namespace std;
// Element-wise saturating add: c[i] = min(a[i] + b[i], 10), one thread per
// element (launched as <<<1, N>>>).
__global__ void add( float *a, float *b, float *c ) {
    const int i = threadIdx.x;
    const float sum = a[i] + b[i];
    if (sum < 10)
        c[i] = sum;
    else
        c[i] = 10;
}
#define N 64 //Количество суммирований и сравнений
// Sums two random float vectors on the GPU with per-element clamping at 10
// and prints the result. (Comments translated from Russian.)
int main( void ) {
float *a, *b, *c; // host copies of a, b, c
float *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( float );
// allocate memory for the device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = (float*)malloc( size );
b = (float*)malloc( size );
c = (float*)malloc( size );
for (int i = 0; i < N; ++i)
a[i] = (float)rand()/(float)RAND_MAX;
for (int i = 0; i < N; ++i)
b[i] = (float)rand()/(float)RAND_MAX;
// copy the inputs to the device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
// launch the add() kernel
add<<< 1, N >>>( dev_a, dev_b, dev_c ); // N threads in 1 block
// add<<< N, 1 >>>( dev_a, dev_b, dev_c ); // N blocks of 1 thread each
// copy the device result back into the host copy of c
// (a blocking memcpy, so no explicit synchronization is needed)
cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
for(int i=0; i<N; i++)
cout<<"c["<<i<<"]="<<c[i]<<endl;
free( a ); free( b ); free( c );
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
} |
2,863 | #include "includes.h"
// Linear index into a field array laid out as [direction][y][x] with NX*NY
// elements per direction; `d` is 1-based (directions 1..8).
// NOTE(review): d == 0 would index before the array start — confirm callers
// never pass the rest direction here.
__device__ __forceinline__ size_t gpu_fieldn_index(unsigned int x, unsigned int y, unsigned int d)
{
return (NX*(NY*(d-1)+y)+x);
}
// LBM streaming step on a D2Q9-style lattice for two distributions (f, h):
// each node pulls the post-collision populations (f2/h2) from the neighbour
// each direction streams in from, with periodic wrap in x and y, and stores
// them in f1/h1. Directions 1..8 only; the rest population (f0/h0) is not
// touched here.
// NOTE(review): assumes gridDim.y == NY and blockDim.x * gridDim.x == NX —
// confirm against the launch configuration.
__global__ void gpu_stream(double *f0, double *f1, double *f2, double *h0, double *h1, double *h2)
{
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// periodic neighbour coordinates
unsigned int xp1 = (x + 1) % NX;
unsigned int yp1 = (y + 1) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
// direction numbering scheme
// 6 2 5
// 3 0 1
// 7 4 8
// load populations from adjacent nodes (ft is post-streaming population of f1)
f1[gpu_fieldn_index(x, y, 1)] = f2[gpu_fieldn_index(xm1, y, 1)];
f1[gpu_fieldn_index(x, y, 2)] = f2[gpu_fieldn_index(x, ym1, 2)];
f1[gpu_fieldn_index(x, y, 3)] = f2[gpu_fieldn_index(xp1, y, 3)];
f1[gpu_fieldn_index(x, y, 4)] = f2[gpu_fieldn_index(x, yp1, 4)];
f1[gpu_fieldn_index(x, y, 5)] = f2[gpu_fieldn_index(xm1, ym1, 5)];
f1[gpu_fieldn_index(x, y, 6)] = f2[gpu_fieldn_index(xp1, ym1, 6)];
f1[gpu_fieldn_index(x, y, 7)] = f2[gpu_fieldn_index(xp1, yp1, 7)];
f1[gpu_fieldn_index(x, y, 8)] = f2[gpu_fieldn_index(xm1, yp1, 8)];
h1[gpu_fieldn_index(x, y, 1)] = h2[gpu_fieldn_index(xm1, y, 1)];
h1[gpu_fieldn_index(x, y, 2)] = h2[gpu_fieldn_index(x, ym1, 2)];
h1[gpu_fieldn_index(x, y, 3)] = h2[gpu_fieldn_index(xp1, y, 3)];
h1[gpu_fieldn_index(x, y, 4)] = h2[gpu_fieldn_index(x, yp1, 4)];
h1[gpu_fieldn_index(x, y, 5)] = h2[gpu_fieldn_index(xm1, ym1, 5)];
h1[gpu_fieldn_index(x, y, 6)] = h2[gpu_fieldn_index(xp1, ym1, 6)];
h1[gpu_fieldn_index(x, y, 7)] = h2[gpu_fieldn_index(xp1, yp1, 7)];
h1[gpu_fieldn_index(x, y, 8)] = h2[gpu_fieldn_index(xm1, yp1, 8)];
} |
2,864 | #include "includes.h"
// Flattens the (block, thread) coordinates of a 3D grid of 3D blocks into a
// single linear id: block id * threads-per-block + thread id within block.
// NOTE(review): the in-block term adds threadIdx.y*blockDim.x and
// threadIdx.z*blockDim.x*blockDim.y before threadIdx.x — x-fastest ordering;
// verify it matches the memory layout expected by callers.
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
// Element-wise addition: out[gid] = in[gid] + factor[gid], one thread per
// element (despite the name, this is a vector add, not a reduction).
// NOTE(review): no bounds check — the launch must cover the arrays exactly.
__global__ void vecSum(double *in, double *factor, double *out){
double result;
unsigned int gid = getGid3d3d();
result = in[gid] + factor[gid];
out[gid] = result;
} |
2,865 | #pragma once
#include <cstdint>
#include <memory>
// Placeholder for the freeform module; intentionally empty for now.
namespace freeform
{
} |
2,866 | #include <iostream>
#include <cstring>
#include <fstream>
#include "time.h"
using namespace std;
// Builds the KMP failure function for `pattern` of length m into f[]:
// f[0] = -1, and f[i] is the length of the longest proper prefix of
// pattern[0..i-1] that is also its suffix (offset by this code's -1 base).
__host__ void preprocesamientoKMP(char* pattern, int m, int f[])
{
    f[0] = -1;
    for (int i = 1; i < m; i++) {
        // fall back through shorter borders until one extends (or -1)
        int cand = f[i - 1];
        while (cand >= 0 && pattern[cand] != pattern[i - 1])
            cand = f[cand];
        f[i] = cand + 1;
    }
}
// Parallel KMP search: thread `index` scans the text window
// [sizePattern*index, sizePattern*(index+2)), so adjacent windows overlap by
// one pattern length and a match straddling a boundary is still found.
// Match start positions are recorded into c[]. f[] is the failure function
// with f[0] == -1 (see preprocesamientoKMP).
__global__ void KMP(char* pattern, char* target,int f[],int c[],int sizePattern, int sizeText)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
int i = sizePattern * index;
int j = sizePattern * (index + 2)-1;
if(i > sizeText)
return;
if(j > sizeText)
j = sizeText;
int k = 0;
while (i < j)
{
if (k == -1)
{
// mismatch at the first pattern character: advance the text
i++;
k = 0;
}
else if (target[i] == pattern[k])
{
i++;
k++;
if (k == sizePattern)
{
// full match ending at i; record its start position
c[i - sizePattern] = i - sizePattern;
// restart just after the match start to find overlapping matches
i = i - k + 1;
}
}
else
k = f[k];
}
return;
}
// Reads a text and a pattern from the files given on the command line, runs
// the parallel KMP kernel, and writes match positions to DATA/result.txt.
// Fixes: the failure function was built with the TEXT length m instead of
// the pattern length n (reading uninitialized bytes past the pattern's end;
// correct only by luck because f[0..n-1] depend solely on pat[0..n-1]);
// the int arrays were sized with the char-size knob instead of sizeof(int);
// argv is now checked; events and heap allocations are released.
int main(int argc, char* argv[])
{
    // maximum pattern buffer length
    const int S = 40000000;
    // threads per block
    int M = 1024;
    // bytes per char-array element (1 to 4)
    int charSize = 4;
    // host buffers
    char *tar;
    char *pat;
    tar = (char*)malloc(2000000);
    pat = (char*)malloc(S*charSize);
    // device buffers
    char *d_tar;
    char *d_pat;
    // file streams
    ifstream inputFileText;
    ifstream inputFilePattern;
    ofstream outputFileText;
    if (argc < 3) {
        printf("usage: %s <text file> <pattern file>\n", argv[0]);
        return 1;
    }
    inputFileText.open(argv[1]);
    inputFilePattern.open(argv[2]);
    outputFileText.open("DATA/result.txt");
    inputFileText>>tar;
    inputFilePattern>>pat;
    int m = strlen(tar); // text length
    int n = strlen(pat); // pattern length
    int *fault;          // KMP failure function (only [0..n-1] is used)
    int *coin;           // match start positions; -1 == no match
    fault = new int[m];
    coin = new int[m];
    int *d_fault;
    int *d_coin;
    // initialize results with the "no match" sentinel
    for(int i = 0;i<m; i++)
        coin[i] = -1;
    // each thread scans a window of ~2 pattern lengths
    int blocks = (m/n+M)/M;
    printf("Copiando datos a GPU\n");
    cudaEvent_t start, stop, local_s, local_e;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&local_s);
    cudaEventCreate(&local_e);
    // preprocess over the PATTERN length n (the original passed m)
    preprocesamientoKMP(pat, n, fault);
    cudaEventRecord(start);
    /* device allocations: int arrays sized with sizeof(int), not charSize */
    cudaMalloc((void **)&d_tar, m*charSize);
    cudaMalloc((void **)&d_pat, n*charSize);
    cudaMalloc((void **)&d_fault, m*sizeof(int));
    cudaMalloc((void **)&d_coin, m*sizeof(int));
    /* copy inputs to the GPU */
    cudaMemcpy(d_tar, tar, m*charSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_pat, pat, n*charSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_fault, fault, m*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_coin, coin, m*sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(local_s);
    KMP<<<blocks,M>>>(d_pat, d_tar ,d_fault, d_coin, n, m);
    cudaEventRecord(local_e);
    cudaMemcpy(coin, d_coin, m*sizeof(int), cudaMemcpyDeviceToHost);
    // free GPU memory
    cudaFree(d_tar);
    cudaFree(d_pat);
    cudaFree(d_fault);
    cudaFree(d_coin);
    cudaEventRecord(stop);
    float milis, local;
    cudaEventSynchronize(stop);
    cudaEventSynchronize(local_e);
    cudaEventElapsedTime(&milis, start, stop);
    cudaEventElapsedTime(&local, local_s, local_e);
    // write match positions
    for(int i = 0;i<m; i++)
        if(coin[i]!=-1)
            outputFileText<<"position: "<<i<<"\tmatch: "<<coin[i]<<'\n';
    printf("Blocks: %i Threads: %i n: %i m:%i\n", (m/n+M)/M, M, n, m);
    printf("Tiempo ejecucion: %1.15f ml.\n", milis);
    printf("Tiempo ejecucion kernel: %1.15f ml.\n", local);
    // release remaining resources (leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(local_s);
    cudaEventDestroy(local_e);
    delete[] fault;
    delete[] coin;
    free(tar);
    free(pat);
    return 0;
}
|
2,867 | #include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>
#include <math.h>
#include <stdlib.h>
//һκ˺üһCеԪ
// Matrix-vector product, one thread per output row:
// C[r] = sum_i A[r * wA + i] * B[i], where wA is A's row stride.
void __global__ MVMulCUDA(float *A, float *B, float *C, int rowSize, int columnSize, int wA){
    uint row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= rowSize)
        return;               // guard the grid tail
    float acc = 0;
    for (int i = 0; i < columnSize; i++)
        acc += A[row * wA + i] * B[i];
    C[row] = acc;
}
// Fills a w x h row-major matrix with data[row][col] = row - 0.1*col + 1.
void ConstantInit_A(float *data, int w, int h) {
    for (int row = 0; row < h; row++)
        for (int col = 0; col < w; col++)
            data[row * w + col] = row - 0.1 * col + 1;
}
// Fills a length-h vector with data[i] = log(sqrt(i*i - i + 1)).
void ConstantInit_B(float *data, int h) {
    for (int idx = 0; idx < h; ++idx) {
        int arg = idx * idx - idx + 1;   // always >= 1 for idx >= 0
        data[idx] = log(sqrt(arg));
    }
}
/* Computes C = A * B on the GPU, where A is row x column (row-major) and
 * B is a column-length vector; C receives `row` results. Times the kernel
 * with CUDA events and prints the elapsed time. */
void MVMul(float* A, float* B, float* C, int row, int column){
    int mem_size_A = sizeof(float) * row * column;
    int mem_size_B = sizeof(float) * column;   // kernel reads B[0..column-1]
    int mem_size_C = sizeof(float) * row;
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, mem_size_A);
    cudaMalloc(&d_B, mem_size_B);
    cudaMalloc(&d_C, mem_size_C);
    /* BUG FIX: the old code allocated fresh host scratch buffers and copied
       THOSE (uninitialized) to the device, ignoring the caller's A and B. */
    cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, mem_size_B, cudaMemcpyHostToDevice);
    // execution configuration: dimGrid * dimBlock threads cover `row` rows
    int dimGrid = 10;
    int dimBlock = 1000;
    // CUDA events for timing
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);
    /* BUG FIX: A's row stride is `column`, not wA/sizeof(float) (== 2),
       which made the kernel read the wrong elements. */
    MVMulCUDA<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, row, column, column);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU running time= %.12f msec\n",msecTotal);
    // Read C from device
    cudaMemcpy(C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    // Free device memory and timing events (events were leaked before)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Prints up to `showsize` rows and columns of matrix C (row-major, row
// stride columnSize). A single-row request is printed as a flat vector.
// Always returns 0.
int show(float* C, int rowSize, int columnSize, int showsize){
    if (showsize < rowSize)
        rowSize = showsize;
    if (rowSize == 1)
        columnSize = 0;   // stride 0 -> index reduces to C[j]
    for (int r = 0; r < rowSize; r++){
        for (int c = 0; c < showsize; c++)
            printf("%f ", C[r * columnSize + c]);
        printf("\n");
    }
    return 0;
}
/* Driver: builds a 10000 x 10000 matrix and a vector, multiplies them on
 * the GPU, reports total wall time and prints a few result entries. */
int main(){
    int rowSize = 10000, columnSize = 10000;
    // host buffers
    float* A = (float*)malloc((size_t)rowSize * columnSize * sizeof(float));
    float* B = (float*)malloc(columnSize * sizeof(float));
    float* C = (float*)malloc(rowSize * sizeof(float));
    // initialize matrix A and vector B
    ConstantInit_A(A, columnSize, rowSize);
    ConstantInit_B(B, rowSize);
    struct timeval tvs, tve;
    // compute, bracketed by device resets and wall-clock timestamps
    cudaDeviceReset();
    gettimeofday(&tvs, NULL);
    MVMul(A, B, C, rowSize, columnSize);
    gettimeofday(&tve, NULL);
    cudaDeviceReset();
    double span = tve.tv_sec - tvs.tv_sec + (tve.tv_usec - tvs.tv_usec) / 1000000.0;
    printf("Total running time: %.12f sec\n",span);
    printf("Result examples:\n");
    show(C, 1, rowSize, 10);
    free(A);
    free(B);
    free(C);
    A = NULL;
    B = NULL;
    C = NULL;
    return 0;
}
|
2,868 | #include <stdio.h>
#define N 40
// Element-wise addition C[i] = A[i] + B[i]. Indexed by threadIdx.x only,
// so each launch covers at most one block's worth of elements.
__global__ void MatAdd(float *A, float *B, float *C) {
    int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
// Row-major flattening: (x, y) -> y*N + x for the N x N grid.
size_t ind(int x, int y) {
    return (size_t)(y * N + x);
}
/* Fills two N x N matrices, adds them on the GPU and prints the result. */
int main() {
    float A[N * N];
    float B[N * N];
    float C[N * N];
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            A[ind(j, i)] = 3.4;
            B[ind(j, i)] = 5.4;
        }
    }
    float *a;
    float *b;
    float *c;
    size_t bytes = N * N * sizeof(float);
    cudaMalloc((void **) &a, bytes);
    cudaMalloc((void **) &b, bytes);
    cudaMalloc((void **) &c, bytes);
    cudaMemcpy(a, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b, B, bytes, cudaMemcpyHostToDevice);
    /* BUG FIX: the old launch requested N*N = 1600 threads in ONE block,
       exceeding the 1024-threads-per-block limit, so the kernel never ran.
       MatAdd indexes with threadIdx.x only, so launch it over <=1024-element
       chunks with offset pointers instead. */
    const int chunk = 1024;
    for (int off = 0; off < N * N; off += chunk) {
        int len = (N * N - off < chunk) ? (N * N - off) : chunk;
        MatAdd<<<1, len>>>(a + off, b + off, c + off);
    }
    /* BUG FIX: c is a DEVICE pointer and was dereferenced on the host;
       copy the result back first (cudaMemcpy also synchronizes). */
    cudaMemcpy(C, c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%f ", C[ind(j, i)]);
        }
        printf("\n");
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
}
|
2,869 | #include "includes.h"
__global__ void gpu_array_2norm2_r4__(size_t arr_size, const float *arr, float *bnorm2)
/** Computes the squared Euclidean (Frobenius) norm of an array arr(0:arr_size-1)
INPUT:
# arr_size - size of the array;
# arr(0:arr_size-1) - array;
OUTPUT:
# bnorm2[0:gridDim.x-1] - squared 2-norm of a sub-array computed by each CUDA thread block;
NOTE: launch with blockDim.x*sizeof(float) bytes of dynamic shared memory
(one slot per thread). The per-block combine is done serially by thread 0
after the barrier; the host must still sum bnorm2[] across blocks.
**/
{
    size_t i,n;
    float _thread_norm2;
    extern __shared__ float thread_norms2_r4[];
    // grid-stride accumulation: each thread sums its strided slice
    n=gridDim.x*blockDim.x; _thread_norm2=0.0f;
    for(i=blockIdx.x*blockDim.x+threadIdx.x;i<arr_size;i+=n){_thread_norm2+=arr[i]*arr[i];}
    thread_norms2_r4[threadIdx.x]=_thread_norm2;
    __syncthreads();   // all partials must be in shared memory before the combine
    if(threadIdx.x == 0){
        // serial combine by thread 0 (simple, not a tree reduction)
        bnorm2[blockIdx.x]=thread_norms2_r4[0]; for(i=1;i<blockDim.x;i++){bnorm2[blockIdx.x]+=thread_norms2_r4[i];}
    }
    __syncthreads();
    return;
}
2,870 | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
#include <iostream>
#include<fstream>
#define Pi 3.141516
#define Nthreads 32
using namespace std;
// Precomputes the DFT twiddle tables (tam x tam, row-major): row k holds
// cos/sin(2*pi*k*i/tam) for i in [0, tam). One thread per table row.
__global__ void Sinodails(double* cosine, double* sine, int tam){
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= tam)
        return;
    for (int i = 0; i < tam; i++){
        double phase = (2 * row * i * Pi) / tam;
        cosine[row * tam + i] = cos(phase);
        sine[row * tam + i] = sin(phase);
    }
}
// Magnitude of the complex value (real, im): sqrt(real^2 + im^2).
__device__ double GetAmp(double real, double im)
{
    double mag2 = real * real + im * im;
    return sqrt(mag2);
}
/* One thread per output bin: computes the amplitude spectrum of `signal`
 * (length tam) from the precomputed cosine/sine tables (tam x tam, row-major).
 * Launch with gridDim*blockDim >= tam and tam*sizeof(double) bytes of
 * dynamic shared memory. */
__global__ void DFT(double* signal, double* cosine, double* sine, int tam, double* spectrum){
    int Id = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ double Shasignal[];
    /* BUG FIX: a block has only blockDim.x threads, so the old single
       assignment left most of the tam-entry shared buffer uninitialized
       even though every thread reads all of it. Load cooperatively. */
    for (int i = threadIdx.x; i < tam; i += blockDim.x)
        Shasignal[i] = signal[i];
    __syncthreads();
    if (Id >= tam)
        return;   // safe: no further barriers below
    /* BUG FIX: the accumulators were read before ever being written. */
    double temp1 = 0.0, temp2 = 0.0;
    for (int i = 0; i < tam; i++){
        temp1 += cosine[Id*tam + i] * Shasignal[i];
        temp2 += sine[Id*tam + i] * Shasignal[i];
    }
    spectrum[Id] = GetAmp(temp1, temp2);
    // (leftover debug printf removed)
}
// Fills `signal` with Gaussian noise via the Box-Muller transform and
// `time` with uniformly spaced sample instants (dt = 0.02).
void Signals(double *signal, double *time, int tam){
    const double dt = 0.02;
    for (int i = 0; i < tam; i++){
        double u1 = (double) rand() / (double) RAND_MAX;
        double u2 = (double) rand() / (double) RAND_MAX;
        signal[i] = (double) sqrt( -2.0f * log( u1 )) * cos( 2.0f * Pi * u2 );
        time[i] = i * dt;
    }
}
/* Driver: generates a noise signal, computes its DFT amplitude spectrum on
 * the GPU, writes signal/spectrum data files and plots them via gnuplot. */
int main(){
    // host buffers
    double *signal, *time;
    double *spectrum;
    int tam = 256;
    size_t dBytes = tam * sizeof(double);
    size_t ddBytes = (size_t)tam * tam * sizeof(double);
    // device buffers
    double *d_cosine, *d_sine;
    double *d_signal;
    double *d_spectrum;
    // launch geometry: one thread per sample/bin
    int Blocks = tam / Nthreads;
    // ------------------------- BODY ------------------ /////
    // NOTE: the twiddle tables exist only on the device; the old unused
    // host-side cosine/sine allocations have been removed.
    spectrum = (double*)malloc(dBytes);
    signal = (double*)malloc(dBytes);
    time = (double*)malloc(dBytes);
    Signals(signal, time, tam);
    /// ---------------------------------------------------
    // build the cosine and sine tables on the device
    cudaMalloc((void**)&d_cosine, ddBytes);
    cudaMalloc((void**)&d_sine, ddBytes);
    Sinodails<<<Blocks,Nthreads>>>(d_cosine, d_sine, tam);
    cudaDeviceSynchronize();
    /// ---------------------------------------------------
    cudaMalloc((void**)&d_spectrum, dBytes);
    cudaMalloc((void**)&d_signal, dBytes);
    cudaMemcpy(d_signal, signal, dBytes, cudaMemcpyHostToDevice);
    DFT<<<Blocks,Nthreads,dBytes >>>(d_signal, d_cosine, d_sine, tam, d_spectrum);
    cudaDeviceSynchronize();
    cudaMemcpy(spectrum, d_spectrum, dBytes, cudaMemcpyDeviceToHost);
    // ------------------------- BODY ------------------ /////
    /// ---------------
    ofstream file1, file2;
    file1.open("fourier.dat");
    file2.open("signal.dat");
    for (int i = 0; i < tam; i++){
        file1<<time[i]<<" "<<spectrum[i]<<endl;
        file2<<time[i]<<" "<<signal[i]<<endl;
    }
    /// ---------------
    ofstream file3;
    file3.open("imprimir.gnu");
    file3<<"set terminal eps transparent size 6,4 lw 1.8 enhanced font \"Times,24\""<<endl;
    file3<<"set encoding iso_8859_1"<<endl;
    file3<<"set title 'fourier'"<<endl;
    file3<<"set output \"imprimir.eps\""<<endl;
    file3<<"set grid"<<endl;
    file3<<"set xrange[0:5]"<<endl;
    file3<<"set yrange[0:10]"<<endl;
    file3<<"plot 'fourier.dat' w l"<<endl;
    file3.close();
    system("gnuplot imprimir.gnu");
    system(" evince imprimir.eps");
    /// ----------------
    /// ---------------
    ofstream file4;
    file4.open("imprimir1.gnu");
    file4<<"set terminal eps transparent size 6,4 lw 1.8 enhanced font \"Times,24\""<<endl;
    file4<<"set encoding iso_8859_1"<<endl;
    file4<<"set title 'Senal'"<<endl;
    file4<<"set output \"imprimir1.eps\""<<endl;
    file4<<"set grid"<<endl;
    file4<<"set xrange[0:5]"<<endl;
    file4<<"set yrange[-3:3]"<<endl;
    file4<<"plot 'signal.dat' w l"<<endl;
    file4.close();
    system("gnuplot imprimir1.gnu");
    system(" evince imprimir1.eps");
    /// ----------------
    cudaFree(d_cosine);
    cudaFree(d_sine);
    cudaFree(d_signal);
    cudaFree(d_spectrum);   /* BUG FIX: was leaked */
    free(signal);
    free(spectrum);         /* BUG FIX: was leaked */
    free(time);             /* BUG FIX: was leaked */
    return 0;
}
|
2,871 | #include <stdlib.h>
#include <stdio.h>
#include <cstdlib>
#include <math.h>
#include <random>
#include <chrono>
#include <iostream>
// Simple 3D particle: position starts at the origin, velocity defaults
// to (1,1,1) unless supplied at construction.
class Particle
{
public:
    float3 pos = make_float3(0,0,0);
    float3 vel = make_float3(1,1,1);

    Particle() {}
    // Construct with an explicit initial velocity.
    Particle(float3 velocity) { vel = velocity; }

    // Debug aid: dump position and velocity to stdout.
    void print_particle() {
        printf("position (%f,%f,%f) \n", pos.x, pos.y, pos.z);
        printf("velocity (%f,%f,%f) \n", vel.x, vel.y, vel.z);
    }
};
// Component-wise float3 addition for device code.
__device__ float3 operator+(const float3 &a, const float3 &b)
{
    float3 s = make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
    return s;
}
// Host-side component-wise float3 addition.
float3 add_float3(const float3 &a, const float3 &b)
{
    float3 s = make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
    return s;
}
// Host-side component-wise float3 subtraction (a - b).
float3 sub_float3(const float3 &a, const float3 &b)
{
    float3 d = make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
    return d;
}
// NOTE(review): despite the name this is the L1 (Manhattan) norm
// |x|+|y|+|z|, not the Euclidean magnitude — kept as-is for callers.
float mag_float3(const float3 &a)
{
    float m = abs(a.x);
    m += abs(a.y);
    m += abs(a.z);
    return m;
}
// One thread per particle: advance the position by one velocity step.
__global__
void timestep_update(Particle *particles, int n_particles)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n_particles)
        return;   // guard the grid tail
    // (velocity update intentionally disabled in the original)
    particles[idx].pos = particles[idx].pos + particles[idx].vel;
}
// Draws a uniformly random integer velocity in [-10, 10] per component.
float3 random_velocity()
{
    std::random_device rd;
    std::mt19937 rng(rd());
    std::uniform_int_distribution<int> uni(-10, 10);
    int vx = uni(rng);
    int vy = uni(rng);
    int vz = uni(rng);
    return make_float3(vx, vy, vz);
}
// Host reference: advance every particle's position by one velocity step.
void timestep_update_cpu(Particle *particles, int n_particles){
    for (int idx = 0; idx < n_particles; idx++)
        particles[idx].pos = add_float3(particles[idx].pos, particles[idx].vel);
}
/* Driver: creates one million random-velocity particles in managed memory
 * and runs 1000 GPU timesteps. */
int main(int argc, char** argv)
{
    int n_iterations = 1000;
    int n_particles = 1000000;
    int n_threads = 256;
    // ceil-divide so a partial block covers the tail
    int grid_size = n_particles / n_threads;
    if (n_particles % n_threads != 0){
        grid_size++;
    }
    int bytes = sizeof(Particle) * n_particles;
    Particle *particles;
    cudaMallocManaged(&particles, bytes);
    for (int i = 0; i < n_particles; i++)
    {
        float3 random_vel = random_velocity();
        particles[i] = Particle(random_vel);
    }
    for (int i = 0; i < n_iterations; i++){
        timestep_update<<<grid_size, n_threads>>>(particles, n_particles);
    }
    /* BUG FIX: without a sync the process could exit while kernels are
       still queued, and the managed allocation was never released. */
    cudaDeviceSynchronize();
    cudaFree(particles);
    return 0;
}
|
2,872 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define ROWS 4096
#define COLS 4096
/* Builds a 10-bin histogram over two device arrays of digit values:
 * one thread per index, counting d_hist1[id] and d_hist2[id] into mat[]
 * with atomics. Values outside [0,9] are ignored, matching the implicit
 * default of the original 10-case switch (which this replaces — the bin
 * index IS the value). The old __syncthreads() was dropped: no shared
 * memory is involved, so no barrier is needed. */
__global__ void histo(int* d_hist1, int* d_hist2, int* mat) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int v1 = d_hist1[id];
    if (v1 >= 0 && v1 <= 9)
        atomicAdd(&mat[v1], 1);
    int v2 = d_hist2[id];
    if (v2 >= 0 && v2 <= 9)
        atomicAdd(&mat[v2], 1);
}
/* Reads ROWS*COLS digits from input.txt, histograms them on the GPU in two
 * halves, and writes the 10 bin counts to output.txt. */
int main(int argc, char* argv[]) {
    int x, y;
    int i = 0;
    /* BUG FIX: the three arrays total ~128 MB and were declared on the
       stack, which overflows on any normal stack size. Use the heap. */
    int *hist  = (int*)malloc((size_t)ROWS * COLS * sizeof(int));
    int *hist1 = (int*)malloc((size_t)ROWS * COLS / 2 * sizeof(int));
    int *hist2 = (int*)malloc((size_t)ROWS * COLS / 2 * sizeof(int));
    int *d_hist1, *d_hist2;
    int mat[10];
    int *d_mat;
    FILE *fp = fopen("input.txt", "r");
    if (fp == NULL || hist == NULL || hist1 == NULL || hist2 == NULL) {
        printf("initialization failed\n");
        return 1;
    }
    for (x = 0; x < ROWS; x++)
    {
        for (y = 0; y < COLS; y++)
        {
            fscanf(fp, "%d", &hist[i]);
            i++;
        }
    }
    fclose(fp);
    printf("\n");
    // split the data into two halves, one per kernel argument
    for (i = 0; i < (ROWS * COLS) / 2; i++) {
        hist1[i] = hist[i];
    }
    printf("\n");
    for (i = 0; i < (ROWS * COLS) / 2; i++) {
        hist2[i] = hist[((ROWS * COLS) / 2) + i];
    }
    for (i = 0; i < 10; i++) {
        mat[i] = 0;
    }
    cudaMalloc(&d_hist1, ROWS * COLS * sizeof(int) / 2);
    cudaMalloc(&d_hist2, ROWS * COLS * sizeof(int) / 2);
    cudaMalloc(&d_mat, 10 * sizeof(int));
    cudaMemcpy(d_hist1, hist1, (ROWS * COLS * sizeof(int)) / 2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_hist2, hist2, (ROWS * COLS * sizeof(int)) / 2, cudaMemcpyHostToDevice);
    /* BUG FIX: cudaMemset(d_mat, 0, sizeof(d_mat)) zeroed only pointer-size
       bytes; zero the full bin array (the redundant host copy is dropped). */
    cudaMemset(d_mat, 0, 10 * sizeof(int));
    // 8192 blocks x 1024 threads == ROWS*COLS/2 threads, one per half-index
    histo <<< 8192, 1024>>>(d_hist1, d_hist2, d_mat);
    cudaDeviceSynchronize();
    cudaMemcpy(mat, d_mat, 10 * sizeof(int), cudaMemcpyDeviceToHost);
    FILE *fout = fopen("output.txt", "w");
    for (i = 0; i < 10; i++) {
        printf("\n%d => %d", i, mat[i]);
        if (fout) fprintf(fout, "%d => %d\n", i, mat[i]);
    }
    if (fout) fclose(fout);
    cudaFree(d_mat);
    cudaFree(d_hist1);
    cudaFree(d_hist2);
    free(hist);
    free(hist1);
    free(hist2);
    return 0;
}
|
2,873 | #include "includes.h"
/* Dot product of a and b (length N, from includes.h): each block computes a
 * partial sum into c[blockIdx.x]; the host must sum c[] across blocks.
 * Requires blockDim.x == THREADS_PER_BLOCK and, for the halving reduction
 * to cover every element, a power-of-two block size. */
__global__ void dot(int *a, int *b, int *c)
{
    /* shared memory cache for partial sum results */
    __shared__ int cache[THREADS_PER_BLOCK];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int result = 0;
    /* multiplication step: grid-stride loop writes a partial sum into the cache */
    while(i < N)
    {
        result += a[i] * b[i];
        i += blockDim.x * gridDim.x;
    }
    cache[threadIdx.x] = result;
    /* wait for all other threads in the same block */
    __syncthreads();
    /* reduction step: tree-sum all entries in the cache, halving each pass */
    i = blockDim.x / 2;
    while (i != 0)
    {
        /* only threads 0 through i are busy */
        if (threadIdx.x < i)
        {
            cache[threadIdx.x] += cache[threadIdx.x + i];
        }
        /* wait for all threads within the block (barrier is outside the
           divergent if, so every thread reaches it) */
        __syncthreads();
        i /= 2;
    }
    /* thread 0 writes the result for this block */
    if (threadIdx.x == 0)
    {
        c[blockIdx.x] = cache[0];
    }
}
2,874 | // fdk-ts-h.cu
#include <stdio.h>
/* Prints the usage/help text for the CUDA FDK cone-beam backprojection
 * entry point ('fdk,ts,back'), describing every expected argument.
 * The text is emitted verbatim to stdout. */
void fdk_ts_help(void)
{
    printf("\n\
\n\
image = function('fdk,ts,back', nx,ny,nz, dx,dy,dz, \n\
offset_x, offset_y, offset_z, mask2, \n\
dso, dsd, ds, dt, offset_s, offset_t, proj, beta, nthread)\n\
\n\
image output is single [nz nx ny] <- trick!\n\
nx,ny,nz: (int32) image size\n\
dx,dy,dz: (double) voxel size\n\
offset_x,_y,_z: (double) center offset in pixels (usually 0)\n\
mask2: (uint8) [nx ny] 2D support mask\n\
dso: (double) distance from source to isocenter\n\
dsd: (double) distance from source to detector\n\
dfs: (double) distance from focal point to source (0 or inf)\n\
ds: (double) horizontal ray spacing\n\
dt: (double) vertical ray spacing\n\
offset_s: (double) channel offset [pixels]\n\
offset_t: (double) vertical offset on detector [pixels]\n\
nthread: (int32) # of processors\n\
proj: (single) [nt ns na] (trick!) projection view for each beta\n\
beta: (double) [na] source angle(s) [radians]\n\
(CUDA version)\n\
\n");
}
|
2,875 | #include<stdio.h>
#include <cuda.h>
// Fills a[0..N-1] with pseudo-random integers in [0, 99].
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand() % 100;
}
// c[i] = a[i] + b[i]. NOTE: no bounds guard — the launch configuration
// must cover exactly the vector length (the caller in this file launches
// (N+M-1)/M blocks of M threads with N divisible by M).
__global__ void add_vector(int* a, int* b, int* c)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
/* Driver: adds two random N-element int vectors on the GPU and prints
 * every element-wise sum. */
int main()
{
    int N = 10000; // size of vector
    int M = 10;    // threads per block
    size_t size = N * sizeof(int);
    // allocate host vectors
    int* vector1 = (int*)malloc(size);
    int* vector2 = (int*)malloc(size);
    int* vector3 = (int*)malloc(size);
    // fill the inputs
    random_ints(vector1, N);
    random_ints(vector2, N);
    // device vectors
    int *d_vector1;
    int *d_vector2;
    int *d_vector3;
    cudaMalloc(&d_vector1, size);
    cudaMalloc(&d_vector2, size);
    cudaMalloc(&d_vector3, size);
    cudaMemcpy(d_vector1, vector1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_vector2, vector2, size, cudaMemcpyHostToDevice);
    // ceil-divided grid covers all N elements
    add_vector<<<(N+M-1)/M, M>>>(d_vector1, d_vector2, d_vector3);
    // blocking device-to-host copy also synchronizes with the kernel
    cudaMemcpy(vector3, d_vector3, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    {
        printf("%d %d + %d =%d\n",i,vector1[i], vector2[i], vector3[i]);
    }
    /* BUG FIX: all six buffers were leaked. */
    cudaFree(d_vector1);
    cudaFree(d_vector2);
    cudaFree(d_vector3);
    free(vector1);
    free(vector2);
    free(vector3);
    return 0;
}
|
2,876 | #include "includes.h"
/* Scatter-add rows of `in` into `out`: source row j is accumulated into
 * destination row targetRowIdx[j]. Blocks stride over rows, threads stride
 * over columns; atomicAdd lets several source rows share one target. */
__global__ void gPasteRows(float* out, const float* in, size_t cols, const size_t* targetRowIdx, size_t rows) {
    for (int rowBase = 0; rowBase < rows; rowBase += gridDim.x) {
        int srcRow = rowBase + blockIdx.x;
        if (srcRow >= rows)
            continue;
        float* dst = out + targetRowIdx[srcRow] * cols;
        const float* src = in + (size_t)srcRow * cols;
        for (int colBase = 0; colBase < cols; colBase += blockDim.x) {
            int c = colBase + threadIdx.x;
            if (c < cols)
                atomicAdd(dst + c, src[c]);
        }
    }
}
2,877 |
// Naive dense n x n matrix multiply c = a * b (row-major), one thread per
// output cell; expects a 2D launch covering at least n x n threads.
extern "C" __global__ void multiply(unsigned int *a, unsigned int *b, unsigned int *c,
                                    int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n)
        return;   // guard threads outside the matrix
    unsigned int acc = 0;
    for (unsigned int k = 0; k < n; k++)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
|
2,878 | /* Histogram generation on the GPU.
* Host-side code.
* Author: Naga Kandasamy
* Date modified: May 17, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <float.h>
#define THREAD_BLOCK_SIZE 256
#define NUM_BLOCKS 40
#define HISTOGRAM_SIZE 256 /* Histogram has 256 bins */
#include "histogram_kernel.cu"
void run_test(int);
void compute_on_device(int *, int *, int, int);
void check_for_error(const char *);
extern "C" void compute_gold(int *, int *, int, int);
void check_histogram(int *, int, int);
void print_histogram(int *, int , int);
/* Entry point: expects the number of input elements as argv[1]. */
int main(int argc, char **argv)
{
    if (argc < 2) {
        printf("Usage: %s num-elements\n", argv[0]);
        exit(EXIT_SUCCESS);
    }
    run_test(atoi(argv[1]));
    exit(EXIT_SUCCESS);
}
/* Generates num_elements random values in [0, HISTOGRAM_SIZE-1], histograms
 * them on both CPU (compute_gold) and GPU (compute_on_device), checks each
 * result, and reports the total bin-count difference. Exits the process. */
void run_test(int num_elements)
{
    float diff;
    int i;
    /* Allocate and initialize space to store histograms generated by the CPU and the GPU */
    int *histogram_on_cpu = (int *)malloc(sizeof(int) * HISTOGRAM_SIZE);
    memset(histogram_on_cpu, 0, sizeof(int) * HISTOGRAM_SIZE);
    int *histogram_on_gpu = (int *)malloc(sizeof(int) * HISTOGRAM_SIZE);
    memset(histogram_on_gpu, 0, sizeof(int) * HISTOGRAM_SIZE);
    /* Generate input data to be integer values between 0 and (HISTOGRAM_SIZE - 1) */
    printf("\nGenerating input data\n");
    int size = sizeof(int) * num_elements;
    int *input_data = (int *)malloc (size);
    for(i = 0; i < num_elements; i++)
        input_data[i] = floorf((HISTOGRAM_SIZE - 1) * (rand()/(float)RAND_MAX));
    /* Reference run on the CPU, timed with gettimeofday */
    printf("\nGenerating histrgram on CPU\n");
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    compute_gold(input_data, histogram_on_cpu, num_elements, HISTOGRAM_SIZE);
    gettimeofday(&stop, NULL);
    printf("Eexcution time = %f\n",stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
    check_histogram(histogram_on_cpu, num_elements, HISTOGRAM_SIZE);
    /* GPU run (compute_on_device does its own timing) */
    printf("\nGenerating histrgram on device\n");
    compute_on_device(input_data, histogram_on_gpu, num_elements, HISTOGRAM_SIZE);
    check_histogram(histogram_on_gpu, num_elements, HISTOGRAM_SIZE);
    /* Compute the differences between the CPU and GPU results:
       sum of absolute per-bin differences (0 means identical). */
    diff = 0.0;
    for(i = 0; i < HISTOGRAM_SIZE; i++)
        diff += abs(histogram_on_cpu[i] - histogram_on_gpu[i]);
    printf("Difference between CPU and device results = %f\n", diff);
    /* cleanup memory. */
    free((void *)input_data);
    free((void *)histogram_on_cpu);
    free((void *)histogram_on_gpu);
    exit(EXIT_SUCCESS);
}
/* Histograms input_data on the GPU twice — once with the global-memory
 * kernel, once with the shared-memory kernel (both from histogram_kernel.cu)
 * — timing each, then copies the (shared-memory) result into `histogram`. */
void compute_on_device(int *input_data, int *histogram, int num_elements, int histogram_size)
{
    int *input_data_on_device = NULL;
    int *histogram_on_device = NULL;
    /* Allocate space on GPU for input data */
    cudaMalloc((void**)&input_data_on_device, num_elements * sizeof(int));
    cudaMemcpy(input_data_on_device, input_data, num_elements * sizeof(int), cudaMemcpyHostToDevice);
    /* Allocate space on GPU for histogram and initialize contents to zero */
    cudaMalloc((void**)&histogram_on_device, histogram_size * sizeof(int));
    cudaMemset(histogram_on_device, 0, histogram_size * sizeof(int));
    /* Set up the execution grid on GPU (fixed-size grid; kernels are
       expected to stride over num_elements) */
    dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1);
    dim3 grid(NUM_BLOCKS,1);
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    printf("Using global memory to generate histrogram\n");
    histogram_kernel_slow<<<grid, thread_block>>>(input_data_on_device, histogram_on_device, num_elements, histogram_size);
    cudaDeviceSynchronize();   /* wait so the timing below is meaningful */
    gettimeofday(&stop, NULL);
    printf("Execution time = %f \n",stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
    check_for_error("KERNEL FAILURE");
    printf("Using shared memory to generate histogram\n");
    gettimeofday(&start, NULL);
    cudaMemset(histogram_on_device, 0, histogram_size * sizeof(int)); /* Reset histogram */
    histogram_kernel_fast<<<grid, thread_block>>>(input_data_on_device, histogram_on_device, num_elements, histogram_size);
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);
    printf("Eexecution time = %f\n",stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
    check_for_error("KERNEL FAILURE");
    /* Copy result back from GPU (only the second kernel's output survives) */
    cudaMemcpy(histogram, histogram_on_device, histogram_size * sizeof(int), cudaMemcpyDeviceToHost);
    print_histogram(histogram, histogram_size, num_elements);
    /* Free memory */
    cudaFree(input_data_on_device);
    cudaFree(histogram_on_device);
}
/* Check correctness of result: sum of histogram bins must equal number of input elements */
/* Sanity check: the bin counts of a valid histogram must sum to the number
 * of input elements. Prints the verdict to stdout. */
void check_histogram(int *histogram, int num_elements, int histogram_size)
{
    int sum = 0;
    for (int i = 0; i < histogram_size; i++)
        sum += histogram[i];
    printf("Number of histogram entries = %d. \n", sum);
    if (sum == num_elements)
        printf("Histogram generated successfully. \n");
    else
        printf("Error generating histogram. \n");
    printf("\n");
}
/* Check for errors during kernel execution */
/* Aborts the process with `msg` if CUDA has recorded an error. */
void check_for_error(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("CUDA ERROR: %s (%s)\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
|
2,879 | #include<stdio.h>
#include<cuda.h>
#include <string.h>
#include <math.h>
#define MAXNUM 10000000000
#define BNUM 190
#define TNUM 1024
/* Seeds a sieve array of `size` flags: marks 2, 3 and every 6k±1 value
 * below `size` as a prime candidate (1); everything else stays 0.
 * Returns `size`.
 * BUG FIX: the loop indices were `int` while `size` is long long (the
 * caller passes 1e10), so the loops overflowed — indices are now
 * long long throughout. */
long long MakeNum(int *number, long long size){
    for (long long idx = 0; idx < size; idx++)
        number[idx] = 0;
    number[2] = 1;
    number[3] = 1;
    // step alternates 2, 4 to visit 5, 7, 11, 13, ... (the 6k±1 pattern)
    for (long long n = 5, step = 2; n < size; n += step, step = 6 - step)
        number[n] = 1;
    return size;
}
/* Parallel sieve pass: every thread grid-strides over the candidate array
 * and, for each index still flagged 1, zeroes all of its multiples.
 * NOTE(review): threads may race on reading a flag another thread is
 * clearing; for a sieve this only causes redundant work, not wrong results,
 * since only composite indices ever get zeroed. */
__global__ void running(int *deviceArr, long long arrSize){
    int BID = blockIdx.x;   // block index
    int TID = threadIdx.x;  // thread index
    long long i, j, k;      // k is unused (kept as in the original)
    // stride = total thread count (BNUM * TNUM)
    for(i = BID * TNUM + TID; i < arrSize; i += BNUM * TNUM){
        if(deviceArr[i] == 1){
            // clear every multiple i*j that fits in the array
            for(j = 2; i * j < arrSize; j++)
            {
                deviceArr[i * j] = 0;
            }
        }
    }
};
/* Driver: sieves all numbers below MAXNUM on the GPU and reports the prime
 * count, the largest prime found, and the kernel time.
 * NOTE(review): MAXNUM*sizeof(int) is ~40 GB per buffer — this only works
 * on machines with enormous host/device memory; allocation is now checked. */
int main(){
    int *arr;
    int *hostArr;
    int *deviceArr;
    long long i, arrSize, temp, biggest = 0;   /* BUG FIX: biggest was uninitialized */
    float dTime;
    cudaEvent_t start, end;
    arr = (int *)malloc(MAXNUM * sizeof(int));
    hostArr = (int *)malloc(MAXNUM * sizeof(int));
    if (arr == NULL || hostArr == NULL){
        printf("host allocation failed\n");
        return 1;
    }
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    arrSize = MakeNum(arr, MAXNUM);
    cudaMalloc((void**) &deviceArr, MAXNUM * sizeof(int));
    cudaMemcpy(deviceArr, arr, sizeof(int) * MAXNUM, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    running<<<BNUM,TNUM>>>(deviceArr, arrSize);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaMemcpy(hostArr, deviceArr, MAXNUM * sizeof(int), cudaMemcpyDeviceToHost);
    // count survivors and remember the largest
    temp = 0;
    for(i = 0; i < arrSize; i++){
        if(hostArr[i] == 1){
            temp++;
            biggest = i;
        }
    }
    cudaEventElapsedTime(&dTime, start, end);
    printf("2~%llu num:%llu biggest:%llu time:%f.\n",MAXNUM,temp,biggest,dTime);
    cudaFree(deviceArr);
    /* BUG FIX: host buffers and the timing events were leaked. */
    free(arr);
    free(hostArr);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    return 0;
}
|
2,880 | #include "includes.h"
/* Probabilistic max-pooling (CRBM-style) over square feature maps.
 * Each 16x16 thread block handles a 16*pooling_rate square tile of one
 * feature map; blockIdx.x/y jointly encode (feature map, image, tile).
 * Per thread: softmax over its pooling_rate^2 window (with an implicit
 * "off" unit, hence the 1+sum denominator), written to `probs`; then one
 * window unit is sampled with the random draw `rnd` and the binary result
 * is written back into `feature_map` and summarized in `target`.
 * NOTE(review): rnd_index mixes block/thread coordinates via a product
 * term — presumably just a hash into rnd_array; verify it gives distinct
 * draws per thread. Assumes pooling_rate <= MAX_POOLING_RATE and
 * feature_map_size divisible by 16*pooling_rate — TODO confirm with callers. */
__global__ void max_pooling_kernel(float *feature_map, float *probs, float *target, int feature_map_size, int feature_map_num, int pooling_rate, float *rnd_array, int rnd_num){
    __shared__ float shFm[16*MAX_POOLING_RATE][16*MAX_POOLING_RATE];
    // decode which image / feature map / tile this block covers
    int imgIdx = blockIdx.y / (feature_map_size / 16 / pooling_rate);
    int fmIdx = blockIdx.x / (feature_map_size / 16 / pooling_rate);
    int tx = (blockIdx.x % (feature_map_size / pooling_rate / 16)) * 16 + threadIdx.x;
    int ty = (blockIdx.y % (feature_map_size / pooling_rate / 16)) * 16 + threadIdx.y;
    int subsample_size = feature_map_size / pooling_rate;
    int rnd_index = ((blockIdx.y * blockDim.y + threadIdx.y) * (blockIdx.x * blockDim.x) + threadIdx.x ) % rnd_num;
    float rnd = rnd_array[rnd_index];
    // shift base pointers to this image's / feature map's slab
    float *fm = feature_map + imgIdx * feature_map_num * feature_map_size * feature_map_size +
    fmIdx * feature_map_size * feature_map_size;
    probs = probs + imgIdx * feature_map_num * feature_map_size * feature_map_size +
    fmIdx * feature_map_size * feature_map_size;
    target = target + imgIdx * feature_map_num * subsample_size * subsample_size +
    fmIdx * subsample_size * subsample_size;
    // stage this thread's pooling window into shared memory
    for(int i = 0; i < pooling_rate; i++){
        for(int j = 0; j < pooling_rate; j++){
            shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j] =
            fm[(ty*pooling_rate+i) * feature_map_size + (tx*pooling_rate+j)];
        }
    }
    __syncthreads();
    // exponentiate with clamping at 50 to avoid overflow, accumulate sum
    float sum = 0;
    for(int i = 0; i < pooling_rate; i++){
        for(int j = 0; j < pooling_rate; j++){
            if(shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j] > 50){
                shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j] = 50.0f;
            }
            shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j] =
            __expf(shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j]);
            sum += shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j];
        }
    }
    // normalize (1+sum: the extra 1 is the "all units off" outcome),
    // store probabilities, and zero the feature map before sampling
    for(int i = 0; i < pooling_rate; i++){
        for(int j = 0; j < pooling_rate; j++){
            shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j] =
            __fdividef(shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j], (1.0f + sum));
            probs[(ty*pooling_rate+i) * feature_map_size + (tx*pooling_rate+j)] =
            shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j];
            fm[(ty*pooling_rate+i) * feature_map_size + (tx*pooling_rate+j)] = 0;
        }
    }
    // inverse-CDF sampling: pick at most one unit in the window
    sum = 0;
    bool isStop = false;
    for(int i = 0; i < pooling_rate && !isStop; i++){
        for(int j = 0; j < pooling_rate && !isStop; j++){
            sum += shFm[threadIdx.y*pooling_rate+i][threadIdx.x*pooling_rate+j];
            if(rnd < sum){
                fm[(ty*pooling_rate+i) * feature_map_size + (tx*pooling_rate+j)] = 1;
                isStop = true;
            }
        }
    }
    // pooled unit is on iff some window unit fired
    if(isStop){
        target[threadIdx.y*subsample_size+threadIdx.x] = 1;
    }else{
        target[threadIdx.y*subsample_size+threadIdx.x] = 0;
    }
}
2,881 | //本质上来说,几维的数组其实都是一维数组,不过是变变表现形式而已,二维数组加法没什么意思,
//就是一维数组加法,还是二维数组乘法有点意思
//这个方法,还不是高并发,高并发,应该是把求和那一块for也并发了。估计要用device,现在的并发度是4
#include<iostream>
#include<cuda.h>
using namespace std;
const int N=2;
// Matrix multiply with one block per row and one thread per column
// (parallelism = N*N for N x N matrices; the per-cell sum stays serial).
__global__ void mul(int *a, int *b, int *c){
    int row = blockIdx.x;
    int col = threadIdx.x;
    int acc = 0;
    // acc = sum_k a[row][k] * b[k][col], row-major with stride blockDim.x
    for (int k = 0; k < blockDim.x; k++)
        acc += a[row * blockDim.x + k] * b[k * blockDim.x + col];
    c[row * blockDim.x + col] = acc;
}
// N*N blocks of N threads: each block computes one output cell, with the N
// partial products exchanged through shared memory (parallelism 8 for N=2).
__global__ void mul_8(int *a, int *b, int *c){
    int row = blockIdx.x / N;
    int col = blockIdx.x % N;
    __shared__ int partial[N];
    // partial[t] = a[row][t] * b[t][col]
    partial[threadIdx.x] = a[row * N + threadIdx.x] * b[threadIdx.x * N + col];
    __syncthreads();   // all partials must be written before summing
    int sum = 0;
    for (int k = 0; k < blockDim.x; k++)
        sum += partial[k];
    c[blockIdx.x] = sum;   // every thread stores the same value (harmless)
}
/* Driver: multiplies two N x N matrices with the shared-memory kernel,
 * timing the launch with CUDA events, and prints the result. */
int main(){
    int *a, *b, *dev_a, *dev_b, *dev_c, *c;
    a = new int[N*N];
    b = new int[N*N];
    c = new int[N*N];
    // initialize both inputs to 0..N*N-1 (echoing each value of a)
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            a[i*N+j] = i*N+j;
            b[i*N+j] = i*N+j;
            cout<<a[i*N+j]<<endl;
        }
    }
    cudaMalloc(&dev_a, N*N*sizeof(int));
    cudaMalloc(&dev_b, N*N*sizeof(int));
    cudaMalloc(&dev_c, N*N*sizeof(int));
    cudaMemcpy(dev_a, a, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    mul_8<<<N*N,N>>>(dev_a, dev_b, dev_c);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout<<"Time: "<<elapsedTime<<endl;
    cudaMemcpy(c, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            cout<<c[i*N+j]<<" ";
        }
        cout<<endl;
    }
    /* BUG FIX: host/device buffers and the timing events were leaked. */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
2,882 | #include "stdlib.h"
#include "stdio.h"
#include <math.h>
#include <cuda.h>
const int max_val=100;
void generateArray(float* data, int size);
// C = A + B element-wise; one thread per element with a tail guard.
__global__
void vectAddKernel(float* A, float* B, float* C, int n){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n)
        C[idx] = A[idx] + B[idx];
}
/* Adds two host float vectors of length n on the GPU: C = A + B. */
void vectorAdd(float* A, float* B, float* C, int n){
    int size = sizeof(float) * n;
    float* d_A;
    float* d_B;
    float* d_C;
    cudaMalloc((void**)(&d_A), size);
    cudaMalloc((void**)(&d_B), size);
    cudaMalloc((void**)(&d_C), size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    // 256 threads per block, ceil-divided grid
    dim3 grid (ceil(n/256.0), 1, 1);
    dim3 block (256, 1, 1);
    vectAddKernel<<<grid,block>>>(d_A, d_B, d_C, n);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    /* BUG FIX: cudaFree takes the device pointer itself; the old
       cudaFree((void**)&d_A) passed a host stack address, freeing nothing
       and leaking all three device buffers. (Unused status locals removed.) */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Parses the vector length from argv[1], fills two pseudo-random input
// vectors, runs the GPU addition, and releases the host buffers.
// Returns -1 on incorrect usage.
int main(int argc, char* argv[]){
    if (argc != 2){
        printf("Numero incorrecto de argumentos\n");
        return -1;
    }
    int n = atoi(argv[1]);
    float* arr1 = (float*)malloc(sizeof(float)*n);
    float* arr2 = (float*)malloc(sizeof(float)*n);
    float* res  = (float*)malloc(sizeof(float)*n);
    generateArray(arr1, n);
    generateArray(arr2, n);
    vectorAdd(arr1, arr2, res, n);
    /*
    printf("Array 1:");
    for(int i=0;i<n;i++){
    printf(" %f",*(arr1+i));
    }
    printf("\n");
    printf("Array 2:");
    for(int i=0;i<n;i++){
    printf(" %f",*(arr2+i));
    }
    printf("\n");
    printf("Res:");
    for(int i=0;i<n;i++){
    printf(" %f",*(res+i));
    }
    printf("\n");*/
    // BUG FIX: the original leaked all three host buffers.
    free(arr1);
    free(arr2);
    free(res);
    return 0;
}
// Fills data[0..size) with pseudo-random values in [0, max_val).
// Uses the C rand() stream, so results depend on the current srand seed.
void generateArray(float* data, int size){
    for (int i = 0; i < size; i++) {
        data[i] = rand() % max_val;
    }
}
|
2,883 | #include "includes.h"
#define N 2560
#define M 512
#define BLOCK_SIZE (N/M)
#define RADIUS 5
// Element-wise addition of two double vectors: c[i] = a[i] + b[i] for
// i < n (one thread per element, bounds-checked for the ragged last block).
__global__ void add(double *a, double *b, double *c, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
2,884 | #include <stdio.h>
#define RADIUS 3
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*2)
// Exercise skeleton: naive 1-D stencil that reads straight from global
// memory. Each thread is meant to write the sum of in[i-RADIUS..i+RADIUS]
// into out[i]. The kernel intentionally does nothing until FIX ME #1 is
// completed, so out stays uninitialized.
__global__ void stencil_1d_simple(int *in, int *out)
{
// compute this thread's global index
// (shifted by RADIUS so the halo at the front of `in` is skipped)
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x + RADIUS;
int alpha = 1;  // NOTE(review): unused placeholders — presumably intended
int beta = 1;   // for a weighted-stencil variant of the exercise
if(i < NUM_ELEMENTS + RADIUS ){
/* FIX ME #1 */  // exercise: accumulate in[i-RADIUS..i+RADIUS] into out[i]
}
}
// Exercise skeleton: shared-memory 1-D stencil. Each block stages its tile
// of `in` (plus, once FIXME #5 is done, the 2*RADIUS halo cells) into
// shared memory, then sums a (2*RADIUS+1)-wide window per element.
// NOTE(review): as written the shared tile has no halo space (FIXME #2),
// lindex has no RADIUS offset (FIXME #4), and the barrier between the
// shared-memory writes and reads is missing (FIXME #6) — the loop below
// reads temp[lindex - RADIUS] out of bounds until those are completed.
__global__ void stencil_1d_improved(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE]; /* FIXME #2*/  // exercise: needs + 2*RADIUS halo slots
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) ; /* FIXME #3*/  // exercise: offset by RADIUS
int lindex = threadIdx.x ; /* FIXME #4 */  // exercise: offset by RADIUS into the tile
// Read input elements into shared memory
temp[lindex] = in[gindex];
//Load ghost cells (halos)
if (threadIdx.x < RADIUS)
{
/* FIXME #5 */  // exercise: first RADIUS threads also load both halo cells
}
// Make sure all threads get to this point before proceeding!
/* FIXME #6 */  // exercise: __syncthreads() before any thread reads temp
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
}
// Exercise harness: runs the (incomplete) stencil over NUM_ELEMENTS values
// of 1 and checks that every interior output equals 2*RADIUS+1 (= 7).
// Verification fails until the kernel's FIX ME sections are completed,
// since the simple kernel never writes out.
int main()
{
unsigned int i;
// Interior elements plus a RADIUS-wide halo on each end.
int N = NUM_ELEMENTS + 2 * RADIUS;
int h_in[N], h_out[N];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (N); ++i )
h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7
// Allocate space on the device
cudaMalloc( &d_in, N * sizeof(int)) ;
cudaMalloc( &d_out, N * sizeof(int)) ;
// Copy input data to device
cudaMemcpy( d_in, h_in, N * sizeof(int), cudaMemcpyHostToDevice) ;
stencil_1d_simple<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
//stencil_1d_improved<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
// Blocking copy — also synchronizes with the kernel above.
cudaMemcpy( h_out, d_out, N * sizeof(int), cudaMemcpyDeviceToHost) ;
// Verify every out value is 7 (only the interior, halos excluded)
for( i = RADIUS; i < NUM_ELEMENTS+RADIUS; ++i )
if (h_out[i] != RADIUS*2+1)
{
printf("Element h_out[%d] == %d != 7\n", i, h_out[i]);
break;
}
// Loop ran to completion only if every element matched.
if (i == NUM_ELEMENTS+RADIUS)
printf("SUCCESS!\n");
// Free out memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
2,885 | /*
(c) Matthew Lee
Spring 2019
MIT License
*/
#include <stdio.h>
#include <vector>
#include <math.h>
#include <iostream>
#include <time.h>
#include <curand.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand_kernel.h>
#include "barrier_options.cuh"
void down_out(unsigned N_STEPS, unsigned N_PATHS, float start_price, float sigma, float mu, float time, float barrier_u, float strike_price, float* price_est, float* payoff);
__global__ void barrier_simulation(unsigned total_paths, unsigned steps_per_sim, float start_price, float sigma, float mu, float time, float barrier_a, float strike_price, float* randoms, float* price_est, float* payoff);
// Monte-Carlo simulation of a barrier option under geometric Brownian
// motion. One thread simulates one price path of steps_per_sim steps.
// A path whose terminal price is at/above the continuity-corrected barrier
// records that price and its discounted call payoff; knocked-out paths
// record 0 in both arrays.
//
// randoms must hold at least total_paths * steps_per_sim normal draws,
// pre-scaled by sqrt(dt) (as generated by down_out()).
__global__ void barrier_simulation(
    unsigned total_paths,
    unsigned steps_per_sim,
    float start_price,
    float sigma,
    float mu,
    float time,
    float barrier_a,
    float strike_price,
    float* randoms,
    float* price_est,
    float* payoff)
{
    // Index of this thread's path (also its slot in the result arrays).
    const int result_index = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: the original guard was `> total_paths`, which let the thread
    // with result_index == total_paths write one element past the arrays.
    if (result_index >= total_paths)
        return;
    float price = start_price;
    // Barrier adjustment for discrete monitoring (0.5826 is the standard
    // continuity-correction constant).
    float barrier = barrier_a * (exp(+0.5826f*sigma*sqrt(time / steps_per_sim)));
    // Time per simulated step.
    float dt = (float)time / steps_per_sim;
    // Drift of the log-price under GBM.
    float v = mu - ((sigma*sigma) / 2);
    for (int i = 0; i < steps_per_sim; i++)
    {
        // Discretized GBM step; the draws already carry the sqrt(dt) factor.
        // BUG FIX: each path now consumes its own disjoint block of draws.
        // The original read randoms[result_index + i], so neighbouring
        // paths shared almost all their randoms and were heavily correlated.
        price = price * exp(v*dt + sigma*randoms[result_index*steps_per_sim + i]);
    }
    // (The original __syncthreads() calls were removed: the kernel uses no
    // shared memory, and a barrier after a divergent early return is unsafe.)
    price_est[result_index] = (price >= barrier ? price : 0.0);
    if (price >= barrier)
    {
        float thispayoff = price - strike_price;
        // Discount the positive payoff back to today at rate mu.
        payoff[result_index] = (thispayoff > 0.0 ? thispayoff * exp((-mu)*time) : 0.0);
    }
    else {
        payoff[result_index] = 0.0;
    }
}
// Host driver: generates N_STEPS*N_PATHS normal increments on the device,
// launches barrier_simulation over N_PATHS independent paths, then tears
// down the RNG and scratch buffer.
// price_est and payoff must be device pointers with room for N_PATHS floats.
void down_out(unsigned N_STEPS, unsigned N_PATHS, float start_price, float sigma, float mu, float time, float barrier_u, float strike_price, float* price_est, float* payoff)
{
    const unsigned BLOCK_SIZE = 1024;
    const unsigned GRID_SIZE = ceil(float(N_PATHS) / float(BLOCK_SIZE));   // ceil-div
    // N(0, dt) increments: one block of N_STEPS draws per path.
    float* randoms;
    curandGenerator_t gen;
    cudaMalloc(&randoms, N_STEPS*N_PATHS*sizeof(float));
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, clock());
    curandGenerateNormal(gen, randoms, N_STEPS*N_PATHS, 0, sqrt((float)time/N_STEPS));
    // Calling simulation kernel.
    barrier_simulation <<<GRID_SIZE, BLOCK_SIZE>>> (N_PATHS, N_STEPS, start_price, sigma, mu, time, barrier_u, strike_price, randoms, price_est, payoff);
    // BUG FIX: the original leaked both the generator and the scratch
    // buffer; randoms must also stay alive until the kernel has finished,
    // so synchronize before freeing it.
    cudaDeviceSynchronize();
    curandDestroyGenerator(gen);
    cudaFree(randoms);
}
2,886 | #include "includes.h"
// Converts a row-wise result buffer into columnar layout: one thread per
// row copies each of its col_count column values into that column's
// contiguous region of columnar_buffer. Only 8- and 4-byte columns are
// copied; other widths are skipped (but still advance the offsets).
// row_size must equal the sum of col_widths (no padding — see WARN below).
__global__ void columnarize_groups(int8_t* columnar_buffer, const int8_t* rowwise_buffer, const size_t row_count, const size_t col_count, const size_t* col_widths, const size_t row_size) {
// Flat thread id across a (possibly 2-D) grid; one id per row.
const auto thread_index =
threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x;
if (thread_index >= row_count) {
return;
}
// Source: this row's packed values. Dest: start of the current column.
auto read_ptr = rowwise_buffer + thread_index * row_size;
auto col_base = columnar_buffer;
for (size_t i = 0; i < col_count; ++i) {
switch (col_widths[i]) {
case 8: {
int64_t* write_ptr = reinterpret_cast<int64_t*>(col_base) + thread_index;
*write_ptr = *reinterpret_cast<const int64_t*>(read_ptr);
break;
}
case 4: {
int32_t* write_ptr = reinterpret_cast<int32_t*>(col_base) + thread_index;
*write_ptr = *reinterpret_cast<const int32_t*>(read_ptr);
break;
}
default:;
}
// Advance to the next column's region / the row's next field.
col_base += col_widths[i] * row_count;
read_ptr += col_widths[i]; // WARN(miyu): No padding!!
}
}
2,887 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define DataSize 1024
// Per-block tree reduction: after the kernel, Da[bx*1024] holds the sum of
// the 1024 elements handled by block bx. The dead code below was an earlier
// pixel-inversion experiment. NOTE(review): the halving loop starts at
// 1024/2, i.e. it assumes blockDim.x == 1024 — confirm the launch config.
__global__ void Add(unsigned int *Da,int high,int width)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int bn = blockDim.x;
//int gn = gridDim.x;
int id = bx*bn+tx;   // this thread's global element index
//for(int i=id;i<(high*width);i+=(bn*gn))
//Da[i] = 255 - Da[i];
int i = 1024/2; // half the (assumed 1024-wide) block: active-thread count per step
while (i != 0) {
// Fold the upper half of the block's slice onto the lower half.
if (tx < i) {
Da[id] += Da[id + i];
}
// Barrier is outside the if, so all threads of the block reach it.
__syncthreads();
i /= 2;
}
}
// Reads the 8-bit grayscale payload of lena.bmp, sums all pixel values on
// the GPU with a per-block tree reduction (Add), and prints the mean
// pixel intensity. Assumes a 512x512 image.
int main()
{
    FILE *fp = NULL;
    unsigned int high, width, offset;
    unsigned char *head;   // raw BMP header bytes
    unsigned char *img;    // pixel payload, 1 byte per pixel
    high = 0;
    width = 0;
    offset = 0;
    fp = fopen("lena.bmp","rb");
    // BUG FIX: the original passed a NULL FILE* straight to fseek/fread.
    if (fp == NULL) {
        printf("cannot open lena.bmp\n");
        return 1;
    }
    // BMP layout: pixel-data offset at byte 10, width at 18, height at 22.
    fseek(fp, 10, SEEK_SET);
    fread(&offset, sizeof(unsigned int), 1, fp);
    fseek(fp, 18, SEEK_SET);
    fread(&width, sizeof(unsigned int), 1, fp);
    fseek(fp, 22, SEEK_SET);
    fread(&high, sizeof(unsigned int), 1, fp);
    img = (unsigned char*)malloc(sizeof(unsigned char)*(width*high));
    fseek(fp, offset, SEEK_SET);
    fread(img, sizeof(char), (width*high), fp);
    head = (unsigned char*)malloc(sizeof(unsigned char)*(offset));
    fseek(fp, 0, SEEK_SET);
    fread(head, sizeof(unsigned char), offset, fp);
    fclose(fp);
    dim3 block(1024, 1, 1); // 1024 threads per block — Add() hard-codes this
    dim3 grid(256, 1, 1);   // 256 blocks cover 512*512 pixels
    // Widen pixels to unsigned int so the reduction cannot overflow 8 bits.
    // NOTE(review): ~1 MB on the stack; assumes the image is exactly 512x512.
    unsigned int Dimg[512*512];
    for (int j = 0; j < 512*512; j++) {
        Dimg[j] = img[j];
    }
    unsigned int *Da;  // device copy, 4 bytes per pixel
    cudaMalloc((void**)&Da, (sizeof(unsigned int)*(width*high)));
    cudaMemcpy(Da, Dimg, (sizeof(unsigned int)*(width*high)), cudaMemcpyHostToDevice);
    Add <<< grid, block >>> (Da, high, width);  // per-block partial sums
    cudaDeviceSynchronize();  // (cudaThreadSynchronize is deprecated)
    cudaMemcpy(Dimg, Da, (sizeof(unsigned int)*(width*high)), cudaMemcpyDeviceToHost);
    // Element 0 of every 1024-element block now holds that block's sum.
    unsigned int sum = 0;
    for (int i = 0; i < 512*512; i += 1024) {
        sum += Dimg[i];
    }
    sum /= (512*512);  // mean pixel value
    printf("\n%3d\n", sum);
    // BUG FIX: free the host and device allocations (all were leaked).
    cudaFree(Da);
    free(img);
    free(head);
    return 0;
}
|
2,888 | /*#include <cuda_runtime.h>*/
#include <cuda.h>
#include <stdio.h>
// Per-codebook dot product: for each codebook c < cbNum,
//   resDev[c] += dot(invSigmaMuDev[c*fDim .. ], muDev[c*fDim .. ]) / 2.
// One thread per codebook.
__global__ void kernel_vecDotProduct(double* invSigmaMuDev, double* muDev, int fDim, int cbNum, double* resDev) {
    int cbIdx = blockDim.x * blockIdx.x + threadIdx.x;
    if (cbIdx >= cbNum)
        return;
    const double* a = invSigmaMuDev + cbIdx * fDim;
    const double* b = muDev + cbIdx * fDim;
    double acc = 0;
    for (int k = 0; k < fDim; k++)
        acc += a[k] * b[k];
    resDev[cbIdx] += acc / 2;
}
// Batched matrix-vector product over packed triangular matrices.
// One thread per codebook; each block first stages its blockDim.x mean
// vectors (mu) into shared memory, then thread t computes
// r = f(A) * v, where A is the packed lower triangle (row-major,
// A[i*(i+1)/2+j] for j <= i) shared by every group of `shareNum`
// consecutive codebooks. Off-diagonal entries are applied symmetrically
// (A[i][j] and A[j][i] both read from the stored triangle) and the
// diagonal entry is counted twice (the `* 2 * v[i]` term).
__global__ void kernel_batchedDgemv(double* invSigmaDev, double* muDev, int fDim, int cbNum, int shareNum, double* resDev) {
// Dynamic shared buffer: blockDim.x vectors of fDim doubles.
extern __shared__ double sharedMu[];
int N = blockDim.x;
int vecLen = N * fDim;       // doubles staged per block
int maxIdx = cbNum * fDim;   // total valid doubles in muDev
// Cooperative, strided copy of this block's slice of muDev.
for (int i = 0; i < (vecLen + blockDim.x - 1) / blockDim.x; i++) {
int sharedIdx = i * blockDim.x + threadIdx.x;
int devIdx = blockIdx.x * vecLen + sharedIdx;
if (devIdx < maxIdx)
sharedMu[sharedIdx] = muDev[devIdx];
}
__syncthreads();
// Packed-triangle length for an fDim x fDim symmetric matrix.
int invSigmaLen = (fDim + 1) * fDim / 2;
int cbIdx = N * blockIdx.x + threadIdx.x;
if (cbIdx < cbNum) {
double* r = resDev + cbIdx * fDim;            // output vector
double* v = sharedMu + threadIdx.x * fDim;    // this codebook's mu
// Groups of shareNum codebooks share one packed matrix.
double* A = invSigmaDev + (cbIdx / shareNum) * invSigmaLen;
for (int i = 0; i < fDim; i++) {
double t = 0;
for (int j = 0; j < i; j++)
t += A[i * (i + 1) / 2 + j] * v[j];           // below-diagonal terms
for (int j = i + 1; j < fDim; j++)
t += A[j * (j + 1) / 2 + i] * v[j];           // mirrored above-diagonal terms
t += A[i * (i + 1) / 2 + i] * 2 * v[i];       // diagonal, weighted by 2
r[i] = t;
}
}
}
// Numerically-stable log-sum-exp over GMM mixtures.
// Launch: one block per codebook (gridDim.x = cbNum), one thread per frame
// (blockDim.x = frames in this chunk), mixNum doubles of shared memory.
// For each (frame, codebook) pair this computes
//   combinedLh = log( sum_i alpha_i * exp(seperateLh_i) )
// over mixtures with alpha_i > 0, using the usual max-shift for stability.
// Side effect: seperateLh is overwritten in place with the intermediate
// exp(t_i - max) values.
__global__ void kernel_lse(int mixNum, double* allAlpha, double* seperateLh, double* combinedLh) {
// Stage this codebook's mixture weights into shared memory.
extern __shared__ double alpha[];
for (int i = 0; i < (mixNum + blockDim.x - 1) / blockDim.x; i++) {
if (i * blockDim.x + threadIdx.x < mixNum) {
alpha[i * blockDim.x + threadIdx.x] = allAlpha[blockIdx.x * mixNum + blockDim.x * i + threadIdx.x];
}
}
__syncthreads();
// Pass 1: t_i = log(alpha_i) + lh_i, tracking the running maximum.
double maxLh = 0;
int maxLhIdx = -1;   // -1 until the first active mixture seeds maxLh
for (int i = 0; i < mixNum; i++) {
if (alpha[i] > 0) {
int p = gridDim.x * mixNum * threadIdx.x + blockIdx.x * mixNum + i;
double t = log(alpha[i]) + seperateLh[p];
//printf("seperateLh[%d] = %f\n", p, seperateLh[p]);
if (maxLhIdx == -1 || t > maxLh) {
maxLh = t;
maxLhIdx = i;
}
seperateLh[p] = t;
}
}
// Pass 2: shift every term by the maximum.
for (int i = 0; i < mixNum; i++) {
if (alpha[i] > 0) {
int p = gridDim.x * mixNum * threadIdx.x + blockIdx.x * mixNum + i;
seperateLh[p] -= maxLh;
}
}
// Pass 3: exponentiate; after the shift every exponent is <= 0, so each
// result is <= 1 and the later sum cannot overflow.
for (int i = 0; i < mixNum; i++) {
if (alpha[i] > 0) {
int p = gridDim.x * mixNum * threadIdx.x + blockIdx.x * mixNum + i;
seperateLh[p] = exp(seperateLh[p]);
}
}
// Pass 4: accumulate and undo the shift in log space.
double sumExp = 0;
for (int i = 0; i < mixNum; i++) {
if (alpha[i] > 0) {
int p = gridDim.x * mixNum * threadIdx.x + blockIdx.x * mixNum + i;
sumExp += seperateLh[p];
}
}
double logSumExp = maxLh + log(sumExp);
combinedLh[gridDim.x * threadIdx.x + blockIdx.x] = logSumExp;
}
// Computes, for every (frame, codebook) pair, the GMM log-likelihood
// log(sum_i alpha_i * exp(lh_i)) from per-mixture log-likelihoods,
// processing frames in chunks of fStep = 128 per kernel launch.
//   allAlpha   : cbNum * mixNum mixture weights (host)
//   seperateLh : cbNum * mixNum * fNum per-mixture log-likelihoods (host)
//   combinedLh : cbNum * fNum output (host)
extern "C" void logSumExpForGMM(int cbNum, int mixNum, int fNum, double* allAlpha, double* seperateLh, double* combinedLh) {
    int fStep = 128;   // frames per launch (= threads per block)
    // Round the frame count up to a multiple of fStep; the tail is zero-padded.
    int fExtendNum = ((fNum + fStep - 1) / fStep) * fStep;
    cudaError_t err;
    double* allAlphaDev;
    err = cudaMalloc((void**)&allAlphaDev, cbNum * mixNum * sizeof(double));
    err = cudaMemcpy(allAlphaDev, allAlpha, cbNum * mixNum * sizeof(double), cudaMemcpyHostToDevice);
    double* seperateLhDev;
    err = cudaMalloc((void**)&seperateLhDev, fExtendNum * mixNum * cbNum * sizeof(double));
    err = cudaMemset(seperateLhDev, 0, fExtendNum * mixNum * cbNum * sizeof(double));
    cudaMemcpy(seperateLhDev, seperateLh, cbNum * mixNum * fNum * sizeof(double), cudaMemcpyHostToDevice);
    double* combinedLhDev;
    err = cudaMalloc((void**)&combinedLhDev, fExtendNum * cbNum * sizeof(double));
    for (int i = 0; i < fExtendNum / fStep; i++) {
        double* lhInDev = seperateLhDev + fStep * mixNum * cbNum * i;
        double* lhOutDev = combinedLhDev + fStep * cbNum * i;
        // One block per codebook, one thread per frame of this chunk;
        // shared memory caches the codebook's mixture weights.
        kernel_lse<<<cbNum, fStep, mixNum * sizeof(double)>>>(mixNum, allAlphaDev, lhInDev, lhOutDev);
        err = cudaDeviceSynchronize();
        // BUG FIX: a cudaError_t must be compared against cudaSuccess
        // (runtime API), not the driver-API CUresult constant CUDA_SUCCESS.
        if (err != cudaSuccess) {
            printf("kernel launch failed in LSE with error \"%s\".\n",
                   cudaGetErrorString(err));
            exit(-1);
        }
    }
    // Only the fNum real frames are copied back; the padding is dropped.
    err = cudaMemcpy(combinedLh, combinedLhDev, fNum * cbNum * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(combinedLhDev);
    cudaFree(seperateLhDev);
    cudaFree(allAlphaDev);
}
// Quadratic-form part of a shared-covariance Gaussian log-likelihood.
// 2-D blocks: threadIdx.x = frame, threadIdx.y = covariance index within
// the block. Each (frame, sigma) thread computes the lower-triangular
// accumulation f' L f over the packed triangle of invSigma and subtracts
// it from the result slots of all `shareNum` codebooks that share this
// covariance. Dynamic shared memory layout:
//   [blockDim.y packed triangles][fNum * fDim feature values].
__global__ void kernel_mvn_share(int sigmaNum, int fDim, int shareNum, double* invSigma, double* feature, int fNum, double* resBuf)
{
extern __shared__ double memory[];
double* sharedInvSigma = memory;
int invSigmaLen = fDim * (fDim + 1) / 2;   // packed-triangle length
int fLen = fNum * fDim;                    // total feature doubles
double* sharedFt = sharedInvSigma + blockDim.y * invSigmaLen;
//memset(sharedFt, 0, fDim * fNum * sizeof(double));
double res = 0;
int sigmaIdx = blockDim.y * blockIdx.x + threadIdx.y;   // covariance handled by this row
int thIdx = threadIdx.x + threadIdx.y * blockDim.x;     // flat thread id in block
int thNum = blockDim.x * blockDim.y;
// Each thread-row stages its own packed triangle (strided over x-threads).
if (sigmaIdx < sigmaNum)
{
for (int i = 0; i < (invSigmaLen + blockDim.x - 1) / blockDim.x; i++)
if (i * blockDim.x + threadIdx.x < invSigmaLen)
sharedInvSigma[threadIdx.y * invSigmaLen + i * blockDim.x + threadIdx.x] = invSigma[sigmaIdx * invSigmaLen + i * blockDim.x + threadIdx.x];
}
// The whole block cooperatively stages all fNum feature vectors.
for (int i = 0; i < (fLen + thNum - 1) / thNum; i++)
if (i * thNum + thIdx < fLen)
sharedFt[i * thNum + thIdx] = feature[i * thNum + thIdx];
__syncthreads();
if (sigmaIdx < sigmaNum) {
res = 0;
int idx = 0;   // walks the packed triangle in storage order
for (int j = 0; j < fDim; j++) {
double t1 = sharedFt[threadIdx.x * fDim + j];
double rest = 0;
for (int k = 0; k < j; k++) {
double t2 = sharedFt[threadIdx.x * fDim + k];
rest += sharedInvSigma[threadIdx.y * invSigmaLen + idx++] * t2;   // off-diagonal
}
rest += sharedInvSigma[threadIdx.y * invSigmaLen + idx++] * t1;   // diagonal
res += rest * t1;
}
// Fan the quadratic form out to all codebooks sharing this covariance.
int resIdxStart = (sigmaIdx + threadIdx.x * sigmaNum) * shareNum;
for (int i = 0; i < shareNum; i++) {
resBuf[resIdxStart + i] -= res;
}
//resBuf[resIdx] = -res;
}
__syncthreads();
}
// Subtracts the per-codebook constant cst[c] from every entry of one
// frame's row of `res` (one block per frame; threads stride over the
// cbNum columns).
__global__ void kernel_share_add_cst(int cbNum, int fNum, double* cst, double* res) {
    double* row = res + blockIdx.x * cbNum;
    for (int c = threadIdx.x; c < cbNum; c += blockDim.x)
        row[c] -= cst[c];
}
// For every (frame, codebook) pair, accumulates the dot product
// dot(feature[frame], invSigmaMu[codebook]) into resBuf[frame*cbNum + cb].
// 2-D blocks: threadIdx.x = frame, threadIdx.y = codebook within block.
// Dynamic shared memory layout: [fNum*fDim features][blockDim.y vectors].
__global__ void kernel_vecdot2(int cbNum, int fNum, int fDim, double* invSigmaMu, double* fVec, double* resBuf) {
extern __shared__ double memory[];
double* sharedFeature = memory;
double* sharedInvSigmaMu = sharedFeature + fNum * fDim;
int cbIdx = blockDim.y * blockIdx.x + threadIdx.y;   // codebook for this row
int thNum = blockDim.x * blockDim.y;
int thIdx = threadIdx.y * blockDim.x + threadIdx.x;  // flat id for staging
// Whole block cooperatively stages all frame features.
for (int i = 0; i < (fDim * fNum + thNum - 1) / thNum; i++)
if (i * thNum + thIdx < fDim * fNum)
sharedFeature[i * thNum + thIdx] = fVec[i * thNum + thIdx];
// Each thread-row stages its codebook's invSigmaMu vector.
if (cbIdx < cbNum) {
for (int i = 0; i < (fDim + blockDim.x - 1) / blockDim.x; i++)
if (i * blockDim.x + threadIdx.x < fDim)
sharedInvSigmaMu[threadIdx.y * fDim + i * blockDim.x + threadIdx.x] = invSigmaMu[cbIdx * fDim + i * blockDim.x + threadIdx.x];
}
__syncthreads();
if (cbIdx < cbNum) {
int resIdx = threadIdx.x * cbNum + cbIdx;   // frame-major result layout
double* v1 = sharedFeature + threadIdx.x * fDim;
double* v2 = sharedInvSigmaMu + threadIdx.y * fDim;
double res = 0;
for (int i = 0; i < fDim; i++) {
res += v1[i] * v2[i];
}
resBuf[resIdx] += res;
}
}
// Full-covariance Gaussian log-likelihood core. For every (frame,
// codebook) pair, accumulates the per-dimension terms
//   (L-row dot f  -  invSigmaMu[j]) * f[j]
// over the packed lower triangle of invSigma, starting from cst, and
// writes the negated total into resBuf[frame*cbNum + cb].
// 2-D blocks: threadIdx.x = frame, threadIdx.y = codebook within block.
// Dynamic shared memory layout:
//   [triangles][invSigmaMu vectors][cst scalars][fNum*fDim features].
__global__ void kernel_mvn(int cbNum, int fDim, double* invSigma, double* invSigmaMu, double* cst, double* feature, int fNum, double* resBuf)
{
extern __shared__ double memory[];
double* sharedInvSigma = memory;
int invSigmaLen = fDim * (fDim + 1) / 2;   // packed-triangle length
int fLen = fNum * fDim;
double* sharedInvSigmaMu = sharedInvSigma + blockDim.y * invSigmaLen;
double* sharedCst = sharedInvSigmaMu + blockDim.y * fDim;
double* sharedFt = sharedCst + blockDim.y;
//memset(sharedFt, 0, fDim * fNum * sizeof(double));
double res = 0;
int cbIdx = blockDim.y * blockIdx.x + threadIdx.y;
int thIdx = threadIdx.x + threadIdx.y * blockDim.x;
int thNum = blockDim.x * blockDim.y;
// Each thread-row stages its codebook's triangle, vector and constant.
if (cbIdx < cbNum)
{
for (int i = 0; i < (invSigmaLen + blockDim.x - 1) / blockDim.x; i++)
if (i * blockDim.x + threadIdx.x < invSigmaLen)
sharedInvSigma[threadIdx.y * invSigmaLen + i * blockDim.x + threadIdx.x] = invSigma[cbIdx * invSigmaLen + i * blockDim.x + threadIdx.x];
for (int i = 0; i < (fDim + blockDim.x - 1) / blockDim.x; i++)
if (i * blockDim.x + threadIdx.x < fDim)
sharedInvSigmaMu[threadIdx.y * fDim + i * blockDim.x + threadIdx.x] = invSigmaMu[cbIdx * fDim + i * blockDim.x + threadIdx.x];
if (threadIdx.x == 0)
sharedCst[threadIdx.y] = cst[cbIdx];
}
// Whole block cooperatively stages all frame features.
for (int i = 0; i < (fLen + thNum - 1) / thNum; i++)
if (i * thNum + thIdx < fLen)
sharedFt[i * thNum + thIdx] = feature[i * thNum + thIdx];
__syncthreads();
if (cbIdx < cbNum) {
res = sharedCst[threadIdx.y];
int idx = 0;   // walks the packed triangle in storage order
for (int j = 0; j < fDim; j++) {
double t1 = sharedFt[threadIdx.x * fDim + j];
double rest = 0;
for (int k = 0; k <= j; k++) {
double t2 = sharedFt[threadIdx.x * fDim + k];
rest += sharedInvSigma[threadIdx.y * invSigmaLen + idx++] * t2;
}
//rest += sharedInvSigma[threadIdx.y * invSigmaLen + idx++] * t1;
rest -= sharedInvSigmaMu[threadIdx.y * fDim + j];
res += rest * t1;
}
int resIdx = cbIdx + threadIdx.x * cbNum;   // frame-major result layout
resBuf[resIdx] = -res;
}
__syncthreads();
}
//kernel_share_add_cst(int cbNum, int fNum, double* cst, double* res)
// Launches kernel_share_add_cst with the caller-chosen configuration and
// aborts the process on a launch failure.
extern "C" void kernelShareAddCstWrapper(int cbNum, int fNum, double* cstDev, double* resDev ,dim3 threads, dim3 blocks) {
    kernel_share_add_cst<<< blocks, threads>>>(cbNum, fNum, cstDev, resDev);
    // BUG FIX: compare cudaError_t against cudaSuccess (runtime API), not
    // the driver-API CUresult constant CUDA_SUCCESS.
    cudaError_t err = cudaPeekAtLastError();
    if (err != cudaSuccess) {
        printf("kernel share_add_cst launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    return;
}
// Launches kernel_vecDotProduct (one thread per codebook) and aborts the
// process on a launch failure.
extern "C" void kernelVecDotWrapper(double* invSigmaMuDev, double* muDev, int fDim, int cbNum, double* resDev , dim3 threads, dim3 blocks) {
    kernel_vecDotProduct<<< blocks, threads>>>(invSigmaMuDev, muDev, fDim, cbNum, resDev);
    // BUG FIX: compare cudaError_t against cudaSuccess, not the driver-API
    // constant CUDA_SUCCESS.
    cudaError_t err = cudaPeekAtLastError();
    if (err != cudaSuccess) {
        printf("kernel vecdot launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    return;
}
// Launches kernel_batchedDgemv with memSize bytes of dynamic shared memory
// and aborts the process on a launch failure.
extern "C" void kernelDgemvWrapper(double* invSigmaDev, double* muDev, int fDim, int cbNum, int shareNum, double* resDev , dim3 threads, dim3 blocks, int memSize) {
    kernel_batchedDgemv<<< blocks, threads, memSize>>>(invSigmaDev, muDev, fDim, cbNum, shareNum, resDev);
    // BUG FIX: compare cudaError_t against cudaSuccess, not the driver-API
    // constant CUDA_SUCCESS.
    cudaError_t err = cudaPeekAtLastError();
    if (err != cudaSuccess) {
        printf("kernel dgemv launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    return;
}
// Launches kernel_vecdot2 with memSize bytes of dynamic shared memory
// (note the argument-order translation) and aborts on a launch failure.
extern "C" void kernelVecDot2Wrapper(double* invSigmaMuDev, double* featureDev, int fDim, int cbNum, int fNum, double* resDev , dim3 threads, dim3 blocks, int memSize) {
    //kernel_vecdot2(int cbNum, int fNum, int fDim, double* invSigmaMu, double* fVec, double* resBuf)
    kernel_vecdot2<<< blocks, threads, memSize>>>(cbNum, fNum, fDim, invSigmaMuDev, featureDev, resDev);
    // BUG FIX: compare cudaError_t against cudaSuccess, not the driver-API
    // constant CUDA_SUCCESS.
    cudaError_t err = cudaPeekAtLastError();
    if (err != cudaSuccess) {
        printf("kernel vecdot2 launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    return;
}
// Launches kernel_mvn_share (cbNum/mixNum distinct covariances, each shared
// by mixNum codebooks), checks the launch, and waits for completion.
extern "C" void kernelSharedMvnWrapper(int cbNum, int fDim, int mixNum, double* invSigmaDev, double* featureDev, int fNum, double* resBufDev, dim3 threads, dim3 blocks, int memSize) {
    kernel_mvn_share<<< blocks, threads, memSize >>>(cbNum / mixNum, fDim, mixNum, invSigmaDev, featureDev, fNum, resBufDev);
    // BUG FIX: compare cudaError_t against cudaSuccess, not the driver-API
    // constant CUDA_SUCCESS.
    cudaError_t err = cudaPeekAtLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    cudaDeviceSynchronize();
    return;
}
// Launches kernel_mvn with memSize bytes of dynamic shared memory, checks
// the launch, and waits for completion.
extern "C" void kernelMvnWrapper(int cbNum, int fDim, double* invSigmaDev, double* invSigmaMuDev, double* cstDev, double* featureDev, int fNum, double* resBufDev, dim3 threads, dim3 blocks, int memSize) {
    //kernel_mvn(int cbNum, int fDim, double* invSigma, double* invSigmaMu, double* cst, double* feature, int fNum, double* resBuf)
    kernel_mvn<<< blocks, threads, memSize >>>(cbNum, fDim, invSigmaDev, invSigmaMuDev, cstDev, featureDev, fNum, resBufDev);
    // BUG FIX: compare cudaError_t against cudaSuccess, not the driver-API
    // constant CUDA_SUCCESS.
    cudaError_t err = cudaPeekAtLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    cudaDeviceSynchronize();
    return;
}
// Diagonal-covariance Gaussian log-likelihood. For each (frame, codebook)
// pair, computes
//   resBuf[frame*cbNum + cb] = -( cst + 0.5 * sum_j (f_j - mu_j)^2 * invSigma_j )
// 2-D blocks: threadIdx.x = frame, threadIdx.y = codebook within block.
// Dynamic shared memory layout:
//   [invSigma vectors][mu vectors][cst scalars][fNum*fDim features].
__global__ void kernel_diag_mvn(int cbNum, int fDim, double* invSigma, double* mu, double* cst, double* feature, int fNum, double* resBuf)
{
extern __shared__ double memory[];
double* sharedInvSigma = memory;
int fLen = fNum * fDim;
double* sharedMu = sharedInvSigma + blockDim.y * fDim;
double* sharedCst = sharedMu + blockDim.y * fDim;
double* sharedFt = sharedCst + blockDim.y;
int cbIdx = blockDim.y * blockIdx.x + threadIdx.y;
int thIdx = threadIdx.x + threadIdx.y * blockDim.x;
int thNum = blockDim.x * blockDim.y;
// Each thread-row stages its codebook's diagonal, mean and constant.
if (cbIdx < cbNum)
{
for (int i = 0; i < (fDim + blockDim.x - 1) / blockDim.x; i++)
if (i * blockDim.x + threadIdx.x < fDim)
sharedInvSigma[threadIdx.y * fDim + i * blockDim.x + threadIdx.x] = invSigma[cbIdx * fDim + i * blockDim.x + threadIdx.x];
for (int i = 0; i < (fDim + blockDim.x - 1) / blockDim.x; i++)
if (i * blockDim.x + threadIdx.x < fDim)
sharedMu[threadIdx.y * fDim + i * blockDim.x + threadIdx.x] = mu[cbIdx * fDim + i * blockDim.x + threadIdx.x];
if (threadIdx.x == 0)
sharedCst[threadIdx.y] = cst[cbIdx];
}
// Whole block cooperatively stages all frame features.
for (int i = 0; i < (fLen + thNum - 1) / thNum; i++)
if (i * thNum + thIdx < fLen)
sharedFt[i * thNum + thIdx] = feature[i * thNum + thIdx];
__syncthreads();
if (cbIdx < cbNum) {
double res = 0;
for (int j = 0; j < fDim; j++) {
double xj = sharedFt[threadIdx.x * fDim + j] - sharedMu[threadIdx.y * fDim + j];
res += xj * xj * sharedInvSigma[threadIdx.y * fDim + j];
}
res = -(sharedCst[threadIdx.y] + res / 2);
int resIdx = cbIdx + threadIdx.x * cbNum;   // frame-major result layout
resBuf[resIdx] = res;
}
__syncthreads();
}
// Launches kernel_diag_mvn with memSize bytes of dynamic shared memory,
// waits for completion, and aborts the process on any error.
extern "C" void kernelDiagMvnWrapper(int cbNum, int fDim, double* invSigmaDev, double* muDev, double* cstDev, double* featureDev, int fNum, double* resBufDev, dim3 threads, dim3 blocks, int memSize) {
    kernel_diag_mvn<<< blocks, threads, memSize >>>(cbNum, fDim, invSigmaDev, muDev, cstDev, featureDev, fNum, resBufDev);
    // BUG FIX: compare cudaError_t against cudaSuccess, not the driver-API
    // constant CUDA_SUCCESS.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    return;
}
//__global__ void kernel_lse(int mixNum, double* allAlpha, double* seperateLh, double* combinedLh)
// Launches kernel_lse (one block per codebook, fStep frame-threads,
// mixNum doubles of shared memory), waits for completion, and aborts the
// process on any error.
extern "C" void kernelLSEWrapper(int cbNum, int fStep, int mixNum, double* alphaDev, double* seperateLhDev, double* combineLhDev) {
    kernel_lse<<<cbNum, fStep, mixNum * sizeof(double)>>>(mixNum, alphaDev, seperateLhDev, combineLhDev);
    // BUG FIX: compare cudaError_t against cudaSuccess, not the driver-API
    // constant CUDA_SUCCESS.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(err));
        exit(-1);
    }
    return;
}
|
2,889 | #include<cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <sstream>
// Sets every element of array[0..size) to `value` — one thread per element,
// bounds-checked for the ragged last block.
__global__ void fillArray(double* array, int size, double value)
{
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
        array[idx] = value;
}
// Applies homogeneous Dirichlet boundary conditions to a CSR system.
// Phase 1 (one thread per row i): subtracts the boundary contribution
// sum_j bound[tag-1] * A[i][j] over boundary columns from load[i]
// (a no-op here since all bound[] values are 0).
// Phase 2 (one thread per CSR entry i): finds the entry's row by scanning
// csrRowPtr, zeroes off-diagonal entries touching a boundary node, and
// sets boundary diagonal entries to 1.
// NOTE(review): boundaryValue and ucindex are unused; the __syncthreads()
// inside `if (i < entries)` is a barrier in divergent control flow (UB if
// any thread of a block fails the condition) — and a block-level barrier
// cannot order phase-2 writes across blocks anyway; confirm intent.
__global__ void applyDirichlet(double* load, double* csr_matrix, int* csr_col_device,int* csrRowPtr,int *isboundaryNode, unsigned int entries, int elementsX, int elementsY, int degree, double boundaryValue)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
int row=0;
int col;
int ucindex;   // NOTE(review): unused
// Highest node index (node count - 1) of the tensor-product grid.
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree)-1;
double sum=0;
// Prescribed value per boundary tag (1..4); all zero here.
double bound[4];
bound[0]=0;
bound[1]=0;
bound[2]=0;
bound[3]=0;
// Phase 1: move known boundary values to the right-hand side.
if(i<pointCount+1){
int start=csrRowPtr[i];
int end =csrRowPtr[i+1];
for(int j=start;j<end;j++){
if(isboundaryNode[csr_col_device[j]]!=0){
sum+=bound[isboundaryNode[csr_col_device[j]]-1]*csr_matrix[j];
}
}
load[i]-=sum;
}
__syncthreads();
// Phase 2: rewrite matrix rows/columns of boundary nodes.
if(i<entries){
col=csr_col_device[i];
// Linear scan over row pointers to recover this entry's row index.
while((row<=pointCount) && (csrRowPtr[row]<=i)){
row++;
}
row--;
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col!=row){
csr_matrix[i]=0;
}
}
__syncthreads();   // NOTE(review): barrier inside divergent branch — see header
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col==row){
csr_matrix[i]=1;
}
}
}
}
// Writes the prescribed Dirichlet value into the load vector at every
// boundary node: one thread per CSR entry; on a diagonal entry whose node
// is tagged as boundary, load[col] is overwritten with bound[tag-1]
// (all zeros here). NOTE(review): boundaryValue, sum and ucindex are
// unused — presumably leftovers from applyDirichlet; confirm.
__global__ void vectorDirichlet(double* load, double* csr_matrix, int* csr_col_device,int* csrRowPtr,int *isboundaryNode,unsigned int entries, int elementsX, int elementsY, int degree, double boundaryValue)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
int row=0;
int col;
int ucindex;   // NOTE(review): unused
// Highest node index (node count - 1) of the tensor-product grid.
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree)-1;
double sum=0;  // NOTE(review): unused
// Prescribed value per boundary tag (1..4); all zero here.
double bound[4];
bound[0]=0;
bound[1]=0;
bound[2]=0;
bound[3]=0;
if(i<entries){
col=csr_col_device[i];
// Linear scan over row pointers to recover this entry's row index.
while((row<=pointCount) && (csrRowPtr[row]<=i)){
row++;
}
row--;
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col==row){
load[col]=bound[isboundaryNode[col]-1];
}
}
}
}
// Fills M with normalized products of binomial coefficients:
//   M[i + j*(n+1)] = C(n,i) * C(n,j) / C(2n, i+j)
// for a single (n+1) x (n+1) thread block (i = threadIdx.x, j = threadIdx.y).
// Each C(a,b) is built by interleaving multiply and divide steps so the
// running value never exceeds the final coefficient.
__global__ void BernBinomCoeff(double *M, int n)
{
unsigned int i= threadIdx.x;
unsigned int j= threadIdx.y;
unsigned int top_0=1;   // C(n, i)
unsigned int top_1=1;   // C(n, j)
unsigned int bottom=1;  // C(2n, i+j)
unsigned int n_save=n;
//guarantees that every step in the solution is smaller than the final solution thus avoiding overflow
for (int d=1; d <= i; d++){
top_0*= n_save--;
top_0 /= d;    // exact: the running product is always divisible by d here
}
n_save=n;
for (int d=1; d <= j; d++) {
top_1*= n_save--;
top_1 /= d;
}
n_save=2*n;
for (int d=1; d <= i+j; d++) {
bottom*= n_save--;
bottom /= d;
}
// Column-major-style store: entry (i, j) of an (n+1)-wide matrix.
M[i+j*(n+1)]=(double)(top_0*top_1)/bottom;
}
// Assembles the local stiffness contributions of a tensor-product FEM
// discretization: one thread per element. For its element, the thread
// builds the dense (n+1)^2 x (n+1)^2 local matrix B from the precomputed
// coefficient tables M and M_m (scaled by the element aspect ratios b/a
// and a/b), then scatters B into COO triplets plus a flattened
// row-major index (row * pointCount + col) used for later sorting.
// NOTE(review): per-thread device-side malloc of B is unchecked — a NULL
// return on heap exhaustion would fault; consider a fixed-size buffer or
// a NULL check. TODO confirm the heap-size configuration of the caller.
__global__ void ass_A_exact(double a, double b,int *coo_row_device,int *coo_col_device, double*coo_value, int degree,long long int* index, int *elements, double *M, double *M_m, int elementsX, int elementsY)
{
// Total number of global nodes in the grid.
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree);
// Scratch local matrix, (n+1)^2 x (n+1)^2, allocated on the device heap.
double *B;
B=(double*)malloc((degree+1)*(degree+1)*(degree+1)*(degree+1)*sizeof(double));
unsigned int i_glob;
unsigned int j_glob;
unsigned int shift;
double sum=0;
unsigned int element=threadIdx.x+blockIdx.x*blockDim.x;
int n=degree;
if(element<elementsX*elementsY){
// (i,j) and (k,l) index the two local basis functions in x/y.
for (int i=0; i<=n;i++)
for(int j=0; j<=n;j++)
for (int k=0; k<=n;k++)
for(int l=0; l<=n;l++){
sum=0;
shift=i+j*(degree+1)+(degree+1)*(degree+1)*(k+l*(degree+1));
// x-derivative part: difference stencil of M_m entries, guarded
// against the edges of the (n x n) table.
if((i<n) && (k<n))
sum+=M_m[i+n*k];
if((i>0) && (i-1<n) && (k<n))
sum-=M_m[i-1+n*k];
if((k>0)&& (i<n) && (k-1<n))
sum-=M_m[i+n*(k-1)];
if((k>0) && (i>0) && (i-1<n)&& (k-1<n))
sum+=M_m[i-1+n*(k-1)];
B[shift]=M[j+l*(n+1)]*b/a*sum;
// y-derivative part, mirrored in (j,l).
sum=0;
if((j<n) && (l<n))
sum=M_m[j+n*l];
if((j>0) && (j-1<n) && (l<n))
sum-=M_m[j-1+n*l];
if((l>0)&& (j<n) && (l-1<n))
sum-=M_m[j+n*(l-1)];
if((l>0) && (j>0) && (j-1<n)&& (l-1<n))
sum+=M_m[j-1+n*(l-1)];
B[shift]+=M[i+k*(n+1)]*a/b*(sum);
// Common normalization factor applied twice (once per direction).
B[shift]*=(double)(n*n)/(4*n*n-1);
B[shift]*=(double)(n*n)/(4*n*n-1);
}
// Scatter the local matrix into the global COO arrays.
for(int i=0; i<(n+1)*(n+1);i++){
for(int j=0; j<(n+1)*(n+1);j++){
i_glob=elements[element*(n+1)*(n+1)+i];
j_glob=elements[element*(n+1)*(n+1)+j];
coo_row_device[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=i_glob;
coo_col_device[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=j_glob;
index[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=i_glob*pointCount+j_glob;
coo_value[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=B[i+j*(n+1)*(n+1)];
}
}
}
free(B);
}
// Decodes each flattened index back into its (row, col) pair, where
// index[i] == row * pointCount + col. One thread per entry.
__global__ void split(long long int* index, int*cols, int*rows,unsigned long int pointCount,unsigned long int length)
{
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= length)
        return;
    long long int flat = index[tid];
    rows[tid] = flat / pointCount;
    cols[tid] = flat % pointCount;
}
// One thread per element: writes the constant per-node load contribution
// func*a*b / (degree+1)^2 for each of the element's (degree+1)^2 local
// nodes and copies the element's global node ids into `index`.
__global__ void loadVector(double* loadList, int* index,int *elements, double a, double b, int degree, double func, int ElementCount)
{
    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element >= ElementCount)
        return;
    const int nodes = (degree + 1) * (degree + 1);   // local nodes per element
    const int base = element * nodes;
    const double contribution = func * a * b / nodes;
    for (int k = 0; k < nodes; k++) {
        loadList[base + k] = contribution;
        index[base + k] = elements[base + k];
    }
}
2,890 | #include "includes.h"
// Zeroes prev_delta[id] for every id where indexes[id] != id, i.e. keeps
// the gradient only at positions that point to themselves. One thread per
// element over a 2-D grid of 1-D blocks.
__global__ void backward_zero_nonmax_kernel(int n, int *indexes, float *prev_delta)
{
    int gid = blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x;
    if (gid < n && indexes[gid] != gid)
        prev_delta[gid] = 0;
}
2,891 | #include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <iostream>
// constants for approximating the normal cdf
// gelu ->gelu_fast
constexpr static float A = 0.5;
constexpr static float B = 0.7978845608028654; // sqrt(2.0/M_PI)
constexpr static float C = 0.035677408136300125; // 0.044715 * sqrt(2.0/M_PI)
// In-place bias-add followed by the tanh approximation of GELU:
//   x = out + bias;  out = x * (A + A*tanh(x*(C*x*x + B)))
// Layout: grid = (m, fold), block = n/fold over a row-major (m, n) buffer;
// blockIdx.x selects the row, (blockIdx.y, threadIdx.x) the column.
// NOTE(review): for T = float, tanh(...) here may resolve to the double
// overload and promote the whole expression — consider tanhf; confirm.
template <typename T>
__global__
void add_bias_gelu(T* out, const T* bias, int m, int n){
int idx = n * blockIdx.x + blockIdx.y * blockDim.x + threadIdx.x;
int bias_idx = blockIdx.y * blockDim.x + threadIdx.x;  // column == bias slot
if (idx < m * n){
T in = out[idx] + bias[bias_idx];
T cdf = A + A * tanh(in * (C * in * in + B));  // approx normal CDF
out[idx] = in * cdf;
}
}
// Half-precision specialization of add_bias_gelu: processes two halves at
// a time through half2, so the caller passes n in half2 units (n/2) and
// out/bias must be 4-byte aligned with an even column count. The tanh is
// evaluated per lane in float for accuracy, then repacked.
// NOTE(review): the (half2*)bias cast silently drops const — harmless here
// since bias_ptr is only read, but worth tidying.
template <>
__global__
void add_bias_gelu<half>(half* out, const half* bias, int m, int n){
const half2 A2 = __floats2half2_rn(A, A);
const half2 B2 = __floats2half2_rn(B, B);
const half2 C2 = __floats2half2_rn(C, C);
half2 * out_ptr = (half2 *)out;
half2 * bias_ptr = (half2 *)bias;
int idx = n * blockIdx.x + blockIdx.y * blockDim.x + threadIdx.x;
int bias_idx = blockIdx.y * blockDim.x + threadIdx.x;
if (idx < m * n ){
half2 in = out_ptr[idx] + bias_ptr[bias_idx];
half2 tmp = in * (C2 * in * in + B2);   // argument of tanh, per lane
float x = tanh(__half2float(tmp.x));    // lane 0 in float precision
float y = tanh(__half2float(tmp.y));    // lane 1 in float precision
half2 cdf = A2 + A2 * make_half2(x, y);
out_ptr[idx] = in * cdf;
}
}
// In-place bias-add followed by ReLU over a row-major (m, n) buffer.
// Layout: grid = (m, fold), block = n/fold; blockIdx.x selects the row and
// the (blockIdx.y, threadIdx.x) pair selects the column / bias element.
template <typename T>
__global__
void add_bias_relu(T* out, const T* bias, int m, int n){
    int col = blockIdx.y * blockDim.x + threadIdx.x;
    int idx = n * blockIdx.x + col;
    if (idx < m * n){
        T act = out[idx] + bias[col];
        out[idx] = (T)(act > 0.0f ? act : 0.0f);
    }
}
// Half-precision specialization of add_bias_relu: works on half2 pairs, so
// the caller passes n in half2 units (n/2); out/bias must be 4-byte
// aligned with an even column count. ReLU is applied to each lane.
template <>
__global__
void add_bias_relu(half* out, const half* bias, int m, int n){
int idx = n * blockIdx.x + blockIdx.y * blockDim.x + threadIdx.x;
int bias_idx = blockIdx.y * blockDim.x + threadIdx.x;   // column == bias slot
half2 * out_ptr = (half2*)out;
half2 * bias_ptr = (half2*)bias;   // NOTE(review): cast drops const (read-only use)
if (bias_idx < n && idx < m * n){
half2 val = __hadd2(out_ptr[idx], bias_ptr[bias_idx]);
val.x = val.x > (half)0.0f ? val.x : (half)0.0f;   // ReLU, lane 0
val.y = val.y > (half)0.0f ? val.y : (half)0.0f;   // ReLU, lane 1
out_ptr[idx] = val;
}
}
// Host dispatcher for the fused bias + activation kernels over an (m, n)
// row-major buffer. act_type: 0 = ReLU, 1 = GELU.
// The column dimension is folded into grid.y so blockDim.x never exceeds
// 1024: grid = (m, fold_coeff), block = n / fold_coeff.
// For half, the kernels operate on half2 pairs, so the block is halved
// again and n is passed in half2 units (n / 2) — n must be even.
template<typename T>
void add_bias_act_kernel(void* ffn_inner, const void* bias, int m, int n ,const int act_type ,const cudaStream_t stream)
{
if (sizeof(T) == sizeof(half)){
// half path: thresholds are 2x the float path since two halves are
// processed per thread.
int fold_coeff = 1;
if (n <= 2048){
fold_coeff = 1;
}else if( n <= 4096){
fold_coeff = 2;
}else if(n <= 8192){
fold_coeff = 4;
}else if(n <= 16384){
fold_coeff = 8;
}else if(n <= 16384 * 2){
fold_coeff = 16;
}else if(n <= 16384 * 4){
fold_coeff = 32;
}
dim3 grid(m, fold_coeff);
dim3 block(n / fold_coeff);
block.x /= 2;   // two halves per thread via half2
if (act_type == 0){
add_bias_relu<T><<<grid, block, 0, stream>>>((T*)ffn_inner, (T*)bias, m, n / 2);
}else if(act_type == 1){
add_bias_gelu<T><<<grid, block, 0, stream>>>((T*)ffn_inner, (T*)bias, m, n / 2);
}else{
std::cerr << "unsupported activation " << std::endl;
}
} else {
// float (or other 4-byte T) path: keep blockDim.x <= 1024.
int fold_coeff = 1;
if (n <= 1024){
fold_coeff = 1;
}else if( n <= 2048){
fold_coeff = 2;
}else if(n <= 4096){
fold_coeff = 4;
}else if(n <= 8192){
fold_coeff = 8;
}else if(n <= 16384){
fold_coeff = 16;
}else if(n <= 16384 * 2){
fold_coeff = 32;
}else if (n <= 16384 * 4){
fold_coeff = 64;
}
dim3 grid(m, fold_coeff);
dim3 block(n / fold_coeff);
if (act_type == 0){
add_bias_relu<T><<<grid, block, 0, stream>>>((T*)ffn_inner, (T*)bias, m, n);
}else if(act_type == 1){
add_bias_gelu<T><<<grid, block, 0, stream>>>((T*)ffn_inner, (T*)bias, m, n);
}else {
std::cerr << "unsupported activation " << std::endl;
}
}
}
template void add_bias_act_kernel<float>(void* ffn_inner, const void* bias, const int m, const int n ,const int act_type ,const cudaStream_t stream);
template void add_bias_act_kernel<half>(void* ffn_inner, const void* bias, const int m, const int n ,const int act_type,const cudaStream_t stream);
|
2,892 | #include "curand_kernel.h"
#define seed 42
// Writes one uniform random value per thread into outdata[tid], using an
// MRG32k3a generator seeded identically across the grid; the global thread
// id is used as the curand subsequence so every thread draws independently.
__global__ void kernel(double* outdata)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    curandStateMRG32k3a_t rngState;
    curand_init(seed, tid, 0, &rngState);
    outdata[tid] = curand_uniform(&rngState);
}
// Allocates one device double, fills it with a random draw, and exits.
int main()
{
    double* data;
    // BUG FIX: the original passed the *value* of the uninitialized pointer
    // ((void**)data) to cudaMalloc instead of its address, so the allocation
    // target was garbage and `data` itself was never set.
    cudaMalloc((void**)&data, sizeof(double));
    kernel<<<1,1>>>(data);
    // Kernel launches are asynchronous; wait for completion (and surface any
    // execution error) before releasing the buffer.
    cudaDeviceSynchronize();
    cudaFree(data);
    return 0;
}
2,893 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10000000
#define MAX_ERR 1e-6
// Element-wise vector addition: out[i] = a[i] + b[i] for i in [0, n).
// Each thread starts at its own threadIdx.x and strides by the block width,
// so a single block of any size covers the whole array.
__global__ void vector_add(float* out,float* a,float* b,int n){
    int i = threadIdx.x;
    while (i < n) {
        out[i] = a[i] + b[i];
        i += blockDim.x;
    }
}
// Host driver: fills two N-element arrays (1.0 and 2.0), adds them on the GPU
// with a single 256-thread block (the kernel's stride loop covers all N), and
// verifies every element on the CPU.
int main(){
float *a,*b,*out;
float *d_a,*d_b,*d_out;
// Host buffers: N = 10M floats each (~40 MB per buffer); malloc unchecked.
a=(float*)malloc(sizeof(float)*N);
b=(float*)malloc(sizeof(float)*N);
out=(float*)malloc(sizeof(float)*N);
for(int i=0;i<N;i++){
a[i]=1.0f;
b[i]=2.0f;
}
// Device mirrors of the host buffers.
cudaMalloc((void**)&d_a, sizeof(float) * N);
cudaMalloc((void**)&d_b, sizeof(float) * N);
cudaMalloc((void**)&d_out, sizeof(float) * N);
cudaMemcpy(d_a,a,sizeof(float)*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,sizeof(float)*N,cudaMemcpyHostToDevice);
// One block of 256 threads; correctness relies on the kernel's stride loop.
vector_add<<<1,256>>>(d_out,d_a,d_b,N);
// Blocking device-to-host copy; it also synchronizes with the kernel above.
cudaMemcpy(out,d_out,sizeof(float)*N,cudaMemcpyDeviceToHost);
// Verification
for(int i = 0; i < N; i++){
assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
}
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
free(a);
free(b);
free(out);
}
|
2,894 | #include <thrust/copy.h>
#include <thrust/remove.h>
#include <thrust/device_ptr.h>
#include <iostream>
#include <iterator>
#include <string>
// Predicate functor: returns true if the argument is negative, false
// otherwise. Used below as the removal condition for thrust::remove_if.
struct is_negative
{
// Callable on both host and device so thrust can dispatch either backend.
__host__ __device__
bool operator()(const int x)
{
return x < 0;
}
};
// Prints "name: e0 e1 ... " followed by a newline. Works for host iterators
// and thrust device_ptr ranges alike: thrust::copy pulls each device value
// through the ostream_iterator.
template <typename Iterator>
void print_range(const std::string& name, Iterator first, Iterator last)
{
// Element type of the range, used to instantiate the ostream_iterator.
typedef typename std::iterator_traits<Iterator>::value_type T;
std::cout << name << ": ";
thrust::copy(first, last, std::ostream_iterator<T>(std::cout, " "));
std::cout << "\n";
}
// Demonstrates stream compaction on a raw device allocation: negative values
// are removed in place with thrust::remove_if and the compacted prefix is
// printed together with its new logical size.
int main(void)
{
    // input size
    const int N = 10;
    int h_A[N] = {-2 ,-10, 0, 9, -2, 3, 5, 0, -1, -20};
    // Raw cudaMalloc allocation wrapped in a thrust::device_ptr so the
    // thrust algorithms below can operate on it.
    int* d_A;
    cudaMalloc(&d_A, sizeof(int) * N);
    cudaMemcpy(d_A, h_A, N * sizeof(int), cudaMemcpyHostToDevice);
    thrust::device_ptr<int> d_thrust_A(d_A);
    print_range("Original", d_thrust_A, d_thrust_A + N);
    // we can also compact sequences with the remove functions, which do the opposite of copy
    thrust::device_ptr<int> d_thrust_end = thrust::remove_if(d_thrust_A, d_thrust_A + N, is_negative());
    // remove_if only shifts kept elements forward; the logical size is the
    // distance to the returned new end (trailing slots hold stale data).
    size_t length = d_thrust_end - d_thrust_A;
    std::cout << std::endl << "Size: " << length << std::endl;
    print_range("values", d_thrust_A, d_thrust_end);
    // FIX: release the device allocation (the original leaked d_A).
    cudaFree(d_A);
    return 0;
}
2,895 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define SIZE 128
#define THREADS 32
// Squares d_arr in place. Each thread starts at its threadIdx.x and squares
// maxLoop elements spaced `increment` apart (the caller passes increment ==
// thread count, so successive passes tile the whole array).
__global__ void squareWithForLoop(float * d_arr, size_t maxLoop, size_t increment)
{
    size_t pos = threadIdx.x;
    for (size_t pass = 0; pass < maxLoop; pass++, pos += increment) {
        float v = d_arr[pos];
        d_arr[pos] = v * v;
    }
}
// Fills a SIZE-element array with 0..SIZE-1, squares it on the GPU with one
// block of THREADS threads, then prints the result 7 values per line.
int main(int argc, char ** argv)
{
float * h_array;
float * d_array;
size_t totalArraySize;
totalArraySize = sizeof(float) * SIZE;
// Host buffer initialized to its own indices (malloc unchecked).
h_array = (float *)malloc(totalArraySize);
for (size_t i = 0; i < SIZE; i++)
h_array[i] = i;
cudaMalloc((void **)&d_array, totalArraySize);
cudaMemcpy(d_array, h_array, totalArraySize, cudaMemcpyHostToDevice);
// Each of the THREADS threads squares SIZE/THREADS elements spaced THREADS
// apart; assumes SIZE is a multiple of THREADS (128 / 32 here).
squareWithForLoop<<<1, THREADS >>> (d_array, SIZE/THREADS, THREADS);
// Blocking copy-back; it also synchronizes with the kernel launch.
cudaMemcpy(h_array, d_array, totalArraySize, cudaMemcpyDeviceToHost);
cudaFree(d_array);
// Tab-separate values, newline after every 7th.
for (size_t i = 0; i < SIZE; i++) {
printf("%8.3f", h_array[i]);
printf(((i % 7) != 6) ? "\t" : "\n");
}
printf("\n");
free(h_array);
return 0;
}
|
2,896 | #include "includes.h"
// Element-wise integer addition c = a + b over n elements, one thread per
// element. The guard handles launch configurations larger than n.
__global__ void add(int *a, int *b, int *c, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
2,897 | // =================================================================
//
// File: intro3.cu
// Author: Pedro Perez
// Description: This file shows some of the basic CUDA directives.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <cuda_runtime.h>
// Trivial device kernel: every launched thread prints one greeting line
// (device printf output is buffered until a synchronizing call).
__global__ void kernel(void) {
printf("GPU: Hello world\n");
}
int main(int argc, char* argv[]) {
// 2 blocks x 4 threads -> 8 prints; synchronize before exiting so the
// buffered device printf output is actually flushed.
kernel<<<2, 4>>>();
cudaDeviceSynchronize();
return 0;
}
|
2,898 | // Assignment For Module 03:
// Blocks, Warps and Threads
// Author: Justin Renga
#include <stdio.h>
#include <stdlib.h>
/// @brief GPU kernel: element-wise integer addition.
///
/// Computes output[i] = input1[i] + input2[i], where i is the flat global
/// thread index. NOTE(review): there is no bounds check, so the launch
/// configuration must cover exactly the allocated element count (the host
/// code sizes every array as numBlocks * numThreads).
///
/// @param [in]  input1 The first operand array
/// @param [in]  input2 The second operand array
/// @param [out] output The element-wise sums
__global__ void integer_addition(int* input1, int* input2, int* output)
{
    const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    output[gid] = input1[gid] + input2[gid];
}
/// @brief The main function
///
/// @param [in] argc The number of arguments passed into the program
/// @param [in] argv The actual arguments passed into the program
///
/// @return EXIT_SUCCESS upon success
int main(int argc, const char* argv[])
{
// 1. Check the number of arguments.
if (argc != 3)
{
printf("ERROR: Incorrect number of arguments provided!\n");
printf("----------------------------------------------\n");
printf("Usage:\n");
printf(" module_3_jrenga2.exe <number of blocks> <number of thread per block>\n");
printf("\nPlease check your inputs and try again. Thank you! :)\n");
return EXIT_FAILURE;
}
// 2. Attempt to retrieve the integer values of the parameters
// (a value less than or equal to 0 is considered invalid)
int numBlocks = atoi(argv[1]);
if (numBlocks <= 0)
{
printf("ERROR: Invalid number of blocks provided!\n");
printf("-----------------------------------------\n");
printf("The number of blocks must be a positive integer.\n");
printf("\nPlease check your inputs and try again. Thank you! :)\n");
return EXIT_FAILURE;
}
int numThreads = atoi(argv[2]);
if (numThreads <= 0)
{
printf("ERROR: Invalid number of threads provided!\n");
printf("------------------------------------------\n");
printf("The number of threads per block must be a positive integer.\n");
printf("\nPlease check your inputs and try again. Thank you! :)\n");
return EXIT_FAILURE;
}
// 2.5 Check to see if the minimum number of threads has been achieved (64)
// NOTE(review): the condition accepts exactly 64 threads even though the
// message says "greater than 64" -- confirm which is intended.
if ((numBlocks * numThreads) < 64)
{
int threadCount = numBlocks * numThreads;
printf("ERROR: Minimum number of threads not achieved!\n");
printf("----------------------------------------------\n");
printf("The total number of threads must be greater than 64. You have provided %d threads.\n", threadCount);
printf("\nPlease check your inputs and try again. Thank you! :)\n");
return EXIT_FAILURE;
}
// 3. Compute necessary array structures
// One array element per launched thread, so the kernel needs no bounds check.
int array_size = numBlocks * numThreads;
int array_size_in_bytes = array_size * sizeof(unsigned int);
// NOTE(review): host buffers are unsigned int while the device pointers and
// the kernel use int; the byte sizes match, so the raw copies below are safe.
unsigned int* cpu_input_array1 = (unsigned int*) calloc(array_size, sizeof(unsigned int));
unsigned int* cpu_input_array2 = (unsigned int*) calloc(array_size, sizeof(unsigned int));
unsigned int* cpu_output_array = (unsigned int*) calloc(array_size, sizeof(unsigned int));
int* gpu_input_1;
int* gpu_input_2;
int* gpu_output;
// Inputs: array1[i] = i, array2[i] = 2*i, so output[i] should be 3*i.
int loopCounter = 0;
for ( ; loopCounter < array_size; loopCounter++)
{
cpu_input_array1[loopCounter] = loopCounter;
cpu_input_array2[loopCounter] = loopCounter * 2;
}
// 4. Prepare the GPU call
cudaMalloc((void **)&gpu_input_1, array_size_in_bytes);
cudaMemcpy(gpu_input_1, cpu_input_array1, array_size_in_bytes, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_input_2, array_size_in_bytes);
cudaMemcpy(gpu_input_2, cpu_input_array2, array_size_in_bytes, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_output, array_size_in_bytes);
cudaMemcpy(gpu_output, cpu_output_array, array_size_in_bytes, cudaMemcpyHostToDevice);
// 5. Perform the GPU call
integer_addition<<< numBlocks, numThreads >>>(gpu_input_1, gpu_input_2, gpu_output);
// 6. Retrieve the data
// Blocking device-to-host copy; it also synchronizes with the kernel launch.
cudaMemcpy( cpu_output_array, gpu_output, array_size_in_bytes, cudaMemcpyDeviceToHost);
// 6.1 Free the remaining data
cudaFree(gpu_output);
cudaFree(gpu_input_2);
cudaFree(gpu_input_1);
// 7. Display the data
for ( loopCounter = 0; loopCounter < array_size; loopCounter++)
{
printf("Computed value (index %d): %u\n", loopCounter, cpu_output_array[loopCounter]);
}
free(cpu_output_array);
free(cpu_input_array2);
free(cpu_input_array1);
return EXIT_SUCCESS;
}
|
2,899 | #include <math.h> // for abs
#include <stdio.h>
#include <stdint.h> // for uint8_t
#include <string.h>
#include <sys/time.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
//using namespace std;
#define PIXEL uint8_t
#define H 288 // height of each frame
#define W 352 // width of each frame
#define NF 200 // number of frames
#define NRF 3 // number of refernce frames
#define BS 4 // block_size
#define SR 2 // search_radius
//#define ENABLE_PRINT
//because using c not c++
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
// Wall-clock time stamp in seconds (microsecond resolution), via gettimeofday.
double getTimeStamp()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + tv.tv_usec / 1000000.0;
}
/////////////////////////////////////////CPU FUNCTIONS///////////////////////////////////////////////////////////////////
// Exhaustive block-matching for one BS x BS block at block coords (yb, xb).
// Scans all NRF reference frames over a +/-SR pixel window (clamped so the
// whole candidate block stays inside the frame), tracks the lowest sum of
// absolute differences, and writes the winning displacement into
// *motion_vector_x / *motion_vector_y plus the reference-frame distance
// (1..NRF, 1 = previous frame) into *motion_vector_f. On SAD ties the last
// candidate scanned wins. Always returns 1.
int generate_mv_for_block(PIXEL *currunt_frame, PIXEL *reference_frames, int yb, int xb, int h, int w, int *motion_vector_y, int *motion_vector_x, int *motion_vector_f)
{
int lowest_SAD = 256 * BS * BS;// strict upper bound on any SAD (per-pixel diff < 256)
int block_size = BS;
int y = yb*BS;
int x = xb*BS;
// (y,x) the pixel location of the top-left corner of the block.
//Search for the best matching block in the reference frame.
//The search processes only the block is within the reference frame (not out of boundary).
for (int ref_index = 0; ref_index < NRF; ref_index++){
// Clamp the search window so every BS x BS candidate stays in-frame.
for (int search_y_radius = MAX(y-SR,0); search_y_radius <= MIN(y+SR,H-BS) ; search_y_radius++){
for (int search_x_radius = MAX(x-SR,0); search_x_radius <= MIN(x+SR,W-BS); search_x_radius++){
//Calculate SAD of this block with the input block.
int SAD = 0;
for(int j =0; j < block_size; j++)
{
for (int i=0; i < block_size; i++)
{
// reference_frames holds NRF frames oldest-first; ref_index counts back from the newest.
SAD += abs(currunt_frame[(y+j)*w+x+i] - reference_frames[(NRF-1-ref_index)*w*h+(search_y_radius+j)*w +search_x_radius+i]);
}
}
//If this block is better in SAD...
if (lowest_SAD > SAD){
lowest_SAD = SAD; // Update SAD.
(*motion_vector_x) = search_x_radius - x;
(*motion_vector_y) = search_y_radius - y; //Update motion vector.
(*motion_vector_f) = ref_index + 1;
}
//If there is a tie in SAD keep last change
}
}
}
return 1;
}
// CPU reference implementation: for every frame from NRF onward, snapshot the
// current frame plus its NRF predecessors, run the exhaustive search for each
// macroblock, and store (mvx, mvy, mvf) triples into motion_vector at
// (frame*blocks + block)*3.
void generate_mv_for_frames_cpu(int *motion_vector,PIXEL *luma, int h, int w){
int nblock_x = w/BS;
int nblock_y = h/BS;
PIXEL *currunt_frame = (PIXEL *) malloc(h*w*sizeof(PIXEL)); if (currunt_frame == NULL) fprintf(stderr, "Bad malloc on currunt_frame \n");
PIXEL *reference_frames= (PIXEL*) malloc(h*w*NRF*sizeof(PIXEL)); if (reference_frames == NULL) fprintf(stderr, "Bad malloc on reference_frames \n");
for (int f =NRF; f < NF; f++)
{
// Copy the working frame and the NRF frames preceding it (oldest first).
memcpy(currunt_frame ,&luma[h*w*f] ,h*w *sizeof(PIXEL));
memcpy(reference_frames,&luma[h*w*(f-NRF)],h*w*NRF*sizeof(PIXEL));
for (int y = 0; y < nblock_y; y++){
for (int x = 0; x < nblock_x; x++){
// Defaults used when no candidate beats the initial SAD bound.
int mvy = 0;
int mvx = 0;
int mvf = 1;
generate_mv_for_block(currunt_frame, reference_frames, y, x, h, w, &mvy, &mvx, &mvf);
motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0] = mvx;
motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1] = mvy;
motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2] = mvf;
}
}
}
free(currunt_frame );
free(reference_frames);
}
/////////////////////////////GPU FUNCTIONS///////////////////////////////////////////////////////////////
// Naive single-thread GPU baseline mirroring the CPU search: one thread walks
// every macroblock and candidate displacement.
// NOTE(review): the outer f loop never uses f -- the kernel recomputes the
// same current/reference pair (NF - NRF) times and overwrites the same
// motion_vector slots each pass; it appears to exist only for timing.
__global__ void d_generate_mv_one_frame_naive( PIXEL *currunt_frame, PIXEL *reference_frames, int *motion_vector, int h, int w)
{
int block_size = BS;
int nblock_x = w/BS;
int nblock_y = h/BS;
// (y,x) the pixel location of the top-left corner of the block.
//Search for the best matching block in the reference frame.
//The search processes only the block is within the reference frame (not out of boundary).
for (int f =NRF; f < NF; f++){
for (int yb = 0; yb < nblock_y; yb++){
for (int xb = 0; xb < nblock_x; xb++){
int lowest_SAD = 256 * BS * BS;// strict upper bound on any SAD
int y = yb*BS;
int x = xb*BS;
for (int ref_index = 0; ref_index < NRF; ref_index++){
// Same clamped search window as the CPU version.
for (int search_y_radius = MAX(y-SR,0); search_y_radius <= MIN(y+SR,H-BS) ; search_y_radius++){
for (int search_x_radius = MAX(x-SR,0); search_x_radius <= MIN(x+SR,W-BS); search_x_radius++){
//Calculate SAD of this block with the input block.
int SAD = 0;
for(int j =0; j < block_size; j++)
{
for (int i=0; i < block_size; i++)
{
SAD += abs(currunt_frame[(y+j)*w+x+i] - reference_frames[(NRF-1-ref_index)*w*h+(search_y_radius+j)*w +search_x_radius+i]);
}
}
//If this block is better in SAD...
if (lowest_SAD > SAD){
lowest_SAD = SAD; // Update SAD.
motion_vector[(yb*nblock_x+xb)*3+0] = search_x_radius - x;
motion_vector[(yb*nblock_x+xb)*3+1] = search_y_radius - y;
motion_vector[(yb*nblock_x+xb)*3+2] = ref_index + 1;
}
//If there is a tie in SAD keep last change
}
}
}
}
}
}
}
//////////////////////////////////////
// Parallel block-matching kernel: one thread block per BS x BS macroblock,
// block dims (BS+2*SR, BS+2*SR, NRF).
// Phase 1: cooperatively stage each reference frame's search window
//          (out-of-frame texels filled with mid-gray 127) and the current
//          block into shared memory.
// Phase 2: every thread with ix,iy <= 2*SR accumulates the SAD of one
//          candidate displacement for one reference frame into sub_result.
// Phase 3: thread (0,0,0) scans sub_result and writes the best
//          (dx, dy, reference distance) triple for this macroblock.
// NOTE(review): the 127 padding lets this kernel score out-of-frame
// candidates that the CPU search clamps away, so border blocks can disagree
// with generate_mv_for_block -- confirm this is intended.
__global__ void d_generate_mv_one_frame( PIXEL *currunt_frame, PIXEL *reference_frames, int *motion_vector, int h, int w)
{
__shared__ PIXEL reference_blocks[NRF][BS+2*SR][BS+2*SR];
__shared__ PIXEL currunt_block [BS][BS];
__shared__ int sub_result [NRF][2*SR+1][2*SR+1];
//int nblock_y = h/BS;
int nblock_x = w/BS;
// coordinates of this macroblock's upper-left pixel
int x_block = blockIdx.x*BS;
int y_block = blockIdx.y*BS;
// thread coordinates double as candidate-displacement indices
int ix = threadIdx.x;
int iy = threadIdx.y;
int iz = threadIdx.z;
// index to read block of data to search into from reference block
int idx = x_block+ix-SR;
int idy = y_block+iy-SR;
// index to read block of data that needs processing
int idxc = x_block+ix;
int idyc = y_block+iy;
// first all threads read the inputs required
if ((idx >= 0 ) && (idx < w) && (idy >=0) && (idy < h)) //&& (iy < (BS+2*SR)) && (ix < (BS+2*SR)) )
{
reference_blocks[iz][iy][ix] = reference_frames[h*w*(NRF-1-iz)+idy*w+idx];
}
else
reference_blocks[iz][iy][ix] = 127;
// some threads also read current frame
if (iz == 0 && ix < BS && iy < BS)
{
currunt_block[iy][ix] = currunt_frame[idyc*w + idxc];
}
__syncthreads();
// second add all the abs values for each direction of movement(iy, ix) over whole block
if ((ix < 2*SR+1) && (iy < 2*SR+1))
{
sub_result[iz][iy][ix] = 0;
for (int j = 0; j < BS; j++){
for (int i =0; i < BS; i++)
{
sub_result[iz][iy][ix] += abs(currunt_block[j][i] - reference_blocks[iz][iy+j][ix+i]);
}
}
}
__syncthreads();
// third only one thread compares all SAD and selects the least one and stores motion vector
if (ix == 0 && iy ==0 && iz == 0) // only one
{
int lowest_SAD = 256 * BS * BS;
// initialize the motion vector to "no displacement, previous frame"
motion_vector[(blockIdx.y*nblock_x+blockIdx.x)*3 + 0 ] = 0;
motion_vector[(blockIdx.y*nblock_x+blockIdx.x)*3 + 1 ] = 0;
motion_vector[(blockIdx.y*nblock_x+blockIdx.x)*3 + 2 ] = 1;
for(int z = 0; z< NRF; z++){
for (int j = 0; j <= 2*SR; j++){
for (int i =0; i <= 2*SR; i++)
{
if (lowest_SAD > sub_result[z][j][i])
{
// found lower value so update motion vector
lowest_SAD = sub_result[z][j][i];
motion_vector[(blockIdx.y*nblock_x+blockIdx.x)*3 + 0 ] = i - SR;
motion_vector[(blockIdx.y*nblock_x+blockIdx.x)*3 + 1 ] = j - SR;
motion_vector[(blockIdx.y*nblock_x+blockIdx.x)*3 + 2 ] = z + 1 ;
}
}
}
}
}
}
// Host driver for the GPU motion-vector search: allocates per-frame device
// buffers, then for every frame copies the current + reference luma planes
// to the device, launches one thread block per macroblock, and copies the
// resulting motion vectors back into h_motion_vector.
// luma and h_motion_vector are pinned host allocations (cudaHostAlloc in main).
void generate_mv_for_frames_gpu (int *h_motion_vector,PIXEL *luma, int h, int w){
    int nblock_y = h/BS;
    int nblock_x = w/BS;
    PIXEL *currunt_frame;
    if ( cudaMalloc( (void **) &currunt_frame, h*w*sizeof(PIXEL)) != cudaSuccess ){
        fprintf(stderr, "Failed to allocate device vector for currunt_frame\n");
        //exit(EXIT_FAILURE);
    }
    PIXEL *reference_frames;
    if ( cudaMalloc( (void **) &reference_frames, h*w*NRF*sizeof(PIXEL)) != cudaSuccess ){
        fprintf(stderr, "Failed to allocate device vector for reference_frames\n");
        //exit(EXIT_FAILURE);
    }
    int *d_motion_vector;
    if ( cudaMalloc( (void **) &d_motion_vector, nblock_y*nblock_x*sizeof(int)*3) != cudaSuccess ){
        fprintf(stderr, "Failed to allocate device vector for d_motion_vector\n");
    }
    for (int f =NRF; f < NF; f++)
    {
        // FIX: these transfers move data between pinned HOST buffers and
        // DEVICE buffers, but were issued as cudaMemcpyDeviceToDevice.
        // That only happens to work under unified virtual addressing; use
        // the correct directions explicitly.
        if (cudaMemcpy( currunt_frame, &luma[h*w*f], h*w*sizeof(PIXEL), cudaMemcpyHostToDevice) != cudaSuccess){
            fprintf(stderr, "Failed to copy vector for currunt_frame\n");
        }
        if (cudaMemcpy(reference_frames, &luma[h*w*(f-NRF)] ,h*w*NRF*sizeof(PIXEL), cudaMemcpyHostToDevice) != cudaSuccess ) {
            fprintf(stderr, "Failed to copy vector for reference_frames\n");
        }
        // One thread block per macroblock; the block spans the search window
        // (BS+2*SR)^2 across all NRF reference frames.
        dim3 block(2*SR+BS, 2*SR+BS, NRF);
        dim3 grid(nblock_x,nblock_y) ;
        d_generate_mv_one_frame<<<grid, block>>>( currunt_frame, reference_frames, d_motion_vector, h, w);
        cudaDeviceSynchronize() ;
        if (cudaMemcpy(&h_motion_vector[nblock_y*nblock_x*f*3], d_motion_vector,nblock_y*nblock_x*sizeof(int)*3, cudaMemcpyDeviceToHost) != cudaSuccess ) {
            fprintf(stderr, "Failed to copy vector for motion_vector \n");
        }
    }
    cudaFree(currunt_frame );
    cudaFree(reference_frames);
    cudaFree(d_motion_vector);
}
/////////////////////////SHARED FUNCTIONS///////////////////////////////////
// Rebuilds frames NRF..NF-1 by copying, for each macroblock, the BS x BS
// patch selected by its motion vector (mvx, mvy, reference distance mvf)
// from the original luma, one row per memcpy.
// NOTE(review): destination indexing uses the global W/H macros while the
// source uses the w/h parameters -- equivalent only when w == W and h == H,
// as in main(); confirm before padding to other sizes.
void reconstruct_frames(PIXEL *reconstructed,PIXEL *luma,int *motion_vector, int h, int w){
int nblock_x = w/BS;
int nblock_y = h/BS;
for (int f = NRF; f < NF; f++)
{
for (int y = 0; y < nblock_y; y++)
{
for (int x = 0; x < nblock_x; x++)
{
int mvx = motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0];
int mvy = motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1];
int mvf = motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2];
for(int j =0; j < BS; j++)
{
//copy row by row (size of block)
memcpy(&reconstructed[f*W*H + W*(y*BS+j) + x*BS] ,&luma[(f-mvf)*w*h + w*(mvy+y*BS+j) + mvx+x*BS], BS*sizeof(PIXEL));
}
}
}
}
}
// Reads NF frames of 4:2:0 YUV from fid: each frame's Y plane goes into a
// temporary (native W x H layout) and its chroma samples into crb; a second
// pass re-lays each Y row into the (possibly padded) w-wide luma buffer.
// NOTE(review): fread return values are unchecked, so a short file silently
// leaves stale data in place.
void read_luma(FILE *fid, PIXEL *luma, PIXEL *crb, int h, int w){
PIXEL *temp= (PIXEL*) malloc(H*W*NF*sizeof(PIXEL)); if (temp== NULL) fprintf(stderr, "Bad malloc on temp\n");
for (int f=0; f<NF; f++) {
fread (&temp[H*W*f] ,1,W*H, fid);
fread (&crb [W*H/2*f],1,W*H/2,fid);
//fseek (fid,W*H/2,SEEK_CUR); //seek cb and cr
}
// Re-lay each row so luma rows are w pixels wide (w may exceed W if padded).
for (int f=0; f<NF; f++) {
for ( int idh =0; idh < H; idh++){
memcpy(&luma[h*w*f+idh*w] , &temp[H*W*f+idh*W], W*sizeof(PIXEL));
}
}
free(temp);
}
// Streams the reconstructed luma planes to fid, re-interleaving each frame's
// stored chroma samples (crb) so the output is a playable 4:2:0 YUV file.
void write_yuv(FILE *fid, PIXEL *reconstructed, PIXEL *crb){
    for (int f = 0; f < NF; f++) {
        PIXEL *y_plane = &reconstructed[W*H*f];
        PIXEL *c_plane = &crb[W*H/2*f];
        fwrite(y_plane, 1, W*H, fid);
        fwrite(c_plane, 1, W*H/2, fid);
    }
}
// Initializes the whole (possibly padded) luma buffer to mid-gray (127) so
// regions outside the real frame data hold a neutral value.
void pad_luma(PIXEL *luma,int height,int width){
    size_t total = (size_t)height * width * NF;
    for (size_t i = 0; i < total; i++)
        luma[i] = 127;
}
//////////////////////////////////////////////////////////////////////////////
// End-to-end driver: reads an NF-frame CIF YUV sequence, runs the CPU and GPU
// motion-vector searches, reports timing/speedup, then reconstructs and
// writes both versions of the sequence for comparison.
int main()
{
// take of available refrence frames
// CIF format
int nblock_x = W/BS; // (W+(BS-1))/BS;
int nblock_y = H/BS; // (H+(BS-1))/BS;
int height = nblock_y*BS;
int width = nblock_x*BS;
int number_frames = NF;
////////////////////////////////////
// NOTE(review): fopen results are not checked before use.
FILE *fid_in = fopen("akiyo_cif.yuv","rb");
FILE *fid_out = fopen("akiyo_cif_constructed.yuv","wb");
FILE *h_fid_out = fopen("akiyo_cif_constructed_GPU.yuv","wb");
// Pinned (page-locked) host buffers speed up the repeated cudaMemcpy calls.
// NOTE(review): cudaHostAllocWriteCombined memory is slow for host reads,
// yet both pinned buffers are read back on the host below -- confirm flag.
int *h_motion_vector; //= (int *) malloc(nblock_x*nblock_y*NF*3 *sizeof(int)); if (h_motion_vector == NULL) fprintf(stderr, "Bad malloc on h_motion_vector \n");
if ( cudaHostAlloc( (void**)&h_motion_vector ,nblock_x*nblock_y*NF*3*sizeof(int), cudaHostAllocWriteCombined) != cudaSuccess )
fprintf(stderr, "Bad malloc on h_motion_vector \n");
PIXEL *h_reconstructed= (PIXEL *) malloc(height*width *NF*sizeof(PIXEL)); if (h_reconstructed == NULL) fprintf(stderr, "Bad malloc on h_reconstructed \n");
PIXEL *luma ;// = (PIXEL *) malloc(height*width*number_frames*sizeof(PIXEL)); if (luma == NULL) fprintf(stderr, "Bad malloc on luma \n");
if ( cudaHostAlloc( (void**)&luma ,height*width*number_frames*sizeof(PIXEL), cudaHostAllocWriteCombined) != cudaSuccess )
fprintf(stderr, "Bad malloc on luma \n");
int *motion_vector = (int * ) malloc(nblock_x*nblock_y*NF*3 *sizeof(int)); if (motion_vector == NULL) fprintf(stderr, "Bad malloc on motion_vector \n");
PIXEL *crb = (PIXEL *) malloc(H*W/2*number_frames *sizeof(PIXEL)); if (crb == NULL) fprintf(stderr, "Bad malloc on crb \n");
PIXEL *reconstructed = (PIXEL *) malloc(H*W*number_frames *sizeof(PIXEL)); if (reconstructed == NULL) fprintf(stderr, "Bad malloc on reconstructed \n");
// Fill padding with neutral gray, then load the real frames over it.
pad_luma (luma, height, width);
read_luma(fid_in,luma,crb,height,width);
fclose(fid_in);
// Time CPU search, then GPU search, on the same input.
double timeStampA = getTimeStamp() ;
generate_mv_for_frames_cpu(motion_vector ,luma, height, width);
double timeStampB= getTimeStamp() ;
generate_mv_for_frames_gpu(h_motion_vector,luma, height ,width);
double timeStampC= getTimeStamp() ;
// Rough GPU read volume: shared-memory staging per block times frame count.
int totalreads = (NRF*(BS+2*SR)*(BS+2*SR) +BS*BS)*NF*sizeof(PIXEL);
printf("total CPU time = %.6f\n", timeStampB - timeStampA);
printf("total GPU time = %.6f\n", timeStampC - timeStampB);
printf("SpeedUP = %.3f\n", (timeStampB - timeStampA)/(timeStampC - timeStampB));
printf("BandWidth = %.3f\n", totalreads/(timeStampC - timeStampB)/1000);
#ifdef ENABLE_PRINT
// Optional diff of CPU vs GPU motion vectors, printed only on mismatch.
//printf("motion_vector\n");
for (int f =NRF; f < NF; f++)
{
for (int y = 0; y < nblock_y; y++)
{
for (int x = 0; x < nblock_x; x++)
{
if ((motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0]-h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0] != 0) || (motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1]-h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1] != 0) || (motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2]-h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2] != 0))
//if ((motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0] != 0) && (motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1] != 0) && (motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2] != 1))
printf("-->frame %d at y = %d and x = %d mv = (%d,%d,%d) and h_mv = (%d,%d,%d) \n", f, y*BS, x*BS,
motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2] , motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1], motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0],
h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2] , h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1], h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0]);
//else
// printf("frame %d at y = %d and x = %d mv = (%d,%d,%d) and h_mv = (%d,%d,%d) \n", f, y*BS, x*BS,
// motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2] , motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1], motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0],
// h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+2] , h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+1], h_motion_vector[(nblock_y*nblock_x*f + y*nblock_x + x)*3+0]);
}
}
}
#endif
// Reconstruct and write the CPU result ...
reconstruct_frames(reconstructed,luma,motion_vector,height,width);
write_yuv(fid_out,reconstructed,crb);
fclose(fid_out);
////////DEVICE///////////////
// ... then the GPU result.
reconstruct_frames(h_reconstructed,luma,h_motion_vector, height, width);
write_yuv(h_fid_out,h_reconstructed,crb);
fclose(h_fid_out);
//////////////////////////////
cudaFreeHost(luma );
free(reconstructed);
free(h_reconstructed);
free(motion_vector );
cudaFreeHost(h_motion_vector);
free(crb);
} |
2,900 | #include <cuda_runtime.h>
#include<cassert>
#include<sys/time.h>
#include<time.h>
#include<stdio.h>
#include<string>
#include<sstream>
// Wraps a CUDA call with file/line-aware error reporting.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the decoded CUDA error and (by default) exits when code != cudaSuccess.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert (%d): %s (%s, line %d)\n", code, cudaGetErrorString(code), file, line);
// Exit with the numeric error code so scripts can distinguish causes.
if (abort) exit(code);
}
}
// Computes *result = *t2 - *t1 (t2 the later sample) with microsecond
// resolution. Returns 1 if the difference was negative, 0 otherwise.
int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1)
{
    const unsigned int usec_per_sec = 1000000;
    long int t1_us = t1->tv_usec + usec_per_sec * t1->tv_sec;
    long int t2_us = t2->tv_usec + usec_per_sec * t2->tv_sec;
    long int delta = t2_us - t1_us;
    result->tv_sec = delta / usec_per_sec;
    result->tv_usec = delta % usec_per_sec;
    return delta < 0;
}
// Eight unsigned ints held in individually named scalar fields, indexable
// like an array via operator[]. Counterpart of MyInt8Array for comparing how
// the compiler handles dynamic indexing of scalars vs a real array.
class MyInt8Tuple {
    unsigned int x0; unsigned int x1; unsigned int x2; unsigned int x3;
    unsigned int x4; unsigned int x5; unsigned int x6; unsigned int x7;
public:
    // Zero-initializes all eight fields.
    __device__ __host__ inline MyInt8Tuple()
        : x0(0), x1(0), x2(0), x3(0), x4(0), x5(0), x6(0), x7(0) {
    }
    // Returns a reference to field i (0..7); asserts when i is out of range.
    __device__ __host__ inline unsigned int& operator[](const unsigned int i) {
        assert(i < 8);
        switch (i) {
            case 0:  return x0;
            case 1:  return x1;
            case 2:  return x2;
            case 3:  return x3;
            case 4:  return x4;
            case 5:  return x5;
            case 6:  return x6;
            default: return x7; // i == 7
        }
    }
};
// Eight unsigned ints in a plain C array, the baseline compared against
// MyInt8Tuple in the benchmark below.
class MyInt8Array {
unsigned int arr[8];
public:
// Value-initializes the array to all zeros.
__device__ __host__ inline MyInt8Array()
: arr() {
}
// Returns a reference to element i (0..7); asserts when i is out of range.
__device__ __host__ unsigned int& operator[](const unsigned int i) {
assert(i < 8);
return arr[i];
}
};
// Fills in_array[0..num_elems) with pseudo-random values in [0, 20) drawn
// from std::rand; seeding (if any) is the caller's responsibility.
void populateIntArray(const unsigned int num_elems,
                      unsigned int* in_array
                      ) {
    unsigned int i = 0;
    while (i < num_elems) {
        in_array[i] = std::rand() % 20;
        ++i;
    }
}
// Prints "title:      [a, b, ...]" to stderr for any indexable collection
// whose elements print sensibly with %2d.
template<typename T>
void printIntCollection(unsigned int length, std::string title, T collection) {
    fprintf(stderr, "%-12s [", (title + ":").c_str());
    for (unsigned int i = 0; i < length; i++) {
        // First element has no separator; the rest are comma-prefixed.
        fprintf(stderr, (i == 0) ? "%2d" : ", %2d", collection[i]);
    }
    fprintf(stderr, "]\n");
}
#define NUM_ITERATIONS 50
// Benchmark kernel: each thread builds a zero-initialized 8-slot IntType,
// increments the slot chosen by indices[threadIdx.x & 7] NUM_ITERATIONS
// times, then stores the whole value. Measures the dynamic-indexing cost of
// the IntType representation (named scalars vs real array).
template<typename IntType>
__global__ void testKernel(
// IntType* in_array,
IntType* out_array,
unsigned int* indices,
const unsigned int num_elems
) {
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if (gid < num_elems) {
// Lane-within-8 selects which of the 8 host-supplied indices to use.
const unsigned int lane8 = threadIdx.x & 7;
const unsigned int index = indices[lane8];
IntType elem;
for (unsigned int i = 0; i < NUM_ITERATIONS; i++) {
elem[index]++;
}
out_array[gid] = elem;
}
}
#define BLOCK_SIZE 512
// Times one testKernel<IntType> launch over num_elems elements and prints the
// runtime to stderr. i0..i7 are the per-lane slot indices copied to the
// device; a random output element can be printed when compiled with -DPRINT.
template<typename IntType>
void test(const unsigned int num_elems,
          const unsigned int i0,
          const unsigned int i1,
          const unsigned int i2,
          const unsigned int i3,
          const unsigned int i4,
          const unsigned int i5,
          const unsigned int i6,
          const unsigned int i7
          ) {
    struct timeval t_start, t_end, t_diff;
    unsigned long int elapsed;
    IntType* out_array = (IntType*) malloc(num_elems * sizeof(IntType));
    unsigned int indices[8] = {i0,i1,i2,i3,i4,i5,i6,i7};
    IntType *out_array_d;
    unsigned int *indices_d;
    gpuErrchk( cudaMalloc((void**)&out_array_d, num_elems * sizeof(IntType)) );
    gpuErrchk( cudaMalloc((void**)&indices_d, 8 * sizeof(unsigned int)) );
    gpuErrchk( cudaMemset(out_array_d, 0, num_elems * sizeof(IntType)) );
    gpuErrchk( cudaMemcpy(indices_d, indices, 8 * sizeof(unsigned int), cudaMemcpyHostToDevice) );
    // FIX: cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
    // cudaDeviceSynchronize() is the supported equivalent.
    gpuErrchk( cudaDeviceSynchronize() );
    const unsigned int num_blocks = (num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // Time only the kernel: sync before starting and before stopping the clock.
    gettimeofday(&t_start, NULL);
    testKernel<IntType><<<num_blocks, BLOCK_SIZE>>>(out_array_d, indices_d, num_elems);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
    gettimeofday(&t_end, NULL);
    gpuErrchk( cudaMemcpy(out_array, out_array_d, num_elems * sizeof(IntType), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaFree(out_array_d) );
    gpuErrchk( cudaFree(indices_d) );
    timeval_subtract(&t_diff, &t_end, &t_start);
    elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
    fprintf(stderr, "Total runtime: %6lu microsecs\n", elapsed);
    // Spot-check: pick a random element for optional printing.
    std::srand(time(NULL));
    unsigned int index = std::rand() % num_elems;
    std::stringstream sstm;
    sstm << "Random element (" << index << ")";
    std::string s = sstm.str();
#ifdef PRINT
    printIntCollection(8, s, out_array[index]);
#endif
    free(out_array);
}
// Parses <num_elems> plus eight slot indices from the command line, then
// benchmarks testKernel with the tuple-of-scalars representation and the
// plain-array representation so their dynamic-indexing costs can be compared.
int main(int argc, char** argv) {
if (argc != 10) {
printf("The program takes <num_elems> and eight indices as arguments!\n");
return EXIT_FAILURE;
}
// strtoul base 10; no further validation of the parsed values.
const unsigned int num_elems = strtoul(argv[1], NULL, 10);
const unsigned int i0 = strtoul(argv[2], NULL, 10);
const unsigned int i1 = strtoul(argv[3], NULL, 10);
const unsigned int i2 = strtoul(argv[4], NULL, 10);
const unsigned int i3 = strtoul(argv[5], NULL, 10);
const unsigned int i4 = strtoul(argv[6], NULL, 10);
const unsigned int i5 = strtoul(argv[7], NULL, 10);
const unsigned int i6 = strtoul(argv[8], NULL, 10);
const unsigned int i7 = strtoul(argv[9], NULL, 10);
fprintf(stderr, "MyInt8Tuple:\n");
test<MyInt8Tuple>(num_elems,i0,i1,i2,i3,i4,i5,i6,i7);
fprintf(stderr, "MyInt8Array:\n");
test<MyInt8Array>(num_elems,i0,i1,i2,i3,i4,i5,i6,i7);
return EXIT_SUCCESS;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.