serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
3,301 | __global__ void create_quote_index(char *file, long n, long *escape_index, long *quote_index, char *quote_carry_index, long quote_index_size) {
    // Builds a bitmap (64 characters per long) marking every unescaped '"'
    // in `file`, and records each thread's quote-count parity so a later
    // pass can resolve in-string state across thread boundaries.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // We want to always calculate on 64-character boundaries, such that we can put
    // all bits of 64 characters into 1 long.
    long normal_chars_per_thread = (n+stride-1) / stride;
    long chars_per_thread = ((normal_chars_per_thread + 64 - 1) / 64) * 64;
    long start = index * chars_per_thread;
    long end = start + chars_per_thread;
    // This will contain the bitmask of escaped characters
    long escaped = 0;
    // Temporary variable for storing the current bit index
    long bit_index = 0;
    int quote_count = 0;
    // BUG FIX: this was declared `int`, silently truncating `end` (a long)
    // for inputs larger than 2^31 characters.
    long final_loop_iteration = end;
    if (n < end) {
        final_loop_iteration = n;
    }
    for (long i = start; i < final_loop_iteration; i += 1) {
        long offsetInBlock = i % 64;
        // At the start of each boundary (including the first), set the escaped characters
        if (offsetInBlock == 0) {
            escaped = escape_index[i / 64];
        }
        if (file[i] == '"') {
            if ((escaped & (1L << offsetInBlock)) == 0) {
                bit_index = bit_index | (1L << offsetInBlock);
                quote_count++;
            }
        }
        // If we are at the end of a boundary, set our result. We do not do it
        // if we are at the end since that would reset our bit_index.
        if (offsetInBlock == 63L) {
            quote_index[i / 64] = bit_index;
            // Reset the bit index since we're starting over
            bit_index = 0;
        }
    }
    if (n < end && (final_loop_iteration - 1) % 64 != 63L && n - start > 0) {
        // In the final thread with data, flush the partially filled word.
        // BUG FIX: long, not int — the word index can exceed INT_MAX.
        long final_index = (final_loop_iteration - 1) / 64;
        quote_index[final_index] = bit_index;
    }
    // Parity of quotes seen by this thread (0 = even, 1 = odd).
    quote_carry_index[index] = quote_count & 1;
}
|
3,302 | #include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Element-wise kernel for the streamed path: writes C[i] = A[i] - B[i] for
// the segment starting at `offset` (despite the "Add" name, this subtracts,
// matching the CPU reference loop in svecAdd).
__global__ void AsyncvecAddK(int *A, int *B, int *C, int len, int offset)
{
    int gid = offset + blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= len) return;
    C[gid] = A[gid] - B[gid];
}
// Same element-wise subtraction, without the stream segment offset.
__global__ void vecAddK(int *A, int *B, int *C, int len)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= len) return;
    C[gid] = A[gid] - B[gid];
}
// Fills a[0..l-1] with pseudo-random digits in [0, 9] such that no two
// adjacent elements are equal. Uses rand(); call srand() beforehand if a
// non-deterministic sequence is wanted (the original called time(NULL) and
// discarded the result, which seeded nothing).
void populateArray(int a[], int l){
    if (l <= 0) return;
    int prev = rand() % 10;
    // BUG FIX: the first element was never written (the loop started at 1),
    // leaving a[0] uninitialized for every caller.
    a[0] = prev;
    int nxt;
    for(int i = 1; i < l; i++){
        do{
            nxt = rand() % 10;
        }while(nxt==prev);
        a[i] = nxt;
        prev = nxt;
    }
}
// Benchmark harness: times the streamed (multi-stream, "async") GPU path, a
// single synchronous GPU launch, and a CPU loop over the same vectors.
// Note: despite the "Add" names, the kernels compute C = A - B, matching the
// CPU reference loop below.
__host__
void svecAdd(){
    int const items = 1;
    int const len = 1024*1024;
    int const nStreams = 4;
    ///Device query boilerplate
    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess)
    {
        printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    // This function call returns 0 if there are no CUDA capable devices.
    if (deviceCount == 0)
    {
        printf("There are no available device(s) that support CUDA\n");
        return;
    }
    else
    {
        printf("Detected %d CUDA Capable device(s)\n", deviceCount);
    }
    // Pick the device with the most SMs and remember its launch limits.
    int fastestDevice = 0;
    int fastestSpeed = 0;
    int bx = 0; // max threads per block (x)
    int gx = 0; // max grid size (x)
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaSetDevice(dev);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        int speed = deviceProp.multiProcessorCount;
        if(speed > fastestSpeed){
            fastestDevice = dev;
            fastestSpeed = speed;
            bx = deviceProp.maxThreadsDim[0];
            gx = deviceProp.maxGridSize[0];
        }
    }
    cudaSetDevice(fastestDevice);
    // Grow the block size until the grid can cover the whole vector.
    int BLOCK = 256;
    while(BLOCK * gx < len && BLOCK < bx){///While current block size is too small
        BLOCK *= 2;
    }
    int size = len*sizeof(int);
    cudaStream_t stream[nStreams];
    int * dA;
    int * hA;
    int * dB;
    int * hB;
    int * dC;
    int * hC;
    ///Create streams; allocate pinned host memory and device memory for one vector
    for (int i = 0; i < nStreams; ++i){
        cudaStreamCreate(&stream[i]);
    }
    cudaMallocHost((void**)&hA, size);
    cudaMalloc((void **) &dA, size);
    cudaMalloc((void **) &dB, size);
    cudaMallocHost((void**)&hB, size);
    cudaMalloc((void **) &dC, size);
    cudaMallocHost((void**)&hC, size);
    float gms = 0.0; //Time for all Asynch GPU
    float sgms = 0.0; //Time for all synch GPU
    float cms = 0.0; //Time for all CPU
    cudaEvent_t startEvent, stopEvent;
    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);
    int segSize = len/nStreams;
    dim3 DimGrid = (segSize-1)/BLOCK + 1;
    dim3 DimBlock = BLOCK;
    for(int h = 0; h < items; h++){
        populateArray(hA, len);
        populateArray(hB, len);
        float ms;
        cudaEventRecord(startEvent,0);
        for(int i = 0; i < nStreams; i++){ //transfer and compute with segment size
            int offset = i * segSize;
            cudaMemcpyAsync(&dA[offset], &hA[offset], segSize*sizeof(int), cudaMemcpyHostToDevice, stream[i]);
            cudaMemcpyAsync(&dB[offset], &hB[offset], segSize*sizeof(int), cudaMemcpyHostToDevice, stream[i]);
            AsyncvecAddK<<<DimGrid, DimBlock, 0 , stream[i%nStreams]>>>(dA,dB,dC,len, offset);
            cudaMemcpyAsync(&hC[offset], &dC[offset], segSize*sizeof(int), cudaMemcpyDeviceToHost, stream[i]);
            // NOTE(review): synchronizing inside the loop serializes the
            // streams and defeats the copy/compute overlap being measured;
            // kept to preserve the original measurement semantics.
            cudaStreamSynchronize(stream[i]);
        }
        cudaEventRecord(stopEvent, 0);
        cudaEventSynchronize(stopEvent);
        cudaEventElapsedTime(&ms, startEvent, stopEvent);
        gms+=ms;
        ms = 0.0;
        dim3 DimSGrid((len-1)/BLOCK + 1);
        dim3 DimSBlock(BLOCK);
        cudaEventRecord(startEvent,0);
        cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
        vecAddK<<<DimSGrid, DimSBlock>>>(dA, dB,dC, len);
        cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
        cudaEventRecord(stopEvent, 0);
        cudaEventSynchronize(stopEvent);
        cudaEventElapsedTime(&ms, startEvent, stopEvent);
        sgms+=ms;
        time_t start, end;
        time(&start);
        for(int j = 0; j < len; j++){//cpu
            hC[j]=hA[j]-hB[j];
        }
        time(&end);
        cms += (float) difftime(end, start)*1000;
    }
    // BUG FIX: the streamed (gms) and synchronous (sgms) totals were printed
    // under each other's labels.
    printf("Async GPU: %f\nGPU: %f\nCPU: %f\n", gms / (float) items, sgms / (float) items, cms / (float) items);
    cudaFree(dA);cudaFree(dB);cudaFree(dC);
    // BUG FIX: pinned host buffers and the timing events were never released.
    cudaFreeHost(hA);cudaFreeHost(hB);cudaFreeHost(hC);
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);
    for (int i = 0; i < nStreams; ++i)cudaStreamDestroy(stream[i]);
}
// Entry point: run the streamed-vs-synchronous vector benchmark once.
int main(){
    svecAdd();
    return 0;
}
|
3,303 | #include <stdio.h>
// c[i] = a[i] + b[i], one thread per element with a tail guard for the
// final partially-filled block.
__global__ void vecAdd(float *a, float *b, float *c, int N)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Vector add on the GPU, verified element-for-element against a CPU
// reference computed up front.
int main()
{
    int N = 1024 * 1024;
    size_t size = N * sizeof(float);
    float *ha = (float *) malloc(size);
    float *hb = (float *) malloc(size);
    float *hc = (float *) malloc(size);
    float *hc_check = (float *) malloc(size);
    // Fill inputs and precompute the expected sums.
    for (int i = 0; i < N; i++) {
        ha[i] = i;
        hb[i] = i + 1;
        hc_check[i] = ha[i] + hb[i];
    }
    float *da;
    cudaMalloc(&da, size);
    float *db;
    cudaMalloc(&db, size);
    float *dc;
    cudaMalloc(&dc, size);
    cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; // ceil-div
    vecAdd<<<blocksPerGrid, threadsPerBlock>>>(da, db, dc, N);
    // The blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    int cmp = memcmp(hc_check, hc, size);
    if (cmp == 0) {
        printf("Arrays are equal.\n");
    } else {
        printf("Arrays are not equal.\n");
    }
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    free(ha);
    free(hb);
    free(hc);
    // BUG FIX: the reference buffer was leaked.
    free(hc_check);
    return 0;
}
|
3,304 | #include "includes.h"
// Zero out vec[i] wherever bin[i] exceeds k_bin; one thread per element.
__global__ void threshold(float *vec, int *bin, const int k_bin, const int n)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the original used bitwise '&', which evaluates BOTH operands
    // and therefore read bin[xIndex] out of bounds for threads with
    // xIndex >= n. Logical '&&' short-circuits the bounds check.
    if ( (xIndex < n) && (bin[xIndex]>k_bin) )
        vec[xIndex]=0.0f;
} |
3,305 | #include "includes.h"
// Naive dense matrix multiply P = M * N for square width x width matrices.
// One thread computes one output element; out-of-range threads exit early.
__global__ void matrixMul(float *M, float *N, float *P, int width)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= width || col >= width)
        return;
    float acc = 0;
    for (int k = 0; k < width; ++k)
        acc += M[row * width + k] * N[k * width + col];
    P[row * width + col] = acc;
} |
3,306 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Node of the lock-per-node concurrent AVL tree below (device-heap allocated).
typedef struct node {
int data; // key
struct node *parent;
struct node *left;
struct node *right;
int height; // subtree height; a leaf has height 1
int sema; // per-node binary semaphore: 0 = free, 1 = held (see lock/unlock)
} node;
// Root of the tree shared by all device threads; replaced by rebalance().
__device__ node* global_root = NULL;
// Global flag raised around a rebalance; lock() refuses new node locks
// while it is set.
__device__ volatile int MASTER_LOCK = 0;
// Try to acquire node n's semaphore; returns 1 on success, 0 on failure.
// BUG FIX: the original computed `!status && !MASTER_LOCK`, so when the
// exchange succeeded (status == 0) but MASTER_LOCK was raised it reported
// failure while leaving n->sema set to 1 — permanently leaking the node
// lock. Release the semaphore before backing off in that case.
__device__ int lock(node* n) {
    int status = atomicExch(&n->sema, 1);
    if (status)
        return 0; // someone else already holds this node
    if (MASTER_LOCK) {
        atomicExch(&n->sema, 0); // back off while a rebalance is in flight
        return 0;
    }
    return 1;
}
// Release node n's semaphore.
__device__ void unlock(node* n) {
    atomicExch(&n->sema, 0);
}
// Allocate a detached leaf node with the given key and parent on the device
// heap. NOTE(review): device-side malloc may return NULL when the heap is
// exhausted; callers do not check for that.
__device__ node* new_node(int val, node* parent) {
node *tmp = (node *) malloc(sizeof(node));
tmp->data = val;
tmp->parent = parent;
tmp->left = tmp->right = NULL;
tmp->height = 1;
tmp->sema = 0;
return tmp;
}
// Standard recursive BST search; returns the node with `key` or NULL.
// NOTE(review): the traversal takes no locks, so a concurrent rotation can
// invalidate the path — confirm callers only run this when quiescent.
__device__ node* find(node* root, int key) {
if (root == NULL) return NULL;
if (root->data == key) return root;
else if (root->data > key) return find(root->left, key);
else return find(root->right, key);
}
// Height of a subtree; an empty subtree counts as 0.
__device__ int height(node *root)
{
    return root ? root->height : 0;
}
// AVL balance factor: left height minus right height (0 for NULL).
__device__ int get_balance(node *root)
{
    return root ? height(root->left) - height(root->right) : 0;
}
// AVL left rotation: promotes root->right to subtree root and demotes root
// to its left child, fixing parent pointers and heights. Returns the new
// subtree root already re-parented to `parent`; the caller must update the
// parent's child pointer (or global_root).
__device__ node* left_rotate(node* root, node* parent)
{
node* temp1 = root->right;
node* temp2 = temp1->left;
temp1->left = root;
root->parent = temp1;
root->right = temp2;
if (temp2)
temp2->parent = root;
// Recompute heights bottom-up: the demoted node first, then the new root.
root->height = max(height(root->left), height(root->right))+1;
temp1->height = max(height(temp1->left), height(temp1->right))+1;
temp1->parent = parent;
return temp1;
}
// Mirror image of left_rotate: promotes root->left.
__device__ node* right_rotate(node* root, node* parent)
{
node* temp1 = root->left;
node* temp2 = temp1->right;
temp1->right = root;
root->parent = temp1;
root->left = temp2;
if(temp2)
temp2->parent = root;
root->height = max(height(root->left), height(root->right))+1;
temp1->height = max(height(temp1->left), height(temp1->right))+1;
temp1->parent = parent;
return temp1;
}
// Restore the AVL invariant on the path from `root` (an ancestor of a freshly
// inserted leaf with key `key`) up to the tree root. Applies at most one
// single/double rotation at the first unbalanced ancestor; otherwise recurses
// to the parent. Callers hold MASTER_LOCK, so only one rebalance runs at a time.
__device__ void rebalance(node* root, int key) {
root->height = max(height(root->left),height(root->right)) + 1;
int balance = get_balance(root);
// Left Left Case
node* p = root->parent;
if (balance > 1 && key < root->left->data) {
if (p) {
// Re-attach the rotated subtree on the correct side of the parent.
if (root->data < p->data)
p->left = right_rotate(root, p);
else
p->right = right_rotate(root, p);
}
else
// NOTE(review): passing global_root as the new root's parent looks
// suspicious — the tree root's parent should presumably be NULL; confirm.
global_root = right_rotate(root, global_root);
}
// Right Right Case
else if (balance < -1 && key > root->right->data) {
if (p) {
if (root->data < p->data)
p->left = left_rotate(root, p);
else
p->right = left_rotate(root, p);
}
else
global_root = left_rotate(root, global_root);
}
// Left Right Case
else if (balance > 1 && key > root->left->data) {
// Double rotation: first rotate the child left, then this node right.
root->left = left_rotate(root->left, root);
if (p) {
if (root->data < p->data)
p->left = right_rotate(root, p);
else
p->right = right_rotate(root, p);
}
else
global_root = right_rotate(root, global_root);
}
// Right Left Case
else if (balance < -1 && key < root->right->data)
{
root->right = right_rotate(root->right, root);
if (p) {
if (root->data < p->data)
p->left = left_rotate(root, p);
else
p->right = left_rotate(root, p);
}
else
global_root = left_rotate(root, global_root);
}
else {
// Balanced at this node: continue checking up the tree.
if (root->parent)
rebalance(root->parent, key);
}
return;
}
// Concurrent BST insert with per-node locking: lock the current node, link a
// new leaf if the target slot is free, otherwise release and descend. After
// linking, take MASTER_LOCK (spin on atomicExch) and rebalance.
// NOTE(review): the empty-tree branch only assigns the local `root` copy —
// the new node is never attached to global_root and is leaked; confirm intended.
// NOTE(review): the lock-failure path retries via unbounded recursion, which
// can exhaust the device call stack under contention.
__device__ void insert(node* root, int key) {
if (root == NULL) { // Empty Tree
root = new_node(key, NULL);
return;
}
int acquired = lock(root);
if (acquired) {
if (key < root->data) {
if (root->left == NULL) { // Can be inserted to the immediate left
root->left = new_node(key, root);
unlock(root);
// Spin until MASTER_LOCK is ours, then rebalance single-threaded.
while (!atomicExch((int*)&MASTER_LOCK, 1));
rebalance(root, key);
atomicExch((int*)&MASTER_LOCK, 0);
return;
} else { // Release this Node and proceed
unlock(root);
insert(root->left, key);
}
} else {
if (root->right == NULL) { // Can be inserted to the immediate right
root->right = new_node(key, root);
unlock(root);
while (!atomicExch((int*)&MASTER_LOCK, 1));
rebalance(root, key);
atomicExch((int*)&MASTER_LOCK, 0);
return;
} else {
unlock(root); // Release this Node and proceed
insert(root->right, key);
}
}
} else {
// Lock not acquired: retry the same node.
insert(root, key);
}
}
// Depth-first pre-order dump of the tree (debugging aid).
__device__ void pre_order(node* root)
{
    if (root == NULL)
        return;
    printf("%d ", root->data);
    pre_order(root->left);
    pre_order(root->right);
}
// Depth-first in-order dump: prints keys in ascending order.
__device__ void in_order(node* root)
{
    if (root == NULL)
        return;
    in_order(root->left);
    printf("%d ", root->data);
    in_order(root->right);
}
|
3,307 | #include "includes.h"
// One block row (blockIdx.y) per image descriptor: each thread compares that
// 128-float descriptor against one centroid, then the block max-reduces to
// the best cosine similarity and its centroid index, written to temp.
__global__ void createHistCuda (float* siftCentroids, float* siftImage, int linesCent, int linesIm, float* temp)
{
    __shared__ float cosines[BLOCK_SIZE][2];
    size_t idx = blockIdx.x*blockDim.x + threadIdx.x;
    size_t idy = blockIdx.y;
    size_t tid = threadIdx.x;
    // BUG FIX: every thread must write its slot and reach the barrier below.
    // The original kept __syncthreads() inside `if (idx < linesCent)`, which
    // is undefined behavior when a block straddles the range boundary, and it
    // left out-of-range slots uninitialized for the reduction to read.
    cosines[tid][0] = -2.0f; // below any real cosine similarity
    cosines[tid][1] = idx;
    if(idx < linesCent){
        int centin = idx * 128;
        int imin = idy * 128;
        //Cosine similarity code ------------
        float sumab = 0;
        float suma2 = 0;
        float sumb2 = 0;
        for(int k = 0; k < 128; k++){
            sumab += siftCentroids[centin + k] * siftImage[imin + k];
            suma2 += siftImage[imin + k] * siftImage[imin + k];
            sumb2 += siftCentroids[centin + k] * siftCentroids[centin + k];
        }
        // BUG FIX: cosine similarity divides by the PRODUCT of the norms;
        // the original divided one norm by the other.
        float cossim = sumab/(sqrtf(suma2)*sqrtf(sumb2));
        cosines[tid][0] = cossim;
        cosines[tid][1] = idx;
    }
    __syncthreads();
    // Tree reduction: keep the larger cosine (and its index) in slot tid.
    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (tid < s){
            size_t tid2 = tid + s;
            if(cosines[tid2][0] > cosines[tid][0]){
                cosines[tid][0] = cosines[tid2][0];
                cosines[tid][1] = cosines[tid2][1];
            }
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's best match.
    if (tid == 0){
        temp[(blockIdx.y*gridDim.x + blockIdx.x)*2] = cosines[0][0];
        temp[(blockIdx.y*gridDim.x + blockIdx.x)*2+1] = cosines[0][1];
    }
} |
3,308 | #include <bits/stdc++.h>
#include <unistd.h>
#include <cuda.h>
template <typename Iter>
void cooley_tukey(Iter first, Iter last) {
auto size = last - first;
if (size >= 2) {
auto temp = std::vector<std::complex<double>>(size / 2);
for (int i = 0; i < size / 2; ++i) {
temp[i] = first[i * 2 + 1];
first[i] = first[i * 2];
}
for (int i = 0; i < size / 2; ++i) {
first[i + size / 2] = temp[i];
}
auto split = first + size / 2;
cooley_tukey(first, split);
cooley_tukey(split, last);
for (int k = 0; k < size / 2; ++k) {
auto w = std::exp(std::complex<double>(0, -2.0 * M_PI * k / size));
auto& bottom = first[k];
auto& top = first[k + size / 2];
top = bottom - w * top;
bottom -= top - bottom;
}
}
}
// Index-based variant of the recursive radix-2 FFT: transforms
// data[left, right) in place. (right - left) must be a power of two.
void mod_cooley_tukey(std::complex<double>* data, size_t left, size_t right) {
    size_t size = right - left;
    if (size < 2)
        return;
    // Even/odd split within the window.
    std::vector<std::complex<double>> scratch(size / 2);
    for (size_t i = 0; i < size / 2; ++i) {
        scratch[i] = data[left + 2 * i + 1];
        data[left + i] = data[left + 2 * i];
    }
    for (size_t i = 0; i < size / 2; ++i) {
        data[left + size / 2 + i] = scratch[i];
    }
    size_t mid = left + size / 2;
    mod_cooley_tukey(data, left, mid);
    mod_cooley_tukey(data, mid, right);
    // Butterfly combine of the two half-transforms.
    for (size_t k = 0; k < size / 2; ++k) {
        auto w = std::exp(std::complex<double>(0, -2. * M_PI * k / size));
        auto& bottom = data[left + k];
        auto& top = data[left + k + size / 2];
        top = bottom - w * top;
        bottom -= top - bottom;
    }
}
template <typename T>
void bit_reversal(std::vector<T>& data, size_t left, size_t right) {
auto size = right - left;
if (size >= 2) {
auto temp = std::vector<T>(size / 2);
for (size_t i = 0; i < size / 2; ++i) {
temp[i] = data[left + i * 2 + 1];
data[left + i] = data[left + i * 2];
}
for (size_t i = 0; i < size / 2; ++i) {
data[left + i + size / 2] = temp[i];
}
auto split = left + size / 2;
bit_reversal(data, left, split);
bit_reversal(data, split, right);
}
}
// In-place iterative radix-2 FFT over data[left, right); (right - left)
// must be a power of two. Bit-reverses first, then runs one butterfly pass
// per stage with doubling span `iter`.
void iterative_cooley_tukey(std::vector<std::complex<double>>& data, size_t left, size_t right) {
    bit_reversal(data, left, right);
    for (size_t iter = 2; iter <= right - left; iter <<= 1u) {
        // BUG FIX: butterflies must start at `left`, not 0 — starting at 0
        // touched elements outside the requested range whenever left > 0.
        for (size_t base_pos = left; base_pos < right; base_pos += iter) {
            for (size_t k = 0; k < iter / 2; ++k) {
                auto w = std::exp(std::complex<double>(0, -2. * M_PI * k / iter));
                data[k + iter / 2 + base_pos] = data[k + base_pos] - w * data[k + iter / 2 + base_pos];
                data[k + base_pos] -= data[k + iter / 2 + base_pos] - data[k + base_pos];
            }
        }
    }
}
// Reverse the low `deg` bits of `num`, e.g. rev_num(0b011, 3) == 0b110.
// Used to compute bit-reversed FFT indices.
static __device__ __host__ inline size_t rev_num(size_t num, size_t deg) {
    size_t reverse_num = 0;
    // BUG FIX: the original shifted the int literal 1 with an int counter,
    // which is undefined behavior for deg > 31; use size_t-wide shifts.
    for (size_t i = 0; i < deg; i++) {
        if (num & ((size_t)1 << i)) {
            reverse_num |= (size_t)1 << (deg - 1 - i);
        }
    }
    return reverse_num;
}
// Scatter `input` into bit-reversed positions: output[rev(i)] = input[i]
// for all i < 2^deg, using a grid-stride loop so any launch size works.
__global__ void bit_reversed_order(double2* __restrict__ input, double2* output, size_t deg) {
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = blockDim.x * gridDim.x;
    // BUG FIX: `1 << deg` is an int-width shift and overflows (UB) for
    // deg > 31; use a size_t-wide shift for large transforms.
    for (size_t i = index; i < ((size_t)1 << deg); i += stride) {
        size_t new_ind = rev_num(i, deg);
        output[new_ind] = input[i];
    }
}
// Complex difference over double2 (x = real, y = imag): a - b.
static __device__ __host__ inline double2 CplxSub(double2 a, double2 b) {
    double2 r;
    r.x = a.x - b.x;
    r.y = a.y - b.y;
    return r;
}
// Complex product over double2: (a.x + i*a.y) * (b.x + i*b.y).
static __device__ __host__ inline double2 CplxMul(double2 a, double2 b) {
    double2 r;
    r.x = a.x * b.x - a.y * b.y;
    r.y = a.x * b.y + a.y * b.x;
    return r;
}
// One FFT butterfly stage for sub-transform span `len`, one thread per
// array slot. pos enumerates slots; k = pos % len is the position within a
// sub-transform and base_pos its start. Only threads with k < len/2 perform
// a butterfly, so half the launched threads idle each stage. The launch in
// cuda_fft covers exactly data.size() threads.
__global__ void new_cooley_tukey_iteration(double2* data, size_t len) {
size_t pos = blockDim.x * blockIdx.x + threadIdx.x;
size_t k = pos % len;
size_t base_pos = pos - k;
if (k < len / 2) {
// Twiddle factor w = exp(-2*pi*i*k/len).
double2 w;
double phi = -2. * M_PI * k / len;
w.x = cos(phi);
w.y = sin(phi);
// top' = bottom - w*top; bottom' = bottom - (top' - bottom) = bottom + w*top.
data[k + len / 2 + base_pos] = CplxSub(data[k + base_pos], CplxMul(w, data[k + len / 2 + base_pos]));
data[k + base_pos] = CplxSub(data[k + base_pos], CplxSub(data[k + len / 2 + base_pos], data[k + base_pos]));
}
}
// Run the GPU FFT pipeline on `data` in place: pack into unified memory,
// bit-reverse, then launch one butterfly kernel per stage.
// NOTE(review): assumes data.size() is a power of two — `deg` is computed by
// shifting until 1, which silently mis-sizes other lengths.
void cuda_fft(std::vector<std::complex<double>>& data) {
    double2* inp_data, *bit_rev_data;
    cudaMallocManaged((void**)&inp_data, data.size() * sizeof(double2));
    cudaMallocManaged((void**)&bit_rev_data, data.size() * sizeof(double2));
    // Pack std::complex into double2 (x = real, y = imag).
    // BUG FIX: size_t, not int — avoids overflow and the signed/unsigned
    // comparison for very large inputs.
    for (size_t i = 0; i < data.size(); ++i) {
        inp_data[i].x = data[i].real();
        inp_data[i].y = data[i].imag();
    }
    size_t deg = 0, sz = data.size();
    while (sz != 1) {
        ++deg;
        sz >>= 1;
    }
    bit_reversed_order<<<512, 256>>>(inp_data, bit_rev_data, deg);
    cudaDeviceSynchronize();
    cudaFree(inp_data);
    // One launch per stage; each stage doubles the butterfly span.
    for (size_t blockSize = 2; blockSize <= data.size(); blockSize <<= 1u) {
        size_t threads = std::min(static_cast<size_t>(1024), blockSize);
        new_cooley_tukey_iteration<<<data.size() / threads, threads>>>(bit_rev_data, blockSize);
        cudaDeviceSynchronize();
    }
    // Copy the result back into the caller's vector.
    for (size_t i = 0; i < data.size(); ++i) {
        data[i].real(bit_rev_data[i].x);
        data[i].imag(bit_rev_data[i].y);
    }
    cudaFree(bit_rev_data);
}
// Inverse FFT without a separate kernel: run the forward transform, scale
// every element by 1/N, then reverse elements 1..N-1 (conjugation trick).
template <typename Iter>
void inversed_fft(Iter first, Iter last) {
    cooley_tukey(first, last);
    auto size = last - first;
    for (auto it = first; it != last; ++it) {
        *it /= size;
    }
    std::reverse(first + 1, last);
}
const int kRuns = 1;
// Read (real, imag) pairs from `filename`, run the iterative CPU FFT and the
// CUDA FFT on copies of the same data, and print per-run timings (ms) plus
// the max/min absolute difference between the two results.
void test_speed_and_correctness(std::string filename) {
    std::ifstream fin(filename);
    // BUG FIX: report an unopenable file instead of silently benchmarking an
    // empty vector.
    if (!fin) {
        std::cerr << "could not open " << filename << '\n';
        return;
    }
    std::vector<std::complex<double>> data;
    // Extraction-driven loop (the original eof() loop relied on the inner
    // stream check anyway).
    double real, imag;
    while (fin >> real >> imag) {
        data.emplace_back(real, imag);
    }
    auto data1 = data;
    for (int k = 0; k < kRuns; ++k) {
        {
            auto start = std::chrono::high_resolution_clock::now();
            iterative_cooley_tukey(data, 0, data.size());
            auto finish = std::chrono::high_resolution_clock::now();
            auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);
            std::cout << milliseconds.count() << ' ';
        }
        {
            auto start = std::chrono::high_resolution_clock::now();
            cuda_fft(data1);
            auto finish = std::chrono::high_resolution_clock::now();
            auto milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(finish - start);
            std::cout << milliseconds.count() << ' ';
        }
        // Compare the two transforms element-wise.
        double max_diff = 0, min_diff = 1E9;
        for (size_t i = 0; i < data.size(); ++i) {
            max_diff = std::max(max_diff, std::abs(data[i] - data1[i]));
            min_diff = std::min(min_diff, std::abs(data[i] - data1[i]));
        }
        std::cout << max_diff << ' ' << min_diff << '\n';
    }
}
int main(int argc, char* argv[]) {
    // BUG FIX: guard against a missing argument before touching argv[1].
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <input-file>\n";
        return 1;
    }
    test_speed_and_correctness(argv[1]);
    return 0;
} |
3,309 | //#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
//__global__
//void GPU_MATHEMATICS::dot(float* store, unsigned s_LD, const float* m1, unsigned m1_r, unsigned m1_c, unsigned m1_LD,
// const float* m2, unsigned m2_r, unsigned m2_c, unsigned m2_LD)
//{
//// float* scal_one;
//// cudaMalloc(&scal_one, sizeof(float));
////
//// cublasHandle_t h;
//// cublasCreate(&h);
//// cublasSetPointerMode(h, CUBLAS_POINTER_MODE_DEVICE);
//// cublasSgemm(h,
//// CUBLAS_OP_N,
//// CUBLAS_OP_N,
//// m1_r, //height
//// m2_c, //width
//// m1_c,//wdith
//// scal_one, //alpha
//// m1, //d matb
//// m1_LD, //matb ld
////
//// m2, //d mat a
//// m2_LD, //mat a ld
//// scal_one, //beta scal
//// store, //dmat x
//// s_LD); //ld
////
//// cudaFree(scal_one);
//}
//
|
3,310 |
#include <iostream>
#include <numeric>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <ctime>
using namespace std;
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define random(x) (rand()%x)
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
// Print a readable diagnostic and terminate when a CUDA runtime call fails.
// Invoked via the CUDA_CHECK_RETURN macro defined above.
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err == cudaSuccess)
        return;
    std::cerr << statement << " returned " << cudaGetErrorString(err)
              << "(" << err << ") at " << file << ":" << line << std::endl;
    exit(1);
}
// Print a rows x cols row-major matrix, tab-separated, one row per line.
void printMatrix(double* inputMatrix, const int rows, const int cols)
{
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            std::cout << inputMatrix[r * cols + c] << "\t";
        }
        std::cout << std::endl;
    }
}
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
// Row operation used to repair a zero pivot: row rowId1 += row rowId2,
// across all 2*size columns of the augmented matrix. Expects blockDim.x <= 512.
__global__ void harnessZeroKernel(double *d_augmentedMatrix, const int rowId1, const int rowId2, const int size) {
    __shared__ double blockR1[512];
    __shared__ double blockR2[512];
    const int tIdx = threadIdx.x;
    const int bIdx = blockIdx.x;
    const int colI = blockIdx.x * blockDim.x + threadIdx.x;
    const bool inRange = colI < size * 2;
    // BUG FIX: load conditionally but synchronize unconditionally — the
    // original kept __syncthreads() inside the range check, which is
    // undefined behavior when the last block is only partially in range.
    if (inRange) {
        blockR1[tIdx] = d_augmentedMatrix[rowId1 * 2 * size + blockDim.x * bIdx + tIdx];
        blockR2[tIdx] = d_augmentedMatrix[rowId2 * 2 * size + blockDim.x * bIdx + tIdx];
    }
    __syncthreads();
    if (inRange) {
        d_augmentedMatrix[rowId1 * 2 * size + blockDim.x * bIdx + tIdx] = blockR1[tIdx] + blockR2[tIdx];
    }
}
// Normalize pivot row rowId: divide every entry of the row by the pivot
// element A[rowId][rowId]. Expects blockDim.x <= 512.
__global__ void computeRowsKernel(double *d_augmentedMatrix, const int rowId, const int size) {
    __shared__ double blockR[512];
    __shared__ double Aii;
    const int tIdx = threadIdx.x;
    const int bIdx = blockIdx.x;
    const int colI = blockIdx.x * blockDim.x + threadIdx.x;
    const bool inRange = colI < size * 2;
    if (inRange) {
        blockR[tIdx] = d_augmentedMatrix[rowId * 2 * size + blockDim.x * bIdx + tIdx];
    }
    // Single writer for the shared pivot (the original had every in-range
    // thread store it — same value, but a needless shared-memory race).
    if (tIdx == 0) {
        Aii = d_augmentedMatrix[rowId * 2 * size + rowId];
    }
    // BUG FIX: the barrier must be reached by the whole block; it was inside
    // the range check, which is undefined behavior on a partial last block.
    __syncthreads();
    if (inRange) {
        d_augmentedMatrix[rowId * 2 * size + blockDim.x * bIdx + tIdx] = blockR[tIdx] / Aii;
    }
}
// Eliminate column colId from every row except the pivot row:
// row[r] -= A[r][colId] * pivotRow, over the size x 2*size augmented matrix.
// Expects a 16 x 16 thread block.
__global__ void computeColsKernel(double *d_augmentedMatrix, const int colId, const int size) {
    __shared__ double blockC[16][16];       // multiplier: each row's entry in column colId
    __shared__ double blockCCurent[16][16]; // this tile of the matrix
    __shared__ double ARow[16];             // the pivot row's entries for these columns
    const int tIdx = threadIdx.x;
    const int tIdy = threadIdx.y;
    const int rowI = blockIdx.y * blockDim.y + threadIdx.y;
    const int colI = blockIdx.x * blockDim.x + threadIdx.x;
    const bool inRange = colI < size * 2 && rowI < size;
    // BUG FIX: load everything first, then synchronize once for the whole
    // block. The original synchronized inside a data-dependent branch, which
    // is undefined behavior when threads of one block diverge.
    if (tIdy == 0 && colI < size * 2) {
        ARow[tIdx] = d_augmentedMatrix[colId * size * 2 + colI];
    }
    if (inRange) {
        blockC[tIdy][tIdx] = d_augmentedMatrix[rowI * size * 2 + colId];
        blockCCurent[tIdy][tIdx] = d_augmentedMatrix[rowI * size * 2 + colI];
    }
    __syncthreads();
    if (inRange && blockC[tIdy][tIdx] != 0) {
        if (rowI != colId) { // never subtract the pivot row from itself
            blockCCurent[tIdy][tIdx] = blockCCurent[tIdy][tIdx] - blockC[tIdy][tIdx] * ARow[tIdx];
        }
        d_augmentedMatrix[rowI * size * 2 + colI] = blockCCurent[tIdy][tIdx];
    }
}
// Build the augmented matrix [A | I]: the left half copies the input, the
// right half is the identity. `cols` here is the augmented width (2 * n).
__global__ void augmentMatrixKernel(double *d_augmentedMatrix, double *d_inputMatrix, const int rows, const int cols) {
    const int rowI = blockIdx.y * blockDim.y + threadIdx.y;
    const int colI = blockIdx.x * blockDim.x + threadIdx.x;
    if (rowI >= rows || colI >= cols)
        return;
    if (colI < cols / 2) {
        d_augmentedMatrix[rowI * cols + colI] = d_inputMatrix[rowI * cols / 2 + colI];
    } else {
        d_augmentedMatrix[rowI * cols + colI] = (colI - cols / 2 == rowI) ? 1 : 0;
    }
}
// Copy the right half of the reduced augmented matrix into the inverse.
__global__ void getInverseMatrixKernel(double *d_augmentedMatrix, double *d_inverseMatrix, const int rows, const int cols) {
    const int rowI = blockIdx.y * blockDim.y + threadIdx.y;
    const int colI = blockIdx.x * blockDim.x + threadIdx.x;
    if (rowI >= rows || colI >= cols / 2)
        return;
    d_inverseMatrix[rowI * cols / 2 + colI] = d_augmentedMatrix[rowI * cols + colI + cols / 2];
}
/**
* Host function that copies the data and launches the work on GPU
*/
// Gauss-Jordan inversion on the GPU via an [A | I] augmented matrix.
// Returns a malloc'd rows x cols inverse; the caller frees it with free().
double *gpuMatrixInverse(double *inputMatrix, const int rows, const int cols)
{
    double *h_inverseMatrix;
    double *d_inputMatrix;
    double *d_inverseMatrix;
    double *d_augmentedMatrix;
    const int length = rows * cols;
    const int size = rows;
    cout << endl;
    // initialization
    h_inverseMatrix = (double *)malloc(length * sizeof(double));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_augmentedMatrix, sizeof(double) * length * 2));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_inputMatrix, sizeof(double) * length));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_inverseMatrix, sizeof(double) * length));
    CUDA_CHECK_RETURN(cudaMemcpy(d_inputMatrix, inputMatrix, sizeof(double) * length, cudaMemcpyHostToDevice));
    dim3 blockSize1(16, 16);
    dim3 gridSize1(cols * 2.0 / blockSize1.x + 1, rows * 1.0 / blockSize1.y + 1);
    augmentMatrixKernel<<<gridSize1, blockSize1>>>(d_augmentedMatrix, d_inputMatrix, rows, cols * 2);
    cudaDeviceSynchronize();
    // NOTE(review): the pivot tests below read the ORIGINAL host matrix, not
    // the current device-side augmented matrix, so pivots that become zero
    // (or non-zero) during elimination are mis-handled. Fine for the
    // identity-style matrices from createTest; not correct in general.
    int i = 0;
    while (i < size) {
        if (inputMatrix[i * size + i] != 0) {
            // Pivot already non-zero: just normalize row i.
            dim3 blockSize2(256);
            dim3 gridSize2(cols * 2.0 / blockSize2.x + 1, 1);
            computeRowsKernel<<<gridSize2, blockSize2>>>(d_augmentedMatrix, i, size);
            cudaDeviceSynchronize();
        } else {
            // Zero pivot: add a row with a non-zero entry in column i first.
            int nonZeroRowIndex = 0;
            for (int j = 0; j < size; j++) {
                if (inputMatrix[j * size + i] != 0) {
                    nonZeroRowIndex = j;
                    break;
                }
            }
            dim3 blockSize3(256);
            dim3 gridSize3(cols * 2.0 / blockSize3.x + 1, 1);
            harnessZeroKernel<<<gridSize3, blockSize3>>>(d_augmentedMatrix, i, nonZeroRowIndex, size);
            cudaDeviceSynchronize();
            dim3 blockSize4(256);
            dim3 gridSize4(cols * 2.0 / blockSize4.x + 1, 1);
            computeRowsKernel<<<gridSize4, blockSize4>>>(d_augmentedMatrix, i, size);
            cudaDeviceSynchronize();
        }
        // Eliminate column i from all other rows.
        dim3 blockSize5(16, 16);
        dim3 gridSize5(cols * 2.0 / blockSize5.x + 1, rows * 1.0 / blockSize5.y + 1);
        computeColsKernel<<<gridSize5, blockSize5>>>(d_augmentedMatrix, i, size);
        cudaDeviceSynchronize();
        i++;
    }
    dim3 blockSize6(16, 16);
    dim3 gridSize6(cols * 2.0 / blockSize6.x + 1, rows * 1.0 / blockSize6.y + 1);
    // BUG FIX: this launch used gridSize1/blockSize1, leaving gridSize6 and
    // blockSize6 dead; use the configuration that was declared for it.
    getInverseMatrixKernel<<<gridSize6, blockSize6>>>(d_augmentedMatrix, d_inverseMatrix, rows, cols * 2);
    CUDA_CHECK_RETURN(cudaMemcpy(h_inverseMatrix, d_inverseMatrix, sizeof(double) * length, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaFree(d_augmentedMatrix));
    CUDA_CHECK_RETURN(cudaFree(d_inverseMatrix));
    CUDA_CHECK_RETURN(cudaFree(d_inputMatrix));
    return h_inverseMatrix;
}
// Allocate a rows x cols row-major matrix initialized to the identity
// pattern (1.0 on the diagonal, 0.0 elsewhere). Caller frees with free().
double *createTest(const int rows, const int cols)
{
    double *data = (double *)malloc(rows * cols * sizeof(double));
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; ++j) {
            // BUG FIX: the row stride is `cols`, not `rows` — the original
            // indexed data[i * rows + j], writing the wrong cells (and out
            // of bounds) for any non-square shape.
            data[i * cols + j] = (i == j) ? 1.0 : 0.0;
        }
    }
    return data;
}
// Build a test matrix, invert it on the GPU, time the call, and (for small
// sizes) print testMatrix * inverse, which should be the identity.
int main(void)
{
    const int rows = 10;
    const int cols = 10;
    double *testMatrix = createTest(rows, cols);
    double *inverseMatrixGPU;
    // GPU code
    clock_t start1,end1;
    start1 = clock();
    inverseMatrixGPU = gpuMatrixInverse(testMatrix, rows, cols);
    end1 = clock();
    double dur1 = (double)(end1 - start1);
    cout << "\n running time on GPU is " << dur1 / CLOCKS_PER_SEC << " secs!\n" << endl;
    if (rows < 20) {
        printMatrix(inverseMatrixGPU, rows, cols);
        double *resultMatrix = (double *)malloc(cols * rows * sizeof(double));
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                resultMatrix[i * cols + j] = 0;
            }
        }
        // Multiply testMatrix by its inverse; a correct inverse yields I.
        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                for (int k = 0; k < cols; k++) {
                    resultMatrix[i * cols + j] += testMatrix[i * cols + k] * inverseMatrixGPU[k * cols + j];
                }
            }
        }
        cout << "\nTest the result from GPU\n" << endl;
        printMatrix(resultMatrix, rows, cols);
        // BUG FIX: the scratch product matrix was leaked.
        free(resultMatrix);
    }
    /* Free memory */
    // BUG FIX: these buffers come from malloc(), so delete[] on them was
    // undefined behavior; also free the test matrix itself (it was leaked).
    free(inverseMatrixGPU);
    free(testMatrix);
    return 0;
}
|
3,311 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
//implement one grid with 4 blocks and 256 threads in total, 8x8 threads for each block
// Debug kernel: dump the block/grid coordinates of every launched thread.
__global__ void print_threadIds()
{
    printf("blockIdx,x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}
// Debug kernel: print each thread's index and the array element it owns.
// Assumes the launch supplies no more threads than `input` has elements.
__global__ void unique_idx_calc_threadIdx(int * input)
{
    int tid = threadIdx.x;
    printf("threadIdx : %d, value : %d\n", tid, input[tid]);
}
// Copy a small host array to the GPU and print it from a kernel.
int main()
{
    //define number of threads for each dimension
    int array_size = 8;
    int array_byte_size = sizeof(int) * array_size;
    int cpu_data[] = {23,9,4,53,65,12,1,33};
    //printout data from traditional cpu memory
    for(int i=0;i<array_size;i++){
        printf("the %d th element is: %d\n", i, cpu_data[i]);
    }
    printf("\n\n");
    //gpu data copied from cpu memory
    int *gpu_data;
    cudaMalloc((void**)&gpu_data, array_byte_size);
    cudaMemcpy(gpu_data, cpu_data, array_byte_size, cudaMemcpyHostToDevice);
    //one thread block which has 8 threads (one per element)
    dim3 block(8);
    dim3 grid(1);
    //printout thread id and each element from one array by using gpu
    unique_idx_calc_threadIdx <<< grid, block >>> (gpu_data);
    cudaDeviceSynchronize();
    // BUG FIX: release the device buffer explicitly instead of relying on
    // cudaDeviceReset() to reclaim it.
    cudaFree(gpu_data);
    cudaDeviceReset();
    return 0;
}
|
3,312 | // CUDA runtime
#include <cuda_runtime.h>
#include <stdio.h>
// Helper functions and utilities to work with CUDA
// #include <helper_functions.h>
/**********************************************
* Check whether we read back the same input
* The double check is just for debug purposes.
* We can comment it out when benchmarking the time.
**********************************************/
#define GPU_DEBUG
/*
Define all constant variavle below with a REASONABLE name
*/
#define out_channel_num 6 // number of feature maps
#define out_y_dim 358 // height of output map
#define out_x_dim 638 // width of output map
#define in_y_dim 720 // height of input map
#define in_x_dim 1280 // width of output map
#define conv_window_y 6 // height of convolution window
#define conv_window_x 6 // width of convolution window
#define filter_size (conv_window_y * conv_window_x) // size of convolution window
#define stride 2 // stride of layer
#define init_bias_thread_x 16 // thread x dimension of init_bias
#define init_bias_thread_y 16 // thread y dimension of init_bias
#define init_bias_thread_z 2 // thread z dimension of init_bias
#define feature_maps_thread_x 8 // thread x dimension of feature_maps
#define feature_maps_thread_y 8 // thread y dimension of feature_maps
#define feature_maps_thread_z 8 // thread z dimension of feature_maps
#define sigmoid_thread_x 14 // thread x dimension of sigmoid
#define sigmoid_thread_y 14 // thread y dimension of sigmoid
#define sigmoid_thread_z 2 // thread z dimension of sigmoid
/******************************************
* Device function declaration
*****************************************/
__global__ void layer1_init_bias(float* d_y, float* d_bias);
__global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight);
__global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer);
/************************************************************************************
 * Input : input image, pointer to output result, coefficients bias and weights
 * Output : neuron outputs of the feature maps represented as an image
 * Procedure: perform feed forward computation through the feature extraction
 * layer in three GPU steps: (1) init maps with bias, (2) convolve, (3) sigmoid.
 * Fixes vs. original: every CUDA API error is now checked (the original only
 * checked the first cudaMalloc and silently overwrote `error` afterwards),
 * execution stops if the first allocation fails, and the timing events are
 * destroyed before returning.
 *******************************************************************************/
void cuda_convolution_layer1(unsigned char in_layer[], unsigned char out_layer[],
        const float bias[], const float weight[]) {
    /*********************************
     * Device buffer sizes (elements and bytes)
     *********************************/
    unsigned int size_y = out_channel_num*out_y_dim*out_x_dim;
    unsigned int mem_size_y = sizeof(float) * size_y;
    float *d_y;
    unsigned int size_bias = out_channel_num;
    unsigned int mem_size_bias = sizeof(float) * size_bias;
    float *d_bias;
    unsigned int size_weight = out_channel_num*filter_size;
    unsigned int mem_size_weight = sizeof(float) * size_weight;
    float *d_weight;
    unsigned int size_in_layer = in_y_dim*in_x_dim;
    unsigned int mem_size_in_layer = sizeof(unsigned char) * size_in_layer;
    unsigned char *d_in_layer;
    unsigned int size_out_layer = out_channel_num*out_y_dim*out_x_dim;
    unsigned int mem_size_out_layer = sizeof(unsigned char) * size_out_layer;
    unsigned char *d_out_layer;
    cudaError_t error;
    /********************************
     * Allocate device memory on GPU.
     * The first cudaMalloc doubles as a "GPU busy" probe; abort on failure
     * instead of continuing with an invalid pointer.
     ********************************/
    error = cudaMalloc((void **) &d_y, mem_size_y);
    if (error != cudaSuccess){
        printf("cudaMalloc returned error code %d, line(%d)\n", error, __LINE__);
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        return;
    }else{
        printf("cudaMalloc success.\n");
    }
    /* Remaining allocations: report (rather than ignore) any failure */
    error = cudaMalloc((void **) &d_in_layer, mem_size_in_layer);
    if (error != cudaSuccess) printf("cudaMalloc d_in_layer: %s\n", cudaGetErrorString(error));
    error = cudaMalloc((void **) &d_bias, mem_size_bias);
    if (error != cudaSuccess) printf("cudaMalloc d_bias: %s\n", cudaGetErrorString(error));
    error = cudaMalloc((void **) &d_weight, mem_size_weight);
    if (error != cudaSuccess) printf("cudaMalloc d_weight: %s\n", cudaGetErrorString(error));
    error = cudaMalloc((void **) &d_out_layer, mem_size_out_layer);
    if (error != cudaSuccess) printf("cudaMalloc d_out_layer: %s\n", cudaGetErrorString(error));
    /*********************************************
     * copy data from host (CPU) to device (GPU)
     ********************************************/
    error = cudaMemcpy(d_in_layer, in_layer, mem_size_in_layer, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) printf("cudaMemcpy in_layer: %s\n", cudaGetErrorString(error));
    error = cudaMemcpy(d_bias, bias, mem_size_bias, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) printf("cudaMemcpy bias: %s\n", cudaGetErrorString(error));
    error = cudaMemcpy(d_weight, weight, mem_size_weight, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) printf("cudaMemcpy weight: %s\n", cudaGetErrorString(error));
    /* Synchronize all the cudaMemcpy API before doing the actual computation */
    cudaDeviceSynchronize();
    /*********************************************
     * Layer 1, Step 1:
     * init values of feature maps at bias value
     * 16*16*z threads per block (1024-thread limit on K80-class GPUs)
     ********************************************/
    dim3 threadsPerBlock = dim3(init_bias_thread_x, init_bias_thread_y, init_bias_thread_z);
    dim3 blocksPerGrid = dim3((out_x_dim + init_bias_thread_x - 1) / init_bias_thread_x,
            (out_y_dim + init_bias_thread_y - 1) / init_bias_thread_y,
            (out_channel_num + init_bias_thread_z - 1) / init_bias_thread_z);
    layer1_init_bias<<<blocksPerGrid, threadsPerBlock>>>(d_y, d_bias);
    cudaDeviceSynchronize();
    /*********************************************
     * Layer 1, Step 2:
     * loop over output feature maps.
     * 8*8*z threads per block; the layer size is not divisible by 8, so
     * the kernel masks out the extra threads.
     ********************************************/
    threadsPerBlock = dim3(feature_maps_thread_x, feature_maps_thread_y, feature_maps_thread_z);
    blocksPerGrid = dim3((out_x_dim + feature_maps_thread_x - 1) / feature_maps_thread_x,
            (out_y_dim + feature_maps_thread_y - 1) / feature_maps_thread_y,
            (out_channel_num + feature_maps_thread_z - 1) / feature_maps_thread_z);
    /* Time the convolution kernel with CUDA events */
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    layer1_feature_maps<<<blocksPerGrid, threadsPerBlock>>>(d_y, d_in_layer, d_weight);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time to execute layer1_feature_maps: %3.1f ms \n", time);
    /* Events are no longer needed past this point */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceSynchronize();
    /*********************************************
     * Layer 1, Step 3:
     * sigmoid activation function
     * 14*14*z threads per block
     ********************************************/
    threadsPerBlock = dim3(sigmoid_thread_x, sigmoid_thread_y, sigmoid_thread_z);
    blocksPerGrid = dim3((out_x_dim + sigmoid_thread_x - 1) / sigmoid_thread_x,
            (out_y_dim + sigmoid_thread_y - 1) / sigmoid_thread_y,
            (out_channel_num + sigmoid_thread_z - 1) / sigmoid_thread_z);
    layer1_sigmoid<<<blocksPerGrid, threadsPerBlock>>>(d_y, d_out_layer);
    cudaDeviceSynchronize();
    /* Read back the output from device (GPU) to host (CPU) */
    error = cudaMemcpy(out_layer, d_out_layer, mem_size_out_layer, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) printf("cudaMemcpy out_layer: %s\n", cudaGetErrorString(error));
    cudaDeviceSynchronize();
    /* release device memory */
    cudaFree(d_y);
    cudaFree(d_in_layer);
    cudaFree(d_bias);
    cudaFree(d_weight);
    cudaFree(d_out_layer);
}
/*********************************************
* GPU kernel
* Layer 1, Step 1:
* init values of feature maps at bias value
* 16*16*z(choose the correct z dimension) threads per block
********************************************/
// Seed every cell of each output feature map with that channel's bias value.
// Launched with (init_bias_thread_x, init_bias_thread_y, init_bias_thread_z)
// threads per block over a rounded-up 3-D grid.
__global__ void layer1_init_bias(float* d_y, float* d_bias) {
    int x = blockIdx.x * init_bias_thread_x + threadIdx.x;
    int y = blockIdx.y * init_bias_thread_y + threadIdx.y;
    int ch = blockIdx.z * init_bias_thread_z + threadIdx.z;
    // Guard against the extra threads of the rounded-up grid.
    if (y < out_y_dim && x < out_x_dim && ch < out_channel_num)
        d_y[(ch * out_y_dim + y) * out_x_dim + x] = d_bias[ch];
}
/*********************************************
* GPU kernel
* Layer 1, Step 2:
* loop over output feature maps
* 8*8*z(choose the correct z dimension) threads per block
********************************************/
// Convolution step of layer 1: 6x6 window, stride 2, accumulating into d_y
// on top of the per-channel bias written by layer1_init_bias.
// Each 8x8x8 thread block stages its input tile plus halo and the filter
// weights in shared memory before the accumulation loop.
__global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight) {
    int col = threadIdx.x + blockIdx.x * feature_maps_thread_x;
    int row = threadIdx.y + blockIdx.y * feature_maps_thread_y;
    int depth = threadIdx.z + blockIdx.z * feature_maps_thread_z;
    // cache d_in_layer: tile of (8*stride + window) rows/cols incl. halo.
    // NOTE(review): `depth` (the z coordinate) is reused below as an x/y
    // offset while loading; the shared-array bounds only hold when depth
    // stays < feature_maps_thread_z, i.e. when gridDim.z == 1. Confirm
    // out_channel_num <= feature_maps_thread_z at the launch site.
    __shared__ unsigned char in_layer[feature_maps_thread_y * stride + conv_window_y][feature_maps_thread_x * stride + conv_window_x];
    // process [0, feature_maps_thread_y * stride - 1][0, feature_maps_thread_x * stride + conv_window_x - 1]
    for (int i = 0; i < stride; i++)
        in_layer[threadIdx.y * stride + i][threadIdx.x * stride + depth] =
            d_in_layer[(row * stride + i) * in_x_dim + col * stride + depth];
    // process [feature_maps_thread_y * stride, feature_maps_thread_y * stride + conv_window_y - 1][0, feature_maps_thread_x * stride - 1]
    if (threadIdx.y == 0 && depth < conv_window_y)
        for (int i = 0; i < stride; i++) {
            in_layer[feature_maps_thread_y * stride + depth][threadIdx.x * stride + i] =
                d_in_layer[((row + feature_maps_thread_y) * stride + depth) * in_x_dim + col * stride + i];
        }
    // process the bottom-right halo corner
    if (threadIdx.x < conv_window_x && threadIdx.y == 0 && depth < conv_window_y)
        in_layer[feature_maps_thread_y * stride + depth][feature_maps_thread_x * stride + threadIdx.x] =
            d_in_layer[((row + feature_maps_thread_y) * stride + depth) * in_x_dim + (col - threadIdx.x + feature_maps_thread_x) * stride + threadIdx.x];
    // cache d_weight: one filter_size row per output channel.
    // NOTE(review): this guard compares threadIdx against the layer size
    // (out_y_dim/out_x_dim — always true for 8x8 threads) instead of the
    // 6x6 window, so indices run up to 7*6+7 = 49 > filter_size (36):
    // out-of-bounds reads of d_weight and writes past the row of `weight`.
    // The guard should likely be
    // `threadIdx.y < conv_window_y && threadIdx.x < conv_window_x` — confirm.
    if (threadIdx.y < out_y_dim && threadIdx.x < out_x_dim && depth < out_channel_num)
        weight[depth][threadIdx.y * conv_window_x + threadIdx.x] = d_weight[depth * filter_size + threadIdx.y * conv_window_x + threadIdx.x];
    __syncthreads();
    if (row < out_y_dim && col < out_x_dim && depth < out_channel_num) // prevent out of bound access
        for (int k = 0; k < conv_window_y; k++) // loop over convolution window (row)
            for (int l = 0; l < conv_window_x; l++) // loop over convolution window (column)
                // accumulate window * filter into the output neuron
                d_y[depth * out_y_dim * out_x_dim + row * out_x_dim + col] +=
                    in_layer[threadIdx.y * stride + k][threadIdx.x * stride + l] * weight[depth][k * conv_window_x + l];
}
/*********************************************
* GPU kernel
* Layer 1, Step 3:
* sigmoid activation function
* 14*14*z(choose the correct z dimension) threads per block
********************************************/
// Squash each accumulated activation into [0, 255] with a scaled sigmoid and
// store it as an 8-bit pixel of the output layer.
__global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer){
    int x = blockIdx.x * sigmoid_thread_x + threadIdx.x;
    int y = blockIdx.y * sigmoid_thread_y + threadIdx.y;
    int ch = blockIdx.z * sigmoid_thread_z + threadIdx.z;
    // Rounded-up grid: threads past the layer bounds do nothing.
    if (x >= out_x_dim || y >= out_y_dim || ch >= out_channel_num)
        return;
    int idx = (ch * out_y_dim + y) * out_x_dim + x; // flat index in the maps
    d_out_layer[idx] = (unsigned char)(255.999f / (1 + expf(-d_y[idx] / 256)));
}
3,313 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define DataSize 1024
// Invert an 8-bit grayscale image in place: every pixel p becomes 255 - p.
// Grid-stride loop, so any grid/block configuration covers the whole image.
__global__ void Add(unsigned char *Da,int high,int width)
{
    int total = high * width;
    int step = blockDim.x * gridDim.x;
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    for (int idx = first; idx < total; idx += step)
        Da[idx] = 255 - Da[idx];
}
/*
 * Reads lena.bmp, inverts the pixel data on the GPU, and writes lena2.bmp.
 * Fixes vs. original: fopen is checked, `head` is allocated with the correct
 * element size (was sizeof(unsigned int) -- 4x too large), the deprecated
 * cudaThreadSynchronize is replaced with cudaDeviceSynchronize, and all
 * host/device buffers are released.
 * NOTE(review): header fields are read as raw little-endian ints and the
 * pixel area is assumed to be width*high bytes (8-bit, no row padding) --
 * holds only for the specific lena.bmp this demo ships with.
 */
int main()
{
    FILE *fp = NULL;
    unsigned int high = 0, width = 0, offset = 0;
    unsigned char *head = NULL;
    unsigned char *img = NULL;
    fp = fopen("lena.bmp","rb");
    if (fp == NULL) {
        printf("cannot open lena.bmp\n");
        return 1;
    }
    /* BMP header: pixel-data offset at byte 10, width at 18, height at 22 */
    fseek(fp, 10, SEEK_SET);
    fread(&offset, sizeof(unsigned int), 1, fp);
    fseek(fp, 18, SEEK_SET);
    fread(&width, sizeof(unsigned int), 1, fp);
    fseek(fp, 22, SEEK_SET);
    fread(&high, sizeof(unsigned int), 1, fp);
    img = (unsigned char*)malloc(sizeof(unsigned char)*(width*high));
    fseek(fp, offset, SEEK_SET);
    fread(img, sizeof(unsigned char), (width*high), fp);
    /* copy the raw header so it can be written back verbatim */
    head = (unsigned char*)malloc(sizeof(unsigned char)*(offset));
    fseek(fp, 0, SEEK_SET);
    fread(head, sizeof(unsigned char), offset, fp);
    fclose(fp);
    dim3 block(1024, 1, 1);
    dim3 grid(2, 1, 1);
    unsigned char *Da;
    cudaMalloc((void**)&Da, (sizeof(unsigned char)*(width*high)));
    cudaMemcpy(Da, img, (sizeof(unsigned char)*(width*high)), cudaMemcpyHostToDevice);
    Add <<< grid, block >>> (Da,high,width);
    cudaDeviceSynchronize();
    cudaMemcpy(img, Da, (sizeof(unsigned char)*(width*high)), cudaMemcpyDeviceToHost);
    fp = fopen("lena2.bmp","wb+");
    if (fp != NULL) {
        fwrite(head, sizeof(unsigned char), offset, fp);
        fwrite(img, sizeof(unsigned char), (width*high), fp);
        fclose(fp);
    }
    /* release everything (the original leaked img, head and Da) */
    free(img);
    free(head);
    cudaFree(Da);
    return 0;
}
|
3,314 | #include <math.h>
#define EPS2 0.000001
// All-pairs N-body step: accumulate gravitational acceleration on body `id`
// from every body in pos_, then integrate velocity and position (Euler).
// Fixes vs. original: `acc` was read before ever being written (uninitialized
// accumulation), and threads beyond n indexed out of bounds.
__global__ void update(float4 *pos, float3 *vel, float4 *pos_, float3 *vel_, int n, float timedelta)
{
    int id = threadIdx.x + blockDim.x*blockIdx.x;
    if (id >= n) return;            // grid may be rounded up past n
    float3 acc;                     // must start at zero before accumulating
    acc.x = 0.0f;
    acc.y = 0.0f;
    acc.z = 0.0f;
    for (int sub_id = 0; sub_id < n; sub_id ++)
    {
        float3 r;
        r.x = pos_[sub_id].x - pos_[id].x;
        r.y = pos_[sub_id].y - pos_[id].y;
        r.z = pos_[sub_id].z - pos_[id].z;
        // EPS2 softening avoids the singularity at r == 0 (incl. sub_id == id)
        float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
        float distSixth = distSqr * distSqr * distSqr;
        float invDistCube = 1.0f/sqrtf(distSixth);
        // NOTE(review): the standard formulation weights by the attracting
        // body's mass (pos_[sub_id].w); this uses the body's own mass
        // (pos_[id].w). Kept as-is -- confirm intent before changing.
        float s = pos_[id].w * invDistCube;
        acc.x += r.x * s;
        acc.y += r.y * s;
        acc.z += r.z * s;
    }
    // Euler integration: v' = v + a*dt, then x' = x + v'*dt
    vel[id].x = vel_[id].x + timedelta * acc.x;
    vel[id].y = vel_[id].y + timedelta * acc.y;
    vel[id].z = vel_[id].z + timedelta * acc.z;
    pos[id].x = pos_[id].x + timedelta * vel[id].x;
    pos[id].y = pos_[id].y + timedelta * vel[id].y;
    pos[id].z = pos_[id].z + timedelta * vel[id].z;
    pos[id].w = pos_[id].w;         // mass is carried through unchanged
}
3,315 | /**
* Global Memory (Linear Array) using Unified Memory
*/
#include <stdio.h>
#include <stdlib.h>
// Print the most recent CUDA error, if any, to stdout.
// cudaGetLastError() also clears the sticky error state.
void check_cuda_errors()
{
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        printf("Last CUDA error %s\n", cudaGetErrorString(status));
    }
}
// One thread per element: add 1 to each entry. Assumes a single-block launch
// with one thread per array element.
__global__ void incrementor(int* numbers)
{
    numbers[threadIdx.x] += 1;
}
/*
 * Fills a managed buffer with random values, increments each on the GPU,
 * and prints before/after.
 * Fixes vs. original: scanf result is checked, and the element count is
 * bounded to [1, 1024] because the kernel is launched as a single block
 * (more than 1024 threads per block fails silently on current GPUs).
 */
int main(int argc, char **argv)
{
    int *device_mem;
    int i, num_elements;
    // Ask user for number of elements
    printf("How many elements to increment? ");
    if (scanf("%d", &num_elements) != 1 || num_elements < 1 || num_elements > 1024) {
        fprintf(stderr, "Invalid element count (expected 1..1024)\n");
        return 1;
    }
    // Seed our RNG
    srand(0);
    // Allocate unified memory, visible to both CPU and GPU
    cudaMallocManaged((void **)&device_mem, num_elements * sizeof(int));
    check_cuda_errors();
    printf("Incrementor input:\n");
    for (i = 0; i < num_elements; i++) {
        device_mem[i] = rand() % 100;
        printf("start[%d] = %d\n", i, device_mem[i]);
    }
    incrementor<<<1, num_elements>>>(device_mem);
    check_cuda_errors();
    // Ensure that we don't proceed till we get the results!
    cudaDeviceSynchronize();
    printf("Incrementor results:\n");
    for (i = 0; i < num_elements; i++) {
        printf("result[%d] = %d\n", i, device_mem[i]);
    }
    // Free the managed allocation (covers both host and device views)
    cudaFree(device_mem);
    return 0;
}
|
3,316 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
/*
 * Pretty-print the main fields of a cudaDeviceProp.
 * Fix vs. original: totalGlobalMem, sharedMemPerBlock, memPitch,
 * totalConstMem and textureAlignment are size_t; printing them with %d is
 * undefined behavior on 64-bit builds (garbage or truncated values).
 */
void DisplayProperties( cudaDeviceProp* pDeviceProp )
{
    if( !pDeviceProp )
        return;
    printf( "\nDevice Name \t - %s ", pDeviceProp->name );
    printf( "\n**************************************");
    printf( "\nTotal Global Memory\t\t -%zu KB", pDeviceProp->totalGlobalMem/1024 );
    printf( "\nShared memory available per block \t - %zu KB", pDeviceProp->sharedMemPerBlock/1024 );
    printf( "\nNumber of registers per thread block \t - %d", pDeviceProp->regsPerBlock );
    printf( "\nWarp size in threads \t - %d", pDeviceProp->warpSize );
    printf( "\nMemory Pitch \t - %zu bytes", pDeviceProp->memPitch );
    printf( "\nMaximum threads per block \t - %d", pDeviceProp->maxThreadsPerBlock );
    printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", pDeviceProp->maxThreadsDim[0], pDeviceProp->maxThreadsDim[1], pDeviceProp->maxThreadsDim[2] );
    printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", pDeviceProp->maxGridSize[0], pDeviceProp->maxGridSize[1], pDeviceProp->maxGridSize[2] );
    printf( "\nTotal constant memory \t - %zu bytes", pDeviceProp->totalConstMem );
    printf( "\nCUDA ver \t - %d.%d", pDeviceProp->major, pDeviceProp->minor );
    printf( "\nClock rate \t - %d KHz", pDeviceProp->clockRate );
    printf( "\nTexture Alignment \t - %zu bytes", pDeviceProp->textureAlignment );
    printf( "\nDevice Overlap \t - %s", pDeviceProp-> deviceOverlap?"Allowed":"Not Allowed" );
    printf( "\nNumber of Multi processors \t - %d", pDeviceProp->multiProcessorCount );
}
// Enumerate all CUDA devices and dump their properties.
int main(void)
{
    cudaDeviceProp deviceProp;
    int deviceCount = 0;
    cudaGetDeviceCount( &deviceCount );
    printf( "Total Device found: %d", deviceCount );
    for (int deviceIdx = 0; deviceIdx < deviceCount; ++deviceIdx )
    {
        // zero the struct so stale fields never leak into the printout
        memset( &deviceProp, 0, sizeof(deviceProp));
        if( cudaGetDeviceProperties(&deviceProp, deviceIdx) == cudaSuccess )
            DisplayProperties( &deviceProp );
        else
            printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
    }
}
|
3,317 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
/*
* See section "B. 19 Launch Bounds" from "CUDA C Programming Guide" for more
* information about the optimal launch bounds, which differ across the major
* architecture revisions
*/
#define THREADS_PER_BLOCK_2D 16
/* Simple utility function to check for CUDA runtime errors */
void checkCUDAError(const char* msg);
/* Host function that transposes a matrix */
void transpose_cpu(const char* mat_in, char* mat_out, unsigned int rows,
unsigned int cols);
/* Kernel code */
/* Kernel code: each thread copies one element of the rows x cols input
 * matrix into its transposed position of the cols x rows output. */
__global__ void transpose_gpu(const char* mat_in, char* mat_out,
        unsigned int rows, unsigned int cols) {
    unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
    /* rounded-up grid: skip threads that fall outside the matrix */
    if (x >= cols || y >= rows)
        return;
    mat_out[x * rows + y] = mat_in[y * cols + x];
}
/*
 * Transposes a random rows x cols char matrix on CPU and on GPU, timing and
 * verifying both.
 * Fixes vs. original: rows/cols are validated (atoi returns 0 on junk, which
 * would make every buffer zero-sized), the CPU verification comment no longer
 * claims it checks the GPU result, the timing events are destroyed, and main
 * returns an explicit status.
 */
int main(int argc, char** argv) {
    /* Process command-line arguments */
    if (argc != 3) {
        fprintf(stderr, "Usage: %s rows columns\n", argv[0]);
        fprintf(stderr, " rows is the number of rows of the input matrix\n");
        fprintf(stderr, " columns is the number of columns of the input matrix\n");
        return EXIT_FAILURE;
    }
    cudaEvent_t start, stop;
    float elapsed_time_ms;
    unsigned int rows = atoi(argv[1]);
    unsigned int cols = atoi(argv[2]);
    if (rows == 0 || cols == 0) {
        fprintf(stderr, "rows and columns must be positive integers\n");
        return EXIT_FAILURE;
    }
    /* Pointer for host memory */
    char *h_mat_in, *h_mat_out;
    size_t mat_size = rows * cols * sizeof(char);
    /* Pointer for device memory */
    char *dev_mat_in, *dev_mat_out;
    /* Allocate host and device memory */
    h_mat_in = (char *) malloc(mat_size);
    h_mat_out = (char *) malloc(mat_size);
    cudaMalloc(&dev_mat_in, mat_size);
    cudaMalloc(&dev_mat_out, mat_size);
    checkCUDAError("cudaMalloc");
    /* Fixed seed for illustration; values are truncated into char range */
    srand(2047);
    for (unsigned int i = 0; i < rows; ++i) {
        for (unsigned int j = 0; j < cols; ++j) {
            h_mat_in[i * cols + j] = rand() % (rows * cols);
        }
    }
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /*------------------------ COMPUTATION ON CPU ----------------------------*/
    cudaEventRecord(start, 0);
    transpose_cpu(h_mat_in, h_mat_out, rows, cols);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    /* Verify the correctness of the matrix transposed on the CPU */
    for (unsigned int i = 0; i < cols; ++i) {
        for (unsigned int j = 0; j < rows; ++j) {
            assert(h_mat_out[i * rows + j] == h_mat_in[j * cols + i]);
        }
    }
    printf("Time to transpose a matrix of %dx%d on CPU: %f ms.\n\n", rows, cols,
            elapsed_time_ms);
    /*------------------------ COMPUTATION ON GPU ----------------------------*/
    /* Host to device memory copy */
    cudaMemcpy(dev_mat_in, h_mat_in, mat_size, cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy");
    /* Set grid and block dimensions properly (rounded up) */
    unsigned int grid_rows = (rows + THREADS_PER_BLOCK_2D - 1) / THREADS_PER_BLOCK_2D;
    unsigned int grid_cols = (cols + THREADS_PER_BLOCK_2D - 1) / THREADS_PER_BLOCK_2D;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(THREADS_PER_BLOCK_2D, THREADS_PER_BLOCK_2D);
    cudaEventRecord(start, 0);
    /* Launch kernel */
    transpose_gpu<<<dimGrid, dimBlock>>>(dev_mat_in, dev_mat_out, rows, cols);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    checkCUDAError("kernel invocation");
    /* device to host copy */
    cudaMemcpy(h_mat_out, dev_mat_out, mat_size, cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    /* Verify the correctness of the matrix transposed on the GPU */
    for (unsigned int i = 0; i < cols; ++i) {
        for (unsigned int j = 0; j < rows; ++j) {
            assert(h_mat_out[i * rows + j] == h_mat_in[j * cols + i]);
        }
    }
    printf("Time to transpose a matrix of %dx%d on GPU: %f ms.\n\n", rows, cols,
            elapsed_time_ms);
    /* Release timing events and host/device memory */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_mat_in);
    free(h_mat_out);
    cudaFree(dev_mat_in);
    cudaFree(dev_mat_out);
    checkCUDAError("cudaFree");
    return EXIT_SUCCESS;
}
/* Abort with a labeled message if any CUDA call since the last check failed. */
void checkCUDAError(const char *msg) {
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
/* Host reference transpose: mat_out (cols x rows, row-major) receives the
 * transpose of mat_in (rows x cols, row-major). */
void transpose_cpu(const char* mat_in, char* mat_out, unsigned int rows,
        unsigned int cols) {
    for (unsigned int r = 0; r < rows; ++r) {
        for (unsigned int c = 0; c < cols; ++c) {
            mat_out[c * rows + r] = mat_in[r * cols + c];
        }
    }
}
|
3,318 | /*
* main.c
*
* Created on: 06/12/2017
* Author: roussian
*/
#include "HostManager.cuh"
#include <stdio.h>
/*
 * Entry point: parses launch parameters and runs the query-processing host.
 * Fix vs. original: argv[5] (MergeNumberByBlock) is always read below, so at
 * least 6 argv entries are required; the old `argc < 5` guard allowed an
 * out-of-bounds read of argv[5].
 */
int main(int argc, char *argv[])
{
    // cudaSetDevice(0);
    // Arguments: 5 required + 1 optional (QueryType)
    if( argc < 6 ) {
        printf( "\n Parametros incorretos.\n Uso: <top_K>, <blockSize>, <BlockRoundNumber>, <iGlobalNumberRound>,"
                " <MergeNumberByBlock> <QueryType> onde: \n" );
        printf( "\t <top_K> - quantidade de documentos retornados (precisa ser multiplo do blockSize).\n" );
        printf( "\t <blockSize> - tamanho do bloco.\n" );
        printf( "\t <BlockRoundNumber> - numero de partes continuas que cada bloco ira processar.\n" );
        printf( "\t <GlobalNumberRound> - numero de partes nao continuas das listas invertidas que cada bloco ira processar.\n" );
        printf( "\t <MergeNumberByBlock> - numero de merge que cada bloco irá executar.\n" );
        printf( "\t <QueryType> (Optional) - [0] OR Query --- [1] AND Query.\n" );
        return 0;
    }
    /* Launch parameters (see usage text above for each one's meaning) */
    int iTopk = atoi( argv[1] );
    int iBlockSize = atoi( argv[2] );
    int iBlockNumberRound = atoi( argv[3] );
    int iGlobalNumberRound = atoi( argv[4] );
    int iMergeNumberByBlock = atoi( argv[5] );
    int iQueryType = 0;   // default: OR query
    if(argc == 7)
        iQueryType = atoi( argv[6] );
    // #ifdef BATCH
    // queryBatchProcessingHost_Mix(iTopk, iBlockSize, iBlockNumberRound, iGlobalNumberRound, iMergeNumberByBlock, iQueryType);
    // #else
    querySingleProcessingHost(iTopk, iBlockSize, iBlockNumberRound, iGlobalNumberRound, iMergeNumberByBlock, iQueryType, 1);
    // #endif
    // queryBatchProcessingHost_ByBlock(iTopk, iBlockSize, iBlockNumberRound,
    //         iGlobalNumberRound, iMergeNumberByBlock, iQueryType,500);
    exit(EXIT_SUCCESS);
}
|
3,319 | #include <cmath>
// Multiply each input element by the sign of the matching sgn entry:
// +1 when sgn > 0, otherwise -1 (note: sgn == 0 maps to -1).
__global__ void conditional(double* __restrict__ out,
                            double const* __restrict__ in,
                            double const* __restrict__ sgn) {
    int idx = threadIdx.x;
    double factor;
    if (sgn[idx] > 0) {
        factor = 1.0;
    } else {
        factor = -1.0;
    }
    out[idx] = in[idx] * factor;
}
|
3,320 | #include "NeuralNetGPUFunctions.cuh"
// ReLU activation used by the hidden layers.
__device__ double activationFunctionHidden(double x)
{
    return fmax(0.0, x);
}
// Derivative of the hidden-layer ReLU (1 at and above 0, else 0).
__device__ double activationFunctionDerivativeHidden(double x)
{
    if (x >= 0.0)
        return 1.0;
    return 0.0;
}
// Logistic sigmoid used by the output layer.
// As expected, exp() gives slightly different results when comparing
// CUDA exp() and std::exp().
__device__ double activationFunctionOutput(double x)
{
    const double e = exp(-x);
    return 1.0 / (1.0 + e);
}
// Derivative of the output sigmoid: s(x) * (1 - s(x)).
__device__ double activationFunctionDerivativeOutput(double x)
{
    const double sig = activationFunctionOutput(x);
    return sig * (1.0 - sig);
}
#define MAX_BLOCKING_SIZE 1024
// Forward propagation through a fully connected network stored in flat
// arrays: one thread per neuron index within a layer, walking the layers
// front to back and staging the previous layer's activations in shared
// memory for reuse.
// NOTE(review): __syncthreads() only synchronizes within one block, so the
// staging scheme is only safe for a single-block launch -- confirm the
// launch configuration at the call site.
__global__ void cudaForwardProp(
    double* neuronOutputs,   // in/out: activations of all layers, flattened
    double* neuronWeights,   // weights between consecutive layers, flattened
    int* neuronsPerLayer,    // neuron count per layer (incl. bias neuron)
    int numLayers            // total number of layers
)
{
    // Previous layer's activations, staged once per layer.
    __shared__ double shared_lastLayerOutputs[MAX_BLOCKING_SIZE];
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int layerIndexStride = 0;       // offset of the current layer in neuronOutputs
    int lastLayerIndexStride = 0;   // offset of the previous layer
    int lastLayerWeightStride = 0;  // offset of the weights feeding the current layer
    // Go through each layer
    for (int l = 1; l < numLayers; ++l)
    {
        layerIndexStride += neuronsPerLayer[l - 1];
        // Load last layer output values
        // into shared memory
        if (id < neuronsPerLayer[l - 1])
        {
            shared_lastLayerOutputs[id] =
                neuronOutputs[lastLayerIndexStride + id];
        }
        __syncthreads();   // staging must finish before anyone reads it
        // Don't calculate output for bias neurons
        if (id < neuronsPerLayer[l] - 1)
        {
            neuronOutputs[layerIndexStride + id] = 0;
            // Go through each neuron from the last layer
            for (int n = 0; n < neuronsPerLayer[l - 1]; ++n)
            {
                double outVal = shared_lastLayerOutputs[n];
                double weightVal =
                    neuronWeights[
                        lastLayerWeightStride +
                        (neuronsPerLayer[l] - 1) * n + // Ignore bias neuron
                        id
                    ];
                neuronOutputs[layerIndexStride + id] += outVal * weightVal;
            }
            // Activation function for hidden layers
            if (l < numLayers - 1)
            {
                neuronOutputs[layerIndexStride + id] =
                    activationFunctionHidden(neuronOutputs[layerIndexStride + id]);
            }
            // Activation function for output layer
            // (Let the CPU do it to keep precision,
            // only takes <100 ms for 5000 training sets)
            /*else
            {
                neuronOutputs[layerIndexStride + id] =
                    activationFunctionOutput(neuronOutputs[layerIndexStride + id]);
            }*/
        }
        // Bias neuron: always outputs 1.0
        else if (id == neuronsPerLayer[l] - 1)
        {
            neuronOutputs[layerIndexStride + id] = 1.0;
        }
        lastLayerWeightStride += (neuronsPerLayer[l - 1]) * (neuronsPerLayer[l] - 1);
        lastLayerIndexStride = layerIndexStride;
        __syncthreads();   // writes done before the next layer reads them
    }
}
// Back-propagates gradients through the hidden layers, one thread per
// neuron index, walking the layers back to front and staging the next
// layer's gradients in shared memory. Output-layer gradients are assumed
// to be present already (computed on the CPU per the forward-prop notes).
// NOTE(review): as with cudaForwardProp, the shared-memory staging plus
// __syncthreads() is only safe for a single-block launch -- confirm.
__global__ void cudaCalcGradients(
    double* neuronOutputs,    // activations from the forward pass
    double* neuronWeights,    // weights between consecutive layers, flattened
    double* neuronGradients,  // in/out: per-neuron gradients, flattened
    int* neuronsPerLayer,     // neuron count per layer (incl. bias neuron)
    int numLayers             // total number of layers
)
{
    __shared__ double shared_nextLayerGradients[MAX_BLOCKING_SIZE];
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // ----- Calculate gradients in hidden layers -----
    // Stride points to last hidden layer
    int layerStride = 0;
    int nextLayerStride = 0;
    for (int i = 0; i < numLayers - 1 - 1; ++i)
        layerStride += neuronsPerLayer[i];
    nextLayerStride = layerStride + neuronsPerLayer[numLayers - 1 - 1];
    // Stride points to last hidden layer weights
    int weightStride = 0;
    for (int i = 0; i < numLayers - 1 - 1; ++i)
    {
        // += <number of neurons> * <number of weights for each neuron>
        weightStride += neuronsPerLayer[i] * (neuronsPerLayer[i + 1] - 1);
    }
    // Loop through each hidden layer, back to front,
    // starting from the last hidden layer
    for (int i = numLayers - 1 - 1; i > 0; --i)
    {
        // Load next layer's gradients into shared memory (bias excluded)
        if (id < neuronsPerLayer[i + 1] - 1)
        {
            shared_nextLayerGradients[id] = neuronGradients[nextLayerStride + id];
        }
        __syncthreads();  // staging complete before anyone reads it
        // Make sure this thread maps to a neuron in this layer
        if (id < neuronsPerLayer[i])
        {
            // Sum of (outgoing weight * downstream gradient)
            double swg = 0.0;
            // Loop through each weight
            for (int j = 0; j < neuronsPerLayer[i + 1] - 1; ++j)
            {
                // += <weight to next neuron> * <next neuron gradient>
                swg +=
                    neuronWeights[weightStride + (neuronsPerLayer[i + 1] - 1) * id + j] *
                    shared_nextLayerGradients[j];
            }
            // Chain rule: scale by the activation derivative at this neuron
            neuronGradients[layerStride + id] =
                swg *
                activationFunctionDerivativeHidden(neuronOutputs[layerStride + id]);
        }
        // Shift all strides one layer toward the front
        nextLayerStride = layerStride;
        layerStride -= neuronsPerLayer[i - 1];
        weightStride -= neuronsPerLayer[i - 1] * (neuronsPerLayer[i] - 1);
        __syncthreads();  // this layer done before it becomes "next" below
    }
}
// Momentum-SGD weight update, one thread per weight:
//   delta = eta * (source activation) * (destination gradient) + alpha * old delta
// thisNeuronIndex/nextNeuronIndex map weight id -> flat indices of the
// weight's source neuron and destination neuron.
// Fix vs. original: the trailing __syncthreads() was removed -- it sat at
// the very end of the kernel with no shared state read afterwards, so it
// synchronized nothing.
__global__ void cudaUpdateWeights(
    double* neuronOutputs,
    double* neuronWeights,
    double* neuronDeltaWeights,
    double* neuronGradients,
    int* thisNeuronIndex,
    int* nextNeuronIndex,
    int numWeights,
    float eta,     // learning rate
    float alpha    // momentum coefficient
)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < numWeights)
    {
        double oldDeltaWeight = neuronDeltaWeights[id];
        // learning term + momentum term
        double newDeltaWeight =
            eta * neuronOutputs[thisNeuronIndex[id]] * neuronGradients[nextNeuronIndex[id]] +
            alpha * oldDeltaWeight;
        // Apply weight and delta weight
        neuronDeltaWeights[id] = newDeltaWeight;
        neuronWeights[id] += newDeltaWeight;
    }
}
3,321 | /* Element-wise multiplication of DATA_SIZE floats, computed on the CPU only (no GPU). */
/* - rev.201905 by Yoshiki NAGATANI */
#include <stdio.h>
#include <stdlib.h>
#define DATA_SIZE 1048576
/* Repeat the same calculation REPEAT times so the run is long enough to time. */
#define REPEAT 10000
/*-----------------------------------------------------------*/
/* Element-wise multiplication R = A * B on the CPU (serial, no parallelism). */
/* Element-wise product R = A * B computed serially on the CPU (no parallelism). */
void MultiplyOnCPU(float* h_data_A, float* h_data_B, float* h_data_R) {
    /* one pass over all DATA_SIZE elements */
    for (long idx = 0; idx < DATA_SIZE; idx++) {
        h_data_R[idx] = h_data_A[idx] * h_data_B[idx];
    }
}
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/*
 * Benchmark driver: allocates three DATA_SIZE float arrays, fills the inputs
 * with pseudo-random values, runs MultiplyOnCPU REPEAT times and prints the
 * first and last result.
 * Fixes vs. original: malloc results are checked before use (the original
 * deliberately skipped this "for readability" and would crash on NULL), and
 * the buffers are freed before exit.
 */
int main(void) {
    int i;
    printf("DATA_SIZE(%d)\n", DATA_SIZE);
    float* h_data_A; /* host input A */
    float* h_data_B; /* host input B */
    float* h_data_R; /* host result */
    /* Allocate host buffers; fail loudly instead of dereferencing NULL */
    h_data_A = (float*)malloc(DATA_SIZE * sizeof(float));
    h_data_B = (float*)malloc(DATA_SIZE * sizeof(float));
    h_data_R = (float*)malloc(DATA_SIZE * sizeof(float));
    if (h_data_A == NULL || h_data_B == NULL || h_data_R == NULL) {
        printf("malloc failed\n");
        free(h_data_A);
        free(h_data_B);
        free(h_data_R);
        return 1;
    }
    /* Fill the inputs with pseudo-random values; zero the result buffer */
    for (i = 0; i < DATA_SIZE; i++) {
        h_data_A[i] = (double)(rand()) / 32768.0;
        h_data_B[i] = (double)(rand()) / 32768.0;
        h_data_R[i] = 0.0;
    }
    /* Repeat the multiplication REPEAT times so the run is long enough to time */
    printf("Start calculation on CPU for %d times...", REPEAT);
    for (i = 0; i < REPEAT; i++) {
        MultiplyOnCPU(h_data_A, h_data_B, h_data_R);
    }
    printf("done!!\n");
    /* Show only the first and the last element of the result */
    printf("Results:\n");
    printf(" %8d: %f\n", 0, h_data_R[0]);
    printf(" %8d: %f\n", DATA_SIZE - 1, h_data_R[DATA_SIZE - 1]);
    /* Release host memory (the original leaked all three buffers) */
    free(h_data_A);
    free(h_data_B);
    free(h_data_R);
    return 0;
}
|
3,322 | #include <iostream>
#include <math.h>
// Kernel: haversine great-circle distance per element, grid-stride loop.
// Fixes vs. original: the header comment (copied from an add-arrays
// template) was wrong, and the math used double-precision pow/sin/cos/
// atan2/sqrt on float data, silently promoting every operation to double.
__global__
void haversine(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    const float R = 6378.137f;             // Earth radius in km
    const float toRad = 3.14159f / 180.0f; // degrees -> radians
    for (int i = index; i < n; i += stride) {
        // NOTE(review): both endpoints come from the same element, so the
        // computed distance is point-to-itself (~0). Kept as in the
        // original benchmark -- confirm intent.
        float lon1 = x[i] * toRad;
        float lon2 = x[i] * toRad;
        float lat1 = y[i] * toRad;
        float lat2 = y[i] * toRad;
        float dlon = lon2 - lon1;
        float dlat = lat2 - lat1;
        // haversine formula, single precision throughout
        float sdlat = sinf(dlat / 2.0f);
        float sdlon = sinf(dlon / 2.0f);
        float a = sdlat * sdlat + cosf(lat1) * cosf(lat2) * sdlon * sdlon;
        float d = 2.0f * atan2f(sqrtf(a), sqrtf(1.0f - a)) * R;
        x[i] = d;
    }
}
/*
 * Fills two 2^30-element managed arrays and launches the haversine kernel
 * 1000 times to keep the GPU busy.
 * Fixes vs. original: the managed allocations (4 GiB each) are checked
 * before use, and the launch loop `z = 1; z < 1000` ran only 999 times
 * despite the comment promising 1000 calls.
 */
int main(void)
{
    int N = pow(2,30);  // 2^30 elements -> 4 GiB per float array
    std::cout << "In: " << N << std::endl;
    float *x, *y;
    // Allocate Unified Memory – accessible from CPU or GPU
    if (cudaMallocManaged(&x, N*sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&y, N*sizeof(float)) != cudaSuccess) {
        std::cout << "cudaMallocManaged failed" << std::endl;
        return 1;
    }
    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    std::cout << numBlocks << std::endl;
    for (int z = 0; z < 1000; z++) { // Run 1000 calls to function to fill GPU for ~1 minute
        haversine<<<numBlocks, blockSize>>>(N, x, y);
    }
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    std::cout << "First: " << x[0] << std::endl;
    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
3,323 | #include <bits/stdc++.h>
#include <cuda.h>
#include <stdlib.h>
#define IFOR(v, s, e) for(int v = s; v < e; ++v)
#define UFOR(v, s, e) for(unsigned v = s; v < e; v++)
using namespace std;
/*
 * Helper collection for heap-allocated 1-D/2-D double matrices (arrays of
 * row pointers). All "init" helpers allocate with malloc; callers own and
 * must free the storage. Rewritten with plain for-loops (the IFOR/UFOR
 * macros added nothing and hid signed/unsigned mixing).
 * Fixes vs. original:
 *  - mat_multiply accumulated into uninitialized malloc'd memory;
 *  - mat_transpose looped i<r, j<c over a c x r buffer, reading and writing
 *    out of bounds whenever r != c (square inputs behaved correctly).
 */
class MatrixUtility
{
public:
    // Print a length-m vector on one line.
    void print1Dmat(double *arr, int m) {
        for (int i = 0; i < m; ++i)
            std::cout << arr[i] << " ";
        std::cout << '\n';
    }
    // Print an m x n matrix, nine decimals per entry.
    void print2Dmat(double **arr, int m, int n) {
        for (int i = 0; i < m; ++i)
        {
            for (int j = 0; j < n; ++j)
                printf("%0.9f ", arr[i][j]);
            std::cout << '\n';
        }
        std::cout << '\n';
    }
    // Allocate an uninitialized length-n vector into arr.
    void init_1D_mat(double *(&arr), int n) {
        arr = (double *)malloc(n * sizeof(double));
    }
    // Allocate an uninitialized row x col matrix into arr.
    void init_2D_mat(double **(&arr), int row, int col) {
        arr = (double **)malloc(row * sizeof(double *));
        for (int i = 0; i < row; ++i)
            arr[i] = (double *)malloc(col * sizeof(double));
    }
    // Return a newly allocated element-wise sum A + B (row x col).
    double **mat_add(double **(&A), double **(&B), int row, int col)
    {
        double **res = NULL;
        init_2D_mat(res, row, col);
        for (int i = 0; i < row; ++i)
            for (int j = 0; j < col; ++j)
                res[i][j] = A[i][j] + B[i][j];
        return res;
    }
    // Return the r1 x c2 matrix product a * b. Requires c1 == r2.
    // Fix: zero-initialize each cell before accumulating (the original
    // summed into uninitialized memory).
    double **mat_multiply(double **(&a), double **(&b), int r1, int c1, int r2, int c2) {
        double **c = NULL;
        init_2D_mat(c, r1, c2);
        for (int i = 0; i < r1; ++i)
            for (int j = 0; j < c2; ++j)
            {
                c[i][j] = 0.0;
                for (int k = 0; k < c1; ++k)
                    c[i][j] += a[i][k] * b[k][j];
            }
        return c;
    }
    // Return a newly allocated element-wise sum of two length-row vectors.
    double *vector_add(double *(&a), double *(&b), int row) {
        double *add = NULL;
        init_1D_mat(add, row);
        for (int i = 0; i < row; ++i)
            add[i] = a[i] + b[i];
        return add;
    }
    // In place: add the length-c vector b to every row of the r x c matrix a.
    double **add_2D_mat_1D_mat(double **a, double *b, int r, int c) {
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < c; ++j)
                a[i][j] += b[j];
        return a;
    }
    // In place: subtract b[i] from every entry of row i of a.
    // NOTE(review): asymmetric with add_2D_mat_1D_mat (which uses b[j],
    // i.e. per-column) -- confirm this per-row behavior is intended.
    double **diff_2D_mat_1D_mat(double **a, double *b, int r, int c) {
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < c; ++j)
                a[i][j] -= b[i];
        return a;
    }
    // In place: add an integer scalar to every entry.
    double **scalar_add_2D_mat(double **mat, int scalar, int r, int c) {
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < c; ++j)
                mat[i][j] += scalar;
        return mat;
    }
    // In place: divide every entry by a scalar.
    double **scalar_divide_2D_mat(double **mat, double scalar, int r, int c) {
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < c; ++j)
                mat[i][j] /= scalar;
        return mat;
    }
    // Return the newly allocated c x r transpose of the r x c matrix a.
    // Fix: iterate over the output's dimensions (c rows, r cols); the
    // original indexed out of bounds for non-square inputs.
    double **mat_transpose(double **a, int r, int c) {
        double **trans = NULL;
        init_2D_mat(trans, c, r);
        for (int i = 0; i < c; ++i)
            for (int j = 0; j < r; ++j)
                trans[i][j] = a[j][i];
        return trans;
    }
    // In place: multiply every entry by an integer scalar.
    double **scalar_multiply_2D_mat(double **mat, int scalar, int r, int c) {
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < c; ++j)
                mat[i][j] *= scalar;
        return mat;
    }
    // In place: divide every entry of a length-r vector by an integer scalar.
    double *scalar_divide_1D_mat(double *mat, int scalar, int r) {
        for (int i = 0; i < r; ++i)
            mat[i] /= scalar;
        return mat;
    }
    // In place: multiply every entry of a length-r vector by an integer scalar.
    double *scalar_multiply_1D_mat(double *mat, int scalar, int r) {
        for (int i = 0; i < r; ++i)
            mat[i] *= scalar;
        return mat;
    }
    // Return a newly allocated length-r vector of row sums of the r x c matrix a.
    double *sum_across_2nd_dim(double **a, int r, int c) {
        double *sum = NULL;
        init_1D_mat(sum, r);
        for (int i = 0; i < r; ++i)
        {
            sum[i] = 0;
            for (int j = 0; j < c; ++j)
                sum[i] += a[i][j];
        }
        return sum;
    }
    // In place: Hadamard product a[i][j] *= b[i][j]; returns a.
    double **element_wise_multiply(double **a, double **b, int r, int c) {
        for (int i = 0; i < r; ++i)
            for (int j = 0; j < c; ++j)
                a[i][j] *= b[i][j];
        return a;
    }
};
|
3,324 | /*
Programming on Massively Parallel Systems
Fall 2018
Project # 3
Student: Patricia Wilthew
Compile: nvcc proj3.cu -o proj3
Usage: ./proj3 {#of_elements_in_array1} {#of_elements_in_array2}
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <assert.h>
#include <thrust/scan.h>
#define RAND_RANGE(N) ((double)rand()/((double)RAND_MAX + 1)*(N))
#define PARTITION_LENGTH 128
#define SECTION_LENGTH 4
cudaError_t err;
__device__ int matches = 0;
/*
Function: catch_error
Description: Prints any CUDA error to stdout.
*/
/*
 Function: catch_error
 Description: Prints the given CUDA error (if any) to stdout.
 Fix: the original formatted the stale file-scope global `err` instead of
 the `error` argument it was handed, so the printed message never matched
 the error being reported.
*/
void catch_error(cudaError_t error)
{
    if (error)
    {
        printf("Error: %s\n", cudaGetErrorString(error));
    }
}
/*
Function: data_generator
Description: Fills `data` with the arithmetic sequence first, first+step,
first+2*step, ... and then randomizes the order with a Knuth
(Fisher-Yates) shuffle.
*/
void data_generator(int* data, int count, int first, int step)
{
    assert(data != NULL);
    // Populate the sequence.
    for (int k = 0; k < count; ++k)
    {
        data[k] = first + k * step;
    }
    // Shuffle in place, walking from the tail toward the head.
    srand(time(NULL));
    for (int hi = count - 1; hi > 0; hi--)
    {
        int pick = RAND_RANGE(hi);
        int held = data[hi];
        data[hi] = data[pick];
        data[pick] = held;
    }
}
/*
Function: bfe
Description: This function embeds PTX code of CUDA to extract bit
field from x.
Input:
start (uint): Starting bit position relative to the LSB.
nbits (uint): The bit field length.
Output: The extracted bit field as an unsigned integer.
*/
__device__ uint bfe(uint x, uint start, uint nbits)
{
uint bits;
// PTX bfe.u32: hardware bit-field extract of bits [start, start+nbits-1].
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(x), "r"(start), "r"(nbits));
return bits;
}
/*
Function: histogram
Description: In order to efficiently partition the input in parallel,
first we need to compute a histogram of the radix values by
scanning the array of keys so that we know the number of keys
that should go to each partition. Each block accumulates into its
own PARTITION_LENGTH-wide slice of `hist`, which must be
zero-initialized by the caller.
*/
__global__ void histogram(int *array, int array_length, int *hist)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int radix;
if (idx < array_length)
{
// 31 - __builtin_clz(PARTITION_LENGTH) == log2(PARTITION_LENGTH) for a
// power-of-two partition count, so this extracts the low 7 bits (0..127).
radix = (int) bfe(array[idx], 0, 31-__builtin_clz(PARTITION_LENGTH));
atomicAdd(&hist[blockIdx.x * PARTITION_LENGTH + radix], 1);
}
}
/*
Function: organize_histogram
Description: Transposes the per-block histogram layout. Input `hist` is
block-major ([w1, x1, y1, z1, w2, x2, y2, z2, ...]); the output is
radix-major ([w1, w2, w3, x1, x2, x3, ...]) so each radix's counts
from every block are contiguous.
*/
__global__ void organize_histogram(int *hist, int hist_length, int *organized)
{
    int num_blocks = hist_length / PARTITION_LENGTH;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < hist_length)
    {
        int radix = idx / num_blocks;   // which radix bucket this slot holds
        int block = idx % num_blocks;   // which source block it came from
        organized[idx] = hist[PARTITION_LENGTH * block + radix];
    }
}
/*
Function: Kogge_Stone_block_scan_kernel
Description: First of the three kernels of the hierarchical scan. Performs
a Kogge-Stone inclusive prefix sum over each SECTION_LENGTH-element
block of X, writing the per-block scan into Y and each block's total
into S[blockIdx.x]. Must be launched with blockDim.x == SECTION_LENGTH.
*/
__global__ void Kogge_Stone_block_scan_kernel(int *X, int *Y, int *S, int length)
{
__shared__ int XY[SECTION_LENGTH];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// BUG FIX: out-of-range threads previously left their shared slot
// uninitialized (feeding garbage into the scan) and then wrote Y[idx]
// out of bounds. Zero-fill the slot -- the identity for addition, so
// in-range results are unchanged -- and guard the global write.
XY[threadIdx.x] = (idx < length) ? X[idx] : 0;
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
{
__syncthreads();
if (threadIdx.x >= stride) XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if (idx < length)
{
Y[idx] = XY[threadIdx.x];
}
__syncthreads();
// The last thread of the block publishes the block total for kernel 2.
if (threadIdx.x == blockDim.x - 1)
{
S[blockIdx.x] = XY[SECTION_LENGTH - 1];
}
}
/*
Function: parallel_scan_kernel
Description: The second kernel of the hierarchical scan: a single-level
Kogge-Stone inclusive scan over the block totals. Must be launched
with blockDim.x == SECTION_LENGTH.
*/
__global__ void parallel_scan_kernel(int *X, int *Y, int length)
{
__shared__ int XY[SECTION_LENGTH];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// BUG FIX: out-of-range threads previously scanned uninitialized shared
// memory and wrote Y[idx] out of bounds; zero-fill (addition identity)
// and guard the global write instead.
XY[threadIdx.x] = (idx < length) ? X[idx] : 0;
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
{
__syncthreads();
if (threadIdx.x >= stride) XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if (idx < length)
{
Y[idx] = XY[threadIdx.x];
}
}
/*
Function: add_S_to_Y_kernel
Description: The third kernel of the hierarchical scan takes the S and Y
arrays as inputs and writes its output back into Y. Assuming that we
launch the kernel with SECTION_LENGTH threads in each block, each thread
adds one of the S elements (selected by blockIdx.x-1) to one Y element.
*/
__global__ void add_S_to_Y_kernel(int *Y, int *S, int length)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int j, sum;
if (blockIdx.x > 0 && idx < length)
{
// Carry in the (scanned) total of the immediately preceding section.
Y[idx] += S[blockIdx.x - 1];
// Also add the prefix sum of previous sections.
// NOTE(review): S was itself scanned in SECTION_LENGTH-sized groups, so
// this walks the last element of each fully-scanned earlier group
// (indices 3, 7, 11, ...). The stride is a hard-coded 4, which only
// matches SECTION_LENGTH == 4 -- confirm before changing that constant.
if (blockIdx.x >= SECTION_LENGTH)
{
sum = 0;
for (j = SECTION_LENGTH - 1; j < blockIdx.x - 1; j += 4)
{
sum += S[j];
}
Y[idx] += sum;
}
}
}
/*
Function: shift_right
Description: Right-shifts `original` by one element into `shifted`,
inserting 0 at index 0; this turns an inclusive prefix sum into an
exclusive one. The launch must supply exactly one thread per element
(there is no bounds guard).
*/
__global__ void shift_right(int *original, int *shifted)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Element 0 of an exclusive scan is always 0; everything else moves up one.
    shifted[idx] = (idx == 0) ? 0 : original[idx - 1];
}
/*
Function: reduce_prefix_sum
Description: Extracts every p_length-th entry of `prefix_sum` -- the start
index of each partition -- into both `reduced` and `reduced_copy`
(the copy survives re_order, which consumes `reduced`). Launched with
one single-threaded block per partition (PARTITION_LENGTH blocks).
*/
__global__ void reduce_prefix_sum(int *prefix_sum, int *reduced, int *reduced_copy, int p_length)
{
    int start = prefix_sum[blockIdx.x * p_length];
    reduced[blockIdx.x] = start;
    reduced_copy[blockIdx.x] = start;
}
/*
Function: re_order
Description: Scatters `array` into `ordered`, grouping keys by radix.
`prefix_sum` holds the running write cursor for each radix bucket and
is advanced atomically -- it is consumed (mutated) by this kernel, so
callers needing the partition starts afterwards must keep a copy.
*/
__global__ void re_order(int *prefix_sum, int *array, int *ordered, int length)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < length)
{
// BUG FIX: the original read array[idx] (via bfe) BEFORE the bounds
// check, an out-of-range global read for the tail threads of the grid.
int radix = (int) bfe(array[idx], 0, 31-__builtin_clz(PARTITION_LENGTH));
// `offset` is the bucket cursor's value prior to the increment.
int offset = atomicAdd(&prefix_sum[radix], 1);
ordered[offset] = array[idx];
}
}
/*
Function: probe
Description: Join probe stage. With both inputs reordered by radix, each
block nested-loop compares the keys of its partition in r against the
same partition in s, counting equal pairs and accumulating the total
into the device-global `matches`. Launched with one single-threaded
block per partition (PARTITION_LENGTH blocks).
*/
__global__ void probe(int *r_delim, int *s_delim, int *r, int *s, int r_length, int s_length)
{
    // Inclusive [lo, hi] bounds of this block's partition in each array.
    int r_lo = r_delim[blockIdx.x];
    int s_lo = s_delim[blockIdx.x];
    int r_hi, s_hi;
    if (blockIdx.x == PARTITION_LENGTH - 1)
    {
        // The last partition runs to the end of each array.
        r_hi = r_length - 1;
        s_hi = s_length - 1;
    }
    else
    {
        r_hi = r_delim[blockIdx.x + 1] - 1;
        s_hi = s_delim[blockIdx.x + 1] - 1;
    }
    // Nested-loop comparison within the partition only.
    int found = 0;
    for (int a = r_lo; a <= r_hi; a++)
    {
        for (int b = s_lo; b <= s_hi; b++)
        {
            if (r[a] == s[b])
            {
                found += 1;
            }
        }
    }
    atomicAdd(&matches, found);
}
// Prints the final join cardinality accumulated in the device-global
// `matches` counter. Must run after probe() has completed.
__global__ void print_matches()
{
printf("---> %d matches\n", matches);
}
// Driver: generates two shuffled integer arrays, radix-partitions each on
// the GPU (histogram -> hierarchical scan -> reorder), then probes the
// matching partitions to count equal keys, timing each kernel with events.
int main(int argc, char const *argv[])
{
if (argc <= 2)
{
printf("Usage: ./proj3.out {#of_elements_in_array1} {#of_elements_in_array2}\n");
exit(1);
}
int r_length = atoi(argv[1]);
int s_length = atoi(argv[2]);
int r_size = sizeof(int)*r_length;
int s_size = sizeof(int)*s_length;
int r_hist_len, r_hist_size;
int s_hist_len, s_hist_size;
int blocks;
double threads = 1024.0;
float time, total_time = 0.0;
int *r_host, *r_hist, *r_prefix_sum, *r_ordered, *r_reduced_prefix,
*r_reduced_prefix_copy;
int *s_host, *s_hist, *s_prefix_sum, *s_ordered, *s_reduced_prefix,
*s_reduced_prefix_copy;
int *X, *Y, *S, *S2;
// Allocate arrays in host and device.
// NOTE(review): r_host/s_host are pinned HOST memory (cudaMallocHost) yet
// are passed directly to device kernels below; this relies on unified
// addressing / zero-copy access -- confirm this is intentional.
cudaMallocHost((void**)&r_host, r_size);
cudaMallocHost((void**)&s_host, s_size);
// Populate arrays.
data_generator(r_host, r_length, 0, 1);
data_generator(s_host, s_length, 0, 1);
// Recording variables.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/************************************
* *
* Order r into partitioned array *
* *
************************************/
blocks = ceil(r_length/threads);
r_hist_len = PARTITION_LENGTH * blocks;
r_hist_size = sizeof(int)*r_hist_len;
cudaMalloc((void**)&r_hist, r_hist_size); cudaMemset(r_hist, 0, r_hist_size);
cudaMalloc((void**)&X, r_hist_size);
cudaMalloc((void**)&Y, r_hist_size);
cudaMalloc((void**)&S, r_hist_size/SECTION_LENGTH);
cudaMalloc((void**)&S2, r_hist_size/SECTION_LENGTH);
cudaMalloc((void**)&r_prefix_sum, r_hist_size);
cudaMalloc((void**)&r_reduced_prefix, PARTITION_LENGTH*sizeof(int));
cudaMalloc((void**)&r_reduced_prefix_copy, PARTITION_LENGTH*sizeof(int));
cudaMalloc((void**)&r_ordered, r_size); cudaMemset(r_ordered, 0, r_size);
// Histogram for r: r_hist.
cudaEventRecord(start, 0);
histogram<<<blocks, threads>>>(r_host, r_length, r_hist);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Organized histogram for r: X
cudaEventRecord(start, 0);
organize_histogram<<<ceil(r_hist_len/threads), threads>>>(
r_hist, r_hist_len, X);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Hierarchical scan for r (Part I): Y and S.
cudaEventRecord(start, 0);
Kogge_Stone_block_scan_kernel<<<
ceil(r_hist_len/(SECTION_LENGTH)),
SECTION_LENGTH>>>(X, Y, S, r_hist_len);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Hierarchical scan for r (Part II): S2.
cudaEventRecord(start, 0);
parallel_scan_kernel<<<ceil(r_hist_len/SECTION_LENGTH), SECTION_LENGTH>>>(
S, S2, r_hist_len/SECTION_LENGTH);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Hierarchical scan for r (Part III): Y.
cudaEventRecord(start, 0);
add_S_to_Y_kernel<<<
ceil(r_hist_len/SECTION_LENGTH),
SECTION_LENGTH>>>(Y, S2, r_hist_len);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Convert to exclusive prefix sum.
cudaEventRecord(start, 0);
shift_right<<<
ceil(r_hist_len/SECTION_LENGTH),
SECTION_LENGTH>>>(Y, r_prefix_sum);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Obtain reduced prefix sum (of partition size)
cudaEventRecord(start, 0);
reduce_prefix_sum<<<PARTITION_LENGTH, 1>>>(
r_prefix_sum, r_reduced_prefix, r_reduced_prefix_copy, blocks);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Reorder R array.
cudaEventRecord(start, 0);
re_order<<<ceil(r_length/threads), threads>>>(
r_reduced_prefix, r_host, r_ordered, r_length);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
/************************************
* *
* Order s into partitioned array *
* *
************************************/
blocks = ceil(s_length/threads);
s_hist_len = PARTITION_LENGTH * blocks;
s_hist_size = sizeof(int)*s_hist_len;
// NOTE(review): X, Y, S and S2 are cudaMalloc'd again here without freeing
// the allocations from the r phase above -- device memory leak.
cudaMalloc((void**)&s_hist, s_hist_size); cudaMemset(s_hist, 0, s_hist_size);
cudaMalloc((void**)&X, s_hist_size);
cudaMalloc((void**)&Y, s_hist_size);
cudaMalloc((void**)&S, s_hist_size/SECTION_LENGTH);
cudaMalloc((void**)&S2, s_hist_size/SECTION_LENGTH);
cudaMalloc((void**)&s_prefix_sum, s_hist_size);
cudaMalloc((void**)&s_reduced_prefix, PARTITION_LENGTH*sizeof(int));
cudaMalloc((void**)&s_reduced_prefix_copy, PARTITION_LENGTH*sizeof(int));
cudaMalloc((void**)&s_ordered, s_size); cudaMemset(s_ordered, 0, s_size);
// Histogram for s: s_hist.
cudaEventRecord(start, 0);
histogram<<<blocks, threads>>>(s_host, s_length, s_hist);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Organized histogram for s: X
cudaEventRecord(start, 0);
organize_histogram<<<ceil(s_hist_len/threads), threads>>>(
s_hist, s_hist_len, X);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Hierarchical scan for s (Part I): Y and S.
cudaEventRecord(start, 0);
Kogge_Stone_block_scan_kernel<<<
ceil(s_hist_len/(SECTION_LENGTH)),
SECTION_LENGTH>>>(X, Y, S, s_hist_len);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Hierarchical scan for s (Part II): S2.
cudaEventRecord(start, 0);
parallel_scan_kernel<<<ceil(s_hist_len/SECTION_LENGTH), SECTION_LENGTH>>>(
S, S2, s_hist_len/SECTION_LENGTH);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Hierarchical scan for s (Part III): Y.
cudaEventRecord(start, 0);
add_S_to_Y_kernel<<<
ceil(s_hist_len/SECTION_LENGTH),
SECTION_LENGTH>>>(Y, S2, s_hist_len);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Convert to exclusive prefix sum.
cudaEventRecord(start, 0);
shift_right<<<
ceil(s_hist_len/SECTION_LENGTH),
SECTION_LENGTH>>>(Y, s_prefix_sum);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Obtain reduced prefix sum (of partition size)
cudaEventRecord(start, 0);
reduce_prefix_sum<<<PARTITION_LENGTH, 1>>>(
s_prefix_sum, s_reduced_prefix, s_reduced_prefix_copy, blocks);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
// Reorder s array.
cudaEventRecord(start, 0);
re_order<<<
ceil(s_length/threads),
threads>>>(s_reduced_prefix, s_host, s_ordered, s_length);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
/************************************
* *
* Probing *
* *
************************************/
cudaEventRecord(start, 0);
probe<<<PARTITION_LENGTH, 1>>>(
r_reduced_prefix_copy, s_reduced_prefix_copy, r_ordered, s_ordered,
r_length, s_length);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
total_time += time;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceSynchronize();
printf(
"******** Total Running Time of All Kernels = %.5f sec ********\n",
total_time/1000.0);
print_matches<<<1, 1>>>();
cudaDeviceSynchronize();
// NOTE(review): r_host/s_host (pinned) and the *_reduced_prefix_copy
// buffers are never freed.
cudaFree(X);
cudaFree(Y);
cudaFree(S);
cudaFree(S2);
cudaFree(r_hist);
cudaFree(r_prefix_sum);
cudaFree(r_reduced_prefix);
cudaFree(r_ordered);
cudaFree(s_hist);
cudaFree(s_prefix_sum);
cudaFree(s_reduced_prefix);
cudaFree(s_ordered);
return 0;
}
|
3,325 | #include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// add() will execute on the device and will be called from the host
// as add runs on the device, we need to use pointers because a,b and c must point to device memory and we need to allocate memory on the GPU
// Element-wise add c = a + b; one thread per element, no bounds guard, so
// the launch must supply exactly as many threads as elements.
__global__ void add(int *a, int *b, int *c)
{
    const int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Minimal end-to-end example: copy two 3-element vectors to the GPU, add
// them with one 3-thread block, copy the sum back and print it.
int main(void)
{
    // Host copies of the operands and result.
    int a[] = { 1, 2, 3 };
    int b[] = { 4, 5, 6 };
    int c[] = { 0, 0, 0 };
    const int size = sizeof(int) * 3;
    // Device copies.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Ship the inputs to the GPU.
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
    // One block, one thread per element.
    add<<<1,3>>>(d_a, d_b, d_c);
    // Bring the result home (cudaMemcpy is blocking, so no explicit sync).
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    for (size_t i = 0; i < 3; i++)
    {
        printf("Val is : %d \n", c[i]);
    }
    // check error
    printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
    return 0;
}
|
3,326 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (see file header: "Do not
// modify"). Left byte-for-byte intact: every operation participates in the
// printed result and float evaluation order is significant here.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float* var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24) {
if (comp > -1.5208E35f / (-1.8580E0f + fmodf((-1.2399E36f - fabsf(var_2 * var_3)), -1.4105E20f))) {
// Deliberate 0/0-style expression: tmp_1 is NaN by construction.
float tmp_1 = +0.0f / (+0.0f * +1.7331E35f);
comp += tmp_1 * var_4 * +1.5062E-44f - -1.0430E-6f + (var_5 + var_6);
comp += (+1.4624E1f - (var_7 - var_8));
if (comp >= var_9 / +1.7018E-37f * (+1.0035E35f * var_10 - var_11)) {
float tmp_2 = -1.8299E36f + var_12 + ceilf((-1.6404E-10f / (var_13 + (-1.1421E34f - var_14))));
comp += tmp_2 / +1.2436E-41f * var_15 + (var_16 - fabsf(var_17 + -1.9157E36f));
}
for (int i=0; i < var_1; ++i) {
var_18[i] = (+1.7607E35f * (+1.7467E36f * (var_19 / (+0.0f / +1.7233E-25f * var_20))));
comp = var_18[i] / fabsf(var_21 - atanf(var_22 - (+1.5075E-3f + var_23 * var_24)));
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float buffer with every slot set to `v`.
// Caller owns (and must free) the returned memory.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    int k = 0;
    while (k < 10) {
        buf[k] = v;
        ++k;
    }
    return buf;
}
// Auto-generated driver: parses 25 positional arguments and launches the
// compute kernel with a single thread.
// NOTE(review): argv[1..25] are read without checking argc -- running with
// fewer arguments dereferences past the argv array. Left as generated.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
// Host-allocated 10-element buffer; passed to the kernel as var_18.
// NOTE(review): a host malloc pointer handed to a device kernel -- only
// valid on unified/managed-memory systems; also never freed.
float* tmp_19 = initPointer( atof(argv[19]) );
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25);
cudaDeviceSynchronize();
return 0;
}
|
3,327 | #include <iostream>
#include <stdio.h>
#include <time.h>
using namespace std;
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
#define threadsPerBlock 1024
// Initializes rod_new[0..rod_size) with the analytic initial field profile.
// Work is statically partitioned across the threadsPerBlock threads of the
// single launched block: the first `rem` threads take one extra element so
// every entry is covered exactly once.
__global__ void init(double *rod_new, double imax, double ldr, double rlength, int rod_size){
    int rem = rod_size % threadsPerBlock;
    int divi = rod_size / threadsPerBlock;
    // Half-open range [lo, hi) owned by this thread.
    int lo, hi;
    if (threadIdx.x < rem) {
        lo = threadIdx.x * (divi + 1);
        hi = lo + divi + 1;
    } else {
        lo = threadIdx.x * divi + rem;
        hi = lo + divi;
    }
    for (int i = lo; i < hi; i++) {
        rod_new[i] = (1-(i*i*ldr*ldr/(3*rlength*rlength)))*3*mu0*imax*i*ldr/(4*PI*rlength*rlength);
    }
}
// Time-steps the diffusion update on the rod for maxSteps iterations, using
// the same static work partition as init(). Intended scheme: copy the
// current field into an "old" shared buffer, then update each interior
// point from its old neighbours.
// NOTE(review): rod_new_s and rod_old_s are BOTH declared `extern
// __shared__`; in CUDA all extern-shared declarations alias the SAME
// dynamic shared-memory allocation, so these two arrays are one buffer and
// the "old" copy does not actually preserve the previous values. Fixing
// this needs a single extern array manually split into two halves AND a
// doubled dynamic-shared-memory size at the launch site in main().
__global__ void run(double *rod_new, double aug, long int maxSteps, int rod_size){
int rem = rod_size%threadsPerBlock;
int divi = rod_size/threadsPerBlock;
// Half-open range [start, fin) owned by this thread.
int start, fin;
if(threadIdx.x<rem){
start = threadIdx.x*(divi+1);
fin = start + divi + 1;
}
else{
start = threadIdx.x*divi + rem;
fin = start + divi;
}
long int steps = 0;
extern __shared__ double rod_new_s[];
extern __shared__ double rod_old_s[];
// Stage the field into shared memory once before iterating.
for(int i = start; i<fin; i++){
rod_new_s[i] = rod_new[i];
}
__syncthreads();
while(steps<maxSteps){
// Snapshot the current field (see aliasing NOTE above).
for(int i = start; i<fin; i++){
rod_old_s[i] = rod_new_s[i];
}
__syncthreads();
// Update interior points; boundaries (0 and rod_size-1) stay fixed.
for(int i = start; i<fin; i++){
if(i==1)
rod_new_s[1]+= aug*(2*rod_old_s[2] - 4*rod_old_s[1]);
else if(i<(rod_size - 1) && i > 1)
rod_new_s[i] += aug*((1+(1/(2*i)))*rod_old_s[i+1] + (-2-(1/(i*i)))*rod_old_s[i] + (1-(1/(2*i)))*rod_old_s[i-1]);
}
steps++;
__syncthreads();
}
// Write the final state back to global memory.
for(int i = start; i<fin; i++){
rod_new[i] = rod_new_s[i];
}
}
// Driver: reads simulation parameters interactively, initializes the rod on
// the GPU, runs the time-stepping kernel, and writes r values plus the
// initial and final field profiles to results.txt.
int main(){
FILE *myfile;
// NOTE(review): fopen/scanf results are unchecked throughout.
myfile = fopen("results.txt", "w");
double imax, rlength, eta, tstep, ldr, tottime;
int numseg;
printf("What is your I max? ");
scanf("%lf", &imax);
printf("What is the length of your rod? ");
scanf("%lf", &rlength);
printf("What is eta? ");
scanf("%lf", &eta);
printf("How many segments would you like? ");
scanf("%d", &numseg);
ldr = rlength/(numseg+1);
// Stability-limited time step.
tstep = 0.25*ldr*ldr*mu0/eta;
printf("How long would you like to run? ");
scanf("%lf", &tottime);
double *h_rod, *d_rod;
size_t rod_size = (numseg + 2) * sizeof(double);
h_rod = (double*)malloc(rod_size);
cudaMalloc(&d_rod, rod_size);
init<<<1,threadsPerBlock>>>(d_rod, imax, ldr, rlength, numseg + 2);
int out;
//output r values
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", out*ldr );
}
fprintf( myfile, "%lf\n", out*ldr );
// Blocking copy also synchronizes with the init kernel.
cudaMemcpy(h_rod, d_rod, rod_size, cudaMemcpyDeviceToHost);
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", *(h_rod+out) );
}
fprintf( myfile, "%lf\n", *(h_rod+out) );
double aug = eta*tstep/(mu0*ldr*ldr);
long int total_steps = tottime / tstep;
printf("\nSteps: %ld\n", total_steps);
clock_t begin, end;
double time_spent;
begin = clock();
//run
// NOTE(review): only ONE array's worth of dynamic shared memory is passed
// here, while run() declares two extern __shared__ arrays (which alias --
// see the note on run()). A fix there would need 2x the size here.
run<<<1,threadsPerBlock, (numseg+2)*sizeof(double)>>>(d_rod, aug, total_steps, numseg+2);
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(h_rod, d_rod, rod_size, cudaMemcpyDeviceToHost);
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", *(h_rod+out) );
}
fprintf( myfile, "%lf\n", *(h_rod+out) );
fprintf(myfile, "STOP\n");
fclose(myfile);
free(h_rod);
cudaFree(d_rod);
cout << "\n------------------------------------\nExecution took: "<< time_spent << " sec\n";
return 0;
}
|
3,328 | #include "includes.h"
// For each proposal (blocks grid-stride over proposals), computes the
// per-channel mean of the inp rows in [offsets[p], offsets[p+1]) and writes
// it to out[p * C + channel]. Threads within a block stride over channels.
__global__ void sec_mean_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){
    for (int p = blockIdx.x; p < nProposal; p += gridDim.x) {
        int lo = offsets[p];
        int hi = offsets[p + 1];
        float count = (float)(hi - lo);
        for (int ch = threadIdx.x; ch < C; ch += blockDim.x) {
            float acc = 0;
            // Divide each term by the count (matches the original's
            // accumulation order exactly).
            for (int row = lo; row < hi; row++) {
                acc += (inp[row * C + ch] / count);
            }
            out[p * C + ch] = acc;
        }
    }
}
3,329 | #include "includes.h"
// One step of a Hillis-Steele inclusive scan:
// d_tmpArray[i] = d_array[i] + d_array[i - moveIndex] when the partner
// index exists, else just d_array[i]. The host ping-pongs the two buffers
// and doubles moveIndex between steps.
__global__ void hillisSteeleScanDevice(int *d_array , int numberOfElements, int *d_tmpArray,int moveIndex)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    // BUG FIX: the original guard used `>`, letting index == numberOfElements
    // through and causing a one-past-the-end read/write.
    if (index >= numberOfElements)
    {
        return;
    }
    d_tmpArray[index] = d_array[index];
    if (index - moveIndex >= 0)
    {
        d_tmpArray[index] = d_tmpArray[index] + d_array[index - moveIndex];
    }
}
3,330 | #include "includes.h"
// Gradient of the mean-squared-error cost with respect to the predictions:
// dY[i] = 2 * (predictions[i] - target[i]). One thread per element.
__global__ void dMSECost(float* predictions, float* target, float* dY, int size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    dY[i] = 2 * (predictions[i] - target[i]);
}
3,331 | #define BLOCK_SIZE_M 96
#define BLOCK_SIZE_N 64
#define ROUND_UP(n, d) (n + d - 1) / d
// Chooses a 2D grid that tiles an n x n problem with BLOCK_SIZE_N-wide,
// BLOCK_SIZE_M-tall tiles (ceiling division). Block dimensions are left
// untouched for the caller to set.
void setGrid(int n, dim3 &blockDim, dim3 &gridDim) {
    const int tilesX = ROUND_UP(n, BLOCK_SIZE_N);
    const int tilesY = ROUND_UP(n, BLOCK_SIZE_M);
    gridDim.x = tilesX;
    gridDim.y = tilesY;
}
|
3,332 | #include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<time.h>
#include<cuda.h>
/*
__global__ void multiply(int *val, int *vec, int *result, int *cols, int *rowptr)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int sum=0;
int i;
for(i=0;i<cols[colidx];i++)
{
sum += vec[rowptr[tid]+i]*val[rowptr[tid]+i];
}
__syncthreads();
result[tid]=sum;
}
__global__ void printmatscreen(int* mat, int N)
{
int i;
for (i=0;i<N;i++)
{
printf("%d ",mat[i]);
}
printf("\n");
}
*/
// Allocates an arraySizeX x arraySizeY matrix of ints, zero-initialized.
// Returns NULL on allocation failure (the original dereferenced malloc
// results unchecked). Caller frees each row, then the row-pointer array.
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
    int** theArray = (int**) malloc(arraySizeX*sizeof(int*));
    if (theArray == NULL)
        return NULL;
    int i;
    for (i = 0; i < arraySizeX; i++)
    {
        // calloc zero-fills, replacing the original's separate init loops.
        theArray[i] = (int*) calloc(arraySizeY, sizeof(int));
        if (theArray[i] == NULL)
        {
            // Unwind the rows allocated so far.
            while (i-- > 0)
                free(theArray[i]);
            free(theArray);
            return NULL;
        }
    }
    return theArray;
}
void init_zeros(int** matrix, int N)
{
int i,j;
for (i=0;i<N;i++)
{
for (j=0;j<N;j++)
{
matrix[i][j]=0;
}
}
}
// Prints the N x N matrix to stdout, one tab-separated row per line
// (each row preceded by a newline, matching the original layout).
void printmat(int** matrix, int N)
{
    for (int r = 0; r < N; r++)
    {
        printf("\n");
        for (int c = 0; c < N; c++)
        {
            printf("%d \t", matrix[r][c]);
        }
    }
    printf("\n");
}
// Allocates a zero-initialized int array of arraySizeX elements.
// Returns NULL on allocation failure (the original wrote through an
// unchecked malloc result). Caller owns the returned memory.
int* Make1DIntArray(int arraySizeX) {
    // calloc zero-fills in one call, replacing the manual init loop.
    return (int*) calloc(arraySizeX, sizeof(int));
}
// Frees a 2D matrix allocated as sizeX separately-malloc'd row pointers.
// sizeY is unused; it is kept for interface compatibility with callers.
void freese(int sizeX, int sizeY, double** ptr)
{
    for (int r = 0; r < sizeX; r++)
        free(ptr[r]);
    free(ptr);
}
/*
Prints the first K elements of `matrix` to `filename`, one integer per
line. Silently returns if the file cannot be opened.
*/
void printtofile1D(int* matrix, int K, char* filename)
{
    FILE *fp = fopen(filename, "wt");
    // BUG FIX: the original neither checked fopen's result (NULL deref on
    // failure) nor closed the stream, leaking the FILE handle and risking
    // unflushed output at exit.
    if (fp == NULL)
        return;
    for (int i = 0; i < K; i++)
    {
        fprintf(fp, "%d\n", matrix[i]);
    }
    fclose(fp);
}
// Driver: loads a dense 1000x1000 matrix from matrix1000.txt, converts it
// to CSR (val/col/row), loads a vector, then performs the sparse
// matrix-vector product ON THE CPU (the GPU kernel is commented out above),
// timing the loop with CUDA events, and writes the result to results.txt.
int main()
{
const int N=1000;
FILE *arr, *vec;
int i,j;
int** a=Make2DIntArray(N,N);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
//int val[10],col[10],row[10];
int* resultsordered=Make1DIntArray(N);
// int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
// int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_rowptr;
// NOTE(review): fopen and every fscanf below are unchecked; a missing
// input file crashes or silently reads garbage.
arr=fopen("matrix1000.txt","r");
int k=0,cinrow=0;
//row[0]=0;
// First pass: read the dense matrix and count nonzeros (k).
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
if(a[i][j])
{
k++;
}
}
cinrow=0;
}
int Dsize=k;
int* val=Make1DIntArray(Dsize);
int* col=Make1DIntArray(Dsize);
int* row=Make1DIntArray(N+1);
printf("\n k = %d\n ", k);
//sleep(10);
// Second pass: build the CSR representation.
k=0;
row[0]=0;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j])
{
val[k]=a[i][j];
col[k]=j;
cinrow++;
k++;
}
}
row[i+1]=row[i]+cinrow;
cinrow=0;
}
// sleep(5);
// NOTE(review): i == N here, so this rewrites row[N] with the same value
// the loop already stored -- redundant but harmless.
row[i]=k;
vec=fopen("vector1000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
}
/*printf("\n Vector is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",vecX[i]);
}
//printing val, col and row
/*
printf("Val=");
for(i=0;i<Dsize;i++)
{
printf("%d\t",val[i]);
}
printf("\n");
printf("col=");
for(i=0;i<Dsize;i++)
{
printf("%d\t",col[i]);
}
printf("\n");
printf("row=");
for(i=0;i<k;i++)
{
printf("%d\t",row[i]);
}
*/
printf("\n");
/*Now the actual multiplication kernel*/
/*struct timeval start, end;
gettimeofday(&start, NULL);
*/
cudaEvent_t start_kernel, stop_kernel;
float time_kernel;
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaEventRecord(start_kernel,0);
// CSR SpMV, executed on the host (CUDA events here time CPU work).
for (i=0;i<N;i++)
{
for (j=row[i];j<row[i+1];j++)
{
result[i]+=val[j]*vecX[col[j]];
}
}
cudaEventRecord(stop_kernel,0);
/* gettimeofday(&end, NULL);
double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
end.tv_usec - start.tv_usec) / 1.e6;
printf("\nTime spent=%f\n", delta);
*/ cudaEventSynchronize(stop_kernel);
// multiply<<<N,1>>>(val, vecX, result, col, row);
cudaEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
printtofile1D(result,N,"results.txt");
/* printf("\n Result is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",result[i]);
}
*/
// NOTE(review): a, result, vecX, resultsordered, val, col, row and the
// two FILE handles are never released; fine at process exit, but worth
// cleaning up if this ever becomes a library routine.
return 0;
}
|
// Converts each float4 RGBA pixel to grayscale in place, weighting the
// R/G/B channels 0.3/0.6/0.1 and zeroing alpha. Flat 1D indexing over a
// 2D thread block.
__global__ void grayscale(float4* imagem, int width, int height)
{
    const int pixel = blockIdx.x * (blockDim.x * blockDim.y) + blockDim.x * threadIdx.y + threadIdx.x;
    if (pixel >= width * height)
        return;
    float v = 0.3 * imagem[pixel].x + 0.6 * imagem[pixel].y + 0.1 * imagem[pixel].z;
    imagem[pixel] = make_float4(v, v, v, 0);
}
// C-linkage host launcher: reinterprets the raw float buffer as float4
// RGBA pixels and launches the grayscale kernel with the caller's
// grid/block configuration. Asynchronous; the caller must synchronize.
extern "C" void cuda_grayscale(float* imagem, int width, int height, dim3 blocks, dim3 block_size)
{
grayscale <<< blocks, block_size >>> ((float4*)imagem, width, height);
}
|
3,334 | #include <iostream>
#include <math.h>
// Element-wise vector add: c = a + b. Grid-stride loop, so any launch
// configuration (including a single block) covers all n elements.
__global__
void vecAdd(int n, float *a, float *b, float *c)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
    {
        c[i] = a[i] + b[i];
    }
}
// Unified-memory vector-add demo: fills x and y on the host, prefetches
// them to the GPU, runs vecAdd, and verifies every element of z is 3.0f.
int main(void)
{
int N = 1<<20;
float *x, *y, *z;
float msec;
cudaEvent_t start, stop;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
cudaMallocManaged(&z, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
int blockSize = 256;
int numBlocks = 12; // good enough for P620
cudaEventRecord(start);
// Prefetch the data to the GPU
// NOTE(review): z is not prefetched to the device here, so its first
// device-side writes may page-fault -- a performance (not correctness)
// concern for the timed region.
int device;
cudaGetDevice(&device);
cudaMemPrefetchAsync(x, N*sizeof(float), device, NULL);
cudaMemPrefetchAsync(y, N*sizeof(float), device, NULL);
vecAdd<<<numBlocks, blockSize>>>(N, x, y, z);
cudaEventRecord(stop);
// Wait for GPU to finish before accessing on host
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msec, start, stop);
printf("Kernel time: %f ms\n", msec);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
// Hint z back to host memory before the CPU sweep below.
cudaMemPrefetchAsync(z, N*sizeof(float), cudaCpuDeviceId, NULL);
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(z[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
cudaFree(z);
return 0;
}
|
3,335 | //
// cuda_update_live.cu
// LHON-Form
//
// Created by Pooya Merat in 2016.
//
// One thread per axon. An alive axon whose central pixel's toxin exceeds
// death_tox_thres is killed: all of its interior pixels switch to
// dead-axon diffusion/detox rates, receive a toxin burst, stop producing
// toxin, and are re-labelled in the mask; its surrounding rate entries are
// updated too, the axon is flagged dead with the current iteration number,
// and the global alive counter is decremented atomically.
extern "C" __global__ void cuda_update_live(int n_axons, float* tox, float* rate, float* detox, float* tox_prod, float on_death_tox, float k_rate_dead_axon, float k_detox_extra, float death_tox_thres,
unsigned int * axons_cent_pix, unsigned int* axons_inside_pix, unsigned int* axons_inside_pix_idx, unsigned int* axon_surr_rate, unsigned int* axon_surr_rate_idx,
bool* axon_is_alive, unsigned char* axon_mask, int* num_alive_axons, int* death_itr, int iteration)
{
int n = threadIdx.x + blockIdx.x * blockDim.x;
if (n < n_axons)
{
if (axon_is_alive[n] && tox[axons_cent_pix[n]] > death_tox_thres)
{ // Kill the axon
// axons_inside_pix_idx is a CSR-style offset array: this axon's
// interior pixels are axons_inside_pix[idx[n] .. idx[n+1]).
for (int p = axons_inside_pix_idx[n]; p < axons_inside_pix_idx[n + 1]; p++)
{
int idx = axons_inside_pix[p];
// rate stores 4 entries per pixel (hence the *4 indexing).
int idx4 = 4 * idx;
rate[idx4] = k_rate_dead_axon;
rate[idx4 + 1] = k_rate_dead_axon;
rate[idx4 + 2] = k_rate_dead_axon;
rate[idx4 + 3] = k_rate_dead_axon;
detox[idx] = k_detox_extra;
tox[idx] += on_death_tox;
tox_prod[idx] = 0;
axon_mask[idx] = 2; // dead
}
// Same CSR layout for the axon's surrounding rate entries.
for (int p = axon_surr_rate_idx[n]; p < axon_surr_rate_idx[n + 1]; p++)
rate[axon_surr_rate[p]] = k_rate_dead_axon;
axon_is_alive[n] = false;
death_itr[n] = iteration;
atomicAdd(&num_alive_axons[0], -1);
}
}
}
|
3,336 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// For each source vertex's segment of edges (edges sorted/grouped by src),
// writes the segment's final scan result into output[src]. An edge is the
// last of its segment when the next edge has a different source.
__global__ void collateSegments_gpu(int * src, int * scanResult, int * output, int numEdges) {
int numThreads = blockDim.x * gridDim.x; //total number of threads
int tid = blockDim.x * blockIdx.x + threadIdx.x; // global index of the thread
int i;
/* Grid-stride loop: each thread handles edges tid, tid+numThreads, ... */
for(i = tid; i < numEdges; i += numThreads)
{
// BUG FIX: the original always read src[i+1], an out-of-bounds access
// for the last edge (i == numEdges - 1). The last edge is necessarily
// the end of its segment, so short-circuit before touching src[i+1].
if(i == numEdges - 1 || src[i] != src[i+1]){
// Last edge of this source's segment: publish its scan result.
output[src[i]] = scanResult[i];
}
}
}
|
3,337 |
#include <sstream>
#include <iostream>
#include <cuda_runtime.h>
// Scales the first num_elements entries of vec by scalar, in place.
// One thread per element with a bounds guard.
__global__ void kernel
(double *vec, double scalar, int num_elements)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_elements) {
        vec[i] *= scalar;
    }
}
// Launches `kernel` over num_elements entries using 256-thread blocks.
// On a launch failure, builds a diagnostic message (launch config plus the
// CUDA error string) and throws it as a std::string.
void run_kernel
(double *vec, double scalar, int num_elements)
{
    const dim3 dimBlock(256, 1, 1);
    const dim3 dimGrid(ceil((double)num_elements / dimBlock.x));
    kernel<<<dimGrid, dimBlock>>>(vec, scalar, num_elements);
    cudaError_t error = cudaGetLastError();
    if (error == cudaSuccess) {
        return;
    }
    std::stringstream strstr;
    strstr << "run_kernel launch failed" << std::endl;
    strstr << "dimBlock: " << dimBlock.x << ", " << dimBlock.y << std::endl;
    strstr << "dimGrid: " << dimGrid.x << ", " << dimGrid.y << std::endl;
    strstr << cudaGetErrorString(error);
    throw strstr.str();
}
|
3,338 | #include<stdio.h>
#include<math.h>
#include<cuda.h>
#define N 256
// Computes A = B * C for an N x N matrix B (row-major) and an N-vector C,
// one thread per output row. Must be launched with exactly N threads in a
// single block (no bounds guard).
__global__ void matrix_vector_multi_gpu_1_256(float *A_d,float *B_d,float *C_d){
int i;
// PERF: accumulate the dot product in a register instead of issuing a
// global-memory read-modify-write of A_d on every iteration. The float
// additions occur in the same order, so the result is bit-identical.
float sum = 0.0;
for(i=0;i<N;i++){
sum = sum + B_d[threadIdx.x*N+i]*C_d[i];
}
A_d[threadIdx.x] = sum;
}
// Driver: B[j][i] = j/256.0 and C = all-ones, so row j of the product
// equals N * j / 256. Copies data to the device, runs the kernel with one
// 256-thread block, and prints the result.
int main(){
int i,j;
float A[N],B[N*N],C[N];
float *A_d,*B_d,*C_d;
dim3 blocks(1,1,1);
dim3 threads(256,1,1);
for(j=0;j<N;j++){
for(i=0;i<N;i++){
B[j*N+i]=((float)j)/256.0;
}
}
for(j=0;j<N;j++){
C[j]=1.0F;
}
cudaMalloc((void**)&A_d,N*sizeof(float));
cudaMalloc((void**)&B_d,N*N*sizeof(float));
cudaMalloc((void**)&C_d,N*sizeof(float));
// NOTE(review): A is copied host-to-device while still uninitialized; the
// kernel overwrites A_d entirely, so this copy is unnecessary (harmless).
cudaMemcpy(A_d,A,N*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(B_d,B,N*N*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(C_d,C,N*sizeof(float),cudaMemcpyHostToDevice);
matrix_vector_multi_gpu_1_256<<<blocks,threads>>>(A_d,B_d,C_d);
// Blocking copy-back also synchronizes with the kernel.
cudaMemcpy(A,A_d,N*sizeof(float),cudaMemcpyDeviceToHost);
for(j=0;j<N;j++){
printf("A[ %d ]=%f \n",j,A[j]);
}
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
3,339 | #include <stdio.h>
// Minimal device-side printf demo; each launched thread prints one line.
__global__ void hello_from_gpu()
{
printf("Hello World from the GPU!\n");
}
int main(void)
{
// Launch one block of one thread; cudaDeviceSynchronize() waits for the
// kernel and flushes its printf buffer before the process exits.
hello_from_gpu<<<1, 1>>>();
cudaDeviceSynchronize();
return 0;
} |
3,340 |
__global__ void
swap_reflect(float *A, int numElements)
{
// One block per matrix row (i = blockIdx.x), one thread per column (j).
int i=blockIdx.x;
int j=threadIdx.x;
float temp;
// Phase 1: within row i, swap each even column with its right neighbour.
if ((i < numElements) && (j < numElements -1) && ((j)%2==0))
{
temp = A[i*numElements + j];
A[i*numElements + j] = A[i*numElements + j + 1];
A[i*numElements + j + 1] = temp;
}
// This barrier only orders threads within one block (one row); cross-row
// ordering is not enforced — presumably phase 2's reads/writes are disjoint
// from other blocks' phase-1 writes. TODO(review): confirm.
__syncthreads();
// Phase 2: mirror the strict lower triangle into the upper triangle.
// NOTE(review): (i>j) already implies (i!=j); the last test is redundant.
if((i < numElements) && (j < numElements) && (i>j) && (i!=j))
{
A[j*numElements + i] = A[i*numElements + j];
//__syncthreads();
}
}
|
3,341 | #include <cuda_runtime.h>
__global__ void calcPReLUKernel(const float *input, float *output, const float *weights,
int width, int height, int channels)
{
    // PReLU activation: out = in for positive inputs, in * w[channel]
    // otherwise. The y axis flattens (batch, channel), so the per-channel
    // weight is recovered as y % channels.
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height) {
        return;
    }
    const int idx = y * width + x;
    const float v = input[idx];
    output[idx] = v > 0 ? v : v * weights[y % channels];
}
void calcPReLU(const float *input, float *output, const float* weights, int batchSize, int channels,
int width, int height, cudaStream_t stream)
{
    // Host launcher: flatten (h, w) into the kernel's "width" axis and
    // (batch, channel) into its "height" axis; the kernel recovers the
    // channel index as y % channels.
    const int cols = width * height;
    const int rows = batchSize * channels;
    dim3 blocks(32, 32);
    dim3 grids((cols + 31) / 32, (rows + 31) / 32);
    calcPReLUKernel<<<grids, blocks, 0, stream>>>(input, output, weights, cols, rows, channels);
}
|
3,342 | #include <stdio.h>
#define N 256
#define TPB 64
__global__ void printKernel()
{
// Debug kernel: each of the launched threads prints its global thread id.
// Get thread ID
const int i = blockIdx.x*blockDim.x + threadIdx.x;
// Print message (device printf output appears after host synchronization)
printf("Hello World! My threadId is %d\n\n", i);
}
int main()
{
// N/TPB (= 4) blocks of TPB (= 64) threads; N is a multiple of TPB so the
// integer division loses nothing.
// Launch kernel to print
printKernel<<<N/TPB, TPB>>>();
cudaDeviceSynchronize();
return 0;
} |
3,343 | /*
* simulator_cuda.cu
*
* Created on: Jul 18, 2014
* Author: bqian
*/
#include "simulator_cuda.cuh"
#include "simulator_kernel_impl.cuh"
#include "util.cuh"
|
3,344 | // #CSCS CUDA Training
//
// #Example 3.2 - transpose matrix, coalesced access
//
// #Author: Ugo Varetto
//
// #Goal: compute the transpose of a matrix with coalesced memory access
//
// #Rationale: shows how to increase speed by making use of shared (among threads in a thread block) memory
// and coalesced memory access.
// CUDA can perform a memory transfer of 16(half warp) contiguous elements(4,8 or 16 bytes each)
// in a single step if each thread accesses a different memory location in the 16 element buffer;
// also shared memory access can be two orders of magnitude faster than global memory access
//
//
// #Solution: copy input matrix elements into shared memory blocks and write transposed elements
// reading from shared memory. Access is coalesced if the block size is a multiple
// of a half warp i.e. 16
//
// #Code: 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) initialize data directly on the GPU
// 4) create events
// 5) record start time
// 6) launch kernel
// 7) synchronize events to guarantee that kernel execution is finished
// 8) record stop time
// 9) read data back
// 10) print timing information as stop - start time
// 11) delete events
// 12) free memory
// The code uses the default stream 0; streams are used to sychronize operations
// to guarantee that all operations in the same stream are executed sequentially.
//
// #Compilation: nvcc -arch=sm_13 3_2_transpose-timing-coalesced.cu -o transpose-timining-coalesced
//
// #Execution: ./transpose-timining-coalesced
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
// cudaThreadSynchronize() is required to wait for the end of kernel execution from
// a host thread; in case of synchronous copy operations like cudaMemcpy(...,cudaDeviceToHost)
// kernel execution is guaranteed to be terminated before data are copied
//
// #Note: the code is C++ also because the default compilation mode for CUDA is C++, all functions
// are named with C++ convention and the syntax is checked by default against C++ grammar rules
//
// #Note: -arch=sm_13 allows the code to run on every card with hw architecture GT200 (gtx 2xx) or better
//
// #Note: -arch=sm_13 is the lowest architecture version that supports double precision
//
// #Note: the example can be extended to read configuration data and matrix size from the command line
//
// #Note: despite improvements in Tesla2 and Fermi hardware, coalescing is BY NO MEANS obsolete.
// Even on Tesla2 or Fermi class hardware, failing to coalesce global memory transactions
// can result in a 2x performance hit. (On Fermi class hardware, this seems to be true only
// when ECC is enabled. Contiguous-but-uncoalesced memory transactions take about a 20% hit on Fermi.)
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
typedef float real_t;
const size_t TILE_SIZE = 16; //16 == half warp -> coalesced access
__global__ void transpose( const real_t* in, real_t *out, int num_rows, int num_columns ) {
    // Coalesced matrix transpose through a shared-memory tile.
    // Preconditions: blockDim.x == blockDim.y == TILE_SIZE and the grid
    // exactly tiles the matrix (there are no bounds checks).
    //
    // The tile is padded by one column: the read below indexes
    // tile[threadIdx.x][threadIdx.y], i.e. a *column* of the tile. With an
    // unpadded 16-wide tile every thread of a half warp would hit the same
    // shared-memory bank and serialize; the +1 skews consecutive rows
    // across banks.
    __shared__ real_t tile[ TILE_SIZE ][ TILE_SIZE + 1 ];
    // locate element to transfer from input data into local cache
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int input_index = row * num_columns + col;
    // 1) copy data into tile — coalesced, threadIdx.x runs along a row
    tile[ threadIdx.y ][ threadIdx.x ] = in[ input_index ];
    // Every thread must finish its write before any thread reads elements
    // written by other threads of the block.
    __syncthreads();
    // 2) locate output element of the transposed matrix: swap the *block*
    //    coordinates but keep the thread coordinates, so the global write
    //    below stays coalesced as well.
    row = blockIdx.x * blockDim.x + threadIdx.y;
    col = blockIdx.y * blockDim.y + threadIdx.x;
    // transposed matrix: num_columns -> num_rows == matrix width
    const int output_index = row * num_rows + col;
    out[ output_index ] = tile[ threadIdx.x ][ threadIdx.y ];
    // steps (1) and (2) are independent; (2) only requires that the data
    // are available in shared memory
}
__global__ void init_matrix( real_t* in ) {
    // Fill each matrix element with its own linear index, derived from the
    // 2D grid position (row pitch = gridDim.x * blockDim.x).
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    const int linear = row * gridDim.x * blockDim.x + col;
    in[ linear ] = (real_t) linear;
}
void print_matrix( const real_t* m, int r, int c, int stride ) {
    // Print an r x c window of a row-major matrix whose full rows are
    // `stride` elements wide; one space after each value, newline per row,
    // and a trailing blank line (flushed).
    for( int row = 0; row < r; ++row ) {
        for( int col = 0; col < c; ++col ) {
            std::cout << m[ row * stride + col ] << ' ';
        }
        std::cout << '\n';
    }
    std::cout << std::endl;
}
//------------------------------------------------------------------------------
int main(int argc, char** argv ) {
// 512x512 blocks of 16x16 threads exactly tile the 8192x8192 matrix, so
// the kernels can run without bounds checks.
const dim3 BLOCKS( 512, 512 );
const dim3 THREADS_PER_BLOCK( 16, 16 );
const int ROWS = 512 * 16; // 8192
const int COLUMNS = 512 * 16; // 8192
const size_t SIZE = ROWS * COLUMNS * sizeof( real_t );
// device storage
real_t* dev_in = 0;
real_t* dev_out = 0;
cudaMalloc( &dev_in, SIZE );
cudaMalloc( &dev_out, SIZE );
// host storage
std::vector< real_t > outmatrix( ROWS * COLUMNS );
// initialize matrix with kernel; much faster than using
// for loops on the cpu
// NOTE(review): this launch uses one thread per block (COLUMNS x ROWS
// single-thread blocks); init_matrix's gridDim.x*blockDim.x pitch matches.
init_matrix<<<dim3( COLUMNS, ROWS ), 1>>>( dev_in );
cudaMemcpy( &outmatrix[ 0 ], dev_in, SIZE, cudaMemcpyDeviceToHost );
// print upper 4x4 left corner of input matrix
std::cout << "INPUT MATRIX - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl;
print_matrix( &outmatrix[ 0 ], 4, 4, COLUMNS );
// create events for timing execution
cudaEvent_t start = cudaEvent_t();
cudaEvent_t stop = cudaEvent_t();
cudaEventCreate( &start );
cudaEventCreate( &stop );
// record time into start event
cudaEventRecord( start, 0 ); // 0 is the default stream id
// execute kernel
transpose<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_in, dev_out, ROWS, COLUMNS );
//transposeCoalesced<<<BLOCKS, THREADS_PER_BLOCK>>>>( dev_in, dev_out, COLUMNS, ROWS);
// issue request to record time into stop event
cudaEventRecord( stop, 0 );
// synchronize stop event to wait for end of kernel execution on stream 0
cudaEventSynchronize( stop );
// compute elapsed time (done by CUDA run-time)
float elapsed = 0.f;
cudaEventElapsedTime( &elapsed, start, stop );
std::cout << "Elapsed time (ms): " << elapsed << std::endl;
// copy output data from device(gpu) to host(cpu)
cudaMemcpy( &outmatrix[ 0 ], dev_out, SIZE, cudaMemcpyDeviceToHost );
// print upper 4x4 corner of transposed matrix
std::cout << "\nOUTPUT MATRIX - " << COLUMNS << " rows, " << ROWS << " columns" << std::endl;
print_matrix( &outmatrix[ 0 ], 4, 4, ROWS );
// free memory
cudaFree( dev_in );
cudaFree( dev_out );
// release events
cudaEventDestroy( start );
cudaEventDestroy( stop );
return 0;
}
|
3,345 | #include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<cmath>
#define TILE_SIZE 2 // Tile size and block size, both are taken as 32
__device__ void store_full_row(float*,float*,int,int, int, int);
__device__ void load_full_row(float*,float*,int,int, int, int);
__device__ void store_full(float*,float*,int,int,int, int, int);
__device__ void load_full(float*,float*,int,int,int, int, int);
__device__ void store_lower(float*,float*,int,int,int, int, int);
__device__ void load_lower(float*,float*,int,int,int, int, int);
__device__ void potrf_tile(float*);
__device__ void trsm_tile(float*,int,int,int);
__device__ void syrk_tile(float*,float*,int,int,int);
__global__ void right_looking_launch_kernel(float*,int);
__device__ void store_full_row(float* read_data,float* write_data,int i,int N, int M, int shared_size_single_matrix)
{
// Copies shared-memory tile-row i (all N/TILE_SIZE column tiles) of this
// thread's matrix back to global memory. Shared layout uses a
// (TILE_SIZE+1) pitch (bank-conflict padding) and threadIdx.x selects the
// per-matrix slice; the global layout interleaves the M matrices so that
// threadIdx.x is the fastest-varying (innermost) index.
int global_y;
int global_x = i*blockDim.y + threadIdx.y;
for(int j=0;j<N/TILE_SIZE;j++)
{
global_y = j*blockDim.z + threadIdx.z;
write_data[global_y*N*M + global_x*M + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix];
}
__syncthreads();
}
__device__ void load_full_row(float* read_data,float* write_data,int i,int N, int M, int shared_size_single_matrix)
{
    // Loads tile-row i (all N/TILE_SIZE column tiles) of this thread's
    // matrix from the interleaved global array into the shared row buffer
    // (pitch TILE_SIZE+1, threadIdx.x selects the per-matrix slice).
    // The original carried a leftover per-thread debug printf inside the
    // copy loop, which serializes the warp and floods stdout; removed.
    int global_y;
    int global_x = i*blockDim.y + threadIdx.y;
    for(int j=0;j<N/TILE_SIZE;j++)
    {
        global_y = j*blockDim.z + threadIdx.z;
        write_data[threadIdx.y + (TILE_SIZE+1)*global_y + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + threadIdx.x];
    }
    __syncthreads();
}
__device__ void store_full(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
// Writes one TILE_SIZE x TILE_SIZE shared scratch tile (tile coordinates
// i,j; pitch TILE_SIZE+1) of this thread's matrix back to the interleaved
// global array; threadIdx.x selects the matrix.
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
write_data[global_y*N*M + global_x*M + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix];
__syncthreads();
}
__device__ void load_full(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
// Loads global tile (i,j) of this thread's matrix into a shared scratch
// tile (pitch TILE_SIZE+1); threadIdx.x selects the per-matrix slice.
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + threadIdx.x];
__syncthreads();
}
__device__ void store_lower(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
// Like store_full, but zero-fills the strict upper triangle so only the
// Cholesky factor's lower triangle survives in global memory.
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
// printf("%f is at %d\n", read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix], threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix);
if(threadIdx.z >= threadIdx.y)
write_data[global_y*N*M + global_x*M + threadIdx.x] = read_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix];
else
write_data[global_y*N*M + global_x*M + threadIdx.x] = 0.0;
__syncthreads();
}
__device__ void load_lower(float* read_data,float* write_data,int i,int j,int N, int M, int shared_size_single_matrix)
{
// Like load_full, but zeroes the strict upper triangle of the shared tile
// instead of reading it, so only the lower triangle enters the factorization.
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
if(threadIdx.z >= threadIdx.y)
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = read_data[global_y*N*M + global_x*M + threadIdx.x];
else
write_data[threadIdx.y + (TILE_SIZE+1)*threadIdx.z + threadIdx.x*shared_size_single_matrix] = 0.0;
__syncthreads();
}
__device__ void potrf_tile(float* t_A)
{
    // Unblocked Cholesky factorization of one TILE_SIZE x TILE_SIZE shared
    // tile (pitch TILE_SIZE+1), performed cooperatively by the
    // (threadIdx.y, threadIdx.z) lanes operating on this matrix's slice.
    int t_x = threadIdx.y;
    int t_y = threadIdx.z;
    for(int k=0;k<TILE_SIZE;k++)
    {
        // Step 1: one lane replaces the pivot with its square root.
        if(t_x==t_y && t_x==k)
        {
            t_A[k*(TILE_SIZE+1) + k] = sqrtf(t_A[k*(TILE_SIZE+1) + k]);
        }
        __syncthreads();
        // Step 2: scale column k below the pivot by the updated diagonal.
        // Read the diagonal from the tile directly. The original broadcast
        // it through a single `__shared__ float temp2`, which is shared by
        // ALL matrices in the block (every threadIdx.x lane): with more
        // than one matrix per block the lanes raced on temp2 and a matrix
        // could divide by another matrix's pivot.
        if(t_x<t_y && t_x == k)
        {
            t_A[t_y*(TILE_SIZE+1) + k] /= t_A[k*(TILE_SIZE+1) + k];
        }
        __syncthreads();
        // Step 3: trailing-submatrix update (lower triangle only).
        if(k<t_y && k<t_x && t_x<=t_y)
        {
            t_A[t_y*(TILE_SIZE+1) + t_x] -= t_A[t_x*(TILE_SIZE+1) + k]*t_A[t_y*(TILE_SIZE+1) + k];
        }
        __syncthreads();
    }
}
__device__ void trsm_tile(float *row_data,int i,int j,int N)
{
// Triangular solve of off-diagonal tile j against diagonal tile i,
// operating in place on the shared row buffer (pitch TILE_SIZE+1).
// NOTE(review): the buffer is indexed with the *global* tile coordinates
// global_x/global_y; correctness relies on the caller passing the
// per-matrix slice of shared memory — confirm against the launch kernel.
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
int t_x = threadIdx.y;
int t_y = threadIdx.z;   // NOTE(review): unused
for(int s=0;s<TILE_SIZE;s++)
{
// Lane t_x == s divides its element by the diagonal entry...
if(t_x==s)
{
row_data[global_y*(TILE_SIZE+1) + t_x]/= row_data[global_x*(TILE_SIZE+1) + t_x];
}
__syncthreads();
// ...then the remaining lanes eliminate that column's contribution.
if(t_x > s)
{
row_data[global_y*(TILE_SIZE+1) + t_x]-= row_data[global_x*(TILE_SIZE+1) + s]*row_data[global_y*(TILE_SIZE+1) + s];
}
__syncthreads();
}
}
__device__ void syrk_tile(float* row_data,float* edit_data,int i,int j,int N)
{
// Rank-TILE_SIZE update: subtracts the product of row-panel tiles i and j
// (read from the shared row buffer, pitch TILE_SIZE+1) from the shared
// scratch tile edit_data.
int global_y = j*blockDim.z + threadIdx.z;
int global_x = i*blockDim.y + threadIdx.y;
int t_y = threadIdx.z;
int t_x = threadIdx.y;
float valueToSubtract = 0.0;
for(int r=0;r<TILE_SIZE;r++)
{
valueToSubtract+= row_data[r + global_y*(TILE_SIZE+1)]*row_data[r + global_x*(TILE_SIZE+1)];
}
edit_data[t_y*(TILE_SIZE+1) + t_x]-= valueToSubtract;
__syncthreads();
}
__global__ void right_looking_launch_kernel(float* read_data,int N, int M , int num_of_matrices_per_block, int shared_size_single_matrix) // N -> dim, M -> num of matrices per block
{
// Batched right-looking Cholesky: one block factors the whole batch, with
// threadIdx.x selecting the matrix and (threadIdx.y, threadIdx.z)
// addressing elements inside a TILE_SIZE x TILE_SIZE tile. Dynamic shared
// memory holds M row panels followed by M scratch tiles.
int no_of_tiles = (N / TILE_SIZE) + (N % TILE_SIZE != 0);   // NOTE(review): unused
int tx = threadIdx.x;
float *rA1 = NULL;
extern __shared__ float row_data[];
// __shared__ float tile_data[TILE_SIZE*(TILE_SIZE+1)]; // Using TILE_SIZE+1 to avoid Band-conflict in Shared Memory
// Offset of the scratch-tile region within the dynamic shared buffer.
int tile_data_index = M * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1);
// __shared__ float* tile_data = &row_data[M * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)];
int shared_size_single_matrix_tile_data = TILE_SIZE * (TILE_SIZE + 1);
int i,j,k;
for(i=0;i<N/TILE_SIZE;i++)
{
// Factor diagonal tile (i,i) of every matrix in the batch.
load_lower(read_data,&row_data[tile_data_index],i,i,N, M, shared_size_single_matrix_tile_data);
// printf("%d \n", tile_data_index + shared_size_single_matrix_tile_data * M);
// if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
// for (int z = tile_data_index; z < tile_data_index + shared_size_single_matrix_tile_data * M; z++) {
// printf("%f is at %d\n", row_data[z], z);
// }
// }
rA1 = &row_data[tile_data_index + tx*shared_size_single_matrix_tile_data];
// printf("%d\n", tx*shared_size_single_matrix_tile_data);
// potrf_tile(tile_data);
potrf_tile(rA1);
store_lower(&row_data[tile_data_index],read_data,i,i,N, M, shared_size_single_matrix_tile_data);
// Solve the panel below the diagonal and update the trailing submatrix.
load_full_row(read_data,row_data,i,N, M, shared_size_single_matrix);
for(j=i+1;j<N/TILE_SIZE;j++)
{
trsm_tile(&row_data[tx*shared_size_single_matrix],i,j,N);
for(k=i+1;k<j;k++)
{
load_full(read_data,&row_data[tile_data_index],k,j,N, M, shared_size_single_matrix_tile_data);
rA1 = &row_data[tile_data_index + tx*shared_size_single_matrix_tile_data];
// syrk_tile(row_data,tile_data,k,j,N);
syrk_tile(&row_data[tx*shared_size_single_matrix],rA1,k,j,N);
store_full(&row_data[tile_data_index],read_data,k,j,N, M, shared_size_single_matrix_tile_data);
}
// Final update of tile (j,j) after the loop leaves k == j.
load_full(read_data,&row_data[tile_data_index],k,j,N, M, shared_size_single_matrix_tile_data);
syrk_tile(&row_data[tx*shared_size_single_matrix],&row_data[tile_data_index + tx*shared_size_single_matrix_tile_data],k,j,N);
store_full(&row_data[tile_data_index],read_data,k,j,N, M, shared_size_single_matrix_tile_data);
}
store_full_row(row_data,read_data,i,N, M, shared_size_single_matrix);
}
}
int main()
{
    // Batched-Cholesky driver. Reads a batch of matrices from
    // ./dim2_256matrices.txt (header: matrix count, matrix dimension; body:
    // each matrix in row-major order), factors them on the GPU with a
    // single block of right_looking_launch_kernel, and writes the results
    // to ./output.txt.
    //
    // Host/device layout is interleaved across matrices: element (row, col)
    // of matrix m lives at row*dim*num + col*num + m, so the kernel's
    // threadIdx.x (matrix id) is the fastest-varying index.
    //
    // Cleanup vs. the original: ~120 lines of dead commented-out code and
    // the unused cudaGetDeviceProperties query were removed; fopen/malloc
    // results are now checked.
    FILE *fptr;
    fptr = fopen("./dim2_256matrices.txt", "r");
    if (fptr == NULL)
    {
        fprintf(stderr, "Failed to open input file dim2_256matrices.txt\n");
        exit(EXIT_FAILURE);
    }
    int num_of_matrices, dim_of_matrix;
    fscanf(fptr, "%d", &num_of_matrices);
    fscanf(fptr, "%d", &dim_of_matrix);
    float read_element;
    float* h_A = NULL;
    int numElements = num_of_matrices * dim_of_matrix * dim_of_matrix;
    size_t size = numElements * sizeof(float);
    h_A = (float *)malloc(size);
    if (h_A == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    int global_id = 0;
    for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++)
    {
        for (int row = 0; row < dim_of_matrix; row++)
        {
            for (int column = 0; column < dim_of_matrix; column++)
            {
                fscanf(fptr, "%f", &read_element);
                // interleaved packing (see layout note above)
                global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index;
                h_A[global_id] = read_element;
            }
        }
    }
    printf("\nRead from the input file successfully!\n");
    fclose(fptr);
    printf("\nPrinting the host-side input array read from the input file:\n");
    for (int i = 0; i < numElements; i++) {
        printf("%f ", h_A[i]);
    }
    printf("\n\n");
    cudaError_t err = cudaSuccess;
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else {
        printf("Copied the h_A to device side successfully!\n\n");
    }
    // One block handles every matrix: x = matrix id, (y, z) = position
    // inside a TILE_SIZE x TILE_SIZE tile.
    int num_of_matrices_per_block = num_of_matrices;
    dim3 grid(1, 1, 1);
    dim3 block(num_of_matrices, TILE_SIZE, TILE_SIZE);
    int N = dim_of_matrix;
    // Dynamic shared memory: per-matrix row panel (pitch TILE_SIZE+1) plus
    // per-matrix scratch tile. The last kernel argument is the per-matrix
    // panel stride.
    size_t shared_size = num_of_matrices * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1)*sizeof(float) + num_of_matrices_per_block * TILE_SIZE*(TILE_SIZE+1) * sizeof(float);
    right_looking_launch_kernel<<<grid,block,shared_size>>>(d_A, dim_of_matrix, num_of_matrices, num_of_matrices ,(num_of_matrices * (N*(TILE_SIZE+1) + TILE_SIZE*(TILE_SIZE+1) + 1))/num_of_matrices);
    cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess) {
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    }
    err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else {
        printf("\nCopied d_A to host side successfully!\n");
    }
    printf("\nPrinting the output of cudememcopyDeviceToHost, i.e. the host-side array returned from device side:\n");
    for (int i = 0; i < numElements; i++) {
        printf("%f ", h_A[i]);
    }
    err = cudaFree(d_A);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "\nFailed to free device matrix M (error code %s)\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaDeviceReset();
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the CUDA device (error code %s)\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Write the factored matrices back, de-interleaved to row-major order.
    FILE *fptr1;
    fptr1 = fopen("./output.txt", "w+");
    if (fptr1 == NULL)
    {
        fprintf(stderr, "Failed to open output file output.txt\n");
        exit(EXIT_FAILURE);
    }
    float write_element;
    fprintf(fptr1, "%d\n", num_of_matrices);
    fprintf(fptr1, "%d\n", dim_of_matrix);
    for (int matrix_index = 0; matrix_index < num_of_matrices; matrix_index++)
    {
        for (int row = 0; row < dim_of_matrix; row++)
        {
            for (int column = 0; column < dim_of_matrix; column++)
            {
                global_id = row * dim_of_matrix * num_of_matrices + column * num_of_matrices + matrix_index;
                write_element = h_A[global_id];
                fprintf(fptr1, "%0.2f ", write_element);
            }
            fprintf(fptr1,"\n");
        }
    }
    fclose(fptr1);
    free(h_A);
    printf("\n\nAll tasks completed successfully!\n\n");
    return 0;
}
|
3,346 | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
//#define __DEBUG
// element_addr: pointer to a[m][n] in a d-wide row-major array (no bounds check).
#define element_addr(a, m, n, d) (a + ((m) * (d) + n))
// element: value of a[m][n], with out-of-range indices reading as 0
// (implicit zero halo for the stencil).
#define element(a, m, n, d) (((m >= 0)&&(m < d)&&(n >= 0)&&(n < d))? (a[(m) * (d) + n]) : 0)
// CUDA_CALL: run a CUDA API call, print (but do not abort on) any error.
// NOTE(review): expects a local `cudaError_t err` in scope at the use site.
#define CUDA_CALL(cmd) do { \
if((err = cmd) != cudaSuccess) { \
printf("(%d) Cuda Error: %s\n", __LINE__, cudaGetErrorString(err) ); \
} \
} while(0)
int calculateGPU(const int *living, float *honey[2], int d, int n, float rbee, float rflow);
int calculateCPU(const int *living, float *honey[2], int d, int n, float rbee, float rflow);
int main(int argc, char **argv)
{
// Beehive stencil simulator driver: reads grid size D, iteration count N
// and coefficients R(bee)/R(flow) plus the living/honey maps from the
// input file, runs the simulation on GPU and CPU, writes the GPU result to
// beehive_res.txt, and counts relative mismatches (> 0.01%) between them.
FILE *fp;
int d, n;
float rbee, rflow;
int *living;
float *honey[2], *honeyr;
int i, j;
int resin;
if (argc != 2) {
printf("sampl0 <input_file>\n");
return 1;
}
fp = fopen(argv[1], "r");
if(fp == NULL) {
printf("fopen %s error\n", argv[1]);
return 1;
}
fscanf(fp, "%d %d", &d, &n);
fscanf(fp, "%f %f", &rbee, &rflow);
#ifdef __DEBUG
printf("D=%d N=%d\n", d, n);
printf("R(bee)=%f R(flow)=%f\n", rbee, rflow);
#endif
living = (int *) malloc( d * d * sizeof(int) );
if(living == NULL) {
printf("malloc living[] error\n");
return 1;
}
for( i = 0; i < d; i++ ) {
for( j = 0; j < d; j++ ) {
fscanf(fp, "%d", element_addr(living, i, j, d));
}
}
#ifdef __DEBUG
printf("livingmap = \n");
for( i = 0; i < d; i++ ) {
for( j = 0; j < d; j++ ) {
printf("%d ", element(living, i, j, d));
}
printf("\n");
}
#endif
// Two honey buffers for ping-pong iteration, plus honeyr to snapshot the
// GPU result before the CPU run overwrites the buffers.
honey[0] = (float *)malloc( d * d * sizeof(float) );
honey[1] = (float *)malloc( d * d * sizeof(float) );
honeyr = (float *)malloc( d * d * sizeof(float) );
if(honey[0] == NULL || honey[1] == NULL || honeyr == NULL ) {
printf("malloc honey[] error\n");
return 1;
}
for( i = 0; i < d; i++ ) {
for( j = 0; j < d; j++ ) {
fscanf(fp, "%f", element_addr(honey[0], i, j, d));
}
}
#ifdef __DEBUG
printf("honey = \n");
for( i = 0; i < d; i++ ) {
for( j = 0; j < d; j++ ) {
printf("%f ", element(honey[0], i, j, d));
}
printf("\n");
}
#endif
fclose(fp);
// NOTE(review): memcpy and fabs are used below but <string.h>/<math.h> are
// not included; presumably nvcc's C++ headers pull them in — confirm.
calculateGPU(living, honey, d, n, rbee, rflow); //always return in honey[1]
memcpy(honeyr, honey[1], d * d * sizeof(float) );
resin = calculateCPU(living, honey, d, n, rbee, rflow);
#ifdef __DEBUG
printf("result is in honey[%d] \n",resin);
#endif
fp = fopen("beehive_res.txt", "w");
// n is reused as the mismatch counter from here on
n = 0;
for( i = 0; i < d; i++ ) {
for( j = 0; j < d; j++ ) {
fprintf(fp, "%f ", element(honeyr, i, j, d));
if( fabs( (element(honey[resin], i, j, d) - element(honeyr, i, j, d)) / element(honey[resin], i, j, d) ) > 0.0001) {
//if(element(honey[resin], i, j, d) != element(honeyr, i, j, d) ) {
printf("<%d, %d>:%f %f\n", i, j, element(honey[resin], i, j, d), element(honeyr, i, j, d));
n++;
}
}
fprintf(fp, "\n");
}
printf("error: %d\n", n);
fclose (fp);
free(honey[0]);
free(honey[1]);
free(honeyr);
free(living);
return 0;
}
int calculateCPU(const int *living, float *honey[2], int d, int n, float rbee, float rflow)
{
// CPU reference implementation of the beehive stencil: n Jacobi-style
// iterations over a d x d grid, ping-ponging between honey[0] and honey[1].
// Returns the index (0 or 1) of the buffer holding the final result.
int ite;
int src, resin = 0;
int i, j;
clock_t start, end;
double time;
start = clock();
for(ite = 0; ite < n; ite++ ) {
src = resin;
resin = 1 - resin;
for(i = 0; i < d; i++ ) {
for(j = 0; j < d; j++ ) {
// 8-neighbour flow (out-of-range cells read as 0 via element()),
// decay of the centre cell, plus the bee source term.
*(element_addr(honey[resin], i, j, d)) = rflow * (
element(honey[src], i-1, j-1, d) + element(honey[src], i-1, j, d) + element(honey[src], i-1, j+1, d)
+ element(honey[src], i, j-1, d) + element(honey[src], i, j+1, d)
+ element(honey[src], i+1, j-1, d) + element(honey[src], i+1, j, d) + element(honey[src], i+1, j+1, d)
) + (1.0 - 8.0 * rflow) * element(honey[src], i, j, d)
+ rbee * element(living, i, j, d);
}
}
}
end = clock();
time = ((double)(end-start))/CLOCKS_PER_SEC;
printf("CPU computation: %lf sec\n", time);
return resin;
}
|
3,347 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
// Process-wide GPU selection state, shared across threads.
struct GPUState {
int deviceCount; // number of GPUs to use (-1 = not yet initialized, 0 = none)
int deviceToUse; // GPU to use (round-robin cursor, advanced via CAS)
pthread_mutex_t initLock; // serializes the one-time initialization in get_gpu()
} gpustate = {-1,-1,PTHREAD_MUTEX_INITIALIZER};
// returns which GPU to run on, or -1 if no GPUs are available
int get_gpu() {
    if (gpustate.deviceCount == 1)
        return 0; // return immediately for the common case of 1 GPU
    else if (gpustate.deviceCount > 1) { // multiple GPUs: round-robin via CAS
        int newval, oldval;
        do {
            oldval = gpustate.deviceToUse;
            if (oldval == gpustate.deviceCount-1)
                newval = 0;
            else
                newval = oldval+1;
        } while (!__sync_bool_compare_and_swap(&gpustate.deviceToUse, oldval, newval));
        // Return the slot this thread just claimed. The original fell
        // through and re-read gpustate.deviceToUse, which another thread's
        // CAS could already have advanced — two callers could then be
        // handed the same GPU (or skip one).
        return newval;
    }
    else if (gpustate.deviceCount == -1) { // not yet initialized... run initialization
        pthread_mutex_lock(&gpustate.initLock);
        // check if another thread already completed initialization
        if (gpustate.deviceCount != -1) {
            pthread_mutex_unlock(&gpustate.initLock);
            return get_gpu();
        }
        // continue with initialization
        if (cudaGetDeviceCount(&gpustate.deviceCount)) {
            fprintf(stderr, "Cuda Error in GetDeviceCount: %s\n", cudaGetErrorString(cudaGetLastError()));
            gpustate.deviceCount = 0;
        }
        else if (gpustate.deviceCount <= 0)
            gpustate.deviceCount = 0;
        else
            gpustate.deviceToUse = 0;
        // reset every device once so later CUDA calls start from a clean state
        for (int deviceID=0; deviceID<gpustate.deviceCount; deviceID++) {
            cudaSetDevice(deviceID);
            cudaDeviceReset();
        }
        pthread_mutex_unlock(&gpustate.initLock);
    }
    // 0 devices (returns -1... wait, deviceToUse stays -1) or freshly
    // initialized: deviceToUse is -1 when no GPU exists, 0 otherwise.
    return gpustate.deviceToUse;
}
|
3,348 | #include "bitonic.cuh"
__global__ void BitonicMergeSort(float * d_output, float * d_input, int subarray_size)
{
// In-shared-memory bitonic sort of one block-sized chunk of d_input.
// Assumes one element per thread and dynamic shared memory of
// blockDim.x floats; subarray_size is presumably a power of two equal to
// the chunk size — TODO confirm against the host launcher.
extern __shared__ float shared_data[];
// internal index for sorting of the subarray
int index = threadIdx.x;
int index_global = index + blockDim.x * blockIdx.x;
// number of merge stages derived from the subarray size
double portions = log2(double(subarray_size)) - 1;
//copying of data portion dedicated to this block into shared memory
shared_data[index] = d_input[index_global];
__syncthreads();
for (short portion = 0; portion <= portions; portion++)
{
short offset = 1 << portion;
short threads_in_box = offset << 1;
// calculated at the beginning of each portion
//int boxI = index % (threads_in_box + (blockDim.x * blockIdx.x));
// boxI parity decides whether this "box" merges top-down or bottom-up.
int boxI = threadIdx.x / threads_in_box;
for (short subportion = portion; subportion >= 0; subportion--)
{
offset = 1 << subportion;
threads_in_box = offset << 1;
int arrow_bottom = index % threads_in_box;
if (((boxI + 1) % 2) == 1) {
// top down
if (arrow_bottom < offset) {
float temp = shared_data[index];
if (shared_data[index + offset] < temp) {
shared_data[index] = shared_data[index + offset];
shared_data[index + offset] = temp;
}
}
}
else {
// bottom up
if (arrow_bottom >= offset) {
float temp = shared_data[index];
if (shared_data[index - offset] < temp) {
shared_data[index] = shared_data[index - offset];
shared_data[index - offset] = temp;
}
}
}
// barrier sits outside the divergent branches: every thread reaches it
__syncthreads();
}
}
d_output[index_global] = shared_data[index];
} |
3,349 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
int main()
{
    // Enumerate the CUDA devices and dump the properties most relevant to
    // kernel launch-configuration decisions.
    int deviceCount;
    cudaDeviceProp devProp;
    cudaGetDeviceCount(&deviceCount);
    printf("Found %d devices\n", deviceCount);
    for (int device=0; device < deviceCount; device++)
    {
        cudaGetDeviceProperties(&devProp, device);
        printf("Device %d\n", device);
        printf("Compute capability: %d.%d\n", devProp.major, devProp.minor);
        printf("Name: %s\n", devProp.name);
        // totalGlobalMem, sharedMemPerBlock, totalConstMem and
        // textureAlignment are size_t: print with %zu. The original used
        // %d/%u, which is undefined behavior for size_t and truncates the
        // values on LP64 platforms.
        printf("Total Global Memory: %zu\n", devProp.totalGlobalMem);
        printf("Shared memory per block: %zu\n", devProp.sharedMemPerBlock);
        printf("Registers per block: %d\n", devProp.regsPerBlock);
        printf("Warp size: %d\n", devProp.warpSize);
        printf("Max threads per block: %d\n", devProp.maxThreadsPerBlock);
        printf("Total constant memory: %zu\n", devProp.totalConstMem);
        printf("Clock Rate: %d\n", devProp.clockRate);
        printf("Texture Alignment: %zu\n", devProp.textureAlignment);
        printf("Device overlap: %d\n", devProp.deviceOverlap);
        printf("Multiprocessot Count: %d\n", devProp.multiProcessorCount);
        printf("Max Threads Dim: %d %d %d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
        printf("Max Grid Size: %d %d %d\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
    }
    getchar();
    return 0;
}
|
3,350 | #include <stdio.h>
#include <stdlib.h>
__global__ void kernel(int *array) {
    // Each thread writes its own flat global id into the slot it owns.
    // Caller guarantees the grid covers the array exactly, so no bounds guard.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    array[gid] = gid;
}
int main(void) {
    // Fills a device array with each element's own index, copies it back to
    // the host and prints it block_size values per row.
    int num_elements = 256;
    int num_bytes = num_elements * sizeof(int);
    // pointers to host & device arrays
    int *device_array = 0;
    int *host_array = 0;
    host_array = (int *)malloc(num_bytes);
    // Guard the host allocation (the original dereferenced it unchecked).
    if (host_array == 0) {
        printf("host allocation failed\n");
        return 1;
    }
    // Guard the device allocation as well.
    if (cudaMalloc((void **)&device_array, num_bytes) != cudaSuccess) {
        printf("device allocation failed\n");
        free(host_array);
        return 1;
    }
    int block_size = 16;
    int grid_size = num_elements / block_size;  // exact: 256 % 16 == 0
    kernel<<<grid_size, block_size>>>(device_array);
    // cudaMemcpy synchronizes with the kernel, so no explicit sync is needed;
    // its status also surfaces any launch/execution error.
    if (cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("copy back from device failed\n");
    } else {
        // print out the result element by element
        for (int i = 0; i < num_elements; ++i) {
            printf("%3d ", host_array[i]);
            if ((i + 1) % block_size == 0) printf("\n");
        }
    }
    // deallocate memory
    free(host_array);
    cudaFree(device_array);
    return 0;
}
|
3,351 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void buildGlobalLinReg(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regCoeffs, float* xmins, float* xmaxes, float* regression) {
    // One thread per grid point: decodes the point's per-dimension indices,
    // evaluates the linear regression at the corresponding coordinates and
    // stores the value (clamped to be non-positive) into the global table.
    // (nYears is unused here but kept for signature compatibility.)
    // Global thread index
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < noPoints) {
        // Device-heap allocations can fail; the original dereferenced them
        // unchecked. free(NULL) is a no-op, so combined cleanup is safe.
        int *dimIdx = (int*)malloc(noDims*sizeof(int));
        float *xQ = (float*)malloc(noDims*sizeof(float));
        if (dimIdx == NULL || xQ == NULL) {
            free(dimIdx);
            free(xQ);
            return;
        }
        // Decode idx into one index per dimension using exact integer powers
        // of dimRes (the original rounded through double-precision pow()).
        long long place = 1;                      // dimRes^(noDims-1)
        for (int ii = 0; ii < noDims - 1; ii++) {
            place *= dimRes;
        }
        long long rem = idx;
        for (int ii = 0; ii < noDims; ii++) {
            int div = (int)(rem / place);
            dimIdx[ii] = div;
            rem -= div*place;
            if (ii < noDims - 1) {
                place /= dimRes;
            }
        }
        // Query-point coordinates from the control's per-dimension ranges.
        for (int ii = 0; ii < noDims; ii++) {
            xQ[ii] = ((float)dimIdx[ii])*(xmaxes[control*noDims + ii] -
                    xmins[control*noDims + ii])/(float)dimRes +
                    xmins[control*noDims + ii];
        }
        // Linear model: intercept plus one slope per dimension.
        float computed = regCoeffs[0];
        for (int ii = 0; ii < noDims; ii++) {
            computed += xQ[ii]*regCoeffs[ii+1];
        }
        if (computed >= 0) {
            computed = 0;   // regression values are clamped non-positive
        }
        // dimRes^noDims as an exact integer (replaces (int)pow(...)).
        long long cells = 1;
        for (int ii = 0; ii < noDims; ii++) {
            cells *= dimRes;
        }
        // Stride of one (year, control) slab of the regression table.
        long long span = dimRes*noDims + cells*2;
        regression[year*noControls*span + control*span + dimRes*noDims + idx] = computed;
        // Free memory
        free(xQ);
        free(dimIdx);
    }
}
3,352 | #include <iostream>
#define CHANNELS 3
__global__
void colorToGreyscaleConversion(unsigned char *Pout, unsigned char *Pin, int width, int height) {
    // Converts an interleaved RGB image (CHANNELS bytes per pixel) into a
    // single-channel luminance image.
    int Col = threadIdx.x + blockIdx.x * blockDim.x;
    int Row = threadIdx.y + blockIdx.y * blockDim.y;
    if (Col < width && Row < height) {
        // get 1D coordinate for the grayscale image.
        // This is the linearization of the picture 2D array.
        int greyOffset = Row * width + Col;
        int rgbOffset = greyOffset*CHANNELS;   // first byte of this pixel's triple
        // Channels live at offsets 0/1/2. The original read +1/+2/+3, which
        // shifted every pixel's channels by one and read one byte past the
        // buffer on the final pixel.
        unsigned char r = Pin[rgbOffset];
        unsigned char g = Pin[rgbOffset + 1];
        unsigned char b = Pin[rgbOffset + 2];
        // Perform the grey scale conversion and store it in Pout.
        Pout[greyOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
}
int main() {
    // Queries the properties of every visible CUDA device; the demo discards
    // them — only the API round-trip is exercised.
    int dev_count = 0;
    cudaGetDeviceCount(&dev_count);
    cudaDeviceProp dev_prop;
    for (int dev = 0; dev < dev_count; ++dev) {
        cudaGetDeviceProperties(&dev_prop, dev);
    }
    return 0;
}
|
3,353 | #include "includes.h"
__global__ void calcReluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
    // ReLU backward pass: accumulate the upstream gradient into dz_in, then
    // pass it through to dz where the forward input was non-negative.
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        dz_in[id] += dz_next_layer[id];
        // float literals: the original's `1.0 * dz_in[id]` promoted the whole
        // expression to double and back for no numerical benefit.
        dz[id] += (in[id] < 0.0f) ? 0.0f : dz_in[id];
    }
    /* original
    for( unsigned i = 0; i < data_size; ++i ){
    dz_in.data[i] += dz_next_layer.data[i];
    dz.data[i] += (in.data[i] < 0) ? (0) : (1.0 * dz_in.data[i]);
    }
    */
}
3,354 | #include "includes.h"
__global__ void gpu_totalTemp_kernel ( int N, double * partialT, double * totalT)
{
// Sums blockDim.x partial kinetic-energy terms with a shared-memory tree
// reduction, then thread 0 converts the sum to an instantaneous temperature.
// Assumes one block whose blockDim.x equals the number of partials, with
// shared memory = blockDim.x * sizeof(double), and a power-of-two blockDim.x
// (an odd count would drop the middle element) — NOTE(review): confirm at the
// launch site. kb and dim are file-scope constants defined outside this view.
extern __shared__ double T_cache[];
int tid = threadIdx.x;
T_cache[tid] = partialT[tid];
__syncthreads();
int nTotalThreads = blockDim.x; /// Total number of active threads
/** Tree reduction of the values currently in the block's cache */
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); /// divide by two, only the first half of the threads will be active.
if (threadIdx.x < halfPoint)
T_cache[threadIdx.x] += T_cache[threadIdx.x + halfPoint];
__syncthreads(); /// mandatory: every thread must reach this barrier each pass
nTotalThreads = halfPoint; /// Reducing the binary tree size by two:
}
/// The first thread of the block performs the final computation
if(threadIdx.x == 0) {
double T = T_cache[0];
T /= (kb * dim * N); /// Instantaneous temperature using the Equipartition Theorem. The kinetic energy is just K = 3N/2 kT
(*totalT) = T;
}
}
3,355 | //
// Created by caesar on 7/4/18.
//
#include "Computation.cuh"
|
3,356 | #include <iostream>
int main(void) {
    // Prints the compute capability (major.minor) of device 0.
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    std::cout << "CC: " << props.major << "." << props.minor << "\n";
    return 0;
}
|
3,357 | /*
Authors: Jose Garcia Kameron Bush
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <cuda.h>
static const int ThreadsPerBlock = 512;
static int* d_maxlen;
static __global__ void collatz(const long start, const long stop, int* const maxlen)
{
  // One thread per odd value in [start, stop] (start is assumed odd):
  // computes the Collatz sequence length of its value and records the
  // global maximum in *maxlen.
  // `long`, not `int`: the original assigned the 64-bit expression to an int,
  // silently truncating the candidate value for large ranges.
  const long i = (threadIdx.x + blockIdx.x * (long)blockDim.x) * 2 + start;
  if (i <= stop) {
    long val = i;
    int len = 1;
    while (val != 1) {
      len++;
      if ((val % 2) == 0) {
        val = val / 2;      // even
      } else {
        val = 3 * val + 1;  // odd
      }
    }
    // Cheap read first: skips the atomic when another thread already holds a
    // larger value (atomicMax makes the update itself race-free).
    if (*maxlen < len) {
      atomicMax(maxlen, len);
    }
  }
}
// Allocates the device-side running maximum and initializes it to zero.
void GPU_Init()
{
  const int zero = 0;
  if (cudaMalloc((void **)&d_maxlen, sizeof(int)) != cudaSuccess) {
    fprintf(stderr, "ERROR: could not allocate memory\n");
    exit(-1);
  }
  if (cudaMemcpy(d_maxlen, &zero, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
    fprintf(stderr, "ERROR: copying to device failed\n");
    exit(-1);
  }
}
// Launches the Collatz kernel over the odd values in [start, stop].
void GPU_Exec(const long start, const long stop)
{
  if (start > stop) return;
  const long oddCount = (stop - start + 2) / 2;   // number of odd candidates
  const long blocks = (oddCount + ThreadsPerBlock - 1) / ThreadsPerBlock;
  collatz<<<blocks, ThreadsPerBlock>>>(start, stop, d_maxlen);
}
// Copies the maximum sequence length back to the host, releases the device
// buffer, and returns the result.
int GPU_Fini()
{
  int maxlen = 0;
  if (cudaMemcpy(&maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
    fprintf(stderr, "ERROR: copying from device failed\n");
    exit(-1);
  }
  cudaFree(d_maxlen);
  return maxlen;
}
|
3,358 | //This file contains a cuda code implementing 2d convolution
//Author: Ajay Singh
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#define mask_width (3)
#define mat_size (5)
// Full 2-D mask: the original declared only mask_width floats while the
// kernel indexes mask[j*mask_width+l], reading past the declared symbol.
__constant__ float mask[mask_width * mask_width];
__global__
void covolution_2d_kernel(float *Mat, float *Ans)
{
    // Valid (no-padding) 2-D convolution: each thread produces one output cell.
    int col = threadIdx.x + blockIdx.x*blockDim.x;
    int row = threadIdx.y + blockIdx.y*blockDim.y;
    const int out_size = mat_size - (mask_width - 1);   // valid-output width
    if (row >= out_size || col >= out_size) return;     // guard over-provisioned launches
    float pvalue = 0;
    for (int j = 0; j < mask_width; j++) {
        for (int l = 0; l < mask_width; l++) {
            // Mat rows are mat_size wide; the original strided by mask_width
            // and read the wrong elements for every row past the first.
            pvalue += Mat[(row + j)*mat_size + (col + l)] * mask[j*mask_width + l];
        }
    }
    // Output rows are out_size wide (equal to mask_width only by the
    // coincidence mat_size==5, mask_width==3 in this demo).
    Ans[row*out_size + col] = pvalue;
    return;
}
int main()
{
    // Host driver: builds a mat_size x mat_size matrix and a mask_width x
    // mask_width mask, runs the valid 2-D convolution on the device and
    // prints the k x k result.
    float *M, *ans, *h_mask;
    float *d_M, *d_ans;
    int k = mat_size - (mask_width - 1);   // output width/height
    h_mask = (float *)malloc(sizeof(float)*mask_width*mask_width);
    M = (float *)malloc(sizeof(float)*mat_size*mat_size);
    ans = (float *)malloc(sizeof(float)*k*k);
    dim3 grid(mask_width, mask_width);
    // Check the host buffers; the original tested the __constant__ symbol
    // `mask` (never NULL) and skipped h_mask entirely.
    if (M == NULL || h_mask == NULL || ans == NULL) {
        printf(" Error while allocating memory in host for mask or the matrix");
        return 0;
    }
    printf("Printing matrix\n");
    for (int i = 0; i < mat_size*mat_size; i++) {
        M[i] = i + 1;
        printf("%lf ", M[i]);
        if ((i + 1) % mat_size == 0)   // newline at end of each row (original broke after the row's first element)
            putchar('\n');
    }
    printf("\nPrinting Mask\n");
    for (int j = 0; j < mask_width*mask_width; j++) {
        h_mask[j] = j + 1;
        printf("%lf ", h_mask[j]);
        if ((j + 1) % mask_width == 0 && j != 0)
            putchar('\n');
    }
    if (cudaMalloc((void **)&d_M, sizeof(float)*mat_size*mat_size) != cudaSuccess) {
        printf("error while allocating memory for matrix on device\n");
    }
    if (cudaMalloc((void **)&d_ans, sizeof(float)*k*k) != cudaSuccess) {
        printf("error while allocating memory for the mask on device\n");
    }
    // Copy the whole 2-D mask: mask_width*mask_width floats. The original
    // copied only mask_width floats, leaving most of constant memory
    // uninitialized for the kernel's 2-D reads.
    if (cudaMemcpyToSymbol(mask, h_mask, sizeof(float)*mask_width*mask_width) != cudaSuccess) {
        printf("error while copying mask from host to constant memory in device\n");
    }
    if (cudaMemcpy(d_M, M, sizeof(float)*mat_size*mat_size, cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("error while copying matrix to device\n");
    }
    covolution_2d_kernel<<< grid, 1>>>(d_M, d_ans);
    if (cudaMemcpy(ans, d_ans, sizeof(float)*k*k, cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("error while copying mask to device\n");
    }
    printf("\nPrinting ans\n");
    for (int i = 0; i < k*k; i++) {
        printf("%lf \t", ans[i]);
        if ((i + 1) % k == 0)
            putchar('\n');
    }
    // Release device and host memory (the original leaked the host buffers).
    cudaFree(d_M);
    cudaFree(d_ans);
    free(M);
    free(h_mask);
    free(ans);
    return 0;
}
|
3,359 | // Noop
// Device code that does nothing
#include<stdio.h>
__global__ void mykernel(void) { // runs on the device; intentionally empty — demonstrates a minimal launch
}
int main(void) {
// Launch the no-op kernel with one block of one thread; the host prints
// immediately (no sync needed — the kernel has no observable effect).
mykernel<<<1, 1>>>();
printf("Hello! \n");
return 0;
}
|
3,360 | /**
*
* bash版対称解除法のC言語版のGPU/CUDA移植版
*
詳しい説明はこちらをどうぞ
https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題
*
*/
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
// システムによって以下のマクロが必要であればコメントを外してください。
//#define UINT64_C(c) c ## ULL
//
// Global result accumulators: written by every solver, read by main().
unsigned long TOTAL=0;
unsigned long UNIQUE=0;
// Per-search working state shared by the host and GPU code paths.
typedef struct
{
unsigned int BOUND1,BOUND2,TOPBIT,ENDBIT,SIDEMASK,LASTMASK; // row bounds and bit masks driving the symmetry pruning
unsigned long board[MAX]; // board[row] = single set bit marking the queen's column
unsigned long COUNT2,COUNT4,COUNT8,TOTAL,UNIQUE; // solution counts per symmetry class plus aggregates
}local;
// Symmetry classification of a completed board (used by host solvers).
__host__ __device__
void symmetryOps(unsigned int size,local* l)
{
/**
Queen not in the top-right corner, case (1): if the 90-degree rotation equals
the original, the 180- and 270-degree rotations do too. Such a solution's
group contains only 2 boards (itself and its mirror) -> COUNT2.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&&l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ; // rotated image is smaller: this board is not the canonical form
}
if(l->board[own]<bit){
break;
}
}//end for
// 90-degree symmetric implies 180- and 270-degree symmetric as well
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
Case (2): if the 90-degree rotation differs, the 270-degree rotation differs
too, but the board may still equal its 180-degree rotation. Such a group has
4 members (mirror x rotation) -> COUNT4.
*/
//180-degree rotation
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
// not 90-degree symmetric, yet 180-degree symmetry is still possible
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
Case (3): no rotational symmetry at all -> the full group of 8 boards
(mirror x rotations) -> COUNT8.
*/
//270-degree rotation
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
// Iterative (non-recursive) backtracking when no queen sits in the corner.
void symmetry_backTrack_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down,unsigned int _right,local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>0){
if(bitmap[row]>0){
if(row<l->BOUND1){ // prune the side columns above BOUND1
bitmap[row]|=l->SIDEMASK;
bitmap[row]^=l->SIDEMASK;
}else if(row==l->BOUND2){ // prune the side columns at BOUND2
if((down[row]&l->SIDEMASK)==0){
row--;
// NOTE(review): after this decrement the loop body keeps running with the
// decremented row, unlike the recursive variant which returns here —
// verify this is intentional.
}
if((down[row]&l->SIDEMASK)!=l->SIDEMASK){
bitmap[row]&=l->SIDEMASK;
}
}
unsigned int save_bitmap=bitmap[row];
unsigned int bit=-bitmap[row]&bitmap[row]; // lowest available square
bitmap[row]^=bit;
l->board[row]=bit; // place the queen
if((bit&mask)!=0){
if(row==(size-1)){
if( (save_bitmap&l->LASTMASK)==0){
symmetryOps(size,l); // classify and count the completed solution
}
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}else{
row--;
}
}//end while
}
// Iterative (non-recursive) backtracking when a queen occupies the corner.
void symmetry_backTrack_corner_NR(unsigned int size,unsigned int row,unsigned int _left,unsigned int _down, unsigned int _right,local *l)
{
unsigned int mask=(1<<size)-1;
unsigned int bit=0;
unsigned int down[size];
unsigned int left[size];
unsigned int right[size];
unsigned int bitmap[size];
left[row]=_left;
down[row]=_down;
right[row]=_right;
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
while(row>=2){
if(row<l->BOUND1){
// bitmap[row]=bitmap[row]|2;
// bitmap[row]=bitmap[row]^2;
bitmap[row]&=~2; // prune column 1 above BOUND1 (same effect as the two lines above)
}
if(bitmap[row]>0){
bit=-bitmap[row]&bitmap[row]; // lowest available square
bitmap[row]^=bit;
if(row==(size-1)){
l->COUNT8++; // corner solutions always belong to the 8-member class
row--;
}else{
unsigned int n=row++;
left[row]=(left[n]|bit)<<1;
down[row]=(down[n]|bit);
right[row]=(right[n]|bit)>>1;
l->board[row]=bit; // place the queen
// squares still available on the next row
bitmap[row]=mask&~(left[row]|down[row]|right[row]);
}
}else{
row--;
}
}//end while
}
// Iterative driver: runs the corner and non-corner searches over every
// BOUND1/BOUND2 configuration and folds the class counts into the globals.
void symmetry_NR(unsigned int size,local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->SIDEMASK=l->LASTMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1&&l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; // place the second-row queen
// backtracking with the first queen fixed in the corner
symmetry_backTrack_corner_NR(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; // first-row queen, not in the corner
// backtracking with no queen in the corner
symmetry_backTrack_NR(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// Recursive backtracking when no queen sits in the corner.
void symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap&l->LASTMASK)==0){
l->board[row]=bitmap; // place the final queen
symmetryOps(size,l); // classify and count the completed solution
}
}
}else{
if(row<l->BOUND1){
// prune the side columns above BOUND1
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap; // lowest available square
bitmap=bitmap^bit;
l->board[row]=bit;
symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}//end while
}//end if
}
// Recursive backtracking when a queen occupies the corner.
void symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++; // corner solutions always belong to the 8-member class
}
}else{
if(row<l->BOUND1){ // prune column 1 above BOUND1
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap; // lowest available square
bitmap=bitmap^bit;
l->board[row]=bit; // place the queen
symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// Recursive driver: same sweep as symmetry_NR but using the recursive
// backtrackers; results land in the globals UNIQUE/TOTAL.
void symmetry_R(unsigned int size,local* l)
{
l->TOTAL=l->UNIQUE=l->COUNT2=l->COUNT4=l->COUNT8=0;
unsigned int bit=0;
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->LASTMASK=l->SIDEMASK=0;
l->BOUND1=2;
l->BOUND2=0;
l->board[0]=1;
while(l->BOUND1>1 && l->BOUND1<size-1){
if(l->BOUND1<size-1){
bit=1<<l->BOUND1;
l->board[1]=bit; // place the second-row queen
// backtracking with the first queen fixed in the corner
symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
}
l->BOUND1++;
}//end while
l->TOPBIT=1<<(size-1);
l->ENDBIT=l->TOPBIT>>1;
l->SIDEMASK=l->TOPBIT|1;
l->LASTMASK=l->TOPBIT|1;
l->BOUND1=1;
l->BOUND2=size-2;
while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
if(l->BOUND1<l->BOUND2){
bit=1<<l->BOUND1;
l->board[0]=bit; // first-row queen, not in the corner
// backtracking with no queen in the corner
symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
}
l->BOUND1++;
l->BOUND2--;
l->ENDBIT=l->ENDBIT>>1;
l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
}//end while
UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
}
// GPU-capable variant of symmetryOps (identical classification logic;
// COUNT2 = 2-member symmetry group, COUNT4 = 4-member, COUNT8 = 8-member).
__host__ __device__
void GPU_symmetryOps(unsigned int size,local* l)
{
/**
Case (1): board equals its 90-degree rotation -> group of 2 -> COUNT2.
*/
if(l->board[l->BOUND2]==1){
unsigned int ptn;
unsigned int own;
for(ptn=2,own=1;own<size;++own,ptn<<=1){
unsigned int bit;
unsigned int you;
for(bit=1,you=size-1;(l->board[you]!=ptn)&& l->board[own]>=bit;--you){
bit<<=1;
}
if(l->board[own]>bit){
return ; // rotated image is smaller: not the canonical form
}
if(l->board[own]<bit){
break;
}
}//end for
// 90-degree symmetric implies 180- and 270-degree symmetric as well
if(own>size-1){
l->COUNT2++;
return ;
}//end if
}//end if
/**
Case (2): not 90-degree symmetric, but possibly equal to its 180-degree
rotation -> group of 4 -> COUNT4.
*/
//180-degree rotation
if(l->board[size-1]==l->ENDBIT){
unsigned int you;
unsigned int own;
for(you=size-1-1,own=1;own<=size-1;++own,--you){
unsigned int bit;
unsigned int ptn;
for(bit=1,ptn=l->TOPBIT;(ptn!=l->board[you])&&(l->board[own]>=bit);ptn>>=1){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
// 180-degree symmetry is possible even without 90-degree symmetry
if(own>size-1){
l->COUNT4++;
return ;
}
}//end if
/**
Case (3): no rotational symmetry -> full group of 8 -> COUNT8.
*/
//270-degree rotation
if(l->board[l->BOUND1]==l->TOPBIT){
unsigned int ptn;
unsigned int own;
unsigned int you;
unsigned int bit;
for(ptn=l->TOPBIT>>1,own=1;own<=size-1;++own,ptn>>=1){
for(bit=1,you=0;(l->board[you]!=ptn)&&(l->board[own]>=bit);++you){
bit<<=1;
}
if(l->board[own]>bit){
return ;
}
if(l->board[own]<bit){
break;
}
}//end for
}//end if
l->COUNT8++;
}
// GPU-capable recursive backtracking when no queen sits in the corner
// (mirrors symmetry_backTrack but feeds GPU_symmetryOps).
__host__ __device__
void GPU_symmetry_backTrack(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
if(row==(size-1)){
if(bitmap){
if( (bitmap& l->LASTMASK)==0){
l->board[row]=bitmap; // place the final queen
GPU_symmetryOps(size,l); // classify and count the completed solution
}
}
}else{
if(row<l->BOUND1){
// prune the side columns above BOUND1
bitmap=bitmap|l->SIDEMASK;
bitmap=bitmap^l->SIDEMASK;
}else{
if(row==l->BOUND2){
if((down&l->SIDEMASK)==0){
return;
}
if( (down&l->SIDEMASK)!=l->SIDEMASK){
bitmap=bitmap&l->SIDEMASK;
}
}
}
while(bitmap){
unsigned int bit=-bitmap&bitmap; // lowest available square
bitmap=bitmap^bit;
l->board[row]=bit;
GPU_symmetry_backTrack(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}//end while
}//end if
}
// GPU-capable recursive backtracking when a queen occupies the corner
// (mirrors symmetry_backTrack_corner).
__host__ __device__
void GPU_symmetry_backTrack_corner(unsigned int size,unsigned int row,unsigned int left,unsigned int down,unsigned int right,local* l)
{
unsigned int mask=(1<<size)-1;
unsigned int bitmap=mask&~(left|down|right);
unsigned int bit=0;
if(row==(size-1)){
if(bitmap){
l->board[row]=bitmap;
l->COUNT8++; // corner solutions always belong to the 8-member class
}
}else{
if(row<l->BOUND1){ // prune column 1 above BOUND1
bitmap=bitmap|2;
bitmap=bitmap^2;
}
while(bitmap){
bit=-bitmap&bitmap; // lowest available square
bitmap=bitmap^bit;
l->board[row]=bit; // place the queen
GPU_symmetry_backTrack_corner(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1,l);
}
}
}
// Counts completions of a partial board given the left/down/right attack
// masks. Note this solver shifts left>>1 and right<<1 — the mirror of the
// usual convention — but the solution count is unaffected.
__host__ __device__
long symmetry_solve_nodeLayer(unsigned int size,unsigned long left,unsigned long down,unsigned long right)
{
  const long mask = (1 << size) - 1;
  if (down == mask) {         // every column occupied: one complete solution
    return 1;
  }
  long counter = 0;
  long bitmap = mask & ~(left | down | right);
  while (bitmap) {
    const long bit = -bitmap & bitmap;   // lowest available square
    bitmap ^= bit;
    counter += symmetry_solve_nodeLayer(size, (left | bit) >> 1, down | bit, (right | bit) << 1);
  }
  return counter;
}
// Node layer: thread i solves the i-th sub-tree (each node is three
// consecutive longs: left, down, right) and stores its solution count.
__global__
void dim_nodeLayer(unsigned int size,long* nodes,long* solutions,unsigned int numElements)
{
  const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= numElements) return;
  const long* node = &nodes[3 * tid];
  solutions[tid] = symmetry_solve_nodeLayer(size, node[0], node[1], node[2]);
}
// Returns the number of set bits in n (Kernighan's trick: each pass clears
// the lowest set bit).
int countBits_nodeLayer(long n)
{
  int bits = 0;
  for (; n != 0; n &= n - 1) {
    ++bits;
  }
  return bits;
}
// Node-layer entry: despite its name, the active body runs the complete
// symmetry-reduction search (same sweep as symmetry_NR, using the GPU_*
// helpers) and updates the globals UNIQUE/TOTAL. The nodes/k/left/down/right
// parameters are unused by the active code; the original node-collecting
// implementation is kept below for reference.
long kLayer_nodeLayer(int size,std::vector<long>& nodes, int k, long left, long down, long right,local* l)
{
  unsigned int bit=0;
  l->TOTAL=0;
  l->UNIQUE=0;
  l->COUNT2=0;
  l->COUNT4=0;
  l->COUNT8=0;
  l->TOPBIT=1<<(size-1);
  l->ENDBIT=0;
  l->LASTMASK=0;
  l->SIDEMASK=0;
  l->BOUND1=2;
  l->BOUND2=0;
  l->board[0]=1;
  while(l->BOUND1>1 && l->BOUND1<size-1){
    if(l->BOUND1<size-1){
      bit=1<<l->BOUND1;
      l->board[1]=bit; // place the second-row queen
      // backtracking with the first queen fixed in the corner
      GPU_symmetry_backTrack_corner(size,2,(2|bit)<<1,1|bit,(2|bit)>>1,l);
    }
    l->BOUND1++;
  }//end while
  l->TOPBIT=1<<(size-1);
  l->ENDBIT=l->TOPBIT>>1;
  l->SIDEMASK=l->TOPBIT|1;
  l->LASTMASK=l->TOPBIT|1;
  l->BOUND1=1;
  l->BOUND2=size-2;
  while(l->BOUND1>0 && l->BOUND2<size-1 && l->BOUND1<l->BOUND2){
    if(l->BOUND1<l->BOUND2){
      bit=1<<l->BOUND1;
      l->board[0]=bit; // first-row queen, not in the corner
      // backtracking with no queen in the corner
      GPU_symmetry_backTrack(size,1,bit<<1,bit,bit>>1,l);
    }
    l->BOUND1++;
    l->BOUND2--;
    l->ENDBIT=l->ENDBIT>>1;
    l->LASTMASK=l->LASTMASK<<1|l->LASTMASK|l->LASTMASK>>1;
  }//end while
  UNIQUE=l->COUNT2+l->COUNT4+l->COUNT8;
  TOTAL=l->COUNT2*2+l->COUNT4*4+l->COUNT8*8;
  // The function is declared to return long but the original fell off the
  // end without a return statement (undefined behavior). Return the total
  // solution count; the caller in this file ignores the value.
  return (long)TOTAL;
  /** original node-collecting implementation:
  long counter=0;
  long mask=(1<<size)-1;
  // once k downs are occupied we found a layer-k node
  if (countBits_nodeLayer(down) == k) {
    nodes.push_back(left);
    nodes.push_back(down);
    nodes.push_back(right);
    return 1;
  }
  long bit=0;
  for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){
    bit=-bitmap&bitmap;
    counter+=kLayer_nodeLayer(size,nodes,k,(left|bit)>>1,(down|bit),(right|bit)<<1);
  }
  return counter;
  */
}
// Convenience wrapper: enumerates the layer-k nodes starting from an empty
// board and returns them as a flat vector (three longs per node).
std::vector<long> kLayer_nodeLayer(int size,int k,local* l)
{
  std::vector<long> nodes;
  kLayer_nodeLayer(size, nodes, k, 0, 0, 0, l);
  return nodes;
}
// Builds the layer-4 node list, solves each partial board on the GPU and
// doubles the per-node counts (mirror symmetry) into the global TOTAL.
// NOTE(review): the current kLayer_nodeLayer implementation does not push
// any nodes, so `nodes` arrives empty here — confirm which variant is meant.
void symmetry_build_nodeLayer(int size)
{
  local l;   // scratch state for the host-side enumeration
  std::vector<long> nodes = kLayer_nodeLayer(size, 4, &l);
  size_t nodeSize = nodes.size() * sizeof(long);
  long* deviceNodes = NULL;
  cudaMalloc((void**)&deviceNodes, nodeSize);
  // Copy straight out of the vector's storage; the original malloc'd a
  // buffer and immediately leaked it by overwriting the pointer with
  // &nodes[0].
  cudaMemcpy(deviceNodes, nodes.data(), nodeSize, cudaMemcpyHostToDevice);
  // Device output allocation.
  long* deviceSolutions = NULL;
  // Only half the nodes are needed (mirror symmetry); each node is 3 longs.
  int numSolutions = nodes.size() / 6;
  size_t solutionSize = numSolutions * sizeof(long);
  cudaMalloc((void**)&deviceSolutions, solutionSize);
  // Launch the CUDA kernel.
  int threadsPerBlock = 256;
  int blocksPerGrid = (numSolutions + threadsPerBlock - 1) / threadsPerBlock;
  dim_nodeLayer <<<blocksPerGrid, threadsPerBlock >>> (size, deviceNodes, deviceSolutions, numSolutions);
  // Copy the results back to the host.
  long* hostSolutions = (long*)malloc(solutionSize);
  cudaMemcpy(hostSolutions, deviceSolutions, solutionSize, cudaMemcpyDeviceToHost);
  // Sum the partial counts; x2 accounts for the mirrored boards.
  long solutions = 0;
  for (long i = 0; i < numSolutions; i++) {
    solutions += 2*hostSolutions[i]; // Symmetry
  }
  // Release device and host buffers (the original leaked all of them).
  cudaFree(deviceNodes);
  cudaFree(deviceSolutions);
  free(hostSolutions);
  // Publish the result.
  TOTAL = solutions;
}
// Selects the first CUDA device with compute capability >= 1.x and makes it
// current. Returns false when no usable device exists.
bool InitCUDA()
{
  int count = 0;
  cudaGetDeviceCount(&count);
  if (count == 0) {
    fprintf(stderr, "There is no device.\n");
    return false;
  }
  int dev = 0;
  for (; dev < count; ++dev) {
    struct cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, dev) == cudaSuccess && prop.major >= 1) {
      break;
    }
  }
  if (dev == count) {
    fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
    return false;
  }
  cudaSetDevice(dev);
  return true;
}
// Entry point: a flag selects the solver (-r CPU recursive, -c CPU
// non-recursive, -g GPU, -n GPU node-layer; node-layer is the default for an
// unrecognized flag) and each solver is timed over a range of board sizes.
int main(int argc,char** argv)
{
  bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false;
  int argstart=2;
  if(argc>=2&&argv[1][0]=='-'){
    if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
    else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
    // (the original repeated the 'c' test here; that branch was unreachable
    // because the first condition already matched — removed)
    else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
    else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;}
    else{ gpuNodeLayer=true; } // default to the GPU node-layer solver
    argstart=2;
  }
  if(argc<argstart){
    printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
    printf(" -r: CPU 再帰\n");
    printf(" -c: CPU 非再帰\n");
    printf(" -g: GPU 再帰\n");
    printf(" -n: GPU ノードレイヤー\n");
  }
  if(cpur){ printf("\n\n対称解除法 再帰 \n"); }
  else if(cpu){ printf("\n\n対称解除法 非再帰 \n"); }
  else if(gpu){ printf("\n\n対称解除法 GPU\n"); }
  else if(gpuNodeLayer){ printf("\n\n対称解除法 GPUノードレイヤー \n"); }
  if(cpu||cpur){
    int min=4;
    int targetN=17;
    struct timeval t0;
    struct timeval t1;
    printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
    for(int size=min;size<=targetN;size++){
      local l;
      gettimeofday(&t0, NULL); // start timing
      if(cpur){ // recursive
        symmetry_R(size,&l);
      }
      if(cpu){ // non-recursive
        symmetry_NR(size,&l);
      }
      gettimeofday(&t1, NULL); // stop timing
      // Split the elapsed time into days / hours / minutes / seconds /
      // hundredths, borrowing one second when the microsecond part underflows.
      int ss;int ms;int dd;
      if(t1.tv_usec<t0.tv_usec) {
        dd=(t1.tv_sec-t0.tv_sec-1)/86400;
        ss=(t1.tv_sec-t0.tv_sec-1)%86400;
        ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
      }else {
        dd=(t1.tv_sec-t0.tv_sec)/86400;
        ss=(t1.tv_sec-t0.tv_sec)%86400;
        ms=(t1.tv_usec-t0.tv_usec+500)/10000;
      }//end if
      int hh=ss/3600;
      int mm=(ss-hh*3600)/60;
      ss%=60;
      printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n",
          size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
    } //end for
  }//end if
  if(gpu||gpuNodeLayer){
    if(!InitCUDA()){return 0;}
    /* int steps=24576; */
    int min=4;
    int targetN=21;
    struct timeval t0;
    struct timeval t1;
    printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
    for(int size=min;size<=targetN;size++){
      gettimeofday(&t0,NULL); // start timing
      if(gpu){
        TOTAL=UNIQUE=0;
        TOTAL=symmetry_solve_nodeLayer(size,0,0,0); // symmetry-reduction solver
      }else if(gpuNodeLayer){
        TOTAL=UNIQUE=0;
        symmetry_build_nodeLayer(size); // node-layer solver
      }
      gettimeofday(&t1,NULL); // stop timing
      int ss;int ms;int dd;
      if (t1.tv_usec<t0.tv_usec) {
        dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
        ss=(t1.tv_sec-t0.tv_sec-1)%86400;
        ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
      } else {
        dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
        ss=(t1.tv_sec-t0.tv_sec)%86400;
        ms=(t1.tv_usec-t0.tv_usec+500)/10000;
      }//end if
      int hh=ss/3600;
      int mm=(ss-hh*3600)/60;
      ss%=60;
      printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n",
          size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
    }//end for
  }//end if
  return 0;
}
|
3,361 | #include "includes.h"
__global__ void rearrangePopulation(float *gene, float *fit, int* metaData)
{
    // Pairs individual idx with its mirror j = nGene-1-idx and, when the
    // mirror is fitter, swaps both the 6-float genome and the fitness value
    // so the fitter of each pair ends up in the front half of the arrays.
    // metaData[1] holds the population size.
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    int nGene = metaData[1];
    int nHalf = nGene / 2;
    // Strictly less than nHalf: the original's "idx > nHalf" let thread
    // nHalf run too, re-processing pair (nHalf-1, nHalf) concurrently with
    // thread nHalf-1 — a data race for even nGene.
    if (idx >= nHalf) return;
    int j = nGene - 1 - idx;
    if (fit[idx] < fit[j]) {
        for (int k = 0; k < 6; k++) {
            float t = gene[idx*6+k];
            gene[idx*6+k] = gene[j*6+k];
            gene[j*6+k] = t;
        }
        // Swap the fitness exactly once, outside the gene loop: the original
        // swapped it on each of the 6 iterations (an even count), leaving the
        // fitness values un-swapped.
        float t = fit[idx];
        fit[idx] = fit[j];
        fit[j] = t;
    }
}
3,362 | #include "includes.h"
// Maps a uniform [0,1) sample into the per-element range [mins[i], maxes[i]].
__global__ void GaussianSamplePrior(float* input, int inputCount, float* mins, float* maxes, float* randomUniform)
{
    // Flat index across a 2-D grid of 1-D blocks:
    // full grid rows before this one, then blocks before this one, then lane.
    const int i = threadIdx.x
                + blockDim.x * blockIdx.x
                + blockDim.x * blockIdx.y * gridDim.x;
    if (i >= inputCount)
        return;
    const float lo = mins[i];
    input[i] = randomUniform[i] * (maxes[i] - lo) + lo;
}
3,363 | /*
* ExTopUpdater.cpp
*
* Created on: 01 февр. 2016 г.
* Author: aleksandr
*/
#include "ExTopUpdater.h"
#include "SmartIndex.h"
/*
 * indx must lie in the range [0, sizeX-1)
 */
__device__
void ExTopUpdater::operator() (const int indx) {
// Updates the Ex field on the top boundary row (sizeY-1) from interior
// values and the two-deep ExTop history buffer, then shifts the history.
// Ex/ExTop are accessors declared outside this file — NOTE(review): this
// looks like a second-order absorbing-boundary update; confirm the exact
// scheme and coeff[] meanings against ExTopUpdater.h / SmartIndex.h.
int m = indx;
Ex(m, sizeY - 1) = coeff[0]*(Ex(m, sizeY - 3) + ExTop(0, 1, m)) +
coeff[1] * (ExTop(0, 0, m) + ExTop(2, 0, m) - Ex(m, sizeY - 2) - ExTop(1, 1, m)) +
coeff[2] * ExTop(1, 0, m) - ExTop(2, 1, m);
// Shift history: level 0 (current) becomes level 1 (previous), then capture
// the freshly updated top three rows into level 0.
for (int n = 0; n < 3; n++) {
ExTop(n, 1, m) = ExTop(n, 0, m);
ExTop(n, 0, m) = Ex(m, sizeY - 1 - n);
}
}
|
3,364 | #include "block.cuh"
Block::Block() {
}
// Constructs a block from its axis-aligned bounding box and display color.
Block::Block(AABB3 aabb, Vec3 color) {
this->aabb = aabb;
this->color = color;
}
// Returns a pointer to this block's bounding box (owned by the Block).
AABB3* Block::get_bounding_box() {
return &this->aabb;
}
|
3,365 | #include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/sequence.h>
#include <stdio.h>
using namespace thrust::placeholders;
/*************************************/
/* CONVERT LINEAR INDEX TO ROW INDEX */
/*************************************/
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T> {
T Ncols; // --- Number of columns
__host__ __device__ linear_index_to_row_index(T Ncols) : Ncols(Ncols) {}
// Maps a flat row-major element index to its row number.
__host__ __device__ T operator()(T i) { return i / Ncols; }
};
/*******************/
/* EXPAND OPERATOR */
/*******************/
// expand(counts, values): writes values[i] repeated counts[i] times to
// output (e.g. counts {2,1}, values {a,b} -> {a,a,b}); returns the end of
// the written range. Classic Thrust run-length "expand" built from scan,
// scatter_if, max-scan and gather.
template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
OutputIterator expand(InputIterator1 first1,
InputIterator1 last1,
InputIterator2 first2,
OutputIterator output)
{
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1); // total repetitions
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0), thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(), output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output; thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2, output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
/**************************/
/* STRIDED RANGE OPERATOR */
/**************************/
// Iterator adaptor exposing every stride-th element of [first, last) as a
// contiguous range (permutation over a strided counting sequence).
// NOTE(review): defined but not used by the main() in this excerpt.
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
// Functor mapping logical index i to the physical offset stride*i.
struct stride_functor : public thrust::unary_function<difference_type,difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const
{
// ceil((last-first)/stride) elements past begin()
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
/********/
/* MAIN */
/********/
// Demo driver: builds a Nrows x Ncols table of centroid indices by expanding
// each centroid id Nrows times (via expand() above) and prints the table.
int main(){
/**************************/
/* SETTING UP THE PROBLEM */
/**************************/
const int Nrows = 10; // --- Number of objects
const int Ncols = 3; // --- Number of centroids
// d_sequence holds the expanded table; d_counts says "repeat each of the
// Ncols centroid ids Nrows times".
thrust::device_vector<int> d_sequence(Nrows * Ncols);
thrust::device_vector<int> d_counts(Ncols, Nrows);
// seed the first Ncols slots with the centroid ids 0..Ncols-1
thrust::sequence(d_sequence.begin(), d_sequence.begin() + Ncols);
// Expand each id Nrows times; the permutation/transform iterator transposes
// the output layout so row-major reads below see one centroid per column.
// NOTE(review): `_1` needs thrust::placeholders in scope — presumably a
// `using namespace thrust::placeholders;` exists earlier in the file; confirm.
expand(d_counts.begin(), d_counts.end(), d_sequence.begin(),
thrust::make_permutation_iterator(
d_sequence.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)));
printf("\n\nCentroid indices\n");
for(int i = 0; i < Nrows; i++) {
std::cout << " [ ";
for(int j = 0; j < Ncols; j++)
std::cout << d_sequence[i * Ncols + j] << " ";
std::cout << "]\n";
}
return 0;
}
|
3,366 | // GPU kernel for convoluting sine and cosine multiplication data with filter coefficients with hamming window ....
// Grid-stride FIR filter: convolves the sine/cosine mixer outputs with the
// Hamming-windowed low-pass coefficients, producing one filtered sample per
// output index i in [0, b).
__global__ void conv(float *dev_op_sine, float *dev_op_cosine, float *dev_op_sine_conv, float *dev_op_cosine_conv, float *dev_lpf_hamming, int b, int windowLength){
int i,k,l;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
float temp1, temp2;
// grid-stride loop: each thread handles indices idx, idx+stride, ...
for(i = idx; i < b; i+=stride){
temp1 = 0;
temp2 = 0;
for(k = 0; k < windowLength; k++){
// NOTE(review): l runs windowLength..1 and never 0, so the input index
// reaches i + windowLength (element i itself is skipped). The input
// buffers must be padded by windowLength extra samples, or this is an
// off-by-one for l = windowLength - 1 - k — confirm against the host code.
l = windowLength - k;
temp1 += dev_op_sine[i+l] * dev_lpf_hamming[k];
temp2 += dev_op_cosine[i+l] * dev_lpf_hamming[k];
}
dev_op_sine_conv[i] = temp1;
dev_op_cosine_conv[i] = temp2;
}
}
|
3,367 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
#define MASK_LEN 8
/*as mask is never changing we can define a constant memory on the device side so that
we do not have to copu again and again and loading from const cache is much much faster that
loading from d-ram.
*/
__constant__ int mask[MASK_LEN];
// 1-D convolution of a[0..n) with the constant-memory mask (zero padding at
// the borders). One output element per thread.
__global__ void conv_1d(int* a, int* c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
// BUG FIX: guard the grid tail — the launch rounds the grid up with a
// ceil-div, so threads past n would write out of bounds of c.
if (id >= n) return;
// radius of the mask (mid point)
int r = MASK_LEN / 2;
// first input index touched by this output element
int start = id - r;
int temp = 0;
for (int j = 0; j < MASK_LEN; j++)
{
// skip taps that fall outside the input (implicit zero padding)
if ((start + j >= 0) && (start + j < n))
{
temp += a[start + j] * mask[j];
}
}
c[id] = temp;
}
// Initialize
// Fill a[0..n) with pseudo-random values in [0, div) using rand().
void Array_init(int* a, int n, int div) {
int* end = a + n;
while (a != end) {
*a++ = rand() % div;
}
}
// CPU reference check: recompute the 1-D convolution of a (length n) with
// mask b (length m, zero-padded at the borders) and assert it matches c.
void check_answer(int* a, int* b, int* c, int n, int m) {
const int radius = m / 2;
for (int i = 0; i < n; i++)
{
const int lo = i - radius; // first input index touched
int acc = 0;
for (int j = 0; j < m; j++)
{
const int idx = lo + j;
if (idx >= 0 && idx < n)
acc += a[idx] * b[j];
}
assert(acc == c[i]);
}
}
// Driver: random input + random constant-memory mask, GPU convolution,
// CPU verification.
int main() {
// number of elements in the input/result arrays
int n = 1 << 16;
int n_bytes = n * sizeof(int);
// number of elements in the mask
int m = 8;
int m_bytes = m * sizeof(int);
// allocate and initialize the input array
int* h_arr = new int[n];
Array_init(h_arr, n, 100);
// allocate and initialize the mask
int* h_mask = new int[m];
Array_init(h_mask, m, 10);
// allocate space for the result
int* h_result = new int[n];
// allocate device memory
int* d_arr, * d_res;
cudaMalloc(&d_arr, n_bytes);
cudaMalloc(&d_res, n_bytes);
cudaMemcpy(d_arr, h_arr, n_bytes, cudaMemcpyHostToDevice);
// special function to copy into __constant__ memory
cudaMemcpyToSymbol(mask, h_mask, m_bytes);
int threads = 256;
int grid = (n + threads - 1) / threads; // ceil-div so every element is covered
conv_1d <<<grid, threads>>> (d_arr, d_res, n);
// blocking copy also synchronizes with the kernel
cudaMemcpy(h_result, d_res, n_bytes, cudaMemcpyDeviceToHost);
check_answer(h_arr, h_mask, h_result, n, m);
// BUG FIX: these buffers were allocated with new[], so they must be
// released with delete[] — calling free() on a new[] pointer is undefined
// behavior.
delete[] h_result;
delete[] h_mask;
delete[] h_arr;
cudaFree(d_arr);
cudaFree(d_res);
printf("COMPLETED SUCCESFULLY\n");
return 0;
}
3,368 | #include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#define N (1<<22)
#define BLOCK_SIZE 128
// Abort-on-error helper: prints the CUDA error string with file/line context
// and exits the process. Use through the HANDLE_ERROR macro below.
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
// Wraps a CUDA runtime call and checks its return code at the call site.
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Aborts when a host allocation returned NULL.
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
// Block-wide sum reduction (sequential addressing). Each block reduces
// 2*BLOCK_SIZE input elements: every thread sums two elements during the
// load, then a shared-memory tree reduction halves the active range each
// step. res[blockIdx.x] receives the block's partial sum; the host adds the
// partials together.
// Requires: launched with exactly BLOCK_SIZE threads per block.
__global__ void reduxKernel(int* v, int n, int* res){
int tx = threadIdx.x;
int idx = blockIdx.x * (BLOCK_SIZE*2) + tx;
__shared__ int vs[BLOCK_SIZE*2];
// first reduction step during the load; tail of the array reads as 0
int reg = idx < n ? v[idx] : 0;
if( idx + BLOCK_SIZE < n ){
reg += v[idx + BLOCK_SIZE];
}
vs[tx] = reg;
__syncthreads();
// Tree reduction. BUG FIX: __syncthreads() must be reached by ALL threads
// of the block; the original placed it inside the divergent `if (tx < k)`
// branches, which is undefined behavior. The barrier now sits outside the
// conditional. (Accumulating in `reg` saves one shared-memory read.)
if( BLOCK_SIZE >= 512 ){
if( tx < 256 ){ vs[tx] = reg = reg + vs[tx + 256]; }
__syncthreads();
}
if( BLOCK_SIZE >= 256 ){
if( tx < 128 ){ vs[tx] = reg = reg + vs[tx + 128]; }
__syncthreads();
}
if( BLOCK_SIZE >= 128 ){
if( tx < 64 ){ vs[tx] = reg = reg + vs[tx + 64]; }
__syncthreads();
}
if( tx < 32 ){ // final warp: no explicit block barriers used here
// volatile stops the compiler from caching/reordering the shared reads
volatile int* synchdShared = vs;
// NOTE(review): this relies on implicit warp-synchronous execution
// (a pre-Volta assumption); on Volta+ independent thread scheduling,
// __syncwarp() would be needed between the steps — confirm target arch.
if( BLOCK_SIZE >= 64 ){
synchdShared[tx] = reg = reg + synchdShared[tx + 32];
}
if( BLOCK_SIZE >= 32 ){
synchdShared[tx] = reg = reg + synchdShared[tx + 16];
}
if( BLOCK_SIZE >= 16 ){
synchdShared[tx] = reg = reg + synchdShared[tx + 8];
}
if( BLOCK_SIZE >= 8 ){
synchdShared[tx] = reg = reg + synchdShared[tx + 4];
}
if( BLOCK_SIZE >= 4 ){
synchdShared[tx] = reg = reg + synchdShared[tx + 2];
}
if( BLOCK_SIZE >= 2 ){
synchdShared[tx] = reg = reg + synchdShared[tx + 1];
}
}
// only one thread publishes the block result (avoids write contention)
if(tx == 0){
res[blockIdx.x] = vs[0];
}
}
// Sequential CPU reference: sum of v[0..n).
int hostRedux(int* v, int n){
int total = 0;
int* p = v;
int* end = v + n;
while (p != end) total += *p++;
return total;
}
// Allocates device buffers, copies v over, runs the reduction kernel and
// returns the per-block partial sums in res (caller adds them up).
// res must hold at least ceil(n / (2*BLOCK_SIZE)) ints.
void deviceRedux(int* v, int n, int* res){
// reserve memory on device
int *vd;
int *resd;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float time;
// BUG FIX: the block count was hard-coded from the compile-time N,
// silently ignoring the n parameter. Derive it from n with a ceil-div over
// the 2*BLOCK_SIZE elements each block consumes (same value when n == N).
const int numBlocks = (n + 2*BLOCK_SIZE - 1) / (2*BLOCK_SIZE);
HANDLE_ERROR( cudaMalloc((void**)&vd, n*sizeof(int)) );
HANDLE_ERROR( cudaMalloc((void**)&resd, numBlocks*sizeof(int)) );
cudaEventRecord( start, 0 );
// transfer v to vd
HANDLE_ERROR( cudaMemcpy(vd, v, n*sizeof(int), cudaMemcpyHostToDevice) );
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
printf("memcpy in time: %f ms\n", time);
cudaEventRecord( start, 0 );
// invoke kernel
reduxKernel<<<numBlocks,BLOCK_SIZE>>>(vd,n,resd);
HANDLE_ERROR( cudaGetLastError() ); // catch launch-configuration errors
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
printf("kernel time: %f ms. ", time);
printf("Bandwidth: %f GB/s\n", (n*sizeof(int)/1e6)/time);
cudaEventRecord( start, 0 );
// copy the per-block partials back to the host
HANDLE_ERROR( cudaMemcpy(res, resd, numBlocks*sizeof(int), cudaMemcpyDeviceToHost) );
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
printf("memcpy out time: %f ms\n", time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// free device buffers
cudaFree(vd);
cudaFree(resd);
}
/* Compute *result = *x - *y for `struct timeval' values.
   NOTE: *y is used as scratch and may be modified (carry normalization),
   matching the classic glibc-manual example this follows.
   Returns 1 if the difference is negative, otherwise 0. */
int
timeval_subtract (struct timeval* result, struct timeval*x, struct timeval*y)
{
/* Borrow from seconds when x has fewer microseconds than y. */
if (x->tv_usec < y->tv_usec) {
int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * carry;
y->tv_sec += carry;
}
/* Push excess microseconds into seconds. */
if (x->tv_usec - y->tv_usec > 1000000) {
int carry = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * carry;
y->tv_sec -= carry;
}
/* tv_usec is now guaranteed non-negative. */
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Negative iff x ended up with the smaller seconds value. */
return x->tv_sec < y->tv_sec;
}
// Driver: fills a random vector, sums it on the CPU (timed with
// gettimeofday) and on the GPU (timed with CUDA events), prints both
// timings, and returns 0 only when the two sums agree.
int main(){
int* v = (int*)malloc( sizeof(int) * N);
for(int i=0; i < N; i++){
v[i] = rand() % 10;
//v[i] = i;
}
int h;
struct timeval tv_start;
struct timeval tv_stop;
struct timeval tv_diff;
gettimeofday(&tv_start, 0);
h=hostRedux(v,N);
gettimeofday(&tv_stop, 0);
timeval_subtract(&tv_diff, &tv_stop, &tv_start);
printf("host: %d\n", h);
float msdiff = tv_diff.tv_sec*1000 + tv_diff.tv_usec/1000.0;
printf("CPU time: %.3f ms\n", msdiff);
// one partial sum per block; each block consumes 2*BLOCK_SIZE elements
const int numBlocks = (N/BLOCK_SIZE)/2;
int *res = (int*)malloc( sizeof(int) * numBlocks);
int d;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceRedux(v,N, res);
// final reduction of the per-block partials happens on the CPU
d=0;
for(int i=0; i < numBlocks; i++){
// printf("%d\n", res[i]);
d+=res[i];
}
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("CUDA time: %f ms\n", time);
printf("device: %d\n", d);
free(v);
free(res);
// process exit status: 0 iff host and device sums match
return !(h==d);
}
|
3,369 | #include <iostream>
#include <fstream>
using namespace std;
#define BLOCK_SIZE 128
// Zero-fills one output element per thread.
// Expects a 1-D block; output must hold at least blockDim.x floats.
__global__ void simpleKernel(
float* output )
{
const int tid = threadIdx.x;
output[tid] = 0.0f;
}
// Smoke test: allocate one block worth of floats, zero them with a kernel,
// free, synchronize, and report each API call's success. Exits 0 iff all
// three calls succeeded.
int main(int argc, char *argv[])
{
unsigned N = BLOCK_SIZE;
unsigned size = N*sizeof(float);
float* g_data;
cudaError mallocd = cudaMalloc( &g_data, size );
dim3 block( BLOCK_SIZE );
dim3 grid( 1 );
simpleKernel<<< grid, block>>>(g_data);
cudaError freed = cudaFree( g_data );
// FIX: cudaThreadSynchronize() is deprecated (since CUDA 4.0);
// cudaDeviceSynchronize() is the documented, behaviorally equivalent
// replacement and also surfaces any asynchronous kernel error here.
cudaError sync = cudaDeviceSynchronize();
bool all_success = (mallocd == cudaSuccess)
&& (freed == cudaSuccess)
&& (sync == cudaSuccess);
cout << "mallocd = " << (mallocd == cudaSuccess) << endl
<< "freed = " << (freed == cudaSuccess) << endl
<< "sync = " << (sync == cudaSuccess) << endl;
bool any_failed = !all_success;
exit(any_failed);
}
|
3,370 | #include <iostream>
#include <iomanip>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
using namespace std;
// Absolute-value ordering used by thrust::max_element to find the entry of
// largest magnitude (partial pivot search).
struct comparator {
__host__ __device__ bool operator()(double lhs, double rhs)
{
double ml = fabs(lhs);
double mr = fabs(rhs);
return ml < mr;
}
};
// Checked CUDA call: on failure prints file/line and the error string, then
// terminates the process. Wrap every fallible CUDA runtime call with it.
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
// Exchanges rows `from` and `to` in both the working matrix da and the
// inverse accumulator res. With this file's layout (row r, col c) -> [c*n+r]
// (see main), element [idx*n + from] is row `from` of column idx, so the
// grid-stride loop walks the columns.
__global__ void swapStr(double* da, double* res, int n, int from, int to) {
const int step = gridDim.x * blockDim.x;
const int first = blockIdx.x * blockDim.x + threadIdx.x;
for (int col = first; col < n; col += step) {
double held = da[col * n + from];
da[col * n + from] = da[col * n + to];
da[col * n + to] = held;
}
for (int col = first; col < n; col += step) {
double held = res[col * n + from];
res[col * n + from] = res[col * n + to];
res[col * n + to] = held;
}
}
// One forward-elimination step for pivot row/column `cur`.
// Layout (see main): element (row r, col c) is stored at [c * n + r], so
// da[cur*n + x] is row x of the pivot COLUMN and da[y*n + cur] is the pivot
// ROW entry of column y. Here x iterates rows below the pivot, y columns.
// First nest: applies the row operation (row_x -= m * row_cur) to every
// column of the inverse accumulator `res`. Second nest: same operation on
// the trailing submatrix of `da`. Neither nest writes the pivot row, pivot
// column, or the pivot itself, so the grid-wide update is race-free.
__global__ void zeroColumn(double* da, double* res, int n, int cur) {
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int x, y;
int idxx = blockDim.x * blockIdx.x + threadIdx.x;
int idxy = blockDim.y * blockIdx.y + threadIdx.y;
for(y = idxy; y < n; y += offsety) {
for(x = idxx + cur + 1; x < n; x += offsetx) {
// if (fabs(da[cur * n + x]) > 1e-7) {
res[y * n + x] = res[y * n + x] - da[cur * n + x] * res[y * n + cur] / da[cur * n + cur];
// }
}
}
for(y = idxy + cur + 1; y < n; y += offsety) {
for(x = idxx + cur + 1; x < n; x += offsetx) {
da[y * n + x] = da[y * n + x] - da[cur * n + x] * da[y * n + cur] / da[cur * n + cur];
}
}
}
// Back-substitution step for pivot `cur`: clears the entries ABOVE the pivot
// (rows x < cur) in the inverse accumulator `res`, for every column y.
// `da` is only read (its below/left of `cur` entries and the pivot), and
// res[y*n + cur] (x == cur) is never written here, so the update is race-free.
__global__ void zeroColumnUp(double* da, double* res, int n, int cur) {
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int x, y;
int idxx = blockDim.x * blockIdx.x + threadIdx.x;
int idxy = blockDim.y * blockIdx.y + threadIdx.y;
for(y = idxy; y < n; y += offsety) {
for(x = idxx; x < cur; x += offsetx) {
// if (fabs(da[cur * n + x]) > 1e-7)
res[y * n + x] = res[y * n + x] - res[y * n + cur] * da[cur * n + x] / da[cur * n + cur];
}
}
}
// Normalizes the inverse accumulator: every entry of res is divided by the
// diagonal pivot of the triangularized matrix, res[y*n + x] /= da[x*n + x].
// 2-D grid-stride over the n x n matrix.
__global__ void divide(double* da, double* res, int n) {
const int sx = blockDim.x * gridDim.x;
const int sy = blockDim.y * gridDim.y;
const int x0 = blockDim.x * blockIdx.x + threadIdx.x;
const int y0 = blockDim.y * blockIdx.y + threadIdx.y;
for (int y = y0; y < n; y += sy) {
for (int x = x0; x < n; x += sx) {
res[y * n + x] /= da[x * n + x];
}
}
}
// Reads an n x n matrix from stdin, inverts it on the GPU via Gauss-Jordan
// elimination with partial pivoting, and prints the inverse.
int main() {
ios_base::sync_with_stdio(false);
comparator comp = comparator();
int n;
cin >> n;
// BUG FIX: the buffer holds doubles, not double pointers — allocate
// sizeof(double) per element (sizeof(double*) only worked by accident on
// LP64 targets where both happen to be 8 bytes).
double* data = (double*)malloc(sizeof(double) * n * n);
// start from the identity; layout is (row i, col j) -> data[j * n + i]
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
data[j * n + i] = (i == j);
}
}
double* dev_res;
CSC(cudaMalloc(&dev_res, sizeof(double) * n * n));
CSC(cudaMemcpy(dev_res, data, sizeof(double) * n * n, cudaMemcpyHostToDevice));
// read the input matrix (row-major on stdin, transposed into the buffer)
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
cin >> data[j * n + i];
}
}
double* dev_arr;
CSC(cudaMalloc(&dev_arr, sizeof(double) * n * n));
CSC(cudaMemcpy(dev_arr, data, sizeof(double) * n * n, cudaMemcpyHostToDevice));
thrust::device_ptr<double> p_arr = thrust::device_pointer_cast(dev_arr);
thrust::device_ptr<double> res;
// forward elimination with partial pivoting: the pivot is the entry of
// largest magnitude in column i, rows i..n-1
for (int i = 0; i < n - 1; ++i) {
res = thrust::max_element(p_arr + i * n + i, p_arr + i * n + n, comp);
if ((int)(res - p_arr) % n > i) swapStr<<<256, 256>>>(dev_arr, dev_res, n, i, (int)(res - p_arr) % n);
zeroColumn<<<dim3(16, 16), dim3(16, 16)>>>(dev_arr, dev_res, n, i);
}
// back substitution
for (int i = n - 1; i > 0; --i) {
zeroColumnUp<<<dim3(16, 16), dim3(16, 16)>>>(dev_arr, dev_res, n, i);
}
// divide by the diagonal pivots to finish the inverse
divide<<<dim3(16, 16), dim3(16, 16)>>>(dev_arr, dev_res, n);
CSC(cudaMemcpy(data, dev_res, sizeof(double) * n * n, cudaMemcpyDeviceToHost));
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
cout << fixed << setprecision(10) << data[j * n + i] << " ";
}
cout << endl;
}
cudaFree(dev_arr);
cudaFree(dev_res);
free(data);
return 0;
}
3,371 | // codigo incrementa e depois decrementa valores de um vetor.
//
// este codigo exemplifica o uso de __syncthreads() e
// o uso de memoria compartilhada criada estaticamente
// e dinamicamente.
//
// a primeira grade incrementa as posicoes de um vetor
// N vezes por thread. Usa memoria compartilhada criada estaticamente
//
// a segunda grade decrementa da mesma forma.
// o vetor comeca com vlrs 0 (zero) e termina com 0 (zero).
// Usa memória compartilhada criada dinâmicamente.
//
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// no maximo 1024, pois determina o nr de threads no bloco
#define N 1024
// Demo kernel: each thread increments shared-memory values N times, reading
// the mirrored slot (N-t-1) and writing its own slot t. Uses STATICALLY
// allocated shared memory. Must be launched as <<<1, N>>>.
__global__ void staticShdMem(int *vetD_glb)
{
// static allocation in shared memory
__shared__ int vetD_shd[N];
int t = threadIdx.x;
int i, aux;
// copy from global memory into shared memory;
// all N threads participate
vetD_shd[t] = vetD_glb[t];
// each thread increments position t a total of N times
for (i = 0; i < N; i++)
{
__syncthreads(); // barrier required before the mirrored read
aux = vetD_shd[N-t-1];
aux++;
__syncthreads(); // keeping only this barrier would produce wrong results
vetD_shd[t] = aux;
}
// Write the result back to global memory.
// Executed by all N threads.
vetD_glb[t] = vetD_shd[t];
}
// Demo kernel: each thread decrements shared-memory values N times, reading
// the mirrored slot (N-t-1) and writing its own slot t. Uses DYNAMICALLY
// allocated shared memory (size passed as the third launch parameter).
// Must be launched as <<<1, N, N*sizeof(int)>>>.
__global__ void dynamicShdMem(int *vetD_glb)
{
// dynamic allocation in shared memory
extern __shared__ int vetD_shd[];
int t = threadIdx.x;
int i, aux;
// copy from global memory into shared memory;
// all N threads participate
vetD_shd[t] = vetD_glb[t];
// each thread decrements position t a total of N times
for (i = 0; i < N; i++)
{
__syncthreads(); // barrier required before the mirrored read
aux = vetD_shd[N-t-1];
aux--;
__syncthreads(); // keeping only this barrier would produce wrong results
vetD_shd[t] = aux;
}
// Write the result back to global memory.
// Executed by all N threads.
vetD_glb[t] = vetD_shd[t];
}
// Host driver: zero-initializes a vector, runs the static-shared-memory
// increment kernel, prints the result, then runs the dynamic-shared-memory
// decrement kernel (reusing the data already on the device) and prints the
// result again (the vector should end back at all zeros).
int main(void)
{
int vetA_h[N], vetD_h[N];
int *vetD_d;
int i;
for (i = 0; i < N; i++) {
vetA_h[i] = 0;
}
cudaMalloc(&vetD_d, N * sizeof(int));
cudaMemcpy(vetD_d, vetA_h, N*sizeof(int), cudaMemcpyHostToDevice);
// the algorithm only works with all threads in a single block,
// because __syncthreads() only synchronizes within one block
// launch the statically-allocated shared-memory version
staticShdMem<<<1,N>>>(vetD_d);
cudaMemcpy(vetD_h, vetD_d, N*sizeof(int), cudaMemcpyDeviceToHost);
printf("Static Results(%d): ", N);
for (i = 0; i < N; i++)
printf("%d ", vetD_h[i]);
printf("\n\n");
// no host->device copy needed here: the previous kernel already left
// the vector's contents on the device
// cudaMemcpy(vetD_d, vetD_h, N*sizeof(int), cudaMemcpyHostToDevice);
// launch the dynamically-allocated shared-memory version
// (third launch parameter = dynamic shared-memory size in bytes)
dynamicShdMem<<<1,N,(N*sizeof(int))>>>(vetD_d);
cudaMemcpy(vetD_h, vetD_d, N * sizeof(int), cudaMemcpyDeviceToHost);
printf("Dynamic Results(%d): ", N);
for (i = 0; i < N; i++)
printf("%d ", vetD_h[i]);
printf("\n\n");
// free the device vector
cudaFree(vetD_d);
exit(0);
}
|
3,372 | #include "ZonePlanMCMC.cuh"
#include <vector>
#include <iostream>
// Linear congruential generator (glibc constants). Advances *randx in place
// and returns the new state masked to 31 bits.
__device__
__host__
unsigned int rand(unsigned int* randx) {
unsigned int next = *randx * 1103515245u + 12345u;
*randx = next;
return next & 2147483647u;
}
// Uniform float in [0, 1): the 31-bit LCG output scaled by 1/2^31
// (an exact power of two, so the multiply equals the original division).
__device__
__host__
float randf(unsigned int* randx) {
const float scale = 1.0f / (float(2147483647) + 1);
return rand(randx) * scale;
}
// Uniform float in [a, b).
__device__
__host__
float randf(unsigned int* randx, float a, float b) {
const float u = randf(randx);
return u * (b - a) + a;
}
// Inverse-CDF sampling: draws u ~ U[0, cdf[num-1]) and returns the first
// index whose cumulative value reaches u (falls back to the last index).
// NOTE(review): the index is returned as a float and truncated by callers;
// an int return would be cleaner but changes the signature — confirm first.
// NOTE(review): num == 0 would read cdf[-1]; callers (sampleFromPdf) guard
// against it — confirm no other call sites exist.
__device__
__host__
float sampleFromCdf(unsigned int* randx, float* cdf, int num) {
float rnd = randf(randx, 0, cdf[num-1]);
for (int i = 0; i < num; ++i) {
if (rnd <= cdf[i]) return i;
}
return num - 1;
}
// Draws an index from an (unnormalized) pdf of length num by building the
// running cumulative sum and inverse-CDF sampling from it. Negative pdf
// entries contribute no probability mass (used to mark exhausted categories).
// Precondition: num <= 40 (fixed-size scratch buffer).
__device__
__host__
float sampleFromPdf(unsigned int* randx, float* pdf, int num) {
if (num == 0) return 0;
float cdf[40];
cdf[0] = pdf[0];
for (int i = 1; i < num; ++i) {
// BUG FIX: the original tested cdf[i] >= 0 before cdf[i] had ever been
// written — an uninitialized read. The intent is to skip negative *pdf*
// entries so they add no mass to the cumulative sum.
if (pdf[i] >= 0) {
cdf[i] = cdf[i - 1] + pdf[i];
} else {
cdf[i] = cdf[i - 1];
}
}
return sampleFromCdf(randx, cdf, num);
}
// Sequential sum of values[0..num).
__device__
__host__
float sum(float* values, int num) {
float acc = 0.0f;
const float* p = values;
for (const float* end = values + num; p != end; ++p) {
acc += *p;
}
return acc;
}
// Exchange the (type, level) pair of two zone cells.
__device__
__host__
void swapZoneType(zone_type* z1, zone_type* z2) {
const int held_type = z1->type;
const int held_level = z1->level;
z1->type = z2->type;
z1->level = z2->level;
z2->type = held_type;
z2->level = held_level;
}
// Inner product of two 9-element vectors (preference weights x features).
__device__
__host__
float dot2(float* preference, float* feature) {
float acc = 0.0f;
for (int k = 0; k < 9; ++k) {
acc += preference[k] * feature[k];
}
return acc;
}
// Noise level at a residence: the strongest of three linear-falloff sources
// (factory within 800, amusement within 400, store within 200), clamped at 0.
__device__
__host__
float noise(float distToFactory, float distToAmusement, float distToStore) {
float worst = 800.0f - distToFactory; // factory term
float ka = 400.0f - distToAmusement; // amusement term
if (ka > worst) worst = ka;
float ks = 200.0f - distToStore; // store term
if (ks > worst) worst = ks;
return worst > 0.0f ? worst : 0.0f;
}
// Pollution level: linear falloff within 800 of the nearest factory,
// clamped at 0.
__device__
__host__
float pollution(float distToFactory) {
const float level = 800.0f - distToFactory;
return level > 0.0f ? level : 0.0f;
}
// One MCMC chain over zone plans: starts from a randomly sampled initial
// plan, then for numIterations proposes a random swap of two zone cells,
// scores the proposal by aggregated resident preference, and keeps track of
// the best-scoring plan seen in *bestPlan.
// randx is the (value-passed) LCG seed for this chain.
__device__
__host__
void MCMCstep(int numIterations, unsigned int randx, zone_plan* plan, zone_plan* proposal, zone_plan* bestPlan) {
float K = 0.001;
// preference weights: 10 resident profiles x 9 features
float preference[10][9];
preference[0][0] = 0; preference[0][1] = 0; preference[0][2] = 0.15; preference[0][3] = 0.15; preference[0][4] = 0.3; preference[0][5] = 0; preference[0][6] = 0.1; preference[0][7] = 0.1; preference[0][8] = 0.2;
preference[1][0] = 0; preference[1][1] = 0; preference[1][2] = 0.15; preference[1][3] = 0; preference[1][4] = 0.55; preference[1][5] = 0; preference[1][6] = 0.2; preference[1][7] = 0.1; preference[1][8] = 0;
preference[2][0] = 0; preference[2][1] = 0; preference[2][2] = 0.05; preference[2][3] = 0; preference[2][4] = 0; preference[2][5] = 0; preference[2][6] = 0.25; preference[2][7] = 0.1; preference[2][8] = 0.6;
preference[3][0] = 0.18; preference[3][1] = 0.17; preference[3][2] = 0; preference[3][3] = 0.17; preference[3][4] = 0; preference[3][5] = 0.08; preference[3][6] = 0.2; preference[3][7] = 0.2; preference[3][8] = 0;
preference[4][0] = 0.3; preference[4][1] = 0; preference[4][2] = 0.3; preference[4][3] = 0.1; preference[4][4] = 0; preference[4][5] = 0; preference[4][6] = 0.1; preference[4][7] = 0.2; preference[4][8] = 0;
preference[5][0] = 0.05; preference[5][1] = 0; preference[5][2] = 0.1; preference[5][3] = 0.2; preference[5][4] = 0.1; preference[5][5] = 0; preference[5][6] = 0.1; preference[5][7] = 0.15; preference[5][8] = 0.3;
preference[6][0] = 0.15; preference[6][1] = 0.1; preference[6][2] = 0; preference[6][3] = 0.15; preference[6][4] = 0; preference[6][5] = 0.1; preference[6][6] = 0.1; preference[6][7] = 0.2; preference[6][8] = 0.2;
preference[7][0] = 0.2; preference[7][1] = 0; preference[7][2] = 0.25; preference[7][3] = 0; preference[7][4] = 0.15; preference[7][5] = 0; preference[7][6] = 0.1; preference[7][7] = 0.1; preference[7][8] = 0.2;
preference[8][0] = 0.3; preference[8][1] = 0; preference[8][2] = 0.15; preference[8][3] = 0.05; preference[8][4] = 0; preference[8][5] = 0; preference[8][6] = 0.25; preference[8][7] = 0.25; preference[8][8] = 0;
preference[9][0] = 0.4; preference[9][1] = 0; preference[9][2] = 0.2; preference[9][3] = 0; preference[9][4] = 0; preference[9][5] = 0; preference[9][6] = 0.2; preference[9][7] = 0.2; preference[9][8] = 0;
// population ratio of each resident profile
float ratioPeople[10] = {0.06667, 0.06667, 0.06667, 0.21, 0.09, 0.09, 0.09, 0.12, 0.1, 0.1};
// population of each zone level (level 1..3)
float levelPeople[3] = {1, 5, 10};
// initial plan: sample each cell's (type, level) from a fixed distribution,
// drawing without replacement (remainedBlockNum decrements per draw)
{
float zoneTypeDistribution[18] = {0.2, 0.38, 0.2, 0.06, 0.05, 0.03, 0.02, 0.01, 0.01, 0.02, 0, 0, 0.01, 0, 0, 0.01, 0, 0};
float Z = sum(zoneTypeDistribution, 18);
float remainedBlockNum[18];
for (int zi = 0; zi < 18; ++zi) {
remainedBlockNum[zi] = zoneTypeDistribution[zi] / Z * ZONE_GRID_SIZE * ZONE_GRID_SIZE;
}
for (int r = 0; r < ZONE_GRID_SIZE; ++r) {
for (int c = 0; c < ZONE_GRID_SIZE; ++c) {
// index n encodes (type, level): type = n/3, level = n%3 + 1
int n = sampleFromPdf(&randx, remainedBlockNum, 18);
plan->zones[r][c].type = n / 3;
plan->zones[r][c].level = n % 3 + 1;
remainedBlockNum[n]--;
}
}
plan->score = 0.0;
}
bestPlan->score = 0.0;
//float current_score = 0.0f;
for (int loop = 0; loop < numIterations; ++loop) {
// create a proposal
{
// copy the current plan to the proposal
*proposal = *plan;
// swap the zone type between two randomly chosen, differing blocks
while (true) {
int x1 = randf(&randx, 0, ZONE_GRID_SIZE);
int y1 = randf(&randx, 0, ZONE_GRID_SIZE);
int x2 = randf(&randx, 0, ZONE_GRID_SIZE);
int y2 = randf(&randx, 0, ZONE_GRID_SIZE);
if (proposal->zones[y1][x1].type != proposal->zones[y2][x2].type || proposal->zones[y1][x1].level != proposal->zones[y2][x2].level) {
swapZoneType(&proposal->zones[y1][x1], &proposal->zones[y2][x2]);
break;
}
}
}
// score the proposal: population-weighted preference over residential cells
proposal->score = 0.0;
float count = 0.0;
for (int r = 0; r < ZONE_GRID_SIZE; ++r) {
for (int c = 0; c < ZONE_GRID_SIZE; ++c) {
// skip for non-residential block
// NOTE(review): this filters on `plan`, not `proposal` — after the
// swap the residential mask can differ; confirm this is intended.
if (plan->zones[r][c].type != 0) continue;
// compute the distance (Manhattan, in cell lengths) to the nearest spots
float distToStore = 4000;
float distToRestaurant = 4000;
float distToFactory = 4000;
float distToPark = 4000;
float distToAmusement = 4000;
float distToSchool = 4000;
float distToLibrary = 4000;
for (int r2 = 0; r2 < ZONE_GRID_SIZE; ++r2) {
for (int c2 = 0; c2 < ZONE_GRID_SIZE; ++c2) {
if (proposal->zones[r2][c2].type == 0) continue;
//float dist = ZONE_CELL_LEN * sqrtf((r - r2) * (r - r2) + (c - c2) * (c - c2));
float dist = ZONE_CELL_LEN * (abs(r - r2) + abs(c - c2));
if (proposal->zones[r2][c2].type == 1) { // store / restaurant
if (dist < distToStore) {
distToStore = dist;
distToRestaurant = dist;
}
} else if (proposal->zones[r2][c2].type == 2) { // factory
if (dist < distToFactory) {
distToFactory = dist;
}
} else if (proposal->zones[r2][c2].type == 3) { // park
if (dist < distToPark) {
distToPark = dist;
}
} else if (proposal->zones[r2][c2].type == 4) { // amusement
if (dist < distToAmusement) {
distToAmusement = dist;
}
} else if (proposal->zones[r2][c2].type == 5) { // school / library
if (dist < distToSchool) {
distToSchool = dist;
distToLibrary = dist;
}
}
}
}
// compute the 9-dimensional feature (exponential falloff with distance)
float feature[9];
feature[0] = expf(-K * distToStore);
feature[1] = expf(-K * distToSchool);
feature[2] = expf(-K * distToRestaurant);
feature[3] = expf(-K * distToPark);
feature[4] = expf(-K * distToAmusement);
feature[5] = expf(-K * distToLibrary);
feature[6] = expf(-K * noise(distToFactory, distToAmusement, distToStore));
feature[7] = expf(-K * pollution(distToFactory));
feature[8] = 0; // no station in this model
// accumulate the population-weighted score for this cell
for (int i = 0; i < 10; ++i) {
proposal->score += dot2(preference[i], feature) * ratioPeople[i] * levelPeople[proposal->zones[r][c].level - 1];
count += ratioPeople[i] * levelPeople[proposal->zones[r][c].level - 1];
}
}
}
proposal->score /= count;
// update the best plan
if (proposal->score > bestPlan->score) {
*bestPlan = *proposal;
}
// compare the current plan and the proposal
// NOTE(review): every 10th iteration is accepted unconditionally instead
// of the usual Metropolis probability — presumably a simplified
// annealing/escape rule; confirm it is intentional.
if (proposal->score > plan->score || loop % 10 == 0) {
// accept
*plan = *proposal;
}
}
}
/**
 * GPU entry point: runs one independent MCMC chain per thread, each thread
 * owning one slot of the plan/proposal/bestPlan arrays.
 */
__global__
void zonePlanMCMCGPUKernel(int* numIterations, zone_plan* plan, zone_plan* proposal, zone_plan* bestPlan) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
// seed the per-chain LCG with the thread id so chains differ
unsigned int seed = tid;
MCMCstep(*numIterations, seed, plan + tid, proposal + tid, bestPlan + tid);
}
/**
 * Host wrapper for the CUDA MCMC optimizer.
 * Allocates one plan/proposal/best-plan slot per GPU thread, launches one
 * independent chain per thread, and copies the per-thread best plans back
 * into *bestPlans (heap buffer allocated here; caller owns and frees it).
 */
void zonePlanMCMCGPUfunc(zone_plan** bestPlans, int numIterations) {
// allocate the host-side result buffer
*bestPlans = (zone_plan*)malloc(sizeof(zone_plan) * ZONE_PLAN_MCMC_GRID_SIZE * ZONE_PLAN_MCMC_BLOCK_SIZE);
if (*bestPlans == NULL) {
printf("host memory allocation error!\n");
return;
}
// allocate device memory
int* devNumIterations;
if (cudaMalloc((void**)&devNumIterations, sizeof(int)) != cudaSuccess) {
printf("cuda memory allocation error!\n");
return;
}
zone_plan* devPlan;
if (cudaMalloc((void**)&devPlan, sizeof(zone_plan) * ZONE_PLAN_MCMC_GRID_SIZE * ZONE_PLAN_MCMC_BLOCK_SIZE) != cudaSuccess) {
// FIX: devNumIterations was leaked on every early-exit path
cudaFree(devNumIterations);
printf("cuda memory allocation error!\n");
return;
}
zone_plan* devProposal;
if (cudaMalloc((void**)&devProposal, sizeof(zone_plan) * ZONE_PLAN_MCMC_GRID_SIZE * ZONE_PLAN_MCMC_BLOCK_SIZE) != cudaSuccess) {
cudaFree(devNumIterations);
cudaFree(devPlan);
printf("cuda memory allocation error!\n");
return;
}
zone_plan* devBestPlan;
if (cudaMalloc((void**)&devBestPlan, sizeof(zone_plan) * ZONE_PLAN_MCMC_GRID_SIZE * ZONE_PLAN_MCMC_BLOCK_SIZE) != cudaSuccess) {
cudaFree(devNumIterations);
cudaFree(devPlan);
cudaFree(devProposal);
printf("cuda memory allocation error!\n");
return;
}
// copy the iteration count to the device
if (cudaMemcpy(devNumIterations, &numIterations, sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(devNumIterations);
cudaFree(devPlan);
cudaFree(devProposal);
cudaFree(devBestPlan);
printf("cuda memory copy error!\n");
return;
}
printf("start GPU kernel.\n");
// launch one MCMC chain per thread
zonePlanMCMCGPUKernel<<<ZONE_PLAN_MCMC_GRID_SIZE, ZONE_PLAN_MCMC_BLOCK_SIZE>>>(devNumIterations, devPlan, devProposal, devBestPlan);
// copy the results back to the host buffer (this blocking memcpy also
// synchronizes with the kernel and surfaces any asynchronous error)
if (cudaMemcpy(*bestPlans, devBestPlan, sizeof(zone_plan) * ZONE_PLAN_MCMC_GRID_SIZE * ZONE_PLAN_MCMC_BLOCK_SIZE, cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(devNumIterations);
cudaFree(devPlan);
cudaFree(devProposal);
cudaFree(devBestPlan);
printf("cuda memory copy error!\n");
return;
}
// release device memory (FIX: devNumIterations was never freed)
cudaFree(devNumIterations);
cudaFree(devPlan);
cudaFree(devProposal);
cudaFree(devBestPlan);
printf("GPU kernel done.\n");
}
|
3,373 | //RX^g̓O[oϐɂłȂ
//ƂƂŕʃt@CANZXłȂׁCdeprecated
#include <iostream>
#include <inttypes.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include "cuda_call_checker.cuh"
#include "affine_transformer_gpu.cuh"
/*
RX^gɓ]邽߂̊ϊindexێ
affine_transform_sizen[0] = 90 ]
affine_transform_sizen[0] = 180]
affine_transform_sizen[0] = 270]
affine_transform_sizen[0] = 0 ]
affine_transform_sizen[0] = 90]
affine_transform_sizen[0] = 180]
affine_transform_sizen[0] = 270]
*/
//void init_affine_transformer(int size) {
//
// uint8_t h_affine_transform_size4_1d[7 * 16] = {
// //90
// 12, 8, 4, 0,
// 13, 9, 5, 1,
// 14, 10, 6, 2,
// 15, 11, 7, 3 ,
// //180
// 15, 14, 13, 12,
// 11, 10, 9, 8,
// 7, 6, 5, 4,
// 3, 2, 1, 0 ,
// //270
// 3, 7, 11, 15,
// 2, 6, 10, 14,
// 1, 5, 9, 13,
// 0, 4, 8, 12,
// //mirror 0
// 3, 2, 1, 0,
// 7, 6, 5, 4,
// 11, 10, 9, 8,
// 15, 14, 13, 12,
// //mirror 90
// 15, 11, 7, 3,
// 14, 10, 6, 2,
// 13, 9, 5, 1,
// 12, 8, 4, 0,
// //mirror 180
// 12, 13, 14, 15,
// 8, 9, 10, 11,
// 4, 5, 6, 7,
// 0, 1, 2, 3 ,
// //mirror 270
// 0, 4, 8, 12,
// 1, 5, 9, 13,
// 2, 6, 10, 14,
// 3, 7, 11, 15
// };
//
// uint8_t h_affine_transform_size4[7][16] = {
// //90
// { 12, 8, 4, 0,
// 13, 9, 5, 1,
// 14, 10, 6, 2,
// 15, 11, 7, 3 },
// //180
// { 15, 14, 13, 12,
// 11, 10, 9, 8,
// 7, 6, 5, 4,
// 3, 2, 1, 0 },
// //270
// { 3, 7, 11, 15,
// 2, 6, 10, 14,
// 1, 5, 9, 13,
// 0, 4, 8, 12 },
// //mirror 0
// { 3, 2, 1, 0,
// 7, 6, 5, 4,
// 11, 10, 9, 8,
// 15, 14, 13, 12 },
// //mirror 90
// { 15, 11, 7, 3,
// 14, 10, 6, 2,
// 13, 9, 5, 1,
// 12, 8, 4, 0 },
// //mirror 180
// { 12, 13, 14, 15,
// 8, 9, 10, 11,
// 4, 5, 6, 7,
// 0, 1, 2, 3 },
// //mirror 270
// { 0, 4, 8, 12,
// 1, 5, 9, 13,
// 2, 6, 10, 14,
// 3, 7, 11, 15 }
// };
//
// uint8_t h_affine_transform_size8[7][64] = {
// //90
// { 56, 48, 40, 32, 24, 16, 8, 0,
// 57, 49, 41, 33, 25, 17, 9, 1,
// 58, 50, 42, 34, 26, 18, 10, 2,
// 59, 51, 43, 35, 27, 19, 11, 3,
// 60, 52, 44, 36, 28, 20, 12, 4,
// 61, 53, 45, 37, 29, 21, 13, 5,
// 62, 54, 46, 38, 30, 22, 14, 6,
// 63, 55, 47, 39, 31, 23, 15, 7 },
// //180
// { 63, 62, 61, 60, 59, 58, 57, 56,
// 55, 54, 53, 52, 51, 50, 49, 48,
// 47, 46, 45, 44, 43, 42, 41, 40,
// 39, 38, 37, 36, 35, 34, 33, 32,
// 31, 30, 29, 28, 27, 26, 25, 24,
// 23, 22, 21, 20, 19, 18, 17, 16,
// 15, 14, 13, 12, 11, 10, 9, 8,
// 7, 6, 5, 4, 3, 2, 1, 0 },
// //270
// { 7, 15, 23, 31, 39, 47, 55, 63,
// 6, 14, 22, 30, 38, 46, 54, 62,
// 5, 13, 21, 29, 37, 45, 53, 61,
// 4, 12, 20, 28, 36, 44, 52, 60,
// 3, 11, 19, 27, 35, 43, 51, 59,
// 2, 10, 18, 26, 34, 42, 50, 58,
// 1, 9, 17, 25, 33, 41, 49, 57,
// 0, 8, 16, 24, 32, 40, 48, 56 },
// //mirror
// { 7, 6, 5, 4, 3, 2, 1, 0,
// 15, 14, 13, 12, 11, 10, 9, 8,
// 23, 22, 21, 20, 19, 18, 17, 16,
// 31, 30, 29, 28, 27, 26, 25, 24,
// 39, 38, 37, 36, 35, 34, 33, 32,
// 47, 46, 45, 44, 43, 42, 41, 40,
// 55, 54, 53, 52, 51, 50, 49, 48,
// 63, 62, 61, 60, 59, 58, 57, 56 },
// //mirror 90
// { 63, 55, 47, 39, 31, 23, 15, 7,
// 62, 54, 46, 38, 30, 22, 14, 6,
// 61, 53, 45, 37, 29, 21, 13, 5,
// 60, 52, 44, 36, 28, 20, 12, 4,
// 59, 51, 43, 35, 27, 19, 11, 3,
// 58, 50, 42, 34, 26, 18, 10, 2,
// 57, 49, 41, 33, 25, 17, 9, 1,
// 56, 48, 40, 32, 24, 16, 8, 0 },
// //mirror 180
// { 56, 57, 58, 59, 60, 61, 62, 63,
// 48, 49, 50, 51, 52, 53, 54, 55,
// 40, 41, 42, 43, 44, 45, 46, 47,
// 32, 33, 34, 35, 36, 37, 38, 39,
// 24, 25, 26, 27, 28, 29, 30, 31,
// 16, 17, 18, 19, 20, 21, 22, 23,
// 8, 9, 10, 11, 12, 13, 14, 15,
// 0, 1, 2, 3, 4, 5, 6, 7 },
// //mirror 270
// { 0, 8, 16, 24, 32, 40, 48, 56,
// 1, 9, 17, 25, 33, 41, 49, 57,
// 2, 10, 18, 26, 34, 42, 50, 58,
// 3, 11, 19, 27, 35, 43, 51, 59,
// 4, 12, 20, 28, 36, 44, 52, 60,
// 5, 13, 21, 29, 37, 45, 53, 61,
// 6, 14, 22, 30, 38, 46, 54, 62,
// 7, 15, 23, 31, 39, 47, 55, 63 }
// };
//
// uint8_t h_affine_transform_size16[7][256] = {
// //90
// { 240,224,208,192,176,160,144,128,112, 96, 80, 64, 48, 32, 16, 0,
// 241,225,209,193,177,161,145,129,113, 97, 81, 65, 49, 33, 17, 1,
// 242,226,210,194,178,162,146,130,114, 98, 82, 66, 50, 34, 18, 2,
// 243,227,211,195,179,163,147,131,115, 99, 83, 67, 51, 35, 19, 3,
// 244,228,212,196,180,164,148,132,116,100, 84, 68, 52, 36, 20, 4,
// 245,229,213,197,181,165,149,133,117,101, 85, 69, 53, 37, 21, 5,
// 246,230,214,198,182,166,150,134,118,102, 86, 70, 54, 38, 22, 6,
// 247,231,215,199,183,167,151,135,119,103, 87, 71, 55, 39, 23, 7,
// 248,232,216,200,184,168,152,136,120,104, 88, 72, 56, 40, 24, 8,
// 249,233,217,201,185,169,153,137,121,105, 89, 73, 57, 41, 25, 9,
// 250,234,218,202,186,170,154,138,122,106, 90, 74, 58, 42, 26, 10,
// 251,235,219,203,187,171,155,139,123,107, 91, 75, 59, 43, 27, 11,
// 252,236,220,204,188,172,156,140,124,108, 92, 76, 60, 44, 28, 12,
// 253,237,221,205,189,173,157,141,125,109, 93, 77, 61, 45, 29, 13,
// 254,238,222,206,190,174,158,142,126,110, 94, 78, 62, 46, 30, 14,
// 255,239,223,207,191,175,159,143,127,111, 95, 79, 63, 47, 31, 15 },
// //180
// { 255,254,253,252,251,250,249,248,247,246,245,244,243,242,241,240,
// 239,238,237,236,235,234,233,232,231,230,229,228,227,226,225,224,
// 223,222,221,220,219,218,217,216,215,214,213,212,211,210,209,208,
// 207,206,205,204,203,202,201,200,199,198,197,196,195,194,193,192,
// 191,190,189,188,187,186,185,184,183,182,181,180,179,178,177,176,
// 175,174,173,172,171,170,169,168,167,166,165,164,163,162,161,160,
// 159,158,157,156,155,154,153,152,151,150,149,148,147,146,145,144,
// 143,142,141,140,139,138,137,136,135,134,133,132,131,130,129,128,
// 127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
// 111,110,109,108,107,106,105,104,103,102,101,100, 99, 98, 97, 96,
// 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80,
// 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64,
// 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48,
// 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32,
// 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
// 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 },
// //270
// { 15, 31, 47, 63, 79, 95,111,127,143,159,175,191,207,223,239,255,
// 14, 30, 46, 62, 78, 94,110,126,142,158,174,190,206,222,238,254,
// 13, 29, 45, 61, 77, 93,109,125,141,157,173,189,205,221,237,253,
// 12, 28, 44, 60, 76, 92,108,124,140,156,172,188,204,220,236,252,
// 11, 27, 43, 59, 75, 91,107,123,139,155,171,187,203,219,235,251,
// 10, 26, 42, 58, 74, 90,106,122,138,154,170,186,202,218,234,250,
// 9, 25, 41, 57, 73, 89,105,121,137,153,169,185,201,217,233,249,
// 8, 24, 40, 56, 72, 88,104,120,136,152,168,184,200,216,232,248,
// 7, 23, 39, 55, 71, 87,103,119,135,151,167,183,199,215,231,247,
// 6, 22, 38, 54, 70, 86,102,118,134,150,166,182,198,214,230,246,
// 5, 21, 37, 53, 69, 85,101,117,133,149,165,181,197,213,229,245,
// 4, 20, 36, 52, 68, 84,100,116,132,148,164,180,196,212,228,244,
// 3, 19, 35, 51, 67, 83, 99,115,131,147,163,179,195,211,227,243,
// 2, 18, 34, 50, 66, 82, 98,114,130,146,162,178,194,210,226,242,
// 1, 17, 33, 49, 65, 81, 97,113,129,145,161,177,193,209,225,241,
// 0, 16, 32, 48, 64, 80, 96,112,128,144,160,176,192,208,224,240 },
// //mirror
// { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
// 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
// 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32,
// 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48,
// 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68, 67, 66, 65, 64,
// 95, 94, 93, 92, 91, 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80,
// 111,110,109,108,107,106,105,104,103,102,101,100, 99, 98, 97, 96,
// 127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
// 143,142,141,140,139,138,137,136,135,134,133,132,131,130,129,128,
// 159,158,157,156,155,154,153,152,151,150,149,148,147,146,145,144,
// 175,174,173,172,171,170,169,168,167,166,165,164,163,162,161,160,
// 191,190,189,188,187,186,185,184,183,182,181,180,179,178,177,176,
// 207,206,205,204,203,202,201,200,199,198,197,196,195,194,193,192,
// 223,222,221,220,219,218,217,216,215,214,213,212,211,210,209,208,
// 239,238,237,236,235,234,233,232,231,230,229,228,227,226,225,224,
// 255,254,253,252,251,250,249,248,247,246,245,244,243,242,241,240 },
// //mirror 90
// { 255,239,223,207,191,175,159,143,127,111, 95, 79, 63, 47, 31, 15,
// 254,238,222,206,190,174,158,142,126,110, 94, 78, 62, 46, 30, 14,
// 253,237,221,205,189,173,157,141,125,109, 93, 77, 61, 45, 29, 13,
// 252,236,220,204,188,172,156,140,124,108, 92, 76, 60, 44, 28, 12,
// 251,235,219,203,187,171,155,139,123,107, 91, 75, 59, 43, 27, 11,
// 250,234,218,202,186,170,154,138,122,106, 90, 74, 58, 42, 26, 10,
// 249,233,217,201,185,169,153,137,121,105, 89, 73, 57, 41, 25, 9,
// 248,232,216,200,184,168,152,136,120,104, 88, 72, 56, 40, 24, 8,
// 247,231,215,199,183,167,151,135,119,103, 87, 71, 55, 39, 23, 7,
// 246,230,214,198,182,166,150,134,118,102, 86, 70, 54, 38, 22, 6,
// 245,229,213,197,181,165,149,133,117,101, 85, 69, 53, 37, 21, 5,
// 244,228,212,196,180,164,148,132,116,100, 84, 68, 52, 36, 20, 4,
// 243,227,211,195,179,163,147,131,115, 99, 83, 67, 51, 35, 19, 3,
// 242,226,210,194,178,162,146,130,114, 98, 82, 66, 50, 34, 18, 2,
// 241,225,209,193,177,161,145,129,113, 97, 81, 65, 49, 33, 17, 1,
// 240,224,208,192,176,160,144,128,112, 96, 80, 64, 48, 32, 16, 0 },
// //mirror 180
// { 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,
// 224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,
// 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,
// 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,
// 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,
// 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,
// 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,
// 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
// 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
// 96 ,97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111,
// 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
// 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
// 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
// 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
// 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
// //mirror 270
// { 0, 16, 32, 48, 64, 80, 96,112,128,144,160,176,192,208,224,240,
// 1, 17, 33, 49, 65, 81, 97,113,129,145,161,177,193,209,225,241,
// 2, 18, 34, 50, 66, 82, 98,114,130,146,162,178,194,210,226,242,
// 3, 19, 35, 51, 67, 83, 99,115,131,147,163,179,195,211,227,243,
// 4, 20, 36, 52, 68, 84,100,116,132,148,164,180,196,212,228,244,
// 5, 21, 37, 53, 69, 85,101,117,133,149,165,181,197,213,229,245,
// 6, 22, 38, 54, 70, 86,102,118,134,150,166,182,198,214,230,246,
// 7, 23, 39, 55, 71, 87,103,119,135,151,167,183,199,215,231,247,
// 8, 24, 40, 56, 72, 88,104,120,136,152,168,184,200,216,232,248,
// 9, 25, 41, 57, 73, 89,105,121,137,153,169,185,201,217,233,249,
// 10, 26, 42, 58, 74, 90,106,122,138,154,170,186,202,218,234,250,
// 11, 27, 43, 59, 75, 91,107,123,139,155,171,187,203,219,235,251,
// 12, 28, 44, 60, 76, 92,108,124,140,156,172,188,204,220,236,252,
// 13, 29, 45, 61, 77, 93,109,125,141,157,173,189,205,221,237,253,
// 14, 30, 46, 62, 78, 94,110,126,142,158,174,190,206,222,238,254,
// 15, 31, 47, 63, 79, 95,111,127,143,159,175,191,207,223,239,255 }
// };
//
// if (size == 4) {
// //CHECK(cudaMemcpyToSymbol(dc_affine_transform_size4_1d, h_affine_transform_size4_1d, sizeof(uint8_t) * 7 * 16));
// //CHECK(cudaMemcpyToSymbol(dc_affine_transform_size4_1d, h_affine_transform_size4_1d, sizeof(uint8_t) * 7 * 16))
// std::cout << "WTFFFFF" << (uint32_t)h_affine_transform_size4[3][0] << std::endl;
// //CHECK(cudaMemcpyToSymbol(dc_affine_transform_size4_1d, h_affine_transform_size4_1d, sizeof(uint8_t) * 7 * 16));
// //CHECK(cudaMemcpyToSymbol(dc_affine_transform_size4, h_affine_transform_size4, sizeof(uint8_t) * 7 * 16));
// //CHECK(cudaMemcpy(dc_affine_transform_size4_1d, h_affine_transform_size4_1d, sizeof(uint8_t) * 7 * 16, cudaMemcpyHostToDevice));
// std::cout << "size 4 copyed" << std::endl;
// CHECK(cudaDeviceSynchronize());
// }
// //cudaMemC
// else if (size == 8) {
// CHECK(cudaMemcpyToSymbol(dc_affine_transform_size8, h_affine_transform_size8, sizeof(uint8_t) * 7 * 64));
// std::cout << "size 8 copyed" << std::endl;
// }
// else if (size == 16) {
// CHECK(cudaMemcpyToSymbol(dc_affine_transform_size16, h_affine_transform_size16, sizeof(uint8_t) * 7 * 256));
// std::cout << "size 4 copyed" << std::endl;
// }
// CHECK(cudaDeviceSynchronize());
//} |
3,374 | #include <iostream>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <cutil.h>
// Device function (executed on the GPU side).
//
// Reads the data from GPU memory, adds +1 to every element, and writes
// the result back to GPU memory.
// Device kernel: writes d_odata[i] = d_idata[i] + 1.0f for all nword elements.
// Deliberately serial: only thread 0 of block 0 does any work, so the launch
// configuration does not affect the result (teaching/demo code).
__global__ void function_on_GPU(float* d_idata, float* d_odata, int nword)
{
    const bool is_first_thread = (threadIdx.x == 0) && (blockIdx.x == 0);
    if (!is_first_thread)
        return;
    for (int i = 0; i < nword; ++i)
        d_odata[i] = d_idata[i] + 1.0f;
}
// main function.
// Note: functions without the __global__ qualifier are ordinary C++ code
// executed on the CPU side (i.e. without the __global__ function this
// would be plain C++ compilable with g++).
// Host entry point: allocates a 1024-float buffer, uploads it to the GPU,
// runs function_on_GPU (which adds 1.0f to each element), copies the result
// back, and prints the first 17 input/output pairs for a visual check.
int main( int argc, char** argv)
{
int nword = 1024;
int mem_size = sizeof(float) * nword;
// Host-side (CPU) buffers
float* h_idata = (float*) malloc(mem_size);
float* h_odata = (float*) malloc(mem_size);
for(unsigned int i = 0; i < nword; ++i){
h_idata[i] = (float) i;
}
// Device-side (GPU) buffers
float* d_idata;
cudaMalloc((void**) &d_idata, mem_size);
float* d_odata;
cudaMalloc( (void**) &d_odata, mem_size);
// Data transfer: host -----> device
cudaMemcpy( d_idata, h_idata, mem_size, cudaMemcpyHostToDevice );
// Launch the device function (executed on the GPU)
dim3 grid(128); // number of blocks in the grid
dim3 threads(128); // number of threads per block
// grid*threads (= 128*128) threads run the kernel, although only
// thread (0,0) actually does any work inside function_on_GPU.
function_on_GPU<<< grid, threads >>>(d_idata, d_odata, nword);
// Data transfer: device -----> host (blocking, so it also synchronizes)
cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost);
for(int i=0; i<17; i++){
printf("%f, %f\n", h_idata[i], h_odata[i]);
}
// Release resources
free(h_idata);
free(h_odata);
cudaFree(d_idata);
cudaFree(d_odata);
return (0);
}
|
3,375 | #include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <iostream>
#define N 2048
// Binary functor computing the SAXPY combination a*x + y, suitable for
// thrust::transform on both host and device iterators.
struct saxpy_functor
{
    const float a;  // scale factor fixed at construction time

    saxpy_functor(float _a) : a(_a) {}

    // Callable from host and device code paths.
    __host__ __device__
    float operator()(const float& x, const float& y) const { return a * x + y; }
};
// Demonstrates thrust::transform: fills two device vectors with 0,1,2,...,
// combines them element-wise as a*x + y via saxpy_functor, and prints one
// sample element of the result after copying it back to the host.
void transformTest() {
    const float scale = 2.3;
    thrust::device_vector<float> xs(N);
    thrust::device_vector<float> ys(N);
    thrust::device_vector<float> combined(N);
    thrust::sequence(xs.begin(), xs.end());
    thrust::sequence(ys.begin(), ys.end());
    thrust::transform(xs.begin(), xs.end(), ys.begin(), combined.begin(),
                      saxpy_functor(scale));
    thrust::host_vector<float> on_host = combined;  // device -> host copy
    std::cout << "Sample sum value " << on_host[24] << std::endl;
}
// Demonstrates thrust::reduce: sums the sequence 0,1,...,N-1 on the device.
void reduceTest() {
    thrust::device_vector<float> values(N);
    thrust::sequence(values.begin(), values.end());
    const float total =
        thrust::reduce(values.begin(), values.end(), (float) 0, thrust::plus<float>());
    std::cout << "Sample total " << total << std::endl;
}
// Fills a vector with pseudo-random values and finds the maximum on the GPU.
//
// Fix: the original called thrust::generate directly on a device_vector with
// rand(), but rand() is a host-only function and cannot be executed in the
// device path thrust dispatches to. Generate on the host first, then copy.
void largeReduce() {
    thrust::host_vector<float> h_values(N);
    thrust::generate(h_values.begin(), h_values.end(), rand);
    thrust::device_vector<float> F1 = h_values;  // host -> device copy
    float finalSum;
    // rand() is non-negative, so -1.0 is a safe identity for the max-reduce.
    finalSum = thrust::reduce(F1.begin(), F1.end(), -1.0, thrust::maximum<float>());
    std::cout<< "Max Element" << finalSum << std::endl;
}
// Entry point: exercises only the large random-max reduction demo.
int main(void) {
    largeReduce();
    return 0;
}
|
3,376 | #include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
// Auto-regressive "pink noise" filter bank.
// For each element i, updates Nc per-channel filter states in place:
//   LastF[ch][i] = (X0[i] + X1[i]) * D0[ch] - C0[ch] * LastF[ch][i]
// (channel-major layout: LastF[ch*numElts + i]), then writes the sum of
// the updated states to Y[i]. One thread per element, guarded against the
// grid over-covering numElts.
__global__ void AutoRegPinkK(double* X0, double* X1, double* Y, double* C0, double* D0, double* LastF, int numElts, int Nc)
{
    const int elt = blockDim.x * blockIdx.x + threadIdx.x;
    if (elt >= numElts)
        return;

    // Update every channel's state for this element.
    for (int ch = 0; ch < Nc; ch++)
    {
        const int idx = ch * numElts + elt;
        LastF[idx] = (X0[elt] + X1[elt]) * D0[ch] - C0[ch] * LastF[idx];
    }
    // Sum the freshly updated states into the output.
    double total = 0;
    for (int ch = 0; ch < Nc; ch++)
        total = total + LastF[ch * numElts + elt];
    Y[elt] = total;
}
3,377 | #include <cuda.h>
#include <stdio.h>
#define cuda_safe_call(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA runtime error (if any) to stderr and, unless `abort` is
// false, terminates the process using the error code as the exit status.
// Intended to be invoked through the cuda_safe_call() macro so the failing
// file/line are captured automatically.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Minimal kernel: each launched thread prints a greeting from the device.
// main launches it <<<1,1>>>, so exactly one line is printed.
__global__ void kernel_hello()
{
printf("Hello from the device.\n");
}
// Host entry point: prints from the CPU, launches the hello kernel on a
// single thread, then synchronizes so the device-side printf output is
// flushed (and any launch/execution error is surfaced via cuda_safe_call)
// before declaring success.
int main()
{
printf("Hello from the host.\n");
kernel_hello<<<1,1>>>();
cuda_safe_call(cudaDeviceSynchronize());
printf("Success!\n");
return 0;
}
|
3,378 | /* Command to compile on Windows:
nvcc .\lab5_3.cu -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64"
Output should be:
A: [
[3.00, 5.00, 2.00, 0.00],
[2.00, 4.00, 5.00, 1.00],
[0.00, 3.00, 3.00, 1.00],
[3.00, 5.00, 4.00, 4.00],
[4.00, 5.00, 5.00, 3.00],
[10.00, 13.00, 21.00, 16.00],
[9.00, 11.00, 15.00, 8.00]]
b: [
[29.99],
[14.99],
[9.99],
[24.99]]
c: [
[184.90],
[194.88],
[99.93],
[304.84],
[319.83],
[1104.40],
[784.57]]
*/
#include <stdio.h>
// Matrix multiply C = A * B with one block per output element C[row][col]:
// grid is (M, O), block is (M, N); for a given output cell only threadIdx.y
// (the inner-dimension index k) distinguishes the partial products.
// Requires blockDim.y * gridDim.x * gridDim.y doubles of dynamic shared
// memory (each block only touches its own cell's slice of its own copy).
__global__ void mat_mul(double *C, double *A, double *B) {
    // Stage one partial product A[row][k] * B[k][col] per thread (k = threadIdx.y).
    extern __shared__ double tmp[];
    tmp[blockDim.y * (blockIdx.x * gridDim.y + blockIdx.y) + threadIdx.y] = A[blockIdx.x * blockDim.y + threadIdx.y] * B[threadIdx.x * gridDim.y + blockIdx.y];
    __syncthreads();
    // Thread (0,0) serially accumulates this cell's partial products.
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        C[blockIdx.x * gridDim.y + blockIdx.y] = 0;
        // Fix: the loop bound was `i <= blockDim.y`, which read one element
        // past this cell's slice of shared memory and summed in
        // uninitialized data.
        for (int i = 0; i < blockDim.y; i++) {
            C[blockIdx.x * gridDim.y + blockIdx.y] += tmp[blockDim.y * (blockIdx.x * gridDim.y + blockIdx.y) + i];
        }
    }
}
// Renders an m x n matrix of doubles (passed as an untyped pointer) into
// str as "[\n[a, b],\n[c, d]]" with two decimal places per entry.
// str_size bounds every write via snprintf, though the running offset
// assumes no truncation actually occurred.
void mat2str(char *str, void *mat, int m, int n, int str_size) {
    const double *values = (const double *) mat;
    int pos = 0;
    pos += snprintf(str + pos, str_size - pos, "[\n");
    for (int row = 0; row < m; row++) {
        pos += snprintf(str + pos, str_size - pos, "[");
        for (int col = 0; col < n - 1; col++) {
            pos += snprintf(str + pos, str_size - pos, "%.2f, ", values[row * n + col]);
        }
        // Last entry of the row: rows end with "],\n" except the final one,
        // which closes the whole matrix with "]]".
        if (row < m - 1) {
            pos += snprintf(str + pos, str_size - pos, "%.2f],\n", values[row * n + n - 1]);
        } else {
            pos += snprintf(str + pos, str_size - pos, "%.2f]]", values[row * n + n - 1]);
        }
    }
}
int main(void) {
/* Initialize inputs on the CPU: A is 7x4, b is 4x1; the expected product
 * c (7x1) is listed in the file-header comment. */
const int M = 7;
const int N = 4;
const int O = 1;
double A[M][N] {
{3, 5, 2, 0},
{2, 4, 5, 1},
{0, 3, 3, 1},
{3, 5, 4, 4},
{4, 5, 5, 3},
{10, 13, 21, 16},
{9, 11, 15, 8}
};
double b[N][O] {
{29.99},
{14.99},
{9.99},
{24.99}
};
double c[M][O];
char str_A[320];
char str_b[320];
mat2str(str_A, A, M, N, 320);
mat2str(str_b, b, N, O, 320);
printf("A: %s\n", str_A);
printf("b: %s\n", str_b);
/* Allocate memory for calculation on GPU */
double *gpu_A;
double *gpu_B;
double *gpu_C;
cudaMalloc((void**) &gpu_A, sizeof(double) * M * N);
cudaMalloc((void**) &gpu_B, sizeof(double) * N * O);
cudaMalloc((void**) &gpu_C, sizeof(double) * M * O);
/* Copy inputs to GPU */
cudaMemcpy(gpu_A, A, sizeof(double) * M * N, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_B, b, sizeof(double) * N * O, cudaMemcpyHostToDevice);
/* Launch: one block per output element (grid M x O), M x N threads per
 * block, and M*N*O doubles of dynamic shared memory for the partial
 * products staged by mat_mul. */
dim3 out_dim(M, O);
dim3 block_dim(M, N);
mat_mul<<<out_dim, block_dim, sizeof(double) * M * N * O>>>(gpu_C, gpu_A, gpu_B);
/* Blocking copy: also synchronizes with the kernel before c is read. */
cudaMemcpy(c, gpu_C, sizeof(double) * M * O, cudaMemcpyDeviceToHost);
/* Remember to clean up after ourselves */
cudaFree(gpu_A);
cudaFree(gpu_B);
cudaFree(gpu_C);
/* Print result */
char str_c[80];
mat2str(str_c, c, M, O, 80);
printf("c: %s\n", str_c);
return 0;
}
|
3,379 | #include <stdio.h>
#include <stdlib.h>
#define min(a,b) (a<b?a:b)
#define threadsPerBlock 256
#define N 33 * 1024
#define blocksPerGrid min(32, (N+threadsPerBlock-1)/threadsPerBlock)
// Dot product of two N-element vectors, device phase: each thread
// accumulates a grid-stride partial product; each block then tree-reduces
// its threads' partials in shared memory (threadsPerBlock must be a power
// of two) and writes one partial sum per block to c[blockIdx.x]. The host
// finishes the reduction over the blocksPerGrid partials.
__global__ void dot(float *a, float *b, float *c) {
//calculate thread id combining the block and thread indices to get global offset into the input arrays
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//shared memory for each block, which means each block has a copy of the memory.
//and the index of the cache is just the thread index in each block
__shared__ float cache[threadsPerBlock];
int cache_index = threadIdx.x;
int i;
float tmp = 0;
//each thread multiplies a pair of corresponding entries, and then every thread moves on to its next pare.
//the threads increment their indices by the total number of threads to easure we don't miss any elements and don't multiply a pair twice
while (tid < N) {
tmp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
//in each block, store the sum of pairs from each thread
cache[cache_index] = tmp;
//sync threads in the block before we sum all the values resulted from each thread.
__syncthreads();
//for reductions, threads Per block must be power of 2 because of the following reduction
i = blockDim.x/2;
while (i!=0) {
if(cache_index < i) {
cache[cache_index] += cache[cache_index+i];
//__syncthreads(); THIS IS NOT ALLOWED and GPU will not work!!!!
}
//sync threads after each iteration of reduction
//notice that the "__syncthreads" cannot be placed in the above "if" block
//because cuda architecture guarantees that no thread will advance to an instruction beyond the __syncthreads() until every
//thread in the block has executed the "syncthreads", however, if the "__syncthreads" is placed into a divergent branch,
//some threads block will never go to the branch and hardware will simply continue to wait for these threads, forever.
__syncthreads();
i/=2;
}
//Use one thread in each block to write the results of each block to the global memory
//here "c" gather each block's sum results, since there is not many blocks, we don't leverage GPU to complete the final results
//and use CPU to compute this part
if (cache_index == 0 ) {
c[blockIdx.x] = cache[0];
}
}
// Host driver: builds a = [0..N) and b = 2*a, runs the dot kernel, finishes
// the reduction over the per-block partial sums on the CPU, and checks the
// result against the closed form 2 * sum(i^2).
int main(void) {
    float *a, *b, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;
    int i;
    float sum = 0;  // fix: was declared uninitialized and then read by `sum += ...`
    // allocate memory for array a, b and partial_c on CPU side
    a = (float *)malloc(N * sizeof(float));
    b = (float *)malloc(N * sizeof(float));
    partial_c = (float *)malloc(blocksPerGrid * sizeof(float));
    // initialize a and b on the CPU
    for (i = 0; i < N; i++){
        a[i] = i;
        b[i] = i * 2;
    }
    // allocate device memory for the inputs and the per-block partials
    cudaMalloc((void **) &dev_a, N * sizeof(float));
    cudaMalloc((void **) &dev_b, N * sizeof(float));
    cudaMalloc((void **) &dev_partial_c, blocksPerGrid * sizeof(float));
    // copy the inputs host -> device; dev_partial_c is output-only, so the
    // original's upload of partial_c's uninitialized contents was dropped
    cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
    // launch the kernel; the blocking copy below synchronizes before reading
    dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);
    cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
    // finish the reduction over the per-block partial sums on the CPU
    for (i = 0; i < blocksPerGrid; i++) {
        sum += partial_c[i];
    }
    // verify against the closed-form expected value
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    float expect_value = 2 * sum_squares((float)(N-1));
    printf("does the gpu value %.6g = %.6g\n", sum, expect_value);
    // free memory on GPU side
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);
    // free memory on CPU side
    free(a);
    free(b);
    free(partial_c);
    return 0;
}
|
3,380 | // From CUDA for Engineers
// Listing 5.11: sharpen/main.cpp
#include <cuda_runtime.h>
#include <iostream>
// Placeholder driver for the sharpen example: just announces itself.
int main()
{
std::cout << "Sharpen\n";
}
|
3,381 | #include "ludcmp.cu"
#include "lubksb.cu"
// Semi-implicit midpoint rule step (Numerical Recipes `simpr`) for a stiff
// ODE system of fixed size n = 5: advances y over a total step htot using
// nstep substeps of size h = htot/nstep, given the derivative vector dydx,
// the time derivative dfdx, and the Jacobian dfdy (row-major n x n).
// The result is written to yout; y itself is not modified.
// Relies on ludcmp/lubksb (included above) for the LU solve of (1 - h*J).
__device__ void simpr(float* y, float* dydx, float* dfdx, float* dfdy,
const float xs, const float htot, const int nstep, float* yout,
void derivs(const float, float* , float*))
{
int i,j,nn;
// d receives the row-interchange parity from ludcmp (unused here)
float d,h,x;
const int n = 5;
float a[n*n];
int indx[n];
float del[n],ytemp[n];
h=htot/nstep;
// Build the matrix a = 1 - h * (df/dy) to be LU-factorized once and
// reused for every substep's back-substitution.
for (i=0;i<n;i++) {
for (j=0;j<n;j++) a[i*n+j] = -h*dfdy[i*n+j];
++a[i*n+i];
}
ludcmp(a,indx,d);
// First substep: solve (1 - h*J) * del = h*(dydx + h*dfdx).
for (i=0;i<n;i++)
yout[i]=h*(dydx[i]+h*dfdx[i]);
lubksb(a,indx,yout);
for (i=0;i<n;i++)
ytemp[i]=y[i]+(del[i]=yout[i]);
x=xs+h;
derivs(x,ytemp,yout);
// General substeps 2..nstep: semi-implicit midpoint recurrence.
for (nn=2;nn<=nstep;nn++) {
for (i=0;i<n;i++)
yout[i]=h*yout[i]-del[i];
lubksb(a,indx,yout);
for (i=0;i<n;i++) ytemp[i] += (del[i] += 2.0*yout[i]);
x += h;
derivs(x,ytemp,yout);
}
// Closing step: final correction and addition to the accumulated state.
for (i=0;i<n;i++)
yout[i]=h*yout[i]-del[i];
lubksb(a,indx,yout);
for (i=0;i<n;i++)
yout[i] += ytemp[i];
}
|
3,382 | #include <thrust/device_vector.h>
// A trajectory stored as parallel arrays (structure-of-arrays):
// latitude[i], longitude[i], ts[i] describe sample i of `length` samples.
typedef struct
{
size_t length;
double* latitude;
double* longitude;
long* ts;
} trajectory;
// A single trajectory sample point (latitude, longitude, timestamp).
typedef struct
{
double latitude;
double longitude;
long ts;
} tpoint;
// A sliding window over a trajectory: `buffer` holds the window's points.
// NOTE(review): `length` is never assigned by the visible code — confirm
// whether callers are expected to set it.
typedef struct
{
size_t length;
tpoint *buffer;
} swindow;
// Functor applied per start index i (via thrust::for_each): materializes a
// window of `num` consecutive trajectory points into swin[i].buffer.
// NOTE(review): buffer is allocated with device-side `new` and never freed
// in the visible code — a leak unless released elsewhere.
// NOTE(review): i + k can reach traj.length + num - 1 for windows near the
// end of the trajectory, reading past the arrays — confirm intended bounds.
struct slide
{
size_t num;      // window width in samples
swindow *swin;   // output: one window per start index
trajectory traj; // input trajectory (device pointers)
slide(size_t _num, swindow *_swin, trajectory _traj) : num(_num), swin(_swin), traj(_traj) {}
//template <typename T>
__host__ __device__
void operator() (const unsigned int i)
{
//thrust::for_each(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(traj.length), [=]__device__(const int& j)
{
//cudaMalloc((void**)&swin, sizeof(size_t)+sizeof(tpoint)*num);
swin[i].buffer = new tpoint[num];
// Copy the num samples starting at i into this window's buffer.
for (int k = 0; k < num; k++)
{
swin[i].buffer[k].longitude = traj.longitude[i+k];
swin[i].buffer[k].latitude = traj.latitude[i+k];
swin[i].buffer[k].ts = traj.ts[i+k];
}
};
}
};
// Host driver: builds a 15-point device-resident trajectory from constant
// tables and applies the `slide` functor at every start index, creating a
// sliding window per index (see the review notes on `slide` about bounds
// and buffer lifetime).
int main()
{
double LAT[15] = {28.289, 28.287, 28.286, 28.285, 28.283, 28.284, 28.287, 28.282, 28.284, 28.286, 28.281, 28.286, 28.289, 28.279, 28.278};
double LON[15] = {121.11, 121.23, 121.20, 121.25, 121.22, 121.12, 121.02, 121.03, 121.03, 121.22, 121.21, 121.26, 121.12, 121.11, 121.20};
long TS[15] = {12638782800, 12638782900, 12638783000, 12638783100, 12638783200, 12638783300, 12638783400, 12638783500, 12638783600, 12638783700,
12638783800, 12638783900, 12638784000, 12638784100, 12638784200};
// Upload the raw samples to the device.
thrust::device_vector<double> lat_vec(LAT, LAT+15);
thrust::device_vector<double> lon_vec(LON, LON+15);
thrust::device_vector<long> ts_vec(TS, TS+15);
// Wrap the device arrays in a trajectory of raw pointers for the functor.
trajectory traj;
traj.latitude = thrust::raw_pointer_cast(lat_vec.data());
traj.longitude = thrust::raw_pointer_cast(lon_vec.data());
traj.ts = thrust::raw_pointer_cast(ts_vec.data());
traj.length = 15;
// One output window per start index.
thrust::device_vector<swindow> win_vec(15);
slide sld(8, thrust::raw_pointer_cast(win_vec.data()), traj);
thrust::for_each(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(traj.length), sld);
}
|
3,383 | #include <stdio.h>
#define N 1000
#define TPB 32 // Threads per block
// Block-level parallel reduction: sums d_array[0..n) into *d_res.
// Each block tree-reduces its tile in shared memory (TPB must be a power
// of two), then thread 0 folds the block total into *d_res with atomicAdd.
//
// Fixes vs. the original:
//  * out-of-range threads no longer `return` before the barriers — every
//    thread of a block must reach each __syncthreads(), so they now pad
//    the shared tile with 0 and keep participating;
//  * a __syncthreads() was added inside the reduction loop; without it,
//    iteration s could read partial sums of iteration 2s before they were
//    guaranteed to be written.
__global__ void summationKernel(int *d_array, int n, int *d_res)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const int s_idx = threadIdx.x;
    __shared__ int s_array[TPB];
    // Load this block's tile; pad the tail with zeros (identity for +).
    s_array[s_idx] = (idx < n) ? d_array[idx] : 0;
    __syncthreads();
    // Tree reduction: halve the active range each iteration.
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (s_idx < s)
        {
            s_array[s_idx] += s_array[s_idx + s];
        }
        __syncthreads();  // all threads, outside the divergent branch
    }
    // One atomic per block merges the block total into the global result.
    if (s_idx == 0)
    {
        atomicAdd(d_res, s_array[0]);
    }
}
// Host wrapper: copies `array` (n ints) to the device, runs the reduction
// kernel over it, and prints the resulting sum.
void summationLauncher(int *array, int n)
{
    int *d_array = 0;
    cudaMalloc(&d_array, n * sizeof(int));
    cudaMemcpy(d_array, array, n * sizeof(int), cudaMemcpyHostToDevice);
    int *d_res;
    cudaMalloc(&d_res, sizeof(int));
    cudaMemset(d_res, 0, sizeof(int));  // accumulator starts at zero
    // Fix: the block count was computed from the global macro N instead of
    // the actual element count n passed to this function.
    int blocks = (n + TPB - 1) / TPB;  // ceil-div
    summationKernel<<<blocks, TPB>>>(d_array, n, d_res);
    int res;
    // Blocking copy: synchronizes with the kernel before res is read.
    cudaMemcpy(&res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Sum is %d\n", res);
    cudaFree(d_array);
    cudaFree(d_res);
}
// Builds the test array 0..N-1 and delegates the summation to the launcher.
int main()
{
    int array[N];
    for (int i = 0; i < N; ++i)
        array[i] = i;
    summationLauncher(array, N);
    return 0;
}
|
3,384 | /* источник https://gist.github.com/stevendborrelli/4286842 */
/* источник информации о сетке и о потоках внутри неё:
https://www.youtube.com/watch?v=kzXjRFL-gjo */
#pragma once
#include <stdio.h>
// Prints basic information about every CUDA device (messages in Russian, as
// in the original tool) and returns the maximum x-dimension of a thread
// block of the last device queried — callers appear to use it as a launch
// size hint. Returns -1 when no usable CUDA device is present.
int print_info_about_GPU() {
  int deviceCount;
  cudaDeviceProp deviceProp;

  cudaGetDeviceCount(&deviceCount);
  printf("Количество CUDA девайсов %d.\n", deviceCount);

  // Fix: without this guard, deviceCount == 0 skipped the loop entirely and
  // the final return read an uninitialized cudaDeviceProp.
  if (deviceCount <= 0) {
    printf("CUDA GPU-девайсы не обнаружены\n");
    return -1;
  }

  for (int dev = 0; dev < deviceCount; dev++) {
    cudaGetDeviceProperties(&deviceProp, dev);
    if (dev == 0) {
      if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
        printf("CUDA GPU-девайсы не обнаружены\n");
        return -1;
      } else if (deviceCount == 1) {
        printf("Обнаружен один девайс с поддержкой CUDA\n");
      } else {
        printf("Обнаружено %d устройств, поддерживающих CUDA\n", deviceCount);
      }
    }
    printf("Для девайса #%d\n", dev);
    printf("Название девайса: %s\n", deviceProp.name);
    printf("Общее количество памяти: %ld\n", deviceProp.totalGlobalMem);
    printf("Обзее количество разделяеммой памяти на блок: %ld\n",
           deviceProp.sharedMemPerBlock);
    printf("Количество статической памяти: %ld\n", deviceProp.totalConstMem);
    printf("Размер варпа: %d\n", deviceProp.warpSize);
    // Fix: this line previously printed maxThreadsDim[0] (the max block
    // x-dimension); the label says "threads per block", so it now reports
    // maxThreadsPerBlock.
    printf("Максимальное количество потоков на блок: %d\n", deviceProp.maxThreadsPerBlock);
    printf("Максимальное количество блоков в сетке: %d\n", deviceProp.maxGridSize[0]);
    printf("Количество мультипроцессоров: %d\n",
           deviceProp.multiProcessorCount);
  }
  // Kept as-is for interface compatibility: callers receive the block
  // x-dimension limit (maxThreadsDim[0]) of the last device queried.
  return deviceProp.maxThreadsDim[0];
}
|
3,385 | #include "includes.h"
// Dot product: each thread multiplies one pair of elements into shared
// memory; thread 0 of each block then serially reduces its block's products
// and folds the block total into *c with atomicAdd.
// Assumes the launch exactly covers a[] and b[] — no length parameter is
// available here for a bounds check.
__global__ void dot( int *a, int *b, int *c ) {
    __shared__ int prod[THREADS_PER_BLOCK]; // Shared memory
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    prod[threadIdx.x] = a[index] * b[index];
    __syncthreads(); // Threads synchronization
    if( threadIdx.x == 0) {
        int par_sum = 0;
        // Fix: the original summed prod[threadIdx.x] (always prod[0]) on
        // every iteration instead of prod[i], yielding
        // THREADS_PER_BLOCK * prod[0] rather than the block's true sum.
        for(int i=0; i<THREADS_PER_BLOCK; i++)
            par_sum += prod[i]; // Threads reduction
        atomicAdd(c,par_sum); // Blocks reduction
    }
}
3,386 | /* gpu_trunc_norm.cu
* Author: Nick Ulle
* Description:
* CUDA C functions for generating truncated normal random variables.
*
* Compile with:
* nvcc --ptx -arch=compute_20 gpu_trunc_norm.cu -o bin/gpu_trunc_norm.ptx
*/
#include <stdio.h>
#include <math.h>
#include <curand_kernel.h>
#define NUM_RNG 128
// Set maximum number of iterations for all rejection sampling loops.
#define MAX_ITER 100
__device__ float one_sided_norm(
float mean,
float sd,
float a,
curandState_t *rng_state)
/* Sample a random value from a one-sided (tail-truncated) normal
 * distribution using the translated-exponential rejection sampler of
 * Robert (2009) on the standardized tail.
 *
 * Args:
 * mean: the mean.
 * sd: the standard deviation.
 * a: the finite truncation point.
 * rng_state: the random number generator to be used.
 *
 * Returns the accepted sample mapped back to the original scale; if no
 * proposal is accepted within MAX_ITER iterations, the last proposal is
 * returned.
 */
{
// Standardize the truncation point.
a = (a - mean) / sd;
// The algorithm samples from the right tail of N(0, 1); mirror everything
// if left tail samples are requested.
float mirror = 1;
if (a < 0) mirror = -1;
a *= mirror;
float z = 0;
for (int i = 0; i < MAX_ITER; i++)
{
// Generate z ~ EXP(alpha) + a (proposal from a shifted exponential).
float alpha = (a + sqrtf(powf(a, 2) + 4)) / 2;
float u = curand_uniform(rng_state);
z = -logf(1 - u) / alpha + a;
// Compute the acceptance probability g(z).
float gz = expf(-powf(z - alpha, 2) / 2);
// Generate u and test acceptance.
u = curand_uniform(rng_state);
if (u <= gz) break;
}
// Undo the mirroring and standardization.
return sd * mirror * z + mean;
}
// Flattens the (up to 3D) block-local thread index and (up to 2D) block
// index into a single global linear thread id.
__device__ int get_thread_id()
{
    int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    int local_id = threadIdx.x
                 + threadIdx.y * blockDim.x
                 + threadIdx.z * (blockDim.y * blockDim.x);
    int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    return local_id + block_id * threads_per_block;
}
extern "C" {
__global__ void gpu_curand_init(int seed, curandState_t **state)
/* Initialize random number generators.
 *
 * Args:
 * seed: a seed value.
 * state: pointer to the random number generators.
 *
 * Each of the first NUM_RNG threads heap-allocates its own generator state
 * with device-side malloc (released later by gpu_curand_deinit) and seeds
 * it with a distinct subsequence (sequence number = thread id).
 */
{
int idx = get_thread_id();
if (idx < NUM_RNG)
{
curandState_t *rng_state = \
(curandState_t*) malloc(sizeof(curandState_t));
curand_init(seed, idx, 0, rng_state);
state[idx] = rng_state;
}
}
__global__ void gpu_curand_deinit(curandState_t **state)
/* De-initialize random number generators.
 *
 * Args:
 * state: pointer to the random number generators.
 *
 * Frees the device-heap generator states allocated by gpu_curand_init.
 */
{
int idx = get_thread_id();
if (idx < NUM_RNG) free(state[idx]);
}
__global__ void gpu_trunc_norm(
int n,
float *mean,
float *sd,
float *a,
float *b,
float *result,
curandState_t **state)
/* Sample random values from a truncated normal distribution.
 *
 * Args:
 * n: number of values to sample.
 * mean: array of means.
 * sd: array of standard deviations.
 * a: array of lower truncation points.
 * b: array of upper truncation points.
 * result: array to store the random values.
 * state: pointer to the random number generators.
 */
{
int idx = get_thread_id();
// Only compute if the index is less than the result length.
if (idx < n)
{
// Choose RNG state based on thread ID within block. A better
// solution would be to run only one block and have each thread
// generate many random values.
// NOTE(review): distinct threads with the same idx % NUM_RNG share one
// curandState_t; if they run concurrently this mutates shared generator
// state — confirm the intended launch keeps this safe.
curandState_t *rng_state = state[idx % NUM_RNG];
// Draw a truncated normal value using vanilla rejection sampling
// if the truncation region includes the mean; otherwise, use the
// one-sided algorithm described in Robert (2009).
float draw = 0;
if (!isfinite(a[idx]) && b[idx] <= mean[idx])
{ // Use one-sided algorithm.
draw = one_sided_norm(mean[idx], sd[idx], b[idx], rng_state);
} else if (!isfinite(b[idx]) && a[idx] >= mean[idx])
{
draw = one_sided_norm(mean[idx], sd[idx], a[idx], rng_state);
} else
{ // Use vanilla rejection sampling (falls back to the last draw if
// nothing lands inside [a, b] within MAX_ITER attempts).
for (int i = 0; i < MAX_ITER; i++)
{
draw = sd[idx] * curand_normal(rng_state) + mean[idx];
if (a[idx] <= draw && draw <= b[idx]) break;
}
} // end if
result[idx] = draw;
} // end if
}
} // end extern
|
3,387 | #include <stdio.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
/*
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
*/
__device__
double calculateDistance(double* data, double* center, int size){
    // Euclidean (L2) distance between two length-`size` vectors.
    double sum_of_squares = 0;
    for (int d = 0; d < size; d++) {
        sum_of_squares += pow(data[d] - center[d], 2);
    }
    return sqrt(sum_of_squares);
}
// One k-means assignment step: each thread classifies one data row
// (id = global thread index + offset) to the nearest of k centers, records
// the label in result[], and bumps the per-cluster counter with atomicAdd.
// Launch requirement: k * cols doubles of dynamic shared memory (staged
// copy of `centers`).
//
// Fixes vs. the original:
//  * the shared-memory staging was indexed by the *global* id (including
//    `offset`), so every block other than the first left s_centers
//    uninitialized — each block now cooperatively copies all k*cols values
//    using threadIdx.x;
//  * __syncthreads() sat inside the `id < rows` branch, a divergent
//    barrier whenever the grid over-covers `rows` — the staging and the
//    barrier now run before the bounds check so all threads reach them.
__global__
void kmean(int offset, double* data, double* centers, int rows, int cols, int k, int* result, int* class_count){
    extern __shared__ double s_centers[];
    // Cooperative, block-local copy of the centers into shared memory.
    for(int t = threadIdx.x; t < k * cols; t += blockDim.x){
        s_centers[t] = centers[t];
    }
    __syncthreads();

    int id = blockIdx.x*blockDim.x+threadIdx.x + offset;
    if(id < rows){
        double min_distance = 0;
        int min_distance_cluster = 0;
        for(int i=0;i<k;i++){
            double distance = calculateDistance(&data[id*cols], &s_centers[i*cols], cols);
            // First center always wins; later ones only on strict improvement.
            if(i == 0 || distance < min_distance){
                min_distance = distance;
                min_distance_cluster = i;
            }
        }
        result[id] = min_distance_cluster;
        atomicAdd(&class_count[min_distance_cluster], 1);
    }
}
/*
__global__
void divideNewCenters(double* new_centers, int k, int cols, int* class_count){
int thread_id = blockIdx.x*blockDim.x+threadIdx.x;
if(thread_id >= k*cols){
return;
}
int cluster_id = thread_id/cols;
new_centers[thread_id] /= class_count[cluster_id];
}
*/
// Parses a CSV file into data_ptr (row-major, rows x cols), skipping the
// header line and the first `col_start` columns of every data row.
// Values are converted with atof; rows shorter than col_start + cols leave
// the corresponding entries untouched.
//
// Fixes vs. the original: the FILE handle is now closed, and a failed
// fopen() no longer dereferences a NULL stream.
void loadData(char* file_url, int col_start, int cols, int rows, double* data_ptr){
    FILE* stream = fopen(file_url, "r");
    if(stream == NULL){
        fprintf(stderr, "loadData: cannot open %s\n", file_url);
        return;
    }
    char line[1024];
    char* clipboard;
    // skip the header row
    fgets(line, 1024, stream);
    for(int i=0;i<rows;i++){
        fgets(line, 1024, stream);
        clipboard = strtok( line, "," );
        int j = 0;
        while( clipboard != NULL )
        {
            if(j >= col_start){
                data_ptr[i * cols + j-col_start] = atof(clipboard);
            }
            clipboard = strtok( NULL, "," );
            j++;
            // Stop once the requested column window has been filled.
            if(j >= col_start + cols){
                break;
            }
        }
    }
    fclose(stream);
}
// Writes the per-row cluster assignments to result.csv as "Id,Cluster" rows.
void saveResultAsCsv(int * data, int rows){
    FILE * file = fopen("result.csv", "w+");
    fprintf(file,"Id,Cluster\n");
    for(int row = 0; row < rows; row++){
        fprintf(file, "%d, %d\n", row, data[row]);
    }
    fclose(file);
}
// Writes the k x cols matrix of cluster centers to centers.csv,
// comma-separated, one cluster per line.
void saveClusterCentersAsCsv(double * data, int k, int cols){
    FILE * file = fopen("centers.csv", "w+");
    //fprintf(file,"Cluster id, Cluster\n");
    for(int row = 0; row < k; row++){
        for(int col = 0; col < cols; col++){
            if(col == cols - 1){
                fprintf(file, "%f\n", data[row*cols + col]);  // row terminator
            } else {
                fprintf(file, "%f,", data[row*cols + col]);
            }
        }
    }
    fclose(file);
}
// k-means driver: GPU computes nearest-center assignments; the host
// recomputes the centers (mean of assigned rows) between steps.
int main(int argc, char** argv){
    int k = 12;
    // weather - cols:6
    // iris - cols:2, rows:150, k: 3
    int rows = 1586822;
    int cols = 6;
    int steps = 30;
    int offset = 0;
    // Cast keeps the char* expected by loadData while avoiding the
    // deprecated string-literal-to-char* conversion.
    char* file_url = (char*)"wind_data_prepared.csv";
    // Optional CLI override: <file> <rows> <cols> <k>
    if(argc == 5){
        file_url = argv[1];
        rows = atoi(argv[2]);
        cols = atoi(argv[3]);
        k = atoi(argv[4]);
    } else {
        printf("Continue with default parameters \n");
    }
    size_t data_size = cols * rows * sizeof(double);
    size_t centers_size = k * cols * sizeof(double);
    size_t calc_classes_size = rows * sizeof(int);
    size_t class_count_size = k * sizeof(int);
    double * h_data;                                       // pinned (faster H2D)
    double * h_centers = (double *) malloc(centers_size);  // center[k*cols + col]
    int * h_calc_classes = (int*) malloc(calc_classes_size);
    int * h_class_count = (int*) malloc(class_count_size);
    cudaMallocHost((void**)&h_data, data_size);
    srand(time(NULL));
    loadData(file_url, 1, cols, rows, h_data);
    // Seed the centers from k randomly chosen data rows.
    for(int i=0; i < k ; i++){
        int data_index = (rand() % rows) * cols;
        for(int j=0;j<cols;j++){
            h_centers[i*cols + j] = h_data[data_index + j];
        }
    }
    double * d_data;
    double * d_centers;
    int * d_calc_classes;
    int * d_class_count;
    cudaError_t error;
    dim3 threadsPerBlock(1024, 1, 1);
    dim3 blocksPerGrid((rows + threadsPerBlock.x - 1) / threadsPerBlock.x, 1, 1);
    int max_rows = blocksPerGrid.x * threadsPerBlock.x;    // rows covered per launch
    cudaMalloc(&d_data, data_size);
    cudaMalloc(&d_centers, centers_size);
    cudaMalloc(&d_calc_classes, calc_classes_size);
    cudaMalloc(&d_class_count, class_count_size);
    cudaMemcpy(d_data, h_data, data_size, cudaMemcpyHostToDevice);
    for(int step=0;step<steps;step++){
        cudaMemset(d_class_count, 0, class_count_size);
        cudaMemcpy(d_centers, h_centers, centers_size, cudaMemcpyHostToDevice);
        for(offset=0;offset<rows;offset+=max_rows){
            // One launch per chunk of max_rows rows; shared mem holds the centers.
            kmean<<<blocksPerGrid, threadsPerBlock, k*cols*sizeof(double)>>>(offset, d_data, d_centers, rows, cols, k, d_calc_classes, d_class_count);
        }
        // Bug fix: the original shrank calc_classes_size inside the launch
        // loop and then copied only that partial size, losing assignments
        // whenever more than one launch was needed. All rows have results.
        cudaMemcpy(h_calc_classes, d_calc_classes, rows * sizeof(int), cudaMemcpyDeviceToHost);
        // Recompute the centers (mean of the rows assigned to each cluster),
        // except after the final assignment step.
        if(step != steps-1){
            for(int i=0;i<k;i++){
                for(int j=0;j<cols;j++){
                    h_centers[i*cols+j] = 0;
                }
            }
            cudaMemcpy(h_class_count, d_class_count, class_count_size, cudaMemcpyDeviceToHost);
            for(int i=0;i<rows;i++){
                // Sum every row into its assigned cluster's accumulator.
                for(int j=0;j<cols;j++){
                    h_centers[h_calc_classes[i] * cols + j] += h_data[i*cols + j];
                }
            }
            for(int i=0;i<k;i++){
                for(int j=0;j<cols;j++){
                    // Empty clusters keep a zero center rather than dividing by 0.
                    if(h_class_count[i] != 0){
                        h_centers[i*cols + j] /= h_class_count[i];
                    }
                }
            }
        }
    }
    // Surface any error that occurred during the launches above.
    error = cudaGetLastError();
    if(error != cudaSuccess){
        fprintf(stderr, "Error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    saveResultAsCsv(h_calc_classes, rows);
    saveClusterCentersAsCsv(h_centers, k, cols);
    cudaFree(d_data);                 // original freed d_data twice here
    cudaFree(d_class_count);
    cudaFree(d_calc_classes);
    cudaFree(d_centers);
    cudaFreeHost(h_data);
    free(h_calc_classes);
    free(h_centers);
    free(h_class_count);
    return 0;
}
3,388 | #include <iostream>
#include <math.h>
#include <vector>
#include <iomanip>
#include <sstream>
#include <string>
#include <fstream>
#include <thread>
#include <ctime>
#include <stdio.h>
// Scramble a caller-supplied seed exactly like java.util.Random.setSeed:
// XOR with the LCG multiplier, then keep the low 48 bits.
__device__ static inline void setSeed(int64_t *seed)
{
    const int64_t mask48 = (1LL << 48) - 1;
    *seed = (*seed ^ 0x5deece66dLL) & mask48;
}
// Advance the 48-bit LCG state and return its top `bits` bits, mirroring
// java.util.Random.next(bits).
__device__ static inline int next(int64_t *seed, const int bits)
{
    const int64_t mask48 = (1LL << 48) - 1;
    int64_t s = *seed;
    s = (s * 0x5deece66dLL + 0xbLL) & mask48;
    *seed = s;
    return (int)(s >> (48 - bits));
}
// Java-compatible Random.nextInt(n): uniform int in [0, n), using
// rejection sampling so the tail bucket does not bias the result.
__device__ static inline int nextInt(int64_t *seed, const int n)
{
int bits, val;
const int m = n - 1;
// Power-of-two fast path: (n & (n-1)) == 0, written via m.
if((m & n) == 0) return (int) ((n * (int64_t)next(seed, 31)) >> 31);
do {
bits = next(seed, 31);
val = bits % n;
}
// Reject draws from the incomplete final bucket (the overflow test used
// by java.util.Random).
while (bits - val + m < 0);
return val;
}
// 2-D integer coordinate in the horizontal (x, z) plane.
struct Pos
{
int x, z;
};
// Axis-aligned rectangle spanning start..end (inclusive) in the x/z plane.
// NOTE(review): `__device__` on a class *definition* is non-standard; only
// the member function needs the qualifier — confirm nvcc accepts this as
// intended rather than silently ignoring it.
__device__ class BoundingBox {
public:
Pos start;
Pos end;
// True when the two inclusive rectangles overlap on both axes.
__device__ bool intersectsWith(BoundingBox box)
{
return this->end.x >= box.start.x && this->start.x <= box.end.x && this->end.z >= box.start.z && this->start.z <= box.end.z;
}
};
#define BLOCK_SIZE (128)
#define WORK_SIZE_BITS 16
#define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE))
#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
// Report a failed CUDA runtime call with its source location and abort,
// propagating the error code as the process exit status (see GPU_ASSERT).
inline void gpuAssert(cudaError_t code, const char *file, int line) {
    if (code == cudaSuccess) return;
    fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line);
    exit(code);
}
/*
Originally 64-bit seed value.
Mod 48 bit to get the 48 bit value.
Time could be any 64-bit value that when mod 48 gives the structure seed value.
We have the 48 bit post-mod 48 value
((8682522807148012UL * 181783497276652981UL)^x)%(1LL << 48) = someSeed
Take 48 bit seed value
Loop upper bits
Xor (8682522807148012UL * 181783497276652981UL) with upperBits Seed
Find seed that matches
*/
__device__ BoundingBox guessBox;
__device__ int64_t startCurrent = 8682522807148012L;
__device__ int64_t hardcoded = 181783497276652981L;
__device__ int64_t current;
// One candidate time guess per thread: guess = (global thread id + offset)
// scaled by 1000, XORed with the precomputed `current` to form a Random
// state. Guesses passing the filter are appended to `buffer` through the
// atomic counter (at most one entry per thread per call, so buffer sized
// at SEEDS_PER_CALL cannot overflow).
__global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(uint64_t offset, uint32_t* counter, int64_t* buffer){
int64_t timeGuess = (blockIdx.x * blockDim.x + threadIdx.x) + offset;
// Step the search in units of 1000 (whole seconds of a millisecond clock).
timeGuess *= 1000;
int64_t seedGuess = current ^ timeGuess;
// First draw is deliberately discarded; the filter tests the *second*
// nextInt(203). NOTE(review): 203/103 encode the target structure check
// this sieve was ported from — confirm against the original tool.
nextInt(&seedGuess, 203);
if(nextInt(&seedGuess, 203) == 103){
buffer[atomicAdd(counter, 1)] = timeGuess;
}
}
// Single-thread setup kernel: folds the hardcoded Java Random multiplier
// into `current` (13 successive multiplies of startCurrent by `hardcoded`,
// wrapping mod 2^64) and publishes the search bounding box to device
// globals. NOTE(review): guessBox is written but never read by threadWork
// in this file — confirm it is still needed.
__global__ __launch_bounds__(1,1) static void setupGuessBox(Pos guessMin, Pos guessMax){
current = startCurrent*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded*hardcoded;
guessBox.start = guessMin;
guessBox.end = guessMax;
}
int64_t* buffer;
uint32_t* counter;
std::vector<int64_t> structureSeeds;
int64_t* structSeedsArr;
// Brute-force scan of candidate times in [startValue, total): each batch
// of SEEDS_PER_CALL guesses is filtered on the GPU and the survivors are
// streamed to a text file.
int main(int argc, char **argv ){
    int64_t startValue = 1282613228000000;
    int64_t total = 1282706397225000;
    time_t start = time(NULL);
    FILE* fp = fopen("seananners-middlestep.txt", "w+");
    if (fp == NULL) {
        fprintf(stderr, "cannot open output file\n");
        return 1;
    }
    uint64_t amount = total - startValue;
    // Bug fix: select the device before any allocation or launch — the
    // original called cudaSetDevice(0) *after* launching setupGuessBox.
    GPU_ASSERT(cudaSetDevice(0));
    GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(int64_t) * SEEDS_PER_CALL));
    GPU_ASSERT(cudaMallocManaged(&counter, sizeof(uint32_t)));
    // Bug fix: managed memory is not guaranteed zeroed; the first kernel
    // call read an uninitialized counter in the original.
    *counter = 0;
    Pos guessMin;
    Pos guessMax;
    guessMin.x = 1710;
    guessMin.z = 276;
    guessMax.x = 1734;
    guessMax.z = 348;
    setupGuessBox<<<1,1>>>(guessMin, guessMax);
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaDeviceSynchronize());
    uint64_t countOut = 0;
    for(int64_t offset = 0; offset < (int64_t)amount; offset += SEEDS_PER_CALL){
        int64_t value = startValue + offset;
        threadWork<<<1ULL<<WORK_SIZE_BITS,BLOCK_SIZE>>>(value, counter, buffer);
        GPU_ASSERT(cudaPeekAtLastError());
        GPU_ASSERT(cudaDeviceSynchronize());
        // Flush this batch's matches to disk.
        for(uint32_t i = 0; i < *counter; i++){
            // %lld wants long long; int64_t may be plain long on 64-bit Linux.
            fprintf(fp, "%lld\n", (long long)buffer[i]);
        }
        // Progress report roughly every 1e9 seeds.
        if(countOut >= 1000000000){
            time_t tempTime = time(NULL);
            uint64_t tempDiff = tempTime - start;
            double sps = (double)offset/(double)tempDiff;
            double percent = ((double)offset/(double)amount) * 100.0;
            printf("Seeds Per Second: %f\tProgress: %f\n", sps, percent);
            countOut = 0;
        }
        *counter = 0;
        countOut += SEEDS_PER_CALL;
    }
    time_t end = time(NULL);
    uint64_t diff = end - start;
    double seedsPerSec = (double)total/(double)diff;
    printf("Time taken: %lld\nSeeds per second: %15.9f", (long long)diff, seedsPerSec);
    fclose(fp);
    cudaFree(buffer);
    cudaFree(counter);
    return 0;
}
3,389 | #include <stdio.h>
#include <cuda_runtime.h>
// Single-thread kernel: store a + b into the device location *c.
__global__ void add(int a, int b, int *c) {
    int sum = a + b;
    *c = sum;
}
// Each launched thread prints one greeting via device-side printf.
__global__ void hello (void)
{
    printf("Hello World from GPU!\n"); // fixed typo: "Wold" -> "World"
}
// CUDA smoke test: computes 2+7 on the device, then prints greetings from
// 10 GPU threads. Returns 0 on success, -1 on a CUDA failure (the
// original performed no error checking at all).
extern "C" int fun_cuda()
{
    int c = 0;
    int *dev_c = NULL;
    if (cudaMalloc((void **)&dev_c, sizeof(int)) != cudaSuccess) {
        fprintf(stderr, "fun_cuda: cudaMalloc failed\n");
        return -1;
    }
    add<<<1,1>>>(2, 7, dev_c);
    // cudaMemcpy synchronizes with the kernel and surfaces launch errors.
    if (cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "fun_cuda: cudaMemcpy failed\n");
        cudaFree(dev_c);
        return -1;
    }
    printf("2+7=%d\n",c);
    cudaFree(dev_c);
    hello<<<1, 10>>>();
    // Ensure device-side printf output is flushed before the reset.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
3,390 | #include "includes.h"
/*
* /usr/local/cuda/bin/nvcc -gencode arch=compute_20,code=compute_20 -o fw_kernel.ptx -ptx fw_kernel.cu
*/
extern "C" {
}
// One Floyd–Warshall relaxation for pivot k over an N x N adjacency
// matrix, with parallel successor (next_array) maintenance. Expects a 2-D
// launch with one thread per (i, j) cell.
__global__ void fw(float *adj_array, int *next_array, int k, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    float check = 0.0f;
    // Bug fix: `next` was declared float, silently converting the int
    // successor id through floating point (lossy for large graphs).
    int next = 0;
    if (i < N && j < N)
    {
        check = adj_array[j * N + k] + adj_array[k * N + i];
        next = next_array[j * N + k];
    }
    // Read phase ends here for the whole block; the barrier is outside any
    // divergent return, as required.
    __syncthreads();
    // Bug fix: the original guard used `i > N || j > N`, letting threads
    // with i == N or j == N fall through with an uninitialized `check` and
    // write out of bounds. NOTE(review): the i == 0 || j == 0 exclusion is
    // preserved from the original — confirm vertices are 1-indexed.
    if (i == 0 || j == 0 || i >= N || j >= N) return;
    if (check < adj_array[j * N + i])
    {
        adj_array[j * N + i] = check;
        next_array[j * N + i] = next;
    }
}
3,391 | #include <cmath>
#include <cuda_runtime.h>
namespace computation_playground {
// Naive transpose: thread x walks one length-m column of `in`; blockIdx.y
// selects the column (the launcher sets grid.y == n). Reads of `in` are
// contiguous per block; writes to `out` are strided by n.
// NOTE(review): which of m/n is "rows" is a convention fixed by the
// launcher, not visible here — indices assume columns of `in` are
// m-contiguous.
__global__ void transpose2d_naive_kernel(float* in, float* out, int m, int n) {
int in_row_offet = blockIdx.x * blockDim.x + threadIdx.x;
if(in_row_offet < m) {
int in_global_offset = blockIdx.y * m + in_row_offet;
int out_global_offset = in_row_offet * n + blockIdx.y;
*(out + out_global_offset) = *(in + in_global_offset);
}
}
// Launch the naive transpose on `stream`: 32 threads per block along a
// column, one grid.y slice per column; grid.x is the ceil-divided column
// length so the tail is covered.
void transpose2d_naive(float* in, float* out, int m, int n, cudaStream_t stream) {
    constexpr int kThreadsPerBlock = 32;
    const int blocks_per_row =
        static_cast<int>(std::ceil(m / static_cast<float>(kThreadsPerBlock)));
    dim3 grid(blocks_per_row, n);
    transpose2d_naive_kernel<<<grid, kThreadsPerBlock, 0, stream>>>(in, out, m, n);
}
// Tiled transpose: each block covers m_direction_iteration x
// n_direction_iteration tiles of blockDim-sized sub-tiles; every thread
// handles one element per (i, j) tile step. Bounds-checked per element,
// so partial edge tiles are safe.
__global__ void transpose2d_tile_kernel(float* in, float* out, int m, int n,
int m_direction_iteration, int n_direction_iteration) {
for(int j = 0; j < n_direction_iteration; j++) {
for(int i = 0; i < m_direction_iteration; i++) {
// (x, y) is this thread's element for the current tile step.
int x = blockIdx.x * blockDim.x * m_direction_iteration + i * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y * n_direction_iteration + j * blockDim.y + threadIdx.y;
if(x < m && y < n) {
out[x * n + y] = in[y * m + x];
}
}
}
}
// Launch the tiled transpose on `stream`. Grid dimensions are the
// ceil-division of each extent by (tile size * per-block iterations).
void transpose2d_tile(float* in, float* out, int m, int n, int tile_m_dim, int tile_n_dim,
    int m_direction_iteration, int n_direction_iteration, cudaStream_t stream) {
    // Bug fix: the original divided *integers* inside std::ceil, so the
    // ceil was a no-op and the grid was undersized whenever m or n was not
    // an exact multiple of tile * iterations — the tail elements were
    // never transposed. Divide in floating point (cf. transpose2d_naive).
    dim3 grid(static_cast<unsigned int>(std::ceil(m / float(tile_m_dim * m_direction_iteration))),
              static_cast<unsigned int>(std::ceil(n / float(tile_n_dim * n_direction_iteration))));
    dim3 block(tile_m_dim, tile_n_dim);
    transpose2d_tile_kernel<<<grid, block, 0, stream>>>(in, out, m, n, m_direction_iteration, n_direction_iteration);
}
} // namespace computation_playground |
3,392 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#define INF 1000000
using namespace std;
// One Floyd–Warshall (Roy–Floyd) relaxation for pivot k over an N x N
// matrix. Written for the host's 1-D launch <<<num_block, threads>>>.
// Bug fix: the original derived i from blockIdx.y/threadIdx.y, which are
// always 0 in a 1-D launch, so only row i == 0 was ever relaxed — and
// there was no bounds guard, so tail threads indexed out of range.
__global__ void RoyFloyd(int* matrix, int k, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N * N) return;          // grid-tail guard
    int i = idx / N;                   // row
    int j = idx % N;                   // column
    int through_k = matrix[i*N + k] + matrix[k*N + j];
    if (through_k < matrix[i*N + j])
        matrix[i*N + j] = through_k;
}
// All-pairs shortest paths: read an n x n matrix from input.txt (0 means
// "no edge" and becomes INF), relax on the GPU, write distances to
// output.txt with 0 on the diagonal / unreachable pairs.
int main()
{
    int n;
    int thread_per_block = 1024;
    ifstream input("input.txt");
    ofstream output("output.txt");
    input >> n;
    // Bug fix: the original computed ceil((float)(n*n / tpb)) — integer
    // division happens first, so the ceil was a no-op and the tail of the
    // matrix got no threads. Use integer ceil-division.
    int num_block = (n * n + thread_per_block - 1) / thread_per_block;
    int *hostMatrix = (int*)malloc(sizeof(int) * n * n);
    int *deviceMatrix;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
        {
            // Bug fix: the original indexed hostMatrix[i + j], collapsing
            // the whole matrix onto its first row/column. Row-major i*n+j.
            input >> hostMatrix[i * n + j];
            if (hostMatrix[i * n + j] == 0)
                hostMatrix[i * n + j] = INF;
        }
    cudaMalloc((void**)&deviceMatrix, n * n * sizeof(int));
    cudaMemcpy(deviceMatrix, hostMatrix, n * n * sizeof(int), cudaMemcpyHostToDevice);
    clock_t t;
    t = clock();
    for (int k = 0; k < n; ++k)
        RoyFloyd <<< num_block, thread_per_block >>>(deviceMatrix, k, n);
    // Kernel launches are asynchronous: wait before stopping the clock so
    // the timing covers the actual work.
    cudaDeviceSynchronize();
    t = clock() - t;
    cout << "Time : " << ((double)t) / CLOCKS_PER_SEC << endl;
    cudaMemcpy(hostMatrix, deviceMatrix, n * n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
            if (i == j || hostMatrix[i * n + j] == INF)
                output << "0 ";
            else
                output << hostMatrix[i * n + j] << " ";
        output << endl;
    }
    free(hostMatrix);
    cudaFree(deviceMatrix);
}
3,393 | #include "includes.h"
/*
* Implementations
*/
// Criss-cross attention aggregation (forward): for each spatial position
// (x, y) of each channel plane, accumulate attention-weighted values `g`
// from every position in the same row (width terms) and same column
// (height-1 terms, the position itself excluded). `weight` holds
// height+width-1 attention scores per output position (row scores first,
// then compacted column scores). Launch: 2-D blocks over (x, y),
// blockIdx.z enumerates channel planes.
// NOTE(review): `out` is accumulated with += — assumed pre-zeroed by the
// caller; confirm.
__global__ void ca_map_forward_kernel(const float *weight, const float *g, float *out, int num, int chn, int height, int width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int sp = height * width;
int len = height + width - 1;
int plane = blockIdx.z;
if (x < width && y < height && plane < chn) {
for (int batch = 0; batch < num; ++batch) {
// Row pass: all `width` positions sharing row y.
for (int i = 0; i < width; ++i) {
float _g = g[(batch * chn + plane) * sp + y*width + i];
float _w = weight[(batch * len + i) * sp + y*width + x];
out[(batch * chn + plane) * sp + y*width + x] += _g * _w;
}
// Column pass: the height-1 positions sharing column x, skipping
// (x, y) itself; j compacts the column index past the skipped row.
for (int i = 0; i < height; ++i) {
if (i == y) continue;
int j = i<y ? i : i-1;
float _g = g[(batch * chn + plane) * sp + i*width + x];
float _w = weight[(batch * len + width + j) * sp + y*width + x];
out[(batch * chn + plane) * sp + y*width + x] += _g * _w;
}
}
}
}
3,394 | #include <stdio.h>
#include <math.h>
#include <string.h>
#include <curand.h>
#include <curand_kernel.h>
#define PI 3.14159265358979323846 // known value of pi
//------------------CUDA ERROR HANDLING------------------//
#define gpuErrChk(e) gpuAssert(e, __FILE__, __LINE__)
// Catch GPU errors in CUDA runtime calls
// Report a failed CUDA runtime call with its source location and exit
// (used via the gpuErrChk macro).
inline void gpuAssert(cudaError_t call, const char* file, int line) {
    if (call == cudaSuccess) return;
    fprintf(stderr, "gpuAssert: %s %s %d\n", cudaGetErrorString(call), file, line);
    exit(-1);
}
//------------------CALCULATIONS------------------//
// Calculate the covariance estimate on GPU
// Combine the per-thread diagonal covariances into one estimate:
// est_cov = (sum_j cov_j^{-1})^{-1}, elementwise over n entries.
// inv_cov is scratch space with one n-slice per thread.
__device__ void calc_cov(float* cov, float* inv_cov, float* est_cov, int n, int n_threads) {
    int threadID = threadIdx.x + blockIdx.x * blockDim.x; // Global thread ID
    int offset = threadID * n;
    int i, j;
    // Each thread inverts its own slice of the covariance.
    for (i = offset; i < offset + n; i++) {
        inv_cov[i] = 1.f / cov[i];
    }
    // Bug fix: thread 0 reads *every* thread's inv_cov slice below, so all
    // writes must be published first. The original synchronized only after
    // the combine, racing thread 0 against the other writers.
    __syncthreads();
    // NOTE(review): __syncthreads() is block-scoped; with n_blocks > 1 the
    // combine still races across blocks — confirm the launch uses 1 block.
    if (threadID == 0) {
        for (i = 0; i < n; i++) {
            est_cov[i] = 0.f; // Initialise
            // Sum the precision contributed by each thread
            for (j = 0; j < n_threads; j++) {
                est_cov[i] += inv_cov[i + j * n];
            }
            est_cov[i] = 1.f / est_cov[i]; // Back to a covariance
        }
    }
    __syncthreads(); // All threads leave with est_cov fully written
}
// Calculate the population mean (mu) estimate on GPU
// Combine per-thread precision-weighted means into the global mu
// estimate: est_mu = est_cov * sum_j (mu_j * cov_j^{-1}), elementwise.
// inv_mu is scratch space with one n-slice per thread; inv_cov and
// est_cov must already be populated by calc_cov.
__device__ void calc_mu(float* mu, float* inv_cov, float* inv_mu, float* est_cov, float* est_mu, int n, int n_threads) {
    int threadID = threadIdx.x + blockIdx.x * blockDim.x; // Global thread ID
    int offset = threadID * n;
    int i, j;
    // Each thread fills its own slice with the precision-weighted mean.
    for (i = offset; i < offset + n; i++) {
        inv_mu[i] = mu[i] * inv_cov[i];
    }
    // Bug fix: thread 0 reads every thread's slice below, so a barrier is
    // required *before* the combine (the original synchronized only after,
    // racing thread 0 against the other writers).
    __syncthreads();
    // NOTE(review): block-scoped barrier — with n_blocks > 1 this combine
    // still races across blocks; confirm the launch uses a single block.
    if (threadID == 0) {
        for (i = 0; i < n; i++) {
            est_mu[i] = 0.f; // Initialise to 0
            // Accumulate every thread's contribution
            for (j = 0; j < n_threads; j++) {
                est_mu[i] += inv_mu[i + j * n];
            }
            // Scale using the covariance estimate
            est_mu[i] = est_cov[i] * est_mu[i];
        }
    }
    __syncthreads(); // All threads leave with est_mu fully written
}
// Calculates determinant
// Determinant of this thread's diagonal covariance slice: the product of
// its n diagonal entries.
__device__ float alt_calc_det(float* cov, int n) {
    int base = n * (threadIdx.x + blockIdx.x * blockDim.x);
    float det = 1.f;
    for (int k = 0; k < n; k++) {
        det *= cov[base + k];
    }
    return det;
}
// Calculates vector product
// Mahalanobis quadratic form (x - mu)^T C^{-1} (x - mu) for this thread's
// parameter slice, where C is diagonal (cov holds its diagonal). data_idx
// is the offset of the sample x inside `data`.
__device__ float calc_vec_mat_vec_prod(float* cov, float* data, float* mu, int data_idx, int n) {
    int threadID = threadIdx.x + blockIdx.x * blockDim.x;
    int j;
    int initial_idx = n * threadID;
    float diff;
    float cum_sum = 0.f;
    for (int i = initial_idx; i < initial_idx + n; i++) {
        j = i - initial_idx + data_idx; // matching element of the sample
        diff = data[j] - mu[i];
        // Fix: the original wrote `diff * diff * 1. / cov[i]`, promoting
        // every term to double on the device for no accuracy benefit.
        cum_sum += diff * diff / cov[i];
    }
    return cum_sum;
}
// Calculates the log of the determinant
// Log-determinant of this thread's diagonal covariance slice.
// Fix: computed as a sum of logs instead of log(product) — with many or
// very small/large entries the running float product in alt_calc_det
// under/overflows and the original returned -inf/inf even when the
// log-determinant itself is perfectly finite.
__device__ float get_log_det(float* A, int n) {
    int threadID = threadIdx.x + blockIdx.x * blockDim.x;
    int start = n * threadID;
    float log_det = 0.f;
    for (int i = start; i < start + n; i++) {
        log_det += logf(A[i]);
    }
    return log_det;
}
// Calculates the log likelihood (assuming Normal errors)
// Log-density of one n-dimensional sample under a diagonal Gaussian.
// NOTE: `cov_det` is already the *log* determinant (see get_log_det), so
// t1 below is the -0.5*log|C| term.
__device__ float get_log_likelihood(float* data, float* mu, float* cov, float cov_det, int data_idx, int n) {
float t1, t2, t3;
float L;
// Large finite stand-in (~1e19) used instead of -infinity for NaNs.
float fl_inf = 10000000000000000000;
t1 = -0.5 * cov_det;
t2 = -0.5 * calc_vec_mat_vec_prod(cov, data, mu, data_idx, n);
t3 = -0.5 * n * log(2 * PI);
L = t1 + t2 + t3;
// Reject NaN results (e.g. from a non-positive covariance upstream).
if (isnan(L)) {
return -1 * fl_inf;
}
else {
return L;
}
}
// Returns the log likelihood for all of the data, summing the result.
// Sum of log-likelihoods over this thread's partition of the dataset
// (n_samples_per_thread samples of n floats, starting at the thread's
// offset). NOTE(review): the n_samples parameter is unused here — kept
// for interface stability; confirm whether a bounds check was intended.
__device__ float get_total_log_likelihood(float* cov, float* mu, float* data, int n_samples_per_thread, int n_samples, int n) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
int data_idx;
// Log-determinant is constant across this thread's samples; hoisted.
float cov_det = get_log_det(cov, n);
int data_offset = threadID * n * n_samples_per_thread;
float cum_L = 0.f;
for (data_idx = data_offset; data_idx < data_offset + n * n_samples_per_thread; data_idx += n) {
cum_L += get_log_likelihood(data, mu, cov, cov_det, data_idx, n);
}
return cum_L;
}
// Calculates the square root of the cumulative sum
// Euclidean distance between this thread's mu slice and the matching
// slice of the ground-truth mean.
__device__ float calc_l2_norm(float* mu, float* true_mu, int n) {
    int threadID = threadIdx.x + blockIdx.x * blockDim.x;
    int base = n * threadID;
    float sum_sq = 0.;
    for (int idx = base; idx < base + n; idx++) {
        float d = true_mu[idx] - mu[idx];
        sum_sq += d * d;
    }
    float result = sqrt(sum_sq);
    return result;
}
//------------------RANDOM GENERATORS------------------//
// Initialise random state for each CUDA thread
// Give every thread its own curand state, seeded by (fixed seed + thread
// id) so runs are reproducible; swap the 0 for clock64() to vary runs.
__device__ void cuda_rand_init(curandState* state) {
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int seed = 0; // Fixed seed to 0 for reproducible results, otherwise just use clock64()
curand_init(seed + threadID, 0, 0, &state[threadID]); // Sets up initial cuda state for random generator
}
// Generates 2 psuedorandom floats from uniform distribution to update rand_num and rand_ints (for each thread)
// Draw this thread's uniform random values: rand_num in (0, 1] (used for
// the accept/reject test) and rand_ints in [0, max_int) (parameter index).
__device__ void gen_uniform(curandState* state, float* rand_num, int* rand_ints, int max_int) {
    int threadID = threadIdx.x + blockIdx.x * blockDim.x;
    float rand_int_num = (float)(curand_uniform(&state[threadID]));
    rand_num[threadID] = (float)(curand_uniform(&state[threadID]));
    // Bug fix: curand_uniform samples (0, 1] *including* 1.0, so the
    // original could yield rand_ints == max_int — an out-of-range
    // parameter index that made perturb_params write past the last
    // thread's slice. Clamp to the valid range [0, max_int).
    int r = (int)(max_int * rand_int_num);
    rand_ints[threadID] = (r >= max_int) ? (max_int - 1) : r;
}
// Generates 2 pesudorandom floats from normal distribution to update rand_mu and rand_cov (for each thread)
// Draw this thread's two normal perturbations: norm_1 ~ N(0, sigma_1^2)
// (mu proposal) and norm_2 ~ N(0, sigma_2^2) (cov proposal).
__device__ void gen_normal(curandState* state, float sigma_1, float* norm_1, float sigma_2, float* norm_2) {
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
// Returns a pseudorandom float normally distributed with mean 0 and std dev of sigma_1 (or sigma_2)
norm_1[threadID] = (float)(curand_normal(&state[threadID]) * sigma_1); // rand_mu
norm_2[threadID] = (float)(curand_normal(&state[threadID]) * sigma_2); // rand_cov
}
// Updates the random variables for each thread
// Refresh every per-thread random draw needed for one MCMC iteration: a
// uniform accept/reject number, a uniform parameter index in [0, 2n)
// (n mu entries followed by n cov entries), and normal perturbations for
// mu and cov with the given step sizes.
__device__ void generate_random_nums(curandState* state, float* rand_mu, float* rand_cov, float mu_step, float cov_step, float* rand_num, int* rand_ints, int n) {
    const int n_params = 2 * n;
    gen_uniform(state, rand_num, rand_ints, n_params);
    gen_normal(state, mu_step, rand_mu, cov_step, rand_cov);
}
//------------------PERTURBATIONS------------------//
// Peturbates the covariance
// Propose a new covariance entry: add the normal perturbation to the old
// value. param_idx is expected in [n, 2n) (see perturb_params), so
// idx = param_idx - n selects the entry within this thread's slice.
// A non-positive proposal is silently dropped (covariance must stay > 0),
// leaving new_cov unchanged for that entry.
__device__ void perturb_cov(float* old_cov, float* new_cov, int param_idx, float rand_cov_num, int n) {
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
int offset = threadID * n;
int idx = param_idx - n;
float new_val = old_cov[offset + idx] + rand_cov_num; // Add random noise from proposal distribution to old cov sample
if (new_val > 0) { // If covariance is positive, set it as the new covariance
new_cov[offset + idx] = new_val;
}
}
// Peturbates a random parameter from the array parameters using a Normal dist with std_dev = step size in the array
// Perturb one randomly chosen parameter of this thread's chain: indices
// [0, n) pick a mu entry, [n, 2n) pick a cov entry (handled by
// perturb_cov). rand_ints must already be populated by gen_uniform.
__device__ void perturb_params(float* old_cov, float* old_mu, float* new_cov, float* new_mu, int* rand_ints, float* rand_mu, float* rand_cov, int n) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
int offset = threadID * n;
int param_idx = rand_ints[threadID]; // Pick parameter to perturb
if (param_idx < n) {
new_mu[param_idx + offset] = old_mu[param_idx + offset] + rand_mu[threadID]; // Add random noise from proposal distribution to old mu sample
}
else {
perturb_cov(old_cov, new_cov, param_idx, rand_cov[threadID], n); // Otherwise perturb thee covariance
}
}
//------------------HELPERS------------------//
// Initialize the array with desired value (for mean and covariance)
// Fill this thread's n-element slice of curr_val with initial_val.
__device__ void init_array(float initial_val, float* curr_val, int n) {
    int base = n * (threadIdx.x + blockDim.x * blockIdx.x);
    for (int k = 0; k < n; k++) {
        curr_val[base + k] = initial_val;
    }
}
// Read data file (into temporary) and returns the number of samples
// Count the samples (groups of n floats) in samples_<n>.txt.
// Returns 0 when the file is missing or contains no parseable floats.
__host__ int count_n_samples(int n) {
    unsigned int i = 0;
    FILE* input;
    char input_file[50];
    float temp;
    sprintf(input_file, "samples_%d.txt", n);
    input = fopen(input_file, "r");
    if (input == NULL) {                      // original dereferenced NULL here
        fprintf(stderr, "count_n_samples: cannot open %s\n", input_file);
        return 0;
    }
    // Bug fix: the original looped while fscanf != EOF, which spins forever
    // on a malformed token (fscanf returns 0, not EOF, on a match failure).
    // Loop while exactly one float was converted.
    while (fscanf(input, "%f", &temp) == 1) {
        i++;
    }
    fclose(input);
    return i / n; // floats divided by dimensions = number of samples
}
// Read data file into h_data for processing
// Read up to n_samples * n floats from samples_<n>.txt into data.
// Stops at EOF, the first malformed token, or when the buffer is full.
__host__ void read_samples(int n_samples, int n, float* data) {
    unsigned int i = 0;
    // Bug fix: the original never bounded the write — a file with more
    // floats than n_samples * n overflowed the caller's buffer.
    unsigned int capacity = (unsigned int)(n_samples * n);
    FILE* input;
    char input_file[50];
    sprintf(input_file, "samples_%d.txt", n);
    input = fopen(input_file, "r");
    if (input == NULL) {                      // original dereferenced NULL here
        fprintf(stderr, "read_samples: cannot open %s\n", input_file);
        return;
    }
    // == 1 (one float converted) instead of != EOF, which loops forever on
    // a malformed token.
    while (i < capacity && fscanf(input, "%f", &data[i]) == 1) {
        i++;
    }
    fclose(input);
}
// Copies a vector
// Copy this thread's n-element slice from source_vec to destination_vec.
__device__ void vec_cpy(float* source_vec, float* destination_vec, int n) {
    int base = n * (threadIdx.x + blockDim.x * blockIdx.x);
    for (int k = 0; k < n; k++) {
        destination_vec[base + k] = source_vec[base + k];
    }
}
// Print usage to command prompt
// Print the supported CLI flags with their default values (order matters
// for users reading the help text; kept as ordered printf calls).
void print_usage(int default_n_steps, int default_n, int default_spacing, int default_shared, int default_n_threads, int default_n_blocks) {
printf("Usage: MCMC options are...\n");
printf("	Number of steps: --n_steps=%d\n", default_n_steps);
printf("	Number of dimensions: --n_dim=%d\n", default_n);
printf("	Evaluation frequency: --eval_freq=%d\n", default_spacing);
printf("	Store data in shared memory (requires small datasets): --sm=%d\n", default_shared);
printf("	Number of threads: --n_threads=%d\n", default_n_threads);
printf("	Number of blocks: --n_blocks=%d\n", default_n_blocks);
}
// Matches the user input variable
// Return the index of the keyword that `arg` begins with (expecting the
// "--flag=value" form the caller parses), or -1 when nothing matches.
// Bug fix: the original compared only the single character at position 6,
// which (a) read past the end of the 4-character literal "--sm" —
// undefined behavior — and (b) could not reliably distinguish flags.
// Match the full keyword prefix and require '=' right after it.
int match_flag_id(char* arg, const char* keyword_array[]) {
    for (int i = 0; i < 6; i++) {
        size_t len = strlen(keyword_array[i]);
        if (strncmp(arg, keyword_array[i], len) == 0 && arg[len] == '=') {
            return i;
        }
    }
    return -1;
}
// Argument parsing so that program parameters can be easily changed for experimental analysis
// Parse "--flag=value" CLI options into a malloc'd array of 6 ints:
// {n_steps, n_dim, eval_freq, sm, n_threads, n_blocks}, filling defaults
// for anything not supplied. Exits after printing usage on -h/--help or
// on an unrecognized flag. The caller owns (and should free) the result.
int* parse_args(int argc, char* argv[]) {
// Initialise variables
int i, id, init_id;
int* args = (int*)malloc(6 * sizeof(int));
// -1 marks "not provided"; replaced by defaults below.
for (i = 0; i < 6; i++) {
args[i] = -1;
}
// Expected key words
const char* keyword_array[6];
keyword_array[0] = "--n_steps";
keyword_array[1] = "--n_dim";
keyword_array[2] = "--eval_freq";
keyword_array[3] = "--sm";
keyword_array[4] = "--n_threads";
keyword_array[5] = "--n_blocks";
// Default args
int default_n_steps = 10000;
int default_n = 10;
int default_spacing = 1000;
int default_shared = 0;
int default_n_threads = 256; // ideally 128-256 threads per block
int default_n_blocks = 1; // atleast as many as SMs available
// Compare each argument flag
for (i = 1; i < argc; i++) {
if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help")) { // Can ask for usage instructions with help
print_usage(default_n_steps, default_n, default_spacing, default_shared, default_n_threads, default_n_blocks);
exit(0);
}
else {
id = match_flag_id(argv[i], keyword_array);
if (id != -1) { // Flag matched
// Skip past "--flag=" and read the integer value.
init_id = strlen(keyword_array[id]) + 1;
argv[i] += init_id;
sscanf(argv[i], "%d", &args[id]); // Read and store value into appropriate id
}
else { // Flag mismatch occurred
print_usage(default_n_steps, default_n, default_spacing, default_shared, default_n_threads, default_n_blocks);
exit(0);
}
}
}
// Set other parameters to default values
if (args[0] == -1) {
printf("Number of steps not provided. Defaulting to %d steps.\n", default_n_steps);
args[0] = default_n_steps;
}
if (args[1] == -1) {
printf("Number of dimensions not provided. Defaulting to %d dimensions.\n", default_n);
args[1] = default_n;
}
if (args[2] == -1) {
printf("Evaluation frequency not provided. Defaulting to evaluating every %d steps.\n", default_spacing);
args[2] = default_spacing;
}
if (args[3] == -1) {
printf("Shared memory not specified. Defaulting to using global memory.\n");
args[3] = default_shared;
}
if (args[4] == -1) {
printf("Number of threads not specified. Defaulting to using %d.\n", default_n_threads);
args[4] = default_n_threads;
}
if (args[5] == -1) {
printf("Number of blocks not specified. Defaulting to using %d.\n", default_n_blocks);
args[5] = default_n_blocks;
}
return args;
}
// Prints the parameter configuration to the command window
// Echo the resolved run configuration to the console (ordered output;
// kept as sequential printf calls).
void print_params(int n_steps, int n, int spacing, int shared, int n_threads, int n_blocks, int n_samples, int n_samples_per_thread) {
printf("N steps: %d\n", n_steps);
printf("Number of dimensions: %d\n", n);
printf("Evaluation frequency spacing: %d\n", spacing);
printf("Using shared mem: %d\n", shared);
printf("Using # threads: %d\n", n_threads);
printf("Using # blocks: %d\n", n_blocks);
printf("Number of samples (per dim): %d, number of total threads: %d, number of samples per thread: %d.\n", n_samples, n_threads, n_samples_per_thread);
}
// Writes the output of the MCMC to files for further analysis
// Write one float array to `path`, space-separated ("%f " per value).
// Skips the file with a message if it cannot be opened (the original
// dereferenced a NULL FILE*).
static void write_float_array(const char* path, const float* data, int count) {
    FILE* f = fopen(path, "w");
    if (f == NULL) {
        fprintf(stderr, "output_results: cannot open %s\n", path);
        return;
    }
    for (int i = 0; i < count; i++) {
        fprintf(f, "%f ", data[i]);
    }
    fclose(f);
}

// Persist the MCMC outputs for offline analysis: final per-thread mu and
// cov samples, the evolution of the mu estimate, the last accept/reject
// uniforms, and the elapsed time in milliseconds.
void output_results(float* h_mu, float* h_cov, float* h_est_mu, float* h_rand, int n, int n_threads, int n_sample_points, float millis) {
    write_float_array("mu_data.txt", h_mu, n * n_threads);
    write_float_array("mu_ev_data.txt", h_est_mu, n_sample_points * n);
    write_float_array("cov_data.txt", h_cov, n * n_threads);
    write_float_array("rand_data.txt", h_rand, n_threads);
    write_float_array("timing.txt", &millis, 1);
}
//------------------MCMC STEP------------------//
// Computes a single step of the MCMC algorithm
// One Metropolis accept/reject decision per thread: compare the proposal's
// total log-likelihood against the stored one; always accept improvements,
// otherwise accept with probability exp(new_L - old_L) using the
// pre-drawn uniform in rand_num. Records the decision in take_step and
// updates curr_L on acceptance.
// NOTE(review): old_cov, old_mu, rand_ints, rand_mu and rand_cov are
// unused here — kept for interface stability; confirm they can be pruned.
__device__ void mcmc_step(float* curr_L, float* new_cov, float* new_mu, float* old_cov, float* old_mu, int* rand_ints, float* rand_mu, float* rand_cov, float* rand_num, int n, int n_samples_per_thread, int n_samples, int* take_step, float* data) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
float old_L = curr_L[threadID];
float new_L = get_total_log_likelihood(new_cov, new_mu, data, n_samples_per_thread, n_samples, n);
float threshold;
if (new_L > old_L) { // If the fit (total log likelihood) has improved, take a step and update likelihood
take_step[threadID] = 1;
curr_L[threadID] = new_L;
}
else { // Otherwise accept or reject if less than threshold (exponential raised to the difference between the new and old likelihood)
threshold = exp(new_L - old_L);
if (rand_num[threadID] < threshold) { // Accept or reject
take_step[threadID] = 1; // Step was taken and update likelihood
curr_L[threadID] = new_L;
}
else {
take_step[threadID] = 0; // Did not take a step
}
}
}
//------------------FULL MCMC------------------//
// Main MCMC kernel: each thread runs an independent Metropolis chain over
// its partition of the data; every `spacing` steps the per-thread
// estimates are fused (calc_cov/calc_mu) and thread 0 archives the fused
// mu into all_est_mu. Dynamic shared memory (s_data) optionally caches the
// dataset when `shared == 1`.
// NOTE(review): the shared-memory copy and the calc_cov/calc_mu combines
// index by the *global* thread id and rely on block-scoped barriers — this
// only works when the launch uses a single block (the host default);
// confirm before raising n_blocks.
__global__ void mcmc(int shared, int n, int n_samples, int n_samples_per_thread, int n_threads, int n_steps, int spacing, float mu_step, float cov_step, curandState* state, float* curr_cov, float* new_cov, float* curr_mu, float* new_mu, float* rand_num, int* rand_ints, float* rand_mu, float* rand_cov, float* curr_L, int* take_step, float* data, float* inv_mu, float* inv_cov, float* est_mu, float* est_cov, float* all_est_mu) {
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float initial_mean = 2.0f; // Farily arbitrary selection here - should prob check this an okay value
float initial_cov = 0.5f;
int step_count = 0;
int local_take_step, estimation_offset;
// Initialise shared memory
extern __shared__ float s_data[];
// Initialise covariance and mean arrays
init_array(initial_cov, curr_cov, n);
init_array(initial_mean, curr_mu, n);
cuda_rand_init(state);
// If using shared memory use s_data
// Note: Requires very small sample size for this to work without exceeding shared memory capacity
if (shared == 1) {
// Partition dataset according to threads
int copy_offset = threadID * n * n_samples_per_thread; // Starting index
int copy_end = copy_offset + n_samples_per_thread * n;
for (int i = copy_offset; i < copy_end; i++) {
s_data[i] = data[i];
}
// Get the current log likelihood using shared data
// (each thread only reads the slice it just copied, so no barrier is
// needed between the copy and this call)
curr_L[threadID] = get_total_log_likelihood(curr_cov, curr_mu, s_data, n_samples_per_thread, n_samples, n);
}
else {
// Get the current log likelihood using global data
curr_L[threadID] = get_total_log_likelihood(curr_cov, curr_mu, data, n_samples_per_thread, n_samples, n);
}
// Copy current vectors (covariance and mu) to new vectors
vec_cpy(curr_cov, new_cov, n);
vec_cpy(curr_mu, new_mu, n);
while (step_count < n_steps) {
// Generate random numbers for rand_num, rand_ints (both from uniform dist) and rand_mu and rand_cov (both from normal dist)
generate_random_nums(state, rand_mu, rand_cov, mu_step, cov_step, rand_num, rand_ints, n);
// Propose new sample for new mu and cov by perturbating (add small random noise) the most recent sample
perturb_params(curr_cov, curr_mu, new_cov, new_mu, rand_ints, rand_mu, rand_cov, n);
// Accept or reject new proposal as the new sample (if rejected, old sample is retained).
if (shared == 1) { // If using shared, compute the step using shared data, otherwise use the global data
mcmc_step(curr_L, new_cov, new_mu, curr_cov, curr_mu, rand_ints, rand_mu, rand_cov, rand_num, n, n_samples_per_thread, n_samples, take_step, s_data);
}
else {
mcmc_step(curr_L, new_cov, new_mu, curr_cov, curr_mu, rand_ints, rand_mu, rand_cov, rand_num, n, n_samples_per_thread, n_samples, take_step, data);
}
local_take_step = take_step[threadID];
// If individual thread took a step, copy the new vectors (covariance and mu) to the current vectors
if (local_take_step == 1) {
vec_cpy(new_cov, curr_cov, n);
vec_cpy(new_mu, curr_mu, n);
}
// If at evaluation step, calculate the estimated covariance and mu
// (step_count is uniform across the block, so this branch — and the
// barrier inside it — is taken by all threads together)
if (step_count % spacing == 0) {
calc_cov(curr_cov, inv_cov, est_cov, n, n_threads);
calc_mu(curr_mu, inv_cov, inv_mu, est_cov, est_mu, n, n_threads);
// If first thread, combine the results
if (threadID == 0) {
estimation_offset = step_count / spacing * n; // Error term
for (int i = estimation_offset; i < estimation_offset + n; i++) {
all_est_mu[i] = est_mu[i - estimation_offset];
}
}
__syncthreads(); // Synchronise threads within the same block
}
step_count += 1;
}
}
//-------------MAIN---------------//
// Host driver for the MCMC sampler: parses run parameters, allocates all
// device/host buffers, launches the `mcmc` kernel (staging the sample data in
// dynamic shared memory when `shared == 1`), times the run with CUDA events,
// copies results back to the host, and writes them out.
int main(int argc, char* argv[]) {
// Process user inputs and parse arguments (could add in some logic here to prevent invalid inputs that would cause program to crash)
int* args = parse_args(argc, argv);
int n_steps = args[0]; // total MCMC steps per chain
int n = args[1]; // problem dimensionality
int spacing = args[2]; // steps between estimation snapshots
int shared = args[3]; // 1 = stage sample data in shared memory
int n_threads = args[4]; // total threads across the whole grid
int n_blocks = args[5];
int n_samples = count_n_samples(n); // Note: n_samples for each of the n dimensions
int n_samples_per_thread = n_samples / n_threads;
int n_sample_points = n_steps / spacing; // number of estimation snapshots recorded over the run
float mu_step = 0.2; // proposal perturbation scale for mu
float cov_step = 0.2; // proposal perturbation scale for cov
print_params(n_steps, n, spacing, shared, n_threads, n_blocks, n_samples, n_samples_per_thread);
// Declare variables
cudaEvent_t start, stop;
curandState* state;
float *rand_num, *rand_mu, *curr_mu, *new_mu, *inv_mu, *est_mu, *all_est_mu, *rand_cov, *curr_cov, *new_cov, *inv_cov, *est_cov, *curr_L, *data;
int *rand_ints, *take_step;
// Allocate memory for GPU (device) variables
// NOTE(review): the per-thread buffers below are sized n * n_threads * n_blocks,
// but the grid launches n_threads threads in TOTAL (block_dim = n_threads / n_blocks),
// so they appear over-allocated by a factor of n_blocks — verify intent.
cudaEventCreate(&start);
cudaEventCreate(&stop);
gpuErrChk(cudaMalloc(&state, n_blocks * n_threads * sizeof(curandState)));
gpuErrChk(cudaMalloc((void**)&rand_num, n_blocks * n_threads * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&rand_mu, n_blocks * n_threads * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&curr_mu, n * n_threads * n_blocks * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&new_mu, n * n_threads * n_blocks * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&inv_mu, n * n_blocks * n_threads * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&est_mu, n * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&all_est_mu, n * n_sample_points * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&rand_cov, n_blocks * n_threads * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&curr_cov, n * n_threads * n_blocks * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&new_cov, n * n_threads * n_blocks * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&inv_cov, n * n_threads * n_blocks * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&est_cov, n * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&curr_L, n_threads * n_blocks * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&data, n * n_samples * sizeof(float)));
gpuErrChk(cudaMalloc((void**)&rand_ints, n_blocks * n_threads * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&take_step, n_threads * n_blocks * sizeof(int)));
// Allocate memory for CPU (host) variables
float* h_data = (float*)malloc(n * n_samples * sizeof(float));
float* h_cov = (float*)malloc(n * n_threads * n_blocks * sizeof(float));
float* h_mu = (float*)malloc(n * n_threads * n_blocks * sizeof(float));
float* h_rand = (float*)malloc(n_threads * n_blocks * sizeof(float));
float* h_est_mu = (float*)malloc(n * n_sample_points * sizeof(float));
// Read the samples into h_data
read_samples(n_samples, n, h_data);
// Copy all of the samples from the CPU to the GPU
gpuErrChk(cudaMemcpy(data, h_data, n * n_samples * sizeof(float), cudaMemcpyHostToDevice));
// Calculate grid and block dimension
int grid_dim = n_blocks; // Default to 1D structures - could provide an option to use more complicated grid dimensions which could boost performance
int block_dim = n_threads / n_blocks; // threads per block, so grid_dim * block_dim == n_threads in total
printf("Grid dim: %d, Block dim: %d\n", grid_dim, block_dim);
if (shared == 1) {
cudaEventRecord(start);
// Usage: mykernel<<<grid_dim, block_dim, shared_mem>>>(args);
// NOTE: shared_mcmc only works for very small data sets
// (the whole n * n_samples data array must fit in dynamic shared memory)
mcmc<<<grid_dim, block_dim, n * n_samples * sizeof(float) >>>(shared, n, n_samples, n_samples_per_thread, n_threads, n_steps, spacing, mu_step, cov_step, state, curr_cov, new_cov, curr_mu, new_mu, rand_num, rand_ints, rand_mu, rand_cov, curr_L, take_step, data, inv_mu, inv_cov, est_mu, est_cov, all_est_mu);
cudaEventRecord(stop);
gpuErrChk(cudaDeviceSynchronize()); // Error check for shared memory sync
}
else {
cudaEventRecord(start);
mcmc<<<grid_dim, block_dim>>>(shared, n, n_samples, n_samples_per_thread, n_threads, n_steps, spacing, mu_step, cov_step, state, curr_cov, new_cov, curr_mu, new_mu, rand_num, rand_ints, rand_mu, rand_cov, curr_L, take_step, data, inv_mu, inv_cov, est_mu, est_cov, all_est_mu);
cudaEventRecord(stop);
gpuErrChk(cudaDeviceSynchronize()); // Error check for shared memory sync
}
// Timing results (the cudaDeviceSynchronize above guarantees `stop` has completed)
float millis = 0.f;
cudaEventElapsedTime(&millis, start, stop);
printf("Code executed in %f ms.\n", millis);
// Copy from GPU to CPU
// NOTE(review): only n * n_threads elements are copied back even though the
// buffers were allocated with an extra n_blocks factor; this is consistent
// with a grid of n_threads total threads — confirm against the kernel layout.
gpuErrChk(cudaMemcpy(h_cov, curr_cov, n * n_threads * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrChk(cudaMemcpy(h_mu, curr_mu, n * n_threads * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrChk(cudaMemcpy(h_est_mu, all_est_mu, n * n_sample_points * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrChk(cudaMemcpy(h_rand, rand_num, n_threads * sizeof(float), cudaMemcpyDeviceToHost));
// Output results to file
output_results(h_mu, h_cov, h_est_mu, h_rand, n, n_threads, n_sample_points, millis);
// Deallocate CUDA memory
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(state);
cudaFree(rand_num);
cudaFree(rand_mu);
cudaFree(curr_mu);
cudaFree(new_mu);
cudaFree(inv_mu);
cudaFree(est_mu);
cudaFree(all_est_mu);
cudaFree(rand_cov);
cudaFree(curr_cov);
cudaFree(new_cov);
cudaFree(inv_cov);
cudaFree(est_cov);
cudaFree(curr_L);
cudaFree(data);
cudaFree(rand_ints);
cudaFree(take_step);
// Dellocate CPU memory
free(h_data);
free(h_cov);
free(h_mu);
free(h_rand);
free(h_est_mu);
return 0;
}
|
3,395 | #include <cuda.h>
#include <stdio.h>
#define N (1024*1024)
// Element-wise vector add: c[i] = a[i] + b[i], one element per thread.
// No bounds guard: the caller's launch configuration is assumed to cover
// the arrays exactly (here 1024 blocks x 1024 threads == N elements).
__global__ void kernel(int* a, int* b, int* c){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// Benchmark driver: repeatedly copies two input vectors to the device and
// launches the add kernel, then copies the result back. Improvements over the
// original: CUDA errors from the (asynchronous) launches and the copies are
// now checked and reported instead of being silently ignored.
int main(int argc, char** argv){
    int size = N * sizeof(int);
    // Host buffers.
    int* host_a = (int*) malloc(size);
    int* host_b = (int*) malloc(size);
    int* host_c = (int*) malloc(size);
    // Device buffers.
    int* device_a; cudaMalloc((void**)&device_a, size);
    int* device_b; cudaMalloc((void**)&device_b, size);
    int* device_c; cudaMalloc((void**)&device_c, size);
    // Initialise inputs; host_c gets a CPU reference value but is overwritten
    // by the device result below.
    for(int i = 0; i < N; i++){
        *(host_a+i) = i;
        *(host_b+i) = 2*i;
        *(host_c+i) = *(host_a+i) + *(host_b+i);
    }
    printf("CPU\n");
    // Benchmark loop: 20000 rounds of H2D copies + kernel launch.
    for(int j = 0; j< 20000; j++){
        cudaMemcpy(device_b, host_b, size, cudaMemcpyHostToDevice);
        cudaMemcpy(device_a, host_a, size, cudaMemcpyHostToDevice);
        kernel<<<1024,1024>>>(device_a, device_b, device_c);
    }
    // Kernel launches are asynchronous and never report failures directly;
    // surface any accumulated launch/copy error once after the loop.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("GPU\n");
    // Blocking copy: also synchronizes with the final kernel launch.
    err = cudaMemcpy(host_c, device_c, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
|
3,396 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// C[i] = A[i] + B[i] for i in [0, N); the guard handles a partial final block.
__global__ void VecAdd(float* A, float* B, float* C, int N){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// Two-stream pipeline: stream[0] computes C = A + B; stream[1] computes
// F = C + D after waiting (via event `fin`) for stream[0]'s kernel to finish
// producing C. Results are written to the file named by argv[2].
// Fixes over the original: argv is validated before use, and the two streams
// are destroyed (they were previously leaked).
int main(int argc, char** argv){
    // argv[1] = vector length, argv[2] = output file path.
    if (argc < 3) {
        fprintf(stderr, "usage: %s N outfile\n", argv[0]);
        return 1;
    }
    srand(2634);
    int N = atoi(argv[1]);
    char* out = argv[2];
    cudaEvent_t start, stop, fin;
    float dur_time;
    size_t size = N * sizeof(float);
    // Pinned host buffers (required for truly asynchronous cudaMemcpyAsync).
    float* h_A;
    cudaMallocHost((void**)&h_A, size);
    float* h_B;
    cudaMallocHost((void**)&h_B, size);
    float* h_C;
    cudaMallocHost((void**)&h_C, size);
    float* h_D;
    cudaMallocHost((void**)&h_D, size);
    float* h_F;
    cudaMallocHost((void**)&h_F, size);
    int i;
    for (i = 0; i < N; ++i){
        h_A[i] = (float)rand() / RAND_MAX;
        h_B[i] = (float)rand() / RAND_MAX;
        h_D[i] = (float)rand() / RAND_MAX;
    }
    float* d_A;
    cudaMalloc((void**)&d_A, size);
    float* d_B;
    cudaMalloc((void**)&d_B, size);
    float* d_C;
    cudaMalloc((void**)&d_C, size);
    float* d_D;
    cudaMalloc((void**)&d_D, size);
    float* d_F;
    cudaMalloc((void**)&d_F, size);
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&fin);
    cudaEventRecord(start, 0);
    cudaStream_t stream[2];
    cudaStreamCreate(&stream[0]);
    cudaStreamCreate(&stream[1]);
    // Stage 1 in stream[0]: copy A and B in, compute C = A + B, copy C out.
    cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, stream[0]);
    cudaMemcpyAsync(d_B, h_B, size, cudaMemcpyHostToDevice, stream[0]);
    VecAdd<<<blocksPerGrid, threadsPerBlock, 0, stream[0]>>>(d_A, d_B, d_C, N);
    cudaEventRecord(fin, stream[0]);   // marks "d_C is ready"
    cudaMemcpyAsync(h_C, d_C, size, cudaMemcpyDeviceToHost, stream[0]);
    // Stage 2 in stream[1]: D's upload overlaps stage 1; the kernel reading
    // d_C must wait on `fin` before launching.
    cudaMemcpyAsync(d_D, h_D, size, cudaMemcpyHostToDevice, stream[1]);
    cudaStreamWaitEvent(stream[1], fin, 0);
    VecAdd<<<blocksPerGrid, threadsPerBlock, 0, stream[1]>>>(d_C, d_D, d_F, N);
    cudaMemcpyAsync(h_F, d_F, size, cudaMemcpyDeviceToHost, stream[1]);
    // `stop` is recorded on the legacy default stream, which serializes with
    // all other streams' prior work, so synchronizing on it waits for both.
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&dur_time, start, stop);
    fprintf(stderr, "%.3f\n", dur_time);
    // Release streams (previously leaked) and events.
    cudaStreamDestroy(stream[0]);
    cudaStreamDestroy(stream[1]);
    cudaEventDestroy(fin);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFree(d_D);
    cudaFree(d_F);
    // Redirect stdout to the output file and dump both result vectors.
    freopen(out, "w", stdout);
    for (i = 0; i < N; ++i)
        printf("%.5f %.5f\n", h_C[i], h_F[i]);
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(h_C);
    cudaFreeHost(h_D);
    cudaFreeHost(h_F);
    return 0;
}
3,397 | #include <stdio.h>
#include <iostream>
#include <string.h>
#include <stdlib.h>
#include <cuda.h>
using namespace std;
#define CUDA_CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// Arbitrary-length decimal integer stored as ASCII digits.
struct BigInteger {
char* valeur; // digit characters ('0'..'9'), most significant first; NOTE(review): not guaranteed NUL-terminated
int taille; // number of digits in `valeur`
bool est_positif; // sign flag: true means non-negative
};
// Adds two equal-length ASCII-decimal numbers d1_v and d2_v (each *d1_t digits,
// most significant first) into dr_v, which has *d1_t + 1 digit slots and must
// be pre-filled with '0'. dr_v[i] receives the sum digit for input position
// i-1, and dr_v[0] ends up holding the final carry digit.
// NOTE(review): the loop is strictly sequential — each iteration consumes the
// carry the previous one wrote into dr_v — so this kernel is only correct when
// launched with a single thread (<<<1,1>>>), as the caller does.
__global__ void test_add(char* d1_v, int* d1_t, bool d1_b,char* d2_v, int* d2_t, bool d2_b,char* dr_v, int* dr_t, bool dr_b) {
for (int i = *d1_t; i >0 ; --i) {
// Sum of the two input digits plus the carry previously stored in dr_v[i].
int nouveau = d1_v[i-1] - '0' + d2_v[i-1] - '0' + dr_v[i] - '0';
if( nouveau > 9 ) {
// Propagate the carry into the next (more significant) result slot.
dr_v[i-1] += 1;
nouveau -= 10;
}
dr_v[i] = nouveau + '0';
}
}
// Fill the first `taille` bytes of `valeur` with the ASCII digit '0'.
// Used to zero-initialise a BigInteger digit buffer before accumulating
// into it. Negative sizes are ignored (the hand-rolled loop did the same;
// with memset an unguarded negative would wrap to a huge size_t).
void initialiser_biginteger(char * valeur, int taille) {
    if (taille > 0) {
        memset(valeur, '0', (size_t)taille);
    }
}
// Adds two fixed 52-digit decimal numbers on the device and prints the
// operands and the 53-digit result.
// Fixes over the original:
//  - the result buffer gets a NUL terminator (it was printed unterminated,
//    reading past the end of the allocation),
//  - string literals are no longer assigned to non-const char* without a cast,
//  - the sign flags are passed by value (the kernel takes bool, not bool*;
//    previously device bool* pointers were implicitly converted to true),
//  - bigInteger2.taille is computed from bigInteger2's own digits,
//  - the launch is error-checked and all device/host allocations are freed.
int main(int argc, char *argv[]) {
    // Single-thread launch: test_add's carry propagation is strictly serial.
    dim3 grid(1);
    dim3 block(1);
    // First operand.
    BigInteger bigInteger1;
    bigInteger1.valeur = (char*)"8794565467498434654216542546541321564624132165432324";
    bigInteger1.taille = strlen(bigInteger1.valeur);
    bigInteger1.est_positif = true;
    char* d1_v;
    int* d1_t;
    CUDA_CHECK(cudaMalloc( (void**) &d1_v, sizeof(char)*bigInteger1.taille));
    CUDA_CHECK(cudaMalloc( (void**) &d1_t, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(d1_v, bigInteger1.valeur, sizeof(char)*bigInteger1.taille, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d1_t, &bigInteger1.taille, sizeof(int), cudaMemcpyHostToDevice));
    // Second operand (same length as the first).
    BigInteger bigInteger2;
    bigInteger2.valeur = (char*)"9876546746542657865165461321654613212156432132164212";
    bigInteger2.taille = strlen(bigInteger2.valeur);
    bigInteger2.est_positif = true;
    char* d2_v;
    int* d2_t;
    CUDA_CHECK(cudaMalloc( (void**) &d2_v, sizeof(char)*bigInteger2.taille));
    CUDA_CHECK(cudaMalloc( (void**) &d2_t, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(d2_v, bigInteger2.valeur, sizeof(char)*bigInteger2.taille, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d2_t, &bigInteger2.taille, sizeof(int), cudaMemcpyHostToDevice));
    // Result: one extra digit slot for the final carry, plus one byte for the
    // NUL terminator so the buffer can be printed safely.
    BigInteger resultat;
    resultat.taille = bigInteger1.taille+1;
    resultat.valeur = (char*)malloc((resultat.taille + 1)*sizeof(char));
    resultat.est_positif = true;
    initialiser_biginteger(resultat.valeur,resultat.taille);
    resultat.valeur[resultat.taille] = '\0';
    char* dr_v;
    int* dr_t;
    CUDA_CHECK(cudaMalloc( (void**) &dr_v, sizeof(char)*resultat.taille));
    CUDA_CHECK(cudaMalloc( (void**) &dr_t, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(dr_v, resultat.valeur, sizeof(char)*resultat.taille, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(dr_t, &resultat.taille, sizeof(int), cudaMemcpyHostToDevice));
    // The bool parameters are passed by value; no device-side sign buffers needed.
    test_add<<<grid, block>>>(d1_v, d1_t, bigInteger1.est_positif,
                              d2_v, d2_t, bigInteger2.est_positif,
                              dr_v, dr_t, resultat.est_positif);
    CUDA_CHECK(cudaGetLastError()); // launch-configuration errors
    // Blocking copy also synchronizes with the kernel.
    CUDA_CHECK(cudaMemcpy(resultat.valeur, dr_v, sizeof(char)*resultat.taille, cudaMemcpyDeviceToHost));
    cout << " " << bigInteger1.valeur << endl;
    cout << " " << bigInteger2.valeur << endl;
    cout << resultat.valeur << endl;
    // Release device and host memory (previously leaked).
    CUDA_CHECK(cudaFree(d1_v));
    CUDA_CHECK(cudaFree(d1_t));
    CUDA_CHECK(cudaFree(d2_v));
    CUDA_CHECK(cudaFree(d2_t));
    CUDA_CHECK(cudaFree(dr_v));
    CUDA_CHECK(cudaFree(dr_t));
    free(resultat.valeur);
    return 0;
}
|
3,398 | template <typename T>
// Grid-stride fill: every element of x[0..n) is set to `value`.
// Index math is done in size_t (the original used int, which overflows for
// n > INT_MAX and triggers a signed/unsigned comparison against `n`).
__device__ void fill(T *x, size_t n, T value) {
    size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t i = idx; i < n; i += stride) x[i] = value;
}
template <typename T>
// Grid-stride AXPY: y[i] += a * x[i] for i in [0, n).
// Index math is done in size_t (the original used int, which overflows for
// n > INT_MAX and triggers a signed/unsigned comparison against `n`).
__device__ void axpy(T a, T *x, T *y, size_t n) {
    size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t i = idx; i < n; i += stride) y[i] += a * x[i];
}
// FILL_C/AXPY_C stamp out non-templated __global__ wrappers (fill_float,
// fill_double, axpy_float, axpy_double) around the templated device helpers.
#define FILL_C(T) \
__global__ void fill ## _ ## T(T *x, size_t n, T value) { fill(x, n, value); }
#define AXPY_C(T) \
__global__ void axpy ## _ ## T(T a, T *x, T *y, size_t n) { axpy(a, x, y, n); }
// extern "C" gives the kernels unmangled symbol names so they can be looked
// up by name (e.g. via the CUDA driver API or foreign-language bindings).
extern "C" {
FILL_C(float)
FILL_C(double)
AXPY_C(float)
AXPY_C(double)
}
|
3,399 | #include <iostream>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cmath>
#include <stdio.h>
#include <math.h>
#include <cstring>
using namespace std;
// One thread per column id in [0, NOC): accumulates the Bernoulli
// log-likelihood of column id's sensor readings under parameter sets a and b,
// then combines them with the prior d[0] to get the posterior Z[id].
// Fix: use the single-precision math functions (logf/expf) — the original
// called log/exp, silently promoting every operation to double.
__global__ void compute_z(int *NOC_device,int *NOS_device,int *SC_device,float *a_device,float *b_device,float *Z_device,float *d_device){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < NOC_device[0]){
        float A = 0.0f; // log-likelihood under parameters a
        float B = 0.0f; // log-likelihood under parameters b
        for (int i = 0; i < NOS_device[0]; i++) {
            // Load the 0/1 sensor reading once (the original read it twice).
            int s = SC_device[i*NOC_device[0]+id];
            A = A + s * logf(a_device[i]) + (1 - s) * logf(1.0f - a_device[i]);
            B = B + s * logf(b_device[i]) + (1 - s) * logf(1.0f - b_device[i]);
        }
        A = expf(A);
        B = expf(B);
        // Posterior probability via Bayes' rule with prior d[0].
        Z_device[id] = (A * d_device[0]) / ((A * d_device[0]) + (B * (1.0f - d_device[0])));
    }
}
// One thread per sensor id in [0, NOS): re-estimates a[id] and b[id] from the
// current posterior Z, and (thread 0 only) updates the prior d[0].
// Fix: the original had every thread write d_device[0]; each computed the same
// totalz so the races wrote identical values, but the redundant global-memory
// writes are now restricted to a single thread.
__global__ void compute_theta(int *NOC_device,int *NOS_device,int *SC_device,float *a_device,float *b_device,float *Z_device,float *d_device){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < NOS_device[0]){
        float tempz = 0.0f;  // sum of Z over columns where sensor id fired
        float totalz = 0.0f; // sum of Z over all columns (same for every thread)
        int cnt = 0;         // number of columns where sensor id fired
        for (int j = 0; j < NOC_device[0]; j++) {
            if (SC_device[id * NOC_device[0] +j] == 1) {
                tempz = tempz + Z_device[j];
                cnt = cnt + 1;
            }
            totalz = totalz + Z_device[j];
        }
        // NOTE(review): divides by totalz and (NOC - totalz); degenerate Z
        // (all zero / all one) would produce inf/NaN — confirm inputs exclude this.
        a_device[id] = tempz / totalz;
        b_device[id] = (cnt - tempz) / (NOC_device[0] - totalz);
        if (id == 0) {
            d_device[0] = totalz / NOC_device[0];
        }
    }
}
// EM-style estimation driver: reads a sparse 0/1 sensing matrix, iterates
// compute_z / compute_theta on the device, and writes thresholded Z to file.
// Fixes over the original:
//  - the ground-truth loop read from `input` (already at EOF), so gt stayed
//    all-zero; it now reads from the `groundtruth` file,
//  - the ~234 KB SC array is made static instead of living on the stack,
//  - fopen results are checked, and groundtruth/output are closed.
int main() {
    FILE *input = fopen("TestSensingMatrix.txt", "r");
    if (input == NULL) {
        printf("Could not open TestSensingMatrix.txt\n");
        exit(-1);
    }
    const int NOS = 30;    // number of sensors (rows of SC)
    const int NOC = 2000;  // number of columns/cases
    const int MAX_IT = 10; // EM iterations
    int *NOS_device;
    int *NOC_device;
    int *MAX_IT_device;
    cudaMalloc(&NOS_device, sizeof(int));
    cudaMalloc(&NOC_device, sizeof(int));
    cudaMalloc(&MAX_IT_device, sizeof(int));
    cudaMemcpy( NOS_device, &NOS, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy( NOC_device, &NOC, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy( MAX_IT_device, &MAX_IT, sizeof(int), cudaMemcpyHostToDevice);
    cudaError_t malloc_error_check = cudaGetLastError();
    if(malloc_error_check != cudaSuccess){
        printf("malloc_error_check: CUDA error: %s\n", cudaGetErrorString(malloc_error_check));
        exit(-1);
    }
    // SC is NOS*NOC ints (~234 KB): static storage keeps it off the stack.
    static int SC[NOS*NOC];
    std::memset(SC, 0, sizeof(SC));
    int *SC_device;
    cudaMalloc(&SC_device, NOS*NOC*sizeof(int));
    cudaMemset(SC_device, 0, sizeof(int)*NOS*NOC);
    // Build the sensing matrix from (row, column) pairs in the input file.
    int row[2];
    while (fscanf(input, "%d,%d", &row[0], &row[1]) == 2) {
        SC[(row[0] - 1)*NOC + (row[1] - 1)] = 1;
    }
    cudaMemcpy(SC_device, SC, sizeof(int)*NOS*NOC, cudaMemcpyHostToDevice);
    cudaError_t malloc_error_check2 = cudaGetLastError();
    if(malloc_error_check2 != cudaSuccess){
        printf("malloc_error_check2: CUDA error: %s\n", cudaGetErrorString(malloc_error_check2));
        exit(-1);
    }
    // s[x] = fraction of columns in which sensor x fired (initialisation prior).
    float s[NOS];
    float *s_device;
    cudaMalloc(&s_device, NOS*sizeof(float));
    std::memset(s, 0, sizeof(s));
    for (int x = 0; x < NOS; x++) {
        int cnt = 0;
        for (int y = 0; y < NOC; y++) {
            if (SC[x*NOC+y] == 1) {
                cnt = cnt + 1;
            }
        }
        s[x] = cnt * 1.0 / NOC;
    }
    cudaMemcpy(s_device, s, sizeof(float)*NOS, cudaMemcpyHostToDevice);
    // Model parameters: a (positive rate), b (negative rate), d (prior).
    float a[NOS];
    float b[NOS];
    float d = 0.5;
    float *d_device;
    cudaMalloc(&d_device, sizeof(float));
    cudaMemcpy(d_device, &d, sizeof(float), cudaMemcpyHostToDevice);
    float Z[NOC];
    std::memset(a, 0, sizeof(a));
    std::memset(b, 0, sizeof(b));
    std::memset(Z, 0, sizeof(Z));
    float *a_device;
    float *b_device;
    float *Z_device;
    cudaMalloc(&a_device, NOS*sizeof(float));
    cudaMalloc(&b_device, NOS*sizeof(float));
    cudaMalloc(&Z_device, NOC*sizeof(float));
    cudaMemset(Z_device, 0, sizeof(float)*NOC);
    cudaMemset(a_device, 0, sizeof(float)*NOS);
    cudaMemset(b_device, 0, sizeof(float)*NOS);
    cudaError_t malloc_error_check3 = cudaGetLastError();
    if(malloc_error_check3 != cudaSuccess){
        printf("malloc_error_check3: CUDA error: %s\n", cudaGetErrorString(malloc_error_check3));
        exit(-1);
    }
    for (int x = 0; x < NOS; x++) {
        a[x] = s[x];
        b[x] = 0.5 * s[x];
    }
    cudaMemcpy(a_device, a, sizeof(float)*NOS, cudaMemcpyHostToDevice);
    cudaMemcpy(b_device, b, sizeof(float)*NOS, cudaMemcpyHostToDevice);
    cudaError_t malloc_error_check4 = cudaGetLastError();
    if(malloc_error_check4 != cudaSuccess){
        printf("malloc_error_check4: CUDA error: %s\n", cudaGetErrorString(malloc_error_check4));
        exit(-1);
    }
    // One thread per column for compute_z; one per sensor for compute_theta.
    dim3 grid_vertex((int)ceil((float)NOC/(float)1024),1), block_vertex(1024,1);
    dim3 grid_vertex2((int)ceil((float)NOS/(float)1024),1), block_vertex2(1024,1);
    for (int itn = 0; itn < MAX_IT; itn++) {
        // E-step: posterior Z given current parameters.
        compute_z<<<grid_vertex,block_vertex>>>(NOC_device,NOS_device,SC_device,a_device,b_device,Z_device,d_device);
        cudaError_t iter_error = cudaGetLastError();
        if(iter_error != cudaSuccess)
        {
            printf("iter_error: CUDA error: %s\n", cudaGetErrorString(iter_error));
            exit(-1);
        }
        cudaDeviceSynchronize();
        // M-step: re-estimate a, b, d from Z.
        compute_theta<<<grid_vertex2,block_vertex2>>>(NOC_device,NOS_device,SC_device,a_device,b_device,Z_device,d_device);
        cudaDeviceSynchronize();
    }
    cudaError_t vertex_filter_errorri = cudaGetLastError();
    if(vertex_filter_errorri != cudaSuccess)
    {
        printf("FilterFrontierrri: CUDA error: %s\n", cudaGetErrorString(vertex_filter_errorri));
        exit(-1);
    }
    // Final E-step with the converged parameters.
    compute_z<<<grid_vertex,block_vertex>>>(NOC_device,NOS_device,SC_device,a_device,b_device,Z_device,d_device);
    cudaError_t vertex_filter_errorr2 = cudaGetLastError();
    if(vertex_filter_errorr2 != cudaSuccess)
    {
        printf("FilterFrontierrr2: CUDA error: %s\n", cudaGetErrorString(vertex_filter_errorr2));
        exit(-1);
    }
    cudaMemcpy(Z, Z_device, sizeof(float)*NOC, cudaMemcpyDeviceToHost);
    cudaMemcpy(a, a_device, sizeof(float)*NOS, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, b_device, sizeof(float)*NOS, cudaMemcpyDeviceToHost);
    cudaMemcpy(&d, d_device, sizeof(float), cudaMemcpyDeviceToHost);
    // Read the ground truth (previously this re-read `input`, which was
    // already at EOF, leaving gt all-zero).
    FILE *groundtruth = fopen("TestGroundTruth.txt", "r");
    int gt[NOC];
    std::memset(gt, 0, sizeof(gt));
    if (groundtruth != NULL) {
        while (fscanf(groundtruth, "%d,%d", &row[0], &row[1]) == 2) {
            gt[row[0] - 1] = row[1];
        }
        fclose(groundtruth);
    }
    // Threshold the posterior at 0.5 and write one "index, 0/1" line per column.
    int out[NOC];
    std::memset(out, 0, sizeof(out));
    FILE *output = fopen("outtie.txt", "w");
    if (output == NULL) {
        printf("Could not open outtie.txt for writing\n");
        exit(-1);
    }
    for (int j = 0; j < NOC; j++) {
        if (Z[j] >= 0.5) {
            out[j] = 1;
        }
    }
    // Per-sensor posterior reliability, printed to stdout.
    float t[NOS];
    std::memset(t, 0, sizeof(t));
    for (int i = 0; i < NOS; i++) {
        t[i] = (a[i]*d) / ((a[i]*d) + (b[i]*(1-d)));
        cout << t[i] << endl;
    }
    for (int j = 0; j < NOC; j++) {
        fprintf(output, "%d, %d\n", j+1, out[j]);
    }
    fclose(input);
    fclose(output);
    //Free GPU Memory
    cudaFree(NOS_device);
    cudaFree(NOC_device);
    cudaFree(MAX_IT_device);
    cudaFree(SC_device);
    cudaFree(s_device);
    cudaFree(d_device);
    cudaFree(a_device);
    cudaFree(b_device);
    cudaFree(Z_device);
    return 0;
}
|
3,400 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
// Function to generate random number between 1 and 2
// Returns a pseudo-random double in [1.0, 2.0]: rand() scaled into [0, 1]
// and shifted up by one.
double randd() {
    double unit = (double)rand() / (RAND_MAX);
    return unit + 1.0;
}
//Serial function To multiply matrix with it's transpose
void multiply_serial(double *h_a,double *h_b, int dim)
{
int i,j,k;
float a, b, sum;
//Start the computation of matrix with it's transpose.
for(i=0; i<dim; i++)
{
for(j=0; j<dim; j++)
{
sum = 0;
for(k=0; k<dim; k++)
{
a =h_a[(k *dim)+i];
b =h_a[k*dim+j ]; // Interchange indices to get the transpose
sum = sum +( a * b);
}
h_b[ i * dim + j ] = sum; //Assign teh value to Matrix B's element.
}
}
}
/*
 * The kernel function. Runs on the device (GPU).
 * Computes d_b = d_a^T * d_a: thread i of each block handles row i, and blocks
 * stride over columns j. d_a/d_b are dim x dim, row-major.
 * d_a - Source matrix.
 * d_b - Destination matrix.
 * dim - Dimension.
 */
__global__ void multiply_device (double *d_a, double *d_b,int dim) {
    double a, b, sum;
    int i = threadIdx.x, j, k;
    // Guard: the launch may use more threads per block than there are rows
    // (the caller previously used 1024 threads for dim == 3); without this
    // check, threads with i >= dim write out of bounds.
    if (i >= dim) return;
    for (j = blockIdx.x; j < dim; j += gridDim.x) {
        sum = 0;
        for(k=0; k<dim; k++) {
            a = d_a[k *dim+i];
            b = d_a[k*dim+j];
            sum = sum + a * b;
        }
        d_b[ i * dim + j ] = sum;
    }
}
// Main function which invokes the serial and parallel multiply routines and
// copies the device result back for comparison.
// Fixes over the original:
//  - threads per block is now dimA, so threadIdx.x (the row index in the
//    kernel) never exceeds the matrix dimension (1024 threads with dimA == 3
//    previously caused out-of-bounds device writes),
//  - d_a/d_b are released with cudaFree (they were leaked; free() on device
//    pointers was rightly commented out but never replaced).
int main()
{
    double *h_a; // host input matrix
    double *h_b; // host result of the serial reference
    double *h_c; // host copy of the device result
    double *d_a; // device input
    double *d_b; // device result
    int dimA = 3; // matrix dimension (dimA x dimA)
    int i,j;
    // Thread hierarchy: one thread per row per block; blocks stride columns.
    int nblocks = 4;
    int tpb = dimA;
    size_t memSize;
    memSize = dimA*dimA * sizeof(double);
    h_a = (double*)malloc(memSize);
    h_b = (double*)malloc(memSize);
    h_c = (double*)malloc(memSize);
    cudaMalloc((void**)&d_a,memSize);
    cudaMalloc((void**)&d_b,memSize);
    // Initialise the host matrix with random values in [1, 2].
    for(i=0;i<dimA;i++)
    {
        for(j=0;j<dimA;j++)
        {
            *(h_a+(i * dimA) + j)= randd();
        }
    }
    // Serial reference: h_b = h_a^T * h_a.
    multiply_serial(h_a,h_b,dimA);
    // Device computation of the same product.
    cudaMemcpy(d_a, h_a,memSize, cudaMemcpyHostToDevice);
    dim3 dimGrid(nblocks);
    dim3 dimBlock(tpb);
    multiply_device<<<dimGrid,dimBlock>>>(d_a,d_b,dimA);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_c,d_b,memSize, cudaMemcpyDeviceToHost);
    // Free device memory (previously leaked), then host memory.
    cudaFree(d_a);
    cudaFree(d_b);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.