serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
8,801 | #include "includes.h"
/*
 * Triad kernel (STREAM-style): dA[i] = dB[i] + alpha * dC[i] for i < N.
 * One element per thread; launch with at least N threads in total.
 */
__global__ void stream(float *dA, float *dB, float *dC, float alpha, int N) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= N) {
        return;  // grid tail: nothing to do for threads past the array end
    }
    dA[tid] = dB[tid] + alpha * dC[tid];
} |
8,802 | #include "includes.h"
/*
 * Tiled square matrix multiply: ab = a * b, all width x width, row-major.
 * Contract (from the indexing below): blockDim.x == blockDim.y == TILE_WIDTH,
 * and width must be a multiple of TILE_WIDTH — the phase loop truncates
 * width/TILE_WIDTH and there are no edge guards. TODO confirm launch config.
 * Shared memory: two static TILE_WIDTH x TILE_WIDTH float tiles per block.
 */
__global__ void multKernel(float *a, float *b, float *ab, int width)
{
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
// allocate tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
// calculate the row & col index to identify element to work on
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
float result = 0;
// loop over the tiles of the input in phases
for(int p = 0; p < width/TILE_WIDTH; ++p)
{
// collaboratively load tiles into shared memory: row-wise and column wise respectively
s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)];
s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col];
// barrier: tiles fully loaded before any thread reads them
__syncthreads();
// dot product between row of s_a and col of s_b
for(int k = 0; k < TILE_WIDTH; ++k)
result += s_a[ty][k] * s_b[k][tx];
// barrier: all reads done before the next phase overwrites the tiles
__syncthreads();
}
ab[row*width+col] = result;
} |
8,803 | #include "includes.h"
// Element-wise integer addition: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard — the launch configuration must
// supply exactly one thread per element or a/b/c are indexed out of range;
// confirm against the caller's grid size.
__global__ void add(int* a, int* b, int* c) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
c[id] = a[id] + b[id];
} |
8,804 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 2050
#define THREADS_PER_BLOCK 128
void checkCUDAError(const char *);
void random_ints(int *a);
/*
 * Element-wise vector addition on the GPU: c[i] = a[i] + b[i] for i < max.
 * One thread per element; surplus threads in the last block do nothing.
 */
__global__ void vectorAdd(int *a, int *b, int *c, int max) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid < max) {
    c[gid] = a[gid] + b[gid];
  }
}
/* Reference CPU implementation of vectorAdd, used to validate GPU output. */
void vectorAddCPU(int *a, int *b, int *c, int max) {
  int idx = 0;
  while (idx < max) {
    c[idx] = a[idx] + b[idx];
    ++idx;
  }
}
/* True iff the first max elements of c and c_ref are identical. */
bool validate(int *c, int *c_ref, int max) {
  bool same = true;
  for (int idx = 0; idx < max && same; ++idx) {
    same = (c[idx] == c_ref[idx]);
  }
  return same;
}
/*
 * Host driver: builds two N-element random vectors, adds them on the GPU,
 * recomputes the sum on the CPU, and prints PASS/FAIL on comparison.
 * Fixes: c_ref was malloc'd but never freed (leak); unused local `errors`
 * removed.
 */
int main(void) {
  int *a, *b, *c, *c_ref; // host copies of a, b, c and the CPU reference
  int *d_a, *d_b, *d_c;   // device copies of a, b, c
  unsigned int size = N * sizeof(int);
  // Alloc space for device copies of a, b, c
  cudaMalloc((void **)&d_a, size);
  cudaMalloc((void **)&d_b, size);
  cudaMalloc((void **)&d_c, size);
  checkCUDAError("CUDA malloc");
  // Alloc space for host copies of a, b, c and setup input values
  a = (int *)malloc(size);
  random_ints(a);
  b = (int *)malloc(size);
  random_ints(b);
  c = (int *)malloc(size);
  c_ref = (int *)malloc(size);
  // Copy inputs to device
  cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
  checkCUDAError("CUDA memcpy");
  // Launch vectorAdd() on the GPU; grid rounded up so all N elements are covered
  vectorAdd<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
              THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
  checkCUDAError("CUDA kernel");
  // Copy result back to host (blocking memcpy also synchronizes with the kernel)
  cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
  checkCUDAError("CUDA memcpy");
  vectorAddCPU(a, b, c_ref, N);
  puts(validate(c, c_ref, N) ? "PASS" : "FAIL");
  // Cleanup — including c_ref, which was previously leaked
  free(a);
  free(b);
  free(c);
  free(c_ref);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  checkCUDAError("CUDA cleanup");
  return 0;
}
/*
 * Abort with a descriptive message if the most recent CUDA API call or
 * kernel launch left an error code pending. cudaGetLastError both reads
 * and clears the sticky error state.
 */
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/* Fill the first N slots of a with pseudo-random values from rand(). */
void random_ints(int *a) {
  unsigned int i = 0;
  while (i < N) {
    a[i] = rand();
    ++i;
  }
}
|
8,805 | //Enables the use of assert()
#include <cassert>
//To be able to use nvprof. Also needs cudaProfilerStop() at the end
#include <cuda_profiler_api.h>
//Included to create random numbers in cuda functions
#include <curand.h>
#include <curand_kernel.h>
//We occasionally might need to print things
#include <iostream>
//Used for fabs and other float math functions
//#include <math.h>
//Enables the use of vectors
#include <vector>
/*
* Checks if there is an error made by cuda which isn't shown to us.
* Use: cudaCheckErrors("<Message>");
*/
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
/*
* Roughly the same function as layerInit(), however it only applies
* to the first layer (maybe change this to one layer and use it for
* scaling as mentioned in the commentary at layerInit()?).
* This is useful as the first nodes layer needs to contain the input
* of the network, so those values can be propagated.
*/
/*
 * Copy the network input `values` into the first node layer so those
 * values can be propagated forward. Grid-stride loop: any grid/block
 * configuration covers all firstNodes elements.
 */
__global__
void firstLayerInit(const unsigned int firstNodes, float *values, float *firstLayer) {
    const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int n = start; n < firstNodes; n += step) {
        firstLayer[n] = values[n];
    }
}
/*
* Initialise the values of the nodes in the layers.
* To scale this up, it might be usefull to use templates
* and higher functions, or just use vectors.
*/
/*
 * Zero-initialise all three node layers with one kernel.
 * Each of the three loops is an independent grid-stride loop, so any
 * launch configuration covers layers of any length.
 */
__global__
void layerInit(const unsigned int firstNodes,
const unsigned int secondNodes,
const unsigned int resultNodes,
float *firstLayer,
float *secondLayer,
float *resultLayer) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < firstNodes; i += stride) {
firstLayer[i] = 0.0f;
}
for (unsigned int i = index; i < secondNodes; i += stride) {
secondLayer[i] = 0.0f;
}
for (unsigned int i = index; i < resultNodes; i += stride) {
resultLayer[i] = 0.0f;
}
}
/*
* Fill the array weights with 'random' numbers.
* Do note that this is NOT a weightLayer, those will be filled using
* the values in this array.
*/
/*
 * Fill `weights` with uniform [0,1) samples; element i uses curand
 * subsequence i of `seed`, so the result is deterministic for a given
 * seed regardless of launch configuration.
 * NOTE(review): calling curand_init once per element is the expensive
 * part of curand; a persistent per-thread state would be faster —
 * confirm whether initialisation time matters here.
 */
__global__
void fillWeights(float *weights, unsigned long int seed, const unsigned int amountWeights) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Grid-stride loop: one sample per weight slot.
for (unsigned int i = index; i < amountWeights; i += stride) {
curandState state;
curand_init(seed, i, 0, &state);
weights[i] = curand_uniform(&state);
}
}
/*
* Given two arrays A = {a,b,c} and Z = {x,y,z}, perform
* R = A*Z in a manner which gives R = {a*x,b*y,c*z}.
*/
/*
 * Element-wise product: results[i] = first[i] * second[i] for i < n.
 * Grid-stride loop, so any grid/block configuration is valid.
 */
__global__
void multiply(const unsigned int n,
              float *first,
              float *second,
              float *results) {
    const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int k = start; k < n; k += step) {
        results[k] = first[k] * second[k];
    }
}
/*
* allocateStuff allocates stuff.
* It calls cudaMallocManaged (malloc for cuda) on all necessary
* arrays and vectors of arrays. This ensures these arrays are
* in the gpu memory and can therefore be used by cuda.
*/
void allocateStuff(const unsigned int firstNodes,
const unsigned int secondNodes,
const unsigned int resultNodes,
const unsigned int amountWeights,
float *&firstLayer,
float *&secondLayer,
float *&resultLayer,
float *&weights,
std::vector<float*> &firstWeightLayer,
std::vector<float*> &secondWeightLayer) {
// One managed (host+device visible) array per node layer, plus the
// flat pool of random weights.
// NOTE(review): no cudaMallocManaged return code is checked — consider
// routing these through cudaCheckErrors.
cudaMallocManaged(&firstLayer, firstNodes * sizeof(float));
cudaMallocManaged(&secondLayer, secondNodes * sizeof(float));
cudaMallocManaged(&resultLayer, resultNodes * sizeof(float));
cudaMallocManaged(&weights, amountWeights * sizeof(float));
// One managed array per destination node in each weight layer.
// NOTE(review): each firstWeightLayer entry gets secondNodes floats,
// but main()'s comment suggests one weight per SOURCE node
// (firstNodes per entry) — confirm the intended per-entry length.
for (auto& nodeLayer : firstWeightLayer) {
cudaMallocManaged(&nodeLayer, secondNodes * sizeof(float));
}
for (auto& nodeLayer : secondWeightLayer) {
cudaMallocManaged(&nodeLayer, resultNodes * sizeof(float));
}
}
/*
* freeStuff() is a series of three functions which assure that any
* array or vector of arrays fed to it is freed from the memory.
*/
// freeStuff(): variadic cudaFree over any number of device pointers.
// Base case: free a single pointer.
template<typename T, typename... Args>
void freeStuff(T *t) {
cudaFree(t);
}
// Recursive case: free the first pointer, then recurse on the rest.
template<typename T, typename... Args>
void freeStuff(T *t, Args... args) {
freeStuff(t);
freeStuff(args...);
}
// Overload: free every device array held in a vector of pointers.
void freeStuff(std::vector<float*> &vec) {
for (auto& v : vec) {
freeStuff(v);
}
}
// Not yet implemented: the body is a single empty statement, so calling
// this is currently a no-op. Intended to copy layerLength weights per
// entry of `vec` out of the flat `weights` array starting at `index`.
void weightLayerInit(unsigned int &index,
float *&weights,
const unsigned int layerLength,
std::vector<float *> &vec) {
//Maybe do something with splicing of weights, so index can be 0 and this
//function can be global. Or find a way to do that without index being 0.
//Also remember that vec is a weightLayer, containing X arrays of length
//layerLength (where X is the amount of nodes in the next layer).
;
}
/*
 * Initialise every weight layer in `vecs` from the flat `weights` array,
 * advancing `index` by each layer's length so consecutive layers consume
 * consecutive slices of `weights`.
 * Bug fix: the assert used `cond || "msg"`; a string literal is a
 * non-null pointer, so the whole expression was always true and the
 * size check never fired. `cond && "msg"` is the idiom that actually
 * checks the condition while keeping the message in the assert output.
 */
void weightLayerInit(unsigned int &index,
float *&weights,
const std::vector<unsigned int> &layerLength,
std::vector<std::vector<float *>> &vecs) {
assert(layerLength.size() == vecs.size() && "layerLength and vecs don't have the same size!");
for (unsigned int i = 0; i < vecs.size(); i++) {
weightLayerInit(index, weights, layerLength[i], vecs[i]);
index += layerLength[i];
}
}
void forward() {
}
/*
 * Host driver for the toy 5-3-1 network: allocate layers and weights in
 * managed memory, zero the layers, fill the weight pool with random
 * values, then (stub) distribute them into per-layer weight arrays.
 * Fixes: the weight-layer vectors were fill-constructed with
 * `new float[...]`, which copies ONE pointer into every slot (all
 * entries aliased the same allocation) and leaked it as soon as
 * allocateStuff overwrote the slots; `weights` was also never freed.
 */
int main () {
unsigned int firstNodes = 5, secondNodes = 3, resultNodes = 1;
const unsigned int amountWeights = (firstNodes + resultNodes) * secondNodes;
float *firstLayer;
float *secondLayer;
float *resultLayer;
float *weights;
//For every node it goes to, from every node it came from
//This way we can multiply the from nodes with the weights easily
//(the actual arrays are allocated by allocateStuff below)
std::vector<float*> firstWeightLayer(secondNodes, nullptr);
std::vector<float*> secondWeightLayer(resultNodes, nullptr);
//Put all the necessary stuff in the gpu memory
allocateStuff(firstNodes, secondNodes, resultNodes, amountWeights,
firstLayer, secondLayer, resultLayer,
weights, firstWeightLayer, secondWeightLayer);
//Initialise all the layers to 0. Should not be necessary, still doing it for now
layerInit<<<1,256>>>(firstNodes,
secondNodes,
resultNodes,
firstLayer,
secondLayer,
resultLayer);
//Fill an array with 'random' weights. This will be used to initialise the weightLayers
unsigned long int seed = 12345;
fillWeights<<<1,256>>>(weights, seed, amountWeights);
unsigned int globalIndex = 0;
const std::vector<unsigned int> layerLengths = {firstNodes, secondNodes};
std::vector<std::vector<float *>> weightLayers = {firstWeightLayer, secondWeightLayer};
weightLayerInit(globalIndex, weights, layerLengths, weightLayers);
//multiply<<<1,256>>>(n, first, second, results);
//Wait for all queued kernels before touching managed memory on the host
cudaDeviceSynchronize();
/*
float maxError = 0.0f;
for (unsigned int i = 0; i < n; i++) {
maxError = fmax(maxError, fabs(results[i]-6.0f));
}
std::cout << "Max error: " << maxError << std::endl;
*/
//Release everything allocated by allocateStuff (weights was leaked before)
freeStuff(firstLayer, secondLayer, resultLayer, weights);
freeStuff(firstWeightLayer);
freeStuff(secondWeightLayer);
cudaCheckErrors("Hi!");
//Necessary to be able to use nvprof
cudaProfilerStop();
return 0;
}
|
8,806 | __global__
/*
 * For record idx (< N) in the flat fixed-width text buffer `data`
 * (row width W, coordinate field starting at 1-based column L_POS),
 * parse two 5-character "DDD.D"-style fields — latitude, then longitude
 * starting 6 characters later — where a space means a missing leading
 * digit, and store the 2-D Euclidean distance from (x2, y2) in z[idx].
 * NOTE(review): dig1..dig3 are assigned 0 in the space branches but
 * never read afterwards — only the tmp_* accumulators matter.
 */
void euclid(char *data, float x2, float y2,float *z, int N, int W, int L_POS)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x;
float tmp_lat=0.0, tmp_long=0.0;
// byte offset of this record's coordinate field (L_POS is 1-based)
int position = ( idx * W ) + L_POS - 1;
if(idx < N) {
// first 5-char field: latitude digits "DDd.D"
char temp1[5];
for( int i = 0 ; i < 5 ; i++ ) {
temp1[i] = data[position+i];
}
// second 5-char field: longitude, 6 chars after the first field start
char temp2[5];
for( int i = 0 ; i < 5 ; i++ ) {
temp2[i] = data[position+6+i];
}
int dig1, dig2, dig3, dig_1;
// latitude: hundreds digit ('0' is ASCII 48; space = absent)
if( temp1[0] == ' ' ) { dig1 = 0; }
else {
dig1 = temp1[0] - 48;
tmp_lat += dig1 * 100;
}
// latitude: tens digit
if( temp1[1] == ' ' ) { dig2 = 0; }
else {
dig2 = temp1[1] - 48;
tmp_lat += dig2 * 10;
}
// latitude: units digit
if( temp1[2] == ' ' ) { dig3 = 0; }
else {
dig3 = temp1[2] - 48;
tmp_lat += dig3 * 1;
}
// latitude: tenths digit (temp1[3] is the decimal point, skipped)
dig_1 = temp1[4] - 48;
tmp_lat += (float) dig_1 / 10;
// longitude: hundreds digit
if( temp2[0] == ' ' ) { dig1 = 0; }
else {
dig1 = temp2[0] - 48;
tmp_long += dig1 * 100;
}
// longitude: tens digit
if( temp2[1] == ' ' ) { dig2 = 0; }
else {
dig2 = temp2[1] - 48;
tmp_long += dig2 * 10;
}
// longitude: units digit
if( temp2[2] == ' ' ) { dig3 = 0; }
else {
dig3 = temp2[2] - 48;
tmp_long += dig3 * 1;
}
// longitude: tenths digit (temp2[3] is the decimal point, skipped)
dig_1 = temp2[4] - 48;
tmp_long += (float) dig_1 / 10;
// Euclidean distance from the query point (x2, y2)
z[idx]=sqrt(((tmp_lat-x2)*(tmp_lat-x2))+((tmp_long-y2)*(tmp_long-y2)));
}
}
|
8,807 | //#include "knn.h"
#include<time.h>
#include<stdio.h>
#include<cuda_runtime.h>
/*
TODO: add 'dim3 block', 'dim3 thread' check
TODO: add __shared__ size check
TODO: add CHECK_ERROR
TODO: current only support two-dim of dim3
*/
template<class T>
__global__ void neighbors(T *x, T *dataset,const unsigned int col, unsigned int num_samples, T *ans );
//__constant__ float d_x[1000];
/*
 * Host wrapper: multiply vector x (1 x col) by matrix `dataset`
 * (num_samples row-major rows of length col), writing the num_samples
 * dot products into ans.
 * NOTE(review): parameter k is never used — confirm intent.
 * NOTE(review): no CUDA return codes are checked and the grid/block
 * shape is hard-coded (see the TODOs at the top of the file).
 */
template<class T>
int
vec_mul_mat(T *x, T *dataset, T *ans, const unsigned int col, unsigned int num_samples, unsigned int k){
/* Compute a vector times a matrix, e.g. [1x1000 times 1000x2,000,000],
in which case a 1x2,000,000 result is returned.
*/
// stage the query vector on the device
T *d_x = NULL;
size_t size1 = sizeof(T)*col;
cudaMalloc((void **)&d_x,size1);
cudaMemcpy(d_x, x, size1,cudaMemcpyHostToDevice);
// cudaMemcpyToSymbol(d_x,x,size1);
//TODO: need handle whether the block is smaller than need!
dim3 block(100,100);   // grid shape: 100 x 100 blocks
dim3 thread(1,256);    // block shape: 256 threads along y
// stage the full dataset on the device
T *d_dataset = NULL;
size_t size2 = sizeof(T)*col*num_samples;
cudaMalloc((void **)&d_dataset, size2);
cudaMemcpy(d_dataset, dataset,size2,cudaMemcpyHostToDevice);
T *d_ans = NULL;
size_t size3 = sizeof(T)*num_samples;
cudaMalloc((void **)&d_ans, size3);
neighbors<T><<<block,thread>>>(d_x,d_dataset,col,num_samples,d_ans);
// block until the kernel finishes before copying results back
cudaDeviceSynchronize();
cudaMemcpy(ans, d_ans, size3, cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_dataset);
cudaFree(d_ans);
return 0;
}
// 先写个以行为单位的版本
template<class T>
__global__ void neighbors(T *x, T *dataset,const unsigned int col, unsigned int num_samples, T *ans ){
/* 计算向量和矩阵的相乘,至于矩阵和矩阵的相乘,后续再实现*/
// extern __shared__ T x1[];
// for(int i=0;i<col; i++){
// x1[i]=x[i] ;
// }
// __syncthreads();
unsigned int idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
// 当前每一行的网格数量乘以网格一行的数量,为总的一行的量,× 当前线程的列所在位置
unsigned int thread_idx = (gridDim.x * blockDim.x)*idy +idx;
if (thread_idx <= num_samples){
T tmp = (T)0;
T *p = dataset + thread_idx*col;
for(int i=0; i<col; i++){
tmp += x[i] * *(p+i);
}
ans[thread_idx] = tmp;
}
}
// const unsigned int col = 1000;
// const unsigned int num_samples = 20;
// float x[col];
//
// float ans[num_samples];
// float dataset[num_samples][col];
float *knn_one(float *x, float *dataset, float *ans,
const unsigned int col, const unsigned int num_samples){
float *p = dataset;
vec_mul_mat<float>(x,p,ans,col,num_samples,col);
return ans;
}
//int
//main(){
//
// printf("here=================\n");
//
// for(int i=0; i<col; i++){
// x[i] = (float)(i+1);
// }
//
// for (int i=0; i<num_samples; i++){
// ans[i] = 0.0;
// for(int j=0; j<col; j++){
// dataset[i][j] = 2.0;
// }
// }
// float *p = &dataset[0][0];
//
//
// clock_t t;
// t = clock();
// vec_mul_mat<float>(x,p,ans,col,num_samples,col);
// t = clock() - t;
// printf ("%f seconds \n", ((float)t)/CLOCKS_PER_SEC);
//
//
// for(int j = 0; j<num_samples; j++){
// printf("cur%d %f\n",j, ans[j]);
// }
// return 0;
//}
|
8,808 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 16
// Matrixes Multiplcation (Global Memory)
// Matrixes Multiplcation (Global Memory): C = A * B, one thread per
// output element (idy = row, idx = column).
// NOTE(review): the inner dimension and both row strides use ncol, so
// this is only correct for square matrices (nrow == ncol) — which is
// how main() calls it; confirm before reusing with rectangular inputs.
__global__ void multiply_gm(float *C,float *A,float *B, int nrow,int ncol)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index=idy*ncol+idx;
// guard: the grid is rounded up, so edge threads must exit
if (idy<nrow && idx<ncol){
float sum=0.0f;
for(int k=0;k<ncol;k++){
sum+=A[idy*ncol+k]*B[k*ncol+idx];
}
C[index] = sum;
}
}
// Ceiling division: smallest integer >= a/b (for positive a and b).
int div_up(int a,int b){
    return (a % b != 0) ? (a / b + 1) : (a / b);
}
/*
 * Host driver: square matrix multiply on the GPU with event timing.
 * Usage: prog <matrix_order>
 * Fixes: argv[1] was dereferenced without checking argc; the element
 * count was held in a float (now size_t, exact); the timing events were
 * never destroyed.
 */
int main(int argc, char* argv[]){
    // guard: matrix order comes from the command line
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_order>\n", argv[0]);
        return EXIT_FAILURE;
    }
    float *A_h,*B_h,*C_h; // Host matrixes
    float *A_d,*B_d,*C_d; // Device matrixes
    int nrow = atoi(argv[1]); // rows
    int ncol = nrow;          // cols (square matrix)
    size_t N = (size_t)nrow * ncol; // number of elements
    //GPU Time
    cudaEvent_t start, stop;
    float time;
    size_t size = N * sizeof(float);
    A_h = (float *)malloc(size);
    B_h = (float *)malloc(size);
    C_h = (float *)malloc(size);
    //Initializing Host matrixes with constants so the product is predictable
    for (int i=0; i<nrow; i++){
        for(int j=0;j<ncol;j++){
            A_h[i*ncol+j] = 1.0f;
            B_h[i*ncol+j] = 2.0f;
            //A_h[i*ncol+j] = rand()/100.0f;
            //B_h[i*ncol+j] = rand()/100.0f;
        }
    }
    cudaMalloc((void **) &A_d,size);
    cudaMalloc((void **) &B_d,size);
    cudaMalloc((void **) &C_d,size);
    // Host to Device transfer
    cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
    // One thread per output element, grid rounded up to whole blocks
    dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
    dim3 n_blocks(div_up(ncol,block_size.x),div_up(nrow,block_size.y)) ;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    multiply_gm<<< n_blocks, block_size >>> (C_d,A_d,B_d,nrow,ncol);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time : %f ms\n",time);
    // Result from Device to Host
    cudaMemcpy(C_h, C_d, size,cudaMemcpyDeviceToHost);
    // release the timing events (previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    system("sleep 1");
    free(A_h);
    free(B_h);
    free(C_h);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
|
8,809 |
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <unistd.h>
#include <stdio.h>
__global__ //set global so this runs on the GPU
/*
 * Matrix multiply c += a * b (aRow x aCol times aCol x bCol).
 * Every thread walks all rows i; columns j are partitioned by global
 * thread id. c must be zeroed beforehand because it accumulates (+=).
 * NOTE(review): aSize/bSize are never used in the body.
 * NOTE(review): the column stride is blockDim.x but block b starts at
 * b*stride, so when bCol > blockDim.x multiple blocks can visit the
 * same j and double-accumulate into c — confirm against the launch
 * configuration before trusting results for wide matrices.
 */
void multiply(float *a, float *b, float *c, int aRow, int aCol,
              int bCol, const float aSize, const float bSize) {
int index = threadIdx.x;
int stride = blockDim.x;
int block = blockIdx.x;
for(int i = 0; i < aRow; i += 1)
for(int j = index + (block * stride); j < bCol; j+= stride)
for(int k = 0; k < aCol; k += 1)
c[i*bCol + j] += a[i*aCol + k] * b[j + k*bCol];
__syncthreads(); //wait for all threads to finish
return;
}
/*
 * CPU reference matrix multiply used to validate the GPU result.
 * Accumulates into c (c[i*bCol+j] += ...), so c must start zeroed.
 */
void scalar_multiply(float *a, float *b, float *c, int aRow, int aCol,
                     int bCol) {
    for (int row = 0; row < aRow; ++row) {
        for (int col = 0; col < bCol; ++col) {
            float acc = c[row*bCol + col];
            for (int k = 0; k < aCol; ++k) {
                acc += a[row*aCol + k] * b[col + k*bCol];
            }
            c[row*bCol + col] = acc;
        }
    }
    return;
}
int main(int argc, char *argv[])
{
if(argc < 5) {
std::cerr << "usage: " << argv[0] << " aRows aCols bRows bCols\n";
return(-1);
}
if(atoi(argv[2]) != atoi(argv[3])) {
std::cerr << "error! aCols must match bRows. " <<
argv[2] << ", " << argv[3] << std::endl;
return(-1);
}
srand(4); //so creative
int errorcheck = 0;
int threads = 512;
int blocks = 512;
int DEBUG = 0;
//accept 4 args: row, col, for a and b
float aRow = atoi(argv[1]);
float aCol = atoi(argv[2]);
float bRow = atoi(argv[3]);
float bCol = atoi(argv[4]);
float cRow = aRow;
float cCol = bCol;
float aSize = aRow * aCol * sizeof(float);
float bSize = bRow * bCol * sizeof(float);
float cSize = cRow * cCol * sizeof(float);
float *a = (float *)malloc(aSize);
float *b = (float *)malloc(bSize);
float *c = (float *)malloc(cSize);
float *cu_a;
float *cu_b;
float *cu_c;
//malloc shared memory that can be accessed via GPU and CPU
cudaMallocManaged(&cu_a, aSize);
cudaMallocManaged(&cu_b, bSize);
cudaMallocManaged(&cu_c, cSize);
//initialize them to randoms
for(int i = 0; i < aRow*aCol; i++) {
a[i] = cu_a[i] = rand() % 1000;
}
for(int i = 0; i < bRow*bCol; i++) {
b[i] = cu_b[i] = rand() % 1000;
}
for(int i = 0; i < aRow*bCol; i++) {
c[i] = cu_c[i] = 0;
}
//warmup
std::cout << "warming up...\n";
multiply<<<blocks, threads>>>(cu_a, cu_b, cu_c, aRow, aCol, bCol, aSize, bSize);
//after warming up, set memory back to 0
cudaMemset(cu_c, 0, cSize);
std::cout << "done.\nrunning tests...\n";
if(errorcheck){
//run a CPU version to check for errors
scalar_multiply(a, b, c, aRow, aCol, bCol);
}
double fulltime = 0;
int repeats = 1;
for(int i=0; i<repeats; i++) {
//reset memory to zeros
cudaMemset(cu_c, 0, cSize);
std::clock_t start = std::clock();
multiply<<<blocks, threads>>>(cu_a, cu_b, cu_c, aRow, aCol, bCol, aSize, bSize);
//wait for all threads to finish before "timing" the code
cudaDeviceSynchronize();
std::clock_t end = std::clock();
fulltime += (end - start);
}
if(DEBUG) {
//print every entry
for(int i=0; i<aRow*bCol; i++)
std::cerr << "c[" << i << "]\t" << (c[i] == cu_c[i] ? "\x1B[32mPASS\x1B[0m\t" : "\x1B[31mFAIL\x1B[0m\t") << c[i] << " " << cu_c[i] << std::endl;
}
int arraycheck = 1;
if(errorcheck) {
//run error checking
for(int i=0; i<aRow*bCol; i++)
if(c[i] != cu_c[i])
arraycheck = 0;
std::cout << (arraycheck ? "\x1B[32mPASS\x1B[0m" : "\x1B[31mFAIL\x1B[0m") << std::endl;
}
float flops = (aRow*aCol*bCol*2);
double s_time = ((fulltime) / (double)(CLOCKS_PER_SEC));
std::cout << "a[" << aRow << "," << aCol << "], b[" << bRow << "," << bCol << "], c[" << cRow << "," << cCol << "]\n";
std::cout << "time: " << s_time*1000 << "ms\n";
std::cout << "performance: " << flops << " flops at " << (((float)flops / 1000000000) / ((s_time) / repeats)) << "GFlop/s\n";
if(DEBUG) {
//printout
for(int i=0; i<aRow * bCol; i++)
std::cerr << c[i] << " ";
std::cerr << std::endl;
}
//free shared memory
cudaFree(cu_a);
cudaFree(cu_b);
cudaFree(cu_c);
//free cpu memory
free(a);
free(b);
free(c);
return 0;
}
|
8,810 | // "Copyright 2018 <Fabio M. Graetz>"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
/*
 * Exclusive prefix sum (Hillis-Steele scan) over a single block.
 * Contract (from the indexing below): launched with ONE block of
 * exactly n threads and 2*n*sizeof(int) bytes of dynamic shared memory,
 * used as two alternating n-element buffers (pout = write, pin = read).
 */
__global__ void scanHillisSteele(int *d_out, int *d_in, int n) {
int idx = threadIdx.x;
extern __shared__ int temp[];  // double buffer: temp[0..n) and temp[n..2n)
int pout = 0, pin = 1;
temp[idx] = (idx > 0) ? d_in[idx - 1] : 0; // shift right by 1 to get exclusive scan
__syncthreads();
for (int offset = 1; offset < n; offset *= 2) {
// swap double buffer indices
pout = 1 - pout;
pin = 1 - pout;
if (idx >= offset) {
temp[pout*n+idx] = temp[pin*n+idx - offset] + temp[pin*n+idx];
} else {
temp[pout*n+idx] = temp[pin*n+idx];
}
// barrier: the read buffer of the next round is this round's writes
__syncthreads();
}
// each thread reads back the slot it wrote in the final round
d_out[idx] = temp[pout*n+idx];
}
int main() {
const int ARRAY_SIZE = 10;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// generate the input array on the host
int h_in[ARRAY_SIZE]{1, 2, 5, 7, 8, 10, 11, 12, 15, 19};
int h_out[ARRAY_SIZE];
// declare GPU memory pointers
int * d_in;
int * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
scanHillisSteele<<<1, ARRAY_SIZE, 2 * ARRAY_BYTES>>>(d_out, d_in, ARRAY_SIZE);
cudaDeviceSynchronize();
// transfer the resulting array to the cpu
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the input and resulting array
std::cout << "Input:" << std::endl;
for (int i = 0; i < ARRAY_SIZE; ++i) {
std::cout << h_in[i] << " " << std::flush;
}
std::cout << std::endl << "Exclusive scan with operation +:" << std::endl;
for (int i = 0; i < ARRAY_SIZE; ++i) {
std::cout << h_out[i] << " " << std::flush;
}
std::cout << std::endl;
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
8,811 | #include<stdio.h>
#include<iostream>
#include<ctime>
#include<cuda_runtime.h>
// Integer base-2 logarithm: index of the highest set bit of i
// (floor(log2(i)) for i >= 1; returns 0 for i <= 1).
int log2(int i){
    int bits = 0;
    for (i >>= 1; i != 0; i >>= 1) {
        ++bits;
    }
    return bits;
}
// Reverse the low `bits` bits of w: bit 0 becomes bit bits-1, etc.
int bit_reverse(int w, int bits){
    int out = 0;
    for (int pos = bits - 1; pos >= 0; --pos) {
        out |= (w & 1) << pos;  // take the current lowest bit, place it mirrored
        w >>= 1;
    }
    return out;
}
// CPU reference histogram: count each of the ARRAY_SIZE inputs into
// BIN_COUNT bins by modulo. h_bins must be zeroed by the caller.
void cpu_histo(int *h_bins, int *h_in, const int ARRAY_SIZE, const int BIN_COUNT){
    int idx = 0;
    while (idx < ARRAY_SIZE) {
        ++h_bins[h_in[idx] % BIN_COUNT];
        ++idx;
    }
}
// One thread per input element: bin by modulo and count with a global
// atomic (heavy contention on d_bins, but always correct).
// NOTE(review): no bounds guard — the launch must supply exactly one
// thread per input element (main uses ARRAY_SIZE/64 blocks of 64).
__global__ void simple_histo(int *d_bins,const int *d_in,const int BIN_COUNT){
int index = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[index];
int myBin = myItem % BIN_COUNT;
atomicAdd(&d_bins[myBin],1);
}
/*
 * Histogram with one private bin row per thread in shared memory,
 * followed by a tree reduction across rows and a plain write-out.
 * Contract (from the indexing below): single block of <= 512 threads
 * (main launches <<<1,512>>>) — `cache` is indexed by the GLOBAL thread
 * id and the final write to d_bins is a non-atomic assignment, both of
 * which are only valid when gridDim.x == 1. BIN_COUNT must be <= 16.
 * Shared memory: 512*16 ints = 32 KB (static).
 */
__global__ void local_histo(int *d_bins, const int *d_in, const int N, const int BIN_COUNT){
int index = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;//gridDim.x = 1
int offset = 0;
__shared__ int cache[512][16];
// zero this thread's private bin row
for(int i = 0; i < BIN_COUNT; i++){
cache[index][i] = 0;
}
__syncthreads();
// grid-stride accumulation into the private row (no atomics needed)
while(index + offset < N){
int myItem = d_in[index + offset];
int myBin = myItem%BIN_COUNT;
cache[index][myBin] += 1;
offset += stride;
}
__syncthreads();
//reduction: halve the active rows each round, summing pairs of rows
int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
for(int j = 0; j < BIN_COUNT; j++)
cache[threadIdx.x][j] += cache[threadIdx.x + i][j];
}
__syncthreads();
i /= 2;
}
// row 0 now holds the block total; thread 0 publishes it
if(threadIdx.x == 0){
for(int i = 0; i < BIN_COUNT; i++)
d_bins[i] = cache[0][i];
}
}
/*
 * Optimised histogram: one shared per-block bin array updated with
 * shared-memory atomics, flushed to global memory with one global
 * atomic per bin. Valid for any grid size; BIN_COUNT must be <= 16
 * (the static cache size) and <= blockDim.x (for the flush step).
 */
__global__ void local_histo_op(int *d_bins, const int *d_in, const int N, const int BIN_COUNT){
int index = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
__shared__ int cache[16];
// zero the block's shared bins
// NOTE(review): every thread zeroes all bins (redundant but harmless)
for(int i = 0; i < BIN_COUNT; i++){
cache[i] = 0;
}
__syncthreads();
// grid-stride accumulation with cheap shared-memory atomics
while(index + offset < N){
int myItem = d_in[index + offset];
int myBin = myItem%BIN_COUNT;
atomicAdd(&cache[myBin],1);
offset += stride;
}
__syncthreads();// this barrier matters: the block's cache must be complete before it is flushed to global memory
// first BIN_COUNT threads each publish one bin
if(threadIdx.x < BIN_COUNT){
atomicAdd(&d_bins[threadIdx.x],cache[threadIdx.x]);
}
}
int main(int argc, char **argv){
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if(deviceCount == 0){
fprintf(stderr,"error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
const int ARRAY_SIZE = 1<<20;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
const int BIN_COUNT = 16;
const int BIN_BYTES = BIN_COUNT * sizeof(int);
//generate the input array on the host
int h_in[ARRAY_SIZE];
for(int i =0; i < ARRAY_SIZE; i++){
h_in[i] = bit_reverse(i,log2(ARRAY_SIZE));
}
int h_bins[BIN_COUNT];
for(int i = 0; i < BIN_COUNT; i++ ){
h_bins[i] = 0;
}
//declare GPU memory pointers
int *d_in;
int *d_bins;
//allocate GPU memory
cudaMalloc((int **)&d_in,ARRAY_BYTES);
cudaMalloc((int **)&d_bins,BIN_BYTES);
// transfer the arrays to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice);
//timing on GPU
float gpu_elapsed_time;
cudaEvent_t gpu_start, gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start, 0);
int whichKernel = 0;
if(argc == 2){
whichKernel = atoi(argv[1]);
}
// launch the kernel
switch(whichKernel) {
case 0:
printf("Running simple histo\n");
simple_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT);
break;
case 1:
printf("Running local histo\n");
local_histo<<<1,512>>>(d_bins, d_in,ARRAY_SIZE,BIN_COUNT);
break;
case 2:
printf("Running local opti histo");
local_histo_op<<<ARRAY_SIZE / 256, 256>>>(d_bins,d_in,ARRAY_SIZE,BIN_COUNT);
break;
default:
fprintf(stderr, "error: ran no kernel\n");
exit(EXIT_FAILURE);
}
// timing report
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
std::cout<<"GPU took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;
// copy back the sum from GPU
cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);
for(int i = 0; i < BIN_COUNT; i++) {
printf("GPU bin %d: count %d\n", i, h_bins[i]);
}
memset(h_bins, 0, BIN_COUNT * sizeof(int));
clock_t cpu_start = clock();
for(int i = 0; i < ARRAY_SIZE; i++){
h_bins[h_in[i]%BIN_COUNT] += 1;
}
clock_t cpu_stop = clock();
clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;
for(int i = 0; i < BIN_COUNT; i++) {
printf("CPU bin %d: count %d\n", i, h_bins[i]);
}
//free GPU memory allocate
cudaFree(d_in);
cudaFree(d_bins);
return 0;
}
|
8,812 | #include <fstream>
#include <iostream>
#include <sstream>
#include <string>
/*
* Configuration
*/
static const std::string output_path = "output.ppm";
static const int N = 1024;
/*
* Image output
*/
/*
 * Write an 8-bit RGB image as a binary PPM (P6) file.
 * `data` must hold width*height*3 bytes in row-major RGB order.
 * Bug fix: the stream is now opened in binary mode — P6 carries raw
 * pixel bytes, and text mode would corrupt them on platforms that
 * translate line endings.
 */
void write_image(const std::string& file_name, const unsigned char *data, std::size_t width, std::size_t height)
{
    // PPM header: magic number, dimensions, maximum channel value
    std::ostringstream hdr_oss;
    hdr_oss << "P6\n";
    hdr_oss << width << " " << height << "\n";
    hdr_oss << 255 << "\n";
    std::string hdr = hdr_oss.str();
    std::ofstream ofs(file_name, std::ios::binary);
    ofs.write(hdr.data(), hdr.size());
    ofs.write(reinterpret_cast<const char*>(data), width * height * 3);
}
/*
* Complex arithmetic
*/
struct cu_complex
{
float r; // real part
float i; // imaginary part
__device__ cu_complex() : r{0}, i{0} { }
__device__ cu_complex(float r, float i) : r{r}, i{i} { }
__device__ float modulus2() { return r * r + i * i; }
};
__device__ cu_complex operator+(const cu_complex &a, const cu_complex &b)
{
return cu_complex{a.r + b.r, a.i + b.i};
}
__device__ cu_complex operator*(const cu_complex &a, const cu_complex &b)
{
return cu_complex{a.r * b.r - a.i * b.i, a.r * b.i + a.i * b.r};
}
/*
* Mandelbrot logic
*/
/*
 * Iterate a_{k+1} = a_k^2 + z for the pixel (x, y) mapped into the
 * complex plane (both axes centred on N/2 and scaled by 4/N, so the
 * image must be square). Returns 255 - i (bright) if the orbit escapes
 * |a|^2 > 4 at iteration i, or 0 (black) if it survives 255 iterations.
 */
__device__ int mandelbrot(int x, int y)
{
cu_complex z{static_cast<float>(x - N/2) / (N / 4), -static_cast<float>(y - N/2) * 4/N};
cu_complex a;
for (int i = 0; i < 255; ++i)
{
a = a * a + z;
if (a.modulus2() > 4)
{
// diverges
return 255 - i;
}
}
// probably converges
return 0;
}
/*
* Kernel and main function
*/
/*
 * One block per pixel (grid is N x N, one thread per block): compute
 * the greyscale escape value and write it to the pixel's R, G and B
 * bytes in the packed 3-bytes-per-pixel image buffer.
 */
__global__ void kernel(unsigned char *img)
{
const int x = blockIdx.x;
const int y = blockIdx.y;
const int result = mandelbrot(x, y);
img[0 + 3 * x + 3 * N * y] = result;
img[1 + 3 * x + 3 * N * y] = result;
img[2 + 3 * x + 3 * N * y] = result;
}
/*
 * Render an N x N Mandelbrot image on the GPU and write it as a PPM.
 * Fix: the host image buffer allocated with new[] was never released;
 * it is now delete[]d after the file is written.
 */
int main(int argc, char **argv)
{
    unsigned char *img = new unsigned char[3 * N * N];
    unsigned char *dev_img;
    cudaMalloc(&dev_img, 3 * N * N);
    // one block per pixel, one thread per block
    dim3 grid_size{N, N};
    kernel<<<grid_size, 1>>>(dev_img);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(img, dev_img, 3 * N * N, cudaMemcpyDeviceToHost);
    write_image(output_path, img, N, N);
    cudaFree(dev_img);
    delete[] img;  // was leaked before
}
|
8,813 | #include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include "GPUScaleB.cuh"
#include <thrust/device_ptr.h>
#include <thrust/extrema.h>
#define Z_MASS 91.1876
#define M_BB_SC 62.5
#define M_TT_SC 62.5
#define MET_SC_1 0.008
#define MET_SC_2 0.0005
#define MET_SC_3 0.000125
#define CHI_SC 0.16
__device__ float getPx(float pt, float phi){
return pt * cosf(phi);
}
__device__ float getPy(float pt, float phi){
return pt * sinf(phi);
}
__device__ float getPz(float pt, float eta){
return pt * sinhf(eta);
}
__device__ float getPScaler(float pt, float eta){
return pt * coshf(eta);
}
__device__ float getE(float pt, float eta, float m){
float p = getPScaler(pt, eta);
return sqrtf(m * m + p * p);
}
__device__ float getMass( float energy, float px, float py, float pz){
return sqrtf(energy * energy - px * px - py * py - pz * pz);
}
__device__ float getdphi(float v1phi, float v2phi)
{
float ret;
if(fabsf(v1phi - v2phi) > CUDART_PI_F)
ret = ((float)(v1phi > 0) - (float)(v1phi <= 0)) * (2 * CUDART_PI_F - fabsf(v1phi - v2phi));
else
ret = v2phi - v1phi;
return ret;
}
__device__ float getPhiFromPxPy(float px, float py)
{
return (float)(px>0) * atanf(py / px) +
(float)(px<=0 && py > 0) * (atanf(py / px) + CUDART_PI_F) +
(float)(px<=0 && py <= 0) * (atanf(py / px) - CUDART_PI_F);
}
__device__ float getPsiMbb(float m_bb, float m_baseline, float scale_const_bb)
{
return (m_bb - m_baseline) * (m_bb - m_baseline) / scale_const_bb;
}
__device__ float getPsiMtt(float m_tt, float m_baseline, float scale_const_tt)
{
return (m_tt - m_baseline) * (m_tt - m_baseline) / scale_const_tt;
}
__device__ float getPsiMET(float vl_pt, float vh_pt, float tau_pt, float lep_pt, float omega,float sc_1, float sc_2, float sc_3)
{
float f_tau_l = vl_pt / (vl_pt + lep_pt);
float f_tau_h = vh_pt / (vh_pt + tau_pt);
float A_1 = 0.5 * ((f_tau_l - f_tau_h) / (f_tau_l + f_tau_h) + 1);
float ret = ((float)(A_1 > 0 && A_1 <= 0.8) * (A_1 - 0.8) * (A_1 - 0.8) / sc_1) +
((float)(A_1 > 0.8 && A_1 <= 1) * 0) +
((float)(omega > 1) * (omega - 1) * (omega - 1) / sc_2) +
((float)(omega <= 0) * (80 + (omega * omega / sc_3))) ;
return ret;
}
__device__ float getPsiChi(float chi0, float chi1, float sc_chi)
{
	// Penalty on the scale factors themselves: squared elliptical distance of
	// (log2 chi0, log2 chi1) from a rotated-Gaussian fit on the log2 plane,
	// normalised by sc_chi. Fit parameters were determined externally
	// (a linear-plane parameter set existed but is not used).
	float lx = log2f(chi0);
	float ly = log2f(chi1);
	// Ellipse centre, widths and rotation angle on the log2 plane.
	float cx = 9.62213e-02;
	float cy = 1.18202e-01;
	float wx = 4.81883e-01;
	float wy = 6.37269e-01;
	float ang = 5.40952e-01;
	// Rotate into the ellipse frame, then take the normalised radius squared.
	float u = (lx - cx) * cosf(ang) + (ly - cy) * sinf(ang);
	float v = (lx - cx) * sinf(ang) - (ly - cy) * cosf(ang);
	float r2 = (powf(u, 2)) / powf(wx, 2) + (powf(v, 2)) / powf(wy, 2);
	return r2 / sc_chi;
}
__global__ void mykernel( float bjet0_pt, float bjet0_eta, float bjet0_phi, float bjet0_m,
float bjet1_pt, float bjet1_eta, float bjet1_phi, float bjet1_m,
float lep0_pt, float lep0_eta, float lep0_phi, float lep0_m,
float tau0_pt, float tau0_eta, float tau0_phi, float tau0_m,
float met_pt, float met_eta, float met_phi, float met_m,
bool * pass, float* score, int N)
{
// One thread per (i, j) grid point; (i, j) map to b-jet energy scale factors
// chi0/chi1 scanned in 0.01 steps from 0.5. The thread rescales the two
// b-jets, propagates the change into the MET, rebuilds m_bb and (via the
// collinear approximation) m_tautau, and stores penalty scores.
// Output layout: pass[N*N]; score holds 5 planes of N*N floats —
// plane 0 = total score, planes 1..4 = m_bb / m_tt / MET / chi components.
// met_eta and met_m are unused (kept for interface compatibility).
int i = (threadIdx.x + blockIdx.x * blockDim.x);
int j = (threadIdx.y + blockIdx.y * blockDim.y);
// FIX: bounds guard — previously any launch with more than N x N threads
// wrote out of bounds.
if (i >= N || j >= N) return;
float chi0 = 0.5 + 0.01 * i;
float chi1 = 0.5 + 0.01 * j;
// initialise outputs so non-passing points are well defined
size_t sizeof_array = N*N;
size_t ind = i*N+j;
pass[ind] = 0;
score[sizeof_array*1 + ind] = -999;
score[sizeof_array*2 + ind] = -999;
score[sizeof_array*3 + ind] = -999;
score[sizeof_array*4 + ind] = -999;
score[ind] = 100000;                       // sentinel above any real score
// m_bb with rescaled b-jets; the pT change is balanced against the MET
float bjet0_scaled_pt = bjet0_pt * chi0;
float bjet1_scaled_pt = bjet1_pt * chi1;
float met_scaled_px = getPx(met_pt, met_phi) - (getPx(bjet0_scaled_pt, bjet0_phi) - getPx(bjet0_pt, bjet0_phi)) - (getPx(bjet1_scaled_pt, bjet1_phi) - getPx(bjet1_pt, bjet1_phi));
float met_scaled_py = getPy(met_pt, met_phi) - (getPy(bjet0_scaled_pt, bjet0_phi) - getPy(bjet0_pt, bjet0_phi)) - (getPy(bjet1_scaled_pt, bjet1_phi) - getPy(bjet1_pt, bjet1_phi));
float bjet0_scaled_E = getE(bjet0_scaled_pt, bjet0_eta, bjet0_m);
float bjet0_scaled_px = getPx(bjet0_scaled_pt, bjet0_phi);
float bjet0_scaled_py = getPy(bjet0_scaled_pt, bjet0_phi);
float bjet0_scaled_pz = getPz(bjet0_scaled_pt, bjet0_eta);
float bjet1_scaled_E = getE(bjet1_scaled_pt, bjet1_eta, bjet1_m);
float bjet1_scaled_px = getPx(bjet1_scaled_pt, bjet1_phi);
float bjet1_scaled_py = getPy(bjet1_scaled_pt, bjet1_phi);
float bjet1_scaled_pz = getPz(bjet1_scaled_pt, bjet1_eta);
float m_bb_scaled = getMass(bjet0_scaled_E + bjet1_scaled_E,
bjet0_scaled_px + bjet1_scaled_px,
bjet0_scaled_py + bjet1_scaled_py,
bjet0_scaled_pz + bjet1_scaled_pz);
// omega: where the scaled MET points relative to the visible tau legs
float dphi_hl = getdphi(tau0_phi, lep0_phi);
float met_scaled_phi = getPhiFromPxPy(met_scaled_px, met_scaled_py);
float met_scaled_pt = sqrtf(met_scaled_py * met_scaled_py + met_scaled_px * met_scaled_px);
float dphi_hv_scaled = getdphi(tau0_phi, met_scaled_phi);
float dphi_lv_scaled = getdphi(lep0_phi, met_scaled_phi);
bool inside_hl_scaled = (dphi_hl * dphi_hv_scaled > 0) && (fabsf(dphi_hl) > fabsf(dphi_hv_scaled));
bool close_to_h_scaled = fabsf(dphi_hv_scaled) < CUDART_PIO4_F && fabsf(dphi_hv_scaled) < fabsf(dphi_lv_scaled);
bool close_to_l_scaled = fabsf(dphi_lv_scaled) < CUDART_PIO4_F && fabsf(dphi_lv_scaled) < fabsf(dphi_hv_scaled);
bool v_pos_pass_scaled = inside_hl_scaled || close_to_h_scaled || close_to_l_scaled;
float omega = -999;
if(!inside_hl_scaled && close_to_l_scaled && dphi_hv_scaled * dphi_hl < 0)
{
	// MET outside the h-l wedge on the lepton side: unwrap by 2*pi
	if(dphi_hl < 0)
		omega = (dphi_hv_scaled - 2 * CUDART_PI_F) / dphi_hl;
	else if (dphi_hv_scaled < 0)
		omega = (dphi_hv_scaled + CUDART_PI_F * 2.0f) / dphi_hl;
	// NOTE(review): if neither sub-condition holds, omega stays -999 — confirm intended
}
else
{
	omega = dphi_hv_scaled / dphi_hl;
}
// neutrino four-vectors for the two tau legs (collinear approximation)
float vh_scaled_pt =0;
float vh_scaled_eta=0;
float vh_scaled_phi=0;
float vh_scaled_m =0;
float vl_scaled_pt =0;
float vl_scaled_eta=0;
float vl_scaled_phi=0;
float vl_scaled_m =0;
float m_tautau_scaled = -999;
if(v_pos_pass_scaled)
{
	if (!inside_hl_scaled && close_to_h_scaled)
	{
		// all MET assigned to the hadronic leg
		vh_scaled_pt = met_scaled_pt * cosf(fabsf(dphi_hv_scaled));
		vh_scaled_eta = tau0_eta;
		vh_scaled_phi = tau0_phi;
		vh_scaled_m = 0;
	}
	else if (!inside_hl_scaled && close_to_l_scaled)
	{
		// all MET assigned to the leptonic leg
		vl_scaled_pt = met_scaled_pt * cosf(fabsf(dphi_lv_scaled));
		vl_scaled_eta = lep0_eta;
		vl_scaled_phi = lep0_phi;
		vl_scaled_m = 0;
	}
	else if (inside_hl_scaled)
	{
		// MET split between the two legs by triangulation
		vh_scaled_pt = met_scaled_pt * cosf(fabsf(dphi_hv_scaled)) - met_scaled_pt * sinf(fabsf(dphi_hv_scaled)) * (1/tanf(fabsf(dphi_hl)));
		vh_scaled_eta = tau0_eta;
		vh_scaled_phi = tau0_phi;
		vh_scaled_m = 0;
		vl_scaled_pt = met_scaled_pt * sinf(fabsf(dphi_hv_scaled)) / sinf(fabsf(dphi_hl));
		vl_scaled_eta = lep0_eta;
		vl_scaled_phi = lep0_phi;
		vl_scaled_m = 0;
	}
	float tautau_px = (getPx(vh_scaled_pt, vh_scaled_phi)+getPx(vl_scaled_pt, vl_scaled_phi)+getPx(tau0_pt, tau0_phi)+getPx(lep0_pt, lep0_phi));
	float tautau_py = (getPy(vh_scaled_pt, vh_scaled_phi)+getPy(vl_scaled_pt, vl_scaled_phi)+getPy(tau0_pt, tau0_phi)+getPy(lep0_pt, lep0_phi));
	float tautau_pz = (getPz(vh_scaled_pt, vh_scaled_eta)+getPz(vl_scaled_pt, vl_scaled_eta)+getPz(tau0_pt, tau0_eta)+getPz(lep0_pt, lep0_eta));
	float tautau_e = (getE(vh_scaled_pt, vh_scaled_eta, vh_scaled_m)+getE(vl_scaled_pt, vl_scaled_eta, vl_scaled_m)+getE(tau0_pt, tau0_eta, tau0_m)+getE(lep0_pt, lep0_eta, lep0_m));
	m_tautau_scaled = getMass(tautau_e, tautau_px, tautau_py, tautau_pz);
}
// combine the penalty components for physical (m_tautau > 0) points
if(m_tautau_scaled > 0)
{
	pass[ind] = 1;
	float score_m_bb = getPsiMbb(m_bb_scaled, Z_MASS, M_BB_SC);
	float score_m_tt = getPsiMtt(m_tautau_scaled, Z_MASS, M_TT_SC);
	float score_met = getPsiMET(vl_scaled_pt, vh_scaled_pt, tau0_pt, lep0_pt, omega, MET_SC_1, MET_SC_2, MET_SC_3);
	float score_chi = getPsiChi(chi0, chi1, CHI_SC);
	score[sizeof_array*1 + ind] = score_m_bb;
	score[sizeof_array*2 + ind] = score_m_tt;
	score[sizeof_array*3 + ind] = score_met;
	score[sizeof_array*4 + ind] = score_chi;
	score[ind] = sqrtf(score_chi * score_chi + score_m_bb * score_m_bb + score_m_tt * score_m_tt + score_met * score_met);
}
}
std::vector<double> GPUScaleB(float bjet0_pt, float bjet0_eta, float bjet0_phi, float bjet0_m,
float bjet1_pt, float bjet1_eta, float bjet1_phi, float bjet1_m,
float lep0_pt, float lep0_eta, float lep0_phi, float lep0_m,
float tau0_pt, float tau0_eta, float tau0_phi, float tau0_m,
float met_pt, float met_eta, float met_phi, float met_m){
	// Scan b-jet scale factors on a 151 x 151 grid on the GPU and return
	// {sf1, sf2, min_score, s1, s2, s3, s4} for the best grid point, or an
	// empty vector if no point qualifies or a CUDA error occurs.
	size_t N = 151;
	size_t NN = N*N;
	size_t size_bool = NN * sizeof(bool);
	size_t size_float = 5 * NN * sizeof(float);   // total score + 4 components
	// allocate device buffers — FIX: check for allocation failure instead of
	// launching with invalid pointers
	bool *pass_dev = NULL;
	float *score_dev = NULL;
	if (cudaMalloc((void**)&pass_dev, size_bool) != cudaSuccess)
		return {};
	if (cudaMalloc((void**)&score_dev, size_float) != cudaSuccess)
	{
		cudaFree(pass_dev);
		return {};
	}
	// one 1-thread block per (i, j) point; kernel initialises all outputs
	dim3 grid(N, N);
	mykernel<<<grid, 1>>>( bjet0_pt, bjet0_eta, bjet0_phi, bjet0_m,
	bjet1_pt, bjet1_eta, bjet1_phi, bjet1_m,
	lep0_pt, lep0_eta, lep0_phi, lep0_m,
	tau0_pt, tau0_eta, tau0_phi, tau0_m,
	met_pt, met_eta, met_phi, met_m,
	pass_dev, score_dev, N);
	// FIX: surface launch-configuration errors (previously ignored)
	if (cudaGetLastError() != cudaSuccess)
	{
		cudaFree(pass_dev);
		cudaFree(score_dev);
		return {};
	}
	std::vector<double> ret;
	double sf1 = -999;
	double sf2 = -999;
	double min_score = 200;
	double s1 = 100;
	double s2 = 100;
	double s3 = 100;
	double s4 = 100;
	// GPU argmin over plane 0 (total score); thrust synchronizes with the
	// kernel, so no explicit cudaDeviceSynchronize is needed here
	thrust::device_ptr<float> score_vec = thrust::device_pointer_cast(score_dev);
	int min_offset = thrust::min_element(score_vec, score_vec + NN) - score_vec;
	min_score = *(score_vec + min_offset);   // each dereference is a device->host copy
	if (min_score < 200)
	{
		s1 = *(score_vec + NN + min_offset);
		s2 = *(score_vec + NN*2 + min_offset);
		s3 = *(score_vec + NN*3 + min_offset);
		s4 = *(score_vec + NN*4 + min_offset);
		// invert the kernel's (i, j) -> chi mapping: chi = 0.5 + 0.01 * index
		sf1 = 0.01 * (double)(min_offset/N) + 0.5;
		sf2 = 0.01 * (double)(min_offset%N) + 0.5;
	}
	if (sf1 > 0 && sf2 > 0)
	{
		ret = {sf1, sf2, min_score, s1, s2, s3, s4};
	}
	cudaFree(pass_dev);
	cudaFree(score_dev);
	return ret;
}
|
8,814 |
/*
* Copyright (c) 2016-2017 Naruto TAKAHASHI <tnaruto@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdbool.h>
#include <cuda.h>
#include <random>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
//#define ROUNDS 67
#define WORDS 8
#define BLOCK_SIZE (WORDS * 2)
#define MAX_KEY_WORDS 4
#define LANE_NUM 1
#define bl_size 16
//#include <helper_functions.h>
//#include <helper_cuda.h>
//#include <cuda_runtime.h>
int ROUNDS;
/*
#define ROTL32(x,r) (((x)<<(r)) | (x>>(32-(r))))
#define ROTR32(x,r) (((x)>>(r)) | ((x)<<(32-(r))))
#define ROTL64(x,r) (((x)<<(r)) | (x>>(64-(r))))
#define ROTR64(x,r) (((x)>>(r)) | ((x)<<(64-(r))))
#define f64(x) ((ROTL64(x,1) & ROTL64(x,8)) ^ ROTL64(x,2))
#define R64x2(x,y,k1,k2) (y^=f64(x), y^=k1, x^=f64(y), x^=k2)
*/
// Supported SIMON variants, named block-bits _ key-bits.
// Only the 128-bit-block variants (128_128, 128_192, 128_256) are handled by
// the key-schedule / validation helpers below; the smaller variants are
// declared but fall through to the error branches.
enum simon_encrypt_type {
simon_ENCRYPT_TYPE_32_64 = 0,
simon_ENCRYPT_TYPE_48_72,
simon_ENCRYPT_TYPE_48_96,
simon_ENCRYPT_TYPE_64_96,
simon_ENCRYPT_TYPE_64_128,
simon_ENCRYPT_TYPE_96_96,
simon_ENCRYPT_TYPE_96_144,
simon_ENCRYPT_TYPE_128_128,
simon_ENCRYPT_TYPE_128_192,
simon_ENCRYPT_TYPE_128_256,
};
// Cipher context created by simon_init and released by simon_finish.
struct simon_ctx_t_ {
int round;                      // number of rounds (set from the global ROUNDS)
uint64_t *key_schedule;         // heap-allocated round keys (ROUNDS entries)
enum simon_encrypt_type type;   // which SIMON variant this context targets
};
typedef struct simon_ctx_t_ simon_ctx_t;
// Read the x86 time-stamp counter for coarse host-side timing.
// CPUID is issued first as a serializing instruction so earlier instructions
// retire before RDTSC executes. Host/x86-only (inline asm); not usable in
// device code.
static inline uint64_t rdtsc() {
uint32_t lo, hi;
asm volatile (
"cpuid \n" /* serializing */
"rdtsc"
: "=a"(lo), "=d"(hi) /* outputs */
: "a"(0) /* inputs */
: "%ebx", "%ecx");
/* clobbers*/
// combine the two 32-bit halves into the 64-bit counter value
return ((uint64_t) lo) | (((uint64_t) hi) << 32);
}
static inline void cast_uint8_array_to_uint64(uint64_t *dst, const uint8_t *array) {
    // Assemble a 64-bit word from 8 bytes, little-endian: array[0] is the
    // least-significant byte. (Fixed byte order regardless of host endianness.)
    uint64_t word = 0;
    for (int b = 7; b >= 0; b--) {
        word = (word << 8) | (uint64_t)array[b];
    }
    *dst = word;
}
static inline void cast_uint64_to_uint8_array(uint8_t *dst, uint64_t src) {
    // Split a 64-bit word into 8 bytes, little-endian: dst[0] receives the
    // least-significant byte. Inverse of cast_uint8_array_to_uint64.
    for (int b = 0; b < 8; b++) {
        dst[b] = (uint8_t)(src >> (8 * b));
    }
}
// Returns nonzero (1) iff key_len (in bytes) matches the key size of `type`.
// FIX: unsupported types previously returned -1, which is truthy in C, so
// callers such as simon_init (`if (!is_validate_key_len(...))`) accepted
// unsupported variants; they now return 0 (invalid).
int is_validate_key_len(enum simon_encrypt_type type, int key_len) {
    int ret;
    switch (type) {
    case simon_ENCRYPT_TYPE_128_128:
        ret = (key_len == (128 / 8));
        break;
    case simon_ENCRYPT_TYPE_128_192:
        ret = (key_len == (192 / 8));
        break;
    case simon_ENCRYPT_TYPE_128_256:
        ret = (key_len == (256 / 8));
        break;
    default:
        ret = 0;   // unsupported variant -> never valid
        break;
    }
    return ret;
}
// Round count for the supported 128-bit-block SIMON variants; -1 if the
// variant is unsupported.
int get_round_num(enum simon_encrypt_type type) {
    if (type == simon_ENCRYPT_TYPE_128_128)
        return 32;
    if (type == simon_ENCRYPT_TYPE_128_192)
        return 33;
    if (type == simon_ENCRYPT_TYPE_128_256)
        return 34;
    return -1;
}
// Number of 64-bit key words for the supported 128-bit-block variants;
// -1 if the variant is unsupported.
int get_key_words_num(enum simon_encrypt_type type) {
    if (type == simon_ENCRYPT_TYPE_128_128)
        return 2;
    if (type == simon_ENCRYPT_TYPE_128_192)
        return 3;
    if (type == simon_ENCRYPT_TYPE_128_256)
        return 4;
    return -1;
}
//typedef struct simon_ctx_t_ simon_ctx_t;
// https://eprint.iacr.org/2013/404.pdf
//
// simon128/128
// Key: 0f0e0d0c0b0a0908 0706050403020100
// Plaintext: 6c61766975716520 7469206564616d20
// Ciphertext: a65d985179783265 7860fedf5c570d18
// SIMON128/128 specification test vector, stored as little-endian 64-bit
// words with the low word first (matches the Key/Plaintext/Ciphertext listed
// in the comment above).
static const uint64_t s_key[2] = {0x0706050403020100, 0x0f0e0d0c0b0a0908};
static const uint64_t s_plain_text[2] = {0x7469206564616d20, 0x6c61766975716520};
static const uint64_t s_cipher_text[2] = {0x7860fedf5c570d18, 0xa65d985179783265};
/// for type 128_128
/*
static const uint8_t s_key_stream1[16] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
};
//// for type 128_192
static const uint8_t s_key_stream2[24] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
};
//// for type 128_256
static const uint8_t s_key_stream3[32] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
};
*/
// One 16-byte plaintext block as a byte stream (the spec test-vector
// plaintext); repeated to fill larger message buffers in the tests.
static const uint8_t s_plain_text_stream[16] = {
0x20, 0x6d, 0x61, 0x64, 0x65, 0x20, 0x69, 0x74, 0x20, 0x65, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c,
};
//static const uint8_t s_plain_text_stream[16] = {[0 ... 15] = 0x20};
//static const uint8_t s_cipher_text_stream[16] = {[0 ... 15] = 0x20};
// Expected SIMON128/128 ciphertext for the block above, as a byte stream.
static const uint8_t s_cipher_text_stream[16] = {
0x18, 0x0d, 0x57, 0x5c, 0xdf, 0xfe, 0x60, 0x78, 0x65, 0x32, 0x78, 0x79, 0x51, 0x98, 0x5d, 0xa6,
};
//#define BLOCK_SIZE 16
__host__ __device__ uint64_t ROTR64(uint64_t *x, int r)
{
	// Rotate the 64-bit word *x right by r bits (expects 0 < r < 64);
	// *x itself is not modified.
	uint64_t v = *x;
	return (v >> r) | (v << (64 - r));
}
__host__ __device__ uint64_t ROTL64(uint64_t *x, int r)
{
	// Rotate the 64-bit word *x left by r bits (expects 0 < r < 64);
	// *x itself is not modified.
	uint64_t v = *x;
	return (v << r) | (v >> (64 - r));
}
__host__ __device__ uint64_t f64(uint64_t *x)
{
	// SIMON round function f(x) = (S^1 x & S^8 x) ^ S^2 x, where S^j is a
	// left rotation by j bits (rotations written out inline).
	uint64_t v = *x;
	uint64_t rot1 = (v << 1) | (v >> 63);
	uint64_t rot8 = (v << 8) | (v >> 56);
	uint64_t rot2 = (v << 2) | (v >> 62);
	return (rot1 & rot8) ^ rot2;
}
__host__ __device__ void R64x2(uint64_t *x, uint64_t *y, uint64_t *k1, uint64_t *k2)
{
	// Two SIMON Feistel rounds in place:
	//   y = y ^ f(x) ^ k1; then x = x ^ f(y) ^ k2
	// with f(v) = (rotl(v,1) & rotl(v,8)) ^ rotl(v,2), written out inline.
	uint64_t a = *x;
	*y ^= (((a << 1) | (a >> 63)) & ((a << 8) | (a >> 56))) ^ ((a << 2) | (a >> 62));
	*y ^= *k1;
	uint64_t b = *y;
	*x ^= (((b << 1) | (b >> 63)) & ((b << 8) | (b >> 56))) ^ ((b << 2) | (b >> 62));
	*x ^= *k2;
}
// Expand a 128-bit key (16 bytes, little-endian byte stream) into the 68
// round keys rk[0..67] of SIMON128/128 (caller must size rk accordingly).
// FIX: the old code read A = K[0], B = K[1] — two single BYTES of the key —
// instead of the two 64-bit key words; simon_init even converted the words
// with cast_uint8_array_to_uint64 but never used them. The words are now
// assembled here from the byte stream.
void Simon128128KeySchedule( uint8_t *K , uint64_t *rk) // for init key
{
	int i;
	uint64_t A = 0, B = 0;
	for (i = 7; i >= 0; i--) {
		A = (A << 8) | K[i];        // key word 0 = bytes 0..7, little-endian
		B = (B << 8) | K[8 + i];    // key word 1 = bytes 8..15
	}
	uint64_t c=0xfffffffffffffffcLL, z=0x7369f885192c0ef5LL;
	for(i=0;i<64;)
	{
		rk[i++]=A;
		A^=c^(z&1)^ROTR64(&B,3)^ROTR64(&B,4);
		z>>=1;
		rk[i++]=B;
		B^=c^(z&1)^ROTR64(&A,3)^ROTR64(&A,4);
		z>>=1;
	}
	// last four round keys use fixed sequence bits instead of z
	rk[64]=A; A^=c^1^ROTR64(&B,3)^ROTR64(&B,4);
	rk[65]=B; B^=c^0^ROTR64(&A,3)^ROTR64(&A,4);
	rk[66]=A;
	rk[67]=B;
}
// Expand a 192-bit key (24 bytes, little-endian byte stream) into the 69
// round keys rk[0..68] of SIMON128/192 (caller must size rk accordingly).
// FIX: the old code read A/B/C as single BYTES K[0..2] instead of the three
// 64-bit key words; assemble the words from the byte stream.
void Simon128192KeySchedule(uint8_t *K, uint64_t *rk)
{
	uint64_t i, A = 0, B = 0, C = 0;
	for (int b = 7; b >= 0; b--) {
		A = (A << 8) | K[b];
		B = (B << 8) | K[8 + b];
		C = (C << 8) | K[16 + b];
	}
	uint64_t c=0xfffffffffffffffcLL, z=0xfc2ce51207a635dbLL;
	for(i=0;i<63;)
	{
		rk[i++]=A;
		A^=c^(z&1)^ROTR64(&C,3)^ROTR64(&C,4); z>>=1;
		rk[i++]=B; B^=c^(z&1)^ROTR64(&A,3)^ROTR64(&A,4); z>>=1;
		rk[i++]=C; C^=c^(z&1)^ROTR64(&B,3)^ROTR64(&B,4); z>>=1;
	}
	// last six round keys use fixed sequence bits instead of z
	rk[63]=A; A^=c^1^ROTR64(&C,3)^ROTR64(&C,4);
	rk[64]=B; B^=c^0^ROTR64(&A,3)^ROTR64(&A,4);
	rk[65]=C; C^=c^1^ROTR64(&B,3)^ROTR64(&B,4);
	rk[66]=A;
	rk[67]=B;
	rk[68]=C;
}
// Expand a 256-bit key (32 bytes, little-endian byte stream) into the 72
// round keys rk[0..71] of SIMON128/256 (caller must size rk accordingly).
// FIX: the old code read A/B/C/D as single BYTES K[0..3] instead of the four
// 64-bit key words; assemble the words from the byte stream.
void Simon128256KeySchedule(uint8_t *K,uint64_t *rk)
{
	uint64_t i, A = 0, B = 0, C = 0, D = 0;
	for (int b = 7; b >= 0; b--) {
		A = (A << 8) | K[b];
		B = (B << 8) | K[8 + b];
		C = (C << 8) | K[16 + b];
		D = (D << 8) | K[24 + b];
	}
	uint64_t c=0xfffffffffffffffcLL, z=0xfdc94c3a046d678bLL;
	for(i=0;i<64;)
	{
		rk[i++]=A; A^=c^(z&1)^ROTR64(&D,3)^ROTR64(&D,4)^B^ROTR64(&B,1); z>>=1;
		rk[i++]=B;
		B^=c^(z&1)^ROTR64(&A,3)^ROTR64(&A,4)^C^ROTR64(&C,1); z>>=1;
		rk[i++]=C; C^=c^(z&1)^ROTR64(&B,3)^ROTR64(&B,4)^D^ROTR64(&D,1); z>>=1;
		rk[i++]=D; D^=c^(z&1)^ROTR64(&C,3)^ROTR64(&C,4)^A^ROTR64(&A,1); z>>=1;
	}
	// last eight round keys use fixed sequence bits instead of z
	rk[64]=A; A^=c^0^ROTR64(&D,3)^ROTR64(&D,4)^B^ROTR64(&B,1);
	rk[65]=B; B^=c^1^ROTR64(&A,3)^ROTR64(&A,4)^C^ROTR64(&C,1);
	rk[66]=C; C^=c^0^ROTR64(&B,3)^ROTR64(&B,4)^D^ROTR64(&D,1);
	rk[67]=D; D^=c^0^ROTR64(&C,3)^ROTR64(&C,4)^A^ROTR64(&A,1);
	rk[68]=A;
	rk[69]=B;
	rk[70]=C;
	rk[71]=D;
}
/*
void Simon128128Encrypt(uint64_t Pt[], uint64_t Ct[], uint64_t rk[])
{
int i;
Ct[0]=Pt[0]; Ct[1]=Pt[1];
for(i=0; i<68; i+=2)
R64x2(Ct[1], Ct[0], rk[i], rk[i+1]);
}
void Simon128128Decrypt(uint64_t Pt[], uint64_t Ct[], uint64_t rk[])
{
int i;
Pt[0]=Ct[0]; Pt[1]=Ct[1];
for(i=67;i>=0;i-=2)
R64x2(Pt[0], Pt[1], rk[i], rk[i-1]);
}
*/
// Release a context created by simon_init. Safe on NULL / already-released
// input. FIX: the old code dereferenced *ctx without a NULL check; it now
// also NULLs *ctx to make repeated calls harmless.
void simon_finish(simon_ctx_t **ctx) {
    if (!ctx || !*ctx) return;
    free((*ctx)->key_schedule);
    free(*ctx);
    *ctx = NULL;
}
// Allocate a cipher context and precompute the round-key schedule for `key`
// (key_len in bytes). Returns NULL on bad arguments, an unsupported variant,
// or allocation failure. Pair with simon_finish to release.
// NOTE(review): ctx->round and the schedule size come from the global ROUNDS,
// not get_round_num(type); the CTR kernels hard-code 68 rounds (128/128), so
// ROUNDS must be >= 68 — confirm against main().
simon_ctx_t *simon_init(enum simon_encrypt_type type, uint8_t *key, int key_len) {
    if (key == NULL) return NULL;
    if (!is_validate_key_len(type, key_len)) return NULL;
    simon_ctx_t *ctx = (simon_ctx_t *)calloc(1, sizeof(simon_ctx_t));
    if (!ctx) return NULL;
    ctx->type = type;
    ctx->round = ROUNDS; // get_round_num(type);
    ctx->key_schedule = (uint64_t*)calloc(ROUNDS, sizeof(uint64_t));
    if (!ctx->key_schedule) {
        free(ctx);   // FIX: the old code leaked ctx on this failure path
        return NULL;
    }
    // expand the key; the schedule functions consume the raw byte stream
    switch (type)
    {
    case simon_ENCRYPT_TYPE_128_128:
        Simon128128KeySchedule(key, ctx->key_schedule);
        break;
    case simon_ENCRYPT_TYPE_128_192:
        Simon128192KeySchedule(key, ctx->key_schedule);
        break;
    case simon_ENCRYPT_TYPE_128_256:
        Simon128256KeySchedule(key, ctx->key_schedule);
        break;
    default:
        // FIX: fail instead of returning a context with an all-zero schedule
        free(ctx->key_schedule);
        free(ctx);
        return NULL;
    }
    return ctx;
}
void generate_random_array(uint8_t *iv, size_t iv_len) {
    // Fill iv[0..iv_len) with random bytes from a Mersenne-Twister engine,
    // seeded once per call from the OS entropy source.
    std::random_device entropy;
    std::mt19937 engine(entropy());
    std::uniform_int_distribution<> byte_dist;
    for (size_t idx = 0; idx < iv_len; ++idx) {
        iv[idx] = static_cast<uint8_t>(byte_dist(engine));
    }
}
void show_array(const char *explain, const uint8_t *array, size_t len) {
    // Print a labelled hex dump of `array`, highest index first.
    printf("%20s ", explain);
    int idx = (int)len - 1;
    while (idx >= 0) {
        printf("%02x ", array[idx]);
        --idx;
    }
    printf("\n");
}
// Device-side debug dump of 64-bit words, highest index first.
// FIX: "%02x" with a uint64_t vararg is undefined behavior in device printf
// (format/argument size mismatch); use the 64-bit format instead.
__device__ void show_array2(const char *explain, uint64_t *array, size_t len) {
    printf("%20s ", explain);
    for (int i = (int)len - 1; i >= 0; i--) {
        printf("%016llx ", (unsigned long long)array[i]);
    }
    printf("\n");
}
// SIMON128/128 counter-mode kernel: thread `id` encrypts 128-bit block `id`.
// Keystream for block b is E(iv + b) with the 64-bit counter added to the
// low IV word; output = input XOR keystream. CTR mode is symmetric, so the
// same kernel both encrypts and decrypts.
// `len` counts 64-bit words (two per block); `iv` points to 2 words.
// `iv_len` and `type` are currently unused (kept for interface
// compatibility) — the 68-round loop hard-codes the 128/128 variant.
__global__ void simon_ctr_encrypt(uint64_t *key_schedule, uint64_t *in, uint64_t *out, int len, uint64_t *iv, int iv_len, enum simon_encrypt_type type) {
	int count = len / 2;   // number of 128-bit blocks
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	if (id >= count) return;
	// per-thread counter block: iv with the block index added to the low word
	uint64_t ctr[2];
	ctr[0] = iv[0] + (uint64_t)id;
	ctr[1] = iv[1];
	// 68 rounds of SIMON128/128, two rounds per R64x2 call (replaces the
	// previous 34 hand-unrolled lines)
	for (int r = 0; r < 68; r += 2)
		R64x2(&ctr[1], &ctr[0], &key_schedule[r], &key_schedule[r + 1]);
	// FIX: removed the __syncthreads() that sat inside the divergent
	// `if (id < count)` branch — undefined behavior for the tail block, and
	// pointless since this kernel uses no shared memory.
	out[2 * id]     = ctr[0] ^ in[2 * id];
	out[2 * id + 1] = ctr[1] ^ in[2 * id + 1];
}
// Experimental 4-words-per-thread CTR kernel (two 128-bit blocks per thread).
// NOTE(review): this kernel looks unfinished and does NOT match
// simon_ctr_encrypt / the CPU reference — verify before use:
//  - the round loop runs 34 times but advances the key index by only 1 per
//    iteration while consuming key_schedule[i..i+3], so round keys overlap
//    and only schedule entries 0..36 are ever touched;
//  - iv_block[0] and iv_block[2] accumulate `id` on EVERY iteration (34x)
//    rather than once before the rounds;
//  - the second R64x2 call passes (&crypted_iv_block[2], &crypted_iv_block[3])
//    in the opposite word order to the first call's ([1], [0]).
__global__ void simon_ctr_encrypt4(uint64_t *key_schedule, uint64_t *in, uint64_t *out, int len, uint64_t *iv, int iv_len, enum simon_encrypt_type type) {
// printf("erarar %d %d\n");
/* if (len < 0) {
return -1;
}
*/
int count = len/4; //(len / (BLOCK_SIZE * LANE_NUM));
int i;
uint64_t crypted_iv_block[4];
uint64_t iv_block[4];
// load the 4-word IV once per thread
iv_block[0]=iv[0];
iv_block[1]=iv[1];
iv_block[2]=iv[2];
iv_block[3]=iv[3];
int id= blockIdx.x*blockDim.x+threadIdx.x;
if (id <count)
{
for (i = 0; i < 34; i++){
iv_block[0]+=id;
crypted_iv_block[0] = iv_block[0];
crypted_iv_block[1] = iv_block[1];
R64x2(&crypted_iv_block[1], &crypted_iv_block[0], &key_schedule[i], &key_schedule[i+1] );
iv_block[2]+=id;
crypted_iv_block[2] = iv_block[2];
crypted_iv_block[3] = iv_block[3];
R64x2(&crypted_iv_block[2], &crypted_iv_block[3], &key_schedule[i+2], &key_schedule[i+3]);
}
// XOR the two keystream blocks into the thread's four input words
out[4*id]=crypted_iv_block[0] ^ in[4*id];
out[4*id+1]=crypted_iv_block[1] ^ in[4*id+1];
out[4*id+2]=crypted_iv_block[2] ^ in[4*id+2];
out[4*id+3]=crypted_iv_block[3] ^ in[4*id+3];
}
}
// Host reference for SIMON128/128 counter mode, mirroring the GPU kernel:
// the keystream for block b is E(iv + b), XORed onto the input words.
// `len` counts 64-bit words (two per block); `iv_len` and `type` are unused.
// FIX: the old loop did `iv_block[0] += i` WITHOUT resetting between blocks,
// so the counter for block i was iv[0] + i*(i+1)/2 — diverging from the GPU
// kernel (iv[0] + id) from the third block onward. The counter is now
// rebuilt per block, matching the kernel.
void simon_ctr_encrypt_cpu(uint64_t *key_schedule, const uint64_t *in, uint64_t *out, int len, uint64_t *iv, int iv_len, enum simon_encrypt_type type)
{
	int block_count = len / 2;
	uint64_t ctr[2];
	for (int b = 0; b < block_count; b++)
	{
		// counter block: block index added to the low IV word
		ctr[0] = iv[0] + (uint64_t)b;
		ctr[1] = iv[1];
		// 68 rounds of SIMON128/128, two per R64x2 call
		for (int r = 0; r < 68; r += 2)
			R64x2(&ctr[1], &ctr[0], &key_schedule[r], &key_schedule[r + 1]);
		out[2 * b]     = ctr[0] ^ in[2 * b];
		out[2 * b + 1] = ctr[1] ^ in[2 * b + 1];
	}
}
int encrypt_decrypt_stream_test(int block_num, enum simon_encrypt_type type,uint8_t *s_key_stream, uint8_t key_s ) {
int r = 0;
/// define CPU variables
simon_ctx_t *ctx = NULL;
uint8_t *plain_text_stream = NULL;
uint8_t *crypted_text_stream = NULL;
uint8_t *decrypted_text_stream = NULL;
uint8_t *iv_text_stream = NULL;
uint8_t *origin_iv_text_stream = NULL;
int *bl1,*bl2;
// Define GPU variable
// simon_ctx_t *dev_ctx= NULL;
uint64_t *dev_key_schedule=NULL;
uint8_t *dev_plain_text_stream= NULL;
uint8_t *dev_crypted_text_stream=NULL;
uint8_t *dev_decrypted_text_stream= NULL;
uint8_t *dev_iv_text_stream= NULL;
//uint64_t *key;
//cudaStream_t streams[2];
// specfy gridSize and blockSize of threads
int blockSize =1024;
int gridSize = block_num/blockSize; /// ????
//allocate CPU variables
plain_text_stream = (uint8_t*)malloc(BLOCK_SIZE * block_num);
if (!plain_text_stream) {
r = 1;
// goto finish;
}
crypted_text_stream = (uint8_t*)malloc(BLOCK_SIZE * block_num);
if (!crypted_text_stream) {
r = 1;
// goto finish;
}
decrypted_text_stream = (uint8_t*)malloc(BLOCK_SIZE * block_num);
if (!decrypted_text_stream) {
r = 1;
// goto finish;
}
iv_text_stream = (uint8_t*)malloc(BLOCK_SIZE);
if (!iv_text_stream) {
r = 1;
// goto finish;
}
origin_iv_text_stream = (uint8_t*)malloc(BLOCK_SIZE);
if (!origin_iv_text_stream) {
r = 1;
// goto finish;
}
//allocate GPU variables
cudaMalloc((void **)&dev_key_schedule, sizeof(uint64_t)*ROUNDS);
cudaMalloc((void **)&dev_plain_text_stream, BLOCK_SIZE * block_num*sizeof(uint8_t));
cudaMalloc((void **)&dev_crypted_text_stream, BLOCK_SIZE * block_num*sizeof(uint8_t));
cudaMalloc((void **)&dev_decrypted_text_stream, BLOCK_SIZE * block_num*sizeof(uint8_t));
cudaMalloc((void **)&dev_iv_text_stream, BLOCK_SIZE *sizeof(uint8_t));
//cudaMalloc(&key, BLOCK_SIZE *sizeof(uint64_t));
for (int i = 0; i < block_num; i++)
{
memcpy(plain_text_stream + (i * BLOCK_SIZE), s_plain_text_stream, sizeof(s_plain_text_stream));
}
generate_random_array(origin_iv_text_stream, BLOCK_SIZE);
ctx = simon_init(type, s_key_stream, key_s);
//ctx->key_schedule = (uint64_t*)malloc(ROUNDS);
// Simon128128KeySchedule(s_key_stream , ctx->key_schedule);
if (!ctx) {
r = 1;
// goto finish;
}
memcpy(iv_text_stream, origin_iv_text_stream, BLOCK_SIZE);
//cast_uint8_array_to_uint64(c_plain_text_stream,plain_text_stream);
/*
printf("\n Plain---\n");
for (int i = 0; i < BLOCK_SIZE * block_num; i++)
printf("%02x ",plain_text_stream[i]);
printf("\n");
*/
//////////////////////////////////// simon on GPU ////////////////////////////////////////////
printf(" Run simon on GPU \n");
float elapsed=0;
cudaEvent_t start, stop;
//copy all needed variables to device memory
cudaMemcpy(dev_key_schedule, ctx->key_schedule,sizeof(uint64_t)*ROUNDS, cudaMemcpyHostToDevice);
cudaMemcpy(dev_plain_text_stream, plain_text_stream, BLOCK_SIZE * block_num*sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_iv_text_stream, iv_text_stream,BLOCK_SIZE *sizeof(uint8_t), cudaMemcpyHostToDevice);
//StopWatchInterface *timer = 0;
//sdkCreateTimer(&timer);
//sdkResetTimer(&timer);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for(int i=0;i<100;i++)
{
//sdkStartTimer(&timer);
simon_ctr_encrypt<<<gridSize, blockSize>>>( dev_key_schedule, (uint64_t*)dev_plain_text_stream, (uint64_t*)dev_crypted_text_stream , BLOCK_SIZE * block_num/8 , (uint64_t*)dev_iv_text_stream,BLOCK_SIZE/8, type); //encryption
cudaDeviceSynchronize();
//sdkStopTimer(&timer);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime(&elapsed, start, stop);
float time_GPU =elapsed/100000;
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Size message %ld bytes\n",(long)BLOCK_SIZE * block_num);
printf("Elapsed time on GPU %f seconds \n ", time_GPU);
printf("Throughput on GPU %f Gbps\n ",(long) 8*BLOCK_SIZE * block_num/time_GPU/1e9);
/*
cudaEventRecord(stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime(&elapsed, start, stop);
float time_GPU =elapsed/1000;
cudaEventDestroy(start);
cudaEventDestroy(stop);
*/
//float reduceTime_GPU = sdkGetAverageTimerValue(&timer);
//printf("Average time on GPU: %f seconds\n", reduceTime_GPU/1000);
//copy cypher text from device to host
cudaMemcpy(crypted_text_stream, dev_crypted_text_stream , BLOCK_SIZE * block_num*sizeof(uint8_t), cudaMemcpyDeviceToHost );
/*
printf("\n encr---\n");
for (int i = 0; i < BLOCK_SIZE * block_num; i++)
printf("%02x ",crypted_text_stream[i]);
printf("\n");
*/
memcpy(iv_text_stream, origin_iv_text_stream, BLOCK_SIZE);
//recopy the iv to device memory
cudaMemcpy(dev_iv_text_stream, iv_text_stream,BLOCK_SIZE *sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(dev_crypted_text_stream, crypted_text_stream , BLOCK_SIZE * block_num*sizeof(uint8_t), cudaMemcpyHostToDevice);
simon_ctr_encrypt<<<gridSize, blockSize>>>( dev_key_schedule , (uint64_t*)dev_crypted_text_stream, (uint64_t*)dev_decrypted_text_stream , BLOCK_SIZE * block_num/8 , (uint64_t*)dev_iv_text_stream, BLOCK_SIZE/8, type); //decryption
//copy decryphed text from device to host
cudaMemcpy(decrypted_text_stream, dev_decrypted_text_stream , BLOCK_SIZE * block_num*sizeof(uint8_t), cudaMemcpyDeviceToHost );
//printf("Elapsea time on GPU %f seconds \n ", time_GPU);
/*
printf("\n decr---\n");
for (int i = 0; i < BLOCK_SIZE * block_num; i++)
printf("%02x ",decrypted_text_stream[i]);
printf("\n");
*/
//show_array("plain", plain_text_stream, block_num * BLOCK_SIZE);
// show_array("decrypted", decrypted_text_stream, block_num * BLOCK_SIZE);
for (int i = 0; i < BLOCK_SIZE * block_num; i++) {
//printf("%d %u %u\n",i,plain_text_stream[i],decrypted_text_stream[i]);
if (plain_text_stream[i] != decrypted_text_stream[i]) {
printf("block_num:%d idx:%d 0x%02x != 0x%02x\n", block_num, i, plain_text_stream[i], decrypted_text_stream[i]);
show_array("iv", origin_iv_text_stream, BLOCK_SIZE);
show_array("plain", plain_text_stream, block_num * BLOCK_SIZE);
show_array("decrypted", decrypted_text_stream, block_num * BLOCK_SIZE);
show_array("counted iv", iv_text_stream, BLOCK_SIZE);
printf("\n");
r = 1;
// goto finish;
}
}
//////////////////////////////////// simon on CPU ////////////////////////////////////////////
/*
printf(" Run simon on CPU \n");
// double time_CPU = 0.0;
//clock_t begin = clock();
//cudaEvent_t start, stop;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//cudaEventRecord(start, 0);
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
for(int i=0;i<100;i++) {
sdkStartTimer(&timer);
simon_ctr_encrypt_cpu(ctx->key_schedule, (uint64_t*)plain_text_stream, (uint64_t*)crypted_text_stream, BLOCK_SIZE * block_num/8, (uint64_t*)iv_text_stream, BLOCK_SIZE/8,type); //encryption
sdkStopTimer(&timer);
}
float time_CPU = sdkGetAverageTimerValue(&timer);
printf("Average time on CPU: %f seconds\n", time_CPU/1000);
//clock_t end = clock();
//time_CPU += (double)(end - begin) / CLOCKS_PER_SEC;
//cudaEventRecord(stop, 0);
//cudaEventSynchronize (stop);
//cudaEventElapsedTime(&elapsed, start, stop);
//float time_CPU =elapsed;
//cudaEventDestroy(start);
//cudaEventDestroy(stop);
memcpy(iv_text_stream, origin_iv_text_stream, BLOCK_SIZE);
simon_ctr_encrypt_cpu(ctx->key_schedule, (uint64_t*)crypted_text_stream, (uint64_t*)decrypted_text_stream, BLOCK_SIZE * block_num/8, (uint64_t*)iv_text_stream, BLOCK_SIZE/8,type); // decryption
printf("Elapsea time on CPU %f seconds \n", time_CPU);
//show_array("plain", plain_text_stream, block_num * BLOCK_SIZE);
// show_array("decrypted", decrypted_text_stream, block_num * BLOCK_SIZE);
*/
///////////////////// speedup ////////////////////////////
// printf("The speedup = %f \n",time_CPU / time_GPU);
// printf("The speedup = %f \n",(time_CPU/1000) / (reduceTime_GPU/1000));
for (int i = 0; i < BLOCK_SIZE * block_num; i++) {
//printf("%d %u %u\n",i,plain_text_stream[i],decrypted_text_stream[i]);
if (plain_text_stream[i] != decrypted_text_stream[i]) {
printf("block_num:%d idx:%d 0x%02x != 0x%02x\n", block_num, i, plain_text_stream[i], decrypted_text_stream[i]);
show_array("iv", origin_iv_text_stream, BLOCK_SIZE);
show_array("plain", plain_text_stream, block_num * BLOCK_SIZE);
show_array("decrypted", decrypted_text_stream, block_num * BLOCK_SIZE);
show_array("counted iv", iv_text_stream, BLOCK_SIZE);
printf("\n");
r = 1;
goto finish;
}
}
finish:
// Release device memory
// cudaFree(dev_key_schedule);
//cudaFree(dev_plain_text_stream);
//cudaFree(dev_crypted_text_stream);
// cudaFree(dev_decrypted_text_stream);
// cudaFree(dev_iv_text_stream);
//cudaFree(&dev_ctx);
free(plain_text_stream);
free(crypted_text_stream);
free(decrypted_text_stream);
free(iv_text_stream);
free(origin_iv_text_stream);
simon_finish(&ctx);
// printf("okkkkkkkkkkkkk\n");
return r;
}
int main(int argc, char **argv )
{
    // Parses "keysize <bits> nblocks <count>" arguments, selects the SIMON
    // variant, builds the key material and runs the GPU encrypt/decrypt
    // round-trip test.
    int flag=1;
    enum simon_encrypt_type type;
    // 256-bit master key material; shorter variants use a prefix of it.
    uint8_t key_stream[32] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
    };
    // NULL-initialized so the final free() is always safe, even when the
    // switch below never allocates (bad keysize or missing arguments).
    uint8_t *s_key_stream = NULL;
    printf("Enter Encryption type: \n");
    uint8_t key_s = 0;
    int keysize;
    // Guard argc before touching argv[1..4]; the original dereferenced
    // argv[1]/argv[3] unconditionally and crashed when arguments were missing.
    if ( argc >= 5 && (strncmp(argv[1],"keysize",7)==0) && (strncmp(argv[3],"nblocks",7)==0) )
    {
        keysize = atoi((argv[2]));
        printf(" %s = %s\n", argv[1], argv[2]);
        long nblocks=atoi((argv[4]));
        printf("***************************\n");
        printf("SIMON_ENCRYPT_TYPE_128_%d\n",keysize);
        printf("***************************\n");
        switch (keysize) {
        case 128:
            type=simon_ENCRYPT_TYPE_128_128;
            // NOTE(review): a 128-bit key is normally 16 bytes; key_s=24 kept
            // from the original -- confirm against simon_init's expectations.
            key_s=24;
            ROUNDS=67;
            s_key_stream=(uint8_t*)malloc(key_s);
            // Start at 0: the original started at 1 and left byte 0 of the
            // copied key uninitialized.
            for(int i=0; i<key_s; i++)
                s_key_stream[i] = key_stream[i];
            break;
        case 192:
            type=simon_ENCRYPT_TYPE_128_192;
            key_s=24;
            ROUNDS=68;
            s_key_stream=(uint8_t*)malloc(key_s);
            for(int i=0; i<key_s; i++)
                s_key_stream[i] = key_stream[i];
            break;
        case 256:
            type=simon_ENCRYPT_TYPE_128_256;
            key_s=32;
            ROUNDS=71;
            s_key_stream=(uint8_t*)malloc(key_s);
            for(int i=0; i<key_s; i++)
                s_key_stream[i]=key_stream[i];
            break;
        default:
            flag=0;
            break;
        }
        printf("test encrypt_decrypt_stream_test\n");
        if (flag)
        {
            // NOTE(review): the test receives key_stream, not s_key_stream;
            // the copied buffer is currently unused -- confirm intent.
            int r = encrypt_decrypt_stream_test(nblocks,type, key_stream, key_s);
            if(r != 0)
            {
                free(s_key_stream);
                return r;
            }
            printf("success encrypt_decrypt_stream_test\n");
        }
        else
            printf("Wrong simon Encrypted Type \n");
    } // end if(strncmp(argv[1],"keysize",7)==0)
    free(s_key_stream);
    return 0;
}
|
8,815 | #include <iostream>
#include <vector>
#include <cstdlib>  // rand, srand, RAND_MAX
#include <ctime>    // time
using namespace std;
// Cont
// Monte Carlo pi: each thread tests a fixed contiguous chunk of 1000 samples
// (samples [tid*1000, (tid+1)*1000)), so adjacent threads read addresses
// 1000 elements apart -- deliberately uncoalesced, for comparison with the
// grid-stride version below.
// Launch assumptions: blockDim.x <= 500 (size of the shared buffer) and
// gridDim.x*blockDim.x*1000 == nsamples -- TODO confirm at the call site
// (the nsamples parameter itself is unused here).
__global__ void count_samples_in_circles_slow(float* d_randNumsX, float* d_randNumsY, int* d_countInBlocks, int nsamples)
{
__shared__ int shared_blocks_slow[500];
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Count how many of this thread's 1000 samples fall inside the unit circle.
int inCircle = 0;
for (int i = index*1000; i < 1000 * (index + 1) ; i++) {
float xValue = d_randNumsX[i];
float yValue = d_randNumsY[i];
if (xValue*xValue + yValue*yValue < 1.0f) {
inCircle++;
}
}
// Stage each thread's count, then let thread 0 serially sum the block.
shared_blocks_slow[threadIdx.x] = inCircle;
__syncthreads();
if (threadIdx.x == 0) {
int totalInCircleForABlock = 0;
for (int j = 0; j < blockDim.x; j++) {
totalInCircleForABlock += shared_blocks_slow[j];
}
d_countInBlocks[blockIdx.x] = totalInCircleForABlock;
}
}
// Coales
// Monte Carlo pi, coalesced variant: a grid-stride loop lets consecutive
// threads read consecutive elements. Per-thread hit counts are staged in
// shared memory and folded by thread 0 into d_countInBlocks[blockIdx.x].
// Launch assumption: blockDim.x <= 500 (shared buffer size).
__global__ void count_samples_in_circles(float* d_randNumsX, float* d_randNumsY, int* d_countInBlocks, int nsamples)
{
__shared__ int shared_blocks[500];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int step = gridDim.x * blockDim.x;
// Grid-stride walk over all samples.
int hits = 0;
for (int s = tid; s < nsamples; s += step) {
float px = d_randNumsX[s];
float py = d_randNumsY[s];
if (px * px + py * py < 1.0f)
++hits;
}
shared_blocks[threadIdx.x] = hits;
__syncthreads();
// Thread 0 serially sums the block's partial counts.
if (threadIdx.x == 0) {
int blockTotal = 0;
for (int t = 0; t < blockDim.x; ++t)
blockTotal += shared_blocks[t];
d_countInBlocks[blockIdx.x] = blockTotal;
}
}
int nsamples = 1e8; // total Monte Carlo samples (1e8 == 100,000,000; fits in int)
int main(void)
{
    // Host-side driver: generates random points in the unit square, runs both
    // kernels (strided and coalesced), then estimates pi from the hit ratio.
    vector<float> h_randNumsX(nsamples);
    vector<float> h_randNumsY(nsamples);
    srand(time(NULL)); // seed with system clock
    // Fill with uniform values in [0, 1].
    for (int i = 0; i < (int)h_randNumsX.size(); ++i) {
        h_randNumsX[i] = float(rand()) / RAND_MAX;
        h_randNumsY[i] = float(rand()) / RAND_MAX;
    }
    // Send random values to the GPU.
    size_t size = nsamples * sizeof(float);
    float* d_randNumsX;
    float* d_randNumsY;
    cudaMalloc(&d_randNumsX, size);
    cudaMalloc(&d_randNumsY, size);
    cudaMemcpy(d_randNumsX, &h_randNumsX.front(), size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_randNumsY, &h_randNumsY.front(), size, cudaMemcpyHostToDevice);
    int threadsPerBlock = 500;
    // The slow kernel assigns 1000 samples per thread, which fixes the grid size.
    int num_blocks = nsamples / (1000 * threadsPerBlock);
    int* d_countInBlocks;
    size_t countBlocks = num_blocks * sizeof(int);
    cudaMalloc(&d_countInBlocks, countBlocks);
    cudaEvent_t start, stop;
    // --- Timed run: strided (uncoalesced) kernel ---
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    count_samples_in_circles_slow<<<num_blocks, threadsPerBlock>>>(d_randNumsX, d_randNumsY, d_countInBlocks, nsamples);
    // Surface launch-configuration errors (the launch itself returns nothing).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "slow kernel launch failed: " << cudaGetErrorString(err) << endl;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime_0;
    cudaEventElapsedTime(&elapsedTime_0, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Clear the whole counter buffer before the second run (the original only
    // zeroed the first int; harmless since every block rewrites its slot, but
    // this matches the evident intent).
    cudaMemset(d_countInBlocks, 0, countBlocks);
    cout << "Elapsed time Slow: " << elapsedTime_0 << endl;
    // --- Timed run: coalesced grid-stride kernel ---
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    count_samples_in_circles<<<num_blocks, threadsPerBlock>>>(d_randNumsX, d_randNumsY, d_countInBlocks, nsamples);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "coalesced kernel launch failed: " << cudaGetErrorString(err) << endl;
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "Elapsed time Coales: " << elapsedTime << endl;
    // Gather per-block counts and reduce on the host.
    int* h_countInBlocks = new int[num_blocks];
    cudaMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, cudaMemcpyDeviceToHost);
    int nsamples_in_circle = 0;
    for (int i = 0 ; i < num_blocks; i++) {
        nsamples_in_circle = nsamples_in_circle + h_countInBlocks[i];
    }
    delete[] h_countInBlocks;   // was leaked in the original
    cudaFree(d_randNumsX);
    cudaFree(d_randNumsY);
    cudaFree(d_countInBlocks);
    // Fraction that fell within the quarter unit circle, scaled to pi.
    float estimatedValue = 4.0f * float(nsamples_in_circle) / nsamples;
    cout << "Estimated Value: " << estimatedValue << endl;
    return 0;
}
|
8,816 | #define TILE_SIZE 16
#define MASK_WIDTH 3
#include <stdio.h>
#include <cuda_runtime.h>
// Sobel edge detector with thresholding. Each block stages a TILE_SIZE x
// TILE_SIZE shared tile (1-pixel halo on each side) and computes the inner
// (TILE_SIZE-2)^2 pixels; blocks therefore advance by blockDim-2 so tiles
// overlap on the halo. Launch assumption: blockDim == (TILE_SIZE, TILE_SIZE).
__global__ void sobel_filter(int *inputM, int *outputM, int width, int height, int thresh){
    __shared__ int local[TILE_SIZE][TILE_SIZE];
    int tx = threadIdx.x; int ty = threadIdx.y;
    // Position within the input, offset past the 1-pixel border.
    int row = blockIdx.x * (blockDim.x-2) + tx + 1;
    int col = blockIdx.y * (blockDim.y-2) + ty + 1;
    int place = (row-1)*width + col - 1;
    bool inside = (place < height*width);
    // Stage this thread's pixel into shared memory (guarded for the edge).
    if(inside)
        local[tx][ty] = inputM[place];
    // Bug fix: the barrier must sit outside the divergent bounds check and
    // must run before any thread reads its neighbours' shared entries --
    // the original had no __syncthreads() at all (a data race).
    __syncthreads();
    // Only the tile interior has all 8 neighbours available.
    if(inside && tx > 0 && tx < TILE_SIZE-1 && ty > 0 && ty < TILE_SIZE-1){
        int top_left = local[tx-1][ty-1], top_right = local[tx+1][ty-1];
        int bot_left = local[tx-1][ty+1], bot_right = local[tx+1][ty+1];
        // Horizontal and vertical Sobel responses.
        int gx = top_left - top_right + 2*local[tx-1][ty] - 2*local[tx+1][ty] + bot_left - bot_right;
        int gy = top_left + 2*local[tx][ty-1] + top_right - bot_left - 2*local[tx][ty+1] - bot_right;
        // Squared gradient magnitude compared against the threshold.
        int magnitude = gx*gx + gy*gy;
        int result = 0;
        if(magnitude > thresh)
            result = 255;
        outputM[place] = result;
    }
}
|
8,817 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define dT 0.2f
#define G 0.6f
#define BLOCK_SIZE 128
// Global variables
int num_planets;
int num_timesteps;
// Host arrays
float2* velocities;
float4* planets;
// Device arrays
float2* velocities_d;
float4* planets_d;
// Returns the current wall-clock time in seconds (microsecond resolution).
double walltime() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + now.tv_usec * 1e-6;
}
// Parse command line arguments
// Parses "nbody num_timesteps"; prints usage and exits on a bad invocation.
// Side effect: sets the global num_timesteps.
void parse_args(int argc, char** argv){
    if(argc != 2){
        printf("Usage: nbody num_timesteps\n");  // fixed "Useage" typo
        exit(-1);
    }
    num_timesteps = strtol(argv[1], 0, 10);
}
// Reads planets from planets.txt
// Reads the planet count and then one "x y vx vy mass" line per planet from
// planets4096.txt into the global planets/velocities arrays (z = mass).
void read_planets(){
    FILE* file = fopen("planets4096.txt", "r");
    if(file == NULL){
        // Message now names the file actually opened (original said planets.txt).
        printf("'planets4096.txt' not found. Exiting\n");
        exit(-1);
    }
    char line[200];
    // Header line: number of planets.
    if(fgets(line, 200, file) == NULL || sscanf(line, "%d", &num_planets) != 1){
        printf("Malformed header in 'planets4096.txt'. Exiting\n");
        fclose(file);
        exit(-1);
    }
    planets = (float4*)malloc(sizeof(float4)*num_planets);
    velocities = (float2*)malloc(sizeof(float2)*num_planets);
    for(int p = 0; p < num_planets; p++){
        // Guard against a truncated file (fgets was unchecked before).
        if(fgets(line, 200, file) == NULL){
            printf("Unexpected end of 'planets4096.txt' at planet %d. Exiting\n", p);
            fclose(file);
            exit(-1);
        }
        sscanf(line, "%f %f %f %f %f",
               &planets[p].x,
               &planets[p].y,
               &velocities[p].x,
               &velocities[p].y,
               &planets[p].z);
    }
    fclose(file);
}
// Writes planets to file
// Writes the current planet state ("x y vx vy mass" per line) to
// gpu_planets_out.txt. `timestep` is unused but kept for interface stability.
void write_planets(int timestep){
    char name[20];
    sprintf(name, "gpu_planets_out.txt");
    // Bug fix: "wr+" is not a standard fopen mode; plain "w" (truncate/write)
    // is what the code needs.
    FILE* file = fopen(name, "w");
    if(file == NULL){
        printf("Could not open '%s' for writing\n", name);
        return;
    }
    for(int p = 0; p < num_planets; p++){
        fprintf(file, "%f %f %f %f %f\n",
                planets[p].x,
                planets[p].y,
                velocities[p].x,
                velocities[p].y,
                planets[p].z);
    }
    fclose(file);
}
// TODO 7. Calculate the change in velocity for p, caused by the interaction with q
__device__ float2 calculate_velocity_change_planet(float4 p, float4 q){
float2 dist;
float2 dvel;
dist.x = q.x - p.x;
dist.y = q.y - p.y;
if(dist.x == 0 && dist.y == 0){
float2 vel = {0.0f, 0.0f};
return vel;
}
float abs_dist= sqrt(dist.x*dist.x + dist.y*dist.y);
float dist_cubed = abs_dist*abs_dist*abs_dist;
// Calculate change in velocity
dvel.x = dT*G*q.z/dist_cubed * dist.x;
dvel.y = dT*G*q.z/dist_cubed * dist.y;
return dvel;
}
// TODO 5. Calculate the change in velocity for my_planet, caused by the interactions with a block of planets
__device__ float2 calculate_velocity_change_block(float4 my_planet, float4* shared_planets){
float2 velocity;
velocity.x = 0.0f;
velocity.y = 0.0f;
for(int i = 0; i < blockDim.x; i++){
float2 temp_vel = calculate_velocity_change_planet(my_planet, shared_planets[i]);
velocity.x += temp_vel.x;
velocity.y += temp_vel.y;
}
return velocity;
}
// TODO 4. Update the velocities by calculating the planet interactions
__global__ void update_velocities(float4* planets, float2* velocities, int num_planets){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
float4 my_planet = planets[thread_id];
// Shared memory for this block
__shared__ float4 shared_planets[BLOCK_SIZE];
// Compute the velocity change for planets for one block at a time
for(int i = 0; i < num_planets; i+=blockDim.x){
shared_planets[threadIdx.x] = planets[i + threadIdx.x];
__syncthreads();
float2 tempv = calculate_velocity_change_block(my_planet, shared_planets);
velocities[thread_id].x += tempv.x;
velocities[thread_id].y += tempv.y;
__syncthreads();
}
}
// TODO 7. Update the positions of the planets using the new velocities
__global__ void update_positions(float4* planets, float2* velocities, int num_planets){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
planets[tid].x += velocities[tid].x * dT;
planets[tid].y += velocities[tid].y * dT;
}
int main(int argc, char** argv){
    // Driver: read planets, simulate num_timesteps steps on the GPU, time the
    // compute and transfer phases separately, write results back out.
    parse_args(argc, argv);
    read_planets();
    double calculation_time = 0;
    double memcopy_time = 0;
    printf("blocksize: %d\n", BLOCK_SIZE);
    // Allocate device memory and transfer initial state.
    cudaMalloc(&planets_d, sizeof(float4)*num_planets);
    cudaMalloc(&velocities_d, sizeof(float2)*num_planets);
    double mem_start = walltime();
    cudaMemcpy(planets_d, planets, sizeof(float4)*num_planets,
               cudaMemcpyHostToDevice);
    cudaMemcpy(velocities_d, velocities, sizeof(float2)*num_planets,
               cudaMemcpyHostToDevice);
    memcopy_time += walltime() - mem_start;
    // Ceil-divide to cover a partial last block.
    int num_blocks = num_planets/BLOCK_SIZE + ((num_planets%BLOCK_SIZE == 0) ? 0 : 1);
    printf("numblocks: %d\n", num_blocks);
    dim3 grid_size(num_blocks);
    dim3 block_size(BLOCK_SIZE);
    double calc_start = walltime();
    for(int t = 0; t < num_timesteps; t++){
        update_velocities<<<grid_size, block_size>>>(planets_d, velocities_d,
                                                     num_planets);
        update_positions<<<grid_size, block_size>>>(planets_d, velocities_d,
                                                    num_planets);
    }
    // Bug fix: kernel launches are asynchronous -- without this sync the
    // original timed only the launch overhead, not the computation.
    cudaDeviceSynchronize();
    calculation_time = walltime() - calc_start;
    mem_start = walltime();
    // Transfer results back to the host.
    cudaMemcpy(planets, planets_d, sizeof(float4)*num_planets,
               cudaMemcpyDeviceToHost);
    cudaMemcpy(velocities, velocities_d, sizeof(float2)*num_planets,
               cudaMemcpyDeviceToHost);
    memcopy_time += walltime() - mem_start;
    printf("%7.7f\n", calculation_time);
    printf("%7.7f\n", memcopy_time);
    cudaFree(planets_d);
    cudaFree(velocities_d);
    // Output, then release the host arrays (leaked in the original).
    write_planets(num_timesteps);
    free(planets);
    free(velocities);
    return 0;
}
|
8,818 | #include "includes.h"
// Per-filter variance over (batch, spatial) for data laid out as
// x[batch][filter][spatial]; one block per filter (blockIdx.x == filter).
// Divides by N-1 (Bessel's correction), so this is the sample variance.
// Launch assumption: blockDim.x == 512 (the `threads` constant) -- TODO
// confirm at the call site; fewer threads would leave `local` partly
// uninitialized before the serial sum below.
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance) {
  const int threads = 512;
  __shared__ float local[threads];
  int id = threadIdx.x;
  local[id] = 0;
  int filter = blockIdx.x;
  int i, j;
  // Each thread accumulates squared deviations for its strided subset of
  // the filter's spatial positions, across every batch element.
  for (j = 0; j < batch; ++j) {
    for (i = 0; i < spatial; i += threads) {
      int index = j * spatial * filters + filter * spatial + i + id;
      local[id] +=
          (i + id < spatial)
              ? ((x[index] - mean[filter]) * (x[index] - mean[filter]))
              : 0;
    }
  }
  __syncthreads();
  // Thread 0 serially folds the partial sums, then normalizes by N-1.
  if (id == 0) {
    variance[filter] = 0;
    for (i = 0; i < threads; ++i) {
      variance[filter] += local[i];
    }
    variance[filter] /= (spatial * batch - 1);
  }
}
8,819 | #include <stdio.h>
// CSR sparse matrix-vector product: b[row] += sum_j AVal[j] * x[AColI[j]]
// over row `row`'s nonzeros. One thread per row; threads >= m are idle.
__global__ void csr_mv(int m, double *AVal, int *ARowP, int *AColI, double *x, double *b)
{
    int row = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (row >= m)
        return;
    double acc = b[row];
    for (int idx = ARowP[row]; idx < ARowP[row + 1]; idx++)
        acc += AVal[idx] * x[AColI[idx]];
    b[row] = acc;
}
int main()
{
    // Builds a random CSR matrix with exactly 10 nonzeros per row, multiplies
    // it by the all-ones vector on the GPU, and prints the first 100 results.
    int n0 = 80000; // number of nonzero entries (10 per row)
    int m = 8000;   // rows (and, square, columns)
    int n = m;
    int ARowP[m+1];
    int AColI[n0];
    double AVal[n0];
    double x[n]; // b = A x
    double b[m];
    int thrd = 1000;
    int bloc = m/1000;
    int i;
    // Random column indices, increasing values.
    for (i=0;i<n0;i++){
        AVal[i] = 1+i;
        AColI[i] = rand() % m;
    }
    // Bug fix: row pointers must start at 0 and end at n0. The original
    // (i-1)*10 made ARowP[0] == -10, so row 0 read AVal[-10..-1] (out of
    // bounds) and the last 10 nonzeros were never used.
    for (i=0;i<(m+1);i++)
        ARowP[i] = i*10;
    for (i=0;i<n;i++)
        x[i] = 1;
    for (i=0;i<m;i++)
        b[i] = 0;
    // Device allocation and transfers.
    double *d_AVal, *d_x, *d_b;
    int *d_AColI, *d_ARowP;
    cudaMalloc(&d_AVal, n0*sizeof(double));
    cudaMalloc(&d_x, n*sizeof(double));
    cudaMalloc(&d_b, m*sizeof(double));
    cudaMalloc(&d_ARowP, (m+1)*sizeof(int));
    cudaMalloc(&d_AColI, n0*sizeof(int));
    cudaMemcpy(d_AVal, AVal, n0*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_x, x, n*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, m*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ARowP, ARowP, (m+1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_AColI, AColI, n0*sizeof(int), cudaMemcpyHostToDevice);
    csr_mv <<< bloc, thrd >>> ( m, d_AVal, d_ARowP, d_AColI, d_x, d_b);
    cudaMemcpy(b, d_b, m*sizeof(double), cudaMemcpyDeviceToHost);
    for (i = 0; i < 100; i++)
        printf("b[%d] = %lf\n", i, b[i]);
    // Release device memory (leaked in the original).
    cudaFree(d_AVal);
    cudaFree(d_x);
    cudaFree(d_b);
    cudaFree(d_ARowP);
    cudaFree(d_AColI);
    return 0;
}
|
8,820 | #include "kernels.cuh"
// Grid-stride maximum of array[0..n) merged into *max under a spin-lock.
// Launch assumptions: blockDim.x is a power of two <= 256, n >= 1, *mutex == 0.
__global__ void find_maximum_kernel(double *array, double *max, int *mutex, int n){
	// Bug fix: the global index is blockIdx.x*blockDim.x + threadIdx.x.
	// The original used blockIdx.x*gridDim.x, which made threads collide on
	// the same elements and skip others entirely.
	unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
	unsigned int stride = gridDim.x * blockDim.x;
	unsigned int offset = 0;
	__shared__ double cache[256];
	double temp = array[0];   // assumes n >= 1
	while(index + offset < n){
		temp = fmax(temp, array[index + offset]);
		offset += stride;
	}
	cache[threadIdx.x] = temp;
	__syncthreads();
	// Tree reduction within the block.
	unsigned int i = blockDim.x/2;
	while(i != 0){
		if(threadIdx.x < i){
			cache[threadIdx.x] = fmax(cache[threadIdx.x], cache[threadIdx.x + i]);
		}
		__syncthreads();
		i /= 2;
	}
	// One thread per block merges its block maximum under the global lock.
	if(threadIdx.x == 0){
		while(atomicCAS(mutex,0 ,1) != 0);
		*max = fmax(*max, cache[0]);
		atomicExch(mutex, 0);
	}
}
// Grid-stride minimum of array[0..n) merged into *min under a spin-lock.
// Launch assumptions: blockDim.x is a power of two <= 256, n >= 1, *mutex == 0.
__global__ void find_minimum_kernel(double *array, double *min, int *mutex, int n){
	// Bug fix: global index uses blockDim.x (original used gridDim.x, causing
	// colliding/skipped elements).
	unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
	unsigned int stride = gridDim.x * blockDim.x;
	unsigned int offset = 0;
	__shared__ double cache[256];
	double temp = array[0];   // assumes n >= 1
	while(index + offset < n){
		temp = fmin(temp, array[index + offset]);
		offset += stride;
	}
	cache[threadIdx.x] = temp;
	__syncthreads();
	// Tree reduction within the block.
	unsigned int i = blockDim.x/2;
	while(i != 0){
		if(threadIdx.x < i){
			cache[threadIdx.x] = fmin(cache[threadIdx.x], cache[threadIdx.x + i]);
		}
		__syncthreads();
		i /= 2;
	}
	// One thread per block merges its block minimum under the global lock.
	if(threadIdx.x == 0){
		while(atomicCAS(mutex,0 ,1) != 0);
		*min = fmin(*min, cache[0]);
		atomicExch(mutex, 0);
	}
}
// Grid-stride sum of array[0..n) accumulated into *mean under a spin-lock.
// NOTE: this produces the SUM; the caller divides by n to get the mean.
// Launch assumptions: blockDim.x is a power of two <= 256, *mutex == 0,
// *mean initialized to 0 before launch.
__global__ void mean_kernel(double *array, double *mean, int *mutex, int n){
	// Bug fix: global index uses blockDim.x (original used gridDim.x, causing
	// colliding/skipped elements).
	unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
	unsigned int stride = gridDim.x * blockDim.x;
	unsigned int offset = 0;
	__shared__ double cache[256];
	double temp = 0.0;
	while(index + offset < n){
		temp = temp + array[index + offset];
		offset += stride;
	}
	cache[threadIdx.x] = temp;
	__syncthreads();
	// Tree reduction within the block.
	unsigned int i = blockDim.x / 2;
	while(i != 0){
		if(threadIdx.x < i){
			cache[threadIdx.x] += cache[threadIdx.x + i];
		}
		__syncthreads();
		i /= 2;
	}
	// Bug fix: the writer is thread 0 (the original used thread 1, which
	// does not exist when blockDim.x == 1, silently dropping the result).
	if(threadIdx.x == 0){
		while(atomicCAS(mutex, 0 ,1) != 0);
		*mean += cache[0];
		atomicExch(mutex, 0);
	}
}
// Grid-stride sum of squared deviations from `mean`, accumulated into *d_std
// under a spin-lock. NOTE: produces the sum of squares; the caller divides
// and takes the square root. Launch assumptions: blockDim.x is a power of
// two <= 256, *mutex == 0, *d_std initialized to 0 before launch.
__global__ void std_kernel(double *array, double* d_std, int *mutex, int n, double mean){
	// Bug fix: global index uses blockDim.x (original used gridDim.x, causing
	// colliding/skipped elements).
	unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
	unsigned int stride = gridDim.x * blockDim.x;
	unsigned int offset = 0;
	__shared__ double cache[256];
	double temp = 0.0;
	while(index + offset < n){
		temp = temp + (array[index + offset]-mean)*(array[index + offset]-mean);
		offset += stride;
	}
	cache[threadIdx.x] = temp;
	__syncthreads();
	// Tree reduction within the block.
	unsigned int i = blockDim.x / 2;
	while(i != 0){
		if(threadIdx.x < i){
			cache[threadIdx.x] += cache[threadIdx.x + i];
		}
		__syncthreads();
		i /= 2;
	}
	// Bug fix: the writer is thread 0 (original used thread 1; see mean_kernel).
	if(threadIdx.x == 0){
		while(atomicCAS(mutex, 0 ,1) != 0);
		*d_std += cache[0];
		atomicExch(mutex, 0);
	}
}
// Fused grid-stride max/min/sum in one pass; results merged into *max, *min
// and *mean (as a running sum) under one spin-lock. Launch assumptions:
// blockDim.x is a power of two <= 256, n >= 1, *mutex == 0.
__global__ void concurrent_kernel(double* array,double* max,double* min, double *mean, int *mutex, int n){
	// Bug fix: global index uses blockDim.x (original used gridDim.x, causing
	// colliding/skipped elements).
	unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
	unsigned int stride = gridDim.x * blockDim.x;
	unsigned int offset = 0;
	__shared__ double cache_max[256];
	__shared__ double cache_min[256];
	__shared__ double cache_mean[256];
	double temp_max = array[0];   // assumes n >= 1
	double temp_min = array[0];
	double temp_mean = 0;
	while(index + offset < n){
		temp_max = fmax(temp_max, array[index + offset]);
		temp_min = fmin(temp_min,array[index + offset]);
		temp_mean += array[index + offset];
		offset += stride;
	}
	cache_max[threadIdx.x] = temp_max;
	cache_min[threadIdx.x] = temp_min;
	cache_mean[threadIdx.x] = temp_mean;
	__syncthreads();
	// Tree reduction over all three statistics at once.
	unsigned int i = blockDim.x/2;
	while(i != 0){
		if(threadIdx.x < i){
			cache_max[threadIdx.x] = fmax(cache_max[threadIdx.x], cache_max[threadIdx.x + i]);
			cache_min[threadIdx.x] = fmin(cache_min[threadIdx.x], cache_min[threadIdx.x + i]);
			cache_mean[threadIdx.x] += cache_mean[threadIdx.x + i];
		}
		__syncthreads();
		i /= 2;
	}
	// One thread per block merges its partial results under the global lock.
	if(threadIdx.x == 0){
		while(atomicCAS(mutex,0 ,1) != 0);
		*max = fmax(*max, cache_max[0]);
		*min = fmin(*min, cache_min[0]);
		*mean += cache_mean[0];
		atomicExch(mutex, 0);
	}
}
|
8,821 | //##########################################################//
// Name: Kirtan Mali //
// Roll no: 18AG10016 //
// Question 2: Matrix Transpose using Rect Tiles //
//##########################################################//
#include <stdio.h>
#include <stdlib.h>
// Cuda Libraries
#include <cuda.h>
#include <cuda_runtime.h>
// Macro for error checking and debugging
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
typedef long long int lli;
#define MAX_VAL 100
#define TILEX 32
#define TILEY 16
#define BLOCKX 32
#define BLOCKY 16
void printMat(float *matrix, lli n);
void transpose_CPU(float *matrix, float *output, int n);
float *createMat(lli n, int isempty, int seed);
// Coalesced n x n transpose through a BLOCKY x BLOCKX shared tile: each block
// reads a rectangular tile row-wise and writes the transposed tile so that
// both global accesses are contiguous. Launch assumption:
// blockDim == (BLOCKX, BLOCKY).
__global__ void transposeCoalesced_RECTTILES(float *matrix, float *output, int n)
{
    __shared__ float tile[BLOCKY][BLOCKX];
    // Input coordinates for this thread.
    int ix = blockDim.x * blockIdx.x + threadIdx.x;
    int iy = blockDim.y * blockIdx.y + threadIdx.y;
    // Re-linearize the thread id so the rectangular tile is read back in
    // transposed order.
    int irow = (threadIdx.y * blockDim.x + threadIdx.x) % blockDim.y;
    int icol = (threadIdx.y * blockDim.x + threadIdx.x) / blockDim.y;
    // Output coordinates (tile indices swapped across the diagonal).
    int ox = blockDim.y * blockIdx.y + irow;
    int oy = blockDim.x * blockIdx.x + icol;
    // Stage the input element (guarded for partial tiles at the matrix edge).
    if (ix < n && iy < n)
        tile[threadIdx.y][threadIdx.x] = matrix[iy * n + ix];
    // Bug fix: the barrier must be outside the divergent bounds check --
    // placing __syncthreads() inside the `if` is undefined behavior when a
    // partial tile makes threads diverge.
    __syncthreads();
    // tile[irow][icol] holds matrix[ox*n + oy], so the store is valid exactly
    // when (ox, oy) is in range (the original guarded by the input coords,
    // which could read entries no thread had written on partial tiles).
    if (ox < n && oy < n)
        output[oy * n + ox] = tile[irow][icol];
}
int main(int argc, char **argv)
{
    // Reads t test cases (each a matrix size n) from stdin, transposes a
    // random n x n matrix on the GPU, optionally prints input/output.
    cudaError_t err = cudaSuccess;
    int isprint = 1;
    if (argc > 1)
        isprint = 0;
    lli t;
    // Bug fix: check scanf results -- the original used t/n uninitialized on
    // malformed input.
    if (scanf("%lld", &t) != 1)
    {
        fprintf(stderr, "Expected test-case count on stdin\n");
        return 1;
    }
    while (t--)
    {
        srand(t);
        lli n;
        if (scanf("%lld", &n) != 1)
        {
            fprintf(stderr, "Expected matrix size on stdin\n");
            return 1;
        }
        size_t size = sizeof(float) * n * n;
        float *h_matrix = createMat(n, 0, t);
        float *h_output = createMat(n, 1, t);
        float *h_output_check = createMat(n, 1, t);
        float *d_matrix = NULL;
        float *d_output = NULL;
        CHECK(cudaMalloc((void **)&d_matrix, size));
        CHECK(cudaMalloc((void **)&d_output, size));
        CHECK(cudaMemcpy(d_matrix, h_matrix, size, cudaMemcpyHostToDevice));
        // One thread per element; ceil-divide for partial tiles.
        dim3 dimGrid((n + BLOCKX - 1) / BLOCKX, (n + BLOCKY - 1) / BLOCKY);
        dim3 dimBlock(BLOCKX, BLOCKY);
        transposeCoalesced_RECTTILES<<<dimGrid, dimBlock>>>(d_matrix, d_output, n);
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch transposeCoalesced_RECTTILES kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        CHECK(cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost));
        // transpose_CPU(h_matrix, h_output_check, n);
        if (isprint == 1)
        {
            printf("\n\n***** Original Matrix *****\n\n");
            printMat(h_matrix, n);
            printf("\n\n***** Transposed Matrix using GPU *****\n\n");
            printMat(h_output, n);
        }
        free(h_matrix);
        free(h_output);
        free(h_output_check);
        cudaFree(d_matrix);
        cudaFree(d_output);
    }
    return 0;
}
// Utility Functions
// Allocates an n x n float matrix: zero-filled when isempty == 1, otherwise
// uniform random values in [0, MAX_VAL]. Seeds rand() with seed+1 so matrices
// created with the same seed are reproducible. Exits on allocation failure.
float *createMat(lli n, int isempty, int seed)
{
    srand(seed+1);
    size_t size = sizeof(float) * n * n;
    float *matrix = (float *)malloc(size);
    if (matrix == NULL)
    {
        fprintf(stderr, "createMat: allocation of %zu bytes failed\n", size);
        exit(EXIT_FAILURE);
    }
    // Bug fix: lli loop index -- `int i < n*n` overflows for n >= 2^16.
    for (lli i = 0; i < n * n; i++)
    {
        if (isempty == 1)
            matrix[i] = 0.0f;
        else
            matrix[i] = (float)rand()/((float)RAND_MAX/MAX_VAL);
    }
    return matrix;
}
// Prints an n x n row-major matrix, one row per line, fixed-width cells.
void printMat(float *matrix, lli n)
{
    for (lli r = 0; r < n; r++)
    {
        for (lli c = 0; c < n; c++)
            printf("% 6.2f ", matrix[r * n + c]);
        printf("\n");
    }
}
// CPU reference: writes the transpose of the n x n row-major `matrix`
// into `output`.
void transpose_CPU(float *matrix, float *output, int n)
{
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++)
            output[r * n + c] = matrix[c * n + r];
}
|
8,822 | // see http://stackoverflow.com/questions/14818084/what-is-the-proper-include-for-the-function-sleep-in-c
//#include <stdio.h>
//#include <stdlib.h>
//#include <time.h>
//# include "nn-2_cuda.h"
//#define WARP_SIZE 16
//#define DEBUG false
//#define DEBUG true
/*
// use this and then if there is -DDEBUG it would be set but if not then it is false!
#ifndef DEBUG
#define DEBUG false
#endif
#ifndef DEBUGU
#define DEBUGU false
#endif
#ifdef _WIN32
#include <Windows.h>
#else
#include <unistd.h>
#endif
*/
// should be 2 as cuda from non_cuda one
/* ---------------- [[HELPER FUNCTIONS FOR GLOBAL MEMORY]] ---------------- */
// Allocates device memory for src_size floats and copies `src` into it.
// Returns the device pointer, or NULL if either CUDA call fails (the
// original ignored both return codes and could memcpy through a garbage
// pointer).
float *_copyHostDevice_CUDA(float *src, int src_size) {
	float *src_d = NULL;
	if (cudaMalloc((void**)&src_d, sizeof(float) * src_size) != cudaSuccess)
		return NULL;
	if (cudaMemcpy(src_d, src, sizeof(float) * src_size, cudaMemcpyHostToDevice) != cudaSuccess) {
		cudaFree(src_d);
		return NULL;
	}
	return src_d;
}
// Copies src_size floats from device pointer `src` to host memory. When
// `dst` is NULL a fresh host buffer is malloc'd (caller frees it); otherwise
// `dst` is filled and returned. Returns NULL on allocation failure.
float *_copyDeviceHost_CUDA(float *src, int src_size, float *dst=NULL) {
	float *target;
	if (dst == NULL) {
		target = (float*)malloc(sizeof(float) * src_size);
		if (target == NULL)   // allocation failure was unchecked before
			return NULL;
	} else {
		target = dst;
	}
	cudaMemcpy(target, src, sizeof(float) * src_size, cudaMemcpyDeviceToHost);
	return target;
}
/* ---------------- [[HELPER FUNCTIONS FOR TILING]] ---------------- */
/* Global 2-D thread coordinates (x = column direction, y = row direction). */
typedef struct {
int x;
int y;
} GlobalDim;
// Computes this thread's global (x, y) position from the standard
// blockDim/blockIdx/threadIdx triple. Note the parameters intentionally
// shadow the CUDA built-ins so callers pass them in explicitly.
__device__ GlobalDim getGlobalDim_CUDA(dim3 blockDim, dim3 blockIdx, dim3 threadIdx) {
GlobalDim gd;
gd.x = blockDim.x * blockIdx.x + threadIdx.x;
gd.y = blockDim.y * blockIdx.y + threadIdx.y;
return gd;
}
// Returns the 2-D grid size needed to cover a width x height domain with
// square blocks of block_size threads per side (ceiling division).
// Uses integer ceil-div instead of float ceil: float conversion loses
// precision for extents above 2^24 and can under-count blocks.
dim3 getGridBasedOnBlockSize_CUDA(int width, int height, int block_size) {
	int gridX = (width + block_size - 1) / block_size;
	int gridY = (height + block_size - 1) / block_size;
	return dim3(gridX, gridY);
}
|
8,823 | //nvcc -ptx find_CS0.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
// Per-particle cross-section evaluation: one thread per particle (flattened
// 2-D grid/block). For particle n with energy E_PE[n], fills sigma_el[n]
// (elastic, from two lookup tables split at 0.05), sigma_ion[n] (impact
// ionization table) and sigma_exc[n] (analytic excitation sum over len terms).
// NOTE(review): the table scans below assume e lies within each table's
// range; like the original, they are not bounded by the table length --
// confirm input ranges at the call site.
__device__ void EM1( double * E_PE,
double * x_el,
double * y_el,
double * x_sel,
double * y_sel,
double * x_ion,
double * y_ion,
double * k,
double * A_2,
double * omega_2,
double * v_2,
double * gamma_2,
double * sigma_el,
double * sigma_ion,
double * sigma_exc,
double sqr_A0,
double sqr_a0,
double q0,
int len,
int parNum) {
	// Flatten the 2-D launch into a single particle index.
	int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
	int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
	int threadsPerBlock = blockDim.x*blockDim.y;
	int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
	if ( n >= parNum ){
		return;
	}
	// ---- Elastic cross-section ----
	double e = E_PE[n];
	double a,b,sigma;
	int i;
	if ( e >= 0.05 ){
		// Advance to the bracketing interval x_el[i] <= e <= x_el[i+1].
		// Bug fix: the original condition (e>x[i] && e<x[i+1]) stepped PAST
		// the containing interval instead of stopping inside it.
		i = 0;
		while( e > x_el[i+1] ){
			i++;
		}
		a = ( x_el[i+1]-e )/( x_el[i+1] - x_el[i] );
		b = ( e-x_el[i] )/( x_el[i+1] - x_el[i] );
		// Bug fix: linear interpolation mixes y[i] and y[i+1]; the original
		// computed a*y[i] + b*y[i], which collapses to y[i].
		sigma = (a*y_el[i] + b*y_el[i+1]) * sqr_a0 * 2;
	}
	else{
		e = 1000*e;   // presumably a unit conversion for the low-energy table -- confirm
		i = 0;
		while( e > x_sel[i+1] ){
			i++;
		}
		a = ( x_sel[i+1]-e )/( x_sel[i+1] - x_sel[i] );
		b = ( e-x_sel[i] )/( x_sel[i+1] - x_sel[i] );
		sigma = (a*y_sel[i] + b*y_sel[i+1]) * sqr_A0;
	}
	sigma_el[n] = sigma;
	// ---- Electron impact ionization (same interpolation scheme) ----
	e = 1000 * E_PE[n];
	i = 0;
	while( e > x_ion[i+1] ){
		i++;
	}
	a = ( x_ion[i+1]-e )/( x_ion[i+1] - x_ion[i] );
	b = ( e-x_ion[i] )/( x_ion[i+1] - x_ion[i] );
	sigma_ion[n] = (a*y_ion[i] + b*y_ion[i+1]) * sqr_A0;
	// ---- Electron impact excitation (analytic sum over len channels) ----
	// NOTE(review): powf truncates the double operands to float, kept from
	// the original -- confirm whether full double precision (pow) is wanted.
	e = E_PE[n];
	sigma = 0;
	for (i = 0;i<len ;i++ ){
		sigma = sigma + (q0 * A_2[i]/powf(k[i],2))* powf(k[i]/e,omega_2[i]) * powf(powf(1-k[i]/e,gamma_2[i]),v_2[i]);
	}
	sigma_exc[n] = sigma/10000;
}
// Kernel entry point: forwards all arguments unchanged to the __device__
// worker EM1 (one particle per thread). The name is a leftover from the
// template this file was derived from; it does not compute a Mandelbrot set.
__global__ void processMandelbrotElement(
double * E_PE,
double * x_el,
double * y_el,
double * x_sel,
double * y_sel,
double * x_ion,
double * y_ion,
double * k,
double * A_2,
double * omega_2,
double * v_2,
double * gamma_2,
double * sigma_el,
double * sigma_ion,
double * sigma_exc,
double sqr_A0,
double sqr_a0,
double q0,
int len,
int parNum) {
EM1(E_PE,x_el,y_el,x_sel,y_sel,x_ion,y_ion,k,A_2,omega_2,v_2,gamma_2,sigma_el,sigma_ion,sigma_exc,sqr_A0,sqr_a0,q0,len,parNum);
}
|
8,824 | #include "includes.h"
// Monte Carlo pi sample test: one thread per sample. `randomnums` stores
// (x, y) pairs in consecutive slots; thread tid reads pair 2*tid/2*tid+1 and
// writes 1 to count_d[tid] when the point lies inside the unit circle.
__global__ void kernel(int* count_d, float* randomnums)
{
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	int xidx = 2 * tid;       // even slot: x coordinate
	int yidx = xidx + 1;      // odd slot:  y coordinate
	double x = randomnums[xidx];
	double y = randomnums[yidx];
	double z = (x * x) + (y * y);
	count_d[tid] = (z <= 1) ? 1 : 0;
}
8,825 | #include <iostream>
#include "gMat.cuh"
#include <vector>
#include "mytime.h"
#include <functional>
#include <math.h>
#include <stdlib.h>
#include <string>
// Runs 2*iter matrix products, ping-ponging the result between B and C:
// C = A*B then B = A*C each iteration. `mem` is the shared-memory budget
// handed to prod().
void manyMult(gMat& A, gMat& B, gMat& C,int iter,int mem){
for (int i = 0; i < iter; ++i) {
prod(A, B, C, mem);
prod(A, C, B, mem);
}
}
// Prints the tiling configuration implied by a shared-memory budget of `mem`
// bytes: two w x w float tiles must fit, so w = floor(sqrt(mem / 2 / 4)).
// Fix: missing space produced output such as "...49152shared memory".
void multstat(int mem, int row, int col){
std::cout << "multiplying " << row << " by "<< col <<" matrices.\n";
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
std::cout << "using "<< mem << " of "<<prop.sharedMemPerBlock << " shared memory\n";
int w=floor(sqrt(mem/2/sizeof(float)));
std::cout << "Tile sizes will be: "<< w<< " by "<< w << "\n";
std::cout << "This amounts to: " << w*w << " threads \n";
std::cout << ceil(row/ (float) w)*ceil(col/ (float) w) << " tiles will be used "<<std::endl;
}
int main(int argc, char *argv[]){
// Tile width taken from argv[1] (default 20); it determines the shared-memory
// budget passed down to the multiply kernels.
int w;
if ( argc >= 2) w=atoi(argv[1]);
else w=20;
int r=1000;
int c=1000;
// A is presumably the r x r identity (eye) -- TODO confirm against gMat.cuh.
gMat A=eye(r);
gMat B=randgMat(r,c);
gMat C=randgMat(r,c);
A.name="A";
B.name="B";
C.name="C";
// Bytes for two w x w float tiles.
int mem=2*w*w*sizeof(float);
// NOTE(review): std::bind copies A, B and C by value; this relies on gMat's
// copy semantics sharing the underlying buffers -- verify against gMat.cuh.
auto mm=std::bind(manyMult,A,B,C,300,mem);
multstat(mem,r,c);
mm();
A.cleanup();
B.cleanup();
C.cleanup();
}
// Smoke test: C = A * B for a 2x2 identity-scaled A and a 2x3 B, printing
// all three matrices before and after the product.
void test1(){
std::vector<float> adat = {1, 2, 3, 4};
std::vector<float> bdat = {2, 0, 0, 2, 0, 2};
std::vector<float> cdat(6, 0.0f);
gMat A{adat, 2, 2};
gMat B{bdat, 2, 3};
gMat C{cdat, 2, 3};
std::cout <<"A:\n" << A << std::flush;
std::cout <<"B:\n" << B << std::flush;
int mem = 8 * sizeof(float);
prod(A, B, C, mem);
std::cout <<"C:\n" << C << std::flush;
}
|
8,826 | #include <stdio.h>
#include <stdlib.h>
#include "math.h"
#define THREADS_PER_BLOCK 1024
// One thread per matrix cell: writes the index of the nearest seed
// (Euclidean metric on (row, col) coordinates) into matrix_d[x].
// Cells already holding -1 are left untouched.
// Fixes: `currentSeed` was uninitialized (garbage written when
// numberOfSeeds == 0; now -1), and distances are compared squared --
// sqrt is monotonic, so the nearest seed is unchanged while pow()/sqrt()
// per candidate are avoided.
__global__ void cu_voronoi(int *matrix_d, int *seeds_d, int numberOfSeeds, int width, int sizeOfArray) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < sizeOfArray && matrix_d[x] != -1) {
double minDistance = INFINITY;
int currentSeed = -1;
int matrixI = x / width;
int matrixJ = x % width;
for (int i = 0; i < numberOfSeeds; i++) {
int candidateSeed = seeds_d[i];
double di = matrixI - candidateSeed / width;
double dj = matrixJ - candidateSeed % width;
double distance = di * di + dj * dj;
if (distance < minDistance) {
minDistance = distance;
currentSeed = i;
}
}
matrix_d[x] = currentSeed;
}
}
extern "C" void voronoi(int *matrix, int *seeds, int numberOfSeeds, int width, int height) {
int *matrix_d;
int *seeds_d;
int sizeOfArray = width * height;
cudaMalloc((void**) &matrix_d, sizeof(int) * sizeOfArray);
cudaMalloc((void**) &seeds_d, sizeof(int) * numberOfSeeds);
cudaMemcpy(matrix_d, matrix, sizeof(int) * sizeOfArray, cudaMemcpyHostToDevice);
cudaMemcpy(seeds_d, seeds, sizeof(int) * numberOfSeeds, cudaMemcpyHostToDevice);
int blocks = ceil((float)sizeOfArray/THREADS_PER_BLOCK);
cu_voronoi <<<blocks, THREADS_PER_BLOCK>>> (matrix_d, seeds_d, numberOfSeeds, width, sizeOfArray);
cudaMemcpy(matrix, matrix_d, sizeof(int) * sizeOfArray, cudaMemcpyDeviceToHost);
cudaFree(matrix_d);
cudaFree(seeds_d);
}
|
8,827 | //////////////////////////////////////////////////////////////////////////
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// Author: M. Winters (2016)
// Institution: Chalmers Universtiy of Technology (MC2)
// email: mesoic@protonmail.com
//
//////////////////////////////////////////////////////////////////////////
//
// Graphene CV Modeling Kernel
//
// Compile with: nvcc -c -arch=sm_20 <filename>.cu
//
// Compilation will create CV_kernel.cubin which provides the
// functions below. The .cubin can then be imported into
// Python(PyCUDA) or C/C++ code.
//
//////////////////////////////////////////////////////////////////////////
//
// This kernel provides GPU methods to calculate the monolayer and bilayer
// DOS, eF, Capacitance, and other related quantities. Dit and surface
// potential fluctuations are also included.
//
// Characterization and physical modeling of MOS capacitors in epitaxial
// graphene monolayers and bilayers on 6H-SiC
//
// AIP Advances 6, 085010 (2016); https://doi.org/10.1063/1.4961361
//
#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
///////////////////////
// CONSTANTS //
///////////////////////
// Physical and model constants in device constant memory (broadcast reads).
__device__ __constant__ float PI = 3.141592654f; // pi
__device__ __constant__ float HB = 6.58211e-16f; // reduced Planck constant [eV*s]
__device__ __constant__ float KB = 8.617332e-5f; // Boltzmann constant [eV/K]
__device__ __constant__ float VF = 1.000000e+8f; // Fermi velocity -- presumably [cm/s]; confirm units
__device__ __constant__ float G = 0.400f; // model parameter used as (G/2) DOS offset -- TODO confirm meaning
__device__ __constant__ float GS = 4.000f; // degeneracy prefactor (presumably spin x valley = 4)
__device__ __constant__ float e = 1.602e-19f; // elementary charge [C]
__device__ __constant__ float R = 1.414213562f; // sqrt(2), used in erf arguments
//////////////////////////////////
// ATOMIC OPERATIONS //
//////////////////////////////////
extern "C" {
__device__ void addToC(float *r, float* rx, float* a, float n, int INT) {
for (int i=0; i<INT;i ++){
atomicAdd(&r[i], rx[i] + n*a[i]);
}
}
}
//////////////////////////////////////////
// CALCULATION OF DOS/CARRIERS //
//////////////////////////////////////////
// Monolayer
extern "C"{
__device__ float mlDOS_PW(float eF){
return (GS/(2.0*PI))*(1.0/(HB*HB*VF*VF))*abs(eF);
}
}
extern "C" {
__device__ float mlCarriers_PW(float ef, float T){
int INT = 1024;
float n = 0.0;
float p = 0.0;
for(int i = 0; i<INT; i++){
n += mlDOS_PW(float(i)/float(INT))*(1.0/(1.0 + exp( ((float(i)/float(INT)) -ef )/(KB*T))))*(1.0/INT);
p += mlDOS_PW(float(i)/float(INT))*(1.0/(1.0 + exp( ef + (float(i)/float(INT)) )/(KB*T)))*(1.0/INT);
}
return n-p;
}
}
// Bilayer. DOS includes energy gap model
extern "C" {
__device__ float blDOS_PW(float eF, float eG, float eGs){
if (eG < 0.0){ return (GS/(2.0*PI))*(1.0/(HB*HB*VF*VF))*(abs(eF) + (G/2.0));}
double dFp;
double dFm;
dFp = float( 0.5* (1 - erf ( double( (eF+eG)/(R*eGs) ) )));
dFm = 1.0 - float( 0.5* (1 - erf ( double ( (eF-eG)/(R*eGs) ) )));
return (float(dFm)+float(dFp)) * (GS/(2.0*PI)) * (1.0/(HB*HB*VF*VF))*(abs(eF) + (G/2.0));
}
}
extern "C" {
__device__ float blCarriers_PW(float ef, float eG, float eGs, float T){
int INT = 1024;
float n = 0.0;
float p = 0.0;
for(int i = 0; i<INT; i++){
n += blDOS_PW(float(i)/float(INT), eG, eGs) * (1.0/(1.0 + exp(( (float(i)/float(INT)) -ef )/(KB*T))))*(1.0/INT);
p += blDOS_PW(float(i)/float(INT), eG, eGs) * (1.0/(1.0 + exp(( ef + (float(i)/float(INT)) )/(KB*T))))*(1.0/INT);
}
return n-p;
}
}
//////////////////////////////////////////////////
// MONTE CARLO SIMULATION of CAPACITANCE //
//////////////////////////////////////////////////
extern "C" {
__global__ void gen_rand(float* eF, float* eFr, float deF, unsigned long SEED){
const int tid = threadIdx.x;
curandState state;
curand_init(SEED, tid, 0, &state);
eF[tid] = eF[tid];
eFr[tid] = deF*curand_normal(&state) + eF[tid];
}
}
extern "C" {
__global__ void gen_capMC(float* eF, float* eFr, float* eG, float* deF, float* Dit,
float*C, float COX, float M, int DEPTH, unsigned long SEED){
const int tid = threadIdx.x;
curandState state;
curand_init(SEED, tid, 0, &state);
float num;
float den;
for (int i = 0; i<DEPTH; i++){
eFr[tid] = deF[0]*( exp(-1* (eF[tid]*eF[tid])/(2*deF[1]*deF[1])) )*curand_normal(&state) + eF[tid];
num = (e*COX * ( M*mlDOS_PW(eFr[tid]) + (1.0-M)*blDOS_PW(eFr[tid], eG[0], eG[1]) + Dit[tid]));
den = (COX + e*M*mlDOS_PW(eFr[tid]) + e*(1.0-M)*blDOS_PW(eFr[tid], eG[0], eG[1]) + e*Dit[tid]);
C[tid] += num/den;
}
C[tid] = C[tid]/float(DEPTH);
}
}
///////////////////////////////////////////
// NEWTONS METHOD //
///////////////////////////////////////////
extern "C"{
__device__ float integrate(int v_i, int vD_i, float* Dit, float DELTA){
float _int = 0;
// Some bad coding to take care of return sweep
if (v_i == 200){
v_i = 0;
}
if (v_i > 100){
v_i -= 2*(v_i%100);
}
// Otherwise just integrate like normal
if (v_i < vD_i){
for(int i = v_i; i < vD_i; i++){
_int += Dit[i]*DELTA ;
}
}
else{
for(int i = vD_i; i < v_i; i++){
_int -= Dit[i]*DELTA ;
}
}
return _int;
}
}
extern "C"{
__global__ void solve_eF(float* v, float* vD, float* Dit, float* eF, float* eG,
float COX, float T, float M, float FLYWHEEL){
const int tid = threadIdx.x;
const float DELTA = v[1]-v[0];
float lhs = v[tid] - vD[0] - ((e/COX)*integrate(tid, int(vD[1]), Dit, DELTA));
float rhs = ( eF[tid] + M*(e/COX)*mlCarriers_PW(eF[tid], T) + (1-M)*(e/COX)*blCarriers_PW(eF[tid], eG[0], eG[1], T) );
float dhs;
if (tid == int(vD[1])){
eF[tid] = 0.0;
return;
}
if (tid - 2*(tid%100) == int(vD[1])){
eF[tid] = 0.0;
return;
}
for (int i = 0; i < 100; i++){
float conv = (lhs - rhs);
if (abs(conv) < 1e-6){
return;
}
else{
rhs = ( eF[tid] + M*(e/COX)*mlCarriers_PW(eF[tid], T) + (1-M)*(e/COX)*blCarriers_PW(eF[tid], eG[0], eG[1], T) );
dhs = ( 1 + M*(e/COX)*mlDOS_PW(eF[tid]) + (1-M)*(e/COX)*blDOS_PW(eF[tid], eG[0], eG[1]) );
eF[tid] += FLYWHEEL*(lhs - rhs)/(dhs);
}
}
return;
}
}
//////////////////////////////////////////
// VECTORWISE CARRIERS/DOS //
//////////////////////////////////////////
extern "C"{
__global__ void mlDOS_vec(float* eF, float* RO){
const int tid = threadIdx.x;
RO[tid] = mlDOS_PW(eF[tid]);
return;
}
}
extern "C"{
__global__ void blDOS_vec(float* eF, float* eG, float* RO){
const int tid = threadIdx.x;
RO[tid] = blDOS_PW(eF[tid], eG[0], eG[1]);
return;
}
}
extern "C"{
__global__ void mlCarriers_vec(float* eF, float* n, float* p, float t){
const int tid = threadIdx.x;
int INT = 1024;
for(int i = 0; i<INT; i++){
n[tid] += mlDOS_PW(float(i)/float(INT))*(1.0 / (1.0 + exp( ((float(i)/float(INT)) -eF[tid]) /(KB*t))))*(1.0/INT);
}
for(int i = 0; i<INT; i++){
p[tid] += mlDOS_PW(float(i)/float(INT))*(1.0 / (1.0 + exp( ((float(i)/float(INT)) +eF[tid]) /(KB*t))))*(1.0/INT);
}
return;
}
}
extern "C"{
__global__ void blCarriers_vec(float* eF, float* eG, float* n, float* p, float t){
const int tid = threadIdx.x;
int INT = 1024;
for(int i = 0; i<INT; i++){
n[tid] += blDOS_PW(float(i)/float(INT), eG[0], eG[1])*(1.0 / (1.0 + exp( ((float(i)/float(INT)) -eF[tid]) /(KB*t))))*(1.0/INT);
}
for(int i = 0; i<INT; i++){
p[tid] += blDOS_PW(float(i)/float(INT), eG[0], eG[1])*(1.0 / (1.0 + exp( ((float(i)/float(INT)) +eF[tid]) /(KB*t))))*(1.0/INT);
}
return;
}
}
|
8,828 | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
#include <stdio.h>
#define NUM_WORK_ELEMENTS (512)
#define WORK_BUFFER_SIZE (NUM_WORK_ELEMENTS * sizeof(float))
#define NUM_NODES NUM_WORK_ELEMENTS
#define NUM_FRAMES 1000
typedef struct {
int index; // thread index that performs this node's work (see add())
float adder; // per-frame value the kernel adds
} node_t;
typedef struct {
node_t* vector_table[NUM_NODES]; // device-side pointers into nodes[] (filled in init())
node_t nodes[NUM_NODES];
} graph_control_t;
float* host_in_p;
float* host_out_p;
graph_control_t* host_gc_p;
float* dev_in_p;
float* dev_out_p;
graph_control_t* dev_gc_p;
int global_index[NUM_NODES] = {0};
// CHECK: hipGraph_t graph;
// CHECK-NEXT: hipGraphExec_t graphExec;
// CHECK-NEXT : hipGraphExec_t instance;
// CHECK-NEXT : hipStream_t stream;
// CHECK-NEXT : hipGraphNode_t node[NUM_NODES] = { 0 };
cudaGraph_t graph;
cudaGraphExec_t graphExec;
cudaGraphExec_t instance;
cudaStream_t stream;
cudaGraphNode_t node[NUM_NODES] = { 0 };
// Kernel body for one CUDA-graph node: the node's index is smuggled in as a
// void* kernel argument, dereferenced through the device-side vector_table,
// and only the thread matching that index does work -- adding
// `adder + node_index` to a[i] into b[i]. The 100-iteration j-loop rewrites
// the same element (artificial load for the benchmark).
__global__
void add(const void* index,
const graph_control_t* gc_p,
const float *a,
float* b) {
int node_index = (long)index;
float adder;
int i = threadIdx.x;
// Indirect through the device-resident control block for this node.
node_t* node_p = gc_p->vector_table[node_index];
node_index = node_p->index;
adder = node_p->adder;
if (i == node_index) {
for (int j = 0; j < 100; j++)
b[i] = a[i] + adder + node_index;
}
}
// One-time setup: allocates host/device buffers, builds a linear chain of
// NUM_NODES kernel graph nodes (each depending on the previous one),
// instantiates the graph, and uploads the constant input buffer.
// (CHECK comments are hipify lit-test directives -- leave them in place.)
void init(void) {
int i = 0;
// CHECK: hipStreamCreate(&stream);
cudaStreamCreate(&stream);
// NOTE(review): WORK_BUFFER_SIZE is already in bytes, so these host mallocs
// over-allocate by sizeof(float)x and the init loops below iterate over the
// byte count -- harmless here (copies use WORK_BUFFER_SIZE bytes) but
// probably not what was intended.
host_in_p = (float*) malloc(sizeof(float) * WORK_BUFFER_SIZE);
host_out_p = (float*) malloc(sizeof(float) * WORK_BUFFER_SIZE);
host_gc_p = (graph_control_t*) malloc(sizeof(graph_control_t));
for (i = 0; i < WORK_BUFFER_SIZE; ++i) {
host_in_p[i] = 1;
}
for (i = 0; i < WORK_BUFFER_SIZE; ++i) {
host_out_p[i] = 42;
}
// CHECK: hipMalloc(&dev_in_p, WORK_BUFFER_SIZE);
// CHECK-NEXT: hipMalloc(&dev_out_p, WORK_BUFFER_SIZE);
// CHECK-NEXT: hipMalloc(&dev_gc_p, sizeof(graph_control_t));
cudaMalloc(&dev_in_p, WORK_BUFFER_SIZE);
cudaMalloc(&dev_out_p, WORK_BUFFER_SIZE);
cudaMalloc(&dev_gc_p, sizeof(graph_control_t));
// vector_table entries are device addresses (pointer arithmetic on dev_gc_p),
// valid only when dereferenced on the device.
for (i = 0; i < NUM_NODES; ++i) {
host_gc_p->nodes[i].adder = 0;
host_gc_p->nodes[i].index = i;
host_gc_p->vector_table[i] = &(dev_gc_p->nodes[i]);
global_index[i] = i;
}
// CHECK: if (hipGraphCreate(&graph, 0))
if (cudaGraphCreate(&graph, 0))
printf("Failed to create graph\n");
/* Create one long vertical graph with dependency between each element
Would have parallel nodes and multiple instances of
it running in parallel if this was a real use-case,
but since that is not the point here, lets skip that.
*/
for (i = 0; i < NUM_NODES; ++i) {
void* kargs[] = { &global_index[i],
&dev_gc_p,
&dev_in_p,
&dev_out_p };
// CHECK: hipKernelNodeParams params = { .func = (void*)add,
cudaKernelNodeParams params = { .func = (void*)add,
.gridDim = dim3(1, 1, 1),
.blockDim = dim3(NUM_WORK_ELEMENTS, 1 ,1),
.sharedMemBytes = 0,
.kernelParams = kargs,
.extra = NULL };
// CHECK: if (hipGraphAddKernelNode(&node[i],
if (cudaGraphAddKernelNode(&node[i],
graph,
0,
0,
&params))
printf("Failed to create kernel node\n");
}
// Serialize the nodes: node[i+1] waits for node[i].
for (i = 0; i < (NUM_NODES - 1 ); ++i)
// CHECK: hipGraphAddDependencies(graph,
cudaGraphAddDependencies(graph,
&node[i],
&node[i+1],
1);
// CHECK: hipGraphInstantiate(&graphExec,
cudaGraphInstantiate(&graphExec,
graph,
NULL,
NULL,
0);
/* Same input for dataplane every frame */
// CHECK: hipMemcpy(dev_in_p,
cudaMemcpy(dev_in_p,
host_in_p,
WORK_BUFFER_SIZE,
// CHECK: hipMemcpyHostToDevice);
cudaMemcpyHostToDevice);
}
// Releases host buffers and device buffers, then resets the device.
void clean_up() {
free(host_in_p);
free(host_out_p);
free(host_gc_p);
// CHECK: hipFree(dev_in_p);
// CHECK-NEXT: hipFree(dev_out_p);
// CHECK-NEXT: hipFree(dev_gc_p);
// CHECK-NEXT: hipDeviceReset();
cudaFree(dev_in_p);
cudaFree(dev_out_p);
cudaFree(dev_gc_p);
cudaDeviceReset();
}
// Enqueues the instantiated graph on the shared stream (asynchronous;
// callers synchronize via cudaStreamSynchronize).
void graph_launch() {
// CHECK: hipGraphLaunch(graphExec, stream);
cudaGraphLaunch(graphExec, stream);
}
/* Set up graphs, update input, execute and read out output once per frame */
/* Set up graphs, update input, execute and read out output once per frame */
int main (void) {
int i = 0;
/* Set up kernels (including parameters) and graphs once at init stage */
init();
/* Execute a bunch of frames */
for (int framenbr = 0; framenbr < NUM_FRAMES; ++framenbr) {
/* Set up control structure for one frame */
for(i = 0; i < NUM_NODES; ++i) {
host_gc_p->nodes[i].adder = framenbr * 1.0;
}
/* One copy for all data needed to execute one frame, queued in same stream as the rest */
// CHECK: hipMemcpyAsync(dev_gc_p,
cudaMemcpyAsync(dev_gc_p,
host_gc_p,
sizeof(graph_control_t),
// CHECK: hipMemcpyHostToDevice,
cudaMemcpyHostToDevice,
stream);
/* Kick graph */
graph_launch();
/* One read-out for all data produced this frame, queued in same stream as the rest. */
// CHECK: hipMemcpyAsync(host_out_p,
cudaMemcpyAsync(host_out_p,
dev_out_p,
WORK_BUFFER_SIZE,
// CHECK: hipMemcpyDeviceToHost,
cudaMemcpyDeviceToHost,
stream);
/*****************************************************************************************/
/* Here, use the offloaded CPU core to do things involved in setting up the control and */
/* data plane info for next frame */
/*****************************************************************************************/
/* If still not done, wait for current frame to finish */
// CHECK: hipStreamSynchronize(stream);
cudaStreamSynchronize(stream);
/* Just some printouts to uncomment for quick sanity check of functional behaviour */
printf("\n\nOutput buffer, frame %d:\n", framenbr);
for(i = 0; i < NUM_WORK_ELEMENTS; ++i) {
printf("%f ", host_out_p[i]);
if (i && i % 16 == 0)
printf("\n");
}
}
clean_up();
return 0;
}
|
8,829 | /**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
int size;
#define BLOCK_SIZE 32
// A size x size matrix stored as an array of row pointers; both levels are
// allocated with cudaMallocManaged (see allocate_matrix), so host and device
// can dereference them.
typedef struct
{
float ** element;
} matrix;
// Wall-clock time in nanoseconds since the epoch.
long long wall_clock_time()
{
#ifdef __linux__
// Nanosecond-resolution clock on Linux.
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return (long long)ts.tv_nsec + (long long)ts.tv_sec * 1000000000ll;
#else
// Fallback: microsecond clock scaled up to nanoseconds.
struct timeval tv;
gettimeofday(&tv, NULL);
return (long long)(tv.tv_usec * 1000) + (long long)tv.tv_sec * 1000000000ll;
#endif
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
/**
 * Allocates a size x size matrix in CUDA managed memory: first the array of
 * row pointers, then one contiguous row at a time. Exits with an error
 * message on any allocation failure.
 **/
void allocate_matrix(matrix* m)
{
cudaError_t rc = cudaMallocManaged((void**)&(m->element), sizeof(float*) * size);
if (rc != cudaSuccess)
{
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc));
exit(1);
}
for (int row = 0; row < size; row++)
{
rc = cudaMallocManaged((void**)&(m->element[row]), sizeof(float) * size);
if (rc != cudaSuccess)
{
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc));
exit(1);
}
}
}
/**
* Free the memory allocated for a matrix.
**/
/**
 * Releases a matrix allocated by allocate_matrix: every row first,
 * then the row-pointer array itself.
 **/
void free_matrix(matrix* m) {
for (int row = 0; row < size; row++)
cudaFree(m->element[row]);
cudaFree(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
/**
 * Fills the matrix with pseudo-random values in [0, 9].
 **/
void init_matrix(matrix m)
{
for (int row = 0; row < size; row++)
for (int col = 0; col < size; col++)
m.element[row][col] = rand() % 10;
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
/**
 * Zeroes every element of the matrix.
 **/
void init_matrix_zero(matrix m)
{
for (int row = 0; row < size; row++)
for (int col = 0; col < size; col++)
m.element[row][col] = 0.0;
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
/**
 * Sequential O(n^3) reference multiply: result += a * b.
 * `result` is expected to be zero-initialized (allocate_matrix uses managed
 * memory, which starts zeroed) since products are accumulated into it.
 **/
void mm(matrix a, matrix b, matrix result)
{
for (int row = 0; row < size; row++)
for (int col = 0; col < size; col++)
for (int k = 0; k < size; k++)
result.element[row][col] += a.element[row][k] * b.element[k][col];
}
/**
* Each kernel computes the result element (i,j).
*/
/**
 * Tiled matrix multiply: result = a * b for row-major size x size matrices.
 * Launch with BLOCK_SIZE x BLOCK_SIZE thread blocks covering the output.
 *
 * Fixes: the original returned early for out-of-range threads, skipping the
 * __syncthreads() barriers inside the tile loop (undefined behaviour whenever
 * size is not a multiple of BLOCK_SIZE), and read past row/column ends when
 * loading partial tiles. All threads now reach every barrier; tile loads and
 * the final store are bounds-checked (out-of-range tile slots load 0, which
 * contributes nothing to the dot product).
 */
__global__ void mm_kernel(matrix a, matrix b, matrix result, int size)
{
int col = blockIdx.x * blockDim.x + threadIdx.x; // output column
int row = blockIdx.y * blockDim.y + threadIdx.y; // output row
// +1 padding on the inner dimension avoids shared-memory bank conflicts.
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE + 1];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE + 1];
float acc = 0.0f;
for (int m = 0; m < size; m += BLOCK_SIZE) {
int aCol = m + threadIdx.x;
int bRow = m + threadIdx.y;
As[threadIdx.y][threadIdx.x] = (row < size && aCol < size) ? a.element[row][aCol] : 0.0f;
Bs[threadIdx.y][threadIdx.x] = (bRow < size && col < size) ? b.element[bRow][col] : 0.0f;
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; k++)
acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
__syncthreads();
}
if (row < size && col < size)
result.element[row][col] = acc;
}
/**
 * Prints the matrix row by row, each element with 2 decimal places.
 **/
void print_matrix(matrix m)
{
for (int row = 0; row < size; row++)
{
printf("row %4d: ", row);
for (int col = 0; col < size; col++)
printf("%6.2f ", m.element[row][col]);
printf("\n");
}
}
// Driver: allocates four managed matrices, multiplies a*b on the CPU and the
// GPU, times both, and checks the results element-for-element.
void work()
{
matrix a, b, result1, result2;
long long before, after;
int correct, i, j, dim;
cudaError_t rc;
// Allocate memory for matrices
allocate_matrix(&a);
allocate_matrix(&b);
allocate_matrix(&result1);
allocate_matrix(&result2);
// Initialize matrix elements
init_matrix(a);
init_matrix(b);
// Perform sequential matrix multiplication
before = wall_clock_time();
mm(a, b, result1);
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// Perform CUDA matrix multiplication
dim3 block(32, 32); // a block of 32 x 32 CUDA threads
dim = (size % 32 == 0) ? size / 32 : size / 32 + 1; // ceil(size/32)
dim3 grid(dim, dim); // a grid of CUDA thread blocks
before = wall_clock_time();
mm_kernel<<<grid, block>>>(a, b, result2, size);
// Kernel launches are asynchronous; synchronize so the timing is meaningful.
cudaDeviceSynchronize();
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// was there any error?
rc = cudaGetLastError();
if (rc != cudaSuccess)
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
// Compare the results
// NOTE(review): exact float equality between CPU and GPU results -- holds
// only while all products/sums stay exactly representable; a tolerance
// comparison would be more robust.
correct = 1;
for (i = 0; correct && i < size; i++)
for (j = 0; j < size; j++)
if (result1.element[i][j] != result2.element[i][j]) {
correct = 0;
break;
}
if (correct)
printf("The result matrices are identical!\n");
else
printf("Difference in result matrices at element (%d, %d)!\n", i, j);
free_matrix(&a);
free_matrix(&b);
free_matrix(&result1);
free_matrix(&result2);
}
// Entry point: size comes from argv[1] (default 1024).
// Fix: atoi returns 0 on non-numeric input, and a negative argument was
// accepted as-is; non-positive sizes now fall back to the default.
int main(int argc, char ** argv)
{
srand(0); // fixed seed -> reproducible matrices
printf("Usage: %s <size>\n", argv[0]);
if (argc >= 2)
size = atoi(argv[1]);
else
size = 1024;
if (size <= 0)
size = 1024;
fprintf(stderr,"Sequential matrix multiplication of size %d\n", size);
// Multiply the matrices
work();
return 0;
}
|
8,830 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define A1 0.31938153f
#define A2 -0.356563782f
#define A3 1.781477937f
#define A4 -1.821255978f
#define A5 1.330274429f
#define RSQRT2PI 0.3989422804f
// Cumulative normal distribution via the Abramowitz & Stegun polynomial
// approximation (Hastings coefficients A1..A5). The polynomial covers the
// lower tail; the result is mirrored for d > 0.
__device__ float cndGPU(float d)
{
const float K = 1.0f / (1.0f + 0.2316419f * fabsf(d));
const float poly = K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))));
float cnd = RSQRT2PI * expf(- 0.5f * d * d) * poly;
if(d > 0)
cnd = 1.0f - cnd;
return cnd;
}
///////////////////////////////////////////////////////////////////////////////
// Black-Scholes formula for both call and put
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Black-Scholes closed form: computes call and put prices together, since
// they share d1/d2 and the cumulative-normal evaluations.
///////////////////////////////////////////////////////////////////////////////
__device__ void BlackScholesBodyGPU
(
float& CallResult,
float& PutResult,
float S, //Stock price
float X, //Option strike
float T, //Option years
float R, //Riskless rate
float V //Volatility rate
)
{
const float sqrtT = sqrtf(T);
const float d1 = (logf(S / X) + (R + 0.5f * V * V) * T) / (V * sqrtT);
const float d2 = d1 - V * sqrtT;
const float CNDD1 = cndGPU(d1);
const float CNDD2 = cndGPU(d2);
// Discount factor shared by both legs.
const float expRT = expf(- R * T);
CallResult = S * CNDD1 - X * expRT * CNDD2;
PutResult = X * expRT * (1.0f - CNDD2) - S * (1.0f - CNDD1);
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of optN options on GPU
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Prices optN options on the GPU with a grid-stride loop: every index in
// [0, optN) is handled exactly once with coalesced accesses regardless of
// launch configuration.
////////////////////////////////////////////////////////////////////////////////
__global__ void BlackScholesGPU(
float *d_CallResult,
float *d_PutResult,
float *d_StockPrice,
float *d_OptionStrike,
float *d_OptionYears,
float Riskfree,
float Volatility,
int optN
)
{
const int start = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
for (int opt = start; opt < optN; opt += stride) {
BlackScholesBodyGPU(
d_CallResult[opt],
d_PutResult[opt],
d_StockPrice[opt],
d_OptionStrike[opt],
d_OptionYears[opt],
Riskfree,
Volatility
);
}
}
// Uniform random float in [low, high]: linear interpolation at a uniform
// t in [0, 1] drawn from the C library PRNG (seed with srand for repeatability).
float RandFloat(float low, float high){
const float frac = (float)rand() / (float)RAND_MAX;
return (1.0f - frac) * low + frac * high;
}
#define OPT_N 400000
const int NUM_ITERATIONS = 512;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02;
const float VOLATILITY = 0.30;
// Generates OPT_N random options, prices them NUM_ITERATIONS times on the
// GPU, and copies the results back.
// Fixes: the kernel was invoked with d_OptionStrike and d_StockPrice SWAPPED
// relative to its (StockPrice, OptionStrike) parameter order, exchanging S
// and X in the Black-Scholes formula; also all host/device buffers were
// leaked -- they are now released before exit.
int main()
{
float * h_CallResultGPU = (float *)malloc(OPT_SZ);
float * h_PutResultGPU = (float *)malloc(OPT_SZ);
float * h_StockPrice = (float *)malloc(OPT_SZ);
float * h_OptionStrike = (float *)malloc(OPT_SZ);
float * h_OptionYears = (float *)malloc(OPT_SZ);
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
cudaMalloc((void **)&d_CallResult, OPT_SZ);
cudaMalloc((void **)&d_PutResult, OPT_SZ);
cudaMalloc((void **)&d_StockPrice, OPT_SZ);
cudaMalloc((void **)&d_OptionStrike, OPT_SZ);
cudaMalloc((void **)&d_OptionYears, OPT_SZ);
srand(5347); // fixed seed for a reproducible option set
//Generate options set
int i;
for(i = 0; i < OPT_N; i++)
{
h_CallResultGPU[i] = 0.0;
h_PutResultGPU[i] = -1.0;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice);
cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice);
cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice);
for(i = 0; i < NUM_ITERATIONS; i++){
// Argument order matches the kernel signature: stock price before strike.
BlackScholesGPU<<<256, 128>>>(
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
}
cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost);
cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost);
//for(i = 0; i < OPT_N; i++)
// printf("%.15f,", h_CallResultGPU[i]);
// Release device and host buffers (previously leaked).
cudaFree(d_CallResult);
cudaFree(d_PutResult);
cudaFree(d_StockPrice);
cudaFree(d_OptionStrike);
cudaFree(d_OptionYears);
free(h_CallResultGPU);
free(h_PutResultGPU);
free(h_StockPrice);
free(h_OptionStrike);
free(h_OptionYears);
return 0;
}
8,831 | #include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <vector>
// Reads doubles from stdin and computes the mean and population variance on
// the GPU with Thrust.
// Fixes: printing data.size() (size_t) with %d is undefined behaviour --
// now %zu; empty input previously divided by zero (NaN output) and is now
// reported explicitly.
int main() {
std::vector<double> data;
double temp;
while (scanf("%lf", &temp) > 0) {
data.push_back(temp);
}
if (data.empty()) {
printf("Mean: 0.000000 Variance: 0.000000 Size: 0\n");
return 0;
}
thrust::device_vector<double> gpu_data(data);
thrust::device_vector<double> var(data.size());
double mean = (double) thrust::reduce(gpu_data.begin(), gpu_data.end(), (double) 0, thrust::plus<double>()) / data.size();
// var[i] = (x[i] - mean)^2, then average.
thrust::transform(gpu_data.begin(), gpu_data.end(), thrust::constant_iterator<double>(mean), var.begin(), thrust::minus<double>());
thrust::transform(var.begin(), var.end(), var.begin(), var.begin(), thrust::multiplies<double>());
double variance = (double) thrust::reduce(var.begin(), var.end(), (double) 0, thrust::plus<double>()) / data.size();
printf("Mean: %lf Variance: %lf Size: %zu\n", mean, variance, data.size());
}
8,832 | #include <stdio.h>
// Demo kernel: every thread writes blockIdx.x + threadIdx.x to its slot in
// `data` and echoes the triple (block, thread, sum) via device printf.
__global__ void addBlockThread(int* data) {
const int slot = blockDim.x * blockIdx.x + threadIdx.x;
const int sum = blockIdx.x + threadIdx.x;
data[slot] = sum;
printf("%6d %6d %6d\n", blockIdx.x, threadIdx.x, sum);
}
int main() {
// Launch configuration: 2 blocks of 8 threads -> 16 output slots.
int num_blocks = 2;
int num_threads = 8;
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_threads);
int num_ints = num_blocks * num_threads;
int hostArray[num_ints];
int* devArray;
// Device buffer that the kernel fills with block+thread sums.
cudaMalloc((void**)&devArray, sizeof(int) * num_ints);
printf("\nValues written to the device array:\n");
printf("%6s %6s %6s\n", "Block", "Thread", "Sum");
addBlockThread <<<dimGrid, dimBlock>>> (devArray);
// Blocking copy: also synchronizes with the kernel above.
cudaMemcpy(hostArray, devArray, sizeof(int) * num_ints,
cudaMemcpyDeviceToHost);
printf("\nValues stored in the host array:\n");
for (int i = 0; i < num_ints; i++) printf("%d ", hostArray[i]);
printf("\n");
cudaFree(devArray);
return 0;
}
|
8,833 | #include <iostream>
#include <string>
#include <stdexcept>
#include <random>
#include <stdio.h>
#include <sys/time.h>
#include <cmath>
#define NUM_ARGS 2
#define add_particles(a,b) make_float3(a.x+b.x, a.y+b.y, a.z+b.z)
#define mul_particles(a,b) make_float3(a.x*b.x, a.y*b.y, a.z*b.z)
using std::stoi;
using std::cout;
using std::endl;
using std::exception;
using std::abs;
// A point particle: position and velocity, both float3.
struct Particle {
float3 pos; // position
float3 vel; // velocity
Particle() : pos(make_float3(0,0,0)), vel(make_float3(0,0,0)) {}
Particle(float3 p, float3 v) : pos(p), vel(v) {}
};
// Wall-clock seconds since the epoch with microsecond resolution.
double cpuSecond() {
struct timeval now;
gettimeofday(&now, NULL);
return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// One thread per particle: constant acceleration of magnitude 9.82 toward
// z = 0, then 1% velocity damping, then a unit Euler position step.
__global__ void particle_update_gpu(Particle *particles, const int num_particles) {
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i >= num_particles) return;
const float g = (particles[i].pos.z > 0) ? -9.82 : 9.82;
particles[i].vel = add_particles(particles[i].vel, make_float3(0, 0, g));
particles[i].vel = mul_particles(particles[i].vel, make_float3(0.99, 0.99, 0.99));
particles[i].pos = add_particles(particles[i].pos, particles[i].vel);
}
// Sequential reference implementation of the same update rule as the kernel:
// gravity toward z = 0, 1% damping, unit Euler step.
__host__ void particle_update_cpu(Particle *particles, const int num_particles) {
for(int i = 0; i < num_particles; i++) {
const float g = (particles[i].pos.z > 0) ? -9.82 : 9.82;
particles[i].vel = add_particles(particles[i].vel, make_float3(0, 0, g));
particles[i].vel = mul_particles(particles[i].vel, make_float3(0.99, 0.99, 0.99));
particles[i].pos = add_particles(particles[i].pos, particles[i].vel);
}
}
// Component-wise tolerance comparison of two particles (position and
// velocity, eps = 0.1).
// Fix: the original tested vel.z three times and never vel.x / vel.y.
bool compare(Particle a, Particle b) {
float eps = 0.1;
if(abs(a.pos.x-b.pos.x) > eps) return false;
if(abs(a.pos.y-b.pos.y) > eps) return false;
if(abs(a.pos.z-b.pos.z) > eps) return false;
if(abs(a.vel.x-b.vel.x) > eps) return false;
if(abs(a.vel.y-b.vel.y) > eps) return false;
if(abs(a.vel.z-b.vel.z) > eps) return false;
return true;
}
// Arrays match iff every particle pair matches within tolerance.
bool compare(Particle *a, Particle *b, const int num_particles) {
for(int i = 0; i < num_particles; i++)
if(!compare(a[i], b[i]))
return false;
return true;
}
// Runs the CPU particle simulation (GPU path currently commented out).
// Fixes: the program reads argv[3] but only required NUM_ARGS=2 arguments,
// so `prog N M` passed the guard and stoi(argv[3]) threw; after the catch it
// previously continued with uninitialized values -- it now returns. The
// new[] allocations were also leaked and are now released.
int main(int argc, char **argv) {
int num_particles;
int num_iterations;
int TPB;
/* Read command line arguments: particles, iterations, threads per block */
if(argc < 4) {
cout << "Too few arguments" << endl;
return 0;
}
try {
num_particles = stoi(argv[1]);
num_iterations = stoi(argv[2]);
TPB = stoi(argv[3]);
} catch(const exception& e) {
cout << "Invalid arguments. Proper usage is an integer number of particles and an integer number of iterations and an integer threads per block" << endl;
return 1;
}
(void)TPB; // only used by the commented-out GPU path below
/* INITIALISE RNG */
std::random_device rd;
std::mt19937 engine(rd());
std::uniform_real_distribution<float> dist(-1000,1000);
/* MEMORY ALLOCATION AND INITIALISATION: identical state for both arrays */
Particle *particles_cpu = new Particle[num_particles];
Particle *particles_gpu = new Particle[num_particles];
for(int i = 0; i < num_particles; i++) {
float x = dist(engine);
float y = dist(engine);
float z = dist(engine);
float vx = dist(engine);
float vy = dist(engine);
float vz = dist(engine);
particles_cpu[i] = Particle(make_float3(x,y,z), make_float3(vx,vy,vz));
particles_gpu[i] = Particle(make_float3(x,y,z), make_float3(vx,vy,vz));
}
/* EXECUTION */
double time;
time = cpuSecond();
for(int i = 0; i < num_iterations; i++) {
particle_update_cpu(particles_cpu, num_particles);
}
time = cpuSecond() - time;
cout << "CPU TOOK " << time << " SECONDS" << endl;
// time = cpuSecond();
// Particle *device_particles;
// cudaMalloc(&device_particles, sizeof(Particle)*num_particles);
// cudaMemcpy(device_particles, particles_gpu, sizeof(Particle)*num_particles, cudaMemcpyHostToDevice);
// for(int i = 0; i < num_iterations; i++) {
// particle_update_gpu<<<(num_particles+TPB-1)/TPB, TPB>>>(device_particles, num_particles);
// }
// cudaMemcpy(particles_gpu, device_particles, sizeof(Particle)*num_particles, cudaMemcpyDeviceToHost);
// time = cpuSecond() - time;
// cout << "GPU TOOK " << time << " SECONDS" << endl;
// if(compare(particles_cpu, particles_gpu, num_particles)) {
// cout << "COMPARISON SUCCESSFUL" << endl;
// } else {
// cout << "COMPARISON FAILED" << endl;
// }
delete[] particles_cpu;
delete[] particles_gpu;
return 0;
}
|
// Lomuto partition on the strided column x[row*T + t] owned by thread t,
// with the pivot taken from x[high*T + t]. On return *pi is the pivot's
// final position.
__device__ void CUDAprojection_simplexes_sort_quickSort_partition(int *pi, double *x, int low, int high, int t, int T)
{
const double pivot = x[high*T + t];
int boundary = low - 1; // last index of the <= pivot region
for(int j = low; j < high; j++)
{
if(x[j*T + t] <= pivot)
{
++boundary;
double tmp = x[boundary*T + t];
x[boundary*T + t] = x[j*T + t];
x[j*T + t] = tmp;
}
}
// Place the pivot just past the <= region.
double tmp = x[(boundary+1)*T + t];
x[(boundary+1)*T + t] = x[high*T + t];
x[high*T + t] = tmp;
*pi = boundary + 1;
}
// Recursive quicksort (ascending) over the strided column of x belonging to
// thread t. Device-side recursion: depth is bounded by the column length K.
__device__ void CUDAprojection_simplexes_sort_quickSort(double *x, int low, int high, int t, int T)
{
if (low >= high)
return;
int pivotIdx;
CUDAprojection_simplexes_sort_quickSort_partition(&pivotIdx, x, low, high, t, T);
CUDAprojection_simplexes_sort_quickSort(x, low, pivotIdx - 1, t, T);
CUDAprojection_simplexes_sort_quickSort(x, pivotIdx + 1, high, t, T);
}
/* In-place ascending bubble sort of "column" t of the row-major array
 * x_sorted (K rows of stride T; element i lives at x_sorted[i*T + t]).
 * Uses the standard early-exit optimisation: after each pass, everything
 * above the last swap position is already sorted. */
__device__ void device_sort_bubble(double *x_sorted, int t, int T, int K){
    int upper = K;                 /* first index known to be sorted */
    while (upper > 0) {
        int last_swap = 0;         /* highest index swapped this pass */
        for (int idx = 1; idx < upper; idx++) {
            double prev = x_sorted[(idx - 1)*T + t];
            double curr = x_sorted[idx*T + t];
            /* out of order -> exchange the pair */
            if (curr < prev) {
                x_sorted[idx*T + t]       = prev;
                x_sorted[(idx - 1)*T + t] = curr;
                last_swap = idx;
            }
        }
        upper = last_swap;         /* shrink the unsorted region */
    }
}
/* Project each of the T columns of X (K entries per column, element k of
 * column t stored at X[k*T + t]) onto the probability simplex
 * { x : x >= 0, sum(x) = 1 }.  One thread handles one column; Y is used
 * as scratch for the sorted copy of the column. */
__global__ void CUDAprojection_simplexes( double *X,
double *Y,
int T, int K ) {
int t = blockIdx.x*blockDim.x + threadIdx.x;
int k;
if(t<T){
bool is_inside = true;
double sum = 0.0;
/* control inequality constraints */
for(k = 0; k < K; k++){ // TODO: could be performed parallely
if(X[k*T+t] < 0.0){
is_inside = false;
}
sum += X[k*T + t];
Y[k*T + t] = X[k*T + t];
}
/* control equality constraints */
/* NOTE(review): exact floating-point equality test on an accumulated sum;
 * a tolerance (fabs(sum - 1.0) > eps) may be intended — confirm. */
if(sum != 1){
is_inside = false;
}
/* if given point is not inside the feasible domain, then do projection */
if(!is_inside){
int j,i;
/* compute sorted x_sub */
double sum_y;
// CUDAprojection_simplexes_sort_bubble(Y,t,T,K);
CUDAprojection_simplexes_sort_quickSort(Y, 0, K-1, t, T);
/* now perform analytical solution of projection problem:
 * find the threshold t_hat scanning the sorted values from the top */
double t_hat = 0.0;
i = K - 1;
double ti;
while(i >= 1){
/* compute sum(y) */
sum_y = 0.0;
for(j=i;j<K;j++){ /* sum(y(i,n-1)) */
sum_y += Y[j*T + t];
}
ti = (sum_y - 1.0)/(double)(K-i);
if(ti >= Y[(i-1)*T + t]){
t_hat = ti;
i = -1; /* break */
} else {
i = i - 1;
}
}
if(i == 0){
t_hat = (sum-1.0)/(double)K; /* uses sum=sum(x_sub) */
}
/* clamp the shifted coordinates at zero: x_k = max(x_k - t_hat, 0) */
for(k = 0; k < K; k++){ // TODO: could be performed parallely
/* (*x_sub)(i) = max(*x_sub-t_hat,0); */
ti = X[k*T + t] - t_hat;
if(ti > 0.0){
X[k*T + t] = ti;
} else {
X[k*T + t] = 0.0;
}
}
}
}
/* if t >= T then relax and do nothing */
}
|
8,835 | #include <cstdlib>
#include <cstdio>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
/* Allocate `size` bytes of device memory through *ptr.
 * Returns true on success; on failure prints the raw status code and
 * returns false. */
bool allocate (void **ptr, size_t size){
    // cudaMalloc takes a double pointer, so the void** is passed straight through.
    const cudaError_t status = cudaMalloc(ptr, size);
    if (status != cudaSuccess) {
        // report the failure code to stdout
        std::cout << "allocation stat: " << status << std::endl;
        return false;
    }
    return true;
}
/*
enum cudaMemcpyKind:
cudaMemcpyHostToHost = 0
cudaMemcpyHostToDevice = 1
cudaMemcpyDeviceToHost = 2
cudaMemcpyDeviceToDevice = 3
cudaMemcpyDefault = 4
*/
/* Copy `size` bytes between host/device buffers.
 * cudaCpyKind is the integer direction code documented above
 * (0=H2H, 1=H2D, 2=D2H, 3=D2D); any other value falls back to
 * host-to-host, matching the original switch default.
 * Returns true on success; on failure prints the status code. */
bool copy(void *dst, void *src, size_t size, int cudaCpyKind){
    cudaMemcpyKind dir = cudaMemcpyHostToHost;   // default (also case 0)
    if (cudaCpyKind == 1)
        dir = cudaMemcpyHostToDevice;
    else if (cudaCpyKind == 2)
        dir = cudaMemcpyDeviceToHost;
    else if (cudaCpyKind == 3)
        dir = cudaMemcpyDeviceToDevice;

    const cudaError_t stat = cudaMemcpy(dst, src, size, dir);
    if (stat != cudaSuccess) {
        // if no success, print the error
        std::cout << "copy stat: " << stat << std::endl;
        return false;
    }
    return true;
}
void release(void *ptr){ cudaFree(ptr);}
|
8,836 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <curand_kernel.h>
#include <sys/time.h>
#define NX 19 //the number of internal nodes in the x direction
#define NY 19 //the number of internal nodes in the y direction
#define SIZE (NX+2)*(NY+2) //total number of nodes (including boundary nodes)
#define MIN_ITER 10000
__device__ double Tpre[SIZE]; //temperature of the previous timestep
/* Initialise one cuRAND state per thread (fixed seed 1234, per-thread
 * sequence number -> reproducible runs) and snapshot dev_T into the
 * device-global Tpre array (temperature of the previous timestep). */
__global__ void setup_kernel(curandState *state, double *dev_T)
{
int i, j;
int NThd_x;
int idx;
i = threadIdx.x + blockIdx.x*blockDim.x;
j = threadIdx.y + blockIdx.y*blockDim.y;
NThd_x = blockDim.x*gridDim.x;
idx = i + j*NThd_x;
curand_init(1234, idx, 0, &state[idx]);
/* NOTE(review): every thread copies the whole SIZE-element array here
 * (and `i` is reused as the loop counter).  All threads write identical
 * values, so this is redundant work rather than a race — one thread
 * would suffice. */
for(i=0; i<SIZE; i++)
Tpre[i] = dev_T[i];
}
/* Monte-Carlo (floating random walk) solver for one timestep of the 2-D
 * transient heat equation.  Each interior node runs independent random
 * walks until its running-average temperature converges (|ΔT| < EPS after
 * at least MIN_ITER walks).
 * NOTE(review): the code uses NThd_x (= blockDim.x*gridDim.x) as the row
 * pitch when indexing T/Tpre, while the arrays are laid out with pitch
 * NX+2.  This is consistent only because the launch configuration makes
 * NThd_x == NX+2 (and NX == NY here) — verify before changing NX/NY or
 * the grid/block dimensions. */
__global__ void kernel(double *T, double *para, curandState *state)
{
double LX = para[0]; //length in x direction, m
double LY = para[1]; //length in y direction, m
double k = para[2]; //conductivity, W(m-K)
double Q = para[3]; //internal heat source, W/m2
double rho = para[4]; //density of the material, kg/m3
double cp = para[5]; //specific heat capacity of the material, J/(kg-K)
double dt = para[6]; //time step, sec
int i; //index in the y direction (row)
int j; //index in the x direction (column)
int NThd_x; //number of threads in the x direction
int idx; //index of the temperature nodes in 1D array
double r; //random number
double Tsum = 0.0; //accumulated temperature
double Told = 0.0; //old value of the node's temperature
double TH[] = {0.0, 0.0, 0.0,
0.0}; //threshold value for random walking
double q = 0.0; //source term
double dx = LX/(NX+1); //spacial interval length in x direction, m
double dy = LY/(NY+1); //spacial interval length in y direction, m
int pos_x = -1; //current position of the node in the x direction
int pos_y = -1; //current position of the node in the y direction
int flag = 1; //flag for the outmost iteration
int iflag = 0; //flag for internal iteration
int sflag = 0; //flag for the stationary state
int count = 0; //number of iterations
double err = 0.0; //absolute error
double const EPS = 1.0E-3; //error tolerance
double a[4]; //temporary storage the coefficients
//
NThd_x = blockDim.x*gridDim.x;
j = threadIdx.x + blockIdx.x*blockDim.x;
i = threadIdx.y + blockIdx.y*blockDim.y;
idx = j + NThd_x*i;
//save the result of the previous timestep
Tpre[idx] = T[idx];
//
curandState localState = state[idx];
//
/* finite-volume coefficients of the implicit scheme; TH[] converts them
 * into cumulative probabilities for the four walk directions */
a[0] = rho*cp/dt;
a[1] = k/(dx*dx);
a[2] = k/(dy*dy);
a[3] = a[0]+2.0*a[1]+2.0*a[2];
TH[0] = a[1]/a[3]; //threshold for TW0
TH[1] = TH[0]+a[1]/a[3]; //threshold for TE0
TH[2] = TH[1]+a[2]/a[3]; //threshold for TS0
TH[3] = TH[2]+a[2]/a[3]; //threshold for TN0
q = Q/a[3]; //normalized source term
//
/* only interior nodes walk; boundary nodes keep their fixed temperatures */
if(i>0 && i<(NY+1) && j>0 && j<(NX+1))
{
while(flag)
{
count++;
Told = T[idx];
Tsum = T[idx]*(count-1);
pos_x = j;
pos_y = i;
iflag = 0;
sflag = 0;
//
/* one random walk: step until we hit a boundary node or "stay put"
 * (the previous-timestep absorption event) */
while(!iflag)
{
r = curand_uniform(&localState);
//
if(r<TH[0])
//move to west
pos_x--;
else if(r<TH[1])
//move to east
pos_x++;
else if(r<TH[2])
//move to south
pos_y--;
else if(r<TH[3])
//move to north
pos_y++;
else
{
pos_x += 0;
pos_y += 0;
sflag = 1;
}
//
Tsum += q;
if(sflag)
{
iflag = 1;
Tsum += Tpre[pos_x+NThd_x*pos_y];
}
else if(pos_x == 0 || pos_x == NX+1 || pos_y == 0 || pos_y == NY+1)
{
iflag = 1;
Tsum += T[pos_x+NThd_x*pos_y];
}
}
//
/* running average over all walks performed so far */
T[idx] = Tsum/count;
err = fabs(T[idx]-Told);
if(err<EPS && count>MIN_ITER)
{
flag = 0;
}
//state[idx] = localState;
}
//
/* persist the RNG state so the next timestep continues the sequence */
state[idx] = localState;
}
}
/*
 * Driver: solves the 2-D transient heat equation with the Monte-Carlo
 * random-walk kernel above and writes the temperature field for every
 * timestep to Result.txt.  Returns 0 on success, 1 when the output file
 * cannot be opened.
 */
int main()
{
double const TW = 200.0; //west boundary temperature, C
double const TE = 150.0; //east boundary temperature, C
double const TS = 100.0; //south boundary temperature, C
double const TN = 50.0; //north boundary temperature, C
double const LX = 0.20; //length in the x direction, m
double const LY = 0.15; //length in the y direction, m
double const k = 385.0; //conductivity, W(m-K)
double const Q = 0.0; //internal heat source, W/m2
double const rho = 8.96E3; //density, kg/m3
double const cp = 3.85E2; //specific capacity, J/(kg-K)
int const NStep = 100; //number of timestep
double cal_time = 0.0; //current time, sec
double end_time = 10.0; //end time
double dt = end_time/NStep; //time step, sec
int const blocksize = 1; //number of threads in each block
dim3 dimBlock(blocksize, blocksize);
dim3 dimGrid((NY+2+blocksize-1)/blocksize, (NX+2+blocksize-1)/blocksize);
int i, j, t;
double **Tfield; //The field of temperature (2D)
double *T; //linearized temperature stored in 1D array
double *dev_T;
double Tinit = 0.25*(TW+TE+TS+TN); //initial temperture, C
double x; //x-coordinate of node, m
double y; //y-coordinate of node, m
double dx = LX/(NX+1);
double dy = LY/(NY+1);
double para[]
= {LX, LY, k, Q,
rho, cp, dt}; //parameters for solving
double *dev_para;
int num_para = sizeof(para)/sizeof(double);
curandState *devStates;
time_t startTime, endTime;
struct timeval start, end;
FILE *fp; //file pointer
//
fp = fopen("Result.txt", "w");
if(fp == NULL)
{
printf("Fail to open the result.txt file!");
/* FIX: previously execution continued and fprintf() was later called on a
 * NULL stream; bail out instead. */
return 1;
}
//
Tfield = (double**)malloc((NY+2)*sizeof(double*));
for(i=0; i<NY+2; i++)
Tfield[i] = (double*)malloc((NX+2)*sizeof(double));
T = (double*)malloc(SIZE*sizeof(double));
cudaMalloc((void**)&dev_T, SIZE*sizeof(double));
cudaMalloc((void**)&devStates, SIZE*sizeof(curandState));
cudaMalloc((void**)&dev_para, num_para*sizeof(double));
//
/* fixed boundary temperatures on the four edges, Tinit in the interior */
for(i=0; i<NY+2; i++)
{
for(j=0; j<NX+2; j++)
{
if(i == 0)
Tfield[i][j] = TS;
else if(i == NY+1)
Tfield[i][j] = TN;
else if(j == 0)
Tfield[i][j] = TW;
else if(j == NX+1)
Tfield[i][j] = TE;
else
Tfield[i][j] = Tinit;
//
T[j+(NX+2)*i] = Tfield[i][j];
}
}
//
cudaMemcpy(dev_T, T, SIZE*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_para, para, num_para*sizeof(double), cudaMemcpyHostToDevice);
//
/* seed the per-thread RNG states and snapshot the initial field */
setup_kernel<<<dimGrid,dimBlock>>>(devStates, dev_T);
//
startTime = time(NULL);
gettimeofday(&start, NULL);
//
for(t=0; t<NStep; t++)
{
cal_time += dt;
//
kernel<<<dimGrid, dimBlock>>>(dev_T, dev_para, devStates);
//
//cudaThreadSynchronize();
cudaDeviceSynchronize();
//
cudaMemcpy(T, dev_T, SIZE*sizeof(double), cudaMemcpyDeviceToHost);
//
/* dump this timestep: x, y, T triples */
fprintf(fp, "Results of time = %f:\n", cal_time);
for(i=0; i<NY+2; i++)
{
for(j=0; j<NX+2; j++)
{
x = j*dx;
y = i*dy;
Tfield[i][j] = T[j+(NX+2)*i];
fprintf(fp, "%f\t%f\t%f\n", x, y, Tfield[i][j]);
}
}
fprintf(fp, "\n\n");
}
//
endTime = time(NULL);
gettimeofday(&end, NULL);
printf("The calculation time is: %f seconds\n", difftime(endTime, startTime));
double timelapse = (end.tv_sec-start.tv_sec) + (end.tv_usec-start.tv_usec)/1.0E6;
printf("The time used for calculation is %f\n", timelapse);
//
cudaFree(dev_T);
cudaFree(dev_para);
/* FIX: release resources that were previously leaked */
cudaFree(devStates);
free(T);
for(i=0; i<NY+2; i++)
free(Tfield[i]);
free(Tfield);
fclose(fp);
//
return 0;
}
|
8,837 | // Harderthan, kheo1772@gmail.com
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
/* Abort with a descriptive message when a CUDA call fails.
 * FIX: `file` is now const char* — the HANDLE_ERROR macro passes the
 * string literal __FILE__, which may not bind to a mutable char*. */
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err != cudaSuccess) {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
                file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// kernelFunction_01
__global__ void kernel_01(void){
/* intentionally empty: exists only to exercise a minimal kernel launch */
}
void kernelFunction_01(void){
/* launch the empty kernel with a single block of a single thread */
kernel_01<<<1,1>>>();
}
// kernelFunction_02
/* Compute a + b on the device and store the sum through c. */
__global__ void kernel_02(int a, int b, int *c){
    c[0] = a + b;
}
/* Launch kernel_02 to add 2 + 7 on the device and print the result. */
void kernelFunction_02(void){
    int c;
    int *dev_c;
    HANDLE_ERROR( cudaMalloc( (void**) &dev_c, sizeof(int) ) );
    kernel_02<<<1,1>>>(2,7,dev_c);
    /* FIX: kernel launches do not return a status; query it explicitly */
    HANDLE_ERROR( cudaGetLastError() );
    HANDLE_ERROR( cudaMemcpy( &c, dev_c, sizeof(int), cudaMemcpyDeviceToHost) );
    printf( "2 + 7 = %d \n", c);
    /* FIX: the free was previously unchecked */
    HANDLE_ERROR( cudaFree( dev_c ) );
    return;
}
// kernelFunction_03
static int POINTS_HEIGHT = 32;
static int POINTS_WIDTH = 1;
static int POINTS_NUM = POINTS_WIDTH * POINTS_HEIGHT;
static int POINTS_STEP = 3;
/* One thread per point: scale all step_size-strided 3-D coordinates by 10.
 * Threads past points_num (grid tail) do nothing. */
__global__ void kernel_03(float *src_dev_points, float *dst_dev_points, int points_num, int step_size){
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= points_num)
        return;                       // guard the grid tail
    const int base = idx * step_size; // first coordinate of this point
    for (int c = 0; c < 3; ++c)
        dst_dev_points[base + c] = src_dev_points[base + c] * 10;
}
/* Fill POINTS_NUM random 3-D points on the host, scale them by 10 on the
 * GPU with kernel_03, and print each source/result pair. */
void kernelFunction_03(){
    float *src_points;
    float *dst_points;
    src_points = (float *) malloc(sizeof(float) * POINTS_NUM * POINTS_STEP);
    dst_points = (float *) malloc(sizeof(float) * POINTS_NUM * POINTS_STEP);
    for(int idx = 0; idx < POINTS_NUM; ++idx){
        src_points[idx * POINTS_STEP + 0] = (float) rand();
        src_points[idx * POINTS_STEP + 1] = (float) rand();
        src_points[idx * POINTS_STEP + 2] = (float) rand();
    }
    {
        float *src_dev_points;
        float *dst_dev_points;
        HANDLE_ERROR( cudaMalloc( (void**) &src_dev_points, sizeof(float) * POINTS_NUM * POINTS_STEP ) );
        HANDLE_ERROR( cudaMalloc( (void**) &dst_dev_points, sizeof(float) * POINTS_NUM * POINTS_STEP ) );
        HANDLE_ERROR( cudaMemcpy( src_dev_points, src_points, sizeof(float) * POINTS_NUM * POINTS_STEP, cudaMemcpyHostToDevice));
        int thread_size = POINTS_HEIGHT;
        /* ceil-division so every point gets a thread */
        int block_size = (POINTS_HEIGHT * POINTS_WIDTH + POINTS_HEIGHT - 1 ) / POINTS_HEIGHT;
        kernel_03<<<block_size,thread_size>>>(src_dev_points, dst_dev_points, POINTS_NUM, POINTS_STEP);
        /* FIX: kernel launches do not return a status; query it explicitly */
        HANDLE_ERROR( cudaGetLastError() );
        HANDLE_ERROR( cudaMemcpy( dst_points, dst_dev_points, sizeof(float) * POINTS_NUM * POINTS_STEP, cudaMemcpyDeviceToHost));
        for(int idx = 0; idx < POINTS_NUM; ++idx){
            printf("id: %d\n", idx);
            printf("%f", src_points[idx * POINTS_STEP + 0]);
            printf(", %f", src_points[idx * POINTS_STEP + 1]);
            printf(", %f \n", src_points[idx * POINTS_STEP + 2]);
            printf("%f", dst_points[idx * POINTS_STEP + 0]);
            printf(", %f", dst_points[idx * POINTS_STEP + 1]);
            printf(", %f \n", dst_points[idx * POINTS_STEP + 2]);
        }
        cudaFree( src_dev_points );
        cudaFree( dst_dev_points );
    }
    /* FIX: the host buffers were previously leaked on every call */
    free(src_points);
    free(dst_points);
}
|
8,838 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <cuda.h>
/*********************************/
/** constants/define statements **/
/*********************************/
#define THREADS_PER_BLOCK 1024
#define MAX_BLOCKS 65535
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define BUFFER_CHAR 'A'
#define PAD_AMT 5
/**********************/
/** function headers **/
/**********************/
void usage(void);
int init_data(char** data, unsigned int num_chars);
int init_data_pad(char** data, unsigned int num_chars);
int read_data(char* data, char* file, unsigned int num_genomes,
unsigned int genome_len, unsigned int buffer_len,
unsigned int buffed_len, unsigned int vicinity);
void free_data(char* data, unsigned int num_genomes);
__global__ void readcmp(char* a, char* b, /*char* result,*/
unsigned long nthreads, unsigned int str_len,
unsigned int vicinity, unsigned int tstride,
char *reduce, unsigned int pop_thresh,
unsigned int threads_per_block,
unsigned int shift_amt);
__global__ void reduce(char *g_idata, char *g_odata, unsigned long nthreads,
unsigned int str_len, unsigned int pop_thresh,
unsigned int tstride);
void print_device_info(void);
unsigned int next_power_2(unsigned int v);
unsigned int log_2(unsigned int v);
/***************/
/** functions **/
/***************/
/*
* Function - gpuAssert
*
* Inputs:
* code - gpu error code
* file - current source file
* line - line within this file
* abort - if true, the prgram aborts
*
* Description:
* This function checks the cuda error code, and aborts if it is not a
* success.
*/
/* Check a CUDA status code; print file/line context and (optionally) abort
 * on failure.  FIX: `file` is now const char* — the gpuErrchk macro passes
 * the string literal __FILE__, which may not bind to a mutable char*. */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n",cudaGetErrorString(code),file,line);
      if (abort) exit(code);
   }
}
/*
* Kernel - readcmp
*
* Inputs:
* a - a pointer to one read
* b - a pointer to the other read
* nthreads - the maximum number of threads used
* str_len - the length of the reads to be compared (this is a power of 2)
* vicinity - the bit-flip vicinity
* tstride - the read index stride distance for each thread
* pop_thresh - the pop count threshold
* thread_per_block - the number of threads per block
* shift_amt - the maximum shift amount between the reads
*
* Outputs:
* reduce - the array returned which contains 0 for "reads match" or 1 for
* "reads don't match"
*
* Description:
* This Kernel does two things: for each pair of reads, it compares the
* characters and stores the bit-op results. It then performas a sum
* reduction on each read comparison and checks if it is above the edit
* threshold.
*/
/* See the banner comment above for the full contract.  Dynamic shared
 * memory layout (launch passes 2*threads_per_block bytes): the first
 * threads_per_block chars are `sdata`, the second are `result`. */
__global__ void readcmp(char *dev_test_array, char *a, char *b, /*char *result,*/ unsigned long nthreads, unsigned int str_len, unsigned int vicinity, unsigned int tstride, char *reduce, unsigned int pop_thresh, unsigned int threads_per_block, unsigned int shift_amt) {
// Set up shared memory
extern __shared__ char shared_data[];
char* sdata = (char *)shared_data;
char* result = (char *)&shared_data[threads_per_block];
// Find index of this thread
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long tid = x + y * blockDim.x * gridDim.x;
unsigned long i = threadIdx.x; //local block tid
int j, k;
/* NOTE(review): the __syncthreads() calls below execute inside this
 * data-dependent loop; the barrier is valid only if every thread of a
 * block performs the same number of iterations (i.e. nthreads/tstride
 * keep whole blocks together) — confirm for the launch used. */
while(tid < nthreads) {
//make the first xor comparison without shifting
result[i] = a[tid+PAD_AMT] ^ b[tid+PAD_AMT];
__syncthreads();
//check the vicinity for 100...01
/* NOTE(review): result[i+j] (and sdata[i+j] below) with j up to
 * vicinity-1 can index past the end of this block's shared array for
 * threads near i == threads_per_block-1 — verify the padding scheme
 * makes this benign or add a bound on i+j. */
if (result[i]!=0) {
for (j=1; j<vicinity; j++) {
if(result[i+j]!=0)
break;
}
if (result[i+j]!=0) {
for (k=1; k<j; k++) {
result[i+k]=0xff;
}
}
}
//make the remaining xor comparisons with shifting up until the limit
for(unsigned int cur_shift = 1; cur_shift <= shift_amt; cur_shift++) {
__syncthreads();
sdata[i] = a[tid+PAD_AMT] ^ b[tid+PAD_AMT-cur_shift]; //shift b left
__syncthreads();
//check vicinity
if (sdata[i] != 0) {
for (j=1; j<vicinity; j++) {
if(sdata[i+j]!=0)
break;
}
if (sdata[i+j]!=0) {
for (k=1; k<j; k++) {
sdata[i+k]=0xff;
}
}
}
//AND result
__syncthreads();
result[i] = result[i] && sdata[i];
sdata[i] = b[tid+PAD_AMT] ^ a[tid+PAD_AMT-cur_shift]; //shift a left
__syncthreads();
//check vicinity
if (sdata[i]!=0) {
for (j=1; j<vicinity; j++) {
if(sdata[i+j]!=0)
break;
}
if (sdata[i+j]!=0) {
for (k=1; k<j; k++) {
sdata[i+k]=0xff;
}
}
}
//AND result
__syncthreads();
result[i] = result[i] && sdata[i];
__syncthreads();
}
/////////////////////////////////////////////////////////////////////
// the code below is used for the sum reduce
/////////////////////////////////////////////////////////////////////
sdata[i] = result[i];
__syncthreads();
/*
* conservative reduction implemented by John Emmons Feb. 2014
* EX. if vicinity = 3 then 111100110111111 -> 100100110100101
*/
/* only the first thread of each run of nonzero mismatch bytes walks the
 * run, zeroing all but one marker per vicinity window */
if(sdata[i] != 0 && (i == 0 || sdata[i-1] == 0)){
int m, n = i;
bool flag = true;
while(true){
for(m = 1; m < vicinity + 1; m++){
if(n + m < str_len) {
if(sdata[n + m] != 0){ continue; }
else if(m < 2){ flag = false; break; }
else{ break; }
}
else{
if(m < 2){ flag = false; break; }
else{ break; }
}
}
if(flag){
for(m -= 2; m > 0; m--)
sdata[n + m] = 0x00;
n += vicinity;
}
else{ break; }
}
}
__syncthreads();
// conservative reduction debugging
dev_test_array[i] = sdata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=str_len/2; s>0; s >>= 1){
if(i < s) {
sdata[i] += sdata[i + s];
}
__syncthreads();
}
__syncthreads();
// write result for this block to global mem
/* one flag per read pair: 0 = match (popcount <= threshold), 1 = mismatch */
if(i%str_len == 0) {
reduce[tid/str_len] = (sdata[i]<=pop_thresh)?0:1;
}
///////////////////////////////////////////////////////////////////////
__syncthreads();
tid += tstride; //increase tid by thread stride amount
}
}
/*********************************/
/** THIS FUNCTION IS DEPRECATED **/
/*********************************/
/* DEPRECATED sum-reduction kernel (superseded by the in-kernel reduction
 * in readcmp).  Reduces each str_len-sized segment of g_idata in shared
 * memory and writes one value per segment to g_odata. */
__global__ void reduce(char *g_idata, char *g_odata, unsigned long nthreads, unsigned int str_len, unsigned int pop_thresh, unsigned int tstride) {
extern __shared__ char sdata[];
// each thread loads one element from global to shared mem
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long i = x + y * blockDim.x * gridDim.x; //global tid
unsigned int tid = threadIdx.x; //local block tid
while(i < nthreads) {
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=str_len/2; s>0; s >>= 1){
if (tid<s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid%str_len == 0) {
g_odata[i/str_len] = (sdata[tid]<=pop_thresh)?1:0;
/* NOTE(review): this second store overwrites the thresholded flag above
 * with the raw sum — it looks like leftover debug output; confirm which
 * value callers expect before deleting either line. */
g_odata[i/str_len] = sdata[tid];
}
i += tstride;
}
}
/*
* Function - main
*
* Arguments:
* argc - the number of command line arguments
* argv - an array of the command line arguments
*
* Outputs:
* int - 0 if success, 1 if failure
*
* Description:
* This is the main function. It initializes memory. It reads in the
* files which contain the reads. It, then, launches the kernel on
* the GPU.
*/
/* Entry point: parses the seven CLI arguments, loads both read files into
 * pinned host buffers, launches the readcmp kernel, and reports match
 * statistics to pop_output.txt.  Implicitly returns 0 (C++ main). */
int main(int argc, char *argv[]) {
/* check the number of command line arguments */
if(argc != 8) {
usage();
return 1;
}
/* get arguments */
char* file_1;
char* file_2;
unsigned int num_genomes, genome_len, buffed_len,
buffer_len, vicinity, errors, pop_thresh, shift_amt;
FILE *pop_count_file;
file_1 = argv[1]; //contains reads
file_2 = argv[2]; //contains reads
num_genomes = atoi(argv[3]); //the number of reads in each file
genome_len = atoi(argv[4]); //the length of each read
errors = atoi(argv[5]); //the number of edits allowed between two reads
vicinity = atoi(argv[6]); //the vicinity for bit flips
shift_amt = atoi(argv[7]); //the maximum shift amount when comparing reads
/* calculate important values */
pop_thresh = /*(vicinity-1)*(errors-1) +*/ errors; //popcount threshold (is simply the num of allowed errors with conservative reduction)
buffed_len = next_power_2(genome_len); //genome length + buffer space
buffer_len = buffed_len - genome_len; //difference bw genome len and buf len
unsigned long num_chars = num_genomes*buffed_len; //the total number of chars in every buffed read
/* initialize and allocate strings to compare (pinned host memory) */
char* genome_1_data; //first genome data
char* genome_2_data; //second genome data
char* reduce_data; //sum of "errors" in each string
if(init_data_pad(&genome_1_data, num_chars)) return 1;
if(init_data_pad(&genome_2_data, num_chars)) return 1;
if(init_data(&reduce_data, num_genomes)) return 1;
// conservative reduction debugging
/* NOTE(review): test_array / dev_test_array are debug buffers and are
 * never freed below — known leak in this debug path. */
char* test_array;
if(init_data(&test_array, num_chars+PAD_AMT)) return 1;
/* read in the data */
if(read_data(genome_1_data, file_1, num_genomes, genome_len, buffer_len, buffed_len, vicinity+1)) return 1;
if(read_data(genome_2_data, file_2, num_genomes, genome_len, buffer_len, buffed_len, vicinity+1)) return 1;
/* create timing events */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* initialize and allocate memoer for GPU input and output arrays */
char *dev_genome_1_data;
char *dev_genome_2_data;
char *dev_reduce_data;
gpuErrchk( cudaMalloc((void**)&dev_genome_1_data, (num_chars+PAD_AMT)*sizeof(char)));
gpuErrchk( cudaMalloc((void**)&dev_genome_2_data, (num_chars+PAD_AMT)*sizeof(char)));
gpuErrchk( cudaMalloc((void**)&dev_reduce_data, num_genomes*sizeof(char) ));
// conservative reduction debugging
char *dev_test_array;
gpuErrchk( cudaMalloc((void**)&dev_test_array, (num_chars+PAD_AMT)*sizeof(char) ));
/******************/
/** START TIMING **/
/******************/
/*========================================================================*/
/* set start time */
cudaEventRecord(start, 0);
/* copy data to GPU */
gpuErrchk(cudaMemcpy( dev_genome_1_data, genome_1_data,
(num_chars+PAD_AMT)*sizeof(char), cudaMemcpyHostToDevice ));
gpuErrchk(cudaMemcpy( dev_genome_2_data, genome_2_data,
(num_chars+PAD_AMT)*sizeof(char), cudaMemcpyHostToDevice ));
// conservative reduction debugging
gpuErrchk(cudaMemcpy( dev_test_array, test_array,
(num_chars+PAD_AMT)*sizeof(char), cudaMemcpyHostToDevice ));
/* figure out thread count and dimensions for GPU */
unsigned int num_blocks_x = 128;
unsigned int num_blocks_y = 128;
unsigned int threads_per_block = buffed_len;
unsigned int tstride = threads_per_block*num_blocks_x*num_blocks_y;
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
unsigned int log_len = log_2(buffed_len); //TODO: ALL OF THIS SHOULD PROBABLY BE MOVED ABOVE THE BEGINNING OF TIMING
/* create and run GPU threads (3rd launch arg = dynamic shared bytes:
 * sdata + result, threads_per_block chars each) */
readcmp<<<grid_size,threads_per_block,2*threads_per_block>>>(dev_test_array, dev_genome_1_data,
dev_genome_2_data,/* dev_result_data,*/ num_chars, buffed_len, vicinity,
tstride, dev_reduce_data, pop_thresh, threads_per_block, shift_amt);
/* NOTE(review): cudaThreadSynchronize() is deprecated; the modern
 * equivalent is cudaDeviceSynchronize(). */
gpuErrchk(cudaThreadSynchronize());
/* write the results back */
gpuErrchk(cudaMemcpy( reduce_data, dev_reduce_data,
num_genomes*sizeof(char), cudaMemcpyDeviceToHost ));
// conservative reduction debugging
gpuErrchk(cudaMemcpy( test_array, dev_test_array,
(num_chars + PAD_AMT) *sizeof(char), cudaMemcpyDeviceToHost ));
/*========================================================================*/
/****************/
/** END TIMING **/
/****************/
/* set stop time */
cudaEventRecord(stop,0);
cudaEventSynchronize( stop );
/* calculate elapsed time for GPU computation */
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Time to complete comparison %1.4f ms\n", elapsedTime);
// Writing output pop count to file
// for data collection purposes
unsigned int matches=0;
for (unsigned int q=0; q<num_genomes; q++) {
if (reduce_data[q]==0)
matches++;
}
// conservative reduction debugging
/* NOTE(review): bad_strings is a variable-length array (GCC extension,
 * not standard C++), and 0 is used as the "empty slot" sentinel below —
 * a genuine false negative at genome index 0 can never be reported. */
int j = 0;
int bad_strings[num_genomes];
for(int i=0; i < num_genomes; i++)
bad_strings[i] = 0;
// find the bad strings
for(int i=0; i < num_genomes; i++){
if (reduce_data[i]!=0){
bad_strings[j] = i;
j++;
}
}
for(int i=0; i < num_genomes; i++){
if(bad_strings[i] != 0){
printf("genome number %d is a false negative\n", bad_strings[i]);
//printf("Original genome:\n");
/* NOTE(review): this indexes the data at byte offset bad_strings[i];
 * the genome presumably starts at PAD_AMT + bad_strings[i]*buffed_len —
 * verify before trusting this debug printout. */
for(int k=0; k < genome_len; k++){
printf("%c", genome_1_data[k + bad_strings[i]]);
}
printf("\n");
//printf("Edited genome:\n");
for(int k=0; k < genome_len; k++){
printf("%c", genome_2_data[k + bad_strings[i]]);
}
printf("\n");
}
}
// for(unsigned int i=0; i < num_chars + PAD_AMT; i++){
// printf("the test_array: %i\n", test_array[i]);
// }
pop_count_file = fopen("pop_output.txt","w");
fprintf(pop_count_file, "%d %d\n", matches, num_genomes-matches);
fclose(pop_count_file);
/* free and destroy all allocated information */
/* NOTE(review): dev_test_array and test_array are not released here. */
cudaFree(dev_genome_1_data);
cudaFree(dev_genome_2_data);
cudaFree(dev_reduce_data);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFreeHost(genome_1_data);
cudaFreeHost(genome_2_data);
cudaFreeHost(reduce_data);
}
/*
* Function - usage
*
* Description:
* Just prints the usage invariant for this program.
*/
/* Print the command-line usage string for this program. */
void usage(void) {
    printf("\nUsage:\n");
    printf("\t./a.out <file_1> <file_2> <num_genomes> <genome_len> <errors> <vicinity> <adj_errs>\n\n");
}
/*
* Function - init_data
*
* Arguments:
* data - the array in which to place data
* num_chars - the number of chars to allocate
*
* Outputs:
* int - 0 if success, 1 if failure
*
* Description:
* This function initializes a data array. Pretty simple to follow.
*/
/*
 * Allocate num_chars bytes of pinned host memory into *data.
 * Returns 0 on success, 1 on failure.
 * FIX: check the cudaHostAlloc status code directly — on failure the API
 * may leave *data unmodified, so testing the pointer alone is unreliable.
 */
int init_data(char** data, unsigned int num_chars) {
    /* allocate pointers for the genome strings */
    cudaError_t err =
        cudaHostAlloc((void**)data, num_chars*sizeof(char), cudaHostAllocDefault);
    if(err != cudaSuccess || NULL == *data) {
        printf("init_data - malloc failed\n");
        return 1;
    }
    return 0; //SUCCESS
}
/*
* Function - init_data_pad
*
* Arguments:
* data - the array in which to place data
* num_chars - the number of chars to allocate
*
* Outputs:
* int - 0 if success, 1 if failure
*
* Description:
* This function initializes a data array. Pretty simple to follow. It's
* the same as the above function except that it adds the PAD_AMT to
* to it.
*/
/*
 * Same as init_data() but over-allocates by PAD_AMT bytes of padding.
 * Returns 0 on success, 1 on failure.
 * FIX: check the cudaHostAlloc status code directly — on failure the API
 * may leave *data unmodified, so testing the pointer alone is unreliable.
 */
int init_data_pad(char** data, unsigned int num_chars) {
    /* allocate pointers for the genome strings */
    cudaError_t err =
        cudaHostAlloc((void**)data,(num_chars+PAD_AMT)*sizeof(char),cudaHostAllocDefault);
    if(err != cudaSuccess || NULL == *data) {
        printf("init_data - malloc failed\n");
        return 1;
    }
    return 0; //SUCCESS
}
/*
* Function - read_data
*
* Arguments:
* data - the char* to which the data will be written
* file - the filae that contains the genomes we care about
* num_genomes - the numebr of genomed to read
* genome_len - the length of the genomes
* buffer_len - the length of the buffer at the end of each genome
* buffed_len - length of genome + buffer
*
* Outputs:
* int - 0 if success, 1 if failure
*
* Description:
* This function reads in all of the genome data from the given genome
* file. Each line contains a genome read, and this is read into each
* string.
*/
/*
 * Read num_genomes reads (one per line) from `file` into `data`, padding
 * the start of the array with PAD_AMT BUFFER_CHARs and filling each read
 * out to buffed_len with BUFFER_CHAR.  Returns 0 on success, 1 on failure.
 * FIX: the FILE* and the line buffer were previously leaked on the
 * read-failure path; the malloc was also unchecked.
 */
int read_data(char* data, char* file, unsigned int num_genomes,
              unsigned int genome_len, unsigned int buffer_len,
              unsigned int buffed_len,unsigned int vicinity) {
    /* basic info and variables */
    FILE* ifp; //ifp: "in file pointer"
    const char* mode = "r";
    /* open the file */
    ifp = fopen(file, mode);
    if(NULL == ifp) {
        printf("Can't open input file %s!\n", file);
        return 1;
    }
    /* create read-in buffer (+2: newline from fgets plus terminator) */
    char* buf = (char*)malloc((genome_len+2)*sizeof(char));
    if(NULL == buf) {
        /* FIX: guard the allocation and release the stream on failure */
        fclose(ifp);
        return 1;
    }
    /* initialize the padding at beginning of array */
    for(int i = 0; i < PAD_AMT; i++) {
        *(data + i) = BUFFER_CHAR;
    }
    int limit_len;
    /* calculate the limit to which we will read data */
    if (buffer_len < vicinity)
        limit_len = buffed_len - vicinity;
    else
        limit_len = genome_len;
    /* read in the file: copy limit_len chars of each line, then pad the
     * remainder of the buffed_len slot with BUFFER_CHAR */
    for(int i = 0; i < num_genomes; i++) {
        if(NULL != fgets(buf, genome_len + 2, ifp)) {
            for(int j = 0; j < limit_len; j++) {
                *(data + PAD_AMT + i*buffed_len + j) = buf[j];
            }
            for(int j = 0; j < buffed_len-limit_len; j++) {
                *(data + PAD_AMT + i*buffed_len + limit_len + j) = BUFFER_CHAR;
            }
        }
        else {
            printf("Failed to read from the file\n");
            /* FIX: previously leaked ifp and buf on this path */
            fclose(ifp);
            free(buf);
            return 1;
        }
    }
    /* close the file */
    fclose(ifp);
    free(buf);
    return 0; //SUCCESS
}
/*
* Function - print_device_info
*
* Description:
* Prints valuable information out regarding the CUDA-capable devices
* in this system.
*/
/* Query every CUDA device on the system and print its general, memory and
 * multiprocessor properties to stdout. */
void print_device_info(void) {
cudaDeviceProp prop;
int count;
cudaGetDeviceCount( &count );
for (int i=0; i< count; i++) {
cudaGetDeviceProperties( &prop, i );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
/*
* Function - next_power_2
*
* Arguments:
* v - the value for which we want to find the next power of 2
*
* Outputs:
* unsigned int - the next power of 2 greater than v
*
* Description:
* This code basically rounds v up to the next highest power of 2. So if
* v was 2, this function would return 2. If v was 15, this function would
* return 16. Etcetera.
*
* Source:
* http://graphics.stanford.edu/~seander/bithacks.html
*/
/*
 * Round v up to the nearest power of two; a value that is already a power
 * of two maps to itself (and 0 maps to 0, as in the classic bit-hack).
 */
unsigned int next_power_2(unsigned int v) {
    v -= 1;
    /* smear the highest set bit into every lower bit position */
    for (unsigned int shift = 1; shift < 32; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}
/* Floor of log base 2 of v, computed by counting right shifts
 * (by this scheme log_2(0) == 0 and log_2(1) == 0). */
unsigned int log_2(unsigned int v) {
    unsigned int bits = 0;
    for (v >>= 1; v != 0; v >>= 1)
        ++bits;
    return bits;
}
|
8,839 | #include "includes.h"
#define N (2048*2048)
#define N_THREADS_PER_BLOCK 512
// Adapt vector addition to use both blocks and threads
// Element-wise vector addition using a combined block/thread index.
// The launch configuration is assumed to cover the arrays exactly
// (N is a multiple of the block size in this file).
__global__ void addByCombine(int *a, int *b, int *c)
{
    // flatten (block, thread) into one global element index
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = a[i] + b[i];
}
8,840 | #include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
/* Mirror operations */
/* Convert each pixel to greyscale (Rec. 601 luma weights 0.299/0.587/0.114
 * on the .x/.y/.z channels) and write it to the mirrored position:
 * reflected across the vertical axis when `vertical` is true, across the
 * horizontal axis otherwise.  A grid-stride loop lets any launch
 * configuration cover all numRows*numCols pixels. */
__global__
void mirror(uchar4* inputChannel, uchar4* outputChannel, int numRows, int numCols, bool vertical)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int stripe = blockDim.x * gridDim.x;
for(int i=col; i<numRows*numCols; i=i+stripe)
{
unsigned char Y = 0.299 * inputChannel[i].x + 0.587 * inputChannel[i].y + 0.114 * inputChannel[i].z;
if(vertical)
/* same row, column reflected: col' = numCols - 1 - col */
outputChannel[i/numCols*numCols+(numCols-i%numCols)-1] = make_uchar4(Y, Y, Y, 255);
else
/* same column, row reflected: row' = numRows - 1 - row */
outputChannel[(numRows- (i/numCols) -1)*numCols +(i%numCols)] = make_uchar4(Y, Y, Y, 255);
}
}
/*
 * Run the mirror kernel over a device image and return the result in newly
 * malloc'd host memory (the caller owns it and must free() it).
 * NOTE: this function takes ownership of d_inputImageRGBA and frees it
 * before returning (existing callers rely on that behaviour).
 * FIX: the cudaEvent_t handles were previously leaked on every call; they
 * are now destroyed after the elapsed time is read.
 */
uchar4* mirror_ops(uchar4 *d_inputImageRGBA, size_t numRows, size_t numCols, bool vertical)
{
    //Creat Timing Event
    cudaEvent_t start, stop;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    //Set reasonable block size (i.e., number of threads per block)
    dim3 blockSize(9);
    //Calculate Grid SIze (the kernel's grid-stride loop covers the rest)
    dim3 gridSize(6);
    //Calculate number of pixels
    size_t numPixels = numRows * numCols;
    //Allocate Memory Space on Device for output image
    uchar4 *d_outputImageRGBA;
    cudaMalloc(&d_outputImageRGBA, sizeof(uchar4) * numPixels);
    //start Timer
    cudaEventRecord(start, 0);
    //Call mirror kernel.
    mirror<<<gridSize, blockSize>>>(d_inputImageRGBA, d_outputImageRGBA, numRows, numCols, vertical);
    //Stop Timer
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaDeviceSynchronize();
    //Initialize memory on host for output uchar4*
    uchar4* h_out;
    h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
    //Copy output from device to host
    cudaMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost);
    //Cleanup memory on device (input is owned by this function, see above)
    cudaFree(d_inputImageRGBA);
    cudaFree(d_outputImageRGBA);
    //Calculate Elapsed Time
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time = %5.2f ms\n", elapsedTime);
    //FIX: destroy the timing events now that the elapsed time has been read
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //return h_out
    return h_out;
}
|
8,841 | #include <stdio.h>
#include <stdlib.h>
#define N 20
#define BLOCK_DIM 20
double mat_a[N][N]; //matriz A
double mat_b[N][N]; //matrz B
double mat_result[N][N]; //matriz C
//Contadores de los loops for
int i,j,m;
//Flag para imprimir los resultados
int flag;
__global__ void multiplica(double *A, double *B, double *C, int dim) {
    // One thread per output element of C = A * B (dim x dim, row-major).
    const int columna = threadIdx.x + blockDim.x * blockIdx.x;
    const int renglon = threadIdx.y + blockDim.y * blockIdx.y;
    // Threads outside the matrix do nothing.
    if (columna >= dim || renglon >= dim)
        return;
    // Dot product of row `renglon` of A with column `columna` of B.
    double acumulado = 0;
    for (int k = 0; k < dim; k++)
        acumulado += A[renglon * dim + k] * B[k * dim + columna];
    C[renglon * dim + columna] = acumulado;
}
void inicializa_matrices();
void imprime_matrices();
int main(int argc, char *argv[]){
    // Initialize matrices A and B on the host.
    inicializa_matrices();
    // BUG FIX: argv[1] was read unconditionally; running the program with no
    // argument dereferenced past the argument array. Default to "don't print".
    flag = (argc > 1) ? atoi(argv[1]) : 0;
    // Device-side buffers (N x N doubles each).
    int size = N*N*sizeof(double);
    double *pA, *pB, *pC;
    cudaMalloc((void**)&pA, size);
    cudaMalloc((void**)&pB, size);
    cudaMalloc((void**)&pC, size);
    // Copy input matrices from host to device.
    cudaMemcpy(pA, mat_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(pB, mat_b, size, cudaMemcpyHostToDevice);
    // A single N x N block covers the whole matrix (N == 20 here).
    dim3 dimBlock(N,N);
    dim3 dimGrid(1,1);
    multiplica<<<dimGrid,dimBlock>>>(pA,pB,pC,N);
    cudaMemcpy(mat_result, pC, size, cudaMemcpyDeviceToHost);
    if (flag !=0){
        imprime_matrices();
    }
    cudaFree(pA);
    cudaFree(pB);
    cudaFree(pC);
    return 0;
}
void inicializa_matrices()
{
    // Fill the global inputs in a single pass: A[i][j] = i + j, B[i][j] = i*j.
    // (Uses the file-scope loop counters i and j, as the rest of the file does.)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            mat_a[i][j] = i + j;
            mat_b[i][j] = i * j;
        }
    }
}
// Print one N x N matrix, one row per line, 2 decimal places per entry.
// (Deliberately uses the file-scope counters i and j like the original.)
static void imprime_una_matriz(double m[N][N])
{
    for (i = 0; i < N; i++) {
        printf("\n");
        for (j = 0; j < N; j++)
            printf("%8.2f ", m[i][j]);
    }
}
// Print A, B and the product C = A * B.
void imprime_matrices()
{
    printf("Matriz A \n");
    imprime_una_matriz(mat_a);
    printf("\n\n\n");
    printf("Matriz B \n");
    imprime_una_matriz(mat_b);
    printf("\n\n\n");
    printf("Matriz C = A * B\n");
    imprime_una_matriz(mat_result);
    printf("\n\n");
}
|
8,842 | //pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
// Verification test kernel (see the "//pass --blockDim=32 --gridDim=1"
// directives above): each thread repeatedly copies its right-hand
// neighbour's element into its own slot, N times.
// NOTE(review): A[tid+1] reads one past the block for the last thread, and
// adjacent threads race on A[tid]/A[tid+1] across loop iterations. This
// looks intentional for the verification tool; do not "fix" without
// checking the test's expected outcome.
__global__ void test_Prog(int *A, int N) {
const int tid = threadIdx.x;
for (int i = 0; i < N; ++i)
{
int tmp=A[tid+1];
A[tid]=tmp;
}
}
|
8,843 | #include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
using namespace std;
__global__
void matrixAddKernel(float* A, float* B, float* C, int n){
    // One thread per element of the flattened n x n matrices.
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n*n)
        C[idx] = A[idx] + B[idx];
}
__global__
void matrixAddKernel2(float* A, float* B, float* C, int n){
    // One thread per row: thread `row` adds the n elements of its row.
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= n)
        return;
    const int base = row * n;
    for (int off = 0; off < n; off++)
        C[base + off] = A[base + off] + B[base + off];
}
__global__
void matrixAddKernel3(float* A, float* B, float* C, int n){
    // One thread per column: thread `col` strides down its column with step n.
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (col >= n)
        return;
    for (int j = col; j < n*n; j += n)
        C[j] = A[j] + B[j];
}
void matrixAdd(float* A, float* B, float* C, int n) {
    // Copy A and B to the device, add element-wise with matrixAddKernel,
    // and copy the n x n result back into C.
    const int size = n * n * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    // 256 threads per block; enough blocks to cover all n*n elements.
    matrixAddKernel <<< ceil((n*n) / 256.0), 256 >>> (d_A, d_B, d_C, n);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
int main() {
    // All three matrices are n x n floats.
    int n = 100;
    size_t bytes = (size_t)n * n * sizeof(float);
    // BUG FIX: B and C were previously allocated with only n floats, but the
    // init loop writes n*n entries into B and matrixAdd copies n*n floats
    // into C -- both were heap buffer overflows.
    float *A = (float*)malloc(bytes);
    float *B = (float*)malloc(bytes);
    float *C = (float*)malloc(bytes);
    for (int i = 0; i < n*n; i++)
    {
        A[i] = 1;
        B[i] = 2;
    }
    matrixAdd(A, B, C, n);
    // Free host buffers (previously leaked).
    free(A);
    free(B);
    free(C);
    return 0;
}
|
8,844 | #include <math.h>
#include <stdio.h>
#include "cuda_profiler_api.h"
#include "volatility_inversion.cuh"
#ifndef PI
#define PI 3.141592653589793238462643f
#endif
const int serial_threads_per_block = 448;
const int parallel_guesses_per_option = 4;
const int parallel_threads_per_block = 192;
const int vega_parallel_guesses_per_option = 4;
const int vega_parallel_threads_per_block = 192;
#define HANDLE_CUDA_ERROR(err) if (err) { printf("%s", cudaGetErrorString(err)); return; }
// Standard normal probability density: phi(z) = exp(-z^2 / 2) / sqrt(2*pi).
// BUG FIX: the previous code computed exp(-0.5f * z) -- it dropped the square
// of z, which coincidentally agrees only at z == 0 and z == 1. call_vega
// (and therefore every Newton step) depends on this value.
__device__ __host__ float normal_pdf(const float z)
{
return (1.0f / sqrt(2.0f * PI)) * exp(-0.5f * z * z);
}
/*
 * Polynomial approximation to the standard normal CDF (Abramowitz/Stegun-
 * style rational approximation constants; single-precision accuracy).
 * Inputs with |z| > 6 are clamped to exactly 0 or 1.
 */
__device__ __host__ float normal_cdf(const float z)
{
const float b1 = 0.31938153f;
const float b2 = -0.356563782f;
const float b3 = 1.781477937f;
const float b4 = -1.821255978f;
const float b5 = 1.330274429f;
const float p = 0.2316419f;
const float c2 = 0.3989423f;
if (z > 6.0f)
{
return 1.0f;
}
if (z < -6.0f)
{
return 0.0f;
}
const float a = abs(z);
const float t = 1.0f / (1.0f + a * p);
/* exp(-z*z/2) is symmetric in z, so using z rather than |z| is fine here */
const float b = c2 * exp((-z) * (z / 2.0f));
/* Horner evaluation of the degree-5 polynomial in t */
float n = ((((b5 * t + b4) * t + b3) * t + b2) * t + b1) * t;
n = 1.0f - b * n;
/* mirror the tail probability for negative z */
if ( z < 0.0f )
{
n = 1.0f - n;
}
return n;
}
// Black-Scholes price of a European call: S*N(d1) - K*exp(-rT)*N(d2).
__device__ __host__ float call_price(const float s, const float r, const float v, const float t, const float k)
{
    const float root_t = sqrt(t);
    const float d1 = (1.0f / (v * root_t)) * (log(s / k) + (r + v * v * 0.5f) * t);
    const float d2 = d1 - v * root_t;
    const float discounted_strike = k * exp(-r * t);
    return (s * normal_cdf(d1)) - (discounted_strike * normal_cdf(d2));
}
// Black-Scholes vega of a European call: S * phi(d1) * sqrt(T).
__device__ __host__ float call_vega(const float s, const float r, const float v, const float t, const float k)
{
    const float root_t = sqrt(t);
    const float d1 = (1.0f / (v * root_t)) * (log(s / k) + (r + v * v * 0.5f) * t);
    return s * normal_pdf(d1) * root_t;
}
// Black-Scholes price of a European put: K*exp(-rT)*N(-d2) - S*N(-d1).
__device__ __host__ float put_price(const float s, const float r, const float v, const float t, const float k)
{
    const float root_t = sqrt(t);
    const float d1 = (1.0f / (v * root_t)) * (log(s / k) + (r + v * v * 0.5f) * t);
    const float d2 = d1 - v * root_t;
    const float discounted_strike = k * exp(-r * t);
    return (discounted_strike * normal_cdf(-d2)) - (s * normal_cdf(-d1));
}
// Vega is identical for European calls and puts, so delegate to call_vega.
__device__ __host__ float put_vega(const float s, const float r, const float v, const float t, const float k)
{
    return call_vega(s, r, v, t, k);
}
/*
 * One thread per option: Newton-Raphson on the Black-Scholes call price to
 * recover implied volatility. v[] supplies the initial guess and receives
 * the result; the loop stops once |price error| <= tol or after `iter`
 * additional iterations.
 * NOTE(review): the Newton step divides by vega with no guard -- a
 * near-zero vega would produce inf/NaN in v_local; confirm the inputs keep
 * vega away from zero.
 */
__global__ void volatility_inversion_device(const float s, const float r, float *v, const float *t, const float *k,
const float *p, const float tol, const int iter, const int num)
{
const int option = (blockIdx.x * blockDim.x) + threadIdx.x;
if (option >= num)
{
return;
}
float v_local = v[option];
float error;
int i = 0;
do
{
const float price = call_price(s, r, v_local, t[option], k[option]);
const float vega = call_vega(s, r, v_local, t[option], k[option]);
error = (p[option] - price);
v_local += error / vega;
} while ((abs(error) > tol) && (i++ < iter));
v[option] = v_local;
}
/*
 * Cooperative argmin over each GUESSES-wide segment of ferr_ladder.
 * Every thread seeds its own (index, error) pair into shared memory, then a
 * fixed sequence of strided compares (16, 8, 4, 2, 1) folds the minimum of
 * the segment into its first slot; all threads return the winning index for
 * the segment starting at lower_guess.
 * NOTE(review): values written by other lanes are read back with no
 * __syncthreads()/__syncwarp() between stages -- this relies on the GUESSES
 * lanes sharing a warp and executing in lockstep; verify on Volta+
 * independent thread scheduling.
 * NOTE(review): for threads near the top of the block, tid + 16 / + 8 / + 1
 * can index past BLOCK_SIZE-1 -- looks like an out-of-bounds shared read for
 * the last segment; confirm against the launch configuration.
 */
template<int GUESSES, int BLOCK_SIZE>
__device__ int find_best_guess(const float *const ferr_ladder, const int tid, const int lower_guess)
{
__shared__ int smallest_idx[BLOCK_SIZE];
__shared__ float smallest_error[BLOCK_SIZE];
/* Find minimum error */
smallest_idx[tid] = tid;
smallest_error[tid] = ferr_ladder[tid];
if ((GUESSES > 16) && (smallest_error[tid + 16] < smallest_error[tid]))
{
smallest_error[tid] = smallest_error[tid + 16];
smallest_idx[tid] = tid + 16;
}
if ((GUESSES > 8) && (smallest_error[tid + 8] < smallest_error[tid]))
{
smallest_error[tid] = smallest_error[tid + 8];
smallest_idx[tid] = tid + 8;
}
if ((GUESSES > 4) && (smallest_error[tid + 4] < smallest_error[tid]))
{
smallest_error[tid] = smallest_error[tid + 4];
smallest_idx[tid] = tid + 4;
}
if ((GUESSES > 2) && (smallest_error[tid + 2] < smallest_error[tid]))
{
smallest_error[tid] = smallest_error[tid + 2];
smallest_idx[tid] = tid + 2;
}
if (smallest_error[tid + 1] < smallest_error[tid])
{
smallest_error[tid] = smallest_error[tid + 1];
smallest_idx[tid] = tid + 1;
}
return smallest_idx[lower_guess];
}
/*
 * Bracketing search for implied volatility: each option is solved by
 * GUESSES cooperating threads (GUESSES must be a power of two). Every
 * iteration evaluates a "ladder" of GUESSES volatility guesses spanning
 * `ladder_span`, finds the guess with the smallest pricing error, then
 * shrinks/expands/shifts the ladder around the sign change of the error
 * until the error is within `tol` or `iter` iterations have run.
 * NOTE(review): shared arrays are written and then read across lanes with
 * no __syncthreads()/__syncwarp(); this relies on the GUESSES lanes sharing
 * a warp in lockstep -- verify on Volta+.
 */
template<int GUESSES>
__global__ void parallel_volatility_inversion_device(const float s, const float r, float *v, const float *t, const float *k,
const float *p, const float tol, const int iter)
{
/* Lane bookkeeping: `guess` is this thread's rung, lower/upper_guess are
 * the first/last rung of this option's segment. */
const int tid = threadIdx.x;
const int guess = tid & (GUESSES - 1);
const int lower_guess = tid & ~(GUESSES - 1);
const int upper_guess = lower_guess + GUESSES - 1;
const int log_guesses = 31 - __clz(GUESSES);
const int option = (blockIdx.x * (parallel_threads_per_block >> log_guesses)) + (tid >> log_guesses);
/* Build guesses */
__shared__ float v_ladder[parallel_threads_per_block];
__shared__ float err_ladder[parallel_threads_per_block];
__shared__ float ferr_ladder[parallel_threads_per_block];
float ladder_span = 0.04f; /* Span the guesses over 4% */
float v_min = v[option] - (ladder_span * 0.5f);
int i = 0;
const float guess_fraction = guess / static_cast<float>(GUESSES - 1);
do
{
/* Work out guess */
v_ladder[tid] = v_min + (ladder_span * guess_fraction);
/* Price */
err_ladder[tid] = call_price(s, r, v_ladder[tid], t[option], k[option]) - p[option];
ferr_ladder[tid] = abs(err_ladder[tid]);
/* Find minimum error */
const int min_err_pos = find_best_guess<GUESSES, parallel_threads_per_block>(ferr_ladder, tid, lower_guess);
if (ferr_ladder[min_err_pos] < tol)
{
if (tid == lower_guess)
{
v[option] = v_ladder[min_err_pos];
}
break;
}
/* Pick the span for the next ladder */
/* Doesnt matter if v_min is actually higher than v_max so long as 0 is crossed */
if ((err_ladder[lower_guess] * err_ladder[upper_guess]) >= 0.0f) /* Root not found (or two roots found) */
{
ladder_span *= 2.0f;
if (abs(err_ladder[lower_guess] - err_ladder[upper_guess]) < tol) /* Ladder is very flat so no direction */
{
v_min -= ladder_span * 0.5f;
ladder_span *= 2.0f;
}
else if (ferr_ladder[lower_guess] < ferr_ladder[upper_guess]) /* Lower end is closer to root so expand it */
{
v_min -= ladder_span;
}
else /* Upper end is closer to root so expand it */
{
v_min = v_ladder[upper_guess];
}
}
else if (min_err_pos == lower_guess) /* Root found at lower extreme of ladder */
{
if ((err_ladder[lower_guess] * err_ladder[lower_guess + 1]) >= 0.0f)
{
ladder_span *= 2.0f;
v_min = (v_ladder[min_err_pos] - ladder_span);
}
else
{
ladder_span *= 0.5f;
v_min = v_ladder[min_err_pos];
}
}
else if (min_err_pos == upper_guess) /* Root found at upper extreme of ladder */
{
if ((err_ladder[upper_guess] * err_ladder[upper_guess - 1]) >= 0.0f)
{
ladder_span *= 2.0f;
v_min = v_ladder[min_err_pos];
}
else
{
ladder_span *= 0.5f;
v_min = v_ladder[upper_guess - 1];
}
}
else if ((err_ladder[min_err_pos] * err_ladder[min_err_pos - 1]) < 0.0f) /* Root in bin below min error */
{
ladder_span *= 1.0f / GUESSES;
v_min = v_ladder[min_err_pos - 1];
}
else /* Root in bin above min error */
{
ladder_span *= 1.0f / GUESSES;
v_min = v_ladder[min_err_pos];
}
} while (i++ < iter);
}
/*
 * Vega-guided variant of the ladder search: GUESSES lanes price a ladder of
 * guesses centred on v_mid, the best rung takes one Newton step
 * (error / vega) to propose the next centre, and the span collapses sharply
 * when the Newton target lands inside the current ladder (otherwise it
 * doubles). Iterates until |error| <= tol or `iter` iterations.
 * NOTE(review): `option` is derived from parallel_threads_per_block, while
 * the shared arrays are sized with vega_parallel_threads_per_block -- the
 * two constants happen to be equal (192), but the mismatch looks
 * unintentional; confirm.
 * NOTE(review): same warp-lockstep assumption as the other ladder kernel --
 * shared data is exchanged between lanes with no explicit sync.
 */
template<int GUESSES>
__global__ void vega_guided_parallel_volatility_inversion_device(const float s, const float r, float *v, const float *t, const float *k,
const float *p, const float tol, const int iter)
{
const int tid = threadIdx.x;
const int guess = tid & (GUESSES - 1);
const int lower_guess = tid & ~(GUESSES - 1);
const int upper_guess = lower_guess + GUESSES - 1;
const int log_guesses = 31 - __clz(GUESSES);
const int option = (blockIdx.x * (parallel_threads_per_block >> log_guesses)) + (tid >> log_guesses);
/* Build guesses */
__shared__ float v_ladder[vega_parallel_threads_per_block];
__shared__ float vega_ladder[vega_parallel_threads_per_block];
__shared__ float err_ladder[vega_parallel_threads_per_block];
__shared__ float ferr_ladder[vega_parallel_threads_per_block];
float ladder_span = 0.04f; /* Span the guesses over 4% */
float v_mid = v[option];
int i = 0;
int min_err_pos;
const float guess_fraction = guess / static_cast<float>(GUESSES);
do
{
/* Work out guess */
v_ladder[tid] = v_mid + (ladder_span * guess_fraction) - (ladder_span * 0.5f) + (ladder_span / static_cast<float>(GUESSES)) * 0.5f;
/* Price */
err_ladder[tid] = p[option] - call_price(s, r, v_ladder[tid], t[option], k[option]);
vega_ladder[tid] = call_vega(s, r, v_ladder[tid], t[option], k[option]);
ferr_ladder[tid] = abs(err_ladder[tid]);
/* Find minimum error */
min_err_pos = find_best_guess<GUESSES, vega_parallel_threads_per_block>(ferr_ladder, tid, lower_guess);
/* Pick the span for the next ladder */
v_mid = v_ladder[min_err_pos] + (err_ladder[min_err_pos] / vega_ladder[min_err_pos]);
if ((v_mid < v_ladder[upper_guess]) && (v_mid > v_ladder[lower_guess]))
{
ladder_span *= (1.0f / 1024.0f);
}
else
{
ladder_span *= 2.0f;
}
} while ((ferr_ladder[min_err_pos] > tol) && (i++ < iter));
if (tid == lower_guess)
{
v[option] = v_ladder[min_err_pos];
}
}
/*
 * Host driver: copies the option data (initial vols v, maturities t,
 * strikes k, market prices p) to the device, dispatches one of the three
 * inversion kernels selected by `kernel`, and copies the implied vols back
 * into v. Errors from any CUDA call cause an early return via
 * HANDLE_CUDA_ERROR (note: device buffers allocated before the failure are
 * then leaked).
 * NOTE(review): cudaDeviceReset() at the end tears down the entire CUDA
 * context for the process -- confirm no other CUDA state must survive this
 * call.
 */
void volatility_inversion(const float s, const float r, float *v, const float *t, const float *k,
const float *p, const float tol, const int iter, const int num, const kernel_t kernel)
{
/* Start the profiler */
//cudaProfilerStart();
/* Get device */
HANDLE_CUDA_ERROR(cudaSetDevice(0));
/* Allocate and copy memory */
float *dev_v;
HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_v, num * sizeof(float)));
HANDLE_CUDA_ERROR(cudaMemcpy(dev_v, v, num * sizeof(float), cudaMemcpyHostToDevice));
float *dev_t;
HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_t, num * sizeof(float)));
HANDLE_CUDA_ERROR(cudaMemcpy(dev_t, t, num * sizeof(float), cudaMemcpyHostToDevice));
float *dev_k;
HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_k, num * sizeof(float)));
HANDLE_CUDA_ERROR(cudaMemcpy(dev_k, k, num * sizeof(float), cudaMemcpyHostToDevice));
float *dev_p;
HANDLE_CUDA_ERROR(cudaMalloc((void **)&dev_p, num * sizeof(float)));
HANDLE_CUDA_ERROR(cudaMemcpy(dev_p, p, num * sizeof(float), cudaMemcpyHostToDevice));
/* Run kernels: serial uses one thread per option; the parallel variants
 * use GUESSES threads per option, hence the scaled block counts. */
const int serial_threads = min(num, serial_threads_per_block);
const int serial_blocks = (int)ceil(num / static_cast<float>(serial_threads));
const int parallel_threads = parallel_threads_per_block;
const int parallel_blocks = static_cast<int>(ceil((parallel_guesses_per_option * num) / static_cast<float>(parallel_threads_per_block)));
const int vega_parallel_threads = vega_parallel_threads_per_block;
const int vega_parallel_blocks = static_cast<int>(ceil((vega_parallel_guesses_per_option * num) / static_cast<float>(vega_parallel_threads_per_block)));
switch (kernel)
{
case serial :
volatility_inversion_device<<<serial_blocks, serial_threads>>>(s, r, dev_v, dev_t, dev_k, dev_p, tol, iter, num);
break;
case parallel :
parallel_volatility_inversion_device<parallel_guesses_per_option><<<parallel_blocks, parallel_threads>>>(s, r, dev_v, dev_t, dev_k, dev_p, tol, iter);
break;
case vega_parallel :
vega_guided_parallel_volatility_inversion_device<vega_parallel_guesses_per_option><<<vega_parallel_blocks, vega_parallel_threads>>>(s, r, dev_v, dev_t, dev_k, dev_p, tol, iter);
break;
}
/* Copy data back */
HANDLE_CUDA_ERROR(cudaDeviceSynchronize());
HANDLE_CUDA_ERROR(cudaMemcpy(v, dev_v, num * sizeof(float), cudaMemcpyDeviceToHost));
/* Clean up */
HANDLE_CUDA_ERROR(cudaFree(dev_v));
HANDLE_CUDA_ERROR(cudaFree(dev_t));
HANDLE_CUDA_ERROR(cudaFree(dev_k));
HANDLE_CUDA_ERROR(cudaFree(dev_p));
/* Flush profiling info */
HANDLE_CUDA_ERROR(cudaDeviceReset());
/* Stop the profiler */
//cudaProfilerStop();
}
8,845 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Abort the program with a file/line-tagged message if the most recent CUDA
// call (or kernel launch) left an error behind. Used via the CUDA_CHECK macro.
void cuda_check(string file, int line)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    cout << endl << file << ", line " << line << ": " << cudaGetErrorString(err) << " (" << err << ")" << endl;
    exit(1);
}
// Square vector on GPU
// Square each element of d_a in place: d_a[i] = d_a[i]^2 for i < n.
// BUG FIX: previously every thread looped over all n elements, so with more
// than one thread in flight two threads could read/write the same slot in an
// unordered way and square a value twice (val^4). One guarded element per
// thread matches the CPU reference computation in main.
__global__ void vecSqr(float *d_a, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n){
        float val = d_a[i];
        d_a[i] = val * val;
    }
}
int main(int argc,char **argv)
{
    // alloc and init input arrays on host (CPU)
    int n = 10;
    float *a = new float[n];
    size_t nbytes = (size_t)(n)*sizeof(float);
    for(int i=0; i<n; i++) a[i] = i;
    // CPU reference computation: square every element
    for(int i=0; i<n; i++)
    {
        float val = a[i];
        val = val*val;
        a[i] = val;
    }
    // print result
    cout << "CPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;
    // GPU computation
    // reinit data
    for(int i=0; i<n; i++) a[i] = i;
    // Memory allocation on GPU
    // CONSISTENCY FIX: the calls below were unchecked while the later copy
    // and free already used CUDA_CHECK; check every call the same way.
    float *d_a = NULL;
    cudaMalloc(&d_a, nbytes); CUDA_CHECK;
    cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
    // Launch kernel
    dim3 block = dim3(128, 1, 1);
    dim3 grid = dim3((n+block.x-1)/block.x, 1, 1);
    vecSqr <<<grid, block>>> (d_a, n);
    CUDA_CHECK;  // catches launch-configuration errors
    // Copy back to CPU
    cudaMemcpy(a, d_a, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
    cudaFree(d_a); CUDA_CHECK;
    // print result
    cout << "GPU:" << endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;
    // free CPU arrays
    delete[] a;
    return 0;
}
|
8,846 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#define LIST_SIZE 100000
__device__ unsigned long long instCountList[LIST_SIZE];
__device__ int init_flag = 0;
// Intentionally empty instrumentation hook: call sites pass a per-site
// instruction index, but counting (e.g. into instCountList above) is not
// implemented in this build.
extern "C" __device__ void profileCount(long index){
}
|
8,847 | #include "cuda.h"
#include "stdio.h"
#include <time.h>
#define threads_per_block 512
#define threads_per_block_matrix 32 // 32*16 = 512
//#include <sys/time.h>
//#include <sys/resource.h>
// double dwalltime(){
// double sec;
// struct timeval tv;
// gettimeofday(&tv,NULL);
// sec = tv.tv_sec + tv.tv_usec/1000000.0;
// return sec;
// }
int* arreglo_result;
int* mat_result;
int* suma_det;
int* arreglo_A;
int* d_arreglo_A;
int* arreglo_B;
int* d_arreglo_B;
int* mat_A;
int* d_mat_A;
int* mat_B;
int* d_mat_B;
int* arreglo_determinantes;
int* d_arreglo_determinantes;
// Print a single integer on its own line.
void printi(int i){
    printf("%d\n", i);
}
void init_CPU_matrices_array(int* arreglo, int n){
    // Write n consecutive 4x4 identity matrices (16 ints each, row-major):
    // within each matrix the diagonal positions are 0, 5, 10 and 15, i.e.
    // exactly the offsets divisible by 5.
    for(int i = 0; i < n; i++)
    {
        for(int j = 0; j < 16; j++)
        {
            arreglo[(i*16) + j] = (j % 5 == 0) ? 1 : 0;
        }
    }
}
void init_CPU_array(int* array, int n){
    // Fill the first n slots with their own index: array[i] = i.
    int idx = 0;
    while (idx < n) {
        array[idx] = idx;
        ++idx;
    }
}
void print_CPU_array(int array[], int n){
    // Print each of the n elements on its own line via printi.
    for(int idx = 0; idx < n; ++idx)
        printi(array[idx]);
}
void print_CPU_matrix(int array[], int n){
    // Print n ints, 16 per row, starting each row of 16 with a newline.
    for(int idx = 0; idx < n; ++idx) {
        if(idx % 16 == 0)
            printf("%s\n", "");
        printf("%d ", array[idx]);
    }
}
/* 3x3 determinant of the minor of a 4x4 row-major matrix `m` obtained by
 * deleting row 0 and column `skip_col` (all-int arithmetic, combined in
 * float exactly as the original unrolled code did). */
__device__ float det3_minor_dev(const int* m, int skip_col){
    int v[9];
    int idx = 0;
    for (int r = 1; r < 4; r++)
        for (int c = 0; c < 4; c++)
            if (c != skip_col)
                v[idx++] = m[r * 4 + c];
    const float d0 = v[0] * (v[4]*v[8] - v[5]*v[7]);
    const float d1 = v[1] * (v[3]*v[8] - v[5]*v[6]);
    const float d2 = v[2] * (v[3]*v[7] - v[4]*v[6]);
    return d0 - d1 + d2;
}
/* One thread per 4x4 matrix: cofactor expansion of the determinant of
 * matrix `tid` in the packed array arreglo_b (16 ints per matrix); the
 * (float-truncated) result is stored in arreglo_a[tid]. */
__global__ void determinanteador(int* arreglo_b, int* arreglo_a, int N){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the guard was `tid > N`, which let thread tid == N read and
    // write one element past the arrays.
    if(tid >= N)
        return;
    const int* m = arreglo_b + tid * 16;
    // NOTE(review): the cofactor weights come from arreglo_b[0..3] (matrix
    // 0's first row) rather than this matrix's own first row m[0..3]. That
    // matches determinanteador_CPU, so it is preserved here -- confirm it is
    // intentional (for the identity matrices used in main both agree).
    float result0 = det3_minor_dev(m, 0) * arreglo_b[0];
    float result1 = det3_minor_dev(m, 1) * arreglo_b[1];
    float result2 = det3_minor_dev(m, 2) * arreglo_b[2];
    float result3 = det3_minor_dev(m, 3) * arreglo_b[3];
    float result_total = result0 - result1 + result2 - result3;
    arreglo_a[tid] = result_total;
}
/* 3x3 determinant of the minor of a 4x4 row-major matrix `m` obtained by
 * deleting row 0 and column `skip_col` (all-int arithmetic, combined in
 * float exactly as the original unrolled code did). */
static float det3_minor_cpu(const int* m, int skip_col){
    int v[9];
    int idx = 0;
    for (int r = 1; r < 4; r++)
        for (int c = 0; c < 4; c++)
            if (c != skip_col)
                v[idx++] = m[r * 4 + c];
    const float d0 = v[0] * (v[4]*v[8] - v[5]*v[7]);
    const float d1 = v[1] * (v[3]*v[8] - v[5]*v[6]);
    const float d2 = v[2] * (v[3]*v[7] - v[4]*v[6]);
    return d0 - d1 + d2;
}
/* CPU reference: cofactor expansion of the 4x4 determinant of matrix `tid`
 * in the packed array arreglo_b (16 ints per matrix); the (float-truncated)
 * result is stored in arreglo_a[tid].
 * NOTE(review): the cofactor weights use arreglo_b[0..3] -- matrix 0's first
 * row -- not this matrix's own first row. That mirrors the GPU kernel, so it
 * is preserved here; confirm it is intentional. */
void determinanteador_CPU(int* arreglo_b, int* arreglo_a, int tid){
    const int* m = arreglo_b + tid * 16;
    float result0 = det3_minor_cpu(m, 0) * arreglo_b[0];
    float result1 = det3_minor_cpu(m, 1) * arreglo_b[1];
    float result2 = det3_minor_cpu(m, 2) * arreglo_b[2];
    float result3 = det3_minor_cpu(m, 3) * arreglo_b[3];
    float result_total = result0 - result1 + result2 - result3;
    arreglo_a[tid] = result_total;
}
// Sums the determinants: block-wise tree reduction over `arreglo`; each
// block's partial sum is written to result[blockIdx.x].
// NOTE(review): threads with tid > N return before the __syncthreads()
// calls below -- a barrier inside divergent control flow is undefined
// behaviour; confirm N is effectively block-aligned whenever this runs.
// NOTE(review): the guard `tid > N` lets tid == N through (one element past
// the intended range) -- looks like it should be `tid >= N`.
__global__ void sumador_determinantes(int* arreglo, int* result, float N)
{
__shared__ int compartida[threads_per_block];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid > N)
return;
// stage this thread's element in shared memory
compartida[threadIdx.x] = arreglo[tid];
__syncthreads();
// pairwise reduction: pass i combines slots 2^i apart, partner at 2^(i-1)
for(int i=1; pow((float)2,(float)i-1) < threads_per_block; i++)
{
int acceso = pow((float)2,(float)i);
int offset = pow((float)2, (float)i-1);
if(threadIdx.x < ((float)threads_per_block/acceso) && (threadIdx.x * acceso + offset) < (N - blockIdx.x * blockDim.x))
{
compartida[threadIdx.x * acceso] = compartida[threadIdx.x * acceso] + compartida[threadIdx.x * acceso + offset];
// compartida[threadIdx.x * acceso + offset] = 0;
}
__syncthreads();
}
// the first thread of each block stores the block's partial sum
if(threadIdx.x == 0)
result[blockIdx.x] = compartida[0];
}
// Element-wise tree reduction over packed 4x4 matrices (16 ints each):
// groups of 16 consecutive threads each handle one matrix slot, and the
// block's element-wise matrix sum ends up in its first 16 shared slots,
// written to result[blockIdx.x*16 .. +15].
// NOTE(review): same issues as sumador_determinantes -- the early return
// before __syncthreads() is a divergent barrier, and `tid > N * 16` lets
// tid == N*16 through; confirm launch sizes keep these paths unexercised.
__global__ void sumador_matrices(int* arreglo, int* result, float N)
{
__shared__ int compartida[threads_per_block_matrix * 16];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid > N * 16)
return;
compartida[threadIdx.x] = arreglo[tid];
__syncthreads();
// pass i combines matrices 2^i apart; new_access keeps the 16-int layout
for(int i=1; pow((float)2,(float)i-1) < N; i++)
{
int acceso = pow((float)2,(float)i);
int offset = pow((float)2, (float)i-1);
int t_id = (threadIdx.x/16) * 16;
int new_access = t_id * acceso + threadIdx.x % 16 ;
int new_offset = new_access + offset * 16;
if(t_id < ((float)threads_per_block_matrix*16/acceso) && (new_offset < (threads_per_block_matrix*16)))
{
compartida[new_access] = compartida[new_access] + compartida[new_offset];
// compartida[new_offset] = 0;
// printf("GRUPO: %d - ITERACION: %d - TID %d - ACCESO: %d - OFFSET %d - REMAINING: %d \n", blockIdx.x,
// i, tid, new_access , new_offset, threadIdx.x * acceso + offset);
}
__syncthreads();
}
// the first 16 threads of each block store the block's 4x4 partial sum
if(threadIdx.x < 16)
result[blockIdx.x * 16 + threadIdx.x] = compartida[threadIdx.x];
}
/* Pipeline: build N 4x4 identity matrices on the host; on the GPU compute
 * every matrix's determinant, reduce the determinants to a single sum,
 * reduce the matrices element-wise to a single 4x4 sum, and scale that sum
 * by the average determinant. The same computation is repeated on the CPU,
 * and both results and timings are printed. */
int main(int argc, char** argv){
int N = 4096*4096 ;
int numBytesMatrices = sizeof(int) * N * 16; // bytes to allocate (matrices)
int numBytesDeterminantes = sizeof(int) * N; // bytes to allocate (determinants)
//##################################################################################
//############################## INITIALIZATION ####################################
suma_det = (int *) malloc(sizeof(int));
arreglo_A = (int *) malloc(numBytesDeterminantes);
arreglo_B = (int *) malloc(numBytesDeterminantes);
mat_A = (int *) malloc(numBytesMatrices);
mat_B = (int *) malloc(numBytesMatrices);
arreglo_determinantes = (int*) malloc(numBytesDeterminantes);
cudaMalloc(&d_arreglo_determinantes, numBytesDeterminantes);
cudaMalloc(&d_arreglo_A, numBytesDeterminantes);
cudaMalloc(&d_arreglo_B, numBytesDeterminantes);
cudaMalloc(&d_mat_A, numBytesMatrices);
cudaMalloc(&d_mat_B, numBytesMatrices);
init_CPU_matrices_array(mat_A, N);
cudaMemcpy(d_mat_A, mat_A, numBytesMatrices, cudaMemcpyHostToDevice);
//##################################################################################
//################################ GPU EXECUTION ###################################
clock_t time_gpu = clock();
//################################ DETERMINANTS ####################################
// one thread per matrix
dim3 miGrid1D_determinanteador(ceil((float)N/threads_per_block),1);
dim3 miBloque1D_determinanteador(threads_per_block,1);
determinanteador<<<miGrid1D_determinanteador,miBloque1D_determinanteador>>>(d_mat_A, d_arreglo_A, N);
// cudaThreadSynchronize();
// printf("ERROR %s\n", cudaGetErrorString(cudaGetLastError()));
// cudaMemcpy(arreglo_determinantes, d_arreglo_A, numBytesDeterminantes, cudaMemcpyDeviceToHost);
// print_CPU_matrix(arreglo_determinantes, N);
//############################# DETERMINANT SUM ####################################
// Multi-pass reduction: each pass shrinks the array by a factor of
// threads_per_block, ping-ponging between d_arreglo_A and d_arreglo_B.
dim3 miBloque1D_sumador(threads_per_block,1);
for(int i=0; pow(threads_per_block, i) < N ; i++)
{
int remaining_elements = ceil((float)N/pow(threads_per_block, i));
int block_count = ceil((float)N/pow(threads_per_block, i+1));
dim3 miGrid1D_sumador(block_count,1);
sumador_determinantes<<<miGrid1D_sumador, miBloque1D_sumador>>>(d_arreglo_A, d_arreglo_B, remaining_elements);
cudaThreadSynchronize();
// printf("ERROR: %s\n", cudaGetErrorString(cudaGetLastError()));
// printf("elementos restantes: %d \n", remaining_elements);
// printf("bloques usados: %d \n", block_count);
int* tmp = d_arreglo_A;
d_arreglo_A = d_arreglo_B;
d_arreglo_B = tmp;
}
// cudaMemcpy(arreglo_determinantes, d_arreglo_A, sizeof(int) * N, cudaMemcpyDeviceToHost);
// print_CPU_matrix(arreglo_determinantes, N);
//############################## MATRIX SUM ########################################
// Same ping-pong reduction, but element-wise over 16-int matrices.
dim3 miBloque1D_sumador_mat(threads_per_block,1);
for(int i=0; pow(threads_per_block_matrix, i) < N ; i++)
{
int remaining_elements = ceil((float)N/pow(threads_per_block_matrix, i));
int block_count = ceil((float)N/pow(threads_per_block_matrix, i+1));
dim3 miGrid1D_sumador_mat(block_count,1);
sumador_matrices<<<miGrid1D_sumador_mat, miBloque1D_sumador_mat>>>(d_mat_A, d_mat_B, remaining_elements);
cudaThreadSynchronize();
// printf("ERROR %s\n", cudaGetErrorString(cudaGetLastError()));
// printf("elementos restantes: %d \n", remaining_elements);
// printf("bloques usados: %d \n", block_count);
int* tmp = d_mat_A;
d_mat_A = d_mat_B;
d_mat_B = tmp;
}
//############################### READ BACK ########################################
clock_t time_gpu_end = clock();
// AVERAGE determinant
cudaMemcpy(suma_det, d_arreglo_A, sizeof(int), cudaMemcpyDeviceToHost);
// printf("SUMA DE DETERMINANTES: %d\n", *suma_det);
double promedio_det = (float)(*suma_det) / N;
// printf("PROMEDIO: %lf\n", promedio_det);
// MATRIX SUM scaled by the average determinant
cudaMemcpy(mat_B, d_mat_A, 16 * sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0; i< 16; i++)
mat_B[i] *= (int)promedio_det;
printf("%s\n", "");
printf("%s", "RESULTADO GPU:");
print_CPU_matrix(mat_B, 16);
//############################ CPU REFERENCE #######################################
init_CPU_matrices_array(mat_A, N);
for(int i=0; i< 16; i++)
{
mat_B[i] = 0;
}
clock_t time_cpu = clock();
for(int i=0; i< N; i++)
{
determinanteador_CPU(mat_A, arreglo_determinantes, i);
}
int suma_determinantes_cpu = 0;
for(int i=0; i< N; i++)
{
suma_determinantes_cpu += arreglo_determinantes[i];
}
// printf("%s\n", "");
// printf("%s\n", "");
// printf("%s\n", "");
// printf("%s\n", "DETERMINANTES CPU:");
// print_CPU_matrix(arreglo_determinantes, N);
// printf("%s\n", "");
// printf("%s\n", "SUMA TOTAL DETERMINANTES:");
// printf("%d\n", suma_determinantes_cpu);
float promedio = (float)suma_determinantes_cpu / N;
// element-wise sum of all matrices, scaled by the average determinant
for(int j=0; j<16; j++)
{
for(int i=0; i< N; i++)
{
mat_B[j] += mat_A[i * 16 + j];
}
mat_B[j] *= promedio;
}
clock_t time_cpu_end = clock();
printf("%s\n", "");
printf("%s\n", "");
printf("%s\n", "");
printf("%s", "RESULTADO CPU:");
print_CPU_matrix(mat_B, 16);
printf("%s\n", "");
printf("%s\n", "");
printf("%s\n", "");
printf("TIEMPO CPU: %lf\n", (double)(time_cpu_end - time_cpu) / CLOCKS_PER_SEC);
printf("TIEMPO GPU: %lf\n", (double)(time_gpu_end - time_gpu) / CLOCKS_PER_SEC);
free(arreglo_determinantes);
free(suma_det);
free(arreglo_A);
free(arreglo_B);
free(mat_A);
free(mat_B);
cudaFree (d_arreglo_A);
cudaFree (d_arreglo_B);
cudaFree (d_arreglo_determinantes);
cudaFree (d_mat_A);
cudaFree (d_mat_B);
}
|
8,848 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
void printImg(int * img,int width,int height);
void copyImg(int *img ,int *h_img,int width ,int height);
// Compute one Mandelbrot pixel per thread.
// Expects a 2D launch; pixel (thisX, thisY) maps to complex point
// (lowerX + thisX*stepX, lowerY + thisY*stepY). Writes the escape
// iteration count into d_img (row-major, resX pixels per row).
__global__ void mandelKernel(float lowerX, float lowerY, int* d_img, int resX, int resY, float stepX, float stepY, int maxIterations) {
    int thisX = blockIdx.x * blockDim.x + threadIdx.x;
    int thisY = blockIdx.y * blockDim.y + threadIdx.y;
    // Bounds guard: without it, any launch whose grid does not divide the
    // image exactly writes past the end of d_img.
    if (thisX >= resX || thisY >= resY)
        return;
    float x = lowerX + thisX * stepX;
    float y = lowerY + thisY * stepY;
    float c_re = x, c_im = y;
    float z_re = x, z_im = y;
    // Iterate z = z^2 + c until divergence (|z|^2 > 4) or the iteration cap.
    int i;
    for (i = 0; i < maxIterations; ++i)
    {
        if (z_re * z_re + z_im * z_im > 4.f)
            break;
        float new_re = z_re * z_re - z_im * z_im;
        float new_im = 2.f * z_re * z_im;
        z_re = c_re + new_re;
        z_im = c_im + new_im;
    }
    d_img[thisY * resX + thisX] = i;
}
// Host front-end function that allocates the memory and launches the GPU kernel
// Host front-end for the Mandelbrot kernel: allocates the device image
// buffer, launches a 16x16-thread grid over the image, and copies the
// result back into the caller's img array.
// Note: the grid size uses integer division, so resX and resY are assumed
// to be multiples of 16 (as in the original code).
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    const int tileX = 16;
    const int tileY = 16;

    // Per-pixel step in the complex plane.
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    int imageSize = resX * resY * sizeof(int);

    int *d_img;
    cudaMalloc((void**)&d_img, imageSize);

    dim3 blockSize(tileX, tileY);
    dim3 numBlock(resX / tileX, resY / tileY);

    mandelKernel<<<numBlock, blockSize>>>(lowerX, lowerY, d_img, resX, resY, stepX, stepY, maxIterations);

    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(img, d_img, imageSize, cudaMemcpyDeviceToHost);

    cudaFree(d_img);
}
// Copy a width x height row-major image from h_img into img.
void copyImg(int *img, int *h_img, int width, int height){
    int total = width * height;
    for (int idx = 0; idx < total; idx++) {
        img[idx] = h_img[idx];
    }
}
// Print a width x height row-major image, one row per line.
// Fix: the row stride is `width`, not `height` — the original indexed
// img[j*height+i], which reads the wrong cells for non-square images
// (copyImg in this file uses j*width+i for the same layout).
void printImg(int * img, int width, int height){
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            printf("%d ", img[j * width + i]);
        }
        printf("\n");
    }
}
|
8,849 | extern "C"
// Smoke-test stub for the sconv update kernel: fills shared memory and
// reads it back in mirrored order. Most parameters are unused here.
// assumes the launch uses blockDim.x == 256 (reads share[255 - tid]) —
// TODO confirm against the launcher.
__global__ void sconv_update_C128_K128 (
float* param_test,
float* param_F,
const float* param_I,
const float* param_E,
float param_alpha,
int param_N,
int param_K,
int param_D,
int param_H,
int param_W,
int param_WN,
int param_HWN,
int param_DHWN,
int param_C,
int param_CRST,
int param_RST,
int param_magic_RST,
int param_shift_RST,
int param_RS,
int param_magic_RS,
int param_shift_RS,
int param_S,
int param_magic_S,
int param_shift_S,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_d,
int param_str_h,
int param_str_w,
int param_P,
int param_Q,
int param_PQ,
int param_QN,
int param_PQN,
int param_MPQN,
int param_magic_Q,
int param_shift_Q,
int param_magic_PQ,
int param_shift_PQ,
int param_part_P,
int param_part_Q,
int param_part_PQ) {
__shared__ float share[(128 * 16 + 32) * 4 + 6];
int tid = threadIdx.x;
share[tid] = 1;
// Fix: share[255 - tid] is written by a *different* thread; reading it
// without a block-wide barrier is a shared-memory data race.
__syncthreads();
*param_F = share[255 - tid];
*param_test = share[255 - tid];
}
|
8,850 | # include <time.h>
# include <stdio.h>
# include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Implement the kernel
// Reverse an array across blocks: block b writes its elements, in reversed
// thread order, into the mirror block (gridDim.x - 1 - b) of d_out, so the
// whole array comes out reversed. Assumes the array length equals
// gridDim.x * blockDim.x.
__global__ void reverseArrayBlock(int *d_out, int *d_in){
    int srcBase = blockDim.x * blockIdx.x;
    int dstBase = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int src = srcBase + threadIdx.x;
    int dst = dstBase + (blockDim.x - 1 - threadIdx.x);
    d_out[dst] = d_in[src];
}
// Reverse a 1M-element array on the GPU and verify the result on the host.
int main(int argc, char ** argv){
    int *h_a;
    int dimA = 256*1024; // 256K ints (1 MiB)
    // Device pointers: input and reversed output.
    int *d_a, *d_b;
    int numThreadsPerBlock = 256;
    // dimA is a multiple of the block size, so no remainder handling needed.
    int numBlocks = dimA/numThreadsPerBlock;
    size_t memSize = numBlocks*numThreadsPerBlock*sizeof(int);
    h_a = (int *)malloc(memSize);
    cudaMalloc((void **)&d_a, memSize);
    cudaMalloc((void **)&d_b, memSize);
    // Initialize input array on host: h_a[i] = i.
    for(int i=0; i< dimA; i++){
        h_a[i] = i;
    }
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    clock_t device_start = clock();
    reverseArrayBlock<<< dimGrid, dimBlock>>>(d_b, d_a);
    // Block until the kernel has completed so the timing below covers the
    // actual execution. cudaThreadSynchronize() is deprecated; use the
    // modern equivalent.
    cudaDeviceSynchronize();
    clock_t device_time = (clock() - device_start);
    // Fix: clock ticks / CLOCKS_PER_SEC gives seconds; multiply (not divide)
    // by 1e6 to report microseconds as the label claims.
    printf("Time elapsed on device: %f microseconds\n", (double)device_time/CLOCKS_PER_SEC*1000000.0);
    // Check if kernel launch generated an error.
    checkCUDAError("kernel invocation");
    // Device to host copy (blocking).
    cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");
    // Verify the data returned to the host is the reversed sequence.
    for(int i=0; i<dimA; i++){
        assert( h_a[i] == dimA - 1 - i);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    free(h_a);
    printf("Correct! \n");
    return 0;
}
// Abort with a diagnostic if the CUDA runtime has recorded an error.
// msg labels the call site. Because CUDA errors are reported
// asynchronously, the error seen here may originate from an earlier call.
void checkCUDAError( const char * msg){
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s. \n ", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
|
8,851 | /***********************************************************************************
* Example cuda kernel file
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
void kernelWrapper();
// Placeholder kernel: takes a dummy int argument and currently performs
// no work. Intended to be filled in with real device code.
__global__ void Kernel( int i)
{
//do stuff here
}
// Launch the placeholder Kernel with a 1-block / 128-thread configuration.
// NOTE(review): both parameters are currently unused, and the forward
// declaration near the top of this file (`void kernelWrapper();`) declares
// a different, zero-argument overload that is never defined — confirm
// which signature callers expect.
void kernelWrapper(bool use_gpu, bool gpu_transfer )
{
// setup execution parameters: one block of 128 threads, no shared memory
dim3 grid( 1, 1, 1);
dim3 threads( 128, 1, 1);
Kernel<<< grid, threads, 0 >>>(0);
}
|
8,852 | // This turned out to be a little more complex than I had first thought, maybe I should try a different project.
//
#include <iostream>
#include <stdio.h>
void getInfo( void );
bool getDevicePresent( void );
// Let's do a basic CAESAR shift cipher, implemented in CUDA
__global__ void caesarCipher(char *key, char *text, int tlength, int klength);
__global__ void unCaesarCipher(char *key, char *text, int tlength, int klength);
// Demo driver: encrypts and then decrypts a fixed string with the CUDA
// Caesar/Vigenere-style kernels, printing each stage.
int main( void ){
    getInfo();
    if(!getDevicePresent())
    {
        return -1;
    }
    // Fix: string literals are const; binding them to non-const char* is
    // ill-formed in modern C++.
    const char *key = "fsdbikjb";
    const char *text = "The Quick Brown Fox Jumped over The Lazy Dawg";
    char *dev_key, *dev_text;
    // +1 so the terminating NUL is copied along with the characters.
    int textSize =(strlen(text) * sizeof(char))+1;
    int keySize = (strlen(key) * sizeof(char))+1;
    // Fix: the key buffer only needs keySize bytes (was textSize).
    cudaMalloc( (void**)&dev_key, keySize);
    cudaMalloc( (void**)&dev_text, textSize);
    cudaMemcpy( dev_key, key, keySize, cudaMemcpyHostToDevice);
    cudaMemcpy( dev_text, text, textSize, cudaMemcpyHostToDevice);
    printf("Key: %s(%d)\nText: '%s'(%d)\n", key, keySize, text,textSize);
    // Fix: pass textSize - 1 as the length so the NUL terminator is NOT
    // encrypted — otherwise the device string loses its terminator and the
    // host-side %s / strlen below read past the buffer.
    caesarCipher<<<textSize, 1>>>(dev_key, dev_text, textSize - 1, keySize);
    char * result = (char *)malloc(textSize);
    cudaMemcpy(result, dev_text, textSize, cudaMemcpyDeviceToHost);
    printf("Output:");
    printf(" '%s'\n", result);
    // Fix: strlen returns size_t; printing it with %i is undefined behavior.
    printf("Length: %zu", strlen(result));
    unCaesarCipher<<<textSize, 1>>>(dev_key, dev_text, textSize - 1, keySize);
    cudaMemcpy(result, dev_text, textSize, cudaMemcpyDeviceToHost);
    printf("Output:");
    printf(" '%s'\n", result);
    printf("Length: %zu", strlen(result));
    printf("Clearing Memory...\n");
    cudaFree(dev_text);
    cudaFree(dev_key);
    free(result);
    return 0;
}
// Encrypt one character per block: adds the repeating key byte to the text
// byte modulo 127. tlength bounds the text, klength the key.
// NOTE(review): the mod-127 result can be 0 or a non-printable control
// character (e.g. when text[tid] + key[...] == 127), which would embed a
// NUL mid-string and truncate later %s printing — confirm the intended
// alphabet/range.
__global__ void caesarCipher(char *key, char *text, int tlength, int klength)
{
int tid = blockIdx.x;
if (tid < tlength)
{
//printf("%i says - %s\n", tid, text);
char t = text[tid];
// c' = (c + k) mod 127; both operands are non-negative ASCII here.
text[tid] = ((int)text[tid] + (int)key[tid % klength])%127;
printf("%c -> %c - %d\n",t,text[tid], tid);
}
}
// Decrypt one character per block: the inverse of caesarCipher.
// Fix: in C/C++ the % operator truncates toward zero, so when the
// ciphertext byte is smaller than the key byte, (c' - k) % 127 is
// *negative* and decryption did not recover the original character.
// Adding 127 before the mod yields the true modular inverse for all
// plaintext bytes in [0, 126].
__global__ void unCaesarCipher(char *key, char *text, int tlength, int klength)
{
    int tid = blockIdx.x;
    if (tid < tlength)
    {
        char t = text[tid];
        // c = (c' - k + 127) mod 127 — always non-negative.
        text[tid] = ((int)text[tid] - (int)key[tid % klength] + 127) % 127;
        printf("%c -> %c - %d\n", t, text[tid], tid);
    }
}
// Print a short report of CUDA device 0's properties, or a warning when no
// device is present.
void getInfo( void )
{
    cudaDeviceProp p;
    if(getDevicePresent())
    {
        cudaGetDeviceProperties( &p, 0);
        printf(" -- Information & Properties about CUDA device 0 -- \n\n");
        printf("\tCompute Capability: %i.%i\n", p.major, p.minor);
        printf("\tDevice Name: %s\n", p.name);
        printf("\tClock Rate: %d\n", p.clockRate);
        // Fix: totalGlobalMem is size_t; printing it with %d is undefined
        // behavior on LP64 platforms. Use %zu.
        printf("\tGlobal Memory: %zuMiB\n", (size_t)(p.totalGlobalMem/(1024*1024)));
        printf("\n -- End of Information -- \n");
    }else
    {
        printf(" -- Warning: No CUDA Device Detected :'( -- \n");
        printf(" -- This software might not operate as -- \n -- Expected. -- \n");
    }
}
// Returns true when CUDA device 0 reports a nonzero compute-capability
// major version — used here as a rough proxy for "a usable device exists".
bool getDevicePresent( void )
{
    cudaDeviceProp props;
    cudaGetDeviceProperties( &props, 0);
    return props.major != 0;
}
|
8,853 | // Made with CLion Educational License
#include <cmath>
#include <chrono>
#include <iostream>
// Utility function
// For __host__ it's possible to use std::swap() working similarly
// Exchange two floats in place; callable from both host and device code.
__device__ __host__ void swap(float &x, float &y){
    float tmp = x;
    x = y;
    y = tmp;
}
// CPU Bubble Sort
// CPU bubble sort with early exit: stops as soon as a full pass performs
// no swaps. After pass p the last p elements are in their final position,
// so every pass scans a shorter prefix.
__host__ void bubble_sort(int n, float *x){
    int settled = 0;      // number of elements already bubbled to the end
    bool swapped = true;
    while (swapped) {
        swapped = false;
        for (int i = 0; i + 1 < n - settled; i++) {
            if (x[i] > x[i + 1]) {
                float tmp = x[i];
                x[i] = x[i + 1];
                x[i + 1] = tmp;
                swapped = true;
            }
        }
        settled++;
    }
}
// GPU Bubble Sort
// ODD-EVEN Sort
// Alternately compare (2n with 2n + 1) and (2n with 2n - 1)
// GPU odd-even transposition sort step. Each thread owns one even index i
// and, depending on `parity`, orders the pair (i, i+1) or (i-1, i).
// The host must launch this kernel n times (alternating parity) with at
// least ceil(n/2) threads total for the array to become fully sorted.
__global__ void bubble_sort(int n, float *x, bool parity) {
// Each thread handles one even index: i = 2 * global_thread_id.
int i = 2 * blockDim.x *blockIdx.x + threadIdx.x * 2;
if(i < n){
// parity selects the even-odd phase (compare i with i+1) vs the
// odd-even phase (compare i with i-1).
if(parity){
// Stay inside the array: the last even index may have no right neighbor.
if(i + 1 < n){
if(x[i] > x[i + 1]){
swap(x[i], x[i + 1]);
}
}
}else{
// i == 0 has no left neighbor.
if(i - 1 >= 0){
if(x[i] < x[i - 1]){
swap(x[i], x[i - 1]);
}
}
}
}
}
// CPU Merge Sort
// Merge the two adjacent sorted ranges x[l..m] and x[m+1..r] (inclusive)
// back into x, using heap-allocated temporary buffers for both halves.
// On equal keys the right-hand element is emitted first (strict `<`
// comparison), matching the original implementation.
__host__ void merge(float *x, int l, int m, int r){
    int leftLen = m - l + 1;
    int rightLen = r - m;

    float *leftBuf = new float[leftLen];
    float *rightBuf = new float[rightLen];

    // Snapshot both halves.
    for (int k = 0; k < leftLen; ++k)
        leftBuf[k] = x[l + k];
    for (int k = 0; k < rightLen; ++k)
        rightBuf[k] = x[(m + 1) + k];

    int li = 0;       // cursor into leftBuf
    int ri = 0;       // cursor into rightBuf
    int out = l;      // write position in x

    // Emit the smaller head element until one buffer empties.
    while (li < leftLen && ri < rightLen) {
        if (leftBuf[li] < rightBuf[ri])
            x[out++] = leftBuf[li++];
        else
            x[out++] = rightBuf[ri++];
    }
    // Drain whichever buffer still has elements.
    while (li < leftLen)
        x[out++] = leftBuf[li++];
    while (ri < rightLen)
        x[out++] = rightBuf[ri++];

    delete[] leftBuf;
    delete[] rightBuf;
}
// Recursive top-down merge sort of x[l..r] (inclusive bounds).
__host__ void merge_sort(float *x, int l, int r){
    // A range of 0 or 1 elements is already sorted.
    if (l >= r)
        return;
    int mid = (l + r) / 2;
    // Sort each half, then merge them back together.
    merge_sort(x, l, mid);
    merge_sort(x, mid + 1, r);
    merge(x, l, mid, r);
}
// GPU Bitonic Sort
// Order x[i] <= x[j] in place when both indices fall inside the array;
// out-of-range pairs are silently ignored (handles bitonic padding).
__device__ void compare_and_swap(int n, float *x, int i, int j){
    if (i >= n || j >= n)
        return;
    if (x[i] > x[j]) {
        float tmp = x[i];
        x[i] = x[j];
        x[j] = tmp;
    }
}
// One compare-exchange step of the bitonic sorting network.
// n: array length, size: current outer merge width, current_size: current
// inner comparison distance. The host launches this kernel repeatedly
// (see bitonic_sort below); each launch performs ceil(n/2) comparisons.
__global__ void bitonic_sequence_step(int n, float *x, int size, int current_size){
// Global id = which of the ceil(n/2) comparisons this thread performs.
int i = (blockIdx.x*blockDim.x + threadIdx.x);
// Only math.ceil(n / 2) comparisons exist per step.
if(i < (n + 1) / 2){
// Group comparisons into blocks of current_size.
int block = i / current_size;
// Alternate sort direction per `size`-wide group (ascending/descending).
int block_dir = (i / size) % 2;
// Offset of this comparison within its block.
int num_in_block = i % current_size;
int pivot, comparator;
// Compute the pair of element indices for this comparison; the
// descending branch mirrors the indices within the block.
if(block_dir == 0) {
pivot = 2 * (block * current_size) + num_in_block; // Number of element in x
comparator = pivot + current_size;
}else{
pivot = 2 * ((block + 1) * current_size ) - num_in_block - 1; // Number of element in x
comparator = pivot - current_size;
}
// compare_and_swap ignores out-of-range indices, so the tail is safe.
compare_and_swap(n, x, pivot, comparator);
}
}
// Two groups next to each other with opposite sorting directions can be merged into one sorted array by Bitonic Sequence
// Bitonic sort divide former array into groups of size 2 and order them easily in alternating directions
// Thanks to these arrays can me merged (also in alternating directions) into sorted ones with bitonic sequence and their size becomes 2^n
// Finally we can merge two subarrays sorted in opposite directions into one sorted using Bitonic Sequence again
// Host driver for the bitonic sort: repeatedly launches
// bitonic_sequence_step with growing merge width `size` and shrinking
// comparison distance `current_size`. Requires n to be a power of two
// (the caller pads with -INFINITY) and x to be a *device* pointer.
// Kernels are launched on the default stream, so successive steps
// serialize correctly without explicit synchronization.
__host__ void bitonic_sort(int n, float *x){
int current_size;
// Outer loop: merge sorted runs of doubling size.
for (int size=1; size <= n / 2; size *= 2)
{
// Inner loop: the bitonic merge — halve the comparison distance each pass.
for (current_size = size; current_size >= 1; current_size /= 2){
// One launch per step; 1024 threads per block, grid rounded up.
bitonic_sequence_step<<<std::ceil((float) (n / 2) / 1024.0f), 1024>>>(n, x, size, current_size);
}
}
}
// CPU Quick sort
// 5 6 3 4
// pivot = 4
// i j 5 > 4
// V
// 5 6 3 4
// i j 6 > 4
// V V
// 5 6 3 4
// i j 3 < 4
// V V
// 5 6 3 4
// i j 4 = 4
// V V
// 3 6 5 4
// swap x[i] x[h[]
// i
// V
// 3 4 5 6
// Lomuto partition of x[l..h]: the last element is the pivot. Elements
// strictly smaller than the pivot are moved to the front of the range;
// the pivot is then placed between the two regions and its final index
// returned.
__host__ int partition (float* x, int l, int h)
{
    float pivotVal = x[h];
    int store = l;  // next slot for an element smaller than the pivot
    for (int scan = l; scan < h; scan++) {
        if (x[scan] < pivotVal) {
            float tmp = x[store];
            x[store] = x[scan];
            x[scan] = tmp;
            store++;
        }
    }
    // Move the pivot into its sorted position.
    float tmp = x[store];
    x[store] = x[h];
    x[h] = tmp;
    return store;
}
// Recursive quicksort over x[i..j] (inclusive bounds).
__host__ void quick_sort(float *x, int i, int j){
    // Ranges of 0 or 1 elements need no work.
    if (i >= j)
        return;
    // Split around the pivot, then sort both sides.
    int p = partition(x, i, j);
    quick_sort(x, i, p - 1);
    quick_sort(x, p + 1, j);
}
// Benchmark driver: compares GPU bubble / GPU bitonic sorts against CPU
// bubble / merge / quick sorts on the same random data, then checks
// correctness and prints timings.
int main(){
// Initialize data
int order_of_magnitude;
std::cout << "Enter order of magnitude to test: ";
std::cin >> order_of_magnitude;
std::cout << std::endl << "----------------" << std::endl << std::endl;
if(order_of_magnitude > 26) std::cout << "WARNING" << std::endl << "Order of magnitude lowered to 26 due to the performance issues" << std::endl << std::endl;
order_of_magnitude = std::min(26, order_of_magnitude);
int N = (1 << order_of_magnitude); // 2^n
// Next power of two >= N; with N already a power of two this equals N.
int next_power = pow(2, ceil(log(N)/log(2)));
float *gpu_bubble, *cuda_gpu_bubble, *cpu_bubble, *gpu_bitonic, *cuda_gpu_bitonic, *cpu_merge, *cpu_quick;
// Allocate memory on CPU
gpu_bubble = (float*)malloc(N * sizeof(float));
cpu_bubble = (float*)malloc(N * sizeof(float));
gpu_bitonic = (float*)malloc(next_power * sizeof(float)); // Need to ensure that date amount is 2^n
cpu_merge = (float*)malloc(N * sizeof(float));
cpu_quick = (float*)malloc(N * sizeof(float));
// Allocate memory on GPU
cudaMalloc(&cuda_gpu_bubble, N * sizeof(float));
cudaMalloc(&cuda_gpu_bitonic , next_power * sizeof(float)); // Need to ensure that date amount is 2^n
// Choose pseudo-random numbers; the bitonic buffer is padded with
// -INFINITY so padding sorts to the front and real data stays sorted.
for (int i = 0; i < next_power; i ++) {
if(i < N) {
gpu_bubble[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
cpu_bubble[i] = gpu_bubble[i];
gpu_bitonic[i] = gpu_bubble[i];
cpu_merge[i] = gpu_bubble[i];
cpu_quick[i] = gpu_bubble[i];
}else{
gpu_bitonic[i] = - INFINITY;
}
}
// Bubble Sort GPU — timing includes the H2D/D2H copies; the final
// blocking cudaMemcpy ensures all kernels have finished before the
// end timestamp is taken.
auto cuda_bubble_begin = std::chrono::steady_clock::now();
if(N <= (1 << 14)) {
cudaMemcpy(cuda_gpu_bubble, gpu_bubble, N * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < N; i++) {
bubble_sort<<<ceil((float)N / 2048), 1024>>>(N, cuda_gpu_bubble, i % 2 == 0);
}
cudaMemcpy(gpu_bubble, cuda_gpu_bubble, N * sizeof(float), cudaMemcpyDeviceToHost);
}else{
std::cout << "WARNING!" << std::endl << "GPU bubble sort disabled due to it's low performance" << std::endl << std::endl;
}
auto cuda_bubble_end = std::chrono::steady_clock::now();
// Bubble Sort CPU
auto cpu_bubble_begin = std::chrono::steady_clock::now();
if(N <= (1 << 14)) {
bubble_sort(N, cpu_bubble);
}else{
std::cout << "WARNING!" << std::endl << "CPU bubble sort disabled due to it's low performance" << std::endl;
std::cout << std::endl << "----------------" << std::endl << std::endl;
}
auto cpu_bubble_end = std::chrono::steady_clock::now();
// Bitonic Sort GPU — again bracketed by blocking copies, so the timing
// covers the full device-side sort.
auto gpu_merge_start = std::chrono::steady_clock::now();
cudaMemcpy(cuda_gpu_bitonic, gpu_bitonic, next_power * sizeof(float), cudaMemcpyHostToDevice);
bitonic_sort(next_power, cuda_gpu_bitonic);
cudaMemcpy(gpu_bitonic, cuda_gpu_bitonic, next_power * sizeof(float), cudaMemcpyDeviceToHost);
auto gpu_merge_end = std::chrono::steady_clock::now();
// Merge Sort CPU
auto cpu_merge_start = std::chrono::steady_clock::now();
merge_sort(cpu_merge, 0, N - 1);
auto cpu_merge_end = std::chrono::steady_clock::now();
// Quick Sort CPU
auto cpu_quick_start = std::chrono::steady_clock::now();
quick_sort(cpu_quick, 0, N - 1);
auto cpu_quick_end = std::chrono::steady_clock::now();
// Set correctness flag
bool gpu_bubble_correct = true;
bool cpu_bubble_correct = true;
bool cpu_merge_correct = true;
bool gpu_bitonic_correct = true;
bool cpu_quick_correct = true;
// Check sorts correctness: each output must be non-decreasing. The
// bitonic check spans the padded length; the -INFINITY padding is at
// the front after sorting, so it cannot cause a false failure.
for (int i = 0; i < next_power - 1 ; i ++) {
if(i < N - 1) {
if (gpu_bubble[i] > gpu_bubble[i + 1]) gpu_bubble_correct = false;
if (cpu_bubble[i] > cpu_bubble[i + 1]) cpu_bubble_correct = false;
if (cpu_merge[i] > cpu_merge[i + 1]) cpu_merge_correct = false;
if(cpu_quick[i] > cpu_quick [i + 1]) cpu_quick_correct = false;
}
if (gpu_bitonic[i] > gpu_bitonic[i + 1]) gpu_bitonic_correct = false;
}
// Display number of elements
std::cout << "Sorting algorithms for: " << N << " elements" << std::endl << std::endl;
// Display correctness test
std::cout << "GPU Bubble Sort correctness : "<< gpu_bubble_correct << std::endl;
std::cout << "CPU Bubble Sort correctness : "<< cpu_bubble_correct << std::endl;
std::cout << "GPU Bitonic Sort correctness : " << gpu_bitonic_correct << std::endl;
std::cout << "CPU Merge Sort correctness : " << cpu_merge_correct << std::endl;
std::cout << "CPU Quick Sort correctness : " << cpu_quick_correct << std::endl;
// Make space
std::cout << std::endl << "----------------" << std::endl << std::endl;
// Display times time
std::cout << "GPU Bubble Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cuda_bubble_end - cuda_bubble_begin).count() << " µs" << std::endl;
std::cout << "CPU Bubble Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_bubble_end - cpu_bubble_begin).count() << " µs" << std::endl;
std::cout << "GPU Bitonic Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(gpu_merge_end - gpu_merge_start).count() << " µs" << std::endl;
std::cout << "CPU Merge Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_merge_end - cpu_merge_start).count() << " µs" << std::endl;
std::cout << "CPU Quick Sort time = " << std::chrono::duration_cast<std::chrono::microseconds>(cpu_quick_end - cpu_quick_start).count() << " µs" << std::endl;
// Deallocate CUDA memory
cudaFree(cuda_gpu_bubble);
cudaFree(cuda_gpu_bitonic);
// Deallocate memory
free(cpu_bubble);
free(cpu_merge);
free(cpu_quick);
free(gpu_bubble);
free(gpu_bitonic);
}
8,854 | #include "includes.h"
void vectorAdd(double* A, double* B,double* C,int n);
// Element-wise vector addition: C[i] = A[i] + B[i] for all i < n.
// One thread per element; threads past the end of the vectors do nothing.
__global__ void vecAddKernel(double* A, double* B, double* C, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    C[idx] = A[idx] + B[idx];
}
// Average the first N floats of a packed parameter block and store the
// result in the scratch area's slot 0.
// Layout of param: [0] = N (stored as a float), [1..N] = input values a,
// [N+1..] = scratch buffer b used for the reduction.
// NOTE(review): the stride of 32 and the reduction start at 32/2 imply
// this is written for exactly one 32-thread warp — TODO confirm the
// launch configuration.
// NOTE(review): __syncthreads() is commented out and there is no
// __syncwarp() between writing b[cacheIndex] and reading
// b[cacheIndex + i]; this relies on legacy implicit warp-synchronous
// execution and is a data race on Volta and newer architectures.
__device__ void ArrayAvg( void* param)
{
float* paramIn = (float*)param;
// First float encodes the element count.
int N = (int)(*paramIn);
paramIn = paramIn + 1;
float* a = paramIn;          // input values
float* b = a + N;            // per-thread partial sums / reduction scratch
int tid = threadIdx.x;
int cacheIndex = threadIdx.x;
float temp = 0;
#if 1
// Each thread accumulates a strided slice of the input (stride 32).
while (tid < N)
{
temp = temp + a[tid];
tid += 32;
}
#endif
// set the cache values
b[cacheIndex] = temp;
//printf("Cache[%d]=%g\n", cacheIndex, temp);
#if 1
// synchronize threads in this block
//__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
// Tree reduction over the 32 partial sums.
int i = 32/2;
//if (cacheIndex < 0)
//printf("i=%d,blockDim.x=%d,tid=%d\n",i, blockDim.x, cacheIndex);
while (i != 0)
{
if (cacheIndex < i )
{
b[cacheIndex] += b[cacheIndex+i];
}
i /= 2;
}
// Thread 0 converts the total into the mean.
if (cacheIndex == 0)
{
//printf("c[blockIdx.x]:%g, :%d\n", cache[0],blockIdx.x);
b[0] = b[0]/N;
}
#endif
}
|
8,856 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define THREADS_PER_BLOCK 1024
#define MAX_NUMBER_BLOCKS 2496
/*******************************************************
* RUNTIME MEASURING METHODS *
*******************************************************/
struct timeval start, end;
/* Capture the benchmark start timestamp into the global `start`. */
void starttime() {
    gettimeofday(&start, 0);
}
/* Print the wall-clock time elapsed (in milliseconds) since the last
 * starttime() call, labelled with the caller-supplied description `c`. */
void endtime(const char* c) {
    gettimeofday(&end, 0);
    double ms = (end.tv_sec - start.tv_sec) * 1000.0
              + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("%s: %f ms\n", c, ms);
}
/******************************************************
* CUDA METHODS *
******************************************************/
// Zero the count cell of every item set whose support is below mSupport.
// fTable is laid out as nCr rows of (cardinality item ids + 1 count); a
// thread acts only when its global index lands on a count cell.
__global__ void validSets(int* fTable, int cardinality, int nCr, int mSupport){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int rowLen = cardinality + 1;
    bool isCountCell = (idx < rowLen * nCr) && (idx % rowLen == cardinality);
    if (isCountCell && fTable[idx] < mSupport){
        fTable[idx] = 0;
    }
}
// Count, for each candidate item set in fTable, how many of the `row`
// transactions in tTable contain all of its items. One block per item set
// (blocks stride over sets when nCr > gridDim.x); threads within a block
// stride over transactions and reduce their partial counts in shared
// memory. The total is written into the set's count column.
// NOTE(review): `bIndex` and the loop variable `h` start equal and are
// both incremented by gridDim.x each iteration, so they stay identical —
// the double increment is redundant but not incorrect.
__global__ void counting(int* fTable, char* tTable, int row, int col, int nCr, int cardinality){
__shared__ int cache[THREADS_PER_BLOCK]; //cache memory that is shared by all the threads within a block
int bIndex = blockIdx.x; //the index value of the core
int cacheIndex = threadIdx.x; //each thread within a core has a corresponding cache index where it stores its values
//enter a block loop where the core index must remain lower than the amount of item sets present in the frequency table
//at the end of each iteration the core index is increased by the amount of cores being used and loops again if possible
for(int h = bIndex; h < nCr; h+= gridDim.x){
int tIndex = threadIdx.x; //the index value of the individual thread
int sum = 0; //keeps track of how many times an item set has been found
int found; //a boolean value that indicates whether an item set is present within a transaction; either 0 or 1
//enter a thread loop where i represents which transaction being scanned. Each thread within a core scans a
// different transaction; the loop is necessary since there aren't enough threads for each transaction. Whenever
// a scan is done i is incremented by th number of threads per block
for(int i = tIndex; i < row; i+= blockDim.x){
found = 1;
//enter a loop where j represents the specific item within an item set; the iterations within the for loop
// is dependent on the cardinality of the item sets
for(int j = 0; j < cardinality; j++){
//if an item indicated in the frequency table is not found in the transaction found is set to 0; i.e. false
if(tTable[i * col + (fTable[bIndex * (cardinality + 1) + j])] != '1'){
found = 0;
}
}
//if found equals 1 then the sum variable is incremented by 1
if(found == 1){
sum++;
}
}
//once any given thread exits the thread loop it stores its sum value to its corresponding cache index
cache[cacheIndex] = sum;
//the threads are synced before the overall sum is calculated to ensure all threads have finished counting;
__syncthreads();
//the cache is then reduced to obtain the total sum for any given item set: every iteration adds two cache
//locations together until the total is in cache[0]. Requires THREADS_PER_BLOCK to be a power of two.
int k = THREADS_PER_BLOCK/2;
while(k != 0){
if(cacheIndex < k){
cache[cacheIndex] += cache[cacheIndex + k];
}
__syncthreads();
k /= 2;
}
//thread 0 writes the final count into the set's count column
if(cacheIndex == 0){
fTable[bIndex * (cardinality + 1) + cardinality] = cache[0];
}
//barrier before reusing the shared cache for the next item set
__syncthreads();
//the core index value is incremented by the number of cores being used
bIndex += gridDim.x;
}
}
//factorial function
// Iterative factorial of x, accumulated in long int.
// Fix: the original accumulated in the int parameter, overflowing for
// x > 12 (32-bit int) despite the long int return type.
// Preserves the original edge behavior: returns 1 for x == 0 and 1, and
// returns x unchanged for negative inputs.
long int factorial(int x){
    if (x <= 1){
        // matches the old code: 0 -> 1, 1 -> 1, negatives pass through
        return x == 0 ? 1 : x;
    }
    long int result = 1;
    for (int k = 2; k <= x; k++){
        result *= k;
    }
    return result;
}
//combinatorics function
// Binomial coefficient C(n, r) via the multiplicative formula with a
// long accumulator, dividing at every step to keep intermediates small.
// Fixes two defects in the original:
//   1. r == n (and r == 0) returned n instead of 1, because the empty
//      product case left the accumulator initialized to n.
//   2. The running product was computed in int, overflowing for
//      moderately large n.
// Returns 0 for out-of-range r (r < 0 or r > n).
long int nCr(int n, int r){
    if (r < 0 || r > n){
        return 0;
    }
    // C(n, r) == C(n, n - r); use the smaller r for fewer iterations.
    if (r > n - r){
        r = n - r;
    }
    long int result = 1;
    for (int k = 1; k <= r; k++){
        // result * (n - r + k) is always divisible by k at this point,
        // so integer division is exact.
        result = result * (n - r + k) / k;
    }
    return result;
}
// Apriori frequent-itemset mining over "retail.dat": builds a bitmap
// transaction table on the GPU, then iteratively generates candidate item
// sets of growing cardinality, counts their support with the `counting`
// kernel, and prunes sets below mSupport until no candidates survive.
// NOTE(review): gpuT, cTable and fTable are never freed — leaks at exit.
// NOTE(review): the large stack VLAs (temp, oldSets) scale with
// nCr(count, cardinality) and can overflow the stack for dense data.
int main() {
/************************************************************************************
* Variable Declarations *
************************************************************************************/
FILE *fPointer;
int max = 0;
int size = 0; //Contains the number of lines in the given database
int cardinality = 1; //Contains the initial cardinality of the item sets
int temp;
int i = 0;
int j, k, num, count;
int mSupport = 8000; //Contains the support count; set to approx 10% of all transactions
char val;
int numBlocks = 0;
//While loop that traverses through the database and returns the number of transactions
fPointer = fopen("retail.dat", "r");
fscanf(fPointer, "%c", &val);
while(!feof(fPointer)){
if(val == '\n'){
size++;
}
fscanf(fPointer, "%c", &val);
}
fclose(fPointer);
fPointer = fopen("retail.dat", "r");
fscanf(fPointer, "%d", &temp);
//Traverses through each transaction in order to find the max value.
while(!feof(fPointer)){
fscanf(fPointer, "%d", &temp);
if(max < temp){
max = temp;
}
}
fclose(fPointer);
printf("DATA FILE PARSED\n");
printf("============================================\n");
printf("Total number of transactions found: %d\n", size);
printf("Maximum number of unique items found: %d\n", max+1);
printf("============================================\n");
printf("APRIORI IMPLEMENTATION BEGINS\n");
starttime();
//Creation of table: one '1'/'\0' flag per (transaction, item) pair
char *cTable = (char*)malloc(sizeof(char) * (max + 1) * size); //Allocates an array of characters for each transaction
for(i=0; i < (max+1)*size; i++) {
// memset(cTable[i], '\0', sizeof(char) * (max + 1) * size); //Initialize all values to 0.
cTable[i] = '\0';
}
char line[400];
char *cNum;
fPointer = fopen("retail.dat", "r");
for(i = 0; i < size; i++){
fgets(line, 400, fPointer);
cNum = strtok(line, " \n");
while(cNum != NULL){
num = atoi(cNum);
cTable[i * (max + 1) + num] = '1';
cNum = strtok(NULL, " \n");
}
}
//Creating copy of transaction table in the video card memmory
char* gpuT;
cudaMalloc(&gpuT, size * (max + 1) * sizeof(char));
cudaMemcpy(gpuT, cTable, (size * (max + 1) * sizeof(char)), cudaMemcpyHostToDevice);
//Creates a frequency table of item sets with a Cardinality of 1; where the array index represents the item
//number. All the items have their counts initially set to zero
int * fTable = (int *)malloc((max + 1) * (cardinality + 1) * sizeof(int));
for(i = 0; i < max + 1; i++){
fTable[i * (cardinality + 1)] = i;
fTable[(i * (cardinality + 1)) + cardinality] = 0;
}
int* gpuF;
cudaMalloc(&gpuF, (max + 1) * (cardinality + 1) * sizeof(int));
cudaMemcpy(gpuF, fTable, (max + 1) * (cardinality + 1) * sizeof(int), cudaMemcpyHostToDevice);
//setting the number of cores to be used by the gpu
numBlocks = (max + 1);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
counting<<< numBlocks, THREADS_PER_BLOCK>>>(gpuF, gpuT, size, (max + 1), (max + 1), cardinality);
//setting the number of cores to be used by the gpu
numBlocks = (max + 1) * (cardinality + 1)/ THREADS_PER_BLOCK + 1;
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
validSets<<< numBlocks, THREADS_PER_BLOCK>>>(gpuF, cardinality, max + 1, mSupport);
cudaMemcpy(fTable, gpuF, ((max + 1) * (cardinality + 1) * sizeof(int)), cudaMemcpyDeviceToHost);
cudaFree(gpuF);
//invalidating elements that are below the support count and counting the remaining eligible elements
count = 0;
for(i = 0; i < (max + 1); i++){
if (fTable[i * (cardinality + 1) + cardinality] != 0){
count++;
}
}
//creating new table consisting of only valid items
int iTable[count];
j = 0;
for(i = 0; i < (max + 1); i++){
if (fTable[i * (cardinality + 1) + cardinality] != 0){
iTable[j] = fTable[i * (cardinality + 1)];
j++;
}
}
//creating a tabel to hold the current valid items item and their the a variable for the count of the count
int * vTable = iTable;
int lastCount = count;
//Main apriori loop: grow cardinality until fewer than two items survive.
while(count > 1){
cardinality++;
//temporary array that will hold the new item sets
int temp[nCr(count, cardinality) * (cardinality + 1)];
//array of previous items sets
int oldSets[nCr(lastCount, cardinality - 1) * cardinality];
//array that hold one old item set for insertion into table
int oldEntry[cardinality - 1];
//function populates old item set (lexicographic combination enumeration)
k = 0;
if(cardinality - 1 <= lastCount){
for(i = 0; (oldEntry[i] = i) < cardinality - 2; i++);
for(i = 0; i < cardinality - 1; i++){
oldSets[(k * cardinality) + i] = vTable[oldEntry[i]];
}
k++;
for(;;){
for( i = cardinality - 2; i >= 0 && oldEntry[i] == (lastCount - (cardinality - 1) + i); i--);
if(i < 0){
break;
}
else{
oldEntry[i]++;
for(++i; i < cardinality - 1; i++){
oldEntry[i] = oldEntry[i - 1] + 1;
}
for(j = 0; j < cardinality - 1; j++){
oldSets[(k * cardinality) + j] = vTable[oldEntry[j]];
}
k++;
}
}
}
for(i = 0; i < nCr(lastCount, cardinality - 1); i++){
oldSets[(i * cardinality) + cardinality - 1] = 0;
}
//array that will hold the information for a single item set before it is added to the
//array of all item sets
int entry[cardinality];
//function populates new item set (same enumeration, one larger)
k = 0;
if(cardinality <= count){
for(i = 0; (entry[i] = i) < cardinality - 1; i++);
for(i = 0; i < cardinality; i++){
temp[(k*(cardinality + 1)) + i] = vTable[entry[i]];
}
k++;
for(;;){
for( i = cardinality - 1; i >= 0 && entry[i] == (count - cardinality + i); i--);
if(i < 0){
break;
}
else{
entry[i]++;
for(++i; i < cardinality; i++){
entry[i] = entry[i - 1] + 1;
}
for(j = 0; j < cardinality; j++){
temp[(k*(cardinality + 1)) + j] = vTable[entry[j]];
}
k++;
}
}
}
for(i = 0; i < nCr(count, cardinality); i++){
temp[(i*(cardinality + 1)) + cardinality ] = 0;
}
//counting the amount of instances of the item sets amongst the transactions
int * gpuSet;
cudaMalloc(&gpuSet, sizeof(int) * (cardinality + 1) * nCr(count, cardinality));
cudaMemcpy(gpuSet, temp, sizeof(int) * (cardinality + 1) * nCr(count, cardinality), cudaMemcpyHostToDevice);
numBlocks = nCr(count, cardinality);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
counting<<< numBlocks, THREADS_PER_BLOCK>>>(gpuSet, gpuT, size, max + 1, nCr(count, cardinality), cardinality);
cudaMemcpy(temp, gpuSet, sizeof(int) * (cardinality + 1) * nCr(count, cardinality), cudaMemcpyDeviceToHost);
cudaFree(gpuSet);
//counting the amount of instances of the previous-cardinality item sets
cudaMalloc(&gpuSet, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1));
cudaMemcpy(gpuSet, oldSets, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1), cudaMemcpyHostToDevice);
numBlocks = nCr(lastCount, cardinality - 1);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
counting<<< numBlocks, THREADS_PER_BLOCK>>>(gpuSet, gpuT, size, max + 1, nCr(lastCount, cardinality - 1), cardinality - 1);
cudaMemcpy(oldSets, gpuSet, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1), cudaMemcpyDeviceToHost);
cudaFree(gpuSet);
//invalidating elements that are below the support count and counting the remaining eligible elements
int tCount = count;
lastCount = tCount;
count = 0;
for(i = 0; i < nCr(tCount, cardinality); i++){
if (temp[(i*(cardinality + 1)) + cardinality] < mSupport){
temp[(i * (cardinality + 1)) + cardinality] = 0;
}
else{
count++;
}
}
//set Table of valid items
char valid[max + 1];
for(i = 0; i <= max; i++){
valid[i] = '\0';
}
for(i = 0; i < nCr(tCount, cardinality); i++){
for(j = 0; j < cardinality; j++){
if(temp[(i * (cardinality + 1)) + cardinality] > 0){
valid[temp[(i * (cardinality + 1)) + j]] = '1';
}
}
}
//creating new table consisting of only valid items
int rTable[count];
count = 0;
j = 0;
for(i = 0; i <= max; i++){
if (valid[i] == '1'){
rTable[j] = i;
j++;
count++;
}
}
// NOTE(review): vTable now points at rTable, which is scoped to this
// loop iteration — it is read on the next iteration after rTable's
// lifetime has ended (works in practice, but is undefined behavior).
vTable = rTable;
if(count == 0){
printf("\n=============== MOST FREQUENT SUBSETS ================\n");
for(i = 0; i < nCr(lastCount, cardinality - 1); i++){
if(oldSets[(i * cardinality) + (cardinality-1)] > mSupport){
printf("Set: {");
}
for(j = 0; j < cardinality; j++){
if(oldSets[(i * cardinality) + (cardinality-1)] > mSupport){
if(j == cardinality - 1){
printf("}\t\tCount: %d\n", oldSets[(i * cardinality) + j]);
}
else{
printf("'%d'", oldSets[(i * cardinality) + j]);
}
}
}
}
printf("\n");
}
}
endtime("Total Parallelized Implementation Time" );
}
|
8,857 | #include "includes.h"
/*
 * Accumulates gradWeight and gradBias for one plane (appears to be a
 * sub-sampling style layer: one shared weight and bias per plane).
 * Launch layout: one block per output plane (blockIdx.x); the block's
 * threads tile the output spatially (threadIdx.x/y stride over w/h).
 * assumes blockDim.x*blockDim.y <= CUDA_MAX_THREADS -- TODO confirm at call site
 */
__global__ void subgradweight(float *input, float *gradOutput, float *gradWeight, float *gradBias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW, float scale)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
// weight/bias slot: plane index within its sample
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
input = input + i*input_w*input_h;
// thread ID
int tid = blockDim.x*threadIdx.y + threadIdx.x;
// create array to hold partial sums
__shared__ float sums[CUDA_MAX_THREADS];
sums[tid] = 0;
// compute partial sums: every kernel tap contributes to the same
// per-thread accumulator (a single shared weight per plane)
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
float *ptr_input = input + yy*dH*input_w + xx*dW;
float *ptr_gradOutput = gradOutput + yy*output_w + xx;
float z = *ptr_gradOutput;
long kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
sums[tid] += z * ptr_input[kx];
}
// advance to the next input row of the pooling window
ptr_input += input_w;
}
}
}
__syncthreads();
// reduce: accumulate all partial sums to produce final gradWeight
// (serial reduction performed by thread (0,0) only)
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
for(int i = 0; i < blockDim.x*blockDim.y; i++) gradWeight[k] += scale*sums[i];
}
__syncthreads();
// compute gradBias: strided per-thread partial sums over the plane's gradOutput
sums[tid] = 0;
for (int i=tid; i<output_w*output_h; i+=(blockDim.x*blockDim.y)) {
sums[tid] += gradOutput[i];
}
__syncthreads();
// reduce gradBias
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
for (int i=0; i<(blockDim.x*blockDim.y); i++)
gradBias[k] += scale*sums[i];
}
} |
8,858 | #include "includes.h"
/*
 * One relaxation step: each node adopts the best (value, origin) pair seen
 * among itself and its neighbors. One thread per node; reads from the *In
 * arrays only and writes to the *Out arrays, so the step is race-free.
 */
__global__ void Iterate_Kernel(int size, int *originIn, int *originOut, int *bestSeenIn, int *bestSeenOut, int *adjIndexes, int *adjacency)
{
  int node = blockIdx.x * blockDim.x + threadIdx.x;
  if (node >= size)
    return;

  int best = bestSeenIn[node];
  int bestOrigin = originIn[node];

  /* nodes already at the saturation value skip the neighbor sweep */
  if (best < 1000001)
  {
    int first = adjIndexes[node];
    int last = adjIndexes[node + 1];
    for (int e = first; e < last; e++)
    {
      int nbr = adjacency[e];
      int nbrBest = bestSeenIn[nbr];
      int nbrOrigin = originIn[nbr];
      /* tie on a positive value: prefer the larger origin id */
      if (nbrBest > 0 && nbrBest == best && nbrOrigin > bestOrigin)
        bestOrigin = nbrOrigin;
      /* strictly better value: adopt both value and origin */
      if (nbrBest > best)
      {
        best = nbrBest;
        bestOrigin = nbrOrigin;
      }
    }
  }

  bestSeenOut[node] = best;
  originOut[node] = bestOrigin;
} |
8,859 | // Reference Scan implementation - Author: Ananoymous student of ME759 Fall 2017
#include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include <cuda.h>
#include <math.h>
/*
 * Count entries where the GPU result differs from the reference by more than
 * a loose tolerance (loose because floating-point addition order differs
 * between the sequential reference and the parallel scan).
 * Returns the number of mismatches (0 == pass).
 */
int checkResults(float*res, float* cudaRes,int length)
{
    const float smallVal = .3f;
    int nDiffs = 0;
    int k = 0;
    while (k < length) {
        if (fabs(cudaRes[k] - res[k]) > smallVal)
            ++nDiffs;
        ++k;
    }
    return nDiffs;
}
/*
 * Fill arr with nElements floats read from fp, rewinding to the start of the
 * file when EOF is reached. Each value is shifted by -5 so the data is
 * roughly zero-mean (keeps long running sums from losing precision).
 */
void initializeArray(FILE* fp,float* arr, int nElements)
{
    int k = 0;
    while (k < nElements) {
        if (fscanf(fp, "%f", &arr[k]) == EOF)
            rewind(fp);
        arr[k] -= 5;
        ++k;
    }
}
/* Sequential reference: out[i] = in[0] + ... + in[i] (inclusive prefix sum). */
void inclusiveScan_SEQ(float *in, float *out,int length) {
    float running = 0.f;
    int k = 0;
    while (k < length) {
        running += in[k];
        out[k] = running;
        ++k;
    }
}
/* Single-block inclusive scan (Hillis-Steele, double-buffered).
   Requires 2*n floats of dynamic shared memory; assumes blockDim.x >= n
   (threads beyond n load the additive identity). */
__global__ void scan(float *g_odata, float *g_idata,int n){
extern volatile __shared__ float temp[]; // two n-element ping-pong buffers
int thid = threadIdx.x;
int pout = 0,pin = 1; // ping-pong buffer selectors (swap each pass)
if(thid<n)
temp[thid] = g_idata[thid];
else
temp[thid] = 0.0f; // pad extra threads with 0
__syncthreads();
for(int offset = 1;offset<n;offset <<=1)
{
pout = 1- pout;
pin = 1 - pout;
if(thid >= offset)
temp[pout*n+thid] = temp[pin*n+thid]+temp[pin*n+thid-offset];
else
temp[pout*n+thid] = temp[pin*n+thid]; // no partner yet: carry value over
__syncthreads();
}
if(thid<n)
g_odata[thid] = temp[pout*n+thid]; // result lives in the last-written buffer
}
/*
 * Per-block inclusive scan (Hillis-Steele, double-buffered) over n-element
 * tiles of a large array; each block also publishes its tile total into
 * aux_data[blockIdx.x] for a second-level scan.
 * Requires 2*n floats of dynamic shared memory and blockDim.x >= n.
 * Fix: the tile total was read from temp[1023] -- a hard-coded index into
 * buffer 0 that is only correct when n == 1024 (even pass count leaves the
 * result in buffer 0) -- and was written redundantly by every thread.
 * It now reads temp[pout*n + (n-1)] and only thread 0 writes it.
 */
__global__ void scanlarge(float *g_odata, float *g_idata,float *aux_data,int n,int arraysize){
    extern volatile __shared__ float temp[];   // two n-element ping-pong buffers
    int thid = threadIdx.x;
    int start = blockIdx.x*1024;               // first element of this block's tile
    int aux_in = blockIdx.x;                   // slot for this block's total
    int pout = 0,pin = 1;
    // load tile, zero-padding past the end of the array
    if(thid+start<arraysize)
        temp[thid] = g_idata[thid+start];
    else
        temp[thid] = 0.00;
    __syncthreads();
    for(int offset = 1;offset<n;offset <<=1)
    {
        pout = 1- pout;
        pin = 1 - pout;
        if(thid >= offset)
            temp[pout*n+thid] = temp[pin*n+thid]+temp[pin*n+thid-offset];
        else
            temp[pout*n+thid] = temp[pin*n+thid];
        __syncthreads();
    }
    if(thid+start<arraysize){
        g_odata[thid+start] = temp[pout*n+thid];}
    // publish the tile total (last element of the final buffer) exactly once
    if(thid == 0)
        aux_data[aux_in] = temp[pout*n + (n - 1)];
}
/*
 * Second phase of the large scan: add the scanned block totals back into each
 * tile. Block b corrects tile b+1 (tile 0 needs no correction), staging the
 * tile in dynamic shared memory.
 */
__global__ void addscan(float *g_odata,float *g_idata,float *aux_data,int arraysize){
    extern volatile __shared__ float temp[];
    const int lane = threadIdx.x;
    const int base = (blockIdx.x + 1) * 1024;   // tile 0 is skipped
    const int auxSlot = blockIdx.x;
    const bool inRange = (lane + base < arraysize);

    temp[lane] = inRange ? g_idata[lane + base] : 0.00;
    __syncthreads();
    temp[lane] += aux_data[auxSlot];            // shift whole tile by its prefix
    __syncthreads();
    if (inRange)
        g_odata[lane + base] = temp[lane];
}
/*
 * Driver: read N floats from problem1.inp, run a GPU inclusive scan (single
 * block for N <= 1024, otherwise a two- or three-level block-scan pipeline),
 * time it with CUDA events, and verify against a sequential reference.
 * Fixes: fopen was unchecked (NULL deref in initializeArray on a missing
 * file), the FILE* was never closed, and the auxiliary device buffers
 * (aux, auxscan, auxblock, auxscanblock, auxout) were leaked.
 */
int main(int argc, char* argv[]) {
    if(argc!=2){
        printf("Usage %s N\n",argv[0]);
        return 1;
    }
    int N=atoi(argv[1]);
    FILE *fp = fopen("problem1.inp","r");
    if(fp == NULL){                       // fix: unchecked fopen
        printf("Cannot open problem1.inp\n");
        return 1;
    }
    int size = N * sizeof(float);
    //allocate resources
    int threadsperblock,blocksPerGrid;
    float *in = (float *)malloc(size);
    float *out = (float *)malloc(size);
    float *cuda_out= (float *)malloc(size);
    float time = 0.f;
    initializeArray(fp,in, N);
    fclose(fp);                           // fix: file handle was leaked
    float *dout,*din;
    cudaMalloc((void**)&dout,size);
    cudaMalloc((void**)&din,size);
    cudaEvent_t startEvent_inc, stopEvent_inc;
    cudaEventCreate(&startEvent_inc);
    cudaEventCreate(&stopEvent_inc);
    cudaEventRecord(startEvent_inc,0); // starting timing for inclusive
    cudaMemcpy(din,in,size,cudaMemcpyHostToDevice);
    cudaMemset(dout,0,size);
    if(N>1024)
    {
        threadsperblock = 1024;
        blocksPerGrid = (N+threadsperblock-1)/threadsperblock;
        float *aux;
        float *auxscan;
        int num = 1024;
        cudaMalloc((void**)&aux,sizeof(float)*blocksPerGrid);
        cudaMemset(aux,0,sizeof(float)*blocksPerGrid);
        cudaMalloc((void**)&auxscan,sizeof(float)*blocksPerGrid);
        cudaMemset(auxscan,0,sizeof(float)*blocksPerGrid);
        if(blocksPerGrid<=1024)
        {
            // per-block scans + per-block totals
            scanlarge<<<blocksPerGrid,threadsperblock,2048*sizeof(float)>>>(dout,din,aux,num,N);
            cudaDeviceSynchronize();
            // Scanning the auxilliary array
            scan<<<1,blocksPerGrid,2*blocksPerGrid*sizeof(float)>>>(auxscan,aux,blocksPerGrid);
            cudaDeviceSynchronize();
            // Adding the scanned block to get final result
            addscan<<<blocksPerGrid,threadsperblock,2048*sizeof(float)>>>(dout,dout,auxscan,N);
            cudaDeviceSynchronize();
            cudaMemcpy(cuda_out,dout,size,cudaMemcpyDeviceToHost);
        }
        else
        {
            float *auxblock;
            float *auxscanblock;
            float *auxout;
            volatile int blocksperGridaux = (blocksPerGrid+1023)/1024;
            cudaMalloc((void**)&auxblock,sizeof(float)*blocksperGridaux);
            cudaMemset(auxblock,0,sizeof(float)*blocksperGridaux);
            cudaMalloc((void**)&auxscanblock,sizeof(float)*blocksperGridaux);
            cudaMemset(auxscanblock,0,sizeof(float)*blocksperGridaux);
            cudaMalloc((void**)&auxout,sizeof(float)*blocksPerGrid);
            cudaMemset(auxout,0,sizeof(float)*blocksPerGrid);
            scanlarge<<<blocksPerGrid,threadsperblock,2048*sizeof(float)>>>(dout,din,aux,num,N); // We get the block sums here
            cudaDeviceSynchronize();
            scanlarge<<<blocksperGridaux,threadsperblock,2048*sizeof(float)>>>(auxout,aux,auxblock,num,blocksPerGrid); // Block sum array size is greater than 1024. So repeat the whole > 1024 process
            cudaDeviceSynchronize();
            scan<<<1,blocksperGridaux,2*blocksperGridaux*sizeof(float)>>>(auxscanblock,auxblock,blocksperGridaux); // Scan of the aux-of-aux array
            cudaDeviceSynchronize();
            addscan<<<blocksperGridaux-1,threadsperblock,2048*sizeof(float)>>>(auxout,auxout,auxscanblock,blocksPerGrid); // Fully scanned auxilliary array
            cudaDeviceSynchronize();
            addscan<<<blocksPerGrid,threadsperblock,2048*sizeof(float)>>>(dout,dout,auxout,N);
            cudaDeviceSynchronize();
            cudaMemcpy(cuda_out,dout,size,cudaMemcpyDeviceToHost);
            cudaFree(auxblock);      // fix: leaked device buffers
            cudaFree(auxscanblock);
            cudaFree(auxout);
        }
        cudaFree(aux);               // fix: leaked in both sub-paths
        cudaFree(auxscan);
    }
    else{
        threadsperblock = N;
        blocksPerGrid = 1;
        scan<<<blocksPerGrid,threadsperblock,2*size>>>(dout,din,N);
        cudaMemcpy(cuda_out,dout,size,cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
    cudaEventSynchronize(stopEvent_inc);
    cudaEventElapsedTime(&time, startEvent_inc, stopEvent_inc);
    inclusiveScan_SEQ(in, out,N);
    int nDiffs = checkResults(out, cuda_out,N);
    if(nDiffs)printf("Test Failed\n"); // This should never print
    printf("%d\n%f\n%f\n",N,cuda_out[N-1],time);
    //free resources
    free(in); free(out); free(cuda_out);
    cudaFree(din);cudaFree(dout);
    return 0;
}
|
8,860 | #include <stdio.h>
#include <chrono>
#include <iostream>
#include <curand_kernel.h>
#include <curand.h>
/*
 * Returns true when (x, y) lies inside or on the unit circle.
 * Fix: the original computed sqrt((x*x)+(y*y)) <= 1.0, promoting to double
 * and paying for a square root; comparing the squared distance is
 * mathematically equivalent (sqrt is monotonic, operand >= 0) and stays
 * in single precision.
 */
__device__ bool checkCircle(float x, float y)
{
    return x * x + y * y <= 1.0f;
}
/*
 * Monte-Carlo pi: each worker thread draws `iterations` points in the unit
 * square and atomically adds its hit count to *d_res.
 * Fix: only totalIterations / iterations worker threads should run. The old
 * guard (id >= totalIterations) let every launched thread contribute, so
 * whenever the thread count was rounded up to whole blocks the kernel
 * counted extra samples while the host still divided by totalIterations,
 * biasing the estimate upward.
 */
__global__ void piKernel(int *d_res, int iterations, int totalIterations, curandState *states)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id >= totalIterations / iterations)
        return;
    int localCount = 0;
    const int seed = id;                    // deterministic per-thread seed
    curand_init(seed, id, 0, &states[id]);
    for(size_t it=0; it<iterations; ++it)
    {
        float x = curand_uniform(&states[id]);
        float y = curand_uniform(&states[id]);
        if(checkCircle(x,y))
        {
            ++localCount;
        }
    }
    atomicAdd(d_res, localCount);           // one atomic per worker thread
}
/*
 * Reads blockSize, iterations-per-thread and total sample count from stdin,
 * runs the Monte-Carlo kernel, and prints the pi estimate in double and
 * float precision plus the elapsed wall time.
 * Fix: the curandState buffer (dev_random) was never freed.
 */
int main()
{
    int blockSize, iterationPerCThread, totalIterations;
    std::cin >>blockSize >>iterationPerCThread >> totalIterations;
    auto start_gpu = std::chrono::high_resolution_clock::now();
    int counter = 0;
    int *dCounter=0;
    cudaMalloc(&dCounter, sizeof(int));
    cudaMemset(dCounter, 0, sizeof(int));
    curandState *dev_random;
    int numThread = totalIterations / iterationPerCThread;
    int numBlock = (numThread+blockSize-1)/blockSize;   // ceil-div over blocks
    cudaMalloc((void**)&dev_random, numBlock*blockSize*sizeof(curandState));
    piKernel<<<numBlock, blockSize>>>(dCounter, iterationPerCThread,
                                      totalIterations, dev_random);
    cudaDeviceSynchronize();
    cudaMemcpy(&counter, dCounter, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dCounter);
    cudaFree(dev_random);   // fix: RNG-state buffer was leaked
    double pi = ((double)counter / (double)totalIterations) * 4.0;
    float pi_f = ((float)counter / (float)totalIterations) * 4.0;
    auto end_gpu = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_gpu = end_gpu-start_gpu;
    std::cout<<"GPU simulation time:"<<time_gpu.count()<<std::endl;
    std::cout<<"Pi result is:"<<pi<<std::endl;
    std::cout<<"float Pi result is:"<<pi_f<<std::endl;
    std::cout<<"difference between float and double Pi is:"<<pi_f-pi<<std::endl;
    return 0;
} |
8,861 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda_profiler_api.h>
/*
 * Enumerates CUDA devices and prints the main cudaDeviceProp fields.
 * Fixes: the "clock rate" printf had no conversion specifier so the value
 * was never printed; size_t fields (totalGlobalMem, sharedMemPerBlock,
 * totalConstMem) were printed with %ld/%d (UB on LLP64/32-bit targets);
 * maxThreadsDim/maxGridSize are indexed x=0,y=1,z=2 but were labeled z,y,x.
 */
int main()
{
    cudaError_t error;
    cudaDeviceProp dev;
    int dev_cnt = 0;
    cudaProfilerStart();
    // return device numbers with compute capability >= 1.0
    error = cudaGetDeviceCount (&dev_cnt);
    if(error != cudaSuccess)
    {
        printf("Error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    printf("Number of devices: %d\n",dev_cnt);
    // Get properties of each device
    for(int i = 0; i < dev_cnt; i++)
    {
        error = cudaGetDeviceProperties(&dev, i);
        if(error != cudaSuccess)
        {
            printf("Error: %s\n", cudaGetErrorString(error));
            exit(-1);
        }
        printf("\nDevice %d:\n", i);
        printf("name: %s\n",dev.name);
        printf("Compute capability %d.%d\n",dev.major, dev.minor);
        printf("total global memory(KB): %zu\n", dev.totalGlobalMem/1024);
        printf("shared mem per block: %zu\n",dev.sharedMemPerBlock);
        printf("regs per block: %d\n", dev.regsPerBlock);
        printf("warp size: %d\n", dev.warpSize);
        printf("max threads per block: %d\n",dev.maxThreadsPerBlock);
        printf("max thread dim x:%d y:%d z:%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]);
        printf("max grid size x:%d y:%d z:%d\n", dev.maxGridSize[0],dev.maxGridSize[1], dev.maxGridSize[2]);
        printf("clock rate(KHz): %d\n",dev.clockRate);
        printf("total constant memory (bytes): %zu\n",dev.totalConstMem);
        printf("multiprocessor count %d\n",dev.multiProcessorCount);
        printf("memory bus width: %d\n",dev.memoryBusWidth);
        printf("memory clock rate (KHz): %d\n",dev.memoryClockRate);
        printf("L2 cache size (bytes): %d\n", dev.l2CacheSize);
        printf("max threads per SM: %d\n", dev.maxThreadsPerMultiProcessor);
    }
    cudaProfilerStop();
    return 0;
}
|
8,862 | /* GPU vector add test program for CUDA */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
static int nblock_size = 64;
static int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static cudaError_t crc;
/*--------------------------------------------------------------------*/
__global__ void gadd(float a[], float b[], float c[], int nx) {
/* element-wise vector add: a = b + c, one thread per element */
   int idx = threadIdx.x+blockDim.x*blockIdx.x;
   if (idx < nx)
      a[idx] = b[idx] + c[idx];
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpadd(float *a, float *b, float *c, int nx) {
/* Vector Add Interface for C */
/* fixed-size thread blocks, just enough blocks to cover nx elements */
   dim3 threads(nblock_size);
   dim3 grid((nx - 1)/nblock_size + 1);
   gadd<<<grid,threads>>>(a,b,c,nx);
   cudaThreadSynchronize();
   return;
}
/* no-op kernel; launched by emptykernel() to verify that kernel launches work */
__global__ void emptyKernel() {}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel() {
/* launch a no-op kernel to verify that launches succeed on the selected */
/* device; prints the error and exits on failure */
int ngx, ngy;
/* NOTE(review): ngx is clamped from nblock_size (the block size), not */
/* ngrid_size -- ngrid_size may have been intended here; confirm */
ngx = nblock_size < 32768 ? nblock_size : 32768;
ngy = (ngrid_size - 1)/ngx + 1;
dim3 dimBlock(nblock_size,1);
dim3 dimGrid(ngx,ngy);
/* clear any sticky error before the launch */
crc = cudaGetLastError();
emptyKernel<<<dimGrid,dimBlock>>>();
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("emptyKernel error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu(int dev, int *irc) {
/* initialize CUDA with device dev or selects best GPU available */
/* searches throughs devices, selects the device with the most compute */
/* units, and saves the device id devid */
/* if dev is a valid device, it is used, otherwise the GPU with the */
/* most multi-processors is selected */
/* error code is modified only if there is an error */
int maxcpus = 0, jm = -1;
int j, ndevs, maxunits;
unsigned long msize;
double z;
struct cudaDeviceProp prop;
/* returns number of device */
crc = cudaGetDeviceCount(&ndevs);
if (crc) {
printf("cudaGetDeviceCount Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* get information about devices */
for (j = 0; j < ndevs; j++) {
crc = cudaGetDeviceProperties(&prop,j);
if (crc) {
printf("cudaGetDeviceProperties Error=%i:%s\n",crc,
cudaGetErrorString(crc));
prop.name[0] = 0;
}
maxunits = prop.multiProcessorCount;
/* no explicit device requested: list each candidate and remember the */
/* one with the most multiprocessors */
if (dev <= 0) {
printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n",
j,prop.name,maxunits);
msize = prop.totalGlobalMem;
z = ((double) msize)/1073741824.0;
mmcc = 10*prop.major + prop.minor;
printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n",
msize,(float) z,mmcc);
if (maxunits > maxcpus) {
maxcpus = maxunits;
jm = j;
}
}
}
devid = jm;
/* an explicit non-negative request overrides the best-device choice */
if (dev >= 0)
devid = dev % ndevs;
printf("using device j=%i\n",devid);
/* get properties for this device */
crc = cudaGetDeviceProperties(&prop,devid);
maxgsx = prop.maxGridSize[0];
mmcc = 10*prop.major + prop.minor;
/* set device */
crc = cudaSetDevice(devid);
if (crc) {
printf("cudaSetDevice Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* run empty kernel */
emptykernel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu() {
/* shut down the CUDA context; report (but do not propagate) any error */
   crc = cudaThreadExit();
   if (crc)
      printf("cudaThreadExit Error=%d:%s\n",crc,cudaGetErrorString(crc));
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void setgbsize(int nblock) {
/* override the default CUDA thread-block size used for kernel launches */
   nblock_size = nblock;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) {
/* allocate nsize floats of GPU global memory and hand the pointer back */
/* to C; on failure the error is printed and *irc is set */
   void *devptr;
   crc = cudaMalloc(&devptr,sizeof(float)*nsize);
   if (crc) {
      printf("cudaMalloc float Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *g_f = (float *)devptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate(float **g_f, int *irc) {
/* release GPU global memory and null the caller's pointer */
   crc = cudaFree((void *)*g_f);
   if (crc) {
      printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc));
      *irc = 1;
   }
   *g_f = NULL;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) {
/* copy nsize floats host -> device; abort the program on failure */
   crc = cudaMemcpy((void *)g_f,f,sizeof(float)*nsize,
                    cudaMemcpyHostToDevice);
   if (crc) {
      printf("cudaMemcpyHostToDevice float Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) {
/* copy nsize floats device -> host; abort the program on failure */
   crc = cudaMemcpy(f,(void *)g_f,sizeof(float)*nsize,
                    cudaMemcpyDeviceToHost);
   if (crc) {
      printf("cudaMemcpyDeviceToHost float Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void gpadd_(unsigned long *gp_a, unsigned long *gp_b,
unsigned long *gp_c, int *nx) {
/* Vector Add Interface for Fortran */
/* Fortran passes device pointers as integer handles; unpack and forward */
   float *a = (float *)*gp_a;
   float *b = (float *)*gp_b;
   float *c = (float *)*gp_c;
   gpadd(a,b,c,*nx);
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu_(int *dev, int *irc) {
/* Fortran wrapper: dereference the arguments and forward to init_cu */
   init_cu(*dev,irc);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu_() {
/* Fortran wrapper: forward to end_cu */
   end_cu();
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void setgbsize_(int *nblock) {
/* Fortran wrapper: dereference and forward to setgbsize */
   setgbsize(*nblock);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float memory on GPU, return pointer to Fortran */
/* fix: the device pointer was cast through (long); cast to unsigned long */
/* to match the declared handle type used by every other wrapper here */
   float *fptr;
   gpu_fallocate(&fptr,*nsize,irc);
   *gp_f = (unsigned long)fptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate_(unsigned long *gp_f, int *irc) {
/* Fortran wrapper: free the device buffer and zero the handle */
   float *f = (float *)*gp_f;
   gpu_deallocate(&f,irc);
   *gp_f = 0;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f,
int *nsize) {
/* Fortran wrapper: host -> device copy via the integer device handle */
   float *g_f = (float *)*gp_f;
   gpu_fcopyin(f,g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f,
int *nsize) {
/* Fortran wrapper: device -> host copy via the integer device handle */
   float *g_f = (float *)*gp_f;
   gpu_fcopyout(f,g_f,*nsize);
   return;
}
|
8,863 | /*
* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-07-18
* I'm surprised that I did not write this file till today.
*/
#include <curand.h>
#include <time.h>
#include "SetData.cuh"
#include <curand_kernel.h>
#include "../../XDevice.h"
#include "../../XUtility.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
set a data array with a fixed value
>> d - pointer to the data array
>> v - the initial value
>> size - size of the array
*/
template<class T>
__global__
void KernelSetDataFixed(T * d, T v, int size)
{
    /* one element per thread: write the constant v into every slot */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    d[idx] = v;
}
template __global__ void KernelSetDataFixed<int>(int *, int, int);
template __global__ void KernelSetDataFixed<float>(float *, float, int);
template __global__ void KernelSetDataFixed<double>(double *, double, int);
//template __global__ void KernelSetDataFixed<__half>(__half*, __half, int);
/*
generate data items with a fixed value
>> tensor - the tensor for initialization
>> value - the initial value
*/
template<class T>
void _CudaSetDataFixed(XTensor * tensor, T value)
{
/* pick a 1D launch configuration sized for unitNum elements */
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
/* switch to the tensor's device; restored by BacktoCudaDev below */
ProtectCudaDev(tensor->devID, devIDBackup);
/* dispatch on the tensor's runtime datatype */
if (tensor->dataType == X_INT)
KernelSetDataFixed << <blocks, threads >> > ((int*)tensor->data, (int)value, tensor->unitNum);
else if (tensor->dataType == X_FLOAT)
KernelSetDataFixed << <blocks, threads >> > ((float*)tensor->data, (float)value, tensor->unitNum);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataFixed << <blocks, threads >> > ((double*)tensor->data, (double)value, tensor->unitNum);
//else if (tensor->dataType == X_FLOAT16)
// KernelSetDataFixed << <blocks, threads >> > ((__half*)tensor->data, (__half)value, tensor->unitNum);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataFixed<int>(XTensor *, int);
template void _CudaSetDataFixed<float>(XTensor *, float);
template void _CudaSetDataFixed<double>(XTensor *, double);
/*
set a float data array with a fixed value p (in int) only
if the condition entry is non-zero
>> d - pointer to the data array
>> c - pointer to the condition array
>> size - size of the array
>> p - the initial value
*/
template<class T>
__global__
void KernelSetDataFixedCond(T * d, T * c, T value, int size)
{
    /* write `value` into d wherever the matching condition entry is non-zero */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    if (c[idx] != 0)
        d[idx] = value;
}
template __global__ void KernelSetDataFixedCond<int>(int*, int*, int, int);
template __global__ void KernelSetDataFixedCond<float>(float*, float*, float, int);
template __global__ void KernelSetDataFixedCond<double>(double*, double*, double, int);
//template __global__ void KernelSetDataFixedCond<__half>(__half*, __half*, __half, int);
/*
generate data items with a fixed value p
only if the condition entry is non-zero
>> tensor - the tensor for initialization
>> condition - the condition tensor whose entry would be check to
set the corresponding entry in "tensor"
>> value - the initial value
*/
template<class T>
void _CudaSetDataFixedCond(XTensor* tensor, XTensor* condition, T value)
{
/* pick a 1D launch configuration sized for unitNum elements */
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
/* switch to the tensor's device; restored by BacktoCudaDev below */
ProtectCudaDev(tensor->devID, devIDBackup);
/* dispatch on the runtime datatype. NOTE(review): condition->data is cast
   to the same element type as tensor->data -- assumes both tensors share a
   datatype; confirm with callers */
if (tensor->dataType == X_INT)
KernelSetDataFixedCond <<< blocks, threads >>> ((int*)tensor->data, (int*)condition->data,
(int)value, tensor->unitNum);
else if (tensor->dataType == X_FLOAT)
KernelSetDataFixedCond <<< blocks, threads >>> ((float*)tensor->data, (float*)condition->data,
(float)value, tensor->unitNum);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataFixedCond <<< blocks, threads >>> ((double*)tensor->data, (double*)condition->data,
(double)value, tensor->unitNum);
//else if (tensor->dataType == X_FLOAT16)
// KernelSetDataFixedCond <<< blocks, threads >>> ((__half*)tensor->data, (__half*)condition->data,
// (__half)value, tensor->unitNum);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataFixedCond<int>(XTensor*, XTensor*, int);
template void _CudaSetDataFixedCond<float>(XTensor*, XTensor*, float);
template void _CudaSetDataFixedCond<double>(XTensor*, XTensor*, double);
/*
set data array with a uniform distribution in [low, high]
>> deviceStates - the state of curand
>> d - float datatype pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataRandFloat(float * d, int size, DTYPE lower, DTYPE variance)
{
    /* affine rescale in place: d[i] <- d[i] * variance + lower */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    d[idx] = d[idx] * variance + lower;
}
/*
set data array with a uniform distribution in [low, high]
>> deviceStates - the state of curand
>> d - double datatype pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataRandDouble(double * d, int size, DTYPE lower, DTYPE variance)
{
    /* affine rescale in place: d[i] <- d[i] * variance + lower */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    d[idx] = d[idx] * variance + lower;
}
/*
set data items to a pre-defined value if its value >= p, set it to 0 otherwise
>> d - pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataPCut(DTYPE * d, int size, DTYPE p, DTYPE value)
{
    /* threshold in place: entries >= p become `value`, the rest become 0 */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    d[idx] = (d[idx] >= p) ? value : 0;
}
/*
set data items along with a given dimension (and keep the remaining items unchanged) - kernel version
>> tensor - the tensor whose data array would be initialized
>> beg - the beginning position
>> len - length of the segment to be set
>> blockSize - size of a data block
>> blockNum - number of data blocks
*/
template<class T>
__global__
void KernelSetDataDim(T * d, int beg, int len, int blockSize, int blockNum, T p)
{
    /* offset in each block */
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    /* block id */
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    /* fix: the guard was `j > blockNum`, which let j == blockNum through and
       wrote one full block past the end of the array; must be j >= blockNum */
    if(i >= blockSize || j >= blockNum)
        return;
    /* only positions inside [beg, beg+len) of each block are overwritten */
    if(i < beg || i >= beg + len)
        return;
    d[blockSize * j + i] = p;
}
template __global__ void KernelSetDataDim<int>(int*, int, int, int, int, int);
template __global__ void KernelSetDataDim<float>(float*, int, int, int, int, float);
template __global__ void KernelSetDataDim<double>(double*, int, int, int, int, double);
/*
set data items along with a given dimension (and keep the remaining items unchanged) - cuda version
>> tensor - the tensor whose data array would be initialized
>> beg - the beginning position
>> len - length along with the given dimension
>> dim - the dimension along which we set the data
e.g., given a 3 * 3 tensor
1 2 3
4 5 6
7 8 9
when beg = 1, len = 1, dim = 0 and p = 0, we have
1 2 3
0 0 0
7 8 9
i.e., we set all entries of row 1 to 0
*/
template<class T>
void _CudaSetDataDim(XTensor * tensor, int beg, int len, int dim, T p)
{
    int n = tensor->order;
    CheckNTErrors(tensor->dataType == DEFAULT_DTYPE, "TODO!");
    CheckNTErrors(dim < n && dim >= 0, "Illegal dimension!");
    CheckNTErrors(beg >= 0 && beg < tensor->GetDim(dim), "Illegal beginning position!");
    /* fix: a segment may legally end exactly at the dimension's upper bound,
       so the valid condition is beg + len <= GetDim(dim); the old check
       (beg + len < GetDim(dim)) rejected ranges reaching the last index,
       e.g. beg = 0, len = GetDim(dim) */
    CheckNTErrors(beg + len >= 0 && beg + len <= tensor->GetDim(dim), "Illegal length!");
    /* stride = elements per step along dimensions to the right of `dim` */
    int stride = 1;
    int blockSize = 1;
    int blockNum = 1;
    for(int i = n - 1; i > dim; i--){
        stride *= tensor->GetDim(i);
    }
    blockSize = stride * tensor->GetDim(dim);
    blockNum = tensor->unitNum / blockSize;
    int cudaGrids[3];
    int cudaBlocks[3];
    GDevs.GetCudaThread2D(tensor->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
    dim3 blocks(cudaGrids[0], cudaGrids[1]);
    dim3 threads(cudaBlocks[0], cudaBlocks[1]);
    int devIDBackup;
    ProtectCudaDev(tensor->devID, devIDBackup);
    /* dispatch on the runtime datatype; beg/len are pre-scaled by stride */
    if (tensor->dataType == X_INT)
        KernelSetDataDim << <blocks, threads >> > ((int*)tensor->data, beg * stride,
                                                   len * stride, blockSize, blockNum, (int)p);
    else if (tensor->dataType == X_FLOAT)
        KernelSetDataDim << <blocks, threads >> > ((float*)tensor->data, beg * stride,
                                                   len * stride, blockSize, blockNum, (float)p);
    else if (tensor->dataType == X_DOUBLE)
        KernelSetDataDim << <blocks, threads >> > ((double*)tensor->data, beg * stride,
                                                   len * stride, blockSize, blockNum, (double)p);
    else
        ShowNTErrors("TODO! Unsupported datatype!")
    BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataDim<int>(XTensor*, int, int, int, int);
template void _CudaSetDataDim<float>(XTensor*, int, int, int, float);
template void _CudaSetDataDim<double>(XTensor*, int, int, int, double);
/*
modify data items along with a given index and dimension
(and keep the remaining items unchanged) - kernel version
>> s - the pointer whose data would be modified
>> m - the pointer whose data would be used to modify the data pointed by s
>> blockNum - number of data blocks
>> blockSize - size of a data block
>> stride - stride of a data block
*/
__global__
void KernelSetDataIndexed(DTYPE * s, DTYPE * m, int blockNum, int blockSize, int stride)
{
    /* position within a block */
    int offset = blockDim.x * blockIdx.x + threadIdx.x;
    /* which block */
    int block = blockDim.y * blockIdx.y + threadIdx.y;
    if (offset >= stride || block >= blockNum)
        return;
    /* copy one element of the modifier into the target slice */
    s[blockSize * block + offset] = m[stride * block + offset];
}
/*
modify data items along with a given index and dimension (and keep the remaining items unchanged)
>> source - the tensor whose data array would be modified
>> modify - the tensor whose data array would be used to modify the source tensor
>> dim - the dimension along which we modify the tensor
>> index - index of the given dimension
e.g., given a source tensor (3, 3)
1 2 3
4 5 6
7 8 9
given a modified tensor (3)
1 2 3
when dim = 0, index = 1, we have
1 2 3
1 2 3
7 8 9
i.e., we set entries of row 1 to {1, 2, 3}
*/
void _CudaSetDataIndexed(XTensor * source, XTensor * modify, int dim, int index)
{
int order = source->order;
int size = source->GetDim(dim);
CheckNTErrors(source->dataType == DEFAULT_DTYPE, "TODO!");
CheckNTErrors(dim >= 0 && dim < order, "Illegal dimension!");
CheckNTErrors(index >= 0 && index < size, "Illegal index!");
/* stride = elements per step along dimensions to the right of `dim` */
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for(int i = order - 1; i > dim; i--){
stride *= source->GetDim(i);
}
blockSize = stride * source->GetDim(dim);
blockNum = source->unitNum / blockSize;
int cudaGrids[3];
int cudaBlocks[3];
GDevs.GetCudaThread2D(source->devID, stride, blockNum, MAX_INT, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0], cudaGrids[1]);
dim3 threads(cudaBlocks[0], cudaBlocks[1]);
int devIDBackup;
/* switch to the source tensor's device; restored by BacktoCudaDev below */
ProtectCudaDev(source->devID, devIDBackup);
/* the `index * stride` offset selects the target slice inside each block */
KernelSetDataIndexed<<<blocks, threads >>>((DTYPE*)source->data + index * stride, (DTYPE*)modify->data,
blockNum, blockSize, stride);
BacktoCudaDev(source->devID, devIDBackup);
}
/*
set lower triangular matrics for each block
>> d - pointer to the data array
>> l - row number (or column number) of each block, i.e,
a block is l * l matrix
>> blockSize - size of each block (blockSize = l * l)
>> blockNum - number of the blocks
>> p - the value for each entry of the lower triangular matrics
>> shift - the offset from diagonal
e.g., for a 3* 3 tensor,
when p = 1 ans shift = 0, we have
1 0 0
1 1 0
1 1 1
when p = 2 and shift = -1, we have
0 0 0
2 0 0
2 2 0
*/
__global__
void KernelSetDataLowTri(DTYPE * d, int l, int blockSize, int blockNum, DTYPE p, int shift)
{
    /* offset in each block */
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    /* block id */
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    /* fix: the guard was `j > blockNum`, which let j == blockNum through and
       wrote one full block past the end of the array; must be j >= blockNum */
    if(i >= blockSize || j >= blockNum)
        return;
    int row = i / l;
    int col = i % l;
    DTYPE * d2 = d + blockSize * j + row * l + col;
    /* keep entries on/below the shifted diagonal, zero everything above it */
    if(col <= row + shift)
        *d2 = p;
    else
        *d2 = 0;
}
/*
generate data as lower triangular matrices for the last two dimensions (cuda version)
>> tensor - the tensor whose data is to be set
>> value - the value for each entry of the lower triangular matrices
>> shift - the offset from the diagonal
   e.g., for a 3 * 3 tensor,
   when value = 1 and shift = 0, we have
   1 0 0
   1 1 0
   1 1 1
   when value = 2 and shift = -1, we have
   0 0 0
   2 0 0
   2 2 0
*/
void _CudaSetDataLowTri(XTensor * tensor, DTYPE value, int shift)
{
    /* each block is an l * l matrix where l is the size of the last dimension */
    const int l = tensor->GetDim(-1);
    const int blockSize = l * l;
    const int blockNum = tensor->unitNum / blockSize;

    int cudaGrids[3];
    int cudaBlocks[3];
    GDevs.GetCudaThread2D(tensor->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);

    dim3 blocks(cudaGrids[0], cudaGrids[1]);
    dim3 threads(cudaBlocks[0], cudaBlocks[1]);

    int devIDBackup;
    ProtectCudaDev(tensor->devID, devIDBackup);

    KernelSetDataLowTri<<<blocks, threads>>>((DTYPE*)tensor->data, l, blockSize, blockNum, value, shift);

    BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
generate data items with a uniform distribution in [lower, upper]
>> tensor - the tensor whose data array would be initialized
>> lower - lower value of the range
>> upper - upper value of the range
*/
void _CudaSetDataRand(const XTensor * tensor, DTYPE lower, DTYPE upper)
{
    /* fill the tensor with i.i.d. uniform samples in [lower, upper] */
    CheckNTErrors(upper > lower, "the high value must be greater than low value!");

    int gridSize[3];
    int blockSize[3];

    GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);

    dim3 blocks(gridSize[0]);
    dim3 threads(blockSize[0]);

    int devIDBackup;
    ProtectCudaDev(tensor->devID, devIDBackup);

    curandGenerator_t & gen = GDevs.GPUs[tensor->devID].gen;

    /* FIX: generate with the routine matching the tensor's element type.
       The original always called curandGenerateUniform (32-bit floats),
       which fills only half the bytes of a double tensor with valid data,
       even though the X_DOUBLE rescaling branch below clearly intends to
       support double tensors. */
    if (tensor->dataType == X_DOUBLE)
        curandGenerateUniformDouble(gen, (double*)tensor->data, tensor->unitNum);
    else
        curandGenerateUniform(gen, (float*)tensor->data, tensor->unitNum);

    /* rescale from (0, 1] to [lower, upper] when the range is not the default */
    DTYPE variance = upper - lower;
    if (variance != 1.0F || lower != 0) {
        if (tensor->dataType == X_FLOAT)
            KernelSetDataRandFloat <<<blocks, threads >>>
                                    ((float*) tensor->data, tensor->unitNum, lower, variance);
        else if (tensor->dataType == X_DOUBLE)
            KernelSetDataRandDouble <<<blocks, threads >>>
                                     ((double*)tensor->data, tensor->unitNum, lower, variance);
    }

    BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
generate data items with a uniform distribution in [lower, upper] and set
the item to a pre-defined value if the item >= p, set the item to 0 otherwise
>> tensor - the tensor whose data array would be initialized
>> lower - lower value of the range
>> upper - upper value of the range
>> p - the threshold
>> value - the value we intend to assign to the item
*/
void _CudaSetDataRandP(const XTensor * tensor, DTYPE lower, DTYPE upper, DTYPE p, DTYPE value)
{
    /* draw uniform samples in [lower, upper], then threshold them:
       items >= p are set to `value`, all other items are set to 0 */
    _CudaSetDataRand(tensor, lower, upper);

    int gridSize[3];
    int blockSize[3];
    GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);

    dim3 gridDims(gridSize[0]);
    dim3 blockDims(blockSize[0]);

    int devIDBackup;
    ProtectCudaDev(tensor->devID, devIDBackup);

    KernelSetDataPCut<<<gridDims, blockDims>>>((float*)tensor->data, tensor->unitNum, p, value);

    BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
set the data with an array of offsets (kernel version)
>> data - pointer to the data array
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
*/
/* write `value` into the data array at each of the `num` given offsets
   (kernel version, one thread per offset) */
__global__
void KernelSetDataWithOffset(DTYPE * data, MTYPE * offsets, DTYPE value, MTYPE num)
{
    /* global thread index */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    if (idx >= num)
        return;

    data[offsets[idx]] = value;
}
/*
set the data with an array of offsets (cuda version)
>> tensor - the tensor that keeps the data
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
*/
void _CudaSetDataWithOffset(XTensor * tensor, MTYPE * offsets, DTYPE value, MTYPE num)
{
    /* write `value` at `num` positions given by `offsets`
       (NOTE(review): unlike _CudaSetDataWithOffsetAndValue, no host->device
       staging is done here, so `offsets` is presumably already device
       memory -- confirm at the call sites) */
    CheckNTErrors(tensor->dataType == X_FLOAT, "Data type is incorrect!");

    int gridSize[3];
    int blockSize[3];
    GDevs.GetCudaThread(tensor->devID, (int)num, gridSize, blockSize);

    dim3 gridDims(gridSize[0]);
    dim3 blockDims(blockSize[0]);

    int devIDBackup;
    ProtectCudaDev(tensor->devID, devIDBackup);

    KernelSetDataWithOffset<<<gridDims, blockDims>>>((DTYPE*)tensor->data, offsets, value, num);

    BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
set the data with an array of offsets (kernel version)
>> data - pointer to the data array
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
>> dataType - the data type of the data and values
*/
/* scatter `values` into `data` at the given offsets (kernel version);
   only X_INT and X_FLOAT elements are handled, other types are ignored */
__global__
void KernelSetDataWithOffsetAndValue(void * data, MTYPE * offsets, void * values, MTYPE num, TENSOR_DATA_TYPE dataType)
{
    /* one thread per (offset, value) pair */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    if (idx >= num)
        return;

    if (dataType == X_INT)
        ((int *)data)[offsets[idx]] = ((int *)values)[idx];
    else if (dataType == X_FLOAT)
        ((float *)data)[offsets[idx]] = ((float *)values)[idx];
}
/*
set the data with arrays of offsets and values (cuda version)
>> tensor - the tensor that keeps the data
>> offsets - offset for each data item
>> values - value of each data item
>> num - number of the data items
*/
void _CudaSetDataWithOffsetAndValue(XTensor * tensor, MTYPE * offsets, void * values, MTYPE num)
{
    XMem * mem = tensor->mem;
    MTYPE offsetSize = num * sizeof(MTYPE);
    MTYPE valueSize;

    /* byte size of the value array depends on the tensor's element type */
    if (tensor->dataType == X_INT)
        valueSize = num * sizeof(int);
    else if (tensor->dataType == X_FLOAT)
        valueSize = num * sizeof(float);
    else
        ShowNTErrors("TO DO!!!");

    int gridSize[3];
    int blockSize[3];

    GDevs.GetCudaThread(tensor->devID, (int)num, gridSize, blockSize);

    dim3 blocks(gridSize[0]);
    dim3 threads(blockSize[0]);

    int devIDBackup;
    ProtectCudaDev(tensor->devID, devIDBackup);

    /*MTYPE * offsetsCuda = mem != NULL ?
    (MTYPE*)mem->AllocBuf(mem->devID, offsetSize) :
    (MTYPE*)XMemAlloc(tensor->devID, offsetSize);
    void * valuesCuda = mem != NULL ?
    mem->AllocBuf(mem->devID, valueSize) :
    XMemAlloc(tensor->devID, valueSize);*/

    /* stage offsets and values in device memory: prefer the tensor's memory
       pool when one is attached (its buffer must be locked first), otherwise
       allocate directly on the device */
    MTYPE * offsetsCuda;
    void * valuesCuda;
    if (mem != NULL) {
        mem->LockBuf();
        offsetsCuda = (MTYPE*)mem->AllocBuf(mem->devID, offsetSize);
        valuesCuda = mem->AllocBuf(mem->devID, valueSize);
    }
    else {
        offsetsCuda = (MTYPE*)XMemAlloc(tensor->devID, offsetSize);
        valuesCuda = XMemAlloc(tensor->devID, valueSize);
    }

    /* copy the arrays onto the device (devID -1 presumably denotes host
       memory -- TODO confirm against XMemCopy's convention) */
    if (mem != NULL) {
        XMemCopy(offsetsCuda, mem->devID, offsets, -1, offsetSize);
        XMemCopy(valuesCuda, mem->devID, values, -1, valueSize);
    }
    else {
        XMemCopy(offsetsCuda, tensor->devID, offsets, -1, offsetSize);
        XMemCopy(valuesCuda, tensor->devID, values, -1, valueSize);
    }

    KernelSetDataWithOffsetAndValue<<<blocks, threads >>> (tensor->data, offsetsCuda, valuesCuda, num, tensor->dataType);

    /* release the staging buffers in reverse order of allocation,
       then unlock the pool buffer */
    if (mem != NULL) {
        mem->ReleaseBuf(mem->devID, valueSize);
        mem->ReleaseBuf(mem->devID, offsetSize);
        mem->UnlockBuf();
    }
    else {
        XMemFree(tensor->devID, valuesCuda);
        XMemFree(tensor->devID, offsetsCuda);
    }

    BacktoCudaDev(tensor->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
8,864 | #include <stdio.h>
#include <time.h>
#include <math.h>
#define N 100
#define NTPB 1024
#define TPB 64
#define RAD 1 // radius of the stencil
/* one parallel-cyclic-reduction pass: eliminate the neighbours at distance k
   from every equation, producing a new system with neighbour spacing 2k.
   a: sub-diagonal, b: main diagonal, c: super-diagonal, d: right-hand side.
   Expects a 1-D launch with at least `size` threads. */
__global__ void forwardPCR(float *a_in, float *b_in, float *c_in, float*d_in, float *a_out, float *b_out, float *c_out, float*d_out, int k, int size){
    const int i = threadIdx.x + blockIdx.x * blockDim.x;

    /* FIX: the launch rounds the thread count up to a multiple of TPB, so
       threads with i >= size must exit before touching any array (the
       original had no guard, giving out-of-bounds reads and writes) */
    if(i >= size)
        return;

    if(i-k < 0){
        /* first rows: no neighbour above, eliminate only the one below */
        float k2 = c_in[i] / b_in[i+k];
        a_out[i] = a_in[i];
        b_out[i] = b_in[i] - a_in[i+k] * k2;
        c_out[i] = - c_in[i+k] * k2;
        d_out[i] = d_in[i] - d_in[i+k] * k2;
        return;
    }
    if(i+k >= size){
        /* last rows: no neighbour below, eliminate only the one above */
        float k1 = a_in[i] / b_in[i-k];
        a_out[i] = -a_in[i-k] * k1;
        b_out[i] = b_in[i] - c_in[i-k] * k1;
        c_out[i] = c_in[i];
        d_out[i] = d_in[i] - d_in[i-k] * k1;
        return;
    }
    /* interior rows: eliminate both neighbours */
    float k1 = a_in[i] / b_in[i-k];
    float k2 = c_in[i] / b_in[i+k];
    a_out[i] = -a_in[i-k] * k1;
    b_out[i] = b_in[i] - c_in[i-k] * k1 - a_in[i+k] * k2;
    c_out[i] = - c_in[i+k] * k2;
    d_out[i] = d_in[i] - d_in[i-k] * k1 - d_in[i+k] * k2;
}
/* solve the decoupled 2x2 systems that pair unknowns i and i+k after the
   reduction passes, writing the results into x */
__global__ void solve2unknown(float *a, float *b, float *c, float*d, float *x, int k, int size){
    const int i = threadIdx.x + blockIdx.x * blockDim.x;

    /* FIX: guard on the highest index actually written (i+k). The original
       test "i > size/2" let threads access x[i+k] with i+k >= size whenever
       size is not an exact power of two (e.g. size=100, k=64). */
    if(i + k >= size)
        return;

    /* NOTE(review): with the host loop stopping at N/k == 1, unknowns in
       [size-k, k) are never written when size is not a power of two; the
       reduction loop probably needs to continue while k < size -- confirm. */
    x[i+k] = ( d[i+k] - a[i+k]*d[i]/b[i] ) / ( b[i+k] - a[i+k]*c[i]/b[i] );
    x[i] = (d[i] - c[i]*x[i+k]) / b[i];
}
/* build a random N x N tridiagonal system, solve it on the GPU with
   parallel cyclic reduction, and print the solution vector */
int main(){
    srand(time(NULL));

    float *a_cpu, *b_cpu, *c_cpu, *d_cpu, *x_cpu;
    float *a_gpu, *b_gpu, *c_gpu, *d_gpu, *x_gpu;
    float *a_out, *b_out, *c_out, *d_out, *x_out;
    // a 1ere sous diagonale | b main diag | c 1ere up diag
    // d right hand vector | x solution

    // host buffers
    a_cpu = (float*) malloc(N*sizeof(float));
    b_cpu = (float*) malloc(N*sizeof(float));
    c_cpu = (float*) malloc(N*sizeof(float));
    d_cpu = (float*) malloc(N*sizeof(float));
    x_cpu = (float*) malloc(N*sizeof(float));

    // device buffers: current system plus the ping-pong output of each pass
    cudaMalloc(&a_gpu, N*sizeof(float));
    cudaMalloc(&b_gpu, N*sizeof(float));
    cudaMalloc(&c_gpu, N*sizeof(float));
    cudaMalloc(&d_gpu, N*sizeof(float));
    cudaMalloc(&x_gpu, N*sizeof(float));
    cudaMalloc(&a_out, N*sizeof(float));
    cudaMalloc(&b_out, N*sizeof(float));
    cudaMalloc(&c_out, N*sizeof(float));
    cudaMalloc(&d_out, N*sizeof(float));
    cudaMalloc(&x_out, N*sizeof(float));   // note: never used by the kernels

    int i, k;

    // random coefficients in [0, 10]
    for(i=0; i<N; i++){
        a_cpu[i] = (float)rand()/(float)(RAND_MAX/10);
        b_cpu[i] = (float)rand()/(float)(RAND_MAX/10);
        c_cpu[i] = (float)rand()/(float)(RAND_MAX/10);
        d_cpu[i] = (float)rand()/(float)(RAND_MAX/10);
    }
    a_cpu[0] = 0;     // no sub-diagonal entry on the first row
    c_cpu[N-1] = 0;   // no super-diagonal entry on the last row

    cudaMemcpy(a_gpu, a_cpu, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b_cpu, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(c_gpu, c_cpu, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_gpu, d_cpu, N*sizeof(float), cudaMemcpyHostToDevice);

    // PCR: double the neighbour spacing each pass
    k = 2;
    while(N/k > 1){
        forwardPCR<<<(N+TPB-1)/TPB, TPB>>>(a_gpu, b_gpu, c_gpu, d_gpu, a_out, b_out, c_out, d_out, k, N);
        cudaDeviceSynchronize();
        cudaMemcpy(a_gpu, a_out, N*sizeof(float), cudaMemcpyDeviceToDevice);
        cudaMemcpy(b_gpu, b_out, N*sizeof(float), cudaMemcpyDeviceToDevice);
        cudaMemcpy(c_gpu, c_out, N*sizeof(float), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_gpu, d_out, N*sizeof(float), cudaMemcpyDeviceToDevice);
        k *= 2;
    }

    // back-substitute the remaining 2-unknown systems
    solve2unknown<<<(N+TPB-1)/TPB, TPB>>>(a_gpu, b_gpu, c_gpu, d_gpu, x_gpu, k, N);
    cudaDeviceSynchronize();
    cudaMemcpy(x_cpu, x_gpu, N*sizeof(float), cudaMemcpyDeviceToHost);

    for(i=0; i<N; i++){
        printf("x_cpu[%d] = %f\n", i, x_cpu[i]);
    }

    free(a_cpu);
    free(b_cpu);
    free(c_cpu);
    free(d_cpu);
    free(x_cpu);
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(c_gpu);
    cudaFree(d_gpu);
    cudaFree(x_gpu);
    // FIX: the original leaked the five ping-pong/output device buffers
    cudaFree(a_out);
    cudaFree(b_out);
    cudaFree(c_out);
    cudaFree(d_out);
    cudaFree(x_out);
    return 0;
}
|
8,865 | #include "includes.h"
/* element-wise update of a dimx*dimy array: odd columns add
   sqrtf(logf(v)+1), even columns add sqrtf(cosf(v)+1).
   NOTE(review): ix comes from blockIdx.x alone, so the launch presumably
   uses gridDim.x == dimx with rows spread over the y dimension; there is
   no bounds check, so the grid must cover the array exactly -- confirm. */
__global__ void kernel_A( float *g_data, int dimx, int dimy )
{
    int ix  = blockIdx.x;
    int iy  = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = iy * dimx + ix;

    float v = g_data[idx];
    /* transform chosen by column parity */
    float t = (ix % 2) ? logf(v) : cosf(v);
    g_data[idx] = v + sqrtf(t + 1.f);
}
8,866 | #include <stdio.h>
/* element-wise vector addition: C[i] = A[i] + B[i] for every i < N */
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
/* host driver: add two small vectors on the GPU with a single block */
int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // select device 0
    int dev = 0;
    cudaSetDevice(dev);

    // problem size
    int nElem = 1 << 5;
    printf("Vector size %d\n", nElem);

    // host buffers
    size_t nBytes = nElem * sizeof(float);
    float *h_A    = (float *)malloc(nBytes);
    float *h_B    = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);

    // fill the inputs and clear the result buffer
    for (int i = 0; i < nElem; i++)
    {
        h_A[i] = h_B[i] = i;
    }
    memset(gpuRef, 0, nBytes);

    // device buffers
    float *d_A, *d_B, *d_C;
    cudaMalloc((float**)&d_A, nBytes);
    cudaMalloc((float**)&d_B, nBytes);
    cudaMalloc((float**)&d_C, nBytes);

    // host -> device
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice);

    // launch one block holding all elements
    dim3 block (nElem);
    dim3 grid (1);
    sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
    printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x);

    // device -> host (blocking copy also waits for the kernel to finish)
    cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);

    // cleanup
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(gpuRef);

    cudaDeviceReset();
    return(0);
}
8,867 | #include "includes.h"
/* fill dg_array[i] = i * scale for every index i < size */
__global__ void my_memsetIdx(int* dg_array, int size, int scale){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    dg_array[idx] = idx * scale;
}
8,868 | #include <iostream>
using namespace std;
/* Print a readable diagnostic and abort when a CUDA call fails.
   FIX: the original streamed the error string, file name and line number
   back to back with no separators ("invalid argumentmain.cu42");
   separators make the message usable. */
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err != cudaSuccess) {
        cout << cudaGetErrorString(err)
             << " in " << file << " at line " << line << endl;
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
/* report the active CUDA device, then switch to the device the runtime
   considers closest to compute capability 1.3 */
int main( void ) {
    cudaDeviceProp prop;
    int dev;

    /* current device id */
    HANDLE_ERROR( cudaGetDevice( &dev ) );
    printf( "ID of current CUDA device: %d\n", dev );

    /* fill only the fields we care about; zero the rest so unspecified
       properties do not constrain the choice */
    memset( &prop, 0, sizeof( cudaDeviceProp ) );
    prop.major = 1;
    prop.minor = 3;
    HANDLE_ERROR( cudaChooseDevice( &dev, &prop ) );
    printf( "ID of CUDA device closest to revision 1.3: %d\n", dev );

    /* make the chosen device current */
    HANDLE_ERROR( cudaSetDevice( dev ) );
}
|
8,869 | /*
* Copyright 2016 Alexander Terenin
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* /
*/
#include <curand_kernel.h>
/*
* Function : cuda_onesided_unitvar_tnorm
* Purpose : draws samples from independent truncated normals with mean vector mu,
std. dev. 1, truncated from zero to positive infinity if y = 1 and
negative infinity if y = -1, using inversion method, which is reasonable
in floating point precision as long as mean vector mu is not too far
away from 0.
* Argument n : size of sample
* Argument *state : pointer to random number generator state
* Argument *mu : pointer to mean vector
* Argument *y : pointer to truncation vector, 1 if positive, 0 if negative
* Output : mutates mu and stores result in its place
*/
extern "C"
__global__ void cuda_onesided_unitvar_tnorm(int n, curandStatePhilox4_32_10_t *globalState, float *mu, int *y) {
    /* one thread per sample; assumes a 1-D launch covering at least n threads,
       and that thread i == n-1 exists to write the RNG state back */
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    curandStatePhilox4_32_10_t state = globalState[0]; //copy random number generator state to local memory
    if(i < n) {
        //combined rejection sampler version
        float ystar = (float) (y[i] * 2 - 1); //transform from {0,1} to {-1.0f, 1.0f}
        float mustar = ystar * mu[i]; //always positive
        skipahead((unsigned long long) (6*i), &state); //give each thread its own pseudorandom subsequence with spacing 2^67
        //skipahead_sequence overflows somewhere, so use standard skipahead with spacing 3.
        if(!isfinite(mustar))
            mu[i] = 0.0f;  /* degenerate mean: return 0 rather than looping forever */
        else if(mustar < 0.47f) { //magic number to lower bound acceptance probability at around 2/3
            //upper tail: use exponential rejection sampler
            while(true) {
                float u = curand_uniform(&state); //one uniform for proposal
                float u2 = curand_uniform(&state); //one uniform for accept/reject step
                float alpha = (-mustar + sqrtf(mustar * mustar + 4.0f))/2.0f; //optimal scaling factor
                float prop = -logf(u) / alpha; //generate translated exponential(alpha, mu-)
                float rho = expf((prop - mustar - alpha) * (prop - mustar - alpha) / -2.0f); //compute acceptance probability
                if(u2 < rho) {
                    mu[i] = ystar * prop;  /* map back to the original side of the truncation */
                    break;
                }
            }
        } else {
            //lower tail: use Gaussian rejection sampler
            while(true) {
                //float prop = curand_normal(&state) + mustar; //BROKEN: use inverse transform method instead
                float u = curand_uniform(&state);
                float prop = normcdfinvf(u) + mustar;
                if(isinf(prop))
                    prop = 5.0f + mustar; //edge case, make sure computation doesn't stop if u == 1.0f
                if(prop > 0.0f) {
                    mu[i] = ystar * prop;
                    break;
                }
            }
        }
    }
    /* all threads reach this barrier: the rejection loops always terminate
       and the i >= n threads fall straight through */
    __syncthreads();
    //last thread: copy curand state back to global memory
    if(i == n-1)
        globalState[0] = state;
}
8,870 | #include <cuda_runtime.h>
#include <stdio.h>
/* Abort with file/line and the CUDA error string when `call` fails.
   FIX: the success constant is `cudaSuccess`; the original compared against
   the undeclared identifier `CudaSuccess`, which breaks compilation as soon
   as the macro is actually used. */
#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if(error != cudaSuccess) \
    { \
        printf("Error:%s:%d,", __FILE__, __LINE__);\
        printf("Code:%d, reason: %s \n", error, cudaGetErrorString(error));\
        exit(1);\
    }\
}
/* print the full thread/block/grid coordinates of every launched thread;
   debugging aid only -- device printf output is serialized and slow */
__global__ void chechIndex(void)
{
    printf("threadIdx:(%d, %d, %d) blockIdx:(%d,%d,%d) blockdim:(%d,%d,%d)"
           "gridDim:(%d,%d,%d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x,blockIdx.y,blockIdx.z, blockDim.x,blockDim.y,blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}
/* launch a small 1-D grid and print the launch geometry from host and device */
int main(int argc, char **argv)
{
    // 6 elements, 3 threads per block -> 2 blocks (ceil division)
    const int nElem = 6;
    dim3 block(3);
    dim3 grid((nElem + block.x - 1) / block.x);

    printf("grid.x %d gird.y %d grid.z %d \n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d \n", block.x, block.y, block.z);

    chechIndex<<<grid, block>>>();

    // cudaDeviceReset() waits for outstanding work, flushing device printf
    cudaDeviceReset();
    return 0;
    // kernel_function <<<4, 8>>>(argument list);
}
|
8,871 | ////#include<iostream>
////#include<cmath>
//#include "../Image.h"
//#include "../PPM.h"
//
//#include <cstdio>
//#include <cassert>
//#include <iostream>
//#include <math.h>
//
//// Constant values for LBP kerel
//#define MASK_WIDTH 3
//#define neighborhood (MASK_WIDTH * MASK_WIDTH - 1)
//#define img_deep 255
//
//
//
//static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t);
//
//#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
//
///**
// * Check the return value of the CUDA runtime API call and exit
// * the application if the call has failed.
// */
//static void CheckCudaErrorAux(const char *file, unsigned line,
// const char *statement, cudaError_t err) {
// if (err == cudaSuccess)
// return;
// std::cerr << statement << " returned " << cudaGetErrorString(err) << "("
// << err << ") at " << file << ":" << line << std::endl;
// exit(1);
//}
//
//__global__ void LBPkernel(float *img, float *out_img, int width, int height){
// // Naive cuda kernel to compute LBP descriptor
//// !! the pixel values are between 0 and 1
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// printf("Hello from lbp k, col_idx: %d, row_idx: %d\n", col, row);
//
// // Ensure that threads do not attempt illegal memory access (this can happen because there could be more threads than elements in an array)
// if (col < width && row < height){
// int pixVal = 0;
// int threshold_values[neighborhood];
//// std::vector<int> threshold_values;
// int N_start_col = col - (MASK_WIDTH / 2);
// int N_start_row = row - (MASK_WIDTH / 2);
// int arr_idx = 0;
//// iterate over mask pixel values
// for (int j = 0; j < MASK_WIDTH; j++){
// for (int k = 0; k < MASK_WIDTH; k++){
// int curRow = N_start_row + j;
// int curCol = N_start_col + k;
//
// // Verify we have a valid image pixel
// if(curRow > -1 && curRow < height && curCol > -1 && curCol < width) {
// printf("pixel value at row: %d, col: %d : %d\n",curRow, curCol, (int)img[curRow * width + curCol] );
// if (curRow != row && curCol != col){ // no compute for mask center
//// threshold_values.push_back(((int)img[curRow * width + curCol] >= (int)img[row * width + col]) ? 1 : 0);
// threshold_values[arr_idx++] = (img[curRow * width + curCol] >= img[row * width + col]) ? 1 : 0;
// } else
// printf("central pixel value: %d \n", img[row * width + col]);
// }
// }
// }
//// ---
//// for (auto th : threashold_values){
//// pixVal += th * (int)exp2f();
//// }
//
// for (int i=0; i<neighborhood; i++){ // vec[i] operation has O(1) Complexity
// pixVal += threshold_values[i] * (int)exp2f(i);
// }
//
// printf("pixval: %d\n", pixVal);
//
// out_img[row * width + col] = pixVal / 255;
// }
//}
//
//// simple test to read/write PPM images, and process Image_t data
//void test_images() {
// Image_t* inputImg = PPM_import("computer_programming.ppm");
// for (int i = 0; i < 300; i++) {
//// Image_setPixel(inputImg, i, 100, 0, float(i) / 300);
// Image_setPixel(inputImg, i, 100, 1, float(i) / 300);
//// Image_setPixel(inputImg, i, 100, 2, float(i) / 200);
// }
// PPM_export("test_output3.ppm", inputImg);
// Image_t* newImg = PPM_import("test_output.ppm");
// inputImg = PPM_import("computer_programming.ppm");
// if (Image_is_same(inputImg, newImg))
// printf("Img uguali\n");
// else
// printf("Img diverse\n");
//}
//
//
//
//
//// ------------------------------------
//
////__constant__ int MASK_WIDTH = 3;
//
//
//
////__global__ void lbpKernel(int* grayImage, int* outputImage, int width, int height){
////// Naive cuda kernel to compute LBP descriptor
//// int col = blockIdx.x * blockDim.x + threadIdx.x;
//// int row = blockIdx.y * blockDim.y + threadIdx.y;
//// printf("Hello from lbp k, col_idx: %d, row_idx: %d", col, row);
//// if (col < width && row < height){ // Ensure that threads do not attempt illegal memory access (this can happen because there could be more threads than elements in an array)
//// int pix_val = 0;
//// int threshold_values[MASK_WIDTH * MASK_WIDTH - 1];
//// int N_start_col = col - (MASK_WIDTH / 2); // N_start_col = col - 1
//// int N_start_row = row - (MASK_WIDTH / 2);
//// int center_idx = int(floor(MASK_WIDTH / 2)); // index of the central pixel of the mask (in this case (1,1))
//// int arr_idx = 0;
////// iterate over pixels in the mask
//// for (int j = 0; j < MASK_WIDTH; j++){
//// for (int k = 0; k < MASK_WIDTH; k++){
//// int cur_row = N_start_row + j;
//// int cur_col = N_start_col + k;
//// // Verify we have a valid image pixel
//// if(cur_row > -1 && cur_row < height && cur_col > -1 && cur_col < width) {
//// if (cur_row != row && cur_col != col){ // dot compute for central pixel
//// if (grayImage[cur_row * width + cur_col] >= grayImage[row * width + col])
//// threshold_values[arr_idx++] = 1;
//// else
//// threshold_values[arr_idx++] = 0;
//// }
//// }
//// }
//// }
//// for (int i = 0; i < sizeof(threshold_values)/sizeof(threshold_values[0]); i++){
//// if (threshold_values[i] == 1)
//// pix_val += 2 * *i;
//// }
//// outputImage[row * width + col] = pix_val;
//// }
////}
//
|
8,872 | /*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the surface redistance scheme
******************************************************************************/
/* pair of one-sided ENO derivatives at a grid node */
typedef struct
{
    double sR;  /* right-sided (forward) derivative */
    double sL;  /* left-sided (backward) derivative */
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
    /* larger of the two arguments */
    if (x < y)
        return y;
    return x;
}
__device__ inline
double min2(double x, double y)
{
    /* smaller of the two arguments */
    if (x < y)
        return x;
    return y;
}
__device__ inline
double min_mod(double x, double y)
{
    /* minmod limiter: 0 when the signs differ, otherwise the argument
       of smaller magnitude */
    if (x * y < 0)
        return 0.0;
    return (fabs(x) < fabs(y)) ? x : y;
}
__device__ inline
double max_abs(double x, double y)
{
    /* argument of larger magnitude (y wins ties) */
    if (fabs(x) < fabs(y))
        return y;
    return x;
}
__device__ inline
double sign(double x)
{
    /* +1 for positive x, -1 otherwise (note: sign(0) == -1) */
    if (x > 0)
        return 1.0;
    return -1.0;
}
__device__ inline
void upwind_derivative(double & Dr, double Cx, double Cy, double Cz, double xR, double xL, double yF, double yB, double zU, double zD)
{
    /* upwind directional derivative along velocity (Cx,Cy,Cz): for each
       component, take the one-sided difference dictated by its sign */
    double acc = max2(Cx, 0) * xR - min2(Cx, 0) * xL;
    acc += max2(Cy, 0) * yF - min2(Cy, 0) * yB;
    acc += max2(Cz, 0) * zU - min2(Cz, 0) * zD;
    Dr = acc;
}
// convert subindex to linear index
// out-of-range subscripts are clamped to the boundary (replicate padding, not periodic)
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
    /* column-major layout: rows vary fastest, then columns, then pages;
       out-of-range subscripts are clamped to the nearest valid index */
    int r = min2(rows - 1, max2(0, row_idx));
    int c = min2(cols - 1, max2(0, col_idx));
    int p = min2(pges - 1, max2(0, pge_idx));
    return p * rows * cols + c * rows + r;
}
/******************************************************************************
* calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
    /* second-order ENO one-sided derivatives at v0 on the five-point stencil
       [v4, v1, v0, v2, v3]; pr/pl are the distances to the right/left
       neighbour (pr == ds / pl == ds away from the interface) */
    double p2m;
    double_eno_derivative eno_d;

    /* centered and right-shifted second differences */
    double p2 = v1 - 2.0 * v0 + v2;
    double p2r = v0 - 2.0 * v2 + v3;
    p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
    /* when pr != ds the node is next to the interface and the right value is
       taken as 0 -- NOTE(review): presumably the boundary value of the
       (auxiliary) level set there; confirm against the caller */
    double vr = (pr==ds) ? v2 : 0;
    eno_d.sR = (vr - v0) / pr - pr * p2m;

    /* left-shifted second difference for the left-sided derivative */
    double p2l = v0 - 2.0 * v1 + v4;
    p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
    double vl = (pl==ds) ? v1 : 0;
    eno_d.sL = (v0 - vl) / pl + pl * p2m;

    return eno_d;
}
// calculate surface redistance step
// now lsf represents the auxilary level set function(not the level set function)
// inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors
/* one explicit time step of the surface redistancing PDE; one thread per
   grid node of the rows x cols x pges volume */
__global__
void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * ax, double const * ay, double const * az, double const * bx, double const * by, double const * bz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;

    /* bounds guard for the rounded-up launch */
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }

    /* ENO one-sided derivatives in x (columns) */
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
    int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
    //double Dx[2] = {eno_dx.sR, eno_dx.sL};
    double xR = eno_dx.sR;
    double xL = eno_dx.sL;

    /* ENO one-sided derivatives in y (rows) */
    int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
    int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
    //double Dy[2] = {eno_dy.sR, eno_dy.sL};
    double yF = eno_dy.sR;
    double yB = eno_dy.sL;

    /* ENO one-sided derivatives in z (pages) */
    int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
    int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
    int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
    double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
    //double Dz[2] = {eno_dz.sR, eno_dz.sL};
    double zU = eno_dz.sR;
    double zD = eno_dz.sL;

    /* normal and the two tangential basis vectors at this node */
    double Nx = nx[ind];
    double Ny = ny[ind];
    double Nz = nz[ind];
    double Ax = ax[ind];
    double Ay = ay[ind];
    double Az = az[ind];
    double Bx = bx[ind];
    double By = by[ind];
    double Bz = bz[ind];
    double Sign = sign[ind];
    double dt = deltat[ind];

    // forward and backward derivatives along a/b directions
    double Da_f, Da_b, Db_f, Db_b;
    upwind_derivative(Da_f,Ax,Ay,Az,xR,xL,yF,yB,zU,zD);
    upwind_derivative(Da_b,-Ax,-Ay,-Az,xR,xL,yF,yB,zU,zD);
    upwind_derivative(Db_f,Bx,By,Bz,xR,xL,yF,yB,zU,zD);
    upwind_derivative(Db_b,-Bx,-By,-Bz,xR,xL,yF,yB,zU,zD);

    // choice of one-sided derivatives in the surface coordiante
    double Da = max_abs(max2(Sign*Da_b,0),min2(Sign*Da_f,0));
    double Db = max_abs(max2(Sign*Db_b,0),min2(Sign*Db_f,0));
    /* NOTE(review): den == 0 when both Da and Db vanish, making H1..H3
       0/0 (NaN); presumably those nodes still produce a usable step via
       the dr_* selectors, but confirm */
    double den = sqrt(Da*Da+Db*Db);

    // direction of information
    double H1 = (Da * Ax + Db * Bx) / den;
    double H2 = (Da * Ay + Db * By) / den;
    double H3 = (Da * Az + Db * Bz) / den;

    // select the right derivatives
    double dr_x = (H1>0) ? xL : ( (H1<0) ? xR : 0 );
    double dr_y = (H2>0) ? yB : ( (H2<0) ? yF : 0 );
    double dr_z = (H3>0) ? zD : ( (H3<0) ? zU : 0 );

    /* project out the normal component so only the surface gradient counts */
    double dr_n = Nx * dr_x + Ny * dr_y + Nz * dr_z;

    step[ind] = dt * Sign * (sqrt( dr_x*dr_x + dr_y*dr_y + dr_z*dr_z - dr_n*dr_n) - 1);
}
// construct a local coordinate system at each point
__global__
void surface_coordinate(double * ax, double * ay, double * az, double * bx, double * by, double * bz, double const * nx, double const * ny, double const * nz, int rows, int cols, int pges)
{
    /* build two tangential basis vectors (a, b) orthogonal to the normal n
       at every grid node; the branch is chosen by the largest usable
       normal component.
       NOTE(review): each branch writes only four of the six output
       components (e.g. the first leaves az and by untouched), so the
       output arrays are presumably zero-initialized by the caller --
       confirm. */
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;

    /* bounds guard for the rounded-up launch */
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }

    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);

    double Nx = nx[ind];
    double Ny = ny[ind];
    double Nz = nz[ind];

    /* avoid dividing by a vanishing component */
    double epsilon = 1e-6;
    if(fabs(Nx)>epsilon){
        double den1 = sqrt(Nx*Nx+Ny*Ny);
        ax[ind] = -Ny/den1;
        ay[ind] = Nx/den1;
        den1 = sqrt(Nx*Nx+Nz*Nz);
        bx[ind] = - Nz/den1;
        bz[ind] = Nx/den1;
    }
    else if(fabs(Ny)>epsilon){
        double den1 = sqrt(Ny*Ny+Nx*Nx);
        ay[ind] = -Nx/den1;
        ax[ind] = Ny/den1;
        den1 = sqrt(Ny*Ny+Nz*Nz);
        by[ind] = - Nz/den1;
        bz[ind] = Ny/den1;
    }
    else if(fabs(Nz)>epsilon){
        double den1 = sqrt(Nz*Nz+Nx*Nx);
        az[ind] = -Nx/den1;
        ax[ind] = Nz/den1;
        den1 = sqrt(Nz*Nz+Ny*Ny);
        bz[ind] = - Ny/den1;
        by[ind] = Nz/den1;
    }
}
/* older variant of surface_redistance_step: works purely in the (a, b)
   tangential coordinates without projecting out the normal component */
__global__
void surface_redistance_step_backup(double * step, double const * lsf, double const * sign, double const * deltat, double const * ax, double const * ay, double const * az, double const * bx, double const * by, double const * bz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;

    /* bounds guard for the rounded-up launch */
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }

    /* ENO one-sided derivatives in x (columns) */
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
    int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
    //double Dx[2] = {eno_dx.sR, eno_dx.sL};
    double xR = eno_dx.sR;
    double xL = eno_dx.sL;

    /* ENO one-sided derivatives in y (rows) */
    int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
    int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
    //double Dy[2] = {eno_dy.sR, eno_dy.sL};
    double yF = eno_dy.sR;
    double yB = eno_dy.sL;

    /* ENO one-sided derivatives in z (pages) */
    int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
    int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
    int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
    double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
    //double Dz[2] = {eno_dz.sR, eno_dz.sL};
    double zU = eno_dz.sR;
    double zD = eno_dz.sL;

    /* tangential basis vectors at this node */
    double Ax = ax[ind];
    double Ay = ay[ind];
    double Az = az[ind];
    double Bx = bx[ind];
    double By = by[ind];
    double Bz = bz[ind];
    double Sign = sign[ind];
    double dt = deltat[ind];

    // forward and backward derivatives along a/b directions
    double Da_f, Da_b, Db_f, Db_b;
    upwind_derivative(Da_f,Ax,Ay,Az,xR,xL,yF,yB,zU,zD);
    upwind_derivative(Da_b,-Ax,-Ay,-Az,xR,xL,yF,yB,zU,zD);
    upwind_derivative(Db_f,Bx,By,Bz,xR,xL,yF,yB,zU,zD);
    upwind_derivative(Db_b,-Bx,-By,-Bz,xR,xL,yF,yB,zU,zD);

    /* Godunov-style choice of the upwind derivative magnitudes */
    double Da = max(max2(Sign*Da_b,0),-min2(Sign*Da_f,0));
    double Db = max(max2(Sign*Db_b,0),-min2(Sign*Db_f,0));

    step[ind] = dt * Sign * (sqrt(Da*Da + Db*Db) - 1);
}
|
8,873 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <cstdio>
#include <math.h>
#include <sys/time.h>
#include <iostream>
#include <iomanip>
#include <string>
#include <algorithm>
using namespace std;
__device__ bool valueAllowedCheck(int row, int col, int value, int* puzzle)
{
// Sudoku constraint check: `value` may be placed at (row, col) only if it
// does not already appear in that row, that column, or the 3x3 subsquare.
// The single loop visits the k-th peer of each of the three groups.
for (int k = 0; k < 9; k++)
{
bool inRow = (puzzle[row * 9 + k] == value);
bool inCol = (puzzle[col + k * 9] == value);
// subsquare: k%3 walks rows of the box, k/3 walks its columns
bool inBox = (puzzle[(row / 3 * 3 + k % 3) * 9 + (col / 3 * 3 + k / 3)] == value);
if (inRow || inCol || inBox)
return false;
}
return true; // no conflicting peer found
}
// Recursive backtracking Sudoku solver (device-side).
// row/col identify the cell handled by the *previous* call; the function
// first advances to the next cell in row-major order (wrapping at the end),
// then tries candidate values for it.
// counter    - number of cells visited so far; 81 means the board is done.
// startValue - rotating first candidate, so different threads explore the
//              value space starting from different digits.
// finished   - early-exit flag. NOTE(review): passed by value, so after the
//              initial call a thread never observes another thread setting
//              the shared flag — confirm whether cross-thread cancellation
//              was intended.
__device__ bool solve(int row, int col, int* puzzle, int counter, int startValue, bool finished)
{
if(counter == 81) //every cell has been visted
{
return true;
}
if(finished)
{
return true;
}
//loop of column and rows
if(++col == 9)
{
col = 0;
if(++row == 9)
{
row = 0;
}
}
//skip cells that are already filled
if(puzzle[row * 9 + col] != 0)
{
return solve(row, col, puzzle, counter+1, startValue, finished);
}
//if the cell is empty
for(int val = 1; val <= 9; val++)
{
//loop through the values being checked recursively
// startValue cycles 1..9, so all nine digits are tried exactly once
if(++startValue == 10)
{
startValue = 1;
}
//check if the value is allowed
if(valueAllowedCheck(row, col, startValue, puzzle))
{
puzzle[row * 9 + col] = startValue; //record if it is
if(solve(row, col, puzzle, counter+1, startValue, finished)) //solve the next cell
{
return true;
}
}
}
puzzle[row * 9 + col] = 0; //set to zero if backtracking
return false;
}
// One CUDA thread per starting digit: each thread of the single launched
// block (main uses <<<1, 9>>>) copies the puzzle into a private array and
// runs the recursive backtracking solver with a different rotating start
// value. The first thread to return publishes its board, sets *finished
// and *result = 1.
// NOTE(review): sharedFinish is read/written by all threads without any
// synchronisation, and the winner's 81-element copy-back is not protected;
// two threads finishing near-simultaneously can interleave writes —
// confirm this race is acceptable for this assignment.
__global__ void parallelSudoku(int* puzzle, bool* finished, int* result)
{
int i = threadIdx.x; //the thread id
// threadIdx.y is always 0 for the 1-D <<<1,9>>> launch used in main
int j = threadIdx.y;
int startVal = (blockIdx.x * blockDim.x + threadIdx.x) % 9 +1; //Starting value (1-9) N
__shared__ volatile bool sharedFinish;
sharedFinish = *finished;
int resultTemp;
// private working copy of the board, one per thread
int puzzleArray [81];
for(int i =0; i < 81; i++)
{
puzzleArray[i] = puzzle[i];
}
if(solve(i,j,puzzleArray,0,startVal, sharedFinish))
{
if(!sharedFinish)//none of the threads have finished the puzzle
{
sharedFinish = true;
(*finished) = sharedFinish; //flag finished
for(int i =0; i < 81; i++) //copy back array
{
puzzle[i] = puzzleArray[i];
}
resultTemp = 1;
(*result) = resultTemp; //set result
}
}
else
{
// unsolvable from this start value: still raise the flag so other
// threads stop (NOTE(review): this also suppresses later successes)
if(!sharedFinish)//none of the threads have finished the puzzle
{
sharedFinish = true;
(*finished) = sharedFinish; //flag finished
}
}
}
void printPuzzle (int* puzzle ) {
// Print the 81-cell board, nine values per line, with an extra gap in
// front of each 3-cell group. The column counter deliberately replicates
// the original's wrap behaviour (reset happens one cell late, so rows
// after the first start without the leading gap).
int pos = 0; // column position within the current row
for (int idx = 0; idx < 81; idx++) {
int group = pos % 3;
if (pos > 8) { // a full row of nine was printed: wrap and break the line
pos = 0;
std::cout << "\n";
}
else if (group == 0) // gap before each group of three
{
std::cout << " ";
}
pos++;
std::cout << puzzle[idx];
std::cout << " ";
}
std::cout << "\n";
}
// Copied from assignment 1
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
// Returns the current wall-clock time in microseconds since the epoch.
long long start_timer() {
struct timeval now;
gettimeofday(&now, NULL);
return (long long) now.tv_sec * 1000000 + now.tv_usec;
}
// Prints the time elapsed since the specified time
// Prints the seconds elapsed since start_time (labelled with `name`) and
// returns the elapsed time in microseconds.
long long stop_timer(long long start_time, std::string name) {
struct timeval now;
gettimeofday(&now, NULL);
long long finish = (long long) now.tv_sec * 1000000 + now.tv_usec;
long long elapsed = finish - start_time;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) elapsed) / (1000 * 1000) << " sec\n";
return elapsed;
}
// Synchronise with the device and report any pending CUDA error, tagged
// with `label` so the failing call site can be identified.
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
// FIX: cudaThreadSynchronize() is deprecated and removed in CUDA 12;
// cudaDeviceSynchronize() is the supported equivalent.
cudaError_t err;
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
{
// cudaGetErrorString returns a const string; do not cast that away
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
// also collect any pending launch-configuration error
err = cudaGetLastError();
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main() {
// Puzzle boards, row-major 9x9; 0 marks an empty cell.
// FIX: the original heap-allocated a copy of every board and then either
// leaked it or re-pointed the pointer at another allocation; it also left
// puzzleToRun uninitialized (UB) for out-of-range menu choices and never
// freed the device buffers. Plain stack arrays are used directly instead.
int inputEasy[81] = {0,7,0, 0,5,0, 8,0,6, //row 1
8,0,0, 1,7,0, 9,5,2, //row 2
0,2,5, 0,4,6, 3,0,0, //row 3
0,0,0, 0,0,2, 0,0,0, //row 4
0,0,4, 0,9,0, 2,0,0, //row 5
0,0,0, 3,0,0, 0,0,0, //row 6
0,0,8, 5,3,0, 6,1,0, //row 7
3,5,1, 0,8,4, 0,0,9, //row 8
4,0,7, 0,2,0, 0,0,0 //row 9
};
int inputMeduim[81] = { 0,7,6, 3,0,0, 0,0,4, //row 1
0,0,0, 0,1,0, 3,9,0, //row2
3,2,9, 8,0,0, 5,0,0, //row3
0,0,0, 0,0,2, 0,8,0, //row4
2,0,0, 0,0,0, 0,0,1, //row5
0,8,0, 9,0,0, 0,0,0, //row6
0,0,2, 0,0,8, 6,4,9, //row7
0,6,7, 0,4,0, 0,0,0, //row8
9,0,0, 0,0,6, 7,5,0 //row9
};
int inputHard[81] = {0,0,3, 0,9,0, 0,5,0, //row1
0,0,9, 0,0,2, 0,0,0, //row2
0,0,0, 5,0,1, 8,0,0, //row3
4,5,0, 0,7,3, 0,0,2, //row4
0,0,0, 4,0,6, 0,0,0, //row5
2,0,0, 1,8,0, 0,4,6, //row6
0,0,2, 7,0,4, 0,0,0, //row 7
0,0,0, 2,0,0, 5,0,0, //row8
0,9,0, 0,1,0, 2,0,0 //row9
};
int inputEvil[81]= {6,0,0, 0,0,8, 9,4,0, //row1
9,0,0, 0,0,6, 1,0,0, //row2
0,7,0, 0,4,0, 0,0,0, ///row3
2,0,0, 6,1,0, 0,0,0, //row4
0,0,0, 0,0,0, 2,0,0, //row5
0,8,9, 0,0,2, 0,0,0, //row6
0,0,0, 0,6,0, 0,0,5, //row7
0,0,0, 0,0,0, 0,3,0, //row8
8,0,0, 0,0,1, 6,0,0 //row9
};
int inputUnsolve[81] = {1,2,3, 4,5,6, 7,8,0, //row1
0,0,0, 0,0,0, 0,0,2, //row2
0,0,0, 0,0,0, 0,0,3, //row3
0,0,0, 0,0,0, 0,0,4, //row4
0,0,0, 0,0,0, 0,0,5, //row5
0,0,0, 0,0,0, 0,0,6, //row6
0,0,0, 0,0,0, 0,0,7, //row7
0,0,0, 0,0,0, 0,0,8, //row8
0,0,0, 0,0,0, 0,0,9 //row9
};
int k;
cout << "Select a puzzle.\n1 = easy\n2 = medium\n3 = hard\n4 = evil\n5=unsolveable\n";
cin >> k;
int* puzzleToRun;
switch(k)
{
case 1: puzzleToRun = inputEasy;
break;
case 2: puzzleToRun = inputMeduim;
break;
case 3: puzzleToRun = inputHard;
break;
case 4: puzzleToRun = inputEvil;
break;
case 5: puzzleToRun = inputUnsolve;
break;
default:
// FIX: the original left puzzleToRun pointing at uninitialized memory
cout << "Invalid selection\n";
return 1;
}
// host variables (stack storage; no allocations to leak)
bool h_finished = false;
int h_result = 0;
int h_puzzle[81];
int* d_puzzle;
bool* d_finished;
int* d_result;
long long Total_GPU_start_time = start_timer();
cudaMalloc((void**) &d_puzzle, 81*sizeof(int));
checkErrors("cudaMalloc1");
cudaMalloc((void**) &d_finished, sizeof(bool));
checkErrors("cudaMalloc2");
cudaMalloc((void**) &d_result, sizeof(int));
checkErrors("cudaMalloc3");
cudaMemcpy(d_puzzle, puzzleToRun, 81*sizeof(int), cudaMemcpyHostToDevice);
checkErrors("cudaMemcpy1");
cudaMemcpy(d_finished, &h_finished, sizeof(bool), cudaMemcpyHostToDevice);
checkErrors("cudaMemcpy2");
cudaMemcpy(d_result, &h_result, sizeof(int), cudaMemcpyHostToDevice);
checkErrors("cudaMemcpy3");
// the recursive device-side solver needs a deep per-thread stack
size_t stackSize = 30000;
cudaDeviceSetLimit(cudaLimitStackSize, stackSize);
checkErrors("stack size");
//dim3 threadsPerBlock(9,9); runs really slow...
parallelSudoku<<<1 , 9>>>(d_puzzle, d_finished, d_result);
checkErrors("kernel error");
cudaDeviceSynchronize();
cudaMemcpy(&h_finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost);
checkErrors("cudaMemcpy4");
cudaMemcpy(h_puzzle, d_puzzle, 81*sizeof(int), cudaMemcpyDeviceToHost);
checkErrors("cudaMemcpy5");
cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
checkErrors("cudaMemcpy6");
stop_timer(Total_GPU_start_time, "\nGPU Total Run Time");
if(h_result == 1)
{
cout << "Solved\n";
}
else
{
cout << "Unsolved\n";
}
printPuzzle(h_puzzle);
// FIX: release device memory (the original never freed it)
cudaFree(d_puzzle);
cudaFree(d_finished);
cudaFree(d_result);
return 0;
}
|
8,874 | #ifdef OCTOTIGER_HAVE_CUDA
__global__ void kernel_reconstruct(double *Q, double *D1, double *U_, double *X, double omega) {
    // Placeholder kernel: only threads with block 0 / y==0 / z==0 report in
    // (threadIdx.x is deliberately not filtered, matching the original).
    if (blockIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
        printf("Hello reconstruct");
}
#endif |
8,875 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define BLOCK_SIZE 4
#define N 2000
#define CHUNK_SIZE (BLOCK_SIZE * BLOCK_SIZE)
__global__ void transpose_2D2D(float *A, float *B) {
// One thread per element: B = transpose(A), both N x N row-major.
int x = blockDim.x * blockIdx.x + threadIdx.x; // column in A
int y = blockDim.y * blockIdx.y + threadIdx.y; // row in A
if (x >= N || y >= N) return; // guard the grid tail
B[x * N + y] = A[y * N + x];
}
// Transpose via a CHUNK_SIZE x CHUNK_SIZE staging tile in shared memory.
// Launch contract (from main): grid of (N/CHUNK_SIZE)^2 blocks of
// BLOCK_SIZE x BLOCK_SIZE threads, so the BLOCK_SIZE^2 == CHUNK_SIZE
// threads of a block each copy one full row of the tile.
// NOTE(review): no bounds checks — assumes CHUNK_SIZE divides N exactly
// (true for N=2000, CHUNK_SIZE=16); confirm if sizes ever change.
__global__ void transpose_shmem(float *A, float *B) {
__shared__ float chunk[CHUNK_SIZE * CHUNK_SIZE];
int col_chunk = blockIdx.x * CHUNK_SIZE; // left edge of the tile in A
int row_chunk = blockIdx.y * CHUNK_SIZE; // top edge of the tile in A
int col = col_chunk;
// flatten the 2D thread index into a row number within the tile
int row_offset = threadIdx.y * BLOCK_SIZE + threadIdx.x;
int chunk_offset = CHUNK_SIZE * row_offset;
int row = row_chunk + row_offset;
int A_offset = row * N + col;
// stage one row of the tile into shared memory
for (int k = 0; k < CHUNK_SIZE; k++) {
chunk[chunk_offset + k] = A[A_offset + k];
}
__syncthreads(); // the whole tile must be loaded before transposed readback
int row_out = row_chunk;
int col_out = col_chunk + row_offset;
int out = col_out * N + row_out;
// write one transposed row of B by reading the tile column-wise
for (int k = 0; k < CHUNK_SIZE; k++) {
B[out + k] = chunk[row_offset + CHUNK_SIZE * k];
}
}
void print_matrix(float *A) {
// Dump the N x N matrix row by row, three decimals per entry.
for (int r = 0; r < N; r++) {
for (int c = 0; c < N; c++)
printf("%.3f ", A[r * N + c]);
printf("\n");
}
}
// Returns 1 iff B is exactly the element-wise transpose of the N x N
// matrix A, 0 otherwise (exact float comparison: the data was copied,
// not recomputed).
int check_transpose(float *A, float *B) {
for (int r = 0; r < N; r++)
for (int c = 0; c < N; c++)
if (A[r * N + c] != B[c * N + r]) return 0;
return 1;
}
// Elapsed milliseconds from t0 to t1 (seconds and microseconds folded in).
double diff_ms(struct timeval t0, struct timeval t1) {
double sec_part = (t1.tv_sec - t0.tv_sec) * 1000.0;
double usec_part = (t1.tv_usec - t0.tv_usec) / 1000.0;
return sec_part + usec_part;
}
// Report whether B equals transpose(A) plus the elapsed time for `label`.
void print_results(char* label, float *A, float *B, struct timeval t0, struct timeval t1) {
const char *status = check_transpose(A, B) ? "success" : "failure";
printf("%s -- status: %s, time: %lf ms\n", label, status, diff_ms(t0, t1));
}
// Benchmark driver: times the naive and the shared-memory transpose
// kernels on an N x N matrix of random floats and validates both.
// Timings include the device-to-host copy of the result.
// NOTE(review): no CUDA error checking anywhere; N (2000) must stay
// divisible by CHUNK_SIZE for transpose_shmem to be valid.
int main() {
struct timeval t0, t1;
size_t size = sizeof(float) * N * N;
// step 1: allocate memory in GPU
float *d_A, *d_B, *d_C;
cudaMalloc((void**) &d_A, size);
cudaMalloc((void**) &d_B, size);
cudaMalloc((void**) &d_C, size);
// step 2: allocate memory in CPU
float *h_A, *h_B;
h_A = (float *) malloc(size);
h_B = (float *) malloc(size);
for (int i = 0; i < N * N; i++) h_A[i] = (float) rand() / RAND_MAX;
// step 3: transfer data from CPU to GPU
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
// steps 4-5: invoke kernel routine, transfer results
// 2D2D implementation
gettimeofday(&t0, NULL);
dim3 block_2D2D (BLOCK_SIZE, BLOCK_SIZE);
dim3 grid_2D2D (ceil((float) N / block_2D2D.x), ceil((float) N / block_2D2D.y));
transpose_2D2D<<<grid_2D2D, block_2D2D>>>(d_A, d_B);
cudaDeviceSynchronize();
cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
gettimeofday(&t1, NULL);
print_results("2D2D", h_A, h_B, t0, t1);
// shared memory implementation
// (each block has BLOCK_SIZE^2 threads but covers a CHUNK_SIZE tile)
gettimeofday(&t0, NULL);
dim3 block_shmem (BLOCK_SIZE, BLOCK_SIZE);
dim3 grid_shmem (ceil((float) N / CHUNK_SIZE), ceil((float) N / CHUNK_SIZE));
transpose_shmem<<<grid_shmem, block_shmem>>>(d_A, d_C);
cudaDeviceSynchronize();
cudaMemcpy(h_B, d_C, size, cudaMemcpyDeviceToHost);  // h_B is reused for both checks
gettimeofday(&t1, NULL);
print_results("shared memory", h_A, h_B, t0, t1);
// step 6: free memory in GPU
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// step 7: free memory in CPU
free(h_A);
free(h_B);
return 0;
}
8,876 |
// GPU kernel
// Each thread accumulates one term of the alternating series
// (-1)^i / (i + 1) into its own slot of data_out.
__global__ void summation_kernel(int data_size, float * data_out)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= data_size) return; // guard the grid tail
float term = 1.0f / (idx + 1.0f);
if (idx % 2 != 0) term = -term; // odd-indexed terms are subtracted
data_out[idx] += term;
}
// Tree reduction: sums blockDim.x elements of data_out per block and
// writes one partial sum per block into data_block.
// Requires dynamic shared memory of blockDim.x * sizeof(float) at launch.
// NOTE(review): data_size is declared float; the `i < data_size` guard
// still works, but an int parameter would be clearer — confirm callers.
__global__ void reduce(float data_size, float * data_out, float * data_block) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// out-of-range threads contribute the additive identity
sdata[tid] = (i < data_size) ? data_out[i] : 0;
__syncthreads();
// interleaved-addressing reduction; indexing by 2*s*tid keeps the active
// threads contiguous (less warp divergence than the naive tid % (2s) form)
for(unsigned int s = 1; s < blockDim.x; s *= 2) {
int index = 2 * s * tid;
if(index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads(); // every step must finish before the next halving
}
if(tid == 0) data_block[blockIdx.x] = sdata[0];
}
|
8,877 | #include <stdio.h>
#include <stdlib.h>
#define TILE 16
// Tiled matrix transpose with conflict-free shared memory.
// Grid: (inp/TILE, outp/TILE) blocks of TILE x TILE threads.
// FIX: the original left BOTH exercise variants' output stores active
// (the first store was dead code, immediately overwritten), and the live
// combination indexed the output with threadIdx.y fastest, producing
// strided, uncoalesced global writes. This is the preferred combination:
// threadIdx.x stays the fastest-varying output coordinate (coalesced
// read AND write), and the transposed shared-memory read is
// bank-conflict-free thanks to the TILE+1 padding.
__global__ void transposeNoBankConflicts(float *odata, float *idata,\
int in_pitch, int out_pitch) {
__shared__ float tile[TILE][TILE+1]; // +1 pad breaks column bank conflicts
int xIndex = blockIdx.x * TILE + threadIdx.x;
int yIndex = blockIdx.y * TILE + threadIdx.y;
int index_in = xIndex + (yIndex)*in_pitch;
// coalesced load of one input tile
tile[threadIdx.y][threadIdx.x] = idata[index_in];
__syncthreads(); // tile fully loaded before any transposed read
// swap the BLOCK coordinates but keep threadIdx.x in the x direction so
// consecutive threads write consecutive output addresses
xIndex = blockIdx.y * TILE + threadIdx.x;
yIndex = blockIdx.x * TILE + threadIdx.y;
int index_out = xIndex + (yIndex)*out_pitch;
odata[index_out] = tile[threadIdx.x][threadIdx.y];
}
// Host wrapper: launches the tiled transpose after checking that both
// pitches are exact multiples of the tile size.
extern "C" void transpose (float *matrix_in, float *matrix_out, int inp, int outp) {
if ((inp % TILE != 0) || (outp % TILE != 0)) {
fprintf (stderr, "Size problem...\n");
exit (EXIT_FAILURE);
}
dim3 grid (inp / TILE, outp / TILE);
dim3 threads (TILE, TILE);
transposeNoBankConflicts<<<grid,threads>>>(matrix_out, matrix_in, inp, outp);
}
|
8,878 | #include <stdio.h>
// z[i] += alpha * x[i] + y[i] for every i in [0, num_elements).
__global__ void saxpy(const size_t num_elements, const float alpha,
                      const float *x, const float *y, float *z) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= num_elements) return; // guard the grid tail
  z[i] += alpha * x[i] + y[i];
}
int main() {
  // 2^20 elements; expected result everywhere: 0 + 2*1 + 2 = 4.
  const size_t count = 1 << 20;
  const size_t bytes = count * sizeof(float);
  // Host buffers.
  float *x = (float *)malloc(bytes);
  float *y = (float *)malloc(bytes);
  float *z = (float *)malloc(bytes);
  // Device buffers.
  float *d_x, *d_y, *d_z;
  cudaMalloc(&d_x, bytes);
  cudaMalloc(&d_y, bytes);
  cudaMalloc(&d_z, bytes);
  for (size_t i = 0; i < count; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
    z[i] = 0.0f;
  }
  cudaMemcpyAsync(d_x, x, bytes, cudaMemcpyHostToDevice);
  cudaMemcpyAsync(d_y, y, bytes, cudaMemcpyHostToDevice);
  cudaMemcpyAsync(d_z, z, bytes, cudaMemcpyHostToDevice);
  // ceil-div launch: 256 threads per block.
  saxpy<<<(count + 255) / 256, 256>>>(count, 2.0f, d_x, d_y, d_z);
  cudaMemcpyAsync(z, d_z, bytes, cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize(); // wait for the async copy before reading z
  // Max absolute deviation from the expected value.
  float error = 0.0;
  for (size_t i = 0; i < count; i++)
    error = fmax(error, fabs(z[i] - 4.0f));
  printf("error: %e (%s)\n", error, error == 0.0 ? "PASS" : "FAIL");
  cudaFree(d_x);
  cudaFree(d_y);
  cudaFree(d_z);
  free(x);
  free(y);
  free(z);
  return 0;
}
|
8,879 | /**
* description:
* Simple genetic algorithm for finding equation that equals target number
* example for number 27 has many results:
* - 7+29-8-1
* - 15+28-16
* this code isn't really perfect ^_^ but shows some basics.
* tested in linux fedora 14 with valgrind.
*
* author: ADRABI Abderrahim (adrabi[at]mail[dot]ru)
* date: 2011-10-03
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#define POPSIZE 1024
#define ELITRATE 0.1f
#define MUTATIONRATE 0.25f
#define ELEMENTS 8
#define MUTATION RAND_MAX * MUTATIONRATE
#define TARGET 5270
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**
* Basic elements for construction an equation
*/
static const char *BIN_ELEMENTS[12] = {
"0000\0", // 0
"0001\0", // 1
"0010\0", // 2
"0011\0", // 3
"0100\0", // 4
"0101\0", // 5
"0110\0", // 6
"0111\0", // 7
"1000\0", // 8
"1001\0", // 9
"1010\0", // +
"1011\0" // -
};
/**
* Structure base of genome
*/
typedef struct
{
unsigned int fitness;
char *gen;
} ga_struct;
char* dev_pop;
ga_struct* dev_betapop;
char *dev_gen;
// Device-side string equality: returns 1 when the two NUL-terminated
// strings are identical, 0 at the first mismatch.
__device__ int
stringCompare (char str1[], char str2[])
{
int i = 0;
while (str1[i] == str2[i])
{
if (str1[i] == '\0')
return 1; /* matched all the way to the terminator */
i++;
}
return 0;
}
/**
* Initialize new random population
*/
// Allocate and randomly initialise both generations: every individual gets
// a genome buffer of ELEMENTS four-character binary codes (+ terminator);
// only `population` is filled with random codes, `beta_population` starts
// as empty strings.
void
init_population (ga_struct * population, ga_struct * beta_population)
{
const int bin_size = (sizeof (char) * ELEMENTS * 4) + 1;
for (int idx = 0; idx < POPSIZE; idx++)
{
population[idx].fitness = 0;
population[idx].gen = (char *) malloc (bin_size);
population[idx].gen[0] = '\0';
beta_population[idx].fitness = 0;
beta_population[idx].gen = (char *) malloc (bin_size);
beta_population[idx].gen[0] = '\0';
/* append ELEMENTS random codes (12 possible symbols) */
for (int e = 0; e < ELEMENTS; e++)
strcat (population[idx].gen, BIN_ELEMENTS[(rand () % 12)]);
}
}
/*
 * Evaluate genome fitness on the device.
 *
 * `population` holds a genome as a string of e (8) four-character binary
 * codes: "0000".."1001" are the digits 0-9, "1010" is '+', "1011" is '-'.
 * The encoded expression is evaluated left to right and the fitness is
 * |sum - t| where t is the target value.
 *
 * BUG FIX: the original copied each code into `char element[4]` with no
 * NUL terminator and then ran string comparisons against 5-byte literals,
 * reading past the end of the buffer (undefined behaviour). `element` is
 * now 5 bytes and explicitly terminated, and the twelve comparisons are
 * replaced by a direct binary decode with the same accept/reject rules.
 *
 * NOTE(review): the outer loop re-evaluates the same buffer p times and
 * the fitness is only printf'd, never stored — presumably unfinished
 * porting work; confirm against the host-side caller.
 */
__global__ void
cal_fitness (char population[])
{
int e = 8;      /* ELEMENTS */
int p = 1024;   /* POPSIZE  */
int t = 5270;   /* TARGET   */
for (int index = 0; index < p; index++)
{
char *gen_str = population;
int sum = 0, current_value = 0, step = 0;
/* unsigned wrap makes (step - last_operator_index) == step + 1 initially,
   so an operator is rejected at step 0 and 1 exactly as before */
unsigned int last_operator_index = -1;
char last_operator = (char) 0;
for (; step < e; step++)
{
/* copy the next 4-character code and NUL-terminate it */
char element[5];
element[0] = gen_str[0];
element[1] = gen_str[1];
element[2] = gen_str[2];
element[3] = gen_str[3];
element[4] = '\0';
/* decode "0000".."1011" into 0..11; anything else is malformed */
int code = 0;
for (int b = 0; b < 4; b++)
{
if (element[b] == '1')
code = code * 2 + 1;
else if (element[b] == '0')
code = code * 2;
else
{
code = -1;
break;
}
}
if (code > 11)
code = -1;
if (code >= 0 && code <= 9)
{
/* digit: extend the operand currently being built */
current_value = (current_value * 10) + code;
}
else if ((code == 10 || code == 11)
&& step - last_operator_index > 1 && step + 1 < e)
{
/* operator: fold the pending operand into the running sum */
if (last_operator == '+')
sum += current_value;
else if (last_operator == '-')
sum -= current_value;
else
sum = current_value; /* first operand */
current_value = 0;
last_operator_index = step;
last_operator = (code == 10) ? '+' : '-';
}
else
{
/* malformed genome: unknown code, operators too close together,
   or a trailing operator */
sum = 999999;
break;
}
gen_str += 4;
}
/* fold in the final operand */
if (last_operator == '+')
{
sum += current_value;
}
else if (last_operator == '-')
{
sum -= current_value;
}
/* abs, because fitness is unsigned integer ^_^ */
printf ("fitness = %d", abs (sum - t));
}
}
/**
* sort function needed by quick sort
*/
// qsort comparator: orders individuals by ascending fitness.
int
sort_func (const void *e1, const void *e2)
{
const ga_struct *a = (const ga_struct *) e1;
const ga_struct *b = (const ga_struct *) e2;
return a->fitness - b->fitness;
}
/**
* sort population by fitness
*/
// Order the whole population so the fittest (lowest fitness) comes first.
inline void
sort_by_fitness (ga_struct * population)
{
qsort ((void *) population, POPSIZE, sizeof (ga_struct), sort_func);
}
/**
* select elit element in top array after sort
*/
// Copy the genomes of the top `esize` individuals (already sorted best
// first) into the next generation unchanged, terminator included.
void
elitism (ga_struct * population, ga_struct * beta_population, int esize)
{
const int gen_len = ELEMENTS * 4 + 1;
for (int idx = 0; idx < esize; idx++)
{
for (int c = 0; c < gen_len; c++)
beta_population[idx].gen[c] = population[idx].gen[c];
}
}
/**
* mutate an individual with random rate
*/
// Flip a random number (0-9) of random bits in the individual's genome.
// rand() is called in the same order as before: once for the count, then
// once per flip for the position.
void
mutate (ga_struct * member)
{
int len = strlen (member->gen);
int flips = rand () % 10;
for (int m = 0; m < flips; m++)
{
int pos = rand () % len;
member->gen[pos] = (member->gen[pos] == '0') ? '1' : '0';
}
}
/**
* mate randomly the rest of population after elitism
*/
// Build the next generation: keep the elites, then fill the rest with
// single-point crossover children of randomly chosen parents, each
// optionally mutated.
void
mate (ga_struct * population, ga_struct * beta_population)
{
int esize = POPSIZE * ELITRATE; /* number of elites copied through */
// elitism of top elements in array
elitism (population, beta_population, esize);
// mate the rest of shitty population xD
int m = esize, i1 = -1, i2 = -1, pos = -1, tsize = ELEMENTS * 4 + 1;
for (; m < POPSIZE; m++)
{
pos = rand () % tsize;      /* crossover point */
i1 = rand () % POPSIZE;     /* first parent (may equal the second) */
i2 = rand () % POPSIZE;     /* second parent */
// child = parent1[0..pos) + parent2[pos..tsize)
int i = 0;
for (; i < pos; i++)
{
beta_population[m].gen[i] = population[i1].gen[i];
}
for (i = pos; i < tsize; i++)
{
beta_population[m].gen[i] = population[i2].gen[i];
}
// mutate with probability MUTATIONRATE (MUTATION = RAND_MAX * rate)
if (rand () < MUTATION)
{
mutate (&beta_population[m]);
}
}
}
/**
* decode binary string to readable format
*/
// Decode the genome's 4-character binary codes into a human-readable
// equation and print it (digits 0-9, '+', '-'), one symbol per code.
void
decode_gen (ga_struct * member)
{
static const char *codes[12] =
{ "0000", "0001", "0010", "0011", "0100", "0101",
"0110", "0111", "1000", "1001", "1010", "1011" };
static const char *symbols[12] =
{ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "-" };
char *cursor = member->gen;
for (int step = 0; step < ELEMENTS; step++)
{
/* copy the next code into a terminated buffer */
char element[5] = "\0";
strncpy (element, cursor, 4);
/* unknown codes print nothing, matching the original if/else chain */
for (int s = 0; s < 12; s++)
{
if (strcmp (codes[s], element) == 0)
{
printf ("%s", symbols[s]);
break;
}
}
cursor += 4;
}
printf ("\n");
}
/**
* free memory before exit program
*/
// Free a population's genome buffers and the array itself.
// NOTE(review): this is a __global__ kernel, but `population` and every
// `gen` buffer were allocated with host-side malloc (see init_population);
// calling the device-side free() on host pointers is invalid. The freeing
// should happen on the host — confirm and fix at the call sites in main().
__global__ void
free_population (ga_struct * population)
{
int index = 0;
for (; index < POPSIZE; index++)
{
free (population[index].gen);
}
free (population);
}
/**
* swap arrays pointers
*/
// Exchange the two population-array pointers (generation flip after mate).
void
swap (ga_struct ** p1, ga_struct ** p2)
{
ga_struct *hold = *p1;
*p1 = *p2;
*p2 = hold;
}
/**
* main program
*/
// Driver: initialise a random population, repeatedly evaluate fitness on
// the GPU, sort, report the best individual, and mate the next generation.
int
main (void)
{
//printf("begin");
float cpu1,cpu2;
cpu1 = ((float) clock())/CLOCKS_PER_SEC;
srand (time (NULL));
ga_struct *population = (ga_struct*)malloc (sizeof (ga_struct) * POPSIZE);
ga_struct *beta_population = (ga_struct*)malloc (sizeof (ga_struct) * POPSIZE);
init_population (population, beta_population);
//cudaMalloc((ga_struct**)&dev_betapop,sizeof (ga_struct) * POPSIZE);
dim3 numBlocks(1,1);
dim3 threads_per_block(1,1);
//printf("test gen2, %s", population[0].gen);
// NOTE(review): this heap buffer is assigned to the file-scope dev_gen and
// immediately shadowed by the local array below — the allocation leaks.
dev_gen = (char*)malloc(sizeof(char)*POPSIZE);
char *dev_gen[POPSIZE];
int i = 0;
for (; i < POPSIZE; i++)
{
dev_gen[i] = population[i].gen;
}
//printf("test gen, %s", dev_gen[0]);
//printf("test gen2, %s", population[i].gen);
int index = 0;
for (; index < POPSIZE; index++)
{
//printf("before func");
//cudaMemcpy(dev_pop,population,sizeof(ga_struct)*POPSIZE,cudaMemcpyHostToDevice);
//cudaMemcpy(dev_gen,(*population).gen,sizeof(char)*POPSIZE,cudaMemcpyHostToDevice);
//cudaMemcpy(&((*dev_pop).gen),&dev_gen,sizeof(char)*POPSIZE,cudaMemcpyHostToDevice);
// NOTE(review): allocated every iteration and never freed (device leak).
cudaMalloc((void**)&dev_pop,sizeof (char) * POPSIZE);
//cudaMalloc(&dev_gen,sizeof (char) * POPSIZE);
// NOTE(review): this copies POPSIZE raw bytes of the host POINTER array,
// not the genome strings those pointers reference — confirm intent.
cudaMemcpy(dev_pop,dev_gen,sizeof(char)*POPSIZE,cudaMemcpyHostToDevice);
cal_fitness<<<1,1>>>(dev_pop);
//printf("test dev pop = %s", dev_gen[0]);
//gpuErrchk( cudaPeekAtLastError() );
//printf("after func");
//cal_fitness((ga_struct*)population);
cudaMemcpy(dev_gen,dev_pop,sizeof(char)*POPSIZE,cudaMemcpyDeviceToHost);
sort_by_fitness (population);
// print current best individual
printf ("binary string: %s - fitness: %d\n", population[0].gen,
population[0].fitness);
// NOTE(review): cal_fitness only printf's fitness and never writes it
// back, so population[].fitness stays 0 from init_population and this
// break is taken on the first iteration — confirm intended behaviour.
if (population[0].fitness == 0)
{
//~ print equation
decode_gen (&population[0]);
break;
}
mate (population, beta_population);
swap (&population, &beta_population);
}
// NOTE(review): frees host-malloc'd memory from a kernel — see
// free_population for details.
free_population<<<numBlocks,threads_per_block>>>((ga_struct*)population);
free_population<<<numBlocks,threads_per_block>>>((ga_struct*)beta_population);
cpu2 = ((float) clock())/CLOCKS_PER_SEC;
printf("Execution time (s) = %le\n",cpu2-cpu1);
return 0;
}
|
8,880 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
// Salt-and-pepper noise: with probability noiseP percent, overwrite a
// pixel with pure black or pure white on all three channels.
// i_size is the row pitch in pixels; one thread per pixel.
// NOTE(review): no x/y bounds guard — assumes the launch grid exactly
// covers the image; confirm at the call site.
__global__ void PPnoise(unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size, int noiseP, int seed){
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
curandState_t state;
// NOTE(review): curand_init's parameters are (seed, subsequence, offset,
// state); using x as subsequence and y as offset means threads sharing x
// draw from overlapping streams — typically the flat thread id is used as
// the subsequence. Confirm whether this correlation matters here.
curand_init(seed, x, y, &state);
// first draw picks black vs white, second decides whether to corrupt
unsigned char noise = (unsigned char)(curand(&state) % 100);
if(curand(&state) % 100 < noiseP){
noise = 255 * (noise % 2);
R_input[offset] = noise;
G_input[offset] = noise;
B_input[offset] = noise;
}
}
|
8,881 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// One thread per element: write the octal digits of a[tid], packed as a
// base-10 number, into b[tid] (e.g. 8 -> 10, 64 -> 100; 0 stays 0).
__global__ void convert(int *a, int *b,int n)
{
int tid = threadIdx.x;
int value = a[tid];
int result = 0;
int place = 1; // decimal place for the next octal digit
while (value > 0)
{
result += (value % 8) * place;
value /= 8;
place *= 10;
}
b[tid] = result;
}
int main(void)
{
// Read up to 100 decimal values, convert each to its octal representation
// on the GPU, and print the results.
int a[100],n,i,b[100],size;
printf("Enter the no of decimal values to be converted to Octal\n");
// FIX: the original never validated n against the fixed 100-element
// buffers (stack overflow for larger inputs) and ignored scanf failures.
if (scanf("%d", &n) != 1 || n < 1 || n > 100)
{
fprintf(stderr, "Invalid count: expected a number between 1 and 100\n");
return 1;
}
printf("Enter the Decimal values\n");
for(i=0;i<n;i++)
{
if (scanf("%d", &a[i]) != 1)
{
fprintf(stderr, "Invalid decimal value\n");
return 1;
}
}
int *d_a, *d_b;
size = sizeof(int);
cudaMalloc((void **)&d_a,n*size);
cudaMalloc((void **)&d_b,n*size);
cudaMemcpy(d_a,a,n*size,cudaMemcpyHostToDevice);
// one block, one thread per value (n <= 100 fits in a single block)
convert<<<1,n>>>(d_a,d_b,n);
cudaMemcpy(b,d_b,n*size,cudaMemcpyDeviceToHost);
for(i=0;i<n;i++)
{
printf("%d\n", b[i]);
}
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
8,882 | #include "MakeProjection_kernel.cu"
#define THREADSPERBLOCK 512
// Host wrapper: uploads eigvec and indices, runs makeProjection with one
// thread per matrix element, and downloads the E / Et results.
extern "C" void TestMakeProjection( int n, int m, float* eigvec, int* indices, float* E, float* Et ){
const size_t matBytes = (size_t) m * n * sizeof( float );
float *gpuEt, *gpuE, *gpuEigvec;
int *gpuIndices;
cudaMalloc( ( void ** ) &gpuEt, matBytes );
cudaMalloc( ( void ** ) &gpuE, matBytes );
cudaMalloc( ( void ** ) &gpuEigvec, matBytes );
cudaMemcpy( gpuEigvec, eigvec, matBytes, cudaMemcpyHostToDevice );
cudaMalloc( ( void ** ) &gpuIndices, m * sizeof( int ) );
cudaMemcpy( gpuIndices, indices, m * sizeof( int ), cudaMemcpyHostToDevice );
// single block when everything fits, multi-block (ceil-div) otherwise
const int total = m * n;
if( total <= THREADSPERBLOCK ) {
makeProjection <<< 1, total >>>( gpuEt, gpuE, gpuEigvec, gpuIndices, m, n );
}else{
makeProjection <<< total / THREADSPERBLOCK + 1, THREADSPERBLOCK >>>( gpuEt, gpuE, gpuEigvec, gpuIndices, m, n );
}
cudaMemcpy( Et, gpuEt, matBytes, cudaMemcpyDeviceToHost );
cudaMemcpy( E, gpuE, matBytes, cudaMemcpyDeviceToHost );
cudaFree( gpuEt );
cudaFree( gpuE );
cudaFree( gpuEigvec );
cudaFree( gpuIndices );
}
|
8,883 | // Matrix Multiply
#include<stdio.h>
// perform MatrixMul on Device
// perform MatrixMul on Device.
// Each block computes a contiguous chunk of rows of C = A x B; each thread
// owns one output column. Launch contract: gridDim.x divides *matrixSize
// and blockDim.x == *matrixSize.
// FIXES: the original loop bound (i < start + chunk - 1) skipped the last
// row of every chunk, and `sum` was an int, truncating the fractional
// float products before the store.
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
int n = *matrixSize;
int chunk = n / gridDim.x;
int rowStart = blockIdx.x * chunk;
for(int i = rowStart; i < rowStart + chunk; i++) {
float sum = 0.0f;
for(int k = 0; k < n; k++) {
sum += A[i * n + k] * B[k * n + threadIdx.x];
}
C[i * n + threadIdx.x] = sum;
}
}
// Recompute A x B on the host and compare against C.
// Returns 1 on match, 0 on mismatch.
// FIX: the original used exact float inequality; the GPU may contract
// multiply-add sequences into FMA, so bit-identical results are not
// guaranteed — compare with a small absolute+relative tolerance instead.
int MatrixMulHostValidate(float *A, float *B, float *C, int dim)
{
for (int i= 0; i< dim; i++)
{
for (int j = 0; j < dim; j++)
{
float sum = 0.0f;
for (int k = 0; k < dim; k++) {
sum += A[ i* dim + k ] * B[ k * dim + j ];
}
float diff = C[ i* dim + j ] - sum;
if (diff < 0.0f) diff = -diff;           /* |C - sum| without math.h */
float mag = (sum < 0.0f) ? -sum : sum;
if (diff > 1e-3f + 1e-4f * mag)
return 0;
}
}
return 1;
}
// Fill the dim x dim matrix with A[i][j] = (i + j) / dim.
void initMatrix(float *A, int dim) {
for (int r = 0; r < dim; r++)
{
float *row = A + r * dim;
for (int c = 0; c < dim; c++)
{
row[c] = ((float) r + c) / dim;
}
}
}
// Driver: multiply two 512x512 matrices on the GPU with a 4-block launch
// (each block computes 128 rows, one thread per column) and validate on
// the host.
// NOTE(review): tpb (512) must equal dim for the per-column threading to
// cover the matrix; no CUDA error checking is performed.
int main(void) {
float *A, *B, *C;
int dim = 512;
float *d_A, *d_B, *d_C;
int *d_matrixSize;
// Allocate memory for the matrices.
A = (float *) malloc(sizeof(float) * dim * dim);
B = (float *) malloc(sizeof(float) * dim * dim);
C = (float *) malloc(sizeof(float) * dim * dim);
// I/O to load A, B and C.
initMatrix(A, dim);
initMatrix(B, dim);
// define thread hierarchy
int nblocks= 4;
int tpb= 512;
// allocate device memory
size_t memSize;
memSize= dim * dim * sizeof(float);
cudaMalloc( (void**) &d_A, memSize);
cudaMalloc( (void**) &d_B, memSize);
cudaMalloc( (void**) &d_C, memSize);
// NOTE(review): allocation/copy below use sizeof(float) for an int value;
// this only works because sizeof(float) == sizeof(int) on this platform —
// should be sizeof(int).
cudaMalloc( (void**) &d_matrixSize, sizeof(float));
// initialize device memory
cudaMemcpy(d_A, A, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixSize, &dim, sizeof(float), cudaMemcpyHostToDevice);
// launch kernel
dim3 dimGrid(nblocks);
dim3 dimBlock(tpb);
// perform MatrixMulon Device
MatrixMulDevice<<< dimGrid, dimBlock>>>(d_A, d_B, d_C, d_matrixSize);
// retrieve results (blocking copy also synchronises with the kernel)
cudaMemcpy(C, d_C, memSize, cudaMemcpyDeviceToHost);
// verfiy results
if(!MatrixMulHostValidate(A, B, C, dim))
fprintf(stderr, "Wrong results for matrix multiply\n");
else
printf("Matrix multiply was successful\n");
// Free memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_matrixSize);
free(A);
free(B);
free(C);
}
|
8,884 | #include <stdio.h>
#include "cs_cuda.h"
#include "cs_helper.h"
#include <stdlib.h>
// #define CUDA_DBG
// #define CUDA_DBG1
// Replicate-pad each frame: every output element maps back to a source
// pixel with x/y clamped into the original frame (edge replication);
// frames beyond num_of_frames (the zadd temporal padding) are zero-filled.
// A strided loop (fixed CUDA_MAX_THREADS step) covers all `size` outputs.
// NOTE(review): assumes the launch width matches CUDA_MAX_THREADS — see
// h_block_adj; confirm if the launch config ever changes.
template<typename F, typename T>
__global__ void d_expand_frame_T ( F *d_input, T *d_output,
int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames,
int o_frsize, int n_frsize, int size )
{
int frame_idx, frame_left, o_x, o_y ;
int t_idx = blockIdx.x*blockDim.x + threadIdx.x;
while ( t_idx < size )
{
frame_idx = t_idx / n_frsize ;
if ( frame_idx >= num_of_frames )
d_output[ t_idx ] = 0 ;   // temporal padding frames are zeroed
else
{
frame_left = t_idx % n_frsize ;   // offset within the padded frame
// map the padded x coordinate back into [0, xdim), clamping edges
o_x = frame_left % ( xdim + xadd * 2 ) ;
if ( o_x < xadd )
o_x = 0 ;
else if ( o_x >= ( xdim + xadd ))
o_x = xdim - 1 ;
else
o_x -= xadd ;
// map the padded y coordinate back into [0, ydim), clamping edges
o_y = frame_left / ( xdim + xadd * 2 ) ;
if ( o_y < yadd )
o_y = 0 ;
else if ( o_y >= ( yadd + ydim ))
o_y = ydim - 1 ;
else
o_y -= yadd ;
d_output[ t_idx ] = d_input[ frame_idx * o_frsize +
o_y * xdim + o_x ] ;
}
t_idx += CUDA_MAX_THREADS ;
}
}
/*
h_expand_frame: expand the current frames in a block
this is supposed to be the first step after pix are copied into
the device memory d_input
d_input : device address of input
should have size ( xdim * ydim * num_of_frames ) ;
d_output: device address of output
should have size ( xdim + xadd * 2 ) * ( ydim + yadd * 2 ) *
( num_of_frames + zadd )
xdim : frame H size
ydim : frame V size
xadd : size of H pix added on each side
yadd : size of V pix added on each side
zadd : size of T pix added at the end , content value is 0
num_of_frames : with data in d_input
*/
// Host launcher for d_expand_frame_T: computes the padded frame geometry,
// sizes the grid via h_block_adj, launches the kernel and waits for it.
// FIX: cudaThreadSynchronize() is deprecated and removed in CUDA 12;
// cudaDeviceSynchronize() is the supported equivalent.
template<typename F, typename T>
void
h_expand_frame_T ( F *d_input, T *d_output, int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames )
{
int o_framesize, n_framesize, n ;
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ;
#ifdef CUDA_DBG1
fprintf( stderr, "%s: d_input %p dout %p x/y %d %d add x/y/z %d %d %d z %d\n",
__func__, d_input, d_output, xdim, ydim, xadd, yadd, zadd,
num_of_frames ) ;
#endif
o_framesize = xdim * ydim ;
// each frame grows by the padding on both sides in x and y
n_framesize = ( xdim + xadd * 2 ) * ( ydim + yadd * 2 ) ;
// total output elements, including zadd zero-filled trailing frames
n = n_framesize * ( num_of_frames + zadd ) ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: old %d new %d n %d \n",
__func__, o_framesize, n_framesize, n ) ;
#endif
h_block_adj ( n, nThreadsPerBlock, &nBlocks ) ;
d_expand_frame_T<F, T> <<< nBlocks, nThreadsPerBlock >>>
( d_input, d_output,
xdim, ydim, xadd, yadd, zadd, num_of_frames,
o_framesize, n_framesize, n ) ;
cudaDeviceSynchronize() ;
}
// Explicit instantiations: only int->int and float->float expansions are
// emitted into this translation unit.
template void h_expand_frame_T<int, int> ( int *d_input, int *d_output, int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames ) ;
template void h_expand_frame_T<float, float> ( float *d_input, float *d_output, int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames ) ;
|
8,885 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SZ 16
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
    // Tiled GEMM: C (m x n) = A (m x k) * B (k x n), all row-major.
    // Each thread block produces one TILE_SZ x TILE_SZ tile of C, staging the
    // matching tiles of A and B through shared memory each phase.
    __shared__ float tileA[TILE_SZ][TILE_SZ];
    __shared__ float tileB[TILE_SZ][TILE_SZ];

    const unsigned int ty = threadIdx.y;
    const unsigned int tx = threadIdx.x;
    const unsigned int r  = blockIdx.y * blockDim.y + ty;
    const unsigned int c  = blockIdx.x * blockDim.x + tx;

    float acc = 0;
    const unsigned int numTiles = (k - 1) / TILE_SZ + 1;
    for (unsigned int t = 0; t < numTiles; ++t) {
        // Load this phase's tiles; zero-pad loads that fall off the matrices.
        const unsigned int aCol = t * TILE_SZ + tx;
        const unsigned int bRow = t * TILE_SZ + ty;
        tileA[ty][tx] = (r < m && aCol < k) ? A[r * k + aCol] : 0;
        tileB[ty][tx] = (bRow < k && c < n) ? B[bRow * n + c] : 0;
        __syncthreads();
        // Partial dot product over this tile (skipped by out-of-range threads,
        // which never write C anyway).
        if (r < m && c < n)
            for (unsigned int e = 0; e < TILE_SZ; ++e)
                acc += tileA[ty][e] * tileB[e][tx];
        __syncthreads();
    }
    // Only threads owning an element of C write out.
    if (r < m && c < n)
        C[r * n + c] = acc;
}
// Simplified SGEMM driver: only C = A * B is supported (no transposition,
// alpha == 1, beta == 0); anything else is rejected with a message.
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    if (transa != 'N' && transa != 'n') {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if (transb != 'N' && transb != 'n') {
        printf("unsupported value of 'transb'\n");
        return;
    }
    const float dAlpha = alpha - 1.0f;
    if (dAlpha > 1e-10 || dAlpha < -1e-10) {
        printf("unsupported value of alpha\n");
        return;
    }
    const float dBeta = beta - 0.0f;
    if (dBeta > 1e-10 || dBeta < -1e-10) {
        printf("unsupported value of beta\n");
        return;
    }
    // One thread per output element, TILE_SZ x TILE_SZ blocks, grid rounded up
    // so the whole m x n output is covered.
    const unsigned int BLOCK_SIZE = TILE_SZ;
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (m + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
    mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
}
|
8,886 | #include "includes.h"
// One thread per pixel: weight = 1 / (|gradient|^2 + eps) for the x and y
// directions.  (norm_for_smooth_term is kept for interface compatibility but
// is not used by this variant.)
__global__ void Compute_weightx_weighty2_norm0_Kernel(float* weightx, float* weighty, const float* absIx, const float* absIy, int nPixels, float norm_for_smooth_term, float eps)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < nPixels)
    {
        const float gx = absIx[idx];
        const float gy = absIy[idx];
        weightx[idx] = 1.0f / (gx * gx + eps);
        weighty[idx] = 1.0f / (gy * gy + eps);
    }
}
8,887 | #include <stdio.h>
#include <cuda_runtime.h>
#define SIZE 1000
#define NUM_BIN 16
// Per-block shared-memory histogram of device_a (SIZE values, each assumed
// in [0, NUM_BIN) — main fills device_a with i % NUM_BIN), accumulated into
// the NUM_BIN-entry global histogram device_b.
__global__ void histogram_shared_memory(int *device_b, int *device_a)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int stride = blockDim.x * gridDim.x;

    // Per-block bins; cooperatively zero them (works for any blockDim.x).
    __shared__ int cache[NUM_BIN];
    for (int b = threadIdx.x; b < NUM_BIN; b += blockDim.x)
        cache[b] = 0;
    __syncthreads();

    // Grid-stride accumulation into the block-local bins.
    while (tid < SIZE)
    {
        atomicAdd(&cache[device_a[tid]], 1);
        tid += stride;
    }
    __syncthreads();

    // BUG FIX: the original added cache[threadIdx.x] into
    // device_b[threadIdx.x] for all blockDim.x (256) threads, writing far
    // past the NUM_BIN (16) element global array.  Flush only valid bins.
    for (int b = threadIdx.x; b < NUM_BIN; b += blockDim.x)
        atomicAdd(&device_b[b], cache[b]);
}
// Host driver: builds a SIZE-element input whose values cycle through the
// NUM_BIN bins, runs the shared-memory histogram kernel, and prints the
// per-bin counts (each bin should hold SIZE / NUM_BIN, +1 for the first
// SIZE % NUM_BIN bins).
int main(int argc, char **argv)
{
// generate the input array on the host.
int host_a[SIZE];
for (int i = 0; i < SIZE; ++i)
{
//host_a[i] = bit_reverse(i, log2(SIZE));
host_a[i] = i % NUM_BIN;
}
int host_b[NUM_BIN];
for (int i = 0; i < NUM_BIN; ++i)
{
host_b[i] = 0;
}
// declare GPU memory pointers
int *device_a, *device_b;
// allocate GPU memory
// NOTE(review): cudaMalloc/cudaMemcpy/kernel-launch return codes are not
// checked anywhere in this driver.
cudaMalloc((void **)&device_a, SIZE * sizeof(int));
cudaMalloc((void **)&device_b, NUM_BIN * sizeof(int));
// transfer the arrays to the GPU
cudaMemcpy(device_a, host_a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, NUM_BIN * sizeof(int), cudaMemcpyHostToDevice);
// launch the kernel
// SIZE / 256 truncates (1000/256 = 3 blocks = 768 threads); the kernel's
// grid-stride loop still covers all SIZE elements.
histogram_shared_memory <<<SIZE / 256, 256 >>> (device_b, device_a);
// copy back the result from GPU (blocking memcpy doubles as the sync point)
cudaMemcpy(host_b, device_b, NUM_BIN * sizeof(int), cudaMemcpyDeviceToHost);
printf("Histogram using 16 bin is: \n");
for (int i = 0; i < NUM_BIN; ++i)
{
printf("bin %d: count %d\n", i, host_b[i]);
}
// free GPU memory allocation
cudaFree(device_a);
cudaFree(device_b);
return 0;
}
/*
 * Machine-generated CUDA code (AnyDSL/Impala compiler output — see the
 * #line directives referencing *.impala sources).  The bodies are in SSA /
 * goto form produced by the compiler; do not hand-edit the logic — regenerate
 * from the .impala sources instead.  Comments below only annotate structure.
 */
extern "C" {
// Generated POD mirrors of the Impala structs.  struct_Buffer_5425 carries a
// size-like int (e0) and a raw data pointer (e1) that the kernels reinterpret
// as double*.  In struct_image_5424, e2/e3 are compared against the global
// x/y thread ids below (image width/height).
typedef struct {
int e0;
char* e1;
} struct_Buffer_5425;
typedef struct {
struct_Buffer_5425 e0;
struct_Buffer_5425 e1;
int e2;
int e3;
} struct_image_5424;
typedef struct {
struct_Buffer_5425 e0;
int e1;
int e2;
} struct_filter_5428;
// Thin wrappers over the CUDA builtin index variables; the generated code
// calls functions rather than reading the builtins directly.
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x() { return blockIdx.x; }
__device__ inline int blockIdx_y() { return blockIdx.y; }
__device__ inline int blockIdx_z() { return blockIdx.z; }
__device__ inline int blockDim_x() { return blockDim.x; }
__device__ inline int blockDim_y() { return blockDim.y; }
__device__ inline int blockDim_z() { return blockDim.z; }
__device__ inline int gridDim_x() { return gridDim.x; }
__device__ inline int gridDim_y() { return gridDim.y; }
__device__ inline int gridDim_z() { return gridDim.z; }
__global__ void lambda_20643(struct_image_5424, struct_filter_5428, struct_Buffer_5425);
__global__ void lambda_20775(struct_filter_5428, struct_image_5424, struct_Buffer_5425, double*, struct_Buffer_5425);
// lambda_20643: per-pixel kernel.  For in-image pixels it accumulates a 1-D
// weighted sum over neighbours along x (filter taps from _20647's buffer);
// pixels closer than half the filter length to the left/right edge are
// copied through unchanged.  NOTE(review): ds_img is declared but never
// referenced in this body.
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_20643(struct_image_5424 _20646_22982, struct_filter_5428 _20647_22983, struct_Buffer_5425 _20648_22984) {
__shared__ double ds_img[134][7];
int _22990;
int p_22990;
int _22996;
int p_22996;
int _23002;
int p_23002;
int _23008;
int p_23008;
int _23014;
int p_23014;
int _23020;
int p_23020;
int _23043;
int p_23043;
double sum_23045;
double psum_23045;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_22990 = blockIdx_x();
p_22990 = _22990;
l22988: ;
_22990 = p_22990;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_22996 = blockDim_x();
p_22996 = _22996;
l22994: ;
_22996 = p_22996;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23002 = threadIdx_x();
p_23002 = _23002;
l23000: ;
_23002 = p_23002;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23008 = blockIdx_y();
p_23008 = _23008;
l23006: ;
_23008 = p_23008;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23014 = blockDim_y();
p_23014 = _23014;
l23012: ;
_23014 = p_23014;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23020 = threadIdx_y();
p_23020 = _23020;
l23018: ;
_23020 = p_23020;
#line 11 "main.impala"
int _23024;
_23024 = _20646_22982.e2;
#line 155 "gpu_device.impala"
int _23021;
_23021 = _22990 * _22996;
#line 155 "gpu_device.impala"
int gid_x_23022;
gid_x_23022 = _23021 + _23002;
#line 160 "gpu_device.impala"
bool _23025;
_23025 = gid_x_23022 < _23024;
#line 160 "gpu_device.impala"
if (_23025) goto l23026; else goto l23091;
l23091: ;
#line 163 "gpu_device.impala"
goto l23090;
l23026: ;
#line 157 "gpu_device.impala"
int _23027;
_23027 = _23008 * _23014;
#line 157 "gpu_device.impala"
int gid_y_23028;
gid_y_23028 = _23027 + _23020;
#line 11 "main.impala"
int _23030;
_23030 = _20646_22982.e3;
#line 160 "gpu_device.impala"
bool _23031;
_23031 = gid_y_23028 < _23030;
#line 160 "gpu_device.impala"
if (_23031) goto l23032; else goto l23089;
l23089: ;
#line 163 "gpu_device.impala"
goto l23090;
l23090: ;
return ;
l23032: ;
#line 45 "gpu_device.impala"
char* _23073;
_23073 = _20648_22984.e1;
#line 50 "gpu_device.impala"
int _23062;
_23062 = gid_y_23028 * _23024;
#line 50 "gpu_device.impala"
struct_Buffer_5425 _23059;
_23059 = _20646_22982.e1;
#line 45 "gpu_device.impala"
double* _23074;
union { double* dst; char* src; } u_23074;
u_23074.src = _23073;
_23074 = u_23074.dst;
#line 50 "gpu_device.impala"
int _23075;
_23075 = _23062 + gid_x_23022;
#line 45 "gpu_device.impala"
double* _23076;
_23076 = _23074 + _23075;
#line 4 "gaussian.impala"
int _23034;
_23034 = _20647_22983.e1;
#line 50 "gpu_device.impala"
char* _23060;
_23060 = _23059.e1;
#line 4 "gaussian.impala"
int h_anchor_23036;
h_anchor_23036 = _23034 / 2;
#line 50 "gpu_device.impala"
double* _23061;
union { double* dst; char* src; } u_23061;
u_23061.src = _23060;
_23061 = u_23061.dst;
#line 17 "gaussian.impala"
bool _23037;
_23037 = h_anchor_23036 <= gid_x_23022;
#line 17 "gaussian.impala"
if (_23037) goto l23038; else goto l23088;
l23088: ;
#line 27 "gaussian.impala"
goto l23082;
l23038: ;
#line 17 "gaussian.impala"
int _23039;
_23039 = _23024 - h_anchor_23036;
#line 17 "gaussian.impala"
bool _23040;
_23040 = gid_x_23022 < _23039;
#line 17 "gaussian.impala"
if (_23040) goto l23041; else goto l23081;
l23081: ;
#line 27 "gaussian.impala"
goto l23082;
l23082: ;
#line 50 "gpu_device.impala"
double* _23083;
_23083 = _23061 + _23075;
#line 50 "gpu_device.impala"
double _23084;
_23084 = *_23083;
#line 50 "gpu_device.impala"
double _23086;
_23086 = _23084;
#line 45 "gpu_device.impala"
*_23076 = _23086;
return ;
l23041: ;
#line 19 "gaussian.impala"
int _23047;
_23047 = 1 + h_anchor_23036;
#line 55 "gpu_device.impala"
struct_Buffer_5425 _23052;
_23052 = _20647_22983.e0;
#line 19 "gaussian.impala"
int _23079;
_23079 = 0 - h_anchor_23036;
#line 55 "gpu_device.impala"
char* _23053;
_23053 = _23052.e1;
#line 55 "gpu_device.impala"
double* _23054;
union { double* dst; char* src; } u_23054;
u_23054.src = _23053;
_23054 = u_23054.dst;
#line 19 "gpu_device.impala"
p_23043 = _23079;
psum_23045 = 0.000000e+00;
goto l23042;
l23042: ;
_23043 = p_23043;
sum_23045 = psum_23045;
#line 19 "gpu_device.impala"
bool _23048;
_23048 = _23043 < _23047;
#line 19 "gpu_device.impala"
if (_23048) goto l23049; else goto l23072;
l23072: ;
#line 45 "gpu_device.impala"
*_23076 = sum_23045;
return ;
l23049: ;
#line 23 "gpu_device.impala"
int _23050;
_23050 = 1 + _23043;
#line 21 "gaussian.impala"
int _23055;
_23055 = _23043 + h_anchor_23036;
#line 21 "gaussian.impala"
int _23063;
_23063 = gid_x_23022 + _23043;
#line 54 "gpu_device.impala"
double* i_23056;
i_23056 = _23054 + _23055;
#line 50 "gpu_device.impala"
int _23064;
_23064 = _23062 + _23063;
#line 55 "gpu_device.impala"
double _23057;
_23057 = *i_23056;
#line 50 "gpu_device.impala"
double* _23065;
_23065 = _23061 + _23064;
#line 55 "gpu_device.impala"
double _23068;
_23068 = _23057;
#line 50 "gpu_device.impala"
double _23066;
_23066 = *_23065;
#line 50 "gpu_device.impala"
double _23069;
_23069 = _23066;
#line 21 "gaussian.impala"
double _23070;
_23070 = _23068 * _23069;
#line 21 "gaussian.impala"
double _23071;
_23071 = sum_23045 + _23070;
#line 19 "gpu_device.impala"
p_23043 = _23050;
psum_23045 = _23071;
goto l23042;
}
// lambda_20775: same structure as lambda_20643 but the neighbour offset is
// applied along y (the offset multiplies the row stride) — the companion
// separable-filter pass.  Edge rows are likewise copied through unchanged.
// NOTE(review): ds_img is declared but never referenced in this body.
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_20775(struct_filter_5428 _20778_23095, struct_image_5424 _20779_23096, struct_Buffer_5425 _20780_23097, double* _20781_23098, struct_Buffer_5425 _20782_23099) {
__shared__ double ds_img[134][7];
int _23102;
int p_23102;
int _23105;
int p_23105;
int _23108;
int p_23108;
int _23111;
int p_23111;
int _23114;
int p_23114;
int _23117;
int p_23117;
int _23136;
int p_23136;
double sum_23138;
double psum_23138;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23102 = blockIdx_x();
p_23102 = _23102;
l23100: ;
_23102 = p_23102;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23105 = blockDim_x();
p_23105 = _23105;
l23103: ;
_23105 = p_23105;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23108 = threadIdx_x();
p_23108 = _23108;
l23106: ;
_23108 = p_23108;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23111 = blockIdx_y();
p_23111 = _23111;
l23109: ;
_23111 = p_23111;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23114 = blockDim_y();
p_23114 = _23114;
l23112: ;
_23114 = p_23114;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
_23117 = threadIdx_y();
p_23117 = _23117;
l23115: ;
_23117 = p_23117;
#line 155 "gpu_device.impala"
int _23118;
_23118 = _23102 * _23105;
#line 155 "gpu_device.impala"
int gid_x_23119;
gid_x_23119 = _23118 + _23108;
#line 11 "main.impala"
int _23120;
_23120 = _20779_23096.e2;
#line 160 "gpu_device.impala"
bool _23121;
_23121 = gid_x_23119 < _23120;
#line 160 "gpu_device.impala"
if (_23121) goto l23122; else goto l23180;
l23180: ;
#line 163 "gpu_device.impala"
goto l23179;
l23122: ;
#line 157 "gpu_device.impala"
int _23123;
_23123 = _23111 * _23114;
#line 11 "main.impala"
int _23125;
_23125 = _20779_23096.e3;
#line 157 "gpu_device.impala"
int gid_y_23124;
gid_y_23124 = _23123 + _23117;
#line 160 "gpu_device.impala"
bool _23126;
_23126 = gid_y_23124 < _23125;
#line 160 "gpu_device.impala"
if (_23126) goto l23127; else goto l23178;
l23178: ;
#line 163 "gpu_device.impala"
goto l23179;
l23179: ;
return ;
l23127: ;
#line 45 "gpu_device.impala"
char* _23163;
_23163 = _20780_23097.e1;
#line 50 "gpu_device.impala"
char* _23150;
_23150 = _20782_23099.e1;
#line 50 "gpu_device.impala"
int _23165;
_23165 = gid_y_23124 * _23120;
#line 6 "gaussian.impala"
int _23128;
_23128 = _20778_23095.e2;
#line 45 "gpu_device.impala"
double* _23164;
union { double* dst; char* src; } u_23164;
u_23164.src = _23163;
_23164 = u_23164.dst;
#line 50 "gpu_device.impala"
double* _23151;
union { double* dst; char* src; } u_23151;
u_23151.src = _23150;
_23151 = u_23151.dst;
#line 50 "gpu_device.impala"
int _23166;
_23166 = _23165 + gid_x_23119;
#line 6 "gaussian.impala"
int v_anchor_23129;
v_anchor_23129 = _23128 / 2;
#line 45 "gpu_device.impala"
double* _23167;
_23167 = _23164 + _23166;
#line 39 "gaussian.impala"
bool _23130;
_23130 = v_anchor_23129 <= gid_y_23124;
#line 39 "gaussian.impala"
if (_23130) goto l23131; else goto l23177;
l23177: ;
#line 49 "gaussian.impala"
goto l23171;
l23131: ;
#line 39 "gaussian.impala"
int _23132;
_23132 = _23125 - v_anchor_23129;
#line 39 "gaussian.impala"
bool _23133;
_23133 = gid_y_23124 < _23132;
#line 39 "gaussian.impala"
if (_23133) goto l23134; else goto l23170;
l23170: ;
#line 49 "gaussian.impala"
goto l23171;
l23171: ;
#line 50 "gpu_device.impala"
double* _23172;
_23172 = _23151 + _23166;
#line 50 "gpu_device.impala"
double _23173;
_23173 = *_23172;
#line 50 "gpu_device.impala"
double _23175;
_23175 = _23173;
#line 45 "gpu_device.impala"
*_23167 = _23175;
return ;
l23134: ;
#line 41 "gaussian.impala"
int _23139;
_23139 = 1 + v_anchor_23129;
#line 55 "gpu_device.impala"
struct_Buffer_5425 _23143;
_23143 = _20778_23095.e0;
#line 41 "gaussian.impala"
int _23169;
_23169 = 0 - v_anchor_23129;
#line 55 "gpu_device.impala"
char* _23144;
_23144 = _23143.e1;
#line 55 "gpu_device.impala"
double* _23145;
union { double* dst; char* src; } u_23145;
u_23145.src = _23144;
_23145 = u_23145.dst;
#line 19 "gpu_device.impala"
p_23136 = _23169;
psum_23138 = 0.000000e+00;
goto l23135;
l23135: ;
_23136 = p_23136;
sum_23138 = psum_23138;
#line 19 "gpu_device.impala"
bool _23140;
_23140 = _23136 < _23139;
#line 19 "gpu_device.impala"
if (_23140) goto l23141; else goto l23162;
l23162: ;
#line 45 "gpu_device.impala"
*_23167 = sum_23138;
return ;
l23141: ;
#line 43 "gaussian.impala"
int _23146;
_23146 = _23136 + v_anchor_23129;
#line 43 "gaussian.impala"
int _23152;
_23152 = gid_y_23124 + _23136;
#line 50 "gpu_device.impala"
int _23153;
_23153 = _23152 * _23120;
#line 23 "gpu_device.impala"
int _23142;
_23142 = 1 + _23136;
#line 54 "gpu_device.impala"
double* i_23147;
i_23147 = _23145 + _23146;
#line 55 "gpu_device.impala"
double _23148;
_23148 = *i_23147;
#line 50 "gpu_device.impala"
int _23154;
_23154 = _23153 + gid_x_23119;
#line 55 "gpu_device.impala"
double _23158;
_23158 = _23148;
#line 50 "gpu_device.impala"
double* _23155;
_23155 = _23151 + _23154;
#line 50 "gpu_device.impala"
double _23156;
_23156 = *_23155;
#line 50 "gpu_device.impala"
double _23159;
_23159 = _23156;
#line 43 "gaussian.impala"
double _23160;
_23160 = _23158 * _23159;
#line 43 "gaussian.impala"
double _23161;
_23161 = sum_23138 + _23160;
#line 19 "gpu_device.impala"
p_23136 = _23142;
psum_23138 = _23161;
goto l23135;
}
}
8,889 | #include "includes.h"
#define _USE_MATH_DEFINES
// Computes three global reductions over int vectors of length numElements:
//   reduced[0] += sum(vec1[i]*vec2[i])   (dot product)
//   reduced[1] += sum(vec1[i]*vec1[i])   (||vec1||^2)
//   reduced[2] += sum(vec2[i]*vec2[i])   (||vec2||^2)
// Launch requirements: dynamic shared memory = 3 * blockDim.x * sizeof(int);
// blockDim.x must be a power of two for the halving reduction.
__global__ void calculateDotProductsAndReduceGPU(int *vec1, int *vec2, int *reduced, int numElements)
{
    extern __shared__ int sdata[];
    int tid = threadIdx.x;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int numThreads = blockDim.x;

    // Load the three products (zero for threads past the end of the input).
    sdata[tid] = 0;
    sdata[tid + numThreads] = 0;
    sdata[tid + (numThreads * 2)] = 0;
    if (i < numElements)
    {
        sdata[tid] = vec1[i] * vec2[i];
        sdata[tid + numThreads] = vec1[i] * vec1[i];
        sdata[tid + (numThreads * 2)] = vec2[i] * vec2[i];
    }
    // BUG FIX: the barrier and the reduction loop were inside the
    // "i < numElements" branch, so in the tail block the out-of-range
    // threads skipped __syncthreads() — undefined behaviour.  Every thread
    // in the block must execute every barrier.
    __syncthreads();

    // Tree reduction with sequential addressing over the three partial arrays.
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
            sdata[tid + numThreads] += sdata[tid + numThreads + s];
            sdata[tid + (numThreads * 2)] += sdata[(tid + (numThreads * 2)) + s];
        }
        __syncthreads();
    }

    // One atomic per block publishes the block-level partials.
    if (tid == 0)
    {
        atomicAdd(&reduced[0], sdata[0]);
        atomicAdd(&reduced[1], sdata[numThreads]);
        atomicAdd(&reduced[2], sdata[numThreads * 2]);
    }
}
8,890 | #include "includes.h"
// Length of each data sequence: text (gcT_size) and query/pattern (gcP_size).
__constant__ int gcT_size;
__constant__ int gcP_size;
// Threshold of the SW algorithm
__constant__ int gcThre;
// Data of the query
__constant__ char gcP_seq[1024];
// Cost and Gain
__constant__ int gcMatch;
__constant__ int gcMiss;
__constant__ int gcExtend;
__constant__ int gcBegin;
// Traceback directions stored per DP cell.
enum{
Zero,
Diagonal,
Vertical,
Horizon,
};
using namespace std;
// Smith-Waterman-style dynamic programming kernel with affine-gap scores.
// One thread per query position (threadIdx.x indexes gcP_seq); the t-loop
// starts at -id so threads sweep the DP matrix along anti-diagonals, with
// shared arrays Hp_1/Ep_1 carrying the previous row between neighbours.
// NOTE(review): launched as a single block; writes Hp_1[id + 1] below, so
// blockDim.x must be <= 1023 to stay inside the 1024-entry shared arrays —
// confirm against the host launch code.
__global__ void DP(char* dT_seq, char* dTrace, int* dScore){
// ThreadId = ptn point
int id = threadIdx.x;
// The acid in this thread
char p = gcP_seq[id];
// p-1 row line's value
__shared__ int Hp_1[1024];
__shared__ int Ep_1[1024];
// Temporary
int Hp_1_buf = 0;
int Ep_1_buf = 0;
// t-1 element value
int Ht_1 = 0;
int Ft_1 = 0;
// p-1 t-1 element value
int Ht_1p_1 = 0;
// Initialize
Hp_1[id] = 0;
Ep_1[id] = 0;
// Similar score
int sim = 0;
// Linear index of this thread's first cell in the (query x text) trace matrix.
int point = id * gcT_size - id;
// Culcurate elements
for(int t = -id; t < gcT_size; ++t){
// Control culcurate order
// Negative t: this thread's wavefront has not started yet (sim stays 0).
if(t<0){}
// Get similar score
else{
// Compare acids
if(dT_seq[t] == p){sim = gcMatch;}
else{sim = gcMiss;}
}
// SW algorithm
// Culcurate each elements
Ht_1p_1 += sim; // Diagonal
Ht_1 += gcBegin; // Horizon (Start)
Ft_1 += gcExtend; // Horizon (Extend)
Hp_1_buf = Hp_1[id] + gcBegin; // Vertical (Start)
Ep_1_buf = Ep_1[id] + gcExtend; // Vertical (Extend)
// Choose the gap score
if(Ht_1 > Ft_1){Ft_1 = Ht_1;} // Horizon
// NOTE(review): comparing the vertical-open score against the horizontal
// score Ft_1 looks asymmetric — standard affine SW compares it against
// Ep_1_buf; verify against the reference implementation.
if(Hp_1_buf > Ft_1){Ep_1_buf = Hp_1_buf;} // Vertical
// Choose the max score
// Ht_1 is stored the max score
if(Ht_1p_1 > Ep_1_buf){
// Diagonal
if(Ht_1p_1 > Ft_1){
Ht_1 = Ht_1p_1;
dTrace[point] = Diagonal;
}
// Horizon
else{
Ht_1 = Ft_1;
dTrace[point] = Horizon;
}
}
else {
// Vertical
if(Ep_1_buf > Ft_1){
Ht_1 = Ep_1_buf;
dTrace[point] = Vertical;
}
// Horizon
else{
Ht_1 = Ft_1;
dTrace[point] = Horizon;
}
}
// The case 0 is max
if(Ht_1 <= 0){
Ht_1 = 0;
// Set 0 other value
Ft_1 = 0;
Ep_1_buf = 0;
dTrace[point] = Zero;
}
// Hp-1 is next Ht-1p-1
Ht_1p_1 = Hp_1[id];
// Barrier: everyone has read the old Hp_1/Ep_1 values before they are
// overwritten below (all threads reach this — no divergence at block scope).
__syncthreads();
// Set value need next culcurate
// p+1 row line
if(t >= 0){
Hp_1[id + 1] = Ht_1;
Ep_1[id + 1] = Ep_1_buf;
// DEBUG, score check
// dTrace[point] = (char)(Ht_1);
}
// Report scores that cross the threshold: dScore[t] packs the ptn index
// in the high 16 bits and the best score in the low 16 bits.
if(Ht_1 >= gcThre){
// printf("Score = %d:\n", Ht_1);
// traceback(dTrace, dT_seq, point-1, t);
if(Ht_1 >= (dScore[t] & 0x0000ffff)){
// Set score and now ptn point
dScore[t] = Ht_1 + (id << 16);
}
}
++point;
__syncthreads();
// for end
}
}
8,891 | #include <stdio.h>
#include <iostream>
#include <ctime>
using namespace std;
#define TILE_WIDTH 16
// Fill the first n entries of arr with init_val.
void host_init(float *arr, int n, float init_val){
    int i = 0;
    while (i < n){
        arr[i] = init_val;
        ++i;
    }
}
// Reference CPU GEMM: h_c (m x n) = h_a (m x k) * h_b (k x n), row-major.
void host_matmul(float *h_a, float *h_b, float *h_c, int m, int n, int k){
    for (int i = 0; i < m; ++i){
        const float *a_row = h_a + i * k;
        float *c_row = h_c + i * n;
        for (int j = 0; j < n; ++j){
            float acc = 0.0f;
            for (int p = 0; p < k; ++p)
                acc += a_row[p] * h_b[p * n + j];
            c_row[j] = acc;
        }
    }
}
__global__
void cuda_matmul(float *d_a, float *d_b, float *d_c, int m, int n, int k){
    // Tiled GEMM: d_c (m x n) = d_a (m x k) * d_b (k x n), row-major.
    // Each TILE_WIDTH x TILE_WIDTH block computes one output tile, staging
    // the matching input tiles through shared memory each phase.
    __shared__ float a_tile[TILE_WIDTH][TILE_WIDTH];
    __shared__ float b_tile[TILE_WIDTH][TILE_WIDTH];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int col = blockIdx.x * blockDim.x + tx;
    const int row = blockIdx.y * blockDim.y + ty;
    const int num_phases = (k - 1) / TILE_WIDTH + 1;
    float acc = 0.0;
    for (int phase = 0; phase < num_phases; phase++){
        // Load this phase's tiles; zero-pad out-of-range entries so the
        // inner product needs no bounds checks.
        const int a_col = phase * TILE_WIDTH + tx;
        const int b_row = phase * TILE_WIDTH + ty;
        a_tile[ty][tx] = (row < m && a_col < k) ? d_a[row * k + a_col] : 0.0;
        b_tile[ty][tx] = (col < n && b_row < k) ? d_b[b_row * n + col] : 0.0;
        __syncthreads();
        for (int kk = 0; kk < TILE_WIDTH; kk++)
            acc += a_tile[ty][kk] * b_tile[kk][tx];
        __syncthreads();
    }
    if (row < m && col < n)
        d_c[row * n + col] = acc;
}
// Abort with a diagnostic if a CUDA API call reported a failure.
void cudaError_check(cudaError_t err, int line){
    if (err == cudaSuccess)
        return;
    printf("GPUassert: %s %s %d\n", cudaGetErrorString(err), __FILE__, line);
    exit(EXIT_FAILURE);
}
// Driver: compares the CPU reference GEMM against the tiled CUDA kernel on
// 1024^3 matrices and reports both timings.  Exit code 0 on match.
int main(){
    float *h_a, *h_b, *h_c, *h_c_cpy;
    float *d_a, *d_b, *d_c;
    // h_a: m x k, h_b: k x n, h_c: m x n (row-major).
    int m = 1024;
    int n = 1024;
    int k = 1024;
    size_t size_ha = k*m*sizeof(float);
    size_t size_hb = k*n*sizeof(float);
    size_t size_hc = m*n*sizeof(float);
    clock_t start, stop;
    //################## HOST Start ###################//
    h_a = (float*) malloc (size_ha);
    h_b = (float*) malloc (size_hb);
    h_c = (float*) malloc (size_hc);
    h_c_cpy = (float*) malloc (size_hc);
    host_init(h_a, k*m, 1);
    host_init(h_b, n*k, 2);
    host_init(h_c, n*m, 0);
    host_init(h_c_cpy, n*m, 0);
    start = clock();
    host_matmul(h_a, h_b, h_c, m, n, k);
    stop = clock();
    double cpu_duration = (stop - start) / (double) CLOCKS_PER_SEC;
    //################## HOST End ###################//
    //################## CUDA Start ###################//
    cudaError_t err;
    err = cudaMalloc((void **) &d_a, size_ha);
    cudaError_check(err, __LINE__);
    err = cudaMemcpy(d_a, h_a, size_ha, cudaMemcpyHostToDevice);
    cudaError_check(err, __LINE__);
    err = cudaMalloc((void **) &d_b, size_hb);
    cudaError_check(err, __LINE__);
    err = cudaMemcpy(d_b, h_b, size_hb, cudaMemcpyHostToDevice);
    cudaError_check(err, __LINE__);
    err = cudaMalloc((void **) &d_c, size_hc);
    cudaError_check(err, __LINE__);
    // Kernel invocation.
    int num_threads_per_block = TILE_WIDTH;
    // BUG FIX: grid.x must cover the columns (n) and grid.y the rows (m) to
    // match the kernel's col = blockIdx.x..., row = blockIdx.y... mapping.
    // The original swapped them (harmless here only because m == n).
    dim3 gridDim ((n-1)/num_threads_per_block + 1, (m-1)/num_threads_per_block + 1, 1);
    dim3 blockDim (num_threads_per_block, num_threads_per_block, 1);
    start = clock();
    cuda_matmul<<<gridDim, blockDim>>>(d_a, d_b, d_c, m, n, k);
    // BUG FIX: kernel launches are asynchronous; without synchronization the
    // timed interval only covered launch overhead.  Also surface any
    // launch-configuration error, which is otherwise silent.
    err = cudaGetLastError();
    cudaError_check(err, __LINE__);
    err = cudaDeviceSynchronize();
    cudaError_check(err, __LINE__);
    stop = clock();
    double gpu_duration = (stop - start) / (double) CLOCKS_PER_SEC;
    err = cudaMemcpy(h_c_cpy, d_c, size_hc, cudaMemcpyDeviceToHost);
    cudaError_check(err, __LINE__);
    //################## CUDA End ###################//
    int success = 1;
    for (int i = 0; i < n*m; i++){
        if (h_c[i] != h_c_cpy[i]){
            success = 0;
            printf("Failure at idx: %d\n", i);
            break;
        }
    }
    if (success == 1)
        printf("Success\n");
    printf("CPU Duration: %0.3f secs \n", cpu_duration);
    printf("GPU Duration: %0.5f secs \n", gpu_duration);
    // BUG FIX: release host and device memory, and return 0 on success
    // (the original leaked everything and always returned 1).
    free(h_a); free(h_b); free(h_c); free(h_c_cpy);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return success ? 0 : 1;
}
|
8,892 | /*
* Shade Alabsa
* Vector Summation Example from Cuda By Example
* CS 7172
* Edward Jung
* HW 3
* Most is taken from the example in the book with little changes.
*/
#include <stdio.h>
#define N (32 * 1024)
/*
These error functions is ripped from there example. I didn't want to include
their entire file nor did I want to just ignore all the errors.
*/
// Print the CUDA error with its source location and abort the program.
static void HandleError( cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess) {
        return;
    }
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
/*
Performs the summation of the vectors on the GPU
*/
/*
Sums the two N-element vectors on the GPU.  Launched with one thread per
block (<<<512,1>>>), so each block walks the data with a stride of gridDim.x.
*/
__global__ void add(float *a, float *b, float *c) {
    for (int i = blockIdx.x; i < N; i += gridDim.x) {
        c[i] = a[i] + b[i];
    }
}
// Dump all N values, starting a new line after indices 0, 10, 20, ...
void printArray(float *c) {
    for (int i = 0; i < N; i++) {
        printf("%f ", c[i]);
        if (i % 10 == 0)
            printf("\n");
    }
    printf("\n");
}
// Allocates host/device vectors, fills a and b, sums them on the GPU with
// 512 single-thread blocks, and verifies the result on the host.
int main(void) {
float *a, *b, *c;
float *dev_a, *dev_b, *dev_c;
a = (float*)malloc( N * sizeof(float) );
b = (float*)malloc( N * sizeof(float) );
c = (float*)malloc( N * sizeof(float) );
HANDLE_ERROR( cudaMalloc((void**)&dev_a, N * sizeof(float) ));
HANDLE_ERROR( cudaMalloc((void**)&dev_b, N * sizeof(float) ));
HANDLE_ERROR( cudaMalloc((void**)&dev_c, N * sizeof(float) ));
for (int i = 0; i<N; i++) {
a[i] = i * 3.14;
b[i] = 2 * i;
}
HANDLE_ERROR( cudaMemcpy(dev_a, a, N * sizeof(float),
cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy(dev_b, b, N * sizeof(float),
cudaMemcpyHostToDevice));
add<<<512,1>>>(dev_a, dev_b, dev_c);
// The blocking memcpy below also serves as the synchronization point after
// the asynchronous kernel launch.
HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(float),
cudaMemcpyDeviceToHost));
// Exact float comparison is valid here: host and device perform the same
// single-precision addition on identical operands.
bool success = true;
for (int i = 0; i < N; i++) {
if (a[i] + b[i] != c[i]) {
printf( "Error %f + %f != %f\n", a[i], b[i], c[i]);
success = false;
}
}
if (success) {
printf("We did it!\n");
printArray(c);
}
HANDLE_ERROR( cudaFree(dev_a));
HANDLE_ERROR( cudaFree(dev_b));
HANDLE_ERROR( cudaFree(dev_c));
free(a);
free(b);
free(c);
return 0;
}
|
8,893 | #include <stdio.h>
#include <stdlib.h>
typedef int* Vector;
// Ceiling division: number of blocks of block_size needed to cover tam.
int techo(int tam, int block_size)
{
    int blocks = tam / block_size;
    if (tam % block_size != 0)
        blocks += 1;
    return blocks;
}
// C = A * B for square tam x tam row-major matrices; one thread per element.
__global__ void multiplicarMatrices(int *C, int *A, int *B, int tam)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int idy = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int index = idy * tam + idx;
    if(idy < tam && idx < tam)
    {
        int suma = 0;
        // BUG FIX: the row index was written A[idy + tam + k]; the row-major
        // element (idy, k) is A[idy * tam + k].
        for(int k = 0; k < tam; k++)
            suma += A[idy * tam + k] * B[k * tam + idx];
        C[index] = suma;
    }
}
// Elementwise sum of two tam x tam matrices; one thread per element.
__global__ void sumarMatrices(int *C, int *A, int *B, int tam)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < tam && col < tam)
    {
        int pos = row * tam + col;
        C[pos] = A[pos] + B[pos];
    }
}
// Print the flattened tam x tam matrix row by row.
void imprimirMatriz(int *A, int tam)
{
    for (int fila = 0; fila < tam; fila++)
    {
        int *row = A + tam * fila;
        for (int col = 0; col < tam; col++)
            printf("%d ", row[col]);
        printf("\n");
    }
}
// Allocate a flattened tam x tam matrix with every cell set to val.
Vector newMatrizVectorizada(int tam, int val)
{
    Vector m = (Vector) malloc(sizeof(int) * tam * tam);
    const int total = tam * tam;
    for (int pos = 0; pos < total; pos++)
        m[pos] = val;
    return m;
}
// Driver: builds two constant tam x tam matrices (A = 2s, B = 3s), multiplies
// then sums them on the GPU, printing each result.  tam may be overridden by
// the first command-line argument.
int main(int argc, char **argv)
{
Vector A_h, B_h, C_h;
Vector A_d, B_d, C_d;
int tam = 12;
if(argc > 1)
tam = atoi(argv[1]);
int N = tam * tam;
size_t size = N * sizeof(int);
A_h = newMatrizVectorizada(tam, 2);
B_h = newMatrizVectorizada(tam, 3);
C_h = newMatrizVectorizada(tam, 0);
// NOTE(review): none of the CUDA API return codes below are checked.
cudaMalloc((void**) &A_d, size);
cudaMalloc((void**) &B_d, size);
cudaMalloc((void**) &C_d, size);
cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
// 4x4 thread blocks; techo() rounds the grid up to cover all of tam.
int BLOCK_SIZE = 4;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
dim3 n_blocks(techo(tam, block_size.x), techo(tam, block_size.y));
multiplicarMatrices<<< n_blocks, block_size >>> (C_d, A_d, B_d, tam);
// The blocking memcpy also synchronizes with the asynchronous launch.
cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost);
imprimirMatriz(C_h, tam);
sumarMatrices <<< n_blocks, block_size>>> (C_d, A_d, B_d, tam);
cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost);
printf("==============================================RESULTADO============================================\n");
imprimirMatriz(C_h, tam);
printf("===================================================================================================\n");
free(A_h);
free(B_h);
free(C_h);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
8,894 | #include "includes.h"
// In-place elementwise square of a; one thread per element with tail guard.
__global__ void square_array(float *a, int array_size)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < array_size)
    {
        const float v = a[i];
        a[i] = v * v;
    }
}
8,895 | #include <fstream>
#include <iostream>
#include <string>
#include <cuda.h>
// Loads a matrix stored as "nx ny" followed by nx*ny whitespace-separated
// values (format used by the example directories).  Allocates `matrix` with
// new[]; returns true on success, false (after printing a message) when the
// file cannot be opened.
bool load_matrix(char * filename, double * &matrix, int &nx, int &ny){
    std::ifstream infile(filename);
    if (!infile.is_open()) {
        std::cout << "Fichier introuvable: "<< filename << std::endl;
        return 0;
    }
    // Dimensions come first...
    infile >> nx >> ny;
    matrix = new double[nx*ny];
    // ...then the values in file order.
    for (int idx = 0; idx < nx*ny; idx++)
        infile >> matrix[idx];
    infile.close();
    return 1;
}
// Computes C = A * B where A is numARows x numAColumns, B is
// numBRows x numBColumns and C is numCRows x numCColumns.
// TODO: exercise stub — the body is intentionally empty; C is left untouched.
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
/// Insert the implementation here
}
// Skeleton host driver for the matrix-multiply exercise (TODO sections to be
// filled in by the student).
int main(int argc, char ** argv) {
    // BUG FIX: pointers are initialised to NULL so the cleanup free() calls
    // below remain safe while the TODO sections are still empty — free() of
    // an uninitialised pointer is undefined behaviour.
    float * hostA = NULL;
    float * hostB = NULL;
    float * hostC = NULL;
    float * deviceA = NULL;
    float * deviceB = NULL;
    float * deviceC = NULL;
    int numARows = 0;
    int numAColumns = 0;
    int numBRows = 0;
    int numBColumns = 0;
    int numCRows;
    int numCColumns;
    /// Load the input file
    /// Initialise numCRows and numCColumns
    numCRows = 0;
    numCColumns = 0;
    /// Allocate hostC
    /// Print the matrix information
    /// Allocate the GPU memory
    /// Copy the data to the GPU
    /// Initialise the grid and the dimensions of each block
    /// Run the kernel
    // FIX: cudaThreadSynchronize() is deprecated; use the supported
    // cudaDeviceSynchronize() to wait for the (future) kernel.
    cudaDeviceSynchronize();
    /// Copy the result back to CPU memory
    /// Free the memory
    free(hostA);
    free(hostB);
    free(hostC);
    return 0;
}
|
8,896 | #include "includes.h"
// Scans the label array Q (length `length`) and raises the global flag
// *sprimtemp to 1 when any element still carries the label s.
__global__ void componentStepFive(unsigned int * Q,unsigned int length,unsigned int * sprimtemp,unsigned int s){
// Flatten a 2D grid of 2D blocks into a linear thread id.
// NOTE(review): the first term scales blockIdx.y by gridDim.x (blocks per
// grid row), so this assumes the launch uses a 2D grid laid out row-major —
// confirm against the host launch configuration.
unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x;
if(tid <length) {
if(Q[tid]==s){
// Idempotent flag set: every matching thread stores the same value 1.
atomicExch(sprimtemp,1);
//*sprime=*sprimtemp+1;
}
}
}
8,897 | /*
* The names of the kernels are fairly self explanatory
* about the task that they are performing. You
* may refer to the Arnoldi algorithm on page 4 of the
* attached paper to understand the flow.
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>
// Copy the length-n vector d_q into column j of the row-major n x k
// matrix d_Q; one thread per row.
__global__ void assign_Q(double* d_Q, double* d_q, int j, int k, int n)
{
    const int row = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (row < n)
        d_Q[row*k + j] = d_q[row];
}
// Extract column i of the row-major n x col matrix d_Q into d_q;
// one thread per row.
__global__ void update_q(double* d_q, double* d_Q, int i, int col, int n)
{
    const int row = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (row < n)
        d_q[row] = d_Q[row*col + i];
}
// z -= (*temp) * q.  The scalar *temp is loaded once per block into shared
// memory and broadcast to all threads after a barrier.
__global__ void update_z(double* d_z, double*d_q, double* temp, int n)
{
    __shared__ double coeff;
    if (threadIdx.x == 0)
        coeff = *temp;
    __syncthreads();
    const int idx = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (idx < n)
        d_z[idx] -= coeff * d_q[idx];
}
// Single-thread kernel (launched <<<1,1>>>): t = dot(a, b) over n entries;
// stores t into H(i, j) of the (k+1)-row Hessenberg matrix (t - 1 on the
// diagonal, matching the original) and exports t via *temp.
// (The original shadowed parameter i with its loop index; a distinct
// counter is used here for clarity — behaviour is unchanged.)
__global__ void dot_prod_assign_H(double* a, double*b, int n, double* temp, double* d_H, int i, int j, int k)
{
    double t = 0;
    for (int idx = 0; idx < n; idx++)
        t += a[idx] * b[idx];
    d_H[i + j*(k+1)] = (i == j) ? t - 1 : t;
    *temp = t;
}
// Despite the name, this is a dense matrix-vector product: z = A * q, where
// A is n x n row-major; one thread per output row.
__global__ void saxpy(double *A, int n, double* z, double* q)
{
    const int row = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (row < n){
        const double *a_row = A + row * n;
        double acc = 0;
        for (int c = 0; c < n; c++)
            acc += a_row[c] * q[c];
        z[row] = acc;
    }
}
// Single-thread kernel (must be launched <<<1,1>>>): computes the Euclidean
// norm of the length-n vector q, writing it both to *temp (consumed by
// normalize_2) and into d_H at flat offset `loc`.
__global__ void normalize_1(double* q, double* temp, int n, double* d_H, int loc)
{
    double sumSq = 0;
    for (int idx = 0; idx < n; idx++)
    {
        sumSq += q[idx] * q[idx];
    }
    double norm = sqrt(sumSq);
    *temp = norm;
    d_H[loc] = norm;
}
// Divides every entry of q by the scalar *temp (the norm produced by
// normalize_1), i.e. q <- q / ||q||. One thread per element; 1D launch.
__global__ void normalize_2(double* q, double* temp, int n)
{
    int id = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (id >= n) return;
    q[id] /= *temp;
}
// Host helper: normalizes the device vector q (length n) in place and records
// its norm in d_H at flat offset `loc`. normalize_1 runs single-threaded to
// compute the norm into *temp; normalize_2 then scales q in parallel. Both
// launches are on the default stream, so their ordering is implicit.
void normalize_assign_H(double *q, double* temp, int n, double* d_H, int loc)
{
normalize_1<<<1, 1>>>(q, temp, n, d_H, loc);
normalize_2<<<n/1024 + 1, 1024>>>(q, temp, n);
}
// Debug kernel (launch as <<<1,1>>>): prints a row-major `row` x `col`
// matrix, one matrix row per output line. Device printf is slow and
// serialized -- debugging only.
__global__ void printMat(double* A, int row, int col)
{
    printf("Printing matrix\n");
    for (int idx = 0; idx < row * col; idx++)
    {
        printf("%f ", A[idx]);
        // End the output line at every column boundary.
        if ((idx + 1) % col == 0)
        {
            printf("\n");
        }
    }
}
/*
 * Host driver for k steps of the Arnoldi iteration (see the paper referenced
 * at the top of this file).
 *   d_A  : n-by-n input matrix on the device (row-major)
 *   d_q  : current Krylov vector, length n (device)
 *   d_Q  : n-by-(k+1) orthonormal basis being built (device)
 *   d_H  : (k+1)-by-k Hessenberg matrix (device)
 *   d_z  : length-n work vector (device)
 *   temp : device scalar scratch shared between kernels
 * All launches use the default stream, so kernel ordering is implicit.
 */
void parallelArnoldi(double* d_A, double* d_q, int k, double* d_Q, double* d_H, int n, double* d_z, double* temp)
{
    assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, 0, k+1, n);
    for(int j=0; j<k; j++)
    {
        // z = A * q  (the "saxpy" kernel is really a mat-vec).
        saxpy<<<n/1024 + 1, 1024>>>(d_A, n, d_z, d_q);
        // Orthogonalize z against every basis vector built so far,
        // recording the projection coefficients in H.
        for(int i=0; i<=j; i++)
        {
            update_q<<<n/1024 + 1, 1024>>>(d_q, d_Q, i, k+1, n);
            dot_prod_assign_H<<< 1, 1>>>(d_q, d_z, n, temp, d_H, i, j, k);
            update_z<<<n/1024 + 1, 1024>>>(d_z, d_q, temp, n);
        }
        // Normalize z, record its norm in H, and append it as the next
        // basis column.
        normalize_assign_H(d_z, temp, n, d_H, j+1 + j*(k+1));
        cudaMemcpy(d_q, d_z, sizeof(double)*n, cudaMemcpyDeviceToDevice);
        assign_Q<<<n/1024 + 1, 1024>>>(d_Q, d_q, j+1, k+1, n);
    }
    // BUG FIX: the original initialized `err` to cudaSuccess at the top and
    // never updated it, so this check could never fire. Query the runtime
    // for any launch/copy error raised by the work above.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) printf("Error above: %s\n", cudaGetErrorString(err));
    /*
    printf("H\n");
    printMat<<<1, 1>>>(d_H, k+1, k);
    cudaDeviceSynchronize();
    printf("Q\n");
    printMat<<<1, 1>>>(d_Q, n, k+1);
    cudaDeviceSynchronize();
    */
}
|
8,898 | #include "includes.h"
// Fills the first n entries of vec with 1.0f; one element per thread,
// 1D launch.
__global__ void one_vector_float(float *vec, const int n)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        vec[idx] = 1.0f;
    }
} |
8,899 | //fail
//--blockDim=2048 --gridDim=2 --no-inline
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define N 2//2048
__constant__ int A[4096];
__constant__ int B[3] = {0,1,2};
// Each thread writes A[threadIdx.x] + B[0] into x[threadIdx.x]; A and B are
// __constant__-memory arrays declared above. (Original Portuguese note,
// translated: "stays constant because it is very large; with N < 1024 it
// does not stay" -- intent unclear, likely a GPUVerify experiment note.)
__global__ void kernel(int* x) {
    const int tid = threadIdx.x;
    x[tid] = A[tid] + B[0];
}
|
8,900 | #include "includes.h"
// One step of Gauss-Jordan matrix inversion: divides the off-diagonal
// entries of pivot row i -- in both A and the accumulating inverse I -- by
// the pivot A[i][i]. The diagonal element itself is left for a companion
// kernel. Expects a 2D launch covering an n x n index space; threads off
// the pivot row (or on the diagonal) do nothing. No thread writes the pivot
// here, so reading it without synchronization is safe.
__global__ void nodiag_normalize(double *A, double *I, int n, int i) {
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int c = blockIdx.y * blockDim.y + threadIdx.y;

    if (r < n && c < n && r == i && r != c) {
        const double pivot = A[i * n + i];
        I[r * n + c] /= pivot;
        A[r * n + c] /= pivot;
    }
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.