serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
9,401 | #include<stdio.h>
//Compile with nvcc -arch sm_20 -lineinfo -Xcompiler -Wall -O4 -g sample_base.cu -o hello
//Check cuda call for errors, Call after each cuda call
// Check the last CUDA error; call after each CUDA API call / kernel launch.
// Wrapped in do { ... } while(0) so the macro expands to a single statement
// and is safe inside an unbraced if/else (the original bare-block form was not).
#define cudaCheckError() do { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
} while(0)
//This is the kernel that runs on the GPU
// Placeholder kernel: the device-side printf below is intentionally commented
// out, so launching this kernel performs no observable work. It exists so the
// host code can exercise a launch + cudaCheckError round trip.
__global__ void hello_from_GPU()
{
//printf("Hello world from the device! block=%d, thread=%d\n", blockIdx.x, threadIdx.x);
}
//This is the main function
// Allocates a dim_x x dim_y host matrix plus two device buffers, launches the
// demo kernel, then releases everything. Usage: ./hello <dim_x> <dim_y>
int main(int argc, char **argv)
{
    double **hostMatrix;
    double *deviceMatrix, *deviceMatrixTransposed;
    int dim_x = 1, dim_y = 1;
    // Expect exactly two arguments: the two matrix dimensions.
    if (argc != 3)
    {
        printf("Insufficient arguments, exiting\n");
        exit(EXIT_FAILURE);
    }
    dim_x = atoi(argv[1]);
    dim_y = atoi(argv[2]);
    if (dim_x <= 0 || dim_y <= 0)
    {
        printf("Dimensions must be positive integers\n");
        exit(EXIT_FAILURE);
    }
    // Host matrix: dim_x row pointers, each row holding dim_y doubles.
    hostMatrix = (double**)malloc(sizeof(double*)*dim_x);
    if (hostMatrix == NULL){
        fprintf(stderr, "Error in Host Matrix allocation\n");
        exit(EXIT_FAILURE);
    }
    // BUG FIX: the original iterated i up to dim_y while hostMatrix only has
    // dim_x pointer slots, overflowing the array whenever dim_y > dim_x.
    for (int i = 0; i < dim_x; i++){
        hostMatrix[i] = (double*)malloc(sizeof(double) * dim_y);
        if (hostMatrix[i] == NULL){
            fprintf(stderr, "Error in Host Matrix allocation\n");
            exit(EXIT_FAILURE);
        }
        for (int j = 0; j < dim_y; j++) {
            hostMatrix[i][j] = j + (i * dim_x);
        }
    }
    deviceMatrix = NULL;
    cudaMalloc((void**)&deviceMatrix, sizeof(double)*dim_x*dim_y);
    cudaCheckError();
    deviceMatrixTransposed = NULL;
    cudaMalloc((void**)&deviceMatrixTransposed, sizeof(double)*dim_x*dim_y);
    cudaCheckError();
    //Kernel invocation with <<< # of blocks , # of threads per block>>>(args...)
    hello_from_GPU<<<10,10>>>();
    cudaCheckError();
    cudaDeviceSynchronize();
    // Release all resources before resetting the device (the original leaked
    // every allocation).
    cudaFree(deviceMatrix);
    cudaFree(deviceMatrixTransposed);
    for (int i = 0; i < dim_x; i++){
        free(hostMatrix[i]);
    }
    free(hostMatrix);
    //Reset the GPU
    cudaDeviceReset();
    return 0;
}
// Yey
|
9,402 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
#define NUM_BINS 40
#define SPACE_INDEX 36
#define PERIOD_INDEX 37
#define COMMA_INDEX 38
#define NEW_LINE 39
// INSERT KERNEL(S) HERE
/*
* This function calculates the frequency of each character in the character buffer. It does so by creating local
* histograms and finally merging them with global histogram.
*/
/*
 * Grid-stride histogram kernel: each block accumulates counts into a
 * block-private shared-memory histogram, then merges it into the global
 * `bins` array with atomics.
 * Bin layout: 0-25 = 'a'-'z', 26-35 = '0'-'9' (ASCII 48-57 minus 22),
 * plus SPACE_INDEX / PERIOD_INDEX / COMMA_INDEX / NEW_LINE.
 * Precondition: blockDim.x >= NUM_BINS, so the init/merge guards below cover
 * every bin.
 */
__global__ void calculateFrequency(char* input, unsigned int* bins, unsigned int num_elements)
{
__shared__ unsigned int private_histogram[NUM_BINS];
//Computation of private histogram
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// First NUM_BINS threads zero the shared histogram (shared memory starts
// uninitialized).
if (threadIdx.x < NUM_BINS) {
private_histogram[threadIdx.x] = 0;
}
__syncthreads();
// Grid-stride loop: every thread processes elements i, i+stride, ...
while(i < num_elements) {
if(input[i] >= 97 && input[i] <= 122) { //if the character is small alphabet from a to z
atomicAdd(&private_histogram[input[i]-97], 1);
} else if(input[i] >= 48 && input[i] <= 57) { //if the character is a digit from 0 to 9
atomicAdd(&private_histogram[input[i]-22], 1); // 48-22 = 26 -> digits land in bins 26..35
} else if(input[i] == 32) { //if the character is a space
atomicAdd(&private_histogram[SPACE_INDEX], 1);
} else if(input[i] == 46) { //if the character is a fullstop
atomicAdd(&private_histogram[PERIOD_INDEX], 1);
} else if(input[i] == 44) { //if the character is a comma
atomicAdd(&private_histogram[COMMA_INDEX], 1);
} else if(input[i] == 10) { //if the character is a new line character
atomicAdd(&private_histogram[NEW_LINE], 1);
}
i += stride;
}
__syncthreads();
// Merge this block's private histogram into the global bins.
if (threadIdx.x < NUM_BINS) {
atomicAdd(&bins[threadIdx.x], private_histogram[threadIdx.x]);
}
__syncthreads();
}
/*
* This function reads input character and converts that character into its corresponding string code from the char**
* array passed. The code is written to output array with each character code taking as much space as is the length
* of the code.
*/
/*
 * Encodes each input character into its variable-length code string from
 * characterCodes, writing one fixed-width field of `maxLength` chars per input
 * character into `output`; unused trailing positions in a field are padded
 * with '2'. Bin layout matches calculateFrequency (a-z, 0-9, space, period,
 * comma, newline).
 */
__global__ void encodeDataKernel(char* input, unsigned int num_elements, char** characterCodes, int* codeLengths, int maxLength, char* output) {
    // BUG FIX: the original bounds-checked and indexed input[] with
    // threadIdx.x, so every block re-encoded the first blockDim.x characters;
    // it also let threads past num_elements write padding beyond the output
    // buffer. Use the global index and exit early for out-of-range threads
    // (safe: this kernel uses no shared memory, so no barrier is required).
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if ((unsigned int)i >= num_elements) return;
    char ch = input[i];
    char* code = 0;
    int codeLen = 0;   // stays 0 for unrecognized characters -> field is all padding
    if (ch >= 97 && ch <= 122) {            // lowercase a-z -> codes 0..25
        code = characterCodes[ch - 97];
        codeLen = codeLengths[ch - 97];
    } else if (ch >= 48 && ch <= 57) {      // digits 0-9 -> codes 26..35
        code = characterCodes[ch - 22];
        codeLen = codeLengths[ch - 22];
    } else if (ch == 32) {                  // space
        code = characterCodes[SPACE_INDEX];
        codeLen = codeLengths[SPACE_INDEX];
    } else if (ch == 46) {                  // full stop
        code = characterCodes[PERIOD_INDEX];
        codeLen = codeLengths[PERIOD_INDEX];
    } else if (ch == 44) {                  // comma
        code = characterCodes[COMMA_INDEX];
        codeLen = codeLengths[COMMA_INDEX];
    } else if (ch == 10) {                  // newline
        code = characterCodes[NEW_LINE];
        codeLen = codeLengths[NEW_LINE];
    }
    // Copy the code (clipped to maxLength), then pad the rest of the field.
    int j = i * maxLength;
    int k = 0;
    for (; k < codeLen && k < maxLength; k++, j++) {
        output[j] = code[k];
    }
    for (; k < maxLength; k++, j++) {
        output[j] = '2';   // padding with '2'
    }
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host wrapper: launches the histogram kernel over the whole input buffer.
// All pointers are device pointers; `bins` must be zeroed by the caller.
void histogram(char* input, unsigned int* bins, unsigned int num_elements)
{
    // Ceiling division: enough BLOCK_SIZE-wide blocks to cover every element.
    dim3 blocksPerGrid((num_elements - 1) / BLOCK_SIZE + 1);
    dim3 threadsPerBlock(BLOCK_SIZE);
    calculateFrequency<<<blocksPerGrid, threadsPerBlock>>>(input, bins, num_elements);
}
// Host wrapper: launches the encoding kernel, one thread per input character.
// All pointer arguments are device pointers.
void encodeData(char* input, unsigned int num_elements, char** characterCodes, int* codeLengths, int maxLength, char* output) {
    // Ceiling division: enough BLOCK_SIZE-wide blocks to cover every element.
    dim3 blocksPerGrid((num_elements - 1) / BLOCK_SIZE + 1);
    dim3 threadsPerBlock(BLOCK_SIZE);
    encodeDataKernel<<<blocksPerGrid, threadsPerBlock>>>(input, num_elements, characterCodes, codeLengths, maxLength, output);
}
|
9,403 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
int banyakdata = 1;
int dimensigrid = 1;
int dimensiblok = 1;
// Busy-work kernel: every thread computes the same fixed double-loop sum of
// (1 + j + i) for i, j in [0, 1000) and stores it at its global index.
__global__ void kernelenk(int *res) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int acc = 0;
    for (int outer = 0; outer < 1000; outer++)
    {
        for (int inner = 0; inner < 1000; inner++)
        {
            acc += 1 + inner + outer;
        }
    }
    res[tid] = acc;
}
// CPU reference for kernelenk: fills res[0..banyakdata) with the same
// fixed double-loop sum the kernel computes.
void fserial(int *res) {
    for (int idx = 0; idx < banyakdata; ++idx)
    {
        int acc = 0;
        for (int a = 0; a < 1000; a++)
        {
            for (int b = 0; b < 1000; b++)
            {
                acc += 1 + b + a;
            }
        }
        res[idx] = acc;
    }
}
// Runs the CPU reference, reports elapsed time and a few results.
// Returns 0 on success, 1 on allocation failure (the original fell off the
// end of a non-void function with no return).
int serial(){
    int *res = (int*) malloc(sizeof(int) * banyakdata);
    if (res == NULL)
    {
        fprintf(stderr, "malloc failed\n");
        return 1;
    }
    clock_t begin = clock();
    fserial(res);
    clock_t end = clock();
    // clock() ticks in CLOCKS_PER_SEC units; convert the delta to ms
    // (the original divided raw ticks by 1000, which is only correct when
    // CLOCKS_PER_SEC happens to be 1,000,000).
    double time_spent = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi enkripsi = %f milliseconds\n", time_spent);
    // Print at most the first 5 results — banyakdata may be smaller than 5
    // (it defaults to 1), in which case the original read out of bounds.
    for (int i = 0; i < 5 && i < banyakdata; i++)
    {
        printf("Res %d : %d\n",i,res[i]);
    }
    printf("Res %d : %d\n",banyakdata-1,res[banyakdata-1]);
    free(res);
    return 0;
}
// GPU path: launches kernelenk with the module-level grid/block sizes and
// prints a few results. Returns 0 on success, 1 on allocation failure.
int paralel(){
    int *res, *devres;
    res = (int*) malloc(sizeof(int) * banyakdata);
    if (res == NULL)
    {
        fprintf(stderr, "malloc failed\n");
        return 1;
    }
    if (cudaMalloc((void**)&devres, sizeof(int) * banyakdata) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed\n");
        free(res);
        return 1;
    }
    kernelenk<<<dimensigrid,dimensiblok>>>(devres);
    cudaDeviceSynchronize();
    cudaMemcpy(res, devres, sizeof(int) * banyakdata, cudaMemcpyDeviceToHost);
    // Print at most the first 5 results — banyakdata defaults to 1, so the
    // original's fixed 0..4 loop read past the end of res.
    for (int i = 0; i < 5 && i < banyakdata; i++)
    {
        printf("Res %d : %d\n",i,res[i]);
    }
    printf("Res %d : %d\n",banyakdata-1,res[banyakdata-1]);
    cudaFree(devres);
    free(res);
    return 0;
}
int main(){
// serial();
paralel();
} |
/*
 * Fills two int arrays with two constants. Each thread owns a contiguous
 * chunk of `numberIterations` entries starting at its flat thread index
 * times numberIterations; min() clips the final chunk at numberEntries.
 * Precondition: gridDim.x * blockDim.x * numberIterations >= numberEntries,
 * otherwise the tail of the arrays is left untouched.
 */
__global__ void fillTwoIntegerArraysKernel(
int numberEntries,
int numberIterations,
int* firstArray,
int firstConstant,
int* secondArray,
int secondConstant) {
// First entry of this thread's chunk.
int start = blockIdx.x * blockDim.x * numberIterations + threadIdx.x * numberIterations;
for(int index = start; index < min(start + numberIterations, numberEntries); index++) {
firstArray[index] = firstConstant;
secondArray[index] = secondConstant;
}
} |
9,405 | #include<iostream>
using namespace std;
// Element-wise vector addition: d_out[i] = d1_in[i] + d2_in[i].
// One thread per element; threads past *d_arr_size do nothing.
__global__ void Sum(float* d1_in, float* d2_in, float* d_out, int* d_arr_size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int n = *d_arr_size;
    if (idx < n)
    {
        d_out[idx] = d1_in[idx] + d2_in[idx];
    }
}
// Reads two float arrays from stdin, adds them on the GPU, prints the result.
int main()
{
    int arr_size;
    cout << "Enter array size : ";
    cin >> arr_size;
    if (arr_size <= 0)              // guard against zero/negative sizes
    {
        cout << "Array size must be positive\n";
        return 1;
    }
    // Heap allocations instead of the original variable-length stack arrays
    // (VLAs are non-standard C++ and overflow the stack for large sizes).
    float *h1_in = new float[arr_size];
    float *h2_in = new float[arr_size];
    float *h_out = new float[arr_size];
    int arr_bytes = arr_size * sizeof(float);
    cout<<"Enter "<<arr_size<<" elements array 1 and array 2\n";
    for(int i=0; i<arr_size; i++)
        cin>>h1_in[i];
    for(int i=0; i<arr_size; i++)
        cin>>h2_in[i];
    float *d1_in, *d_out, *d2_in;
    int *d_arr_size;
    cudaMalloc((void**)&d1_in, arr_bytes);
    cudaMalloc((void**)&d2_in, arr_bytes);
    cudaMalloc((void**)&d_out, arr_bytes);
    // BUG FIX: d_arr_size holds an int — the original allocated/copied
    // sizeof(float), which only works by coincidence of equal sizes.
    cudaMalloc((void**)&d_arr_size, sizeof(int));
    cudaMemcpy(d1_in, h1_in, arr_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d2_in, h2_in, arr_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arr_size, &arr_size, sizeof(int), cudaMemcpyHostToDevice);
    // Integer ceiling division avoids the float round-trip of ceil().
    Sum<<<(arr_size + 1023) / 1024, 1024>>>(d1_in, d2_in, d_out, d_arr_size);
    // Blocking D2H copy also synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, arr_bytes, cudaMemcpyDeviceToHost);
    cout<<"Sum of the 2 arrays is\n";
    for(int i=0; i<arr_size; i++)
        cout << h_out[i] << " ";
    cudaFree(d1_in);
    cudaFree(d2_in);
    cudaFree(d_out);
    cudaFree(d_arr_size);
    delete[] h1_in;
    delete[] h2_in;
    delete[] h_out;
}
|
9,406 | #include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
using namespace std;
// Dump an m x n grid to stdout, one bar-delimited row per line,
// followed by a blank line.
void print_matrix(int** world, int m, int n){
    for (int r = 0; r < m; r++){
        cout << "| ";
        for (int c = 0; c < n; c++){
            cout << world[r][c] << " ";
        }
        cout << "|\n";
    }
    cout << "\n";
}
// Allocate an m x n grid of ints with every cell set to 0.
// Caller owns the returned rows and row-pointer array.
int ** create_empty_world(int m, int n){
    int **grid = new int*[m];
    for (int r = 0; r < m; r++){
        grid[r] = new int[n];
        for (int c = 0; c < n; c++){
            grid[r][c] = 0;
        }
    }
    return grid;
}
//just a normal CPU function here. making a world filled with values of 0 or 1
// Build an m x n world whose one-cell border (padding) is dead (0) and whose
// interior cells are randomly alive (1) or dead (0). m and n already include
// the two padding rows/columns added by the caller.
int ** create_world(int m, int n){
    int **world = new int*[m];
    srand(time(0));
    for (int i = 0; i < m; i++){
        world[i] = new int[n];
        for (int j = 0; j < n; j++){
            // Padding is the outermost ring: rows/cols 0 and m-1 / n-1.
            // BUG FIX: the original tested m-2 / n-2, which left the last row
            // and column random and zeroed the second-to-last ring instead.
            if (i == 0 || j == 0 || i == m - 1 || j == n - 1){
                world[i][j] = 0;
            } else {
                // interior cell: value 0 or 1
                world[i][j] = rand() % 2;
            }
        }
    }
    return world;
}
/*
this will take the world, the new world, the height and width of the worlds
We will then go through every value, sum the value of it's 8 neighbours
and give a result of 1 or 0 for each cell
we will do this for the number of turns that we have specified in the command line arguments.
*/
/*
 * One Game of Life step: each thread owns one interior row and sweeps its
 * interior columns, summing the 8 neighbours of each cell and writing the
 * next state into new_world. world/new_world are device arrays of device row
 * pointers; row 0, row m-1, column 0 and column n-1 are zero padding.
 */
__global__
void next_turn(int **world, int **new_world, int m, int n ){
    //getting the index that we are currently in
    int const index_x = threadIdx.x + blockIdx.x * blockDim.x;
    // Interior rows only. BUG FIX: the original guard `index_x < m + 1`
    // allowed index_x == m-1 and even m, so world[index_x+1] read rows m and
    // m+1 — past the end of the allocation.
    if (index_x > 0 && index_x < m - 1){
        for (int index_y = 1; index_y < n - 1; index_y++){
            // Sum of the 8 neighbours.
            int living = world[index_x-1][index_y-1] + world[index_x-1][index_y] + world[index_x-1][index_y+1] +
                         world[index_x][index_y+1] + world[index_x+1][index_y-1] + world[index_x+1][index_y] +
                         world[index_x+1][index_y+1] + world[index_x][index_y-1];
            int current = world[index_x][index_y];
            int new_cell = 0;
            // Live cell survives with 2 or 3 neighbours; dead cell is born with 3.
            if ((current == 1 && (living == 3 || living == 2)) || (current == 0 && living == 3)){
                new_cell = 1;
            }
            new_world[index_x][index_y] = new_cell;
        }
    }
}
//set up numblocks to be a calculation of how many are needed
int const numblocks = 1;
int const blocksize = 1;
// Parses the command line, builds host worlds, mirrors them on the device,
// runs one kernel step, and prints the result.
int main(int argc, char *argv[]){
    //verifying the number of arguments
    if(argc <5 || argc> 6){
        cout<<"Usage: ./gol [width] [height] [iterations] [number of tests]";
        return 1;
    }
    // Parse and validate BEFORE adding the padding, so the zero check can
    // actually fire (the original added 2 first, making `m == 0` unreachable).
    int m = atoi(argv[1]);
    int n = atoi(argv[2]);
    int iterations = atoi(argv[3]);
    int num_tests = atoi(argv[4]);
    if((m == 0) || (n == 0) || (iterations == 0) || (num_tests == 0)){
        cout<< "must enter INT values";
        return 0;
    }
    m = m + 2;   // add one row/column of padding on each side
    n = n + 2;
    //create a world and a new world
    int **world = create_world(m, n);
    int **new_world = create_empty_world(m, n);
    print_matrix(world, m, n);
    // The kernel dereferences int**, so build a device array of device row
    // pointers. BUG FIX: the original cudaMemcpy'd the host pointer-of-pointers
    // as if it were flat data, so the kernel would have chased host addresses.
    int **host_dev_rows = new int*[m];
    int **host_dev_new_rows = new int*[m];
    size_t rowBytes = n * sizeof(int);
    for(int i = 0; i < m; i++){
        cudaMalloc((void **)&host_dev_rows[i], rowBytes);
        cudaMalloc((void **)&host_dev_new_rows[i], rowBytes);
        cudaMemcpy(host_dev_rows[i], world[i], rowBytes, cudaMemcpyHostToDevice);
        cudaMemcpy(host_dev_new_rows[i], new_world[i], rowBytes, cudaMemcpyHostToDevice);
    }
    int **dev_world;
    int **dev_new_world;
    cudaMalloc((void **)&dev_world, m * sizeof(int*));
    cudaMalloc((void **)&dev_new_world, m * sizeof(int*));
    cudaMemcpy(dev_world, host_dev_rows, m * sizeof(int*), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_new_world, host_dev_new_rows, m * sizeof(int*), cudaMemcpyHostToDevice);
    //do next_turn on GPU
    next_turn<<< numblocks, blocksize >>>(dev_world, dev_new_world, m, n);
    //copy result rows back to the host
    for(int i = 0; i < m; i++){
        cudaMemcpy(new_world[i], host_dev_new_rows[i], rowBytes, cudaMemcpyDeviceToHost);
    }
    print_matrix(new_world, m, n);
    // BUG FIX: rows were allocated with new[] — release with delete[],
    // not free() (mismatched allocator is undefined behavior).
    for(int i = 0; i < m ; i++){
        delete[] world[i];
        delete[] new_world[i];
        cudaFree(host_dev_rows[i]);
        cudaFree(host_dev_new_rows[i]);
    }
    delete[] world;
    delete[] new_world;
    delete[] host_dev_rows;
    delete[] host_dev_new_rows;
    cudaFree(dev_new_world);
    cudaFree(dev_world);
}
#define LINK_HEADER_LIBS
#include <stdio.h>
#include <stdlib.h>
#include "config.h"
#include "linearSystemOps.h"
#include "cpuSolver.h"
/*
 * Sets up a random GF(2)-style linear system (helpers from linearSystemOps.h),
 * solves it with the CPU Gaussian-elimination path, and writes both the
 * reference and computed solutions to file. Optional argv[1]/argv[2] override
 * the compile-time ROWS/COLS.
 */
int main(int argc, char **argv)
{
    unsigned long int rowCount, columnCount;
    unsigned long int percent;
    unsigned char* A;
    unsigned char* B;
    unsigned char* x;
    rowCount = ROWS;
    columnCount = COLS;
    percent = PERCENTAGE;
    /* argc is an int: %d, not %lu (mismatched printf format is undefined behavior) */
    printf("Number of arguments = %d\n", argc);
    if(argc == 3) {
        /* strtoul matches the unsigned long targets (atoi truncates to int
         * and silently returns 0 on garbage) */
        rowCount = strtoul(argv[1], NULL, 10);
        columnCount = strtoul(argv[2], NULL, 10);
    }
    if(rowCount == 0 || columnCount == 0)
    {
        printf("Row and column counts must be positive integers\n");
        exit(-1);
    }
    printf("Running solver with following parameters:\n");
    printf("Number of rows = %lu\n", rowCount);
    printf("Number of columns = %lu\n", columnCount);
    printf("Percentage density = %lu\n", percent);
    // --- Allocate memory for input matrix A and B on cpu
    A = (unsigned char*)malloc(sizeof(unsigned char) * rowCount * columnCount);
    B = (unsigned char*)malloc(sizeof(unsigned char) * rowCount);
    x = (unsigned char*)malloc(sizeof(unsigned char) * columnCount);
    if(A == NULL || B == NULL || x == NULL)
    {
        printf("Unable to allocate space for linear system\n");
        exit(-1);
    }
    // --- Initialise the input matrix
    generateLinearSystem(A, B, x, rowCount, columnCount, percent);
    writeVectorToFile(REFERENCE_SOLUTION, x, columnCount);
    gaussianElimination(A, B, rowCount, columnCount);
    backsubstitution(A, B, x, rowCount, columnCount);
    writeVectorToFile(COMPUTED_SOLUTION, x, columnCount);
    free(x);
    free(A);
    free(B);
    return 0;
}
|
9,408 | #include "cuda_runtime.h"
#include "cooperative_groups.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SIZE 100
#include <iostream>
using namespace std;
// In-place tree reduction computing the MINIMUM of input[0 .. 2*blockDim.x)
// into input[0]. (The name and original comments said "sum", but the body
// keeps the smaller of each pair.) Launch with ONE block; the element count
// covered is exactly 2 * blockDim.x and must be a power of two.
__global__ void sum(int* input)
{
    const int tid = threadIdx.x; // thread id
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads) // still alive?
        {
            const int fst = tid * step_size * 2; // left element of this pair
            const int snd = fst + step_size;     // right element of this pair
            if (input[fst] > input[snd])
                input[fst] = input[snd];         // keep the smaller value
        }
        // BUG FIX: without a barrier, a thread could start the next round
        // before its neighbours' writes land (a data race across warps).
        // Loop bounds are uniform across the block, so every thread reaches
        // this __syncthreads().
        __syncthreads();
        step_size <<= 1;           // pairs are twice as far apart next round
        number_of_threads >>= 1;   // half the threads stay active
    }
}
// Fills an array with random values (plus a planted minimum of -2), reduces
// it on the GPU with the min-reduction kernel, and times the launch.
int main()
{
    int count = SIZE;
    cout << "Enter the number of elements:\n" << endl;
    // The tree reduction needs a power-of-two element count and blockDim =
    // elements/2 threads. Round up and pad with INT_MAX (identity for min).
    // BUG FIX: the original launched a fixed 32 threads, which only ever
    // reduced the first 64 of the 100 elements.
    int padded = 1;
    while (padded < count) padded <<= 1;   // 100 -> 128
    const int size = padded * sizeof(int);
    int* h = (int*)malloc(size);
    cout << "Enter the elements:\n" << endl;
    for (int i = 0; i < count; i++)
    {
        h[i] = rand()%500;
    }
    h[2] = -2;                              // plant a known minimum
    for (int i = count; i < padded; i++)
    {
        h[i] = 0x7fffffff;                  // INT_MAX padding, never the min
    }
    for (int i = 0; i < count; i++)
    {
        printf("%d ",h[i]);
    }
    int* d;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    float elapsed=0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // One block of padded/2 threads covers all `padded` elements.
    sum <<< 1, padded / 2 >>>(d);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize (stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout<<"\nThe elapsed time in gpu was : "<<elapsed<<"\n";
    int result;
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "Min is " << result << endl;
    getchar();
    cudaFree(d);
    free(h);
    return 0;
}
/*
nvcc 3_2.cu -o a
a.exe
Thread: This is just an execution of a kernel with a given index. Each thread uses its index to access elements in array (see the kernel in my first CUDA program) such that the collection of all threads cooperatively processes the entire data set.
Block: This is a group of threads. There’s not much you can say about the execution of threads within a block – they could execute concurrently or serially and in no particular order. You can coordinate the threads, somewhat, using the __syncthreads() function that makes a thread stop at a certain point in the kernel until all the other threads in its block reach the same point.
Grid: This is a group of blocks. There’s no synchronization at all between the blocks
may allow up to 8 thread blocks to be assigned to an SM.
After a block of threads is assigned to a SM, it is divided into sets of 32 threads, each called a warp. However, the size of a warp depends upon the implementation.
https://www.tutorialspoint.com/cuda/index.htm
*/
|
9,409 | //The simple version of the Matrix _ multiplication:
//AUTHOR: SAURAV RAI
//REGD NO: 17558
#include<cuda.h>
#include<stdio.h>
#define BLOCKSIZE 32
void MatrixMultiplication(float *,float *,float *,int);
// Builds Width x Width matrices of ones in pinned host memory, multiplies
// them on the GPU, and prints the result. Usage: ./a.out <Width>
int main(int argc,char const*argv[]) {
    // Guard before touching argv[1] (the original dereferenced it unchecked).
    if (argc < 2) {
        printf("Usage: %s <matrix width>\n", argv[0]);
        return 1;
    }
    const int Width = atoi(argv[1]);
    if (Width <= 0) {
        printf("Width must be a positive integer\n");
        return 1;
    }
    float *M,*N,*P;
    int size = Width*Width*sizeof(float);
    // Pinned (page-locked) host allocations for faster transfers.
    cudaMallocHost((void **)&M,size);
    cudaMallocHost((void **)&N,size);
    cudaMallocHost((void **)&P,size);
    for(int i = 0; i < (Width*Width) ; i++)
    {
        M[i] = 1;
        N[i] = 1;
        P[i] = 0;
    }
    MatrixMultiplication(M, N, P, Width);
    for(int i = 0; i < (Width*Width) ; i++)
    {
        printf("%f \n", P[i]);
    }
    // BUG FIX: memory from cudaMallocHost must be released with
    // cudaFreeHost — cudaFree is for device allocations.
    cudaFreeHost(M);
    cudaFreeHost(N);
    cudaFreeHost(P);
    return 0;
}
//Matrix multiplication kernel - thread specification
// Naive matrix-multiply kernel: each thread computes one element of
// Pd = Md * Nd (all Width x Width, row-major).
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width)
{
    //2D Thread ID
    int column = blockIdx.x * BLOCKSIZE + threadIdx.x;
    int row = blockIdx.y * BLOCKSIZE + threadIdx.y;
    // BUG FIX: the guard must be >= — row == Width (or column == Width) is
    // already out of bounds; the original `>` let boundary threads read and
    // write one row/column past the end of the matrices.
    if (row >= Width || column >= Width)
        return;
    // Pvalue accumulates the dot product of Md's row and Nd's column.
    float Pvalue = 0;
    for (int k = 0; k < Width ; ++k)
    {
        Pvalue += Md[row *Width + k] * Nd[k *Width + column];
    }
    Pd[ row*Width + column] = Pvalue;
}
// Host wrapper: stages M and N on the device, launches the multiply kernel
// over a 2D grid of BLOCKSIZE x BLOCKSIZE tiles, and fetches P back.
void MatrixMultiplication(float *M, float *N, float *P, int Width) {
    const int bytes = Width * Width * sizeof(float);
    float *Md, *Nd, *Pd;
    // Device allocations for the two inputs and the output.
    cudaMalloc((void**)&Md, bytes);
    cudaMalloc((void**)&Nd, bytes);
    cudaMalloc((void**)&Pd, bytes);
    cudaMemcpy(Md, M, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(Nd, N, bytes, cudaMemcpyHostToDevice);
    // Ceiling division so partial tiles at the matrix edges are covered.
    const unsigned int tiles = (Width + BLOCKSIZE - 1) / BLOCKSIZE;
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
    dim3 dimGrid(tiles, tiles);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
    // Blocking copy back — also synchronizes with the kernel.
    cudaMemcpy(P, Pd, bytes, cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}
|
9,410 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cufft.h>
#include <iostream>
#include <complex>
#include <cuda_runtime.h>
#define imin(a,b) (a<b?a:b)
// Grid-stride element-wise product with normalization:
// vec2[i] = (a[i] * (1/NDIM)) * vec2[i], for all i in [0, NDIM).
__global__ void prod( cuDoubleComplex *vec2, cuDoubleComplex *a, int NDIM)
{
    const int stride = blockDim.x * gridDim.x;
    // 1/NDIM as a complex scalar (real part only).
    cuDoubleComplex scale = make_cuDoubleComplex(1.0 / NDIM, 0.0);
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < NDIM; i += stride)
    {
        vec2[i] = cuCmul(cuCmul(a[i], scale), vec2[i]);
    }
}
/*--------- function called from main fortran programn ---------------*/
/*--------- function called from main fortran programn ---------------*/
/*
 * Computes wsl = IFFT( vtilde * FFT(psi) / NDIM ) on the GPU.
 * Fortran-callable (hence the trailing underscore and pointer arguments).
 *   wsl    - output array, NDIM complex doubles
 *   psi    - input array, transformed in place on the device
 *   NX/NY/NZ - grid dimensions; NDIM = NX*NY*NZ
 *   Xcase  - 1/2/3 selects a 1D/2D/3D cuFFT plan
 *   vtilde - frequency-domain multiplier, NDIM complex doubles
 * Note: the 1/NDIM factor applied in prod() compensates for cuFFT's
 * unnormalized forward+inverse transform pair.
 */
extern "C" void kernel_imestfft_(cuDoubleComplex *wsl,cuDoubleComplex *psi,int *NX,int *NY,int *NZ, int *Xcase,cuDoubleComplex *vtilde)
{
cuDoubleComplex *psi_d, *vtilde_d;
int NDX = *NX;
int NDY = *NY;
int NDZ = *NZ;
int NDIM= NDX*NDY*NDZ;
int dcase= *Xcase;
// ---- CUDA variables: -----------------------------------------------------------------------
// Launch config clamped so tiny problems don't launch more threads than elements;
// prod() uses a grid-stride loop, so any positive sizes are correct.
const int threads = imin(64,NDIM); //dim3 (16,1,1);
const int blocks = imin(32, NDIM/threads ); // ; //imin(8, (M+threads)/threads );
//----------------------------------------------------------------------------------------------
cufftHandle plan;
cudaMalloc( (void **)&psi_d, sizeof(cuDoubleComplex) * NDIM );
cudaMalloc( (void **)&vtilde_d, sizeof(cuDoubleComplex) *NDIM);
cudaMemcpy( psi_d, psi, sizeof(cuDoubleComplex)*NDIM, cudaMemcpyHostToDevice );
cudaMemcpy( vtilde_d, vtilde, sizeof(cuDoubleComplex)*NDIM, cudaMemcpyHostToDevice );
// printf("Kernel GPU \n");
// Plan dimensions are passed slowest-varying first (NZ, NY, NX), matching
// Fortran's column-major storage of the arrays.
if (dcase == 1) cufftPlan1d(&plan, NDX, CUFFT_Z2Z, 1);
if (dcase == 2) cufftPlan2d(&plan, NDY, NDX, CUFFT_Z2Z);
if (dcase == 3) cufftPlan3d(&plan, NDZ, NDY, NDX, CUFFT_Z2Z);
// Forward FFT in place, multiply by vtilde/NDIM, inverse FFT in place.
cufftExecZ2Z(plan, psi_d, psi_d, CUFFT_FORWARD);
prod<<<blocks,threads>>>( psi_d, vtilde_d, NDIM);
cufftExecZ2Z(plan, psi_d, psi_d, CUFFT_INVERSE);
/* copy vectors from GPU to CPU */
cudaMemcpy(wsl, psi_d, sizeof(cuDoubleComplex) * NDIM, cudaMemcpyDeviceToHost);
cufftDestroy(plan);
cudaFree(psi_d);
cudaFree(vtilde_d);
return;
}
|
9,411 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated stress-test kernel (see the header comment above): performs
// an arbitrary chain of float operations on its arguments and prints the
// final value of `comp`. Intended to be launched <<<1,1>>>.
// NOTE(review): the three nested loops all reuse the name `i`, shadowing each
// other — deliberate in this generated code, left untouched.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp == (-1.3650E-35f - -1.6966E35f + (var_3 - (var_4 * +0.0f)))) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp += sinf((-1.7156E35f + coshf(var_6 - (-0.0f + var_7))));
comp += -1.8569E9f - -1.0959E-36f + var_8 * var_9 + -1.9780E-36f;
if (comp > (var_10 * var_11)) {
comp = (+1.6505E-28f - (+1.6974E2f - (var_12 * var_13 + (var_14 + var_15))));
comp += (var_16 / logf(-1.4145E-36f / +1.8719E34f / -1.6747E-17f + var_17 + (+1.6221E34f + var_18)));
comp = (var_19 / atan2f((var_20 - (var_21 - var_22 - var_23)), -1.1830E-37f - +1.0270E-44f + (var_24 / var_25)));
comp += (-1.3441E-42f * (var_26 * (+1.0655E12f * -1.6187E-37f)));
}
for (int i=0; i < var_5; ++i) {
comp += (+0.0f / log10f(-0.0f * (var_27 + +0.0f - (var_28 * (+0.0f * var_29)))));
float tmp_1 = +1.3321E36f;
comp = tmp_1 + (var_30 * (+1.2326E26f + (-1.2799E20f / +1.1198E-42f)));
}
}
}
}
// Device-side printf: the kernel's only observable output.
printf("%.17g\n", comp);
}
// Allocate a 10-element float array with every slot set to v.
// Caller owns (and must free) the returned buffer.
float* initPointer(float v) {
    const int count = 10;
    float *buffer = (float*) malloc(sizeof(float) * count);
    for (int idx = 0; idx < count; ++idx) {
        buffer[idx] = v;
    }
    return buffer;
}
// Driver for the generated kernel: parses 31 numeric command-line values and
// forwards them to compute<<<1,1>>>.
// NOTE(review): argv[1]..argv[31] are read without an argc check — fewer than
// 31 arguments dereferences past argv. Left as-is per the "do not modify"
// header of this generated test.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31);
// Wait for the kernel so its printf output is flushed before exit.
cudaDeviceSynchronize();
return 0;
}
|
9,412 | /* Matrix normalization.
* * Compile with "gcc matrixNorm.c"
* */
/* ****** ADD YOUR CODE AT THE END OF THIS FILE. ******
* * You need not submit the provided code.
* */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#include <cuda_runtime.h>
/* Program Parameters */
#define MAXN 13000 /* Max value of N */
int N; /* Matrix size */
#define THREADS_PER_BLOCK 64
#define BLOCKS_PER_GRID_ROW 32
/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];
float *newA, *newB;
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
void matrixNorm();
/* returns a seed for srand based on the time */
/* Derive an srand() seed from the sub-second part of the current time. */
unsigned int time_seed() {
    struct timeval now;
    struct timezone tz_unused;  /* required by gettimeofday's signature */
    gettimeofday(&now, &tz_unused);
    return (unsigned int) now.tv_usec;
}
/* Set the program parameters from the command-line arguments */
/* Parse the command line: argv[1] is the matrix dimension (stored in the
 * global N), optional argv[2] is an explicit random seed. Exits on bad or
 * missing arguments. */
void parameters(int argc, char **argv) {
    char uid[32]; /* User name — kept: the randm() macro above refers to it */
    srand(time_seed()); /* Randomize by default */
    if (argc < 2) {
        printf("Usage: %s <matrix_dimension> [random seed]\n",
               argv[0]);
        exit(0);
    }
    if (argc == 3) {
        /* Explicit seed overrides the time-based one */
        int seed = atoi(argv[2]);
        srand(seed);
        printf("Random seed = %i\n", seed);
    }
    N = atoi(argv[1]);
    if (N < 1 || N > MAXN) {
        printf("N = %i is out of range.\n", N);
        exit(0);
    }
    /* Print parameters */
    printf("\nMatrix dimension N = %i.\n", N);
}
/* Initialize A and B*/
/* Fill the global N x N matrix A with pseudo-random floats and zero B. */
void initialize_inputs() {
    printf("\nInitializing...\n");
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            A[row][col] = (float)rand() / 32768.0;
            B[row][col] = 0.0;
        }
    }
}
/* Print input matrices */
/* Print matrix A, but only for small problems (N < 10). Note the loops run
 * column-outer, so the display is A transposed. */
void print_inputs() {
    if (N >= 10) {
        return;  /* too big to print */
    }
    printf("\nA =\n\t");
    for (int col = 0; col < N; col++) {
        for (int row = 0; row < N; row++) {
            printf("%5.2f%s", A[row][col], (row < N-1) ? ", " : ";\n\t");
        }
    }
}
/* Print matrix B row by row, but only for small problems (N < 10). */
void print_B() {
    if (N >= 10) {
        return;  /* too big to print */
    }
    printf("\nB =\n\t");
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
        }
    }
}
// Driver: parse arguments, initialize A/B, time matrixNorm() with both
// wall-clock (gettimeofday) and process CPU time (times), then print results.
int main(int argc, char **argv) {
/* Timing variables */
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
/* Initialize A and B */
/* Nikhita: Note this function has been changed
* to initialize the data in column major format
* as we aim to do the block distribution
* in a column wise fashion
*/
initialize_inputs();
/* Print input matrices */
print_inputs();
/* Start Clock */
printf("\nStarting clock.\n");
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
/* Gaussian Elimination */
matrixNorm();
/* Stop Clock */
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/* Convert both timestamps to microseconds before subtracting */
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
print_B();
/* Display timing results */
printf("\nElapsed time = %g ms.\n",
(float)(usecstop - usecstart)/(float)1000);
printf("(CPU times are accurate to the nearest %g ms)\n",
1.0/(float)CLOCKS_PER_SEC * 1000.0);
printf("My total CPU time for parent = %g ms.\n",
(float)( (cputstop.tms_utime + cputstop.tms_stime) -
(cputstart.tms_utime + cputstart.tms_stime) ) /
(float)CLOCKS_PER_SEC * 1000);
printf("My system CPU time for parent = %g ms.\n",
(float)(cputstop.tms_stime - cputstart.tms_stime) /
(float)CLOCKS_PER_SEC * 1000);
printf("My total CPU time for child processes = %g ms.\n",
(float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
(cputstart.tms_cutime + cputstart.tms_cstime) ) /
(float)CLOCKS_PER_SEC * 1000);
/* Contrary to the man pages, this appears not to include the parent */
printf("--------------------------------------------\n");
exit(0);
}
/*
 * Per-block reduction producing, for each block, the partial sum of the
 * inputs (into results[blockIdx.x]) and the partial sum of their squares
 * (into otherresults[blockIdx.x]).
 * Preconditions: blockDim.x <= 256 (the static shared arrays below), and
 * blockDim.x should be a power of two — the halving loop skips elements
 * otherwise. The dynamic shared-memory size passed at launch is unused here
 * (the commented-out extern declaration was replaced by static arrays).
 */
__global__ void block_sum(const float *input, float *results, float *otherresults, const size_t n)
{
__shared__ float sdata[256];
__shared__ float otherdata[256];
//extern __shared__ float otherdata[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tx = threadIdx.x;
// load input into __shared__ memory
// Out-of-range threads contribute 0, the identity for both sums.
float x = 0;
if(i < n) {
x = input[i];
}
sdata[tx] = x;
otherdata[tx] = x*x;
__syncthreads();
// block-wide reduction in __shared__ mem
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(tx < offset)
{
// add a partial sum upstream to our own
sdata[tx] += sdata[tx + offset];
otherdata[tx] += otherdata[tx + offset];
}
// Barrier is outside the divergent if: every thread reaches it each pass.
__syncthreads();
}
// finally, thread 0 writes the result
if(threadIdx.x == 0)
{
// note that the result is per-block
// not per-thread
results[blockIdx.x] = sdata[0];
otherresults[blockIdx.x] = otherdata[0];
}
}
/*
 * GPU-reduces input[0..n): returns a malloc'd float[2] holding
 * { sum(x), sum(x^2) }. The caller MUST free the returned buffer.
 * d_A, sums, sigmas and temp are caller-provided device scratch buffers;
 * sums/sigmas need room for num_blocks + 1 floats.
 * Assumes num_blocks <= 256 so the second-pass single block (of num_blocks
 * threads) fits block_sum's static shared arrays.
 */
float* sum (float *input, size_t n, float *d_A, float *sums, float *sigmas, float *temp) {
    size_t block_size = 256;
    size_t num_blocks = n/block_size + ((n%block_size) ? 1 : 0);
    // BUG FIX: copy n elements as requested — the original copied the global
    // N, which only worked because every caller happened to pass n == N.
    cudaMemcpy( d_A, input, sizeof(float) * n, cudaMemcpyHostToDevice );
    int smem_sz = 2 * block_size*sizeof(float);
    // Pass 1: per-block partial sums of x and x^2.
    block_sum<<<num_blocks,block_size,smem_sz>>> (d_A, sums, sigmas, n);
    // Pass 2: one block reduces the partials; the scalar result lands just
    // past the partial array (sums + num_blocks / sigmas + num_blocks).
    block_sum<<<1,num_blocks,num_blocks*sizeof(float)>>> (sums, sums + num_blocks, temp, num_blocks);
    block_sum<<<1,num_blocks,num_blocks*sizeof(float)>>> (sigmas, sigmas + num_blocks, temp, num_blocks);
    float result = 0;
    cudaMemcpy(&result, sums + num_blocks, sizeof(float) , cudaMemcpyDeviceToHost );
    float otherresult = 0;
    cudaMemcpy(&otherresult, sigmas + num_blocks, sizeof(float) , cudaMemcpyDeviceToHost );
    float * results = (float *)malloc(sizeof(float) * 2);
    results[0] = result;
    results[1] = otherresult;
    return results;
}
/* ------------------ Above Was Provided --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* * defined in the beginning of this code. B[][] is initialized to zeros.
* */
/* Normalizes each column of A into B: B[.][col] = (x - mean) / stddev,
 * with the mean and sum-of-squares reduced on the GPU. Columns with zero
 * deviation are written as all zeros. Uses the globals N, A and B. */
void matrixNorm() {
    int row, col;
    float mu, sigma; // Mean and Standard Deviation
    printf("Computing Parallely.\n");
    size_t block_size = 256;
    size_t num_blocks = N/block_size + ((N%block_size) ? 1 : 0);
    // Device scratch shared by every column's reduction (allocated once).
    float *sums = 0;
    cudaMalloc((void**)&sums, sizeof(float) * (num_blocks + 1));
    float *sigmas = 0;
    cudaMalloc((void**)&sigmas, sizeof(float) * (num_blocks + 1));
    float *d_A = 0;
    cudaMalloc( (void**)&d_A, sizeof(float) * N);
    float *temp = 0;
    cudaMalloc((void**)&temp, sizeof(float) * (num_blocks + 1));
    for (col=0; col < N; col++) {
        // GPU-reduce sum(x) and sum(x^2) for this column's data (stored in
        // row `col` of A, per the column-major initialization note above).
        float *calcs = sum ((float *)A[col], N, d_A, sums, sigmas, temp);
        mu = calcs[0] / (float) N;
        // variance = (sum(x^2) + N*mu^2 - 2*mu*sum(x)) / N, i.e. E[(x-mu)^2].
        float variance = (calcs[1] + N*powf(mu, 2.0f) - 2 * mu * calcs[0])/(float)N;
        // BUG FIX: normalization divides by the standard deviation — the
        // original divided by the variance (missing sqrtf).
        sigma = sqrtf(variance);
        // BUG FIX: sum() mallocs its result; the original leaked it per column.
        free(calcs);
        for (row=0; row < N; row++) {
            if (sigma == 0.0) {
                B[row][col] = 0.0;
            } else {
                B[row][col] = (A[col][row] - mu) / sigma;
            }
        }
    }
    cudaFree(sums); cudaFree(sigmas); cudaFree(d_A); cudaFree(temp);
}
|
9,413 | #include "includes.h"
// Empty kernel: launching it performs no device work. Presumably used by the
// surrounding project as a named stream marker / synchronization aid —
// TODO(review): confirm against the callers in includes.h.
__global__ void sync_gdn_groups() { } |
9,414 | #include <stdio.h>
#include <stdlib.h>
/*
 * CSR sparse matrix-vector product: each thread handles a contiguous chunk of
 * rows (the first n % blockDim.x threads take one extra row) and writes each
 * row's dot product into xArr[row]. rowArr = CSR row pointers (n rows),
 * colArr/valueArr = column indices and values (m nonzeros).
 * NOTE(review): xArr is both read and overwritten in place, so rows computed
 * by other threads may observe old or new values — TODO confirm whether a
 * separate output vector (Jacobi-style) was intended.
 */
__global__
void calculateX(int *rowArr, int *colArr, double *valueArr, double *xArr, int n, int m)
{
    int dist = n/blockDim.x;
    if(n%blockDim.x > threadIdx.x)
        dist = dist + 1;
    int s = ((n%blockDim.x>(threadIdx.x-1))? dist : n/blockDim.x)*threadIdx.x;
    // BUG FIX: the original outer for-loop had no braces, so `xArr[i] = sum`
    // ran only once, after the loop, writing a single accumulated total to
    // index s+dist instead of one dot product per row.
    for (int i = s; i < s + dist; i++)
    {
        double sum = 0;
        // Row i's nonzeros span rowArr[i] .. rowArr[i+1]-1 (m for the last row).
        for (int j = rowArr[i]; j < ((i+1 < n)? rowArr[i+1] : m); j++)
            sum = sum + valueArr[j] * xArr[colArr[j]];
        xArr[i] = sum;
    }
}
int main(int argc, char *argv[])
{
    /* Iterated CSR sparse matrix-vector product on the GPU.
       argv: <threads per block> <iterations> <print flag 0|1> <matrix file>.
       The file holds three header integers (size, unused, nnz) followed by
       1-based "row col value" triplets. */
    if (argc < 5)
    {
        fprintf(stderr, "usage: %s <threads> <iterations> <print 0|1> <matrix file>\n", argv[0]);
        return 1; /* FIX: argv was previously dereferenced unchecked */
    }
    int threads = atoi(argv[1]);
    int iterations = atoi(argv[2]);
    int answer = atoi(argv[3]);
    FILE *file = fopen(argv[4], "r");
    if (file == NULL)
    {
        fprintf(stderr, "cannot open matrix file %s\n", argv[4]);
        return 1; /* FIX: a failed fopen previously crashed inside fscanf */
    }
    int num;
    double num2;
    int row;
    int col;
    fscanf(file, "%d", &num);
    int sizeOfMatrix = num;
    fscanf (file, "%d", &num);  /* second header field is unused */
    fscanf(file, "%d", &num);
    int numOfNumbers = num;     /* number of non-zero entries */
    /* NOTE(review): ~1.8 GB static dense staging buffer; the matrix is
       densified here and re-compressed to CSR below. */
    static double matrix[15000][15000];
    for(row = 0; row < sizeOfMatrix; row++)
        for(col = 0; col < sizeOfMatrix; col++)
            matrix[row][col] = 0;
    /* NOTE(review): the feof-controlled loop re-processes the final triplet
       once after EOF; harmless here since it re-stores the same value. */
    while(!feof (file))
    {
        fscanf(file, "%d", &num);
        row = num-1;            /* the file is 1-based */
        fscanf(file, "%d", &num);
        col = num-1;
        fscanf(file, "%lf", &num2);
        matrix[row][col] = num2;
    }
    fclose(file);               /* FIX: the file handle was leaked */
    double *x = (double *)malloc(sizeOfMatrix*sizeof(double));
    for(row = 0; row < sizeOfMatrix; row++)
        x[row] = 1;
    /* CSR arrays: row offsets / column indices / values. */
    int *row_ptr = (int *)malloc(sizeOfMatrix*sizeof(int));
    int *col_ind = (int *)malloc(numOfNumbers*sizeof(int));
    double *values = (double *)malloc(numOfNumbers*sizeof(double));
    int count = 0;
    int first = 0;              /* first non-zero of the row seen yet? */
    for(row = 0; row < sizeOfMatrix; row++)
    {
        for(col = 0; col < sizeOfMatrix; col++)
        {
            if(matrix[row][col] != 0 && first == 0)
            {
                row_ptr[row] = count;
                col_ind[count] = col;
                values [count] = matrix[row][col];
                count ++;
                first = 1;
            }
            else if(matrix[row][col] != 0 && first == 1)
            {
                col_ind [count] = col;
                values [count] = matrix[row][col];
                count++;
            }
        }
        if(first == 0)
            row_ptr[row] = -1;  /* empty row; patched below */
        first = 0;
    }
    /* Patch empty rows so they point at the next non-empty row's offset
       (giving them a zero-length extent in the kernel). */
    int i = 1;
    for(row = 0; row < sizeOfMatrix; row++)
        if(row_ptr[row] == -1)
        {
            while(1)
            {
                if(row_ptr[row + i] != -1)
                {
                    row_ptr[row] = row_ptr[row+i];
                    break;
                }
                i++;
            }
            i = 1;
        }
    int *rowArr, *colArr;
    double *valueArr, *xArr;
    cudaMalloc(&rowArr, sizeOfMatrix*sizeof(int));
    cudaMalloc(&colArr, numOfNumbers*sizeof(int));
    cudaMalloc(&valueArr, numOfNumbers*sizeof(double));
    cudaMalloc(&xArr, sizeOfMatrix*sizeof(double));
    cudaMemcpy(rowArr, row_ptr, sizeOfMatrix*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(colArr, col_ind, numOfNumbers*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(valueArr, values, numOfNumbers*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(xArr, x, sizeOfMatrix*sizeof(double), cudaMemcpyHostToDevice);
    for(row = 0; row < iterations; row++)
    {
        calculateX<<<1, threads>>>(rowArr, colArr, valueArr, xArr, sizeOfMatrix, numOfNumbers);
        cudaDeviceSynchronize();  /* FIX: cudaThreadSynchronize() is deprecated */
    }
    cudaMemcpy(x, xArr, sizeOfMatrix*sizeof(double), cudaMemcpyDeviceToHost);
    if(answer == 1)
    {
        /* Dump the CSR arrays and the result vector for inspection. */
        printf("ROW");
        for(row = 0; row < sizeOfMatrix; row++)
            printf("%d ",row_ptr[row]);
        printf("\n");
        printf("COL");
        printf("\n");
        for(row = 0; row < numOfNumbers; row++)
            printf("%d ",col_ind[row]);
        printf("\n");
        printf("VALUES");
        printf("\n");
        for(row = 0; row < numOfNumbers; row++)
            printf("%lf ",values[row]);
        printf("\n");
        printf("X ARRAY");
        printf("\n");
        for(row = 0; row < sizeOfMatrix; row++)
            printf("%lf ",x[row]);
    }
    cudaFree(rowArr);
    cudaFree(colArr);
    cudaFree(valueArr);
    cudaFree(xArr);
    free(row_ptr);
    free(col_ind);
    free(values);
    free(x);
    return 0;
}
9,415 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<unistd.h>
#include<time.h>
__global__ void multiply(int *vec, int *mat, int *out, const int N, const int M)
{
    /* out[tid] = dot(vec, row tid of mat) — one thread per output row.
       N = vector length, M = number of rows; the device printf calls are
       debug traces retained from the original.
       NOTE(review): the row stride is M (mat[tid*M + i]); this only matches
       the row-major layout when N == M, as in main() below — confirm
       before reusing with rectangular matrices. */
    int tid=threadIdx.x+blockIdx.x*blockDim.x;
    int sum=0;
    printf("\n tid = %d",tid);
    if(tid<M)
    {
        for(int i=0; i<N; i++)
        {
            sum += vec[i]*mat[(tid*M)+i];
            printf("\n tid*M+i = %d", (tid*M)+i);
            printf("\n vec[%d] = %d",i,vec[i]);
            printf("\n mat[%d] = %d", (tid*M)+i, mat[(tid*M)+i]);
        }
        /* BUG FIX: this store was outside the tid<M guard, so every
           out-of-range thread wrote 0 past the end of out[]. */
        out[tid]=sum;
    }
}
__global__ void printmatscreen(int* mat, int N)
{
    // Debug helper: dumps an N x N row-major matrix via device-side printf.
    // Intended to be launched single-threaded (<<<1,1>>>).
    for (int row = 0; row < N; ++row)
    {
        printf("\n");
        for (int col = 0; col < N; ++col)
            printf("%d ", mat[row * N + col]);
    }
    printf("\n");
}
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
    /* Allocate a zero-initialised arraySizeX x arraySizeY int matrix as an
       array of row pointers.
       Returns NULL on allocation failure (BUG FIX: the original never
       checked malloc and would write through a NULL row pointer). */
    int** theArray = (int**) malloc(arraySizeX*sizeof(int*));
    if (theArray == NULL)
        return NULL;
    for (int i = 0; i < arraySizeX; i++)
    {
        /* calloc zeroes the row, replacing the original explicit loop. */
        theArray[i] = (int*) calloc(arraySizeY, sizeof(int));
        if (theArray[i] == NULL)
        {
            while (i-- > 0)
                free(theArray[i]);
            free(theArray);
            return NULL;
        }
    }
    return theArray;
}
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
    /* Allocate a ragged 2D int array of `rows` row pointers, where the
       blocksize consecutive rows of block i each get columns[i] ints, all
       zero-initialised.
       NOTE(review): only blocks*blocksize row pointers are ever assigned;
       if rows > blocks*blocksize the remaining pointers stay
       indeterminate — confirm callers always pass rows == blocks*blocksize
       (see the warning comment before the commented-out call site in main).
       NOTE(review): malloc results are not checked. */
    int** theArray;
    theArray = (int**) malloc(rows*sizeof(int*));
    int i, j, k;
    for (i = 0; i < blocks; i++)
    {
        k=columns[i];   /* width shared by every row of this block */
        for (j=0; j < blocksize; j++)
        {
            theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
        }
    }
    /* Zero every allocated slot. */
    for (i=0;i<blocks;i++)
    {
        for (j=0;j<blocksize;j++)
        {
            for (k=0;k<columns[i];k++)
            {
                theArray[i*blocksize+j][k]=0;
            }
        }
    }
    return theArray;
}
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
    /* Copy the first columns[i] entries of each row of `theArray` into a
       freshly allocated ragged array of the same block layout (see
       Make2DVariableIntArray).  The input array is not freed.
       NOTE(review): reads theArray[r][k] for k < columns[block(r)] — the
       source rows must be at least that wide; confirm at the call site. */
    int** NewArray=Make2DVariableIntArray(rows,blocks,blocksize,columns);
    int i, j, k;
    for (i=0;i<blocks;i++)
    {
        for (j=0;j<blocksize;j++)
        {
            for (k=0;k<columns[i];k++)
            {
                NewArray[i*blocksize+j][k]=theArray[i*blocksize+j][k];
            }
        }
    }
    return NewArray;
}
void init_zeros(int** matrix, int N)
{
    // Clear every entry of an N x N matrix stored as row pointers.
    for (int r = 0; r < N; ++r)
        for (int c = 0; c < N; ++c)
            matrix[r][c] = 0;
}
void printmat(int** matrix, int N, int Nj)
{
    /* Print an N x Nj matrix to stdout, one row per line.
       BUG FIX: the inner loop iterated to N (the row count) instead of Nj,
       so the column-count parameter was silently ignored and non-square
       matrices were printed incorrectly. */
    int i,j;
    for (i=0;i<N;i++)
    {
        printf("\n");
        for (j=0;j<Nj;j++)
        {
            printf("%d ",matrix[i][j]);
        }
    }
    printf("\n");
}
void printtofile(int** matrix, int K, char* filename)
{
    /*
    Writes a K x K matrix to `filename`, tab-separated, one row per line.
    BUG FIX: the stream was never closed, leaking the FILE* and leaving
    buffered output unflushed; an fopen failure check was also added.
    */
    FILE *fp;
    fp=fopen(filename,"wt");
    if (fp == NULL)
    {
        fprintf(stderr, "printtofile: cannot open %s\n", filename);
        return;
    }
    int i,j;
    for (i=0;i<K;i++)
    {
        fprintf(fp, "\n");
        for (j=0;j<K;j++)
        {
            fprintf(fp, "%d\t", matrix[i][j]);
        }
    }
    fclose(fp);
}
void printtofile1D(int* matrix, int K, char* filename)
{
    /*
    Writes a flat array of K*K ints to `filename` as a K x K grid,
    tab-separated, one row per line.
    BUG FIX: the stream was never closed (leaked FILE*, unflushed output);
    an fopen failure check was also added.
    */
    FILE *fp;
    fp=fopen(filename,"wt");
    if (fp == NULL)
    {
        fprintf(stderr, "printtofile1D: cannot open %s\n", filename);
        return;
    }
    int i,j;
    int counters=0;   /* linear read cursor into matrix */
    for (i=0;i<K;i++)
    {
        fprintf(fp, "\n");
        for (j=0;j<K;j++)
        {
            fprintf(fp, "%d \t", matrix[counters]);
            counters++;
        }
    }
    fclose(fp);
}
int* Make1DIntArray(int arraySizeX)
{
    /* Allocate an int vector of length arraySizeX with every slot zeroed.
       calloc yields the same zero-initialised result as the original
       malloc-plus-clearing-loop. */
    return (int*) calloc(arraySizeX, sizeof(int));
}
void freese(int sizeX, int sizeY, double** ptr)
{
    // Release each of the sizeX row buffers, then the row-pointer array
    // itself. (sizeY is unused; kept for interface compatibility.)
    for (int row = 0; row < sizeX; ++row)
        free(ptr[row]);
    free(ptr);
}
int main()
{
    /* Experimental driver for sparse matrix * vector with a SELL-C-like
       format.  Only the dense path is active: read a 6x6 matrix and a
       6-vector from mat.txt / vec.txt, flatten the matrix, and multiply on
       the GPU with multiply<<<N,N>>>.  The large commented-out regions
       below are an unfinished SELL-C conversion (per-row compression,
       row-width sorting, variable-width repacking) kept for reference.
       NOTE(review): fopen results, the unused CSR-ish buffers, and the
       sleep() calls are all left exactly as in the original. */
    const int N=6;
    // const int Dsize=1000;
    FILE *arr, *vec;
    int i,j,maxrowwidth=0,tint=0;
    int** a=Make2DIntArray(N,N);
    int* aflat=Make1DIntArray(N*N);
    // int* val=Make1DIntArray(Dsize);
    // int* col=Make1DIntArray(Dsize);
    // int* row=Make1DIntArray(Dsize);
    int* result=Make1DIntArray(N);
    int* vecX=Make1DIntArray(N);
    int** scval=Make2DIntArray(N,N); //sell c value
    int** sccol=Make2DIntArray(N,N); //sell c col
    int* rowwidth=Make1DIntArray(N); //number of elements in each row
    int* temp=Make1DIntArray(N);
    int* cols=Make1DIntArray(N/2);
    //int val[10],col[10],row[10];
    // Device buffers: vector, flattened matrix, result.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, sizeof(int)*N);
    cudaMalloc((void**)&dev_b, sizeof(int)*N*N);
    cudaMalloc((void**)&dev_c, sizeof(int)*N);
    arr=fopen("mat.txt","r");
    int k=0,cinrow=0;
    // struct timeval start, end;
    //gettimeofday(&start, NULL);
    //row[0]=0;
    //Reading the vector
    vec=fopen("vec.txt","r");
    for (i=0;i<N;i++)
    {
        fscanf(vec,"%d",&vecX[i]);
    }
    printf("\n Vector is:\n");
    for (i=0;i<N;i++)
    {
        printf("%d\n",vecX[i]);
    }
    //Reading the matrix
    for(i=0;i<N;i++)
    {
        printf("\n");
        for(j=0;j<N;j++)
        {
            fscanf(arr,"%d",&a[i][j]);
            printf("%d ",a[i][j]);
        }
    }
    printf("\n");
    //row[i]=k;
    //printf("\n k = %d\n ", k);
    //sleep(10);
    //gettimeofday(&end, NULL);
    // double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
    // end.tv_usec - start.tv_usec) / 1.e6;
    // printf("\nTime spent=%f\n", delta);
    // Flatten the 2D matrix into row-major aflat for the kernel.
    int counter=0;
    for (i=0;i<N;i++)
    {
        printf("\n");
        for(j=0;j<N;j++)
        {
            aflat[counter]=a[i][j];
            printf("%d ",aflat[counter]);
            counter+=1;
        }
    }
    printf("\n");
    counter=0;
    sleep(5);
    cudaMemcpy(dev_a, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, aflat, sizeof(int)*N*N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, result, sizeof(int)*N, cudaMemcpyHostToDevice);
    printmatscreen<<<1,1>>>(dev_b,N);
    multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
    cudaMemcpy(result, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
    for (i=0;i<N;i++)
    {
        printf("\n%d",result[i]);
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    /*
    for(i=0;i<N;i++)
    {
    for(j=0;j<N;j++)
    {
    if(a[i][j]!=0)
    {
    scval[i][k]=a[i][j];
    //printf("\n scval[%d][%d]=%d",i,k,scval[i][k]);
    //sleep(1);
    sccol[i][k]=j;
    //printf("\n sccol[%d][%d]=%d",i,k,sccol[i][k]);
    rowwidth[i]=k+1;
    if(rowwidth[i]>maxrowwidth)
    { //printf("\nrow[%d] width=%d\n",i,maxrowwidth);
    maxrowwidth=rowwidth[i];
    }k++;
    }
    }
    //printf("\nRow width %d = %d", i, rowwidth[i]);
    k=0;
    }
    for(i=0;i<N-1;i++)
    {
    for(j=0;j<N-1;j++)
    {
    if(rowwidth[j]<rowwidth[j+1])
    { printf("\nrow %d width=%d",j,rowwidth[j]);
    /*printf("\nscval[%d]=",j);
    for(k=0;k<rowwidth[j];k++)
    {
    printf("%d ", scval[j][k]);
    }
    printf("\nscval[%d]=",j+1);
    for(k=0;k<rowwidth[j+1];k++)
    {
    printf("%d ", scval[j+1][k]);
    }
    temp=scval[j];
    scval[j]=scval[j+1];
    scval[j+1]=temp;
    temp=sccol[j];
    sccol[j]=sccol[j+1];
    sccol[j+1]=temp;
    tint=rowwidth[j];
    rowwidth[j]=rowwidth[j+1];
    rowwidth[j+1]=tint;
    tint=vecX[j];
    vecX[j]=vecX[j+1];
    vecX[j+1]=tint;
    }
    }
    }
    printf("\nmaxrowwidth=%d\n",maxrowwidth);
    printtofile(scval,N,"scval.txt");
    printtofile(sccol,N,"sccol.txt");
    */
    /* NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
    int cols[3]={4,3,3};
    int** varmat=Changeto2DVariableIntArray(scval,N,N/2,2,cols);
    for (i=0;i<N;i++)
    {
    for(j=0;j<2;j++)
    {
    printf("\n");
    for (k=0;k<cols[i];k++)
    {
    printf("%d ",varmat[i*2+j][k]);
    }
    }
    }
    printf("\n");
    //printing val, col and row
    /*
    printf("Val=");
    for(i=0;i<Dsize;i++)
    {
    printf("%d\t",val[i]);
    }
    printf("\n");
    printf("col=");
    for(i=0;i<Dsize;i++)
    {
    printf("%d\t",col[i]);
    }
    printf("\n");
    printf("row=");
    for(i=0;i<Dsize;i++)
    {
    printf("%d\t",row[i]);
    }
    */
    printf("\n");
    /*Now the actual multiplication kernel
    for (i=0;i<N;i++)
    {
    for (j=row[i];j<row[i+1];j++)
    {
    result[i]+=val[j]*vecX[col[j]];
    }
    }
    printf("\n Result is:\n");
    for (i=0;i<N;i++)
    {
    printf("%d\n",result[i]);
    }
    */
    return 0;
}
|
9,416 | #include "includes.h"
__global__ void reduceUnroll2(int *g_idata, int *g_odata, unsigned int n){
    /* Block-level sum reduction with a factor-2 unroll: each block first
       folds two blockDim.x-wide chunks of g_idata together in place, then
       runs an in-place tree reduction and writes its partial sum to
       g_odata[blockIdx.x].  Launch with gridDim.x = ceil(n / (2*blockDim.x)).
       BUG FIXES vs the original:
       1. idx was blockIdx.x*blockDim.x + threadIdx.x, but each block covers
          2*blockDim.x elements, so every block after the first folded the
          wrong pair of chunks.
       2. `if (idx >= n) return;` before the reduction loop let some threads
          skip the __syncthreads() inside the loop — undefined behaviour. */
    unsigned int tid = threadIdx.x;
    // global index accounting for the factor-2 unroll
    unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    // data pointer for this block's 2*blockDim.x-wide window
    int *idata = g_idata + blockIdx.x * blockDim.x * 2;
    // unrolled first step: fold the second chunk into the first
    if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
    __syncthreads();
    // in-place tree reduction over the block's first chunk
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1){
        if (tid < stride){
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }
    if (tid == 0){
        g_odata[blockIdx.x] = idata[0];
    }
}
9,417 | #include <cstdio> // 用于print结果
__global__ void two_norm_kernel(const float *a,const float *b,float *c, int n, int m) {
    // Computes c[0] = || a - b ||_2 (Frobenius norm of the difference) for
    // an n x m row-major matrix pair, using a single thread block.
    // Shared scratch of 64*64 = 4096 floats: variable-length shared arrays
    // are not supported, so this kernel requires n*m <= 4096.
    // (The original comment claimed support for (4096,4096) matrices —
    // that is wrong; larger inputs overrun the shared buffer.)
    __shared__ float a_minus_b[64*64];
    // // initialise a_minus_b
    // for(int row=threadIdx.x;row<n;row+=blockDim.x){
    // for(int col=0;col<m;col++){ // loop unrolling
    // a_minus_b[row * 64 + col] = 0.0;
    // }
    // }
    // TODO: error / out-of-bounds handling
    if(threadIdx.x==0)
        printf("after compute a-b:\n");
    // Stage 1: squared elementwise difference, rows striped over threads.
    for(int row=threadIdx.x;row<n;row+=blockDim.x){
        for(int col=0;col<m;col++){ // candidate for loop unrolling
            float a_ij = a[row * m + col];
            float b_ij = b[row * m + col];
            a_minus_b[row * m + col] = (a_ij - b_ij) * (a_ij - b_ij);
            printf("tensor a-b,coord=(%d,%d), value=%f\n", row,col, a_minus_b[row * m + col]) ;
        }
    }
    __syncthreads(); // wait until all of a_minus_b has been computed
    // Stage 2: fold each row's entries into its first column.
    if(threadIdx.x==0)
        printf("after add rows to first col\n");
    for(int row=threadIdx.x;row<n;row+=blockDim.x){
        for(int col=1;col<m;col++){ // candidate for loop unrolling
            a_minus_b[row * m + 0] += a_minus_b[row * m + col];
        }
        printf("tensor a-b,row=%d, value=%f\n", row, a_minus_b[row * m + 0]);
    }
    __syncthreads(); // wait until every row total is in column 0
    // Stage 3: thread 0 sums the row totals and takes the square root.
    if(threadIdx.x == 0){
        for(int i=1;i<n;i++)
            a_minus_b[0] += a_minus_b[i * m + 0];
        c[0] = sqrtf(a_minus_b[0]);
    }
}
void launch_two_norm(const float *a,const float *b,float *c, int n, int m) {
    // Host wrapper: one block of 512 threads.  a, b, c must be device
    // pointers; the kernel's static shared buffer requires n*m <= 4096.
    two_norm_kernel<<<1, 512>>>(a, b, c, n, m);
}
9,418 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
//Framework for gradient descent example. |
9,419 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
int main(){
    /* Queries device 0 and checks it meets minimum requirements:
       compute capability >= 3.5, >= 2 GB global memory, and host-memory
       mapping support.  Prints the relevant properties and returns -1 when
       any requirement is not met, 0 on success. */
    cudaError_t custatus;
    int gpu_num = 0;
    cudaDeviceProp prop;
    custatus = cudaGetDeviceCount(&gpu_num);
    if(custatus != cudaSuccess){ /* BUG FIX: this result was ignored */
        printf("Failed to query the device count. Exit\n");
        return -1;
    }
    printf("Number of GPUs : %d\n", gpu_num);
    custatus = cudaSetDevice(0);
    if(custatus != cudaSuccess){
        printf("Failed to set Device 0. Exit\n");
        return -1;
    }
    custatus = cudaGetDeviceProperties(&prop, 0);
    if(custatus != cudaSuccess){ /* BUG FIX: this result was ignored */
        printf("Failed to read properties of Device 0. Exit\n");
        return -1;
    }
    printf("Device name : %s\n", prop.name);
    printf("Compute Capability : %d.%d\n", prop.major, prop.minor);
    if(prop.major<3 || (prop.major==3 && prop.minor<5)){
        printf("[error] Your device compute capability is too low. Exit.\n");
        return -1;
    }
    printf("totalGlobalMem : %.1f GB\n", prop.totalGlobalMem/1024/1024/1024.0);
    if(prop.totalGlobalMem/1024/1024/1024.0 < 2){
        printf("[error] Your device global memory is too small. Exit.\n");
        return -1;
    }
    printf("canMapHostMem : %d\n", prop.canMapHostMemory);
    if(!prop.canMapHostMemory){
        printf("[error] Your device do not support host memory mapping. Exit.\n");
        return -1;
    }
    printf("sharedMemPerBlock : %u KB\n", (unsigned int)prop.sharedMemPerBlock/1024);
    printf("maxThreadsPerBlock : %d\n", prop.maxThreadsPerBlock);
    printf("maxGridSize : %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("regPerBlock : %d\n", prop.regsPerBlock);
    return 0; /* FIX: explicit success status */
}
|
9,420 | #include "includes.h"
/*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
*
* This code is based on the NVIDIA 'reduction' CUDA sample,
* Copyright 1993-2010 NVIDIA Corporation.
*/
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
__global__ void setTargetIndex(int n, double *w, double *out)
{
    // One thread per element: where the weight equals exactly 1.0,
    // subtract 1.0 from the corresponding output entry.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    if (w[idx] == 1.0)
        out[idx] -= 1.0;
}
9,421 | #include "curand_kernel.h"
// Which type of RNG should we expose?
// DON'T FORGET to change the RandomState type in platform/cuda.t, also
// typedef curandStateXORWOW rng_state;
typedef curandStateMRG32k3a rng_state;
extern "C"
{
    // Seed/position a per-thread RNG state (thin shim over curand_init);
    // C linkage so the symbol can be resolved from the FFI layer referenced
    // in the header comment (platform/cuda.t).
    __device__ void cu_rand_init(unsigned long long seed,
    unsigned long long subsequence,
    unsigned long long offset,
    rng_state* state)
    {
        curand_init(seed, subsequence, offset, state);
    }
    // Draw one uniformly distributed double from the given state
    // (delegates to curand_uniform_double).
    __device__ double cu_rand_uniform(rng_state* state)
    {
        return curand_uniform_double(state);
    }
}
9,422 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
//#define n 256000
using namespace std;
int CPU_results(int *C, int *B, int *A, int N)
{
    // Reference scatter on the host: element A[i] lands at position B[i]
    // of C.  Always returns 0.
    int i = 0;
    while (i < N) {
        C[B[i]] = A[i];
        ++i;
    }
    return 0;
}
int check_results(int *C, int *B, int *A, int N)
{
    // Validate the scatter result: C[B[i]] must equal A[i] for every i.
    // Prints a diagnostic and returns 0 on the first mismatch, 1 on success.
    for (int idx = 0; idx < N; ++idx)
    {
        if (C[B[idx]] == A[idx])
            continue;
        std::cout << idx << std::endl;
        std::cout << A[idx] << " " << C[B[idx]] << std::endl;
        printf("Sorry! Checking Failed!\n");
        return 0;
    }
    printf("Good! Checking Passed!\n");
    return 1;
}
__global__ void kernel(int *d_C,int *d_B, int *d_A, int N)
{
    /* GPU counterpart of CPU_results(): scatter d_A[tid] to d_C[d_B[tid]].
       BUG FIX: the original only read d_A[d_B[tid]] into a local that was
       never used, so the kernel had no observable effect (and was free to
       be optimised away) while main() copied back and could check the
       uninitialised d_C.  The store now matches the CPU reference and the
       (commented-out) check_results contract C[B[i]] == A[i]. */
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= N) return;
    d_C[d_B[tid]] = d_A[tid];
}
int main(int argc, char *argv[])
{
    /* Scatter benchmark: C[B[i]] = A[i] where B is a random permutation.
       argv[1] = N (element count).  Reports GPU kernel time (and a
       commented-out CPU reference timing) in seconds. */
    if (argc < 2)
    {
        fprintf(stderr, "usage: %s <N>\n", argv[0]);
        return 1; /* FIX: argv[1] was previously read unchecked */
    }
    int N=atoi(argv[1]);
    int *A, *B, *C, *d_A, *d_B, *d_C;
    A=(int *)malloc(N*sizeof(int));
    B=(int *)malloc(N*sizeof(int));
    C=(int *)malloc(N*sizeof(int));
    cudaMalloc((void **)&d_A, N*sizeof(int));
    cudaMalloc((void **)&d_B, N*sizeof(int));
    cudaMalloc((void **)&d_C, N*sizeof(int));
    srand(2013); /* fixed seed for reproducible runs */
    vector<int> BV(N);
    for(int i=0; i<N; i++)
    {
        A[i]=rand()%N;
        BV[i]=i; /* identity, shuffled below into a permutation */
    }
    random_shuffle(BV.begin(),BV.end());
    for(int i=0;i<N;i++)
        B[i]=BV[i];
    cudaMemcpy(d_A,A,N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,B,N*sizeof(int),cudaMemcpyHostToDevice);
    int blocks= 256; /* threads per block */
    struct timespec time_start, time_end;
    clock_gettime(CLOCK_MONOTONIC,&time_start);
    kernel<<<(N+255)/256,blocks>>>(d_C,d_B,d_A,N);
    cudaDeviceSynchronize(); /* FIX: cudaThreadSynchronize() is deprecated */
    clock_gettime(CLOCK_MONOTONIC,&time_end);
    double kernel_time=(time_end.tv_sec-time_start.tv_sec)*1.e9+time_end.tv_nsec-time_start.tv_nsec;
    cout<<"GPU kernel time= "<<kernel_time*1.e-9<<endl;
    clock_gettime(CLOCK_MONOTONIC,&time_start);
    // CPU_results(C,B,A,N);
    clock_gettime(CLOCK_MONOTONIC,&time_end);
    kernel_time=(time_end.tv_sec-time_start.tv_sec)*1.e9+time_end.tv_nsec-time_start.tv_nsec;
    cout<<"CPU time= "<<kernel_time*1.e-9<<endl;
    cudaMemcpy(C,d_C,N*sizeof(int),cudaMemcpyDeviceToHost);
    // check_results(C,B,A,N);
    /* FIX: release device and host buffers (originally leaked). */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
    return 0;
}
|
9,423 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
__global__ void gpuVecAdd(float *A, float *B, float *C) { // __global__ marks this as a device kernel launched from the host
    // Element-wise vector add, one thread per element: C[i] = A[i] + B[i].
    // NOTE(review): no length parameter and no bounds guard — the launch
    // configuration in main() (N/32 blocks x 32 threads, N = 16384) must
    // cover the arrays exactly; confirm before reusing with other sizes.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    C[tid]=A[tid]+B[tid];
}
double get_time() {
    // Wall-clock time in seconds (microsecond resolution) via gettimeofday.
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)1e-6 * now.tv_usec;
}
void init(float *V, int N) {
    // Fill V with pseudo-random integers in [0, 100), stored as floats.
    int idx = 0;
    while (idx < N) {
        V[idx] = rand() % 100;
        ++idx;
    }
}
void verify(float *A, float *B, float *C, int N) {
    /* Check C[i] == A[i] + B[i] for every element; print the first failure
       or a success message.
       BUG FIXES: the loop bound was hard-coded to 16384 instead of N
       (out-of-bounds reads for smaller arrays, and the parameter was
       ignored), and float values were printed with %d, which is undefined
       behaviour — now %f. */
    for (int i = 0; i < N; i++) {
        if (A[i] + B[i] != C[i]) {
            printf("Verification failed! A[%d] = %f, B[%d] = %f, C[%d] = %f\n",
                   i, A[i], i, B[i], i, C[i]);
            return;
        }
    }
    printf("Verification success!\n");
}
int main() {
    // Vector-add demo: initialise two 16384-element host vectors, add them
    // on the GPU, time the launch+copy-back, and verify against the CPU.
    int N = 16384;
    float *A = (float*)malloc(sizeof(float) * N);
    float *B = (float*)malloc(sizeof(float) * N);
    float *C = (float*)malloc(sizeof(float) * N);
    init(A, N);
    init(B, N);
    // Memory objects of the device
    float *d_A, *d_B, *d_C;
    // Allocate device buffers d_A, d_B, and d_C.
    cudaMalloc(&d_A, sizeof(float)*N);
    cudaMalloc(&d_C, sizeof(float)*N);
    cudaMalloc(&d_B, sizeof(float)*N);
    // Copy "A" to "d_A" (host to device).
    cudaMemcpy(d_A, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    // Copy "B" to "d_B" (host to device).
    cudaMemcpy(d_B, B, sizeof(float)*N, cudaMemcpyHostToDevice);
    // Launch the kernel.
    dim3 dimBlock(32,1); // size of one thread block
    dim3 dimGrid(N/32,1); // grid size: total number of thread blocks (not the global size)
    double start_time = get_time();
    gpuVecAdd <<< dimGrid, dimBlock >>> (d_A, d_B, d_C); // runs asynchronously in the background
    // Copy "d_C" to "C" (device to host).
    cudaMemcpy(C, d_C, sizeof(float)*N, cudaMemcpyDeviceToHost); // this blocking memcpy also waits for the kernel to finish
    double end_time = get_time();
    printf("Elapsed time: %f sec\n", end_time - start_time);
    verify(A, B, C, N);
    // Release d_A, d_B, and d_C.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
    return 0;
}
/*
Elapsed time: 0.000117 sec
Verification success!
*/
|
9,424 | #include "includes.h"
__global__ void fill( float4 *localbuf, float val, float4* ptr, int offset, int N ) {
    // One thread per float4: add `val` to every component of localbuf[i]
    // and store the result at ptr[offset + i].
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    float4 v = localbuf[gid];
    v.x += val;
    v.y += val;
    v.z += val;
    v.w += val;
    ptr[offset + gid] = v;
}
9,425 | // Compile: nvcc -arch=sm_61 -std=c++11 assignment5-p3.cu -o assignment5-p3
#include <cmath>
#include <cstdint>
#include <iostream>
#include <sys/time.h>
#define SIZE 1024
#define BLOCK_SIZE 32
#define THRESHOLD (0.000001)
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Helper behind the gpuErrchk macro: print a readable message for any
// failed CUDA call (error string, file, line) and, when `abort` is true
// (the default), terminate the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
double rtclock() {
    // Wall-clock seconds since the epoch, microsecond resolution.
    struct timeval tp;
    struct timezone tzp;
    int rc = gettimeofday(&tp, &tzp);
    if (rc != 0) {
        std::cout << "Error return from gettimeofday: " << rc << "\n";
    }
    return (tp.tv_sec + tp.tv_usec * 1.0e-6);
}
__host__ void ATAonCPU(double* M, double* P) {
    // Serial reference: P += M^T * M for SIZE x SIZE row-major matrices,
    // i.e. P[i][j] accumulates sum over k of M[k][i] * M[k][j].
    for (int i = 0; i < SIZE; ++i)
        for (int j = 0; j < SIZE; ++j)
            for (int k = 0; k < SIZE; ++k)
                P[i*SIZE + j] += M[k*SIZE + i] * M[k*SIZE + j];
}
__host__ void check_result(double* Test, double* Ref) {
    // Compare two SIZE x SIZE matrices elementwise, counting differences
    // that exceed THRESHOLD and tracking the largest (signed) one seen.
    double maxdiff = 0;
    int numdiffs = 0;
    for (int i = 0; i < SIZE; ++i) {
        for (int j = 0; j < SIZE; ++j) {
            double rel_diff = (Test[i*SIZE + j] - Ref[i*SIZE + j]);
            if (fabs(rel_diff) > THRESHOLD) {
                ++numdiffs;
                if (rel_diff > maxdiff)
                    maxdiff = rel_diff;
            }
        }
    }
    if (numdiffs > 0)
        std::cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD
                  << " Max Diff = " << maxdiff << "\n";
    else
        std::cout << "No differences found between base and test versions\n";
}
// SB: Implement your kernel here
// Tiled GPU computation of P = M^T * M, exploiting symmetry: blocks
// strictly below the diagonal (blockIdx.x < blockIdx.y) exit immediately,
// and the symmetric element is mirrored by the final store.
// Assumes blockDim.x == blockDim.y == BLOCK_SIZE == 32 (the unrolled sum
// below is hard-coded to 32 terms) and SIZE divisible by BLOCK_SIZE.
__global__ void ATAkernel(double* M, double* P) {
    if(blockIdx.x < blockIdx.y) return;
    double sum = 0;
    uint64_t i = blockIdx.y*blockDim.y + threadIdx.y;  // output row
    uint64_t j = blockIdx.x*blockDim.x + threadIdx.x;  // output column
    // Shared tiles holding BLOCK_SIZE-deep slices of M's columns i and j.
    __shared__ double A_t[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ double B_t[BLOCK_SIZE][BLOCK_SIZE];
    for (uint64_t tid = 0; tid < SIZE/blockDim.x; tid++) {
        A_t[threadIdx.y][threadIdx.x] = M[(tid * blockDim.x + threadIdx.x) * SIZE + i];
        B_t[threadIdx.y][threadIdx.x] = M[(tid * blockDim.y + threadIdx.y) * SIZE + j];
        __syncthreads();
        // Fully unrolled 32-term partial dot product over this tile.
        sum += A_t[threadIdx.y][0] * B_t[0][threadIdx.x]
        + A_t[threadIdx.y][1] * B_t[1][threadIdx.x]
        + A_t[threadIdx.y][2] * B_t[2][threadIdx.x]
        + A_t[threadIdx.y][3] * B_t[3][threadIdx.x]
        + A_t[threadIdx.y][4] * B_t[4][threadIdx.x]
        + A_t[threadIdx.y][5] * B_t[5][threadIdx.x]
        + A_t[threadIdx.y][6] * B_t[6][threadIdx.x]
        + A_t[threadIdx.y][7] * B_t[7][threadIdx.x]
        + A_t[threadIdx.y][8] * B_t[8][threadIdx.x]
        + A_t[threadIdx.y][9] * B_t[9][threadIdx.x]
        + A_t[threadIdx.y][10] * B_t[10][threadIdx.x]
        + A_t[threadIdx.y][11] * B_t[11][threadIdx.x]
        + A_t[threadIdx.y][12] * B_t[12][threadIdx.x]
        + A_t[threadIdx.y][13] * B_t[13][threadIdx.x]
        + A_t[threadIdx.y][14] * B_t[14][threadIdx.x]
        + A_t[threadIdx.y][15] * B_t[15][threadIdx.x]
        + A_t[threadIdx.y][16] * B_t[16][threadIdx.x]
        + A_t[threadIdx.y][17] * B_t[17][threadIdx.x]
        + A_t[threadIdx.y][18] * B_t[18][threadIdx.x]
        + A_t[threadIdx.y][19] * B_t[19][threadIdx.x]
        + A_t[threadIdx.y][20] * B_t[20][threadIdx.x]
        + A_t[threadIdx.y][21] * B_t[21][threadIdx.x]
        + A_t[threadIdx.y][22] * B_t[22][threadIdx.x]
        + A_t[threadIdx.y][23] * B_t[23][threadIdx.x]
        + A_t[threadIdx.y][24] * B_t[24][threadIdx.x]
        + A_t[threadIdx.y][25] * B_t[25][threadIdx.x]
        + A_t[threadIdx.y][26] * B_t[26][threadIdx.x]
        + A_t[threadIdx.y][27] * B_t[27][threadIdx.x]
        + A_t[threadIdx.y][28] * B_t[28][threadIdx.x]
        + A_t[threadIdx.y][29] * B_t[29][threadIdx.x]
        + A_t[threadIdx.y][30] * B_t[30][threadIdx.x]
        + A_t[threadIdx.y][31] * B_t[31][threadIdx.x];
        __syncthreads();
    }
    P[i * SIZE + j] = sum;
    // Mirror into the symmetric position for off-diagonal blocks.
    if(blockIdx.x > blockIdx.y) P[j * SIZE + i] = sum;
}
int main() {
    /* Driver: compute A^T.A serially on the CPU, then on the GPU with
       ATAkernel, report GFLOPS/time for both, and compare the results. */
    cout << "Matrix Size = " << SIZE << "\n";
    double* A = new double[SIZE*SIZE];
    double* O_s = new double[SIZE*SIZE];  // serial (CPU) result
    double* O_p = new double[SIZE*SIZE];  // parallel (GPU) result
    for (int i = 0; i < SIZE; i++) {
        for (int j = 0; j < SIZE; j++) {
            A[i*SIZE + j] = i * j * 0.25;
            O_s[i*SIZE + j] = 0;
            O_p[i*SIZE + j] = 0;
        }
    }
    double clkbegin, clkend;
    double t;
    clkbegin = rtclock();
    ATAonCPU(A, O_s);
    clkend = rtclock();
    t = clkend - clkbegin;
    cout << "A^T.A on CPU: " << (2.0 * SIZE * SIZE * SIZE / t / 1.0e9)
         << " GFLOPS; Time = " << t * 1000 << " msec\n";
    cudaEvent_t start, end;
    gpuErrchk( cudaEventCreate(&start) );
    gpuErrchk( cudaEventCreate(&end) );
    double *O_p_c, *A_c;
    gpuErrchk( cudaMalloc((void**)&O_p_c, SIZE*SIZE*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&A_c, SIZE*SIZE*sizeof(double)) );
    dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
    dim3 gridSize((SIZE + blockSize.x - 1)/blockSize.x, (SIZE + blockSize.y - 1)/blockSize.y);
    gpuErrchk( cudaEventRecord(start, 0) );
    gpuErrchk( cudaMemcpy(O_p_c, O_p, SIZE*SIZE*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(A_c, A, SIZE*SIZE*sizeof(double), cudaMemcpyHostToDevice) );
    ATAkernel<<<gridSize, blockSize>>>(A_c, O_p_c);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaMemcpy(O_p, O_p_c, SIZE*SIZE*sizeof(double), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaEventRecord(end, 0) );
    gpuErrchk( cudaDeviceSynchronize() );
    float kernel_time = 0;  // elapsed GPU time in milliseconds
    gpuErrchk( cudaEventElapsedTime(&kernel_time, start, end) );
    /* BUG FIX: the GPU GFLOPS figure was computed from the CPU elapsed
       time `t`; it now uses the measured kernel_time. */
    cout << "A^T.A on GPU: " << (2.0 * SIZE * SIZE * SIZE / (kernel_time * 1.0e-3) / 1.0e9)
         << " GFLOPS; Time = " << kernel_time << " msec\n";
    check_result(O_p, O_s);
    gpuErrchk( cudaEventDestroy(start) );  /* BUG FIX: events were leaked */
    gpuErrchk( cudaEventDestroy(end) );
    gpuErrchk( cudaFree(O_p_c) );
    gpuErrchk( cudaFree(A_c) );
    /* BUG FIX: arrays allocated with new[] were released with free(),
       which is undefined behaviour. */
    delete[] O_s;
    delete[] O_p;
    delete[] A;
    return EXIT_SUCCESS;
}
|
9,426 | /*
* This is a CUDA version of bellman_ford algorithm
* Compile: nvcc -std=c++11 -arch=sm_52 -o cuda_bellman_ford cuda_bellman_ford.cu
* Run: ./cuda_bellman_ford <input file> <number of blocks per grid> <number of threads per block>, you will find the output file 'output.txt'
* */
#include <string>
#include <cassert>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <iomanip>
#include <cstring>
#include <sys/time.h>
#include <climits>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
// for mmap
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
// for timing
#include <sys/time.h>
#define INF INT_MAX
#define THREADS_PER_BLOCK 1024
/*
* This is a CHECK function to check CUDA calls
*/
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Copy a horizontal stripe of the (mmapped) host adjacency matrix — the
// num_of_subvertices rows starting at row interval_st — to the device
// buffer `outedges` in one contiguous transfer.
void get_outedges(int64_t* graph, int64_t* outedges, size_t interval_st, size_t num_of_subvertices, size_t num_of_vertices) {
    cudaMemcpy(outedges, (graph + interval_st * num_of_vertices), sizeof(int64_t) * num_of_subvertices * num_of_vertices, cudaMemcpyHostToDevice);
}
// Copy a vertical stripe (columns interval_st .. interval_st+num_of_subvertices-1)
// of the row-major host matrix to the device, one row segment per
// cudaMemcpy.  On the device the stripe ends up laid out as
// num_of_vertices consecutive segments of num_of_subvertices entries,
// i.e. the stripe transposed into row order.
void get_inedges(int64_t* graph, int64_t* inedges, size_t interval_st, size_t num_of_subvertices, size_t num_of_vertices) {
    size_t i;
    int64_t *graph_ptr = graph + interval_st;
    int64_t *inedges_ptr = inedges;
    // in column favor but transpose into row.
    for (i = 0; i < num_of_vertices; i++) {
        cudaMemcpy(inedges_ptr, graph_ptr, sizeof(int64_t) * num_of_subvertices, cudaMemcpyHostToDevice);
        inedges_ptr += num_of_subvertices;
        graph_ptr += num_of_vertices;
    }
}
__global__ void bellman_ford_one_iter(size_t n, size_t sub_n, size_t st, int64_t *d_mat, int64_t *d_dist, bool *d_has_next){
    /* One Bellman-Ford relaxation pass over a vertical stripe of the graph:
       thread v relaxes destination vertex (st + v) against every source u,
       using the transposed stripe produced by get_inedges (d_mat[u*sub_n + v]
       is the weight of edge u -> st+v; 0 means "no edge").
       Sets *d_has_next when any distance improved.
       BUG FIXES vs the original:
       - the bounds check was `v > sub_n`, letting thread v == sub_n read
         and write one element out of range; now `>=`.
       - `weight` and `new_dist` were declared in the outer scope and then
         shadowed inside the loop (the outer ones were dead). */
    size_t v = blockDim.x * blockIdx.x + threadIdx.x;
    if (v >= sub_n) {
        return;
    }
    int64_t *node = d_mat + v;   // column v of the stripe
    v = v + st;                  // global vertex id of this thread's column
    for (size_t u = 0; u < n; u++){
        int64_t weight = node[u * sub_n]; // row is src, col is dst
        if (weight > 0) {
            int64_t new_dist = d_dist[u] + weight;
            if(new_dist < d_dist[v]){
                d_dist[v] = new_dist;
                *d_has_next = true;
            }
        }
    }
}
/**
* Bellman-Ford algorithm. Find the shortest path from vertex 0 to other vertices.
* @param blockPerGrid number of blocks per grid
* @param threadsPerBlock number of threads per block
* @param num_of_vertices input size
* @param *mat input adjacency matrix
* @param *dist distance array
* @param *has_negative_cycle a bool variable to recode if there are negative cycles
*/
void bellman_ford(size_t num_of_vertices, size_t num_of_subvertices, int64_t *mat, int64_t *dist, bool *has_negative_cycle) {
    // Out-of-core Bellman-Ford: the graph stays in (mmapped) host memory
    // and is streamed to the device one num_of_subvertices-wide stripe at
    // a time via get_inedges.  Iterates until no distance improves, or
    // declares a negative cycle after num_of_vertices - 1 full sweeps.
    size_t iter_num = 0;
    int64_t *d_mat, *d_dist;
    bool *d_has_next, h_has_next;
    size_t i;
    // One stripe of the adjacency matrix resident on the device at a time.
    const size_t stripe_sz = num_of_vertices * num_of_subvertices * sizeof(int64_t);
    cudaMalloc(&d_mat, stripe_sz);
    cudaMalloc(&d_dist, sizeof(int64_t) * num_of_vertices);
    cudaMalloc(&d_has_next, sizeof(bool));
    *has_negative_cycle = false;
    // Initialise all distances to INF except the source (vertex 0).
    for(i = 0 ; i < num_of_vertices; i++){
        dist[i] = INF;
    }
    dist[0] = 0;
    cudaMemcpy(d_dist, dist, sizeof(int64_t) * num_of_vertices, cudaMemcpyHostToDevice);
    do {
        h_has_next = false;
        cudaMemcpy(d_has_next, &h_has_next, sizeof(bool), cudaMemcpyHostToDevice);
        // One full sweep: relax every stripe of destination vertices.
        for (i = 0; i < num_of_vertices; i += num_of_subvertices) {
            get_inedges(mat, d_mat, i, num_of_subvertices, num_of_vertices);
            bellman_ford_one_iter<<<(num_of_subvertices+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(num_of_vertices, num_of_subvertices, i, d_mat, d_dist, d_has_next);
            CHECK(cudaDeviceSynchronize());
        }
        cudaMemcpy(&h_has_next, d_has_next, sizeof(bool), cudaMemcpyDeviceToHost);
        iter_num++;
        // More than V-1 improving sweeps implies a negative cycle.
        if (iter_num >= num_of_vertices - 1){
            *has_negative_cycle = true;
            break;
        }
    } while (h_has_next);
    if (! *has_negative_cycle){
        cudaMemcpy(dist, d_dist, sizeof(int64_t) * num_of_vertices, cudaMemcpyDeviceToHost);
    }
    cudaFree(d_mat);
    cudaFree(d_dist);
    cudaFree(d_has_next);
}
/**
* TODO section:
* maybe we can borrow the log system from graphchi?
*/
int main(int argc, char** argv) {
    // Driver: mmap a dense int64 adjacency matrix, run out-of-core
    // Bellman-Ford from vertex 0, and write "vertex distance" pairs (or a
    // negative-cycle notice) to log.txt.
    int64_t *graph;
    int fd;
    size_t num_of_vertices, num_of_subvertices;
    size_t iter, st, i;
    // result
    int64_t *vertices;
    bool has_negative_cycle = false;
    // timing (NOTE(review): declared but never used)
    struct timeval h_start, h_end;
    long duration;
    if (argc < 4) {
        printf("usage: %s <graph path> <# of vertices> <# of subvertices>\n", argv[0]);
        exit(1);
    }
    // I/O part, open in mmap mode
    // NOTE(review): neither open() nor mmap() results are checked; a bad
    // path yields fd == -1 and mmap == MAP_FAILED, crashing later.
    fd = open(argv[1], O_RDONLY);
    num_of_vertices = (size_t) atoi(argv[2]);
    num_of_subvertices = (size_t) atoi(argv[3]);
    graph = (int64_t *) mmap(NULL, sizeof(int64_t) * num_of_vertices * num_of_vertices, PROT_READ, MAP_PRIVATE, fd, 0);
    // calculate the largest stripe we can have
    // Assume we have 1 GB (like graphchi), and at least we can contain one row and one column of the graph
    // and assume we those numbers are the power of 2
    // num_of_subvertices = 32;
    printf("num_of_subvertices: %lu\n", num_of_subvertices);
    // Distance array initialisation (bellman_ford re-initialises it too).
    vertices = (int64_t *) calloc(sizeof(int64_t), num_of_vertices);
    for (i = 0; i < num_of_vertices; i++) {
        vertices[i] = INF;
    }
    bellman_ford(num_of_vertices, num_of_subvertices, graph, vertices, &has_negative_cycle);
    FILE *fp = fopen("log.txt", "w");
    if (!has_negative_cycle) {
        for (i = 0; i < num_of_vertices; i++) {
            // Clamp distances that overflowed past INF (INT_MAX + weight).
            if (vertices[i] > INF) {
                vertices[i] = INF;
            }
            // NOTE(review): %lu with an int64_t value relies on LP64
            // long == int64_t; PRId64 would be portable.
            fprintf(fp, "%lu %lu\n", i, vertices[i]);
        }
    } else {
        fprintf(fp, "FOUND NEGATIVE CYCLE!\n");
    }
    fclose(fp);
    // cleanup
    munmap(graph, sizeof(int64_t) * num_of_vertices * num_of_vertices);
    close(fd);
    free(vertices);
    return 0;
}
9,427 | #include "includes.h"
// For each tissue point itp, accumulate the field contribution of every
// vessel point jvp: p += qv[jvp] * G(dist), where G is a near-field kernel
// inside radius `req` and a far-field kernel (log for 2D, 1/r for 3D)
// outside it.  Coordinates are stored structure-of-arrays: x block, then
// y block, then z block (stride nnt for tissue, nnv for vessels).
// NOTE(review): log/sqrt and the 1., 1.5, 0.5 literals are double
// precision inside otherwise-float arithmetic; logf/sqrtf and f-suffixed
// literals would avoid the promotions — confirm precision requirements
// before changing.
__global__ void tissueGPU3Kernel(float *d_tissxyz, float *d_vessxyz, float *d_pt000, float *d_qv000, int nnt, int nnv, int is2d, float req, float r2d)
{
    int itp = blockDim.x * blockIdx.x + threadIdx.x;  // one thread per tissue point
    int jvp,nnv2=2*nnv;
    float p = 0., xt,yt,zt,x,y,z,dist2,gtv,req2=req*req,r2d2=r2d*r2d;
    if(itp < nnt){
        // This tissue point's coordinates (SoA layout).
        xt = d_tissxyz[itp];
        yt = d_tissxyz[itp+nnt];
        zt = d_tissxyz[itp+nnt*2];
        for(jvp=0; jvp<nnv; jvp++){
            x = d_vessxyz[jvp] - xt;
            y = d_vessxyz[jvp+nnv] - yt;
            z = d_vessxyz[jvp+nnv2] - zt;
            dist2 = x*x + y*y + z*z;
            if(dist2 < req2){
                // Near field: regularised kernel inside radius req.
                if(is2d) gtv = log(r2d2/req2) + 1. - dist2/req2;
                else gtv = (1.5 - 0.5*dist2/req2)/req;
            }
            else{
                // Far field: log kernel in 2D, 1/r in 3D.
                if(is2d) gtv = log(r2d2/dist2);
                else gtv = 1./sqrt(dist2);
            }
            p += d_qv000[jvp]*gtv;
        }
        d_pt000[itp] = p;
    }
}
9,428 | // includes
#include <stdio.h>
#include <stdlib.h>
//-------------Funcion llenar SUMA
void llenarSuma(float * pmat, int node){
    // Reset the accumulator array: write 0.0 into each of the `node` slots.
    for (int idx = 0; idx < node; ++idx)
        pmat[idx] = 0.0;
}
|
9,429 | /*_________________________________________________________________________
* ww2parCC_device_066DP.cu - calculates the self-induced velocity field of the wake.
* Parallel version on GPU - CUDA code executed on device
*
* CUDA kernel function (executed on the device, called from the host) +
* CUDA block & thread functions (executed on the device, called from device)
* Manages data flow, launches and syncronize threads blocks
*
* DUWIND- Delft University Wind Energy Research Institute
* developer: Giuseppe Tescione
*
* Version: 0.6.6DP (alpha) - 20110824
* basic version with no loop unrolling, no wrap and no multithread bodies
* simple cut-off constant for desingularization
* double precision (for GPUs of computing capability 2.x)
*________________________________________________________________________*/
//Definition of double2 and double3 types
//typedef struct {
//double x, y;
//} double2;
//typedef struct {
//double x, y, z;
//} double3;
// Simulation parameters resident in GPU constant memory; written once from
// the host (see the note below) and read by every thread in the grid.
__constant__ int blocksize_gpu;        // threads per block == width of each shared-memory tile
__constant__ int nParticles_gpu;       // number of source particles
__constant__ int nTargets_gpu;         // number of target particles
__constant__ int nParticleBlocks_gpu;  // grid extent over source particles
__constant__ int nTargetBlocks_gpu;    // grid extent over target particles
__constant__ double ksigmasqr_gpu;     // smoothing parameter (kernel sigma^2) — assumed, set in host code
__constant__ double inv_pi_gpu;        // 1/(2*pi), per the host-code note in this file
__constant__ double myeps_gpu;         // cut-off added to squared distance (desingularization)
/* constants (block dimension, number of particle, cut-off and 1/2pi) residing in
constant memory space, accessible from all threads within the grid and from the host.
Defined in host code*/
__device__ double ww2par_thread(double THR_vorticity, double THR_xTarget, double THR_yTarget, double THR_xBlob, double THR_yBlob, double THR_wBlob)
/*THREAD FUNCTION - adds the influence of one source blob onto one target.
Inputs:
  THR_vorticity         accumulated value so far (returned updated)
  THR_xTarget/yTarget   target particle position
  THR_xBlob/yBlob       source blob position
  THR_wBlob             source blob strength
Returns THR_vorticity + S * exp(-(r^2 + eps)/ksigma^2) with
S = wBlob * inv_pi_gpu / ksigmasqr_gpu, i.e. a Gaussian-smoothed
contribution; myeps_gpu desingularizes coincident points. */
{
//target-source separation [2 FLOPS]
double2 RAD;
RAD.x = THR_xTarget - THR_xBlob;
RAD.y = THR_yTarget - THR_yBlob;
//squared distance plus cut-off [4 FLOPS]
double RADSQR = RAD.x * RAD.x + RAD.y * RAD.y + myeps_gpu;
//blob strength scaled by inv_pi/ksigma^2 [2 FLOPS]
double S = THR_wBlob * inv_pi_gpu / ksigmasqr_gpu;
//accumulate Gaussian-weighted induction [4 FLOPS]
THR_vorticity += S * exp(-RADSQR/(ksigmasqr_gpu));
return THR_vorticity;
}
__device__ double ww2par_block(double BLK_xTarget, double BLK_yTarget, double BLK_vorticity)
/*BLOCK FUNCTION - folds into one target the influence of the tile of source
blobs currently staged in shared memory by the kernel.
The dynamically sized shared array BLK_blob holds three consecutive planes of
blocksize_gpu entries each: x positions, y positions, blob strengths.
Returns the updated accumulated vorticity for this target. */
{
extern __shared__ double BLK_blob [];
// walk every staged source blob and accumulate its contribution
int src = 0;
while (src < blockDim.x)
{
double sx = BLK_blob[src];
double sy = BLK_blob[src + blocksize_gpu];
double sw = BLK_blob[src + 2*blocksize_gpu];
BLK_vorticity = ww2par_thread(BLK_vorticity, BLK_xTarget, BLK_yTarget, sx, sy, sw);
src++;
}
return BLK_vorticity;
}
__global__ void ww2par_kernel(void *cxBlob_gpu_ondevice, void *cyBlob_gpu_ondevice, void *cwBlob_gpu_ondevice, void *cxTarget_gpu_ondevice, void *cyTarget_gpu_ondevice, void *cw_gpu_ondevice)
/*KERNEL FUNCTION - tiled N-body style accumulation.
Each thread owns one target particle; the grid sweeps the source particles
one shared-memory tile (blocksize_gpu entries) at a time.
Inputs are untyped device pointers to source x/y/strength arrays and target
x/y arrays; the result per target is written to cw_gpu_ondevice.
NOTE(review): NTHR is not checked against nTargets_gpu and `id` is not
checked against nParticles_gpu — this assumes the host launches exactly
nTargets threads and that nParticles is a multiple of blocksize_gpu; confirm
in the host code. An early-return guard could NOT simply be added here,
because __syncthreads() inside the loop must be reached by all threads. */
{
extern __shared__ double BLK_blob []; //shared tile: 3 planes of blocksize_gpu doubles (x, y, w)
//pointers passage: cast the untyped device pointers to double arrays
double * KRN_xBlob = (double *)cxBlob_gpu_ondevice;
double * KRN_yBlob = (double *)cyBlob_gpu_ondevice;
double * KRN_wBlob = (double *)cwBlob_gpu_ondevice;
double * KRN_xTarget = (double *)cxTarget_gpu_ondevice;
double * KRN_yTarget = (double *)cyTarget_gpu_ondevice;
double * KRN_w = (double *)cw_gpu_ondevice;
//induction initialization
double BLK_vorticity;
BLK_vorticity = 0;
//target particle owned by this thread
double BLK_xTarget;
double BLK_yTarget;
int NTHR = blockIdx.x * blockDim.x + threadIdx.x;
BLK_xTarget = KRN_xTarget[NTHR];
BLK_yTarget = KRN_yTarget[NTHR];
int i, block;
for (i = 0, block = 0; i < nParticles_gpu; i += blocksize_gpu, block++)//LOOP over source tiles
{
//each thread stages one source particle of this tile into shared memory
int id = block * blockDim.x + threadIdx.x;
BLK_blob [threadIdx.x] = KRN_xBlob[id];
BLK_blob [threadIdx.x + blocksize_gpu] = KRN_yBlob[id];
BLK_blob [threadIdx.x + 2*blocksize_gpu] = KRN_wBlob[id];
__syncthreads();
// barrier: all shared memory locations are populated before computation starts
BLK_vorticity = ww2par_block(BLK_xTarget, BLK_yTarget, BLK_vorticity); //block function call
__syncthreads();
// barrier: all threads finish reading the tile before it is overwritten
}
//save the accumulated result for this target in global memory
double WIND = BLK_vorticity;
KRN_w[NTHR] = WIND;
}
|
9,430 | #include "includes.h"
// Partial dot product: each block multiplies THREADS_PER_BLOCK element pairs,
// reduces them in shared memory, and atomically adds its partial sum to *cc.
// PRECONDITION: the HOST must zero *cc (e.g. cudaMemset) before the launch.
// The previous version did `*cc = 0;` inside the kernel, which is a race when
// the grid has more than one block: a late-starting block could zero out
// partial sums already accumulated by earlier blocks via atomicAdd.
__global__ void innerProd(float *aa, float *bb, float *cc)
{
    __shared__ float temp[THREADS_PER_BLOCK];
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    temp[threadIdx.x] = aa[index]*bb[index];
    // Barrier: every product must be staged in shared memory before
    // thread 0 reads it. Threads are only synchronized within a block.
    __syncthreads();
    // Thread 0 sums the pairwise products for this block
    if (threadIdx.x == 0) {
        float sum = 0;
        for (int i = 0; i < THREADS_PER_BLOCK; i++){
            sum += temp[i];
        }
        // atomicAdd serializes the read-modify-write on *cc so different
        // blocks cannot race while accumulating their partial sums.
        atomicAdd(cc, sum);
    }
}
9,431 | #include "includes.h"
// Tiled matrix multiply C = A * B for WIDTH x WIDTH row-major matrices.
// Each thread computes a tile_size_y x tile_size_x sub-block of C; each block
// stages a (block_size_y*tile_size_y) x block_size_x tile of A and a
// (block_size_y*tile_size_y) x (block_size_x*tile_size_x) tile of B in shared
// memory per iteration over k. block_size_*, tile_size_* and WIDTH are
// compile-time constants from includes.h.
// NOTE(review): there are no bounds checks — WIDTH is assumed to be a
// multiple of block_size_x and the grid to exactly cover the matrix.
__global__ void matmul_kernel(float *C, float *A, float *B) {
__shared__ float sA[block_size_y*tile_size_y][block_size_x];
__shared__ float sB[block_size_y*tile_size_y][block_size_x * tile_size_x];
int tx = threadIdx.x;
int ty = threadIdx.y;
// top-left coordinates of this thread's output footprint in C
int x = blockIdx.x * block_size_x * tile_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y * tile_size_y + threadIdx.y;
int k, kb;
// per-thread accumulators, kept in registers
float sum[tile_size_y][tile_size_x];
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] = 0.0f;
}
}
for (k = 0; k < WIDTH; k += block_size_x) {
// barrier before overwriting tiles the previous iteration may still be reading
__syncthreads();
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
sA[ty + block_size_y * i][tx] = A[(y+i*block_size_y) * WIDTH + k + tx];
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sB[ty + block_size_y * i][tx + j * block_size_x] = B[(k + ty + block_size_y * i) * WIDTH + x + j * block_size_x];
}
}
// barrier so every element of sA/sB is written before any thread reads it
__syncthreads();
//compute: accumulate the partial products for this k-slab
#pragma unroll
for (kb = 0; kb < block_size_x; kb++) {
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] += sA[ty + block_size_y * i][kb] * sB[kb][tx + j * block_size_x];
}
}
}
}
//store result: one global write per accumulated output element
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
C[y * WIDTH + x + block_size_y * i * WIDTH + j * block_size_x] = sum[i][j];
}
}
}
9,432 | #include <cuda_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
// Allocate `size` bytes of device memory into *var; prints (but does not
// abort on) any CUDA error.
void handleCudaMalloc(void **var, ssize_t size) {
    cudaError_t status = cudaMalloc(var, size);
    if (status != cudaSuccess)
        printf("%s\n", cudaGetErrorString(status));
}
// Copy `size` bytes in direction `kind`; prints (but does not abort on)
// any CUDA error.
void handleCudaMemcpy(void* dst, const void* src, ssize_t size, cudaMemcpyKind kind) {
    cudaError_t status = cudaMemcpy(dst, src, size, kind);
    if (status != cudaSuccess)
        printf("%s\n", cudaGetErrorString(status));
}
// Release device memory; prints (but does not abort on) any CUDA error.
void handleCudaFree(void* pointer) {
    cudaError_t status = cudaFree(pointer);
    if (status != cudaSuccess)
        printf("%s\n", cudaGetErrorString(status));
}
9,433 | //*****************************************************************************************//
// sobel_kernel.cu - CUDA Hough Transform Benchmark
//
// Authors: Ramnarayan Krishnamurthy, University of Colorado (Shreyas.Ramnarayan@gmail.com)
// Matthew Demi Vis, Embry-Riddle Aeronautical University (MatthewVis@gmail.com)
//
// This code was used to obtain results documented in the SPIE Sensor and Technologies paper:
// S. Siewert, V. Angoth, R. Krishnamurthy, K. Mani, K. Mock, S. B. Singh, S. Srivistava,
// C. Wagner, R. Claus, M. Demi Vis, “Software Defined Multi-Spectral Imaging for Arctic
// Sensor Networks”, SPIE Algorithms and Technologies for Multipectral, Hyperspectral, and
// Ultraspectral Imagery XXII, Baltimore, Maryland, April 2016.
//
// This code was developed for, tested and run on a Jetson TK1 development kit by NVIDIA
// running Ubuntu 14.04
//
// Please use at your own risk. We are sharing so that other researchers and developers can
// recreate our results and make suggestions to improve and extend the benchmarks over time.
//
//*****************************************************************************************//
#include <stdio.h>
#include <assert.h>
#define MAXRGB 255
#define THRESHOLD 128
//***************************************************************//
// Sobel transform using CUDA hardware.
// Horizontal 3x3 Sobel, binarized: pixels whose gradient is below
// THRESHOLD become 0, all others MAXRGB. Out-of-image neighbors read as 0.
// Launch: 2-D grid, one thread per pixel.
//***************************************************************//
__global__ void sobel_transform(unsigned char *img_out, unsigned char *img_in, unsigned int width, unsigned int height)
{
    int x, y;
    unsigned char LUp, LCnt, LDw, RUp, RCnt, RDw;
    int pixel;
    x = blockDim.x*blockIdx.x + threadIdx.x;
    y = blockDim.y*blockIdx.y + threadIdx.y;
    if( x<width && y<height )
    {
        LUp = (x-1>=0 && y-1>=0) ? img_in[(x-1)+(y-1)*width] : 0;
        LCnt= (x-1>=0) ? img_in[(x-1)+y*width]:0;
        LDw = (x-1>=0 && y+1<height) ? img_in[(x-1)+(y+1)*width] : 0;
        RUp = (x+1<width && y-1>=0) ? img_in[(x+1)+(y-1)*width] : 0;
        RCnt= (x+1<width) ? img_in[(x+1)+y*width] : 0;
        RDw = (x+1<width && y+1<height) ? img_in[(x+1)+(y+1)*width] : 0;
        pixel = -1*LUp + 1*RUp +
                -2*LCnt + 2*RCnt +
                -1*LDw + 1*RDw;
        // The previous version clamped pixel to [0, MAXRGB] before this
        // threshold, but those clamps were dead code: every clamped value
        // lands on the same side of THRESHOLD as the unclamped one, so the
        // binarization alone produces an identical result.
        if(pixel < THRESHOLD)
            pixel = 0;
        else
            pixel = MAXRGB;
        img_out[x+y*width] = pixel;
    }
}
//***************************************************************//
// simple wrapper to keep cuda code in just the kernel file.
// Launches sobel_transform with the caller-supplied grid/block shape
// (2-D, one thread per pixel), no dynamic shared memory, default stream.
// Asynchronous: the caller is responsible for synchronizing.
//***************************************************************//
void sobel_transform_wrapper(unsigned char *img_out, unsigned char *img_in, unsigned int width, unsigned int height, dim3 grid, dim3 threads)
{
sobel_transform<<<grid, threads, 0>>>(img_out, img_in, width, height);
}
//***************************************************************//
// Sobel transform using the CPU (horizontal gradient only).
// The gradient is clamped to [0, 255]; out-of-image neighbors read as 0.
// Unlike the GPU kernel this reference does NOT binarize the output.
//***************************************************************//
void CPU_transform(unsigned char *img_out, unsigned char *img_in, unsigned int width, unsigned int height)
{
    const int maxRgb = 255;  // same value as the MAXRGB macro
    for(int y=0; y<height; y++)
    {
        for(int x=0; x<width; x++)
        {
#ifdef DEBUG
            printf("Pixel X:%d Y:%d\n",x,y);
#endif
            assert(x+(y*width)<width*height);
            // 3x3 horizontal Sobel taps; out-of-image pixels count as 0
            int lUp  = (x-1>=0 && y-1>=0)        ? img_in[(x-1)+(y-1)*width] : 0;
            int lCnt = (x-1>=0)                  ? img_in[(x-1)+y*width]     : 0;
            int lDw  = (x-1>=0 && y+1<height)    ? img_in[(x-1)+(y+1)*width] : 0;
            int rUp  = (x+1<width && y-1>=0)     ? img_in[(x+1)+(y-1)*width] : 0;
            int rCnt = (x+1<width)               ? img_in[(x+1)+y*width]     : 0;
            int rDw  = (x+1<width && y+1<height) ? img_in[(x+1)+(y+1)*width] : 0;
            int pixel = (rUp - lUp) + 2*(rCnt - lCnt) + (rDw - lDw);
            // clamp the gradient into displayable range
            if (pixel < 0) pixel = 0;
            if (pixel > maxRgb) pixel = maxRgb;
            img_out[x+y*width] = pixel;
#ifdef DEBUG
            printf("\r%5.2f",100*(float)(y*width+x)/(float)(width*height-1));
#endif
        }
    }
#ifdef DEBUG
    printf("\n");
#endif
}
|
9,434 | #include "includes.h"
// Stub kernel: intentionally empty placeholder — the parameters are accepted
// but nothing is read, computed, or written yet.
__global__ void compute_kernel(double *cellStatePtr, double *iApp, double *cellVDendPtr) {
}
9,435 | #include <iostream>
#include <cstdlib>
// Element-wise vector addition: C[i] = A[i] + B[i], one thread per element.
// NOTE(review): there is no bounds check, so the launch must supply exactly
// as many threads as elements (main below launches 1 block of N=8 threads);
// any extra thread would read/write out of bounds.
__global__ void addVec (const int* A, const int * B, int* C) {
int i=blockIdx.x*blockDim.x+threadIdx.x;
C[i] = A[i]+B[i];
}
// Demo driver: adds two N-element vectors on the GPU and prints the sums.
int main() {
    int *A, *B, *C;        // host arrays
    int *A_d, *B_d, *C_d;  // device arrays
    int N = 8;             // array length
    int buffer_size = sizeof(int)*N; // array size, in bytes
    // Allocate host arrays with new[] ...
    A = new int[N];
    B = new int[N];
    C = new int[N];
    // Allocate buffer_size bytes on the GPU's global memory for each array
    cudaMalloc((void**) &A_d, buffer_size);
    cudaMalloc((void**) &B_d, buffer_size);
    cudaMalloc((void**) &C_d, buffer_size);
    for (int i=0; i<N; i++) { // initialize A and B
        A[i] = N - i;
        B[i] = i;
    }
    // Copy A and B to the device; C_d is write-only on the device, so it
    // needs no initial copy.
    cudaMemcpy( A_d, A, buffer_size, cudaMemcpyHostToDevice );
    cudaMemcpy( B_d, B, buffer_size, cudaMemcpyHostToDevice );
    int Blocks = 1;
    int ThreadsPerBlock = 8;  // exactly N threads: addVec has no bounds guard
    addVec<<<Blocks,ThreadsPerBlock>>> (A_d, B_d, C_d); // launch the kernel
    // Blocking copy back: also synchronizes with the kernel.
    cudaMemcpy( C, C_d, buffer_size, cudaMemcpyDeviceToHost );
    for (int i=0; i<N; i++) { // check the results
        std::cout << A[i] << " + " << B[i] << " = " << C[i] << std::endl;
    }
    // ... and release them with delete[]. BUG FIX: the previous version
    // called free() on new[]-allocated memory, which is undefined behavior.
    delete[] A;
    delete[] B;
    delete[] C;
    // Clean arrays on device
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
|
9,436 | #include <thrust/sort.h>
#include <thrust/copy.h>
#include <iostream>
// Thrust demo: co-sort a value array by its keys, then stable-sort another
// array in descending order, printing each result.
int main(int argc, char *argv[]) {
    const int N = 6;
    int A[N] = {1, 4, 2, 8, 5, 7};
    int keys[N] = {1, 4, 3, 8, 5, 7};
    char values[N] = {'a', 'b', 'c', 'd', 'e', 'f'};
    // Sort keys ascending, permuting values along with them.
    thrust::sort_by_key(keys, keys + N, values);
    for (int idx = 0; idx < N; idx++)
        std::cout << keys[idx] << " ";
    std::cout << std::endl;
    for (int idx = 0; idx < N; idx++)
        std::cout << values[idx] << " ";
    std::cout << std::endl;
    // Stable descending sort of A.
    thrust::stable_sort(A, A + N, thrust::greater<int>());
    std::cout << "A:\n";
    for (int idx = 0; idx < N; idx++)
        std::cout << A[idx] << " ";
    std::cout << std::endl;
}
|
9,437 | #include "includes.h"
// Cross-entropy loss: each thread computes -sum_c target*log(predict) for one
// sample into workspace[batch_idx]; then block 0 alone reduces workspace into
// reduced_loss[0] through shared memory.
// Launch requirements: dynamic shared memory of blockDim.x floats; one thread
// per sample.
// NOTE(review): batch_idx is not bounds-checked, so the grid must supply
// exactly batch_size threads; and the reduction guard
// `threadIdx.x + stride < batch_size` looks suspect when batch_size <
// blockDim.x (a conventional tree reduction guards threadIdx.x < stride) —
// confirm against the launcher.
__global__ void softmax_loss_kernel(float *reduced_loss, float *predict, float *target, float *workspace, int batch_size, int num_outputs)
{
int batch_idx = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ float s_data[];
float loss = 0.f;
// each thread calculates entropy for its sample and stores it in workspace
for (int c = 0; c < num_outputs; c++)
loss += target[batch_idx * num_outputs + c] * logf(predict[batch_idx * num_outputs + c]);
workspace[batch_idx] = -loss;
// then, we do reduction of the result to calculate loss using 1 thread block
if (blockIdx.x > 0) return;
// cumulate workspace data into shared memory, blockDim.x-strided
s_data[threadIdx.x] = 0.f;
for (int i = 0; i < batch_size; i += blockDim.x)
{
s_data[threadIdx.x] += workspace[threadIdx.x + i];
}
__syncthreads();
// tree reduction over shared memory
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (threadIdx.x + stride < batch_size)
s_data[threadIdx.x] += s_data[threadIdx.x + stride];
__syncthreads();
}
if (threadIdx.x == 0) {
reduced_loss[blockIdx.x] = s_data[0];
}
}
9,438 | #include "includes.h"
// Writes four planes into `delta` from a single-channel image `input`:
// planes 0/1 hold the intensity-weighted x and y coordinates (mean sums),
// planes 2/3 the intensity-weighted squared coordinates (variance sums),
// with pixel coordinates mapped into [-1, 1]. One thread per pixel, flat
// index built from a 2-D grid of 1-D blocks.
__global__ void PrepareMeanStdDev(float* input, float* delta, int imageWidth, int imageHeight)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
           + blockDim.x * blockIdx.x
           + threadIdx.x;
    int size = imageWidth * imageHeight;
    if (id >= size)
        return;  // tail threads past the last pixel do nothing
    int px = id % imageWidth;
    int py = id / imageWidth;
    // normalized coordinates in [-1, 1]
    float nx = 2.0f * px / imageWidth - 1;
    float ny = 2.0f * py / imageHeight - 1;
    float v = input[id];
    // mean sums
    delta[id] = v * nx;
    delta[id + size] = v * ny;
    // variance sums
    delta[id + 2 * size] = v * nx * nx;
    delta[id + 3 * size] = v * ny * ny;
}
9,439 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cstdlib>
#include <assert.h>
#include <sys/time.h>
const int B_SIZE = 1024;
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double timer() {
    struct timeval tp;
    struct timezone tzp;
    gettimeofday(&tp, &tzp);
    return (double)tp.tv_sec + 1.e-06 * (double)tp.tv_usec;
}
// Out-of-place transpose of an n-by-n row-major matrix: one thread per
// element, flat 1-D launch of B_SIZE-wide blocks.
__global__ void transpose(double *M, double *M_t, int n) {
    int Idx = blockIdx.x * B_SIZE + threadIdx.x;
    if (Idx >= n*n)
        return;  // tail threads beyond the matrix
    int col = Idx % n;
    int row = Idx / n;
    M_t[col * n + row] = M[Idx];
}
// Dump an n-by-n row-major matrix to stdout, one row per line.
void print(double *M, int n) {
    std::cout << "Print matrix: \n";
    for (int r = 0; r < n; r++) {
        for (int c = 0; c < n; c++)
            std::cout << M[r * n + c] << " ";
        std::cout << std::endl;
    }
}
// Driver: builds an n x n matrix, transposes it on CPU and GPU, times both,
// and checks the GPU result against the host data.
int main(int argc, char ** argv) {
    assert(argc == 2);
    int n = atoi(argv[1]);
    size_t size = n * n * sizeof(double);
    double *M = (double*)malloc(size);
    // init. BUG FIX: the original wrote `i / j + j % i`, which performs an
    // integer division by zero for j == 0 and a modulo by zero for i == 0
    // (undefined behavior; SIGFPE on x86). Guard both, treating the
    // undefined terms as 0.
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            M[i * n + j] = (j != 0 ? i / j : 0) + (i != 0 ? j % i : 0);
        }
    }
    double *M_t = (double*)malloc(size);
    // CPU reference transpose, timed with gettimeofday
    double t1 = timer();
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            M_t[j * n + i] = M[i * n + j];
        }
    }
    double t2 = timer();
    std::cout << "Cpu time: " << t2-t1 << std::endl;
    // GPU transpose: one thread per element, timed with CUDA events
    dim3 block(B_SIZE);
    dim3 grid((n * n - 1) / B_SIZE + 1);  // ceil(n*n / B_SIZE)
    double *M_dev, *M_t_dev;
    cudaMalloc(&M_dev, size);
    cudaMalloc(&M_t_dev, size);
    cudaMemcpy(M_dev, M, size, cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    transpose<<<grid, block>>>(M_dev, M_t_dev, n);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaMemcpy(M_t, M_t_dev, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "Gpu time is: " << ms << std::endl;
    // verify: M_t must be the transpose of M
    bool check = true;
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            check = check && (M[i*n+j] == M_t[j*n+i]);
        }
    }
    if (check) std::cout << "CHECK PASSED!" << std::endl;
    else std::cout << "CHECK FAILED!" << std::endl;
    // release events and memory
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(M);
    free(M_t);
    cudaFree(M_dev);
    cudaFree(M_t_dev);
    return 0;
}
|
9,440 | #include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
// Compare two strided 1-D tensors element-wise; every mismatch increments
// eq_flag[0] (so *eq_flag == 0 afterwards means the tensors are equal).
// One thread per logical element; gid is bounds-checked against n.
__global__ void tensor_1d_equals (const int n, const DTYPE* x, const int offset_x, const int stride_x, const DTYPE* y, const int offset_y, const int stride_y, int* eq_flag) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n) {
        const int ix = offset_x + gid * stride_x;
        const int iy = offset_y + gid * stride_y;
        if (x[ix] != y[iy]) {
            // BUG FIX: the original did `eq_flag[0]++`, a plain
            // read-modify-write that races when several threads find a
            // mismatch concurrently and can lose updates. atomicAdd makes
            // the increment safe.
            atomicAdd(eq_flag, 1);
        }
    }
}
9,441 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
/* Auto-generated floating-point stress kernel: evaluates a fixed expression
 * tree (expf/acosf/log10f/powf/coshf/atanf/fabsf/sinhf) over the scalar and
 * array arguments and prints the accumulated value. The exact literals and
 * evaluation order ARE the test — do not simplify or reorder. */
void compute(float comp, float var_1,float var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float* var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float* var_20,float var_21) {
if (comp == (-0.0f / var_1 / expf(acosf(-1.2513E-43f - var_2 / +1.9779E-37f)))) {
comp = (+1.8974E-44f + +1.2493E-37f + -1.0347E-30f * (-1.4569E27f - var_5));
comp = +1.1623E-5f * log10f(var_6 + (var_7 + -1.3131E-37f * +1.7783E-37f));
comp += var_8 - (-0.0f + fabsf(-1.0131E21f + atanf(powf(coshf((var_9 - (-1.4074E-44f - var_10 / -1.6276E34f - +1.0109E-7f))), (var_11 - log10f(+1.7576E-35f))))));
// first array loop: fills var_12 and folds each element into comp
for (int i=0; i < var_3; ++i) {
var_12[i] = (var_13 / (var_14 * +1.4414E-42f / sinhf(-1.5031E-44f - (-0.0f - var_15 - (var_16 / (-1.4357E35f - -1.7691E36f))))));
comp += var_12[i] / -1.0193E-35f * var_17;
comp += +0.0f - -0.0f / (var_18 * -1.2206E36f);
comp += var_19 * -1.5791E-43f;
}
// second array loop: fills var_20 and folds each element into comp
for (int i=0; i < var_4; ++i) {
var_20[i] = +1.0155E35f;
comp = var_20[i] / -0.0f + -1.0106E35f * -1.8495E36f * var_21;
comp += +1.3088E34f * +1.2880E-23f - -1.1207E10f;
}
}
// device printf: the single observable output of the test
printf("%.17g\n", comp);
}
// Allocate a float array filled with v. `count` defaults to the original
// fixed size of 10, so existing single-argument callers are unaffected.
// Returns NULL if the allocation fails (the original would have crashed in
// the fill loop).
float* initPointer(float v, int count = 10) {
    float *ret = (float*) malloc(sizeof(float)*count);
    if (ret == NULL)
        return NULL;
    for(int i=0; i < count; ++i)
        ret[i] = v;
    return ret;
}
// Auto-generated harness: parses 22 command-line values and launches the
// compute kernel with a single thread.
// NOTE(review): argv[1..22] are read without checking argc — the test
// generator presumably always supplies them; running by hand with fewer
// arguments dereferences past argv.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
int tmp_4 = atoi(argv[4]);
int tmp_5 = atoi(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float* tmp_13 = initPointer( atof(argv[13]) );   // 10-element device-visible? no: host array passed to kernel — see NOTE below
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float* tmp_21 = initPointer( atof(argv[21]) );
float tmp_22 = atof(argv[22]);
// NOTE(review): tmp_13/tmp_21 are host malloc pointers handed to a
// __global__ kernel — valid only under unified/managed addressing;
// presumably acceptable for this generated fuzz test.
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
cudaDeviceSynchronize();
return 0;
}
|
9,442 | // nvcc -gencode arch=compute_61,code=sm_61 -fmad=false -O3 -ptx fma.cu -o fma.ptx -I/cvmfs/cms.cern.ch/slc7_amd64_gcc630/external/cuda/9.1.85-cms/include ; cat fma.ptx
// c++ -O3 -S -march=native -ffp-contract=off fma.cc ; cat fma.s
#include <cmath>
#ifdef __NVCC__
#define inline __device__ __host__ inline
#else
#define __global__
#endif
#if defined(__x86_64__) && !defined(__FMA__)
#warning nofma
#define FMA(x,y,z) x*y+z
#else
#warning fma
#define FMA(x,y,z) std::fma(x,y,z)
#endif
// Each helper below is a minimal expression whose generated code (PTX/x86)
// is inspected to see whether the compiler emits fused multiply-adds.
// The exact algebraic shape of every expression is intentional — do not
// rearrange terms (contraction behavior depends on it).
inline
float dofma(float x, float y, float z) {
// FMA macro expands to std::fma(x,-y,z) or x*(-y)+z depending on __FMA__
return FMA(x,-y,z);
}
inline
float myf(float x, float y, float z) {
// explicit fused multiply-add with a negated multiplicand
return std::fma(x,-y,z);
}
inline
float myff(float x, float y, float z) {
// add-then-multiply written z + x*y: candidate for contraction to fma
return z+x*y;
}
inline
float myfn(float x, float y, float z) {
// multiply-subtract shape: candidate for fma with negated addend
return x*y-z;
}
inline
float myxyn(float x, float y, float z) {
// difference of two products sharing a factor y
return (x*y) - (y*z);
}
inline
float myxyp(float x, float y, float z) {
// sum of two products sharing a factor y
return (x*y) + (y*z);
}
inline
float logP(float y) {
// Degree-6 polynomial in y with hex-float coefficients — presumably a
// log(1+y)-style approximation used with the range reduction below; the
// exact target function is not shown in this file. Evaluated in Horner
// form; keep the coefficient order intact.
return y * (float(0xf.fff14p-4) + y * (-float(0x7.ff4bfp-4)
+ y * (float(0x5.582f6p-4) + y * (-float(0x4.1dcf2p-4) + y * (float(0x3.3863f8p-4) + y * (-float(0x1.9288d4p-4)))))));
}
inline
float cw(float x) {
// Cody-and-Waite range reduction for exp-style kernels: subtracts
// z*log(2) in two pieces (high part log2H, low part log2L) so the
// subtraction stays exact even when an FMA is (or is not) emitted.
constexpr float inv_log2f = float(0x1.715476p0);   // 1/ln(2)
constexpr float log2H = float(0xb.172p-4);         // high bits of ln(2)
constexpr float log2L = float(0x1.7f7d1cp-20);     // low bits of ln(2)
// This is doing round(x*inv_log2f) to the nearest integer
// float z = std::round(x*inv_log2f);
float z = std::floor((x*inv_log2f) +0.5f);
float y;
// Cody-and-Waite accurate range reduction. FMA-safe.
y = x;
y -= z*log2H;
y -= z*log2L;
return y;
}
// Kernel driver: applies each helper above to one fixed lane of the input
// arrays so the generated SASS/PTX for every variant can be inspected in
// isolation. Requires x, y, z, r to hold at least 10 elements (indices
// 0..6 and 9 are touched).
__global__
void goGPU(float * x, float * y, float * z, float * r) {
r[9] = dofma(x[9],y[9],z[9]);
r[0] = myf(x[0],y[0],z[0]);
r[1] = myff(x[1],y[1],z[1]);
r[2] = myfn(x[2],y[2],z[2]);
r[3] = myxyn(x[3],y[3],z[3]);
r[4] = myxyp(x[4],y[4],z[4]);
r[5] = logP(x[5]);
r[6] = cw(x[6]);
}
|
9,443 | #include <stdio.h>
#include <stdlib.h>
#define SIZE 2000
// Debug kernel: each thread prints its threadIdx.x (device printf — output
// order is arbitrary) and records it in p[threadIdx.x].
__global__ void demo(int * p){
int tx=threadIdx.x;
printf("tx=%d\n",tx);
p[tx]=tx;
}
// Demo of an invalid launch configuration: the block below requests SIZE
// (2000) threads per block, which exceeds the hardware limit, so the launch
// is expected to fail.
int main(int argc , char **argv){
    int * p;
    cudaError_t err;
    err=cudaMalloc((void**)&p,SIZE*sizeof(int));
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    dim3 dimGrid(1,1);
    dim3 dimBlock(SIZE,1);
    // Configuration: too many threads in a thread block (deliberate).
    demo<<<dimGrid,dimBlock>>>(p);
    // BUG FIX: kernel launches do not return an error directly; without
    // this cudaGetLastError() the invalid-configuration error this demo is
    // built to show was never surfaced.
    err=cudaGetLastError();
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
    }
    err=cudaFree(p);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    printf("tx\n");
    return 0;
}
|
9,444 | #include <iostream>
#include <fstream>
#include <iomanip>
#include <cstring>
#include <cmath>
#include <stdlib.h>
#include <sys/time.h>
using namespace std;
//-----------------------DO NOT CHANGE NAMES, ONLY MODIFY VALUES--------------------------------------------
//Final Value that will be compared for correctness
//You need to create the function prototypes and definitions as per your design, but you need to present final results in this array
//-----------------------------Structures for correctness check-------------------
char **fourbit_sorted_suffixes_student;
//--------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------
//-----------------------DO NOT CHANGE AT ALL--------------------------------------------
int read_count = 0;
int read_length = 0;
//This array is the default result
char **fourbit_sorted_suffixes_original;
//Read file to get reads.
//Counts lines (by '\n'), reads each line with getline, infers the read
//length from the first line, and replaces each read's trailing newline with
//the '$' sentinel. Sets *read_count and *length; returns the array of reads.
//NOTE(review): a final line without a trailing '\n' is not counted.
char** inputReads(char *file_path, int *read_count, int *length){
    FILE *read_file = fopen(file_path, "r");
    // BUG FIX: the original dereferenced read_file without checking fopen's
    // result, segfaulting on a missing/unreadable path.
    if (read_file == NULL){
        fprintf(stderr, "Cannot open reads file: %s\n", file_path);
        exit(EXIT_FAILURE);
    }
    int ch, lines=0;
    char **reads;
    do
    {
        ch = fgetc(read_file);
        if (ch == '\n')
            lines++;
    } while (ch != EOF);
    rewind(read_file);
    reads=(char**)malloc(lines*sizeof(char*));
    *read_count = lines;
    int i = 0;
    size_t len = 0;
    for(i = 0; i < lines; i++)
    {
        reads[i] = NULL;
        len = 0;
        getline(&reads[i], &len, read_file);  // getline allocates each buffer
    }
    fclose(read_file);
    // read length = index of the first newline + 1 (room for the sentinel)
    int j=0;
    while(reads[0][j]!='\n')
        j++;
    *length = j+1;
    // replace every read's newline with the '$' terminator
    for(i=0;i<lines;i++)
        reads[i][j]='$';
    return reads;
}
//Check correctness of values: returns 1 when every byte of the student
//result matches the reference result, 0 at the first mismatch.
int checker(){
    for(int i=0;i<read_count*read_length;i++)
        for(int j=0;j<read_length/2;j++)
            if(fourbit_sorted_suffixes_student[i][j] != fourbit_sorted_suffixes_original[i][j])
                return 0;
    return 1;
}
//Rotate a 4-bit encoded read right by one nibble, IN PLACE, then return a
//freshly malloc'd copy of the rotated buffer (caller frees the copy).
char* rotateRead(char *read, int byte_length){
    // carry the last-seen low nibble into the next byte's high nibble
    char carry = (char)((read[0] & 0x0F) << 4);
    read[0] = (char)((read[0] >> 4) & 0x0F);
    for(int idx = 1; idx < byte_length; idx++){
        char merged = (char)(((read[idx] >> 4) & 0x0F) | carry);
        carry = (char)((read[idx] & 0x0F) << 4);
        read[idx] = merged;
    }
    // wrap the final carry around into the first byte's high nibble
    read[0] = (char)(read[0] | carry);
    char *copy = (char*)malloc(byte_length*sizeof(char));
    memcpy(copy, read, byte_length);
    return copy;
}
//Generate all byte_length*2 nibble-rotations ("suffixes") of a 4-bit read.
//rotateRead mutates `read` in place, so successive calls yield successive
//rotations; after a full cycle `read` is back to its original content.
char** generateSuffixes(char *read, int byte_length){
    int count = byte_length*2;
    char **suffixes=(char**)malloc(count*sizeof(char*));
    for(int s = 0; s < count; s++)
        suffixes[s] = rotateRead(read, byte_length);
    return suffixes;
}
//Lexicographic comparator for 4-bit encoded suffixes of byte_length bytes:
//returns 1 if suffix1 sorts after suffix2, -1 if before, 0 if equal.
int compSuffixes(char *suffix1, char *suffix2, int byte_length){
    for(int i=0;i<byte_length;i++){
        if(suffix1[i] != suffix2[i])
            return (suffix1[i] > suffix2[i]) ? 1 : -1;
    }
    return 0;
}
//Pack a length-character read into length/2 bytes, two 4-bit codes per byte
//($=0, A=1, C=2, G=3, anything else=4); even positions take the high nibble.
//Returns a calloc'd buffer owned by the caller.
char* fourbitEncodeRead(char *read, int length){
    int byte_length = length/2;
    char *fourbit_read = (char*)calloc(byte_length,sizeof(char));
    for(int i=0;i<length;i++){
        char code;
        switch(read[i]){
            case '$': code = 0x00; break;
            case 'A': code = 0x01; break;
            case 'C': code = 0x02; break;
            case 'G': code = 0x03; break;
            default:  code = 0x04; break;  // 'T' and any other character
        }
        if(i%2==0)
            code <<= 4;  // even index -> high nibble
        fourbit_read[i/2] |= code;
    }
    return fourbit_read;
}
//Bubble-sort the suffix pointers' contents in ascending order (byte-wise,
//via compSuffixes), swapping buffer contents through a scratch buffer.
void sort_fourbit_suffixes(char **suffixes, int suffix_count, int byte_length){
    char *temp=(char*)malloc(byte_length*sizeof(char));
    for(int i=0;i<suffix_count-1;i++){
        for(int j=0;j<suffix_count-i-1;j++){
            if(compSuffixes(suffixes[j], suffixes[j+1], byte_length)>0){
                memcpy(temp, suffixes[j], byte_length*sizeof(char));
                memcpy(suffixes[j], suffixes[j+1], byte_length*sizeof(char));
                memcpy(suffixes[j+1], temp, byte_length*sizeof(char));
            }
        }
    }
    // BUG FIX: the scratch buffer was leaked on every call.
    free(temp);
}
//Default Pipeline. You need to implement CUDA function corresponding to everything inside this function.
//For every read: 4-bit encode it, generate all rotations ("suffixes"),
//bubble-sort them, and store the sorted rows into the global reference
//array fourbit_sorted_suffixes_original.
//NOTE(review): suffix_count is passed as read_length while generateSuffixes
//builds byte_length*2 == read_length rotations, so the counts agree only
//because byte_length == read_length/2.
//NOTE(review): the encoded read (fourbitEncodeRead result) and the
//per-read suffix pointer array are never freed.
void pipeline(char **reads, int read_length, int read_count){
fourbit_sorted_suffixes_original = (char**)malloc(read_length*read_count*sizeof(char*));
for(int i=0;i<read_count;i++){
char **suffixes_for_read = generateSuffixes(fourbitEncodeRead(reads[i], read_length), read_length/2);
sort_fourbit_suffixes(suffixes_for_read, read_length, read_length/2);
for(int j=0;j<read_length;j++){
fourbit_sorted_suffixes_original[i*read_length+j] = suffixes_for_read[j];
}
}
//--------------For debug purpose--------------
/*
for(int i=0;i<read_count*read_length;i++){
for(int j=0;j<read_length/2;j++)
printf("%x\t",fourbit_sorted_suffixes_original[i][j]);
printf("\n");
}*/
//---------------------------------------------
}
//Merge all sorted suffixes in overall sorted order.
//Stub: intentionally left empty — the merge is to be implemented (the main
//below calls it, so the default timing includes this no-op).
void mergeAllSorted4bitSuffixes(char** suffixes, int read_count, int read_length){
}
//-----------------------DO NOT CHANGE--------------------------------------------
// Benchmark harness: runs the default pipeline, times it, then times the
// (empty) student section and reports the speedup.
int main(int argc, char *argv[]){
    char **reads = inputReads(argv[1], &read_count, &read_length);//Input reads from file
    //-----------Default implementation----------------
    //-----------Time capture start--------------------
    struct timeval TimeValue_Start;
    struct timeval TimeValue_Final;
    struct timezone TimeZone_Start;
    struct timezone TimeZone_Final;
    long time_start, time_end;
    double time_overhead_default, time_overhead_student;
    gettimeofday(&TimeValue_Start, &TimeZone_Start);
    pipeline(reads, read_length, read_count);
    mergeAllSorted4bitSuffixes(fourbit_sorted_suffixes_original, read_count, read_length);
    gettimeofday(&TimeValue_Final, &TimeZone_Final);
    time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
    time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
    time_overhead_default = (time_end - time_start)/1000000.0;
    cout<<time_overhead_default<<endl;
    //------------Time capture end----------------------
    //--------------------------------------------------
    //-----------Your implementations------------------
    gettimeofday(&TimeValue_Start, &TimeZone_Start);
    time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
    //-----------Call your functions here--------------------
    //-----------Call your functions here--------------------
    // BUG FIX: capture the end timestamp — previously TimeValue_Final still
    // held the default pipeline's end time, so the student timing was stale.
    gettimeofday(&TimeValue_Final, &TimeZone_Final);
    time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
    time_overhead_student = (time_end - time_start)/1000000.0;
    //--------------------------------------------------
    //---------------Correction check and speedup calculation----------------------
    float speedup=0.0;
    //if(checker()==1)
    //	speedup = time_overhead_default/time_overhead_student;
    cout<<"Speedup="<<speedup<<endl;
    //-----------------------------------------------------------------------------
    return 0;
}
|
9,445 | #include <stdio.h>
#include <cuda.h>
/* Lab8 Q3
* Write a program in CUDA to perform matrix multiplication using 2D Grid and 2D Block.
*/
// Matrix multiply C(m x p) = A(m x n) * B(n x p), row-major, one thread per
// output element on a 2-D grid of 2-D blocks.
__global__ void matMul2d(const int* a, const int *b, int *c, int m, int n, int p){
    // Calculate appropriate row and col:
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: guard against threads outside the m x p output — the original
    // wrote c[row*p+col] unconditionally, so any grid larger than the matrix
    // wrote past the end of the buffer.
    if (row >= m || col >= p)
        return;
    // Accumulate the dot product in a register; single global store at the end.
    int acc = 0;
    for(int k = 0; k < n ; k++)
        acc += a[row * n + k] * b[k * p + col];
    c[row * p + col] = acc;
}
// Driver: multiplies a 4x2 by a 2x4 integer matrix on the GPU and prints
// A, B and the product C.
int main(){
    int m = 4, n = 2, p =4;
    int a[m][n];
    int b[n][p];
    int c[m][p];
    // Initialize A and B
    for(int i = 0; i < m; i++)
        for(int j = 0; j < n; j ++)
            a[i][j] = i * m + j;
    for(int i = 0; i < n; i++)
        for(int j = 0; j < p; j ++)
            b[i][j] = i * n + j;
    // Device copies of inputs
    int *d_a, *d_b, *d_c;
    // Allocate memory on device
    cudaMalloc((void**) &d_a, m * n * sizeof(int));
    cudaMalloc((void**) &d_b, n * p * sizeof(int));
    cudaMalloc((void**) &d_c, m * p * sizeof(int));
    // Copy inputs to device.
    cudaMemcpy(d_a, a, m * n * sizeof(int), cudaMemcpyHostToDevice);
    // BUG FIX: the original copied `a` into d_b, so the device multiplied A
    // by A's bytes instead of by B.
    cudaMemcpy(d_b, b, n * p * sizeof(int), cudaMemcpyHostToDevice);
    // Launch a 2-D grid of 2x2 blocks sized to exactly cover the m x p
    // output. (The original grid was (m*p/2) x (m*p/2) blocks — thousands
    // of threads beyond the 4x4 result.)
    dim3 block(2, 2);
    dim3 grid((p + block.x - 1) / block.x, (m + block.y - 1) / block.y);
    matMul2d<<<grid, block>>>(d_a, d_b, d_c, m, n, p);
    // Copy outputs back from device
    cudaError err = cudaMemcpy(c, d_c, m * p * sizeof(int), cudaMemcpyDeviceToHost);
    if(err != cudaSuccess)
        printf("CUDA error copying to Host: %s\n", cudaGetErrorString(err));
    printf("A is : \n");
    for(int i = 0; i < m; i++){
        for(int j = 0; j < n; j ++)
            printf("%d ", a[i][j]);
        printf("\n");
    }
    printf("\nB is : \n");
    for(int i = 0; i < n; i++){
        for(int j = 0; j < p; j ++)
            printf("%d ", b[i][j]);
        printf("\n");
    }
    printf("\nC is : \n");
    for(int i = 0; i < m; i++){
        for(int j = 0; j < p; j ++)
            printf("%d ", c[i][j]);
        printf("\n");
    }
    // Free and cleanup
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
9,446 | #include <iostream>
#include <math.h>
#define N 102
#define BLOCK_DIM 32
typedef struct{
int rows;
int cols;
int stride;
float* mat;
float* dev;
}matrix;
// C = A * B using the matrices' device buffers (row-major); one thread per
// output element, guarded against threads beyond the a.rows x b.cols result.
__global__ void matMult(matrix a, matrix b, matrix c){
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= a.rows || col >= b.cols)
        return;
    float acc = 0;
    for (int k = 0; k < a.cols; k++)
        acc += a.dev[row * a.cols + k] * b.dev[k * b.cols + col];
    c.dev[row * b.cols + col] = acc;
}
/*
 * Multiplies two N x N all-ones matrices on the GPU and reports the
 * maximum deviation of the product from the expected value (a.cols).
 */
int main(void){
    cudaError_t error;
    matrix a;
    matrix b;
    matrix c;
    a.rows = N;
    a.cols = N;
    b.rows = N;
    b.cols = N;
    // Bug fix: c's dimensions were never initialized; set them so the
    // struct is internally consistent (the kernel itself derives the
    // output shape from a and b, but downstream code may read c.rows).
    c.rows = a.rows;
    c.cols = b.cols;
    a.mat = new float[a.rows * a.cols];
    cudaMalloc(&a.dev, a.rows * a.cols * sizeof(float));
    b.mat = new float[b.rows * b.cols];
    cudaMalloc(&b.dev, b.rows * b.cols * sizeof(float));
    c.mat = new float[a.rows * b.cols];
    cudaMalloc(&c.dev, a.rows * b.cols * sizeof(float));
    // Fill both inputs with ones (valid because all dims equal N).
    for(int i = 0; i < a.cols; i++){
        for(int k = 0; k < a.rows; k++){
            a.mat[k * a.cols + i] = 1;
        }
        for(int j = 0; j < b.cols; j++){
            b.mat[i * b.cols + j] = 1;
        }
    }
    cudaMemcpy(a.dev, a.mat, a.rows * a.cols * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b.dev, b.mat, b.rows * b.cols * sizeof(float), cudaMemcpyHostToDevice);
    // Ceil-divided grid so partial edge tiles are covered.
    dim3 block(BLOCK_DIM, BLOCK_DIM);
    dim3 grid((b.cols + block.x - 1)/block.x, (a.rows + block.y - 1)/block.y);
    matMult<<<grid, block>>>(a, b, c);
    // Blocking copy doubles as synchronization and surfaces kernel errors.
    error = cudaMemcpy(c.mat, c.dev, a.rows * b.cols * sizeof(float), cudaMemcpyDeviceToHost);
    printf("C device to host: %s\n", cudaGetErrorString(error));
    printf("%f\n", c.mat[3]);
    // Every element of ones*ones should equal a.cols exactly.
    float maxError = 0;
    for(int k = 0; k < a.rows; k++){
        for(int j = 0; j < b.cols; j++){
            maxError = fmax(maxError, fabs(c.mat[k * b.cols + j] - a.cols));
        }
    }
    std::cout << "Error: " << maxError << std::endl;
    cudaFree(a.dev);
    cudaFree(b.dev);
    cudaFree(c.dev);
    delete [] a.mat;
    delete [] b.mat;
    delete [] c.mat;
    return 0;
}
9,447 | #include "includes.h"
// Widens each of the first numElements floats in `input` to double,
// storing them in `output`; one thread per element.
__global__ void floatToDouble(float* input, double* output, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    output[idx] = (double)input[idx];
}
9,448 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/*
 * Auto-generated floating-point stress kernel: evaluates a fixed
 * expression over the scalar arguments and prints the result with
 * full precision.  The literal values and operand order are the
 * point of the test — do not re-associate or simplify.
 */
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7) {
comp += (+1.4392E36f - var_1 / log10f(var_2 / (var_3 - (var_4 - (-1.9382E-36f / +1.1977E-37f)))));
if (comp > (-1.9936E-37f * -1.8166E-41f * (var_5 + acosf(expf(-1.5876E-35f))))) {
comp = var_6 / var_7;
}
printf("%.17g\n", comp);
}
/*
 * Allocates a 10-element float array with every slot set to v.
 * Returns NULL if the allocation fails; callers own the buffer.
 */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  if (ret == NULL)  /* bug fix: the original wrote through an unchecked malloc */
    return NULL;
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
/*
 * Parses eight float arguments and feeds them to the single-thread
 * `compute` kernel, then waits so the device printf is flushed.
 */
int main(int argc, char** argv) {
    // Bug fix: the original read argv[1..8] without checking argc,
    // crashing on missing arguments.
    if (argc < 9) {
        fprintf(stderr, "usage: %s v1 v2 v3 v4 v5 v6 v7 v8\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
    cudaDeviceSynchronize();
    return 0;
}
|
9,449 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Minimal demo kernel; the per-thread printf is left disabled so the
// body is effectively empty (used only to exercise a launch).
__global__ void helloGPU(){
    //printf("GPU bx:%d tx:%d \n", blockIdx.x, threadIdx.x);
}
// Launches helloGPU on 4 blocks of 2 threads each and waits for it
// to finish before printing from the host.
__host__ void hello_gpu(){
    helloGPU <<<4, 2>>>();  // <<<blocks, threads-per-block (max 1024)>>>
    // Bug fix: cudaThreadSynchronize() is deprecated; the equivalent
    // replacement is cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    printf("cpu \n");
}
// Demonstrates host<->device transfers: copies a small array to the
// device, copies it back into a second host array, and prints it.
void communication(){
    const int SIZE = 5;
    int a[SIZE] = {1,2,3,4,5};
    int b[SIZE] = {0,};
    int *a_d;  // device buffer
    cudaMalloc(&a_d, SIZE * sizeof(int));  // cuda memory allocation
    printf("a[0] = %d \n", a[0]);
    // Device memory cannot be dereferenced from host code; to inspect
    // it, copy it back to the host first.
    //printf("a_d[0] = %d \n", a_d[0]);
    cudaMemcpy(a_d, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b, a_d, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; i++)
        printf("%d \n", b[i]);
    cudaFree(a_d);  // bug fix: the original leaked this allocation
}
//CUDA:SIMD(Single Instruction Multi Data) ٸ Ѵ
//CPU:MIMD
// Element-wise dst = src0 + src1 over n elements.
// Threads are flattened within a (possibly 2D) block, then offset by
// the 1D block index, so this works for both 1D and 2D block launches.
__global__ void vector_add_kernel(int* dst, int*src0, int* src1, int n){
    int local = threadIdx.y * blockDim.x + threadIdx.x;
    int idx = local + blockIdx.x * (blockDim.x * blockDim.y);
    if (idx < n)
        dst[idx] = src0[idx] + src1[idx];
}
// Adds two 5-element vectors on the GPU and prints the result.
void vector_add(){
    const int SIZE = 5;
    int a[SIZE] = {1,2,3,4,5};
    int b[SIZE] = {10,20,30,40,50};
    int c[SIZE] = {0,};
    int *a_d, *b_d, *c_d;
    cudaMalloc(&a_d, SIZE * sizeof(int));  // cuda memory allocation
    cudaMalloc(&b_d, SIZE * sizeof(int));
    cudaMalloc(&c_d, SIZE * sizeof(int));
    cudaMemcpy(a_d, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    vector_add_kernel<<<1, SIZE>>>(c_d, a_d, b_d, SIZE);
    cudaMemcpy(c, c_d, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; i++)
        printf("%d \n", c[i]);
    // Bug fix: the original leaked all three device buffers.
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}
#include <memory>
// Adds two vectors longer than one block (2101 elements) by launching
// ceil(SIZE/1000) blocks of 1000 threads, then prints the result.
void long_vector_add(){
    const int SIZE = 2000 + 101;
    int *a = (int*)malloc(SIZE * sizeof(int));
    int *b = (int*)malloc(SIZE * sizeof(int));
    int *c = (int*)malloc(SIZE * sizeof(int));
    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i; b[i] = i * 1; c[i] = 0;
    }
    int *a_d, *b_d, *c_d;
    cudaMalloc(&a_d, SIZE * sizeof(int));  // cuda memory allocation
    cudaMalloc(&b_d, SIZE * sizeof(int));
    cudaMalloc(&c_d, SIZE * sizeof(int));
    cudaMemcpy(a_d, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    int thread = 1000;
    // Integer ceil-division for the block count:
    //int block = ceil(1.0 * SIZE / thread);
    //int block = (SIZE + thread-1) / thread;
    int block = (SIZE -1) / thread + 1;
    vector_add_kernel<<<block, thread>>>(c_d, a_d, b_d, SIZE);
    cudaMemcpy(c, c_d, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; i++)
        printf("%d ", c[i]);
    // Bug fix: the original leaked both host and device buffers.
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    free(a);
    free(b);
    free(c);
}
// Adds two batched "matrices" (m pages of h x w elements, flattened)
// using a 2D block per page; demonstrates dim3 launch configuration.
void matrix_add(){
    int h = 3 * 3 * 2;
    int w = 3 * 3 * 2;
    int m = 10;
    const int SIZE = m*h*w;
    int *a = (int*)malloc(SIZE * sizeof(int));
    int *b = (int*)malloc(SIZE * sizeof(int));
    int *c = (int*)malloc(SIZE * sizeof(int));
    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i; b[i] = i * 1; c[i] = 0;
    }
    int *a_d, *b_d, *c_d;
    cudaMalloc(&a_d, SIZE * sizeof(int));  // cuda memory allocation
    cudaMalloc(&b_d, SIZE * sizeof(int));
    cudaMalloc(&c_d, SIZE * sizeof(int));
    cudaMemcpy(a_d, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    // dim3(x,y,z) per-block thread shape; the product must be <= 1024.
    vector_add_kernel<<<dim3(m,1,1), dim3(h,w,1)>>>(c_d, a_d, b_d, SIZE);
    cudaMemcpy(c, c_d, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; i++)
        printf("%d ", c[i]);
    // Bug fix: the original leaked both host and device buffers.
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    free(a);
    free(b);
    free(c);
}
// Column-wise sum of an h x w matrix: dst[x] = src[0][x] + src[1][x].
// NOTE(review): despite taking h as a parameter, this only handles
// h == 2; the general per-row loop was left commented out by the
// original author — confirm before using with other heights.
__global__ void sum_vertical(int* dst, int* src, int h, int w){
    int tx = threadIdx.x;  // one thread per column
    /*
    for (int y = 0; y < h; y++)
    {
        int idx = y * w + tx;
        sum += src[idx];
    }
    */
    // (cleanup: removed the unused local `sum` the original declared)
    dst[tx] = src[tx] + src[w + tx];
}
// Exercises sum_vertical on a 2 x 4 matrix and prints the column sums.
void matrix_add_test(){
    int h = 2, w = 4;
    const int SIZE = h*w;
    int *a = (int*)malloc(SIZE * sizeof(int));
    int *b = (int*)malloc(w * sizeof(int));
    for (int i = 0; i < SIZE; i++)
        a[i] = i;
    int *a_d, *b_d;
    cudaMalloc(&a_d, SIZE * sizeof(int));
    // Bug fix: b_d only ever holds w results; the original allocated
    // SIZE elements for it.
    cudaMalloc(&b_d, w * sizeof(int));
    cudaMemcpy(a_d, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    sum_vertical<<<1, w>>> (b_d, a_d, h, w);  // one thread per column
    cudaMemcpy(b, b_d, w * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < w; i++)
        printf("%d ", b[i]);
    // Bug fix: the original leaked every buffer.
    cudaFree(a_d);
    cudaFree(b_d);
    free(a);
    free(b);
}
// Integer mean across c interleaved channels:
// dst[x] = mean(src[x*c], ..., src[x*c + c - 1]); one thread per x.
__global__ void mean_channel(int* dst, int*src,int w,int c){
    int x = threadIdx.x;
    int total = 0;
    for (int ch = 0; ch < c; ch++)
        total += src[x * c + ch];
    dst[x] = total / c;
}
// Averages the 3 channels of each of w interleaved "pixels" on the
// GPU and prints the per-pixel means.
void rgb_mean(){
    int w = 4, c = 3;
    const int SIZE = w * c;
    int *a = (int*)malloc(SIZE * sizeof(int));
    int *b = (int*)malloc(w * sizeof(int));
    for (int i = 0; i < SIZE; i++)
        a[i] = i;
    int *a_d, *b_d;
    cudaMalloc(&a_d, SIZE * sizeof(int));
    cudaMalloc(&b_d, SIZE * sizeof(int));
    cudaMemcpy(a_d, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    mean_channel<<<1, w>>>(b_d, a_d, w, c);
    cudaMemcpy(b, b_d, w * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < w; i++)
        printf("%d ", b[i]);
    // Bug fix: the original leaked every buffer.
    cudaFree(a_d);
    cudaFree(b_d);
    free(a);
    free(b);
}
typedef unsigned char uchar;
//__device__ __global__ ȣմϴ
// Grayscale conversion of one pixel using the classic 0.2989/0.5870/0.1140
// luma weights; result is truncated to uchar by the implicit conversion.
// (__device__ helpers are callable from __global__ kernels.)
__device__ uchar rgb_2_gray_pixel(uchar R,uchar G,uchar B){
return 0.2989 * R + 0.5870 * G + 0.1140 * B;
}
// Converts an interleaved RGB image (3 bytes per pixel) to a single
// grayscale byte per pixel.  Launch with one block per row and one
// thread per column: gridDim.x == h, blockDim.x == w.
__global__ void rgb2gray_kernel(uchar* gray, uchar*rgb,int h,int w){
    int row = blockIdx.x;
    int col = threadIdx.x;
    int src = (row * w + col) * 3;  // 3 interleaved channels
    gray[row * w + col] = rgb_2_gray_pixel(rgb[src + 0], rgb[src + 1], rgb[src + 2]);
}
// Converts a small synthetic 4x4 RGB image to grayscale on the GPU
// and prints the resulting pixel values.
void rgb_2_gray(){
    int h = 4, w = 4, c = 3;
    const int SIZE = h * w * c;
    uchar *a = (uchar*)malloc(SIZE * sizeof(uchar));
    uchar *b = (uchar*)malloc(h * w * sizeof(uchar));
    for (int i = 0; i < SIZE; i++) a[i] = (uchar)i;
    uchar *a_d, *b_d;
    cudaMalloc(&a_d, SIZE * sizeof(uchar));
    cudaMalloc(&b_d, h * w * sizeof(uchar));
    cudaMemcpy(a_d, a, SIZE * sizeof(uchar), cudaMemcpyHostToDevice);
    rgb2gray_kernel<<<h, w>>>(b_d, a_d, h, w);
    cudaMemcpy(b, b_d, h * w * sizeof(uchar), cudaMemcpyDeviceToHost);
    for (int i = 0; i < h * w; i++)
        printf("%d ", b[i]);
    // Bug fix: the original leaked every buffer.
    cudaFree(a_d);
    cudaFree(b_d);
    free(a);
    free(b);
}
// Host entry point (host code is compiled by the regular C++ compiler).
int main()
{
    rgb_2_gray();
    //hello_gpu();
    return 0;
}
|
9,450 | #include "includes.h"
// In-place transform over a buffer of 2*N floats, presumably mapping
// uniform [0,1] samples to sphere coordinates (TODO confirm intent):
// the first N entries become data*360 - 180 and the next N entries
// become acos(2*data - 1).  Note: data[idx] is rewritten before
// data[idx + N] is read, matching the original update order.
__global__ void sphereTransform(float *data, const unsigned int N)
{
    const unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N)
        return;
    data[idx] = data[idx] * 360.0f - 180.0f;
    data[idx + N] = acosf(2.0f * data[idx + N] - 1.0f);
}
9,451 | #include "includes.h"
// Fills the m x n row-major matrix `a` with the identity pattern:
// 1.0f on the diagonal, 0.0f elsewhere.  One thread per element;
// out-of-range threads exit early.
__global__ void set_identity_kernel( float *a, int m, int n )
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= n)
        return;
    a[row * n + col] = (row == col) ? 1.0f : 0.0f;
}
9,452 | #ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <iomanip>
#include <iostream>
#define BLOCKSIZE 64
#define GA_POPSIZE 2048 // ga population size
#define GA_MAXITER 16384 // maximum iterations
#define GA_ELITRATE 0.10f // elitism rate
#define GA_MUTATIONRATE 0.25f // mutation rate
#define GA_MUTATION RAND_MAX * GA_MUTATIONRATE
#define GA_TARGET "Hello world!"
#define GA_TARGETLEN 12
struct ga_struct
{
char str[GA_TARGETLEN]; // the string
unsigned int fitness; // its fitness
};
cudaError_t gaCuda(struct ga_struct *population, int size);
// Tiny multiplicative congruential device PRNG: advances *seed via
// x = (32767 * x) mod m and returns the new state.
// NOTE(review): if the state ever reaches 0 (or a multiple of m) the
// stream sticks at 0 permanently — acceptable for this demo, but
// confirm before reuse elsewhere.
__device__ int rand(unsigned int *seed, int m) {
unsigned int a = 32767;
unsigned int x = *seed;
x = (a * x) % m;
*seed = x;
return ((int)x);
}
// Placeholder for the elitism step (copying the top `esize` citizens
// into `buffer`); intentionally empty — not yet implemented.
__device__ void elitismKernel(struct ga_struct *population, struct ga_struct *buffer, const int esize) {
}
// Initializes the population: each thread builds one citizen with a
// random printable string of `size` characters and zero fitness,
// using a per-thread PRNG seeded from the thread id.
__global__ void initKernel(struct ga_struct *population, const int size) {
    unsigned int id = blockIdx.x * BLOCKSIZE + threadIdx.x;
    if (id >= GA_POPSIZE)
        return;
    unsigned int seed = id + 1;  // never 0 so the PRNG does not stall
    ga_struct citizen;
    citizen.fitness = 0;
    for (int j = 0; j < size; j++)
        citizen.str[j] = (rand(&seed, 90) + 32);
    population[id] = citizen;
}
// Computes fitness for every citizen: the sum of per-character
// absolute distances to GA_TARGET (lower is better).
__global__ void calcKernel(struct ga_struct *population, const int size) {
    unsigned int id = blockIdx.x * BLOCKSIZE + threadIdx.x;
    if (id < GA_POPSIZE) {
        // Bug fix: a string literal has type const char[]; binding it
        // to a plain char* is ill-formed in modern C++.
        const char *target = GA_TARGET;
        unsigned int fitness = 0;
        ga_struct pop = population[id];
        for (int j = 0; j < size; j++)
            fitness += abs(pop.str[j] - target[j]);
        population[id].fitness = fitness;
    }
}
// Per-block tree reduction: finds the minimum-fitness citizen in each
// BLOCKSIZE-wide block and writes it to best[blockIdx.x].
// Requires GA_POPSIZE to be a multiple of BLOCKSIZE so every thread
// of every block reaches the __syncthreads() barriers together.
__global__ void bestKernel(struct ga_struct *population, struct ga_struct *best) {
    __shared__ struct ga_struct temp[BLOCKSIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * BLOCKSIZE + threadIdx.x;
    if (i < GA_POPSIZE) {
        temp[tid] = population[i];
        __syncthreads();
        for (unsigned int s = 1; s < blockDim.x; s *= 2) {
            int index = 2 * s * tid;
            if (index < blockDim.x) {
                // Bug fix: the original compared temp[tid]/temp[tid+s],
                // mixing up the interleaved-reduction operands; the
                // standard scheme operates on temp[index]/temp[index+s].
                if (temp[index + s].fitness < temp[index].fitness)
                    temp[index] = temp[index + s];
            }
            __syncthreads();
        }
    }
    if (tid == 0) {
        best[blockIdx.x] = temp[0];
    }
}
// Placeholder for the crossover/mutation step; intentionally empty —
// not yet implemented.
__global__ void mateKernel(struct ga_struct *population, struct ga_struct *buffer) {
}
// Prints the fitness of the best citizen (gav[0]); the candidate
// string itself is not printed here.
inline void print_best(struct ga_struct *gav) {
    const unsigned int bestFitness = gav[0].fitness;
    std::cout << "Best: " << " (" << bestFitness << ")" << std::endl;
}
// Runs the GA on the device and prints the first two resulting
// candidate strings.
int main() {
    srand(unsigned(time(NULL)));
    struct ga_struct *population;
    // Bug fix: sizeof(GA_TARGET) counts the terminating '\0' (13), but
    // ga_struct::str only holds GA_TARGETLEN (12) characters, so the
    // kernels overran the array by one byte.  Use the declared length.
    int size = GA_TARGETLEN;
    population = (struct ga_struct *) malloc(GA_POPSIZE * sizeof(struct ga_struct));
    cudaError_t cudaStatus = gaCuda(population, size);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "gaCuda failed!");
        return 1;
    }
    std::cout << size << std::endl;
    for (int i = 0; i < size; i++) {
        std::cout << population[0].str[i];
    }
    std::cout << std::endl;
    for (int i = 0; i < size; i++) {
        std::cout << population[1].str[i];
    }
    std::cout << std::endl;
    free(population);  // bug fix: was leaked
    // Flush profiling data / tear down the device context.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
/*
 * Allocates device buffers, initializes the population on the GPU,
 * runs GA_MAXITER fitness passes, and copies the population back to
 * the host.  Returns the last CUDA status; device buffers are freed
 * on every path via the Error label.
 * (cleanup: removed the unused pop_alpha/pop_beta/buffer locals.)
 */
cudaError_t gaCuda(struct ga_struct *population, int size) {
    dim3 dimBlock(BLOCKSIZE);
    dim3 dimGrid(GA_POPSIZE/BLOCKSIZE);
    struct ga_struct *dev_population = 0;
    struct ga_struct *dev_buffer = 0;
    cudaError_t cudaStatus;
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_population, GA_POPSIZE * sizeof(struct ga_struct));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_buffer, GA_POPSIZE * sizeof(struct ga_struct));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    initKernel<<<dimGrid, dimBlock>>>(dev_population, size);
    // NOTE(review): every iteration recomputes the same fitness values;
    // selection/mating kernels are still stubs.
    for (int i=0; i<GA_MAXITER; i++) {
        calcKernel<<<dimGrid, dimBlock>>>(dev_population, size);
    }
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "gaKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching Kernel!\n", cudaStatus);
        goto Error;
    }
    cudaStatus = cudaMemcpy(population, dev_population, GA_POPSIZE * sizeof(struct ga_struct), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_population);
    cudaFree(dev_buffer);
    return cudaStatus;
}
|
9,453 | /*******************************
* Autor: Alejandro Delgado Martel
* Nombre: Proyecto Final Versión 2 Reducción memoria SHARED
*
*
*
*
* NOTA: mirar extern __shared__ variables
*
*
*******************************/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <iostream>
using namespace std;
#define HISTO_ELEMENTS 1000
// Zeroes the per-block local histograms: thread 0 of every in-range
// block clears its block's hist_elements slots in hist_locales.
__global__ void inicializa_histograma(int num_elements, const int hist_elements, float* hist_locales){
    int gid = (blockIdx.x * blockDim.x + threadIdx.x);
    if (gid >= num_elements || threadIdx.x != 0)
        return;
    float *mine = hist_locales + blockIdx.x * hist_elements;
    for (int k = 0; k < hist_elements; k++)
        mine[k] = 0.0;
}
// Builds per-block local histograms: every in-range thread bins A[i]
// by (A[i] mod hist_elements) and atomically increments the bucket in
// its own block's slice of `hist`.
__global__ void histograma(float* A, float* hist, int num_elements, const int hist_elements, int nBloques){
    int i = (blockIdx.x * blockDim.x + threadIdx.x);
    if (i < num_elements) {
        int pos = (int)(fmod(A[i], (float)hist_elements));
        // Idiom fix: pass a float literal to the float atomicAdd
        // overload instead of the double literal 1.0.
        atomicAdd(&(hist[(blockIdx.x * HISTO_ELEMENTS) + pos]), 1.0f);
    }
    // Bug fix: the original called __syncthreads() inside the
    // "i < num_elements" branch — undefined behavior when the last
    // block is only partially in range.  Nothing after the atomic
    // needed a barrier, so it is simply removed.
}
// One pass of the pairwise reduction over the per-block histograms:
// adds the second half of the live histogram array onto the first
// half, then copies the first HISTO_ELEMENTS slots into the final
// histogram.  The host halves the live block count after each call.
// NOTE(review): the source offset uses (num_bloques/2)*HISTO_ELEMENTS
// with the ORIGINAL block count on every pass, while the live region
// shrinks — verify this indexing against the host loop.
// NOTE(review): the __syncthreads() below sits inside a divergent
// branch (undefined behavior for partial blocks) and guards nothing;
// left untouched here because only comments are being changed.
__global__ void reduccion(float* hist, float* hist_reducido, int num_elements, const int hist_elements, int num_bloques){
int i = (blockIdx.x * blockDim.x + threadIdx.x);
// The per-block local histograms live in one flat array; this kernel
// folds them together by pairwise summation (reduction) and writes
// the result into the final histogram.
// The host must ensure the grid's block count is a power of two for
// the halving scheme to cover every element.
// Pairwise reduction step:
if(i < (num_bloques*HISTO_ELEMENTS)/2){
// Add the second half of the array onto the first half, storing
// the sum in the first half.
atomicAdd(&(hist[i]), hist[ (num_bloques/2) * HISTO_ELEMENTS + i ]);
}
// Copy the (partially) reduced first slice into the output histogram.
if(i < HISTO_ELEMENTS){
hist_reducido[i] = hist[i];
__syncthreads();
}
}
// Prints a diagnostic (with the caller's source line) when a CUDA API
// call returned an error; silent on success.
void fError(cudaError_t err, int linea){
    if (err == cudaSuccess)
        return;
    printf("Ha ocurrido un error el la linea %d con codigo: %s\n", linea, cudaGetErrorString(err));
}
/*
 * Histograms one million floats on the GPU: builds per-block local
 * histograms, folds them with log2(nBlocks) reduction passes, then
 * checks that the final bucket counts sum to the element count.
 */
int main(){
    //cudaSetDevice(0);
    int num_elements = 1000000;
    int hist_elements = HISTO_ELEMENTS;
    int HilosPorBloque = 977;
    int BloquesPorGrid = (num_elements + HilosPorBloque -1) / HilosPorBloque;
    int nBloques = BloquesPorGrid;
    // Host allocations.
    float * h_A = (float*)malloc(num_elements * sizeof(float));
    float * h_hist = (float*)malloc(hist_elements * sizeof(float));
    float * h_Histo = (float*)malloc(BloquesPorGrid * hist_elements * sizeof(float));
    float * h_Reducido = (float*)malloc(hist_elements * sizeof(float));
    // Bug fix: h_Reducido was missing from the original NULL check.
    if(h_A == NULL || h_hist == NULL || h_Histo == NULL || h_Reducido == NULL){
        printf("Error al reservar memoria para los vectores HOST");
        exit(1);
    }
    // Deterministic input values.
    for(int i=0; i<num_elements; i++){
        h_A[i] = (float)i;
    }
    // Bug fix: h_hist was copied to the device uninitialized (an
    // indeterminate-value read); zero it alongside h_Reducido.
    for(int i=0; i<hist_elements; i++){
        h_hist[i] = 0.0;
        h_Reducido[i] = 0.0;
    }
    cudaError_t err;
    int size = num_elements * sizeof(float);
    int size_hist = hist_elements * sizeof(float);
    int size_Histo = BloquesPorGrid * hist_elements * sizeof(float);
    float * d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    fError(err,__LINE__);
    float * d_hist = NULL;
    err = cudaMalloc((void**)&d_hist, size_hist);
    fError(err,__LINE__);
    // Flat array holding every block's local histogram.
    float * d_Histo = NULL;
    err = cudaMalloc((void**)&d_Histo, size_Histo);
    fError(err,__LINE__);  // bug fix: this status was never checked
    float* d_Reducido = NULL;
    err = cudaMalloc((void**)&d_Reducido, size_hist);
    fError(err,__LINE__);  // bug fix: this status was never checked
    // Host -> device copies.
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    fError(err,__LINE__);
    err = cudaMemcpy(d_hist, h_hist, size_hist, cudaMemcpyHostToDevice);
    fError(err,__LINE__);
    err = cudaMemcpy(d_Reducido, h_Reducido, size_hist, cudaMemcpyHostToDevice);
    fError(err, __LINE__);
    printf("Numero de bloques: %d\n", BloquesPorGrid);
    printf("Numero de hilos por bloque: %d\n", HilosPorBloque);
    printf("Tamaño del histograma: %d\n", HISTO_ELEMENTS);
    /********************** Launch and time the kernels ****************/
    cudaError_t Err;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    inicializa_histograma<<<BloquesPorGrid, HilosPorBloque>>>(num_elements, hist_elements, d_Histo);
    histograma<<<BloquesPorGrid, HilosPorBloque>>>(d_A, d_Histo, num_elements, hist_elements, BloquesPorGrid);
    // Run the reduction log2(nBloques) times, halving the live block
    // count after every pass.
    for(int i=0; i<log2((double)nBloques); i++){
        reduccion<<<BloquesPorGrid, HilosPorBloque>>>(d_Histo, d_Reducido, num_elements, hist_elements, BloquesPorGrid);
        BloquesPorGrid /= 2;
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float tiempo_reserva_host;
    cudaEventElapsedTime(&tiempo_reserva_host, start, stop);
    Err = cudaGetLastError();
    fError(Err,__LINE__);
    printf("Tiempo de suma vectores DEVICE: %f\n", tiempo_reserva_host);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /*******************************************************************/
    // Device -> host copies.
    err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
    fError(err,__LINE__);
    err = cudaMemcpy(h_hist, d_hist, size_hist, cudaMemcpyDeviceToHost);
    fError(err,__LINE__);
    err = cudaMemcpy(h_Reducido, d_Reducido, size_hist, cudaMemcpyDeviceToHost);
    fError(err, __LINE__);
    err = cudaMemcpy(h_Histo, d_Histo, size_Histo, cudaMemcpyDeviceToHost);
    fError(err, __LINE__);
    /******************* Verify the result *****************************/
    // The bucket counts should sum to (approximately) num_elements.
    float suma = 0;
    for(int j=0; j<HISTO_ELEMENTS; j++){
        suma = suma + h_Reducido[j];
    }
    printf("La suma total es: %f\n", suma);
    printf("Con un tamaño de %d\n", BloquesPorGrid*HISTO_ELEMENTS);
    /*******************************************************************/
    err = cudaFree(d_A);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_hist);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_Histo);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_Reducido);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free host memory
    free(h_A);
    free(h_hist);
    free(h_Histo);
    free(h_Reducido);
    return 0;
}
|
9,454 | #include "includes.h"
// Exercises the PTX vset4.s32.s32.ne SIMD-in-register instruction:
// byte-wise compares *a against *b under four different byte-select
// patterns (b0000, b1111, b2222, b3333 — i.e. each byte of `a`
// replicated across all lanes) and stores the four results in d[0..3].
// The inline PTX operand order and selectors are load-bearing; do not
// reformat.
__global__ void __veccmp(int *a, int *b, int *d) {
int xa = *a;
int xb = *b;
int xc = 0;
int xd = 0;
asm("vset4.s32.s32.ne" "%0, %1.b0000, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d++ = xd;
asm("vset4.s32.s32.ne" "%0, %1.b1111, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d++ = xd;
asm("vset4.s32.s32.ne" "%0, %1.b2222, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d++ = xd;
asm("vset4.s32.s32.ne" "%0, %1.b3333, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc));
*d = xd;
}
9,455 | #include "includes.h"
unsigned char *pdata; // pointer to data content
// Applies a 3x3 convolution filter (filter[0..8], normalized by
// filter[9]) to a 512x512 interleaved-RGB image stored in Da, in place.
// Launch layout: one block per image row (gridDim.x == 512), one
// thread per column (blockDim.x == 512).  Border rows/columns keep
// their original values.
// NOTE(review): each block reads the rows ABOVE and BELOW it from
// global Da while other blocks write their rows back — an inter-block
// race unless separate input/output buffers or a grid sync is used.
// Confirm against the host code before relying on the output.
__global__ void processData(unsigned char *Da, int* filter)
{
int tx = threadIdx.x; // thread x-index = column
int bx = blockIdx.x;  // block x-index = row
int bn = blockDim.x;
int gid = bx * bn + tx;
__shared__ int sfilter[3][3];
__shared__ int sR[3][512]; // each block stages its upper/current/lower rows
__shared__ int sG[3][512];
__shared__ int sB[3][512];
__shared__ int sRsum[512]; // per-block row of 512 filtered sums
__shared__ int sGsum[512];
__shared__ int sBsum[512];
if (tx < 9) // first 9 threads stage the filter into shared memory
{
sfilter[tx / 3][tx % 3] = filter[tx];
}
__syncthreads();
if (bx == 0 || bx == 511 || tx == 0 || tx == 511)
{
// Border handling: pass the original pixel through unchanged.
sRsum[tx] = Da[gid * 3];
sGsum[tx] = Da[gid * 3 + 1];
sBsum[tx] = Da[gid * 3 + 2];
}
// Interior rows only (first and last row blocks skip the stencil).
if (bx != 0 && bx != 511)
{
// Stage the upper, current and lower rows of each channel.
sR[0][tx] = Da[gid * 3 - 512 * 3];
sR[1][tx] = Da[gid * 3];
sR[2][tx] = Da[gid * 3 + 512 * 3];
sG[0][tx] = Da[gid * 3 - 512 * 3 + 1];
sG[1][tx] = Da[gid * 3 + 1];
sG[2][tx] = Da[gid * 3 + 512 * 3 + 1];
sB[0][tx] = Da[gid * 3 - 512 * 3 + 2];
sB[1][tx] = Da[gid * 3 + 2];
sB[2][tx] = Da[gid * 3 + 512 * 3 + 2];
__syncthreads();
// Interior columns only (first/last column keep original values).
if (tx != 0 && tx != 511)
{
// R channel: 3x3 weighted sum.
sRsum[tx] = sR[0][tx - 1] * sfilter[0][0];
sRsum[tx] += sR[0][tx] * sfilter[0][1];
sRsum[tx] += sR[0][tx + 1] * sfilter[0][2];
sRsum[tx] += sR[1][tx - 1] * sfilter[1][0];
sRsum[tx] += sR[1][tx] * sfilter[1][1];
sRsum[tx] += sR[1][tx + 1] * sfilter[1][2];
sRsum[tx] += sR[2][tx - 1] * sfilter[2][0];
sRsum[tx] += sR[2][tx] * sfilter[2][1];
sRsum[tx] += sR[2][tx + 1] * sfilter[2][2];
// G channel
sGsum[tx] = sG[0][tx - 1] * sfilter[0][0];
sGsum[tx] += sG[0][tx] * sfilter[0][1];
sGsum[tx] += sG[0][tx + 1] * sfilter[0][2];
sGsum[tx] += sG[1][tx - 1] * sfilter[1][0];
sGsum[tx] += sG[1][tx] * sfilter[1][1];
sGsum[tx] += sG[1][tx + 1] * sfilter[1][2];
sGsum[tx] += sG[2][tx - 1] * sfilter[2][0];
sGsum[tx] += sG[2][tx] * sfilter[2][1];
sGsum[tx] += sG[2][tx + 1] * sfilter[2][2];
// B channel
sBsum[tx] = sB[0][tx - 1] * sfilter[0][0];
sBsum[tx] += sB[0][tx] * sfilter[0][1];
sBsum[tx] += sB[0][tx + 1] * sfilter[0][2];
sBsum[tx] += sB[1][tx - 1] * sfilter[1][0];
sBsum[tx] += sB[1][tx] * sfilter[1][1];
sBsum[tx] += sB[1][tx + 1] * sfilter[1][2];
sBsum[tx] += sB[2][tx - 1] * sfilter[2][0];
sBsum[tx] += sB[2][tx] * sfilter[2][1];
sBsum[tx] += sB[2][tx + 1] * sfilter[2][2];
// Normalize by the divisor stored at filter[9].
sRsum[tx] /= filter[9];
sGsum[tx] /= filter[9];
sBsum[tx] /= filter[9];
// Clamp each channel to the [0, 255] byte range.
if (sRsum[tx] > 255)
sRsum[tx] = 255;
else if (sRsum[tx] < 0)
sRsum[tx] = 0;
if (sGsum[tx] > 255)
sGsum[tx] = 255;
else if (sGsum[tx] < 0)
sGsum[tx] = 0;
if (sBsum[tx] > 255)
sBsum[tx] = 255;
else if (sBsum[tx] < 0)
sBsum[tx] = 0;
}
}
__syncthreads();
// Write the filtered R, G, B values back into the interleaved buffer.
Da[gid * 3] = sRsum[tx];
Da[gid * 3 + 1] = sGsum[tx];
Da[gid * 3 + 2] = sBsum[tx];
}
9,456 | #include "includes.h"
// Accumulates per-thread partial sums for midpoint-rule integration
// of 4/(1+x^2) over [0,1] (which equals pi).  Each thread strides
// across the nbin bins and adds into its own slot of `sum`.
// NOTE(review): assumes sum[] is zero-initialized by the host and
// that nthreads*nblocks matches the actual launch size — confirm.
__global__ void cal_pi(float *sum, int nbin, float step, int nthreads, int nblocks) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x; // sequential thread index across the blocks
    // Bug fix: the original used double literals (0.5, 4.0, 1.0),
    // silently promoting the whole computation to double in a float
    // kernel; use float literals.
    for (int i = idx; i < nbin; i += nthreads * nblocks) {
        float x = (i + 0.5f) * step;
        sum[idx] += 4.0f / (1.0f + x * x);
    }
}
9,457 | //No headers
//ncvv automatically imports the required
// output[i] = sin(input1[i]) + cos(input2[i]) for each of the
// datasize/sizeof(float) elements; index math supports fully 3D
// grid and block launches.
__global__ void process_kernel1(float *input1,float *input2,float *output,int datasize){
    int blockNum = blockIdx.z * (gridDim.x *gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z * (blockDim.x*blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
    int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
    int n = datasize/sizeof(input1[0]);
    // Idiom fix: use the explicit single-precision functions so the
    // float operands are not promoted to double.
    if(id<n)output[id] = sinf(input1[id]) + cosf(input2[id]);
}
// output[i] = ln(input[i]) for each of the datasize/sizeof(float)
// elements; index math supports fully 3D grid and block launches.
__global__ void process_kernel2(float *input,float *output,int datasize){
    int blockNum = blockIdx.z * (gridDim.x *gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z * (blockDim.x*blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
    int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
    int n = datasize/sizeof(input[0]);
    // Idiom fix: single-precision logf avoids double promotion.
    if(id<n)output[id] = logf(input[id]);
}
// output[i] = sqrt(input[i]) for each of the datasize/sizeof(float)
// elements; index math supports fully 3D grid and block launches.
__global__ void process_kernel3(float *input,float *output,int datasize){
    int blockNum = blockIdx.z * (gridDim.x *gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z * (blockDim.x*blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
    int id = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
    int n = datasize/sizeof(input[0]);
    // Idiom fix: single-precision sqrtf avoids double promotion.
    if(id<n)output[id] = sqrtf(input[id]);
}
|
9,458 | #include "common/deviceInfo.cuh"
#include "task/algorithm/vectorAdd.cuh"
#include "task/algorithm/vectorSum.cuh"
#include "task/algorithm/matrixTranspose.cuh"
#include "task/memory/manualMemory.cuh"
#include "task/memory/pinnedMemory.cuh"
#include "task/memory/zeroCopyMemory.cuh"
#include "task/memory/unifiedMemory.cuh"
#include "task/memory/misalignedRead.cuh"
#include "task/sharedMemory/sharedMemoryVectorSum.cuh"
#include "task/stream/multiKernelConcurrent.cuh"
#include "task/stream/syncStreamWithEvent.cuh"
#include "task/stream/vectorAddMultiStream.cuh"
#include "task/stream/graphConcurrent.cuh"
#include "task/instruction/floatPrecision.cuh"
#include "task/algorithm/matrixMultiplication.cuh"
#include <cstdio>
int main() {
size_t m = 1 << 10;
matrixMultiplication(m, m + 1, m + 2, 32);
} |
9,459 | #pragma once
#include "cuda_Common_Include.cu"
// Advances the 128-bit xor/shift generator state (x,y,z,w) one step
// and returns the next 32-bit value; the state rotates x<-y<-z<-w.
static __device__ unsigned int xorShift128(stateRNG_xorShift128* state){
unsigned int t;
t = state->x ^ (state->x << 11);
state->x = state->y; state->y = state->z; state->z = state->w;
return state->w = state->w ^ (state->w >> 19) ^ (t ^ (t >> 8));
}
9,460 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-06-14
*/
#include "FlushToMem.cuh"
#include "../../XUtility.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
flush a list of XTensor to GPU memory: packs every tensor's payload
into one contiguous host buffer, allocates a single device block, and
repoints each tensor at its slice of that block.
>> mList - list of the tensors (must all reside on the CPU)
>> devID - target GPU id (used when GPUMem is NULL)
>> GPUMem - memory pool for the GPU (may be NULL -> raw XMemAlloc)
*/
void CudaCPUToGPUFlush(TensorList * mList, int devID, XMem * GPUMem)
{
if (mList == NULL || mList->count == 0)
return;
#ifdef USE_CUDA
int size = 0, p = 0;
int reqiredSize = 0;
/* compute the requried memory size (sparse tensors store a count
   plus (index,value) tuples; dense tensors store raw units) */
for (int i = 0; i < mList->count; i++) {
XTensor * m = (XTensor*)mList->GetItem(i);
CheckNTErrors((m->devID < 0), "Cannot do gpu-flush on matrices that are already on GPUs.");
if (m->isSparse)
reqiredSize = sizeof(int) + (sizeof(int) + m->unitSize) * m->unitNumNonZero;
else
reqiredSize = m->unitSize * m->unitNum;
size += reqiredSize;
}
char * data = new char[size];
char * GPUData = GPUMem != NULL ? (char*)GPUMem->Alloc(GPUMem->devID, size):
(char*)XMemAlloc(devID, size);
int pSize = 0;
/* place the data in a memory block, then release each tensor's old
   CPU storage and repoint it into the (future) GPU block */
for (int i = 0; i < mList->count; i++) {
XTensor * m = (XTensor*)mList->GetItem(i);
if (m->isSparse)
pSize = sizeof(int) + (sizeof(int) + m->unitSize) * m->unitNumNonZero;
else
pSize = m->unitSize * m->unitNum;
reqiredSize = pSize;
memcpy(data + p, m->data, pSize);
if (m->dataHost != NULL)
delete[](char*)m->dataHost;
/* unpooled data was new[]-allocated; pooled data goes back to its pool */
if(m->mem == NULL)
delete[] (char*)m->data;
else
m->mem->Release(m->data, m->GetDataSizeInChar(), m->signature);
m->dataHost = NULL;
m->data = GPUData + p;
m->devID = GPUMem != NULL ? GPUMem->devID : devID;
m->mem = GPUMem;
p += reqiredSize;
}
/* copy from CPU memory to GPU memory in one transfer */
cudaMemcpy(GPUData, data, size, cudaMemcpyHostToDevice);
delete[] data;
#endif
}
/* copy the data from GPU memory to CPU memory (memory pool):
   allocates a CPU buffer (from CPUMem if given, else XMemAlloc),
   copies the tensor payload down, and repoints the tensor at it */
void CudaGPUToCPUFlush(XTensor * tensor, int devID, XMem * CPUMem)
{
#ifdef USE_CUDA
CheckNTErrors((tensor->devID >= 0), "Cannot do cpu-flush on matrices that are already on CPU.");
/* compute the requried memory size */
int size = 0;
if (tensor->isSparse)
size = sizeof(int) + (sizeof(int) + tensor->unitSize) * tensor->unitNumNonZero;
else
size = tensor->unitSize * tensor->unitNum;
char * CPUData = CPUMem != NULL ? (char*)CPUMem->Alloc(CPUMem->devID, size):
(char*)XMemAlloc(devID, size);
/* copy from GPU memory to CPU memory (the original comment had the
   direction reversed) */
cudaMemcpy(CPUData, tensor->data, size, cudaMemcpyDeviceToHost);
if (tensor->dataHost != NULL)
delete[](char*)tensor->dataHost;
tensor->dataHost = NULL;
/* return the old device storage to its pool, then adopt the CPU buffer */
tensor->mem->Release(tensor->data, tensor->GetDataSizeInChar(), tensor->signature);
tensor->data = CPUData;
tensor->devID = CPUMem != NULL ? CPUMem->devID : devID;
tensor->mem = CPUMem;
#endif
}
/* copy the data from GPU memory to CPU memory (into dataHost) without
   releasing the device-side data; any previous dataHost is replaced */
void CudaGPUToCPUFlush(XTensor * tensor)
{
if (tensor->dataHost != NULL)
delete[](char*)tensor->dataHost;
if (tensor->isSparse) {
/* the actual tuple count is stored as the first int of the
   device buffer; the denseRatio estimate is only a fallback */
int num = int(tensor->unitNum * tensor->denseRatio + 1);
cudaMemcpy(&num, tensor->data, sizeof(int), cudaMemcpyDeviceToHost);
int tupleSize = sizeof(int) + tensor->unitSize;
int size = sizeof(int) + tupleSize*(num);
CheckNTErrors((size >= 0), "Illegal data size in the sparse matrix!");
tensor->dataHost = new char[size];
cudaMemcpy(tensor->dataHost, tensor->data, size, cudaMemcpyDeviceToHost);
}
else {
tensor->dataHost = new char[tensor->unitNum * tensor->unitSize];
/* a NULL data pointer yields a zero-filled host copy */
if (tensor->data != NULL)
XMemCopy(tensor->dataHost, -1, tensor->data, tensor->devID, tensor->unitNum * tensor->unitSize);
else
memset(tensor->dataHost, 0, tensor->unitNum * tensor->unitSize);
}
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
9,461 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Prints a greeting from every thread.  Device printf output is only
// delivered to the host at a synchronization point.
__global__ void hello_kernel()
{
    printf("Hello World from Thread %d", threadIdx.x);
}
// Launches hello_kernel on one block of ten threads and waits for it
// to finish so the device printf output is actually flushed.
int main(int argc, char *argv[])
{
    dim3 blocksPerGrid(1, 1, 1);
    dim3 threadsPerBlock(10, 1, 1);
    hello_kernel<<<blocksPerGrid, threadsPerBlock>>>();
    // Bug fix: kernel launches are asynchronous; without this sync the
    // process can exit before the kernel runs and prints anything.
    cudaDeviceSynchronize();
    return 0;
}
9,462 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_DIM 32
#define SCALING_FACTOR 100.0
#define TILE_DIM 32
#define NUM_THREADS 1024
// Build and return a k-element array of random matrix dimensions in
// [1, MAX_DIM], echoing each one to stdout.  Caller frees the buffer.
// NOTE(review): malloc is unchecked, and srand(time(NULL)) is also called in
// creat_mat -- reseeding within the same second repeats the sequence.
int * def_mat_dim(int k)
{
int * dim = (int *) malloc(k * sizeof(int));
int i;
srand(time(NULL));
for (i = 0; i < k; i++)
{
dim[i] = (rand() % MAX_DIM) + 1;
printf("%d\n", dim[i]);
}
return dim;
}
// Allocate a dimX*dimY matrix (flat, row-major) filled with uniform random
// values in [0, SCALING_FACTOR].  Caller frees the buffer.
// NOTE(review): calling srand(time(NULL)) on every invocation means matrices
// created in the same second are identical -- seed once in main instead.
double * creat_mat(int dimX, int dimY)
{
int x;
double * mat = (double *) malloc(dimX * dimY * sizeof(double));
srand(time(NULL));
for (x = 0; x < dimX * dimY; x++) {
mat[x] = float(rand()) / float(RAND_MAX) * SCALING_FACTOR;
//printf("%f\n", mat[x]);
}
return mat;
}
// Placeholder device helper for a single matrix multiply -- not yet implemented.
__device__
void matmult(/* parameters */) {
}
// Placeholder kernel intended to chain-multiply the matrices in mat_list
// (dimensions in dim_list) -- not yet implemented.
__global__
void multi_matmult(int num_dim, int * dim_list, double ** mat_list)
{
}
// Tiled matrix multiply C = A * B using shared-memory tiles.
// A is ARows x ACols, B is BRows x BCols, C is CRows x CCols (all row-major).
// Expects a 2D launch with blockDim == (TILE_DIM, TILE_DIM).
__global__ void MatMul(double* A, double* B, double* C, int ARows, int ACols, int BRows,
int BCols, int CRows, int CCols)
{
    // Bug fix: the accumulator was declared float, silently truncating the
    // double inputs on every multiply-add; accumulate in double.
    double CValue = 0.0;
    int Row = blockIdx.y*TILE_DIM + threadIdx.y;
    int Col = blockIdx.x*TILE_DIM + threadIdx.x;
    __shared__ double As[TILE_DIM][TILE_DIM];
    __shared__ double Bs[TILE_DIM][TILE_DIM];
    // Walk the shared dimension one TILE_DIM-wide tile at a time.
    for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
        // Stage one tile of A and B, zero-padding past the matrix edges.
        if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)
            As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
        else
            As[threadIdx.y][threadIdx.x] = 0.0;
        if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)
            Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0;
        __syncthreads();   // tiles fully staged before any thread reads them
        for (int n = 0; n < TILE_DIM; ++n)
            CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
        __syncthreads();   // all reads done before the next tile overwrite
    }
    // Bug fix: the original store index mixed blockDim with TILE_DIM; reuse
    // the Row/Col computed above so the guard and the store always agree.
    if (Row < CRows && Col < CCols)
        C[Row*CCols + Col] = CValue;
}
int main()
{
    int num_dim = 3;                       // chain of num_dim-1 matrices
    int num_mat = num_dim - 1;
    int * mat_dim = def_mat_dim(num_dim);  // random dims in [1, MAX_DIM]
    double ** mat_list = (double **) malloc((num_mat) * sizeof(double *));
    // Matrix dimensions on the device (kept for the future multi_matmult path).
    int * d_mat_dim;
    cudaMalloc((void **)&d_mat_dim, num_dim * sizeof(int));
    cudaMemcpy(d_mat_dim, mat_dim, num_dim * sizeof(int), cudaMemcpyHostToDevice);
    // Create the host matrices.
    int k;
    for (k = 0; k < num_mat; k++) {
        printf("%d %d\n", mat_dim[k], mat_dim[k+1]);
        mat_list[k] = creat_mat(mat_dim[k], mat_dim[k+1]);
    }
    // Output matrix (mat_dim[0] x mat_dim[num_dim-1]) on host and device.
    double * out_mat = (double *) malloc(mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
    double * d_out_mat;
    cudaMalloc((void **) &d_out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
    // Device copy of each input matrix; host array holds the device pointers.
    double ** int_mat_list = (double **) malloc(num_mat * sizeof(double *));
    for (k = 0; k < num_mat; k++) {
        cudaMalloc((void **)&int_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double));
        cudaMemcpy(int_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double), cudaMemcpyHostToDevice);
    }
    // Device-side array of the device pointers (for multi_matmult).
    double ** d_mat_list;
    cudaMalloc(&d_mat_list, num_mat * sizeof(double *));
    cudaMemcpy(d_mat_list, int_mat_list, num_mat * sizeof(double *), cudaMemcpyHostToDevice);
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    printf("%d %d %d\n", mat_dim[0], mat_dim[1], mat_dim[2]);
    // Bug fix: MatMul indexes threads as a TILE_DIM x TILE_DIM 2D block, but
    // the original launched <<<2, NUM_THREADS>>> (a 1D block), so threadIdx.y
    // was always 0 and the result was wrong.  Launch a 2D grid covering C.
    dim3 dimBlock(TILE_DIM, TILE_DIM);
    dim3 dimGrid((mat_dim[2] + TILE_DIM - 1) / TILE_DIM,
                 (mat_dim[0] + TILE_DIM - 1) / TILE_DIM);
    MatMul<<<dimGrid, dimBlock>>>(int_mat_list[0], int_mat_list[1], d_out_mat, mat_dim[0], mat_dim[1], mat_dim[1], mat_dim[2], mat_dim[0], mat_dim[2]);
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Release device and host memory (the original leaked all of it).
    cudaFree(d_mat_dim);
    cudaFree(d_out_mat);
    cudaFree(d_mat_list);
    for (k = 0; k < num_mat; k++) {
        cudaFree(int_mat_list[k]);
        free(mat_list[k]);
    }
    free(int_mat_list);
    free(mat_list);
    free(out_mat);
    free(mat_dim);
    return 0;
}
|
9,463 | #include "includes.h"
// Fill dest[0..len) with the scalar alpha.
// Grid-stride loop: each thread writes dest[idx], dest[idx + stride], ...
// so any grid size covers all len elements.
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
    const unsigned int stride = blockDim.x * gridDim.x;
    unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
    while (pos < len) {
        dest[pos] = alpha;
        pos += stride;
    }
}
9,464 | // Checks that cuda compilation does the right thing when passed -march.
// (Specifically, we want to pass it to host compilation, but not to device
// compilation or ptxas!)
//
// REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang -### -target x86_64-linux-gnu -c -march=haswell %s 2>&1 | FileCheck %s
// RUN: %clang -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_20 %s 2>&1 | \
// RUN: FileCheck %s
// CHECK: bin{{/|\\+}}clang
// CHECK: "-cc1"
// CHECK-SAME: "-triple" "nvptx
// CHECK-SAME: "-target-cpu" "sm_20"
// CHECK: ptxas
// CHECK-SAME: "--gpu-name" "sm_20"
// CHECK: bin{{/|\\+}}clang
// CHECK-SAME: "-cc1"
// CHECK-SAME: "-target-cpu" "haswell"
|
9,465 |
#include <vector>
#include <cassert>
#include <iostream>
__device__ int dummy;
// Minimal kernel (a single global store) so launch overhead can be measured
// without any real work.
__global__ void dummy_kernel()
{
dummy = 0;
}
// Abort (via assert) on any CUDA API failure; no-op in NDEBUG builds.
void check(cudaError_t status)
{
assert(status == cudaSuccess);
}
// Measure the average per-kernel launch overhead by issuing kernelsPerStream
// empty kernels on each of streamCount non-blocking streams, bracketed by
// begin/end events recorded on every stream.
void benchmark(int streamCount, int kernelsPerStream)
{
std::vector<cudaStream_t> streams;
std::vector<cudaEvent_t> beginEvents;
std::vector<cudaEvent_t> endEvents;
// Non-blocking streams so the launches do not serialize against stream 0.
for(int i = 0; i < streamCount; ++i)
{
streams.push_back(nullptr);
check(cudaStreamCreateWithFlags(&streams.back(), cudaStreamNonBlocking));
}
for(int i = 0; i < streamCount; ++i)
{
endEvents.push_back(nullptr);
check(cudaEventCreate(&endEvents.back()));
beginEvents.push_back(nullptr);
check(cudaEventCreate(&beginEvents.back()));
}
// Timestamp the start of each stream's work.
for(int i = 0; i < streamCount; ++i)
{
check(cudaEventRecord(beginEvents[i], streams[i]));
}
// Round-robin the launches across the streams.
for(int i = 0; i < kernelsPerStream; ++i)
{
for(int j = 0; j < streamCount; ++j)
{
dummy_kernel<<<1, 1, 0, streams[j]>>>();
}
}
for(int i = 0; i < streamCount; ++i)
{
check(cudaEventRecord(endEvents[i], streams[i]));
}
check(cudaDeviceSynchronize());
// Wall time = the slowest stream's begin->end span.
double duration = 0.0;
for(int i = 0; i < streamCount; ++i)
{
float ms = 0.0f;
check(cudaEventElapsedTime(&ms, beginEvents[i], endEvents[i]));
duration = std::max(static_cast<double>(ms), duration);
}
// ms -> us, averaged over every kernel launched.
std::cout << "Overhead per kernel is " << (duration * 1e3) / (streamCount * kernelsPerStream) << "us\n";
for(auto event : beginEvents)
{
check(cudaEventDestroy(event));
}
for(auto event : endEvents)
{
check(cudaEventDestroy(event));
}
for(auto stream : streams)
{
check(cudaStreamDestroy(stream));
}
}
int main()
{
// Small first run (presumably a warm-up -- TODO confirm), then the same
// total kernel count issued on one stream vs. spread over 50 streams.
benchmark(1, 100);
benchmark(1, 50*100000);
benchmark(50, 100000);
return 0;
}
|
9,466 | #include "includes.h"
// For every in-bounds pixel of an n_cols x n_rows image, write NaN into
// d_IMO wherever the mask d_IMOMask is zero; masked-in pixels are untouched.
// Expects a 2D launch covering the image.
// NOTE(review): d_disparity and offset are accepted but never used here.
__global__ void IMOMask_kernel(float *d_IMOMask, float *d_IMO, const float *d_disparity, float offset, int n_cols, int n_rows) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
unsigned int ind = x + y * n_cols;
// zero mask value -> invalidate the pixel with NaN
if (!(bool)(d_IMOMask[ind])) {
d_IMO[ind] = nanf("");
}
}
}
9,467 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (the file header says "do not
// modify"): folds the inputs through a fixed expression chain and prints the
// final value of comp with full precision.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18) {
if (comp <= var_1 + (+1.8492E-43f + +1.0630E35f - (var_2 * (var_3 * var_4)))) {
float tmp_1 = fmodf((var_6 * (-1.5153E-42f * var_7 - (-1.6779E-37f - (var_8 * var_9)))), (-1.2966E-36f - -1.7316E-44f - (+1.0173E-35f * (var_10 - var_11 - var_12))));
comp = tmp_1 - (+1.4222E-35f - (+1.4708E-2f / -1.1461E10f * var_13));
float tmp_2 = var_14 - -1.8882E-41f / +1.9457E34f * var_15 - var_16;
comp += tmp_2 + (var_17 + +1.1120E-4f);
for (int i=0; i < var_5; ++i) {
comp += -1.9710E-17f + log10f(fabsf(+1.4002E-41f));
comp += var_18 * (-1.3840E-37f + (-1.1320E-36f - floorf(-1.7171E-42f)));
comp = (-1.2470E27f - +1.7430E34f);
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float array with every slot set to v.
// Caller owns (and must free) the returned buffer.
float* initPointer(float v) {
    const int count = 10;
    float *buffer = (float*) malloc(count * sizeof(float));
    int idx = 0;
    while (idx < count) {
        buffer[idx] = v;
        ++idx;
    }
    return buffer;
}
int main(int argc, char** argv) {
    /* Program variables */
    // Bug fix: the original read argv[1]..argv[19] without checking argc,
    // crashing on any short command line.
    if (argc < 20) {
        fprintf(stderr, "usage: %s <19 numeric arguments>\n", argv[0]);
        return EXIT_FAILURE;
    }
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    int tmp_6 = atoi(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    // Single-thread launch; the sync flushes the kernel's printf output.
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19);
    cudaDeviceSynchronize();
    return 0;
}
|
9,468 | // CMPE297-6 HW2
// CUDA version Rabin-Karp
/* *
HW2 by Jiongfeng Chen
Parallelize the sequential version of Rabin-Karp String matching algorithm
on GPU: Searching for multiple patterns in the input sequence
Test Case:
input string:"Hello, 297 Class!"
pattern 1:"alxxl";
pattern 2:"llo";
pattern 3:", 297";
pattern 4:"97 Cl";
Output:
Kernel Execution Time: 5336 cycles
Total cycles: 5336
Kernel Execution Time: 5336 cycles
Searching for multiple patterns in the input sequence
Input string: Hello, 297 Class!
Pattern: "llo" was found.
Pattern: ", 297" was found.
Pattern: "97 Cl" was found.
run:nvcc -I/usr/local/cuda/include -I. -lineinfo -arch=sm_53 --ptxas-options=-v -g -c cmpe297_hw2_rabin_karp_multiPattern.cu -o cmpe297_hw2_rabin_karp_multiPattern.o
*/
#include<stdio.h>
#include<iostream>
#include <cuda_runtime.h>
/*ADD CODE HERE: Implement the parallel version of the sequential Rabin-Karp*/
/* Device-side pattern comparison: returns 0 when pattern matches input
   starting at index, 1 otherwise (memcmp-like semantics).
   NOTE(review): the name shadows the C library memcpy and does not copy
   anything -- consider renaming to device_memcmp. */
__device__ int
memcpy(char* input, int index, char* pattern, int pattern_length)
{
for(int i = 0; i< pattern_length; i++)
if(pattern[i] != input[index+i])
return 1;
return 0;
}
/* Rabin-Karp search, one thread per candidate alignment.  The alignment
   spaces of the four patterns are concatenated; searchSpaceIndex[i] is the
   (exclusive) end of pattern i's region, patternIndex[i] is pattern i's
   start offset inside the flattened pattern buffer.  Each thread hashes its
   window and sets result[i] on a hash match (patterns 2 and 3 also verify
   byte-by-byte; 0 and 1 trust the hash alone).  runtime[tid] records the
   thread's elapsed clock64() cycles. */
__global__ void
findIfExistsCu(char* input, int input_length, char* pattern, int* patternLength, int* patHashes, int* result, int *runtime)
{
int start_time = clock64();
int tid = threadIdx.x;
int inputHash = 0;
int searchSpaceIndex[4];
int patternIndex[4];
/* NOTE(review): every thread clears result[]; racy but benign since all
   threads write the same zeros before any thread can set a flag. */
for(int i = 0; i< 4; i++)
result[i]=0;
for(int i = 0; i< 4; i++)
searchSpaceIndex[i]=0;
/* prefix sums of (input_length - patternLength[i] + 1) alignment counts */
for (int i = 0; i < 4; i++)
if(i==0)
searchSpaceIndex[i] = input_length - patternLength[i] +1 ;
else
searchSpaceIndex[i] = searchSpaceIndex[i-1] + input_length - patternLength[i] +1 ;
//printf("C----tid-=%d--------%d-%d-%d-%d\n", tid,searchSpaceIndex[0],searchSpaceIndex[1] ,searchSpaceIndex[2] ,searchSpaceIndex[3] );
/* prefix sums of pattern lengths = start offsets in the flat buffer */
for(int i = 0; i < 4; i++)
if(i == 0)
patternIndex[i] = 0;
else
patternIndex[i] = patternIndex[i-1] + patternLength[i-1];
int searchStart;
/* region 0: hash the window and compare against pattern 0's hash */
if(tid>=0 && tid <searchSpaceIndex[0])
{
searchStart =tid-searchSpaceIndex[0] +patternIndex[0];
for(int i = searchStart; i< searchStart +patternLength[0]; i++)
inputHash = (inputHash*256 + input[i]) % 997;
if(inputHash == patHashes[0] && 1)
result[0]=1;
}
if(tid>=searchSpaceIndex[0] && tid <searchSpaceIndex[1])
{
searchStart =tid-searchSpaceIndex[1] +patternIndex[1];
for(int i = searchStart; i< searchStart +patternLength[1]; i++)
inputHash = (inputHash*256 + input[i]) % 997;
if(inputHash == patHashes[1] && 1)
result[1]=1;
}
/* regions 2 and 3 additionally verify byte-by-byte via the device memcpy */
if(tid>=searchSpaceIndex[1] && tid <searchSpaceIndex[2])
{
searchStart =tid-searchSpaceIndex[2] +patternIndex[2];
for(int i = searchStart; i< searchStart +patternLength[2]; i++)
inputHash = (inputHash*256 + input[i]) % 997;
if(inputHash == patHashes[2] && memcpy(&(input[searchStart]),0, pattern+patternIndex[2], patternLength[2])==0)
result[2]=1;
}
if(tid>=searchSpaceIndex[2] && tid <searchSpaceIndex[3])
{
searchStart =tid-searchSpaceIndex[3] +patternIndex[3];
for(int i = searchStart; i< searchStart +patternLength[3]; i++)
inputHash = (inputHash*256 + input[i]) % 997;
if(inputHash == patHashes[3] && memcpy(&(input[searchStart]),0, pattern+patternIndex[3], patternLength[3])==0)
result[3]=1;
}
int stop_time = clock64();
runtime[tid] = (int)(stop_time - start_time);
}
int main()
{
    // host variables
    char input[] = "Hello, 297 Class!"; /*Multiple Patter Version: Input string*/
    int* result;                        /*Per-pattern match flags copied back from the GPU*/
    int* runtime;                       /*Per-thread execution cycles*/
    int input_length = 17;              /*Input Length*/
    int pattern_number = 4;             /*Multiple Patter Version: Pattern Number*/
    int match_times = 0;                /*Total candidate alignments == thread count*/
    cudaError_t err = cudaSuccess;
    int * patternsLength;
    int patHashes[4];                   /*hash for each pattern*/
    char* patterns[4];                  /*Multiple Patter Version: Pattern String*/
    patterns[0] ="alxxl";
    patterns[1] ="llo";
    patterns[2] =", 297";
    patterns[3] ="97 Cl";
    // device variables
    char* d_input;
    char* d_pattern;
    int* d_result;
    int* d_runtime;
    int* d_patHashes;
    int* d_patternsLength;
    // Flatten the pattern list into a single buffer for the device;
    // patternIndex[i] is pattern i's start offset inside it.
    char patternLong[100] = "";
    int* patternIndex;
    int p_size = 4*sizeof(int);
    patternsLength = (int *) malloc(p_size);
    memset(patternsLength, 0, p_size);
    for(int i = 0; i < pattern_number; i++)
        patternsLength[i] = strlen(patterns[i]);
    patternIndex = (int *) malloc(pattern_number*sizeof(int));
    for(int i = 0; i < pattern_number; i++)
        if(i == 0)
            patternIndex[i] = 0;
        else
            patternIndex[i] = patternIndex[i-1] + patternsLength[i-1];
    for(int i = 0; i < pattern_number; i++)
        strcat(patternLong, patterns[i]);
    printf("Pattern: \"%s\" ...\n", patternLong);
    printf("Pattern: \"%s\"\n", patterns[0]);
    for(int i = 0; i < pattern_number; i++)
        printf("Pattern: \"%s\", lenth: %d.\n", patterns[i], patternsLength[i]);
    int totalPatternLength=0;
    for(int i = 0; i < pattern_number; i++)
        totalPatternLength += patternsLength[i];
    // One thread per candidate alignment of every pattern.
    match_times=0;
    for (int i = 0; i < pattern_number; i++)
        match_times += strlen(input) - patternsLength[i] +1 ;
    int runtime_size = match_times*sizeof(int);
    cudaMalloc((void**)&d_runtime, runtime_size);
    runtime = (int *) malloc(runtime_size);
    memset(runtime, 0, runtime_size);
    result = (int *) malloc((match_times)*sizeof(int));
    /*Calculate the Rabin-Karp hash of each pattern*/
    for (int i = 0; i < pattern_number; i++)
    {
        int tmp=patternIndex[i];
        patHashes[i] =0;
        for (int j = 0; j < patternsLength[i]; j++)
        {
            patHashes[i] = (patHashes[i] * 256 + patternLong[tmp+j]) % 997;
        }
        printf("patHash %d \n", patHashes[i]);
    }
    /*Allocate memory on the GPU and copy the inputs from the HOST*/
    int size = input_length*sizeof(char);
    err = cudaMalloc((void**)&d_input, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device d_input(error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Copy input string from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_input, input, size, cudaMemcpyHostToDevice);
    size = totalPatternLength*sizeof(char);
    err = cudaMalloc((void**)&d_pattern, size);
    printf("Copy pattern string from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_pattern, patternLong, size, cudaMemcpyHostToDevice);
    size = pattern_number*sizeof(int);
    err = cudaMalloc((void**)&d_patHashes, size);
    printf("Copy Hashes from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_patHashes, patHashes, size, cudaMemcpyHostToDevice);
    size = pattern_number*sizeof(int);
    err = cudaMalloc((void**)&d_patternsLength, size);
    printf("Copy patternsLength from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_patternsLength, patternsLength, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device d_input(error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    size = pattern_number*sizeof(int);
    err = cudaMalloc((void**)&d_result, size);
    /*Launch the kernel: one block, one thread per candidate alignment*/
    int blocksPerGrid = 1;
    int threadsPerBlock = match_times;
    findIfExistsCu<<<blocksPerGrid, threadsPerBlock>>>(d_input, input_length, d_pattern, d_patternsLength, d_patHashes, d_result,d_runtime);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
    // Copy the per-pattern result flags back to the host.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(result, d_result, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy result from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /*Copy the execution times from the GPU memory to HOST*/
    cudaMemcpy(runtime, d_runtime, runtime_size, cudaMemcpyDeviceToHost);
    /*Kernel time = the slowest thread's cycle count*/
    unsigned long long elapsed_time = 0;
    for(int i = 0; i < match_times; i++)
        if(elapsed_time < runtime[i])
            elapsed_time = runtime[i];
    //Print
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
    // Bug fix: the next two printf calls passed an unsigned long long to a
    // %d conversion, which is undefined behavior; use %llu.
    printf("Total cycles: %llu \n", elapsed_time);
    printf("Kernel Execution Time: %llu cycles\n", elapsed_time);
    printf("Searching for multiple patterns in the input sequence\n");
    printf("Input string: %s\n", input);
    //Print Result[];
    for(int i = 0; i < pattern_number; i++)
        if(result[i]==1)
            printf("Pattern: \"%s\" was found.\n", patterns[i]);
    // Free device memory.  Bug fix: the original also called
    // cudaFree(d_patterns) on a pointer that was never allocated or even
    // initialized, handing a garbage address to the CUDA runtime.
    cudaFree(d_input);
    cudaFree(d_pattern);
    cudaFree(d_result);
    cudaFree(d_runtime);
    cudaFree(d_patHashes);
    cudaFree(d_patternsLength);
    // Free host memory (patternsLength and patternIndex leaked before).
    free(result);
    free(runtime);
    free(patternsLength);
    free(patternIndex);
    return 0;
}
|
9,469 | #include "tensor_gpu.cuh"
#include <cuda_runtime.h>
// Allocate an uninitialized device buffer of p_size floats.
tensor_gpu::tensor_gpu(const int p_size) : _size(p_size)
{
_data = init_data(_size);
}
// Deep copy: allocate a fresh device buffer and copy device-to-device.
tensor_gpu::tensor_gpu(const tensor_gpu& p_copy)
{
_size = p_copy._size;
_data = init_data(_size);
cudaMemcpy(_data, p_copy._data, sizeof(float) * p_copy._size, cudaMemcpyDeviceToDevice);
}
// Copy assignment: reallocate the device buffer when sizes differ, then
// copy device-to-device.  Bug fixes vs. the original: (1) on a size
// mismatch it reallocated with the OLD _size and then copied
// p_copy._size floats, overflowing the new buffer when the source was
// larger; (2) there was no self-assignment guard, so `t = t` freed _data
// and then copied from the freed buffer.
tensor_gpu& tensor_gpu::operator=(const tensor_gpu& p_copy)
{
	if (this == &p_copy)
		return *this;                  // self-assignment is a no-op
	if (_size != p_copy._size)
	{
		cudaFree(_data);
		_size = p_copy._size;          // allocate the *new* size
		_data = init_data(_size);
	}
	cudaMemcpy(_data, p_copy._data, sizeof(float) * p_copy._size, cudaMemcpyDeviceToDevice);
	return *this;
}
// Release the device buffer.
tensor_gpu::~tensor_gpu()
{
_size = 0;
cudaFree(_data);
}
// Upload _size floats from host memory into the device buffer.
// NOTE(review): the cudaMemcpy status is captured but never checked.
void tensor_gpu::to_gpu(float* p_cpu_data) const
{
cudaError_t error = cudaMemcpy(_data, p_cpu_data, sizeof(float) * _size, cudaMemcpyHostToDevice);
}
// Download _size floats from the device buffer into host memory.
// NOTE(review): the cudaMemcpy status is captured but never checked.
void tensor_gpu::to_cpu(float* p_cpu_data) const
{
cudaError_t error = cudaMemcpy(p_cpu_data, _data, sizeof(float) * _size, cudaMemcpyDeviceToHost);
}
// Allocate a device buffer of p_size floats and return it.
// NOTE(review): the cudaMalloc status is ignored -- on failure the returned
// pointer is indeterminate.  p_size is never modified; int& could be int.
float* tensor_gpu::init_data(int& p_size)
{
float* result;
cudaError_t error = cudaMalloc(reinterpret_cast<void**>(&result), sizeof(float) * p_size);
return result;
}
9,470 | #include "includes.h"
// C = A * B for row-major integer matrices: a is ROW x temp, b is
// temp x COLUMNS, c is ROW x COLUMNS.  One thread computes one element of c.
__global__ void matrixMul(int *a, int *b, int *c, int ROW, int COLUMNS, int temp)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int q = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= ROW || q >= COLUMNS)
        return;                        // guard the grid tail
    int acc = 0;
    for (int j = 0; j < temp; j++)
        acc += a[r * temp + j] * b[j * COLUMNS + q];
    c[r * COLUMNS + q] = acc;
}
9,471 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#include <cuda.h>
#ifdef __NVCC__
#include <cuda_profiler_api.h>
#endif
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
//TODO: Remove this hack added for DPC++
#ifdef DPCT_COMPATIBILITY_TEMP
#undef DPCT_COMPATIBILITY_TEMP
#endif
// Exit with file:line context when a CUDA API call returns an error.
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Check for a pending (async) CUDA error, then synchronize and check again
// so execution errors from earlier kernel launches also surface.
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
}
// Thin C-linkage wrappers around the CUDA runtime so non-C++ callers
// (e.g. an FFI binding) can drive the GPU; every call is error-checked.
extern "C" {
void GetDeviceCount(int *count) {
CudaSafeCall(cudaGetDeviceCount(count));
}
void GetDevice(int *device) {
CudaSafeCall(cudaGetDevice(device));
}
void SetDevice(int device) {
CudaSafeCall(cudaSetDevice(device));
}
// Profiler start/stop are NVCC-only; no-ops under other compilers.
void ProfilerStart() {
#ifdef __NVCC__
CudaSafeCall(cudaProfilerStart());
#endif
}
void ProfilerStop() {
#ifdef __NVCC__
CudaSafeCall(cudaProfilerStop());
#endif
}
void DeviceSynchronize() {
CudaCheckError();
CudaSafeCall(cudaDeviceSynchronize());
}
// The three Malloc variants differ only in pointer depth for FFI convenience.
void Malloc(void** devPtr, size_t size) {
CudaSafeCall(cudaMalloc(devPtr, size));
}
void MallocPtr(void*** devPtr, size_t size) {
CudaSafeCall(cudaMalloc(devPtr, size));
}
void MallocPtrPtr(void**** devPtr, size_t size) {
CudaSafeCall(cudaMalloc(devPtr, size));
}
void MallocPitch(void** devPtr, size_t* pitch, size_t width, size_t height) {
CudaSafeCall(cudaMallocPitch(devPtr, pitch, width, height));
}
// kind: 0 = host-to-device, 1 = device-to-host; anything else is fatal.
void Memcpy(void* dst, void* src, size_t count, int kind) {
switch (kind) {
case 0:
CudaSafeCall(cudaMemcpy(dst, src, count, cudaMemcpyHostToDevice));
break;
case 1:
CudaSafeCall(cudaMemcpy(dst, src, count, cudaMemcpyDeviceToHost));
break;
default:
printf("Fatal: Wrong Memcpy kind!\n");
exit(1);
}
}
// Pitched 2D copy; same kind encoding as Memcpy.
void Memcpy2D(void* dst, size_t dpitch, void* src, size_t spitch, size_t width, size_t height, int kind) {
switch (kind) {
case 0:
CudaSafeCall(cudaMemcpy2D(dst, dpitch, src, spitch, width, height, cudaMemcpyHostToDevice));
break;
case 1:
CudaSafeCall(cudaMemcpy2D(dst, dpitch, src, spitch, width, height, cudaMemcpyDeviceToHost));
break;
default:
printf("Fatal: Wrong Memcpy kind!\n");
exit(1);
}
}
void Free(void* devPtr) {
CudaSafeCall(cudaFree(devPtr));
}
}
|
9,472 |
#include <stdio.h>
#include <stdlib.h>
#define N 256*128
#define THREADS_PER_BLOCK 128
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel for dot product
// Dot product: each block stages per-thread products in shared memory,
// thread 0 reduces them and atomically adds the block's partial sum to *c.
__global__ void dot( int *a, int *b, int *c ) {
    __shared__ int prod[THREADS_PER_BLOCK]; // per-thread products
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    prod[threadIdx.x] = a[index] * b[index];
    __syncthreads(); // all products staged before the reduction reads them
    if( threadIdx.x == 0) {
        int par_sum = 0;
        // Bug fix: the original accumulated prod[threadIdx.x] (always slot 0)
        // THREADS_PER_BLOCK times instead of walking the whole tile.
        for(int i=0; i<THREADS_PER_BLOCK; i++)
            par_sum += prod[i];       // block-level reduction
        atomicAdd(c,par_sum);         // grid-level reduction
    }
}
// Main program
// Compute the dot product of two length-N all-ones vectors on the GPU
// (expected result: N) and print it.
int main(void){
int *a,*b,*c; // Host copies
int *a_dev,*b_dev,*c_dev; // Device copies
int size = N*sizeof(int); // Size of N integer
// Allocate host memory
a = (int *) malloc (size);
b = (int *) malloc (size);
c = (int *) malloc (sizeof(int));
// Allocate device memory
cudaMalloc( (void**)&a_dev, size);
cudaMalloc( (void**)&b_dev, size);
cudaMalloc( (void**)&c_dev, sizeof(int));
// Initialize
for (int i=0; i<N; i++){
a[i] = 1;
b[i] = 1;
}
*c = 0;
// Copy inputs to device (also zeroes the device accumulator via c)
cudaMemcpy( a_dev, a, size , cudaMemcpyHostToDevice );
cudaMemcpy( b_dev, b, size , cudaMemcpyHostToDevice );
cudaMemcpy( c_dev, c, sizeof(int), cudaMemcpyHostToDevice );
// Launch kernel on device
dot <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev, b_dev, c_dev);
// Copy device result back to host (blocking cudaMemcpy implicitly waits
// for the kernel to finish on the default stream)
cudaMemcpy( c, c_dev, sizeof(int), cudaMemcpyDeviceToHost );
// Print result
printf("%d\n",*c);
// Free device memory
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(c_dev);
// Free host memory
free(a);
free(b);
free(c);
return 0;
}
|
9,473 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <string>
#include <cstdint>
#include <iostream>
#include <fstream>
#include <chrono>
#include <atomic>
// Brute-force 0/1 knapsack: each thread evaluates one subset id (a bitmask
// over the items).  victorValue/victorId keep, per thread slot, the best
// value seen and the subset that achieved it across successive launches
// (*offset advances by the grid size each launch; slots are zeroed on the
// first launch, when *offset == 0).
__global__ void knapKernel(
const uint16_t* maxWeight, const uint16_t* arraySize,
const uint16_t* valueD, const uint16_t* weightD,
unsigned long long int* victorId, uint16_t* victorValue,
unsigned long long int* offset, int* memSize, unsigned long long int runLength)
{
    unsigned long long int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: id was a 32-bit unsigned int, truncating subset ids once
    // offset passed 2^32 (i.e. for more than 32 items).
    unsigned long long int id = threadId + *offset;
    unsigned int sackWeight = 0;
    // Bug fix: valid subsets are 0 .. runLength-1 (runLength == 2^n), so the
    // guard must be '<'; the original '<=' evaluated one extra id whose high
    // bit read weightD/valueD past the end of the item arrays.
    if (id < runLength) {
        if (*offset == 0) {
            victorValue[threadId] = 0;
            victorId[threadId] = 0;
        }
        // Total the weights of the items selected by the id bitmask.
        unsigned long long int trueIdLoc = id;
        for (int a1 = 0; trueIdLoc; a1++) {
            if (trueIdLoc & 1) {
                sackWeight += (weightD[a1]);
            }
            trueIdLoc >>= 1;
        }
        // If the sack fits, total its value and keep it when it beats this
        // slot's current best.
        unsigned int sackValue = 0;
        if (sackWeight <= *maxWeight) {
            trueIdLoc = id;
            for (int a1 = 0; trueIdLoc; a1++) {
                if (trueIdLoc & 1) {
                    sackValue += (valueD[a1]);
                }
                trueIdLoc >>= 1;
            }
            if (sackValue > victorValue[threadId]) {
                victorValue[threadId] = sackValue;
                victorId[threadId] = id;
            }
        }
    }
}
int main()
{
    uint16_t arraySize = 0;
    uint16_t maxWeight = 0;
    uint16_t* weightH = nullptr;
    uint16_t* valueH = nullptr;
    // Parse items.txt: line 0 = knapsack capacity, line 1 = item count,
    // then one "weight value" pair per line.
    try {
        std::ifstream file("items.txt", std::ifstream::in);
        std::string line;
        for (int a1 = 0; std::getline(file, line); a1++)
        {
            switch(a1)
            {
            case 0:
                maxWeight = (std::stoi(line));
                break;
            case 1:
                arraySize = std::stoi(line);
                weightH = (uint16_t*)malloc(sizeof(uint16_t) * arraySize);
                valueH = (uint16_t*)malloc(sizeof(uint16_t) * arraySize);
                break;
            default: {
                // Split once on the first space (the original re-parsed the
                // whole line for every space character it found).
                std::size_t pos = line.find(' ');
                if (pos != std::string::npos) {
                    weightH[a1 - 2] = std::stoi(line.substr(0, pos));
                    valueH[a1 - 2] = std::stoi(line.substr(pos));
                }
                break;
            }
            }
        }
        file.close();
    }
    catch (...) { // TODO: catch-all is poor practice; narrow to std::exception
        std::cout << "Error reading file" << std::endl;
        return 0;
    }
    std::cout << arraySize << "\n" << maxWeight << std::endl;
    for (int a1 = 0; a1 < arraySize; a1++) {
        std::cout << "("<< weightH[a1] << " " << valueH[a1] << ")" << std::endl;
    }
    size_t size = arraySize * sizeof(uint16_t);
    cudaSetDevice(0);
    // Device copies of the item tables and scalar parameters.
    uint16_t* weightD;
    cudaMalloc(&weightD, size);
    uint16_t* valueD;
    cudaMalloc(&valueD, size);
    uint16_t* maxWeightD;
    cudaMalloc(&maxWeightD, sizeof(uint16_t));
    uint16_t* arraySizeD;
    cudaMalloc(&arraySizeD, sizeof(uint16_t));
    unsigned long long int* offsetD;
    cudaMalloc(&offsetD, sizeof(unsigned long long int));
    int* memSizeD;
    cudaMalloc(&memSizeD, sizeof(int));
    cudaMemcpy(weightD, weightH, size, cudaMemcpyHostToDevice);
    cudaMemcpy(valueD, valueH, size, cudaMemcpyHostToDevice);
    cudaMemcpy(maxWeightD, &maxWeight, sizeof(uint16_t), cudaMemcpyHostToDevice);
    cudaMemcpy(arraySizeD, &arraySize, sizeof(uint16_t), cudaMemcpyHostToDevice);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0); // 0-th device
    unsigned long long int runLength = pow(2.0, arraySize);  // 2^n subsets to test
    // Size a grid that can maximally fill the GPU for this kernel.
    int numBlocksPerSm = 0;
    int numThreads = 128;   // threads per block for knapKernel
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, knapKernel, numThreads, 0);
    int memSize = numBlocksPerSm * deviceProp.multiProcessorCount * numThreads; // thread slots per launch
    uint16_t* victorValueD;
    cudaMalloc(&victorValueD, sizeof(uint16_t) * memSize);
    unsigned long long int* victorIdD;
    cudaMalloc(&victorIdD, sizeof(unsigned long long int) * memSize);
    cudaMemcpy(memSizeD, &memSize, sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimBlock(numThreads, 1, 1);
    dim3 dimGrid(deviceProp.multiProcessorCount * numBlocksPerSm, 1, 1);
    // Sweep the subset space one grid-full at a time.
    int increment = deviceProp.multiProcessorCount * numBlocksPerSm * numThreads;
    for (unsigned long long int offset = 0; offset < runLength; offset += increment) {
        cudaDeviceSynchronize();   // ensure the prior launch is done before offsetD is rewritten
        cudaMemcpy(offsetD, &offset, sizeof(unsigned long long int), cudaMemcpyHostToDevice);
        knapKernel <<<dimGrid, dimBlock >>> (maxWeightD, arraySizeD, valueD, weightD, victorIdD, victorValueD, offsetD, memSizeD, runLength);
    }
    // Per-slot winners back to the host (blocking memcpy waits for the GPU).
    unsigned long long int *victorIdH = (unsigned long long int*)malloc (sizeof(unsigned long long int) * memSize);
    uint16_t *victorValueH = (uint16_t*)malloc(sizeof(uint16_t) * memSize);
    for (int a1 = 0; a1 < memSize; a1++) {
        victorIdH[a1] = 0;
        victorValueH[a1] = 0;
    }
    cudaMemcpy(victorIdH, victorIdD, sizeof(unsigned long long int) * memSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(victorValueH, victorValueD, sizeof(uint16_t) * memSize, cudaMemcpyDeviceToHost);
    // Reduce the slots to the single best subset.
    uint16_t highValue = 0;
    unsigned long long int highId = 0;
    for (int a1 = 0; a1 < memSize; a1++) {
        if (victorValueH[a1] > highValue) {
            highValue = victorValueH[a1];
            highId = victorIdH[a1];
        }
    }
    std::cout << "Sets to Search:" << runLength << std::endl;
    std::cout << "Mem Size:" << memSize << std::endl;
    std::cout << "Value:" << highValue << " ID:" << highId << std::endl;
    // Print the 1-based item numbers selected by the winning bitmask.
    for (int a1 = 0; highId; a1 ++) {
        if (highId & 1)
            printf("%d ", a1 + 1);
        highId >>= 1;
    }
    // Free memory.  Bug fix: the original leaked victorValueD, victorIdD,
    // memSizeD, weightH and valueH.
    cudaFree(weightD);
    cudaFree(valueD);
    cudaFree(maxWeightD);
    cudaFree(arraySizeD);
    cudaFree(offsetD);
    cudaFree(memSizeD);
    cudaFree(victorValueD);
    cudaFree(victorIdD);
    free(victorIdH);
    free(victorValueH);
    free(weightH);
    free(valueH);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaError_t cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
|
9,474 | #include "includes.h"
// Empty kernel.  NOTE(review): the name suggests it is launched purely to
// serialize/flush pending GPU work -- confirm against the call sites.
__global__ void FlushKernel(void)
{
}
9,475 | #pragma once
namespace cuFIXNUM {
/*
* Calculate the modular inverse.
* TODO: Only supports moduli of the form 2^k at the moment.
*/
template< typename fixnum >
struct modinv {
/*
* Return x = 1/b (mod 2^k). Must have 0 < k <= BITS.
*
* Source: MCA Algorithm 1.10.
*
* TODO: Calculate this using the multiple inversion trick (MCA 2.5.1)
*/
__device__ void operator()(fixnum &x, fixnum b, int k) const {
typedef typename fixnum::digit digit;
// b must be odd
digit b0 = fixnum::get(b, 0);
assert(k > 0 && k <= fixnum::BITS);
// Invert the low digit of b modulo 2^(digit bits).
digit binv;
digit::modinv_2exp(binv, b0);
x = fixnum::zero();
fixnum::set(x, binv, 0);
if (k <= digit::BITS) {
// Single-digit case: just truncate to k bits.
digit::rem_2exp(x, x, k);
return;
}
// Hensel lift x from (mod 2^WORD_BITS) to (mod 2^k)
// FIXME: Double-check this condition on k!
// Each iteration is a Newton step x <- x*(2 - b*x), doubling the
// number of correct low bits.
while (k >>= 1) {
fixnum t;
// TODO: Make multiplications faster by using the "middle
// product" (see MCA 1.4.5 and 3.3.2).
fixnum::mul_lo(t, b, x);
fixnum::sub(t, fixnum::one(), t);
fixnum::mul_lo(t, t, x);
fixnum::add(x, x, t);
}
}
};
} // End namespace cuFIXNUM
|
9,476 | //== inline __device__ uint32_t random(size_t seed)
//== {
//== uint32_t h = 5381;
//==
//== return (h << (seed % 15)) + h;
//== }
//==
//== __global__ void randomize_on_gpu_kernel
//== (
//== double* ptr__,
//== size_t size__
//== )
//== {
//== int i = blockIdx.x * blockDim.x + threadIdx.x;
//== if (i < size__) ptr__[i] = double(random(i)) / (1 << 31);
//== }
//==
//== extern "C" void randomize_on_gpu(double* ptr, size_t size)
//== {
//== dim3 grid_t(64);
//== dim3 grid_b(num_blocks(size, grid_t.x));
//==
//== randomize_on_gpu_kernel <<<grid_b, grid_t>>>
//== (
//== ptr,
//== size
//== );
//== }
|
9,477 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 32
// Tiled L1 (sum-of-absolute-differences) kernel:
//   c[i*N + j] = sum_{d=0..D-1} |a[i*D + d] - b[j*D + d]|
// a is indexed row-major as M x D; b is indexed as b[j*D + d]; c is M x N.
// Must be launched with BLOCK_SIZE x BLOCK_SIZE thread blocks and a grid of
// ceil(N/BLOCK_SIZE) x ceil(M/BLOCK_SIZE) blocks (see cuda_l1_t).
__global__ void l1_t_kernel(float *__restrict__ c, const float * __restrict__ a, const float * __restrict__ b,
const int M, const int N, const int D)
{
int block_row = blockIdx.y, block_col = blockIdx.x;
int row = threadIdx.y, col = threadIdx.x;
float c_value = 0;
#pragma unroll
for (int m = 0; m < (D + BLOCK_SIZE - 1) / BLOCK_SIZE; m++)
{
// Stage one BLOCK_SIZE-wide slice of a and of b in shared memory per iteration.
__shared__ float a_block[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b_block[BLOCK_SIZE][BLOCK_SIZE];
// Out-of-range tile entries are zero-filled; for in-range output threads both
// operands of such an entry are zero, so |0 - 0| adds nothing to the sum and
// the inner loop needs no bounds checks.
if (m * BLOCK_SIZE + col < D && block_row * BLOCK_SIZE + row < M)
{
a_block[row][col] = a[(m * BLOCK_SIZE + col) + (block_row * BLOCK_SIZE + row) * D];
}
else
{
a_block[row][col] = 0.0;
}
if (block_col * BLOCK_SIZE + col < N && m * BLOCK_SIZE + row < D)
{
b_block[row][col] = b[(block_col * BLOCK_SIZE + col) * D + (m * BLOCK_SIZE + row)];
}
else
{
b_block[row][col] = 0.0;
}
__syncthreads(); // tiles fully written before any thread reads them
#pragma unroll
for (int e = 0; e < BLOCK_SIZE; e++)
{
c_value += fabsf(a_block[row][e] - b_block[e][col]);
}
__syncthreads(); // all reads done before the next iteration overwrites the tiles
}
// Guard the grid tail: only in-range threads write a result.
if (block_col * BLOCK_SIZE + col < N && block_row * BLOCK_SIZE + row < M)
{
c[(block_col * BLOCK_SIZE + col) + (block_row * BLOCK_SIZE + row) * N] = c_value;
}
}
// Host wrapper for l1_t_kernel: computes the M x N matrix of L1 distances
// between the M rows of a (M x D) and the N rows of b (N x D), writing into c.
// All pointers must be device pointers. Blocks while the kernel completes.
void cuda_l1_t(float *c, const float *a, const float *b, const int M, const int N, const int D)
{
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE);
    l1_t_kernel<<<dimGrid, dimBlock>>>(c, a, b, M, N, D);
    // Kernel launches do not return errors directly: check for launch-config
    // errors now, and for asynchronous execution errors after the sync.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "l1_t_kernel launch failed: " << cudaGetErrorString(err) << std::endl;
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        std::cerr << "l1_t_kernel execution failed: " << cudaGetErrorString(err) << std::endl;
}
|
9,478 | #include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define cuda_check(ret) _cuda_check((ret), __FILE__, __LINE__)
// Abort the process with a source-located diagnostic when a CUDA runtime
// call reports anything other than success. Use via the cuda_check macro.
inline void _cuda_check(cudaError_t ret, const char *file, int line) {
    if (ret == cudaSuccess)
        return;
    fprintf(stderr, "CudaErr: %s (%s:%d)\n", cudaGetErrorString(ret), file, line);
    exit(1);
}
__constant__ int width_d;
__constant__ int height_d;
__constant__ int order_d;
// 2D Gaussian convolution with clamp-to-edge border handling.
// One thread per output pixel (2D grid). Image dimensions and kernel order
// are read from __constant__ memory (width_d, height_d, order_d), which
// gaussian_calc() sets via cudaMemcpyToSymbol before launching.
__global__ void gaussian_calc_kernel(unsigned char *image_mat, unsigned char *result_mat, float *kernel)
{
float val = 0;
int tx = threadIdx.x;
int ty = threadIdx.y;
int j = blockIdx.x * blockDim.x + tx; //col
int i = blockIdx.y * blockDim.y + ty; //row
// Guard the grid tail: grid dims are rounded up, so some threads fall outside.
if (i >= height_d || j >= width_d) {
return;
}
int center = (order_d - 1) / 2; // kernel radius (driver forces an odd order)
for (int x = 0; x < order_d; x++) {
for (int y = 0; y < order_d; y++) {
// Min accounts for right and bottom edges
// Max accounts for left and top edges
int mat_x = max(0, min(i + x - center, height_d - 1));
int mat_y = max(0, min(j + y - center, width_d - 1));
val += image_mat[mat_x * width_d + mat_y] * kernel[x * order_d + y];
}
}
// Truncating float->uchar conversion; kernel weights sum to 1 (normalized by
// the host), so val stays within the 0..255 input range.
result_mat[i * width_d + j] = (unsigned char) val;
}
/*
 * Run the Gaussian convolution on the GPU.
 * Allocates device buffers, uploads `kernel` (order x order floats) and
 * `image_mat` (width x height bytes), publishes the dimensions through
 * __constant__ symbols, launches the kernel and copies the result back
 * into `result_mat`. Aborts via cuda_check on any CUDA error.
 */
void gaussian_calc(unsigned char *image_mat, unsigned char *result_mat, float *kernel, int width, int height, int order)
{
    /* Allocate device memory for all matrices */
    float *kernel_d;
    unsigned char *image_mat_d, *result_mat_d;
    cuda_check(cudaMalloc(&kernel_d, order * order * sizeof(float)));
    cuda_check(cudaMalloc(&image_mat_d, width * height * sizeof(unsigned char)));
    cuda_check(cudaMalloc(&result_mat_d, width * height * sizeof(unsigned char)));
    /* Copy kernel and image_mat to device */
    cuda_check(cudaMemcpy(kernel_d, kernel, order * order * sizeof(float), cudaMemcpyHostToDevice));
    cuda_check(cudaMemcpy(image_mat_d, image_mat, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice));
    cuda_check(cudaMemcpyToSymbol(width_d, &width, sizeof(int)));
    cuda_check(cudaMemcpyToSymbol(height_d, &height, sizeof(int)));
    cuda_check(cudaMemcpyToSymbol(order_d, &order, sizeof(int)));
    /* Invoke kernel function.
     * BUGFIX: the block was previously 779x1 threads — not a multiple of the
     * 32-thread warp size and a degenerate shape for a 2D stencil. A 32x32
     * block (1024 threads) is warp-aligned in both dimensions. Correctness is
     * unaffected: the kernel bounds-checks i/j and the grid is derived from
     * the block shape. */
    dim3 block_dim(32, 32);
    dim3 grid_dim(DIV_ROUND_UP(width, block_dim.x), DIV_ROUND_UP(height, block_dim.y));
    gaussian_calc_kernel<<<grid_dim, block_dim>>>(image_mat_d, result_mat_d, kernel_d);
    cuda_check(cudaGetLastError()); /* catch launch-configuration errors */
    /* Copy result back to host (this memcpy synchronizes with the kernel) */
    cuda_check(cudaMemcpy(result_mat, result_mat_d, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost));
    /* Free device memory */
    cuda_check(cudaFree(kernel_d));
    cuda_check(cudaFree(image_mat_d));
    cuda_check(cudaFree(result_mat_d));
}
/*
 * Write a binary (P5) PGM grayscale image of width x height pixels to
 * `filename`. Exits the process if the file cannot be opened.
 */
void write_gaussian(char *filename, unsigned char *picture, int width, int height)
{
    FILE *fp = fopen(filename, "wb");
    if (fp == NULL) {
        fprintf(stderr, "Error: cannot open file %s", filename);
        exit(1);
    }
    /* PGM header: magic number, dimensions, maximum gray value */
    fprintf(fp, "P5\n%d %d\n255\n", width, height);
    /* Raw 8-bit pixel data, row-major */
    fwrite(picture, sizeof(unsigned char), width * height, fp);
    fclose(fp);
}
/*
 * Gaussian blur driver.
 * Usage: prog <input_pgm> <output_pgm> <sigma>
 * Reads a binary PGM (P5), builds a normalized Gaussian kernel of size
 * order x order where order = ceil(6*sigma) forced odd, convolves on the
 * GPU via gaussian_calc() and writes the blurred PGM.
 */
int main(int argc, char *argv[])
{
    float sigma, order;
    char *output_filename;
    int width, height;
    FILE *input_file;
    /* Command line arguments */
    if (argc < 4) {
        fprintf(stderr, "Usage: %s <input_pgm> <output_pgm> <sigma>\n",
                argv[0]);
        exit(1);
    }
    input_file = fopen(argv[1], "rb");
    if (!input_file) {
        fprintf(stderr, "Error: cannot open file %s", argv[1]);
        exit(1);
    }
    output_filename = argv[2];
    /* Skip the "P5" magic line, read width/height, skip the maxval line */
    if (fscanf(input_file, "%*[^\n]\n") != 0) {
        exit(1);
    }
    if (fscanf(input_file, "%d %d\n", &width, &height) != 2) {
        exit(1);
    }
    if (fscanf(input_file, "%*[^\n]\n") != 0) {
        exit(1);
    }
    sigma = atof(argv[3]);
    if (sigma <= 0) {
        fprintf(stderr, "Error: invalid sigma value");
        exit(1);
    }
    /* Kernel spans +-3 sigma; force an odd order so it has a center pixel */
    order = ceil(6 * sigma);
    if ((int)order % 2 == 0) {
        order++;
    }
    if (order > width || order > height) {
        fprintf(stderr, "Error: sigma value too big for image size");
        exit(1);
    }
    float *kernel;
    unsigned char *image_mat, *result_mat;
    kernel = (float*)aligned_alloc(64, (int) order * (int) order * sizeof(float));
    image_mat = (unsigned char*)aligned_alloc(64, width * height * sizeof(unsigned char));
    result_mat = (unsigned char*)aligned_alloc(64, width * height * sizeof(unsigned char));
    /* BUGFIX: the allocations were previously used unchecked — a NULL return
     * would have been passed straight to fread. */
    if (!kernel || !image_mat || !result_mat) {
        fprintf(stderr, "Error: out of memory");
        exit(1);
    }
    if (fread(image_mat, sizeof(unsigned char), height * width, input_file) != (size_t)(height * width)) {
        exit(1);
    }
    fclose(input_file);
    /* Build the unnormalized Gaussian, then normalize so the weights sum to 1 */
    float sum = 0;
    for (int i = 0; i < order; i++) {
        for (int j = 0; j < order; j++) {
            kernel[i * (int) order + j] = exp(-(pow(i - floor(order/2), 2) + pow(j - floor(order/2), 2))/(2 * sigma * sigma));
            sum += kernel[i * (int) order + j];
        }
    }
    for (int i = 0; i < order; i++) {
        for (int j = 0; j < order; j++) {
            kernel[i * (int) order + j] /= sum;
        }
    }
    gaussian_calc(image_mat, result_mat, kernel, width, height, (int) order);
    /* Save output image */
    write_gaussian(output_filename, result_mat, width, height);
    free(kernel);
    free(image_mat);
    free(result_mat);
    return 0;
}
9,479 | #include <stdio.h>
#include <assert.h>
#define N 1000000
// Print the CUDA error string with its source location and terminate.
// Use via the HANDLE_ERROR macro wrapped around any CUDA runtime call.
static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Check for a pending error from the most recent kernel launch (launch
// failures do not surface through a return value) and abort on failure.
// Use via the HANDLE_KERNEL_ERROR macro right after a <<<...>>> launch.
static void HandleKernelError(const char *file, int line) {
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_KERNEL_ERROR (HandleKernelError(__FILE__, __LINE__))
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
__global__ void vecadd(int *a, int *b, int *c){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N) return;        // guard the partial block at the grid tail
    c[idx] = a[idx] + b[idx];
}
/*
 * Vector-addition demo: fill two N-element int vectors, add them on the GPU
 * and verify every element on the host.
 */
int main (int argc, char **argv){
    // Static storage: three 1M-int arrays (~12 MB total) would overflow a
    // typical 8 MB stack as plain locals.
    static int a_host[N], b_host[N], c_host[N];
    int *a_device, *b_device, *c_device;
    int i;
    int blocksize=256;
    dim3 dimBlock(blocksize);
    dim3 dimGrid(ceil(N/(float)blocksize));   // round up so the tail is covered
    for (i=0;i<N;i++) a_host[i]=i;
    for (i=0;i<N;i++) b_host[i]=i;
    HANDLE_ERROR(cudaMalloc((void**)&a_device,N*sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&b_device,N*sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&c_device,N*sizeof(int)));
    HANDLE_ERROR(cudaMemcpy(a_device,a_host,N*sizeof(int),cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(b_device,b_host,N*sizeof(int),cudaMemcpyHostToDevice));
    vecadd<<<dimGrid,dimBlock>>>(a_device,b_device,c_device);
    HANDLE_KERNEL_ERROR;
    // BUGFIX: was N*2*sizeof(int), which read past the end of the N-element
    // device buffer and wrote past the end of c_host.
    HANDLE_ERROR(cudaMemcpy(c_host,c_device,N*sizeof(int),cudaMemcpyDeviceToHost));
    for (i=0;i<N;i++) assert (c_host[i] == a_host[i] + b_host[i]);
    HANDLE_ERROR(cudaFree(a_device));
    HANDLE_ERROR(cudaFree(b_device));
    HANDLE_ERROR(cudaFree(c_device));
    return 0;
}
|
9,480 | #include <stdio.h>
#include<chrono>
// Measure how long the first CUDA runtime call takes (cudaEventCreate here
// triggers implicit context initialization), using std::chrono wall time.
int main() {
    const auto begin = std::chrono::steady_clock::now();
    cudaEvent_t ev;
    cudaEventCreate(&ev);
    const auto end = std::chrono::steady_clock::now();
    const std::chrono::duration<double> elapsed = end - begin;
    printf("init takes %fs\n", elapsed.count());
}
|
9,481 | #include <sys/time.h>
#include <stdio.h>
#include <signal.h>
#include "gpu_boolean.cu"
struct timeval start, end;
FILE* data;
unsigned long max_iterations(int index);
// Record the wall-clock start time into the global `start` timeval.
void starttime() {
    gettimeofday(&start, 0);
}
// Return milliseconds of wall-clock time elapsed since the last starttime().
double endtime() {
    gettimeofday(&end, 0);
    double secs  = (double)(end.tv_sec  - start.tv_sec);
    double usecs = (double)(end.tv_usec - start.tv_usec);
    return secs * 1000.0 + usecs / 1000.0;
}
// function handler for SIGNALS to avoid loss of data
// Signal handler: close the data file on SIGINT/SIGTERM so buffered results
// are not lost, then restore the default disposition and re-raise the signal
// so the process still dies with the expected status.
void handle_sigint(int sig) {
    const bool terminating = (sig == SIGTERM) || (sig == SIGINT);
    if (terminating && data)
        fclose(data);
    signal(sig, SIG_DFL);
    raise(sig);
}
/*
 * Benchmark driver: for each boolean expression index, launch the GPU
 * matcher with several core counts and record the runtimes both to
 * ./data.csv and to stdout.
 */
void test() {
    data = fopen("./data.csv", "w");
    /* BUGFIX: fopen was unchecked; fail fast instead of crashing in fputs. */
    if (!data) {
        fprintf(stderr, "cannot open ./data.csv\n");
        return;
    }
    //Save data to file
    fputs("Operation,Number of iterations,Number of cores,Runtime\n", data);
    //Output data to console
    printf("Operation\tNumber of iterations\tNumber of cores\tRuntime\n");
    //Run through all expressions
    for (int i = 0; i <= 59; i++) {
        int numThreads = 1024;
        if (i == 16) i = 22;   // expressions 17..21 are skipped
        unsigned long maxIterations = max_iterations(i);
        for (int j = 4; j > 0; j--) {
            // Shrink the grid as j grows; clamp to [1, 2^31 - 1] blocks.
            unsigned long numCores = maxIterations/(numThreads*j);
            if (numCores == 0) numCores = 1;
            if (numCores >= (((unsigned long)1 << 31) - 1))
                numCores = (((unsigned long)1 << 31) - 1);
            //Save data to file
            fprintf(data, "%d,%lu,%lu,", i, maxIterations, numCores);
            //Output data to console
            printf("%d\t%lu\t%lu\t", i, maxIterations, numCores);
            starttime();
            //Cuda code
            char* gpuResult;
            char result = 1;
            cudaMalloc(&gpuResult, sizeof(char));
            cudaMemcpy(gpuResult, &result, sizeof(char), cudaMemcpyHostToDevice);
            gpu_boolean_matcher<<<numCores, numThreads>>>(gpuResult, i);
            cudaMemcpy(&result, gpuResult, sizeof(char), cudaMemcpyDeviceToHost);
            /* BUGFIX: was cudaFree(&gpuResult) — passing the ADDRESS of the
             * host pointer instead of the device pointer, which leaked the
             * device allocation on every iteration. */
            cudaFree(gpuResult);
            double runtime = endtime();
            //Save data to file
            fprintf(data, "%f ms\n", runtime);
            //Output data to console
            printf("%f ms\n", runtime);
            fflush(stdout);
            fflush(data);
        }
    }
    fclose(data);
}
/*
 * Number of truth-table rows (2^#variables) for boolean expression `index`.
 * The per-expression variable counts are tabulated; out-of-range indices
 * (including 0, which has no expression) return 0, matching the fall-through
 * behaviour of the original switch statement.
 */
unsigned long max_iterations(int index) {
    /* kShift[i] == number of boolean variables in expression #i (1..59). */
    static const int kShift[60] = {
        /*  0- 9 */  0,  2,  2,  2,  3,  4,  5, 20, 21, 25,
        /* 10-19 */ 21, 30, 21, 39, 45, 50, 60, 61, 60, 62,
        /* 20-29 */ 60, 63,  2,  4,  5,  8,  9, 12, 10, 12,
        /* 30-39 */ 16, 18, 18, 21, 24, 26, 28, 29, 27, 29,
        /* 40-49 */ 31, 34, 35, 36, 40, 39, 42, 43, 43, 41,
        /* 50-59 */ 40, 51, 52, 54, 47, 55, 60, 57, 58, 61
    };
    if (index < 1 || index > 59)
        return 0;
    return ((unsigned long) 1) << kShift[index];
}
// Entry point: install the cleanup handler so data.csv is closed on
// termination, then run the full benchmark sweep.
int main() {
    // BUGFIX: handle_sigint handles both SIGINT and SIGTERM, but only
    // SIGINT was previously registered — a SIGTERM would have skipped the
    // file-close cleanup entirely.
    signal(SIGINT, handle_sigint);
    signal(SIGTERM, handle_sigint);
    test();
    return 0;
}
|
9,482 | // nvcc evert-cli.cu -o evert-cli -use_fast_math && ./evert-cli
#include <stdint.h>
#include <stdio.h>
#define M_TAU 6.283185
// PIXEL SHADER data!
#define EV_NFRAMES 3
// MESH shader data! @theta is the AZIMUTHAL parameter; @v is the POLAR parameter!
#define EV_EPSILON 0.001f
#define EV_NSTRIPS 8
#define EV_THETA_MIN (0)
#define EV_PHI_MIN (0 + EV_EPSILON)
#define EV_THETA_MAX ((8./EV_NSTRIPS)*M_TAU) // 8
#define EV_PHI_MAX ((2./2) *M_PI) // 2
#define EV_THETA_NVERTS (1* 8*(EV_THETA_MAX-EV_THETA_MIN)/M_TAU*EV_NSTRIPS)
#define EV_PHI_NVERTS (1*12*(EV_PHI_MAX -EV_PHI_MIN) /M_PI *2)
#define EV_NLIGHTS 7
#define EV_RGB_FRONT 0xff6666
#define EV_RGB_BACK 0x1188ff
// STAGE times!
#define EV_CORRUGATE_TDEL 1.f
#define EV_PUSH_TDEL 2.f
#define EV_TWIST_TDEL 6.f
#define EV_UNPUSH_TDEL 2.f
#define EV_UNCORRUGATE_TDEL 1.f
#define EV_CORRUGATE_TINI (0.f)
#define EV_PUSH_TINI (EV_CORRUGATE_TINI+EV_CORRUGATE_TDEL)
#define EV_TWIST_TINI (EV_PUSH_TINI +EV_PUSH_TDEL)
#define EV_UNPUSH_TINI (EV_TWIST_TINI +EV_TWIST_TDEL)
#define EV_UNCORRUGATE_TINI (EV_UNPUSH_TINI +EV_UNPUSH_TDEL)
#define EV_TMIN (EV_CORRUGATE_TINI)
#define EV_TMAX (EV_CORRUGATE_TINI + EV_CORRUGATE_TDEL+EV_PUSH_TDEL+EV_TWIST_TDEL+EV_UNPUSH_TDEL+EV_UNCORRUGATE_TDEL)
typedef float f32;
typedef uint32_t u32;
// ----------------------------------------------------------------------------------------------------------------------------#
struct vec3{ // Just a simple 3D vector of f32, usable on host and device.
union{ // Access the `vec3` using array notation or by the name of a component.
f32 data[3];
struct{ f32 x0, x1, x2; };
};
__device__ __host__ vec3(){} // Deliberately leaves components uninitialized.
__device__ __host__ vec3(f32 a0, f32 a1, f32 a2){ this->x0=a0; this->x1=a1; this->x2=a2; }
__device__ __host__ f32 operator[](int idx){ return this->data[idx]; } // Read-only access, returns by value.
};
// Component-wise vec3 arithmetic (host and device). The named members
// x0/x1/x2 alias data[0..2] through the union, so this is equivalent to
// indexed access.
__device__ __host__ vec3 operator*(f32 s, vec3 v){ return vec3(s*v.x0, s*v.x1, s*v.x2); }                      // scalar scale
__device__ __host__ vec3 operator+(vec3 v0, vec3 v1){ return vec3(v0.x0+v1.x0, v0.x1+v1.x1, v0.x2+v1.x2); }    // addition
__device__ __host__ vec3 operator-(vec3 v0, vec3 v1){ return vec3(v0.x0-v1.x0, v0.x1-v1.x1, v0.x2-v1.x2); }    // subtraction
__device__ __host__ vec3 operator*(vec3 v0, vec3 v1){ return vec3(v0.x0*v1.x0, v0.x1*v1.x1, v0.x2*v1.x2); }    // Hadamard product
// ----------------------------------------------------------------------------------------------------------------------------#
// Clamp each component to [0,1] with the hardware saturate instruction (device only).
__forceinline__ __device__ vec3 clamp01(vec3 v){ return {__saturatef(v[0]), __saturatef(v[1]), __saturatef(v[2])}; }
// Unpack a packed 0xRRGGBB integer into per-channel floats in [0,1].
// NOTE(review): `255.` is a double literal, so each channel is computed in
// double and then narrowed to f32 by the vec3 constructor.
__forceinline__ __device__ vec3 bgr8u_to_rgbf32(u32 bgr8u){
return vec3(((bgr8u>>0x10) & 0xff)/255.,
((bgr8u>>0x08) & 0xff)/255.,
((bgr8u>>0x00) & 0xff)/255.);
}
// Pack [0,1] floats back into a 0xRRGGBB integer, rounding to nearest via +0.5.
__forceinline__ __device__ u32 rgbf32_to_bgr8u(vec3 rgbf32){
return ((u32)(255.*rgbf32[0] + .5) << 0x10) |
((u32)(255.*rgbf32[1] + .5) << 0x08) |
((u32)(255.*rgbf32[2] + .5) << 0x00);
}
// ----------------------------------------------------------------------------------------------------------------------------#
// @section Geometric data structures! Each geometric primitive needs its own intersection routine!
// A triangle stored in vertex + two-edge form (vert0, vert1-vert0, vert2-vert0),
// plus per-side base colors for rendering.
struct triangle_t{
// Intersection data!
vec3 vert0; // Geometry: main vertex!
vec3 edge01; // Geometry: vert1 - vert0
vec3 edge02; // Geometry: vert2 - vert0
// Rendering data!
u32 albedo_front; // Lighting: albedo (base color) of the front face, packed 0xRRGGBB.
u32 albedo_back; // Lighting: albedo (base color) of the back face, packed 0xRRGGBB.
};
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @block evert code, by Nathaniel Thurston!
// @section A 1-jet, aka. a first-order jet, aka. a scalar field (evaluated at some point) together with its 1st-order partial derivatives (evaluated at some point)!
// A 1-jet: the value of a 2D scalar field f(u,v) at a point together with its
// first-order partials fu = df/du and fv = df/dv at that point. Arithmetic on
// jets (below) propagates the derivatives automatically.
struct jet{
f32 f; // Scalar value of the 2D scalar field at the evaluation point.
f32 fu, fv; // 1st-order partial derivatives at the evaluation point.
__forceinline__ __device__ jet(){} // Uninitialized.
__forceinline__ __device__ jet(f32 s){ f=s; fu=0; fv=0; } // Constant field: zero derivatives.
__forceinline__ __device__ jet(f32 s, f32 su, f32 sv){ f=s; fu=su; fv=sv; }
};
// Unary negation: negate the value and both partials.
__forceinline__ __device__ jet operator-(jet x){ return {-x.f, -x.fu, -x.fv}; }
// Sum/difference rules: derivatives add/subtract component-wise.
__forceinline__ __device__ jet operator+(jet x0, jet x1){ return {x0.f + x1.f, x0.fu + x1.fu, x0.fv + x1.fv}; }
__forceinline__ __device__ jet operator-(jet x0, jet x1){ return {x0.f - x1.f, x0.fu - x1.fu, x0.fv - x1.fv}; }
__forceinline__ __device__ jet operator*(jet x0, jet x1){ // Product rule: (x0*x1)' = x0'*x1 + x0*x1'.
return {x0.f *x1.f,
x0.fu*x1.f + x0.f*x1.fu,
x0.fv*x1.f + x0.f*x1.fv};
}
// Wrap the value into [0, s); the partials are left unchanged.
__forceinline__ __device__ jet operator%(jet x, f32 s){
x.f = fmod(x.f, s);
if(x.f<0) x.f += s;
return x;
}
__forceinline__ __device__ jet operator^(jet x, f32 s){ // Power rule: d(x^s) = s*x^(s-1)*dx, with s*f0/x.f reusing f0.
f32 f0 = powf(x.f, s);
f32 f1 = x.f==0 ? 0 : s*f0/x.f; // Avoid division by zero
return {f0, f1*x.fu, f1*x.fv};
}
__forceinline__ __device__ jet operator/(jet x0, jet x1){ return x0 * (x1^-1); } // Quotient via the power rule: x0 * x1^-1.
// Linear interpolation (1-t)*x0 + t*x1, with t itself a jet.
__forceinline__ __device__ jet ev_interpolate( jet x0, jet x1, jet t){ return (jet(1)-t)*x0 + t*x1; }
// Promote one partial to a plain value (its own partials are dropped).
// Keep the partial WRT u, or the partial WRT v? It's a bug to pass a derivative index other than 0 or 1
__forceinline__ __device__ jet ev_partial_diff(jet x, int idx){ return {idx==0 ? x.fu : x.fv, 0,0}; }
// Chain rule through cosine/sine of a scalar field.
__forceinline__ __device__ jet ev_cos(jet x){ f32 c=cosf(x.f); f32 dc=-sinf(x.f); return {c, dc*x.fu, dc*x.fv}; }
__forceinline__ __device__ jet ev_sin(jet x){ f32 s=sinf(x.f); f32 ds= cosf(x.f); return {s, ds*x.fu, ds*x.fv}; }
// ----------------------------------------------------------------------------------------------------------------------------#
// @section A 3D vector of 1-jets!
// A 3D vector whose components are 1-jets: a point in R3 together with its
// first-order partials with respect to the two surface parameters.
struct vjet{
jet x0, x1, x2;
};
// Component-wise vector algebra; the jet arithmetic carries the derivatives.
__forceinline__ __device__ vjet operator*(jet s, vjet v){ return {s*v.x0, s*v.x1, s*v.x2}; } // Scalar multiplication!
__forceinline__ __device__ vjet operator+(vjet v0, vjet v1){ return {v0.x0 + v1.x0, v0.x1 + v1.x1, v0.x2 + v1.x2}; } // Vector addition!
__forceinline__ __device__ vjet operator-(vjet v0, vjet v1){ return {v0.x0 - v1.x0, v0.x1 - v1.x1, v0.x2 - v1.x2}; } // Vector subtraction!
__forceinline__ __device__ jet ev_dot( vjet v0, vjet v1){ return v0.x0*v1.x0 + v0.x1*v1.x1 + v0.x2*v1.x2; }
__forceinline__ __device__ vjet ev_cross(vjet v0, vjet v1){ // Homology of R3: 0 --> 1 --> 2 --> 0 --> 1 --> 2 --> 0 --> ...
return {v0.x1*v1.x2 - v0.x2*v1.x1, // 0 --> 1 --> 2
v0.x2*v1.x0 - v0.x0*v1.x2, // 1 --> 2 --> 0
v0.x0*v1.x1 - v0.x1*v1.x0}; // 2 --> 0 --> 1
}
// Normalize via s = (v.v)^(-1/2); the zero vector maps to the zero vector.
__forceinline__ __device__ vjet ev_normalize(vjet v){
jet s = ev_dot(v,v);
if(s.f>0) s = s^-.5; // Avoid division by zero!
else s = jet(0);
return s*v;
}
// Linear interpolation and per-component partial extraction (see jet versions).
__forceinline__ __device__ vjet ev_interpolate( vjet v0, vjet v1, jet t){ return (jet(1)-t)*v0 + t*v1; }
__forceinline__ __device__ vjet ev_partial_diff(vjet v, int idx){ return {ev_partial_diff(v.x0,idx), ev_partial_diff(v.x1,idx), ev_partial_diff(v.x2,idx)}; }
// ----------------------------------------------------------------------------------------------------------------------------#
// @section A quaternion of 1-jets!
// A quaternion of 1-jets: x0 is the scalar part, (x1,x2,x3) the vector part.
struct qjet{
jet x0, x1, x2, x3;
__forceinline__ __device__ qjet(jet a0, jet a1, jet a2, jet a3){ x0=a0; x1=a1; x2=a2; x3=a3; }
__forceinline__ __device__ qjet(jet s, vjet v){ x0=s; x1=v.x0; x2=v.x1; x3=v.x2; }
};
// Hamilton product of two quaternions.
__forceinline__ __device__ qjet operator*(qjet q0, qjet q1){
return {q0.x0*q1.x0 - q0.x1*q1.x1 - q0.x2*q1.x2 - q0.x3*q1.x3,
q0.x0*q1.x1 + q0.x1*q1.x0 + q0.x2*q1.x3 - q0.x3*q1.x2,
q0.x0*q1.x2 - q0.x1*q1.x3 + q0.x2*q1.x0 + q0.x3*q1.x1,
q0.x0*q1.x3 + q0.x1*q1.x2 - q0.x2*q1.x1 + q0.x3*q1.x0};
}
__forceinline__ __device__ qjet ev_conj(qjet q){ return {q.x0, -q.x1, -q.x2, -q.x3}; } // The quaternion inverse of a quaternion `q` is just `conj(q) / quad(q)`, just like for complex numbers!
// Unit quaternion (versor) encoding a rotation by `angle` about axis `dir`.
__forceinline__ __device__ qjet ev_versor(jet angle, vjet dir){
return {ev_cos(.5*angle), ev_sin(.5*angle)*ev_normalize(dir)}; // If @dir isn't a `direction vector` (ie. a unit vector), then the rotation speed is not constant, methinks!
}
// Rotate v by conjugation: take the vector part of conj(q) * (0,v) * q.
__forceinline__ __device__ vjet ev_rot3d(vjet v, qjet versor){
qjet p_rot = ev_conj(versor) * qjet(0,v) * versor; // Right-conjugation by @versor! The quaternion-conjugate of a unit-quaternion is its quaternion-inverse!
return {p_rot.x1, p_rot.x2, p_rot.x3};
}
// Convenience rotations about the coordinate axes.
__forceinline__ __device__ vjet ev_rotx(vjet v, jet angle){ return ev_rot3d(v, ev_versor(angle, {jet(1),jet(0),jet(0)})); }
__forceinline__ __device__ vjet ev_roty(vjet v, jet angle){ return ev_rot3d(v, ev_versor(angle, {jet(0),jet(1),jet(0)})); }
__forceinline__ __device__ vjet ev_rotz(vjet v, jet angle){ return ev_rot3d(v, ev_versor(angle, {jet(0),jet(0),jet(1)})); }
// ----------------------------------------------------------------------------------------------------------------------------#
// @section geometric deformations!
// Trace out a meridian arc: the azimuthal angle is fixed (encoded as jet(0,0,1),
// i.e. value 0 with unit v-derivative) while phi sweeps the polar direction.
// The three radii allow independent scaling/flipping of each axis.
__forceinline__ __device__ vjet ev_sphere_arc(jet phi, f32 radius_x0, f32 radius_x1, f32 radius_x2){ // Trace out a meridian, since the horizontal angle is fixed!
jet s0 = radius_x0 * ev_sin(jet(0,0,1)) * ev_sin(phi); // Keep the horizontal angle constant, vary the vertical angle!
jet s1 = radius_x1 * ev_cos(jet(0,0,1)) * ev_sin(phi); // Keep the horizontal angle constant, vary the vertical angle!
jet s2 = radius_x2 * ev_cos(phi);
return {s0, s1, s2};
}
// Map the (0..pi) interval to itself with some curvature: a pair of parabolas
// joined at pi/2 (concave then convex).
__forceinline__ __device__ jet ev_phi_deform0(jet phi){ // Map the (0..pi) interval to itself, but with some curvature!
if(phi.f <= M_PI/2) return -2/M_PI*(phi^2) + 2*phi;
else return 2/M_PI*(phi^2) - 2*phi + jet(M_PI);
}
// Piecewise-parabolic remap of phi; the two halves use opposite curvatures.
__forceinline__ __device__ jet ev_phi_deform1(jet phi){ // Map (0..xi) to (0..xi) with some curvature, and map (xi..pi) to (5xi..6xi) with some curvature!
if(phi.f <= M_PI/2) return 2/M_PI*(phi^2);
else return -2/M_PI*(phi^2) + 4*phi + jet(M_PI);
}
// Smoothstep-like cubic blend weight, symmetric about pi/2 (phi is mirrored
// onto (0..pi/2] first).
__forceinline__ __device__ jet ev_phi_deform2(jet phi){
if(phi.f > M_PI/2) phi = jet(M_PI) - phi;
return -16/(M_PI*M_PI*M_PI)*(phi^3) + 12/(M_PI*M_PI)*(phi^2); // -16/pi^3 * phi^3 + 12/pi^2 * phi^2
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @section eversion stages! A 3-dimensional vjet (of 1-jets, 2-jets, or 3-jets, or k-jets, or even of a mixture of jets of various degrees) always represents a point in R3! So, if the output of a function is a vjet, then we can always render its output as a vertex in R3, to visualize what in Equestria it does!
// ----------------------------------------------------------------------------------------------------------------------------#
// low-level stages!
// Stage 1: the plain unit sphere. Stage 4: the sphere with all radii negated.
// Stages 2/3: blends of two deformed meridian arcs, weighted by ev_phi_deform2.
__forceinline__ __device__ vjet ev_stage1(jet phi){ return ev_sphere_arc(phi,+1,+1,+1); }
__forceinline__ __device__ vjet ev_stage2(jet phi){ return ev_interpolate(ev_sphere_arc(ev_phi_deform0(phi),+.9,+.9,-1), ev_sphere_arc(ev_phi_deform1(phi),+1,+1,+.5), ev_phi_deform2(phi)); }
__forceinline__ __device__ vjet ev_stage3(jet phi){ return ev_interpolate(ev_sphere_arc(ev_phi_deform0(phi),-.9,-.9,-1), ev_sphere_arc(ev_phi_deform1(phi),-1,+1,-.5), ev_phi_deform2(phi)); }
__forceinline__ __device__ vjet ev_stage4(jet phi){ return ev_sphere_arc(phi,-1,-1,-1); }
// ----------------------------------------------------------------------------------------------------------------------------#
// mid-level stages!
// Blend between consecutive stages; t in [0,1] is the per-phase animation time.
__forceinline__ __device__ vjet ev_scene12(jet phi, f32 t){ return ev_interpolate(ev_stage1(phi), ev_stage2(phi), jet(t,0,0)); }
__forceinline__ __device__ vjet ev_scene23(jet phi, f32 t){ // The heart of the TWIST stage! Notice the rotations here! =D
t *= .5;
f32 tt = (phi.f<=M_PI/2) ? t : -t; // lower hemisphere twists in the opposite direction
vjet rot_z = ev_rotz(ev_sphere_arc(ev_phi_deform0(phi),+0.9,+0.9,-1.0), M_TAU*jet(tt,0,0));
vjet rot_y = ev_roty(ev_sphere_arc(ev_phi_deform1(phi),+1.0,+1.0,+0.5), M_TAU*jet(t, 0,0));
return ev_interpolate(rot_z, rot_y, ev_phi_deform2(phi));
}
__forceinline__ __device__ vjet ev_scene34(jet phi, f32 t){ return ev_interpolate(ev_stage3(phi), ev_stage4(phi), jet(t,0,0)); }
// ----------------------------------------------------------------------------------------------------------------------------#
// high-level stages!
// Build one figure-eight corrugation profile in the local frame (w,h) with an
// extra `bend` offset; `form` in [0,1] morphs between a simple bump and the
// full figure eight. At the end of the twisting phase the corrugations have
// nearly become figure eights!
__forceinline__ __device__ vjet ev_figure8(vjet w, vjet h, vjet bend, jet form, jet theta){
theta = theta%1; // wrap the strip parameter into [0,1)
jet height = 1 - ev_cos(2*M_TAU*theta);
if(.25<theta.f && theta.f<.75) height = 4-height; // mirror the middle half of the profile
height = .6*height;
h = h + (height*height)/(8*8) * bend;
form = 2*form - form*form; // ease-out: remap form with 2f - f^2
return ev_sin(2*M_TAU*theta)*w + ev_interpolate(2-2*ev_cos(M_TAU*theta), height, form)*h;
}
// Superimpose the figure-eight corrugation onto surface point p, oriented by
// the local tangent frame derived from p's partials, then spin by theta.
__forceinline__ __device__ vjet ev_add_figure8(vjet p, jet theta, jet phi, jet form){
jet size = -0.2 * ev_phi_deform2(phi) * form; // 0.2 is like a scale constant?
vjet du = ev_normalize(ev_partial_diff(p,0)); // Is this the partial with respect to theta, or with respect to phi?
vjet dv = ev_normalize(ev_partial_diff(p,1)); // Is this the partial with respect to theta, or with respect to phi?
vjet h = 1.0*size * ev_normalize(ev_cross(du,dv)); // surface normal scaled by size
vjet w = 1.1*size * ev_normalize(ev_cross(h, du)); // The 1.1 factor gives more thickness/width to the corrugations?
vjet bend = ev_partial_diff(size,0)/ev_partial_diff(phi,0) * du; // slope of size along the strip
vjet fig8 = ev_figure8(w,h, bend, form, (f32)EV_NSTRIPS/(f32)M_TAU*theta);
return ev_rotz(p+fig8, theta);
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @section eversion phases!
// The five eversion phases, each parameterized by (theta, phi) and a phase-local
// time t in [0,1]. Corrugate/uncorrugate ramp the figure-eight amplitude in/out
// (jet(t) resp. jet(1-t)); the middle phases keep full amplitude while the
// underlying stage blend animates.
__device__ vjet ev_corrugate( jet theta, jet phi, f32 t){ vjet p=ev_stage1( phi ); return ev_add_figure8(p, theta,phi, jet(t) *ev_phi_deform2(phi)); }
__device__ vjet ev_push( jet theta, jet phi, f32 t){ vjet p=ev_scene12(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi)); }
__device__ vjet ev_twist( jet theta, jet phi, f32 t){ vjet p=ev_scene23(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi)); }
__device__ vjet ev_unpush( jet theta, jet phi, f32 t){ vjet p=ev_scene34(phi,t); return ev_add_figure8(p, theta,phi, jet(1) *ev_phi_deform2(phi)); }
__device__ vjet ev_uncorrugate(jet theta, jet phi, f32 t){ vjet p=ev_stage4( phi ); return ev_add_figure8(p, theta,phi, jet(1-t)*ev_phi_deform2(phi)); }
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// @section This is the "mesh driver", ie. it takes to Nathaniel Thurton's jet/evert stuff and actually creates the sphere eversion animation! It creates vertex coordinates OUT OF THIN AIR (ie. out of kernel coordinates), per FRAME! How sexy is that? 0.5M triangles in 0.5ms!
// Mesh-driver kernel: evaluates the sphere-eversion surface at time `t` on a
// (theta, phi) parameter grid and writes 2 triangles per grid cell into `triangles`.
// Expected launch: a 2D grid whose total x-threads cover theta_nverts and whose
// y-threads cover phi_nverts exactly (there is no bounds guard — the host launch
// config must match; TODO confirm grid dims always divide evenly).
__global__ void ker_mesh_shader(f32 t, u32 theta_nverts, u32 phi_nverts, triangle_t* triangles){
  u32 x = blockIdx.x*blockDim.x + threadIdx.x;
  u32 y = blockIdx.y*blockDim.y + threadIdx.y;
  u32 thr_idx = (blockIdx.y*gridDim.x + blockIdx.x) * (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x;  // Global thread index, see richiesams blogspot!
  f32 theta_range = EV_THETA_MAX - EV_THETA_MIN;
  f32 phi_range   = EV_PHI_MAX   - EV_PHI_MIN;
  f32 theta = EV_THETA_MIN + (f32)x/theta_nverts * theta_range;  // Now is theta in [0 .. EV_THETA_MAX)
  f32 phi   = EV_PHI_MIN   + (f32)y/phi_nverts   * phi_range;    // Now is phi in (0 .. EV_PHI_MAX)
  f32 dtheta = 1 / (theta_nverts + .05*theta_nverts) * theta_range;  // Cell width; the .05 fudge slightly shrinks cells — presumably to leave seams, TODO confirm
  f32 dphi   = 1 / (phi_nverts   + .05*phi_nverts)   * phi_range;
  // The four corners of this grid cell, evaluated through the phase of the
  // eversion that `t` falls into.  Each phase normalizes its local time to [0,1].
  // WARN! For some reason we need to subtract the time by EV_EPSILON?
  vjet vert0_jet, vert1_jet, vert2_jet, vert3_jet;
  if(t-EV_EPSILON < EV_CORRUGATE_TINI+EV_CORRUGATE_TDEL){
    f32 tl = (t-EV_CORRUGATE_TINI)/EV_CORRUGATE_TDEL;
    vert0_jet = ev_corrugate(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert1_jet = ev_corrugate(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), tl);
    vert2_jet = ev_corrugate(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert3_jet = ev_corrugate(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), tl);
  }else if(t-EV_EPSILON < EV_PUSH_TINI+EV_PUSH_TDEL){
    f32 tl = (t-EV_PUSH_TINI)/EV_PUSH_TDEL;
    vert0_jet = ev_push(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert1_jet = ev_push(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), tl);
    vert2_jet = ev_push(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert3_jet = ev_push(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), tl);
  }else if(t-EV_EPSILON < EV_TWIST_TINI+EV_TWIST_TDEL){
    f32 tl = (t-EV_TWIST_TINI)/EV_TWIST_TDEL;
    vert0_jet = ev_twist(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert1_jet = ev_twist(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), tl);
    vert2_jet = ev_twist(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert3_jet = ev_twist(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), tl);
  }else if(t-EV_EPSILON < EV_UNPUSH_TINI+EV_UNPUSH_TDEL){
    f32 tl = (t-EV_UNPUSH_TINI)/EV_UNPUSH_TDEL;
    vert0_jet = ev_unpush(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert1_jet = ev_unpush(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), tl);
    vert2_jet = ev_unpush(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert3_jet = ev_unpush(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), tl);
  }else if(t-EV_EPSILON < EV_UNCORRUGATE_TINI+EV_UNCORRUGATE_TDEL){
    f32 tl = (t-EV_UNCORRUGATE_TINI)/EV_UNCORRUGATE_TDEL;
    vert0_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert1_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), tl);
    vert2_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), tl);
    vert3_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), tl);
  }else{
    // BUGFIX: previously the jets were left UNINITIALIZED when `t` fell past the
    // last phase (undefined behavior / garbage triangles).  Clamp to the final,
    // fully-uncorrugated state instead.
    vert0_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 0*dphi, 1,0), 1.f);
    vert1_jet = ev_uncorrugate(jet(theta + 0*dtheta), jet(phi + 1*dphi, 1,0), 1.f);
    vert2_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 0*dphi, 1,0), 1.f);
    vert3_jet = ev_uncorrugate(jet(theta + 1*dtheta), jet(phi + 1*dphi, 1,0), 1.f);
  }
  // ----------------------------------------------------------------------------------------------------------------------------#
  // Drop the derivative parts of each jet: the vertex position is the 0-jet value.
  vec3 vert0 = vec3(vert0_jet.x0.f, vert0_jet.x1.f, vert0_jet.x2.f);
  vec3 vert1 = vec3(vert1_jet.x0.f, vert1_jet.x1.f, vert1_jet.x2.f);
  vec3 vert2 = vec3(vert2_jet.x0.f, vert2_jet.x1.f, vert2_jet.x2.f);
  vec3 vert3 = vec3(vert3_jet.x0.f, vert3_jet.x1.f, vert3_jet.x2.f);
  vec3 color0 = bgr8u_to_rgbf32(EV_RGB_FRONT);  // sin(theta): as `theta` goes from 0 to TAU, `sin(theta)` goes from 0 to 0
  vec3 color1 = bgr8u_to_rgbf32(EV_RGB_BACK);   // sin(2*phi): as `phi` goes from 0 to PI, `sin(2*phi)` goes from 0 to 0
  vec3 dcolor0 = .15 * vec3(0,0,(sin(theta)+1)/2);  // theta-dependent tint so the corrugations are visible
  vec3 dcolor1 = .30 * vec3((sin(theta)+1)/2,0,0);
  triangle_t triangle;
  triangle.albedo_front = rgbf32_to_bgr8u(clamp01(color0 + dcolor0));
  triangle.albedo_back  = rgbf32_to_bgr8u(clamp01(color1 + dcolor1));
  // Two triangles per quad cell, stored as (vert0, edge01, edge02).
  triangle.vert0=vert0; triangle.edge01=vert1-vert0; triangle.edge02=vert2-vert0; triangles[2*thr_idx+0]=triangle;
  triangle.vert0=vert3; triangle.edge01=vert2-vert3; triangle.edge02=vert1-vert3; triangles[2*thr_idx+1]=triangle;
}
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// ----------------------------------------------------------------------------------------------------------------------------#
// Host driver: allocates the triangle buffers, then for each frame launches the
// mesh-shader kernel and copies the triangles back for printing.
// Improvement over the original: CUDA launch/copy errors are now checked instead
// of being silently ignored.
int main(){
  cudaSetDevice(0);
  u32 theta_nverts = ceilf(EV_THETA_NVERTS);
  u32 phi_nverts = ceilf(EV_PHI_NVERTS);
  f32 t = EV_TMIN;
  f32 dt = (EV_TMAX-EV_TMIN) / (EV_NFRAMES-1);
  dim3 MESH_BLOCK_DIM = {1,1,1};  // {1,1,1} {8,8,1} {32,32,1} // nthreads per block; max on Titan V is 1024!
  dim3 MESH_GRID_DIM = {theta_nverts/MESH_BLOCK_DIM.x, phi_nverts/MESH_BLOCK_DIM.y, 1};  // nblocks per grid; NOTE(review): truncates if nverts isn't a multiple of the block dim (fine while block dims are 1)
  triangle_t* triangles_gpu; cudaMalloc( &triangles_gpu, sizeof(triangle_t)*theta_nverts*phi_nverts*2);
  triangle_t* triangles_cpu; cudaMallocHost(&triangles_cpu, sizeof(triangle_t)*theta_nverts*phi_nverts*2);
  // ----------------------------------------------------------------
  printf("nframes \x1b[94m%d\x1b[0m\n", EV_NFRAMES);
  printf("ntriangles \x1b[94m%'u\x1b[0m\n", theta_nverts*phi_nverts*2);
  printf("theta nverts \x1b[94m%'u\x1b[0m\n", theta_nverts);
  printf("phi nverts \x1b[94m%'u\x1b[0m\n", phi_nverts);
  printf("mesh grid \x1b[94m%d %d\x1b[0m\n", MESH_GRID_DIM.x, MESH_GRID_DIM.y);
  // ----------------------------------------------------------------
  for(int frame=0; frame<EV_NFRAMES; ++frame){
    ker_mesh_shader<<<MESH_GRID_DIM,MESH_BLOCK_DIM>>>(t,theta_nverts,phi_nverts, triangles_gpu);
    cudaError_t err = cudaGetLastError();  // catches bad launch configs
    if(err != cudaSuccess){ fprintf(stderr, "CUDA launch error: %s\n", cudaGetErrorString(err)); return 1; }
    // Blocking copy: also synchronizes, so async kernel faults surface here.
    err = cudaMemcpy(triangles_cpu, triangles_gpu, sizeof(triangle_t)*theta_nverts*phi_nverts*2, cudaMemcpyDeviceToHost);
    if(err != cudaSuccess){ fprintf(stderr, "CUDA memcpy error: %s\n", cudaGetErrorString(err)); return 1; }
    t += dt;
    printf("\nframe:\x1b[91m%d\x1b[0m\n", frame);
    for(int i=0; i<theta_nverts*phi_nverts*2; ++i)
      printf("triangle:\x1b[35m%4d\x1b[0m v0:\x1b[31m%6.3f\x1b[0m \x1b[32m%6.3f\x1b[0m \x1b[94m%6.3f\x1b[0m e01:\x1b[31m%6.3f\x1b[0m \x1b[32m%6.3f\x1b[0m \x1b[94m%6.3f\x1b[0m e02:\x1b[31m%6.3f\x1b[0m \x1b[32m%6.3f\x1b[0m \x1b[94m%6.3f\x1b[0m f:\x1b[35m%06x\x1b[0m b:\x1b[94m%06x\x1b[0m\n",
        i, triangles_cpu[i].vert0[0],triangles_cpu[i].vert0[1],triangles_cpu[i].vert0[2], triangles_cpu[i].edge01[0],triangles_cpu[i].edge01[1],triangles_cpu[i].edge01[2], triangles_cpu[i].edge02[0],triangles_cpu[i].edge02[1],triangles_cpu[i].edge02[2], triangles_cpu[i].albedo_front, triangles_cpu[i].albedo_back);
  }
  // ----------------------------------------------------------------
  cudaFree(triangles_gpu);
  cudaFreeHost(triangles_cpu);
  cudaDeviceReset();
}
|
9,483 | extern "C"
#define TILE_WIDTH 16
// Compute C = A * B
// Compute C = A * B with shared-memory tiling.
// Launch with TILE_WIDTH x TILE_WIDTH thread blocks; the grid must cover the
// numCRows x numCColumns output. Out-of-range tile elements are loaded as 0 so
// no bounds check is needed in the inner product loop.
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns) {
  __shared__ float tileA[TILE_WIDTH][TILE_WIDTH];
  __shared__ float tileB[TILE_WIDTH][TILE_WIDTH];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int row = blockIdx.y * blockDim.y + ty;  // output row this thread owns
  const int col = blockIdx.x * blockDim.x + tx;  // output column this thread owns
  const int numTiles = (numAColumns - 1) / TILE_WIDTH + 1;  // ceil(numAColumns / TILE_WIDTH)
  float acc = 0;
  for (int tile = 0; tile < numTiles; ++tile) {
    // Stage one tile of A and one tile of B into shared memory (zero-padded).
    const int aCol = tile * TILE_WIDTH + tx;
    const int bRow = tile * TILE_WIDTH + ty;
    tileA[ty][tx] = (row < numARows && aCol < numAColumns) ? A[row * numAColumns + aCol] : 0.0;
    tileB[ty][tx] = (bRow < numBRows && col < numBColumns) ? B[bRow * numBColumns + col] : 0.0;
    __syncthreads();  // tiles fully loaded before anyone reads them
    for (int k = 0; k < TILE_WIDTH; ++k) {
      acc += tileA[ty][k] * tileB[k][tx];
    }
    __syncthreads();  // everyone done reading before the next load overwrites
  }
  if (row < numCRows && col < numCColumns) {
    C[row * numCColumns + col] = acc;
  }
}
9,484 | #include "includes.h"
// TGV dual-variable update (masked): updates p (first-order dual) and q
// (second-order dual) for one pixel, using anisotropic-tensor-weighted
// gradients, then reprojects both onto the unit ball.
// Forward differences are used where the right/down neighbour is inside the
// mask; backward differences otherwise.
// NOTE(review): the backward-difference branches read `left`/`up` without
// checking ix > 0 / iy > 0 — this assumes the mask is zero on the image
// border; confirm with the caller.
__global__ void TgvUpdateDualVariablesTGVMaskedKernel(float* mask, float* u_, float2 *v_, float alpha0, float alpha1, float sigma, float eta_p, float eta_q, float* a, float* b, float*c, float4* grad_v, float2* p, float4* q, int width, int height, int stride)
{
  int iy = blockIdx.y * blockDim.y + threadIdx.y;  // current row
  int ix = blockIdx.x * blockDim.x + threadIdx.x;  // current column
  // BUGFIX: the original guard used && — a thread out of range in only ONE
  // dimension fell through and indexed out of bounds.
  if ((iy >= height) || (ix >= width)) return;
  int pos = ix + iy * stride;
  if (mask[pos] == 0.0f) return;  // pixel outside the region of interest
  //p[pos] = make_float2(0.0f, 0.0f);
  //q[pos] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
  int right = (ix + 1) + iy * stride;
  int down = ix + (iy + 1) * stride;
  int left = (ix - 1) + iy * stride;
  int up = ix + (iy - 1) * stride;
  // Neighbour mask values; out-of-image neighbours count as masked-out.
  float maskRight, maskDown;
  if (ix + 1 >= width) {
    maskRight = 0.0f;
  }
  else maskRight = mask[right];
  if (iy + 1 >= height) {
    maskDown = 0.0f;
  }
  else maskDown = mask[down];
  float u_pos = u_[pos];
  float2 v_pos = v_[pos];
  //u_x = dxp(u_) - v_(:, : , 1);
  float u_x, u_y;
  if (maskRight != 0.0f)
    u_x = u_[right] - u_pos - v_pos.x;   // forward difference
  else
    u_x = u_pos - u_[left] - v_pos.x;    // backward difference at the mask edge
  //u_y = dyp(u_) - v_(:, : , 2);
  if (maskDown != 0.0f)
    u_y = u_[down] - u_pos - v_pos.y;
  else
    u_y = u_pos - u_[up] - v_pos.y;
  //du_tensor_x = a.*u_x + c.*u_y;
  float du_tensor_x = a[pos] * u_x + c[pos] * u_y;
  //du_tensor_y = c.*u_x + b.*u_y;
  float du_tensor_y = c[pos] * u_x + b[pos] * u_y;
  // Gradient-ascent step on p, then reprojection onto the unit disc.
  float2 ppos;
  //p(:, : , 1) = p(:, : , 1) + alpha1*sigma / eta_p.*du_tensor_x;
  ppos.x = p[pos].x + (alpha1*sigma / eta_p) * du_tensor_x;
  //p(:, : , 2) = p(:, : , 2) + alpha1*sigma / eta_p.*du_tensor_y;
  ppos.y = p[pos].y + (alpha1*sigma / eta_p) * du_tensor_y;
  //reprojection = max(1.0, sqrt(p(:, : , 1). ^ 2 + p(:, : , 2). ^ 2));
  float reprojection = sqrtf(ppos.x * ppos.x + ppos.y * ppos.y);
  if (reprojection < 1.0f) {
    reprojection = 1.0f;
  }
  //p(:, : , 1) = p(:, : , 1). / reprojection;
  p[pos].x = ppos.x / reprojection;
  //p(:, : , 2) = p(:, : , 2). / reprojection;
  p[pos].y = ppos.y / reprojection;
  // Symmetrized gradient of v (4 components), same forward/backward rule.
  //grad_v(:, : , 1) = dxp(v_(:, : , 1));
  float4 grad_v_pos;
  if (maskRight != 0.0f)
    grad_v_pos.x = v_[right].x - v_pos.x;
  else
    grad_v_pos.x = v_pos.x - v_[left].x;
  //grad_v(:, : , 2) = dyp(v_(:, : , 2));
  if (maskDown != 0.0f)
    grad_v_pos.y = v_[down].y - v_pos.y;
  else
    grad_v_pos.y = v_pos.y - v_[up].y;
  //grad_v(:, : , 3) = dyp(v_(:, : , 1));
  if (maskDown != 0.0f)
    grad_v_pos.z = v_[down].x - v_pos.x;
  else
    grad_v_pos.z = v_pos.x - v_[up].x;
  //grad_v(:, : , 4) = dxp(v_(:, : , 2));
  if (maskRight != 0.0f)
    grad_v_pos.w = v_[right].y - v_pos.y;
  else
    grad_v_pos.w = v_pos.y - v_[left].y;
  grad_v[pos] = grad_v_pos;
  // Gradient-ascent step on q, then reprojection onto the unit ball in R^4.
  //q = q + alpha0*sigma / eta_q.*grad_v;
  float ase = alpha0 * sigma / eta_q;
  float4 qpos;
  qpos.x = q[pos].x + ase * grad_v_pos.x;
  qpos.y = q[pos].y + ase * grad_v_pos.y;
  qpos.z = q[pos].z + ase * grad_v_pos.z;
  qpos.w = q[pos].w + ase * grad_v_pos.w;
  //reproject = max(1.0, sqrt(q1^2 + q2^2 + q3^2 + q4^2));
  float reproject = sqrtf(qpos.x * qpos.x + qpos.y * qpos.y + qpos.z * qpos.z + qpos.w * qpos.w);
  if (reproject < 1.0f) {
    reproject = 1.0f;
  }
  q[pos].x = qpos.x / reproject;
  q[pos].y = qpos.y / reproject;
  q[pos].z = qpos.z / reproject;
  q[pos].w = qpos.w / reproject;
}
9,485 | #include <inttypes.h>
#include <stdio.h>
#ifndef block_size_x
#define block_size_x 256
#endif
#ifndef use_shared
#define use_shared 1
#endif
#ifndef window_width
#define window_width 1500
#endif
#ifndef write_rows
#define write_rows 1
#endif
#ifndef shared_memory_size
#define shared_memory_size 12*block_size_x
#endif
/*
* This kernel creates a sparse representation of the densely stored correlations table.
*
* In addition to the correlations table, this kernel needs a precomputed prefix_sums array. This
* array contains the inclusive prefix sums of the degrees of the nodes in the correlations table.
* In other words, it is an array with one element per hit, containing the sum over the total
* number of hits correlated with the hits up to and including that hit in the correlations table.
*
* Output arguments are row_idx and col_idx, which contain the (hit id, hit id) pairs that describe
* the correlated hits.
*
*/
// Converts the dense correlations table into sparse (row_idx, col_idx) pairs.
// One thread per hit; each thread writes its edges starting at the offset given
// by the exclusive prefix sum (prefix_sums[i-1]).
// When use_shared == 1, column indices are first staged in shared memory and
// written back to global memory collaboratively at the end.
// NOTE(review): with use_shared == 1 the edges of one block must fit in
// shared_memory_size ints (default 12 per thread) — confirm this bound holds
// for the data being processed.
__global__ void dense2sparse_kernel(int *row_idx, int *__restrict__ col_idx, int *__restrict__ prefix_sums, uint8_t * correlations, int n) {
    int i = blockIdx.x * block_size_x + threadIdx.x;
    #if use_shared == 1
    __shared__ int sh_col_idx[shared_memory_size];
    // block_start = number of edges emitted by all earlier blocks; used to
    // rebase this block's global offsets into the shared staging buffer.
    int block_start = 0;
    if (blockIdx.x > 0) {
        block_start = prefix_sums[blockIdx.x * block_size_x - 1];
    }
    #endif
    if (i<n) {
        //get the offset to where output should be written
        int offset = 0;
        if (i>0) {
            offset = prefix_sums[i-1];
        }
        //see how much work there is on this row
        //int end = prefix_sums[i];
        //collect the edges to nodes with lower id
        // Hits near the start of the table have fewer than window_width
        // predecessors, so the loop bound differs in the two branches.
        if (i<window_width) {
            for (int j=i-1; j>=0; j--) {
                int col = i-j-1;
                // correlations is indexed [window row j][hit col]; 64-bit math
                // because j*n can overflow 32 bits for large tables.
                uint64_t pos = (j * (uint64_t)n) + (uint64_t) (col);
                if (correlations[pos] == 1) {
                    #if write_rows
                    row_idx[offset] = i;
                    #endif
                    #if use_shared == 1
                    sh_col_idx[offset - block_start] = col;
                    #else
                    col_idx[offset] = col;
                    #endif
                    offset += 1;
                }
            }
        } else {
            // f_unroll is an optional tuning parameter; when undefined none of
            // these branches match and no unroll pragma is emitted.
            #if f_unroll == 2
            #pragma unroll 2
            #elif f_unroll == 3
            #pragma unroll 3
            #elif f_unroll == 4
            #pragma unroll 4
            #elif f_unroll == 5
            #pragma unroll 5
            #elif f_unroll == 6
            #pragma unroll 6
            #endif
            for (int j=window_width-1; j>=0; j--) {
                int col = i-j-1;
                uint64_t pos = (j * (uint64_t)n) + (uint64_t) (col);
                if (correlations[pos] == 1) {
                    #if write_rows
                    row_idx[offset] = i;
                    #endif
                    #if use_shared == 1
                    sh_col_idx[offset - block_start] = col;
                    #else
                    col_idx[offset] = col;
                    #endif
                    offset += 1;
                }
            }
        }
        //collect the edges to nodes with higher id
        #if f_unroll == 2
        #pragma unroll 2
        #elif f_unroll == 3
        #pragma unroll 3
        #elif f_unroll == 4
        #pragma unroll 4
        #elif f_unroll == 5
        #pragma unroll 5
        #elif f_unroll == 6
        #pragma unroll 6
        #endif
        for (int j=0; j<window_width; j++) {
            uint64_t pos = (j * (uint64_t)n) + (uint64_t)i;
            if (correlations[pos] == 1) {
                #if write_rows
                row_idx[offset] = i;
                #endif
                #if use_shared == 1
                sh_col_idx[offset - block_start] = i+j+1;
                #else
                col_idx[offset] = i+j+1;
                #endif
                offset += 1;
            }
        }
    }
    //collaboratively write back the output collected in shared memory to global memory
    #if use_shared == 1
    // block_stop = global offset one past the last edge this block produced.
    int block_stop;
    int last_i = blockIdx.x * block_size_x + block_size_x-1;
    if (last_i < n) {
        block_stop = prefix_sums[last_i];
    } else {
        block_stop = prefix_sums[n-1];
    }
    __syncthreads(); //ensure all threads are done writing shared memory
    // Coalesced flush: consecutive threads write consecutive col_idx entries.
    for (int k=block_start+threadIdx.x; k<block_stop; k+=block_size_x) {
        col_idx[k] = sh_col_idx[k-block_start];
    }
    #endif
}
|
9,486 |
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// im2col: each thread copies one ksize x ksize patch (stride 1, no padding,
// based on the index arithmetic below) from its channel's input plane into the
// column buffer `data_col`.
// Expected launch: 2D threads covering (width_col, height_col), blockIdx.z = channel.
__global__ void im2colN(float* data_col, const float* ori_data,
    const int height, const int width, const int channels, const int ksize,
    const int height_col, const int width_col) {
    int poolx = threadIdx.x + blockIdx.x * blockDim.x;  // output column
    int pooly = threadIdx.y + blockIdx.y * blockDim.y;  // output row
    int poolz = blockIdx.z;                             // channel
    if (pooly < height_col && poolx < width_col) {
        // Top-left corner of the input window read by this thread.
        int hstart = pooly;
        int hend = pooly + ksize;
        int wstart = poolx;
        int wend = poolx + ksize;
        // NOTE(review): the linear patch index uses height_col as the row
        // stride; for a non-square output (height_col != width_col) this looks
        // like it should be `pooly * width_col + poolx` — confirm with callers.
        int patchNum = pooly * height_col + poolx;
        ori_data += poolz * height * width;          // this channel's input plane
        data_col += height_col * width_col * poolz;  // this channel's first output plane
        data_col += patchNum;                        // this thread's column within the plane
        for (int h = hstart; h < hend; h++) {
            for (int w = wstart; w < wend; w++) {
                // Successive window elements land one full (H_col*W_col*channels)
                // plane apart, i.e. one output row of the column matrix.
                *data_col = ori_data[h * width + w];
                data_col += height_col * width_col * channels;
            }
        }
    }
}
|
9,487 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
typedef struct Vertex {
int data;
int visited;
int xpos;
int ypos;
} Vertex;
typedef struct Node {
int data;
int* pos;
struct Node* next;
} Node;
// Flatten the size x size 2-D grid `g` into the 1-D array *arr (row-major),
// recording each vertex's (row, col) grid position in xpos/ypos.
void transfer1(Vertex** g, Vertex** arr, int size) {
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            Vertex* dst = &(*arr)[row * size + col];
            *dst = g[row][col];
            dst->xpos = row;
            dst->ypos = col;
        }
    }
}
// Copy the `visited` flags from the flat array `arr` (row-major) back into the
// 2-D grid *graph.
void transfer2(Vertex*** graph, Vertex* arr, int size) {
    Vertex** g = *graph;
    for (int row = 0; row < size; ++row)
        for (int col = 0; col < size; ++col)
            g[row][col].visited = arr[col + row * size].visited;
}
// Debug print: first the data grid (holes, data == -1, shown as blanks), then
// a blank line, then the visited-flag grid with the same hole layout.
void display(Vertex** graph, int size) {
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            if (graph[i][j].data != -1) {
                // Both branches currently print the same format; presumably the
                // <10 case was meant to pad single digits — TODO confirm.
                if (graph[i][j].data < 10) {
                    printf("%d ", graph[i][j].data);
                }
                else {
                    printf("%d ", graph[i][j].data);
                }
            }
            else {
                printf(" ");  // hole
            }
        }
        printf("\n");
    }
    printf("\n");
    // Second pass: visited flags (0/1).
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            if (graph[i][j].data != -1) {
                printf("%d ", graph[i][j].visited);
            }
            else {
                printf(" ");
            }
        }
        printf("\n");
    }
}
// Free a graph allocated as `size` row arrays plus the row-pointer array.
void freeGraph(Vertex** graph, int size) {
    for (int row = 0; row < size; ++row) {
        free(graph[row]);
    }
    free(graph);
}
// If every neighbour of (i, j) listed in `off` currently holds -1 (a hole),
// refill all of them with fresh random values in [0, 100).  Offsets are
// visited in a fixed per-case order so the rand() stream matches exactly.
static void refillIfAllHoles(Vertex** graph, int i, int j, const int off[][2], int count) {
    for (int k = 0; k < count; ++k)
        if (graph[i + off[k][0]][j + off[k][1]].data != -1) return;
    for (int k = 0; k < count; ++k)
        graph[i + off[k][0]][j + off[k][1]].data = rand() % 100;
}

// Post-process a randomly generated grid: for each cell, if ALL of its
// neighbours are holes (data == -1) they are refilled with random data.
// The neighbour set and its visitation order depend on whether the cell is a
// corner, an edge cell, or interior (matching the original case-by-case code).
Vertex** build(Vertex** g, int size) {
    // Offset tables, one per boundary case, in the original's rand() order.
    static const int topLeft[][2]     = {{1,0},{0,1},{1,1}};
    static const int topRight[][2]    = {{1,0},{0,-1},{1,-1}};
    static const int topEdge[][2]     = {{0,-1},{0,1},{1,-1},{1,0},{1,1}};
    static const int bottomLeft[][2]  = {{-1,0},{0,1},{-1,1}};
    static const int bottomRight[][2] = {{-1,0},{0,-1},{-1,-1}};
    static const int bottomEdge[][2]  = {{0,-1},{0,1},{-1,-1},{-1,0},{-1,1}};
    static const int leftEdge[][2]    = {{-1,0},{-1,1},{0,1},{1,0},{1,1}};
    static const int rightEdge[][2]   = {{-1,-1},{-1,0},{0,-1},{1,-1},{1,0}};
    static const int interior[][2]    = {{-1,-1},{-1,0},{-1,1},{0,-1},{0,1},{1,-1},{1,0},{1,1}};
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            if (i == 0) {
                if (j == 0)              refillIfAllHoles(g, i, j, topLeft, 3);
                else if (j == size - 1)  refillIfAllHoles(g, i, j, topRight, 3);
                else                     refillIfAllHoles(g, i, j, topEdge, 5);
            }
            else if (i == size - 1) {
                if (j == 0)              refillIfAllHoles(g, i, j, bottomLeft, 3);
                else if (j == size - 1)  refillIfAllHoles(g, i, j, bottomRight, 3);
                else                     refillIfAllHoles(g, i, j, bottomEdge, 5);
            }
            else {
                if (j == 0)              refillIfAllHoles(g, i, j, leftEdge, 5);
                else if (j == size - 1)  refillIfAllHoles(g, i, j, rightEdge, 5);
                else                     refillIfAllHoles(g, i, j, interior, 8);
            }
        }
    }
    return g;
}
// Allocate and populate a size x size grid: each cell gets a random value in
// [0, 100) with probability 100/150, otherwise -1 (a hole); visited starts at 0.
// The grid is then post-processed by build().
void makeGraph(Vertex*** graph, int size) {
    *graph = (Vertex**)malloc(sizeof(Vertex*) * size);
    srand((unsigned int)time(NULL));
    for (int row = 0; row < size; ++row) {
        Vertex* cells = (Vertex*)malloc(sizeof(Vertex) * size);
        for (int col = 0; col < size; ++col) {
            int r = rand() % 150;
            cells[col].data = (r < 100) ? r : -1;
            cells[col].visited = 0;
        }
        (*graph)[row] = cells;
    }
    *graph = build(*graph, size);
}
// Serialize the grid's data values to `fname` as size*size raw ints (row-major).
void writeGraphTo(Vertex** graph, char* fname, long int size) {
    FILE* f = fopen(fname, "wb");
    int* flat = (int*)malloc(sizeof(int) * size * size);
    for (int r = 0; r < size; ++r)
        for (int c = 0; c < size; ++c)
            flat[c + r * size] = graph[r][c].data;
    fwrite(flat, sizeof(int), size * size, f);
    fclose(f);
    free(flat);
}
// Deserialize a size x size grid of ints from `fname` into a freshly allocated
// graph (visited flags reset to 0).  Returns NULL if the file cannot be opened.
// Fixes over the original: the temp buffer is no longer leaked on the
// open-failure path, the FILE handle is now closed, and the row-pointer array
// is sized size (not size*size).
Vertex** readGraphFrom(char* fname, long int size) {
    FILE *fp = fopen(fname, "rb");
    if (fp == NULL) {
        printf("Possible file reading error....\n");
        return NULL;
    }
    int* arr = (int*)malloc(sizeof(int) * size * size);
    fread(arr, sizeof(int), size*size, fp);
    fclose(fp);  // original leaked this handle
    // size row pointers, each to a row of `size` vertices.
    Vertex** graph = (Vertex**)malloc(sizeof(Vertex*) * size);
    for (int i = 0; i < size; ++i) {
        graph[i] = (Vertex*)malloc(sizeof(Vertex)*size);
        for (int j = 0; j < size; ++j) {
            graph[i][j].data = arr[j + i*size];
            graph[i][j].visited = 0;
        }
    }
    free(arr);
    return graph;
}
// Mark every in-bounds, non-hole, not-yet-visited 8-neighbour of the vertex at
// curr.pos as visited.  A hole vertex (data == -1) marks nothing.
// Net effect is identical to the original's case-by-case branches: marking is
// idempotent (flags are only ever set to 1).
__device__ void bfs_gpu_thread(Vertex* graph, int size, Node curr) {
    int row = curr.pos[0];
    int col = curr.pos[1];
    if (graph[row*size + col].data == -1) return;
    for (int dr = -1; dr <= 1; ++dr) {
        for (int dc = -1; dc <= 1; ++dc) {
            if (dr == 0 && dc == 0) continue;  // skip the vertex itself
            int r = row + dr;
            int c = col + dc;
            if (r < 0 || r >= size || c < 0 || c >= size) continue;
            Vertex* nb = &graph[r*size + c];
            if (nb->data != -1 && nb->visited == 0) {
                nb->visited = 1;
            }
        }
    }
}
// One thread per vertex: each thread marks its vertex's 8-neighbourhood via
// bfs_gpu_thread.
// Fixes over the original: the per-thread device malloc (unchecked — a NULL
// return would have been dereferenced) is replaced by a stack array; a bounds
// guard is added; the redundant loop that re-applied the same idempotent
// marking size*size - idx times is collapsed to a single call; the trailing
// __syncthreads() (no shared state to protect, and divergent once a guard
// exists) is removed.
__global__ void bfs_gpu_kernel(Vertex* graph, int size) {
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= size*size) return;
    int pos[2] = { graph[idx].xpos, graph[idx].ypos };  // (row, col) of this vertex
    Node startNode;
    startNode.data = graph[idx].data;
    startNode.pos = pos;
    startNode.next = NULL;
    bfs_gpu_thread(graph, size, startNode);
}
// Driver: load (or generate) a 100x100 grid, run the GPU neighbourhood-marking
// kernel, time it, and copy the visited flags back.
// Fixes over the original: cudaFree was called on the ADDRESS of the host
// pointer variable (&graph_dev_copy) instead of the device pointer itself, and
// the kernel was timed without synchronizing — the clock measured only the
// asynchronous launch, not the execution.
int main(int argc, char* argv[]) {
    if(argc != 2){
        printf("Usage: ./bfsg <file name>\n");
        exit(1);
    } else{
        int SIZE = 100;
        int THREADS_PER_BLOCK = 100;
        srand((unsigned int)time(NULL));
        char* fname = argv[1];
        int done = 0;
        Vertex** g = readGraphFrom(fname, SIZE);
        if (g == NULL) {
            makeGraph(&g, SIZE);  // no file yet: generate and remember to save it
            done = 1;
        }
        // Flatten the host graph for the device.
        Vertex* graph_dev = (Vertex*)malloc(sizeof(*graph_dev) * SIZE * SIZE);
        transfer1(g, &graph_dev, SIZE);
        Vertex* graph_dev_copy;
        cudaSetDevice(0);
        cudaMalloc((void**)&graph_dev_copy, sizeof(Vertex) * SIZE * SIZE);
        cudaMemcpy(graph_dev_copy, graph_dev, sizeof(Vertex) * SIZE * SIZE, cudaMemcpyHostToDevice);
        clock_t start = clock();
        bfs_gpu_kernel<<<(SIZE*SIZE)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(graph_dev_copy, SIZE);
        cudaDeviceSynchronize();  // BUGFIX: wait for the kernel so the timing is meaningful
        clock_t stop = clock();
        float total = (float)((stop - start) / (float)CLOCKS_PER_SEC);
        cudaMemcpy(graph_dev, graph_dev_copy, sizeof(Vertex) * SIZE * SIZE, cudaMemcpyDeviceToHost);
        transfer2(&g, graph_dev, SIZE);
        cudaFree(graph_dev_copy);  // BUGFIX: was cudaFree(&graph_dev_copy)
        //display(g, SIZE);
        printf("\n");
        printf("BFS in GPU took: %f\n", total);
        if (done) {
            writeGraphTo(g, fname, SIZE);
        }
        freeGraph(g, SIZE);
        free(graph_dev);
        return 0;
    }
}
9,488 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
//Function that verify cuda calls and return cuda error if any
#define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with its source location; optionally terminate the
// process with the error code.  Used through the gpuCheck() macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess) return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
//Initialise ascendant array with random values in
// Fill `array` with `size` non-decreasing values: array[0] in [0, adder) and
// each subsequent element adding a random increment in [0, adder).
// BUGFIX: the original loop started at i = 0, reading array[-1] (out-of-bounds)
// and clobbering the seeded first element.
void init_array(int* array, int size, int const adder=10)
{
    if (size <= 0) return;  // nothing to fill
    array[0] = rand()%adder;
    for(int i = 1; i < size;i++)
    {
        array[i] = array[i-1] + rand()%adder;
    }
}
//Function that initialise array with random values
// Fill `array` with `size` independent random values in [0, adder).
// The leading write of array[0] is immediately overwritten by the loop, but it
// is kept so the number of rand() calls (and hence the random stream seen by
// later callers) matches the original exactly.
void init_array_no_order(int* array, int size, int const adder=10)
{
    array[0] = rand()%adder;  // redundant, preserved for rand()-stream parity
    for(int i = 0; i < size;i++)
    {
        array[i] = rand()%adder;
    }
}
//Function that copy array in another
// Copy the first n ints of `a` into `a_copy`.
void copy_array(int* a, int* a_copy, int n){
    for(int idx = 0; idx < n; ++idx){
        a_copy[idx] = a[idx];
    }
}
//Function that print an array of size size
// Print the first `size` ints of `a` as "[v0 v1 ... ]" followed by a newline.
void print_array(int* a, int size)
{
    printf("[");
    for(int k = 0; k < size; k++)
    {
        printf("%d " , a[k]);
    }
    printf("]\n");
}
//Device version of parallel merge of a and b in m with |m|<1024
// Kernel: each block merges, in place inside m, one pair of adjacent sorted
// runs of length `size` using the GPU Merge Path binary search.  Intended
// launch: 2*size threads per block, size < 1024.
// NOTE(review): d_a/d_b are __shared__ POINTERS, yet EVERY thread of the block
// calls malloc() and stores into them — that is a data race, only one
// allocation per pointer survives, and none of them is ever freed (device-heap
// leak).  One thread should allocate (or these should be fixed-size __shared__
// arrays).  Also, the __syncthreads() calls sit inside divergent branches
// (i < n, L2 < n), which is undefined if the block straddles the array end.
__global__ void mergeSmall_k(int* m, int n, int size)
{
    int gbx = blockIdx.x;
    int tidx = threadIdx.x;
    int i = gbx * blockDim.x + tidx;
    if(i < n)
    {
        // [L1..R1] left run, [L2..R2] right run of this block's pair.
        int L1, R1, L2, R2;
        L1 = gbx*blockDim.x;
        R1 = gbx*blockDim.x + size-1;
        L2 = gbx*blockDim.x + size;
        R2 = gbx*blockDim.x + 2*size-1;
        if(L2 < n)  // nothing to merge if the right run starts past the end
        {
            // printf("L1 : %d, R1 : %d, L2 : %d, R2 : %d\n", L1, R1, L2, R2);
            if(R2 >= n){
                R2 = n-1;  // clamp the right run at the array end
            }
            __shared__ int *d_a, *d_b;  // NOTE(review): shared pointers — see header note
            int n_a = R1-L1+1;
            int n_b = R2-L2+1;
            int n_m = n_a+n_b;
            d_a = (int*)malloc(n_a*sizeof(int));
            d_b = (int*)malloc(n_b*sizeof(int));
            __syncthreads();
            // Stage the two runs into the freshly allocated buffers:
            // threads [0, n_a) copy the left run, threads [n_a, n_m) the right.
            // printf("tidx : %d, n_a : %d\n", tidx, n_a);
            if (tidx < n_a)
            {
                // printf("m[%d] : %d\n", i, m[i]);
                d_a[tidx] = m[i];
                // printf("d_a_%d[%d] = %d\n", gbx, tidx, d_a[tidx]);
            }
            else if (tidx < n_m)
            {
                d_b[tidx - n_a] = m[i];
                // printf("d_b_%d[%d] = %d\n", gbx, tidx - n_a, d_b[tidx - n_a]);
            }
            __syncthreads();
            // Merge Path: binary-search along this thread's anti-diagonal for
            // the unique crossing point (Q) of the merge path; K and P are the
            // current low/high ends of the search segment.
            int2 K;
            int2 P;
            int2 Q;
            // printf("n_a : %d, n_b : %d\n", n_a, n_b);
            if(tidx > n_a)
            {
                K.x = tidx - n_a;
                K.y = n_a;
                P.x = n_a;
                P.y = tidx - n_a;
            }
            else
            {
                K.x = 0;
                K.y = tidx;
                P.x = tidx;
                P.y = 0;
            }
            int offset = 0;
            while(1)
            {
                offset = abs(K.y - P.y)/2;
                Q.x = K.x + offset;
                Q.y = K.y - offset;
                // Q.y indexes d_a, Q.x indexes d_b; the two conditions test
                // whether the merge path passes through Q.
                if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
                {
                    if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
                    {
                        // Crossing found: emit the smaller head element at
                        // output slot tidx (written in place into m).
                        if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x]))
                        {
                            m[i] = d_a[Q.y];
                            // printf("## m[%d] : %d, d_a_%d[%d] : %d\n",i, m[i], gbx, Q.y, d_a[Q.y]);
                        }
                        else
                        {
                            m[i] = d_b[Q.x];
                            // printf("## m[%d] : %d, d_b_%d[%d] : %d\n",i, m[i], gbx, Q.x, d_b[Q.x]);
                        }
                        break;
                    }
                    else
                    {
                        // Path lies lower-right: move the low end down.
                        K.x = Q.x + 1;
                        K.y = Q.y - 1;
                    }
                }
                else
                {
                    // Path lies upper-left: move the high end up.
                    P.x = Q.x - 1;
                    P.y = Q.y + 1;
                }
            }
            __syncthreads();
        }
    }
}
/**
Parallel version of merge of A and B with |A| + |B| <= 1024
@param d_a, d_b : device versions of arrays to merge
d_m : device version of merge of a and b
n_a, n_b, n_b : respective sizes of d_a, d_b, d_m
*/
__device__ void mergeSmall_k(int* d_a, int* d_b, int* d_m, int n_a, int n_b, int n_m){
    // Merge Path: thread i finds, by binary search along its anti-diagonal,
    // how many elements of d_a and d_b precede output slot i, then writes the
    // smaller head element to d_m[i].  One thread per output element; requires
    // n_m <= blockDim.x.
    int i = threadIdx.x;
    if(i < n_m)
    {
        // K and P are the two ends of the search segment on diagonal i;
        // Q is its midpoint.  (.y indexes d_a, .x indexes d_b.)
        int2 K;
        int2 P;
        int2 Q;
        if(i > n_a)
        {
            // Diagonal starts past the end of d_a.
            K.x = i - n_a;
            K.y = n_a;
            P.x = n_a;
            P.y = i - n_a;
        }
        else
        {
            K.x = 0;
            K.y = i;
            P.x = i;
            P.y = 0;
        }
        int offset = 0;
        while(1)
        {
            offset = abs(K.y - P.y)/2;
            Q.x = K.x + offset;
            Q.y = K.y - offset;
            // The merge path passes below-left of Q?
            if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
            {
                // ...and above-right of Q? Then Q is the crossing point.
                if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
                {
                    // Emit whichever head element is smaller (ties favor d_a,
                    // keeping the merge stable).
                    if(Q.y < n_a && (Q.x == n_b || d_a[Q.y] <= d_b[Q.x]))
                    {
                        d_m[i] = d_a[Q.y];
                    }
                    else
                    {
                        d_m[i] = d_b[Q.x];
                    }
                    break;
                }
                else
                {
                    // Crossing is further down the diagonal.
                    K.x = Q.x + 1;
                    K.y = Q.y - 1;
                }
            }
            else
            {
                // Crossing is further up the diagonal.
                P.x = Q.x - 1;
                P.y = Q.y + 1;
            }
        }
    }
}
//Giving a path ( from pathBig_k ) each block merge (with mergeParallel) each piece a_k and b_k in m_k of a and b. Then it replace elements in m
// Given a partition path (from pathBig_k), each block merges its piece a_k and
// b_k of a and b into m_k, then writes the result back into m.
// NOTE(review): the actual merge call is commented out below, so m_k is never
// written — m[i] currently receives UNINITIALIZED shared memory.  This kernel
// looks unfinished.
__global__ void mergeBig_k(int *m, int n_m, int *a, int n_a, int *b, int n_b, int2 *path, int n_path, int nbPartitions, int size)
{
    int blockId = blockIdx.x;
    int threadId = threadIdx.x;
    int i = blockId * blockDim.x + threadId;
    if (blockId <= nbPartitions)  // one block per partition
    {
        // Consecutive path points delimit this block's slice of a and b.
        int x0, y0, x1, y1;
        x0 = path[blockId].x;
        y0 = path[blockId].y;
        x1 = path[blockId+1].x;
        y1 = path[blockId+1].y;
        const int dimx=x1-x0;   // number of a-elements in this slice
        const int dimy = y1-y0; // number of b-elements in this slice
        // TODO: size these by dimx / dimy / dimx+dimy instead of a fixed 1024.
        __shared__ int a_k[1024];
        __shared__ int b_k[1024];
        __shared__ int m_k[1024];
        if (threadId < dimx)  // threads [0, dimx) stage a_k
        {
            a_k[threadId] = a[x0+threadId];
        }
        else if (threadId < dimy+dimx)  // threads [dimx, dimx+dimy) stage b_k
        {
            b_k[threadId-dimx] = b[y0+threadId-dimx];
        }
        __syncthreads();
        // mergeParallel(m_k, dimx+dimy, size);
        m[i] = m_k[threadId];  // NOTE(review): m_k uninitialized — see header note
    }
}
//Function that generate a path to break down m into pieces that could be merge without conflict
//On appelle |m|/TPB blocks avec chacun un seul thread. Chaque thread s'occupe de la diagonale thread
// Computes a partition path that breaks the merge of a and b into independent
// pieces: every `pas`-th diagonal of the merge grid gets its Merge Path
// crossing point stored in `path`.  One thread per diagonal.
__global__ void pathBig_k(int pas, int2* path, int n_path , int* d_a, int n_a ,int* d_b, int n_b)
{
    int thread_i = blockIdx.x * blockDim.x + threadIdx.x;
    if(thread_i <= (n_a + n_b)/pas) // the thread handles diagonal thread_i*pas, if within range
    {
        int i = thread_i*pas;
        // Binary search on diagonal i, identical to mergeSmall_k: K/P are the
        // segment ends, Q the midpoint (.y indexes d_a, .x indexes d_b).
        int2 K;
        int2 P;
        int2 Q;
        if(i > n_a)
        {
            K.x = i - n_a;
            K.y = n_a;
            P.x = n_a;
            P.y = i - n_a;
        }
        else
        {
            K.x = 0;
            K.y = i;
            P.x = i;
            P.y = 0;
        }
        int offset = 0;
        while(1)
        {
            // Midpoint of P and K.
            offset = abs(K.y - P.y)/2;
            Q.x = K.x + offset;
            Q.y = K.y - offset;
            // Does the merge path pass below-left of Q?
            if(Q.y >= 0 && Q.x <= n_b && (Q.y == n_a || Q.x == 0 || d_a[Q.y] > d_b[Q.x - 1]))
            {
                // ...and above-right? Then Q is the crossing point: done.
                if(Q.x == n_b || Q.y == 0 || d_a[Q.y - 1] <= d_b[Q.x])
                {
                    break;
                }
                else
                {
                    K.x = Q.x + 1;
                    K.y = Q.y - 1;
                }
            }
            else
            {
                P.x = Q.x - 1;
                P.y = Q.y + 1;
            }
        }
        //printf("thread : %d => (%d, %d)\n", thread_i, Q.y, Q.x);
        // WARNING: note the x/y swap — path.x stores the a-coordinate (Q.y)
        // and path.y the b-coordinate (Q.x).
        path[thread_i].x=Q.y;
        path[thread_i].y=Q.x;
    }
    // If |m| is not a multiple of `pas`, thread 0 appends (n_a, n_b) as the
    // final path point.
    if (thread_i==0 && (n_a+n_b)%pas!=0)
    {
        //printf("thread : %d => (%d, %d)\n", thread_i, n_a, n_b);
        path[n_path-1].x=n_a;
        path[n_path-1].y=n_b;
    }
}
//Function that sort any array
// Iterative bottom-up merge sort of the device array mGPU (n_m ints), doubling
// the run length each pass and launching mergeSmall_k per pass.  After every
// pass the array is copied back and printed (debug).
// Known limitation (unchanged from the original): only merge widths < 1024 are
// handled, so arrays needing wider merges are left partially sorted.
// Fixes: the host mirror buffer is now freed (it leaked), the unused loop
// variable is gone, and the device synchronize happens BEFORE the copy-back.
void fusionSort(int *mGPU, int n_m)
{
    int size = 1;  // current sorted-run length
    int *m = (int*)malloc(n_m*sizeof(int));  // host mirror for debug printing
    while (size < n_m)
    {
        if (size < 1024)
        {
            printf("Size : %d\n", size);
            // One block per pair of runs, 2*size threads per block.
            mergeSmall_k<<<n_m/(2*size) + 1, 2*size>>>(mGPU, n_m, size);
            cudaDeviceSynchronize();  // wait for the pass before reading back
            gpuCheck(cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost));
            print_array(m, n_m);
        }
        size *= 2;
    }
    free(m);  // the original leaked this buffer
}
// Sequentially merges two adjacent sorted runs of A, A[L1..R1] and A[L2..R2],
// into tmp (tmp receives the merged elements starting at index 0).
void fusionMergeSeq(int* A, int* tmp, int L1, int R1, int L2, int R2){
int out = 0;
// While both runs are non-empty, take the smaller head element.
while(L1 <= R1 && L2 <= R2)
tmp[out++] = (A[L1] <= A[L2]) ? A[L1++] : A[L2++];
// Drain whichever run still has elements.
while(L1 <= R1)
tmp[out++] = A[L1++];
while(L2 <= R2)
tmp[out++] = A[L2++];
}
//Iterative (bottom-up) sequential merge sort of A[0..n-1], in place.
//The sorted-run length doubles on each outer pass.
void fusionSortSeq(int* A, int n){
int len = 1; // current sorted-run length
int L1, R1, L2, R2; // bounds of the two runs being merged
// Fix: the original realloc'ed tmp on every inner iteration without
// checking the result.  A single buffer of n ints is always enough,
// because every merged span is clamped to at most n elements.
int* tmp = (int*)malloc(n*sizeof(int));
if (tmp == NULL){
fprintf(stderr, "fusionSortSeq: allocation failed\n");
return;
}
while(len < n){
int i = 0;
while(i < n){
L1 = i;
R1 = i + len - 1;
L2 = i + len;
R2 = i + 2*len - 1;
if(L2 >= n){
break; // second run empty: the tail is already sorted
}
if(R2 >= n){
R2 = n - 1; // clamp the second run to the array end
}
fusionMergeSeq(A, tmp, L1, R1, L2, R2);
// Copy the merged span back into A.
for(int j = 0;j < R2-L1+1;j++){
A[i+j] = tmp[j];
}
i = i + 2*len;
}
len *= 2;
}
free(tmp);
}
//Function that sorts an array M in parallel by iterative merge sort (question 3)
//Verification helpers
//Function that checks an array is sorted (all elements in non-decreasing order)
//Checks that tab[0..size-1] is in non-decreasing order.
//Returns 1 when ordered; prints a diagnostic and returns 0 at the first
//inversion found.
int assertOrder(int *tab, int size){
int idx = 0;
while (idx < size-1){
if (tab[idx] <= tab[idx+1]){
idx++;
continue;
}
printf("WARNING : Unsuccessful merge or sort ... : unordered array on indice %d ...\n", idx);
printf("tab[i]= %d > tab[i+1] = %d\n", tab[idx], tab[idx+1]);
return 0;
}
return 1;
}
//Checks that every element of m (the merged array, length `size`) also
//appears in tab (length n1) or tab2 (length n2).
//Note: duplicate counting is approximate — an element of m is accepted as
//soon as any equal value exists in tab or tab2.
int assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size)
{
// Fix: heap allocation instead of the original VLA — `size` is
// caller-controlled, so a large VLA could overflow the stack (VLAs are
// also non-standard in C++).
int *verif = (int*)calloc(size, sizeof(int)); //1 where m[i] has been matched
if (verif == NULL){
fprintf(stderr, "assertMergeAllValuesPresent: allocation failed\n");
return 0;
}
for (int i=0; i<size; i++){
for(int j = 0;j < n1;j++){
if(tab[j] == m[i] && verif[i] == 0){ //matching value not yet checked off
verif[i] = 1;
}
}
}
for (int i=0; i<size; i++){
for(int j = 0;j < n2;j++){
if(tab2[j] == m[i] && verif[i] == 0){
verif[i] = 1;
}
}
}
for(int i = 0;i<size;i++){
if(verif[i] != 1){
printf("\nWARNING : Unsuccessful merge : incorrect elements...\n");
free(verif);
return 0;
}
}
free(verif);
return 1;
}
//Checks that every element of m appears somewhere in m_sorted (both of
//length `size`).  Duplicate multiplicities are not distinguished.
int assertSortAllValuesPresent(int* m, int* m_sorted, int size){
// Fix: heap allocation instead of a caller-sized VLA (stack-overflow risk,
// non-standard in C++).
int *verif = (int*)calloc(size, sizeof(int)); //1 where m[i] was found in m_sorted
if (verif == NULL){
fprintf(stderr, "assertSortAllValuesPresent: allocation failed\n");
return 0;
}
for (int i=0; i<size; i++){
for(int j = 0;j < size;j++){
if(m_sorted[j] == m[i]){ //matching value found
verif[i] = 1;
}
}
}
for(int i = 0;i<size;i++){
if(verif[i] != 1){
printf("i : %d\n", i);
printf("\nWARNING : Unsuccessful sort : incorrect elements...\n");
free(verif);
return 0;
}
}
free(verif);
return 1;
}
//Checks both that m is sorted and that it contains all elements of the two
//merged inputs.
//tab, tab2 : the two arrays that were merged
//m : the supposedly sorted merge of tab and tab2
int assertMerge(int *tab, int n1, int *tab2, int n2, int* m, int size){
// Run both checks unconditionally so each can print its own warning.
int orderOk = assertOrder(m, size);
int elementsOk = assertMergeAllValuesPresent(tab, n1, tab2, n2, m, size);
if (!orderOk || !elementsOk){
printf("\nUnsuccessful merge !\n");
return 0;
}
printf("\nSuccessful merge !\n");
return 1;
}
//Checks that m_sorted is a correctly ordered version of m.
//m : the original (unsorted) array
//m_sorted : the allegedly sorted version of m
//size : number of elements in both arrays
int assertSorted(int* m, int* m_sorted, int size)
{
// Run both checks unconditionally so each can print its own warning.
int orderOk = assertOrder(m_sorted, size); // elements in non-decreasing order?
int elementsOk = assertSortAllValuesPresent(m, m_sorted, size); // all values still present?
if (!orderOk || !elementsOk){
printf("\nUnsuccessful sort !\n");
return 0;
}
printf("\nSuccessful sort !\n");
return 1;
}
//Driver: sorts a random array both on the GPU (fusionSort) and on the CPU
//(fusionSortSeq), verifies the GPU result, and reports timings.
int main(int argc, char *argv[])
{
std::clock_t startS, endS;
float seqMergeTime, parMergeTime;
srand(time(NULL));
int n_m = 200; // default array size, overridable from the command line
int *m, *mseq, *mref, *mGPU;
if(argc==2)
{
n_m = atoi(argv[1]);
if (n_m <= 0) // fix: atoi result was used unchecked
{
fprintf(stderr, "Invalid size '%s'\n", argv[1]);
exit(EXIT_FAILURE);
}
}
printf("========== Path Sort : =========\n");
printf("* Size of array : %d\n\n", n_m);
m = (int*)malloc(n_m*sizeof(int));
init_array_no_order(m, n_m, n_m*10);
gpuCheck(cudaMalloc(&mGPU, n_m*sizeof(int)));
gpuCheck(cudaMemcpy(mGPU, m, n_m*sizeof(int), cudaMemcpyHostToDevice));
print_array(m, n_m);
mseq = (int*)malloc(n_m*sizeof(int)); //copy of m for the sequential sort
copy_array(m, mseq, n_m);
mref = (int*)malloc(n_m*sizeof(int)); //untouched reference copy of m
copy_array(m, mref, n_m);
//================ Parallel sort + timing ================
startS = std::clock();
fusionSort(mGPU, n_m);
cudaDeviceSynchronize();
endS = std::clock();
parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
gpuCheck(cudaMemcpy(m, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost));
//================ Sequential sort + timing ================
startS = std::clock();
fusionSortSeq(mseq, n_m);
endS = std::clock();
seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
printf("========= Parallel sort : =============\n");
printf("Total time elapsed : %f s\n", parMergeTime);
assertSorted(mref, m, n_m);
//Fix: the speedup line was previously printed twice with inconsistent labels.
printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime);
printf("========= Sequential sort : =============\n");
printf("Total time elapsed : %f s\n", seqMergeTime);
// assertSorted(mref, mseq, n_m);
//Fix: release host and device buffers (previously leaked).
free(m);
free(mseq);
free(mref);
cudaFree(mGPU);
return 0;
}
|
9,489 | #include <iostream>
using namespace std;
#define TYPE float
typedef TYPE T;
__constant__ float dev_box[4];
__constant__ int dev_threads[1];
__constant__ int dev_blocks[1];
__constant__ int dev_n_of_ints[1];
__constant__ int dev_n_of_func[1];
// Minimal interval-arithmetic type usable from both host and device code.
// Stores a closed interval [low, up]; arithmetic is provided by the free
// operator overloads below.
template<class T>
class interval_gpu
{
public:
__device__ __host__ interval_gpu();                       // uninitialized interval
__device__ __host__ interval_gpu(T const &v);             // degenerate interval [v, v]
__device__ __host__ interval_gpu(T const &l, T const &u); // interval [l, u]
__device__ __host__ T const &lower() const;
__device__ __host__ T const &upper() const;
static __device__ __host__ interval_gpu empty();          // declared but not defined in this file chunk
// Stream output in the form "[low:up]" (host only).
friend ostream& operator<<(ostream& os, const interval_gpu<T> &x){
os<<"["<<x.lower()<<":"<<x.upper()<<"]";return os;
}
private: T low; T up;
};
// Constructors
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(){} // default: members deliberately left uninitialized
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &v) :
low(v), up(v){} // degenerate interval [v, v]
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &l, T const &u) :
low(l), up(u){} // interval [l, u]
// Bound accessors
template<class T> inline __device__ __host__
T const &interval_gpu<T>::lower() const
{return low;}
template<class T> inline __device__ __host__
T const &interval_gpu<T>::upper() const
{return up;}
//OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD
// Interval addition: [a,b] + [c,d] = [a+c, b+d]
template<class T> inline __host__ __device__
interval_gpu<T> operator+(interval_gpu<T> const &x, interval_gpu<T> const &y)
{
return interval_gpu<T>(x.lower() + y.lower(), x.upper() + y.upper());
}
// Interval subtraction: [a,b] - [c,d] = [a-d, b-c]
template<class T> inline __host__ __device__
interval_gpu<T> operator-(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(x.lower() - y.upper(), x.upper() - y.lower());}
// Interval multiplication: min/max over the four endpoint products
template<class T> inline __host__ __device__
interval_gpu<T> operator*(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()*y.lower(),x.lower()*y.upper()),
min(x.upper()*y.lower(),x.upper()*y.upper())),
max(max(x.lower()*y.lower(),x.lower()*y.upper()),
max(x.upper()*y.lower(),x.upper()*y.upper())));}
// Interval division: min/max over the four endpoint quotients.
// NOTE(review): this is only valid when y does not contain 0 — a divisor
// interval straddling zero gives a meaningless result here; confirm callers
// never pass one.
template<class T> inline __host__ __device__
interval_gpu<T> operator/(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()/y.lower(),x.lower()/y.upper()),
min(x.upper()/y.lower(),x.upper()/y.upper())),
max(max(x.lower()/y.lower(),x.lower()/y.upper()),
max(x.upper()/y.lower(),x.upper()/y.upper())));}
// Constraint functions evaluated on an interval box x = (x[0], x[1]).
// Each returns how many of the two endpoint tests f < 0 hold:
// 0 = box surely violates the constraint, 2 = surely satisfies it,
// 1 = undecided (the box straddles the boundary).
__device__ int g1(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
// x0^2 + x1^2 - lmax^2 < 0 : inside the circle of radius lmax at the origin
interval_gpu<T> f(x[0]*x[0] + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ int g2(interval_gpu<T> *x){
interval_gpu<T> l(8);
// l^2 - x0^2 - x1^2 < 0 : outside the circle of radius l at the origin
interval_gpu<T> f(l*l - x[0]*x[0] - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ int g3(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
interval_gpu<T> l0(5);
// inside the circle of radius lmax centered at (l0, 0)
interval_gpu<T> f((x[0]-l0)*(x[0]-l0) + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__device__ int g4(interval_gpu<T> *x){
interval_gpu<T> l(8);
interval_gpu<T> l0(5);
// outside the circle of radius l centered at (l0, 0)
interval_gpu<T> f(l*l - (x[0]-l0)*(x[0]-l0) - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
// Device-side table of the constraint functions iterated by first_grid.
__constant__ int(*dev_func_pp[4])(interval_gpu<T>*) = {&g1,&g2,&g3,&g4};
// Evaluates every constraint function over one cell of the search box.
// Grid layout: threadIdx.x indexes 64 subdivisions of [box0,box1] (x[0]),
// blockIdx.x indexes 32 subdivisions of [box2,box3] (x[1]) — must be
// launched as <<<32, 64>>>.  res[cell] > 0 iff no constraint rules the
// cell out.  dev_n_of_ints[0] is expected to be >= 2 (x[0] and x[1] used).
template<class T>
__global__ void first_grid(int* res){
interval_gpu<T>* x = new interval_gpu<T>[dev_n_of_ints[0]];
x[0] = interval_gpu<T>(dev_box[0] + (threadIdx.x) * (dev_box[1] - dev_box[0])/64,
dev_box[0] +(1+threadIdx.x) * (dev_box[1] - dev_box[0])/64);
x[1] = interval_gpu<T>(dev_box[2] + (blockIdx.x) * (dev_box[3] - dev_box[2])/32,
dev_box[2] + (1+blockIdx.x) * (dev_box[3] - dev_box[2])/32);
res[(blockIdx.x*blockDim.x + threadIdx.x)] = 1;
for(int i = 0; i < dev_n_of_func[0]; i++){
res[(blockIdx.x*blockDim.x + threadIdx.x)] *= (*dev_func_pp[i])(x);
}
delete[] x; // fix: the device-heap allocation was previously leaked by every thread
}
// Driver: uploads the search box and counts to constant memory, runs
// first_grid over a 32 x 64 grid of cells, then prints every cell that no
// constraint ruled out.
int main(){
int n_of_ints = 2;
int n_of_func = 4;
float host_box[4] = {-15.0,15.0,0.0,15.0};
int ithreads = 256;
int iblocks = 128;
int * res;
cout<<fixed;
cout.precision(4);
// res is sized for iblocks*ithreads cells even though the kernel below is
// launched as <<<32, 64>>> and only fills the first 32*64 entries.
cudaMallocManaged(&res, sizeof(int)*iblocks*ithreads);
cudaMemcpyToSymbol(dev_n_of_ints, &n_of_ints, sizeof(int));
cudaMemcpyToSymbol(dev_n_of_func, &n_of_func, sizeof(int));
cudaMemcpyToSymbol(dev_threads, &ithreads, sizeof(int));
cudaMemcpyToSymbol(dev_blocks, &iblocks, sizeof(int));
cudaMemcpyToSymbol(dev_box, &host_box, sizeof(float)*4);
first_grid<T><<<32, 64>>>(res);
cudaDeviceSynchronize();
// Print the interval bounds of every surviving grid cell.
for(int i = 0; i < 32; i++){
for(int j = 0; j < 64; j++){
if(int(res[(i*64+j)]) > 0){
interval_gpu<T> x1(host_box[0] + (j) * (host_box[1] - host_box[0])/64 ,host_box[0]+(1+j) * (host_box[1] - host_box[0])/64);
interval_gpu<T> x2(host_box[2] + (i) * (host_box[3] - host_box[2])/32 ,host_box[2]+(1+i) * (host_box[3] - host_box[2])/32);
cout<<x1<<":"<<x2<<"\n";
}
}
}
cudaFree(res);
// Fix: the original called cudaFree() on the __constant__ symbols
// dev_blocks / dev_threads.  Constant memory is not allocated with
// cudaMalloc and must not be freed — those calls were invalid.
return 0;
}
|
9,490 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated stress kernel: folds a chain of float operations into
 * `comp` and prints the final value.  The arithmetic is intentionally left
 * untouched (the file header says "Do not modify"). */
__global__
void compute(float comp, int var_1,int var_2,float* var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) {
if (comp > (-1.7917E-36f / +1.6161E-35f)) {
for (int i=0; i < var_1; ++i) {
/* NOTE(review): the inner loop deliberately shadows the outer loop
 * variable `i`; var_3 is indexed by the inner i (0..var_2-1). */
for (int i=0; i < var_2; ++i) {
var_3[i] = +1.3808E-43f * (+1.4054E-35f - var_4 - var_5);
comp += var_3[i] - (var_6 - log10f(atan2f(cosf(var_7 - var_8 - +1.8063E35f / +0.0f + var_9), atan2f((var_10 * var_11 * var_12 * fmodf((-1.1895E-35f * -1.2946E-21f + var_13), -0.0f)), +1.0473E0f / +1.1172E36f))));
comp = asinf(-1.8887E19f);
comp = (var_14 / (-1.4624E-37f * -1.3550E25f * +0.0f));
if (comp < -1.3526E-42f * atan2f(-1.2584E-41f + fmodf(var_15 / -1.5098E-37f + (-1.7195E5f * (var_16 / var_17)), (var_18 + +1.4954E28f)), logf(-0.0f))) {
comp = -1.0378E-35f * -1.9795E35f / -1.3511E-41f - var_19 * -1.1929E16f;
}
if (comp <= (var_20 * (-1.6826E-35f - var_21))) {
comp = (var_22 / (+0.0f * (var_23 - -1.5127E-42f * -1.6680E15f * -1.2287E-41f)));
}
if (comp > (var_24 - ceilf(-1.8055E-37f * (var_25 + fabsf((+1.5670E-37f / (var_26 * +0.0f - var_27 + (-1.3188E-16f * var_28)))))))) {
float tmp_1 = +0.0f * +1.8601E20f - -1.4860E35f / -1.4571E36f;
comp = tmp_1 + cosf(sinf(+1.3136E35f));
}
}
}
}
printf("%.17g\n", comp);
}
/* Allocates a 10-element float array with every slot set to v.
 * The caller owns (and must free) the returned buffer. */
float* initPointer(float v) {
float *buf = (float*) malloc(sizeof(float)*10);
int k = 0;
while (k < 10) {
buf[k] = v;
++k;
}
return buf;
}
/* Parses 29 numeric command-line arguments and runs the generated kernel
 * once with <<<1,1>>>. */
int main(int argc, char** argv) {
/* Fix: the original indexed argv[1..29] without checking argc. */
if (argc < 30) {
fprintf(stderr, "usage: %s <29 numeric arguments>\n", argv[0]);
return 1;
}
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
/* Fix: the kernel writes through var_3, so it must be device memory —
 * the original passed the host malloc'd pointer straight to the kernel,
 * which dereferences it on the device. */
float* h_var3 = initPointer( atof(argv[4]) );
float* tmp_4 = NULL;
cudaMalloc(&tmp_4, sizeof(float)*10);
cudaMemcpy(tmp_4, h_var3, sizeof(float)*10, cudaMemcpyHostToDevice);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29);
cudaDeviceSynchronize();
/* Fix: release the host and device buffers (previously leaked). */
free(h_var3);
cudaFree(tmp_4);
return 0;
}
}
|
9,491 | #include <stdio.h>
/*
 * Kernel: every launched GPU thread prints the message once — with the
 * <<<5,5>>> launch in main below, 25 lines of output.
 */
__global__ void firstParallel()
{
printf("This should be running in parallel.\n");
}
/*
 * Launches firstParallel with 5 blocks of 5 threads and waits for it to
 * finish.  Launch-configuration errors only surface via cudaGetLastError(),
 * and execution errors only via a synchronizing call, so both are checked.
 */
int main()
{
firstParallel<<<5,5>>>();
// Catch launch errors (e.g. invalid configuration).
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
return 1;
}
/*
 * Block until all GPU work (including the kernel's printf output)
 * has completed, and surface any execution error.
 */
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
{
fprintf(stderr, "Kernel execution failed: %s\n", cudaGetErrorString(err));
return 1;
}
return 0;
}
|
9,492 | #include <stdio.h>
#define N 8192 // Number of rows/columns of the matrix.
#define TILE_DIM 32
#define SIZE N*N // Total size of a matrix.
// Returns 1 iff b is the transpose of a (both dim x dim, row-major),
// comparing element by element; 0 otherwise.
int isTransposed (const double* a, const double* b, const int dim) {
for (int row = 0; row < dim; row++) {
for (int col = 0; col < dim; col++) {
if (a[row + col*dim] != b[col + row*dim]) {
return 0;
}
}
}
return 1;
}
// Gpu naive transposition: b = a^T, straight from global memory.
// Launch: (N/TILE_DIM x N/TILE_DIM) blocks of TILE_DIM x brows threads;
// each thread copies TILE_DIM/brows elements of its tile.  The write
// b[col*width + ...] is strided across a warp, hence "naive".
// `size` is unused; kept for signature parity with gpuOptTrans.
__global__ void gpuNaiveTrans (double* a, double* b, const int size,
const int brows) {
int col = blockIdx.x * TILE_DIM + threadIdx.x;
int row = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM; // matrix width implied by the grid
for(int i = 0; i < TILE_DIM; i += brows) {
b[col * width + (row + i)] = a[(row + i) * width + col];
}
}
// Gpu optimised transposition: stages a TILE_DIM x TILE_DIM tile in shared
// memory so that both the global reads and the global writes are coalesced.
// Launch: (N/TILE_DIM x N/TILE_DIM) blocks of TILE_DIM x brows threads.
// `size` is unused; kept for signature parity with gpuNaiveTrans.
__global__ void gpuOptTrans (double* a, double* b, const int size,
const int brows) {
// Buffer on the shared memory.  The +1 padding staggers columns across
// banks so the transposed read tmp[threadIdx.x][...] below is free of
// shared-memory bank conflicts (the original unpadded tile serialized it).
__shared__ double tmp[TILE_DIM][TILE_DIM + 1];
int col = blockIdx.x * TILE_DIM + threadIdx.x;
int row = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
// Coalesced read of the tile into shared memory.
for(int i = 0; i < TILE_DIM; i += brows) {
tmp[threadIdx.y + i][threadIdx.x] = a[(row + i) * width + col];
}
__syncthreads();
// Swap block coordinates: this block writes the mirrored tile.
col = blockIdx.y * TILE_DIM + threadIdx.x;
row = blockIdx.x * TILE_DIM + threadIdx.y;
// Coalesced write of the transposed tile.
for(int i = 0; i < TILE_DIM; i += brows) {
b[col + width * (row + i)] = tmp[threadIdx.x][threadIdx.y + i];
}
}
// Fills a[0..dim-1] with the sequence 0.0, 1.0, ..., dim-1.
void matrixFill (double* a, const int dim) {
int idx = 0;
while (idx < dim) {
a[idx] = (double) idx;
++idx;
}
}
// Driver: times and verifies the naive and the shared-memory transpose of
// an N x N matrix.  argv[1] = threads per block (must be a positive
// multiple of TILE_DIM; determines block.y = argv[1]/TILE_DIM).
int main(int argc, char* argv[]) {
double* hostInput, * hostOutput;
double* devInput, * devOutput;
// Fix: the original dereferenced argv[1] unchecked, and a value that is
// not a positive multiple of TILE_DIM would make block.y == 0.
if (argc < 2) {
fprintf(stderr, "Usage: %s <threads-per-block (multiple of %d)>\n", argv[0], TILE_DIM);
return 1;
}
int threadsPerBlock = atoi(argv[1]);
if (threadsPerBlock < TILE_DIM || threadsPerBlock % TILE_DIM != 0) {
fprintf(stderr, "threads-per-block must be a positive multiple of %d\n", TILE_DIM);
return 1;
}
// Allocate host memory.
hostInput = (double* )malloc(SIZE * sizeof(double));
hostOutput = (double* )malloc(SIZE * sizeof(double));
if (hostInput == NULL || hostOutput == NULL) {
fprintf(stderr, "Host allocation failed\n");
return 1;
}
// Allocate device memory.
cudaMalloc((void**)&devInput, SIZE * sizeof(double));
cudaMalloc((void**)&devOutput, SIZE * sizeof(double));
// Launch dimensions: one TILE_DIM x (threadsPerBlock/TILE_DIM) block per tile.
dim3 grid, block;
block.x = TILE_DIM;
block.y = threadsPerBlock/TILE_DIM;
grid.x = N / TILE_DIM;
grid.y = N / TILE_DIM;
printf("\n--------------------------\n");
printf("--------------------------\n\n");
printf("Threads per block = %d\n\n", threadsPerBlock);
/// NAIVE TRANSPOSE //
// Initialise matrix.
matrixFill(hostInput, SIZE);
// Copy input to device.
cudaMemcpy(devInput, hostInput, SIZE * sizeof(double),
cudaMemcpyHostToDevice);
// Timing via CUDA events.
float elapsedTime = 0.0;
cudaEvent_t tStart, tEnd;
cudaEventCreate(&tStart);
cudaEventCreate(&tEnd);
cudaEventRecord(tStart);
gpuNaiveTrans<<< grid, block >>>(devInput, devOutput, N, block.y);
cudaEventRecord(tEnd);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&elapsedTime, tStart, tEnd);
printf("NAIVE TRANSPOSE: Elapsed time: %fms\n", elapsedTime);
printf("Bandwidth: %f GB/s\n",
2 * SIZE * sizeof(double) / elapsedTime / 1000000);
// Copy output to host.
cudaMemcpy(hostOutput, devOutput, SIZE * sizeof(double),
cudaMemcpyDeviceToHost);
printf("Is the tranposition correct? %s\n",
isTransposed(hostOutput, hostInput, N) ? "CORRECT" : "ERROR!" );
/// OPTIMISED TRANSPOSE //
printf("\n\n");
// Initialise matrix.
matrixFill(hostInput, SIZE);
// Copy input to device.
cudaMemcpy(devInput, hostInput, SIZE * sizeof(double),
cudaMemcpyHostToDevice);
// Timing.
elapsedTime = 0.0;
cudaEventRecord(tStart);
gpuOptTrans<<< grid, block >>>(devInput, devOutput, N, block.y);
cudaEventRecord(tEnd);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&elapsedTime, tStart, tEnd);
printf("OPTIMISED TRANSPOSE: Elapsed time: %fms\n", elapsedTime);
printf("Bandwidth: %f GB/s\n",
2 * SIZE * sizeof(double) / elapsedTime / 1000000);
// Copy output to host.
cudaMemcpy(hostOutput, devOutput, SIZE * sizeof(double),
cudaMemcpyDeviceToHost);
printf("Is the tranposition correct? %s\n",
isTransposed(hostOutput, hostInput, N) ? "CORRECT" : "ERROR!" );
// Freeing resources.
free(hostInput);
free(hostOutput);
cudaFree(devInput);
cudaFree(devOutput);
cudaEventDestroy(tStart);
cudaEventDestroy(tEnd);
return 0;
}
|
9,493 | #include<stdio.h>
#include<cuda_runtime.h>
/**
 * CUDA kernel: element-wise vector addition, C[i] = A[i] + B[i].
 * One thread per element; threads past the end of the vectors exit early.
 */
__global__
void vectorAdd(float *A, float *B, float *C, int numElemnets)
{
int idx = threadIdx.x+blockDim.x*blockIdx.x;
// guard the grid tail: the grid may be larger than the vector
if(idx >= numElemnets){
return;
}
C[idx] = A[idx] + B[idx];
}
/**
 * Host main routine: builds two random 0/1 vectors, adds them on the GPU,
 * and verifies the result element-wise against the host-side sum.
 */
int main(void)
{
cudaError_t err = cudaSuccess;
int n = 50000;
size_t size = n * sizeof(float);
// alloc host side memory
float *h_A = (float*)malloc(size);
float *h_B = (float*)malloc(size);
float *h_C = (float*)malloc(size);
// fix: malloc results were previously used unchecked
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//alloc device vectors
float *d_A = NULL;
float *d_B = NULL;
float *d_C = NULL;
cudaMalloc(&d_A,size);
cudaMalloc(&d_B,size);
err = cudaMalloc(&d_C,size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vectors (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//init vector A and vector B with random 0/1 values
for(int j=0;j < n; j++){
h_A[j] = rand()%2;
h_B[j] = rand()%2;
}
// copy host data to device
printf("Copy input vectors to device\n");
cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
//Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock; // ceil-div covers the tail
printf("CUDA kernel launch with %d blocks of %d threads \n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid,threadsPerBlock>>>(d_A,d_B,d_C,n);
err = cudaGetLastError();
//error check: launch errors only surface via cudaGetLastError()
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Copy device output data to host (a blocking memcpy also waits for the kernel)
printf("Copy output data to host\n");
cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost);
//Verify output against the host-side sum; exits on first mismatch, so the
//dead pass=0/pass=1 bookkeeping of the original was removed.
for (int i=0;i<n;i++)
{
if(fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result is invalid at element %d!\n",i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
//free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
printf("Done\n");
return 0;
}
|
9,494 |
#include <cstdio>
#include <cmath>
#define BLOCKDIM 512
#define JFACTOR 1
// device kernel def
__global__ void Action_noImage_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, int Nmols , int NAtoms, int active_size);
__global__ void Action_noImage_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, int Nmols , int NAtoms,int NSAtoms , int active_size);
//for imaging with ortho
__global__ void Action_ImageOrtho_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, double *box, int Nmols , int NAtoms, int active_size);
__global__ void Action_ImageOrtho_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, double *box, int Nmols , int NAtoms,int NSAtoms , int active_size);
//for imaging with NONortho
__global__ void Action_ImageNonOrtho_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, double *ucell, double *recip ,int Nmols , int NAtoms, int active_size);
__global__ void Action_ImageNonOrtho_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, double *ucell, double *recip, int Nmols , int NAtoms,int NSAtoms , int active_size);
////////////////////////
//Host wrapper: computes on the GPU the distance from a single mask center to
//every solvent molecule.  type selects imaging: 0 = none, 1 = orthorhombic
//(uses box), 2 = non-orthorhombic (uses ucell/recip).  One distance per
//molecule is returned in D_; the kernel time (ms) in time_gpu.
void Action_NoImage_Center(double *SolventMols_,double *D_, double maskCenter[3],double maxD,int NMols, int NAtoms, float &time_gpu,int type, double box[3], double ucell[9], double recip[9])
{
cudaEvent_t start_event, stop_event;
float elapsed_time_gpu;
double *devI2Ptr;
double *devI1Ptr;
double *devO1Ptr;
double *boxDev;
double *ucellDev, *recipDev;
//Output: one distance per molecule
cudaMalloc(((void **)(&devO1Ptr)),NMols * sizeof(double ));
//Input: the 3-component mask center
cudaMalloc(((void **)(&devI1Ptr)),3 * sizeof(double ));
cudaMemcpy(devI1Ptr,maskCenter,3 * sizeof(double ),cudaMemcpyHostToDevice);
//Input: solvent coordinates (NMols * NAtoms * xyz)
cudaMalloc(((void **)(&devI2Ptr)),NMols * NAtoms * 3 * sizeof(double ));
cudaMemcpy(devI2Ptr,SolventMols_,NMols * NAtoms * 3 * sizeof(double ),cudaMemcpyHostToDevice);
//Imaging parameters: always allocated, only filled for the type that needs them
cudaMalloc(((void**)(&boxDev)), 3 * sizeof(double));
cudaMalloc(((void**)(&ucellDev)), 9 * sizeof(double));
cudaMalloc(((void**)(&recipDev)), 9 * sizeof(double));
if (type == 1)
cudaMemcpy(boxDev,box, 3 * sizeof(double), cudaMemcpyHostToDevice);
if (type == 2)
{
cudaMemcpy(ucellDev,ucell, 9 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(recipDev,recip, 9 * sizeof(double), cudaMemcpyHostToDevice);
}
//Decomposition: each thread processes one solvent molecule (rather than one
//atom), which simplifies the imaging cases; pad the grid to cover NMols.
int active_size = BLOCKDIM/NAtoms * NAtoms;
int NBlocks = ceil(float(NMols)/ (BLOCKDIM));
printf("NBlocks = %d\n", NBlocks);
dim3 dimGrid0 = dim3(NBlocks,1);
dim3 dimBlock0 = dim3(BLOCKDIM,1);
printf("NMols = %d, NAtoms = %d\n", NMols, NAtoms);
printf("About to launch kernel.\n");
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
if(type == 0)
Action_noImage_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr,devI1Ptr, devI2Ptr, maxD, NMols, NAtoms,active_size);
else if (type == 1)
Action_ImageOrtho_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr,devI1Ptr, devI2Ptr, maxD,boxDev, NMols, NAtoms,active_size);
else if (type ==2 )
Action_ImageNonOrtho_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr,devI1Ptr, devI2Ptr, maxD,ucellDev, recipDev, NMols, NAtoms,active_size);
else
printf("kernel_wrapper: error in type\n");
//Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
//supported equivalent.
cudaDeviceSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_gpu,start_event, stop_event );
printf("Done with kernel CUDA Kernel Time: %.2f\n", elapsed_time_gpu);
time_gpu = elapsed_time_gpu;
cudaMemcpy(D_,devO1Ptr,NMols * sizeof(double ),cudaMemcpyDeviceToHost);
//Fix: destroy the timing events (previously leaked).
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFree(devO1Ptr);
cudaFree(devI1Ptr);
cudaFree(devI2Ptr);
cudaFree(boxDev);
cudaFree(ucellDev);
cudaFree(recipDev);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Host wrapper: computes on the GPU, for every solvent molecule, its distance
//to a set of solute atoms (no single center).  type selects imaging:
//0 = none, 1 = orthorhombic (box), 2 = non-orthorhombic (ucell/recip).
//One distance per molecule is returned in D_; the kernel time in time_gpu.
void Action_NoImage_no_Center(double *SolventMols_,double *D_, double *Solute_atoms,double maxD,int NMols, int NAtoms,int NSAtoms, float &time_gpu, int type,double box[3], double ucell[9], double recip[9])
{
cudaEvent_t start_event, stop_event;
float elapsed_time_gpu;
double *devI3Ptr;
double *devI2Ptr;
double *devO1Ptr;
double *boxDev;
double *ucellDev, *recipDev;
//Output: one distance per molecule
cudaMalloc(((void **)(&devO1Ptr)),NMols * sizeof(double ));
//Input: solvent coordinates (NMols * NAtoms * xyz)
cudaMalloc(((void **)(&devI2Ptr)),NMols * NAtoms * 3 * sizeof(double ));
cudaMemcpy(devI2Ptr,SolventMols_,NMols * NAtoms * 3 * sizeof(double ),cudaMemcpyHostToDevice);
//Input: solute atom coordinates (NSAtoms * xyz)
cudaMalloc(((void **)(&devI3Ptr)), NSAtoms * 3 * sizeof(double ));
cudaMemcpy(devI3Ptr,Solute_atoms,NSAtoms * 3 * sizeof(double ),cudaMemcpyHostToDevice);
//Imaging parameters: always allocated, only filled for the type that needs them
cudaMalloc(((void**)(&boxDev)), 3 * sizeof(double));
cudaMalloc(((void**)(&ucellDev)), 9 * sizeof(double));
cudaMalloc(((void**)(&recipDev)), 9 * sizeof(double));
if (type == 1)
cudaMemcpy(boxDev,box, 3 * sizeof(double), cudaMemcpyHostToDevice);
if (type == 2)
{
cudaMemcpy(ucellDev,ucell, 9 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(recipDev,recip, 9 * sizeof(double), cudaMemcpyHostToDevice);
}
//Decomposition: active_size threads per block do useful work (a whole number
//of molecules per block); the grid is padded to cover NMols * NAtoms.
int active_size = BLOCKDIM/NAtoms * NAtoms;
int NBlocks = ceil(NMols * NAtoms / float(active_size));
dim3 dimGrid0 = dim3(NBlocks,1);
dim3 dimBlock0 = dim3(BLOCKDIM,1);
printf("NMols = %d, NAtoms = %d\n", NMols, NAtoms);
printf("About to launch kernel.\n");
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
if(type == 0)
Action_noImage_no_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr, devI2Ptr,devI3Ptr, maxD, NMols, NAtoms,NSAtoms,active_size);
else if(type == 1)
Action_ImageOrtho_no_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr, devI2Ptr,devI3Ptr, maxD, boxDev, NMols, NAtoms,NSAtoms,active_size);
else if (type == 2)
Action_ImageNonOrtho_no_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr, devI2Ptr,devI3Ptr, maxD, ucellDev, recipDev, NMols, NAtoms,NSAtoms,active_size);
else
printf("kernel_wrapper: error in type no center version\n");
//Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
//supported equivalent.
cudaDeviceSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_gpu,start_event, stop_event );
printf("Done with kernel CUDA Kernel Time: %.2f\n", elapsed_time_gpu);
time_gpu = elapsed_time_gpu;
cudaMemcpy(D_,devO1Ptr,NMols * sizeof(double ),cudaMemcpyDeviceToHost);
//Fix: destroy the timing events (previously leaked).
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFree(devO1Ptr);
cudaFree(devI2Ptr);
cudaFree(devI3Ptr);
cudaFree(boxDev);
cudaFree(ucellDev);
cudaFree(recipDev);
}
9,495 | #include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime.h"
#define NUMBLOCK 1
#define BLOCKWIDTH 16
#define NUMTHREAD 10000
#define ASIZE 10000
// Prints the ASIZE x ASIZE matrix `image`, one tab-separated row per line,
// followed by a blank line.
void printArray(int * image){
for (int r = 0; r < ASIZE; ++r)
{
for (int c = 0; c < ASIZE; ++c)
{
printf("%d\t", image[r * ASIZE + c]);
}
printf("\n");
}
printf("\n\n");
}
// Row-wise inclusive prefix sum: thread `idx` sequentially scans row `idx`
// of the ASIZE x ASIZE matrix `img` into `integral`.
// The launch must supply at least ASIZE threads (NUMTHREAD in main).
__global__ void prefixSum(int * img, int * integral)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i;
//printf("blockIdx = %d, blockDim = %d, threadIdx = %d, img[%d] = %d\n", blockIdx.x, blockDim.x, threadIdx.x, idx, img[idx]);
// first element of the row copies straight across
integral[idx*ASIZE] = img[idx*ASIZE];
for(i = 1; i<ASIZE; i++){
integral[idx*ASIZE+i] = img[idx*ASIZE+i] + integral[idx*ASIZE+i-1];
// NOTE(review): this barrier is reached uniformly (the loop bound is the
// compile-time constant ASIZE) so it is safe, but each thread touches only
// its own row, so it appears unnecessary — confirm before removing.
__syncthreads();
}
// integral[idx*ASIZE+ASIZE-1] = img[idx*ASIZE+ASIZE-1];
}
// Column-wise inclusive prefix sum: thread `idx` scans column `idx` top to
// bottom.  In main this is called with img == integral (in place); that
// works because each element is read before it is overwritten.
__global__ void columnSum(int * img, int * integral)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i;
for(i = 1; i<ASIZE; i++){
integral[idx + i*ASIZE] = img[idx+ (i*ASIZE)] + integral[idx+ (i-1)*ASIZE];
// NOTE(review): barrier reached uniformly so it is safe, but columns are
// independent — likely removable; confirm.
__syncthreads();
}
}
// Driver: builds an ASIZE x ASIZE matrix, computes its integral image on the
// GPU (row prefix sums, then column prefix sums), timing each phase.
int main()
{
int *IMG_HOST, *INTG_HOST;
int *IMG_DEV, *INTG_DEV;
//Time initialization
float timePassed;
// Fix: the original used size*size = (ASIZE*sizeof(int))^2 bytes for every
// allocation and copy, over-allocating by a factor of sizeof(int).  The
// matrix needs exactly ASIZE*ASIZE ints.
size_t bytes = (size_t)ASIZE * ASIZE * sizeof(int);
IMG_HOST = (int *)malloc(bytes);
INTG_HOST = (int *)malloc(bytes);
if (IMG_HOST == NULL || INTG_HOST == NULL)
{
fprintf(stderr, "Host allocation failed\n");
return 1;
}
cudaMalloc((void **) &IMG_DEV, bytes);
cudaMalloc((void **) &INTG_DEV, bytes);
// Deterministic test pattern (the unused `random` local was removed).
int i,j;
for (i = 0; i < ASIZE; ++i)
{
for (j = 0; j < ASIZE; ++j)
{
IMG_HOST[i*ASIZE + j] = i*2 + j*4;
}
}
// printArray(IMG_HOST);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(IMG_DEV, IMG_HOST, bytes, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
prefixSum <<< NUMTHREAD/BLOCKWIDTH, BLOCKWIDTH >>> (IMG_DEV, INTG_DEV);
cudaDeviceSynchronize(); // fix: cudaThreadSynchronize() is deprecated
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timePassed, start,stop);
printf("Time Spent Row: %0.5f\n", timePassed);
//#################################################################//
// Round-trip through the host between the two kernels, as in the original
// (columnSum could read INTG_DEV directly).
cudaMemcpy(INTG_HOST, INTG_DEV, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(INTG_DEV, INTG_HOST, bytes, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
columnSum <<< NUMTHREAD/BLOCKWIDTH, BLOCKWIDTH >>> (INTG_DEV, INTG_DEV);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timePassed, start,stop);
printf("Time Spent Column: %0.5f\n", timePassed);
cudaMemcpy(INTG_HOST, INTG_DEV, bytes, cudaMemcpyDeviceToHost);
// printArray(INTG_HOST);
//Free up the resources (fix: the events were previously never destroyed)
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(IMG_HOST);
free(INTG_HOST);
cudaFree(IMG_DEV);
cudaFree(INTG_DEV);
return 0;
}
|
9,496 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda.h"
// Tree reduction (sum) of up to 32 floats per block in shared memory.
// Fix: the original indexed the 32-entry shared array (and the reduction
// guards) with the *global* thread id, which reads and writes out of bounds
// for every block after the first; local indexing is used instead.
// Behavior for the <<<1,32>>>-style launch in main is unchanged.
// NOTE: *result is written by thread 0 of each block, so the final value is
// only well-defined for a single-block launch.
__global__ void reduction(int N, float *a, float* result) {
int thread = threadIdx.x;
int block = blockIdx.x;
int blockSize = blockDim.x;
//unique global thread ID
int id = thread + block*blockSize;
__shared__ float s_sum[32];
// load this block's slice; pad with 0 past the end of the array
s_sum[thread] = (id < N) ? a[id] : 0.0f;
__syncthreads(); //make sure the write to shared is finished
if (thread<16) {//first half
s_sum[thread] += s_sum[thread+16];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<8) {//next half
s_sum[thread] += s_sum[thread+8];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<4) {//next half
s_sum[thread] += s_sum[thread+4];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<2) {//next half
s_sum[thread] += s_sum[thread+2];
}
__syncthreads(); //make sure the write to shared is finished
if (thread<1) {//final piece
s_sum[thread] += s_sum[thread+1];
*result = s_sum[thread];
}
}
//perform a reduction on a vector of length N: sums on the host, sums on the
//device, and prints both for comparison.
int main (int argc, char **argv) {
int N = 32;
double seed=0;
srand48(seed); // deterministic data for reproducibility
//allocate memory on host
float *h_a = (float*) malloc(N*sizeof(float));
//populate with random data
for (int n=0;n<N;n++) {
h_a[n] = drand48();
}
//perform the reduction on host
float h_sum = 0.;
for (int n=0;n<N;n++) {
h_sum += h_a[n];
}
printf("The Host's sum was %f \n", h_sum);
float *d_a, *d_sum;
cudaMalloc(&d_a, N*sizeof(float));
cudaMalloc(&d_sum, 1*sizeof(float));
//populate the device array with the same data as the host
cudaMemcpy(d_a,h_a,N*sizeof(float),cudaMemcpyHostToDevice);
//block dimensions
dim3 B(32,1,1);
//grid dimensions (ceil-div: one block for N = 32)
dim3 G((N+32-1)/32,1,1);
reduction <<< G,B >>> (N, d_a, d_sum);
//blocking copy: also synchronizes with the kernel before reading the result
cudaMemcpy(&h_sum,d_sum,1*sizeof(float),cudaMemcpyDeviceToHost);
printf("The Device's sum was %f \n", h_sum);
//fix: release host and device memory (previously leaked)
free(h_a);
cudaFree(d_a);
cudaFree(d_sum);
return 0;
}
|
9,497 | //Editor: Michael Lukiman
//Izhikevich spiking neuron network implementation in CUDA with added spatial winner-take-all dynamics
//GPU Architecture and Programming - Fall 2018
#include <stdio.h> //Standard input-output (fopen/fprintf for spikes.dat)
#include <stdlib.h> //Standard library functions (rand)
#include <iostream> //For streaming input-output operations (std::cout)
#include <math.h> //math
//Define parameters of the network:
const int excitatory=800;//Excitatory neurons(N_e)
const int inhibitory=200;//Inhibitory neurons(N_i)
const int total=excitatory+inhibitory;//Total Exc.+Inh. neurons
const int synapses=100;//synapses per neuron
const int delay=20;//in milliseconds, top axonal conduction delay
const int hz=total*100;//Upper bound on spikes recorded per simulated second
const float max_weight=10.0;//Top synaptic weight (excitatory weights are clamped to this)
//Neuronal dynamics: per-neuron Izhikevich model parameters
float a[total];//time scale of the recovery variable u (0.02 regular-spiking, 0.1 fast-spiking; set in initialize)
float d[total];//after-spike increment of u (8.0 excitatory, 2.0 inhibitory; set in initialize)
//Activity variables
float v[total];//membrane potential; a spike is registered when v >= 30
float u[total];//membrane recovery variable
int num_fired;//number of spikes recorded so far this second (index into spike[])
int spike[hz][2];//Recorded spikes: [0]=time in ms within the second, [1]=neuron id
//Spike-timing dependent variables (LTP,LTD)
float LTpot[total][delay+1001];//Longterm potentiation trace, one column per ms plus a delay-wide margin
float LTdep[total];//Longterm depression trace, one value per neuron
//Turn these relationships into data arrays
int ps_set[total][synapses];//Matrix holding the post-synaptic neurons(synapses) of each neuron(total)
float weights[total][synapses];//Matrix holding the weights of each synapse
float w_derivs[total][synapses];//Matrix holding the derivative of each above weight
short del_length[total][delay];//Per neuron: how many synapses fire at each conduction delay
short del_set[total][delay][synapses];//Per neuron and delay: indices (into ps_set/weights) of the synapses with that delay
int pre_neuron[total];//Count of registered presynaptic connections per neuron
int pre_input[total][synapses*3];//Presynaptic source neuron ids
int pre_delay[total][synapses*3];//Conduction delay of each presynaptic connection
float *pre_weights[total][synapses*3];//Pointers into weights[] for each presynaptic connection
float *pre_w_derivs[total][synapses*3];//Pointers into w_derivs[] for each presynaptic connection
// Pseudo-stochastic helper: uniform-ish integer in [0, n) drawn from rand().
int pstochastic(int n) {
int draw = rand();
return draw % n;
}
//Build the network: set cell parameters, wire random synapses (no
//self-connections, no duplicate targets), set initial weights,
//distribute synapses over conduction delays, build presynaptic
//lookup tables, and reset the dynamic state to rest.
void initialize()
{
int i,j,k;
int jj;
int dd;
int self;
int r;
for(i=0;i<excitatory;i++)a[i]=0.02;//Set excitatory as regular-spiking neurons
for(i=excitatory;i<total;i++)a[i]=0.1;//Set inhibitory as fast-spiking neurons
for(i=0;i<excitatory;i++)d[i]=8.0;//Set excitatory as regular-spiking neurons
for(i=excitatory;i<total;i++)d[i]=2.0;//Set inhibitory as fast-spiking neurons
//Randomly wire the synapses:
for(i=0;i<total;i++)//Every neuron
for(j=0;j<synapses;j++)//Every weight of connected synapse
{
do{
self=0;
if(i<excitatory)r=pstochastic(total);//Excitatory cells may target any neuron
else r=pstochastic(excitatory);//Inhibitory cells target only excitatory neurons
if(r==i)self=1;//Reject self-connection
//Also reject a target already chosen for this neuron (no duplicate synapses)
for(k=0;k<j;k++)
if(ps_set[i][k]==r)//Target already used
self=1;
}
while(self==1);
ps_set[i][j]=r;//This synapse is randomly assigned
}
//Initialize excitatory synaptic weights
for(i=0;i<excitatory;i++)
for(j=0;j<synapses;j++)
weights[i][j]=6.0;
//Initialize inhibitory weights (negative: inhibitory effect, and never changed later)
for(i=excitatory;i<total;i++)
for(j=0;j<synapses;j++)
weights[i][j]=-5.0;
//Initialize synaptic derivatives
for(i=0;i<total;i++)
for(j=0;j<synapses;j++)
w_derivs[i][j]=0.0;
for(i=0;i<total;i++)//For every neuron
{
short ind=0;//Running synapse index while distributing over delays
if(i<excitatory)//If the neuron is excitatory
{
//Spread synapses evenly across all conduction delays
for(j=0;j<delay;j++)
{
del_length[i][j]=synapses/delay;//Allocate equal intervals (100/20 = 5 per delay)
//Record which synapse indices fire at this delay
for(k=0;k<del_length[i][j];k++)
del_set[i][j][k]=ind++;
}
}
else
{
//Inhibitory neurons: all synapses use a 1 ms delay (slot 0)
for(j=0;j<delay;j++)
del_length[i][j]=0;
del_length[i][0]=synapses;
for(k=0;k<del_length[i][0];k++)
del_set[i][0][k]=ind++;
}
}
//Build presynaptic lookup tables: for each neuron, find every
//excitatory synapse that targets it, and record source, delay,
//and pointers to the weight/derivative so STDP can update them.
for(i=0;i<total;i++)
{
pre_neuron[i]=0;
for(j=0;j<excitatory;j++)
for(k=0;k<synapses;k++)
if(ps_set[j][k]==i)//This is a presynaptic neuron
{
pre_input[i][pre_neuron[i]]=j;//Register it to the set
for(dd=0;dd<delay;dd++)//Search all delays for this synapse's conduction delay
for(jj=0;jj<del_length[j][dd];jj++)
if(ps_set[j][ del_set[j][dd][jj] ]==i)
pre_delay[i][pre_neuron[i]]=dd;
pre_weights[i][pre_neuron[i]]=&weights[j][k];//Presynaptic weight assigned to relevant synaptic weights
pre_w_derivs[i][pre_neuron[i]++]=&w_derivs[j][k];//Likewise with derivatives
}
}
//Initialize longterm potentiation values
//NOTE: only the first delay+1 columns are cleared; the per-ms loop in
//main writes columns delay..1000+delay before reading them.
for(i=0;i<total;i++)
for(j=0;j<1+delay;j++)
LTpot[i][j]=0.0;
for(i=0;i<total;i++) LTdep[i]=0.0;//Initialize longterm depression values
for(i=0;i<total;i++) v[i]=-65.0;//Initialize v (resting membrane potential)
for(i=0;i<total;i++) u[i]=0.2*v[i];//initial values for u
num_fired=1;//spike timings: slot 0 is reserved for the dummy below
spike[0][0]=-delay;//dummy spike with negative time so delay-window scans terminate
spike[0][1]=0;//dummy spike
}
//Simulate the network for 60 seconds at 1 ms resolution: apply random
//thalamic input, detect spikes, deliver delayed synaptic input,
//integrate the Izhikevich equations, decay the STDP traces, and apply
//weight updates once per second. Writes a spike-count sample every
//50 ms to spikes.dat. (Note: returns 1 on success — historical quirk.)
int main()
{
int i,j,k;//Loop counters
int sec,t;//seconds, milliseconds
float inputs[total];//Synaptic + thalamic input accumulated per neuron this ms
FILE *fs;//File pointer
fs=fopen("spikes.dat","w");
initialize();
for(sec=0;sec<60;sec++) {//plot for 1minute(60s)
for(t=0;t<1000;t++)//plot for 1sec(1000ms)
{
for(i=0;i<total;i++)inputs[i]=0.0;//Fresh input
for(k=0;k<total/1000;k++) inputs[pstochastic(total)]=20.0;//Noisy thalamic input: total/1000 (=1) random neuron gets 20 per ms
int fired_count = 0;
for(i=0;i<total;i++)
if(v[i]>=30)//Passing the spike threshold:
{
fired_count += 1;
v[i]=-65.0;//Reset membrane potential to resting value
u[i]+=d[i];//Refractory period
LTpot[i][t+delay]=0.1;// Update potentiation
LTdep[i]=0.12;//Update depression
//Potentiate every presynaptic synapse whose spike arrived just before this one
for(j=0;j<pre_neuron[i];j++)
*pre_w_derivs[i][j]+=LTpot[pre_input[i][j]][t+delay-pre_delay[i][j]-1];//Spike after presynaptic spike
spike[num_fired][0]=t;
spike[num_fired++][1]=i;
if(num_fired==hz){ std::cout<<"Too many spikes at t="<<t<<"(ignoring all)"; num_fired=1; }
}
//Deliver spikes whose conduction delay elapses this ms.
//The dummy spike at time -delay guarantees the loop terminates.
k=num_fired;
while(t-spike[--k][0]<delay)
{
for(j=0;j<del_length[spike[k][1]][t-spike[k][0]];j++)
{
i=ps_set[spike[k][1]][del_set[spike[k][1]][t-spike[k][0]][j]];
inputs[i]+= weights[ spike[k][1] ] [ del_set[ spike[k][1] ] [ t-spike[k][0] ] [j] ];
if(spike[k][1]<excitatory)//Depress: presynaptic spike arriving after a post-synaptic spike
w_derivs[spike[k][1]][del_set[spike[k][1]][t-spike[k][0]][j]]-=LTdep[i];
}
}
for(i=0;i<total;i++)
{
v[i]+=0.5*((0.04*v[i]+5)*v[i]+140-u[i]+inputs[i]);//Izhikevich formulae
v[i]+=0.5*((0.04*v[i]+5)*v[i]+140-u[i]+inputs[i]);//Repeat: two 0.5 ms half-steps for numerical stability
u[i]+=a[i]*(0.2*v[i]-u[i]);
LTpot[i][t+delay+1]=0.95*LTpot[i][t+delay];//Exponential decay of the potentiation trace
LTdep[i]*=0.95;//Exponential decay of the depression trace
}
if (t%50==0) fprintf(fs,"%d\n", fired_count);//Sample the per-ms spike count every 50 ms
}
std::cout<<"sec="<<sec<<",firingrate="<<float(num_fired)/total<<"\n";
//Next second: wrap the tail of the LTP traces back to the start
for(i=0;i<total;i++)
for(j=0;j<delay+1;j++)
LTpot[i][j]=LTpot[i][1000+j];
//Keep only spikes still inside the delay window, shifted back 1000 ms
k=num_fired-1;
while(1000-spike[k][0]<delay)k--;
for(i=1;i<num_fired-k;i++)
{
spike[i][0]=spike[k+i][0]-1000;
spike[i][1]=spike[k+i][1];
}
num_fired=num_fired-k;
for(i=0;i<excitatory;i++) //Update excitatory connections: apply accumulated STDP
for(j=0;j<synapses;j++)
{
weights[i][j]+=0.01+w_derivs[i][j];//small constant drift plus learned change
w_derivs[i][j]*=0.9;//decay the derivative toward zero
if(weights[i][j]>max_weight) weights[i][j]=max_weight;//clamp to [0, max_weight]
if(weights[i][j]<0) weights[i][j]=0.0;
}
}
fclose(fs);
return 1;
}
|
// Version 20180208-02: Constant multiplication now works.
#include <math.h>
#include <stdio.h>
// Lightweight Cartesian complex-number template usable in host and
// device code. The real part is x, the imaginary part is y; every
// operator returns a fresh value and leaves its operands untouched.
template <class num>
class Complex {
public:
// Variables
num x; // real component
num y; // imaginary component
// --- Addition: complex+complex, complex+scalar, scalar+complex ---
__host__ __device__ Complex operator+(Complex other) const {
return Complex{x + other.x, y + other.y};
}
__host__ __device__ Complex operator+(num s) const {
return Complex{x + s, y};
}
__host__ __device__ friend Complex operator+(num s, Complex c) {
return Complex{s + c.x, c.y};
}
// --- Subtraction ---
__host__ __device__ Complex operator-(Complex other) const {
return Complex{x - other.x, y - other.y};
}
__host__ __device__ Complex operator-(num s) const {
return Complex{x - s, y};
}
__host__ __device__ friend Complex operator-(num s, Complex c) {
return Complex{s - c.x, -c.y};
}
// --- Multiplication: (x+iy)(u+iv) = (xu - yv) + i(xv + yu) ---
__host__ __device__ Complex operator*(Complex other) const {
return Complex{x * other.x - y * other.y, x * other.y + y * other.x};
}
__host__ __device__ Complex operator*(num s) const {
return Complex{x * s, y * s};
}
__host__ __device__ friend Complex operator*(num s, Complex c) {
return Complex{s * c.x, s * c.y};
}
// --- Division: multiply by the conjugate of the divisor over |divisor|^2 ---
__host__ __device__ Complex operator/(Complex other) const {
return Complex{(x * other.x + y * other.y)/(other.x * other.x + other.y * other.y), (- x * other.y + y * other.x)/(other.x * other.x + other.y * other.y)};
}
__host__ __device__ Complex operator/(num s) const {
return Complex{x / s, y / s};
}
__host__ __device__ friend Complex operator/(num s, Complex c) {
return Complex{s * c.x/(c.x * c.x + c.y * c.y), - s * c.y/(c.x * c.x + c.y * c.y)};
}
};
//Functions
// Complex conjugate: negate the imaginary part.
template <class num>
__host__ __device__ Complex<num> conj(Complex<num> a) {
Complex<num> out = a;
out.y = -out.y;
return out;
}
// Squared modulus |a|^2 = x^2 + y^2 (avoids the sqrt of abs()).
template <class num>
__host__ __device__ num modsq(Complex<num> a) {
num re = a.x;
num im = a.y;
return re*re + im*im;
}
// Modulus |a| = sqrt(x^2 + y^2).
// Fix: use the overloaded sqrt instead of sqrtf so that
// Complex<double> instantiations keep double precision instead of
// being silently truncated through the float-only sqrtf path.
template <class num>
__host__ __device__ num abs(Complex<num> a) {
return sqrt( a.x*a.x + a.y*a.y );
}
// Principal argument of a in (-pi, pi], computed branch-by-quadrant
// from atan(y/x). Warns and returns 0 for the zero input, whose
// argument is undefined.
template <class num>
__host__ __device__ num arg(Complex<num> a) {
if (a.x > 0) {
return atan(a.y/a.x); // right half-plane: atan is already principal
}
if (a.x < 0) {
num base = atan(a.y/a.x);
// left half-plane: shift by +/- pi depending on the sign of y
return (a.y >= 0) ? (M_PI + base) : (-M_PI + base);
}
// a.x == 0: the point lies on the imaginary axis
if (a.y > 0) {
return M_PI/2;
}
if (a.y < 0) {
return -M_PI/2;
}
printf("\nWARNING: arg(0)\n");
return 0;
}
|
#include <stdio.h>
// This is the interesting part. The specifier "__global__" indicates that this function runs in the GPU
// kernel and thus runs in a different environment than the main function. Remember, C functions need
// to be prototyped!
// Kernel: y[i] = a*x[i] + y[i] + i for every i in [0, n).
// Launch with a 1-D grid of 1-D blocks; adjacent threads touch
// adjacent elements, so global accesses are coalesced.
__global__ void cuda_test(int n, float a, float *x, float *y)
{
// Flat global thread index from the CUDA built-ins blockIdx,
// blockDim, and threadIdx (all dim3-typed).
int idx = threadIdx.x + blockDim.x * blockIdx.x;
// Guard the tail: the grid may span more threads than elements.
if (idx >= n) return;
y[idx] = a * x[idx] + y[idx] + idx;
}
// Host driver: allocates matching host/device arrays, runs the
// cuda_test kernel (y[i] = 2*x[i] + y[i] + i), and prints samples.
// Improvements: host mallocs and every CUDA call are now checked,
// and cudaGetLastError() is queried after the launch (kernel
// launches report configuration errors only through it).
int main()
{
    // Initialize N = 2^20 (<< is the shift operator!)
    int N = 1<<20;
    // Output
    printf("N = %d\n\n", N);
    // Host (CPU) and device (GPU) copies of the two arrays.
    float *host_x, *host_y, *device_x, *device_y;
    // Use dynamic allocation here since the heap memory is larger than the stack memory.
    host_x = (float*)malloc(N*sizeof(float));
    host_y = (float*)malloc(N*sizeof(float));
    if (host_x == NULL || host_y == NULL)
    {
        fprintf(stderr, "Host allocation failed\n");
        return 1;
    }
    // Allocate the device memory, checking each returned status.
    cudaError_t err;
    err = cudaMalloc(&device_x, N*sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc device_x failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaMalloc(&device_y, N*sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc device_y failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Parallel structure: enough 256-thread blocks to cover N (ceiling division).
    dim3 blocks_per_grid((N+255)/256, 1, 1);
    dim3 threads_per_block(256, 1, 1);
    // Initialize some example data.
    for (int i = 0; i < N; i++)
    {
        host_x[i] = 1.0f;
        host_y[i] = 2.0f;
    }
    // Output.
    printf("Initializing...\n");
    printf("x[0] = %f\n", host_x[0]);
    printf("...\n");
    printf("x[N - 1] = %f\n\n", host_x[N - 1]);
    printf("y[0] = %f\n", host_y[0]);
    printf("...\n");
    printf("y[N - 1] = %f\n\n", host_y[N - 1]);
    // Copy host -> device. DESTINATION is always the first argument,
    // SOURCE the second; cudaMemcpyHostToDevice gives the direction.
    err = cudaMemcpy(device_x, host_x, N*sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "H2D copy of x failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaMemcpy(device_y, host_y, N*sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "H2D copy of y failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Output.
    printf("Computing y[i] = 2*x[i] + y[i] + i\n\n");
    // Launch the kernel. device_x/device_y are device pointers and must
    // never be dereferenced on the host (that would segfault).
    cuda_test<<<blocks_per_grid,threads_per_block>>>(N, 2.0f, device_x, device_y);
    // Launches return no status directly: query it explicitly.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Copy the result back; this blocking copy also synchronizes with the kernel.
    err = cudaMemcpy(host_y, device_y, N*sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "D2H copy of y failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Output.
    printf("Outputting...\n");
    printf("x[0] = %f\n", host_x[0]);
    printf("...\n");
    printf("x[N - 1] = %f\n\n", host_x[N - 1]);
    printf("y[0] = %f\n", host_y[0]);
    printf("...\n");
    printf("y[N - 1] = %f\n", host_y[N - 1]);
    // Free the device memory. "cudaFree" is a CUDA built-in.
    cudaFree(device_x);
    cudaFree(device_y);
    // Free the host memory.
    free(host_x);
    free(host_y);
    // Haha
    printf("Look how fast that was!\n");
    // Exit code.
    return 0;
}
|
/*In order to choose the correct device for computation it is essential to compare the computation
capability of the devices available and choose the best. It is a boring task to print out all the device properties and compare
hence CUDA C provides the methods for the same.
cudaChooseDevice is used to select the best of the devices after comparing with the desired properties-- here 1.3 computation capability
cudaSetDevice is used to set the best device. */
#include<stdio.h>
#include <string.h>
// Query the CUDA devices, pick the one closest to compute capability
// 1.3 with cudaChooseDevice, and make it current with cudaSetDevice.
// Fixes: memset requires <string.h> (added at the top of the file),
// CUDA calls are status-checked, and main now returns explicitly.
int main()
{
cudaDeviceProp prop;
int dev;
int count;
cudaError_t err;
err = cudaGetDeviceCount(&count);
if (err != cudaSuccess)
{
fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
return 1;
}
printf("Number of devices available: %d\n",count); // Gives the number of devices available to choose from
cudaGetDevice(&dev);
printf("ID of the current device: %d\n", dev); //ID of the current device under check
// Zero every field so unspecified properties do not constrain the choice.
memset(&prop,0,sizeof(cudaDeviceProp));
prop.major=1; // desired compute capability: 1.3
prop.minor=3;
cudaChooseDevice(&dev,&prop);
printf("ID of CUDA Device Closest to 1.3 Computation Capability: %d\n", dev); // ID of the best selected device
cudaGetDeviceProperties(&prop,dev);
printf("Computation Capability of the selected device: %d.%d\n", prop.major,prop.minor); // Gives the compute capability of the device
// Make the chosen device current for all subsequent CUDA calls.
err = cudaSetDevice(dev);
if (err != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed: %s\n", cudaGetErrorString(err));
return 1;
}
return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.