serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
23,301 | #include "includes.h"
// Scatter-add ("col2im"-style) kernel: accumulates the expanded per-module
// patches back into the target image. One block.x per module in this batch,
// one block.y per channel; threads stride over images.
// NOTE(review): padding_x/padding_y are *added* to the patch origin, so the
// caller presumably passes negative padding — confirm with the host wrapper.
__global__ void kContract(float *expanded_data, float* targets, int num_images, int num_input_channels, int image_size_y, int image_size_x, int num_modules_y, int num_modules_x, int kernel_size_y, int kernel_size_x, int padding_y, int padding_x, int stride_y, int stride_x, int num_modules_batch, int module_id_offset) {
    int color = blockIdx.y;
    int dst_module_id = module_id_offset + blockIdx.x;
    int src_module_id = blockIdx.x;
    int module_id_x = dst_module_id % num_modules_x;
    int module_id_y = dst_module_id / num_modules_x;
    int startX = module_id_x * stride_x + padding_x;
    int startY = module_id_y * stride_y + padding_y;
    int Y, X;
    long target_id, source_id;
    // Offset both pointers to this channel (and source module).
    targets += num_images * image_size_x * image_size_y * color;
    expanded_data += num_images * (src_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color));
    for (int y = 0; y < kernel_size_y; y++) {
        Y = startY + y;
        for (int x = 0; x < kernel_size_x; x++) {
            X = startX + x;
            source_id = num_images * num_modules_batch * (x + kernel_size_x * y);
            target_id = num_images * (X + image_size_x * Y);
            // Skip patch positions that fall outside the image.
            if (X >= 0 && X < image_size_x && Y >= 0 && Y < image_size_y) {
                // atomicAdd: overlapping patches from other blocks hit the
                // same target pixels.
                // BUGFIX: removed the __syncthreads() that sat inside this
                // data-dependent loop/branch. The kernel uses no shared
                // memory, and a barrier in divergent control flow (threads
                // leave the im-loop at different trip counts) is undefined
                // behavior.
                for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
                    atomicAdd(&targets[target_id + im], expanded_data[source_id + im]);
                }
            }
        }
    }
} |
23,302 | #include <stdio.h>
#include <assert.h>
#include <math.h>
#include <limits.h>
// Computes, one training vector per thread, the squared Euclidean distance
// between training row `tid` and the test vector starting at test[index],
// writing the result to dist[tid]. Threads beyond tr_num do nothing.
__global__ void operate(int *test, int *train, double *dist, int tr_num, int index, int dimen){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < tr_num)
    {
        // Hoist the loop-invariant row base address out of the loop.
        int loc = tid * dimen;
        double sum = 0.0;
        for (int i = 0; i < dimen; i++) {
            // Widen to double before squaring so large component differences
            // cannot overflow int arithmetic.
            double diff = (double)(train[loc + i] - test[index + i]);
            sum += diff * diff;
        }
        dist[tid] = sum;
    }
}
// Debug kernel: prints `size` ints, 16 per line (intended for a 1-thread launch).
__global__ void write(int *bla, int size)
{
    for (int idx = 0; idx < size; ++idx)
    {
        bool startNewLine = (idx != 0) && (idx % 16 == 0);
        if (startNewLine) printf("\nline %d \n", idx/16);
        printf("%d ", bla[idx]);
    }
}
// 1-NN search driver: for each test vector, launches `operate` to compute
// distances to every training vector, copies them back, and writes the index
// of the nearest training vector (one per line) to out.txt.
// Note: d_test/d_train/d_dist are passed by value, so the (re)allocations here
// are not visible to the caller — unchanged from the original contract.
void call(int *test, int *train, double *dist, int *d_test, int *d_train, double *d_dist, int ts_num, int tr_num, int dimen)
{
    int ts_size = ts_num*dimen;
    int tr_size = tr_num*dimen;
    cudaMalloc( (void**)&d_test, ts_size*sizeof(int));
    cudaMalloc( (void**)&d_train, tr_size*sizeof(int));
    cudaMalloc( (void**)&d_dist, tr_num*sizeof(double));
    cudaMemcpy(d_test, test, ts_size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_train, train, tr_size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dist, dist, tr_num*sizeof(double), cudaMemcpyHostToDevice);
    FILE *f = fopen("out.txt", "w");
    if (f == NULL)
    {
        printf("Error opening file!\n");
        exit(1);
    }
    // BUGFIX: the grid was hard-coded to 20 blocks of 1024 threads, which
    // silently skips training vectors beyond index 20479. Size the grid from
    // tr_num instead (ceil-div).
    const int threadsPerBlock = 1024;
    const int numBlocks = (tr_num + threadsPerBlock - 1) / threadsPerBlock;
    float totaltime;
    cudaEvent_t startEvent, endEvent;
    cudaEventCreate(&startEvent);
    cudaEventCreate(&endEvent);
    cudaEventRecord(startEvent, 0);
    for (int i = 0; i < ts_size; i += dimen)
    {
        operate<<<numBlocks, threadsPerBlock>>>(d_test, d_train, d_dist, tr_num, i, dimen);
        // This blocking copy also synchronizes with the kernel above.
        cudaMemcpy(dist, d_dist, tr_num*sizeof(double), cudaMemcpyDeviceToHost);
        // Host-side argmin over the distances.
        double min_dist = 100000000.0;
        int which = -1;
        for (int j = 0; j < tr_num; j++)
        {
            if (min_dist > dist[j]) {
                which = j;
                min_dist = dist[j];
            }
        }
        fprintf(f, "%d\n", which);
    }
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&totaltime, startEvent, endEvent);
    fclose(f);
    printf("Execution time is %f secs.\n", totaltime/1000);
    // Release the timing events (previously leaked) and the device buffers.
    cudaEventDestroy(startEvent);
    cudaEventDestroy(endEvent);
    cudaFree(d_test);
    cudaFree(d_train);
    cudaFree(d_dist);
} |
23,303 | #include "includes.h"
// Grid-stride fill: sets a[0..len) to `value`. Correct for any grid/block
// configuration, including a single-thread launch.
__global__ void set_array_double(double *a, double value, size_t len)
{
    const size_t stride = blockDim.x * gridDim.x;
    for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < len; i += stride) {
        a[i] = value;
    }
} |
23,304 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
#include <limits>
static double inf = std::numeric_limits<double>::max();
using namespace std::chrono;
// Reads (AAPL, MSFT) price pairs from stdin, copies them to the GPU, and
// prints the mean element-wise difference AAPL[i] - MSFT[i].
int main() {
    double s;
    thrust::host_vector<double> host_AAPL;
    thrust::host_vector<double> host_MSFT;
    std::string line;
    high_resolution_clock::time_point start, end;
    // BUGFIX: the loop used to test std::cin.eof() *before* reading, which
    // appends a stale/garbage value after the last successful read. Testing
    // the extraction itself avoids that, and an incomplete trailing pair is
    // dropped so both vectors stay the same length.
    while (std::cin >> s) {
        double a = s;
        if (!(std::cin >> s)) break;
        host_AAPL.push_back(a);
        host_MSFT.push_back(s);
    }
    /* the lines below copy the data to the GPU */
    start = high_resolution_clock::now();
    thrust::device_vector<double> dev_AAPL(host_AAPL);
    thrust::device_vector<double> dev_MSFT(host_MSFT);
    thrust::device_vector<double> dev(host_AAPL.size());
    end = high_resolution_clock::now();
    double d = duration_cast<milliseconds>(end - start).count();
    std::cerr << "ASSIGN AND COPY TO GPU:" << d << " ms" << std::endl;
    start = high_resolution_clock::now();
    thrust::transform(dev_AAPL.begin(), dev_AAPL.end(), dev_MSFT.begin(), dev.begin(), thrust::minus<double>());
    // BUGFIX: the reduction's initial value was the int literal 0, which makes
    // thrust::reduce accumulate (and truncate) in int. Use 0.0 so the sum is
    // carried in double.
    double avg = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>());
    avg = avg/host_AAPL.size();
    std::cout << avg << std::endl;
    return 0;
}
|
23,305 | #include <stdio.h>
#include <cuda.h>
#include <math.h>
// One Jacobi sweep of a 2-D Poisson problem on a grid laid out with blockDim.x
// columns per row. Interior threads write the 4-neighbour average plus the
// source term into d_B, then accumulate their |B - A| residual (normalised by
// the total thread count) into *diff.
// NOTE(review): the boundary test uses threadAbove <= 0 / threadBelow >= N-1,
// which also skips index 1's row neighbours and the last full row — preserved
// as-is from the original.
__global__ void TwoDimPoisson(float *d_A, float *d_B, float *d_F, double dx, float* diff)
{
    int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
    int threadAbove = threadId - blockDim.x;
    int threadBelow = threadId + blockDim.x;
    int N = gridDim.x * blockDim.x;
    // Punt if this thread is a boundary point
    if ((threadAbove <= 0) || (threadBelow >= N-1) ||
        (threadId % (blockDim.x) == 0) ||
        ((threadId+1) % (blockDim.x) == 0))
        return;
    // dx*dx replaces pow(dx, 2.0): no transcendental call needed, and the
    // float literal .25f keeps the stencil arithmetic in single precision.
    d_B[threadId] = (.25f*(d_A[threadId+1] + d_A[threadId-1]
                    + d_A[threadAbove] + d_A[threadBelow])
                    + d_F[threadId]*(float)(dx*dx));
    // BUGFIX: use fabsf for the float residual; with <math.h> a bare abs()
    // can resolve to the int overload and truncate the residual toward zero.
    atomicAdd(diff, fabsf(d_B[threadId]-d_A[threadId])/(blockDim.x*gridDim.x));
}
int main(int argc, char** argv)
{
    // Usage: prog <n> — iterates Jacobi sweeps on an n x n grid until the
    // average residual drops below tolerance, then prints the solution.
    // BUGFIX: guard argv[1] — the original dereferenced it unconditionally.
    if (argc < 2)
    {
        printf("Usage: %s <n>\n", argv[0]);
        return 1;
    }
    const int n = atoi(argv[1]);
    int steps = 0;
    const int BYTES = n*n * sizeof(float);
    float* h_A = new float[n*n];
    float* h_B = new float[n*n];
    float* h_F = new float[n*n];
    double dx = 0.1;
    // Boundary/source setup: left column fixed at 1000; right column carries a
    // linearly varying source term that is also folded into the initial guess.
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            h_A[n*i + j] = 0;
            h_B[n*i + j] = 0;
            h_F[n*i + j] = 0;
            if (j==0) h_A[n*i + j] = 1000;
            if (j==n-1) h_F[n*i + j] = 5*i+20;
            h_A[n*i + j]+=h_F[n*i + j];
        }
    }
    //declare GPU memory pointers
    float *d_A;
    float *d_B;
    float *d_F;
    float *diff;
    //allocate memory on the device
    cudaMalloc((void **) &d_A, BYTES);
    cudaMalloc((void **) &d_B, BYTES);
    cudaMalloc((void **) &d_F, BYTES);
    cudaMallocManaged(&diff, sizeof(float));
    *diff = 0.0f;
    //transfer the arrays to the GPU (destination, source, size, method)
    cudaMemcpy(d_B, h_B, BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_A, h_A, BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, d_A, BYTES, cudaMemcpyDeviceToDevice);
    cudaMemcpy(d_F, h_F, BYTES, cudaMemcpyHostToDevice);
    // Iterate sweeps; the managed residual *diff must only be read after the
    // cudaDeviceSynchronize() that follows each launch.
    while (true)
    {
        steps++;
        *diff = 0.0f;
        TwoDimPoisson<<<n,n>>>(d_A, d_B, d_F, dx, diff);
        cudaDeviceSynchronize();
        cudaMemcpy(h_A, d_A, BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_B, d_B, BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(d_A, d_B, BYTES, cudaMemcpyDeviceToDevice);
        if (*diff < 0.0001f) break;
    }
    //copy the result back to the host
    cudaMemcpy(h_B, d_B, BYTES, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            printf("%-10.3f ", h_B[i*n + j]);
        }
        printf("\n");
    }
    printf("\nSteps: %d \nn: %d\n",steps,n);
    // BUGFIX: free everything that was allocated — the managed residual buffer
    // and the three host arrays were leaked before.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_F);
    cudaFree(diff);
    delete[] h_A;
    delete[] h_B;
    delete[] h_F;
}
|
23,306 | #include "includes.h"
// Computes the IoU of every proposal against every ground-truth instance of
// that proposal's sample. Grid-stride over proposals (one block per proposal);
// threads of the block stride over the sample's GT instances. Output layout:
//   proposals_iou[proposal_id * total_gt_instances + instanceOffset + instance_id]
// NOTE(review): assumes proposals_offset has nProposal+1 entries and that all
// points of a proposal belong to one sample (the batch id of the first point
// is used for the whole proposal) — confirm with the host-side wrapper.
__global__ void instance_iou_cuda_kernel( int64_t total_gt_instances, const int64_t* __restrict__ nInstance, int nProposal, const int64_t* __restrict__ proposals_idx, const int64_t* __restrict__ proposals_offset, const int64_t* __restrict__ instance_labels, const int64_t* __restrict__ offset_num_gt_instances, const int64_t* __restrict__ batch, const int64_t* __restrict__ instance_pointnum, float* proposals_iou)
{
for (int proposal_id = blockIdx.x; proposal_id < nProposal; proposal_id += gridDim.x)
{
// [start, end) indexes this proposal's points inside proposals_idx.
int start = proposals_offset[proposal_id];
int end = proposals_offset[proposal_id + 1];
int sampleIdx = batch[proposals_idx[start]];
int sampleNInstances = nInstance[sampleIdx];
int instanceOffset = offset_num_gt_instances[sampleIdx];
int proposal_total = end - start;
for (int instance_id = threadIdx.x; instance_id < sampleNInstances;
instance_id += blockDim.x)
{
int instance_total = instance_pointnum[instanceOffset + instance_id];
// Count proposal points carrying this instance label (labels are 1-based).
int intersection = 0;
for (int i = start; i < end; i++)
{
int idx = proposals_idx[i];
if ((int)instance_labels[idx] == instance_id + 1)
{ // 0 is reserved for "no instance"
intersection += 1;
}
}
// IoU = |A∩B| / |A∪B|; the epsilon guards against a zero-sized union.
proposals_iou[instanceOffset + instance_id + proposal_id * total_gt_instances] =
(float)intersection /
((float)(proposal_total + instance_total - intersection) + 1e-5);
}
}
} |
23,307 | #include "includes.h"
// Solves a*x^2 + b*x + c = 0 and writes the two roots, truncated to int.
// BUGFIX: the vertex term was written `-b / 2 * a`, which parses as (-b/2)*a
// instead of -b/(2a), and was computed in integer arithmetic. The formula is
// now evaluated in float as (-b ± sqrt(b^2 - 4ac)) / (2a).
// NOTE(review): a negative discriminant still produces NaN (undefined when
// converted to int), as before — callers must ensure real roots exist.
__global__ void solve_GPU(int a, int b, int c ,int *x1, int *x2)
{
    float disc = (float)(b * b - 4 * a * c);
    float root = sqrtf(disc);
    float denom = 2.0f * (float)a;
    *x1 = (int)((-b + root) / denom);
    *x2 = (int)((-b - root) / denom);
} |
23,308 | /* Program : To find the matrix multiplication of rectangular matrices without tiling
* Author : Anant Shah
* Date : 11-9-2018
* Roll Number : EE16B105
**/
#include<stdio.h>
#define ERROR_HANDLER(error_msg,line) error_handler(error_msg,line)
#define ROWS_M 4096
#define COLS_M 8192
#define ROWS_N 8192
#define COLS_N 16384
#define NUM_THREADS_X 16
#define NUM_THREADS_Y 16
/* Terminates the program with a diagnostic when a CUDA API call fails;
 * no-op on cudaSuccess. Invoked through the ERROR_HANDLER macro. */
void error_handler(cudaError_t error_msg,int line){
	if(error_msg==cudaSuccess) return;
	printf("%s in %s at line %d",cudaGetErrorString(error_msg),__FILE__,line);
	exit(EXIT_FAILURE);
}
/* Populates a numRows x numCols row-major matrix with the deterministic
 * pattern mat[i][j] = i*2.1f + j*3.2f (computed in float, stored as double). */
void fill_matrix(double *mat,unsigned numRows,unsigned numCols){
	for(unsigned r=0;r<numRows;r++){
		double *row = mat + (size_t)r*numCols;
		for(unsigned c=0;c<numCols;c++){
			row[c] = r*2.1f+c*3.2f;
		}
	}
}
/* Appends a numRows x numCols row-major matrix to "assignment2_out",
 * one row per output line. */
void print_matrix_to_file(double *mat,unsigned numRows,unsigned numCols){
	const char *fname = "assignment2_out";
	FILE *f = fopen(fname,"a");
	for(unsigned r=0;r<numRows;r++){
		const double *row = mat + (size_t)r*numCols;
		for(unsigned c=0;c<numCols;c++){
			fprintf(f,"%4.4f ", row[c]);
		}
		fprintf(f,"\n");
	}
	fclose(f);
}
/* Naive (untiled) rectangular matrix multiply: P = M * N.
 * M is (numRows_M x dim), N is (dim x numCols_N), P is (numRows_M x numCols_N).
 * Launch with a 2-D grid covering P; each thread produces one output element,
 * with a tail guard for grids larger than the output. */
__global__ void matrixMul(double *M,double *N,double *P,unsigned numRows_M,unsigned numCols_N,unsigned dim){
	unsigned row = blockIdx.y*blockDim.y+threadIdx.y;
	unsigned col = blockIdx.x*blockDim.x+threadIdx.x;
	if(row>=numRows_M || col>=numCols_N) return;
	double acc = 0.0;
	for(unsigned k=0;k<dim;k++){
		acc += M[row*dim+k]*N[k*numCols_N+col];
	}
	P[row*numCols_N+col] = acc;
}
int main(int argc,char **argv){
	/* Multiplies two fixed-size matrices (M: ROWS_M x COLS_M, N: ROWS_N x
	 * COLS_N) on the GPU, times the kernel with CUDA events, and appends the
	 * product to a file. Takes no command-line arguments. */
	if(argc!=1){
		printf("error : Invalid number of arguments\n");
		exit(EXIT_FAILURE);
	}
	if(COLS_M!=ROWS_N){
		printf("Error : Invalid matrix dimensions");
		exit(EXIT_FAILURE);
	}
	/************************************* Variable Initialization **************************************/
	double *h_M;		/* Rectangular matrix M on the host */
	double *d_M;		/* Rectangular matrix M on the device */
	size_t size_M;		/* Size of the matrix M in bytes */
	double *h_N;		/* Rectangular matrix N on the host */
	double *d_N;		/* Rectangular matrix N on the device */
	size_t size_N;		/* Size of the matrix N in bytes */
	double *h_P;		/* Product M*N on the host */
	double *d_P;		/* Product M*N on the device */
	size_t size_P;		/* Size of the matrix P in bytes */
	cudaEvent_t start,stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	size_M = sizeof(double)*ROWS_M*COLS_M;
	size_N = sizeof(double)*ROWS_N*COLS_N;
	size_P = sizeof(double)*ROWS_M*COLS_N;
	/************************************** Memory Allocation on the Host ********************************/
	h_M = (double *)malloc(size_M);
	h_N = (double *)malloc(size_N);
	h_P = (double *)malloc(size_P);
	/* BUGFIX: these are multi-gigabyte buffers — check for allocation failure
	 * instead of writing through NULL. */
	if(h_M==NULL || h_N==NULL || h_P==NULL){
		printf("error : host memory allocation failed\n");
		exit(EXIT_FAILURE);
	}
	/************************************** Initialize the matrices ***************************************/
	fill_matrix(h_M,ROWS_M,COLS_M);
	fill_matrix(h_N,ROWS_N,COLS_N);
	/************************************** Allocate memory on the device *********************************/
	ERROR_HANDLER(cudaMalloc((void **)&d_M,size_M),__LINE__);
	ERROR_HANDLER(cudaMalloc((void **)&d_N,size_N),__LINE__);
	ERROR_HANDLER(cudaMalloc((void **)&d_P,size_P),__LINE__);
	/************************************** Copy Matrices to the device ***********************************/
	ERROR_HANDLER(cudaMemcpy(d_M,h_M,size_M,cudaMemcpyHostToDevice),__LINE__);
	ERROR_HANDLER(cudaMemcpy(d_N,h_N,size_N,cudaMemcpyHostToDevice),__LINE__);
	/************************************** Kernel invocation *********************************************/
	dim3 threads(NUM_THREADS_X,NUM_THREADS_Y);	/* 2-D layout of the threads in a block */
	dim3 blocks((COLS_N+NUM_THREADS_X-1)/NUM_THREADS_X,(ROWS_M+NUM_THREADS_Y-1)/NUM_THREADS_Y);	/* 2-D layout of blocks in a grid */
	cudaEventRecord(start);
	matrixMul<<<blocks,threads>>>(d_M,d_N,d_P,ROWS_M,COLS_N,COLS_M);	/* The last parameter could have been <ROWS_N> */
	/* BUGFIX: launch-configuration errors do not surface through cudaMemcpy's
	 * return value alone — check them explicitly. */
	ERROR_HANDLER(cudaGetLastError(),__LINE__);
	cudaEventRecord(stop);
	ERROR_HANDLER(cudaMemcpy(h_P,d_P,size_P,cudaMemcpyDeviceToHost),__LINE__);
	cudaEventSynchronize(stop);
	float run_time = 0.0;
	cudaEventElapsedTime(&run_time,start,stop);
	printf("Run-Time(seconds) : %.4f",run_time/1000);
	print_matrix_to_file(h_P,ROWS_M,COLS_N);
	/********************************** Free Allocated Memory ********************************************/
	cudaEventDestroy(start);	/* events were leaked before */
	cudaEventDestroy(stop);
	cudaFree(d_M);
	cudaFree(d_N);
	cudaFree(d_P);
	free(h_M);
	free(h_N);
	free(h_P);
}
|
23,309 | #include "includes.h"
// Splits interleaved per-coordinate data: for each index pos < ncoord, the
// four consecutive float3 entries theta[4*pos .. 4*pos+3] are de-interleaved
// into one float4 per component (x, y, z). One thread per coordinate.
__global__ void change_theta(const int ncoord, const float3 *theta, float4 *thetax, float4 *thetay, float4 *thetaz) {
    const unsigned int pos = blockIdx.x*blockDim.x + threadIdx.x;
    if (pos >= ncoord) return;
    const float3 t0 = theta[pos*4];
    const float3 t1 = theta[pos*4+1];
    const float3 t2 = theta[pos*4+2];
    const float3 t3 = theta[pos*4+3];
    thetax[pos] = make_float4(t0.x, t1.x, t2.x, t3.x);
    thetay[pos] = make_float4(t0.y, t1.y, t2.y, t3.y);
    thetaz[pos] = make_float4(t0.z, t1.z, t2.z, t3.z);
} |
23,310 | /*
backup: v_backup = v
history update: v = m * v - learning_rate * dx
parameter update: x = x - m * v_backup + v + m * v
*/
// Nesterov-momentum SGD update (see comment block above for the math).
// Each thread handles `numberIterations` consecutive entries of the parameter
// selected by blockIdx.x via parameterIndices; an index of -1 skips the block.
__global__ void nesterovKernel (
    int numberIterations,
    float learningRate,
    float momentum,
    float* history,
    float* backup,
    int* parameterIndices,
    int* counts,
    int parameterSize,
    float* parameters,
    float* gradient) {
    int startEntry = (blockIdx.y * blockDim.x * numberIterations) + threadIdx.x * numberIterations;
    if(startEntry < parameterSize) {
        int gradientIndex = blockIdx.x;
        int parameterIndex = parameterIndices[gradientIndex];
        if(parameterIndex != -1) {
            // BUGFIX: clamp the per-thread chunk so the last thread does not
            // run past parameterSize when it is not a multiple of
            // numberIterations (only startEntry was bounds-checked before).
            int endEntry = min(startEntry + numberIterations, parameterSize);
            int startParameter = parameterIndex * parameterSize + startEntry;
            int endParameter = parameterIndex * parameterSize + endEntry;
            int startGradient = gradientIndex * parameterSize + startEntry;
            // Accumulated gradients are averaged over their contribution count.
            float scalingFactor = 1.0f / (float)counts[gradientIndex];
            for(int indexParameter = startParameter, indexGradient = startGradient; indexParameter < endParameter; indexParameter++, indexGradient++) {
                // v_backup = v
                float entryBackup = history[indexParameter];
                backup[indexParameter] = entryBackup;
                // v = m * v - lr * dx   (dx pre-scaled by 1/count)
                float entryUpdate = momentum * entryBackup - scalingFactor * learningRate * gradient[indexGradient];
                history[indexParameter] = entryUpdate;
                // x = x - m * v_backup + (1 + m) * v
                // (1.0f literals keep the arithmetic in single precision; the
                // previous 1.0 forced a double round-trip.)
                float removedPreviousLookAhead = parameters[indexParameter] - momentum * entryBackup;
                parameters[indexParameter] = removedPreviousLookAhead + (1.0f + momentum) * entryUpdate;
            }
        }
    }
} |
23,311 | //When wold using shared memory make sense ?
//So, let's say you have a big array in the host memory and you transfer it to
//GPU memory and the task is to square each element of this array - This won't
//be a very good usage of __shared__ memory as you would first have to load
//from global to shared memory and then from shared memory to the thread
//memory for usage and there is no data reuse, so not a good example.Instead
//if you use normally, i.e. take the data from global to the thread memory
//then only 1 read so using shared memory here may make it slower on the
//contrary.
//However, let's say, each output element of the array is such that it
//equals the corresponding value,squared, plus the average of both it's left
//and right neighbours, squared.
//So for element arr[i] it would be arr[i]^2 + (arr[i-1]+arr[i+1]/2)^2. Here
//there is element reuse as is evident and hence shared memory makes sense to
//speed it up.
#include <stdio.h>
//Kernel
// Each output element becomes arr[i]^2 + (mean of left/right neighbours)^2,
// with circular wrap-around at the array ends. The input is staged through
// shared memory because every element is reused by both of its neighbours.
// Launch with exactly one block of 1024 threads.
__global__ void compute(float *data)
{
    int tid = threadIdx.x;
    __shared__ float arr[1024];
    // Load this thread's element into shared memory.
    arr[tid] = data[tid];
    // All 1024 elements must be resident before any thread reads a neighbour;
    // without the barrier a thread could read an uninitialised slot.
    __syncthreads();
    // BUGFIX: the right-neighbour selector was `tid<0 ? tid+1 : 0`, which is
    // never true, so every thread averaged against arr[0]. The comments above
    // the kernel state the intent is the wrap-around right neighbour, i.e.
    // tid < 1023 ? tid+1 : 0.
    float left  = arr[tid > 0 ? tid - 1 : 1023];
    float right = arr[tid < 1023 ? tid + 1 : 0];
    float temp = (left + right) * 0.5f;       // neighbour average
    temp = temp * temp + arr[tid] * arr[tid];
    // Write straight back to global memory — no reuse of the result, so no
    // second shared-memory round trip is needed.
    data[tid] = temp;
}
int main(int argc, char ** argv)
{
    // Fills a 1024-element array, runs the shared-memory `compute` kernel on
    // it, and prints the results.
    float data_h[1024];
    float data_out_h[1024];
    float *data_d;
    int i;
    // BUGFIX: initialise all 1024 elements — the loop used to stop at 1023,
    // leaving data_h[1023] uninitialised while the kernel reads all 1024.
    for(i=0;i<1024;i++){
        data_h[i] = 2.0f;
    }
    //Device memory allocate
    cudaMalloc((void **)&data_d,1024*sizeof(float));
    //Transfer from host to device
    cudaMemcpy(data_d,data_h,1024*sizeof(float),cudaMemcpyHostToDevice);
    //Kernel call: one block of 1024 threads, as the kernel requires
    compute<<<1,1024>>>(data_d);
    //Copy back to host from device (blocking copy also syncs with the kernel)
    cudaMemcpy(data_out_h,data_d,1024*sizeof(float),cudaMemcpyDeviceToHost);
    // BUGFIX: print all 1024 results (was 1023).
    for(i=0;i<1024;i++){
        printf("%f ",data_out_h[i]);
    }
    //free memory
    cudaFree(data_d);
    return 0;
}
|
23,312 | #include "includes.h"
//
// imgproc_main.cpp
//
//
// Created by Nathaniel Lewis on 3/8/12.
// Copyright (c) 2012 E1FTW Games. All rights reserved.
//
// GPU constant memory to hold our kernels (extremely fast access time)
__constant__ float convolutionKernelStore[256];
/**
* Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border
* of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom
*
* @param source Source image host pinned memory pointer
* @param width Source image width
* @param height Source image height
* @param paddingX source image padding along x
* @param paddingY source image padding along y
* @param kOffset offset into kernel store constant memory
* @param kWidth kernel width
* @param kHeight kernel height
* @param destination Destination image host pinned memory pointer
*/
// converts the pythagoran theorem along a vector on the GPU
// create an image buffer. return host ptr, pass out device pointer through pointer to pointer
// Applies the convolution kernel stored at convolutionKernelStore[kOffset..]
// to the source image; see the doc block above for parameter details.
// NOTE(review): pixel validity is tested against the grid extent
// (blockDim * gridDim), not against width/height — this assumes the launch
// grid exactly covers the image; confirm with the host-side launch code.
__global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
float sum = 0.0;
// Half-kernel extents: the border of this many pixels is left unconvolved.
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
// Sample the weight for this location (kernel lives in constant memory,
// which broadcasts efficiently since all threads read the same weight)
int ki = (i+pWidth);
int kj = (j+pHeight);
float w = convolutionKernelStore[(kj * kWidth) + ki + kOffset];
sum += w * float(source[((y+j) * width) + (x+i)]);
}
}
}
// Every thread writes its pixel; border/invalid pixels receive 0 since their
// sum was never accumulated.
destination[(y * width) + x] = (unsigned char) sum;
} |
23,313 | #include "includes.h"
// Returns true when the cell lies on the edge of the 2-D domain. The
// parameters deliberately shadow the CUDA built-in names: callers pass
// blockIdx.x (row), blockDim.x (row width) and threadIdx.x (column).
// NOTE(review): the bottom edge is hard-coded as row 479 (i.e. a 480-row
// grid) — confirm this matches the launch configuration.
__device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){
int x = threadIdx;
int y = blockIdx;
return (x == 0 || x == (blockDim-1) || y == 0 || y == 479);
}
// Subtracts the scaled central-difference gradient of `scalar` from the 2-D
// field (u_dimX, u_dimY). One block per row, one thread per column; cells on
// the domain edge (per checkBoundary) are skipped.
__global__ void mGradient_TwoDim(float *u_dimX, float *u_dimY, float *scalar, float coeffX, float coeffY) {
    if (checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) return;
    const int center = blockIdx.x * blockDim.x + threadIdx.x;
    const int rowStride = blockDim.x;
    u_dimX[center] -= (scalar[center + 1] - scalar[center - 1]) * coeffX;
    u_dimY[center] -= (scalar[center + rowStride] - scalar[center - rowStride]) * coeffY;
} |
23,314 | #include <stdio.h>
// Adds the two device scalars *a_d and *b_d, storing the sum in *c_d.
__global__ void kernel(int* a_d, int* b_d, int* c_d){
    int sum = *a_d + *b_d;
    *c_d = sum;
}
int main(){
    // Adds 1 + 2 on the GPU and prints the result.
    int a = 1, b = 2;
    int *a_d, *b_d, *c_d;
    cudaMalloc((void**) &a_d, sizeof(int));
    cudaMalloc((void**) &b_d, sizeof(int));
    cudaMalloc((void**) &c_d, sizeof(int));
    cudaMemcpy(a_d, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, &b, sizeof(int), cudaMemcpyHostToDevice);
    int c;
    kernel<<<1, 1>>>(a_d, b_d, c_d);
    // The blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost);
    // BUGFIX: cudaFree takes the device pointer itself (void*); the previous
    // (void**) casts only happened to pass the right value.
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    printf("%d\n", c);
}
|
23,315 | #include <stdio.h>
#include <time.h>
#include <sys/time.h>
// CPU: marca o tempo
// CPU: returns the current wall-clock time in seconds (µs resolution).
__host__ double wtime() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + now.tv_usec / 1000000.0;
}
// CPU: Núcleo de execução (processamento)
// CPU: busy-work loop used only for timing comparisons; the result is discarded.
__host__ void fhcalc(int n)
{
    double acc = 0;
    for (int rep = 0; rep < 10000; rep++)
        for (int k = 2; k < n; ++k)
            acc = pow(acc, k);
}
// GPU: Núcleo de execução (processamento)
// GPU: same busy-work loop as fhcalc, run per-thread; the result is discarded.
__global__ void fdcalc(int n)
{
    double acc = 0;
    for (int rep = 0; rep < 10000; rep++)
        for (int k = 2; k < n; ++k)
            acc = pow(acc, k);
}
// CPU: Função principal
// CPU: main — benchmarks the same busy-work routine three ways: on the GPU
// with a single thread, on the GPU with <nblocos> x <nthreads> threads (each
// doing ncalc/nthreads iterations), and on the host CPU.
int main (int argc, char ** argv) {
int nthreads = 400;
int nblocos = 1;
int ncalc = 400;
double start_time = 0, end_time = 0;
// Parameter handling: ./05_proc <nblocos> <nthreads> <nloop>; defaults are
// used (and announced) when the arguments are missing.
if (argc == 4) {
nblocos = atoi(argv[1]);
nthreads = atoi(argv[2]);
ncalc = atoi(argv[3]);
} else {
printf ("\n ############# \n");
printf ("./05_proc <nblocos> <nthreads> <nloop>\n");
printf ("Caso não haja passagem de parâmetros, atribuiu-se:\n(1) nblocos c/ %d, nthreads definido c/ %d e ncalc = %d \n", nblocos, nthreads, ncalc);
}
// Time the kernel with 1 block / 1 thread.
// NOTE(review): launch errors are never checked (no cudaGetLastError), so a
// failed launch would just report a near-zero runtime.
printf ("\n##### DEVICE (1,1) #####\n");
start_time = wtime();
// GPU: launch the kernel as <<<1,1>>>
fdcalc<<<1,1>>>(ncalc);
// CPU: wait for the GPU to finish before stopping the clock
cudaDeviceSynchronize();
end_time = wtime();
printf("\n");
printf("\tRuntime: %f\n", end_time - start_time);
// Time the kernel with n blocks and m threads.
printf ("\n##### DEVICE (%d,%d) #####\n", nblocos, nthreads);
start_time = wtime();
// GPU: launch as <<<nblocos,nthreads>>>; note ncalc/nthreads is integer
// division, so the per-thread workload is truncated.
fdcalc<<<nblocos,nthreads>>>(ncalc/nthreads);
// CPU: wait for the GPU to finish
cudaDeviceSynchronize();
end_time = wtime();
printf("\n");
printf("\tRuntime: %f\n", end_time - start_time);
// Time the same workload on the host CPU.
printf ("\n##### HOST #####\n");
start_time = wtime();
fhcalc(ncalc);
cudaDeviceSynchronize();
end_time = wtime();
printf("\n");
printf("\tRuntime: %f\n", end_time - start_time);
return 0;
}
|
23,316 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAX_CHAR 100
#define DATAFILE "data.txt"
#define RESULTSFILE "resultsCudal.txt"
#define G 6.674e-11
#define NUM_ITER 1000
#define NUM_ITER_SHOW 50
// Software double-precision atomic add built on 64-bit atomicCAS, for devices
// without a native atomicAdd(double*). Returns the value previously stored at
// *address, mirroring the hardware atomicAdd contract.
__device__ double atomicAddD(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
// Reinterpret the bits, add in double precision, and publish only if no
// other thread wrote in between; otherwise retry with the fresh value.
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// For each ordered pair (posx, posy) of distinct objects — one thread per
// pair on a 2-D grid — computes the acceleration that object posy's gravity
// induces on object posx and accumulates it atomically into gpu_vx/gpu_vy.
// NOTE(review): there is no bound check against the object count; the caller
// launches a 1024x1024 thread grid regardless of noOfObjects, so the arrays
// must be sized for the full grid (or a count parameter added) — verify.
__global__ void asteroid(double * gpu_x, double * gpu_y, double * gpu_vx, double * gpu_vy, double * gpu_m){
int posx = blockIdx.x * blockDim.x + threadIdx.x;
int posy = blockIdx.y * blockDim.y + threadIdx.y;
if (posx!=posy) {
// Pairwise distance, Newtonian force magnitude, and its x/y components.
double d = sqrt(pow( (gpu_x[posx]-gpu_x[posy]),2.0) + pow( (gpu_y[posx]-gpu_y[posy]),2.0));
double f = G*gpu_m[posx]*gpu_m[posy]/pow(d,2.0);
double fx = f*(gpu_x[posy]-gpu_x[posx])/d;
double fy = f*(gpu_y[posy]-gpu_y[posx])/d;
double ax = fx/gpu_m[posx];
double ay = fy/gpu_m[posx];
// atomicAddD(gpu_vx+posx, ax);
// atomicAddD(gpu_vy+posx, ay);
atomicAddD(&gpu_vx[posx], ax);
atomicAddD(&gpu_vy[posx], ay);
}
}
// Advances each object's position by its current velocity (one thread per
// object). gpu_m is unused but kept for signature parity with `asteroid`.
// NOTE(review): like `asteroid`, no bound check against the object count —
// the arrays must cover the full launch (currently 1024 threads); verify.
__global__ void positions(double * gpu_x, double * gpu_y, double * gpu_vx, double * gpu_vy, double * gpu_m){
    // BUGFIX: the global index was computed as blockIdx.x * blockIdx.x — a
    // typo that only worked for single-block launches. The correct stride
    // between blocks is blockDim.x.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    atomicAddD(&gpu_x[i], gpu_vx[i]);
    atomicAddD(&gpu_y[i], gpu_vy[i]);
}
int main(){
    // N-body simulation: reads object states from DATAFILE, iterates NUM_ITER
    // velocity/position updates on the GPU, then writes each object's total
    // displacement to RESULTSFILE.
    clock_t start, end;
    double time_used;
    char str[MAX_CHAR];
    FILE *file;
    int noOfObjects;
    int i;
    file = fopen( DATAFILE, "r");
    // BUGFIX: fail cleanly when the data file is missing instead of
    // dereferencing a NULL FILE*.
    if (file == NULL) {
        printf("Error opening %s\n", DATAFILE);
        return 1;
    }
    fscanf(file,"%s",str);
    noOfObjects = atoi(str);
    printf("Number of objects: %d\n",noOfObjects);
    const int ARRAY_BYTES = noOfObjects * sizeof(double);
    double *x = (double *) malloc(ARRAY_BYTES);
    double *y = (double *) malloc(ARRAY_BYTES);
    double *vx = (double *) malloc(ARRAY_BYTES);
    double *vy = (double *) malloc(ARRAY_BYTES);
    double *m = (double *) malloc(ARRAY_BYTES);
    // *_new keep the initial positions so displacement can be computed later.
    double *x_new = (double *) malloc(ARRAY_BYTES);
    double *y_new = (double *) malloc(ARRAY_BYTES);
    double *vx_new = (double *) malloc(ARRAY_BYTES);
    double *vy_new = (double *) malloc(ARRAY_BYTES);
    // declare GPU memory pointers
    double * gpu_x;
    double * gpu_y;
    double * gpu_vx;
    double * gpu_vy;
    double * gpu_m;
    double * gpu_x_new;
    double * gpu_y_new;
    double * gpu_vx_new;
    double * gpu_vy_new;
    // allocate GPU memory
    cudaMalloc((void**) &gpu_x, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_y, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_vx, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_vy, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_m, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_x_new, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_y_new, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_vx_new, ARRAY_BYTES);
    cudaMalloc((void**) &gpu_vy_new, ARRAY_BYTES);
    // read the object states: x, y, vx, vy, mass per line
    for (i=0; i < noOfObjects; i++) {
        fscanf(file,"%s",str);
        x[i] = atof(str);
        x_new[i] = atof(str);
        fscanf(file,"%s",str);
        y[i] = atof(str);
        y_new[i] = atof(str);
        fscanf(file,"%s",str);
        vx[i] = atof(str);
        vx_new[i] = atof(str);
        fscanf(file,"%s",str);
        vy[i] = atof(str);
        vy_new[i] = atof(str);
        fscanf(file,"%s",str);
        m[i] = atof(str);
    }
    fclose(file);
    // transfer the arrays to the GPU
    cudaMemcpy(gpu_x, x, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_y, y, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_vx, vx, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_vy, vy, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_m, m, ARRAY_BYTES, cudaMemcpyHostToDevice);
    start=clock();
    dim3 blocksPerGrid(32,32);     // blocks per grid
    dim3 threadsPerBlock(32,32);   // threads per block
    for (int niter=0; niter<=NUM_ITER; niter++) {
        asteroid<<<blocksPerGrid, threadsPerBlock>>>(gpu_x, gpu_y, gpu_vx, gpu_vy, gpu_m);
        positions<<<1, 1024>>>(gpu_x, gpu_y, gpu_vx, gpu_vy, gpu_m);
        if (niter%NUM_ITER_SHOW == 0)
            printf("Iteration %d/%d\n", niter, NUM_ITER);
    } // nIter
    // BUGFIX: kernel launches are asynchronous — synchronize before stopping
    // the clock so the measurement actually includes the GPU work.
    cudaDeviceSynchronize();
    end=clock();
    // copy back the result arrays to the CPU
    cudaMemcpy(x, gpu_x, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(y, gpu_y, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(vx, gpu_vx, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(vy, gpu_vy, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaMemcpy(m, gpu_m, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    file = fopen( RESULTSFILE, "w");
    fprintf(file, "Movement of objects\n");
    fprintf(file, "-------------------\n");
    for (i = 0; i < noOfObjects; i++) {
        // displacement = distance between initial (*_new) and final positions
        double mov = sqrt(pow( (x_new[i]-x[i]),2.0) + pow( (y_new[i]-y[i]),2.0));
        fprintf(file," Object %i - ORIGINAL: (%f,%f) -- NEW: (%f,%f) -> %f meters\n", i, x[i], y[i], x_new[i], y_new[i], mov);
    }
    int hours = NUM_ITER/3600;
    int mins = (NUM_ITER - hours*3600)/60;
    int secs = (NUM_ITER - hours*3600 - mins*60);
    fprintf(file,"Time elapsed: %i seconds (%i hours, %i minutes, %i seconds)\n",NUM_ITER, hours, mins, secs);
    time_used = ((double)(end-start)) / CLOCKS_PER_SEC;
    fprintf(file,"Processing time: %f sec.\n",time_used);
    fclose(file);
    cudaFree(gpu_x);
    cudaFree(gpu_y);
    cudaFree(gpu_vx);
    cudaFree(gpu_vy);
    cudaFree(gpu_m);
    cudaFree(gpu_x_new);
    cudaFree(gpu_y_new);
    cudaFree(gpu_vx_new);
    cudaFree(gpu_vy_new);
    // BUGFIX: the host arrays were leaked before.
    free(x); free(y); free(vx); free(vy); free(m);
    free(x_new); free(y_new); free(vx_new); free(vy_new);
} // main
|
23,317 | #include "includes.h"
#define BLOCK_SIZE 32
#define N 2048
// Naive N x N matrix multiply C = A * B, one output element per thread.
// NOTE(review): the index arithmetic assumes the launch uses
// BLOCK_SIZE x BLOCK_SIZE thread blocks — confirm with the host launch code.
// (Comments below translated from Russian.)
__global__ void matMult(float* A, float* B, float* C){
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
float sum = 0.0;
// Index of A[i][0] (start of this thread's row of A)
int ia = N * BLOCK_SIZE * by + N * ty;
// Index of B[0][j] (top of this thread's column of B)
int ib = BLOCK_SIZE * bx + tx;
// Dot product of row i of A with column j of B.
for (int k = 0; k < N; k++) {
sum += A[ia + k] * B[ib + k * N];
}
// Index of C[i][j]
int ic = N * BLOCK_SIZE * by + BLOCK_SIZE * bx;
// Result matrix
C[ic + N * ty + tx] = sum;
} |
23,318 | #include "quad_tree_node.cuh"
#include "points.cuh"
#include <iostream>
// Out-of-line member definitions for QuadTreeNode (declared in
// quad_tree_node.cuh). A node carries an id, an axis-aligned bounding box,
// and a half-open range [m_begin, m_end) of point indices.
__host__ __device__ QuadTreeNode::QuadTreeNode(): m_id(0), m_begin(0), m_end(0), m_bounding_box() {}
// Node-identifier accessors.
__host__ __device__ int QuadTreeNode::id() const{
return m_id;
}
__host__ __device__ void QuadTreeNode::set_id(int new_id){
m_id= new_id;
}
// Bounding-box accessors.
__host__ __device__ const BoundingBox& QuadTreeNode::bounding_box() const{
return m_bounding_box;
}
__host__ __device__ void QuadTreeNode::set_bounding_box(float minx, float miny, float maxx, float maxy){
m_bounding_box.set(minx, miny, maxx, maxy);
}
// Number of points covered by this node (end is exclusive).
__host__ __device__ int QuadTreeNode::num_points() const{
return m_end - m_begin;
}
// Point-range accessors: [points_begin(), points_end()).
__host__ __device__ int QuadTreeNode::points_begin() const{
return m_begin;
}
__host__ __device__ int QuadTreeNode::points_end() const{
return m_end;
}
__host__ __device__ void QuadTreeNode::set_range(int begin, int end){
m_begin = begin;
m_end = end;
}
// Host-only debug dump of the node's point range; the per-point printing is
// currently commented out.
__host__ void QuadTreeNode::list_points(Points points){
std::cout <<"Hello!" << std::endl;
std::cout << "The Node contains the points in range: " <<m_begin <<" to "<< m_end << std::endl;
std::cout << "These points are: " << std::endl;
// for (int i = m_begin; i< m_end; ++i )
// points.print_point_d_2_h(i);
}
|
23,319 | // Trivial array add example
// This is almost like "hello, world" in CUDA :-)
#include <iostream>
const size_t array_size = 1024;
// Good macro for making sure we know where things went wrong...
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Reports a failed CUDA call (any non-zero status) with its call site and
// aborts the process with exit code 99.
// `func` is the stringified call expression supplied by the checkCudaErrors
// macro; `file`/`line` locate the call site.
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
// __global__ means this is a piece of device code
// Element-wise vector addition: c[i] = a[i] + b[i] for every i < n_el.
// One thread per element; surplus threads in the final block do nothing.
__global__
void add_kernel(float* c, int n_el, const float* a, const float* b) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n_el) {
    c[i] = a[i] + b[i];
  }
}
// Drives the array-add example: fills two host arrays, copies them to the
// device, launches add_kernel, copies the sum back, and prints each result.
int main() {
  float host_array_a[array_size];
  float host_array_b[array_size];
  float host_array_c[array_size];
  for (size_t i=0; i<array_size; ++i) {
    host_array_a[i] = i;
    host_array_b[i] = 8*i;
  }
  // Pointers for device memory blocks.
  // Note that cudaMalloc wants a pointer to the pointer.
  float *device_array_a, *device_array_b, *device_array_c;
  checkCudaErrors(cudaMalloc(&device_array_a, sizeof(host_array_a)));
  checkCudaErrors(cudaMalloc(&device_array_b, sizeof(host_array_b)));
  checkCudaErrors(cudaMalloc(&device_array_c, sizeof(host_array_c)));
  checkCudaErrors(cudaMemcpy(device_array_a, host_array_a, sizeof(host_array_a),
                             cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(device_array_b, host_array_b, sizeof(host_array_b),
                             cudaMemcpyHostToDevice));
  // Define block size and number, then launch the kernel.
  const int block_size = 128;
  int n_blocks = (array_size + block_size - 1) / block_size;  // ceil-div
  // BUG FIX: execution configuration is <<<grid, block>>>. The original
  // passed <<<block_size, n_blocks>>> (swapped); it only covered all 1024
  // elements by coincidence because 128 * 8 == 8 * 128.
  add_kernel<<<n_blocks, block_size>>>(device_array_c, array_size,
                                       device_array_a, device_array_b);
  checkCudaErrors(cudaGetLastError());  // catch bad launch configurations
  // Blocking copy back; also synchronizes with the kernel.
  checkCudaErrors(cudaMemcpy(host_array_c, device_array_c, sizeof(host_array_c),
                             cudaMemcpyDeviceToHost));
  for (size_t i=0; i<array_size; ++i) {
    std::cout << host_array_a[i] << " + " << host_array_b[i] << " = " <<
      host_array_c[i] << std::endl;
  }
  // Release device memory (the original leaked all three buffers).
  checkCudaErrors(cudaFree(device_array_a));
  checkCudaErrors(cudaFree(device_array_b));
  checkCudaErrors(cudaFree(device_array_c));
  return 0;
}
23,320 | #include <cmath>
#include <cstdlib>
#include <cstdio>
#include <sys/time.h>
#define BLOCK 16
// Tiled shared-memory matrix multiply: C = A * B with A (M x N), B (N x K),
// C (M x K), all row-major. Launch with BLOCK x BLOCK thread blocks over a
// ceil(K/BLOCK) x ceil(M/BLOCK) grid (see main below).
__global__ void matmul(float *A, float *B, float *C, int M, int N, int K) {
// Shared memory staging tiles for one BLOCK x BLOCK piece of A and B.
__shared__ float s_A[BLOCK][BLOCK];
__shared__ float s_B[BLOCK][BLOCK];
// First/last element of this block's strip of A, plus the strides used to
// walk the A strip rightward and the B strip downward one tile at a time.
int a_begin = N * BLOCK * blockIdx.y; // N * blockDim.y * blockIdx.y
int a_end = a_begin + N;
int a_step = BLOCK; // blockDim.x
int b_begin = BLOCK * blockIdx.x; // blockDim.y * blockIdx.x
int b_step = BLOCK * K; // blockDim.y * K
int a = a_begin;
int b = b_begin;
// Per-thread offsets inside the current A/B tiles.
int a_th = N * threadIdx.y + threadIdx.x;
int b_th = K * threadIdx.y + threadIdx.x;
const int MN = M*N, NK = N*K;
float sum = 0;
// Fast path: every dimension divides evenly by BLOCK, so tile loads need
// no bounds checks.
if (M%BLOCK == 0 && N%BLOCK == 0 && K%BLOCK == 0) {
while (a < a_end) {
// Copy to shared memory
__syncthreads();
s_A[threadIdx.y][threadIdx.x] = A[a + a_th];
s_B[threadIdx.y][threadIdx.x] = B[b + b_th];
__syncthreads();
// Multiply
#pragma unroll
for (int i=0; i<BLOCK; i++) {
sum += s_A[threadIdx.y][i] * s_B[i][threadIdx.x];
}
a += a_step;
b += b_step;
}
} else {
// Out of bound case
// NOTE(review): the guards below clamp only against the *total* array
// sizes (MN, NK), not row boundaries, so edge tiles may stage values that
// belong to the following row. Threads whose C element is out of range
// return before writing, but confirm in-range results near the edges.
while (a < a_end) {
// Copy to shared memory
__syncthreads();
s_A[threadIdx.y][threadIdx.x] = a + a_th < MN ? A[a + a_th] : 0.0;
s_B[threadIdx.y][threadIdx.x] = b + b_th < NK ? B[b + b_th] : 0.0;
__syncthreads();
// Multiply
#pragma unroll
for (int i=0; i<BLOCK; i++) {
sum += s_A[threadIdx.y][i] * s_B[i][threadIdx.x];
}
a += a_step;
b += b_step;
}
// Threads outside C exit without writing. These early returns occur after
// every __syncthreads() above, so no divergent barrier is created.
if (blockIdx.y * BLOCK + threadIdx.y >= M) return;
if (blockIdx.x * BLOCK + threadIdx.x >= K) return;
}
// Linear index of this thread's element of C.
int c_idx = \
K * BLOCK * blockIdx.y + \
BLOCK * blockIdx.x + \
K * threadIdx.y + \
threadIdx.x;
C[c_idx] = sum;
}
// Benchmark driver: parses matrix dimensions, runs the tiled GPU matmul and
// an OpenMP CPU reference, reports runtimes/GFlops and the mean absolute
// difference between the two results.
int main(int argc, char **argv) {
  // "prog M" -> square MxM; "prog M N K" -> (MxN) * (NxK).
  int M, N, K;
  switch(argc) {
    case 2: M = atoi(argv[1]);
            N = M;
            K = M;
            break;
    case 4: M = atoi(argv[1]);
            N = atoi(argv[2]);
            K = atoi(argv[3]);
            break;
    default: printf("Invalid number of parameters\n");
             return 1;
  }
  // Host allocation: random A and B, zeroed C.
  float *h_A = new float [M*N];
  float *h_B = new float [N*K];
  float *h_C = new float [M*K];
  for (int i=0; i<M*N; i++) {
    h_A[i] = drand48();
  }
  for (int i=0; i<N*K; i++) {
    h_B[i] = drand48();
  }
  for (int i=0; i<M*K; i++) {
    h_C[i] = 0;
  }
  // Device allocation. BUG FIX: byte counts are size_t — the original used
  // int, which overflows (undefined behavior) once a matrix exceeds
  // ~536 million floats.
  float *d_A, *d_B, *d_C;
  size_t mem_A = sizeof(float) * (size_t)M * N;
  size_t mem_B = sizeof(float) * (size_t)N * K;
  size_t mem_C = sizeof(float) * (size_t)M * K;
  cudaMalloc((void **) &d_A, mem_A);
  cudaMalloc((void **) &d_B, mem_B);
  cudaMalloc((void **) &d_C, mem_C);
  // Copy inputs host -> device (C too, though the kernel overwrites it).
  cudaMemcpy(d_A, h_A, mem_A, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, h_B, mem_B, cudaMemcpyHostToDevice);
  cudaMemcpy(d_C, h_C, mem_C, cudaMemcpyHostToDevice);
  // One BLOCK x BLOCK thread tile per output tile, rounded up for remainders.
  dim3 grid((K/BLOCK)+(K%BLOCK!=0),(M/BLOCK)+(M%BLOCK!=0)); // number of blocks
  dim3 block(BLOCK,BLOCK); // threads per block
  // Time the GPU kernel; synchronize so the wall clock covers the kernel.
  struct timeval tic, toc;
  gettimeofday(&tic, NULL);
  matmul<<<grid,block>>>(d_A, d_B, d_C, M, N, K);
  cudaDeviceSynchronize();
  gettimeofday(&toc, NULL);
  double time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
  printf("[%dx%dx%d]\n", M, N, K);
  printf("CUDA : %lfs (%lf GFlops)\n", time, 2.*M*N*K/time/1e9);
  // Copy result device -> host.
  cudaMemcpy(h_C, d_C, mem_C, cudaMemcpyDeviceToHost);
  // CPU verification: subtract the reference product, leaving h_C ~ 0.
  gettimeofday(&tic, NULL);
#pragma omp parallel for
  for (int i=0; i<M; ++i) {
    for(int j=0; j<K; j++) {
      for (int k=0; k<N; k++) {
        h_C[K*i+j] -= h_A[N*i+k] * h_B[K*k+j];
      }
    }
  }
  gettimeofday(&toc, NULL);
  time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
  printf("CPU : %lfs (%lf GFlops)\n", time, 2.*M*N*K/time/1e9);
  // Mean absolute error; near zero when GPU and CPU agree.
  float err = 0;
  for (int i=0; i<M*K; ++i) {
    err += fabs(h_C[i]);
  }
  printf("Error : %f\n",err/M/K);
  // Free memory
  delete[] h_A;
  delete[] h_B;
  delete[] h_C;
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C);
  return 0;
}
|
23,321 | /*
Name: Matthew Matze
Date: 9/28/2016
Class: csc4310
Location: ~/csc3210/deviceq
General Summary of Program
The program is set up to show the various device properties to the screen
To Compile:
nvcc device_query.cu -o device_query
To Execute:
device_query
*/
#include<stdio.h>
void printDevProp(cudaDeviceProp devProp);
/*
The function prints out some of the properties of the cudaDeviceProp struct
Parameters:The struct to with which the device info shall be taken from
Postcondition: The specified parameters have been outputed to the screen
*/
// Enumerates every CUDA device on the system and prints its properties.
int main(void){
   int deviceCount = 0;
   cudaGetDeviceCount(&deviceCount);
   for (int dev = 0; dev < deviceCount; ++dev) {
      cudaDeviceProp properties;
      cudaGetDeviceProperties(&properties, dev);
      printDevProp(properties);
   }
}
/*
   Prints selected fields of a cudaDeviceProp struct to stdout.
   Parameters: the device-property struct to report.
   Postcondition: the listed properties have been written to the screen.
   BUG FIX: totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem and
   textureAlignment are size_t — printing them with %d is undefined behavior
   (and truncates on LP64 systems); %zu is the correct specifier.
*/
void printDevProp(cudaDeviceProp devProp){
   printf("Device name: %s\n", devProp.name);
   printf("Major: %d\n",devProp.major);
   printf("Minor: %d\n",devProp.minor);
   printf("TotalGlobalMem(Bytes): %zu\n",devProp.totalGlobalMem);
   printf("SharedMemPerBlock: %zu\n",devProp.sharedMemPerBlock);
   printf("RegsPerBlock: %d\n",devProp.regsPerBlock);
   printf("WarpSize: %d\n",devProp.warpSize);
   printf("MemPitch: %zu\n",devProp.memPitch);
   printf("MaxThreadsPerBlock: %d\n",devProp.maxThreadsPerBlock);
   printf("MaxThreadsPerMultiProcessor: %d\n",devProp.maxThreadsPerMultiProcessor);
   for(int i=0;i<3;i++){
      printf("MaxThreadsDim[%d]: %d\n", i ,devProp.maxThreadsDim[i]);
      printf("MaxGridSize[%d]: %d\n", i, devProp.maxGridSize[i]);
   }
   printf("ClockRate: %d\n",devProp.clockRate);
   printf("TotalConstMem: %zu\n",devProp.totalConstMem);
   printf("TextureAlignment: %zu\n",devProp.textureAlignment);
   printf("DeviceOverlap: %d\n",devProp.deviceOverlap);
   printf("MultiProcessorCount: %d\n",devProp.multiProcessorCount);
   printf("KernelExecTimeoutEnabled: %d\n",devProp.kernelExecTimeoutEnabled);
}
|
23,322 | #include <iostream>
using namespace std;
#define Threads 3
#define Blocks 4
#define N Threads*Blocks
__global__ // GPU function
// Accumulates a into b element-wise: b[i] += a[i] for every i < n.
void add(int *a, int *b, int n)
{
    // Linear global thread index.
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Surplus threads (idx >= n) fall through without touching memory.
    if (idx < n) {
        b[idx] += a[idx];
    }
    // Kernels return nothing; results travel through device memory.
}
// Demo driver: sums two N-element integer arrays on the GPU and prints the
// result. Returns 0 on success.
int main(void)
{
    // Total bytes for N ints.
    int memSize = N*sizeof(int);
    // Host (CPU) buffers.
    int *h_a, *h_b;
    h_a = (int*)malloc(memSize);
    h_b = (int*)malloc(memSize);
    // Device (GPU) buffers.
    int *d_a, *d_b;
    cudaMalloc((void**)&d_a, memSize);
    cudaMalloc((void**)&d_b, memSize);
    // Fill inputs: a[i] = i, b[i] = i*i.
    for (int i = 0; i < N; i++) {
        h_a[i] = i;
        h_b[i] = i*i;
    }
    // Send host (CPU) memory to device (GPU).
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, memSize, cudaMemcpyHostToDevice);
    // Launch: Blocks x Threads covers exactly N elements.
    add<<<Blocks, Threads>>>(d_a, d_b, N);
    // Wait for the kernel. FIX: cudaThreadSynchronize() is deprecated;
    // cudaDeviceSynchronize() is its direct replacement.
    cudaDeviceSynchronize();
    // Copy the result back (b[i] now holds a[i] + b[i]).
    cudaMemcpy(h_b, d_b, memSize, cudaMemcpyDeviceToHost);
    // Print output from device (GPU).
    for (int i = 0; i < N; i++)
        cout << h_b[i] << "\n";
    // Free host (CPU) memory.
    free(h_a);
    free(h_b);
    // Free device (GPU) memory.
    cudaFree(d_a);
    cudaFree(d_b);
    // BUG FIX: exit with 0 on success — the original returned 1, which
    // shells and build systems interpret as failure.
    return 0;
}
|
23,323 | #include <cstdio>
extern "C" {
__device__
static int THREADS_IN_BLOCK = 1024;
// Conditionally swaps tab[for_min] and tab[for_max] so the smaller value
// ends up at index for_min; no-op when either index is out of range.
// NOTE(review): each atomicExch is atomic, but the two-store swap as a
// whole is not — concurrent calls touching overlapping slots can
// interleave. Confirm the callers always generate disjoint index pairs
// within a synchronization phase.
__device__
void min_max(int* tab, int for_min, int for_max, int size) {
if (for_min >= size || for_max >= size) {
return;
}
int min = tab[for_min];
int max = tab[for_max];
if (max < min) {
atomicExch(tab + for_max, min);
atomicExch(tab + for_min, max);
}
};
// In-block bitonic sort over `to_sort`, building sorted runs of up to
// THREADS_IN_BLOCK elements: for each doubling triangle size, do the
// "triangle" compare-exchange, then the cascade of halving merges.
// NOTE(review): threads with thid >= size return *before* the
// __syncthreads() calls below — a divergent barrier (UB) whenever size is
// not a multiple of the block size. Confirm the launch always rounds size
// up to a full block, or hoist the bound check into min_max only.
// NOTE(review): thid here is x + y*gridDim.x, while bitonic_merge and
// bitonic_triangle_merge use x + y*gridDim.x*blockDim.x — one of the two
// formulas is presumably wrong for multi-row grids; verify against the
// host launch configuration.
__global__
void bitonic_sort(int* to_sort, int size) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int thid = x + y*gridDim.x;
if (thid >= size) {
return;
}
int d_traingle;
int local_thid;
int opposite;
// Grow sorted "triangles": 2, 4, ..., THREADS_IN_BLOCK.
for (d_traingle = 2; d_traingle <= THREADS_IN_BLOCK; d_traingle*=2) {
local_thid = thid % d_traingle;
// Mirror partner inside the triangle.
opposite = thid - local_thid + d_traingle - 1 - local_thid;
if (local_thid < d_traingle/2) {
min_max(to_sort, thid, opposite, size);
}
__syncthreads();
// Halving merge cascade within the triangle.
for (int d = d_traingle/2; d >= 2; d /= 2) {
local_thid = thid % d;
if (local_thid < d/2) {
opposite = thid + d/2;
min_max(to_sort, thid, opposite, size);
}
__syncthreads();
}
__syncthreads();
}
}
__global__
// One bitonic merge step: inside every d-sized box, the first half
// compare-exchanges with the element d/2 positions ahead.
void bitonic_merge(int* to_sort, int d, int size) {
    // Flatten the 2D launch grid into a linear thread id.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int tid = col + row * gridDim.x * blockDim.x;
    if (tid >= size) {
        return;
    }
    // Position inside this thread's d-sized box.
    const int pos_in_box = tid % d;
    const int partner = tid + d / 2;
    if (pos_in_box < d / 2) {
        min_max(to_sort, tid, partner, size);
    }
}
__global__
// One bitonic "triangle" step: inside every d_traingle-sized box, element i
// compare-exchanges with its mirror (box_start + d_traingle - 1 - i_local).
void bitonic_triangle_merge(int* to_sort, int d_traingle, int size) {
    // Flatten the 2D launch grid into a linear thread id.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int tid = col + row * gridDim.x * blockDim.x;
    if (tid >= size) {
        return;
    }
    // Mirror partner within the triangle.
    const int pos_in_box = tid % d_traingle;
    const int partner = tid - pos_in_box + d_traingle - 1 - pos_in_box;
    if (pos_in_box < d_traingle / 2) {
        min_max(to_sort, tid, partner, size);
    }
}
}
|
23,324 | /*
Copyright 2021 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http ://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include <vector>
static constexpr int NUM_TRIALS = 11;
// CPU版行列積カーネル
// Reference O(n^3) host matrix product: C = A * B for n x n row-major
// matrices. C is fully overwritten.
void matmul_cpu(float *C, const float *A, const float *B, int n){
	for(int row = 0; row < n; ++row){
		const float *a_row = A + row * n;
		float *c_row = C + row * n;
		for(int col = 0; col < n; ++col){
			float acc = 0.0f;
			for(int k = 0; k < n; ++k){
				acc += a_row[k] * B[k * n + col];
			}
			c_row[col] = acc;
		}
	}
}
// GPU版行列積カーネル
// GPU matrix-product kernel: one thread per output element of the n x n
// row-major product C = A * B. Launched by call_matmul_gpu with 16x16
// blocks over an n/16 x n/16 grid.
// Robustness: guard against threads outside the matrix so the kernel is
// also safe under launch configurations that round the grid up.
__global__ void matmul_gpu(float *C, const float *A, const float *B, int n){
	const int i = blockIdx.y * blockDim.y + threadIdx.y;
	const int j = blockIdx.x * blockDim.x + threadIdx.x;
	if(i >= n || j >= n){ return; }
	float sum = 0.0f;
	for(int k = 0; k < n; ++k){
		sum += A[i * n + k] * B[k * n + j];
	}
	C[i * n + j] = sum;
}
// Launches matmul_gpu with 16x16 thread blocks over an (n/16) x (n/16) grid.
// NOTE(review): integer division truncates — rows/columns past the last
// multiple of 16 are never computed. Confirm callers always pass n
// divisible by 16.
void call_matmul_gpu(float *C, const float *A, const float *B, int n){
	const dim3 bdim(16, 16, 1), gdim(n / 16, n / 16, 1);
	matmul_gpu<<<gdim, bdim>>>(C, A, B, n);
}
// GPU版処理時間計測
// NUM_TRIALS 回計測して中央値を求める
// Benchmarks the GPU matmul: uploads A/B, runs the kernel NUM_TRIALS times,
// downloads C, and returns the *median* wall-clock time in milliseconds.
// NOTE(review): none of the CUDA calls here are error-checked; a failed
// cudaMalloc would only show up as a wrong result downstream.
double matmul_gpu_benchmark(float *h_C, const float *h_A, const float *h_B, int n){
	// Allocate device memory
	float *d_C = nullptr, *d_A = nullptr, *d_B = nullptr;
	cudaMalloc(&d_A, sizeof(float) * n * n);
	cudaMalloc(&d_B, sizeof(float) * n * n);
	cudaMalloc(&d_C, sizeof(float) * n * n);
	// Transfer the input data
	cudaMemcpy(d_A, h_A, sizeof(float) * n * n, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, sizeof(float) * n * n, cudaMemcpyHostToDevice);
	std::vector<double> durations(NUM_TRIALS);
	for(int i = 0; i < NUM_TRIALS; ++i){
		const auto begin = std::chrono::steady_clock::now();
		call_matmul_gpu(d_C, d_A, d_B, n);
		cudaDeviceSynchronize(); // wait for the GPU kernel to finish
		const auto end = std::chrono::steady_clock::now();
		const auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - begin);
		durations[i] = duration.count() * 1e-3; // microseconds -> milliseconds
	}
	// Transfer the output data
	cudaMemcpy(h_C, d_C, sizeof(float) * n * n, cudaMemcpyDeviceToHost);
	// Free device memory
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	// Take the median
	std::sort(durations.begin(), durations.end());
	return durations[NUM_TRIALS / 2];
}
// 検算
// Element-wise comparison of two n x n float matrices. Every mismatch
// (absolute difference above 1e-4) is logged to stderr; returns true iff
// no mismatch was found.
bool validate(const float *expect, const float *actual, int n){
	bool ok = true;
	for(int r = 0; r < n; ++r){
		for(int c = 0; c < n; ++c){
			const int idx = r * n + c;
			if(std::fabs(expect[idx] - actual[idx]) > 1e-4){
				std::cerr << "(" << r << ", " << c << "): " << expect[idx] << " != " << actual[idx] << std::endl;
				ok = false;
			}
		}
	}
	return ok;
}
// Benchmark entry point: builds random n x n inputs, obtains a CPU
// reference (cached on disk because it is slow), times the GPU kernel,
// and validates the GPU output against the reference.
int main(int argc, char *argv[]){
	if(argc < 2){
		std::cerr << "Usage: " << argv[0] << " n" << std::endl;
		return 0;
	}
	const int n = atoi(argv[1]);
	std::cout << "n = " << n << std::endl;
	// Random inputs in [-1, 1).
	std::default_random_engine engine;
	std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
	std::vector<float> A(n * n), B(n * n), cpu_C(n * n), gpu_C(n * n);
	for(int i = 0; i < n * n; ++i){
		A[i] = dist(engine);
		B[i] = dist(engine);
	}
	// The CPU reference is slow, so cache it on disk keyed by n.
	const std::string cache_name = "cache_" + std::to_string(n);
	std::ifstream cache_ifs(cache_name, std::ios::binary);
	if(cache_ifs){
		cache_ifs.read(reinterpret_cast<char*>(cpu_C.data()), sizeof(float) * n * n);
	}else{
		// BUG FIX: the cache is read back in binary mode above, so it must
		// also be *written* in binary mode; the original opened it in text
		// mode, which corrupts the data on platforms with newline translation.
		std::ofstream cache_ofs(cache_name, std::ios::binary);
		matmul_cpu(cpu_C.data(), A.data(), B.data(), n);
		cache_ofs.write(reinterpret_cast<const char*>(cpu_C.data()), sizeof(float) * n * n);
	}
	const auto gpu_duration =
		matmul_gpu_benchmark(gpu_C.data(), A.data(), B.data(), n);
	std::cout << "GPU: " << gpu_duration << " [ms]" << std::endl;
	std::cout << " " << 2.0 * n * n * n / gpu_duration * 1e-9 << " [TFLOPS]" << std::endl;
	const auto valid = validate(cpu_C.data(), gpu_C.data(), n);
	std::cout << "Validation: " << (valid ? "Success" : "Failed") << std::endl;
	return 0;
}
|
23,325 | /*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2021 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source or binary form, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
Bryan Valenzuela
Connor Steed
*/
#include <cstdio>
#include <cuda.h>
#include <algorithm>
#include <sys/time.h>
static const int ThreadsPerBlock = 512;
// One thread per candidate i in [start, start + gridSize): threads whose
// candidate lies on the step grid and below `bound` compute the Collatz
// sequence length of i and fold it into *maxlen with atomicMax.
static __global__ void collatz(const long start, const long bound, const long step, int* const maxlen)
{
// Global thread id offset by start: thread t tests candidate start + t.
const long i = threadIdx.x + blockIdx.x * (long)blockDim.x + start;
// Only candidates on the step grid do work; the rest idle.
if( (i - start) % step == 0)
if (i < bound){
long val = i;
int len = 1;
// Iterate the Collatz map until reaching 1, counting steps.
while (val != 1) {
len++;
if ((val % 2) == 0) {
val /= 2; // even
} else {
val = 3 * val + 1; // odd
}
}
// Fold this sequence length into the global maximum.
atomicMax(maxlen, len);
}
}
// Synchronizes with the device, then aborts the process if any CUDA error
// has been recorded (covers both launch and asynchronous execution errors).
static void CheckCuda()
{
  cudaDeviceSynchronize();
  const cudaError_t e = cudaGetLastError();
  if (e != cudaSuccess) {
    fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e));
    exit(-1);
  }
}
// Entry point: parses start/bound/step, launches the collatz kernel over
// all candidates, and prints the longest sequence length found.
int main(int argc, char *argv[])
{
printf("Collatz v1.5\n");
// check command line
if (argc != 4) {fprintf(stderr, "USAGE: %s start bound step\n", argv[0]); exit(-1);}
const long start = atol(argv[1]);
const long bound = atol(argv[2]);
const long step = atol(argv[3]);
if (start < 1) {fprintf(stderr, "ERROR: start value must be at least 1\n"); exit(-1);}
if (bound <= start) {fprintf(stderr, "ERROR: bound must be larger than start\n"); exit(-1);}
if (step < 1) {fprintf(stderr, "ERROR: step size must be at least 1\n"); exit(-1);}
printf("start value: %ld\n", start);
printf("upper bound: %ld\n", bound);
printf("step size: %ld\n", step);
// The running maximum lives on the device so the kernel can atomicMax it.
int maxlen = 0;
int size = sizeof(int);
int* d_maxlen;
if (cudaSuccess != cudaMalloc((void **)&d_maxlen, size)) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
if (cudaSuccess != cudaMemcpy(d_maxlen, &maxlen, sizeof(int), cudaMemcpyHostToDevice)) {fprintf(stderr, "ERROR: copying to device failed\n"); exit(-1);}
// start time
timeval beg, end;
gettimeofday(&beg, NULL);
// execute timed code
// NOTE(review): the grid is sized by `bound` alone, yet each thread offsets
// its id by `start`, so roughly start/ThreadsPerBlock surplus blocks are
// launched whose threads all fail the i < bound test; sizing the grid by
// (bound - start) would be sufficient.
collatz<<<(bound + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(start, bound, step, d_maxlen);
cudaDeviceSynchronize();
// end time
gettimeofday(&end, NULL);
const double runtime = end.tv_sec - beg.tv_sec + (end.tv_usec - beg.tv_usec) / 1000000.0;
printf("compute time: %.6f s\n", runtime);
CheckCuda();
if (cudaSuccess != cudaMemcpy(&maxlen, d_maxlen, sizeof(int), cudaMemcpyDeviceToHost)) {fprintf(stderr, "ERROR: copying from device failed\n"); exit(-1);}
// print result
printf("maximum sequence length: %d elements\n", maxlen);
cudaFree(d_maxlen);
return 0;
}
|
23,326 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Numerically stable log(exp(a) + exp(b)) in single precision.
// Values at or below -1e20f act as the "log zero" sentinel: the other
// operand is returned unchanged.
__device__ float logsumexp(float a, float b)
{
if(a <= -1e20f)
{
return b;
}
if(b <= -1e20f)
{
return a;
}
// Factor out the larger argument so the exponential never overflows.
// FIX: use the single-precision intrinsics — the original called the
// double-precision log()/exp(), silently promoting every operation to
// fp64; log1pf(x) is also more accurate than logf(1.0f + x) for small x.
if(a > b)
{
return a + log1pf(expf(b-a));
}
else
{
return b + log1pf(expf(a-b));
}
}
// Adds two log-domain values, where anything at or below -1e20f stands for
// log(0): adding the sentinel would poison the sum, so the other operand
// is returned unchanged instead.
__device__ float safeadd(float a, float b)
{
if(a <= -1e20f)
{
return b;
}
if(b <= -1e20f)
{
return a;
}
return a + b;
}
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
__constant__ int S = 0;
__constant__ int F = 1;
__constant__ int L = 2;
__constant__ float r1logprob = -0.14094854611f;
__constant__ float r2logprob = -2.14038225046f;
__constant__ float r3logprob = -4.27902812221f;
__constant__ float r4logprob = -0.2387141463f;
__constant__ float r5logprob = -1.549472331f;
__constant__ float r6logprob = -0.11137523453f;
__constant__ float r7logprob = -2.25002110628f;
/*
__global__ void initialiseinside(float *inside, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
inside[S*len*len + i*len + j] = unpairedlogprobs[j] + r2logprob;
inside[L*len*len + i*len + j] = unpairedlogprobs[j] + r6logprob;
inside[F*len*len + i*len + j] = -1e20f;
}
else
{
inside[S*len*len + i*len + j] = -1e20f;
inside[L*len*len + i*len + j] = -1e20f;
inside[F*len*len + i*len + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = 0;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, inside[L*len*len + j*len + h] + inside[S*len*len + (h+1)*len + (j+b)]);
}
index = S*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r1logprob + tmp);
// rule 5
index = F*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len+j+b]*BT + inside[F*len*len+(j+1)*len+ (j+b-1)];
// rule 3
index = S*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r3logprob + v);
// rule 4
index = F*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r4logprob + v);
// rule 7
index = L*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r7logprob + v);
}
}
__global__ void insidez(const float* inside, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = inside[len-1];
}
}*/
/*
__global__ void initialiseinside(float *inside, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
inside[i*len*3 + j*3 + S] = unpairedlogprobs[j] + r2logprob;
inside[i*len*3 + j*3 + L] = unpairedlogprobs[j] + r6logprob;
inside[i*len*3 + j*3 + F] = -1e20f;
}
else
{
inside[i*len*3 + j*3 + S] = -1e20f;
inside[i*len*3 + j*3 + L] = -1e20f;
inside[i*len*3 + j*3 + F] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = 0;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, inside[j*len*3 + h*3 + L] + inside[(h+1)*len*3 + (j+b)*3 + S]);
}
index = j*len*3 + (j+b)*3 + S;
inside[index] = logsumexp(inside[index], r1logprob + tmp);
// rule 5
index = j*len*3 + (j+b)*3 + F;
inside[index] = logsumexp(inside[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len+j+b] + inside[(j+1)*len*3 + (j+b-1)*3 + F];
// rule 3
index = j*len*3 + (j+b)*3 + S;
inside[index] = logsumexp(inside[index], r3logprob + v);
// rule 4
index = j*len*3 + (j+b)*3 + F;
inside[index] = logsumexp(inside[index], r4logprob + v);
// rule 7
index = j*len*3 + (j+b)*3 + L;
inside[index] = logsumexp(inside[index], r7logprob + v);
}
}
__global__ void insidez(const float* inside, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = inside[(len-1)*3];
}
}*/
// Initializes the three len x len inside tables: diagonal cells of S and L
// get the unpaired emission plus the corresponding rule log-prob
// (S -> s, L -> s); every other cell — and all of F — starts at the
// log-zero sentinel -1e20f. One thread per row.
__global__ void initialiseinside(float* insideS, float* insideL, float* insideF, const float* unpairedlogprobs, int len)
{
const int row = blockIdx.x*blockDim.x + threadIdx.x;
if(row >= len)
{
return;
}
for(int col = 0 ; col < len ; col++)
{
const int idx = row*len + col;
if(row == col)
{
insideS[idx] = unpairedlogprobs[col] + r2logprob;
insideL[idx] = unpairedlogprobs[col] + r6logprob;
}
else
{
insideS[idx] = -1e20f;
insideL[idx] = -1e20f;
}
// F has no single-symbol rule, so its diagonal is log-zero too.
insideF[idx] = -1e20f;
}
}
// One anti-diagonal step (span length b) of the SCFG inside algorithm over
// the three nonterminals S, F, L — see the rule table in the comment block
// above (rule 1: S->LS, 3: S->dFd, 4: F->dFd, 5: F->LS, 7: L->dFd).
// Must be invoked once per b = 1..len-1, in order, since span b reads
// results from shorter spans. One thread per start position j.
__global__ void insidealgorithm(float* insideS, float* insideL, float* insideF, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
// Cell for span [j, j+b].
int index = j*len + j+b;
// type 3 rules (bifurcations): sum over all split points h.
// rule 1: S -> L S
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, insideL[j*len + h] + insideS[(h+1)*len + (j+b)]);
}
insideS[index] = logsumexp(insideS[index], r1logprob + tmp);
// rule 5: F -> L S (same bifurcation sum)
insideF[index] = logsumexp(insideF[index], r5logprob + tmp);
// type 2 rules (pairings): emit the pair (j, j+b) around an inner F.
// BT scales only the pairing log-prob (inverse-temperature factor).
float v = pairedlogprobs[index]*BT + insideF[(j+1)*len+ (j+b-1)];
// rule 3: S -> d F d
insideS[index] = logsumexp(insideS[index], r3logprob + v);
// rule 4: F -> d F d
insideF[index] = logsumexp(insideF[index], r4logprob + v);
// rule 7: L -> d F d
insideL[index] = logsumexp(insideL[index], r7logprob + v);
}
}
// Fills one anti-diagonal (cells (i, i+diag)) of the decoding DP tables.
// ematrix[i][j] holds the best score for subsequence [i, j]; smatrix[i][j]
// records the traceback choice: -1 = i left unpaired, -2 = i pairs with j,
// k+1 = i pairs with position k (splitting the span at k).
// NOTE(review): this looks like a Nussinov-style maximum-expected-accuracy
// recurrence weighting pair probabilities by alpha — confirm against the
// host-side traceback. Must be called once per diag in increasing order.
__global__ void posteriordecoding(float* ematrix, int* smatrix, const float* pairprobs, const float* singleprobs, const int datalen, const int diag, const float alpha)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < datalen-diag)
{
int j = i + diag;
// Option 1: i unpaired, best of [i+1, j].
float e1 = singleprobs[i] + ematrix[(i+1)*datalen + j];
// Option 2: i pairs with j, best of the interior [i+1, j-1].
float e2 = alpha*pairprobs[i*datalen+j] + ematrix[(i+1)*datalen + j-1];
// Option 3: i pairs with some k in (i, j); keep the best split.
float maxe3 = -1e10;
int maxk = 0;
for(int k=i+1 ; k <= j-1 ; k++)
{
float v = alpha*pairprobs[i*datalen + k] + ematrix[(i+1)*datalen + k-1] + ematrix[(k+1)*datalen + j];
if(v > maxe3)
{
maxe3 = v;
maxk = k;
}
}
// Pick the maximum of the three options and record the choice.
float maxval = e1;
smatrix[i*datalen + j] = -1;
if(e2 > maxval)
{
maxval = e2;
smatrix[i*datalen + j] = -2;
}
if(maxe3 > maxval)
{
maxval = maxe3;
smatrix[i*datalen + j] = maxk+1;
}
ematrix[i*datalen + j] = maxval;
}
}
// Copies the partition function — the inside value of S over the full
// sequence [0, len-1], stored at insideS[0*len + (len-1)] — into Z[0].
// Only a single thread does the write.
__global__ void insidez(const float* insideS, float* Z, const int len)
{
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid != 0)
{
return;
}
Z[0] = insideS[len-1];
}
/*
__global__ void initialiseinside(float* insideS, float* insideL, float* insideF, const float* unpairedlogprobs, int len, const int stride)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
insideS[i*stride + j] = unpairedlogprobs[j] + r2logprob;
insideL[i*stride + j] = unpairedlogprobs[j] + r6logprob;
insideF[i*stride + j] = -1e20f;
}
else
{
insideS[i*stride + j] = -1e20f;
insideL[i*stride + j] = -1e20f;
insideF[i*stride + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* insideS, float* insideL, float* insideF, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const int stride, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = j*stride + j+b;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, insideL[j*stride + h] + insideS[(h+1)*stride + (j+b)]);
}
insideS[index] = logsumexp(insideS[index], r1logprob + tmp);
// rule 5
insideF[index] = logsumexp(insideF[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len + j+b]*BT + insideF[(j+1)*stride + (j+b-1)];
// rule 3
insideS[index] = logsumexp(insideS[index], r3logprob + v);
// rule 4
insideF[index] = logsumexp(insideF[index], r4logprob + v);
// rule 7
insideL[index] = logsumexp(insideL[index], r7logprob + v);
}
}
__global__ void insidez(const float* insideS, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = insideS[len-1];
}
}*/
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
// One anti-diagonal step (span length b) of the SCFG outside algorithm,
// mirroring insidealgorithm's rule numbering (see the rule table above).
// Tables are laid out as outside/inside[NT*len*len + i*len + j] with
// NT in {S=0, F=1, L=2}. Must run once per b in *decreasing* span order.
// NOTE(review): here the rule log-probs themselves are scaled by BT
// (r1logprob*BT, ...), whereas insidealgorithm scales only the pairing
// term — confirm this asymmetry is intentional.
__global__ void outsidealgorithm(float* outside, const float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len - b)
{
int index = 0;
// type 3 rules
// rule 1 Rule('S', "LS",0.868534, 3))
// [j, j+b] as the L child: combine with an S sibling to its right.
float tmp = -1e20f;
for (int k = j + b + 1; k < len; k++)
{
tmp = logsumexp(tmp, outside[S*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
}
index = L*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
// [j, j+b] as the S child: combine with an L sibling to its left.
tmp = -1e20f;
for (int k = 0 ; k < j ; k++)
{
tmp = logsumexp(tmp, outside[S*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
}
index = S*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
// rule 5 Rule('F', "LS",0.21236, 3) — same two cases under an F parent.
tmp = -1e20f;
for (int k = j + b + 1; k < len; k++)
{
tmp = logsumexp(tmp, outside[F*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
}
index = L*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
tmp = -1e20f;
for (int k = 0 ; k < j ; k++)
{
tmp = logsumexp(tmp, outside[F*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
}
index = S*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
// type 2 rules: [j, j+b] is the F inside a pair emitted at (j-1, j+b+1),
// which only exists when both flanking positions are in range.
if ((j>=1) && (j+b+1<len))
{
float v = pairedlogprobs[(j-1)*len+(j+b+1)]*BT;
index = F*len*len + j*len + j+b;
// rule 3 Rule('S', "dFd",0.013856122002, 2)
outside[index] = logsumexp(outside[index], r3logprob*BT + outside[S*len*len + (j-1)*len + j+b+1] + v);
// rule 4 Rule('F', "dFd",0.787640, 2)
outside[index] = logsumexp(outside[index], r4logprob*BT + outside[F*len*len + (j-1)*len + j+b+1] + v);
// rule 7 Rule('L', "dFd",0.105397, 2)
outside[index] = logsumexp(outside[index], r7logprob*BT + outside[L*len*len + (j-1)*len + j+b+1] + v);
}
}
}
/*
int main()
{
int len = 8000;
int N = len;
float* inside = (float*)malloc(3*len*len*sizeof(float));
float* pairedlogprobs = (float*)malloc(len*len*sizeof(float));
float* unpairedlogprobs = (float*)malloc(len*sizeof(float));
float* d_inside;
float* d_pairedlogprobs;
float* d_unpairedlogprobs;
cudaMalloc(&d_inside, 3*len*len*sizeof(float));
cudaMalloc(&d_pairedlogprobs, len*len*sizeof(float));
cudaMalloc(&d_unpairedlogprobs, len*len*sizeof(float));
cudaMemcpy(d_inside, inside, 3*len*len*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_pairedlogprobs, pairedlogprobs, len*len*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_unpairedlogprobs, unpairedlogprobs, len*sizeof(float), cudaMemcpyHostToDevice);
for(int b=1 ; b < len ; b++)
{
insidealgorithm<<<(N+511)/512, 512>>>(d_inside, d_pairedlogprobs, d_unpairedlogprobs, b, len, 1.0);
}
int code = cudaMemcpy(inside, d_inside, 3*len*len*sizeof(float), cudaMemcpyDeviceToHost);
printf("exitcode %d\n", code);
printf("Z %f\n", inside[len-1]);
}*/
|
23,327 | #define TILE_DIM 16
#define BLOCK_ROWS 16
#define FLOOR(a,b) (a-(a%b))
// Naive out-of-place transpose: odata (height x width, i.e. transposed)
// gets idata (height rows x width cols) with no shared-memory tiling, so
// either the reads or the writes are uncoalesced.
// Launch: blockDim = (TILE_DIM, BLOCK_ROWS), gridDim covering the matrix
// in TILE_DIM x TILE_DIM tiles. Assumes width and height are multiples of
// TILE_DIM — there is no bounds check (TODO confirm at the call site).
__global__ void transposeNaive(float* odata, float* idata, int width, int height)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
// With TILE_DIM == BLOCK_ROWS (both 16) this loop runs exactly once; it
// only iterates when each thread is responsible for several tile rows.
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS)
{
odata[index_out+i] = idata[index_in+i*width];
}
}
|
23,328 | /*
* @Program: sync_async.cu
* @Description: Shows the common sync/async behaviour.
*
* @Author: Giacomo Marciani <gmarciani@acm.org>
* @Institution: University of Rome Tor Vergata
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Busy-waits until roughly `cycles` clock ticks have elapsed; usable from
// both host and device. The elapsed-time expression handles wraparound of
// an assumed 32-bit clock counter.
// BUG FIX: the original declared a local `clock_t cycles` that shadowed
// the parameter and then tested `cycles >= cycles` (always true), so the
// loop exited on its first iteration and no delay ever happened.
__host__ __device__ void waitClockCycles(const int cycles) {
  clock_t start = clock();
  clock_t now;
  for (;;) {
    now = clock();
    clock_t elapsed = now > start ? now - start : now + (0xffffffff - start);
    if (elapsed >= cycles) {
      break;
    }
  }
}
// Device-side helper: waits `cycles` ticks, logs the current *result, then
// stores `val` into it.
__device__ void helloGPUDevice(const int val, const int cycles, int *result) {
waitClockCycles(cycles);
printf("[gpu]> (%d) Hello world! (device | delay: %d clock cycles | in: %d)\n", val, cycles, *result);
*result = val;
}
// Kernel entry point: logs and stores `val`, then repeats the same via the
// __device__ helper — each launch prints twice and writes *result twice.
__global__ void helloGPU(const int val, const int cycles, int *result) {
waitClockCycles(cycles);
printf("[gpu]> (%d) Hello world! (global | delay: %d clock cycles | in: %d)\n", val, cycles, *result);
*result = val;
helloGPUDevice(val, cycles, result);
}
// Host-side helper (explicitly __host__-qualified): waits then logs.
__host__ void helloCPUFromHost(const int val, const int cycles) {
waitClockCycles(cycles);
printf("[cpu]> (%d) Hello world! (host | delay: %d clock cycles)\n", val, cycles);
}
// Plain host function: waits, logs, then calls the __host__ variant, so
// each call prints two lines.
void helloCPU(const int val, const int cycles) {
waitClockCycles(cycles);
printf("[cpu]> (%d) Hello world! (normal | delay: %d clock cycles)\n", val, cycles);
helloCPUFromHost(val, cycles);
}
/* Demonstrates explicit synchronization: two async kernel launches,
 * a cudaDeviceSynchronize(), then a blocking D2H memcpy.
 * Fix: dev_result was leaked (cudaMalloc without a matching cudaFree). */
void flow_1(void) {
    printf("### start FLOW 1 ###\n");
    int result = -1;
    int *dev_result = NULL;
    cudaMalloc((void**)&dev_result, sizeof(int));
    printf("[before memcpy #1] result: %d\n", result);
    cudaMemcpy(dev_result, &result, sizeof(int), cudaMemcpyHostToDevice); // memcpy #1
    printf("[after memcpy #1] result: %d\n", result);
    helloGPU<<< 1, 1 >>>(1, 30000, dev_result); // gpu #1
    helloGPU<<< 1, 1 >>>(2, 10000, dev_result); // gpu #2
    printf("[before deviceSynchronize #1] result: %d\n", result);
    cudaDeviceSynchronize(); // deviceSynchronize #1
    printf("[after deviceSynchronize #1] result: %d\n", result);
    printf("[before memcpy #2] result: %d\n", result);
    cudaMemcpy(&result, dev_result, sizeof(int), cudaMemcpyDeviceToHost); // memcpy #2
    printf("[after memcpy #2] result: %d\n", result);
    cudaFree(dev_result); // fix: previously leaked
    printf("### end FLOW 1 ###\n\n");
}
/* Same as flow_1 but without an explicit sync: the blocking cudaMemcpy
 * itself waits for the preceding kernels on the default stream.
 * Fix: dev_result was leaked (cudaMalloc without a matching cudaFree). */
void flow_2(void) {
    printf("### start FLOW 2 ###\n");
    int result = -1;
    int *dev_result = NULL;
    cudaMalloc((void**)&dev_result, sizeof(int));
    printf("[before memcpy #1] result: %d\n", result);
    cudaMemcpy(dev_result, &result, sizeof(int), cudaMemcpyHostToDevice); // memcpy #1
    printf("[after memcpy #1] result: %d\n", result);
    helloGPU<<< 1, 1 >>>(1, 30000, dev_result); // gpu #1
    helloGPU<<< 1, 1 >>>(2, 10000, dev_result); // gpu #2
    printf("[before memcpy #2] result: %d\n", result);
    cudaMemcpy(&result, dev_result, sizeof(int), cudaMemcpyDeviceToHost); // memcpy #2
    printf("[after memcpy #2] result: %d\n", result);
    cudaFree(dev_result); // fix: previously leaked
    printf("### end FLOW 2 ###\n\n");
}
/* Async variant: cudaMemcpyAsync does not block, so the host-side prints
 * may run before the copies/kernels finish; only deviceSynchronize at the
 * end guarantees `result` is final. NOTE(review): `result` is pageable
 * host memory, so these async copies may behave synchronously — pinned
 * memory (cudaMallocHost) is required for true async — confirm intent.
 * Fix: dev_result was leaked (cudaMalloc without a matching cudaFree). */
void flow_3(void) {
    printf("### start FLOW 3 ###\n");
    int result = -1;
    int *dev_result = NULL;
    cudaMalloc((void**)&dev_result, sizeof(int));
    printf("[before memcpyAsync #1] result: %d\n", result);
    cudaMemcpyAsync(dev_result, &result, sizeof(int), cudaMemcpyHostToDevice); // memcpyAsync #1
    printf("[after memcpyAsync #1] result: %d\n", result);
    helloGPU<<< 1, 1 >>>(1, 30000, dev_result); // gpu #1
    helloGPU<<< 1, 1 >>>(2, 10000, dev_result); // gpu #2
    printf("[before memcpyAsync #2] result: %d\n", result);
    cudaMemcpyAsync(&result, dev_result, sizeof(int), cudaMemcpyDeviceToHost); // memcpyAsync #2
    printf("[after memcpyAsync #2] result: %d\n", result);
    printf("[before deviceSynchronize #1] result: %d\n", result);
    cudaDeviceSynchronize(); // deviceSynchronize #1
    printf("[after deviceSynchronize #1] result: %d\n", result);
    cudaFree(dev_result); // fix: previously leaked
    printf("### end FLOW 3 ###\n\n");
}
// Runs the three sync/async demonstration flows in sequence.
int main(void) {
    flow_1(); // deviceSynchronize + memcpy
    flow_2(); // memcpy
    flow_3(); // memcpyAsync
    /* Kept for experimentation: interleaved CPU/GPU greetings showing that
       without a cudaDeviceSynchronize the GPU output may appear out of
       order (or not at all if the program exits first).
    helloCPU(3, 20000);
    helloGPU<<< 1, 1 >>>(4, 20000, dev_result); // potentially skipped or not in order, if deviceSynchronize #2 is missing
    //cudaDeviceSynchronize(); // deviceSynchronize #2
    helloCPU(5, 30000);
    helloCPU(6, 10000);
    helloGPU<<< 1, 1 >>>(7, 20000, dev_result); // potentially skipped or not in order, if deviceSychronize #3 is missing
    //cudaDeviceSynchronize(); // deviceSynchronize #3
    helloCPU(8, 10000);
    */
    return 0;
}
|
23,329 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
// Generate 100 random ints on the host, sort them on the GPU with Thrust,
// copy them back, and print the sorted sequence.
int main(void)
{
    thrust::host_vector<int> h_vec(100);
    std::generate(h_vec.begin(), h_vec.end(), rand);
    // Construction from a host_vector performs the H2D transfer.
    thrust::device_vector<int> d_vec = h_vec;
    // Sort on the device, then bring the results back.
    thrust::sort(d_vec.begin(), d_vec.end());
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    for (int i = 0; i < (int)h_vec.size(); ++i) {
        printf("h_vec [%d] = %d\n", i, h_vec[i]);
    }
    return 0;
}
23,330 | #include <stdio.h>
#include <stdlib.h>
typedef unsigned char uchar;
// Builds a 256-bin histogram of the byte values in data[0..n):
// each block accumulates a shared-memory partial histogram, then merges
// it into the global `height` array with one atomicAdd per bin.
__global__ void calcGis(uchar* data, int n, int* height) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int stride = gridDim.x * blockDim.x;
    __shared__ int localHist[256];
    // Zero the shared partial histogram cooperatively.
    for (int bin = threadIdx.x; bin < 256; bin += blockDim.x) {
        localHist[bin] = 0;
    }
    __syncthreads();
    // Grid-stride pass over the input bytes.
    for (int i = tid; i < n; i += stride) {
        atomicAdd(&localHist[(int)data[i]], 1);
    }
    __syncthreads();
    // Fold the per-block partial into the global histogram.
    for (int bin = threadIdx.x; bin < 256; bin += blockDim.x) {
        atomicAdd(&height[bin], localHist[bin]);
    }
}
/* In-place prefix sum over the 256-entry histogram `height`, used to turn
 * bin counts into output offsets for the counting sort in outGis.
 * Blelloch-style up-sweep + down-sweep in shared memory; the `(i>>5)`
 * terms pad every 32nd slot (hence the 264-element array) — presumably to
 * avoid shared-memory bank conflicts. Only global threads 0-255 pass the
 * idx<256 guard, so effectively one block does the work and any extra
 * blocks are idle. Exact statement order is load-bearing; do not reorder. */
__global__ void scan(int* height){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int forSwap,k;
__shared__ int data[264];
if(idx<256){
// Load into padded shared memory.
data[idx+(idx>>5)]=height[idx];
__syncthreads();
// Up-sweep (reduce) phase: build partial sums in a binary tree.
for(k=1;k<256;k*=2){
int j=idx*k*2+k-1;
if(j+k<256){
data[((j+k)>>5)+(j+k)]+=data[j+(j>>5)];
}
__syncthreads();
}
// Clear the root to make the down-sweep produce an exclusive scan.
data[((255)>>5)+(255)]=0;
__syncthreads();
// Down-sweep phase: propagate prefixes back down the tree.
for( k=256;k>1;k/=2){
int j=k*(idx+1)-1;
if(j<256){
forSwap=data[((j-k/2)>>5)+(j-k/2)];
data[((j-k/2)>>5)+(j-k/2)]=data[(j>>5)+j];
data[(j>>5)+j]=forSwap+data[(j>>5)+j];
}
__syncthreads();
}
__syncthreads();
// Add the exclusive prefix to the original count, yielding an
// inclusive prefix sum in place.
height[idx]+=data[idx+(idx>>5)];
}
}
// Counting-sort write-back: after scan(), height[] holds prefix sums of
// the histogram, so position i of the sorted output receives the smallest
// byte value v with height[v] > i. Each thread fills a strided subset of
// positions; `value` only ever increases because i increases.
__global__ void outGis(uchar* data, int n, int* height) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int stride = gridDim.x * blockDim.x;
    uchar value = 0;
    for (int i = tid; i < n; i += stride) {
        while (height[value] <= i) {
            ++value;
        }
        data[i] = value;
    }
}
/* Counting sort of bytes on the GPU. Input protocol (binary, stdin):
 * an int32 element count n followed by n raw bytes; the sorted bytes are
 * written to stdout.
 * Fix: both fread calls were unchecked — a short or malformed input left
 * n and data[] uninitialized/partial and the program continued into
 * undefined behavior. Failures now abort with a diagnostic. */
int main() {
    int size = 256;
    int n;
    if (fread(&n, sizeof(int), 1, stdin) != 1 || n < 0) {
        fprintf(stderr, "failed to read element count\n");
        return 1;
    }
    uchar* data = (uchar*) malloc(sizeof(uchar) * n);
    if (!data) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    if (fread(data, sizeof(uchar), n, stdin) != (size_t)n) {
        fprintf(stderr, "failed to read %d input bytes\n", n);
        free(data);
        return 1;
    }
    int *height1;
    uchar *data1;
    cudaMalloc(&data1, n * sizeof(uchar));
    cudaMemcpy(data1, data, n*sizeof(uchar), cudaMemcpyHostToDevice);
    cudaMalloc(&height1, size * sizeof(int));
    cudaMemset(height1, 0, size * sizeof(int));
    dim3 threads = 256;
    dim3 blocks = 256;
    // histogram -> prefix sum -> write-back (sorted bytes).
    calcGis<<<blocks, threads>>>(data1, n, height1);
    scan<<<blocks, threads>>>(height1);
    outGis<<<blocks, threads>>>(data1, n, height1);
    cudaMemcpy(data, data1, n * sizeof(uchar), cudaMemcpyDeviceToHost);
    cudaFree(height1);
    cudaFree(data1);
    fwrite(data, sizeof(uchar), n, stdout);
    free(data);
    return 0;
}
|
23,331 | #include "includes.h"
/* Tiled matrix transpose through shared memory: loads a BDIMY x BDIMX
 * tile row-major, then stores it transposed so both global accesses are
 * row-major (coalesced). `nx`/`ny` are the input's column/row counts.
 * Fix: __syncthreads() sat inside the boundary `if`, so on edge tiles
 * some threads skipped the barrier while others waited — undefined
 * behavior. The load and store are now guarded independently (the store
 * by the OUTPUT coordinates, which the old code also got wrong for
 * non-square edge tiles) and the barrier is executed by every thread. */
__global__ void transpose_smem(int * in, int* out, int nx, int ny)
{
    __shared__ int tile[BDIMY][BDIMX];
    //input index
    int ix, iy, in_index;
    //output index
    int i_row, i_col, _1d_index, out_ix, out_iy, out_index;
    //ix and iy calculation for input index
    ix = blockDim.x * blockIdx.x + threadIdx.x;
    iy = blockDim.y * blockIdx.y + threadIdx.y;
    //input index
    in_index = iy * nx + ix;
    //1D index calculation for shared memory
    _1d_index = threadIdx.y * blockDim.x + threadIdx.x;
    //col major row and col index calculation
    i_row = _1d_index / blockDim.y;
    i_col = _1d_index % blockDim.y;
    //coordinate for transpose matrix
    out_ix = blockIdx.y * blockDim.y + i_col;
    out_iy = blockIdx.x * blockDim.x + i_row;
    //output array access in row major format
    out_index = out_iy * ny + out_ix;
    if (ix < nx && iy < ny)
    {
        //load from in array in row major and store to shared memory in row major
        tile[threadIdx.y][threadIdx.x] = in[in_index];
    }
    //barrier must be reached by ALL threads of the block
    __syncthreads();
    if (out_ix < ny && out_iy < nx)
    {
        out[out_index] = tile[i_col][i_row];
    }
}
23,332 | #include "includes.h"
// Stub kernel: the body is intentionally empty — presumably a placeholder
// for a population-grid update step; no parameter is read or written.
// TODO confirm whether an implementation is still pending.
__global__ void update_population_lost( unsigned int * pop , unsigned int rows , unsigned int cols , unsigned int * fixed ) {
}
23,333 | #include "includes.h"
__global__ void kSigmoid_d(const int nThreads, float const *input, float *output) {
    /* Sigmoid derivative f'(x) = f(x)(1 - f(x)), where input[i] already
       holds f(x). Writes output[i] = input[i] * (1 - input[i]) for every
       i < nThreads using a grid-stride loop, so any launch size works. */
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += stride)
    {
        const float fx = input[i];
        output[i] = fx * (1 - fx);
    }
}
23,334 | /*
* CPU version of BackPropagation Neural Network written in CUDA. One can change the extension .cu to .c
* and compile the program with C compiler.
*
* This program is a rework of Source code for Neural Networks w/ JAVA (Tutorial 09) - Backpropagation 01
* from http://zaneacademy.com
*
* Author - Waylon Luo
* Date - April 23, 2020
*
*/
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <algorithm>
#include <vector>
#define LEARNING_RATE 0.80
#define NUMB_OF_EPOCHS 100
#define TD_X 4 // training data in x- dimension
#define TD_Y 2 // training data in y- dimension
#define TD_Z 2 // training data in z- dimension
double rand_double();
double TRAINING_DATA[TD_X][TD_Y][TD_Z] = {{{0,0},{0}},
{{0,1},{1}},
{{1,0},{1}},
{{1,1},{0}}};
#include "Neuron.cu"
// XOR backpropagation demo: evaluate the untrained net, train for
// 100 x NUMB_OF_EPOCHS passes over the four training rows, then
// evaluate again. Network/state helpers come from Neuron.cu.
int main(void){
    double result[] = {0, 0, 0, 0};
    struct neuron neurons[5];
    setNeurons(neurons);
    // Forward pass on every row before any training.
    for(int row = 0; row < TD_X; row++) {
        forwardProp(TRAINING_DATA[row][0], neurons);
        result[row] = neurons[4].output;
    }
    printResult(result);
    // 100 outer passes of NUMB_OF_EPOCHS epochs = 10,000 trainings total.
    for(int pass = 0; pass < 100; pass++){
        for(int epoch = 0; epoch < NUMB_OF_EPOCHS; epoch++) {
            if(epoch % 100 == 0) {
                printf("[epoch %d ]\n", epoch);
            }
            for(int row = 0; row < TD_X; row++) {
                forwardProp(TRAINING_DATA[row][0], neurons);
                backpropError(TRAINING_DATA[row][1][0], neurons);
                if(epoch % 100 == 0) printTrainingData(neurons);
            }
        }
    }
    printf("[done training]\n");
    // Forward pass again after training.
    for(int row = 0; row < TD_X; row++) {
        forwardProp(TRAINING_DATA[row][0], neurons);
        result[row] = neurons[4].output;
    }
    printResult(result);
    return(1);
}
23,335 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Empty program skeleton; does nothing and exits successfully.
int main()
{
    return 0;
}
|
23,336 |
#include <cuda.h>
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <ctime>
using namespace std;
//assignment constraints prevent the optimization of this function
//better approach would have been to take a max per block
//then sort the resulting maxes, with blocks of thread size 1024 you can cut down the serial
//sort from array size to (array size)/1024 or 2^(n-10) where n=log2(array size) assuming it is a power of 2
/* Block-level max reduction in dynamic shared memory, merged across
 * blocks with atomicMax into *numO. Launch with a power-of-two block
 * size and blockDim.x * sizeof(unsigned int) bytes of shared memory.
 * Fixes:
 *  - threads in the last block read numI out of bounds when `size` is
 *    not a multiple of blockDim.x — pad those lanes with 0 instead;
 *  - no __syncthreads() separated the shared-memory stores from the
 *    first reduction reads, a data race; a barrier now precedes the
 *    loop (the existing barrier at the end of each iteration orders
 *    the remaining steps). */
__global__ void getmaxcu(const unsigned int* numI, unsigned int* numO, const unsigned int size){
    extern __shared__ unsigned int sarr[];
    //thread id
    const unsigned int tdx = threadIdx.x;
    //global location in array
    unsigned int gdx = (blockDim.x)*blockIdx.x + threadIdx.x;
    // 0 is the identity for an unsigned max, so padding is safe.
    sarr[tdx] = (gdx < size) ? numI[gdx] : 0;
    __syncthreads();
    // Tree reduction: halve the active range each step.
    for(unsigned int s = blockDim.x/2; s > 0; s >>= 1){
        if (tdx < s)
            sarr[tdx] = max(sarr[tdx], sarr[tdx+s]);
        __syncthreads();
    }
    if(!tdx){
        //atomically merge this block's max into the global result
        atomicMax(numO, sarr[0]);
    }
}
/* Host driver: fills an array with random values, runs the GPU max
 * reduction, and prints the result.
 * Fixes:
 *  - numbers_temp_out was never initialized before the kernel's
 *    atomicMax, so garbage device memory could exceed every real value
 *    and be reported as the max — it is now cudaMemset to 0;
 *  - the deprecated cudaThreadSynchronize() is replaced by
 *    cudaDeviceSynchronize(). */
int main(int argc, char *argv[]){
    cudaError_t err;
    unsigned int *numbers_temp_in, *numbers_temp_out;
    unsigned int *numbers_in, *numbers_out;
    unsigned int size;
    //blocksize is 1024 based on cuda5 (original test source) and then my own GPU after all the servers crashed
    const unsigned int blockSize=1024;
    //check for proper args
    if(argc!=2){
        printf("usage: maxseq num\nnum = size of the array\n");
        exit(1);
    }
    //get the size and then determine number of blocks in grid
    size = atol(argv[1]);
    const unsigned int gridSize=ceil((float)size/blockSize);
    //numbers_out = single max element to return
    numbers_out=(unsigned int *)malloc(sizeof(unsigned int));
    if(!numbers_out){
        printf("Failed Allocation\n");
        exit(1);
    }
    //numbers_in = array of randomized inputs based on size
    numbers_in=(unsigned int *)malloc(size*sizeof(unsigned int));
    if(!numbers_in){
        printf("Failed Allocation\n");
        exit(1);
    }
    //input the array
    srand(time(NULL)); // setting a seed for the random number generator
    for(unsigned int i=0; i<size; ++i)
        numbers_in[i] = rand()%size;
    //allocate the memory in the GPU
    cudaMalloc((void**)&numbers_temp_in, sizeof(unsigned int)*size);
    cudaMalloc((void**)&numbers_temp_out, sizeof(unsigned int));
    //fix: zero the accumulator so atomicMax starts from a known value
    cudaMemset(numbers_temp_out, 0, sizeof(unsigned int));
    //copy over the inputs to the device
    cudaMemcpy((void*)numbers_temp_in, (void*)numbers_in, size*sizeof(unsigned int), cudaMemcpyHostToDevice);
    //set the grid/block dimensions
    dim3 dimGrid(gridSize);
    dim3 dimBlock(blockSize);
    //run the gpu max
    getmaxcu<<<dimGrid,dimBlock,blockSize*sizeof(unsigned int)>>>(numbers_temp_in, numbers_temp_out, size);
    //print out some information
    cout<<"Starting GPU Calculation:\nBlocksize: "<<blockSize<<"\nBlocks in grid: "<<gridSize<<"\n";
    //wait until completion
    cudaDeviceSynchronize();
    err=cudaGetLastError();
    if(err!=cudaSuccess){
        printf("Err: %s\n", cudaGetErrorString(err));
        exit(1);
    }
    //get the output back from the device
    cudaMemcpy((void*)numbers_out, (void*)numbers_temp_out, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    err=cudaGetLastError();
    if(err!=cudaSuccess){
        printf("Err Getting Output: %s\n", cudaGetErrorString(err));
        exit(1);
    }
    //print the max value
    cout<<"The maximum number in the array is: "<<*numbers_out<<"\n";
    //free memory
    cudaFree(numbers_temp_in);
    cudaFree(numbers_temp_out);
    free(numbers_in);
    free(numbers_out);
    return 0;
}
|
23,337 | #include <stdio.h>
#define Width 31
#define TITE_WIDTH 16
/* Square matrix multiply Pd = Md * Nd (row-major, ncols x ncols).
 * Fixes:
 *  - the boundary guard used `||` instead of `&&`, so out-of-range
 *    threads still entered the loop;
 *  - the final store was OUTSIDE the guard, so edge threads wrote
 *    Pd out of bounds — it is now inside;
 *  - the guard compared against the Width macro instead of the
 *    `ncols` parameter, silently tying the kernel to one size. */
__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Block ID X : %d and Block ID Y: %d\n", blockIdx.x,blockIdx.y);
    if (row < ncols && col < ncols) {
        float Pvalue = 0;
        for (int k = 0; k < ncols; k++) {
            float Melement = Md[row*ncols+k];
            float Nelement = Nd[k*ncols+col];
            Pvalue += Melement * Nelement;
        }
        Pd[row*ncols+col] = Pvalue;
    }
}
// Host driver: fills Width x Width matrices M (all 1s) and N (all 2s),
// multiplies them on the GPU, and prints the product row by row.
int main (int argc, char *argv[]){
    const int size = Width * Width * sizeof(float);
    float M[Width][Width], N[Width][Width], P[Width][Width];
    float *Md, *Nd, *Pd;
    for(int r = 0; r < Width; r++){
        for(int c = 0; c < Width; c++){
            M[r][c] = 1;
            N[r][c] = 2;
        }
    }
    // Device buffers and H2D transfers.
    cudaMalloc( (void**)&Md, size);
    cudaMalloc( (void**)&Nd, size);
    cudaMalloc( (void**)&Pd, size);
    cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice);
    cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice);
    // Ceil-divided grid of TITE_WIDTH x TITE_WIDTH blocks covers the matrix.
    dim3 dimBlock(TITE_WIDTH, TITE_WIDTH);
    dim3 dimGrid((Width+TITE_WIDTH-1)/TITE_WIDTH,(Width+TITE_WIDTH-1)/TITE_WIDTH);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
    printf("\n================================\n");
    for(int r = 0; r < Width; r++){
        for(int c = 0; c < Width; c++){
            printf("%.2f ", P[r][c]);
        }
    }
}
|
23,338 | #include "includes.h"
#define N 100000
#define THREAD_PER_BLOCK 1
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
// Elementwise vector addition, one block per element (the launch uses N
// blocks of a single thread each); the guard protects against oversized
// grids.
__global__ void add(int *a, int *b, int *c) {
    const int i = blockIdx.x;
    if (i < N) {
        c[i] = a[i] + b[i];
    }
}
23,339 | #include <stdio.h>
#include <cuda_runtime.h>
/* Prints every thread's coordinates plus the block/grid dimensions.
 * Fix: the output said "BockDim" — corrected to "BlockDim". */
__global__ void checkIndex(void){
    printf("ThreadIdx: (%d,%d,%d) BlockIdx: (%d,%d,%d) BlockDim: (%d,%d,%d) gridDim: (%d,%d,%d)\n",
           threadIdx.x,threadIdx.y,threadIdx.z,
           blockIdx.x,blockIdx.y,blockIdx.z,
           blockDim.x,blockDim.y,blockDim.z,
           gridDim.x,gridDim.y,gridDim.z);
}
/* Launch-geometry demo.
 * Fix: the grid size used (n + b) / b, which allocates one extra, fully
 * idle block whenever n is a multiple of b (here 6/3 gave 3 blocks
 * instead of 2). Correct ceiling division is (n + b - 1) / b. */
int main(int argc, char** argv){
    int nElem = 6;
    dim3 block(3);
    dim3 grid((nElem + block.x - 1) / block.x); // ceil-div: smallest grid covering nElem
    printf("no. elements: %d\n", nElem);
    printf("no. blocks: (%d,%d,%d)\n", grid.x, grid.y, grid.z);
    printf("no. threads: (%d,%d,%d)\n", block.x, block.y, block.z);
    checkIndex <<< grid, block >>>();
    cudaDeviceReset(); // implicitly synchronizes so the kernel's printf flushes
    return 0;
}
|
23,340 |
#include "complex.h"
extern "C" {
__device__
// Every orbit starts at the origin; the point index `i` is unused.
double2 fetch_initial_point(unsigned long i) {
    double2 origin;
    origin.x = 0.0;
    origin.y = 0.0;
    return origin;
}
__device__
// One step of an iterated-function-system orbit: `func_n` (a raw random
// draw) selects one of four affine maps. The coefficients match the
// classic Barnsley-fern maps, and the thresholds are presumably the fern
// probabilities (~1%, 85%, 7%, 7%) scaled to the 32-bit range
// (42949673 ~= 0.01 * 2^32, etc.) — TODO confirm the intended weights.
// `i` and `ipnt` are unused here.
double2 iterate_point(double2 val, unsigned long i, double2 ipnt, unsigned long func_n) {
// Map the raw draw onto a function index by cumulative thresholds.
if (func_n < 42949673) {
func_n = 0;
} else if (func_n < 3693671875) {
func_n = 1;
} else if (func_n < 3994319586) {
func_n = 2;
} else {
func_n = 3;
}
switch (func_n) {
case 0:
// Stem map: collapse x, shrink y.
return (double2){0.0, 0.16 * val.y};
case 1:
// Main leaflet map: slight rotation + upward shift.
return (double2){0.85 * val.x + 0.04 * val.y, -0.04 * val.x + 0.85 * val.y + 1.6};
case 2:
// Left leaflet map.
return (double2){0.2 * val.x - 0.26 * val.y, 0.23 * val.x + 0.22 * val.y + 1.6};
case 3:
// Right leaflet map.
return (double2){-0.15 * val.x + 0.28 * val.y, 0.26 * val.x + 0.24 * val.y + 0.44};
}
// Unreachable: func_n is always 0-3 after the remapping above.
return val;
}
}
|
23,341 |
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include "utils.cuh"
#include "rbm_helpers.cuh"
using namespace utils;
__global__
// One CD-1 (contrastive divergence, k=1) update step for an RBM.
// Thread layout: row indexes a visible unit, col a hidden unit; each
// thread owns one weight w[row][col]. Samples h|v, runs one Gibbs
// half-step h->v->h via gibbs_hvh, then applies the masked weight update.
// NOTE(review): the early `return` on the bounds guard precedes the
// __syncthreads() calls below — if any thread of a block returns while
// others reach the barrier, behavior is undefined; confirm the launch
// geometry never produces out-of-range threads.
// NOTE(review): gibbs_hvh is passed `bv, bv` for its (vbias, hbias)
// parameters — `bv, bh` looks intended. Harmless today because the bias
// terms are commented out inside propup/propdown, but verify before
// re-enabling them.
void contrastive_divergence(curandState *globalState,int *input, double *weights, double *bh, double *bv, bool *mask, double *ph_mean, int *ph_sample, double *nv_means, int *nv_samples, double *nh_means, int *nh_samples, int n_hidden, int n_visible, double lr, int N) {
//printf("in cuda kernel");
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row > n_visible || col > n_hidden) return;
/* CD-k */
//mtx.lock();
//printf("start CD \n");
// Per-thread RNG state; indexing presumably assumes 32-wide blocks —
// TODO confirm against the launch configuration.
curandState state = globalState[row*blockDim.y * 32 + col];
// Positive phase: sample hidden units from the data.
sample_h_given_v(state,input, ph_mean, ph_sample, bh, weights, n_hidden, n_visible);
__syncthreads();
//sample_v_given_h(state, ph_sample, nv_means, nv_samples, weights, bv, n_visible, n_hidden);
// Negative phase: a single Gibbs h->v->h step (CD-1).
for (int step = 0; step<1; step++) {
if (step == 0) {
//printf("only do it once \n");
gibbs_hvh(state,ph_sample, nv_means, nv_samples, nh_means, nh_samples, weights, bv, bv, n_visible, n_hidden);
}
}
__syncthreads();
// Masked weight update: only connections enabled in `mask` learn.
if (mask[row*n_hidden + col]) {
//printf("yes it was masked \n");
//printf("%d, ", ph_mean[i]); printf("%d, ", input[j]); printf("%d, ", nh_means[i]); printf("%d, ", nv_samples[j]);
weights[row*n_hidden + col] += lr * (ph_mean[col] * input[row] - nh_means[col] * nv_samples[row]) / N;
//bh[col] += lr * (ph_sample[col] - nh_means[col]) / N;
//bv[row] += lr * (input[row] - nv_samples[row]) / N;
//printf("|%d|", weights[i*n_visible + j]);
}
else {
//penalty if not masked
//weights[row*n_hidden + col] += lr * lr * lr* (ph_mean[col] * input[row] - nh_means[col] * nv_samples[row]) / N;
//bh[col] += lr *lr * (ph_sample[col] - nh_means[col]) / N;
//bv[row] += lr*lr * (input[row] - nv_samples[row]) / N;
}
}
__device__
// Samples hidden units given visible state v0_sample: writes the
// activation probability into mean[col] and a Bernoulli draw from it
// into sample[col]. `col` indexes the hidden unit owned by this thread.
void sample_h_given_v(curandState globalState,int *v0_sample, double *mean, int *sample, double *hbias, double *weights, int n_hidden, int n_visible) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row > n_visible || col > n_hidden) return;
    const double p = propup(v0_sample, weights, hbias[col], n_visible, n_hidden);
    mean[col] = p;
    sample[col] = binomial(1, mean[col], globalState);
    //printf("%f -> %d\n",val,state);
}
__device__
// Samples visible units given hidden state h0_sample: writes the
// activation probability into mean[row] and a Bernoulli draw from it
// into sample[row]. `row` indexes the visible unit owned by this thread.
void sample_v_given_h(curandState globalState,int *h0_sample, double *mean, int *sample, double *weights, double *vbias, int n_visible, int n_hidden) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row > n_visible || col > n_hidden) return;
    const double p = propdown(h0_sample, vbias[row], n_hidden, n_visible, weights);
    mean[row] = p;
    sample[row] = binomial(1, mean[row], globalState);
}
__device__
/* Hidden-unit activation: sigmoid of the dot product of the visible
 * vector v with weight column `col` (the bias term is currently
 * disabled). Returns the activation probability.
 * Fix: the out-of-range guard used a bare `return;` in a function that
 * returns double — ill-formed C++ / undefined result. It now returns a
 * neutral 0.0 for out-of-range threads. */
double propup(int *v, double *w, double bh, int n_visible, int n_hidden) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row > n_visible || col > n_hidden) return 0.0;
    double pre_sigmoid_activation = 0.0;
    for (int j = 0; j<n_visible; j++) {
        pre_sigmoid_activation += v[j] * w[j*n_hidden + col];
    }
    //pre_sigmoid_activation += bh;
    return sigmoid(pre_sigmoid_activation);
}
__device__
/* Visible-unit activation: sigmoid of the dot product of the hidden
 * vector h with weight row `row` (the bias term is currently disabled).
 * Returns the activation probability.
 * Fix: the out-of-range guard used a bare `return;` in a function that
 * returns double — ill-formed C++ / undefined result. It now returns a
 * neutral 0.0 for out-of-range threads. */
double propdown(int *h, double bv, int n_hidden, int n_visible, double *W) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row > n_visible || col > n_hidden) return 0.0;
    double pre_sigmoid_activation = 0.0;
    for (int j = 0; j<n_hidden; j++) {
        pre_sigmoid_activation += W[row*n_hidden + j] * h[j];
    }
    //pre_sigmoid_activation += bv;
    return sigmoid(pre_sigmoid_activation);
}
__device__
// One Gibbs half-step starting from hidden sample h0: resample the
// visible layer, then resample the hidden layer from that reconstruction.
// NOTE(review): the __syncthreads() here is only safe if every thread of
// the block reaches this call — the callers' early-return bounds guards
// make that worth confirming.
void gibbs_hvh(curandState globalState,int *h0_sample, double *nv_means, int *nv_samples, double *nh_means, int *nh_samples, double *weights, double *vbias, double *hbias, int n_visible, int n_hidden) {
sample_v_given_h(globalState,h0_sample, nv_means, nv_samples, weights, vbias, n_visible, n_hidden);
__syncthreads();
sample_h_given_v(globalState,nv_samples, nh_means, nh_samples, hbias, weights, n_hidden, n_visible);
}
|
23,342 | #include <stdio.h>
// Elementwise product c[i] = a[i] * b[i]; the launch geometry exactly
// covers the arrays, so no bounds guard is used.
__global__ void mykernel(int* a, int* b, int* c) {
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    c[i] = a[i] * b[i];
}
// Probably needs to be a define since we'll use it in <<<
#define NUM_BLOCKS 8
#define NUM_THREADS_PER_BLOCK 64
/* Host driver for the elementwise-product kernel: builds a = 0..n-1 and
 * b = n..1, multiplies on the GPU, and prints each result.
 * Fix: the three host buffers from malloc were never freed. */
int main() {
    // Host
    int *a, *b, *c;
    // Device pointers
    int *d_a, *d_b, *d_c;
    int numElements = NUM_BLOCKS * NUM_THREADS_PER_BLOCK;
    int arraySize = numElements * sizeof(int);
    // Allocate and initialize host memory.
    a = (int *) malloc(arraySize);
    b = (int *) malloc(arraySize);
    c = (int *) malloc(arraySize);
    for (int i = 0; i < numElements; i++) {
        a[i] = i;
        b[i] = numElements - i;
    }
    // Allocate memory on device. Store pointers on host.
    cudaMalloc((void**) &d_a, arraySize);
    cudaMalloc((void**) &d_b, arraySize);
    cudaMalloc((void**) &d_c, arraySize);
    // Copy host values to device.
    cudaMemcpy(d_a, a, arraySize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, arraySize, cudaMemcpyHostToDevice);
    // Run kernel.
    mykernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    // Copy device values to host (blocking, so it also waits for the kernel).
    cudaMemcpy(c, d_c, arraySize, cudaMemcpyDeviceToHost);
    // Deallocate device memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    for (int i = 0; i < numElements; i++) {
        printf("Result %d: %d = %d * %d\n", i, c[i], a[i], b[i]);
    }
    // fix: host buffers were previously leaked
    free(a);
    free(b);
    free(c);
    return 0;
}
|
23,343 | __constant__ int numFreqs;
__constant__ int numPoints;
__constant__ float centerFreq;
/*Finds the index of largest value among the input (size numFreqs)*/
/* Index of the largest of the numFreqs values in `input`; ties keep the
 * earliest index. */
__device__ int argmax(float* input) {
    int best = 0;
    float bestVal = input[0];
    for (int i = 1; i < numFreqs; i++) {
        if (input[i] > bestVal) {
            bestVal = input[i];
            best = i;
        }
    }
    return best;
}
/* Counts interior local maxima of `input` (numFreqs samples). The
 * plateau clause accepts a point equal to its left neighbour when the
 * value two steps back is lower — handling runs like 1, 4, 4, 2.
 * `frequencies` is accepted for signature parity but unused. */
__device__ int dCountPeaks(float *input, float *frequencies) {
    int count = 0;
    for (int i = 1; i < numFreqs - 1; i++) {
        const bool risesFromLeft =
            input[i] > input[i-1] ||
            (input[i] == input[i-1] && i > 1 && input[i] > input[i-2]);
        if (risesFromLeft && input[i] > input[i+1]) {
            count++;
        }
    }
    return count;
}
/*Output is numPoints * 3 array: height, center, standard deviation (a*e^(-(x-b)^2/(2c^2)))
Only uses the points surrounding the maximum value where y>ymax*cutoffPortion
If quadratic is true, outputs an approximation for the top of the gaussian in the form a(x-b)^2 + c
Otherwise outputs gaussian parameters*/
/* Per-point Gaussian/quadratic peak fit via quadratic regression on the
 * samples around the maximum that exceed cutoffPortion * ymax.
 * Output per point: {height, center, width-or-curvature} (see header
 * comment above). x-values are shifted/scaled to [0,1]-ish range to
 * avoid float overflow; outputs are rescaled on the way out.
 * Fix: the `a > 0` branch (upward-opening parabola — no peak exists)
 * zeroed the outputs but then FELL THROUGH and immediately overwrote
 * them with meaningless fit values; it now returns. */
__global__ void reg(float *input, float *output, float *frequencies, float cutoffPortion, bool quadratic){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= numPoints) return;
    input += idx * numFreqs;
    output += idx * 3;
    float scaleFactor = 1.0 / (frequencies[numFreqs-1] - frequencies[0]); //avoid float overflow, just scale things
    int maxI = argmax(input);
    float threshold = input[maxI] * cutoffPortion;
    float s40 = 0, s30 = 0, s20 = 0, s10 = 0, s00 = 0, s21 = 0, s11 = 0, s01 = 0; //quadratic regression sums
    float yval, frequency;
    //go right from peak, then go left from peak, accumulating moments
    for (int i = maxI; i < numFreqs && input[i] > threshold; i++) {
        yval = quadratic ? input[i] / input[maxI] : __logf(input[i]); //in quadratic, divide by input[maxI] to avoid underflow
        frequency = frequencies[i] - frequencies[0]; //shift to avoid floating point errors
        frequency *= scaleFactor; //scale to avoid underflow/overflow
        s00++;
        s10 += frequency;
        s20 += frequency * frequency;
        s30 += frequency * frequency * frequency;
        s40 += frequency * frequency * frequency * frequency;
        s01 += yval;
        s11 += frequency * yval;
        s21 += frequency * frequency * yval;
    }
    for (int i = maxI-1; i > 0 && input[i] > threshold; i--) {
        yval = quadratic ? input[i] / input[maxI] : __logf(input[i]);
        frequency = frequencies[i] - frequencies[0];
        frequency *= scaleFactor;
        s00++;
        s10 += frequency;
        s20 += frequency * frequency;
        s30 += frequency * frequency * frequency;
        s40 += frequency * frequency * frequency * frequency;
        s01 += yval;
        s11 += frequency * yval;
        s21 += frequency * frequency * yval;
    }
    float a, b, c, q;
    //Cramer's-rule solution of the 3x3 normal equations
    q = (s40*(s20 * s00 - s10 * s10) -
         s30*(s30 * s00 - s10 * s20) +
         s20*(s30 * s10 - s20 * s20));
    a = (s21*(s20 * s00 - s10 * s10) -
         s11*(s30 * s00 - s10 * s20) +
         s01*(s30 * s10 - s20 * s20))
        / q;
    b = (s40*(s11 * s00 - s01 * s10) -
         s30*(s21 * s00 - s01 * s20) +
         s20*(s21 * s10 - s11 * s20))
        / q;
    c = (s40*(s20 * s01 - s10 * s11) -
         s30*(s30 * s01 - s10 * s21) +
         s20*(s30 * s11 - s20 * s21))
        / q;
    //a > 0: upward parabola, no maximum to fit — report zeros and stop
    if (a > 0) {
        output[0] = output[1] = output[2] = 0;
        return; // fix: previously fell through and overwrote the zeros
    }
    if (quadratic) { //scalefactors and input[maxI] and frequencies[0] terms undo the scaling/shifting
        output[0] = a * input[maxI] / scaleFactor;
        output[1] = -b / scaleFactor / (2 * a) + frequencies[0];
        output[2] = (c - b * b / (4 * a)) * input[maxI];
    } else {
        output[0] = __expf(c - (b * b) / (4 * a));
        output[1] = -b / scaleFactor / (2 * a) + frequencies[0];
        output[2] = sqrtf(-1 / (2 * a)) / scaleFactor;
    }
    //degenerate fits (q == 0 etc.) produce non-finite coefficients
    if (!isfinite(a) || !isfinite(b) || !isfinite(c)) {
        output[0] = output[1] = output[2] = 0;
        return;
    }
}
/*Finds the absolute peak values of the function.*/
__global__ void findPeaks(float *input, float *output, float *frequencies) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= numPoints) return;
input = input + idx * numFreqs;
output = output + idx;
int maxI = argmax(input);
output[0] = frequencies[maxI];
}
/*Counts the number of peaks, where d2y/df2 < 0 and dy/df=0*/
// One thread per spectrum: store its local-maximum count (see dCountPeaks).
__global__ void countPeaks(float *input, int *output, float *frequencies) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= numPoints) return;
    float *spectrum = input + idx * numFreqs;
    output[idx] = dCountPeaks(spectrum, frequencies);
}
/*Does a single integration using frequency bounds*/
__device__ float integrate(float *input, float *frequencies, float lowerbound, float upperbound) {
int i;
float integral = 0, width, h1;
for (i = 0; frequencies[i] < lowerbound && i < numFreqs; i++);
if (i == numFreqs) return 0;
if (i != 0) {
//interpolation to find trapezoid size
width = frequencies[i] - lowerbound;
h1 = input[i-1] + (lowerbound - frequencies[i-1]) / (frequencies[i] - frequencies[i-1]) * (input[i] - input[i-1]);
integral += (h1 + input[i]) * width / 2;
}
for(i++; frequencies[i] < upperbound && i < numFreqs; i++) {
integral += (input[i] + input[i-1]) * (frequencies[i] - frequencies[i-1]) / 2;
}
if (i == numFreqs) return integral;
//interpolation to find trapezoid size
width = upperbound - frequencies[i-1];
h1 = input[i-1] + (upperbound - frequencies[i-1]) / (frequencies[i] - frequencies[i-1]) * (input[i] - input[i-1]);
integral += (h1 + input[i-1]) * width / 2;
return integral;
}
/*Integrates everything to the left or right of a particular frequency until input hits limit * input[target]*/
__global__ void integrateLR(float *input, float *output, float *target, float *frequencies, float limit, bool left) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= numPoints) return;
input = input + idx * numFreqs;
output = output + idx;
float targetFrequency = target[idx];
float boundFrequency;
int end;
for (end = 0; frequencies[end] < targetFrequency; end++); //end is frequency position of target
limit *= input[end]; //set limit input value
if (left) {
for (; end >= 0 && input[end] > limit; end--);
boundFrequency = end >= 0 ? frequencies[end] : -INFINITY;
*output = integrate(input, frequencies, boundFrequency, targetFrequency);
} else { //right
for (; end < numFreqs && input[end] > limit; end++);
boundFrequency = end < numFreqs ? frequencies[end] : INFINITY;
*output = integrate(input, frequencies, targetFrequency, boundFrequency);
}
}
/*Does integration between bounds, where bounds is a n*2 array*/
__global__ void integrateBounds(float *input, float *output, float *bounds, float *frequencies) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= numPoints) return;
input = input + idx * numFreqs;
output = output + idx;
float lowerbound = bounds[idx];
float upperbound = bounds[idx + numPoints];
*output = integrate(input, frequencies, lowerbound, upperbound);
}
/* Finds the width of the profile at half maximum.*/
__global__ void fwhm(float *input, float *output, float *frequencies) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= numPoints) return;
input = input + idx * numFreqs;
output = output + idx;
int peak = argmax(input);
float halfIntensity = input[peak] / 2;
float upperbound = 0;
float lowerbound = 0;
int i;
for (i = peak; input[i] > halfIntensity && i < numFreqs; i++);
upperbound = frequencies[i-1] + (frequencies[i] - frequencies[i-1]) *
(halfIntensity - input[i]) / (input[i-1] - input[i]);
for (i = peak; input[i] > halfIntensity && i >= 0; i--);
lowerbound = frequencies[i] + (frequencies[i+1] - frequencies[i]) *
(halfIntensity - input[i]) / (input[i+1] - input[i]);
if (upperbound != upperbound || lowerbound != lowerbound)
*output = 0;
else
*output = upperbound - lowerbound;
}
|
23,344 | #include<stdio.h>
#define N (1024*1024)
#define M 1024
// Per-block partial dot product: each thread writes one product into
// shared memory, then thread 0 serially sums the M entries and folds the
// partial into *c with a single atomicAdd per block.
__global__ void dot(float *a, float *b, float *c) {
    __shared__ float products[M];
    const int g = blockDim.x*blockIdx.x + threadIdx.x;
    products[threadIdx.x] = a[g]*b[g];
    __syncthreads();
    if (threadIdx.x == 0) {
        float partial = 0.;
        for (int k = 0; k < M; k++)
            partial += products[k];
        atomicAdd(c, partial);
    }
}
// Dot product of two length-N vectors of ones (expected result: N).
int main(int argc, char *argv[]) {
    const int size = N*sizeof(float);
    float *a, *b, *c, *dev_a, *dev_b, *dev_c;
    // Host buffers: two input vectors and the scalar result.
    a = (float *) malloc(size);
    b = (float *) malloc(size);
    c = (float *) malloc(sizeof(float));
    // Matching device buffers.
    cudaMalloc((void **) &dev_a, size);
    cudaMalloc((void **) &dev_b, size);
    cudaMalloc((void **) &dev_c, sizeof(float));
    for (int i = 0; i < N; i++) {
        a[i] = 1.;
        b[i] = 1.;
    }
    *c = 0.;
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, sizeof(float), cudaMemcpyHostToDevice);
    // N/M blocks of M threads; blocking memcpy below waits for the kernel.
    dot<<<N/M, M>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", *c);
    cudaFree(dev_c); cudaFree(dev_b); cudaFree(dev_a);
    free(c); free(b); free(a);
    return 0;
}
|
23,345 | #include "includes.h"
/* CUDA API header files*/
extern "C"
// Dense square matrix multiply Pd = Md * Nd with COLUMN-major storage
// (element (r,c) lives at c*size + r) — this code is called from R,
// where column-major ordering is the norm. One thread per output cell.
__global__ void matrixMult(const double *Md, const double *Nd, double *Pd, int size)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    const int col = blockDim.y * blockIdx.y + threadIdx.y;
    // Threads past the matrix edge do nothing.
    if (row >= size || col >= size)
        return;
    double acc = 0;
    for (int k = 0; k < size; k++)
        acc += Md[k*size + row] * Nd[col*size + k];
    Pd[col*size + row] = acc;
}
23,346 | // seems my bitcasting is/was broken ('is', at time of writing this test, 'was' since I probably fixed it by now :-) )
// this code tests this, sortof
// (hmmm, edit, seems to be ok, in fact...)
#include "cuda.h"
#include <iostream>
#include <cassert>
using namespace std;
// Round-trips bit patterns: reinterpret int1[0]'s bits as a float into
// f1[0], then reinterpret those bits back as an int into int2[0].
__global__ void mykernel(int *int1, float *f1, int *int2) {
    f1[0] = *reinterpret_cast<float *>(&int1[0]);
    int2[0] = *reinterpret_cast<int *>(&f1[0]);
}
/* Bit-level reinterpretation of a float as an int.
 * Fix: the type-punning pointer cast violates strict aliasing; the CUDA
 * intrinsic __float_as_int performs the same bit copy with defined
 * behavior. */
__global__ void floattoint(int *out, float *in) {
    out[0] = __float_as_int(in[0]);
}
/* Bit-level reinterpretation of an int as a float.
 * Fix: the type-punning pointer cast violates strict aliasing; the CUDA
 * intrinsic __int_as_float performs the same bit copy with defined
 * behavior. */
__global__ void inttofloat(float *out, int *in) {
    out[0] = __int_as_float(in[0]);
}
/* Exercises the bit-reinterpretation kernels and asserts that an
 * int -> float-bits -> int round trip restores the original value.
 * Fixes:
 *  - the int buffers were sized with sizeof(float) (works only because
 *    the sizes coincide) — now sized with the correct element type;
 *  - the new[] host buffers and cudaMalloc device buffers were never
 *    released. */
int main(int argc, char *argv[]) {
    int N = 32;
    int *int1_host = new int[N];
    float *f1_host = new float[N];
    int *int2_host = new int[N];
    int *int1_gpu;
    float *f1_gpu;
    int *int2_gpu;
    cudaMalloc((void**)&int1_gpu, N * sizeof(int));
    cudaMalloc((void**)&f1_gpu, N * sizeof(float));
    cudaMalloc((void**)&int2_gpu, N * sizeof(int));
    int1_host[0] = 123;
    cudaMemcpy(int1_gpu, int1_host, N * sizeof(int), cudaMemcpyHostToDevice);
    mykernel<<<dim3(1,1,1), dim3(32,1,1)>>>(int1_gpu, f1_gpu, int2_gpu);
    cudaMemcpy(f1_host, f1_gpu, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(int2_host, int2_gpu, N * sizeof(int), cudaMemcpyDeviceToHost);
    cout << "this should NOT be 123, should be some weird float value, not even slightly close to 123 :" << endl;
    cout << "f1[0] " << f1_host[0] << endl;
    assert(f1_host[0] != 123.0f);
    cout << "this SHOULD be 123 :" << endl;
    cout << "int2[0] " << int2_host[0] << endl;
    assert(int2_host[0] == 123);
    int1_host[0] = 777;
    cudaMemcpy(int1_gpu, int1_host, N * sizeof(int), cudaMemcpyHostToDevice);
    cout << "after copy to device" << endl;
    inttofloat<<<dim3(1,1,1), dim3(32,1,1)>>>(f1_gpu, int1_gpu);
    cout << "after kernel call 2" << endl;
    cudaMemcpy(f1_host, f1_gpu, N * sizeof(float), cudaMemcpyDeviceToHost);
    cout << "f1[0]" << f1_host[0] << endl;
    assert(f1_host[0] != 777.0f);
    floattoint<<<dim3(1,1,1), dim3(32,1,1)>>>(int2_gpu, f1_gpu);
    cout << "after kernel call 3" << endl;
    cudaMemcpy(int2_host, int2_gpu, N * sizeof(int), cudaMemcpyDeviceToHost);
    cout << "int2[0]" << int2_host[0] << endl;
    assert(int2_host[0] == 777);
    // fix: release host and device buffers (previously leaked)
    cudaFree(int2_gpu);
    cudaFree(f1_gpu);
    cudaFree(int1_gpu);
    delete[] int2_host;
    delete[] f1_host;
    delete[] int1_host;
    return 0;
}
|
23,347 | #include "softmax.hh"
#include "graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
    // Softmax node: output has the same shape as its single predecessor.
    Softmax::Softmax(Op* arg)
        : Op("softmax", arg->shape_get(), {arg})
    {}

    // Compile the predecessor, allocate a (rows x cols) output buffer, and
    // register a runtime softmax node operating row-wise on that buffer.
    void Softmax::compile()
    {
        auto& graph = Graph::instance();
        auto& in = graph.compiled(preds()[0]);
        const std::size_t rows = in.out_shape[0];
        const std::size_t cols = in.out_shape[1];
        Shape out_shape = in.out_shape;
        dbl_t* out_data = tensor_alloc(rows * cols);
        auto node = rt::Node::op_softmax(in.out_data, out_data,
                                         rows, cols,
                                         {in.out_node});
        graph.add_compiled(this, {node}, {out_data}, node, out_shape, out_data);
    }
}
|
23,348 | #include "includes.h"
// Squared Euclidean distance between search point `bias` and every reference
// point (k = dimensionality, n = reference count). One thread per reference
// point; writes dist[bias * n + tid].
__global__ void kernel4(int k, int n, int bias, float* searchPoints, float* referencePoints, float* dist)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    const float* s = searchPoints + k * bias;
    const float* r = referencePoints + k * tid;
    float acc = 0;
    for (int i = 0; i < k; i++) {
        const float d = s[i] - r[i];
        acc += d * d;
    }
    dist[bias * n + tid] = acc;
} |
23,349 | /*
Below code is based on
https://github.com/NVIDIA-developer-blog/code-samples/tree/master/series/cuda-cpp/transpose.
nvcc transpose_any.cu -o transpose_any
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#define DEBUG
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
// Debug builds: print and assert on any failing CUDA runtime call.
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
// Release builds: pass the status straight through (no-op wrapper).
return result;
}
const int nx = 2;
const int ny = 2;
const int TILE_DIM = 16;
const int BLOCK_ROWS = TILE_DIM; // 8;
const int NUM_REPS = 100;
// Check errors and print GB/s
// Compare res[] against the reference ref[] element-by-element, printing a
// per-element verdict; if everything matched, print effective bandwidth in
// GB/s (2 transfers per element, NUM_REPS repetitions, ms total time).
void postprocess(const float* ref, const float* res, int n, float ms) {
  bool ok = true;
  printf("\nref res\n");
  for (int i = 0; i < n; i++) {
    if (res[i] == ref[i]) {
      printf(" Passed: %d %f %f\n", i, ref[i], res[i]);
    } else {
      printf(" Failed: %d %f %f\n", i, ref[i], res[i]);
      ok = false;
    }
  }
  if (ok)
    printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms);
}
// Naive transpose
// Simplest transpose; doesn't use shared memory.
// Global memory reads are coalesced but writes are not.
// Naive transpose: one element per thread, no shared memory.
// Global reads are coalesced; writes are strided (uncoalesced).
__global__ void transposeNaiveRectangle(float* odata, const float* idata) {
  const int col = blockIdx.x * TILE_DIM + threadIdx.x;
  const int row = blockIdx.y * TILE_DIM + threadIdx.y;
  if (col >= nx || row >= ny)
    return;
  // input is nx wide; transposed output is ny wide.
  odata[col * ny + row] = idata[row * nx + col];
}
int sizeX = nx / TILE_DIM;
int sizeY = ny / TILE_DIM;
int remainderX = nx % TILE_DIM;
int remainderY = ny % TILE_DIM;
// Shared
// Tiled transpose staged through shared memory so that both the global read
// and the global write are coalesced. Handles ragged edges via bounds checks.
__global__ void transposeCoalescedRectangle(float* odata, const float* idata) {
  __shared__ float tile[TILE_DIM][TILE_DIM];
  // Load a TILE_DIM x TILE_DIM tile, BLOCK_ROWS rows per iteration.
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
    if (x < nx && (y + j) < ny)
      tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * nx + x];
  __syncthreads();  // the whole tile must be written before transposed reads
  // Swap block offsets so the transposed writes remain coalesced.
  x = blockIdx.y * TILE_DIM + threadIdx.x;
  y = blockIdx.x * TILE_DIM + threadIdx.y;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
    if (x < ny && (y + j) < nx)
      odata[(y + j) * ny + x] = tile[threadIdx.x][threadIdx.y + j];
}
// Same tiled transpose, but the shared tile's inner dimension is padded by
// one element so column accesses hit distinct banks (no bank conflicts).
__global__ void transposeNoBankConflictsRectangle(float* odata,
                                                  const float* idata) {
  __shared__ float tile[TILE_DIM][TILE_DIM + 1];  // +1 kills bank conflicts
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
    if (x < nx && (y + j) < ny)
      tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * nx + x];
  __syncthreads();  // finish loading before transposed reads
  x = blockIdx.y * TILE_DIM + threadIdx.x;
  y = blockIdx.x * TILE_DIM + threadIdx.y;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
    if (x < ny && (y + j) < nx)
      odata[(y + j) * ny + x] = tile[threadIdx.x][threadIdx.y + j];
}
/* Times one transpose kernel: prints the launch config, warms up, runs
 * NUM_REPS timed repetitions, then validates against `gold`.
 * (Launching a __global__ function through a host function pointer with the
 * <<<>>> syntax is supported by nvcc.) */
static void timeTransposeKernel(void (*kernel)(float*, const float*),
                                dim3 grid, dim3 block, float* d_tdata,
                                const float* d_idata, const float* gold,
                                float* h_tdata, int mem_size,
                                cudaEvent_t startEvent, cudaEvent_t stopEvent) {
  printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n", nx, ny,
         (int)block.x, (int)block.y, TILE_DIM, TILE_DIM);
  printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n", grid.x, grid.y, grid.z,
         block.x, block.y, block.z);
  checkCuda(cudaMemset(d_tdata, 0, mem_size));
  kernel<<<grid, block>>>(d_tdata, d_idata);  // warmup
  checkCuda(cudaEventRecord(startEvent, 0));
  for (int i = 0; i < NUM_REPS; i++)
    kernel<<<grid, block>>>(d_tdata, d_idata);
  checkCuda(cudaEventRecord(stopEvent, 0));
  checkCuda(cudaEventSynchronize(stopEvent));
  float ms;
  checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent));
  checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost));
  postprocess(gold, h_tdata, nx * ny, ms);
}

/* Benchmarks the three transpose kernels. Fixes vs. the original:
 *  - the clock-rate printf was missing its %d conversion specifier;
 *  - the debug dumps always printed 100 elements, reading past the end of
 *    the nx*ny-element buffers (nx*ny can be as small as 4);
 *  - removed the unused d_cdata/h_cdata allocations and dead #if 0 code;
 *  - the three near-identical timing sections are factored into a helper. */
int main(int argc, char** argv) {
  const int mem_size = nx * ny * sizeof(float);
  dim3 dimGrid((nx + TILE_DIM - 1) / TILE_DIM,
               (ny + TILE_DIM - 1) / TILE_DIM, 1);
  dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
  int devId = 0;
  if (argc > 1)
    devId = atoi(argv[1]);
  cudaDeviceProp prop;
  checkCuda(cudaGetDeviceProperties(&prop, devId));
  printf("\nDevice : %s\n", prop.name);
  printf("%d.%d\n", prop.major, prop.minor);
  printf("maxGridSize= %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1]);
  printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n", nx, ny,
         TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
  printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n", dimGrid.x, dimGrid.y,
         dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
  printf("warp size: %d\n", prop.warpSize);
  printf("max threads per block: %d\n", prop.maxThreadsPerBlock);
  printf("max thread dim z:%d y:%d x:%d\n", prop.maxThreadsDim[0],
         prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
  printf("max grid size z:%d y:%d x:%d\n", prop.maxGridSize[0],
         prop.maxGridSize[1], prop.maxGridSize[2]);
  printf("clock rate(KHz): %d\n", prop.clockRate);  // fix: %d was missing
  if (dimBlock.x * dimBlock.y * dimBlock.z > (unsigned)prop.maxThreadsPerBlock) {
    printf("Error! Block size is greater than maxThreadsPerBlock!\n");
  }
  checkCuda(cudaSetDevice(devId));
  float* h_idata = (float*)malloc(mem_size);
  float* h_tdata = (float*)malloc(mem_size);
  float* gold = (float*)malloc(mem_size);
  float *d_idata, *d_tdata;
  checkCuda(cudaMalloc(&d_idata, mem_size));
  checkCuda(cudaMalloc(&d_tdata, mem_size));
  /* Input: sequential values; gold: its transpose, for validation. */
  for (int j = 0; j < ny; j++)
    for (int i = 0; i < nx; i++)
      h_idata[j * nx + i] = j * nx + i;
  for (int j = 0; j < ny; j++)
    for (int i = 0; i < nx; i++)
      gold[i * ny + j] = h_idata[j * nx + i];
  /* Debug dumps, clamped to the actual element count (fix: was always 100). */
  const int dumpCount = (nx * ny < 100) ? nx * ny : 100;
  printf("\n");
  for (int j = 0; j < dumpCount; j++)
    printf("%d ", (int)h_idata[j]);
  printf("\n");
  for (int j = 0; j < dumpCount; j++)
    printf("%d ", (int)gold[j]);
  printf("\nmem_size=%d\n\n", mem_size);
  checkCuda(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice));
  cudaEvent_t startEvent, stopEvent;
  checkCuda(cudaEventCreate(&startEvent));
  checkCuda(cudaEventCreate(&stopEvent));
  printf("%35s%20s\n", "Routine", "Bandwidth (GB/s)");
  printf("%35s", "transposeNaiveRectangle ");
  timeTransposeKernel(transposeNaiveRectangle, dimGrid,
                      dim3(TILE_DIM, TILE_DIM, 1), d_tdata, d_idata, gold,
                      h_tdata, mem_size, startEvent, stopEvent);
  printf("%35s", "transposeCoalescedRectangle");
  timeTransposeKernel(transposeCoalescedRectangle, dimGrid, dimBlock, d_tdata,
                      d_idata, gold, h_tdata, mem_size, startEvent, stopEvent);
  printf("%35s", "transposeNobankConflictsRectangle");
  timeTransposeKernel(transposeNoBankConflictsRectangle, dimGrid, dimBlock,
                      d_tdata, d_idata, gold, h_tdata, mem_size, startEvent,
                      stopEvent);
  /* cleanup */
  checkCuda(cudaEventDestroy(startEvent));
  checkCuda(cudaEventDestroy(stopEvent));
  checkCuda(cudaFree(d_tdata));
  checkCuda(cudaFree(d_idata));
  free(h_idata);
  free(h_tdata);
  free(gold);
}
|
23,350 | #define N 536870912
#include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>
#include <vector>
#include <cmath>
#define BLOCK_SIZE 1024
// Block-level tree reduction: each block sums a 2*BLOCK_SIZE-element slice of
// ada[] (zero-padded past N) and writes its partial sum to gabrys[blockIdx.x].
__global__ void reduceSum(int *ada, int *gabrys){
    __shared__ int partial[2 * BLOCK_SIZE];
    const unsigned int t = threadIdx.x;
    const unsigned int base = 2 * blockIdx.x * BLOCK_SIZE;
    // Each thread stages two elements, padding with 0 beyond the input.
    partial[t] = (base + t < N) ? ada[base + t] : 0;
    partial[BLOCK_SIZE + t] = (base + BLOCK_SIZE + t < N) ? ada[base + BLOCK_SIZE + t] : 0;
    // Pairwise tree reduction; the stride doubles each round.
    for (unsigned int stride = 1; stride <= BLOCK_SIZE; stride *= 2){
        __syncthreads();
        if (t % stride == 0){
            partial[2 * t] += partial[2 * t + stride];
        }
    }
    if (t == 0){
        gabrys[blockIdx.x] = partial[0];
    }
    __syncthreads();
}
/* Sums N ones with one GPU reduction pass, then folds the per-block partial
 * sums on the host. Fixes vs. the original:
 *  - grid size now matches the kernel's 2*BLOCK_SIZE elements per block;
 *  - the original relaunched reduceSum in-place over gabrys in a loop, which
 *    races (blocks overwrite slots other blocks are still reading) and reads
 *    uninitialized entries past the partial-sum count;
 *  - the managed buffers are now freed. */
int main(void){
    int *ada, *gabrys;
    cudaMallocManaged(&ada, N * sizeof(int));
    cudaMallocManaged(&gabrys, N * sizeof(int));
    for(int i = 0; i < N; i++){
        ada[i] = 1;
    }
    // Each block consumes 2*BLOCK_SIZE inputs -> this many partial sums.
    const int numBlocks = (N + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);
    reduceSum<<<numBlocks, BLOCK_SIZE>>>(ada, gabrys);
    cudaDeviceSynchronize();
    // N / 2048 partials is small; finish the reduction on the host.
    long long total = 0;
    for(int i = 0; i < numBlocks; i++){
        total += gabrys[i];
    }
    std::cout<<total<<std::endl;
    cudaFree(ada);
    cudaFree(gabrys);
}
|
23,351 | #include "includes.h"
// Square integer matrix multiply: out = A * B, row-major, width = outTabWidth.
// blockIdx/threadIdx .x index rows, .y index columns.
__global__ void MatrixMultiplication__CudaKernel(int* in_tabA, int* in_tabB, int* out_tabC, int outTabWidth)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads beyond the matrix edge do no work.
    if (row >= outTabWidth || col >= outTabWidth)
        return;
    int acc = 0;
    for (int k = 0; k < outTabWidth; k++)
        acc += in_tabA[row * outTabWidth + k] * in_tabB[k * outTabWidth + col];
    out_tabC[row * outTabWidth + col] = acc;
} |
23,352 | #include "includes.h"
// Binarize a float image into RGBA: white (255,255,255,255) where the pixel
// is non-finite or outside [lowerLim, upperLim], transparent black otherwise.
__global__ void convertFloatToRGBAbinary_kernel(uchar4 *out_image, const float *in_image, int width, int height, float lowerLim, float upperLim) {
    const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
    const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
    if (x >= width || y >= height)
        return;
    const int idx = __mul24(y, width) + x;
    const float val = in_image[idx];
    const bool invalid = !isfinite(val) || (val < lowerLim) || (val > upperLim);
    const unsigned char v = invalid ? 255 : 0;  // same value for all channels
    out_image[idx] = make_uchar4(v, v, v, v);
} |
23,353 | #include <stdio.h>
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC (void*)(0)
#endif
void checkCUDAError(const char *msg);
/* Block-wide sum of sdata[0 .. blockDim.x) into *s (thread 0 writes).
 * Preconditions: blockDim.x is a power of two and sdata is fully populated
 * (callers __syncthreads() before calling).
 * Fix vs. original: the final intra-warp phase relied on implicit warp-
 * synchronous execution (EMUSYNC expands to a no-op), which is undefined on
 * Volta+ with independent thread scheduling. Use volatile accesses plus
 * __syncwarp() so each step's stores are visible to the next. */
__device__ void sum_block(float *s, float *sdata)
{
    const int blockSize = blockDim.x;
    const int tid = threadIdx.x;
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
    if (tid < 32)
    {
        volatile float *vs = sdata;
        if (blockSize >= 64) { vs[tid] += vs[tid + 32]; __syncwarp(); }
        if (blockSize >= 32) { vs[tid] += vs[tid + 16]; __syncwarp(); }
        if (blockSize >= 16) { vs[tid] += vs[tid + 8]; __syncwarp(); }
        if (blockSize >= 8) { vs[tid] += vs[tid + 4]; __syncwarp(); }
        if (blockSize >= 4) { vs[tid] += vs[tid + 2]; __syncwarp(); }
        if (blockSize >= 2) { vs[tid] += vs[tid + 1]; __syncwarp(); }
    }
    if (tid == 0) *s = sdata[0];
}
// Sum array[0 .. blockDim.x) into *s via dynamic shared memory.
// Launch as a single block whose size equals the array length (power of two).
__global__ void sum_global(float* s, float *array)
{
    extern __shared__ float sdata[];
    const int tid = threadIdx.x;
    sdata[tid] = array[tid];
    __syncthreads();   // all loads must land before the reduction
    sum_block(s, sdata);
}
// Midpoint-rule samples of 4/(1+x^2) on [0,1]: each thread evaluates one
// sample, then the block reduces its samples into da_pi[blockIdx.x].
// (The caller divides the grand total by the thread count to obtain pi.)
__global__ void calc_pi0(float *da_pi)
{
    extern __shared__ float sdata[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int n = blockDim.x * gridDim.x;
    const float x = (i - 0.5) / n;
    sdata[threadIdx.x] = 4.0 / (1 + x * x);
    __syncthreads();
    sum_block(da_pi + blockIdx.x, sdata);
}
/* Two-stage pi computation: per-block partial sums, then a single-block
 * final reduction. Fix vs. original: cudaThreadSynchronize() is deprecated;
 * replaced with cudaDeviceSynchronize(). */
int main( int argc, char** argv)
{
    float pi = 0.0;
    float *d_pi;   // final scalar result
    float *da_pi;  // per-block partial sums
    /* note: numBlocks and numThreadsPerBlock must be powers of 2 */
    int numBlocks = 256;
    int numThreadsPerBlock = 256;
    int numThreads = numBlocks * numThreadsPerBlock;
    size_t memSize = numBlocks * sizeof(float);
    size_t sharedMemSize = numThreadsPerBlock * sizeof(float);
    cudaMalloc((void**)&da_pi, memSize);
    cudaMalloc((void**)&d_pi, sizeof(float));
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    /* Stage 1: each block reduces its samples into da_pi[block]. */
    calc_pi0<<<dimGrid, dimBlock, sharedMemSize>>>(da_pi);
    cudaDeviceSynchronize();
    /* Stage 2: one block of numBlocks threads sums the partials. */
    sum_global<<<1, dimGrid, sharedMemSize>>>(d_pi, da_pi);
    cudaDeviceSynchronize();
    checkCUDAError("kernel execution");
    cudaMemcpy(&pi, d_pi, sizeof(float), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    cudaFree(da_pi);
    cudaFree(d_pi);
    pi /= numThreads;   // midpoint rule: divide the sum by the sample count
    printf("pi=%f\n", pi);
    return 0;
}
// Report the last CUDA error (if any) under a caller-supplied tag and exit.
void checkCUDAError(const char *msg)
{
    const cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(-1);
    }
}
|
23,354 | #include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <assert.h>
#define MIN 2
#define MAX 7
#define ITER 10000000
// Seed one curand generator per thread (fixed seed, sequence = global id).
__global__ void setup_kernel(curandState *state){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    curand_init(1234, idx, 0, &state[idx]);
}
// Draw 100 uniform (0,1] samples from this thread's generator, printing and
// storing each. NOTE(review): every thread writes result[0..99], so this is
// only race-free with the single-thread <<<1,1>>> launch used in main().
__global__ void generate_kernel(curandState *my_curandstate, float *result){
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    for (int count = 0; count < 100; ++count){
        const float myrandf = curand_uniform(my_curandstate + idx);
        printf("idx=%d and myrandf=%f\n", idx, myrandf);
        result[count] = myrandf;
    }
}
/* Seeds a single curand generator and pulls 100 uniform samples back to the
 * host. Fix vs. original: d_state, d_result and h_result were all leaked. */
int main(){
    curandState *d_state;
    cudaMalloc(&d_state, sizeof(curandState));   // one state: <<<1,1>>> below
    float *d_result, *h_result;
    cudaMalloc(&d_result, (100) * sizeof(float));
    h_result = (float *)malloc((100) * sizeof(float));
    setup_kernel<<<1,1>>>(d_state);
    generate_kernel<<<1,1>>>(d_state, d_result);
    // Blocking memcpy also synchronizes with the kernels above.
    cudaMemcpy(h_result, d_result, (100) * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_result);
    cudaFree(d_state);
    free(h_result);
    return 0;
}
|
23,355 | #include<stdio.h>
//Device code
// Element-wise vector addition: c[i] = a[i] + b[i] for i < N.
__global__ void addvec (float* a, float* b, float* c, int N)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;  // grid tail past the array does nothing
    c[i] = a[i] + b[i];
}
//Host code
/* Adds two N-element vectors on the GPU and prints the result.
 * Fix vs. original: host buffers allocated with malloc() were released with
 * cudaFree(), which is invalid; they are now released with free(). */
int main()
{
    const int N = 10;
    const size_t size = N * sizeof(float);
    // Host vectors, initialized to a[i] = b[i] = i+1.
    float* h_a = (float*)malloc(size);
    float* h_b = (float*)malloc(size);
    float* h_c = (float*)malloc(size);
    for (int i = 0; i < N; i++){
        h_a[i] = i + 1;
        h_b[i] = i + 1;
    }
    // Device vectors.
    float *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_b, size);
    cudaMalloc(&d_c, size);
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
    // One thread per element, rounded up to whole blocks.
    int threads_per_block = 256;
    int blocks_per_grid = (N + threads_per_block - 1) / threads_per_block;
    addvec<<<blocks_per_grid, threads_per_block>>>(d_a, d_b, d_c, N);
    // Blocking copy returns the result (and synchronizes with the kernel).
    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++){
        printf("%f \n", h_c[i]);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);   // fix: was cudaFree(h_a)
    free(h_b);
    free(h_c);
}
|
23,356 | #include <iostream>
#include <math.h>
#include <cstdlib>
#include <curand_kernel.h>
#include <thrust/random.h>
// Element-wise sum: output[i] = inputA[i] + inputB[i].
// NOTE(review): no bounds check — the launch must cover exactly the array
// length (true in main(), where N is a multiple of the block size).
template<typename T>
__global__ void add(T *output, T *inputA, T *inputB) {
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    output[i] = inputA[i] + inputB[i];
}
// Fill arr[i] with a uniform random value in [minValue, maxValue); each
// thread discards `i` values from a shared-seed engine to decorrelate lanes.
template<typename T>
__global__ void initRandom(T *arr, float minValue, float maxValue) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    thrust::default_random_engine engine;
    thrust::uniform_real_distribution<float> uniform(minValue, maxValue);
    engine.discard(i);
    arr[i] = uniform(engine);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with its source location; optionally terminate.
// Used via the gpuErrchk() macro, which supplies __FILE__/__LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/* Repeatedly adds two random unified-memory arrays on the GPU.
 * Fixes vs. original: the gpuErrchk macro existed but was never used (all
 * CUDA calls were unchecked), and the size comment said "800px x 800px"
 * for an 8000x8000 buffer. */
int main () {
    const int N = 8000 * 8000;   // 8000 x 8000 image
    const int iterations = 10;
    const size_t size = (size_t)N * sizeof(float);
    const int blockSize = 256;
    const int numBlocks = (N + blockSize - 1) / blockSize;
    float *x, *y, *output;
    // Unified memory: accessible from CPU or GPU.
    gpuErrchk(cudaMallocManaged(&x, size));
    gpuErrchk(cudaMallocManaged(&y, size));
    gpuErrchk(cudaMallocManaged(&output, size));
    // Initialize the inputs with uniform values in [0, 1).
    initRandom<<<numBlocks, blockSize>>>(x, 0., 1.);
    initRandom<<<numBlocks, blockSize>>>(y, 0., 1.);
    gpuErrchk(cudaGetLastError());       // catch bad launch configs
    gpuErrchk(cudaDeviceSynchronize());
    for (int iter = 0; iter < iterations; iter++) {
        add<<<numBlocks, blockSize>>>(output, x, y);
        gpuErrchk(cudaGetLastError());
        // Wait for the GPU before the next iteration / host access.
        gpuErrchk(cudaDeviceSynchronize());
    }
    gpuErrchk(cudaFree(x));
    gpuErrchk(cudaFree(y));
    gpuErrchk(cudaFree(output));
    return 0;
}
|
23,357 | #include <stdio.h>
// Device-side printf demo: each thread prints its own threadIdx.x.
__global__ void hello_kernel()
{
printf("Hello from GPU thread %d\n", threadIdx.x);
}
int main()
{
// One block of 32 threads (one warp).
hello_kernel<<<1, 32>>>();
// Synchronize so the device printf buffer is flushed before exit.
cudaDeviceSynchronize();
return 0;
}
|
23,358 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"
/* Copies a zeroed host buffer to the device and prints its first element.
 * Fix vs. original: calloc and cudaMalloc results were never checked. */
int main()
{
    double *matrix, *d_A;
    // calloc zero-initializes, so matrix[0] is guaranteed to be 0.0.
    matrix = (double *)calloc(1000, sizeof(double));
    if (matrix == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    if (cudaMalloc(&d_A, 1000 * sizeof(double)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        free(matrix);
        return 1;
    }
    cudaMemcpy(d_A, matrix, 1000 * sizeof(double), cudaMemcpyHostToDevice);
    printf("\nthe first element of matrix is %f\n", matrix[0]);
    cudaFree(d_A);
    free(matrix);
    return 0;
} |
23,359 | // From CUDA for engineers
// Listing 6.5: centroid_2d/kernel.cu
// 2d: reduction
#include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>
// Placeholder device kernel for the 2D centroid reduction (not yet implemented).
__global__
void centroidKernel()
{
}
// Host-side launcher stub for centroidKernel (not yet implemented).
void centroidParallel()
{
}
// Entry point: currently a no-op scaffold for the listing.
int main()
{
return 0;
} |
23,360 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define GIGABYTE 1000000000
struct entry
{
int origIndex;
float xValue, yValue;
};//entry
// Host binary search: first index i in [0, n) with data[i].xValue > val
// (i.e. an upper_bound on xValue); returns n when no element qualifies.
// Precondition: data[] is sorted ascending by xValue.
int h_binarySearchLB(entry * data, float val, int n)
{
    int lo = 0, hi = n;
    while (lo < hi)
    {
        const int mid = (lo + hi) / 2;
        if (data[mid].xValue <= val)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}//h_binarySearchLB
// Host binary search: first index i in [0, n) with data[i].xValue >= val
// (i.e. a lower_bound on xValue); returns n when no element qualifies.
// Precondition: data[] is sorted ascending by xValue.
int h_binarySearchUB(entry * data, float val, int n)
{
    int lo = 0, hi = n;
    while (lo < hi)
    {
        const int mid = (lo + hi) / 2;
        if (data[mid].xValue >= val)
            hi = mid;
        else
            lo = mid + 1;
    }
    return lo;
}//h_binarySearchUB
// Device twin of h_binarySearchLB: first index with xValue > val, or n.
// Precondition: data[] is sorted ascending by xValue.
__device__ int binarySearchLB(entry * data, float val, int n)
{
    int lo = 0, hi = n;
    while (lo < hi)
    {
        const int mid = (lo + hi) / 2;
        if (data[mid].xValue <= val)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}//binarySearchLB
// Device twin of h_binarySearchUB: first index with xValue >= val, or n.
// Precondition: data[] is sorted ascending by xValue.
__device__ int binarySearchUB(entry * data, float val, int n)
{
    int lo = 0, hi = n;
    while (lo < hi)
    {
        const int mid = (lo + hi) / 2;
        if (data[mid].xValue >= val)
            hi = mid;
        else
            lo = mid + 1;
    }
    return lo;
}//binarySearchUB
/* For each point idx, average the yValues of all points whose xValue lies
 * within h of array[idx].xValue (array sorted by xValue), storing the mean
 * back into array[idx].yValue.
 * Fix vs. original: no idx < n guard — the rounded-up grid made out-of-range
 * threads index past the array. */
__global__ void kernel1(entry * array, int n, float h)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    int lowerBound = binarySearchLB(array, array[idx].xValue-h, n);
    int upperBound = binarySearchUB(array, array[idx].xValue+h, n);
    float avg = 0;
    // Window always contains the point itself (for h > 0), so the
    // denominator below is at least 1.
    for (int i = lowerBound; i < upperBound; i++)
        avg += array[i].yValue;
    avg = avg / ((float)(upperBound - lowerBound));
    array[idx].yValue = avg;
}//kernel1
// Single-thread kernel (<<<1,1>>>): overwrite array[0].yValue with the mean
// of yValue over array[0..n).
__global__ void kernel2(entry * array, int n)
{
    float sum = 0;
    for (int i = 0; i < n; i++)
        sum += array[i].yValue;
    array[0].yValue = sum / (float) n;
}//kernel2
// Merge the two sorted runs a[low..pivot] and a[pivot+1..high] (by xValue)
// through a temporary buffer. Ties take the right-hand element, matching the
// original comparison (strict <).
void merge(entry * a, int low, int high)
{
    const int pivot = (low + high) / 2;
    entry * buf = new entry[high - low + 1];
    int out = 0;
    int i = low;
    int j = pivot + 1;
    while (i <= pivot && j <= high)
    {
        if (a[i].xValue < a[j].xValue)
            buf[out++] = a[i++];
        else
            buf[out++] = a[j++];
    }
    while (i <= pivot)
        buf[out++] = a[i++];
    while (j <= high)
        buf[out++] = a[j++];
    for (int k = low; k <= high; k++)
        a[k] = buf[k - low];
    delete [] buf;
}//merge
// Recursive top-down merge sort of a[low..high], ordered by xValue.
void mergeSort(entry * a, int low, int high)
{
    if (low >= high)
        return;
    const int mid = (low + high) / 2;
    mergeSort(a, low, mid);
    mergeSort(a, mid + 1, high);
    merge(a, low, high);
}//mergeSort
/* Smooths y over x: m[i] = mean of y-values whose x lies within h of x[i].
 * Sorts points by x, then uses one GPU pass when the working set fits in
 * ~1 GB; otherwise averages each window with a tiny serial kernel.
 * Fixes vs. original:
 *  - dimGrid used ceil(n/blockSize) with INTEGER division, which truncates
 *    before ceil (n=20 -> 0 blocks, nothing launched); now rounds up;
 *  - removed the leaked countArray allocation (flagged in the original). */
void smoothc(float * x, float * y, float * m, int n, float h)
{
    entry * array = new entry[n];
    entry * deviceArray;
    const int blockSize = 1024;   // threads per block
    // Array-of-structs: sortable by x while remembering original positions.
    for (int i = 0; i < n; i++)
    {
        array[i].origIndex = i;
        array[i].xValue = x[i];
        array[i].yValue = y[i];
    }//for
    mergeSort(array, 0, n - 1);
    if (n < GIGABYTE / (int)sizeof(entry))  // whole array fits on the GPU
    {
        cudaMalloc(&deviceArray, sizeof(entry) * n);
        cudaMemcpy(deviceArray, array, sizeof(entry) * n, cudaMemcpyHostToDevice);
        dim3 dimBlock(blockSize);
        dim3 dimGrid((n + blockSize - 1) / blockSize);  // fix: round UP
        // kernel1 stores each point's smoothed average in its yValue.
        kernel1 <<< dimGrid, dimBlock >>> (deviceArray, n, h);
        cudaMemcpy(array, deviceArray, sizeof(entry) * n, cudaMemcpyDeviceToHost);
        // Scatter results back to the caller's original ordering.
        for (int i = 0; i < n; i++)
            m[array[i].origIndex] = array[i].yValue;
        cudaFree(deviceArray);
    }//if
    else // too large: process one window at a time
    {
        for (int i = 0; i < n; i++)
        {
            const int lb = h_binarySearchLB(array, array[i].xValue - h, n);
            const int ub = h_binarySearchUB(array, array[i].xValue + h, n);
            entry * chunkArray = new entry[ub - lb];
            for (int j = 0; j < ub - lb; j++)
                chunkArray[j] = array[lb + j];
            cudaMalloc(&deviceArray, sizeof(entry) * (ub - lb));
            cudaMemcpy(deviceArray, chunkArray, sizeof(entry) * (ub - lb), cudaMemcpyHostToDevice);
            kernel2 <<< 1, 1 >>> (deviceArray, ub - lb);
            cudaMemcpy(chunkArray, deviceArray, sizeof(entry) * (ub - lb), cudaMemcpyDeviceToHost);
            m[array[i].origIndex] = chunkArray[0].yValue;  // window's y average
            cudaFree(deviceArray);
            delete [] chunkArray;
        }//for
    }//else
    delete [] array;
}//smoothc
/*
int main()
{
int n = 200;
float * x = new float[n];
float * y = new float[n];
float * m = new float[n];
float h = 2;
for (int i=0; i<n; i++)
{
x[i] = rand() % 100;
y[i] = rand() % 100;
}//for
float x[20] = {1, 1,2,2, 3,3, 4,4, 5,5, 6,6, 7,7, 8,8, 9,9, 10,10};
float y[20] = {11,11, 12,12, 13,13, 14,14, 15,15, 16,16, 17,17, 18,18, 19,19, 20,20};
float m[20];
int n = 20;
float h = 2;
smoothc(x, y, m, n, h);
// delete [] x;
// delete [] y;
// delete [] m;
}//main
*/
|
23,361 | /****************************************************************************
*
* cuda-rule30.cu - Rule30 Cellular Automaton with CUDA
*
* Written in 2017 by Moreno Marzolla <moreno.marzolla(at)unibo.it>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to the
* public domain worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication
* along with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*
* --------------------------------------------------------------------------
*
* This program implements the "rule 30 CA" as described in
* https://en.wikipedia.org/wiki/Rule_30 . This program uses the CPU
* only.
*
* Compile with:
* nvcc cuda-rule30.cu -o cuda-rule30
*
* Run with:
* /cuda-rule30 1024 1024
*
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
typedef unsigned char cell_t;
/**
* Given the current state of the CA, compute the next state.
*/
/**
 * One step of the rule-30 CA on a periodic domain of n 0/1 cells.
 * The original four-term truth table reduces to: next = left XOR (center OR right).
 */
void rule30( cell_t *cur, cell_t *next, int n )
{
    for (int i = 0; i < n; i++) {
        const cell_t left   = cur[(i - 1 + n) % n];
        const cell_t center = cur[i];
        const cell_t right  = cur[(i + 1) % n];
        next[i] = (left != 0) ^ (center != 0 || right != 0);
    }
}
/**
* Initialize the domain; all cells are 0, with the exception of a
* single cell in the middle of the domain.
*/
/**
 * Initialize the domain: every cell 0 except a single 1 in the middle.
 */
void init_domain( cell_t *cur, int n )
{
    memset(cur, 0, n * sizeof(cell_t));
    cur[n / 2] = 1;
}
/**
* Dump the current state of the CA to PBM file |out|.
*/
/**
 * Write the current CA state as one whitespace-separated PBM row to |out|.
 */
void dump_state( FILE *out, cell_t *cur, int n )
{
    for (int i = 0; i < n; i++)
        fprintf(out, "%d ", cur[i]);
    fprintf(out, "\n");
}
/* Evolves the rule-30 CA for `steps` generations, writing each row to a P1
 * PBM image. Fixes vs. original: malloc results and atoi outputs were never
 * checked, and a comment wrongly claimed the buffer swap happens "on the
 * GPU" (this is the CPU version). */
int main( int argc, char* argv[] )
{
    const char *outname = "rule30.pbm";
    int width = 1024, steps = 1024;
    if ( argc > 3 ) {
        fprintf(stderr, "Usage: %s [width [steps]]\n", argv[0]);
        return -1;
    }
    if ( argc > 1 ) {
        width = atoi(argv[1]);
    }
    if ( argc > 2 ) {
        steps = atoi(argv[2]);
    }
    /* Fix: atoi returns 0 on malformed input; reject non-positive sizes. */
    if ( width <= 0 || steps <= 0 ) {
        fprintf(stderr, "width and steps must be positive\n");
        return -1;
    }
    const size_t size = width * sizeof(cell_t);
    /* Host copies of the cur[] and next[] vectors. */
    cell_t *cur = (cell_t*)malloc(size);
    cell_t *next = (cell_t*)malloc(size);
    if ( !cur || !next ) {     /* fix: original never checked malloc */
        fprintf(stderr, "out of memory\n");
        free(cur); free(next);
        return -1;
    }
    FILE *out = fopen(outname, "w");
    if ( !out ) {
        fprintf(stderr, "Cannot create %s\n", outname);
        free(cur); free(next);
        return -1;
    }
    /* PBM header. */
    fprintf(out, "P1\n");
    fprintf(out, "# produced by %s %d %d\n", argv[0], width, steps);
    fprintf(out, "%d %d\n", width, steps);
    init_domain(cur, width);
    for (int s = 0; s < steps; s++) {
        dump_state(out, cur, width);
        rule30(cur, next, width);
        /* Swap the host buffers for the next generation. */
        cell_t *tmp = cur;
        cur = next;
        next = tmp;
    }
    fclose(out);
    free(cur);
    free(next);
    return 0;
}
|
23,362 | #include "includes.h"
// Grid-stride max over array[0..n), reduced per block in dynamic shared
// memory, then merged into max[blockIdx.x] under a spinlock `mutex`.
// NOTE(review): cache[] is per-BLOCK shared memory, but it is indexed with
// the GLOBAL thread id `index` throughout — out of bounds whenever
// gridDim.x > 1 unless the launch sizes shared memory for the full grid.
// Confirm the intended launch configuration with the caller.
__global__ void calcSoftmaxMaxForwardGPU(float *array, float *max, int *mutex, int batch_size, int in_size_x, unsigned n)
{
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x; // = in_size_x
unsigned int offset = 0;
// __shared__ float cache[ 32 ][ BLOCK ]; // this should be constant. batch_size * in_size_x actually
extern __shared__ float cache[];
// printf("index=%d, stride=%d, n=%d, gridDim.x=%d, blockDim.x=%d\n", index, stride, n, gridDim.x, blockDim.x);
// Grid-stride pass: thread-local running max, seeded with -1.0.
// NOTE(review): the -1.0 seed assumes inputs >= -1; all-negative inputs
// below -1 would be reported as -1 — confirm the input range.
float temp = -1.0;
while(index + offset < n){
temp = fmaxf(temp, array[index + offset]);
offset += stride;
}
// cache[threadIdx.x] = temp;
cache[index] = temp;
__syncthreads();
// Tree reduction. NOTE(review): prev_i is never updated inside the loop,
// so the odd-size patch-up below only handles the first round's parity.
unsigned int prev_i = blockDim.x;
unsigned int i = blockDim.x / 2;
while ( i!=0 ){
if(threadIdx.x < i){
// cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
cache[index] = fmaxf(cache[index], cache[index + i]);
}
if(prev_i%2 != 0){
cache[(blockIdx.x * blockDim.x)] = fmaxf(cache[(blockIdx.x * blockDim.x)], cache[(blockIdx.x * blockDim.x) + prev_i-1]);
}
__syncthreads();
i /= 2;
}
// Thread 0 publishes this block's max under a global spinlock.
if( threadIdx.x == 0 ){
while( atomicCAS(mutex, 0, 1) != 0 ); // atomic compare and swap.
// *max = fmaxf(*max, cache[0]);
*(max+blockIdx.x) = fmaxf(*(max+blockIdx.x), cache[blockIdx.x * blockDim.x + 0]);
atomicExch(mutex, 0); // atomic exchange.
}
/* original
for ( int b = 0; b < in.size.b; ++b ){
float max_v = 0.0;
for ( int i = 0; i < in.size.x; ++i ){
float v = in( b, i, 0, 0 );
if(v>max_v){
max_v = v;
}
}
}
*/
} |
23,363 | #include "includes.h"
// Element-wise difference of two stored vectors:
// out[t] = ref[nodeOne*len + t] - ref[nodeTwo*len + t] for t < vectorLength.
// Thread id is flattened row-major over a 2D grid of 1D blocks.
__global__ void TwoNodesDifferenceKernel( int nodeOne, int nodeTwo, int vectorLength, float *referenceVector, float *twoNodesDifference )
{
    const int tid = blockDim.x * blockIdx.y * gridDim.x  // grid rows above this one
                  + blockDim.x * blockIdx.x              // blocks before this one in the row
                  + threadIdx.x;
    if (tid >= vectorLength)
        return;
    twoNodesDifference[tid] = referenceVector[nodeOne * vectorLength + tid]
                            - referenceVector[nodeTwo * vectorLength + tid];
} |
23,364 | #include "includes.h"
// Rescales the (occupied x virtual) response matrix nm2v by the resolvent
// factor (fn-fm) * (1/(omega-(em-en)) - 1/(omega+(em-en))) for complex
// omega = omega_re + i*omega_im; alpha/beta below are the real/imaginary
// parts of that complex factor, accumulated in double for stability.
// One thread per (i, j) pair: i over occupied states, j over virtuals.
// NOTE(review): the j guard is `j < norbs - vstart` but the write index is
// i*nvirt + j — out of bounds if norbs - vstart > nvirt; confirm callers
// guarantee norbs - vstart <= nvirt.
__global__ void normalize_energy_gpu(float *ksn2e, float *ksn2f, double omega_re, double omega_im, float *nm2v_re, float *nm2v_im, int nfermi, int norbs, int nvirt, int vstart)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; //nocc
int j = blockIdx.y * blockDim.y + threadIdx.y; //nvirt
float en=0.0, fn=0.0, em=0.0, fm=0.0, old_re, old_im;
double d1p, d1pp, d2p, d2pp, alpha, beta;
if (i < nfermi)
{
// en/fn: energy and occupation of occupied state i.
en = ksn2e[i];
fn = ksn2f[i];
if ( j < norbs - vstart )
{
// em/fm: energy and occupation of virtual state j (offset by vstart).
em = ksn2e[j + vstart];
fm = ksn2f[j + vstart];
// d1 = omega - (em-en), d2 = omega + (em-en), split into re ('p') and im ('pp').
d1p = omega_re - (em-en); d1pp = omega_im;
d2p = omega_re + (em-en); d2pp = omega_im;
// Real and imaginary parts of 1/d1 - 1/d2.
alpha = d1p/(d1p*d1p + d1pp*d1pp) - d2p/(d2p*d2p + d2pp*d2pp);
beta = -d1pp/(d1p*d1p + d1pp*d1pp) + d2pp/(d2p*d2p + d2pp*d2pp);
old_re = nm2v_re[i*nvirt + j];
old_im = nm2v_im[i*nvirt + j];
// Complex multiply (old_re + i*old_im) * (alpha + i*beta), scaled by (fn-fm).
nm2v_re[i*nvirt + j] = (fn - fm)*(old_re*alpha - old_im*beta);
nm2v_im[i*nvirt + j] = (fn - fm)*(old_re*beta + old_im*alpha);
//printf("i = %d, j = %d, m = %d, alpha = %f, beta = %f, old_re = %f, old_im = %f, nm2v_re = %f, nm2v_im = %f\n",
// i, j, m, alpha, beta, old_re, old_im, nm2v_re[index], nm2v_im[index]);
//nm2v = nm2v * (fn-fm) * ( 1.0 / (comega - (em - en)) - 1.0 /(comega + (em - en)) );
}
}
} |
23,365 | #include "includes.h"
// Naive dense matrix multiply ans = M * N for square size x size matrices in
// row-major layout; one thread computes one output element.
__global__ void matrixMulKernel(float* ans, float* M, float* N, int size) {
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= size || c >= size)
        return;
    float acc = 0;
    for (int k = 0; k < size; ++k) {
        acc += M[r * size + k] * N[k * size + c];
    }
    ans[r * size + c] = acc;
}
23,366 | #include "my_device_func.cuh"
// Fills a[0..n) with 1.0 using a grid-stride loop.
__global__ void make_ones(float *a, int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < n; i += stride)
        a[i] = 1.0f;
}
// Fills a[0..n) with 0.0 using a grid-stride loop.
__global__ void make_zeros(float *a, int n)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < n; i += stride)
        a[i] = 0.0f;
}
// Writes dst = str^T for a str_row x str_column matrix `str`, grid-stride
// looping over all n = str_row * str_column elements.
// IDX2C is presumably the usual column-major macro (IDX2C(i,j,ld) == i + j*ld)
// from my_device_func.cuh — TODO confirm.
__global__ void transpose(float *dst, const float *str, int str_row, int str_column,int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int x,y;
while(tid < n)
{
// Decompose the flat index into (row y, column x) of the source.
y = tid % str_row;
x = tid / str_row;
// Element (y, x) of str lands at (x, y) of dst, whose leading dim is str_column.
dst[IDX2C(x,y,str_column)] = str[IDX2C(y,x,str_row)];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void add_bias(const float *a, const float *b, float *c, int a_row, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = a[tid] + b[tid % a_row];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void transfer(float *dst,const float *str, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
dst[tid] = str[tid];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void relu(float *dst,const float *str, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(str[tid] > 0 ) dst[tid] = str[tid];
else dst[tid] = 0.0;
tid+= blockDim.x * gridDim.x;
}
}
__global__ void relu_inv(float *dst,const float *str, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(str[tid] > 0 ) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid+= blockDim.x * gridDim.x;
}
}
__global__ void scalar_multi(const float *a, const float *b, float *c, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = a[tid]*b[tid];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void elu(float *dst,const float *str, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(str[tid] >= 0 ) dst[tid] = str[tid];
else dst[tid] = expf(str[tid]) - 1;
tid+= blockDim.x * gridDim.x;
}
}
__global__ void elu_inv(float *dst,const float *str, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(str[tid] >= 0 ) dst[tid] = 1.0;
else dst[tid] = expf(str[tid]);
tid+= blockDim.x * gridDim.x;
}
}
__global__ void binary_cross_entropy(const float *a,const float *b,float *c, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = -0.5*(b[tid]*logf(a[tid] + 1e-8) + (1-b[tid])*logf(1-a[tid] + 1e-8));
tid+= blockDim.x * gridDim.x;
}
}
__global__ void binary_cross_entropy_inv(const float *a,const float *b,float *c, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = 0.5*(a[tid] - b[tid])/(a[tid]*(1-a[tid]) + 1e-8);
tid+= blockDim.x * gridDim.x;
}
}
__global__ void dropout_table(float *a, float dropout_rate,int n)
{
long tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(a[tid] > dropout_rate) a[tid] = 0.0;
else a[tid] = 1.0;
tid+= blockDim.x * gridDim.x;
}
}
__global__ void dropout(const float *a,const float *b, float *c,float dropout_rate,int n)
{
long tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = a[tid]*b[tid]/dropout_rate;
tid+= blockDim.x * gridDim.x;
}
}
__global__ void sigmoid(float *dst, const float *str, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
dst[tid] = 1/(1 + expf(-str[tid]));
tid+= blockDim.x * gridDim.x;
}
}
__global__ void sigmoid_inv(float *dst,const float *str, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float y;
while(tid < n)
{
y = 1/(1 + expf(-str[tid]));
dst[tid] = y*(1 - y) + 1e-8;
tid+= blockDim.x * gridDim.x;
}
}
__global__ void tanh(float *dst,const float *str,int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
dst[tid] = tanhf(str[tid]);
tid+= blockDim.x * gridDim.x;
}
}
__global__ void tanh_inv(float *dst,const float *str,int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float y;
while(tid < n)
{
y = tanhf(str[tid]);
dst[tid] = (1 + y)*(1 - y);
tid+= blockDim.x * gridDim.x;
}
}
__global__ void add(const float *a, const float *b, float *c, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = a[tid] + b[tid];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void least_squares(const float *a,const float *b,float *c, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = (a[tid]-b[tid])*(a[tid]-b[tid]);
tid+= blockDim.x * gridDim.x;
}
}
__global__ void least_squares_inv(const float *a,const float *b,float *c, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = 2*(a[tid]-b[tid]);
tid+= blockDim.x * gridDim.x;
}
}
__global__ void momentum_vector(const float *a, float *b, float l_rate,float m_rate, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
b[tid] = m_rate*b[tid] - l_rate*a[tid];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void adam_beta1(const float *a, float *b, float beta1, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
b[tid] = beta1*b[tid] + (1.0 - beta1)*a[tid];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void adam_beta2(const float *a, float *b, float beta2, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
b[tid] = beta2*b[tid] + (1.0 - beta2)*a[tid]*a[tid];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void adam_sum(const float *a, const float *b, float *c, float learning_rate,
float beta1_t,float beta2_t, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
c[tid] = c[tid] - learning_rate*((a[tid])/(1.0-beta1_t))/(sqrtf(b[tid]/(1.0-beta2_t)) + 1e-8);
tid+= blockDim.x * gridDim.x;
}
}
__global__ void max_norm(float *a, float rate, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(a[tid] > rate) a[tid] = rate;
else if(a[tid] < -rate) a[tid] = -rate;
tid+= blockDim.x * gridDim.x;
}
}
__global__ void min(float *dst,const float *str, float rate, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(str[tid] > rate ) dst[tid] = rate;
else dst[tid] = str[tid];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void min_inv(float *dst,const float *str,float rate, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
if(str[tid] > rate ) dst[tid] = 0.0;
else dst[tid] = 1.0;
tid+= blockDim.x * gridDim.x;
}
}
// Per-sample classification accuracy: for each column (sample), compare the
// argmax of the prediction matrix y against the one-hot position in the
// label matrix t, writing 1.0 to r[sample] on a match and 0.0 otherwise.
// y and t are row x column matrices in IDX2C layout; r has `column` entries.
__global__ void accuracy_table(const float *y, const float *t,float *r, int row ,int column)
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    float max;
    int id_y;
    int id_t;
    while(tid < column)
    {
        max = 0.0;
        // Fix: initialize the indices. If no prediction exceeds 0.0 (e.g. an
        // all-zero column) or no label entry reaches 0.9999, the original
        // compared uninitialized registers — undefined behavior. Distinct
        // sentinels guarantee such degenerate samples count as incorrect.
        id_y = -1;
        id_t = -2;
        for(int i = 0 ; i < row ;i++)
        {
            if(y[IDX2C(i,tid,row)] > max)
            {
                max = y[IDX2C(i,tid,row)];
                id_y = i;
            }
            if(t[IDX2C(i,tid,row)] >= 0.9999) id_t = i;
        }
        if(id_y == id_t) r[tid] = 1.0;
        else r[tid] = 0.0;
        tid+= blockDim.x * gridDim.x;
    }
}
__global__ void merge(float *dst,int dst_row, const float *str, int str_row,int offset, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int x,y;
while(tid < n)
{
y = tid % str_row;
x = tid / str_row;
dst[IDX2C(y + offset, x, dst_row)] = str[IDX2C(y, x, str_row)];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void inv_merge(float *dst, int dst_row, const float *str, int str_row,int offset, int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int x,y;
while(tid < n)
{
y = tid % dst_row;
x = tid / dst_row;
dst[IDX2C(y, x, dst_row)] = str[IDX2C(y + offset, x, str_row)];
tid+= blockDim.x * gridDim.x;
}
}
__global__ void multi_scala(float *dst, const float *str, float scale,int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
while(tid < n)
{
dst[tid] = scale*str[tid];
tid+= blockDim.x * gridDim.x;
}
}
|
23,367 |
// Converts one HSV pixel (h, s, v each expected in [0, 1]) to 8-bit RGB.
// Adapted and simplified from https://github.com/jakebesworth/Simple-Color-Conversions
__device__ void hsv_rgb_single(float h, float s, float v, unsigned char *r, unsigned char *g, unsigned char *b)
{
    /* Convert hue back to 0-6 space, floor */
    const float hex = h * 6.0f;
    const unsigned char primary = (int) hex;
    const float secondary = hex - primary;
    // Fix: float literals throughout — the original's 1.0/255.0 doubles
    // promoted every expression to double-precision device arithmetic.
    float x = (1.0f - s) * v;
    float y = (1.0f - (s * secondary)) * v;
    float z = (1.0f - (s * (1.0f - secondary))) * v;
    float *rp, *gp, *bp;
    // Select which of {v, x, y, z} feeds each channel for this hue sextant.
    switch(primary) {
    case 0: rp = &v; gp = &z; bp = &x; break;
    case 1: rp = &y; gp = &v; bp = &x; break;
    case 2: rp = &x; gp = &v; bp = &z; break;
    case 3: rp = &x; gp = &y; bp = &v; break;
    case 4: rp = &z; gp = &x; bp = &v; break;
    case 5:
    default: rp = &v; gp = &x; bp = &y; break;
    }
    *r = *rp * 255.0f;
    *g = *gp * 255.0f;
    *b = *bp * 255.0f;
}
// One thread per pixel: converts a packed HSV float image (3 floats/pixel)
// into packed 8-bit RGB. The width/height guard handles ragged grids.
__global__ void hsv_rgb(float *img, unsigned char *result, int width, int height)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;
    const int base = 3 * (px + py * width);
    hsv_rgb_single(img[base], img[base + 1], img[base + 2],
                   &result[base], &result[base + 1], &result[base + 2]);
}
|
23,368 | #include <cuda_runtime.h> //uchar4
// Splits an interleaved uchar4 image into three per-channel images by copying
// each pixel and zeroing the other two color components in each copy:
// `red` keeps only .z (and .w), `green` keeps only .y, `blue` keeps only .x.
// Indexing uses a hard-coded row stride of 360 with row = threadIdx.x and
// col = blockIdx.x — assumes the 360-block x 480-thread launch made by
// separate_channels below; TODO confirm image dimensions and parameterize.
__global__
void split_channels(uchar4 *input_image, uchar4 *red, uchar4 *green, uchar4 *blue){
int row = threadIdx.x;
int col = blockIdx.x;
int idx = col + row*360;
red[idx] = input_image[idx];
green[idx] = input_image[idx];
blue[idx] = input_image[idx];
red[idx].x = 0;
red[idx].y = 0;
green[idx].x = 0;
green[idx].z = 0;
blue[idx].y = 0;
blue[idx].z = 0;
}
// Host wrapper: launches split_channels with 360 blocks of 480 threads
// (one block per column, one thread per row of a 360x480 uchar4 image).
void separate_channels(uchar4 *input_image, uchar4 *red, uchar4 *green, uchar4 *blue){
    // Renamed locals: the original called the grid "blockSize" and the
    // block "threadSize", which inverted the CUDA meaning of the terms.
    dim3 grid(360, 1, 1);
    dim3 block(480, 1, 1);
    split_channels<<<grid, block>>>(input_image, red, green, blue);
}
|
23,369 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
// Returns the current wall-clock time in seconds (gettimeofday-based,
// microsecond resolution).
double dwalltime(){
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
}
// Block-level sum reduction: each 128-thread block loads its 128 elements of
// d_vecA into shared memory, reduces them in 7 halving steps, and writes the
// partial sum back to d_vecA[blockIdx.x]. The host calls this repeatedly
// until a single element remains.
__global__ void vecMult(double *d_vecA,unsigned long n){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): sized sizeof(double)*32 = 256 doubles although only
// blockDim.x = 128 entries are ever used — presumably an accidental
// over-allocation; confirm before shrinking.
__shared__ double s_vecA[sizeof(double)*32];
unsigned int i;
// int y = 2;
if (global_id < n){
s_vecA[threadIdx.x]=d_vecA[global_id];
__syncthreads();
// 7 steps because 128 >> 7 == 1. At step i, thread t accumulates the
// element one stride 2^(i-1) to its right into s_vecA[t * 2^i].
// NOTE(review): these __syncthreads() sit inside the divergent
// `if (global_id < n)` branch — when the host launches a partial last
// block (n not a multiple of 128), out-of-range threads skip the
// barrier, which is undefined behavior. Confirm with the launch code.
for( i = 1; i <= 7; i++) {
if( threadIdx.x < (int)(128 >>(i))){
s_vecA[threadIdx.x * (1 << i)] += s_vecA[(threadIdx.x * (1 << i)) + (1 << (i-1))];
}
// y = y * 2;
__syncthreads();
}
if ( threadIdx.x == 0){
d_vecA[blockIdx.x] = s_vecA[0];
}
}
}
// Benchmark driver: sums an N-element vector of 2.0s on the GPU by calling
// the vecMult block reduction until one element remains, then prints the
// elapsed time, the last CUDA error code, and the result.
int main(int argc, char *argv[]){
    if (argc != 2){
        printf("Falta argumento: N\n");
        return 0;
    }
    cudaError_t error;
    // Fix: strtoul instead of atoi — atoi truncates through int and cannot
    // represent values above INT_MAX for this unsigned long parameter.
    unsigned long N = strtoul(argv[1], NULL, 10);
    unsigned long CUDA_BLK = 128, GRID_BLK;
    unsigned long numBytes = sizeof(double)*N;
    double *vecA, *d_vecA, timetick;
    unsigned long i;
    vecA = (double *)malloc(numBytes);
    for (i = 0; i < N; i++){
        vecA[i] = 2;
    }
    cudaMalloc((void **) &d_vecA, numBytes);
    dim3 dimBlock(CUDA_BLK);
    timetick = dwalltime();
    // Fix: the original performed this host->device copy twice (once before
    // and once inside the timed region); a single timed copy suffices.
    cudaMemcpy(d_vecA, vecA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    // Each pass collapses every CUDA_BLK elements into one partial sum.
    for(i = N ; i > 1; i = ceil(float(i) / CUDA_BLK)){
        GRID_BLK = ceil(float(i) / CUDA_BLK);
        dim3 dimGrid(GRID_BLK);
        vecMult<<<dimGrid, dimBlock>>>(d_vecA, i);
        cudaDeviceSynchronize();   // fix: cudaThreadSynchronize() is deprecated
    }
    cudaMemcpy(vecA, d_vecA, sizeof(double), cudaMemcpyDeviceToHost); // GPU -> CPU
    printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
    error = cudaGetLastError();
    printf("error: %d\n",error);
    printf("%f|",vecA[0]);
    printf("\n\n");
    cudaFree(d_vecA);
    free(vecA);
    return 0;
}
23,370 | #include "includes.h"
// Copyright (c) 2020, Michael Kunz. All rights reserved.
// https://github.com/kunzmi/ImageStackAlignator
//
// This file is part of ImageStackAlignator.
//
// ImageStackAlignator is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation, version 3.
//
// ImageStackAlignator is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
// MA 02110-1301 USA, http://www.gnu.org/licenses/.
//squared sum of a tile without the border
extern "C"
//Boxfilter ignoring the border parts
//blockDim.X must be tileSize + 2 * maxShift
//blockDim.Y must be 1
extern "C"
//Boxfilter ignoring the border parts
//blockDim.Y must be tileSize + 2 * maxShift
//blockDim.X must be 1
extern "C"
//Computed the normalized CC values out of the different input data
//Cross correlation is fft shifted
//blockDim.X must be 2 * maxShift
//blockDim.Y must be 2 * maxShift
//blockDim.Z must be nr of tiles
extern "C"
//Convert a tiled image into consecutive tiles for FFT
//input img has a pitch, output tiles are consecutive
//output tiles overlap by maxShift is filled by zero
extern "C"
//Convert a tiled image into consecutive tiles for FFT
//input img has a pitch, output tiles are consecutive
//output tiles overlap by maxShift on each side
extern "C"
// Element-wise conj(aIn[i]) * bInOut[i], stored back into bInOut.
// One thread per complex element; threads past maxElem exit early.
__global__ void conjugateComplexMulKernel(const float2* __restrict__ aIn, float2* __restrict__ bInOut, int maxElem)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < maxElem)
    {
        const float2 a = aIn[i];
        const float2 b = bInOut[i];
        // (a.x - i*a.y) * (b.x + i*b.y)
        float2 out;
        out.x = a.x * b.x - (-a.y) * b.y;
        out.y = a.x * b.y + (-a.y) * b.x;
        bInOut[i] = out;
    }
}
23,371 | // Compile it with:
// nvcc blur_gpu.cu -o blur_gpu
// Run it with:
// CUDA_VISIBLE_DEVICES=1 ./blur_gpu
#include <iostream>
#include <cstdlib>
#include <math.h>
#include <stdio.h>
#include <assert.h>
#include <fstream>
#include <time.h>
// Naive 7x7 convolution (KERNEL_RADIUS = 3) with a constant per-tap weight
// of 0.5 * 0.5 = 0.25; one thread per output pixel at linear address gLoc.
// NOTE(review): several apparent issues to confirm with the author:
//  - there is no gLoc bounds check, so dataW/dataH must be exact multiples
//    of the block dims (true for the 16384x16384 / 16x16 launch in main);
//  - the apron tests compare threadIdx against the *block* edge and fire
//    only in the first/last block of each axis, so taps crossing interior
//    block boundaries still read neighbouring pixels — only taps outside
//    the whole image are zeroed;
//  - `i` is applied along x (gLoc + i) yet labelled "row wise", while `j`
//    scales by dataW — the row/col comments look swapped.
__global__ void convolutionGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH )
{
// global mem address for this thread
const int gLoc = threadIdx.x +
blockIdx.x * blockDim.x +
threadIdx.y * dataW +
blockIdx.y * blockDim.y * dataW;
float sum = 0;
float value = 0;
int KERNEL_RADIUS = 3;
for (int i = -KERNEL_RADIUS; i <= KERNEL_RADIUS; i++) // row wise
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) // col wise
{
// check row first
if (blockIdx.x == 0 && (threadIdx.x + i) < 0) // left apron
value = 0;
else if ( blockIdx.x == (gridDim.x - 1) &&
(threadIdx.x + i) > blockDim.x-1 ) // right apron
value = 0;
else
{
// check col next
if (blockIdx.y == 0 && (threadIdx.y + j) < 0) // top apron
value = 0;
else if ( blockIdx.y == (gridDim.y - 1) &&
(threadIdx.y + j) > blockDim.y-1 ) // bottom apron
value = 0;
else // safe case
value = d_Data[gLoc + i + j * dataW];
}
sum += value * 0.5 * 0.5;
}
d_Result[gLoc] = sum;
}
// Runs the naive GPU convolution over an img_height x img_width float image,
// reporting the elapsed GPU time in milliseconds through gpu_elapsed_time_ms.
// The random r x r kernel is still generated and uploaded for parity with the
// original benchmark, even though convolutionGPU uses fixed 0.25 weights and
// never reads d_kernel.
void image_convolution(float * input,float* output, int img_height, int img_width, const int r, float & gpu_elapsed_time_ms)
{
    int kernel_height = r;
    int kernel_width = r;
    float *kernel = new float[r*r];
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for (int i = 0; i < r*r; i++){
        kernel[i] = rand() % 10 + 1;
    }
    float * mask = new float[kernel_height*kernel_width];
    for (int i = 0; i < kernel_height*kernel_width; i++)
    {
        mask[i] = kernel[i];
    }
    float * d_input, * d_output, * d_kernel;
    cudaMalloc(&d_input, img_width*img_height*sizeof(float));
    cudaMalloc(&d_output, img_width*img_height*sizeof(float));
    cudaMalloc(&d_kernel, kernel_height*kernel_width*sizeof(float));
    cudaMemcpy(d_input, input, img_width*img_height*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, mask, kernel_height*kernel_width*sizeof(float), cudaMemcpyHostToDevice);
    dim3 blocksize(16,16);
    dim3 gridsize;
    gridsize.x=(img_width+blocksize.x-1)/blocksize.x;
    gridsize.y=(img_height+blocksize.y-1)/blocksize.y;
    convolutionGPU<<<gridsize,blocksize>>>(d_output,d_input,img_width,img_height);
    cudaMemcpy(output, d_output, img_width*img_height*sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    // Fix: release everything — the original leaked both host buffers, all
    // three device allocations, and both CUDA events on every call.
    delete[] kernel;
    delete[] mask;
    cudaFree(d_input);
    cudaFree(d_output);
    cudaFree(d_kernel);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Benchmark entry point: runs the GPU blur NUM times on random 16384x16384
// images and prints the mean elapsed time in seconds.
int main(){
    // number of instances of data generated
    int NUM = 5;
    float total_time = 0;
    std::ofstream ofile;
    // change here to customize output filename
    ofile.open("naive_blur_gpu.csv");
    for (int iterator = 0; iterator < NUM; iterator++) {
        // currently have to manually change the input size
        float *in, *out;
        int m = 16384;
        int n = 16384;
        int is = n * m;
        int r = 3;
        in = new float[is];
        out = new float[is];
        for (int i = 0; i < m * n; i++)
            in[i] = rand() % 1024 + 1;
        float time;
        image_convolution(in, out, n, m, r, time);
        total_time += time;
        // Fix: the original never freed these, leaking ~2 GiB of host
        // memory per iteration.
        delete[] in;
        delete[] out;
    }
    // Mean per-run time; image_convolution reports ms, hence the /1000.
    std::cout << total_time / (NUM*1000) << std::endl;
    ofile.close();
    return 0;
}
23,372 | // REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// Verify that DWARF version is properly clamped for nvptx, but not for the host.
// RUN: %clang -### -target x86_64-linux-gnu -c %s -gdwarf-5 -gembed-source 2>&1 \
// RUN: | FileCheck %s --check-prefix=DWARF-CLAMP
// RUN: %clang -### -target x86_64-linux-gnu -c %s -ggdb -gembed-source -gdwarf-5 2>&1 \
// RUN: | FileCheck %s --check-prefix=DWARF-CLAMP
// DWARF-CLAMP: "-triple" "nvptx64-nvidia-cuda"
// DWARF-CLAMP-SAME: -dwarf-version=2
// DWARF-CLAMP: "-triple" "x86_64
// DWARF-CLAMP-SAME: -dwarf-version=5
|
23,373 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <iostream>
#include <random>
#include <ctime>
#include <cstdio>
#include <cstdlib>
#include <chrono>
using namespace std;
#include <cuda.h>
#include <curand.h>
#include <math.h>
const int MaxT = 500;
const int MaxPart = 500000;
const int N = MaxPart;
const double sig_x = 2;
const double sig_obs = 0.5;
double randn() {
static default_random_engine engine;
static normal_distribution<double> dist(0, 1);
return dist(engine);
}
// State-transition functor for the particle filter:
// next_state = sin(a * prev_state) + process_noise.
struct trans {
    double a;   // coefficient inside the sinusoidal drift
    trans(double coef) : a(coef) {}
    __host__ __device__
    double operator()(double state, double noise) {
        const double drift = sin(a * state);
        return drift + noise;
    }
};
// Unnormalized Gaussian likelihood of observation y given state x:
// exp(-(x - y)^2 / (2 * sig_obs^2)). The 1/(sigma*sqrt(2*pi)) factor is
// dropped because weights are renormalized during resampling.
struct likeli {
    double y, coef;
    likeli(double obs, double sig_obs) : y(obs), coef(-1.0 / (2 * sig_obs * sig_obs)) {}
    __host__ __device__
    double operator()(double x) {
        const double residual = x - y;
        return exp(coef * residual * residual);
    }
};
// Inverse-CDF (multinomial) resampling functor: maps a uniform draw t in
// (0, 1] to a particle value. W must hold the inclusive prefix sums of the
// particle weights (so W[N-1] == sum); the binary search finds the first
// index whose cumulative weight reaches t * sum and returns that particle.
struct resample {
double sum;   // total weight (last entry of the inclusive scan)
double* W;    // inclusive prefix sums of weights, length N
double* X;    // particle values, length N
int N;        // particle count
resample(double sum, double *W, double* X, int N)
:sum(sum),W(W),X(X),N(N){};
__host__ __device__
double operator()(double t) {
t = t * sum;
// Binary search with invariant: W[lo-1] < t (treating W[-1] as 0)
// and W[hi-1] >= t once the loop exits.
int lo = 0, hi = N, mid;
while(lo + 1 < hi) {
mid = lo + hi >> 1;
if (W[mid - 1] >= t) hi = mid;
else lo = mid;
}
return X[hi - 1];
}
};
// Bootstrap particle filter demo for the model
//   x_t = sin(a * x_{t-1}) + N(0, sig_x),  y_t = x_t + N(0, sig_obs),
// run with thrust + curand using N particles for MaxT+1 time steps.
int main() {
double x, y, cur_x, a = -0.5;
// Device buffers: X = particles, Z = noise/uniform draws, Y = resampled
// particles, W = weights. NOTE(review): T is allocated but never used.
thrust::device_vector<double> X(N), T(N), Z(N), Y(N), W(N);
double* X_ptr = thrust::raw_pointer_cast(X.data());
double* T_ptr = thrust::raw_pointer_cast(T.data());
double* Z_ptr = thrust::raw_pointer_cast(Z.data());
double* W_ptr = thrust::raw_pointer_cast(W.data());
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
int start, end;
double duration;
start = clock();
// generate data
// Simulate the true trajectory on the host, keeping only the observations.
thrust::host_vector<double> data(MaxT+1);
for(int t=0; t<=MaxT; ++ t) {
if (t == 0) cur_x = randn();
else cur_x = sin(x * a) + randn() * sig_x;
x = cur_x; y = cur_x + sig_obs * randn();
data[t] = y;
}
end = clock();
duration = (end - start) * 1.0 / CLOCKS_PER_SEC;
printf("Data Generation : %.6lfs\n", duration);
start = clock();
for(int t = 0; t <= MaxT; ++ t) {
// fetch data
y = data[t];
// Particle Filter
if (t == 0) {
// initial value
// Prior draw: x_0 ~ N(0, 1) for every particle.
curandGenerateNormalDouble(gen, X_ptr, N, 0, 1);
} else {
// transition
// x[t] = N(sin(a * x[t-1]), 2)
curandGenerateNormalDouble(gen, Z_ptr, N, 0, 2);
thrust::transform(X.begin(), X.end(), Z.begin(), X.begin(), trans(a));
}
// Calc Likelihood
thrust::transform(X.begin(), X.end(), W.begin(), likeli(y, sig_obs));
// resample
// Inclusive scan turns weights into a CDF; `resample` then performs
// inverse-CDF sampling using the uniform draws placed in Z.
thrust::inclusive_scan(W.begin(), W.end(), W.begin());
double tot_sum = W[N - 1];
curandGenerateUniformDouble(gen, Z_ptr, N);
thrust::transform(Z.begin(), Z.end(), Y.begin(),
resample(tot_sum, W_ptr, X_ptr, N));
thrust::copy(Y.begin(), Y.end(), X.begin());
}
end = clock();
duration = 1.0 * (end - start) / CLOCKS_PER_SEC;
// NOTE(review): "%.6lfds" prints a literal 'd' after the value — likely a
// typo for "%.6lfs" (left unchanged here; comments-only edit).
printf("Time Elapsed: %.6lfds\n", duration);
}
|
23,374 | #include "includes.h"
// Memory-test pattern writer: each block fills its BLOCKSIZE-byte slice of
// [_ptr, end_ptr) so that every word holds its own address
// (ptr[i] = &ptr[i]); a companion read pass can then verify the memory.
// NOTE(review): BLOCKSIZE comes from includes.h and is assumed to be the
// per-block byte count matching the launch; `err` is unused here —
// presumably shared with the verification kernel. Confirm both.
__global__ void kernel_test1_write(char* _ptr, char* end_ptr, unsigned int* err)
{
unsigned int i;
// Base of this block's chunk.
unsigned long* ptr = (unsigned long*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned long*) end_ptr) {
return;
}
for (i = 0;i < BLOCKSIZE/sizeof(unsigned long); i++){
ptr[i] =(unsigned long) & ptr[i];
}
return;
}
23,375 | #include "includes.h"
// Converts the primitive MHD fields of one cell to conserved form, applies
// the accumulated increments dU*, converts back to primitives in place, and
// damps the divergence-cleaning scalar Phi by exp(-dt*(C_h/C_p)^2)
// (presumably a Dedner-style cleaning step — confirm with the host solver).
// Skips the first/last two cells of the flat array (ghost zones).
// CUDA_BLOCK_SIZE / CUDA_GRID_SIZE are launch-geometry macros from includes.h.
__global__ void MHDUpdatePrim_CUDA3_kernel(float *Rho, float *Vx, float *Vy, float *Vz, float *Etot, float *Bx, float *By, float *Bz, float *Phi, float *dUD, float *dUS1, float *dUS2, float *dUS3, float *dUTau, float *dUBx, float *dUBy, float *dUBz, float *dUPhi, float dt, float C_h, float C_p, int size)
{
// get thread and block index
const long tx = threadIdx.x;
const long bx = blockIdx.x;
const long by = blockIdx.y;
int igrid = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
// Skip two ghost cells at each end.
if (igrid < 2 || igrid > size - 3)
return;
float D, S1, S2, S3, Tau;
// Primitive -> conserved: density, momentum, and D*Etot (Etot is stored
// as specific total energy here).
D = Rho[igrid];
S1 = D*Vx[igrid];
S2 = D*Vy[igrid];
S3 = D*Vz[igrid];
Tau = D*Etot[igrid];
// Apply the conserved-variable increments.
D += dUD[igrid];
S1 += dUS1[igrid];
S2 += dUS2[igrid];
S3 += dUS3[igrid];
Tau += dUTau[igrid];
// Conserved -> primitive.
Rho[igrid] = D;
Vx[igrid] = S1/D;
Vy[igrid] = S2/D;
Vz[igrid] = S3/D;
Etot[igrid] = Tau/D;
// Magnetic field and cleaning scalar are updated directly, then Phi is damped.
Bx[igrid] += dUBx[igrid];
By[igrid] += dUBy[igrid];
Bz[igrid] += dUBz[igrid];
Phi[igrid] += dUPhi[igrid];
Phi[igrid] *= expf(-dt*(C_h/C_p)*(C_h/C_p));
}
23,376 | /*
* _et_clear_accumulator_gpu_kernels.cu
*
* NiftyRec
* Stefano Pedemonte, May 2012.
* CMIC - Centre for Medical Image Computing
* UCL - University College London.
* Released under BSD licence, see LICENSE.txt
*/
|
23,377 | /*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
// Sets result[0..n) to `value`; one thread per element.
// Fix: compute the global index in size_t — the original int expression
// (threadIdx.x + blockIdx.x * blockDim.x) overflows for launches covering
// more than INT_MAX elements, even though n itself is a size_t.
__global__ void vec_setf (size_t n, float *result, float value)
{
    size_t id = threadIdx.x + (size_t)blockIdx.x * blockDim.x;
    if (id < n)
    {
        result[id] = value;
    }
}
//=== Vector arithmetic ======================================================
extern "C"
// Element-wise vector addition: result[i] = x[i] + y[i] for i in [0, n).
__global__ void vec_addf (size_t n, float *result, float *x, float *y)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n)
        return;
    result[tid] = x[tid] + y[tid];
}
extern "C"
__global__ void vec_subf (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mulf (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_divf (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negatef (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAddf (size_t n, float *result, float x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSubf (size_t n, float *result, float x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMulf (size_t n, float *result, float x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDivf (size_t n, float *result, float x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_ltf (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_ltef (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqf (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtef (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtf (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_nef (size_t n, float *result, float *x, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0f:0.0f;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lteScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gteScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_neScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0f:0.0f;
}
}
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
__global__ void vec_acosf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acosf(x[id]);
}
}
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_acoshf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acoshf(x[id]);
}
}
// Calculate the arc sine of the input argument.
extern "C"
__global__ void vec_asinf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinf(x[id]);
}
}
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
__global__ void vec_asinhf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinhf(x[id]);
}
}
// Calculate the arc tangent of the input argument.
extern "C"
__global__ void vec_atanf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanf(x[id]);
}
}
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_atanhf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanhf(x[id]);
}
}
// Calculate the cube root of the input argument.
extern "C"
__global__ void vec_cbrtf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cbrtf(x[id]);
}
}
// Calculate ceiling of the input argument.
extern "C"
__global__ void vec_ceilf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = ceilf(x[id]);
}
}
// Calculate the cosine of the input argument.
extern "C"
__global__ void vec_cosf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cosf(x[id]);
}
}
// Calculate the hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_coshf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = coshf(x[id]);
}
}
//=== Vector math (one argument) =============================================
// Elementwise wrappers over CUDA's single-precision math functions:
// result[i] = f(x[i]) for every i in [0, n).  Launch with any 1D grid that
// covers n elements; surplus threads fall through the bounds check.
// Fix vs. original: indices are size_t — the originals used `int`, which is
// a signed/unsigned comparison against `size_t n` and overflows for
// n > INT_MAX.

// cospif(x) = cos(pi * x).
extern "C" __global__ void vec_cospif (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = cospif(x[id]);
}
// Complementary error function erfc(x).
extern "C" __global__ void vec_erfcf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = erfcf(x[id]);
}
// Inverse complementary error function.
extern "C" __global__ void vec_erfcinvf (size_t n, float *result, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = erfcinvf(y[id]);
}
// Scaled complementary error function exp(x*x) * erfc(x).
extern "C" __global__ void vec_erfcxf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = erfcxf(x[id]);
}
// Error function erf(x).
extern "C" __global__ void vec_erff (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = erff(x[id]);
}
// Inverse error function.
extern "C" __global__ void vec_erfinvf (size_t n, float *result, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = erfinvf(y[id]);
}
// Base-10 exponential 10^x.
extern "C" __global__ void vec_exp10f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = exp10f(x[id]);
}
// Base-2 exponential 2^x.
extern "C" __global__ void vec_exp2f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = exp2f(x[id]);
}
// Base-e exponential e^x.
extern "C" __global__ void vec_expf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = expf(x[id]);
}
// e^x - 1 (accurate for x near zero).
extern "C" __global__ void vec_expm1f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = expm1f(x[id]);
}
// Absolute value |x|.
extern "C" __global__ void vec_fabsf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = fabsf(x[id]);
}
// Largest integer <= x.
extern "C" __global__ void vec_floorf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = floorf(x[id]);
}
// Bessel function of the first kind, order 0.
extern "C" __global__ void vec_j0f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = j0f(x[id]);
}
// Bessel function of the first kind, order 1.
extern "C" __global__ void vec_j1f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = j1f(x[id]);
}
// Natural log of the absolute value of the gamma function, ln|gamma(x)|.
extern "C" __global__ void vec_lgammaf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = lgammaf(x[id]);
}
// Base-10 logarithm.
extern "C" __global__ void vec_log10f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = log10f(x[id]);
}
// ln(1 + x) (accurate for x near zero).
extern "C" __global__ void vec_log1pf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = log1pf(x[id]);
}
// Base-2 logarithm.
extern "C" __global__ void vec_log2f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = log2f(x[id]);
}
// Floating-point representation of the exponent of x.
extern "C" __global__ void vec_logbf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = logbf(x[id]);
}
// Natural logarithm.
extern "C" __global__ void vec_logf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = logf(x[id]);
}
// Standard normal cumulative distribution function.
extern "C" __global__ void vec_normcdff (size_t n, float *result, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = normcdff(y[id]);
}
// Inverse of the standard normal cumulative distribution function.
extern "C" __global__ void vec_normcdfinvf (size_t n, float *result, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = normcdfinvf(y[id]);
}
// Reciprocal cube root 1 / cbrt(x).
extern "C" __global__ void vec_rcbrtf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = rcbrtf(x[id]);
}
// Round to nearest integer in floating point (halfway cases to even).
extern "C" __global__ void vec_rintf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = rintf(x[id]);
}
// Round to nearest integer in floating point (halfway cases away from zero).
extern "C" __global__ void vec_roundf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = roundf(x[id]);
}
// Reciprocal square root 1 / sqrt(x).
extern "C" __global__ void vec_rsqrtf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = rsqrtf(x[id]);
}
// Sine.
extern "C" __global__ void vec_sinf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = sinf(x[id]);
}
// Hyperbolic sine.
extern "C" __global__ void vec_sinhf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = sinhf(x[id]);
}
// sinpif(x) = sin(pi * x).
extern "C" __global__ void vec_sinpif (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = sinpif(x[id]);
}
// Square root.
extern "C" __global__ void vec_sqrtf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = sqrtf(x[id]);
}
// Tangent.
extern "C" __global__ void vec_tanf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = tanf(x[id]);
}
// Hyperbolic tangent.
extern "C" __global__ void vec_tanhf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = tanhf(x[id]);
}
// Gamma function.
extern "C" __global__ void vec_tgammaf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = tgammaf(x[id]);
}
// Truncate toward zero.
extern "C" __global__ void vec_truncf (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = truncf(x[id]);
}
// Bessel function of the second kind, order 0.
extern "C" __global__ void vec_y0f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = y0f(x[id]);
}
// Bessel function of the second kind, order 1.
extern "C" __global__ void vec_y1f (size_t n, float *result, float *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = y1f(x[id]);
}
//=== Vector math (two arguments) ============================================
// Elementwise binary wrappers: result[i] = f(x[i], y[i]) for i in [0, n).
// Indices are size_t — `int` indexing would be a signed/unsigned mismatch
// against `size_t n` and overflows for n > INT_MAX.

// Magnitude of x with the sign of y.
extern "C" __global__ void vec_copysignf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = copysignf(x[id], y[id]);
}
// Positive difference: max(x - y, 0).
extern "C" __global__ void vec_fdimf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = fdimf(x[id], y[id]);
}
// Floating-point division x / y.
extern "C" __global__ void vec_fdividef ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = fdividef(x[id], y[id]);
}
// Numeric maximum of the two arguments.
extern "C" __global__ void vec_fmaxf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = fmaxf(x[id], y[id]);
}
// Numeric minimum of the two arguments.
extern "C" __global__ void vec_fminf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = fminf(x[id], y[id]);
}
// Floating-point remainder of x / y (same sign as x).
extern "C" __global__ void vec_fmodf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = fmodf(x[id], y[id]);
}
// sqrt(x^2 + y^2) without undue overflow/underflow.
extern "C" __global__ void vec_hypotf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = hypotf(x[id], y[id]);
}
// Next representable float after x in the direction of y.
extern "C" __global__ void vec_nextafterf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = nextafterf(x[id], y[id]);
}
// Power x^y.
extern "C" __global__ void vec_powf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = powf(x[id], y[id]);
}
// IEEE remainder of x / y (rounds quotient to nearest).
extern "C" __global__ void vec_remainderf ( size_t n, float *result, float *x, float *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) result[id] = remainderf(x[id], y[id]);
}
|
23,378 | #include "includes.h"
// Mark, for each input character, whether it is a non-newline byte inside the
// text: forest[offset + tid] = (idx < text_size && text[idx] != '\n').
// BUG FIX: the original evaluated text[idx] BEFORE the idx < text_size check,
// an out-of-bounds read for the tail threads of the last block.
// NOTE(review): `offset = blockIdx.x*step + blockDim.x` looks suspicious (a
// flat layout would normally not add blockDim.x); preserved as-is — confirm
// against the forest layout expected by the caller.
__global__ void IndexLeafNode(const char *text, bool *forest, int text_size, int step)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = blockIdx.x*step+blockDim.x;
    forest[offset+threadIdx.x] = (idx < text_size && text[idx] != '\n');
}
23,379 | #include<iostream>
#include<stdlib.h>
#include<cuda.h>
#include<time.h>
#define BLOCK_SIZE 64
#define SOA 512
// Fill data[0..size) with pseudo-random integers in [0, size) using rand().
// (rand() is not reseeded here; the sequence depends on any prior srand call.)
void random_ints(int *data,int size)
{
for(int idx = 0; idx < size; ++idx)
{
data[idx] = rand() % size;
}
}
// Block-wise max reduction: results[b] = max of input elements covered by
// block b.  Requires blockDim.x to be a power of two and <= BLOCK_SIZE.
// BUG FIX: out-of-range lanes were padded with -INT_MAX (== INT_MIN + 1),
// which would report the wrong maximum for an array of all INT_MIN; the
// correct identity for max is INT_MIN.
__global__ void ReductionMax2(int *input,int *results,int n)
{
__shared__ int sdata[BLOCK_SIZE];
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int tx=threadIdx.x;
int x=INT_MIN;            // identity element for max
if(i<n)
x=input[i];
sdata[tx]=x;
__syncthreads();          // shared tile fully populated before reading
for(unsigned int offset=blockDim.x>>1;offset>0;offset >>=1)
{
__syncthreads();          // previous round's writes visible to all lanes
if(tx<offset)
{
if(sdata[tx+offset]>sdata[tx])
sdata[tx]=sdata[tx+offset];
}
}
if(threadIdx.x==0)
{
results[blockIdx.x]=sdata[0];
}
}
// Two-pass parallel max over SOA random ints: pass 1 produces one maximum
// per block, pass 2 reduces those partial maxima with a single block.
int main()
{
int num_blocks=SOA/BLOCK_SIZE;
int num_threads=BLOCK_SIZE;          // dropped unused local `i`
unsigned int mem_size_a=sizeof(int)*SOA;
int *h_a=(int*)malloc(mem_size_a);
random_ints(h_a,SOA);
int *d_a;
cudaMalloc((void**)&d_a,mem_size_a);
cudaMemcpy(d_a,h_a,mem_size_a,cudaMemcpyHostToDevice);
unsigned int mem_size_b=sizeof(int)*num_blocks;
int *d_b;
cudaMalloc((void**)&d_b,mem_size_b);
int *h_b=(int*)malloc(mem_size_b);
unsigned int mem_size_c=sizeof(int);
int *d_c;
cudaMalloc((void**)&d_c,mem_size_c);
ReductionMax2<<<num_blocks,num_threads>>>(d_a,d_b,SOA);
cudaMemcpy(h_b,d_b,mem_size_b,cudaMemcpyDeviceToHost);
// Second pass: num_blocks partial maxima reduced by one block.
ReductionMax2<<<1,num_blocks>>>(d_b,d_c,num_blocks);
int *h_c=(int*)malloc(mem_size_c);
cudaMemcpy(h_c,d_c,mem_size_c,cudaMemcpyDeviceToHost);
for(int j=0;j<SOA;j++)
{
std::cout<<h_a[j]<<",";
}
std::cout<<"\nblock max";
for(int j=0;j<num_blocks;j++)
{
std::cout<<h_b[j]<<",";
}
std::cout<<"\nparallel max="<<*h_c;
// BUG FIX: the original leaked every host and device allocation.
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
23,380 | #include <iostream>
#include "vector_summation.cuh"
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <cuda.h>
#include <stdio.h>
//! Construct a GPU-backed copy of vec_cpu (N ints) in managed memory.
//! BUG FIX: the original NULL check ran AFTER cudaMemcpy had already written
//! through the (possibly unallocated) pointers; check before copying.
GpuVector::GpuVector(int* vec_cpu,int N):N_(N)
{
int nbytes=N_*sizeof(int);
vec_gpu = NULL;
vec_sum_device = NULL;
cudaMallocManaged((void **)&vec_gpu,nbytes);
cudaMallocManaged((void **)&vec_sum_device,sizeof(int));
if(vec_gpu == NULL || vec_sum_device == NULL )
{
printf("couldn't allocate GPU memory\n");
return;   // do not copy into unallocated buffers
}
cudaMemcpy(vec_gpu, vec_cpu, nbytes, cudaMemcpyHostToDevice);
}
//! Launch the summation kernel over vec_gpu and copy the scalar result into
//! vec_sum_host (blocking via the synchronize + cudaMemcpy pair).
void GpuVector::sum()
{
//! 1D block of 256 threads
int bs = 256;
//! 2D grid sized s x s blocks.
//! NOTE(review): dividing the element count N_ by sizeof(int) here looks
//! wrong (it undercounts elements by 4x) — confirm against the coverage
//! that vector_sum_kernel (declared in vector_summation.cuh) expects.
int s = ceil(sqrt((N_/sizeof(int) + bs - 1.) / bs));
dim3 grid = dim3(s, s);
//! Call kernel function from the host
vector_sum_kernel<<<grid, bs>>>(vec_gpu,N_,vec_sum_device);
//! Wait for the kernel, then copy the scalar result back.
//! NOTE(review): cudaThreadSynchronize() is deprecated in favor of
//! cudaDeviceSynchronize(); kept as-is to preserve the code unchanged.
cudaThreadSynchronize();
cudaMemcpy(&vec_sum_host, vec_sum_device, sizeof(int), cudaMemcpyDeviceToHost);
}
// Release both managed allocations.  BUG FIX: the original freed only
// vec_gpu and leaked vec_sum_device.
GpuVector::~GpuVector(){cudaFree(vec_gpu);cudaFree(vec_sum_device);}
// Demo: build a host vector of 100 tens, sum it on the GPU via GpuVector,
// then recompute the sum on the CPU for comparison.
int main()
{
const int N = 100;
//! Allocate and fill the host vector.
int *vec_cpu = (int *) malloc(N * sizeof(int));
for (int k = 0; k < N; ++k) {
vec_cpu[k] = 10;
}
//! Wrap it in a GPU-backed vector.
GpuVector vec_gpu(vec_cpu,N);
//! Sum on the GPU and show the result before/after.
std::cout<<"before running on gpu, sum="<<vec_gpu.vec_sum_host<<std::endl;
vec_gpu.sum();
cudaDeviceSynchronize();
std::cout<<"After running on gpu, sum="<<vec_gpu.vec_sum_host<<std::endl;
//! Reference sum on the CPU.
int vec_cpu_sum = 0;
std::cout<<"Before running on cpu, sum="<<vec_cpu_sum<<std::endl;
cudaDeviceSynchronize();
for (int k = 0; k < N; ++k) {
vec_cpu_sum += vec_cpu[k];
}
std::cout<<"After running on cpu, sum="<<vec_cpu_sum<<std::endl;
free(vec_cpu);
}
|
23,381 | #include "warpStandard.cuh"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <string>
#include <iostream>
#include <numeric>
#include <sys/time.h>
#include <sstream>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
extern __shared__ unsigned rngShmem[];
// Monte-Carlo quarter-circle counter using the WarpStandard warp RNG.
// Each thread draws N (x, y) pairs of 32-bit uniforms and counts how many
// land inside the unit quarter circle; the count is written to hits[tid].
__global__ void CalcPiKernel(unsigned *state, unsigned N, unsigned *hits)
{
unsigned rngRegs[WarpStandard_REG_COUNT];
// Load this warp's RNG state (uses the rngShmem shared buffer).
WarpStandard_LoadState(state, rngRegs, rngShmem);
unsigned acc=0;
for(unsigned i=0;i<N;i++)
{
unsigned long x=WarpStandard_Generate(rngRegs, rngShmem);
unsigned long y=WarpStandard_Generate(rngRegs, rngShmem);
// x,y < 2^32, so x*x < 2^64; >>3 keeps x^2/8 <= 2^61 and makes
// x^2/8 + y^2/8 safe from 64-bit overflow.  The test below is then
// x^2 + y^2 <= 2^64, i.e. (x/2^32)^2 + (y/2^32)^2 <= 1.
x=(x*x)>>3;
y=(y*y)>>3;
if(x+y <= (1UL<<61))
{
acc++;
}
}
hits[blockIdx.x*blockDim.x+threadIdx.x]=acc;
// Persist the advanced RNG state for the next launch.
WarpStandard_SaveState(rngRegs, rngShmem, state);
}
// Estimate pi with the WarpStandard RNG, doubling the per-thread sample
// count N on each of K rounds and printing the running estimate.
int main(int,char *[])
{
int devId=-1;
cudaDeviceProp devProps;
cudaGetDevice(&devId);
cudaGetDeviceProperties(&devProps, devId);
unsigned gridSize=devProps.multiProcessorCount;
unsigned blockSize=256;
unsigned totalThreads=blockSize*gridSize;
unsigned totalRngs=totalThreads/WarpStandard_K;
unsigned rngsPerBlock=blockSize/WarpStandard_K;
unsigned sharedMemBytesPerBlock=rngsPerBlock*WarpStandard_K*4;
fprintf(stderr, "gridSize=%u, blockSize=%u, totalThreads=%u\n", gridSize, blockSize, totalThreads);
unsigned seedBytes=totalRngs*4*WarpStandard_STATE_WORDS;
std::vector<uint32_t> seedHost(seedBytes/4);
void *seedDevice=0;
if(cudaMalloc(&seedDevice, seedBytes))
{
fprintf(stderr, "Error couldn't allocate state array of size %u\n", seedBytes);
exit(1);
}
// Seed the RNG state from the OS entropy pool.
int fr=open("/dev/urandom", O_RDONLY);
if(seedBytes!=read(fr, &seedHost[0], seedBytes))
{
fprintf(stderr, "Couldn't seed RNGs.\n");
exit(1);
}
close(fr);   // BUG FIX: the descriptor was leaked
// BUG FIX: this upload was commented out in the original, so the kernel
// consumed an *uninitialized* device state array instead of the seeds.
if(cudaMemcpy(seedDevice, &seedHost[0], seedBytes, cudaMemcpyHostToDevice))
{
fprintf(stderr, "Error: couldn't copy seed array to device.\n");
exit(1);
}
std::vector<uint32_t>hitsHost(totalThreads, 0);
void *hitsDevice=0;
if(cudaMalloc(&hitsDevice, totalThreads*4))
{
fprintf(stderr, "Error: couldn't allocate hits array of size %u.\n", totalThreads*4);
exit(1);
}
if(cudaMemcpy(hitsDevice, &hitsHost[0], totalThreads*4, cudaMemcpyHostToDevice))
{
fprintf(stderr, "Error: couldn't copy hits array to device.\n");
exit(1);
}
unsigned K=8;
unsigned N=65536;
double totalHits=0, totalSamples=0;
for(unsigned i=0;i<K;i++)
{
N=N*2;   // double the per-thread sample count each round
double outputsPerKernel=totalThreads*double(N);
CalcPiKernel<<<gridSize,blockSize,sharedMemBytesPerBlock>>>((unsigned*)seedDevice, N, (unsigned*)hitsDevice);
cudaMemcpy(&hitsHost[0], hitsDevice, 4*totalThreads, cudaMemcpyDeviceToHost);
totalSamples+=outputsPerKernel;
totalHits += std::accumulate(hitsHost.begin(), hitsHost.end(), 0.0);
// Quarter-circle area ratio -> pi estimate.
double estimate=4*totalHits/totalSamples;
fprintf(stdout, "totalHits=%lg, totalSamples=%lg\n", totalHits, totalSamples);
fprintf(stdout, "samples=2^%lg, estimate=%.16lf, error=%lg\n", log(totalSamples)/log(2), estimate, std::abs(estimate-M_PI));
}
return 0;
}
|
23,382 | /*
* File: Complex.cu
*
* Created on June 24, 2012
*
* Purpose: Simple complex number class for use on GPU
*
* If it works, it was written by Brian Swenson.
* Otherwise, I have no idea who wrote it.
*/
// Minimal single-precision complex number for device code.
// Fixes vs. original: member functions are const-qualified (they do not
// mutate state, and non-const members cannot be called on const objects),
// and the copy constructor is usable from host code as well as device code.
class Complex
{
public:
float r;   // real part
float i;   // imaginary part
__host__ __device__ Complex( float a, float b ) : r(a), i(b) {}
__host__ __device__ Complex(const Complex& x) : r(x.r), i(x.i) {}
// Squared magnitude |z|^2 = r^2 + i^2 (no sqrt — cheap escape test).
__device__ float magnitude2( void ) const {
return r * r + i * i;
}
// Complex product (r1 + i1*j)(r2 + i2*j).
__device__ Complex operator*(const Complex& a) const {
return Complex(r*a.r - i*a.i, i*a.r + r*a.i);
}
// Component-wise sum.
__device__ Complex operator+(const Complex& a) const {
return Complex(r+a.r, i+a.i);
}
};
23,383 | #include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#define M_PI 3.14159265359
#define MIN(a,b) (((a)<(b))?(a):(b))
// Row-major landmark-by-measurement likelihood matrix
// (entry [i * n_measurements + j] scores landmark i against measurement j).
typedef struct
{
float *matrix;          // n_landmarks * n_measurements scores
int n_landmarks;
int n_measurements;
} dist_matrix;
// Result of data association for one particle.
typedef struct
{
int *assignment;             // per-landmark measurement index, -1 = unmatched
bool *assigned_measurements; // per-measurement "was claimed" flag
} assignment;
// Batch of 2D measurements relative to the robot pose, sharing one 2x2
// measurement covariance (flattened to 4 floats).
typedef struct
{
float (*measurements)[2];
int n_measurements;
float *measurement_cov;
} landmark_measurements;
// Particle memory layout (floats), as used by the accessors below:
//   [0]=x, [1]=y, [3]=weight (scaled in update_landmark), [4]=max_landmarks,
//   [5]=n_landmarks, then max_landmarks 2-float means, then max_landmarks
//   4-float covariances — (6 + 6*max_landmarks) floats per particle.
//   ([2] is not referenced in this chunk — presumably heading; confirm.)
// NOTE: get_particle reads the capacity from particles[4], i.e. from the
// FIRST particle's header — all particles must share the same capacity.
__device__ float* get_particle(float *particles, int i) {
int max_landmarks = (int)particles[4];
return (particles + (6 + 6*max_landmarks)*i);
}
// Pointer to landmark i's 2-float mean inside `particle`.
__device__ float* get_mean(float *particle, int i)
{
return (particle + 6 + 2*i);
}
// Pointer to landmark i's 4-float covariance (stored after all means).
__device__ float* get_cov(float *particle, int i)
{
int max_landmarks = (int)particle[4];
return (particle + 6 + 2*max_landmarks + 4*i);
}
// Current number of landmarks stored in this particle.
__device__ int get_n_landmarks(float *particle)
{
return (int)particle[5];
}
// Append (mean, cov) as a new landmark and bump the particle's stored count.
// No capacity check is performed — the caller must ensure room remains.
__device__ void add_landmark(float *particle, float mean[2], float *cov)
{
int slot = (int)particle[5];
particle[5] = (float)(slot + 1);
float *dst_mean = get_mean(particle, slot);
float *dst_cov = get_cov(particle, slot);
dst_mean[0] = mean[0];
dst_mean[1] = mean[1];
for (int k = 0; k < 4; k++) {
dst_cov[k] = cov[k];
}
}
// Every measurement left unmatched by data association spawns a brand-new
// landmark at (particle position + relative measurement), using the shared
// measurement covariance as its initial covariance.
__device__ void add_unassigned_measurements_as_landmarks(float *particle, bool *assigned_measurements, landmark_measurements *measurements)
{
float px = particle[0];
float py = particle[1];
for (int m = 0; m < measurements->n_measurements; m++) {
if (assigned_measurements[m]) {
continue;
}
float world[2] = {
px + measurements->measurements[m][0],
py + measurements->measurements[m][1]
};
add_landmark(particle, world, measurements->measurement_cov);
}
}
// Bootstrap case: with no existing landmarks, every measurement becomes a
// new landmark at (particle position + relative measurement).
__device__ void add_measurements_as_landmarks(float *particle, landmark_measurements *measurements)
{
float px = particle[0];
float py = particle[1];
for (int m = 0; m < measurements->n_measurements; m++) {
float world[2] = {
px + measurements->measurements[m][0],
py + measurements->measurements[m][1]
};
add_landmark(particle, world, measurements->measurement_cov);
}
}
// Insertion sort of arr[0..n) in DESCENDING order, permuting the parallel
// index arrays lm (landmark ids) and me (measurement ids) in lockstep.
__device__ void insertion_sort(float arr[], int lm[], int me[], int n)
{
for (int pos = 1; pos < n; pos++) {
float v = arr[pos];
int v_lm = lm[pos];
int v_me = me[pos];
int k = pos - 1;
// Shift smaller entries right until v's slot is found.
while (k >= 0 && arr[k] < v) {
arr[k + 1] = arr[k];
lm[k + 1] = lm[k];
me[k + 1] = me[k];
k--;
}
arr[k + 1] = v;
lm[k + 1] = v_lm;
me[k + 1] = v_me;
}
}
// 2x2 matrix-vector product: v = A * u, A row-major [a b; c d].
// BUG FIX: the original loaded f from v[1] — the OUTPUT vector — instead of
// u[1], so the product used stale/zero data (its caller passes v = {0, 0}),
// silently zeroing the second input component.
__device__ void vecmul(float *A, float *u, float *v)
{
float a = A[0];
float b = A[1];
float c = A[2];
float d = A[3];
float e = u[0];
float f = u[1];
v[0] = a*e + b*f;
v[1] = c*e + d*f;
}
// 2x2 matrix product C = A * B (row-major, 4 floats each).  Both operands
// are fully loaded into registers before C is written, so C may alias A or B.
__device__ void matmul(float *A, float *B, float *C)
{
float a00 = A[0], a01 = A[1];
float a10 = A[2], a11 = A[3];
float b00 = B[0], b01 = B[1];
float b10 = B[2], b11 = B[3];
C[0] = a00*b00 + a01*b10;
C[1] = a00*b01 + a01*b11;
C[2] = a10*b00 + a11*b10;
C[3] = a10*b01 + a11*b11;
}
// 2x2 Moore-Penrose pseudo-inverse: B = (A^T A)^-1 A^T, all row-major.
// For invertible A this equals A^-1.  No guard against a singular A^T A
// (scalar would divide by ~0) — callers pass positive-definite covariances.
__device__ void pinv(float *A, float *B)
{
float a = A[0];
float b = A[1];
float c = A[2];
float d = A[3];
// e,f,g,h = entries of the symmetric 2x2 product A^T A.
float e = a*a + c*c;
float f = a*b + c*d;
float g = a*b + c*d;
float h = b*b + d*d;
// Closed-form 2x2 inverse of A^T A via its determinant.
float scalar = 1/(e*h - f*g);
float e_i = scalar * h;
float f_i = scalar * (-f);
float g_i = scalar * (-g);
float h_i = scalar * e;
// B = inv(A^T A) * A^T.
B[0] = e_i*a + f_i*b;
B[1] = e_i*c + f_i*d;
B[2] = g_i*a + h_i*b;
B[3] = g_i*c + h_i*d;
}
// Bivariate Gaussian density of x under N(mean, cov), specialized to a
// covariance of the form [[a, b], [b, a]] (only cov[0] and cov[1] are read).
// NOTE(review): the Mahalanobis term treats the x/y axes as the eigen
// directions of the covariance, which is exact only when b == 0; for b != 0
// the eigenvectors of [[a,b],[b,a]] are (1,1)/sqrt(2) and (1,-1)/sqrt(2),
// so this is an approximation — confirm intent before relying on it.
__device__ float pdf(float *x, float *mean, float* cov)
{
float a = cov[0];
float b = cov[1];
// det([[a,b],[b,a]]) = a^2 - b^2; eigenvalues are a - b and a + b.
float logdet = log(a*a - b*b);
float root = sqrt(2.0)/2.0;
float e = root * (1.0/sqrt(a-b));
float f = root * (1.0/sqrt(a+b));
float m = x[0] - mean[0];
float n = x[1] - mean[1];
// maha = m^2/(a-b) + n^2/(a+b).
float maha = 2*(m*m*e*e + n*n*f*f);
float log2pi = log(2 * M_PI);
// exp(-1/2 (k*log(2*pi) + maha + logdet)) with k = 2 dimensions.
return exp(-0.5 * (2*log2pi + maha + logdet));
}
// Fill matrix->matrix[i][j] with the likelihood of measurement j given
// landmark i (despite the "dist" name, HIGHER values mean better matches).
// The innovation covariance is landmark cov + shared measurement cov.
__device__ void compute_dist_matrix(float *particle, landmark_measurements *measurements, dist_matrix *matrix)
{
float *measurement_cov = measurements->measurement_cov;
float pos[] = { particle[0], particle[1] };
// Covariances are contiguous; landmarks_cov[4*i ..] is landmark i's block.
float *landmarks_cov = get_cov(particle, 0);
for(int i = 0; i < matrix->n_landmarks; i++) {
float *landmark = get_mean(particle, i);
for(int j = 0; j < matrix->n_measurements; j++) {
// Predicted measurement = landmark position relative to the robot.
float measurement_predicted[] = {
landmark[0] - pos[0], landmark[1] - pos[1]
};
// NOTE: cov does not depend on j and could be hoisted out of this loop.
float cov[4] = {
landmarks_cov[4*i] + measurement_cov[0],
landmarks_cov[4*i+1] + measurement_cov[1],
landmarks_cov[4*i+2] + measurement_cov[2],
landmarks_cov[4*i+3] + measurement_cov[3]
};
matrix->matrix[i * matrix->n_measurements + j] = pdf(measurement_predicted, measurements->measurements[j], cov);
}
}
}
// Greedy landmark<->measurement association: keep only pairs whose
// likelihood exceeds `threshold`, sort them best-first, then assign greedily.
// `data_assoc_memory` must hold 2 * n_landmarks * n_measurements ints.
// Destroys matrix->matrix: surviving scores are compacted into its prefix
// (safe in place because the write index k never exceeds the read index).
__device__ void assign(dist_matrix *matrix, int *data_assoc_memory, assignment *assignment, float threshold) {
int n_landmarks = matrix->n_landmarks;
int n_measurements = matrix->n_measurements;
int *landmark_idx = data_assoc_memory;
int *measurement_idx = landmark_idx + (n_landmarks * n_measurements);
int k = 0;
for(int i = 0; i < n_landmarks; i++) {
for(int j = 0; j < n_measurements; j++) {
// only take values > threshold
if(matrix->matrix[i * n_measurements + j] > threshold) {
landmark_idx[k] = i;
measurement_idx[k] = j;
matrix->matrix[k] = matrix->matrix[i * n_measurements + j];
k++;
}
}
}
// Best (largest likelihood) candidates first.
insertion_sort(matrix->matrix, landmark_idx, measurement_idx, k);
int iterations = MIN(n_landmarks, k);
for(int i = 0; i < iterations; i++) {
int a = landmark_idx[i];
int b = measurement_idx[i];
// Skip landmarks that already got a (better) match.
// NOTE(review): assigned_measurements[b] is set but never consulted here,
// so one measurement can be claimed by several landmarks — confirm intent.
if(assignment->assignment[a] != -1){
continue;
}
assignment->assignment[a] = b;
assignment->assigned_measurements[b] = true;
}
}
// Build the landmark-vs-measurement likelihood matrix in the caller-provided
// buffer `m`, then greedily match pairs above `threshold` into `assignment`.
__device__ void associate_landmarks_measurements(float *particle, float *m, int *data_assoc_memory, landmark_measurements *measurements, int n_landmarks, assignment *assignment, float threshold) {
dist_matrix likelihoods;
likelihoods.matrix = m;
likelihoods.n_landmarks = n_landmarks;
likelihoods.n_measurements = measurements->n_measurements;
compute_dist_matrix(particle, measurements, &likelihoods);
assign(&likelihoods, data_assoc_memory, assignment, threshold);
}
// EKF-style update of every landmark matched by data association:
// residual -> innovation covariance Q -> gain K = P * Q^-1 -> mean += K*r,
// P = (I - K) * P, and the particle weight (particle[3]) is scaled by the
// measurement likelihood.
__device__ void update_landmark(float *particle, landmark_measurements *measurements, assignment *assignment)
{
float *measurement_cov = measurements->measurement_cov;
float x = particle[0];
float y = particle[1];
int n_landmarks = get_n_landmarks(particle);
for(int i = 0; i < n_landmarks; i++) {
int j = assignment->assignment[i];
if(j == -1) {
continue;  // landmark unmatched this frame
}
float *mean = get_mean(particle, i);
float mean_x = mean[0];
float mean_y = mean[1];
// Predicted measurement: landmark relative to robot pose.
float measurement_predicted[2] = { mean_x - x, mean_y - y };
float residual[2] = {
measurements->measurements[j][0] - measurement_predicted[0],
measurements->measurements[j][1] - measurement_predicted[1]
};
float *cov = get_cov(particle, i);
// Innovation covariance Q = P + R (measurement model Jacobian = I).
float Q[4] = {
cov[0] + measurement_cov[0],
cov[1] + measurement_cov[1],
cov[2] + measurement_cov[2],
cov[3] + measurement_cov[3]
};
float K[4] = { 0, 0, 0, 0 };
float Q_inv[4] = { 0, 0, 0, 0 };
pinv(Q, Q_inv);
// Kalman gain K = P * Q^-1.
matmul(cov, Q_inv, K);
float K_residual[] = { 0, 0 };
vecmul(K, residual, K_residual);
mean[0] += K_residual[0];
mean[1] += K_residual[1];
// NOTE(review): (I - K) would be {1-K0, -K1, -K2, 1-K3}; the positive
// off-diagonal signs here look like a sign bug — confirm before changing.
float new_cov[] = { 1 - K[0], K[1], K[2], 1 - K[3] };
// matmul loads its inputs before writing, so aliasing new_cov is safe.
matmul(new_cov, cov, new_cov);
cov[0] = new_cov[0];
cov[1] = new_cov[1];
cov[2] = new_cov[2];
cov[3] = new_cov[3];
// Reweight the particle by the likelihood of this measurement.
particle[3] *= pdf(measurements->measurements[j], measurement_predicted, Q);
}
}
// Largest landmark count among the block_size consecutive particles owned
// by this thread (used to size the per-thread scratch allocation).
__device__ int get_max_landmarks_in_block(float *particles, int block_size, int thread_id, int n_particles) {
int best = 0;
for (int k = 0; k < block_size; k++) {
int pid = thread_id * block_size + k;
if (pid >= n_particles) {
break;
}
int count = get_n_landmarks(get_particle(particles, pid));
if (count > best) {
best = count;
}
}
return best;
}
// FastSLAM measurement update.  One thread handles `block_size` consecutive
// particles: associate measurements with landmarks, Kalman-update matches,
// and turn unmatched measurements into new landmarks.
// BUG FIX: the early `return` when particle_id ran past n_particles leaked
// the device-heap scratchpad; it is now a `break` so the free() runs.
__global__ void update(
float *particles, int block_size, float measurements_array[][2], int n_particles, int n_measurements,
float *measurement_cov, float threshold/*, int *scratchpad_memory, int size*/)
{
if(n_measurements == 0) {
return;
}
int block_id = blockIdx.x+ blockIdx.y * gridDim.x;
int thread_id = block_id * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int max_landmarks = get_max_landmarks_in_block(particles, block_size, thread_id, n_particles);
// Scratch layout (ints): [assignment: max_landmarks + n_measurements |
// assoc index pairs: 2*L*M | likelihood matrix: L*M floats].
int scratchpad_size = (max_landmarks + n_measurements + (3 * max_landmarks * n_measurements)) * sizeof(int);
int *scratchpad = NULL;
int *assignment_memory = NULL;
int *data_assoc_memory = NULL;
float *matrix_memory = NULL;
if(scratchpad_size > 0) {
scratchpad = (int *)malloc(scratchpad_size);
if(scratchpad == NULL) {
return;  // device heap exhausted; skip rather than dereference NULL
}
assignment_memory = scratchpad;
data_assoc_memory = assignment_memory + (max_landmarks + n_measurements);
matrix_memory = (float *)(data_assoc_memory + (2 * max_landmarks * n_measurements));
}
landmark_measurements measurements;
measurements.n_measurements = n_measurements;
measurements.measurement_cov = measurement_cov;
measurements.measurements = measurements_array;
for(int k = 0; k < block_size; k++) {
int particle_id = thread_id*block_size + k;
if(particle_id >= n_particles) {
break;   // BUG FIX: was `return`, which skipped the free() below
}
float *particle = get_particle(particles, particle_id);
int n_landmarks = get_n_landmarks(particle);
if(n_landmarks == 0) {
// Map is empty: every measurement seeds a new landmark.
add_measurements_as_landmarks(particle, &measurements);
continue;
}
// Reset per-particle assignment state in the shared scratch.
bool *assigned_measurements = (bool *)(assignment_memory);
int *assignment_lm = (int *)(assignment_memory + n_measurements);
for(int i = 0; i < n_measurements; i++) {
assigned_measurements[i] = false;
}
for(int i = 0; i < n_landmarks; i++) {
assignment_lm[i] = -1;
}
assignment assignmentx;
assignmentx.assignment = assignment_lm;
assignmentx.assigned_measurements = assigned_measurements;
associate_landmarks_measurements(
particle, matrix_memory, data_assoc_memory, &measurements,
n_landmarks, &assignmentx,
threshold
);
update_landmark(particle, &measurements, &assignmentx);
add_unassigned_measurements_as_landmarks(particle, assignmentx.assigned_measurements, &measurements);
}
if(scratchpad_size > 0) {
free(scratchpad);
}
}
23,384 | #include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <sys/time.h>
#define N (1<<12)
#define M (1<<12)
#define THREADBLOCKSIZE 1024
#define LENGTH (N*sizeof(point))
#define INDEX (blockIdx.x * blockDim.x + threadIdx.x)
#define D2H cudaMemcpyDeviceToHost
#define H2D cudaMemcpyHostToDevice
#define MARK_TIME(t) gettimeofday(&t, NULL)
#define CALC_TIME(t1, t2) (1.0e6 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec))/(1.0e6)
#define PRINT_TIME(t, s) printf("It took %f seconds to do %s\n",t,s)
// One pi-estimation experiment: sample-count parameter, test index, result.
typedef struct {
int num, tnum;   // num = total thread count requested; tnum = test index
float pi;        // resulting estimate of pi
} CalcTest;
extern __shared__ float sdat[];
// Give every thread its own curand state; the device index `d` is the seed
// so different GPUs produce independent streams.
__global__ void
kernel_setup(curandState *states, int d) {
int gid = INDEX;
curand_init(d, gid, 0, &states[gid]);
}
// Draw M random points in the unit square per thread, count those inside the
// unit circle, and leave this thread's local pi estimate (4 * hits / M) in
// sdat[threadIdx.x].
// BUG FIX: shared memory is uninitialized on kernel entry; the original
// accumulated with `+=` into garbage.  sdat[tid] is now zeroed first.
__device__ void
d_gen(curandState *globalState) {
int i = INDEX;
curandState localState = globalState[i];
int tid = threadIdx.x;
sdat[tid] = 0.0f;   // shared memory starts undefined — must clear before +=
for (int s = 0; s < M; s++) {
float rx = curand_uniform(&localState);
float ry = curand_uniform(&localState);
float mag = rx*rx + ry*ry;
if (mag <= 1.0f) {
sdat[tid] += 1.0;
}
}
globalState[i] = localState;   // persist the advanced RNG state
// Quarter-circle ratio -> pi: estimate = 4 * hits / M.
sdat[tid] *= 4.0f;
sdat[tid] *= 1.0/M;
}
// Tree-reduce the per-thread estimates in sdat, average them over the block,
// and store the block's mean estimate in sums[blockIdx.x].
// Requires blockDim.x to be a power of two; callers must __syncthreads()
// between writing sdat and calling this (generate() does).
__device__ void
d_count(float *sums) {
int tid = threadIdx.x;
for (int i = blockDim.x/2; i > 0; i >>= 1) {
if (tid < i)
sdat[tid] += sdat[tid + i];
__syncthreads();   // reached by all threads — not inside the divergent if
}
__syncthreads();
if (tid == 0) {
sdat[0] *= 1.0/blockDim.x;   // mean of the block's estimates
sums[blockIdx.x] = sdat[0];
}
}
// One block pass: per-thread Monte-Carlo estimates into shared memory, a
// barrier so every sdat slot is written, then a block reduction into sums.
__global__ void
generate(curandState *globalState, float *sums) {
d_gen(globalState);
__syncthreads();
d_count(sums);
}
// Run one pi-estimation pass with `num` total threads on one device,
// printing a couple of block results and storing the estimate in t->pi.
// Returns t for chaining.
// BUG FIX: the curand state arrays (states[d]) were never cudaFree'd.
CalcTest *
runTest(int num, CalcTest *t) {
printf("calculating pi with num %d\n", num);
int numDevs = 0;
cudaGetDeviceCount(&numDevs);
numDevs = 1;   // deliberately restricted to a single device here
dim3 block, grid;
block.x = THREADBLOCKSIZE;
grid.x = (num + THREADBLOCKSIZE - 1)/THREADBLOCKSIZE;   // ceil-div
// Host-side per-device result arrays (one float per block).
float *a[numDevs];
for (int d = 0; d < numDevs; d++) {
a[d] = (float *)malloc(grid.x*sizeof(float));
}
// Device allocations: per-block results + one RNG state per thread.
float *d_a[numDevs];
curandState *states[numDevs];
for (int d = 0; d < numDevs; d++) {
cudaSetDevice(d);
cudaMalloc(&d_a[d],grid.x*sizeof(float));
cudaMalloc(&states[d],num*sizeof(curandState));
}
// Seed the RNGs, then run the estimator (dynamic shared mem: one float/thread).
for (int d = 0; d < numDevs; d++) {
cudaSetDevice(d);
kernel_setup<<<grid, block>>>(states[d],d);
generate<<<grid, block, block.x*sizeof(float)>>>(states[d], d_a[d]);
}
// Blocking copies double as synchronization with the kernels above.
for (int d = 0; d < numDevs; d++) {
cudaSetDevice(d);
cudaMemcpy(a[d],d_a[d],grid.x*sizeof(float), D2H);
}
// Average all block estimates; print only the first num_print of them.
int num_print = 2;
float total = 0.0;
for (int d = 0; d < numDevs; d++) {
printf("Values received from device %d:\n",d);
for (int i = 0; i < grid.x; i++) {
total += a[d][i];
if (i < num_print) printf("\ta[%d][%d]: %f\n",d,i,a[d][i]);
}
}
total *= 1.0/(grid.x * numDevs);
printf("Estimate of pi: %f\n",total);
t->pi = total;
for (int d = 0; d < numDevs; d++) {
free(a[d]);
cudaFree(d_a[d]);
cudaFree(states[d]);   // was leaked in the original
}
return t;
}
// Sweep sample counts 2^0 .. 2^21 through runTest, then dump
// (test index, N, pi estimate) as TSV to results_multi.txt.
// BUG FIX: fopen's result was used unchecked; a failed open would have
// crashed on the first fprintf.  (A large block of dead commented-out code
// duplicating runTest was also removed.)
int
main(void) {
int numTests = 22;
CalcTest tests[numTests];
for (int i = 0; i < numTests; i++) {
tests[i].num = 1<<i;
tests[i].tnum = i;
runTest(1<<i, &tests[i]);
}
FILE *fp;
fp = fopen("results_multi.txt","w");
if (fp == NULL) {
fprintf(stderr, "could not open results_multi.txt\n");
return 1;
}
fprintf(fp, "tnum\tN\tpi\n");
for (int i = 0; i < numTests; i++) {
fprintf(fp, "%d\t%d\t%f\n", tests[i].tnum, tests[i].num, tests[i].pi);
}
fclose(fp);
return 0;
}
|
23,385 | #include "includes.h"
// One thread per output weight: applies a momentum-smoothed gradient step
// to the hidden->output weight matrix and records the applied delta.
__global__ void ChangeOutputWeightsKernel( float *outputWeights, float *outputWeightDeltas, float *outputDeltas, float *hiddenActivations, float trainingRate, float momentum )
{
    // Flatten the 2-D block grid into a single weight index.
    int weightId = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (weightId >= D_OUTPUT_UNITS * D_HIDDEN_UNITS)
        return;
    int to = weightId / D_HIDDEN_UNITS;   // output unit this weight feeds
    int from = weightId % D_HIDDEN_UNITS; // hidden unit it comes from
    // delta = lr * dE/dw + momentum * previous delta
    float gradient = outputDeltas[to] * hiddenActivations[from];
    float delta = trainingRate * gradient + momentum * outputWeightDeltas[weightId];
    outputWeightDeltas[weightId] = delta;
    outputWeights[weightId] += delta;
} |
23,386 | #include <stdio.h>
#define TPB 256
#define BPG 1
// Each thread prints its flat global id.
// FIX: added a trailing '\n' — without it every thread's output runs
// together on one unterminated line in the device printf buffer.
__global__ void printing()
{
    int myID = blockIdx.x *blockDim.x + threadIdx.x;
    printf("Hello world! My thread ID is %d\n", myID);
}
int main()
{
    printing<<<BPG, TPB>>>();
    // FIX: kernel launches are asynchronous and device printf output is
    // buffered; without this synchronization the process can exit before
    // anything is flushed to stdout.
    cudaDeviceSynchronize();
    return 0;
} |
23,387 | #include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <fstream>
// Definitions
#define CUDADEVICE 0
#define ARRAY_POWER_SIZE 30
#define RANDOM_SEED 1337
#define ENSEMBLES 50000
// Fills values[0..values_n) with fair coin flips (0 or 1), one curand
// state per thread, via a grid-stride loop.
__global__
void generate_random_data(int* values, unsigned int values_n) {
    // Flat global thread id
    unsigned int global_thread_id = (blockDim.x * blockIdx.x) + threadIdx.x;
    // BUG FIX: a grid-stride loop must advance by the total number of
    // launched threads (gridDim.x * blockDim.x). The original used
    // gridDim.x only, so many threads revisited — and raced on — the same
    // elements.
    unsigned int stride = gridDim.x * blockDim.x;
    // Per-thread RNG state; sequence = thread id for independent streams.
    curandState_t random_state;
    curand_init(RANDOM_SEED, global_thread_id, pow(global_thread_id, 2), &random_state);
    // Threshold a uniform(0,1] draw at 0.5 to get a 0/1 value.
    for(unsigned int index = global_thread_id; index < values_n; index += stride) {
        values[index] = (curand_uniform(&random_state) > 0.5) ? 1 : 0;
    }
}
// Single-block alternating-sign reduction:
//   result[0] = sum over i of values[i] * (+1 for even i, -1 for odd i)
// The launcher must provide blockDim.x ints of dynamic shared memory and
// launch exactly one block (result[0] is written unconditionally).
__global__
void gpu_reduce(int* values, int* result, unsigned int amount) {
// Block-shared scratch for the tree reduction (sized by the launch).
extern __shared__ int shared_memory[];
// Per-thread partial sum over a blockDim.x-strided sweep of the input.
int temp_score = 0;
// Loop over all the samples
for(unsigned int index = threadIdx.x; index < amount; index += blockDim.x) {
// Store the score in the temp variable
temp_score += values[index];
}
// Apply the alternating sign. Because the sweep stride is blockDim.x,
// element parity equals threadIdx.x parity only when blockDim.x is even
// (true for the 1024-thread launch used in main).
shared_memory[threadIdx.x] = temp_score * (1 - 2 * (threadIdx.x % 2));
// Wait for all threads to finish the previous task
__syncthreads();
// Tree reduction in shared memory; assumes blockDim.x is a power of two.
for(unsigned int offset = blockDim.x / 2; offset > 0; offset /= 2) {
// Check if we are inside the offset
if(threadIdx.x < offset){
// Add the offset value to the current value
shared_memory[threadIdx.x] += shared_memory[threadIdx.x + offset];
}
__syncthreads();
}
// Thread 0 publishes the total (safe only with a one-block launch).
if(threadIdx.x == 0) {
result[0] = shared_memory[0];
}
}
// Host reference reduction with alternating signs:
//   result[0] = values[0] - values[1] + values[2] - ...
void cpu_reduce(int* values, int* result, unsigned int amount) {
    int total = 0;
    int sign = 1;
    for (unsigned int i = 0; i < amount; i++) {
        total += sign * values[i];
        sign = -sign;
    }
    *result = total;
}
// Benchmarks the alternating-sign reduction on CPU vs GPU across dataset
// sizes 2^0..2^ARRAY_POWER_SIZE, ENSEMBLES repetitions each, and writes
// per-run timings to two CSV files.
int main() {
// Set the seed
printf("Using random seed: %d \n", RANDOM_SEED);
// Select the defined CUDA device
printf("Using CUDA device: %d \n", CUDADEVICE);
cudaSetDevice(CUDADEVICE);
unsigned int array_size = pow(2, ARRAY_POWER_SIZE);
// Declare the two arrays of data
int* cpu_array;
int* gpu_array;
printf("Allocating memory \n");
// Allocate memory in both RAM and VRAM
cpu_array = (int*) malloc(sizeof(int) * array_size);
cudaMalloc(&gpu_array, sizeof(int) * array_size);
// NOTE: cudaMemset is byte-wise, so each int becomes 0x02020202 rather
// than 2; harmless here because generate_random_data overwrites it all.
cudaMemset(gpu_array, 2, sizeof(int) * array_size);
// Allocate the result variables
int* cpu_result = (int*) malloc(sizeof(int));
int* gpu_result;
cudaMallocManaged(&gpu_result, sizeof(int), cudaMemAttachGlobal);
// Allocate memory to store the results
//double* cpu_results = (double*) malloc(sizeof(double) * ENSEMBLES * ARRAY_POWER_SIZE);
//double* gpu_results = (double*) malloc(sizeof(double) * ENSEMBLES * ARRAY_POWER_SIZE);
// Generating a random stream of data
printf("Starting to create random values \n");
generate_random_data<<<256, 1024>>>(gpu_array, array_size);
cudaDeviceSynchronize();
// Copy the data from VRAM back to RAM so the CPU experiments use the
// same values as the GPU ones. (Fixed message: the copy is device->host.)
printf("Copy random data from VRAM to RAM \n");
cudaMemcpy(cpu_array, gpu_array, sizeof(int) * array_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(unsigned int index = 0; index < 65535; index++) {
printf("%d", cpu_array[index]);
}
// Time variables
clock_t cpu_start, cpu_stop;
cudaEvent_t gpu_start, gpu_stop;
float cpu_time, gpu_time;
// FIX: create the GPU timing events once up front. The original created
// a fresh pair on every inner loop iteration and never destroyed them,
// leaking two events per experiment (~2.2M events over the full run).
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
// The current dataset size
int datasize;
// Open a filestream to store the results
std::ofstream result_file_cpu, result_file_gpu;
result_file_cpu.open("reduce_cpu_results.csv");
result_file_gpu.open("reduce_gpu_results.csv");
// Lets create headers for each of the 2^n power experiments
for(unsigned int power = 0; power <= ARRAY_POWER_SIZE; power++) {
// Create the column header
result_file_cpu << (int)pow(2, power);
result_file_gpu << (int)pow(2, power);
if(power != ARRAY_POWER_SIZE){
result_file_cpu << ";";
result_file_gpu << ";";
}
}
// Print newline
result_file_cpu << "\n";
result_file_gpu << "\n";
printf("Starting experiments...\n");
// Run several ensembles in order to compensate for noise
for(unsigned int ensemble = 0; ensemble < ENSEMBLES; ensemble++) {
printf("Ensemble: %d \n", ensemble);
// Start the experiments
for(unsigned int power = 0; power <= ARRAY_POWER_SIZE; power++) {
datasize = (int)pow(2, power);
// Start the CPU time
cpu_start = clock();
// Perform the CPU reduction
cpu_reduce(cpu_array, cpu_result, datasize);
// Stop the CPU time
cpu_stop = clock();
// Store the CPU time in the time variable
cpu_time = ((double)(cpu_stop - cpu_start)) / CLOCKS_PER_SEC;
// Measure the time before launching the kernel
cudaEventRecord(gpu_start);
// Launch the GPU kernel: one block, 1024 threads, 1024 ints of
// dynamic shared memory (as gpu_reduce requires).
gpu_reduce<<<1,1024,sizeof(int) * 1024>>>(gpu_array, gpu_result, datasize);
// Measure the time after launching the kernel
cudaEventRecord(gpu_stop);
// Stop the time
cudaEventSynchronize(gpu_stop);
// Calculate the time difference
cudaEventElapsedTime(&gpu_time, gpu_start, gpu_stop);
// Store result in result array
// cpu_results[(ensemble * ARRAY_POWER_SIZE) + power] = (double)cpu_time;
// gpu_results[(ensemble * ARRAY_POWER_SIZE) + power] = (double)gpu_time / 1000;
result_file_cpu << (double)cpu_time;
result_file_gpu << (double)gpu_time / 1000;
if(power != ARRAY_POWER_SIZE){
result_file_cpu << ";";
result_file_gpu << ";";
}
}
result_file_cpu << "\n";
result_file_gpu << "\n";
}
// Release the timing events
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
// Write the output to a csv file
// CPU time
/*for(unsigned int ensemble_id = 0; ensemble_id < ENSEMBLES; ensemble_id++) {
for(unsigned int power = 0; power <= ARRAY_POWER_SIZE; power++) {
result_file_cpu << cpu_results[(ensemble_id * ARRAY_POWER_SIZE) + power] << ";";
}
result_file_cpu << "\n";
}
// GPU time
for(unsigned int ensemble_id = 0; ensemble_id < ENSEMBLES; ensemble_id++) {
for(unsigned int power = 0; power <= ARRAY_POWER_SIZE; power++) {
result_file_gpu << gpu_results[(ensemble_id * ARRAY_POWER_SIZE) + power] << ";";
}
result_file_gpu << "\n";
}
*/
// Close the file streams
result_file_cpu.close();
result_file_gpu.close();
// Deallocate results
//free(cpu_results);
//free(gpu_results);
// Deallocate memory
free(cpu_array);
cudaFree(gpu_array);
free(cpu_result);
cudaFree(gpu_result);
// Reset cuda device
cudaDeviceReset();
printf("Done! \n");
return 0;
}
|
23,388 | #include "includes.h"
using namespace std;
#define D 3
#define N 200
#define K 512
#define Nt 20
#define Rt 0.1f
#define c 0.001f
#define ct 0.0001f
// Advances an ensemble of particles (position Q, momentum P, energy Eg)
// through 10 explicit sub-steps of size c: particles reflect off the walls
// of the unit cube and off Nt spherical "turbulence" regions (centers Qt,
// momenta Pt, radius Rt), with energy bookkeeping against thresholds Epg.
// One thread per particle: blockIdx.x selects a group of K particles,
// threadIdx.x the particle within it; each particle owns D=3 components.
// (Original comments were CP1251-mojibake Russian; translated below.)
__global__ void addcuda(float* Q, float* P, float* Qt, float* Pt, float* Eg, float* Epg) {
for (int j = 0; j < 10; j++) {
int x = blockIdx.x;
int y = threadIdx.x;
int i = x * K * D + y * D;
float Px = P[i + 0];
float Py = P[i + 1];
float Pz = P[i + 2];
float E = Eg[i/3];
float Ep = Epg[i/3];
float Qx = Q[i + 0];
float Qy = Q[i + 1];
float Qz = Q[i + 2];
// Tentative new position after one Euler step of size c.
float nQx = Q[i + 0] + c * P[i + 0];
float nQy = Q[i + 1] + c * P[i + 1];
float nQz = Q[i + 2] + c * P[i + 2];
// Reflection off the walls of the [0,1]^3 domain.
if ((nQx > 1) || (nQx < 0)) {
Px = (-1) * Px;
}
if ((nQy > 1) || (nQy < 0)) {
Py = (-1) * Py;
}
if ((nQz > 1) || (nQz < 0)) {
Pz = (-1) * Pz;
}
// Reflection off the turbulences.
// NOTE(review): Qt/Pt are read as Qt[nt+0..2] while nt advances by 1, so
// consecutive turbulences overlap by two components. If each turbulence
// stores D=3 floats this likely should be Qt[nt*D + ...] — confirm
// against how the host fills Qt/Pt.
// NOTE(review): pow/sqrt/abs are the double-precision overloads applied
// to floats; powf/sqrtf/fabsf would avoid the promotions.
for (int nt = 0; nt < Nt; nt += 1) {
float Range = (sqrt(pow(Qx - Qt[nt + 0], 2) + pow(Qy - Qt[nt + 1], 2) + pow(Qz - Qt[nt + 2], 2)));
float nRange = (sqrt(pow(nQx - Qt[nt + 0], 2) + pow(nQy - Qt[nt + 1], 2) + pow(nQz - Qt[nt + 2], 2)));
// The step would cross into the turbulence sphere: reflect and exchange
// energy along the contact direction.
if((Range > Rt) && (nRange < Rt)) {
float DirX = (nQx - Qt[nt + 0]) / Range;
float DirY = (nQy - Qt[nt + 1]) / Range;
float DirZ = (nQz - Qt[nt + 2]) / Range;
float PnormKoe = ((Px * DirX) + (Py * DirY) + (Pz * DirZ));
float Pnormt = ((Pt[nt + 0] * DirX) + (Pt[nt + 1] * DirY) + (Pt[nt + 2] * DirZ));
E -= (ct / c) * (PnormKoe * PnormKoe) * (Pnormt * abs(Pnormt));
Px -= 2 * DirX;
Py -= 2 * DirY;
Pz -= 2 * DirZ;
}
}
// The particle leaves the domain: its energy is recorded and reset to the
// initial value; the particle continues along its trajectory.
// Ep is a random threshold depending linearly on the energy.
if ((nQz > 1) && (E > Ep)) {
E = 100.0f;
}
// Adiabatic cooling in the upper half of the domain.
if (nQz > 0.5) {
E -= 0.0001f;
}
// Energy increment when crossing the mid-plane z = 0.5 (either direction).
if (((nQz > 0.5f) && (Qz < 0.5f)) || ((Qz > 0.5f) && (nQz < 0.5f))) {
E += 1.0f;
}
// Write the updated state back to global memory.
Q[i + 0] = nQx;
Q[i + 1] = nQy;
Q[i + 2] = nQz;
P[i + 0] = Px;
P[i + 1] = Py;
P[i + 2] = Pz;
Eg[i/3] = E;
}
} |
23,389 | #include "includes.h"
// Block-level tree reduction: each block sums 2*blockDim.x elements of X
// (one at i, a second at i + gridDim.x*blockDim.x) and writes its total
// to X[blockIdx.x], overwriting the input in place.
// NOTE(review): partialSum is declared with BLOCK_SIZE entries but indexed
// up to tx + blockDim.x, so BLOCK_SIZE must be at least 2*blockDim.x for
// the launch configuration used — confirm against the launcher.
// NOTE(review): the else branch zero-pads only partialSum[tx]; the second
// slot partialSum[tx + blockDim.x] stays uninitialized for out-of-range
// threads, and X[i + gridDim.x*blockDim.x] is read without its own bounds
// check — both assume N covers the full 2*grid width.
__global__ void partialSumKernel(int *X, int N)
{
__shared__ int partialSum[BLOCK_SIZE];
int tx = threadIdx.x;
int i = blockIdx.x * blockDim.x + tx;
if (i < N) {
// Each thread loads two elements, halving the number of blocks needed.
partialSum[tx] = X[i];
partialSum[tx + blockDim.x] = X[i + gridDim.x * blockDim.x];
//printf("X[%d + %d * %d] = %d\n", i,gridDim.x, blockDim.x, X[i + gridDim.x * blockDim.x]);
}
else
partialSum[tx] = 0; // last block may pad with 0's
// Halving-stride tree reduction; the barrier before each round also
// covers the initial loads.
for (int stride = blockDim.x; stride > 0; stride = stride/2)
{
__syncthreads();
if (tx < stride) {
//printf("tx[%d], bx[%d]: %d + %d\n", tx, blockIdx.x, partialSum[tx], partialSum[tx + stride]);
partialSum[tx] += partialSum[tx + stride];
}
}
// Thread 0 publishes this block's partial sum.
if (tx == 0)
X[blockIdx.x] = partialSum[tx];
} |
23,390 | //
// TriggerSelection.cpp
// HiggsAnalysis_new
//
// Created by Joona Havukainen on 5/31/19.
// Copyright © 2019 Joona Havukainen. All rights reserved.
//
// Returns true when the L1 missing-ET magnitude exceeds the cut.
// hypotf computes sqrtf(x*x + y*y) without intermediate overflow or
// underflow, replacing the two powf(x, 2.f) calls of the original.
__device__
bool L1METTrigger(float L1MET_x, float L1MET_y, float L1MET_cut)
{
float L1MET = hypotf(L1MET_x, L1MET_y);
return L1MET>L1MET_cut;
}
// One thread per event: records whether the event fires the HLT bit at
// triggerIndex+2 AND passes the L1 MET cut (components at +0/+1), then
// folds that result into the running `passed` flag.
__global__
void triggerSelection(float *inputArray, bool *passedArray, bool *passed, float L1MetCut, int variablesPerEvent, int nEvents, int triggerIndex)
{
int eventIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(eventIdx<nEvents)
{
// Base offset of this event's trigger variables in the flat array.
int base = eventIdx * variablesPerEvent + triggerIndex;
bool hltBit = (bool)inputArray[base + 2];
bool l1Met = L1METTrigger(inputArray[base + 0], inputArray[base + 1], L1MetCut);
passedArray[eventIdx] = hltBit && l1Met;
passed[eventIdx] = passed[eventIdx] && passedArray[eventIdx];
}
}
|
23,391 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define DEBUG 1
#define MAX_BLOCKS (32*1024)
// #define MAX_BLOCKS (13)
#define COPY_THREADS 128
#define max(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
#define CUDACHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with its source location; terminates the
// process unless abort is false. Used via the CUDACHECK macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// rotates used by bucket algo.
// Row-rotation used by the bucket all-to-all: dst row r receives src row
// (r + rotate) mod height, width doubles per row, via a grid-stride loop.
// Assumes rotate >= 0 (modular arithmetic below is unsigned).
__global__ void rotate(
double *dst, double *src, size_t width, int height, int rotate
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
size_t n = (width*height);
rotate = rotate % height;
#pragma unroll
// FIX: offsets are size_t — the original used int, which both compared
// signed-vs-unsigned against n and overflowed for n > INT_MAX elements.
for (
size_t dst_offset = index, src_offset = (index + rotate*width) % n;
dst_offset < n;
dst_offset += num_threads,
src_offset = (src_offset + num_threads) % n
) {
dst[dst_offset] = src[src_offset];
}
}
// Inverse of rotate(): dst row r receives src row (rotate - r) mod height,
// undoing the forward rotation applied earlier in the bucket algorithm.
// NOTE(review): the per-iteration column stepping assumes num_threads is a
// multiple of width (so row_offset stays fixed across iterations) — holds
// for the launch configs in MPI_All2All_bucket but is not checked.
// NOTE(review): dst_offset is int compared against size_t n; overflows for
// more than 2^31 elements (same hazard rotate() had).
__global__ void rotate_rev(
double *dst, double *src, size_t width, int height, int rotate
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
size_t n = (width*height);
// row offset is going to be indexed forward,
int row_offset = index % width;
// but each successive col will precede the last.
int col_offset = index / width;
/*
for rotate=2, height=4
dst 0 1 2 3 | 4 5 6 7 | 8 9 A B | C D E F
src 8 9 A B | 4 5 6 7 | 0 1 2 3 | C D E F
*/
int num_cols = num_threads / width;
// Fix the row component once; only the source column advances below.
src += row_offset;
int dst_offset = col_offset*width + row_offset;
int src_col = ((height + rotate - col_offset) % height);
#pragma unroll
for (
;
dst_offset < n;
dst_offset += num_threads,
src_col = (height + src_col - num_cols) % height
) {
dst[dst_offset] = src[src_col*width];
}
}
// Grid-stride element-wise copy of num_elems doubles from src to dst.
__global__ void copy(double *dst, double *src, size_t num_elems) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
#pragma unroll
// FIX: the loop offset is size_t — the original int both compared
// signed-vs-unsigned against num_elems and overflowed past 2^31 elements.
for (size_t offset = index; offset < num_elems; offset += num_threads) {
// might want to use shared memory? looked like that had higher throughtput
// in the transpose experiments.
dst[offset] = src[offset];
}
}
// Bucket-algorithm all-to-all across `size` GPUs using direct peer copies:
// each rank's send buffer is first rotated into tempbuf, then size-1 rounds
// shift a shrinking tail of data one rank to the right, and a final reverse
// rotation lands everything in recvbuf.
// Assumes sendbuf/recvbuf/tempbuf each hold size * (size*sendcount) doubles
// laid out per-rank, and that peer access between the devices was enabled
// by the caller (see main).
void MPI_All2All_bucket(
double* sendbuf, size_t sendcount,
double* recvbuf, size_t recvcount,
double* tempbuf,
int size,
// just use this for debugging
double* host_buffer
) {
// ln = doubles owned per rank; rank r's region is [r*ln, (r+1)*ln).
size_t ln = sendcount * size;
size_t threads = min(sendcount, COPY_THREADS);
size_t blocks = min(MAX_BLOCKS, max(1, sendcount / COPY_THREADS));
// Initial rotation: rank r's chunks are rotated by r rows into tempbuf.
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
rotate<<<blocks,threads>>>(tempbuf + rank*ln, sendbuf + rank*ln, sendcount, size, rank);
CUDACHECK( cudaGetLastError() );
}
#if DEBUG
printf("after rotate\n");
// NOTE(review): cudaMemset on host_buffer (pinned *host* memory from
// cudaMallocHost) — cudaMemset expects a device pointer; confirm this is
// intended (a plain memset would be the conventional choice).
CUDACHECK( cudaMemset(host_buffer, 0, ln*size*sizeof(double)) );
CUDACHECK( cudaMemcpy(host_buffer, tempbuf, ln*size*sizeof(double), cudaMemcpyDeviceToHost) );
for (int i = 0; i < size; i++) {
printf("p%d\t\t", i+1);
}
printf("\n");
for (size_t j = 0; j < ln; j++) {
for (int i = 0; i < size; i++) {
printf("%f\t", host_buffer[i*ln + j]);
}
printf("\n");
}
printf("\n");
#endif
// size-1 shift rounds; odd rounds bounce through recvbuf, even rounds
// write straight back into tempbuf.
for (int i = 1; i < size; i++) {
// Drain all devices before the next round's cross-device reads.
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
CUDACHECK( cudaDeviceSynchronize() );
}
// task(?) size: number of sendcount-chunks still in flight this round.
int ts = size - i;
if (i % 2 == 1) {
// send to right.
for (int rank = 0; rank < size; rank++) {
int rank_left = (size + rank - 1) % size;
CUDACHECK( cudaSetDevice(rank) );
// CUDACHECK( cudaMemPrefetchAsync(
// tempbuf + rank_left*ln + i*sendcount,
// sendcount*ts*sizeof(double),
// rank
// ) );
copy<<<blocks, threads>>>(
recvbuf + rank*ln + i*recvcount,
tempbuf + rank_left*ln + i*sendcount,
sendcount*ts
);
CUDACHECK( cudaGetLastError() );
}
// copy my chunk of received buffer into tempbuf.
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
copy<<<blocks, threads>>>(
tempbuf + rank*ln + i*recvcount,
recvbuf + rank*ln + i*recvcount,
sendcount
);
CUDACHECK( cudaGetLastError() );
}
} else {
// send to right.
for (int rank = 0; rank < size; rank++) {
int rank_left = (size + rank - 1) % size;
CUDACHECK( cudaSetDevice(rank) );
// CUDACHECK( cudaMemPrefetchAsync(
// tempbuf + rank_left*ln + i*sendcount,
// sendcount*ts*sizeof(double),
// rank
// ) );
copy<<<blocks, threads>>>(
tempbuf + rank*ln + i*recvcount,
recvbuf + rank_left*ln + i*sendcount,
sendcount*ts
);
CUDACHECK( cudaGetLastError() );
}
}
}
// rotate the data in tempbuf to recvbuf.
for (int rank = 0; rank < size; rank++) {
CUDACHECK( cudaSetDevice(rank) );
rotate_rev<<<blocks,threads>>>(recvbuf + rank*ln, tempbuf + rank*ln, sendcount, size, rank);
CUDACHECK( cudaGetLastError() );
}
}
// Driver: reads total element count n and device count p from argv, sets up
// peer access, runs one timed MPI_All2All_bucket exchange across p GPUs,
// and (under DEBUG) prints the buffers before and after.
// NOTE(review): argv[1]/argv[2] are read without an argc check.
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int p = atoi(argv[2]);
// min of p and device count
int device_count = 0;
CUDACHECK( cudaGetDeviceCount(&device_count) );
p = p > device_count ? device_count : p;
// ln = doubles per device, sn = doubles per (device, peer) chunk.
// Integer division: assumes p divides n and p divides ln.
int ln = n / p;
int sn = ln / p;
// Set peer access.
for (int device = 0; device < p; device++) {
CUDACHECK( cudaSetDevice(device) );
for (int peer = 0; peer < p; peer++) {
int canAccessPeer = 0;
cudaDeviceCanAccessPeer(&canAccessPeer, device, peer);
if (canAccessPeer) {
cudaDeviceEnablePeerAccess(peer, 0);
printf("%d can access %d\n", device, peer);
}
}
}
double *send_buffer;
double *recv_buffer;
double *temp_buffer;
double *host_buffer;
// Total byte size for all p ranks' data combined.
size_t size = n * sizeof(double);
CUDACHECK( cudaMallocManaged(&send_buffer, size) );
CUDACHECK( cudaMallocManaged(&recv_buffer, size) );
CUDACHECK( cudaMallocManaged(&temp_buffer, size) );
CUDACHECK( cudaMallocHost(&host_buffer, size) );
// Initialize: value encodes (rank+1).(index+1) for easy visual checking.
for (int i = 0; i < p; i++) {
for (size_t j = 0; j < ln; j++) {
host_buffer[i*ln + j] = (i+1)*1.0 + (j+1)*0.01;
}
}
CUDACHECK( cudaMemcpy(send_buffer, host_buffer, size, cudaMemcpyHostToDevice) );
CUDACHECK( cudaMemset(recv_buffer, 0, size) );
CUDACHECK( cudaMemset(temp_buffer, 0, size) );
#if DEBUG
for (int i = 0; i < p; i++) {
printf("p%d\t\t", i+1);
}
printf("\n");
for (size_t j = 0; j < ln; j++) {
for (int i = 0; i < p; i++) {
printf("%f\t", host_buffer[i*ln + j]);
}
printf("\n");
}
printf("\n");
#endif
// device events for timing (one start/stop pair per device).
// NOTE(review): the events and the start/stop arrays are never
// destroyed/freed before exit.
cudaEvent_t *start = (cudaEvent_t *) malloc(p*sizeof(cudaEvent_t));
cudaEvent_t *stop = (cudaEvent_t *) malloc(p*sizeof(cudaEvent_t));
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
CUDACHECK( cudaEventCreate(start + device) );
CUDACHECK( cudaEventCreate(stop + device) );
}
CUDACHECK( cudaGetLastError() );
for (int iters = 0; iters < 1; iters++) {
// Start
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
CUDACHECK( cudaEventRecord(start[device]) );
}
// All to all
MPI_All2All_bucket(send_buffer, sn, recv_buffer, sn, temp_buffer, p, host_buffer);
// Stop
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
CUDACHECK( cudaEventRecord(stop[device]) );
}
// Per-device elapsed time between this device's start/stop records.
for (int device = 0; device < p; device++) {
cudaSetDevice(device);
cudaDeviceSynchronize();
float time_ms;
CUDACHECK( cudaEventElapsedTime(&time_ms, start[device], stop[device]) );
printf("p%d: %f ms\n", device + 1, time_ms);
}
}
#if DEBUG
CUDACHECK( cudaMemcpy(host_buffer, recv_buffer, size, cudaMemcpyDeviceToHost) );
for (int i = 0; i < p; i++) {
printf("p%d\t\t", i+1);
}
printf("\n");
for (size_t j = 0; j < ln; j++) {
for (int i = 0; i < p; i++) {
printf("%f\t", host_buffer[i*ln + j]);
}
printf("\n");
}
printf("\n");
#endif
// -- Cleanup -------------
CUDACHECK( cudaFreeHost(host_buffer) );
CUDACHECK( cudaFree(temp_buffer) );
CUDACHECK( cudaFree(recv_buffer) );
CUDACHECK( cudaFree(send_buffer) );
}
|
23,392 | #include "includes.h"
// Narrows each of numElements doubles to float, one thread per element.
__global__ void doubleToFloat(double* input, float* output, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard the tail of the grid when numElements isn't a block multiple.
    if (idx >= numElements)
        return;
    output[idx] = (float)input[idx];
} |
23,393 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define N (1 << 12)
#define tile_size 64
#define block_size 16
// Aborts with a labelled message if the most recent CUDA call failed.
void checkCUDAError(const char *msg) {
    cudaError_t code = cudaGetLastError();
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(code));
    exit(EXIT_FAILURE);
}
// CPU reference transpose of the N x N matrix src into out (row-major).
void serialTranspose(float *src, float *out){
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < N; c++) {
            out[N*c + r] = src[N*r + c];
        }
    }
}
// Returns 0 (and prints a success line) when out is the exact transpose of
// src; returns 1 on the first mismatch.
int transposeCheck(float *src, float *out){
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < N; c++) {
            if (src[N*r + c] != out[N*c + r]) {
                printf("Transpose Incorrect\n");
                return 1;
            }
        }
    }
    printf("Transpose Correct\n");
    return 0;
}
// Populates all N*N entries with pseudo-random values from rand().
void fillArray(float *arr){
    for (int r = 0; r < N; r++)
        for (int c = 0; c < N; c++)
            arr[N*r + c] = rand();
}
// Naive global-memory transpose: one thread per element; reads are
// coalesced, writes are strided by N.
__global__ void gpuTransposeGlobal(float *src, float *out){
    unsigned int idx = threadIdx.x + blockDim.x*blockIdx.x;
    if (idx >= N*N)
        return;
    unsigned int row = idx / N;
    unsigned int col = idx % N;
    out[N*col + row] = src[idx];
}
// Tiled transpose through shared memory: each tile_size x tile_size tile is
// staged in shared memory so both the global read and the global write are
// coalesced. Launch: grid (N/tile_size, N/tile_size), block
// (tile_size, block_size), with block_size dividing tile_size.
__global__ void gpuTransposeShared(float *src, float *out){
    // FIX: +1 column of padding removes shared-memory bank conflicts on the
    // transposed (column-wise) reads of the tile; results are unchanged.
    __shared__ float tile[tile_size][tile_size + 1];
    int tc = threadIdx.x;
    int tr = threadIdx.y;
    int c = blockIdx.x*tile_size + threadIdx.x;
    int r = blockIdx.y*tile_size + threadIdx.y;
    // Cooperative load: each thread copies tile_size/block_size rows.
    for(int i = 0; i < tile_size; i = i + block_size){
        tile[tr+i][tc] = src[N*(r+i) + c];
    }
    // All loads must land before any thread reads a transposed slot.
    __syncthreads();
    // Swap block coordinates so the write targets the transposed tile.
    c = blockIdx.y*tile_size + threadIdx.x;
    r = blockIdx.x*tile_size + threadIdx.y;
    for(int i = 0; i < tile_size; i = i + block_size){
        out[N*(r+i)+c] = tile[tc][tr+i];
    }
}
// Benchmarks the CPU, global-memory GPU, and shared-memory GPU transposes,
// validates each result, and prints timings/throughputs plus a CSV line.
int main(int argc, char** argv){
// Setup time variables
float timecpu = 0;
float timegpug = 0;
float timegpus = 0;
float tpcpu = 0;
float tpgpug = 0;
float tpgpus = 0;
cudaEvent_t launch_begin_seq, launch_end_seq;
// Host variables
float *h_arr = (float*)malloc(N*N*sizeof(float));
float *h_out = (float*)malloc(N*N*sizeof(float));
//Device variables
float *d_arr, *d_out;
cudaMalloc((void**)&d_arr, N*N*sizeof(float));
cudaMalloc((void**)&d_out, N*N*sizeof(float));
// Check Memory Allocation
if(h_arr == 0 || h_out == 0 || d_arr == 0 || d_out == 0){
printf("Memory Allocation Failed!\n");
return 1;
}
// Fill Array
fillArray(h_arr);
memset(h_out, 0, N*N*sizeof(float));
// FIX: create the timing events once and reuse them for all three timed
// sections. The original re-created the same pair before each section
// without destroying the previous one, leaking events.
cudaEventCreate(&launch_begin_seq);
cudaEventCreate(&launch_end_seq);
//Start CPU Transpose
cudaEventRecord(launch_begin_seq,0);
serialTranspose(h_arr, h_out);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
if(transposeCheck(h_arr, h_out) == 0){
cudaEventElapsedTime(&timecpu, launch_begin_seq, launch_end_seq);
printf("CPU time: %f ms\n", timecpu);
tpcpu = 1e-9*N*N/(timecpu*1e-3);
printf("Throughput = %f Gflops/s\n\n", tpcpu);
}
// Prep Block And Thread variables
size_t num_blocks = (N*N)/(block_size*tile_size);
// BUG FIX: '%' and '*' have equal precedence and associate left-to-right,
// so the original `(N*N) % block_size*tile_size` evaluated as
// `((N*N) % block_size) * tile_size` instead of testing divisibility by
// the full launch width. Parenthesize the divisor.
if((N*N) % (block_size*tile_size)) ++num_blocks;
// Prep device memory
cudaMemset(d_arr, 0, N*N*sizeof(float));
cudaMemcpy(d_arr, h_arr, N*N*sizeof(float), cudaMemcpyHostToDevice);
memset(h_out, 0, N*N*sizeof(float));
cudaMemset(d_out, 0, N*N*sizeof(float));
// Start global GPU Transpose
cudaEventRecord(launch_begin_seq,0);
gpuTransposeGlobal<<<num_blocks, block_size*tile_size>>>(d_arr, d_out);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
// Copy Memory back to Host
cudaMemcpy(h_out, d_out, N*N*sizeof(float), cudaMemcpyDeviceToHost);
// Check For Cuda Errors
checkCUDAError("gpuTranspose");
if(transposeCheck(h_arr, h_out) == 0){
cudaEventElapsedTime(&timegpug, launch_begin_seq, launch_end_seq);
printf("Global Memory GPU time: %f ms\n", timegpug);
tpgpug = 1e-9*N*N/(timegpug*1e-3);
printf("Throughput = %f Gflops/s\n\n", tpgpug);
}
dim3 dimGrid(N/tile_size, N/tile_size, 1);
dim3 dimBlock(tile_size, block_size, 1);
memset(h_out, 0, N*N*sizeof(float));
cudaMemset(d_out, 0, N*N*sizeof(float));
// Start shared GPU Transpose
cudaEventRecord(launch_begin_seq,0);
gpuTransposeShared<<<dimGrid, dimBlock>>>(d_arr, d_out);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
// Copy Memory back to Host
cudaMemcpy(h_out, d_out, N*N*sizeof(float), cudaMemcpyDeviceToHost);
// Check For Cuda Errors
checkCUDAError("gpuTransposeShared");
if(transposeCheck(h_arr, h_out) == 0){
cudaEventElapsedTime(&timegpus, launch_begin_seq, launch_end_seq);
printf("Shared Memory GPU time: %f ms\n", timegpus);
tpgpus = 1e-9*N*N/(timegpus*1e-3);
printf("Throughput = %f Gflops/s\n\n", tpgpus);
}
printf("Global Speed up = %f \n", timecpu/timegpug);
printf("Global ratio = %f \n\n", tpgpug/tpcpu);
printf("Shared Speed up = %f \n", timecpu/timegpus);
printf("Shared ratio = %f \n\n", tpgpus/tpcpu);
printf("CSV output:\n");
printf("%i,%i,%i,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f", N, tile_size, block_size, timecpu, timegpug, timegpus, tpcpu, tpgpug, tpgpus, timecpu/timegpug, timecpu/timegpus, tpgpug/tpcpu, tpgpus/tpcpu);
// Release the timing events
cudaEventDestroy(launch_begin_seq);
cudaEventDestroy(launch_end_seq);
// Free Host variables
free(h_arr);
free(h_out);
// Free Device variables
cudaFree(d_arr);
cudaFree(d_out);
return 0;
}
|
23,394 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: evaluates a fixed tree of
// arithmetic/intrinsic expressions over the scalar arguments and prints the
// final value of `comp`. The exact expressions (and their float constants)
// are the test payload — intentionally left byte-for-byte untouched.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,int var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24) {
if (comp == (-1.2579E-35f - (var_1 + (-1.0518E-1f * (+1.6866E-35f - -1.9646E-36f))))) {
if (comp > (+1.3266E-8f + (var_2 * +1.3132E-43f + sinhf(var_3 - var_4 - (var_5 - var_6))))) {
float tmp_1 = +1.2442E-36f * atanf((+1.0640E-44f + var_8 * var_9));
comp = tmp_1 * (-1.3575E-36f / var_10 * var_11 - var_12);
for (int i=0; i < var_7; ++i) {
comp += var_13 * var_14;
comp = (+1.1025E-35f * -1.9304E-41f * +1.8745E34f - +1.8769E-42f);
comp += -0.0f + (+1.4865E-36f + (+0.0f * +1.5963E-43f));
comp += var_15 / ldexpf((-1.9723E35f / (-1.5751E36f * +1.5224E-37f / var_16)), 2);
}
if (comp > +0.0f + (var_17 * tanhf(+1.5846E-36f * (var_18 + +1.4856E-43f + -0.0f)))) {
comp = acosf((var_19 * (var_20 + var_21 * (var_22 - var_23 + var_24))));
}
}
}
// %.17g preserves the full float value for bit-level result comparison.
printf("%.17g\n", comp);
}
// Heap-allocates a 10-element float array with every slot set to v.
// Caller owns (and must free) the returned buffer.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    int k = 0;
    while (k < 10) {
        buf[k] = v;
        ++k;
    }
    return buf;
}
// Driver for the generated kernel: parses 25 numeric command-line values
// and forwards them to a single-thread launch of compute().
int main(int argc, char** argv) {
/* Program variables */
// FIX: the kernel needs exactly 25 arguments; fail fast with a usage
// message instead of dereferencing missing argv entries (which crashed).
if (argc < 26) {
fprintf(stderr, "usage: %s v1 v2 ... v25\n", argv[0]);
return 1;
}
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
int tmp_8 = atoi(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25);
// Wait for the kernel so its device printf output is flushed.
cudaDeviceSynchronize();
return 0;
}
|
23,395 | #include "cuda_runtime.h"
#include "stdio.h"
__device__ float devData;
// Prints the device-resident global `devData`, then increments it by 2 so
// the host can observe the change after copying the symbol back.
__global__ void checkGlobalVariable(){
    printf("Device: the value of the global variable is: %f\n", devData);
    devData = devData + 2.0f;
}
// Demonstrates host<->device traffic through a __device__ global symbol:
// writes 3.14 into devData, lets the kernel add 2, reads it back.
int main(void){
    float value = 3.14f;
    cudaMemcpyToSymbol(devData, &value, sizeof(float));
    printf("Host: copied %f to the global variable \n", value);
    checkGlobalVariable<<<1, 1>>>();
    // copy back (cudaMemcpyFromSymbol blocks, so the kernel's update is
    // visible here without an explicit synchronize)
    cudaMemcpyFromSymbol(&value, devData, sizeof(float));
    printf("Host the value changed by the kernel to %f \n", value);
    cudaDeviceReset();
    // FIX: EXIT_SUCCESS lives in <stdlib.h>, which this file never
    // includes; return 0 (the identical value) to keep the translation
    // unit self-contained.
    return 0;
} |
23,396 | #include "cuUtils.cuh"
// previously named normalizeVectorSum (renamed to reciprocal)
// Replaces each nonzero element of v with its multiplicative inverse;
// zeros are left untouched. Grid-stride loop: any launch shape covers n.
__global__ void reciprocal(double * v, int n){
    int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step){
        double value = v[idx];
        if (value != 0.0){
            v[idx] = 1.0 / value;
        }
    }
}
// Fused scaled elementwise product-accumulate (grid-stride loop):
//   z = z + a * (x .* y)
// NOTE(review): the element count `n` is declared double and compared
// against an int index — exact for counts representable in both types, but
// an integer parameter would be the conventional choice; confirm callers.
__global__ void saxdotpy(double a, double * x, double *y, double n, double *z){
// perform following operation
// z = z + a*(x.*y);
// grid stride loop
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){
z[i] += a*x[i]*y[i];
}
}
// Elementwise product: z[i] = x[i] * y[i] for all i < n (grid-stride).
__global__ void elemByElem(int n, double *x, double *y, double *z){
    int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride){
        z[idx] = x[idx] * y[idx];
    }
}
// Writes the magnitude |idata[i]| of each complex sample into odata
// (grid-stride loop rather than assuming one thread per element).
// hypot(x, y) equals sqrt(x*x + y*y) but avoids intermediate overflow and
// underflow for very large or very small components.
__global__ void absComplex(cufftDoubleComplex * idata, double *odata, int n){
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){
cufftDoubleComplex c = idata[i];
odata[i] = hypot(c.x, c.y);
}
}
/* Complex square root of c via Newton's method on f(x) = x^2 - c:
     x <- x - (x*x - c) / (2*x),  starting from x0 = c.
   Ten iterations are manually unrolled below.
   NOTE(review): there is no guard for c == 0 — the first step divides by
   2*c, so callers must avoid zero inputs. */
__device__ __inline__ cuDoubleComplex sqrtComplex(cuDoubleComplex c){
//Csub - subtract two double complex number: x - y
//Cmul - multiplicate two double complex number: x*y
cuDoubleComplex x = c;
cuDoubleComplex real2 = make_cuDoubleComplex (2.0, 0.0);
/*
for(unsigned iter=0; iter<10; iter++){
x = cuCsub(x,cuCdivf(cuCsub(cuCmul(x,x), c), cuCmul(real2,x))); //
}*/
// Manually unrolled loop (the original author questioned whether the
// unrolling is actually beneficial).
/*1*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*2*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*3*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*4*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*5*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*6*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*7*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*8*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*9*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*10*/ x = cuCsub(x,cuCdiv(cuCsub(cuCmul(x,x), c), cuCmul(real2,x)));
/*
int iter;
for(iter=0; iter<10; iter++){
x = cuCsubf(x,cuCdivf(cuCsubf(cuCmulf(x,x), c), cuCmulf(real2,x))); //
}
*/
return x;
}
// Expands the Hermitian-compressed output of a 2-D real-to-complex cuFFT
// (n/2+1 columns stored) back onto a full m x n real grid, copying only
// the real component of each bin; indices past the cut are mirrored.
// Launch assumption (from the indexing): gridDim.x == n, blockDim.x == m.
// (Original comments were Polish; translated below.)
__global__ void copyRealFromComplexCufft(cuDoubleComplex* complex, double* real, int m, int n){
/*
int x = threadIdx.x;
int y = blockIdx.x;
int real_index = x + y*m;
int cufft_height = m/2+1;
int complex_index;
if (x < cufft_height){
// data stored column-wise
complex_index = y*cufft_height + x;
}
else{
// index y falls outside the cufft-compressed matrix
x = m - x;
y = n - y;
// data stored column-wise
complex_index = y*cufft_height + x;
}
real[real_index] = complex[complex_index].x;
*/
int x = threadIdx.x;
int y = blockIdx.x;
int x2 = (m - x) % m; // mirrored row index
int y2 = (n - y) % n; // mirrored column index
int cut_cols = n/2+1;
int out_index = x + y*m; // equals threadIdx.x + blockIdx.x*blockDim.x only when blockDim.x == m
int in_index = (x + y*cut_cols)*(x < cut_cols) + (x2 + y2*cut_cols)*(x >= cut_cols); // branchless select: direct index inside the compressed half, mirrored index beyond it
// NOTE(review): for a full fft2 expansion the mirrored entries would also
// need complex conjugation; that does not matter here because only one
// real component is consumed.
if(in_index < cut_cols*m){
//real[out_index] = complex[in_index].x;
real[out_index] = complex[in_index].x;
}
}
// Expands the half-spectrum layout of a 1-D real-to-complex cuFFT result
// (n/2+1 stored bins) to n real samples: indices past the cut read the
// mirrored bin (n - index). One thread per output sample; no tail guard,
// so the launch must cover exactly n threads.
__global__ void copy_real_from_cufft_1d(cuDoubleComplex* complex, double* real, int n){
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    int cufft_width = n/2+1;
    int cufft_index = (index < cufft_width) ? index : (n - index);
    real[index] = complex[cufft_index].x;
}
// Rescales d_U by (max - min) and clamps each value into [min, max],
// storing the result in d_xk. Matlab equivalent from the original:
//   rec(rec<minx) = minx; rec(rec>maxx) = maxx; xk = rec;
// NOTE(review): the original comment mentions fctr = 1/URange, but the
// code multiplies by the range rather than dividing — confirm the intended
// scaling direction against the host-side pipeline.
__global__ void copy_with_comparison(double * d_U, double * d_xk, double * d_max_X, double * d_min_X, int n){
//rec(rec<minx) = minx; rec(rec>maxx) = maxx;
//xk = rec;
// fctr = 1/URange; URange = Umax - Umin
double max = d_max_X[0];
double min = d_min_X[0];
double range = max - min;
// grid stride loop
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){
double val = d_U[i]*range;
// Branchless clamp: exactly one of the three weight predicates is 1.
d_xk[i] = min*(val<min) + max*(val>max) + val*((!(val<min)) && (!(val>max)));
//d_xk[i] = val;
}
}
__global__ void normalize_ifft_result(double* ifft_vector, double denominator, int n){
    // Divide every element of the inverse-FFT output by `denominator`
    // (cufft inverse transforms are unnormalized).
    const int stride = blockDim.x * gridDim.x;
    // grid-stride loop
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride){
        ifft_vector[i] = ifft_vector[i] / denominator;
    }
}
__global__ void simple_copy_from_complex(cuDoubleComplex* complex, double* real, int n){
    // Copy one component of each cuDoubleComplex into the `real` output array.
    // NOTE(review): this reads .y — the IMAGINARY part — despite the names
    // "simple_copy_from_complex" / "real"; confirm this is intentional.
    const int stride = blockDim.x * gridDim.x;
    // grid-stride loop
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride){
        real[i] = complex[i].y;
    }
}
__global__ void generate_dct_matrix_coefficients(double *A, double *AT, double N){
    // Fill A with the N x N orthonormal DCT-II coefficient matrix (column-major,
    // per the original comments) and AT with its transpose:
    //   A[x + y*n] = sqrt(l/N) * cospi((2x+1) * y / (2N))
    // where the scale l is 1.0 for the DC row/column and 2.0 otherwise.
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    int y = threadIdx.y + blockIdx.y*blockDim.y;
    int n = N;
    // Fix: guard against threads outside the N x N matrix — a grid rounded up
    // to full blocks previously wrote out of bounds.
    if (x >= n || y >= n) return;
    double lx = 1.0 + (1.0)*(x>0); // 2.0 except for the first column
    double ly = 1.0 + (1.0)*(y>0); // 2.0 except for the first row
    // row major order
    // A[x + y*N] = cospi((2*x+1)*y/(2*N));
    // column major order
    AT[x + y*n] = sqrt(lx/N) * cospi((2.0*y+1.0)*x/(2.0*N));
    A[x + y*n] = sqrt(ly/N) * cospi((2.0*x+1.0)*y/(2.0*N));
}
23,397 | #include <stdio.h>
#include <cuda_runtime.h>
__global__ void test_print_kernel(const float* pdata, int ndata){
    // Debug helper: every thread prints one element of pdata together with its
    // own thread/block coordinates. The launcher (test_print) starts exactly
    // ndata threads, so no bounds guard is used here.
    //
    // General flattening scheme for a full 3D grid / 3D block (dims -> indexes):
    //   gridDim.z  -> blockIdx.z
    //   gridDim.y  -> blockIdx.y
    //   gridDim.x  -> blockIdx.x
    //   blockDim.z -> threadIdx.z
    //   blockDim.y -> threadIdx.y
    //   blockDim.x -> threadIdx.x
    // Pseudo code:
    //   position = 0
    //   for each (dim, index): position = position * dim + index
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    printf("Element[%d] = %f, threadIdx.x=%d, blockIdx.x=%d, blockDim.x=%d\n", idx, pdata[idx], threadIdx.x, blockIdx.x, blockDim.x);
}
void test_print(const float* pdata, int ndata){
    // Launch one block of ndata threads.
    // Launch syntax: <<<gridDim, blockDim, bytes_of_shared_memory, stream>>>
    test_print_kernel<<<1, ndata, 0, nullptr>>>(pdata, ndata);
    // A kernel launch returns no status directly; the error code is fetched
    // afterwards. cudaPeekAtLastError reads the current error WITHOUT clearing
    // it, while cudaGetLastError reads and clears it (a second call would then
    // report success). CUDA errors are sticky: if a failure here is not
    // cleared, every subsequent API call will keep returning the same error.
    cudaError_t status = cudaPeekAtLastError();
    if (status != cudaSuccess){
        const char* name = cudaGetErrorName(status);
        const char* message = cudaGetErrorString(status);
        printf("kernel error %s:%d test_print_kernel failed. \n code = %s, message = %s\n", __FILE__, __LINE__, name, message);
    }
}
23,398 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <assert.h>
__global__ void add(int *a, int *b, int *c) {
    // Element-wise c[i] = a[i] + b[i]. There is no bounds guard, so the launch
    // must cover the arrays exactly (this file launches N/THREADS_PER_BLOCK
    // blocks with N a multiple of THREADS_PER_BLOCK).
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    c[idx] = a[idx] + b[idx];
    // Fix: removed a trailing __syncthreads() — a barrier after the final
    // statement synchronizes nothing and only adds overhead.
}
void random_ints(int* a, int N) {
    // Fill a[0..N-1] with pseudo-random integers in the range [0, 1000).
    // rand() is never seeded here, so runs repeat the same sequence.
    for (int* p = a; p != a + N; ++p) {
        *p = rand() % 1000;
    }
}
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main(){
    // Allocate two random input vectors on host and device, add them on the
    // GPU, and verify the result element-by-element on the host.
    int *a, *b, *c;        // host arrays
    int *a_d, *b_d, *c_d;  // device arrays
    // Fix: size_t avoids signed-int overflow of sizeof(int)*N for larger N.
    size_t size = sizeof(int) * (size_t)N;
    cudaMalloc((void **) &a_d, size);
    cudaMalloc((void **) &b_d, size);
    cudaMalloc((void **) &c_d, size);
    // setup initial values:
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    // Fix: host allocations were previously used unchecked.
    if (a == NULL || b == NULL || c == NULL) {
        printf("host allocation failed\n");
        return 1;
    }
    random_ints(a, N);
    random_ints(b, N);
    cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, size, cudaMemcpyHostToDevice);
    add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(a_d, b_d, c_d);
    // Fix: a launch returns no status directly — fetch (and clear) any
    // launch-configuration error before trusting the results.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // cudaMemcpy blocks until the kernel finishes, so no explicit sync needed.
    cudaMemcpy(c, c_d, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        assert(a[i] + b[i] == c[i]);
    free(a); free(b); free(c);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    return 0;
}
|
23,399 | //
// kernel routine
//
__global__ void VecAdd_kernel(const float* A, const float* B, float* C, int N)
/* Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, N). */
{
    // Global index of this thread across the whole grid
    // (the exercise skeleton left this kernel unimplemented).
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: the grid may be rounded up past N.
    if (i < N)
        C[i] = A[i] + B[i];
}
|
23,400 | #include<stdio.h>
__global__ void interpolate(float * x, float * y, float a, float * k, int n){
    // Lagrange interpolation at point `a` over the n samples (x[i], y[i]):
    // thread i builds its basis term
    //   ks[i] = y[i] * prod_{j!=i}(a - x[j]) / prod_{j!=i}(x[i] - x[j]),
    // then thread 0 accumulates the terms into *k (which the caller must zero).
    // NOTE(review): the shared arrays are fixed at 100 entries and the final
    // reduction only sees this block's shared memory, so the kernel effectively
    // assumes a single block and n <= 100 — confirm against the launcher.
    __shared__ float ss[100], ts[100], ks[100];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        ss[i] = 1;
        ts[i] = 1;
        for (int j = 0; j < n; j++)
        {
            if (j != i)
            {
                ss[i] = ss[i] * (a - x[j]);
                ts[i] = ts[i] * (x[i] - x[j]);
            }
        }
        ks[i] = (((ss[i]) / (ts[i])) * y[i]);
    }
    // Fix: the barrier used to sit inside the divergent `if (i < n)` branch;
    // __syncthreads() must be reached by every thread of the block.
    __syncthreads();
    if (i == 0)
    {
        // Fix: the sum previously ran to n*n, adding n*n - n garbage values and
        // reading past the 100-element shared array for n > 10; only n basis
        // terms exist.
        for (int t = 0; t < n; t++)
        {
            *k += ks[t];
        }
    }
}
int main()
{
    // Read a table of (x, y) samples, then evaluate the Lagrange interpolant at
    // a user-supplied point on the GPU and report the result and kernel time.
    float *x, *y, *d_x, *d_y;
    float a, k, *d_k;
    size_t size = 100 * sizeof(float);
    cudaEvent_t start, stop;
    x = new float[100];
    y = new float[100];
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_k, sizeof(float));
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int N,i,d=1;
    printf("\n\n Enter the number of the terms of the table: ");
    scanf("%d",&N);
    printf("%d", N);
    // Fix: the host/device buffers hold at most 100 points; a larger (or
    // non-positive) N previously wrote past the arrays.
    if (N < 1 || N > 100)
    {
        printf("\n\n N must be between 1 and 100\n");
        delete [] x;
        delete [] y;
        return 1;
    }
    printf("\n\n Enter the respective values of the variables x and y: \n");
    for(i=0; i<N; i++)
    {
        scanf ("%f",&x[i]);
        scanf("%f",&y[i]);
    }
    printf("\n\n The table you entered is as follows :\n\n");
    for(i=0; i<N; i++)
    {
        printf("%0.3f\t%0.3f",x[i],y[i]);
        printf("\n");
    }
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    printf(" \n\n\n Enter the value of the x to find the respective value of y\n\n\n");
    scanf("%f",&a);
    printf("%f\n",a);
    int threads_per_block = 32;
    int number_of_blocks = N/threads_per_block + 1;
    // NOTE(review): the kernel reduces through per-block shared memory, so a
    // launch with more than one block (N > 32 here) looks unsound — verify.
    // Fix: the kernel accumulates into *d_k with +=, so it must start at zero.
    cudaMemset(d_k, 0, sizeof(float));
    cudaEventRecord(start);
    interpolate<<<number_of_blocks,threads_per_block>>>(d_x, d_y, a, d_k, N);
    // Fix: the stop event was never recorded, so cudaEventSynchronize(stop) and
    // cudaEventElapsedTime operated on an unrecorded event.
    cudaEventRecord(stop);
    cudaError_t err;
    err = cudaGetLastError(); // `cudaGetLastError` will return the error from above.
    if (err != cudaSuccess)
    {
        printf("Error: %s\n", cudaGetErrorString(err));
    }
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaMemcpy(&k, d_k, sizeof(float), cudaMemcpyDeviceToHost);
    printf("\n\n The respective value of the variable y is: %f\n", k);
    printf(" Elapsed time in milliseconds: %f\n", milliseconds);
    printf("\n\n Do you want to continue?\n\n Press 1 to continue and any other key to exit");
    scanf("%d",&d);
    delete [] x;
    delete [] y;
    cudaFree(d_x);
    cudaFree(d_y);
    // Fix: d_k was leaked and the timing events were never destroyed.
    cudaFree(d_k);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.