serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
10,101 | #include <stdio.h>
#include <assert.h>
__constant__ int inc;
__device__ int sum;
// Kernel: every thread atomically adds the constant `inc` to the global
// counter `sum` and checks the invariant on the value it observed.
// NOTE(review): the kernel is named `atomicAdd`, the same as the CUDA
// builtin it calls; overload resolution selects the builtin (int*, int)
// for the call below, but the shadowing is fragile — consider renaming the
// kernel (the launch in main must change too).
__global__ void atomicAdd()
{
// s = value of sum *before* this thread's addition. The host initializes
// sum to 1 and every update adds inc, so (s - 1) % inc == 0 always holds.
int s = atomicAdd(&sum, inc);
assert((s - 1) % inc == 0);
// One diagnostic line per block, printed by thread 0 of that block.
if (threadIdx.x == 0)
{
printf("blockIdx.x = %d, sum = %d\n", blockIdx.x, s);
}
}
// Host driver: seeds the device-side counter, launches the atomic-add
// kernel, and prints the final total (1 + 4 threads * 3 = 13).
int main(int argc, char *argv[])
{
    // Initialize inc (the per-thread increment) and sum (the running total).
    int h_inc = 3;
    int h_sum = 1;
    // Copy inc and sum from host memory to device memory synchronously.
    cudaMemcpyToSymbol(inc, &h_inc, sizeof(int));
    cudaMemcpyToSymbol(sum, &h_sum, sizeof(int));
    // Invoke the kernel on the device asynchronously (2 blocks x 2 threads).
    atomicAdd<<<2, 2>>>();
    // Bug fix: a kernel launch does not report configuration errors itself;
    // surface them explicitly instead of silently reading a stale sum.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Copy sum back; cudaMemcpyFromSymbol is synchronous, so it also waits
    // for the kernel to finish on the default stream.
    cudaMemcpyFromSymbol(&h_sum, sum, sizeof(int));
    // Print the result.
    printf("sum = %d\n", h_sum);
    // Cleanup.
    cudaDeviceReset();
    return 0;
}
|
10,102 | // Reductions based off of https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <cuda.h>
#define CEIL(x,y) (1 + (((x) - 1) / (y)))
// Fill the first `size` entries of A with pseudo-random values from rand().
// `dims` is accepted for signature compatibility but is not used.
void init(double* A, int size, int dims)
{
    (void)dims;
    int idx = 0;
    while (idx < size) {
        A[idx] = rand();
        ++idx;
    }
}
// Row-wise sum reduction: grid row blockIdx.y handles one dimension of the
// num_dims x num_arr input. Each block reduces up to 2*blockDim.x elements
// into shared memory and contributes one partial sum to output[dim].
// Preconditions: blockDim.x is a power of two (launched with 512) and the
// dynamic shared-memory size is blockDim.x * sizeof(double).
// Requires SM60+ for atomicAdd(double*, double).
__global__ void reduce_kernel(double *input, double *output, int num_arr) {
    extern __shared__ double shared_mem[];
    int dim = blockIdx.y;
    // Bug fix: each row of `input` holds num_arr elements, so the row
    // offset is dim * num_arr. The original used dim * gridDim.x * 2,
    // which is wrong whenever gridDim.x * 2 != num_arr.
    input += (size_t)dim * num_arr;
    int thread_id = threadIdx.x;
    int i = blockIdx.x * blockDim.x * 2 + thread_id;
    // First element of this thread's pair (0 when past the end of the row).
    shared_mem[thread_id] = (i < num_arr) ? input[i] : 0;
    // Second element of the pair; each thread only touches its own slot,
    // so no barrier is needed between the two loads.
    if (i + blockDim.x < num_arr) {
        shared_mem[thread_id] += input[i + blockDim.x];
    }
    __syncthreads();
    // Tree reduction in shared memory (power-of-two blockDim assumed).
    for (int j = blockDim.x / 2; j > 0; j /= 2) {
        if (thread_id < j) {
            shared_mem[thread_id] += shared_mem[thread_id + j];
        }
        __syncthreads();
    }
    // Bug fix: blocks of the same row run concurrently, so the plain
    // `output[dim] += shared_mem[0]` was a data race that dropped partial
    // sums; accumulate atomically instead.
    if (thread_id == 0) {
        atomicAdd(&output[dim], shared_mem[0]);
    }
}
// Allocates and fills a num_dims x num_arr array, reduces each row on the
// GPU with reduce_kernel, and prints the measured GPU time in milliseconds.
// UNMANAGED builds use explicit host + device buffers (the copy is included
// in the timed region); otherwise unified (managed) memory is used.
void reduce_cuda(int num_dims, int num_arr)
{
    // Bug fix: use size_t so large problem sizes do not overflow int.
    size_t total_size = (size_t)num_dims * num_arr * sizeof(double);
    double *arr;
    double *sum;
#ifdef UNMANAGED
    arr = (double *) malloc(total_size);
    sum = (double *) malloc(sizeof(double) * num_dims);
#else
    cudaMallocManaged(&arr, total_size);
    cudaMallocManaged(&sum, sizeof(double) * num_dims);
    // The kernel accumulates into sum, so it must start at zero.
    cudaMemset(sum, 0, sizeof(double) * num_dims);
#endif
    init(arr, num_dims * num_arr, num_arr);
    cudaEvent_t start;
    cudaEvent_t end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    // One grid row per dimension; blocks of 512 threads cover each row.
    dim3 grid(CEIL(num_arr, 512), num_dims);
#ifdef UNMANAGED
    double *arr_gpu;
    double *sum_gpu;
    cudaMalloc(&arr_gpu, total_size);
    cudaMalloc(&sum_gpu, sizeof(double) * num_dims);
    cudaEventRecord(start);
    cudaMemcpy(arr_gpu, arr, total_size, cudaMemcpyHostToDevice);
    cudaMemset(sum_gpu, 0, sizeof(double) * num_dims);
    reduce_kernel<<<grid, 512, sizeof(double) * 512>>>(arr_gpu, sum_gpu, num_arr);
    cudaEventRecord(end);
    // Blocking copy of the results; also synchronizes with the kernel.
    cudaMemcpy(sum, sum_gpu, sizeof(double) * num_dims, cudaMemcpyDeviceToHost);
#else
    cudaEventRecord(start);
    reduce_kernel<<<grid, 512, sizeof(double) * 512>>>(arr, sum, num_arr);
    cudaEventRecord(end);
#endif
    cudaEventSynchronize(end);
    float elapsed = 0;
    cudaEventElapsedTime(&elapsed, start, end);
    printf("%f \n", elapsed);
    // Bug fix: the timing events were leaked; destroy them.
    cudaEventDestroy(start);
    cudaEventDestroy(end);
#ifdef UNMANAGED
    free(arr);
    free(sum);
    cudaFree(arr_gpu);
    cudaFree(sum_gpu);
#else
    cudaFree(arr);
    cudaFree(sum);
#endif
}
// Entry point: parses num_arr from the command line and runs the benchmark.
int main(int argc, char *argv[])
{
    // Expect exactly one argument: the number of elements per dimension.
    if (argc != 2) {
        printf("Incorrect command line arguments! Need to provide num_arr.\n");
        return -1;
    }
    // int num_dims = strtol(argv[1], NULL, 10);
    const int num_dims = 1;
    const int num_arr = (int) strtol(argv[1], NULL, 10);
    reduce_cuda(num_dims, num_arr);
    return 0;
}
|
10,103 | #include <stdio.h>
#include <limits.h>
/* Part A */
// Histogram A[0..n) into 10 global buckets of width 100 (values 0..999).
// Values outside [0, 999] are ignored — identical to the original
// if/else chain, which had no branch for them.
__global__ void part_a(int n, int *A, int *B){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) {
        int v = A[i];
        // Computed bucket index replaces the divergent 10-way branch chain:
        // same counts, one atomic per element, no per-bucket divergence.
        if (0 <= v && v <= 999) {
            atomicAdd(&B[v / 100], 1);
        }
    }
}
/* Part B */
// Same histogram as part_a, but counts are accumulated in per-block shared
// memory first and flushed to the global histogram B once per block.
// Precondition: blockDim.x >= 10 (launched with 256 in main).
__global__ void part_b(int n, int *A, int *B){
    __shared__ int s[10];
    // Bug fix: shared memory is uninitialized; the counters must be zeroed
    // before any thread increments them.
    if (threadIdx.x < 10) s[threadIdx.x] = 0;
    __syncthreads();
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) {
        int v = A[i];
        // Bug fix: bucket 0 previously bypassed the shared counters and
        // updated global B directly; all buckets now go through s[].
        if (0 <= v && v <= 999) {
            atomicAdd(&s[v / 100], 1);
        }
    }
    __syncthreads();
    // Flush this block's counters to the global histogram, one thread per
    // bucket (replaces the serial loop in thread 0).
    if (threadIdx.x < 10) {
        atomicAdd(&B[threadIdx.x], s[threadIdx.x]);
    }
}
/* Part C */
// Adds the exclusive prefix sum of B to C: C[i] += B[0] + ... + B[i-1].
// Sequential by design; launched with a single thread.
__global__ void part_c(int *B, int *C){
    int running = 0;
    for (int i = 0; i < 10; i++) {
        C[i] += running;     // running == sum of B[0..i-1]
        running += B[i];
    }
}
// Reads integers from inp.txt, builds three histograms on the GPU
// (global atomics, shared-memory atomics, cumulative), and writes each to
// its own output file while echoing it to stdout.
int main() {
    /* Open File */
    FILE *fp;
    fp = fopen("inp.txt", "r");
    char buff[256];
    const int M = 1<<20;
    const int d = 10;
    int *A, *B, *B2, *C;
    int i, count = 0;
    /* Allocate unified (managed) memory, accessible from host and GPU.
       Bug fix: the arrays were first allocated with new[] and the pointers
       then overwritten by cudaMallocManaged, leaking the host buffers. */
    cudaMallocManaged(&A, M * sizeof(int));
    cudaMallocManaged(&B, d * sizeof(int));
    cudaMallocManaged(&B2, d * sizeof(int));
    cudaMallocManaged(&C, d * sizeof(int));
    /* Bug fix: the histogram buffers are accumulated into with atomicAdd
       but were never zeroed; cudaMalloc* does not zero memory. */
    for (int t = 0; t < d; t++) { B[t] = 0; B2[t] = 0; C[t] = 0; }
    /* Read numbers as integers one by one.
       Bug fix: also stop when A is full so malformed/long input cannot
       overrun the buffer. */
    while (count < M && fscanf(fp, "%d", &i) != EOF) {
        A[count++] = i;         // Add number to array
        fscanf(fp, "%s", buff); // Consume the separator token
    }
    /* Close FilePointer */
    fclose(fp);
    /**************************************************/
    /* Part A: histogram with global atomics */
    int blockSize = 256;
    int numBlocks = (count + blockSize - 1) / blockSize;
    part_a<<<numBlocks, blockSize>>>(count, A, B);
    /* Wait for GPU before touching managed memory on the host */
    cudaDeviceSynchronize();
    /* Part A to File */
    FILE *f = fopen("q2a.txt", "w");
    for (int i = 0; i < d; i++) {
        fprintf(f, "%d", B[i]);
        if (i + 1 != d) { fprintf(f, ", "); }
    } fclose(f);
    /* Print B */
    printf("B: ");
    for (int i = 0; i < d; i++) {
        printf("%d", B[i]);
        if (i + 1 != d ) printf(", ");
    } printf("\n");
    /* Copy B to C so part_c can turn C into a cumulative histogram */
    for (int i = 0; i < d; i++) { C[i] = B[i]; }
    /**************************************************/
    /* Part B: histogram with shared-memory privatization */
    part_b<<<numBlocks, blockSize>>>(count, A, B2);
    /* Wait for GPU */
    cudaDeviceSynchronize();
    /* Part B to File */
    FILE *f2 = fopen("q2b.txt", "w");
    for (int i = 0; i < d; i++) {
        fprintf(f2, "%d", B2[i]);
        if (i + 1 != d) { fprintf(f2, ", "); }
    } fclose(f2);
    /* Print B2 */
    printf("B2: ");
    for (int i = 0; i < d; i++) {
        printf("%d", B2[i]);
        if (i + 1 != d ) printf(", ");
    } printf("\n");
    /**************************************************/
    /* Part C: cumulative histogram (single thread) */
    part_c<<<1, 1>>>(B, C);
    /* Wait for GPU */
    cudaDeviceSynchronize();
    /* Part C to File */
    FILE *f3 = fopen("q2c.txt", "w");
    for (int i = 0; i < d; i++) {
        fprintf(f3, "%d", C[i]);
        if (i + 1 != d) { fprintf(f3, ", "); }
    } fclose(f3);
    /* Print C */
    printf("C: ");
    for (int i = 0; i < d; i++) {
        printf("%d", C[i]);
        if (i + 1 != d ) printf(", ");
    } printf("\n");
    /**************************************************/
    /* Free Memory */
    cudaFree(A);
    cudaFree(B);
    cudaFree(B2);
    cudaFree(C);
    return 0;
}
|
10,104 | #include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
// Extract the diagonal entries of a 2^no_qubits x 2^no_qubits matrix stored
// in dm9 into out. For row x, the source address interleaves the bits of x
// with themselves: bit i of x lands at positions 2i and 2i+1, i.e.
// row == column == x in the bit-interleaved layout.
// NOTE(review): the loop handles 16 address bits, so this presumably
// assumes no_qubits <= 16 — confirm at call sites. Whether dm9 stores
// complex pairs (and addr_real indexes the real part) cannot be verified
// from this block alone.
__global__ void get_diag(double *dm9, double *out, unsigned int no_qubits) {
// One thread per diagonal element.
int x = (blockIdx.x *blockDim.x) + threadIdx.x;
if (x >= (1 << no_qubits)) return;
unsigned int addr_real = 0;
// Spread bit i of x to bit positions 2i and 2i+1 of the address.
for (int i = 0; i < 16; i++) {
addr_real |= (x & 1U << i) << i | (x & 1U << i) << (i + 1);
}
out[x] = dm9[addr_real];
} |
10,105 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Report a CUDA error (with the source line) only when the call failed;
// the original printed "no error" noise on every successful call. Wrapped
// in do/while(0) so it behaves as a single statement after `if`.
#define checkCudaErrors(x) do { cudaError_t err_ = (x); if (err_ != cudaSuccess) printf("%s (%d)\n", cudaGetErrorString(err_), __LINE__); } while (0)
// Element-wise c = a + b. Each thread handles index i and — so that a
// 640-thread launch can still cover N = 1000 elements — a second element
// at i + 640 when that is in range.
__global__ void kernel(double *a, double *b, double *c, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: main launches 16 * 64 = 1024 threads for N = 1000, so the
    // unguarded write c[i] ran out of bounds for i in [N, 1023]. Guard it.
    if (i < N) {
        c[i] = a[i] + b[i];
    }
    // Second element per thread; i < N - 640 implies i + 640 < N, so this
    // access is already bounded.
    if (i < N-640) {
        c[i+640] = a[i+640] + b[i+640];
    }
}
// Driver (teaching exercise): builds two N-element vectors, adds them on
// the GPU, and verifies the result against the host computation.
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initialize input values in h_a and h_b.
for(int i = 0 ; i < N ; i++) {
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
// Correction (exercise): the original faulty call allocated 0 bytes:
// checkCudaErrors(cudaMalloc((void**)&d_b, 0));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
// The original exercise launched 640 threads in total while computing
// 1000 values => error. Two possible corrections:
// 1) launch enough threads:
//    dim3 dimBlock(64, 1, 1); dim3 dimGrid(16, 1, 1);
// 2) let each thread compute more than one element (see kernel()).
// Here 16 * 64 = 1024 threads are launched, so the kernel must guard
// against indices >= N.
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(16, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
// Blocking copy: also synchronizes with the kernel.
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Verify: accumulated absolute error relative to the host-side sum.
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++) {
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16) {
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
} else {
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
10,106 | #include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <cmath>
#include <cstdio>
#include <iostream>
using namespace std;
//using namespace std::chrono;
int test_reduce(int* v);
using namespace std;
// Per-block sum reduction: each block sums blockDim.x elements of g_idata
// into g_odata[blockIdx.x]. Dynamic shared memory: blockDim.x * sizeof(int).
// Preconditions: blockDim.x is a power of two and the grid exactly covers
// the input (there is no length parameter to bound-check against).
__global__ void reduce0(int *g_idata, int *g_odata) {
    extern __shared__ int sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // Improvement over the interleaved `tid % (2*s)` scheme from the NVIDIA
    // reduction slides: sequential addressing keeps the active threads
    // contiguous (no warp divergence) and avoids the modulo, while
    // producing the identical per-block sum.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Reduces in[0..N) on the GPU and returns the total.
// Precondition: N is a multiple of 32 — the integer division below would
// silently drop any leftover elements.
int test_reduce(int* in, int N) {
    int* d_in;
    int* d_out;
    int num_threads = 32;
    int num_blocks = N / num_threads;
    int *out = new int[num_blocks];
    cudaMalloc(&d_in, N * sizeof(int));
    cudaMalloc(&d_out, num_blocks * sizeof(int));
    cudaMemcpy(d_in, in, N * sizeof(int), cudaMemcpyHostToDevice);
    // (Removed a useless host-to-device copy of the *uninitialized* `out`
    // buffer — the kernel overwrites every element of d_out anyway.)
    reduce0<<<num_blocks, num_threads, num_threads * sizeof(int)>>>(d_in, d_out);
    int res = 0;
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(out, d_out, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost);
    // Final accumulation of the per-block partial sums on the host.
    for(int i = 0; i < num_blocks; i++)
    {
        res += out[i];
    }
    cudaFree(d_in);
    cudaFree(d_out);
    // Bug fix: `out` was allocated with new[] but released with scalar
    // delete (undefined behavior); pair new[] with delete[].
    delete[] out;
    return res;
}
// Builds the sequence 1..N, sums it on CPU and GPU, and prints both totals
// (they should both equal N*(N+1)/2 = 524800 for N = 1024).
int main() {
    int N = 1024;
    int* in = new int[N];
    for (int i = 0; i < N; i++) {
        in[i] = i + 1;
    }
    // Host reference. (The historical variable/label names say "Max", but
    // this computes a sum — the labels are kept for output compatibility.)
    int maximo = 0;
    for (int i = 0; i < N; i++) {
        maximo += in[i];
    }
    cout << "Max CPU " << maximo << endl;
    int max_cuda = test_reduce(in, N);
    cout << "Max GPU " << max_cuda << endl;
    // Bug fix: new[] must be paired with delete[], not scalar delete.
    delete[] in;
    return 0;
}
|
10,107 | //#include<cstdio>
//#include<cuda.h>
//#include<cuda_runtime.h>
//#include<device_launch_parameters.h>
//#include<vector>
//#include "Common.cuh"
//#include "Mesh.cuh"
//using namespace std;
//
////#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
////#else
////__device__ double atomicAdd(double* address, double val) {
//// unsigned long long int* address_as_ull = (unsigned long long int*)address;
//// unsigned long long int old = *address_as_ull, assumed;
//// do {
//// assumed = old;
//// old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
//// // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
//// } while (assumed != old);
//// return __longlong_as_double(old);
////}
////
////#endif
//
//
//
//__global__ void d_square(Vector3d *d_in, Vector3d *d_out) {
// int idx = threadIdx.x;
// Vector3d val = d_in[idx];
// printf("%d\n", sizeof(d_in[idx]));
// d_out[idx] = val + val;
//}
//
//
//__global__ void reduce(double *d_data, double *d_out) {
// int tid = threadIdx.x;
// printf("%lf, %d, %lf\n", d_out[0], tid, d_data[tid]);
// atomicAdd(&d_out[0], d_data[tid]);
//}
//
//
//void main() {
// unsigned int size = 5, bytes = size * sizeof(double);
// double *test = new double[size];
// for (int i = 0; i < size; i++) {
// test[i] = i + 1;
// }
// double *res = new double[size];
//
// double *d_in, *d_out;
//
// cudaMalloc((void**)&d_in, bytes);
// cudaMalloc((void**)&d_out, bytes);
//
// cudaMemcpy(d_in, test, bytes, cudaMemcpyHostToDevice);
//
// reduce<<<1,size>>>(d_in, d_out);
//
// cudaMemcpy(res, d_out, bytes, cudaMemcpyDeviceToHost);
// printf("%lf\n", res[0]);
//
//}
//
////// TIMER
/////*chrono::high_resolution_clock Clock;
////auto t1 = Clock.now();
////auto t2 = Clock.now();
////double t = chrono::duration_cast<chrono::nanoseconds>(t2 - t1).count();
////
////printf("time = %lf \n", t * 1e-9);*/
|
10,108 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// Mark which edges survive the match-filter pass:
// keepEdges[e] = 1 when *both* endpoints of edge e are still unmatched
// (matches[] == -1), otherwise 0.
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
	// Grid-stride loop: each thread processes a strided subset of the edges.
	int stride = blockDim.x * gridDim.x;
	for (int e = blockIdx.x * blockDim.x + threadIdx.x; e < numEdges; e += stride)
	{
		bool bothUnmatched = (matches[src[e]] == -1) && (matches[dst[e]] == -1);
		keepEdges[e] = bothUnmatched ? 1 : 0;
	}
}
|
/* Computes a step in the integration changing only p */
// Grid-stride update: p[i] += a * f[i] for every i in [0, n). r_gpu is
// part of the signature but unused here (this step leaves r unchanged).
__global__ void step_type2(long n, double a, double *r_gpu, double *p_gpu, double *f_gpu)
{
    const long stride = (long)blockDim.x * gridDim.x;
    for (long i = threadIdx.x + (long)blockIdx.x * blockDim.x; i < n; i += stride)
    {
        p_gpu[i] += a * f_gpu[i];
    }
}
|
10,110 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
//// notes
// based on the examples given in the CUDA programming guide
// this one makes a list of gals, one list for ra and one for dec.
// it can then calc the separation between gal pairs.
// note that it's not returning anything from the calculation!
// just calculating how long each process takes.
// Try playing with ngals - time scales as you'd expect with CPU
// and CPU is faster with fewer gals.
// then again, this isn't exactly optimised code....
// my numbers:
// 100 gals: 0.6 ms w/ CPU, 13 ms w/ GPU
// 1000 gals: 61 ms w/ CPU, 185 w/ GPU
// 10000 gals: 6085 ms w/ CPU, 4871 w/ GPU
//device code
// Each thread computes angular separations for one "column" of galaxy
// pairs and then for its mirror column, so only ngals/2 threads are needed.
// The results are intentionally discarded — the kernel exists only to be
// timed against the CPU loop in main.
__global__ void CalcSep(float* raA, float* decA, int ngals)
{
    int ix = blockDim.x * blockIdx.x + threadIdx.x;
    // Bug fix: blocks come in units of 256 threads, so ix can exceed
    // ngals-1; the mirrored index below would then go negative and the loop
    // body would read raA/decA out of bounds. Excess threads simply exit.
    if (ix >= ngals) return;
    float sep=0;
    // Do 1 ``column"
    for(int ij=ix+1;ij<ngals;ij++)
    {
        sep = acos( sin(decA[ix])*sin(decA[ij]) + \
        cos(decA[ix])*cos(decA[ij])*cos(fabs(raA[ix]-raA[ij])) );
    }//loop over gals
    // Then the mirror ``column" (ngals-1-ix)
    ix = (ngals - 1) - ix;
    for(int ij=ix+1;ij<ngals;ij++)
    {
        sep = acos( sin(decA[ix])*sin(decA[ij]) + \
        cos(decA[ix])*cos(decA[ij])*cos(fabs(raA[ix]-raA[ij])) );
    }//loop over gals
    (void)sep; // result unused by design (timing benchmark only)
}
//Host code
// Times the all-pairs angular-separation calculation on CPU and GPU for
// ngals galaxies (optionally taken from argv[1]) and prints both timings.
int main(int argc, char **argv)
{
    int ngals = 100;
    // Grab the number of galaxies from the command line *if* specified.
    if (argc>1)
    {
        ngals = atoi(argv[1]);
    }
    size_t sizeneededin = ngals * sizeof(float);
    //allocate vectors in host memory
    float* h_raA = (float*)malloc(sizeneededin);
    float* h_decA = (float*)malloc(sizeneededin);
    srand(time(0));
    // Initialize inputs - place galaxies at random coords between 0 and 1.
    // Bug fix: rand() alone returns values up to RAND_MAX, not [0, 1] as
    // the stated intent (and the spherical-trig math) requires; normalize.
    for(int i=0;i<ngals;i++)
    {
        h_raA[i] = rand()/(float)RAND_MAX;
        h_decA[i] = rand()/(float)RAND_MAX;
    }
    //calculate separation on CPU and measure the elapsed time
    clock_t teststart = clock();
    float testsep=0;
    for(int i=0;i<ngals;i++){
        for(int j=i+1;j<ngals;j++){
            testsep = acos( sin(h_decA[i])*sin(h_decA[j]) + cos(h_decA[i])*cos(h_decA[j])*cos(fabs(h_raA[i]-h_raA[j])) );
        }
    }
    clock_t testend = clock();
    float testelapsed = (float)(testend-teststart);
    printf("elapsed time for CPU in ms: %f", testelapsed/CLOCKS_PER_SEC*1000);
    printf("\n");
    //allocate vectors in device memory
    float* d_raA; float* d_decA;
    cudaMalloc(&d_raA, sizeneededin);
    cudaMalloc(&d_decA, sizeneededin);
    //copy vectors from host to device memory
    cudaMemcpy(d_raA, h_raA, sizeneededin, cudaMemcpyHostToDevice);
    cudaMemcpy(d_decA, h_decA, sizeneededin, cudaMemcpyHostToDevice);
    //invoke kernel
    int threadsPerBlock = 256;
    // Each thread handles one column and its mirror, so ngals/2 threads
    // suffice (ceil-divided into blocks).
    int blocksPerGrid = (ngals/2 + threadsPerBlock -1) / threadsPerBlock;
    // CUDA events time the kernel itself; a plain CPU timer around the
    // launch would only measure the (asynchronous) launch overhead.
    cudaEvent_t cudastart, cudaend;
    cudaEventCreate(&cudastart);
    cudaEventCreate(&cudaend);
    //record the start time
    cudaEventRecord(cudastart,0);
    //run the kernel!
    CalcSep<<<blocksPerGrid, threadsPerBlock>>>(d_raA, d_decA, ngals);
    //record the end time and wait for it
    cudaEventRecord(cudaend,0);
    cudaEventSynchronize(cudaend);
    //how long did the kernel take? this gives time in ms
    float cudaelapsed=0;
    cudaEventElapsedTime(&cudaelapsed, cudastart, cudaend);
    printf("elapsed time for GPU in ms: %f",cudaelapsed);
    printf("\n");
    //destroy the timing events
    cudaEventDestroy(cudastart);
    cudaEventDestroy(cudaend);
    //free device memory
    cudaFree(d_raA); cudaFree(d_decA);
    //free host memory
    free(h_raA); free(h_decA);
    return 0;
}
|
10,111 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Trivial kernel: every thread of every block prints a greeting.
// Note: device-side printf output is only guaranteed to reach the host
// after a synchronizing call (see main).
__global__ void teste()
{
printf("hello world GPU");
printf("\n");
}
// Launches the hello-world kernel (2 blocks x 2 threads) and prints a host
// message.
int main()
{
    teste<<<2,2>>>();
    // Bug fix: without a synchronization the process can exit before the
    // kernel runs, so the device-side printf output was never flushed.
    cudaDeviceSynchronize();
    printf("hello awdawdworld");
    printf("\n");
    return 0;
} |
10,112 | #include "includes.h"
// Sparse-transpose accumulation for one CSC column xc of x:
// dw[wr,wc] += dy[yr,yc] * x[xr,xc] where wr=yr, wc=xr, yc=xc.
// The "- 1" adjustments below indicate xcol/xrow are 1-based
// (Julia/Fortran-style) index arrays — TODO confirm against the caller.
__global__ void _A_mul_Bst_32(int my, int xc, float *dy, float *xval, int *xrow, int *xcol, float *dw) {
// dw[wr,wc] += dy[yr,yc] * x[xr,xc] where wr=yr, wc=xr, yc=xc
int t, n, xp, xr, yp, yr, wp;
t = threadIdx.x + blockIdx.x * blockDim.x;
// n = number of stored entries in column xc.
n = xcol[xc+1] - xcol[xc];
// Grid-stride loop over the column's nonzeros. Each thread owns distinct
// entries; if the rows within this column are distinct (normal for CSC),
// the dw updates of different threads never collide.
// NOTE(review): if xrow could contain duplicate rows in one column, the
// unsynchronized `dw[wp] +=` below would race — verify the invariant.
while (t < n) {
xp = xcol[xc] + t - 1;
xr = xrow[xp] - 1;
for (yr = 0; yr < my; yr++) {
yp = yr + xc * my;
wp = yr + xr * my;
dw[wp] += dy[yp] * xval[xp];
}
t += blockDim.x * gridDim.x;
}
} |
10,113 | #include "includes.h"
// Tiled matrix multiplication C = A * B for square M x M matrices.
// Preconditions (constants from includes.h): TILE_WIDTH divides M,
// blockDim = (TILE_WIDTH, TILE_WIDTH), and the grid covers the M x M output.
__global__ void tiled_matrix_multiplication(int *A, int *B, int *C) {
    __shared__ int As[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Bs[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;
    int res = 0;
    for(int i = 0; i < M/TILE_WIDTH; i++) {
        // Stage one tile of A and one tile of B in shared memory.
        As[ty][tx] = A[row * M + (i*TILE_WIDTH + tx)];
        Bs[ty][tx] = B[(i*TILE_WIDTH + ty)* M + col];
        __syncthreads();
        for(int j = 0; j < TILE_WIDTH; j++) {
            // Bug fix: the inner product must MULTIPLY the tile elements;
            // the original added them (As + Bs), which is not a matmul.
            res += As[ty][j] * Bs[j][tx];
        }
        // Barrier before the next tile overwrites shared memory.
        __syncthreads();
    }
    C[row * M + col] = res;
} |
10,114 | #include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// k holds the MD5 sine-derived round constants; it is (re)computed inside
// md5() on the device. NOTE(review): every calling thread writes the same
// values concurrently — value-benign but redundant; a one-time host-side
// initialization would be cleaner.
__device__ uint32_t k[64] = {0};
// K specifies the per-round shift amounts
__device__ const uint32_t K[] = {
7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21};
// Rotate x left by C bits (C must be in 1..31; C == 0 or 32 would shift
// by the full word width, which is undefined).
__device__ uint32_t leftrotate(uint32_t x, uint32_t C) {
    uint32_t hi = x << C;
    uint32_t lo = x >> (32 - C);
    return hi | lo;
}
// Serialize val into 4 bytes at outputs, little-endian (least significant
// byte first).
__device__ void append_bytes(uint32_t val, uint8_t *outputs) {
    for (int b = 0; b < 4; ++b) {
        outputs[b] = (uint8_t)(val >> (8 * b));
    }
}
// Deserialize 4 little-endian bytes into a uint32_t.
__device__ uint32_t append_int(const uint8_t *inputs) {
    uint32_t v = 0;
    for (int b = 3; b >= 0; --b) {
        v = (v << 8) | inputs[b];
    }
    return v;
}
// Device-side MD5 (RFC 1321): hashes orig_msg[0..orig_len) and writes the
// 16-byte digest (a0,b0,c0,d0 concatenated, each little-endian).
// Uses device-side malloc/free for the padded message buffer.
// NOTE(review): fills the module-scope __device__ array k[] on every call
// and from every calling thread; the values are identical each time, so
// the write race is value-benign, but the work is redundant.
__device__ void md5(const uint8_t *orig_msg, size_t orig_len,
uint8_t *digest) {
// Use binary integer part of the sines of integers (Radians) as constants:
// k[i] = floor(2^32 * |sin(i + 1)|)
for (int i = 0; i < 64; i++) {
k[i] = (uint32_t)(abs(sin(i + 1.0)) * ((long long)1 << 32));
}
// Initialize variables (standard MD5 initial state):
uint32_t a0 = 0x67452301;
uint32_t b0 = 0xefcdab89;
uint32_t c0 = 0x98badcfe;
uint32_t d0 = 0x10325476;
size_t new_len, offset;
uint32_t M[16];
uint32_t A, B, C, D, F, g;
// append "0" bit until message length in bits ≡ 448 (mod 512)
for (new_len = orig_len + 1; new_len % (512 / 8) != 448 / 8; new_len++);
// +8 leaves room for the 64-bit bit-length field appended below.
uint8_t *message = (uint8_t *)malloc(new_len + 8);
memcpy(message, orig_msg, orig_len);
// Pre-processing: adding a single 1 bit
message[orig_len] = 0x80;
// Pre-processing: padding with zeros
for (offset = orig_len + 1; offset < new_len; offset++){
message[offset] = 0;
}
// append length mod (2^64) to message (low 32 bits of bit length)
append_bytes(orig_len * 8, message + new_len);
// address the overflow part: high bits, orig_len*8 >> 32 == orig_len >> 29
append_bytes(orig_len >> 29, message + new_len + 4);
// Process the message in successive 512-bit chunks (the final chunk
// includes the 8 length bytes because new_len ≡ 56 mod 64):
// for each 512-bit chunk of message:
for (offset = 0; offset < new_len; offset += (512 / 8)) {
// break chunk into sixteen 32-bit words w[j], 0 ≤ j ≤ 15
for (int i = 0; i < 16; i++) {
M[i] = append_int(message + offset + i * 4);
}
// Initialize hash value for this chunk:
A = a0;
B = b0;
C = c0;
D = d0;
// Main loop: 4 rounds of 16 steps; the mixing function F and the
// message-word index g depend on the round.
for (int i = 0; i < 64; i++) {
if (i < 16) {
F = (B & C) | ((~B) & D);
g = i;
} else if (i < 32) {
F = (D & B) | ((~D) & C);
g = (5 * i + 1) % 16;
} else if (i < 48) {
F = B ^ C ^ D;
g = (3 * i + 5) % 16;
} else {
F = C ^ (B | (~D));
g = (7 * i) % 16;
}
// Be wary of the below definitions of a,b,c,d
F = A + F + k[i] + M[g]; // M[g] must be a 32-bits block
A = D;
D = C;
C = B;
B = B + leftrotate(F, K[i]);
}
// Add this chunk's hash to result so far:
a0 += A;
b0 += B;
c0 += C;
d0 += D;
}
// cleanup
free(message);
// var char digest[16] := a0 append b0 append c0 append d0 //(Output is in
// little-endian)
append_bytes(a0, digest);
append_bytes(b0, digest + 4);
append_bytes(c0, digest + 8);
append_bytes(d0, digest + 12);
}
|
10,115 | #include "includes.h"
// Invalidate flow/disparity matches containing non-finite values: when
// either the (first) flow channel or the disparity at a pixel is NaN/Inf,
// both flow components and the disparity at that pixel are set to NaN.
__global__ void matchValidity_kernel(float *d_flow, float *d_disparity, int n_cols, int n_rows) {
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if ((col < n_cols) & (row < n_rows)) // inside the image?
    {
        unsigned int ind = col + row * n_cols;
        if (!(isfinite(d_flow[ind]) && isfinite(d_disparity[ind]))) {
            d_flow[ind] = nanf("");
            // Second flow component lives one image plane further on.
            d_flow[ind + n_cols * n_rows] = nanf("");
            d_disparity[ind] = nanf("");
        }
    }
} |
10,116 | #include "includes.h"
// Write blockIdx.x * dimx + threadIdx.x into a[] at each thread's global
// index. NOTE(review): the stored value uses dimx while the index uses
// blockDim.x; the two only coincide when dimx == blockDim.x — confirm the
// launch configuration at the call site. dimy is unused.
__global__ void kernel( int *a, int dimx, int dimy ) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    a[gid] = blockIdx.x * dimx + threadIdx.x;
} |
10,117 | #include "includes.h"
//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;
// Busy-wait "delay" loop. NOTE(review): the empty loop has no observable
// side effects, so the compiler is free to remove it entirely — do not
// rely on it for timing.
__device__ void wait() {
for (int i = 1; i <= 10000000; i++);
}
// Square of x.
__device__ double sqr(double x) {
return x * x;
}
// val[i] = sin^2(x[i-1]*x[i]) * sin^2(x[i+1]*x[i]) with cyclic (wrapped)
// neighbor indices. Grid-stride loop over all `size` entries.
__global__ void calculate_val(double* devx, double* val, int size) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride)
    {
        // Cyclic predecessor / successor of i.
        int left = (i == 0) ? size - 1 : i - 1;
        int right = (i == size - 1) ? 0 : i + 1;
        val[i] = sqr(sin(devx[left] * devx[i])) * sqr(sin(devx[right] * devx[i]));
    }
} |
10,118 | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <cstring>
#include <sys/stat.h> //for filesize
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <iomanip>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define BLOCK_SIZE_BIT 128
#define BLOCK_SIZE_CHAR 16
typedef struct State
{
int bytes[4][4];
}State;
struct block
{
unsigned char text[BLOCK_SIZE_CHAR];
};
// AES SubBytes: B[i][j] = SBox[A[i][j]], looking up the S-box with the
// high nibble as the first index and the low nibble as the second.
// Launched with blockDim (4,4): one thread per state byte; A is read-only.
__global__ void ByteSub(int A[][4], int B[][4])
{
	// Standard AES S-box (FIPS-197), indexed [high nibble][low nibble].
	// Bug fix: two entries were wrong versus FIPS-197 —
	// (0xb,0x1) was 0xcb, must be 0xc8; (0xe,0xb) was 0xe8, must be 0xe9.
	const int Matrix_ByteSub[][16] = {
	{ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76 },
	{ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0 },
	{ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15 },
	{ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75 },
	{ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84 },
	{ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf },
	{ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8 },
	{ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2 },
	{ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73 },
	{ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb },
	{ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79 },
	{ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08 },
	{ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a },
	{ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e },
	{ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf },
	{ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }
	};
	int i = threadIdx.x;
	int j = threadIdx.y;
	if (i < 4 && j < 4)
	{
		// The original reloaded A[i][j] three times; once suffices.
		int byteVal = A[i][j];
		int hi = (byteVal >> 4) & 0x0f; // high nibble selects the row
		int lo = byteVal & 0x0f;        // low nibble selects the column
		B[i][j] = Matrix_ByteSub[hi][lo];
	}
}
// AES ShiftRows: row i of the state is rotated left by i positions, with
// the result written to B. A is left untouched.
// Launched with blockDim (4,4): one thread per output byte.
__global__ void ShiftRow(int A[][4], int B[][4])
{
	int i = threadIdx.x;
	int j = threadIdx.y;
	if (i < 4 && j < 4)
	{
		// Bug fix: the original shifted A in place through a per-thread
		// temp array with no synchronization, racing both between threads
		// of a block and between the concurrently launched blocks (all
		// blocks operate on the same global A). Reading A and writing only
		// B is race-free and needs no barrier.
		B[i][j] = A[i][(j + i) % 4];
	}
}
// GF(2^8) multiplication of a and b (Sam Trenholme's shift-and-add
// algorithm) using the AES irreducible polynomial
// x^8 + x^4 + x^3 + x + 1 (0x1b) for the modular reduction.
__device__ unsigned char gmul(unsigned char a, unsigned char b)
{
	unsigned char product = 0;
	for (int bit = 0; bit < 8; bit++)
	{
		// XOR-accumulate a for every set low bit of b
		// (product = sum of a * x^k over the set bits k of b).
		if (b & 1)
		{
			product ^= a;
		}
		// Multiply a by x; reduce modulo 0x1b when the high bit would
		// overflow out of the byte.
		unsigned char overflow = a & 0x80;
		a <<= 1;
		if (overflow == 0x80)
		{
			a ^= 0x1b;
		}
		// Consume the low bit of b.
		b >>= 1;
	}
	return product;
}
// AES MixColumns: each column of A is multiplied (over GF(2^8)) by the
// fixed matrix {{2,3,1,1},{1,2,3,1},{1,1,2,3},{3,1,1,2}}, result in B.
// One thread (threadIdx.x) handles one full column; A is not modified.
__global__ void MixColumn(int A[][4], int B[][4])
{
	int c = threadIdx.x;
	if (c < 4)
	{
		// Snapshot the input column.
		unsigned char in[4];
		for (int r = 0; r < 4; r++)
		{
			in[r] = A[r][c];
		}
		// Apply the MixColumns matrix to this column.
		unsigned char out[4];
		out[0] = gmul(in[0], 2) ^ gmul(in[1], 3) ^ gmul(in[2], 1) ^ gmul(in[3], 1);
		out[1] = gmul(in[0], 1) ^ gmul(in[1], 2) ^ gmul(in[2], 3) ^ gmul(in[3], 1);
		out[2] = gmul(in[0], 1) ^ gmul(in[1], 1) ^ gmul(in[2], 2) ^ gmul(in[3], 3);
		out[3] = gmul(in[0], 3) ^ gmul(in[1], 1) ^ gmul(in[2], 1) ^ gmul(in[3], 2);
		// Store the transformed column.
		for (int r = 0; r < 4; r++)
		{
			B[r][c] = out[r];
		}
	}
}
// Drives one round's worth of AES transformations (ByteSub, ShiftRow,
// MixColumn) on a fixed 4x4 test state and prints each intermediate result.
// Fix: the intermediate device buffers d_bytesub and d_shiftrow were
// allocated but never freed (device memory leak).
int main()
{
    int i, j;
    // One thread per state byte for the element-wise kernels.
    dim3 threadsPerBlock(4, 4);
    int test_matrix[4][4] = {
        { 0x00, 0x12, 0x0c, 0x08 },
        { 0x04, 0x04, 0x00, 0x23 },
        { 0x12, 0x12, 0x13, 0x19 },
        { 0x14, 0x00, 0x11, 0x19 }
    };
    State test;
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            test.bytes[i][j] = test_matrix[i][j];
        }
    }
    // Host-side result matrices for each stage.
    int BS[4][4];
    int SR[4][4];
    int MC[4][4];
    int(*d_test)[4];
    int(*d_BS)[4];
    int(*d_SR)[4];
    int(*d_MC)[4];
    cudaMalloc((void**)&d_test, (4 * 4) * sizeof(int));
    cudaMalloc((void**)&d_BS, (4 * 4) * sizeof(int));
    cudaMalloc((void**)&d_SR, (4 * 4) * sizeof(int));
    cudaMalloc((void**)&d_MC, (4 * 4) * sizeof(int));
    cudaMemcpy(d_test, &test, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    // NOTE(review): BS/SR/MC are uninitialized here; the copies only warm the
    // device buffers and the kernels overwrite them before any read.
    cudaMemcpy(d_BS, BS, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_SR, SR, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_MC, MC, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    /*------------------BYTESUB--------------------*/
    ByteSub <<<4, threadsPerBlock >>>(d_test, d_BS);
    cudaMemcpy(&test, d_test, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(BS, d_BS, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost);
    /*------------------SHIFTROW--------------------*/
    State bytesub;
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            bytesub.bytes[i][j] = BS[i][j];
        }
    }
    int(*d_bytesub)[4];
    cudaMalloc((void**)&d_bytesub, (4 * 4) * sizeof(int));
    cudaMemcpy(d_bytesub, &bytesub, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_SR, SR, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    ShiftRow<<<4, threadsPerBlock >>>(d_bytesub, d_SR);
    cudaMemcpy(&bytesub, d_bytesub, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(SR, d_SR, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost);
    /*------------------MIXCOLUMN--------------------*/
    State shiftrow;
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            shiftrow.bytes[i][j] = SR[i][j];
        }
    }
    int(*d_shiftrow)[4];
    cudaMalloc((void**)&d_shiftrow, (4 * 4) * sizeof(int));
    cudaMemcpy(d_shiftrow, &shiftrow, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_SR, SR, (4 * 4) * sizeof(int), cudaMemcpyHostToDevice);
    MixColumn <<<4, threadsPerBlock >>>(d_shiftrow, d_MC);
    cudaMemcpy(&shiftrow, d_shiftrow, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(MC, d_MC, (4 * 4) * sizeof(int), cudaMemcpyDeviceToHost);
    // Dump each stage's state in hex.
    printf("test = \n");
    for (i = 0; i<4; i++) {
        for (j = 0; j<4; j++) {
            cout << hex << test.bytes[i][j] << endl;
        }
        printf("\n\n");
    }
    printf("BS = \n");
    for (i = 0; i<4; i++) {
        for (j = 0; j<4; j++) {
            cout << hex << BS[i][j] << endl;
        }
        printf("\n\n");
    }
    printf("SR = \n");
    for (i = 0; i<4; i++) {
        for (j = 0; j<4; j++) {
            cout << hex << SR[i][j] << endl;
        }
        printf("\n\n");
    }
    printf("MC = \n");
    for (i = 0; i<4; i++) {
        for (j = 0; j<4; j++) {
            cout << hex << MC[i][j] << endl;
        }
        printf("\n\n");
    }
    // Fix: free ALL device allocations (d_bytesub/d_shiftrow previously leaked).
    cudaFree(d_bytesub);
    cudaFree(d_shiftrow);
    cudaFree(d_test);
    cudaFree(d_BS);
    cudaFree(d_SR);
    cudaFree(d_MC);
    return 0;
}
|
10,119 | //
// global.cu
// Kernels for encoding/decoding polynomials.
//
// Copyright (c) 2021 Tatsuki Ono
//
// This software is released under the MIT License.
// https://opensource.org/licenses/mit-license.php
//
#include "device.cuh"
#include "global.cuh"
#include "kernel_params.cuh"
namespace atpqc_cuda::kyber::endecode_mt::global {
// Compress one polynomial to Dv bits per coefficient.
// Layout: threadIdx.y selects the input within the block, blockIdx.x the
// block of inputs; threadIdx.x splits each polynomial into per-thread chunks.
template <unsigned Dv>
__global__ void poly_compress(std::uint8_t* cbytes, std::size_t cbytes_pitch,
                              const short2* poly, unsigned ninputs) {
  constexpr unsigned dv = Dv;
  using kp = kernel_params::poly_de_compress<dv>;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  // Advance to this input's buffers, then to this thread's chunk.
  std::uint8_t* out = cbytes + cbytes_pitch * input_idx + kp::cbyte_per_thread * threadIdx.x;
  const short2* in = poly + (params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  device::poly_compress<dv> compress;
  compress(out, in);
}
// Decompress one polynomial from Dv bits per coefficient.
// Same grid layout as poly_compress: (blockIdx.x, threadIdx.y) selects the
// input, threadIdx.x selects the per-thread chunk.
template <unsigned Dv>
__global__ void poly_decompress(short2* poly, const std::uint8_t* cbytes,
                                std::size_t cbytes_pitch, unsigned ninputs) {
  constexpr unsigned dv = Dv;
  using kp = kernel_params::poly_de_compress<dv>;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  short2* out = poly + (params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  const std::uint8_t* in = cbytes + cbytes_pitch * input_idx + kp::cbyte_per_thread * threadIdx.x;
  device::poly_decompress<dv> decompress;
  decompress(out, in);
}
// Compress a K-vector of polynomials to Du bits per coefficient.
// (blockIdx.x, threadIdx.y) selects the input vector; threadIdx.x selects
// the per-thread chunk within it.
template <unsigned K, unsigned Du>
__global__ void polyvec_compress(std::uint8_t* cbytes, std::size_t cbytes_pitch,
                                 const short2* polyvec, unsigned ninputs) {
  constexpr unsigned k = K;
  constexpr unsigned du = Du;
  using kp = kernel_params::polyvec_de_compress<k, du>;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  std::uint8_t* out = cbytes + cbytes_pitch * input_idx + kp::cbyte_per_thread * threadIdx.x;
  const short2* in = polyvec + (k * params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  device::polyvec_compress<du> compress;
  compress(out, in);
}
// Decompress a K-vector of polynomials from Du bits per coefficient.
// Mirrors polyvec_compress's grid layout.
template <unsigned K, unsigned Du>
__global__ void polyvec_decompress(short2* polyvec, const std::uint8_t* cbytes,
                                   std::size_t cbytes_pitch, unsigned ninputs) {
  constexpr unsigned k = K;
  constexpr unsigned du = Du;
  using kp = kernel_params::polyvec_de_compress<k, du>;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  short2* out = polyvec + (k * params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  const std::uint8_t* in = cbytes + cbytes_pitch * input_idx + kp::cbyte_per_thread * threadIdx.x;
  device::polyvec_decompress<du> decompress;
  decompress(out, in);
}
// Serialize a K-vector of polynomials to its canonical byte encoding.
// (blockIdx.x, threadIdx.y) selects the input; threadIdx.x the chunk.
template <unsigned K>
__global__ void polyvec_tobytes(std::uint8_t* bytes, std::size_t bytes_pitch,
                                const short2* polyvec, unsigned ninputs) {
  constexpr unsigned k = K;
  using kp = kernel_params::polyvec_bytes<k>;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  std::uint8_t* out = bytes + bytes_pitch * input_idx + kp::byte_per_thread * threadIdx.x;
  const short2* in = polyvec + (k * params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  device::polyvec_tobytes tobytes;
  tobytes(out, in);
}
// Deserialize a K-vector of polynomials from its canonical byte encoding.
// Mirrors polyvec_tobytes's grid layout.
template <unsigned K>
__global__ void polyvec_frombytes(short2* polyvec, const std::uint8_t* bytes,
                                  std::size_t bytes_pitch, unsigned ninputs) {
  constexpr unsigned k = K;
  using kp = kernel_params::polyvec_bytes<k>;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  short2* out = polyvec + (k * params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  const std::uint8_t* in = bytes + bytes_pitch * input_idx + kp::byte_per_thread * threadIdx.x;
  device::polyvec_frombytes frombytes;
  frombytes(out, in);
}
// Expand a 32-byte message into a polynomial (one bit per coefficient).
// (blockIdx.x, threadIdx.y) selects the input; threadIdx.x the chunk.
__global__ void poly_frommsg(short2* poly, const std::uint8_t* msg,
                             std::size_t msg_pitch, unsigned ninputs) {
  using kp = kernel_params::poly_msg;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  short2* out = poly + (params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  const std::uint8_t* in = msg + msg_pitch * input_idx + kp::msgbyte_per_thread * threadIdx.x;
  device::poly_frommsg frommsg;
  frommsg(out, in);
}
// Compress a polynomial down to a 32-byte message (one bit per coefficient).
// Mirrors poly_frommsg's grid layout.
__global__ void poly_tomsg(std::uint8_t* msg, std::size_t msg_pitch,
                           const short2* poly, unsigned ninputs) {
  using kp = kernel_params::poly_msg;
  const unsigned input_idx = blockIdx.x * blockDim.y + threadIdx.y;
  if (input_idx >= ninputs) return;
  std::uint8_t* out = msg + msg_pitch * input_idx + kp::msgbyte_per_thread * threadIdx.x;
  const short2* in = poly + (params::n / 2) * input_idx + kp::coeff_per_thread * threadIdx.x;
  device::poly_tomsg tomsg;
  tomsg(out, in);
}
// Explicit instantiations for every (k, d) combination used by the Kyber
// parameter sets visible here: d_v in {4, 5}, (k, d_u) in {(2,10), (3,10), (4,11)}.
template __global__ void poly_compress<4>(std::uint8_t*, std::size_t,
                                          const short2*, unsigned);
template __global__ void poly_compress<5>(std::uint8_t*, std::size_t,
                                          const short2*, unsigned);
template __global__ void poly_decompress<4>(short2*, const std::uint8_t*,
                                            std::size_t, unsigned);
template __global__ void poly_decompress<5>(short2*, const std::uint8_t*,
                                            std::size_t, unsigned);
template __global__ void polyvec_compress<2, 10>(std::uint8_t*, std::size_t,
                                                 const short2*, unsigned);
template __global__ void polyvec_compress<3, 10>(std::uint8_t*, std::size_t,
                                                 const short2*, unsigned);
template __global__ void polyvec_compress<4, 11>(std::uint8_t*, std::size_t,
                                                 const short2*, unsigned);
template __global__ void polyvec_decompress<2, 10>(short2*, const std::uint8_t*,
                                                   std::size_t, unsigned);
template __global__ void polyvec_decompress<3, 10>(short2*, const std::uint8_t*,
                                                   std::size_t, unsigned);
template __global__ void polyvec_decompress<4, 11>(short2*, const std::uint8_t*,
                                                   std::size_t, unsigned);
template __global__ void polyvec_tobytes<2>(std::uint8_t*, std::size_t,
                                            const short2*, unsigned);
template __global__ void polyvec_tobytes<3>(std::uint8_t*, std::size_t,
                                            const short2*, unsigned);
template __global__ void polyvec_tobytes<4>(std::uint8_t*, std::size_t,
                                            const short2*, unsigned);
template __global__ void polyvec_frombytes<2>(short2*, const std::uint8_t*,
                                              std::size_t, unsigned);
template __global__ void polyvec_frombytes<3>(short2*, const std::uint8_t*,
                                              std::size_t, unsigned);
template __global__ void polyvec_frombytes<4>(short2*, const std::uint8_t*,
                                              std::size_t, unsigned);
} // namespace atpqc_cuda::kyber::endecode_mt::global
|
10,120 | #include <cstdio>
#include <cstdlib>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with the call site's file/line; optionally terminate
// the process with the error code. Used through the CUDA_SAFE_CALL macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;  // nothing to report
   fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort) exit(code);
}
#define NUM_THREADS_PER_BLOCK 256
#define NUM_BLOCKS 1
#define PRINT_TIME 1
#define SM_ARR_LEN 50000
#define TOL 1e-6
#define IMUL(a, b) __mul24(a, b)
void initializeArray1D(float *arr, int len, int seed);
// Element-wise combine: result[i] = 1e-6*x[i] + 1e-7*y[i] + 0.25.
// Grid-stride loop, so any launch configuration covers the whole array.
__global__ void kernel_add (int arrLen, float* x, float* y, float* result) {
  const int startIdx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
  const int stride = IMUL(blockDim.x, gridDim.x);
  for (int idx = startIdx; idx < arrLen; idx += stride) {
    result[idx] = (1e-6 * x[idx] ) + (1e-7 * y[idx]) + 0.25;
  }
}
// Benchmark kernel_add against a CPU reference.
// Fixes: use fabs() for the float tolerance test (bare abs() can bind to the
// integer overload, truncating all sub-1.0 differences to zero and silently
// passing the test), and free h_result_gold (it leaked).
int main(int argc, char **argv){
  int arrLen = 0;
  // GPU Timing variables
  cudaEvent_t start, stop;
  float elapsed_gpu;
  // Arrays on GPU global memory
  float *d_x;
  float *d_y;
  float *d_result;
  // Arrays on the host memory
  float *h_x;
  float *h_y;
  float *h_result;       // GPU result copied back to the host
  float *h_result_gold;  // CPU reference result
  int i, errCount = 0, zeroCount = 0;
  // Array length: first CLI argument, or the compiled-in default.
  if (argc > 1) {
    arrLen = atoi(argv[1]);
  }
  else {
    arrLen = SM_ARR_LEN;
  }
  printf("Length of the array = %d\n", arrLen);
  // Select GPU
  CUDA_SAFE_CALL(cudaSetDevice(0));
  // Report basic properties of every visible device.
  int nDevices;
  cudaGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, i);
    printf("Device Number: %d\n", i);
    printf(" Device name: %s\n", prop.name);
    printf(" Memory Clock Rate (KHz): %d\n",
           prop.memoryClockRate);
    printf(" Memory Bus Width (bits): %d\n",
           prop.memoryBusWidth);
    printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
           2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
  }
  // Allocate GPU memory
  size_t allocSize = arrLen * sizeof(float);
  CUDA_SAFE_CALL(cudaMalloc((void **)&d_x, allocSize));
  CUDA_SAFE_CALL(cudaMalloc((void **)&d_y, allocSize));
  CUDA_SAFE_CALL(cudaMalloc((void **)&d_result, allocSize));
  // Allocate arrays on host memory
  h_x = (float *) malloc(allocSize);
  h_y = (float *) malloc(allocSize);
  h_result = (float *) malloc(allocSize);
  h_result_gold = (float *) malloc(allocSize);
  // Initialize the host arrays
  printf("\nInitializing the arrays ...");
  // Arrays are initialized with a known seed for reproducibility
  initializeArray1D(h_x, arrLen, 2453);
  initializeArray1D(h_y, arrLen, 1467);
  printf("\t... done\n\n");
#if PRINT_TIME
  // Create the cuda events
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
#endif
  // Transfer the arrays to the GPU memory
  CUDA_SAFE_CALL(cudaMemcpy(d_x, h_x, allocSize, cudaMemcpyHostToDevice));
  CUDA_SAFE_CALL(cudaMemcpy(d_y, h_y, allocSize, cudaMemcpyHostToDevice));
  // Launch the kernel, timing only the kernel itself.
  cudaEventRecord(start, 0);
  kernel_add<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(arrLen, d_x, d_y, d_result);
  cudaEventRecord(stop,0);
  // Check for errors during launch
  CUDA_SAFE_CALL(cudaPeekAtLastError());
  // Transfer the results back to the host
  CUDA_SAFE_CALL(cudaMemcpy(h_result, d_result, allocSize, cudaMemcpyDeviceToHost));
#if PRINT_TIME
  // Stop and destroy the timer
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsed_gpu, start, stop);
  printf("\nGPU time: %f (msec)\n", elapsed_gpu);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
#endif
  // Compute the results on the host
  for(i = 0; i < arrLen; i++) {
    h_result_gold[i] = (1e-6 * h_x[i]) + (1e-7 * h_y[i]) + 0.25;
  }
  // Compare the results. zeroCount is a heuristic: the formula's +0.25 term
  // makes an exact 0 a strong sign the kernel never wrote the element.
  for(i = 0; i < arrLen; i++) {
    // Fix: fabs() keeps the fractional difference; abs() could truncate it.
    if (fabs(h_result_gold[i] - h_result[i]) > TOL) {
      errCount++;
    }
    if (h_result[i] == 0) {
      zeroCount++;
    }
  }
  if (errCount > 0) {
    printf("\n@ERROR: TEST FAILED: %d results did not matched\n", errCount);
  }
  else if (zeroCount > 0){
    printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
  }
  else {
    printf("\nTEST PASSED: All results matched\n");
  }
  // Free-up device and host memory
  CUDA_SAFE_CALL(cudaFree(d_x));
  CUDA_SAFE_CALL(cudaFree(d_y));
  CUDA_SAFE_CALL(cudaFree(d_result));
  free(h_x);
  free(h_y);
  free(h_result);
  free(h_result_gold);  // fix: previously leaked
  return 0;
}
// Fill arr[0..len) with pseudo-random floats. Seeding the C PRNG with a
// fixed value makes successive runs reproducible.
void initializeArray1D(float *arr, int len, int seed) {
  srand(seed);
  for (int idx = 0; idx < len; idx++) {
    arr[idx] = (float) rand();
  }
}
|
10,121 | #include <stdio.h>
// Query the active CUDA device and print its capability fields.
int main()
{
    int deviceCount;
    cudaGetDeviceCount( &deviceCount );
    int activeDevice;
    cudaGetDevice( &activeDevice );
    struct cudaDeviceProp prop;
    cudaGetDeviceProperties( &prop, activeDevice );
    // Dump the fields in the same order they appear in cudaDeviceProp docs.
    printf("Device Count = %d\n", deviceCount);
    printf("Device Number = %d\n", activeDevice);
    printf("Device Name = %s\n", prop.name );
    printf("Global Memory = %zu B = %f GB\n", prop.totalGlobalMem, prop.totalGlobalMem/(1024.0*1024.0*1024.0) );
    printf("Shared Mem/block = %zu\n", prop.sharedMemPerBlock );
    printf("Registers /block = %d\n", prop.regsPerBlock );
    printf("Warp Size = %d\n", prop.warpSize );
    printf("Mem Pitch = %zu\n", prop.memPitch );
    printf("Max Threads/block = %d\n", prop.maxThreadsPerBlock );
    printf("Max Threads Dim = (%d, %d, %d)\n",
           prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("Max Grid Size = (%d, %d, %d)\n",
           prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("Total Const Mem = %zu\n", prop.totalConstMem );
    printf("Major = %d\n", prop.major );
    printf("Minor = %d\n", prop.minor );
    printf("Clock Rate = %.2f MHz\n", prop.clockRate/1000.0 );
    printf("Text Alignment = %zu\n", prop.textureAlignment);
    printf("Device Overlap = %d\n", prop.deviceOverlap);
    printf("MultiProcessorCount = %d\n", prop.multiProcessorCount);
    printf("Kernel Exec Timeout Enabled = %d\n", prop.kernelExecTimeoutEnabled);
    printf("Integrated GPU = %d\n", prop.integrated);
    printf("Can Map Host Memory = %d\n", prop.canMapHostMemory);
    printf("Compute Mode = %d\n", prop.computeMode);
    printf("Concurrent Kernels = %d\n", prop.concurrentKernels);
    printf("ECC Enabled = %d\n", prop.ECCEnabled);
    printf("PCI Bus ID = %d\n", prop.pciBusID);
    printf("PCI Device ID = %d\n", prop.pciDeviceID);
    printf("TCC Driver = %d\n", prop.tccDriver);
    return 0;
}
|
10,122 | #include "includes.h"
// Map a thread to the pivot index it probes in the current search window.
// NOTE(review): (set_size + num_threads) / num_threads always equals
// floor(set_size/num_threads) + 1, which matches ceil-div only when
// num_threads does not divide set_size (one larger otherwise). The caller
// clamps out-of-range indices, so the overshoot only widens pivot spacing —
// confirm this is intended before "fixing" it to a standard ceil-div.
__device__ int get_index_to_check(int thread, int num_threads, int set_size, int offset) {
    // Integer division trick to round up
    return (((set_size + num_threads) / num_threads) * thread) + offset;
}
// P-ary search: each pass, every thread probes one pivot of the current
// window and the winners narrow the window, until set_size reaches 0.
// On exit, ret_val[0] holds the index of `search` (or -1 if absent) and
// ret_val[1] the last window offset.
// NOTE(review): __syncthreads() only synchronizes within one block, but
// ret_val lives in global memory and is read and written by every thread of
// the grid — with more than one block the passes race. Confirm the launch
// site uses a single block (num_threads then equals blockDim.x).
__global__ void p_ary_search(int search, int array_length, int *arr, int *ret_val)
{
    const int num_threads = blockDim.x * gridDim.x;
    const int thread = blockIdx.x * blockDim.x + threadIdx.x;
    int set_size = array_length;
    // All threads store the same initial values, so these concurrent writes
    // are benign.
    ret_val[0] = -1;  // result index, -1 until found
    ret_val[1] = 0;   // current window offset
    while (set_size != 0)
    {
        // Snapshot the offset chosen by the previous pass.
        int offset = ret_val[1];
        __syncthreads();
        // Get the next index to check
        int index_to_check = get_index_to_check(thread, num_threads, set_size, offset);
        // If the index is outside the bounds of the array do not check it
        if (index_to_check < array_length)
        {
            // If the next index is outside the bounds of the array, then set it to maximum array size
            int next_index_to_check = get_index_to_check(thread + 1, num_threads, set_size, offset);
            if (next_index_to_check >= array_length)
            {
                next_index_to_check = array_length - 1;
            }
            // If the key falls strictly between this pivot and the next one,
            // this thread owns the narrowed window: publish its offset.
            if (search > arr[index_to_check] && (search < arr[next_index_to_check]))
            {
                ret_val[1] = index_to_check;
            }
            else if (search == arr[index_to_check])
            {
                // Set the return var if find it
                ret_val[0] = index_to_check;
            }
        }
        // Since this is a parallel array search divide by our total threads to get the next set size
        set_size = set_size / num_threads;
        // Sync up so no threads jump ahead
        __syncthreads();
    }
}
10,123 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include <thrust/sort.h>
// Minimal Thrust demo: negate a small vector on the GPU and print it
// before and after.
int main()
{
    // Host-side vector holding 0..4.
    thrust::host_vector<int> host_vec(5);
    for (int idx = 0; idx < static_cast<int>(host_vec.size()); ++idx)
        host_vec[idx] = idx;
    std::cout << "Host vector before" << std::endl;
    std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;
    // Mirror to the device, negate every element on the GPU, copy back.
    thrust::device_vector<int> dev_vec(host_vec);
    thrust::transform(dev_vec.begin(), dev_vec.end(), dev_vec.begin(), thrust::negate<int>());
    thrust::copy(dev_vec.begin(), dev_vec.end(), host_vec.begin());
    std::cout << "Host vector After" << std::endl;
    std::copy(host_vec.begin(), host_vec.end(), std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;
}
10,124 | #include "includes.h"
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i in [0, len).
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
  const int gid = threadIdx.x + blockDim.x * blockIdx.x;
  // Guard: the grid may be padded past len.
  if (gid >= len)
    return;
  out[gid] = in1[gid] + in2[gid];
}
10,125 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// GPU SM copy benchmark tests dtoh/htod data transfer bandwidth initiated by GPU SM.
#include <chrono>
#include <cstdio>
#include <cstring>
#include <string>
#include <cuda.h>
#include <cuda_runtime.h>
// Argument indices used when parsing argv; kNumArgs doubles as the expected
// argc value.
enum class ArgIdx { kGpuId = 1, kCopyDirection, kSize, kNumLoops, kNumArgs };
// Stored arguments for this program.
struct Args {
// ID of GPU used in this benchmark.
int gpu_id = 0;
// Data transfer direction, can be "dtoh" or "htod".
std::string copy_direction;
// Data buffer size in bytes.
uint64_t size = 0;
// Number of loops in data transfer benchmark.
uint64_t num_loops = 0;
};
// All buffers used by the benchmark; allocated by PrepareBuf and released by
// DestroyBuf.
struct Buffers {
// Original data buffer (host-side reference pattern).
uint8_t *data_buf = nullptr;
// Buffer to validate the correctness of data transfer.
uint8_t *check_buf = nullptr;
// Data buffer in host memory (pinned via cudaHostRegister).
uint8_t *host_buf = nullptr;
// Device pointer of the data buffer in host memory.
uint8_t *host_buf_dev_ptr = nullptr;
// Data buffer in device memory
uint8_t *dev_buf = nullptr;
};
// Print usage of this program.
// Print command-line usage for this benchmark.
void PrintUsage() {
  printf("Usage: gpu_sm_copy <gpu-id> <copy-direction: dtoh|htod> <size> <num_loops>\n");
}
// Prepare data buffers to be used.
// Allocate and initialize all benchmark buffers:
//  - data_buf: host reference pattern (byte i -> i % 256)
//  - check_buf: zeroed host buffer used by CheckBuf for validation
//  - host_buf: host buffer, pinned and device-mapped
//  - dev_buf:  device buffer
// The copy source (dev_buf for "dtoh", host_buf for "htod") is seeded with
// the reference pattern. Returns 0 on success, -1 on any failure.
// Fixes: 64-bit pattern-fill index (a plain int overflows for sizes larger
// than 2 GiB) and explicit malloc failure checks.
int PrepareBuf(const Args &args, Buffers *buffers) {
  cudaError_t cuda_err = cudaSuccess;
  // Host allocations, checked before use.
  buffers->data_buf = static_cast<uint8_t *>(malloc(args.size));
  buffers->check_buf = static_cast<uint8_t *>(malloc(args.size));
  buffers->host_buf = static_cast<uint8_t *>(malloc(args.size));
  if (buffers->data_buf == nullptr || buffers->check_buf == nullptr ||
      buffers->host_buf == nullptr) {
    fprintf(stderr, "PrepareBuf::malloc failed for %lu bytes\n",
            static_cast<unsigned long>(args.size));
    return -1;
  }
  // Generate data to copy (64-bit index to match args.size).
  for (uint64_t i = 0; i < args.size; i++) {
    buffers->data_buf[i] = static_cast<uint8_t>(i % 256);
  }
  // Reset check buffer
  memset(buffers->check_buf, 0, args.size);
  // Pin the host buffer and obtain its device-side alias for SM access.
  cuda_err = cudaHostRegister(buffers->host_buf, args.size, cudaHostRegisterMapped);
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "PrepareBuf::cudaHostRegister error: %d\n", cuda_err);
    return -1;
  }
  cuda_err = cudaHostGetDevicePointer((void **)(&(buffers->host_buf_dev_ptr)), buffers->host_buf, 0);
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "PrepareBuf::cudaHostGetDevicePointer error: %d\n", cuda_err);
    return -1;
  }
  // Allocate device buffer
  cuda_err = cudaMalloc(&(buffers->dev_buf), args.size);
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "PrepareBuf::cudaMalloc error: %d\n", cuda_err);
    return -1;
  }
  // Seed the source buffer of the requested transfer direction.
  if (args.copy_direction == "dtoh") {
    cuda_err = cudaMemcpy(buffers->dev_buf, buffers->data_buf, args.size, cudaMemcpyDefault);
  } else if (args.copy_direction == "htod") {
    cuda_err = cudaMemcpy(buffers->host_buf, buffers->data_buf, args.size, cudaMemcpyDefault);
  } else {
    fprintf(stderr, "Unrecognized copy direction: %s\n", args.copy_direction.c_str());
    return -1;
  }
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "PrepareBuf::cudaMemcpy error: %d\n", cuda_err);
    return -1;
  }
  return 0;
}
// Validate the result of data transfer.
// Validate the benchmark's data transfer: pull the destination buffer into
// check_buf and compare it byte-for-byte against the reference pattern.
// Returns 0 on match, -1 on copy error or mismatch.
int CheckBuf(const Args &args, Buffers *buffers) {
  cudaError_t status = cudaSuccess;
  // The benchmark's destination depends on the direction.
  if (args.copy_direction == "dtoh") {
    status = cudaMemcpy(buffers->check_buf, buffers->host_buf, args.size, cudaMemcpyDefault);
  } else if (args.copy_direction == "htod") {
    status = cudaMemcpy(buffers->check_buf, buffers->dev_buf, args.size, cudaMemcpyDefault);
  }
  if (status != cudaSuccess) {
    fprintf(stderr, "CheckBuf::cudaMemcpy error: %d\n", status);
    return -1;
  }
  // Byte-wise comparison against the original pattern.
  if (memcmp(buffers->data_buf, buffers->check_buf, args.size) != 0) {
    fprintf(stderr, "Memory check failed\n");
    return -1;
  }
  return 0;
}
// Destroy data buffers
// Release every buffer owned by Buffers. Continues past individual failures
// and returns -1 if any CUDA call failed, 0 otherwise.
int DestroyBuf(Buffers *buffers) {
  int result = 0;
  cudaError_t status = cudaSuccess;
  // Plain host allocations.
  if (buffers->data_buf != nullptr)
    free(buffers->data_buf);
  if (buffers->check_buf != nullptr)
    free(buffers->check_buf);
  // Device buffer.
  if (buffers->dev_buf != nullptr) {
    status = cudaFree(buffers->dev_buf);
    if (status != cudaSuccess) {
      fprintf(stderr, "DestroyBuf::cudaFree error: %d\n", status);
      result = -1;
    }
  }
  // Pinned host buffer: unregister before freeing.
  if (buffers->host_buf != nullptr) {
    status = cudaHostUnregister(buffers->host_buf);
    if (status != cudaSuccess) {
      fprintf(stderr, "DestroyBuf::cudaHostUnregister error: %d\n", status);
      result = -1;
    }
    free(buffers->host_buf);
    buffers->host_buf_dev_ptr = nullptr;
  }
  return result;
}
// Unroll depth in SM copy kernel
#define NUM_LOOP_UNROLL 2
// Thread block size
#define NUM_THREADS_IN_BLOCK 128
// Fetch a ulong2 from source memory and write to register
// This kernel references the implementation in
// 1) NCCL:
// https://github.com/NVIDIA/nccl/blob/7e515921295adaab72adf56ea71a0fafb0ecb5f3/src/collectives/device/common_kernel.h#L483
// 2) RCCL:
// https://github.com/ROCmSoftwarePlatform/rccl/blob/5c8380ff5b5925cae4bce00b1879a5f930226e8d/src/collectives/device/common_kernel.h#L268
// Load a 16-byte ulong2 from global memory into registers.
// HIP path uses plain loads; the NVIDIA path uses a volatile vectorized PTX
// load so the compiler cannot cache or reorder it away.
inline __device__ void FetchULong2(ulong2 &v, const ulong2 *p) {
#if defined(__HIP_PLATFORM_HCC__) || defined(__HCC__) || defined(__HIPCC__)
  v.x = p->x;
  v.y = p->y;
#else
  asm volatile("ld.volatile.global.v2.u64 {%0,%1}, [%2];" : "=l"(v.x), "=l"(v.y) : "l"(p) : "memory");
#endif
}
// Store a ulong2 from register and write to target memory
// This kernel references the implementation in
// 1) NCCL:
// https://github.com/NVIDIA/nccl/blob/7e515921295adaab72adf56ea71a0fafb0ecb5f3/src/collectives/device/common_kernel.h#L486
// 2) RCCL:
// https://github.com/ROCmSoftwarePlatform/rccl/blob/5c8380ff5b5925cae4bce00b1879a5f930226e8d/src/collectives/device/common_kernel.h#L276
// Store a 16-byte ulong2 from registers to global memory.
// HIP path uses plain stores; the NVIDIA path uses a volatile vectorized PTX
// store, mirroring FetchULong2.
inline __device__ void StoreULong2(ulong2 *p, ulong2 &v) {
#if defined(__HIP_PLATFORM_HCC__) || defined(__HCC__) || defined(__HIPCC__)
  p->x = v.x;
  p->y = v.y;
#else
  asm volatile("st.volatile.global.v2.u64 [%0], {%1,%2};" ::"l"(p), "l"(v.x), "l"(v.y) : "memory");
#endif
}
// Fetch data from source memory into register first, and then write them to target memory
// Stride set to thread block size to best utilize cache
// Copy NUM_LOOP_UNROLL ulong2 elements per thread: stage all loads in
// registers, then issue all stores. Consecutive unrolled elements are
// blockDim.x apart so each pass stays coalesced within the block.
__global__ void SMCopyKernel(ulong2 *tgt, const ulong2 *src) {
  const uint64_t base = blockIdx.x * blockDim.x * NUM_LOOP_UNROLL + threadIdx.x;
  ulong2 staged[NUM_LOOP_UNROLL];
#pragma unroll
  for (uint64_t u = 0; u < NUM_LOOP_UNROLL; u++)
    FetchULong2(staged[u], src + base + u * blockDim.x);
#pragma unroll
  for (uint64_t u = 0; u < NUM_LOOP_UNROLL; u++)
    StoreULong2(tgt + base + u * blockDim.x, staged[u]);
}
// Run SM copy kernel benchmark
// Launch SMCopyKernel num_loops times on a private stream and report the
// achieved bandwidth. Returns 0 on success, -1 on any CUDA failure.
// Fixes: the loop counter now matches args.num_loops' uint64_t type, and the
// stream is destroyed on the synchronize-failure path (it leaked before).
int BenchSMCopyKernel(const Args &args, Buffers *buffers) {
  cudaError_t cuda_err = cudaSuccess;
  cudaStream_t stream;
  uint8_t *src_buf = nullptr;
  uint8_t *tgt_buf = nullptr;
  // Determine source buffer and target buffer from the copy direction.
  if (args.copy_direction == "dtoh") {
    src_buf = buffers->dev_buf;
    tgt_buf = buffers->host_buf_dev_ptr;
  } else {
    src_buf = buffers->host_buf_dev_ptr;
    tgt_buf = buffers->dev_buf;
  }
  // The kernel copies a fixed number of bytes per thread block, so the total
  // size must divide evenly.
  uint64_t num_elements_in_thread_block = NUM_LOOP_UNROLL * NUM_THREADS_IN_BLOCK;
  uint64_t num_bytes_in_thread_block = num_elements_in_thread_block * sizeof(ulong2);
  if (args.size % num_bytes_in_thread_block) {
    fprintf(stderr, "Data size should be multiple of %lu\n", num_bytes_in_thread_block);
    return -1;
  }
  // Create stream to launch kernels
  cuda_err = cudaStreamCreate(&stream);
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "BenchSMCopyKernel::cudaStreamCreate error: %d\n", cuda_err);
    return -1;
  }
  // Launch kernels and collect running time
  uint64_t num_thread_blocks = args.size / num_bytes_in_thread_block;
  auto start = std::chrono::steady_clock::now();
  for (uint64_t i = 0; i < args.num_loops; i++) {
    SMCopyKernel<<<num_thread_blocks, NUM_THREADS_IN_BLOCK, 0, stream>>>(reinterpret_cast<ulong2 *>(tgt_buf),
                                                                         reinterpret_cast<ulong2 *>(src_buf));
  }
  cuda_err = cudaStreamSynchronize(stream);
  auto end = std::chrono::steady_clock::now();
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "BenchSMCopyKernel::cudaStreamSynchronize error: %d\n", cuda_err);
    cudaStreamDestroy(stream);  // fix: don't leak the stream on this path
    return -1;
  }
  // Destroy stream
  cuda_err = cudaStreamDestroy(stream);
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "BenchSMCopyKernel::cudaStreamDestroy error: %d\n", cuda_err);
    return -1;
  }
  // Calculate and display bandwidth if no problem
  double time_in_sec = std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count();
  printf("Bandwidth (GB/s): %g\n", args.size * args.num_loops / time_in_sec / 1e9);
  return 0;
}
// Parse arguments, run the SM-copy bandwidth benchmark, validate the result,
// and always release buffers via the shared destroy_buf exit path.
int main(int argc, char **argv) {
  int ret = 0;
  int destroy_buf_ret = 0;
  cudaError_t cuda_err = cudaSuccess;
  Args args;
  Buffers buffers;
  // Exactly kNumArgs argv entries (program name + 4 arguments) are required.
  if (argc != static_cast<int>(ArgIdx::kNumArgs)) {
    PrintUsage();
    return -1;
  }
  args.gpu_id = std::stoi(argv[static_cast<int>(ArgIdx::kGpuId)]);
  args.copy_direction = argv[static_cast<int>(ArgIdx::kCopyDirection)];
  args.size = std::stoul(argv[static_cast<int>(ArgIdx::kSize)]);
  args.num_loops = std::stoul(argv[static_cast<int>(ArgIdx::kNumLoops)]);
  // Set device context
  cuda_err = cudaSetDevice(args.gpu_id);
  if (cuda_err != cudaSuccess) {
    fprintf(stderr, "cudaSetDevice error: %d\n", cuda_err);
    goto destroy_buf;
  }
  // Prepare data buffers
  ret = PrepareBuf(args, &buffers);
  if (ret != 0) {
    goto destroy_buf;
  }
  // Run benchmark
  ret = BenchSMCopyKernel(args, &buffers);
  if (ret != 0) {
    goto destroy_buf;
  }
  // Validate data
  ret = CheckBuf(args, &buffers);
destroy_buf:
  // Destroy buffers; report its failure only if the run itself succeeded.
  destroy_buf_ret = DestroyBuf(&buffers);
  if (ret == 0) {
    ret = destroy_buf_ret;
  }
  return ret;
}
|
10,126 | #include<iostream>
using namespace std;
#define THREADS_PER_BLOCK 256
// Each block writes the integer mean of its slice of a_d[0..n) into
// b_d[blockIdx.x]. All threads of a block redundantly compute the same sum.
// Fix: the original divided every block's sum by blockDim.x, so the final
// partial block (fewer than blockDim.x elements) got a wrong mean; divide by
// the number of elements actually summed instead. Blocks entirely past n
// leave their output untouched (it is never read by the caller's reduction).
__global__ void mean_per_block(int *a_d,int *b_d,int n){
    int block = blockDim.x*blockIdx.x;   // first element owned by this block
    int last = min(block+blockDim.x,n);  // one past the last owned element
    int sum=0;
    for(int i=block;i<last;i++){
        sum+=a_d[i];
    }
    int count = last - block;
    if (count > 0)
        b_d[blockIdx.x]=sum/count;
}
// Iteratively reduce an array 1..n to a single value by replacing each block
// of elements with its mean until one value remains.
// NOTE: with integer data this computes a mean-of-means, which only
// approximates the true overall mean.
// Fixes: host array and both device buffers were leaked; the kernel was also
// launched with the initial block count on every pass even as n shrank.
int main() {
    int n;
    cout<<"Enter the no of elements";
    cin>>n;
    // Source data: 1..n.
    int *arr = new int[n];
    for(int i=0;i<n;i++){
        arr[i]=i+1;
    }
    int no_of_blocks = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
    int size = n*sizeof(int);
    int *arr_d,*b_d;
    cudaMalloc(&arr_d,size);
    cudaMalloc(&b_d,no_of_blocks*sizeof(int));
    cudaMemcpy(arr_d,arr,size,cudaMemcpyHostToDevice);
    while(n>1){
        // Launch only as many blocks as the current n needs.
        int blocks = (n+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
        mean_per_block<<<blocks,THREADS_PER_BLOCK>>>(arr_d,b_d,n);
        n=blocks;
        cudaMemcpy(arr_d,b_d,blocks*sizeof(int),cudaMemcpyDeviceToDevice);
    }
    int ans;
    cudaMemcpy(&ans,arr_d,sizeof(int),cudaMemcpyDeviceToHost);
    cout<<ans;
    // Fix: release host and device memory (all three leaked before).
    delete[] arr;
    cudaFree(arr_d);
    cudaFree(b_d);
}
10,127 | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <limits>
#include <sys/time.h>
#define NUM_PARTICLES 100000
#define NUM_ITERATIONS 1000
#define BLOCK_SIZE 256
#define MICROSECONDS(start, end) ((end.tv_sec - start.tv_sec) * 1000000LL + end.tv_usec - start.tv_usec)
#define MILLISECONDS(start, end) MICROSECONDS(start, end) / 1000.0
#define SECONDS(start, end) MILLISECONDS(start, end) / 1000.0
typedef struct
{
float3 position;
float3 velocity;
} Particle;
// Host reference: advance every particle's position by velocity * dt.
void cpu_timestep(Particle *particles, const float dt)
{
    for (unsigned int idx = 0; idx < NUM_PARTICLES; idx++)
    {
        Particle &p = particles[idx];
        p.position.x += p.velocity.x * dt;
        p.position.y += p.velocity.y * dt;
        p.position.z += p.velocity.z * dt;
    }
}
// One thread per particle: advance position by velocity * dt.
__global__ void gpu_timestep(Particle *particles, const float dt)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= NUM_PARTICLES)
        return;  // guard the padded tail of the grid
    Particle &p = particles[idx];
    p.position.x += p.velocity.x * dt;
    p.position.y += p.velocity.y * dt;
    p.position.z += p.velocity.z * dt;
}
// Time NUM_ITERATIONS GPU timesteps over NUM_PARTICLES particles.
// Fixes: the benchmarked device buffer gpu_particles was never initialized
// (the kernel ran over garbage floats) and was never freed.
int main(int argc, char **argv)
{
    struct timeval start, end;
    const float dt = 1.0;
    // Initialize particle data in unified (managed) memory.
    Particle *particles;
    cudaMallocManaged(&particles, NUM_PARTICLES * sizeof(Particle));
    for (unsigned int i = 0; i < NUM_PARTICLES; i++)
    {
        particles[i].position.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        particles[i].position.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        particles[i].position.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        particles[i].velocity.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        particles[i].velocity.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        particles[i].velocity.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
    // Device-only buffer used by the benchmark loop.
    Particle *gpu_particles;
    cudaMalloc(&gpu_particles, NUM_PARTICLES * sizeof(Particle));
    // Fix: seed the device buffer with valid particles before benchmarking
    // (the copy happens outside the timed region).
    cudaMemcpy(gpu_particles, particles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDefault);
    // Run simulation.
    printf("Running simulation... ");
    gettimeofday(&start, NULL);
    for (unsigned int i = 0; i < NUM_ITERATIONS; i++)
    {
        gpu_timestep<<<(NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(gpu_particles, dt);
        cudaDeviceSynchronize();
    }
    gettimeofday(&end, NULL);
    printf("Done! Took %lfs.\n", SECONDS(start, end));
    // Free resources (fix: gpu_particles previously leaked).
    cudaFree(particles);
    cudaFree(gpu_particles);
    return 0;
}
10,128 | // Copyright (c) OpenMMLab. All rights reserved.
#include <cstdint>
namespace mmdeploy {
namespace operation {
namespace cuda {
namespace impl {
// Copy an interleaved-channel region of interest out of src into dst.
// One thread per destination pixel on a 2-D launch; (offset_w, offset_h) is
// the ROI's top-left corner in the source image.
template <typename T, int channels>
__global__ void crop(const T *src, int src_w, T *dst, int dst_h, int dst_w, int offset_h,
                     int offset_w) {
  const int dst_x = blockIdx.x * blockDim.x + threadIdx.x;
  const int dst_y = blockIdx.y * blockDim.y + threadIdx.y;
  // The grid may overhang the ROI on the right/bottom edges.
  if (dst_x >= dst_w || dst_y >= dst_h) return;
  const T *src_px = src + (((dst_y + offset_h) * src_w) + (dst_x + offset_w)) * channels;
  T *dst_px = dst + ((dst_y * dst_w) + dst_x) * channels;
  for (int c = 0; c < channels; ++c) {
    dst_px[c] = src_px[c];
  }
}
// Host-side launcher: run the crop kernel over a dst_w x dst_h ROI on the
// given stream with a 32x8 thread block.
template <typename T, int channels>
void Crop(const T *src, int src_w, T *dst, int dst_h, int dst_w, int offset_h, int offset_w,
          cudaStream_t stream) {
  const dim3 threads(32, 8);
  // Ceil-divide so partial tiles at the edges are still covered.
  const dim3 blocks((dst_w + threads.x - 1) / threads.x,
                    (dst_h + threads.y - 1) / threads.y);
  crop<T, channels>
      <<<blocks, threads, 0, stream>>>(src, src_w, dst, dst_h, dst_w, offset_h, offset_w);
}
// Explicit instantiations for the element types and channel counts this
// library exposes: uint8/float x {1, 3} channels.
template void Crop<uint8_t, 3>(const uint8_t *src, int src_w, uint8_t *dst, int dst_h, int dst_w,
                               int offset_h, int offset_w, cudaStream_t stream);
template void Crop<uint8_t, 1>(const uint8_t *src, int src_w, uint8_t *dst, int dst_h, int dst_w,
                               int offset_h, int offset_w, cudaStream_t stream);
template void Crop<float, 3>(const float *src, int src_w, float *dst, int dst_h, int dst_w,
                             int offset_h, int offset_w, cudaStream_t stream);
template void Crop<float, 1>(const float *src, int src_w, float *dst, int dst_h, int dst_w,
                             int offset_h, int offset_w, cudaStream_t stream);
} // namespace impl
} // namespace cuda
} // namespace operation
} // namespace mmdeploy
|
10,129 | #include <iostream>
#include <cufft.h>
using std::cout;
using std::endl;
int main(int argc, char *argv[])
{
    // Build three 8-sample test signals: sig1 is real-valued, sig2 is purely
    // imaginary, and sig3 combines sig1's real part with sig2's imaginary part.
    cufftComplex *sig1 = new cufftComplex[8];
    cufftComplex *sig2 = new cufftComplex[8];
    cufftComplex *sig3 = new cufftComplex[8];
    for (int ii = 0; ii < 8; ii++) {
        sig1[ii].x = 1.0f * (ii % 2);
        sig1[ii].y = 0.0f;
        sig2[ii].x = 0.0f;
        sig2[ii].y = -1.0f * (ii % 2);
        sig3[ii].x = sig1[ii].x;
        sig3[ii].y = sig2[ii].y;
    }
    // Device copies of the three signals (transformed in place below).
    cufftComplex *d_s1;
    cufftComplex *d_s2;
    cufftComplex *d_s3;
    cudaMalloc((void**)&d_s1, 8 * sizeof(cufftComplex));
    cudaMalloc((void**)&d_s2, 8 * sizeof(cufftComplex));
    cudaMalloc((void**)&d_s3, 8 * sizeof(cufftComplex));
    cudaMemcpy(d_s1, sig1, 8 * sizeof(cufftComplex), cudaMemcpyHostToDevice);
    cudaMemcpy(d_s2, sig2, 8 * sizeof(cufftComplex), cudaMemcpyHostToDevice);
    cudaMemcpy(d_s3, sig3, 8 * sizeof(cufftComplex), cudaMemcpyHostToDevice);
    // One 8-point complex-to-complex plan, reused for all three transforms.
    cufftHandle fftplan;
    cufftPlan1d(&fftplan, 8, CUFFT_C2C, 1);
    cufftExecC2C(fftplan, d_s1, d_s1, CUFFT_FORWARD);
    cufftExecC2C(fftplan, d_s2, d_s2, CUFFT_FORWARD);
    cufftExecC2C(fftplan, d_s3, d_s3, CUFFT_FORWARD);
    // Blocking cudaMemcpy synchronizes with the preceding FFT work.
    cufftComplex *fft1 = new cufftComplex[8];
    cufftComplex *fft2 = new cufftComplex[8];
    cufftComplex *fft3 = new cufftComplex[8];
    cudaMemcpy(fft1, d_s1, 8 * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
    cudaMemcpy(fft2, d_s2, 8 * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
    cudaMemcpy(fft3, d_s3, 8 * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
    cout << "Signal 1: " << endl;
    for (int ii = 0; ii < 8; ii++) {
        cout << sig1[ii].x << " + i*" << sig1[ii].y << endl;
    }
    cout << "Signal 1 FFT: " << endl;
    for (int ii = 0; ii < 8; ii++) {
        cout << fft1[ii].x << " + i*" << fft1[ii].y << endl;
    }
    cout << "Signal 2: " << endl;
    for (int ii = 0; ii < 8; ii++) {
        cout << sig2[ii].x << " + i*" << sig2[ii].y << endl;
    }
    cout << "Signal 2 FFT: " << endl;
    for (int ii = 0; ii < 8; ii++) {
        cout << fft2[ii].x << " + i*" << fft2[ii].y << endl;
    }
    cout << "Signal 3: " << endl;
    for (int ii = 0; ii < 8; ii++) {
        cout << sig3[ii].x << " + i*" << sig3[ii].y << endl;
    }
    cout << "Signal 3 FFT: " << endl;
    for (int ii = 0; ii < 8; ii++) {
        cout << fft3[ii].x << " + i*" << fft3[ii].y << endl;
    }
    // Release the plan (previously leaked), then device and host buffers.
    cufftDestroy(fftplan);
    cudaFree(d_s1);
    cudaFree(d_s2);
    cudaFree(d_s3);
    delete[] sig1;
    delete[] sig2;
    delete[] sig3;
    delete[] fft1;
    delete[] fft2;
    delete[] fft3;
    return 0;
}
|
10,130 | #include<bits/stdc++.h>
#include<cuda.h>
#include<thrust/count.h>
#include<curand_kernel.h>
#include<thrust/extrema.h>
#include<thrust/device_ptr.h>
using namespace std;
// Aborts the process with a diagnostic when a CUDA API call failed;
// `function` names the call site for the message.
void catchCudaError(cudaError_t error, const char *function)
{
    if (error == cudaSuccess) return;
    printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, cudaGetErrorString(error), function);
    exit(-1);
}
// Parses a DIMACS .col graph file: "p <type> <vertices> <edges>" header then
// "e u v" edge lines (1-based). Builds an undirected adjacency list:
// *V = vertex count, (*st_Column)[i] = neighbors of i, (*st_degree)[i] = its
// degree, *counter += 2 per stored edge. Self-loops are skipped.
// Assumes the "p" line precedes every "e" line.
void ReadColFile(const char filename[],long int *V,long int ***st_Column,long int **st_degree,long int *counter){
    std::string s;
    std::ifstream infile(filename);
    if(infile.fail()){
        std::cout<<"Fail to open the file\n";
        exit(0);
    }
    long int n_rows,n_edges;
    while(std::getline(infile,s)){
        std::istringstream iss(s);
        std::string str;
        iss>>str;
        if(str=="p"){
            iss>>s;        // problem type token (e.g. "edge")
            iss>>n_rows;
            iss>>n_edges;
            *V = n_rows;
            // Value-initialize both arrays: degrees start at 0 and, crucially,
            // the adjacency pointers start at nullptr -- realloc() on an
            // uninitialized pointer (the previous behavior) is undefined.
            *st_degree = new long int[n_rows]();
            *st_Column = new long int*[n_rows]();
            continue;
        }else if(str!="e"){
            continue;      // comments ("c" lines) and anything else
        }
        long int u,v;
        iss>>u>>v;
        if(u!=v){
            // Grow each endpoint's neighbor list by one and record the edge
            // in both directions (undirected graph).
            long int u_len = (*st_degree)[u-1];
            long int v_len = (*st_degree)[v-1];
            (*st_Column)[u-1] = (long int*)realloc((*st_Column)[u-1],sizeof(long int)*(u_len+1));
            (*st_Column)[v-1] = (long int*)realloc((*st_Column)[v-1],sizeof(long int)*(v_len+1));
            (*st_Column)[u-1][u_len] = v-1;
            (*st_Column)[v-1][v_len] = u-1;
            (*st_degree)[u-1]++;
            (*st_degree)[v-1]++;
            *counter+=2;
        }
    }
    infile.close();
}
// Parses a MatrixMarket-style file: '%' comment lines, then a
// "<rows> <cols> <nnz>" size line, then "u v w" entries (1-based, w ignored).
// Builds the same undirected adjacency-list representation as ReadColFile.
void ReadMMFile(const char filename[], long int *V,long int ***st_Column,long int **st_degree,long int *counter){
    std::string s;
    std::ifstream infile(filename);
    if(infile.fail()){
        std::cout<<"Fail to open the file\n";
        return;
    }
    // Skip the '%' comment header; on exit `s` holds the size line.
    while(std::getline(infile,s)){
        if(s.find("%")==std::string::npos){
            break;
        }
    }
    std::istringstream iss(s);
    long int n_rows,n_cols,n_edges;
    iss>>n_rows>>n_cols>>n_edges;
    // Value-initialize both arrays: degrees start at 0 and the adjacency
    // pointers start at nullptr -- realloc() on an uninitialized pointer
    // (the previous behavior) is undefined.
    *st_degree = new long int[n_rows]();
    *st_Column = new long int*[n_rows]();
    *V = n_rows;
    // Read the entries; the third field (weight) is parsed and ignored.
    while(std::getline(infile,s)){
        std::istringstream line(s);
        long int u,v,w;
        line>>u>>v>>w;
        if(u!=v){
            long int u_len = (*st_degree)[u-1];
            long int v_len = (*st_degree)[v-1];
            (*st_Column)[u-1] = (long int*)realloc((*st_Column)[u-1],sizeof(long int)*(u_len+1));
            (*st_Column)[v-1] = (long int*)realloc((*st_Column)[v-1],sizeof(long int)*(v_len+1));
            (*st_Column)[u-1][u_len] = v-1;
            (*st_Column)[v-1][v_len] = u-1;
            (*st_degree)[u-1]++;
            (*st_degree)[v-1]++;
            *counter+=2;
        }
    }
    infile.close();
}
|
10,131 | #include <cuda.h>
#include <stdio.h>
#define nthread 256
// Builds a histogram of arr: one thread per element atomically increments
// the bucket for its value. hist must be pre-zeroed by the caller.
__global__ void count_hist(int arr[], int hist[], int size)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= size) return;
    atomicAdd(&hist[arr[gid]], 1);
}
// Scatter phase of counting sort: thread i owns value i. `hist` holds
// inclusive prefix sums (built by the host), so value i occupies
// arr[hist[i-1] .. hist[i]-1]; each thread writes its run of copies.
__global__ void arr_sort(int arr[], int hist[], int max_val)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = threadIdx.x;
// Block-local copy of this block's slice of hist, so most threads read the
// previous prefix from shared memory instead of global memory.
__shared__ int s_idx[nthread];
// NOTE(review): __syncthreads() below sits inside this divergent branch; in
// the last block (when max_val is not a multiple of blockDim.x) not all
// threads reach the barrier, which is undefined behavior -- the guard should
// be restructured so the barrier is unconditional. Confirm and fix.
if(i < max_val){
s_idx[j] = hist[i];
__syncthreads();
int s, cnt;
// Exclusive prefix (start offset): 0 for value 0, hist[i-1] at a block
// boundary (fetched from global), otherwise the shared-memory neighbor.
if(i==0 && j==0) s=0;
else if(i!=0 && j==0) s=hist[i-1];
else s=s_idx[j-1];
cnt = s_idx[j] - s;   // number of occurrences of value i
for(int idx = s; idx < s+cnt; idx++){
arr[idx] = i;
}
}
}
// Counting sort of arr (values assumed in [0, max_val)): histogram on the
// GPU, prefix sum on the host, scatter back on the GPU.
__host__ void counting_sort(int arr[], int size, int max_val)
{
    int* histogram;
    int* cuda_hist, *cuda_arr;
    // calloc takes (count, element_size); the previous call had the arguments
    // swapped, allocating the right number of bytes only by coincidence.
    histogram = (int *)calloc(max_val, sizeof(int));
    cudaMalloc((void**)&cuda_hist, max_val*sizeof(int));
    cudaMalloc((void**)&cuda_arr, size*sizeof(int));
    cudaMemcpy(cuda_hist, histogram, max_val*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_arr, arr, size*sizeof(int), cudaMemcpyHostToDevice);
    count_hist <<< ceil((double)size / nthread), nthread >>> (cuda_arr, cuda_hist, size);
    // Blocking cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(histogram, cuda_hist, max_val*sizeof(int), cudaMemcpyDeviceToHost);
    // Inclusive prefix sum: histogram[i] = number of elements <= i.
    int sum = 0;
    for (int i=0; i<max_val; i++)
    {
        sum += histogram[i];
        histogram[i] = sum;
    }
    cudaMemcpy(cuda_hist, histogram, max_val*sizeof(int), cudaMemcpyHostToDevice);
    arr_sort <<< ceil((double)max_val / nthread), nthread >>> (cuda_arr, cuda_hist, max_val);
    cudaMemcpy(arr, cuda_arr, size*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(cuda_hist);
    cudaFree(cuda_arr);
    free(histogram);
}
|
10,132 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>
unsigned int N = 1;
unsigned int S = 4;
unsigned int D = 3;
// Sliding-window average: each thread averages one 4x4 window of the padded
// input; windows start every 2 elements, so the output is half-sized.
// Launch layout (from main): one block per output row, one thread per column.
__global__ void add(double *inputArr, double *outputArr, int inputSize, int outputSize) {
int col = threadIdx.x;   // output column index
int row = blockIdx.x;    // output row index
//printf("%d, %d \n", col,row);
// NOTE(review): accumulating doubles into an int truncates the running sum
// each iteration, and `result / 16` is integer division. The host-side
// verification (printVerificationArr) uses the same int arithmetic, so the
// two agree -- confirm the truncation is intentional.
int result = 0;
for (int k = row*2; k < row*2+4; k++) {
for (int m = col*2; m < col*2+4; m++) {
result += inputArr[k*inputSize + m];
// if (col == 1 && row == 1) printf("%d, %d: %f, %d \n",row,col, inputArr[k*inputSize + m], k*inputSize+m);
}
}
outputArr[row*(outputSize) + col] = result / 16;
//printf("%f, \n", result/16);
}
// Fills Arr (powD rows x powS columns, row-major) with pseudo-random
// integers in [0, 100) and echoes each row to stdout.
void fillRandArr(double *Arr, int powD, int powS){
    for (int row = 0; row < powD; row++) {
        for (int col = 0; col < powS; col++) {
            // values capped below 100
            Arr[row*powS + col] = rand() % 100;
            printf("%d, ", (int)Arr[row*powS + col]);
        }
        printf("\n");
    }
    printf("\n");
}
// Copies inputArr (powD x powS) into the center of expandedArr
// ((powD+2) x (powS+2)) and fills the one-element border by replicating the
// nearest edge element (clamp-to-edge padding). Corners replicate the
// corner elements -- identical output to the original edge-by-edge version.
void formExpandedArr(double *inputArr, double *expandedArr, int powD, int powS){
    int outW = powS + 2;
    for (int i = 0; i < powD + 2; i++) {
        // clamp the source row into [0, powD-1]
        int si = i - 1;
        if (si < 0) si = 0;
        if (si > powD - 1) si = powD - 1;
        for (int j = 0; j < outW; j++) {
            // clamp the source column into [0, powS-1]
            int sj = j - 1;
            if (sj < 0) sj = 0;
            if (sj > powS - 1) sj = powS - 1;
            expandedArr[i*outW + j] = inputArr[si*powS + sj];
        }
    }
}
// Prints arr as a powD x powS grid of values truncated to int,
// comma-separated, one row per line, with a trailing blank line.
void printArr(double *arr, int powD, int powS){
    for (int row = 0; row < powD; row++) {
        for (int col = 0; col < powS; col++)
            printf("%d, ", (int)arr[row*powS+col]);
        printf("\n");
    }
    printf("\n");
}
// CPU reference for the `add` kernel: recomputes every 4x4 window average
// (with the same int-accumulator truncation) from the padded array and
// prints the result grid so it can be compared against the GPU output.
void printVerificationArr(double *expandedArr, int powDres, int powSres, int powS){
double ArrResultCh[powDres*powSres]; // result array (VLA -- GCC extension in C++)
for (int i = 0; i < powDres; i++) {
for (int j = 0; j < powSres; j++) {
// Same arithmetic as the kernel: truncating int accumulation, /16.
int result = 0;
for (int k = i*2; k < i*2+4; k++) {
for (int m = j*2; m < j*2+4; m++) {
result += expandedArr[k*(powS + 2) + m];
}
}
ArrResultCh[i*powSres + j] = result / 16;
printf("%d, ", (int)ArrResultCh[i*powSres + j]);
}
printf("\n");
}
}
// Driver: builds a random 2^D x 2^S matrix, pads it, runs the 4x4
// sliding-window average on the GPU, prints GPU and CPU-reference results.
// N is 1 at file scope, so the while loop runs a single iteration; the
// shrink-and-repeat tail (D--, S--, copy-back) only matters for larger N.
int main(void) {
srand(time(NULL));
double *dev_i, *dev_o;
int powD = (int)(pow( 2.0, (double)D ));
int powS = (int)(pow( 2.0, (double)S ));
int powDres = (int)(pow( 2.0, (double)(D - 1) ));
int powSres = (int)(pow( 2.0, (double)(S - 1) ));
// Allocate GPU buffers for the padded input and the half-sized output.
cudaMalloc( (void**)&dev_i,
(powD + 2) * (powS + 2) * sizeof(double) );
cudaMalloc( (void**)&dev_o,
powDres * powSres * sizeof(double) );
double ArrM[powD*powS]; // initial matrix M (VLA -- GCC extension in C++)
fillRandArr(ArrM, powD, powS);
while (N > 0) {
double ArrMPlus[(powD + 2) * (powS + 2)]; // M with replicated border rows/columns
formExpandedArr(ArrM, ArrMPlus, powD, powS);
printArr(ArrMPlus, powD+2, powS+2);
// Copy the padded matrix to the device.
cudaMemcpy( dev_i, ArrMPlus,
(powD + 2) * (powS + 2) * sizeof(double),
cudaMemcpyHostToDevice );
// One block per output row, one thread per output column.
add<<<powDres, powSres>>>(dev_i, dev_o, powS+2, powSres);
cudaDeviceSynchronize();
double ArrResult[powDres * powSres]; // result matrix
// Copy the result back from GPU to CPU.
cudaMemcpy( ArrResult, dev_o, powDres * powSres * sizeof(double), cudaMemcpyDeviceToHost );
printArr(ArrResult, powDres, powSres);
// CPU reference with identical truncating arithmetic, for comparison.
printVerificationArr(ArrMPlus, powDres, powSres, powS);
// Halve the problem for a hypothetical next iteration.
D--;
S--;
powD = powDres;
powS = powSres;
powDres = (int)(pow( 2.0, (double)(D - 1) ));
powSres = (int)(pow( 2.0, (double)(S - 1) ));
// Both arrays use the new powS stride here, so the copy is consistent.
for (int i = 0; i < powD; i++) {
for (int j = 0; j < powS; j++) {
ArrM[i*powS + j] = ArrResult[i*powS + j];
}
}
printf("New Array:\n");
printArr(ArrM, powD, powS);
N--;
}
cudaFree( dev_i );
cudaFree( dev_o );
return 0;
}
10,133 | #include "includes.h"
// Elementwise product: sum[idx] = vector1[idx] * vector2[idx].
// NOTE(review): despite the name, this performs no reduction -- `sum` holds
// the per-element products; the final dot product must be reduced elsewhere.
__global__ void dotProduct_CUDA_float(float *sum, int size, float *vector1, float *vector2){
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
if(idx < size){
sum[idx] = (vector2[idx]) * (vector1[idx]);
}
}
10,134 | #include<stdio.h>
#include<iostream>
#include<cuda.h>
using namespace std;
//Catch Cuda errors
// Aborts the process with a diagnostic when a CUDA API call failed.
void catchCudaError(cudaError_t error){
    if (error == cudaSuccess) return;
    printf("\n====== Cuda Error Code %i ======\n %s\n",error,cudaGetErrorString(error));
    exit(-1);
}
//=====================================================================
#define DIM 32
#define ROW 600
#define COL 600
//Kernel function
// Elementwise matrix addition c = a + b; one thread per element,
// x indexes rows and y indexes columns, with a bounds guard for the
// partial blocks at the grid edge.
__global__ void add(int a[][COL], int b[][COL], int c[][COL]){
    const uint row = blockDim.x * blockIdx.x + threadIdx.x;
    const uint col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= ROW || col >= COL) return;
    c[row][col] = a[row][col] + b[row][col];
}
// Adds two ROWxCOL matrices on the GPU, verifies against the CPU, and
// reports host verification time vs. device kernel time.
int main(){
    // NOTE(review): three 600x600 int arrays = ~4.3 MB of stack; close to the
    // default 8 MB limit. Heap allocation (the commented-out new[]s the
    // original carried) would be safer if the dimensions grow.
    int a[ROW][COL], b[ROW][COL], c[ROW][COL]; //Host 2-d arrays
    int (*d_a)[COL], (*d_b)[COL], (*d_c)[COL]; //Device 2-d arrays
    clock_t start, end;
    cudaEvent_t d_start, d_end;
    catchCudaError(cudaEventCreate(&d_start));
    catchCudaError(cudaEventCreate(&d_end));
    size_t size = ROW*COL* sizeof(int);
    //Allocate device memory(double ptr as assigning value to a pointer as defined in CUDA API)
    catchCudaError(cudaMalloc((void **)&d_a, size));
    catchCudaError(cudaMalloc((void **)&d_b, size));
    catchCudaError(cudaMalloc((void **)&d_c, size));
    //Initial values of a,b
    for(uint i=0; i < ROW; ++i){
        for(uint j=0; j < COL; ++j){
            a[i][j] = i+j;
            b[i][j] = i-j;
        }
    }
    //Copy to Device
    catchCudaError(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
    catchCudaError(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice));
    catchCudaError(cudaEventRecord(d_start));
    dim3 dimBlock(DIM, DIM);
    dim3 dimGrid(ceil(1.0*ROW/DIM), ceil(1.0*COL/DIM)) ;
    //Max 1024 threads in each block
    add <<< dimGrid, dimBlock >>>(d_a, d_b, d_c);
    // Launches report config errors via cudaGetLastError, not a return value.
    catchCudaError(cudaGetLastError());
    // cudaThreadSynchronize() is deprecated (removed in CUDA 12); use the
    // device-wide synchronize instead.
    catchCudaError(cudaDeviceSynchronize());
    catchCudaError(cudaEventRecord(d_end));
    //Copy to Host (blocking; also orders after the kernel)
    catchCudaError(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost));
    //Waits till event is recorded
    catchCudaError(cudaEventSynchronize(d_end));
    // Host-side verification (this is what the "Host time" measures).
    start = clock();
    for(uint i=0; i<ROW; ++i)
        for(uint j=0; j<COL; ++j){
            if(a[i][j] + b[i][j] != c[i][j]){
                printf("Incorrect matrix addition (%d,%d)\n", i, j);
                exit(-3);
            }
        }
    end = clock();
    float time_taken = 1000.0* (end - start)/CLOCKS_PER_SEC;
    float d_time_taken;
    catchCudaError(cudaEventElapsedTime(&d_time_taken, d_start, d_end));
    printf("Correct matrix addition\n");
    printf("Host time = %f ms\nDevice Time = %f ms\n", time_taken, d_time_taken);
    //Free device memory and timing events (events were leaked previously)
    catchCudaError(cudaFree(d_a));
    catchCudaError(cudaFree(d_b));
    catchCudaError(cudaFree(d_c));
    catchCudaError(cudaEventDestroy(d_start));
    catchCudaError(cudaEventDestroy(d_end));
}
//==============================================================================================
/*
Output
Correct matrix addition
Host time = 0.422000 ms
Device Time = 0.143072 ms
*/
//==============================================================================================
|
10,135 | #include <stdint.h>
#include <cuda.h>
typedef unsigned char BYTE; // 8-bit byte
typedef unsigned int WORD; // 32-bit word, change to "long" for 16-bit machines
typedef struct {
BYTE data[64];
WORD datalen;
unsigned long long bitlen;
WORD state[5];
WORD k[4];
} SHA1_CTX;
#define ROTLEFT(a, b) ((a << b) | (a >> (32 - b)))
#define SHA1_BLOCK_SIZE 20 // SHA1 outputs a 20 byte digest
#define PAGE_SIZE 4096
// Initializes a SHA-1 context: empty buffer, zero bit count, the standard
// initial hash state H0..H4, and the four per-20-round constants K.
__device__ void sha1_init(SHA1_CTX *ctx)
{
ctx->datalen = 0;
ctx->bitlen = 0;
// Initial hash values from the SHA-1 specification.
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xc3d2e1f0;
// Round constants: one per group of 20 rounds.
ctx->k[0] = 0x5a827999;
ctx->k[1] = 0x6ed9eba1;
ctx->k[2] = 0x8f1bbcdc;
ctx->k[3] = 0xca62c1d6;
}
// Compresses one 64-byte block into ctx->state (the SHA-1 compression
// function): expands the block into an 80-word schedule, then runs the four
// groups of 20 rounds, each with its own boolean function and constant.
__device__ void sha1_transform(SHA1_CTX *ctx, const BYTE data[])
{
WORD a, b, c, d, e, i, j, t, m[80];
// Load the 16 input words big-endian.
for (i = 0, j = 0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) + (data[j + 1] << 16) + (data[j + 2] << 8) + (data[j + 3]);
// Expand to 80 words; each new word is the 1-bit left-rotation of the XOR.
for ( ; i < 80; ++i) {
m[i] = (m[i - 3] ^ m[i - 8] ^ m[i - 14] ^ m[i - 16]);
m[i] = (m[i] << 1) | (m[i] >> 31);
}
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
// Rounds 0-19: Ch(b,c,d) with k[0].
for (i = 0; i < 20; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (~b & d)) + e + ctx->k[0] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
// Rounds 20-39: Parity(b,c,d) with k[1].
for ( ; i < 40; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[1] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
// Rounds 40-59: Maj(b,c,d) with k[2].
for ( ; i < 60; ++i) {
t = ROTLEFT(a, 5) + ((b & c) ^ (b & d) ^ (c & d)) + e + ctx->k[2] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
// Rounds 60-79: Parity(b,c,d) with k[3].
for ( ; i < 80; ++i) {
t = ROTLEFT(a, 5) + (b ^ c ^ d) + e + ctx->k[3] + m[i];
e = d;
d = c;
c = ROTLEFT(b, 30);
b = a;
a = t;
}
// Fold this block's result into the running state.
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
}
// Streams `len` bytes into the context, byte by byte; every time the 64-byte
// buffer fills, it is compressed and the processed bit count is advanced.
__device__ void sha1_update(SHA1_CTX *ctx, const BYTE data[], size_t len)
{
size_t i;
for (i = 0; i < len; ++i) {
ctx->data[ctx->datalen] = data[i];
ctx->datalen++;
if (ctx->datalen == 64) {
sha1_transform(ctx, ctx->data);
ctx->bitlen += 512;
ctx->datalen = 0;
}
}
}
// Finishes the hash: applies SHA-1 padding (0x80, zeros, 64-bit big-endian
// bit length), compresses the final block(s), and writes the 20-byte digest
// big-endian into `hash`.
__device__ void sha1_final(SHA1_CTX *ctx, BYTE hash[])
{
WORD i;
i = ctx->datalen;
// Pad whatever data is left in the buffer.
if (ctx->datalen < 56) {
// Room for the 8-byte length in this block: 0x80 then zeros up to byte 56.
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else {
// No room for the length: pad out this block, compress it, then start a
// fresh block that must be all zeros up to the length field.
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
sha1_transform(ctx, ctx->data);
// BUG FIX: the previous code ran `while (i < 7)` here with i == 64, which
// never executed and left stale message bytes in data[0..55], producing a
// wrong digest whenever datalen >= 56. Zero the first 56 bytes, matching
// the reference implementation's memset(ctx->data, 0, 56).
for (i = 0; i < 56; ++i)
ctx->data[i] = 0x00;
}
// Append to the padding the total message's length in bits and transform.
ctx->bitlen += ctx->datalen * 8;
ctx->data[63] = ctx->bitlen;
ctx->data[62] = ctx->bitlen >> 8;
ctx->data[61] = ctx->bitlen >> 16;
ctx->data[60] = ctx->bitlen >> 24;
ctx->data[59] = ctx->bitlen >> 32;
ctx->data[58] = ctx->bitlen >> 40;
ctx->data[57] = ctx->bitlen >> 48;
ctx->data[56] = ctx->bitlen >> 56;
sha1_transform(ctx, ctx->data);
// Since this implementation uses little endian byte ordering and MD uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i = 0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff;
hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff;
hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff;
hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff;
hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff;
}
}
// One thread per 4096-byte page: thread `thx` copies page `thx` of text1
// into a private buffer, hashes it, and writes its 20-byte digest to
// hashval + thx*SHA1_BLOCK_SIZE. `text_num` is the number of pages.
// NOTE(review): the 4 KB per-thread staging buffer is almost certainly
// spilled to local (off-chip) memory; hashing directly from text1 would
// avoid the copy -- profile before changing.
extern "C" __global__
void gpusha1(unsigned char* text1, unsigned char* hashval, int text_num) {
int thx = blockIdx.x * blockDim.x + threadIdx.x;
SHA1_CTX ctx;
unsigned char text_dev[PAGE_SIZE];
unsigned char hashval_dev[SHA1_BLOCK_SIZE];
int i;
if (thx < text_num) {
for (i = 0; i < PAGE_SIZE; ++i) {
text_dev[i] = text1[i + thx*PAGE_SIZE];
}
sha1_init(&ctx);
sha1_update(&ctx, text_dev, PAGE_SIZE);
sha1_final(&ctx, hashval_dev);
for (i = 0; i < SHA1_BLOCK_SIZE; ++i) {
hashval[i + thx*SHA1_BLOCK_SIZE] = hashval_dev[i];
}
}
}
|
10,136 |
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
#include <curand.h>
#include <sys/time.h>
#define MILLION 1000000.0
#define NUM_ELS 1024
// Sum reduction: each block tree-reduces its slice in shared memory, then
// thread 0 adds the block's partial sum into d_output[0] with an atomic.
// Expects blockDim.x == NUM_ELS (power of two) for the halving loop.
__global__ void reduction(int num_els, float *d_input, float *d_output)
{
// Allocate shared memory
__shared__ float smem_array[NUM_ELS];
//reset d_output to 0
// NOTE(review): every thread of every block performs this reset, racing with
// the atomicAdd below from blocks that have already finished -- with
// gridDim.x > 1 the accumulated total can be wiped mid-run. The reset
// belongs on the host (cudaMemset before the launch); confirm and fix.
d_output[0] = 0;
//define local thread id for each block, and input index across all blocks
int tid = threadIdx.x;
int index = blockIdx.x*blockDim.x + threadIdx.x;
// first, each thread loads data into shared memory
if (index < num_els) {
smem_array[tid] = d_input[index];
}
else smem_array[tid] =0;   // pad the tail block with zeros
// next, we perform binary tree reduction
for (int d = blockDim.x/2; d > 0; d /= 2) {
__syncthreads(); // ensure previous step completed
if (tid<d) smem_array[tid] += smem_array[tid+d];
}
// finally, first thread in each block puts result into global memory
if (tid == 0) atomicAdd(&d_output[0], smem_array[0]);
}
// wall_clock_time - current wall-clock time in seconds since the Unix epoch,
// with microsecond resolution from gettimeofday().
double wall_clock_time (void) {
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return (1000000.0 * (double) tp.tv_sec + (double) tp.tv_usec) / 1000000.0;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Generates num_els normally-distributed floats on the GPU (cuRAND), sums
// them with the reduction kernel, and reports the mean and elapsed time.
int main( int argc, const char** argv)
{
    int num_els, num_threads, mem_size, nblocks;
    float *h_data;
    float *d_input, *d_output;
    double start, finish;
    //Prompt user to enter number of numbers to work with
    printf("Enter the number of random numbers to work with:\t");
    scanf("%d", &num_els);
    num_threads = NUM_ELS;
    mem_size = sizeof(float) * num_els;
    // allocate device memory input and output arrays, and host memory
    cudaMalloc((void**)&d_input, mem_size);
    cudaMalloc((void**)&d_output, sizeof(float));
    h_data = (float*) malloc(mem_size);
    // Zero the accumulator on the host side before the launch (the in-kernel
    // reset is racy across blocks).
    cudaMemset(d_output, 0, sizeof(float));
    // initiate random number generator
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    // generate normally distributed values (mean 500, stddev 50).
    // NOTE(review): curandGenerateNormal requires an even count -- confirm
    // num_els is even or round it up.
    curandGenerateNormal(gen, d_input, num_els, 500.0f, 50.0f);
    // ceiling division to cover the tail block
    nblocks = num_els/NUM_ELS + (num_els % NUM_ELS == 0 ? 0 : 1);
    printf("nblocks = %d\n", nblocks);
    start = wall_clock_time ();
    reduction<<<nblocks,num_threads>>>(num_els, d_input,d_output);
    // Kernel launches are asynchronous: without this sync the timer measured
    // only launch overhead, not execution.
    cudaDeviceSynchronize();
    finish = wall_clock_time ();
    // copy result from device to host
    cudaMemcpy(h_data, d_output, sizeof(float), cudaMemcpyDeviceToHost);
    // report the mean (should be near 500 for a correct sum)
    printf("reduction error = %f\n",h_data[0]/num_els);
    printf("process time = %e s\n", finish - start);
    // cleanup (the generator was leaked previously)
    curandDestroyGenerator(gen);
    free(h_data);
    cudaFree(d_input);
    cudaFree(d_output);
    // CUDA exit -- needed to flush printf write buffer
    cudaDeviceReset();
}
|
10,137 | #include <stdio.h>
#include <unistd.h>
// Minimal kernel: each launched thread prints one greeting via device printf.
__global__ void hello( ){
printf("Hello from device!\n");
}
// Launches a single-thread hello kernel and prints from the host.
int main(void){
    hello<<< 1,1 >>>( );
    printf("Hello from Host!\n");
    // Previously sleep(1): replaced with a real synchronization so the device
    // printf buffer is flushed before exit, without a fixed 1-second delay.
    cudaDeviceSynchronize();
    return 0;
}
|
10,138 | ///////////////////////////////////////////////////////////////////////////////
//
// The MIT License
//
// Copyright (c) 2006 Scientific Computing and Imaging Institute,
// University of Utah (USA)
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef ELVIS_CORE_REENTRANT_ADAPTIVE_TRAPEZOIDAL_CU
#define ELVIS_CORE_REENTRANT_ADAPTIVE_TRAPEZOIDAL_CU
namespace ElVis
{
// template<typename T, unsigned int n>
// struct ReentrantAdaptiveTrapezoidal
// {
// public:
// struct StackPoint
// {
// template<typename IntegrandType, typename FieldFunc>
// __device__
// void Evaluate(const IntegrandType& integrand,
// const FieldFunc& fieldFunc)
// {
// T s = fieldFunc(TVal);
// F = integrand(TVal, s);
// }
// __device__ void Reset()
// {
// TVal = MAKE_FLOAT(1e30);
// }
// __device__ bool IsUninitialized() const
// {
// return TVal == MAKE_FLOAT(1e30);
// }
// __device__ StackPoint& operator=(const StackPoint& rhs)
// {
// TVal = rhs.TVal;
// F = rhs.F;
// return *this;
// }
// T TVal;
// T F;
// };
// struct StackEntry
// {
// __device__ void CalculateMidpointT()
// {
// Mid().TVal = (Right().TVal +
// Left().TVal)*MAKE_FLOAT(.5);
// }
// __device__ void SetT(const T& t0, const T& t1)
// {
// Left().TVal = t0;
// Right().TVal = t1;
// CalculateMidpointT();
// }
// __device__ void CreateFromRight(const StackEntry& s)
// {
// points[0] = s.points[1];
// points[2] = s.points[2];
// CalculateMidpointT();
// }
// __device__ void CreateFromLeft(const StackEntry& s)
// {
// points[0] = s.points[0];
// points[2] = s.points[1];
// CalculateMidpointT();
// }
// __device__ T GetH() const
// {
// return Right().TVal - Left().TVal;
// }
// template<typename IntegrandType, typename FieldFunc>
// __device__ void EvaluateAll(const IntegrandType& integrand,
// const FieldFunc& fieldFunc)
// {
// for(unsigned int i = 0; i < 3; ++i)
// {
// points[i].Evaluate(integrand, fieldFunc);
// }
// }
// __device__ StackEntry& operator=(const StackEntry& rhs)
// {
// points[0] = rhs.points[0];
// points[1] = rhs.points[1];
// points[2] = rhs.points[2];
// return *this;
// }
// __device__ StackPoint& Left() { return points[0]; }
// __device__ StackPoint& Mid() { return points[1]; }
// __device__ StackPoint& Right() { return points[2]; }
// __device__ const StackPoint& Left() const { return
// points[0]; }
// __device__ const StackPoint& Mid() const { return
// points[1]; }
// __device__ const StackPoint& Right() const { return
// points[2]; }
// StackPoint points[3];
// };
// struct Stack
// {
// template<typename IntegrandType, typename FieldFunction>
// __device__ void Initialize(const T& t0, const T& t1, const
// IntegrandType& integrand, const FieldFunction&
// fieldFunction)
// {
// // Initialize the stack prior to execution.
// stack[0].SetT(t0, t1);
// stack[0].EvaluateAll(integrand, fieldFunction);
// curIndex = 0;
// baseH = t1-t0;
// }
// __device__ StackEntry Pop(bool traceEnabled)
// {
// if( curIndex < 0 ) return stack[0];
// StackEntry result = stack[curIndex];
// curIndex -= 1;
// if( traceEnabled )
// {
// printf("After popping.\n");
// PrintStack(traceEnabled);
// }
// return result;
// }
// __device__ bool HasCapacity(int num)
// {
// return curIndex + num < n;
// }
// __device__ bool Push(const StackEntry& s, bool traceEnabled)
// {
// if( curIndex + 2 >= n )
// {
// if( traceEnabled )
// {
// printf("Attempting to push onto stack but not
// enough space.\n");
// }
// return false;
// }
// StackEntry right;
// right.CreateFromRight(s);
// StackEntry left;
// left.CreateFromLeft(s);
// if( right.Mid().TVal == right.Left().TVal ||
// right.Mid().TVal == right.Right().TVal ||
// left.Mid().TVal == left.Left().TVal ||
// left.Mid().TVal == left.Right().TVal )
// {
// return false;
// }
// stack[curIndex+1] = right;
// stack[curIndex+2] = left;
// if( traceEnabled )
// {
// printf("Pushing [%2.10f, %2.10f, %2.10f] with values
// (%2.10f, %2.10f, %2.10f) onto location %d\n",
// right.Left().TVal, right.Mid().TVal,
// right.Right().TVal, right.Left().F, right.Mid().F,
// right.Right().F, curIndex+1);
// printf("Pushing [%2.10f, %2.10f, %2.10f] with values
// (%2.10f, %2.10f, %2.10f) onto location %d\n",
// left.Left().TVal, left.Mid().TVal,
// left.Right().TVal, left.Left().F, left.Mid().F,
// left.Right().F, curIndex+2);
// }
// curIndex += 2;
// PrintStack(traceEnabled);
// return true;
// }
// __device__ void PrintStack(bool traceEnabled)
// {
// if( traceEnabled )
// {
// for(int i = 0; i <= curIndex; ++i)
// {
// printf("[%d] = (%2.10f, %2.10f, %2.10f) and
// values (%2.10f, %2.10f, %2.10f) \n", i,
// stack[i].Left().TVal, stack[i].Mid().TVal,
// stack[i].Right().TVal, stack[i].Left().F,
// stack[i].Mid().F, stack[i].Right().F);
// }
// }
// }
// __device__ T GetBaseH() { return baseH; }
// __device__ StackEntry& Top() { return stack[curIndex]; }
// __device__ bool Empty() { return curIndex == -1; }
// __device__ int Depth() { return curIndex; }
// StackEntry stack[n];
// int curIndex;
// ElVisFloat baseH;
// };
// template<typename IntegrandType, typename FieldFunctionType>
// __device__ void Initialize(const T& t0, const T& t1, const
// IntegrandType& integrand,
// const FieldFunctionType& fieldFunction, const T&
// globalEpsilon,
// const T& globalIntegralEstimate,
// const T& maxFunctionValue, bool&
// reachedMaxRecursion,
// bool traceEnabled)
// {
// if( traceEnabled )
// {
// printf("Initializing range [%2.10f, %2.10f]\n", t0, t1);
// }
// stack.Initialize(t0, t1, integrand, fieldFunction);
// // Put it at the end, since the continue function will copy
// it to the beginning.
// t[n-1] = t0;
// f[n-1] = stack.Top().Left().F;
// I[n-1] = 0.0;
// }
// template<typename IntegralFunc, typename FieldFunctionType>
// __device__ IntegrationStatus ContinueIntegration(const
// IntegralFunc& integrand,
// const FieldFunctionType& fieldFunction, const T&
// globalEpsilon,
// const T& globalIntegralEstimate,
// const T& maxFunctionValue, bool&
// reachedMaxRecursion,
// bool traceEnabled)
// {
// if( traceEnabled )
// {
// printf("Global Epsilon %2.10f, globalIntegralEstimate
// %2.10f, maxValue %2.10f\n", globalEpsilon,
// globalIntegralEstimate, maxFunctionValue);
// }
// reachedMaxRecursion = false;
// unsigned int minimumDepth = 2;
// t[0] = t[n-1];
// f[0] = f[n-1];
// I[0] = I[n-1];
// if( traceEnabled )
// {
// printf("########################################3
// Restarting with t = %2.10f, f = %2.10f, I = %2.10f\n",
// t[0], f[0], I[0]);
// }
// int loopGuard = 0;
// endIndex = 1;
// while( !stack.Empty() && endIndex < n && loopGuard < 50)
// {
// reachedMaxRecursion |= stack.HasCapacity(1);
// StackEntry curStack = stack.Pop(traceEnabled);
// bool needToSubdivide = false;
// curStack.CalculateMidpointT();
// curStack.Mid().Evaluate(integrand, fieldFunction);
// if( stack.Depth() < minimumDepth )
// {
// if( traceEnabled )
// {
// printf("Subdividing because of minimum
// depth.\n");
// }
// needToSubdivide = true;
// }
// else
// {
// ElVisFloat h2 = curStack.GetH()*MAKE_FLOAT(.5);
// ElVisFloat h4 = h2*MAKE_FLOAT(.5);
// if( h4 == MAKE_FLOAT(0.0) )
// {
// if( traceEnabled )
// {
// printf("Stopping subdivision because h is
// 0.\n");
// }
// //goto PushPop;
// }
// T localEpsilon = globalEpsilon*
// (curStack.GetH()/stack.GetBaseH());
// if( localEpsilon == MAKE_FLOAT(0.0) )
// {
// if( traceEnabled )
// {
// printf("Stopping subdivision because epsilon
// is 0.\n");
// }
// //goto PushPop;
// }
// if( h4 > MAKE_FLOAT(0.0) && localEpsilon >
// MAKE_FLOAT(0.0) )
// {
// T I0 = h2 * (curStack.Left().F +
// curStack.Right().F);
// T I1 = h4 * (curStack.Left().F +
// 2.0*curStack.Mid().F + curStack.Right().F);
// if( traceEnabled )
// {
// printf("Level %d, Interval (%2.10f, %2.10f,
// %2.10f), values (%2.10f, %2.10f, %2.10f) I0
// = %2.10f, I1 = %2.10f, localEpsilon =
// %2.10f\n", stack.Depth(),
// curStack.Left().TVal, curStack.Mid().TVal,
// curStack.Right().TVal,
// curStack.Left().F, curStack.Mid().F,
// curStack.Right().F, I0, I1,
// localEpsilon);
// }
// ElVisFloat h = curStack.GetH()/MAKE_FLOAT(2.0);
// bool rangeCheckEnabled = curStack.Left().F ==
// MAKE_FLOAT(0.0);
// rangeCheckEnabled &= curStack.Mid().F ==
// MAKE_FLOAT(0.0);
// rangeCheckEnabled &= curStack.Right().F ==
// MAKE_FLOAT(0.0);
// if( rangeCheckEnabled )
// {
// // If any of the samples are 0, then we know
// there is a breakpoint somewhere and we
// should subdivide.
// T maxSegmentError =
// (maxFunctionValue*h)/globalIntegralEstimate;
// ElVis::Interval<ElVisFloat> range =
// fieldFunction.EstimateRange(curStack.Left().TVal,
// curStack.Right().TVal);
// ElVisFloat maxValue =
// integrand.GetMaxValue(range);
// T updatedSegmentError =
// (maxValue*h)/globalIntegralEstimate;
// if( traceEnabled )
// {
// printf("At least one value is 0. Scalar
// range is (%2.10f, %2.10f),
// maxSegmentError %2.10f,
// updatedSegmentError %2.10f\n",
// range.GetLow(), range.GetHigh(),
// maxSegmentError,
// updatedSegmentError);
// }
// if( traceEnabled )
// {
// printf("One of the samples is 0,
// maxSegmentError = %2.10f, localEpsilon =
// %2.10f\n", maxSegmentError,
// localEpsilon);
// }
// //needToSubdivide = updatedSegmentError >
// localEpsilon;
// if(updatedSegmentError > localEpsilon )
// {
// needToSubdivide = true;
// }
// }
// else
// {
// T errorEstimate =
// fabs(I0-I1)/globalIntegralEstimate;
// if( traceEnabled )
// {
// printf("No samples 0, errorEstimate =
// %2.10f, localEpsilon = %2.10f\n",
// errorEstimate, localEpsilon);
// }
// if( errorEstimate > localEpsilon )
// {
// needToSubdivide = true;
// }
// }
// }
// }
// if( traceEnabled )
// {
// printf("Subdividing = %d\n", needToSubdivide? 1 :
// 0);
// }
// bool failedToSubdivide = true;
// if( needToSubdivide && stack.HasCapacity(2) )
// {
// // Push onto the stack
// failedToSubdivide = !stack.Push(curStack,
// traceEnabled);
// }
// if( failedToSubdivide )
// {
// // Update values and pop off the stack.
// T prevValue = I[endIndex-1];
// T h = curStack.GetH()/MAKE_FLOAT(4.0);
// T mid_f = curStack.Mid().F;
// T right_f = curStack.Right().F;
// t[endIndex] = curStack.Mid().TVal;
// t[endIndex+1] = curStack.Right().TVal;
// f[endIndex] = mid_f;
// f[endIndex+1] = right_f;
// T leftContribution = h * (curStack.Left().F +
// mid_f);
// T rightContribution = h * (mid_f + right_f);
// I[endIndex] = prevValue + leftContribution;
// I[endIndex+1] = prevValue +
// leftContribution+rightContribution;
// if( traceEnabled )
// {
// printf("prevValue %2.10f, h %2.10f, mid_f
// %2.10f, right_f %2.10f, leftContribution %2.10f,
// rightContribution %2.10f\n", prevValue, h,
// mid_f, right_f, leftContribution,
// rightContribution);
// printf("Integral Value at f(%2.10f) = %2.10f is
// %2.10f\n", t[endIndex], f[endIndex],
// I[endIndex]);
// printf("Integral Value at f(%2.10f) = %2.10f is
// %2.10f\n", t[endIndex+1], f[endIndex+1],
// I[endIndex+1]);
// }
// endIndex += 2;
// }
// loopGuard += 1;
// }
// if( stack.Empty() )
// {
// if( traceEnabled )
// {
// printf("Stack is empty, end index %d, loopGuard
// %d.\n", endIndex, loopGuard);
// }
// return eFinished;
// }
// else
// {
// if( traceEnabled )
// {
// printf("Stack is not empty end index %d, loopGuard
// %d.\n", endIndex, loopGuard);
// }
// return ePartial;
// }
// }
// __device__ T SampleInnerIntegral(T t_i, T sample,
// TransferFunctionChannel channel, const TransferFunction*
// densityFunc) const
// {
// if( t_i < t[0] ||
// t_i > t[endIndex-1] )
// {
// return MAKE_FLOAT(0.0);
// }
// if( t_i == t[0] ) return MAKE_FLOAT(0.0);
// if( t_i == t[endIndex-1] ) return I[endIndex-1];
// const T* a = &(t[0]);
// const T* b = &(t[endIndex-1]);
// while(b-a > 1 )
// {
// const T* mid = (b-a)/2 + a;
// if( *mid == t_i )
// {
// return I[mid-a];
// }
// if( t_i < *mid )
// {
// b = mid;
// }
// else
// {
// a = mid;
// }
// }
// T baseline = I[a-t];
// T segment = (t_i-*a)/MAKE_FLOAT(2.0) * ( f[a-t] +
// densityFunc->Sample(channel, sample));
// return baseline+segment;
// }
// __device__ T OverallValue() const
// {
// return I[endIndex-1];
// }
// __device__ Interval<ElVisFloat> ValidDomain() const
// {
// Interval<ElVisFloat> result;
// result.SetLow(t[0]);
// result.SetHigh(t[endIndex-1]);
// return result;
// }
// Stack stack;
// T t[n];
// T f[n];
// T I[n];
// private:
// // During evaluation, always = n.
// // On the last interval, can be less.
// unsigned int endIndex;
// };
}
#endif
|
10,139 | #include <cuda.h>
#include <iostream>
#define N 32
using namespace std;
/*
 * Element-wise addition of two N x N matrices: C = A + B.
 *
 * Launch layout: one block per row, one thread per column, i.e.
 * matrix_Add<<<N, N>>>(A_D, B_D, C_D).
 *
 * Fix: the original called __syncthreads() before the addition, but the
 * kernel uses no shared memory and each thread writes a distinct element,
 * so there is nothing to synchronize — the barrier was pure overhead.
 */
__global__ void matrix_Add(int A[][N], int B[][N], int C[][N])
{
    int row = blockIdx.x;   // one block per matrix row
    int col = threadIdx.x;  // one thread per matrix column
    C[row][col] = A[row][col] + B[row][col];
}
/*
 * Host driver: fills two N x N matrices, adds them on the GPU with
 * matrix_Add<<<N, N>>>, and prints the result.
 *
 * Improvement: a kernel launch does not return an error code, so the
 * launch is now followed by cudaGetLastError() and the result copy is
 * checked before the output is trusted.
 */
int main(int argc, char *argv[])
{
    // Host matrices: A and B are inputs, C receives the GPU result.
    int A[N][N], B[N][N], C[N][N];
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            A[i][j] = 2 * i + j;
            B[i][j] = i + 2 * j;
        }
    }
    // Device buffers, typed as pointer-to-row so the kernel can use
    // two-dimensional indexing.
    int (*A_D)[N], (*B_D)[N], (*C_D)[N];
    cudaMalloc((void**)&A_D, (N * N) * sizeof(int));
    cudaMalloc((void**)&B_D, (N * N) * sizeof(int));
    cudaMalloc((void**)&C_D, (N * N) * sizeof(int));
    cudaMemcpy(A_D, A, N * N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(B_D, B, N * N * sizeof(int), cudaMemcpyHostToDevice);
    matrix_Add<<<N, N>>>(A_D, B_D, C_D);
    // Launches report configuration errors only via cudaGetLastError().
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        cout << "kernel launch failed: " << cudaGetErrorString(err) << endl;
        cudaFree(A_D);
        cudaFree(B_D);
        cudaFree(C_D);
        return 1;
    }
    // Blocking copy: also synchronizes with the kernel.
    err = cudaMemcpy(C, C_D, N * N * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        cout << "result copy failed: " << cudaGetErrorString(err) << endl;
        cudaFree(A_D);
        cudaFree(B_D);
        cudaFree(C_D);
        return 1;
    }
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            cout << C[i][j] << " ";
        }
        cout << endl;
    }
    cudaFree(A_D);
    cudaFree(B_D);
    cudaFree(C_D);
    return 0;
}
10,140 | #include <iostream>
// Per-block vector addition: element i is handled by block i. The host
// launches this as add<<<N, 1>>> (one thread per block), so blockIdx.x
// alone selects the element.
__global__ void add(int *a, int *b, int *c) {
  int i = blockIdx.x;  // element index = block index
  c[i] = a[i] + b[i];
}
/*
 * Host driver for the one-block-per-element add kernel: initializes two
 * vectors (all 2s and all 7s), adds them on the GPU, prints the sums.
 *
 * Improvements: host malloc results are checked, and the kernel launch
 * is followed by cudaGetLastError() (launches return no status).
 */
int main(void) {
  // Number of elements (and of blocks in the launch below).
  int N = 512;
  // host copies of a, b, c
  int *a, *b, *c;
  // device copies of a, b, c
  int *d_a, *d_b, *d_c;
  int size = N * sizeof(int);
  // Allocate and check host memory.
  a = (int *) malloc(size);
  b = (int *) malloc(size);
  c = (int *) malloc(size);
  if (a == NULL || b == NULL || c == NULL) {
    std::cerr << "host allocation failed" << std::endl;
    return 1;
  }
  // Initialize vectors
  for (int i = 0; i < N; ++i) {
    a[i] = 2;
    b[i] = 7;
  }
  // Allocate memory to device copies
  cudaMalloc((void **)&d_a, size);
  cudaMalloc((void **)&d_b, size);
  cudaMalloc((void **)&d_c, size);
  // Copy inputs from host to device
  cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
  // Launch add() kernel on GPU: one block per element.
  add<<<N,1>>>(d_a, d_b, d_c);
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
  }
  // Blocking copy; also synchronizes with the kernel.
  cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
  // Print
  for (int i = 0; i < N; ++i) {
    std::cout << c[i] << " ";
  }
  std::cout << std::endl;
  // Cleanup
  free(a);
  free(b);
  free(c);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  return 0;
}
|
10,141 | #include <stdio.h>
/*
void getComputeModeDescription(int computeMode, **char result)
{
if (computeMode == 0)
&result = "Thrreading mode available";
else
&result = "Unknown";
}
*/
/*
 * Print a short capability report for the current CUDA device.
 *
 * Fixes vs. original:
 *  - clockRate and memoryClockRate are reported by the runtime in kHz,
 *    so MHz = value / 1000 (the original divided by 1024);
 *  - totalGlobalMem is a size_t and must not be passed to %d (undefined
 *    behavior on 64-bit platforms) — reduce to megabytes first, then cast;
 *  - the device queries are now error-checked.
 */
int main(int argc, char **argv)
{
    printf("Hey, Cuda! utility, v0.1\n\n");
    int device;
    cudaDeviceProp prop;
    cudaError_t err = cudaGetDevice(&device);
    if (err == cudaSuccess)
        err = cudaGetDeviceProperties(&prop, device);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Device Name: %s\n", prop.name);
    printf("Processors: %d\n", prop.multiProcessorCount);
    // clockRate is in kHz -> divide by 1000 for MHz.
    printf("Clock Rate: %d (Mhz)\n", prop.clockRate / 1000);
    // totalGlobalMem is size_t (bytes); reduce before the narrowing cast.
    printf("Total Memory: %d Megabytes\n", (int)(prop.totalGlobalMem / (1024 * 1024)));
    // memoryClockRate is also in kHz.
    printf("Memory Clock Rate: %d Mhz\n", prop.memoryClockRate / 1000);
    printf("Memory BUS width: %d bits\n", prop.memoryBusWidth);
    printf("CUDA compute mode: %d\n", prop.computeMode);
    printf("Max Threads per Block: %d\n", prop.maxThreadsPerBlock);
    printf("Registers per Multiprocessor: %d\n", prop.regsPerMultiprocessor);
    printf("PCI Device ID: %d\n", prop.pciDeviceID);
    printf("\n");
    printf("Supports allocating managed memory: %s\n", (prop.managedMemory == 0) ? "No" : "Yes");
    printf("Device is %s\n", (prop.integrated == 0) ? "discrete" : "integrated");
    // Flush any pending device work before exiting.
    cudaDeviceSynchronize();
    return 0;
}
10,142 | #include "includes.h"
// Derivative of the logistic sigmoid: s(x) * (1 - s(x)) with
// s(x) = 1 / (1 + e^-x).
// Fix: use expf and float literals; the original exp()/1.0 promoted the
// whole computation to double and back on every call.
__device__ float activator_derivative( float x )
{
    float sig = 1.0f / (1.0f + expf( -x ));
    return sig * (1.0f - sig);
}
/*
 * Backward pass for the object-detection output layer. Each thread
 * handles one batch element (id): for every bounding-box record of
 * (4 + max_classes) floats it chains the gradient dz_in through the
 * derivative of the forward activation into dz:
 *   x, y        -> sigmoid derivative * grad
 *   w, h        -> exp * grad
 *   class slots -> sigmoid derivative * grad
 *
 * Fixes vs. original: expf instead of double-promoting exp; a bounds
 * guard so oversized grids do not write past batch_size elements; the
 * comment on index+3 wrongly said "w" — it is the height term (matches
 * the reference code below).
 */
__global__ void calcDetectObjectsBackwardGPU( float *dz_in, float *dz, float *in, int batch_size, int in_size_x, int in_size_y, int in_size_z, int max_bounding_boxes, int max_classes )
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if ( id >= batch_size )
        return;  // guard: grid may be larger than the batch
    int batch_stride = in_size_x * in_size_y * in_size_z;
    for( int i = 0; i < max_bounding_boxes; i = i + (4 + max_classes) ){
        int index = id * batch_stride + i;
        dz[index  ] = activator_derivative( in[index  ] ) * dz_in[index  ]; // x: sigmoid derivative * grads
        dz[index+1] = activator_derivative( in[index+1] ) * dz_in[index+1]; // y: sigmoid derivative * grads
        dz[index+2] = expf( in[index+2] ) * dz_in[index+2];                 // w: exp * grads
        dz[index+3] = expf( in[index+3] ) * dz_in[index+3];                 // h: exp * grads
        for( int c = 0; c < max_classes; ++c ){
            int index2 = id * batch_stride + i + 4 + c;
            dz[index2] = activator_derivative( in[index2] ) * dz_in[index2]; // id: sigmoid derivative * grads
        }
    }
    /* original (CPU) reference code
    for(int b = 0; b < dz_in.size.b; ++b ){
        for( int i = 0; i < _max_bounding_boxes; i=i+(4+_max_classes)){
            dz( b, i , 0, 0 ) = activator_derivative( in( b, i , 0, 0 ) ) * dz_in( b, i , 0, 0 ); // x
            dz( b, i+1, 0, 0 ) = activator_derivative( in( b, i+1 , 0, 0 ) ) * dz_in( b, i+1, 0, 0 ); // y
            dz( b, i+2, 0, 0 ) = exp( in( b, i+2, 0, 0 ) ) * dz_in( b, i+2, 0, 0 ); // w
            dz( b, i+3, 0, 0 ) = exp( in( b, i+3, 0, 0 ) ) * dz_in( b, i+3, 0, 0 ); // h
            for( int c = 0; c <_max_classes; ++c){
                dz( b, i+4+c, 0, 0 ) = activator_derivative( in( b, i+4+c , 0, 0 ) ) * dz_in( b, i+4+c , 0, 0 ); // id
            }
        }
    }
    */
}
10,143 | /*
* Copyright 2014 Netherlands eScience Center
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This program benchmarks four different implementations for
* overlapping CPU-GPU communication and GPU computation of a
* sparse matrix vector multiplication kernel.
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
//CSR representation
#define M 128*1024 //number of rows
//used for filling matrix
#define N 128*1024 //number of cols
#define NNZ (int)(((long)N * (long)M)/(long)1000) //number of non-zero elements
#define BLOCK_X 128
#define NSTREAMS 128
#define ITERATIONS 5
extern "C" {
void print_CSR (int *row_start, int *col_idx, float *values, int elem);
void print_vector (float *x, int n);
void spmv_kernel (float *y, int *row_start, int *col_idx, float *values, float *x);
void spmv_explicit (float *y, int *row_start, int *col_idx, float *values, float *x);
void spmv_implicit (float *y, int *row_start, int *col_idx, float *values, float *x);
void spmv_streams (float *y, int *row_start, int *col_idx, float *values, float *x);
void spmv_hybrid (float *y, int *row_start, int *col_idx, float *values, float *x);
void start_timer ();
void stop_timer (float *);
int compare (float *a1, float *a2, int n);
__global__ void spmv_gpukernel (float *y, int *row_start, int *col_idx, float *values, float *x);
__global__ void spmv_offsetkernel (float *y, int *row_start, int *col_idx, float *values, float *x, int offset);
}
//this number specifies the actual number of streams used at this point
int nStreams = 32;
cudaStream_t stream[NSTREAMS];
cudaEvent_t event_htod[NSTREAMS];
/*
 * Benchmark driver: creates NSTREAMS streams/events and pinned,
 * device-mapped host buffers, generates a random CSR matrix with target
 * density NNZ/(M*N), computes a CPU reference SpMV, then times the four
 * GPU implementations (explicit / implicit / streams / hybrid) and
 * compares each result against the reference.
 */
int
main () {
  cudaError_t err;
  // Mapped pinned memory must be enabled before any CUDA allocation.
  cudaSetDeviceFlags (cudaDeviceMapHost);
  cudaSetDevice (0);
  cudaDeviceSetCacheConfig (cudaFuncCachePreferShared);
  cudaDeviceSetSharedMemConfig (cudaSharedMemBankSizeFourByte);
  //setup streams and one host-to-device event per stream
  for (int k = 0; k < NSTREAMS; k++) {
    err = cudaStreamCreate (&stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString (err));
    }
    err = cudaEventCreate (&event_htod[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Error in cudaEventCreate htod: %s\n", cudaGetErrorString (err));
    }
  }
  //setup memory: all host arrays are pinned AND device-mapped, so the
  //implicit/hybrid variants can hand these pointers directly to kernels.
  int *row_start;
  int *col_idx;
  float *values;
  float *y;
  float *y_ref;
  float *x;
  err = cudaHostAlloc ((void **) &row_start, (M + 1) * sizeof (int), cudaHostAllocMapped);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err));
  }
  err = cudaHostAlloc ((void **) &col_idx, NNZ * sizeof (int), cudaHostAllocMapped);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err));
  }
  err = cudaHostAlloc ((void **) &values, NNZ * sizeof (float), cudaHostAllocMapped);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err));
  }
  err = cudaHostAlloc ((void **) &x, N * sizeof (float), cudaHostAllocMapped);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err));
  }
  err = cudaHostAlloc ((void **) &y_ref, M * sizeof (float), cudaHostAllocMapped);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err));
  }
  err = cudaHostAlloc ((void **) &y, M * sizeof (float), cudaHostAllocMapped);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err));
  }
  int i, j, k;
  for (i = 0; i < M; i++) {
    y[i] = 0.0;
    y_ref[i] = 0.0;
  }
  // NOTE(review): x is filled BEFORE srand() below, so the input vector
  // is the same every run while the matrix structure is seeded by time().
  for (i = 0; i < N; i++) {
    x[i] = 0.00001 + (rand () % 10000) / 10000.0;
  }
  srand (time (NULL));
  //srand(13337);
  float density = (float) (NNZ) / (float) ((long) M * N);
  //int chance = (int)ceilf(1.0 / density); //used only in more random matrix generation
  int elem = 0;              // running count of non-zeros emitted
  int first_on_row = 1;      // 1 until the first non-zero of the current row is placed
  int n_per_col = (int) ceilf (N * density);   // non-zeros targeted per row
  int *col_indexes = (int *) malloc (n_per_col * sizeof (int));
  float time;
  start_timer ();
  //Generate the sparse matrix
  //using a less random generation scheme because more random took forever
  for (j = 0; j < M; j++) {
    row_start[j] = elem;
    first_on_row = 1;
    //faster less-random matrix generation:
    //draw one candidate column per sub-range of width N/n_per_col,
    //then emit them in ascending order via repeated minimum search
    for (i = 0; i < n_per_col; i++) {
      int sub_range = N / n_per_col;
      col_indexes[i] = i * sub_range + rand () % sub_range;
    }
    int min = N;
    int min_idx = -1;
    for (i = 0; i < n_per_col && elem < NNZ; i++) {
      //search lowest column idx
      for (k = 0; k < n_per_col; k++) {
        if (col_indexes[k] < min) {
          min = col_indexes[k];
          min_idx = k;
        }
        //if duplicate delete it
        if (col_indexes[k] == min) {
          col_indexes[k] = N + 1;
        }
      }
      //sanity checks
      if (min < N) {
        if (elem >= NNZ) {
          fprintf (stderr, "error: elem=%d > NNZ=%d", elem, NNZ);
        }
        //add value
        values[elem] = 0.0001 + ((rand () % 1000) / 1000.0);
        col_idx[elem] = min;
        if (first_on_row == 1) {
          first_on_row = 0;
          row_start[j] = elem;
        }
        elem++;
      }
      //for next search
      col_indexes[min_idx] = N + 1;
      min = N;
    }
    /* the more randomly generated way of matrix generation
    for (i=0; i<N && elem < NNZ; i++) {
      if ((rand() % chance) == 0) {
        //create non-zero
        values[elem] = 1.0 + ((rand() % 1000) / 100.0) ;
        col_idx[elem] = i;
        if (first_on_row == 1) {
          first_on_row = 0;
          row_start[row_idx++] = elem;
        }
        elem++;
      }
    }
    */
    //check for empty row and add same index
    if (first_on_row == 1) {
      row_start[j] = elem;
    }
  }
  //last element of row_start[] points to last element
  //in values[] and col_idx[] by definition
  row_start[M] = elem;
  free (col_indexes);
  stop_timer (&time);
  printf ("Matrix generated in: %.6f ms\n", time);
  printf ("elements in sparse matrix: %d\n", elem);
  printf ("target density=%f, achieved density=%f\n", density, (float) elem / ((float) M * (float) N));
  printf ("target NNZ=%d, achieved NNZ=%d\n", NNZ, elem);
  printf ("\n");
  print_CSR (row_start, col_idx, values, elem);
  printf ("finished generating sparse matrix, starting computation ... \n");
  fflush (stdout);
  // CPU reference result used by all four compare() calls below.
  start_timer ();
  spmv_kernel (y_ref, row_start, col_idx, values, x);
  stop_timer (&time);
  printf ("SPMV CPU: %.6f ms\n", time);
  // now run the four implementations, ITERATIONS times each
  for (i = 0; i < ITERATIONS; i++) {
    spmv_explicit (y, row_start, col_idx, values, x);
  }
  compare (y_ref, y, M);
  for (i = 0; i < ITERATIONS; i++) {
    spmv_implicit (y, row_start, col_idx, values, x);
  }
  compare (y_ref, y, M);
  for (i = 0; i < ITERATIONS; i++) {
    spmv_streams (y, row_start, col_idx, values, x);
  }
  compare (y_ref, y, M);
  for (i = 0; i < ITERATIONS; i++) {
    spmv_hybrid (y, row_start, col_idx, values, x);
  }
  compare (y_ref, y, M);
  //clean up
  cudaFreeHost (row_start);
  cudaFreeHost (col_idx);
  cudaFreeHost (values);
  cudaFreeHost (x);
  cudaFreeHost (y);
  cudaFreeHost (y_ref);
  //flush info for profiling
  cudaDeviceSynchronize ();
  cudaDeviceReset ();
  return 0;
}
/*
* Utility function for printing the sparse matrix in CSR representation
*/
/*
 * Debug helper: dump the CSR arrays (row_start, col_idx, values) to
 * stdout. Matrices with more than 100 non-zeros are skipped so large
 * runs do not flood the console.
 */
void
print_CSR (int *row_start, int *col_idx, float *values, int elem) {
  //sanity check: only print small matrices
  if (elem > 100)
    return;
  printf ("NNZ=%d, M=%d\n", elem, M);
  printf ("row_start[]=\n {");
  for (int r = 0; r < M; r++) {
    printf ("%d", row_start[r]);
    if (r + 1 < M)
      printf (", ");
  }
  printf ("}\n");
  printf ("col_idx[]=\n {");
  for (int e = 0; e < elem; e++) {
    printf ("%d", col_idx[e]);
    if (e + 1 < elem)
      printf (", ");
  }
  printf ("}\n");
  printf ("values[]=\n {");
  for (int e = 0; e < elem; e++) {
    printf ("%.2f", values[e]);
    if (e + 1 < elem)
      printf (", ");
  }
  printf ("}\n");
}
/*
* Utility function for printing a small vector
*/
/*
 * Debug helper: print the first n entries of a float vector as a
 * brace-delimited, comma-separated list.
 */
void
print_vector (float *x, int n) {
  printf ("x=\n {");
  for (int idx = 0; idx < n; idx++) {
    printf ("%.2f", x[idx]);
    // separator after every element except the last
    if (idx + 1 < n)
      printf (", ");
  }
  printf ("}\n");
}
/*
* Simple kernel for performing a sparse matrix vector multiplication
* for a sparse matrix in CSR representation
*
* y = A * x
*/
/*
 * CPU reference SpMV for a CSR matrix with M rows: y += A * x.
 * Note this ACCUMULATES into y, so the caller must zero y beforehand
 * (main() does this once at startup).
 */
void
spmv_kernel (float *y, int *row_start, int *col_idx, float *values, float *x) {
  for (int row = 0; row < M; row++) {
    float acc = y[row];
    // walk the non-zeros of this row
    for (int e = row_start[row]; e < row_start[row + 1]; e++) {
      acc += values[e] * x[col_idx[e]];
    }
    y[row] = acc;
  }
}
/*
* Simple CUDA kernel for performing a sparse matrix vector multiplication
* for a sparse matrix in CSR representation
*
* y = A * x
*/
/*
 * GPU SpMV for a CSR matrix: y = A * x, one thread per matrix row.
 * Threads whose global index falls beyond the M rows exit early, so
 * any grid that covers M rows is a valid launch.
 */
__global__ void
spmv_gpukernel (float *y, int *row_start, int *col_idx, float *values, float *x) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= M)
    return;
  float acc = 0.0;
  int first = row_start[row];
  int last = row_start[row + 1];
  // accumulate this row's non-zeros into a register, write once
  for (int e = first; e < last; e++) {
    acc += values[e] * x[col_idx[e]];
  }
  y[row] = acc;
}
/*
* Simple CUDA kernel for performing a sparse matrix vector multiplication
* for a sparse matrix in CSR representation
*
* y = A * x
*
* The offset kernel allows an offset to be specified that directs the threads towards
* a row in the sparse matrix from which to start the computation. This is used
* when the computation is split across different streams.
*
*/
/*
 * GPU SpMV with a starting-row offset, for splitting the matrix across
 * streams: thread t of the grid computes row (offset + t). Rows at or
 * beyond M are skipped, so partial grids at the tail are safe.
 */
__global__ void
spmv_offsetkernel (float *y, int *row_start, int *col_idx, float *values, float *x, int offset) {
  int row = offset + blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= M)
    return;
  float acc = 0.0;
  for (int e = row_start[row]; e < row_start[row + 1]; e++) {
    acc += values[e] * x[col_idx[e]];
  }
  y[row] = acc;
}
/*
* Host code that invokes the sparse matrix vector multiplication kernel
*
* The explicit implementation uses explicit memory copy
* statements to move all data to the GPU, executes the
* GPU kernel, and uses memory copies to copy the output
* data back to host memory. This implementation achieves
* no overlap between transfers and/or computation.
*
*/
/*
 * Host code that invokes the sparse matrix vector multiplication kernel
 *
 * The explicit implementation uses explicit memory copy
 * statements to move all data to the GPU, executes the
 * GPU kernel, and uses memory copies to copy the output
 * data back to host memory. All work is issued to the single
 * stream[1], so copies and kernel execute strictly in order and
 * nothing overlaps — this is the no-overlap baseline.
 */
void
spmv_explicit (float *y, int *row_start, int *col_idx, float *values, float *x) {
  cudaError_t err;
  int *d_row_start;
  int *d_col_idx;
  float *d_values;
  float *d_x;
  float *d_y;
  err = cudaMalloc ((void **) &d_row_start, (M + 1) * sizeof (int));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_row_start: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_col_idx, NNZ * sizeof (int));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_col_idx: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_values, NNZ * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_values: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_x, N * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_x: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_y, M * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_y: %s\n", cudaGetErrorString (err));
  }
  dim3 threads (BLOCK_X, 1);
  dim3 grid ((int) ceilf ((float) M / (float) (BLOCK_X)), 1);
  // Drain the device and clear any earlier sticky error before timing.
  cudaDeviceSynchronize ();
  err = cudaGetLastError ();
  if (err != cudaSuccess) {
    fprintf (stderr, "Error occured: %s\n", cudaGetErrorString (err));
  }
  float time;
  //Measure total execution time: H2D copies + kernel + D2H copy
  cudaDeviceSynchronize ();
  start_timer ();
  err = cudaMemcpyAsync (d_row_start, row_start, (M + 1) * sizeof (int), cudaMemcpyHostToDevice, stream[1]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device row_start: %s\n", cudaGetErrorString (err));
  }
  err = cudaMemcpyAsync (d_col_idx, col_idx, NNZ * sizeof (int), cudaMemcpyHostToDevice, stream[1]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device col_idx: %s\n", cudaGetErrorString (err));
  }
  err = cudaMemcpyAsync (d_values, values, NNZ * sizeof (float), cudaMemcpyHostToDevice, stream[1]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device values: %s\n", cudaGetErrorString (err));
  }
  err = cudaMemcpyAsync (d_x, x, N * sizeof (float), cudaMemcpyHostToDevice, stream[1]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device x: %s\n", cudaGetErrorString (err));
  }
  spmv_gpukernel <<< grid, threads, 0, stream[1] >>> (d_y, d_row_start, d_col_idx, d_values, d_x);
  err = cudaMemcpyAsync (y, d_y, M * sizeof (float), cudaMemcpyDeviceToHost, stream[1]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy device to host y: %s\n", cudaGetErrorString (err));
  }
  cudaDeviceSynchronize ();
  stop_timer (&time);
  printf ("EXPLICIT: %.6f ms\n", time);
  //Measure kernel execution time only (data already resident on device)
  cudaDeviceSynchronize ();
  start_timer ();
  spmv_gpukernel <<< grid, threads, 0, stream[1] >>> (d_y, d_row_start, d_col_idx, d_values, d_x);
  cudaDeviceSynchronize ();
  stop_timer (&time);
  printf ("EXPLICIT kernel:\t %.6f ms\n", time);
  cudaFree (d_row_start);
  cudaFree (d_col_idx);
  cudaFree (d_values);
  cudaFree (d_x);
  cudaFree (d_y);
}
/*
* Host code that invokes the sparse matrix vector multiplication kernel
*
 * The implicit implementation uses device-mapped host memory rather
 * than explicit memory copy statements: the same spmv_gpukernel is
 * launched, but it reads its operands directly from the mapped host
 * buffers over the bus instead of from device memory.
*
*/
/*
 * Host wrapper: SpMV through device-mapped host memory. All arrays were
 * allocated with cudaHostAllocMapped, so the kernel dereferences them
 * directly over the bus — no explicit copies are issued here.
 * NOTE(review): passing the host pointers straight to the kernel relies
 * on unified addressing; otherwise cudaHostGetDevicePointer would be
 * needed — confirm target platforms.
 */
void
spmv_implicit (float *y, int *row_start, int *col_idx, float *values, float *x) {
  dim3 threads (BLOCK_X, 1);
  dim3 grid ((int) ceilf ((float) M / (float) (BLOCK_X)), 1);
  // Drain the device and clear any earlier sticky error before timing.
  cudaDeviceSynchronize ();
  cudaError_t err = cudaGetLastError ();
  if (err != cudaSuccess) {
    fprintf (stderr, "Error occured: %s\n", cudaGetErrorString (err));
  }
  float elapsed;
  cudaDeviceSynchronize ();
  start_timer ();
  spmv_gpukernel <<< grid, threads, 0, stream[1] >>> (y, row_start, col_idx, values, x);
  cudaDeviceSynchronize ();
  stop_timer (&elapsed);
  printf ("IMPLICIT: %.6f ms\n", elapsed);
}
/*
* Host code that invokes the sparse matrix vector multiplication kernel
*
* The streams implementation uses CUDA streams combined
* with explicit memory copy statements. This way transfers
* in one stream may overlap with computation and transfers
* in other streams.
*
*/
/*
 * Host code that invokes the sparse matrix vector multiplication kernel
 *
 * The streams implementation uses CUDA streams combined
 * with explicit memory copy statements. The matrix is split into
 * nstr horizontal slices of rps rows each; every stream copies its
 * slice of row_start/col_idx/values, runs spmv_offsetkernel on it,
 * and copies its slice of y back. Transfers in one stream may
 * overlap with computation and transfers in other streams.
 */
void
spmv_streams (float *y, int *row_start, int *col_idx, float *values, float *x) {
  int k;
  cudaError_t err;
  int *d_row_start;
  int *d_col_idx;
  float *d_values;
  float *d_x;
  float *d_y;
  err = cudaMalloc ((void **) &d_row_start, (M + 1) * sizeof (int));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_row_start: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_col_idx, NNZ * sizeof (int));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_col_idx: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_values, NNZ * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_values: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_x, N * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_x: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_y, M * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_y: %s\n", cudaGetErrorString (err));
  }
  dim3 threads (BLOCK_X, 1);
  dim3 grid ((int) ceilf ((float) M / (float) (BLOCK_X)), 1);
  cudaDeviceSynchronize ();
  err = cudaGetLastError ();
  if (err != cudaSuccess) {
    fprintf (stderr, "Error occured: %s\n", cudaGetErrorString (err));
  }
  //determine rows per stream (nStreams == -1 means "use all NSTREAMS")
  int nstr = NSTREAMS;
  if (nStreams != -1)
    nstr = nStreams;
  //setup for spmv_offsetkernel:
  //tb = total thread blocks, rps = rows per stream; each stream gets
  //tb/nstr blocks, so nstr must divide tb evenly
  int tb = M / BLOCK_X;
  int rps = (tb / nstr) * BLOCK_X;
  grid.x = tb / nstr;
  if (tb % nstr != 0) {
    fprintf (stderr, "Error nStreams=%d not a divisor of the number of thread blocks=%d\n", nstr, M);
  }
  float time;
  cudaDeviceSynchronize ();
  start_timer ();
  //all streams need x to be on the device
  err = cudaMemcpyAsync (d_x, x, N * sizeof (float), cudaMemcpyHostToDevice, stream[0]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device x: %s\n", cudaGetErrorString (err));
  }
  //copy first element in row_start, copy rest as needed by stream
  err = cudaMemcpyAsync (d_row_start, row_start, sizeof (int), cudaMemcpyHostToDevice, stream[0]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device row_start: %s\n", cudaGetErrorString (err));
  }
  //event_htod[0] marks "x and row_start[0] are on the device";
  //every per-slice stream waits on it before its own copies
  err = cudaEventRecord (event_htod[0], stream[0]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaEventRecord htod: %s\n", cudaGetErrorString (err));
  }
  for (k = 0; k < nstr; k++) {
    //CSR element range [start, end) owned by slice k
    int start = row_start[rps * k];
    int end = row_start[rps * (k + 1)];
    //printf("stream %d: start=%d, end=%d\n", k, start, end);
    err = cudaStreamWaitEvent (stream[k], event_htod[0], 0);
    if (err != cudaSuccess) {
      fprintf (stderr, "Error in cudaStreamWaitEvent htod 1: %s\n", cudaGetErrorString (err));
    }
    //enforce strict ordering of copy operations per stream
    if (k > 0) {
      err = cudaStreamWaitEvent (stream[k], event_htod[k - 1], 0);
      if (err != cudaSuccess) {
        fprintf (stderr, "Error in cudaStreamWaitEvent htod 1: %s\n", cudaGetErrorString (err));
      }
    }
    err = cudaMemcpyAsync (d_row_start + 1 + rps * k, row_start + 1 + rps * k, rps * sizeof (int), cudaMemcpyHostToDevice, stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Stream %d: Error in cudaMemcpy host to device row_start: %s\n", k, cudaGetErrorString (err));
    }
    err = cudaMemcpyAsync (d_col_idx + start, col_idx + start, (end - start) * sizeof (int), cudaMemcpyHostToDevice, stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Stream %d: Error in cudaMemcpy host to device col_idx: %s\n", k, cudaGetErrorString (err));
    }
    err = cudaMemcpyAsync (d_values + start, values + start, (end - start) * sizeof (float), cudaMemcpyHostToDevice, stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Stream %d: Error in cudaMemcpy host to device values: %s\n", k, cudaGetErrorString (err));
    }
    //enforce strict ordering of copy operations per stream
    err = cudaEventRecord (event_htod[k], stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Error in cudaEventRecord htod: %s\n", cudaGetErrorString (err));
    }
    // }
    // for (k=0; k<nstr; k++) {
    //kernel for slice k starts as soon as this stream's copies finish
    spmv_offsetkernel <<< grid, threads, 0, stream[k] >>> (d_y, d_row_start, d_col_idx, d_values, d_x, k * rps);
  }
  //copy each slice of the result back in its own stream
  for (k = 0; k < nstr; k++) {
    err = cudaMemcpyAsync (y + k * rps, d_y + k * rps, rps * sizeof (float), cudaMemcpyDeviceToHost, stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Error in cudaMemcpy device to host y: %s\n", cudaGetErrorString (err));
    }
  }
  cudaDeviceSynchronize ();
  stop_timer (&time);
  printf ("STREAMS: %.6f ms\n", time);
  cudaFree (d_row_start);
  cudaFree (d_col_idx);
  cudaFree (d_values);
  cudaFree (d_x);
  cudaFree (d_y);
}
/*
* Host code that invokes the sparse matrix vector multiplication kernel
*
* The Hybrid implementation uses CUDA streams combined
* with explicit memory copy statements for the input data
* and uses device-mapped host memory to copy the output data
* back to host memory.
*
*/
/*
 * Host code that invokes the sparse matrix vector multiplication kernel
 *
 * The Hybrid implementation uses CUDA streams combined
 * with explicit memory copy statements for the input data
 * and uses device-mapped host memory for the output: the kernel
 * writes straight to the mapped host buffer y, so no explicit
 * device-to-host copy (and no d_y allocation) is needed.
 */
void
spmv_hybrid (float *y, int *row_start, int *col_idx, float *values, float *x) {
  int k;
  cudaError_t err;
  int *d_row_start;
  int *d_col_idx;
  float *d_values;
  float *d_x;
  err = cudaMalloc ((void **) &d_row_start, (M + 1) * sizeof (int));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_row_start: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_col_idx, NNZ * sizeof (int));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_col_idx: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_values, NNZ * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_values: %s\n", cudaGetErrorString (err));
  }
  err = cudaMalloc ((void **) &d_x, N * sizeof (float));
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMalloc d_x: %s\n", cudaGetErrorString (err));
  }
  dim3 threads (BLOCK_X, 1);
  dim3 grid ((int) ceilf ((float) M / (float) (BLOCK_X)), 1);
  cudaDeviceSynchronize ();
  err = cudaGetLastError ();
  if (err != cudaSuccess) {
    fprintf (stderr, "Error occured: %s\n", cudaGetErrorString (err));
  }
  //determine rows per stream (nStreams == -1 means "use all NSTREAMS")
  int nstr = NSTREAMS;
  if (nStreams != -1)
    nstr = nStreams;
  //setup for spmv_offsetkernel: nstr must evenly divide the M/BLOCK_X
  //thread blocks; rps rows go to each stream
  int tb = M / BLOCK_X;
  int rps = (tb / nstr) * BLOCK_X;
  grid.x = tb / nstr;
  if (tb % nstr != 0) {
    fprintf (stderr, "Error nStreams=%d not a divisor of the number of thread blocks=%d\n", nstr, M);
  }
  float time;
  cudaDeviceSynchronize ();
  start_timer ();
  //all streams need x to be on the device
  err = cudaMemcpyAsync (d_x, x, N * sizeof (float), cudaMemcpyHostToDevice, stream[0]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device x: %s\n", cudaGetErrorString (err));
  }
  //copy first element in row_start, copy rest as needed by stream
  err = cudaMemcpyAsync (d_row_start, row_start, sizeof (int), cudaMemcpyHostToDevice, stream[0]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaMemcpy host to device row_start: %s\n", cudaGetErrorString (err));
  }
  //event_htod[0] marks "x and row_start[0] resident"; slices wait on it
  err = cudaEventRecord (event_htod[0], stream[0]);
  if (err != cudaSuccess) {
    fprintf (stderr, "Error in cudaEventRecord htod: %s\n", cudaGetErrorString (err));
  }
  for (k = 0; k < nstr; k++) {
    //CSR element range [start, end) owned by slice k
    int start = row_start[rps * k];
    int end = row_start[rps * (k + 1)];
    err = cudaStreamWaitEvent (stream[k], event_htod[0], 0);
    if (err != cudaSuccess) {
      fprintf (stderr, "Error in cudaStreamWaitEvent htod 1: %s\n", cudaGetErrorString (err));
    }
    //enforce strict ordering of copy operations per stream
    if (k > 0) {
      err = cudaStreamWaitEvent (stream[k], event_htod[k - 1], 0);
      if (err != cudaSuccess) {
        fprintf (stderr, "Error in cudaStreamWaitEvent htod 1: %s\n", cudaGetErrorString (err));
      }
    }
    err = cudaMemcpyAsync (d_row_start + 1 + rps * k, row_start + 1 + rps * k, rps * sizeof (int), cudaMemcpyHostToDevice, stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Stream %d: Error in cudaMemcpy host to device row_start: %s\n", k, cudaGetErrorString (err));
    }
    err = cudaMemcpyAsync (d_col_idx + start, col_idx + start, (end - start) * sizeof (int), cudaMemcpyHostToDevice, stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Stream %d: Error in cudaMemcpy host to device col_idx: %s\n", k, cudaGetErrorString (err));
    }
    err = cudaMemcpyAsync (d_values + start, values + start, (end - start) * sizeof (float), cudaMemcpyHostToDevice, stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Stream %d: Error in cudaMemcpy host to device values: %s\n", k, cudaGetErrorString (err));
    }
    //enforce strict ordering of copy operations per stream
    err = cudaEventRecord (event_htod[k], stream[k]);
    if (err != cudaSuccess) {
      fprintf (stderr, "Error in cudaEventRecord htod: %s\n", cudaGetErrorString (err));
    }
    // }
    // for (k=0; k<nstr; k++) {
    //note: the kernel writes directly into the mapped host array y
    spmv_offsetkernel <<< grid, threads, 0, stream[k] >>> (y, d_row_start, d_col_idx, d_values, d_x, k * rps);
    // }
  }
  cudaDeviceSynchronize ();
  stop_timer (&time);
  printf ("HYBRID: %.6f ms\n", time);
  cudaFree (d_row_start);
  cudaFree (d_col_idx);
  cudaFree (d_values);
  cudaFree (d_x);
}
/*
* Compare function that compares two arrays of length N for similarity
*
* This function performs a number of different tests, for example the number of
* values at an epsilon from 0.0 should be similar in both arrays and may not
* be greater than 1/4th of the array. Additionally NaN values are treated as
* errors.
*
* The value of eps should be adjusted to something reasonable given the
* fact that CPU and GPU do not produce exactly the same numerical results.
*/
/*
 * Compare two float arrays of length n for similarity.
 *
 * Counts elements that are NaN or differ by more than eps, printing at
 * most 10 offending entries to stderr. Also counts near-zero entries in
 * each array: a large or mismatched zero count usually indicates a
 * broken result, and is reported separately.
 *
 * Returns the number of mismatching elements.
 */
int
compare (float *a1, float *a2, int n) {
  float eps = 0.0001;      // CPU and GPU results are not bit-identical
  int res = 0;             // mismatch count (NaNs + out-of-tolerance)
  int print = 0;           // cap on printed error lines
  int zero_one = 0;        // near-zero entries in a1
  int zero_two = 0;        // near-zero entries in a2
  for (int i = 0; i < n; i++) {
    float v1 = a1[i];
    float v2 = a2[i];
    if (v1 < eps && v1 > -eps) {
      zero_one++;
    }
    if (v2 < eps && v2 > -eps) {
      zero_two++;
    }
    if (isnan (v1) || isnan (v2)) {
      res++;
      if (print < 10) {
        print++;
        fprintf (stderr, "Error detected at i=%d,\t a1= %10.7e \t a2= \t %10.7e\n", i, v1, v2);
      }
    }
    float diff = v1 - v2;
    if (diff > eps || diff < -eps) {
      res++;
      if (print < 10) {
        print++;
        fprintf (stderr, "Error detected at i=%d,\t a1= \t %10.7e \t a2= \t %10.7e\n", i, v1, v2);
      }
    }
  }
  if (zero_one > (n / 4)) {
    fprintf (stderr, "Error: array1 contains %d zeros\n", zero_one);
  }
  if (zero_two > (n / 4)) {
    fprintf (stderr, "Error: array2 contains %d zeros\n", zero_two);
  }
  if (zero_one != zero_two) {
    fprintf (stderr, "Error: number of zeros in arrays dont correspond zero1=%d, zero2=%d\n", zero_one, zero_two);
  }
  if (res > 0) {
    fprintf (stdout, "Number of errors in GPU result: %d\n", res);
  }
  return res;
}
|
10,144 | #include "includes.h"
/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}
#define TB 128
#define DISP_MAX 256
/*
 * Occlusion filter over a disparity-like map y (one float per element,
 * rows of width size3; presumably disparities — this file is copied
 * from mc-cnn). For each element, scan the pixels to its right within
 * the row; if some pixel i positions away satisfies
 * i - y[id+i] < -y[id], the current value is considered occluded and
 * zeroed. One thread per element, guarded against id >= size.
 */
__global__ void remove_occluded(float *y, int size, int size3)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= size) {
        return;
    }
    int x = id % size3;  // column within the row
    for (int i = 1; x + i < size3; i++) {
        if (i - y[id + i] < -y[id]) {
            y[id] = 0;
            break;
        }
    }
}
10,145 | #include <math.h>
#include <iostream>
// Element-wise vector addition: y[i] = x[i] + y[i] for i in [0, n).
// Uses a grid-stride loop so any launch configuration covers all elements.
__global__ void add(int n, float *x, float *y) {
int first = blockIdx.x * blockDim.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for (int i = first; i < n; i += step) {
y[i] = x[i] + y[i];
}
}
// Driver: fills two managed vectors (x=1, y=2), runs add on the GPU and
// verifies every y[i] became 3.0f by reporting the maximum deviation.
int main(void) {
int n = 1 << 20;
float *x;
float *y;
// unified (managed) memory: same pointers usable on host and device
cudaMallocManaged(&x, n * sizeof(float));
cudaMallocManaged(&y, n * sizeof(float));
for (size_t i = 0; i < n; i++) {
*(x + i) = 1.0f;
*(y + i) = 2.0f;
}
int blockSize = 256;
// ceil-divide so the grid covers all n elements
int numBlocks = (n + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(n, x, y);
// must sync before the host reads managed memory written by the kernel
cudaDeviceSynchronize();
float max_error = 0.0f;
for (int i = 0; i < n; i++){
max_error = fmax(max_error, fabs(y[i] - 3.0f));
}
std::cout << "Max error: " << max_error << std::endl;
cudaFree(x);
cudaFree(y);
return 0;
} |
10,146 | #include "includes.h"
// For each column j of a column-major xrows x xcols matrix x, write the
// sum of squares of that column into xx[j]. Grid-stride over columns so
// any launch size works.
__global__ void _kgauss64sum(int xrows, int xcols, double *x, double *xx) {
int i, j, x0, x1;
double sum;
j = threadIdx.x + blockIdx.x * blockDim.x;
while (j < xcols) {
// [x0, x1) is the contiguous storage range of column j
x0 = j*xrows; x1 = x0+xrows;
sum = 0;
for (i=x0; i<x1; i++) sum += x[i]*x[i];
xx[j] = sum;
j += blockDim.x * gridDim.x;
}
} |
// Multiply both complex fields psi1 and psi2 (stored as float2 = re, im)
// by the unit phase e^{-i * q[i].x / size} at each site i.
// NOTE(review): no bounds check on i — assumes the launch exactly covers
// the arrays; confirm with the caller's grid configuration.
extern "C" __global__ void
gauge(float2* psi1, float2* psi2, float2* q, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// eiq = (cos, sin) of the phase angle
float2 eiq = make_float2(cos(-q[i].x / size), sin(-q[i].x / size));
// complex multiply: psi * eiq
psi1[i] = make_float2(psi1[i].x * eiq.x - psi1[i].y * eiq.y, psi1[i].x * eiq.y + psi1[i].y * eiq.x);
psi2[i] = make_float2(psi2[i].x * eiq.x - psi2[i].y * eiq.y, psi2[i].x * eiq.y + psi2[i].y * eiq.x);
} |
10,148 | #include "includes.h"
// Nearest-neighbor 2x downsampling of a 3-channel interleaved image:
// target (wt x ht) takes every second pixel of source (ws x hs).
// hs is accepted for symmetry but unused; bounds are guarded on the target.
__global__ void NN_DownSampling( float *target, const float *source, const int wt, const int ht, const int ws, const int hs )
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
// pixel index in target and the corresponding (2x, 2y) pixel in source
const int curt = y*wt+x;
const int curs = (y*2)*ws+x*2;
if(y < ht and x < wt) {
// copy the three interleaved channels
target[curt*3+0] = source[curs*3+0];
target[curt*3+1] = source[curs*3+1];
target[curt*3+2] = source[curs*3+2];
}
} |
10,149 | #include <cstdio>
#include <cstdlib>
// #include <memory>
#include <assert.h>
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
// Computes the pointer address of a given value in a 2D array given:
// baseAddress: the base address of the buffer
// col: the col coordinate of the value
// row: the row coordinate of the value
// pitch: the actual allocation size **in bytes** of a row plus its padding
// Computes the pointer address of element (col, row) in a pitched 2D array.
//   baseAddress: base address of the buffer
//   col, row:    element coordinates
//   pitch:       actual allocation size **in bytes** of a row plus its padding
// BUG FIX: the column offset was hard-coded as col * sizeof(int), which is
// wrong for any T whose size differs from int; use sizeof(T).
template <typename T>
__device__ inline T* eltPtr(T *baseAddress, int col, int row, size_t pitch) {
return (T*)((char*)baseAddress + row * pitch + col * sizeof(T));
}
// Simple vector initialization; element at index i receives value i.
// This is a good alternative to cudaMemset which write bytes only
// Fill every element of a pitched 2D buffer with `value`.
// Launch with a 2D grid whose threads cover at least cols x rows; excess
// threads fall outside the bounds guard and do nothing.
template <typename T>
__global__ void simpleInit2D(T *buffer, T value, int cols, int rows, size_t pitch) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= cols || y >= rows)
return;
*eltPtr<T>(buffer, x, y, pitch) = value;
}
// Simply checks that all the values of a given buffer are `expectedValue`
// Device-side verification: assert that every element of the pitched 2D
// buffer equals `expectedValue`. A failing assert traps and surfaces at the
// next synchronizing CUDA call.
template <typename T>
__global__ void checkOnDevice(T *buffer, T expectedValue, int cols, int rows, size_t pitch) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows) {
T* cell = eltPtr<T>(buffer, x, y, pitch);
assert (*cell == expectedValue);
}
}
// Exercise cudaMallocPitch: fill a pitched 2D int buffer on the device,
// verify it on the device, copy it back (de-padding via cudaMemcpy2D) and
// verify again on the host. Returns 0 on success, 1 on mismatch.
int main() {
int cols=2*1024;
int rows=2*1024;
// int cols=4; // Use less elements for debug if needed
// int rows=4;
int *d_buffer;
dim3 threads(32,32);
// ceil-divide so the grid covers the whole buffer
dim3 blocks((cols+threads.x-1)/threads.x,
(rows+threads.y-1)/threads.y);
size_t pitch;
// Allocate an 2D buffer with padding
cudaMallocPitch(&d_buffer, &pitch, cols * sizeof(int), rows);
printf("Pitch d_buffer: %ld\n", pitch);
cudaCheckError();
// The value we want our buffer to be filled with
const int value = 5;
// USING cudaMemset* FUNCTIONS IS WRONG FOR SETTING INTEGERS!!!
// https://stackoverflow.com/questions/13387101/cudamemset-does-it-set-bytes-or-integers
// Why do cudaMemset* functions take int values when they actually set bytes???
// because std::memset does so… https://en.cppreference.com/w/cpp/string/byte/memset
// cudaMemset2D(c, pitch, value, cols * sizeof(int), rows);
// cudaDeviceSynchronize();
// cudaCheckError();
// Initialize the buffer
simpleInit2D<int><<<blocks,threads>>>(d_buffer, value, cols, rows, pitch);
cudaDeviceSynchronize();
cudaCheckError();
// Check the content of the buffer on the device
checkOnDevice<int><<<blocks,threads>>>(d_buffer, value, cols, rows, pitch);
cudaDeviceSynchronize();
cudaCheckError();
// Copy back d_buffer to host memory for inspection
// (cudaMemcpy2D strips the per-row padding: dest pitch is cols*sizeof(int))
int* host_buffer = (int*) std::malloc(rows * cols * sizeof(int));
cudaMemcpy2D(host_buffer, cols * sizeof(int),
d_buffer, pitch, cols * sizeof(int), rows,
cudaMemcpyDeviceToHost);
cudaCheckError();
// Check for errors
bool error = false;
for (int i = 0; i < rows * cols; i++) {
int val_real = host_buffer[i];
// NOTE: assignment inside the condition is intentional — it records
// the mismatch flag and tests it in one step
if (error = val_real != value) {
printf("ERROR at index %d: expected %d but got %d.\n", i, value, val_real);
break;
}
}
// Clean up
cudaFree(d_buffer);
cudaCheckError();
std::free(host_buffer);
// Useful return value
if (!error) {
printf("Test completed successfully.\n");
return 0;
} else {
printf("WARNING there were some errors.\n");
return 1;
}
}
|
10,150 | /*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
// This is a utility for conversion between float and half.
// From CUDA 9.2, __float2half/__half2float can be called from host and GCC can
// compile them, but with CUDA 9.0 they can't, so this utils are needed.
#include <cuda_fp16.h>
#include <iostream>
namespace chainer_trt {
namespace internal {
// Device kernel: convert n floats to halves, one element per thread,
// guarded so tail threads beyond n do nothing.
__global__ void float2half_kernel(const float* src, __half* dst, int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(n <= idx)
return;
dst[idx] = __float2half(src[idx]);
}
// Device kernel: convert n halves to floats, one element per thread.
__global__ void half2float_kernel(const __half* src, float* dst, int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(n <= idx)
return;
// BUG FIX: this previously called __float2half(src[idx]), converting in
// the wrong direction; half -> float must use __half2float.
dst[idx] = __half2float(src[idx]);
}
// Host helper: copy src to the device, convert float -> half on the GPU,
// and copy the n converted values back into dst (both host pointers).
void float2half(const float* src, __half* dst, int n) {
const int block_size = 1024;
// ceil-divide so the grid covers all n elements
const int grid_size = (int)std::ceil(1.0 * n / block_size);
float* src_g = NULL;
__half* dst_g = NULL;
cudaMalloc(&src_g, sizeof(float) * n);
cudaMalloc(&dst_g, sizeof(__half) * n);
cudaMemcpy(src_g, src, sizeof(float) * n, cudaMemcpyHostToDevice);
float2half_kernel<<<grid_size, block_size, 0, 0>>>(src_g, dst_g, n);
cudaMemcpy(dst, dst_g, sizeof(__half) * n, cudaMemcpyDeviceToHost);
cudaFree(src_g);
cudaFree(dst_g);
}
// Host helper: copy src to the device, convert half -> float on the GPU,
// and copy the n converted values back into dst (both host pointers).
void half2float(const __half* src, float* dst, int n) {
const int block_size = 1024;
const int grid_size = (int)std::ceil(1.0 * n / block_size);
__half* src_g = NULL;
float* dst_g = NULL;
cudaMalloc(&src_g, sizeof(__half) * n);
cudaMalloc(&dst_g, sizeof(float) * n);
cudaMemcpy(src_g, src, sizeof(__half) * n, cudaMemcpyHostToDevice);
half2float_kernel<<<grid_size, block_size, 0, 0>>>(src_g, dst_g, n);
cudaMemcpy(dst, dst_g, sizeof(float) * n, cudaMemcpyDeviceToHost);
cudaFree(src_g);
cudaFree(dst_g);
}
}
}
|
10,151 | //=============================================================================================
// Name : thread2dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 02-01-11
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include "stdio.h"
#include <vector>
using namespace std;
// For each cell (idx, idy) of an N x M logical grid (row-major with idx as
// the fastest index), record the thread and block coordinates of the CUDA
// thread that owns the cell into the four output arrays.
__global__ void scanTheadInformationGPU(int *threadXIdsGPU,int *threadYIdsGPU,int *blockXIdsGPU,int *blockYIdsGPU,int N,int M) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
// guard: the grid may overshoot N x M
if (idx >= N || idy >= M) return;
int cell = idx + idy * N;
threadXIdsGPU[cell] = threadIdx.x;
threadYIdsGPU[cell] = threadIdx.y;
blockXIdsGPU[cell] = blockIdx.x;
blockYIdsGPU[cell] = blockIdx.y;
}
// Driver: allocates N x M id arrays (backed by std::vector on the host),
// lets the kernel record per-cell thread/block coordinates on the device,
// copies them back and prints them.
int main() {
// pointers to host memory matrices
std::vector< int* > threadXIds,threadYIds;
std::vector< int > threadXIds1d,threadYIds1d;
std::vector< int* > blockXIds,blockYIds;
std::vector< int > blockXIds1d;
std::vector< int > blockYIds1d;
// pointers to device memory matrices
int *threadXIdsGPU, *threadYIdsGPU;
int *blockXIdsGPU, *blockYIdsGPU;
// N and M are the total size that we want, N is number of rows and M is number of columns
int N=3,M=3;
int i,j;
// Allocate arrays threadIds and blockIds on host
// threadIds
// threadXIds is the pointer to all the array malloced in one dimension
threadXIds1d.resize(N*M);
threadYIds1d.resize(N*M);
// thread*Ids will be just pointers to the one dimension array
// (row pointers into the flat backing vector, for 2D-style access)
threadXIds.resize(N);
threadYIds.resize(N);
for (i=0;i<N;i++) {
threadXIds[i]=(&(threadXIds1d[i*M]));
threadYIds[i]=(&(threadYIds1d[i*M]));
}
// blockIds
// blockIds is the pointer to all the array malloced in one dimension
blockXIds1d.resize(N*M);
blockYIds1d.resize(N*M);
// block*Ids will be just pointers to the one dimension array
blockXIds.resize(N);
blockYIds.resize(N);
for (i=0;i<N;i++) {
blockXIds[i]=(&(blockXIds1d[i*M]));
blockYIds[i]=(&(blockYIds1d[i*M]));
}
// Allocate arrays threadIdsGPU and blockIdsGPU on device
cudaMalloc ((void **) &threadXIdsGPU, sizeof(int)*N*M);
cudaMalloc ((void **) &threadYIdsGPU, sizeof(int)*N*M);
cudaMalloc ((void **) &blockXIdsGPU, sizeof(int)*N*M);
cudaMalloc ((void **) &blockYIdsGPU, sizeof(int)*N*M);
/*
// Copy data from host memory to device memory (not needed)
cudaMemcpy(threadXIdsGPU, &(threadXIds1d[0]), sizeof(int)*N*M, cudaMemcpyHostToDevice);
cudaMemcpy(threadYIdsGPU, &(threadYIds1d[0]), sizeof(int)*N*M, cudaMemcpyHostToDevice);
cudaMemcpy(blockXIdsGPU, &(blockXIds1d[0]), sizeof(int)*N*M, cudaMemcpyHostToDevice);
cudaMemcpy(blockYIdsGPU, &(blockYIds1d[0]), sizeof(int)*N*M, cudaMemcpyHostToDevice);
*/
// Compute the execution configuration
// (ceil-divide in each dimension so the grid covers all N x M cells)
int block_size=2;
dim3 dimBlock(block_size,block_size);
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1) );
// Scan information from the threads
scanTheadInformationGPU<<<dimGrid,dimBlock>>>(threadXIdsGPU,threadYIdsGPU,blockXIdsGPU,blockYIdsGPU,N,M);
// Copy data from device memory to host memory
// (blocking cudaMemcpy also synchronizes with the kernel above)
cudaMemcpy(&(threadXIds1d[0]), threadXIdsGPU, sizeof(int)*N*M, cudaMemcpyDeviceToHost);
cudaMemcpy(&(threadYIds1d[0]), threadYIdsGPU, sizeof(int)*N*M, cudaMemcpyDeviceToHost);
cudaMemcpy(&( blockXIds1d[0]), blockXIdsGPU, sizeof(int)*N*M, cudaMemcpyDeviceToHost);
cudaMemcpy(&( blockYIds1d[0]), blockYIdsGPU, sizeof(int)*N*M, cudaMemcpyDeviceToHost);
// Print all the data about the threads
printf(" dimGrid = %d %d\n",dimGrid.x,dimGrid.y);
for (i=0; i<N; i++) {
for (j=0; j<M; j++) {
printf(" threadIds[%d][%d]= %d , %d\n",i,j,threadXIds[i][j],threadYIds[i][j]);
}
}
for (i=0; i<N; i++) {
for (j=0; j<M; j++) {
printf(" blockIds[%d][%d]= %d , %d\n",i,j,blockXIds[i][j],blockYIds[i][j]);
}
}
// Free the memory
threadXIds.clear();
threadXIds1d.clear();
threadYIds.clear();
threadYIds1d.clear();
blockXIds.clear();
blockXIds1d.clear();
blockYIds.clear();
blockYIds1d.clear();
cudaFree(threadXIdsGPU);
cudaFree(threadYIdsGPU);
cudaFree(blockXIdsGPU);
cudaFree(blockYIdsGPU);
}
|
10,152 | //headers
#include <cuda.h>
//cuda kernel definition
// Generate an animated sine/cosine surface: each thread writes one vertex
// float4(u, w, v, 1) for its (x, y) grid cell, where (u, v) is the cell
// mapped to [-1, 1] and w is the surface height at time `timer`.
// NOTE(review): no bounds check — assumes the launch covers exactly
// width x height threads; confirm against the launcher.
__global__ void sinewave_kernel(float4 *pos, unsigned int width, unsigned int height, float timer)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
float u, v, w;
const float frequency = 4.0f;
// map grid coordinates to [-1, 1] uv space
u = x / (float)width;
v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// animated height field
w = sinf(u * frequency + timer) * cosf(v * frequency + timer) * 0.5f;
pos[y * width + x] = make_float4(u, w, v, 1.0f);
}
// Host-side launcher for sinewave_kernel over a width x height vertex grid.
// BUG FIX: the block was dim3(8, 8, 8); sinewave_kernel is purely 2D (it
// never reads threadIdx.z), so the 8 z-layers made every vertex be computed
// and written 8 times redundantly. A z-extent of 1 produces identical output.
// NOTE(review): the grid division truncates and the kernel has no bounds
// check, so width and height must be multiples of 8 — confirm with callers.
void launch_cuda_kernel(float4 *ppos, unsigned int width, unsigned int height, float timer)
{
dim3 block = dim3(8, 8, 1);
dim3 grid = dim3(width/block.x, height/block.y, 1);
sinewave_kernel<<<grid, block>>>(ppos, width, height, timer);
}
|
10,153 | /*Autores:
*Walter Martínez Santana
*José Carlos Castro
*/
#include <stdio.h>
// Dense matrix multiply dc = da * db for num x num row-major matrices.
// Each thread covers a grid-strided set of output cells (i, j), so the
// kernel is correct for any grid/block configuration.
__global__ void multMatriz(float *da, float *db, float *dc, int num){
int j = threadIdx.x + blockIdx.x * blockDim.x;
int i = threadIdx.y + blockIdx.y * blockDim.y;
while(j<num){
while(i<num){
// BUG FIX: sum was declared and zeroed once per thread, so when a
// thread computed more than one output cell the dot products of all
// previous cells leaked into the next. Reset it per (i, j) cell.
float sum = 0;
for (unsigned int k = 0; k<num; k++)
sum += da[i * num + k] * db[k * num + j];
dc[i*num + j] = (float) sum;
i += gridDim.y * blockDim.y;
}
j+=gridDim.x * blockDim.x;
// restart the row sweep for the next column stripe
i = threadIdx.y + blockIdx.y * blockDim.y;
}
}
#define n 300
#define SIZE n*n*sizeof(float)
// Driver: multiplies two n x n all-ones matrices on the GPU, prints A, B
// and C (every C entry should equal n), and reports the kernel time
// measured with CUDA events.
int main(){
int N=n;
float *A, *B, *C;
float *da, *db, *dc;
int m;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 dimGrid(16, 16);
dim3 dimBlock(16, 16);
A=(float *)malloc(SIZE);
B=(float *)malloc(SIZE);
C=(float *)malloc(SIZE);
// A and B all ones, C zeroed; expected product entries are all n
for(m=0;m<N*N;m++){
A[m]=(float)1;
B[m]=(float)1;
C[m]=(float)0;
}
cudaMalloc((void**)&da, SIZE);
cudaMalloc((void**)&db, SIZE);
cudaMalloc((void**)&dc, SIZE);
cudaMemcpy(da,A, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(db,B, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dc,C, SIZE, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
multMatriz<<<dimGrid , dimBlock >>>(da,db,dc,N);
//cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
// blocking copy also synchronizes with the kernel
cudaMemcpy(C,dc, SIZE, cudaMemcpyDeviceToHost);
for(m=0;m<N*N;m++){
printf("%08.0f",A[m]);
printf("%c",( (m%N)<(N-1) ) ? '\t':'\n');
}
printf("\n\n");
for(m=0;m<N*N;m++){
printf("%08.0f",B[m]);
printf("%c",( (m%N)<(N-1) ) ? '\t':'\n');
}
printf("\n\n");
for(m=0;m<N*N;m++){
printf("%08.0f",C[m]);
printf("%c",( (m%N)<(N-1) ) ? '\t':'\n');
}
printf("\n\n");
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Tiempo %4.6f milseg\n\n",elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(A);
free(B);
free(C);
return 0;
}
|
10,154 | #include "math.h"
#define SMALLEST_FLOAT 1.175494351E-38
#define MAX_ELEMENTS_PER_BLOCK 2048
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n)\
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n)((n) >> LOG_NUM_BANKS)
#endif
// Phylogenetic likelihood step: for each (site, rate category), weight the
// per-state partial likelihoods by the equilibrium frequencies and the
// category weight (1 - pInv)/numCategories, sum over states, and store the
// per-site result back into ufScaling. Block layout: threadIdx.x = site
// within block, threadIdx.y = state; blockIdx.y = rate category.
// NOTE(review): assumes blockDim.x * numStates <= 1024 (shared buffers).
extern "C"
__global__ void reduceStates(int numCategories, int numStates, int numSitesWithPadding, int numSites,
double pInv, double* equiFreq,
float* sequence, double* ufScaling){
//among-site rate heterogeneity category index
int categoryIdx = blockIdx.y;
// sequence site index
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
// site index within current block
int siteBlockIdx = threadIdx.x;
// index of the ancestral state at the current node
int stateIdx = threadIdx.y;
__shared__ float partialLikelihoods[1024];
__shared__ double sharedDoubleBuffer[1024];
// Prefetching partial likelihoods
// (each thread writes and then reads its own slot, so no barrier is
// needed between these two guarded statements)
if(siteIdx < numSitesWithPadding){
partialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = sequence[siteIdx + stateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
}
if(siteIdx < numSites){
sharedDoubleBuffer[stateIdx * blockDim.x + siteBlockIdx] = partialLikelihoods[stateIdx * blockDim.x + siteBlockIdx] * equiFreq[stateIdx]
* ((1.0 - pInv)/(double)(numCategories)) * ufScaling[categoryIdx * numSitesWithPadding + siteIdx];
}
__syncthreads();
// Reduce states here
if(stateIdx == 0 && siteIdx < numSites){
double cellLikelihoodAccumulator = 0;
for(int st = 0; st < numStates; st++){
cellLikelihoodAccumulator += sharedDoubleBuffer[st * blockDim.x + siteBlockIdx];
}
ufScaling[categoryIdx * numSitesWithPadding + siteIdx] = cellLikelihoodAccumulator;
}
}
// Sum the per-category site likelihoods produced by reduceStates, add the
// invariant-site contribution pInv * invSites[site], and write the weighted
// log-likelihood per site into ufScaling[site]. Block layout:
// threadIdx.x = site within block, threadIdx.y = rate category.
// NOTE(review): assumes blockDim.x * numCategories <= 1024 (shared buffer).
extern "C"
__global__ void reduceCategories(int numCategories, int numSites, int numSitesWithPadding, double pInv, double* ufScaling, double* invSites, double* weights){
int categoryIdx = threadIdx.y;
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
int siteBlockIdx = threadIdx.x;
__shared__ double sharedDoubleBuffer[1024];
// stage each (category, site) value into shared memory
if(siteIdx < numSites){
sharedDoubleBuffer[categoryIdx * blockDim.x + siteBlockIdx] = ufScaling[categoryIdx * numSitesWithPadding + siteIdx];
}
__syncthreads();
// one thread per site sums across categories and takes the log
if(categoryIdx == 0 && siteIdx < numSites){
for(int cat = 1; cat < numCategories; cat++){
sharedDoubleBuffer[siteBlockIdx] += sharedDoubleBuffer[cat * blockDim.x + siteBlockIdx];
}
double siteLikelihoodInv = invSites[siteIdx] * pInv;
ufScaling[siteIdx] = log(sharedDoubleBuffer[siteBlockIdx] + siteLikelihoodInv) * weights[siteIdx];
}
}
// Block-wise sum of g_idata (up-sweep phase of the classic work-efficient
// scan, per the GPU Gems / NVIDIA reduction notes), with bank-conflict
// padding via CONFLICT_FREE_OFFSET. Each block reduces
// MAX_ELEMENTS_PER_BLOCK inputs (zero-padded past n) and writes its total
// back into g_idata[blockIdx.x]; when the grid has a single block the final
// sum is also copied to g_odata[0]. `debug` is accepted but unused.
extern "C"
__global__ void reduceSites(double* g_odata, double* g_idata, int n, double* debug){
__shared__ double temp[2115];
int thid = threadIdx.x;
int offset = 1;
// each thread loads two elements, one from each half of the block's range
int ai = thid;
int bi = thid + (MAX_ELEMENTS_PER_BLOCK/2);
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
//Copy data from global memory to shared memory and apply
// padding for sizes that are not exponents of 2
int blockOffset = MAX_ELEMENTS_PER_BLOCK * blockIdx.x;
if((blockOffset + ai) < n){
temp[ai + bankOffsetA] = g_idata[blockOffset + ai];
}else{
temp[ai + bankOffsetA] = 0;
}
if((blockOffset + bi) < n){
temp[bi + bankOffsetB] = g_idata[blockOffset + bi];
}else{
temp[bi + bankOffsetB] = 0;
}
// up-sweep: build the sum tree in place
for(int d = MAX_ELEMENTS_PER_BLOCK >> 1; d > 0; d >>= 1){
__syncthreads();
if(thid < d){
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
// the block total ends up at the root of the tree (last padded slot)
if(thid == 0){
g_idata[blockIdx.x] = temp[MAX_ELEMENTS_PER_BLOCK - 1 + CONFLICT_FREE_OFFSET(MAX_ELEMENTS_PER_BLOCK - 1)];
}
__syncthreads();
if(thid == 0 && blockIdx.x == 0 && gridDim.x == 1) g_odata[0] = g_idata[0];
} |
10,155 | // Berat Postalcioglu
/* OUTPUT
Asymmetric Traveling Salesman Problem Solver with random weighted 5 Nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
---> Duration in Graph generation with random numbers: 48.7383 ms. <---
=== g1 adjacency matrix ===
m[0]: 896, m[1]: 857, m[2]: 53, m[3]: 107, m[4]: 987,
m[5]: 570, m[6]: 115, m[7]: 713, m[8]: 708, m[9]: 757,
m[10]: 125, m[11]: 286, m[12]: 385, m[13]: 582, m[14]: 786,
m[15]: 539, m[16]: 564, m[17]: 333, m[18]: 198, m[19]: 617,
m[20]: 159, m[21]: 640, m[22]: 86, m[23]: 413, m[24]: 973
=== g2 adjacency matrix ===
m[0]: 896, m[1]: 857, m[2]: 53, m[3]: 107, m[4]: 987,
m[5]: 570, m[6]: 115, m[7]: 713, m[8]: 708, m[9]: 757,
m[10]: 125, m[11]: 286, m[12]: 385, m[13]: 582, m[14]: 786,
m[15]: 539, m[16]: 564, m[17]: 333, m[18]: 198, m[19]: 617,
m[20]: 159, m[21]: 640, m[22]: 86, m[23]: 413, m[24]: 973
---> Duration in Graph addition: 0.01024 ms. <---
=== g3 adjacency matrix = (g1 + g2) ===
m[0]: 1792, m[1]: 1714, m[2]: 106, m[3]: 214, m[4]: 1974,
m[5]: 1140, m[6]: 230, m[7]: 1426, m[8]: 1416, m[9]: 1514,
m[10]: 250, m[11]: 572, m[12]: 770, m[13]: 1164, m[14]: 1572,
m[15]: 1078, m[16]: 1128, m[17]: 666, m[18]: 396, m[19]: 1234,
m[20]: 318, m[21]: 1280, m[22]: 172, m[23]: 826, m[24]: 1946
=== Optimal Path Found ===
0 -> 3 -> 1 -> 4 -> 2 -> 0 | cost: 3278
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include "Graph.cuh"
#include "Solver.cuh"
using namespace std;
using namespace atspSolver;
const int NumberOfNodes = 5;
// Driver: builds a random weighted graph g1 (see Graph.cuh), copies it to
// g2, adds them into g3, and runs the ATSP solver on g3, printing each
// adjacency matrix and the optimal tour found.
int main()
{
cout << "Asymmetric Traveling Salesman Problem Solver with random weighted 5 Nodes" << endl;
cout << "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" << endl;
// g2 is a copy of g1, so g3 = g1 + g2 doubles every edge weight
Graph g1(NumberOfNodes);
Graph g2 = g1;
cout << endl;
cout << "=== g1 adjacency matrix === " << endl;
g1.display();
cout << endl;
cout << "=== g2 adjacency matrix === " << endl;
g2.display();
cout << endl;
Graph g3 = g1 + g2;
cout << "=== g3 adjacency matrix = (g1 + g2) === " << endl;
g3.display();
// exhaustive/branch search provided by Solver.cuh
fullCycle optimalPath = findOptimalPath(g3);
cout << endl;
cout << "=== Optimal Path Found ===" << endl;
optimalPath.display();
return 0;
} |
// Write one animated sine-wave vertex float4(u, w, v, 1) per (x, y) cell.
// NOTE(review): no bounds check — assumes the launch covers exactly
// width x height threads; confirm against the host launcher.
__global__ void createVertices_kernel(float4* positions, float time,
unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u * freq + time)
* cosf(v * freq + time) * 0.5f;
// Write positions
positions[y * width + x] = make_float4(u, w, v, 1.0f);
}
// Launch createVertices_kernel over a width x height vertex grid and wait
// for it to finish. The grid division truncates and the kernel has no
// bounds guard, so width and height should be multiples of 16.
void createVertices(void * positions, float time,
unsigned int width, unsigned int height)
{
const dim3 threads(16, 16, 1);
const dim3 blocks(width / threads.x, height / threads.y, 1);
createVertices_kernel<<<blocks, threads>>>((float4*)positions, time,
width, height);
cudaDeviceSynchronize();
}
|
10,157 | #include <stdlib.h>
#include <stdio.h>
// Simple 2-float POD; filled on the device by testKernel and copied back.
struct Point
{
float a, b;
};
// Write fixed marker values into each Point; one thread per array element.
// NOTE(review): no bounds check — the launch must cover exactly the array.
__global__ void testKernel(Point *p)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
p[i].a = 1.1;
p[i].b = 2.2;
}
// Driver: allocates an array of Points on the device, fills it with
// testKernel, copies it back and prints the values (expected 1.1 / 2.2).
int main(void)
{
// set number of points
// (numPoints must be divisible by gpuBlockSize — grid size is exact)
int numPoints = 16,
pointSize = sizeof(Point),
numBytes = numPoints * pointSize,
gpuBlockSize = 4,
gpuGridSize = numPoints / gpuBlockSize;
Point cpuPointArray[numPoints];
Point* gpuPointArray;
cudaMalloc((void**)&gpuPointArray, numBytes);
// launch kernel
testKernel<<<gpuGridSize,gpuBlockSize>>>(gpuPointArray);
// retrieve the results
// (blocking memcpy also synchronizes with the kernel launch)
cudaMemcpy(cpuPointArray, gpuPointArray, numBytes, cudaMemcpyDeviceToHost);
printf("testKernel results:\n");
for(int i = 0; i < numPoints; ++i)
{
printf("point.a: %f, point.b: %f\n",cpuPointArray[i].a, cpuPointArray[i].b);
}
// deallocate memory
cudaFree(gpuPointArray);
return 0;
} |
10,158 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
// Current wall-clock time in seconds (gettimeofday, microsecond resolution).
double dwalltime(){
struct timeval now;
gettimeofday(&now, NULL);
return (double)now.tv_sec + now.tv_usec / 1000000.0;
}
// One step of an in-place pairwise reduction toward the mean: element
// k*dist absorbs element k*dist + dist/2 for each of the n active pairs.
// On the final step (dist == tam_tot) the accumulated total at each active
// slot is divided by tam_tot to yield the average. Called repeatedly from
// the host with dist doubling each pass.
__global__ void vecPromedio(double *d_vecA,unsigned long dist,unsigned long n,unsigned long tam_tot){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n){
d_vecA[global_id*dist] = d_vecA[global_id*dist] + d_vecA[global_id*dist+dist / 2];
if(dist == tam_tot) {
// last pass: turn the total into the mean
d_vecA[global_id*dist] /= tam_tot;
}
}
}
// Per-element terms for the final statistic: with the mean in
// d_vecPromedio[0], writes (v - mean)^2 into d_parcialA and overwrites
// d_parcialB with (v + mean)^2, where v = d_parcialB[i]. Both arrays are
// then tree-reduced by the `acomulativo` kernel.
__global__ void sumatoria(double *d_parcialA,double *d_parcialB,double *d_vecPromedio, unsigned long n){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n){
d_parcialA[global_id] = (d_parcialB[global_id] - d_vecPromedio[0]) * (d_parcialB[global_id] - d_vecPromedio[0]);
d_parcialB[global_id] = (d_parcialB[global_id] + d_vecPromedio[0]) * (d_parcialB[global_id] + d_vecPromedio[0]);
}
}
// One step of a simultaneous pairwise reduction of d_parcialA and
// d_parcialB with stride dist (n active pairs). On the final step
// (dist == tam_tot) it combines the two totals into the result
// sqrt(sumA / (sumB + 1)), left in d_parcialB[0].
__global__ void acomulativo(double *d_parcialA,double *d_parcialB,unsigned long dist,unsigned long n,unsigned long tam_tot){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n){
d_parcialA[global_id*dist] = d_parcialA[global_id*dist] + d_parcialA[global_id*dist+dist / 2];
d_parcialB[global_id*dist] = d_parcialB[global_id*dist] + d_parcialB[global_id*dist+dist / 2];
if(dist == tam_tot) {
// last pass: fold the two reduced sums into the final value
d_parcialB[0] += 1;
d_parcialB[0] = sqrt(d_parcialA[0] / d_parcialB[0]);
}
}
}
// Driver: computes sqrt(sum((v-mean)^2) / (sum((v+mean)^2)+1)) over a
// vector of size N, once sequentially on the CPU and once with the three
// reduction kernels on the GPU, and prints both timings and results.
// Usage: prog N CUDABLK (N should be a power of two for the reductions).
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N, CUDABLK\n");
return 0;
}
// variable declarations
cudaError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]);
unsigned long numBytes = sizeof(double)*N,tam_tot;
double *vecA,promedio,result,parcialA,parcialB,*d_vecA,*d_vecPromedio,*d_parcialA,resultgpu,timetick;
unsigned int i;
vecA = (double *)malloc(numBytes);
promedio = 0;
result = 0;
parcialA = 0;
parcialB = 0;
for (i = 0; i < N; i++){
vecA[i] = i;
}
tam_tot = N;
cudaMalloc((void **) &d_vecA, numBytes);
cudaMalloc((void **) &d_vecPromedio, numBytes);
cudaMalloc((void **) &d_parcialA, numBytes);
// One-dimensional block of threads (*cb* threads)
dim3 dimBlock(CUDA_BLK);
//-------------------------------- cpu begins ------------------------------------
// sequential reference computation
timetick = dwalltime();
for (i = 0; i < N; i++){
promedio += vecA[i];
}
promedio /= N;
for (i = 0; i < N; i++){
parcialA += (vecA[i] - promedio) * (vecA[i] - promedio);
parcialB += (vecA[i] + promedio) * (vecA[i] + promedio);
}
parcialB += 1;
result = sqrt(parcialA / parcialB);
printf("Tiempo para la CPU: %f\n\n",dwalltime() - timetick);
//-------------------------------- cpu ends ------------------------------------
// reset the input (the CPU pass left it unchanged, but be explicit)
for (i = 0; i < N; i++){
vecA[i] = i;
}
//-------------------------------- gpu begins ------------------------------------
timetick = dwalltime();
cudaMemcpy(d_vecA, vecA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_vecPromedio, vecA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
// mean: log2(N) pairwise-reduction passes over d_vecPromedio
for(i = 2; i <= N ;i *= 2){
dim3 dimGrid((N / i + dimBlock.x - 1) / dimBlock.x);
vecPromedio<<<dimGrid, dimBlock>>>(d_vecPromedio,i,N/i,tam_tot);
cudaThreadSynchronize();
}
// One-dimensional grid (*ceil(n/cb)* blocks)
dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x);
// per-element squared terms
sumatoria<<<dimGrid, dimBlock>>>(d_parcialA,d_vecA,d_vecPromedio,N);
cudaThreadSynchronize();
// accumulated sum: reduce both term arrays and fold into the result
for(i = 2; i <= N ;i *= 2){
dim3 dimGrid((N / i + dimBlock.x - 1) / dimBlock.x);
acomulativo<<<dimGrid, dimBlock>>>(d_parcialA,d_vecA,i,N/i,tam_tot);
cudaThreadSynchronize();
}
cudaMemcpy(&resultgpu, d_vecA, sizeof(double), cudaMemcpyDeviceToHost); // GPU -> CPU
printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n\n",error);
//-------------------------------- gpu ends ------------------------------------
cudaMemcpy(vecA, d_vecA, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
/*
printf("----------------------------------------\n\n");
for(i = 0; i < N; i++){
printf("%f|",vecA[i]);
}
printf("\n\n");
printf("promedio: %f\n",promedio);
printf("parcialA: %f||parcialB: %f\n",parcialA,parcialB);*/
printf("resultadoCPU: %f\n",result);
printf("resultadoGPU: %f\n",resultgpu);
cudaFree(d_vecA);
free(vecA);
return 0;
} |
10,159 | /**
* Primality Testing with CUDA (Fall 2016):
*
* Members:
* Emanuelle Crespi, Tolga Keskinoglu
*
* This test implements an algorithm to test for primality discussed in the methodology section
* of Optimizing CPU-GPU Interactions.
*
* The following code makes use of the kernel call is_prime(int n, char *factor, char *prime)
* to perform a parallel search for some factor of the value n. The kernel calls are
* seperated into r=20 streams amongst the multi-stream processors of the CUDA compatible GPU.
* This allows us to gather data via power analysis to find a relationship between
* execution speed and power dissipation for the Jetsion TK1.
*
* While the overhead of executing executing r streams slows down execution time,
* the performance of the parallel search itself is significantly faster than it's
* serial counterpart. We can see a significant improvement in the output displayed during runtime
* when r = 1.
*
* The output of the performance is displayed in seconds for verification.
*
* References:
* NVIDIA CUDA C Programming Guide Version 3.2
*/
// System includes
#include <stdio.h>
#include <time.h>
// Jetson TK1 has device capability 1.x allowing 1024 threads/block
#define THREADS_PER_BLOCK 1024
// Performs a parallel search for a factor of the value n
// When a multiple is found, prime is written to 1 and facter
// is written as the multiple to be read & verified by the caller
//
// The values are written to device memory and must be recovered by the caller
// Parallel factor search: thread i tests whether i divides n (for 1 < i < n).
// On success it clears *d_prime and records the factor in *d_factor; if no
// thread finds a divisor, *d_prime keeps its host-initialized value of 1.
// Note: concurrent writers may race on d_factor, but any value written is a
// valid factor, so the result is still correct.
__global__ void is_prime(int n, int *d_factor, int *d_prime) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i > 1 && i < n && n % i == 0) {
*d_prime = 0;
*d_factor = i;
}
}
// Driver: runs the is_prime factor search r times across r CUDA streams
// (deliberate overhead for the power-analysis experiment), times the whole
// batch with CUDA events, then reports whether n is prime.
int main(void) {
//r can be modified to produce as much overhead as needed during testing
int *prime, *d_prime, n=900000006, r=20, *factor, *d_factor;
cudaError_t error;
/* Generate space on the device */
prime = (int *)calloc(1, sizeof(int));
*prime = 1;
cudaMalloc((void **)&d_prime, sizeof(int));
cudaMemcpy(d_prime, prime, sizeof(int), cudaMemcpyHostToDevice);
factor = (int *)calloc(1, sizeof(int));
cudaMalloc((void **)&d_factor, sizeof(int));
/* Launch encrypt() kernel on GPU */
cudaStream_t stream[r];
for (int i = 0; i < r; i++ )
cudaStreamCreate(&stream[i]);
/*******************************for testing purposes****************************************
*******************************************************************************************/
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
// NEED TO PUT STREAMS FOR R VALUE IN HERE
// (each launch repeats the same search; syncing per stream serializes them)
for( int i = 0; i < r; i++){
is_prime<<<(n + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK,0,stream[i]>>>(n, d_factor, d_prime);
cudaStreamSynchronize(stream[i]);
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerisPrime = msecTotal / 1;
printf( "Performance= %.06f sec\n", msecPerisPrime/1000.0 );
/*******************************************************************************************
****************************** for testing purposes ***************************************/
/* Destroy streams */
for (int j = 0; j < r; j++){
cudaStreamDestroy(stream[j]);
}
/* Copy results back to host */
error = cudaMemcpy(prime, d_prime, sizeof(int), cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (prime,d_prime) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(factor, d_factor, sizeof(int), cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (factor,d_factor) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
/* IS IT PRIME??? */
if (*prime == 1) {
printf("%d is prime.\n", n);
} else {
printf("%d is NOT prime, %d is a factor!\n", n, *factor);
}
/* Cleanup */
free(prime); free(factor);
cudaFree(d_prime); cudaFree(d_factor);
return 0;
}
|
10,160 | #include "includes.h"
// In-place error calculation: out[i] = label[i] - out[i], one thread per
// element. NOTE(review): no bounds check — the launch must cover exactly
// the array length (the original comment suggests 10 * Data.count).
__global__ void Ecalc2(float* out, const float* label)
{
int i = blockDim.x*blockIdx.x + threadIdx.x; //10 * Data.count
out[i] = label[i] - out[i];
} |
10,161 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <iostream>
#include <functional>
#include <algorithm>
#define NUM_OF_GPU_THREADS 1024
#define ACCURACY 1
/*
 * Sequential Jacobi solver for the tridiagonal system A*x = b with
 * A = tridiag(-1, 2, -1) and b = (0, ..., 0, n+1); the exact solution is
 * x = (1, 2, ..., n). Prints per-iteration l2 norms of the update delta
 * and the residual for the first/last 10 iterations.
 *
 *   m : number of Jacobi iterations to run
 *   n : number of unknowns
 *
 * Returns a malloc'd array of n doubles holding the final estimate;
 * ownership passes to the caller (which frees it).
 */
double *jacobi_seq(int m, int n)
{
    double d, r, t, *b, *x, *xnew;
    int i, it;
    /* calloc (not malloc) so b[0..n-2] are a well-defined 0.0: the original
       code left them uninitialized even though every iteration reads b[i],
       which was undefined behavior and made results nondeterministic.
       (The GPU version assumes b[i] == 0 for i < n-1, so zeros are correct.) */
    b = (double *)calloc(n, sizeof(double));
    x = (double *)malloc(n * sizeof(double));
    xnew = (double *)malloc(n * sizeof(double));
    printf("\n");
    printf("JACOBI:\n");
    printf("  C++ sequential version\n");
    printf("  Jacobi iteration to solve A*x=b.\n");
    printf("\n");
    printf("  Number of variables  N = %d\n", n);
    printf("  Number of iterations M = %d\n", m);
    printf("\n");
    printf("  IT     l2(dX)    l2(resid)\n");
    printf("\n");
    /* Only the last right-hand-side entry is nonzero. */
    b[n - 1] = (double)(n + 1);
    /*
      Initialize the solution estimate to 0.
      Exact solution is (1,2,3,...,N).
    */
    for (i = 0; i < n; i++)
    {
        x[i] = 0.0;
    }
    /* Iterate M times. */
    for (it = 0; it < m; it++)
    {
        /* Jacobi update: xnew[i] = (b[i] + x[i-1] + x[i+1]) / 2. */
        for (i = 0; i < n; i++)
        {
            xnew[i] = b[i];
            if (0 < i)
            {
                xnew[i] = xnew[i] + x[i - 1];
            }
            if (i < n - 1)
            {
                xnew[i] = xnew[i] + x[i + 1];
            }
            xnew[i] = xnew[i] / 2.0;
        }
        /* Squared l2 norm of the update delta. */
        d = 0.0;
        for (i = 0; i < n; i++)
        {
            d = d + pow(x[i] - xnew[i], 2);
        }
        /* Overwrite old solution. */
        for (i = 0; i < n; i++)
        {
            x[i] = xnew[i];
        }
        /* Squared l2 norm of the residual b - A*x. */
        r = 0.0;
        for (i = 0; i < n; i++)
        {
            t = b[i] - 2.0 * x[i];
            if (0 < i)
            {
                t = t + x[i - 1];
            }
            if (i < n - 1)
            {
                t = t + x[i + 1];
            }
            r = r + t * t;
        }
        /* Report only the first and last 10 iterations. */
        if (it < 10 || m - 10 < it)
        {
            printf("  %8d  %14.6g  %14.6g\n", it, sqrt(d), sqrt(r));
        }
        if (it == 9)
        {
            printf("  Omitting intermediate results.\n");
        }
    }
    /* Write part of final estimate. */
    printf("\n");
    printf("  Part of final solution estimate:\n");
    printf("\n");
    for (i = 0; i < 10; i++)
    {
        printf("  %8d  %14.6g\n", i, x[i]);
    }
    printf("...\n");
    for (i = n - 11; i < n; i++)
    {
        printf("  %8d  %14.6g\n", i, x[i]);
    }
    /* Free scratch memory; x is returned to (and freed by) the caller. */
    free(b);
    free(xnew);
    return x;
}
// One Jacobi sweep for A*x = b, A = tridiag(-1, 2, -1), b = (0,...,0,n+1):
//   cuda_xnew[i] = (b[i] + x[i-1] + x[i+1]) / 2
// Neighbour values are read from shared memory when they belong to the same
// block, otherwise straight from global memory. When `iteration` == 1 the
// squared per-element delta is also written to cuda_d_array for the later
// block reduction.
// NOTE(review): the __syncthreads() calls inside `if (myId < n)` are only
// safe when every thread of a block takes the branch, i.e. when n is a
// multiple of blockDim.x -- otherwise the last block's barrier is divergent
// (undefined behavior). Confirm n is padded, or hoist the barriers out.
__global__ void KernelJacobi(double* cuda_x, double* cuda_xnew, double* cuda_d_array, int n, int iteration) {
__shared__ double shared_x_array[NUM_OF_GPU_THREADS];
int myId = blockIdx.x * blockDim.x + threadIdx.x;
int localId = threadIdx.x;
int leftId = localId- 1; // local leftId
int rightId = localId + 1; // local rightId
double leftValue, rightValue;
double xNewValue;
// double bValue; // is equal to (n + 1) only for the last element in cuda_x, otherwise 0
// ^ ---> can be avoided as to not use an additional variable in the register file
//printf("myId = %d, localId= %d, leftId = %d, rightId = %d\n", myId, threadIdx.x, leftId, rightId);
__syncthreads();
if (myId < n) {
// retrieve element for your id and place into shared memory
shared_x_array[threadIdx.x] = cuda_x[myId];
__syncthreads();
//printf("shared_x_array[%d] = %d\n", threadIdx.x, shared_x_array[threadIdx.x]);
// retrieve element from shared memory if leftId/rightId aren't out of bounds
// ---> otherwise, grab left/right value if it exists
if (leftId == -1) { // local leftId is out of bounds -- get value if it exists (isn't 0-th element of cuda_x)
if (myId == 0) {
leftValue = 0.0;
}
else {
leftValue = cuda_x[myId - 1]; // use global id
}
}
else {
leftValue = shared_x_array[leftId]; // get leftValue from shared array
}
if (rightId == NUM_OF_GPU_THREADS) {
// local rightId is out of bounds -- get value if it exists (isn't n-1-st element of cuda_x)
if (myId == n - 1) {
rightValue = 0.0;
}
else {
rightValue = cuda_x[myId + 1]; // use global id
}
}
else {
rightValue = shared_x_array[rightId]; // get rightValue from shared array
}
// b[i] is n+1 for the last unknown and 0 elsewhere.
xNewValue = (myId == n - 1) ? n + 1 : 0;
xNewValue += leftValue;
xNewValue += rightValue;
xNewValue /= 2.0;
// Store new value
cuda_xnew[myId] = xNewValue;
// printf("globalId=%d, localId=%d, leftId=%d, rightId=%d, leftValue=%f, rightValue=%f, newValue=%f\n", myId, localId, leftId, rightId, leftValue, rightValue, cuda_xnew[myId]);
// if it's an iteration when reduction will be performed, send D off to global memory
if (iteration == 1) {
cuda_d_array[myId] = pow(shared_x_array[threadIdx.x] - xNewValue, 2);
}
}
__syncthreads(); // all threads (even those that don't work) synchronise here
}
// Commits the freshly computed iterate: cuda_x <- cuda_xnew, element-wise,
// over the first n entries. 1-D launch; out-of-range threads do nothing.
__global__ void KernelOverwrite(double* cuda_x, double* cuda_xnew, int n) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    cuda_x[gid] = cuda_xnew[gid];
}
// Per-element squared residual of A*x = b (A = tridiag(-1, 2, -1)):
//   t = b[i] - 2*x[i] + x[i-1] + x[i+1],   cuda_r_array[i] = t*t.
// Uses the same shared-memory neighbour scheme as KernelJacobi, and shares
// its caveat: the __syncthreads() inside `if (myId < n)` is divergent in the
// last block unless n is a multiple of blockDim.x -- confirm or hoist.
__global__ void KernelCalculateR(double* cuda_x, double* cuda_r_array, int n) {
__shared__ double shared_x_array[NUM_OF_GPU_THREADS];
int myId = blockIdx.x * blockDim.x + threadIdx.x;
int localId = threadIdx.x;
int leftId = localId- 1; // local leftId
int rightId = localId + 1; // local rightId
double leftValue, rightValue;
double t;
__syncthreads();
if (myId < n) {
shared_x_array[threadIdx.x] = cuda_x[myId];
__syncthreads();
if (leftId == -1) { // local leftId is out of bounds -- get value if it exists (isn't 0-th element of cuda_x)
if (myId == 0) {
leftValue = 0.0;
}
else {
leftValue = cuda_x[myId - 1]; // use global id
}
}
else {
leftValue = shared_x_array[leftId]; // get leftValue from shared array
}
if (rightId == NUM_OF_GPU_THREADS) {
// local rightId is out of bounds -- get value if it exists (isn't n-1-st element of cuda_x)
if (myId == n - 1) {
rightValue = 0.0;
}
else {
rightValue = cuda_x[myId + 1]; // use global id
}
}
else {
rightValue = shared_x_array[rightId]; // get rightValue from shared array
}
// b[i] is n+1 only for the last unknown, 0 elsewhere.
t = (myId == n - 1) ? n + 1 : 0;
t -= 2.0 * shared_x_array[localId];
t += leftValue;
t += rightValue;
t *= t;
cuda_r_array[myId] = t;
}
__syncthreads();
}
// each block reduces and then places into cuda_d_result or cuda_r_result
__global__ void KernelReduction(double* cuda_d_array, double* cuda_r_array, int n,
double* cuda_d_result_array, double* cuda_r_result_array) {
__shared__ double shared_d_array[NUM_OF_GPU_THREADS];
__shared__ double shared_r_array[NUM_OF_GPU_THREADS];
int myId = blockIdx.x * blockDim.x + threadIdx.x;
int localId = threadIdx.x;
__syncthreads();
if (myId < n) {
shared_d_array[localId] = cuda_d_array[myId];
shared_r_array[localId] = cuda_r_array[myId];
}
__syncthreads();
//if (myId < n) {
if (blockIdx.x < gridDim.x - 1) {
// printf("entered if");
// 1024 elements to reduce
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (localId < s && myId < n) {
shared_d_array[localId] = shared_d_array[localId] + shared_d_array[localId + s];
shared_r_array[localId] = shared_r_array[localId] + shared_r_array[localId + s];
}
__syncthreads();
}
}
else {
// find number of elements remaining in array -- then reduce those
int remainingElements = n - blockIdx.x * NUM_OF_GPU_THREADS;
// printf("entered else, elems remaining: %d\n", remainingElements);
int b = 1;
while (b < remainingElements) // nearest larger power of 2
{
b = b << 1;
}
//printf("remaining=%d, b = %d, globalId=%d, localId=%d\n", remainingElements, b, localId, myId);
for (unsigned int s = b / 2; s > 0; s >>= 1) {
//printf("s=%d", s);
if ((localId < s) && (localId + s < remainingElements) && (myId < n)) {
// printf("globalId: %d, localId: %d, s: %d, shared_d[%d] = %f, shared_d[%d] = %f\n", myId, localId, s, localId, shared_d_array[localId], localId + s, shared_d_array[localId + s]);
// printf("Id=%d niz[%d]=niz[%d]+niz[%d] => %d + %d\n", id, id, id, id + s, cudaDeltaArray[id], cudaDeltaArray[id + s]);
shared_d_array[localId] = shared_d_array[localId] + shared_d_array[localId + s];
shared_r_array[localId] = shared_r_array[localId] + shared_r_array[localId + s];
}
__syncthreads();
}
}
// only element with local 0 id places result into resulting arrays
if (localId == 0) {
//printf("shared_d_array[%d] = %f\n", localId, shared_d_array[localId]);
cuda_d_result_array[blockIdx.x] = shared_d_array[0];
cuda_r_result_array[blockIdx.x] = shared_r_array[0];
}
// }
__syncthreads();
}
// Driver: runs the sequential Jacobi solver, then the CUDA version for the
// same m iterations / n unknowns, times both with CUDA events, prints the
// same diagnostics, and finally compares the two solutions element-wise
// within ACCURACY. Usage: prog [iterations unknowns]; defaults 5000/50000.
int main(int argc, char *argv[])
{
int m, n, i, it;
float timePar, timeSeq;
double *xPar, *xSeq;
cudaEvent_t start = cudaEvent_t();
cudaEvent_t stop = cudaEvent_t();
cudaEventCreate( &start );
cudaEventCreate( &stop );
if (argc == 3)
{
m = atoi(argv[1]);
n = atoi(argv[2]);
}
else
{
m = 5000;
n = 50000;
}
// Use standard jacobi interface
cudaEventRecord( start, 0 );
xSeq = jacobi_seq(m, n);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &timeSeq, start, stop );
timeSeq /= 1000;
printf("\n");
printf("JACOBI_CUDA:\n");
printf("  C/CUDA version - %d threads per block\n", NUM_OF_GPU_THREADS);
printf("  Jacobi iteration to solve A*x=b.\n");
printf("\n");
printf("  Number of variables  N = %d\n", n);
printf("  Number of iterations M = %d\n", m);
printf("\n");
printf("  IT     l2(dX)    l2(resid)\n");
printf("\n");
// ====== Parallel
// Copy to GPU.
double d, r;
double *cuda_x;
double *cuda_xnew;
double* cuda_d_array;
double* cuda_r_array;
// calloc gives the zero initial estimate that is then copied to the device.
xPar = (double*) calloc (n, sizeof(double));
cudaMalloc(&cuda_x, n * sizeof(double));
cudaMemcpy(cuda_x, xPar, n * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc(&cuda_xnew, n * sizeof(double));
cudaMalloc(&cuda_d_array, n * sizeof(double));
cudaMalloc(&cuda_r_array, n * sizeof(double));
dim3 DimGrid((int)((n + NUM_OF_GPU_THREADS - 1) / NUM_OF_GPU_THREADS)); // upper() thread blocks
dim3 DimBlock(NUM_OF_GPU_THREADS); // 1024 threads per block
// One partial sum per block comes back from KernelReduction.
double* d_result_array = (double*) malloc (DimGrid.x * sizeof(double));
double* r_result_array = (double*) malloc (DimGrid.x * sizeof(double));
double* cuda_d_result_array;
double* cuda_r_result_array;
cudaMalloc(&cuda_d_result_array, DimGrid.x * sizeof(double)); // 1 element for each block
cudaMalloc(&cuda_r_result_array, DimGrid.x * sizeof(double)); // 1 element for each block
printf("Dimgrid: %d, DimBlock: %d\n", DimGrid.x, DimBlock.x);
// Use parallel jacobi interface
cudaEventRecord( start, 0 );
for (it = 0; it < m; it++) {
// call kernel for processing
// Norms are only reported for the first and last 10 iterations, so the
// delta array is only produced on those iterations.
int shouldCalculateD = (it < 10 || m - 10 < it) ? 1 : 0;
// calculate xnew on GPU (and calc. d if needed)
KernelJacobi<<< DimGrid, DimBlock >>>(cuda_x, cuda_xnew, cuda_d_array, n, shouldCalculateD);
cudaDeviceSynchronize();
KernelOverwrite<<< DimGrid, DimBlock >>>(cuda_x, cuda_xnew, n); // place xnew into x on GPU
cudaDeviceSynchronize();
if (it < 10 || m - 10 < it) {
KernelCalculateR<<< DimGrid, DimBlock >>> (cuda_x, cuda_r_array, n);
cudaDeviceSynchronize();
KernelReduction <<< DimGrid, DimBlock >>> (cuda_d_array, cuda_r_array, n, cuda_d_result_array, cuda_r_result_array);
cudaDeviceSynchronize();
cudaMemcpy(d_result_array, cuda_d_result_array, DimGrid.x * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(r_result_array, cuda_r_result_array, DimGrid.x * sizeof(double), cudaMemcpyDeviceToHost);
// calculate final reduction
// Host sums the per-block partials to finish the reduction.
d = 0;
r = 0;
for (int i = 0; i < DimGrid.x; i++) {
//printf("d_local=%f, r_local=%f\n", d_result_array[i], r_result_array[i]);
d += d_result_array[i];
r += r_result_array[i];
}
/*
d = 1;
r = 1;
*/
printf("  %8d  %14.6g  %14.6g\n", it, sqrt(d), sqrt(r));
}
if (it == 9)
{
printf("  Omitting intermediate results.\n");
}
}
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &timePar, start, stop );
timePar /= 1000;
// Copy result.
cudaMemcpy(xPar, cuda_x, n * sizeof(double), cudaMemcpyDeviceToHost);
/* Write part of final estimate. */
printf("\n");
printf("  Part of final solution estimate:\n");
printf("\n");
for (i = 0; i < 10; i++)
{
printf("  %8d  %14.6g\n", i, xPar[i]);
}
printf("...\n");
for (i = n - 11; i < n; i++)
{
printf("  %8d  %14.6g\n", i, xPar[i]);
}
// Comparison
std::function<bool(double, double)> comparator = [](double left, double right) {
// Lambda function to compare 2 doubles with ACCURACY
return fabs(left - right) < ACCURACY;
};
std::vector<double> vectorPar(xPar, xPar + n), vectorSeq(xSeq, xSeq + n);
std::cerr << "********************DZ3Z2**********************" << std::endl;
std::cerr << "Elapsed time - SEQ: " << timeSeq << "." << std::endl;
std::cerr << "Elapsed time - PAR(" << NUM_OF_GPU_THREADS << " threads/block): " << timePar << "." << std::endl;
std::cerr << (std::equal(vectorPar.begin(), vectorPar.end(), vectorSeq.begin(), comparator) ? "TEST PASSED" : "TEST FAILED") << std::endl;
std::cerr << "***********************************************" << std::endl;
free(xSeq);
free(xPar);
// Cuda Free calls.
cudaFree(cuda_x);
cudaFree(cuda_xnew);
cudaFree(cuda_d_array);
cudaFree(cuda_r_array);
cudaFree(cuda_d_result_array);
cudaFree(cuda_r_result_array);
return 0;
}
|
10,162 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
double r1();
// Tiled dense matrix multiply: each thread block computes one
// blockSize x blockSize tile of Pd = M * N, staging tiles of M and N
// through shared memory. The full (square) matrix width is
// blockSize * loopTimes; the grid must tile it exactly (no bounds checks).
// NOTE: the shared tiles are statically 16x16, so blockSize must be <= 16.
__global__ void MatrixMulKernel(float* M,float* N, float* Pd, int blockSize,int loopTimes)
{
    __shared__ float Ms[16][16];
    __shared__ float Ns[16][16];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int row = blockIdx.y * blockSize + ty;
    const int col = blockIdx.x * blockSize + tx;
    const int width = blockSize * loopTimes;   // full matrix dimension
    float acc = 0;
    for (int tile = 0; tile < loopTimes; tile++)
    {
        // Stage one tile of each operand cooperatively.
        Ms[ty][tx] = M[row * width + (tile * blockSize + tx)];
        Ns[ty][tx] = N[(tile * blockSize + ty) * width + col];
        __syncthreads();   // tiles fully written before use
        for (int k = 0; k < blockSize; k++)
            acc += Ms[ty][k] * Ns[k][tx];
        __syncthreads();   // tiles fully consumed before overwrite
    }
    Pd[row * width + col] = acc;
}
// Benchmarks the shared-memory tiled matrix multiply on a 256x256 matrix
// with 16x16 blocks, then prints the elapsed time and the first 100 results.
int main()
{
    int matrixSize=pow(2,8);
    int blockSize=pow(2,4); //the default blockSize I will put as is 16
    int noOfElement=matrixSize*matrixSize;
    float* M;
    float* N;
    float* P;
    M=(float*)malloc(noOfElement*sizeof(float));
    N=(float*)malloc(noOfElement*sizeof(float));
    P=(float*)malloc(noOfElement*sizeof(float));
    clock_t begin, end;
    // Fill operands with pseudo-random values in [-1, 0]; clear the result.
    for(int i=0;i<noOfElement;i++)
    {
        M[i]=r1();
        N[i]=r1();
        P[i]=0.0;
    }
    //start timing after generating the matrix
    begin = clock();
    float* Pd,*Md,*Nd;
    int size=noOfElement*sizeof(float);
    cudaMalloc((void**)&Pd,size);
    //sned M and N to device
    cudaMalloc((void**)&Md,size);
    cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Nd,size);
    cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice);
    dim3 dimGrid(matrixSize/blockSize,matrixSize/blockSize);
    dim3 dimBlock(blockSize,blockSize);
    MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,blockSize,matrixSize/blockSize);
    cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost);
    // Release ALL device buffers (Md and Nd were leaked in the original).
    cudaFree(Pd);
    cudaFree(Md);
    cudaFree(Nd);
    double time_spent;
    /* here, do your time-consuming job */
    end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("time Spend for matrix size: (%d,%d), with blockSize: %d is :%f \n",matrixSize,matrixSize,blockSize,time_spent);
    printf("The following are the first 100 reuslt from the matrix multiplication:\n");
    //print out first 100 result.
    for(int i=0;i<100;i++)
    {
        printf("result: %f \n",P[i]);
    }
    // Release host buffers (also leaked in the original).
    free(M);
    free(N);
    free(P);
    return 0;
}
// Returns a pseudo-random double uniformly drawn from [-1, 0]
// (negated rand()/RAND_MAX). srand() is never called anywhere in this
// program, so the sequence is identical on every run.
double r1()
{
return -1.0*(double)rand() / (double)RAND_MAX ;
} |
10,163 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
// Vector addition where each thread owns `op_loop` consecutive elements:
// C[k] = A[k] + B[k] over the chunk [chunk*op_loop, (chunk+1)*op_loop).
//   N_op    : number of chunks (threads doing useful work)
//   op_loop : elements processed per thread
__global__ void VecAdd(float* A, float* B, float* C, int N_op, int op_loop) {
    const int chunk = blockDim.x * blockIdx.x + threadIdx.x;
    if (chunk >= N_op)
        return;                      // excess threads in the last block
    const int base = chunk * op_loop;
    for (int k = 0; k < op_loop; k++)
        C[base + k] = A[base + k] + B[base + k];
}
// Benchmarks VecAdd over 2^15 floats for chunk sizes op_loop = 1..512
// (powers of two), averaging each configuration over ~1000 launches with
// CUDA events, and prints the per-configuration average time.
int main() {
    int N = pow(2,15);
    int threadsPerBlock_op=256;
    int avg_runs=1000;
    size_t size = N * sizeof(float);
    int loop;
    int op_loop;
    int op_loop_array[10];
    int op_loop_ii;
    int clock_loop;
    float time_spent;
    // Allocate input vectors h_A and h_B in host memory
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);
    // Initialize input vectors
    printf("Array A (first 10 values) \n ");
    for(loop = 0; loop < N; loop++){
        h_A[loop] = rand() % 100 + 1;
        if (loop<10){
            printf("%f  ", h_A[loop]);
        }
    }
    printf("\nArray B (first 10 values) \n ");
    for(loop = 0; loop < N; loop++){
        h_B[loop] = rand() % 100 + 1;
        if (loop<10){
            printf("%f  ", h_B[loop]);
        }
    }
    // Chunk sizes to sweep: 1, 2, 4, ..., 512.
    for (op_loop_ii=0;op_loop_ii<10;op_loop_ii++){
        op_loop_array[op_loop_ii]=pow(2,op_loop_ii);
    }
    // Allocate vectors in device memory
    float* d_A; cudaMalloc(&d_A, size);
    float* d_B; cudaMalloc(&d_B, size);
    float* d_C; cudaMalloc(&d_C, size);
    //GPU timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice);
    for (op_loop_ii=0;op_loop_ii<10;op_loop_ii++){
        op_loop=op_loop_array[op_loop_ii];
        for(clock_loop=0;clock_loop<avg_runs;clock_loop++){
            // Start timing on the second run so the first (warm-up) launch
            // is excluded from the average.
            if (clock_loop==1){
                cudaEventRecord(start, 0);
            }
            // Invoke kernel: N_op chunks of op_loop elements each.
            int threadsPerBlock = threadsPerBlock_op;
            int N_op=(N + op_loop -1)/op_loop;
            int blocksPerGrid = (N_op + threadsPerBlock - 1) /threadsPerBlock;
            VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B, d_C, N_op,op_loop);
            // h_C contains the result in host memory
            cudaMemcpy(h_C, d_C, size,cudaMemcpyDeviceToHost);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time_spent, start, stop);
        time_spent=time_spent/(avg_runs-1)*10;
        printf("\n Average Time spent in loop %d is %f",op_loop,time_spent);
    }
    // Destroy timing events (omitted in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Free host memory (h_C was leaked in the original).
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
} |
10,164 | #include <iostream>
#define TILE_WIDTH 16
// Naive dense matmul: one thread computes one element of d_p = d_M * d_N
// for square width x width row-major matrices. Threads that fall outside
// the matrix do nothing.
__global__ void matMulSimpleDevice(float* d_M, float* d_N, float* d_p, int width){
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= width || c >= width)
        return;
    float acc = 0;
    for (int k = 0; k < width; ++k)
        acc += d_M[r * width + k] * d_N[k * width + c];
    d_p[r * width + c] = acc;
}
// Tiled shared-memory matmul: d_p = d_M * d_N for square width x width
// matrices. Each block computes one TILE_WIDTH x TILE_WIDTH output tile,
// staging operand tiles through shared memory; out-of-range tile slots are
// zero-filled, so the barriers are reached by every thread of the block
// (no divergent __syncthreads) and partial edge tiles are handled correctly.
__global__ void matMulAdvancedDevice(float* d_M, float* d_N, float* d_p, int width){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float pvalue = 0;
// Number of tiles needed to cover width (ceiling division).
int a = width/TILE_WIDTH;
if (width%TILE_WIDTH) a++;
for (int m = 0; m < a; m++){
if((row < width) && (m * TILE_WIDTH+tx < width)){
Mds[ty][tx] = d_M[row*width + m * TILE_WIDTH+tx];
}else{
Mds[ty][tx] = 0.0;
}
if((col < width) && (m * TILE_WIDTH+ty < width)){
Nds[ty][tx] = d_N[(m*TILE_WIDTH+ty)*width + col];
}else{
Nds[ty][tx] = 0.0;
}
__syncthreads();
for (int k = 0; k < TILE_WIDTH; k++){
//if((m * TILE_WIDTH + tx < width) && (m * TILE_WIDTH + ty < width)){
// Zero-padded slots contribute nothing, so no per-k bounds check needed.
pvalue += Mds[ty][k] * Nds[k][tx];
//}
}
__syncthreads();
}
if ((row < width) && (col < width)){
d_p[row * width + col] = pvalue;
}
}
// Host wrapper for the naive kernel: stages a and b (width x width,
// row-major) on the device, launches matMulSimpleDevice with 16x16 thread
// blocks, and returns a freshly malloc'd product matrix (caller frees it).
float* matMulSimple(float* a, float* b, int width){
    const int bytes = sizeof(float) * width * width;
    float* result = (float*)malloc(bytes);
    float *devA, *devB, *devOut;
    cudaMalloc((void **) &devA, bytes);
    cudaMalloc((void **) &devB, bytes);
    cudaMalloc((void **) &devOut, bytes);
    cudaMemcpy(devA, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b, bytes, cudaMemcpyHostToDevice);
    const int blockWidth = 16;
    const int blocks = (width + blockWidth - 1) / blockWidth;  // ceil-div
    dim3 grid(blocks, blocks);
    dim3 block(blockWidth, blockWidth);
    matMulSimpleDevice<<<grid, block>>>(devA, devB, devOut, width);
    cudaMemcpy(result, devOut, bytes, cudaMemcpyDeviceToHost);
    // Release device memory
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devOut);
    return result;
}
// Host wrapper for the tiled kernel: stages a and b (width x width,
// row-major) on the device, launches matMulAdvancedDevice on a grid of
// TILE_WIDTH x TILE_WIDTH blocks, and returns a malloc'd product matrix
// owned by the caller.
float* matMulAdvanced(float* a, float* b, int width){
    const int bytes = sizeof(float) * width * width;
    float* product = (float*)malloc(bytes);
    float *devA, *devB, *devOut;
    cudaMalloc((void **) &devA, bytes);
    cudaMalloc((void **) &devB, bytes);
    cudaMalloc((void **) &devOut, bytes);
    cudaMemcpy(devA, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b, bytes, cudaMemcpyHostToDevice);
    const int blocks = (width + TILE_WIDTH - 1) / TILE_WIDTH;  // ceil-div
    dim3 grid(blocks, blocks);
    dim3 block(TILE_WIDTH, TILE_WIDTH);
    matMulAdvancedDevice<<<grid, block>>>(devA, devB, devOut, width);
    cudaMemcpy(product, devOut, bytes, cudaMemcpyDeviceToHost);
    // Release device memory
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devOut);
    return product;
}
// Sums all width*width entries of v in row-major order, accumulating in
// double for accuracy, and returns the total narrowed to float.
float matSum(float* v, int width){
    double total = 0;
    const int count = width * width;
    for (int k = 0; k < count; ++k) {
        total += v[k];
    }
    return total;
}
// True iff v and w agree exactly (bitwise float ==) on every diagonal
// element of a width x width matrix; off-diagonal entries are ignored.
bool matDiagEQ(float* v, float* w, int width){
    for (int i = 0; i < width; i++){
        if (v[i * width + i] != w[i * width + i])
            return false;
    }
    return true;
}
// Prints a width x width matrix row by row to stdout, each entry followed
// by ", " and each row terminated by a newline.
void printMat(float* v, int width){
    for (int r = 0; r < width; r++){
        for (int c = 0; c < width; c++){
            std::cout << v[r * width + c] << ", ";
        }
        std::cout << std::endl;
    }
}
|
10,165 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (launched <<<1,1>>>): walks a
 * fixed tree of comparisons and arithmetic over its scalar arguments,
 * writes into var_21/var_22/var_33 (device buffers that must hold at least
 * var_11 / var_11 / var_12 floats respectively), and prints the final
 * accumulator. Logic intentionally left untouched per the file's
 * "Do not modify" banner. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,int var_11,int var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float* var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float* var_33,float var_34,float var_35,float var_36,float var_37,float var_38) {
if (comp >= -1.1278E-36f * powf(var_2 * ceilf((var_3 - (+0.0f * +1.1754E14f))), (var_4 / var_5 / (-1.6095E34f * atanf(+1.4243E19f - var_6))))) {
if (comp <= var_7 + var_8 / -1.6767E10f * var_9 / (-0.0f + +1.9388E-41f)) {
for (int i=0; i < var_1; ++i) {
if (comp <= (+1.9397E-41f / asinf(+1.4536E-37f / var_10 * (+1.4471E-20f / (-0.0f + +1.6621E-35f))))) {
float tmp_1 = var_13 - (+0.0f + var_14);
comp = tmp_1 / var_15 * (-0.0f - var_16);
comp = var_17 / (var_18 + (var_19 + var_20));
for (int i=0; i < var_11; ++i) {
float tmp_2 = (+0.0f / (var_23 - -0.0f));
var_21[i] = +0.0f;
var_22[i] = (-0.0f / (var_24 + ceilf(+1.6357E-42f)));
comp += var_22[i] * var_21[i] - tmp_2 / (var_25 * (var_26 * (-1.9806E-42f + var_27)));
}
if (comp <= -1.3492E-36f * var_28 * (-1.8482E4f - var_29)) {
float tmp_3 = +0.0f;
float tmp_4 = -1.1884E-26f;
comp += tmp_4 - tmp_3 * (+1.2614E-35f / (+1.8342E-35f * var_30));
comp += (var_31 / var_32);
}
for (int i=0; i < var_12; ++i) {
comp += (+1.2738E-10f + +1.5006E14f);
comp = fabsf(var_34 * var_35 + -1.5396E-12f * var_36);
var_33[i] = -0.0f * (-1.0610E-35f / (-0.0f * var_37));
comp += var_33[i] + -1.5176E-36f / (var_38 + +1.4360E35f);
}
}
}
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array with every slot set to v.
// Ownership passes to the caller (malloc'd; never freed by this program).
float* initPointer(float v) {
    float* buf = (float*)malloc(sizeof(float) * 10);
    for (int k = 0; k < 10; ++k) {
        buf[k] = v;
    }
    return buf;
}
/* Parses 39 positional command-line arguments (floats/ints/10-element float
 * buffers in the exact order the generated kernel expects), launches
 * compute<<<1,1>>> once, and synchronizes so the in-kernel printf is
 * flushed. NOTE(review): there is no argc check -- fewer than 39 arguments
 * reads argv out of bounds; tmp_22/tmp_23/tmp_34 are host pointers passed
 * to a kernel and are never freed (generated test code, left as-is). */
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
int tmp_12 = atoi(argv[12]);
int tmp_13 = atoi(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float* tmp_22 = initPointer( atof(argv[22]) );
float* tmp_23 = initPointer( atof(argv[23]) );
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float* tmp_34 = initPointer( atof(argv[34]) );
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39);
cudaDeviceSynchronize();
return 0;
}
|
10,166 | #include "includes.h"
// Gather: for each sparse slot idx < sparseSize,
//   sparseData[idx] = denseData[sparseIndices[idx] + offset].
// Slots whose shifted index falls outside [0, denseSize) are left untouched.
// 1-D launch; threads past sparseSize exit immediately.
__global__ void gGather(float* denseData, float* sparseData, int* sparseIndices, int denseSize, int sparseSize, int offset) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= sparseSize)
return;
// Bounds guard: the index must stay inside denseData after the offset shift.
if(sparseIndices[idx] >= -offset && sparseIndices[idx] + offset < denseSize)
sparseData[idx] = denseData[sparseIndices[idx] + offset];
} |
namespace broadcast {

// Linear shared-memory slot a thread reads its broadcast value from.
// Axes whose *_THREAD flag is true are broadcast axes and collapse to 0;
// the remaining axes are folded together in z, y, x order.
template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD>
__host__ __device__ unsigned offset_of_source(
    const dim3& block_dim,
    const dim3& thread_idx) {
  unsigned slot = 0;
  if (!Z_THREAD)
    slot = slot * block_dim.z + thread_idx.z;
  if (!Y_THREAD)
    slot = slot * block_dim.y + thread_idx.y;
  if (!X_THREAD)
    slot = slot * block_dim.x + thread_idx.x;
  return slot;
}

// Broadcasts within partitioned groups of threads.
//
// X_THREAD: Broadcast from threadIdx.x == 0 if true
// Y_THREAD: Broadcast from threadIdx.y == 0 if true
// Z_THREAD: Broadcast from threadIdx.z == 0 if true
// inp_val: Per-thread source value. Only valid when the thread is a source.
// out: Per-thread output location
//
template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T>
__device__ void blockBroadcast(T& out, T inp_val, T* shared_mem) {
  // A thread is a source iff it sits at index 0 along every broadcast axis.
  const bool is_source =
      (!X_THREAD || threadIdx.x == 0) &&
      (!Y_THREAD || threadIdx.y == 0) &&
      (!Z_THREAD || threadIdx.z == 0);
  const unsigned slot =
      offset_of_source<X_THREAD, Y_THREAD, Z_THREAD>(blockDim, threadIdx);
  if (is_source)
    shared_mem[slot] = inp_val;
  __syncthreads();  // source writes must land before any thread reads
  out = shared_mem[slot];
}

} // namespace broadcast
|
10,168 | /**
* Compute the padding activation.
* @param x_shape the shape of the input buffer.
* @param x the input buffer.
* @param y_shape the shape of the output buffer.
* @param y the output buffer.
* @param value the padding value.
* @return nothing.
*/
extern "C"
__global__ void activation(long *x_shape, float *x, long *y_shape, float *y, float value)
{
    // Linear NCHW index into y; the indexing implies the launch layout
    // threadIdx.x = dim0, blockIdx.x = dim1, blockIdx.y = dim2, blockIdx.z = dim3.
    int y_feat = y_shape[2] * y_shape[3];
    int y_index = threadIdx.x * y_shape[1] * y_feat + blockIdx.x * y_feat + blockIdx.y * y_shape[3] + blockIdx.z;
    // Inside the source tensor: copy; outside: write the padding value.
    bool inside = threadIdx.x < x_shape[0] && blockIdx.x < x_shape[1]
               && blockIdx.y < x_shape[2] && blockIdx.z < x_shape[3];
    if (inside) {
        int x_feat = x_shape[2] * x_shape[3];
        int x_index = threadIdx.x * x_shape[1] * x_feat + blockIdx.x * x_feat + blockIdx.y * x_shape[3] + blockIdx.z;
        y[y_index] = x[x_index];
    } else {
        y[y_index] = value;
    }
}
/**
* Compute the gradients with respect to the inputs.
* @param x_shape the shape of the input buffer.
* @param x the input buffer.
* @param y_shape the shape of the output buffer.
* @param y the output buffer.
* @return nothing.
*/
extern "C"
__global__ void inputs_gradients(long *x_shape, float *x, long *y_shape, float *y)
{
    // Pass the gradient straight through: same (n, c, h, w) coordinate in
    // both tensors, each linearized against its own shape.
    int y_feat = y_shape[2] * y_shape[3];
    int x_feat = x_shape[2] * x_shape[3];
    int y_index = threadIdx.x * y_shape[1] * y_feat + blockIdx.x * y_feat + blockIdx.y * y_shape[3] + blockIdx.z;
    int x_index = threadIdx.x * x_shape[1] * x_feat + blockIdx.x * x_feat + blockIdx.y * x_shape[3] + blockIdx.z;
    y[y_index] = x[x_index];
}
|
10,169 | /*
============================================================================
Name : PasswordChecker.cu
Author : Thomas Cross
Version : 0.0.1
Copyright : This is free. Do with it as you please
Description : CUDA password generator and checker
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <cmath>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/*
* The password in question
* */
__constant__ char password[16];
__constant__ char alphabet[95];
/**
* CUDA kernel copies one string buffer to another
*/
/**
 * Device-side strcpy: copies the NUL-terminated string src into dest
 * (terminator included) and returns the original dest pointer.
 */
__device__ char *strcpyDevice(char *dest, const char *src)
{
    char *start = dest;
    do {
        *dest++ = *src;
    } while (*src++ != '\0');
    return start;
}
/**
* CUDA kernel that compares two strings
*/
/**
 * Device-side strcmp: lexicographic comparison of two NUL-terminated
 * strings. Returns <0, 0, or >0 as s1 sorts before, equal to, or after s2
 * (difference of the first mismatching bytes, compared as unsigned chars).
 */
__device__ int strcmpDevice(const char * s1, const char * s2)
{
    for (; *s1 != '\0' && *s1 == *s2; ++s1, ++s2) {
        // advance past the common prefix
    }
    return *(const unsigned char*)s1 - *(const unsigned char*)s2;
}
/**
* CUDA kernel that computes converts base 10 to any base
* found this online somewhere
*/
/**
 * Renders number_to_convert as a NUL-terminated string over `alphabet`,
 * most significant digit first, into converted_string. converted_number is
 * caller-provided scratch for the digit values.
 * NOTE: the `base` parameter is ignored -- base 95 (the alphabet length)
 * is always used, matching the original's internal override. Zero converts
 * to the empty string.
 */
__device__ void convertBase(char converted_string[], int converted_number[], unsigned long long number_to_convert, int base, char *alphabet) {
    const int radix = 95;  // alphabet size; deliberately overrides `base`
    int digits = 0;
    // Peel off least-significant digits first.
    while (number_to_convert != 0)
    {
        converted_number[digits] = number_to_convert % radix;
        number_to_convert = number_to_convert / radix;
        ++digits;
    }
    converted_string[digits] = '\0';
    // Emit in reverse so the most significant digit comes first.
    for (int pos = 0; pos < digits; ++pos)
    {
        converted_string[pos] = alphabet[converted_number[digits - 1 - pos]];
    }
}
// Brute-force probe: each thread derives one candidate index
// (global thread id + total_threads * iteration, int arithmetic), renders
// it as a string over the 95-character constant-memory alphabet via
// convertBase, and compares it against the constant-memory `password`.
// On an exact match the candidate is copied into return_guess for the host.
// NOTE(review): `base` is passed as (int)'z' (122) but convertBase forces
// 95 internally -- confirm and remove one of the two. string_size is unused.
__global__ void checkPasswordShared(char *return_guess, const int string_size, const int iteration) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int total_threads = blockDim.x * gridDim.x;
int converted_number[16];
char converted_string[16];
unsigned long long codex = idx + (total_threads * iteration);
unsigned long long codex_for_printf = idx + (total_threads * iteration);
const int base = (int)'z';
convertBase(converted_string, converted_number, codex, base, alphabet);
if(strcmpDevice(converted_string, password) == 0)
{
printf("%llu,%d,%d,%d,%d,%d, %s == %s\n", codex_for_printf, blockIdx.x, blockDim.x, threadIdx.x, total_threads, iteration, converted_string, password);
return_guess = strcpyDevice(return_guess, converted_string);
}
}
/**
* Host function that copies the data and launches the work on GPU
* Created n streams where n = number of multiprocessors * 8 (peformance degrades after this point on my GTX)
* thread count per kernel is your max threads / 2
* block count is the number of multiprocessors you have
* Using shared memory and registers I have been measuring about 32,499,876 password generations and comparisons per second
*/
/**
 * Host function that copies the data and launches the work on GPU
 * Created n streams where n = number of multiprocessors * 8 (peformance degrades after this point on my GTX)
 * thread count per kernel is your max threads / 2
 * block count is the number of multiprocessors you have
 * Using shared memory and registers I have been measuring about 32,499,876 password generations and comparisons per second
 *
 * Launches checkPasswordShared across STREAM_COUNT streams, each with a
 * distinct iteration offset, all writing into the same 16-byte device
 * buffer (only a matching thread ever writes it). Returns a new[]'d
 * 16-byte buffer: the matched password, or all-NUL if no stream matched.
 * Caller owns (and delete[]s) the returned buffer.
 */
char *checkPasswordHost(int iteration)
{
cudaSetDevice(0);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int STREAM_COUNT = deviceProp.multiProcessorCount * 8 * 8;
cudaStream_t streams[STREAM_COUNT];
for(int i = 0; i < STREAM_COUNT; ++i)
{
cudaStreamCreate(&streams[i]);
}
static const int THREAD_COUNT = deviceProp.maxThreadsPerMultiProcessor / 2;
static const int BLOCK_COUNT = deviceProp.multiProcessorCount;
static const int SIZE = 16;
char *converted_string = new char[SIZE];
char *gpuData;
// Zero the candidate buffer so "no match" is detectable as an empty string.
for(int i = 0; i < SIZE; ++i)
converted_string[i] = '\0';
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(char)*SIZE));
CUDA_CHECK_RETURN(cudaMemcpy(gpuData, converted_string, sizeof(char)*SIZE, cudaMemcpyHostToDevice));
// Each stream probes a disjoint slice of the candidate space.
for(int i = 0; i < STREAM_COUNT; ++i)
{
checkPasswordShared<<<BLOCK_COUNT, THREAD_COUNT, 0, streams[i]>>> (gpuData, SIZE, (iteration * STREAM_COUNT) + i);
}
for(int i = 0; i < STREAM_COUNT; ++i)
{
cudaStreamSynchronize(streams[i]);
cudaStreamDestroy(streams[i]);
}
CUDA_CHECK_RETURN(cudaMemcpy(converted_string, gpuData, sizeof(char)*SIZE, cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaFree(gpuData));
return converted_string;
}
/**
 * Entry point: reads the target password from stdin, stages it into the
 * device-side `password` symbol, then runs brute-force batches via
 * checkPasswordHost() until a match is found or max_iterations is reached.
 */
int main(void)
{
int iteration = 0;
int max_iterations = 100000000;
char *answer_password;
answer_password = new char[1];
answer_password[0] = '\0';
std::string temp_password;
std::cout << "Please enter a password to find: ";
getline(std::cin, temp_password);
// Bug fix: the original copied 16 bytes straight from c_str(), reading past
// the end of the string's buffer whenever the input was shorter than 15
// characters. Stage the password in a zero-padded 16-byte buffer instead
// (inputs longer than 15 characters are truncated).
char padded_password[16] = {'\0'};
temp_password.copy(padded_password, sizeof(padded_password) - 1);
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(password, padded_password, sizeof(char) * 16));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(alphabet, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ", sizeof(char) * 95));
std::cout << "searching for \"" << temp_password.c_str() << "\"..." << std::endl;
time_t start = time(0);
// Loop until the kernel writes a non-empty match or we give up.
while(answer_password[0] == '\0' && iteration < max_iterations)
{
delete[] answer_password;
answer_password = checkPasswordHost(iteration);
iteration++;
}
if(answer_password[0] != '\0')
{
std::cout << "The password is: \"" << answer_password << "\"" << std::endl;
}
else if(iteration == max_iterations)
{
std::cout << "Reached max iterations of " << max_iterations << std::endl;
}
time_t end = time(0);
double time = difftime(end, start);
std::cout << "Execution Time: " << (int)floor(time) << " seconds" << std::endl;
delete[] answer_password;
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
/**
 * Report a failed CUDA runtime call and terminate the process.
 * No-op when `err` is cudaSuccess; otherwise prints the failing statement,
 * the error string/code, and the file:line, then exits with status 1.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err != cudaSuccess) {
        std::cerr << statement << " returned " << cudaGetErrorString(err)
                  << "(" << err << ") at " << file << ":" << line << std::endl;
        exit (1);
    }
}
|
10,170 | #include<stdio.h>
#include<iostream>
#include <cstdlib>
#include <sys/time.h>
#include<limits.h>
#include<algorithm>
using namespace std;
#define maxVertices 8192
#define INF INT_MAX-1
#define NS 64
#define THREADSPB 1024
float dist[maxVertices * maxVertices];
float *device_matrix;
float *result_matrix;
int vertices;
int tilesize[3];
size_t tot;
/**
 * Floyd-Warshall relaxation kernel for an n x n tile of the na x na matrix,
 * starting at (Xi, Xj), relaxing through intermediates Vi..Vi+n-1.
 * NOTE(review): threads that fail the bounds check return before reaching the
 * __syncthreads() calls below; if any in-bounds thread shares their block,
 * the barrier is divergent -- undefined behavior. Confirm launch geometry
 * guarantees whole blocks are in bounds.
 */
__global__
void FloydWarshall(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, float *matrix, int n, int na)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + Xj;
int i = blockIdx.y * blockDim.y + threadIdx.y + Xi;
if (j >= na || (i >= na))
return;
__shared__ float thisrowkthcolumn;
if (n > NS) {
// Large-tile path: broadcast matrix[i][via] via shared memory, one value
// per block (blockDim.y appears to be 1 given the 1-D block launch in
// F_loop_FW -- otherwise rows would race on this single scalar).
for (int via = Vi; via < (Vi + n); via++) {
if (threadIdx.x == 0)
thisrowkthcolumn = matrix[i * na + via];
__syncthreads();
if (i != j && i != via && j != via)
matrix[i * na + j] = min(matrix[i * na + j],
thisrowkthcolumn + matrix[via * na + j]);
}
} else {
// Small-tile path: stage the working set in shared memory.
// NOTE(review): work[] has only NS (64) elements but is indexed with
// global coordinates i * na + j, which exceed NS for any na > 8 --
// this looks like an out-of-bounds shared-memory access. The intended
// indexing was presumably tile-local (threadIdx-based); confirm.
__shared__ float work[NS];
work[i * na + j] = matrix[i * na + j];
for (int via = Vi; via < (Vi + n); via++) {
work[i * na + via] = matrix[i *na + via];
work[via * na + j] = matrix[via *na + j];
}
__syncthreads();
for (int via = Vi; via < (Vi + n); via++) {
if (i != j && j != via && i != via)
work[i * na + j] = min(work[i * na + j], work[i * na + via] +
work[via * na + j]);
}
__syncthreads();
// Write the relaxed value back (loop rewrites the same cell n times).
for (int via = Vi; via < (Vi + n); via++)
matrix[i * na + j] = work[i * na + j];
}
}
/**
 * Launch the FloydWarshall kernel over an n x n tile and wait for it.
 * Grid: one row of blocks per tile row; THREADSPB threads cover the columns.
 */
void F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n)
{
dim3 blocks_per_grid((n + THREADSPB - 1) /
THREADSPB, n);
FloydWarshall<<<blocks_per_grid, THREADSPB>>>(Xi, Xj, Ui,
Uj, Vi, Vj, device_matrix, n, vertices);
// Fix: cudaThreadSynchronize() has been deprecated for years and removed
// in recent CUDA toolkits; cudaDeviceSynchronize() is the supported
// equivalent with identical semantics here.
cudaDeviceSynchronize();
}
/**
 * Relax a single (from, to) cell through intermediate `via` in the n x n
 * row-major matrix. Intended to run with a single thread (<<<1,1>>>).
 */
__global__
void A_FloydWarshall(int via, int from, int to, float *matrix, int n)
{
    const float direct = matrix[from * n + to];
    const float detour = matrix[from * n + via] + matrix[via * n + to];
    matrix[from * n + to] = min(direct, detour);
}
/**
 * Reference fallback: relaxes every (from, to) pair of an n x n tile through
 * each intermediate `via` by launching one single-thread kernel per update.
 * NOTE(review): this issues up to n^3 <<<1,1>>> launches -- functionally
 * correct but extremely slow; used only as AFW's base case.
 */
void A_F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n)
{
for(int via = Uj; via < Uj + n; via++) {
for(int from = Xi; from < Xi + n; from++) {
for(int to = Xj; to < Xj + n ; to++) {
// Skip the degenerate cases a Floyd-Warshall step never improves.
if(from!=to && from!=via && to!=via) {
A_FloydWarshall<<<1, 1>>>(via, from, to, device_matrix, vertices);
}
}
}
}
}
/*
void F_loop_FW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n)
{
for(int via = Uj; via < Uj + n; via++)
{
for(int from = Xi; from < Xi + n; from++)
{
for(int to = Xj; to < Xj + n ; to++)
{
if(from!=to && from!=via && to!=via)
{
dist[from * vertices + to] = min(dist[from * vertices + to],
dist[from * vertices + via]+dist[via * vertices + to]);
}
}
}
}
printarray(vertices);
}
*/
/**
 * D-step of the recursive tiled Floyd-Warshall: updates block X using row
 * source U and column source V, all three distinct. Splits the n x n region
 * into r x r subtiles of size n/r (r = tilesize[d]) and recurses, falling
 * back to the GPU kernel once n < r.
 */
void DFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) {
int r = tilesize[d];
if (n < r)
F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n);
else {
for (int via = 0; via < r; via++) {
// p: offset of the `via`-th subtile along each axis.
int p = via * (n/r);
for (int i = 0; i < r; i++)
for (int j = 0; j < r; j++) {
int ip = i * (n/r);
int jp = j * (n/r);
// Off-diagonal subtiles only; diagonal ones belong to other steps.
if (i != via && j != via)
DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1);
}
}
}
}
/**
 * B-step of the recursive tiled Floyd-Warshall: updates the row of subtiles
 * sharing block X's row panel, then fans out to D-steps for the remaining
 * off-diagonal subtiles. r = tilesize[d] controls the split factor.
 */
void BFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) {
int r = tilesize[d];
if (n < r)
F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n);
else {
for (int via = 0; via < r; via++) {
int p = via * (n/r);
// First update the subtiles in the `via` row panel (except the pivot).
for (int j = 0; j < r; j++) {
int ip = j * (n/r);
if (j != via)
BFW(Xi + p, Xj + ip , Ui + p, Uj + p, Vi + p, Vj + ip, n/r, d + 1);
}
// Then relax every other subtile against the freshly updated panels.
for (int i = 0; i < r; i++)
for (int j = 0; j < r; j++) {
int ip = i * (n/r);
int jp = j * (n/r);
if (i != via && j != via)
DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1);
}
}
}
}
/**
 * C-step of the recursive tiled Floyd-Warshall: mirror of BFW for the column
 * panel -- updates the subtiles sharing block X's column, then fans out to
 * D-steps for the remaining off-diagonal subtiles.
 */
void CFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) {
int r = tilesize[d];
if (n < r)
F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n);
else {
for (int via = 0; via < r; via++) {
int p = via * (n/r);
// Update the subtiles in the `via` column panel (except the pivot).
for (int j = 0; j < r; j++) {
int ip = j * (n/r);
if (j != via)
CFW(Xi + ip, Xj + p , Ui + ip, Uj + p, Vi + p, Vj + p, n/r, d + 1);
}
// Then relax every other subtile against the updated panels.
for (int i = 0; i < r; i++)
for (int j = 0; j < r; j++) {
int ip = i * (n/r);
int jp = j * (n/r);
if (i != via && j != via)
DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1);
}
}
}
}
/**
 * A-step (top level) of the recursive tiled Floyd-Warshall: for each pivot
 * subtile, recursively solve the diagonal (AFW), sweep its row (BFW) and
 * column (CFW) panels, then relax all remaining subtiles (DFW). The base
 * case uses the one-update-per-launch reference path A_F_loop_FW.
 */
void AFW(int Xi, int Xj, int Ui, int Uj, int Vi, int Vj, int n, int d) {
int r = tilesize[d];
if (n < r)
A_F_loop_FW(Xi, Xj, Ui, Uj, Vi, Vj, n);
else {
for (int via = 0; via < r; via++) {
int p = via * (n/r);
// 1. Diagonal pivot subtile.
AFW(Xi + p, Xj + p, Ui + p, Uj + p, Vi + p, Vj + p, n/r, d + 1);
// 2. Pivot row panel.
for (int j = 0; j < r; j++) {
int ip = j * (n/r);
if (j != via)
BFW(Xi + p, Xj + ip , Ui + p, Uj + p, Vi + p, Vj + ip, n/r, d + 1);
}
// 3. Pivot column panel.
for (int j = 0; j < r; j++) {
int ip = j * (n/r);
if (j != via)
CFW(Xi + ip, Xj + p , Ui + ip, Uj + p, Vi + p, Vj + p, n/r, d + 1);
}
// 4. Everything else.
for (int i = 0; i < r; i++)
for (int j = 0; j < r; j++) {
int ip = i * (n/r);
int jp = j * (n/r);
if (i != via && j != via)
DFW(Xi + ip, Xj + jp, Ui + ip, Uj + p, Vi + p, Vj + jp, n/r, d + 1);
}
}
}
}
/**
 * Driver: builds a synthetic distance matrix, runs the recursive tiled
 * Floyd-Warshall on the GPU, prints the elapsed time and the result.
 * Reads the vertex count from the N_VERTICES environment variable.
 */
int main(int argc, char *argv[])
{
char *arg_vertices = getenv("N_VERTICES");
// Robustness fix: the original dereferenced a NULL getenv() result when the
// variable was unset, and accepted sizes beyond the static dist[] buffer.
if (arg_vertices == NULL) {
printf("Error: N_VERTICES environment variable is not set\n");
return 1;
}
vertices = atoi(arg_vertices);
if (vertices <= 0 || vertices > maxVertices) {
printf("Error: N_VERTICES must be between 1 and %d\n", maxVertices);
return 1;
}
tilesize[0] = 2;
tilesize[1] = vertices/NS;
tilesize[2] = INF;
// Synthetic edge weights: 0 on the diagonal, otherwise a function of i+j.
for(int i = 0 ; i < vertices ; i++ )
{
for(int j = 0 ; j< vertices; j++ )
{
if( i == j )
dist[i * vertices + j] = 0;
else {
int num = i + j;
if (num % 3 == 0)
dist[i * vertices + j] = num / 2;
else if (num % 2 == 0)
dist[i * vertices + j] = num * 2;
else
dist[i * vertices + j] = num;
}
}
}
struct timeval tvalBefore, tvalAfter;
tot = vertices * vertices * sizeof(float);
device_matrix = NULL;
cudaMalloc((float **)&device_matrix, tot);
cudaMemcpy(device_matrix, dist, tot, cudaMemcpyHostToDevice);
result_matrix =(float *)malloc( vertices * vertices *
sizeof(float));
gettimeofday (&tvalBefore, NULL);
AFW(0, 0, 0, 0, 0, 0, vertices, 0);
// This blocking copy also drains any outstanding kernel work.
cudaMemcpy(result_matrix, device_matrix, tot, cudaMemcpyDeviceToHost);
gettimeofday (&tvalAfter, NULL);
printf("Time: %ld microseconds\n",
((tvalAfter.tv_sec - tvalBefore.tv_sec)*1000000L
+tvalAfter.tv_usec) - tvalBefore.tv_usec
);
for(int i = 0 ; i < vertices; i++ )
{
cout << "\n";
for(int j = 0 ; j< vertices ;j++ )
cout << result_matrix[i * vertices + j] << " " ;
}
// Leak fix: release the device and host buffers before exit.
cudaFree(device_matrix);
free(result_matrix);
return 0;
}
|
10,171 | /*****Implemented first layer of convolution using global memory*******/
/**Implemented First Maxpool Layer**/
/**Measuring time**/
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
// #include<time.h>
#include<float.h>
__constant__ int FIL[32*5*5];
/**
 * First convolution layer using global memory.
 * One thread per output pixel (threadIdx.y, threadIdx.x); one block per
 * filter (blockIdx.x). Filters live in constant memory (FIL).
 *
 * Bug fix + generalization: the original hand-unrolled 5x5 expression read
 * pich[xsize*(i+3)+(j+1)] for tap k+21, which belongs to row i+4 (copy-paste
 * error). The loop form below fixes that tap and works for any filterdim.
 */
__global__ void conv1(unsigned int *pich, int *resulth, int xsize, int numfilters, int filterdim){
int i = threadIdx.y;
int j = threadIdx.x;
int l = blockIdx.x;
int outdim = xsize - filterdim + 1;
if(i < outdim && j < outdim){
// Accumulate the full filterdim x filterdim window.
int sum = 0;
for (int fi = 0; fi < filterdim; fi++) {
for (int fj = 0; fj < filterdim; fj++) {
sum += FIL[l*(filterdim*filterdim) + fi*filterdim + fj]
* (int)pich[xsize*(i + fi) + (j + fj)];
}
}
resulth[l*outdim*outdim + i*outdim + j] = sum;
// Debug trace retained from the original implementation.
printf("resultgpu[%d][%d]=%d\n", l, i*outdim + j, resulth[l*outdim*outdim + i*outdim + j]);
}
}
/**
 * 2x2 max-pooling over the convolution output.
 * One thread per pooling window (threadIdx.y, threadIdx.x); one block per
 * feature map (blockIdx.x). Output is convdim/2 x convdim/2 per map, where
 * convdim = xsize - filterdim + 1.
 */
__global__ void maxpooling(int *resulth, int *maxpoolh, int xsize, int filterdim, int numfilters){
    const int i = threadIdx.y;
    const int j = threadIdx.x;
    const int l = blockIdx.x;
    const int convdim = xsize - filterdim + 1;
    if (i < convdim / 2 && j < convdim / 2) {
        // Top-left corner of this thread's 2x2 window in feature map l.
        const int base = l * convdim * convdim + 2 * j + 2 * i * convdim;
        const int a = resulth[base];
        const int b = resulth[base + 1];
        const int c = resulth[base + convdim];
        const int d = resulth[base + convdim + 1];
        maxpoolh[l * convdim * convdim / 4 + i * convdim / 2 + j] =
            max(max(a, b), max(c, d));
    }
}
/**
 * Driver: runs conv1 (32 5x5 all-ones filters over a 28x28 all-ones image)
 * followed by 2x2 max-pooling, timing both with CUDA events.
 */
int main( int argc, char **argv )
{
// const so the filter array below has a compile-time constant size
// (the original relied on the non-standard VLA extension).
const int xsize = 28;
const int filterdim = 5;
const int numfilters = 32;
int numbytes = xsize*xsize*sizeof(int);
int numbytes2 = (xsize-filterdim+1)*(xsize-filterdim+1)*sizeof(int);
/* Bytes required for the output of the first maxpool layer (2x2 pooling). */
int numbytes3 = ((xsize-filterdim+1)*(xsize-filterdim+1)/4)*sizeof(int);
unsigned int *pic = (unsigned int *)malloc(numbytes);
unsigned int filter[numfilters*filterdim*filterdim];
int *result = (int *)malloc(numfilters*numbytes2);
int *maxpool = (int *)malloc(numfilters*numbytes3);
unsigned int *pich;
int *resulth;
int *maxpoolh;
cudaMalloc(&pich, numbytes);
cudaMalloc(&resulth, numfilters*numbytes2);
cudaMalloc(&maxpoolh, numfilters*numbytes3);
// Fill the test image and all filters with ones.
for (int i=0; i<xsize; i++) {
for (int j=0; j<xsize; j++) {
pic[i*xsize + j] = 1;
}
}
for(int k=0;k<numfilters;k++){
for (int fi=0; fi<filterdim; fi++) {
for (int fj=0; fj<filterdim; fj++){
filter[k*(filterdim*filterdim) + fi*filterdim + fj] = 1;
}
}
}
dim3 dimGrid (32);
dim3 dimBlock (32,32);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
cudaMemcpy(pich,pic,numbytes, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(FIL, filter, numfilters*filterdim*filterdim*sizeof(int));
conv1<<<dimGrid, dimBlock>>>(pich, resulth, xsize, numfilters, filterdim);
cudaMemcpy(result,resulth,numfilters*numbytes2,cudaMemcpyDeviceToHost);
// Fix: the original re-uploaded `result` into resulth here, but the conv
// output already resides in resulth on the device -- the round trip was
// redundant and has been removed.
dim3 dimBlock1 (16,16);
maxpooling<<<dimGrid, dimBlock1>>>(resulth, maxpoolh, xsize, filterdim, numfilters);
cudaMemcpy(maxpool, maxpoolh, numfilters*numbytes3, cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time taken on GPU: %f ms\n", time);
// Leak fix: release all host and device allocations.
free(pic);
free(result);
free(maxpool);
cudaFree(pich);
cudaFree(resulth);
cudaFree(maxpoolh);
}
|
10,172 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#define LIST_SIZE 100000
__device__ int init_flag = 0;
__device__ unsigned long long mulValue1List[LIST_SIZE];
__device__ unsigned long long mulValue2List[LIST_SIZE];
__device__ unsigned long long mulCountList[LIST_SIZE];
__device__ unsigned long long record_flag;
/**
 * Instrument an integer multiply at instrumentation site `index`: bumps the
 * per-site multiply counter and tallies how often each operand is zero.
 * All counters are device-global arrays updated atomically; recording is
 * gated by the host-controlled `record_flag` symbol.
 */
extern "C" __device__ void profileMulValue(long mulValue1,long mulValue2, long index)
{
if (record_flag == 0)
return;
atomicAdd(&mulCountList[index],1);
if (mulValue1 == 0)
{
atomicAdd(&mulValue1List[index], 1);
}
if(mulValue2 == 0)
{
atomicAdd(&mulValue2List[index], 1);
}
}
/**
 * Instrument a floating-point multiply at instrumentation site `index`:
 * bumps the per-site multiply counter and tallies zero operands, mirroring
 * profileMulValue(). Gated by the host-controlled `record_flag` symbol.
 */
extern "C" __device__ void profileFmulValue(double mulValue1,double mulValue2, long index)
{
if (record_flag == 0)
return;
atomicAdd(&mulCountList[index],1);
if (mulValue1 == 0)
{
atomicAdd(&mulValue1List[index], 1);
}
if (mulValue2 == 0)
{
// Bug fix: the original incremented mulValue1List here (copy-paste), so
// zero second operands were counted against the first operand's tally.
atomicAdd(&mulValue2List[index], 1);
}
}
|
10,173 | /******************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. All rights reserved. */
/* For information, contact Frank Willmore: willmore@tacc.utexas.edu */
/* */
/******************************************************************************/
#include <stdio.h>
#include <assert.h>
char h_string[256];
__device__ char d_string[256];
/**
 * Uppercase the device-global d_string in place, one thread per character.
 * Non-lowercase bytes are left untouched.
 */
__global__ void toUpper()
{
    const char c = d_string[threadIdx.x];
    if (c >= 'a' && c <= 'z')
        d_string[threadIdx.x] = c - ('a' - 'A');
}
/**
 * Demo driver: stage a greeting in the device-global string, uppercase it on
 * the GPU, copy it back, and print the result.
 */
int main(int argc, char* argv[])
{
    snprintf(h_string, sizeof(h_string), "hello world, this is my first CUDA program ever.");
    cudaMemcpyToSymbol(d_string, h_string, sizeof(h_string), 0, cudaMemcpyHostToDevice);
    // One thread per byte of the 256-byte buffer.
    toUpper<<<1, 256>>>();
    // Blocking symbol copy: also synchronizes with the kernel above.
    cudaMemcpyFromSymbol(h_string, d_string, sizeof(h_string), 0, cudaMemcpyDeviceToHost);
    printf("%s\n", h_string);
    return 0;
}
|
10,174 | // 在GPU上进行计算
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
// Element-wise vector addition computed serially by one thread
// (the host launches this kernel as <<<1, 1>>>).
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N){
    printf("Caculating On GPU\n");
    int k = 0;
    while (k < N) {
        C[k] = A[k] + B[k];
        ++k;
    }
}
// Fill ip[0..size-1] with pseudo-random floats in [0.0, 25.5].
void initialData(float *ip, int size){
    // Bug fix: the original called srand(time(...)) on every invocation, so
    // two calls within the same wall-clock second (e.g. initializing h_A and
    // h_B back to back) produced identical "random" arrays. Seed exactly
    // once per process instead.
    static int seeded = 0;
    if (!seeded) {
        srand((unsigned int) time(NULL));
        seeded = 1;
    }
    for (int i=0; i<size; i++){
        // Low byte of rand() scaled down: values in [0.0, 25.5].
        ip[i] = (float)(rand() & 0xFF)/10.0f;
    }
}
// Print the first N elements of `array` on one line, each preceded by a
// single space, followed by a newline.
void print(float *array, const int N){
    for (int k = 0; k < N; ++k)
        printf(" %f", array[k]);
    printf("\n");
}
// Main function: end-to-end GPU vector-addition demo.
int main(){
int nElem = 4;
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *h_C;
// First allocate host memory and fill it with data.
printf("malloc memory on Host\n");
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
printf("initialize data on Host\n");
initialData(h_A, nElem);
initialData(h_B, nElem);
print(h_A, nElem);
print(h_B, nElem);
printf("malloc memory on GPU\n");
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
printf("copying inputs from Host to Device\n");
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
sumArraysOnGPU <<<1, 1>>>(d_A, d_B, d_C, nElem); // asynchronous kernel launch
printf("copying output from Device to Host\n");
// The blocking cudaMemcpy below also synchronizes with the kernel above.
cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost);
print(h_C, nElem);
// Remember to free both the host and the device allocations!
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
// Kernel with deeply data-dependent control flow that writes sentinel values
// into `out` based on the contents of `inp`.
// NOTE(review): every __syncthreads() below sits inside data-dependent
// branches. __syncthreads() must be reached by ALL threads of a block; if
// threads within one block diverge on any of these conditions, behavior is
// undefined (possible hang). This is only safe if inp[] makes every thread
// in a block take identical paths -- confirm with the caller.
__global__ void conv2(int *inp, int *out)
{
if(inp[0] == 1) {
if(inp[2] == 3 || inp[3] == 4) {
if(inp[4] == 4 || inp[5] == 5 || inp[6] == 6) {
__syncthreads();
out[2] = 4;
}
else {
out[5] = 4;
}
}
else {
out[3] = 3;
}
}
else if(inp[1] == 1) {
__syncthreads();
if((inp[9] == 3) || (inp[10] == 4 && inp[11] == 3)) {
__syncthreads();
out[4] = 42;
if(inp[5] == 4 || inp[6] == 44) {
out[4] = 455;
}
else {
__syncthreads();
out[5] = 56;
}
}
}
}
|
10,176 | #include <stdio.h>
// Matrix is stored as 1d array in row-major order
typedef struct {
int width;
int height;
float *elements;
} Matrix;
#define BLOCK_SIZE 16
#define A_WIDTH 2048
#define A_HEIGHT 2048
#define B_WIDTH 2048
#define B_HEIGHT 2048
#define C_WIDTH 2048
#define C_HEIGHT 2048
/**
 * Naive matrix multiply C = A * B, one thread per output element.
 * Matrices are row-major 1-D arrays (see Matrix struct).
 */
__global__ void matmul(const Matrix A, const Matrix B, const Matrix C)
{
float CValue = 0;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
// Robustness: guard the grid tail so partial blocks never write out of
// bounds (dimensions here are multiples of BLOCK_SIZE, but cheap anyway).
if (row >= C.height || col >= C.width)
return;
int rowVal = row * A.width;
for (int k = 0; k < A.width; k++) {
// Bug fix: a row-major B must be indexed with its WIDTH as the row
// stride; the original used B.height, which is only coincidentally
// correct for square matrices.
CValue += A.elements[rowVal + k] * B.elements[k * B.width + col];
}
C.elements[row * C.width + col] = CValue;
}
/**
 * Host driver: copies A and B to the device, launches the matmul kernel,
 * and copies the product back into C.elements.
 * Assumes all dimensions are multiples of BLOCK_SIZE (grid has no remainder).
 */
void matmulDriver(const Matrix A, const Matrix B, const Matrix C)
{
// Load matrix A into device memory.
Matrix dA;
dA.width = A.width;
dA.height = A.height;
size_t sizeOfA = A.width * A.height * sizeof(float);
cudaMalloc(&dA.elements, sizeOfA);
cudaMemcpy(dA.elements, A.elements, sizeOfA, cudaMemcpyHostToDevice);
// Load matrix B into device memory.
Matrix dB;
dB.width = B.width;
dB.height = B.height;
size_t sizeOfB = B.width * B.height * sizeof(float);
cudaMalloc(&dB.elements, sizeOfB);
cudaMemcpy(dB.elements, B.elements, sizeOfB, cudaMemcpyHostToDevice);
// Allocate matrix C on the device.
Matrix dC;
dC.width = C.width;
dC.height = C.height;
size_t sizeOfC = C.width * C.height * sizeof(float);
cudaMalloc(&dC.elements, sizeOfC);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
// Bug fix: the original launched the kernel with the HOST matrices
// (A, B, C), so the device dereferenced host pointers and C was never
// actually computed. Launch with the device copies instead.
matmul<<<dimGrid, dimBlock>>>(dA, dB, dC);
cudaMemcpy(C.elements, dC.elements, sizeOfC, cudaMemcpyDeviceToHost);
cudaFree(dA.elements);
cudaFree(dB.elements);
cudaFree(dC.elements);
}
/**
 * Builds two 2048x2048 matrices, multiplies them on the GPU via
 * matmulDriver, computes a CPU reference, and compares the two bitwise.
 */
int main()
{
Matrix A;
A.width = A_WIDTH;
A.height = A_HEIGHT;
size_t sizeOfA = A.width * A.height * sizeof(float);
A.elements = (float *) malloc(sizeOfA);
Matrix B;
B.width = B_WIDTH;
B.height = B_HEIGHT;
size_t sizeOfB = B.width * B.height * sizeof(float);
B.elements = (float *) malloc(sizeOfB);
Matrix C;
C.width = C_WIDTH;
C.height = C_HEIGHT;
size_t sizeOfC = C.width * C.height * sizeof(float);
C.elements = (float *) malloc(sizeOfC);
Matrix C_check;
C_check.width = C_WIDTH;
C_check.height = C_HEIGHT;
C_check.elements = (float *) malloc(sizeOfC);
// Deterministic test data: element (i, j) = i + j.
for (int i = 0; i < A.height; i++) {
for (int j = 0; j < A.width; j++) {
A.elements[i * A.width + j] = i + j;
}
}
for (int i = 0; i < B.height; i++) {
for (int j = 0; j < B.width; j++) {
B.elements[i * B.width + j] = i + j;
}
}
// Bug fix: the accumulator was declared int, truncating every float
// product and diverging from the GPU's float arithmetic. Use float so the
// reference performs the same operations in the same order as the kernel.
float value;
for (int i = 0; i < C_check.height; i++) {
for (int j = 0; j < C_check.width; j++) {
value = 0.0f;
for (int k = 0; k < A.width; k++) {
value += A.elements[i * A.width + k] * B.elements[k * B.width + j];
}
C_check.elements[i * C_check.width + j] = value;
}
}
matmulDriver(A, B, C);
int cmp = memcmp(C_check.elements, C.elements, sizeOfC);
if (cmp == 0) {
printf("Arrays are equal.\n");
} else {
// Bug fix: the original printed "Arrays are equal." on BOTH branches,
// making the check meaningless.
printf("Arrays are not equal.\n");
}
// Leak fix: release the host buffers.
free(A.elements);
free(B.elements);
free(C.elements);
free(C_check.elements);
return 0;
}
|
10,177 | #include "includes.h"
/**
 * For each selected coordinate, replaces the red channel of every pixel in a
 * (2*template_half_height+1) x (2*template_half_width+1) window centered on
 * that coordinate with the average of its green and blue channels.
 * One thread per coordinate; window pixels are clamped to the image bounds.
 * NOTE(review): d_r is accepted but never read -- presumably kept for
 * signature compatibility; confirm.
 */
__global__ void remove_redness_from_coordinates( const unsigned int* d_coordinates, unsigned char* d_r, unsigned char* d_b, unsigned char* d_g, unsigned char* d_r_output, int num_coordinates, int num_pixels_y, int num_pixels_x, int template_half_height, int template_half_width )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgSize = num_pixels_x * num_pixels_y;
if (global_index_1d < num_coordinates)
{
// NOTE(review): indexes d_coordinates from the END of an imgSize-length
// array -- this assumes d_coordinates holds imgSize sorted entries and
// we want the top num_coordinates of them. Confirm the array really has
// imgSize elements, otherwise this reads out of bounds.
unsigned int image_index_1d = d_coordinates[imgSize - global_index_1d - 1];
// Unpack flat index into (x, y).
ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x, image_index_1d / num_pixels_x);
for (int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++)
{
for (int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++)
{
int2 image_offset_index_2d = make_int2(x, y);
// Clamp window pixels to the valid image rectangle.
int2 image_offset_index_2d_clamped = make_int2(min(nx - 1, max(0, image_offset_index_2d.x)), min(ny - 1, max(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y) + image_offset_index_2d_clamped.x;
unsigned char g_value = d_g[image_offset_index_1d_clamped];
unsigned char b_value = d_b[image_offset_index_1d_clamped];
unsigned int gb_average = (g_value + b_value) / 2;
// Replace red with the green/blue average to suppress redness.
d_r_output[image_offset_index_1d_clamped] = (unsigned char)gb_average;
}
}
}
} |
10,178 | #include "includes.h"
# include <bits/stdc++.h>
# include <cuda.h>
#define SIZE 60// Global Size
#define BLOCK_SIZE 1024
using namespace std;
//::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
// :::: Kernel
// :::: Calls
// Intended to compute a work-efficient prefix sum of g_idata into g_odata
// (per the "Sequential Addressing" note); the body was never implemented,
// so this kernel is currently a no-op.
__global__ void kernel_prefix_sum_efficient(double *g_idata,double *g_odata,int l){ // Sequential Addressing technique
} |
10,179 | #include "includes.h"
extern "C" {
}
#define IDX2C(i, j, ld) ((j)*(ld)+(i))
#define SQR(x) ((x)*(x)) // x^2
/**
 * One thread per element: floor device_array[i] at min_signal, then take the
 * natural log in place.
 * NOTE: there is no bounds guard, so the launch must cover exactly the array
 * length (preserved from the original interface).
 */
__global__ void cutoff_log_kernel(double* device_array, double min_signal){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
// Bug fix: the data is double, but the original called logf(), computing in
// single precision and silently discarding half the mantissa. Use log().
if (device_array[thread_id] < min_signal){
device_array[thread_id] = log(min_signal);
}
else{
device_array[thread_id] = log(device_array[thread_id]);
}
} |
10,180 | #include <stdio.h>
// Print each thread's x-index, one line per thread (ordering across threads
// is not guaranteed).
__global__ void printLoopIndex() {
printf("%d\n", threadIdx.x);
}
// Launch 100 threads that each print their index, then synchronize so the
// device printf buffer is flushed before the process exits.
int main() {
printLoopIndex<<<1, 100>>>();
cudaDeviceSynchronize();
}
|
10,181 | #include <cstdio>
#include <climits>
#include <algorithm>
#define SERIAL_SCALE 3
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
// Identity fill: output[k] = k for every k < size, one thread per element.
__global__
void initialize(int* output, int size)
{
    const int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (gid >= size)
        return;
    output[gid] = gid;
}
// Tiled matrix transpose: each 32x32 thread block stages one tile of `input`
// in shared memory, then writes it back transposed into `output`.
// NOTE(review): assumes blockDim == (32, 32) and N a multiple of 32 -- there
// are no bounds checks, so other shapes read/write out of range. Confirm the
// launch configuration enforces this.
__global__
void transpose(int *input, int *output, int N){
const unsigned int column=blockIdx.x*32+threadIdx.x;
const unsigned int row=blockIdx.y*32+threadIdx.y;
// Destination coordinates: tile indices swapped, lane roles preserved so
// the global write below stays contiguous along newColumn.
const unsigned int newRow=(32*blockIdx.x+threadIdx.y);
const unsigned int newColumn=32*blockIdx.y+threadIdx.x;
// Rows padded to 35 ints -- presumably to spread the transposed accesses
// across shared-memory banks (standard anti-bank-conflict padding).
__shared__ int cache[32][35];
cache[threadIdx.x][threadIdx.y]=input[N*row+column];
__syncthreads();
output[newRow*N+newColumn]=cache[threadIdx.y][threadIdx.x];
}
// Per-column minimum-label propagation over an N x N grid: one thread owns
// column `column` and sweeps it top-to-bottom, then bottom-to-top. Within
// each vertical run of equal G values, the smallest `result` value seen so
// far is propagated forward; *modified is set to -1 whenever any cell
// changed, letting the host iterate to a fixed point.
// NOTE(review): there is no bounds check on `column` -- assumes
// gridDim.x * blockDim.x exactly covers N columns; confirm at the call site.
__global__
void propagateMin(int *G,int *result,int *modified,int N)
{
int column=(blockIdx.x * blockDim.x) + threadIdx.x;
bool m=false;
#define GET(array,row) array[N*(row)+column]
#define propagate(i) \
int currG=GET(G,i); \
if(prevG==currG) \
{ \
int currR=GET(result,i); \
if(currR>prevR) \
{ \
GET(result,i)=prevR; \
m=true; \
} \
else \
{ \
prevR=currR; \
} \
} \
else \
{ \
prevR=GET(result,i); \
} \
prevG=currG;
int prevG=GET(G,0);
int prevR=GET(result,0);
for(int i=1;i<N;++i)
{
propagate(i)
}
prevG=GET(G,N-1);
prevR=GET(result,N-1);
for(int i=N-2;i>=0;--i)
{
propagate(i)
}
if(m)
*modified=-1;
#undef propagate
#undef GET
}
}
|
10,182 | #include "includes.h"
/**
 * Backward pass of a TensorFlow-style bilinear resize: scatters each output
 * gradient to the four input pixels it was interpolated from, weighted by
 * the bilinear coefficients.
 * Grid layout: blockIdx.x strides channels, blockIdx.z/threadIdx.z cover the
 * batch, threadIdx.x/y stride output columns/rows.
 */
__global__ void cudaSBilinearTF_BackWard_kernel( unsigned int outputWidth, unsigned int outputHeight, unsigned int nbChannels, unsigned int batchSize, unsigned int inputWidth, unsigned int inputHeight, const float scaleX, const float scaleY, const float* diffInput, float* diffOutputs)
{
const unsigned int inputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbChannels*inputWidth*inputHeight;
const unsigned int outputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbChannels*outputWidth*outputHeight;
for (unsigned int ch = blockIdx.x; ch < nbChannels; ch += gridDim.x)
{
for (unsigned int oy = threadIdx.y; oy < outputHeight; oy += blockDim.y)
{
const float in_y = oy * scaleY;
const int top_y_index = (int)(floorf(in_y));
// Clamp the bottom neighbour to the last input row.
const int bottom_y_index = (in_y < inputHeight - 1) ? ceilf(in_y) : inputHeight - 1;
const float y_lerp = in_y - top_y_index;
const float inverse_y_lerp = (1.0f - y_lerp);
for (unsigned int ox = threadIdx.x; ox < outputWidth; ox += blockDim.x)
{
const float in_x = ox * scaleX;
const int left_x_index = (int)(floorf(in_x));
// Clamp the right neighbour to the last input column.
const int right_x_index = (in_x < inputWidth - 1) ? ceilf(in_x) : inputWidth - 1;
const float x_lerp = in_x - left_x_index;
const float inverse_x_lerp = (1.0f - x_lerp);
const unsigned int inLeftTopIdx = left_x_index + top_y_index*inputWidth + ch*inputWidth*inputHeight + inputOffset;
const unsigned int inRightTopIdx = right_x_index + top_y_index*inputWidth + ch*inputWidth*inputHeight + inputOffset;
const unsigned int inLeftBotIdx = left_x_index + bottom_y_index*inputWidth + ch*inputWidth*inputHeight + inputOffset;
const unsigned int inRightBotIdx = right_x_index + bottom_y_index*inputWidth + ch*inputWidth*inputHeight + inputOffset;
const unsigned int outIdx = ox + oy*outputWidth + ch*outputWidth*outputHeight + outputOffset;
const float outData = diffInput[outIdx];
// Race fix: neighbouring output pixels scatter into the SAME input
// pixels (they share interpolation corners, and clamping aliases the
// borders), so plain += from concurrent threads loses updates.
// Accumulate atomically instead.
atomicAdd(&diffOutputs[inLeftTopIdx], outData * inverse_y_lerp * inverse_x_lerp);
atomicAdd(&diffOutputs[inRightTopIdx], outData * inverse_y_lerp * x_lerp);
atomicAdd(&diffOutputs[inLeftBotIdx], outData * y_lerp * inverse_x_lerp);
atomicAdd(&diffOutputs[inRightBotIdx], outData * y_lerp * x_lerp);
}
}
}
} |
10,183 | /*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <cuda_fp16.h>
// Element-wise increment: out[idx] = in[idx] + 1 for every idx < n,
// one thread per element.
template <typename T>
__global__ void increment_kernel(const T* in, T* out, int n) {
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n) {
        out[gid] = in[gid] + T(1);
    }
}
/**
 * Host wrapper: launch increment_kernel over n elements on `stream`.
 * Asynchronous with respect to the host.
 */
template <typename T>
void increment(const T* in, T* out, int n, cudaStream_t stream) {
  const int thread_size = 1024;
  // Bug fix: the original computed ceil(1.0 * thread_size), i.e. a constant
  // 1024 blocks regardless of n -- too few for n > 1024*1024 (tail elements
  // never processed) and wastefully many for small n. Use ceil(n / threads).
  const int block_size = (n + thread_size - 1) / thread_size;
  // Robustness: a zero-block launch is a CUDA error; skip empty inputs.
  if (block_size > 0) {
    increment_kernel<<<block_size, thread_size, 0, stream>>>(in, out, n);
  }
}
template void increment(const float*, float*, int, cudaStream_t);
template void increment(const __half*, __half*, int, cudaStream_t);
|
10,184 | //
// Created by Peter Rigole on 2019-03-13.
//
#include "Synapse.cuh"
// Default constructor: weight starts at 0.5.
// NOTE(review): sourceNeuronIndex is left uninitialized here -- callers must
// call setSource() before reading it; confirm this is intentional.
Synapse::Synapse() : weight(0.5) {}
// Value constructor: sets both the weight and the index of the source neuron.
Synapse::Synapse(float weight_init, unsigned int sourceNeuronIndexInit) :
weight(weight_init), sourceNeuronIndex(sourceNeuronIndexInit) {}
// Copy constructor, usable on both host and device.
__host__ __device__
Synapse::Synapse(const Synapse &synapseOrig) {
weight = synapseOrig.weight;
sourceNeuronIndex = synapseOrig.sourceNeuronIndex;
}
// Destructor (host only). Intentionally releases nothing:
// Pointers that we don't want to delete here are:
// * source (because source neurons are managed elsewhere)
__host__
Synapse::~Synapse() {
}
// Accessor: current synaptic weight.
__host__ __device__
float Synapse::getWeight() const { return weight; }
// Mutator: overwrite the synaptic weight with the given value.
__host__ __device__
void Synapse::updateWeight(const float weight_update) {
weight = weight_update;
}
// Accessor: index of the source neuron this synapse reads from.
__host__ __device__
unsigned int Synapse::getSource() const { return sourceNeuronIndex; }
// Mutator (host only): repoint this synapse at a different source neuron.
__host__
void Synapse::setSource(unsigned int sourceNeuronIndexUpdate) {
sourceNeuronIndex = sourceNeuronIndexUpdate;
}
|
10,185 | #include <assert.h>
const int N = 1<<10;
// Double every element of `a` (length N) using a grid-stride loop, so any
// launch configuration covers the whole array.
__global__ void cuda_hello(int * a){
    const int stride = gridDim.x * blockDim.x;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    while (idx < N) {
        a[idx] *= 2;
        idx += stride;
    }
}
// Allocate a managed array, double it on the GPU, and verify on the host.
int main(void) {
    int *data;
    // Managed memory: same pointer valid on host and device.
    cudaMallocManaged(&data, sizeof(int) * N);
    for (int k = 0; k < N; ++k)
        data[k] = k;
    // The grid-stride kernel tolerates any launch shape; N/256 blocks of 256.
    cuda_hello<<<N / 256, 256>>>(data);
    cudaDeviceSynchronize();
    for (int k = 0; k < N; ++k)
        assert(data[k] == k * 2);
    cudaFree(data);
    return 0;
}
|
10,186 | // Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p2.cu -o assignment5-p2
// Execute: ./assignment5-p2
#include <algorithm>
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
#define THRESHOLD (0.000001)
#define N (1 << 24)
#define CHUNK_SIZE 2048
#define CHUNK_SIZE2 2048
using std::cerr;
using std::cout;
using std::endl;
// Serial CPU reference: exclusive prefix sum over the fixed N-element
// arrays, i.e. h_O[i] = h_A[0] + ... + h_A[i-1] with h_O[0] = 0.
__host__ void host_excl_prefix_sum(float* h_A, float* h_O) {
  float running = 0;
  for (int i = 0; i < N; i++) {
    h_O[i] = running;
    running = h_O[i] + h_A[i];
  }
}
// Phase 1 (version 1): each thread serially builds the exclusive prefix sum
// of its own CHUNK_SIZE-element chunk. Relies on d_out arriving zeroed, so
// d_out[start] = 0 is the chunk's exclusive base (the host memsets/copies
// zeros before the launch).
__global__ void kernel_excl_prefix_sum_ver1_1(float* d_in, float* d_out) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  const int start = tid * CHUNK_SIZE;
  if (start >= N)
    return;
  for (int k = start + 1; k < start + CHUNK_SIZE; ++k)
    d_out[k] = d_out[k - 1] + d_in[k - 1];
}
// Phase 2 (version 1): once every CHUNK_SIZE chunk holds its local exclusive
// prefix sum, merge pairs of curr_chunk-sized regions: the right (odd)
// region of each pair receives the inclusive total of the left region.
// Each thread adds that total to one CHUNK_SIZE slice of the right region;
// the host doubles curr_chunk each launch until it reaches N.
__global__ void kernel_excl_prefix_sum_ver1_2(float* d_in, float* d_out, long long int curr_chunk) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Number of CHUNK_SIZE slices inside one curr_chunk region.
int times = curr_chunk/CHUNK_SIZE;
int chunk_num = i/times;
int chunk_part = i%times;
// c: start of the right-hand (odd) region of this thread's pair.
int c = (2*chunk_num + 1)*curr_chunk;
// Inclusive total of the left-hand region: its last exclusive entry plus
// the corresponding input element.
float sum = d_out[c-1] + d_in[c-1];
i = c + (curr_chunk * chunk_part)/times;
int upper_limit = i + CHUNK_SIZE;
if(i < N){
for(int j = i; j < upper_limit; j++){
d_out[j] += sum;
}
}
}
// Phase 1 (version 2): identical structure to ver1_1 but chunked by
// CHUNK_SIZE2. Each thread serially builds the exclusive prefix sum of its
// own chunk; relies on d_out arriving zero-initialized.
__global__ void kernel_excl_prefix_sum_ver2_1(float* d_in, float* d_out) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  const int start = tid * CHUNK_SIZE2;
  if (start >= N)
    return;
  for (int k = start + 1; k < start + CHUNK_SIZE2; ++k)
    d_out[k] = d_out[k - 1] + d_in[k - 1];
}
// Phase 2 (version 2): same pairwise merge as ver1_2, with the inner add
// loop manually unrolled 4x. Correct only while CHUNK_SIZE2 is a multiple
// of 4 (it is 2048 here); a non-multiple would leave tail elements unadded.
__global__ void kernel_excl_prefix_sum_ver2_2(float* d_in, float* d_out, long long int curr_chunk) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Number of CHUNK_SIZE2 slices inside one curr_chunk region.
int times = curr_chunk/CHUNK_SIZE2;
int chunk_num = i/times;
int chunk_part = i%times;
// c: start of the right-hand (odd) region of this thread's pair.
int c = (2*chunk_num + 1)*curr_chunk;
// Inclusive total of the left-hand region.
float sum = d_out[c-1] + d_in[c-1];
i = c + (curr_chunk * chunk_part)/times;
int upper_limit = i + CHUNK_SIZE2;
if(i < N){
for(int j = i; j + 3 < upper_limit; j += 4){
d_out[j] += sum;
d_out[j+1] += sum;
d_out[j+2] += sum;
d_out[j+3] += sum;
}
}
}
/**
 * Compare the N-element reference and test arrays elementwise, reporting the
 * number of entries whose difference exceeds THRESHOLD and the worst
 * difference magnitude observed.
 */
__host__ void check_result(float* w_ref, float* w_opt) {
  double maxdiff = 0.0, this_diff = 0.0;
  int numdiffs = 0;
  for (int i = 0; i < N; i++) {
    this_diff = w_ref[i] - w_opt[i];
    if (fabs(this_diff) > THRESHOLD) {
      numdiffs++;
      // Bug fix: track the MAGNITUDE of the worst difference. The original
      // compared the signed diff against maxdiff, so when every diff was
      // negative it reported "Max Diff = 0" despite numdiffs > 0.
      if (fabs(this_diff) > maxdiff)
        maxdiff = fabs(this_diff);
    }
  }
  if (numdiffs > 0) {
    cout << numdiffs << " Diffs found over threshold " << THRESHOLD << "; Max Diff = " << maxdiff
         << endl;
  } else {
    cout << "No differences found between base and test versions\n";
  }
}
// Wall-clock time in seconds (microsecond resolution via gettimeofday).
// On failure the error status is logged and whatever the struct holds is
// still converted and returned.
__host__ double rtclock() {
  struct timeval tv;
  struct timezone tz;
  const int rc = gettimeofday(&tv, &tz);
  if (rc != 0) {
    cout << "Error return from gettimeofday: " << rc << "\n";
  }
  return (tv.tv_sec + tv.tv_usec * 1.0e-6);
}
int main() {
// Buffer size: N floats for the input and for each result buffer.
size_t size = N * sizeof(float);
float* h_in = (float*)malloc(size);
std::fill_n(h_in, N, 1);
float* h_excl_sum_out = (float*)malloc(size);
std::fill_n(h_excl_sum_out, N, 0);
// Reference exclusive prefix sum on the CPU, wall-clock timed.
double clkbegin = rtclock();
host_excl_prefix_sum(h_in, h_excl_sum_out);
double clkend = rtclock();
double time = clkend - clkbegin; // seconds
cout << "Serial time on CPU: " << time * 1000 << " msec" << endl;
float* h_dev_result = (float*)malloc(size);
std::fill_n(h_dev_result, N, 0);
float* d_k1_in;
float* d_k1_out;
cudaError_t status;
cudaEvent_t start, end;
// ---- Kernel 1: chunked exclusive scan (version 1) ----
// NOTE(review): the early EXIT_FAILURE paths below leak the host/device
// allocations; tolerable for a benchmark, but worth noting.
status = cudaMalloc(&d_k1_in, size);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
status = cudaMalloc(&d_k1_out, size);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
// Events bracket H2D copies + kernels + D2H copy, so the reported GPU time
// includes transfer cost.
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_k1_in, h_in, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
status = cudaMemcpy(d_k1_out, h_dev_result, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
// Phase 1: scan each CHUNK_SIZE-sized chunk independently.
dim3 threadsPerBlock(256);
dim3 numBlocks(((N/CHUNK_SIZE) + threadsPerBlock.x - 1)/threadsPerBlock.x);
kernel_excl_prefix_sum_ver1_1<<<numBlocks, threadsPerBlock>>>(d_k1_in, d_k1_out);
// Phase 2: repeatedly merge adjacent chunks, doubling the chunk length
// until a single chunk spans the whole array.
numBlocks = dim3((N/(2*CHUNK_SIZE) + threadsPerBlock.x - 1)/threadsPerBlock.x);
long long int curr_chunk = CHUNK_SIZE;
while(curr_chunk != N){
kernel_excl_prefix_sum_ver1_2<<<numBlocks, threadsPerBlock>>>(d_k1_in, d_k1_out, curr_chunk);
curr_chunk *= 2;
}
// Blocking D2H copy also synchronizes the preceding kernels.
status = cudaMemcpy(h_dev_result, d_k1_out, size, cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float k_time; // ms
cudaEventElapsedTime(&k_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_excl_sum_out, h_dev_result);
cout << "Kernel1 time on GPU: " << k_time << " msec" << endl;
// ---- Kernel 2: same two-phase scheme with CHUNK_SIZE2-sized chunks ----
std::fill_n(h_dev_result, N, 0);
float* d_k2_in;
float* d_k2_out;
status = cudaMalloc(&d_k2_in, size);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
status = cudaMalloc(&d_k2_out, size);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_k2_in, h_in, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
status = cudaMemcpy(d_k2_out, h_dev_result, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
threadsPerBlock = dim3(256);
numBlocks = dim3(((N/CHUNK_SIZE2) + threadsPerBlock.x - 1)/threadsPerBlock.x);
kernel_excl_prefix_sum_ver2_1<<<numBlocks, threadsPerBlock>>>(d_k2_in, d_k2_out);
threadsPerBlock = dim3(256);
numBlocks = dim3((N/(2*CHUNK_SIZE2) + threadsPerBlock.x - 1)/threadsPerBlock.x);
curr_chunk = CHUNK_SIZE2;
while(curr_chunk != N){
kernel_excl_prefix_sum_ver2_2<<<numBlocks, threadsPerBlock>>>(d_k2_in, d_k2_out, curr_chunk);
curr_chunk *= 2;
}
status = cudaMemcpy(h_dev_result, d_k2_out, size, cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&k_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_excl_sum_out, h_dev_result);
cout << "Kernel2 time on GPU: " << k_time << " msec" << endl;
// Free device memory
cudaFree(d_k1_in);
cudaFree(d_k1_out);
cudaFree(d_k2_in);
cudaFree(d_k2_out);
free(h_in);
free(h_excl_sum_out);
free(h_dev_result);
return EXIT_SUCCESS;
}
|
10,187 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Per-row stencil accumulation: each thread sums the 5-point Laplacian
// contributions (4*x - neighbours) over its row of the N x N grid and stores
// the row total in x_next[row]. One thread per row (1D launch).
// NOTE(review): unlike jacobiOnDevice below, the row == 0 branch covers only
// the bottom-left corner -- the bottom-edge loop and bottom-right corner are
// missing, so row 0's sum is a single point. Confirm whether intentional.
// NOTE(review): parameter b is accepted but unused in this kernel.
__global__ void residual(float* x_now, float* x_next, float* b, int N)
{
int j;
float sigma = 0.0;
int row = blockIdx.x*blockDim.x+threadIdx.x;
if (row < N)
{
printf(" %d ", row); // debug print; serializes threads -- remove for timing
if (row == 0)
{
sigma += 4*x_now[0] - x_now[N] - x_now[1]; //bottom left
}
else if (row == N-1)
{
sigma += 4*x_now[(N-1)*N] - x_now[(N-1)*N + 1] - x_now[(N-2)*N]; //top left
for (j=1; j<N-1;j++)
sigma += 4*x_now[(N-1)*N + j] - x_now[(N-1)*N + j-1] - x_now[(N-1)*N + j+1] - x_now[(N-2)*N + j]; //top edge
sigma += 4*x_now[(N-1)*N + N-1] - x_now[(N-1)*N + N-2] - x_now[(N-2)*N + N-1]; //top right
}
else
{
sigma += 4*x_now[row*N] - x_now[row*N + 1] - x_now[(row-1)*N] - x_now[(row+1)*N]; //left edge (i = row)
for (j=1; j<N-1;j++)
sigma += 4*x_now[row*N + j] - x_now[row*N + j-1] - x_now[row*N + j+1] - x_now[(row-1)*N + j] - x_now[(row+1)*N + j]; // inner points (i=row)
sigma += 4*x_now[row*N + N-1] - x_now[row*N + N-2] - x_now[(row-1)*N + N-1] - x_now[(row+1)*N + N-1]; // right edge
}
x_next[row] = sigma;
}
}
// Kernel performing one damped Jacobi iteration (omega = 0.6) for the 2D
// 5-point Laplacian: x_next[row] = aDiag * omega * (b[row] - sigma), where
// sigma accumulates the stencil over the thread's whole row.
// NOTE(review): row is derived from blockIdx.y/threadIdx.y, so this kernel
// needs a 2D launch configuration (see the commented-out gridDim/blockDim in
// main); the active launch path in main only calls residual().
__global__ void jacobiOnDevice(float* x_now, float* x_next, float* b, int N)
{
int j;
float sigma = 0.0;
float omega = 0.6; // damping factor
float aDiag = 0.25; // reciprocal of the stencil diagonal (4)
int row = blockIdx.y*blockDim.y+threadIdx.y;
if (row < N)
{
if (row == 0)
{
sigma += 4*x_now[0] - x_now[N] - x_now[1]; //bottom left
for (j=1; j<N-1;j++)
sigma += 4*x_now[j] - x_now[j-1] - x_now[j+1] - x_now[N + j]; // bottom edge
sigma += 4*x_now[N-1] - x_now[2*N-1] - x_now[N - 2]; //bottom right
}
else if (row == N-1)
{
sigma += 4*x_now[(N-1)*N] - x_now[(N-1)*N + 1] - x_now[(N-2)*N]; //top left
for (j=1; j<N-1;j++)
sigma += 4*x_now[(N-1)*N + j] - x_now[(N-1)*N + j-1] - x_now[(N-1)*N + j+1] - x_now[(N-2)*N + j]; //top edge
sigma += 4*x_now[(N-1)*N + N-1] - x_now[(N-1)*N + N-2] - x_now[(N-2)*N + N-1]; //top right
}
else
{
sigma += 4*x_now[row*N] - x_now[row*N + 1] - x_now[(row-1)*N] - x_now[(row+1)*N]; //left edge (i = row)
for (j=1; j<N-1;j++)
sigma += 4*x_now[row*N + j] - x_now[row*N + j-1] - x_now[row*N + j+1] - x_now[(row-1)*N + j] - x_now[(row+1)*N + j]; // inner points (i=row)
sigma += 4*x_now[row*N + N-1] - x_now[row*N + N-2] - x_now[(row-1)*N + N-1] - x_now[(row+1)*N + N-1]; // right edge
}
x_next[row] = aDiag * omega *(b[row] - sigma);
}
}
int main(int argc, char* argv[]){
    // Expect the grid dimension N on the command line; guard against a
    // missing/invalid argument instead of dereferencing argv[1] blindly.
    if (argc < 2) {
        fprintf(stderr, "usage: %s N\n", argv[0]);
        return 1;
    }
    int N = atoi(argv[1]);
    if (N <= 0) {
        fprintf(stderr, "N must be a positive integer\n");
        return 1;
    }
    clock_t before = clock();
    // Host-side vectors: current iterate, next iterate, right-hand side.
    float *x_now  = (float*)malloc(N*sizeof(float));
    float *x_next = (float*)malloc(N*sizeof(float));
    float *b      = (float*)malloc(N*sizeof(float));
    // Device-side copies.
    float *dev_x_now, *dev_x_next, *dev_b;
    cudaMalloc((void**)&dev_x_now,  N*sizeof(float));
    cudaMalloc((void**)&dev_x_next, N*sizeof(float));
    cudaMalloc((void**)&dev_b,      N*sizeof(float));
    // Fill the arrays on the CPU.
    for (int i = 0; i < N; i++)
    {
        x_now[i]  = 1.0;
        x_next[i] = 0.0;
        b[i]      = 1.0;
    }
    // Copy the data into device global memory.
    cudaMemcpy(dev_x_now,  x_now,  N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_x_next, x_next, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,      b,      N*sizeof(float), cudaMemcpyHostToDevice);
    // One thread per matrix row, 32 threads per block (ceiling division;
    // was (N+32)/32, which allocated a spurious extra block when 32 | N).
    int nblocks = (N + 31) / 32;
    // NOTE: the alternating Jacobi iteration (jacobiOnDevice with a 2D grid,
    // swapping x_now/x_next each step) is currently disabled; only the
    // residual kernel is launched.
    residual<<<nblocks,32>>>(dev_x_now, dev_x_next, dev_b, N);
    cudaError_t err = cudaGetLastError(); // catch bad launch configuration
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Blocking copy of the result back to the host (also synchronizes).
    cudaMemcpy(x_next, dev_x_next, N*sizeof(float), cudaMemcpyDeviceToHost);
    clock_t after = clock();
    // Use the timestamp taken above instead of calling clock() a second time.
    int msec = (int)((after - before) * 1000 / CLOCKS_PER_SEC);
    printf("\nTime taken %d.%d seconds \n\n", msec/1000, msec%1000);
    printf("Result: C= ");
    for (int i = 0; i < N; i++){
        printf(" %f", x_next[i]);
    }
    cudaFree(dev_x_now);
    cudaFree(dev_x_next);
    cudaFree(dev_b);
    free(x_now);
    free(x_next);
    free(b);
    return 0;
}
|
10,188 | ///* Check whether two images are the same */
//#include "lodepng.h"
//#include <stdio.h>
//#include <stdlib.h>
//#include <math.h>
//#define MAX_MSE 0.00001f
//
//float get_MSE(char* input_filename_1, char* input_filename_2)
//{
// unsigned error1, error2;
// unsigned char* image1, * image2;
// unsigned width1, height1, width2, height2;
//
// error1 = lodepng_decode32_file(&image1, &width1, &height1, input_filename_1);
// error2 = lodepng_decode32_file(&image2, &width2, &height2, input_filename_2);
// if (error1) printf("error %u: %s\n", error1, lodepng_error_text(error1));
// if (error2) printf("error %u: %s\n", error2, lodepng_error_text(error2));
// if (width1 != width2) printf("images do not have same width\n");
// if (height1 != height2) printf("images do not have same height\n");
//
// // process image
// float im1, im2, diff, sum, MSE;
// sum = 0;
// for (int i = 0; i < width1 * height1; i++) {
// im1 = (float)image1[i];
// im2 = (float)image2[i];
// diff = im1 - im2;
// sum += diff * diff;
// }
// MSE = sqrt(sum) / (width1 * height1);
//
// free(image1);
// free(image2);
//
// return MSE;
//}
//
//int main(int argc, char *argv[])
//{
// char* input_filename_1 = argv[1];
// char* input_filename_2 = argv[2];
//
// // get mean squared error between image1 and image2
// float MSE = get_MSE(input_filename_1, input_filename_2);
//
// if (MSE < MAX_MSE) {
// printf("Images are equal (MSE = %f, MAX_MSE = %f)\n",MSE,MAX_MSE);
// } else {
// printf("Images are NOT equal (MSE = %f, MAX_MSE = %f)\n",MSE,MAX_MSE);
// }
//
// return 0;
//}
|
10,189 | #include "includes.h"
//using namespace Eigen;
using namespace std;
// Look up temperature-dependent material properties: specific heat *ce,
// density *pho, and thermal conductivity *lamda, for the solid, mushy
// (Ts..Tl) and liquid regimes of the steel.
__device__ void setPhysicialParameters(float T, float *ce, float *pho, float *lamda)
{
    const float Ts = 1456.16f;   // solidus temperature
    const float Tl = 1522.69f;   // liquidus temperature
    const float L  = 268000.0f;  // latent heat

    *pho = 7250.0f;  // density is the same in every regime
    if (T < Ts)
    {
        // fully solid
        *lamda = 50.0f;
        *ce = 540.0f;
    }
    else if (T <= Tl)
    {
        // mushy zone: blend conductivity by solid fraction, add latent heat
        float fs = (Tl - T) / (Tl - Ts);
        *lamda = fs * 25.0f + (1.0f - fs) * 50.0f;
        *ce = 540.0f + L / (Tl - Ts);
    }
    else
    {
        // fully liquid
        *lamda = 28.0f;
        *ce = 540.0f;
    }
}
// Return the surface heat-transfer coefficient for the current axial
// position of the slice. Each cooling section [ccml[i], ccml[i+1]] carries
// its own coefficient; blockIdx.x selects this block's row of the hPop table.
__device__ float setBoundaryCondition(int tstep, float tau, float Vcast, float *hPop, int Section, float *ccml)
{
    // Axial position = elapsed time * |casting speed|.
    float zposition = tstep * tau * fabs(Vcast);
    float h = 0;  // coefficient stays 0 if no section matches
    for (int i = 0; i < Section; i++)
    {
        if (zposition >= ccml[i] && zposition <= ccml[i + 1])
        {
            h = hPop[blockIdx.x * Section + i];
        }
    }
    return h;
}
// Explicit finite-difference update of the 2D heat-conduction equation for
// one time step. Each block handles one parameter set/strand (blockIdx.x);
// thread (i, j) updates one node of the nx x ny grid. Convective boundary
// conditions use coefficient h from setBoundaryCondition and water
// temperature Tw. The surface mid-point temperature is logged to T_Surface.
// NOTE(review): ax and ay are computed from the *argument* values of
// lamda/pho/ce BEFORE setPhysicialParameters updates them per node, so the
// temperature-dependent properties do not affect the stencil coefficients --
// confirm whether intended.
// NOTE(review): the final T_Last[tis] = T_New[tis] store is only ordered by
// the trailing __syncthreads() within a block; if one launch performs more
// than one conceptual step, neighbouring reads of T_Last can race with these
// writes -- confirm the host-side loop launches one step per kernel call.
__global__ void solvePDEKernel(float *hPop, float *T_Last, float *T_New, float *T_Surface, float Tw, float lamda, float pho, float ce, int ny, float dy, int nx, float dx, float tau, int tnpts, int tstep, float Vcast, int Section, float *ccml)
{
float ax, ay, T_Up, T_Down, T_Middle, T_Right, T_Left;
float h;
// Explicit-scheme coefficients (tau * alpha / dx^2 form).
ax = tau * lamda / (pho * ce * dx * dx);
ay = tau * lamda / (pho * ce * dy * dy);
int i = threadIdx.x;
int j = threadIdx.y;
int tis = blockIdx.x * nx * ny + i * ny + j;
int L = ny; // stride between adjacent columns (i +/- 1) in the flat layout
setPhysicialParameters(T_Last[tis], &ce, &pho, &lamda);
h = setBoundaryCondition(tstep, tau, Vcast, hPop, Section, ccml);
if (i != 0 && i != (nx - 1) && j != 0 && j != (ny - 1))// interior node
{
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_Middle = T_Last[tis];
T_Up = T_Last[tis + 1];
T_Down = T_Last[tis - 1];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j == 0)// corner 1: mirror ghost nodes with convective flux
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j == 0)// corner 2
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j == (ny - 1))// corner 3
{
T_Up = T_Last[tis - 1] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j == (ny - 1))// corner 4
{
T_Up = T_Last[tis - 1] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j != 0 && j != (ny - 1))// edge 1 (i == 0)
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j != 0 && j != (ny - 1))// edge 2 (i == nx-1)
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i != 0 && i != (nx - 1) && j == 0)// edge 3 (j == 0)
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h* (T_Last[tis] - Tw) / lamda;
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i != 0 && i != (nx - 1) && j == (ny - 1))// edge 4 (j == ny-1)
{
T_Up = T_Last[tis - 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
// Record the surface mid-height temperature for this strand and time step.
if (i == 0 && j == int((ny - 1)/2))
T_Surface[blockIdx.x * tnpts + tstep] = T_New[tis];
T_Last[tis] = T_New[tis];
__syncthreads();
}
10,190 | #include "includes.h"
// Gauss-Jordan elimination step: divide row i of A and of the identity
// accumulator I by the pivot A[i][i], skipping the pivot column itself
// (the original had the same bounds guard nested twice; collapsed here).
__global__ void nodiag_normalize(double *A, double *I, int nn, int i){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= nn || y >= nn) return;
    if (x == i && x != y) {
        // y != i here, so A[i*nn + i] is never the element being scaled.
        double pivot = A[i*nn + i];
        I[x*nn + y] /= pivot;
        A[x*nn + y] /= pivot;
    }
}
10,191 |
#define TIME_STEPSIZE2 0.3
// Overwrite every particle's acceleration with the supplied value.
// NOTE(review): no bounds check -- the launch config must match the array
// length exactly (it does in addforceCudaKernel below).
__global__ void addforce_Kernel(float3* partcleAcclerationArray, float3 accelaration)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    partcleAcclerationArray[idx] = make_float3(accelaration.x, accelaration.y, accelaration.z);
}
// Host wrapper: one thread per particle, width threads per block and one
// block per grid row.
void addforceCudaKernel(float3* partcleAcclerationArray, float3 accelaration, int numOfParticles, int particle_width, int particle_height)
{
    dim3 block(particle_width, 1, 1);
    dim3 grid(particle_height, 1, 1);
    addforce_Kernel<<<grid, block>>>(partcleAcclerationArray, accelaration);
}
// Fetch particle (x, y) from the row-major position grid.
__device__ float3 GPUgetParticle(float3* GPUParticles_pos_array,int x, int y, int particle_width) { return GPUParticles_pos_array[y*particle_width + x]; }
// Scratch global for cross products (declared but never written below).
__device__ float GPUcrossProduct[3];
// Cross product v1 x v2.
// NOTE(review): this returns the address of a function-local array, which is
// dangling as soon as GPUcross returns -- every caller that reads the result
// relies on undefined behavior. It should write into a caller-provided
// output array instead (GPUcrossProduct would also race, being shared by all
// threads).
__device__ float* GPUcross(const float* v1, const float* v2)
{
float crossProduct[3] = {v1[1] * v2[2] - v1[2] * v2[1], v1[2] * v2[0] - v1[0] * v2[2], v1[0] * v2[1] - v1[1] * v2[0]};
return crossProduct;
}
// Euclidean length of a 3-vector.
__device__ float GPUlength(float* v)
{
return sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
}
// Flatten (x, y) grid coordinates to a linear particle index.
__device__ int GPUgetParticles_pos_array_index(int x, int y, int particle_width)
{
return (y*particle_width + x);
}
// For each interior grid cell, compute the normals of its two triangles,
// project the wind onto them, and add the resulting force (divided by unit
// mass) to the accelerations of each triangle's three particles.
// BUG FIX: the z component of the force previously used normal[1] instead of
// normal[2] (copy-paste typo, in both triangles).
// NOTE(review): GPUcross returns a pointer to a function-local array, so the
// reads through `normal` below are undefined behavior; the call structure is
// preserved here -- the fix belongs in GPUcross itself.
__global__ void applyWindForce_Kernel(float3* GPUParticles_pos_array, float3* partcleAcclerationArray,int particle_width,int particle_height, float3 windForce)
{
int thread_no = blockIdx.x * blockDim.x + threadIdx.x;
float *normal;
float3 TrianglePoint1, TrianglePoint2, TrianglePoint3;
float v1[3], v2[3];
float mass = 1.0;
// Skip the last grid row (thread_no + blockDim.x would run past the end)
// and the last column (thread_no + 1 would wrap to the next row).
if(((thread_no) < ((particle_width * particle_height) - blockDim.x )) && ((thread_no % blockDim.x) < (blockDim.x-1)))
{
// Triangle 1: (X+1,Y), (X,Y), (X,Y+1).
TrianglePoint1 = GPUParticles_pos_array[thread_no + blockDim.x];
TrianglePoint2 = GPUParticles_pos_array[thread_no];
TrianglePoint3 = GPUParticles_pos_array[thread_no + 1];
v1[0] = TrianglePoint2.x - TrianglePoint1.x;
v1[1] = TrianglePoint2.y - TrianglePoint1.y;
v1[2] = TrianglePoint2.z - TrianglePoint1.z;
v2[0] = TrianglePoint3.x - TrianglePoint1.x;
v2[1] = TrianglePoint3.y - TrianglePoint1.y;
v2[2] = TrianglePoint3.z - TrianglePoint1.z;
normal = GPUcross(&v1[0], &v2[0]);
float d[3];
float l = GPUlength(&normal[0]);
d[0] = normal[0]/l;
d[1] = normal[1]/l;
d[2] = normal[2]/l;
// Wind component along the (normalized) face normal.
float dotproduct = d[0] * windForce.x + d[1] * windForce.y + d[2] * windForce.z;
float force[3] = {normal[0] * dotproduct, normal[1] * dotproduct, normal[2] * dotproduct};
int particalIndex = 0;
particalIndex = thread_no + blockDim.x; // (X+1,Y)
partcleAcclerationArray[particalIndex].x = partcleAcclerationArray[particalIndex].x + force[0] / mass;
partcleAcclerationArray[particalIndex].y = partcleAcclerationArray[particalIndex].y + force[1] / mass;
partcleAcclerationArray[particalIndex].z = partcleAcclerationArray[particalIndex].z + force[2] / mass;
particalIndex = thread_no; // (X,Y)
partcleAcclerationArray[particalIndex].x = partcleAcclerationArray[particalIndex].x + force[0] / mass;
partcleAcclerationArray[particalIndex].y = partcleAcclerationArray[particalIndex].y + force[1] / mass;
partcleAcclerationArray[particalIndex].z = partcleAcclerationArray[particalIndex].z + force[2] / mass;
particalIndex = thread_no + 1; // (X,Y+1)
partcleAcclerationArray[particalIndex].x = partcleAcclerationArray[particalIndex].x + force[0] / mass;
partcleAcclerationArray[particalIndex].y = partcleAcclerationArray[particalIndex].y + force[1] / mass;
partcleAcclerationArray[particalIndex].z = partcleAcclerationArray[particalIndex].z + force[2] / mass;
// Triangle 2: (X+1,Y+1), (X+1,Y), (X,Y+1).
TrianglePoint1 = GPUParticles_pos_array[thread_no + blockDim.x + 1];
TrianglePoint2 = GPUParticles_pos_array[thread_no + blockDim.x];
TrianglePoint3 = GPUParticles_pos_array[thread_no + 1];
v1[0] = TrianglePoint2.x - TrianglePoint1.x;
v1[1] = TrianglePoint2.y - TrianglePoint1.y;
v1[2] = TrianglePoint2.z - TrianglePoint1.z;
v2[0] = TrianglePoint3.x - TrianglePoint1.x;
v2[1] = TrianglePoint3.y - TrianglePoint1.y;
v2[2] = TrianglePoint3.z - TrianglePoint1.z;
normal = GPUcross(&v1[0], &v2[0]);
l = GPUlength(&normal[0]);
d[0] = normal[0]/l;
d[1] = normal[1]/l;
d[2] = normal[2]/l;
dotproduct = d[0] * windForce.x + d[1] * windForce.y + d[2] * windForce.z;
force[0] = normal[0] * dotproduct; force[1] = normal[1] * dotproduct; force[2] = normal[2] * dotproduct;
particalIndex = thread_no + blockDim.x + 1; // (X+1,Y+1)
partcleAcclerationArray[particalIndex].x = partcleAcclerationArray[particalIndex].x + force[0] / mass;
partcleAcclerationArray[particalIndex].y = partcleAcclerationArray[particalIndex].y + force[1] / mass;
partcleAcclerationArray[particalIndex].z = partcleAcclerationArray[particalIndex].z + force[2] / mass;
particalIndex = thread_no + blockDim.x; // (X+1,Y)
partcleAcclerationArray[particalIndex].x = partcleAcclerationArray[particalIndex].x + force[0] / mass;
partcleAcclerationArray[particalIndex].y = partcleAcclerationArray[particalIndex].y + force[1] / mass;
partcleAcclerationArray[particalIndex].z = partcleAcclerationArray[particalIndex].z + force[2] / mass;
particalIndex = thread_no + 1; // (X,Y+1)
partcleAcclerationArray[particalIndex].x = partcleAcclerationArray[particalIndex].x + force[0] / mass;
partcleAcclerationArray[particalIndex].y = partcleAcclerationArray[particalIndex].y + force[1] / mass;
partcleAcclerationArray[particalIndex].z = partcleAcclerationArray[particalIndex].z + force[2] / mass;
}
}
// Host wrapper: one thread per particle (width threads/block, height blocks).
void applyWindForceCudaKernel(float3* GPUParticles_pos_array, float3* partcleAcclerationArray, int particle_width, int particle_height, float3 windForce)
{
    dim3 block(particle_width, 1, 1);
    dim3 grid(particle_height, 1, 1);
    applyWindForce_Kernel<<<grid, block>>>(GPUParticles_pos_array, partcleAcclerationArray, particle_width, particle_height, windForce);
}
// Satisfy one spring constraint per thread: compute the correction needed to
// restore the rest distance between the two linked particles and move each
// movable endpoint by half of it (toward/away from the other).
__global__ void timeStep_Kernel(float3* GPUParticles_pos_array,int2* GPUNeighbourParticlesInddex,float* GPURestDistance,bool* GPUMovableStatus,int TotalThreads)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < TotalThreads)
    {
        int pa = GPUNeighbourParticlesInddex[index].x;
        int pb = GPUNeighbourParticlesInddex[index].y;
        // Vector from particle pa to particle pb.
        float p1_to_p2[3];
        p1_to_p2[0] = GPUParticles_pos_array[pb].x - GPUParticles_pos_array[pa].x;
        p1_to_p2[1] = GPUParticles_pos_array[pb].y - GPUParticles_pos_array[pa].y;
        p1_to_p2[2] = GPUParticles_pos_array[pb].z - GPUParticles_pos_array[pa].z;
        float current_distance = GPUlength(&p1_to_p2[0]);
        // Half of the full correction, so BOTH endpoints can share the move.
        float half_corr[3];
        for (int k = 0; k < 3; k++)
            half_corr[k] = (p1_to_p2[k] * (1 - GPURestDistance[index] / current_distance)) * 0.5;
        if (GPUMovableStatus[pa])
        {
            GPUParticles_pos_array[pa].x += half_corr[0];
            GPUParticles_pos_array[pa].y += half_corr[1];
            GPUParticles_pos_array[pa].z += half_corr[2];
        }
        if (GPUMovableStatus[pb])
        {
            GPUParticles_pos_array[pb].x -= half_corr[0];
            GPUParticles_pos_array[pb].y -= half_corr[1];
            GPUParticles_pos_array[pb].z -= half_corr[2];
        }
    }
}
// Host wrapper: one thread per spring constraint, arranged width x height.
void timeStepCudaKernel(float3* GPUParticles_pos_array,int2* GPUNeighbourParticlesInddex,float* GPURestDistance,bool* GPUMovableStatus, int NoOfwidthparticles,int NoOfheightparticle, int ToatalThreads)
{
    dim3 block(NoOfwidthparticles, 1, 1);
    dim3 grid(NoOfheightparticle, 1, 1);
    timeStep_Kernel<<<grid, block>>>(GPUParticles_pos_array, GPUNeighbourParticlesInddex, GPURestDistance, GPUMovableStatus, ToatalThreads);
}
// Verlet integration step: advance each movable particle using its previous
// position and accumulated acceleration, then clear the acceleration.
// NOTE(review): the dampingFactor parameter is immediately overwritten with
// 0.0f, so the caller-supplied damping has no effect -- confirm intent.
// NOTE(review): index is not bounds-checked; the launch configuration must
// supply exactly one thread per particle.
__global__ void timeStepDisplacement_Kernel( float3* KParticles_pos_array,float3* KParticles_old_pos_array,bool* GPUMovableStatus,float dampingFactor, float3* partcleArray,int particle_width, int particle_height)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
dampingFactor = 0.000f;
if(GPUMovableStatus[index])
{
// Remember the current position; it becomes the "old" position below.
float temp[3];
temp[0] = KParticles_pos_array[index].x;
temp[1] = KParticles_pos_array[index].y;
temp[2] = KParticles_pos_array[index].z;
// pos += (pos - old_pos) * (1 - damping) + accel * dt^2
KParticles_pos_array[index].x = KParticles_pos_array[index].x + (KParticles_pos_array[index].x - KParticles_old_pos_array[index].x)*(1.0 - dampingFactor) + partcleArray[index].x * TIME_STEPSIZE2;
KParticles_pos_array[index].y = KParticles_pos_array[index].y + (KParticles_pos_array[index].y - KParticles_old_pos_array[index].y)*(1.0 - dampingFactor) + partcleArray[index].y * TIME_STEPSIZE2;
KParticles_pos_array[index].z = KParticles_pos_array[index].z + (KParticles_pos_array[index].z - KParticles_old_pos_array[index].z)*(1.0 - dampingFactor) + partcleArray[index].z * TIME_STEPSIZE2;
KParticles_old_pos_array[index].x = temp[0];
KParticles_old_pos_array[index].y = temp[1];
KParticles_old_pos_array[index].z = temp[2];
// Reset the accumulated acceleration for the next frame.
partcleArray[index].x = 0.0f;
partcleArray[index].y = 0.0f;
partcleArray[index].z = 0.0f;
}
}
// Host wrapper: one thread per particle (width threads/block, height blocks).
void timeStepDisplacementCudaKernel(float3* KParticles_pos_array,float3* KParticles_old_pos_array,bool* GPUMovableStatus,float dampingFactor, float3* partcleArray,int particle_width, int particle_height)
{
    dim3 block(particle_width, 1, 1);
    dim3 grid(particle_height, 1, 1);
    timeStepDisplacement_Kernel<<<grid, block>>>(KParticles_pos_array, KParticles_old_pos_array, GPUMovableStatus, dampingFactor, partcleArray, particle_width, particle_height);
}
// Recompute per-particle normals: for each interior grid cell, compute the
// normals of its two triangles and copy each (unnormalized) normal onto all
// three of that triangle's vertices.
// NOTE(review): GPUcross returns a pointer to a function-local array, so all
// reads through `normal` below rely on undefined behavior; fix in GPUcross.
// NOTE(review): adjacent threads write overlapping particle indices with no
// synchronization, so the final normal per particle is last-writer-wins, not
// an average over adjacent faces -- confirm that is acceptable.
__global__ void calculateNormal_Kernel(float3* KParticles_pos_array,float3* KParticles_Normal, int Kparticle_width, int Kparticle_height)
{
int thread_no = blockIdx.x * blockDim.x + threadIdx.x;
float *normal;
float3 TrianglePoint1, TrianglePoint2, TrianglePoint3;
float v1[3], v2[3];
// Skip the last grid row and the last column of each row.
if(((thread_no) < ((Kparticle_width * Kparticle_height) - blockDim.x )) && ((thread_no % blockDim.x) < (blockDim.x-1)))
{
// First triangle of the cell.
TrianglePoint1 = KParticles_pos_array[thread_no ]; //getParticle(x+1,y);
TrianglePoint2 = KParticles_pos_array[thread_no + blockDim.x]; //getParticle(x,y);
TrianglePoint3 = KParticles_pos_array[thread_no + 1]; //getParticle(x,y+1);
v1[0] = TrianglePoint2.x - TrianglePoint1.x;
v1[1] = TrianglePoint2.y - TrianglePoint1.y;
v1[2] = TrianglePoint2.z - TrianglePoint1.z;
v2[0] = TrianglePoint3.x - TrianglePoint1.x;
v2[1] = TrianglePoint3.y - TrianglePoint1.y;
v2[2] = TrianglePoint3.z - TrianglePoint1.z;
normal = GPUcross(&v1[0], &v2[0]);
int particalIndex = 0;
particalIndex = thread_no ;
KParticles_Normal[particalIndex].x = normal[0];
KParticles_Normal[particalIndex].y = normal[1];
KParticles_Normal[particalIndex].z = normal[2];
particalIndex = thread_no + blockDim.x;
KParticles_Normal[particalIndex].x = normal[0];
KParticles_Normal[particalIndex].y = normal[1];
KParticles_Normal[particalIndex].z = normal[2];
particalIndex = thread_no+1;
KParticles_Normal[particalIndex].x = normal[0];
KParticles_Normal[particalIndex].y = normal[1];
KParticles_Normal[particalIndex].z = normal[2];
normal = NULL;
//TrianglePoint1 = NULL; TrianglePoint2 = NULL; TrianglePoint3=NULL;
v1[0] = 0.0f; v1[1] = 0.0f; v1[2] = 0.0f;
v2[0] = 0.0f; v2[1] = 0.0f; v2[2] = 0.0f;
// Second triangle of the cell.
TrianglePoint1 = KParticles_pos_array[thread_no + 1]; //getParticle(x+1,y+1);
TrianglePoint2 = KParticles_pos_array[thread_no+ blockDim.x ]; //getParticle(x+1,y);
TrianglePoint3 = KParticles_pos_array[thread_no + blockDim.x + 1]; //getParticle(x,y+1);
v1[0] = TrianglePoint2.x - TrianglePoint1.x;
v1[1] = TrianglePoint2.y - TrianglePoint1.y;
v1[2] = TrianglePoint2.z - TrianglePoint1.z;
v2[0] = TrianglePoint3.x - TrianglePoint1.x;
v2[1] = TrianglePoint3.y - TrianglePoint1.y;
v2[2] = TrianglePoint3.z - TrianglePoint1.z;
normal = GPUcross(&v1[0], &v2[0]);
particalIndex = thread_no + 1;
KParticles_Normal[particalIndex].x = normal[0];
KParticles_Normal[particalIndex].y = normal[1];
KParticles_Normal[particalIndex].z = normal[2];
particalIndex = thread_no + blockDim.x;
KParticles_Normal[particalIndex].x = normal[0];
KParticles_Normal[particalIndex].y = normal[1];
KParticles_Normal[particalIndex].z = normal[2];
particalIndex = thread_no + blockDim.x+ 1 ;
KParticles_Normal[particalIndex].x = normal[0];
KParticles_Normal[particalIndex].y = normal[1];
KParticles_Normal[particalIndex].z = normal[2];
}
}
// Host wrapper: one thread per particle (width threads/block, height blocks).
void calculateNormalCudaKernel(float3* KParticles_pos_array,float3* KParticles_Normal, int Kparticle_width, int Kparticle_height)
{
    dim3 block(Kparticle_width, 1, 1);
    dim3 grid(Kparticle_height, 1, 1);
    calculateNormal_Kernel<<<grid, block>>>(KParticles_pos_array, KParticles_Normal, Kparticle_width, Kparticle_height);
}
// Expand each interior grid cell into two triangles (6 vertices), copying
// position and normal per vertex into the flat output arrays starting at
// thread_no * 6. Vertex order matches the original unrolled version:
// triangle 1 = (x,y+1) (x,y) (x+1,y); triangle 2 = (x,y+1) (x+1,y) (x+1,y+1).
__global__ void render_Kernel(float3* KtriangleVertices,float3* KtriangleVertices_normal, float3* KParticles_pos_array, float3* KParticles_Normal, int Kparticle_width, int Kparticle_height)
{
    int thread_no = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip the last grid row and the last column of each row.
    if(((thread_no) < ((Kparticle_width * Kparticle_height) - blockDim.x )) && ((thread_no % blockDim.x) < (blockDim.x-1)))
    {
        // Source particle index for each of the six output vertices.
        int src[6];
        src[0] = thread_no;
        src[1] = thread_no + blockDim.x;
        src[2] = thread_no + 1;
        src[3] = thread_no + 1;
        src[4] = thread_no + blockDim.x;
        src[5] = thread_no + blockDim.x + 1;
        int out = thread_no * 6;
        for (int v = 0; v < 6; v++, out++)
        {
            float3 point = KParticles_pos_array[src[v]];
            float3 pointNormal = KParticles_Normal[src[v]];
            KtriangleVertices[out].x = point.x;
            KtriangleVertices[out].y = point.y;
            KtriangleVertices[out].z = point.z;
            KtriangleVertices_normal[out].x = pointNormal.x;
            KtriangleVertices_normal[out].y = pointNormal.y;
            KtriangleVertices_normal[out].z = pointNormal.z;
        }
    }
}
// Host wrapper: one thread per particle (width threads/block, height blocks).
void renderCudaKernel(float3* KtriangleVertices,float3* KtriangleVertices_normal, float3* KParticles_pos_array, float3* KParticles_Normal, int Kparticle_width, int Kparticle_height)
{
    dim3 block(Kparticle_width, 1, 1);
    dim3 grid(Kparticle_height, 1, 1);
    render_Kernel<<<grid, block>>>(KtriangleVertices, KtriangleVertices_normal, KParticles_pos_array, KParticles_Normal, Kparticle_width, Kparticle_height);
}
|
10,192 | #include <iostream>
#include <string>
#include <cuda.h>
// Dump the per-channel bit widths (x, y, z, w) of a CUDA channel format
// descriptor under the given label.
void printDesc(const std::string &label, const cudaChannelFormatDesc &desc) {
    std::cout << label << " -----------" << std::endl;
    const char* names[4] = {"x", "y", "z", "w"};
    int bits[4] = {desc.x, desc.y, desc.z, desc.w};
    for (int i = 0; i < 4; ++i)
        std::cout << names[i] << ": " << bits[i] << std::endl;
}
int main(int argc, char const *argv[])
{
    // Print the channel layout (bits per component) for several element
    // types; order matters for the output, so keep it as-is.
    printDesc("float4", cudaCreateChannelDesc<float4>());
    printDesc("float", cudaCreateChannelDesc<float>());
    printDesc("int", cudaCreateChannelDesc<int>());
    printDesc("char", cudaCreateChannelDesc<char>());
    printDesc("char4", cudaCreateChannelDesc<char4>());
    return 0;
}
|
10,193 | #include "includes.h"
// Gather masses for the atoms listed in g_group_contents into a compact
// output array: g_mass_o[n] = g_mass_i[g_group_contents[n]].
__global__ void gpu_copy_mass( const int num_atoms, const int* g_group_contents, const double* g_mass_i, double* g_mass_o)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= num_atoms) return;
    g_mass_o[tid] = g_mass_i[g_group_contents[tid]];
}
10,194 | #include "stdio.h"
// Element-wise vector add, one thread per element. Assumes a single-block
// launch (index comes from threadIdx.x only, see main's <<<1, N>>>).
__global__ void add(int *a, int *b, int *c){
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
#define N 512
int main(void ){
    int *a, *b, *c;              // host copies of a, b and c
    int *dev_a, *dev_b, *dev_c;  // device copies of a, b and c
    int size = N * sizeof(int);  // bytes needed for N ints

    // Allocate device copies of a, b, c. Previously unchecked: a failed
    // allocation would let the kernel dereference invalid pointers silently.
    if (cudaMalloc((void**)&dev_a, size) != cudaSuccess ||
        cudaMalloc((void**)&dev_b, size) != cudaSuccess ||
        cudaMalloc((void**)&dev_c, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    if (!a || !b || !c) {
        fprintf(stderr, "malloc failed\n");
        return 1;
    }
    // Deterministic test inputs: a[i] = i, b[i] = 2i.
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }
    // Copy inputs to the device.
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    // Launch add() with one block of N threads (requires N <= 1024).
    add<<<1, N>>>(dev_a, dev_b, dev_c);
    cudaError_t err = cudaGetLastError();  // catch launch-configuration errors
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Blocking copy of the result back to the host (synchronizes the kernel).
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("The value of the %d plus %d is : %d\n", a[i], b[i], c[i]);
    }
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
10,195 |
#define THREAD_BLOCK_SIZE 512
#define NUM_BLOCKS 320 // Define the size of a tile
// Dot product A.B: each thread accumulates a grid-strided partial sum, the
// block reduces the partials in shared memory, and thread 0 writes one
// per-block result to C[blockIdx.x] (the final sum over C is left to the
// caller).  The tree reduction assumes blockDim.x is a power of two and
// that blockDim.x <= THREAD_BLOCK_SIZE (shared-array capacity).
__global__ void vector_dot_product_kernel(float *A, float *B, float *C, unsigned int num_elements)
{
__shared__ float sum_per_thread[THREAD_BLOCK_SIZE];
unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the index of the thread
unsigned int stride = blockDim.x * gridDim.x;
// Grid-stride accumulation of this thread's share of the products.
float sum = 0.0f;
unsigned int i = thread_id;
while(i < num_elements){
sum += A[i] * B[i];
i += stride;
}
sum_per_thread[threadIdx.x] = sum; // Copy sum to shared memory
__syncthreads(); // all partials must be visible before the reduction starts
// Pairwise tree reduction; the active range is halved each pass.
i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i)
sum_per_thread[threadIdx.x] += sum_per_thread[threadIdx.x + i];
__syncthreads(); // barrier is outside the if, so every thread reaches it
i /= 2;
}
if(threadIdx.x == 0)
C[blockIdx.x] = sum_per_thread[0];
}
|
10,196 | // ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// Adding two numbers in the device
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <stdio.h>
#include <iostream>
using namespace std;
// --------------------- CUDA KERNELS
// kernel function sum with three parameters
// Kernel: store a + b into the device integer pointed to by c.
// Intended for a <<<1,1>>> launch; every thread would write the same value.
__global__ void sum(int a, int b, int *c)
{
    c[0] = a + b;
}
// the main is a host code
// Host driver: computes 2 + 7 on the GPU with a single thread and prints
// the result, narrating each stage on stdout.
int main(int argc, const char * argv[])
{
    cout << "------------ initialising host and device variables" << endl;
    int a = 2;
    int b = 7;
    int c;        // host-side destination for the sum
    int *dev_c;   // device-side destination written by the kernel

    cout << "------------ allocate device memory dev_c" << endl;
    // The kernel can only write device memory, so the result lives in
    // dev_c until it is copied back to the host.
    cudaMalloc( (void**)&dev_c, sizeof(int) );

    cout << "------------ calling kernel" << endl;
    sum<<<1,1>>>(a, b, dev_c);   // one block, one thread

    cout << "------------ copy dev_c to c from device to host" << endl;
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);

    cout << "------------ display results" << endl;
    printf( "%d + %d = %d\n", a, b, c);

    cudaFree( dev_c );
    return 0;
}
|
extern "C"
// 1-D convolution-style kernel: p[idx] = sum over i in [0, idx] of
// (l[i] + r[idx-i]).  One thread per output element; threads at or past N
// exit without writing.  The size_t accumulator is truncated to int on store.
__global__
void vecConv(int *l, int *r, int *p, size_t N) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    size_t acc = 0;
    for (size_t i = 0; i <= idx; i++) {
        acc += l[i] + r[idx - i];
    }
    p[idx] = acc;
}
|
// Mandelbrot escape-time kernel over a disp_width x disp_height pixel grid.
// One thread per pixel from a 2-D launch (i = x index, j = y index); each
// pixel is mapped to c = x + y*i in the rectangle [-2.25, 1.25] x
// [-1.75, 1.75] and z <- z^2 + c is iterated up to max_iter times.
// Pixels that never escape are stored as 0.
// NOTE(review): the store uses array[i * disp_height + j] — column-major
// with respect to (i, j); confirm the consumer indexes the same way.
__global__
void mandel(int disp_width, int disp_height, int *array, int max_iter)
{
double scale_real, scale_imag;
double x, y, u, v, u2, v2;
int i, j, iter;
// Complex-plane units per pixel along each axis.
scale_real = 3.5 / (double)disp_width;
scale_imag = 3.5 / (double)disp_height;
// Get thread indexes
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= disp_width || j >= disp_height)
return;
// c for this pixel; (u, v) holds the current z, (u2, v2) its squared parts.
x = ((double)i * scale_real) - 2.25;
y = ((double)j * scale_imag) - 1.75;
u = 0.0;
v = 0.0;
u2 = 0.0;
v2 = 0.0;
iter = 0;
// Iterate z <- z^2 + c until |z|^2 >= 4 (escape) or max_iter is reached.
while (u2 + v2 < 4.0 && iter < max_iter)
{
v = 2 * v * u + y;
u = u2 - v2 + x;
u2 = u * u;
v2 = v * v;
iter = iter + 1;
}
// if we exceed max_iter, reset to zero
iter = iter == max_iter ? 0 : iter;
array[i * disp_height + j] = iter;
}
|
10,199 | #include <iostream>
#include <cuda.h>
#include <algorithm>
#include <cstdlib>
using namespace std;
// Element-wise add over a 2-D launch: the (column, row) thread coordinates
// are flattened into a linear index k, and z[k] = x[k] + y[k] for k < size.
__global__ void sum(float *x, float *y, float *z, int size)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = row * gridDim.x * blockDim.x + col;
    if (k < size)
        z[k] = x[k] + y[k];
}
// Host driver: adds two 1M-element float vectors on the GPU and prints
// every "x + y = z" line.  Exit status is 0 on completion.
int main() {
    const int SIZE = 1024*1024;
    // Bug fix: the byte count was held in a `const float`; a size is an
    // integer quantity and float only represented it correctly by accident.
    const size_t n_byte = SIZE * sizeof(float);

    // 1) allocate and initialize host data (x = 1, y = 2 everywhere)
    float *h_x = new float[ SIZE ], *h_y = new float[ SIZE ], *h_z = new float[ SIZE ];
    fill(&h_x[0], &h_x[SIZE], 1);
    fill(&h_y[0], &h_y[SIZE], 2);

    // 2) allocate device buffers
    float *d_x, *d_y, *d_z;
    cudaMalloc( (void**) &d_x, n_byte );
    cudaMalloc( (void**) &d_y, n_byte );
    cudaMalloc( (void**) &d_z, n_byte );

    // 3) copy inputs host -> device
    cudaMemcpy(d_x, h_x, n_byte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, n_byte, cudaMemcpyHostToDevice);

    // 4) launch: 1024 blocks of 512x2 = 1024 threads, one thread per element
    dim3 t_grid(1024,1,1);
    dim3 t_bloc(512,2,1);
    sum<<< t_grid, t_bloc >>>(d_x, d_y, d_z, SIZE);

    // 5) copy result device -> host (blocking, so it also syncs the kernel)
    cudaMemcpy(h_z, d_z, n_byte, cudaMemcpyDeviceToHost);
    for(size_t i=0; i<SIZE; ++i)
        cout<< h_x[i]<<" + "<<h_y[i]<<" = "<<h_z[i]<<endl;

    // 6) free device memory
    cudaFree( d_x );cudaFree( d_y );cudaFree( d_z );
    // 7) free host memory
    delete [] h_x;delete [] h_y;delete [] h_z;

    // Bug fix: the original ended with exit(EXIT_FAILURE), reporting
    // failure to the shell on every successful run.
    return EXIT_SUCCESS;
}
10,200 | #include<chrono>
#include<stdio.h>
#include<iostream>
#include<math.h>
#include<string>
#include<sstream>
#include<fstream>
#include<vector>
#include<malloc.h>
#define LENGTH_DICTIONARY 57664
#define LENGTH_DOCS 10000
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
using namespace std;
// Report a CUDA error (with source location) on stderr and, unless abort
// is false, terminate the process with the error code.  Normally invoked
// through the gpuErrchk() macro, which supplies __FILE__/__LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Pairwise cosine similarity between document term-count vectors.
// Launch contract: gridDim.x == docsize and blockDim.x == docsize, so that
// blockIdx.x selects the first document, threadIdx.x the second, and the
// flat output slot is threadIdx.x + blockIdx.x * blockDim.x.
// Diagonal entries (a document vs. itself) are set to -1 so later searches
// for the most similar document skip them.
// NOTE(review): main() launches this with docsize = 10000 threads per
// block, which exceeds the 1024-threads-per-block hardware limit — confirm
// the launch configuration actually succeeds.
__global__ void cosine_similarity(int *sparsemat, double *cosinematrix, int dicsize,int docsize)
{
double mul = 0.0, d_a = 0.0, d_b = 0.0 ;
int doc1index = blockIdx.x;
int doc2index = threadIdx.x;
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(doc1index == doc2index)
cosinematrix[index] = -1;
else {
// Rows of the docsize x dicsize term-count matrix for the two documents.
int *A = &sparsemat[doc1index*dicsize];
int *B = &sparsemat[doc2index*dicsize] ;
// One pass accumulates the dot product and both squared norms.
for(unsigned int i = 0; i < dicsize; ++i)
{
mul += A[i] * B[i] ;
d_a += A[i] * A[i] ;
d_b += B[i] * B[i] ;
}
cosinematrix[index] = mul / (sqrt(d_a) * sqrt(d_b)) ;
}
}
// Split `line` on single-space boundaries and append each token to
// `tokens`.  Consecutive spaces yield empty tokens; an empty line yields
// no tokens at all.
void getTokens(std::string line, std::vector<std::string> &tokens)
{
    std::istringstream stream(line);
    for (std::string tok; std::getline(stream, tok, ' '); )
        tokens.push_back(tok);
}
// Debug helper: print "size: N" on one line, then every string in `v`
// separated (and followed) by a single space, then a newline.
void printVector(std::vector<std::string> v)
{
    std::cout << "size: " << v.size() << std::endl;
    for (std::size_t i = 0; i < v.size(); ++i)
        std::cout << v[i] << " ";
    std::cout << std::endl;
}
// Build one row of the term-count matrix: zero all LENGTH_DICTIONARY
// counters, then increment the counter for each token id in tokens[1..].
// tokens[0] is intentionally skipped (presumably a document identifier —
// confirm against the input file format).
void feedTheMatrix(std::vector<std::string> tokens, int * mat)
{
    for (int slot = 0; slot < LENGTH_DICTIONARY; ++slot)
        mat[slot] = 0;
    for (std::size_t t = 1; t < tokens.size(); ++t)
        mat[std::stoi(tokens[t])] += 1;
}
// Print a row x col window of the term-count matrix; rows are laid out
// with a stride of LENGTH_DICTIONARY ints regardless of `col`.
void printTheMatrix(int *mat,int row,int col)
{
    for (int r = 0; r < row; ++r) {
        for (int c = 0; c < col; ++c)
            std::cout << mat[r * LENGTH_DICTIONARY + c] << " ";
        std::cout << std::endl;
    }
}
// Print a row x col window of the cosine-similarity matrix; rows are laid
// out with a stride of LENGTH_DOCS doubles regardless of `col`.
void printTheCosineMatrix(double *mat,int row,int col)
{
    for (int r = 0; r < row; ++r) {
        for (int c = 0; c < col; ++c)
            std::cout << mat[r * LENGTH_DOCS + c] << " ";
        std::cout << std::endl;
    }
}
// Return the index of the largest value among the first LENGTH_DOCS
// entries of cosinematrix.  The kernel pre-sets diagonal entries to -1, so
// with the strict > comparison a document never selects itself unless
// every entry is -1 (in which case index 0 is returned).
int findIndexofHighestSimilarity(double *cosinematrix)
{
    int best = 0;
    double bestValue = -1;
    for (int i = 0; i < LENGTH_DOCS; ++i) {
        if (cosinematrix[i] > bestValue) {
            bestValue = cosinematrix[i];
            best = i;
        }
    }
    return best;
}
// Driver: reads up to LENGTH_DOCS tokenized documents from sample10000.txt,
// builds a LENGTH_DOCS x LENGTH_DICTIONARY term-count matrix, computes the
// full pairwise cosine-similarity matrix on the GPU, times the computation,
// and reports the most similar document for each document.
int main()
{
    // Host/device copies of the term-count matrix.
    int *sparsemat;
    int *d_sparsemat;
    sparsemat = (int *)malloc(LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int));
    gpuErrchk( cudaMalloc((void **)&d_sparsemat,LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int)));

    // Read one document per line.
    ifstream inFile;
    inFile.open("./sample10000.txt");
    if (!inFile) {
        // Bug fix: the message named the wrong file (sample100.txt).
        cerr << "Unable to open file sample10000.txt";
        return -1;
    }
    string line;
    int linenum = 0;
    // Robustness fix: cap at LENGTH_DOCS lines so a longer input file
    // cannot overrun sparsemat.
    while (linenum < LENGTH_DOCS && getline(inFile,line)) {
        vector<string> tokens;
        getTokens(line,tokens);
        feedTheMatrix(tokens,&(sparsemat[linenum*LENGTH_DICTIONARY]));
        linenum++;
    }
    inFile.close();

    // LENGTH_DOCS x LENGTH_DOCS similarity matrix (host + device).
    double *cosinematrix;
    double *d_cosinematrix;
    cosinematrix = (double *)malloc(LENGTH_DOCS*LENGTH_DOCS*sizeof(double));
    gpuErrchk(cudaMalloc((void **)&d_cosinematrix,LENGTH_DOCS*LENGTH_DOCS*sizeof(double)));
    gpuErrchk(cudaMemcpy(d_sparsemat,sparsemat,LENGTH_DOCS*LENGTH_DICTIONARY*sizeof(int),cudaMemcpyHostToDevice));
    // NOTE(review): this uploads an *uninitialized* host buffer; it is only
    // harmless because the kernel overwrites every cell on success.
    gpuErrchk(cudaMemcpy(d_cosinematrix,cosinematrix,LENGTH_DOCS*LENGTH_DOCS*sizeof(double),cudaMemcpyHostToDevice));

    chrono::time_point<chrono::system_clock> start = chrono::system_clock::now();
    // NOTE(review): LENGTH_DOCS (10000) threads per block exceeds the
    // 1024-threads-per-block hardware limit, so this launch is expected to
    // fail with an invalid-configuration error; the check below now
    // surfaces it instead of silently reading a stale result buffer.
    cosine_similarity<<<LENGTH_DOCS,LENGTH_DOCS>>>(d_sparsemat,d_cosinematrix,LENGTH_DICTIONARY,LENGTH_DOCS);
    // Bug fix: kernel launch errors were previously ignored — launches do
    // not return a status, so cudaGetLastError() must be checked.
    gpuErrchk( cudaGetLastError());
    gpuErrchk( cudaDeviceSynchronize());
    gpuErrchk( cudaMemcpy(cosinematrix,d_cosinematrix,LENGTH_DOCS*LENGTH_DOCS*sizeof(double),cudaMemcpyDeviceToHost));
    chrono::time_point<chrono::system_clock> end = chrono::system_clock::now();
    chrono::duration<double> elapsed_sec = end - start;
    double count_sec = elapsed_sec.count();
    cout<<"Time(cosine_similarity_calculations/sec): "<<(LENGTH_DOCS*LENGTH_DOCS)/count_sec<<endl;

    // For each document, report its most similar other document.
    for(int i=0;i<LENGTH_DOCS;i++)
    {
        int similardoc = findIndexofHighestSimilarity(&cosinematrix[i*LENGTH_DOCS]);
        cout<<"doc "<<i<<" is similart to doc "<<similardoc<<endl;
    }

    free(sparsemat);
    free(cosinematrix);
    cudaFree(d_sparsemat);
    cudaFree(d_cosinematrix);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.