serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
11,401 |
// Double-buffered cellular-automaton step over an interleaved int array:
// slot 2*i + time holds cell i's current state, slot 2*i + (1 - time) the
// state being computed for the next step.
#define NOW(time, i) (2 * (i) + time)
#define FUTURE(time, i) (2 * (i) + (1 - time))
extern "C"
// One thread per cell: read left/self/right (p, q, r) for the current step
// and write the next state.  The update rule below yields 1 for the
// neighbourhoods 011, 100, 101, 111 and 0 otherwise.
// NOTE(review): the loads truncate int to char, so only the low byte of
// each cell is tested -- assumes cell values are 0/1; confirm.  Also, the
// i > 2 lower bound leaves cells 0..2 un-updated even though i - 1 is
// already valid from i = 1 -- confirm the boundary is intentional.
__global__ void move(int n, int time, int *a) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ((i > 2) && (2 * i + 1 < n)) {
char p = a[NOW(time, i - 1)];
char q = a[NOW(time, i)];
char r = a[NOW(time, i + 1)];
a[FUTURE(time, i)] = ((!p && q && r) || (p && !q && !r) || (p && !q && r) || (p && q && r));
}
} |
11,402 | extern "C"
// Compute C = A * B for row-major matrices.
// One thread per output element; launch with a 2-D grid/block where x
// covers columns and y covers rows of C.
// Preconditions (not checked): numAColumns == numBRows,
// numCRows == numARows, numCColumns == numBColumns.
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
                               int numAColumns, int numBRows, int numBColumns,
                               int numCRows, int numCColumns) {
    int column = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (column < numCColumns && row < numCRows) {
        // Dot product of row `row` of A with column `column` of B.
        float cValue = 0.0f;
        for (int i = 0; i < numAColumns; i++) {
            cValue += A[row * numAColumns + i] * B[i * numBColumns + column];
        }
        // The previous version stored a hard-coded debug constant here
        // (the real multiply was commented out); restore the computed result.
        C[row * numCColumns + column] = cValue;
    }
} |
11,403 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// Include cuda functions
#include <cuda.h>
#include <curand.h>
// Include timing functions
#include <sys/time.h>
#define MILLION 1000000.0
#define MAX_THREADS 1024
// Block-level sum reduction: each block sums up to blockDim.x elements of
// d_input into d_output[blockIdx.x]; the host adds the per-block partials.
// Requires blockDim.x <= MAX_THREADS (static shared array size) and a
// power-of-two blockDim.x so the halving loop pairs every element (the
// caller launches with MAX_THREADS = 1024, which satisfies both).
__global__ void reduction(float *d_input, float *d_output, int num_els)
{
// Allocate shared memory
__shared__ float smem_array[MAX_THREADS];
int tid = threadIdx.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
// first, each thread loads data into shared memory
// (out-of-range lanes load 0, the identity for the sum)
if (index < num_els) {
smem_array[tid] = d_input[index];
} else {
smem_array[tid] = 0;
}
// next, we perform binary tree reduction
for (int d = blockDim.x/2; d > 0; d /= 2) {
__syncthreads(); // ensure previous step completed
if (tid<d) smem_array[tid] += smem_array[tid+d];
}
// finally, first thread puts result into global memory
if (tid==0) d_output[blockIdx.x] = smem_array[0];
}
// Wall-clock time in seconds since the epoch, with microsecond resolution.
double wall_clock_time (void) {
    struct timeval now;
    gettimeofday (&now, NULL);
    return (1000000.0 * (double) now.tv_sec + (double) now.tv_usec) / 1000000.0;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Generate num_els N(0,1) samples on the device, sum them with the
// block-level reduction kernel, and finish the sum on the host.
int main( int argc, const char** argv)
{
    int num_els, num_threads, mem_size, num_blocks;
    float *h_data;
    float *d_input, *d_output;
    // timing variables
    double time_start, time_end;
    // Take user input for number of random numbers
    printf("Enter a number of random numbers: ");
    if (scanf("%d", &num_els) != 1 || num_els <= 0) {
        fprintf(stderr, "invalid element count\n");
        return 1;
    }
    num_threads = MAX_THREADS;
    mem_size = sizeof(float) * num_els;
    num_blocks = ((num_els % MAX_THREADS) == 0) ? (num_els / MAX_THREADS) : (num_els / MAX_THREADS + 1);
    // curandGenerateNormal requires an even sample count; round the
    // generated length up.  The reduction kernel only reads indices below
    // num_els, so the extra sample (if any) is ignored.
    int gen_els = (num_els % 2 == 0) ? num_els : num_els + 1;
    // host buffer receives the per-block partial sums (num_blocks floats)
    h_data = (float*) malloc(mem_size);
    // allocate device memory input and output arrays
    cudaMalloc((void**)&d_input, sizeof(float) * gen_els);
    cudaMalloc((void**)&d_output, (num_blocks * sizeof(float)));
    // Use cuRAND to generate the input data directly on the device
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerateNormal(gen, d_input, gen_els, 0.0f, 1.0f);
    // Time the kernel.  Launches are asynchronous, so synchronize before
    // reading the clock -- the previous version timed only the launch.
    time_start = wall_clock_time ( );
    reduction<<<num_blocks,num_threads>>>(d_input,d_output, num_els);
    cudaDeviceSynchronize();
    time_end = wall_clock_time ( );
    // copy per-block partial sums back and finish the reduction on the host
    cudaMemcpy(h_data, d_output, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 1; i < num_blocks; i++) {
        h_data[0] += h_data[i];
    }
    // check results
    printf(" process time = %e s\n", time_end - time_start);
    printf("Total sum: %f\n", h_data[0]);
    // sample mean of N(0,1) draws; should approach 0 for large num_els
    printf("reduction error = %f\n",h_data[0]/num_els);
    // cleanup memory
    free(h_data);
    cudaFree(d_input);
    cudaFree(d_output);
    curandDestroyGenerator(gen);
    // CUDA exit -- needed to flush printf write buffer
    cudaDeviceReset();
}
|
11,404 | #include <stdio.h>
// Print a greeting (no trailing newline, matching the original output).
int main(int argc, char** argv){
    (void)argc;
    (void)argv;
    printf("Hello World!");
    return 0;
}
|
11,405 | // source: http://cacs.usc.edu/education/cs596/src/cuda/pi.cu
// Using CUDA device to calculate pi
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <getopt.h>
#include <cuda.h>
#define NUM_BLOCK 30 // Number of thread blocks
#define NUM_THREAD 8 // Number of threads per block
#define NBIN 10000000 // Number of bins
#define PI 3.1415926535 // known value of pi
int tid;
float pi = 0;
double pi_d = 0;
// Kernel that executes on the CUDA device
// Single-precision midpoint-rule integration of 4/(1+x^2) over [0,1]:
// each thread accumulates every (nthreads*nblocks)-th bin into sum[idx].
// Literals carry the f suffix so the arithmetic stays in single precision
// (the previous 0.5/4.0/1.0 promoted every operation to double).
__global__ void cal_pi(float *sum, int nbin, float step, int nthreads, int nblocks) {
	int i;
	float x;
	int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
	for (i=idx; i< nbin; i+=nthreads*nblocks) {
		x = (i+0.5f)*step;
		sum[idx] += 4.0f/(1.0f+x*x);
	}
}
// Double-precision midpoint-rule integration of 4/(1+x^2) over [0,1];
// thread idx accumulates every (nthreads*nblocks)-th bin into sum[idx].
__global__ void cal_pi_d(double *sum, int nbin, double step, int nthreads, int nblocks) {
	const int idx = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
	const int stride = nthreads * nblocks;
	for (int i = idx; i < nbin; i += stride) {
		double x = (i + 0.5) * step; // bin midpoint
		sum[idx] += 4.0 / (1.0 + x * x);
	}
}
// Main routine that executes on the host
// Host driver: -d selects the double-precision kernel, default is single.
int main(int argc, char **argv) {
	int dp = 0;
	int c;
	while((c = getopt(argc, argv, "d")) != -1){
		switch(c){
		case 'd':
			dp = 1;
			printf("Run with double presision\n");
			break;
		default:
			dp = 0;
			printf("Run with single presision\n");
			break;
		}
	}
	dim3 dimGrid(NUM_BLOCK,1,1); // Grid dimensions
	dim3 dimBlock(NUM_THREAD,1,1); // Block dimensions
	if(!dp){
		float *sumHost, *sumDev; // Pointer to host & device arrays
		float step = 1.0/NBIN; // Step size
		size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float); //Array memory size
		sumHost = (float *)malloc(size); // Allocate array on host
		cudaMalloc((void **) &sumDev, size); // Allocate array on device
		cudaMemset(sumDev, 0, size); // zero the per-thread partial sums
		cal_pi <<<dimGrid, dimBlock>>> (sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel
		// Retrieve result (cudaMemcpy synchronizes with the kernel)
		cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost);
		for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
			pi += sumHost[tid];
		pi *= step;
		// Print results
		printf("PI = %.10f\n",pi);
		// fabs, not abs: abs() takes an int and truncated the error to 0/1
		printf("Error = %.10f\n",fabs(PI-pi));
		// Cleanup
		free(sumHost);
		cudaFree(sumDev);
	}else{
		double *sumHost, *sumDev; // Pointer to host & device arrays
		double step = 1.0/NBIN; // Step size
		size_t size = NUM_BLOCK*NUM_THREAD*sizeof(double); //Array memory size
		sumHost = (double *)malloc(size); // Allocate array on host
		cudaMalloc((void **) &sumDev, size); // Allocate array on device
		cudaMemset(sumDev, 0, size);
		cal_pi_d <<<dimGrid, dimBlock>>> (sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel
		cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost);
		for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
			pi_d += sumHost[tid];
		pi_d *= step;
		// Print results
		printf("PI = %.10lf\n",pi_d);
		printf("Error = %.10lf\n",fabs(PI-pi_d));
		// Cleanup
		free(sumHost);
		cudaFree(sumDev);
	}
	return 0;
}
|
11,406 | #include <iostream>
#include <stdlib.h>
using namespace std;
#define CUDA true
__global__ void multiadd(int N, float a, float* x, float* y);
// SAXPY smoke test: y = 3*x + y on either GPU (CUDA) or CPU, then verify
// every element equals 5.0 (exact in float for these small integers).
int main(int argc, char* argv[]){
	cout << "This is test CUDA in visual studio!" << endl;
	int N = 1 << 20;
	int size = sizeof(float) * N;
	float* x = (float*)malloc(size);
	float* y = (float*)malloc(size);
	float *dx, *dy;
	cudaMalloc(&dx, size);
	cudaMalloc(&dy, size);
	for (int i = 0; i < N; i++){
		x[i] = 1.0;
		y[i] = 2.0;
	}
	if (CUDA){
		cudaMemcpy(dx, x, size, cudaMemcpyHostToDevice);
		cudaMemcpy(dy, y, size, cudaMemcpyHostToDevice);
		// one thread per element; the D2H copy below synchronizes
		multiadd <<< (N + 255) / 256, 256 >>>(N, 3, dx, dy);
		cudaMemcpy(y, dy, size, cudaMemcpyDeviceToHost);
	}
	else{
		for (int i = 0; i < N; i++){
			y[i] = 3 * x[i] + y[i];
		}
	}
	int counter = 0;
	for (int i = 0; i < N; i++){
		if (y[i] != 5.0) counter++;
	}
	cout << "Error number is: " << counter << endl;
	// Release device and host buffers (the previous version leaked all four).
	cudaFree(dx);
	cudaFree(dy);
	free(x);
	free(y);
	return 0;
}
// SAXPY kernel: y[i] <- a*x[i] + y[i], one element per thread, guarded tail.
__global__ void multiadd(int N, float a, float* x, float* y){
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= N)
		return;
	y[idx] = a * x[idx] + y[idx];
}
|
11,407 | #include "includes.h"
extern "C"
// One thread per DP-table row i of a Levenshtein edit-distance sweep over
// strings a and b; costs holds the running row, nw the north-west value.
// NOTE(review): all threads share the same costs[] row with no
// synchronization between rows -- confirm the intended wavefront ordering
// is enforced by the caller.  costs is char*, so distances above 127 wrap.
__global__ void leven(char* a, char* b, char* costs, int size) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if(i > 0 && i < size) {
		costs[0] = i;
		int nw = i - 1;
		for(int j = 1; j <= size; j++) {
			// insertion/deletion: 1 + min(costs[j], costs[j-1])
			int firstMin = costs[j] < costs[j-1] ? costs[j] : costs[j-1];
			int insDel = 1 + firstMin;
			// substitution: free on a character match, else 1 (via nw + 1)
			int subst = (a[i - 1] == b[j - 1]) ? nw : nw + 1;
			// The original single-line expression was mangled by operator
			// precedence (comparison fed into nested ternaries) and did not
			// compute this minimum; restore min(insDel, subst).
			int cj = insDel < subst ? insDel : subst;
			nw = costs[j];
			costs[j] = cj;
		}
	}
} |
11,408 | #include <iostream>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#define NS_PER_SEC (1000*1000*1000)
using namespace std;
int base[3][4];
int base7[3][7];
int tranposeBase7[7][3];
int base8[3][8];
int tranposeBase8[8][3];
int base11[3][11];
int base12[3][12];
int base13[3][13];
int base14[3][14];
int board7[7][7];
int board8[8][8];
// Nanoseconds on the monotonic clock (immune to wall-clock adjustments).
inline unsigned long int monotonicTime(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * (1000*1000*1000) + ts.tv_nsec;
}
// Populates the global lookup tables used by solveBoard(): hand-built
// 3x4/3x7/3x8/3x13 base blocks, their transposes, derived 3x11/3x12/3x14
// blocks, and the 7x7/8x8 corner boards.  Must be called before solveBoard.
void loadData()
{
//base 3*4
base[0][0]=1;
base[0][1]=4;
base[0][2]=7;
base[0][3]=10;
base[1][0]=8;
base[1][1]=11;
base[1][2]=2;
base[1][3]=5;
base[2][0]=3;
base[2][1]=6;
base[2][2]=9;
base[2][3]=12;
//base 3*7
base7[0][0]=1;
base7[0][1]=14;
base7[0][2]=17;
base7[0][3]=20;
base7[0][4]=9;
base7[0][5]=4;
base7[0][6]=7;
base7[1][0]=16;
base7[1][1]=19;
base7[1][2]=12;
base7[1][3]=3;
base7[1][4]=6;
base7[1][5]=21;
base7[1][6]=10;
base7[2][0]=13;
base7[2][1]=2;
base7[2][2]=15;
base7[2][3]=18;
base7[2][4]=11;
base7[2][5]=8;
base7[2][6]=5;
//Tranpose base 7
for(int x = 0; x < 3; x++)
{
for(int y = 0; y < 7 ; y++)
{
tranposeBase7[y][x] = base7[x][y];
}
}
//base 3*8
base8[0][0]=1;
base8[0][1]=16;
base8[0][2]=3;
base8[0][3]=22;
base8[0][4]=19;
base8[0][5]=12;
base8[0][6]=7;
base8[0][7]=10;
base8[1][0]=4;
base8[1][1]=21;
base8[1][2]=18;
base8[1][3]=15;
base8[1][4]=6;
base8[1][5]=9;
base8[1][6]=24;
base8[1][7]=13;
base8[2][0]=17;
base8[2][1]=2;
base8[2][2]=5;
base8[2][3]=20;
base8[2][4]=23;
base8[2][5]=14;
base8[2][6]=11;
base8[2][7]=8;
//Tranpose base 8
for(int x = 0; x < 3; x++)
{
for(int y = 0; y < 8 ; y++)
{
tranposeBase8[y][x] = base8[x][y];
}
}
//base 3*11 = 3*4 block followed by a 3*7 block shifted by 12 (= 3*4)
for(int x = 0; x < 3; x++)
{
for(int y = 0; y < 11 ; y++)
{
if(y < 4)
base11[x][y] = base[x][y];
else
base11[x][y] = base7[x][y-4]+3*4;
}// end of inner loop
}
//base 3*12 = 3*4 block followed by a 3*8 block shifted by 12 (= 3*4)
for(int x = 0; x < 3; x++)
{
for(int y =0; y<12; y++)
{
if(y<4)
base12[x][y] = base[x][y];
else
base12[x][y] = base8[x][y-4]+3*4;
}
}
//board 3*13
base13[0][0]=1;
base13[0][1]=4;
base13[0][2]=13;
base13[0][3]=16;
base13[0][4]=21;
base13[0][5]=8;
base13[0][6]=23;
base13[0][7]=18;
base13[0][8]=35;
base13[0][9]=38;
base13[0][10]=27;
base13[0][11]=32;
base13[0][12]=29;
base13[1][0]=12;
base13[1][1]=15;
base13[1][2]=6;
base13[1][3]=3;
base13[1][4]=10;
base13[1][5]=17;
base13[1][6]=20;
base13[1][7]=37;
base13[1][8]=24;
base13[1][9]=33;
base13[1][10]=30;
base13[1][11]=39;
base13[1][12]=26;
base13[2][0]=5;
base13[2][1]=2;
base13[2][2]=11;
base13[2][3]=14;
base13[2][4]=7;
base13[2][5]=22;
base13[2][6]=9;
base13[2][7]=34;
base13[2][8]=19;
base13[2][9]=36;
base13[2][10]=25;
base13[2][11]=28;
base13[2][12]=31;
//base 3*14 = two 3*7 blocks, the second shifted by 21 (= 3*7)
for(int x = 0; x < 3; x++)
{
for(int y =0; y<14; y++)
{
if(y < 7)
base14[x][y] = base7[x][y];
else
base14[x][y] = base7[x][y-7]+3*7;
}
}
// load board 7
board7[0][0] = 1;
board7[0][1] = 26;
board7[0][2] = 11;
board7[0][3] = 46;
board7[0][4] = 29;
board7[0][5] = 24;
board7[0][6] = 9;
board7[1][0] = 12;
board7[1][1] = 45;
board7[1][2] = 28;
board7[1][3] = 25;
board7[1][4] = 10;
board7[1][5] = 47;
board7[1][6] = 30;
board7[2][0] = 27;
board7[2][1] = 2;
board7[2][2] = 35;
board7[2][3] = 44;
board7[2][4] = 49;
board7[2][5] = 8;
board7[2][6] = 23;
board7[3][0] = 40;
board7[3][1] = 13;
board7[3][2] = 42;
board7[3][3] = 19;
board7[3][4] = 36;
board7[3][5] = 31;
board7[3][6] = 48;
board7[4][0] = 3;
board7[4][1] = 16;
board7[4][2] = 39;
board7[4][3] = 34;
board7[4][4] = 43;
board7[4][5] = 22;
board7[4][6] = 7;
board7[5][0] = 14;
board7[5][1] = 41;
board7[5][2] = 18;
board7[5][3] = 5;
board7[5][4] = 20;
board7[5][5] = 37;
board7[5][6] = 32;
board7[6][0] = 17;
board7[6][1] = 4;
board7[6][2] = 15;
board7[6][3] = 38;
board7[6][4] = 33;
board7[6][5] = 6;
board7[6][6] = 21;
//Board 8
board8[0][0] = 1;
board8[0][1] = 46;
board8[0][2] = 15;
board8[0][3] = 24;
board8[0][4] = 59;
board8[0][5] = 28;
board8[0][6] = 13;
board8[0][7] = 26;
board8[1][0] = 16;
board8[1][1] = 23;
board8[1][2] = 58;
board8[1][3] = 51;
board8[1][4] = 14;
board8[1][5] = 25;
board8[1][6] = 64;
board8[1][7] = 29;
board8[2][0] = 47;
board8[2][1] = 2;
board8[2][2] = 45;
board8[2][3] = 54;
board8[2][4] = 63;
board8[2][5] = 60;
board8[2][6] = 27;
board8[2][7] = 12;
board8[3][0] = 22;
board8[3][1] = 17;
board8[3][2] = 52;
board8[3][3] = 57;
board8[3][4] = 50;
board8[3][5] = 55;
board8[3][6] = 30;
board8[3][7] = 61;
board8[4][0] = 3;
board8[4][1] = 48;
board8[4][2] = 21;
board8[4][3] = 44;
board8[4][4] = 53;
board8[4][5] = 62;
board8[4][6] = 11;
board8[4][7] = 34;
board8[5][0] = 18;
board8[5][1] = 39;
board8[5][2] = 42;
board8[5][3] = 49;
board8[5][4] = 56;
board8[5][5] = 33;
board8[5][6] = 8;
board8[5][7] = 31;
board8[6][0] = 41;
board8[6][1] = 4;
board8[6][2] = 37;
board8[6][3] = 20;
board8[6][4] = 43;
board8[6][5] = 6;
board8[6][6] = 35;
board8[6][7] = 10;
board8[7][0] = 38;
board8[7][1] = 19;
board8[7][2] = 40;
board8[7][3] = 5;
board8[7][4] = 36;
board8[7][5] = 9;
board8[7][6] = 32;
board8[7][7] = 7;
}
int blockOfFour(int n) // getting num blocks of four in each stripe.
{
// Number of board columns covered by 3x4 base blocks.  Boards narrower
// than 11 use none; otherwise the residue class of n mod 4 picks how many
// columns must be left for the special 7/8/13/14-wide base block.
if (n < 11)
return 0;
static const int leftover[4] = {8, 13, 14, 7}; // indexed by n % 4
return n - leftover[n % 4];
}
// Fills an n x n board by tiling the precomputed base blocks from
// loadData() according to n mod 3 and n mod 4, then patching the last
// stripe and corner with board7/board8 when n is not divisible by 3.
// NOTE(review): uses a C99-style VLA (a GNU extension in C++); the board
// is computed and discarded -- the printing loop at the end is commented out.
void solveBoard(int n)
{
int board[n][n]; // initialize the board
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
board[i][j] = 0;
int BaseOfFour = blockOfFour(n)/4; // number blocks of 3*4
switch(n % 3)
{
case 0: // for all board size that is divisibe by 3
for (int x = 0; x < 3; x++)
{
for(int y = 0; y < n; y++)
{
if(y < blockOfFour(n))
{
int temp = y/4;
for(int i = 0; i < n; i+= 6) // parrallel here parrallel here i+6 to get stripe without flipping order
{
int stride = i/3;
board[x+i][y] = base[x][y%4]+ temp*12 + 3*n*stride;
if(x+3+i < n)
board[x+3+i][n-y-1] = base[x][y%4]+ temp*12 + 3*n*(stride+1);
}
}
else
{
for(int i = 0 ; i < n; i+= 6) // parallel here
{
int stride = i/3;
if(n % 4 == 0)
{
board[x+i][y] = base8[x][y-blockOfFour(n)]+BaseOfFour*12 + 3*n*stride;
board[x+3+i][n-y-1] = base8[x][y-blockOfFour(n)]+ BaseOfFour*12 + 3*n*(stride+1); //using base 8
}
if(n % 4 == 1)
{
board[x+i][y] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n)
board[x+3+i][n-y-1] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1); //using base 13
}
if(n % 4 == 2)
{
board[x+i][y] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n)
board[x+3+i][n-y-1] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1); // using base 14
}
if(n % 4 == 3)
{
board[x+i][y] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n)
board[x+3+i][n-y-1] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1); // using base 7
}
}
}
}
}
break; // end of first case
case 1:
// n % 3 == 1: tile the top n-7 rows as above, then fill the bottom
// 7-row stripe with transposed 3x7 blocks and the 7x7 corner board.
for (int x = 0; x < 3; x++)
{
for(int y = 0; y < n; y++)
{
if(y < blockOfFour(n))
{
int temp = y/4; // temp get index of blockOf4
for(int i = 0; i < n-7; i+= 6) //
{
int stride = i/3;
board[x+i][y] = base[x][y%4]+ temp*12 + 3*n*stride;
if(x+3+i < n-7)
board[x+3+i][n-y-1] = base[x][y%4]+ temp*12 + 3*n*(stride+1);
}
}// end of if
else
{
for(int i = 0 ; i < n-7; i+= 6) // parallel here
{
int stride = i/3;
if(n % 4 == 0)
{
board[x+i][y] = base8[x][y-blockOfFour(n)]+BaseOfFour*12 + 3*n*stride;
if(x+3+i < n-7)// Don't want to get in the 7 stride
board[x+3+i][n-y-1] = base8[x][y-blockOfFour(n)]+ BaseOfFour*12 + 3*n*(stride+1);
}
if(n % 4 == 1)
{
board[x+i][y] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n-7) // Don't want to get in the 7 stride
board[x+3+i][n-y-1] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
}
if(n % 4 == 2)
{
board[x+i][y] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n-7) // Don't want to get in the 7 stride
board[x+3+i][n-y-1] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
}
if(n % 4 == 3)
{
board[x+i][y] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n-7) // Don't want to get in the 7 stride
board[x+3+i][n-y-1] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
}
}
}// end of else
}// end of for y
}// end of big 4
//Handling 7*3(k-1) here 2 case
if(n%2 != 0)
{
for(int x = 0; x < 7; x++)
{
for(int y = 0; y < n-7 ; y++) // NOTE !!!!
{
if(y%6 == 0) //0,6,12....
{
int temp = y/6;
board[n+x-7][y] = tranposeBase7[x][y%3]+ 2*temp*21 + (n-7)*n; // minus 7 because we want to start at line n-7
board[n+x-7][y+1] = tranposeBase7[x][y%3+1]+ 2*temp*21 + (n-7)*n;
board[n+x-7][y+2] = tranposeBase7[x][y%3+2]+ 2*temp*21 + (n-7)*n;
}
else if( y%3 == 0) //3,9,15,21...
{
int temp = y/6;
board[n-x-1][y] = tranposeBase7[x][y%3]+ 21 + 2*temp*21 + (n-7)*n;
board[n-x-1][y+1] = tranposeBase7[x][y%3+1]+ 21 + 2*temp*21+ (n-7)*n;
board[n-x-1][y+2] = tranposeBase7[x][y%3+2]+ 21 + 2*temp*21+ (n-7)*n;
}
}
}
//handling 7*7 chessboard here
for(int x = 0; x < 7; x++)
{
for(int y = 0; y < 7 ; y++)
{
board[n+x-7][n+y-7] = board7[x][y] + (n*n-49);
}
}
} else
{
// even n: walk the bottom stripe right-to-left (mirrored layout)
for(int x = 0; x < 7; x++)
{
for(int y = n; y > 7 ; y--) // NOTE !!!!
{
if((n-y)%6 == 0) // n-y because we start from ending
{
int temp = (n-y)/6;
board[n+x-7][y-1] = tranposeBase7[x][(n-y)%3] + 2*temp*21 + (n-7)*n;
board[n+x-7][y-2] = tranposeBase7[x][(n-y)%3+1] + 2*temp*21 + (n-7)*n; //+ 2*temp*21 + (n-7)*n
board[n+x-7][y-3] = tranposeBase7[x][(n-y)%3+2] + 2*temp*21 + (n-7)*n;
}
else if((n-y)%3 == 0) // n-y because we start from ending
{
int temp = (n-y)/6;
board[n-x-1][y-1] = tranposeBase7[x][(n-y)%3] + 21 + 2*temp*21 + (n-7)*n;
board[n-x-1][y-2] = tranposeBase7[x][(n-y)%3+1]+ 21 + 2*temp*21 + (n-7)*n;
board[n-x-1][y-3] = tranposeBase7[x][(n-y)%3+2]+ 21 + 2*temp*21 + (n-7)*n;
}
}
}
//handling 7*7 chessboard here
for(int x = 6; x >= 0; x--)
{
for(int y = 6; y >=0 ; y--)
{
board[n+x-7][y] = board7[6-x][6-y] + (n*n-49);
//cout << "testing" << endl;
}
}
}// end of else
break;
case 2:
// n % 3 == 2: same scheme with an 8-row bottom stripe and board8 corner.
for (int x = 0; x < 3; x++)
{
for(int y = 0; y < n; y++)
{
if(y < blockOfFour(n))
{
int temp = y/4;
for(int i = 0; i < n-8; i+= 6)
{
int stride = i/3;
board[x+i][y] = base[x][y%4]+ temp*12 + 3*n*stride;
if(x+3+i < n-8)
board[x+3+i][n-y-1] = base[x][y%4]+ temp*12 + 3*n*(stride+1);
}
}// end of if
else
{
for(int i = 0 ; i < n-8; i+= 6)
{
int stride = i/3;
if(n % 4 == 0)
{
board[x+i][y] = base8[x][y-blockOfFour(n)]+BaseOfFour*12 + 3*n*stride;
if(x+3+i < n-8)// Don't want to get in the 8 stride
board[x+3+i][n-y-1] = base8[x][y-blockOfFour(n)]+ BaseOfFour*12 + 3*n*(stride+1);
}
if(n % 4 == 1)
{
board[x+i][y] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n-8) // Don't want to get in the 8 stride
board[x+3+i][n-y-1] = base13[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
}
if(n % 4 == 2)
{
board[x+i][y] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n-8) // Don't want to get in the 8 stride
board[x+3+i][n-y-1] = base14[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
}
if(n % 4 == 3)
{
board[x+i][y] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*stride;
if(x+3+i < n-8) // Don't want to get in the 8 stride
board[x+3+i][n-y-1] = base7[x][y-blockOfFour(n)]+ BaseOfFour * 12 + 3*n*(stride+1);
}
}
}// end of else
}// end of for y
}// end of big 4
// Handling 8*n stride here
if(n%2 == 0)
{
for(int x = 0; x < 8; x++)
{
for(int y = 0; y < n-8 ; y++) // NOTE !!!!
{
if(y%6 == 0) //0,6,12....
{
int temp = y/6;
board[n+x-8][y] = tranposeBase8[x][y%3]+ 2*temp*24 + (n-8)*n; // minus 8 because we want to start at line n-8
board[n+x-8][y+1] = tranposeBase8[x][y%3+1]+ 2*temp*24 + (n-8)*n;
board[n+x-8][y+2] = tranposeBase8[x][y%3+2]+ 2*temp*24 + (n-8)*n;
}
else if( y%3 == 0) //3,9,15,21...
{
int temp = y/6;
board[n-x-1][y] = tranposeBase8[x][y%3]+ 24 + 2*temp*24 + (n-8)*n;
board[n-x-1][y+1] = tranposeBase8[x][y%3+1]+ 24 + 2*temp*24+ (n-8)*n;
board[n-x-1][y+2] = tranposeBase8[x][y%3+2]+ 24 + 2*temp*24+ (n-8)*n;
}
}
}
//handling 8*8 chessboard here
for(int x = 0; x < 8; x++)
{
for(int y = 0; y < 8 ; y++)
{
board[n+x-8][n+y-8] = board8[x][y] + (n*n-64);
}
}
} else
{
for(int x = 0; x < 8; x++)
{
for(int y = n; y > 8 ; y--) // NOTE !!!!
{
if((n-y)%6 == 0) // n-y because we start from ending
{
int temp = (n-y)/6; // getting index of block 8*3
board[n+x-8][y-1] = tranposeBase8[x][(n-y)%3] + 2*temp*24 + (n-8)*n;
board[n+x-8][y-2] = tranposeBase8[x][(n-y)%3+1] + 2*temp*24 + (n-8)*n;
board[n+x-8][y-3] = tranposeBase8[x][(n-y)%3+2] + 2*temp*24 + (n-8)*n;
}
else if((n-y)%3 == 0) // n-y because we start from ending
{
int temp = (n-y)/6; // getting index of block 8*3
board[n-x-1][y-1] = tranposeBase8[x][(n-y)%3] + 24 + 2*temp*24 + (n-8)*n; // note 24 = 3*8
board[n-x-1][y-2] = tranposeBase8[x][(n-y)%3+1]+ 24 + 2*temp*24 + (n-8)*n;
board[n-x-1][y-3] = tranposeBase8[x][(n-y)%3+2]+ 24 + 2*temp*24 + (n-8)*n;
}
}
}
//handling 7*7 chessboard here
// NOTE(review): this corner copy indexes board[n+x-7] while reading the
// 8x8 board8 -- the 7 looks like a copy-paste from the board7 case above
// and may be an off-by-one (expected n+x-8); confirm against the even-n
// branch, which uses board[n+x-8][n+y-8].
for(int x = 7; x >= 0; x--)
{
for(int y = 7; y >=0 ; y--)
{
board[n+x-7][y] = board8[7-x][7-y] + (n*n-64);
//cout << "testing" << endl;
}
}
}// end of else
} // end of switch
// print out the thingy
/*for (int x = 0; x < n; x++) {
for (int y = 0; y < n; y++)
cout << board[x][y]<< "\t";
cout << endl;
}*/
}
// Loads the lookup tables, reads the board size, and times solveBoard()
// on the CPU using the monotonic clock.
int main()
{
loadData();
int n;
cout << "Enter size of board:";
cin >> n;
const unsigned long int start = monotonicTime();
solveBoard(n);
const unsigned long int elapsed = monotonicTime() - start;
fprintf(stderr, "Time to perform operation on CPU = %ld ns\n", elapsed);
return 0;
}
|
11,409 | #include "includes.h"
// Block-wide minimum: lowers *val to the smallest non-negative entry of
// source[0..blockDim.x) that is below the current *val.
// The previous version raced plain stores to temp[0] (last writer won,
// not the minimum) and could read temp[0] uninitialized when no thread
// qualified; seed it and use atomicMin instead.
__global__ void minValue(int *source, int *val){
	__shared__ int temp[1];
	if (threadIdx.x == 0) temp[0] = *val;   // seed with the current bound
	__syncthreads();
	int currentValue = source[threadIdx.x];
	if (currentValue > -1 && currentValue < *val){
		atomicMin(&temp[0], currentValue);  // race-free candidate update
	}
	__syncthreads();
	if (threadIdx.x == 0) *val = temp[0];   // single writer publishes result
} |
11,410 | #include "gMat.cuh"
#include "real.h"
#include <assert.h>
#include <iostream>
// Multiply a known 2x2 by a 2x3 matrix via prod() and check every entry.
// w determines the shared-memory byte count handed to prod().
void basicMultTest(int w){
	std::vector<real> lhs={1,2,3,4};      // A = [1 2; 3 4]
	std::vector<real> rhs={2,0,0,2,0,2};  // B = [2 0 0; 2 0 2]
	std::vector<real> out={0,0,0,0,0,0};  // C starts zeroed
	gMat A{lhs,2,2};
	gMat B{rhs,2,3};
	gMat C{out,2,3};
	int mem = 2 * w * w * sizeof(real);   // shared-memory bytes for the kernel
	prod(A, B, C, mem);
	std::cout << C << std::endl;
	// Expected product: [6 0 4; 14 0 8]
	assert( C.entry(0,0) == 6 );
	assert(C.entry(0,1) == 0 );
	assert(C.entry(0,2) == 4 );
	assert(C.entry(1,0) == 14 );
	assert(C.entry(1,1) == 0 );
	assert(C.entry(1,2) == 8 );
	A.cleanup();
	B.cleanup();
	C.cleanup();
}
// Same fixed 2x2 * 2x3 product, exercised through prodv2; w and s are
// forwarded unchanged (their meaning is defined in gMat.cuh).
void basicMultTestv2(int w, int s){
	std::vector<real> lhs={1,2,3,4};      // A = [1 2; 3 4]
	std::vector<real> rhs={2,0,0,2,0,2};  // B = [2 0 0; 2 0 2]
	std::vector<real> out={0,0,0,0,0,0};  // C starts zeroed
	gMat A{lhs,2,2};
	gMat B{rhs,2,3};
	gMat C{out,2,3};
	prodv2(A, B, C, w, s);
	std::cout << C << std::endl;
	// Expected product: [6 0 4; 14 0 8]
	assert( C.entry(0,0) == 6 );
	assert(C.entry(0,1) == 0 );
	assert(C.entry(0,2) == 4 );
	assert(C.entry(1,0) == 14 );
	assert(C.entry(1,1) == 0 );
	assert(C.entry(1,2) == 8 );
	A.cleanup();
	B.cleanup();
	C.cleanup();
}
// Runs the smallest multiplication test; the remaining width/size
// combinations were disabled by the author and are kept for reference.
int main(){
basicMultTest(1);
/* basicMultTest(1);
basicMultTest(2);
basicMultTestv2(16,1);
basicMultTestv2(1,2);
basicMultTestv2(1,3);
basicMultTestv2(2,1);
basicMultTestv2(2,2);
*/
std::cout << "SUCCESS!" << std::endl;
}
|
11,411 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#define restrict __restrict__
// Abort the process with a diagnostic when a CUDA call reports failure.
void check_error(cudaError_t err, const char *msg)
{
	if (err == cudaSuccess)
		return;
	fprintf(stderr, "%s : errore %d (%s)\n",
		msg, err, cudaGetErrorString(err));
	exit(err);
}
float runtime;  // elapsed milliseconds of the last measured phase
// Print the elapsed time between two recorded events and the effective
// bandwidth for `bytes` moved.
void PrintStats(size_t bytes, cudaEvent_t before, cudaEvent_t after, const char *msg)
{
	check_error(cudaEventElapsedTime(&runtime, before, after), msg);
	// runtime is in milliseconds, so GB/s = bytes / (runtime * 1e6).
	// (The previous divisor 1024*1024 matched neither MiB/s nor GB/s.)
	printf("%s %gms, %g GB/s\n", msg, runtime, bytes / runtime / 1.0e6);
}
// Fill input with 1..numels, one element per thread, with a guarded tail.
__global__ void init(int * restrict input, int numels)
{
	const int gid = threadIdx.x + blockIdx.x * blockDim.x;
	if (gid >= numels)
		return;
	input[gid] = gid + 1;
}
/*shared memory da utilizzare nei medoti di riduzione */
/* shared memory used by the reduction kernel (sized at launch) */
extern __shared__ int sPartial[];
// Grid-stride minimum reduction: each block writes its partial minimum to
// output[blockIdx.x].  Launch with blockDim.x * sizeof(int) dynamic shared
// memory; blockDim.x must be a power of two for the halving loop.
__global__ void
reduction(int* restrict input, int* restrict output, int numels)
{
	int gid = threadIdx.x + blockIdx.x*blockDim.x;
	// Start from the identity for min.  The previous version read
	// input[gid] before any bounds check, an out-of-bounds access whenever
	// gid >= numels (e.g. the final 1-block pass with numels < blockDim.x).
	int min = 2147483647; /* INT_MAX */
	//Fase 1: pre-riduzione e riempimento shared-memory
	while (gid < numels)
	{
		if(input[gid] < min)
			min = input[gid];
		gid += gridDim.x*blockDim.x;
	}
	const int lid = threadIdx.x;
	sPartial[lid] = min;
	//Fase 2: riduzione in shared memory (tree reduction)
	int stride = (blockDim.x)/2;
	while (stride > 0)
	{
		__syncthreads();
		if (lid < stride && sPartial[lid + stride] < sPartial[lid]) {
			sPartial[lid] = sPartial[lid + stride];
		}
		stride /= 2;
	}
	/* Fase 3: salvataggio del risultato del blocco in memoria globale */
	if (lid == 0){
		output[blockIdx.x] = sPartial[0];
	}
}
// The minimum of the sequence 1..numels is always 1, whatever numels is.
bool check_result(int result, int numels)
{
	(void)numels;
	return 1 == result;
}
// Initialise numels integers on the device, find their minimum with a
// two-pass reduction, and report timing/bandwidth for every phase.
int main(int argc, char *argv[])
{
	int numels;
	if (argc > 1) {
		numels = atoi(argv[1]);
	} else {
		fprintf(stderr, "inserire numero di elementi\n");
		exit(1);
	}
	int h_output;
	int* d_input, *d_output, *d_result;
	size_t numbytes = numels*sizeof(int);
	check_error(cudaMalloc(&d_input, numbytes), "alloc d_input");
	// one before/after event pair per timed phase
	cudaEvent_t before_init, before_reduction, before_final_reduction, before_download;
	cudaEvent_t after_init, after_reduction, after_final_reduction, after_download;
	check_error(cudaEventCreate(&before_init), "create before_init cudaEvent");
	check_error(cudaEventCreate(&before_reduction), "create before_reduction cudaEvent");
	check_error(cudaEventCreate(&before_final_reduction), "create before_final_reduction cudaEvent");
	check_error(cudaEventCreate(&before_download), "create before_download cudaEvent");
	check_error(cudaEventCreate(&after_init), "create after_init cudaEvent");
	check_error(cudaEventCreate(&after_reduction), "create after_reduction cudaEvent");
	check_error(cudaEventCreate(&after_final_reduction), "create after_final_reduction cudaEvent");
	check_error(cudaEventCreate(&after_download), "create after_download cudaEvent");
	const int blockSize = 32; //prova a modificare con numeri potenze del 2
	int numBlocks = (numels + blockSize - 1)/blockSize;
	cudaEventRecord(before_init);
	init<<<numBlocks, blockSize>>>(d_input, numels);
	cudaEventRecord(after_init);
	check_error(cudaMalloc(&d_output, numBlocks*sizeof(int)), "alloc d_output");
	check_error(cudaMalloc(&d_result, sizeof(int)), "alloc d_result");
	cudaEventRecord(before_reduction);
	reduction<<<numBlocks, blockSize, blockSize*sizeof(int)>>>(d_input, d_output, numels);
	cudaEventRecord(after_reduction);
	cudaEventRecord(before_final_reduction);
	// second pass: a single block reduces the per-block minima
	reduction<<<1, blockSize, blockSize*sizeof(int)>>>(d_output, d_result, numBlocks);
	cudaEventRecord(after_final_reduction);
	//copy result from Device to Host (recorded)
	cudaEventRecord(before_download);
	check_error(cudaMemcpy(&h_output, d_result, sizeof(int), cudaMemcpyDeviceToHost), "copy d_result");
	cudaEventRecord(after_download);
	check_error(cudaEventSynchronize(after_download), "sync cudaEvents");
	PrintStats(numels*sizeof(int), before_init, after_init, "time init");
	PrintStats((numels+numBlocks)*sizeof(int), before_reduction, after_reduction, "time reduction");
	PrintStats((numels+1)*sizeof(int), before_final_reduction, after_final_reduction, "time final_reduction");
	PrintStats(sizeof(int), before_download, after_download, "time download");
	if(!check_result(h_output, numels))
	{
		fprintf(stderr, "SBAGLIATO!\n");
		printf("nostro: %d invece che %d\n", h_output, 1);
		exit(1);
	}
	else
	{
		printf("risultato: %d == %d\n", h_output, 1);
	}
	// cleanup: free all device buffers (d_result was previously leaked)
	cudaFree(d_input);
	cudaFree(d_output);
	cudaFree(d_result);
	return 0;
}
|
11,412 | // includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// Funciones de alocacion para el vector que representa la Matriz
// Allocate a node*n float buffer on the host; aborts via assert if malloc
// fails (matching the file's error-handling convention).
float *allocaVector(int node, int n) {
	float *vec = (float *) malloc(node * n * sizeof(float));
	assert(vec != NULL);
	return vec;
}
|
11,413 | #include "includes.h"
// Sets *rptr to 1 when aptr[i] < bptr[i] holds for any i < n within this
// block (out-of-range threads contribute 0, so the barrier-or is safe).
__global__ void cuda_cmp_kernel(std::size_t n, int* aptr, int* bptr, int* rptr) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int pred = 0;
	if (i < n)
		pred = (aptr[i] < bptr[i]);
	if (__syncthreads_or(pred))
		*rptr = 1;
} |
11,414 | #include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
cudaError_t error = call; \
if(error != cudaSuccess){ \
printf("ERROR: %s:%d\n", __FILE__, __LINE__); \
printf("error_num: %d reason:%s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Current wall-clock time in seconds (microsecond resolution).
double cpuSecond(){
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
}
// Each thread prints its flattened global index (device printf; debug use).
__global__ void helloFromGPU(void){
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	printf("Hello from GPU! %d\n", idx);
}
// Launch a 6-thread greeting kernel and time launch + synchronize on the host.
int main(void){
	const int nElem = 6;
	dim3 block(6);
	dim3 grid((nElem + block.x - 1) / block.x);
	double start = cpuSecond();
	helloFromGPU<<< grid, block >>>();
	CHECK(cudaDeviceSynchronize());
	double elapsed = cpuSecond() - start;
	printf("Elapsed time: %5.6f sec\n", elapsed);
	return 0;
}
|
11,415 | #include "includes.h"
// For every z-slice of this block's tile, copies the two boundary rows
// (row base_global_row and the one after it) of the dense volume `dst`
// into the halo buffer `shared_rows`, laid out as 2 rows per (slice,
// block-row) pair.  Guards keep all accesses inside n_rows/n_cols/n_slices.
// NOTE(review): shared_cols and shared_slices are accepted but never used
// here -- presumably filled by sibling copy kernels; confirm.
__global__ void gpu_stencil37_hack1_cp_rows(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows:begin\n");
printf("copy rows:gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy rows:blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy rows:tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
// origin of this block's tile in the global volume
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x*blockIdx.x;
int dst_area = n_rows*n_cols;       // elements per slice of dst
int s_area = gridDim.y*n_cols*2;    // elements per slice of shared_rows (2 rows per block-row)
int base_global_idx = base_global_slice*dst_area + base_global_row * n_cols + base_global_col;
int nextRow = base_global_row+1;
bool legalNextRow = nextRow<n_rows;
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
int idx_dst =base_global_idx + tz*dst_area+ tx ;
int idx = (base_global_slice+tz)*s_area + blockIdx.y*n_cols*2+blockIdx.x*blockDim.x+ tx ;
if(legalCurCol && legalCurSlice){
shared_rows[idx] = dst[idx_dst];
}
// second boundary row, offset by one full row in both buffers
if(legalCurCol && legalCurSlice && legalNextRow){
shared_rows[idx+n_cols] = dst[idx_dst+n_cols];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.y==0 && blockIdx.x==0 &&blockIdx.z==0 ){
if((threadIdx.x==0 || threadIdx.x==1 || threadIdx.x==2 ) && threadIdx.y==0){
int addr0 = base_global_idx+0*dst_area+threadIdx.x;
int addr = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x;
int addr1 = s_area*(base_global_slice+1)+n_cols+blockIdx.x*blockDim.x+ threadIdx.x;
int addr2 = s_area*(base_global_slice+2)+n_cols+blockIdx.x*blockDim.x+ threadIdx.x;
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr0,dst[addr0]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr,shared_rows[addr]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,addr1,shared_rows[addr1]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,addr2,shared_rows[addr2]);
}
if(threadIdx.x==0 && threadIdx.y==0){
int addr = 2*s_area+n_cols+256;
int addr1 = 2*dst_area+n_cols+256;
printf("shared_rows: addr:%d, val:%f\n", addr, shared_rows[addr]);
printf("dst : addr:%d, val:%f\n", addr1, dst[addr1]);
}
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows end!\n");
}
#endif
} |
11,416 | #include <iostream>
#include <utility>
#include <algorithm>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <time.h>
//#include "graphfilter.h"
//#include <cutil.h>
//#include <graphfilter_kernel.cu>
#define N 400
#define DEBUG 0
typedef struct _Graph_node_A {
int adj[N];
} Graph_node_A;
using namespace std;
extern "C"
#ifndef _MATRIXMUL_H_
#define _MATRIXMUL_H_
// For each vertex tid, counts how many adjacency entries it shares with the
// start vertex (*v_start) and stores the count in relation_count[tid].
// Adjacency lists are zero-terminated arrays of length N.
// NOTE(review): only threads with tid < N fill v_start_adj; if blockDim.x
// (= max_id_A per the launcher) is smaller than N, entries beyond blockDim.x
// stay uninitialized and may be read by the j-loop — confirm max_id_A >= the
// longest adjacency list, or zero-fill the remainder.
__global__ void naiveFilterKernel(struct _Graph_node_A *PA, int *relation_count, int *v_start, int *max_id_A)
{
__shared__ int v_start_adj[N];
int tid = threadIdx.x;
int count=0;
// Stage the start vertex's adjacency list in shared memory.
if(tid < N)
v_start_adj[tid] = PA[(*v_start)-1].adj[tid];
__syncthreads();
// Skip the start vertex itself and any out-of-range threads (barrier above
// is already past, so this early return is safe).
if(tid == ((*v_start)-1) || tid >= *max_id_A) return;
int i, j;
// Count intersections; break after the first match per adj[i] so each
// shared neighbour is counted once.
for(i=0;(i<N) && (PA[tid].adj[i] != 0);i++) {
for(j=0;j<N && v_start_adj[j]!= 0;j++) {
if(PA[tid].adj[i] == v_start_adj[j]){
count++;
break;
}
}
}
relation_count[tid] = count;
}
#endif
#ifndef _MATRIXMUL_F_
#define _MATRIXMUL_F_
// For each of the *k reference vertices (block row `by`), counts how many
// adjacency entries vertex `tx` shares with reference index_vertice[by], and
// writes the count to relation_count[(by+1) * max_id_A + tx]. Adjacency lists
// are zero-terminated arrays of length N. Launched as grid (1, k) with
// blockDim.x == max_id_A.
// BUGFIX: the original returned early for out-of-range threads BEFORE the
// __syncthreads() barrier; a barrier skipped by some threads of a block while
// others reach it is undefined behavior. Restructured so every thread of the
// block reaches the barrier, and inactive threads exit only afterwards.
__global__ void fullFilterKernel(struct _Graph_node_A *PA, int *relation_count, int *index_vertice, int *k, int *max_id_A)
{
    __shared__ int index_ref[N];
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int count = 0;
    // Same participation set as the original's early return, evaluated up front.
    bool active = (by < *k) && (tx < *max_id_A);
    // Stage the reference vertex's adjacency list in shared memory.
    if (active && tx < N)
        index_ref[tx] = PA[index_vertice[by]-1].adj[tx];
    __syncthreads();
    if (!active) return;
    // Count intersections, skipping the reference vertex itself; break after
    // the first match per adj[i] so each shared neighbour is counted once.
    for (int i = 0; (i < N) && (PA[tx].adj[i] != 0); i++)
        for (int j = 0; (j < N) && (index_ref[j] != 0); j++) {
            if ((PA[tx].adj[i] == index_ref[j]) && ((index_vertice[by]-1) != tx)) {
                count++;
                break;
            }
        }
    relation_count[(by+1) * (*max_id_A) + tx] = count;
}
#endif
void filterOnDevice(struct _Graph_node_A *PA, int *relation_count, int *index_vertice, int v_start, int k, int max_id_A);
void filterOnHost(struct _Graph_node_A *PA, int *relation_count_h, int *index_vertice_h, int v_start, int k, int max_id_A);
// function to sort the relations to the start vertice in a descending order
void selSort(int s[], int index[], int length);
//function to compare the result between the host side and device side
bool compare(vector<int> final_resut_h, vector<int> final_result_d);
//static const char* DAT_FILE_NAME = "graph.dat";// file to store the graph struct
int main(int arcg, char** argv)
{
char tracefilename[30];
int A, B; //fot the nodes in party A and party B
int max_id_A;
int max_id_B;
int max_weight;
int i=0, j=0;
//int m=0, n=0;
FILE *fp;
char buf[100];
struct _Graph_node_A *PA;
/***********************************************************
read in the trace file **********************************
************************************************************/
printf("Please input the trace file name:");
scanf("%s", &tracefilename);
fp = fopen(tracefilename, "r");
if(fp==NULL){
printf("Could not open the trace file!\n");
exit(1);
}
printf("Has open the trace successfully!\n");
while(fgets(buf,100,fp)){
if(buf[0]=='%') continue;
if(i==0){
sscanf(buf, "%d%d%d", &max_id_A, &max_id_B, &max_weight);
break;
}
}
fclose(fp);
cout << max_id_A << endl;
PA = (struct _Graph_node_A *)malloc(sizeof(struct _Graph_node_A) * max_id_A);
if(PA == NULL)
cout << "Allocate memory for PA failed" <<endl;
else
cout << "Allocate memory for PA successfully" << endl;
// initialize A and B
for(i=0;i<max_id_A;i++)
for(j=0;j<N;j++)
PA[i].adj[j] = 0;
cout << "Initialized PA successfully!" << endl;
fp = fopen(tracefilename, "r");
if(fp==NULL){
printf("Could not open the trace file!\n");
exit(1);
}
printf("Has open the trace sucessfully!\n");
int index[max_id_A];
for(i=0;i<max_id_A;i++)
index[i] = 0;
cout << "Initilized index successfully" << endl;
i=0;
//read in the input file and build the graph
while(fgets(buf,100,fp)){
if(buf[0]=='%')
continue;
if(i==0){
sscanf(buf,"%d%d%d", &max_id_A, &max_id_B, &max_weight);
cout << max_id_A << " " << max_id_B << " " << max_weight << endl;
i++;
}else{
sscanf(buf, "%d%d", &A, &B);
PA[A-1].adj[index[A-1]++] = B;
}
}
fclose(fp);
// Naive Graph collaboratie Filtering
int v_start;// the start vertive for search
int k;// the k is the number of relational vertices needed to be found
// read in the start vertex, and value k for ralation depth
std::cout << "Please input the start vertice as an interger number less than " << max_id_A << " :";
scanf("%d", &v_start);
while(v_start<0 || v_start > max_id_A) {
cout << "Please input a valid start vertice less than " << max_id_A << " :";
scanf("%d", &v_start);
}
std::cout << "Please input the value of k as an interger number less than " << max_id_A << " :";
scanf("%d", &k);
while(k < 0 || k > max_id_A) {
cout << "Please input a valid k less than " << max_id_A << " :";
scanf("%d", &k);
}
int* relation_count = (int*)malloc((k+1)*max_id_A*sizeof(int));
int* index_vertice = (int*)malloc((k+1)*max_id_A*sizeof(int));
int* relation_count_h = (int*)malloc((k+1)*max_id_A*sizeof(int));
int* index_vertice_h = (int*)malloc((k+1)*max_id_A*sizeof(int));
//initialize the relation_count and vertice index
for(i=0;i<k+1;i++)
for(j=0;j<max_id_A;j++){
relation_count[i * max_id_A + j] = 0;
relation_count_h[i * max_id_A + j] = 0;
index_vertice[i * max_id_A + j] = j + 1;
index_vertice_h[i * max_id_A + j] = j + 1;
}
// executing the filtering algorithm on the host side
clock_t st = clock();
filterOnHost(PA, relation_count_h, index_vertice_h, v_start, k, max_id_A);
st = clock() - st;
printf("CPU execution time is %.5f\n", (float)st/CLOCKS_PER_SEC);
#if DEBUG
cout << "Relation count on the host side is:" << endl;
for(i=0;i<k+1;i++){
for(j=0;j<max_id_A;j++){
cout << relation_count_h[i*max_id_A+j] << " ";
}
cout << endl;
}
#endif
// executing the filtering algorithm on the device side
st = clock();
filterOnDevice(PA, relation_count, index_vertice, v_start, k, max_id_A);
st = clock() - st;
printf("GPU execution time is %.5f\n", (float)st/CLOCKS_PER_SEC);
//sort the relation in a descending order
for(i=1;i<k+1;i++) {
selSort(&relation_count[i*max_id_A], &index_vertice[i*max_id_A], max_id_A);
selSort(&relation_count_h[i*max_id_A], &index_vertice_h[i*max_id_A], max_id_A);
}
#if DEBUG
cout << "The " << k << " related vertices to each vertice are: " << endl;
for(i=0;i<k+1;i++) {
for(j=0;j<k;j++) {
cout << index_vertice[i * max_id_A + j] << " ";
cout << index_vertice_h[i * max_id_A + j] << " ";
}
cout << endl;
}
#endif
vector<int> final_result_d;// record the final result of collaborative filtering for visualization
vector<int> final_result_h;
vector<int>::iterator it;
// copy the index_vertice to the final_result vector
for(i=0;i<k+1;i++)
for(j=0;j<k;j++) {
final_result_d.push_back(index_vertice[i * max_id_A + j]);
final_result_h.push_back(index_vertice_h[i * max_id_A + j]);
}
// sort the final_result vector in a desending order
std::sort(final_result_d.begin(), final_result_d.end());
std::sort(final_result_h.begin(), final_result_h.end());
// remove the repeated vertices
for(it=final_result_d.begin()+1, i=final_result_d.front();it!=final_result_d.end();) {
if(i == *it)
final_result_d.erase(it);
else {
i = *it;
it++;
}
}
for(it=final_result_h.begin()+1, i=final_result_h.front();it!=final_result_h.end();) {
if(i == *it)
final_result_h.erase(it);
else {
i = *it;
it++;
}
}
// compare the result from GPU with the result from CPU to test the correctness
bool match = compare(final_result_h, final_result_d);
if(match)
cout << "Test passed ^^!" << endl;
else
cout << "Test failed !!" << endl;
// output the final result
cout << "The final Collaborative Filtering result is:" << endl;
for(it=final_result_d.begin();it!=final_result_d.end();it++)
cout << *it << " ";
cout << endl;
free(relation_count);
free(relation_count_h);
free(index_vertice);
free(index_vertice_h);
return 0;
}
// CPU reference for the GPU filtering pipeline: fills row 0 of
// relation_count with each vertex's overlap against v_start, sorts it
// descending (permuting index_vertice in lockstep), then for the top k
// vertices fills rows 1..k with their overlap against every other vertex.
// Adjacency lists are zero-terminated arrays of length N.
void filterOnHost(struct _Graph_node_A *PA, int *relation_count, int *index_vertice, int v_start, int k, int max_id_A)
{
int i, j, m, n;
// naive collaborative filtering
for(i=0;i<max_id_A;i++){
if(i == v_start - 1) continue;
// count shared neighbours between vertex i and the start vertex;
// break after the first match so each adj[j] contributes at most once
for(j=0;j<N && PA[i].adj[j] != 0;j++)
for(m=0;m<N && PA[v_start-1].adj[m] != 0;m++){
if(PA[v_start-1].adj[m] == PA[i].adj[j]) {
relation_count[i]++;
break;
}
}
}
// sort the relation in a descending order by selection sort algorithm
selSort(relation_count, index_vertice, max_id_A);
// full collaborative filtering
// after the sort, index_vertice[0..k-1] are the k most related vertices
for(i=0;i<k;i++)
for(j=0;j<max_id_A;j++)
for(m=0;m<N && PA[j].adj[m] != 0;m++)
for(n=0;n<N && PA[index_vertice[i]-1].adj[n] != 0;n++) {
if(PA[index_vertice[i]-1].adj[n] == PA[j].adj[m] && index_vertice[i] - 1 != j) {
relation_count[(i+1)*max_id_A + j] ++;
break;
}
}
}
void filterOnDevice(struct _Graph_node_A *PA, int *relation_count, int *index_vertice, int v_start, int k, int max_id_A) {
struct _Graph_node_A *PA_d;
int *relation_count_d;
int *max_id_A_d;
int *index_vertice_d;
int *v_start_d;
int *k_d;
int size = (k + 1) * max_id_A * sizeof(int);
cudaMalloc((void**)&(PA_d), sizeof(struct _Graph_node_A) * max_id_A);
cudaMalloc((void**)&(relation_count_d), size);
cudaMalloc((void**)&(max_id_A_d), sizeof(int));
cudaMalloc((void**)&(v_start_d), sizeof(int));
cudaMalloc((void**)&(k_d), sizeof(int));
cudaMalloc((void**)&(index_vertice_d), max_id_A * sizeof(int));
cudaMemcpy(PA_d, PA, sizeof(struct _Graph_node_A) * max_id_A, cudaMemcpyHostToDevice);
cudaMemcpy(relation_count_d, relation_count, size, cudaMemcpyHostToDevice);
cudaMemcpy(max_id_A_d, &max_id_A, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(v_start_d, &v_start, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(k_d, &k, sizeof(int), cudaMemcpyHostToDevice);
// naive collaborative filtering
dim3 dimGrid0(1, 1);
dim3 dimBlock0(max_id_A, 1);
naiveFilterKernel<<<dimGrid0, dimBlock0>>>(PA_d, relation_count_d, v_start_d, max_id_A_d);
cudaMemcpy(relation_count, relation_count_d, max_id_A * sizeof(int), cudaMemcpyDeviceToHost);
#if DEBUG
cout << "The relation count is:" << endl;
for(int i=0; i<k+1;i++) {
for(int j=0;j<max_id_A;j++){
cout << relation_count[i*max_id_A+j] << " ";
}
cout << endl;
}
#endif
selSort(relation_count, index_vertice, max_id_A);
#if DEBUG
cout << "The " << k << " related vertice to " << v_start << " is:" << endl;
for(int i=0;i<k;i++)
cout << index_vertice[i] << " ";
cout << endl;
#endif
cudaMemcpy(index_vertice_d, index_vertice, max_id_A * sizeof(int), cudaMemcpyHostToDevice);
// full collaborative filtering
dim3 dimGrid(1, k);
dim3 dimBlock(max_id_A, 1);
fullFilterKernel<<<dimGrid, dimBlock>>>(PA_d, relation_count_d, index_vertice_d, k_d, max_id_A_d);
cudaMemcpy(relation_count, relation_count_d, size, cudaMemcpyDeviceToHost);
#if DEBUG
cout << "The relation count is:" << endl;
for(int i=0; i<k+1;i++) {
for(int j=0;j<max_id_A;j++){
cout << relation_count[i*max_id_A+j] << " ";
}
cout << endl;
}
#endif
cudaFree(PA_d);
PA_d = NULL;
cudaFree(relation_count_d);
relation_count_d = NULL;
cudaFree(max_id_A_d);
max_id_A_d = NULL;
cudaFree(k_d);
k_d = NULL;
cudaFree(index_vertice_d);
index_vertice_d = NULL;
}
// Selection Sort
// Selection sort, descending, on s[0..length-1]; index[] is permuted in
// lockstep so callers can recover which original slot each value came from.
void selSort(int s[], int index[], int length)
{
    for (int pass = 0; pass + 1 < length; ++pass) {
        // find the largest remaining element
        int best = pass;
        for (int probe = pass + 1; probe < length; ++probe) {
            if (s[probe] > s[best])
                best = probe;
        }
        // move it to the front of the unsorted region, keeping index[] paired
        if (best != pass) {
            swap(s[pass], s[best]);
            swap(index[pass], index[best]);
        }
    }
}
// Returns true iff the host-side and device-side result vectors are equal.
// BUGFIX: the original iterated only over the common prefix (loop stopped at
// the shorter vector's end), so vectors of different lengths whose shared
// prefix matched were wrongly reported as equal. A length check fixes that.
bool compare(vector<int> final_result_h, vector<int> final_result_d)
{
    if (final_result_h.size() != final_result_d.size())
        return false;
    for (size_t i = 0; i < final_result_h.size(); ++i) {
        if (final_result_h[i] != final_result_d[i])
            return false;
    }
    return true;
}
|
11,417 | #include <stdio.h>
// Deliberately empty kernel: does no work. Useful for measuring bare kernel
// launch overhead.
__global__ void emptyKernel()
{
// leerer Kernel (empty kernel)
}
// Launches the empty kernel once with a single 1x1x1 block and exits.
// Note: no synchronization or error check follows the launch, so launch
// failures would go unreported.
int main(int argc, char ** argv)
{
dim3 dimGrid(1);
dim3 dimBlock(1);
emptyKernel<<<dimGrid, dimBlock>>>();
return 0;
}
|
11,418 | #include "includes.h"
// #pragma once
using namespace std;
#define NUM_THREADS_PER_BLOCK 512
int* create_shifts (char* pattern);
int linear_horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size,
int num_chunks, int text_size, int pat_len, int myId);
/*
* Driver function
* argv[0] is target pattern string
* argv[1] is text path
*/
// Boyer-Moore-Horspool substring search, parallelized by splitting the text
// into num_chunks chunks of chunk_size characters; each thread scans one
// chunk (plus pat_len-1 characters of overlap into the next chunk) and adds
// its match count to *num_matches with a single atomicAdd at the end.
// shift_table is the precomputed Horspool bad-character table (ASCII < 126).
// NOTE(review): the guard is `myId > num_chunks`, which lets thread
// num_chunks run one chunk past the logical end; the `i >= text_size` break
// keeps reads in bounds, but `>=` looks like the intended comparison — confirm
// against the launcher's thread count.
__global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len) {
const int TABLE_SIZ = 126;
int count = 0;
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if(myId > num_chunks){ //if thread is an invalid thread
return;
}
// End of this thread's window: its chunk plus pat_len-1 overlap characters.
int text_length = (chunk_size * myId) + chunk_size + pat_len - 1;
// don't need to check first pattern_length - 1 characters
int i = (myId*chunk_size) + pat_len - 1;
int k = 0;
while(i < text_length) {
// reset matched character count
k = 0;
if (i >= text_size) {
// break out if i tries to step past text length
break;
}
if (text[i] >= TABLE_SIZ || text[i] < 0) {
// move to next char if unknown char (Unicode, etc.)
++i;
} else {
// compare pattern right-to-left against the text ending at i
while(k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) {
// increment matched character count
k++;
}
if(k == pat_len) {
// increment pattern count, text index
++count;
++i;
} else {
// add on shift if known char
i = i + shift_table[text[i]];
}
}
}
// one atomic per thread keeps contention low
atomicAdd(num_matches, count);
} |
11,419 | #include "includes.h"
// Repacks a PetaVision-layout tensor (outFeature-major: [OF][y][x][IF]) into
// cuDNN layout ([OF][IF'][y'][x']), cropping cropX/cropY border pixels and
// folding a manyScaleX x manyScaleY spatial neighbourhood into the feature
// dimension (destInFeatures = inFeatures * manyScaleX * manyScaleY).
// One thread per source element; out-of-crop threads simply return.
__global__ void CudaPermutePVToCudnn( float *dest, float *src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY, int cropX, int cropY) {
// parameter dimensions are in source PV format
int destNx = (nx - 2 * cropX) / manyScaleX;
int destNy = (ny - 2 * cropY) / manyScaleY;
int destInFeatures = inFeatures * manyScaleX * manyScaleY;
int kSrc = (blockIdx.x * blockDim.x) + threadIdx.x;
if (kSrc < outFeatures * ny * nx * inFeatures) {
// decompose the flat source index into (kOF, kY, kX, kIF)
int kOF = kSrc / (ny * nx * inFeatures);
int kY = (kSrc % (ny * nx * inFeatures)) / (nx * inFeatures);
int kX = (kSrc % (nx * inFeatures)) / inFeatures;
int kIF = (kSrc % inFeatures);
// check if in bounds
if (kX < cropX || kX >= nx - cropX) {
return;
}
else {
kX = kX - cropX;
}
if (kY < cropY || kY >= ny - cropY) {
return;
}
else {
kY = kY - cropY;
}
// Recalculate x, y, and f based on manyScale
kIF = kIF + inFeatures * (kX % manyScaleX + (kY % manyScaleY) * manyScaleX);
kX = kX / manyScaleX;
kY = kY / manyScaleY;
// destination strides for [OF][IF'][y'][x'] layout
int sOF = destInFeatures * destNy * destNx;
int sIF = destNy * destNx;
int sY = destNx;
int kDest = kOF * sOF + kIF * sIF + kY * sY + kX;
dest[kDest] = src[kSrc];
}
} |
11,420 | /*
* purpose: CUDA managed unified memory for >= pascal architectures;
* this version just uses cudaMallocManaged() on the host,
* then runs a kernel on the GPU to add together two arrays
* of size 1 GB and save the results into a third array;
* result: working great ! when running a loop over 25 attemps with
* 'watch nvidia-smi' open in a background terminal, we see
* Memory-Usage 3185MiB / 8114MiB
* n.b. for visual clarity, the printout section below should be
* commented out when starting to do some profiling runs,
* e.g. nvprof ./a.out
* compilation: nvcc ./unified_memory_example_1.cu
* usage: ./a.out
*/
#include <stdio.h>
#define ARRAYDIM 268435456
/*
* GPU kernel working with unified memory which had been
* allocated using cudaMallocManaged() on the host
*/
/*
 * Fills x[i] = i and y[i] = i + 1, then stores their sum into z[i]; one
 * element per thread, no bounds check (the host launches exactly
 * ARRAYDIM / blockDim.x blocks, so the grid covers the arrays exactly).
 */
__global__ void KrnlDmmy(float *x, float *y, float *z)
{
    int gid = threadIdx.x + (blockIdx.x * blockDim.x);
    float xv = (float) gid;
    float yv = (float) (gid + 1);
    x[gid] = xv;
    y[gid] = yv;
    z[gid] = xv + yv;
}
/*
* host main
*/
/*
 * Allocates three 1 GB arrays with cudaMallocManaged(), runs KrnlDmmy to
 * fill two of them and store their sum in the third, then frees everything.
 */
int main()
{
    int i, cudaRtrn;
    dim3 thrds_per_block, blcks_per_grid;
    float *a, *b, *c;
    /*
     * BUGFIX: the original tests read "cudaRtrn = cudaMallocManaged(...) != 0".
     * Because '!=' binds tighter than '=', cudaRtrn received the boolean 0/1
     * rather than the cudaError_t value, so the printed code was meaningless.
     * Parenthesizing the assignment reports the real error code.
     */
    if ((cudaRtrn = cudaMallocManaged(&a, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array a[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = cudaMallocManaged(&b, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array b[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = cudaMallocManaged(&c, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array c[], %d ***\n", cudaRtrn);
    }
    /*
     * Kernel fills a[] and b[] with thread-specific values and stores the
     * sums in c[]. ARRAYDIM is an exact multiple of the block size, so the
     * grid covers the arrays exactly (the kernel has no bounds check).
     */
    thrds_per_block.x = 256;
    blcks_per_grid.x = ARRAYDIM / thrds_per_block.x;
    KrnlDmmy<<<blcks_per_grid, thrds_per_block>>>(a, b, c);
    cudaDeviceSynchronize();
    /* 'i' is used only by this intentionally disabled verification printout. */
    //for (i=0; i<=100; i++) {
    //    printf("%6d%6.1f%6.1f%6.1f\n", i, a[i], b[i], c[i]);
    //}
    (void)i;
    cudaFree(c);
    cudaFree(b);
    cudaFree(a);
    return(0);
}
|
11,421 | #include "includes.h"
// Element-wise hyperbolic tangent: y[i] = tanh(a[i]) for i in [0, n), one
// thread per element with a bounds guard.
// FIX: use the single-precision tanhf() instead of tanh(); the double
// overload promoted every element to double on the device for no benefit.
__global__ void pw_tanh(float *y, float *a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = tanhf(a[i]);
} |
11,422 | #include "includes.h"
// Block-wide sum reduction over g_idata with a factor-2 unroll: each block
// first folds element idx+blockDim.x into idx in global memory, then reduces
// its 2*blockDim.x window tree-style, finishing the last 32 lanes without
// barriers via a volatile pointer. Block sums land in g_odata[blockIdx.x].
// NOTE(review): the barrier-free tail relies on implicit warp-synchronous
// execution (pre-Volta); on Volta+ independent thread scheduling this idiom
// needs __syncwarp() between steps — confirm the target architecture.
__global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2: fold the second half of the block's window into the first
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling last warp (volatile forces each store/load to global memory)
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// thread 0 publishes this block's partial sum
if (tid == 0) g_odata[blockIdx.x] = idata[0];
} |
11,423 | #include "includes.h"
// Central-difference gradient along the slice (z) axis of a 3D volume:
// d_Dst[z] = 0.5 * (d_Src[z+1] - d_Src[z-1]), computed per (x,y) column.
// Each block stages LAYERS_GRAD_RESULT_STEPS z-samples plus HALO_STEPS of
// halo on each side in shared memory. Out-of-volume halo reads are zeroed.
// Block/tile geometry comes from the LAYERS_GRAD_* macros defined elsewhere.
__global__ void gradientLayersKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD )
{
// +1 padding on the innermost (z) extent to reduce shared-memory bank conflicts
__shared__ float s_Data[LAYERS_GRAD_BLOCKDIM_X][LAYERS_GRAD_BLOCKDIM_Y][(LAYERS_GRAD_RESULT_STEPS + 2 * LAYERS_GRAD_HALO_STEPS) * LAYERS_GRAD_BLOCKDIM_Z + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * LAYERS_GRAD_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * LAYERS_GRAD_BLOCKDIM_Y + threadIdx.y;
const int baseZ = (blockIdx.z * LAYERS_GRAD_RESULT_STEPS - LAYERS_GRAD_HALO_STEPS) * LAYERS_GRAD_BLOCKDIM_Z + threadIdx.z;
// rebase both pointers at this thread's column so later indexing is z-only
d_Src += (baseZ * imageH + baseY) * imageW + baseX;
d_Dst += (baseZ * imageH + baseY) * imageW + baseX;
const int pitch = imageW*imageH;
//Main data
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z] = d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch];
}
//Upper halo (zero outside the volume)
#pragma unroll
for (int i = 0; i < LAYERS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z] = (baseZ + i * LAYERS_GRAD_BLOCKDIM_Z >= 0) ? d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] : 0;
}
//Lower halo (zero outside the volume)
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS + LAYERS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z]= (baseZ + i * LAYERS_GRAD_BLOCKDIM_Z < imageD) ? d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i++) {
float sum = 0;
// central difference along z, scaled by 1/2
sum += s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z + 1];
sum -= s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z - 1];
sum *= 0.5f;
d_Dst[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] = sum;
}
} |
11,424 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Element-wise M x N matrix addition: d_out = d_in1 + d_in2.
// Launched as <<<M, N>>>, so blockIdx.x is the row and threadIdx.x the column.
// BUGFIX: the original loaded d_in1[idx] and d_in2[idx] BEFORE the bounds
// test, reading out of bounds whenever the launch supplies excess threads;
// the loads now happen only inside the guard.
__global__ void MatrixAdd(float* d_out, float* d_in1, float* d_in2, int M, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (blockIdx.x < M && threadIdx.x < N)
    {
        d_out[idx] = d_in1[idx] + d_in2[idx];
    }
}
// Reads two M x N float matrices from the file named in argv[1], adds them on
// the GPU (one block per row, one thread per column) and prints the result.
// Input format: "M N" header, M*N floats, a repeated "M N" header, M*N floats.
int main(int argc, char* argv[])
{
    FILE *fp;
    if(argc < 2)
    {
        for(int w = 0; w != 10; w++)
            printf("WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING\n");
        printf("ERROR: NO INPUT FILE SELECTED! USE %s <INPUT FILE> \n\n", argv[0]);
        exit(1);
    }
    // BUGFIX: +1 for the terminating NUL; the original allocated exactly
    // strlen bytes, so strcpy wrote one byte past the end of the buffer.
    char* filename = (char*)malloc((strlen(argv[1]) + 1) * sizeof(char));
    strcpy(filename, argv[1]);
    fp = fopen(filename, "r");
    if(fp == NULL)
    {
        for(int w = 0; w != 10; w++)
            printf("WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING\n");
        printf("ERROR: FILE COULD NOT BE READ. CANT EVEN TYPE A FILE NAME EH? NERD\n");
        free(filename);
        // BUGFIX: the original called fclose(fp) here with fp == NULL,
        // which is undefined behavior; the stream was never opened.
        exit(1);
    }
    int M, N;
    M = N = 0;
    fscanf(fp, "%d %d", &M, &N);
    int MATRIXMEM = (M*N) * sizeof(float);
    // First matrix: zero-fill, then read M*N floats.
    float *h_in1 = (float*)malloc(MATRIXMEM);
    for(int x = 0; x < M; x++)
        for(int y = 0; y < N; y++)
            h_in1[x*N + y] = 0;
    for(int x = 0; x < M; x++)
        for(int y = 0; y < N; y++)
            fscanf(fp, "%f", &h_in1[x*N + y]);
    // Second matrix: the file repeats the dimension header first.
    fscanf(fp, "%d %d", &M, &N);
    float* h_in2 = (float*)malloc(MATRIXMEM);
    for(int x = 0; x < M; x++)
        for(int y = 0; y < N; y++)
            h_in2[x*N + y] = 0;
    for(int x = 0; x < M; x++)
        for(int y = 0; y < N; y++)
            fscanf(fp, "%f", &h_in2[x*N + y]);
    float* h_out = (float*)malloc(MATRIXMEM);
    float *d_out, *d_in1, *d_in2;
    cudaMalloc(&d_in1, MATRIXMEM);
    cudaMalloc(&d_in2, MATRIXMEM);
    cudaMalloc(&d_out, MATRIXMEM);
    cudaMemcpy(d_in1, h_in1, MATRIXMEM, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in2, h_in2, MATRIXMEM, cudaMemcpyHostToDevice);
    // One block per row, one thread per column.
    MatrixAdd<<<M, N>>>(d_out, d_in1, d_in2, M, N);
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, MATRIXMEM, cudaMemcpyDeviceToHost);
    for(int x = 0; x < M; x++)
    {
        for(int y = 0; y < N; y++)
            printf("%12f ", h_out[x*N + y]);
        printf("\n");
    }
    cudaFree(d_in1);
    cudaFree(d_in2);
    cudaFree(d_out);
    free(h_in1);
    free(h_in2);
    free(h_out);
    free(filename);   // fixed leak: filename was never released
    fclose(fp);
    exit(0);
} |
11,425 | #include <stdio.h>
#include <math.h>
#define NN 1000000000
// Prints (and, as a side effect of cudaGetLastError, clears) the most recent
// CUDA runtime error string; sprinkled after each CUDA call as a lightweight
// status check. Prints "no error" when nothing failed.
void cudaErr(){
printf("%s\n", cudaGetErrorString(cudaGetLastError()) );
return ;
}
// Midpoint-rule quadrature of 4/(1+x^2) over [0,1] using n panels; the
// result converges to pi as n grows. CPU reference for the GPU version.
double integrate(int n) {
    double width = 1.0 / (double) n;
    double acc = 0.0;
    for (int k = 1; k <= n; k++) {
        double mid = width * ((double)k - 0.5);   // panel midpoint
        acc += 4.0 / (1.0 + mid * mid);
    }
    return acc * width;
}
__global__
// Each thread accumulates the midpoint-rule terms for indices
// {idx+1, idx+1+job_size, ...} up to n_d = n / job_size, writing its partial
// (already scaled by h_d) to pi_gpu[idx]; the host sums these partials.
// Collectively the grid evaluates the n_d-point rule, NOT the n-point rule.
// NOTE(review): h (= 1/n) is computed but never used — presumably leftover
// from a version that integrated with n points; confirm intended resolution.
void integrate_kernel(int n, double * pi_gpu) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// total number of threads across the grid
int job_size = blockDim.x * gridDim.x;
double sum, h, h_d, x;
int i;
int n_d = n / (job_size);
sum = 0.0;
pi_gpu[idx]=0.0;
h = 1.0 / (double) n;
h_d = 1.0 / (double) n_d;
// strided loop over this thread's share of the n_d quadrature points
for (i = idx +1; i <= n_d ; i += job_size ) {
x = h_d * ((double)i - 0.5);
sum += 4.0 / (1.0 + x*x);
}
pi_gpu[idx] = sum * h_d;
}
// Host wrapper: launches integrate_kernel on a fixed 100x100 grid (10,000
// partial sums), copies the partials back and reduces them on the CPU.
// Each CUDA call is followed by cudaErr() for lightweight error reporting.
double integrate_gpu(int n) {
double *pi_cpu;
double *pi_gpu;
double pi =0.0;
// bs blocks of ts threads; job_size in the kernel becomes bs*ts = 10000
int bs = 100;
int ts = 100;
pi_cpu = (double *) malloc( sizeof(double) * bs * ts );
cudaMalloc( (void**)&pi_gpu, sizeof(double) * bs * ts );cudaErr();
// byte-wise memset: fine for zero, would be wrong for any other value
cudaMemset( pi_gpu, 0.0, sizeof(double) * bs * ts );cudaErr();
integrate_kernel <<< bs ,ts >>> ( n, pi_gpu); cudaErr();
// blocking copy also synchronizes with the kernel
cudaMemcpy( pi_cpu, pi_gpu, sizeof(double) * bs * ts , cudaMemcpyDeviceToHost); cudaErr();
cudaFree(pi_gpu); cudaErr();
for( int i =0; i < bs * ts; i++) pi += pi_cpu[i]; //reduce
free(pi_cpu);
return pi ;
}
// Computes pi on the GPU via midpoint quadrature and reports the absolute
// error against a 25-digit reference constant.
int main() {
int n=NN;
double PI25DT = 3.141592653589793238462643;
double pi;
pi = integrate_gpu(n);
// NOTE(review): this first line prints the reference constant, not the GPU
// result — presumably intended as a visual baseline for the next line.
printf("pi is %.16f\n", PI25DT);
printf("pi is approximately %.16f\n", pi);
printf("error is %.16f with %d iteration\n", fabs(pi - PI25DT), n);
return 0;
}
|
11,426 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
namespace Neighbours
{
class FileHandle
{
public:
int InputSize(void)
{
FILE *input = NULL;
input = fopen("input.txt", "r");
char line[30];
int N = 0;
while(fgets(line, 30, input) != NULL)
N++;
fclose(input);
return N;
}
void ReadFromFile(double *x, double *y, double *z, bool *b, int *N)
{
FILE *input = NULL;
input = fopen("input.txt", "r");
char line[30];
for(int i = 0; i < (*N); i++)
{
fgets(line, 30, input);
sscanf(line, "%lf %lf %lf", &x[i], &y[i], &z[i]);
b[i] = true;
}
fclose(input);
printf("Data imported from input.txt successfully!\n");
}
void WriteToFile(double *x, double *y, double *z, bool *b, int *N)
{
FILE *output = NULL;
output = fopen("output.txt", "w");
for(int i = 0; i < (*N); i++)
{
if(b[i] == true)
fprintf(output, "%.1lf %.1lf %.1lf\n", x[i], y[i], z[i]);
}
fclose(output);
printf("Data exported to output.txt successfully!\n");
}
};
// Marks d_bb[index] = false for every point farther than *r (Euclidean
// distance) from the query point (*x, *y, *z); points already false stay
// false across queries. One thread per point, bounds-guarded by *d_N.
// FIX: replaced pow(v, 2) with v*v — pow() is a slow general-power routine
// and is unnecessary (and no more accurate) for squaring.
__global__ void kernel(double *d_xx, double *d_yy, double *d_zz, bool *d_bb, int *d_N, double *x, double *y, double *z, double *r)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < *d_N)
    {
        double dx = (*x) - d_xx[index];
        double dy = (*y) - d_yy[index];
        double dz = (*z) - d_zz[index];
        if(dx*dx + dy*dy + dz*dz > (*r) * (*r))
            d_bb[index] = false;
    }
}
// Thin host-side wrapper that launches the neighbour-marking kernel over all
// *d_N points and blocks until it completes.
class NeighbourSearch
{
public:
void FindNeighbours(double *d_xx, double *d_yy, double *d_zz, bool *d_bb, int *d_N, double *x, double *y, double *z, double *r)
{
int grid_size, block_size = 256;
// NOTE(review): (N + block_size) / block_size launches one extra block
// when N is an exact multiple of 256; harmless since the kernel guards
// on index < N, but the usual ceil-div is (N + block_size - 1) / block_size.
grid_size = ((*d_N) + block_size) / block_size;
kernel<<<grid_size, block_size>>>(d_xx, d_yy, d_zz, d_bb, d_N, x, y, z, r);
// block until the kernel finishes so the caller can read d_bb
cudaDeviceSynchronize();
}
};
} // namespace Neighbours
// Interactive driver: loads points from input.txt into unified memory, then
// repeatedly reads a query point and radius, marks out-of-range points on the
// GPU and writes the remaining points to output.txt. A radius <= 0 exits.
int main()
{
    Neighbours::FileHandle fh = Neighbours::FileHandle();
    double *x, *y, *z;
    double *r;
    double *xx, *yy, *zz;
    bool *bb;
    int *N;
    cudaMallocManaged(&N, sizeof(int));
    *N = fh.InputSize();
    cudaMallocManaged(&x, sizeof(double));
    cudaMallocManaged(&y, sizeof(double));
    cudaMallocManaged(&z, sizeof(double));
    cudaMallocManaged(&r, sizeof(double));
    cudaMallocManaged(&xx, sizeof(double)*(*N));
    cudaMallocManaged(&yy, sizeof(double)*(*N));
    cudaMallocManaged(&zz, sizeof(double)*(*N));
    // BUGFIX: bb holds bool, but the original allocated sizeof(double) per
    // element — 8x more memory than needed.
    cudaMallocManaged(&bb, sizeof(bool)*(*N));
    fh.ReadFromFile(xx, yy, zz, bb, N);
    Neighbours::NeighbourSearch ns = Neighbours::NeighbourSearch();
    while(1)
    {
        printf("Enter the x, y and z coordinates of the point and the search distance:\t");
        scanf("%lf %lf %lf %lf", x, y, z, r);
        if((*r) <= 0)
            break;
        else
        {
            ns.FindNeighbours(xx, yy, zz, bb, N, x, y, z, r);
            fh.WriteToFile(xx, yy, zz, bb, N);
        }
    }
    cudaFree(xx);
    cudaFree(yy);
    cudaFree(zz);
    cudaFree(bb);
    cudaFree(N);
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    cudaFree(r);
    printf("Program terminated.\n");
    return 0;
}
|
11,427 | #include "includes.h"
// Tiled matrix multiply C = A * B where A is Y x X, B is X x Z, C is Y x Z.
// Uses a grid-stride pattern over output tiles so the launch need not cover
// the whole output; each TILE_SIZE x TILE_SIZE block stages tiles of A and B
// in shared memory. Assumes blockDim == (TILE_SIZE, TILE_SIZE) — TILE_SIZE
// is defined elsewhere (includes.h, presumably; confirm it matches blockDim).
__global__ void MatrixMultiply(const float* A_elements, const float* B_elements, float* C_elements, const int X, const int Y, const int Z)
{
int baseMatrixRow = blockIdx.y * blockDim.y + threadIdx.y;
int baseMatrixCol = blockIdx.x * blockDim.x + threadIdx.x;
int strideX = blockDim.x * gridDim.x;
int strideY = blockDim.y * gridDim.y;
__shared__ float As[TILE_SIZE][TILE_SIZE];
__shared__ float Bs[TILE_SIZE][TILE_SIZE];
// grid-stride over output tiles in both dimensions
for (int iterY = 0; iterY < (Y + strideY - 1) / strideY; iterY++)
{
for (int iterX = 0; iterX < (X + strideX - 1)/ strideX; iterX++)
{
int matrixRow = baseMatrixRow + strideY * (iterY);
int matrixCol = baseMatrixCol + strideX * (iterX);
int blockRow = threadIdx.y;
int blockCol = threadIdx.x;
float Cvalue = 0;
// march tiles along the shared dimension X; out-of-range lanes load 0
for (int i = 0; i < ((X + TILE_SIZE - 1) / TILE_SIZE); ++i)
{
if((blockCol + i*TILE_SIZE) < X && matrixRow < Y)
As[blockRow][blockCol] = A_elements[matrixRow * X + blockCol + i*TILE_SIZE];
else
As[blockRow][blockCol] = 0;
if((blockRow + i*TILE_SIZE) < X && matrixCol < Z)
Bs[blockRow][blockCol] = B_elements[(blockRow + i*TILE_SIZE) * Z + matrixCol];
else
Bs[blockRow][blockCol] = 0;
//Synchronize threads: tiles fully staged before use
__syncthreads();
for (int j = 0; j < TILE_SIZE; ++j)
{
Cvalue += As[blockRow][j] * Bs[j][blockCol];
}
// barrier before the next iteration overwrites the tiles
__syncthreads();
}
if (matrixRow < Y && matrixCol < Z) //Saving Final result into Matrix C
{
C_elements[matrixRow * Z + matrixCol] = Cvalue;
}
}
}
} |
11,428 | // Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
__global__
// In-place square root of one element per thread; no bounds check because
// the host launches exactly N threads.
// FIX: sqrtf() keeps the computation in single precision; sqrt() promoted
// each element to double on the device.
void simple(float *c)
{
    c[threadIdx.x] = sqrtf(c[threadIdx.x]);
}
// Squares 0..N-1 on the host into c[], square-roots them on the GPU, then
// prints each GPU result next to the CPU reference sqrtf(i*i).
int main()
{
    float *c = new float[N];
    float *cd;
    const int size = N*sizeof(float);
    int i;
    cudaMalloc( (void**)&cd, size );
    /* Fill c with data */
    for (i = 0; i < N; i++)
        c[i] = i*i;
    /* Upload c to cd */
    cudaMemcpy(cd, c, size, cudaMemcpyHostToDevice);
    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    simple<<<dimGrid, dimBlock>>>(cd);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the drop-in replacement with identical semantics.
    cudaDeviceSynchronize();
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    cudaFree( cd );
    for (int i = 0; i < N; i++)
        printf("%f:%f ", c[i], sqrtf(i*i));
    printf("\n");
    delete[] c;
    printf("done\n");
    return EXIT_SUCCESS;
}
|
11,429 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
// Enumerates the CUDA devices on this host and prints each one's compute
// capability; exits with status 1 when no GPU is found.
int main()
{
int count = 0;
cudaGetDeviceCount(&count);
if (0 == count) {
fprintf(stderr,"found no GPU device\n");
exit (1);
}
fprintf(stdout,"found %d GPU on host\n",count);
int i = 0;
for (i=0;i<count;i++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,i);
// major version 6: pascal
// major version 5: Maxwell
// major versopm 3: kepler
// major version 2: Fermi
// major version 1: Tesla
fprintf(stdout,"Device :%d has compute capability %d:%d\n",i,deviceProp.major,deviceProp.minor);
}
return 0;
}
|
11,430 |
#include "TestKernel.cuh"
#include<stdio.h>
// Minimal kernel demonstrating a by-value scalar parameter: prints x from
// the device.
__global__ void test_mykernel2(int x)
{
printf("x=%d\n", x);
}
/*
// will cause error
__global__ void test_mykernel1(int& x)
{
x++;
printf("x=%d\n", x);
}
*/
// Templated kernel that invokes a callable on x and prints the result.
// NOTE(review): the parameter is a const reference; kernel arguments are
// copied by value, so a reference passed from host code refers to host
// memory and dereferencing it on the device is invalid (the commented-out
// test_mykernel1 above documents the same hazard for int&). Works only if
// the referenced object lives in device-accessible memory — confirm callers.
template<class T>
__global__ void test_mykernel(const T& func, int x)
{
int y = func(x);
printf("y=%d\n", y);
}
|
11,431 | // this is dull!
// Element-wise addition out[i] = a[i] + b[i], indexed by blockIdx.x only:
// one element per BLOCK, so this must be launched as <<<max, 1>>> (or with
// enough blocks); threadIdx.x is deliberately unused.
__global__ void vector_add(int *a, int *b, int *out, int max) {
int tid = blockIdx.x;
if(tid < max) {
out[tid] = a[tid] + b[tid];
}
}
// Applies the device function pointer f element-wise: out[i] = f(a[i]) for
// i in [0, max). Note: f must point to a __device__ function whose address
// was taken on the device.
// BUGFIX: the original computed "blockIdx.x + blockIdx.x * blockDim.x" —
// the first term must be threadIdx.x, otherwise all threads of a block
// collapse onto (at most) a few duplicate indices and most elements are
// never mapped.
__global__ void intmap(int *a, int *out, int (*f)(int x), int max){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid < max) {
        out[tid] = f(a[tid]);
    }
}
// Device-side unary function returning a + 1; suitable for passing to
// intmap() as a device function pointer.
__device__ int increment(int a){
return a+1;
}
|
11,432 | extern "C"
// Naive square matrix multiply P = M * N over char elements, one output
// element per thread on a 2D launch; Width is the matrix dimension.
// NOTE(review): there is no bounds guard, so the launch must cover exactly
// Width x Width threads; also the char accumulator wraps/overflows for all
// but tiny values — confirm callers rely on modulo-256 arithmetic.
__global__ void multiplication(char* M, char* N, char* P, int Width)
{
int tid, tx, ty;
// global column and row of this thread's output element
tx = blockDim.x*blockIdx.x + threadIdx.x;
ty = blockDim.y*blockIdx.y + threadIdx.y;
tid = Width*ty + tx;
char Value = 0;
char MVal = 0;
char NVal = 0;
// dot product of row ty of M with column tx of N
for (int i = 0; i < Width; i++)
{
MVal = M[ty * Width + i];
NVal = N[i * Width + tx];
Value += MVal * NVal;
}
P[tid] = Value;
} |
11,433 | #include <iostream>
#include <stdio.h>
#include <vector>
#define MAX_THREADS 256
#define SIZE 524288
#define __START__ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0);
#define __STOP__(_V) cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); _V.push_back(time); cudaEventDestroy(start); cudaEventDestroy(stop);
#define __NEXT__(_V) __STOP__(_V) __START__
// Squares each of the SIZE elements of d_vector in place, one thread per
// element; threads past the end of the array do nothing.
__global__ void square_kernel(float *d_vector)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < SIZE) {
        float v = d_vector[idx];
        d_vector[idx] = v * v;
    }
}
// Prints the mean of v scaled by 1000 (the samples are milliseconds, the
// report is in microseconds), followed by a unit label.
void showMean(std::vector<float> v)
{
    float total = 0;
    for (std::vector<float>::size_type k = 0; k < v.size(); ++k)
        total += v[k];
    std::cout << 1000.*total/v.size() << " microseconds" << std::endl;
}
// Benchmarks each phase of a GPU round trip (malloc, H2D copy, kernel,
// D2H copy, free) over 1000 iterations using the __START__/__NEXT__/__STOP__
// event macros, then prints mean times per phase.
int main(int argc, char **argv)
{
    std::cout << "SIZE (Memory Allocation): " << SIZE << std::endl;
    cudaEvent_t start, stop;
    // one sample vector per measured phase
    std::vector<float> cMalloc, cMemcpy1, cKernel, cMemcpy2, cFree;
    float time(0);
    cudaFree(0); // Force runtime API context establishment
    // NOTE(review): SIZE floats (2 MiB) on the stack — close to typical
    // stack limits; heap allocation would be safer.
    float h_vector[SIZE]; // For input and output
    for (unsigned int i(0) ; i!=SIZE ; i++)
        h_vector[i] = i;
    for (unsigned int i(0) ; i!=1000 ; i++)
    {
        float *d_vector;
        __START__
        cudaMalloc(&d_vector, SIZE*sizeof(float));
        __NEXT__(cMalloc);
        cudaMemcpy(d_vector, h_vector, SIZE*sizeof(float), cudaMemcpyHostToDevice);
        __NEXT__(cMemcpy1);
        square_kernel<<<(SIZE+MAX_THREADS-1)/MAX_THREADS, MAX_THREADS>>>(d_vector);
        // Block until the device is finished (cudaThreadSynchronize is the
        // deprecated spelling of cudaDeviceSynchronize).
        cudaThreadSynchronize();
        __NEXT__(cKernel);
        cudaMemcpy(h_vector, d_vector, SIZE*sizeof(float), cudaMemcpyDeviceToHost);
        __NEXT__(cMemcpy2);
        cudaFree(d_vector);
        __STOP__(cFree);
    }
    showMean(cMalloc);
    showMean(cMemcpy1);
    showMean(cKernel);
    showMean(cMemcpy2);
    showMean(cFree);
}
|
11,434 | #include <thrust/device_vector.h>
#include <thrust/fill.h>
// c = a * b, element-wise.
__global__
void kgmul(float *a, float *b, float *c) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float prod = a[idx] * b[idx];
    c[idx] = prod;
}
// c = a * b for scalar b.
__global__
void kgmule(float *a, float b, float *c) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float scaled = a[idx] * b;
    c[idx] = scaled;
}
// c = a / b, element-wise.
__global__
void kgdiv(float *a, float *b, float *c) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float quotient = a[idx] / b[idx];
    c[idx] = quotient;
}
// c = a - b, element-wise.
__global__
void ksub(float *a, float *b, float *c) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float diff = a[idx] - b[idx];
    c[idx] = diff;
}
// Fills a with the scalar value b.
__global__
void kfill(float *a, float b) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    a[idx] = b;
}
// Binary mask: c[i] = 1 when a[i] > 0, else 0.
__global__
void kmask(float *a, float *c) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    c[idx] = (a[idx] > 0) ? 1 : 0;
}
// d[i] = a[i] * b + c (scale by scalar b, shift by scalar c).
__global__
void kaxpye(float *a, float b, float c, float *d) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float scaled = a[idx] * b;
    d[idx] = scaled + c;
}
// d[i] = exp(a[i] * b) + c.
__global__
void kexp(float *a, float b, float c, float *d) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float e = expf(a[idx] * b);
    d[idx] = e + c;
}
// d[i] = 1 / (exp(a[i] * b) + c)  (sigmoid-like form).
__global__
void kexpT(float *a, float b, float c, float *d) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float denom = expf(a[idx] * b) + c;
    d[idx] = 1 / denom;
}
// c[i] = ln(a[i] + b).
__global__
void klog(float *a, float b, float *c) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float shifted = a[idx] + b;
    c[idx] = logf(shifted);
}
// c[i] = 1 / (sqrt(a[i] + b) + d).
__global__
void ksqrtT(float *a, float b, float d, float *c) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float denom = sqrtf(a[idx] + b) + d;
    c[idx] = 1 / denom;
}
// Broadcasts a[0] into every element of c.
__global__
void kdeviceMemset(float *a, float *c) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    c[idx] = a[0];
}
// C-linkage host wrappers: each launches the matching kernel with the given
// grid/block configuration on the default stream. All pointers are device
// pointers; launches are asynchronous and unchecked.
extern "C" {
void gmul(int blocks, int threads, float *a, float *b, float *c) {
    kgmul<<<blocks, threads>>>(a, b, c);
}
void gmule(int blocks, int threads, float *a, float b, float *c) {
    kgmule<<<blocks, threads>>>(a, b, c);
}
void gdiv(int blocks, int threads, float *a, float *b, float *c) {
    kgdiv<<<blocks, threads>>>(a, b, c);
}
void gsub(int blocks, int threads, float *a, float *b, float *c) {
    ksub<<<blocks, threads>>>(a, b, c);
}
void gmask(int blocks, int threads, float *a, float *c) {
    kmask<<<blocks, threads>>>(a, c);
}
void gaxpye(int blocks, int threads, float *a, float b, float c, float *d) {
    kaxpye<<<blocks, threads>>>(a, b, c, d);
}
void gexp(int blocks, int threads, float *a, float b, float c, float *d) {
    kexp<<<blocks, threads>>>(a, b, c, d);
}
void gexpT(int blocks, int threads, float *a, float b, float c, float *d) {
    kexpT<<<blocks, threads>>>(a, b, c, d);
}
void gfill(int blocks, int threads, float *a, float b) {
    kfill<<<blocks, threads>>>(a, b);
}
void glog(int blocks, int threads, float *a, float b, float *c) {
    klog<<<blocks, threads>>>(a, b, c);
}
void gsqrtT(int blocks, int threads, float *a, float b, float d, float *c) {
    ksqrtT<<<blocks, threads>>>(a, b, d, c);
}
void gdeviceMemset(int blocks, int threads, float *a, float *c) {
    kdeviceMemset<<<blocks, threads>>>(a, c);
}
// Unused thrust-based allocator kept for reference.
//float* gdev_memset(size_t N, float value) {
//	thrust::device_ptr<float> dev_ptr = thrust::device_malloc<float>(N);
//	float* raw_ptr = thrust::raw_pointer_cast(dev_ptr);
//	return raw_ptr;
//}
}
extern "C++"{
}
|
11,435 | #include "includes.h"
// 2D convolution with a MASK_WIDTH x MASK_WIDTH mask over an interleaved
// multi-channel image, zero padding outside the image. One thread per pixel;
// x-dim of the launch indexes rows, y-dim columns.
__global__ void Convolution_2D_globalMemory(unsigned char* imgInput, unsigned char* imgOutput, const float* mask, int height, int width, int channels) {
    int rows = threadIdx.x + blockIdx.x * blockDim.x;
    int cols = threadIdx.y + blockIdx.y * blockDim.y;
    // fix: guard out-of-range threads (grid rarely divides the image exactly)
    if (rows >= height || cols >= width)
        return;
    int Row = rows - MASK_WIDTH / 2;  // top-left corner of the mask window
    int Col = cols - MASK_WIDTH / 2;
    for (int c = 0; c < channels; c++)
    {
        float sum = 0.0f;
        for (int i = 0; i < MASK_WIDTH; i++)
        {
            for (int j = 0; j < MASK_WIDTH; j++)
            {
                int filterRow = Row + i;
                int filterCol = Col + j;
                if ((filterRow >= 0) && (filterRow < height) && (filterCol >= 0) && (filterCol < width))
                {
                    // fix: the row stride is `width`, matching the output
                    // indexing below; the original multiplied by `height`
                    sum += imgInput[(filterRow * width + filterCol) * channels + c] * mask[i * MASK_WIDTH + j];
                }
                // fix: out-of-bounds taps contribute 0 (zero padding);
                // the original `else { sum = 0; }` wiped the whole
                // accumulated sum whenever any tap fell outside the image
            }
        }
        imgOutput[(rows * width + cols) * channels + c] = (unsigned char)sum;
    }
} |
11,436 | #include "cstdio"
#include <iostream>
#include <chrono>
#include "cuda.h"
#include <cuda_runtime_api.h>
constexpr size_t SIZE = 16384 * 3; // 16384 * 3
constexpr size_t BLOCK_COUNT = 1024 + 512; //for shared alg 16384 * 3 for simple
constexpr size_t THREAD_PER_BLOCK = SIZE / BLOCK_COUNT;
constexpr size_t BLOCK_SIZE = SIZE / BLOCK_COUNT;
constexpr size_t SHARED_MEMORY = 16384 * 3; // opt 16384 * 3 for shared alg
constexpr size_t SHARED_MEMORY_COUNT = SHARED_MEMORY / sizeof(float);
/*
* GPU Elapsed time 409.04
* CPU Elapsed time 6828
*/
// Sums each row of a SIZE x SIZE row-major matrix into result[row], staging
// the row through shared memory in batches. One thread per row; each block
// handles SIZE/BLOCK_COUNT consecutive rows.
template<typename T>
__global__ void sumMatrixRowShared(const float* matrix, T* result)
{
    // per-block staging buffer, partitioned into SHARED_SIZE slots per thread
    __shared__ float ss[SHARED_MEMORY / sizeof(T)];
    unsigned int idx = threadIdx.x;
    unsigned int block_idx = blockIdx.x;
    idx = idx + (SIZE/BLOCK_COUNT) * block_idx;  // global row handled by this thread
    result[idx] = 0;
    // elements of the row each thread stages per batch
    constexpr int SHARED_SIZE = SHARED_MEMORY / sizeof(T) / THREAD_PER_BLOCK;
    for (size_t batch_num=0; batch_num < SIZE / SHARED_SIZE; batch_num++)
    {
        // copy this batch of the row into this thread's shared-memory slice
        // (each thread reads/writes only its own slice, so no barrier is used)
        for (size_t i=0; i < SHARED_SIZE; i++)
        {
            ss[SHARED_SIZE * threadIdx.x + i] = matrix[idx * SIZE + batch_num * SHARED_SIZE + i];
        }
        // accumulate the staged batch into the global result
        for(size_t i=0; i < SHARED_SIZE; i++)
        {
            result[idx] += ss[SHARED_SIZE * threadIdx.x + i];
        }
    }
}
template <typename T>
void sumMatrixRowCPU(const float* matrix, T* result)
{
for(int idx = 0; idx < SIZE; idx++)
{
result[idx] = 0;
for(size_t i=0; i < SIZE; i++)
{
result[idx] = result[idx] + matrix[idx * SIZE + i];
}
}
}
// Benchmarks row-sum of a SIZE x SIZE matrix on the GPU (shared-memory
// kernel) and on the CPU, printing the first 10 results and both timings.
__host__ int main()
{
    // Allocate the matrix and the per-row result vector.
    auto* matrix = new float[SIZE * SIZE];
    auto* result = new float[SIZE];
    // Initialize: every element of row r holds the value r.
    // fix: the original used `int i` while SIZE*SIZE exceeds INT_MAX, so the
    // counter overflowed (undefined behavior) — use size_t.
    for (size_t i = 0; i < SIZE * SIZE; i++)
    {
        matrix[i] = int(i/SIZE);
    }
    for (size_t i = 0; i < SIZE; i++)
    {
        result[i] = 0;
    }
    float* gpu_matrix;
    float* gpu_result;
    // Allocate device buffers and copy the inputs over.
    cudaMalloc((void**)&gpu_matrix, sizeof(float) * SIZE * SIZE);
    cudaMalloc((void**)&gpu_result, sizeof(float) * SIZE);
    cudaMemcpy(gpu_matrix, matrix, sizeof(float) * SIZE * SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_result, result, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
    dim3 gridSize = dim3(BLOCK_COUNT, 1, 1);       // grid dimensions
    dim3 blockSize = dim3(THREAD_PER_BLOCK, 1, 1); // block dimensions
    // Launch the kernel, timed with CUDA events.
    cudaEvent_t kernel_start;
    cudaEventCreate(&kernel_start);
    cudaEventRecord(kernel_start, 0);
    sumMatrixRowShared<<<gridSize, blockSize>>>(gpu_matrix, gpu_result);
    cudaEvent_t syncEvent;
    cudaEventCreate(&syncEvent);
    cudaEventRecord(syncEvent, 0);
    cudaEventSynchronize(syncEvent);  // wait for the kernel to finish
    float time;
    cudaEventElapsedTime(&time, kernel_start, syncEvent);
    cudaMemcpy(result, gpu_result, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);
    // GPU results
    for (int i = 0; i < 10; i++)
    {
        printf("Element #%i: %.1f\n", i , result[i]);
    }
    std::cout << "GPU Elapsed time " << time << std::endl;
    auto t1 = std::chrono::high_resolution_clock::now();
    sumMatrixRowCPU(matrix, result);
    auto t2 = std::chrono::high_resolution_clock::now();
    std::cout << "CPU Elapsed time " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() <<std::endl;
    for (int i = 0; i < 10; i++)
    {
        printf("Element #%i: %.1f\n", i , result[i]);
    }
    // Release resources (fix: also destroy kernel_start, leaked before).
    cudaEventDestroy(kernel_start);
    cudaEventDestroy(syncEvent);
    cudaFree(gpu_matrix);
    cudaFree(gpu_result);
    delete[] result;
    delete[] matrix;
}
|
11,437 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
// c = a + b element-wise for the first n entries.
__global__ void vecAdd(double *a, double *b, double *c, int n){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Adds two random host vectors on the GPU and prints the results.
int main(int argc, char** argv){
    int i, n = 100;
    double *ha, *hb, *hc, *a, *b, *c;
    int bytes = sizeof(double)*n;
    // host buffers
    ha = (double*)malloc(bytes);
    hb = (double*)malloc(bytes);
    hc = (double*)malloc(bytes);
    // device buffers
    cudaMalloc(&a, bytes);
    cudaMalloc(&b, bytes);
    cudaMalloc(&c, bytes);
    for(i=0; i<n; i++){
        ha[i] = hb[i] = rand()%50;
    }
    cudaMemcpy(a, ha, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b, hb, bytes, cudaMemcpyHostToDevice);
    int blocksize, gridsize;
    blocksize = 1024;
    gridsize = (int)ceil((float)n/blocksize);  // ceil-div so every element is covered
    vecAdd<<<gridsize, blocksize>>>(a, b, c, n);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(hc, c, bytes, cudaMemcpyDeviceToHost);
    for(i=0; i<n; i++){
        printf("%f + %f = %f\n", ha[i], hb[i], hc[i]);
    }
    // fix: the original leaked all six buffers
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    free(ha);
    free(hb);
    free(hc);
    printf("\nDONE\n");
    return 0;
} |
11,438 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#define N 10
// Squares the first N ints of a in place.
__global__ void square_array(int* a) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    a[i] *= a[i];
}
/* Squares the integers 0..N-1 on the device and prints the results. */
int main(void) {
    int host[N];
    int* device;
    int i;
    dim3 grid, block;
    size_t nbytes = N * sizeof(int);
    /* launch config: nthreads per block, ceil(N / nthreads) blocks.
       fix: removed the dead stores that first set every grid/block
       dimension to 1 — dim3 already defaults to (1,1,1) and x was
       immediately overwritten. */
    int nthreads = 4;
    int nblocks = N/nthreads + !!(N % nthreads);
    grid.x = nblocks;
    block.x = nthreads;
    cudaMalloc(&device, nbytes);
    for (i = 0; i != N; ++i)
        host[i] = (int)i;
    cudaMemcpy(device, host, nbytes, cudaMemcpyHostToDevice);
    square_array<<<grid, block>>>(device);
    /* blocking copy synchronizes with the kernel */
    cudaMemcpy(host, device, nbytes, cudaMemcpyDeviceToHost);
    cudaFree(device);
    for (i = 0; i != N; ++i) {
        printf("%d: %d\n", i, host[i]);
    }
}
|
11,439 | #include <stdio.h>
#include <cuda_runtime.h>
// c = a + b element-wise; threads past ndata exit immediately.
//
// Linear-index recap for an arbitrary launch: fold the six dims
// (gridDim.z/y/x, blockDim.z/y/x) with their indexes as
//   position = 0; for each (dim, index): position = position * dim + index
__global__ void vector_add_kernel(const float* a, const float* b, float* c, int ndata){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < ndata){
        c[i] = a[i] + b[i];
    }
}
// Host wrapper: launches vector_add_kernel on the default stream and reports
// any launch error. a, b, c are device pointers of length ndata.
void vector_add(const float* a, const float* b, float* c, int ndata){
    const int nthreads = 512;
    int block_size = ndata < nthreads ? ndata : nthreads; // if ndata < nthreads, ndata threads suffice
    int grid_size = (ndata + block_size - 1) / block_size; // number of blocks needed to cover every element
    printf("block_size = %d, grid_size = %d\n", block_size, grid_size);
    vector_add_kernel<<<grid_size, block_size, 0, nullptr>>>(a, b, c, ndata);
    // After the launch, read the error code to learn whether it failed.
    // Both cudaPeekAtLastError and cudaGetLastError return the last error;
    // cudaGetLastError additionally CLEARS it (a second call returns
    // success), while cudaPeekAtLastError leaves the sticky error in place.
    cudaError_t code = cudaPeekAtLastError();
    if(code != cudaSuccess){
        const char* err_name = cudaGetErrorName(code);
        const char* err_message = cudaGetErrorString(code);
        printf("kernel error %s:%d test_print_kernel failed. \n code = %s, message = %s\n", __FILE__, __LINE__, err_name, err_message);
    }
} |
11,440 | //
// Created by gautam on 17/04/20.
//
#include <algorithm>
#include <fstream>
#include "utils.cuh"
std::string const utils::DATABASE_DIR = "../DB";
std::string const utils::DATABASE_FILE_PATH = utils::DATABASE_DIR + "/Database";
std::vector<std::string> utils::tables = std::vector<std::string>();
// Removes leading whitespace from s in place.
void utils::ltrim(std::string &s) {
    auto firstNonSpace = std::find_if(s.begin(), s.end(), [](int ch) {
        return !std::isspace(ch);
    });
    s.erase(s.begin(), firstNonSpace);
}
// Removes trailing whitespace from s in place.
void utils::rtrim(std::string &s) {
    auto lastNonSpace = std::find_if(s.rbegin(), s.rend(), [](int ch) {
        return !std::isspace(ch);
    });
    s.erase(lastNonSpace.base(), s.end());
}
// Strips whitespace from both ends of s in place.
void utils::trim(std::string &s) {
    rtrim(s);
    ltrim(s);
}
// Returns the text up to the first space (the whole string if none).
std::string utils::getFistWord(std::string &query) {
    auto spacePos = query.find(' ');
    return query.substr(0, spacePos);
}
// Lower-cases the string in place.
void utils::toLower(std::string &upper) {
    for (auto &ch : upper)
        ch = std::tolower((unsigned char)ch);
}
// Reports an invalid query on stdout.
void utils::invalidQuery(std::string query) {
    // fix: the original printed `"<query>"is not ...` with no space
    std::cout << "\"" << query << "\" is not a valid query" << std::endl;
}
// Reports an invalid query along with an error description.
void utils::invalidQuery(std::string &query, std::string &errString) {
    // fix: the original printed `"<query>"is not ...` with no space
    std::cout << "\"" << query << "\" is not a valid query" << std::endl;
    std::cout << "Error: " << errString << std::endl;
}
// Path of the ".mdata" metadata file for tableName.
std::string utils::getMetadataFileName(std::string &tableName) {
    std::string path = DATABASE_DIR;
    path += "/" + tableName + ".mdata";
    return path;
}
// Path of the ".data" row file for tableName.
std::string utils::getDataFileName(std::string &tableName) {
    std::string path = DATABASE_DIR;
    path += "/" + tableName + ".data";
    return path;
}
// Path of the ".temp" scratch file for tableName.
std::string utils::getTempFileName(std::string &tableName) {
    std::string path = DATABASE_DIR;
    path += "/" + tableName + ".temp";
    return path;
}
// True iff filename can be opened for reading.
bool utils::fileExists(std::string &filename) {
    FILE *fp = fopen(filename.c_str(), "r");
    if (fp == nullptr) {
        return false;
    }
    fclose(fp);
    return true;
}
// Populates the in-memory `tables` catalog from the database file, if present.
void utils::loadTables() {
    std::string filename = DATABASE_FILE_PATH;
    std::ifstream fin(filename);
    if (!utils::fileExists(filename))
        return;
    std::string tableName;
    while (fin >> tableName)
        tables.push_back(tableName);
}
// True iff tableName is in the catalog, lazily loading it on first use.
bool utils::tableExists(std::string &tableName) {
    if (tables.empty()) {
        loadTables();
        if(tables.empty()) {
            return false;
        }
    }
    // std::find replaces the hand-rolled index loop (and avoids the
    // signed/unsigned comparison of the original `int i < tables.size()`)
    return std::find(tables.begin(), tables.end(), tableName) != tables.end();
}
// Registers tableName in the in-memory catalog (not persisted here).
void utils::addTable(std::string &tableName) {
    tables.emplace_back(tableName);
}
// Removes tableName from the in-memory catalog; no-op if absent.
void utils::dropTable(std::string &tableName){
    auto it = std::find(tables.begin(), tables.end(), tableName);
    // fix: when the name was absent the original erased begin()+size(),
    // i.e. the end() iterator — undefined behavior
    if (it != tables.end()) {
        tables.erase(it);
    }
}
// Persists the in-memory catalog, one table name per line.
void utils::writeDatabase() {
    std::string filename = DATABASE_FILE_PATH;
    std::ofstream fout(filename);
    for (const auto& name : tables) {
        fout << name << std::endl;
    }
    fout.close();
}
// Prints one packed row, interpreting consecutive bytes according to the
// column types in `cols`; fields are comma-separated, row ends with newline.
// TYPE_BOOL / TYPE_DATETIME / TYPE_INVALID are silently skipped (and do not
// advance the byte offset).
void utils::printRow(void *row, std::vector<ColType> &cols) {
    int start = 0;  // byte offset of the current field within the row
    for (const auto &c : cols) {
        // printf("Start: %d\n", start);
        switch (c.type) {
            case TYPE_INT: {
                int temp = *((int *) ((char *) row + start));
                printf("%d", temp);
                start += sizeof(int);
                break;
            }
            case TYPE_FLOAT: {
                float temp = *((float *) ((char *) row + start));
                printf("%f", temp);
                start += sizeof(float);
                break;
            }
            case TYPE_BOOL:
                break;
            case TYPE_VARCHAR: {
                // NUL-terminated string stored in a fixed c.size-byte slot
                char *temp = (char *) row + start;
                printf("%s", temp);
                start += c.size;
                break;
            }
            case TYPE_DATETIME:
                break;
            case TYPE_INVALID:
                break;
        }
        // comma between fields, none after the last column
        if (&c != &cols[cols.size() - 1]) {
            printf(", ");
        }
    }
    printf("\n");
}
// Prints numRows fixed-size rows starting at data, one per line.
void utils::printMultiple(void *data, std::vector<ColType> &cols, int rowSize, int numRows) {
    printf("\n");
    char *rowPtr = (char *)data;
    for (int r = 0; r < numRows; ++r, rowPtr += rowSize) {
        printRow(rowPtr, cols);
    }
}
|
11,441 | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdio>
constexpr size_t BLOCKSIZE_x = 16;
constexpr size_t BLOCKSIZE_y = 16;
constexpr size_t N = 32;
constexpr size_t M = 16;
constexpr size_t W = 4;
//////////////////
// cuda_err_chk //
//////////////////
#define cuda_err_chk(ans) \
{ \
gpu_assert((ans), __FILE__, __LINE__); \
}
// Aborts with a diagnostic if a CUDA call failed; used via cuda_err_chk().
inline void gpu_assert(cudaError_t code, char const* file, int line)
{
    if (code != cudaSuccess)
    {
        // fix: the format string ended in "%dn" (missing backslash), so a
        // literal 'n' was printed instead of a newline
        fprintf(stderr, "assert failed: %s %s %d\n", cudaGetErrorString(code),
                file, line);
        abort();
    }
}
//////////////
// i_div_up //
//////////////
// Ceiling division for non-negative a and positive b.
int i_div_up(int a, int b)
{
    int q = a / b;
    return (a % b) ? q + 1 : q;
}
////////////////////
// test_kernel_3d //
////////////////////
// Squares every double of a W-deep, N-row, M-column pitched 3D allocation
// in place. Launch: 2D grid covering (x = column, y = row); each thread
// walks all W slices for its (x, y) cell.
__global__ void test_kernel_3d(cudaPitchedPtr dev_pitched_ptr)
{
    int tid_x = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int tid_y = blockIdx.y * blockDim.y + threadIdx.y;  // row
    char* d_ptr = (char*) dev_pitched_ptr.ptr;
    size_t pitch = dev_pitched_ptr.pitch;   // bytes per padded row
    size_t slice_pitch = pitch * N;         // bytes per 2D slice
    for (int w = 0; w < W; ++w)
    {
        char* slice = d_ptr + w * slice_pitch;
        double* row = (double*) (slice + tid_y * pitch);
        row[tid_x] = row[tid_x] * row[tid_x];
    }
}
//////////
// main //
//////////
// Fills an N x M x W array with 3.0, squares it on the device through a
// pitched 3D allocation (cudaMalloc3D / cudaMemcpy3D), copies it back and
// prints every element (expected value 9.0).
int main()
{
    double a[N][M][W];
    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < M; ++j)
        {
            for (int w = 0; w < W; ++w)
            {
                a[i][j][w] = 3.f;
                //printf("row %i column %i depth %i value %f n", i, j, w, a[i][j][w]);
            }
        }
    }
    // extent width is in BYTES for non-array allocations
    cudaExtent extent = make_cudaExtent(M * sizeof(double), N, W);
    cudaPitchedPtr d_pitched_ptr;
    cuda_err_chk(cudaMalloc3D(&d_pitched_ptr, extent));
    // host -> device copy of the full 3D volume
    cudaMemcpy3DParms p = {0};
    p.srcPtr.ptr = a;
    p.srcPtr.pitch = M * sizeof(double);  // host rows are tightly packed
    p.srcPtr.xsize = M;
    p.srcPtr.ysize = N;
    p.dstPtr.ptr = d_pitched_ptr.ptr;
    p.dstPtr.pitch = d_pitched_ptr.pitch; // device rows use the padded pitch
    p.dstPtr.xsize = M;
    p.dstPtr.ysize = N;
    p.extent.width = M * sizeof(double);
    p.extent.height = N;
    p.extent.depth = W;
    p.kind = cudaMemcpyHostToDevice;
    cuda_err_chk(cudaMemcpy3D(&p));
    dim3 grid_size(i_div_up(M, BLOCKSIZE_x), i_div_up(N, BLOCKSIZE_y));
    dim3 block_size(BLOCKSIZE_y, BLOCKSIZE_x);
    test_kernel_3d<<<grid_size, block_size>>>(d_pitched_ptr);
    cuda_err_chk(cudaPeekAtLastError());
    cuda_err_chk(cudaDeviceSynchronize());
    // reuse the same parms for the device -> host copy, swapping endpoints
    p.srcPtr.ptr = d_pitched_ptr.ptr;
    p.srcPtr.pitch = d_pitched_ptr.pitch;
    p.dstPtr.ptr = a;
    p.dstPtr.pitch = M * sizeof(double);
    p.kind = cudaMemcpyDeviceToHost;
    cuda_err_chk(cudaMemcpy3D(&p));
    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < M; ++j)
        {
            for (int w = 0; w < W; ++w)
            {
                printf("row %3i column %3i depth %3i value %f\n", i, j, w,
                       a[i][j][w]);
            }
        }
    }
    return 0;
}
|
11,442 | #include <cassert>
#include <iostream>
#include <cuda_runtime.h>
// RAII pair of CUDA events for millisecond-resolution GPU timing on the
// default stream. Usage: start(); ...work...; stop(); ms = count();
class CudaTimer {
private:
    cudaEvent_t _start;
    cudaEvent_t _stop;
public:
    // Creates both events up front so start()/stop() are cheap.
    CudaTimer() noexcept {
        cudaEventCreate(&_start);
        cudaEventCreate(&_stop);
    }
    ~CudaTimer() noexcept {
        cudaEventDestroy(_start);
        cudaEventDestroy(_stop);
    }
    // Records the start event on the default stream.
    void start() noexcept {
        cudaEventRecord(_start, 0);
    }
    // Records the stop event and blocks until it has completed.
    void stop() noexcept {
        cudaEventRecord(_stop, 0);
        cudaEventSynchronize(_stop);
    }
    // Milliseconds elapsed between the recorded start and stop events.
    float count() noexcept {
        float time = 0.0;
        cudaEventElapsedTime(&time, _start, _stop);
        return time;
    }
};
// rownnz[r] = number of nonzero entries in row r of the m x n matrix A
// (column-major, leading dimension lda). One thread per row.
__global__ void count_row_nnz(int m, int n, double *A, int lda, int *rownnz) {
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    if (row >= m)
        return;
    int nnz = 0;
    double *p = A + row;           // walk the row with stride lda
    for (int col = 0; col < n; ++col) {
        if (*p)
            ++nnz;
        p += lda;
    }
    rownnz[row] = nnz;
}
// Per-block exclusive scan (prefix sum) of `array`: each block scans a
// 2*blockDim.x-element segment in shared memory (up-sweep then down-sweep)
// and writes its segment total to buffer[blockIdx.x] for a later block_add
// pass. Elements past n are padded with 0.
__global__ void block_scan(int n, int *array, int *buffer) {
    extern __shared__ int shm[]; // size of shm: 2 * blockDim.x * sizeof(int)
    // Initialize shm: each thread loads two elements (zero-padded at the tail)
    int tid = threadIdx.x + (blockIdx.x << 1) * blockDim.x;
    if (tid < n) {
        shm[threadIdx.x] = array[tid];
    } else {
        shm[threadIdx.x] = 0;
    }
    if (tid + blockDim.x < n) {
        shm[threadIdx.x + blockDim.x] = array[tid + blockDim.x];
    } else {
        shm[threadIdx.x + blockDim.x] = 0;
    }
    __syncthreads();
    int num_elements = blockDim.x << 1;
    // Up-sweep (reduce) phase: build partial sums up the implicit tree.
    // NOTE(review): this up/down-sweep indexing (idx = 2*threadIdx.x,
    // idx += idx + 1 per level) is a nonstandard formulation of the
    // Blelloch scan; it has no per-level barrier inside the loop.
    int idx = threadIdx.x << 1, shift = 1;
    while (shift < num_elements) {
        if (idx + shift < num_elements) {
            shm[idx + shift] += shm[idx];
        }
        idx += idx + 1;
        shift <<= 1;
    }
    __syncthreads();
    // Down-sweep phase: clear the root and propagate prefixes back down.
    int sum = shm[num_elements - 1];   // segment total, published below
    if (threadIdx.x == 0) {
        shm[num_elements - 1] = 0;
    }
    idx = (idx - 1) >> 1;
    shift >>= 1;
    while (shift > 0) {
        if (idx + shift < num_elements) {
            int tmp = shm[idx + shift];
            shm[idx + shift] += shm[idx];
            shm[idx] = tmp;
        }
        idx = (idx - 1) >> 1;
        shift >>= 1;
        __syncthreads();
    }
    __syncthreads();
    // Write the scanned segment back and publish the block total.
    if (tid < n) {
        array[tid] = shm[threadIdx.x];
    }
    if (tid + blockDim.x < n) {
        array[tid + blockDim.x] = shm[threadIdx.x + blockDim.x];
    }
    if (threadIdx.x == 0) {
        buffer[blockIdx.x] = sum;
    }
}
// Adds buf[blockIdx.x] to both elements this thread covers in its
// 2*blockDim.x segment (used to combine per-block scan results).
__global__ void block_add(int n, int *array, int *buf) {
    int base = blockIdx.x * (blockDim.x << 1) + threadIdx.x;
    int offset = buf[blockIdx.x];
    if (base < n)
        array[base] += offset;
    int second = base + blockDim.x;
    if (second < n)
        array[second] += offset;
}
// Device-wide exclusive scan of `array` (length n, in place): per-block
// scans via block_scan, then block totals are propagated with block_add.
// NOTE(review): block totals are only added to the blocks after the first
// in a single pass; this handles one level of blocks (n up to 2*bs*gs with
// unscanned `buffer`) — verify for multi-level sizes.
void exclusive_scan(int n, int *array) {
    int bs = 1024;
    int gs = (n - 1) / (bs << 1) + 1;  // each block covers 2*bs elements
    int *buffer;                       // per-block segment totals
    cudaError_t cudaErr = cudaMalloc(reinterpret_cast<void **>(&buffer), gs * sizeof(int));
    assert(cudaErr == cudaSuccess);
#ifndef NDEBUG
    CudaTimer scanTimer;
    scanTimer.start();
#endif
    block_scan<<<gs , bs, 2 * bs * sizeof(int)>>>(n, array, buffer);
    assert(cudaGetLastError() == cudaSuccess);
#ifndef NDEBUG
    scanTimer.stop();
    std::cout << scanTimer.count() << ",";
#endif
    if (gs > 1) {
#ifndef NDEBUG
        CudaTimer addTimer;
        addTimer.start();
#endif
        // skip the first segment; add each block's predecessor total
        block_add<<<gs - 1, bs>>>(n - (bs << 1), array + (bs << 1), buffer);
        assert(cudaGetLastError() == cudaSuccess);
#ifndef NDEBUG
        addTimer.stop();
        std::cout << addTimer.count() << ",";
#endif
    }
    cudaFree(buffer);
}
// Writes the CSR colidx/values entries for one dense row per thread.
// A is column-major with leading dimension lda; rowptr must already hold
// the exclusive prefix of per-row nonzero counts.
__global__ void fill_csr_values(
        int m, int n,
        double *A, int lda,
        int *rowptr, int *colidx, double *values) {
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    if (row >= m)
        return;
    int out = rowptr[row];       // next free slot for this row
    double *p = A + row;         // walk the row with stride lda
    for (int col = 0; col < n; ++col) {
        double v = *p;
        if (v) {
            colidx[out] = col;
            values[out] = v;
            ++out;
        }
        p += lda;
    }
}
// Converts a dense m x n column-major matrix A (leading dimension lda) on
// the device into CSR form: allocates and fills *rowptr (m+1 entries),
// *colidx and *values (nnz entries each). Caller owns the allocations.
void full_to_csr(
        int m, int n,
        double *A, int lda,
        int **rowptr, int **colidx, double **values) {
    cudaError_t cudaErr;
    cudaErr = cudaMalloc(rowptr, (m + 1) * sizeof(int));
    assert(cudaErr == cudaSuccess);
#ifndef NDEBUG
    CudaTimer cntNnzTimer;
    cntNnzTimer.start();
#endif
    // Launch kernel to get number of nnz per row into rowptr[0..m-1]
    int bs = 1024, gs = (m - 1) / bs + 1;
    count_row_nnz<<<gs, bs>>>(m, n, A, lda, *rowptr);
    assert(cudaGetLastError() == cudaSuccess);
#ifndef NDEBUG
    cntNnzTimer.stop();
    std::cout << cntNnzTimer.count() << ",";
#endif
    // Exclusive scan turns counts into row offsets; rowptr[m] becomes nnz.
    exclusive_scan(m + 1, *rowptr);
    int nnz = 0;
    cudaErr = cudaMemcpy(&nnz, *rowptr + m, sizeof(int), cudaMemcpyDeviceToHost);
    assert(cudaErr == cudaSuccess);
    cudaErr = cudaMalloc(colidx, nnz * sizeof(int));
    assert(cudaErr == cudaSuccess);
    cudaErr = cudaMalloc(values, nnz * sizeof(double));
    assert(cudaErr == cudaSuccess);
#ifndef NDEBUG
    CudaTimer fillValTimer;
    fillValTimer.start();
#endif
    fill_csr_values<<<gs, bs>>>(m, n, A, lda, *rowptr, *colidx, *values);
    assert(cudaGetLastError() == cudaSuccess);
#ifndef NDEBUG
    fillValTimer.stop();
    std::cout << fillValTimer.count() << std::endl;
#endif
}
|
11,443 | #include <cuda.h>
#include <iostream>
#include <vector>
#include <ctime>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/device_vector.h>
#define TPB 1024
#define INF 99999999
using namespace std;
// Adjacency-list vertex record: `start` indexes into the edge/weight
// arrays (e_dev/w_dev); `adj` is the number of outgoing edges.
struct Node {
    unsigned int start;
    unsigned int adj;
};
// Dijkstra initialization: source vertex 0 gets cost 0, enters the frontier
// and leaves the unresolved set; every other vertex below N gets cost INF,
// is unresolved, and is outside the frontier.
__global__ void intialize(unsigned int *c_dev,
                          bool *u_dev,
                          bool *f_dev,
                          unsigned int N) {
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        c_dev[0] = 0;
        f_dev[0] = true;
        u_dev[0] = false;
    } else if (tid < N) {
        c_dev[tid] = INF;
        f_dev[tid] = false;
        u_dev[tid] = true;
    }
}
// Edge relaxation: each frontier vertex atomically lowers the tentative
// cost of its unresolved successors to min(old, cost[tid] + edge weight).
__global__ void relax_f(unsigned int *c_dev,
                        bool *u_dev,
                        bool *f_dev,
                        unsigned int *e_dev,
                        unsigned int *w_dev,
                        Node *v_dev,
                        unsigned int N) {
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N) {
        if (f_dev[tid]) {
            // iterate this vertex's slice of the edge arrays
            for (int i = v_dev[tid].start;
                 i < v_dev[tid].start + v_dev[tid].adj;
                 i++) {
                unsigned int succ = e_dev[i];
                if (u_dev[succ]) {
                    // atomicMin: several frontier vertices may relax succ
                    atomicMin(&c_dev[succ], c_dev[tid] + w_dev[i]);
                }
            }
        }
    }
}
// Rebuilds the frontier: vertices whose cost equals the current global
// minimum (mssp[0]) become the new frontier and leave the unresolved set;
// all other frontier flags are cleared.
__global__ void update(unsigned int * c_dev,
                       bool *f_dev, bool *u_dev,
                       unsigned int *mssp, unsigned int N) {
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;
    bool selected = (c_dev[tid] == mssp[0]);
    f_dev[tid] = selected;
    if (selected) {
        u_dev[tid] = false;
    }
}
// Block-level minimum of the tentative costs of unresolved vertices;
// each block reduces 2*blockDim.x elements and writes its minimum to
// mssp[blockIdx.x] (INF when the block covers no unresolved vertex).
__global__ void minimum(unsigned int *c_dev,
                        bool *u_dev,
                        unsigned int *mssp,
                        unsigned int N) {
    __shared__ unsigned int sdata[TPB];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * 2 * blockDim.x + threadIdx.x;
    unsigned int j = i + blockDim.x;
    // fix: out-of-range lanes load INF instead of being skipped; the
    // original also dropped element i whenever only j fell out of range
    unsigned int A = (i < N && u_dev[i]) ? c_dev[i] : INF;
    unsigned int B = (j < N && u_dev[j]) ? c_dev[j] : INF;
    sdata[tid] = min(A, B);
    // fix: the barrier must be reached by every thread of the block; the
    // original placed the whole reduction (including __syncthreads) inside
    // `if (j < N && i < N)`, which is undefined behavior on tail blocks and
    // could leave mssp[blockIdx.x] unwritten
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] = min(sdata[tid], sdata[tid + s]);
        }
        __syncthreads();
    }
    if (tid == 0) {
        mssp[blockIdx.x] = sdata[0];
    }
}
// Dijkstra with compound frontiers: repeatedly relax the frontier, find the
// minimum tentative cost among unresolved vertices (block-wise `minimum`
// kernel + thrust::min_element over the block results), and promote every
// vertex at that cost into the new frontier. Stops when the minimum is INF.
void DA2CF(unsigned int *c_dev,
           bool *u_dev, bool *f_dev,
           unsigned int *e_dev,
           unsigned int *w_dev,
           Node *v_dev,
           unsigned int N, vector<unsigned int> &P) {
    unsigned int extrablock = N % TPB > 0 ? 1 : 0;
    unsigned int blocks = (extrablock + N / TPB) / 2;
    // fix: `minimum` halves the grid, so small N produced a 0-block launch
    if (blocks == 0) blocks = 1;
    unsigned int *mssp_dev;
    unsigned int *dev_min_list;
    cudaMalloc( (void**)&mssp_dev, sizeof(unsigned int) );
    cudaMalloc( (void**)&dev_min_list, blocks * sizeof(unsigned int) );
    intialize<<<N / TPB + extrablock, TPB>>>(c_dev, u_dev, f_dev, N);
    cudaDeviceSynchronize();
    unsigned int mssp = 0;
    while (mssp != INF) {
        mssp = INF;
        relax_f<<< N / TPB + extrablock, TPB >>>(c_dev, u_dev, f_dev, e_dev, w_dev, v_dev, N);
        cudaDeviceSynchronize();
        minimum<<< blocks, TPB >>>(c_dev, u_dev, dev_min_list, N);
        cudaDeviceSynchronize();
        // reduce the per-block minima on the device, read the winner back
        thrust::device_ptr<unsigned int> dev_ptr = thrust::device_pointer_cast(dev_min_list);
        thrust::device_ptr<unsigned int> min_ptr =
            thrust::min_element(dev_ptr, dev_ptr + blocks);
        unsigned int min_value = min_ptr[0];
        mssp = min_value;
        cudaMemcpy( mssp_dev, &min_value, sizeof(unsigned int), cudaMemcpyHostToDevice);
        update<<< N / TPB + extrablock, TPB >>>(c_dev, f_dev, u_dev, mssp_dev, N);
        cudaDeviceSynchronize();
    }
    // fix: the original called cudaFree(&ptr), passing the address of the
    // host pointer variable instead of the device allocation (invalid
    // argument + leak)
    cudaFree(dev_min_list);
    cudaFree(mssp_dev);
}
// Reads a graph (N vertices of fixed out-degree, M edges as "src dst w"
// triples — src is read but unused, edges assumed grouped by source),
// builds the adjacency arrays, runs DA2CF on the GPU and prints runtime.
int main() {
    unsigned int N, degree, M;
    cin >> N;
    cin >> degree;
    cin >> M;
    vector<unsigned int> c_host(N);
    vector<Node> v_host(N);
    vector<unsigned int> e_host(M);  // destination vertex per edge
    vector<unsigned int> w_host(M);  // weight per edge
    vector<unsigned int> P;
    unsigned int *c_dev, *w_dev, *e_dev;
    bool *f_dev, *u_dev;
    Node *v_dev;
    for (unsigned int i = 0; i < M; i++) {
        unsigned int ia, ib, w;
        cin >> ia >> ib >> w;
        e_host[i] = ib;
        w_host[i] = w;
    }
    // first N-degree vertices have exactly `degree` edges each
    for (unsigned int i = 0; i < N - degree; i++) {
        v_host[i].start = i * degree;
        v_host[i].adj = degree;
    }
    // the last `degree` vertices have decreasing out-degree
    unsigned int dec = degree - 1;
    for (unsigned int z = N - degree; z < N; z++) {
        v_host[z].start = v_host[z - 1].start + dec + 1;
        v_host[z].adj = dec;
        dec--;
    }
    // allocate frontiers, unresolved and cost vectors on the GPU
    cudaMalloc( (void**)&c_dev, N * sizeof(unsigned int) );
    cudaMalloc( (void**)&f_dev, N * sizeof(bool) );
    cudaMalloc( (void**)&u_dev, N * sizeof(bool) );
    cudaMalloc( (void**)&v_dev, N * sizeof(Node) );
    cudaMalloc( (void**)&e_dev, M * sizeof(unsigned int) );
    cudaMalloc( (void**)&w_dev, M * sizeof(unsigned int) );
    // copy data to GPU memory
    cudaMemcpy( v_dev, v_host.data(), N * sizeof(Node), cudaMemcpyHostToDevice);
    cudaMemcpy( e_dev, e_host.data(), M * sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy( w_dev, w_host.data(), M * sizeof(unsigned int), cudaMemcpyHostToDevice);
    // execute dijkstra compound frontiers, timed with CUDA events
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventRecord(start, 0);
    DA2CF(c_dev, u_dev, f_dev, e_dev, w_dev, v_dev, N, P);
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << elapsedTime / 1000.0f << " " << N << endl;
    // free allocated memory on the GPU
    cudaFree(c_dev);
    cudaFree(f_dev);
    cudaFree(u_dev);
    cudaFree(v_dev);
    cudaFree(w_dev);
    cudaFree(e_dev);
    return 0;
}
|
11,444 | //
// Created by kindr on 2021/5/16.
//
#include "floatPrecision.cuh"
#include "stdio.h"
#include "cuda_fp16.h"
const int n = 1 << 25;
// Adds 0.125 to an fp16 accumulator n (=2^25) times and prints the result;
// part of the floatPrecision demo contrasting half/float/double rounding.
// NOTE(review): the half += double mixed-type operator comes from
// cuda_fp16.h overloads — exact rounding behavior depends on that header.
__global__
void halfAdd() {
    half x = 0;
    for (int i = 0; i < n; i++) {
        x += 0.125;
    }
    printf("%f\n", __half2float(x));
}
// Adds 0.125 to a float accumulator n times and prints it (precision demo).
__global__
void floatAdd() {
    float acc = 0;
    int i = 0;
    while (i < n) {
        acc += 0.125;
        ++i;
    }
    printf("%f\n", acc);
}
// Adds 0.125 to a double accumulator n times and prints it (precision demo).
__global__
void doubleAdd() {
    double acc = 0;
    int i = 0;
    while (i < n) {
        acc += 0.125;
        ++i;
    }
    printf("%f\n", acc);
}
// Runs the half/float/double accumulation demos and waits so their
// device-side printf output is flushed.
void floatPrecision() {
    const dim3 one(1);
    halfAdd<<<one, one>>>();
    floatAdd<<<one, one>>>();
    doubleAdd<<<one, one>>>();
    cudaDeviceSynchronize();
}
|
11,445 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
// Packs three separate channel planes (rows pitched by i_size bytes) into
// an RGBA uchar4 buffer; alpha is forced to 255. One thread per pixel.
__global__ void Plot_kernel(uchar4 *ptr, unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size)
{
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    int y = threadIdx.y + (blockIdx.y * blockDim.y);
    int offset = x + y * blockDim.x * gridDim.x;  // linear pixel index
    unsigned char* f_r, *f_g, *f_b;
    // i_size is the row pitch (in bytes) of the channel planes
    f_r = (unsigned char*)((char*)R_input + y*i_size);
    f_g = (unsigned char*)((char*)G_input + y*i_size);
    f_b = (unsigned char*)((char*)B_input + y*i_size);
    ptr[offset].x = f_r[x];
    ptr[offset].y = f_g[x];
    ptr[offset].z = f_b[x];
    ptr[offset].w = 255;
}
// floor(log2(N)) for N > 0: position of the highest set bit.
__device__ int log2(int N)
{
    int bits = 0;
    for (int v = N; v != 0; v >>= 1)
        bits++;
    return bits - 1;
}
// Reverses the low log2(N) bits of n (bit-reversal index for the FFT).
__device__ int reverse(int N, int n)
{
    // fix: hoist the loop-invariant bit count; the original called
    // log2(N) twice per iteration
    int bits = log2(N);
    int p = 0;
    for(int j = 1; j <= bits; j++)
    {
        if(n & (1 << (bits - j)))
            p |= 1 << (j - 1);
    }
    return p;
}
// Bit-reversal permutation of column x (along the row axis) of the complex
// grid, staged through the real_d_out/imagi_d_out scratch buffers.
__device__ void ordina_x(float *complex_r, float *complex_i,float *real_d_out, float *imagi_d_out,int row, int col, int x)
{
    int N = row;
    for (int i = 0; i < N; i++)
    {
        int src = reverse((int)N, i);
        real_d_out[i*col + x] = complex_r[src*col + x];
        imagi_d_out[i*col + x] = complex_i[src*col + x];
    }
    // copy the permuted column back in place
    for (int i = 0; i < N; i++)
    {
        complex_r[i*col + x] = real_d_out[i*col + x];
        complex_i[i*col + x] = imagi_d_out[i*col + x];
    }
}
// Bit-reversal permutation of row y (along the column axis) of the complex
// grid, staged through the real_d_out/imagi_d_out scratch buffers.
// NOTE: permutes `row` elements along the row, mirroring ordina_x.
__device__ void ordina_y(float *complex_r, float *complex_i,float *real_d_out, float *imagi_d_out,int row, int col, int y)
{
    int N = row;
    for (int i = 0; i < N; i++)
    {
        int src = reverse((int)N, i);
        real_d_out[y*col + i] = complex_r[y*col + src];
        imagi_d_out[y*col + i] = complex_i[y*col + src];
    }
    // copy the permuted row back in place
    for (int i = 0; i < N; i++)
    {
        complex_r[y*col + i] = real_d_out[y*col + i];
        complex_i[y*col + i] = imagi_d_out[y*col + i];
    }
}
// In-place iterative radix-2 FFT butterflies along column x (length row);
// assumes the column was already bit-reversal permuted (ordina_x).
__device__ void Func_FFT_X(float *complex_r, float *complex_i,int row, int col, int x)
{
    int n = 1, N = row;          // n: current butterfly half-span
    int a = N/2;                 // twiddle stride for this stage
    float temp_real, temp_imagi;
    float t_r, t_i, a_r, a_i;
    for(int j = 0; j < log2(N); j++)   // one pass per FFT stage
    {
        for (int i = 0; i < N; i++)
        {
            if(!(i & n))               // i is the top element of a butterfly
            {
                temp_real = complex_r[x + (i * col)];
                temp_imagi = complex_i[x + (i * col)];
                // twiddle factor e^(-2*pi*k/N), k = (i*a) mod (n*a)
                a_r = cos((-2) * ((i * a) % (n * a)) * PI / N);
                a_i = sin((-2) * ((i * a) % (n * a)) * PI / N);
                // complex multiply twiddle * bottom element
                t_r = (a_r*complex_r[x + (i + n)*col]) - (a_i*complex_i[x + (i + n)*col]);
                t_i = (a_i*complex_r[x + (i + n)*col]) + (a_r*complex_i[x + (i + n)*col]);
                complex_r[x + (i * col)] += t_r;
                complex_i[x + (i * col)] += t_i;
                complex_r[x + (i + n)*col] = temp_real - t_r;
                complex_i[x + (i + n)*col] = temp_imagi - t_i;
            }
        }
        n *= 2;
        a = a/2;
    }
}
// In-place iterative radix-2 FFT butterflies along row y (length col);
// assumes the row was already bit-reversal permuted (ordina_y).
__device__ void Func_FFT_Y(float *complex_r, float *complex_i,int row, int col, int y)
{
    int n = 1, N = col;          // n: current butterfly half-span
    int a = N/2;                 // twiddle stride for this stage
    float temp_real, temp_imagi;
    float t_r, t_i, a_r, a_i;
    for(int j = 0; j < log2(N); j++)   // one pass per FFT stage
    {
        for (int i = 0; i < N; i++)
        {
            if(!(i & n))               // i is the top element of a butterfly
            {
                temp_real = complex_r[i + (y * col)];
                temp_imagi = complex_i[i + (y * col)];
                // twiddle factor e^(-2*pi*k/N), k = (i*a) mod (n*a)
                a_r = cos(-2 * ((i * a) % (n * a)) * PI/ N);
                a_i = sin(-2 * ((i * a) % (n * a)) * PI/ N);
                // complex multiply twiddle * bottom element
                t_r = (a_r*complex_r[(i + n) + y*col]) - (a_i*complex_i[(i + n) + y*col]);
                t_i = (a_i*complex_r[(i + n) + y*col]) + (a_r*complex_i[(i + n) + y*col]);
                complex_r[i + (y * col)] += t_r;
                complex_i[i + (y * col)] += t_i;
                complex_r[(i + n) + y*col] = temp_real - t_r;
                complex_i[(i + n) + y*col] = temp_imagi - t_i;
            }
        }
        n *= 2;
        a = a/2;
    }
}
// Column pass of the 2D FFT: one thread per column x of the padded
// colF x rowF grid. Converts the RGB input to grayscale into complex_r
// (zero imaginary part), zero-pads to rowF, then bit-reverses, runs the
// butterflies, and swaps the top/bottom halves (fftshift along rows).
__global__ void FFT_X(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,float *complex_r, float *complex_i,float *real_d_out, float *imagi_d_out,unsigned char *r_dataC, unsigned char *g_dataC,unsigned char *b_dataC, unsigned long col, unsigned long row,unsigned long colF, unsigned long rowF )
{
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    float temp;
    if(x < col)
    {
        // luminance (ITU-R 601 weights) of the input column
        for (int i = 0; i < row; i++)
        {
            complex_r[x + (i * colF)] = 0.2989 * R_input[x + (i * i_size)] + 0.587 * G_input[x + (i * i_size)] + 0.1140 * B_input[x + (i * i_size)];
            complex_i[x + (i * colF)] = 0;
        }
        // zero-pad the remaining rows
        for (int i = row; i < rowF; i++)
        {
            complex_r[x + (i * colF)] = 0;
            complex_i[x + (i * colF)] = 0;
        }
    }
    else
    {
        // columns beyond the image are fully zero-padded
        for (int i = 0; i < rowF; i++)
        {
            complex_r[x + (i * colF)] = 0;
            complex_i[x + (i * colF)] = 0;
        }
    }
    ordina_x(complex_r, complex_i, real_d_out, imagi_d_out, rowF, colF, x);
    Func_FFT_X(complex_r, complex_i, rowF, colF, x);
    // swap upper/lower halves of the column (fftshift along the row axis)
    for (int i = 0; i < rowF/2; i++)
    {
        temp = complex_r[x + (i * colF)];
        complex_r[x + (i * colF)] = complex_r[x + ((i + rowF/2) * colF)];
        complex_r[x + ((i + rowF/2) * colF)] = temp;
        temp = complex_i[x + (i * colF)];
        complex_i[x + (i * colF)] = complex_i[x + ((i + rowF/2) * colF)];
        complex_i[x + ((i + rowF/2) * colF)] = temp;
    }
}
// Row pass of the 2D FFT: one thread per row y. Bit-reverses and runs the
// butterflies along the row, fftshifts the halves, then writes the centered
// log-magnitude spectrum (20*log10|F|) into the grayscale output planes.
__global__ void FFT_Y(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,float *complex_r, float *complex_i,float *real_d_out, float *imagi_d_out,unsigned char *r_dataC, unsigned char *g_dataC,unsigned char *b_dataC, unsigned long col, unsigned long row,unsigned long colF, unsigned long rowF )
{
    int y = threadIdx.x + (blockIdx.x * blockDim.x);
    float temp;
    ordina_y(complex_r, complex_i, real_d_out, imagi_d_out, rowF, colF, y);
    Func_FFT_Y(complex_r, complex_i, rowF, colF, y);
    // swap left/right halves of the row (fftshift along the column axis)
    for (int i = 0; i < colF/2; i++)
    {
        temp = complex_r[i + (y * colF)];
        complex_r[i + (y * colF)] = complex_r[(i + colF/2) + (y * colF)];
        complex_r[(i + colF/2) + (y * colF)] = temp;
        temp = complex_i[i + (y * colF)];
        complex_i[i + (y * colF)] = complex_i[(i + colF/2) + (y * colF)];
        complex_i[(i + colF/2) + (y * colF)] = temp;
    }
    unsigned char v;
    // crop the padded spectrum back to the original col x row window,
    // centered on the (shifted) DC component
    int a = (colF/2) - (col/2);
    int temp_b = (rowF/2) - (row/2);
    if( y >= temp_b)
        for (int i = a; i < (colF/2) + (col/2); i++)
        {
            // 20*log10(magnitude), written identically to all three channels
            v = (unsigned char)(20*log10(sqrt((complex_r[i + (y * colF)]*complex_r[i + (y * colF)]) + (complex_i[i + (y * colF)]*complex_i[i + (y * colF)]))));
            r_dataC[(i - a ) + (y - temp_b) * i_size] = v;
            g_dataC[(i - a) + (y - temp_b) * i_size] = v;
            b_dataC[(i - a) + (y - temp_b) * i_size] = v;
        }
}
// Morphological threshold pass: correlates the R channel with a dim x dim
// mask and binarizes the weighted neighbourhood sum against threshold m
// (255 where sum >= m, else 0), written to all three output channels.
// NOTE(review): G_input/B_input are unused -- the input is presumably already
// grayscale/binarized with identical channels; confirm against callers.
// NOTE(review): x/y are not checked against col/row before the output write;
// assumes the launch grid exactly matches the image dimensions.
__global__ void erosion(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,unsigned char *r_dataC, unsigned char *g_dataC,unsigned char *b_dataC, unsigned long col, unsigned long row,float *mask, unsigned int dim)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
int offset2, ximg, yimg;
int c = 0; // mask-weighted neighbourhood sum (truncated to int per tap)
unsigned char color;
int end = dim/2, ini = -end, k = 0;
for (int i = ini; i <= end; i++)
{
ximg = x + i;
for (int j = ini; j <= end; j++)
{
yimg = y + j;
offset2 = ximg + yimg * i_size;
// Out-of-image taps are skipped; k still advances so the mask stays
// aligned with the neighbourhood position. Negative ximg/yimg are
// rejected by the > 0 test (and by unsigned conversion in the < tests).
if (ximg < col && yimg < row)
if (ximg > 0 && yimg > 0)
c += (R_input[offset2]*mask[k]);
k++;
}
}
// Binarize against the threshold m.
if(c < m) color = 0;
else color = 255;
r_dataC[offset] = color;
g_dataC[offset] = color;
b_dataC[offset] = color;
}
// Converts one pixel to grayscale in place (ITU-R luma weights) and
// accumulates it into a 256-bin histogram with an atomic increment.
// i_size is the row pitch of the channel buffers.
__global__ void histogramGray(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,unsigned int *hist)
{
    const int px = threadIdx.x + (blockIdx.x * blockDim.x);
    const int py = threadIdx.y + (blockIdx.y * blockDim.y);
    const int idx = px + py * i_size;
    // Weighted sum is computed in double and truncated on store, exactly as
    // the per-channel assignment would do.
    const unsigned char gray =
        0.2989 * R_input[idx] + 0.587 * G_input[idx] + 0.1140 * B_input[idx];
    R_input[idx] = gray;
    G_input[idx] = gray;
    B_input[idx] = gray;
    atomicAdd(&hist[gray], 1);
}
// Thresholds the R channel at level um and writes the resulting binary value
// (0 or 255) into all three channels in place.
__global__ void binary(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,int um)
{
    const int px = threadIdx.x + (blockIdx.x * blockDim.x);
    const int py = threadIdx.y + (blockIdx.y * blockDim.y);
    const int idx = px + py * i_size;
    const unsigned char level = (R_input[idx] > um) ? 255 : 0;
    R_input[idx] = level;
    G_input[idx] = level;
    B_input[idx] = level;
}
// Copies each RGB channel from the input buffers to the output buffers,
// one pixel per thread; i_size is the common row pitch.
__global__ void Copy(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,unsigned char *R_output, unsigned char *G_output,unsigned char *B_output)
{
    const int px = threadIdx.x + (blockIdx.x * blockDim.x);
    const int py = threadIdx.y + (blockIdx.y * blockDim.y);
    const int idx = px + py * i_size;
    R_output[idx] = R_input[idx];
    G_output[idx] = G_input[idx];
    B_output[idx] = B_input[idx];
}
// dim x dim median filter applied independently to the R, G and B channels.
// For each pixel, the in-image neighbourhood samples are sorted per channel
// and the median written to the output buffers.
// Preconditions: dim is odd and dim <= 3 (the local sample buffers hold at
// most 9 values); i_size is the row pitch of all buffers.
// NOTE(review): like the sibling kernels, x/y are not checked against
// col/row before the output write -- grid must match the image.
__global__ void median_filter(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,unsigned char *r_dataC, unsigned char *g_dataC,unsigned char *b_dataC, unsigned long col, unsigned long row,unsigned int dim)
{
    const int x = threadIdx.x + (blockIdx.x * blockDim.x);
    const int y = threadIdx.y + (blockIdx.y * blockDim.y);
    const int offset = x + y * i_size;
    const int half = dim / 2;
    int n = 0;               // number of in-image samples gathered
    int hr[9], hg[9], hb[9]; // neighbourhood samples, one array per channel
    for (int i = -half; i <= half; i++)
    {
        const int ximg = x + i;
        for (int j = -half; j <= half; j++)
        {
            const int yimg = y + j;
            // Only samples strictly inside the image take part in the median
            // (same boundary rule as the original: coordinates must be > 0).
            if (ximg > 0 && yimg > 0 && ximg < col && yimg < row)
            {
                const int offset2 = ximg + yimg * i_size;
                hr[n] = R_input[offset2];
                hg[n] = G_input[offset2];
                hb[n] = B_input[offset2];
                n++;
            }
        }
    }
    // Selection-style sort of each channel's samples (n <= 9, cost is trivial).
    // Swap temporaries are int: the original used unsigned char, which only
    // worked because pixel values fit in 8 bits.
    for (int i = 0; i < n; i++)
        for (int j = i + 1; j < n; j++)
        {
            int t;
            if (hr[j] < hr[i]) { t = hr[j]; hr[j] = hr[i]; hr[i] = t; }
            if (hg[j] < hg[i]) { t = hg[j]; hg[j] = hg[i]; hg[i] = t; }
            if (hb[j] < hb[i]) { t = hb[j]; hb[j] = hb[i]; hb[i] = t; }
        }
    unsigned char med_r, med_g, med_b;
    if (n % 2 == 1)
    {
        med_r = hr[n / 2];
        med_g = hg[n / 2];
        med_b = hb[n / 2];
    }
    else
    {
        // BUG FIX: the original stored the *sum* of the two middle values,
        // which wraps modulo 256 in unsigned char; the median of an even
        // sample count is their average.
        med_r = (hr[n / 2] + hr[n / 2 - 1]) / 2;
        med_g = (hg[n / 2] + hg[n / 2 - 1]) / 2;
        med_b = (hb[n / 2] + hb[n / 2 - 1]) / 2;
    }
    r_dataC[offset] = med_r;
    g_dataC[offset] = med_g;
    b_dataC[offset] = med_b;
}
// Generic dim x dim convolution (correlation) of each RGB channel with mask.
// NOTE(review): temp_r/g/b are unsigned char accumulators, so weighted sums
// outside [0,255] wrap modulo 256 -- fine for normalized blur kernels,
// suspect for signed edge-detection masks; confirm the intended mask range.
// NOTE(review): x/y are not checked against col/row before the output write;
// assumes the launch grid exactly matches the image dimensions.
__global__ void Operador_Convolucion(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,unsigned char *r_dataC, unsigned char *g_dataC,unsigned char *b_dataC, unsigned long col, unsigned long row,float *mask, unsigned int dim)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
int offset2, ximg, yimg;
unsigned char temp_r = 0, temp_g = 0, temp_b = 0;
int end = dim/2, ini = -end, k = 0;
for (int i = ini; i <= end; i++)
{
ximg = x + i;
for (int j = ini; j <= end; j++)
{
yimg = y + j;
offset2 = ximg + yimg * i_size;
// Out-of-image taps contribute nothing; k still advances so the mask
// stays aligned with the neighbourhood position.
if (ximg < col && yimg < row)
if (ximg > 0 && yimg > 0)
{
temp_r += R_input[offset2]*mask[k];
temp_g += G_input[offset2]*mask[k];
temp_b += B_input[offset2]*mask[k];
}
k++;
}
}
r_dataC[offset] = temp_r;
g_dataC[offset] = temp_g;
b_dataC[offset] = temp_b;
}
// Accumulates independent 256-bin histograms for the R, G and B channels,
// one atomic increment per channel per pixel.
__global__ void Get_Histogram(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,unsigned int *hist_r,unsigned int *hist_g,unsigned int *hist_b)
{
    const int px = threadIdx.x + (blockIdx.x * blockDim.x);
    const int py = threadIdx.y + (blockIdx.y * blockDim.y);
    const int idx = px + py * i_size;
    atomicAdd(hist_r + R_input[idx], 1);
    atomicAdd(hist_g + G_input[idx], 1);
    atomicAdd(hist_b + B_input[idx], 1);
}
// Histogram-equalization remap: each output pixel is a table lookup of the
// input pixel value, with hist_r/g/b acting as per-channel lookup tables.
// NOTE(review): results are stored into unsigned char, so the tables are
// assumed to already hold CDF values scaled to 0..255 (presumably computed
// on the host after Get_Histogram) -- confirm against the calling code.
__global__ void Equalization_GPU(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size,unsigned char *r_dataE, unsigned char *g_dataE,unsigned char *b_dataE,unsigned int *hist_r,unsigned int *hist_g,unsigned int *hist_b)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
r_dataE[offset] = hist_r[R_input[offset]];
g_dataE[offset] = hist_g[G_input[offset]];
b_dataE[offset] = hist_b[B_input[offset]];
}
// Rotates the RGB image by angle a (radians) about the centre of the output
// grid with nearest-neighbour sampling, writing RGBA pixels into ptr.
// i_size is the row pitch in bytes of the channel buffers; destination
// pixels that map outside the source image are painted opaque black.
__global__ void Rotation_op(uchar4 *ptr, unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size, float a,unsigned long col, unsigned long row)
{
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    int y = threadIdx.y + (blockIdx.y * blockDim.y);
    int offset = x + y * blockDim.x * gridDim.x;
    // Move the origin to the centre of the output grid.
    x = x - (blockDim.x * gridDim.x / 2);
    y = y - (blockDim.y * gridDim.y / 2);
    unsigned char* f_r, *f_g, *f_b;
    // Inverse rotation: find where this output pixel samples from.
    int ximg = (x*cos(a) + y*sin(a)) + (col/2), yimg = (y*cos(a) - x*sin(a)) + (row/2);
    // Explicit lower-bound checks added: the original relied on the
    // signed-to-unsigned conversion of negative ximg/yimg in the < tests,
    // which is correct but fragile (breaks silently if col/row ever become
    // signed). Behaviour is unchanged for all inputs.
    if (ximg >= 0 && yimg >= 0 && ximg < col && yimg < row)
    {
        // Pitch-based row addressing (i_size is a byte pitch).
        f_r = (unsigned char*)((char*)R_input + yimg*i_size);
        f_g = (unsigned char*)((char*)G_input + yimg*i_size);
        f_b = (unsigned char*)((char*)B_input + yimg*i_size);
        ptr[offset].x = f_r[ximg];
        ptr[offset].y = f_g[ximg];
        ptr[offset].z = f_b[ximg];
        ptr[offset].w = 255; // fully opaque
    }
    else
    {
        ptr[offset].x = 0;
        ptr[offset].y = 0;
        ptr[offset].z = 0;
        ptr[offset].w = 255;
    }
}
// Scales the image by factor s about its centre with bilinear interpolation.
// i_size is the input row pitch, pitch2 the output row pitch; DIM (file-level
// macro) is the output image dimension. Destination pixels that map outside
// the source are painted black.
__global__ void Scaling_op(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input,unsigned char *R_output, unsigned char *G_output,unsigned char *B_output,size_t i_size, size_t pitch2, float s,unsigned long col, unsigned long row)
{
    float x = threadIdx.x + (blockIdx.x * blockDim.x);
    float y = threadIdx.y + (blockIdx.y * blockDim.y);
    int offset = x + y * pitch2;
    // Centre on the output image, inverse-scale, then shift into source
    // coordinates.
    x = x - (DIM / 2);
    y = y - (DIM / 2);
    unsigned char* f_r, *f_g, *f_b;
    x /= s; y /= s;
    int ximg = x + (col/2), yimg = y + (row/2);
    // Explicit lower-bound checks added: the original relied on the
    // signed-to-unsigned conversion of negative ximg/yimg in the < tests,
    // which is correct but fragile. Upper bounds stop one short so the
    // ximg+1 / +i_size interpolation taps stay inside the image.
    if (ximg >= 0 && yimg >= 0 && ximg < (col - 1) && yimg < (row - 1))
    {
        f_r = (unsigned char*)((char*)R_input + yimg*i_size);
        f_g = (unsigned char*)((char*)G_input + yimg*i_size);
        f_b = (unsigned char*)((char*)B_input + yimg*i_size);
        // Bilinear weights from the fractional part of the source position.
        float cx = x - floor(x);
        float cy = y - floor(y);
        float R1 = f_r[ximg]*(1 - cx) + f_r[ximg + 1]*(cx);
        float R2 = f_r[ximg + i_size]*(1 - cx) + f_r[ximg + 1 + i_size]*(cx);
        R_output[offset] = R1*(1 - cy) + R2*(cy);
        R1 = f_g[ximg]*(1 - cx) + f_g[ximg + 1]*(cx);
        R2 = f_g[ximg + i_size]*(1 - cx) + f_g[ximg + 1 + i_size]*(cx);
        G_output[offset] = R1*(1 - cy) + R2*(cy);
        R1 = f_b[ximg]*(1 - cx) + f_b[ximg + 1]*(cx);
        R2 = f_b[ximg + i_size]*(1 - cx) + f_b[ximg + 1 + i_size]*(cx);
        B_output[offset] = R1*(1 - cy) + R2*(cy);
    }
    else
    {
        R_output[offset] = 0;
        G_output[offset] = 0;
        B_output[offset] = 0;
    }
}
// Salt-and-pepper noise: with probability noiseP percent per pixel, replaces
// the pixel with 0 or 255 (parity of a second random draw chooses which),
// writing the same value to all three channels in place.
__global__ void PPnoise(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size, int noiseP, int seed)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
// Per-thread cuRAND state; x is used as the sequence number and y as the
// offset so every pixel gets an independent stream for the same seed.
curandState_t state;
curand_init(seed, x, y, &state);
unsigned char noise = (unsigned char)(curand(&state) % 100);
if(curand(&state) % 100 < noiseP)
{
// Parity of the first draw selects salt (255) or pepper (0).
noise = 255 * (noise % 2);
R_input[offset] = noise;
G_input[offset] = noise;
B_input[offset] = noise;
}
}
|
11,446 | #include "stdio.h"
#define N 2048*2048 //Total threads
#define THREADS_PER_BLOCK 512
// Element-wise vector addition: c[i] = a[i] + b[i], one element per thread.
// NOTE: there is no bounds guard, so the launch configuration must supply
// exactly one thread per element (N is a multiple of THREADS_PER_BLOCK in
// this file's main()).
__global__ void add(int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x; //thread_no + block_no * no.ofthreads/block
c[index] = a[index] + b[index];
}
// Fills a[0..n-1] with pseudo-random values scaled down to [0, RAND_MAX/2000].
// Uses the C library rand(); seed with srand() beforehand for varied runs.
void random_ints(int* a, int n)
{
    for (int i = 0; i < n; ++i)
        a[i] = rand() / 2000;
}
// Host driver: allocates N-element vectors, fills a and b with random values,
// computes c = a + b on the GPU and prints the first element of each.
// Returns 0 on success, 1 on any CUDA or host allocation failure.
int main(void)
{
	int *a, *b, *c;
	int *d_a, *d_b, *d_c;
	size_t size = (size_t)N * sizeof(int); // size_t: avoids int overflow for large N
	cudaError_t err;
	// Device allocations, each checked: a failed cudaMalloc would otherwise
	// make every later call fail with a confusing error.
	if ((err = cudaMalloc((void**)&d_a, size)) != cudaSuccess ||
	    (err = cudaMalloc((void**)&d_b, size)) != cudaSuccess ||
	    (err = cudaMalloc((void**)&d_c, size)) != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	// Host allocations, also checked before use.
	a = (int*)malloc(size);
	b = (int*)malloc(size);
	c = (int*)malloc(size);
	if (!a || !b || !c) {
		fprintf(stderr, "host malloc failed\n");
		return 1;
	}
	random_ints(a, N);
	random_ints(b, N);
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
	// N is an exact multiple of THREADS_PER_BLOCK and the kernel has no
	// bounds guard, so the grid covers the data exactly.
	add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
	// Kernel launches do not return a status; query it explicitly.
	if ((err = cudaGetLastError()) != cudaSuccess) {
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	// This blocking copy also synchronizes with the kernel.
	cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
	printf("%d + %d is %d\n", *a, *b, *c);
	free(a);
	free(b);
	free(c);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
|
11,447 | #include "includes.h"
cudaEvent_t start, stop;
// Computes the Euclidean gradient magnitude per pixel from the x/y gradient
// images, clamps it to [0, 255], and stores it into channel_values.
__global__ void cudaComputeAndNormalizeGradientLength(unsigned char *channel_values, int* x_gradient, int* y_gradient) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int gx = x_gradient[idx];
    const int gy = y_gradient[idx];
    // Magnitude computed in single precision, truncated to an integer.
    int len = (int)sqrt((float)(gx * gx + gy * gy));
    if (len > 255) {
        len = 255; // saturate to the 8-bit output range
    }
    channel_values[idx] = len;
}
11,448 | #include <stdio.h>
#include <assert.h>
// Pass-through CUDA status checker: logs the error string to stderr and
// aborts via assert (debug builds) on failure; returns the status unchanged
// so it can wrap any runtime call inline.
inline cudaError_t checkCuda(cudaError_t result)
{
    if (cudaSuccess != result) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
    return result;
}
// Fills a[0..N-1] with its own index (a[i] = i).
void init(int *a, int N)
{
    for (int i = 0; i < N; i++)
        a[i] = i;
}
/*
 * Doubles every element of a in place using a grid-stride loop, so any
 * grid/block configuration covers all N elements (including N larger than
 * the total thread count).
 */
__global__
void doubleElements(int *a, int N)
{
	int indexWithinTheGrid = blockIdx.x * blockDim.x + threadIdx.x;
	int gridStride = gridDim.x * blockDim.x;
	// The loop condition already guarantees i < N; the original carried a
	// redundant inner `if (i < N)` check, removed here.
	for (int i = indexWithinTheGrid; i < N; i += gridStride)
	{
		a[i] *= 2;
	}
}
// Verifies that every element equals twice its index (the expected result of
// doubleElements applied to an init()-filled array). Returns true iff all
// N elements match; an empty array (N == 0) trivially passes.
bool checkElementsAreDoubled(int *a, int N)
{
    for (int i = 0; i < N; ++i)
        if (a[i] != 2 * i)
            return false;
    return true;
}
// Unified-memory demo: doubles a 10000-element array on the GPU with a
// grid-stride kernel launched on fewer threads than elements, then verifies
// the result on the host.
int main()
{
/*
* `N` is greater than the size of the grid (see below).
*/
int N = 10000;
int *a;
size_t size = N * sizeof(int);
cudaError_t err;
// Managed allocation: accessible from both host (init/verify) and device.
err =cudaMallocManaged(&a, size);
if (err != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
}
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
// Grid-stride loop inside doubleElements covers all N despite N > 8192.
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
// Launch-configuration errors are only visible via cudaGetLastError.
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
}
// Synchronize before the host reads a; also surfaces async kernel errors.
checkCuda( cudaDeviceSynchronize() );
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
cudaFree(a);
}
|
11,449 | /* Rician MLE diffusion and kurtosis tensor estimator by Viljami Sairanen (2016)
Based on algorithm in:
"Liu, Jia, Dario Gasbarra, and Juha Railavo.
"Fast Estimation of Diffusion Tensors under
Rician noise by the EM algorithm."
Journal of neuroscience methods 257 (2016) : 147 - 158" */
// to convert between single and double precision use following changes:
// double <-> double
// sqrt( <-> sqrt(
// fabs( <-> fabs(
// exp( <-> exp(
// log( <-> log(
#include <math.h>
// Flattens a 2-D grid of 2-D blocks into a single linear thread id; used
// throughout this file as the per-voxel index (one thread per voxel).
__device__ size_t calculateGlobalIndex() {
// Which block are we?
size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
// Which THREAD are we within the block?
size_t const localthreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
// How big is each block?
size_t const threadsPerBlock = blockDim.x*blockDim.y;
// Which THREAD are we overall?
return localthreadIdx + globalBlockIndex*threadsPerBlock;
}
// Polynomial approximation of the modified Bessel function I0(x)
// (Numerical Recipes' "bessi0"), additionally scaled by exp(-|x|) in both
// branches so that the I1/I0 ratios computed downstream stay finite for
// large arguments (matches MATLAB besseli's scaling option).
__device__ double getBesseli0(double x) {
double ax, ans, y;
ax = fabs(x);
if (ax < 3.75) {
// Small-argument polynomial in (x/3.75)^2.
y = x / 3.75;
y *= y;
ans = 1.0 + y*(3.5156229 + y*(3.0899424 + y*(1.2067492 +
y*(0.2659732 + y*(0.360768e-1 + y*0.45813e-2)))));
ans *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
else {
// Large-argument asymptotic form; the exp(ax) factor of the true I0 is
// cancelled by the exp(-ax) scaling, leaving only 1/sqrt(ax).
y = 3.75 / ax;
ans = (1.0 / sqrt(ax)) * // scale by exp(-abs(real(x))); see matlab help for besseli
(0.39894228 + y * (0.1328592e-1
+ y * (0.225319e-2 + y * (-0.157565e-2 + y * (0.916281e-2
+ y * (-0.2057706e-1 + y * (0.2635537e-1 + y * (-0.1647633e-1
+ y * (0.392377e-2)))))))));
}
return ans;
}
// Polynomial approximation of the modified Bessel function I1(x)
// (Numerical Recipes' "bessi1"), scaled by exp(-|x|) like getBesseli0 so the
// I1/I0 ratio is numerically stable. I1 is odd, so the sign of x is restored
// on return.
__device__ double getBesseli1(double x) {
double ax, ans, y;
ax = fabs(x);
if (ax < 3.75) {
// Small-argument polynomial in (x/3.75)^2.
y = x / 3.75;
y *= y;
ans = ax * (0.5 + y *(0.87890594 + y *(0.51498869 + y *(0.15084934
+ y * (0.2658733e-1 + y * (0.301532e-2 + y * 0.32411e-3))))));
ans *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
else {
// Large-argument asymptotic form, evaluated in two nested stages.
y = 3.75 / ax;
ans = 0.2282967e-1 + y * (-0.2895312e-1 + y * (0.1787654e-1
- y * 0.420059e-2));
ans = 0.39894228 + y * (-0.3988024e-1 + y * (-0.362018e-2
+ y * (0.163801e-2 + y * (-0.1031555e-1 + y * ans))));
ans *= 1.0 / sqrt(ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
return x < 0.0 ? -ans : ans;
}
// Returns the maximum over this thread's length-long slice of arr
// (elements arr[THREAD*length .. THREAD*length + length - 1]).
// Precondition: length >= 1.
__device__ double getMax(
double *arr,
const unsigned int length,
size_t const THREAD) {
    const size_t base = THREAD * length;
    double best = arr[base];
    for (unsigned int i = 1; i < length; ++i) {
        const double candidate = arr[base + i];
        if (candidate > best) {
            best = candidate;
        }
    }
    return best;
}
// Crout LU decomposition with partial pivoting (Numerical Recipes "ludcmp"),
// operating in place on this thread's n x n matrix at a[THREAD*n*n].
// indx[THREAD*n..] records the row permutation for LUsubstitutions; vv is
// per-thread scratch holding the implicit row scalings. Singular pivots are
// clamped to 1.0e-20 instead of aborting, matching this file's convention.
__device__ void LUdecomposition(double *a, int n, int *indx, double *vv, size_t const THREAD) {
	int i, imax = 0, j, k; // imax initialised: guards an all-zero pivot column
	double big, dum, sum, temp;
	// Record the largest element of each row for implicit scaling.
	for (i = 0; i<n; i++) {
		big = 0.0;
		for (j = 0; j<n; j++) {
			temp = fabs(a[THREAD * n * n+ i*n + j]);
			if (temp >= big) {
				big = temp;
			}
		}
		if (big == 0.0) { // Singular matrix can't compute; clamp and continue
			big = 1.0e-20;
		}
		vv[THREAD * n + i] = 1.0 / big;
	}
	// Crout's method, column by column.
	for (j = 0; j<n; j++) {
		for (i = 0; i<j; i++) {
			sum = a[THREAD * n * n+ i*n + j];
			for (k = 0; k<i; k++) {
				sum -= a[THREAD * n * n+ i*n + k] * a[THREAD * n * n+ k*n + j];
			}
			a[THREAD * n * n+ i*n + j] = sum;
		}
		// Search rows j..n-1 for the (scaled) pivot.
		big = 0.0;
		for (i = j; i<n; i++) {
			sum = a[THREAD * n * n+ i*n + j];
			for (k = 0; k<j; k++) {
				sum -= a[THREAD * n * n+ i*n + k] * a[THREAD * n * n+ k*n + j];
			}
			a[THREAD * n * n+ i*n + j] = sum;
			dum = vv[THREAD * n+ i] * fabs(sum);
			if (dum >= big) {
				big = dum;
				imax = i;
			}
		}
		if (j != imax) {
			// Swap rows j and imax of THIS thread's matrix.
			for (k = 0; k<n; k++) {
				dum = a[THREAD * n * n+ imax*n + k];
				// BUG FIX: the original wrote `a[imax*n + k]` here, dropping
				// the per-thread offset and corrupting thread 0's matrix
				// whenever any other thread pivoted.
				a[THREAD * n * n+ imax*n + k] = a[THREAD * n * n+ j*n + k];
				a[THREAD * n * n+ j*n + k] = dum;
			}
			vv[THREAD * n+ imax] = vv[THREAD * n+ j];
		}
		indx[THREAD * n+ j] = imax;
		if (a[THREAD * n * n+ j*n + j] == 0.0) {
			a[THREAD * n * n+ j*n + j] = 1.0e-20;
		}
		// Divide the sub-column by the pivot. (The NR original guards with
		// j != n-1; the guard is harmless either way since the inner loop is
		// empty for j == n-1.)
		if (j != n) {
			dum = 1.0 / a[THREAD * n * n+ j*n + j];
			for (i = j + 1; i<n; i++) {
				a[THREAD * n * n+ i*n + j] *= dum;
			}
		}
	}
}
// Forward and back substitution (Numerical Recipes "lubksb") using the LU
// factors and the row permutation indx produced by LUdecomposition.
// Solves A x = b for this thread's system, overwriting
// b[THREAD*n .. THREAD*n+n-1] with the solution x.
__device__ void LUsubstitutions(double *a, int n, int *indx, double *b, size_t const THREAD) {
int i, ii = 0, ip, j;
double sum;
// Forward substitution, un-permuting b on the fly; ii tracks the first
// nonzero element of b so leading zeros are skipped (NR optimisation).
for (i = 0; i<n; i++) {
ip = indx[(THREAD * n) + i];
sum = b[(THREAD * n) + ip];
b[(THREAD * n) + ip] = b[(THREAD * n) + i];
if (ii != 0) {
for (j = ii - 1; j<i; j++) {
sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
}
}
else if (sum != 0) {
ii = i + 1;
}
b[(THREAD * n) + i] = sum;
}
// Back substitution with the upper-triangular factor.
for (i = n - 1; i >= 0; i--) {
sum = b[(THREAD * n) + i];
for (j = i + 1; j<n; j++) {
sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
}
b[(THREAD * n) + i] = sum / a[(THREAD * n * n) + (i * n) + i];
}
}
// Cholesky factorisation (Numerical Recipes "choldc") of this thread's
// symmetric positive-definite n x n matrix: A = L L^T. L's off-diagonal
// entries are written below the diagonal of a; its diagonal goes into
// p[THREAD*n..]. The upper triangle of a is left untouched. A non-positive
// pivot (decomposition failure) is clamped to 1.0e-20 instead of reported,
// matching this file's convention.
__device__ void CholeskyDecomposition(double *a, int n, double *p, size_t const THREAD) {
int i, j, k;
double sum;
for (i = 0; i < n; i++) {
for (j = i; j < n; j++) {
sum = a[(THREAD * n * n) + (i*n) + j];
// Subtract the contributions of the already-computed columns of L.
for (k = i-1; k >= 0; k--) {
sum -= a[(THREAD * n * n) + (i*n) + k]
* a[(THREAD * n * n) + (j*n) + k];
}
if (i == j) {
if (sum <= 0.0) {
sum = 1.0e-20; // Cholesky decomposition failed
}
p[THREAD*n + i] = sqrt(sum);
}
else {
a[(THREAD*n*n) + (j*n) + i] = sum / p[THREAD*n + i];
}
}
}
}
// Solves A x = b given the factorisation produced by CholeskyDecomposition
// (Numerical Recipes "cholsl"): the lower triangle of a holds L below the
// diagonal and p holds L's diagonal. Forward-substitutes L y = b (y stored
// in x), then back-substitutes L^T x = y.
__device__ void CholeskyBacksubstitution(double *a, int n, double *p, double *b, double *x, size_t const THREAD) {
	int i, k;
	double sum;
	for (i = 0; i < n; i++) { // Solve Ly=b, storing y in x
		sum = b[THREAD*n + i];
		for (k = i-1; k >= 0; k--) {
			sum -= a[(THREAD*n*n) + (i*n) + k] * x[THREAD*n + k];
		}
		x[THREAD*n + i] = sum / p[THREAD*n + i];
	}
	// BUG FIX: the original back-substitution started at i = n, reading and
	// writing x[THREAD*n + n] and p[THREAD*n + n] -- one element past this
	// thread's slice, i.e. into the next thread's data. Valid rows are
	// n-1 down to 0.
	for (i = n - 1; i >= 0; i--) { // Solve L^(T)x=y
		sum = x[THREAD*n + i];
		for (k = i+1; k < n; k++) {
			sum -= a[(THREAD*n*n) + (k*n) + i] * x[THREAD*n + k];
		}
		x[THREAD*n + i] = sum / p[THREAD*n + i];
	}
}
// For this thread's voxel, computes
//   expZTheta[i] = exp( sum_j Z[j][i] * theta[j] )   for i in [0, nDWIs).
// Z is the (column-major, nParams x nDWIs) design matrix shared by all
// voxels; theta is the per-voxel parameter vector.
__device__ void calculateExpZTheta(
double *expZTheta,
double *theta,
double *Z,
const unsigned int nParams,
const unsigned int nDWIs,
size_t const THREAD) {
	for (int i = 0; i < nDWIs; i++) {
		// Accumulate the dot product locally, then exponentiate once.
		double dot = 0.0;
		for (int j = 0; j < nParams; j++) {
			dot += Z[j * nDWIs + i] * theta[THREAD * nParams + j];
		}
		expZTheta[THREAD * nDWIs + i] = exp(dot);
	}
}
// For this thread's voxel computes the EM working quantities used by the
// noise-variance update:
//   a   = sum(Y^2) + sum(expZTheta^2)
//   b_i = Y_i * expZTheta_i
// sumYSQ holds the precomputed sum of squared measurements.
__device__ void calculateAB_1(
double *a,
double *b,
double *Y,
double *expZTheta,
double *sumYSQ,
const unsigned int nDWIs,
size_t const THREAD) {
	double acc = sumYSQ[THREAD];
	for (int i = 0; i < nDWIs; i++) {
		const double ez = expZTheta[THREAD * nDWIs + i];
		acc += ez * ez;
		b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * ez;
	}
	a[THREAD] = acc;
}
// Recomputes the EM working quantities with the S0 term theta[0] factored
// out (used by the S0 optimization step):
//   expZTheta_i = exp( sum_{j>=1} Z[j][i] * theta[j] )
//   a           = log( sum_i expZTheta_i^2 )
//   b_i         = Y_i * expZTheta_i
//   twotau_i    = b_i * exp(theta[0]) / SigmaSQ
__device__ void calculateAB_2(
double *a,
double *b,
double *Y,
double *Z,
double *theta,
double *SigmaSQ,
double *expZTheta,
double *twotau,
const unsigned int nDWIs,
const unsigned int nParams,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
a[THREAD] = 0.0;
for (int i = 0; i < nDWIs; i++) {
expZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
expZTheta[THREAD * nDWIs + i] +=
Z[j * nDWIs + i] * theta[THREAD * nParams + j];
}
expZTheta[THREAD * nDWIs + i] = exp(expZTheta[THREAD * nDWIs + i]);
a[THREAD] += expZTheta[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] * exp(theta[THREAD * nParams+0]) / SigmaSQ[THREAD];
}
// a is returned in log form; iterateS0 consumes log(sum expZTheta^2).
a[THREAD] = log(a[THREAD]);
}
// Conditional expectation of the Rician EM algorithm:
//   EN_i = (twotau_i / 2) * I1(twotau_i) / I0(twotau_i)
// anyEN is set true when at least one EN entry is positive, i.e. there is
// signal information left to drive the tensor update.
__device__ void calculateEN(
double *EN,
double *twotau,
const unsigned int nDWIs,
bool *anyEN,
size_t const THREAD) {
	bool any_positive = false;
	for (int i = 0; i < nDWIs; i++) {
		const double t = twotau[THREAD * nDWIs + i];
		// getBesseli0/1 are exp(-|x|)-scaled, so the ratio is exact.
		const double expectation = 0.5 * t * getBesseli1(t) / getBesseli0(t);
		EN[THREAD * nDWIs + i] = expectation;
		if (expectation > 0.0) {
			any_positive = true;
		}
	}
	anyEN[THREAD] = any_positive;
}
// Per-DWI linear predictor of the Rician loglikelihood with theta[0] and
// SigmaSQ held fixed:
//   c        = 2*theta[0] - log(2*SigmaSQ)
//   ZTheta_i = 2 * sum_{j>=1} Z[j][i]*theta[j] + c
__device__ void calculateZTheta(
double *c,
double *ZTheta,
double *theta,
double *SigmaSQ,
double *Z,
const unsigned int nDWIs,
const unsigned int nParams,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
c[THREAD] = 2.0 * theta[THREAD * nParams+0] -
log(2.0 * SigmaSQ[THREAD]);
for (int i = 0; i < nDWIs; i++) {
ZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
ZTheta[THREAD * nDWIs + i] +=
Z[j * nDWIs + i] * theta[THREAD * nParams + j];
}
ZTheta[THREAD * nDWIs + i] *= 2.0;
ZTheta[THREAD * nDWIs + i] += c[THREAD];
}
}
// Evaluates the (EM surrogate) loglikelihood for this voxel:
//   ll = sum_i [ EN_i * ZTheta_i - exp(ZTheta_i - scaling) * exp(scaling) ]
// and caches expo_i = exp(ZTheta_i - scaling) for reuse in the Fisher
// information. scaling = max(ZTheta) keeps the exponentials from overflowing.
__device__ void calculateLoglikelihood(
double *loglikelihood,
double *expo,
double *ZTheta,
double *scaling,
double *expScaling,
double *EN,
const unsigned int nDWIs,
size_t const THREAD) {
	double ll = 0.0;
	for (int i = 0; i < nDWIs; i++) {
		const double zt = ZTheta[THREAD * nDWIs + i];
		const double e = exp(zt - scaling[THREAD]);
		expo[THREAD * nDWIs + i] = e;
		ll += EN[THREAD * nDWIs + i] * zt - e * expScaling[THREAD];
	}
	loglikelihood[THREAD] = ll;
}
// Zeroes this thread's nDeltaParams x nDeltaParams Fisher information matrix
// and its symmetrized copy before accumulation.
__device__ void initializeInformationMatrices(
double *fisherInformation,
double *fisherInformation_sym,
const unsigned int nDeltaParams,
size_t const THREAD) {
	const unsigned int count = nDeltaParams * nDeltaParams;
	const size_t base = THREAD * count;
	for (unsigned int idx = 0; idx < count; ++idx) {
		fisherInformation[base + idx] = 0.0;
		fisherInformation_sym[base + idx] = 0.0;
	}
}
// Fixed-point iteration for the Rician noise variance SigmaSQ:
//   twotau_i  = b_i / SigmaSQ
//   SigmaSQ  <- 0.5 * a / (nDWIs + sum_i twotau_i * I1/I0(twotau_i))
// repeated until the change falls below toleranceSigmaSQ or iterLimitSigmaSQ
// is reached. a and b come from calculateAB_1.
__device__ void iterateSigmaSQ(
double *SigmaSQ,
double *SigmaSQ0,
double *tmpdouble,
double *a,
double *b,
double *twotau,
unsigned int *nIterSigmaSQ,
unsigned int iterLimitSigmaSQ,
const double toleranceSigmaSQ,
const unsigned int nDWIs,
bool *continueSigmaSQIteration,
size_t const THREAD) {
// Should be ok
continueSigmaSQIteration[THREAD] = true;
nIterSigmaSQ[THREAD] = 0;
while (continueSigmaSQIteration[THREAD]) {
(nIterSigmaSQ[THREAD])++;
// Remember the previous estimate for the convergence test.
SigmaSQ0[THREAD] = SigmaSQ[THREAD];
tmpdouble[THREAD] = 0.0;
for (int i = 0; i < nDWIs; i++) {
twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] / SigmaSQ[THREAD];
tmpdouble[THREAD] += twotau[THREAD * nDWIs + i] *
getBesseli1(twotau[THREAD * nDWIs + i]) /
getBesseli0(twotau[THREAD * nDWIs + i]);
}
SigmaSQ[THREAD] = 0.5 * a[THREAD] / ((double)(nDWIs) + tmpdouble[THREAD]);
// Continue while under the iteration cap and still moving by more than
// the absolute tolerance.
continueSigmaSQIteration[THREAD] =
((nIterSigmaSQ[THREAD] < iterLimitSigmaSQ)
&&
(fabs(SigmaSQ[THREAD] - SigmaSQ0[THREAD]) > toleranceSigmaSQ));
}
}
// Fixed-point iteration for the S0 parameter theta[0] with theta[1:] and
// SigmaSQ held fixed:
//   theta[0] <- log( sum_i b_i * I1/I0(twotau_i) ) - a
// where a = log(sum expZTheta^2) and b come from calculateAB_2. twotau is
// refreshed after each update for the next pass. Stops when the relative
// change in theta[0] drops to toleranceS0 or iterLimitS0 is reached.
__device__ void iterateS0(
double *theta,
double *theta1_old,
double *SigmaSQ,
double *a,
double *b,
double *twotau,
unsigned int *nIterS0,
unsigned int iterLimitS0,
const double toleranceS0,
const unsigned int nDWIs,
const unsigned int nParams,
bool *continueS0Iteration,
size_t const THREAD) {
	continueS0Iteration[THREAD] = true;
	nIterS0[THREAD] = 0;
	while (continueS0Iteration[THREAD]) {
		nIterS0[THREAD]++;
		// Get initial theta(1) parameter
		theta1_old[THREAD] = theta[THREAD * nParams+0];
		// Calculate new theta(1) parameter
		theta[THREAD * nParams+0] = 0.0;
		for (int i = 0; i < nDWIs; i++) {
			theta[THREAD * nParams+0] += (b[THREAD * nDWIs + i] *
				getBesseli1(twotau[THREAD * nDWIs + i]) /
				getBesseli0(twotau[THREAD * nDWIs + i]));
		}
		theta[THREAD * nParams+0] = log(theta[THREAD * nParams+0]) -a[THREAD];
		// Update twotau for the next iteration step
		for (int i = 0; i < nDWIs; i++) {
			twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] *
				exp(theta[THREAD * nParams+0]) / SigmaSQ[THREAD];
		}
		// BUG FIX: the original used fabs(...) directly as the truth value,
		// so toleranceS0 was never applied and the loop only stopped at the
		// iteration limit or on exact (bitwise) convergence. Compare the
		// relative change against the tolerance as in the sibling loops.
		continueS0Iteration[THREAD] =
			((nIterS0[THREAD] < iterLimitS0)
			&&
			(fabs((theta[THREAD * nParams + 0] - theta1_old[THREAD]) / theta1_old[THREAD]) > toleranceS0));
	}
}
// Accumulates the score vector and Fisher information matrix over theta[1:]:
//   score[j-1]  = 2 * sum_i Z[j][i] * (EN_i - expo_i * expScaling)
//   F[j-1][k-1] = 4 * sum_i Z[j][i] * Z[k][i] * expo_i   (scaled at the end)
// DeltaTheta is seeded with the score for the subsequent linear solve.
// NOTE(review): the "symmetrized" entry averages F[j-1][k-1] with ITSELF
// (the same index appears twice) rather than with the transpose element
// F[k-1][j-1]; the accumulated matrix is symmetric by construction once the
// i-loop completes, but confirm whether the transpose index was intended.
__device__ void calculateFisherInformation(
double *fisherInformation,
double *fisherInformation_sym,
double *Z,
double *score,
double *DeltaTheta,
double *expo,
double *EN,
double *expScaling,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
size_t const THREAD) {
for (int j = 1; j < nParams; j++) {
score[THREAD * nDeltaParams + j - 1] = 0.0;
for (int i = 0; i < nDWIs; i++) {
score[THREAD * nDeltaParams + j - 1] +=
2.0 * Z[j * nDWIs + i] * (EN[THREAD * nDWIs + i] -
expo[THREAD * nDWIs + i] * expScaling[THREAD]);
for (int k = 1; k < nParams; k++) { // range of j and k are [1 to nParams]
fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +=
4.0 * Z[j * nDWIs + i] * Z[k * nDWIs + i] * expo[THREAD * nDWIs + i];
// Symmetrize Fisher Information
fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] =
(fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +
fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)]) *
0.5 * expScaling[THREAD];
}
}
DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
}
// Make copy of symmetric Fisher information matrix
for (int i = 0; i < nDeltaParams*nDeltaParams; i++) {
fisherInformation[THREAD * nDeltaParams * nDeltaParams + i] = fisherInformation_sym[THREAD * nDeltaParams * nDeltaParams + i];
}
}
// Damped (Levenberg-style) Newton step for theta[1:]: repeatedly adds an
// increasing lambda to the Fisher information diagonal, solves
// I * DeltaTheta = score via Cholesky, evaluates the loglikelihood at
// theta + DeltaTheta, and stops once it no longer falls below the reference
// loglikelihood_old (or the iteration limit is hit). On exit DeltaTheta,
// new_theta, ZTheta, scaling, expScaling, expo and loglikelihood hold the
// last attempted step; the caller (iterateTheta) decides whether to accept.
// NOTE(review): toleranceLoglikelihood is accepted but never used in the
// stopping test -- confirm whether a tolerance check was intended.
__device__ void iterateLoglikelihood(
int *indx,
double *score,
double *vv,
double *DeltaTheta,
double *Z,
double *expo,
double *theta,
double *loglikelihood,
double *loglikelihood_old,
double *new_theta,
double *regulatorLambda,
double *fisherInformation,
double *fisherInformation_sym,
double *ZTheta,
double *c,
double *scaling,
double *expScaling,
double *EN,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
const double regulatorLambda0,
const double regulatorRescaling,
unsigned int *nIterLoglikelihood,
const unsigned int iterLimitLoglikelihood,
const double toleranceLoglikelihood,
bool *continueLoglikelihoodIteration,
size_t const THREAD) {
nIterLoglikelihood[THREAD] = 0;
continueLoglikelihoodIteration[THREAD] = true;
regulatorLambda[THREAD] = regulatorLambda0;
while (continueLoglikelihoodIteration[THREAD]) {
nIterLoglikelihood[THREAD]++;
//loglikelihood_old[THREAD] = loglikelihood[THREAD]; // loglikelihood_old is not supposed to be updated in this loop
// Initialize DeltaTheta for LUdecomposition & substitutions
// because X = I\score calculated using LUsubstitutions actually
// replaces values in score and we don't want to loose that information
// so we have to save score into DeltaTheta variable
for (int j = 1; j < nParams; j++) {
DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
}
// Regularize Fisher information matrix with lambda
for (int i = 0; i < nDeltaParams; i++) {
fisherInformation[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i] =
fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i]
+ regulatorLambda[THREAD];
}
// Update regulatorLambda
// (grows geometrically, so each retry is damped more heavily)
regulatorLambda[THREAD] *= regulatorRescaling;
//LUdecomposition(fisherInformation, nDeltaParams, indx, vv, THREAD);
//LUsubstitutions(fisherInformation, nDeltaParams, indx, DeltaTheta, THREAD);
CholeskyDecomposition(fisherInformation, nDeltaParams, vv, THREAD);
CholeskyBacksubstitution(fisherInformation, nDeltaParams, vv, score, DeltaTheta, THREAD);
//goto THE_END_LOGLIKELIHOOD;
// Calculate new theta(2:end)
for (int i = 1; i < nParams; i++) {
new_theta[THREAD * nDeltaParams + i - 1] =
theta[THREAD * nParams + i]
+ DeltaTheta[THREAD * nDeltaParams + i - 1];
}
// Calculate ZTheta based on new_theta
for (int i = 0; i < nDWIs; i++) {
ZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
ZTheta[THREAD * nDWIs + i] +=
Z[j* nDWIs + i] * new_theta[THREAD * nDeltaParams + j - 1];
}
ZTheta[THREAD * nDWIs + i] *= 2.0;
ZTheta[THREAD * nDWIs + i] += c[THREAD]; // c is based on theta(1) and sigmasq that are constant in this loop
}
// Rescale so the exponentials in the loglikelihood cannot overflow.
scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
expScaling[THREAD] = exp(scaling[THREAD]);
// Calculate new loglikelihood
// calculateLoglikelihood updates loglikelihood and expo variables
calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
// Check if new loglikelihood is NaN, if so more regulation is needed
// (f != f) is true only if f is NaN (IEEE standard)
if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
// loglikelihood is NaN, check only iterations
continueLoglikelihoodIteration[THREAD] = (nIterLoglikelihood[THREAD] < iterLimitLoglikelihood);
}
else {
// Retry with more damping while the step still decreases the
// loglikelihood relative to the fixed reference.
continueLoglikelihoodIteration[THREAD] =
((loglikelihood[THREAD] < loglikelihood_old[THREAD])
&&
(nIterLoglikelihood[THREAD] < iterLimitLoglikelihood));
}
}
//THE_END_LOGLIKELIHOOD:
}
// Outer optimization loop over theta[1:]: builds the score and Fisher
// information, takes a damped Newton step via iterateLoglikelihood, and
// accepts the step (copying new_theta into theta) whenever the resulting
// loglikelihood is not NaN; repeats while the step is still informative.
// NOTE(review): loglikelihood_old is overwritten with the new loglikelihood
// immediately BEFORE the (loglikelihood - loglikelihood_old) > tolerance
// test, so that term is always 0 and the continuation decision rests solely
// on DeltaThetaScore and the iteration limit -- confirm intended ordering.
__device__ void iterateTheta(
int *indx,
double *vv,
double *theta,
double *ZTheta,
double *c,
double *fisherInformation,
double *fisherInformation_sym,
double *score,
double *Z,
double *EN,
double *scaling,
double *expScaling,
double *expo,
double *DeltaTheta,
double *DeltaThetaScore,
double *new_theta,
double *loglikelihood,
double *loglikelihood_old,
double *regulatorLambda,
const double regulatorLambda0,
const double regulatorRescaling,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
unsigned int *nIterTheta,
unsigned int *nIterLoglikelihood,
const unsigned int iterLimitTheta,
const unsigned int iterLimitLoglikelihood,
const double toleranceTheta,
const double toleranceLoglikelihood,
bool *continueThetaIteration,
bool *continueLoglikelihoodIteration,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
continueThetaIteration[THREAD] = true;
nIterTheta[THREAD] = 0;
loglikelihood_old[THREAD] = loglikelihood[THREAD];
while (continueThetaIteration[THREAD]) {
nIterTheta[THREAD]++;
calculateFisherInformation(fisherInformation, fisherInformation_sym, Z, score, DeltaTheta, expo, EN, expScaling, nDWIs, nParams, nDeltaParams, THREAD);
// Optimize loglikelihood
iterateLoglikelihood(indx, score, vv, DeltaTheta, Z, expo, theta, loglikelihood, loglikelihood_old, new_theta, regulatorLambda, fisherInformation, fisherInformation_sym, ZTheta, c, scaling, expScaling, EN, nDWIs, nParams, nDeltaParams, regulatorLambda0, regulatorRescaling, nIterLoglikelihood, iterLimitLoglikelihood, toleranceLoglikelihood, continueLoglikelihoodIteration, THREAD);
//goto THE_END_THETA;
// DeltaTheta . score measures how informative the accepted step was.
DeltaThetaScore[THREAD] = 0.0;
for (int i = 0; i < nDeltaParams; i++) {
DeltaThetaScore[THREAD] += DeltaTheta[THREAD * nDeltaParams + i]
* score[THREAD * nDeltaParams + i];
}
// Check if new loglikelihood is NaN, if not
// update theta(2:end) and loglikelihood_old
if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
// NaN, don't update variables
continueThetaIteration[THREAD] = (nIterTheta[THREAD] < iterLimitTheta);
} else {
// Accept the step.
for (int i = 1; i < nParams; i++) {
theta[THREAD * nParams + i] = new_theta[THREAD * nDeltaParams + i - 1];
}
loglikelihood_old[THREAD] = loglikelihood[THREAD];
continueThetaIteration[THREAD] =
(((DeltaThetaScore[THREAD] > toleranceTheta)
||
((loglikelihood[THREAD] - loglikelihood_old[THREAD]) > toleranceLoglikelihood))
&&
(nIterTheta[THREAD] < iterLimitTheta));
}
}
//THE_END_THETA:
}
// Euclidean norms used by the voxel-level convergence test:
//   norm1 = ||theta_old||, norm2 = ||theta - theta_old||.
__device__ void calculateNorms(
double *norm1,
double *norm2,
double *theta,
double *theta_old,
const unsigned int nParams,
size_t const THREAD) {
	double sq_old = 0.0;
	double sq_diff = 0.0;
	for (int i = 0; i < nParams; i++) {
		const double prev = theta_old[THREAD * nParams + i];
		const double diff = theta[THREAD * nParams + i] - prev;
		sq_old += prev * prev;
		sq_diff += diff * diff;
	}
	norm1[THREAD] = sqrt(sq_old);
	norm2[THREAD] = sqrt(sq_diff);
}
// Maximum-likelihood estimation of a Rician-noise signal model, one GPU
// thread per voxel. All pointer arguments are per-voxel workspace/result
// arrays indexed by THREAD (the global voxel index); scalars are shared
// tolerances, iteration limits and problem dimensions.
// The outer loop alternates block-coordinate optimizations:
//   1) SigmaSQ (noise variance) with theta fixed,
//   2) theta[0] (S0 baseline) with the rest fixed,
//   3) theta[1:] (tensor params) via regularized Newton steps,
//   4) SigmaSQ again,
// until both the relative SigmaSQ change and the relative theta step
// fall below tolerance or iterLimitVoxel is reached.
// NOTE(review): each workspace array appears to hold one slot (or one
// nParams/nDWIs-sized stripe) per voxel — confirm allocation sizes at the
// call site.
__global__ void RicianMLE(
double *theta,
double *SigmaSQ,
double *Z,
double *fisherInformation,
double *fisherInformation_sym,
double *score,
double *DeltaTheta,
double *new_theta,
double *vv,
int *indx,
double *theta_old,
double *Y,
double *expZTheta,
double *ZTheta,
double *twotau,
double *expo,
double *EN,
double *b,
double *a,
double *c,
double *sumYSQ,
double *theta1_old,
double *SigmaSQ0,
double *SigmaSQ_old,
double *tmpdouble,
double *scaling,
double *expScaling,
double *loglikelihood,
double *loglikelihood_old,
double *regulatorLambda,
double *DeltaThetaScore,
double *norm1,
double *norm2,
unsigned int *nIterSigmaSQ,
unsigned int *nIterVoxel,
unsigned int *nIterS0,
unsigned int *nIterTheta,
unsigned int *nIterLoglikelihood,
bool *continueSigmaSQIteration,
bool *continueVoxelIteration,
bool *continueS0Iteration,
bool *continueThetaIteration,
bool *continueLoglikelihoodIteration,
bool *anyEN,
const double toleranceSigmaSQ,
const double toleranceS0,
const double toleranceTheta,
const double toleranceLoglikelihood,
const unsigned int iterLimitSigmaSQ,
const unsigned int iterLimitVoxel,
const unsigned int iterLimitS0,
const unsigned int iterLimitTheta,
const unsigned int iterLimitLoglikelihood,
const double regulatorLambda0,
const double regulatorRescaling,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
const unsigned int nVoxels) {
// Initial, work out which THREAD i.e. voxel we are computing
size_t const THREAD = calculateGlobalIndex();
// Guard against over-covering grids: excess threads do nothing.
if (THREAD >= nVoxels) {
return;
}
// First, optimize Rician loglikelihood w.r.t. SigmaSQ
calculateExpZTheta( expZTheta, theta, Z, nParams, nDWIs, THREAD);
calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
// Start voxel-wise optimization
continueVoxelIteration[THREAD] = true;
while (continueVoxelIteration[THREAD]) {
nIterVoxel[THREAD]++;
// Save initial theta and SigmaSQ to be used later to test if voxel optimization continues
SigmaSQ_old[THREAD] = SigmaSQ[THREAD];
for (int i = 0; i < nParams; i++) {
theta_old[THREAD * nParams + i] = theta[THREAD * nParams + i];
}
// Second, optimize w.r.t. S0 i.e. theta(1) with fixed theta(2:end) and SigmaSQ
// calcuateAB_2 updates a,b, expZTheta, and twotau variables
calculateAB_2(a, b, Y, Z, theta, SigmaSQ, expZTheta, twotau, nDWIs, nParams, THREAD);
// iterateS0 updates theta(1) and twotau variables
iterateS0(theta, theta1_old, SigmaSQ, a, b, twotau, nIterS0, iterLimitS0, toleranceS0, nDWIs, nParams, continueS0Iteration, THREAD);
// Third, optimize w.r.t. theta(2:end) with fixed theta(1) and SigmaSQ
// calculateEN updates conditional expectation EN and checks if any(EN > 0)
calculateEN(EN, twotau, nDWIs, anyEN, THREAD);
if (anyEN[THREAD]) {
// There is information to estimate tensor(s)
// calculateZTheta updates c and ZTheta variables
calculateZTheta(c, ZTheta, theta, SigmaSQ, Z, nDWIs, nParams, THREAD);
// Rescale by the max of ZTheta to keep the exponentials below from
// overflowing.
scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
expScaling[THREAD] = exp(scaling[THREAD]);
// calculateLoglikelihood updates loglikelihood and expo variables
calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
iterateTheta(indx, vv, theta, ZTheta, c, fisherInformation, fisherInformation_sym, score, Z, EN, scaling, expScaling, expo, DeltaTheta, DeltaThetaScore, new_theta, loglikelihood, loglikelihood_old, regulatorLambda, regulatorLambda0, regulatorRescaling, nDWIs, nParams, nDeltaParams, nIterTheta, nIterLoglikelihood, iterLimitTheta, iterLimitLoglikelihood, toleranceTheta, toleranceLoglikelihood, continueThetaIteration, continueLoglikelihoodIteration, THREAD);
//goto THE_END;
}
else {
// There is no information for estimations
// Set theta(2:end) and information to zero
for (int i = 1; i < nParams; i++) {
theta[THREAD * nParams + i] = 0.0;
}
initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
}
// Last, optimize w.r.t. SigmaSQ with fixed theta
calculateExpZTheta(expZTheta, theta, Z, nParams, nDWIs, THREAD);
calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
calculateNorms(norm1, norm2, theta, theta_old, nParams, THREAD);
// Continue while either the relative SigmaSQ change or the relative
// theta step exceeds its tolerance, up to iterLimitVoxel iterations.
continueVoxelIteration[THREAD] =
(((fabs((SigmaSQ[THREAD] - SigmaSQ_old[THREAD]) / SigmaSQ_old[THREAD]) > toleranceSigmaSQ)
||
((norm2[THREAD] / norm1[THREAD]) > toleranceTheta))
&&
(nIterVoxel[THREAD] < iterLimitVoxel));
}
//THE_END:
}
11,450 | #include "includes.h"
// One Jacobi-style heat-diffusion update per pixel: each output value is
// the input value plus SPEED times the 4-neighbor Laplacian. Edge pixels
// use replicate (clamped) neighbors. Assumes a DIM x DIM grid covered
// exactly by the 2D launch.
__global__ void blend_kernel( float *outSrc, const float *inSrc ) {
    // Pixel coordinates and flat offset for this thread.
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    const int offset = x + y * blockDim.x * gridDim.x;

    // Neighbor offsets, clamped to the pixel itself at the borders.
    const int left   = (x == 0)       ? offset : offset - 1;
    const int right  = (x == DIM - 1) ? offset : offset + 1;
    const int top    = (y == 0)       ? offset : offset - DIM;
    const int bottom = (y == DIM - 1) ? offset : offset + DIM;

    outSrc[offset] = inSrc[offset] + SPEED * (inSrc[top] + inSrc[bottom] + inSrc[left] + inSrc[right] - inSrc[offset] * 4);
}
11,451 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Applies the boolean gate selected by opp to a and b (both expected to
// be 0 or 1): 0=AND, 1=OR, 2=NAND, 3=NOR, 4=XOR, 5=XNOR.
// Returns 0 for any unrecognized opp.
// was: no default case — control fell off the end of a non-void function
// (undefined behavior) for opp outside 0..5.
int operation(int a, int b, int opp) {
	switch (opp)
	{
	case 0:
		return a && b;
	case 1:
		return a || b;
	case 2:
		return !(a && b);
	case 3:
		return !(a || b);
	case 4:
		return a ^ b;   // bitwise XOR; equals logical XOR for 0/1 inputs
	case 5:
		return !(a ^ b);
	default:
		return 0;
	}
}
// Sequential reference driver: reads "a b opp" digit triples (fixed
// columns 0/2/4) from argv[1], applies operation() to at most argv[2]
// lines, writes one result per line to argv[3], and reports wall-clock
// time measured with CUDA events.
int main(int argc, char* argv[])
{
	// was: argv was dereferenced without checking argc.
	if (argc < 4) {
		fprintf(stderr, "usage: %s <input_file> <num_lines> <solution_file>\n", argv[0]);
		return 1;
	}
	char* file_name = argv[1];
	char* file_solution = argv[3];
	unsigned int file_length = atol(argv[2]);
	FILE* file_answer = fopen(file_solution, "w");
	// was: the output-file handle was never checked before use.
	if (file_answer == NULL) {
		perror("unable to open solution file");
		return 1;
	}
	FILE* file_one = fopen(file_name, "r");
	if (file_one == NULL) {
		perror("unable to open file");
		fclose(file_answer);
		exit(1);
	}
	char line[256];
	unsigned int lineCounter = 0;  // unsigned to match file_length comparison
	float memsettime;
	// CUDA events are used here purely as a wall-clock timer around the
	// sequential CPU loop.
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	cudaEventSynchronize(start);
	while (fgets(line, sizeof(line), file_one) && lineCounter < file_length) {
		lineCounter++;
		int a = line[0] - '0';
		int b = line[2] - '0';
		int opp = line[4] - '0';
		int solution = operation(a, b, opp);
		fprintf(file_answer, "%d\n", solution);
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&memsettime, start, stop);
	printf(" * Sequential execution time : %f * \n", memsettime);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	fclose(file_answer);
	fclose(file_one);
	return 0;
}
11,452 | #include "includes.h"
// 1-D linear convection solved with a first-order upwind scheme:
// nt explicit time steps of u[i] -= c*dt/dx * (u[i] - u[i-1]),
// using un as a snapshot of the previous time level. Threads cover the
// nx grid points with grid-stride loops; u[0] is never updated (the
// inflow boundary value is kept fixed).
// NOTE(review): c is declared int (integer wave speed) — confirm intended.
// NOTE(review): there is no synchronization between writing un and
// reading un[i-1] (potentially written by another thread/block), nor
// between time steps, so this is only race-free for a single-thread
// launch — verify the launch configuration at the call site.
__global__ void finiteDiff(const int c, const double dt, const double dx, const int nt, const int nx, double *u, double *un) {
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	int stride = blockDim.x * gridDim.x;
	for (int t = 0; t < nt; t++) {
		// Snapshot the current time level into un.
		for (int i = index; i < nx; i += stride) {
			un[i] = u[i];
		}
		// Upwind update; starts at index+1 so i-1 is always valid.
		for (int i = index + 1; i < nx; i += stride) {
			u[i] = un[i] - c * dt / dx * (un[i] - un[i - 1]);
		}
	}
}
11,453 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define N 256
// Computes A = B * C for a row-major N x N matrix B and length-N vector C.
// Written for a <<<1,1>>> launch: the single thread computes every row.
__global__ void matrix_vector_multi(float *A_d, float * B_d, float *C_d) {
	int i, j;
	// was: for (j = 0; i < N; j++) — the condition tested the (initially
	// uninitialized) inner index i instead of j, so the outer loop read an
	// indeterminate value on entry and exited after a single iteration.
	for (j = 0; j < N; j++) {
		A_d[j] = 0.0F;
		for (i = 0; i < N; i++) {
			A_d[j] = A_d[j] + B_d[j*N + i] * C_d[i];
		}
	}
}
// Host driver: builds B (row j filled with j/256) and C (all ones),
// runs matrix_vector_multi with a single thread, and prints the result.
int main(void) {
	// Device selection (modulo 8 kept from multi-GPU setups).
	const int deviceNo = 0;
	cudaSetDevice(deviceNo % 8);

	float A[N], B[N*N], C[N];
	float *A_d, *B_d, *C_d;
	dim3 blocks(1, 1, 1);
	dim3 threads(1, 1, 1);

	// B[j][i] = j / 256 for every column i; C is all ones.
	for (int j = 0; j < N; j++) {
		for (int i = 0; i < N; i++) {
			B[j*N + i] = ((float)j) / 256.0;
		}
	}
	for (int j = 0; j < N; j++) {
		C[j] = 1.0F;
	}

	cudaMalloc((void**)& A_d, N * sizeof(float));
	cudaMalloc((void**)& B_d, N*N * sizeof(float));
	cudaMalloc((void**)& C_d, N * sizeof(float));
	cudaMemcpy(B_d, B, N*N * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(C_d, C, N * sizeof(float), cudaMemcpyHostToDevice);

	// Single-thread launch; the kernel loops over all rows itself.
	matrix_vector_multi <<< blocks, threads >>> (A_d, B_d, C_d);
	cudaMemcpy(A, A_d, N * sizeof(float), cudaMemcpyDeviceToHost);

	for (int j = 0; j < N; j++) {
		printf("A[%d]=%f \n", j, A[j]);
	}

	cudaFree(A_d);
	cudaFree(B_d);
	cudaFree(C_d);
}
/*
==11856== Profiling application: .\Debug\chap2_02.exe
==11856== Profiling result:
Time(%) Time Calls Avg Min Max Name
73.21% 133.63us 1 133.63us 133.63us 133.63us matrix_vector_multi(float*, float*, float*)
25.46% 46.465us 2 23.232us 1.1840us 45.281us [CUDA memcpy HtoD]
1.33% 2.4320us 1 2.4320us 2.4320us 2.4320us [CUDA memcpy DtoH]
==11856== API calls:
Time(%) Time Calls Avg Min Max Name
98.47% 132.94ms 3 44.313ms 13.998us 132.47ms cudaMalloc
0.47% 633.40us 91 6.9600us 0ns 278.91us cuDeviceGetAttribute
0.46% 621.85us 3 207.28us 29.745us 321.25us cudaFree
0.40% 539.97us 3 179.99us 106.38us 270.86us cudaMemcpy
0.13% 181.97us 1 181.97us 181.97us 181.97us cuDeviceGetName
0.04% 56.341us 1 56.341us 56.341us 56.341us cudaLaunch
0.01% 11.198us 1 11.198us 11.198us 11.198us cuDeviceTotalMem
0.01% 9.0990us 1 9.0990us 9.0990us 9.0990us cudaConfigureCall
0.00% 3.1500us 3 1.0500us 350ns 1.7500us cuDeviceGetCount
0.00% 3.1490us 3 1.0490us 350ns 2.0990us cuDeviceGet
0.00% 1.7490us 3 583ns 350ns 700ns cudaSetupArgument
*/ |
11,454 | #include "includes.h"
// Accumulates one gradient batch: nabla_w[i] += delta_nabla_w[i] for every
// index below tws (total weight count); one thread per element.
__global__ void kernelUpdateNablaW(float *nabla_w,float *delta_nabla_w,int tws) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < tws) {
    nabla_w[idx] += delta_nabla_w[idx];
  }
}
11,455 | #include "stdio.h"
#include "stdlib.h"
#include "cuda.h"
#define W 4
// Host reference implementation: P = M * N for W x W row-major matrices
// (triple loop, O(W^3)). Always returns 0.
int matrixMul_cpu(float *M, float *N, float *P)
{
    for (int row = 0; row < W; row++) {
        for (int col = 0; col < W; col++) {
            float acc = 0;
            for (int k = 0; k < W; k++) {
                acc += M[row * W + k] * N[k * W + col];
            }
            P[row * W + col] = acc;
        }
    }
    return 0;
}
// Naive device matrix multiply P = M * N (width x width, row-major).
// Written for a single-block launch where threadIdx spans the matrix:
// blockIdx is deliberately ignored.
__global__ void matrixMul_gpu(float *M,float *N, float *P, int width)
{
    const int row = threadIdx.y;
    const int col = threadIdx.x;
    float acc = 0;
    for (int k = 0; k < width; k++) {
        acc += M[row * width + k] * N[k * width + col];
    }
    P[row * width + col] = acc;
}
// Host driver: fills M with 0..W*W-1 and N with 1..W*W, multiplies on the
// CPU (reference) and on the GPU, copies the GPU result back into P and
// prints it row by row.
int main()
{
	// Device selection (modulo 8 kept from multi-GPU setups).
	const int sNo = 0;
	cudaSetDevice(sNo % 8);

	const int size = W * W * sizeof(float);

	// Host buffers.
	float *M = (float *)malloc(size);
	float *N = (float *)malloc(size);
	float *P = (float *)malloc(size);

	// Device buffers.
	float *d_M, *d_N, *d_P;
	cudaMalloc((void **)&d_M, size);
	cudaMalloc((void **)&d_N, size);
	cudaMalloc((void **)&d_P, size);

	// M = 0,1,2,...  N = 1,2,3,...  P cleared.
	for (int k = 0; k < W * W; k++) {
		M[k] = k;
		N[k] = k + 1;
		P[k] = 0;
	}

	cudaMemcpy(d_M, M, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_N, N, size, cudaMemcpyHostToDevice);

	// CPU reference (result is overwritten by the GPU copy-back below).
	matrixMul_cpu(M, N, P);

	// One block of W x W threads computes the whole product.
	dim3 threadPerBlock(W, W);
	matrixMul_gpu<<< 1, threadPerBlock >>>(d_M, d_N, d_P, W);
	cudaMemcpy(P, d_P, size, cudaMemcpyDeviceToHost);

	for (int r = 0; r < W; r++)
	{
		for (int c = 0; c < W; c++)
			printf("%f ", P[r * W + c]);
		printf("\n");
	}

	free(M); free(N); free(P);
	cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
	return 0;
}
|
11,456 | #include "includes.h"
// Converts one RGB pixel per thread to HSL using the standard max/min
// formulation: img_h and img_s in [0,1], img_l rescaled to [0,255].
__global__ void rgb2hsl_kernel(int img_size, unsigned char *img_r, unsigned char *img_g, unsigned char *img_b, float *img_h, float *img_s, unsigned char *img_l)
{
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	// was: img_size was accepted but never checked, so threads past the end
	// of the image read and wrote out of bounds when the grid over-covered it.
	if (i >= img_size)
		return;
	float H, S, L;
	float var_r = ( (float)img_r[i]/255 );//Convert RGB to [0,1]
	float var_g = ( (float)img_g[i]/255 );
	float var_b = ( (float)img_b[i]/255 );
	float var_min = (var_r < var_g) ? var_r : var_g;
	var_min = (var_min < var_b) ? var_min : var_b; //min. value of RGB
	float var_max = (var_r > var_g) ? var_r : var_g;
	var_max = (var_max > var_b) ? var_max : var_b; //max. value of RGB
	float del_max = var_max - var_min; //Delta RGB value
	L = ( var_max + var_min ) / 2;
	if ( del_max == 0 )//This is a gray, no chroma...
	{
		H = 0;
		S = 0;
	}
	else //Chromatic data...
	{
		// Saturation depends on which half of the lightness range we are in.
		if ( L < 0.5 )
			S = del_max/(var_max+var_min);
		else
			S = del_max/(2-var_max-var_min );
		float del_r = (((var_max-var_r)/6)+(del_max/2))/del_max;
		float del_g = (((var_max-var_g)/6)+(del_max/2))/del_max;
		float del_b = (((var_max-var_b)/6)+(del_max/2))/del_max;
		// Hue sector is chosen by which channel holds the maximum.
		if( var_r == var_max ){
			H = del_b - del_g;
		}
		else{
			if( var_g == var_max ){
				H = (1.0/3.0) + del_r - del_b;
			}
			else{
				H = (2.0/3.0) + del_g - del_r;
			}
		}
	}
	// Wrap hue into [0,1].
	if ( H < 0 )
		H += 1;
	if ( H > 1 )
		H -= 1;
	img_h[i] = H;
	img_s[i] = S;
	img_l[i] = (unsigned char)(L*255);
}
11,457 | // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only \
// RUN: -verify -fopenmp %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only \
// RUN: -verify -fopenmp -x hip %s
// expected-no-diagnostics
// Tests there is no assertion in Sema::markKnownEmitted when fopenmp is used
// with CUDA/HIP host compilation.
// Chain of static host functions: f is reached only through g, and g
// through h; h itself is never referenced. Per the RUN lines above, this
// exercises Sema::markKnownEmitted bookkeeping — do not add attributes or
// calls, or the test loses its purpose.
static void f() {}
static void g() { f(); }
static void h() { g(); }
|
11,458 | #include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
typedef uint32_t uint;
#define H2D cudaMemcpyHostToDevice
#define D2H cudaMemcpyDeviceToHost
#define OK cudaSuccess
//CPU
uint i = 0, ind = 0;
float sec = 0.0f;
#define N_MB 1
#define N N_MB * 1024 * 1024
const uint NBytes_f32 = sizeof( float ) * N;
const uint nArrays = 1; //single default stream of 1D array
float *h_arr[ nArrays ], *h_result[ nArrays ]; //pinned H2D && D2H transfers
float nonPinnedArr[ N ]; //non-pinned H2D && D2H transfers are about 2-3times slower via PCIe
float2 *h_f2;
float3 *h_f3;
float4 *h_f4; //float4 host ptr ("http://roxlu.com/2013/011/basic-cuda-example")
high_resolution_clock::time_point t;
//GPU
float *d_arr[ nArrays ];
__device__ float2 d_arr2[ N / 2 ];
__device__ float3 d_arr3[ N / 3 ];
__device__ float4 d_arr4[ N / 4 ];
float2 *d_f2;
float3 *d_f3;
float4 *d_f4; //float4 device ptr
const uint nThreads = 512, nBlocks = ( N / nThreads ) + 1;
// Releases every allocation made by initGPUMem() and resets the device.
// Always returns 0.
int freeGPUMem( void )
{
	// Per-array allocations: pinned host buffers and device buffers.
	for ( i= 0; i < nArrays; i++ )
	{
		cudaFreeHost( h_arr[ i ] );     // allocated with cudaMallocHost
		cudaFreeHost( h_result[ i ] );  // allocated with cudaMallocHost
		cudaFree( d_arr[ i ] );         // allocated with cudaMalloc
	};
	// One-time allocations, released once (was: freed inside the per-array
	// loop, which would double-free for nArrays > 1).
	// h_f2/h_f3/h_f4 were allocated with malloc(), so they must be released
	// with free() (was: cudaFree on host-malloc'd pointers).
	free( h_f2 );
	free( h_f3 );
	free( h_f4 );
	cudaFree( d_f2 );
	cudaFree( d_f3 );
	cudaFree( d_f4 );
	// NOTE: nonPinnedArr is a static global array and d_arr2/d_arr3/d_arr4
	// are __device__ symbols — none of them may be passed to
	// cudaFreeHost/cudaFree (was: invalid frees of all four).
	cudaDeviceReset();
	return 0;
};
// Allocates and fills every host/device buffer used by the benchmark
// (pinned h_arr/h_result, device d_arr[i], packed float2/3/4 copies), and
// runs the CPU-side pinned vs non-pinned timing passes along the way.
// Pair with freeGPUMem().
void initGPUMem( void )
{
	for ( i= 0; i < nArrays; i++ )
	{
		if ( cudaMallocHost( ( void** ) &h_arr[ i ], NBytes_f32 ) != OK ) { printf( "cudaMallocHost err!\n" ); return; };
		if ( cudaMallocHost( ( void** ) &h_result[ i ], NBytes_f32 ) != OK ) { printf( "cudaMallocHost err!\n" ); return; };
		// was: cudaMalloc( (void**)&d_arr, ... ) — that overwrote the host
		// array of device pointers itself and left d_arr[ i ] unallocated,
		// so the cudaMemcpy at the end targeted an invalid pointer.
		if ( cudaMalloc( ( void** ) &d_arr[ i ], NBytes_f32 ) != OK ) { printf( "cudaMalloc err!\n" ); return; };
		for ( ind = 0; ind < N; ind++ )
		{
			h_arr[ i ][ ind ] = float( ind );
			nonPinnedArr[ ind ] = float( ind );
		};
		// --- CPU timing with <chrono>: pinned vs non-pinned host memory ---
		auto t1 = chrono::high_resolution_clock::now();
		for ( ind = 0; ind < N; ind++ )
			h_arr[ i ][ ind ] += h_arr[ i ][ ind ];
		auto t2 = chrono::high_resolution_clock::now();
		sec = duration_cast< duration< float > >( t2 - t1 ).count() ; //[ms]->[s]
		cout << "CPU pinned accesses took <chrono> : "
			<< sec << " [s]; "
			<< float( N_MB ) / sec << "[MBps]\n";
		auto t3 = chrono::high_resolution_clock::now();
		for ( ind = 0; ind < N; ind++ )
			nonPinnedArr[ ind ] += nonPinnedArr[ ind ];
		auto t4 = chrono::high_resolution_clock::now();
		sec = duration_cast< duration< float > >( t4 - t3 ).count() ;
		cout << "CPU non-pinned accesses took <chrono> : "
			<< sec << " [s]; "
			<< float( N_MB ) / sec << "[MBps]\n";
		//============================================================================
		// --- Same two passes timed with cudaEvent (data re-initialized) ---
		for ( ind = 0; ind < N; ind++ )
		{
			h_arr[ i ][ ind ] = float( ind );
			nonPinnedArr[ ind ] = float( ind );
		};
		cudaEvent_t start1, stop1;
		cudaEventCreate( &start1 );
		cudaEventCreate( &stop1 );
		cudaEventRecord( start1 );
		for ( ind = 0; ind < N; ind++ )
			h_arr[ i ][ ind ] += h_arr[ i ][ ind ];
		cudaEventRecord( stop1 );
		cudaEventSynchronize( stop1 );
		float milliseconds = 0.0f;
		cudaEventElapsedTime( &milliseconds, start1, stop1 );
		sec = milliseconds / 1000.0f;
		cout << "CPU pinned accesses took <cudaEvent> : "
			<< sec << " [s]; "
			<< float( N_MB ) / sec << "[MBps]\n";
		cudaEvent_t start2, stop2;
		cudaEventCreate( &start2 );
		cudaEventCreate( &stop2 );
		cudaEventRecord( start2 );
		for ( ind = 0; ind < N; ind++ )
			nonPinnedArr[ ind ] += nonPinnedArr[ ind ];
		cudaEventRecord( stop2 );
		cudaEventSynchronize( stop2 );
		milliseconds = 0.0f;
		cudaEventElapsedTime( &milliseconds, start2, stop2 );
		sec = milliseconds / 1000.0f;
		cout << "CPU non-pinned accesses took <cudaEvent> : "
			<< sec << " [s]; "
			<< float( N_MB ) / sec << "[MBps]\n";
		//============================================================================
		// --- Pack h_arr into float2/float3/float4 host arrays and upload ---
		// N floats repacked as N/2 float2 (or N/3 float3, N/4 float4) occupy
		// exactly NBytes_f32 bytes in every case.
		h_f2 = ( float2* )malloc( NBytes_f32 );
		for ( ind = 0; ind < N; ind++ )
		{
			if ( ( ind % 2 ) == 0 )
				h_f2[ ind / 2 ].x = h_arr[ i ][ ind ];
			else if ( ( ind % 2 ) == 1 )
				h_f2[ ind / 2 ].y = h_arr[ i ][ ind ];
		};
		// (was: a dead "d_f2 = h_f2;" store, immediately overwritten by the
		// cudaMalloc below — removed; likewise for d_f3/d_f4.)
		if ( cudaMalloc( &d_f2, NBytes_f32 ) != OK ) { printf( "cudaMalloc err!" ); return; };
		if ( cudaMemcpy( d_f2, h_f2, NBytes_f32, H2D ) != OK ) { printf( "cudaMemcpy err!" ); return; };
		h_f3 = ( float3* )malloc( NBytes_f32 );
		for ( ind = 0; ind < N; ind++ )
		{
			if ( ( ind % 3 ) == 0 )
				h_f3[ ind / 3 ].x = h_arr[ i ][ ind ];
			else if ( ( ind % 3 ) == 1 )
				h_f3[ ind / 3 ].y = h_arr[ i ][ ind ];
			else if ( ( ind % 3 ) == 2 )
				h_f3[ ind / 3 ].z = h_arr[ i ][ ind ];
		};
		if ( cudaMalloc( &d_f3, NBytes_f32 ) != OK ) { printf( "cudaMalloc err!" ); return; };
		if ( cudaMemcpy( d_f3, h_f3, NBytes_f32, H2D ) != OK ) { printf( "cudaMemcpy err!" ); return; };
		h_f4 = ( float4* )malloc( NBytes_f32 );
		for ( ind = 0; ind < N; ind++ )
		{
			if ( ( ind % 4 ) == 0 )
				h_f4[ ind / 4 ].x = h_arr[ i ][ ind ];
			else if ( ( ind % 4 ) == 1 )
				h_f4[ ind / 4 ].y = h_arr[ i ][ ind ];
			else if ( ( ind % 4 ) == 2 )
				h_f4[ ind / 4 ].z = h_arr[ i ][ ind ];
			else if ( ( ind % 4 ) == 3 )
				h_f4[ ind / 4 ].w = h_arr[ i ][ ind ];
		};
		if ( cudaMalloc( &d_f4, NBytes_f32 ) != OK ) { printf( "cudaMalloc err!" ); return; };
		if ( cudaMemcpy( d_f4, h_f4, NBytes_f32, H2D ) != OK ) { printf( "cudaMemcpy err!" ); return; };
		// Reset h_arr (the timing passes doubled it) and upload to the device.
		for ( ind = 0; ind < N; ind++ )
			h_arr[ i ][ ind ] = float( ind );
		for ( ind = 0; ind < 3; ind++ )
			cout << "h_arr[" << ind << "]: " << h_arr[ 0 ][ ind ] << endl;
		cudaMemcpy( d_arr[ i ], h_arr[ i ], NBytes_f32, H2D );
	};
};
// GPU warm-up kernel: an empty N-iteration loop plus a dead increment;
// performs no memory traffic and produces no result.
__global__ void nop( void )
{
	uint a = 0;
	for ( size_t k = 0; k < N; k++ )
		; // intentionally empty
	a++;
};
// Doubles every element of d_in, with each participating thread walking
// the entire array; launched <<<1,1>>> so a single thread does all N
// accesses (fully serialized global-memory traffic).
__global__ void singleThreadAccess( float *d_in )
{
	uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= N )
		return;
	for ( uint k = 0; k < N; k++ )
		d_in[ k ] += d_in[ k ];
};
// Doubles d_in with one element per thread (contiguous per-thread access).
__global__ void medianThreadAccess( float *d_in )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= N )
		return;
	d_in[ tdx ] += d_in[ tdx ];
};
// Packs the flat array d_in into the __device__ float2 array d_arr2:
// even source indices land in .x, odd ones in .y.
__global__ void makeFloat2( float *d_in )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= N )
		return;
	if ( tdx % 2 == 0 )
		d_arr2[ tdx / 2 ].x = d_in[ tdx ];
	else
		d_arr2[ tdx / 2 ].y = d_in[ tdx ];
};
// Doubles both components of every float2 in the __device__ array d_arr2
// (N/2 elements, one per thread).
__global__ void float2_Access( void )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= ( N / 2 ) )
		return;
	d_arr2[ tdx ].x += d_arr2[ tdx ].x;
	d_arr2[ tdx ].y += d_arr2[ tdx ].y;
};
// Packs the flat array d_in into the __device__ float3 array d_arr3:
// source index mod 3 selects the .x/.y/.z component.
__global__ void makeFloat3( float *d_in )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= N )
		return;
	switch ( tdx % 3 )
	{
		case 0: d_arr3[ tdx / 3 ].x = d_in[ tdx ]; break;
		case 1: d_arr3[ tdx / 3 ].y = d_in[ tdx ]; break;
		case 2: d_arr3[ tdx / 3 ].z = d_in[ tdx ]; break;
	};
};
// Doubles all three components of every float3 in the __device__ array
// d_arr3 (N/3 elements, one per thread).
__global__ void float3_Access( void )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= ( N / 3 ) )
		return;
	d_arr3[ tdx ].x += d_arr3[ tdx ].x;
	d_arr3[ tdx ].y += d_arr3[ tdx ].y;
	d_arr3[ tdx ].z += d_arr3[ tdx ].z;
};
// Packs the flat array d_in into the __device__ float4 array d_arr4:
// source index mod 4 selects the .x/.y/.z/.w component.
__global__ void makeFloat4( float *d_in )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= N )
		return;
	switch ( tdx % 4 )
	{
		case 0: d_arr4[ tdx / 4 ].x = d_in[ tdx ]; break;
		case 1: d_arr4[ tdx / 4 ].y = d_in[ tdx ]; break;
		case 2: d_arr4[ tdx / 4 ].z = d_in[ tdx ]; break;
		case 3: d_arr4[ tdx / 4 ].w = d_in[ tdx ]; break;
	};
};
// Doubles all four components of every float4 in the __device__ array
// d_arr4 (N/4 elements, one per thread).
__global__ void float4_Access( void )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= ( N / 4 ) )
		return;
	d_arr4[ tdx ].x += d_arr4[ tdx ].x;
	d_arr4[ tdx ].y += d_arr4[ tdx ].y;
	d_arr4[ tdx ].z += d_arr4[ tdx ].z;
	d_arr4[ tdx ].w += d_arr4[ tdx ].w;
};
// Doubles both components of every float2 in the passed device array
// (N/2 elements, one per thread).
__global__ void arrFloat2_Access( float2 *d_in2 )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= ( N / 2 ) )
		return;
	d_in2[ tdx ].x += d_in2[ tdx ].x;
	d_in2[ tdx ].y += d_in2[ tdx ].y;
};
// Doubles all three components of every float3 in the passed device array
// (N/3 elements, one per thread).
__global__ void arrFloat3_Access( float3 *d_in3 )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= ( N / 3 ) )
		return;
	d_in3[ tdx ].x += d_in3[ tdx ].x;
	d_in3[ tdx ].y += d_in3[ tdx ].y;
	d_in3[ tdx ].z += d_in3[ tdx ].z;
};
// Doubles all four components of every float4 in the passed device array
// (N/4 elements, one per thread).
__global__ void arrFloat4_Access( float4 *d_inf4 )
{
	const uint tdx = threadIdx.x + blockIdx.x * blockDim.x;
	if ( tdx >= ( N / 4 ) )
		return;
	d_inf4[ tdx ].x += d_inf4[ tdx ].x;
	d_inf4[ tdx ].y += d_inf4[ tdx ].y;
	d_inf4[ tdx ].z += d_inf4[ tdx ].z;
	d_inf4[ tdx ].w += d_inf4[ tdx ].w;
};
// Benchmark driver: for every CUDA device, allocates/initializes buffers
// (initGPUMem), then times the float/float2/float3/float4 access kernels —
// first with <chrono> wall clock, then again with cudaEvent timing — and
// finally copies one result array back for a spot check before freeing
// everything (freeGPUMem).
int main( void )
{ t = chrono::high_resolution_clock::now(); i=0;while( i < ( 0x1u << 18 ) ) { h_arr[ 0 ] = h_arr[ 0 ]; i+=1; }; //CPU warm up
	int gpuCount = 0;
	cudaGetDeviceCount( &gpuCount );
	for ( size_t gpuNo = 0; gpuNo < gpuCount; gpuNo++ )
	{
		cudaSetDevice( gpuNo );
		cudaDeviceProp gpuProperties; cudaGetDeviceProperties( &gpuProperties, gpuNo ); cout << endl << gpuProperties.name << ": " << endl;
		initGPUMem();
		for( i = 0; i < nArrays; i++ )
		{
			// ---- <chrono>-timed runs (pack kernel + access kernel pairs) ----
			auto f1 = chrono::high_resolution_clock::now();
			makeFloat2<<< nBlocks, nThreads >>>( d_arr[ i ] );
			float2_Access<<< ( N / 2 ) / nThreads, nThreads >>>();
			cudaDeviceSynchronize();
			auto f2 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( f2 - f1 ).count() ;
			cout << "GPU split float2_Access took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			auto f3 = chrono::high_resolution_clock::now();
			makeFloat3<<< nBlocks, nThreads >>>( d_arr[ i ] );
			float3_Access<<< ( N / 3 ) / nThreads, nThreads >>>();
			cudaDeviceSynchronize();
			auto f4 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( f4 - f3 ).count() ;
			cout << "GPU split float3_Access took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			auto f5 = chrono::high_resolution_clock::now();
			makeFloat4<<< nBlocks, nThreads >>>( d_arr[ i ] );
			float4_Access<<< ( N / 4 ) / nThreads, nThreads >>>();
			cudaDeviceSynchronize();
			auto f6 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( f6 - f5 ).count() ;
			cout << "GPU split float4_Access took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			// Same access patterns on cudaMalloc'd float2/3/4 arrays.
			auto f7 = chrono::high_resolution_clock::now();
			arrFloat2_Access<<< ( N / 2 ) / nThreads, nThreads >>>( d_f2 );
			cudaDeviceSynchronize();
			auto f8 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( f8 - f7 ).count() ;
			cout << "GPU pinned arrFloat2_Access took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			auto f9 = chrono::high_resolution_clock::now();
			arrFloat3_Access<<< ( N / 3 ) / nThreads, nThreads >>>( d_f3 );
			cudaDeviceSynchronize();
			auto f10 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( f10 - f9 ).count() ;
			cout << "GPU pinned arrFloat3_Access took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			auto f11 = chrono::high_resolution_clock::now();
			arrFloat4_Access<<< ( N / 4 ) / nThreads, nThreads >>>( d_f4 );
			cudaDeviceSynchronize();
			auto f12 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( f12 - f11 ).count() ;
			cout << "GPU pinned arrFloat4_Access took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			// Serialized (single-thread) vs one-element-per-thread access.
			auto t1 = chrono::high_resolution_clock::now();
			singleThreadAccess<<< 1, 1 >>>( d_arr[ i ] );
			cudaDeviceSynchronize();
			auto t2 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( t2 - t1 ).count() ;
			cout << "single thread GPU accesses took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			auto t3 = chrono::high_resolution_clock::now();
			medianThreadAccess<<< nBlocks, nThreads >>>( d_arr[ i ] );
			cudaDeviceSynchronize();
			auto t4 = chrono::high_resolution_clock::now();
			sec = duration_cast< duration< float > >( t4 - t3 ).count() ;
			cout << "nBlocks[" << nBlocks << "]; nThreads[" << nThreads << "]; GPU accesses took <chrono> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			//============================================================================
			// ---- Same sequence of runs timed with cudaEvent pairs ----
			cudaEvent_t start1, stop1;
			cudaEventCreate( &start1 );
			cudaEventCreate( &stop1 );
			cudaEventRecord( start1 );
			makeFloat2<<< nBlocks, nThreads >>>( d_arr[ i ] );
			float2_Access<<< ( N / 2 ) / nThreads, nThreads >>>();
			cudaDeviceSynchronize();
			cudaEventRecord( stop1 );
			cudaEventSynchronize( stop1 );
			float milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start1, stop1 );
			sec = milliseconds / 1000.0f;
			cout << "nBlocks[" << nBlocks << "]; nThreads[" << nThreads << "]; GPU split float2_Access took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			cudaEvent_t start2, stop2;
			cudaEventCreate( &start2 );
			cudaEventCreate( &stop2 );
			cudaEventRecord( start2 );
			makeFloat3<<< nBlocks, nThreads >>>( d_arr[ i ] );
			float3_Access<<< ( N / 3 ) / nThreads, nThreads >>>();
			cudaDeviceSynchronize();
			cudaEventRecord( stop2 );
			cudaEventSynchronize( stop2 );
			milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start2, stop2 );
			sec = milliseconds / 1000.0f;
			cout << "nBlocks[" << nBlocks << "]; nThreads[" << nThreads << "]; GPU split float3_Access took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			cudaEvent_t start3, stop3;
			cudaEventCreate( &start3 );
			cudaEventCreate( &stop3 );
			cudaEventRecord( start3 );
			makeFloat4<<< nBlocks, nThreads >>>( d_arr[ i ] );
			float4_Access<<< ( N / 4 ) / nThreads, nThreads >>>();
			cudaDeviceSynchronize();
			cudaEventRecord( stop3 );
			cudaEventSynchronize( stop3 );
			milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start3, stop3 );
			sec = milliseconds / 1000.0f;
			cout << "nBlocks[" << nBlocks << "]; nThreads[" << nThreads << "]; GPU split float4_Access took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			cudaEvent_t start4, stop4;
			cudaEventCreate( &start4 );
			cudaEventCreate( &stop4 );
			cudaEventRecord( start4 );
			arrFloat2_Access<<< ( N / 2 ) / nThreads, nThreads >>>( d_f2 );
			cudaDeviceSynchronize();
			cudaEventRecord( stop4 );
			cudaEventSynchronize( stop4 );
			milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start4, stop4 );
			sec = milliseconds / 1000.0f;
			cout << "nBlocks[" << ( N / 2 ) / nThreads << "]; nThreads[" << nThreads << "]; GPU pinned arrFloat2_Access took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			cudaEvent_t start5, stop5;
			cudaEventCreate( &start5 );
			cudaEventCreate( &stop5 );
			cudaEventRecord( start5 );
			arrFloat3_Access<<< ( N / 3 ) / nThreads, nThreads >>>( d_f3 );
			cudaDeviceSynchronize();
			cudaEventRecord( stop5 );
			cudaEventSynchronize( stop5 );
			milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start5, stop5 );
			sec = milliseconds / 1000.0f;
			cout << "nBlocks[" << ( N / 3 ) / nThreads << "]; nThreads[" << nThreads << "]; GPU pinned arrFloat3_Access took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			cudaEvent_t start6, stop6;
			cudaEventCreate( &start6 );
			cudaEventCreate( &stop6 );
			cudaEventRecord( start6 );
			arrFloat4_Access<<< ( N / 4 ) / nThreads, nThreads >>>(d_f4);
			cudaDeviceSynchronize();
			cudaEventRecord( stop6 );
			cudaEventSynchronize( stop6 );
			milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start6, stop6 );
			sec = milliseconds / 1000.0f;
			cout << "nBlocks[" << ( N / 4 ) / nThreads << "]; nThreads[" << nThreads << "]; GPU pinned arrFloat4_Access took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			cudaEvent_t start7, stop7;
			cudaEventCreate( &start7 );
			cudaEventCreate( &stop7 );
			cudaEventRecord( start7 );
			singleThreadAccess<<< 1, 1 >>>( d_arr[ i ] );
			cudaDeviceSynchronize();
			cudaEventRecord( stop7 );
			cudaEventSynchronize( stop7 );
			milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start7, stop7 );
			sec = milliseconds / 1000.0f;
			cout << "single thread GPU accesses took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			cudaEvent_t start8, stop8;
			cudaEventCreate( &start8 );
			cudaEventCreate( &stop8 );
			cudaEventRecord( start8 );
			medianThreadAccess<<< nBlocks, nThreads >>>( d_arr[ i ] );
			cudaDeviceSynchronize();
			cudaEventRecord( stop8 );
			cudaEventSynchronize( stop8 );
			milliseconds = 0.0f;
			cudaEventElapsedTime( &milliseconds, start8, stop8 );
			sec = milliseconds / 1000.0f;
			cout << "nBlocks[" << nBlocks << "]; nThreads[" << nThreads << "]; GPU accesses took <cudaEvent> : "
				<< sec << " [s]; "
				<< float( N_MB ) / sec << "[MBps]\n";
			nop<<< nBlocks, nThreads >>>();
			//============================================================================
			// Copy the (repeatedly doubled) d_arr back for the spot check below.
			cudaMemcpy( h_result[ i ], d_arr[ i ], NBytes_f32, D2H );//cudaMemcpyFromSymbol( h_result[ i ], d_arr2, NBytes_f32, H2D );
		};
		for ( ind = 0; ind < 3; ind++ )
			cout << " h_result[" << ind << "]: " << h_result[ 0 ][ ind ] << endl;
		freeGPUMem();
	}; //end of gpuCount
	return 0;
}
|
11,459 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define FRACTION_CEILING(numerator, denominator) ((numerator+denominator-1)/denominator)
/* Filter on GPU constant memory */
__constant__ int filterGPU[9];
/* Indexes a 2D array which is contiguously allocated in memory as a 1D array at position: (row, column) */
/* Returns the address of channel 0 of pixel (row, col) in a row-major
   image of `width` pixels per row and `bpp` bytes per pixel. */
__device__ inline unsigned char* indexAt(unsigned char *array, int width, int bpp, int row, int col) {
	const int pixel = row * width + col; // linear pixel index
	return array + pixel * bpp;
}
/* int-array counterpart of indexAt: address of element (row, col) with
   `bpp` ints per cell (used to index the 3x3 constant filter). */
__device__ inline int* indexAt2(int *array, int width, int bpp, int row, int col) {
	const int cell = row * width + col; // linear cell index
	return array + cell * bpp;
}
/* Convolution on GPU (1 thread per pixel) */
__global__ void convolute(unsigned char *image, unsigned char *buffer, int sum, int height, int width, int bpp) {
// Get thread's coordinates
int X = threadIdx.x + blockIdx.x * blockDim.x;
int Y = threadIdx.y + blockIdx.y * blockDim.y;
// If it is inner pixel, convolute
if ((X > 0) && (Y > 0) && (X < height - 1) && (Y < width - 1)) {
for (int offset = 0; offset < bpp; offset++) {
int newValue = (*(indexAt(image, width, bpp, X - 1 , Y - 1 ) + offset)) * (*indexAt2(filterGPU, 3, 1, 0, 0))
+ (*(indexAt(image, width, bpp, X - 1 , Y ) + offset)) * (*indexAt2(filterGPU, 3, 1, 0, 1))
+ (*(indexAt(image, width, bpp, X - 1 , Y + 1 ) + offset)) * (*indexAt2(filterGPU, 3, 1, 0, 2))
+ (*(indexAt(image, width, bpp, X , Y - 1 ) + offset)) * (*indexAt2(filterGPU, 3, 1, 1, 0))
+ (*(indexAt(image, width, bpp, X , Y ) + offset)) * (*indexAt2(filterGPU, 3, 1, 1, 1))
+ (*(indexAt(image, width, bpp, X , Y + 1 ) + offset)) * (*indexAt2(filterGPU, 3, 1, 1, 2))
+ (*(indexAt(image, width, bpp, X + 1 , Y - 1 ) + offset)) * (*indexAt2(filterGPU, 3, 1, 2, 0))
+ (*(indexAt(image, width, bpp, X + 1 , Y ) + offset)) * (*indexAt2(filterGPU, 3, 1, 2, 1))
+ (*(indexAt(image, width, bpp, X + 1 , Y + 1 ) + offset)) * (*indexAt2(filterGPU, 3, 1, 2, 2));
newValue /= sum;
if (newValue > UCHAR_MAX) newValue = UCHAR_MAX;
else if (newValue < 0) newValue = 0;
*indexAt(buffer, width, bpp, X, Y) = newValue;
}
}
// Otherwise (if valid coordinates), just copy pixel
else if ((X >= 0) && (X < height) && (Y >= 0) && (Y < width))
*indexAt(buffer, width, bpp, X, Y) = *indexAt(image, width, bpp, X, Y);
}
/* Host wrapper: uploads the 3x3 filter to constant memory and the input
 * image to the device, runs `loops` convolution passes (ping-ponging the
 * two device buffers), reports the elapsed GPU time, and copies the final
 * image back into `output`. All CUDA calls are checked. */
extern "C" void initiate(int height, int width, int blockSize, int matrixSize, unsigned char *input, unsigned char *output, int loops, int sum, int bpp, int *filterCPU){
	/* Declarations */
	unsigned char *image, *buffer;
	cudaEvent_t start, stop;
	CUDA_SAFE_CALL(cudaEventCreate(&start));
	CUDA_SAFE_CALL(cudaEventCreate(&stop));
	/* Allocate GPU matrices (image, buffer) */
	CUDA_SAFE_CALL(cudaMalloc((void**) &image, matrixSize));
	CUDA_SAFE_CALL(cudaMalloc((void**) &buffer, matrixSize));
	/* Calculate grid size (ceil so the grid covers the whole image) */
	int gridX = FRACTION_CEILING(height, blockSize);
	int gridY = FRACTION_CEILING(width, blockSize);
	dim3 block(blockSize, blockSize);
	dim3 grid(gridX, gridY);
	/* Copy filter from CPU to GPU (constant memory) */
	CUDA_SAFE_CALL(cudaMemcpyToSymbol(filterGPU, filterCPU, 9 * sizeof(int)));
	/* Initialize image */
	CUDA_SAFE_CALL(cudaMemcpy(image, input, matrixSize, cudaMemcpyHostToDevice));
	/* Start time */
	CUDA_SAFE_CALL(cudaEventRecord(start, 0));
	/* Main Loop (No convergence check) */
	for (int loop = 0; loop < loops; loop++) {
		// Convolution on GPU
		convolute<<<grid, block>>>(image, buffer, sum, height, width, bpp);
		CUDA_SAFE_CALL(cudaGetLastError());
		// Wait for the pass to finish before swapping buffers.
		// was: cudaThreadSynchronize() — deprecated alias of
		// cudaDeviceSynchronize() since CUDA 4.0; same semantics.
		CUDA_SAFE_CALL(cudaDeviceSynchronize());
		// Swap buffers (last result becomes next input)
		unsigned char *temp = image;
		image = buffer;
		buffer = temp;
	}
	/* Stop time */
	CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
	CUDA_SAFE_CALL(cudaEventSynchronize(stop));
	float elapsedTime;
	CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
	printf("%3.1f ms\n", elapsedTime);
	/* Copy GPU's image to CPU's output (after the final swap, `image`
	   always points at the last-written buffer) */
	CUDA_SAFE_CALL(cudaMemcpy(output, image, matrixSize, cudaMemcpyDeviceToHost));
	/* Free resources */
	CUDA_SAFE_CALL(cudaFree(image));
	CUDA_SAFE_CALL(cudaFree(buffer));
	CUDA_SAFE_CALL(cudaEventDestroy(start));
	CUDA_SAFE_CALL(cudaEventDestroy(stop));
}
|
11,460 | #include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
// Fills the RGBA linear-memory surface backing a texture from an array of
// doubles. A color channel c is written only when bit c of Mask is set.
// Pixels whose alpha is >= th receive the source value clamped to [0, 253];
// the others receive the corresponding byte of pixValue.
__global__ void FillTexTh(void *surface, int width, int height, size_t pitch, double* src, int Mask, int th, int pixValue)
{
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= width || row >= height) return;

    // Clamp the source sample into the displayable byte range.
    double v = src[col + width*row];
    if (v < 0)   v = 0;
    if (v > 253) v = 253;

    unsigned char *px = (unsigned char *)((char*)surface + row*pitch) + 4*col;
    const bool visible = (px[3] >= th);
    for (int c = 0; c < 3; ++c) {
        if (!(Mask & (1 << c))) continue;
        px[c] = visible ? (unsigned char)v : (unsigned char)(pixValue >> (c*8));
    }
}
11,461 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
#include<cuda.h>
#define INPUT_SIZE 100000000
#define PRIME_RANGE 100000000
#define BLOCK_SIZE 32
typedef unsigned long long int uint64_c;
int generate_seed_primes(int*, int*, uint64_c);
void copy_seed_primes(uint64_c *,int *,int);
void print_primelist(uint64_c *, uint64_c);
void print_inputlist(uint64_c *);
void initializing_inputlist(uint64_c *);
void memsetting_range_of_input(uint64_c *,uint64_c);
void calculatePrime(uint64_c*, uint64_c*, uint64_c, uint64_c, uint64_c);
uint64_c appending_prime(uint64_c*, uint64_c*, uint64_c, uint64_c, uint64_c);
//KERNAL CODE GOES HERE!!
// Marks composites in device_input_list over [start, end): thread p walks
// the whole range testing divisibility by device_prime_list[p].
// The launch (<<<BLOCK_SIZE,256>>>) supplies more threads than there are
// valid primes, so excess threads must exit — the original read garbage
// past the populated prefix and risked a divide-by-zero in the '%'.
__global__ void prime_generator(uint64_c* device_input_list, uint64_c* device_prime_list, uint64_c* device_start_of_range,uint64_c* device_end_of_range, uint64_c* device_number_of_primes)
{
    uint64_c p = (uint64_c)blockIdx.x * blockDim.x + threadIdx.x;
    if (p >= device_number_of_primes[0]) return;
    // uint64_c, not int: the original truncated 64-bit primes.
    uint64_c primeno = device_prime_list[p];
    if (primeno < 2) return;  // defensive: 0/1 are not valid divisors
    for(uint64_c i = device_start_of_range[0]; i < device_end_of_range[0]; i++)
    {
        if(i % primeno == 0)
        {
            device_input_list[i] = 1;
        }
    }
}
//KERNAL CODE ENDS HERE!!!
/* Frees all device-side buffers; cudaFree(NULL) is a no-op, so this is safe
 * to call with any subset allocated. */
static void free_device_buffers(uint64_c *device_input_list, uint64_c *device_prime_list,
                                uint64_c *device_previous_range, uint64_c *device_max_prime_range,
                                uint64_c *device_number_of_primes)
{
    cudaFree(device_input_list);
    cudaFree(device_prime_list);
    cudaFree(device_previous_range);
    cudaFree(device_max_prime_range);
    cudaFree(device_number_of_primes);
}
/*
 * Segmented GPU sieve. A CPU sieve produces seed primes up to n=10; each
 * GPU pass then marks composites in [n, min(n^2, PRIME_RANGE)), the new
 * primes are appended, and n is squared until PRIME_RANGE is covered.
 * Pass structure and all printed messages match the original; the twenty
 * duplicated error/cleanup blocks are consolidated into one exit path, and
 * host allocations (previously leaked) are released.
 */
int main()
{
    cudaSetDevice(1);
    /* --- CPU seed sieve --- */
    int input_size = 100;
    uint64_c n = 10; /* seed sieve upper bound */
    /* calloc, not malloc: generate_seed_primes reads the scratch buffer
     * before writing it. */
    int *input = (int *)calloc(input_size, sizeof(int));
    int *seed_primelist = (int *)malloc(input_size * sizeof(int));
    int num_of_seed = generate_seed_primes(input, seed_primelist, n);

    uint64_c total_input_size = INPUT_SIZE;
    printf("TOTAL INPUT SIZE IS: %llu\n",total_input_size);
    uint64_c prime_range = PRIME_RANGE;
    printf("THE PRIMES WILL BE GENERATED FROM 0 - %llu\n",prime_range);
    printf("-------------------------------------------------------------------------\n\n\n");

    uint64_c *input_list = (uint64_c *)malloc(total_input_size * sizeof(uint64_c));
    uint64_c number_of_primes = num_of_seed;
    initializing_inputlist(input_list);
    uint64_c *prime_list = (uint64_c *)malloc(prime_range * sizeof(uint64_c));
    copy_seed_primes(prime_list, seed_primelist, num_of_seed);

    uint64_c *device_input_list = NULL;
    uint64_c *device_prime_list = NULL;
    uint64_c *device_previous_range = NULL;
    uint64_c *device_max_prime_range = NULL;
    uint64_c *device_number_of_primes = NULL;

    /* --- device allocations; on failure free whatever exists and exit --- */
    if (cudaMalloc((void **)&device_input_list, total_input_size * sizeof(uint64_c)) != cudaSuccess) {
        printf("ERROR: CANNOT ALLOCATE MEMORY IN GPU FOR INPUT LIST ------>> :) \n");
        goto fail;
    }
    if (cudaMalloc((void **)&device_prime_list, prime_range * sizeof(uint64_c)) != cudaSuccess) {
        printf("ERROR: CANNOT ALLOCATE MEMORY IN GPU FOR PRIME LIST\n");
        goto fail;
    }
    if (cudaMalloc((void **)&device_previous_range, sizeof(uint64_c)) != cudaSuccess) {
        printf("ERROR: CANNOT ALLOCATE MEMORY IN GPU FOR PREVIOUS RANGE\n");
        goto fail;
    }
    if (cudaMalloc((void **)&device_max_prime_range, sizeof(uint64_c)) != cudaSuccess) {
        printf("ERROR: CANNOT ALLOCATE MEMORY IN GPU FOR MAX PRIME RANGE\n");
        goto fail;
    }
    if (cudaMalloc((void **)&device_number_of_primes, sizeof(uint64_c)) != cudaSuccess) {
        printf("ERROR: CANNOT ALLOCATE MEMORY IN GPU FOR NUMBER OF PRIMES\n");
        goto fail;
    }

    while (n < PRIME_RANGE) {
        if (cudaMemcpy(device_prime_list, prime_list, prime_range * sizeof(uint64_c), cudaMemcpyHostToDevice) != cudaSuccess) {
            printf("ERROR: CANNOT COPY PRIME LIST FROM HOST TO DEVICE\n");
            goto fail;
        }
        if (cudaMemcpy(device_number_of_primes, &number_of_primes, sizeof(uint64_c), cudaMemcpyHostToDevice) != cudaSuccess) {
            printf("ERROR: CANNOT COPY NUMBER OF PRIMES FROM HOST TO DEVICE\n");
            goto fail;
        }
        uint64_c previous_range = n;
        if (cudaMemcpy(device_previous_range, &previous_range, sizeof(uint64_c), cudaMemcpyHostToDevice) != cudaSuccess) {
            printf("ERROR: CANNOT COPY PREVIOUS RANGE FROM HOST TO DEVICE\n");
            goto fail;
        }
        printf("THE NUMBER OF PRIMES GENERATED: %llu \n",number_of_primes);
        /* Primes below n certify every candidate below n^2. */
        uint64_c max_prime_range = pow(n, 2);
        printf("MAXIMUM RANGE PRIMES BETWEEN 0 - %llu CAN DETERMINE IS %llu \n", n,max_prime_range);
        uint64_c upper = max_prime_range;
        if (max_prime_range > PRIME_RANGE) {
            /* Final pass: clamp the segment to the requested range. */
            printf("IN ELSE PART.\n");
            upper = prime_range;
        }
        if (cudaMemcpy(device_max_prime_range, &upper, sizeof(uint64_c), cudaMemcpyHostToDevice) != cudaSuccess) {
            printf("ERROR: CANNOT COPY MAX PRIME RANGE FROM HOST TO DEVICE\n");
            goto fail;
        }
        printf("CALCULATE PRIME NUMBERS BETWEEN %llu - %llu\n", previous_range,upper);
        memsetting_range_of_input(input_list, upper);
        if (cudaMemcpy(device_input_list, input_list, total_input_size * sizeof(uint64_c), cudaMemcpyHostToDevice) != cudaSuccess) {
            printf("ERROR: CANNOT COPY INPUT LIST FROM HOST TO DEVICE\n");
            goto fail;
        }
        prime_generator<<<BLOCK_SIZE,256>>>(device_input_list, device_prime_list, device_previous_range, device_max_prime_range, device_number_of_primes);
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("Error: %s\n", cudaGetErrorString(err));
        if (cudaMemcpy(input_list, device_input_list, total_input_size * sizeof(uint64_c), cudaMemcpyDeviceToHost) != cudaSuccess) {
            printf("ERROR: CANNOT COPY INPUT LIST FROM DEVICE TO HOST\n");
            goto fail;
        }
        number_of_primes = appending_prime(input_list, prime_list, previous_range, upper, number_of_primes);
        printf("\n\n\n");
        n = pow(n, 2);
    }

    printf("TOTAL NUMBER OF PRIMES GENERATED: %llu \n",number_of_primes);
    print_primelist(prime_list, number_of_primes);
    free_device_buffers(device_input_list, device_prime_list, device_previous_range,
                        device_max_prime_range, device_number_of_primes);
    free(input);
    free(seed_primelist);
    free(input_list);
    free(prime_list);
    return 0;

fail:
    free_device_buffers(device_input_list, device_prime_list, device_previous_range,
                        device_max_prime_range, device_number_of_primes);
    free(input);
    free(seed_primelist);
    free(input_list);
    free(prime_list);
    exit(0);  /* original error paths exited with status 0 */
}
/* Scans input_list over [start_of_range, end_of_range) and appends every
 * index still marked 0 (i.e. not crossed off as composite) to prime_list.
 * Returns the updated prime count. */
uint64_c appending_prime(uint64_c* input_list, uint64_c* prime_list, uint64_c start_of_range,uint64_c end_of_range, uint64_c number_of_primes)
{
    uint64_c count = number_of_primes;
    for (uint64_c candidate = start_of_range; candidate < end_of_range; ++candidate) {
        if (input_list[candidate] != 0)
            continue;
        prime_list[count++] = candidate;
    }
    return count;
}
/* CPU reference sieve (unused by the GPU path): marks every value in
 * [start_of_range, end_of_range) divisible by a known prime with 1. */
void calculatePrime(uint64_c* input_list, uint64_c* prime_list, uint64_c start_of_range,uint64_c end_of_range, uint64_c number_of_primes)
{
    printf("--------CALCULATING PRIME NUMBERS from %llu to %llu --------\n", start_of_range,end_of_range);
    for (uint64_c value = start_of_range; value < end_of_range; ++value) {
        for (uint64_c p = 0; p < number_of_primes; ++p) {
            if (value % prime_list[p] == 0)
                input_list[value] = 1;
        }
    }
    printf("-------- END CALCULATING PRIME NUMBERS--------\n");
}
/* Zeroes the first `size` entries of input_list so the next sieve pass
 * starts with every candidate unmarked. */
void memsetting_range_of_input(uint64_c *input_list,uint64_c size)
{
    memset(input_list, 0, (size_t)size * sizeof(uint64_c));
}
/* Fills the whole input list with the sentinel value 2 ("unprocessed").
 * The buffer holds exactly INPUT_SIZE elements, so the loop bound must be
 * '<' — the original '<=' wrote one element past the end of the heap
 * allocation made in main(). */
void initializing_inputlist(uint64_c *input_list){
    for(int i=0;i<INPUT_SIZE;i++)
    {
        input_list[i]=2;
    }
}
/* Debug helper: dumps "index ---> value" for the whole input list. */
void print_inputlist(uint64_c *input_list)
{
    for (int idx = 0; idx < INPUT_SIZE; ++idx)
        printf("%d\t--->\t%llu\n", idx, input_list[idx]);
}
/* Prints every prime collected so far, one per line. */
void print_primelist(uint64_c *prime_list,uint64_c number_of_primes)
{
    for (uint64_c idx = 0; idx < number_of_primes; ++idx)
        printf("%llu\n", prime_list[idx]);
}
/* Widens the int seed primes into the 64-bit prime list. */
void copy_seed_primes(uint64_c *prime_list,int * seed_primelist,int num_of_seed)
{
    for (int idx = 0; idx < num_of_seed; ++idx)
        prime_list[idx] = (uint64_c)seed_primelist[idx];
}
/* Sieve of Eratosthenes over [2, n] using `input` as scratch space.
 * Writes the primes into `primelist` and returns how many were found.
 * The scratch buffer is zeroed here first: the caller allocates it with
 * malloc, so the original read indeterminate memory in `input[p] == 0`.
 * Requires the caller's buffers to hold at least n+1 ints. */
int generate_seed_primes(int *input,int *primelist, uint64_c n)
{
    for (uint64_c i = 0; i <= n; i++)
        input[i] = 0;
    for (int p=2; p*p<=n; p++)
    {
        if (input[p] == 0)
        {
            /* Cross off every multiple of p starting at 2p. */
            for (int i=p*2; i<=n; i += p)
                input[i] = 1;
        }
    }
    int count = 0;
    for (int p=2; p<=n; p++){
        if (input[p]==0)
        {
            primelist[count] = p;
            count++;
        }
    }
    return count;
}
|
11,462 | /*-
* Copyright 2015 Grammarly, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cuda_runtime.h>
#define MAX_NUM_THREADS_PER_BLOCK 512
#define MAX_NUM_BLOCKS_PER_KERNEL 128
// Element-wise logistic sigmoid over nelems floats (grid-stride loop, so
// any launch configuration covers the whole array).
__global__ void sigmoid(int nelems,
                        const float* __restrict__ data,
                        float* __restrict__ sigmoidData) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nelems; idx += stride) {
    sigmoidData[idx] = 1.0f / (1.0f + expf(-data[idx]));
  }
}
// Sigmoid plus its derivative s*(1-s), derived from the activation itself.
__global__ void sigmoid(int nelems,
                        const float* __restrict__ data,
                        float* __restrict__ sigmoidData,
                        float* __restrict__ derivative) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nelems; idx += stride) {
    const float s = 1.0f / (1.0f + expf(-data[idx]));
    sigmoidData[idx] = s;
    derivative[idx] = s * (1.0f - s);
  }
}
// Element-wise hyperbolic tangent over nelems floats (grid-stride loop).
__global__ void tanh(int nelems,
                     const float* __restrict__ data,
                     float* __restrict__ tanhData) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nelems; idx += stride) {
    tanhData[idx] = tanhf(data[idx]);
  }
}
// tanh plus its derivative 1 - t^2, derived from the activation itself.
__global__ void tanh(int nelems,
                     const float* __restrict__ data,
                     float* __restrict__ tanhData,
                     float* __restrict__ derivative) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nelems; idx += stride) {
    const float t = tanhf(data[idx]);
    tanhData[idx] = t;
    derivative[idx] = 1.0f - t * t;
  }
}
// Mixed activation on an nrows x ncols buffer where (idx % nrows) selects
// the row: the first nrows/4 rows get tanh, the rest get sigmoid.
__global__ void tanhSigmRow(int nrows,
                            int ncols,
                            const float* __restrict__ data,
                            float* __restrict__ tanhSigmData) {
  const int stride = blockDim.x * gridDim.x;
  const int total = nrows * ncols;
  const int tanhRows = nrows / 4;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total; idx += stride) {
    tanhSigmData[idx] = (idx % nrows < tanhRows)
                            ? tanhf(data[idx])
                            : 1.0f / (1.0f + expf(-data[idx]));
  }
}
// As tanhSigmRow above, but also emits the matching derivative
// (1 - t^2 for tanh rows, s*(1-s) for sigmoid rows).
__global__ void tanhSigmRow(int nrows,
                            int ncols,
                            const float* __restrict__ data,
                            float* __restrict__ tanhSigmData,
                            float* __restrict__ derivative) {
  const int stride = blockDim.x * gridDim.x;
  const int total = nrows * ncols;
  const int tanhRows = nrows / 4;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total; idx += stride) {
    if (idx % nrows < tanhRows) {
      const float t = tanhf(data[idx]);
      tanhSigmData[idx] = t;
      derivative[idx] = 1.0f - t * t;
    } else {
      const float s = 1.0f / (1.0f + expf(-data[idx]));
      tanhSigmData[idx] = s;
      derivative[idx] = s * (1.0f - s);
    }
  }
}
// Mixed activation split by flat offset: the first (ncols/4)*nrows elements
// get tanh, the remainder sigmoid. The two grid-stride loops share the
// running index, so each element is visited exactly once.
__global__ void tanhSigmColumn(int nrows,
                               int ncols,
                               const float* __restrict__ data,
                               float* __restrict__ tanhSigmData) {
  const int stride = blockDim.x * gridDim.x;
  const int total = ncols * nrows;
  const int split = ncols / 4 * nrows;
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  for (; idx < split; idx += stride)
    tanhSigmData[idx] = tanhf(data[idx]);
  for (; idx < total; idx += stride)
    tanhSigmData[idx] = 1.0f / (1.0f + expf(-data[idx]));
}
// As tanhSigmColumn above, but also emits the matching derivative.
__global__ void tanhSigmColumn(int nrows,
                               int ncols,
                               const float* __restrict__ data,
                               float* __restrict__ tanhSigmData,
                               float* __restrict__ derivative) {
  const int stride = blockDim.x * gridDim.x;
  const int total = ncols * nrows;
  const int split = ncols / 4 * nrows;
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  for (; idx < split; idx += stride) {
    const float t = tanhf(data[idx]);
    tanhSigmData[idx] = t;
    derivative[idx] = 1.0f - t * t;
  }
  for (; idx < total; idx += stride) {
    const float s = 1.0f / (1.0f + expf(-data[idx]));
    tanhSigmData[idx] = s;
    derivative[idx] = s * (1.0f - s);
  }
}
// Element-wise ReLU (max(0, x)) over nelems floats (grid-stride loop).
__global__ void relu(int nelems,
                     const float* __restrict__ data,
                     float* __restrict__ reluData) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nelems; idx += stride) {
    reluData[idx] = fmaxf(0.0f, data[idx]);
  }
}
// ReLU plus its subgradient: 1 where the input's sign bit is clear,
// 0 otherwise (so -0.0f counts as negative, matching the original).
__global__ void relu(int nelems,
                     const float* __restrict__ data,
                     float* __restrict__ reluData,
                     float* __restrict__ derivative) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < nelems; idx += stride) {
    reluData[idx] = fmaxf(0.0f, data[idx]);
    derivative[idx] = !signbit(data[idx]);
  }
}
extern "C" {
// Shared launch-shape helper: block size is fixed, grid is capped at
// MAX_NUM_BLOCKS_PER_KERNEL. The kernels above use grid-stride loops, so a
// capped grid still covers every element.
static int gridSizeFor(int nelems) {
  return std::min(MAX_NUM_BLOCKS_PER_KERNEL,
                  (nelems - 1) / MAX_NUM_THREADS_PER_BLOCK + 1);
}
// Each wrapper launches its kernel on `stream` and returns the launch status.
cudaError_t _sigmoid(cudaStream_t stream,
                     int nelems,
                     const float* __restrict__ data,
                     float* __restrict__ sigmoid_data) {
  sigmoid<<<gridSizeFor(nelems), MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
      nelems, data, sigmoid_data);
  return cudaGetLastError();
}
cudaError_t _sigmoidDer(cudaStream_t stream,
                        int nelems,
                        const float* __restrict__ data,
                        float* __restrict__ sigmoid_data,
                        float* __restrict__ derivative) {
  sigmoid<<<gridSizeFor(nelems), MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
      nelems, data, sigmoid_data, derivative);
  return cudaGetLastError();
}
cudaError_t _tanh(cudaStream_t stream,
                  int nelems,
                  const float* __restrict__ data,
                  float* __restrict__ tanh_data) {
  tanh<<<gridSizeFor(nelems), MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
      nelems, data, tanh_data);
  return cudaGetLastError();
}
cudaError_t _tanhDer(cudaStream_t stream,
                     int nelems,
                     const float* __restrict__ data,
                     float* __restrict__ tanh_data,
                     float* __restrict__ derivative) {
  tanh<<<gridSizeFor(nelems), MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
      nelems, data, tanh_data, derivative);
  return cudaGetLastError();
}
// axis selects the split direction: nonzero -> column variant, 0 -> row.
cudaError_t _tanhSigm(cudaStream_t stream,
                      int axis,
                      int nrows,
                      int ncols,
                      const float* __restrict__ data,
                      float* __restrict__ tanh_sigm_data) {
  const int blocks = gridSizeFor(nrows * ncols);
  if (axis) {
    tanhSigmColumn<<<blocks, MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
        nrows, ncols, data, tanh_sigm_data);
  } else {
    tanhSigmRow<<<blocks, MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
        nrows, ncols, data, tanh_sigm_data);
  }
  return cudaGetLastError();
}
cudaError_t _tanhSigmDer(cudaStream_t stream,
                         int axis,
                         int nrows,
                         int ncols,
                         const float* __restrict__ data,
                         float* __restrict__ tanh_sigm_data,
                         float* __restrict__ derivative) {
  const int blocks = gridSizeFor(nrows * ncols);
  if (axis) {
    tanhSigmColumn<<<blocks, MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
        nrows, ncols, data, tanh_sigm_data, derivative);
  } else {
    tanhSigmRow<<<blocks, MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
        nrows, ncols, data, tanh_sigm_data, derivative);
  }
  return cudaGetLastError();
}
cudaError_t _relu(cudaStream_t stream,
                  int nelems,
                  const float* __restrict__ data,
                  float* __restrict__ relu_data) {
  relu<<<gridSizeFor(nelems), MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
      nelems, data, relu_data);
  return cudaGetLastError();
}
cudaError_t _reluDer(cudaStream_t stream,
                     int nelems,
                     const float* __restrict__ data,
                     float* __restrict__ relu_data,
                     float* __restrict__ derivative) {
  relu<<<gridSizeFor(nelems), MAX_NUM_THREADS_PER_BLOCK, 0, stream>>>(
      nelems, data, relu_data, derivative);
  return cudaGetLastError();
}
}
11,463 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <iostream>
using namespace std;
#include <cuda.h>
#include <curand.h>
// Demo: inclusive scan of squares with Thrust, then a device vector filled
// with cuRAND uniform doubles printed on the host.
int main() {
    int N = 6;
    thrust::host_vector<int> A(N);
    for (int i = 0; i < N; ++i) A[i] = i * i;

    thrust::device_vector<int> B = A;
    thrust::inclusive_scan(B.begin(), B.end(), B.begin());
    thrust::host_vector<int> C = B;
    for (int i = 0; i < A.size(); ++i) cout << A[i] << " "; cout << endl;
    for (int i = 0; i < C.size(); ++i) cout << C[i] << " "; cout << endl;

    thrust::device_vector<double> D(N);
    double* D_ptr = thrust::raw_pointer_cast(D.data());
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    // Seed explicitly for reproducible runs (the original left the default).
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerateUniformDouble(gen, D_ptr, N);
    curandDestroyGenerator(gen);  // was leaked

    thrust::host_vector<double> E(N);
    thrust::copy(D.begin(), D.end(), E.begin());
    // curandGenerateUniformDouble produces *uniform* deviates; the old
    // label incorrectly said "Gaussian".
    cout << "Random Uniform : " << endl;
    for (int i = 0; i < E.size(); i++) cout << " >> " << E[i] << endl;
}
|
11,464 | #include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
// Computes per-frequency AR(2) coefficients a and b (and a constant c) on a
// width x height spectral grid; indices past the half-band are mirrored so
// (i, j) maps to the symmetric radial frequency r.
__global__ void KparamAR(double* a, double* b, double* c, double ss, double dtAR, int width, int height)
{
    // Small offset keeping r strictly positive at DC. The original defined
    // a lowercase `eps` macro (with a stray trailing ';') but then used an
    // undefined capitalized `Eps`; both are replaced by this constant.
    // NOTE(review): if includes.h defines Eps with a different value,
    // restore that symbol instead — confirm against the header.
    const double kEps = 1E-12;
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i >= width || j >= height) return;
    int x = i;
    int y = j;
    if (i > width/2)  x = width - i;   // mirror about the horizontal Nyquist
    if (j > height/2) y = height - j;  // mirror about the vertical Nyquist
    double r = sqrt( (double)x*x + (double)y*y ) + kEps;
    a[i+j*width] = 2 - dtAR*2*ss*r - pow(dtAR*ss*r, 2);
    b[i+j*width] = -1 + dtAR*2*ss*r;
    // c[i+j*width] = 50* pow(dtAR,2);
    // Correction Jonathan 7-12-16: c is now unit gain.
    c[i+j*width] = 1;
}
11,465 | #include <cuda.h>
#include <stdio.h>
#define BLOCKSIZE 256
// Writes `value` into gpu_ptr[0..nb). Guarded so a grid that over-provisions
// threads does not write out of bounds (the original had no check).
__global__ void kern_set_val (float *gpu_ptr, float value, int nb) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < nb)
        gpu_ptr[i] = value;
}
// Fills the host array `ptr` with `value` by round-tripping through a
// device buffer of nb floats.
extern "C" void set_value (float *ptr, float value, int nb) {
    float *gpu_ptr;
    cudaMalloc (&gpu_ptr, sizeof(float)*nb);
    // Ceiling division: the original nb/BLOCKSIZE truncated, leaving up to
    // BLOCKSIZE-1 tail elements unset (and launching 0 blocks for small nb).
    int blocks = (nb + BLOCKSIZE - 1) / BLOCKSIZE;
    kern_set_val <<<blocks, BLOCKSIZE>>>(gpu_ptr, value, nb);
    cudaDeviceSynchronize ();  // cudaThreadSynchronize() is deprecated
    cudaMemcpy(ptr, gpu_ptr, nb*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree (gpu_ptr);
}
|
11,466 | #include "includes.h"
// In-place ReLU over an N x C x H x W int tensor stored contiguously.
// The grid may supply more threads than elements, so out-of-range threads
// exit (the original indexed unguarded, writing past the buffer). The
// original's n/c/h/w decomposition recombined to the same flat offset, so
// the element is addressed directly.
__global__ void RELU(int *ip, int N, int C, int H, int W){
    unsigned int input_id = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int total = (unsigned int)N * C * H * W;
    if (input_id >= total) return;
    if (ip[input_id] < 0)
        ip[input_id] = 0;
}
11,467 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define N 1000
#define T 1000
#define S 1024
#define BLOCK_SIZE 16
/* Loads a T x N matrix of floats (one value per line) from `filename` into
 * the S x S buffer z, zero-padding the unused tail rows/columns.
 * Exits with status 1 when the file cannot be opened (the original passed
 * the NULL FILE* straight to fgets). */
void initialize(const char *filename, float *z)
{
    FILE *file;
    int t, n;
    char buf[1024];
    file = fopen(filename, "r");
    if (file == NULL) {
        fprintf(stderr, "cannot open %s\n", filename);
        exit(1);
    }
    for(t = 0; t < S; t++){
        for(n = 0; n < S; n++){
            z[t*S+n] = 0;
        }
    }
    for(t = 0; t < T; t++){
        for(n = 0; n < N; n++){
            /* Truncated input: keep the zero already stored. */
            if (fgets(buf, 1024, file) == NULL)
                continue;
            z[t*S+n] = atof(buf);
        }
    }
    fclose(file);  /* was leaked */
}
// Cosine-similarity entry si[t1][t2] between rows t1 and t2 of z (rows are
// S floats wide). Rows whose precomputed norm is (near-)zero map to 0 so
// the division never blows up.
__global__ void Kernel(const float *z, const float *denom, float *si)
{
    const int t1 = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    const int t2 = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    float dot = 0;
    for (int n = 0; n < S; n++)
        dot += z[t1*S+n] * z[t2*S+n];
    const bool degenerate = (denom[t1] < 1e-6 || denom[t2] < 1e-6);
    si[t1*S+t2] = degenerate ? 0 : dot / (denom[t1] * denom[t2]);
}
/* Computes the S x S cosine-similarity matrix of z on the GPU.
 * Row norms are computed on the CPU; padding rows [T, S) get norm 0 so the
 * kernel maps them to similarity 0.
 * Error handling: the original printed a digit on cudaMalloc failure and
 * then kept going, dereferencing the failed allocation; it now bails out
 * (same digit messages preserved). Kernel launch errors were silently
 * dropped and are now reported. */
void similarity_index(const float *z, float *si)
{
    int t, n;
    float r, denom[S];
    for(t = 0; t < T; t++){
        r = 0;
        for(n = 0; n < N; n++){
            r += z[t*S+n]*z[t*S+n];
        }
        denom[t] = sqrt(r);
    }
    for(t = T; t < S; t++){
        denom[t] = 0;  /* padding rows: forces similarity 0 in the kernel */
    }
    float *zd, *denomd, *sid;
    int size = S*S*sizeof(float);
    cudaError_t stat;
    stat = cudaMalloc((void**)&zd, size);
    if (stat != cudaSuccess){
        puts("1");
        return;
    }
    stat = cudaMalloc((void**)&denomd, S*sizeof(float));
    if (stat != cudaSuccess){
        puts("2");
        cudaFree(zd);
        return;
    }
    stat = cudaMalloc((void**)&sid, size);
    if (stat != cudaSuccess){
        puts("3");
        cudaFree(zd);
        cudaFree(denomd);
        return;
    }
    stat = cudaMemcpy(zd, z, size, cudaMemcpyHostToDevice);
    if (stat != cudaSuccess){
        puts("4");
    }
    stat = cudaMemcpy(denomd, denom, S*sizeof(float), cudaMemcpyHostToDevice);
    if (stat != cudaSuccess){
        puts("5");
    }
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(S/BLOCK_SIZE, S/BLOCK_SIZE);
    Kernel<<<dimGrid, dimBlock>>>(zd, denomd, sid);
    stat = cudaGetLastError();
    if (stat != cudaSuccess){
        puts(cudaGetErrorString(stat));
    }
    /* cudaMemcpy is a synchronizing call, so kernel completion is awaited. */
    stat = cudaMemcpy(si, sid, size, cudaMemcpyDeviceToHost);
    if (stat != cudaSuccess){
        puts("6");
    }
    cudaFree(zd);
    cudaFree(denomd);
    cudaFree(sid);
}
/* Writes the T x T similarity block as "t1 t2 value" lines, with a blank
 * line between t1 groups. Checks fopen (was unchecked) and closes the file
 * (was leaked — buffered output could be lost at exit). */
void output(char *filename, float *si)
{
    FILE *file;
    int t1, t2;
    file = fopen(filename, "w");
    if (file == NULL) {
        fprintf(stderr, "cannot open %s\n", filename);
        exit(1);
    }
    for(t1 = 0; t1 < T; t1++){
        for(t2 = 0; t2 < T; t2++){
            fprintf(file, "%d %d %f\n", t1, t2, si[t1*S+t2]);
        }
        fprintf(file, "\n");
    }
    fclose(file);
}
/* Usage: prog <input> <output>. Loads z from the input file, computes the
 * similarity matrix on the GPU, and writes it to the output file. */
int main(int argc, char *argv[])
{
    if (argc < 3) {
        fprintf(stderr, "usage: %s <input> <output>\n", argv[0]);
        exit(1);
    }
    float *z  = (float *)malloc(S*S*sizeof(float));
    float *si = (float *)malloc(S*S*sizeof(float));
    initialize(argv[1], z);
    similarity_index(z, si);
    output(argv[2], si);
    free(z);
    free(si);
    return 0;
}
|
11,468 | #include "includes.h"
// Midpoint-rule integration of 4/(1+x^2) over [0,1) with `num` samples:
// each block reduces its threads' partial sums into d_sum[blockIdx.x]
// (host finishes by summing blocks and dividing by num to approximate PI).
// Requires blockDim.x to be a power of two; dynamic shared memory must be
// blockDim.x floats.
__global__ void ReducePI( float* d_sum, int num ){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ float s_pi[];
    s_pi[threadIdx.x] = 0.f;
    // Grid-stride accumulation. The original *assigned* gid = stride
    // instead of advancing it, so each thread sampled at most two points
    // (or spun forever whenever stride < num).
    while (gid < num) {
        float x = (gid + 0.5f) / num;
        s_pi[threadIdx.x] += 4.0f / (1.0f + x*x);
        gid += blockDim.x * gridDim.x;
    }
    __syncthreads();  // all partials must be visible before the reduction
    // Binary-tree reduction. The original started at blockIdx.x>>1 and
    // incremented i — wrong starting width and wrong direction.
    for (int i = blockDim.x >> 1; i > 0; i >>= 1) {
        if (threadIdx.x < i)
            s_pi[threadIdx.x] += s_pi[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        d_sum[blockIdx.x] = s_pi[0];
}
11,469 | #include <stdio.h>
#define ARRAY_COUNT 10
__shared__ float file_shared_array_static[ARRAY_COUNT];
extern __shared__ float file_shared_array_dynamic[];
// Stages idx through a statically-sized, function-local shared array before
// writing it to global memory.
__global__
void generateArrayStatic(float* out) {
    __shared__ float staging[ARRAY_COUNT];
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= ARRAY_COUNT) return;
    staging[idx] = idx;
    out[idx] = staging[idx];
}
// Same staging scheme, but the shared buffer is dynamically sized by the
// third <<<>>> launch argument.
__global__
void generateArrayDynamic(float* out) {
    extern __shared__ float staging[];
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= ARRAY_COUNT) return;
    staging[idx] = idx;
    out[idx] = staging[idx];
}
// Same staging scheme via the file-scope static shared array declared above.
__global__
void generateArrayFileStatic(float* out) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= ARRAY_COUNT) return;
    file_shared_array_static[idx] = idx;
    out[idx] = file_shared_array_static[idx];
}
// Same staging scheme via the file-scope dynamic shared array declared above
// (sized by the launch's third <<<>>> argument).
__global__
void generateArrayFileDynamic(float* out) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= ARRAY_COUNT) return;
    file_shared_array_dynamic[idx] = idx;
    out[idx] = file_shared_array_dynamic[idx];
}
// Prints the first ARRAY_COUNT entries as space-separated integers,
// terminated by a newline.
void printArray(float* array) {
    for (int i = 0; i < ARRAY_COUNT; ++i)
        printf("%d ", int(array[i]));
    printf("\n");
}
// Copies the device result back to the host and prints it under `label`.
static void showResult(const char* label, float* device_array, float* host_array) {
    cudaDeviceSynchronize();
    cudaMemcpy(host_array, device_array, ARRAY_COUNT*sizeof(float), cudaMemcpyDeviceToHost);
    printf("%-30s", label);
    printArray(host_array);
}
// Exercises the four shared-memory variants and prints each output array.
int main(void) {
    printf("\n");
    dim3 block(32);
    dim3 grid((ARRAY_COUNT + block.x - 1) / block.x);
    float* host_array = (float*)malloc(ARRAY_COUNT*sizeof(float));
    float* device_array;
    cudaMalloc((float**)&device_array, ARRAY_COUNT*sizeof(float));

    generateArrayStatic<<<grid, block>>>(device_array);
    showResult("generateArrayStatic: ", device_array, host_array);

    generateArrayDynamic<<<grid, block, ARRAY_COUNT*sizeof(float)>>>(device_array);
    showResult("generateArrayDynamic: ", device_array, host_array);

    generateArrayFileStatic<<<grid, block>>>(device_array);
    showResult("generateArrayFileStatic: ", device_array, host_array);

    generateArrayFileDynamic<<<grid, block, ARRAY_COUNT*sizeof(float)>>>(device_array);
    showResult("generateArrayFileDynamic: ", device_array, host_array);

    free(host_array);
    cudaFree(device_array);
    cudaDeviceReset();
    printf("\n");
    return 0;
}
11,470 | #include <type_traits>
// Stub variadic metafunction: "applying" any pack yields std::true_type.
template <typename... Ls>
struct apply_impl
{
using type = std::integral_constant<bool,true>;
};
// apply<Args...> names apply_impl's result (always true_type here).
template <typename... Args>
using apply = typename apply_impl<Args...>::type;
// Lifts T::value back into an integral_constant<bool, ...>.
template<typename T>
using type_impl = std::integral_constant<bool,T::value>;
// Wraps the boolean result of apply<P, T>; the commented alternative shows
// the equivalent formulation through type_impl.
template<typename P, typename T>
struct nope
{
using that = apply<P, T>;
using type = std::integral_constant<bool,that::value>;
// using type = type_impl<that>;
};
// Nothing runs at runtime; the template machinery above is exercised purely
// at compile time, so success is simply compiling.
int main(int, char*[])
{
return 0;
}
|
11,471 | #include <iostream>
#include <cuda.h>
#include <stdio.h>
using std::cout;
using std::endl;
// Prints each thread's (block, thread) x-coordinates via device printf.
__global__ void my_kernel_1()
{
printf("I'm in block %i, thread %i\n", blockIdx.x, threadIdx.x);
}
// Prints a globally-unique thread id derived from a 2-D grid of 2-D blocks,
// followed by the raw block and thread coordinates.
__global__ void my_kernel2()
{
    const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
    const int threadsPerBlock = blockDim.x * blockDim.y;
    const int threadId = blockId * threadsPerBlock
                       + threadIdx.y * blockDim.x + threadIdx.x;
    printf("Running thread %i in block %i\n", threadId, blockId);
    printf("Block position: x %i, y %i\n", blockIdx.x, blockIdx.y);
    printf("Thread position: x %i, y %i\n", threadIdx.x, threadIdx.y);
}
// Launches my_kernel_1 twice (scalar config, then dim3 config) and
// my_kernel2 on a 2x2 grid of 2x2 blocks, synchronizing so the device
// printf output is flushed.
int main(int argc, char *argv[])
{
    cout << "Hello world!! I will call the first CUDA kernel now!!" << endl;
    my_kernel_1<<<4, 4, 0>>>();

    dim3 nblocks(4, 1, 1);
    dim3 nthreads(4, 1, 1);
    cout << "Launching the second CUDA kernel now!!" << endl;
    my_kernel_1<<<nblocks, nthreads, 0>>>();
    cudaDeviceSynchronize();

    dim3 nblocks2(2, 2, 1);
    dim3 nthreads2(2, 2, 1);
    cout << "Launching the third CUDA kernel now!!" << endl;
    my_kernel2<<<nblocks2, nthreads2, 0>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
11,472 | #include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
// Identity on the pointer. The commented line is a (disabled)
// GPUVerify-style postcondition stating exactly that contract.
__device__ int* bar(int* p) {
//__ensures(__implies(__enabled(), __return_val_ptr() == p));
return p;
}
// Each thread writes 2 at offset threadIdx.x of bar(p) — i.e. of p itself.
// NOTE(review): assumes p points to at least blockDim.x ints — confirm
// against the (unseen) launch site; N is defined as 2 above.
__global__ void foo(int* p) {
//bar(p)[threadIdx.x] = 0;
*(bar(p)+threadIdx.x) = 2;
//printf(" %d; ", bar(p)[threadIdx.x]);
}
|
11,473 | /*
standard matrix mult
*/
#include <iostream>
#include <math.h>
// Computes one element of C = A * B for square N x N row-major matrices.
// Expected launch: a 2-D grid/block arrangement covering at least N x N
// threads; threads outside the matrix do nothing.
__global__
void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
    int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    int COL = blockIdx.x * blockDim.x + threadIdx.x;
    if (ROW < N && COL < N) {
        // Each thread computes one element of the output matrix.
        float tmpSum = 0.0f;
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        // BUG FIX: the store used to sit OUTSIDE the bounds check, so
        // out-of-range threads wrote past the end of C whenever the grid
        // overshot N (e.g. the N*N > 1024 launch configuration).
        C[ROW * N + COL] = tmpSum;
    }
}
// Host wrapper: picks a launch configuration for an N x N multiply and
// launches matrixMultiplicationKernel.
void matrixMultiplication(float *A, float *B, float *C, int N){
    // Small problems fit in a single N x N block; larger ones use 1024-wide
    // one-row blocks and a grid sized by ceiling division.
    dim3 threadsPerBlock(N, N);
    dim3 blocksPerGrid(1, 1);
    if (N*N > 1024){
        threadsPerBlock.x = 1024;
        threadsPerBlock.y = 1;
        blocksPerGrid.x = (N + threadsPerBlock.x - 1) / threadsPerBlock.x;
        blocksPerGrid.y = (N + threadsPerBlock.y - 1) / threadsPerBlock.y;
    }
    matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, N);
}
// Entry point.  argv: [1] matrix dimension N, [2] optional flag (nonzero to
// print A, B and C).  Allocates managed memory, multiplies, prints C[0].
int main(int argc, char* argv[])
{
    // BUG FIX: a missing dimension argument used to print a message and then
    // fall through into atoi(argv[1]) (undefined behavior); argv[2] was also
    // read without checking argc.
    if(argc < 2)
    {
        std::cout << "Needs a dimension parameter.\n";
        return 1;
    }
    int N = atoi(argv[1]);
    bool output = (argc > 2) ? (atoi(argv[2]) != 0) : false;
    float* A;
    cudaError_t result = cudaMallocManaged(&A, N*N*sizeof(float));
    if( result != cudaSuccess)
    {
        throw std::runtime_error("Failed allocation.");
    }
    float* B;
    result = cudaMallocManaged(&B, N*N*sizeof(float));
    if( result != cudaSuccess)
    {
        throw std::runtime_error("Failed allocation.");
    }
    float* C;
    result = cudaMallocManaged(&C, N*N*sizeof(float));
    if( result != cudaSuccess)
    {
        throw std::runtime_error("Failed allocation.");
    }
    for(int i=0; i < N*N; ++i)
    {
        A[i] = 1.2345;
        B[i] = 1.2345;
        C[i] = 0;
    }
    // if output set to 1, display A and B
    if(output)
    {
        for(int i = 0; i < N*N; ++i)
        {
            if (i%N == 0) std::cout << "\n";
            std::cout << A[i] << " ";
        }
        for(int i = 0; i < N*N; ++i)
        {
            if (i%N == 0) std::cout << "\n";
            std::cout << B[i] << " ";
        }
    }
    matrixMultiplication(A, B, C, N);
    // Wait for the asynchronous kernel before reading C on the host.
    cudaDeviceSynchronize();
    // if output set to 1, show C after mult
    if(output)
    {
        for(int i =0; i < N*N; ++i)
        {
            if (i%N == 0) std::cout << "\n";
            std::cout << C[i] << " ";
        }
    }
    std::cout << "\nC[0] : " << C[0] << "\n";
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    return 0;
}
|
11,474 | // Warp counter in CUDA
#include <stdio.h>
#include <cuda_runtime.h>
// includes, project
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
//extern "C"
// FILL HERE: define lock class
// USe atomic operation for both lock and unlock functions
// Fill free to use any atomic operation that correctly works for unlock.
/**
* CUDA Kernel Device code
* Computes cooperative additions
*/
// FILL HERE: Implement a kernel code that counts the total number of warps
// used in the kernel by using lock.
/**
* Host main routine
*/
// Host main routine: allocates and zeroes the device warp counter, (once the
// kernel is filled in) launches it, and prints the resulting count.
int
main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    int nwarps_host, *nwarps_dev;
    err = cudaMalloc((void**)&nwarps_dev, sizeof(int));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device nwarps (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    nwarps_host = 0;
    err = cudaMemcpy(nwarps_dev, &nwarps_host, sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy nwarps from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Kernel Invocation
    int blocksPerGrid = 125;
    int threadsPerBlock = 1000;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    // FILL HERE: Defind a kernel invocation code that uses the blocksPerGrid blocks of threadsPerBlock threads
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch blockCounterUnLocked kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // BUG FIX: cudaThreadSynchronize() is deprecated (and removed from
    // recent toolkits); cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    // Copy the device result to the host
    // in host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(&nwarps_host, nwarps_dev, sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy A from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();
    printf("number of warps = %d\n", nwarps_host);
    // Free device global memory
    cudaFree(nwarps_dev);
    return 0;
}
|
11,475 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
struct ringPair{
int ro;
int co;
};
// Compute per-column squared norms.
// Kernel: anorm[tid] = squared Euclidean norm of column tid of the
// row x col matrix a (one thread per column).
__global__ static void norm(int row,int col,double *a,double *anorm){
    int tid = threadIdx.x;
    double acc = 0;
    // Accumulate in a register (same summation order as a global-memory
    // accumulator, so the result is bit-identical).
    for (int r = 0; r < row; r++) {
        acc += a[r * col + tid] * a[r * col + tid];
    }
    anorm[tid] = acc;
}
// Merge-sort the column norms (descending), tracking the permutation in p.
// Parallel bottom-up merge sort of anorm in descending order; p carries the
// column permutation along, tnorm/tp are scratch buffers.
// Thread tid merges the two runs [tid, tid+n) and [tid+n, tid+2n) whenever
// tid is a run boundary.
// NOTE(review): appears to assume blockDim.x == col and col a power of two;
// all threads reach the __syncthreads() calls together only because the
// while-condition is uniform -- confirm launch configuration.
__global__ static void sort(int col,double *anorm,double *tnorm,int *p,int *tp){
int tid=threadIdx.x;
int i,j,k;
int n=1; // current length of each sorted run
while(n<col){
if(tid%(n*2)==0){
for(i=tid,j=tid+n,k=tid;;k++){
if(anorm[i]>anorm[j]){
tnorm[k]=anorm[i];
tp[k]=p[i];
i++;
if(i==tid+n){
// left run exhausted: copy the remainder of the right run
for(;j<tid+n+n;j++){
k++;
tnorm[k]=anorm[j];
tp[k]=p[j];
}
break;
}
}else{
tnorm[k]=anorm[j];
tp[k]=p[j];
j++;
if(j==tid+n+n){
// right run exhausted: copy the remainder of the left run
for(;i<tid+n;i++){
k++;
tnorm[k]=anorm[i];
tp[k]=p[i];
}
break;
}
}
}
}
n*=2;
__syncthreads();
// publish the merged runs back into anorm/p for the next round
anorm[tid]=tnorm[tid];
p[tid]=tp[tid];
__syncthreads();
}
}
// One step of one-sided Jacobi SVD: block bid rotates the column pair
// rp[i*(col/2)+bid] of a (and accumulates the rotation into v) when the
// pair is not yet orthogonal enough.  Launch: one block per pair, one
// thread per row (assumes blockDim.x == row).
__global__ static void one_side_jacobi(int i,int row,int col,double *a,double *v,double *anorm,struct ringPair *rp,int *n_clear){
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    // BUG FIX: 'e' was declared int, so the 1e-8 tolerance truncated to 0
    // and the convergence test degenerated to fabs(d) > 0.
    double e = 1e-8;
    __shared__ int ro;
    __shared__ int co;
    __shared__ double d;
    if (tid == 0) {
        // BUG FIX: 'd = 0' used to be executed by every thread with no
        // barrier before thread 0 accumulated into it (a data race);
        // only thread 0 touches d before the barrier now.
        d = 0;
        int n = i * (col / 2) + bid;
        ro = rp[n].ro;
        co = rp[n].co;
        int j;
        // dot product of columns ro and co
        for (j = 0; j < row; j++) {
            d += a[j*col+ro] * a[j*col+co];
        }
    }
    __syncthreads();
    if (fabs(d) > col*e*sqrt(anorm[ro]*anorm[co])) {
        // Classic Jacobi rotation parameters (c, s) from cot(2*theta).
        double ct = (anorm[ro]-anorm[co])/(2*d);
        int sign;
        if (ct > 0) {
            sign = 1;
        } else if (ct == 0) {
            sign = 0;
        } else {
            sign = -1;
        }
        double t = sign/(fabs(ct)+sqrt(1+ct*ct));
        double c = 1/(sqrt(1+t*t));
        double s = t*c;
        // Apply the rotation to row 'tid' of both a and v.
        double vr = c*a[tid*col+ro] + s*a[tid*col+co];
        double vc = -s*a[tid*col+ro] + c*a[tid*col+co];
        a[tid*col+ro] = vr;
        a[tid*col+co] = vc;
        vr = c*v[tid*col+ro] + s*v[tid*col+co];
        vc = -s*v[tid*col+ro] + c*v[tid*col+co];
        v[tid*col+ro] = vr;
        v[tid*col+co] = vc;
    } else {
        // BUG FIX: the unsynchronized (*n_clear)++ raced across every thread
        // and block; count each converged pair exactly once, atomically.
        if (tid == 0) atomicAdd(n_clear, 1);
    }
}
//
__global__ static void svdSort(int col,double *anorm,int *p,double *a,double *at,double *v,double *vt){
int bid=blockIdx.x;
int tid=threadIdx.x;
int rows=tid*col;
at[rows+bid]=a[rows+p[bid]];
vt[rows+bid]=v[rows+p[bid]];
//зW
__syncthreads();
anorm[bid]=sqrt(anorm[bid]);
a[rows+bid]=at[rows+bid]/anorm[bid];
} |
11,476 |
#include <sstream>
#include <fstream>
#include <set>
#include <iostream>
#include <map>
#include <vector>
#include <iostream>
#include <bits/stdc++.h>
#include <sstream>
#include <ctime>
#include <cstdint>
#include <stdint.h>
#define SQUEEZE 32
#define DELIMITR " "
#define IS_CHANGED 1
#define NOT_CHANGED 0
#define BLOCK_SIZE 32
using namespace std;
using String = std::string;
// Context-free grammar in (weak) Chomsky normal form, parsed from a text
// file.  Each line is either "A B C" (binary production A -> B C) or "A a"
// (unary production A -> a).  Forward and reverse production maps are built
// so callers can look up heads by body and vice versa.
class Grammar {
public:
std::set<String> nonterminalSet;
std::set<String> terminalSet;
std::map<String, std::set<std::pair<String, String >>> productionsDouble;
std::map<std::pair<String, String>, std::set<String >> reverseProductionsDouble;
std::map<String, std::set<String>> productionsUnary;// NonTerminal -> set of terminals it produces
std::map<String, std::set<String>> reverseProductionsUnary;// Terminal -> set of nonterminals producing it
std::map<String, std::set<String>> nonTerminalToBodyOfProduction;// NonTerminal -> nonterminals it is paired with in some binary production body (either position)
// Reads the grammar file line by line, then builds the reverse maps.
// Lines are split on 'delimiter'; malformed lines throw.
void parse_grammar(const String &filename, const String &delimiter = " ") {
std::ifstream file(filename);
if (file.is_open()) {
std::string line;
while (getline(file, line)) {
process_grammar_line(line, delimiter);
}
file.close();
}
make_reverse_relations();
make_nonTerminalToBodyOfProduction();
}
private:
void make_reverse_relations() {
//reverseProductionUnary
make_unary_reverse_relation();
make_double_reverse_relation();
}
// Parses one production line: two delimiter-separated tokens before the
// remainder means a binary production, one means a unary production.
void process_grammar_line(String line, const String &delimiter = " ") {
size_t pos = 0;
std::string token[2];
int c = 0;
while ((pos = line.find(delimiter)) != std::string::npos) {
token[c] = line.substr(0, pos);
line.erase(0, pos + delimiter.length());
c++;
}
String head = token[0];
if (c == 2) {
String left_terminal = token[1];
String right_terminal = line;
auto tail = make_pair(left_terminal, right_terminal);
this->nonterminalSet.insert(head);// record the nonterminals
this->nonterminalSet.insert(left_terminal);
this->nonterminalSet.insert(right_terminal);
if (this->productionsDouble.count(head) == 1) { // record the production
auto iter = this->productionsDouble.find(head);
iter->second.insert(tail);
} else {
this->productionsDouble.insert(make_pair(head, set<pair<String, String >>({tail})));
}
} else if (c == 1) {
const String &terminal = line;
this->nonterminalSet.insert(head);
if (this->productionsUnary.count(head) == 1) {
auto iter = this->productionsUnary.find(head);
iter->second.insert(terminal);
} else {
this->productionsUnary.insert(make_pair(head, set<String>({terminal})));
}
this->terminalSet.insert(terminal);
} else {
throw "Error while process line from grammar";
}
}
void make_unary_reverse_relation() {
for (auto nonterminal: this->productionsUnary) {
for (auto terminal: nonterminal.second) {
if (reverseProductionsUnary.count(terminal) == 1) {
reverseProductionsUnary.find(terminal)->second.insert(nonterminal.first);
} else {
reverseProductionsUnary.insert(make_pair(terminal, set<String>({nonterminal.first})));
}
}
}
}
void make_double_reverse_relation() {
for (auto head:this->productionsDouble) {
for (auto elem_pair:head.second) {
if (reverseProductionsDouble.count(elem_pair) == 1) {
reverseProductionsDouble.find(elem_pair)->second.insert(head.first);
} else {
reverseProductionsDouble.insert(make_pair(elem_pair, set<String>({head.first})));
}
}
}
}
// For every ordered pair of nonterminals forming a production body, record
// each member of the pair as a "body neighbour" of the other.
void make_nonTerminalToBodyOfProduction() {
for (auto leftNonTerminal: nonterminalSet) {
for (auto rightNonTerminal:nonterminalSet) {
auto key = make_pair(leftNonTerminal, rightNonTerminal);
if (reverseProductionsDouble.count(key)) {
if (nonTerminalToBodyOfProduction.count(leftNonTerminal)) {
nonTerminalToBodyOfProduction.find(leftNonTerminal)->second.insert(rightNonTerminal);
} else {
nonTerminalToBodyOfProduction.insert(
make_pair(leftNonTerminal, set<String>({rightNonTerminal})));
}
if (nonTerminalToBodyOfProduction.count(rightNonTerminal)) {
nonTerminalToBodyOfProduction.find(rightNonTerminal)->second.insert(leftNonTerminal);
} else {
nonTerminalToBodyOfProduction.insert(
make_pair(rightNonTerminal, set<String>({leftNonTerminal})));
}
} else {
}
}
}
}
};
// A directed graph edge (from -> to) annotated with a set of labels.
class Edge {
public:
    int from;
    set<String> label;
    int to;
    // label starts empty; callers insert labels after construction.
    Edge(int from, int to) : from(from), to(to) {
    }
};
// Directed labeled graph read from an edge-list file ("from label to" per
// line).  Tracks the vertex count and its round-up to a multiple of 32
// (SQUEEZE) for bit-packed matrix layouts.
class Graph {
public:
vector<Edge> edges;
int max_number_of_vertex;
int multiple_by_32; // max_number_of_vertex rounded up to the next multiple of 32
void parse_graph(const String &filename, const String &delimiter = " ") {
std::ifstream file(filename);
int max_vertex = 0;
if (file.is_open()) {
std::string line;
while (getline(file, line)) {
size_t pos = 0;
std::string token[2];
int c = 0;
while ((pos = line.find(delimiter)) != std::string::npos) {
token[c] = line.substr(0, pos);
line.erase(0, pos + delimiter.length());
c++;
}
if (c == 2) {
int l = std::stoi(token[0]);
int r = std::stoi(line);
max_vertex = std::max(std::max(l, r), max_vertex);
Edge edge = Edge(l, r);
edge.label.insert(token[1]);
edges.push_back(edge);
} else {
throw "Error while process line from graph";
}
}
file.close();
} else{
throw "Error File not found";
}
max_vertex+=1;// vertices are numbered 0..max_vertex inclusive
max_number_of_vertex = max_vertex;
if (max_vertex % SQUEEZE == 0) {
multiple_by_32 = max_vertex;
} else {
int quout = max_vertex % SQUEEZE;
multiple_by_32 = max_vertex + SQUEEZE - quout;
}
}
// Rewrites every edge's label set from terminals to the nonterminals that
// produce them (the grammar's unary productions applied in reverse).
void replace_terminals_to_noterminals(Grammar &grammar) {
for (auto &edge : edges) {
set<String> tmp;
for (const String &key:edge.label) {
if (grammar.reverseProductionsUnary.count(key) == 1) {
tmp.insert(grammar.reverseProductionsUnary.find(key)->second.begin(),
grammar.reverseProductionsUnary.find(key)->second.end());
}
}
edge.label.clear();
edge.label.insert(tmp.begin(), tmp.end());
}
}
};
// Allocates a rows x cols uint32_t matrix in page-locked (pinned) host RAM.
// NOTE(review): contents are uninitialized; free with delete_matrix_host.
uint32_t * allocate_matrix_host(int rows,int cols) {
// allocate memory in host RAM
uint32_t *matrix;
cudaMallocHost((void **) &matrix, sizeof(uint32_t)*rows * cols);
return matrix;
}
// Allocates a rows x cols uint32_t matrix in device global memory
// (uninitialized); free with delete_matrix_device.
uint32_t * allocate_matrix_device(int rows,int cols){
uint32_t *matrix;
cudaMalloc((void **) &matrix, sizeof(uint32_t)*rows*cols);
return matrix;
}
// Releases a device matrix obtained from allocate_matrix_device.
void delete_matrix_device(uint32_t * matrix) {
cudaFree(matrix);
}
// Releases a pinned host matrix obtained from allocate_matrix_host.
void delete_matrix_host(uint32_t * matrix) {
cudaFreeHost(matrix);
}
//__device__ is_changed = 0;
// Boolean-semiring matrix multiply-accumulate: c |= a x b (AND for product,
// OR for sum) for an m x n by n x k product.  Sets *is_changed to IS_CHANGED
// when any element of c gained new bits.  Launch: 2-D grid of
// BLOCK_SIZE x BLOCK_SIZE blocks covering k columns by m rows.
__global__ void gpu_matrix_mult(uint32_t *a,uint32_t *b, uint32_t *c, int m, int n, int k,uint32_t * is_changed)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t sum = 0;
uint32_t old_c;
if( col < k && row < m)
{
old_c = c[row*k+col];
sum = 0;
for(int i = 0; i < n; i++)
{
sum |= a[row * n + i] & b[i * k + col];
}
sum|=old_c;
// The unsynchronized flag write is benign here: every writer stores the
// same value and the flag is only read back on the host after the kernel.
if(*is_changed == NOT_CHANGED && sum!=old_c ) {
*is_changed = IS_CHANGED;
}
c[row * k + col] = sum;
}
}
// Per-nonterminal adjacency-matrix buffers (host and device views) plus a
// single-element host change flag (IS_CHANGED / NOT_CHANGED).
struct Matrix {
uint32_t *matrix_host;
uint32_t *matrix_device;
uint32_t *matrix_squeezed_host;
uint32_t *is_changed_host;
};
// Scratch table pair.
// NOTE(review): appears unused in the visible code path -- confirm before
// removing.
struct Table {
uint32_t *table_n;
uint32_t *table_last;
};
class Solution {
public:
Graph graph;
Grammar grammar;
map<String, Matrix> nonTerminalToMatrix;
uint32_t * extra_matrix;
Table table;
// Builds the solver: parses graph and grammar, replaces edge terminal
// labels with producing nonterminals, and materializes one boolean matrix
// per nonterminal.
Solution(const String &filename_grammar, const String &filename_graph, const String &delimiter = " ") {
// add table size as parameter
graph.parse_graph(filename_graph, delimiter);
grammar.parse_grammar(filename_grammar, delimiter);
graph.replace_terminals_to_noterminals(grammar);
construct_and_fill_matrices_for_nonterminal_test();
}
// Fixed-point loop of the matrix-based CFL-reachability algorithm: keep
// multiplying matrices for productions whose body mentions a changed
// nonterminal until no matrix changes any more.
void compute_result() {
// initial setup: seed the worklist with every matrix marked changed
set<String> changed_matrices = set<String>();
for (auto &elem: nonTerminalToMatrix) {
if (*elem.second.is_changed_host == IS_CHANGED) {
changed_matrices.insert(elem.first);
}
}
if (changed_matrices.empty()) {
return;//
}
while (true) {
set<String> new_changed_matrices = set<String>();
for (auto &nonterminal: changed_matrices) {
if (grammar.nonTerminalToBodyOfProduction.count(nonterminal)) {
auto const &possibly_second_key_set = grammar.nonTerminalToBodyOfProduction.find(
nonterminal)->second;
// multiply every matrix pair whose production body contains this
// nonterminal (in either position)
for (const auto &sec: possibly_second_key_set) {
auto key1 = make_pair(nonterminal, sec);
auto key2 = make_pair(sec, nonterminal);
if (grammar.reverseProductionsDouble.count(key1)) {
auto iter = grammar.reverseProductionsDouble.find(key1);
for (const auto &res: iter->second) {
auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
if (is_changed) {
new_changed_matrices.insert(res);
}
}
}
if (grammar.reverseProductionsDouble.count(key2)) {
auto iter = grammar.reverseProductionsDouble.find(key2);
for (const auto &res: iter->second) {
auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
if (is_changed) {
new_changed_matrices.insert(res);
}
}
}
}
}
}
if (new_changed_matrices.empty()) {
//copy
break;
} else {
changed_matrices = new_changed_matrices;
//update matrices
}
//transfer
}
}
private:
// не забудь здесь выставить флаги для тех матриц, в которых не нули
// void construct_and_fill_matrices_for_nonterminals() {
// int rows = this->graph.multiple_by_32;
// int cols = this->graph.multiple_by_32 / SQUEEZE; // сжимаем по строкам
// for (auto nonterminal: grammar.nonterminalSet) {
// Matrix matrix = Matrix();
// matrix.matrix_host = alloc_matrix_host_with_zeros(rows, cols);
// matrix.is_changed_host = alloc_matrix_host_with_zeros(1, 1);
// this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
// matrix.matrix_device = alloc_matrix_device_with_zeros(rows, cols);// на гпу
// }// заполнили нулями для хоста
// for (auto &edge:graph.edges) {
// auto i = edge.from;
// auto j = edge.to;
// for (const auto &nonterminal:edge.label) { // заполнилии 1 в i,j для матриц на метках из i в j есть этот нетерминал
// fill_squeezed_matrix(this->nonTerminalToMatrix.find(nonterminal)->second.matrix_host, i, j,
// graph.multiple_by_32);
// }
// }
// for (const auto &nonterminal: grammar.nonterminalSet) {//трансфер данные с цпу на гпу
// auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
// transfer_matrix_from_host_to_gpu(matrix.matrix_host, matrix.matrix_device, rows, cols);
// }
// }
// Allocates a dense rows x cols host matrix per nonterminal (plus the shared
// scratch matrix) and seeds matrix[i][j] = 1 for every edge i -> j whose
// label set contains the nonterminal; seeded matrices are marked changed.
// NOTE(review): allocate_matrix_host does not zero memory, yet the matrices
// are treated as zero-initialized -- verify.
void construct_and_fill_matrices_for_nonterminal_test() {
int rows = this->graph.max_number_of_vertex;
int cols = this->graph.max_number_of_vertex;
int squeezed_cols = this->graph.multiple_by_32;
for (auto nonterminal: grammar.nonterminalSet) {
Matrix matrix = Matrix();
matrix.matrix_host = allocate_matrix_host(rows,cols); //alloc_matrix_host_with_zeros(rows, cols);
// matrix.matrix_squeezed_host = new uint32_t[rows*squeezed_cols];
matrix.is_changed_host = allocate_matrix_host(1,1);
*matrix.is_changed_host = NOT_CHANGED;
this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
}// host matrices created
extra_matrix = allocate_matrix_host(cols,rows); // scratch matrix for aliasing products
for (auto &edge:graph.edges) {
auto i = edge.from;
auto j = edge.to;
for (const auto &nonterminal:edge.label) { // set cell (i, j) in every matrix whose nonterminal labels edge i -> j
auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
matrix.matrix_host[i * cols + j] = 1;
//write_bit(matrix.matrix_squeezed_host,i,j,squeezed_cols);
if (*matrix.is_changed_host == NOT_CHANGED) {
*matrix.is_changed_host = IS_CHANGED;
}
}
}
}
// Sets bit j of row i in a bit-packed matrix with 'cols' 32-bit words per
// row (bit 0 of a row maps to the most significant bit of its first word).
void write_bit(uint32_t *m, int i, int j,int cols){
// m[i * cols + (j / 32)] |= (1ULL << (31 - (j % 32)));
m[i * cols + (j / 32)] |= (1 << (31 - (j % 32)));
}
// Same bit-set operation as write_bit, but takes the padded row width
// (size32, a multiple of 32 bits) instead of the word count.
inline void fill_squeezed_matrix(uint32_t *matrix, int i, int j, int size32) {
// row layout: size32 / 32 words per row
int cols = size32 / 32;
int position_in_number32 = (SQUEEZE - 1) - (j % SQUEEZE);
int position_in_squezzed_row = j / 32;
matrix[i * cols + position_in_squezzed_row] |= (1L << position_in_number32);
}
// uint32_t *alloc_matrix_host_with_zeros(int rows, int cols) {
// }
// uint32_t *alloc_matrix_device_with_zeros(int rows, int cols) {
// }
// Stub: host -> device transfer not implemented; gpu_version() currently
// copies matrices ad hoc on every multiply instead.
void transfer_matrix_from_host_to_gpu(uint32_t *host, uint32_t *device, int rows, int cols) {
//
}
// Stub: device -> host transfer not implemented (see gpu_version()).
void transfer_matrix_from_gpu_to_host(uint32_t *device, uint32_t *host, int rows, int cols) {
}
// Runs c |= a * b (boolean semiring) on the GPU for n x n uint32_t matrices:
// allocates device buffers, copies operands and the change flag over,
// launches gpu_matrix_mult, and copies c and the flag back.
// NOTE(review): device buffers are allocated/freed on every call and no CUDA
// status codes are checked -- acceptable for a prototype, expensive inside
// the fixed-point loop.
void gpu_version(const uint32_t *a, const uint32_t *b, uint32_t *c, int n, uint32_t *is_changed){
// c += ab
// cout<<"H";
uint32_t * a_d = allocate_matrix_device(n,n);
uint32_t * b_d = allocate_matrix_device(n,n);
uint32_t * c_d = allocate_matrix_device(n,n);
uint32_t * flag_device = allocate_matrix_device(1,1);
cudaMemcpy( a_d,a, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
cudaMemcpy( b_d,b, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
cudaMemcpy( c_d,c, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
cudaMemcpy( flag_device,is_changed, sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
gpu_matrix_mult<<<dimGrid,dimBlock>>>(a_d,b_d, c_d, n, n, n,flag_device);
cudaDeviceSynchronize();
cudaMemcpy( c,c_d, sizeof(uint32_t)*n*n, cudaMemcpyDeviceToHost);
cudaMemcpy( is_changed,flag_device, sizeof(uint32_t), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
delete_matrix_device(a_d);
delete_matrix_device(b_d);
delete_matrix_device(c_d);
delete_matrix_device(flag_device);
}
// c = ab
// c |= a * b over the boolean semiring, delegated to the GPU path.
// Validates conformability first; *is_changed is set when c gained bits.
// (The commented block below is the retired CPU reference implementation.)
void dummy_subring_matrix_mul(const uint32_t *a, int row_a, int col_a, const uint32_t *b, int row_b, int col_b,
uint32_t *c, uint32_t *is_changed) {
if (col_a != row_b) {
printf("The matrices can't be multiplied with each other.\n");
return;
}
gpu_version(a,b,c,row_a,is_changed);
//
// for (int i = 0; i < row_a; i++) {
//
// for (int j = 0; j < col_b; j++) {
// uint32_t value = 0;
//
// for (int k = 0; k < row_b; k++) {
// value |= a[i * row_b + k] & b[k * col_b + j];
// }
// if (*is_changed == NOT_CHANGED && (c[i * col_b + j] | value) != c[i * col_b + j]) {
// *is_changed = IS_CHANGED;
// }
// c[i * col_b + j] |= value;
// }
// }
}
// perform algo
//
// allocate matrices and tables on device
//
// A = C*B
// Performs A |= C * B (boolean semiring) for production head -> left right,
// using the scratch matrix when A aliases an operand.
// Returns nonzero (IS_CHANGED) when A gained new bits.
int perform_matrix_mul(const String &head, const String &left, const String &right) {
    int rows = graph.max_number_of_vertex;
    int cols = graph.max_number_of_vertex;
    auto &A = this->nonTerminalToMatrix.at(head);
    auto &C = this->nonTerminalToMatrix.at(left);
    auto &B = this->nonTerminalToMatrix.at(right);
    *A.is_changed_host = 0;
    if (head == left) {// A aliases C: multiply from a scratch copy
        copy(C.matrix_host, C.matrix_host + rows * cols, extra_matrix);
        dummy_subring_matrix_mul(extra_matrix, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
                                 A.is_changed_host);
    } else if (head == right) {// A aliases B: multiply from a scratch copy
        // BUG FIX: this used to be a bare "if" followed by an "else", so when
        // head == left (and head != right) the product was computed twice --
        // once in the branch above and once more in the old "else".
        copy(B.matrix_host, B.matrix_host + rows * cols, extra_matrix);
        dummy_subring_matrix_mul(C.matrix_host, rows, cols, extra_matrix, rows, cols, A.matrix_host,
                                 A.is_changed_host);
    } else {
        dummy_subring_matrix_mul(C.matrix_host, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
                                 A.is_changed_host);
    }
    return *A.is_changed_host;
}
};
int main(int argc, char* argv[]) {
auto solution = Solution(argv[1], argv[2], DELIMITR);
clock_t begin = clock();
solution.compute_result();
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
ifstream input(argv[3]);
vector<String > res(solution.grammar.nonterminalSet.begin(),solution.grammar.nonterminalSet.end());
sort(res.begin(),res.end());
ofstream outputfile;
outputfile.open(argv[3]);
for (auto &nonterminal: res) {
auto &matrix = solution.nonTerminalToMatrix.at(nonterminal);
outputfile << nonterminal;
for (int i = 0; i < solution.graph.max_number_of_vertex; i++) {
for (int j = 0; j < solution.graph.max_number_of_vertex; j++) {
if (matrix.matrix_host[i * solution.graph.max_number_of_vertex + j] != 0) {
outputfile << " " << i << " " << j;
}
}
}
outputfile << endl;
}
outputfile.close();
cout<<elapsed_secs<<endl;
}
|
11,477 | #include "includes.h"
// Copies in[] to out[] by staging each element through dynamically sized
// shared memory.  The launch must supply at least blockDim.x * sizeof(int)
// bytes of dynamic SMEM; the staging has no net effect (demo kernel).
__global__ void smem_dynamic_test(int * in, int * out, int size)
{
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int smem[];
if (gid < size)
{
smem[tid] = in[gid];
out[gid] = smem[tid];
}
}
11,478 | #include <stdio.h>
#include <stdint.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// RAII wrapper around a pair of CUDA events for millisecond-resolution GPU
// timing on the default stream.
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Records the start event and waits for it to complete.
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
// Records the stop event asynchronously; Elapsed() waits for it.
void Stop()
{
cudaEventRecord(stop, 0);
}
// Returns milliseconds between Start() and Stop(), blocking until the
// stop event has completed.
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Sequential LSD radix sort on the host: sorts n uint32_t keys from 'in'
// into 'out', processing nBits bits per pass.
// Assumes nBits in {1, 2, 4, 8, 16}, so the number of passes is even.
void sortByHost(const uint32_t * in, int n,
        uint32_t * out,
        int nBits)
{
    const int nBins = 1 << nBits;                  // buckets per pass
    const uint32_t digitMask = (uint32_t)(nBins - 1);
    int * hist = (int *)malloc(nBins * sizeof(int));
    int * histScan = (int *)malloc(nBins * sizeof(int));
    // 'in' is read-only, so sort a private copy; ping-pong between that copy
    // and 'out' on each pass.
    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // kept so the private copy can be freed
    uint32_t * dst = out;
    // One STABLE counting sort per digit, least-significant digit first.
    for (int shift = 0; shift < (int)(sizeof(uint32_t) * 8); shift += nBits)
    {
        // Histogram of the current digit.
        memset(hist, 0, nBins * sizeof(int));
        for (int i = 0; i < n; i++)
            hist[(src[i] >> shift) & digitMask]++;
        // Exclusive prefix sum of the histogram.
        histScan[0] = 0;
        for (int b = 1; b < nBins; b++)
            histScan[b] = histScan[b - 1] + hist[b - 1];
        // Stable scatter from src into dst.
        for (int i = 0; i < n; i++)
        {
            int b = (src[i] >> shift) & digitMask;
            dst[histScan[b]++] = src[i];
        }
        // Swap roles for the next pass.
        uint32_t * tmp = src;
        src = dst;
        dst = tmp;
    }
    // After an even number of passes, src aliases the private copy again;
    // publish the sorted data.
    memcpy(out, src, n * sizeof(uint32_t));
    free(hist);
    free(histScan);
    free(originalSrc);
}
/*
Use SMEM.
*/
// Histogram of the nBits-wide digit starting at 'bit': each block builds a
// local histogram in dynamic shared memory (nBins ints required), then
// merges it into the global 'hist' with atomics.
__global__ void computeHistKernel2(int * in, int n, int * hist, int nBins, int bit)
{
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s_hist[];
int i=blockDim.x*blockIdx.x+threadIdx.x;
// Zero the local histogram; when nBins > blockDim.x each thread clears
// every blockDim.x-th bin.
for(int stride=0;stride<nBins;stride+=blockDim.x)
if(threadIdx.x+stride<nBins)
s_hist[threadIdx.x+stride]=0;
__syncthreads();// make sure all of s_hist has been zeroed
// Accumulate this block's elements into the local histogram.
if(i<n)
{
int bin=(in[i]>>bit)&(nBins-1);// extract the digit to pick the bin
atomicAdd(&s_hist[bin], 1);
}
__syncthreads();// make sure the whole block has contributed to s_hist
// Each block adds its local hist to global hist using atomic on GMEM
for(int stride=0;stride<nBins;stride+=blockDim.x)
if(threadIdx.x+stride<nBins)
atomicAdd(&hist[threadIdx.x+stride],s_hist[threadIdx.x+stride]);
}
// TODO: You can define necessary functions here
// Cộng giá trị blkSum vào các phần tử tương ứng
// Adds blkSums[blockIdx.x - 1] (the running total of all preceding blocks)
// to every element of this block, completing a block-wise scan fix-up.
__global__ void addBlkKernel(int * in, int n, int * blkSums)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if(i<n&&blockIdx.x>0)
in[i]+=blkSums[blockIdx.x-1];
}
// Partitions 'in' by zero/nonzero value: element i goes to position
// i - inScan[i] when in[i] == 0, otherwise to nZeros + inScan[i], where
// inScan is the exclusive prefix sum of 'in'.
// NOTE(review): this tests the whole value rather than one bit, so it is a
// correct radix-sort scatter only for 0/1 data (as generated in main) --
// confirm before reusing with wider keys.
__global__ void scatter(int * in, int * out, int *inScan, int n, int nZeros)
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if(i<n)
{
if (in[i]==0) out[i - inScan[i]] = in[i];
else out[nZeros + inScan[i]] = in[i];
}
}
// Per-block inclusive Hillis-Steele scan in dynamic shared memory
// (blockDim.x ints required).  'out' receives each block's inclusive scan;
// blkSums (when non-NULL) receives each block's total for a later
// cross-block fix-up (see addBlkKernel).
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
extern __shared__ int s_in[];
int i=blockDim.x*blockIdx.x+threadIdx.x;
// load this block's slice into shared memory, zero-padding past the end
if(i<n)
s_in[threadIdx.x]=in[i];
else
s_in[threadIdx.x]=0;
__syncthreads();
// combine elements 'stride' apart, doubling the stride each round
for(int stride=1;stride<blockDim.x;stride*=2)
{
int temp=0;
if(threadIdx.x>=stride)
{
temp=s_in[threadIdx.x-stride];// read the partner value into a register
}
__syncthreads();// all reads must finish before any write
if(threadIdx.x>=stride )
{
s_in[threadIdx.x]+=temp;
}
__syncthreads();// all writes must finish before the next round
}
// write the scanned values back to global memory
if(i<n)
out[i]=s_in[threadIdx.x];
// the last thread of each block publishes the block total
if(blkSums!=NULL)
{
if(threadIdx.x==blockDim.x-1)
{
blkSums[blockIdx.x]=s_in[threadIdx.x];
}
}
}
// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for diffrent kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
// (Partially) parallel binary radix sort: device scan, host fix-up of the
// per-block totals, then a zero/one scatter on the device.
// blockSizes[0] is reserved for the histogram kernel (unused here);
// blockSizes[1] drives the scan/scatter kernels.
// NOTE(review): the scatter partitions by whole value, so this sorts only
// 0/1 data (as produced by main) regardless of nBits.
void sortByDevice(const uint32_t * in, int n,
        uint32_t * out,
        int nBits, int * blockSizes)
{
    int * inScan = (int *)malloc(n * sizeof(int));
    int * d_in, *d_inScan, *d_blkSums, *d_out;
    dim3 blockSize1(blockSizes[0]);
    dim3 gridSize1((n - 1) / blockSize1.x + 1);
    dim3 blockSize2(blockSizes[1]);
    dim3 gridSize2((n - 1) / blockSize2.x + 1);
    CHECK(cudaMalloc(&d_in, n * sizeof(int)));
    CHECK(cudaMalloc(&d_inScan,n*sizeof(int)));
    CHECK(cudaMalloc(&d_blkSums,gridSize2.x*sizeof(int)));
    CHECK(cudaMalloc(&d_out, n * sizeof(int)));
    CHECK(cudaMemcpy(d_in, in, n * sizeof(int), cudaMemcpyHostToDevice));
    int* blkSums = (int *)malloc(gridSize2.x*sizeof(int));
    // BUG FIX: the dynamic shared-memory size used to be passed as
    // gridSize2.x bytes; scanBlkKernel actually needs blockDim.x ints
    // (far too little memory for small n, accidentally enough for large n).
    scanBlkKernel<<<gridSize2,blockSize2,blockSize2.x*sizeof(int)>>>(d_in,n,d_inScan,d_blkSums);
    // check for kernel launch / runtime errors
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess)
    {
        printf("Sync kernel error 1: %s\n", cudaGetErrorString(errSync));
        return;
    }
    if (errAsync != cudaSuccess)
    {
        printf("Async kernel error 1: %s\n", cudaGetErrorString(errAsync));
        return;
    }
    CHECK(cudaMemcpy(blkSums,d_blkSums,gridSize2.x*sizeof(int),cudaMemcpyDeviceToHost));
    // sequential inclusive scan of the per-block totals on the host
    for(int i=1;i<gridSize2.x;i++)
    {
        blkSums[i]+=blkSums[i-1];
    }
    CHECK(cudaMemcpy(d_blkSums,blkSums,gridSize2.x*sizeof(int),cudaMemcpyHostToDevice));
    addBlkKernel<<<gridSize2,blockSize2>>>(d_inScan,n,d_blkSums);
    // check for kernel launch / runtime errors
    errSync = cudaGetLastError();
    errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess)
    {
        printf("Sync kernel error 2: %s\n", cudaGetErrorString(errSync));
        return;
    }
    if (errAsync != cudaSuccess)
    {
        printf("Async kernel error 2: %s\n", cudaGetErrorString(errAsync));
        return;
    }
    // copy shifted by one element to turn the inclusive scan into an
    // exclusive one
    CHECK(cudaMemcpy(&inScan[1],d_inScan,(n-1)*sizeof(int),cudaMemcpyDeviceToHost));
    inScan[0]=0;
    CHECK(cudaMemcpy(d_inScan,inScan,n*sizeof(int),cudaMemcpyHostToDevice));
    // ones total = inScan[n-1] + in[n-1]; the rest are zeros
    int nZeros = n - inScan[n-1] - in[n-1];
    scatter<<<gridSize2,blockSize2>>>(d_in,d_out,d_inScan,n,nZeros);
    // check for kernel launch / runtime errors
    errSync = cudaGetLastError();
    errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess)
    {
        printf("Sync kernel error 3: %s\n", cudaGetErrorString(errSync));
        return;
    }
    if (errAsync != cudaSuccess)
    {
        printf("Async kernel error 3: %s\n", cudaGetErrorString(errAsync));
        return;
    }
    CHECK(cudaMemcpy(out,d_out,n*sizeof(int),cudaMemcpyDeviceToHost));
    // BUG FIX: blkSums, d_blkSums and d_out used to leak.
    free(inScan);
    free(blkSums);
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_inScan));
    CHECK(cudaFree(d_blkSums));
    CHECK(cudaFree(d_out));
}
// Radix sort
// Dispatches to the host or device radix sort and prints the elapsed GPU
// timer reading.  blockSizes is only consulted on the device path.
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int * blockSizes=NULL)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort by host\n");
sortByHost(in, n, out, nBits);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
// Queries device 0 and prints its capability, SM count, and memory sizes.
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
// Compares out against correctOut element-wise and prints a verdict;
// stops at the first mismatch.
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
    int i = 0;
    while (i < n)
    {
        if (out[i] != correctOut[i])
        {
            printf("INCORRECT :(\n");
            return;
        }
        ++i;
    }
    printf("CORRECT :)\n");
}
// Prints n elements of a, space-separated, on one line.
// NOTE(review): "%i" prints uint32_t as signed, so values >= 2^31 appear
// negative -- confirm whether "%u" was intended.
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
// Entry point.  argv: [1] optional nBits per digit, [2]/[3] optional block
// sizes (histogram, scan).  Sorts the same random input on host and device
// and compares the results.
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
// n = 1000000;
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA -- binary values only; the device scatter handles 0/1
for (int i = 0; i < n; i++)
in[i] = rand()%2;
// printArray(in, n);
// SET UP NBITS
int nBits = 1; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
// SORT BY HOST
sort(in, n, correctOut, nBits);
//printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
//printArray(out,n);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
11,479 | #include <stdlib.h>
#include <stdio.h>
#include "cuda_runtime.h"
// Try CUDA 11 warp reduce
#if (CUDART_VERSION >= 9000)
// Warp-level sum reduction over the first warp's slots of s_block using
// register shuffles (requires CUDA >= 9 for __shfl_xor_sync; guarded by the
// surrounding #if). Intended to be called with only the first warp active
// (tid < 32), as reduce_real() does; the block sum ends up in s_block[0].
template <typename T, unsigned int blk> __device__ void warp_shfl_reduce_real(volatile T *s_block)
{
    unsigned int tid = threadIdx.x;
    T val;
    // For 64-wide blocks, first fold the upper 32 slots into the lower 32.
    if (blk >= 64)
    {
        if (tid < 32)
        {
            s_block[tid] += s_block[tid + 32];
        }
    }
    val = s_block[tid];
    // Butterfly (XOR) reduction: after the loop every lane holds the total.
    for (int i = 16; i >= 1; i /= 2)
    {
        val += __shfl_xor_sync(0xffffffff, val, i, 32);
    }
    s_block[tid] = val;
}
#endif
// Legacy warp-synchronous sum reduction over s_block[0..blk-1]; the block
// sum ends up in s_block[0]. Called with only the first warp active, and
// relies on the volatile qualifier plus implicit lock-step execution within
// a warp instead of barriers — a pre-Volta idiom.
// NOTE(review): on Volta+ (independent thread scheduling) this pattern is
// not guaranteed safe without __syncwarp(); the shuffle-based variant above
// is preferred there — confirm target architectures.
template <typename T, unsigned int blk> __device__ void warp_reduce_real(volatile T *s_block)
{
    unsigned int tid = threadIdx.x;
    // Each step halves the active range; guards on blk let the compiler
    // drop dead steps for small block sizes.
    if (blk >= 64)
    {
        if (tid < 32)
        {
            s_block[tid] += s_block[tid + 32];
        }
    }
    if (blk >= 32)
    {
        if (tid < 16)
        {
            s_block[tid] += s_block[tid + 16];
        }
    }
    if (blk >= 16)
    {
        if (tid < 8)
        {
            s_block[tid] += s_block[tid + 8];
        }
    }
    if (blk >= 8)
    {
        if (tid < 4)
        {
            s_block[tid] += s_block[tid + 4];
        }
    }
    if (blk >= 4)
    {
        if (tid < 2)
        {
            s_block[tid] += s_block[tid + 2];
        }
    }
    if (blk >= 2)
    {
        if (tid < 1)
        {
            s_block[tid] += s_block[tid + 1];
        }
    }
}
// Block-wide sum reduction of s_block[0..blk-1] in shared memory; the result
// ends up in s_block[0]. Expects blockDim.x == blk (a power of two) and must
// be called from uniform control flow so the __syncthreads() barriers are
// reached by every thread of the block.
template <typename T, unsigned int blk> __device__ void reduce_real(T *s_block)
{
    unsigned int tid = threadIdx.x;
    // Tree reduction: halve the active range and barrier after each step
    // until only the first warp's worth of partial sums remains.
    if (blk >= 1024)
    {
        if (tid < 512)
        {
            s_block[tid] += s_block[tid + 512];
        }
        __syncthreads();
    }
    if (blk >= 512)
    {
        if (tid < 256)
        {
            s_block[tid] += s_block[tid + 256];
        }
        __syncthreads();
    }
    if (blk >= 256)
    {
        if (tid < 128)
        {
            s_block[tid] += s_block[tid + 128];
        }
        __syncthreads();
    }
    if (blk >= 128)
    {
        if (tid < 64)
        {
            s_block[tid] += s_block[tid + 64];
        }
        __syncthreads();
    }
    // Final 64 (or fewer) elements: reduced by the first warp alone, so no
    // further block barriers are needed. On CUDA > 9.0 use the shuffle-based
    // warp reduction when the block is at least a warp wide.
#if (CUDART_VERSION > 9000)
    if (blk >= 32)
    {
        if (tid < 32)
        {
            warp_shfl_reduce_real<T, blk>(s_block);
        }
    }
    else
    {
        if (tid < 32)
        {
            warp_reduce_real<T, blk>(s_block);
        }
    }
#else
    if (tid < 32)
    {
        warp_reduce_real<T, blk>(s_block);
    }
#endif
}
/*
Householder transformation
(I - tau * hh * hh^T) * q = q - tau * hh * hh^T * q
Name here : Name in paper
q : X
hh : v
hh_tau : tau
nev : N_C
nb : nbw (==b)
ncols : N_R (==n+b-1)
*/
// Apply a chain of ncols Householder transforms to one column-stripe of q:
//   q <- (I - tau * hh * hh^T) * q   for j = ncols .. 1.
// Launch: one block per eigenvector (blockIdx.x selects the stripe), with
// blockDim.x == blk == nb threads; hh is stored column-wise with leading
// dimension nb, hh_tau holds one tau per transform.
template <typename T, unsigned int blk>
__global__ void compute_hh_trafo_kernel_real(T * __restrict__ q, const T * __restrict__ hh, const T * __restrict__ hh_tau, const int nb, const int ldq, const int ncols)
{
    __shared__ T q_s[blk + 1];    // staged q values, shifted by one each step
    __shared__ T dotp_s[blk];     // per-thread products for the hh^T * q dot
    T q_v2;
    int q_off, h_off, j;
    unsigned int tid = threadIdx.x;
    unsigned int bid = blockIdx.x;
    j = ncols;
    // NOTE(review): original comment flagged "q_off bad access!" — for large
    // tid this initial offset reads row (ncols + tid - 1) of q; verify q is
    // allocated with at least (ncols + nb - 1) rows of length ldq.
    q_off = bid + (j + tid - 1) * ldq;
    h_off = tid + (j - 1) * nb;
    q_s[tid] = q[q_off];
    while (j >= 1)
    {
        // Only the new leading element must be fetched from global memory;
        // the rest was shifted into q_s[1..] at the end of the last step.
        if (tid == 0)
        {
            q_s[tid] = q[q_off];
        }
        q_v2 = q_s[tid];
        // dot = hh_j^T * q   (reduced across the block into dotp_s[0])
        dotp_s[tid] = q_v2 * hh[h_off];
        __syncthreads();
        reduce_real<T, blk>(dotp_s);
        __syncthreads();
        // q <- q - tau_j * (hh_j^T q) * hh_j
        q_v2 -= dotp_s[0] * hh_tau[j - 1] * hh[h_off];
        // Shift by one for the next (previous-column) transform.
        q_s[tid + 1] = q_v2;
        // Write back the element that leaves the working window (or all on
        // the final step via the j == 1 case).
        if ((j == 1) || (tid == blockDim.x - 1))
        {
            q[q_off] = q_v2;
        }
        __syncthreads();
        q_off -= ldq;
        h_off -= nb;
        j -= 1;
    }
}
/*
Name here : Name in paper
q : X
hh : v
hh_tau : tau
nev : N_C
nb : nbw (==b)
ncols : N_R (==n+b-1)
*/
/*
  Host entry point (C linkage): launch the Householder-transform kernel with
  the template block width matching nb. One block per eigenvector (nev
  blocks) of nb threads each.
  Bug fix: the original switch silently launched nothing for an unsupported
  nb; a default case now reports the error and aborts instead.
*/
extern "C" void compute_hh_gpu_kernel(double *q, const double *hh, const double *hh_tau, const int nev, const int nb, const int ldq, const int ncols)
{
    switch (nb)
    {
    case 1024:
        compute_hh_trafo_kernel_real<double, 1024><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 512:
        compute_hh_trafo_kernel_real<double, 512><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 256:
        compute_hh_trafo_kernel_real<double, 256><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 128:
        compute_hh_trafo_kernel_real<double, 128><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 64:
        compute_hh_trafo_kernel_real<double, 64><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 32:
        compute_hh_trafo_kernel_real<double, 32><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 16:
        compute_hh_trafo_kernel_real<double, 16><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 8:
        compute_hh_trafo_kernel_real<double, 8><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 4:
        compute_hh_trafo_kernel_real<double, 4><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 2:
        compute_hh_trafo_kernel_real<double, 2><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    case 1:
        compute_hh_trafo_kernel_real<double, 1><<<nev, nb>>>(q, hh, hh_tau, nb, ldq, ncols);
        break;
    default:
        fprintf(stderr, "\n compute_hh_trafo: unsupported nb=%d (must be a power of two <= 1024) \n", nb);
        exit(1);
    }
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("\n compute_hh_trafo CUDA kernel failed: %s \n", cudaGetErrorString(err));
        exit(1);
    }
}
|
11,480 | #include "includes.h"
using namespace std;
// Cube each element: out[i] = in[i]^3. One thread per element; assumes a
// single-block launch (only threadIdx.x is used).
__global__ void cube(long *deviceOutput, long *deviceInput)
{
    const int idx = threadIdx.x;
    const long v = deviceInput[idx];
    deviceOutput[idx] = v * v * v;
}
11,481 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define FILAS 16
#define COLUMNAS 16
#define BYTES_MATRIZ (FILAS * COLUMNAS * sizeof(int))
// One thread per output element: d_mr = d_m1 * d_m2 for square
// FILAS x COLUMNAS matrices stored row-major.
__global__ void kernel_multiplicar(int *d_m1, int *d_m2,int *d_mr)
{
    // Position of this thread's output cell.
    const int fila = blockIdx.y * blockDim.y + threadIdx.y;
    const int columna = blockIdx.x * blockDim.x + threadIdx.x;
    // Row-of-m1 dot column-of-m2.
    int acumulado = 0;
    for (int k = 0; k < COLUMNAS; k++)
        acumulado += d_m1[fila * COLUMNAS + k] * d_m2[k * COLUMNAS + columna];
    d_mr[fila * COLUMNAS + columna] = acumulado;
}
// Multiply two FILAS x COLUMNAS host matrices on the GPU: h_mr = h_m1 * h_m2.
void multiplicarMatrices(int *h_m1, int *h_m2, int *h_mr)
{
    // Device buffers.
    int *d_m1;
    int *d_m2;
    int *d_mr;
    cudaMalloc((void **)&d_m1, BYTES_MATRIZ);
    cudaMalloc((void **)&d_m2, BYTES_MATRIZ);
    cudaMalloc((void **)&d_mr, BYTES_MATRIZ);
    // Copy only the inputs.
    // Bug fix: the original also copied h_mr to the device, reading an
    // uninitialized malloc'd buffer; the kernel overwrites every element of
    // d_mr, so that transfer was both useless and UB.
    cudaMemcpy(d_m1, h_m1, BYTES_MATRIZ, cudaMemcpyHostToDevice);
    cudaMemcpy(d_m2, h_m2, BYTES_MATRIZ, cudaMemcpyHostToDevice);
    // 4x4 grid of 4x4-thread blocks = 16x16 threads, one per element.
    dim3 matriz_bloques(4, 4);
    dim3 matriz_hilos(4, 4);
    kernel_multiplicar <<< matriz_bloques, matriz_hilos >>> (d_m1, d_m2, d_mr);
    // Wait for the kernel before reading the result back.
    cudaDeviceSynchronize();
    cudaMemcpy(h_mr, d_mr, BYTES_MATRIZ, cudaMemcpyDeviceToHost);
    cudaFree(d_m1);
    cudaFree(d_m2);
    cudaFree(d_mr);
}
void rellenarMatriz(int *h_m, int filas, int columnas)
{
    /* Fill a filas x columnas matrix with random values in [0, 100].
       Bug fix: seed the RNG only once. The original called srand(time(NULL))
       on every invocation, so two calls within the same second (as in main)
       produced identical matrices. */
    static bool seeded = false;
    if (!seeded) {
        srand(time(NULL));
        seeded = true;
    }
    for (int i = 0; i < filas; ++i) {
        for (int j = 0; j < columnas; ++j) {
            *(h_m + i * columnas + j) = rand() % 101;
        }
    }
}
void pintarMatriz(int *h_m, int filas, int columnas) {
    /*
     * Print the matrix, one bracketed row per line, tab-separated.
     * Bug fix: the original iterated rows with 'columnas' and columns with
     * 'filas', which only happened to work for square matrices; it also
     * tested 'j != filas', which is always true inside the loop.
     */
    for (int i = 0; i < filas; i++) {
        printf("[");
        for (int j = 0; j < columnas; j++) {
            if (j != 0) {
                printf("\t");
            }
            printf("%d", *(h_m + i * columnas + j));
        }
        printf("]\n");
    }
}
// Driver: build two random matrices, multiply on the GPU, print everything.
int main()
{
    // Host matrices (result in h_mr).
    int* h_m1 = (int *)malloc(BYTES_MATRIZ);
    int* h_m2 = (int *)malloc(BYTES_MATRIZ);
    int* h_mr = (int *)malloc(BYTES_MATRIZ);

    // Random inputs.
    rellenarMatriz(h_m1, FILAS, COLUMNAS);
    rellenarMatriz(h_m2, FILAS, COLUMNAS);

    printf("Matriz 1: \n");
    pintarMatriz(h_m1, FILAS, COLUMNAS);
    printf("Matriz 2: \n");
    pintarMatriz(h_m2, FILAS, COLUMNAS);

    multiplicarMatrices(h_m1, h_m2, h_mr);

    printf("Matriz resultado: ");
    pintarMatriz(h_mr, FILAS, COLUMNAS);

    free(h_m1);
    free(h_m2);
    free(h_mr);
    return 0;
}
|
11,482 | #include <iostream>
#include <sys/time.h>
#include <ctime>
#include <fstream>
#include <cmath>
#include <cstdlib>
using namespace std;
//Eratosthanes' sieve on odds
// Sieve of Eratosthenes step for odd candidates: each thread takes one
// candidate i (starting at 3) and, if i is still unmarked (prime), flags all
// multiples from i*i upward; primes[x] == 1 means composite.
// NOTE(review): the guard is `i < root`, so i == root is never sieved; with
// root = floor(sqrt(n)) this can leave root*root unmarked. The host loop in
// main() deliberately compensates by subtracting that square from the sum,
// so changing this bound alone would double-correct — fix both together.
// NOTE(review): `j <= n` allows a write to primes[n], one past the end of a
// buffer of n bytes — confirm the allocation size before relying on this.
__global__ static void sieve(char *primes, int n, int root)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x + 3;
    if (i < root && primes[i] == 0)
    {
        // Multiples below i*i were already marked by smaller factors.
        for (long j = i * i; j <= n; j += i)
        {
            primes[j] = 1;
        }
    }
}
//Eratosthanes' sieve on evens
// Flag every even number >= 4 as composite (P[i] = 1).
// Fixed indexing: the original used i = blockIdx.x*blockDim.x + 2*threadIdx.x
// + 4, which makes consecutive blocks overlap so each even value is written
// by many threads. Mapping global thread g to 2*g + 4 marks each even exactly
// once; the existing grid of (n + blockDim)/blockDim blocks still supplies
// enough threads to cover every even below n.
__global__ static void Evens(char* P, int n)
{
    long g = blockIdx.x * blockDim.x + threadIdx.x;
    long i = 2 * g + 4;
    if (i < n) {
        P[i] = 1;
    }
}
// Mark 0 and 1 as non-prime; launched with a single thread.
__global__ static void Init(char* P)
{
    P[0] = P[1] = 1;
}
// Run the device sieve and copy the flag array back: on return P[i] == 0
// means i is prime (for i >= 2).
__host__ void isPrime(char* P, int max)
{
    const int blockSize = 32;
    const long root = sqrt(max);
    const long bytes = sizeof(char) * max;

    char* d_flags = NULL;
    cudaMalloc(&d_flags, bytes);
    cudaMemset(d_flags, 0, bytes);

    dim3 block(blockSize);
    dim3 gridSieve((root + block.x) / block.x);   // one thread per candidate
    dim3 gridEvens((max + block.x) / block.x);    // covers all even values

    Init<<<1, 1>>>(d_flags);
    Evens<<<gridEvens, block>>>(d_flags, max);
    sieve<<<gridSieve, block>>>(d_flags, max, root);

    cudaMemcpy(P, d_flags, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_flags);
}
// Sum all primes strictly below num using the GPU sieve, and time the run.
int main(){
    struct timeval start, end;
    long mtime, seconds, useconds;
    char *primes;
    long long sum;
    long long num;
    cout << "enter number to sum primes to: " << endl;
    cin >> num;
    primes = (char*)malloc(num);
    memset(primes, 0, num);
    if (num < 2) {
        cout << "no primes to sum!" << endl;;
        return 0;
    }
    else{
        // 2 is the only even prime; the loop below only visits odd n.
        sum = 2;
    }
    gettimeofday(&start, NULL);
    isPrime(primes, num);
    for (long n = 3; n <= num - 1; n += 2) {
        if (primes[n] == 0){ //Indicates primacy
            //cout << n << " is prime." << endl;
            sum += n;
            // Compensation for the sieve kernel's `i < root` bound: when n is
            // the largest prime with n*n < num, n*n may be left unmarked and
            // would be wrongly counted later in this loop; pre-subtract it.
            // NOTE(review): this hack is coupled to sieve()'s loop guard —
            // verify both together before changing either.
            if(num >= 1 + n*n && num < (n+1)*(n + 1)) {
                sum -= n*n;
            }
        }
    }
    free(primes);
    gettimeofday(&end, NULL);
    // Elapsed wall-clock time in milliseconds.
    seconds = end.tv_sec - start.tv_sec;
    useconds = end.tv_usec - start.tv_usec;
    mtime = ((seconds) * 1000 + useconds/1000.0);
    cout << "sum under " << num << " is " << sum << endl;
    cout << "time: " << mtime << " milliseconds\n" << endl;
    return 0;
}
|
11,483 | #include "includes.h"
// Fill the first N elements of c with the scalar b; one thread per element.
__global__ void set_arr(float b, float * c, int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        c[i] = b;
}
11,484 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with a diagnostic if any preceding CUDA call left a sticky error.
void check_error (const char* message) {
    cudaError_t error = cudaGetLastError ();
    if (error == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
    exit(-1);
}
// Hyperbolic flux terms on a 308^3-padded grid: for each interior point
// (4 <= i,j,k <= N-5) accumulate 8th-order centered differences in x, y and
// z into the five flux components. One thread per grid point (3-D launch).
// The arithmetic reproduces the original fully-unrolled version term by
// term, in the same order, with the stencil signs folded into the
// coefficient table.
__global__ void hypterm (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
    // Global indices of this thread's grid point.
    int i = max ((int)(blockIdx.x) * (int)(blockDim.x), 0) + (int)(threadIdx.x);
    int j = max ((int)(blockIdx.y) * (int)(blockDim.y), 0) + (int)(threadIdx.y);
    int k = max ((int)(blockIdx.z) * (int)(blockDim.z), 0) + (int)(threadIdx.z);
    // View the flat buffers as [.][308][308] arrays.
    double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
    double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
    double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
    double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
    double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
    double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
    double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
    double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
    double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
    double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
    double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
    double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
    double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
    // Interior points only: the stencil reaches 4 cells in every direction.
    if (i >= 4 && j >= 4 && k >= 4 && i <= N-5 && j <= N-5 && k <= N-5) {
        // 8th-order central-difference weights for offsets 1..4, signs folded in.
        const double c[4] = {0.8, -0.2, 0.038, -0.0035};
        // Per-direction accumulators for the five flux components.
        double x0 = 0.0, x1 = 0.0, x2 = 0.0, x3 = 0.0, x4 = 0.0;
        double y0 = 0.0, y1 = 0.0, y2 = 0.0, y3 = 0.0, y4 = 0.0;
        double z0 = 0.0, z1 = 0.0, z2 = 0.0, z3 = 0.0, z4 = 0.0;
        #pragma unroll
        for (int m = 1; m <= 4; m++) {
            double w = c[m - 1];
            double t;
            // --- x-direction differences ---
            t = cons_1[k][j][i+m] - cons_1[k][j][i-m];
            x0 += w * t;
            t = cons_1[k][j][i+m] * q_1[k][j][i+m] - cons_1[k][j][i-m] * q_1[k][j][i-m]
                + q_4[k][j][i+m] - q_4[k][j][i-m];                       // momentum flux + pressure
            x1 += w * t;
            t = cons_2[k][j][i+m] * q_1[k][j][i+m] - cons_2[k][j][i-m] * q_1[k][j][i-m];
            x2 += w * t;
            t = cons_3[k][j][i+m] * q_1[k][j][i+m] - cons_3[k][j][i-m] * q_1[k][j][i-m];
            x3 += w * t;
            t = cons_4[k][j][i+m] * q_1[k][j][i+m] - cons_4[k][j][i-m] * q_1[k][j][i-m]
                + q_4[k][j][i+m] * q_1[k][j][i+m] - q_4[k][j][i-m] * q_1[k][j][i-m];
            x4 += w * t;
            // --- y-direction differences ---
            t = cons_2[k][j+m][i] - cons_2[k][j-m][i];
            y0 += w * t;
            t = cons_1[k][j+m][i] * q_2[k][j+m][i] - cons_1[k][j-m][i] * q_2[k][j-m][i];
            y1 += w * t;
            t = cons_2[k][j+m][i] * q_2[k][j+m][i] - cons_2[k][j-m][i] * q_2[k][j-m][i]
                + q_4[k][j+m][i] - q_4[k][j-m][i];
            y2 += w * t;
            t = cons_3[k][j+m][i] * q_2[k][j+m][i] - cons_3[k][j-m][i] * q_2[k][j-m][i];
            y3 += w * t;
            t = cons_4[k][j+m][i] * q_2[k][j+m][i] - cons_4[k][j-m][i] * q_2[k][j-m][i]
                + q_4[k][j+m][i] * q_2[k][j+m][i] - q_4[k][j-m][i] * q_2[k][j-m][i];
            y4 += w * t;
            // --- z-direction differences ---
            t = cons_3[k+m][j][i] - cons_3[k-m][j][i];
            z0 += w * t;
            t = cons_1[k+m][j][i] * q_3[k+m][j][i] - cons_1[k-m][j][i] * q_3[k-m][j][i];
            z1 += w * t;
            t = cons_2[k+m][j][i] * q_3[k+m][j][i] - cons_2[k-m][j][i] * q_3[k-m][j][i];
            z2 += w * t;
            t = cons_3[k+m][j][i] * q_3[k+m][j][i] - cons_3[k-m][j][i] * q_3[k-m][j][i]
                + q_4[k+m][j][i] - q_4[k-m][j][i];
            z3 += w * t;
            t = cons_4[k+m][j][i] * q_3[k+m][j][i] - cons_4[k-m][j][i] * q_3[k-m][j][i]
                + q_4[k+m][j][i] * q_3[k+m][j][i] - q_4[k-m][j][i] * q_3[k-m][j][i];
            z4 += w * t;
        }
        // Combine: x-term scaled by dxinv0, minus the y and z contributions.
        flux_0[k][j][i] = x0 * dxinv0 - y0 * dxinv1 - z0 * dxinv2;
        flux_1[k][j][i] = x1 * dxinv0 - y1 * dxinv1 - z1 * dxinv2;
        flux_2[k][j][i] = x2 * dxinv0 - y2 * dxinv1 - z2 * dxinv2;
        flux_3[k][j][i] = x3 * dxinv0 - y3 * dxinv1 - z3 * dxinv2;
        flux_4[k][j][i] = x4 * dxinv0 - y4 * dxinv1 - z4 * dxinv2;
    }
}
/*
  Host wrapper: copy the 13 L*M*N input arrays to the device, launch the
  hypterm stencil, and copy the five flux arrays back.
  Bug fixes vs. the original:
    - pass dxinv0 unmodified (the original passed -dxinv0, negating every
      x-direction derivative);
    - free all 13 device allocations (the original leaked them);
    - check the kernel launch for errors.
*/
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
    double *flux_0;
    cudaMalloc (&flux_0, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for flux_0\n");
    cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *flux_1;
    cudaMalloc (&flux_1, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for flux_1\n");
    cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *flux_2;
    cudaMalloc (&flux_2, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for flux_2\n");
    cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *flux_3;
    cudaMalloc (&flux_3, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for flux_3\n");
    cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *flux_4;
    cudaMalloc (&flux_4, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for flux_4\n");
    cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *cons_1;
    cudaMalloc (&cons_1, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for cons_1\n");
    cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *cons_2;
    cudaMalloc (&cons_2, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for cons_2\n");
    cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *cons_3;
    cudaMalloc (&cons_3, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for cons_3\n");
    cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *cons_4;
    cudaMalloc (&cons_4, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for cons_4\n");
    cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *q_1;
    cudaMalloc (&q_1, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for q_1\n");
    cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *q_2;
    cudaMalloc (&q_2, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for q_2\n");
    cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *q_3;
    cudaMalloc (&q_3, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for q_3\n");
    cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    double *q_4;
    cudaMalloc (&q_4, sizeof(double)*L*M*N);
    check_error ("Failed to allocate device memory for q_4\n");
    cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
    dim3 blockconfig (16, 4, 4);
    dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
    /* Bug fix: dxinv0 passed as-is (was -dxinv0). */
    hypterm <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
    check_error ("hypterm kernel launch failed\n");
    cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
    cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
    cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
    cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
    cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
    /* Bug fix: release all device buffers (the original leaked them). */
    cudaFree (flux_0); cudaFree (flux_1); cudaFree (flux_2); cudaFree (flux_3); cudaFree (flux_4);
    cudaFree (cons_1); cudaFree (cons_2); cudaFree (cons_3); cudaFree (cons_4);
    cudaFree (q_1); cudaFree (q_2); cudaFree (q_3); cudaFree (q_4);
}
|
11,485 | #include <stdio.h>
#include <stdlib.h>
// Terminate with a diagnostic when a CUDA API call did not succeed.
void my_cudasafe( cudaError_t error, char const *message)
{
    if (error == cudaSuccess)
        return;
    fprintf(stderr,"ERROR: %s : %s\n",message,cudaGetErrorString(error));
    exit(-1);
}
// One thread per output element of P = M * N (square, width x width,
// single-block launch with blockDim = (width, width)).
__global__ void matrixMulKernel(float *md, float *nd, float *pd, int width)
{
    int tx = threadIdx.x;   // output column
    int ty = threadIdx.y;   // output row
    float Pvalue = 0;
    for(int k=0; k < width; ++k)
    {
        float Mdelement = md[ty*width+k];
        float Ndelement = nd[k*width+tx];
        Pvalue += Mdelement*Ndelement;
    }
    // Bug fix: the original wrote pd[ty*width+tx + 10], shifting every result
    // by 10 elements and writing past the end of the output buffer.
    pd[ty * width + tx] = Pvalue;
}
// Host wrapper: copy m and n to the device, launch a single width x width
// block to compute p = m * n, and copy the result back.
void matrixmul(float *m, float *n, float *p, int width)
{
    const int size = width * width * sizeof(float);
    float *md, *nd, *pd;

    my_cudasafe(cudaMalloc((void **)&md, size),"Cuda malloc : md");
    my_cudasafe(cudaMemcpy(md, m, size, cudaMemcpyHostToDevice),"Cuda memcopy : md");
    my_cudasafe(cudaMalloc((void **)&nd, size),"Cuda malloc : nd");
    my_cudasafe(cudaMemcpy(nd, n, size, cudaMemcpyHostToDevice),"Cuda memcopy : nd");
    my_cudasafe(cudaMalloc((void **)&pd, size),"Cuda malloc : pd");

    matrixMulKernel<<<dim3(1, 1), dim3(width, width)>>>(md, nd, pd, width);
    my_cudasafe(cudaGetLastError(),"Kernel invocation: matrixMulKernel");
    my_cudasafe(cudaMemcpy(p, pd, size, cudaMemcpyDeviceToHost),"Cuda memcopy : pd");

    my_cudasafe(cudaFree(md),"Cuda free : md");
    my_cudasafe(cudaFree(nd),"Cuda free : nd");
    my_cudasafe(cudaFree(pd),"Cuda free : pd");
}
// Print a width x width matrix under the given single-character label,
// reproducing the original's "\n\n <label> : \n" header format.
static void printMatrix(const char *label, float *mat, int width)
{
    printf("\n\n %s : \n", label);
    for (int i = 0; i < width; i++)
    {
        for (int j = 0; j < width; j++)
        {
            printf("%f ", mat[width * i + j]);
        }
        printf("\n");
    }
}

// Driver: multiply two 3x3 all-ones matrices on the GPU and print the
// operands and result.
// Bug fix: the original never freed the three host buffers; the repeated
// print loops are also factored into printMatrix (identical output).
int main()
{
    float *m, *n, *p;
    int width = 3;
    int i = 0, j = 0;
    m = (float *)malloc(width*width*sizeof(float));
    n = (float *)malloc(width*width*sizeof(float));
    p = (float *)malloc(width*width*sizeof(float));
    for(i=0; i< width; i++)
    {
        for(j=0; j< width; j++)
        {
            m[width*i+j] = 1.00;
            n[width*i+j] = 1.00;
        }
    }
    matrixmul(m,n,p,width);
    printMatrix("M", m, width);
    printMatrix("N", n, width);
    printMatrix("P", p, width);
    free(m);
    free(n);
    free(p);
    return 0;
}
|
11,486 | #include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <cuda_runtime.h>
#define threads_per_block 128
#define index(i, j) (i * num_nodes + j)
using namespace std;
// One thread per vertex u: compute the local clustering coefficient
//   C(u) = 2*m_u / (n_u * (n_u - 1))
// where n_u is u's degree and m_u the number of edges among u's neighbours,
// reduce the block's coefficients in shared memory, and atomically add each
// block's partial sum into *d_sum.
// Bug fixes vs. the original:
//   - no early return for u >= num_nodes: the original returned before
//     __syncthreads(), executing the barrier in divergent control flow
//     (undefined behavior) and leaving temp[] slots uninitialized;
//   - vertices with degree < 2 contributed 0/0 = NaN, poisoning the global
//     sum; they now contribute 0.
__global__ void clustering_coefficient(const float* adj_matrix, const uint num_nodes, float* d_sum)
{
    const uint u = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ float temp[threads_per_block];
    float n_u = 0.0f; //number of vertices in the neighborhood of vertex u
    float m_u = 0.0f; //number of edges in the neighborhood of vertex u
    if (u < num_nodes)
    {
        for (uint v = 0; v < num_nodes; v++)
        {
            if (adj_matrix[index(u,v)] == 1.0f)
            {
                n_u++;
                // Count each neighbour pair (v, w) once via w > v.
                for (uint w = v + 1; w < num_nodes; w++)
                {
                    if (adj_matrix[index(u,w)] == 1.0f &&
                        adj_matrix[index(v,w)] == 1.0f)
                    {
                        m_u++;
                    }
                }
            }
        }
    }
    // Out-of-range threads and degree-<2 vertices contribute 0.
    temp[threadIdx.x] = (u < num_nodes && n_u >= 2.0f)
                            ? (2.0f * m_u) / (n_u * (n_u - 1.0f))
                            : 0.0f;
    __syncthreads();
    // One atomic per block: thread 0 folds the block's values together.
    if (threadIdx.x == 0)
    {
        float sum = 0.0f;
        for (uint i = 0; i < threads_per_block; i++)
        {
            sum += temp[i];
        }
        atomicAdd(d_sum, sum);
    }
}
uint NUM_NODES;
float* allocate_adj_matrix(uint num_nodes);
float* read_graph(char filename[]);
float clustering_coefficient(const uint u, const float* adj_matrix, const uint num_nodes);
// Allocate a num_nodes x num_nodes adjacency matrix with every entry
// initialized to zero. Caller owns the returned buffer (delete[]).
float* allocate_adj_matrix(uint num_nodes)
{
    const uint total = num_nodes * num_nodes;
    // Value-initialization zeroes the whole array.
    float* graph = new float[total]();
    return graph;
}
// Parse an edge-list file ("u v" pairs, whitespace-separated) into a dense
// symmetric adjacency matrix. Sets the global NUM_NODES to the largest node
// id seen plus one; caller owns the returned matrix.
float* read_graph(char filename[])
{
    fstream f(filename, std::ios_base::in);
    uint u,v;
    vector<pair<float,float> > all_edges;
    uint max_node = 0;
    // First pass: collect edges and track the largest node id.
    while (f >> u >> v)
    {
        all_edges.push_back(make_pair(u,v));
        if (u > max_node) max_node = u;
        if (v > max_node) max_node = v;
    }
    f.close();
    NUM_NODES = max_node + 1;
    // Second pass: fill the matrix, marking both directions (undirected).
    float* graph = allocate_adj_matrix(NUM_NODES);
    for (uint i = 0; i < all_edges.size(); i++)
    {
        u = all_edges[i].first;
        v = all_edges[i].second;
        graph[u * NUM_NODES + v] = 1.0f;
        graph[v * NUM_NODES + u] = 1.0f;
    }
    return graph;
}
/**
* Host main routine
*/
/**
 * Host main routine: read a graph, compute the average clustering
 * coefficient C_G on the GPU, and print it.
 * Bug fixes vs. the original:
 *   - cudaMemset wrote num_nodes*sizeof(float) bytes into a 4-byte
 *     allocation (out-of-bounds); only sizeof(float) is zeroed now;
 *   - the post-launch check tested a stale 'err'; launch errors are only
 *     visible via cudaGetLastError();
 *   - h_adj_matrix (allocated with new[]) is now released.
 */
int main(int argc, char* argv[]){
    if (argc < 2)
    {
        cout << "Usage:" << endl
             << "./hw4 filename" << endl;
        exit(-1);
    }
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    float* h_adj_matrix = read_graph(argv[1]);
    const uint num_nodes = NUM_NODES;
    if (h_adj_matrix == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    size_t size = num_nodes * num_nodes * sizeof(float);
    // Device copy of the adjacency matrix.
    float* d_adj_matrix = NULL;
    err = cudaMalloc((void **)&d_adj_matrix, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_adj_matrix, h_adj_matrix, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // One thread per vertex, capped at the hardware grid limit.
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    uint max_blocks = prop.maxGridSize[0];
    uint blocks = ceil((num_nodes + threads_per_block - 1) / threads_per_block);
    if (blocks > max_blocks)
    {
        blocks = max_blocks;
    }
    // Single-float accumulator for the sum of all C(u).
    float* d_sum = NULL;
    err = cudaMalloc((void **)&d_sum, sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate for a global variable (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Bug fix: zero exactly the allocated 4 bytes (was num_nodes*sizeof(float)).
    cudaMemset(d_sum, 0, sizeof(float));
    clustering_coefficient<<<blocks, threads_per_block>>>(d_adj_matrix, num_nodes, d_sum);
    // Bug fix: fetch the launch status instead of re-testing a stale err.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch clustering_coefficient kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float h_sum;
    err = cudaMemcpy(&h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy sum from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Average clustering coefficient of the whole graph.
    float C_G = h_sum / num_nodes;
    err = cudaFree(d_adj_matrix);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_sum);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    delete[] h_adj_matrix;
    cout << C_G << endl;
    // Reset the device so profiling data is flushed before exit.
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
|
11,487 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#ifndef NDEBUG
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
#else
#define CHECK_STATUS(status) status
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////
// Enumerate every CUDA device and report its compute capability.
int main(int argc, char **argv) {
    int deviceCount = 0;
    CHECK_STATUS(cudaGetDeviceCount(&deviceCount));
    for (int device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        CHECK_STATUS(cudaGetDeviceProperties(&deviceProp, device));
        printf("Device %d has compute capability %d.%d.\n",
               device, deviceProp.major, deviceProp.minor);
    }
    return 0;
}
|
11,488 | // Genetic Algorithm on Multiple GPUs
// ga_gpu.cu contains all the CUDA routines
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
// Extern variables to make them available here in the cuda file
// Result from last compute of world.
extern unsigned char *currentGen;
// Current state of world.
extern unsigned char *nextGen;
// Map
extern double *map;
// Fitness
extern double *fitness;
// Global roulette indices
extern unsigned long long *globalRouletteWheel;
// Arrays that contain the two parents to sample, and the two cuts to make for each child
unsigned long long *samplingList;
// Problem Sizes
// Population Size
// Cities/Genome Length
extern unsigned long long popSize;
extern unsigned long long cities;
extern unsigned long long globalSize;
// Bind GPUs to the ranks
//cudaGetDeviceCount returns the number of CUDA capable devices on the system
extern "C" void bindGPUs(int rank)
{
// Bind GPUs to the ranks
int cudaDeviceCount;
int cE;
if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess )
{
printf(" Unable to determine cuda device count, error is %d, count is %d\n",
cE, cudaDeviceCount );
exit(-1);
}
if( (cE = cudaSetDevice( rank % cudaDeviceCount )) != cudaSuccess )
{
printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
rank, (rank % cudaDeviceCount), cE);
exit(-1);
}
}
// Allocate the current/next generation genome buffers in unified memory:
// popSize genomes of `cities` genes each, one byte per gene.
extern "C" void popAlloc(int cities, int popSize){
    globalSize = cities*popSize;
    // Bug fix: the source contained the mojibake "¤tGen" where
    // "&currentGen" belongs (an HTML-mangled "&curren;"); without the
    // address-of operator this line does not compile.
    cudaMallocManaged(&currentGen, globalSize*(sizeof(unsigned char)));
    cudaMallocManaged(&nextGen, globalSize*(sizeof(unsigned char)));
}
// Reserve the cities x cities distance matrix in unified memory.
extern "C" void mapAlloc(unsigned long long cities){
    const unsigned long long entries = cities * cities;
    cudaMallocManaged(&map, entries * sizeof(double));
}
// Allocate this rank's slice of the fitness array
// (popSize / num_ranks entries).
extern "C" void fitnessAlloc(int num_ranks, int rank, int popSize){
    const int localSize = popSize / num_ranks;
    cudaMallocManaged(&fitness, localSize * sizeof(double));
}
// Allocate the sampling list: four entries per child
// (two parent indices and two crossover cut points).
extern "C" void samplingAlloc(int num_ranks, int local_children_num){
    cudaMallocManaged(&samplingList, local_children_num * 4 * sizeof(unsigned long long));
}
// Fitness calculation kernel: score this rank's slice of the population,
// one genome per thread via a grid-stride loop. Fitness is 1 / total tour
// length, where `map` is the cities x cities distance matrix and genomes
// are stored rank-major in currentGen.
// NOTE(review): the "wrap around" leg reads cities 0 and cities-1 in that
// order, so map is presumably symmetric — confirm against the map builder.
__global__ void fitness_kernel(int num_ranks, int rank, int popSize, int cities, unsigned char *currentGen, double *fitness, double *map){
int localPopSize = popSize/num_ranks;
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop: each thread may score several genomes.
for (;
index < localPopSize;
index += blockDim.x * gridDim.x){
int j = 0;
double sum = 0.0;
int a = 0;
int b = 0;
double distance=0.0;
// Sum the distances of the cities-1 consecutive legs of the route.
for(j=0;j<cities-1;j++){
// Simply find the total distance of the route
a = currentGen[rank*cities*localPopSize + index*cities + j];
b = currentGen[rank*cities*localPopSize + index*cities + j+1];
distance = map[a*cities + b];
sum = sum + distance;
}
// wrap around (last to first city)
a = currentGen[rank*cities*localPopSize + index*cities];
b = currentGen[rank*cities*localPopSize + index*cities + cities-1];
distance = map[a*cities + b];
sum = sum + distance;
// Fitness value is 1/total_distance
fitness[index] = 1.0/sum;
}
}
// Launch fitness_kernel over this rank's population slice.
// Geometry: 1024 threads per block, popSize/1024 + 1 blocks; the kernel's
// grid-stride loop tolerates the over-provisioned grid.
extern "C" bool fitness_kernelLaunch (int num_ranks, int rank, int popSize, int cities)
{
    const size_t threadsCount = 1024;
    size_t blockCount = popSize / threadsCount + 1;
    if (blockCount == 0) blockCount++;
    fitness_kernel<<<blockCount, threadsCount>>>(num_ranks, rank, popSize, cities, currentGen, fitness, map);
    cudaDeviceSynchronize();
    return false;
}
// Copy each roulette-selected parent genome from currentGen into nextGen.
// globalRouletteWheel holds the selected parent indices for every rank;
// entry k is written to the front of rank (k / r_wheel_length)'s slice.
__global__ void parents_kernel(int num_ranks, int popSize, int cities, unsigned char *currentGen, unsigned char *nextGen, int r_wheel_length, unsigned long long *globalRouletteWheel){
    int localPopSize = popSize/num_ranks;
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    for (;
         index < r_wheel_length*num_ranks;
         index += blockDim.x * gridDim.x){
        // Bug fix: both indices depend on `index`, so they must be
        // recomputed on every grid-stride iteration. The original computed
        // them once before the loop, so any thread processing more than one
        // wheel entry copied the wrong genome to the wrong slot.
        int nextgen_i = index + (index/r_wheel_length)*(localPopSize - r_wheel_length);
        int currentgen_i = globalRouletteWheel[index];
        for(int j = 0; j < cities; j++){
            nextGen[nextgen_i*cities + j] = currentGen[currentgen_i*cities + j];
        }
    }
}
// Launch parents_kernel: one thread per roulette-wheel entry of a single
// rank (the kernel's grid-stride loop covers all ranks' entries).
extern "C" bool parents_kernelLaunch (int num_ranks, int r_wheel_length)
{
    const size_t threadsCount = 1024;
    size_t blockCount = r_wheel_length / threadsCount + 1;
    if (blockCount == 0) blockCount++;
    parents_kernel<<<blockCount, threadsCount>>>(num_ranks, popSize, cities, currentGen, nextGen, r_wheel_length, globalRouletteWheel);
    cudaDeviceSynchronize();
    return false;
}
// OX (order) crossover on the GPU: each thread builds one child from the
// parents and cut points listed in samplingList, whose layout per child is
// [parent1_idx, parent2_idx, cut_start, cut_end]. Children are written to
// the tail of this rank's slice of nextGen.
__global__ void crossover_kernel(int num_ranks,int rank, int popSize, int cities, unsigned char *currentGen, unsigned char *nextGen, int local_children_num, unsigned long long *samplingList){
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Offset of this rank's children region within nextGen.
    unsigned long long offset = ( (rank+1) * popSize/num_ranks - (local_children_num) )*cities;
    for (;
         index < local_children_num;
         index += blockDim.x * gridDim.x)
    {
        // Per-child scratch buffers on the device heap.
        int *p1(new int[cities]);
        int *p2(new int[cities]);
        int *child(new int[cities]);
        int i, j, next,pos,n;
        n=cities;
        // copy the parents into local memory for efficiency
        for (i = 0; i < n; i++) {
            p1[i] = currentGen[samplingList[index*4]*cities + i];
            p2[i] = currentGen[samplingList[index*4+1]*cities + i];
            child[i] = p1[i];
        }
        // This is the simplest OX operator for crossovers: rotate the child
        // to make room, then splice p2's [cut_start, cut_end) segment in.
        for (j = samplingList[index*4+2]; j < samplingList[index*4+3]; j++) {
            for (pos = 0; pos < n; pos++)
                if (p2[j] == child[pos]) break;
            next = pos+1;
            while (next != samplingList[index*4+3]) {
                if (pos == n ) pos = 0;
                if (next == n) next = 0;
                child[pos++] = child[next++];
            }
        }
        for (j = samplingList[index*4+2]; j < samplingList[index*4+3]; j++)
            child[j] = p2[j];
        // Copy out the child into the shared memory
        for (i = 0; i < cities; i++)
            nextGen[offset+index*cities + i] = child[i];
        // Bug fix: release the scratch buffers. The original leaked three
        // device-heap new[] allocations per child, eventually exhausting
        // the device heap (device new returns NULL when it runs out).
        delete[] p1;
        delete[] p2;
        delete[] child;
    }
}
// Launch crossover_kernel: one thread per child to create on this rank.
extern "C" bool crossover_kernelLaunch (int num_ranks, int rank, int local_children_num)
{
    const size_t threadsCount = 1024;
    size_t blockCount = local_children_num / threadsCount + 1;
    if (blockCount == 0) blockCount++;
    crossover_kernel<<<blockCount, threadsCount>>>(num_ranks, rank, popSize, cities, currentGen, nextGen, local_children_num, samplingList);
    cudaDeviceSynchronize();
    return false;
}
|
11,489 | #include <stdio.h>
#include <time.h>
#define N 512
/*
void Matriz_CPU_Mult(int A[N][N], int B[N][N], int C[N][N]) {
int n,m;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
int sum = 0;
for (int k = 0; k < N; k++) {
m = A[i][k];
n = B[k][j];
sum += m * n;
}
C[i][j] = sum;
}
}
}
*/
// C = A * B for N x N row-major double matrices; one output element per
// thread on a 2D grid (thread (i, j) computes column i of row j).
__global__ void Matriz_GPU_Mult(double *a, double *b, double *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (i < N && j < N) {
        // Bug fix: the accumulator was declared `int`, truncating every
        // double product before the add; accumulate in double.
        double sum = 0.0;
        for (int k = 0; k < N; k++) {
            sum += a[j * N + k] * b[k * N + i];
        }
        c[j * N + i] = sum;
    }
}
// Multiply two N x N matrices on the GPU and report the elapsed time.
int main() {
    double timeGPU; //, timeCPU;
    // `static` keeps the three 2 MiB matrices out of the stack frame, where
    // three automatic N*N double arrays would risk overflowing the default
    // stack limit.
    static double A[N][N], B[N][N], C[N][N];
    double *d_a, *d_b, *d_c;
    int cont, i, j;
    // Initialization: every row of A and B is 0, 1, ..., N-1.
    for (i = 0; i < N; i++) {
        cont = 0;
        for (j = 0; j < N; j++) {
            A[i][j] = cont;
            B[i][j] = cont;
            cont++;
        }
    }
    // Bug fix: the original allocated and copied only N doubles (a single
    // row); the matrices hold N*N doubles, so the kernel read and wrote far
    // past the device allocations.
    size_t bytes = (size_t)N * N * sizeof(double);
    cudaMalloc((void **) &d_a, bytes);
    cudaMalloc((void **) &d_b, bytes);
    cudaMalloc((void **) &d_c, bytes);
    cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, bytes, cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(32, 32);
    dim3 numBlocks((int)ceil((float)N/threadsPerBlock.x), (int)ceil((float)N/threadsPerBlock.y));
    clock_t startGPU = clock();
    Matriz_GPU_Mult<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c);
    // Kernel launches are asynchronous: wait for completion so the timing
    // covers the actual multiplication rather than just the launch.
    cudaDeviceSynchronize();
    timeGPU = ((double)(clock() - startGPU))/CLOCKS_PER_SEC;
    cudaMemcpy(C, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // tiempos de ejecucion
    printf("tiempo GPU = %f s\n", timeGPU);
    return 0;
}
|
11,490 | // This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
// Device-side element-wise copy: each thread moves one char from a to b.
__global__
void copy(char *a, char *b)
{
    const int i = threadIdx.x;
    b[i] = a[i];
}
// Print, for every visible CUDA device: name, memory clock, bus width, and
// the theoretical peak memory bandwidth derived from them.
void printDeviceInfo() {
    int nDevices = 0;
    cudaError_t status = cudaGetDeviceCount(&nDevices);
    if (status != cudaSuccess) {
        printf("cudaGetDeviceCount failed, cudaerror: %i\n", status);
        return;
    }
    for (int dev = 0; dev < nDevices; dev++) {
        cudaDeviceProp prop;
        status = cudaGetDeviceProperties(&prop, dev);
        if (status != cudaSuccess) {
            printf("cudaGetDeviceProperties failed, cudaerror: %i\n", status);
            return;
        }
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        // bandwidth = 2 (DDR) * clock * bytes-per-transfer, scaled to GB/s
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
}
// Round-trip a string through device memory with the `copy` kernel and
// verify it came back intact.
// NOTE(review): uses strncmp/EXIT_FAILURE but only <stdio.h> is visibly
// included above — presumably pulled in transitively; confirm the build.
int main()
{
printDeviceInfo();
// N-byte buffers, explicitly NUL-padded.
char a[N] = "World\0\0\0\0\0\0\0";
char b[N] = "Not Working\0\0\0\0";
char *ad;
char *bd;
const int size = N*sizeof(char);
printf("input: %s\n", a);
cudaError_t err;
err = cudaMalloc( (void**)&ad, size );
if (err != cudaSuccess) {
printf("cudaerror: %i\n", err);
// return EXIT_FAILURE;
}
cudaMalloc( (void**)&bd, size );
cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
// One block of `blocksize` threads: one thread per byte.
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
copy<<<dimGrid, dimBlock>>>(ad, bd);
// Blocking copy-back also synchronizes with the kernel.
cudaMemcpy( b, bd, size, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("Hello %s\n", b);
// strncmp returns nonzero when the buffers differ, i.e. the copy failed.
if (strncmp(a, b, N)) {
printf("cuda kernel did not return expected result\n");
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
11,491 | #include <iostream>
using namespace std;
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
cout << cudaGetErrorString(error) << endl; \
} \
} while (0)
// z = x + y element-wise over n entries; threads past the end do nothing.
__global__
void add_vecs(int n, float *x, float *y, float *z)
{
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    z[idx] = x[idx] + y[idx];
}
// Add two small float vectors on the GPU and print the result.
int main(void)
{
    int N = 10;
    float *x, *y, *z, *d_x, *d_y, *d_z;
    x = (float*)malloc(N*sizeof(float));
    y = (float*)malloc(N*sizeof(float));
    z = (float*)malloc(N*sizeof(float));
    CUDA_CHECK(cudaMalloc(&d_x, N*sizeof(float)));
    CUDA_CHECK(cudaMalloc(&d_y, N*sizeof(float)));
    CUDA_CHECK(cudaMalloc(&d_z, N*sizeof(float)));
    for (int i=0; i<N; i++)
    {
        x[i] = i+1;
        y[i] = (i+1)*10;
    }
    CUDA_CHECK(cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice));
    // (The original also uploaded the uninitialized z buffer into d_z; the
    // kernel fully overwrites d_z, so that copy was dropped.)
    int blockSize = 256; // # threads
    int gridSize = (N/blockSize)+1; // # blocks
    add_vecs<<<gridSize,blockSize>>>(N, d_x, d_y, d_z);
    cudaDeviceSynchronize();
    // check for errors
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
    }
    CUDA_CHECK(cudaMemcpy(z, d_z, N*sizeof(float), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++)
    {
        cout << z[i] << endl;
    }
    // Bug fix: release the three device and three host allocations the
    // original leaked, and return an explicit exit status.
    CUDA_CHECK(cudaFree(d_x));
    CUDA_CHECK(cudaFree(d_y));
    CUDA_CHECK(cudaFree(d_z));
    free(x);
    free(y);
    free(z);
    return 0;
}
11,492 | #include<stdio.h>
#include<stdlib.h>
#include<malloc.h>
#include<time.h>
#include<cuda.h>
// Element-wise vector addition: C[id] = A[id] + B[id] for id < size.
__global__
void sum(float* A, float* B, float* C, int size){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if(id >= size)
        return;
    C[id] = A[id] + B[id];
}
// Print the vector header followed by one element per line.
__host__
void print(float *M, int size){
    printf("-----------Vector------------\n");
    for(int i = 0; i < size; i++){
        printf("%f\n", M[i]);
    }
}
// Read `size` floats from `stream` into M, then close the stream.
__host__
void receive(float *M, FILE *stream, int size){
    for(int i = 0; i < size; i++)
        fscanf(stream, "%f", &M[i]);
    fclose(stream);
}
/* Read two float vectors from the files named on the command line (format:
 * element count on the first line, then the elements), add them on the
 * GPU, and print the result. */
int main(int argc, char** argv){
    if(argc != 3){
        printf("Must be called with the names of the files \n");
        return 1;
    }
    int sizeA, sizeB;
    cudaError_t error = cudaSuccess;
    float *h_A, *h_B, *h_C;
    FILE *f1, *f2;
    f1 = fopen(argv[1], "r");
    f2 = fopen(argv[2], "r");
    // Bug fix: the original passed unchecked FILE pointers to fscanf,
    // crashing on a missing or unreadable file.
    if(f1 == NULL || f2 == NULL){
        printf("Could not open the input files \n");
        return 1;
    }
    fscanf(f1, "%d", &sizeA);
    fscanf(f2, "%d", &sizeB);
    if(sizeA != sizeB){
        printf("The vectors should have same dimensions \b");
        return 1;
    }
    //CPU
    h_A = (float*)malloc(sizeA*sizeof(float));
    h_B = (float*)malloc(sizeA*sizeof(float));
    h_C = (float*)malloc(sizeA*sizeof(float));
    receive(h_A, f1, sizeA);  // receive() also closes the stream
    receive(h_B, f2, sizeA);
    //GPU
    float *d_A, *d_B, *d_C;
    int blockSize = 32;
    int gridSize = ceil(sizeA / float(blockSize));
    error = cudaMalloc((void**)&d_A, sizeA*sizeof(float));
    if (error != cudaSuccess){
        printf("Error allocating memory d_A");
        return 1;
    }
    error = cudaMalloc((void**)&d_B, sizeA*sizeof(float));
    if (error != cudaSuccess){
        printf("Error allocating memory d_B");
        return 1;
    }
    error = cudaMalloc((void**)&d_C, sizeA*sizeof(float));
    if (error != cudaSuccess){
        printf("Error allocating memory d_C");
        return 1;
    }
    cudaMemcpy(d_A, h_A, sizeA*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeA*sizeof(float), cudaMemcpyHostToDevice);
    sum<<<gridSize, blockSize>>>(d_A, d_B, d_C, sizeA);
    // The blocking device-to-host copy synchronizes with the kernel.
    cudaMemcpy(h_C, d_C, sizeA*sizeof(float), cudaMemcpyDeviceToHost);
    print(h_C, sizeA);
    free(h_A); free(h_B); free(h_C);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return 0;
}
11,493 | /***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include "util.h"
/* Increment the global histogram for samples whose range falls OUTSIDE
 * [sm_range_min, sm_range_max]. Bins are packed two 16-bit counters per
 * 32-bit word; bin_offset selects the high or low half-word.
 * NOTE(review): the read of old_val and the atomicAdd are separate steps,
 * so the 255 saturation check can race with other threads — presumably an
 * accepted approximation in this variant; confirm against the reference.
 */
__device__ void testIncrementGlobal (
unsigned int *global_histo,
unsigned int sm_range_min,
unsigned int sm_range_max,
const uchar4 sm)
{
const unsigned int range = sm.x;
const unsigned int indexhi = sm.y;
const unsigned int indexlo = sm.z;
const unsigned int offset = sm.w;
/* Scan for inputs that are outside the central region of histogram */
if (range < sm_range_min || range > sm_range_max)
{
/* Flatten (range, indexhi, indexlo, offset) into a bin index. */
const unsigned int bin = range * BINS_PER_BLOCK + offset / 8 + (indexlo << 2) + (indexhi << 10);
const unsigned int bin_div2 = bin / 2;
const unsigned int bin_offset = (bin % 2 == 1) ? 16 : 0;
unsigned int old_val = global_histo[bin_div2];
unsigned short old_bin = (old_val >> bin_offset) & 0xFFFF;
/* Saturate at 255: only bump the counter while below the cap. */
if (old_bin < 255)
{
atomicAdd (&global_histo[bin_div2], 1 << bin_offset);
}
}
}
/* Increment the block's shared sub-histogram for samples whose range
 * equals myRange. Four 8-bit bins are packed per 32-bit shared word; when
 * a bin wraps past 255 the carry ripples into up to three neighboring bins
 * within the same word, and each carry is logged in global_overflow so the
 * final pass can reconstruct exact counts.
 * NOTE(review): the `prev_bin_val == 0x000000FF` test inside the overflow
 * branch re-checks the branch condition and is therefore always true —
 * presumably intentional symmetry with the plus-2/plus-3 cases.
 */
__device__ void testIncrementLocal (
unsigned int *global_overflow,
unsigned int smem[KB][256],
const unsigned int myRange,
const uchar4 sm)
{
const unsigned int range = sm.x;
const unsigned int indexhi = sm.y;
const unsigned int indexlo = sm.z;
const unsigned int offset = sm.w;
/* Scan for inputs that are inside the central region of histogram */
if (range == myRange)
{
/* Atomically increment shared memory */
unsigned int add = (unsigned int)(1 << offset);
unsigned int prev = atomicAdd (&smem[indexhi][indexlo], add);
/* Check if current bin overflowed */
unsigned int prev_bin_val = (prev >> offset) & 0x000000FF;
/* If there was an overflow, record it and record if it cascaded into other bins */
if (prev_bin_val == 0x000000FF)
{
const unsigned int bin =
range * BINS_PER_BLOCK +
offset / 8 + (indexlo << 2) + (indexhi << 10);
/* A carry can only ripple into bins that still lie within this 32-bit word. */
bool can_overflow_to_bin_plus_1 = (offset < 24) ? true : false;
bool can_overflow_to_bin_plus_2 = (offset < 16) ? true : false;
bool can_overflow_to_bin_plus_3 = (offset < 8) ? true : false;
bool overflow_into_bin_plus_1 = false;
bool overflow_into_bin_plus_2 = false;
bool overflow_into_bin_plus_3 = false;
unsigned int prev_bin_plus_1_val = (prev >> (offset + 8)) & 0x000000FF;
unsigned int prev_bin_plus_2_val = (prev >> (offset + 16)) & 0x000000FF;
unsigned int prev_bin_plus_3_val = (prev >> (offset + 24)) & 0x000000FF;
if (can_overflow_to_bin_plus_1 && prev_bin_val == 0x000000FF) overflow_into_bin_plus_1 = true;
if (can_overflow_to_bin_plus_2 && prev_bin_plus_1_val == 0x000000FF) overflow_into_bin_plus_2 = true;
if (can_overflow_to_bin_plus_3 && prev_bin_plus_2_val == 0x000000FF) overflow_into_bin_plus_3 = true;
/* Correction term per rippled bin: -1 (0xFFFFFFFF) if the neighbor was
 * below the cap, +255 otherwise. */
unsigned int bin_plus_1_add;
unsigned int bin_plus_2_add;
unsigned int bin_plus_3_add;
if (overflow_into_bin_plus_1) bin_plus_1_add = (prev_bin_plus_1_val < 0x000000FF) ? 0xFFFFFFFF : 0x000000FF;
if (overflow_into_bin_plus_2) bin_plus_2_add = (prev_bin_plus_2_val < 0x000000FF) ? 0xFFFFFFFF : 0x000000FF;
if (overflow_into_bin_plus_3) bin_plus_3_add = (prev_bin_plus_3_val < 0x000000FF) ? 0xFFFFFFFF : 0x000000FF;
atomicAdd (&global_overflow[bin], 256);
if (overflow_into_bin_plus_1) atomicAdd (&global_overflow[bin+1], bin_plus_1_add);
if (overflow_into_bin_plus_2) atomicAdd (&global_overflow[bin+2], bin_plus_2_add);
if (overflow_into_bin_plus_3) atomicAdd (&global_overflow[bin+3], bin_plus_3_add);
}
}
}
/* Cooperatively zero the block's shared sub-histogram: the threads of the
 * block stride over the flattened word array. Caller must __syncthreads()
 * before reading the cleared memory. */
__device__ void clearMemory (unsigned int smem[KB][256])
{
    unsigned int *flat = (unsigned int*)smem;
    for (int i = threadIdx.x; i < BINS_PER_BLOCK / 4; i += blockDim.x)
        flat[i] = 0;
}
/* Cooperatively copy the shared sub-histogram out to global memory, one
 * 32-bit word at a time with a block-stride loop. */
__device__ void copyMemory (unsigned int *dst, unsigned int src[KB][256])
{
    const unsigned int *flat = (unsigned int*)src;
    for (int i = threadIdx.x; i < BINS_PER_BLOCK / 4; i += blockDim.x)
        dst[i] = flat[i];
}
/* Main histogramming kernel.
 *
 * Grid layout: blockIdx.y selects which range (sm_range_min + blockIdx.y)
 * this block accumulates into its shared sub-histogram, while
 * blockIdx.x/threadIdx.x stride over the input samples. Only the
 * blockIdx.y == 0 row additionally counts out-of-range samples into the
 * global histogram via testIncrementGlobal.
 */
__global__ void histo_main_kernel (
uchar4 *sm_mappings,
unsigned int num_elements,
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow)
{
/* Most optimal solution uses 24 * 1024 bins per threadblock */
__shared__ unsigned int sub_histo[KB][256];
/* Each threadblock contributes to a specific 24KB range of histogram,
* and also scans every N-th line for interesting data. N = gridDim.x
*/
unsigned int local_scan_range = sm_range_min + blockIdx.y;
unsigned int local_scan_load = blockIdx.x * blockDim.x + threadIdx.x;
clearMemory (sub_histo);
/* Barrier: the sub-histogram must be fully zeroed before accumulation. */
__syncthreads();
if (blockIdx.y == 0)
{
/* Loop through and scan the input */
while (local_scan_load < num_elements)
{
/* Read buffer */
uchar4 sm = sm_mappings[local_scan_load];
local_scan_load += blockDim.x * gridDim.x;
/* Check input */
testIncrementLocal (
global_overflow,
sub_histo,
local_scan_range,
sm
);
testIncrementGlobal (
global_histo,
sm_range_min,
sm_range_max,
sm
);
}
}
else
{
/* Loop through and scan the input */
while (local_scan_load < num_elements)
{
/* Read buffer */
uchar4 sm = sm_mappings[local_scan_load];
local_scan_load += blockDim.x * gridDim.x;
/* Check input */
testIncrementLocal (
global_overflow,
sub_histo,
local_scan_range,
sm
);
}
}
/* Store sub histogram to global memory */
unsigned int store_index = blockIdx.x * (histo_height * histo_width / 4) + (local_scan_range * BINS_PER_BLOCK / 4);
/* Barrier: all accumulation must finish before the copy-out. */
__syncthreads();
copyMemory (&(global_subhisto[store_index]), sub_histo);
}
|
11,494 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
typedef unsigned long ulint;
typedef unsigned long long ulint64;
int banyakdata = 1024;
int dimensigrid = 8;
int dimensiblok = 128;
/* Modular exponentiation: *res = a^b mod c, via square-and-multiply.
 * Intermediates are widened to 64 bits so the products cannot overflow. */
void modexp(ulint a, ulint b, ulint c, ulint* res) {
    ulint64 base = a;
    ulint64 acc = 1;
    while (b != 0) {
        if (b & 1) {
            // Odd exponent: fold one factor of the current base into the result.
            acc = acc * base % c;
            b--;
        }
        b >>= 1;
        if (b != 0) {
            base = base * base % c;
        }
    }
    *res = acc;
}
// Encrypt the whole message buffer serially: res[i] = m[i]^e mod n.
void kernelenk(ulint *m, ulint e, ulint n, ulint *res) {
    for (int i = 0; i < banyakdata; i++)
        modexp(m[i], e, n, &res[i]);
}
// Decrypt the whole ciphertext buffer serially: res[i] = c[i]^d mod n.
void kerneldek(ulint *c, ulint d, ulint n, ulint *res) {
    for (int i = 0; i < banyakdata; i++)
        modexp(c[i], d, n, &res[i]);
}
// Encrypt m[0..banyakdata) with exponent e mod n into res, timing the call
// with clock(). (Despite the name, this build runs entirely on the CPU.)
// NOTE(review): printing time_spent/1000 as "milliseconds" assumes
// CLOCKS_PER_SEC == 1e6 (true on POSIX) — confirm on other platforms.
void enkripsiCUDA(ulint *m, ulint e, ulint n, ulint *res) {
clock_t begin = clock();
kernelenk(m,e,n,res);
clock_t end = clock();
double time_spent = (double)(end - begin);
printf("Durasi : %f milliseconds\n", time_spent/1000);
printf("\n<<<<<<<<<<<<<<HASIL KE CPU>>>>>>>>>>>>>>>\n");
}
// Decrypt c[0..banyakdata) with exponent d mod n into res2, timing the
// call with clock(). (Despite the name, this build runs entirely on CPU.)
// NOTE(review): same CLOCKS_PER_SEC == 1e6 assumption as enkripsiCUDA.
void dekripsiCUDA(ulint *c, ulint d, ulint n, ulint *res2) {
clock_t begin = clock();
kerneldek(c,d,n,res2);
clock_t end = clock();
double time_spent = (double)(end - begin);
printf("Durasi : %f milliseconds\n", time_spent/1000);
printf("\n<<<<<<<<<<<<<<HASIL KE CPU>>>>>>>>>>>>>>>\n");
}
// Fill the message buffer with deterministic pseudo-random byte values
// (fixed seed, so every run encrypts the same data).
void initenkripsi(ulint *m){
    srand(2018);
    for (int i = 0; i < banyakdata; i++)
        m[i] = rand() % 256;
}
/* Demo driver: encrypt/decrypt `banyakdata` random bytes with a toy RSA
 * key (e=211, d=259, n=299) and print samples of every stage. */
int main(){
    ulint *m, e, d, n, *res, *res2;
    m = (ulint*)malloc(banyakdata * sizeof(ulint));
    res = (ulint*)malloc(banyakdata * sizeof(ulint));
    res2 = (ulint*)malloc(banyakdata * sizeof(ulint));
    e = 211;
    d = 259;
    n = 299;
    initenkripsi(m);
    printf("<<<<<<<<<<<<<<Pesan Asli>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("m[%d] = %lu\n", i, m[i]);
    }
    printf("m[...]\n");
    printf("m[%d] = %lu\n", banyakdata-1, m[banyakdata-1]);
    enkripsiCUDA(m,e,n,res);
    printf("<<<<<<<<<<<<<<Hasil Enkripsi>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("c[%d] = %lu c[%d] = %lu\n", 2*i, res[2*i], 2*i+1, res[2*i+1]);
    }
    printf("c ...\n");
    // Bug fix: `res` holds banyakdata elements, but the original indexed up
    // to res[2*banyakdata - 1], reading past the allocation. Print the real
    // last two ciphertext values instead.
    printf("c[%d] = %lu c[%d] = %lu\n", banyakdata-2, res[banyakdata-2], banyakdata-1, res[banyakdata-1]);
    dekripsiCUDA(res,d,n,res2);
    printf("<<<<<<<<<<<<<<Hasil Dekripsi>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("m[%d] = %lu\n", i, res2[i]);
    }
    printf("m[...]\n");
    printf("m[%d] = %lu\n", banyakdata-1, res2[banyakdata-1]);
    free(m);
    free(res);
    free(res2);
    return 0;
}
11,495 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#define N 4096
#define T 1024
using namespace std;
// c = a + b element-wise; threads beyond N do nothing.
__global__
void suma_vectores(int* a, int* b, int* c)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Fill the N-element vector with pseudo-random values in [0, T).
void llenar_vector(int* a)
{
    for (int i = 0; i < N; i++)
        a[i] = rand() % T;
}
// Print all N elements space-separated on a single line.
void print_vector(int* a)
{
    for (int i = 0; i < N; i++)
        cout << a[i] << " ";
    cout << endl;
}
// Fill two N-element vectors, add them on the GPU, and print the result.
int main()
{
    int a[N], b[N], c[N];
    llenar_vector(a);
    llenar_vector(b);
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);
    // Bug fix: ceil(N/T) rounded AFTER integer division, so the ceil() was
    // a no-op and any N not divisible by T would lose its tail block. Use
    // integer ceiling division instead.
    int blocks = (N + T - 1) / T;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    suma_vectores<<<blocks, T>>>(d_a, d_b, d_c);
    // The blocking copy-back synchronizes with the kernel.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    print_vector(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
11,496 |
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define PGM_ID_LINE "P5"
#define PGM_ID_LINE_LEN 3
#define PI 3.14159265358979323846
#define NSEC_PER_SEC 1000000000
#define idx(arr, cols, i, j) (arr[(cols)*(i) + (j)])
struct compute_data {
unsigned char *in_image_h;
unsigned char *out_image_h;
float *gaussian_h;
int width;
int height;
int max_pixel_value;
float sigma;
int order;
};
/* Offset from a Gaussian's center for x, y at index 0, 0.
 *
 * For example, for order 5, -2 is returned.
 */
__host__ __device__ static inline float goffset(float order)
{
    return (1 - order) / 2;
}
/* Clamp x into the inclusive range [low, high]. */
__host__ __device__ static inline int clamp(int x, int low, int high)
{
    if (x < low) return low;
    if (x > high) return high;
    return x;
}
/* cuda_check(ret): wrap a CUDA API call; on failure print the error string
 * with the call site's file/line and exit the program. */
#define cuda_check(ret) _cuda_check((ret), __FILE__, __LINE__)
inline void _cuda_check(cudaError_t ret, const char *file, int line)
{
if (ret != cudaSuccess) {
fprintf(stderr, "CudaErr: %s (%s:%d)\n", cudaGetErrorString(ret), file, line);
exit(1);
}
}
/* Convolve in_d (width x height, 8-bit grayscale) with the order x order
 * Gaussian gauss_d. Sample coordinates are clamped at the image border
 * (edge replication). One output pixel per thread on a 2D grid; threads
 * outside the image exit early.
 * NOTE(review): `term` is a float stored straight into an unsigned char —
 * relies on the normalized kernel keeping the weighted sum within 0..255;
 * no explicit rounding or clamping is applied.
 */
__global__ void convolution_kernel(unsigned char *in_d, unsigned char *out_d, float *gauss_d, int width, int height, int order){
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
float term = 0;
int xsub, ysub;
int offset = goffset(order);
int i, j;
if (x >= width || y >= height)
return;
for (i = 0; i < order; i++) {
ysub = clamp(y + offset + i, 0, height - 1);
for (j = 0; j < order; j++) {
xsub = clamp(x + offset + j, 0, width - 1);
term += idx(in_d, width, ysub, xsub)
* idx(gauss_d, order, i, j);
}
}
idx(out_d, width, y, x) = term;
}
/* Query device 0 for its maximum threads-per-block limit. */
static int get_max_threads()
{
    struct cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, 0);
    return properties.maxThreadsPerBlock;
}
/* Initialize the Gaussian convolution matrix: fill the order x order grid
 * with exp(-(x^2+y^2) / (2*sigma^2)) around the center, then normalize so
 * the weights sum to 1 (which makes the constant prefactor unnecessary).
 */
static void init_gaussian(struct compute_data *data)
{
    const float start = goffset(data->order);
    const int n = data->order;
    float total = 0;
    for (int i = 0; i < n; i++){
        const float y = start + i;
        for (int j = 0; j < n; j++){
            const float x = start + j;
            const float g = exp(-(x*x + y*y)/(2*data->sigma*data->sigma));
            total += g;
            idx(data->gaussian_h, n, i, j) = g;
        }
    }
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            idx(data->gaussian_h, n, i, j) /= total;
        }
    }
}
/* Convolute the Gaussian with the input image.
 *
 * Uploads the image and kernel, launches convolution_kernel with a block
 * shape scaled to the image's aspect ratio, and copies the result back.
 */
static void apply_gaussian(struct compute_data *data)
{
    unsigned char *in_d, *out_d;
    float *gauss_d;
    int gx, gy, bx, by;
    int size = data->width * data->height * sizeof(*in_d); // size for in and out
    int gauss_size = data->order * data->order * sizeof(*gauss_d);
    // Load data
    cuda_check(cudaMalloc(&in_d, size));
    cuda_check(cudaMalloc(&out_d,size));
    cuda_check(cudaMalloc(&gauss_d,gauss_size));
    cuda_check(cudaMemcpy(in_d, data->in_image_h, size, cudaMemcpyHostToDevice));
    cuda_check(cudaMemcpy(gauss_d, data->gaussian_h, gauss_size, cudaMemcpyHostToDevice));
    /* Compute grid and block sizes.
     * A = number of threads (area)
     * w' = s*w    h' = s*h
     * w'*h' = A
     * s^2*w*h = A
     * s = sqrt(A/(w*h))
     */
    bx = data->width * sqrt(get_max_threads()) / sqrt(data->width * data->height);
    by = data->height * sqrt(get_max_threads()) / sqrt(data->width * data->height);
    // Bug fix: for extreme aspect ratios the scaled dimension can truncate
    // to 0, which is an invalid block size and fails the launch; clamp
    // both dimensions to at least 1.
    if (bx < 1) bx = 1;
    if (by < 1) by = 1;
    gx = data->width/bx + 1;
    gy = data->height/by + 1;
#ifdef DEBUG
    printf("grid(%d, %d), block(%d, %d)\n", gx, gy, bx, by);
#endif
    // Run kernel
    dim3 grid_dim(gx, gy);
    dim3 block_dim(bx, by);
    convolution_kernel<<<grid_dim, block_dim>>>(in_d, out_d, gauss_d,
                                                data->width, data->height, data->order);
    cuda_check(cudaPeekAtLastError());
    cuda_check(cudaDeviceSynchronize());
    // Copy back to host
    cuda_check(cudaMemcpy(data->out_image_h, out_d, size, cudaMemcpyDeviceToHost));
    cuda_check(cudaFree(in_d));
    cuda_check(cudaFree(out_d));
    cuda_check(cudaFree(gauss_d));
}
// Return the elapsed time between two timespecs, in seconds.
static float get_timespec_delta(const struct timespec *start,
                                const struct timespec *stop)
{
    const long long from_nsec = start->tv_sec * NSEC_PER_SEC + start->tv_nsec;
    const long long to_nsec = stop->tv_sec * NSEC_PER_SEC + stop->tv_nsec;
    return (float)(to_nsec - from_nsec) / NSEC_PER_SEC;
}
/* CLI entry: <input.pgm> <output.pgm> <sigma>. Reads a binary (P5) PGM,
 * blurs it with a Gaussian of the given sigma on the GPU, reports the
 * elapsed time, and writes the result as PGM. */
int main(int argc, char *argv[])
{
FILE *in_file;
FILE *out_file;
int amount, amount_read;
struct compute_data data;
struct timespec start, stop;
if (argc < 4) {
fprintf(stderr, "Usage: %s <input_file> <output_file> <sigma>\n", argv[0]);
return 1;
}
in_file = fopen(argv[1], "r");
if (NULL == in_file) {
fprintf(stderr, "Error: cannot open file %s\n", argv[1]);
return 1;
}
// Read pgm metadata.
// NOTE(review): fgets with PGM_ID_LINE_LEN (3) reads only 2 chars of the
// magic, so any "P5..." prefix passes — presumably intentional leniency.
char id_line[PGM_ID_LINE_LEN + 1];
if (NULL == fgets(id_line, PGM_ID_LINE_LEN, in_file)
|| strcmp(id_line, PGM_ID_LINE) != 0) {
fprintf(stderr, "Error: invalid PGM information\n");
return 1;
}
if (fscanf(in_file, "%d %d\n%d\n", &data.width, &data.height,
&data.max_pixel_value) < 3) {
fprintf(stderr, "Error: invalid PGM information\n");
return 1;
}
// Read image data.
amount = data.width*data.height;
data.in_image_h = (unsigned char*) malloc(amount*sizeof(*data.in_image_h));
amount_read = fread(data.in_image_h, sizeof(*data.in_image_h), amount, in_file);
if (amount_read < amount) {
fprintf(stderr, "Error: invalid PGM pixels\n");
return 1;
}
fclose(in_file);
// Determine sigma and order.
char *end;
data.sigma = strtod(argv[3], &end);
if (end == argv[3] || data.sigma <= 0) {
fprintf(stderr, "Error: invalid sigma value\n");
return 1;
}
// Kernel order covers +/- 3 sigma, forced odd so it has a center pixel.
data.order = ceil(6*data.sigma);
if (data.order % 2 == 0)
data.order += 1;
if (data.order > data.width || data.order > data.height) {
fprintf(stderr, "Error: sigma value too big for image size\n");
return 1;
}
// Compute image
data.out_image_h = (unsigned char *) malloc(amount*sizeof(*data.out_image_h));
data.gaussian_h = (float *) malloc(data.order*data.order*sizeof(*data.gaussian_h));
clock_gettime(CLOCK_MONOTONIC, &start);
init_gaussian(&data);
#ifdef DEBUG
for (int i = 0; i < data.order; i++) {
for (int j = 0; j < data.order; j++) {
printf("%f ", idx(data.gaussian_h, data.order, i, j));
}
printf("\n");
}
#endif
apply_gaussian(&data);
clock_gettime(CLOCK_MONOTONIC, &stop);
printf("Running time: %.6f secs\n", get_timespec_delta(&start, &stop));
// Output_image
// NOTE(review): "w" (not "wb") and an unchecked fopen — fine on POSIX,
// but binary PGM output on Windows would need "wb"; confirm target.
out_file = fopen(argv[2], "w");
fprintf(out_file, "%s\n%d %d\n%d\n", PGM_ID_LINE, data.width, data.height,
data.max_pixel_value);
fwrite(data.out_image_h, sizeof(char), amount, out_file);
fclose(out_file);
free(data.in_image_h);
free(data.out_image_h);
return 0;
}
|
11,497 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
// GPU stopwatch: measures elapsed device time in milliseconds between
// start() and stop(), implemented with a pair of CUDA events.
class cuStopwatch{
private:
cudaEvent_t startEvent;   // recorded when start() is called
cudaEvent_t stopEvent;    // recorded when stop() is called
bool isStartEventStarted; // true while a measurement is in progress
public:
cuStopwatch();
~cuStopwatch();
void start();             // begin timing; redundant starts are ignored
float stop();             // elapsed ms, or -1.0 on an invalid stop
};
cuStopwatch::cuStopwatch(){
// todo: constructor
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
isStartEventStarted = false;
}
cuStopwatch::~cuStopwatch(){
// todo: destructor
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
}
void cuStopwatch::start(){
// todo: start the stopwatch, and ignore double start
if(!isStartEventStarted){
cudaEventRecord(startEvent);
isStartEventStarted = true;
}
}
float cuStopwatch::stop(){
// todo: stop the stopwatch and return elapsed time, ignore invalid stops (e.g. stop when not yet started or double stop)
if(!isStartEventStarted){
return -1.0;
}
cudaEventSynchronize(startEvent);
cudaEventRecord(stopEvent);
cudaEventSynchronize(stopEvent);
float ms;
cudaEventElapsedTime(&ms, startEvent, stopEvent);
return ms;
} |
11,498 | #include "includes.h"
// Element-wise kernel: res[i] = a[i] - b[i] * x for every i in [0, n).
// Expects a 1-D launch whose grid covers at least n threads.
__global__ void callOperation(int *a, int *b, int *res, int x, int n) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n) return;  // guard against the partial block at the grid tail
    res[idx] = a[idx] - b[idx] * x;
}
11,499 | #include "includes.h"
// Blends per-pixel class-label colors into an input image and writes an
// interleaved 3-byte-per-pixel result into `workspace`.
// Grid layout: 1-D grid-stride loop over pixels in x; blockIdx.z selects the
// batch element. `colors` is declared elsewhere in the project (a table of at
// least 4 RGB triples, indexed modulo 4) — not visible in this file.
// NOTE(review): `batchSize` is accepted but never read here.
__global__ void add_weighted_kernel(unsigned int batchSize, unsigned int nbOutputs, unsigned int outputsHeight, unsigned int outputsWidth, float* estimated_labels, unsigned int nbChannels, unsigned int image_height, unsigned int image_width, float* input_image, unsigned char* workspace, float alpha)
{
// Per-batch base offsets into the planar label scores and the planar image.
const int batchEstimatedOffset = nbOutputs * outputsHeight * outputsWidth * blockIdx.z;
const int batchImageOffset = nbChannels * image_height * image_width * blockIdx.z;
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
// Grid-stride loop: each iteration processes one output pixel i.
for (unsigned int i = index; i < outputsWidth * outputsHeight; i += stride)
{
unsigned int outputMax = 0;
// NOTE(review): when nbOutputs <= 1 nothing is blended and workspace is
// left untouched for this pixel — confirm that is the intended behavior.
if (nbOutputs > 1)
{
// Argmax over the class scores for pixel i (planar layout:
// class plane stride = outputsWidth * outputsHeight).
float maxVal = estimated_labels[i + batchEstimatedOffset];
for (unsigned int cls = 1; cls < nbOutputs; ++cls) {
const float tmp = estimated_labels[i
+ cls*outputsWidth*outputsHeight
+ batchEstimatedOffset];
if (tmp > maxVal) {
outputMax = cls;
maxVal = tmp;
}
}
// Blend each channel: add the winning class's color (scaled by alpha)
// to the corresponding planar image channel, clamped to
// [colors[...]*alpha, 255]. Assumes input_image has >= 3 planes of
// image_height*image_width floats — TODO confirm against callers.
const unsigned char ch0
= (unsigned char) max(colors[outputMax%4][0]*alpha, min(255.0, colors[outputMax%4][0]*alpha + input_image[i + batchImageOffset]));
const unsigned char ch1
= (unsigned char) max(colors[outputMax%4][1]*alpha, min(255.0, colors[outputMax%4][1]*alpha + input_image[i + image_height*image_width + batchImageOffset]));
const unsigned char ch2
= (unsigned char) max(colors[outputMax%4][2]*alpha, min(255.0, colors[outputMax%4][2]*alpha + input_image[i + 2*image_height*image_width + batchImageOffset]));
// Output is interleaved RGB (3 bytes per pixel), but the per-batch
// offset reuses batchImageOffset, which was computed from nbChannels
// planes of floats — NOTE(review): this only lines up when
// nbChannels == 3; verify against the workspace allocation.
workspace[i*3 + batchImageOffset] = ch0;
workspace[i*3 + 1 + batchImageOffset] = ch1;
workspace[i*3 + 2 + batchImageOffset] = ch2;
}
}
} |
11,500 | #include "includes.h"
// Element-wise vector addition: result[i] = in1[i] + in2[i] for 0 <= i < N.
// Expects a 1-D launch whose grid covers at least N threads.
__global__ void ARR_ADDC(float* result, float* in1, float* in2, int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;  // threads past the end of the arrays do nothing
    result[i] = in1[i] + in2[i];
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.