serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
#include <stdio.h>
// Device kernel: add the two integers pointed to by a and b and store
// the sum through c. The kernel keeps no state of its own -- all three
// pointers must reference device memory allocated by the host.
__global__ void add(int *a, int *b, int *c) {
    *c = *a + *b;
}
/*
 * Host driver: copies two integers to the device, runs add<<<1,1>>>, and
 * copies the single-int result back. Prints the sum and sizeof(int).
 * Fix: the original checked no CUDA call, so h_c could be printed even
 * when allocation, copy, or launch had failed.
 */
int main(void) {
    // Local error-check helper: report and bail on any CUDA failure.
#define CHECK(call)                                                        \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,  \
                    cudaGetErrorString(err_));                             \
            return 1;                                                      \
        }                                                                  \
    } while (0)
    // Host copies of the operands and the result.
    int h_a = 2;
    int h_b = 7;
    int h_c = 0;
    // Device-side storage for the same three integers.
    int *d_a, *d_b, *d_c;
    // Bytes in one integer on this system.
    int nbytes = sizeof(int);
    CHECK(cudaMalloc((void **)&d_a, nbytes));
    CHECK(cudaMalloc((void **)&d_b, nbytes));
    CHECK(cudaMalloc((void **)&d_c, nbytes));
    // Push both operands to the device.
    CHECK(cudaMemcpy(d_a, &h_a, nbytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_b, &h_b, nbytes, cudaMemcpyHostToDevice));
    // One block with one thread is enough for a single scalar addition.
    add<<<1,1>>>(d_a, d_b, d_c);
    CHECK(cudaGetLastError());  // catch launch-configuration errors
    // cudaMemcpy is synchronizing, so the kernel has finished before we read.
    CHECK(cudaMemcpy(&h_c, d_c, nbytes, cudaMemcpyDeviceToHost));
    // Release the device allocations made above.
    CHECK(cudaFree(d_a));
    CHECK(cudaFree(d_b));
    CHECK(cudaFree(d_c));
    printf("h_c = %d\n", h_c);
    printf("nbytes = %d\n", nbytes);
    return 0;
#undef CHECK
}
|
/*
The code builds a 3-D reconstruction from a stack of images.
For each image (matrix) it computes the variance at every pixel, then builds a topography (relief) matrix holding
the position (index in the stack) of the image whose variance was largest at that pixel. The same is done for the
colour channels of that image (RGB matrices).
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
//************Global variables***************
// 2-D integer coordinate.
// NOTE(review): not referenced anywhere in the visible code -- possibly
// left over from an earlier version.
struct point
{
int x;
int y;
};
// Flatten a (row i, column j) pair into a linear index assuming a row
// length of `dimy`; the macro picks up whatever `dimy` is in scope at
// the point of use (kernel local or the file-level global).
#define IJ_TO_ID(i,j) (((i)*dimy)+(j))
//************** Kernel CUDA *********************
// Extended-depth-of-field step for one stack slice. One thread per pixel:
//   1. gather the 3x3 neighbourhood of this pixel from the green channel
//      G_d (neighbours outside the image are treated as 0),
//   2. compute the variance of those 9 samples as a focus measure,
//   3. if that variance beats the best seen so far for this pixel
//      (max_d[id]), record the stack index d in topof_d and latch this
//      slice's R/G/B values into Rf_d/Gf_d/Bf_d.
// NOTE(review): dimx/dimy are hard-coded here and must match the host
// globals (1040 x 1388); `long double` in device code is demoted by nvcc.
__global__ void EDF(int *R_d, int *G_d, int *B_d, int *Rf_d, int *Gf_d, int *Bf_d, int *topof_d, long double *max_d, int d) {
// Flatten the 2-D launch into one linear pixel id.
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idy = threadIdx.y + blockIdx.y*blockDim.y;
int id = idx + idy*blockDim.x*gridDim.x;
//int id = idy + idx*blockDim.y*gridDim.y;
int dimx = 1040, dimy = 1388, tam_imag = 1040*1388, msk = 3, M_d[9], k;
long double X = 0.f, Xprom = 0.f, Y = 0.f, var = 0.f;
//Rf_d[id] = id;
//int img_x = (id) % dimx;
//int img_y = (id) / dimx;
// Row (img_x) and column (img_y) of this thread's pixel.
int img_x = id / dimy;
int img_y = id % dimy;
//int i = 0;
//unsigned long long int id2;
// 3x3 neighbourhood of the green channel, zero-padded at the image edges.
M_d[0] = ((img_x < 1 || img_y < 1) ? 0 : G_d[IJ_TO_ID(img_x - 1, img_y - 1)]);
M_d[1] = ((img_x < 1) ? 0 : G_d[IJ_TO_ID(img_x - 1, img_y)]);
M_d[2] = ((img_x<1 || img_y>dimy - 2) ? 0 : G_d[IJ_TO_ID(img_x - 1, img_y + 1)]);
M_d[3] = ((img_y < 1) ? 0 : G_d[IJ_TO_ID(img_x, img_y - 1)]); //img_x
M_d[4] = G_d[IJ_TO_ID(img_x, img_y)];
M_d[5] = ((img_y > dimy - 2) ? 0 : G_d[IJ_TO_ID(img_x, img_y + 1)]);
M_d[6] = ((img_x > dimx - 2 || img_y < 1) ? 0 : G_d[IJ_TO_ID(img_x + 1, img_y - 1)]);
M_d[7] = ((img_x > dimx - 2) ? 0 : G_d[IJ_TO_ID(img_x + 1, img_y)]);
M_d[8] = ((img_x > dimx - 2 || img_y > dimy - 2) ? 0 : G_d[IJ_TO_ID(img_x + 1, img_y + 1)]);
// Mean of the 9 samples ...
for (k = 0;k < msk*msk;k++)
X += M_d[k];
Xprom = ((long double)X) / (msk*msk);
// ... then the (population) variance around that mean.
for (k = 0;k < msk*msk;k++)
Y += (Xprom - M_d[k])*(Xprom - M_d[k]);
var = ((long double)Y) / (msk*msk);
//syncthreads();
// NOTE(review): no shared memory is used, so this barrier does not
// appear to be required for correctness.
__syncthreads();
// Per-pixel compare-and-latch: each id is owned by exactly one thread,
// so these writes do not race within one launch.
if (var > max_d[id]) {
topof_d[id] = d;
Rf_d[id] = R_d[id];
Gf_d[id] = G_d[id];
Bf_d[id] = B_d[id];
max_d[id] = var;
}
}
// Image geometry shared with the kernel: 3x3 mask, 1040 x 1388 pixels.
long msk = 3, dimx = 1040, dimy = 1388, tam_imag = 1040*1388;
//*****************Main function**********************
/*
 * Host driver: reads 20 RGB slices from text files, runs the EDF focus
 * kernel over each, and writes the composite RGB + topography results.
 * Fixes vs. the original: every CUDA call is now checked with a correct
 * message (several messages named the wrong buffer or the wrong API),
 * kernel launches are checked via cudaGetLastError, unused variables are
 * gone, and the accumulation buffers cross the PCIe bus once instead of
 * on every loop iteration.
 */
int main(int argc, char* argv[]) {
    // Abort with a diagnostic on any CUDA runtime failure. exit(0) is
    // kept from the original code so the exit status is unchanged.
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t st_ = (call);                                          \
        if (st_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error line %d: %s\n", __LINE__,          \
                    cudaGetErrorString(st_));                              \
            exit(0);                                                       \
        }                                                                  \
    } while (0)
    int i, j, init, fin, d;
    float t;
    clock_t tinicio = clock(), t_GPU;
    FILE *matrizR, *matrizG, *matrizB;
    // Host/device buffers: current slice (R/G/B), best-focus composites
    // (Rf/Gf/Bf), depth map (topof), and per-pixel best variance (max).
    int *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h, *topof_h;
    long double *max_h;
    int *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d, *topof_d;
    long double *max_d;
    size_t ibytes = sizeof(int) * tam_imag;
    size_t ldbytes = sizeof(long double) * tam_imag;
    //************ Malloc in host and device ***************
    R_h = (int *)malloc(ibytes);
    G_h = (int *)malloc(ibytes);
    B_h = (int *)malloc(ibytes);
    Rf_h = (int *)malloc(ibytes);
    Gf_h = (int *)malloc(ibytes);
    Bf_h = (int *)malloc(ibytes);
    topof_h = (int *)malloc(ibytes);
    max_h = (long double *)malloc(ldbytes);
    if (!R_h || !G_h || !B_h || !Rf_h || !Gf_h || !Bf_h || !topof_h || !max_h) {
        fprintf(stderr, "host malloc failed\n");
        exit(0);
    }
    memset(Rf_h, 0, ibytes);
    memset(Gf_h, 0, ibytes);
    memset(Bf_h, 0, ibytes);
    memset(topof_h, 0, ibytes);
    memset(max_h, 0, ldbytes);
    CUDA_CHECK(cudaMalloc((void**)&R_d, ibytes));
    CUDA_CHECK(cudaMalloc((void**)&G_d, ibytes));
    CUDA_CHECK(cudaMalloc((void**)&B_d, ibytes));
    CUDA_CHECK(cudaMalloc((void**)&Rf_d, ibytes));
    CUDA_CHECK(cudaMalloc((void**)&Gf_d, ibytes));
    CUDA_CHECK(cudaMalloc((void**)&Bf_d, ibytes));
    CUDA_CHECK(cudaMalloc((void**)&topof_d, ibytes));
    CUDA_CHECK(cudaMalloc((void**)&max_d, ldbytes));
    // The accumulation buffers only need to reach the device once; the
    // kernel updates them in place across all slices.
    CUDA_CHECK(cudaMemcpy(Rf_d, Rf_h, ibytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(Gf_d, Gf_h, ibytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(Bf_d, Bf_h, ibytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(topof_d, topof_h, ibytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(max_d, max_h, ldbytes, cudaMemcpyHostToDevice));
    //init=atoi(argv[1]);
    //fin=atoi(argv[2]);
    init = 1;
    fin = 20;
    //*************** Principal FOR ****************
    for (d = init; d <= fin; d++) {
        printf("d=%d \n", d);
        //*****************Read RGB files****************
        char rutaR[1024], rutaG[1024], rutaB[1024];
        sprintf(rutaR, "%s%d%s", "RGB/", d, "/R");
        sprintf(rutaG, "%s%d%s", "RGB/", d, "/G");
        sprintf(rutaB, "%s%d%s", "RGB/", d, "/B");
        matrizR = fopen(rutaR, "r");  // read-only: "r+" was never needed
        if (!matrizR) { printf("Error open file R\n"); exit(0); }
        matrizG = fopen(rutaG, "r");
        if (!matrizG) { printf("Error open file G\n"); exit(0); }
        matrizB = fopen(rutaB, "r");
        if (!matrizB) { printf("Error open file B\n"); exit(0); }
        memset(R_h, 0, ibytes);
        memset(G_h, 0, ibytes);
        memset(B_h, 0, ibytes);
        for (i = 0; i < dimx; i++) {
            for (j = 0; j < dimy; j++) {
                // Short reads leave the memset zeroes in place.
                fscanf(matrizR, "%d", &R_h[i*dimy + j]);
                fscanf(matrizG, "%d", &G_h[i*dimy + j]);
                fscanf(matrizB, "%d", &B_h[i*dimy + j]);
            }
        }
        fclose(matrizR);
        fclose(matrizG);
        fclose(matrizB);
        //***************** Kernel EDF *******************
        CUDA_CHECK(cudaMemcpy(R_d, R_h, ibytes, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(G_d, G_h, ibytes, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(B_d, B_h, ibytes, cudaMemcpyHostToDevice));
        // 347*13 x 20*16 = 4511 x 320 threads == 1,443,520 == dimx*dimy,
        // i.e. exactly one thread per pixel.
        dim3 Grid(347, 20);
        dim3 Block(13, 16);
        EDF <<<Grid, Block>>> (R_d, G_d, B_d, Rf_d, Gf_d, Bf_d, topof_d, max_d, d);
        CUDA_CHECK(cudaGetLastError());       // launch-configuration errors
        CUDA_CHECK(cudaDeviceSynchronize());  // surfaces in-kernel faults
    } //End for
    // Results come back once, after the whole stack is processed.
    CUDA_CHECK(cudaMemcpy(Rf_h, Rf_d, ibytes, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(Gf_h, Gf_d, ibytes, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(Bf_h, Bf_d, ibytes, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(topof_h, topof_d, ibytes, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(max_h, max_d, ldbytes, cudaMemcpyDeviceToHost));
    //****************Save results**************
    FILE *archTopo = fopen("Resultados/topo-f3.txt", "w+");
    FILE *archR = fopen("Resultados/R-f3.txt", "w+");
    FILE *archG = fopen("Resultados/G-f3.txt", "w+");
    FILE *archB = fopen("Resultados/B-f3.txt", "w+");
    if (!archTopo || !archR || !archG || !archB) {
        printf("Error open output file\n");
        exit(0);
    }
    for (i = 0; i < dimx; i++) {
        for (j = 0; j < dimy; j++) {
            fprintf(archTopo, "%d ", topof_h[i*dimy + j]);
            fprintf(archR, "%d ", Rf_h[i*dimy + j]);
            fprintf(archG, "%d ", Gf_h[i*dimy + j]);
            fprintf(archB, "%d ", Bf_h[i*dimy + j]);
        }
        fprintf(archTopo, "\n");
        fprintf(archR, "\n");
        fprintf(archG, "\n");
        fprintf(archB, "\n");
    }
    fclose(archTopo);
    fclose(archR);
    fclose(archG);
    fclose(archB);
    free(max_h);
    free(topof_h);
    free(R_h);
    free(G_h);
    free(B_h);
    free(Rf_h);
    free(Gf_h);
    free(Bf_h);
    cudaFree(max_d);
    cudaFree(topof_d);
    cudaFree(R_d);
    cudaFree(G_d);
    cudaFree(B_d);
    cudaFree(Rf_d);
    cudaFree(Gf_d);
    cudaFree(Bf_d);
    t_GPU = clock();
    t = ((float)t_GPU - (float)tinicio) / CLOCKS_PER_SEC;
    printf("\ntiempo de procesamiento de varianzas: %6.3fs\n", t);
    //getchar ();
    cudaError_t cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
#undef CUDA_CHECK
}//END Main function
|
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
// P = max power of 2 to test up to
// i.e., test for N = 2^0, 2^1, 2^2... 2^P
#define P 15
#define TILE_WIDTH 1
#define ThreadsPerBlock (1<<10)
#define BlocksPerGrid ((1<<16)-1)
#define RANDRANGE 5
#define VERBOSE 0
// Per-block partial dot product: c[blockIdx.x] receives the sum of
// a[i]*b[i] over this block's slice of the vectors.
// Fixes: threads past `width` previously left temp[tx] uninitialized
// (garbage entered the sum whenever the last block was partial), and the
// halving reduction dropped elements for non-power-of-two widths. Zero
// filling and always reducing over the full power-of-two block width
// handles any `width`.
__global__ void dot(float* a, float* b, float* c, unsigned int width) {
    __shared__ float temp[ThreadsPerBlock];
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int index = ThreadsPerBlock*bx + tx;
    // Zero-fill out-of-range lanes so the tree reduction below never
    // reads uninitialized shared memory.
    temp[tx] = (index < (int)width) ? a[index] * b[index] : 0.0f;
    __syncthreads();
    // Iterative halving sum over the whole (power-of-two) block.
    for (int offset = ThreadsPerBlock >> 1; offset > 0; offset >>= 1) {
        if (tx < offset) {
            temp[tx] += temp[tx + offset];
        }
        __syncthreads();
    }
    if (tx == 0) {
        c[bx] = temp[0];
    }
}
// Num subresults is the number of sub- dot products computed in the
// GPU. The host will add them all up.
// Computes the dot product of two width-length host vectors on the GPU.
// The `dot` kernel produces one partial sum per block; this wrapper then
// adds the partials together linearly on the host and returns the scalar.
float dotprod(float* a, float* b, unsigned int width) {
    unsigned int vecBytes = sizeof(float) * width;
    // One partial result per launched block (ceil-div by block size).
    unsigned int numPartials = 1 + ((width - 1) / ThreadsPerBlock);
    unsigned int partialBytes = sizeof(float) * numPartials;
    float *d_A, *d_B, *d_C;
    // Device copies of the input vectors.
    cudaMalloc((void**) &d_A, vecBytes);
    cudaMalloc((void**) &d_B, vecBytes);
    cudaMemcpy(d_A, a, vecBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, b, vecBytes, cudaMemcpyHostToDevice);
    // Host and device buffers for the per-block partial sums.
    float* partials = (float*) malloc(partialBytes);
    *partials = 0;
    cudaMalloc((void**) &d_C, partialBytes);
    // One block per partial, ThreadsPerBlock threads each.
    dim3 blockShape(ThreadsPerBlock);
    dim3 gridShape(numPartials);
    dot<<< gridShape, blockShape >>>(d_A, d_B, d_C, width);
    // Blocking copy: also waits for the kernel to finish.
    cudaMemcpy(partials, d_C, partialBytes, cudaMemcpyDeviceToHost);
    // Linear host-side accumulation of the block results (the O(log n)
    // halving happens inside the kernel).
    for (unsigned int k = 1; k < numPartials; k++) {
        partials[0] += partials[k];
    }
    float total = partials[0];
    // Release everything allocated above.
    free(partials);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return total;
}
// Allocates a matrix with random float entries.
// Fills data[0..size-1] with pseudo-random floats drawn uniformly from
// the integers 1..RANDRANGE (inclusive), using the C library rand().
void randomInit(float* data, int size) {
    int i = 0;
    while (i < size) {
        data[i] = (float)(rand() % RANDRANGE +1);
        ++i;
    }
}
// Driver: for each power p = 1..P, builds two random vectors of length
// 2^p, computes their dot product on the GPU, and prints the result.
int main(int argc, char** argv) {
    for (int p = 1; p <= P; p++) {
        // Host vectors A and B for this problem size.
        unsigned int size_Vect = 1 << p;
        unsigned int mem_size_Vect = sizeof(float) * size_Vect;
        float* h_A = (float*) malloc(mem_size_Vect);
        float* h_B = (float*) malloc(mem_size_Vect);
        // Re-seed before each fill so every iteration starts both
        // pseudo-random sequences from the same point.
        srand(0);
        randomInit(h_A, size_Vect);
        srand(1);
        randomInit(h_B, size_Vect);
        // Perform the calculation on the device.
        float dotprod_result = dotprod(h_A, h_B, size_Vect);
        // Basic test
#if VERBOSE
        printf("A = [ ");
        for(int i=0; i < size_Vect; i++) {
            printf("%0.1f ", h_A[i]);
        }
        printf("]\nB = [ ");
        for(int i=0; i < size_Vect; i++) {
            printf("%0.1f ", h_B[i]);
        }
        printf("]\n");
#endif
        printf("C = %0.2f\n", dotprod_result);
        // Clean up this iteration's host buffers.
        free(h_A);
        free(h_B);
    }
}
#include "includes.h"
// Device-side fitness over a NUM_OF_DIMENSIONS-dimensional point x:
// a Levy-style penalised-sine objective, evaluated after mapping each
// coordinate through y = 1 + (x - 1)/4. Lower values are better (see
// kernelUpdateGBest, which keeps the candidate with smaller fitness).
// NOTE(review): `phi` and NUM_OF_DIMENSIONS come from includes.h (not
// visible here); phi is presumably pi -- confirm. pow()/sin() here are
// the double-precision overloads applied to float data, which is slow
// on most GPUs; powf/sinf would be faster (but changes rounding).
__device__ float fitness_function(float x[])
{
float y,yp;
float res=0;
// Transformed first and last coordinates.
float y1=1+(x[0]-1)/4;
float yn=1+(x[NUM_OF_DIMENSIONS-1]-1)/4;
// Boundary terms: sin^2(phi*y1) + (yn - 1)^2.
res+=pow(sin(phi*y1),2)+pow(yn-1,2);
// Interior terms couple each coordinate with its successor.
for(int i=0;i<NUM_OF_DIMENSIONS-1;i++)
{
y=1+(x[i]-1)/4;
yp=1+(x[i+1]-1)/4;
res+=pow(y-1,2)*(1+10*pow(sin(phi*yp),2));
}
return res;
}
// Scans 10 candidate positions stored back-to-back in pBests (each one
// NUM_OF_DIMENSIONS floats long) and copies any candidate whose fitness
// is lower than gBest's current fitness into gBest.
// NOTE(review): there is no threadIdx guard -- every launched thread
// repeats the full scan and writes gBest concurrently; presumably this
// is meant to run as <<<1,1>>> -- confirm at the call site.
// NOTE(review): the particle count 10 is hard-coded here.
__global__ void kernelUpdateGBest(float *gBest,float *pBests)
{
// Scratch copy of one candidate position.
float temp[NUM_OF_DIMENSIONS];
for(int i=0;i<10*NUM_OF_DIMENSIONS;i+=NUM_OF_DIMENSIONS)
{
for(int k=0;k<NUM_OF_DIMENSIONS;k++)
temp[k]=pBests[i+k];
// Keep the candidate if it improves (i.e. lowers) the best fitness.
if(fitness_function(temp)<fitness_function(gBest))
{
for(int k=0;k<NUM_OF_DIMENSIONS;k++)
gBest[k]=temp[k];
}
}
}
// One thread per Hough-accumulator cell. The thread whose cell holds the
// current global maximum max[0] records its (x, y) position in
// dev_points[0..1], then zeroes a filter_size x filter_size window
// around it so the same peak is not detected on the next pass.
// Bug fix: the window-clearing loop only checked the upper bounds, so a
// peak near the top/left edge produced writes at negative offsets
// (out-of-bounds). Negative i/j are now rejected as well.
// NOTE(review): if several cells tie for the maximum they race on
// dev_points -- the last writer wins.
__global__ void getLineFromAccum(unsigned int* accum, int w_accum, int h_accum, int* dev_points, int* max) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int tid = y * w_accum + x;
    if (x >= w_accum || y >= h_accum)
        return;
    if (max[0] == (int)accum[tid]) {
        dev_points[0] = x;
        dev_points[1] = y;
        // DELETE THE LINE FROM ACCU
        int filter_size = 30;
        for (int i = x - filter_size / 2; i < x + filter_size / 2; i++) {
            for (int j = y - filter_size / 2; j < y + filter_size / 2; j++) {
                if (i >= 0 && j >= 0 && i < w_accum && j < h_accum) {
                    int tid_temp = j * w_accum + i;
                    accum[tid_temp] = 0;
                }
            }
        }
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define BLOCKSIZE 4 // Number of threads in each thread block
/*
* CUDA kernel to find a global max, each thread process
* one element.
* @param values input of an array of integers in which we search a max number
* @param max output of this kernel, the max number in array values
* @param reg_maxes output of this kernel, some regional max number for input array
* @param num_regions input of this kernel, number of regions we use to reduce lock contentions
* @param n input of this kernel, total number of element in input array
*/
__global__ void global_max(int *values, int *max, int *reg_maxes, int num_regions, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Bug fix: the grid is rounded UP to whole blocks (e.g. n=18 launches
    // 20 threads with BLOCKSIZE=4), so tail threads must not read
    // values[i] out of bounds.
    if (i >= n)
        return;
    int val = values[i];
    int region = i % num_regions;
    // Only contend on the single global cell when this value actually
    // raised its region's max -- spreads atomic traffic across regions.
    if(atomicMax(&reg_maxes[region], val) < val) {
        atomicMax(max, val);
    }
}
// Write the cuda kernel to normal all elements in input values,
// the output is stored back into output array, max is the maximum value in the array
// values, n is the total number of elements in values.
// Normalizes values[0..n-1] by the global maximum *max, writing the
// quotients to output. *max must already be populated (see global_max).
// NOTE(review): if every input is <= 0, *max stays 0 and this divides by
// zero -- acceptable for the fixed demo input, flagging for reuse.
__global__ void normalize(int *values, int *max, float *output, int n)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if(x < n) {
        // Fix: divide in float -- the result is stored as float anyway,
        // and the old (double) cast forced slow double-precision math.
        output[x] = (float) values[x] / (* max);
    }
}
/*
 * Demo driver: finds the max of a fixed int array on the GPU, then
 * normalizes every element by that max and prints both arrays.
 * Bug fix: h_out was allocated for (n - 1) floats while n floats are
 * copied back and printed -- a heap overflow. It now holds n floats.
 */
int main( int argc, char* argv[] )
{
    int i;
    int input[] = {4, 5, 6, 7, 19, 10, 0, 4, 2, 3, 1, 7, 9, 11, 45, 23, 100, 29};
    // Element count from the array's own element type; this only works
    // with statically allocated arrays, NOT dynamic arrays.
    int n = sizeof(input) / sizeof(input[0]);
    // Host input vector (aliases the static array above).
    int *h_in = input;
    // Host output vector -- must hold all n normalized values.
    float *h_out = (float *) malloc(n * sizeof(float));
    // Device input vector.
    int *d_in;
    // Device output vector.
    float *d_out;
    int *d_reg_max; // memory for regional maxima
    int *d_gl_max;  // memory for the global max
    // Size, in bytes, of the input vector.
    int bytes = n * sizeof(int);
    // Number of regions used to spread atomic contention in global_max.
    int num_reg = ceil(n / (float)BLOCKSIZE);
    // Allocate device memory and zero the max cells so atomicMax starts
    // from 0 (all demo inputs are >= 0).
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMalloc(&d_reg_max, num_reg * sizeof(int) );
    cudaMalloc(&d_gl_max, sizeof(int) );
    cudaMemset(d_reg_max, 0, num_reg * sizeof(int) );
    cudaMemset(d_gl_max, 0, sizeof(int) );
    // Copy host data to device.
    cudaMemcpy( d_in, h_in, bytes, cudaMemcpyHostToDevice);
    int blockSize = BLOCKSIZE;
    int gridSize = (int)ceil((float)n/blockSize);
    printf("BlockSize: %d, Gridsize: %d\n", blockSize, gridSize);
    // First kernel fills *d_gl_max; it must complete before normalize
    // reads it, hence the synchronize.
    global_max<<<gridSize, blockSize>>>(d_in, d_gl_max, d_reg_max, num_reg, n);
    cudaDeviceSynchronize();
    normalize<<<gridSize, blockSize>>>(d_in, d_gl_max, d_out, n);
    // Blocking copy -- also waits for normalize to finish.
    cudaMemcpy( h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost );
    // Show the result.
    printf("The original array is: ");
    for(i = 0; i < n; i ++)
        printf("%6d,", h_in[i] );
    printf("\n\nNormalized array is: ");
    for(i = 0; i < n; i++)
        printf("%6.2f,", h_out[i] );
    puts("");
    // Release device memory.
    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_reg_max);
    cudaFree(d_gl_max);
    // Release host memory.
    free(h_out);
    return 0;
}
|
// Fills vector[0..len-1] with `value`; one thread per element.
__global__ void init_i32 (int* vector, int value, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        vector[idx] = value;
    }
}
extern "C" {
// Host wrapper: launches init_i32 on `stream`, 1024 threads per block.
void VectorPacked_init_i32 (int* vector, int value, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously yielded a zero (or, for negative len
    // assigned to the unsigned field, wrapped) grid dimension -- an
    // invalid launch configuration. An empty vector is simply a no-op.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    init_i32 <<<gridDim, blockDim, 0, stream>>> (vector, value, len);
}
}
// Same as init_i32 but for float data.
__global__ void init_f32 (float* vector, float value, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        vector[idx] = value;
    }
}
extern "C" {
// Host wrapper: launches init_f32 on `stream`, 1024 threads per block.
void VectorPacked_init_f32 (float* vector, float value, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    init_f32 <<<gridDim, blockDim, 0, stream>>> (vector, value, len);
}
}
// output[i] = vector[i] + value, elementwise; one thread per element.
__global__ void addValue_i32 (int* vector, int value, int* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] + value;
    }
}
extern "C" {
// Host wrapper: launches addValue_i32 on `stream`, 1024 threads per block.
void VectorPacked_addValue_i32 (int* vector, int value, int* output, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    addValue_i32 <<<gridDim, blockDim, 0, stream>>> (vector, value, output, len);
}
}
// Same as addValue_i32 but for float data.
__global__ void addValue_f32 (float* vector, float value, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] + value;
    }
}
extern "C" {
// Host wrapper: launches addValue_f32 on `stream`, 1024 threads per block.
void VectorPacked_addValue_f32 (float* vector, float value, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    addValue_f32 <<<gridDim, blockDim, 0, stream>>> (vector, value, output, len);
}
}
// output[i] = vector[i] * value (scalar scaling); one thread per element.
__global__ void scl_i32 (int* vector, int value, int* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] * value;
    }
}
extern "C" {
// Host wrapper: launches scl_i32 on `stream`, 1024 threads per block.
void VectorPacked_scl_i32 (int* vector, int value, int* output, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    scl_i32 <<<gridDim, blockDim, 0, stream>>> (vector, value, output, len);
}
}
// Same as scl_i32 but for float data.
__global__ void scl_f32 (float* vector, float value, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] * value;
    }
}
extern "C" {
// Host wrapper: launches scl_f32 on `stream`, 1024 threads per block.
void VectorPacked_scl_f32 (float* vector, float value, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    scl_f32 <<<gridDim, blockDim, 0, stream>>> (vector, value, output, len);
}
}
// output[i] = left_op[i] + right_op[i], elementwise.
__global__ void add_i32 (int* left_op, int* right_op, int* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = left_op[idx] + right_op[idx];
    }
}
extern "C" {
// Host wrapper: launches add_i32 on `stream`, 1024 threads per block.
void VectorPacked_add_i32 (int* left_op, int* right_op, int* output, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    add_i32 <<<gridDim, blockDim, 0, stream>>> (left_op, right_op, output, len);
}
}
// Same as add_i32 but for float data.
__global__ void add_f32 (float* left_op, float* right_op, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = left_op[idx] + right_op[idx];
    }
}
extern "C" {
// Host wrapper: launches add_f32 on `stream`, 1024 threads per block.
void VectorPacked_add_f32 (float* left_op, float* right_op, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    add_f32 <<<gridDim, blockDim, 0, stream>>> (left_op, right_op, output, len);
}
}
// output[i] = left_op[i] - right_op[i], elementwise.
__global__ void sub_i32 (int* left_op, int* right_op, int* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = left_op[idx] - right_op[idx];
    }
}
extern "C" {
// Host wrapper: launches sub_i32 on `stream`, 1024 threads per block.
void VectorPacked_sub_i32 (int* left_op, int* right_op, int* output, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    sub_i32 <<<gridDim, blockDim, 0, stream>>> (left_op, right_op, output, len);
}
}
// Same as sub_i32 but for float data.
__global__ void sub_f32 (float* left_op, float* right_op, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = left_op[idx] - right_op[idx];
    }
}
extern "C" {
// Host wrapper: launches sub_f32 on `stream`, 1024 threads per block.
void VectorPacked_sub_f32 (float* left_op, float* right_op, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    sub_f32 <<<gridDim, blockDim, 0, stream>>> (left_op, right_op, output, len);
}
}
// output[i] = left_op[i] * right_op[i] (elementwise/Hadamard product).
__global__ void mult_i32 (int* left_op, int* right_op, int* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = left_op[idx] * right_op[idx];
    }
}
extern "C" {
// Host wrapper: launches mult_i32 on `stream`, 1024 threads per block.
void VectorPacked_mult_i32 (int* left_op, int* right_op, int* output, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    mult_i32 <<<gridDim, blockDim, 0, stream>>> (left_op, right_op, output, len);
}
}
// Same as mult_i32 but for float data.
__global__ void mult_f32 (float* left_op, float* right_op, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = left_op[idx] * right_op[idx];
    }
}
extern "C" {
// Host wrapper: launches mult_f32 on `stream`, 1024 threads per block.
void VectorPacked_mult_f32 (float* left_op, float* right_op, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    mult_f32 <<<gridDim, blockDim, 0, stream>>> (left_op, right_op, output, len);
}
}
// output[i] = vector[i]^2, elementwise.
__global__ void square_i32 (int* vector, int* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] * vector[idx];
    }
}
extern "C" {
// Host wrapper: launches square_i32 on `stream`, 1024 threads per block.
void VectorPacked_square_i32 (int* vector, int* output, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    square_i32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
}
}
// Same as square_i32 but for float data.
__global__ void square_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] * vector[idx];
    }
}
extern "C" {
// Host wrapper: launches square_f32 on `stream`, 1024 threads per block.
void VectorPacked_square_f32 (float* vector, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    square_f32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
}
}
// output[i] = 1 if vector[i] > threshold else 0 (strict inequality).
__global__ void binarize_i32 (int* vector, int threshold, int* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] > threshold ? 1 : 0;
    }
}
extern "C" {
// Host wrapper: launches binarize_i32 on `stream`, 1024 threads per block.
void VectorPacked_binarize_i32 (int* vector, int threshold, int* output, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    binarize_i32 <<<gridDim, blockDim, 0, stream>>> (vector, threshold, output, len);
}
}
// Same as binarize_i32 but for float data (output is 1.0f or 0.0f).
__global__ void binarize_f32 (float* vector, float threshold, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        output[idx] = vector[idx] > threshold ? 1 : 0;
    }
}
extern "C" {
// Host wrapper: launches binarize_f32 on `stream`, 1024 threads per block.
void VectorPacked_binarize_f32 (float* vector, float threshold, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    binarize_f32 <<<gridDim, blockDim, 0, stream>>> (vector, threshold, output, len);
}
}
// In-place affine update: y[i] = a*y[i] + b.
__global__ void aypb_i32 (int a, int* y, int b, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] = a * y[idx] + b;
    }
}
extern "C" {
// Host wrapper: launches aypb_i32 on `stream`, 1024 threads per block.
void VectorPacked_aypb_i32 (int a, int* y, int b, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    aypb_i32 <<<gridDim, blockDim, 0, stream>>> (a, y, b, len);
}
}
// Same as aypb_i32 but for float data.
__global__ void aypb_f32 (float a, float* y, float b, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] = a * y[idx] + b;
    }
}
extern "C" {
// Host wrapper: launches aypb_f32 on `stream`, 1024 threads per block.
void VectorPacked_aypb_f32 (float a, float* y, float b, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    aypb_f32 <<<gridDim, blockDim, 0, stream>>> (a, y, b, len);
}
}
// In-place scaling by an affine map of x: y[i] *= a*x[i] + b.
__global__ void axpb_y_i32 (int a, int* x, int b, int* y, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] *= a * x[idx] + b;
    }
}
extern "C" {
// Host wrapper: launches axpb_y_i32 on `stream`, 1024 threads per block.
void VectorPacked_axpb_y_i32 (int a, int* x, int b, int* y, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    axpb_y_i32 <<<gridDim, blockDim, 0, stream>>> (a, x, b, y, len);
}
}
// Same as axpb_y_i32 but for float data.
__global__ void axpb_y_f32 (float a, float* x, float b, float* y, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] *= a * x[idx] + b;
    }
}
extern "C" {
// Host wrapper: launches axpb_y_f32 on `stream`, 1024 threads per block.
void VectorPacked_axpb_y_f32 (float a, float* x, float b, float* y, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    axpb_y_f32 <<<gridDim, blockDim, 0, stream>>> (a, x, b, y, len);
}
}
// Elementwise multiply-accumulate: y[i] += x[i] * v[i].
__global__ void xvpy_i32 (int* x, int* v, int* y, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] += x[idx] * v[idx];
    }
}
extern "C" {
// Host wrapper: launches xvpy_i32 on `stream`, 1024 threads per block.
void VectorPacked_xvpy_i32 (int* x, int* v, int* y, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    xvpy_i32 <<<gridDim, blockDim, 0, stream>>> (x, v, y, len);
}
}
// Same as xvpy_i32 but for float data.
__global__ void xvpy_f32 (float* x, float* v, float* y, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] += x[idx] * v[idx];
    }
}
extern "C" {
// Host wrapper: launches xvpy_f32 on `stream`, 1024 threads per block.
void VectorPacked_xvpy_f32 (float* x, float* v, float* y, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    xvpy_f32 <<<gridDim, blockDim, 0, stream>>> (x, v, y, len);
}
}
// Accumulate x scaled by an affine map of v: y[i] += x[i] * (a*v[i] + b).
__global__ void x_avpb_py_i32 (int* x, int a, int* v, int b, int* y, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] += x[idx] * (a * v[idx] + b);
    }
}
extern "C" {
// Host wrapper: launches x_avpb_py_i32 on `stream`, 1024 threads per block.
void VectorPacked_x_avpb_py_i32 (int* x, int a, int* v, int b, int* y, int len, cudaStream_t stream) {
    // Bug fix: len <= 0 previously produced a zero/wrapped grid
    // dimension -- an invalid launch configuration.
    if (len <= 0) return;
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    x_avpb_py_i32 <<<gridDim, blockDim, 0, stream>>> (x, a, v, b, y, len);
}
}
// Same as x_avpb_py_i32 but for float data.
__global__ void x_avpb_py_f32 (float* x, float a, float* v, float b, float* y, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        y[idx] += x[idx] * (a * v[idx] + b);
    }
}
extern "C" {
// Host wrapper: launches x_avpb_py_f32 on `stream`, 1024 threads per block.
void VectorPacked_x_avpb_py_f32 (float* x, float a, float* v, float b, float* y, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    x_avpb_py_f32 <<<gridDim, blockDim, 0, stream>>> (x, a, v, b, y, len);
}
}
// Fast sigmoid-shaped squashing based on the softsign x/(1+|x|):
// output = 0.5 - 0.5*x/(1+|x|), mapping R onto (0,1). Note it is
// DECREASING in x, i.e. the mirror of the conventional sigmoid.
__global__ void sigmoid_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        float tmp = vector[idx];
        // Float-suffixed literals keep the arithmetic in single precision;
        // the old double literals (0.5, 1.0) forced a double round trip.
        output[idx] = 0.5f - 0.5f * tmp / (1.0f + (tmp < 0.0f ? -tmp : tmp));
    }
}
extern "C" {
// Host wrapper: launches sigmoid_f32 on `stream`, 1024 threads per block.
void VectorPacked_sigmoid_f32 (float* vector, float* output, int len, cudaStream_t stream) {
    if (len <= 0) return;  // avoid zero grid dimension (invalid launch)
    dim3 gridDim;
    dim3 blockDim;
    blockDim.x = 1024;
    gridDim.x = (len + blockDim.x - 1) / blockDim.x;
    sigmoid_f32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
}
}
// Derivative of the softsign-based sigmoid above: out = -0.5 / (1 + |x|)^2.
__global__ void sigmoidDeriv_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        // Float literals + fabsf avoid the double-precision round trip the
        // original 1.0/0.5 double literals forced on every element.
        float tmp = 1.0f + fabsf(vector[idx]);
        output[idx] = -0.5f / (tmp * tmp);
    }
}
extern "C" {
    // Host launcher: one thread per element, 1024-thread blocks, on `stream`.
    void VectorPacked_sigmoidDeriv_f32 (float* vector, float* output, int len, cudaStream_t stream) {
        dim3 gridDim;
        dim3 blockDim;
        blockDim.x = 1024;
        gridDim.x = (len + blockDim.x - 1) / blockDim.x;
        sigmoidDeriv_f32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
    }
}
// Softsign activation (a cheap tanh stand-in): out = x / (1 + |x|),
// mapping the real line into (-1, 1).
__global__ void tanh_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        // Float literals + fabsf avoid the double-precision promotion the
        // original 1.0 double literal forced on every element.
        float tmp = vector[idx];
        output[idx] = tmp / (1.0f + fabsf(tmp));
    }
}
extern "C" {
    // Host launcher: one thread per element, 1024-thread blocks, on `stream`.
    void VectorPacked_tanh_f32 (float* vector, float* output, int len, cudaStream_t stream) {
        dim3 gridDim;
        dim3 blockDim;
        blockDim.x = 1024;
        gridDim.x = (len + blockDim.x - 1) / blockDim.x;
        tanh_f32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
    }
}
// Derivative of the softsign activation above: out = 1 / (1 + |x|)^2.
__global__ void tanhDeriv_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        // Float literals + fabsf avoid the double-precision round trip the
        // original 1.0 double literals forced on every element.
        float den = 1.0f + fabsf(vector[idx]);
        output[idx] = 1.0f / (den * den);
    }
}
extern "C" {
    // Host launcher: one thread per element, 1024-thread blocks, on `stream`.
    void VectorPacked_tanhDeriv_f32 (float* vector, float* output, int len, cudaStream_t stream) {
        dim3 gridDim;
        dim3 blockDim;
        blockDim.x = 1024;
        gridDim.x = (len + blockDim.x - 1) / blockDim.x;
        tanhDeriv_f32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
    }
}
// Rectified linear unit: out = max(x, 0). NaN inputs map to 0, matching the
// original `x > 0 ? x : 0` (the comparison is false for NaN, and
// fmaxf(NaN, 0) is also 0).
__global__ void relu_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        // fmaxf with a float literal keeps the math in single precision; the
        // original 0.0 double literals promoted the ternary to double.
        output[idx] = fmaxf(vector[idx], 0.0f);
    }
}
extern "C" {
    // Host launcher: one thread per element, 1024-thread blocks, on `stream`.
    void VectorPacked_relu_f32 (float* vector, float* output, int len, cudaStream_t stream) {
        dim3 gridDim;
        dim3 blockDim;
        blockDim.x = 1024;
        gridDim.x = (len + blockDim.x - 1) / blockDim.x;
        relu_f32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
    }
}
// ReLU derivative: out = 1 where x > 0, else 0 (including at x == 0 and NaN).
__global__ void reluDeriv_f32 (float* vector, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        // Float literals keep the ternary in single precision; the original
        // 1.0/0.0 double literals promoted it to double.
        output[idx] = vector[idx] > 0.0f ? 1.0f : 0.0f;
    }
}
extern "C" {
    // Host launcher: one thread per element, 1024-thread blocks, on `stream`.
    void VectorPacked_reluDeriv_f32 (float* vector, float* output, int len, cudaStream_t stream) {
        dim3 gridDim;
        dim3 blockDim;
        blockDim.x = 1024;
        gridDim.x = (len + blockDim.x - 1) / blockDim.x;
        reluDeriv_f32 <<<gridDim, blockDim, 0, stream>>> (vector, output, len);
    }
}
// Asymmetric per-element error:
//  - target "on"  (ideal > threshold): err = 1 - v, scaled by scaleFoff when
//    the output also fell below threshold (missed activation);
//  - target "off" (ideal <= threshold): err = v^2, scaled by scaleFon when
//    the output rose above threshold (spurious activation).
__global__ void customErrorCalc_f32 (float* vector, float* ideal_vector, float threshold, float scaleFoff, float scaleFon, float* output, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        float vectorValue = vector[idx];
        if (ideal_vector[idx] > threshold) {
            // 1.0f (not 1.0) keeps the subtraction in single precision.
            output[idx] = 1.0f - vectorValue;
            if (vectorValue < threshold) {
                output[idx] *= scaleFoff;
            }
        } else {
            output[idx] = vectorValue * vectorValue;
            if (vectorValue > threshold) {
                output[idx] *= scaleFon;
            }
        }
    }
}
extern "C" {
    // Host launcher: one thread per element, 1024-thread blocks, on `stream`.
    void VectorPacked_customErrorCalc_f32 (float* vector, float* ideal_vector, float threshold, float scaleFoff, float scaleFon, float* output, int len, cudaStream_t stream) {
        dim3 gridDim;
        dim3 blockDim;
        blockDim.x = 1024;
        gridDim.x = (len + blockDim.x - 1) / blockDim.x;
        customErrorCalc_f32 <<<gridDim, blockDim, 0, stream>>> (vector, ideal_vector, threshold, scaleFoff, scaleFon, output, len);
    }
}
|
4,808 |
// CudafyByExample.ripple_gpu
extern "C" __global__ void thekernel( unsigned char* ptr, int ptrLen0, int ticks);
// CudafyByExample.ripple_gpu
// Renders one RGBA pixel of an animated radial ripple. The hard-coded 512
// recenters coordinates, so this assumes a 1024x1024 image — TODO(review):
// confirm against the host launch configuration.
extern "C" __global__ void thekernel( unsigned char* ptr, int ptrLen0, int ticks)
{
    int num = threadIdx.x + blockIdx.x * blockDim.x;
    int num2 = threadIdx.y + blockIdx.y * blockDim.y;
    int num3 = num + num2 * blockDim.x * gridDim.x;
    // Bounds guard: the buffer length ptrLen0 was passed but never checked,
    // so a grid larger than the image wrote out of bounds.
    if (num3 * 4 + 3 >= ptrLen0) return;
    float num4 = (float)(num - 512);
    float num5 = (float)(num2 - 512);
    float num6 = sqrtf(num4 * num4 + num5 * num5);
    // Damped cosine of the distance from the center, phase-shifted by time.
    unsigned char b = (unsigned char)(128.0f + 127.0f * cosf(num6 / 10.0f - (float)ticks / 7.0f) / (num6 / 10.0f + 1.0f));
    ptr[(num3 * 4)] = b;
    ptr[(num3 * 4 + 1)] = b;
    ptr[(num3 * 4 + 2)] = b;
    ptr[(num3 * 4 + 3)] = 255;
}
|
4,809 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
// Predicate functor: true for strictly positive values ("greater than zero").
struct maior_que_zero{
    __host__ __device__
    bool operator()(const double &x){
        return 0 < x;
    }
};
// Predicate functor: true for strictly negative values ("less than zero").
struct menor_que_zero{
    __host__ __device__
    bool operator()(const double &x){
        return 0 > x;
    }
};
// Reads a series of stock prices from stdin, then reports (1) how many days
// the price rose and (2) the average gain over only those rising days.
int main() {
    // Read until EOF. The original `while (std::cin)` tested the stream
    // *before* extracting, so the final failed read still pushed a stale
    // value and over-counted the element count by one.
    std::vector<double> stocks;
    double stock;
    while (std::cin >> stock) {
        stocks.push_back(stock);
    }
    int n = (int)stocks.size();
    // Need at least two prices to form one daily difference; this also
    // prevents constructing a device_vector of negative size.
    if (n < 2) {
        return 0;
    }
    thrust::device_vector<double> stocks_dev(stocks);
    thrust::device_vector<double> ganho_diario(n-1);
    // Daily gain: difference between each price and the previous one.
    thrust::transform(stocks_dev.begin() + 1, stocks_dev.end(), stocks_dev.begin(), ganho_diario.begin(), thrust::minus<double>());
    // Count how many days the price went up.
    int quantos_positivos = thrust::count_if(ganho_diario.begin(), ganho_diario.end(), maior_que_zero());
    std::cout << quantos_positivos << "\n";
    // Average gain counting only the days it rose: zero out negative gains,
    // sum, and divide by the number of positive days.
    thrust::replace_if(ganho_diario.begin(), ganho_diario.end(), ganho_diario.begin(), menor_que_zero(), 0);
    double somaganhos = thrust::reduce(ganho_diario.begin(), ganho_diario.end(), 0.0, thrust::plus<double>());
    double ganhomedio = somaganhos / quantos_positivos;
    std::cout << ganhomedio << "\n";
    return 0;
}
4,810 | // Copyright (c) 2019-2020, NVIDIA CORPORATION.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
// LOMBSCARGLE //
///////////////////////////////////////////////////////////////////////////////
// Single-precision Lomb-Scargle periodogram (direct O(x_shape * freqs_shape)
// evaluation with the classic tau phase correction). Each thread walks the
// frequency array with a grid stride; for every frequency it accumulates the
// trig sums over all samples, then evaluates the normalized power.
// y_dot[0] carries the normalization term: 0 means "no normalization".
template<typename T>
__device__ void _cupy_lombscargle_float( const int x_shape,
                                         const int freqs_shape,
                                         const T *__restrict__ x,
                                         const T *__restrict__ y,
                                         const T *__restrict__ freqs,
                                         T *__restrict__ pgram,
                                         const T *__restrict__ y_dot ) {
    const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int stride { static_cast<int>( blockDim.x * gridDim.x ) };
    T yD {};
    if ( y_dot[0] == 0 ) {
        yD = 1.0f;
    } else {
        yD = 2.0f / y_dot[0];
    }
    // Grid-stride loop: one output frequency bin per iteration.
    for ( int tid = tx; tid < freqs_shape; tid += stride ) {
        T freq { freqs[tid] };
        T xc {};
        T xs {};
        T cc {};
        T ss {};
        T cs {};
        T c {};
        T s {};
        // Accumulate the five sums over all samples in one pass.
        for ( int j = 0; j < x_shape; j++ ) {
            sincosf( freq * x[j], &s, &c );
            xc += y[j] * c;
            xs += y[j] * s;
            cc += c * c;
            ss += s * s;
            cs += c * s;
        }
        // tau decouples the sine and cosine terms (standard L-S derivation).
        T c_tau {};
        T s_tau {};
        T tau { atan2f( 2.0f * cs, cc - ss ) / ( 2.0f * freq ) };
        sincosf( freq * tau, &s_tau, &c_tau );
        T c_tau2 { c_tau * c_tau };
        T s_tau2 { s_tau * s_tau };
        T cs_tau { 2.0f * c_tau * s_tau };
        pgram[tid] = ( 0.5f * ( ( ( c_tau * xc + s_tau * xs ) * ( c_tau * xc + s_tau * xs ) /
                                  ( c_tau2 * cc + cs_tau * cs + s_tau2 * ss ) ) +
                                ( ( c_tau * xs - s_tau * xc ) * ( c_tau * xs - s_tau * xc ) /
                                  ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) *
                     yD;
    }
}
// Public float32 entry point; thin wrapper over the templated device routine.
// __launch_bounds__(512): at most 512 threads per block.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_lombscargle_float32( const int x_shape,
                                                                               const int freqs_shape,
                                                                               const float *__restrict__ x,
                                                                               const float *__restrict__ y,
                                                                               const float *__restrict__ freqs,
                                                                               float *__restrict__ pgram,
                                                                               const float *__restrict__ y_dot ) {
    _cupy_lombscargle_float<float>( x_shape, freqs_shape, x, y, freqs, pgram, y_dot );
}
// Double-precision variant of the Lomb-Scargle kernel above; identical
// structure, but with double literals and sincos/atan2 instead of the
// single-precision intrinsics. See _cupy_lombscargle_float for the algorithm.
template<typename T>
__device__ void _cupy_lombscargle_double( const int x_shape,
                                          const int freqs_shape,
                                          const T *__restrict__ x,
                                          const T *__restrict__ y,
                                          const T *__restrict__ freqs,
                                          T *__restrict__ pgram,
                                          const T *__restrict__ y_dot ) {
    const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int stride { static_cast<int>( blockDim.x * gridDim.x ) };
    T yD {};
    if ( y_dot[0] == 0 ) {
        yD = 1.0;
    } else {
        yD = 2.0 / y_dot[0];
    }
    // Grid-stride loop: one output frequency bin per iteration.
    for ( int tid = tx; tid < freqs_shape; tid += stride ) {
        T freq { freqs[tid] };
        T xc {};
        T xs {};
        T cc {};
        T ss {};
        T cs {};
        T c {};
        T s {};
        // Accumulate the five sums over all samples in one pass.
        for ( int j = 0; j < x_shape; j++ ) {
            sincos( freq * x[j], &s, &c );
            xc += y[j] * c;
            xs += y[j] * s;
            cc += c * c;
            ss += s * s;
            cs += c * s;
        }
        // tau decouples the sine and cosine terms (standard L-S derivation).
        T c_tau {};
        T s_tau {};
        T tau { atan2( 2.0 * cs, cc - ss ) / ( 2.0 * freq ) };
        sincos( freq * tau, &s_tau, &c_tau );
        T c_tau2 { c_tau * c_tau };
        T s_tau2 { s_tau * s_tau };
        T cs_tau { 2.0 * c_tau * s_tau };
        pgram[tid] = ( 0.5 * ( ( ( c_tau * xc + s_tau * xs ) * ( c_tau * xc + s_tau * xs ) /
                                 ( c_tau2 * cc + cs_tau * cs + s_tau2 * ss ) ) +
                               ( ( c_tau * xs - s_tau * xc ) * ( c_tau * xs - s_tau * xc ) /
                                 ( c_tau2 * ss - cs_tau * cs + s_tau2 * cc ) ) ) ) *
                     yD;
    }
}
// Public float64 entry point; thin wrapper over the templated device routine.
// __launch_bounds__(512): at most 512 threads per block.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_lombscargle_float64( const int x_shape,
                                                                               const int freqs_shape,
                                                                               const double *__restrict__ x,
                                                                               const double *__restrict__ y,
                                                                               const double *__restrict__ freqs,
                                                                               double *__restrict__ pgram,
                                                                               const double *__restrict__ y_dot ) {
    _cupy_lombscargle_double<double>( x_shape, freqs_shape, x, y, freqs, pgram, y_dot );
}
|
4,811 | #include "stdio.h"
#define N 128
// Element-wise addition of two N x N matrices: C = A + B (row-major).
// Expects a 2D launch with at least N threads along each axis; the guard
// handles any overhang.
__global__ void add(int *A, int *B, int *C)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N || col >= N)
        return;
    C[row * N + col] = A[row * N + col] + B[row * N + col];
}
// Abort-on-error helper: every CUDA call in the original ignored its status,
// which turns allocation/launch failures into silent garbage output.
#define CHECK(call) do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
        printf("CUDA error at %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err_)); \
        return 1; \
    } \
} while (0)
// Host driver: fills two N*N matrices, adds them on the GPU with a 2D launch,
// times the kernel with CUDA events and spot-checks two elements.
int main( void )
{
    int a[N * N], b[N * N], c[N * N];
    int *dev_a, *dev_b, *dev_c;
    for (int i = 0; i < N * N; ++i)
    {
        a[i] = -i;
        b[i] = i * i;
    }
    CHECK(cudaMalloc((void**)&dev_a, N * N * sizeof(int)));
    CHECK(cudaMalloc((void**)&dev_b, N * N * sizeof(int)));
    CHECK(cudaMalloc((void**)&dev_c, N * N * sizeof(int)));
    CHECK(cudaMemcpy(dev_a, a, N * N * sizeof(int), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dev_b, b, N * N * sizeof(int), cudaMemcpyHostToDevice));
    // (N/16)^2 threads per block, enough blocks to cover the N x N grid.
    dim3 threadsPerBlock(N / 16, N / 16);
    dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
    cudaEvent_t start, stop;
    CHECK(cudaEventCreate(&start));
    CHECK(cudaEventCreate(&stop));
    CHECK(cudaEventRecord(start, 0));
    add<<<numBlocks, threadsPerBlock>>>(dev_a, dev_b, dev_c);
    // Kernel launches do not return a status directly; pick up config errors.
    CHECK(cudaGetLastError());
    CHECK(cudaEventRecord(stop, 0));
    CHECK(cudaEventSynchronize(stop));
    float worktime;
    CHECK(cudaEventElapsedTime(&worktime, start, stop));
    printf("Time = %f ms \n", worktime);
    CHECK(cudaEventDestroy(start));
    CHECK(cudaEventDestroy(stop));
    CHECK(cudaMemcpy(c, dev_c, N * N * sizeof(int), cudaMemcpyDeviceToHost));
    printf("%d + %d = %d\n", a[0], b[0], c[0]);
    printf("%d + %d = %d\n", a[N * N - 1], b[N * N - 1], c[N * N - 1]);
    CHECK(cudaFree(dev_a));
    CHECK(cudaFree(dev_b));
    CHECK(cudaFree(dev_c));
    return 0;
}
|
4,812 | #ifndef uint32_t
#define uint32_t unsigned int
#endif
#define H0 0x67452301
#define H1 0xEFCDAB89
#define H2 0x98BADCFE
#define H3 0x10325476
#define H4 0xC3D2E1F0
// Rotate the 32-bit value x left by n bits (expects 1 <= n <= 31).
__device__
uint32_t rotl(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32 - n));
}
// Flatten a 3D grid of 1D blocks into a single linear global thread id.
__device__
uint32_t get_global_id() {
    uint32_t block = blockIdx.x
                   + blockIdx.y * gridDim.x
                   + blockIdx.z * gridDim.x * gridDim.y;
    return block * blockDim.x + threadIdx.x;
}
// SHA-1 partial-preimage brute-forcer. Each thread builds a candidate message
// of the form: prefix | thread_id (4 hex digits) | rnd (4 hex digits) |
// start (16 hex digits), runs 0x10000 SHA-1 compressions varying rnd, and
// reports any digest whose first word masks to zero.
//   start  - 64-bit counter, rendered as 16 ASCII hex chars in the message
//   prefix - plen words of preformatted message prefix
//   mask   - candidate matches when (A & mask) == 0 after the compression
//   match  - out: [0]=found flag, [1]=thread id, [2]=rnd of the hit
// NOTE(review): only the first chaining word (A+H0) is checked, and matches
// overwrite each other without atomics — presumably acceptable since any hit
// is re-verified on the host; confirm.
__global__ void crypt_kernel(ulong start, uint32_t *prefix, ulong plen, uint32_t mask, uint32_t *match){
    int t;
    uint32_t W[80], rnd, id, A,B,C,D,E,T1,T2;
    uint32_t Ws[16];   // per-thread message template; W[] is the live schedule
    id = get_global_id();
    //if (id == 0) {
    //    printf("%08x\n", start);
    //}
    // brutforce is build up as: prefix | thr_id:04x | <rnd>:04x | start:08x
    for (t = 0; t < plen; ++t) {
        Ws[t] = prefix[t];
        // printf("%04x", prefix[t]);
    }
    // printf("%04x\n", id);
    // The T1/T2 dance below converts a 16-bit value into 4 ASCII hex digits
    // packed in one 32-bit word: spread the nibbles, then add '0' (0x30) and
    // 0x27 to the digits that exceed '9' to reach 'a'-'f'.
    T1 = (id & 0xf) | (((id >> 4) & 0xf) << 8) | (((id >> 8) & 0xf) << 16) | (((id >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen] = T1 + 0x30303030 + T2 * 0x27;
    // High 32 bits of start, as 4 hex chars... (same nibble-spread encoding).
    T1 = (uint)(start >> 32);
    T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen + 2] = T1 + 0x30303030 + T2 * 0x27;
    // Low 32 bits of start, as 4 hex chars.
    T1 = (uint)start;
    T1 = (T1 & 0xf) | (((T1 >> 4) & 0xf) << 8) | (((T1 >> 8) & 0xf) << 16) | (((T1 >> 12) & 0xf) << 24);
    T2 = (T1 & 0xe0e0e0e);
    T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
    Ws[plen + 3] = T1 + 0x30303030 + T2 * 0x27;
    // SHA-1 padding: 0x80 terminator, zero fill, 64-bit bit length in Ws[15].
    Ws[plen + 4] = 0x80000000;
    for (t = plen + 5; t < 15; ++t) {
        Ws[t] = 0;
    }
    Ws[15] = 128 + 32 * plen;
    // preparing buffer done
    /*
    if (id == 0) {
        printf("%016x: ", start);
        for (t = 0; t < 16; ++t) {
            printf("%08x", Ws[t]);
        }
        printf(" - %u\n", Ws[15]);
    }
    */
    // Try all 65536 values of the rnd field; only W[plen+1] changes per try.
    for (rnd = 0; rnd < 0x10000; ++rnd) {
        // uint32_t digest[5];
        #pragma unroll
        for (t = 0; t < 16; ++t) {
            W[t] = Ws[t];
        }
        T1 = (rnd & 0xf) | (((rnd >> 4) & 0xf) << 8) | (((rnd >> 8) & 0xf) << 16) | (((rnd >> 12) & 0xf) << 24);
        T2 = (T1 & 0xe0e0e0e);
        T2 = ((((T2 >> 1) & T2) >> 2) | (((T2 >> 2) & T2) >> 1)) & 0x1010101;
        W[plen + 1] = T1 + 0x30303030 + T2 * 0x27;
        // Message schedule expansion (FIPS 180 W[16..79]).
        for (t = 16; t < 80; t++) {
            W[t] = rotl(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
        }
        A = H0;
        B = H1;
        C = H2;
        D = H3;
        E = H4;
        // The four 20-round stages of the SHA-1 compression function.
        for (t = 0; t < 20; t++) {
            T1 = (B & C) | ((~B) & D);
            T2 = rotl(A, 5) + T1 + E + 0x5A827999 + W[t];
            E = D; D = C; C = rotl(B, 30); B = A; A = T2;
        }
        for (t = 20; t < 40; t++) {
            T1 = B ^ C ^ D;
            T2 = rotl(A, 5) + T1 + E + 0x6ED9EBA1 + W[t];
            E = D; D = C; C = rotl(B, 30); B = A; A = T2;
        }
        for (t = 40; t < 60; t++) {
            T1 = (B & C) | (B & D) | (C & D);
            T2 = rotl(A, 5) + T1 + E + 0x8F1BBCDC + W[t];
            E = D; D = C; C = rotl(B, 30); B = A; A = T2;
        }
        for (t = 60; t < 80; t++) {
            T1 = B ^ C ^ D;
            T2 = rotl(A, 5) + T1 + E + 0xCA62C1D6 + W[t];
            E = D; D = C; C = rotl(B, 30); B = A; A = T2;
        }
        A += H0;
        if ((A & mask) == 0) {
            /*
            for (t = 0; t < 16; ++t) {
                printf("%08x", Ws[t]);
            }
            printf(" - %u\n", Ws[15]);
            */
            match[0] = 1;
            match[1] = id;
            match[2] = rnd;
        }
    }
}
4,813 | #include "includes.h"
using namespace std;
#define D 3
#define N 200
#define K 512
#define Nt 20
#define Rt 0.1f
#define c 0.001f
#define ct 0.0001f
// Advance "turtle" positions one time step: Q += P * ct for the D=3
// components owned by this thread.
__global__ void NextQTur(float* Qt, float* Pt) {
    // Each thread owns one 3-component point, so its base offset must be
    // 3 * threadIdx.x. The original used i = threadIdx.x and then touched
    // i, i+1, i+2, making neighbouring threads race on overlapping elements.
    int i = 3 * threadIdx.x;
    Qt[i + 0] += Pt[i + 0] * ct;
    Qt[i + 1] += Pt[i + 1] * ct;
    Qt[i + 2] += Pt[i + 2] * ct;
}
4,814 | #include "includes.h"
// Per-element compute: result[i] = sqrt(a[i] * b[i] / 12.34567) * sin(a[i]).
// Index assumes a fixed launch of (128 x gridDim.y) blocks of 256 threads
// with no tail guard — NOTE(review): confirm buffers are exactly that size.
__global__ void multiplyNumbersGPU(float *pDataA, float *pDataB, float *pResult)
{
    int tid = (blockIdx.y * 128 * 256) + blockIdx.x * 256 + threadIdx.x;
    // sqrtf/sinf + a float literal keep the whole expression in single
    // precision; the original sqrt/sin and 12.34567 promoted every element
    // to double.
    pResult[tid] = sqrtf(pDataA[tid] * pDataB[tid] / 12.34567f) * sinf(pDataA[tid]);
}
4,815 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
enum result {
Failure,
Success,
FirstInsert
};
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
// /*Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ void printPreorder(struct par_rbNode* );
__device__ void printInorder(struct par_rbNode* );
__device__ void printPostorder(struct par_rbNode* );
__device__ struct par_rbNode *nodes;
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling; // U might feel this is unncessary, but it will be used
__device__ int nodeIndex = 0;
__device__ int tmpIndex = 0;
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;
// Initialize the shared sentinel: NIL lives in slot 0 of the node pool,
// is colored black, carries key -1 and initially points at itself in
// every direction (parent is later redirected in createTree).
__device__ void createNIL(){
    NIL = &nodes[0];
    NIL->key = -1;
    NIL->color = BLACK;
    NIL->parent = NIL;
    NIL->left = NIL;
    NIL->right = NIL;
    printf("NIL created\n");
}
// Allocate a node from the preallocated pool under a global spin lock.
// The returned node has its `flag` (per-node lock) already set to true,
// i.e. the caller owns it. Returns via tmp[] so a paused thread still
// hands back the pointer it reserved.
// NOTE(review): the spin lock on createFlag has no memory fence on
// release (`createFlag = false`) — relies on atomicCAS/atomicAdd ordering;
// verify on the target architecture.
__device__ struct par_rbNode * createNode(int key){
    bool ok = false;
    // Spin until the CAS observes false (old value 0 => lock acquired).
    do{
        ok = atomicCAS(&createFlag,false,true); //Capture the lock
    }while(ok);
    // Claim the next pool slot and a matching tmp slot.
    atomicAdd(&nodeIndex,1);
    atomicAdd(&tmpIndex,1);
    nodes[nodeIndex].key = key;
    nodes[nodeIndex].flag = true;   // new node starts locked by its creator
    nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
    tmp[tmpIndex] = &nodes[nodeIndex];
    createFlag = false;             // release the allocation lock
    // atomicCAS(&createFlag,true,false); //Release the lock
    printf("Created %d\n",key);
    return tmp[tmpIndex]; // Even if this thread pauses it will eventually return the correct pointer
}
// Build the empty tree scaffold: a pseudo-root parent (rtParent) whose left
// child is the real root, and a pseudo-sibling (rtSibling) so that rotation
// and rebalancing code can always dereference parent/uncle pointers even at
// the top of the tree. The tree itself starts as NIL.
__device__ void createTree(){
    rtParent = createNode(-1);
    rtSibling = createNode(-1);
    // NIL = createNode(-1);
    root = NIL;
    rtParent->parent = NIL;
    rtSibling->parent = rtParent;
    rtSibling->right = NIL;
    rtSibling->left = NIL;
    rtParent->left = root;
    //rtParent->left = root; Why only left, y not right?
    //ANS: Since we check for left parent condition first
    //(if u don't understand, try to insert a node to a tree with only one node)
    rtParent->right = rtSibling;
    // Release the creation locks createNode() left set, and color the
    // scaffold black so it never participates in red-red fixups.
    rtParent->flag = false;
    rtSibling->flag = false;
    rtParent->color = BLACK;
    rtSibling->color = BLACK;
    // NIL->left = NIL;
    // NIL->right = NIL;
    NIL->parent = rtParent;
    NIL->flag = false;
    // NIL->color = BLACK;
    printf("Tree Created \n");
    printf("\n");
}
// Hand-over-hand search for the insertion point of `key`. Walks from the
// root, holding the per-node `flag` lock of the current node and acquiring
// the child's flag before releasing the parent's. Returns:
//   - the locked insertion-point node on success (newNode->parent is set),
//   - NIL when the tree is empty (root's flag is still held),
//   - NULL on key collision or lock contention (caller must retry).
// NOTE(review): if the while-loop ever exited normally, control would fall
// off the end of this non-void function (UB); all inner paths currently
// either return or continue with x != NIL, so the exit is unreachable.
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
    struct par_rbNode *x;
    // struct par_rbNode *inertPoint;
    // struct par_rbNode *savert;
    bool success;
    bool ok;
    // do{
    //  savert = root;
    //  success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
    // }while(!success);
    //An alternate for DCAS - should check if it works or not
    // do{
    //  savert = root;
    //  success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
    // }while(savert!=root || !success);
    do{
        // savert = root;
        success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
    }while(success);
    //success => captured the root flag
    //savert != root => root has changed
    //!success => root is under lock
    //thread will come out of the loop only after "success" and "savert==root"
    x = root;
    if(x != NIL){
        while(x != NIL){
            struct par_rbNode *y = x;   // y trails x by one step
            if(key == x->key) {
                x->flag = false; // Release the flag that was just caught
                return NULL; // Traversing is done. Node is already there so Insert() fails.
            }
            if(key < x->key){
                if(x->left != NIL){
                    // Lock the child before unlocking x (hand-over-hand).
                    ok = atomicCAS(&x->left->flag,false,true);
                    if(ok){
                        x->flag = false; // Release the flag of x
                        return NULL;     // contention: caller retries
                    }//end if
                    x->flag = false;
                    x = x->left;
                }else{
                    // Left slot is free: x is the insertion point.
                    newNode->parent = x;
                    x = x->left;
                    if(x == NIL){
                        printf("Insert Point %d\n",y->key);
                        return y;
                    }
                }//end if
            }else{
                if(x->right != NIL){
                    // Lock the child before unlocking x (hand-over-hand).
                    ok = atomicCAS(&x->right->flag,false,true);
                    if(ok){
                        x->flag = false;
                        return NULL;     // contention: caller retries
                    }//end if
                    x->flag = false;
                    x = x->right;
                }else{
                    // Right slot is free: x is the insertion point.
                    newNode->parent = x;
                    x = x->right;
                    if(x == NIL){
                        printf("Insert Point %d\n",y->key);
                        return y;
                    }
                }//end if
            }//end if
        }//end while
        // return x->parent;
    }else{
        return NIL;   // empty tree: caller inserts at the root
    }
}
// Link a freshly created node into the tree. On entry the caller holds the
// flags of newNode and of its prospective parent (set by Traverse).
// Returns FirstInsert when the tree was empty, Success after also acquiring
// the grandparent's and uncle's flags (needed by Insert_Rebalance), or
// Failure when the grandparent flag could not be taken (caller backs off
// and retries the whole insert).
__device__ enum result PlaceNode(struct par_rbNode *newNode){
    //flags on newNode and insPoint are held
    bool ok = true;
    // struct par_rbNode *uncle,*savep;
    if(newNode->parent == NIL){ //tree is empty
        // First node becomes the black root under the pseudo-root.
        newNode->color = BLACK;
        newNode->parent = rtParent;
        rtParent->left = newNode;
        root=newNode;
        NIL->flag = false; // release NIL node, that u caught during Traverse
        newNode->flag = false;
        newNode->left = newNode->right = NIL;
        return FirstInsert;
    }else{ // the tree is not empty so...
        // newNode->parent = insPoint;
        //set the flags of the grandparent and uncle
        // struct par_rbNode *insPoint = newNode->parent;
        printf("%d\n",newNode->parent->key);
        printf("%d\n",newNode->parent->parent->left->key);
        if(newNode->parent == newNode->parent->parent->left){ //uncle is right child
            printf("Insert Key %d\n",newNode->parent->key);
            // savep = newNode->parent->parent; // save parent ptr
            // uncle = savep->right; // rtSibling is used here, when newNode->parent is root
            // Try grandparent first, then the (right) uncle; undo on failure.
            ok = atomicCAS(&newNode->parent->parent->flag,false,true);
            if(!ok){
                ok = atomicCAS(&newNode->parent->parent->right->flag,false,true);
                // if(ok){
                //  ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
                // }
                if(ok){ //back off
                    newNode->parent->parent->flag = false;
                    newNode->parent->parent->right->flag = false;
                }else{
                    newNode->parent->parent->flag = false;
                }//end if
            }
        }else{// uncle is left child
            // savep = newNode->parent->parent; // save parent ptr
            // uncle = savep->left;
            // Mirror case: grandparent first, then the (left) uncle.
            ok = atomicCAS(&newNode->parent->parent->flag,false,true);
            if(!ok){
                ok = atomicCAS(&newNode->parent->parent->left->flag,false,true);
                // if(ok){
                //  ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
                // }
                if(ok){ //back off
                    newNode->parent->parent->flag = false;
                    newNode->parent->parent->left->flag = false;
                }else{
                    newNode->parent->parent->flag = false;
                }//end if
            }
        }//end if
        if(ok){
            // This "!ok" is when u fail to capture the grandparent flag,
            // u haven't caught any extra flags so just get rid of the flag of newNode->parent
            newNode->parent->flag = false; // release flag
            newNode->parent = NIL;
            return Failure; //avoid deadlock
        }
        // When u have successfully captured all the required flags.
        // i.e. parent, grandparent, uncle
        if(newNode->key < newNode->parent->key){
            //insert as left child
            newNode->parent->left = newNode;
            return Success;
        }else{//insertas right child
            newNode->parent->right = newNode;
            printf("THE OK = %d\n",ok);
            return Success;
        }
    }
}
// Restore the red-black invariants after inserting x. On entry the caller
// holds the flags of x, its parent, grandparent and uncle. Repeatedly runs
// Update_Rotation; after a CASE 1 recolor it climbs two levels and
// re-acquires the new parent/grandparent/uncle flags (spinning, no backoff).
// On exit all remaining flags for the final case are released and the root
// is forced black.
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
    //we hold flags on x, p(x), p(p(x)), and uncle(x)
    struct par_rbNode *oldx;
    struct par_rbNode *uncle, *olduncle;
    // struct par_rbNode *savep, *savegp;
    struct par_rbNode *brother;
    struct par_rbNode *nephew;
    bool ok;
    bool updateSucceeds; //Update-Rotation successded?
    //caseF is short for caseFlag (avoiding confusion between global enum and local variable)
    enum caseFlag caseF = NOOP; // initially not doing any case
    //define uncle for first iteration
    if(x->parent == x->parent->parent->left){
        uncle = x->parent->parent->right;
    }else{ // uncle is the left child not right
        uncle = x->parent->parent->left;
    }
    // Loop while a red-red violation between x and its parent remains.
    while((x != root) && (x->parent->color == RED)){
        //do color-update and/or rotaion as required
        do{
            updateSucceeds = Update_Rotation(x,&caseF);
        }while(!updateSucceeds);
        //CASE 1: move to grandparent after color update
        if(caseF == DID_CASE1){
            oldx = x; //save pointer to the old x
            olduncle = uncle; // save pointer to old uncle;
            x = x->parent->parent; // up to grandparent
            do{ //find new uncle of x and get flags
                if(x->parent == x->parent->parent->left){
                    // savep = x->parent;
                    // savegp = savep->parent;
                    // uncle = savegp->right;
                    // Acquire parent, grandparent, right-uncle in order;
                    // release everything taken so far on any failure.
                    ok = atomicCAS(&x->parent->flag,false,true);
                    if(!ok){
                        ok = atomicCAS(&x->parent->parent->flag,false,true);
                        if(!ok){
                            ok = atomicCAS(&x->parent->parent->right->flag,false,true);
                            if(ok){
                                x->parent->flag = false;
                                x->parent->parent->flag = false;
                                x->parent->parent->right->flag = false;
                            }else{
                                x->parent->flag = false;
                                x->parent->parent->flag = false;
                            }
                        }else{
                            x->parent->flag = false;
                        }
                    }
                }else{
                    // savep = x->parent;
                    // savegp = savep->parent;
                    // uncle = savegp->left;
                    // Mirror: parent, grandparent, left-uncle.
                    ok = atomicCAS(&x->parent->flag,false,true);
                    if(!ok){
                        ok = atomicCAS(&x->parent->parent->flag,false,true);
                        if(!ok){
                            ok = atomicCAS(&x->parent->parent->left->flag,false,true);
                            if(ok){
                                x->parent->flag = false;
                                x->parent->parent->flag = false;
                                x->parent->parent->left->flag = false;
                            }else{
                                x->parent->flag = false;
                                x->parent->parent->flag = false;
                            }
                        }else{
                            x->parent->flag = false;
                        }
                    }
                }
            }while(ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
            //Release old flags for CASE 1
            oldx->parent->flag = false;
            olduncle->flag = false;
            oldx->flag = false;
        }
        //in CASE 3 loop will exit: parent will be BLACK
    }
    // Release whatever set of flags the final iteration left held.
    switch(caseF){
        case NOOP: //In the beginning of this function we had
            //x,p(x),p(p(x)),uncle(x) - release them
            x->parent->parent->flag = false;
            x->parent->flag = false;
            uncle->flag = false;
            x->flag = false;
            break;
        case DID_CASE1: //Release the last set of flags acquired
            x->parent->parent->flag = false;
            x->parent->flag = false;
            uncle->flag = false;
            x->flag = false;
            break;
        case DID_CASE3: //release flags on ROTATED x, etc
            if(x == x->parent->left){
                brother = x->parent->right;
                nephew = x->parent->right->right;
            }else{
                brother = x->parent->left;
                nephew = x->parent->left->left;
            }
            x->parent->flag = false;
            brother->flag = false;
            nephew->flag = false;
            x->flag = false;
            break;
    }
    // printf("last %d\n",x->key);
    root->color = BLACK;
}
// One step of the insertion fixup. Caller holds flags on x, p(x), p(p(x))
// and uncle(x). Performs:
//   CASE 1 - red uncle: recolor only, sets *caseF = DID_CASE1;
//   CASE 2 - inner child: pre-rotation at the parent, then falls into
//   CASE 3 - outer child: rotation at the grandparent (needs the
//            great-grandparent's flag, spun for without backoff),
//            recolor, sets *caseF = DID_CASE3.
// Returns true when the update/rotation completed, false so the caller
// retries. The two branches are exact mirrors (parent left vs right child).
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
    //we hold flags on x, p(x), p(p(x)) and uncle(x)
    struct par_rbNode *xUncle;
    struct par_rbNode *oldx; //*ggp; // ggp -> greatgrandparent
    bool ok;
    if(x->parent == x->parent->parent->left){
        //the parent is a left child
        xUncle = x->parent->parent->right;
        if(xUncle->color == RED){
            //CASE 1 - recoloring
            // U have all the flags u need. So this is simple, similar to serial code
            x->parent->color = BLACK;
            xUncle->color = BLACK;
            x->parent->parent->color = RED;
            *caseF = DID_CASE1;
            return true; // This true is for "updateSucceeds"
        }else{ // rotation(s) will be needed
            if(x == x->parent->right){//CASE2
                oldx = x; // save old x in case rotate fails
                x = x->parent;
                ok = Left_Rotate(x);
                if(!ok){
                    x = oldx; //undo change to x
                    return false; //This false is for "updateSucceeds"
                }
            }
            //In CASE 3, if the right-rotation fails,
            //CASE 3 fails but the algorithm still works
            //beacuse the process will return false to
            //Insert_Rebalance, and Insert_Rebalance will
            //call Update_Rotation again to complete CASE3
            do{ // get great grandparent's flag
                // ggp = x->parent->parent->parent;
                ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
            }while(ok); //KEEPS TRYING, DOESN'T BACK OFF
            ok = Right_Rotate(x->parent->parent);
            if(!ok){
                x->parent->parent->parent->flag = false;
                return false; //This false is for "updateSucceeds"
            }else{
                x->parent->color = BLACK;
                x->parent->right->color = RED;
                *caseF = DID_CASE3;
                x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
                return true;
            }
        }
    //symmetric to above code
    }else{
        //the parent is a right child
        xUncle = x->parent->parent->left;
        if(xUncle->color == RED){
            //CASE 1 - recoloring
            // U have all the flags u need. So this is simple, similar to serial code
            x->parent->color = BLACK;
            xUncle->color = BLACK;
            x->parent->parent->color = RED;
            *caseF = DID_CASE1;
            return true;
        }else{ // rotation(s) will be needed
            if(x == x->parent->left){//CASE2
                oldx = x; // save old x in case rotate fails
                x = x->parent;
                ok = Right_Rotate(x);
                if(!ok){
                    x = oldx; //undo change to x
                    return false;
                }
            }
            //In CASE 3, if the left-rotation fails,
            //CASE 3 fails but the algorithm still works
            //beacuse the process will return false to
            //Insert_Rebalance, and Insert_Rebalance will
            //call Update_Rotation again to complete CASE3
            do{ // get great grandparent's flag
                // ggp = x->parent->parent->parent;
                ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
            }while(ok);
            ok = Left_Rotate(x->parent->parent);
            if(!ok){
                x->parent->parent->parent->flag = false;
                return false;
            }else{
                x->parent->color = BLACK;
                x->parent->left->color = RED;
                *caseF = DID_CASE3;
                x->parent->parent->parent->flag = false;
                return true;
            }
        }
    }
}
// A rotation always reports success (true) because control can only reach
// the pointer updates after all the required flags have been captured.
// Left-rotate the subtree rooted at z: z's right child zr takes z's place,
// z becomes zr's left child, and zr's old left subtree (zrl) becomes z's
// right subtree. Handles three placements of z: directly under the
// pseudo-root (rotating at the tree root), as a left child, as a right child.
__device__ bool Left_Rotate(struct par_rbNode *z){
    //z is the root of the rotation subtree. The locks
    // held at this point are : z,z->parent and z->right (and sibling of z but its not useful here)
    // bool ok;
    struct par_rbNode *zrl,*zr;
    if(z->parent == rtParent){
        //rotating at the root
        zrl = z->right->left;
        zr = z->right;
        // if a process has set the flag of a node q,
        //no other process can move one of the children of q away from q
        zrl->parent = z;
        z->right = zrl;
        // ok = CAS3(z->right,zrl,z->right,
        //          z->right,z,zrl->parent,
        //          zrl,zrl,z->right->left);
        //update other links
        root = zr;
        rtParent->left = root;
        root->parent = rtParent;
        z->parent = root;
        root->left = z;
    }else{
        //rotating under the root (parent, etc . exist)
        if(z == z->parent->left){
            //z is left child
            zrl = z->right->left;
            zr = z->right;
            // if a process has set the flag of a node q,
            //no other process can move one of the children of q away from q
            zrl->parent = z;
            z->right = zrl;
            //update other links
            z->parent->left = zr;
            z->right->parent = z->parent;
            z->parent = zr;
            z->right->left = z;
        }else{
            // z is right child
            zrl = z->right->left;
            zr = z->right;
            // if a process has set the flag of a node q,
            //no other process can move one of the children of q away from q
            zrl->parent = z;
            z->right = zrl;
            //update other links
            z->parent->right = zr;
            z->right->parent = z->parent;
            z->parent = zr;
            z->right->left = z;
        }
    }
    return true;
}
// Symmetric to Left_Rotate: right-rotate the subtree rooted at z. z's left
// child takes z's place, z becomes its right child, and the old
// left-child's right subtree becomes z's left subtree.
// (Local names zrl/zr are reused from Left_Rotate; here they actually hold
// z->left->right and z->left.)
__device__ bool Right_Rotate(struct par_rbNode *z){
    //z is the root of the rotation subtree. The locks
    // held at this point are : z,z->parent and z->left (and sibling of z but its not useful here)
    // bool ok;
    struct par_rbNode *zrl,*zr;
    if(z->parent == rtParent){
        //rotating at the root
        zrl = z->left->right;
        zr = z->left;
        // if a process has set the flag of a node q,
        //no other process can move one of the children of q away from q
        zrl->parent = z;
        z->left = zrl;
        // ok = CAS3(z->left,zrl,z->left,
        //          z->left,z,zrl->parent,
        //          zrl,zrl,z->left->right);
        //update other links
        root = zr;
        rtParent->right = root;
        root->parent = rtParent;
        z->parent = root;
        root->right = z;
    }else{
        //rotating under the root (parent, etc . exist)
        if(z == z->parent->right){
            //z is right child
            zrl = z->left->right;
            zr = z->left;
            // if a process has set the flag of a node q,
            //no other process can move one of the children of q away from q
            zrl->parent = z;
            z->left = zrl;
            //update other links
            z->parent->right = zr;
            z->left->parent = z->parent;
            z->parent = zr;
            z->left->right = z;
        }else{
            // z is left child
            zrl = z->left->right;
            zr = z->left;
            // if a process has set the flag of a node q,
            //no other process can move one of the children of q away from q
            zrl->parent = z;
            z->left = zrl;
            //update other links
            z->parent->left = zr;
            z->left->parent = z->parent;
            z->parent = zr;
            z->left->right = z;
        }
    }
    return true;
}
// Insert entry point. Currently only allocates the node (which leaves its
// flag held); the actual Traverse/PlaceNode/Insert_Rebalance retry loop is
// commented out below, so as written this leaks a locked node per call —
// NOTE(review): presumably disabled for debugging; re-enable before use.
__device__ void Insert(int key){
    struct par_rbNode *newNode = createNode(key); //Internally the flag of the newNode is held
    // struct par_rbNode *insertPoint;
    // // Create and initialize the new node
    // enum result res = Failure;
    // //insert the new node
    // do{
    //  //Traverse tree to find insertion point
    //  insertPoint = Traverse(newNode,key);
    //  if(insertPoint != NULL){
    //      //add new node to tree
    //      // printf("Placing Node\n");
    //      res = PlaceNode(newNode);
    //      printf("res = %d\n",res);
    //      // res is short for result (avoiding confusion b/w global enum and local variable)
    //      if(res == Success){
    //          printf("rebalance\n");
    //          //node was added succcessfully so make
    //          //tree red-black again by doing the
    //          //necessary color updates and rotations
    //          Insert_Rebalance(newNode);
    //      }
    //  }else{
    //      printf("Key Exists\n");
    //      res = Success;
    //      break;
    //  }
    // }while(res == Failure);
    // printf("PreOrder: ");
    // printPreorder(root);
    // printf("\n");
    // printf("\n");
    // printf("InOrder: ");
    // printInorder(root);
    // printf("\n");
    // printf("\n");
    // printf("PostOrder: ");
    // printPostorder(root);
    // printf("\n");
    // printf("\n");
}
//Functions for printing the tree
/* Pre-order dump of the subtree rooted at `node`: key-color pairs, space
 * separated. Recursion stops at the shared NIL sentinel. */
__device__ void printPreorder(struct par_rbNode* node)
{
    if (node != NIL) {
        /* visit the node itself first */
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
        /* then both subtrees, left before right */
        printPreorder(node->left);
        printPreorder(node->right);
    }
}
/* In-order dump of the subtree rooted at `node` (sorted key order for a
 * valid BST). Recursion stops at the shared NIL sentinel. */
__device__ void printInorder(struct par_rbNode* node)
{
    if (node != NIL) {
        /* left subtree, then the node, then the right subtree */
        printInorder(node->left);
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
        printInorder(node->right);
    }
}
/* Post-order dump of the subtree rooted at `node`: children before the
 * node itself. Recursion stops at the shared NIL sentinel. */
__device__ void printPostorder(struct par_rbNode* node)
{
    if (node != NIL) {
        /* both subtrees first */
        printPostorder(node->left);
        printPostorder(node->right);
        /* the node last */
        printf("%d-", node->key);
        printf("%d", node->color);
        printf(" ");
    }
}
// Number of threads that have completed their insertions (bumped with atomicAdd).
__device__ int threadsFinished = 0;
// Non-zero once thread 0 has finished building the tree skeleton; other
// threads may poll it before inserting (the polling loop is currently disabled).
__device__ int passCreate = 0;
/*
 * Kernel entry point: thread 0 initializes the global node pool, sentinel,
 * and root (createNIL/createTree), then every thread performs three fixed
 * insertions.
 * NOTE(review): threads other than 0 do not wait for passCreate before
 * calling Insert (the wait loop is commented out) and the
 * threadsFinished == threadCount check reads a device global without
 * synchronization — both are only safe for the current <<<1,1>>> launch;
 * confirm before widening the grid.
 */
__global__ void RBT(struct par_rbNode *d_nodes) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int threadCount = gridDim.x*blockDim.x;
    if(id == 0){
        printf("Starting the Tree\n");
        nodes = d_nodes; // Make it a global variable
        createNIL();
        createTree();
        atomicAdd(&passCreate,1);
    }
    Insert(5);
    Insert(6);
    Insert(4);
    // while(1){
    //     if(passCreate){
    //         Insert(id);
    //         break;
    //     }
    // }
    // //Print the time
    // //This will keep track of number of threads that are done
    atomicAdd(&threadsFinished,1);
    // // //Print the tree after all the threads are done
    if(threadsFinished == threadCount){
        if(id == 0){
            // printf("PreOrder: ");
            // printPreorder(root);
            // printf("\n");
            // printf("\n");
            // printf("InOrder: ");
            // printInorder(root);
            // printf("\n");
            // printf("\n");
            // printf("PostOrder: ");
            // printPostorder(root);
            // printf("\n");
            // printf("\n");
        }
    }
    //return to main
}
/*
 * Host driver: initialize M nodes, copy them to the device, run the RBT
 * kernel, copy the nodes back, and report the elapsed time.
 * Returns 0 on success, 1 on a CUDA failure.
 */
int main() {
    struct par_rbNode h_nodes[M];
    struct par_rbNode *d_nodes;
    float time;
    cudaError_t err;
    // 1. Allocate device array.
    err = cudaMalloc(&d_nodes, M * sizeof(struct par_rbNode));
    if (err != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Every node starts red with its ownership flag released.
    for(int i=0;i<M;i++){
        h_nodes[i].flag = false;
        h_nodes[i].color = RED;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // 2. Copy array contents from host to device.
    cudaMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), cudaMemcpyHostToDevice);
    printf("Kernel Launched\n");
    cudaEventRecord(start, 0);
    RBT<<<1,1>>>(d_nodes);
    // Surface launch-configuration errors immediately.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    printf("Came back\n");
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    // Release device memory and timing events (previously leaked).
    cudaFree(d_nodes);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
4,816 | #include<stdio.h>
#include<stdlib.h>
#include<sys/types.h>
#include<sys/stat.h>
#include<fcntl.h>
#include<unistd.h>
#include<errno.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define NXPROB 288 /* x dimension of problem grid */
#define NYPROB 288 /* y dimension of problem grid */
#define STEPS 100 /* number of time steps */
#define MAXWORKER 16 /* maximum number of worker tasks */
#define MINWORKER 1 /* minimum number of worker tasks */
/*
 * One Jacobi step of the 2D heat equation on an NXPROB x NYPROB grid.
 * `u` holds two back-to-back buffers; `time` selects which one is read and
 * which is written (ping-pong). Boundary cells are left untouched.
 * BUG FIX: the second derivative term previously used +/-NYPROB (row
 * neighbors) for the column direction as well, so both terms were the row
 * stencil; column neighbors are at offsets +/-1.
 */
__global__ void updateU(float * u, int time)
{
    /* i - row and j - column */
    int i = (blockIdx.x*blockDim.x + threadIdx.x);
    int j = (blockIdx.y*blockDim.y + threadIdx.y);
    if (i >= NXPROB || j >= NYPROB) return;
    /* boundary cells hold fixed values */
    if (i == 0 || j == 0 || j == NYPROB - 1 || i == NXPROB - 1) return;
    int offsetw = (time % 2 == 0) ? NXPROB*NYPROB : 0;   /* write buffer */
    int offsetr = (offsetw == 0) ? NYPROB*NXPROB : 0;    /* read buffer  */
    u[offsetw + i*NYPROB + j] = u[offsetr + i*NYPROB + j]
        + 0.1f * (u[offsetr + (i+1)*NYPROB + j] + u[offsetr + (i-1)*NYPROB + j] - 2.0f * u[offsetr + i*NYPROB + j])
        + 0.1f * (u[offsetr + i*NYPROB + (j+1)] + u[offsetr + i*NYPROB + (j-1)] - 2.0f * u[offsetr + i*NYPROB + j]);
}
/*
 * Host driver for the heat stencil: build a double-buffered grid, run
 * STEPS iterations of updateU on the GPU, and report the elapsed time.
 * Returns 0 on success, a negative code on CUDA failures.
 */
int main(int argc, char* argv[])
{
    printf("Cuda-Start\n");
    cudaDeviceProp deviceProp;
    cudaError error;
    cudaGetDeviceProperties(&deviceProp, 0);
    int sum_mem = 2 * NYPROB*NXPROB;   /* two buffers: read + write */
    cudaEvent_t start, stop;
    float time = 0;
    int i;
    float * u = (float *)malloc(sizeof(float)*sum_mem);
    float * device_u;
    if (u == NULL){ fprintf(stderr, "Failed to allocate host matrix\n"); return -1; }
    /* First buffer: zero on the borders, random values inside. */
    for (i = 0; i<sum_mem / 2; i++){
        if ((i%NXPROB == 0) || (i%NYPROB == NYPROB - 1)){
            u[i] = 0;
            continue;
        }
        if (i < NXPROB){
            u[i] = 0;
            continue;
        }
        if ((i<NYPROB*NXPROB) && (i>(NXPROB-1)*NYPROB)){
            u[i] = 0;
            continue;
        }
        u[i] = rand() % 100;
    }
    /* Second (write) buffer starts zeroed. */
    for (i = sum_mem / 2; i < sum_mem; i++){
        u[i] = 0;
    }
    error = cudaMalloc(&device_u, sum_mem*sizeof(float));
    if (error != cudaSuccess){ fprintf(stderr, "Failed to allocate memory for matrix %s\n", cudaGetErrorString(error)); free(u); return -4; }
    error = cudaMemcpy(device_u, u, sum_mem*sizeof(float), cudaMemcpyHostToDevice);
    if (error != cudaSuccess){ fprintf(stderr, "Failed to copy matrix to device: %s\n", cudaGetErrorString(error)); fflush(stderr); return -7; }
    /* Largest square block edge that fits maxThreadsPerBlock. */
    int root;
    for (root = 2; root*root <= deviceProp.maxThreadsPerBlock; root++)
        if (root*root == deviceProp.maxThreadsPerBlock) break;
    if (root*root>deviceProp.maxThreadsPerBlock) root--;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess){ fprintf(stderr, "Failure(time): %s\n", cudaGetErrorString(error));fflush(stderr); return-17; }
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess){ fprintf(stderr, "Failure(time): %s\n", cudaGetErrorString(error));fflush(stderr); return-17; }
    dim3 threadsPerBlock(root, root);
    int blockDimX = (NXPROB%root==0) ? (NXPROB / root) : (NXPROB / root + 1);
    int blockDimY = (NYPROB%root==0) ? (NYPROB / root) : (NYPROB / root + 1);
    dim3 numOfBlocks(blockDimX, blockDimY, 1);
    cudaEventRecord(start);
    for (i = 0; i<STEPS; i++){
        updateU <<< numOfBlocks, threadsPerBlock>>>(device_u,i);
        error = cudaGetLastError();
        if (error != cudaSuccess){ fprintf(stderr, "Error in steps call %s\n", cudaGetErrorString(error));fflush(stderr); return -11; }
    }
    cudaEventRecord(stop);
    error = cudaMemcpy(u, device_u, sum_mem*sizeof(float), cudaMemcpyDeviceToHost);
    if (error != cudaSuccess){ fprintf(stderr, "Failed to copy matrix(to host): %s\n", cudaGetErrorString(error));fflush(stderr); return -8; }
    /* Make sure the stop event has completed before reading the timer. */
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time %f \n", time);
    /* Release device and host resources (previously leaked). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(device_u);
    free(u);
    return 0;
}
|
4,817 | #include "cuda_runtime.h"
#include "math.h"
// Squared per-channel distance between two packed 0xRRGGBB pixels.
__device__ int diff(int a, int b)
{
    int dr = ((16711680 & a) - (16711680 & b)) >> 16; // red channel delta
    int dg = ((65280 & a) - (65280 & b)) >> 8;        // green channel delta
    int db = (255 & a) - (255 & b);                   // blue channel delta
    return dr*dr + dg*dg + db*db;
}
// Weighted absolute per-channel distance between two packed 0xRRGGBB
// pixels; weights[0..2] scale the red/green/blue deltas respectively.
__device__ int diff_advanced(int a, int b, const float* weights)
{
    int dr = abs((16711680 & a) - (16711680 & b)) >> 16;
    int dg = abs(((65280 & a) - (65280 & b)) >> 8);
    int db = abs((255 & a) - (255 & b));
    return (int)((float)dr * weights[0] + (float)dg * weights[1] + (float)db * weights[2]);
}
//__device__ int diff_advanced(int a, int b, const int* weights)
//{
// return (((16711680 & a) - (16711680 & b)) >> 16) * (((16711680 & a) - (16711680 & b)) >> 16) * weights[0]
// + (((65280 & a) - (65280 & b)) >> 8) * (((65280 & a) - (65280 & b)) >> 8) * weights[1]
// + ((255 & a) - (255 & b)) * ((255 & a) - (255 & b)) * weights[2];
//}
/*
 * For each grid cell (one block per cell), find the `top` best-matching
 * tiles by squared-RGB distance and write their indices/scores.
 * `best`/`topscores` form a block-shared top-`top` list kept sorted
 * ascending by score; each thread scans `checks` candidate tiles.
 * NOTE(review): threads update the shared list concurrently without
 * atomics, so results are best-effort under contention — preserved as-is.
 * Fixes: the device-heap arrays were never freed (leak per launch), the
 * `best` slots were published uninitialized, and a stray `;;` is removed.
 */
__global__ void kernel(const int* tiles, const int* grid, int checks, int tilewidth, int* scores, int* bests, int tileN, int gridN, int count, int top)
{
    __shared__ int* best;
    __shared__ int* topscores;
    int block = gridDim.x * blockIdx.y + blockIdx.x;
    if (threadIdx.x == 0) {
        // Device-heap allocations; freed by thread 0 before the block exits.
        best = new int[top];
        topscores = new int[top];
        for (int i = 0; i < top; i++) {
            topscores[i] = INT_MAX;
            best[i] = -1;   // fix: never publish uninitialized tile indices
        }
    }
    __syncthreads();
    if (block < count) {
        for (int c = 0; c < checks; c++) {
            int t = ((threadIdx.x * checks) + c) * tilewidth;
            if (t < tileN) {
                int g = block * tilewidth;
                if (g < gridN) {
                    int score = 0;
                    int i = 0;
                    // Early-out once this candidate cannot enter the list.
                    while (i < tilewidth && score < topscores[top - 1]) {
                        score += diff(tiles[t + i], grid[g + i]);
                        i++;
                    }
                    if (score < topscores[top - 1]) {
                        int besttile = (threadIdx.x * checks) + c;
                        // Insertion sort into the shared top list.
                        for (int i = 0; i < top; i++) {
                            if (score < topscores[i]) {
                                int temp = topscores[i];
                                topscores[i] = score;
                                score = temp;
                                temp = best[i];
                                best[i] = besttile;
                                besttile = temp;
                            }
                        }
                    }
                }
            }
        }
    }
    __syncthreads();
    if (threadIdx.x == 0) {
        if (block < count) {
            for (int i = 0; i < top; i++) {
                bests[block * top + i] = best[i];
                scores[block * top + i] = topscores[i];
            }
        }
        // Fix: release the device-heap arrays (previously leaked).
        delete[] best;
        delete[] topscores;
    }
}
/*
 * Score every (grid cell, tile) pair with the weighted channel distance.
 * One thread handles exactly one pair: thread `index` scores tile
 * `index % tilecount` against grid cell `index / tilecount`, writing the
 * raw score and the tile index for a later host-side reduction.
 */
__global__ void kernel_advanced(const int* tiles, const int* grid, const int tilecount, const int gridcount, const int tilewidth, const float* weights, int* scores, int* bests)
{
    int block = gridDim.x * blockIdx.y + blockIdx.x;
    int index = blockDim.x * block + threadIdx.x;
    int gridindex = index / tilecount;   // which grid cell
    int tileindex = index % tilecount;   // which candidate tile
    if (gridindex < gridcount) {
        int t = tileindex * tilewidth;
        int g = gridindex * tilewidth;
        int score = 0;
        for (int i = 0; i < tilewidth; i++)
            score += diff_advanced(tiles[t + i], grid[g + i], weights);
        scores[index] = score;
        bests[index] = tileindex;
    }
    __syncthreads();
}
//__global__ void kernel_advanced(const int* tiles, const int* grid, int checks, int tilewidth, int* scores, int* bests, int tileN, int gridN, int blocks, int dither, const int* weights, const int threads)
//{
// int block = gridDim.x * blockIdx.y + blockIdx.x;
// if (block < blocks) {
// scores[block * threads + threadIdx.x] = INT_MAX;
// for (int c = 0; c < checks; c++) {
// int t = ((threadIdx.x * checks) + c) * tilewidth;
// if (t < tileN) {
// int g = block * tilewidth;
// if (g < gridN) {
// int score = 0;
// int i = 0;
// while (i < tilewidth && score < scores[block * threads + threadIdx.x]) {
// score += diff_advanced(tiles[t + i], grid[g + i], weights);
// i++;
// }
// if (score < scores[block * threads + threadIdx.x]) {
// scores[block * threads + threadIdx.x] = score;
// bests[block * threads + threadIdx.x] = (threadIdx.x * checks) + c;
// }
// }
// }
// }
// }
//}
// Host entry point: nothing runs on the host side here; the kernels above
// are compiled for use elsewhere.
int main()
{
    return 0;
} |
// Naive dense matrix multiply C = A * B for n x n row-major matrices.
// One thread computes one output element; the guard handles ragged grids.
__global__ void matmul(int n, const float *A, const float *B, float *C){
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= n || col >= n) return;
    float acc = 0.0;
    for (int k = 0; k < n; ++k)
        acc += A[row*n + k] * B[k*n + col];
    C[row*n + col] = acc;
}
// Fill A with its own 1-based flattened index: thread (tx, ty) writes
// A[tx*n_cols + ty] = tx*n_cols + ty + 1.
// NOTE(review): there is no bounds guard and blockIdx is ignored, so this
// is only valid for a single-block launch whose blockDim matches the
// matrix extent — confirm with callers.
__global__ void addone(int n_cols, float *A)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    //int bx = blockIdx.x;
    //int by = blockIdx.y;
    int idx = tx*n_cols + ty;
    // the +1.0 promotes the sum to double before narrowing back to float
    float val = tx*n_cols + ty + 1.0;
    //float val = bx*n_cols + by + 1.0;
    A[idx] = val;
}
|
4,819 | #include "stdio.h"
// Square each element of `array` in place; one thread per element with a
// tail guard for grids that overshoot arrayCount.
__global__ void MyKernel(int *array, int arrayCount)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= arrayCount)
        return;
    array[idx] = array[idx] * array[idx];
}
/*
 * Occupancy-API demo: ask the runtime for MyKernel's best block size,
 * derive the grid size for arrayCount elements, and print the theoretical
 * occupancy. The kernel launch itself stays commented out — note that it
 * would pass a HOST pointer (`array`) to the kernel, which is invalid.
 */
int main (void) {
    int arrayCount = 1024*1024;
    int *array = (int*)malloc(sizeof(int)*arrayCount);
    int blockSize;    // The launch configurator returned block size
    int minGridSize;  // The minimum grid size needed to achieve the
                      // maximum occupancy for a full device launch
    int gridSize;     // The actual grid size needed, based on input size
    cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize,
                                        MyKernel, 0, 0);
    // Round up according to array size
    gridSize = (arrayCount + blockSize - 1) / blockSize;
    //MyKernel<<< gridSize, blockSize >>>(array, arrayCount);
    cudaDeviceSynchronize();
    // calculate theoretical occupancy
    int maxActiveBlocks;
    cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
                                                   MyKernel, blockSize,
                                                   0);
    int device;
    cudaDeviceProp props;
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&props, device);
    // resident warps / maximum warps per SM (integer warp count on top)
    float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
                      (float)(props.maxThreadsPerMultiProcessor /
                              props.warpSize);
    printf("Launched blocks of size %d with gridSize %d. Theoretical occupancy: %f\n",
           blockSize, gridSize,occupancy);
    free(array);   // fix: host buffer was leaked
    return 0;
}
|
4,820 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// c[i] = a[i] * b[0]: scale vector `a` by the scalar stored in b[0].
__global__
void vecMulKernel(float* a, float* b, float* c, int n)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] * b[0];
}
/*
 * Host driver: build a 24-element vector and a scalar, multiply them on
 * the GPU with vecMulKernel, and print the first four inputs and outputs.
 */
int main(void)
{
    const int N = 24;
    const int size = N * sizeof(float);
    float *A, *B, *C, *d_A, *d_B, *d_C;
    A = (float*)malloc(size);
    B = (float*)malloc(sizeof(float));
    C = (float*)malloc(size);
    // Fill the vector with its indices and set the scalar.
    for (int idx = 0; idx < N; idx++)
        A[idx] = idx;
    B[0] = 10;
    // Show the first few inputs.
    printf("Primer vector\n");
    for (int idx = 0; idx < 4; idx++)
        printf ("%4.1f ", A[idx] );
    printf("\n");
    // Device allocations and uploads.
    cudaMalloc((void **) &d_A, size);
    cudaMalloc((void **) &d_B, sizeof(float));
    cudaMalloc((void **) &d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, sizeof(float), cudaMemcpyHostToDevice);
    // Enough 8-thread blocks to cover all N elements.
    const int block_size = 8;
    const int n_blocks = (N + block_size - 1) / block_size;
    vecMulKernel <<< n_blocks, block_size >>> (d_A, d_B, d_C, N);
    // Blocking copy doubles as synchronization before reading results.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    printf("Respuesta\n");
    for (int idx = 0; idx < 4; idx++)
        printf ("%4.1f ", C[idx] );
    printf("\n");
    free(A);
    free(B);
    free(C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
4,821 | #include <cassert>
#include <iostream>
#include <vector>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 1
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 3 of 5: implement the kernel
// Part 3 of 5: each thread writes blockIdx.x + threadIdx.x + 42 into its
// slot of d_a (one element per thread, numThreadsPerBlock per block).
__global__ void myFirstKernel(int* d_a, int numThreadsPerBlock) {
    uint slot = blockIdx.x * numThreadsPerBlock + threadIdx.x;
    d_a[slot] = blockIdx.x + threadIdx.x + 42;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
/*
 * Fill a device array from myFirstKernel, copy it back, and verify every
 * element equals blockIdx + threadIdx + 42.
 * BUG FIX: the host vector was only reserve()d, leaving size() == 0, so
 * copying into h_a.data() and indexing h_a[...] was undefined behavior;
 * the vector must be constructed with its full size.
 */
int main(int argc, char** argv)
{
    cudaSetDevice(MYDEVICE);
    // pointer for device memory
    int* d_a;
    // define grid and block size
    int numBlocks = 8;
    int numThreadsPerBlock = 8;
    // Host vector sized up front: the device-to-host copy writes through
    // h_a.data(), so the elements must actually exist.
    std::vector<int> h_a(numBlocks * numThreadsPerBlock);
    // Part 1 of 5: allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    cudaMalloc(&d_a, memSize);
    // Part 2 of 5: configure and launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    myFirstKernel<<<dimGrid, dimBlock>>>(d_a, numThreadsPerBlock);
    // block until the device has completed
    cudaDeviceSynchronize();
    // check if kernel execution generated an error
    checkCUDAError("kernel execution");
    // Part 4 of 5: device to host copy
    cudaMemcpy(h_a.data(), d_a, memSize, cudaMemcpyDeviceToHost);
    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");
    // Part 5 of 5: verify the data returned to the host is correct
    for (int i = 0; i < numBlocks; ++i) {
        for (int j = 0; j < numThreadsPerBlock; ++j) {
            assert(h_a[i * numThreadsPerBlock + j] == i + j + 42);
        }
    }
    // free device memory
    cudaFree(d_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    std::cout << "Correct!" << std::endl;
    return 0;
}
// Abort with a diagnostic (prefixed by `msg`) if a CUDA error is pending;
// note cudaGetLastError() also clears the sticky error state.
void checkCUDAError(const char* msg)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    std::cerr << "Cuda error: " << msg << " " << cudaGetErrorString(err)
              << std::endl;
    exit(-1);
}
|
4,822 | #include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <vector>
#include <utility>
#include <cstdlib>
__constant__ unsigned int d_lookup[256];
int get_one(unsigned int value);
/*
 * Row-major host bitmap: `x` rows of `y` 32-bit words; within a word,
 * bit 31 is that word's first column. Column counts are rounded up to a
 * multiple of 32.
 * Fixes: the zeroing loops now use the 64-bit `size` (the old `row*intCols`
 * int product could overflow for large bitmaps); clearBit uses a plain mask.
 * NOTE(review): the struct owns `bits` but defines no copy constructor or
 * assignment — copying an instance would double-free; pass by reference.
 */
struct MyBitMap {
    unsigned int *bits;               // backing word array, row-major
    int x,y;                          // rows, words per row
    unsigned long long int size;      // total words = x * y (64-bit)
    // Allocate a zeroed bitmap with `row` rows and `col` bit columns.
    MyBitMap(int row, int col) {
        int intCols = (col+31)/32;
        size = (unsigned long long int)row*(unsigned long long int)intCols;
        bits = new unsigned int[size];
        x = row;
        y = intCols;
        // Iterate with the 64-bit size to avoid int overflow on huge maps.
        for(unsigned long long int i=0; i<size; i++)
            bits[i] = 0;
    }
    ~MyBitMap() {
        delete [] bits;
    }
    // Total word count (int for existing callers).
    int getSize(){
        return x*y;
    }
    unsigned int *getPointer() {
        return bits;
    }
    int getRow() {
        return x;
    }
    // Words per row, NOT bit columns.
    int getCol() {
        return y;
    }
    // Copy row `row2` of `second` (same width) into row `row1` of this map.
    void setRow(int row1, unsigned int *second, int row2) {
        for(int i=0; i<y; i++) {
            bits[row1*y+i] = second[row2*y+i];
        }
    }
    // Reallocate to `row` x `col` bits and zero everything.
    void resize(int row, int col) {
        delete [] bits;
        int intCols = (col+31)/32;
        size = (unsigned long long int)row*(unsigned long long int)intCols;
        bits = new unsigned int[size];
        x = row;
        y = intCols;
        for(unsigned long long int i=0; i<size; i++)
            bits[i] = 0;
    }
    unsigned int getInt(int row, int colInt) {
        return bits[row*y+colInt];
    }
    void setInt(int row, int colInt, unsigned int value) {
        bits[row*y+colInt] = value;
    }
    // Set bit (row, col); col 0 lives in bit 31 of the row's first word.
    void setBit(int row, int col) {
        int i = row*y+col/32;
        unsigned int flag = 1;
        flag = flag<<(31-col%32);
        bits[i] = bits[i] | flag;
    }
    void clearBit(int row, int col) {
        int i = row*y+col/32;
        unsigned int flag = 1;
        flag = flag<<(31-col%32);
        bits[i] = bits[i] & ~flag;
    }
    // Return 1 if bit (row, col) is set, else 0.
    unsigned int getBit(int row, int col) {
        int i = row*y+col/32;
        unsigned int flag = 1;
        flag = flag<<(31-col%32);
        if((flag&bits[i]) == 0)
            return 0;
        else
            return 1;
    }
    // Print the words of one row as decimal values (no newline).
    void print(int row) {
        for(int i=0; i<y; i++)
            std::cout<<bits[row*y+i]<<" ";
    }
};
/*
 * Count set bits (supporting transactions) in each item's row of d_bitmap
 * via the byte-popcount table in constant memory. Items whose count
 * reaches `support` get their "frequent" marker bit (column numItem) set
 * in d_itemBitmap. Grid-stride loop: one thread per one or more rows.
 * BUG FIX: the words-per-row of d_itemBitmap must match MyBitMap's
 * (col+31)/32 rounding for col = numItem+1; the previous (numItem+1+32)/32
 * over-counted by one word whenever numItem % 32 == 31.
 */
__global__ void count_ones(unsigned int *d_itemBitmap, unsigned int *d_bitmap, int numItem, int numTxn, int support)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    for (int i=idx; i<numItem; i += blockDim.x*gridDim.x) {
        int count = 0;
        int colInt = (numTxn+31)/32;
        for(int j=0; j<colInt; ++j){
            // Split the word into four bytes and look each one up.
            unsigned int temp = d_bitmap[i*colInt+j];
            unsigned int one = 255;
            one = one&temp;
            temp=temp>>8;
            unsigned int two = 255;
            two = two&temp;
            temp=temp>>8;
            unsigned int three = 255;
            three = three&temp;
            unsigned int four = temp>>8;
            count += d_lookup[one]+d_lookup[two]+d_lookup[three]+d_lookup[four];
        }
        if(count >= support){
            int itemMapCol = (numItem+1+31)/32;   // fixed rounding
            int index = itemMapCol*i+itemMapCol-1;
            unsigned int flag = 1;
            flag = flag<<(31-numItem%32);
            d_itemBitmap[index] = d_itemBitmap[index] | flag;
        }
    }
}
/*
 * For each candidate itemset i (the AND of parent rows pairs[2i] and
 * pairs[2i+1]): materialize its transaction row in d_child_transactions,
 * popcount it with the constant-memory byte table, and mark the
 * "frequent" bit (column numItem) in d_child_items when the count
 * reaches `support`. Grid-stride loop: one thread per candidate.
 */
__global__ void testSupport(unsigned int *pairs, unsigned int *d_parent_transactions, unsigned int *d_child_transactions, unsigned int *d_child_items, int numItem, int support, int numTxn, int numChild)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    for (int i=idx; i<numChild; i += blockDim.x*gridDim.x) {
        int count = 0;
        int colTxn = (numTxn+31)/32;    // words per transaction row
        int colItem = (numItem+32)/32;  // words per item row (numItem+1 bits)
        for(int j=0; j<colTxn; ++j) {
            int a = pairs[2*i];
            int b = pairs[2*i+1];
            // Transactions containing BOTH parents = intersection.
            unsigned int temp = d_parent_transactions[a*colTxn+j] & d_parent_transactions[b*colTxn+j];
            d_child_transactions[i*colTxn+j]=temp;
            // Byte-wise popcount via the d_lookup table.
            unsigned int one = 255;
            one = one&temp;
            temp=temp>>8;
            unsigned int two = 255;
            two = two&temp;
            temp=temp>>8;
            unsigned int three = 255;
            three = three&temp;
            unsigned int four = temp>>8;
            count += d_lookup[one]+d_lookup[two]+d_lookup[three]+d_lookup[four];
        }
        if(count >= support) {
            // Set the marker bit in the LAST word of row i.
            int indexHere = colItem*(i+1)-1;
            unsigned int flag=1;
            flag = flag<<(31-numItem%32);
            d_child_items[indexHere] = d_child_items[indexHere] | flag;
        }
    }
}
/*
 * Candidate generation: enumerate each unordered parent pair (a, b) from
 * the `rowsItem` frequent (itemSize-1)-itemsets. A pair is joinable when
 * the first itemSize-2 set bits of the two parent rows agree; joinable
 * pairs emit the union row into d_child, set its marker bit (column
 * itemNum), and record (a, b) in `pairs`. Grid-stride loop over the
 * rowsItem*(rowsItem-1)/2 pair indices.
 */
__global__ void generateNext(unsigned int *pairs, unsigned int *d_parent, unsigned int *d_child, int itemSize, int itemNum, int size, int rowsItem)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    for (int i=idx; i<size; i += blockDim.x*gridDim.x) {
        // Decode linear pair index i into row indices a < b.
        int a=0;
        int b;
        int newI = i+1;
        int temp = rowsItem-1;
        while(newI>temp) {
            a++;
            newI -= temp;
            temp--;
        }
        b=a+newI;
        int colInt = (itemNum+32)/32;   // words per item row (itemNum+1 bits)
        int equal = itemSize-2;         // leading set bits that must match
        for(int p=0; p<colInt; p++) {
            unsigned int aParent = d_parent[a*colInt+p];
            unsigned int bParent = d_parent[b*colInt+p];
            //printf("a: %d, b: %d, avalue: %u, bvalue: %u, p: %d, equal: %d\n",a,b, aParent, bParent, p, equal);
            unsigned int flag = 1;
            flag = flag<<31;
            // satisfy: 1 = still comparing, 2 = prefix matched, 0 = mismatch
            int satisfy=1;
            for(int q=0; q<32; q++) {
                if(equal==0) {
                    satisfy = 2;
                    break;
                }
                if((aParent&flag) != (bParent&flag)){
                    satisfy = 0;
                    break;
                }
                else {
                    if((aParent&flag)!=0)
                        --equal;
                }
                flag = flag>>1;
            }
            if(satisfy==2) {
                // Prefix matched: child itemset is the union of both parents.
                for(int m=0; m<colInt; m++){
                    unsigned int aNewParent = d_parent[a*colInt+m];
                    unsigned int bNewParent = d_parent[b*colInt+m];
                    d_child[i*colInt+m] = aNewParent | bNewParent;
                }
                // Set the validity marker bit in the last word of the row.
                int indexHere = (i+1)*colInt-1;
                unsigned int flag=1;
                flag = flag<<(31-itemNum%32);
                d_child[indexHere] = d_child[indexHere] | flag;
                pairs[i*2] = a;
                pairs[i*2+1] = b;
                break;
            }
            if(satisfy==0){
                // Mismatch: invalidate the candidate row.
                int indexHere = (i+1)*colInt-1;
                d_child[indexHere] = 0;
                break;
            }
        }
    }
}
/*
 * Apriori frequent-itemset mining driver.
 * argv[1] = transaction file (first line: txn count, item count),
 * argv[2] = grid blocks, argv[3] = threads per block.
 * Loop: count supports of singletons, then repeatedly generate candidate
 * (k+1)-itemsets from the surviving k-itemsets and test their support on
 * the GPU, until fewer than two frequent itemsets remain.
 * NOTE(review): CUDA events are never destroyed and most CUDA calls are
 * unchecked — left as-is here; only documentation added.
 */
int main(int argc, char *argv[])
{
    std::ifstream input_file(argv[1]);
    int numBlock = atoi(argv[2]);
    int numThreads = atoi(argv[3]);
    float support_ratio=0.01;
    int tnx, numItem;
    input_file>>tnx>>numItem;
    float totalTime = 0;
    // bitmap: rows = items, columns = transactions containing the item.
    MyBitMap bitmap(numItem, tnx);
    int support = tnx*support_ratio;
    std::string tempLine;
    std::getline(input_file, tempLine);
    // Parse one transaction per line; item ids are 1-based in the file.
    for(int i=0; i<tnx; i++) {
        std::string oneline;
        std::getline(input_file, oneline);
        std::istringstream items(oneline);
        int item;
        while(items>>item){
            if (item<=numItem && item >0)
                bitmap.setBit(item-1, i);
        }
        items.clear();
    }
    // itemBitmap: rows = itemsets; column numItem is the "frequent" marker.
    MyBitMap itemBitmap(numItem, numItem+1);
    for(int i=0; i<numItem; i++) {
        itemBitmap.setBit(i, i);
    }
    // Byte popcount table, uploaded to constant memory for the kernels.
    int lookup[256];
    for(unsigned int i=0; i<256; i++) {
        lookup[i]=get_one(i);
    }
    cudaMemcpyToSymbol(d_lookup, lookup, sizeof(int)*256);
    unsigned int *d_bitmap, *d_itemBitmap;
    cudaMalloc(&d_bitmap, bitmap.getSize()*sizeof(unsigned int));
    cudaMalloc(&d_itemBitmap, itemBitmap.getSize()*sizeof(unsigned int));
    cudaMemcpy(d_bitmap, bitmap.getPointer(), bitmap.getSize()*sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_itemBitmap, itemBitmap.getPointer(), itemBitmap.getSize()*sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Pass 1: mark frequent single items.
    cudaEventRecord(start);
    count_ones<<<numBlock, numThreads>>>(d_itemBitmap, d_bitmap, numItem, tnx, support);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    totalTime+=milliseconds;
    std::cout<<"Init time: "<<milliseconds<<"--------------------------"<<std::endl;
    cudaMemcpy(bitmap.getPointer(),d_bitmap, bitmap.getSize()*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaMemcpy(itemBitmap.getPointer(), d_itemBitmap, itemBitmap.getSize()*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(d_bitmap);
    cudaFree(d_itemBitmap);
    int oldCount = numItem;
    int newCount = 0;
    for(int i=0; i<numItem; i++) {
        if(itemBitmap.getBit(i, numItem) == 1)
            newCount++;
    }
    int tnxCol = (tnx+31)/32;       // words per transaction row
    int itemCol = (numItem+32)/32;  // words per item row (numItem+1 bits)
    int itemSize = 1;               // current itemset cardinality
    while(newCount > 1) {
        std::cout<<std::endl<<"new itemSize: "<<itemSize<<" newCount: "<<newCount<<std::endl<<std::endl;
        itemSize++;
        // Compact the surviving rows into fresh parent bitmaps.
        MyBitMap newBitmap(newCount, tnx);
        MyBitMap newItemmap(newCount, numItem+1);
        int j=0;
        for(int i=0; i<oldCount; i++) {
            if(itemBitmap.getBit(i, numItem) == 1) {
                newBitmap.setRow(j, bitmap.getPointer(), i);
                newItemmap.setRow(j, itemBitmap.getPointer(), i);
                newItemmap.clearBit(j, numItem);
                j++;
            }
        }
        int possibleNextChild = (newCount)*(newCount-1)/2;
        unsigned int *d_pairs, *d_parent, *d_child;
        cudaMalloc(&d_pairs, 2*possibleNextChild*sizeof(unsigned int));
        cudaMalloc(&d_parent, newCount*sizeof(unsigned int)*itemCol);
        cudaMalloc(&d_child, possibleNextChild*itemCol*sizeof(unsigned int));
        printf("Device Variable alloc:\t%s\n", cudaGetErrorString(cudaGetLastError()));
        cudaMemcpy(d_parent, newItemmap.getPointer(), newItemmap.getSize()*sizeof(unsigned int), cudaMemcpyHostToDevice);
        // Candidate generation on the GPU.
        cudaEventRecord(start);
        generateNext<<<numBlock, numThreads>>> (d_pairs, d_parent, d_child, itemSize, numItem, possibleNextChild, newCount);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        totalTime+=milliseconds;
        std::cout<<"generate time: "<<milliseconds<<"--------------------------"<<std::endl;
        unsigned int *pairs = new unsigned int[2*possibleNextChild];
        MyBitMap child(possibleNextChild, numItem+1);
        cudaError_t error1 = cudaMemcpy(pairs, d_pairs, 2*possibleNextChild*sizeof(unsigned int), cudaMemcpyDeviceToHost);
        cudaError_t error2 = cudaMemcpy(child.getPointer(), d_child, itemCol*possibleNextChild*sizeof(unsigned int), cudaMemcpyDeviceToHost);
        //printf("Error1: %s\n", cudaGetErrorString(error1));
        //printf("Error2: %s\n", cudaGetErrorString(error2));
        printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError()));
        cudaFree(d_child);
        cudaFree(d_pairs);
        cudaFree(d_parent);
        // Keep only candidates whose marker bit survived generation.
        int usefulChild=0;
        for(int m=0; m<possibleNextChild; m++) {
            if(child.getBit(m,numItem) == 1)
                usefulChild++;
        }
        unsigned int *pairsGen = new unsigned int[2*usefulChild];
        std::cout<<std::endl<<"usefulChild:"<<usefulChild<<std::endl<<std::endl;
        itemBitmap.resize(usefulChild, numItem+1);
        j=0;
        for(int m=0; m<possibleNextChild; m++) {
            if(child.getBit(m, numItem) == 1) {
                itemBitmap.setRow(j, child.getPointer(), m);
                itemBitmap.clearBit(j, numItem);
                pairsGen[j*2]=pairs[2*m];
                pairsGen[j*2+1]=pairs[2*m+1];
                ++j;
            }
        }
        delete []pairs;
        // Support test for the surviving candidates.
        unsigned int *d_parent_tnx, *d_child_tnx, *d_child_item;
        cudaMalloc(&d_pairs, 2*usefulChild*sizeof(unsigned int));
        cudaMalloc(&d_parent_tnx, newCount*sizeof(unsigned int)*tnxCol);
        cudaMalloc(&d_child_tnx, usefulChild*sizeof(unsigned int)*tnxCol);
        cudaMalloc(&d_child_item, usefulChild*sizeof(unsigned int)*itemCol);
        cudaMemcpy(d_pairs, pairsGen, 2*usefulChild*sizeof(unsigned int),cudaMemcpyHostToDevice);
        cudaMemcpy(d_parent_tnx,newBitmap.getPointer() , newCount*sizeof(unsigned int)*tnxCol,cudaMemcpyHostToDevice);
        cudaMemcpy(d_child_item,itemBitmap.getPointer() , usefulChild*sizeof(unsigned int)*itemCol,cudaMemcpyHostToDevice);
        cudaEventRecord(start);
        testSupport<<<numBlock, numThreads>>> (d_pairs, d_parent_tnx, d_child_tnx, d_child_item, numItem, support, tnx, usefulChild);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        totalTime+=milliseconds;
        std::cout<<"test time: "<<milliseconds<<"--------------------------"<<std::endl;
        bitmap.resize(usefulChild, tnx);
        cudaMemcpy(itemBitmap.getPointer(), d_child_item, usefulChild*sizeof(unsigned int)*itemCol, cudaMemcpyDeviceToHost);
        cudaMemcpy(bitmap.getPointer(), d_child_tnx, usefulChild*sizeof(unsigned int)*tnxCol, cudaMemcpyDeviceToHost);
        newCount = 0;
        for(int m=0; m<usefulChild; m++) {
            if(itemBitmap.getBit(m, numItem) == 1)
                newCount++;
        }
        oldCount = usefulChild;
        cudaFree(d_pairs);
        cudaFree(d_parent_tnx);
        cudaFree(d_child_tnx);
        cudaFree(d_child_item);
        delete[] pairsGen;
    }
    std::cout<<"total time: "<<totalTime<<" milliseconds--------------------------"<<std::endl;
    return 0;
}
// Population count of the LOW 8 BITS of `value` (used to build the
// per-byte lookup table uploaded to constant memory).
int get_one(unsigned int value){
    int ones = 0;
    for (int bit = 0; bit < 8; ++bit) {
        ones += (value >> bit) & 1u;
    }
    return ones;
}
|
4,823 | #include "includes.h"
// Sum a stack of padded images into two averages: even-indexed images into
// ave1, odd-indexed into ave2. Output pixel (bx, tx) of the nx x ny result
// maps to the center region of each (2*nx+2) x (2*ny) padded slice.
// NOTE(review): padded-layout math (index2) is taken on faith from the
// caller's allocation — confirm stride (nx*2+2) and the ny*2 slice height.
__global__ void add_img(float *image_padded, float *ave1, float *ave2, int nx, int ny, int nima) {
    // Block index
    int bx = blockIdx.x;
    // Thread index
    int tx = threadIdx.x;
    float sum1 = 0.0;
    float sum2 = 0.0;
    int index = tx+bx*nx;                             // output pixel
    int index2 = tx+(nx>>1)+(bx+(ny>>1))*(nx*2+2);    // same pixel in padded slice 0
    // accumulate even slices into sum1, odd slices into sum2
    for (int i=0; i<nima; i+=2) sum1 += image_padded[index2+i*(nx*2+2)*ny*2];
    for (int i=1; i<nima; i+=2) sum2 += image_padded[index2+i*(nx*2+2)*ny*2];
    ave1[index] = sum1;
    ave2[index] = sum2;
    return;
} |
4,824 | #include "includes.h"
/*
 * For each of the M5W descriptors in this block's slice of d_pts1, find
 * the best-scoring (max dot-product) descriptor in d_pts2 and write its
 * score/index. Descriptors are NDIM floats, staged through shared memory
 * as float4s; buffer1 rows are padded by one float4 to avoid bank
 * conflicts.
 * BUG FIX: the per-column maximum scan compared scores[i*M2W + tx] but
 * read scores[i*M5W + tx]; M2W belongs to a different kernel variant, so
 * the comparison and the read must both use this kernel's M5W stride.
 */
__global__ void Match5(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
    __shared__ float4 buffer1[M5W*(NDIM/4 + 1)];
    __shared__ float4 buffer2[M5H*NDIM/4];
    __shared__ float scores[M5W*M5H];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bp1 = M5W*blockIdx.x;
    // Stage this block's M5W query descriptors (padded rows).
    if (ty<M5W)
        for (int d=tx;d<NDIM/4;d+=M5W)
            for (int j=ty;j<M5W;j+=M5H)
                buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
    __syncthreads();
    float max_score = 0.0f;
    int index = -1;
    for (int bp2=0;bp2<NPTS;bp2+=M5H) {
        // Stage the next M5H candidate descriptors.
        for (int d=tx;d<NDIM/4;d+=M5W)
            buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
        __syncthreads();
        if (ty<M5H/M5R) {
            // Each thread accumulates M5R candidate rows at once.
            float score[M5R];
            for (int dy=0;dy<M5R;dy++)
                score[dy] = 0.0f;
            for (int d=0;d<NDIM/4;d++) {
                float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
                for (int dy=0;dy<M5R;dy++) {
                    float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d];
                    score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
                    score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
                }
            }
            for (int dy=0;dy<M5R;dy++)
                scores[tx + M5W*(M5R*ty + dy)] = score[dy];
        }
        __syncthreads();
        if (ty==0) {
            for (int i=0;i<M5H;i++) {
                // fixed: comparison previously used M2W here
                if (scores[i*M5W + tx]>max_score) {
                    max_score = scores[i*M5W + tx];
                    index = bp2 + i;
                }
            }
        }
        __syncthreads();
    }
    if (ty==0) {
        d_score[bp1 + tx] = max_score;
        d_index[bp1 + tx] = index;
    }
}
4,825 | #include<stdio.h>
#include<math.h>
#define abs(x) (x > 0 ? x : -(x))
#define MAX(a,b) (a > b ? a : b)
#define MIN(a,b) (a < b ? a : b)
#define PI 3.1415926
#define GRIDDIM 32
#define BLOCKDIM 1024 //32*32
extern "C" void TOF_filter(float *filter_v, const int nx, const int ny, const float tof_sigma);
/*
 * Compute one TOF filter coefficient at frequency-grid position (ix, iy):
 * filter = J0(t) / exp(t) with t = pi^2 * sigma^2 * w_2, where w_2 is the
 * squared normalized offset from the grid center.
 * BUG FIX: the second term of w_2 previously multiplied the iy offset by
 * the ix offset (wy*wx) instead of squaring it (wy*wy).
 */
__device__ void TOF_filter_in_freq(float *filter_v, const int ix, const int iy, const int nx, const int ny, const float tof_sigma)
{
    const float nx2 = nx / 2;
    const float ny2 = ny / 2;
    float wx = (ix - (nx2 - 0.5f)) / nx2;   // normalized x offset
    float wy = (iy - (ny2 - 0.5f)) / ny2;   // normalized y offset
    float w_2 = wx * wx + wy * wy;
    float temp_ = PI * PI * tof_sigma * tof_sigma * w_2;
    *filter_v = j0f(temp_) / expf(temp_);
}
/*
 * Evaluate the TOF filter over the whole nx x ny grid with a grid-stride
 * loop (one coefficient per index).
 * Fixes: `ny` is an element count and is now typed int (it was float,
 * which silently made `nx * ny` float arithmetic); the row/column split
 * uses exact integer % and / instead of fmodf.
 */
__global__ void TOF_filter_in_freq_kernel(float *filter_v, const int nx, const int ny, const float tof_sigma)
{
    int step = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < nx * ny; idx += step)
    {
        int ix = idx % nx;   // column
        int iy = idx / nx;   // row
        TOF_filter_in_freq(filter_v+idx, ix, iy, nx, ny, tof_sigma);
    }
}
// Host wrapper: build the nx x ny TOF frequency filter on the GPU and copy
// it into the caller-provided host buffer `filter_v`.
// NOTE(review): CUDA return codes are not checked on any of these calls.
void TOF_filter(float *filter_v, const int nx, const int ny, const float tof_sigma)
{
    float *filter_v_d;
    cudaMalloc(&filter_v_d, nx * ny * sizeof(float));
    TOF_filter_in_freq_kernel<<<GRIDDIM, BLOCKDIM>>>(filter_v_d, nx, ny, tof_sigma);
    cudaDeviceSynchronize();
    cudaMemcpy(filter_v, filter_v_d, nx * ny * sizeof(float),cudaMemcpyDeviceToHost);
    cudaFree(filter_v_d);
}
|
4,826 | /**
* @file pctdemo_processMandelbrotElement.cu
*
* CUDA code to calculate the Mandelbrot Set on a GPU.
*
* Copyright 2011 The MathWorks, Inc.
*/
/** Work out which piece of the global array this thread should operate on */
/** Flattened global thread index for a 2D grid of 2D blocks. */
__device__ size_t calculateGlobalIndex() {
    // linear block id within the grid
    size_t const blockId = blockIdx.x + blockIdx.y * gridDim.x;
    // linear thread id within this block
    size_t const threadInBlock = threadIdx.x + blockDim.x * threadIdx.y;
    // threads per block
    size_t const blockSize = blockDim.x * blockDim.y;
    return threadInBlock + blockId * blockSize;
}
/** The actual Mandelbrot algorithm for a single location */
/** Advance a coordinate by half a velocity step: returns x0 + 0.5*vx0*dt
 *  (the drift half-step of a leapfrog integrator).
 *  Fixes: removed an unused thread-index local and stale Mandelbrot
 *  comments inherited from the template this file was based on. */
__device__ double position( double const x0,
                            double const vx0,
                            double const dt ) {
    return x0 + 0.5 * vx0 * dt;
}
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
*/
/** Main entry point.
 * Despite the historical name, this is a particle position half-step:
 * each thread advances the (x, y, z) coordinates of one particle in place
 * using its velocity (x += 0.5*v*dt via position()).
 * NOTE(review): no bounds guard — assumes the launch covers exactly the
 * array length; confirm with the host-side caller.
 */
__global__ void processMandelbrotElement(
                      double * xi,
                      double * yi,
                      double * zi,
                      double * vxi,
                      double * vyi,
                      double * vzi,
                      const double dt ) {
    // Work out which thread we are
    size_t const globalThreadIdx = calculateGlobalIndex();
    // Load this particle's coordinates and velocity
    double const x = xi[globalThreadIdx];
    double const y = yi[globalThreadIdx];
    double const z = zi[globalThreadIdx];
    double const vx = vxi[globalThreadIdx];
    double const vy = vyi[globalThreadIdx];
    double const vz = vzi[globalThreadIdx];
    // Half-step each coordinate in place
    xi[globalThreadIdx] = position( x, vx, dt );
    yi[globalThreadIdx] = position( y, vy, dt );
    zi[globalThreadIdx] = position( z, vz, dt );
}
|
4,827 | #include "includes.h"
// Transpose an ny-row by nx-column matrix with 4-way unrolling along x:
// each thread writes 4 output elements spaced blockDim.x apart in the same
// output row (coalesced stores) while reading column-strided inputs.
__global__ void transposeUnroll4Col(float *out, float *in, const int nx, const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x * 4 + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned int ti = iy * nx + ix; // access in rows
    unsigned int to = ix * ny + iy; // access in columns
    // Guard: the furthest of the 4 unrolled elements must also be in range.
    if (ix + 3 * blockDim.x < nx && iy < ny)
    {
        out[ti] = in[to];
        out[ti + blockDim.x] = in[to + blockDim.x * ny];
        out[ti + 2 * blockDim.x] = in[to + 2 * blockDim.x * ny];
        out[ti + 3 * blockDim.x] = in[to + 3 * blockDim.x * ny];
    }
}
4,828 | #include<iostream>
using namespace std;
__global__
// In-place pairwise tree reduction over one block: after the loop,
// input[0] holds the sum of all 2*blockDim.x elements. Launch with one
// block of count/2 threads for `count` (power-of-two) elements.
void sum(int *input){
	int tid = threadIdx.x;
	int step =1;
	int number_of_threads = blockDim.x;
	while(number_of_threads>0){
		if(tid<number_of_threads){
			int fst = tid * step * 2;
			int snd = fst + step;
			printf("%d\\n",input[fst]+input[snd]);
			input[fst]+=input[snd];
		}
		// FIX: every round must see the previous round's partial sums.
		// The loop trip count is uniform across the block (it depends only
		// on blockDim.x), so all threads reach this barrier together.
		__syncthreads();
		step *=2;
		number_of_threads/=2;
	}
}
int main(){
	// Reduce eight integers on the device; the expected total is 360.
	const int count = 8;
	const int bytes = count * sizeof(int);
	int host_vals[] = {10,20,30,40,50,60,70,80};
	int *dev_vals;
	cudaMalloc(&dev_vals, bytes);
	cudaMemcpy(dev_vals, host_vals, bytes, cudaMemcpyHostToDevice);
	// One block; one thread per pair of elements.
	sum<<<1, count / 2>>>(dev_vals);
	int result;
	cudaMemcpy(&result, dev_vals, sizeof(int), cudaMemcpyDeviceToHost);
	cout<<result;
	cudaFree(dev_vals);
}
|
extern "C"
// Naive dense matrix multiply C = A * B for square, row-major
// size x size matrices: thread i computes the entire row i of C.
__global__ void mul(double* A, double* B, double* C, int size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < size) {
        // compute row i of C (the original comment said "column", but
        // C[i*size + j] for j = 0..size-1 is row i)
        for(int j=0; j < size; j++) {
            double sum = 0.0;
            for(int k=0; k < size; k++) {
                sum += A[ (i*size)+k ] * B[ (k*size)+j ];
            }
            C[ (i*size)+j ] = sum;
        }
        // end of row computing
    }
}
|
4,830 | #include "includes.h"
// Builds an inverse particle-id lookup: for each particle p in
// [ini, ini+n) whose id lies in the window [idini, idfin), stores its
// array position so that ridp[id-idini] == p afterwards.
__global__ void KerCalcRidp(unsigned n,unsigned ini,unsigned idini,unsigned idfin,const unsigned *idp,unsigned *ridp)
{
  unsigned p=blockIdx.x*blockDim.x + threadIdx.x; //-Number of particle.
  if(p<n){
    p+=ini;
    const unsigned id=idp[p];
    // Only ids inside the requested window are mapped.
    if(idini<=id && id<idfin)ridp[id-idini]=p;
  }
}
4,831 | // Gregory Paton
// 322:451
// CUDA Mandelbrot
#include <math.h>
#include <stdio.h>
#include <stdlib.h> /* atoi */
#include <string.h>
#define X_RESN 800 /* x resolution */
#define Y_RESN 800 /* y resolution */
typedef struct complextype
{
float real, imag;
} Complex;
__global__
// Classify each point of the 800x800 grid (mapped to roughly [-2,2]^2)
// as inside (1) or outside (0) the Mandelbrot set after at most 100
// iterations. Rows are split evenly among all launched threads; the last
// thread absorbs the remainder when X_RESN is not divisible.
// NOTE(review): work_width divides by the per-block thread count
// (tb_x*tb_y), not the total thread count, so with more than one block
// the row ranges of different blocks overlap — verify the intended
// launch is a single block.
void work(int *id, int tb_x, int tb_y, int gr_x, int gr_y)
{
    int i, j, k, idx;
    Complex z, c;
    float lengthsq, temp;
    const int num_threads = tb_x * tb_y;
    int work_width = X_RESN / num_threads;
    // Flat block id and global thread id from the caller-supplied dims.
    const int bid = blockIdx.x + (blockIdx.y * gr_x);
    const int tid = threadIdx.x + (threadIdx.y * tb_x) + (bid * num_threads);
    int start = tid * work_width;
    int stop = start + work_width;
    if (stop > 800)
        stop = 800;
    // if X_RESN is not evenly divisible by num_threads
    // give remainder of work to last thread
    if (tid == num_threads - 1)
        stop = X_RESN;
    // NOTE(review): every thread writes id[0] here (benign race only
    // because thread 0 later overwrites index 0 with the real value).
    id[0] = 0;
    for(i = start; i < stop; i++) {
        for(j = 0; j < Y_RESN; j++) {
            // Escape-time iteration z = z^2 + c.
            z.real = z.imag = 0.0;
            c.real = ((float) j - 400.0)/200.0;
            c.imag = ((float) i - 400.0)/200.0;
            k = 0;
            do {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2.0*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real+z.imag*z.imag;
                k++;
            } while (lengthsq < 4.0 && k < 100);
            idx = i + (j * Y_RESN);
            // 1 = survived the full 100 iterations (treated as in-set).
            if (k == 100)
                id[idx] = 1;
            else
                id[idx] = 0;
        }
    }
}
// Parse launch dimensions, run the Mandelbrot kernel, and report the
// elapsed GPU time. Usage: prog TB_W TB_H GRID_W GRID_H
int main (int argc, char **argv)
{
    int tb_x = 16;
    int tb_y = 1;
    int gr_x = 1;
    int gr_y = 1;
    if (argc == 5) {
        tb_x = atoi(argv[1]);
        tb_y = atoi(argv[2]);
        gr_x = atoi(argv[3]);
        gr_y = atoi(argv[4]);
    }
    else {
        printf("usage: %s THREAD_BLOCK_WIDTH THREAD_BLOCK_HEIGHT GRID_WIDTH GRID_HEIGHT\n", argv[0]);
        return -1;
    }
    float time;
    cudaEvent_t start, stop;
    // ~2.5 MB on the stack; fine on default Linux limits.
    int id[X_RESN * Y_RESN];
    int *Id;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&Id, X_RESN * Y_RESN * sizeof(int));
    dim3 dimBlock(tb_x, tb_y);
    dim3 dimGrid(gr_x, gr_y);
    work<<<dimGrid, dimBlock>>>(Id, tb_x, tb_y, gr_x, gr_y);
    // FIX: the copy size was X_RESN * Y_RESN bytes, i.e. only a quarter of
    // the int array; scale by sizeof(int).
    cudaMemcpy(id, Id, X_RESN * Y_RESN * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("time: %fms\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(Id);
    /* Program Finished */
    return 0;
}
|
4,832 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* File: wtime.c */
/* Description: a timer that reports the current wall time */
/* */
/* Author: Wei-keng Liao */
/* ECE Department Northwestern University */
/* email: wkliao@ece.northwestern.edu */
/* Copyright, 2005, Wei-keng Liao */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
double now_time;
struct timeval etstart;
struct timezone tzp;
if (gettimeofday(&etstart, &tzp) == -1)
perror("Error: calling gettimeofday() not successful.\n");
now_time = ((double)etstart.tv_sec) + /* in seconds */
((double)etstart.tv_usec) / 1000000.0; /* in microseconds */
return now_time;
}
#ifdef _TESTING_
/* Standalone smoke test (compile with -D_TESTING_): print the current
 * wall-clock time once and exit. */
int main(int argc, char **argv) {
    double time;
    time = wtime();
    printf("time of day = %10.4f\n", time);
    return 0;
}
#endif
|
4,833 | #include <cstdio>
#include <cuda_runtime.h>
#include "print_kernel.cuh"
// Diagnostic kernel: each thread prints its global index together with its
// thread/block coordinates (device printf — debugging use only).
__global__ void cudaKernelFunc() {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Index: %d; thread: %d; block: %d; blockDim: %d\n",
           gid, threadIdx.x, blockIdx.x, blockDim.x);
}
// Host wrapper: launch the diagnostic kernel as 10 blocks of 10 threads
// and wait until all device output has been produced.
void cudaCallKernel() {
    dim3 grid(10);
    dim3 block(10);
    cudaKernelFunc<<<grid, block>>>();
    cudaDeviceSynchronize();
}
|
4,834 | #pragma once
#include <stdio.h>
#include <time.h>
//#include <helper_cuda.h>
#define MAX_LEVELS 300
// Return the total number of single-precision CUDA cores on the device:
// (cores per SM for this compute capability) * (SM count).
// Generalized to cover Turing, Ampere, Ada and Hopper, mirroring the
// _ConvertSMVer2Cores table in the CUDA samples' helper_cuda.h; existing
// behaviour for Fermi..Volta is unchanged.
int getSPcores(cudaDeviceProp devProp)
{
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    switch (devProp.major){
        case 2: // Fermi: 48 cores/SM on 2.1, 32 on 2.0
            if (devProp.minor == 1) cores = mp * 48;
            else cores = mp * 32;
            break;
        case 3: // Kepler
            cores = mp * 192;
            break;
        case 5: // Maxwell
            cores = mp * 128;
            break;
        case 6: // Pascal: GP10x (6.1/6.2) 128, GP100 (6.0) 64
            if (devProp.minor == 1 || devProp.minor == 2) cores = mp * 128;
            else if (devProp.minor == 0) cores = mp * 64;
            else printf("Unknown device type\n");
            break;
        case 7: // Volta (7.0/7.2) and Turing (7.5): 64 FP32 cores/SM
            if (devProp.minor == 0 || devProp.minor == 2 || devProp.minor == 5)
                cores = mp * 64;
            else printf("Unknown device type\n");
            break;
        case 8: // Ampere: GA100 (8.0) 64; GA10x (8.6/8.7) and Ada (8.9) 128
            if (devProp.minor == 0) cores = mp * 64;
            else if (devProp.minor == 6 || devProp.minor == 7 || devProp.minor == 9)
                cores = mp * 128;
            else printf("Unknown device type\n");
            break;
        case 9: // Hopper (9.0): 128 FP32 cores/SM
            if (devProp.minor == 0) cores = mp * 128;
            else printf("Unknown device type\n");
            break;
        default:
            printf("Unknown device type\n");
            break;
    }
    return cores;
}
// In-place recursive quicksort (ascending) of arr[first_index..last_index],
// using the first element of each partition as the pivot.
// NOTE(review): recursion depth is O(n) in the worst case (already-sorted
// input); the caller raises cudaLimitStackSize before launching — confirm
// the limit covers the largest n_ref/n_test used.
__device__ void quick_sort_gf32(
    float *arr,
    long first_index,
    long last_index) {
    // declaring index variables
    long pivotIndex, index_a, index_b;
    float temp;
    if (first_index < last_index) {
        // assigning first element index as pivot element
        pivotIndex = first_index;
        index_a = first_index;
        index_b = last_index;
        // Sorting in Ascending order with quick sort
        while (index_a < index_b) {
            // Advance past elements already on the correct side of the pivot.
            while (arr[index_a] <= arr[pivotIndex] && index_a < last_index) {
                index_a++;
            }
            while (arr[index_b] > arr[pivotIndex]) {
                index_b--;
            }
            if (index_a < index_b) {
                // Swapping operation
                temp = arr[index_a];
                arr[index_a] = arr[index_b];
                arr[index_b] = temp;
            }
        }
        // At the end of first iteration, swap pivot element with index_b element
        temp = arr[pivotIndex];
        arr[pivotIndex] = arr[index_b];
        arr[index_b] = temp;
        // Recursive call for quick sort, with partitioning
        quick_sort_gf32(arr, first_index, index_b - 1);
        quick_sort_gf32(arr, index_b + 1, last_index);
    }
    return;
}
/* Merge the two sorted halves arr[l..m] and arr[m+1..r] of arr[] in place,
 * using two device-heap temporaries of sizes m-l+1 and r-m.
 * FIX: the temporaries were allocated with new[] but never freed, leaking
 * device heap on every call; they are now released before returning.
 * NOTE(review): device new can return NULL when the heap
 * (cudaLimitMallocHeapSize) is exhausted — size the heap for the largest
 * n1+n2 used. */
__device__ void merge(float *arr, long l, long m, long r)
{
    long i, j, k;
    long n1 = m - l + 1;
    long n2 = r - m;
    /* create temp arrays */
    float *L = new float[n1], *R = new float[n2];
    /* Copy data to temp arrays L[] and R[] */
    for (i = 0; i < n1; i++)
        L[i] = arr[l + i];
    for (j = 0; j < n2; j++)
        R[j] = arr[m + 1 + j];
    /* Merge the temp arrays back into arr[l..r]*/
    i = 0;
    j = 0;
    k = l;
    while (i < n1 && j < n2)
    {
        if (L[i] <= R[j])
        {
            arr[k] = L[i];
            i++;
        }
        else
        {
            arr[k] = R[j];
            j++;
        }
        k++;
    }
    /* Copy the remaining elements of L[], if there are any */
    while (i < n1)
    {
        arr[k] = L[i];
        i++;
        k++;
    }
    /* Copy the remaining elements of R[], if there are any */
    while (j < n2)
    {
        arr[k] = R[j];
        j++;
        k++;
    }
    /* FIX: release the device-heap temporaries. */
    delete[] L;
    delete[] R;
}
/* Iterative (bottom-up) mergesort of arr[0..n-1], ascending. Avoids
 * recursion entirely: subarray width doubles each pass, so stack usage is
 * constant (the scratch space is allocated inside merge()). */
__device__ void mergeSort(float *arr, long n)
{
    long curr_size; // For current size of subarrays to be merged
    // curr_size varies from 1 to n/2
    long left_start; // For picking starting index of left subarray
    // to be merged
    // Merge subarrays in bottom up manner. First merge subarrays of
    // size 1 to create sorted subarrays of size 2, then merge subarrays
    // of size 2 to create sorted subarrays of size 4, and so on.
    for (curr_size=1; curr_size<=n-1; curr_size = 2*curr_size)
    {
        // Pick starting point of different subarrays of current size
        for (left_start=0; left_start<n-1; left_start += 2*curr_size)
        {
            // Find ending point of left subarray. mid+1 is starting
            // point of right
            int mid = left_start + curr_size - 1;
            // Clamp the right end so the final (partial) subarray merges too.
            int right_end = min(left_start + 2*curr_size - 1, n-1);
            // Merge Subarrays arr[left_start...mid] & arr[mid+1...right_end]
            merge(arr, left_start, mid, right_end);
        }
    }
}
// Iterative quicksort of arr[0..elements-1] using explicit partition
// stacks beg[]/end[] (at most MAX_LEVELS pending partitions). After each
// split the smaller partition is kept on top, bounding stack depth to
// O(log n) for typical inputs.
// NOTE(review): there is no overflow check against MAX_LEVELS — confirm
// callers never sort arrays pathological enough to exceed it.
__device__ void quickSort(float *arr, long elements) {
  long beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R, swap;
  float piv;
  beg[0]=0; end[0]=elements;
  while (i>=0) {
    L=beg[i]; R=end[i]-1;
    if (L<R) {
      // Hoare-style partition around the first element.
      piv=arr[L];
      while (L<R) {
        while (arr[R]>=piv && L<R) R--; if (L<R) arr[L++]=arr[R];
        while (arr[L]<=piv && L<R) L++; if (L<R) arr[R--]=arr[L]; }
      // Place the pivot, push the right partition, shrink the current one.
      arr[L]=piv; beg[i+1]=L+1; end[i+1]=end[i]; end[i++]=L;
      // Keep the smaller partition on top of the stack.
      if (end[i]-beg[i]>end[i-1]-beg[i-1]) {
        swap=beg[i]; beg[i]=beg[i-1]; beg[i-1]=swap;
        swap=end[i]; end[i]=end[i-1]; end[i-1]=swap; }}
    else {
      i--; }}}
/* Binary search over an ascending sorted array. Returns the insertion
 * index with the same convention as the original: values <= arr[0] map to
 * 0, values > arr[arr_size-1] map to arr_size, otherwise the index i such
 * that arr[i-1] < value <= arr[i] (ties on an exact hit return that index).
 */
__device__ long searchsorted_gf32(
    const float *arr,
    const float value,
    const long arr_size) {
    // arr must be sorted
    long first = 0, last = arr_size - 1, curr_idx;
    if (value <= arr[0]) {
        return 0;
    }
    else if (value > arr[last]) {
        return arr_size;
    }
    while (first <= last) {
        /* FIX: integer midpoint instead of (long)(0.5 * (first + last)).
         * Identical result for non-negative indices, but avoids the
         * double round-trip and is overflow-safe for large arrays. */
        curr_idx = first + (last - first) / 2;
        if ((value > arr[curr_idx]) && (value <= arr[curr_idx + 1])) {
            return curr_idx + 1;
        }
        else if (value < arr[curr_idx]) {
            last = curr_idx - 1;
        }
        else if (value > arr[curr_idx]) {
            first = curr_idx + 1;
        }
        else {
            /* value == arr[curr_idx] */
            return curr_idx;
        }
    }
    return 0; /* not reached for well-formed sorted input */
}
// One thread per unit vector: project the reference and test point clouds
// onto the vector, shrink the test projections slightly toward their
// median, and update per-test-point minimum rank counts (half-space depth
// candidates) in mins[].
// Scratch buffers dot_ref / dot_test / dot_test_sort / temp_mins must each
// provide one row per thread (n_uvecs rows).
__global__ void fill_smins_gf32(
    const float *uvecs,
    const float *ref,
    const float *test,
    float *dot_ref,
    float *dot_test,
    float *dot_test_sort,
    long *mins,
    long *temp_mins,
    const long n_uvecs,
    const long n_ref,
    const long n_test,
    const long n_dims) {
    size_t tid;
    tid = ((blockIdx.x * blockDim.x) +
           threadIdx.x);
    if (tid >= n_uvecs) {
        return;
    }
    // printf("tid: %d\n", tid);
    // Multiplier just below 1 used to nudge test projections toward the median.
    float _inc_mult = (float) (1 - (float) (1e-7));
    size_t i, j, k;
    float *uvec, *sdot_ref, *sdot_test, *sdot_test_sort;
    long *stemp_mins, *smins, _idx;
    float stest_med;
    // NOTE(review): the unit vector is selected by blockIdx.x while every
    // other row is selected by the global tid — with blockDim.x > 1 the
    // threads of a block all project onto the *same* vector but write
    // distinct scratch rows. Confirm whether this should be `i = tid`.
    i = blockIdx.x;
    uvec = (float *) &uvecs[i * n_dims];
    // Project each reference point onto the unit vector.
    sdot_ref = &dot_ref[tid * n_ref];
    for (j = 0; j < n_ref; ++j) {
        sdot_ref[j] = 0.0;
        for (k = 0; k < n_dims; ++k) {
            sdot_ref[j] = sdot_ref[j] + (uvec[k] * ref[(j * n_dims) + k]);
            // printf("sdot_ref[j]: %0.5f\n", sdot_ref[j]);
        }
    }
    // Project each test point; keep a second copy for sorting.
    sdot_test = &dot_test[tid * n_test];
    sdot_test_sort = &dot_test_sort[tid * n_test];
    for (j = 0; j < n_test; ++j) {
        sdot_test[j] = 0.0;
        for (k = 0; k < n_dims; ++k) {
            sdot_test[j] = sdot_test[j] + (
                uvec[k] * test[(j * n_dims) + k]);
        }
        sdot_test_sort[j] = sdot_test[j];
        // printf("sdot_test[j]: %0.5f\n", sdot_test[j]);
    }
    quick_sort_gf32(&sdot_ref[0], 0, n_ref - 1);
    // mergeSort(&sdot_ref[0], n_ref);
    quick_sort_gf32(&sdot_test_sort[0], 0, n_test - 1);
    // quickSort(&sdot_ref[0], n_ref);
    //// quickSort(&sdot_test_sort[0], n_test);
    //
    // Median of the sorted test projections (mean of middle two when even).
    if ((n_test % 2) == 0) {
        stest_med = 0.5 * (sdot_test_sort[n_test / 2] +
                           sdot_test_sort[(n_test / 2) - 1]);
    }
    else {
        stest_med = sdot_test_sort[n_test / 2];
    }
    // Shrink each projection infinitesimally toward the median to break ties.
    for (j = 0; j < n_test; ++j) {
        sdot_test[j] = (
            (sdot_test[j] - stest_med) * _inc_mult) + stest_med;
    }
    smins = &mins[tid * n_test];
    stemp_mins = &temp_mins[tid * n_test];
    // Rank of each test projection among the sorted reference projections.
    for (j = 0; j < n_test; ++j) {
        stemp_mins[j] = searchsorted_gf32(&sdot_ref[0], sdot_test[j], n_ref);
        // printf("sdot_ref[0], stemp_mins[j]: %0.5f, %d\n", sdot_ref[0], stemp_mins[j]);
    }
    // Depth candidate is min(rank, n_ref - rank); keep the running minimum.
    for (j = 0; j < n_test; ++j) {
        _idx = n_ref - stemp_mins[j];
        if (_idx < stemp_mins[j]) {
            stemp_mins[j] = _idx;
        }
        if (stemp_mins[j] < smins[j]) {
            smins[j] = stemp_mins[j];
        }
        // printf("smins[j]: %d\n", smins[j]);
    }
    return;
}
// Trivial sanity-check kernel: prints a fixed marker string from the device.
__global__ void test_ftn() {
    printf("345435345\n");
}
// Host driver: copies the point clouds and unit vectors into managed
// memory, launches fill_smins_gf32 with one scratch row per unit vector,
// and folds the per-vector minima into depths[] (one value per test
// point). depths[] must be pre-initialised by the caller to a value
// >= n_ref.
// FIX: the long-typed sizes were printed with %d (undefined behaviour on
// LP64 platforms); they now use %ld. Dead commented-out code (device
// enumeration, serial fallback, timing scaffolding) has been removed.
void depth_ftn_c_gf32(
    const float *ref,
    const float *test,
    const float *uvecs,
    long *depths,
    const long n_ref,
    const long n_test,
    const long n_uvecs,
    const long n_dims) {
    size_t i, j, k;
    float *d_uvecs, *d_ref, *d_test, *dot_ref, *dot_test, *dot_test_sort;
    long *mins, *temp_mins;
    cudaSetDevice(0);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    // One scratch row per unit vector.
    int n_cpus = n_uvecs; //getSPcores(deviceProp);
    printf("n_cpus: %d\n", n_cpus);
    printf("n_uvecs: %ld\n", n_uvecs);
    printf("n_ref: %ld\n", n_ref);
    printf("n_test: %ld\n", n_test);
    printf("n_dims: %ld\n", n_dims);
    cudaError_t error;
    // Managed allocations: inputs ...
    cudaMallocManaged(&d_uvecs, (n_uvecs * n_dims) * sizeof(float));
    cudaMallocManaged(&d_ref, (n_ref * n_dims) * sizeof(float));
    cudaMallocManaged(&d_test, (n_test * n_dims) * sizeof(float));
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error 1: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // ... per-thread projection scratch ...
    cudaMallocManaged(&dot_ref, (n_cpus * n_ref) * sizeof(float));
    cudaMallocManaged(&dot_test, (n_cpus * n_test) * sizeof(float));
    cudaMallocManaged(&dot_test_sort, (n_cpus * n_test) * sizeof(float));
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error 2: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // ... and the per-thread rank minima.
    cudaMallocManaged(&mins, (n_cpus * n_test) * sizeof(long));
    cudaMallocManaged(&temp_mins, (n_cpus * n_test) * sizeof(long));
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error 3: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Stage the inputs into managed memory.
    for (i = 0; i < (n_ref * n_dims); ++i) {
        d_ref[i] = ref[i];
    }
    for (i = 0; i < (n_test * n_dims); ++i) {
        d_test[i] = test[i];
    }
    for (i = 0; i < (n_uvecs * n_dims); ++i) {
        d_uvecs[i] = uvecs[i];
    }
    // n_ref is an upper bound for any rank, so it is the identity for min().
    for (i = 0; i < (n_uvecs * n_test); ++i) {
        temp_mins[i] = n_ref;
        mins[i] = n_ref;
    }
    printf("0, mins: %ld\n", mins[0]);
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error 4: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // The kernel's quicksort is recursive; enlarge the per-thread stack.
    cudaDeviceSetLimit(cudaLimitStackSize, 200 * 1024);
    dim3 block_size(1024);
    dim3 thread_size(32);
    fill_smins_gf32 <<< block_size, thread_size >>> (
        d_uvecs,
        d_ref,
        d_test,
        dot_ref,
        dot_test,
        dot_test_sort,
        mins,
        temp_mins,
        n_uvecs,
        n_ref,
        n_test,
        n_dims);
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error 5: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Fold the per-unit-vector minima into a single depth per test point.
    for (k = 0; k < n_test; ++k) {
        for (j = 0; j < n_cpus; ++j) {
            if (depths[k] > mins[j * (n_test) + k]) {
                depths[k] = mins[j * (n_test) + k];
            }
        }
    }
    return;
}
|
4,835 | #include <iostream>
#include <cuda.h>
#include <cstdio>
#include "scan_kernels.cuh"
using namespace std;
// Run a three-phase block scan over 1M ones and time it; the last element
// printed should equal the array length if the scan is inclusive.
int main() {
    // params:
    int size = 1024*1024;
    // allocate host:
    int *data_host = NULL;
    data_host = new int[size];
    // allocate device:
    int *data_device = NULL;
    cudaMalloc((void**) &data_device, size * sizeof(int));
    int *block_results = NULL;
    cudaMalloc((void**) &block_results, 1024 * sizeof(int));
    // fill host:
    for(int i = 0; i < size; i++) {
        data_host[i] = 1;
    }
    // copy host to device:
    cudaMemcpy(data_device, data_host, size * sizeof(int), cudaMemcpyHostToDevice);
    // kernel: per-block scan, scan of block totals, then uniform add.
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    global_scan_kernel_1 <<<1024, 1024>>> (data_device, block_results);
    global_scan_kernel_2 <<<1, 1024>>> (block_results);
    global_scan_kernel_3 <<<1024, 1024>>> (data_device, block_results);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // copy device to host:
    cudaMemcpy(data_host, data_device, size * sizeof(int), cudaMemcpyDeviceToHost);
    // print:
    cout << "time = " << time << endl;
    for(int i = size-1; i < size; i++) {
        cout << data_host[i] << " ";
    }
    cout << endl;
    // free:
    // FIX: data_host was allocated with new[] but released with scalar
    // delete (undefined behaviour), and block_results was never freed.
    delete[] data_host;
    cudaFree(data_device);
    cudaFree(block_results);
    // end:
    return 0;
}
|
4,836 | #include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
// Triple each of the first n elements of a, one thread per element.
__global__ void kernel(int *a, int n)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    a[gid] *= 3;
}
// Triple an array of N ints on the GPU and report the kernel time.
int main()
{
    cout << "main() begin" << endl;
    int N = 100000;
    int size = N * sizeof(int);
    int* a_h{};
    a_h = (int *)malloc(size);
    cout << "after malloc. a_h: " << a_h << endl;
    // Fill host memory with some values
    for (int i = 0; i < N; ++i) a_h[i] = i;
    int block_size = 1024; // Count of threads in a block
    // FIX: the original added an extra block only when N was an exact
    // multiple of block_size (ternary inverted), leaving the tail elements
    // unprocessed. Use the standard ceiling division.
    int block_count = (N + block_size - 1) / block_size;
    cudaError_t err;
    int* a_d{};
    cudaMalloc(&a_d, size);
    err = cudaGetLastError();
    cout << "cudaMalloc: " << cudaGetErrorString(err) << endl;
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    err = cudaGetLastError();
    cout << "cudaMemcpy host to device: " << cudaGetErrorString(err) << endl;
    // Run the program block on the GPU
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // FIX: the element count N must be passed, not the byte count `size` —
    // the kernel's bounds check compares indices against this value.
    kernel<<< block_count, block_size >>> (a_d, N);
    cudaEventRecord(stop);
    // Find the calculation time on GPU
    float calc_time = 0;
    cudaEventSynchronize(stop); // CPU must wait before reading the elapsed time
    cudaEventElapsedTime(&calc_time, start, stop);
    cout << "GPU calculated the result in " << calc_time << " ms" << endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // cudaMemcpy is sync therefore it is inherently a synchronization point
    cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
    err = cudaGetLastError();
    cout << "cudaMemcpy device to host: " << cudaGetErrorString(err) << endl;
    free(a_h);
    cudaFree(a_d);
    cout << "main() end" << endl;
}
4,837 |
#include <stdio.h>
#include <cuda.h>
// Integrand f(x) = exp(x^2) * cos(x); callable from both host and device.
__host__ __device__ float f(float x){
    const float x_sq = x * x;
    return exp(x_sq) * cos(x);
}
// Evaluate f at 64 sample points per block and tree-reduce the values in
// shared memory; thread 0 writes the block's partial sum to w[blockIdx.x].
// Sample i is taken at a + (i+1)*h.
__global__ void oblicz_fx(float h, float a, float *w)
{
    // block size is fixed at 64 (translated from the original Polish comment)
    __shared__ float y[64];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    y[threadIdx.x] = f(a+(i+1)*h);
    __syncthreads();
    // Pairwise reduction: stride 1, 2, 4, 8, 16, with a full-block barrier
    // between stages (the barriers are outside the divergent ifs, so every
    // thread reaches them).
    if(threadIdx.x%2==0) y[threadIdx.x]+=y[threadIdx.x+1];
    __syncthreads();
    if(threadIdx.x%4==0) y[threadIdx.x]+=y[threadIdx.x+2];
    __syncthreads();
    if(threadIdx.x%8==0) y[threadIdx.x]+=y[threadIdx.x+4];
    __syncthreads();
    if(threadIdx.x%16==0) y[threadIdx.x]+=y[threadIdx.x+8];
    __syncthreads();
    if(threadIdx.x%32==0) y[threadIdx.x]+=y[threadIdx.x+16];
    __syncthreads();
    if(threadIdx.x==0) {
        // Final stage plus writeback of the block's partial sum.
        y[threadIdx.x]+=y[threadIdx.x+32];
        w[blockIdx.x]=y[threadIdx.x];
    }
    /*
    // Alternative serial per-block summation (kept from the original):
    if(threadIdx.x==0){
        float x=0;
        for(int i=0;i<blockDim.x;i++)
            x+=y[i];
        w[blockIdx.x]=x;
    }
    */
}
// Host program: numerically integrate f over [a,b] with N sample points,
// summing per-block partial results on the CPU (trapezoid-style endpoint
// correction). (Comments translated from Polish.)
int main(void)
{
    float *w_h, *w_d; // pointers to the host and device arrays
    const int N = 4*65536+1; // number of sample points
    float a=0.0;
    float b=1.0;
    float h=(b-a)/N;
    int bsize=64;
    int gsize=(N-1)/bsize;
    size_t size = gsize * sizeof(float);
    w_h = (float *)malloc(size); // allocate the host array
    cudaMalloc((void **) &w_d, size); // allocate the device array
    // run the computation on the device
    oblicz_fx <<< gsize, bsize >>> (h,a,w_d);
    // copy the results from device memory back to host memory
    cudaMemcpy(w_h, w_d, sizeof(float)*gsize, cudaMemcpyDeviceToHost);
    // Accumulate: endpoint terms weighted 1/2, then the partial block sums.
    float sum=0.5*(f(a)+f(b));
    for (int i=0; i<gsize; i++)
        sum+=w_h[i];
    sum*=h;
    printf("calka=%f\n", sum);
    // free memory
    free(w_h);
    cudaFree(w_d);
}
|
4,838 | #include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
unsigned char *host_s = NULL; // source image array
unsigned char *host_t = NULL; // target image array
FILE *fp_s = NULL; // source file handler
FILE *fp_t = NULL; // target file handler
unsigned char *d_s = NULL; // source image array
unsigned char *d_t = NULL; // target image array
unsigned int width, height, channels; // image width, image height
size_t pitch_s, pitch_t;
// __device__ int mask[MASK_N][MASK_X][MASK_Y] = {
__constant__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
// Read a PNG file into a freshly malloc'd row-major buffer (*image) and
// report its dimensions and channel count.
// Returns 0 on success, 1 on bad signature, 2 if the file cannot be
// opened, 3 on malloc failure, 4 on libpng allocation failure.
// FIX: the original leaked the FILE handle and the libpng read structs on
// every path; all paths now clean up.
int read_png(const char* filename, unsigned char** image, unsigned* height, unsigned* width, unsigned* channels) {
    unsigned char sig[8];
    FILE* infile;
    infile = fopen(filename, "rb");
    if (!infile)
        return 2; /* cannot open file */
    if (fread(sig, 1, 8, infile) != 8 || !png_check_sig(sig, 8)) {
        fclose(infile);
        return 1; /* bad signature */
    }
    png_structp png_ptr;
    png_infop info_ptr;
    png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (!png_ptr) {
        fclose(infile);
        return 4; /* out of memory */
    }
    info_ptr = png_create_info_struct(png_ptr);
    if (!info_ptr) {
        png_destroy_read_struct(&png_ptr, NULL, NULL);
        fclose(infile);
        return 4; /* out of memory */
    }
    png_init_io(png_ptr, infile);
    png_set_sig_bytes(png_ptr, 8);
    png_read_info(png_ptr, info_ptr);
    int bit_depth, color_type;
    png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
    png_uint_32 i, rowbytes;
    png_bytep row_pointers[*height];
    png_read_update_info(png_ptr, info_ptr);
    rowbytes = png_get_rowbytes(png_ptr, info_ptr);
    *channels = (int) png_get_channels(png_ptr, info_ptr);
    if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        fclose(infile);
        return 3;
    }
    /* Point each row at its slice of the contiguous buffer. */
    for (i = 0; i < *height; ++i)
        row_pointers[i] = *image + i * rowbytes;
    png_read_image(png_ptr, row_pointers);
    png_read_end(png_ptr, NULL);
    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
    fclose(infile);
    return 0;
}
// Write a tightly-packed 8-bit RGB image buffer to a PNG file.
// `image` must hold height * width * channels bytes, row-major.
// NOTE(review): the file is always written as PNG_COLOR_TYPE_RGB, so
// channels is assumed to be 3 — confirm callers never pass RGBA here.
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width, const unsigned channels) {
    FILE* fp = fopen(filename, "wb");
    png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    png_infop info_ptr = png_create_info_struct(png_ptr);
    png_init_io(png_ptr, fp);
    png_set_IHDR(png_ptr, info_ptr, width, height, 8,
                 PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
    // Disable filtering and use the fastest compression level.
    png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
    png_write_info(png_ptr, info_ptr);
    png_set_compression_level(png_ptr, 1);
    // Point each row at its slice of the contiguous buffer.
    png_bytep row_ptr[height];
    for (int i = 0; i < height; ++ i) {
        row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
    }
    png_write_image(png_ptr, row_ptr);
    png_write_end(png_ptr, NULL);
    png_destroy_write_struct(&png_ptr, &info_ptr);
    fclose(fp);
}
// Sobel-style edge filter: one thread per image row; each thread sweeps
// all columns, convolving the two 5x5 masks over the padded, pitched
// source (d_s) and writing the gradient magnitude into the pitched
// destination (d_t).
// NOTE(review): reads use column offset x+2+u and rely on the caller
// passing d_s pre-offset past 2 padding rows (see main's `d_s + a`), so
// the usual boundary test is commented out — confirm the padding layout
// before reusing with a different caller.
void __global__ sobel (unsigned char* d_s, unsigned char* d_t, unsigned height, unsigned width, unsigned channels, size_t pitch_s, size_t pitch_t) {
    int x, y, i, v, u; // for loop counter
    int R, G, B; // color of R, G, B
    double val[MASK_N*3] = {0.0};
    int adjustX, adjustY, xBound, yBound;
    // Task 2: Put mask[][][] into Shared Memory
    // Flattened copy of the 2x5x5 constant masks; first 50 threads load it.
    __shared__ int M[2*5*5];
    if(threadIdx.x<50)
    M[threadIdx.x] = mask[threadIdx.x/25][ (threadIdx.x%25)/5][(threadIdx.x%25)%5];
    __syncthreads();
    // Task 1: Relabel x or y or both into combination of blockIdx, threadIdx ... etc
    // One thread per row (the early return is safe: it follows the barrier).
    y = blockIdx.x * blockDim.x + threadIdx.x;
    if(y>=height) return;
    for (x = 0; x < width; ++x) {
        for (i = 0; i < MASK_N; ++i) {
            adjustX = (MASK_X % 2) ? 1 : 0;
            adjustY = (MASK_Y % 2) ? 1 : 0;
            xBound = MASK_X /2;
            yBound = MASK_Y /2;
            val[i*3+2] = 0.0;
            val[i*3+1] = 0.0;
            val[i*3] = 0.0;
            // 5x5 convolution window around (x, y) for mask i.
            for (v = -yBound; v < yBound + adjustY; ++v) {
                for (u = -xBound; u < xBound + adjustX; ++u) {
                    // if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
                    // Pitched addressing: row base is d_s + pitch_s*(y+v).
                    R = (d_s+pitch_s *(y+v) )[channels*(x+2+u)+2];
                    G = (d_s+pitch_s *(y+v) )[channels*(x+2+u)+1];
                    B = (d_s+pitch_s *(y+v) )[channels*(x+2+u)+0];
                    val[i*3+2] += R * M[i*25+(u + xBound)*5+ (v + yBound)];
                    val[i*3+1] += G * M[i*25+(u + xBound)*5+ (v + yBound)];
                    val[i*3+0] += B * M[i*25+(u + xBound)*5+ (v + yBound)];
                    // val[i*3+2] += R * mask[i][u + xBound][v + yBound];
                    // val[i*3+1] += G * mask[i][u + xBound][v + yBound];
                    // val[i*3+0] += B * mask[i][u + xBound][v + yBound];
                    // }
                }
            }
        }
        // Gradient magnitude per channel: sqrt of the sum of squared
        // responses of the two masks, scaled and clamped to 8 bits.
        double totalR = 0.0;
        double totalG = 0.0;
        double totalB = 0.0;
        for (i = 0; i < MASK_N; ++i) {
            totalR += val[i * 3 + 2] * val[i * 3 + 2];
            totalG += val[i * 3 + 1] * val[i * 3 + 1];
            totalB += val[i * 3 + 0] * val[i * 3 + 0];
        }
        totalR = sqrt(totalR) / SCALE;
        totalG = sqrt(totalG) / SCALE;
        totalB = sqrt(totalB) / SCALE;
        const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
        const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
        const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
        (d_t+pitch_t *y )[channels*x+2] = cR;
        (d_t+pitch_t *y )[channels*x+1] = cG;
        (d_t+pitch_t *y )[channels*x+0] = cB;
    }
}
// Usage: prog input.png output.png — reads a PNG, runs the Sobel kernel
// on the GPU, writes the edge image.
int main(int argc, char** argv) {
    assert(argc == 3);
    read_png(argv[1], &host_s, &height, &width, &channels);
    host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
    // Task 1: Allocate memory on GPU
    // Source is over-allocated by 4 rows/columns to give the 5x5 masks a
    // zero-copy halo; pitch_s/pitch_t receive the aligned row strides.
    cudaMallocPitch( &d_s, &pitch_s,(size_t) (width+4) * channels * sizeof(char), height+4);
    cudaMallocPitch( &d_t, &pitch_t,(size_t) width * channels * sizeof(char), height);
    // Byte offset of 2 padding rows into the padded source.
    size_t a = pitch_s * 2;
    // Task 1: Memory copy from Host to Device (GPU)
    // Copy the image into the interior of the padded buffer (2 rows down,
    // 2 pixels right); the halo itself is left uninitialised —
    // NOTE(review): border outputs therefore depend on garbage values.
    cudaMemcpy2D( d_s + a + 2 * channels, pitch_s, host_s, width * channels, width * channels, height, cudaMemcpyHostToDevice);
    // Task 1: Modify sobel() to CUDA kernel function
    // One thread per row, 64 threads per block; d_s is passed pre-offset
    // past the top padding rows (the kernel adds the column offset).
    sobel<<<height/64+1,64>>>( d_s + a, d_t, height, width, channels, pitch_s, pitch_t);
    // Task 1: Memory Copy from Device (GPU) to Host
    cudaMemcpy2D( host_t, width * channels, d_t, pitch_t, width * channels, height, cudaMemcpyDeviceToHost);
    // Task 1: Free memory on device
    cudaFree (d_s);
    cudaFree (d_t);
    write_png(argv[2], host_t, height, width, channels);
    // Task 3: Free Pinned memory
    free (host_s);
    free (host_t);
    return 0;
}
|
4,839 | #include<stdio.h>
const int MATRIX_WIDTH = 400;
const int MATRIX_BYTES = MATRIX_WIDTH * MATRIX_WIDTH * sizeof(float);
const int MAX_NO_THREADS = 512;
// Element-wise addition d_out[i] = d_in1[i] + d_in2[i], one thread per element.
// NOTE(review): there is no bounds guard, and main() launches
// ceil(N/512)*512 threads for N = 400*400 elements, so the trailing
// threads of the last block read and write past the allocations — this
// kernel needs an element-count parameter and an index check.
__global__ void matrix_add(float *d_in1, float *d_in2, float *d_out){
	int index = threadIdx.x + blockIdx.x*blockDim.x ;
	*(d_out+index) = *(d_in1+index) + *(d_in2+index);
}
// Verify h_out[i] == h_in1[i] + h_in2[i] for every element.
// Returns 1 when all elements match, 0 on the first mismatch.
// (Exact float comparison is intentional here: the inputs are small
// integers stored as floats, so the sums are exact.)
int check(float *h_in1, float *h_in2, float *h_out){
	int flag=1;
	for(int i=0;i<MATRIX_WIDTH*MATRIX_WIDTH;i++){
		if(h_in1[i]+h_in2[i]!=h_out[i]){
			// FIX: the original broke out of the loop without clearing
			// flag, so the function always reported success.
			flag=0;
			break;
		}
	}
	return flag;
}
// Add two 400x400 matrices on the GPU, verify against the CPU, and print
// the kernel time.
// NOTE(review): the three host arrays total ~1.9 MB of stack each — close
// to typical stack limits; heap allocation would be safer. Also, the
// launch rounds the thread count up past N while matrix_add has no bounds
// guard, so the last block touches memory past the allocations.
int main(){
	//allocating size for host matrices
	float h_in1[MATRIX_WIDTH*MATRIX_WIDTH], h_in2[MATRIX_WIDTH*MATRIX_WIDTH], h_out[MATRIX_WIDTH*MATRIX_WIDTH];
	//generating the input matrices
	int i;
	for(i=0;i<MATRIX_WIDTH*MATRIX_WIDTH;i++){
		h_in1[i]=(float)i;
		h_in2[i]=(float)(MATRIX_WIDTH*MATRIX_WIDTH-i);
	}
	//declaring device memory pointers
	float *d_in1, *d_in2, *d_out;
	//allocating device memory
	cudaMalloc(&d_in1, MATRIX_BYTES);
	cudaMalloc(&d_in2, MATRIX_BYTES);
	cudaMalloc(&d_out, MATRIX_BYTES);
	//transferring memory from host to device
	cudaMemcpy(d_in1, h_in1, MATRIX_BYTES, cudaMemcpyHostToDevice);
	cudaMemcpy(d_in2, h_in2, MATRIX_BYTES, cudaMemcpyHostToDevice);
	cudaEvent_t start, stop;
	cudaEventCreate(&stop);
	cudaEventCreate(&start);
	//starting kernel
	cudaEventRecord(start);
	matrix_add<<<(int)(MATRIX_WIDTH*MATRIX_WIDTH/MAX_NO_THREADS)+1, MAX_NO_THREADS>>>(d_in1, d_in2, d_out);
	cudaEventRecord(stop);
	//transferring memory from device to host (synchronous, so the kernel
	//has finished before check() runs)
	cudaMemcpy(h_out, d_out, MATRIX_BYTES, cudaMemcpyDeviceToHost);
	cudaEventSynchronize(stop);
	if(check(h_in1,h_in2,h_out))
		printf("the result is correct\n");
	else
		printf("the result is incorrect\n");
	float time= 0 ;
	cudaEventElapsedTime(&time, start, stop);
	printf("time spent in gpu in ms: %f\n",time);
	//freeing memory
	cudaFree(d_in1);
	cudaFree(d_in2);
	cudaFree(d_out);
	return 0;
}
|
4,840 | /* This code will multiply two vectors and
check the result.
*/
#include <cuda.h>
#include <iostream>
/* Fill in your dotProduct kernel here...
*/
#define THREADS_PER_BLOCK 256
__device__ float result;
// Per-block partial dot product of x and y: each thread multiplies one
// pair into shared memory, then thread 0 sums the block and atomically
// accumulates into the __device__ global `result`.
__global__ void calcDotProductKern(float *x, float *y, int N)
{
    __shared__ float product[THREADS_PER_BLOCK]; //All threads in a block must be able
                                                 //to access this array
    int index = threadIdx.x + blockIdx.x * blockDim.x; //index
    // FIX: the grid is rounded up, so the tail block's extra threads must
    // contribute 0 instead of reading x/y out of bounds.
    product[threadIdx.x] = (index < N) ? x[index] * y[index] : 0.0f;
    //Make sure every thread has finished (barrier is uniform: every
    //thread of every block reaches it)
    __syncthreads();
    //Sum the elements serially to obtain the block's partial dot product
    if( 0 == threadIdx.x ) //Pick one thread to sum, otherwise all will execute
    {
        float sum = 0;
        for(int j=0; j < THREADS_PER_BLOCK; j++)
            sum += product[j];
        // One atomic per block into the global accumulator.
        atomicAdd(&result,sum);
    }
}
// Host wrapper: zero the __device__ accumulator `result`, launch the dot
// product kernel over all N elements, and return the accumulated value.
float calcDotProduct(float *x, float *y, int N)
{
    int threads = THREADS_PER_BLOCK;
    int blocks = (N + threads - 1)/ threads;
    // FIX: the original declared `float result` which shadowed the
    // __device__ symbol of the same name, so cudaMemcpyToSymbol /
    // cudaMemcpyFromSymbol were handed the local variable instead of the
    // device symbol. Use a differently-named host variable.
    float h_result = 0;
    cudaMemcpyToSymbol(result, &h_result, sizeof(h_result), 0, cudaMemcpyHostToDevice);
    calcDotProductKern<<<blocks,threads>>>(x, y, N);
    // cudaMemcpyFromSymbol is synchronous, so the kernel has completed
    // before the value is read back.
    cudaMemcpyFromSymbol(&h_result, result, sizeof(h_result), 0, cudaMemcpyDeviceToHost);
    return h_result;
}
// Compute the dot product of two N-element vectors on the GPU, compare it
// against a CPU reference with a relative tolerance, and report timing.
int main(void)
{
    const int N = 10000;
    float *x_host = new float[N];
    float *y_host = new float[N];
    // Fill matrix and vector on host
    for(int i=0 ; i < N ; i++)
    {
        x_host[i] = sin(i*0.1f);
        y_host[i] = cos(i*0.23f);
    }
    float *x;
    float *y;
    cudaMalloc(&x, N*sizeof(float));
    cudaMalloc(&y, N*sizeof(float));
    // Copy x and y to device
    cudaMemcpy(x, x_host, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(y, y_host, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start, 0);
    float prodGPU = calcDotProduct(x, y, N);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaMemcpy(y_host, y, N*sizeof(float), cudaMemcpyDeviceToHost);
    // CPU reference with relative-error comparison (float order of
    // summation differs between host and device, so exact equality is
    // not expected).
    float prod = 0;
    for(int i=0 ; i < N ; i++)
    {
        prod += y_host[i] * x_host[i];
    }
    if( fabs(prod - prodGPU) / prod < 1e-4 )
    {
        std::cout << "Multiplication correct!" << std::endl;
        float timeInMs;
        cudaEventElapsedTime(&timeInMs, start, end);
        std::cout << "Time: " << timeInMs << "ms" << std::endl;
        // NOTE(review): the printed figure is bytes/sec scaled to 1e9, i.e.
        // GB/s, although the label says "Gbps".
        std::cout << "Bandwidth: " << (2*N*sizeof(float)) / 1.0e9 / (timeInMs/1000) << "Gbps" << std::endl;
    }
    else
    {
        std::cout << "Multiplication wrong!" << std::endl;
    }
    cudaFree(x);
    cudaFree(y);
    delete[] x_host;
    delete[] y_host;
}
|
4,841 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#define N 1000
#define BLOCK_SIZE 10
// Element-wise product c[i] = a[i] * b[i] plus atomic accumulation of the
// dot product into *sum. One thread per element; *sum must be zeroed by
// the caller before the launch.
__global__ void scaMult_g (int *a, int *b, int *c, int *sum, int n)
{
	// FIX: the original used only threadIdx.x, so with the multi-block
	// launch in main() every block processed elements 0..blockDim.x-1 and
	// those elements were accumulated once per block. Use the global index.
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid > n- 1) return;
	{
		c[tid] = a[tid] * b[tid];
		atomicAdd(sum, c[tid]);
	}
}
// Compute <a,b> on the GPU via scaMult_g and compare with the CPU result.
int main() {
	int *host_a = new int[N];
	int *host_b = new int[N];
	int *dev_c, *dev_sum, *dev_a, *dev_b, host_sum;
	// FIX: the original declared `int *sum = 0;` (a null pointer) and
	// copied from it into dev_sum, which is invalid; initialise the
	// device accumulator from a real zero-valued int instead.
	int zero = 0;
	for (int i = 0; i < N; i++)
	{
		host_a[i] = 8;
		host_b[i] = 1;
	}
	cudaMalloc((void**)&dev_c, N * sizeof(int));
	cudaMalloc((void**)&dev_sum, sizeof(int));
	cudaMalloc((void**)&dev_a, N * sizeof(int));
	cudaMalloc((void**)&dev_b, N * sizeof(int));
	cudaMemcpy(dev_a, host_a, N * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, host_b, N * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_sum, &zero, sizeof(int), cudaMemcpyHostToDevice);
	// BLOCK_SIZE blocks of N/BLOCK_SIZE threads cover all N elements.
	scaMult_g <<< BLOCK_SIZE, N/BLOCK_SIZE >>> (dev_a, dev_b, dev_c, dev_sum, N);
	// Blocking copy: the kernel has finished before host_sum is read.
	cudaMemcpy(&host_sum, dev_sum, sizeof(int), cudaMemcpyDeviceToHost);
	// CPU reference.
	int s = 0;
	for (int i = 0; i < N; i++)
	{
		s = s + host_a[i] * host_b[i];
	}
	printf("g <a,b>=%d \n", host_sum);
	printf("c <a,b>=%d \n", s);
	// FIX: release host and device memory (the original leaked all of it;
	// it also allocated two unused arrays, which have been removed).
	delete[] host_a;
	delete[] host_b;
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	cudaFree(dev_sum);
	getchar();
}
4,842 | #include <iostream>
#include <cuda.h>
#include <random>
#define N 4096
#define THREAD 256
#define BLOCK 18
#define HANDLE_ERROR(x) checkCudaError(x, __LINE__)
// Abort with the failing source line when a CUDA call did not succeed.
void checkCudaError(cudaError_t msg, int x)
{
    if (msg == cudaSuccess)
        return;
    fprintf(stderr, "line: %d %s\n", x, cudaGetErrorString(msg));
    exit(1);
}
// Applies the 3x4 affine transform stored row-major in tf to every point
// (x[i], y[i], z[i]), writing the result to (ox[i], oy[i], oz[i]).
// Grid-stride loop over all N points.
__global__ void kernel(double *x, double *y, double *z, double *tf, double *ox, double *oy, double *oz)
{
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += step)
    {
        const double px = x[i], py = y[i], pz = z[i];
        ox[i] = px * tf[0] + py * tf[1] + pz * tf[2]  + tf[3];
        oy[i] = px * tf[4] + py * tf[5] + pz * tf[6]  + tf[7];
        oz[i] = px * tf[8] + py * tf[9] + pz * tf[10] + tf[11];
    }
}
// Runs the affine-transform kernel once on every visible CUDA device,
// timing each run with events and printing device index + elapsed ms.
int main()
{
    // Host-side point cloud, filled with uniform random coordinates in [0,100).
    double org_x[N], org_y[N], org_z[N];
    std::random_device rnd;
    std::mt19937 mt(rnd());
    std::uniform_real_distribution<double> rand100(0.0, 100.0);
    for (int i = 0; i < N; i++)
    {
        org_x[i] = rand100(mt);
        org_y[i] = rand100(mt);
        org_z[i] = rand100(mt);
    }
    int count;
    HANDLE_ERROR(cudaGetDeviceCount(&count));
    // Row-major 3x4 affine transform applied to every point.
    double tf_org[12] = {1.0, 1.5, 1.2, 11.0,
                         0.9, 1.1, 0.8, 2.0,
                         1.0, 1.0, 0.6, -1.0};
    for (int i = 0; i < count; i++)
    {
        HANDLE_ERROR(cudaSetDevice(i));
        // BUG FIX: every buffer is now allocated on the device that runs the
        // kernel.  The original allocated tf/ox/oy/oz once on the default
        // device and reused those pointers on every device (invalid without
        // peer access), indexed fixed-size event arrays of length 2 with the
        // device number (overflow when count > 2), and cast the loop index to
        // cudaStream_t, which is not a valid stream handle.
        cudaEvent_t start, stop;
        HANDLE_ERROR(cudaEventCreate(&start));
        HANDLE_ERROR(cudaEventCreate(&stop));
        double *tf, *ox, *oy, *oz, *x, *y, *z;
        HANDLE_ERROR(cudaMalloc((void**)&tf, sizeof(double)*12));
        HANDLE_ERROR(cudaMalloc((void**)&ox, sizeof(double)*N));
        HANDLE_ERROR(cudaMalloc((void**)&oy, sizeof(double)*N));
        HANDLE_ERROR(cudaMalloc((void**)&oz, sizeof(double)*N));
        HANDLE_ERROR(cudaMalloc((void**)&x, sizeof(double)*N));
        HANDLE_ERROR(cudaMalloc((void**)&y, sizeof(double)*N));
        HANDLE_ERROR(cudaMalloc((void**)&z, sizeof(double)*N));
        // Time upload + kernel on the default stream.
        HANDLE_ERROR(cudaEventRecord(start, 0));
        HANDLE_ERROR(cudaMemcpy(tf, tf_org, sizeof(double)*12, cudaMemcpyHostToDevice));
        HANDLE_ERROR(cudaMemcpy(x, org_x, sizeof(double)*N, cudaMemcpyHostToDevice));
        HANDLE_ERROR(cudaMemcpy(y, org_y, sizeof(double)*N, cudaMemcpyHostToDevice));
        HANDLE_ERROR(cudaMemcpy(z, org_z, sizeof(double)*N, cudaMemcpyHostToDevice));
        kernel<<<BLOCK, THREAD>>>(x, y, z, tf, ox, oy, oz);
        HANDLE_ERROR(cudaGetLastError());
        HANDLE_ERROR(cudaEventRecord(stop, 0));
        HANDLE_ERROR(cudaEventSynchronize(stop));
        float time;
        HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
        std::cout << "device: " << i << std::endl;
        std::cout << "time: " << time << std::endl;
        HANDLE_ERROR(cudaFree(tf));
        HANDLE_ERROR(cudaFree(ox));
        HANDLE_ERROR(cudaFree(oy));
        HANDLE_ERROR(cudaFree(oz));
        HANDLE_ERROR(cudaFree(x));
        HANDLE_ERROR(cudaFree(y));
        HANDLE_ERROR(cudaFree(z));
        HANDLE_ERROR(cudaEventDestroy(start));
        HANDLE_ERROR(cudaEventDestroy(stop));
    }
    return 0;
} |
4,843 | // Copyright (c) 2015 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
// Element-wise natural log: out[i] = ln(in[i]) for i in [0, count[0]).
extern "C" __global__ void logn(size_t* count, float* in, float* out) {
    const int step = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count[0]; i += step) {
        out[i] = logf(in[i]);
    }
}
// Element-wise exponential: out[i] = exp(in[i]) for i in [0, count[0]).
extern "C" __global__ void expn(size_t* count, float* in, float* out) {
    const int step = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count[0]; i += step) {
        out[i] = expf(in[i]);
    }
}
// Element-wise sum: out[i] = in1[i] + in2[i] for i in [0, count[0]).
extern "C" __global__ void add(size_t* count, float* in1, float* out,float* in2) {
    const int step = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count[0]; i += step) {
        out[i] = in1[i] + in2[i];
    }
}
// Element-wise doubling: out[i] = 2*in[i] for i in [0, count[0]).
extern "C" __global__ void dbl(size_t* count, float* in, float* out) {
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count[0];
        i += gridDim.x * blockDim.x) {
        // FIX: float literal -- the original `2.0 * in[i]` promoted the
        // multiply to double and truncated back to float.
        out[i] = 2.0f * in[i];
    }
}
// Element-wise product: out[i] = in1[i] * in2[i] for i in [0, count[0]).
extern "C" __global__ void mul(size_t* count, float* in1, float* out, float* in2) {
    const int step = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < count[0]; i += step) {
        out[i] = in1[i] * in2[i];
    }
}
|
4,844 | #include "includes.h"
/* Start Header
***************************************************************** /
/*!
\file knn-kernel.cu
\author Koh Wen Lin
\brief
Contains the implementation for kmeans clustering on the gpu.
*/
/* End Header
*******************************************************************/
#define KMEAN_BLOCK_SIZE 32
#define KMEAN_BLOCK_SIZE_1D KMEAN_BLOCK_SIZE * KMEAN_BLOCK_SIZE
// Turns per-cluster running sums into means: thread t divides row t of the
// k x d matrix dMeanIn by its cluster population count[t].  max(1, count)
// guards empty clusters against division by zero.
// NOTE(review): indexes by threadIdx.x only, so it appears to expect a
// single block of (at least) k threads -- confirm at the launch site.
__global__ void MeanUpdate(float* dMeanIn, unsigned k, unsigned d, int* count)
{
    const float inv = 1.0f / max(1, count[threadIdx.x]);
    float* row = dMeanIn + threadIdx.x * d;
    for (int j = 0; j < d; ++j)
        row[j] *= inv;
} |
4,845 | extern "C"
// Writes each element's own index into vals, capped at a hard-coded upper
// bound of 1e6 elements regardless of the launch configuration.
__global__ void setValue_kernel(int *vals)
{
    const int N = 1e6;
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < N)
        vals[gid] = gid;
}
|
4,846 | #include <stdio.h>
#include <stdlib.h>
const int N = 2048;
// Copies a into c element-wise (despite the name, no addition is performed).
__global__ void add(const float *a, float *c, int n){
    const int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < n)
        c[i] = a[i];
}
// Driver: copies a random host vector through the GPU and prints the first
// element of the input and the round-tripped output.
int main(){
    float *h_a, *h_c, *d_a, *d_c;
    const size_t ds = N*sizeof(float);
    h_a = (float *)malloc(ds);
    h_c = (float *)malloc(ds);
    cudaMalloc(&d_a, ds);
    cudaMalloc(&d_c, ds);
    // Random input, zeroed output.
    for (int i = 0; i < N; i++){
        h_a[i] = rand()/(float)RAND_MAX;
        h_c[i] = 0;
    }
    cudaMemcpy(d_a, h_a, ds, cudaMemcpyHostToDevice);
    add<<<(N+255)/256, 256>>>(d_a, d_c, N);
    cudaMemcpy(h_c, d_c, ds, cudaMemcpyDeviceToHost);
    printf("h_a[0] = %f\n", h_a[0]);
    printf("h_c[0] = %f\n", h_c[0]);
    // FIX: release device and host buffers (the original leaked all four).
    cudaFree(d_a);
    cudaFree(d_c);
    free(h_a);
    free(h_c);
    return 0;
}
|
4,847 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with its source location; optionally terminates
// the process with the error code.  Used via the gpuErrchk() macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Image structure
// Raster buffer for a grayscale (1 bpp) or interleaved RGB (3 bpp) image,
// plus cached statistics that are filled in later by eval_stats /
// cuda_eval_stats (zero/uninitialized until then).
typedef struct
{
    int width, height; // Image size
    int bytes_per_pixel; // 1 for grayscale image, 3 for rgb image
    unsigned long long total_bytes; // Total bytes in data, width * height * bytes_per_pixel
    unsigned char * data; // Image data - very large array of 8-bit values
    float mean, variance, stddev; // Image metrics, left unevaluated at the beginning
} image_t;
// Allocates the pixel buffer for `image` and resets its statistics.
// Exits the process on allocation failure (treated as fatal).
void alloc_image(image_t * image, int width, int height, int bytes_per_pixel)
{
    image->width = width;
    image->height = height;
    image->bytes_per_pixel = bytes_per_pixel;
    image->total_bytes = (unsigned long long) (width) * height * bytes_per_pixel;
    // FIX: allocate total_bytes (computed in 64 bits above) rather than the
    // raw int product width*height*bytes_per_pixel, which can overflow for
    // large images and silently under-allocate.
    image->data = (unsigned char *) malloc(image->total_bytes);
    if (!image->data) {
        printf("Unable to allocate %llu bytes of memory!", image->total_bytes);
        exit(1); // Critical error for us
    }
    image->mean = 0.0f;
    image->variance = 0.0f;
    image->stddev = 0.0f;
    printf("%llu MiB allocated for the image\n",
        (unsigned long long) (image->total_bytes + sizeof(image_t) + 1024 * 1024 - 1) / (1024 * 1024));
}
// Releases the pixel buffer and zeroes every size field so any stale use of
// the image is easy to spot.
void dealloc_image(image_t * image) {
    printf("%llu MiB deallocated\n",
        (unsigned long long) (image->total_bytes + sizeof(image_t) + 1024 * 1024 - 1) / (1024 * 1024));
    free(image->data);
    image->data = NULL;
    image->width = 0;
    image->height = 0;
    image->bytes_per_pixel = 0;
    image->total_bytes = 0;
}
// Deep-copies src into a freshly allocated image: dimensions, pixel data,
// and the cached statistics.
void clone_image(image_t * image, image_t * src) {
    alloc_image(image, src->width, src->height, src->bytes_per_pixel);
    memcpy(image->data, src->data, src->total_bytes);
    image->mean     = src->mean;
    image->variance = src->variance;
    image->stddev   = src->stddev;
}
// Minimal TIFF reader: walks the IFD chain, extracts the width (tag 256),
// height (tag 257) and strip-offset (tag 273) tags, and loads the pixel data
// as an 8-bit grayscale image into `image`.
// NOTE(review): alloc_image is called once per IFD inside the loop, so a
// multi-IFD file leaks (and overwrites) the previously loaded image --
// presumably inputs are single-IFD TIFFs; confirm.
// NOTE(review): the 4-byte fields are assembled big-endian-first; inputs are
// assumed to be big-endian ("MM") TIFFs -- TODO confirm.
void read_image(image_t * image, const char * path) {
    FILE * fp = fopen(path, "rb");
    if (!fp) {
        printf("File %s not found or can't be opened! Exiting...", path); // Critical error for our program
        exit(1);
    }
    unsigned long off_set = 0, next_offset = 0, pos_count = 0, tag_value[25] = { 0 }, strip_offset_val = 0, strip_offset = 0;
    int i, j, k, tag_id[25] = { 0 }; //, tag_type[25] = { 0 }, tag_count[25] = { 0 };
    int tagCount = 0, img_Width = 0, img_Len = 0, IFD_count = 0;
    short buffer[4] = { 0 };
    // Offset of the first IFD lives at byte 4 of the header.
    fseek(fp, 4, SEEK_SET);
    for (i = 3; i >= 0; i--) {
        buffer[i] = getc(fp);
    }
    off_set = ((int)buffer[0] << 24) | ((int)buffer[1] << 16) | ((int)buffer[2] << 8) | ((int)buffer[3]);
    fseek(fp, off_set, SEEK_SET);
    k = 1;
    while (k) {
        IFD_count++; // No. of IFD's in this TIFF File
        buffer[1] = getc(fp);
        buffer[0] = getc(fp);
        tagCount = ((int)buffer[0] << 8) | ((int)buffer[1]); // No. of entries in an IFD
        for (i = 0; i < tagCount; i++) { // Read all the entries of this IFD
            buffer[1] = getc(fp);
            buffer[0] = getc(fp);
            tag_id[i] = ((int)buffer[0] << 8) | ((int)buffer[1]); // Tag ID
            // Skip the 2-byte tag type.
            for (j = 1; j >= 0; j--) {
                buffer[j] = getc(fp);
            }
            for (j = 3; j >= 0; j--) {
                buffer[j] = getc(fp);
            }
            // Gives no. of values for this Tag
            for (j = 3; j >= 0; j--) {
                buffer[j] = getc(fp);
            }
            tag_value[i] = ((int)buffer[0] << 24) | ((int)buffer[1] << 16) | ((int)buffer[2] << 8) | ((int)buffer[3]);
            // Gets the value if the above count is 1, else offset of the starting value
            if (tag_id[i] == 256) // Tag ID 256 says about the image width
                img_Width = tag_value[i];
            if (tag_id[i] == 257) // Tag ID 257 says about the image length
                img_Len = tag_value[i];
            if (tag_id[i] == 273) // Tag ID 273 says about the offset which points to the offset of strips
                strip_offset_val = tag_value[i];
        }
        alloc_image(image, img_Width, img_Len, 1);
        for (i = 0; i < img_Len; i++) { // Read the pixel values from image and store it in the matrix
            fseek(fp, (strip_offset_val + (i * 4)), SEEK_SET); // Move to the offset of the current strip's offset
            for (j = 3; j >= 0; j--) {
                buffer[j] = getc(fp);
            }
            strip_offset = ((int)buffer[0] << 24) | ((int)buffer[1] << 16) | ((int)buffer[2] << 8) | ((int)buffer[3]);
            fseek(fp, strip_offset, SEEK_SET); // Move to the offset of the current strip
            for (j = 0; j < img_Width; j++) {
                // Two bytes per sample on disk; the first is discarded and the
                // second kept -- NOTE(review): presumably 16-bit samples kept
                // low-byte only; verify against the source imagery.
                getc(fp);
                short tmp_c2 = getc(fp);
                image->data[i * img_Width + j] = tmp_c2;
            }
        }
        pos_count = ((off_set + 2) + (tagCount * 12));
        fseek(fp, pos_count, SEEK_SET);
        for (i = 3; i >= 0; i--) {
            buffer[i] = getc(fp);
        }
        next_offset = ((int)buffer[0] << 24) | ((int)buffer[1] << 16) | ((int)buffer[2] << 8) | ((int)buffer[3]); // Next IFD offset
        if (next_offset != 0)
        {
            fseek(fp, next_offset, SEEK_SET);
        }
        else {
            k = 0;
        }
    }
    fclose(fp);
    printf("Image %s loaded successfully\n", path);
}
// Interprets s as a sequence of 2-character hexadecimal byte values and
// writes the corresponding raw bytes to fptr (e.g. "4d2a" -> 0x4D 0x2A).
void WriteHexString(FILE *fptr, char *s) {
    char pair[3];
    pair[2] = '\0';
    for (unsigned int i = 0; i < strlen(s); i += 2) {
        unsigned int byte;
        pair[0] = s[i];
        pair[1] = s[i + 1];
        sscanf(pair, "%X", &byte);
        putc(byte, fptr);
    }
}
// Writes `image` as a big-endian, uncompressed RGB TIFF: header, raw pixel
// data, then a single 14-entry IFD.  Grayscale images are expanded to RGB by
// repeating each sample three times.  The exact byte layout below is
// order-sensitive -- offsets are hand-computed from nx*ny*3.
void write_image(image_t * image, const char * path)
{
    if ((image->bytes_per_pixel != 1) && (image->bytes_per_pixel != 3))
    {
        printf("Only 1 and 3 bytes per pixel images are supported in write_image procedure");
        exit(1);
        return;
    }
    FILE * fptr = fopen(path, "wb");
    if (!fptr) {
        printf("File %s can't be opened for writing! Exiting...", path); // Critical error for our program
        exit(1);
    }
    /* Write the header */
    WriteHexString(fptr, "4d4d002a"); /* Big endian & TIFF identifier */
    int nx = image->width;
    int ny = image->height;
    /* IFD offset: pixel data (nx*ny*3) starts at byte 8, IFD follows it. */
    int offset = nx * ny * 3 + 8;
    putc((offset & 0xff000000) / 16777216, fptr);
    putc((offset & 0x00ff0000) / 65536, fptr);
    putc((offset & 0x0000ff00) / 256, fptr);
    putc((offset & 0x000000ff), fptr);
    /* Write the binary data */
    unsigned long long i;
    if (image->bytes_per_pixel == 3) {
        // Just save the data "as is"
        for (i = 0; i < image->total_bytes; i++)
            fputc(image->data[i], fptr);
    } else {
        // Save each pixel three times as r, g, b component
        for (i = 0; i < image->total_bytes; i++) {
            fputc(image->data[i], fptr);
            fputc(image->data[i], fptr);
            fputc(image->data[i], fptr);
        }
    }
    /* Write the footer */
    WriteHexString(fptr, "000e"); /* The number of directory entries (14) */
    /* Width tag, short int */
    WriteHexString(fptr, "0100000300000001");
    fputc((nx & 0xff00) / 256, fptr); /* Image width */
    fputc((nx & 0x00ff), fptr);
    WriteHexString(fptr, "0000");
    /* Height tag, short int */
    WriteHexString(fptr, "0101000300000001");
    fputc((ny & 0xff00) / 256, fptr); /* Image height */
    fputc((ny & 0x00ff), fptr);
    WriteHexString(fptr, "0000");
    /* Bits per sample tag, short int */
    WriteHexString(fptr, "0102000300000003");
    offset = nx * ny * 3 + 182;
    putc((offset & 0xff000000) / 16777216, fptr);
    putc((offset & 0x00ff0000) / 65536, fptr);
    putc((offset & 0x0000ff00) / 256, fptr);
    putc((offset & 0x000000ff), fptr);
    /* Compression flag, short int */
    WriteHexString(fptr, "010300030000000100010000");
    /* Photometric interpolation tag, short int */
    WriteHexString(fptr, "010600030000000100020000");
    /* Strip offset tag, long int */
    WriteHexString(fptr, "011100040000000100000008");
    /* Orientation flag, short int */
    WriteHexString(fptr, "011200030000000100010000");
    /* Sample per pixel tag, short int */
    WriteHexString(fptr, "011500030000000100030000");
    /* Rows per strip tag, short int */
    WriteHexString(fptr, "0116000300000001");
    fputc((ny & 0xff00) / 256, fptr);
    fputc((ny & 0x00ff), fptr);
    WriteHexString(fptr, "0000");
    /* Strip byte count flag, long int */
    WriteHexString(fptr, "0117000400000001");
    offset = nx * ny * 3;
    putc((offset & 0xff000000) / 16777216, fptr);
    putc((offset & 0x00ff0000) / 65536, fptr);
    putc((offset & 0x0000ff00) / 256, fptr);
    putc((offset & 0x000000ff), fptr);
    /* Minimum sample value flag, short int */
    WriteHexString(fptr, "0118000300000003");
    offset = nx * ny * 3 + 188;
    putc((offset & 0xff000000) / 16777216, fptr);
    putc((offset & 0x00ff0000) / 65536, fptr);
    putc((offset & 0x0000ff00) / 256, fptr);
    putc((offset & 0x000000ff), fptr);
    /* Maximum sample value tag, short int */
    WriteHexString(fptr, "0119000300000003");
    offset = nx * ny * 3 + 194;
    putc((offset & 0xff000000) / 16777216, fptr);
    putc((offset & 0x00ff0000) / 65536, fptr);
    putc((offset & 0x0000ff00) / 256, fptr);
    putc((offset & 0x000000ff), fptr);
    /* Planar configuration tag, short int */
    WriteHexString(fptr, "011c00030000000100010000");
    /* Sample format tag, short int */
    WriteHexString(fptr, "0153000300000003");
    offset = nx * ny * 3 + 200;
    putc((offset & 0xff000000) / 16777216, fptr);
    putc((offset & 0x00ff0000) / 65536, fptr);
    putc((offset & 0x0000ff00) / 256, fptr);
    putc((offset & 0x000000ff), fptr);
    /* End of the directory entry */
    WriteHexString(fptr, "00000000");
    /* Bits for each colour channel */
    WriteHexString(fptr, "000800080008");
    /* Minimum value for each component */
    WriteHexString(fptr, "000000000000");
    /* Maximum value per channel */
    WriteHexString(fptr, "00ff00ff00ff");
    /* Samples per pixel for each channel */
    WriteHexString(fptr, "000100010001");
    fclose(fptr);
    printf("File %s written successfully.\n", path);
}
// Per-block partial sums of the covariance and variance numerators:
//   cov[blockIdx.x] = sum over this block's slice of (in1[i]-mean1)*(in2[i]-mean2)
//   var[blockIdx.x] = sum over this block's slice of (in2[i]-mean2)^2
// The host adds up the gridDim.x partial results and divides (or cancels
// the 1/n factor in a ratio).  The tree reduction below requires
// blockDim.x <= 512 and a power-of-two block size.
__global__ void cuda_CovVar(const unsigned char *input1, const unsigned char *input2, float mean1, float mean2, const unsigned long long n, double *cov, double *var)
{
    __shared__ double sharedCovData[512];
    __shared__ double sharedVarData[512];
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int tx = threadIdx.x;
    // Zero this thread's slots before accumulating (shared mem is uninitialized).
    sharedCovData[tx] = 0;
    sharedVarData[tx] = 0;
    __syncthreads();
    // Grid-stride accumulation of this thread's portion of both sums.
    for(unsigned long long i = id; i < n; i+= gridDim.x*blockDim.x)
    {
        sharedCovData[tx] += ((double)input1[i]-mean1)*((double)input2[i]-mean2);
        sharedVarData[tx] += ((double)input2[i]-mean2)*((double)input2[i]-mean2);
    }
    __syncthreads();
    // block-wide reduction in _shared_ mem
    for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if(tx < offset)
        {
            sharedCovData[tx] += sharedCovData[tx + offset];
            sharedVarData[tx] += sharedVarData[tx + offset];
        }
        __syncthreads();
    }
    // finally, thread 0 writes the calculated result of this block
    if(threadIdx.x == 0)
    {
        // final result should be updated in an exclusive way by each block
        cov[blockIdx.x] = sharedCovData[0];
        var[blockIdx.x] = sharedVarData[0];
    }
}
// Per-block partial sum of the n input bytes: each block reduces its
// grid-stride slice in shared memory and writes one partial result to
// mean[blockIdx.x]; the host sums the gridDim.x partials and divides by n.
// Tree reduction requires blockDim.x <= 512 and a power-of-two block size.
// FIX: accumulate in double (the output type) -- the original summed in
// float, which loses precision once partial sums exceed ~2^24.
__global__ void cuda_mean(const unsigned char *input, double *mean, const unsigned long long n)
{
    __shared__ double sharedSumData[512];
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int tx = threadIdx.x;
    sharedSumData[tx] = 0;
    for(unsigned long long i = id; i < n; i+= gridDim.x*blockDim.x)
    {
        sharedSumData[tx] += (double)input[i];
    }
    __syncthreads();
    // block-wide tree reduction in shared memory
    for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if(tx < offset)
        {
            sharedSumData[tx] += sharedSumData[tx + offset];
        }
        __syncthreads();
    }
    // thread 0 publishes this block's partial sum
    if(threadIdx.x == 0)
    {
        mean[blockIdx.x] = sharedSumData[0];
    }
}
// Per-block partial sum of squared deviations from `mean`; the host sums
// the gridDim.x partials and divides by n.  Tree reduction requires
// blockDim.x <= 512 and a power-of-two block size.
// FIX: accumulate in double -- the original summed in float even though the
// mean and the result are double, losing precision for large images.
__global__ void cuda_var(const unsigned char *input, double mean, const unsigned long long n, double *var)
{
    __shared__ double sharedSumData[512];
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int tx = threadIdx.x;
    sharedSumData[tx] = 0;
    for(unsigned long long i = id; i < n; i+= gridDim.x*blockDim.x)
    {
        double diff = (double)input[i] - mean;
        sharedSumData[tx] += diff * diff;
    }
    __syncthreads();
    // block-wide tree reduction in shared memory
    for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if(tx < offset)
        {
            sharedSumData[tx] += sharedSumData[tx + offset];
        }
        __syncthreads();
    }
    // thread 0 publishes this block's partial sum
    if(threadIdx.x == 0)
    {
        var[blockIdx.x] = sharedSumData[0];
    }
}
// Evaluate image mean, variance and standard deviation on the GPU.
// `data` must be a device pointer holding the image's pixel bytes; results
// are stored into image->mean / image->variance / image->stddev.
void cuda_eval_stats(image_t * image, unsigned char *data )
{
    if (image->bytes_per_pixel != 1) {
        printf("Mean and standart deviation are only evaluated for grayscale images!\n");
        exit(1);
    }
    double mean = 0.0, variance = 0.0, stddev = 0.0;
    unsigned long long i;
    // Mean: 16 per-block partial sums, reduced on the host.
    double *d_mean;
    double h_mean[16];
    gpuErrchk(cudaMalloc((void**)&d_mean, 16*sizeof(double)));
    cuda_mean<<<16,256>>>(data, d_mean, image->total_bytes);
    gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaMemcpy(h_mean, d_mean, 16*sizeof(double), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(d_mean));
    for (i = 0; i < 16; i++)
        mean += h_mean[i];
    mean /= image->total_bytes;
    // Variance: same 16-block partial-sum scheme around the computed mean.
    double *d_var;
    double h_var[16];
    gpuErrchk(cudaMalloc((void**)&d_var, 16*sizeof(double)));
    cuda_var<<<16,256>>>(data, mean, image->total_bytes, d_var);
    gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaMemcpy(h_var, d_var, 16*sizeof(double), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(d_var));
    for (i = 0; i < 16; i++)
        variance += h_var[i];
    variance /= image->total_bytes;
    stddev = sqrtf(variance);
    // Set up those values
    image->mean = (float) mean;
    // FIX: the original computed the variance but never stored it, leaving
    // image->variance stale.
    image->variance = (float) variance;
    image->stddev = (float) stddev;
}
// Evaluate image mean, variance and standard deviation on the CPU
// (grayscale images only); results go into the image's cached fields.
void eval_stats(image_t * image)
{
    if (image->bytes_per_pixel != 1) {
        printf("Mean and standart deviation are only evaluated for grayscale images!\n");
        exit(1);
    }
    double mean = 0.0, variance = 0.0;
    unsigned long long i;
    // First pass: mean of all pixel bytes.
    for (i = 0; i < image->total_bytes; i++)
        mean += image->data[i];
    mean /= image->total_bytes;
    // Second pass: average squared deviation.
    for (i = 0; i < image->total_bytes; i++) {
        float tmp = (float) (image->data[i]) - mean;
        variance += tmp * tmp;
    }
    variance /= image->total_bytes;
    // Set up those values
    image->mean = mean;
    // FIX: the original computed the variance but never stored it, leaving
    // image->variance stale.
    image->variance = variance;
    image->stddev = sqrtf(variance);
}
// Returns the Gram-Schmidt projection coefficient cov(b, gs) / var(gs),
// computed on the GPU with 16 per-block partial reductions.  As a side
// effect, refreshes b->mean and gs->mean via cuda_eval_stats.
// Precondition: b and gs have the same total_bytes (b's size is used for
// both copies below).
float covariance(image_t * b, image_t * gs)
{
    double covariance = 0.0f;
    double variance = 0.0f;
    unsigned long long i;
    // Upload both images to the device.
    unsigned char *d_b, *d_gs;
    double *d_covariance, *d_variance;
    gpuErrchk(cudaMalloc((void**)&d_variance, 16*sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&d_covariance, 16*sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&d_gs, b->total_bytes*sizeof(unsigned char)));
    gpuErrchk(cudaMalloc((void**)&d_b, b->total_bytes*sizeof(unsigned char)));
    gpuErrchk(cudaMemcpy(d_gs, gs->data, b->total_bytes*sizeof(unsigned char), cudaMemcpyHostToDevice) );
    gpuErrchk(cudaMemcpy(d_b, b->data, b->total_bytes*sizeof(unsigned char), cudaMemcpyHostToDevice) );
    // Refresh the cached means on the GPU before forming the numerators.
    cuda_eval_stats(b, d_b);
    cuda_eval_stats(gs, d_gs);
    cuda_CovVar<<< 16, 256>>>(d_b, d_gs, b->mean, gs->mean, b->total_bytes, d_covariance, d_variance);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
    // Sum the 16 per-block partial results on the host.
    double covs[16], vars[16];
    cudaMemcpy(covs, d_covariance, 16*sizeof(double) , cudaMemcpyDeviceToHost );
    cudaMemcpy(vars, d_variance, 16*sizeof(double) , cudaMemcpyDeviceToHost );
    for (i = 0; i < 16; i++)
    {
        covariance += covs[i];
        variance += vars[i];
    }
    // Both sums omit the common 1/N factor; it cancels in the ratio below.
    cudaFree(d_gs);
    cudaFree(d_b);
    cudaFree(d_covariance);
    cudaFree(d_variance);
    return covariance / variance;
}
// One Gram-Schmidt projection step: results[i] -= phi * gs[i] for every
// element, via a grid-stride loop over the n elements.
__global__ void cuda_GramSchmidt(float *results, unsigned long long n, const unsigned char * gs, float phi)
{
    const int step = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step)
        results[i] -= phi * gs[i];
}
// Core routine - Gram-Schmidt transformation
// Orthogonalizes the 4 input bands (band 0 = simulated low-res pan) into
// gs[0..3].  gs[0] is a copy of band 0; each later gs[T] is band T (mean
// removed) minus its projections onto the earlier gs vectors, with the
// projection coefficients recorded in phi[T][l].  The projection
// subtraction runs on the GPU in float, then results are rounded back to
// 8-bit pixels.
void GramSchmidtTransformation(image_t * gs, image_t * bands, float ** phi)
{
    // Gram-Schmidt imlementation for 4 vectors
    // First gs element is the same as band 0, i.e. artificial low res pan image
    clone_image(gs + 0, bands + 0);
    alloc_image(gs + 1, bands[0].width, bands[0].height, bands[0].bytes_per_pixel);
    alloc_image(gs + 2, bands[0].width, bands[0].height, bands[0].bytes_per_pixel);
    alloc_image(gs + 3, bands[0].width, bands[0].height, bands[0].bytes_per_pixel);
    // Host-side float scratch buffer for the running residual.
    float *res = (float *)malloc(bands[0].total_bytes*sizeof(float));
    unsigned char *d_band, *d_gs;
    float *d_results;
    // For the rest three images, we need to follow modified Gram-Schmidt routine
    unsigned l, T;
    unsigned long long i;
    for (T = 1; T < 4; T++)
    {
        phi[T][0] = 0.0f;
        phi[T][1] = 0.0f;
        phi[T][2] = 0.0f;
        phi[T][3] = 0.0f;
        // phi[T][l] = cov(band T, gs l) / var(gs l); covariance() also
        // refreshes bands[T].mean as a side effect.
        for (l = 0; l < T; l++)
            phi[T][l] = covariance(bands + T, gs + l);
        gpuErrchk(cudaMalloc((void**)&d_results, bands[0].total_bytes*sizeof(float)));
        gpuErrchk(cudaMalloc((void**)&d_gs, bands[0].total_bytes*sizeof(unsigned char)));
        // Start from the mean-removed band T.
        for (i = 0; i < bands[T].total_bytes; i++)
            res[i] = (bands[T].data[i] - bands[T].mean);
        gpuErrchk(cudaMemcpy( d_results, res, bands[T].total_bytes*sizeof(float), cudaMemcpyHostToDevice));
        // Subtract each earlier gs component scaled by its coefficient.
        for (l = 0; l < T; l++)
        {
            gpuErrchk( cudaMemcpy( d_gs, gs[l].data, bands[T].total_bytes*sizeof(unsigned char), cudaMemcpyHostToDevice) );
            cuda_GramSchmidt<<< 16, 256>>>(d_results, bands[T].total_bytes, d_gs, phi[T][l]);
            gpuErrchk( cudaPeekAtLastError() );
            gpuErrchk( cudaDeviceSynchronize() );
        }
        gpuErrchk( cudaMemcpy( res, d_results, bands[T].total_bytes*sizeof(float) , cudaMemcpyDeviceToHost ) );
        // Round the float residual back into the 8-bit gs band.
        for (i = 0; i < bands[T].total_bytes; i++)
            gs[T].data[i] = roundf(res[i]);
        cudaFree(d_gs);
        cudaFree(d_results);
    }
    free(res);
}
// Nearest-neighbour sample: maps pixel #idx of src onto the proportional
// position in `where` and returns that pixel's value.
unsigned char get_closest_point(image_t * src, unsigned long long idx, image_t * where)
{
    int si = idx % src->width;  // column within src
    int sj = idx / src->width;  // row within src
    // NOTE(review): the column is scaled by the HEIGHT ratio and the row by
    // the WIDTH ratio, yet the result is read at [wj * width + wi].  This
    // looks transposed unless both images are square -- verify with a
    // non-square input before changing.
    int wi = (si / (src->height - 1.0f)) * (where->height - 1.0f);
    int wj = (sj / (src->width - 1.0f)) * (where->width - 1.0f);
    return where->data[wj * where->width + wi];
}
// Resize image to new size
// Nearest-neighbour resize of a grayscale image into a freshly allocated
// dst of new_w x new_h pixels.
void resize(image_t * dst, image_t * src, int new_w, int new_h)
{
    // Only grayscale sources are supported.
    if (src->bytes_per_pixel != 1) {
        printf("Resizing is only implemented for grayscale images!\n");
        exit(1);
    }
    alloc_image(dst, new_w, new_h, 1);
    // Sample the source at the proportional position of every dst pixel.
    for (unsigned long long i = 0; i < dst->total_bytes; i++)
        dst->data[i] = get_closest_point(dst, i, src);
}
// For each destination pixel i, adds phi * (nearest-neighbour sample from
// `where`) into dst[i].  Used both to upsample a gs band (phi = 1.0) and to
// add back projected Gram-Schmidt components (phi = phi[T][l]).
// Grid-stride loop over the n destination pixels.
// NOTE(review): the column index is scaled by the height ratio and the row
// by the width ratio -- looks transposed for non-square images; confirm.
__global__ void cuda_get_closest_point(unsigned char *dst, int srcHeight, int srcWidth, unsigned long long n, const unsigned char * where, int whereHeight, int whereWidth, float phi)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for(int i = idx; i<n; i += gridDim.x*blockDim.x)
    {
        int si = i % srcWidth;
        int sj = i / srcWidth;
        int wi = (si / (srcHeight - 1.0f)) * (whereHeight - 1.0f);
        int wj = (sj / (srcWidth - 1.0f)) * (whereWidth - 1.0f);
        dst[i] += phi * where[wj * whereWidth + wi];
    }
}
// Inverse Gram-Schmidt: reconstructs the 4 output bands at the (high-res)
// gs[0] size.  Each dst[T] starts as the band mean, gets the upsampled
// gs[T] added (phi = 1), then each earlier gs[l] scaled by phi[T][l].
// Accumulation happens directly in the 8-bit device buffer.
void GramSchmidtReverseTransformation(image_t * dst, image_t * gs, image_t * bands, float ** phi)
{
    // Gram-Schmidt imlementation for 4 vectors
    // First gs element is the same as band 0, i.e. artificial low res pan image
    alloc_image(dst + 0, gs[0].width, gs[0].height, gs[0].bytes_per_pixel);
    alloc_image(dst + 1, gs[0].width, gs[0].height, gs[0].bytes_per_pixel);
    alloc_image(dst + 2, gs[0].width, gs[0].height, gs[0].bytes_per_pixel);
    alloc_image(dst + 3, gs[0].width, gs[0].height, gs[0].bytes_per_pixel);
    unsigned l, T;
    unsigned char *d_gs, *d_dst;
    float *d_results;
    gpuErrchk(cudaMalloc((void**)&d_dst, dst[0].total_bytes*sizeof(unsigned char)));
    gpuErrchk(cudaMalloc((void**)&d_gs, gs[0].total_bytes*sizeof(unsigned char)));
    for (T = 0; T < 4; T++)
    {
        // Seed every output pixel with the band's rounded mean.
        gpuErrchk(cudaMemset( d_dst, (unsigned char)roundf(bands[T].mean), dst[0].total_bytes*sizeof(unsigned char)));
        // Add the upsampled gs[T] itself (coefficient 1.0).
        gpuErrchk(cudaMemcpy( d_gs, gs[T].data, gs[T].total_bytes*sizeof(unsigned char), cudaMemcpyHostToDevice ));
        cuda_get_closest_point<<< 16, 256>>>(d_dst, dst[0].height, dst[0].width, dst[0].total_bytes, d_gs, gs[T].height, gs[T].width, 1.0);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // Add back the projections onto the earlier gs components.
        for (l = 0; l < T; l++)
        {
            gpuErrchk( cudaMemcpy( d_gs, gs[l].data, gs[T].total_bytes*sizeof(unsigned char), cudaMemcpyHostToDevice) );
            cuda_get_closest_point<<< 16, 256>>>(d_dst, dst[0].height, dst[0].width, dst[0].total_bytes, d_gs, gs[l].height, gs[l].width, phi[T][l]);
            gpuErrchk( cudaPeekAtLastError() );
            gpuErrchk( cudaDeviceSynchronize() );
        }
        gpuErrchk( cudaMemcpy(dst[T].data, d_dst, dst[0].total_bytes*sizeof(unsigned char) , cudaMemcpyDeviceToHost ) );
    }
    cudaFree(d_gs);
    cudaFree(d_dst);
}
// Interleaves three grayscale planes (images[1..3]) into one freshly
// allocated RGB image `res`.
void coalesce(image_t * images, image_t * res)
{
    alloc_image(res, images[1].width, images[1].height, 3);
    for (unsigned long long i = 0; i < images[1].total_bytes; i++) {
        unsigned char *px = res->data + 3 * i;
        px[0] = images[1].data[i];
        px[1] = images[2].data[i];
        px[2] = images[3].data[i];
    }
}
// Linearly remaps dst's histogram so its mean and standard deviation match
// src's (dst and src need not be the same size).  Refreshes both images'
// statistics as a side effect.
void stretch(image_t * dst, image_t * src) {
    // No need for dst and src to be of the same size
    eval_stats(dst);
    eval_stats(src);
    float gain = src->stddev / dst->stddev;
    float bias = src->mean - gain * dst->mean;
    unsigned long long i;
    for (i = 0; i < dst->total_bytes; i++) {
        // FIX: clamp to [0,255] -- the original stored roundf(...) straight
        // into an unsigned char, so out-of-range values wrapped around.
        float v = roundf(dst->data[i] * gain + bias);
        dst->data[i] = v < 0.0f ? 0 : (v > 255.0f ? 255 : (unsigned char) v);
    }
}
// Pan-sharpening pipeline: load R/G/B + high-res pan TIFFs, simulate a
// low-res pan band, forward Gram-Schmidt, swap in the histogram-matched
// high-res pan, inverse Gram-Schmidt, and write the fused RGB result.
int main(int argc, char * argv[])
{
    struct timespec start;
    struct timespec finish;
    if (argc != 5){
        printf("%s <R-filename1> <G-filename2> <B-filename3> <Pan-filename4>\n", argv[0]);
        return 1;
    }
    // Original images
    image_t r, g, b, p;
    printf("--- Loading initial images...\n");
    read_image(&r, argv[1]); // r
    read_image(&g, argv[2]); // g
    read_image(&b, argv[3]); // b
    read_image(&p, argv[4]); // High res pan band
    if ((r.width != g.width) || (r.width != b.width) || (r.height != g.height) || (r.height != b.height)) {
        printf("red, green, blue images are not the same size!\n");
        return 2;
    }
    // Create low res pan band
    printf("--- Simulating low res pan band...\n");
    image_t sim_p;
    resize(&sim_p, &p, r.width, r.height);
    image_t gs[4];
    image_t bands[4]; // Original bands
    image_t out[4];
    // Shallow copies: bands[] aliases the loaded images' pixel buffers, so
    // deallocating bands[i] below also frees the originals.
    bands[0] = sim_p;
    bands[1] = r;
    bands[2] = g;
    bands[3] = b;
    // phi[T][l] holds the Gram-Schmidt projection coefficients.
    float ** phi;
    phi = (float**)malloc(4 * sizeof(float *));
    unsigned i;
    for (i = 0; i < 4; i++)
        phi[i] = (float*)calloc(4, sizeof(float));
    clock_gettime(CLOCK_MONOTONIC, &start);
    printf("--- Executing Gram-Schmidt transformation...\n");
    GramSchmidtTransformation(gs, bands, phi);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    double elapsed;
    elapsed = (finish.tv_sec - start.tv_sec);
    elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0;
    printf("Wall-Clock time passed: %lf \n",elapsed);
    // Histogram-match the high-res pan band to gs[0], then swap it in as the
    // first Gram-Schmidt component for the inverse transform.
    printf("--- Stretching high res pan image...\n");
    stretch(&p, gs + 0);
    dealloc_image(gs + 0);
    gs[0] = p;
    clock_gettime(CLOCK_MONOTONIC, &start);
    printf("--- Executing inverse Gram-Schmidt transformation...\n");
    GramSchmidtReverseTransformation(out, gs, bands, phi);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    elapsed = (finish.tv_sec - start.tv_sec);
    elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0;
    printf("Wall-Clock time passed: %lf \n",elapsed);
    //write_image(bands+0, "i_in_sim_p.tif");
    //write_image(bands+1, "i_in_r.tif");
    //write_image(bands+2, "i_in_g.tif");
    //write_image(bands+3, "i_in_b.tif");
    write_image(gs+0, "i_gs0.tif");
    write_image(gs+1, "i_gs1.tif");
    write_image(gs+2, "i_gs2.tif");
    write_image(gs+3, "i_gs3.tif");
    // Deallocate all non-needed images here
    dealloc_image(bands + 0); // sim_p
    dealloc_image(bands + 1); // r
    dealloc_image(bands + 2); // g
    dealloc_image(bands + 3); // b
    dealloc_image(gs + 0); // Modified pan
    dealloc_image(gs + 1); // GS band 1
    dealloc_image(gs + 2); // GS band 2
    dealloc_image(gs + 3); // GS band 3
    for (i = 0; i < 4; i++)
        free(phi[i]);
    free(phi);
    printf("--- Coalescing image...\n");
    image_t res;
    coalesce(out, &res);
    write_image(out+0, "i_out_r.tif");
    write_image(out+1, "i_out_g.tif");
    write_image(out+2, "i_out_b.tif");
    write_image(out+3, "i_out_p.tif");
    dealloc_image(out + 0);
    dealloc_image(out + 1);
    dealloc_image(out + 2);
    dealloc_image(out + 3);
    printf("--- Writing image...\n");
    write_image(&res, "Gram-Schmidt.tif");
    dealloc_image(&res);
    printf("--- Everything is done!\n");
    return 0;
}
|
4,848 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
/// With k20m and k40m GPUs banks are organized in sets of 8 bytes,
/// for this reason, conflicts happen when accesses to doubles fall on the
/// same bank
// Micro-benchmark: each thread times a single shared-memory increment with
// clock() and writes the raw cycle count to time[threadIdx.x].  The
// commented-out idx variants select different access patterns so bank
// conflicts / broadcasts can be compared run to run.
__global__ void MyKernelHomogeneos(unsigned long long * time) {
    const unsigned sharedSize = 4096;
    __shared__ double shared[sharedSize];
    unsigned long long startTime;
    unsigned long long finishTime;
    // const int idx = 0; //perform a broadcast
    // const int idx = blockIdx.x; // perform a broadcast
    // const int idx = threadIdx.x; // no bank conflict - each therad access different bank
    // const int idx = threadIdx.x*2; // bank conflict starts
    // const int idx = threadIdx.x*32; // worst bank conflict - all threads access same bank
    // const int idx = threadIdx.x*128; // same worst bank conflict
    const int idx = threadIdx.x*4; // current test
    if (idx < sharedSize) {
        // time the access an homogeneous array
        startTime = clock();
        shared[idx]++;
        finishTime = clock();
        time[threadIdx.x] = (finishTime - startTime);
    }
}
// 32-byte array-of-structs element (4 doubles).  With the 8-byte banks
// described in the file header, consecutive elements start 4 banks apart,
// producing a 4-way conflict when a warp accesses shared[t].x.
struct test{
    double x,y,w,z;
    // Adding padding here offsets the 4-way bank conflict
    // at the cost of wasting shared memory
    // comment it out to test difference
    // double padding;
};
// 256-byte element (32 doubles) used to demonstrate the worst case: every
// element starts on the same bank, so per-element accesses fully serialize.
struct test2{
    // This will present a 32-way bank conflict if no padding is added
    // with:
    // __shared__ test2 shared[32];
    // shared[threadIdx.x].x[0]++;
    double x[32];
    // Adding padding here offsets the 32-way bank conflict
    // at the cost of wasting shared memory
    // comment it out to test difference
    // double padding;
};
// Times one increment of shared[idx].x for the array-of-structs layout of
// `test`; each thread records its own clock() delta into time[threadIdx.x].
// With sizeof(test) == 32 bytes and 8-byte banks this exhibits the 4-way
// conflict unless `test` carries its optional padding member.
__global__ void MyKernelDS(unsigned long long * time) {
    const unsigned sharedSize = 1024;
    __shared__ test shared[sharedSize];
    unsigned long long startTime;
    unsigned long long finishTime;
    const int idx = threadIdx.x*1; // current test
    // time the access an homogeneous array
    startTime = clock();
    shared[idx].x++;
    finishTime = clock();
    time[threadIdx.x] = (finishTime - startTime);
}
// Runs the bank-conflict microbenchmark 10 times and prints the per-thread
// access latencies, scaled by 1/32 and reduced by a fixed overhead constant.
// Fixed: allocation and kernel-launch results are now checked, so a failed
// launch no longer prints stale/garbage timings.
int main(int argc, char const *argv[])
{
const unsigned nThreads = 32;
unsigned long long time[nThreads];
unsigned long long * d_time;
if (cudaMalloc(&d_time, sizeof(unsigned long long)*nThreads) != cudaSuccess) {
cout << "cudaMalloc failed" << endl;
return 1;
}
const unsigned long long overhead = 0;
for (int r = 0; r < 10; r++)
{
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
// request 8-byte shared-memory banks (matches the double-based tests)
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
// MyKernelHomogeneos<<< 1,nThreads >>>(d_time);
MyKernelDS<<< 1,nThreads >>>(d_time);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
cout << "kernel launch failed: " << cudaGetErrorString(err) << endl;
break;
}
cudaMemcpy(time, d_time, sizeof(unsigned long long)*nThreads, cudaMemcpyDeviceToHost);
cout << "Time:\t";
for (int i = 0; i < nThreads; i++)
{
cout<<(time[i]-overhead)/32<<"\t";
}
cout << endl<<endl;
}
cudaFree(d_time);
cudaDeviceReset();
return 0;
}
4,849 | #include "includes.h"
// Computes vec[i] = vec1[i] + scale * vec2[i] with one thread per element,
// where scale is alpha, optionally multiplied by the device scalar *d_a1
// (d_a1 may be NULL). Threads past numElements do nothing.
__global__ void kernel_vec_equals_vec1_plus_alpha_times_vec2(double *vec, double *vec1, double alpha, double *d_a1, double *vec2, int numElements)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numElements)
return;
const double scale = d_a1 ? alpha * (*d_a1) : alpha;
vec[idx] = vec1[idx] + scale * vec2[idx];
}
4,850 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#ifndef Size
#define Size 1000
#endif
#define b 4
// Reference (CPU) matrix multiply: C = A * B for Size x Size int matrices.
// Fixed: each C[i][j] is now computed from a local zero-initialized
// accumulator. The previous version accumulated with += into whatever the
// caller left in C -- and main() passed an uninitialized malloc'ed buffer as
// gold_C, so the result check compared against garbage.
void metric_mul_gold(int A[Size][Size], int B[Size][Size], int C[Size][Size])
{
int i,j,k;
for(i=0; i<Size; i++)
for(j=0; j<Size; j++)
{
int sum = 0;
for(k=0; k<Size; k++)
sum += A[i][k]*B[k][j];
C[i][j] = sum;
}
return;
}
void metric_mul(int A[Size][Size], int B[Size][Size], int C[Size][Size]);
// Host driver: builds two random Size x Size int matrices, multiplies them
// on the CPU (metric_mul_gold) and on the GPU (metric_mul, defined in a
// separate translation unit), times the GPU path with CUDA events, and
// verifies both results match byte-for-byte.
int main(void)
{
int i, j;
int size = sizeof(int) * Size * Size;
int *aptr, *bptr, *cptr;
int *host_A, *host_B, *host_C;
srand(time(NULL));
host_A = (int *)malloc(size);
host_B = (int *)malloc(size);
host_C = (int *)malloc(size);
if (host_A == NULL || host_B == NULL || host_C == NULL) {
printf("host allocation failed\n");
return 1;
}
aptr = host_A;
bptr = host_B;
cptr = host_C;
for (i = 0; i < Size; i++)
for (j = 0; j < Size; j++) {
*aptr++ = rand() % 10;
*bptr++ = rand() % 10;
*cptr++ = 0;
}
int *gold_C;
/* calloc, not malloc: metric_mul_gold accumulates with +=, so the gold
   buffer must start zeroed. Previously it held garbage, which made the
   final memcmp verdict unreliable. */
gold_C = (int *)calloc((size_t)Size * Size, sizeof(int));
if (gold_C == NULL) {
printf("host allocation failed\n");
return 1;
}
metric_mul_gold((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])gold_C);
cudaEvent_t start_time, stop_time;
float exectime;
cudaEventCreate(&start_time);
cudaEventCreate(&stop_time);
cudaEventRecord(start_time, 0);
metric_mul((int (*)[Size])host_A, (int (*)[Size])host_B, (int (*)[Size])host_C);
cudaEventRecord(stop_time, 0);
cudaEventSynchronize(stop_time);
cudaEventElapsedTime(&exectime, start_time, stop_time);
printf("real %f ms\n ", exectime);
cudaEventDestroy(start_time);
cudaEventDestroy(stop_time);
// check result
// NOTE(review): memcmp is used but <string.h> is not included here; it
// appears to resolve transitively on this toolchain -- confirm includes.
if (!memcmp(host_C, gold_C, size))
printf("AC!\n");
else
printf("Failed!\n");
free(host_A);
free(host_B);
free(host_C);
free(gold_C);
return 0;
}
|
4,851 | /*
*
*/
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <algorithm>
#include <vector>
using std::cout;
using std::generate;
using std::vector;
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CHECK(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define LEARNING_RATE 0.25
#define NUMB_OF_EPOCHS 100000
#define TD_X 4 // training data in x- dimension
#define TD_Y 2 // training data in y- dimension
#define TD_Z 2 // training data in z- dimension
double TRAINING_DATA[TD_X][TD_Y][TD_Z] = {{{0,0},{0}},
{{0,1},{1}},
{{1,0},{1}},
{{1,1},{1}}};
void trainOnCPU(struct neuron *neurons);
void printNetworkInfo();
#include "Neuron.cu"
// Entry point: selects device 0, prints the network layout, builds the
// 5-neuron network (setNeurons / trainOnCPU come from Neuron.cu) and times
// CPU training with CUDA events.
int main(void){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
printNetworkInfo();
// declare and initialize neurons
struct neuron neurons[5];
setNeurons(neurons);
// train network from CPU.
float CPUtime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
trainOnCPU(neurons);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&CPUtime, start, stop);
printf("Compute time on CPU: %3.6f ms\n", CPUtime);
// release timing events (previously leaked)
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Fixed: return 0 on success. The old 'return(1)' reported failure to the
// shell even when everything ran cleanly.
return 0;
}
|
4,852 | #include "GpuUtils.cuh"
#include "GpuFocalProcessing.cuh"
#include "GpuProjectionProcessing.cuh"
#include "GpuTimer.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise vector addition: res[i] = a[i] + b[i].
// One thread per element; meant for a single-block launch of 'size' threads.
__global__ void addKernelGpu(double* res, const double* a, const double* b)
{
const int idx = threadIdx.x;
res[idx] = a[idx] + b[idx];
}
// Adds two host vectors of length 'size' on the GPU and returns the measured
// kernel time in milliseconds (GPU_TIMER_START/STOP macros from GpuTimer.cuh).
// NOTE(review): the kernel is launched as a single block of 'size' threads,
// so this assumes size does not exceed the per-block thread limit -- confirm.
double winGpu::testPlusGpu(const double* a, const double* b, double* res, size_t size)
{
const size_t bytes = size * sizeof(double);
double* dA = 0;
double* dB = 0;
double* dRes = 0;
cudaSetDevice(0);
cudaMalloc((void**)&dRes, bytes);
cudaMalloc((void**)&dA, bytes);
cudaMalloc((void**)&dB, bytes);
cudaMemcpy(dA, a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dB, b, bytes, cudaMemcpyHostToDevice);
float elapsedMs;
GPU_TIMER_START;
addKernelGpu << <1, (int)size >> > (dRes, dA, dB);
cudaDeviceSynchronize();
GPU_TIMER_STOP(elapsedMs);
cudaMemcpy(res, dRes, bytes, cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dRes);
return (double)elapsedMs;
}
// Thin forwarding wrapper: delegates the focal operation to doFocalOpGpu
// (GpuFocalProcessing.cuh) and returns its result unchanged.
double winGpu::performFocalOpGpu(pixel* input, int height, int width, pixel* output, std::vector<double> matrix)
{
return winGpu::doFocalOpGpu(input, height, width, output, matrix);
}
// Thin forwarding wrapper: delegates UTM->WGS coordinate transformation to
// doTransformUtmToWgsCoordsGpu (GpuProjectionProcessing.cuh).
double winGpu::performTransformUtmToWgsCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, bool southhemi, double* lon, double* lat)
{
return winGpu::doTransformUtmToWgsCoordsGpu(xOrigin, yOrigin, xPixelSize, yPixelSize,
height, width, zone, southhemi, lon, lat);
}
// Thin forwarding wrapper: delegates WGS->UTM coordinate transformation to
// doTransformWgsToUtmCoordsGpu (GpuProjectionProcessing.cuh).
double winGpu::performTransformWgsToUtmCoordsGpu(double xOrigin, double yOrigin, double xPixelSize, double yPixelSize,
int height, int width, int zone, double* x, double* y)
{
return winGpu::doTransformWgsToUtmCoordsGpu(xOrigin, yOrigin, xPixelSize, yPixelSize,
height, width, zone, x, y);
}
|
4,853 | #include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime_api.h>
#define BASE_TYPE float
#define BLOCK_SIZE 10
// Tiled (shared-memory) matrix multiply producing C = A * B, where each block
// computes one BLOCK_SIZE x BLOCK_SIZE tile of C. N is A's row length and
// M is B's row length (row-major storage).
// Preconditions (not checked): blockDim == (BLOCK_SIZE, BLOCK_SIZE), and the
// tile loads assume the strips divide evenly -- NOTE(review): confirm N and M
// are multiples of the block dimensions; main() here launches one N x N
// block with N == BLOCK_SIZE, which satisfies this.
__global__ void mult(const BASE_TYPE *A, const BASE_TYPE *B, BASE_TYPE *C, const int N, const int M)
{
// first/last index of the A row strip this block walks, and its step
int aBegin = N * blockDim.y * blockIdx.y;
int aEnd = aBegin + N - 1;
int aStep = blockDim.x;
// first index and step of the matching B column strip
int bBegin = blockDim.x * blockIdx.x;
int bStep = blockDim.y * M;
__shared__ BASE_TYPE as [BLOCK_SIZE][BLOCK_SIZE];
__shared__ BASE_TYPE bs [BLOCK_SIZE][BLOCK_SIZE];
BASE_TYPE sum = 0.0;
for (int ia = aBegin, ib = bBegin; ia < aEnd; ia += aStep, ib += bStep) {
// each thread stages one element of the current A and B tiles
as[threadIdx.y][threadIdx.x] = A[ia + N * threadIdx.y + threadIdx.x];
bs[threadIdx.y][threadIdx.x] = B[ib + M * threadIdx.y + threadIdx.x];
// wait for the full tile before consuming it
__syncthreads();
for (int k = 0; k < blockDim.x; k++) {
sum += as[threadIdx.y][k] * bs[k][threadIdx.x];
}
// keep fast threads from overwriting the tile while others still read
__syncthreads();
}
// linear index of this thread's output element in C
int ind = M * (blockDim.y * blockIdx.y + threadIdx.y) + blockDim.x * blockIdx.x + threadIdx.x;
C[ind] = sum;
}
// Allocates an N x N row-major matrix with a[i][j] = i*N + j, i.e. each cell
// holds its own linear index. Caller owns the buffer (delete[]).
BASE_TYPE* gen_array(const int N)
{
BASE_TYPE *m = new BASE_TYPE[N * N];
for (int idx = 0; idx < N * N; idx++)
m[idx] = idx;
return m;
}
// Prints an N x N row-major matrix as zero-decimal, 5-wide columns, one row
// per line, followed by a blank separator line.
void print_array(BASE_TYPE *a, const int N)
{
for (int r = 0; r < N; r++)
{
for (int c = 0; c < N; c++)
printf("%5.0f ", a[r * N + c]);
printf("\n");
}
printf("\n");
}
// Allocates 'size' bytes of device memory into *dev and, when 'host' is
// non-NULL, copies 'size' bytes host->device. Throws the cudaError_t of the
// failing call on error.
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
cudaError_t status = cudaMalloc((void **)dev, size);
if (status != cudaSuccess)
throw status;
if (host == NULL)
return;
status = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess)
throw status;
}
// Fills in a single-block grid and an N x N thread block, then echoes both
// configurations to stdout. Assumes N*N stays within the per-block limit.
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int N)
{
*grid = dim3(1);
*block = dim3(N, N, 1);
printf("Block %d %d %d\n", block->x, block->y, block->z);
printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}
// Host driver: generates two N x N matrices, multiplies them on the GPU with
// a single N x N thread block, and prints the inputs and the product (the
// result is copied back into host_a).
int main()
{
const int N = 10;
const size_t size = N * N * sizeof(BASE_TYPE);
cudaError_t err;
dim3 blockDim, gridDim;
cuda_init_grid_and_block(&gridDim, &blockDim, N);
BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N);
BASE_TYPE *dev_a, *dev_b, *dev_c;
// check both host allocations (previously only host_a was tested)
if (host_a == NULL || host_b == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
print_array(host_a, N);
print_array(host_b, N);
try
{
cuda_init_array(&dev_a, host_a, size);
cuda_init_array(&dev_b, host_b, size);
cuda_init_array(&dev_c, NULL, size);
}
catch (cudaError_t err)
{
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
mult<<<gridDim, blockDim>>>(dev_a, dev_b, dev_c, N, N);
err = cudaMemcpy(host_a, dev_c, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
print_array(host_a, N);
cudaFree(dev_a);
cudaFree(dev_b);
// fixed: dev_c was previously leaked
cudaFree(dev_c);
delete[] host_a;
delete[] host_b;
return 0;
}
4,854 | #include "includes.h"
// Computes pairwise mutual information for every (x-column, y-column) pair on
// a 2-D thread grid: MI = H(x) + H(y) + sum_ij p_ij * log2(p_ij), where the
// double loop below accumulates the (negative) joint entropy from soft bin
// scores. Columns are laid out [sample][bin], separated by the given pitch
// (counted in floats). Threads outside nx x ny exit early.
// NOTE(review): bin scores are treated as probabilities whose products,
// averaged over samples, give joint probabilities -- confirm upstream
// normalization guarantees this.
__global__ void get_mi(int nbins, int nsamples, int nx, float * x_bin_scores, int pitch_x_bin_scores, float * entropies_x, int ny, float * y_bin_scores, int pitch_y_bin_scores, float * entropies_y, float * mis, int pitch_mis)
{
int
col_x = blockDim.x * blockIdx.x + threadIdx.x,
col_y = blockDim.y * blockIdx.y + threadIdx.y;
if((col_x >= nx) || (col_y >= ny))
return;
float
prob, logp, mi = 0.f,
* x_bins = x_bin_scores + col_x * pitch_x_bin_scores,
* y_bins = y_bin_scores + col_y * pitch_y_bin_scores;
// calculate joint entropy (accumulated into mi as sum p*log2(p), which is -H)
for(int i = 0; i < nbins; i++) {
for(int j = 0; j < nbins; j++) {
// joint probability of bin pair (i, j), averaged over samples
prob = 0.f;
for(int k = 0; k < nsamples; k++)
prob += x_bins[k * nbins + i] * y_bins[k * nbins + j];
prob /= (float)nsamples;
// log2(0) is undefined; p*log2(p) -> 0 as p -> 0, so clamp to 0
if(prob <= 0.f)
logp = 0.f;
else
logp = __log2f(prob);
mi += prob * logp;
}
}
// calculate mi from entropies: MI = H(x) + H(y) - H(x,y)
mi += entropies_x[col_x] + entropies_y[col_y];
(mis + col_y * pitch_mis)[col_x] = mi;
}
4,855 | /*******************************************************************************
This program uses two libraries from the CUDA toolkit "cuFFT" and "cuRand"
executeCudaRNG() routine generates a normally distributed random number arrays
executeCudaFFT() routine gives an example on how to use the cuFFT library to get the
Frequency spectrum of a two tone signal and see the frequency components of the
time domain signal
Author: Said Darham
*******************************************************************************/
#include <iostream>
#include <stdlib.h> //srand and rand
#include <math.h>
#include <iomanip> //for setting float precision
#include <curand.h>
#include <curand_kernel.h>//cuRand header files
#include <cufft.h>//cuFFT
#define SEED 1234
#define MAXLEN 1000
#define SAMPLERATE 500
#define PI 3.14159265358979323846
//Timer struct declaration. Using CUDA EVENTS
typedef struct timer{
cudaEvent_t startEvent;
cudaEvent_t stopEvent;
float time_ms;
} timerEvent;
typedef float2 Complex;
/*******************************************************************************
PROFILER FUNCTIONS USING EVENTS
*******************************************************************************/
void startEventTimer(timerEvent *timer){
/* startEventTimer()
Creates both CUDA events and records the start event on the default stream.
Pair with stopEventTimer() to read the elapsed time, and freeEventTimer()
to release the events.
*/
cudaEventCreate(&timer->startEvent);
cudaEventCreate(&timer->stopEvent);
cudaEventRecord(timer->startEvent);
}
void stopEventTimer(timerEvent *timer){
/* stopEventTimer()
Records the stop event and stores the start->stop elapsed time (ms) in
timer->time_ms. The synchronize blocks the host until the stop event has
actually completed; without it the elapsed-time query would race the GPU.
*/
cudaEventRecord(timer->stopEvent);
cudaEventSynchronize(timer->stopEvent);
cudaEventElapsedTime(&timer->time_ms, timer->startEvent, timer->stopEvent);
}
void freeEventTimer(timerEvent *timer){
/* freeEventTimer()
Destroys both CUDA events created by startEventTimer().
*/
cudaEventDestroy(timer->startEvent);
cudaEventDestroy(timer->stopEvent);
}
/*******************************************************************************
Helper Functions
*******************************************************************************/
// Prints the first n floats of 'array', one per line with 4 decimal places,
// followed by a trailing blank line.
void printArray(float *array, const int n){
int idx = 0;
while (idx < n) {
std::cout << std::fixed << std::setprecision(4) << array[idx] << "\n";
++idx;
}
std::cout << std::endl;
}
// Prints the first n complex values as "re im i" with 4 decimal places.
// Fixed: the loop previously iterated a hard-coded 50 elements regardless of
// n (the old inline comment itself said "CHANGE from 50 to n"), reading past
// the end of any shorter array.
void printComplexArray( Complex *array, const int n){
for(int i = 0; i < n; i++)
std::cout << std::fixed << std::setprecision(4) << array[i].x << " " << array[i].y << "i" << std::endl;
}
// Fills 'array' with an n-sample complex baseband test signal: a unit-amplitude
// tone at 10 Hz plus a 0.7-amplitude tone at 5 Hz, sampled at SAMPLERATE Hz.
void generateSignal( Complex *array, const int n){
const float dt = (float)1 / (float)SAMPLERATE; // sampling period in seconds
for(int k = 0; k < n; k++){
array[k].x = 0.7*cos(2*PI*5*k*dt) + cos(2*PI*10*k*dt);
array[k].y = 0.7*sin(2*PI*5*k*dt) + sin(2*PI*10*k*dt);
}
}
/*TODO: Consider implementing these on Device GPU for performance */
/* Scans the magnitude spectrum 'array' (length n) and prints every bin whose
   magnitude exceeds a fixed 0.5 threshold, along with its frequency in Hz
   (bin index * SAMPLERATE / MAXLEN).
   NOTE(review): despite the name and old comment, the 'amplitude' and
   'frequency' output arrays are never written -- results are only printed.
   Callers must not rely on these parameters being filled; confirm intent. */
void getFrequencies(float *array, float *amplitude, float *frequency,const int n){
//Returns array of frequency components and corresponding amplitude from spectrum
std::cout << "Extracting frequency components and amplitude...\n";
float threshold = 0.5;
for(int freqIdx = 0; freqIdx<n; freqIdx++){
if( array[freqIdx] > threshold ){
std::cout << std::fixed << std::setprecision(4) << "Amplitude: " << array[freqIdx] << " Frequency: " << freqIdx * (float)SAMPLERATE / (float)MAXLEN << " Hz" << std::endl;
}
}
std::cout << std::endl;
}
// Computes the length-scaled magnitude of each complex FFT bin:
// result[i] = |a[i]| / n.
// Fixed: replaced sqrt(pow(x,2.0) + pow(y,2.0)) -- a needless double-precision
// pow()/sqrt() round trip -- with hypotf, as the original TODO suggested;
// hypotf is also robust against intermediate overflow/underflow.
void magFFT(Complex *a, float *result, const int n){
for(int i = 0; i<n; i++){
const float re = a[i].x / (float)n;
const float im = a[i].y / (float)n;
result[i] = hypotf(re, im);
}
}
/*******************************************************************************
Kernel Functions
*******************************************************************************/
__global__ void initStates(const unsigned int seed, curandState_t *states){
//initialize the states for each thread
// curand_init(seed, sequence=tid, offset=0): each thread gets an
// independent subsequence of the generator.
// NOTE(review): no bounds check -- the launch must supply exactly one
// thread per states[] entry; confirm launch configuration at call sites.
int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init( seed, tid, 0, &states[tid]);
}
__global__ void rngNormal(float *dRand, curandState_t *states){
//generate a batch of normally distributed random numbers (~N(0,1)),
//one value per thread, advancing that thread's state in place.
// NOTE(review): no bounds check -- launch size must match the dRand/states
// array lengths; confirm at call sites.
int tid = blockIdx.x * blockDim.x + threadIdx.x;
dRand[tid] = curand_normal(&states[tid]);
}
/*******************************************************************************
Test Functions
*******************************************************************************/
// Generates totalThreads normally distributed floats (~N(0,1)) on the GPU via
// cuRAND and reports the elapsed time (allocation + init + generate + copy).
// Requires totalThreads == numBlocks * blockSize.
void executeCudaRNG(int totalThreads, int numBlocks, int blockSize){
std::cout << "\nExecuting Random Number Generator Using cuRAND...";
//host and device random number array
float *hRand, *dRand;
// Size, in bytes, of each vector
const unsigned int bytes = totalThreads*sizeof(float);
//random number generator (rng) states, one per generated value
curandState_t *states;
// Start a timer
timerEvent timer;
startEventTimer(&timer);
//allocate host and device memory
hRand = (float *)malloc(bytes);
cudaMalloc((void **)&dRand, bytes );
cudaMalloc((void**) &states, totalThreads * sizeof(curandState_t));
// Fixed launch configuration: this was <<<numBlocks, totalThreads>>>, which
// launches numBlocks*totalThreads threads -- writing past the end of
// 'states' (sized totalThreads) and exceeding the per-block thread limit
// whenever totalThreads > 1024. It must match the rngNormal launch below.
initStates<<<numBlocks, blockSize>>>(SEED, states);
//Generate normally distributed data (float) with mean 0 and standard deviation of 1 ~N(0,1)
rngNormal<<<numBlocks, blockSize>>>(dRand, states);
//copy results from device to host
cudaMemcpy(hRand, dRand, bytes, cudaMemcpyDeviceToHost);
stopEventTimer(&timer);
std::cout << "Elapsed Time: " << timer.time_ms << " ms\n" << std::endl;
//print the array if you want
//printArray(hRand, totalThreads);
//clean up
cudaFree(states);
cudaFree(dRand);
free(hRand);
}
// Builds a two-tone complex test signal, runs an in-place forward FFT on the
// GPU with cuFFT, then prints the dominant frequency components on the host.
void executeCudaFFT(void){
std::cout << "Executing FFT Example using cuFFT...";
//initialize host variables
Complex *hSig = new Complex[MAXLEN]; //complex baseband signal
Complex *hSig_w = new Complex[MAXLEN]; //spectrum of time domain signal
float *hMagSig_w = new float[MAXLEN]; //magnitude of spectrum (host)
//arrays containing a vector of the frequency components and corresponding amplitudes
float *amplitude = new float[MAXLEN];
float *frequency = new float[MAXLEN];
//size of complex signal
int bytes = MAXLEN * sizeof(Complex);
//initialize host complex array with a simple two tone signal
generateSignal(hSig, MAXLEN);
// Start a timer
timerEvent timer;
startEventTimer(&timer);
//initialize and allocate the device signal
cufftComplex *dSig;
cudaMalloc((void **)&dSig, bytes);
cudaMemcpy(dSig, hSig, bytes, cudaMemcpyHostToDevice);
//Executing FFT on device (in place, complex-to-complex forward transform)
cufftHandle plan;
cufftPlan1d(&plan, MAXLEN, CUFFT_C2C, 1);
cufftExecC2C(plan, (cufftComplex *)dSig, (cufftComplex *)dSig, CUFFT_FORWARD);
cudaMemcpy(hSig_w, dSig, bytes, cudaMemcpyDeviceToHost);
stopEventTimer(&timer);
std::cout << "Elapsed Time: " << timer.time_ms << " ms\n" << std::endl;
//Compute the magnitude of the fourier transformed signal
magFFT(hSig_w, hMagSig_w, MAXLEN);
//extract the amplitude and frequencies
getFrequencies(hMagSig_w, amplitude, frequency, MAXLEN);
//clean up -- fixed: 'delete hSig, hSig_w, hMagSig_w' used the comma
//operator, so only hSig was ever freed (and with delete instead of
//delete[]); amplitude and frequency leaked as well.
delete[] hSig;
delete[] hSig_w;
delete[] hMagSig_w;
delete[] amplitude;
delete[] frequency;
cufftDestroy(plan);
cudaFree(dSig);
}
/*******************************************************************************
MAIN
*******************************************************************************/
// Entry point: optionally reads [totalThreads blockSize] from the command
// line, rounds the thread count up to a whole number of blocks, then runs
// the cuRAND and cuFFT demos.
int main(int argc, char** argv)
{
int totalThreads = (1 << 10);
int blockSize = 256;
//User wants to run the Global vs Pinned Examples
if( argc > 2 && argc < 4){
// Ensure the user supplies both number of threads and block size
// otherwise use default values
totalThreads = atoi(argv[1]);
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
std::cout << "\nUsing " << totalThreads << " Threads and " << blockSize << " BlockSize\n" ;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
std::cout << "Warning: Total thread count is not evenly divisible by the block size\n";
// Fixed: the old message printed a literal "%d" (a printf placeholder
// left inside a std::cout string); stream the rounded value instead.
std::cout << "The total number of threads will be rounded up to " << totalThreads << "\n";
}
//Execute random number generator test on GPU using cuRand()
executeCudaRNG(totalThreads, numBlocks, blockSize);
//Execute FFT computation using cuFFT()
executeCudaFFT();
return 0;
}
|
4,856 | #include <stdlib.h>
#include <stdio.h>
// Demonstrates per-thread device-side heap allocation: each thread mallocs a
// small buffer, writes it, prints the pointer and first byte, and frees it.
// Requires an adequate device heap (cudaLimitMallocHeapSize).
__global__ void mallocTest()
{
size_t size = 123;
char* ptr = (char*)malloc(size);
// Device-side malloc returns NULL when the device heap is exhausted;
// the unchecked memset would then fault the whole kernel.
if (ptr == NULL) {
printf("Thread %d: device malloc failed\n", threadIdx.x);
return;
}
memset(ptr, 0, size);
ptr[0] = 9;
printf("Thread %d got pointer: %p: %d\n", threadIdx.x, ptr, ptr[0]);
free(ptr);
}
// int main()
// {
// // Set a heap size of 128 megabytes. Note that this must
// // be done before any kernel is launched.
// cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
// mallocTest<<<1, 5>>>();
// cudaDeviceSynchronize();
// return 0;
// }
#include<iostream>
// Minimal host demo: fills the first slot of a small array and prints it.
int main(){
// Fixed: 'r' must be a compile-time constant here -- 'int arr[r]' with a
// runtime bound is a variable-length array, which is a GCC extension and
// not standard C++.
const int r = 2;
int arr[r];
arr[0]= 3;
std::cout<< arr[0];
return 0;
}
4,857 | /*
Sequencial
real 1m11.421s
user 1m10.983s
sys 0m0.232s
Paralelo
real 0m40.724s
user 2m33.424s
sys 0m3.183s
Paralelo - GPU - OpenMP
real 0m4.863s
user 0m3.624s
sys 0m1.211s
Paralelo - GPU - CUDA
real 0m0.442s
user 0m0.174s
sys 0m0.264s
=================================================================================
OpenMP:
==8143== Event result:
Invocations Event Name Min Max Avg Total
Device "GeForce GT 1030 (0)"
Kernel: mm$_omp_fn$0
1 warps_launched 72 72 72 72
==8143== Metric result:
Invocations Metric Name Metric Description Min Max Avg
Device "GeForce GT 1030 (0)"
Kernel: mm$_omp_fn$0
1 warp_execution_efficiency Warp Execution Efficiency 86.81% 86.81% 86.81%
CUDA:
==8528== Profiling result:
No events/metrics were profiled.
*/
#include <stdio.h>
#include <stdlib.h>
// Square matrix multiply c = a * b (width x width, row-major), one output
// element per thread over a 2-D grid.
__global__
void mm(double *a, double *b, double *c, int width) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Guard: the grid is rounded up, so threads past the matrix edge must not
// touch memory. Previously they read and wrote out of bounds whenever
// width was not a multiple of the block size.
if (i >= width || j >= width)
return;
double sum = 0;
for (int k = 0; k < width; k++) {
double x = a[i * width + k];
double y = b[k * width + j];
sum += x * y;
}
c[i * width + j] = sum;
}
// Host driver for the GPU matrix multiply: fills a[i][j] = i and b[i][j] = j,
// launches mm over a 2-D grid, and copies the product back into c.
int main(){
int width = 2000;
int size = width * width * sizeof(double);
double *a = (double*) malloc(size);
double *b = (double*) malloc(size);
double *c = (double*) malloc(size);
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
a[i * width + j] = i;
b[i * width + j] = j;
c[i * width + j] = 0;
}
}
// Fixed: the previous block_size of 44 gave 44*44 = 1936 threads per block,
// above the 1024-thread hardware limit, so every launch failed and c was
// never computed. 16x16 = 256 threads per block is always valid.
int block_size = 16;
int grid_size = ((width - 1) / block_size) + 1;
double *d_a, *d_b, *d_c;
cudaMalloc((void **) &d_a, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_b, size);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_c, size);
dim3 dimGrid(grid_size, grid_size, 1);
dim3 dimBlock(block_size, block_size, 1);
mm<<<dimGrid,dimBlock>>>(d_a, d_b, d_c, width);
// surface launch-configuration errors instead of silently copying garbage
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("kernel launch failed: %s\n", cudaGetErrorString(err));
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// release host buffers (previously leaked)
free(a);
free(b);
free(c);
}
4,858 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel ("Do not modify" per the file
   header): accumulates a chain of arbitrary float expressions into 'comp'
   and prints the final value. Launched <<<1,1>>> by main().
   Note: the inner loops deliberately re-declare 'i', shadowing the outer
   loop variable -- this is as generated and behavior-relevant. */
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float* var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
for (int i=0; i < var_1; ++i) {
comp += (var_4 * -1.7769E-35f / (+1.8131E34f / -1.7173E-35f));
comp += (var_5 + (var_6 / (var_7 - (-1.8712E34f / -0.0f))));
for (int i=0; i < var_2; ++i) {
var_8[i] = +1.2402E36f;
comp += var_8[i] * (-1.1860E35f + (var_9 * fabsf(+1.2819E-8f)));
}
for (int i=0; i < var_3; ++i) {
comp = (+0.0f * (var_10 / (-1.1915E34f + var_11 / var_12)));
}
if (comp == (-1.5743E-37f / logf((var_13 - (+1.1502E35f + (+1.9065E-43f / (var_14 / var_15 + var_16))))))) {
comp += var_17 / var_18 / (var_19 - -1.5312E-26f);
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array with every slot set to v.
// Caller owns the returned buffer (free()).
float* initPointer(float v) {
float *buf = (float*) malloc(sizeof(float)*10);
int k = 0;
while (k < 10) {
buf[k] = v;
++k;
}
return buf;
}
// Entry point for the generated test: parses 20 numeric arguments and runs
// the compute kernel once on a single thread.
int main(int argc, char** argv) {
/* Program variables */
// The kernel expects exactly 20 numeric arguments; reading argv[1..20]
// without checking argc (as before) was undefined behavior when fewer
// were supplied.
if (argc < 21) {
fprintf(stderr, "usage: %s <20 numeric arguments>\n", argv[0]);
return 1;
}
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float* tmp_9 = initPointer( atof(argv[9]) );
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
cudaDeviceSynchronize();
return 0;
}
|
4,859 | #include "includes.h"
// Weight-gradient accumulation for a convolution layer:
//   dw[row, col] += sum_{i=0..S-1} x[row + S*i] * dy[i, col]
// over a 2-D grid covering an inSize x outSize gradient matrix; threads
// outside that range exit.
// NOTE(review): dw is accumulated into (+=), so callers must zero it before
// the first launch -- confirm at call sites.
__global__ void convdw_gpu_kernel(float *dw, float *dy, float *x, const int S,const int outSize, const int inSize){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if(row < inSize && col < outSize){
// printf("row %d, col %d, bias[col] %.2f\n", row, col,bias[col]);
for(int i = 0; i < S; ++i){
dw[row*outSize+col] +=x[row+S*i ]*dy[i*outSize+col];
// printf("x[%d] is %.1f,dy[%d] is %.1f\n", row + S*i,x[row + S*i],i*S+row,dy[i*outSize+col]);
}
// printf("conv dw %d is %3f\n",row*outSize+col, dw[row*outSize+col] );
}
}
4,860 | /* This file is part of the Marching Cubes GPU based algorithm based on
* Paul Bourke's tabulation approach to marching cubes
* http://paulbourke.net/geometry/polygonise/
*
*
* We model cubes with 8 vertices labelled as below
*
*
* 4--------(4)---------5
* /| /|
* / | / |
* / | / |
* (7) | (5) |
* / | / |
* / (8) / (9)
* / | / |
* 7---------(6)--------6 |
| | | |
* | 0------(0)---|-------1
* | / | /
* (11) / (10) /
* | / | /
* | (3) | (1)
* | / | /
* | / | /
* |/ |/
* 3---------(2)--------2
*
* where X axis is horizontal, +ve to right
* Y axis is vertical, +ve upwards
* Z axis is into page, +ve towards back
*
* 0: ( x, y, z+1 ) 4: ( x, y+1, z+1 )
* 1: ( x+1, y, z+1 ) 5: ( x+1, y+1, z+1 )
* 2: ( x+1, y, z ) 6: ( x+1, y+1, z )
* 3: ( x, y, z ) 7: ( x, y+1, z )
*
* There are 12 edges, 0 - 11 where each edge connectes two vertices as follows:
*
* 0: 0, 1 1: 1, 2 2: 2, 3 3: 3, 0
* 4: 4, 5 5: 5, 6 6: 6, 7 7: 7, 4
* 8: 0, 4 9: 1, 5 10: 2, 6 11: 3, 7
*/
// NB Below, these are ordered from lower to higher value
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
__constant__
uint16_t EDGE_VERTICES[12][2] = {
{ 0, 1 }, { 2, 1 }, { 3, 2 }, { 3, 0 },
{ 4, 5 }, { 6, 5 }, { 7, 6 }, { 7, 4 },
{ 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 }
};
/*
* This file describes the relationship between the vertices under the surface
* and the edges which are therefore impacted
* There are 256 distinct entries
*/
__constant__
uint16_t EDGE_TABLE[256]={
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
}; |
4,861 | // On Maverick2: sbatch mvk2GPUMatMul
// nvcc BrodayWalker1B.cu -o BrodayWalker1B.exe
//***************************************************************************
// Name: Broday Walker
// Instructor: Dr. Colmenares
// Class: CMPS 5433
// Date: March 2, 2020
//***************************************************************************
// This program implements matrix multiplication using a GPU on Maverick2.
// The program reports the elapsed time taken to complete the matrix
// multiplication in milliseconds. It is significantly faster than its
// sequential counterpart.
//***************************************************************************
#include <stdio.h>
#include <cuda.h>
enum N {N = 32};
// matmulKernel performs matrix multiplication on a linearized array
// This code was given in the slides and adapted for use here
// Computes one element of Cd = Ad * Bd (row-major, width x width) per thread.
// Intended for a single-block launch with blockDim == (width, width), as in
// main() below; tx/ty select the output column/row.
__global__ void matmulKernel(int *Ad, int *Bd, int *Cd, int width)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
// Accumulate in int: the original used a float accumulator for integer
// matrices, which silently loses precision once partial sums exceed 2^24.
int sum = 0;
for (int k = 0; k < width; k++)
{
int Aelement = Ad[ty * width + k];
int Belement = Bd[k * width + tx];
sum += Aelement * Belement;
}
Cd[ty * width + tx] = sum;
}
// Host driver: builds two N x N int matrices, multiplies them on the GPU in a
// single N x N thread block (N = 32, so 1024 threads -- at the hardware
// limit), times the kernel with CUDA events, and prints the element sum of
// the product plus the elapsed time.
int main()
{
// Declarations
int A[N * N], B[N * N], C[N * N];
int *Ad, *Bd, *Cd;
int size = N * N * sizeof(int);
int sum = 0;
// Declare the timer
// Reference:
// https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
// Fill arrays A and C
// Array C will be filled with 0s
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
A[i * N + j] = i;
C[i * N + j] = 0;
}
// Fill B: every element of row i is N-1-i
int row = N - 1;
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
B[i * N + j] = row;
row--;
}
/* Allocate memory and copy to device */
cudaMalloc((void**)&Ad, size);
cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Bd, size);
cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Cd, size);
/* End memory allocation and copying to device */
/* Define grid and block dimensions */
dim3 dimGrid( 1, 1 );
dim3 dimBlock( N, N );
/* Record start time */
cudaEventRecord(start);
/* Invoke the kernel */
matmulKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, N);
/* Record end time (event is recorded after the async launch; the elapsed
   time is read only after cudaEventSynchronize below) */
cudaEventRecord(stop);
/* Copy the matrix multiplication results from device to host */
cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
/* Block CPU execution until the specified event is recorded */
cudaEventSynchronize(stop);
/* Returns the elapsed time in milliseconds to the first argument */
cudaEventElapsedTime(&milliseconds, start, stop);
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
// Sum the array and print the results
for (int i = 0; i < N * N; i++)
sum += C[i];
// Print results
printf("The summation of all the elements is %d.\n", sum);
// Print elapsed time
printf("\nElapsed time in milliseconds: %f.\n", milliseconds);
return 0;
}
4,862 | #include "includes.h"
// Element-wise vector addition C = A + B with one thread per element.
// Fixed: the original wrapped the assignment in a redundant 'for (i < N)'
// loop that recomputed the same element N times -- accidental O(N^2) work
// with no effect on the result.
__global__ void sumArrays(float *A, float *B, float *C, const int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
C[idx] = A[idx] + B[idx];
}
}
4,863 | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <cstdio>
#define PI 3.1415926535897932384626433832795029
#define PIx2 6.2831853071795864769252867665590058
/* Adjustable parameters */
#define KERNEL_RHO_PHI_THREADS_PER_BLOCK 512
#define KERNEL_FH_THREADS_PER_BLOCK 256
#define KERNEL_FH_K_ELEMS_PER_GRID 512
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define CUDA_ERRCK \
{cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
fprintf(stderr, "CUDA error on line %d: %s\n", __LINE__, cudaGetErrorString(err)); \
exit(-1); \
} \
}
struct kValues {
float Kx;
float Ky;
float Kz;
float RhoPhiR;
float RhoPhiI;
};
__constant__ __device__ kValues c[KERNEL_FH_K_ELEMS_PER_GRID];
// Zero-initializes the four host work arrays used by the reconstruction:
// the rho*phi products (real/imag, numK entries each) and the FH output
// accumulators (real/imag, numX entries each). Ownership passes to the
// caller (free()).
void createDataStructs(int numK, int numX,
float*& realRhoPhi, float*& imagRhoPhi,
float*& outR, float*& outI)
{
realRhoPhi = static_cast<float*>(calloc(numK, sizeof(float)));
imagRhoPhi = static_cast<float*>(calloc(numK, sizeof(float)));
outR = static_cast<float*>(calloc(numX, sizeof(float)));
outI = static_cast<float*>(calloc(numX, sizeof(float)));
}
// Per-sample complex product conj(phi) * d:
//   real = phiR*dR + phiI*dI,  imag = phiR*dI - phiI*dR.
// One thread per k-space sample; threads at or past numK exit early.
__global__ void
ComputeRhoPhiGPU(int numK,
float* phiR, float* phiI,
float* dR, float* dI,
float* realRhoPhi, float* imagRhoPhi)
{
int indexK = blockIdx.x*KERNEL_RHO_PHI_THREADS_PER_BLOCK + threadIdx.x;
if (indexK >= numK)
return;
const float pR = phiR[indexK];
const float pI = phiI[indexK];
const float vR = dR[indexK];
const float vI = dI[indexK];
realRhoPhi[indexK] = pR * vR + pI * vI;
imagRhoPhi[indexK] = pR * vI - pI * vR;
}
/* Accumulate one constant-memory tile of the FH sums into outR/outI.
 *
 * One thread per element of x/y/z.  The current tile of up to
 * KERNEL_FH_K_ELEMS_PER_GRID k-space terms (starting at kGlobalIndex)
 * has already been staged into constant array `c` by the host launcher;
 * each thread adds this tile's contribution for its voxel.
 *
 * NOTE(review): xIndex is not bounds-checked; the launcher rounds the
 * grid up to whole blocks, so the x/y/z/out arrays are presumably padded
 * to a multiple of KERNEL_FH_THREADS_PER_BLOCK -- confirm with callers.
 */
__global__ void
ComputeFH_GPU(int numK, int kGlobalIndex,
              float* x, float* y, float* z,
              float* outR, float* outI)
{
  float sX;
  float sY;
  float sZ;
  float sOutR;
  float sOutI;
  // Determine the element of the X arrays computed by this thread
  int xIndex = blockIdx.x*KERNEL_FH_THREADS_PER_BLOCK + threadIdx.x;
  // Cache this thread's coordinates and running sums in registers.
  sX = x[xIndex];
  sY = y[xIndex];
  sZ = z[xIndex];
  sOutR = outR[xIndex];
  sOutI = outI[xIndex];
  // Loop over all elements of K in constant mem to compute a partial value
  // for X.
  int kIndex = 0;
  int kCnt = numK - kGlobalIndex;
  // Partial last tile only: peel off (kCnt % 4) terms one at a time so the
  // 4-way unrolled loop below always processes complete groups of four.
  if (kCnt < KERNEL_FH_K_ELEMS_PER_GRID) {
    for (kIndex = 0;
         (kIndex < (kCnt % 4)) && (kGlobalIndex < numK);
         kIndex++, kGlobalIndex++) {
      float expArg = PIx2 *
        (c[kIndex].Kx * sX + c[kIndex].Ky * sY + c[kIndex].Kz * sZ);
      float cosArg = cos(expArg);
      float sinArg = sin(expArg);
      // Complex multiply-accumulate: out += rhoPhi * exp(i * expArg).
      sOutR += c[kIndex].RhoPhiR * cosArg - c[kIndex].RhoPhiI * sinArg;
      sOutI += c[kIndex].RhoPhiI * cosArg + c[kIndex].RhoPhiR * sinArg;
    }
  }
  // Main loop, manually unrolled by 4 (remainder handled above).
  for (;
       (kIndex < KERNEL_FH_K_ELEMS_PER_GRID) && (kGlobalIndex < numK);
       kIndex += 4, kGlobalIndex += 4) {
    float expArg = PIx2 *
      (c[kIndex].Kx * sX + c[kIndex].Ky * sY + c[kIndex].Kz * sZ);
    float cosArg = cos(expArg);
    float sinArg = sin(expArg);
    sOutR += c[kIndex].RhoPhiR * cosArg - c[kIndex].RhoPhiI * sinArg;
    sOutI += c[kIndex].RhoPhiI * cosArg + c[kIndex].RhoPhiR * sinArg;
    int kIndex1 = kIndex + 1;
    float expArg1 = PIx2 *
      (c[kIndex1].Kx * sX + c[kIndex1].Ky * sY + c[kIndex1].Kz * sZ);
    float cosArg1 = cos(expArg1);
    float sinArg1 = sin(expArg1);
    sOutR += c[kIndex1].RhoPhiR * cosArg1 - c[kIndex1].RhoPhiI * sinArg1;
    sOutI += c[kIndex1].RhoPhiI * cosArg1 + c[kIndex1].RhoPhiR * sinArg1;
    int kIndex2 = kIndex + 2;
    float expArg2 = PIx2 *
      (c[kIndex2].Kx * sX + c[kIndex2].Ky * sY + c[kIndex2].Kz * sZ);
    float cosArg2 = cos(expArg2);
    float sinArg2 = sin(expArg2);
    sOutR += c[kIndex2].RhoPhiR * cosArg2 - c[kIndex2].RhoPhiI * sinArg2;
    sOutI += c[kIndex2].RhoPhiI * cosArg2 + c[kIndex2].RhoPhiR * sinArg2;
    int kIndex3 = kIndex + 3;
    float expArg3 = PIx2 *
      (c[kIndex3].Kx * sX + c[kIndex3].Ky * sY + c[kIndex3].Kz * sZ);
    float cosArg3 = cos(expArg3);
    float sinArg3 = sin(expArg3);
    sOutR += c[kIndex3].RhoPhiR * cosArg3 - c[kIndex3].RhoPhiI * sinArg3;
    sOutI += c[kIndex3].RhoPhiI * cosArg3 + c[kIndex3].RhoPhiR * sinArg3;
  }
  // Write the updated partial sums back to global memory.
  outR[xIndex] = sOutR;
  outI[xIndex] = sOutI;
}
// Host-side launcher for ComputeRhoPhiGPU: one thread per k-space sample,
// with a ceiling division so a partial tail block covers the remainder.
void computeRhoPhi_GPU(int numK,
                       float* phiR_d, float* phiI_d, float* dR_d, float* dI_d,
                       float* realRhoPhi_d, float* imagRhoPhi_d)
{
  int nBlocks = (numK + KERNEL_RHO_PHI_THREADS_PER_BLOCK - 1)
                / KERNEL_RHO_PHI_THREADS_PER_BLOCK;
  dim3 blockShape(KERNEL_RHO_PHI_THREADS_PER_BLOCK, 1);
  dim3 gridShape(nBlocks, 1);
  printf("Launch RhoPhi Kernel on GPU: Blocks (%d, %d), Threads Per Block %d\n",
         nBlocks, 1, KERNEL_RHO_PHI_THREADS_PER_BLOCK);
  ComputeRhoPhiGPU <<< gridShape, blockShape >>>
    (numK, phiR_d, phiI_d, dR_d, dI_d, realRhoPhi_d, imagRhoPhi_d);
}
// Host-side driver for ComputeFH_GPU.  The k-space data is processed in
// tiles of KERNEL_FH_K_ELEMS_PER_GRID entries: each tile is staged into
// constant memory (array `c`), then one grid is launched per tile with
// one thread per element of x/y/z accumulating into outR/outI.
void computeFH_GPU(int numK, int numX,
                   float* x_d, float* y_d, float* z_d,
                   kValues* kVals,
                   float* outR_d, float* outI_d)
{
  // Ceiling divisions: number of constant-memory tiles and thread blocks.
  int FHGrids = (numK + KERNEL_FH_K_ELEMS_PER_GRID - 1) / KERNEL_FH_K_ELEMS_PER_GRID;
  int FHBlocks = (numX + KERNEL_FH_THREADS_PER_BLOCK - 1) / KERNEL_FH_THREADS_PER_BLOCK;
  dim3 DimFHBlock(KERNEL_FH_THREADS_PER_BLOCK, 1);
  dim3 DimFHGrid(FHBlocks, 1);
  printf("Launch GPU Kernel: Grids %d, Blocks Per Grid (%d, %d), Threads Per Block (%d, %d), K Elems Per Thread %d\n",
         FHGrids, DimFHGrid.x, DimFHGrid.y, DimFHBlock.x, DimFHBlock.y, KERNEL_FH_K_ELEMS_PER_GRID);
  for (int FHGrid = 0; FHGrid < FHGrids; FHGrid++) {
    // Stage this tile of K values into constant memory.
    int FHGridBase = FHGrid * KERNEL_FH_K_ELEMS_PER_GRID;
    kValues* kValsTile = kVals + FHGridBase;
    int numElems = MIN(KERNEL_FH_K_ELEMS_PER_GRID, numK - FHGridBase);
    // Bug fix: numElems * sizeof(kValues) is a size_t; printing it with %d
    // is undefined behavior on LP64 platforms.  Use %zu instead.
    printf("Copying %zu bytes to constant memory\n", numElems * sizeof(kValues));
    cudaMemcpyToSymbol(c, kValsTile, numElems * sizeof(kValues), 0);
    CUDA_ERRCK;
    ComputeFH_GPU <<< DimFHGrid, DimFHBlock >>>
      (numK, FHGridBase, x_d, y_d, z_d, outR_d, outI_d);
    CUDA_ERRCK;
  }
}
|
4,864 | #include <chrono>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include <string.h>
#include <string>
#include <vector>
#include <fstream>
// Arithmetic mean (truncating integer division) of a list of timings.
// Returns 0 for an empty list instead of dividing by zero.
long avgTime(std::vector<long> times) {
  if (times.empty()) return 0;  // bug fix: avoid division by zero
  long long total = 0;
  // Bug fix: the original iterated with `double t`, silently rounding
  // large 64-bit nanosecond timings before summing.
  for (long t : times) {
    total += t;
  }
  return total / (long long)times.size();
}
// Read one integer per line from the file at `logpath`.
// On open failure, report the problem and return an empty vector.
std::vector<int> readLayerSize(std::string logpath) {
  std::vector<int> sizes;
  std::ifstream infile(logpath);
  if (!infile.good()) {
    std::cout << "open file " << logpath << "error\n";
    return sizes;
  }
  for (std::string line; std::getline(infile, line); )
    sizes.push_back(std::stoi(line));
  return sizes;
}
// Measure average host<->device memcpy latency for a buffer of N floats.
// argv[1] = element count, argv[2] = number of repetitions.
int main(int argc, char *argv[]) {
  if (argc < 3) {
    std::cerr << "Usage" << argv[0] << "<size> <repeat-times>\n";
    return 1;
  }
  std::string arg1 = argv[1];
  std::string arg2 = argv[2];
  const unsigned int N = std::stoi(arg1);
  const unsigned int bytes = N * sizeof(float);
  // Hoisted: the repeat count was re-parsed from the string on every
  // loop iteration in the original.
  const int repeats = std::stoi(arg2);
  std::cout << "transfer data size: " << bytes << " bytes" << std::endl;
  // Bug fix: allocate as float to match `bytes = N * sizeof(float)`;
  // the original declared the buffers as int* (same size on common
  // platforms, but misleading and fragile).
  float *h_a = (float *)malloc(bytes);
  float *d_a;
  cudaMalloc((void **)&d_a, bytes);
  memset(h_a, 0, bytes);
  std::vector<long> h2d_times;
  std::vector<long> d2h_times;
  for (int i = 0; i < repeats; i++) {
    auto s = std::chrono::high_resolution_clock::now();
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    auto e = std::chrono::high_resolution_clock::now();
    h2d_times.push_back((e - s).count());
    s = std::chrono::high_resolution_clock::now();
    cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);
    e = std::chrono::high_resolution_clock::now();
    d2h_times.push_back((e - s).count());
  }
  std::cout << "Host to Device memcopy " << avgTime(h2d_times) << " ns\n";
  std::cout << "Device to Host " << avgTime(d2h_times) << " ns\n";
  // Bug fix: release both buffers (the original leaked them).
  cudaFree(d_a);
  free(h_a);
  return 0;
}
4,865 | #include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#include <math.h>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
const int threadsPerBlock = sizeof(unsigned long long) * 8;
/*
PointsPooling module for sparse-to-dense
Args:
pc: [bs, proposal_num, point_num, channel_num]
box_3d: [bs, proposal_num, 6] x, y, z, l, h, w
pc_loc: [bs, proposal_num, point_num, 3]
Return:
out_features: [bs, proposal_num, l, h, w, sample_num, c]
out_idx: [bs, proposal_num, l, h, w, sample_num]
sampled_num_lists: [bs, proposal_num, l, h, w]
pillars: [bs, proposal_num, l, h, w, 3]
*/
// Sparse-to-dense pooling: for each (batch, proposal) pair, voxelize the
// box into an l x h x w grid, record each cell's center in `pillars`,
// then scatter up to `sample_num` points (features + indices) into each
// cell.  sampled_num_lists and out_* are assumed zero-initialized by the
// caller -- TODO confirm.
__global__ void points_pooling_gpu(const int bs, const int proposal_num, const int point_num, const int channel_num,
                                   const int l, const int h, const int w, const int sample_num,
                                   const float* pc, const float* box_3d, const float* pc_loc,
                                   float* out_features, int* out_idx, int *sampled_num_lists, float* pillars){
    int loop_times = bs * proposal_num;
    CUDA_1D_KERNEL_LOOP(batch_inds, loop_times){
        // One iteration handles one proposal of one batch element.
        const float* cur_pc = pc + batch_inds * point_num * channel_num;
        const float* cur_box_3d = box_3d + batch_inds * 6;
        const float* cur_pc_loc = pc_loc + batch_inds * point_num * 3;
        // Box parameters: center x/z, bottom y, and extents l/h/w.
        float box_cx = cur_box_3d[0];
        float box_by = cur_box_3d[1];
        float box_cz = cur_box_3d[2];
        float box_l = cur_box_3d[3];
        float box_h = cur_box_3d[4];
        float box_w = cur_box_3d[5];
        // Voxel edge lengths along each axis.
        float interval_l = box_l / float(l);
        float interval_h = box_h / float(h);
        float interval_w = box_w / float(w);
        // Minimum corner of the box.
        float xmin = box_cx - box_l / 2.;
        float ymin = box_by - box_h;
        float zmin = box_cz - box_w / 2.;
        float* cur_out_features = out_features + batch_inds * l * h * w * sample_num * channel_num;
        int* cur_out_idx = out_idx + batch_inds * l * h * w * sample_num;
        int* cur_sampled_num_list = sampled_num_lists + batch_inds * l * h * w;
        // Bug fix: pillars has shape [bs, proposal_num, l, h, w, 3]
        // (see header comment and the `* 3` in tmp_idx below), so the
        // per-proposal stride is l*h*w*3.  The original omitted the
        // factor 3, making consecutive proposals overwrite each other.
        float* cur_pillars = pillars + batch_inds * l * h * w * 3;
        float tmp_x, tmp_y, tmp_z;
        int tmp_idx;
        // Write every voxel's center coordinate into `pillars`.
        for (int i=0; i < l; i++){
            for (int j=0; j<h; j++){
                for (int k = 0; k < w; k++){
                    tmp_x = xmin + (i + 0.5) * interval_l;
                    tmp_y = ymin + (j + 0.5) * interval_h;
                    tmp_z = zmin + (k + 0.5) * interval_w;
                    tmp_idx = (i * h * w + j * w + k) * 3;
                    cur_pillars[tmp_idx] = tmp_x;
                    cur_pillars[tmp_idx + 1] = tmp_y;
                    cur_pillars[tmp_idx + 2] = tmp_z;
                }
            }
        }
        // Scatter each point into its voxel, keeping at most sample_num
        // points per voxel (first-come order).
        float cur_pc_x, cur_pc_y, cur_pc_z;
        for (int i = 0; i < point_num; i++){
            cur_pc_x = cur_pc_loc[i * 3 + 0];
            cur_pc_y = cur_pc_loc[i * 3 + 1];
            cur_pc_z = cur_pc_loc[i * 3 + 2];
            // Clamp out-of-box points to the nearest edge voxel.
            int x_inds = min(max(int(floor((cur_pc_x - xmin) / interval_l)), 0), l - 1);
            int y_inds = min(max(int(floor((cur_pc_y - ymin) / interval_h)), 0), h - 1);
            int z_inds = min(max(int(floor((cur_pc_z - zmin) / interval_w)), 0), w - 1);
            int grid_inds = x_inds * h * w + y_inds * w + z_inds;
            if (cur_sampled_num_list[grid_inds] >= sample_num)
                continue;  // voxel already full
            int cur_pc_inds = cur_sampled_num_list[grid_inds];
            int out_grid_inds = grid_inds * sample_num + cur_pc_inds;
            cur_out_idx[out_grid_inds] = i;
            for (int j = 0; j < channel_num; j ++){
                cur_out_features[out_grid_inds * channel_num + j] = cur_pc[i * channel_num + j];
            }
            cur_sampled_num_list[grid_inds] += 1;
        }
    }
}
/* Calculate gradients of PointsPool operation in sparse to dense
Args:
pc: [bs, proposal_num, point_num, channel_num]
out_idx: [bs, proposal_num, l, h, w, sample_num]
sampled_num_lists: [bs, proposal_num, l, h, w]
features_grad: [bs, proposal_num, l, h, w, sample_num, channel_num]
Return:
pc_grad: [bs, proposal_num, point_num, channel_num]
*/
/* Backward pass of points_pooling_gpu: scatter-add each pooled feature's
 * gradient back to the point it was sampled from.
 * One loop iteration per element of features_grad
 * (bs * proposal_num * l * h * w * sample_num * channel_num of them). */
__global__ void points_pooling_grad_gpu(const int bs, const int proposal_num, const int point_num, const int channel_num,
const int l, const int h, const int w, const int sample_num,
const float* pc, const int* out_idx, const int *sampled_num_lists, const float* features_grad,
float *pc_grad){
int loop_times = bs * proposal_num * l * h * w * sample_num * channel_num;
CUDA_1D_KERNEL_LOOP(point_inds, loop_times){
// Decompose the flat index into (batch*proposal, voxel, sample, channel).
int proposal_idx = point_inds / (l * h * w * sample_num * channel_num);
int sample_num_lists_idx = point_inds / (sample_num * channel_num);
int out_idx_idx = point_inds / channel_num;
int channel_idx = point_inds % channel_num;
int cur_sample_idx = out_idx_idx % sample_num;
// Slots beyond the number of points actually pooled into this voxel
// carry no gradient.
if (cur_sample_idx >= sampled_num_lists[sample_num_lists_idx])
continue;
// out_idx maps this (voxel, sample) slot back to its source point.
const int* cur_out_idx = out_idx + out_idx_idx;
float* cur_pc_grad = pc_grad + proposal_idx * point_num * channel_num +
cur_out_idx[0] * channel_num + channel_idx;
// atomicAdd: several slots may reference the same point.
atomicAdd(&cur_pc_grad[0], features_grad[point_inds]);
}
}
// Host wrapper: launch the forward sparse-to-dense pooling kernel with
// the module-level launch configuration (block_num, threadsPerBlock).
void pointsPoolingLauncher(const int bs, const int proposal_num, const int point_num, const int channel_num,
                           const int l, const int h, const int w, const int sample_num,
                           const float* pc, const float* box_3d, const float* pc_loc,
                           float* out_features, int* out_idx, int *sampled_num_lists, float* pillars){
    points_pooling_gpu<<<block_num, threadsPerBlock>>>(bs, proposal_num, point_num, channel_num,
                                                       l, h, w, sample_num,
                                                       pc, box_3d, pc_loc,
                                                       out_features, out_idx,
                                                       sampled_num_lists, pillars);
}
// Host wrapper: launch the backward (gradient) pooling kernel with the
// module-level launch configuration (block_num, threadsPerBlock).
void pointsPoolingGradLauncher(const int bs, const int proposal_num, const int point_num, const int channel_num,
                               const int l, const int h, const int w, const int sample_num,
                               const float* pc, const int* out_idx, const int *sampled_num_lists, const float* features_grad,
                               float *pc_grad){
    points_pooling_grad_gpu<<<block_num, threadsPerBlock>>>(bs, proposal_num, point_num, channel_num,
                                                            l, h, w, sample_num,
                                                            pc, out_idx, sampled_num_lists,
                                                            features_grad, pc_grad);
}
|
4,866 | #include "includes.h"
// Per-block minimum over a fixed 256-element tile of `a`:
// block k scans a[256*k .. min(256*k + 256, n)) and writes the tile's
// minimum to b[k].  Launch with gridDim.x = ceil(n / 256).
__global__ void mini1(int *a, int *b, int n)
{
    int start = 256 * blockIdx.x;
    int end = min(start + 256, n);
    // Bug fix: the original seeded the minimum with the magic value
    // 7888888, which returns a wrong result whenever every element of
    // the tile exceeds it.  Seed with INT_MAX instead.
    int mini = 0x7fffffff;  // INT_MAX
    for (int i = start; i < end; i++)
    {
        if (a[i] < mini)
        {
            mini = a[i];
        }
    }
    b[blockIdx.x] = mini;
}
4,867 | #include <iostream>
#include <fstream>
#include <string.h>
#include <sys/time.h>
#include <math.h>
// CUDA runtime
//#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
//#include <helper_functions.h>
//#include <helper_cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
using namespace std;
//**************************************************************************
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
//**************************************************************************
// B[i] = (A[i-2]^2 + 2*A[i-1]^2 + A[i]^2 - 3*A[i+1]^2 + 5*A[i+2]^2) / 24,
// treating out-of-range neighbours as 0.  One thread per element.
__global__ void transformacion_no_shared(float *A, float *B, int N)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    float Aim2, Aim1, Ai, Aip1, Aip2;
    if (i < N)
    {
        Aim2 = (i - 2 < 0) ? 0.0 : A[i - 2];
        Aim1 = (i - 1 < 0) ? 0.0 : A[i - 1];
        // Bug fix: the upper-boundary guards used `> N`, so a thread with
        // i == N-1 read A[N] (and i == N-2 read A[N+1]) out of bounds.
        // Valid indices are 0..N-1, so the guard must be `>= N`.
        Aip1 = (i + 1 >= N) ? 0.0 : A[i + 1];
        Aip2 = (i + 2 >= N) ? 0.0 : A[i + 2];
        Ai = A[i];
        B[i] = (pow(Aim2, 2) + 2.0 * pow(Aim1, 2) + pow(Ai, 2) - 3.0 * pow(Aip1, 2) + 5.0 * pow(Aip2, 2)) / 24.0;
    }
}
//**************************************************************************
// Vector maximum kernel
// Block-level max reduction: each block writes max(V_in[block range]) to
// V_out[blockIdx.x].  Launch with blockDim.x * sizeof(float) bytes of
// dynamic shared memory; out-of-range lanes are padded with 0
// (inputs are assumed non-negative -- TODO confirm with callers).
__global__ void reduceMax(float *V_in, float *V_out, const int N)
{
    // Bug fix: the shared buffer was declared `extern __shared__ int
    // sdata[]` while holding float data, so values were stored and
    // compared as reinterpreted integers.
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < N) ? V_in[i] : 0.0f;
    __syncthreads();
    // Tree reduction; the barrier stays outside the divergent branch.
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s && sdata[tid] < sdata[tid + s])
            sdata[tid] = sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        V_out[blockIdx.x] = sdata[0];
}
/**************************************************************************
**************************************************************************/
// Benchmark: apply the stencil transform and max-reduction on CPU and
// GPU, reporting times and speedups.  argv[1] = block size, argv[2] = N.
int main(int argc, char *argv[])
{
    int blockSize, N;
    if (argc != 3)
    {
        cerr << "Error en los argumentos: blockSize numValores" << endl;
        return (-1);
    }
    else
    {
        blockSize = atoi(argv[1]);
        N = atoi(argv[2]);
    }
    // Get GPU information
    int devID;
    cudaDeviceProp props;
    cudaError_t err;
    err = cudaGetDevice(&devID);
    if (err != cudaSuccess)
    {
        cout << "ERRORRR" << endl;
    }
    cudaGetDeviceProperties(&props, devID);
    printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
    cout << "Tamaño bloque: " << blockSize << endl;
    cout << "Nº valores: " << N << endl;
    // Host/device buffers
    int size = N * sizeof(float);
    float *A = new float[N];
    float *B = new float[N];
    float *A_CPU = new float[N];
    float *B_CPU = new float[N];
    int blocks_per_grid = ceil(float(N) / blockSize);
    float *B_out_red = new float[blocks_per_grid];
    float *A_device = NULL;
    float *B_device = NULL;
    float *B_d_red_in = NULL;
    float *B_d_red_out = NULL;
    // Initialize vector A (GPU) and A (CPU) with the same values
    for (int i = 0; i < N; i++)
    {
        A[i] = (float)(1 - (i % 100) * 0.001);
        A_CPU[i] = (float)(1 - (i % 100) * 0.001);
    }
    // Reserve device memory
    err = cudaMalloc((void **)&A_device, size);
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [A Device]" << endl;
    }
    err = cudaMalloc((void **)&B_device, size);
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [B Device]" << endl;
    }
    err = cudaMalloc((void **)&B_d_red_in, size);
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [A Device Reduction INPUT]" << endl;
    }
    err = cudaMalloc((void **)&B_d_red_out, blocks_per_grid * sizeof(float));
    if (err != cudaSuccess)
    {
        cout << "ERROR RESERVA [A Device Reduction OUTPUT]" << endl;
    }
    /* ---------------------------------------------------------------------- */
    /* ------------------------------ CPU phase ----------------------------- */
    double t1 = cpuSecond();
    float Ai, Aim1, Aim2, Aip1, Aip2;
    float max = 0.0;
    for (int i = 0; i < N; i++)
    {
        Aim2 = (i - 2 < 0) ? 0.0 : A_CPU[i - 2];
        Aim1 = (i - 1 < 0) ? 0.0 : A_CPU[i - 1];
        // Bug fix: the guards used `> N`, reading A_CPU[N] / A_CPU[N+1]
        // out of bounds at the tail; valid indices end at N-1.
        Aip1 = (i + 1 >= N) ? 0.0 : A_CPU[i + 1];
        Aip2 = (i + 2 >= N) ? 0.0 : A_CPU[i + 2];
        Ai = A_CPU[i];
        B_CPU[i] = (pow(Aim2, 2) + 2.0 * pow(Aim1, 2) + pow(Ai, 2) - 3.0 * pow(Aip1, 2) + 5.0 * pow(Aip2, 2)) / 24.0;
    }
    double Tcpu_max = cpuSecond() - t1;
    for (int i = 0; i < N; i++)
    {
        max = (B_CPU[i] > max) ? B_CPU[i] : max;
    }
    cout << "Tiempo gastado CPU = " << Tcpu_max << endl;
    cout << "Máximo: " << max << endl;
    /* ---------------------------------------------------------------------- */
    /* ------------------ GPU phase >>[No shared memory]<< ------------------ */
    t1 = cpuSecond();
    // Host A to Device
    err = cudaMemcpy(A_device, A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        cout << "ERROR COPIA [A Device]" << endl;
    }
    int threadsPerBlock = blockSize;
    int blocksPerGrid = ceil((float)N / (float)threadsPerBlock);
    transformacion_no_shared<<<blocksPerGrid, threadsPerBlock>>>(A_device, B_device, N);
    // Device to Host (cudaMemcpy blocks until the kernel has finished)
    cudaMemcpy(B, B_device, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Time GPU
    double Tgpu = cpuSecond() - t1;
    cout << "Tiempo gastado GPU = " << Tgpu << endl
         << endl;
    /* ------------------------------------------------------------------- */
    // GPU REDUCTION PHASE
    t1 = cpuSecond();
    // Host to device
    err = cudaMemcpy(B_d_red_in, B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        cout << "ERROR COPIA A GPU REDUCTION" << endl;
    }
    int shared_mem_size = threadsPerBlock * sizeof(float);
    reduceMax<<<blocksPerGrid, threadsPerBlock, shared_mem_size>>>(B_d_red_in, B_d_red_out, N);
    cudaDeviceSynchronize();
    // Device to Host, then finish the reduction over per-block maxima
    cudaMemcpy(B_out_red, B_d_red_out, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    max = 0.0;
    for (int i = 0; i < blocks_per_grid; i++)
        max = (B_out_red[i] > max) ? B_out_red[i] : max;
    // Time GPU Reduction
    double Tgpu_reduction = cpuSecond() - t1;
    cout << "Tiempo gastado GPU REDUCTION = " << Tgpu_reduction << endl;
    cout << "Máximo: " << max << endl
         << endl;
    cout << "Ganancia [TGPU]= " << Tcpu_max / Tgpu << endl;
    cout << "Ganancia [TGPU reduction]= " << Tcpu_max / Tgpu_reduction << endl;
    // Bug fix: the original leaked every host and device allocation.
    delete[] A;
    delete[] B;
    delete[] A_CPU;
    delete[] B_CPU;
    delete[] B_out_red;
    cudaFree(A_device);
    cudaFree(B_device);
    cudaFree(B_d_red_in);
    cudaFree(B_d_red_out);
}
|
4,868 | /**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <math.h>
int size, paddedSize;
#define BLOCKSIZE 32
typedef struct
{
float ** element;
} matrix;
// Nanosecond wall-clock timestamp.  Uses clock_gettime(CLOCK_REALTIME)
// on Linux; elsewhere falls back to gettimeofday (microsecond precision).
long long wall_clock_time()
{
#ifdef __linux__
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (long long)ts.tv_nsec + (long long)ts.tv_sec * 1000000000ll;
#else
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
/**
 * Allocate a paddedSize x paddedSize matrix in CUDA managed memory,
 * row-major: one row-pointer array plus one allocation per row.
 * Exits with an error message if any allocation fails.
 **/
void allocate_matrix(matrix* m)
{
    cudaError_t rc = cudaMallocManaged((void**)&(m->element), sizeof(float*) * paddedSize);
    if (rc != cudaSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc));
        exit(1);
    }
    for (int row = 0; row < paddedSize; row++)
    {
        rc = cudaMallocManaged((void**)&(m->element[row]), sizeof(float) * paddedSize);
        if (rc != cudaSuccess)
        {
            fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc));
            exit(1);
        }
    }
}
/**
* Free the memory allocated for a matrix.
**/
/**
 * Release all row allocations, then the row-pointer array itself.
 **/
void free_matrix(matrix* m) {
    for (int row = 0; row < paddedSize; row++)
        cudaFree(m->element[row]);
    cudaFree(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
/**
 * Fill the logical size x size region of the matrix with 1s.
 * (A randomized variant is kept commented out, as in the original.)
 **/
void init_matrix(matrix m)
{
    for (int row = 0; row < size; row++)
        for (int col = 0; col < size; col++)
            m.element[row][col] = 1;  // m.element[row][col] = rand() % 10;
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
/**
 * Zero the full padded matrix, including the padding rows/columns.
 **/
void init_matrix_zero(matrix m)
{
    for (int row = 0; row < paddedSize; row++)
        for (int col = 0; col < paddedSize; col++)
            m.element[row][col] = 0.0;
}
// dest = src^T over the logical size x size region; one thread per element.
__global__ void transpose_kernel(matrix src, matrix dest, int size) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < size && col < size)
        dest.element[row][col] = src.element[col][row];
}
// SHARED MEMORY KERNEL HERE
/* Tiled matrix multiply using shared memory.  `b` is expected to be
 * pre-transposed by the caller (see work()), so both operands are read
 * row-wise.  Requires blockDim == (BLOCKSIZE, BLOCKSIZE).
 * NOTE(review): there is no bounds guard on i/j or on the tile loads;
 * this relies on the matrices being padded to paddedSize (a multiple of
 * BLOCKSIZE) and the grid being sized from paddedSize -- confirm. */
__global__ void sm_kernel(matrix a, matrix b, matrix result, int size)
{
// Initialize shared memory
__shared__ float aMat[BLOCKSIZE][BLOCKSIZE];
__shared__ float bMat[BLOCKSIZE][BLOCKSIZE];
// Calculate the index in the resulting matrix
int i = (blockIdx.x) * blockDim.x + threadIdx.x;
int j = (blockIdx.y) * blockDim.y + threadIdx.y;
int k, m, numBlocks;
float sum = 0.0f;
// Require M blocks to finish
numBlocks = ((size % BLOCKSIZE) == 0) ? (size / BLOCKSIZE) : (size / BLOCKSIZE + 1);
// For each block in turn
for(k = 0; k < numBlocks; k++){
// each thread copy one element to the buffer
aMat[threadIdx.x][threadIdx.y] = a.element[i][k * BLOCKSIZE + threadIdx.y];
bMat[threadIdx.y][threadIdx.x] = b.element[j][k * BLOCKSIZE + threadIdx.x];
// Barrier: the whole tile must be loaded before anyone reads it.
__syncthreads();
// Do a partial sum of all available elements
for(m = 0; m < BLOCKSIZE; m++)
sum += aMat[threadIdx.x][m] * bMat[threadIdx.y][m];
// Barrier: finish reading this tile before the next load overwrites it.
__syncthreads();
}
// When done, the sum is complete and we write it back to global
result.element[i][j] = sum;
}
/**
* Each kernel computes the result element (i,j).
*/
__global__ void mm_kernel(matrix a, matrix b, matrix result, int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k;
float sum = 0.0f;
if (i >= size || j >= size)
return;
for(k = 0; k < size; k++)
sum += a.element[i][k] * b.element[j][k];
result.element[i][j] = sum;
}
/**
 * Print the logical size x size region of the matrix, one row per line.
 **/
void print_matrix(matrix m)
{
    for (int row = 0; row < size; row++)
    {
        printf("row %4d: ", row);
        for (int col = 0; col < size; col++)
            printf("%6.2f ", m.element[row][col]);
        printf("\n");
    }
}
// Run both matrix-multiplication variants (naive transposed and shared-
// memory tiled), time them, verify they agree, and report the speedup.
void work()
{
    matrix a, b, bt, result1, result2;
    long long before, after;
    float time1, time2;
    int correct, i, j, dim;
    cudaError_t rc;
    // Allocate memory for matrices
    allocate_matrix(&a);
    allocate_matrix(&b);
    allocate_matrix(&bt);
    allocate_matrix(&result1);
    allocate_matrix(&result2);
    // Initialize matrix elements (zero padding first, then the data region)
    init_matrix_zero(a);
    init_matrix_zero(b);
    init_matrix(a);
    init_matrix(b);
    // Launch configurations
    dim3 transblock(32, 32);
    dim = (size % 32 == 0) ? size / 32 : size / 32 + 1;
    dim3 transgrid(dim, dim);
    dim3 block(BLOCKSIZE, BLOCKSIZE); // a block of 32 x 32 CUDA threads
    dim = paddedSize / BLOCKSIZE;
    dim3 grid(dim, dim);              // a grid of CUDA thread blocks
    before = wall_clock_time();
    init_matrix_zero(bt);
    transpose_kernel<<<transgrid, transblock>>>(b, bt, size);
    cudaDeviceSynchronize();
    // Bug fix: the original launched mm_kernel<<<transblock, transgrid>>>,
    // i.e. with the grid and block arguments swapped.  That only worked by
    // coincidence for size <= 1024 (both dim3s equal); for larger sizes it
    // requests a block of dim x dim > 1024 threads and the launch fails.
    mm_kernel<<<transgrid, transblock>>>(a, bt, result1, size);
    cudaDeviceSynchronize();
    after = wall_clock_time();
    time1 = ((float)(after - before))/1000000000;
    fprintf(stderr, "Optimized MM on GPU took %1.2f seconds\n", time1);
    // was there any error?
    rc = cudaGetLastError();
    if (rc != cudaSuccess)
        printf("Last CUDA error %s\n", cudaGetErrorString(rc));
    before = wall_clock_time();
    init_matrix_zero(bt);
    transpose_kernel<<<transgrid, transblock>>>(b, bt, size);
    cudaDeviceSynchronize();
    sm_kernel<<<grid, block>>>(a, bt, result2, size);
    cudaDeviceSynchronize();
    after = wall_clock_time();
    time2 = ((float)(after - before))/1000000000;
    fprintf(stderr, "SM MM on GPU took %1.2f seconds\n", time2);
    // was there any error?
    rc = cudaGetLastError();
    if (rc != cudaSuccess)
        printf("Last CUDA error %s\n", cudaGetErrorString(rc));
    // Compare the results
    correct = 1;
    for (i = 0; correct && i < size; i++)
        for (j = 0; j < size; j++)
            if (result1.element[i][j] != result2.element[i][j]) {
                correct = 0;
                break;
            }
    if (correct) {
        printf("Speedup: %1.4f\n", time1/time2);
    }
    else {
        printf("Difference in result matrices at element (%d, %d)!\n", i, j);
    }
    free_matrix(&a);
    free_matrix(&b);
    // Bug fix: bt was allocated but never freed in the original.
    free_matrix(&bt);
    free_matrix(&result1);
    free_matrix(&result2);
}
// Parse the optional matrix size argument (default 1024), round it up to
// a multiple of BLOCKSIZE for padding, and run the benchmark.
int main(int argc, char ** argv)
{
    srand(0);
    size = (argc >= 2) ? atoi(argv[1]) : 1024;
    paddedSize = (size % BLOCKSIZE == 0) ? size : (1 + size / BLOCKSIZE) * BLOCKSIZE;
    fprintf(stderr,"Optimized/SM multiplication of size %d\n", size);
    // Multiply the matrices
    work();
    return 0;
}
|
4,869 | #include <stdio.h>
// For each character a[id] (one thread per character), build the binary
// representation of its character code as a decimal number (e.g. 'A' = 65
// -> 1000001) and print "<char>\t<binary>".  `len` is unused here.
__global__ void rev(char *a, int *len)
{
    int id = threadIdx.x;
    int remaining = a[id];
    int place = 1;   // current decimal place value
    int result = 0;  // binary digits accumulated as a decimal number
    while (remaining > 0)
    {
        result += (remaining % 2) * place;
        place *= 10;
        remaining /= 2;
    }
    printf("%c\t%d\n", a[id], result);
}
// Read a word from stdin and print each character's binary representation
// via the `rev` kernel (one thread per character).
int main()
{
    char a[20];
    int *d_m;
    char *d_a;
    printf("Enter String:");
    // Bug fix: bound the read to the buffer (unbounded %s could overflow a[20]).
    scanf("%19s", a);
    int len = strlen(a);
    // Bug fix: the original used sizeof(int) * strlen(a) bytes for a char
    // array, reading past the 20-byte host buffer whenever strlen(a) > 5.
    // The kernel only reads chars, so len bytes suffice.
    int size = len * sizeof(char);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_m, sizeof(int));
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_m, &len, sizeof(int), cudaMemcpyHostToDevice);
    rev<<<1,len>>>(d_a, d_m);
    // Bug fix: without a sync the process may free memory and exit before
    // the kernel runs, losing its printf output.
    cudaDeviceSynchronize();
    cudaFree(d_a);
    cudaFree(d_m);
}
|
4,870 | #include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_perm_selection.h"
// #define CUDA_DBG
// #define CUDA_DBG1
/* For each cube entry t (threads advance by a fixed CUDA_MAX_THREADS
 * stride until cube_size is covered):
 *   dp[t] = (cubep[t] + random) mod tbl_size */
__global__ void d_do_perm_selection_L ( int *dp, int tbl_size,
	int *cubep, int cube_size, int random )
{
	for ( int t = blockIdx.x * blockDim.x + threadIdx.x ;
		t < cube_size ;
		t += CUDA_MAX_THREADS )
	{
		dp[ t ] = ( cubep[ t ] + random ) % tbl_size ;
	}
}
/* Build the left permutation table on the device: initialize all
 * tbl_size entries to (sink + random) % tbl_size, then overwrite the
 * first cube_size entries with (cubep[i] + random) % tbl_size. */
void
h_do_perm_selection_L ( int *d_perm_tbl, int tbl_size, int *d_perm_tbl_cube,
	int cube_size, int random, int sink )
{
	int nThreadsPerBlock = 512;
	int nBlocks ;	// note: nBlocks is based on cube_size
#ifdef CUDA_DBG1
	fprintf(stderr, "%s: perm %p tblsize %d cube %p cubesize %d random %d\n",
		__func__, d_perm_tbl, tbl_size, d_perm_tbl_cube, cube_size,
		random ) ;
#endif
	set_device_mem_i ( d_perm_tbl, tbl_size, ( sink + random ) % tbl_size ) ;
	h_block_adj ( cube_size, nThreadsPerBlock, &nBlocks ) ;
	d_do_perm_selection_L <<< nBlocks, nThreadsPerBlock >>> (
		d_perm_tbl, tbl_size, d_perm_tbl_cube, cube_size, random ) ;
	/* bug fix: cudaThreadSynchronize() is deprecated (and removed in
	 * recent CUDA releases); cudaDeviceSynchronize() is the supported
	 * equivalent. */
	cudaDeviceSynchronize() ;
#ifdef CUDA_DBG
	dbg_p_d_data_i("h_do_perm_selection_L", d_perm_tbl, tbl_size ) ;
#endif
}
/* Build the right permutation table in place on the device.
 * Entry 0 is pinned to 0.  Every other entry t gets (t + random) mod
 * tbl_size, incremented by one when the addition wrapped so the value 0
 * is never produced for t != 0 ("take out 0").  Threads advance by a
 * fixed CUDA_MAX_THREADS stride until tbl_size is covered. */
__global__ void d_do_perm_selection_R ( int *dp, int tbl_size, int random )
{
	int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
	int i ;
	while ( t_idx < tbl_size )
	{
		if ( t_idx == 0 )
			dp[ t_idx ] = 0 ;
		else
		{
			i = t_idx + random ;
			dp[ t_idx ] = i % tbl_size ;
			// i / tbl_size is 1 iff the sum wrapped; skip the value 0.
			// (presumably random < tbl_size, so the result stays in
			// range -- the host side checks this under CUDA_DBG.)
			if ( i / tbl_size )
				dp[ t_idx ]++ ; // take out 0
		}
		t_idx += CUDA_MAX_THREADS ;
	}
}
/* Launch d_do_perm_selection_R to fill the right permutation table of
 * tbl_size entries using the given random offset. */
void
h_do_perm_selection_R ( int *d_perm_tbl, int tbl_size, int random )
{
	int nThreadsPerBlock = 512;
	int nBlocks ;
#ifdef CUDA_DBG1
	fprintf( stderr, "%s: perm %p size %d random %d\n",
		__func__, d_perm_tbl, tbl_size, random ) ;
#endif
#ifdef CUDA_DBG
	if ( tbl_size <= random )
		fprintf( stderr, "%s: ERROR tblsize %d >= random %d\n",
			__func__, tbl_size, random ) ;
#endif
	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
	d_do_perm_selection_R <<< nBlocks, nThreadsPerBlock >>> (
		d_perm_tbl, tbl_size, random ) ;
	/* bug fix: cudaThreadSynchronize() is deprecated (and removed in
	 * recent CUDA releases); cudaDeviceSynchronize() is the supported
	 * equivalent. */
	cudaDeviceSynchronize() ;
#ifdef CUDA_OBS
	dbg_p_d_data_i("h_do_perm_selection_R", d_perm_tbl, tbl_size ) ;
#endif
}
/* Write into dp the linear indices (within an ox x oy x oz volume) of
 * every cell of the cx x cy x cz sub-volume anchored at the origin.
 * *sinkp receives the first linear index just outside the sub-volume
 * (0 when the sub-volume fills the whole volume). */
void
h_do_get_perm_matrix( int *dp, int ox, int oy, int oz,
	int cx, int cy, int cz, int *sinkp )
{
	int frame_size = ox * oy ;
	int sink = -1 ;
	for ( int z = 0 ; z < cz ; z++ )
	{
		int idx = z * frame_size ;
		for ( int y = 0 ; y < cy ; y++ )
		{
			for ( int x = 0 ; x < cx ; x++ )
				*dp++ = idx++ ;
			if (( sink < 0 ) && ( ox != cx ))
				sink = cx ;	/* first skipped column */
			idx += ox - cx ;	/* jump to the next row of the big volume */
		}
		if (( sink < 0 ) && ( oy != cy ))
			sink = cy * ox ;	/* first skipped row */
	}
	if ( sink < 0 )
		sink = ( oz != cz ) ? frame_size * cz : 0 ;
	*sinkp = sink ;
}
// try to fit shifting cube in block ...
/* Volume of a 3-element dimension triple: p[0] * p[1] * p[2]. */
int
cube_size( int *p )
{
	return p[0] * p[1] * p[2] ;
}
/* Sanity check: each of the three dimensions of `small` must not exceed
 * the matching dimension of `large`; on violation, print both triples
 * (tagged with `s`) and exit(33). */
void
ck_blk( char *s, int *small, int *large )
{
	for ( int i = 2 ; i >= 0 ; i-- )
	{
		if ( small[i] > large[i] )
		{
			printf("%s: %s small %d %d %d large %d %d %d\n",
				__func__, s,
				small[0], small[1], small[2],
				large[0], large[1], large[2]) ;
			exit( 33 ) ;
		}
	}
}
/* Verify that the volume p[0]*p[1]*p[2] does not exceed `size`;
 * on violation, print both values (tagged with `s`) and exit(33). */
void
ck_size( char *s, int *p, int size )
{
	int vol = p[0] * p[1] * p[2] ;
	if ( vol > size )
	{
		printf("%s: %s got %d need %d\n", __func__, s, vol, size ) ;
		exit( 33 ) ;
	}
}
/* Choose a selection-cube size (cx,cy,cz) inside the block (bx,by,bz)
 * whose volume is as large as possible without exceeding nmea
 * measurements, subject to cz <= max_z, cx >= min_x, cy >= min_y.
 * (*cx,*cy,*cz) carry the caller's initial guess in and the result out.
 * Returns 1 on success, 0 when the inputs are inconsistent.
 * Strategy: scale the guess by the cube root of nmea/volume, clamp it
 * into the block, then greedily grow one dimension at a time while the
 * budget allows. */
int
h_do_find_perm_size( int bx, int by, int bz, int *cx,
	int *cy, int *cz, int max_z, int nmea, int min_x, int min_y )
{
	double f ;
	int dox, done_once, bb[3], cc[3], xy, yz, xz, i, j, k ;
	bb[0] = bx ;	// block
	bb[1] = by ;
	bb[2] = bz ;
#ifdef CUDA_DBG
	printf("%s: block %d %d %d cube %d %d %d man_z %d nmea %d min x/y %d %d\n",
		__func__, bx, by, bz, *cx, *cy, *cz, max_z, nmea, min_x, min_y ) ;
#endif
	// Whole block fits in the measurement budget: take it unchanged.
	k = cube_size( bb ) ;
	if ( nmea >= k )
	{
		*cx = bx ;
		*cy = by ;
		*cz = bz ;
		return ( 1 ) ;
	}
	cc[0] = *cx ;	// selection
	cc[1] = *cy ;
	cc[2] = *cz ;
	// The initial guess must fit inside the block.
	if (( cc[0] > bb[0] ) || ( cc[1] > bb[1] ) || ( cc[2] > bb[2] ))
	{
#ifdef CUDA_DBG
		printf("size mismatch: %d %d %d -- %d %d %d -- %d\n", cc[0], cc[1], cc[2],
			bb[0], bb[1], bb[2], nmea ) ;
#endif
		return ( 0 ) ;
	}
#ifdef CUDA_DBG
	printf("%s: init: %d %d %d -- %d %d %d -- %d\n", __func__, cc[0], cc[1],
		cc[2], bb[0], bb[1], bb[2], nmea ) ;
#endif
	// Degenerate (zero-volume) guess is an error.
	i = cube_size( cc ) ;
	if ( !i )
	{
#ifdef CUDA_DBG
		printf("%s: size 0: %d %d %d -- %d %d %d -- %d\n", __func__,
			cc[0], cc[1], cc[2], bb[0], bb[1], bb[2], nmea ) ;
#endif
		return ( 0 ) ;
	}
	// Isotropic scale factor toward the measurement budget.
	f = ( double )nmea / ( double )i ;
#ifdef CUDA_OBS
	printf("2: f %f i %d \n", f, i ) ;
#endif
	if ( f < 1.0 ) // razi ...
	{
#ifdef CUDA_DBG
		printf("%s:less than 1.0: %d %d %d -- %d %d %d -- %d f %f\n",
			__func__, cc[0], cc[1], cc[2], bb[0], bb[1], bb[2], nmea, f ) ;
#endif
		return ( 0 ) ;
	}
	f = pow ( f, 1.0/3.0 ) ;
	// it will not shrink ... razi
	// Scale each dimension by the cube root (truncating), cap z.
	i = 3 ;
	while ( i-- )
		cc[i] = ( int )( f * ( double ) cc[i] ) ;
	if ( cc[2] > max_z )
		cc[2] = max_z ;
#ifdef CUDA_DBG
	printf("%s: max: %d %d %d t %d -- %f mea %d \n", __func__,
		cc[0], cc[1], cc[2], cube_size( cc ), f, nmea ) ;
#endif
#ifdef CUDA_DBG
	ck_size( "first adjust", cc, nmea ) ;
#endif
	// ok ... less than nmeas ... make sure it is inside the blk
	// If any dimension overflows the block, shrink all three
	// proportionally (rounding up).
	i = 3 ;
	while ( i-- )
	{
		if ( cc[i] > bb[i] )
		{
			f = (( double ) bb[i] ) / (( double ) cc[i] ) ;
			for ( j = 0 ; j < 3 ; j++ )
				cc[j] = ( int )(( double )cc[j] * f + 0.5 ) ;	// round up
		}
	}
	// Re-apply the hard per-dimension limits, then cap z so the total
	// volume stays within nmea.
	if ( cc[2] > max_z )
		cc[2] = max_z ;
	if ( cc[0] < min_x )
		cc[0] = min_x ;
	if ( cc[1] < min_y )
		cc[1] = min_y ;
	i = nmea / ( cc[0] * cc[1] ) ;
	if ( cc[2] > i )
		cc[2] = i ;
#ifdef CUDA_OBS
	ck_size( "inside the box", cc, nmea ) ;
#endif
	// ok ... less than nmeas
	// ok ... inside the block
#ifdef CUDA_DBG
	printf("%s: inside the box: %d %d %d t %d -- %f -- max %d\n", __func__,
		cc[0], cc[1], cc[2], cc[0]* cc[1]* cc[2], f, max_z ) ;
#endif
	// ok ... now increase the size ...
	// Greedy growth: alternate which of x/y is tried first (dox flag);
	// stop after 3 consecutive rounds with no growth (done_once == 3).
	done_once = 0 ;
	dox = 1 ;
	while ( 1 )
	{
		// Face areas: the cost of growing the opposite dimension by 1.
		xy = cc[0] * cc[1] ;
		xz = cc[0] * cc[2] ;
		yz = cc[1] * cc[2] ;
		k = nmea - cube_size( cc ) ;	// remaining measurement budget
		done_once++ ;
		if (( cc[0] > min_x ) && ( cc[1] > min_y ) && ( k >= xy ) && ( cc[2] < bz ) && ( cc[2] < max_z ))
		{
			cc[2]++ ;
			done_once = 0 ;
		} else
		{
			if ( dox )
			{
				dox = 0 ;
				if (( k >= yz ) && ( cc[0] < bx ))
				{
					done_once = 0 ;
					cc[0]++ ;
				} else if (( k >= xz ) && ( cc[1] < by ))
				{
					cc[1]++ ;
					done_once = 0 ;
				}
			} else
			{
				dox = 1 ;
				if (( k >= xz ) && ( cc[1] < by ))
				{
					cc[1]++ ;
					done_once = 0 ;
				} else if (( k >= yz ) && ( cc[0] < bx ))
				{
					cc[0]++ ;
					done_once = 0 ;
				}
			}
		}
#ifdef CUDA_DBG
		printf("%s: searching: %d %d %d t %d -- done %d\n", __func__,
			cc[0], cc[1], cc[2], cube_size( cc ), done_once ) ;
#endif
		if ( done_once == 3 )
			break ;
	}
#ifdef CUDA_OBS
	printf("%s: winner: %d %d %d t %d %d -- %d\n", __func__,
		cc[0], cc[1], cc[2], cube_size( cc ), nmea, nmea - cube_size(cc) ) ;
#endif
	*cx = cc[0] ;
	*cy = cc[1] ;
	*cz = cc[2] ;
	return ( 1 ) ;
}
|
4,871 | #include "device_launch_parameters.h"
#include <iostream>
#include <string>
// Enumerate all CUDA devices and print a summary of each device's
// properties (global memory, SM count, shared memory per block,
// registers per block, thread limits).  Output labels are in Chinese.
int main() {
	int device_count;
	cudaGetDeviceCount(&device_count);
	for (int i = 0; i < device_count; i++) {
		// cudaDeviceProp holds the queried device information.
		cudaDeviceProp device_prop;
		cudaGetDeviceProperties(&device_prop, i);
		std::cout << "=============================================================================" << std::endl;
		std::cout << "使用GPU device:" << i << ": " << device_prop.name << std::endl;
		std::cout << "设备全局内存总量:" << device_prop.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
		std::cout << "SM数量(一个线程块对应一个物理上的sm):" << device_prop.multiProcessorCount << std::endl;
		std::cout << "每个线程块的共享内存大小:" << device_prop.sharedMemPerBlock / 1024.0 << "KB" << std::endl;
		std::cout << "设备上一个线程块中可用的32位寄存器数量:" << device_prop.regsPerBlock << std::endl;
		std::cout << "每个SM的最大线程数:" << device_prop.maxThreadsPerMultiProcessor << std::endl;
		// 32 threads per warp, so max warps = max threads / 32.
		std::cout << "每个SM的最大线程束数:" << device_prop.maxThreadsPerMultiProcessor / 32 << std::endl;
		std::cout << "设备上多处理器的数量:" << device_prop.multiProcessorCount << std::endl;
		std::cout << "=============================================================================" << std::endl;
	}
	return 0;
}
|
4,872 | #include <stdio.h>
#include <math.h>
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
    /********************************************************************
     *
     * Elementwise vector addition C = A + B, where A, B and C are
     * (1 * n) vectors. One thread computes one element; the guard
     * covers the grid tail when n is not a multiple of the block size.
     *
     ********************************************************************/
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= n)
        return;
    C[gid] = A[gid] + B[gid];
}
void basicVecAdd( float *A, float *B, float *C, int n)
{
    // Launches VecAdd to compute C = A + B elementwise.
    // A, B and C must be device pointers of length n.
    const unsigned int BLOCK_SIZE = 256;
    // Integer ceiling division. The previous (int)ceil((float)n / BLOCK_SIZE)
    // loses precision once n exceeds 2^24 (float has 24 mantissa bits),
    // which can under-provision the grid and leave a tail uncomputed.
    const unsigned int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    VecAdd <<<GRID_SIZE, BLOCK_SIZE>>> (n, A, B, C);
}
4,873 | #include <assert.h>
#include <stdio.h>
#include <stdio.h>
#include <cuda.h>
__global__ void
matrix_multipy(float *M, float *I, float *R, int w){
    // Computes R = M * I for w x w row-major matrices.
    // One thread per output element; assumes a single w x w thread block.
    int col = threadIdx.x;
    int row = threadIdx.y;
    float acc = 0;
    for (int t = 0; t < w; ++t)
        acc += M[row * w + t] * I[t * w + col];
    R[row * w + col] = acc;
}
int
main(void) {
    // Reads a matrix width w and an exponent `power` from stdin, computes
    // M^power on the GPU via (power-1) sequential matrix multiplies, and
    // prints the result with three decimals per entry.
    int w, power;
    if(scanf("%d\n", &w) != 1 || w < 1){
        printf("error\n");
        return -1;
    }
    if(scanf("%d\n", &power) != 1 || power < 0){
        printf("error\n");
        return -1;
    }
    float *M = (float *)malloc(w * w * sizeof(float));
    float *I = (float *)malloc(w * w * sizeof(float));
    float *R = (float *)malloc(w * w * sizeof(float));
    float *dM, *dI, *dR;
    cudaMalloc(&dM, w*w*sizeof(float));
    cudaMalloc(&dI, w*w*sizeof(float));
    cudaMalloc(&dR, w*w*sizeof(float));
    if(power==0){
        // M^0 is the identity matrix; no device work is needed.
        int j, k =0;
        for(j = 0; j<w;j++){
            for(k = 0; k<w;k++){
                if(j==k)
                    printf("%.3f ",1.0);
                else
                    printf("%.3f ",0.0);
            }
            printf("\n");
        }
        // Release device memory too (the original leaked dM/dI/dR here),
        // and signal success: printing the identity is not an error.
        cudaFree(dM);
        cudaFree(dI);
        cudaFree(dR);
        free(M);
        free(I);
        free(R);
        return 0;
    }
    for(int i = 0; i < w*w; i++){
        float x;
        if(scanf("%f",&x) == 1){
            M[i] = x;
            I[i] = x;
        }
        else {
            // Bad input: clean up everything before bailing out.
            printf("error\n");
            cudaFree(dM);
            cudaFree(dI);
            cudaFree(dR);
            free(M);
            free(I);
            free(R);
            return -1;
        }
    }
    cudaMemcpy(dM, M, w*w*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dI, I, w*w*sizeof(float), cudaMemcpyHostToDevice);
    // NOTE: a single w x w block limits w to the per-block thread cap
    // (w*w <= 1024 on current GPUs).
    dim3 dimBlock (w,w);
    dim3 dimGrid(1);
    for(int i = 1; i<power; i++){
        matrix_multipy<<<dimGrid,dimBlock>>>(dM,dI,dR,w);
        cudaThreadSynchronize();
        // Feed the product back in as the next right-hand operand. Both
        // pointers are device memory, so this must be a device-to-device
        // copy (the original wrongly used cudaMemcpyDeviceToHost).
        cudaMemcpy(dI, dR, w*w*sizeof(float), cudaMemcpyDeviceToDevice);
    }
    cudaMemcpy(R,dR,w*w*sizeof(float),cudaMemcpyDeviceToHost);
    for (int i = 0; i < w * w; i++) {
        if (power == 1)
            printf("%.3f ", M[i]);
        else
            printf("%.3f ", R[i]);
        if ((i + 1) % w == 0)
            printf("\n");
    }
    // Free each device buffer exactly once (the original freed dR twice
    // and leaked dM).
    cudaFree(dM);
    cudaFree(dI);
    cudaFree(dR);
    free(M);
    free(I);
    free(R);
    return 0;
}
|
4,874 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
typedef unsigned long ulint;
typedef unsigned long long ulint64;
int banyakdata = 1024;
int dimensigrid = 8;
int dimensiblok = 128;
void modexp(ulint a, ulint b, ulint c, ulint* res) {
    // Computes a^b mod c by binary (square-and-multiply) exponentiation
    // and stores the result in *res. Intermediate products are held in a
    // 64-bit accumulator before reduction mod c.
    ulint64 base = a;
    ulint64 acc = 1;
    for (; b != 0; b >>= 1) {
        if (b & 1UL)
            acc = (acc * base) % c;
        // Skip the final squaring once no higher bits remain.
        if (b >> 1)
            base = (base * base) % c;
    }
    *res = (ulint)acc;
}
void enkripsi(ulint g, ulint k, ulint p, ulint m, ulint y, ulint *res) {
    // ElGamal-style encryption of message m with ephemeral key k:
    //   res[0] = g^k mod p,  res[1] = m * y^k mod p.
    ulint *c1 = res;
    ulint *c2 = res + 1;
    modexp(g, k, p, c1);
    modexp(y, k, p, c2);
    *c2 = *c2 * m % p;
}
void dekripsi(ulint a, ulint b, ulint p, ulint e, ulint *res) {
    // Recovers the plaintext from ciphertext pair (a, b):
    //   *res = b * a^e mod p, where e is the decryption exponent.
    modexp(a, e, p, res);
    *res = *res * b % p;
}
void kernelenk(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res) {
    // Sequentially encrypts all `banyakdata` messages; the ciphertext
    // pair for message idx lands at res[2*idx] and res[2*idx + 1].
    for (int idx = 0; idx < banyakdata; ++idx)
        enkripsi(g, k[idx], p, m[idx], y, &res[2 * idx]);
}
void kerneldek(ulint *c, ulint p, ulint e, ulint *res) {
    // Sequentially decrypts all `banyakdata` ciphertext pairs; pair idx
    // is read from c[2*idx], c[2*idx + 1] and written to res[idx].
    for (int idx = 0; idx < banyakdata; ++idx)
        dekripsi(c[2 * idx], c[2 * idx + 1], p, e, &res[idx]);
}
void enkripsiCUDA(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res) {
    // Encrypts all messages (currently on the CPU despite the name) and
    // reports the elapsed wall time in milliseconds.
    clock_t begin = clock();
    kernelenk(m,k,g,p,y,res);
    clock_t end = clock();
    // clock() ticks are CLOCKS_PER_SEC per second, not microseconds.
    // The original divided the raw tick delta by 1000, which is only
    // milliseconds on platforms where CLOCKS_PER_SEC == 1000000.
    double time_spent = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi : %f milliseconds\n", time_spent);
    printf("\n<<<<<<<<<<<<<<HASIL KE CPU>>>>>>>>>>>>>>>\n");
}
void dekripsiCUDA(ulint *c, ulint p, ulint e, ulint *res2) {
    // Decrypts all ciphertext pairs (currently on the CPU despite the
    // name) and reports the elapsed wall time in milliseconds.
    clock_t begin = clock();
    kerneldek(c,p,e,res2);
    clock_t end = clock();
    // Convert clock() ticks via CLOCKS_PER_SEC; the original's /1000
    // assumed microsecond ticks, which is not portable.
    double time_spent = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi : %f milliseconds\n", time_spent);
    printf("\n<<<<<<<<<<<<<<HASIL KE CPU>>>>>>>>>>>>>>>\n");
}
void initenkripsi(ulint *m, ulint *k){
    // Fills the message and ephemeral-key arrays with pseudo-random
    // byte values in [0, 255].
    for (int idx = 0; idx < banyakdata; ++idx) {
        m[idx] = rand() % 256;
        k[idx] = rand() % 256;
    }
}
int main(){
    // Demo driver: generates an ElGamal-style keypair over p = 257,
    // encrypts `banyakdata` random byte messages, decrypts them again,
    // and prints a few samples of each stage.
    ulint *m, *k, *res, *res2, g, p, y, x, e;
    m = (ulint*)malloc(banyakdata * sizeof(ulint));
    k = (ulint*)malloc(banyakdata * sizeof(ulint));
    // Each message yields a two-element ciphertext, hence 2x storage.
    res = (ulint*)malloc(banyakdata * 2 * sizeof(ulint));
    res2 = (ulint*)malloc(banyakdata * sizeof(ulint));
    srand(2018);  // fixed seed for reproducible runs
    p = 257;      // prime modulus
    g = rand() % 256;  // generator candidate
    x = rand() % 256;  // private key
    modexp(g,x,p,&y);  // public key y = g^x mod p
    initenkripsi(m, k);
    printf("<<<<<<<<<<<<<<Pesan Asli>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("m[%d] = %lu\n", i, m[i]);
    }
    printf("m[...]\n");
    printf("m[%d] = %lu\n", banyakdata-1, m[banyakdata-1]);
    enkripsiCUDA(m,k,g,p,y,res);
    printf("<<<<<<<<<<<<<<Hasil Enkripsi>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("c[%d] = %lu c[%d] = %lu\n", 2*i, res[2*i], 2*i+1, res[2*i+1]);
    }
    printf("c ...\n");
    printf("c[%d] = %lu c[%d] = %lu\n", banyakdata * 2-2, res[banyakdata * 2-2], banyakdata *2-1,res[banyakdata*2-1]);
    // Decryption exponent: a^(p-1-x) = a^(-x) mod p (group order p-1).
    e = p-x-1;
    dekripsiCUDA(res,p,e,res2);
    printf("<<<<<<<<<<<<<<Hasil Dekripsi>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("m[%d] = %lu\n", i, res2[i]);
    }
    printf("m[...]\n");
    printf("m[%d] = %lu\n", banyakdata-1, res2[banyakdata-1]);
    free(m);
    free(k);
    free(res);
    free(res2);
    return 0;
}
4,875 | #include "cuda_runtime.h"
#include <stdio.h>
// Intentionally empty kernel; used only to exercise a device launch.
__global__ void kernel(void) {
}
int main(void) {
    // Launch the empty kernel once, then greet from the host.
    kernel<<<1,1>>> ();
    // Kernel launches are asynchronous; block until it has actually run
    // so the process does not exit with the launch still pending.
    cudaDeviceSynchronize();
    printf("Hello Cuda!\n");
    return 0;
}
4,876 | #include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void multiply_kernel_ccc(cuFloatComplex *in1, cuFloatComplex *in2,
                                    cuFloatComplex *out, int n) {
  // Pointwise complex multiplication out[i] = in1[i] * in2[i].
  // cuCmulf expands to the same (ac - bd, ad + bc) formula the original
  // spelled out by hand.
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    out[idx] = cuCmulf(in1[idx], in2[idx]);
  }
}
// Host wrapper: launches the complex-multiply kernel on `stream` with the
// caller-supplied grid/block sizes. All three buffers must be device
// pointers holding at least n elements.
void exec_multiply_kernel_ccc(cuFloatComplex *in1, cuFloatComplex *in2,
                              cuFloatComplex *out, int n, int grid_size,
                              int block_size, cudaStream_t stream) {
  multiply_kernel_ccc<<<grid_size, block_size, 0, stream>>>(in1, in2, out, n);
}
// Queries the occupancy-optimal launch configuration for the kernel and
// returns it through minGrid/minBlock.
void get_block_and_grid_multiply(int *minGrid, int *minBlock) {
  cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, multiply_kernel_ccc, 0,
                                     0);
}
4,877 | #include <cstdio>
#define N 200
__global__ void add(int* a, int* b, int* c)
{
    // Elementwise c[i] = a[i] + b[i] for i < N; one thread per element,
    // guarded against the grid tail.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
int main()
{
    // Adds two N-element integer vectors on the GPU and prints each sum.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    const size_t bytes = N * sizeof(int);
    // a = 0,1,2,... and b holds the squares.
    for (int i = 0; i < N; ++i)
    {
        a[i] = i;
        b[i] = i * i;
    }
    cudaMalloc((void**)&dev_a, bytes);
    cudaMalloc((void**)&dev_b, bytes);
    cudaMalloc((void**)&dev_c, bytes);
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    // Ceil-divide so every element is covered with 128-thread blocks.
    add<<<(N + 127) / 128, 128>>>(dev_a, dev_b, dev_c);
    // This blocking copy also synchronizes with the kernel.
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    for (int i = 0; i < N; ++i)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    return 0;
}
|
4,878 | #include "includes.h"
// Builds a 256-bin intensity histogram of an 8-bit texture: each in-bounds
// thread samples one texel and atomically bumps the matching bin.
// Expects a 2D launch covering src_width x src_height; `histogram` must be
// a device array of at least 256 ints, zeroed by the caller.
__global__ void Thumbnail_uchar(cudaTextureObject_t uchar_tex, int *histogram, int src_width, int src_height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y < src_height && x < src_width)
    {
        unsigned char pixel = tex2D<unsigned char>(uchar_tex, x, y);
        // atomicAdd is required: many threads may see the same intensity.
        atomicAdd(&histogram[pixel], 1);
    }
}
4,879 | #include <cuda_runtime.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} \
void printMatrix(int *C, const int nx, const int ny) {
    // Dumps an nx-by-ny row-major integer matrix to stdout, one matrix
    // row per line, each entry right-aligned in 3 columns.
    printf("\nMatrix: (%d.%d)\n", nx, ny);
    for (int iy = 0; iy < ny; iy++) {
        int *row = C + iy * nx;
        for (int ix = 0; ix < nx; ix++) {
            printf("%3d", row[ix]);
        }
        printf("\n");
    }
    printf("\n");
}
// Debug kernel: each thread prints its thread/block coordinates, its 2D
// matrix coordinate, the flattened global index, and the value it reads
// from A. Expects a 2D launch covering an nx-by-ny row-major matrix.
__global__ void printThreadIndex(int *A, const int nx, const int ny) {
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    // NOTE(review): no bounds guard — assumes the grid exactly covers
    // nx x ny; out-of-range threads would read past A.
    printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) "
           "global index %2d ival %2d\n",
           threadIdx.x, threadIdx.y,
           blockIdx.x, blockIdx.y,
           ix, iy, idx, A[idx]);
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
    // CPU reference: elementwise C = A + B for an nx-by-ny row-major matrix.
    for (int row = 0; row < ny; ++row) {
        const int base = row * nx;
        for (int col = 0; col < nx; ++col) {
            C[base + col] = A[base + col] + B[base + col];
        }
    }
}
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC,
                                 int nx, int ny)
{
    // 2D-indexed elementwise sum MatC = MatA + MatB; one thread per
    // matrix element, with a guard for partially covered blocks.
    unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < nx && row < ny) {
        unsigned int flat = row * nx + col;
        MatC[flat] = MatA[flat] + MatB[flat];
    }
}
int main(int argc, char **argv) {
    // Demonstrates 2D thread indexing: uploads an int matrix and has each
    // device thread print its coordinates and the element it maps to.
    printf("%s Starting...\n", argv[0]);
    // Select device 0 and report its name.
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // Matrix dimensions.
    int nx = 8;
    int ny = 6;
    int nxy = nx * ny;
    // The buffers hold ints, so size them with sizeof(int). The original
    // used sizeof(float), which only worked because the two types happen
    // to be the same size on this platform.
    int nBytes = nxy * sizeof(int);
    // Allocate the host matrix and initialize it with its linear indices.
    int *h_A;
    h_A = (int *)malloc(nBytes);
    for (int i = 0; i < nxy; i++) {
        h_A[i] = i;
    }
    printMatrix(h_A, nx, ny);
    // Allocate the device buffer and upload.
    int *d_MatA;
    CHECK(cudaMalloc((void **)&d_MatA, nBytes));
    CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
    // 4x2 threads per block; ceil-divide so the grid covers nx x ny.
    dim3 block(4, 2);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    // Launch and wait for all device printf output to complete.
    printThreadIndex<<<grid, block>>>(d_MatA, nx, ny);
    CHECK(cudaDeviceSynchronize());
    // Release host and device memory.
    CHECK(cudaFree(d_MatA));
    free(h_A);
    // Reset the device before exiting.
    CHECK(cudaDeviceReset());
    return(0);
}
|
4,880 | #include "includes.h"
// Backward pass of max-pooling: routes each output gradient back to the
// input position that produced the max during the forward pass.
// One thread per input element (n = total input elements); `indexes`
// holds, for each pooled output, the flat input index of its argmax.
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
    // Pooled output height/width derived from input dims, pad and stride.
    int h = (in_h + pad - size) / stride_y + 1;
    int w = (in_w + pad - size) / stride_x + 1;
    int c = in_c;
    // Half-width of the neighborhood of output cells whose pooling window
    // can contain a given input pixel.
    int area_x = (size - 1) / stride_x;
    int area_y = (size - 1) / stride_y;
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;
    int index = id;
    // Decompose the flat id into (batch b, channel k, row i, col j).
    int j = id % in_w;
    id /= in_w;
    int i = id % in_h;
    id /= in_h;
    int k = id % in_c;
    id /= in_c;
    int b = id;
    int w_offset = -pad / 2;
    int h_offset = -pad / 2;
    float d = 0;
    int l, m;
    // Scan every output cell whose window could include input (i, j) and
    // accumulate its gradient if this input was that cell's argmax.
    for(l = -area_y; l < area_y+1; ++l){
        for(m = -area_x; m < area_x+1; ++m){
            int out_w = (j-w_offset)/stride_x + m;
            int out_h = (i-h_offset)/stride_y + l;
            int out_index = out_w + w*(out_h + h*(k + c*b));
            int valid = (out_w >= 0 && out_w < w &&
                     out_h >= 0 && out_h < h);
            d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
        }
    }
    prev_delta[index] += d;
}
4,881 | #include "CudaProcess.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
__global__ void kernel(int* pSrc1, int* pSrc2, int* pResult, int length)
{
	// Elementwise integer addition pResult = pSrc1 + pSrc2, with a guard
	// for threads past the end of the arrays.
	int gid = blockDim.x * blockIdx.x + threadIdx.x;
	if (gid < length) {
		pResult[gid] = pSrc1[gid] + pSrc2[gid];
	}
}
void Process() {
	// Demonstrates pinned host memory plus a device vector add, then
	// prints one element to verify the round trip.
	int length = 1024;
	size_t size = sizeof(int) * length;
	int* pHostSrc1;
	int* pHostSrc2;
	int* pHostResult;
	int* pDevSrc1;
	int* pDevSrc2;
	int* pDevResult;
	cudaMallocHost(&pHostSrc1, size);
	cudaMallocHost(&pHostSrc2, size);
	cudaMallocHost(&pHostResult, size);
	cudaMalloc(&pDevSrc1, size);
	cudaMalloc(&pDevSrc2, size);
	cudaMalloc(&pDevResult, size);
	// cudaMallocHost does not zero memory; initialize every element so
	// the kernel never reads indeterminate values (the original only set
	// index 2 and left the rest uninitialized).
	for (int i = 0; i < length; ++i) {
		pHostSrc1[i] = 0;
		pHostSrc2[i] = 0;
	}
	pHostSrc1[2] = 3;
	pHostSrc2[2] = 5;
	cudaMemcpy(pDevSrc1, pHostSrc1, size, cudaMemcpyHostToDevice);
	cudaMemcpy(pDevSrc2, pHostSrc2, size, cudaMemcpyHostToDevice);
	dim3 block(128, 1, 1);
	dim3 grid((length + 128 - 1) / 128, 1, 1);
	kernel << <grid, block >> > (pDevSrc1, pDevSrc2, pDevResult, length);
	// Blocking copy: also synchronizes with the kernel above.
	cudaMemcpy(pHostResult, pDevResult, size, cudaMemcpyDeviceToHost);
	cout << pHostSrc1[2] << ":" << pHostSrc2[2] << ":" << pHostResult[2] << endl;
	cout << "end" << endl;
	cudaFree(pDevSrc1);
	cudaFree(pDevSrc2);
	cudaFree(pDevResult);
	cudaFreeHost(pHostSrc1);
	cudaFreeHost(pHostSrc2);
	cudaFreeHost(pHostResult);
	cudaDeviceReset();
}
|
4,882 | #include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include <math.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
// Returns true when the most recent CUBLAS call reported a failure.
inline bool check_cublas_error() {
    return cublasGetError() != CUBLAS_STATUS_SUCCESS;
}
// Pops the sticky CUDA error state; prints the message and returns true
// when an error had occurred.
inline bool checkCUDAError() {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("%s\n", cudaGetErrorString( err));
    return err != cudaSuccess;
}
// Returns a human-readable string for (and clears) the last CUDA error.
extern const char* get_last_cuda_error() {
    cudaError_t err = cudaGetLastError();
    return cudaGetErrorString( err);
}
// Initializes the CUBLAS library. Returns 0 on success, CUBLAS_ERROR
// when initialization reported a failure.
extern int cublas_init() {
    cublasInit();
    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
// Shuts down CUBLAS and tears down the CUDA context for this thread.
// Always returns 0.
extern int cublas_shutdown() {
    cublasShutdown();
    cudaThreadExit();
    return 0;
}
// Selects the CUDA device used by subsequent calls on this thread.
// Returns 0 on success, CUDA_ERROR otherwise.
extern int cuda_set_device(int deviceId) {
    cudaSetDevice(deviceId);
    return checkCUDAError() ? CUDA_ERROR : 0;
}
// Seeds the device-side RNG: reads NUM_RND_STREAMS multipliers from the
// file at `cudamatpath`, uploads them, and runs the seeding kernel.
// Returns 0 on success, ERROR_GENERIC on file problems, CUDA_ERROR on
// device problems.
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
    unsigned int * host_mults;
    host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
    FILE * pFile;
    pFile = fopen (cudamatpath,"r");
    if (!pFile) {
        // Missing/unreadable multiplier file: fail cleanly instead of
        // crashing inside fscanf (the original never checked fopen).
        free(host_mults);
        return ERROR_GENERIC;
    }
    for (int i = 0; i < NUM_RND_STREAMS; i++) {
        if (fscanf (pFile, "%u", &host_mults[i]) != 1) {
            fclose (pFile);
            free(host_mults);
            return ERROR_GENERIC;
        }
    }
    fclose (pFile);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
    cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
    // The host copy is no longer needed once uploaded (the original
    // leaked it).
    free(host_mults);
    cudaThreadSynchronize();
    kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
// Leading (storage-major) dimension: columns count when transposed,
// rows count otherwise.
extern int get_leading_dimension(cudamat* mat) {
    return mat->is_trans ? mat->size[1] : mat->size[0];
}
// The other dimension of the matrix's storage layout.
extern int get_nonleading_dimension(cudamat* mat) {
    return mat->is_trans ? mat->size[0] : mat->size[1];
}
// Marks the matrix as (not) transposed; does not move any data.
extern void set_transpose(cudamat* mat, int is_trans) {
    mat->is_trans = is_trans;
}
// BLAS transpose flag character for this matrix: 't' or 'n'.
inline char get_transpose_char(cudamat* mat) {
    return mat->is_trans ? 't' : 'n';
}
// Blocks the host until all prior device work has completed.
extern void cuda_sync_threads() {
    cudaThreadSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
// Allocates device storage for the full matrix and flags it as resident.
// Returns 0 on success, CUBLAS_ERROR on allocation failure.
extern int allocate_device_memory(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    cublasStatus stat;
    stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        // Also drain any pending CUDA error so later calls start clean.
        checkCUDAError();
        return CUBLAS_ERROR;
    }
    mat->on_device = 1;
    return 0;
}
// Allocates the three CSR-style device arrays (data, indices, indptr)
// for a sparse matrix. On any failure, frees whatever was already
// allocated so nothing leaks (the original leaked earlier arrays when a
// later cublasAlloc failed). Returns 0 or CUBLAS_ERROR.
extern int allocate_device_memory_sparse(cudamat_sparse* mat) {
    int nnz = mat->nnz, rows = mat->size[0];
    cublasStatus stat;
    stat = cublasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        return CUBLAS_ERROR;
    }
    stat = cublasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        cublasFree(mat->data_device.data);
        return CUBLAS_ERROR;
    }
    stat = cublasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
    if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
        checkCUDAError();
        cublasFree(mat->data_device.data);
        cublasFree(mat->data_device.indices);
        return CUBLAS_ERROR;
    }
    mat->on_device = 1;
    return 0;
}
// Downloads the device copy of `mat` into its host buffer.
// Returns 0, CUBLAS_ERROR, or ERROR_NOT_ON_DEVICE if nothing to copy.
extern int copy_to_host(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    if (mat->on_device) {
        cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
        if (check_cublas_error())
            return CUBLAS_ERROR;
    } else
        return ERROR_NOT_ON_DEVICE;
    return 0;
}
// Uploads the host buffer of `mat` to the device, allocating device
// storage first if needed. Returns 0 or an error code.
extern int copy_to_device(cudamat* mat) {
    int len = mat->size[0]*mat->size[1];
    int err_code = 0;
    //if (!mat->owns_data)
    //    return VIEW_ERROR;
    if (!mat->on_device) {
        err_code = allocate_device_memory(mat);
        if (err_code)
            return err_code;
    }
    cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
// Uploads all three CSR arrays of a sparse matrix to the device,
// allocating device storage first if needed. Returns 0 or an error code.
extern int copy_sparse_to_device(cudamat_sparse* mat) {
    int len = mat->nnz, rows = mat->size[0];
    int err_code = 0;
    //if (!mat->owns_data)
    //    return VIEW_ERROR;
    if (!mat->on_device) {
        err_code = allocate_device_memory_sparse(mat);
        if (err_code)
            return err_code;
    }
    cublasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    cublasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    // indptr has one entry per row plus a terminator.
    cublasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    return 0;
}
// Device-to-device copy of mat1's contents into mat2 (same shape
// required). Returns 0, CUBLAS_ERROR, or a dimension-mismatch error.
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
    int len = mat1->size[0]*mat1->size[1];
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
    if (check_cublas_error())
        return CUBLAS_ERROR;
    else
        return 0;
}
// Copies rows [start, end) of `source` into `target` on the device.
// `target` must already be (end-start) x width. Returns 0 or an error.
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = source->size[0];
    int width = source->size[1];
    if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // One 32-thread block per 32-row x 32-column tile of the slice.
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes `source` into rows [start, end) of `target` on the device;
// the inverse of get_row_slice. Returns 0 or an error.
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
    int height = target->size[0];
    int width = target->size[1];
    if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
    dim3 kernelBlockDim(32, 1, 1);
    kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Writes the transpose of `source` into `target` on the device.
// `target` must be width x height when `source` is height x width.
// Returns 0 or an error code.
extern int copy_transpose(cudamat* source, cudamat* target) {
    unsigned int height = source->size[0];
    unsigned int width = source->size[1];
    if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    // setup execution parameters: ceil-divide both dimensions by the tile size
    unsigned int grid_x = height / COPY_BLOCK_SIZE;
    if (height % COPY_BLOCK_SIZE)
        grid_x++;
    unsigned int grid_y = width / COPY_BLOCK_SIZE;
    if (width % COPY_BLOCK_SIZE)
        grid_y++;
    dim3 grid(grid_x, grid_y, 1);
    dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
    kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
    // Synchronize before the error check, consistent with every other
    // kernel wrapper in this file; without it only launch-time errors
    // would be caught here.
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Releases the device buffer of `mat` if this matrix owns it and it is
// resident; views (owns_data == 0) are left untouched.
extern int free_device_memory(cudamat* mat) {
    if (mat->owns_data && mat->on_device) {
        cublasStatus stat;
        stat = cublasFree(mat->data_device);
        mat->on_device = 0;
        if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
            return CUBLAS_ERROR;
    }
    return 0;
}
// Overwrites the recorded dimensions without touching the data or
// validating the total element count.
extern int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
// Like set_shape, but requires the total element count to be preserved.
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
    if (mat->size[0] * mat->size[1] != m * n)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    mat->size[0] = m;
    mat->size[1] = n;
    return 0;
}
// Makes `target` a non-owning device view of columns
// [first_col, last_col) of `source` (column-major storage: a column
// range is contiguous). No data is copied.
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (last_col > source->size[1] || (first_col >= last_col))
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_col * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->size[0] = source->size[0];
    target->size[1] = last_col - first_col;
    target->is_trans = 0;
    // View: the slice must never free the parent's memory.
    target->owns_data = 0;
    return 0;
}
// Makes `target` a non-owning device view of elements
// [first_ind, last_ind) of a row or column vector `source`; the view
// keeps the source's orientation.
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
    // source must be a vector.
    if (source->size[0] > 1 && source->size[1] > 1)
        return ERROR_GENERIC;
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (!source->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (first_ind >= last_ind)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    int num_rows = source->size[0];
    target->data_host = 0;
    target->data_device = source->data_device + first_ind * num_rows;
    target->on_device = 1;
    target->on_host = 0;
    target->is_trans = 0;
    target->owns_data = 0;
    if (source->size[0] > 1) {
        // Column vector: slice along the rows.
        if (last_ind > source->size[0])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = last_ind - first_ind;
        target->size[1] = 1;
    } else {
        // Row vector: slice along the columns.
        if (last_ind > source->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;
        target->size[0] = 1;
        target->size[1] = last_ind - first_ind;
    }
    return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
// Wraps an existing host array (m x n) in a cudamat without copying.
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
    mat->data_host = data;
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_device = 0;
    mat->on_host = 1;
    mat->is_trans = 0;
    mat->owns_data = 1;
}
// Wraps existing host CSR arrays in a cudamat_sparse without copying.
extern void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
    mat->data_host.data = data;
    mat->data_host.indices = indices;
    mat->data_host.indptr = indptr;
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_device = 0;
    mat->on_host = 1;
    mat->is_trans = 0;
    mat->owns_data = 1;
    mat->nnz = nnz;
}
// Flags the matrix as having a valid device copy.
extern void set_on_device(cudamat* mat) {
    mat->on_device = 1;
}
// Creates an m x n matrix that exists only on the device (no host
// buffer). Returns the result of the device allocation.
extern int init_empty(cudamat* mat, int m, int n) {
    mat->size[0] = m;
    mat->size[1] = n;
    mat->on_device = 0;
    mat->on_host = 0;
    mat->is_trans = 0;
    mat->owns_data = 1;
    return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
// Fills `mat` in place with uniform pseudo-random values using the
// pre-seeded device RNG streams. Returns 0 or an error code.
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Fills `mat` in place with Gaussian pseudo-random values using the
// pre-seeded device RNG streams. Returns 0 or an error code.
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Draws Bernoulli samples from the probabilities in `mat` into `target`
// (same shape) using the device RNG. Returns 0 or an error code.
extern int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleBernoulli<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Same as sample_bernoulli but via the tanh-parameterized kernel.
extern int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleBernoulliTanh<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Draws Poisson samples parameterized by `mat` into `target` (same
// shape) using the device RNG. Returns 0 or an error code.
extern int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSamplePoisson<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Draws Gaussian samples (scaled by `mult`) based on `mat` into `target`
// (same shape) using the device RNG. Returns 0 or an error code.
extern int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kSampleGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Applies the kPerturbEnergy kernel to `mat`, writing into `target`
// (same shape). Returns 0 or an error code.
extern int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kPerturbEnergy<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// Applies the kPerturbProb kernel to `mat`, writing into `target`
// (same shape). Returns 0 or an error code.
extern int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
    int len = mat->size[0] * mat->size[1];
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kPerturbProb<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
// In-place random dropout on `mat`: elements are replaced with `val`
// with probability `dropprob`. Returns 0 or an error code.
extern int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val) {
    int len = mat->size[0] * mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    kRandomDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
// target = mat + vec broadcast across columns; vec must be an h x 1
// column vector matching mat's row count. Returns 0 or an error code.
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError()) {
        return CUDA_ERROR;
    }
    return 0;
}
// target = mat + mult * vec broadcast across columns; vec must be an
// h x 1 column vector. Returns 0 or an error code.
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat with its diagonal scaled by `val`; mat and target must
// have identical shapes. Returns 0 or an error code.
extern int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat with `val` added to its diagonal; mat and target must
// have identical shapes. Returns 0 or an error code.
extern int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat with its diagonal multiplied elementwise by `vec`; vec's
// total element count must equal mat's row count. Returns 0 or an error.
extern int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[1] * vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kMultDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat with `vec` added elementwise to its diagonal; vec's
// total element count must equal mat's row count. Returns 0 or an error.
extern int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[0] != vec->size[1] * vec->size[0] ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat + mult * vec broadcast across rows; vec must be a 1 x w
// row vector matching mat's column count. Returns 0 or an error code.
extern int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddRowMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
// target = mat + vec broadcast across rows; vec must be a 1 x w row
// vector matching mat's column count. Returns 0 or an error code.
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !vec->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
/* target = mat scaled per-row by the h x 1 column vector vec
   (kernel: kMultByColVector).  Returns 0 on success or an error code. */
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    /* Fix: target->on_device was previously unchecked, so a host-resident
       target could reach the kernel and fault. */
    if (!mat->on_device || !vec->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* target = mat scaled per-column by the 1 x w row vector vec
   (kernel: kMultByRowVector).  Returns 0 on success or an error code. */
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    /* Fix: target->on_device was previously unchecked, so a host-resident
       target could reach the kernel and fault. */
    if (!mat->on_device || !vec->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* target = mat divided per-row by the h x 1 column vector vec
   (kernel: kDivByColVector).  Returns 0 on success or an error code. */
extern int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    /* Fix: target->on_device was previously unchecked, so a host-resident
       target could reach the kernel and fault. */
    if (!mat->on_device || !vec->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kDivByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* target = mat divided per-column by the 1 x w row vector vec
   (kernel: kDivByRowVector).  Returns 0 on success or an error code. */
extern int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    /* Fix: target->on_device was previously unchecked, so a host-resident
       target could reach the kernel and fault. */
    if (!mat->on_device || !vec->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
        mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Elementwise comparison target[i] = (mat1[i] <= mat2[i]) via kLessThanEq.
   Returns 0 on success or a cudamat error code. */
extern int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];

    /* Fix: target->on_device was previously unchecked here, unlike the
       greater_than_eq counterpart. */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;

    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kLessThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Elementwise comparison target[i] = (mat1[i] < mat2[i]) via kLessThan.
   Returns 0 on success or a cudamat error code. */
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int len = mat1->size[0]*mat1->size[1];

    /* Fix: target->on_device was previously unchecked here, unlike the
       greater_than counterpart. */
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;

    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Launches kLessThanEqScalar over every element of mat, writing the result
   into target.  Both operands must be on the device, agree in
   transposedness, and have identical shapes.  Returns 0 or an error code. */
extern int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
    int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->is_trans != target->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kLessThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kLessThanScalar over every element of mat, writing the result
   into target.  Both operands must be on the device, agree in
   transposedness, and have identical shapes.  Returns 0 or an error code. */
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
    int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->is_trans != target->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kGreaterThanEq elementwise over mat1 and mat2, writing into
   target.  All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kGreaterThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kGreaterThan elementwise over mat1 and mat2, writing into
   target.  All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kUpperBound elementwise over mat1 and mat2, writing into target.
   All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kUpperBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kLowerBound elementwise over mat1 and mat2, writing into target.
   All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kLowerBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kGreaterThanEqScalar over every element of mat, writing into
   target.  Operands must be on the device, agree in transposedness, and
   have identical shapes.  Returns 0 or an error code. */
extern int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
    int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->is_trans != target->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kGreaterThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kGreaterThanScalar over every element of mat, writing into
   target.  Operands must be on the device, agree in transposedness, and
   have identical shapes.  Returns 0 or an error code. */
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
    int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->is_trans != target->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kUpperBoundScalar over every element of mat, writing into
   target.  Operands must be on the device, agree in transposedness, and
   have identical shapes.  Returns 0 or an error code. */
extern int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
    int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->is_trans != target->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kUpperBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kLowerBoundScalar over every element of mat, writing into
   target.  Operands must be on the device, agree in transposedness, and
   have identical shapes.  Returns 0 or an error code. */
extern int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
    int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->is_trans != target->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kLowerBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Column-wise max reduction: for axis == 0, writes one value per column of
   the h x w matrix mat into the 1 x w target (kernel: kMaxColumnwise).
   Only axis == 0 is supported; any other axis returns ERROR_UNSUPPORTED.
   Returns 0 on success or a cudamat error code. */
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;

        /* 32 threads per block; 32 floats of dynamic shared memory for the
           block's partial results. */
        int shared_mem_size = 32 * sizeof(float) ;

        /* Lay the w columns out on a near-square 2D grid (w1 * w2 >= w).
           NOTE(review): if w == 0, w1 is 0 and `w / w1` divides by zero --
           callers presumably never pass empty matrices; confirm. */
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);

        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Runs kChooseMaxAndAccumulate over the columns of mat, accumulating into
   acc, which must have the same shape as mat.  Based on the kernel name,
   this presumably marks/accumulates the per-column maximum in acc --
   TODO confirm against the kernel's definition.
   Returns 0 on success or a cudamat error code. */
extern int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    if (!mat->on_device || !acc->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    /* Near-square 2D grid over the w columns, 32 threads per block.
       NOTE(review): w == 0 would make w1 zero and divide by zero below. */
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kChooseMaxAndAccumulate<<<gridDim,32>>>(mat->data_device, acc->data_device, w, h);

    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Runs kChooseMaxColumnwise (axis == 0 only) over mat into target, which
   must have the same shape as mat -- unlike max_by_axis, the output is a
   full matrix, not a row vector.  Any other axis returns ERROR_UNSUPPORTED.
   Returns 0 on success or a cudamat error code. */
extern int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (axis == 0) {
        if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;

        /* 32 threads per block, 32 floats of dynamic shared memory. */
        int shared_mem_size = 32 * sizeof(float) ;

        /* Near-square 2D grid over the w columns.
           NOTE(review): w == 0 would divide by zero below. */
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kChooseMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);

        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Column-wise argmax (axis == 0 only): writes one index per column of the
   h x w matrix mat into the 1 x w target (kernel: kArgMaxColumnwise).
   Any other axis returns ERROR_UNSUPPORTED.
   Returns 0 on success or a cudamat error code. */
extern int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;

        /* Near-square 2D grid over the w columns, 32 threads per block.
           NOTE(review): w == 0 would divide by zero below. */
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kArgMaxColumnwise<<<gridDim,32>>>(mat->data_device, target->data_device, w, h);

        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Column-wise squared-sum reduction (axis == 0 only) into the 1 x w target
   (kernel: kSqSumColumnwise).  `mult` and `p` are forwarded to the kernel;
   their exact semantics (scale factor / prior value, presumably) live in
   the kernel definition -- TODO confirm.  Any other axis returns
   ERROR_UNSUPPORTED.  Returns 0 on success or a cudamat error code. */
extern int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (axis == 0) {
        if (target->size[0] != 1 || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;

        /* 32 threads per block, 32 floats of dynamic shared memory. */
        int shared_mem_size = 32 * sizeof(float) ;

        /* Near-square 2D grid over the w columns.
           NOTE(review): w == 0 would divide by zero below. */
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kSqSumColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);

        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Column-wise norm limiting (axis == 0 only): runs kNormLimitColumnwise
   with the `norm` bound over mat into target, which must match mat's
   shape.  Presumably rescales columns whose norm exceeds `norm` -- TODO
   confirm against the kernel.  Any other axis returns ERROR_UNSUPPORTED.
   Returns 0 on success or a cudamat error code. */
extern int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
                             float norm) {
    unsigned int h = mat->size[0],
                 w = mat->size[1];

    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->is_trans)
        return ERROR_TRANSPOSED;

    if (axis == 0) {
        if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
            return ERROR_INCOMPATIBLE_DIMENSIONS;

        /* 32 threads per block, 32 floats of dynamic shared memory. */
        int shared_mem_size = 32 * sizeof(float) ;

        /* Near-square 2D grid over the w columns.
           NOTE(review): w == 0 would divide by zero below. */
        int w1 = floor(sqrt(w));
        int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
        dim3 gridDim(w1, w2, 1);
        kNormLimitColumnwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h);

        cudaThreadSynchronize();
    } else
        return ERROR_UNSUPPORTED;

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Launches kSign over every element of mat, writing into target.  Operands
   must be on the device, agree in transposedness, and have identical
   shapes.  Returns 0 or an error code. */
extern int sign(cudamat* mat, cudamat* target) {
    int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->is_trans != target->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kApplyCos elementwise over mat into target.  Both must be on
   the device and share a shape.  Returns 0 or an error code. */
extern int apply_cos(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kApplyCos<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kApplySin elementwise over mat into target.  Both must be on
   the device and share a shape.  Returns 0 or an error code. */
extern int apply_sin(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kApplySin<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kApplySigmoid elementwise over mat into target.  Both must be
   on the device and share a shape.  Returns 0 or an error code. */
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kApplyTanh elementwise over mat into target.  Both must be on
   the device and share a shape.  Returns 0 or an error code. */
extern int apply_tanh(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kApplyAbs elementwise over mat into target.  Both must be on
   the device and share a shape.  Returns 0 or an error code. */
extern int apply_abs(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kApplyLog1PlusExp elementwise over mat into target.  Both must
   be on the device and share a shape.  Returns 0 or an error code. */
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kLog elementwise over mat into target; `tiny` is forwarded to
   the kernel (presumably a floor to avoid log(0) -- see kLog).  Both
   matrices must be on the device and share a shape.  Returns 0 or an
   error code. */
extern int apply_log(cudamat* mat, cudamat* target, float tiny) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n, tiny);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kExp elementwise over mat into target.  Both must be on the
   device and share a shape.  Returns 0 or an error code. */
extern int apply_exp(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kCeil elementwise over mat into target.  Both must be on the
   device and share a shape.  Returns 0 or an error code. */
extern int apply_ceil(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kCeil<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kFloor elementwise over mat into target.  Both must be on the
   device and share a shape.  Returns 0 or an error code. */
extern int apply_floor(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kFloor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kSqrt elementwise over mat into target.  Both must be on the
   device and share a shape.  Returns 0 or an error code. */
extern int apply_sqrt(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kPow elementwise: raises each element of mat to the scalar
   exponent `pow`, writing into target.  Both matrices must be on the
   device and share a shape.  Returns 0 or an error code. */
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kPowMatrix: raises each element of mat to the corresponding
   element of `pow`, writing into target.  All three matrices must be on
   the device and share a shape.  Returns 0 or an error code. */
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
    unsigned int len = mat->size[0] * mat->size[1];

    /* Fix: pow->on_device was previously unchecked, so a host-resident
       exponent matrix could reach the kernel and fault. */
    if (!mat->on_device || !pow->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Launches kCrossEntropy over mat and `pow` into target; `tiny` is
   forwarded to the kernel (presumably a log floor -- see kCrossEntropy).
   All three matrices must be on the device and share a shape.
   Returns 0 or an error code. */
extern int compute_cross_entropy(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
    unsigned int len = mat->size[0] * mat->size[1];

    /* Fix: pow->on_device was previously unchecked, so a host-resident
       operand could reach the kernel and fault. */
    if (!mat->on_device || !pow->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Launches kCrossEntropyBernoulli over mat and `pow` into target; `tiny`
   is forwarded to the kernel.  All three matrices must be on the device
   and share a shape.  Returns 0 or an error code. */
extern int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
    unsigned int len = mat->size[0] * mat->size[1];

    /* Fix: pow->on_device was previously unchecked, so a host-resident
       operand could reach the kernel and fault. */
    if (!mat->on_device || !pow->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kCrossEntropyBernoulli<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Launches kCorrectPreds over mat and `pow` into target with the decision
   threshold `cutoff`.  All three matrices must be on the device and share
   a shape.  Returns 0 or an error code. */
extern int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
    unsigned int len = mat->size[0] * mat->size[1];

    /* Fix: pow->on_device was previously unchecked, so a host-resident
       operand could reach the kernel and fault. */
    if (!mat->on_device || !pow->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kCorrectPreds<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, cutoff);
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Launches kReciprocal elementwise over mat into target.  Both must be on
   the device and share a shape.  Returns 0 or an error code. */
extern int reciprocal(cudamat* mat, cudamat* target) {
    unsigned int n = mat->size[0] * mat->size[1];

    if (!mat->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Matrix multiply via cuBLAS SGEMM: target = alpha * op(mat1) * op(mat2)
   + beta * target, where op() is decided per-operand by
   get_transpose_char() (no data is physically rearranged).  Dimension
   compatibility is checked through the get_*_dimension helpers so that
   logically-transposed operands are validated correctly.  Note the
   leading dimensions passed to SGEMM are the *stored* row counts
   (size[0]), not the logical ones.  Returns 0 on success, CUBLAS_ERROR
   or ERROR_INCOMPATIBLE_DIMENSIONS otherwise. */
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
        get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
        get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    /* m x k times k x n -> m x n, in cuBLAS's column-major convention. */
    int m = get_leading_dimension(mat1),
        k = get_leading_dimension(mat2),
        n = get_nonleading_dimension(mat2);

    cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
                m, n, k,
                alpha, mat1->data_device, mat1->size[0],
                mat2->data_device, mat2->size[0],
                beta, target->data_device, target->size[0]);

    if (check_cublas_error())
        return CUBLAS_ERROR;

    cudaThreadSynchronize();

    return 0;
}
/* Sparse-dense multiply: target = alpha * mat1 * mat2 + beta * target,
   where mat1 is a CSR-style sparse matrix (data/indptr/indices) and mat2
   is dense (kernel: kSparseDot).  mat1 is m x k, mat2 is k x n.
   Returns 0 on success or a cudamat error code. */
extern int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;

    int m = mat1->size[0],
        k = mat1->size[1],
        k2 = mat2->size[0],
        n = mat2->size[1];

    if (k != k2) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    /* Ceil-divide the m x n output over COPY_BLOCK_SIZE^2 thread blocks. */
    unsigned int grid_x = m / COPY_BLOCK_SIZE;
    if (m % COPY_BLOCK_SIZE)
        grid_x++;

    unsigned int grid_y = n / COPY_BLOCK_SIZE;
    if (n % COPY_BLOCK_SIZE)
        grid_y++;

    dim3 grid(grid_x, grid_y, 1);
    dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);

    kSparseDot<<<grid, threads>>>(m, n, k, mat1->data_device.data,
                                  mat1->data_device.indptr,
                                  mat1->data_device.indices,
                                  mat2->data_device, target->data_device, beta, alpha);

    /* Fix: this launches a plain CUDA kernel, not a cuBLAS call, so the
       old check_cublas_error() never saw kernel failures.  Synchronize
       first, then check the CUDA error state like every other kernel
       wrapper in this file. */
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Dot product of two device matrices viewed as flat vectors, via
   cublasSdot.  On success returns the dot product and sets *err_code to
   0; on failure sets *err_code and returns 0 (or -1 for cuBLAS errors). */
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
    int len = mat1->size[0]*mat1->size[1];
    float res;

    if (!mat1->on_device || !mat2->on_device) {
        /* Fix: this path used to return the error constant as the float
           result while leaving *err_code unset; report the error through
           err_code like the other error paths so callers can detect it. */
        *err_code = ERROR_NOT_ON_DEVICE;
        return 0;
    }

    if (mat1->is_trans != mat2->is_trans) {
        *err_code = ERROR_TRANSPOSEDNESS;
        return 0;
    }

    if (mat2->size[0] * mat2->size[1] != len) {
        *err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
        return 0;
    }

    res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);

    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}
/* In-place SAXPY: mat1 += alpha * mat2, via cublasSaxpy.  Both operands
   must be on the device, agree in transposedness, and have identical
   shapes.  Returns 0 on success or a cudamat error code. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    cublasSaxpy(n, alpha, mat2->data_device, 1, mat1->data_device, 1);

    return check_cublas_error() ? CUBLAS_ERROR : 0;
}
/* In-place update mat1 using the sign of mat2 scaled by `mult`
   (kernel: kAddMultSign).  Both operands must be on the device, agree in
   transposedness, and have identical shapes.  Returns 0 on success or a
   cudamat error code. */
extern int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
    int len = mat1->size[0]*mat1->size[1];

    if (!mat1->on_device || !mat2->on_device)
        return ERROR_NOT_ON_DEVICE;

    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;

    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kAddMultSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, len, mult);

    /* Fix: this is a plain CUDA kernel, not a cuBLAS call, so the old
       check_cublas_error() never saw kernel failures; synchronize and
       check the CUDA error state like the other kernel wrappers. */
    cudaThreadSynchronize();

    if (checkCUDAError())
        return CUDA_ERROR;

    return 0;
}
/* Launches kAdd elementwise over mat1 and mat2, writing into target.  All
   three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kSubtract elementwise over mat1 and mat2, writing into target.
   All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kDivide elementwise over mat1 and mat2, writing into target.
   All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Elementwise multiplication of two matrices: launches kMult over mat1
   and mat2, writing into target.  All three must be on the device; mat1
   and mat2 must agree in transposedness; all shapes must match.
   Returns 0 or an error code. */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kSinDeriv elementwise over mat1 and mat2, writing into target.
   All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kSinDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kCosDeriv elementwise over mat1 and mat2, writing into target.
   All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kCosDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kLogisticDeriv elementwise over mat1 and mat2, writing into
   target.  All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kLogisticDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
/* Launches kTanhDeriv elementwise over mat1 and mat2, writing into
   target.  All three must be on the device; mat1 and mat2 must agree in
   transposedness; all shapes must match.  Returns 0 or an error code. */
extern int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    int n = mat1->size[0] * mat1->size[1];

    if (!mat1->on_device || !mat2->on_device || !target->on_device) {
        return ERROR_NOT_ON_DEVICE;
    }
    if (mat1->is_trans != mat2->is_trans) {
        return ERROR_TRANSPOSEDNESS;
    }
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }

    kTanhDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, n);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* Elementwise dispatch of kRectifiedLinearDeriv over mat1/mat2 into target.
       Returns 0 on success or a cudamat error code. */
    const int len = mat1->size[0] * mat1->size[1];

    if (!(mat1->on_device && mat2->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kRectifiedLinearDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
    /* Elementwise dispatch of kRectifiedLinearSmoothDeriv over mat1/mat2 into target.
       Returns 0 on success or a cudamat error code. */
    const int len = mat1->size[0] * mat1->size[1];

    if (!(mat1->on_device && mat2->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat1->is_trans != mat2->is_trans)
        return ERROR_TRANSPOSEDNESS;
    if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
        mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kRectifiedLinearSmoothDeriv<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat1->data_device, mat2->data_device, target->data_device, len);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int assign_scalar(cudamat* mat, float alpha) {
    /* Fill every element of the device matrix with alpha.
       Returns 0 on success or a cudamat error code. */
    const int len = mat->size[0] * mat->size[1];

    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;

    kAssignScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat->data_device, alpha, len);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    /* target <- mat * alpha, elementwise, on the device.
       Returns 0 on success or a cudamat error code. */
    const int len = mat->size[0] * mat->size[1];

    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kMultScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat->data_device, alpha, target->data_device, len);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
    /* target <- mat / alpha, elementwise, on the device.
       NOTE(review): alpha == 0 is not rejected here — presumably the kernel
       propagates inf/nan; confirm against kDivideScalar.
       Returns 0 on success or a cudamat error code. */
    const int len = mat->size[0] * mat->size[1];

    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kDivideScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat->data_device, alpha, target->data_device, len);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
    /* target <- mat + alpha, elementwise, on the device.
       Returns 0 on success or a cudamat error code. */
    const int len = mat->size[0] * mat->size[1];

    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kAddScalar<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat->data_device, alpha, target->data_device, len);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern float euclid_norm(cudamat* mat, int* err_code) {
    /* Euclidean (L2) norm over all elements of mat via cublasSnrm2.
       On success *err_code is 0 and the norm is returned; on failure
       *err_code carries the error and the return value is an error code.
       FIX: the original called cublasSnrm2 BEFORE checking on_device
       (dereferencing a potentially invalid device pointer) and left
       *err_code unset on the not-on-device path. */
    if (!mat->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return ERROR_NOT_ON_DEVICE;
    }
    int len = mat->size[0] * mat->size[1];
    float res = cublasSnrm2(len, mat->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    }
    *err_code = 0;
    return res;
}
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
    /* Gather the rows of source named by indices into target.
       FIX: validate device residency for consistency with the other
       wrappers in this file (previously unchecked device pointers were
       handed straight to the kernel). */
    if (!source->on_device || !target->on_device || !indices->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int nRetRows = indices->size[1];
    if (nRetRows==0) return 0;
    // One 32-thread block per 32 requested rows.
    dim3 gridDim((nRetRows+31)/32);
    dim3 blockDim(32);
    kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
extern int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
    /* Swap the column pairs of source named by (indices1, indices2) into target.
       FIX: validate device residency for consistency with the other wrappers
       in this file (previously unchecked device pointers were handed straight
       to the kernel). */
    if (!source->on_device || !target->on_device ||
        !indices1->on_device || !indices2->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int cols = indices1->size[1]*indices1->size[0],
              h = source->size[0],
              w = source->size[1];
    kSwapColumns<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
    /* Scatter the rows of source into target at the row positions named by
       indices (inverse of selectRows).
       FIX: validate device residency for consistency with the other wrappers
       in this file. */
    if (!source->on_device || !target->on_device || !indices->on_device)
        return ERROR_NOT_ON_DEVICE;
    const int nSetRows = indices->size[1];
    if (nSetRows==0)
        return 0;
    // One 32-thread block per 32 rows being written.
    dim3 gridDim((nSetRows+31)/32);
    dim3 blockDim(32);
    kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    else
        return 0;
}
extern int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
    /* Per-column image translation with per-image offsets (see
       kGenerateTranslationsBigVarOff); one 512-thread block per column.
       FIX: synchronize before checkCUDAError(), as every other wrapper in
       this file does — without it asynchronous kernel faults are missed. */
    dim3 kernelBlockGrid(source->size[1], 1, 1);
    dim3 kernelBlockDim(512, 1, 1);
    kGenerateTranslationsBigVarOff<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int blockify(cudamat* source, cudamat* target, int blocksize) {
    /* Dispatch kBlockify over each column of source (one 512-thread block
       per column).
       FIX: synchronize before checkCUDAError(), as every other wrapper in
       this file does — without it asynchronous kernel faults are missed. */
    dim3 kernelBlockGrid(source->size[1], 1, 1);
    dim3 kernelBlockDim(512, 1, 1);
    kBlockify<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, source->size[0], blocksize);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int softmax(cudamat* mat, cudamat* target) {
    /* Softmax of mat into target, one 32-thread block per column laid out on
       a near-square 2D grid (kSoftMax).
       FIX: guard w == 0 — floor(sqrt(0)) is 0 and the original then divided
       by zero computing the grid height. */
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (w == 0)
        return 0;   // nothing to do; avoids division by zero below
    int shared_mem_size = 32 * sizeof(float);
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kSoftMax<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int softmax_overwrite(cudamat* mat) {
    /* In-place softmax of mat, one 32-thread block per column on a
       near-square 2D grid (kSoftMaxOverwrite).
       FIX: guard w == 0 — floor(sqrt(0)) is 0 and the original then divided
       by zero computing the grid height. */
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (w == 0)
        return 0;   // nothing to do; avoids division by zero below
    int shared_mem_size = 32 * sizeof(float);
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kSoftMaxOverwrite<<<gridDim, 32, shared_mem_size>>>(mat->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
    /* Softmax gradient for an h-by-w activation matrix given a 1-by-w label
       row vector; result goes to target (see kSoftMaxGrad).
       Returns 0 on success or a cudamat error code. */
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];

    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (labels->size[0] != 1 || labels->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat->data_device, labels->data_device, target->data_device, w, h);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
    /* Per-column correctness indicator of softmax outputs against labels;
       target is a 1-by-w row vector (see kSoftMaxCorrect).
       FIX: guard w == 0 — floor(sqrt(0)) is 0 and the original then divided
       by zero computing the grid height. */
    unsigned int h = mat->size[0],
                 w = mat->size[1];
    if (!mat->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != 1 || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (labels->size[0] != 1 || labels->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (w == 0)
        return 0;   // nothing to do; avoids division by zero below
    int w1 = floor(sqrt(w));
    int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
    dim3 gridDim(w1, w2, 1);
    kSoftMaxCorrect<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
    return 0;
}
extern int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
    /* Accumulate (or average, when avg != 0) the w columns of mat into the
       w2 columns of target, routed by the 1-by-w indices row, scaled by mult
       (see kAccumulateColumns). One block per row; requires w2 to fit in a
       single block. Returns 0 on success or a cudamat error code. */
    const unsigned int h  = mat->size[0];
    const unsigned int w  = mat->size[1];
    const unsigned int w2 = target->size[1];

    if (!(mat->on_device && indices->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (indices->size[0] != 1 || indices->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kAccumulateColumns<<<h, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat->data_device, indices->data_device, target->data_device,
        w, w2, h, mult, avg);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
    /* Per-column softmax cross-entropy against labels; target is a 1-by-w
       row vector; tiny is an additive constant (presumably a log() floor —
       see kSoftMaxCrossEntropy). Returns 0 on success or an error code. */
    const unsigned int h = mat->size[0];
    const unsigned int w = mat->size[1];

    if (!(mat->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != 1 || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (labels->size[0] != 1 || labels->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kSoftMaxCrossEntropy<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        mat->data_device, labels->data_device, target->data_device, w, h, tiny);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int expand(cudamat* source, cudamat* indices, cudamat* target){
    /* Replicate columns of source into the w2 columns of target as routed
       by the 1-by-w2 indices row (see kExpand).
       Returns 0 on success or a cudamat error code. */
    const unsigned int h  = source->size[0];
    const unsigned int w  = source->size[1];
    const unsigned int w2 = target->size[1];

    if (!(source->on_device && indices->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (source->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (indices->size[0] != 1 || indices->size[1] != w2)
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kExpand<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        source->data_device, indices->data_device, target->data_device, h, w, w2);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
extern int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
    /* target <- source + mult * expand(mat, indices): columns of the h-by-w2
       matrix mat are routed by the 1-by-w indices row and added, scaled by
       mult, to the h-by-w source (see kExpandAndAdd).
       Returns 0 on success or a cudamat error code. */
    const unsigned int h  = source->size[0];
    const unsigned int w  = source->size[1];
    const unsigned int w2 = mat->size[1];

    if (!(source->on_device && mat->on_device &&
          indices->on_device && target->on_device))
        return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans)
        return ERROR_TRANSPOSED;
    if (target->size[0] != h || target->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (indices->size[0] != 1 || indices->size[1] != w)
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    if (mat->size[0] != h)
        return ERROR_INCOMPATIBLE_DIMENSIONS;

    kExpandAndAdd<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
        source->data_device, mat->data_device, indices->data_device,
        target->data_device, w, h, mult, w2);
    cudaThreadSynchronize();

    return checkCUDAError() ? CUDA_ERROR : 0;
}
}
|
4,883 | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include <cuda_runtime.h>
//#include <cutil_inline.h>
using namespace std;
#define SUBMATRIX_SIZE 10000
#define NUM_BIN 500
#define HIST_MIN 0.0
#define HIST_MAX 3.5
////////////////////////////////////////////////////////////////////////
// Kernel: angular-separation histogram for one SUBMATRIX_SIZE x SUBMATRIX_SIZE
// tile of the all-pairs matrix. Each thread owns one "row" particle
// (a[xind + tid], d[xind + tid]) and loops over the tile's column particles
// [yind, yind + SUBMATRIX_SIZE), binning the great-circle distance computed
// with the Vincenty formula (atan2 form, numerically stable at small angles).
// Each thread writes only its private (NUM_BIN+2)-wide slice of dev_hist
// (offset by thread_idx), so no atomics are needed; the host sums slices.
// The i > idx guard restricts work to unique pairs (upper triangle).
// NOTE(review): assumes gridDim.x*blockDim.x == SUBMATRIX_SIZE so thread_idx
// indexes dev_hist in range — confirm against the launch in main.
__global__ void distance(float *a, float *d, int xind, int yind, int *dev_hist)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int thread_idx = idx;  // private histogram slice index (pre-offset thread id)
idx += xind;           // global index of this thread's particle
float alpha = a[idx], delta = d[idx];
float cos_d1 = cos(delta), sin_d1 = sin(delta), dist;
int ymax = yind + SUBMATRIX_SIZE;
int bin_index;
int offset = 0;
float a_diff, sin_a_diff, cos_a_diff;
float cos_d2, sin_d2, numer, denom, mult1, mult2;
for(int i=yind; i<ymax; i++)
{
if(idx > i)
{
a_diff = a[i] - alpha;
sin_a_diff = sin(a_diff);
cos_a_diff = cos(a_diff);
sin_d2 = sin(d[i]);
cos_d2 = cos(d[i]);
// Vincenty numerator/denominator for the angular separation
mult1 = cos_d2 * cos_d2 * sin_a_diff * sin_a_diff;
mult2 = cos_d1 * sin_d2 - sin_d1 * cos_d2 * cos_a_diff;
mult2 = mult2 * mult2;
numer = sqrt(mult1 + mult2);
denom = sin_d1 *sin_d2 + cos_d1 * cos_d2 * cos_a_diff;
//dist = atan(num);
dist = atan2(numer,denom);
// Bin layout per thread: [0] underflow, [1..NUM_BIN] in-range, [NUM_BIN+1] overflow
if(dist < HIST_MIN)
bin_index = 0;
else if(dist >= HIST_MAX)
bin_index = NUM_BIN + 1;
else
bin_index = int(((dist - HIST_MIN) * NUM_BIN / HIST_MAX) +1);
offset = ((NUM_BIN+2)*thread_idx);  // jump to this thread's private slice
bin_index += offset;
dev_hist[bin_index]++;
}
}
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
/*
 * Driver: reads particle (alpha, delta) coordinates, tiles the all-pairs
 * angular-distance computation into SUBMATRIX_SIZE x SUBMATRIX_SIZE GPU
 * launches, and accumulates the per-thread histograms into one final
 * histogram written to the output file.
 * FIXES vs. original: "Usage" printf had no %s for argv[0]; fscanf("%s",...)
 * was given &array (pointer-to-array) instead of the array; input/output
 * files were never checked for open failure; hist_array was leaked.
 */
int main(int argc, char **argv)
{
    float *d_alpha, *d_delta;   // device coordinate arrays
    float *h_alpha, *h_delta;   // host coordinate arrays
    int NUM_PARTICLES;

    if (argc < 3)
    {
        printf("\nMust pass in cluster_data file on command line!\n");
        printf("\nUsage: %s", argv[0]);   // FIX: format string was missing %s
        printf(" <cluster_data file> <distances file> \n\n");
        exit(1);
    }

    FILE *infile, *outfile;
    infile = fopen(argv[1], "r");
    outfile = fopen(argv[2], "w");
    if (infile == NULL || outfile == NULL)
    {
        printf("Could not open input/output file\n");
        exit(1);
    }

    //////////////////////////////////////////////////////////////////////
    // Read in the cluster_data file: header tokens, particle count, then
    // one "alpha <sep> delta <sep>" record per particle.
    //////////////////////////////////////////////////////////////////////
    char axis_titles[256];
    char dummy[256];
    // FIX: %s expects char*, not pointer-to-array (&array)
    fscanf(infile, "%s %s %s", axis_titles, dummy, axis_titles);
    fscanf(infile, "%d", &NUM_PARTICLES);
    int size = NUM_PARTICLES * sizeof(float);
    printf("# particles: %d\n", NUM_PARTICLES);
    h_alpha = (float*)malloc(size);
    h_delta = (float*)malloc(size);
    for (int i = 0; i < NUM_PARTICLES; i++)
    {
        fscanf(infile, "%f %s %f %s ", &h_alpha[i], dummy, &h_delta[i], dummy);
    }

    //////////////////////////////////////////////////////////////////////
    // Histogram buffers: one (NUM_BIN+2)-wide private slice per GPU thread,
    // plus a host-side accumulator over all tiles.
    //////////////////////////////////////////////////////////////////////
    int *hist, *dev_hist;
    int size_hist = SUBMATRIX_SIZE * (NUM_BIN + 2);
    int size_hist_bytes = size_hist * sizeof(int);
    hist = (int*)malloc(size_hist_bytes);
    memset(hist, 0, size_hist_bytes);
    printf("size_hist: %d\n", size_hist_bytes);
    cudaMalloc((void **)&dev_hist, (size_hist_bytes));
    cudaMemset(dev_hist, 0, size_hist_bytes);
    unsigned long *hist_array;
    hist_array = (unsigned long*)malloc((NUM_BIN + 2) * sizeof(unsigned long));
    memset(hist_array, 0, (NUM_BIN + 2) * sizeof(unsigned long));

    // Launch configuration: SUBMATRIX_SIZE threads total, 100 blocks.
    dim3 grid, block;
    grid.x = 100;
    block.x = SUBMATRIX_SIZE / grid.x;

    cudaMalloc((void **)&d_alpha, size);
    cudaMalloc((void **)&d_delta, size);
    if (0 == d_alpha || 0 == d_delta || 0 == dev_hist)
    {
        printf("couldn't allocate memory\n");
        return 1;
    }
    cudaMemset(d_alpha, 0, size);
    cudaMemset(d_delta, 0, size);
    cudaMemcpy(d_alpha, h_alpha, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_delta, h_delta, size, cudaMemcpyHostToDevice);

    // Tile the all-pairs matrix; the kernel's idx > i guard keeps each pair
    // counted once. NOTE(review): particles beyond the last full
    // SUBMATRIX_SIZE multiple are silently dropped — TODO confirm intended.
    int x, y;
    int num_submatrices = NUM_PARTICLES / SUBMATRIX_SIZE;
    int bin_index = 0;
    for (int k = 0; k < num_submatrices; k++)
    {
        y = k * SUBMATRIX_SIZE;
        for (int j = 0; j < num_submatrices; j++)
        {
            x = j * SUBMATRIX_SIZE;
            cudaMemset(dev_hist, 0, size_hist_bytes);
            distance<<<grid, block>>>(d_alpha, d_delta, x, y, dev_hist);
            cudaMemcpy(hist, dev_hist, size_hist_bytes, cudaMemcpyDeviceToHost);
            // Fold every thread's private slice into the global histogram.
            for (int m = 0; m < size_hist; m++)
            {
                bin_index = m % (NUM_BIN + 2);
                hist_array[bin_index] += hist[m];
            }
        }
    }

    // Write the histogram (bin midpoints vs. counts) and a total for sanity.
    unsigned long total = 0;
    float bin_width = (HIST_MAX - HIST_MIN) / NUM_BIN;
    float bins_mid = 0;
    fprintf(outfile, "%s %s\n", "Angular Distance(radians)", "Number of Entries");
    for (int k = 0; k < NUM_BIN + 2; k++)
    {
        bins_mid = bin_width * (k - 0.5);
        fprintf(outfile, "%.3e %s %lu \n", bins_mid, ",", hist_array[k]);
        total += hist_array[k];
    }
    printf("total: %lu \n", total);

    fclose(infile);
    fclose(outfile);
    free(h_alpha);
    free(h_delta);
    free(hist);
    free(hist_array);   // FIX: was leaked
    cudaFree(d_alpha);
    cudaFree(d_delta);
    cudaFree(dev_hist);
    return 0;
}
//////////////////////////////////////////////////////////////////////
|
4,884 | #include "pq.cuh"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
//typedef struct pqueue {
// int max_size;
// int num_elems;
// int* elems; // kdtree indicies
// double* dists; // distances from kdtree point to query point
//} pqueue;
// has_left_child(pqueue* q, int index) { return index*2 + 1 < q->num_elems }
//
//__device__ void swap(pqueue* q, int a, int b) {
//
// // save the current values at index a
// int idx = q->elems[a];
// int dist = q->dists[a];
// // replace
// q->elems[a] = q->elems[b];
// q->dists[a] = q->dists[b];
//
// q->elems[b] = idx;
// q->dists[b] = dist;
//}
//
//__device__ void pq_print(pqueue* q) {
//
// // [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
// // new_line: 1, 3, 7
// printf("--priority queue: num_elems=%i, max_size=%i\n", q->num_elems, q->max_size);
// int new_line = 1;
// int level = 0;
// printf("level 0\n");
// for(int i=0; i<q->num_elems; i++) {
// if(i == new_line) {
// level++;
// printf("level %i\n", level);
// new_line = (new_line * 2) + 1;
// }
// printf("%i kdt_idx=%i, dist=%.12f\n", i, q->elems[i], q->dists[i]);
// }
// printf("--\n");
//}
//
//__device__ pqueue* pq_build(int size) { // should this be the number of registers/thread?
//// printf("pq_build()\n");
// pqueue* q = (pqueue*) malloc(sizeof(pqueue));
// q->max_size = size;
// q->num_elems = 0;
// q->elems = (int*) malloc(sizeof(int) * q->max_size);
// q->dists = (double*) malloc(sizeof(double) * q->max_size);
//
// memset(q->elems, 0, size * sizeof(int));
// memset(q->dists, 0, size * sizeof(double));
//
// return q;
//}
//
//__device__ void heapify_up(pqueue* q) {
//
//// printf("heapify_up()\n");
//
// // start at the last element in the heap
// int idx = q->num_elems - 1;
//
//// printf("idx=%i, current dist %.12f, parent's distance %.12f (at idx=%i)\n", idx, q->dists[idx], q->dists[(idx-1)/2], (idx-1)/2);
// while(idx > 0 && q->dists[idx] < q->dists[(idx - 1)/2]) { // check if the current node is smaller than its parent; if so, the current node is out of place!
//// printf("swapping %i and %i\n",idx, (idx-1)/2 );
// swap(q, idx, (idx-1)/2); // swap the parent and child
// idx = (idx-1)/2; // go up to the next element (the parent of the current node)
// }
//}
//
//__device__ void heapify_down(pqueue* q) {
//
// // start at the first element of the heap
// int idx = 0;
//
// while((idx*2+1) < q->num_elems - 1) { // while this node has a left child ; no need to check for the right child since the heap is always filled from the left
// // get the index of the smaller of the two children
// int smaller_child = idx*2 + 1; // default to the left child
// if(q->dists[idx*2+2] < q->dists[idx*2 + 1]) smaller_child = idx*2 + 2; // if right child is actually smaller than left child, set smaller to right
//
// if(q->dists[idx] < q->dists[smaller_child]) break; // if this node is already smaller than its children, then we are done
//
// // swap the current node with the smaller child
// swap(q, idx, smaller_child);
//
// // move down the heap to the smaller_child
// idx = smaller_child;
// }
//}
// the thread must check whether the pq is full BEFORE the function call
// Min-heap insert: append (kdtree index, distance) at the end and sift up.
// FIXES vs. original: the temporary 'dist' was declared int, silently
// truncating the double distance during swaps; and parent_idx was computed
// once before the loop and never updated, so sift-up stopped after a single
// level instead of restoring the heap property all the way to the root.
__device__ void pq_insert(pqueue* q, int new_idx, double new_dist) {
    // place the new element in the last slot and grow the heap
    q->elems[q->num_elems] = new_idx;
    q->dists[q->num_elems] = new_dist;
    q->num_elems++;

    // sift up: while the node is smaller than its parent, swap them
    int idx = q->num_elems - 1;
    int parent_idx = (idx - 1) / 2;
    while (idx > 0 && q->dists[idx] < q->dists[parent_idx]) {
        int val = q->elems[idx];
        double dist = q->dists[idx];       // FIX: was 'int' (truncated)
        q->elems[idx] = q->elems[parent_idx];
        q->dists[idx] = q->dists[parent_idx];
        q->elems[parent_idx] = val;
        q->dists[parent_idx] = dist;
        idx = parent_idx;
        parent_idx = (idx - 1) / 2;        // FIX: recompute for next level
    }
}
// returns the index of the kdtree which contains the shortest distance from the query point so far
// Min-heap extract: pop the root, move the last element to the root, sift down.
// On an empty queue sets *result = -1 and leaves *dist untouched.
// FIXES vs. original: the sift-down loop condition '< num_elems - 1' skipped
// the last leaf; the right child was read without checking it exists
// (out-of-bounds read on dists/elems); and the swap temporary 'dist' was an
// int, truncating the double distance.
__device__ void pq_extract(pqueue* q, int* result, double* dist) {
    if (q->num_elems == 0) {
        *result = -1;
        return;
    }
    // remember the minimum, then promote the last element to the root
    int min = q->elems[0];
    double d = q->dists[0];
    q->num_elems--;
    q->elems[0] = q->elems[q->num_elems];
    q->dists[0] = q->dists[q->num_elems];

    // sift down: swap with the smaller existing child until in place
    int idx = 0;
    while (idx * 2 + 1 < q->num_elems) {                 // FIX: was '< num_elems - 1'
        int smaller_child = idx * 2 + 1;                 // default: left child
        if (idx * 2 + 2 < q->num_elems &&                // FIX: right child may not exist
            q->dists[idx * 2 + 2] < q->dists[smaller_child])
            smaller_child = idx * 2 + 2;
        if (q->dists[idx] <= q->dists[smaller_child]) break;
        int val = q->elems[idx];
        double dtmp = q->dists[idx];                     // FIX: was 'int' (truncated)
        q->elems[idx] = q->elems[smaller_child];
        q->dists[idx] = q->dists[smaller_child];
        q->elems[smaller_child] = val;
        q->dists[smaller_child] = dtmp;
        idx = smaller_child;
    }
    *result = min;
    *dist = d;
    return;
}
|
4,885 | // Inspired from
// https://developer.nvidia.com/thrust
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <cstdlib>
#include <iostream>
// Thrust demo: generate 32M random ints on the host, sort them on the GPU,
// copy back, and print the first ten before and after.
int main(void)
{
    const int kPreview = 10;

    // host-side random data
    thrust::host_vector<int> h_vec(32 << 20);
    thrust::generate(h_vec.begin(), h_vec.end(), rand);

    std::cout << "First unsorted (from 32M numbers):" << std::endl;
    for (int i = 0; i < kPreview; i++) std::cout << h_vec[i] << "\t";
    std::cout << std::endl;

    thrust::device_vector<int> d_vec = h_vec;  // H2D transfer
    thrust::sort(d_vec.begin(), d_vec.end());  // device-side sort
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());  // D2H transfer

    std::cout << "First sorted:" << std::endl;
    for (int i = 0; i < kPreview; i++) std::cout << h_vec[i] << "\t";
    std::cout << std::endl;
    return 0;
}
|
4,886 | #include "includes.h"
extern "C"
{
}
// Elementwise square: c[i] = a[i] * a[i], one thread per element.
// NOTE(review): no bounds guard — the launch must cover exactly the array
// length; confirm against the (unseen) host code.
__global__ void vsquare(const double *a, double *c)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    const double v = a[i];
    c[i] = v * v;
}
4,887 | // GPU kernel
__global__ void summation_kernel(int data_size, float * data_out) {
// Get the id of this thread in the whole thread group
unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
// Get the total number of threads in the whole thread group
unsigned int nb_threads_total = blockDim.x * gridDim.x;
// Get the number of part for each thread
unsigned int units_per_thread = nb_threads_total / data_size;
// Compute datas for this thread
int i;
float res = 0;
for (i = ((id+1) * units_per_thread)-1; i >= (id * units_per_thread); --i) {
if (i%2==0) res += 1.0/(i+1.0);
else res -= 1.0/(i+1.0);
}
// Store the result
data_out[id] = res;
}
|
4,888 | #include <cstdio>
// Host-only demo: build two WIDTH x WIDTH matrices, sum them elementwise,
// and print the result right-aligned in 5-character fields.
int main() {
    const int WIDTH = 5;
    int a[WIDTH][WIDTH];
    int b[WIDTH][WIDTH];
    int c[WIDTH][WIDTH] = {0};

    // fill a and b, accumulating the sum into c in the same pass
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            a[row][col] = row * 10 + col;
            b[row][col] = (row * 10 + row) * 100;
            c[row][col] = a[row][col] + b[row][col];
        }
    }

    // print the result matrix
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col)
            printf("%5d", c[row][col]);
        printf("\n");
    }
    return 0;
}
4,889 | #include <iostream>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <random>
#include <vector>
#include <chrono>
#include <deque>
#include <algorithm>
#include <iterator>
#define BLOCK_SIZE 32
// Bitonic compare-exchange pass. Each of the n/2 threads handles one pair:
// j = i*2 - i%skip maps thread i to the first element of its pair, and
// k = j + skip is its partner. The sort direction flips every 'oflag'
// comparators (f = +order or -order), producing the bitonic sequence
// structure; 'order' selects overall ascending (+1) vs descending (-1).
__global__ void swap(int *arr, const int skip, const int oflag, const int order, const int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n/2) {
// pair (j, k): partners 'skip' apart within the current stage
int j = i*2 - i%skip;
int k = j + skip;
// direction for this comparator: alternates every 'oflag' pairs
int f = ((int) (i/oflag)) % 2 == 0 ? order:-order;
int x, y;
if (f == 1) {
// ascending: smaller value to position j
x = arr[j] < arr[k] ? arr[j]:arr[k];
y = arr[j] > arr[k] ? arr[j]:arr[k];
}
else {
// descending: larger value to position j
x = arr[j] > arr[k] ? arr[j]:arr[k];
y = arr[j] < arr[k] ? arr[j]:arr[k];
}
arr[j] = x;
arr[k] = y;
}
}
// Verify a GPU sort result: sort the pristine copy on the CPU and compare
// element-by-element. Note: mutates orig_arr (sorts it in place).
bool check_correctness(int *orig_arr, int *arr_sorted, const int n) {
    std::sort(orig_arr, orig_arr + n);
    return std::equal(orig_arr, orig_arr + n, arr_sorted);
}
// Host driver for GPU bitonic sort: for each stage span (1, 2, 4, ..., n/2)
// run compare-exchange passes with stride span, span/2, ..., 1, launching
// one thread per pair and synchronizing after every pass.
void bitonic_sort(int *arr, const int order, const int n) {
    const int threads = BLOCK_SIZE;
    const int blocks = (n / 2 + threads - 1) / threads;
    for (int span = 1; span < n; span *= 2) {
        for (int stride = span; stride > 0; stride /= 2) {
            swap<<<blocks, threads>>>(arr, stride, span, order, n);
            cudaDeviceSynchronize();
        }
    }
}
// Fill arr with n uniformly distributed ints in [min_val, max_val], drawn
// from a process-wide Mersenne Twister seeded once from random_device.
void random_vector(int *arr, const int n, const int min_val=0.0, const int max_val=1000.0) {
    static std::random_device seed_source;
    static std::mt19937 engine(seed_source());
    std::uniform_int_distribution<int> pick(min_val, max_val);
    std::generate(arr, arr + n, [&]() { return pick(engine); });
}
// Benchmark driver: fill a managed buffer with 2^25 random ints, bitonic-sort
// it on the GPU, and verify against a CPU std::sort of a saved copy, timing
// both phases.
// FIX: the reference copy (new int[n]) was never deleted — freed here.
int main(void) {
    int n = 1 << 25;
    int *arr, *temp;
    cudaMallocManaged(&arr, n * sizeof(int));
    random_vector(arr, n, 0, 10000);

    // keep a pristine copy for the correctness check
    temp = new int[n];
    std::copy(arr, arr + n, temp);

    auto t1 = std::chrono::high_resolution_clock::now();
    bitonic_sort(arr, 1, n);
    auto t2 = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
    std::cout << duration << std::endl;

    t1 = std::chrono::high_resolution_clock::now();
    std::cout << check_correctness(temp, arr, n) << std::endl;
    t2 = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
    std::cout << duration << std::endl;

    delete[] temp;   // FIX: was leaked
    cudaFree(arr);
    return 0;
}
|
4,890 | #include "includes.h"
float *A,*L,*U,*input;
void arrayInit(int n);
void verifyLU(int n);
void updateLU(int n);
void freemem(int n);
/*
*/
// One elimination step of an in-place LU-style reduction on the size x size
// matrix 'a' (row-major, flattened), pivoting on column/row 'c': each block
// (blockIdx.x) owns target row (tid + index + 1), and its threads stride
// across the columns right of the pivot, subtracting the pivot-row multiple
// a[row][index] * a[index][col] from a[row][col].
// NOTE(review): each thread starts at column index+1+thid and strides by
// blockDim.x — assumes blockDim.x and the launch geometry cover all columns
// exactly once; confirm against the (unseen) host-side launch.
__global__ void reduce( float *a, int size, int c) {
int tid = blockIdx.x; //Handle the data at the index
int thid = threadIdx.x;
int index=c,j=0;//size=b
int numthreads = blockDim.x;
// eliminate entries of row (tid+index+1) to the right of the pivot column
for(j=index+1;j<size;j+=numthreads) {
a[((tid+index+1)*size + j+thid)] = (float)(a[((tid+index+1)*size + j+thid)] - (float)a[((tid+index+1)*size+index)] * a[((index*size) + j+thid)]);
}
}
4,891 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <curand_kernel.h>
#define IL_BLOKOW 256
#define IL_WATKOW 256
#define IL_WEWN_TESTOW 1024
#define PI 3.14159265358979323846 // przyblizenie liczby pi do 20 miejsc po przecinku
// Cudowna wersja metody Monte Carlo
// GPU Monte Carlo pi estimate: every thread draws IL_WEWN_TESTOW uniform
// points in the unit square, counts those inside the quarter circle, and
// stores its private 4*hits/tests estimate in wyniki[thread id].
__global__ void cuda_monte_carlo(float *wyniki, curandState *stany)
{
    unsigned int moje_id = threadIdx.x + blockDim.x * blockIdx.x;

    // Initialize this thread's CURAND state (seeded from the thread id)
    curand_init(moje_id * moje_id, moje_id, 0, &stany[moje_id]);

    long k = 0;   // hits inside the quarter circle
    for (int i = 0; i < IL_WEWN_TESTOW; i++)
    {
        float x = curand_uniform(&stany[moje_id]);
        float y = curand_uniform(&stany[moje_id]);
        if ((x * x + y * y) <= 1.0f) k++;
    }
    wyniki[moje_id] = (4 * (float)k / IL_WEWN_TESTOW);
}
// Sekwencyjna wersja na procesorze
float proc_sekw_monte_carlo(long ilosc_testow)
{
long k, i;
float x, y;
srand(time(NULL));
for(i = 0; i < ilosc_testow; i++)
{
x = rand() / (float) RAND_MAX;
y = rand() / (float) RAND_MAX;
if((x * x + y * y) <= 1.0f) k++;
}
return (4 * (float)k / ilosc_testow);
}
// Funkcja main
int main(int argc, char *argv[])
{
int i;
clock_t start, stop;
float *gfx_w;
curandState *gfx_stany;
float proc[IL_BLOKOW * IL_WATKOW];
printf("\t-> Ilosc blokow: %d.\n\t-> Ilosc watkow na blok: %d.\n\t-> Ilosc testow dla kazdego watku: %d.\n\n", IL_BLOKOW, IL_WATKOW, IL_WEWN_TESTOW);
/*****************************************
* Start wersji na CUDA
*****************************************/
start = clock();
cudaMalloc((void **)&gfx_w, IL_WATKOW * IL_BLOKOW * sizeof(float));
cudaMalloc((void **)&gfx_stany, IL_WATKOW * IL_BLOKOW * sizeof(curandState));
cuda_monte_carlo<<<IL_BLOKOW, IL_WATKOW>>>(gfx_w, gfx_stany);
cudaMemcpy(proc, gfx_w, IL_WATKOW * IL_BLOKOW * sizeof(float), cudaMemcpyDeviceToHost);
float pi_cuda;
for(i = 0; i < IL_WATKOW * IL_BLOKOW; i++)
{
pi_cuda += proc[i];
}
pi_cuda /= (IL_WATKOW * IL_BLOKOW);
stop = clock();
printf("\t-> Czas liczenia PI na CUDA: %.6f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);
printf("\t-> Wartosc PI wg CUDA: %.10f (blad wzgledem rzeczywistej wartosci: %.10f).\n", pi_cuda, pi_cuda - PI);
// ----------- Koniec wersji na CUDA
/*****************************************
* Start wersji sekwencyjnej na procesorze
*****************************************/
start = clock();
float pi_proc_sekw = proc_sekw_monte_carlo(IL_WATKOW * IL_BLOKOW * IL_WEWN_TESTOW);
stop = clock();
printf("\t-> Czas liczenia PI sekwencyjnie na procesorze: %.6f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);
printf("\t-> Wartosc PI wg CPU (sekw.): %.10f (blad wzgledem rzeczywistej wartosci: %.10f).\n", pi_proc_sekw, pi_proc_sekw - PI);
// ----------- Koniec wersji sekwensyjnej na procesorze
return 0;
}
|
4,892 |
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
// Synthetic FLOP benchmark kernel: thread idx (of m total work items) seeds
// acc = pi * idx and applies n dependent add/divide steps, storing the
// result in floats[idx].
__global__ void flops( float* floats , int n , int m )
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= m)
        return;   // grid tail guard

    float acc = 3.14159 * idx;
    for (int step = 0; step < n; step++)
        acc = acc + acc / 2.0;

    floats[idx] = acc;
}
// Main: times the flops kernel on the GPU against an equivalent CPU loop.
// FIX: the CPU reference assigned 'out = temp + temp/2.0' each iteration
// without ever updating temp, so it did not perform the n dependent steps
// the GPU does — the timing comparison was meaningless. The CPU loop now
// mirrors the kernel's computation.
int main( int argc , char** argv )
{
    cudaDeviceReset() ;
    struct timeval start, end ;

    // n: iterations per element; m: number of elements/threads
    int n = 10 ;
    if( argc > 1 )
        n = atoi(argv[1]) ;
    else
        printf( "Optional arguments: (arg1) number of floats to add and divide, (arg2) number of threads \n" ) ;
    int m = 100 ;
    if( argc > 2 )
        m = atoi(argv[2]) ;
    int blocks = m/32 + 1 ;

    float* d_floats ;
    cudaError_t status = cudaMalloc( &d_floats , m * sizeof(float) ) ;
    float* h_floats = (float*) malloc( m * sizeof(float) ) ;

    // --- GPU timing ---
    gettimeofday(&start, NULL) ;
    if( status == cudaSuccess )
    {
        flops<<< blocks , 32 >>>( d_floats , n , m ) ;
        status = cudaDeviceSynchronize() ;
    }
    gettimeofday(&end, NULL) ;
    printf("%ld microseconds on GPU\n", ((end.tv_sec * 1000000 + end.tv_usec)
        - (start.tv_sec * 1000000 + start.tv_usec)));

    if( status == cudaSuccess )
        status = cudaMemcpy( h_floats , d_floats , m * sizeof(float) , cudaMemcpyDeviceToHost ) ;
    if( status != cudaSuccess )
        printf( "ERROR: %s\n" , cudaGetErrorString(status) ) ;

    // --- CPU timing: same computation as the kernel, per element ---
    float out = 0.0 ;
    gettimeofday(&start, NULL) ;
    int i , j ;
    for( i = 0 ; i < m ; i++ )
    {
        float temp = 3.14159 * i ;
        for( j = 0 ; j < n ; j++ )
            temp = temp + temp / 2.0 ;   // FIX: was 'out = ...', leaving temp constant
        out = temp ;
    }
    gettimeofday(&end, NULL) ;
    printf("%ld microseconds on CPU\n", ((end.tv_sec * 1000000 + end.tv_usec)
        - (start.tv_sec * 1000000 + start.tv_usec)));

    // Sum the GPU results so the work cannot be optimized away.
    out = 0.0 ;
    for( i = 0 ; i < m ; i++ )
        out += h_floats[i] ;
    printf( "Total val: %f \n" , out ) ;

    free(h_floats) ;
    cudaFree(d_floats) ;
    cudaDeviceReset() ;
}
|
4,893 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
//Tamanhos dos blocos das threads
#define BLOCK_SIZE 32
// One 24-bit RGB pixel: one byte per channel, 0..255.
typedef struct {
	unsigned char red, green, blue;
} PPMPixel;
// A PPM image. x and y are the two dimensions read from the file header
// (width first, then height); data holds x*y pixels in row-major order.
typedef struct {
	int x, y;
	PPMPixel *data;
} PPMImage;
/* Returns the current wall-clock time in seconds, with microsecond
 * resolution, via gettimeofday(). On failure the status is printed and a
 * (possibly garbage) value is still returned. */
double rtclock()
{
	struct timeval now;
	struct timezone zone;
	int rc = gettimeofday(&now, &zone);
	if (rc != 0)
		printf("Error return from gettimeofday: %d", rc);
	return now.tv_sec + now.tv_usec * 1.0e-6;
}
/* Loads a binary ("P6") PPM file with 8-bit components into a freshly
 * allocated PPMImage. Exits the process with an error message on any
 * malformed input or allocation failure. Caller owns the returned image
 * (free img->data, then img). */
static PPMImage *readPPM(const char *filename) {
	char buff[16];
	PPMImage *img;
	FILE *fp;
	int c, rgb_comp_color;
	fp = fopen(filename, "rb");
	if (!fp) {
		fprintf(stderr, "Unable to open file '%s'\n", filename);
		exit(1);
	}
	// Magic number must be "P6" (binary RGB).
	if (!fgets(buff, sizeof(buff), fp)) {
		perror(filename);
		exit(1);
	}
	if (buff[0] != 'P' || buff[1] != '6') {
		fprintf(stderr, "Invalid image format (must be 'P6')\n");
		exit(1);
	}
	img = (PPMImage *) malloc(sizeof(PPMImage));
	if (!img) {
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	// Skip '#' comment lines between the magic number and the dimensions.
	c = getc(fp);
	while (c == '#') {
		while (getc(fp) != '\n')
			;
		c = getc(fp);
	}
	ungetc(c, fp);
	if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
		fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
		exit(1);
	}
	if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
		fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
				filename);
		exit(1);
	}
	if (rgb_comp_color != RGB_COMPONENT_COLOR) {
		fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
		exit(1);
	}
	// Consume the single whitespace byte that terminates the header.
	while (fgetc(fp) != '\n')
		;
	img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
	if (!img->data) {   // fixed: the original re-tested img, not img->data
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	// Read img->y rows of 3*img->x bytes each.
	if (fread(img->data, 3 * img->x, img->y, fp) != (size_t) img->y) {
		fprintf(stderr, "Error loading image '%s'\n", filename);
		exit(1);
	}
	fclose(fp);
	return img;
}
// One thread per pixel: bins the pixel into the 64-bin (4x4x4 RGB)
// histogram h using a float atomicAdd. The host (Histogram()) has already
// quantized every channel to the range 0..3, so the bin index can be
// computed directly as red*16 + green*4 + blue — the original scanned all
// 64 (r,g,b) combinations per pixel looking for a match.
__global__ void cudaHistogram(PPMPixel* data, int rows, int cols, float* h){
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int tid = cols * row + col;
	// Tail guard: the grid is rounded up to whole blocks.
	if (col < cols && row < rows) {
		// Same ordering as the original triple loop (red outermost).
		int x = ((int)data[tid].red << 4) + ((int)data[tid].green << 2)
		      + (int)data[tid].blue;
		if (x < 64)   // defensive: matches the original's "no match, no add"
			atomicAdd(&h[x], 1);
	}
}
// Computes a 64-bin colour histogram of `image` on the GPU, accumulating
// counts into h (h must hold 64 floats, pre-initialised by the caller).
// Side effect: quantizes image->data channels in place from 0..255 to 0..3.
// Per-phase timings are collected with CUDA events (prints left disabled).
void Histogram(PPMImage *image, float *h) {
	cudaEvent_t start, stop;
	float milliseconds = 0;
	PPMPixel *pixels_dev;
	float* h_dev;
	float n = image->y * image->x;
	int i;
	// Quantize each channel to 0..3 (integer division floors; the floor()
	// call is redundant but harmless).
	for (i = 0; i < n; i++) {
		image->data[i].red = floor((image->data[i].red * 4) / 256);
		image->data[i].blue = floor((image->data[i].blue * 4) / 256);
		image->data[i].green = floor((image->data[i].green * 4) / 256);
	}
	// Create the timing events once and reuse them: the original re-created
	// them before every phase and never destroyed any of them (leak).
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// --- time device allocations ---
	cudaEventRecord(start);
	cudaMalloc(&pixels_dev, sizeof(PPMPixel)*image->x*image->y);
	cudaMalloc(&h_dev, sizeof(float)*64);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&milliseconds, start, stop);
	//printf("Alocar Memoria = %f\n",milliseconds/1000);
	// --- time host-to-device copies ---
	cudaEventRecord(start);
	cudaMemcpy(pixels_dev, image->data, image->x*image->y*sizeof(PPMPixel), cudaMemcpyHostToDevice);
	cudaMemcpy(h_dev, h, 64*sizeof(float), cudaMemcpyHostToDevice);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&milliseconds, start, stop);
	//printf("\nOffload do buffer = %f\n",milliseconds/1000);
	// 2D launch: one thread per pixel, grid rounded up to whole blocks.
	dim3 blocks(1,1,1);
	dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1);
	blocks.x = ((image->y/BLOCK_SIZE) + (((image->y)%BLOCK_SIZE)==0?0:1));
	blocks.y = ((image->x/BLOCK_SIZE) + (((image->x)%BLOCK_SIZE)==0?0:1));
	// --- time the kernel ---
	cudaEventRecord(start);
	cudaHistogram<<<blocks, threadsPerBlock>>> (pixels_dev, image->x, image->y, h_dev);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&milliseconds, start, stop);
	//printf("\nTempo de kernel = %f\n",milliseconds/1000);
	// --- time the device-to-host copy of the histogram ---
	cudaEventRecord(start);
	cudaMemcpy(h, h_dev, 64*sizeof(float), cudaMemcpyDeviceToHost);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&milliseconds, start, stop);
	// Release everything: the original leaked pixels_dev and both events.
	cudaFree(h_dev);
	cudaFree(pixels_dev);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}
// Entry point: reads the PPM named by argv[1], builds its normalized
// 64-bin histogram on the GPU, and prints the 64 relative frequencies
// followed by the elapsed time.
int main(int argc, char *argv[]) {
	// Fail fast on a bad argument count: the original only printed a
	// message and then dereferenced a NULL/garbage argv[1].
	if( argc != 2 ) {
		printf("Too many or no one arguments supplied.\n");
		return 1;
	}
	double t_start, t_end;
	int i;
	char *filename = argv[1];
	PPMImage *image = readPPM(filename);
	float n = image->y * image->x;
	float *h = (float*)malloc(sizeof(float) * 64);
	if (!h) {
		fprintf(stderr, "Unable to allocate memory\n");
		return 1;
	}
	for(i=0; i < 64; i++) h[i] = 0.0;
	t_start = rtclock();
	Histogram(image, h);
	t_end = rtclock();
	// Print each bin as a fraction of the total pixel count.
	for (i = 0; i < 64; i++){
		printf("%0.3f ", h[i]/n);
	}
	printf("\n");
	fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
	free(h);
	// Original leaked the image.
	free(image->data);
	free(image);
	return 0;
}
|
4,894 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
// Default element count used when no valid size is supplied on the command line.
#define NUM 10000000
// Abort the process (exit -1) with a message if the most recent CUDA API
// call or kernel launch recorded an error; `str` labels the failing step.
// NOTE(review): the trailing semicolon after while(0) makes each use expand
// to two statements — safe as used here, but fragile in unbraced if/else.
#define CUDA_ERROR_EXIT(str) do{\
	cudaError err = cudaGetLastError();\
	if( err != cudaSuccess){\
		printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
		exit(-1);\
	}\
	}while(0);
// Elapsed microseconds between two struct timeval samples (end - start).
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
// Bitwise XOR of a and b, assembled one bit at a time from bit 63 down to
// bit 0. The return type is kept as unsigned int for interface
// compatibility, so only the low 32 bits of the 64-bit XOR survive the
// 64 left-shifts of `res` (same truncation the original intended).
__device__ unsigned int exor(unsigned long a,unsigned long b)
{
	// Fixed: the original never initialized res, so the result mixed in
	// whatever garbage the register happened to hold.
	unsigned int res = 0;
	for (int i = 63; i >= 0; i--)
	{
		// Use a 1UL mask: (1 << i) is an int shift, which is undefined for
		// i >= 31 and could never reach the upper 32 bits of a and b.
		bool b1 = a & (1UL << i);
		bool b2 = b & (1UL << i);
		// Per-bit XOR: "both set -> 0, else OR" is equivalent to b1 ^ b2.
		bool xoredBit = b1 ^ b2;
		res <<= 1;
		res |= xoredBit;
	}
	return res;
}
// One pass of a parallel XOR fold over mem[0..num-1]; the host calls this
// log2(num) times with iter = 1, 2, 4, ... and finally reads mem[0].
// NOTE(review): this does not match the standard tree-reduction pattern —
// the stride iter is applied at EVERY even index (i*2), not at indices
// spaced 2*iter apart, so elements are combined repeatedly across passes.
// Confirm mem[0] against a sequential XOR of the input before trusting it.
__global__ void calculate(unsigned long *mem,int num,int iter)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	// Tail guard: the grid is rounded up to whole 1024-thread blocks.
	if(i >= num)
		return;
// 	unsigned long *t1,*t2;
	if(i<num/2){
		// Each of the first num/2 threads owns even element tmp = 2*i and
		// folds in the element iter slots to its right (if in range).
		int tmp=i*2;
		if(tmp+iter<num)
			mem[tmp]=exor(mem[tmp],mem[tmp+iter]);
//		else
//			mem[tmp]=exor(mem[tmp],0);
//	mem[num]=res;
	}
}
// Entry point: fills num random unsigned longs, XOR-reduces them on the GPU
// (log2(num) kernel passes), and prints the result plus timings.
// argv[1] = element count, argv[2] = RNG seed; defaults used otherwise.
int main(int argc, char **argv){
	struct timeval start, end, t_start, t_end;
	int i,blocks=0;
	unsigned long *p1,*g1;
	// Fixed: the original left num and seed uninitialized (undefined
	// behavior) whenever argc != 3.
	unsigned long seed = 0, num = NUM;
	if(argc == 3){
		num = atoi(argv[1]);
		if(num <= 0)       /* unsigned: only catches 0, as before */
			num = NUM;
		seed = atoi(argv[2]);
	}
	p1 = (unsigned long *)malloc((num+1) *sizeof(unsigned long));
	if(!p1){
		printf("malloc failed\n");
		return -1;
	}
	srand(seed);
	for(i=0; i<num; ++i){
		p1[i]=random();
	}
	p1[i]=0;   // sentinel slot at index num
	gettimeofday(&t_start, NULL);
	cudaMalloc(&g1, (num+1) * sizeof(unsigned long));
	CUDA_ERROR_EXIT("cudaMalloc");
	cudaMemcpy(g1, p1, (num+1) * sizeof(unsigned long) , cudaMemcpyHostToDevice);
	CUDA_ERROR_EXIT("cudaMemcpy");
	gettimeofday(&start, NULL);
	blocks = num /1024;
	if(num % 1024)
		++blocks;
	// log2(num) folding passes with doubling stride.
	for(i=0;i<log(num)/log(2);i++){
		calculate<<<blocks,1024>>>(g1,num,(int)pow(2,i));
	}
	// Kernels launch asynchronously: without this sync the "Processing"
	// timestamp measured only launch overhead and CUDA_ERROR_EXIT could
	// miss in-kernel failures.
	cudaDeviceSynchronize();
	CUDA_ERROR_EXIT("kernel invocation");
	gettimeofday(&end, NULL);
	/* Copy back result*/
	cudaMemcpy(p1, g1, (num+1) * sizeof(unsigned long), cudaMemcpyDeviceToHost);
	CUDA_ERROR_EXIT("memcpy");
	gettimeofday(&t_end, NULL);
	printf("%lu",p1[0]);
	printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
	cudaFree(g1);
	/*Print the last element for sanity check*/
	printf("The XOR final value is %lu",p1[num]);
	free(p1);
	return 0;
}
|
4,895 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include <time.h>
#define TILE_SIZE 4
#define WINDOW_SIZE (3)
// 3x3 median filter over a single-channel image stored row-major with
// j_dim pixels per row; one thread per output pixel. Border pixels are
// forced to 0 rather than filtered.
// NOTE(review): the border test compares row against pitch-1 while all
// indexing uses j_dim as the row stride — this is only consistent if
// `pitch` is actually the number of rows; confirm against the launcher.
// NOTE(review): no guard for row/col beyond the image, so the grid must
// not overshoot the image dimensions.
template<class IMG_TYPE>
__global__ void kernelMedian( const IMG_TYPE * __restrict__ in, IMG_TYPE *output, int j_dim, int pitch)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned char filterVector[9] = {0,0,0,0,0,0,0,0,0};
	if((row==0) || (col==0) || (row==pitch-1) || (col==j_dim-1))
		output[row*j_dim+col] = 0; //Deal with boundry conditions
	else {
		// Gather the 3x3 neighbourhood around (row, col).
		for (int x = 0; x < WINDOW_SIZE; x++) {
			for (int y = 0; y < WINDOW_SIZE; y++){
				filterVector[x*WINDOW_SIZE+y] = in[(row+x-1)*j_dim+(col+y-1)];   // setup the filterign window.
			}
		}
		// Full selection sort of the 9 samples; the median ends up at [4].
		for (int i = 0; i < 9; i++) {
			for (int j = i + 1; j < 9; j++) {
				if (filterVector[i] > filterVector[j]) {
					//Swap the variables.
					char tmp = filterVector[i];
					filterVector[i] = filterVector[j];
					filterVector[j] = tmp;
				}
			}
		}
		output[row*j_dim+col] = filterVector[4];   //Set the output variables.
	}
}
// 3x3 median filter using a shared-memory tile with a 1-pixel halo.
// Requires blockDim == (TILE_SIZE, TILE_SIZE); each block stages a
// (TILE_SIZE+2)^2 tile, then every thread sorts its 9 neighbours and
// writes the median. Out-of-image halo cells are zero-filled.
// NOTE(review): there is no row<imageHeight / col<imageWidth guard, so the
// grid must divide the image exactly — adding an early return here would
// also break the __syncthreads() below (divergent barrier), which is why
// this review adds no guard.
template<class IMG_TYPE>
__global__ void medianFilterSharedKernel(const IMG_TYPE * __restrict__ inputImageKernel, IMG_TYPE *outputImagekernel, int imageWidth, int imageHeight)
{
	//Set the row and col value for each thread.
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	__shared__ unsigned char sharedmem[(TILE_SIZE+2)]  [(TILE_SIZE+2)];  //initialize shared memory
	// Edge-of-tile flags: these threads also load the halo cells.
	bool is_x_left = (threadIdx.x == 0), is_x_right = (threadIdx.x == TILE_SIZE-1);
	bool is_y_top = (threadIdx.y == 0), is_y_bottom = (threadIdx.y == TILE_SIZE-1);
	// Zero the halo first (shared memory is uninitialized); real values
	// overwrite these below when the neighbour pixel exists.
	if(is_x_left)
		sharedmem[threadIdx.x][threadIdx.y+1] = 0;
	else if(is_x_right)
		sharedmem[threadIdx.x + 2][threadIdx.y+1]=0;
	if (is_y_top){
		sharedmem[threadIdx.x+1][threadIdx.y] = 0;
		if(is_x_left)
			sharedmem[threadIdx.x][threadIdx.y] = 0;
		else if(is_x_right)
			sharedmem[threadIdx.x+2][threadIdx.y] = 0;
	}
	else if (is_y_bottom){
		sharedmem[threadIdx.x+1][threadIdx.y+2] = 0;
		if(is_x_right)
			sharedmem[threadIdx.x+2][threadIdx.y+2] = 0;
		else if(is_x_left)
			sharedmem[threadIdx.x][threadIdx.y+2] = 0;
	}
	// Every thread loads its own pixel into the tile interior.
	sharedmem[threadIdx.x+1][threadIdx.y+1] = inputImageKernel[row*imageWidth+col];
	// Edge threads additionally load the in-bounds halo pixels.
	if(is_x_left && (col>0))
		sharedmem[threadIdx.x][threadIdx.y+1] = inputImageKernel[row*imageWidth+(col-1)];
	else if(is_x_right && (col<imageWidth-1))
		sharedmem[threadIdx.x + 2][threadIdx.y+1]= inputImageKernel[row*imageWidth+(col+1)];
	if (is_y_top && (row>0)){
		sharedmem[threadIdx.x+1][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+col];
		if(is_x_left)
			sharedmem[threadIdx.x][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+(col-1)];
		else if(is_x_right )
			sharedmem[threadIdx.x+2][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+(col+1)];
	}
	else if (is_y_bottom && (row<imageHeight-1)){
		sharedmem[threadIdx.x+1][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth + col];
		if(is_x_right)
			sharedmem[threadIdx.x+2][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth+(col+1)];
		else if(is_x_left)
			sharedmem[threadIdx.x][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth+(col-1)];
	}
	__syncthreads();   //Wait for all threads to be done.
	// Pull this thread's 3x3 neighbourhood out of the shared tile.
	unsigned char filterVector[9] = {sharedmem[threadIdx.x][threadIdx.y], sharedmem[threadIdx.x+1][threadIdx.y], sharedmem[threadIdx.x+2][threadIdx.y],
					sharedmem[threadIdx.x][threadIdx.y+1], sharedmem[threadIdx.x+1][threadIdx.y+1], sharedmem[threadIdx.x+2][threadIdx.y+1],
					sharedmem[threadIdx.x] [threadIdx.y+2], sharedmem[threadIdx.x+1][threadIdx.y+2], sharedmem[threadIdx.x+2][threadIdx.y+2]};
	{
		// Full selection sort of the 9 samples; the median ends up at [4].
		for (int i = 0; i < 9; i++) {
			for (int j = i + 1; j < 9; j++) {
				if (filterVector[i] > filterVector[j]) {
					//Swap Values.
					char tmp = filterVector[i];
					filterVector[i] = filterVector[j];
					filterVector[j] = tmp;
				}
			}
		}
		outputImagekernel[row*imageWidth+col] = filterVector[4];   //Set the output image values.
	}
}
|
4,896 | #include <stdio.h>
#include <time.h>
#define MWIDTH 4096
#define MTILE 16
#define BWIDTH 16
// Tiled integer matrix multiply, C = A * B, all Width x Width row-major.
// Each thread computes a tile_width x tile_width sub-block of C, so the
// grid must satisfy gridDim * blockDim * tile_width == Width per axis.
__global__ void gpu_matrixMul(int *a, int *b, int *c, int Width, int tile_width){
	int start_row = (blockDim.y*blockIdx.y + threadIdx.y)*tile_width;
	int end_row = start_row + tile_width;
	int start_col = (blockDim.x*blockIdx.x + threadIdx.x)*tile_width;
	int end_col = start_col + tile_width;
	for (int row = start_row; row < end_row; row++) {
		for(int col = start_col; col < end_col; col++) {
			// Accumulate in int: the original used a float accumulator for
			// integer matrices, which silently loses precision once the dot
			// product exceeds 2^24.
			int sum = 0;
			for (int k = 0; k < Width; k++) {
				sum += a[row * Width + k]*b[k * Width + col];
			}
			c[row*Width+col] = sum;
		}
	}
}
// Driver: multiplies two MWIDTH x MWIDTH all-ones integer matrices on the
// GPU and reports the elapsed time (launch + copy-back, which is blocking
// and therefore includes the kernel).
int main(){
	double timeGPU;
	int *h_a, *h_b, *h_c, *d_a, *d_b, *d_c;
	h_a = (int *)malloc(MWIDTH*MWIDTH*sizeof(int));
	h_b = (int *)malloc(MWIDTH*MWIDTH*sizeof(int));
	h_c = (int *)malloc(MWIDTH*MWIDTH*sizeof(int));
	if (!h_a || !h_b || !h_c) {           // original never checked malloc
		printf("host allocation failed\n");
		return 1;
	}
	cudaMalloc(&d_a, MWIDTH*MWIDTH*sizeof(int));
	cudaMalloc(&d_b, MWIDTH*MWIDTH*sizeof(int));
	cudaMalloc(&d_c, MWIDTH*MWIDTH*sizeof(int));
	for (int i = 0; i < MWIDTH*MWIDTH; i++) {
		h_a[i] = 1;
		h_b[i] = 1;
		h_c[i] = 0;}
	cudaMemcpy(d_a, h_a, MWIDTH*MWIDTH*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, MWIDTH*MWIDTH*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemset(d_c, 0, MWIDTH*MWIDTH*sizeof(int));
	clock_t startGPU = clock();
	gpu_matrixMul<<<dim3((MWIDTH/(MTILE*BWIDTH)), (MWIDTH/(MTILE*BWIDTH))), dim3(BWIDTH,BWIDTH)>>>(d_a, d_b, d_c, MWIDTH, MTILE);
	// Catch launch-configuration errors; the original checked nothing.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
		printf("kernel launch failed: %s\n", cudaGetErrorString(err));
	// Blocking copy: also synchronizes, so the timing below is meaningful.
	cudaMemcpy(h_c, d_c, MWIDTH*MWIDTH*sizeof(int), cudaMemcpyDeviceToHost);
	timeGPU = ((double)(clock() - startGPU))/CLOCKS_PER_SEC;
	printf("tiempo GPU = %f s\n",timeGPU);
	/*
	for (int i=0; i < MWIDTH*MWIDTH; i++)
		if (h_c[i] != MWIDTH) {printf("Mismatch at offset %d, was: %d, should be: %d\n", i, h_c[i], MWIDTH); return 1;}
	printf("Success!\n");
	*/
	// Original leaked every buffer.
	free(h_a); free(h_b); free(h_c);
	cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
	return 0;
}
4,897 |
// Forward declaration: the reproducer body lives in another translation unit.
void is_a_thrust_bug();
// Minimal driver that just runs the thrust-bug reproducer and exits.
int main(int argc, char** argv) {
    is_a_thrust_bug();
    return 0;
}
|
4,898 | #include <stdio.h>
// Each thread of the single block accumulates a strided partial sum of the
// series  k * 0.5 / (k-1)!  for k = idx+1, idx+1+blockDim.x, ... < num_steps,
// writing its partial into d_out[idx]; the host sums the partials.
//   d_num_steps : device scalar, number of series terms
//   d_fact      : device table of factorials, d_fact[i] = i!
//   d_out       : device buffer of blockDim.x partial sums (output)
__global__ void square( int *d_num_steps, unsigned long long *d_fact, double *d_out){
	int idx = threadIdx.x;
	int num_steps = *d_num_steps;
	// Accumulate locally and store once: the original did "d_out[idx] +="
	// on device memory that was never initialized, so each partial sum
	// included whatever garbage the buffer already held.
	double sum = 0.0;
	for(int k=idx+1; k< num_steps; k+=blockDim.x){
		sum += (double) k*0.5/ (double) d_fact[k-1];
	}
	d_out[idx] = sum;
}
// Entry point: approximates e (well, the series the kernel implements) with
// argv[1] GPU threads and prints the summed result.
int main(int argc, char ** argv){
	// Fail fast: the original dereferenced argv[1] without checking argc.
	if (argc < 2) {
		printf("usage: %s <num_threads>\n", argv[0]);
		return 1;
	}
	int h_num_steps = 21;
	int THREADS = atoi(argv[1]);
	// Fixed: e was uninitialized before being used with += below.
	double e = 0.0;
	// Factorial table 0!..20! (20! is the largest that fits in 64 bits).
	unsigned long long h_fact[h_num_steps];
	h_fact[0]=1;
	for(int f=1; f<h_num_steps; f++){
		h_fact[f] = h_fact[f-1]*f;
	}
	double h_out[THREADS];
	//declare GPU memory pointers
	int *d_num_steps;
	unsigned long long *d_fact;
	double *d_out;
	//allocate GPU memory
	cudaMalloc((void **) &d_num_steps, sizeof(int));
	cudaMalloc((void **) &d_fact, h_num_steps*sizeof(unsigned long long));
	cudaMalloc((void **) &d_out, THREADS*sizeof(double));
	// Zero the output buffer: the kernel accumulates into it, and freshly
	// cudaMalloc'ed memory is not guaranteed to be zero.
	cudaMemset(d_out, 0, THREADS*sizeof(double));
	// transfer the inputs to the GPU
	cudaMemcpy(d_num_steps, &h_num_steps, sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_fact, h_fact, h_num_steps*sizeof(unsigned long long), cudaMemcpyHostToDevice);
	// launch one block of THREADS threads
	square<<<1, THREADS>>>(d_num_steps, d_fact, d_out);
	// blocking copy: synchronizes and retrieves the partial sums
	cudaMemcpy(h_out, d_out, THREADS*sizeof(double), cudaMemcpyDeviceToHost);
	// reduce the per-thread partials on the host
	for (int i=0; i<THREADS; i++){
		e+=h_out[i];
	}
	printf("e: %f\n", e);
	// free GPU memory allocation
	cudaFree(d_num_steps);
	cudaFree(d_fact);
	cudaFree(d_out);
	return 0;
}
|
4,899 | #include <stdio.h>
// Writes a + b + i into ret[i], where i is this thread's index within the
// (single) block. Launch with at least as many elements in ret as threads.
__global__ void VecAdd(int* ret, int a, int b)
{
    int i = threadIdx.x;
    ret[i] = a + b + i;
}
// Demonstrates unified (managed) memory: the kernel fills the buffer on the
// device and the host reads it directly after a synchronize.
int main(void)
{
    const int count = 1000;
    int first = 10;
    int second = 100;
    int* results = NULL;   // managed buffer holding the sums
    cudaMallocManaged(&results, count * sizeof(int));
    VecAdd<<< 1, count >>>(results, first, second);
    cudaDeviceSynchronize();   // required before the host touches `results`
    for (int idx = 0; idx < count; idx++)
        printf("%4d: %d + %d + %4d = %5d\n", idx, first, second, idx, results[idx]);
    cudaFree(results);
    return 0;
}
|
4,900 | #include "includes.h"
// Hello Cuda World Program //
/*
* Author: Malhar Bhatt
* Subject : High Performance Computing
*
*/
/**
* Empty Function named Kernel() qualified with __global__
*
*/
// Placeholder kernel: demonstrates the __global__ qualifier; does nothing.
__global__ void kernel (void)
{
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.