serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
10,301 |
#ifdef _WIN32
# define IMPORT __declspec(dllimport)
# define EXPORT __declspec(dllexport)
#else
# define IMPORT
# define EXPORT
#endif
IMPORT int curand_main();
IMPORT int nppif_main();
// Smoke-test entry point exported by this library: returns 1 (true) only
// when both imported entry points report success (i.e. each returns 0).
EXPORT int static_version()
{
return curand_main() == 0 && nppif_main() == 0;
}
|
10,302 | #include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
/* Abort with file/line and the CUDA error string when a runtime call fails.
   Usage: CHECK(cudaMalloc(...));
   Bug fix: the original tested `error 1= cudaSuccess` (typo), which is a
   syntax error the moment the macro is expanded; it must be `!=`. */
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error: %s : %d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Compare the host and device result arrays element-wise and report the
// first element whose absolute difference exceeds epsilon, or print that
// the arrays match.
//
// Bug fixes vs. the original:
//  - it called integer abs() on a float difference, truncating every
//    sub-1.0 difference to 0 so mismatches were never detected;
//  - the first printf used the malformed specifier "%5.2gf".
void CheckResult(float* hostResult, float* deviceResult, const int N)
{
    double epsilon = 1.0E-8;
    int match = 1;
    for(int i = 0; i < N; ++i)
    {
        // Signed difference in double; compare both sides explicitly so no
        // math-library absolute-value call (and no int truncation) is needed.
        double diff = (double)hostResult[i] - (double)deviceResult[i];
        if(diff > epsilon || diff < -epsilon)
        {
            match = 0;
            printf("Array do not match!\n");
            printf("Host %5.2f GPU %5.2f at current %d \n", hostResult[i], deviceResult[i], i);
            break;
        }
    }
    if(match == 1)
    {
        printf("Array match.\n\n");
    }
    return;
}
// Fill `data` with `size` pseudo-random floats in [0.0, 25.5]
// (low byte of rand() divided by 10), seeding the C PRNG from
// the current wall-clock time.
void InitData(float* data, const int size)
{
    time_t seed;
    srand((unsigned)time(&seed));
    int idx = 0;
    while(idx < size)
    {
        data[idx] = (float)(rand() & 0xFF) / 10.0f;
        ++idx;
    }
}
// CPU reference: C[i] = A[i] + B[i] for each of the `size` elements.
void SumArrayOnHost(float* A, float* B, float* C, const int size)
{
    // Elements are independent, so iteration order is irrelevant.
    for(int idx = size; idx-- > 0; )
    {
        C[idx] = A[idx] + B[idx];
    }
}
// Element-wise vector add on the device: one thread per element.
// NOTE(review): indexes by threadIdx.x only, so it assumes a single block
// whose blockDim.x equals the array length (main launches <<<1,128>>> for
// nElem = 128) — there is no bounds check.
__global__ void SumArrayOnDevice(float* A, float* B, float* C)
{
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
// Driver: sums two random 128-element float arrays on both CPU and GPU,
// then compares the two results with CheckResult.
int main(int argc, char const *argv[])
{
printf("%s Starting ... \n", argv[0]);
int dev = 0;
cudaSetDevice(dev);
int nElem = 128;
printf("Array size is %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
// Host buffers: two inputs plus one result array per side.
float* hA;
float* hB;
float* hostResult;
float* deviceResult;
hA = (float*)malloc(nBytes);
hB = (float*)malloc(nBytes);
hostResult = (float*)malloc(nBytes);
deviceResult = (float*)malloc(nBytes);
InitData(hA, nElem);
InitData(hB, nElem);
memset(hostResult, 0, nBytes);
memset(deviceResult, 0, nBytes);
// Device buffers.
float* dA;
float* dB;
float* dC;
cudaMalloc((float**)&dA, nBytes);
cudaMalloc((float**)&dB, nBytes);
cudaMalloc((float**)&dC, nBytes);
cudaMemcpy(dA, hA, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, nBytes, cudaMemcpyHostToDevice);
// One thread per element; grid is ceil(nElem / block.x) = 1 block here.
dim3 block(nElem);
dim3 grid((nElem + block.x - 1) / block.x);
SumArrayOnDevice<<<grid,block>>>(dA, dB, dC);
printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
// Blocking D2H copy also synchronizes with the asynchronous launch above.
cudaMemcpy(deviceResult, dC, nBytes, cudaMemcpyDeviceToHost);
SumArrayOnHost(hA, hB,hostResult,nElem);
CheckResult(hostResult, deviceResult,nElem);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
free(hA);
free(hB);
free(hostResult);
free(deviceResult);
return 0;
} |
10,303 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 10
// Device kernel: c = a + b element-wise. Launched as <<<N,1>>>, so the
// block index selects the element; blocks beyond N do nothing.
__global__ void add(int *a,int *b,int *c ) {
    int idx = blockIdx.x;
    printf("Executing on %d\n",idx);
    if(idx<N){
        c[idx] = a[idx] + b[idx];
    }
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
// Host arrays and their device counterparts.
int a[N],b[N],c[N];
int *ad,*bd,*cd;
cudaMalloc((int **)&ad,N*sizeof(int));
cudaMalloc((int **)&bd,N*sizeof(int));
cudaMalloc((int **)&cd,N*sizeof(int));
// a[i] = 2i, b[i] = 3i.
for (int i=0; i<N; i++) {
a[i] = i * 2;
b[i] = i * 3;
}
printf("[");
for (int i=0; i<N; i++) {
printf("%d,",a[i]);
}
printf("]\n");
printf("[");
for (int i=0; i<N; i++) {
printf("%d,",b[i]);
}
printf("]\n");
cudaMemcpy(ad,&a,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(bd,&b,N*sizeof(int),cudaMemcpyHostToDevice);
// One single-thread block per element; the blocking cudaMemcpy below
// synchronizes with the asynchronous launch before c is printed.
add<<<N,1>>>(ad,bd,cd);
cudaMemcpy(&c,cd,N*sizeof(int),cudaMemcpyDeviceToHost);
printf("Addition:");
printf("[");
for (int i=0; i<N; i++) {
printf("%d,",c[i]);
}
printf("]");
cudaFree(ad);
cudaFree(bd);
cudaFree(cd);
return 0;
}
|
10,304 | // c = a + b
// c = a + b element-wise over N doubles.
// Rewritten as a canonical grid-stride loop: each thread handles indices
// idx, idx + stride, ... so every element is covered exactly once for any
// <<<grid,block>>> configuration. The original did a guarded single store
// and then a second loop with a hard-coded stride of 32, which made
// threads 32 apart redundantly re-store the same sums; the final array
// contents are unchanged by this rewrite.
__global__ void array_add(double* a, double* b, double* c, int N)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride)
        c[i] = a[i] + b[i];
}
// C-linkage host wrapper so non-C++ callers can launch array_add with an
// explicit grid/block size. The launch is asynchronous; the caller is
// responsible for synchronizing before reading c.
extern "C" void ext_array_add(int grid_size, int block_size,
double* a, double* b, double* c, int N)
{
array_add<<<grid_size,block_size>>>(a, b, c, N);
}
|
10,305 |
/*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void helloKernel(void)
{
}
int main()
{
int count = 0;
int check=0;
cudaDeviceProp props;
cudaGetDeviceCount(&count);
if(count>0)
{
printf("Cuda Devices onboard: %d.\n", count);
helloKernel << <250, 250 >> >();
printf("Hello Cuda\n");
for (int i = 0; i < count; i++)
{
cudaGetDeviceProperties(&props, count-1);
printf("Cuda Device Number: %d\n", count);
printf("Cuda Device Name : %s\n",props.name);
printf("CUDA Multiprocessor count: %d\n", props.multiProcessorCount);
printf("Is %s\n", (check=props.isMultiGpuBoard == 1) ? "MultiGpuBoard\n" : "UniGpuBoard");
if(check == 1)
{
printf("MultiGpuBoardId: %d\n", props.multiGpuBoardGroupID);
}
printf("ClockRate: %d MHz\n", props.clockRate/1000);
printf("MemorClockRate: %d MHz\n", props.memoryClockRate/1000);
printf("Is %s\n", (props.integrated) ? "Integrated" : "Discrete");
printf("MaxThreadsPerlock: %d\n\n", props.maxThreadsPerBlock);
}
}
else
{
printf("No CUDA capable device found\n");
}
system("Pause");
return 911;
}*/ |
10,306 | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Filter radius entered by the user at run time; FILTER_LENGTH derives from it.
unsigned int filter_radius;
// Element type for image/filter data (change here to switch precision).
typedef float myDataType;
// Threads per block edge: thread blocks are MAX_XY x MAX_XY.
#define MAX_XY 32
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
// Maximum tolerated per-element CPU/GPU difference.
#define accuracy 0.00005
// Check the sticky CUDA error state; on failure print the location, reset
// the device, and exit. Call after kernel launches and CUDA API sequences.
#define cudaCheckError() { \
cudaError_t error=cudaGetLastError(); \
if(error!=cudaSuccess) { \
printf("ERROR IN CUDA %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
// Row (horizontal) convolution pass on the GPU: one thread per output
// pixel, with (x, y) derived from the 2D grid/block layout. Filter taps
// that fall outside the row are skipped (zero border handling), matching
// convolutionRowCPU.
__global__ void convolutionRowGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,int imageW, int imageH, int filterR) {
    int x,y,k,d;
    x = blockIdx.x*blockDim.x + threadIdx.x;
    y = blockIdx.y*blockDim.y + threadIdx.y;
    // Robustness fix: threads whose coordinates fall outside the image
    // must not write — the original stored out of bounds whenever the
    // grid over-covered the image.
    if (x >= imageW || y >= imageH) return;
    myDataType sum = 0;
    for(k = -filterR; k <= filterR; k++) {
        d = x + k;
        if(d >= 0 && d < imageW) {
            sum += d_Src[y * imageW + d] * d_Filter[filterR -k];
        }
    }
    d_Dst[y*imageW + x] = sum;
}
// Column (vertical) convolution pass on the GPU: one thread per output
// pixel. Filter taps outside the column are skipped (zero border
// handling), matching convolutionColumnCPU.
__global__ void convolutionColumnGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,
                                     int imageW, int imageH, int filterR) {
    int x,y,k,d;
    x = blockIdx.x*blockDim.x + threadIdx.x;
    y = blockIdx.y*blockDim.y + threadIdx.y;
    // Robustness fix: guard against threads outside the image so an
    // over-covering grid cannot write out of bounds.
    if (x >= imageW || y >= imageH) return;
    myDataType sum = 0;
    for(k = -filterR; k <= filterR; k++) {
        d = y + k;
        if(d >= 0 && d < imageH) {
            sum += d_Src[d * imageW + x] * d_Filter[filterR -k];
        }
    }
    d_Dst[y * imageW + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
// CPU reference for the row convolution (same border handling as the GPU
// kernel): h_Dst[y][x] = sum over k of h_Src[y][x+k] * h_Filter[filterR-k],
// skipping taps that fall outside the row.
void convolutionRowCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter,
                       int imageW, int imageH, int filterR) {
    for (int y = 0; y < imageH; y++) {
        for (int x = 0; x < imageW; x++) {
            myDataType acc = 0;
            for (int k = -filterR; k <= filterR; k++) {
                int src = x + k;
                if (src >= 0 && src < imageW)
                    acc += h_Src[y * imageW + src] * h_Filter[filterR - k];
            }
            h_Dst[y * imageW + x] = acc;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
// CPU reference for the column convolution (same border handling as the
// GPU kernel): h_Dst[y][x] = sum over k of h_Src[y+k][x] * h_Filter[filterR-k],
// skipping taps that fall outside the column.
void convolutionColumnCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter,
                          int imageW, int imageH, int filterR) {
    for (int y = 0; y < imageH; y++) {
        for (int x = 0; x < imageW; x++) {
            myDataType acc = 0;
            for (int k = -filterR; k <= filterR; k++) {
                int src = y + k;
                if (src >= 0 && src < imageH)
                    acc += h_Src[src * imageW + x] * h_Filter[filterR - k];
            }
            h_Dst[y * imageW + x] = acc;
        }
    }
}
//
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
// Driver: reads the filter radius and a (square) image size from stdin,
// runs the separable convolution on both GPU and CPU, and compares the
// two results element-wise against `accuracy`.
int main(int argc, char **argv) {
myDataType
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU,
*h_OutputGPU;
int imageW;
int imageH;
//int i=MAX_XY;
//int count=0;
unsigned int i;
double timing;
clock_t start;
clock_t end;
printf("Enter filter radius : ");
// NOTE(review): filter_radius is unsigned int but is read with "%d";
// "%u" would be the matching conversion — confirm and fix separately.
scanf("%d", &filter_radius);
// imageW and imageH are provided by the user and are assumed equal
// (imageW = imageH = N); for simplicity only square images are handled.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
// while(1){
// if(imageW % i == 0) {
// dim3 threads(i,i);
// dim3 blocks(imageW/i,imageW/i);
// break;
// }
// i--;
// }
// NOTE(review): this launch layout assumes imageW and imageH are exact
// multiples of MAX_XY (32); otherwise part of the image is not covered.
dim3 threads(MAX_XY,MAX_XY);
dim3 blocks (imageH/MAX_XY,imageW/MAX_XY);
// if(imageH < MAX_XY && imageW < MAX_XY){
// threads = (imageH,imageH);
// blocks = (1,1);
// }
// else{
// threads = (MAX_XY,MAX_XY);
// blocks = (imageW/MAX_XY,imageW/MAX_XY);
// }
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays and device array...\n");
// It would be a good idea to also check the result of each malloc...
h_Filter = (myDataType *)malloc(FILTER_LENGTH * sizeof(myDataType));
h_Input = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_Buffer = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_OutputCPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
h_OutputGPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType));
// NOTE(review): h_OutputGPU is not included in this NULL check.
if (h_Filter==NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL){
printf("Something went wrong wille malloc in CPU\n");
}
printf("Memmory allocation for host arrays: COMPLETED \n");
cudaMallocManaged((void**)&d_Filter,FILTER_LENGTH * sizeof(myDataType));
cudaMallocManaged((void**)&d_Input,imageH * imageW * sizeof(myDataType));
cudaMallocManaged((void**)&d_Buffer,imageH * imageW * sizeof(myDataType));
cudaMallocManaged((void**)&d_OutputGPU,imageH * imageW * sizeof(myDataType));
cudaCheckError();
printf("Memmory allocation for device arrays: COMPLETED \n");
// 'h_Filter' is the filter used for the convolution and is initialized
// randomly. 'h_Input' is the image the convolution is applied to, also
// initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (myDataType)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (myDataType)rand() / ((myDataType)RAND_MAX / 255) + (myDataType)rand() / (myDataType)RAND_MAX;
}
printf("initialization of host arrays: COMPLETED \n");
cudaMemcpy(d_Filter, h_Filter,FILTER_LENGTH * sizeof(myDataType),cudaMemcpyHostToDevice);
cudaMemcpy(d_Input, h_Input,imageH * imageW * sizeof(myDataType),cudaMemcpyHostToDevice);
cudaCheckError();
printf("initialization of device arrays: COMPLETED \n\n");
printf("GPU computation...\n");
// Two-pass separable convolution: rows into d_Buffer, then columns into
// d_OutputGPU. The sync between passes orders the row pass's writes.
convolutionRowGPU<<<blocks,threads>>>(d_Buffer,d_Input,d_Filter,imageW,imageH,filter_radius);
cudaCheckError();
cudaDeviceSynchronize();
convolutionColumnGPU<<<blocks,threads>>>(d_OutputGPU,d_Buffer,d_Filter,imageW,imageH,filter_radius);
cudaCheckError();
printf("GPU computation : COMPLETED\n\n");
cudaMemcpy(h_OutputGPU,d_OutputGPU,imageH * imageW * sizeof(myDataType),cudaMemcpyDeviceToHost);
// The part below runs on the CPU; its output is the reference the GPU
// result is compared against.
printf("CPU computation...\n");
start = clock();
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
end = clock();
timing = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("CPU computation : COMPLETED in time:%10.5f\n",timing);
// Compare GPU against CPU; if even one element differs by more than the
// configured accuracy we report the error and terminate the program.
printf("\nCPU computations == GPU computation?\n");
for (i = 0; i < imageW * imageH; i++) {
if(h_OutputGPU[i] > h_OutputCPU[i] + accuracy || h_OutputGPU[i] < h_OutputCPU[i] - accuracy){
printf("CPU computations == GPU computation : FALSE line:%d difrence:%f \nExitting program...\n",i,h_OutputGPU[i]-h_OutputCPU[i]);
//count++;
// free all the allocated memory CPU
free(h_OutputCPU);
free(h_OutputGPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
// free all the allocated memory GPU
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaCheckError();
cudaDeviceReset();
return(1);
}
}
printf("CPU computations == GPU computation : TRUE \nExitting program after Memmory Free...\n");
// free all the allocated memory CPU
free(h_OutputCPU);
free(h_OutputGPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
// free all the allocated memory GPU
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaDeviceReset();
return 0;
}
|
10,307 | #include "includes.h"
// Relaxation step over an n x n distance matrix for a fixed pivot k:
// each (i, j) entry is replaced by min(d_D(i,j), d_D(i,k) + d_D(k,j)).
// This looks like the inner step of a Floyd–Warshall-style update —
// TODO confirm against the host-side driver.
// NOTE(review): each thread touches only its own 3 shared slots, so no
// __syncthreads() is needed — but the shared staging gives no data reuse.
// Assumes blockDim.x * blockDim.y <= 256 and that the grid exactly covers
// the matrix (there is no i/j bounds check).
__global__ void calc(float *d_D, int n, int k){ //kernel
__shared__ float s_d[3*256]; //shared in block table of floats (size 3*number threads/block)
int i = blockIdx.x * blockDim.x + threadIdx.x; //We find i & j in the Grid of threads
int j = blockIdx.y * blockDim.y + threadIdx.y;
int b_index = 3 * (threadIdx.x + blockDim.x*threadIdx.y); //Calculation of initial index in shared table s_d
s_d[b_index] = d_D[i + j*n]; //Pass values from device table to shared
s_d[b_index + 1] = d_D[i + k*n];
s_d[b_index + 2] = d_D[k + j*n];
if (s_d[b_index] > s_d[b_index + 1] + s_d[b_index + 2]) s_d[b_index] = s_d[b_index + 1] + s_d[b_index + 2]; //Calculation of new distance value
d_D[i + j*n] = s_d[b_index]; //Pass the values back to the table s_d
} |
10,308 | /*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Module: $RCSfile: itkImage.h,v $
Language: C++
Date: $Date: 2009-02-05 19:04:56 $
Version: $Revision: 1.150 $
Copyright (c) Insight Software Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
#include <stdio.h>
#define SIZE 2048
// Ceiling division: number of length-b chunks needed to cover a elements.
// Bug fix: the original macro ended with a stray ';' and left its
// arguments/branches unparenthesized, so any use inside a larger
// expression (or with compound arguments) mis-parsed.
#define DIVUP(a,b) (((a) % (b)) == 0 ? ((a)/(b)) : ((a)/(b)) + 1)
// One thread per element: Output[i] = Vector1[i] + Vector2[i] for the
// first `size` elements; threads past the end of the data do nothing.
__global__ void VectorAddKernel( float * Vector1, float * Vector2, float * Output, int size)
{
    const int element = blockIdx.x * blockDim.x + threadIdx.x;
    if( element >= size )
    {
        return;
    }
    Output[element] = Vector1[element] + Vector2[element];
}
// Adds two SIZE-element ramps (0,1,2,...) on the GPU and prints the sums.
// NOTE(review): `err` is overwritten by each call, so the final check only
// reflects the last cudaFree — intermediate failures go unnoticed.
int main()
{
float HostVector1[SIZE];
float HostVector2[SIZE];
float HostOutputVector[SIZE];
for( int i=0; i<SIZE; i++)
{
HostVector1[i] = i;
HostVector2[i] = i;
}
float * GPUVector1;
float * GPUVector2;
float * GPUOutputVector;
cudaError_t err;
unsigned int totalSize = SIZE* sizeof(float);
err = cudaMalloc((void**)&GPUVector1, totalSize );
err = cudaMalloc((void**)&GPUVector2, totalSize );
err = cudaMalloc((void**)&GPUOutputVector, totalSize );
cudaMemcpy(GPUVector1, HostVector1, totalSize , cudaMemcpyHostToDevice);
cudaMemcpy(GPUVector2, HostVector2, totalSize , cudaMemcpyHostToDevice);
//
// Define here the strategy for defining the distribution of the problem
//
// Size of the data block that will be passed to each one of the streaming
// multi-processors.
dim3 BlockDim(128,1,1);
//
// Size of the grid of multi-processors that will be used for processing
// the total amount of data.
//
int numberOfProcessors = DIVUP(SIZE, BlockDim.x);
dim3 GridDim( numberOfProcessors, 1 , 1 );
// 17 blocks = 2050 / 128
//
// This call is asynchronous.
//
// Kernels have a timeout of 5 seconds... if the kernel runs for more than 5 seconds
// The operating system (Microsoft Windows) will consider that the display crashed.
//
VectorAddKernel<<<GridDim,BlockDim>>>(GPUVector1,GPUVector2,GPUOutputVector,SIZE);
//
// Do other stuff here...
//
//
// This call will wait until the GPU is done
// cudaThreadSynchronize();
//
// The blocking copy below also synchronizes with the launch above.
err = cudaMemcpy( HostOutputVector, GPUOutputVector, totalSize, cudaMemcpyDeviceToHost);
err = cudaFree( GPUVector1 );
err = cudaFree( GPUVector2 );
err = cudaFree( GPUOutputVector );
for(int i=0; i<SIZE; i++)
{
printf("%8.3f\n",HostOutputVector[i]);
}
if( err )
{
printf("err %d", err );
}
}
|
10,309 | #include "cuda_runtime.h"
#include <cuda.h>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <vector>
#include "device_launch_parameters.h"
using namespace std;
const int MAX_STRING_LENGTH = 256;
const int THREADS = 3;
const string DATA_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_dat_1.txt"; // 1, 2, 3
const string REZ_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_rez.txt"; // 1, 2, 3
// One GPU benchmark record read from the data file and graded on the device.
struct BenchmarkGPU {
char Name[MAX_STRING_LENGTH]; // model name (NUL-terminated)
int MSRP = -1; // launch price; -1 until read from file
double Score = -1; // benchmark score; -1 until read from file
char result[MAX_STRING_LENGTH+2]; // "<grade>-<name>" built by sum_on_gpu
// Formats the record as one fixed-width table row.
// NOTE(review): uses stringstream, which requires <sstream> — that header
// is not included in this file; confirm it arrives transitively.
string toString() {
stringstream ss;
ss << setw(45) << Name << " | " << setw(6) << MSRP << " | " << setw(8) << Score << " | " << setw(12) << result;
return ss.str();
}
};
// Ratio of an integer numerator to a floating-point denominator.
double calculateNew(int x, double y) {
    const double ratio = x / y;
    return ratio;
}
void readGPUFile(BenchmarkGPU *data);
void write_results_to_file(BenchmarkGPU* data, int n, const string file_path, const string title);
__global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, BenchmarkGPU* results);
__device__ void gpu_memset(char* dest, int add);
__device__ void gpu_strcat(char* dest, char* src, int offset);
/**
 * Reads 25 benchmark records from DATA_FILE, grades them on the GPU with
 * THREADS threads, copies the accepted records back and writes them to
 * REZ_FILE.
 */
int main() {
    // Host
    const int n = 25;             // record count expected in the data file
    BenchmarkGPU data[n];
    readGPUFile(data);
    BenchmarkGPU results[n];
    int chunk_size = n / THREADS; // records graded per GPU thread
    int count = 0;                // number of accepted results
    // GPU mirrors of the host data (unused sresults/d_sresults removed)
    BenchmarkGPU* d_all_gpus;
    int* d_count;
    int* d_n;
    int* d_chunk_size;
    BenchmarkGPU* d_results;
    // Memory allocation for GPU
    cudaMalloc((void**)&d_all_gpus, n * sizeof(BenchmarkGPU));
    cudaMalloc((void**)&d_results, n * sizeof(BenchmarkGPU));
    cudaMalloc((void**)&d_count, sizeof(int));
    cudaMalloc((void**)&d_n, sizeof(int));
    cudaMalloc((void**)&d_chunk_size, sizeof(int));
    // Copies memory from CPU to GPU
    cudaMemcpy(d_all_gpus, data, n * sizeof(BenchmarkGPU), cudaMemcpyHostToDevice);
    cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_chunk_size, &chunk_size, sizeof(int), cudaMemcpyHostToDevice);
    sum_on_gpu<<<1,THREADS>>>(d_all_gpus, d_count, d_n, d_chunk_size, d_results);
    cudaDeviceSynchronize();
    cudaMemcpy(&results, d_results, n * sizeof(BenchmarkGPU), cudaMemcpyDeviceToHost);
    // Bug fix: copy the whole int back — the original copied only 1 byte,
    // leaving the three high bytes of `count` as stale host memory.
    cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_all_gpus);
    cudaFree(d_count);
    cudaFree(d_n);
    cudaFree(d_chunk_size);
    cudaFree(d_results);
    cout << "Found results: " << count << endl;
    cout << "Finished" << endl;
    write_results_to_file(results, count, REZ_FILE, "A dalies rezultatai");
    return 0;
}
/**
* GPU
* Sums gpus list chunk data properties
* @param gpus BenchmarkGPUs list
* @param count BenchmarkGPUs list size
* @param chunk_size Summed items per thread
* @param results Summed chunk results
*/
/**
 * GPU kernel: each thread grades one contiguous chunk of `gpus`; the last
 * thread also absorbs the remainder when *n is not divisible by the thread
 * count. A record whose price-per-score grade is better than 'F' is
 * appended to `results` through an atomic counter.
 * @param gpus       input records
 * @param count      out: number of accepted results (atomically incremented)
 * @param n          total number of records
 * @param chunk_size records per thread
 * @param results    out: accepted, re-formatted records
 */
__global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, BenchmarkGPU* results) {
    int start_index = threadIdx.x * *chunk_size;
    int end_index = start_index + 1 * *chunk_size;
    // Last thread takes everything up to *n.
    if (threadIdx.x == blockDim.x -1)
        end_index = *n;
    printf("Thread: %d Start Index: %d End Index: %d\n", threadIdx.x, start_index, end_index);
    for (int i = start_index; i < end_index; ++i) {
        BenchmarkGPU tmp;
        gpu_memset(tmp.Name,0);
        gpu_memset(tmp.result,2);
        tmp.MSRP = 0;
        tmp.Score = 0.0;
        gpu_strcat(tmp.Name, gpus[i].Name, 0);
        tmp.Score = gpus[i].Score;
        tmp.MSRP = gpus[i].MSRP;
        // Price-per-score ratio decides the letter grade (lower is better).
        double my_number = tmp.MSRP / tmp.Score;
        char tmp_res[256+2];
        gpu_memset(tmp_res, 2);
        tmp_res[0] = 'F';
        tmp_res[1] = '-';
        if(my_number < 70)
            tmp_res[0] = 'E';
        if(my_number < 60)
            tmp_res[0] = 'D';
        if(my_number < 50)
            tmp_res[0] = 'C';
        if(my_number < 40)
            tmp_res[0] = 'B';
        if(my_number < 30)
            tmp_res[0] = 'A';
        gpu_strcat(tmp_res, gpus[i].Name, 2);
        // Bug fix: tmp_res is a char array — print it with %s; the original
        // used %d, which reinterpreted the pointer value as an int.
        printf("Thread: %d Brand: %s\n", threadIdx.x, tmp_res);
        gpu_strcat(tmp.result, tmp_res,0);
        if(tmp.result[0] < 'F')
        {
            // Reserve a unique output slot shared by all threads.
            int index = atomicAdd(count, 1);
            results[index] = tmp;
        }
        // printf("Thread: %d Index: %d Brand: %s Make Year: %d Mileage: %f\n", threadIdx.x, index, results[index].Name, results[index].Score, results[index].MSRP);
    }
}
/**
* Appends char array to other char array
* @param dest Destination array
* @param src Source array
*/
/**
 * Copies src into dest starting at dest[offset], including the
 * terminating NUL, stopping after at most MAX_STRING_LENGTH characters.
 * NOTE(review): dest must have room for offset + MAX_STRING_LENGTH bytes;
 * the `i + offset != MAX_STRING_LENGTH+offset` bound reduces to
 * `i != MAX_STRING_LENGTH` — the offset cancels out.
 * @param dest   Destination array
 * @param src    Source array (NUL-terminated)
 * @param offset Starting index in dest
 */
__device__ void gpu_strcat(char* dest, char* src, int offset) {
int i = 0;
do {
dest[offset + i] = src[i];}
while (src[i++] != 0 && i + offset != MAX_STRING_LENGTH+offset);
}
/**
* Zeroes all char memory
* @param dest Char array
*/
/**
 * Zeroes MAX_STRING_LENGTH + add bytes of the given char buffer.
 * @param dest Char array to clear
 * @param add  Extra bytes beyond MAX_STRING_LENGTH to clear
 */
__device__ void gpu_memset(char* dest, int add) {
    const int total = MAX_STRING_LENGTH + add;
    int i = 0;
    while (i < total) {
        dest[i++] = 0;
    }
}
// Parses DATA_FILE into `data`. Lines cycle Name / MSRP / Score; on each
// line the text up to the first space (a label) is discarded and the
// remainder is the value. Exits the process if the file cannot be opened.
// NOTE(review): fills as many records as the file provides — the caller's
// array must be large enough (main sizes it at 25); Score is parsed with
// stoi, so any fractional part is truncated. Confirm both against the data.
void readGPUFile(BenchmarkGPU *data)
{
string line;
ifstream myfile;
myfile.open(DATA_FILE);
if(!myfile.is_open()) {
perror("Error open");
exit(EXIT_FAILURE);
}
// ch cycles 0,1,2 over the three-line record layout; count is the record index.
int ch = 0;
int count = 0;
while(getline(myfile, line)) {
string::size_type pos;
pos=line.find(' ',0);
line = line.substr(pos+1);
switch (ch) {
case 0:
strcpy(data[count].Name, line.c_str());
break;
case 1:
data[count].MSRP = stoi(line);
break;
case 2:
data[count].Score = stoi(line);
count++;
ch = -1;
break;
}
ch++;
}
}
/**
* Writes given monitor cars formatted in table to file
* @param cars Cars list
* @param file_path Result file path
* @param title Results table title
*/
/**
 * Writes the first n records as a fixed-width table
 * (Name | MSRP | Score | Result) with the given title to file_path,
 * one row per record via BenchmarkGPU::toString().
 */
void write_results_to_file(BenchmarkGPU* gpus, int n, const string file_path, const string title) {
ofstream file;
file.open(file_path);
file << setw(80) << title << endl
<< "------------------------------------------------------------------------------------------------------------------------"
<< endl
<< setw(45) << "Name" << " | " << setw(6) << "MSRP" << " | " << setw(8) << "Score" << " | " << setw(20) << "Result" << endl
<< "------------------------------------------------------------------------------------------------------------------------"
<< endl;
for (int i = 0; i < n; ++i) {
file << gpus[i].toString() << endl;
}
file << endl << endl << endl;
}
|
10,310 |
/************************************************
*
* Test for printf function!
* date:2018-5-16
* a :zhonghy
*
*************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
// Every thread prints its threadIdx.x together with the float argument.
__global__ void helloCUDA(float f)
{
    const int tid = threadIdx.x;
    printf("Hello thread %d, f=%f\n", tid, f);
}
// Only thread 0 of each block prints, avoiding duplicated output.
__global__ void helloCUDAZero(float f)
{
    if(threadIdx.x == 0)
    {
        printf("Hello thread %d, f=%f\n", threadIdx.x, f);
    }
}
// Launches helloCUDAZero with 5 threads; cudaDeviceSynchronize() drains
// the kernel so the device-side printf output is flushed before exit.
int main(int argc, char *argv[])
{
//helloCUDA<<<1, 5>>>(1.2345f);
//cudaDeviceSynchronize();
helloCUDAZero<<<1, 5>>>(1.2345f);
cudaDeviceSynchronize();
return EXIT_SUCCESS;
}
|
10,311 | #include <stdio.h>
// Integer matrix multiply t = a * b, with a of size m0 x n0 and b of size
// m1 x n1 (caller enforces n0 == m1). Launch layout: one block per result
// row (blockIdx.x in [0, m0)) and one thread per result column
// (threadIdx.x in [0, n1)), so it assumes n1 fits in one block.
// NOTE(review): parameters m0 and m1 are unused inside the kernel.
__global__ void MatMul(int *a, int *b, int *t, int m0, int n0, int m1, int n1)
{
int nthColumn = threadIdx.x, mthRow = blockIdx.x;
int temp = 0;
// Dot product of row mthRow of a with column nthColumn of b.
for( int i = 0; i < n0 ; i++ )
{
temp += a[mthRow*n0 + i] * b[nthColumn + i*n1];
// printf("%d %d: %d %d\n", mthRow, nthColumn, a[mthRow*n0 + i], b[nthColumn + i*n1]);
}
t[mthRow*n1 + nthColumn] = temp;
}
// Reads two matrices from stdin, multiplies them on the GPU (one block
// per result row, one thread per result column) and prints the product.
// NOTE(review): host buffers a, b, t are never free()d before return.
int main() {
int *a, *b, *t, m0, n0, m1, n1, i, j;
int *d_a, *d_b, *d_t;
printf("Enter value of m0\n"); scanf("%d", &m0);
printf("Enter value of n0\n"); scanf("%d", &n0);
int size0 = sizeof(int)*m0*n0;
printf("Enter value of m1\n"); scanf("%d", &m1);
printf("Enter value of n1\n"); scanf("%d", &n1);
int size1 = sizeof(int)*m1*n1;
int sizet = sizeof(int)*m0*n1;
// Inner dimensions must agree for the product to exist.
if(n0!=m1)
{
printf("Invalid matrix dimensions.\n");
exit(0);
}
a = (int *)malloc(m0*n0*sizeof(int));
b = (int *)malloc(m1*n1*sizeof(int));
t = (int *)malloc(m0*n1*sizeof(int));
printf("Enter input matrix A\n");
for(i=0; i< m0*n0; i++)
scanf("%d", &a[i]);
printf("Enter input matrix B\n");
for(i=0; i< m1*n1; i++)
scanf("%d", &b[i]);
cudaMalloc((void**)&d_a, size0);
cudaMalloc((void**)&d_b, size1);
cudaMalloc((void**)&d_t, sizet);
cudaMemcpy(d_a, a, size0, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size1, cudaMemcpyHostToDevice);
// m0 blocks x n1 threads; the blocking copy below synchronizes with it.
MatMul<<<m0,n1>>>(d_a, d_b, d_t, m0, n0, m1, n1);
cudaMemcpy(t, d_t, sizet, cudaMemcpyDeviceToHost);
printf("result vector:\n");
for(i=0; i<m0; i++) {
for(j =0; j<n1; j++)
printf("%d ", t[i*n1+j]);
printf("\n");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_t);
return 0;
}
// 2 3 3 2 1 2 3 4 5 6 1 2 3 4 5 6 |
10,312 | #include "includes.h"
// Row-major linear index of element (row, col) in a matrix with `width` columns.
__device__ inline unsigned int RM_Index(unsigned int row, unsigned int col, unsigned int width) {
    const unsigned int rowStart = row * width;
    return rowStart + col;
}
// Normalizes the (n_classes_ x n_features_, row-major) feature-weight
// matrix in place: each class's weight for a feature is divided by that
// class's total in per_class_sum_. One thread per feature column; the 1D
// grid must cover all n_features_ columns.
__global__ void ComplementNBNormalizeKernel(float *feature_weights_, float *per_class_sum_, unsigned int n_classes_, unsigned int n_features_) {
// Each thread will take one feature
int feat_col = threadIdx.x + (blockIdx.x * blockDim.x);
unsigned int i = 0;
if (feat_col < n_features_) { /* Boundary condition check */
for (i = 0; i < n_classes_; ++i) { // For each class
feature_weights_[RM_Index(i, feat_col, n_features_)] /= per_class_sum_[i];
}
}
} |
10,313 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#define max_target_size 40000000
#define max_pattern_size 40000000
#define n_thread 256
#define NUM_THREADS_PER_BLOCK 256
#define SIZE_OF_CHUNK 32
using namespace std;
void buildLPS(char*, int*, int);
void seq_kmp(char*, char*, int*, int*, int, int);
void check_CUDA_Error(const char*);
double single_gpu(char*, char*, int*, int*, int, int, int);
// Reads (and clears) the sticky CUDA error state; if any prior launch or
// API call failed, prints `msg` plus the error string and exits.
void check_CUDA_Error(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// Brute-force exact matching: sets ans[p] = 1 at every position p where
// pattern occurs in target. ans must be zero-initialized by the caller.
void naive(char* target, char* pattern, int* ans, int target_size, int pattern_size){
    for (int start = 0; start < target_size - pattern_size + 1; start++){
        int matched = 0;
        while (matched < pattern_size && target[start + matched] == pattern[matched]){
            matched++;
        }
        if (matched == pattern_size){
            ans[start] = 1;
        }
    }
}
// Builds the KMP longest-proper-prefix-suffix table for `pattern` of
// length `len`: lps[i] is the length of the longest proper prefix of
// pattern[0..i] that is also a suffix of it.
void buildLPS(char* pattern, int* lps, int len){
    // Bug fix: the original never wrote lps[0]. With a heap-allocated
    // table that left lps[0] uninitialized, corrupting every
    // `k = lps[k-1]` fallback in the matchers.
    if (len > 0){
        lps[0] = 0;
    }
    for (int i = 1, j = 0; i < len; i++){
        while (j > 0 && (pattern[i] != pattern[j])){
            j = lps[j - 1];
        }
        if (pattern[i] == pattern[j]){
            j++;
        }
        lps[i] = j;
    }
}
// Sequential KMP over the whole target: sets ans[p] = 1 at every position
// p where pattern occurs. `lps` is the prefix table from buildLPS; `ans`
// must be zero-initialized by the caller.
void seq_kmp(char* target, char* pattern, int* lps, int* ans, int target_size, int pattern_size){
    int matched = 0;
    for (int pos = 0; pos < target_size; pos++){
        // Fall back through the prefix table until the next char can extend.
        while (matched > 0 && target[pos] != pattern[matched]){
            matched = lps[matched - 1];
        }
        if (target[pos] == pattern[matched]){
            matched++;
        }
        if (matched == pattern_size){
            ans[pos - matched + 1] = 1;
            matched = lps[matched - 1];
        }
    }
    return;
}
// GPU KMP: each thread scans one chunk of `target` of length chunk_size,
// extended by pattern_size extra characters so matches that straddle a
// chunk boundary are still found by the left-hand thread. ans[p] = 1 at
// every match position p; lps is the prefix table from buildLPS.
__global__ void kmp_kernel(char* target, char* pattern, int* lps, int* ans, int target_size, int pattern_size, int chunk_size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// [i, j) is this thread's scan window, clamped to the target length.
int i = (idx * chunk_size);
int j = (idx * chunk_size) + chunk_size + pattern_size;
if (i >= target_size){
return;
}
if (j > target_size){
j = target_size;
}
int k = 0;
while (i < j){
// Standard KMP: fall back through lps until the next char can extend.
while (k > 0 && (target[i] != pattern[k])){
k = lps[k - 1];
}
if (target[i] == pattern[k]){
k++;
}
if (k == pattern_size){
ans[i - k + 1] = 1;
k = lps[k-1];
}
i++;
}
return;
}
// Same chunked KMP as kmp_kernel, but the lps table is first staged into
// shared memory cooperatively (each thread copies one slice) so the inner
// fallback loop reads shared instead of global memory.
// NOTE(review): the shared buffer is a fixed 12000 ints, so this assumes
// pattern_size <= 12000 — confirm callers guarantee that.
__global__ void kmp_kernel_share(char* target, char* pattern, int* lps, int* ans, int target_size, int pattern_size, int chunk_size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i = (idx * chunk_size);
int j = (idx * chunk_size) + chunk_size + pattern_size;
__shared__ int n_lps[12000];
__syncthreads();
// Each thread copies its slice of lps into shared memory.
int pattern_chunk_size = ceil((double) pattern_size / NUM_THREADS_PER_BLOCK);
int pi = threadIdx.x * pattern_chunk_size;
int pj = threadIdx.x * pattern_chunk_size + pattern_chunk_size;
while (pi < pattern_size && pi < pj){
n_lps[pi] = lps[pi];
pi++;
}
// Barrier before any thread reads the shared table (threads that return
// below do so only after this point, so all threads reach the barrier).
__syncthreads();
if (i >= target_size){
return;
}
if (j > target_size){
j = target_size;
}
int k = 0;
while (i < j){
while (k > 0 && (target[i] != pattern[k])){
k = n_lps[k - 1];
}
if (target[i] == pattern[k]){
k++;
}
if (k == pattern_size){
ans[i - k + 1] = 1;
k = n_lps[k-1];
}
i++;
}
return;
}
// Runs the chunked KMP search on one GPU: allocates device buffers,
// copies inputs, launches kmp_kernel (shared == 0) or kmp_kernel_share
// (shared != 0), and copies the match flags back into `ans`.
// Returns the wall-clock seconds for the whole allocate/copy/run/copy-back
// sequence (measured with clock()).
double single_gpu(char* target, char* pattern, int* lps, int* ans, int target_size, int pattern_size, int shared){
char* g_target;
char* g_pattern;
int* g_lps;
int* g_ans;
clock_t start, end;
double time_taken;
start = clock();
cudaMalloc((void**)&g_target, target_size * sizeof(char));
cudaMalloc((void**)&g_pattern, pattern_size * sizeof(char));
cudaMalloc((void**)&g_lps, pattern_size * sizeof(int));
cudaMalloc((void**)&g_ans, target_size * sizeof(int));
check_CUDA_Error("memory allocation on device");
cudaMemcpy(g_target, target, target_size * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(g_pattern, pattern, pattern_size * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(g_lps, lps, pattern_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(g_ans, ans, target_size * sizeof(int), cudaMemcpyHostToDevice);
check_CUDA_Error("memory copy to device");
// Blocks = ceil(num_chunks / NUM_THREADS_PER_BLOCK); one chunk per thread.
int num_chunks = (target_size - 1) / SIZE_OF_CHUNK + 1;
int num_blocks = 0;
for (int i = 0; i < num_chunks; i += NUM_THREADS_PER_BLOCK){
num_blocks++;
}
// NOTE(review): numBlock, numThread and chunk_size below are leftovers
// from the commented-out launch strategies and are unused.
dim3 numBlock(1, 1, 1);
dim3 numThread(n_thread, 1, 1);
int chunk_size = ceil((double) target_size / (n_thread));
//kmp_kernel<<<(target_size / pattern_size + n_thread) / n_thread, n_thread>>>(g_target, g_pattern, g_lps, g_ans, target_size, pattern_size);
if (shared == 0){
kmp_kernel<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(g_target, g_pattern, g_lps, g_ans, target_size, pattern_size, SIZE_OF_CHUNK);
} else {
kmp_kernel_share<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(g_target, g_pattern, g_lps, g_ans, target_size, pattern_size, SIZE_OF_CHUNK);
}
//kmp_kernel<<<numBlock, numThread>>>(g_target, g_pattern, g_lps, g_ans, target_size, pattern_size, chunk_size);
check_CUDA_Error("Launch kernal");
cudaDeviceSynchronize();
check_CUDA_Error("DeviceSynchronize");
cudaMemcpy(ans, g_ans, target_size * sizeof(int), cudaMemcpyDeviceToHost);
check_CUDA_Error("memory copy to host");
end = clock();
time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
cudaFree(g_target);
cudaFree(g_pattern);
cudaFree(g_lps);
cudaFree(g_ans);
return time_taken;
}
// Splits the target across two GPUs. Each device gets a slice of length
// target_size/2 + pattern_size - 1 (the second slice starts at
// target_size - slice_len), so the pattern-length overlap guarantees
// matches straddling the midpoint are found. Falls back to single_gpu
// when the overlap would exceed 80% of the target. Returns wall-clock
// seconds for the whole sequence.
double multi_gpu(char* target, char* pattern, int* lps, int* ans, int target_size, int pattern_size, int shared){
int slice_len = target_size / 2 + pattern_size - 1;
float slice_overlap = (float) slice_len / (float) target_size;
if (slice_overlap > 0.8)
{
printf("----Multi GPU utilization low, switching to Single version----\n");
return single_gpu(target, pattern, lps, ans, target_size, pattern_size, shared);
}
// NOTE(review): for odd target_size this rounds the slice up so the two
// slices cover the whole target — confirm intent of the 0.5 threshold.
if (slice_overlap < 0.5)
{
slice_len++;
}
int offset = target_size - slice_len;
char* g_target_first;
char* g_target_second;
char* g_pattern_first;
char* g_pattern_second;
int* g_lps_first;
int* g_lps_second;
int* g_ans_first;
int* g_ans_second;
clock_t start, end;
double time_taken;
start = clock();
// --- Device 0: first slice ---
cudaSetDevice(0);
check_CUDA_Error("Set Device 0 as current");
cudaMalloc((void**)&g_target_first, slice_len * sizeof(char));
cudaMalloc((void**)&g_pattern_first, pattern_size * sizeof(char));
cudaMalloc((void**)&g_lps_first, pattern_size * sizeof(int));
cudaMalloc((void**)&g_ans_first, slice_len * sizeof(int));
check_CUDA_Error("memory allocation on device 0");
cudaMemcpy(g_target_first, target, slice_len * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(g_pattern_first, pattern, pattern_size * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(g_lps_first, lps, pattern_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(g_ans_first, ans, slice_len * sizeof(int), cudaMemcpyHostToDevice);
check_CUDA_Error("memory copy to device 0");
// --- Device 1: second slice, starting at `offset` ---
cudaSetDevice(1);
check_CUDA_Error("Set Device 1 as current");
cudaMalloc((void**)&g_target_second, slice_len * sizeof(char));
cudaMalloc((void**)&g_pattern_second, pattern_size * sizeof(char));
cudaMalloc((void**)&g_lps_second, pattern_size * sizeof(int));
cudaMalloc((void**)&g_ans_second, slice_len * sizeof(int));
check_CUDA_Error("memory allocation on device 1");
cudaMemcpy(g_target_second, &target[offset], slice_len * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(g_pattern_second, pattern, pattern_size * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(g_lps_second, lps, pattern_size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(g_ans_second, &ans[offset], slice_len * sizeof(int), cudaMemcpyHostToDevice);
check_CUDA_Error("memory copy to device 1");
int num_chunks = (slice_len - 1) / SIZE_OF_CHUNK + 1;
int num_blocks = 0;
for (int i = 0; i < num_chunks; i += NUM_THREADS_PER_BLOCK){
num_blocks++;
}
// Launch on both devices; launches are asynchronous, so the two GPUs run
// concurrently until the per-device synchronizes below.
cudaSetDevice(0);
if (shared == 0){
kmp_kernel<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(g_target_first, g_pattern_first, g_lps_first, g_ans_first, slice_len, pattern_size, SIZE_OF_CHUNK);
} else {
kmp_kernel_share<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(g_target_first, g_pattern_first, g_lps_first, g_ans_first, slice_len, pattern_size, SIZE_OF_CHUNK);
}
check_CUDA_Error("Launch kernal on device 0");
cudaSetDevice(1);
if (shared == 0){
kmp_kernel<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(g_target_second, g_pattern_second, g_lps_second, g_ans_second, slice_len, pattern_size, SIZE_OF_CHUNK);
} else {
kmp_kernel_share<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(g_target_second, g_pattern_second, g_lps_second, g_ans_second, slice_len, pattern_size, SIZE_OF_CHUNK);
}
check_CUDA_Error("Launch kernal on device 1");
cudaSetDevice(0);
cudaDeviceSynchronize();
check_CUDA_Error("DeviceSynchronize");
cudaMemcpy(ans, g_ans_first, slice_len * sizeof(int), cudaMemcpyDeviceToHost);
check_CUDA_Error("device 0 memory copy to host");
cudaSetDevice(1);
cudaDeviceSynchronize();
check_CUDA_Error("DeviceSynchronize");
cudaMemcpy(&ans[offset], g_ans_second, slice_len * sizeof(int), cudaMemcpyDeviceToHost);
check_CUDA_Error("device 1 memory copy to host");
end = clock();
time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
cudaSetDevice(0);
cudaFree(g_target_first);
cudaFree(g_pattern_first);
cudaFree(g_lps_first);
cudaFree(g_ans_first);
cudaSetDevice(1);
cudaFree(g_target_second);
cudaFree(g_pattern_second);
cudaFree(g_lps_second);
cudaFree(g_ans_second);
return time_taken;
}
/*
 * Benchmarks four string-matching implementations on the same input:
 * naive CPU scan, sequential KMP, single-GPU KMP and multi-GPU KMP.
 * Usage: ./kmp <shared_memory> <pinned_memory> <pattern_provided> <target_file> <pattern_file>
 *   argv[1] shared           : 1 = use shared-memory kernels
 *   argv[2] pinned           : 1 = use pinned (page-locked) host buffers
 *   argv[3] pattern_provided : 1 = read pattern from argv[5], 0 = read from stdin
 */
int main(int argc, char* argv[]){
    char* pattern;
    char* target;
    int target_size;
    int pattern_size;
    if (argc < 5){
        printf("./kmp <shared_memory> <pinned_memory> <pattern_provided> <target_file> <pattern_file>\n");
        exit(1);
    }
    FILE *fp = fopen(argv[4], "r");
    if (!fp)
    {
        exit(EXIT_FAILURE);
    }
    fseek(fp, 0, SEEK_END);
    int fsize = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    /* FIX: allocate one extra byte and NUL-terminate after fread(); the old
       code called strlen() on a buffer that fread() never terminates
       (undefined behavior). The fread result is now checked as well. */
    if (atoi(argv[2]) == 0){
        target = (char *) malloc(fsize + 1);
    } else{
        cudaHostAlloc((void **)&target, fsize + 1, cudaHostAllocDefault);
        check_CUDA_Error("Pinned memory allocation on host - target");
    }
    if (fread(target, 1, fsize, fp) != (size_t) fsize){
        printf("Failed to read %s\n", argv[4]);
        exit(EXIT_FAILURE);
    }
    target[fsize] = '\0';
    fclose(fp);
    target_size = strlen(target) - 1; /* drop trailing newline */
    if (atoi(argv[3]) == 0){
        /* interactive: pattern typed on stdin */
        const int pat_buffer_size = 40000000;
        if (atoi(argv[2]) == 0){
            pattern = (char *)malloc(pat_buffer_size * sizeof(char));
        } else{
            cudaHostAlloc((void **)&pattern, pat_buffer_size, cudaHostAllocDefault);
            check_CUDA_Error("Pinned memory allocation on host - pattern");
        }
        printf("Please type a pattern/keyword you would like to search:\n");
        cin >> pattern;
        pattern_size = strlen(pattern);
    } else {
        /* pattern read from file argv[5]; same NUL-termination fix as target */
        fp = fopen(argv[5], "r");
        if (!fp)
        {
            exit(EXIT_FAILURE);
        }
        fseek(fp, 0, SEEK_END);
        fsize = ftell(fp);
        fseek(fp, 0, SEEK_SET);
        if (atoi(argv[2]) == 0){
            pattern = (char *)malloc((fsize + 1) * sizeof(char));
        } else{
            cudaHostAlloc((void **)&pattern, fsize + 1, cudaHostAllocDefault);
            check_CUDA_Error("Pinned memory allocation on host - pattern");
        }
        if (fread(pattern, 1, fsize, fp) != (size_t) fsize){
            printf("Failed to read %s\n", argv[5]);
            exit(EXIT_FAILURE);
        }
        pattern[fsize] = '\0';
        fclose(fp);
        pattern_size = strlen(pattern) - 1; /* drop trailing newline */
    }
    int* lps = (int*) malloc(pattern_size * sizeof(int));
    int* ans;
    if (atoi(argv[2]) == 0){
        ans = (int *)malloc(target_size * sizeof(int));
    } else{
        cudaHostAlloc((void **)&ans, target_size * sizeof(int), cudaHostAllocDefault);
        check_CUDA_Error("Pinned memory allocation on host - ans");
    }
    int* seq_ans = (int*) malloc(target_size * sizeof(int));
    int* naive_ans = (int*) malloc(target_size * sizeof(int));
    /* zero every result buffer before use */
    for (int i = 0; i < target_size; i++){
        ans[i] = 0;
        seq_ans[i] = 0;
        naive_ans[i] = 0;
    }
    clock_t start, end;
    int count;
    double time_taken;
    /* ---- naive CPU baseline ---- */
    start = clock();
    naive(target, pattern, naive_ans, target_size, pattern_size);
    end = clock();
    time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
    count = 0;
    for (int i = 0; i < target_size; i++){
        if (naive_ans[i] != 0){
            count++;
        }
    }
    printf("naive cpu found: %d, time taken: %lf\n", count, time_taken);
    /* ---- sequential KMP ---- */
    for (int i = 0; i < pattern_size; i++){
        lps[i] = 0;
    }
    buildLPS(pattern, lps, pattern_size);
    start = clock();
    seq_kmp(target, pattern, lps, seq_ans, target_size, pattern_size);
    end = clock();
    time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
    count = 0;
    for (int i = 0; i < target_size; i++){
        if (seq_ans[i] != 0){
            count++;
        }
    }
    printf("kmp cpu found: %d, time taken: %lf\n", count, time_taken);
    /* ---- single GPU ---- */
    for (int i = 0; i < pattern_size; i++){
        lps[i] = 0;
    }
    buildLPS(pattern, lps, pattern_size);
    int shared = atoi(argv[1]);
    cudaFree(0); /* force context creation so timing excludes CUDA init cost */
    time_taken = single_gpu(target, pattern, lps, ans, target_size, pattern_size, shared);
    count = 0;
    for (int i = 0; i < target_size; i++){
        if (ans[i] != 0){
            count++;
        }
    }
    printf("kmp single_gpu found: %d, time taken: %lf\n", count, time_taken);
    /* ---- multi GPU ---- */
    for (int i = 0; i < target_size; i++){
        ans[i] = 0;
    }
    cudaSetDevice(0);
    cudaFree(0);
    cudaSetDevice(1);
    cudaFree(0);
    time_taken = multi_gpu(target, pattern, lps, ans, target_size, pattern_size, shared);
    count = 0;
    for (int i = 0; i < target_size; i++){
        if (ans[i] != 0){
            count++;
        }
    }
    printf("kmp multi_gpu found: %d, time taken: %lf\n", count, time_taken);
    /* release host buffers (pinned vs. pageable must match how they were allocated) */
    if (atoi(argv[2]) != 0){
        cudaFreeHost(target);
        cudaFreeHost(pattern);
        cudaFreeHost(ans);
    } else {
        free(target);
        free(pattern);
        free(ans);
    }
    free(lps);
    /* FIX: seq_ans and naive_ans were previously leaked */
    free(seq_ans);
    free(naive_ans);
    return 0;
}
|
10,314 | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#define N 2//64
// Minimal verifier test kernel: assigns the constant 2.0f to a dead local.
// No observable side effects.
__global__ void foo() {
  float value = (float)2;
}
|
10,315 | #include <stdio.h>
#include <stdlib.h>
/**
* Tempo sequencial: 0m0.404s
* Tempo CUDA: 0m1.970s (overhead de copiar os dados para a GPU)
*/
/*
 * Block-level inclusive (Hillis-Steele) scan.
 * Each block scans its own blockDim.x-element slice of `a` in shared memory
 * and publishes the block total into s[blockIdx.x + 1] so the host can scan
 * the per-block sums afterwards.  Requires blockDim.x <= 1024.
 *
 * Fixes vs. the previous version:
 *  - __syncthreads() was called inside the divergent branch
 *    `if (thread >= i)`, which is undefined behavior; all barriers are now
 *    reached by every thread of the block.
 *  - the read of partial[thread - i] and the write of partial[thread] were
 *    not separated by a barrier (intra-block data race); they are now split
 *    into a read phase and a write phase.
 *  - the last block wrote s[gridDim.x], one element past the end of the
 *    `s` allocation (it has exactly gridDim.x entries); that write is now
 *    suppressed (the host never read it).
 *  - out-of-range threads of the last block zero their shared slot instead
 *    of leaving it uninitialized.
 */
__global__ void scan_cuda(double* a, double *s, int width) {
  int thread = threadIdx.x;
  int block = blockIdx.x * blockDim.x;
  __shared__ double partial[1024];
  // Load this block's slice from global into shared memory.
  if (block + thread < width) {
    partial[thread] = a[block + thread];
  } else {
    partial[thread] = 0;  // tail of the last block: defined value
  }
  __syncthreads();
  // Inclusive scan in log2(blockDim.x) steps.
  for (int i = 1; i < blockDim.x; i *= 2) {
    // Read phase: every thread computes its updated value first ...
    double temp = partial[thread];
    if (thread >= i) {
      temp += partial[thread - i];
    }
    __syncthreads();
    // ... write phase: only after all reads of this step are done.
    partial[thread] = temp;
    __syncthreads();
  }
  // Store the scanned slice back to global memory.
  if (block + thread < width) {
    a[block + thread] = partial[thread];
  }
  // Last thread of each block publishes the block total; the final block
  // must not write (s[0] is filled in by the host).
  if (thread == blockDim.x - 1 && blockIdx.x + 1 < gridDim.x) {
    s[blockIdx.x + 1] = a[block + thread];
  }
}
// Completes the global scan: adds the exclusive per-block offset
// s[blockIdx.x] to every in-range element of this block's slice of `a`.
__global__ void add_cuda(double *a, double *s, int width) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < width) {
    a[gid] += s[blockIdx.x];
  }
}
/*
 * Inclusive scan of a 40M-element array on the GPU:
 * per-block scan (scan_cuda), host-side scan of the block sums, then
 * add_cuda broadcasts each block's offset.  Prints the final element.
 */
int main()
{
    int width = 40000000;
    int size = width * sizeof(double);
    int block_size = 1024;
    int num_blocks = (width-1)/block_size + 1;   /* ceil-div */
    int s_size = (num_blocks * sizeof(double));
    double *a = (double*) malloc (size);
    double *s = (double*) malloc (s_size);
    /* FIX: 320 MB host allocations can fail; check before writing */
    if (a == NULL || s == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for(int i = 0; i < width; i++)
        a[i] = i;
    double *d_a, *d_s;
    /* allocate vectors "a" and "s" on the device */
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_s, s_size);
    /* copy vector "a" to the device */
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    dim3 dimGrid(num_blocks, 1, 1);
    dim3 dimBlock(block_size, 1, 1);
    /* per-block scan */
    scan_cuda<<<dimGrid, dimBlock>>>(d_a, d_s, width);
    /* FIX: kernel launches are asynchronous and report nothing on their
       own -- surface launch errors instead of silently reading garbage */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "scan_cuda: %s\n", cudaGetErrorString(err));
        return 1;
    }
    /* copy block sums to the host (cudaMemcpy synchronizes) */
    cudaMemcpy(s, d_s, s_size, cudaMemcpyDeviceToHost);
    /* scan the block sums on the host */
    s[0] = 0;
    for (int i = 1; i < num_blocks; i++)
        s[i] += s[i-1];
    /* copy the scanned offsets back to the device */
    cudaMemcpy(d_s, s, s_size, cudaMemcpyHostToDevice);
    /* broadcast offsets */
    add_cuda<<<dimGrid, dimBlock>>>(d_a, d_s, width);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "add_cuda: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    printf("\na[%d] = %f\n", width-1, a[width-1]);
    cudaFree(d_a);
    cudaFree(d_s);
    /* FIX: host buffers were leaked */
    free(a);
    free(s);
    return 0;
}
|
10,316 | #include "includes.h"
// Sums the per-character bit counts into *finalsize: the total length in
// bits of the compressed stream over *orig_number_of_char characters.
__device__ void computearray_size(int* block_cntr_array,int *finalsize,int *orig_number_of_char)
{
    int total = 0;
    for (int i = 0; i < *orig_number_of_char; ++i)
    {
        total += block_cntr_array[i];
    }
    *finalsize = total;
}
__device__ int char_huffman_table_gpu[MAX_CHAR][MAX_CHAR-1]; //To write the output from compression in GPU //char *compressedfile_array=0; bool *compressedfile_array=0; bool *finalcompressed_array=0; // To keep track of how many characters each block wrote int *block_cntr_array=0; int *block_cntr_array_check=0; int *d_last_byte_padding=0; int *finalsize=0; int *orig_number_of_char=0; int *huffman_check = (int *)malloc((MAX_CHAR)*(MAX_CHAR-1) *sizeof(int));
bool *d_bool = 0;
bool *h_bool = 0;
// Packs each thread's per-character bit run (stored 255 slots apart in
// compressedfile_array) into the contiguous output finalcompressed_array.
// The destination offset is the prefix sum of all earlier characters'
// counts, recomputed serially by every thread (O(n) per thread).
__global__ void final_compression(int *block_cntr_array,bool *compressedfile_array,bool *finalcompressed_array,int number_of_char)
{
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    if (tid >= number_of_char)
    {
        return;
    }
    // Serial prefix sum over the counts of all preceding characters.
    int dst = 0;
    for (int n = 0; n < tid; ++n)
    {
        dst = dst + block_cntr_array[n];
    }
    // Copy this character's emitted bits to their final position.
    int src = tid * 255;
    for (int n = 0; n < block_cntr_array[tid]; ++n)
    {
        finalcompressed_array[dst + n] = compressedfile_array[src + n];
    }
}
/*
 * One thread per input character: looks up the character's Huffman code in
 * char_huffman_table2 (row = character value, 255 columns; entries are 0/1
 * code bits, any other value terminates the code) and expands it into
 * compressedfile_array as booleans, 255 slots reserved per character.
 * The number of bits emitted for the character is recorded in
 * block_cntr_array[thread index].
 *
 * NOTE(review): every in-range thread calls computearray_size(), which
 * zeroes and re-accumulates *finalsize while other threads (in other
 * blocks) may still be writing block_cntr_array -- a cross-thread data
 * race; the final value of *finalsize is not guaranteed correct.
 * Presumably this was meant to run once after all counts are written
 * (e.g. a separate kernel launch) -- TODO confirm with the host caller.
 * d_last_byte_padding is currently unused (its update is commented out).
 */
__global__ void compress_file_gpu(unsigned char *d_input,bool *compressedfile_array,int *char_huffman_table2,int *block_cntr_array,int* d_last_byte_padding,int *finalsize,int *orig_number_of_char,int number_of_char)
{
//int write_counter=0,
int block_counter=0; // number of code bits emitted for this character
unsigned char input_char;
//unsigned char output_char = 0x0;
//unsigned char end_of_file = 255;
//unsigned char mask = 0x01; //00000001;
int index_file=(blockIdx.x*blockDim.x+threadIdx.x)*255; // this thread's slot in the output bit array
int index_blocks=blockIdx.x*blockDim.x+threadIdx.x;     // this thread's character index
if(index_blocks < number_of_char)
{
//for(int i=0;i<MAX_CHAR;i++)
//{
//int *row = (int*)((char*)char_huffman_table2 + i * pitch);
//for (int c = 0; c < MAX_CHAR-1; ++c) {
// char_huffman_table_gpu[i][c] = row[c];
//}
//}
input_char = d_input[index_blocks];
// Walk the character's code row until a non-0/1 sentinel ends the code.
for(int i = 0 ; i < (MAX_CHAR - 1) ; i++)
{
if(char_huffman_table2[input_char*255+i] == 0) //detect if current character on particular position has 0 or 1
{
//output_char = output_char << 1; //if 0 then shift bits one position to left (last bit after shifting is 0)
compressedfile_array[index_file+i] = false;
//write_counter++;
block_counter++;
}
else if(char_huffman_table2[input_char*255+i] == 1)
{
//output_char = output_char << 1; //if 1 then shift bits one position to left...
//output_char = output_char | mask; //...and last bit change to: 1
//write_counter++;
compressedfile_array[index_file+i] = true;
block_counter++;
}
else //-1: end-of-code sentinel
{
/*if(input_char == end_of_file) //if EOF is detected then write current result to file
{
if(write_counter != 0)
{
output_char = output_char << (8-write_counter);
compressedfile_array[index_file]=output_char;
output_char = 0x0;
}
else //write_counter == 0
{
compressedfile_array[index_file]=output_char;
}
}*/
break;
}
/*if(write_counter == 8) //if result achieved 8 (size of char) then write it to compressed_file
{
compressedfile_array[index_file]=output_char;
output_char = 0x0;
write_counter = 0;
}*/
}
block_cntr_array[index_blocks]=block_counter;
//*d_last_byte_padding = write_counter; //to decompress file we have to know how many bits in last byte have been written
//update_config(write_counter); //TODO: comment this out for the final measurements
// NOTE(review): racy -- see the header comment above.
computearray_size(block_cntr_array,finalsize,orig_number_of_char);
//final_compression(block_cntr_array,compressedfile_array,finalcompressed_array);
}
}
10,317 | //pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is racy.
//
//The memcpy resolves to a non-integer number of element writes so we have to
//handle the arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
// Two 16-bit fields, so sizeof(s_t) == 4; the 5-byte memcpy in the kernel
// below therefore spills one byte into the neighboring element.
typedef struct {
short x;
short y;
} s_t; //< sizeof(s_t) == 4
// Each thread copies len == 5 bytes starting at its own element.  Because
// sizeof(s_t) is 4, the copy overruns into the next element, so adjacent
// threads race on that byte -- deliberate (this file tests a race checker).
__global__ void k(s_t *in, s_t *out) {
  const size_t len = 5;
  memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
10,318 | /* Render the Mandelbrot set using Orbit Traps */
#include <stdlib.h>
#include <stdio.h>
#include <malloc.h>
#include <tiffio.h>
#include <assert.h>
/* CUDA_N is the resolution of the output image (size CUDA_N x CUDA_N) */
#define CUDA_N 16000
/* 8-bit red, green, and blue channels */
typedef struct {
unsigned char r, g, b;
} pixel;
/* Coefficients of the cosine palette color(t) = a + b*cos(2pi(c*t+d)),
 * one (a, b, c, d) quadruple per color channel (see color_pxl). */
typedef struct {
double a_r, a_g, a_b;
double b_r, b_g, b_b;
double c_r, c_g, c_b;
double d_r, d_g, d_b;
} palette;
/* All render state: device/host buffers, output paths, and the viewport /
 * orbit-trap parameters filled in by parse_args. */
typedef struct {
pixel *d_pixels;    /* device framebuffer */
pixel *h_pixels;    /* host copy of the framebuffer */
palette *d_palette; /* device palette */
palette *h_palette; /* host palette */
char *outfile;      /* output TIFF path */
char *palfile;      /* optional palette input file */
double esc_radius;  /* escape radius for the iteration */
int counter_max;    /* max iterations per pixel */
double x, y, ref_x, ref_y; /* image center and point-trap reference */
double a, b, c;     /* line-trap coefficients: ax + by + c = 0 */
double width;       /* half-width of the viewport */
int linedist;       /* 0 = point trap, 1 = line trap */
} fractal;
/*
 * Write the CUDA_N x CUDA_N render in fract->h_pixels to fract->outfile as
 * a deflate-compressed contiguous 8-bit RGB TIFF.  Each row is emitted
 * mirrored left-to-right (historical behavior of this renderer).
 * Exits on any I/O failure.
 */
void
write_to_tiff (fractal *fract)
{
  int row, col, idx;
  TIFF *output;
  char *raster;
  pixel *img = (*fract).h_pixels;
  printf("Writing to file.\n");
  /* Open the output image */
  if ((output = TIFFOpen (fract->outfile, "w")) == NULL)
    {
      fprintf (stderr, "Could not open outgoing image.\n");
      exit (EXIT_FAILURE);
    }
  /* malloc space for one scanline of 8-bit RGB */
  raster = (char*) malloc (CUDA_N * 3 * sizeof (char));
  if (raster == NULL)
    {
      printf ("malloc() failed in write_to_tiff.\n");
      exit (EXIT_FAILURE);
    }
  /* Write the tiff tags to the file */
  TIFFSetField (output, TIFFTAG_IMAGEWIDTH, CUDA_N);
  TIFFSetField (output, TIFFTAG_IMAGELENGTH, CUDA_N);
  TIFFSetField (output, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
  TIFFSetField (output, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
  TIFFSetField (output, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
  TIFFSetField (output, TIFFTAG_BITSPERSAMPLE, 8);
  TIFFSetField (output, TIFFTAG_SAMPLESPERPIXEL, 3);
  printf("Wrote image file tags.\n");
  for (row = 0; row < CUDA_N; row++)
    {
      for (col = 0; col < CUDA_N; col++)
        {
          /* BUG FIX: the previous index row*CUDA_N + (CUDA_N - col) skipped
             column CUDA_N-1 of each row and, on the last row at col == 0,
             read one element past the end of img.  (CUDA_N - 1 - col)
             mirrors the row while staying in bounds. */
          idx = row*CUDA_N + (CUDA_N - 1 - col);
          raster[col*3] = img[idx].r;
          raster[col*3+1] = img[idx].g;
          raster[col*3+2] = img[idx].b;
        }
      /* BUG FIX: TIFFWriteScanline's 4th argument is the sample plane
         (ignored for PLANARCONFIG_CONTIG, and a uint16 -- CUDA_N*3
         overflowed it); pass 0. */
      if (TIFFWriteScanline (output, raster, row, 0) != 1)
        {
          fprintf (stderr, "Could not write image\n");
          exit (EXIT_FAILURE);
        }
    }
  free (raster);
  /* close the file */
  TIFFClose (output);
}
/* Cosine palette: channel(t) = a + b * cos(2*pi*(c*t + d)), scaled to 0..255.
 * Applied independently to the r, g and b channels using the coefficients
 * stored in *pal; results are written through the three output pointers. */
__device__ void
color_pxl(double t, palette *pal, double *r_out, double *g_out, double *b_out)
{
  const double two_pi = M_PI * 2.;
  *r_out = 255. * (pal->a_r + pal->b_r * cos(two_pi * (pal->c_r * t + pal->d_r)));
  *g_out = 255. * (pal->a_g + pal->b_g * cos(two_pi * (pal->c_g * t + pal->d_g)));
  *b_out = 255. * (pal->a_b + pal->b_b * cos(two_pi * (pal->c_b * t + pal->d_b)));
}
/* Euclidean distance between (x1, y1) and (x2, y2).
 * Note the argument order: both x coordinates first, then both y's. */
__device__ double
point_dist(double x1, double x2, double y1, double y2)
{
  double dx = x2 - x1;
  double dy = y2 - y1;
  return sqrt(dx * dx + dy * dy);
}
/* Perpendicular distance from (x0, y0) to the line a*x + b*y + c = 0:
 * |a*x0 + b*y0 + c| / sqrt(a^2 + b^2).  Caller guarantees a,b not both 0. */
__device__ double
line_dist(double x0, double y0, double a, double b, double c)
{
  double d, n;
  d = sqrt(a*a+b*b);
  /* BUG FIX: abs() is the integer overload here (only stdlib.h is
     included), which truncated the numerator to an int; fabs() keeps
     full double precision. */
  n = fabs(a*x0+b*y0+c);
  return n/d;
}
/*
 * Orbit-trap Mandelbrot kernel.  One thread per pixel on a 2D launch grid;
 * threads outside the CUDA_N x CUDA_N image return immediately.
 * For each pixel c = (x2, y2), iterates z = z^2 + c until |z|^2 >= esc or
 * count_max iterations, tracking the minimum distance from the orbit to
 * either the reference point (xref, yref) or the line ax + by + c = 0
 * (selected by linedist).  That minimum distance is then mapped to a color
 * via the cosine palette in *pal.
 */
__global__ void
render(pixel *pxls,
float xmin, float xmax, float ymin, float ymax,
double esc, int count_max,
double xref, double yref,
double a, double b, double c, int linedist,
palette *pal)
{
int i, j, idx;
float x1, y1, x2, y2, xtmp;
int counter = 0;
double dist = 1e9; // running minimum orbit-trap distance
double r_out, g_out, b_out;
/* compute x (i) and y (j) index from Block and Thread */
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if(i >= CUDA_N || j >= CUDA_N) return; /* verify inbounds of image */
/* find x and y cartesian points for pixel */
x1 = xmax - ( ((float) i / (float) CUDA_N) * (xmax - xmin) );
y1 = ymax - ( ((float) j / (float) CUDA_N) * (ymax - ymin) );
x2 = x1; // (x2, y2) is the constant c of z = z^2 + c
y2 = y1;
while( ( (x1*x1 + y1*y1) < esc ) && counter < count_max )
{
/* one Mandelbrot step: (x1, y1) <- (x1, y1)^2 + (x2, y2) */
xtmp = x1 * x1 - y1 * y1 + x2;
y1 = 2. * x1 * y1 + y2;
x1 = xtmp;
counter++;
/* orbit trap: track the closest approach to the point or line */
dist = min(dist,
linedist == 0 ? point_dist(x1,xref,y1,yref) : line_dist(x1, y1, a, b, c));
}
idx = i + j*CUDA_N;
color_pxl(dist, pal, &r_out, &g_out, &b_out);
/* NOTE(review): palette output can exceed 255 or go negative for some
   coefficient choices; the (char) cast relies on narrowing -- confirm
   palettes are kept in range. */
pxls[idx].r = (char) r_out;
pxls[idx].g = (char) g_out;
pxls[idx].b = (char) b_out;
}
/* Initialize *pal with cosine-palette coefficients: the built-in light blue
 * default when infile is NULL, otherwise twelve doubles read from the file
 * as four rows of "R G B" (the a, b, c and d quadruples). */
void pal_init(palette *pal, char *infile)
{
  FILE *pf;  /* renamed from `palette` to stop shadowing the type name */
  if (infile == NULL)
    {
      /* a nice light blue default */
      pal->a_r = 0.39; pal->a_g = 0.55; pal->a_b = 0.5;
      pal->b_r = 0.55; pal->b_g = 0.26; pal->b_b = 0.68;
      pal->c_r = 0.5;  pal->c_g = 1.5;  pal->c_b = 0.0;
      pal->d_r = 0.26; pal->d_g = 0.11; pal->d_b = 0.24;
      return;
    }
  if ((pf = fopen(infile, "r")) == NULL)
    {
      printf ("Error reading input file %s.\n", infile);
      exit (EXIT_FAILURE);
    }
  /* WARNING -- poor checks for malformed input here. */
  assert(fscanf (pf, "%lf %lf %lf\n", &(pal->a_r), &(pal->a_g), &(pal->a_b)) != EOF);
  assert(fscanf (pf, "%lf %lf %lf\n", &(pal->b_r), &(pal->b_g), &(pal->b_b)) != EOF);
  assert(fscanf (pf, "%lf %lf %lf\n", &(pal->c_r), &(pal->c_g), &(pal->c_b)) != EOF);
  assert(fscanf (pf, "%lf %lf %lf\n", &(pal->d_r), &(pal->d_g), &(pal->d_b)) != EOF);
  (void) fclose (pf);
} /* end pal_init */
/* Print the command-line help text to stdout (every line is a fixed string,
 * so fputs is used; output is byte-identical to the old printf version). */
void
print_usage ()
{
  fputs ("Render the Mandelbrot set using Orbit Traps.\n\n", stdout);
  fputs ("mandel usage:\n", stdout);
  fputs ("mandel [-options ...]\n\n", stdout);
  fputs ("options include:\n", stdout);
  fputs ("\t-h\t\t\tprint this screen\n", stdout);
  fputs ("\t-f NAME\t\t\toutput file to use (i.e. /tmp/mandel.tif)\n", stdout);
  fputs ("\t-p NAME\t\t\tfile to use for color palette\n", stdout);
  fputs ("\t-x #.###...#\t\tcenter X coordinate of image\n", stdout);
  fputs ("\t-y #.###...#\t\tcenter Y coordinate of image\n", stdout);
  fputs ("\t-rx #.###...#\t\tX coordinate for distance reference\n", stdout);
  fputs ("\t-ry #.###...#\t\tY coordinate for distance reference\n", stdout);
  fputs ("\t-L\t\t\tuse the line equation for orbit trap instead of a point\n", stdout);
  fputs ("\t-a #.###...#\t\tA parameter of reference line in form Ax + By + C = 0\n", stdout);
  fputs ("\t-b #.###...#\t\tB parameter of reference line in form Ax + By + C = 0\n", stdout);
  fputs ("\t-c #.###...#\t\tC parameter of reference line in form Ax + By + C = 0\n", stdout);
  fputs ("\t-w ##.#\t\t\twidth of image (x and y +/- width)\n", stdout);
  fputs ("\t-m ####\t\t\tmax iterations to compute\n", stdout);
  fputs ("\t-e ##.#\t\t\tescape radius\n", stdout);
}
/*
 * Parse command-line options into *mandel.  Flags -h and -L consume one
 * argv slot; every other recognized option consumes two (flag + value).
 * Unknown options, or a value option with no value, print usage and exit.
 */
void
parse_args (int argc, char **argv, fractal * mandel)
{
  int i = 1;
  while (i < argc)
    {
      if (!strcmp (argv[i], "-h"))
        {
          print_usage ();
          exit (EXIT_SUCCESS);
        }
      else if (!strcmp (argv[i], "-L"))
        {
          mandel->linedist = 1;
          i += 1;
        }
      else
        {
          /* Every remaining option takes exactly one value argument.
             BUG FIX: the old code read argv[i + 1] unconditionally, so a
             trailing flag like "mandel -x" indexed past argv. */
          if (i + 1 >= argc)
            {
              print_usage ();
              exit (EXIT_FAILURE);
            }
          if (!strcmp (argv[i], "-f"))
            mandel->outfile = argv[i + 1];
          else if (!strcmp (argv[i], "-p"))
            mandel->palfile = argv[i + 1];
          else if (!strcmp (argv[i], "-x"))
            mandel->x = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-y"))
            mandel->y = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-rx"))
            mandel->ref_x = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-ry"))
            mandel->ref_y = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-a"))
            mandel->a = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-b"))
            mandel->b = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-c"))
            mandel->c = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-w"))
            mandel->width = (double) atof(argv[i + 1]);
          else if (!strcmp (argv[i], "-m"))
            mandel->counter_max = atoi(argv[i + 1]);
          else if (!strcmp (argv[i], "-e"))
            mandel->esc_radius = atof(argv[i + 1]);
          else
            {
              print_usage ();
              exit (EXIT_FAILURE);
            }
          i += 2;
        }
    }
}
/*
 * Program entry: parses options, allocates host/device buffers, launches
 * the render kernel, copies the image back, and writes it to a TIFF file.
 */
int main(int argc, char **argv)
{
  fractal mandel;
  mandel.d_pixels = NULL;
  mandel.h_pixels = NULL;
  mandel.d_palette = NULL;
  mandel.h_palette = NULL;
  mandel.outfile = (char *) "/tmp/mandel.tif"; /* default */
  mandel.palfile = NULL;
  mandel.esc_radius= 2e5;
  mandel.counter_max = 350;
  mandel.x = 0.0;
  mandel.y = 0.0;
  mandel.ref_x = 0.0;
  mandel.ref_y = 0.0;
  mandel.width = 2.5;
  mandel.a = 1.0;
  mandel.b = -1.0;
  mandel.c = 0.0;
  mandel.linedist = 0;
  cudaError_t err;
  /* process input arguments */
  parse_args(argc, argv, &mandel);
  /* sanity check: the line trap needs a non-degenerate line */
  if(mandel.linedist == 1 && (mandel.a == 0.0 && mandel.b == 0.0)) {
    printf("Illegal configuration. A and B cannot both be set to zero.\n");
    exit(EXIT_FAILURE);
  }
  /* HOST buffer for color palette */
  mandel.h_palette = (palette*) malloc(sizeof(palette));
  if(mandel.h_palette == NULL) {
    printf("malloc() failed in main.\n");
    exit(EXIT_FAILURE);
  }
  /* Initialize the palette */
  pal_init(mandel.h_palette, mandel.palfile);
  /* DEVICE buffer for the fractal rendering */
  err = cudaMalloc(&(mandel.d_pixels), CUDA_N*CUDA_N*sizeof(pixel));
  if(err != cudaSuccess) {
    printf("%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  /* DEVICE buffer for the color palette */
  err = cudaMalloc(&(mandel.d_palette), sizeof(palette));
  if(err != cudaSuccess) {
    printf("%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  printf("Allocated CUDA device memory.\n");
  /* FIX: ceil-divide so image sizes not divisible by 16 are fully covered
     (the kernel already bounds-checks its indices) */
  dim3 threadsPerBlock(16, 16);
  dim3 numBlocks((CUDA_N + threadsPerBlock.x - 1) / threadsPerBlock.x,
                 (CUDA_N + threadsPerBlock.y - 1) / threadsPerBlock.y);
  /* copy the palette from HOST to DEVICE */
  err = cudaMemcpy(mandel.d_palette, mandel.h_palette, sizeof(palette), cudaMemcpyHostToDevice);
  if(err != cudaSuccess) {
    printf("%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  /* dispatch the CUDA process */
  render<<<numBlocks, threadsPerBlock>>>(mandel.d_pixels,
    mandel.x-mandel.width, mandel.x+mandel.width, mandel.y-mandel.width, mandel.y+mandel.width,
    mandel.esc_radius, mandel.counter_max,
    mandel.ref_x, mandel.ref_y,
    mandel.a, mandel.b, mandel.c, mandel.linedist,
    mandel.d_palette);
  /* FIX: kernel launches are asynchronous and do not report failures on
     their own -- check for launch errors and wait for completion before
     claiming the render finished. */
  err = cudaGetLastError();
  if(err != cudaSuccess) {
    printf("%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = cudaDeviceSynchronize();
  if(err != cudaSuccess) {
    printf("%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  printf("Completed render.\n");
  /* HOST buffer for completed render */
  mandel.h_pixels = (pixel*) malloc(CUDA_N*CUDA_N*sizeof(pixel));
  if(mandel.h_pixels == NULL) {
    printf("malloc() failed in main.\n");
    exit(EXIT_FAILURE);
  }
  /* copy the buffer from DEVICE to HOST */
  err = cudaMemcpy(mandel.h_pixels, mandel.d_pixels, CUDA_N*CUDA_N*sizeof(pixel), cudaMemcpyDeviceToHost);
  if(err != cudaSuccess) {
    printf("%s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  printf("Mem copy complete.\n");
  /* then free the DEVICE memory */
  cudaFree(mandel.d_pixels);
  cudaFree(mandel.d_palette);
  printf("Freed CUDA memory.\n");
  /* then write the buffer to file */
  write_to_tiff(&mandel);
  printf("Wrote to file.\n");
  /* and free the host buffers */
  free (mandel.h_pixels);
  free (mandel.h_palette);
  return 0;
}
|
10,319 | // nvcc distances.cu -o par.out && ./par.out data.dat rand0.dat 32768 30 180
#include<iostream>
#include<fstream>
#include<string.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
using namespace std;
// Structure defining a 3D point.
// Access each component with var.x, var.y, var.z.
struct Punto{
float x,y,z;
};
// One cell of the spatial grid: the points that fall inside it.
struct Node{
//Punto nodepos; // Node coordinates (node position) // Derived from the node's grid indices.
//int in_vicinage; // Number of neighboring nodes.
//int *nodes_vicinage; // Array with the master location ids of the neighboring nodes.
int len; // Number of points stored in this node.
Punto *elements; // NOTE(review): allocation/ownership handled elsewhere -- not visible here.
};
/*
 * Read whitespace-separated "x y z" triples from file_loc into data[0..].
 * `data` must be pre-allocated by the caller with room for every point in
 * the file (no bound is known here -- caller's responsibility).
 */
void read_file(string file_loc, Punto *data){
    ifstream archivo(file_loc);
    /* FIX: use logical || (was bitwise |) when testing for open failure */
    if (archivo.fail() || !archivo){
        cout << "Error al cargar el archivo " << endl;
        exit(1);
    }
    /* FIX: the old getline/>> mix attempted one extra extraction at end of
       file, leaving the last slot filled from a failed read.  Extracting
       with >> alone stops cleanly at EOF or the first malformed token. */
    int n_line = 0;
    while (archivo >> data[n_line].x >> data[n_line].y >> data[n_line].z){
        n_line++;
    }
}
/*
 * Save a bns x bns x bns histogram as text: bns lines, each holding the
 * bns*bns counts of one i-slice in row-major (j,k) order, space-separated.
 * Values are printed as unsigned int (same narrowing cast as before).
 *
 * FIX: the previous version copied the histogram into a temporary
 * `reshape` matrix allocated with new[] that was never deleted (leaking
 * ~bns^3 unsigned ints per call); the counts are now streamed directly
 * to the file, producing byte-identical output.
 */
void save_histogram(string name, int bns, long int ***histo){
    ofstream file;
    file.open(name.c_str(), ios::out | ios::binary);
    if (file.fail()){
        cout << "Error al guardar el archivo " << endl;
        exit(1);
    }
    for (int i = 0; i < bns; i++){
        for (int j = 0; j < bns; j++){
            for (int k = 0; k < bns; k++){
                file << (unsigned int) histo[i][j][k] << " ";
            }
        }
        file << endl;
    }
    file.close();
}
__device__
void count_3_N111(Punto *elements, unsigned int len, unsigned int ***XXX, float dmax2, float ds){
/*
Counts the triangles whose three vertices all lie in the same node.
elements/len : the node's point list.
XXX          : 3D histogram binned by the three side lengths (d12, d13, d23).
dmax2        : squared maximum side length considered.
ds           : bins-per-unit-length scale used to map a distance to a bin.
*/
int i,j,k;
int a,b,c; // histogram bin indices for sides d12, d13, d23
float dx,dy,dz;
float d12,d13,d23; // squared (then actual) pairwise distances
float x1,y1,z1,x2,y2,z2,x3,y3,z3;
for (i=0; i<len; ++i){
x1 = elements[i].x;
y1 = elements[i].y;
z1 = elements[i].z;
// j < len-1 is equivalent to j < len here since k must exceed j
for (j=i+1; j<len-1; ++j){
x2 = elements[j].x;
y2 = elements[j].y;
z2 = elements[j].z;
dx = x2-x1;
dy = y2-y1;
dz = z2-z1;
d12 = dx*dx+dy*dy+dz*dz;
if (d12<=dmax2){ // prune early: side 1-2 must be short enough
d12 = sqrt(d12);
a = (int)(d12*ds);
for (k=j+1; k<len; ++k){
x3 = elements[k].x;
y3 = elements[k].y;
z3 = elements[k].z;
dx = x3-x1;
dy = y3-y1;
dz = z3-z1;
d13 = dx*dx+dy*dy+dz*dz;
if (d13<=dmax2){
d13 = sqrt(d13);
b = (int)(d13*ds);
dx = x3-x2;
dy = y3-y2;
dz = z3-z2;
d23 = dx*dx+dy*dy+dz*dz;
if (d23<=dmax2){
d23 = sqrt(d23);
c = (int)(d23*ds);
// many threads update the shared histogram concurrently
atomicAdd(&XXX[a][b][c],1);
}
}
}
}
}
}
}
__device__
void count_3_N112(Punto *elements1, unsigned int len1, Punto *elements2, unsigned int len2, unsigned int ***XXX, float dmax2, float ds, float size_node){
/*
Counts triangles spanning two nodes: two vertices in node N1
(elements1/len1) and one vertex in node N2 (elements2/len2).
XXX is the 3D histogram binned by side lengths; dmax2 is the squared
maximum side length; ds maps a distance to a bin index.
NOTE(review): size_node is unused here -- kept for signature symmetry
with the other counters, presumably.
*/
int i,j,k;
int a,b,c; // histogram bin indices
float dx,dy,dz;
float d12,d13,d23; // squared (then actual) pairwise distances
float x1,y1,z1,x2,y2,z2,x3,y3,z3;
for (i=0; i<len2; ++i){
// 1st point in N2
x1 = elements2[i].x;
y1 = elements2[i].y;
z1 = elements2[i].z;
for (j=0; j<len1; ++j){
// 2nd point in N1
x2 = elements1[j].x;
y2 = elements1[j].y;
z2 = elements1[j].z;
dx = x2-x1;
dy = y2-y1;
dz = z2-z1;
d12 = dx*dx+dy*dy+dz*dz;
if (d12<=dmax2){ // prune: side 1-2 must be short enough
d12=sqrt(d12);
a = (int)(d12*ds);
// case A: 3rd point also in N1 (two N1 vertices, one N2 vertex)
for (k=j+1; k<len1; ++k){
// 3rd point in N1
x3 = elements1[k].x;
y3 = elements1[k].y;
z3 = elements1[k].z;
dx = x3-x1;
dy = y3-y1;
dz = z3-z1;
d13 = dx*dx+dy*dy+dz*dz;
if (d13<=dmax2){
d13 = sqrt(d13);
b = (int)(d13*ds);
dx = x3-x2;
dy = y3-y2;
dz = z3-z2;
d23 = dx*dx+dy*dy+dz*dz;
if (d23<=dmax2){
d23 = sqrt(d23);
c = (int)(d23*ds);
atomicAdd(&XXX[a][b][c],1);
}
}
}
// case B: 3rd point in N2 (one N1 vertex, two N2 vertices)
for (k=i+1; k<len2; ++k){
// 3rd point in N2
x3 = elements2[k].x;
y3 = elements2[k].y;
z3 = elements2[k].z;
dx = x3-x1;
dy = y3-y1;
dz = z3-z1;
d13 = dx*dx+dy*dy+dz*dz;
if (d13<=dmax2){
d13 = sqrt(d13);
b = (int)(d13*ds);
dx = x3-x2;
dy = y3-y2;
dz = z3-z2;
d23 = dx*dx+dy*dy+dz*dz;
if (d23<=dmax2){
d23 = sqrt(d23);
c = (int)(d23*ds);
atomicAdd(&XXX[a][b][c],1);
}
}
}
}
}
}
}
__device__
void count_3_N123(Punto *elements1, unsigned int len1, Punto *elements2, unsigned int len2, Punto *elements3, unsigned int len3, unsigned int ***XXX, float dmax2, float ds, float size_node){
/*
Counts triangles spanning three distinct nodes: one vertex in N1
(elements1), one in N2 (elements2) and one in N3 (elements3).
XXX is the 3D histogram binned by side lengths; dmax2 is the squared
maximum side length; ds maps a distance to a bin index.
NOTE(review): size_node is unused here -- kept for signature symmetry
with the other counters, presumably.
*/
int i,j,k;
int a,b,c; // histogram bin indices
float dx,dy,dz;
float d12,d13,d23; // squared (then actual) pairwise distances
float x1,y1,z1,x2,y2,z2,x3,y3,z3;
for (i=0; i<len1; ++i){
// 1st point in N1
x1 = elements1[i].x;
y1 = elements1[i].y;
z1 = elements1[i].z;
for (j=0; j<len3; ++j){
// 2nd point in N3
x3 = elements3[j].x;
y3 = elements3[j].y;
z3 = elements3[j].z;
dx = x3-x1;
dy = y3-y1;
dz = z3-z1;
d13 = dx*dx+dy*dy+dz*dz;
if (d13<=dmax2){ // prune: side 1-3 must be short enough
d13 = sqrt(d13);
b = (int)(d13*ds);
for (k=0; k<len2; ++k){
// 3rd point in N2
x2 = elements2[k].x;
y2 = elements2[k].y;
z2 = elements2[k].z;
dx = x3-x2;
dy = y3-y2;
dz = z3-z2;
d23 = dx*dx+dy*dy+dz*dz;
if (d23<=dmax2){
d23 = sqrt(d23);
c = (int)(d23*ds);
dx = x2-x1;
dy = y2-y1;
dz = z2-z1;
d12 = dx*dx+dy*dy+dz*dz;
if (d12<=dmax2){
d12 = sqrt(d12);
a = (int)(d12*ds);
atomicAdd(&XXX[a][b][c],1);
}
}
}
}
}
}
}
// Kernel function to populate the grid of nodes
__global__
void histo_XXX(Node ***tensor_node, unsigned int ***XXX_A, unsigned int ***XXX_B, unsigned int ***XXX_C, unsigned int ***XXX_D, unsigned int partitions, float dmax2, float dmax, float ds, float size_node){
// Esto es para el nodo pivote.
int row, col, mom, idx;
idx = blockIdx.x * blockDim.x + threadIdx.x;
mom = (int) (idx/(partitions*partitions));
col = (int) ((idx%(partitions*partitions))/partitions);
row = idx%partitions;
if (row<partitions && col<partitions && mom<partitions){
//Contar triangulos dentro del mismo nodo
count_3_N111(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, XXX_A, dmax2, ds);
//Para entre nodos
int u, v, w, a ,b, c; //Indices del nodo 2 (u, v, w) y del nodo 3 (a, b, c)
unsigned int dis_nod12, dis_nod23, dis_nod31;
unsigned int internode_max = (int)(dmax/size_node);
unsigned int internode_max2 = (int)(dmax2/(size_node*size_node));
//float x1N=row, y1N=col, z1N=mom, x2N, y2N, z2N, x3N, y3N, z3N;
unsigned int dx_nod12, dy_nod12, dz_nod12, dx_nod23, dy_nod23, dz_nod23, dx_nod31, dy_nod31, dz_nod31;
//=======================
// Nodo 2 movil en Z:
//=======================
for (w=mom+1; w<partitions; w++){
dis_nod12=w-mom;
//==============================================
// 2 puntos en N y 1 punto en N'
//==============================================
if (dis_nod12<=internode_max){
count_3_N112(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][col][w].elements, tensor_node[row][col][w].len, XXX_B, dmax2, ds, size_node);
//==============================================
// 1 punto en N1, 1 punto en N2 y 1 punto en N3
//==============================================
//=======================
// Nodo 3 movil en Z:
//=======================
for (c=w+1; c<partitions; c++){
dis_nod31=c-mom;
dis_nod23=c-w;
if (dis_nod23<=internode_max && dis_nod31<=internode_max){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][col][w].elements, tensor_node[row][col][w].len, tensor_node[row][col][c].elements, tensor_node[row][col][c].len, XXX_B, dmax2, ds, size_node);
}
}
//=======================
// Nodo 3 movil en ZY:
//=======================
for (b=col+1; b<partitions; b++){
dy_nod31 = b-col;
for (c = 0; c<partitions; c++ ){
dz_nod31=c-mom;
dis_nod31 = dy_nod31*dy_nod31 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2){
dis_nod23 = dy_nod31*dy_nod31 + (c-w)*(c-w); // La dy es la misma entre 3 y 1 que 32 porque el nodo 2 solo se mueve en z (por ahora)
if (dis_nod23 <= internode_max2){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][col][w].elements, tensor_node[row][col][w].len, tensor_node[row][b][c].elements, tensor_node[row][b][c].len, XXX_B, dmax2, ds, size_node);
}
}
}
}
//=======================
// Nodo 3 movil en ZYX:
//=======================
for (a=row+1; a<partitions; a++){
dx_nod31=a-row;
for (b = 0; b<partitions; b++){
dy_nod31 = b-col;
for (c = 0; c<partitions; c++){
dz_nod31 = c-mom;
dis_nod31 = dx_nod31*dx_nod31 + dy_nod31*dy_nod31 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2){
dis_nod23 = dx_nod31*dx_nod31 + dy_nod31*dy_nod31 + (c-w)*(c-w);
if (dis_nod23 <= internode_max2){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][col][w].elements, tensor_node[row][col][w].len, tensor_node[a][b][c].elements, tensor_node[a][b][c].len, XXX_B, dmax2, ds, size_node);
}
}
}
}
}
}
}
//=======================
// Nodo 2 movil en ZY:
//=======================
for (v=col+1; v<partitions; v++){
dy_nod12=v-col;
for (w = 0; w<partitions; w++){
dz_nod12 = w-mom;
dis_nod12 = dy_nod12*dy_nod12+dz_nod12*dz_nod12;
if (dis_nod12 <= internode_max2){
//==============================================
// 2 puntos en N y 1 punto en N'
//==============================================
count_3_N112(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][v][w].elements, tensor_node[row][v][w].len, XXX_C, dmax2, ds, size_node);
//==============================================
// 1 punto en N1, 1 punto en N2 y un punto en N3
//==============================================
//=======================
// Nodo 3 movil en Z:
//=======================
for (c=w+1; c<partitions; c++){
dz_nod23=c-w;
dz_nod31=c-mom;
dis_nod31 = dy_nod12*dy_nod12 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2 && dz_nod23<partitions){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][v][w].elements, tensor_node[row][v][w].len, tensor_node[row][v][c].elements, tensor_node[row][v][c].len, XXX_C, dmax2, ds, size_node);
}
}
//=======================
// Nodo 3 movil en ZY:
//=======================
for (b=v+1; b<partitions; b++){
dy_nod31=b-col;
for (c=0; c<partitions; c++){
dz_nod31=c-mom;
dis_nod31 = dy_nod31*dy_nod31 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2){
dis_nod23 = (b-v)*(b-v) + (c-w)*(c-w);
if (dis_nod23 <= internode_max2){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][v][w].elements, tensor_node[row][v][w].len, tensor_node[row][b][c].elements, tensor_node[row][b][c].len, XXX_C, dmax2, ds, size_node);
}
}
}
}
//=======================
// Nodo 3 movil en ZYX:
//=======================
for (a=row+1; a<partitions; a++){
//dx_nod31=dx_nod23<internode_max && dx_nod12=0
dx_nod31=a-row;
for (b=0; b<partitions; b++){
dy_nod31 = b-col;
for (c=0; c<partitions; c++){
dz_nod31 = c-mom;
dis_nod31 = dx_nod31*dx_nod31 + dy_nod31*dy_nod31 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2){
dis_nod23 = dx_nod31*dx_nod31 + (b-v)*(b-v) + (c-w)*(c-w);
if (dis_nod23 <= internode_max2){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[row][v][w].elements, tensor_node[row][v][w].len, tensor_node[a][b][c].elements, tensor_node[a][b][c].len, XXX_C, dmax2, ds, size_node);
}
}
}
}
}
}
}
}
//=======================
// Nodo 2 movil en ZYX:
//=======================
for (u=row+1; u<partitions; ++u){
dx_nod12 = u-row;
for (v=0; v<partitions; ++v){
dy_nod12 = v-col;
for (w=0; w<partitions; ++w){
dz_nod12 = w-mom;
dis_nod12 = dx_nod12*dx_nod12 + dy_nod12*dy_nod12 + dz_nod12*dz_nod12;
if (dis_nod12 <= internode_max2){
//==============================================
// 2 puntos en N y 1 punto en N'
//==============================================
count_3_N112(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[u][v][w].elements, tensor_node[u][v][w].len, XXX_D, dmax2, ds, size_node);
//==============================================
// 1 punto en N1, 1 punto en N2 y 1 punto en N3
//==============================================
a = u;
b = v;
//=======================
// Nodo 3 movil en Z:
//=======================
for (c=w+1; c<partitions; ++c){
dz_nod31 = c-mom;
dis_nod31 = dx_nod12*dx_nod12 + dy_nod12*dy_nod12 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2){
dz_nod23 = c-w;
if (dz_nod23 <= internode_max){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[u][v][w].elements, tensor_node[u][v][w].len, tensor_node[a][b][c].elements, tensor_node[a][b][c].len, XXX_D, dmax2, ds, size_node);
}
}
}
//=======================
// Nodo 3 movil en ZY:
//=======================
for (b=v+1; b<partitions; ++b){
dy_nod31 = b-col;
for (c=0; c<partitions; ++c){
dz_nod31 = c-mom;
dis_nod31 = (a-row)*(a-row) + dy_nod31*dy_nod31 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2){
dy_nod23 = b-v;
dz_nod23 = c-w;
dis_nod23 = dy_nod23*dy_nod23 + dz_nod23*dz_nod23;
if (dis_nod23 <= internode_max2){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[u][v][w].elements, tensor_node[u][v][w].len, tensor_node[a][b][c].elements, tensor_node[a][b][c].len, XXX_D, dmax2, ds, size_node);
}
}
}
}
//=======================
// Nodo 3 movil en ZYX:
//=======================
for (a=u+1; a<partitions; ++a){
dx_nod31 = a-row;
for (b=0; b<partitions; ++b){
dy_nod31 = b-col;
for (c=0; c<partitions; ++c){
dz_nod31 = c-mom;
dis_nod31 = dx_nod31*dx_nod31 + dy_nod31*dy_nod31 + dz_nod31*dz_nod31;
if (dis_nod31 <= internode_max2){
dx_nod23 = a-u;
dy_nod23 = b-v;
dz_nod23 = c-w;
dis_nod23 = dx_nod23*dx_nod23 + dy_nod23*dy_nod23 + dz_nod23*dz_nod23;
if (dis_nod23 <= internode_max2){
count_3_N123(tensor_node[row][col][mom].elements, tensor_node[row][col][mom].len, tensor_node[u][v][w].elements, tensor_node[u][v][w].len, tensor_node[a][b][c].elements, tensor_node[a][b][c].len, XXX_D, dmax2, ds, size_node);
}
}
}
}
}
}
}
}
}
}
}
/*
void add_neighbor(int *&array, int &lon, int id){
lon++;
int *array_aux;
cudaMallocManaged(&array_aux, lon*sizeof(int));
for (int i=0; i<lon-1; i++){
array_aux[i] = array[i];
}
cudaFree(&array);
array = array_aux;
array[lon-1] = id;
}
*/
//===================================================================
void add(Punto *&array, int &lon, float _x, float _y, float _z){
    // Append the point (_x, _y, _z) to a managed array, growing it by one.
    // The old buffer is copied into a fresh allocation and released.
    int old_len = lon++;
    Punto *grown;
    cudaMallocManaged(&grown, lon*sizeof(Punto));
    // Carry over the points already stored.
    for (int i = 0; i < old_len; ++i){
        grown[i].x = array[i].x;
        grown[i].y = array[i].y;
        grown[i].z = array[i].z;
    }
    cudaFree(array);
    array = grown;
    // Write the new point into the last slot.
    grown[old_len].x = _x;
    grown[old_len].y = _y;
    grown[old_len].z = _z;
}
void make_nodos(Node ***nod, Punto *dat, unsigned int partitions, float size_node, unsigned int n_pts){
    /*
    Builds the node grid and bins every point of `dat` into its node.
    Arguments
        nod: partitions^3 grid of nodes to fill.
        dat: points to distribute into the nodes.
        partitions: number of node divisions per box dimension.
        size_node: side length of each cubic node.
        n_pts: number of points in `dat`.
    */
    int row, col, mom;
    // Start every node empty, with a 1-element managed buffer that `add`
    // will grow as points arrive.
    for (row=0; row<(int)partitions; row++){
        for (col=0; col<(int)partitions; col++){
            for (mom=0; mom<(int)partitions; mom++){
                nod[row][col][mom].len = 0;
                cudaMallocManaged(&nod[row][col][mom].elements, sizeof(Punto));
            }
        }
    }
    // Bin each point into its node. BUG FIX: a coordinate exactly on the
    // upper box edge yields an index == partitions (one past the end when
    // size_box/size_node is an integer), so clamp into the last node.
    for (int i=0; i<(int)n_pts; i++){
        row = (int)(dat[i].x/size_node);
        col = (int)(dat[i].y/size_node);
        mom = (int)(dat[i].z/size_node);
        if (row >= (int)partitions) row = (int)partitions - 1;
        if (col >= (int)partitions) col = (int)partitions - 1;
        if (mom >= (int)partitions) mom = (int)partitions - 1;
        add(nod[row][col][mom].elements, nod[row][col][mom].len, dat[i].x, dat[i].y, dat[i].z);
    }
}
//===================================================================
void symmetrize(long int ***XXX, unsigned int bn){
    // Symmetrize the 3D histogram in place: each (i,j,k) permutation class
    // is replaced by the sum over its 6 index permutations. Only i<=j<=k
    // triplets are visited; every visit writes all 6 permuted slots, so no
    // slot is read after it has been overwritten.
    int i, j, k;
    // BUG FIX: `elem` was a float, which loses precision once the summed
    // counts exceed 2^24 (~16.7M) and truncates when assigned back into
    // the long int histogram. Accumulate as long int.
    long int elem;
    for (i=0; i<(int)bn; i++){
        for (j=i; j<(int)bn; j++){
            for (k=j; k<(int)bn; k++){
                elem = XXX[i][j][k] + XXX[k][i][j] + XXX[j][k][i] + XXX[j][i][k] + XXX[k][j][i] + XXX[i][k][j];
                XXX[i][j][k] = elem;
                XXX[k][i][j] = elem;
                XXX[j][k][i] = elem;
                XXX[j][i][k] = elem;
                XXX[k][j][i] = elem;
                XXX[i][k][j] = elem;
            }
        }
    }
}
int main(int argc, char **argv){
    // Usage: <data_file> <rand_file> <n_points> <n_bins> <dmax>
    // Guard argv accesses before touching them.
    if (argc < 6){
        printf("Usage: %s <data_file> <rand_file> <n_points> <n_bins> <dmax>\n", argv[0]);
        return 1;
    }
    string data_loc = argv[1];
    string rand_loc = argv[2];
    string mypathto_files = "../../fake_DATA/DATOS/";
    //This creates the full path to where I have my data files
    data_loc.insert(0,mypathto_files);
    rand_loc.insert(0,mypathto_files);

    unsigned int n_pts = stoi(argv[3]), bn = stoi(argv[4]);
    float dmax = stof(argv[5]), size_box = 250.0, size_node = 2.17 * 250/pow(n_pts, (double)1/3);
    float dmax2 = dmax*dmax, ds = ((float)(bn))/dmax;
    unsigned int partitions = (int)(ceil(size_box/size_node)); //How many divisions per box dimension

    // Four managed (GPU-visible) sub-histograms plus one host-side long int
    // histogram that accumulates their sum after the kernel runs.
    unsigned int ***DDD_A, ***DDD_B, ***DDD_C, ***DDD_D;
    long int ***DDD;
    cudaMallocManaged(&DDD_A, bn*sizeof(unsigned int**));
    cudaMallocManaged(&DDD_B, bn*sizeof(unsigned int**));
    cudaMallocManaged(&DDD_C, bn*sizeof(unsigned int**));
    cudaMallocManaged(&DDD_D, bn*sizeof(unsigned int**));
    DDD = new long int**[bn];
    for (unsigned int i=0; i<bn; i++){
        cudaMallocManaged(&DDD_A[i], bn*sizeof(unsigned int*));
        cudaMallocManaged(&DDD_B[i], bn*sizeof(unsigned int*));
        cudaMallocManaged(&DDD_C[i], bn*sizeof(unsigned int*));
        cudaMallocManaged(&DDD_D[i], bn*sizeof(unsigned int*));
        // BUG FIX: DDD is host-only. The old code also did a
        // cudaMallocManaged on DDD[i] and immediately overwrote the
        // pointer with `new`, leaking one managed block per iteration.
        DDD[i] = new long int*[bn];
        for (unsigned int j = 0; j < bn; j++){
            cudaMallocManaged(&DDD_A[i][j], bn*sizeof(unsigned int));
            cudaMallocManaged(&DDD_B[i][j], bn*sizeof(unsigned int));
            cudaMallocManaged(&DDD_C[i][j], bn*sizeof(unsigned int));
            cudaMallocManaged(&DDD_D[i][j], bn*sizeof(unsigned int));
            DDD[i][j] = new long int[bn];
        }
    }
    // Zero-initialize every histogram bin.
    for (unsigned int i=0; i<bn; i++){
        for (unsigned int j=0; j<bn; j++){
            for (unsigned int k = 0; k < bn; k++){
                DDD_A[i][j][k] = 0;
                DDD_B[i][j][k] = 0;
                DDD_C[i][j][k] = 0;
                DDD_D[i][j][k] = 0;
                DDD[i][j][k] = 0;
            }
        }
    }

    // Read the data and random catalogs into managed memory.
    Punto *data, *rand;
    cudaMallocManaged(&data, n_pts*sizeof(Punto));
    cudaMallocManaged(&rand, n_pts*sizeof(Punto));
    read_file(data_loc,data);
    read_file(rand_loc,rand);
    cout << "Successfully readed the data" << endl;

    //Create Nodes
    cout << "Started nodes initialization" << endl;
    Node ***nodeD;
    cudaMallocManaged(&nodeD, partitions*sizeof(Node**));
    for (unsigned int i=0; i<partitions; i++){
        cudaMallocManaged(&nodeD[i], partitions*sizeof(Node*));
        for (unsigned int j=0; j<partitions; j++){
            cudaMallocManaged(&nodeD[i][j], partitions*sizeof(Node));
        }
    }
    make_nodos(nodeD, data, partitions, size_node, n_pts);
    cout << "Finished the data classification in nodes" << endl;

    //Sets GPU arrange of threads: one thread per node, 1024 threads/block.
    cout << (unsigned int)(ceil((float)(partitions*partitions*partitions)/(float)(1024))) << " blocks with 1024 threads" << endl;
    dim3 grid((unsigned int)(ceil((float)(partitions*partitions*partitions)/(float)(1024))),1,1);
    dim3 block(1024,1,1);
    cout << "Number of partitions " << partitions << endl;

    // Debug dump of the non-empty nodes along the (i,2,3) column.
    cout << "Check nodes i,2,3" << endl;
    for (unsigned int i=0; i < partitions; i++){
        if (nodeD[i][2][3].len > 0){
            // BUG FIX: the last-element index used nodeD[1][2][3].len
            // instead of nodeD[i][2][3].len.
            cout << i << ", " << nodeD[i][2][3].len << ", " << nodeD[i][2][3].elements[nodeD[i][2][3].len-1].x << endl;
        }
    }

    cout << "Entering to the kernel" << endl;
    clock_t begin = clock();
    histo_XXX<<<grid,block>>>(nodeD, DDD_A, DDD_B, DDD_C, DDD_D, partitions, dmax2, dmax, ds, size_node);
    //Waits for the GPU to finish
    cudaDeviceSynchronize();
    //Check here for errors (launch config errors and in-kernel faults)
    cudaError_t error = cudaGetLastError();
    cout << "The error code is " << error << endl;
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nTiempo en CPU usado = %.4f seg.\n", time_spent );

    // Sum the four sub-histograms into the host accumulator.
    for (unsigned int i=0; i<bn; i++){
        for (unsigned int j=0; j<bn; j++){
            for (unsigned int k = 0; k < bn; k++){
                DDD[i][j][k] = DDD_A[i][j][k] + DDD_B[i][j][k] + DDD_C[i][j][k] + DDD_D[i][j][k];
            }
        }
    }
    symmetrize(DDD, bn);
    cout << "value in DDD[1,2,3] is " << DDD[1][2][3] << endl;
    save_histogram("DDD_GPU.dat", bn, DDD);
    cout << "\nGuarde histograma DDD..." << endl;

    // Free memory.
    // BUG FIX: every cudaFree below used to be called as cudaFree(&ptr),
    // i.e. with the address of the pointer variable instead of the device
    // pointer itself, so nothing was actually freed.
    cout << "Free the histograms allocated memory" << endl;
    for (unsigned int i=0; i<bn; i++){
        for (unsigned int j = 0; j < bn; j++){
            cudaFree(DDD_A[i][j]);
            cudaFree(DDD_B[i][j]);
            cudaFree(DDD_C[i][j]);
            cudaFree(DDD_D[i][j]);
        }
        cudaFree(DDD_A[i]);
        cudaFree(DDD_B[i]);
        cudaFree(DDD_C[i]);
        cudaFree(DDD_D[i]);
    }
    cudaFree(DDD_A);
    cudaFree(DDD_B);
    cudaFree(DDD_C);
    cudaFree(DDD_D);
    for (unsigned int i = 0; i < bn; i++){
        for (unsigned int j = 0; j < bn; j++){
            delete[] DDD[i][j];
        }
        delete[] DDD[i];
    }
    delete[] DDD;

    //Free the nodes and their inner arrays.
    cout << "Free the nodes allocated memory" << endl;
    for (unsigned int i=0; i<partitions; i++){
        for (unsigned int j=0; j<partitions; j++){
            // Also release each node's per-node point buffer (previously leaked).
            for (unsigned int k=0; k<partitions; k++){
                cudaFree(nodeD[i][j][k].elements);
            }
            cudaFree(nodeD[i][j]);
        }
        cudaFree(nodeD[i]);
    }
    cudaFree(nodeD);

    //Free data and random arrays
    cout << "Free the data allocated memory" << endl;
    cudaFree(data);
    cudaFree(rand);

    cout << "Finished the program" << endl;
    return 0;
}
|
10,320 | struct Real3
{
// Plain 3-component double record used by the copy/call_min test kernels below.
double value[3];
};
// Device helper: struct-assign both inputs to the two output slots.
// out1/out2 must not be null; aliasing between outputs and inputs is the
// caller's responsibility.
__device__ void copy(const Real3& in1, const Real3& in2,
Real3* out1, Real3* out2)
{
*out1 = in1;
*out2 = in2;
}
// Each thread reads its own offset and copies a pair of inputs to outputs.
__global__ void call_min(int* offsets, const Real3* inputs, Real3* outputs)
{
int idx = offsets[threadIdx.x];
// Copy with some bogus offsets
// NOTE(review): idx - 1 writes before outputs[0] when idx == 0, and
// idx + 1 can read past the end of inputs -- deliberate per the comment
// above (this looks like an out-of-bounds test kernel); confirm intent.
copy(inputs[idx], inputs[idx + 1], &outputs[idx - 1], &outputs[idx]);
}
|
10,321 | #include "includes.h"
//**********************************
//Nathan Durst
//FFT Cuda Program
//December, 5 2016
//**********************************
//This application uses cuda c and implements
// the Cooley-Tukey FFT algorithm to transforms
// an array of complex numbers into a data set
// correlation of complex numbers.
#define N 16384
#define PI 3.14
//kernel function declaration
// Direct DFT with an even/odd (Cooley-Tukey style) index split.
// Thread `id` computes output bin X[id] = sum_n (R[n] + j*I[n]) e^{-j2*pi*id*n/N}
// from all N inputs. Outputs go to xR/xI.
//
// Fixes vs. the previous version:
//  * every twiddle angle now includes the output index `id` (several cos()
//    terms were missing it, mixing output bins together);
//  * the even/odd terms now read R[2i]/R[2i+1] instead of R[i] twice;
//  * the real part uses +I*sin: x*e^{-j t} = (R cos t + I sin t) + j(I cos t - R sin t);
//  * an accurate single-precision PI replaces the 3.14 macro;
//  * a bounds guard protects against launches with more than N threads.
__global__ void FFT(float * R, float * I, float * xR, float * xI)
{
    const float kPi = 3.14159265358979f;
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= N) return;  // guard the ragged edge of the launch grid
    float real = 0.0f, imag = 0.0f;
    for (int i = 0; i < (N/2); i++)
    {
        // even-indexed sample x[2i]
        float ae = (2.0f * kPi * id * (i*2)) / N;
        real +=  R[i*2] * cosf(ae) + I[i*2] * sinf(ae);
        imag += -R[i*2] * sinf(ae) + I[i*2] * cosf(ae);
        // odd-indexed sample x[2i+1]
        float ao = (2.0f * kPi * id * (i*2+1)) / N;
        real +=  R[i*2+1] * cosf(ao) + I[i*2+1] * sinf(ao);
        imag += -R[i*2+1] * sinf(ao) + I[i*2+1] * cosf(ao);
    }
    xR[id] = real;
    xI[id] = imag;
}
10,322 | // __global__ 函数 (GPU上运行) 计算立方和
#include<stdio.h>
#define DATA_SIZE 1024*1024
__global__ static void sumOfSquares(int *num, int* result)
{
    // Single-thread kernel: fold the polynomial expression over every
    // element of num and store the total in *result.
    int acc = 0;
    for (int i = 0; i < DATA_SIZE; ++i) {
        int v = num[i];
        // Integer division intentionally left-to-right: ((v/(v+1))*v)/(v+2)...
        acc += v * v * v - v * v +
               v / (v + 1) * v / (v + 2) * v / (v + 3);
    }
    *result = acc;
}
// Two-stage reduction: each thread folds one contiguous slice of
// blockDim.x input values into shared memory, then thread 0 adds the
// per-thread partials and writes the grand total to *result.
// Assumes a single block with blockDim.x <= 1024 (shared array is fixed)
// and an input of blockDim.x * blockDim.x elements.
__global__ static void sum(int *sum,int* result)
{
__shared__ int sum_number[1024];
int number = 0;
// Each thread owns the slice [threadIdx.x*blockDim.x, (threadIdx.x+1)*blockDim.x).
for(int i = 0;i<blockDim.x;i++)
number += sum[i+threadIdx.x*blockDim.x];
sum_number[threadIdx.x] = number;
// Barrier: all partials must be visible before thread 0 reads them.
__syncthreads();
number = 0;
if(0 == threadIdx.x)
{
for(int i = 0;i<blockDim.x;i++)
{
number += sum_number[i];
}
*result = number;
}
}
__global__ static void sprintf_gpu(int a)
{
    // Debug kernel: print `a` scaled by the block offset plus the thread index.
    int value = a * blockIdx.x * blockDim.x + threadIdx.x;
    printf("result:%d", value);
}
__global__ static void squares(int* num, int *squ)
{
    // One thread per element: evaluate the polynomial for element i
    // and store it in squ[i]. Launch must cover the whole array exactly.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    int v = num[i];
    // Integer division intentionally left-to-right, as in sumOfSquares.
    squ[i] = v * v * v - v * v +
             v / (v + 1) * v / (v + 2) * v / (v + 3);
}
// Host-callable entry point: squares<<<1024,1024>>> evaluates the
// per-element polynomial into squ, then sum<<<1,1024>>> reduces squ
// into *result. Both launches go to the default stream, so they are
// ordered without an explicit synchronization in between.
extern "C" void fun(int *num,int *squ, int* result)
{
//sumOfSquares << <64,64 , 0 >> >(num, result);
squares<<<1024,1024>>>(num,squ);
//cudaThreadSynchronize();
sum<<<1,1024,0>>>(squ,result);
}
|
10,323 | #include "includes.h"
// Naive matrix multiply C = A * B with one thread per output element of C
// (Aheight x Bwidth), launched as a flattened 1D grid.
__global__ void MatMulKernel( float *C, float *A, float *B, int Aheight, int Awidth, int Bwidth ) {
    int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the guard was `>`, which let elementNum == Aheight*Bwidth
    // (one past the end) fall through and write out of bounds.
    if( elementNum >= Aheight * Bwidth ) {
        return;
    }
    int row = elementNum / Bwidth;
    int col = elementNum % Bwidth;
    float result = 0.0f;
    // Dot product of row `row` of A with column `col` of B.
    for( int e = 0; e < Awidth; e++ ) {
        result += A[row * Awidth + e] * B[e * Bwidth + col];
    }
    C[row * Bwidth + col] = result;
}
10,324 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <cstdlib>
// Binary functor computing a * x + y; the scale factor `a` is fixed at
// construction time.
struct saxpy_functor
{
double const a;
saxpy_functor(double _a)
: a(_a)
{
}
// Marked __host__ __device__ so thrust can apply it on either backend.
__host__ __device__ double operator()(double const& x, double const& y) const
{
return a * x + y;
}
};
// Fused SAXPY: updates Y in place with a single thrust::transform pass
// (no temporary vector, one kernel launch).
void saxpy_fast(
double A, thrust::device_vector<double>& X, thrust::device_vector<double>& Y)
{
// Y <- A * X + Y
thrust::transform(
X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(A));
}
// Unfused SAXPY kept for comparison: fills a temporary with A, multiplies,
// then adds -- three passes / extra memory versus saxpy_fast's single pass.
void saxpy_slow(
double A, thrust::device_vector<double>& X, thrust::device_vector<double>& Y)
{
thrust::device_vector<double> temp(X.size());
// temp <- A
thrust::fill(temp.begin(), temp.end(), A);
// temp <- A * X
thrust::transform(X.begin(), X.end(), temp.begin(), temp.begin(),
thrust::multiplies<double>());
// Y <- A * X + Y
thrust::transform(
temp.begin(), temp.end(), Y.begin(), Y.begin(), thrust::plus<double>());
}
int main()
{
    // Demo: y <- 5*x + y over 5M-element device vectors filled with 20 and 10.
    const size_t kElems = 5000000;
    thrust::device_vector<double> x(kElems, 20.0);
    thrust::device_vector<double> y(kElems, 10.0);
    saxpy_fast(5.0f, x, y);
    return 0;
}
|
10,325 | #include "TsetlinMachineConfig.cuh"
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
// Map a Tsetlin automaton state to its decision: 1 (include) once the
// state passes NUMBER_OF_STATES, otherwise 0 (exclude).
__device__ inline int action(int state)
{
    return (state > NUMBER_OF_STATES) ? 1 : 0;
}
// Type I feedback over every (clause, feature) pair whose clause received
// feedback == 1. For each pair, two automata are updated: include-literal
// at ta_state[i*2] and include-negated-literal at ta_state[i*2+1], with
// probabilities derived from the specificity parameter s.
// Grid-stride loop; per-thread curand state is read once and written back.
__global__ void type_i_feedback(curandState *state, int *ta_state, int *clause_feedback, int *clause_output, int *Xi, float s)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/* Copy state to local memory for efficiency */
curandState localState = state[index];
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
int output = clause_output[clause];
if (clause_feedback[clause] != 1) {
continue;
}
// Clause did not fire: decay both automata with probability 1/s.
if (output == 0) {
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2] > 1) {
ta_state[i*2] -= 1;
}
}
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2+1] > 1) {
ta_state[i*2+1] -= 1;
}
}
// Clause fired: strengthen the automaton matching the literal value,
// weaken the opposite one (states are clamped to [1, 2*NUMBER_OF_STATES]).
} else if (output == 1) {
if (Xi[feature] == 1) {
if (BOOST_TRUE_POSITIVE_FEEDBACK == 1 || curand_uniform(&localState) <= (s-1)/s) {
if (ta_state[i*2] < NUMBER_OF_STATES*2) {
ta_state[i*2] += 1;
}
}
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2+1] > 1) {
ta_state[i*2+1] -= 1;
}
}
} else if (Xi[feature] == 0) {
if (BOOST_TRUE_POSITIVE_FEEDBACK == 1 || curand_uniform(&localState) <= (s-1)/s){
if (ta_state[i*2+1] < NUMBER_OF_STATES*2) {
ta_state[i*2+1] += 1;
}
}
if (curand_uniform(&localState) <= 1.0/s) {
if (ta_state[i*2] > 1) {
ta_state[i*2] -= 1;
}
}
}
}
}
// Persist the advanced RNG state for the next kernel invocation.
state[index] = localState;
}
// Type II feedback: for clauses with feedback == -1 that fired, increment
// the state of currently-excluded automata whose literal disagrees with
// the input (Xi == 0 pushes the plain literal, Xi == 1 the negated one),
// nudging them toward inclusion so the clause stops firing on this input.
// Grid-stride loop over all (clause, feature) pairs.
__global__ void type_ii_feedback(int *ta_state, int *clause_feedback, int *clause_output, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include;
int action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
// Only clauses that received negative feedback AND produced output 1.
if (clause_feedback[clause] != -1 || clause_output[clause] == 0) {
continue;
}
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if (Xi[feature] == 0) {
if (action_include == 0 && ta_state[i*2] < NUMBER_OF_STATES*2) {
ta_state[i*2] += 1;
}
} else if (Xi[feature] == 1) {
if (action_include_negated == 0 && ta_state[i*2+1] < NUMBER_OF_STATES*2) {
ta_state[i*2+1] += 1;
}
}
}
}
/* Sum up the votes for each class */
__global__ void sum_up_class_votes(int *clause_output, int *sum)
{
    // Accumulate clause votes with alternating sign: even-indexed clauses
    // vote +1, odd-indexed clauses vote -1. Each thread builds a partial
    // over its grid-stride slice, then folds it in with one atomicAdd.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = gridDim.x * blockDim.x;
    int partial = 0;
    for (int j = tid; j < CLAUSES; j += step) {
        if (j & 1)
            partial -= clause_output[j];
        else
            partial += clause_output[j];
    }
    atomicAdd(sum, partial);
}
// Decide per-clause feedback (+sign, -sign, or 0) from the class vote sum.
// With probability proportional to the distance of *class_sum from the
// THRESHOLD the clause gets feedback in the direction set by `target`;
// otherwise it gets none. Sign alternates with clause parity, matching
// the voting scheme in sum_up_class_votes.
__global__ void generate_clause_feedback(curandState *state, int *clause_feedback, int *class_sum, int target)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
/* Copy state to local memory for efficiency */
curandState localState = state[index];
for (int j = index; j < CLAUSES; j += stride) {
int sign = 1 - 2 * (j & 1);
if (target) {
if (curand_uniform(&localState) > (1.0/(THRESHOLD*2))*(THRESHOLD - *class_sum)) {
clause_feedback[j] = 0;
} else {
clause_feedback[j] = sign;
}
} else {
if (curand_uniform(&localState) > (1.0/(THRESHOLD*2))*(THRESHOLD + *class_sum)) {
clause_feedback[j] = 0;
} else {
clause_feedback[j] = -1*sign;
}
}
}
// Persist the advanced RNG state for the next kernel invocation.
state[index] = localState;
}
__global__ void initialize_clause_output(int *clause_output)
{
    // Grid-stride reset: mark every clause as initially matching (output 1);
    // calculate_clause_output later clears the ones that fail a literal.
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = gridDim.x * blockDim.x;
    for (int j = start; j < CLAUSES; j += step)
        clause_output[j] = 1;
}
// Clause output = AND over its included literals: any included literal
// that disagrees with Xi zeroes the clause. Requires clause_output to be
// pre-set to 1 by initialize_clause_output; concurrent writes of 0 to the
// same clause are benign (same value).
__global__ void calculate_clause_output(int *ta_state, int *clause_output, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include, action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if ((action_include == 1 && Xi[feature] == 0) || (action_include_negated == 1 && Xi[feature] == 1)) {
clause_output[clause] = 0;
}
}
}
__global__ void initialize_clause_output_predict(int *clause_output, int *all_exclude)
{
    // Grid-stride reset for prediction: every clause starts as matching
    // (output 1) and as all-exclude until a literal proves otherwise.
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = gridDim.x * blockDim.x;
    for (int j = start; j < CLAUSES; j += step) {
        clause_output[j] = 1;
        all_exclude[j] = 1;
    }
}
// Prediction-time clause evaluation: same AND-of-included-literals rule as
// calculate_clause_output, but additionally clears all_exclude[clause]
// whenever the clause includes at least one literal (clauses that include
// nothing are suppressed afterwards by update_with_all_exclude).
__global__ void calculate_clause_output_predict(int *ta_state, int *clause_output, int *all_exclude, int *Xi)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int action_include, action_include_negated;
for (int i = index; i < CLAUSES*FEATURES; i += stride) {
int clause = i / FEATURES;
int feature = i % FEATURES;
action_include = action(ta_state[i*2]);
action_include_negated = action(ta_state[i*2+1]);
if (action_include == 1 || action_include_negated == 1) {
all_exclude[clause] = 0;
}
if ((action_include == 1 && Xi[feature] == 0) || (action_include_negated == 1 && Xi[feature] == 1)) {
clause_output[clause] = 0;
}
}
}
__global__ void update_with_all_exclude(int *clause_output, int *all_exclude)
{
    // Clauses that include no literal at all must not vote during
    // prediction: zero their output. Grid-stride over all clauses.
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = gridDim.x * blockDim.x;
    for (int j = start; j < CLAUSES; j += step) {
        if (all_exclude[j] == 1)
            clause_output[j] = 0;
    }
}
|
10,326 | #include "includes.h"
__global__ void sumMatrixOnGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
    // Element-wise matrix add over a 2D launch: one thread per (ix, iy)
    // cell of the nx-by-ny matrices, row-major flattened as iy*nx + ix.
    const unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    if (ix >= nx || iy >= ny)
        return;  // guard the ragged edge of the launch grid
    const unsigned int idx = iy * nx + ix;
    MatC[idx] = MatA[idx] + MatB[idx];
}
10,327 | //
// Created by sjhuang on 2021/8/21.
//
/************************************************************************/
/* cuda测试 */
/************************************************************************/
#include <iostream>
// Enumerate CUDA devices, print each device's properties, then select a
// device. Returns false when no CUDA-capable device is present.
bool InitCUDA()
{
    int count;
    cudaGetDeviceCount(&count);  // number of CUDA devices present
    if(count == 0)
    {
        std::cout<<"There is no device.\n" ;
        return false;
    }
    for(int i = 0; i < count; i++)
    {
        cudaDeviceProp prop;  // per-device property record
        if(cudaGetDeviceProperties(&prop, i) == cudaSuccess)
        {
            // (Labels below are user-facing strings and kept as-is:
            // name, compute capability major/minor, clock rate,
            // multiprocessor count, concurrent-kernel support.)
            std::cout<<"设备名称:"<<prop.name<<"\n" ;
            std::cout<<"计算能力的主代号:"<<prop.major<<"\t"<<"计算能力的次代号:"<<prop.minor<<"\n" ;
            std::cout<<"时钟频率:"<<prop.clockRate<<"\n" ;
            std::cout<<"设备上多处理器的数量:"<<prop.multiProcessorCount<<"\n" ;
            std::cout<<"GPU是否支持同时执行多个核心程序:"<<prop.concurrentKernels<<"\n" ;
        }
    }
    // BUG FIX: the old code called cudaSetDevice(i) after the loop, where
    // i == count is one past the last valid device index (always invalid).
    // Select device 0, which is guaranteed to exist here (count > 0).
    cudaSetDevice(0);
    return true;
}
int main()
{
    // Abort quietly when no usable CUDA device was found/initialized.
    if (!InitCUDA())
        return 0;
    // Success message ("CUDA configured successfully").
    std::cout<<"cuda配置成功!\n" ;
    return 0;
}
10,328 | /*
* Parallel and Distributed Systems
* Exercise 3
* V1. GPU with one thread per moment
* Authors:
* Portokalidis Stavros, AEM 9334, stavport@ece.auth.gr
* Christoforidis Savvas, AEM 9147, schristofo@ece.auth.gr
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// block dimensions
#define BX 16
#define BY 16
// window size
#define WS 5
// replace pointer types
#define old(i,j,n) *(old+(i)*n+(j))
#define current(i,j,n) *(current+(i)*n+(j))
#define w(i,j) *(w+(i)*WS+(j))
#define d_w(i,j) *(d_w+(i)*WS+(j))
#define G(i,j,n) *(G+(i)*n+(j))
#define d_current(i,j,n) *(d_current+(i)*n+(j))
#define d_old(i,j,n) *(d_old+(i)*n+(j))
// cuda kernel
// One thread per lattice site (r, c): compute the weighted influence of the
// WS x WS neighborhood of d_old (toroidal wrap via % n) and write the sign
// of that influence into d_current. Indexing uses the d_w/d_old/d_current
// macros defined at the top of this file.
__global__ void kernel(int *d_current, int *d_old, double *d_w, int n) {
// compute column and row global index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
// check if within bounds.
if ((c >= n) || (r >= n))
return;
double influence = 0; // weighted influence of the neighbors
for(int ii=0; ii<WS; ii++){
for(int jj=0; jj<WS; jj++){
influence += d_w(ii,jj) * d_old((r-2+n+ii)%n, (c-2+n+jj)%n, n);
}
}
// magnetic moment gets the value of the SIGN of the weighted influence of its neighbors
// NOTE(review): 10e-7 is 1e-6, not 1e-7 -- confirm the intended tolerance.
if(fabs(influence) < 10e-7){
d_current(r,c,n) = d_old(r,c,n); // remains the same in the case that the weighted influence is zero
}
else if(influence > 10e-7){
d_current(r,c,n) = 1;
}
else if(influence < 0){
d_current(r,c,n) = -1;
}
}
// Run k steps of the Ising evolution on an n x n lattice G using the
// WS x WS weight matrix w. Each step launches the kernel, copies both
// lattices back to the host to test for convergence, and ping-pongs the
// device buffers. G always holds the most recent lattice on return.
// NOTE(review): the early-convergence path calls exit(0), which terminates
// the whole process (host/device buffers are reclaimed by the OS, but no
// caller code after ising() will run) -- confirm this is intended.
void ising( int *G, double *w, int k, int n){
dim3 block(BX,BY); // blockDim
dim3 grid((n+BX-1)/BX,(n+BY-1)/BY); // gridDim
int * old = (int*) malloc(n*n*sizeof(int)); // old spin lattice
int * current = (int*) malloc(n*n*sizeof(int)); // current spin lattice
if( old==NULL || current==NULL){
printf("memory allocation failed (CPU)\n");
exit(1);
}
// device variables
int *d_old, *d_current, *tmp;
double * d_w;
// The 2nd and 3rd cudaMalloc results rely on cudaSuccess == 0 (truthy on failure).
if( cudaMalloc(&d_old , n*n*sizeof(int)) != cudaSuccess || cudaMalloc(&d_current , n*n*sizeof(int)) || cudaMalloc(&d_w, WS*WS*sizeof(double))){
printf("memory allocation failed (GPU)\n");
exit(1);
}
// copy host to device
cudaMemcpy(d_w, w, WS*WS*sizeof(double), cudaMemcpyHostToDevice );
cudaMemcpy(d_old, G, n*n*sizeof(int), cudaMemcpyHostToDevice );
// run for k steps
for(int l=0; l<k; l++){
// kernel execution
kernel<<<grid,block>>>(d_current, d_old, d_w, n );
cudaDeviceSynchronize();
// copy device to host (both lattices are needed for the convergence check)
cudaMemcpy(old, d_old, n*n*sizeof(int), cudaMemcpyDeviceToHost );
cudaMemcpy(current, d_current, n*n*sizeof(int), cudaMemcpyDeviceToHost );
// save result in G
cudaMemcpy(G , d_current , n*n*sizeof(int), cudaMemcpyDeviceToHost);
// swap the pointers for the next iteration
tmp = d_old;
d_old = d_current;
d_current = tmp;
// terminate if no changes are made
int areEqual = 1;
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
if(old(i,j,n) != current(i,j,n)){
areEqual = 0;
// force both loops to end on the first difference
i=n;
j=n;
}
}
}
// termination branch
if(areEqual == 1){
printf("terminated: spin values stay same (step %d)\n" , l);
exit(0);
}
}
// free host/device space
free(old);
free(current);
cudaFree(d_old);
cudaFree(d_current);
cudaFree(d_w);
}
|
10,329 | #include "includes.h"
__global__ void addOneElementPerThread(double* a, double* b, double* c, int n)
{
    // 2D launch over an n x n grid: every thread adds exactly one element
    // of a and b into c (row-major flattening: row * n + col).
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= n || row >= n)
        return;  // guard the ragged edge of the launch grid
    const int id = row * n + col;
    c[id] = a[id] + b[id];
}
10,330 | /* nqueens.cu
* Jonathan Lehman
* February 26, 2012
*
* Compile with: nvcc -o nqueens nqueens.cu
* to get default with _N_ = 4 and numBX = 1 numBY = 1 sumOnGPU = 0
*
* Or specify _N_ by compiling with: nvcc -o nqueens nqueens.cu -D_N_=x
* where x is the board size desired where x must be >= 4 and <= 22
*
* and/Or specify numBX by compiling with: nvcc -o nqueens nqueens.cu -DnumBX=y
* where y is the number of tuple values to be generated by blockIdx.x
* where y must be >= 1 such that N^numBX < maxgridsize (in this case 65535 blocks)
*
* and/or specify numBY by compiling with nvcc -o nqueens nqueens.cu -DnumBY=z
* where z is the number of groups of ((N / 2) + (N % 2)) columns by N^numBX rows that work on the solution
* essentially, this evenly divides the work of the tuples being generated iteratively by each thread between each group
* where z must be <= N^numBX
*
* and/or specify whether or not to add the block totals on the GPU or cpu with nvcc -o nqueens nqueens.cu -DsumOnGPU=a
* where a is 1 or 0, with 1 doing the sum on the GPU and 0 doing the sum on the CPU
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <iostream>
#include <cuda_runtime.h>
#include <cuda.h>
#include <vector>
__global__ void queen(long*, int, int, int, int);
double getTime();
//set board size
#ifndef _N_
#define _N_ 4
#endif
// Keep track of the gpu time.
cudaEvent_t start, stop;
float elapsedTime;
// Keep track of the CPU time.
double startTime, stopTime;
//array for block sums
long *a;
int main(int argc, char **argv){
    // Two required arguments: numBX and numBY (board size _N_ is compile-time).
    if(argc < 3) {
        // BUG FIX: the old usage text was copy-pasted from another program
        // (./proj2 with sample/bucket/block arguments).
        printf("\nError, too few arguments. Usage: ./nqueens {numBX} {numBY}.\n");
        return -1;
    }
    const int numBX = atoi(argv[1]);
    const int numBY = atoi(argv[2]);
    // Tuple positions each thread still generates iteratively.
    const int numGen = _N_ - 3 - numBX;
    //ensure number of tuples generated iteratively is not less than 0
    if(numGen < 0){
        fprintf(stderr, "\nnqeens: The number of values in the tuple generated iteratively cannot be less than 0.\n NumGen = _N_(%d) - 3 - numBX(%d) = %d\n", _N_, numBX, numGen);
        exit(1);
    }
    //ensure N is in the correct range
    if(_N_ < 4 || _N_ > 22){
        fprintf(stderr, "\nnqeens: _N_(%d) must be between 4 and 22 inclusive\n", _N_);
        exit(1);
    }
    //ensure that at least one of the tuple values is generated by the block's X coordinate value
    if(numBX < 1){
        fprintf(stderr, "\nnqeens: The number of tuples generated by each block's X coordinate value (numBX=%d) must be >= 1\n", numBX);
        exit(1);
    }
    long *dev_a;
    int gW, gH, numberBlocks;
    // Grid width: one block column per tuple prefix of length numBX.
    gW = pow(_N_, numBX);
    // Only half of the first-row positions are explored (mirror symmetry);
    // odd N keeps the middle column.
    int sizePerYSeg = (_N_ / 2) + (_N_ % 2);
    gH = sizePerYSeg * numBY;
    numberBlocks = gW * gH;
    /* Initialize the per-block result array. */
    a = new long[numberBlocks];
    /* Allocate global device memory. */
    cudaMalloc((void **)&dev_a, sizeof(long) * numberBlocks);
    /* Start the GPU timer. */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /* Execute the kernel: one _N_ x _N_ thread block per (prefix, row) pair. */
    dim3 block(_N_, _N_); //threads w x h
    dim3 grid(gW, gH); //blocks w x h
    queen<<<grid, block>>>(dev_a, sizePerYSeg, numBX, numBY, numGen);
    /* Wait for the kernel to complete. Needed for timing. */
    cudaThreadSynchronize();
    /* Stop the timer and record the elapsed GPU time. */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    /* Get per-block solution counts from the device. */
    cudaMemcpy(a, dev_a, sizeof(long) * numberBlocks, cudaMemcpyDeviceToHost);
    //print any cuda error messages
    const char* errorString = cudaGetErrorString(cudaGetLastError());
    printf("GPU Error: %s\n", errorString);
    /* Start the CPU timer. */
    startTime = getTime();
    int sum = 0;
    //check if N is even or odd, then calculate sum, which is number of solutions
    if(_N_ % 2 == 0){
        // Even N: every counted placement has a distinct mirror image.
        for(int i = 0; i < numberBlocks; i++){
            sum += a[i];
        }
        sum *= 2;
    }
    else{
        // Odd N: the middle-column blocks (last grid row of each Y segment)
        // are self-mirrored -- double everything else, add them once.
        int numBlocksPerSeg = numberBlocks / numBY;
        int rowSizeOfGrid = pow(_N_, numBX);
        for(int j = 0; j < numBY; j++){
            int start = j * numBlocksPerSeg;
            for(int i = start; i < start + numBlocksPerSeg - rowSizeOfGrid; i++){
                sum += a[i];
            }
        }
        sum *= 2;
        //add last block row of sums for each Y block
        for(int j = 0; j < numBY; j++){
            for(int i = j * numBlocksPerSeg + numBlocksPerSeg - rowSizeOfGrid; i < j * numBlocksPerSeg + numBlocksPerSeg; i++){
                sum += a[i];
            }
        }
    }
    /* Stop the CPU timer */
    stopTime = getTime();
    double totalTime = stopTime - startTime;
    printf("Number of Solutions: %d\n", sum);
    //add cpu time and gpu time and print result
    printf( "GPU Time: %f secs\nCPU Time: %f secs\nTotal Time: %f secs\n", (elapsedTime / 1000.0), totalTime, (elapsedTime / 1000.0) + totalTime );
    //destroy cuda events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* Free the allocated device memory. */
    cudaFree(dev_a);
    // BUG FIX: `a` was allocated with new[]; releasing it with free() is
    // undefined behavior. Use delete[].
    delete[] a;
}
// N-queens solution-counting kernel.
//
// Launch layout (see host code above): block = (_N_, _N_) threads,
// grid = (gW, gH) blocks. Each thread owns one partially fixed placement:
// queen rows are encoded as a tuple of board positions, with tuple[0] fixed
// by blockIdx.y, tuple[1..nBX] fixed by the base-_N_ digits of blockIdx.x,
// the next two entries by threadIdx.x / threadIdx.y, and the final nGen
// entries enumerated in the loop below. a[] receives one partial solution
// count per block.
__global__
void queen (long *a, int sizePerYSeg, int nBX, int nBY, int nGen )
{
// Per-thread counters and working tuples live in shared memory so thread
// (0,0) can reduce them into the block total at the end of the kernel.
__shared__ long solutions[_N_][_N_];
__shared__ char tuple[_N_][_N_][_N_];
int totalWrong = 0;
solutions[threadIdx.x][threadIdx.y] = 0;
// Number of tuples to enumerate per thread. NOTE(review): powf is float
// math truncated to int -- assumes the result lands on the exact integer;
// confirm for larger _N_/nGen.
int totNumGen = powf(_N_, nGen);
int bYsegment = blockIdx.y / sizePerYSeg;
int workSize = totNumGen / nBY;
int extra = totNumGen - workSize * nBY;//extra work to be done by last segment
//set tuple by block Y value
tuple[threadIdx.x][threadIdx.y][0] = blockIdx.y % sizePerYSeg;
//set tuple(s) by block X value (base-_N_ digits of blockIdx.x)
int rem = blockIdx.x;
for(int i = 1; i <= nBX; i++){
tuple[threadIdx.x][threadIdx.y][i] = rem % _N_;
rem = rem / _N_;
}
int tupCtr = nBX;
//set tuples by thread value
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.x;
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.y;
//check if thread is valid at this point
// Pairwise conflict test over the fixed prefix: a pair conflicts when the
// entries are equal (same row) or differ by exactly their column distance
// ctr (either diagonal). Each conflict bumps totalWrong.
for(int i = tupCtr; i > 0; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
if(totalWrong == 0){
//iterate through all numbers to generate possible solutions thread must check
//does not do if thread is already not valid at this point
int start = bYsegment * workSize;
// The last Y segment also picks up the remainder (extra) of the range.
for(int c = start; c < start + workSize + (bYsegment == nBY - 1) * extra; c++){
//generate last values in tuple, convert to base N and store to tuple array
int rem = c;
for(int b = 0, k = tupCtr + 1; b < nGen; b++, k++){
tuple[threadIdx.x][threadIdx.y][k] = rem % _N_;
rem = rem / _N_;
}
//checks that the numGen tuple values are indeed unique (saves work overall)
for(int x = 0; x < nGen && totalWrong == 0; x++){
for(int y = 0; y < nGen && totalWrong == 0; y++){
totalWrong += tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + x] == tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + y] && x != y;
}
}
//check one solution
// The loop bound (totalWrong * _N_) short-circuits the full check: once
// totalWrong > 0, the bound is >= _N_ and the loop body never runs.
for(int i = _N_ - 1; i > totalWrong * _N_; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
//add 1 to solution total if nothing wrong
solutions[threadIdx.x][threadIdx.y] += !(totalWrong);
//reset total wrong
totalWrong = 0;
}
}
//sync the threads so that thread 0 can make the calculations
__syncthreads();
//have thread 0 sum for all threads in block to get block total
if(threadIdx.x == 0 && threadIdx.y == 0){
//ensure that the block total value is 0 initially
long sum = 0;
//iterate through each threads solution and add it to the block total
for(int i =0; i < _N_; i++){
for(int j = 0; j < _N_; j++){
//use local var
sum += solutions[i][j];
}
}
//store to global memory (one slot per block, row-major over the grid)
a[gridDim.x * blockIdx.y + blockIdx.x] = sum;
}
//sync the threads so that calculations can be made
__syncthreads();
}
// Return the current wall-clock time in seconds, with microsecond resolution.
double getTime(){
    struct timeval now;
    gettimeofday(&now, NULL);
    // Combine whole seconds with the fractional microsecond part.
    double seconds = (double)now.tv_sec;
    double fraction = (double)now.tv_usec * 1e-6;
    return seconds + fraction;
}
|
#include "stdio.h"
// Element-wise vector addition: c[i] = a[i] + b[i].
// Launch layout: one single-threaded block per element (<<<N, 1>>>), so
// blockIdx.x selects the element. N is passed as a device pointer and
// dereferenced on the GPU to bound the valid range.
__global__ void add(int *a, int *b, int *c, int *N){
    const int element = blockIdx.x;   // one block handles one index
    const int count = *N;             // number of valid elements
    // Guard the grid tail: blocks past the end do nothing.
    if (element < count){
        c[element] = a[element] + b[element];
    }
}
// Driver: adds two random int vectors of length argv[1] on the GPU and
// prints every "a[i] + b[i] = c[i]" line.
// Usage: prog <N>
int main(int argc, char* argv[]){
    // Guard the length argument -- the original dereferenced argv[1] blindly,
    // crashing when the program was run with no arguments.
    if (argc < 2){
        printf("Usage: %s <N>\n", argv[0]);
        return 1;
    }
    int N = atoi(argv[1]);
    if (N <= 0){
        printf("N must be a positive integer\n");
        return 1;
    }
    int *a, *b, *c, *n;
    int *dev_a, *dev_b, *dev_c, *dev_n;
    a = (int*)malloc(N*sizeof(int));
    b = (int*)malloc(N*sizeof(int));
    c = (int*)malloc(N*sizeof(int));
    n = (int*)malloc(sizeof(int));
    cudaMalloc((void **) &dev_a, N*sizeof(int));
    cudaMalloc((void **) &dev_b, N*sizeof(int));
    cudaMalloc((void **) &dev_c, N*sizeof(int));
    cudaMalloc((void **) &dev_n, sizeof(int));
    // Fill arrays with pseudo-random values (rand() is left unseeded, so
    // runs are reproducible).
    for (int i = 0; i < N; i++)
    {
        a[i] = rand()%10000;
        b[i] = rand()%10000;
    }
    *n = N;
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_n, n, sizeof(int), cudaMemcpyHostToDevice);
    // One single-threaded block per element; the kernel bounds-checks
    // against *dev_n.
    add<<<N,1>>>(dev_a, dev_b, dev_c, dev_n);
    // The blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++){
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    // Release device and host memory (the original leaked both).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaFree(dev_n);
    free(a);
    free(b);
    free(c);
    free(n);
    return 0;
}
10,332 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <cuda.h>
//#include <iostream>
////#define BIN_COUNT 7
//#define BLOCK_SIZE 64
//#define N 512
//
//// This graph does the histogram calculations
//__global__ void histogram(int* d_in, int* d_bins, const int BIN_COUNT)
//{
// // Getting the global thread id.
// int gid = threadIdx.x + (blockDim.x * blockIdx.x);
// // Checking what bin the value belongs to.
// int whatBin = d_in[gid] % BIN_COUNT;
// // Create shared version of the bin.
// __shared__ int* temp;
// // Initalizing the array
// temp = new int[BIN_COUNT];
// // Synchronising threads.
// __syncthreads();
// // Atomic add on the shared memory, Increment value by one.
// atomicAdd(&(temp[whatBin]), 1);
// // Synchronising threads
// __syncthreads();
// // Atomic add on the output bins with the partial results stored across shared memory
// atomicAdd(&d_bins[threadIdx.x], temp[threadIdx.x]);
// // Synchronising threads.
// __syncthreads();
// //printf("\n Hello gid = %i, BIN = %i | %i %i %i\n", gid,whatBin,temp[0], temp[1], temp[2]);
//}
//
//// This funciton sets up the histogram kernal
//void histogramMiddle(int* h_input, int* h_bins) {
// // Creating pointers for the device input and bin storage
// int* d_input, * d_bins;
// // Initializing the error variable
// cudaError_t err;
// // Intalizing variables.
// int BIN_COUNT = 8;
// int noThreads = BLOCK_SIZE;
// int noBlocks = N / BLOCK_SIZE;
//
// // Allocating the input and bin on the device.
// err = cudaMalloc((void**)&d_input, sizeof(int) * N);
// printf("\n Allocating d_input error %s \n", cudaGetErrorString(err));
// err = cudaMalloc((void**)&d_bins, sizeof(int) * (N / BLOCK_SIZE));
// printf("\n Allocating d_bins error %s \n", cudaGetErrorString(err));
//
// // Copying the data to the GPU;
// err = cudaMemcpy(d_input, h_input, sizeof(int) * N, cudaMemcpyHostToDevice);
// printf("\n Copying input data from CPU -> GPU error: %s \n", cudaGetErrorString(err));
//
// // Now call the Kernal
// histogram << < noBlocks, noThreads >> > (d_input, d_bins, BIN_COUNT);
// err = cudaDeviceSynchronize();
// printf("\n Kernel error: %s \n", cudaGetErrorString(err));
//
// // Time to copy the bins back into the CPU
// err = cudaMemcpy(h_bins, d_bins, sizeof(int) * (N / BLOCK_SIZE), cudaMemcpyDeviceToHost);
// printf("\n Copying bins from GPU -> CPU error: %s \n", cudaGetErrorString(err));
// for (int i = 0; i < (N / BLOCK_SIZE); i++) {
// printf("\n bin_id %i %i \n ", i % BLOCK_SIZE, h_bins[i]);
// }
// return;
//}
//int main(void)
//{
// // Initalizing the arrays
// int* input = new int[N];
// int* bins = new int[N % BLOCK_SIZE];
//
// // Putting in values
// // The way this is being set up [0] = 0; [255] = 255; [N] = N;
// for (int i = 0; i < N; i++) {
// input[i] = i;
// }
// // Calling the function that prepares the kernal.
// histogramMiddle(input, bins);
//}
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
//#include <iostream>
//#include <fstream>
//#include <string>
//// notes
//using namespace std;
///////////////////////////////////////////////////////////////////////////////
// GPU code to calculate the bin number.
// This assumes that you have normalized your data that you want to plot to
// lie between 0.0 and 1.0.
// Outside this range is classified as overflow or underflow.
///////////////////////////////////////////////////////////////////////////////
__device__ int get_bin_num(float normalized_val, int nbins)
{
    // Bin layout: 0 = underflow, 1 .. nbins-2 = data bins, nbins-1 = overflow.
    if (normalized_val >= 1.0)
    {
        // At or above the top edge: overflow bin.
        return nbins - 1;
    }
    if (normalized_val < 0.0)
    {
        // Below the bottom edge: underflow bin.
        return 0;
    }
    if (normalized_val == 0.0)
    {
        // Exactly on the bottom edge: first data bin.
        return 1;
    }
    // Strictly inside (0,1): scale across the nbins-2 data bins, shifted by
    // one to skip the underflow bin. Done last so the common early exits
    // above avoid the multiply.
    return (int)(normalized_val * (nbins - 2)) + 1;
}
///////////////////////////////////////////////////////////////////////////////
// GPU code to calculate the separation between two galaxies given the
// right ascenscion and declanation.
///////////////////////////////////////////////////////////////////////////////
//__global__ void CalcSep(float* raA, float* decA, int ngals, int nthreads, int* hist_array, float hist_lo, float hist_hi, int hist_nbins)
///////////////////////////////////////////////////////////////////////////////
// GPU code to calculate the angular separation between galaxy pairs and
// histogram the results.
//
// Thread layout: 1-D grid. Thread ix handles column ix of the pair matrix
// and, to balance the triangular workload, also column (ngals-1)-ix, so the
// host launches nthreads = ngals/2 workers. Each thread owns a private
// hist_nbins-wide slice of hist_array starting at ix*hist_nbins, so the
// increments need no atomics.
///////////////////////////////////////////////////////////////////////////////
__global__ void CalcSep(float* raA_0, float* sin_decA_0, float* cos_decA_0,
float* raA_1, float* sin_decA_1, float* cos_decA_1,
int ngals, int nthreads, int* hist_array, float hist_lo, float hist_hi, int hist_nbins)
{
//does all the i's simultaneously - one for each thread
int ix = blockDim.x * blockIdx.x + threadIdx.x;
// FIX: the grid is rounded up to whole blocks, so there can be more threads
// than work items. Without this guard the surplus threads both double-count
// pairs and write past the end of hist_array, which is sized for exactly
// nthreads*hist_nbins ints. (nthreads was previously passed in but unused.)
if (ix >= nthreads) return;
// Get normalization term
float norm = hist_hi-hist_lo;
float norm_val = 0;
int bin = 0;
// Base of this thread's private histogram slice.
int hist_array_bin_block = ix*hist_nbins;
int hist_array_bin = 0;
float sep=0;
float sin_dec_ix,sin_dec_ij;
float cos_dec_ix,cos_dec_ij;
float ra_ix, ra_ij;
// Do the ix ``column"
sin_dec_ix = sin_decA_0[ix];
cos_dec_ix = cos_decA_0[ix];
ra_ix = raA_0[ix];
for(int ij=ix+1;ij<ngals;ij++)
{
sin_dec_ij = sin_decA_1[ij];
cos_dec_ij = cos_decA_1[ij];
ra_ij = raA_1[ij];
// Spherical law of cosines for the angular separation.
sep = acos( sin_dec_ix*sin_dec_ij + cos_dec_ix*cos_dec_ij*cos(fabs(ra_ix-ra_ij)) );
norm_val = (sep-hist_lo)/norm;
bin = get_bin_num(norm_val,hist_nbins);
hist_array_bin = hist_array_bin_block + bin;
// If we passed 0 bins or -x on the command line, don't try
// to fill the super array.
if (hist_nbins>2)
{
hist_array[hist_array_bin]++;
}
}//loop over gals
// Then the ngals-ix ``column" (the mirrored half of the triangle)
ix = (ngals - 1) - ix;
sin_dec_ix = sin_decA_0[ix];
cos_dec_ix = cos_decA_0[ix];
ra_ix = raA_0[ix];
for(int ij=ix+1;ij<ngals;ij++)
{
sin_dec_ij = sin_decA_1[ij];
cos_dec_ij = cos_decA_1[ij];
ra_ij = raA_1[ij];
sep = acos( sin_dec_ix*sin_dec_ij + cos_dec_ix*cos_dec_ij*cos(fabs(ra_ix-ra_ij)) );
norm_val = (sep-hist_lo)/norm;
bin = get_bin_num(norm_val,hist_nbins);
hist_array_bin = hist_array_bin_block + bin;
// If we passed 0 bins or -x on the command line, don't try
// to fill the super array.
if (hist_nbins>2)
{
hist_array[hist_array_bin]++;
}
}//loop over gals
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//Host code
// Host driver: reads a galaxy catalog (RA dec per line), computes all pair
// separations on the GPU in ngals_per_calculation_block-sized subblocks, and
// writes the compressed histogram to histogram_array.txt.
int main(int argc, char **argv)
{
printf("In main.....\n");
clock_t loopstart = clock();
int ngals = 2000;
int nbins = 100;
float hist_lo = 0.0;
float hist_hi = 2.0;
int ngals_per_calculation_block = 10000;
///////////////////////////////////////////////////////////////////////////
// Parse the command line FIRST, so every array sized from nbins sees the
// user's value. (Previously nbins_with_overflow was frozen from the default
// nbins before the arguments were read.)
///////////////////////////////////////////////////////////////////////////
char* filename = NULL;
if (argc > 2)
{
filename = argv[1];
ngals = atoi(argv[2]);
if (argc > 3)
{
nbins = atoi(argv[3]);
}
}
else
{
// Without an input file there is nothing to do; the old code fell through
// here and passed an uninitialized pointer to fopen. (It also read argv[2]
// whenever argc>1, crashing when only the filename was given.)
printf("Usage: %s <input file> <number of galaxies> <number of histogram bins>\n",\
argv[0]);
printf("\nDefault is 1000 galaxies and 100 bins\n\n");
return 1;
}
printf ("Parsed the command line\n");
// Total bin count including underflow (bin 0) and overflow (last bin).
int nbins_with_overflow = nbins + 2;
// Master (compressed) histogram. calloc zero-fills it: the += accumulation
// below started from uninitialized malloc memory in the original.
int *h_hist_array_compressed = (int*)calloc(nbins_with_overflow, sizeof(int));
if (0==h_hist_array_compressed)
{
printf("Couldn't allocate memory on host for h_hist_array_compressed....\n");
return 1;
}
//allocate total arrays (space for up to 1e6 galaxies)
printf("Allocating memory for arrays\n");
float *h_raA_total = (float*)malloc(1000000*sizeof(float));
float *h_decA_total = (float*)malloc(1000000*sizeof(float));
float *h_sin_decA_total = (float*)malloc(1000000*sizeof(float));
float *h_cos_decA_total = (float*)malloc(1000000*sizeof(float));
if (0==h_raA_total || 0==h_decA_total || 0==h_sin_decA_total || 0==h_cos_decA_total)
{
printf("Couldn't allocate memory on host for the galaxy catalogs....\n");
return 1;
}
printf("Allocated memory for arrays\n");
///////////////////////////////////////////////////////////////////////////
// Read in galaxies from file; precompute sin/cos of the declination once on
// the host so the GPU doesn't recompute them per pair.
///////////////////////////////////////////////////////////////////////////
FILE *infile = fopen(filename,"r");
printf("%s\n", filename);
if (0==infile)
{
printf("Couldn't open input file!\n");
return 1;
}
int i = 0;
// Bound the loop by the array capacity as well: the original only checked
// AFTER the overrun had already happened.
while (i < 1000000 && fscanf(infile, "%f %f", &h_raA_total[i], &h_decA_total[i]) != EOF)
{
h_cos_decA_total[i] = cos(h_decA_total[i]);
h_sin_decA_total[i] = sin(h_decA_total[i]);
i += 1;
if (i>ngals) break;
}
fclose(infile);
ngals = i-1;
// Check to make sure we haven't read in too many galaxies.
if (ngals>1000000)
{
printf( "Too many galaxies!\n");
printf( "We only made space for 1e6!\n");
exit(-1);
}
// How many times will we loop over the subblocks?
int nsubblocks = ngals/ngals_per_calculation_block;
if (ngals%ngals_per_calculation_block != 0)
{
printf("ngals must be an integer multiple of %d\n",ngals_per_calculation_block);
printf("ngals: %d\tngals_per_calculation_block: %d\tmodulo: %d\n", ngals, ngals_per_calculation_block, ngals%ngals_per_calculation_block);
exit(-1);
}
///////////////////////////////////////////////////////////////////////////
//allocate subblock vectors in host memory
size_t gal_mem_needed = ngals_per_calculation_block * sizeof(float);
float* h_raA_0 = (float*)malloc(gal_mem_needed);
float* h_decA_0 = (float*)malloc(gal_mem_needed);
float* h_cos_decA_0 = (float*)malloc(gal_mem_needed);
float* h_sin_decA_0 = (float*)malloc(gal_mem_needed);
float* h_raA_1 = (float*)malloc(gal_mem_needed);
float* h_decA_1 = (float*)malloc(gal_mem_needed);
float* h_cos_decA_1 = (float*)malloc(gal_mem_needed);
float* h_sin_decA_1 = (float*)malloc(gal_mem_needed);
if (0==h_raA_0 || 0==h_sin_decA_0 || 0==h_cos_decA_0 || 0==h_raA_1 || 0==h_sin_decA_1 || 0==h_cos_decA_1)
{
printf("Couldn't allocate memory on host....\n");
return 1;
}
///////////////////////////////////////////////////////////////////////////
//allocate vectors in device memory
// NOTE(review): pointers are pre-set to 0 and checked after cudaMalloc;
// checking the returned cudaError_t would be more robust.
float* d_raA_0=0;
float* d_sin_decA_0=0;
float* d_cos_decA_0=0;
float* d_raA_1=0;
float* d_sin_decA_1=0;
float* d_cos_decA_1=0;
cudaMalloc(&d_raA_0, gal_mem_needed);
cudaMalloc(&d_cos_decA_0, gal_mem_needed);
cudaMalloc(&d_sin_decA_0, gal_mem_needed);
cudaMalloc(&d_raA_1, gal_mem_needed);
cudaMalloc(&d_cos_decA_1, gal_mem_needed);
cudaMalloc(&d_sin_decA_1, gal_mem_needed);
if (0==d_raA_0 || 0==d_cos_decA_0 || 0==d_sin_decA_0 || 0==d_raA_1 || 0==d_cos_decA_1 || 0==d_sin_decA_1)
{
printf("Couldn't allocate memory on device....\n");
return 1;
}
///////////////////////////////////////////////////////////////////////////
// Loop over every pair of subblocks.
///////////////////////////////////////////////////////////////////////////
for (int j=0;j<nsubblocks;j++)
{
for (int k=0;k<nsubblocks;k++)
{
// FIX: format string was "%d% d" (a literal '%' glitch), now "%d %d".
printf("nsubblocks: %d\t\t%d %d\n",nsubblocks,j,k);
///////////////////////////////////////////////////////////////////////////
// Each thread handles two matrix columns, so half a block's worth of threads.
int nthreads = ngals_per_calculation_block/2;
printf("nthreads: %d\n",nthreads);
// From here on out, use the number of bins with underflow/overflow added in
// to the calculation. Each thread gets a private histogram slice.
int nbins_in_super_hist_array = nthreads*nbins_with_overflow;
size_t hist_mem_needed = nbins_in_super_hist_array*sizeof(int);
int *h_hist_array = (int*)malloc(hist_mem_needed);
if (0==h_hist_array)
{
printf("Couldn't allocate memory on host....\n");
return 1;
}
int *d_hist_array;
cudaMalloc(&d_hist_array, hist_mem_needed);
if (0==d_hist_array)
{
printf("Couldn't allocate memory on device....\n");
return 1;
}
///////////////////////////////////////////////////////////////////////////
// Copy over the subblock of galaxy coordinates.
///////////////////////////////////////////////////////////////////////////
int gal0_lo = ngals_per_calculation_block*j;
int gal0_hi = ngals_per_calculation_block*(j+1);
int gal1_lo = ngals_per_calculation_block*k;
int gal1_hi = ngals_per_calculation_block*(k+1);
int gal_count = 0;
for (int gal0=gal0_lo;gal0<gal0_hi;gal0++)
{
h_raA_0[gal_count] = h_raA_total[gal0];
h_cos_decA_0[gal_count] = h_cos_decA_total[gal0];
h_sin_decA_0[gal_count] = h_sin_decA_total[gal0];
gal_count += 1;
}
gal_count = 0;
for (int gal1=gal1_lo;gal1<gal1_hi;gal1++)
{
h_raA_1[gal_count] = h_raA_total[gal1];
h_cos_decA_1[gal_count] = h_cos_decA_total[gal1];
h_sin_decA_1[gal_count] = h_sin_decA_total[gal1];
gal_count += 1;
}
///////////////////////////////////////////////////////////////////////////
// Zero out the super-array that will hold the histogram entries
// for each thread.
printf("nbins_in_super_hist_array: %d\n",nbins_in_super_hist_array);
for (int i=0;i<nbins_in_super_hist_array;i++)
{
h_hist_array[i] = 0;
}
///////////////////////////////////////////////////////////////////////////
// Copy vectors from host to device memory.
cudaMemcpy(d_raA_0, h_raA_0, gal_mem_needed, cudaMemcpyHostToDevice);
cudaMemcpy(d_raA_1, h_raA_1, gal_mem_needed, cudaMemcpyHostToDevice);
cudaMemcpy(d_sin_decA_0, h_sin_decA_0, gal_mem_needed, cudaMemcpyHostToDevice);
cudaMemcpy(d_cos_decA_0, h_cos_decA_0, gal_mem_needed, cudaMemcpyHostToDevice);
cudaMemcpy(d_sin_decA_1, h_sin_decA_1, gal_mem_needed, cudaMemcpyHostToDevice);
cudaMemcpy(d_cos_decA_1, h_cos_decA_1, gal_mem_needed, cudaMemcpyHostToDevice);
cudaMemcpy(d_hist_array, h_hist_array, hist_mem_needed, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////////////
// Calculate our thread/grid/block sizes (ceil-divide nthreads into blocks).
int threadsPerBlock = 256;
int blocksPerGrid = (nthreads + threadsPerBlock - 1) / threadsPerBlock;
printf("threadsPerBlock: %d\t\tblocksPerGrid: %d\n",threadsPerBlock,blocksPerGrid);
// Set up the cuda timer; a CPU timer would only time the (asynchronous)
// kernel launch overhead.
cudaEvent_t cudastart, cudaend;
cudaEventCreate(&cudastart);
cudaEventCreate(&cudaend);
cudaEventRecord(cudastart,0);
///////////////////////////////////////////////////////////////////////////
// Run the kernel!
CalcSep<<<blocksPerGrid, threadsPerBlock>>>(d_raA_0, d_sin_decA_0, d_cos_decA_0, \
d_raA_1, d_sin_decA_1, d_cos_decA_1, \
ngals_per_calculation_block, nthreads, d_hist_array, hist_lo, hist_hi, nbins_with_overflow);
// Copy the info back off the GPU to the host (blocking, so it also
// synchronizes with the kernel).
cudaMemcpy(h_hist_array, d_hist_array, hist_mem_needed, cudaMemcpyDeviceToHost);
///////////////////////////////////////////////////////////////////////////
// Record the end time
cudaEventRecord(cudaend,0);
cudaEventSynchronize(cudaend);
// How long did the kernel take? this gives time in ms
float cudaelapsed=0;
cudaEventElapsedTime(&cudaelapsed, cudastart, cudaend);
printf("elapsed time for GPU in ms: %f\n",cudaelapsed);
///////////////////////////////////////////////////////////////////////////
// Collapse the per-thread super histogram into the compressed master array.
int sum = 0;
int master_bin = 0;
for (int i=0;i<nbins_in_super_hist_array;i++)
{
sum += h_hist_array[i];
master_bin = i%nbins_with_overflow;
h_hist_array_compressed[master_bin] += h_hist_array[i];
}
printf("\ntotal: %d\n",sum);
///////////////////////////////////////////////////////////////////////////
// Free the per-iteration resources.
cudaEventDestroy(cudastart);
cudaEventDestroy(cudaend);
cudaFree(d_hist_array);
free(h_hist_array);
}
}
// Free up the device memory.
cudaFree(d_raA_0);
cudaFree(d_sin_decA_0);
cudaFree(d_cos_decA_0);
cudaFree(d_raA_1);
cudaFree(d_sin_decA_1);
cudaFree(d_cos_decA_1);
// Free up the host memory (the original leaked the *_total catalogs).
free(h_raA_0);
free(h_decA_0);
free(h_sin_decA_0);
free(h_cos_decA_0);
free(h_raA_1);
free(h_decA_1);
free(h_sin_decA_1);
free(h_cos_decA_1);
free(h_raA_total);
free(h_decA_total);
free(h_sin_decA_total);
free(h_cos_decA_total);
// Write the compressed histogram: first line is the histogram range,
// second line the bin contents (including under/overflow).
FILE *outfile;
outfile = fopen("histogram_array.txt","w+");
fprintf(outfile,"%f %f\n",hist_lo,hist_hi);
for (int i=0;i<nbins_with_overflow;i++)
{
fprintf(outfile,"%d ",h_hist_array_compressed[i]);
}
fprintf(outfile,"\n");
fclose(outfile);
free(h_hist_array_compressed);
clock_t loopend = clock();
float loopelapsed = (float)(loopend-loopstart);
printf("elapsed time for CPU in ms: %f \n", loopelapsed/CLOCKS_PER_SEC*1000);
}
|
#include <stdio.h>
#define BLOCKSIZE 512
// Label every character with its 1-based position inside the current word
// (distance since the last space); spaces themselves get label 0. Labels are
// only correct within one BLOCKSIZE chunk -- prefixLabeling later stitches
// words that straddle a chunk boundary.
// Launch layout: one (single-threaded) block per BLOCKSIZE chunk.
__global__ void partialLabeling (const char *cuStr, int *cuPos, int strLen) {
    const int chunkStart = blockIdx.x * BLOCKSIZE;
    int chunkEnd = chunkStart + BLOCKSIZE;
    if (chunkEnd > strLen) {
        chunkEnd = strLen;   // clamp the final, partial chunk
    }
    int runLength = 0;       // characters seen since the last space
    for (int idx = chunkStart; idx < chunkEnd; ++idx) {
        runLength = (cuStr[idx] == ' ') ? 0 : runLength + 1;
        cuPos[idx] = runLength;
    }
}
// Fix-up pass after partialLabeling: when a word straddles the boundary
// between chunk b and chunk b+1, add the label of the last character of
// chunk b onto the word's prefix inside chunk b+1.
// Launch layout: one (single-threaded) block per interior chunk boundary;
// block b handles the chunk that starts at (b+1)*BLOCKSIZE.
// NOTE(review): each launch only propagates across a single boundary, and
// blocks run concurrently -- words spanning more than two chunks look like
// they would need additional passes. Confirm the intended maximum word length.
__global__ void prefixLabeling (const char *cuStr, int *cuPos, int strLen) {
int begId = (blockIdx.x + 1) * BLOCKSIZE;
int endId = (blockIdx.x + 2) * BLOCKSIZE;
endId = (endId < strLen)? endId : strLen;
// The word continues across the boundary only if the character just before
// this chunk is not a space.
if (cuStr[begId - 1] != ' ') {
int prevPos = cuPos[begId - 1];
// Walk the continuation of the word and shift its labels.
for (int i = begId; i < endId && cuStr[i] != ' '; ++i) {
cuPos[i] += prevPos;
}
}
}
// Host wrapper: label each character of the device string cuStr with its
// within-word position, writing labels into cuPos. Runs the per-chunk pass,
// then one fix-up pass for words crossing a chunk boundary.
void labeling (const char *cuStr, int *cuPos, int strLen) {
    int block_dim = strLen / BLOCKSIZE;
    partialLabeling <<<block_dim + 1, 1>>> (cuStr, cuPos, strLen);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported call with the same semantics here.
    cudaDeviceSynchronize();
    // A zero-block launch (<<<0,1>>>) is an invalid configuration, so only run
    // the fix-up pass when there is at least one chunk boundary to stitch
    // (i.e. strLen >= BLOCKSIZE).
    if (block_dim > 0) {
        prefixLabeling <<<block_dim, 1>>> (cuStr, cuPos, strLen);
        cudaDeviceSynchronize();
    }
}
|
#include <cstdio>
/*
diameter: 2*Radius + 1
0 0 0 0 0 0 0 0
x x x x x x x x
x x x x x x x x
x x x x x x x x
x x x x x x x x
x x x x x x x x
x x x x x x x x
0 0 0 0 0 0 0 0
x - data
0 - redundant halo
*/
// Central-difference d/dy stencil computed with a per-thread register
// pipeline: each thread walks down one column of the padded array (one halo
// row above and one below the ny data rows; see the layout sketch above).
// Launch layout: 1-D grid over x; no shared memory is used.
template<int radius, int diameter>
__global__ void dFdY_kernel(const float * input, float * output, int nx, int ny)
{
/// sm_20: 13 registers used by this kernel
const int gtidx = blockDim.x * blockIdx.x + threadIdx.x;
int outputIndex = gtidx + radius * nx;
int inputIndex = outputIndex - radius * nx;
// declare local register buffer
float buffer[diameter];
// Fill the buffer up to start computations
// (buffer[0] is deliberately left unset: the shift at the top of the y loop
// moves buffer[1] into it before it is ever read.)
#pragma unroll
for (int i = 1; i < diameter; ++i)
{
buffer[i] = input[inputIndex];
inputIndex += nx;
}
/// Move front towards y (vertical) direction
for (int y = 0; y < ny; ++y)
{
// update register values: shift the pipeline up by one row
#pragma unroll
for (int i = 0; i < diameter - 1; ++i)
{
buffer[i] = buffer[i + 1];
}
buffer[diameter - 1] = input[inputIndex];
// compute (df/dy)(x,y) := [f(x,y+h) - f(x,y-h)] / (2*h), with h = 1
// (the buffer[2]/buffer[0] indices hard-code radius == 1)
float derivative = 0.5f * (buffer[2] - buffer[0]);
// write output
output[outputIndex] = derivative;
outputIndex += nx;
inputIndex += nx;
}
}
// Host-side test for dFdY_kernel: builds f(x,y) = (x-A)^2 + (y-B)^2 on a
// (1 + ny + 1)-row padded grid, runs the derivative kernel, times it with
// CUDA events, and dumps the result to resultDfDy.txt.
void TestPartialDerivative_dFdY()
{
const int nx = 2048;
const int ny = 2048;
const int paddedny = (1 + ny + 1);
const int nelem = nx * paddedny;
const int nbytes = nelem * sizeof(float);
// Pinned host buffers for full-bandwidth transfers.
float* fh; cudaMallocHost((void**)&fh, nbytes);
float* dh; cudaMallocHost((void**)&dh, nbytes);
/// Fill input array: f(x,y) := (x-A)^2 + (y-B)^2
/// Fill first halo row (logical row y = -1)
int a = -1;
for (int x = 0; x < nx; ++x)
{
fh[x + (a + 1) * nx] = (float)((x - nx * 0.5f)*(x - nx * 0.5f) + (a - ny * 0.5f)*(a - ny * 0.5f));
}
/// Fill working data (function values)
for (int y = 0; y < ny; ++y)
{
for (int x = 0; x < nx; ++x)
{
fh[x + (y + 1) * nx] = (float)((x - nx * 0.5f)*(x - nx * 0.5f) + (y - ny * 0.5f)*(y - ny * 0.5f));
}
}
/// Fill last halo row (logical row y = ny)
a = ny;
for (int x = 0; x < nx; ++x)
{
fh[x + (a + 1) * nx] = (float)((x - nx * 0.5f)*(x - nx * 0.5f) + (a - ny * 0.5f)*(a - ny * 0.5f));
}
/// Fill output array with zeros
for (int y = 0; y < paddedny; ++y)
{
for (int x = 0; x < nx; ++x)
{
dh[x + y * nx] = 0.0f;
}
}
float* fd; cudaMalloc((void**)&fd, nbytes);
float* dd; cudaMalloc((void**)&dd, nbytes);
cudaMemcpy(fd, fh, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(dd, dh, nbytes, cudaMemcpyHostToDevice);
/// Initialize timer
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int nthread = 128;
int nblock = nx/nthread;
/// Record time before kernel launch
cudaEventRecord(start, 0);
const int radius = 1;
// FIX: dFdY_kernel keeps its stencil in a register buffer and declares no
// extern shared memory, so the dynamic shared-memory allocation the old
// launch requested was never used; dropping it frees shared memory for
// occupancy.
dFdY_kernel<radius, 2*radius + 1><<<nblock, nthread>>>(fd, dd, nx, ny);
/// Record time after simulation
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaMemcpy(dh, dd, nbytes, cudaMemcpyDeviceToHost);
/// Calculate kernel time
float time_ms; cudaEventElapsedTime(&time_ms, start, stop);
printf("df/dy kernel time: %f ms\n", time_ms);
/// Release timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
/// Free device memory
cudaFree(fd);
cudaFree(dd);
/// Write result (including halo rows) to file: "x y value" per line.
FILE* file = fopen("resultDfDy.txt","w");
for (int y = 0; y < paddedny; ++y)
{
for (int x = 0; x < nx; ++x)
{
fprintf(file,"%d %d %f \n", x, y, dh[x + y * nx]);
}
fprintf(file,"\n");
}
fclose(file);
/// Free host memory
cudaFreeHost(fh);
cudaFreeHost(dh);
}
/*
offset: 32 elements = 128B
0 x x x x x x x x 0
0 x x x x x x x x 0
0 x x x x x x x x 0
0 x x x x x x x x 0
0 x x x x x x x x 0
0 x x x x x x x x 0
0 x x x x x x x x 0
0 x x x x x x x x 0
0 x x x x x x x x 0
x - data
0 - redundant halo
where 0 symbol means 128B offset (32*4B)
*/
// Central-difference d/dx stencil using a shared-memory tile with halo.
// Launch layout: 1-D grid over x; the dynamic shared memory passed at launch
// must hold (blockDim.x + 2*radius) floats. Rows are padded by `offset`
// elements on each side (see the sketch above) so row starts stay aligned
// and the halo loads stay in bounds.
template<int radius, int offset>
__global__ void dFdX_kernel(const float * input, float * output, int nx, int ny)
{
/// sm_20: 14 registers used by this kernel
extern __shared__ float smem[];
const int gtidx = blockDim.x * blockIdx.x + threadIdx.x;
const int ltidx = threadIdx.x;
const int blockdimx = blockDim.x;
const int rowsize = offset + nx + offset;
// This thread's slot in the tile, shifted past the left halo.
const int tx = ltidx + radius;
/// Move front towards y (vertical) direction
for (int y = 0; y < ny; ++y)
{
// calculate global input index
const int inputIndex = gtidx + offset + y * rowsize;
// Barrier before reusing the tile: no thread may still be reading the
// previous row. Unconditional, so every thread in the block reaches it.
__syncthreads();
// load "halo" left && right
// (the first `radius` threads each fetch one element on either side)
if (ltidx < radius)
{
smem[ltidx] = input[inputIndex - radius];
smem[ltidx + blockdimx + radius] = input[blockdimx + inputIndex];
}
// load "internal" data
smem[tx] = input[inputIndex];
__syncthreads();
// compute (df/dx)(x,y) := [f(x+h,y) - f(x-h,y)] / (2*h), with h = 1
// (the +/- 1 offsets hard-code radius == 1)
float derivative = 0.5f * (smem[tx + 1] - smem[tx - 1]);
// write output
output[inputIndex] = derivative;
}
}
// Host-side test for dFdX_kernel: builds f(x,y) = (x-A)^2 + (y-B)^2 on a grid
// whose rows carry a 32-element (128 B) pad on each side, runs the derivative
// kernel, times it with CUDA events, and dumps the un-padded result to
// resultDfDx.txt.
void TestPartialDerivative_dFdX()
{
const int nx = 2048;
const int ny = 2048;
const int pad32 = 32;
const int paddednx = (pad32 + nx + pad32);
const int nelem = paddednx * ny;
const int nbytes = nelem * sizeof(float);
// Pinned host buffers for full-bandwidth transfers.
float* fh; cudaMallocHost((void**)&fh, nbytes);
float* dh; cudaMallocHost((void**)&dh, nbytes);
memset(fh, 0, nbytes);
memset(dh, 0, nbytes);
/// Fill input array: f(x,y) := (x-A)^2 + (y-B)^2
for (int y = 0; y < ny; ++y)
{
/// Fill first 32 elements in the row
for (int x = 0; x < pad32; ++x)
{
fh[x + y * paddednx] = 0.0f;
}
// The last pad element before the data carries the real function value so
// the left halo read at the row start is correct.
int a = pad32 - 1;
fh[a + y * paddednx] = (float)((a - paddednx * 0.5f)*(a - paddednx * 0.5f) + (y - ny * 0.5f)*(y - ny * 0.5f));
/// Fill working data (function values)
for (int x = pad32; x < pad32 + nx; ++x)
{
fh[x + y * paddednx] = (float)((x - paddednx * 0.5f)*(x - paddednx * 0.5f) + (y - ny * 0.5f)*(y - ny * 0.5f));
}
/// Fill last 32 elements in the row
for (int x = pad32 + nx; x < pad32 + nx + pad32; ++x)
{
fh[x + y * paddednx] = 0.0f;
}
// First pad element after the data likewise feeds the right halo read.
a = pad32 + nx;
fh[a + y * paddednx] = (float)((a - paddednx * 0.5f)*(a - paddednx * 0.5f) + (y - ny * 0.5f)*(y - ny * 0.5f));
}
/// Fill output array with zeros
for (int y = 0; y < ny; ++y)
{
for (int x = 0; x < paddednx; ++x)
{
dh[x + y * paddednx] = 0.0f;
}
}
float* fd; cudaMalloc((void**)&fd, nbytes);
float* dd; cudaMalloc((void**)&dd, nbytes);
cudaMemcpy(fd, fh, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(dd, dh, nbytes, cudaMemcpyHostToDevice);
/// Initialize timer
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int nthread = 128;
int nblock = nx/nthread;
/// Record time befor kernel launch
cudaEventRecord(start, 0);
const int radius = 1;
// Dynamic shared memory: (blockDim.x + 2*radius) floats, exactly what the
// kernel's extern __shared__ tile requires.
dFdX_kernel<radius, pad32><<<nblock, nthread, (nthread + 2*radius) * sizeof(float)>>>(fd, dd, nx, ny);
/// Record time after simulation
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaMemcpy(dh, dd, nbytes, cudaMemcpyDeviceToHost);
/// Calculate kernel time
float time_ms; cudaEventElapsedTime(&time_ms, start, stop);
printf("df/dx kernel time: %f ms\n", time_ms);
/// Release timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
/// Free device memory
cudaFree(fd);
cudaFree(dd);
/// write result to file ("x y value" per line, padding columns skipped)
FILE* file = fopen("resultDfDx.txt","w");
for (int y = 0; y < ny; ++y)
{
for (int x = pad32; x < pad32 + nx; ++x)
{
fprintf(file,"%d %d %f \n", x, y, dh[x + y * paddednx]);
}
fprintf(file,"\n");
}
fclose(file);
/// Free host memory
cudaFreeHost(fh);
cudaFreeHost(dh);
}
// Entry point: run both finite-difference tests in turn.
// (d/dy uses a per-thread register pipeline; d/dx uses a shared-memory tile.)
int main(int argc, char** argv)
{
    TestPartialDerivative_dFdY();
    TestPartialDerivative_dFdX();
    return 0;
}
// Copyright (c) 2019-2020, NVIDIA CORPORATION.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/complex.h>
///////////////////////////////////////////////////////////////////////////////
// UPFIRDN1D //
///////////////////////////////////////////////////////////////////////////////
// Polyphase upsample-filter-downsample (upfirdn) along a 1-D signal.
// Grid-stride loop: each thread produces output samples tid, tid+stride, ...
// h_trans_flip is the transposed/flipped filter, laid out as `up` phases of
// h_per_phase taps each. `axis` is unused in the 1-D case but kept for a
// uniform signature with the N-D variants.
template<typename T>
__device__ void _cupy_upfirdn1D( const T *__restrict__ inp,
                                 const T *__restrict__ h_trans_flip,
                                 const int up,
                                 const int down,
                                 const int axis,
                                 const int x_shape_a,
                                 const int h_per_phase,
                                 const int padded_len,
                                 T *__restrict__ out,
                                 const int outW ) {

    const int t { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int stride { static_cast<int>( blockDim.x * gridDim.x ) };

    for ( size_t tid = t; tid < outW; tid += stride ) {
#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 )
        __builtin_assume( padded_len > 0 );
        __builtin_assume( up > 0 );
        __builtin_assume( down > 0 );
        // FIX: the former __builtin_assume( tid > 0 ) was removed -- it is
        // false on thread 0's first iteration (tid == 0), and a false
        // assumption is undefined behavior the optimizer may exploit.
#endif
        // Input sample this output tap aligns with (in the padded signal).
        const int x_idx { static_cast<int>( ( tid * down ) / up ) % padded_len };
        // Start of the filter phase serving this output sample.
        int h_idx { static_cast<int>( ( tid * down ) % up * h_per_phase ) };
        int x_conv_idx { x_idx - h_per_phase + 1 };
        if ( x_conv_idx < 0 ) {
            // Clip the convolution window at the left edge of the input and
            // skip the corresponding filter taps.
            h_idx -= x_conv_idx;
            x_conv_idx = 0;
        }

        T temp {};
        // Dot product of the (clipped) input window with the flipped phase.
        int stop = ( x_shape_a < ( x_idx + 1 ) ) ? x_shape_a : ( x_idx + 1 );
        for ( int x_c = x_conv_idx; x_c < stop; x_c++ ) {
            temp += inp[x_c] * h_trans_flip[h_idx];
            h_idx += 1;
        }
        out[tid] = temp;
    }
}
// Type-specialized 1-D entry points: each just instantiates the templated
// worker. __launch_bounds__(512) caps the block size for the launcher.
extern "C" __global__ void __launch_bounds__( 512 )
_cupy_upfirdn1D_float32( const float *__restrict__ inp, const float *__restrict__ h_trans_flip,
                         const int up, const int down, const int axis, const int x_shape_a,
                         const int h_per_phase, const int padded_len,
                         float *__restrict__ out, const int outW ) {
    _cupy_upfirdn1D<float>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}

extern "C" __global__ void __launch_bounds__( 512 )
_cupy_upfirdn1D_float64( const double *__restrict__ inp, const double *__restrict__ h_trans_flip,
                         const int up, const int down, const int axis, const int x_shape_a,
                         const int h_per_phase, const int padded_len,
                         double *__restrict__ out, const int outW ) {
    _cupy_upfirdn1D<double>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}

extern "C" __global__ void __launch_bounds__( 512 )
_cupy_upfirdn1D_complex64( const thrust::complex<float> *__restrict__ inp,
                           const thrust::complex<float> *__restrict__ h_trans_flip,
                           const int up, const int down, const int axis, const int x_shape_a,
                           const int h_per_phase, const int padded_len,
                           thrust::complex<float> *__restrict__ out, const int outW ) {
    _cupy_upfirdn1D<thrust::complex<float>>(
        inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}

extern "C" __global__ void __launch_bounds__( 512 )
_cupy_upfirdn1D_complex128( const thrust::complex<double> *__restrict__ inp,
                            const thrust::complex<double> *__restrict__ h_trans_flip,
                            const int up, const int down, const int axis, const int x_shape_a,
                            const int h_per_phase, const int padded_len,
                            thrust::complex<double> *__restrict__ out, const int outW ) {
    _cupy_upfirdn1D<thrust::complex<double>>(
        inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}
///////////////////////////////////////////////////////////////////////////////
// UPFIRDN2D //
///////////////////////////////////////////////////////////////////////////////
// Core 2-D upfirdn worker: applies the 1-D upfirdn along `axis` (1 = rows of
// the inner dimension inpH, otherwise columns) of a 2-D array, one output
// element per (x, y) pair, covered by a 2-D grid-stride loop.
template<typename T>
__device__ void _cupy_upfirdn2D( const T *__restrict__ inp,
                                 const int inpH,
                                 const T *__restrict__ h_trans_flip,
                                 const int up,
                                 const int down,
                                 const int axis,
                                 const int x_shape_a,
                                 const int h_per_phase,
                                 const int padded_len,
                                 T *__restrict__ out,
                                 const int outW,
                                 const int outH ) {

    const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
    const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) };

    const int stride_y { static_cast<int>( blockDim.x * gridDim.x ) };
    const int stride_x { static_cast<int>( blockDim.y * gridDim.y ) };

    for ( int x = tx; x < outH; x += stride_x ) {
        for ( int y = ty; y < outW; y += stride_y ) {
            int x_idx {};
            int h_idx {};

// __builtin_assume needs nvcc >= 11.2. The original test
// ( MAJOR >= 11 ) && ( MINOR >= 2 ) wrongly rejected 12.0/12.1.
#if ( __CUDACC_VER_MAJOR__ > 11 ) || ( ( __CUDACC_VER_MAJOR__ == 11 ) && ( __CUDACC_VER_MINOR__ >= 2 ) )
            __builtin_assume( padded_len > 0 );
            __builtin_assume( up > 0 );
            __builtin_assume( down > 0 );
            // NOTE: the original also asserted __builtin_assume( x > 0 ) /
            // __builtin_assume( y > 0 ) inside the branches below; both are
            // false for the first row/column (index 0) and therefore undefined
            // behavior, so they have been removed.
#endif

            if ( axis == 1 ) {
                x_idx = ( static_cast<int>( x * down ) / up ) % padded_len;
                h_idx = ( x * down ) % up * h_per_phase;
            } else {
                x_idx = ( static_cast<int>( y * down ) / up ) % padded_len;
                h_idx = ( y * down ) % up * h_per_phase;
            }

            int x_conv_idx { x_idx - h_per_phase + 1 };
            if ( x_conv_idx < 0 ) {
                // Window clipped at the left edge: skip the matching taps.
                h_idx -= x_conv_idx;
                x_conv_idx = 0;
            }

            T temp {};
            // Never read past the real (unpadded) input length x_shape_a.
            int stop = ( x_shape_a < ( x_idx + 1 ) ) ? x_shape_a : ( x_idx + 1 );
            for ( int x_c = x_conv_idx; x_c < stop; x_c++ ) {
                if ( axis == 1 ) {
                    temp += inp[y * inpH + x_c] * h_trans_flip[h_idx];
                } else {
                    temp += inp[x_c * inpH + x] * h_trans_flip[h_idx];
                }
                h_idx += 1;
            }
            out[y * outH + x] = temp;
        }
    }
}
// Type-specialized 2-D entry points: each just instantiates the templated
// worker. __launch_bounds__(64) caps the (2-D) block size for the launcher.
extern "C" __global__ void __launch_bounds__( 64 )
_cupy_upfirdn2D_float32( const float *__restrict__ inp, const int inpH,
                         const float *__restrict__ h_trans_flip,
                         const int up, const int down, const int axis, const int x_shape_a,
                         const int h_per_phase, const int padded_len,
                         float *__restrict__ out, const int outW, const int outH ) {
    _cupy_upfirdn2D<float>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}

extern "C" __global__ void __launch_bounds__( 64 )
_cupy_upfirdn2D_float64( const double *__restrict__ inp, const int inpH,
                         const double *__restrict__ h_trans_flip,
                         const int up, const int down, const int axis, const int x_shape_a,
                         const int h_per_phase, const int padded_len,
                         double *__restrict__ out, const int outW, const int outH ) {
    _cupy_upfirdn2D<double>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}

extern "C" __global__ void __launch_bounds__( 64 )
_cupy_upfirdn2D_complex64( const thrust::complex<float> *__restrict__ inp, const int inpH,
                           const thrust::complex<float> *__restrict__ h_trans_flip,
                           const int up, const int down, const int axis, const int x_shape_a,
                           const int h_per_phase, const int padded_len,
                           thrust::complex<float> *__restrict__ out, const int outW, const int outH ) {
    _cupy_upfirdn2D<thrust::complex<float>>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}

extern "C" __global__ void __launch_bounds__( 64 )
_cupy_upfirdn2D_complex128( const thrust::complex<double> *__restrict__ inp, const int inpH,
                            const thrust::complex<double> *__restrict__ h_trans_flip,
                            const int up, const int down, const int axis, const int x_shape_a,
                            const int h_per_phase, const int padded_len,
                            thrust::complex<double> *__restrict__ out, const int outW, const int outH ) {
    _cupy_upfirdn2D<thrust::complex<double>>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}
|
10,337 | #include<iostream>
#include<cuda.h>
#include<stdio.h>
using namespace std;
// Naive matrix multiply: one thread computes one element of the r1 x c2
// product C = A * B, where A is r1 x r2 and B is r2 x c2 (row-major).
__global__ void matrixmul(int* d_a, int* d_b, int* d_c, int r1, int c2, int r2)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= r1 || col >= c2)
        return;                            // thread falls outside the output matrix

    int acc = 0;
    for (int k = 0; k < r2; k++)           // walk the shared inner dimension
        acc += d_a[row * r2 + k] * d_b[k * c2 + col];
    d_c[row * c2 + col] = acc;
}
int main()
{
    // A is r1 x c1, B is r2 x c2; the product C is r1 x c2.
    int r1 = 30, r2 = 30, c1 = 30, c2 = 30;
    int *a = new int[r1 * c1];
    int *b = new int[r2 * c2];
    int *c = new int[r1 * c2];
    for (int i = 0; i < r1; i++)
        for (int j = 0; j < c1; j++)
            a[i * c1 + j] = rand() % 100;
    for (int i = 0; i < r2; i++)
        for (int j = 0; j < c2; j++)
            b[i * c2 + j] = rand() % 100;
    if (r2 != c1)
    {
        // Inner dimensions must agree. Free the host buffers before bailing
        // out (the original leaked all three `new[]` allocations).
        cout << "not possible";
        delete[] a;
        delete[] b;
        delete[] c;
        return 0;
    }
    else
    {
        int *d_a, *d_b, *d_c;
        cudaMalloc((void**)&d_a, sizeof(int) * r1 * c1);
        cudaMalloc((void**)&d_b, sizeof(int) * r2 * c2);
        cudaMalloc((void**)&d_c, sizeof(int) * r1 * c2);
        cudaMemcpy(d_a, a, sizeof(int) * r1 * c1, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b, b, sizeof(int) * r2 * c2, cudaMemcpyHostToDevice);
        // 16x16 threads per block; integer ceil-division sizes the grid so the
        // whole output is covered. (The original swapped the meaning of its
        // "blocksize"/"gridsize" names, applied ceil() to an already-integer
        // expression, and sized grid.x from r1 instead of c2 — which only
        // worked because the matrices were square.)
        const dim3 block(16, 16, 1);
        const dim3 grid((c2 + block.x - 1) / block.x, (r1 + block.y - 1) / block.y, 1);
        matrixmul<<<grid, block>>>(d_a, d_b, d_c, r1, c2, r2);
        // Blocking copy-back also synchronizes with the kernel.
        cudaMemcpy(c, d_c, sizeof(int) * r1 * c2, cudaMemcpyDeviceToHost);
        for (int i = 0; i < r1; i++)
        {
            for (int j = 0; j < c2; j++)
            {
                cout << c[i * c2 + j] << " ";
            }
            cout << endl;
        }
        cudaFree(d_a);
        cudaFree(d_b);
        cudaFree(d_c);
    }
    // Release host buffers (leaked in the original).
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
10,338 | #include "includes.h"
// Separable 3-D convolution along the Y (column) axis. Each block processes
// COLUMNS_RESULT_STEPS tiles of COLUMNS_BLOCKDIM_Y rows, staging them plus a
// halo of COLUMNS_HALO_STEPS tiles on each side in shared memory.
// Filter taps come from c_Kernel (declared elsewhere, presumably in constant
// memory — see includes.h); kernel_index selects one of several stored
// filters, kernel_radius its half-width.
// NOTE(review): imageD is accepted but never used; no guard clips baseX/baseZ
// against imageW/imageD, so the grid must tile the volume exactly — confirm
// against the launch site.
__global__ void convolutionColumns3DKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int kernel_index, int kernel_radius )
{
// +1 padding on the innermost dimension reduces shared-memory bank conflicts.
__shared__ float s_Data[COLUMNS_BLOCKDIM_Z][COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
const int baseZ = blockIdx.z * COLUMNS_BLOCKDIM_Z + threadIdx.z;
// Rebase both pointers so all further indexing is relative to this thread.
d_Src += (baseZ * imageH + baseY) * imageW + baseX;
d_Dst += (baseZ * imageH + baseY) * imageW + baseX;
const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH];
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * imageW];
}
//Upper halo (zero-filled above the top edge of the image)
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y >= 0) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
}
//Lower halo (zero-filled below the bottom edge of the image)
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) {
s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (baseY + i * COLUMNS_BLOCKDIM_Y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
}
//Compute and store results
// Barrier: all staging writes must land before any thread reads the tile.
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) {
float sum = 0;
//#pragma unroll
for (int j = -kernel_radius; j <= kernel_radius; j++) {
sum += kernel[kernel_radius - j] * s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * imageW] = sum;
}
} |
10,339 | #include "includes.h"
// One edge-crossing test of the classic point-in-polygon ray cast: block b
// tests the edge from vertex b+1 to vertex b against the horizontal ray from
// (testx, testy), writing 1/0 into results[b].
// NOTE(review): vertices appear to be packed with stride 3 (x, y, and an
// unused third component); the last block reads vertex blockIdx.x+1, which
// assumes the vertex array carries one extra (wrap-around) entry — confirm
// against the caller. The per-edge results presumably still need an XOR/parity
// reduction on the host to decide containment.
__global__ void pnpolyGPU(const float *vertex, float testx, float testy, int* results)
{
int id = blockIdx.x;
int indexOriginX = (blockIdx.x + 1) * 3;
int indexOriginY = (blockIdx.x + 1) * 3 + 1;
int indexDestinoX = blockIdx.x * 3;
int indexDestinoY = blockIdx.x * 3 + 1;
// Edge straddles the ray's y, and the intersection lies to the right of testx.
if ( ((vertex[indexOriginY]>testy) != (vertex[indexDestinoY]>testy)) && (testx < (vertex[indexDestinoX]-vertex[indexOriginX]) * (testy-vertex[indexOriginY]) / (vertex[indexDestinoY]-vertex[indexOriginY]) + vertex[indexOriginX]) )
results[id] = 1;
else
results[id] = 0;
} |
10,340 | #include "includes.h"
// Compacts out-of-bounds particles into the exchange (send) buffers: particle
// i whose block index equals CUDA_BND_S_OOB is copied to slot d_sums[i] of
// d_xchg_xi4/d_xchg_pxi4. d_sums is presumably an exclusive prefix sum over
// the OOB flags, so destinations are dense and unique — confirm at the caller.
// THREADS_PER_BLOCK and CUDA_BND_S_OOB come from includes.h.
__global__ static void k_reorder_send_buf_total(int nr_prts, int nr_total_blocks, uint *d_bidx, uint *d_sums, float4 *d_xi4, float4 *d_pxi4, float4 *d_xchg_xi4, float4 *d_xchg_pxi4)
{
int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
// Tail guard: the grid may overshoot the particle count.
if (i >= nr_prts)
return;
if (d_bidx[i] == CUDA_BND_S_OOB) {
int j = d_sums[i];
d_xchg_xi4[j] = d_xi4[i];
d_xchg_pxi4[j] = d_pxi4[i];
}
} |
10,341 | #include "includes.h"
// For each pixel of the index map, writes 1 into the indicator array when the
// pixel references a valid surfel, else 0. One thread per pixel, 2-D launch.
// d_invalid_index is an externally defined sentinel (see includes.h).
__global__ void markPotentialMatchedDepthPairKernel( cudaTextureObject_t index_map, unsigned img_rows, unsigned img_cols, unsigned* reference_pixel_matched_indicator ) {
const auto x = threadIdx.x + blockDim.x*blockIdx.x;
const auto y = threadIdx.y + blockDim.y*blockIdx.y;
// Tail guard for grids that overshoot the image.
if (x >= img_cols || y >= img_rows) return;
//The indicator will must be written to pixel_occupied_array
const auto offset = y * img_cols + x;
//Read the value on index map
const auto surfel_index = tex2D<unsigned>(index_map, x, y);
//Need other criterion?
unsigned indicator = 0;
if(surfel_index != d_invalid_index) {
indicator = 1;
}
reference_pixel_matched_indicator[offset] = indicator;
} |
10,342 | #include "includes.h"
// Low-pass "liftering": zero every cepstral coefficient at index >= nCoefs,
// up to (but excluding) nhalf + 2 - nCoefs. One thread per zeroed slot.
__global__ void lifter(float* cepstrum, int nCoefs, int nhalf) {
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    const int slot = tid + nCoefs;          // skip the coefficients we keep
    if (slot < nhalf + 2 - nCoefs)
        cepstrum[slot] = 0.0;               // kill coefficients above nCoefs
} |
10,343 |
#include "cuda_runtime.h"
#include <stdio.h>
// Demo: zero a small device buffer with cudaMemset, copy it back, print it.
int main()
{
    int dimx = 16;
    int num_bytes = dimx * sizeof( int );

    int *d_a = 0, *h_a = 0; // device and host pointers

    h_a = (int *) malloc( num_bytes );

    // allocate memory on the GPU
    cudaMalloc( (void **) &d_a, num_bytes );

    if( 0 == h_a || 0 == d_a )
    {
        printf("couldn't allocate memory\n");
        // Release whichever allocation did succeed (the original leaked it).
        free( h_a );
        cudaFree( d_a );
        return 911;
    } /* end if */

    // memset on the gpu (byte-wise fill; fine for zero)
    cudaMemset( d_a, 0, num_bytes );

    // Blocking copy back: also synchronizes with the memset.
    cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );

    for( int i = 0; i < dimx; i++ )
    {
        printf("%d ", h_a[i] );
    }
    printf("\n");

    free( h_a );
    cudaFree( d_a );

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    if ( cudaDeviceReset() != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
} |
10,344 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 128 // 2^7
#define BLOCKS 1024 // 2^10
#define NUM_VALS THREADS*BLOCKS
// Report the wall time between two clock() samples, in seconds.
void print_elapsed(clock_t start, clock_t stop)
{
    const double seconds = (double)(stop - start) / CLOCKS_PER_SEC;
    printf("Elapsed time: %.3fs\n", seconds);
}
// One butterfly stage of a bitonic sort: thread i compares element i with its
// partner i^j and swaps so that the (i & k) bit decides ascending/descending.
__global__ void bitonic_sort_step(int *dev_values, int j, int k)
{
    const unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    const unsigned int partner = i ^ j;

    /* Only the lower-indexed thread of each pair performs the compare/swap. */
    if (partner <= i)
        return;

    const bool ascending = ((i & k) == 0);
    const int lo = dev_values[i];
    const int hi = dev_values[partner];

    /* Swap when the pair violates this subsequence's direction. */
    if ((ascending && lo > hi) || (!ascending && lo < hi)) {
        dev_values[i] = hi;
        dev_values[partner] = lo;
    }
}
// Sorts `values` (exactly NUM_VALS ints, which must be a power of two for the
// bitonic network) ascending, in place, on the GPU. The blocking cudaMemcpy
// back to the host also synchronizes with the last kernel launch.
void bitonic_sort(int *values)
{
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
cudaMalloc((void**) &dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
// One thread per element: BLOCKS x THREADS == NUM_VALS.
dim3 blocks(BLOCKS,1);
dim3 threads(THREADS,1);
int j, k;
/* Major step */
for (k = 2; k <= NUM_VALS; k <<= 1) {
/* Minor step */
for (j=k>>1; j>0; j=j>>1) {
// Kernel launches on the same (default) stream run in order, so no
// explicit synchronization is needed between stages.
bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k);
}
}
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
// Load NUM_VALS ints from reverse_dataset.txt, sort them on the GPU, report time.
int main(int argc, char const *argv[])
{
    clock_t start, stop;

    int *values = (int*)malloc(NUM_VALS * sizeof(int));
    FILE *f = fopen("reverse_dataset.txt", "r");
    if (values == NULL || f == NULL) {
        /* The original dereferenced a NULL FILE* when the dataset was missing. */
        fprintf(stderr, "failed to allocate buffer or open reverse_dataset.txt\n");
        if (f) fclose(f);
        free(values);
        return 1;
    }
    for (int i = 0; i < NUM_VALS; i++) {
        if (fscanf(f, "%d\n", &values[i]) != 1) {
            fprintf(stderr, "bad or truncated input at element %d\n", i);
            fclose(f);
            free(values);
            return 1;
        }
    }
    fclose(f);   /* leaked in the original */

    start = clock();
    bitonic_sort(values);
    stop = clock();

    print_elapsed(start, stop);
    free(values);   /* leaked in the original */
    return 0;
}
|
10,345 | #include <iostream>
#include <cstdlib>
#include <cstring>
#include <cuda_runtime.h>
using namespace std;
// Element-wise integer vector add: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard, so every launched thread indexes the
// arrays — the launch configuration must match the array length exactly
// (main launches <<<2, 8>>> for 16 elements).
__global__ void AddVet(int *c, int *a, int *b){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
c[idx] = a[idx] + b[idx];
}
int main (int argc, char **argv){
    // C = A + B — the goal of this program is to add integer vectors.
    int h_vetS = 16;
    int *h_A = NULL;
    int *h_B = NULL;
    int *h_C = NULL;
    int *d_A = NULL;
    int *d_B = NULL;
    int *d_C = NULL;

    h_A = new int [h_vetS];
    h_B = new int [h_vetS];
    h_C = new int [h_vetS];

    cudaDeviceReset();
    cudaMalloc((void**)&d_A, sizeof(int) * h_vetS);
    cudaMalloc((void**)&d_B, sizeof(int) * h_vetS);
    cudaMalloc((void**)&d_C, sizeof(int) * h_vetS);

    // Initialize the input/output vectors.
    for (int i = 0 ; i < h_vetS; i++){
        h_A[i] = i + 1;
        h_B[i] = (i + 1) * 10;
        h_C[i] = 0;
    }

    // Copy host -> device
    cudaMemcpy(d_A, h_A, sizeof(int) * h_vetS, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(int) * h_vetS, cudaMemcpyHostToDevice);

    // 2 blocks x 8 threads == h_vetS elements (AddVet has no bounds guard).
    AddVet<<<2, 8>>>(d_C, d_A, d_B);

    // Copy device -> host (blocking, so it also synchronizes with the kernel).
    cudaMemcpy(h_C, d_C, sizeof(int) * h_vetS, cudaMemcpyDeviceToHost);

    // Show the result.
    for (int i = 0 ; i < h_vetS; i++){
        cout << "i = " << i << " h_C[i] = " << h_C[i] << endl;
    }

    // Release device memory — the original leaked d_A/d_B/d_C.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    return EXIT_SUCCESS;
}
|
10,346 | #include "includes.h"
// Writes the input buffer reversed: out[n-1-i] = in[i], one thread per byte.
// NOTE(review): despite the name, this is a plain reversal, not an Atbash
// substitution cipher (which maps 'a'->'z' etc.) — confirm which was intended.
__global__ void atbashGPU(char const *in, char *out, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
out[n - 1 - i] = in[i];
}
} |
10,347 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <chrono>
using namespace std;
using sys_clock = std::chrono::system_clock;
/// used to fill a host vector
struct rand_functor
{
// Exclusive upper bound for generated values; <= 0 means "unbounded rand()".
int mod = 0;
// NOTE(review): reseeds the global C RNG with the current time on EVERY
// construction, so two functors built within the same second replay the
// same sequence — confirm this is acceptable for the benchmark.
rand_functor(int _mod = 0) : mod(_mod) { std::srand(std::time(0)); }
template<typename T>
void operator()(T &var)
{
if(mod > 0)
var = std::rand() % mod;
else
var = std::rand();
}
};
// NOTE(review): despite the name this is NOT a matrix multiplication. For the
// zipped tuple t = (A[i], B[i], ids[i], out[i]) it computes
//     out[i] = A[i] * ids[i] + data[ids[i]]
// B (get<1>) is never read. The caller fills `ids` starting at 1, so the last
// element indexes data[matrix_size] — one past the end of the device buffer.
// Verify both points against thrust_matrix_mult before relying on the result.
struct matrix_mult
{
/// Fill the structure
int * data;
matrix_mult(int* _data): data(_data){}
template <typename Tuple>
__host__ __device__
void operator()( Tuple t){
thrust::get<3>(t) = thrust::get<0>(t) * thrust::get<2>(t) + data[thrust::get<2>(t)];
}
};
// Naive O(N^3) reference multiply of two square N x N row-major matrices,
// with N = row_size (col_size must equal row_size for the indexing to hold).
// C is fully written here, so callers may pass an uninitialized buffer — the
// original accumulated with += into whatever garbage malloc'd C contained.
void cpu_matrix_mult(float *A, float *B, float *C, int row_size, int col_size)
{
    const int N = row_size;
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            float acc = 0.0f;
            for (int k = 0; k < N; ++k)
                acc += A[i * N + k] * B[k * N + j];
            C[i * N + j] = acc;
        }
    }
}
// Dump a row_size x col_size row-major matrix, one row per output line.
void print_matrix(float *A, int row_size, int col_size)
{
    std::cout << "\n";
    for (int r = 0; r < row_size; r++)
    {
        for (int c = 0; c < col_size; c++)
            std::cout << A[r * col_size + c] << " ";
        std::cout << "\n";
    }
}
// Times a CPU reference matrix multiply against the Thrust element-wise
// functor and compares the first col_size elements of the two results.
void thrust_matrix_mult(const int row_size, const int col_size)
{
    const int matrix_size = col_size * row_size;
    std::chrono::time_point<sys_clock> t1, t2;
    std::chrono::duration<double, std::milli> exec_time_ms;

    /// Buffers for the CPU matrix mult. C is zero-initialized because
    /// cpu_matrix_mult accumulates with +=; the original passed it raw malloc
    /// garbage, corrupting the reference result.
    float *A = (float*)malloc(sizeof(float) * matrix_size);
    float *B = (float*)malloc(sizeof(float) * matrix_size);
    float *C = (float*)calloc(matrix_size, sizeof(float));

    /// Vectors for the thrust matrix mult
    thrust::host_vector<float> result(matrix_size);
    thrust::host_vector<float> matrix_hA(matrix_size), matrix_hB(matrix_size);
    thrust::device_vector<float> matrix_A(matrix_size), matrix_B(matrix_size), matrix_C(matrix_size, 0.0f);
    thrust::device_vector<int> ids(matrix_size), data(matrix_size);
    thrust::sequence(ids.begin(), ids.end(), 1, 1);
    thrust::sequence(data.begin(), data.end(), 1, 1);

    // Random inputs in [0, 10), copied to the device and to the raw buffers.
    thrust::for_each(matrix_hA.begin(), matrix_hA.end(), rand_functor(10));
    thrust::for_each(matrix_hB.begin(), matrix_hB.end(), rand_functor(10));
    matrix_A = matrix_hA;
    matrix_B = matrix_hB;
    thrust::copy(matrix_A.begin(), matrix_A.end(), A);
    thrust::copy(matrix_B.begin(), matrix_B.end(), B);

    t1 = sys_clock::now();
    cpu_matrix_mult(A, B, C, row_size, col_size);
    t2 = sys_clock::now();
    exec_time_ms = t2 - t1;
    std::cout << "CPU mm time: " << exec_time_ms.count() << "ms\n";

    t1 = sys_clock::now();
    /// Thrust code!
    thrust::for_each(
        thrust::make_zip_iterator(thrust::make_tuple(matrix_A.begin(),matrix_B.begin(),ids.begin(),matrix_C.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(matrix_A.end(),matrix_B.end(),ids.end(),matrix_C.end())),
        matrix_mult(thrust::raw_pointer_cast(data.data()))
    );
    result = matrix_C;
    t2 = sys_clock::now();
    exec_time_ms = t2 - t1;
    std::cout << "Thrust GPU mm time: " << exec_time_ms.count() << "ms\n";

    std::cout << "\nChecking Matrices" << std::endl;
    // Compare matrices (CPU & thrust) for correctness. The original used
    // `==` here, i.e. it declared a MISMATCH whenever an element was EQUAL —
    // the comparison was inverted.
    bool matches = true;
    for (int idx = 0; idx < col_size; idx++) {
        if (C[idx] != result[idx]) {
            matches = false;
            break;
        }
    }
    if (matches) {
        cout << "matrix match!" << endl;
    } else {
        cout << "jaja nice try" << endl;
    }

    // Release the raw host buffers (leaked in the original).
    free(A);
    free(B);
    free(C);
}
// Entry point: run a 50x50 problem by default, or a square problem of the
// size given on the command line.
int main(int argc, char* argv[])
{
    const int n = (argc < 2) ? 50 : atoi(argv[1]);
    thrust_matrix_mult(n, n);
    return 0;
}
|
10,348 | #include "includes.h"
// Hillis–Steele style inclusive prefix sum over a[offset .. offset+size-1],
// written into c at the same indices; single-block launch (uses threadIdx.x
// only). When offset > 0 the partial sums are biased by c[offset-1], chaining
// this scan onto an earlier segment.
// NOTE(review): threads leave the while-loop at different iterations via
// `break`, and the remaining threads still execute __syncthreads() inside the
// loop — a barrier in divergent control flow, which is undefined behavior on
// pre-Volta parts and fragile everywhere. Left as-is; needs a proper fix.
__global__ void scanKernelInclusive(int *c, const int *a, size_t size, size_t offset)
{
int myId =
threadIdx.x;
// Only threads covering the [offset, offset+size) window participate.
if (((myId - offset) < size) &&
(myId >= offset))
{
c[myId] = a[myId];
__syncthreads();
size_t _stepsLeft =
size;
// Distance to the left neighbor added this round; doubles each pass.
unsigned int _neighbor =
1;
while (_stepsLeft)
{
int op1 = c[myId];
int op2 = 0;
if ((myId - offset) >= _neighbor)
{
op2 =
c[myId - _neighbor];
}
else
{
// This thread's prefix is complete; it stops participating.
break;
}
__syncthreads();
c[myId] =
op1 + op2;
__syncthreads();
_stepsLeft >>= 1;
_neighbor <<= 1;
}
// Chain onto the previous segment's last inclusive sum.
if (offset > 0)
{
c[myId] +=
c[offset - 1];
}
}
} |
10,349 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Fill a[i] = i/n for i in [0, n): one thread per element with a tail guard.
__global__ void initialize(double *a, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        a[idx] = (double)idx / n;
}
// Benchmark: initialize a 10M-element array serially on the CPU, then with a
// CUDA kernel, print samples from both and the speedup.
int main()
{
    // Serial Code
    const int n = 10000000;
    double *a;
    double start, end;
    a = (double*)malloc(n * sizeof(double));
    int i;
    start = clock();
    for (i = 0; i < n; i++)
        a[i] = (double)i / n;
    end = clock();
    for (i = 0; i < 5; i++)
        printf("a[%d]: %.7f\n",i, a[i]);
    printf("   ...\n");
    for (i = n-5; i < n; i++)
        printf("a[%d]: %.7f\n", i, a[i]);
    double total = (end - start) / CLOCKS_PER_SEC;
    printf("time: %f\n\n",total);

    // Cuda
    double* ac;
    double* d_a;
    ac = (double*)malloc(n * sizeof(double));
    printf("Cuda\n");
    cudaMalloc(&d_a, sizeof(double)*n);
    double t = clock();
    // 10000 blocks x 1000 threads == n threads exactly.
    initialize<<<10000,1000>>>(d_a, n);
    cudaDeviceSynchronize();   // wait for the kernel so the timing is meaningful
    t = (clock() - t) / CLOCKS_PER_SEC;
    cudaMemcpy(ac, d_a, n*sizeof(double), cudaMemcpyDeviceToHost);
    for (i = 0; i < 5; i++)
        printf("a[%d]: %.7f\n", i, ac[i]);
    printf("   ...\n");
    for (i = n - 5; i < n; i++)
        printf("a[%d]: %.7f\n", i, ac[i]);
    printf("time:%f\n", t);
    double timesfaster = total / t;
    printf("Using cuda, the code executed %f times faster\n", timesfaster);

    // Release host and device buffers (all leaked in the original).
    free(a);
    free(ac);
    cudaFree(d_a);
    return 0;
} |
10,350 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated GPU stress kernel (the file header says "Do not modify"):
 * evaluates a fixed chain of single-precision expressions on the 16 inputs
 * and prints the final value of `comp`. Code intentionally left untouched. */
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15) {
if (comp == (+1.9838E-35f / tanhf(+1.4160E11f))) {
comp = var_1 * (var_2 - var_3);
comp = -1.4350E-29f * cosf((var_4 / (var_5 - -0.0f)));
comp = (var_6 + var_7);
if (comp >= (+1.5993E-19f * atan2f(-1.3948E-42f * (var_8 - (-0.0f + +1.4240E6f + +1.9767E-35f / -1.8233E35f)), (var_9 - fmodf(var_10 * +1.8475E-37f * (var_11 * var_12 * tanhf(-1.7305E36f)), -1.6347E35f))))) {
comp = (+1.7185E-35f / (-1.6289E-17f * +1.3788E-41f - var_13 + -1.8367E23f));
float tmp_1 = -1.3860E-37f;
comp += tmp_1 + (-1.8416E34f / log10f(var_14 + var_15));
comp = ceilf(cosf(-0.0f));
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element host array and fill every slot with v.
// Caller owns the returned buffer and must free() it.
float* initPointer(float v) {
    const int count = 10;
    float *buf = (float*) malloc(sizeof(float) * count);
    for (int k = 0; k < count; ++k)
        buf[k] = v;
    return buf;
}
// Parse 16 floats from the command line and run the generated kernel once.
int main(int argc, char** argv) {
    /* Program variables */
    // The original indexed argv[1..16] unconditionally and crashed when
    // launched with fewer arguments.
    if (argc < 17) {
        fprintf(stderr, "usage: %s f1 f2 ... f16\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);

    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16);
    cudaDeviceSynchronize();   // flush the kernel's printf before exiting
    return 0;
}
|
10,351 | # include <stdio.h>
# include <stdint.h>
# include "cuda_runtime.h"
//compile nvcc *.cu -o test
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index);
void parametric_measure_global(int N, int iterations);
void measure_global();
int main(){
// Run the global-memory latency benchmark on device 0, then tear down the
// context so profilers see a complete trace.
cudaSetDevice(0);
measure_global();
cudaDeviceReset();
return 0;
}
// One benchmark pass: a 400M-element (1600 MB) unsigned int array, walked
// once (iterations = 1) with the Kepler-style pointer-chase pattern.
void measure_global() {
    const int iterations = 1;
    const int N = 400 * 1024 * 1024;   // element count, not bytes
    printf("\n=====%10.4f MB array, Kepler pattern read, read 160 element====\n", sizeof(unsigned int)*(float)N/1024/1024);
    parametric_measure_global(N, iterations);
    printf("===============================================\n\n");
}
// Builds a pointer-chase array of N+2 unsigned ints (h_a[j] holds the next
// index to visit), uploads it, runs the single-thread latency kernel, and
// prints the 256 recorded (index, cycles) samples. The access pattern mixes
// 32 MB, 1 MB, and 4 B strides to probe different cache/TLB levels.
void parametric_measure_global(int N, int iterations) {
cudaDeviceReset();
cudaError_t error_id;
int i;
unsigned int * h_a;
/* allocate arrays on CPU */
h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2));
unsigned int * d_a;
/* allocate arrays on GPU */
error_id = cudaMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2));
if (error_id != cudaSuccess) {
printf("Error 1.0 is %s\n", cudaGetErrorString(error_id));
}
/* initialize array elements*/
for (i=0; i<N; i++)
h_a[i] = 0;
/*
* 32 MB (8M) stride access pattern:
*
* h[0]=8M, h[1]=8M+1
* h[8M]=16M, h[8M+1]=16M+1
* ...
* h[384M]=392M, h[384M+1]=392M+1
* h[392M]=400M, h[392M+1]=400M+1
*
* Stage 1:
* If we start from j=0 and follow the pointer as
* j=h[j]
* then we will visit indice: 0, 8M, 16M,...,392M <--- 49 indices
*
* Stage 3:
* When we get to j=1, we start the 8M stride again
* 1,8M+1,...,392M+1 <--- 49 indices
*/
for (i=0; i<50; i++){
h_a[i * 1024 * 1024 * 8] = (i+1)*1024*1024*8;
h_a[i * 1024 * 1024 * 8 + 1] = (i+1)*1024*1024*8+1;
}
// 1568 MB entry
/*
* 4B (1 stride)
*
* h[392M+1]=392M+2
* h[392M+2]=392M+3
* h[392M+3]=392M+1
*
* Stage 4:
* When we get j=392M+1, we start the 1 stride pattern as
* 392M+2, 392M+3, 392M+1, 392M+2 ... <--- until the end
*/
h_a[392*1024*1024+ 1] = 392*1024*1024 + 2;
h_a[392*1024*1024 + 2] = 392*1024*1024 + 3;
h_a[392*1024*1024 + 3] = 392*1024*1024 + 1;
/*
* 1MB (.25M) stride
*
* h[392M]=392M+.25M
* h[392M+.25N]=392M+.5M
* ...
* h[392M+7.5M]=392M+7.75M
* h[392M+7.75M]=1
*
* Stage 2:
* When we get to j=392M, we keep going as
* (392+.25)M, (392+.5)M,...,(392+7.75)M. <--- 30 indices
* Then we have j=h[(392+7.75)M]=1
*/
for (i=0; i< 31; i++)
h_a[(i+1568)*1024*256] = (i + 1569)*1024*256;
h_a[1599*1024*256] = 1;
h_a[N] = 0;
h_a[N+1] = 0;
/* copy array elements from CPU to GPU */
// NOTE(review): only N of the N+2 allocated elements are copied; the two
// sentinel slots h_a[N]/h_a[N+1] set above never reach the device. The
// kernel overwrites d_a[N]/d_a[N+1] itself, so this appears harmless.
error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned int) * N, cudaMemcpyHostToDevice);
if (error_id != cudaSuccess) {
printf("Error 1.1 is %s\n", cudaGetErrorString(error_id));
}
unsigned int *h_index = (unsigned int *)malloc(sizeof(unsigned int)*256);
unsigned int *h_timeinfo = (unsigned int *)malloc(sizeof(unsigned int)*256);
unsigned int *duration;
error_id = cudaMalloc ((void **) &duration, sizeof(unsigned int)*256);
if (error_id != cudaSuccess) {
printf("Error 1.2 is %s\n", cudaGetErrorString(error_id));
}
unsigned int *d_index;
error_id = cudaMalloc( (void **) &d_index, sizeof(unsigned int)*256 );
if (error_id != cudaSuccess) {
printf("Error 1.3 is %s\n", cudaGetErrorString(error_id));
}
cudaDeviceSynchronize ();
/* launch kernel*/
// Deliberately a single thread in a single block: latency, not bandwidth.
dim3 Db = dim3(1);
dim3 Dg = dim3(1,1,1);
global_latency <<<Dg, Db>>>(d_a, N, iterations, duration, d_index);
cudaDeviceSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error kernel is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaDeviceSynchronize ();
error_id = cudaMemcpy((void *)h_timeinfo, (void *)duration, sizeof(unsigned int)*256, cudaMemcpyDeviceToHost);
if (error_id != cudaSuccess) {
printf("Error 2.0 is %s\n", cudaGetErrorString(error_id));
}
error_id = cudaMemcpy((void *)h_index, (void *)d_index, sizeof(unsigned int)*256, cudaMemcpyDeviceToHost);
if (error_id != cudaSuccess) {
printf("Error 2.1 is %s\n", cudaGetErrorString(error_id));
}
cudaDeviceSynchronize ();
for(i=0;i<256;i++)
printf("%3d: %d\t %d\n", i,h_index[i], h_timeinfo[i]);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_index);
cudaFree(duration);
/*free memory on CPU */
free(h_a);
free(h_index);
free(h_timeinfo);
cudaDeviceReset();
}
// Single-thread pointer chase: follows j = my_array[j] for iterations*256
// steps, timestamping each dependent load with clock() and buffering the
// visited index and per-load cycle count in shared memory before writing
// them out. The stores to my_array[array_length(+1)] keep the chase's final
// value live so the compiler cannot dead-code-eliminate the loop.
// NOTE(review): the clearing loop only initializes 160 of the 256 shared
// slots; with iterations >= 1 the measurement loop overwrites all 256 before
// they are copied out, so no uninitialized data escapes — but the 160 bound
// looks like a leftover and is worth confirming.
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index) {
unsigned int start_time, end_time;
unsigned int j = 0;
__shared__ unsigned int s_tvalue[256];
__shared__ unsigned int s_index[256];
int k;
for(k=0; k<160; k++){
s_index[k] = 0;
s_tvalue[k] = 0;
}
//first round
// for (k = 0; k < iterations*256; k++)
// j = my_array[j];
//second round
for (k = 0; k < iterations*256; k++) {
start_time = clock();
j = my_array[j];
s_index[k]= j;   // data dependency forces the load to complete before clock()
end_time = clock();
s_tvalue[k] = end_time-start_time;
}
my_array[array_length] = j;
my_array[array_length+1] = my_array[j];
for(k=0; k<256; k++){
index[k]= s_index[k];
duration[k] = s_tvalue[k];
}
}
|
10,352 | /*reference https://www.nvidia.com/content/nvision2008/tech_presentations/Game_Developer_Track/NVISION08-Image_Processing_and_Video_with_CUDA.pdf*/
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define TILE_W 10
#define TILE_H 10
#define R 2 // filter radius
#define D (R*2+1) // filter diameter
#define S (D*D) // filter size
#define BLOCK_W (TILE_W+(2*R))
#define BLOCK_H (TILE_H+(2*R))
//--------------------------------------------------------------------------------------------------------------------
// Tiled (2R+1)x(2R+1) box blur. Each BLOCK_W x BLOCK_H thread block stages its
// TILE_W x TILE_H tile plus an R-pixel apron in shared memory; only the inner
// tile threads write output. Out-of-image reads clamp to the nearest edge.
__global__ void box_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h)
{
    __shared__ unsigned char smem[BLOCK_W*BLOCK_H];
    int x = blockIdx.x*TILE_W + threadIdx.x - R;
    int y = blockIdx.y*TILE_H + threadIdx.y - R;

    // clamp to edge of image
    x = max(0, x);
    x = min(x, w-1);
    y = max(y, 0);
    y = min(y, h-1);

    unsigned int index = y * w + x;
    // Row-major index into the shared tile. The original multiplied by
    // blockDim.y here, which is the row STRIDE only when the block is square
    // (it is: BLOCK_W == BLOCK_H); blockDim.x is the correct stride.
    unsigned int bindex = threadIdx.y * blockDim.x + threadIdx.x;

    // each thread copies its pixel of the block to shared memory
    smem[bindex] = in[index];
    __syncthreads();   // whole tile staged before anyone reads neighbors

    // only threads inside the apron will write results
    if ((threadIdx.x >= R) && (threadIdx.x < (BLOCK_W-R)) && (threadIdx.y >= R) && (threadIdx.y < (BLOCK_H-R)))
    {
        float sum = 0;
        for(int dy = -R; dy <= R; dy++)
        {
            for(int dx = -R; dx <= R; dx++)
            {
                sum += smem[bindex + (dy*blockDim.x) + dx];
            }
        }
        out[index] = sum / S;   // average of the S = D*D window
    }
}
//--------------------------------------------------------------------------------------------------------------------
const unsigned int imgw = 100;
const unsigned int imgh = 100;
// Synthesize a single-channel imgw x imgh test image whose pixel values
// cycle 0..7. The caller owns (and must free) the returned buffer.
void loadImg(unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *ch){
    *w = imgw;
    *h = imgh;
    *ch = 1;
    unsigned char *pixels = (unsigned char *)malloc(imgw*imgh*sizeof(unsigned char));
    for (int i = 0; i < imgw*imgh; i++)
        pixels[i] = i % 8;
    *data = pixels;
}
//--------------------------------------------------------------------------------------------------------------------
// Generate a synthetic image, box-filter it on the GPU, and print the
// top-left TILE_W x TILE_H corner before and after.
int main()
{
    unsigned char *data = NULL, *d_idata = NULL, *d_odata = NULL;
    unsigned int w, h, channels;
    unsigned int numElements;
    size_t datasize;

    loadImg(&data, &w, &h, &channels);
    printf("Loaded input file with w:%d h:%d channels:%d \n",w, h, channels);
    printf("input:\n");
    for (int i = 0; i < TILE_W; i++)
    {
        for (int j = 0; j < TILE_H; j++) printf("%d ", data[i*w+j]);
        printf("\n");
    }

    numElements = w*h*channels;
    datasize = numElements * sizeof(unsigned char);
    cudaMalloc(&d_idata, datasize);
    cudaMalloc(&d_odata, datasize);
    printf("Allocate Devicememory for data\n");
    cudaMemcpy(d_idata, data, datasize, cudaMemcpyHostToDevice);
    printf("Copy input data from the host memory to the CUDA device\n");

    dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
    // Each block OUTPUTS a TILE_W x TILE_H tile (the extra BLOCK_* threads only
    // load the apron), so the grid must be sized by the tile. The original
    // divided by threadsPerBlock (BLOCK_W/BLOCK_H), leaving the right and
    // bottom edges of the image unprocessed.
    dim3 blocksPerGrid((w + TILE_W - 1) / TILE_W, (h + TILE_H - 1) / TILE_H);
    printf("CUDA kernel launch with [%d %d] blocks of [%d %d] threads\n", blocksPerGrid.x, blocksPerGrid.y,
           threadsPerBlock.x, threadsPerBlock.y);
    box_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w, h);
    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy(data, d_odata, datasize, cudaMemcpyDeviceToHost);
    printf("Copy output data from the CUDA device to the host memory\n");

    printf("output:\n");
    for (int i = 0; i < TILE_W; i++)
    {
        for (int j = 0; j < TILE_H; j++) printf("%d ", data[i*w+j]);
        printf("\n");
    }

    free(data);
    cudaFree(d_idata);
    cudaFree(d_odata);
    printf("Free device and host memory\n");
    return 0;
}
|
10,353 | #include <stdio.h>
#include <cuda_runtime.h>
#include <string.h>
#define TOTAL_SIZE 1024
//#define TOTAL_SIZE (1024*1024*1024)
#define block_dim 1024
#define chk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Decode and report a CUDA error with its call site (used via the chk macro);
// exits with the error code unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
double *h_c, *h_a, *h_b;
double **d_c, **d_a, **d_b;
cudaStream_t *streams;
cudaEvent_t start, finish;
// Allocate pinned host buffers for the full problem and per-device,
// stream-ordered device buffers for each partition. TOTAL_SIZE is split into
// `devices` parts; the last device also receives the remainder. When
// multi_gpu is false only the single device i == 0 is used.
void allocate(int devices, int multi_gpu) {
    int i = 0, parts, rem;

    d_c = (double **) malloc(sizeof(double *) * devices);
    d_a = (double **) malloc(sizeof(double *) * devices);
    d_b = (double **) malloc(sizeof(double *) * devices);

    // Pinned host memory enables true async copies on the streams below.
    cudaMallocHost((void **) &h_c, sizeof(double) * TOTAL_SIZE);
    cudaMallocHost((void **) &h_a, sizeof(double) * TOTAL_SIZE);
    cudaMallocHost((void **) &h_b, sizeof(double) * TOTAL_SIZE);

    streams = (cudaStream_t *) malloc(sizeof(cudaStream_t) * devices);
    for (i=0; i<devices; ++i) {
        cudaStreamCreate(&streams[i]);
    }

    parts = TOTAL_SIZE / devices;
    rem = TOTAL_SIZE % devices;

    i = 0;
    if (multi_gpu) {
        for (i=0; i<devices-1; ++i) {
            cudaSetDevice(i);
            // cudaStream_t is an opaque handle (a pointer type); the original
            // printed it with %d, which is undefined behavior on LP64 platforms.
            printf("\nS%p", (void *) streams[i]);
            chk(cudaMallocAsync((void **) &d_c[i], sizeof(double) * parts, streams[i]));
            chk(cudaMallocAsync((void **) &d_a[i], sizeof(double) * parts, streams[i]));
            chk(cudaMallocAsync((void **) &d_b[i], sizeof(double) * parts, streams[i]));
        }
    }
    // Last (or only) device gets its part plus the remainder.
    cudaSetDevice(i);
    chk(cudaMallocAsync((void **) &d_c[i], sizeof(double) * (parts + rem), streams[i]));
    chk(cudaMallocAsync((void **) &d_a[i], sizeof(double) * (parts + rem), streams[i]));
    chk(cudaMallocAsync((void **) &d_b[i], sizeof(double) * (parts + rem), streams[i]));

    cudaEventCreate(&start);
    cudaEventCreate(&finish);
}
// Element-wise c[t] = a[t] + b[t] over one device's chunk of PART_SIZE
// elements. Launched with PART_SIZE/block_dim + 1 blocks, so the guard
// clips the tail block.
extern "C" __global__ void vec_add(double *c, double *a, double *b, int PART_SIZE) {
	int t = threadIdx.x + blockIdx.x * blockDim.x;
	if (t < TOTAL_SIZE && t < PART_SIZE) {
		c[t] = a[t] + b[t];
		// Debug trace: sample every 100th element. BUG FIX: the original
		// condition `t % 100` printed 99% of all elements instead of a
		// sparse sample.
		if (t % 100 == 0)
			printf("\n%f", c[t]);
	}
}
// Launch one vec_add per device on its own stream; the last (or only)
// device also processes the remainder chunk.
void kernels_launch(int devices, int multi_gpu) {
	const int parts = TOTAL_SIZE / devices;
	const int rem = TOTAL_SIZE % devices;
	int dev = 0;
	if (multi_gpu) {
		while (dev < devices - 1) {
			cudaSetDevice(dev);
			vec_add<<<parts/block_dim + 1, block_dim, 0, streams[dev]>>>(d_c[dev], d_a[dev], d_b[dev], parts);
			++dev;
		}
	}
	cudaSetDevice(dev);
	vec_add<<<(parts + rem)/block_dim + 1, block_dim, 0, streams[dev]>>>(d_c[dev], d_a[dev], d_b[dev], parts + rem);
}
// Stage the input vectors onto each device asynchronously. Device i receives
// elements [parts*i, parts*(i+1)); the last (or only) device also gets the
// division remainder.
void data_transferHtoD(int devices, int multi_gpu) {
	int parts = TOTAL_SIZE / devices;
	int rem = TOTAL_SIZE % devices;
	int i = 0;
	if (multi_gpu) {
		for (i=0; i<devices-1; ++i) {
			cudaSetDevice(i);
			// BUG FIX: cudaStream_t printed with %d was undefined behavior;
			// print the opaque handle as a pointer.
			printf("\nS%p", (void *) streams[i]);
			chk(cudaMemcpyAsync(d_a[i], h_a + (parts * i), sizeof(double) * parts, cudaMemcpyHostToDevice, streams[i]));
			chk(cudaMemcpyAsync(d_b[i], h_b + (parts * i), sizeof(double) * parts, cudaMemcpyHostToDevice, streams[i]));
		}
	}
	cudaSetDevice(i);
	chk(cudaMemcpyAsync(d_a[i], h_a + (parts * i), sizeof(double) * (parts + rem), cudaMemcpyHostToDevice, streams[i]));
	chk(cudaMemcpyAsync(d_b[i], h_b + (parts * i), sizeof(double) * (parts + rem), cudaMemcpyHostToDevice, streams[i]));
}
// Copy per-device results back into the pinned host buffer. The final (or
// only) device chunk also carries the division remainder.
void data_transferDtoH(int devices, int multi_gpu) {
	int parts = TOTAL_SIZE / devices;
	int rem = TOTAL_SIZE % devices;
	int i = 0;
	if (multi_gpu) {
		for (i=0; i<devices-1; ++i) {
			cudaSetDevice(i);
			chk(cudaMemcpyAsync(h_c + (parts * i), d_c[i], sizeof(double) * parts, cudaMemcpyDeviceToHost, streams[i]));
		}
	}
	// BUG FIX: this final copy used to be inside the `if (multi_gpu)` block,
	// so in single-GPU runs no result was ever copied back to the host
	// (mirrors the structure of data_transferHtoD).
	cudaSetDevice(i);
	chk(cudaMemcpyAsync(h_c + (parts * i), d_c[i], sizeof(double) * (parts + rem), cudaMemcpyDeviceToHost, streams[i]));
}
// Release per-device buffers and streams, then the host-side tables, pinned
// buffers and timing events.
void deallocate(int devices) {
	for (int dev = 0; dev < devices; ++dev) {
		cudaSetDevice(dev);
		cudaFreeAsync(d_c[dev], streams[dev]);
		cudaFreeAsync(d_a[dev], streams[dev]);
		cudaFreeAsync(d_b[dev], streams[dev]);
	}
	for (int dev = 0; dev < devices; ++dev) {
		cudaStreamDestroy(streams[dev]);
	}
	free(d_c);
	free(d_a);
	free(d_b);
	cudaFreeHost(h_a);
	cudaFreeHost(h_b);
	cudaFreeHost(h_c);
	cudaEventDestroy(start);
	cudaEventDestroy(finish);
}
// Compare GPU results against a + b recomputed on the host and print a
// normalized squared-error figure (diff^2 / |c|^2).
void verify() {
	double norm = 0.0;
	double err = 0.0;
	for (int i = 0; i < TOTAL_SIZE; ++i) {
		double expected = h_a[i] + h_b[i];
		double diff = h_c[i] - expected;
		norm += h_c[i] * h_c[i];
		err += diff * diff;
	}
	printf("\n%f\t%f\n", h_c[0], h_c[5]);
	printf("\n\nError Rate: %e\n", err / norm);
}
// Multi-GPU vector add driver. Pass -m to split the work across all visible
// devices (automatically disabled when only one device is present).
int main(int argc, char **argv) {
	int i, devices = 1;
	float exec_time;
	int multi_gpu = 0;
	if (argc > 1 && strcmp(argv[1], "-m") == 0) {
		multi_gpu = 1;
	}
	chk(cudaGetDeviceCount(&devices));
	printf("\nNum devices available = %d\n", devices);
	if (devices == 0) {
		printf("\nError: No devices found\n");
		exit(1);
	}
	if (devices ==1)
		multi_gpu = 0;
	allocate(devices, multi_gpu);
	// Initialize input data.
	for (i=0; i<TOTAL_SIZE; ++i) {
		h_a[i] = i + 1;
		h_b[i] = i + 2;
	}
	data_transferHtoD(devices, multi_gpu);
	cudaEventRecord(start);
	kernels_launch(devices, multi_gpu);
	// BUG FIX: `finish` is recorded on the default stream, which does not
	// implicitly order against the per-device streams; wait for every stream
	// first so the event actually marks kernel completion.
	for (i=0; i<devices; ++i)
		chk(cudaStreamSynchronize(streams[i]));
	cudaEventRecord(finish);
	// BUG FIX: cudaEventElapsedTime requires the event to have completed.
	chk(cudaEventSynchronize(finish));
	data_transferDtoH(devices, multi_gpu);
	for (i=0; i<devices; ++i)
		chk(cudaStreamSynchronize(streams[i]));
	if (TOTAL_SIZE <= 2048) {
		verify();
	}
	chk(cudaEventElapsedTime(&exec_time, start, finish));
	printf("MultiGPU Time = %f", exec_time / 1000);
	deallocate(devices);
	printf("\nFinished.\n");
	return 0;
}
|
10,354 | #include "dev_noise.cuh"
// Fill the in-phase / quadrature angular-frequency tables:
//   I[k] = omega_amp * cos(delta_alpha * k) + delta_omega
//   Q[k] = omega_amp * sin(delta_alpha * k) + delta_omega
__global__ void noiseOmegaCulc(float *dev_omega_n_I, float *dev_omega_n_Q, unsigned int length,
	float omega_amp, float delta_alpha, float delta_omega){
	unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= length)
		return;
	float angle = delta_alpha * tid;
	dev_omega_n_I[tid] = omega_amp * cosf(angle) + delta_omega;
	dev_omega_n_Q[tid] = omega_amp * sinf(angle) + delta_omega;
}
// Sum-of-sinusoids evaluation. Each block reduces blockDim.y oscillators
// (rows) into one partial cos/sin sample per time index tidx, written to
// row blockIdx.y of a pitched [gridDim.y x pitch_width] scratch matrix
// (later collapsed by noiseSoSSum). Shared memory holds one tile of
// per-thread samples plus the per-row oscillator parameters.
// NOTE(review): the __syncthreads() calls below sit inside
// `if (tidy < heigth)`; this is only safe when heigth is a multiple of
// blockDim.y so whole blocks take the same path — confirm launch config.
__global__ void noiseSoSCulc(float *dev_cos_value, float *dev_sin_value,
	unsigned int pitch_width, unsigned int width, unsigned int heigth, float delta_t,
	float *dev_omega_n_I, float *dev_omega_n_Q, float *dev_phi_n){
	unsigned int x = threadIdx.x,
		y = threadIdx.y,
		tidy = blockIdx.y * blockDim.y + y;
	__shared__ float sha_cos_value[THREADS_EACH_BLOCK / BLOCK_DIM_X_32][BLOCK_DIM_X_32],
		sha_sin_value[THREADS_EACH_BLOCK / BLOCK_DIM_X_32][BLOCK_DIM_X_32];
	// Zero the whole tile so rows past `heigth` contribute nothing to the sum.
	sha_cos_value[y][x] = 0;
	sha_sin_value[y][x] = 0;
	__syncthreads();
	if (tidy < heigth){
		__shared__ float sha_omega_n_I[THREADS_EACH_BLOCK / BLOCK_DIM_X_32],
			sha_omega_n_Q[THREADS_EACH_BLOCK / BLOCK_DIM_X_32],
			sha_phi_n_I[THREADS_EACH_BLOCK / BLOCK_DIM_X_32],
			sha_phi_n_Q[THREADS_EACH_BLOCK / BLOCK_DIM_X_32];
		// One thread per row loads that row's frequency and phase parameters.
		if (x == 0){
			sha_omega_n_I[y] = dev_omega_n_I[tidy];
			sha_omega_n_Q[y] = dev_omega_n_Q[tidy];
			sha_phi_n_I[y] = dev_phi_n[tidy];
			sha_phi_n_Q[y] = dev_phi_n[heigth + tidy];
		}
		__syncthreads();
		// Grid-stride over time indices along x.
		for (unsigned int tidx = blockIdx.x * blockDim.x + x;
			tidx < width; tidx += gridDim.x*blockDim.x){
			sha_cos_value[y][x] = cosf(sha_omega_n_I[y] * delta_t*tidx + 2 * CR_CUDART_PI*sha_phi_n_I[y]);
			sha_sin_value[y][x] = sinf(sha_omega_n_Q[y] * delta_t*tidx + 2 * CR_CUDART_PI*sha_phi_n_Q[y]);
			__syncthreads();
			// Tree reduction over the y dimension; `extra` carries the odd
			// element forward when the active row count is not even.
			for (unsigned int heigth_ii = blockDim.y / 2, extra = blockDim.y % 2;
				heigth_ii > 0; extra = heigth_ii % 2, heigth_ii /= 2){
				if (y < heigth_ii){
					sha_cos_value[y][x] += sha_cos_value[heigth_ii + extra + y][x];
					sha_sin_value[y][x] += sha_sin_value[heigth_ii + extra + y][x];
				}
				heigth_ii += extra;
				__syncthreads();
			}
			// Row 0 holds the block's partial sum for this time index.
			if (y == 0){
				unsigned int loc = blockIdx.y*pitch_width + tidx;
				dev_cos_value[loc] = sha_cos_value[0][x];
				dev_sin_value[loc] = sha_sin_value[0][x];
			}
		}
	}
}
// Collapse the per-block-row partial sums produced by noiseSoSCulc: column
// tidx of the pitched [heigth x pitch_width] scratch matrix is accumulated,
// scaled by sum_amp, and written back into row 0 (dev_*_value[tidx]).
__global__ void noiseSoSSum(float *dev_cos_value, float *dev_sin_value,
	unsigned int pitch_width, unsigned int width, unsigned int heigth, float sum_amp){
	for (unsigned int tidx = blockIdx.x*blockDim.x + threadIdx.x;
		tidx < width; tidx += gridDim.x*blockDim.x){
		float acc_cos = 0;
		float acc_sin = 0;
		for (unsigned int row = 0; row < heigth; row++){
			unsigned int loc = row*pitch_width + tidx;
			acc_cos += dev_cos_value[loc];
			acc_sin += dev_sin_value[loc];
		}
		dev_cos_value[tidx] = sum_amp * acc_cos;
		dev_sin_value[tidx] = sum_amp * acc_sin;
	}
}
// NOTE(review): unimplemented stub. The original body held only unused
// thread-index locals plus ~40 lines of commented-out draft code that was a
// copy of noiseSoSCulc with renamed parameters (path_num/col_num/row_num);
// see noiseSoSCulc above for the working row-major implementation this
// column-based variant was meant to mirror. The dead locals were removed to
// silence unused-variable warnings; the kernel intentionally does nothing.
__global__ void noiseSoSCulcBaseCol(float *dev_cos_value, float *dev_sin_value,
	unsigned int path_num, unsigned int col_num, unsigned int row_num,
	float delta_t, float *dev_omega_n_I, float *dev_omega_n_Q, float *dev_phi_n){
	// Intentionally empty.
}
10,355 | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
using namespace std;
const int BlockDim = 32;    // grid is BlockDim x BlockDim blocks
const int ThreadDim = 32;   // each block is ThreadDim x ThreadDim threads
const int batch = 128;      // tensor batch size
const int in_channel = 32;  // channel count
const int in_size = 128;    // spatial height/width
// Fill `a` with ones. `size` is given in BYTES and converted to an element
// count internally.
void gen_tensor(float *a, int size) {
    int count = size / (int)sizeof(float);
    for (int i = 0; i < count; ++i) {
        a[i] = 1.0f;
    }
}
// Machine-generated (TVM-style) layout-permutation kernel: repacks tensor A
// into A_change using int4 gather-index computation and a float4 store per
// iteration. The index arithmetic is generated code — do not hand-edit.
// NOTE(review): the constants (2097152, 524288, 16384, ...) encode the
// 128x32x128x128 tensor strides and assume the 32x32-block / 32x32-thread
// launch used in main — confirm before reusing elsewhere.
extern "C" __global__ void default_function_kernel0(void* __restrict__ A_change, void* __restrict__ A) {
  for (int n_inner = 0; n_inner < 4; ++n_inner) {
    for (int h_inner = 0; h_inner < 4; ++h_inner) {
      // Gather indices for four source elements, 32 apart in the innermost dim.
      int4 _1 = (make_int4)((((((((((int)blockIdx.y) * 2097152) + (n_inner * 524288)) + (((int)threadIdx.y) * 16384)) + (h_inner * 4096)) + (((int)threadIdx.x) * 128)) + ((int)blockIdx.x)))+(32*0), (((((((((int)blockIdx.y) * 2097152) + (n_inner * 524288)) + (((int)threadIdx.y) * 16384)) + (h_inner * 4096)) + (((int)threadIdx.x) * 128)) + ((int)blockIdx.x)))+(32*1), (((((((((int)blockIdx.y) * 2097152) + (n_inner * 524288)) + (((int)threadIdx.y) * 16384)) + (h_inner * 4096)) + (((int)threadIdx.x) * 128)) + ((int)blockIdx.x)))+(32*2), (((((((((int)blockIdx.y) * 2097152) + (n_inner * 524288)) + (((int)threadIdx.y) * 16384)) + (h_inner * 4096)) + (((int)threadIdx.x) * 128)) + ((int)blockIdx.x)))+(32*3));
      // Vectorized 16-byte store of the four gathered values.
      ((float4*)((float*)A_change + (((((((((int)blockIdx.y) * 2097152) + (n_inner * 524288)) + (((int)blockIdx.x) * 16384)) + (((int)threadIdx.y) * 512)) + (h_inner * 128)) + (((int)threadIdx.x) * 4)))))[0] = make_float4(((float*)A)[_1.x],((float*)A)[_1.y],((float*)A)[_1.z],((float*)A)[_1.w]);
    }
  }
}
// Print the CUDA error string with its source location and abort.
static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Elapsed time between two gettimeofday() samples, in milliseconds.
double get_durtime(struct timeval t1, struct timeval t2) {
    double micros = 1000000.0*(t2.tv_sec-t1.tv_sec) + (t2.tv_usec - t1.tv_usec);
    return micros / 1000.0;
}
// Driver: allocate a 128x32x128x128 float tensor of ones, run the layout
// permutation kernel once, and report the wall-clock kernel time.
int main() {
    float *a, *d_a, *d_ach;
    int size = batch * in_channel * in_size * in_size * sizeof(float);
    a = (float *)malloc(size);
    if (a == NULL) {
        fprintf(stderr, "host malloc failed\n");
        return EXIT_FAILURE;
    }
    gen_tensor(a, size);
    HANDLE_ERROR(cudaMalloc((void **)&d_a, size));
    HANDLE_ERROR(cudaMalloc((void **)&d_ach, size));
    HANDLE_ERROR(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
    dim3 grid(BlockDim, BlockDim);
    dim3 block(ThreadDim, ThreadDim);
    struct timeval t1, t2;
    gettimeofday(&t1, 0);
    default_function_kernel0 <<<grid, block>>> ((void *)d_ach, (void *)d_a);
    // Catch launch-configuration errors; the original checked only the sync.
    HANDLE_ERROR(cudaGetLastError());
    HANDLE_ERROR(cudaDeviceSynchronize());
    gettimeofday(&t2, 0);
    double conv_time = get_durtime(t1, t2);
    printf ("Convolution time: %f ms\n", conv_time);
    free(a);
    HANDLE_ERROR(cudaFree(d_a));
    HANDLE_ERROR(cudaFree(d_ach));
    return 0;
}
|
10,356 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, kernels
#include "scan_naive_kernel.cu"
void runTest( int argc, char** argv);
extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len);
extern "C" void computeGold( float* reference, float* idata, const unsigned int len);
void checkCUDAError(const char *msg);
int checkResults(float *, float *, int, float);
// Entry point: delegate to the test harness and terminate with success.
int
main( int argc, char** argv)
{
    runTest( argc, argv);
    exit(0);
}
// Host driver: build random input, run the naive single-block scan kernel,
// and compare against the CPU reference (computeGold).
void
runTest( int argc, char** argv)
{
    unsigned int num_elements = 512;
    const unsigned int mem_size = sizeof( float) * num_elements;
    const unsigned int shared_mem_size = sizeof(float) * num_elements;
    // Host input: integer values between 0 and 10.
    float* h_data = (float*) malloc(mem_size);
    for( unsigned int i = 0; i < num_elements; ++i){
        h_data[i] = floorf(10*(rand()/(float)RAND_MAX));
    }
    // CPU reference solution.
    float* reference = (float*) malloc( mem_size);
    computeGold( reference, h_data, num_elements);
    // Device input and output arrays.
    float* d_idata;
    float* d_odata;
    cudaMalloc( (void**) &d_idata, mem_size);
    cudaMalloc( (void**) &d_odata, mem_size);
    checkCUDAError("cudaMalloc");
    cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy H2D");
    // Note: these scans only support a single thread-block worth of data.
    dim3 grid(1, 1, 1);
    dim3 threads(512, 1, 1);
    printf("Running parallel prefix sum (scan) of %d elements\n", num_elements);
    scan_naive<<< grid, threads, 2 * shared_mem_size >>>(d_odata, d_idata, num_elements);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the modern equivalent. The sync also surfaces async kernel errors,
    // which checkCUDAError (previously defined but never called) now reports.
    cudaDeviceSynchronize();
    checkCUDAError("scan_naive kernel");
    cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy D2H");
    float epsilon = 0.0f;
    unsigned int result_regtest = checkResults( reference, h_data, num_elements, epsilon);
    printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
    // Cleanup.
    free( h_data);
    free( reference);
    cudaFree(d_idata);
    cudaFree(d_odata);
}
// Abort with a message if any CUDA error has been recorded since the last
// check (clears the sticky error state).
void
checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
// Compare reference and GPU arrays element-wise; returns 1 when every
// absolute difference is within `threshold`, 0 otherwise.
// BUG FIX: the original compared the SIGNED difference, so a GPU value that
// was too large (negative difference) always passed regardless of threshold.
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
    for(int i = 0; i < num_elements; i++) {
        if(fabsf(reference[i] - gpu_result[i]) > threshold) {
            return 0;
        }
    }
    return 1;
}
|
10,357 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// Named stopwatch accumulator: a singly-linked list of clocks, each tracking
// accumulated microseconds plus the timestamp of the currently running
// interval (startTime == 0 means "not running").
struct clock {
  char* name;             // heap-allocated copy, owned by this node
  long totalTime;         // accumulated elapsed microseconds
  long startTime;         // time() when started; 0 when stopped
  struct clock* next;
};
struct clock *clocks = NULL;   // head of the global clock list
// Forward declaration of the microsecond clock defined below.
// NOTE(review): this shadows the C library's time(2) in this file.
long time();
// Start (or create and start) the clock with the given name.
void startClock(char* name) {
  struct clock *cp = clocks;
  while (cp != NULL) {
    if (strcmp(cp->name,name) == 0) {
      // BUG FIX: the original wrote clocks->startTime here, restarting the
      // head of the list instead of the clock that matched `name`.
      cp->startTime = time();
      return;
    }
    cp = cp->next;
  }
  // No existing clock with this name: create one and push it on the list.
  cp = (struct clock*)malloc(sizeof(struct clock));
  cp->name = (char*) malloc(strlen(name)+1);
  strcpy(cp->name,name);
  cp->totalTime = 0;
  cp->startTime = time();
  cp->next = clocks;
  clocks = cp;
  return;
}
// Stop a running clock and fold the elapsed interval into its total.
// Does nothing if the clock is unknown or not currently running.
void stopClock(char* name) {
  struct clock *cp;
  for (cp = clocks; cp != NULL; cp = cp->next) {
    if (strcmp(cp->name, name) == 0)
      break;
  }
  if (cp != NULL && cp->startTime != 0) {
    cp->totalTime += time() - cp->startTime;
    cp->startTime = 0;   /* mark as stopped */
  }
}
// Print the accumulated time of every known clock.
void dump() {
  struct clock *node;
  for (node = clocks; node != NULL; node = node->next) {
    printf("%-20s %ld micros\n",node->name, node->totalTime);
  }
}
// Print a single clock's accumulated time, if a clock with that name exists.
void printClock(char* name) {
  struct clock *node = clocks;
  for (; node != NULL && strcmp(node->name, name) != 0; node = node->next) {
    /* walk the list */
  }
  if (node != NULL) {
    printf("%-20s %ld micros\n",node->name,node->totalTime);
  }
}
// Wall-clock timestamp in microseconds; seconds are reduced modulo one year
// before scaling. NOTE(review): this definition shadows the C library's
// time(2) within this translation unit — renaming it would break the callers
// above, so it is kept as-is.
long time() {
  struct timeval tv;
  gettimeofday(&tv,NULL);
  return 1000000*(tv.tv_sec % (60*60*24*365)) + tv.tv_usec;
}
|
#define X_BLOCK 32
#define PITCH 8192
extern "C"
// Coulomb-style potential sum: each thread evaluates sum(q_i / r_i) over 10
// hard-coded atoms at grid point (blockIdx.x*X_BLOCK + threadIdx.x,
// blockIdx.y, blockIdx.z).
// BUG FIX: in the original, the last charge literal 3429.4006 was split
// across two source lines ("34" / "29.4006"), which cannot compile; the
// constant is rejoined here. The arithmetic is now carried out in float
// (the original mixed double literals with sqrtf), so low-order bits of the
// result may differ.
// NOTE(review): the output index ignores blockIdx.z, exactly as in the
// original — all z-layers write to the same (x, y) slot.
__global__ void DoAtoms(float * result0) {
	const float px = (float)(blockIdx.x * X_BLOCK + threadIdx.x);
	const float py = (float)blockIdx.y;
	const float pz = (float)blockIdx.z;
	// {charge, x, y, z} for each of the 10 atoms.
	const float atoms[10][4] = {
		{3468.6504f, 4965.004f,  3499.4614f, 6176.091f},
		{106.55623f, 4027.6821f, 1980.8116f, 5386.449f},
		{717.5819f,  3246.3042f, 5327.5703f, 3685.3599f},
		{2963.1875f, 3496.234f,  5694.6367f, 3103.7507f},
		{4091.7354f, 3956.9036f, 3761.7817f, 2858.0657f},
		{3957.8835f, 504.9164f,  16.424118f, 5801.628f},
		{2591.022f,  3684.188f,  1368.6476f, 5052.5093f},
		{1979.9531f, 5800.155f,  1098.5345f, 2261.4316f},
		{2044.7126f, 4358.316f,  3836.3572f, 4461.438f},
		{3429.4006f, 1698.7139f, 1590.7648f, 213.14264f}};
	float sum = 0.0f;
	#pragma unroll
	for (int a = 0; a < 10; ++a) {
		const float dx = px - atoms[a][1];
		const float dy = py - atoms[a][2];
		const float dz = pz - atoms[a][3];
		sum += atoms[a][0] / sqrtf(dx * dx + dy * dy + dz * dz);
	}
	result0[(blockIdx.y * PITCH) + (blockIdx.x * X_BLOCK) + threadIdx.x] = sum;
}
#undef X_BLOCK
#undef PITCH
|
#include "includes.h"
// Transposed matrix-vector product for a column-major m x n matrix A:
// thread `col` computes x[col] = sum_i y[i] * A[col*m + i].
__global__ void gen_matvecT(float *A, float *x, float *y, const int m, const int n)
{
	unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
	if ( xIndex >= n )
		return;
	float acc = 0.0f;
	const float *column = A + xIndex * m;
	for(int i=0; i<m; i++) {
		acc = acc + y[i] * column[i];
	}
	x[xIndex] = acc;
}
10,360 | #include <algorithm>
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <fstream>
#include <iostream>
const int THREADS_PER_BLOCK = 512;
// Evaluate a partial assignment of the first `startLevel` variables of a
// 3-CNF formula (n variables, m clauses). Thread e decodes assignment e
// (bit i of e -> variable i), stores it in set[e*n + i], and leaves flags[e]
// as: 1 = satisfied so far (flags preset to 1 by the caller), -1 = some
// clause still undecided (touches a variable >= startLevel), 0 = a clause is
// definitely false.
__global__ void Check(const int n, const int m, const int startLevel,
                      const int* exprVar, const int* exprNeg,
                      int* set, int* flags, const int q) {
  int e = blockIdx.x * blockDim.x + threadIdx.x;
  if (e >= q) {
    return;
  }
  // Decode assignment index e into 0/1 values for the first startLevel vars.
  for (int i = 0; i < startLevel; ++i) {
    if (((uint32_t)e) & (((uint32_t)1) << ((uint32_t)i))) {
      set[e * n + i] = 1;
    } else {
      set[e * n + i] = 0;
    }
  }
  // Evaluate every clause; a clause is a disjunction of 3 (possibly negated)
  // literals.
  for (int i = 0; i < m; ++i) {
    int disjunctRes = 0;
    for (int j = 0; j < 3; ++j) {
      int index = exprVar[i * 3 + j];
      if (index >= startLevel) {
        // Literal refers to an unassigned variable: clause undecided so far.
        disjunctRes = -1;
      } else {
        int elem = set[e * n + index];
        elem ^= exprNeg[i * 3 + j];   // apply negation flag
        if (elem == 1) {
          disjunctRes = 1;            // clause satisfied
          break;
        }
      }
    }
    if (disjunctRes == 0) {
      flags[e] = 0;                   // definitely unsatisfiable prefix
      break;
    }
    if (disjunctRes == -1) {
      flags[e] = -1;                  // needs deeper search
    }
  }
}
// Depth-first completion of the partial assignments that survived Check.
// Thread e backtracks over variables [startLevel, n): set values cycle
// -1 (untried) -> 1 -> 0, and k retreats when a branch is exhausted. The
// search ends for everyone once any thread publishes flags[e] == 1 through
// the shared `isFound` cell.
__global__ void DFS(const int n, const int m, const int startLevel,
                    const int* exprVar, const int* exprNeg,
                    int* set, int* flags, int* isFound, const int q) {
  int e = blockIdx.x * blockDim.x + threadIdx.x;
  if (e >= q) {
    return;
  }
  // Loop invariant: k >= startLevel; dropping below startLevel exhausts the
  // subtree and terminates the loop.
  for (int k = startLevel; k >= startLevel;) {
    if (k == n) {
      // All variables assigned; step back to try the next alternative.
      --k;
    } else if (set[e * n + k] != 0) {
      // Advance this variable: -1 (untried) -> 1, then 1 -> 0 on revisit.
      set[e * n + k] = (set[e * n + k] == -1 ? 1 : 0);
      flags[e] = 1;
      // Re-evaluate all clauses under the deepened prefix (vars 0..k).
      for (int i = 0; i < m; ++i) {
        int disjunctRes = 0;
        for (int j = 0; j < 3; ++j) {
          int index = exprVar[i * 3 + j];
          if (index > k) {
            disjunctRes = -1;         // clause still touches unassigned vars
          } else {
            int elem = set[e * n + index];
            elem ^= exprNeg[i * 3 + j];
            if (elem == 1) {
              disjunctRes = 1;
              break;
            }
          }
        }
        if (disjunctRes == 0) {
          flags[e] = 0;               // prefix falsifies a clause: prune
          break;
        }
        if (disjunctRes == -1) {
          flags[e] = -1;
        }
      }
      // Publish success (flags[e] == 1) to every thread; early-exit check.
      atomicMax(isFound, flags[e]);
      if (*isFound == 1) {
        return;
      }
      if (flags[e] == 0) {
        continue;                     // try the sibling value at this depth
      }
      ++k;                            // prefix still viable: go deeper
    } else {
      // Both values (1 and 0) tried at depth k: reset and backtrack.
      set[e * n + k] = -1;
      --k;
    }
  }
}
// 3-SAT solver driver: reads a formula, brute-forces the first
// `precalc_depth` variables on the GPU (Check), compacts the surviving
// partial assignments, then completes them with a parallel DFS kernel.
// Input format: n m, then m clauses of three (variable, negation) pairs,
// variables 1-based in the file and converted to 0-based here.
int main(int argc, char* argv[]) {
  std::chrono::high_resolution_clock::time_point totalStart = std::chrono::high_resolution_clock::now();
  if (argc != 4) {
    std::cerr << "Usage: " << argv[0] << " input_file output_file precalc_depth" << std::endl;
    return 0;
  }
  std::ifstream fin(argv[1]);
  std::ofstream fout(argv[2]);
  int n, m;
  fin >> n >> m;
  int* exprVar = (int*)malloc(3 * m * sizeof(*exprVar));
  int* exprNeg = (int*)malloc(3 * m * sizeof(*exprNeg));
  int* cudaExprVar = nullptr;
  int* cudaExprNeg = nullptr;
  for (int i = 0; i < m; ++i) {
    fin >> exprVar[3 * i]
        >> exprNeg[3 * i]
        >> exprVar[3 * i + 1]
        >> exprNeg[3 * i + 1]
        >> exprVar[3 * i + 2]
        >> exprNeg[3 * i + 2];
    // Convert 1-based file variables to 0-based indices.
    --exprVar[3 * i];
    --exprVar[3 * i + 1];
    --exprVar[3 * i + 2];
  }
  // q = 2^startLevel partial assignments are enumerated exhaustively.
  int startLevel = std::min(n, atoi(argv[3]));
  int q = (1 << startLevel);
  int* set = (int*)calloc(q * n, sizeof(*set));
  int* cudaSet = nullptr;
  for (int i = 0; i < q * n; ++i) {
    set[i] = -1;   // -1 marks "not yet assigned" for the DFS kernel
  }
  int* flags = (int*)calloc(q, sizeof(*flags));
  for (int i = 0; i < q; ++i) {
    flags[i] = 1;  // optimistic default; Check downgrades to -1 or 0
  }
  int* cudaFlags = nullptr;
  bool isSolution = false;
  cudaMalloc(&cudaExprVar, 3 * m * sizeof(*exprVar));
  cudaMalloc(&cudaExprNeg, 3 * m * sizeof(*exprNeg));
  cudaMalloc(&cudaSet, q * n * sizeof(*set));
  cudaMalloc(&cudaFlags, q * sizeof(*flags));
  cudaMemcpy(cudaExprVar, exprVar, 3 * m * sizeof(*exprVar), cudaMemcpyHostToDevice);
  cudaMemcpy(cudaExprNeg, exprNeg, 3 * m * sizeof(*exprNeg), cudaMemcpyHostToDevice);
  cudaMemcpy(cudaSet, set, q * n * sizeof(*set), cudaMemcpyHostToDevice);
  cudaMemcpy(cudaFlags, flags, q * sizeof(*flags), cudaMemcpyHostToDevice);
  int qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
  // Phase 1: evaluate all 2^startLevel prefixes.
  Check<<<qBlock, THREADS_PER_BLOCK>>>(n, m, startLevel,
      cudaExprVar, cudaExprNeg, cudaSet, cudaFlags, q);
  cudaMemcpy(set, cudaSet, q * n * sizeof(*set), cudaMemcpyDeviceToHost);
  cudaMemcpy(flags, cudaFlags, q * sizeof(*flags), cudaMemcpyDeviceToHost);
  // Two-pointer compaction: move surviving (non-zero flag) assignments to the
  // front so the DFS kernel only processes live prefixes; q shrinks to the
  // survivor count.
  for (int i = 0, j = q - 1;;) {
    while (i < q && flags[i] != 0) {
      ++i;
    }
    while (j >= 0 && flags[j] == 0) {
      --j;
    }
    if (i >= j) {
      q = i;
      break;
    }
    memcpy(set + i * n, set + j * n, n * sizeof(*set));
    std::swap(flags[i], flags[j]);
  }
  int* isFound = nullptr;
  cudaMalloc(&isFound, sizeof(*isFound));
  cudaMemset(isFound, 0, sizeof(*isFound));
  // Phase 2: complete the surviving prefixes by parallel DFS.
  if (q > 0) {
    cudaMemcpy(cudaSet, set, q * n * sizeof(*set), cudaMemcpyHostToDevice);
    cudaMemcpy(cudaFlags, flags, q * sizeof(*flags), cudaMemcpyHostToDevice);
    qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    DFS<<<qBlock, THREADS_PER_BLOCK>>>(n, m, startLevel,
        cudaExprVar, cudaExprNeg, cudaSet, cudaFlags, isFound, q);
    cudaMemcpy(set, cudaSet, q * n * sizeof(*set), cudaMemcpyDeviceToHost);
    cudaMemcpy(flags, cudaFlags, q * sizeof(*flags), cudaMemcpyDeviceToHost);
  }
  // Report the first satisfying assignment, if any.
  for (int e = 0; e < q; ++e) {
    if (flags[e] == 1) {
      for (int i = 0; i < n; ++i) {
        fout << "x_" << i + 1 << " = " <<
            (set[e * n + i] == 1 ? 1 : 0) << std::endl;
      }
      isSolution = true;
      break;
    }
  }
  if (!isSolution) {
    fout << "No solution" << std::endl;
  }
  free(exprVar);
  free(exprNeg);
  free(set);
  free(flags);
  cudaFree(cudaExprVar);
  cudaFree(cudaExprNeg);
  cudaFree(cudaSet);
  cudaFree(cudaFlags);
  cudaFree(isFound);
  std::chrono::high_resolution_clock::time_point totalEnd = std::chrono::high_resolution_clock::now();
  double totalTime = std::chrono::duration_cast<std::chrono::duration<double>>(totalEnd - totalStart).count();
  std::cout << "Total time: " << totalTime << std::endl;
  return 0;
}
|
10,361 | #include "sha256.cuh"
// SHA-256 bit primitives (FIPS 180-4): right-rotate, choose/majority
// functions, and the big/small sigma schedule functions.
#define ROTRIGHT(a, b) (((a) >> (b)) | ((a) << (32 - (b))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x, 2) ^ ROTRIGHT(x, 13) ^ ROTRIGHT(x, 22))
#define EP1(x) (ROTRIGHT(x, 6) ^ ROTRIGHT(x, 11) ^ ROTRIGHT(x, 25))
#define SIG0(x) (ROTRIGHT(x, 7) ^ ROTRIGHT(x, 18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x, 17) ^ ROTRIGHT(x, 19) ^ ((x) >> 10))
// The 64 SHA-256 round constants K (first 32 bits of the fractional parts of
// the cube roots of the first 64 primes, per FIPS 180-4). Despite the name,
// this is the round-constant table, not a CUDA kernel.
__constant__ uint32_t sha256_kernel[] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
// SHA-256 compression function over one 64-byte block (FIPS 180-4): expand
// the block into the 64-word message schedule m[], run 64 rounds on the
// working variables a..h, then add them back into the chaining state.
__device__ void sha256_transform(Sha256Context *ctx, const uint8_t data[64]) {
	uint32_t a, b, c, d, e, f, g, h, i, j, t1, t2, m[64];
	// First 16 schedule words: the block bytes, interpreted big-endian.
#pragma unroll 16
	for (i = 0, j = 0; i < 16; ++i, j += 4) {
		m[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]);
	}
	// Remaining 48 words via the sigma recurrence. NOTE(review): the pragma
	// asks for unroll 64 but the loop runs 48 iterations — a harmless hint.
#pragma unroll 64
	for (; i < 64; ++i) {
		m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16];
	}
	// Load the chaining state into the working variables.
	a = ctx->state[0];
	b = ctx->state[1];
	c = ctx->state[2];
	d = ctx->state[3];
	e = ctx->state[4];
	f = ctx->state[5];
	g = ctx->state[6];
	h = ctx->state[7];
	// 64 rounds: t1 mixes the round constant and schedule word, t2 the
	// majority path; variables rotate one position per round.
#pragma unroll 64
	for (i = 0; i < 64; ++i) {
		t1 = h + EP1(e) + CH(e, f, g) + sha256_kernel[i] + m[i];
		t2 = EP0(a) + MAJ(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + t1;
		d = c;
		c = b;
		b = a;
		a = t1 + t2;
	}
	// Davies-Meyer feed-forward into the chaining state.
	ctx->state[0] += a;
	ctx->state[1] += b;
	ctx->state[2] += c;
	ctx->state[3] += d;
	ctx->state[4] += e;
	ctx->state[5] += f;
	ctx->state[6] += g;
	ctx->state[7] += h;
}
// Reset the context: empty buffer, zero message length, and the standard
// SHA-256 initial hash values H0..H7 (FIPS 180-4).
__device__ void sha256_init(Sha256Context *ctx) {
	const uint32_t initial[8] = {
		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
	ctx->dataLen = 0;
	ctx->bitLen = 0;
	for (int w = 0; w < 8; ++w) {
		ctx->state[w] = initial[w];
	}
}
// Absorb `len` message bytes: buffer them into the 64-byte chunk and run the
// compression function each time the chunk fills.
__device__ void sha256_update(Sha256Context *ctx, const uint8_t data[], size_t len) {
	for (size_t i = 0; i < len; ++i) {
		ctx->data[ctx->dataLen] = data[i];
		++ctx->dataLen;
		if (ctx->dataLen == 64) {
			// A full 512-bit chunk is ready: compress and reset the buffer.
			sha256_transform(ctx, ctx->data);
			ctx->bitLen += 512;
			ctx->dataLen = 0;
		}
	}
}
// Finalize: append the 0x80 pad byte, zero-fill to byte 56 (spilling into an
// extra block when fewer than 8 bytes remain), then append the total message
// length in bits (big-endian) and compress the last block.
// NOTE(review): the >> 32..56 shifts assume bitLen is a 64-bit field in
// Sha256Context — confirm in sha256.cuh.
__device__ void sha256_final(Sha256Context *ctx) {
	uint32_t i;
	i = ctx->dataLen;
	// Pad whatever data is left in the buffer.
	if (ctx->dataLen < 56) {
		ctx->data[i++] = 0x80;
		while (i < 56) {
			ctx->data[i++] = 0x00;
		}
	} else {
		// Not enough room for the length field: pad out this block, compress
		// it, and start a fresh all-zero block for the length.
		ctx->data[i++] = 0x80;
		while (i < 64) {
			ctx->data[i++] = 0x00;
		}
		sha256_transform(ctx, ctx->data);
		memset(ctx->data, 0, 56);
	}
	// Append to the padding the total message's length in bits and transform.
	ctx->bitLen += ctx->dataLen * 8;
	ctx->data[63] = ctx->bitLen;
	ctx->data[62] = ctx->bitLen >> 8;
	ctx->data[61] = ctx->bitLen >> 16;
	ctx->data[60] = ctx->bitLen >> 24;
	ctx->data[59] = ctx->bitLen >> 32;
	ctx->data[58] = ctx->bitLen >> 40;
	ctx->data[57] = ctx->bitLen >> 48;
	ctx->data[56] = ctx->bitLen >> 56;
	sha256_transform(ctx, ctx->data);
}
// Serialize the eight 32-bit state words into the 32-byte digest,
// big-endian (most significant byte first within each word).
__device__ void sha256_write_output(Sha256Context *ctx, uint8_t hash[]) {
	for (int word = 0; word < 8; ++word) {
		for (int b = 0; b < 4; ++b) {
			hash[word * 4 + b] = (ctx->state[word] >> (24 - b * 8)) & 0x000000ff;
		}
	}
}
|
10,362 |
#include <iostream>
// Minimal entry point: nothing to do, exit successfully.
int main(int argc, char** argv)
{
	(void)argc;
	(void)argv;
	return 0;
}
|
10,363 | #include "includes.h"
// Matrix-vector product y = A*x for a column-major m x n matrix A:
// thread `row` accumulates sum_i x[i] * A[row + m*i].
__global__ void gen_matvec(float *A, float *x, float *y, const int m, const int n)
{
	unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
	if ( xIndex >= m )
		return;
	float acc = 0.0f;
	for(int i=0; i<n; i++) {
		acc = acc + x[i] * A[xIndex + m * i];
	}
	y[xIndex] = acc;
}
10,364 | #include <stdio.h>
//compilar: nvcc matrizSoma.cu -o matrizSoma
#define N 512
#define B 32
// Element-wise C = A + B over an N x N matrix; one thread per element with a
// bounds guard for partial edge blocks.
__global__ void matrix_add(float *a, float *b, float *c) {
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col >= N || row >= N)
		return;
	int idx = col + row * N;
	c[idx] = a[idx] + b[idx];
}
// Driver: build two N x N matrices on the host, add them on the GPU, and
// print every result element. (compile: nvcc matrizSoma.cu -o matrizSoma)
int main() {
	float *a, *b, *c;
	float *d_a, *d_b, *d_c;
	int size = N;
	size_t bytes = (size_t)size * size * sizeof(float);
	dim3 dimen (B, B);
	cudaMalloc( (void **) &d_a, bytes );
	cudaMalloc( (void **) &d_b, bytes );
	cudaMalloc( (void **) &d_c, bytes );
	a = (float *)malloc( bytes );
	b = (float *)malloc( bytes );
	c = (float *)malloc( bytes );
	// a == b == index; c cleared so a kernel failure is visible as zeros.
	for( int i = 0; i < N*N; i++ ) {
		a[i] = b[i] = i;
		c[i] = 0;
	}
	cudaMemcpy( d_a, a, bytes, cudaMemcpyHostToDevice );
	cudaMemcpy( d_b, b, bytes, cudaMemcpyHostToDevice );
	dim3 grade ((N + B-1)/B, (N + B-1)/B);   // ceil-div grid over the matrix
	matrix_add<<<grade, dimen>>>( d_a, d_b, d_c );
	// FIX: surface launch-configuration and async kernel errors; without
	// this the program would silently print the stale zero-filled buffer.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		printf("kernel launch failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	cudaMemcpy( c, d_c, bytes, cudaMemcpyDeviceToHost );
	for(int i=0; i<N*N; i++){
		printf( "c[%d] = %lf\n",i, c[i] );
	}
	free(a);
	free(b);
	free(c);
	cudaFree( d_a );
	cudaFree( d_b );
	cudaFree( d_c );
	return 0;
} /* end main */
|
10,365 | #include "includes.h"
// Convert normalized sub-image parameters (subImageDefs[0..1]: center x/y in
// [-1,1]; subImageDefs[2]: diameter relative to the shorter image side) into
// pixel coordinates and a clamped pixel diameter. With safeBounds set, the
// corner is additionally clamped so the patch plus a 1-pixel border stays
// inside the input image.
__device__ void EstimateParForSubsample(float* subImageDefs, bool safeBounds, int inputWidth, int inputHeight, int2 & subImg, int & diameterPix)
{
	const float shorterSide = fminf((float)inputWidth, (float)inputHeight);
	diameterPix = (int)(shorterSide * subImageDefs[2]);          // relative -> pixels
	// Map center coordinates from [-1,1] to [0,width/height).
	subImg.x = (int)((float)inputWidth * (subImageDefs[0] + 1) * 0.5f);
	subImg.y = (int)((float)inputHeight * (subImageDefs[1] + 1) * 0.5f);
	// Clamp the diameter to [1, min(w,h) - 1].
	const int maxDiameter = min(inputWidth - 1, inputHeight - 1);
	diameterPix = min(maxDiameter, max(1, diameterPix));
	if (safeBounds)
	{
		subImg.x = min(max(subImg.x, 1), inputWidth - diameterPix - 1);
		subImg.y = min(max(subImg.y, 1), inputHeight - diameterPix - 1);
	}
}
// Sample the input image at each retina-mask point (mask coordinates are
// relative to the sub-image diameter) and record the sampled value plus an
// insertion count.
// NOTE(review): each in-bounds point both assigns `val` to output AND
// atomicAdd's `val` again, so output ends up at 2*val (and the plain store
// races with the atomic if ids ever collide). Looks unintended — confirm
// whether the assignment or the atomicAdd should be removed.
__global__ void RetinaTransform_HaveAtLeastOneValueThere (float * subImageDefs, float* input, int inputWidth, int inputHeight, float* output,int outputDataSize, float* retinaMask, int retinaDataSize, int retinaMaskColHint, float* retinaDataInserted)
{
	// Flattened global thread id over a 2D grid.
	int id_retinaPoint = blockDim.x * blockIdx.y * gridDim.x
		+ blockDim.x * blockIdx.x
		+ threadIdx.x;
	int2 subImg;
	int diameterPix;
	bool safeBounds = 0;   // no corner clamping: out-of-range samples skipped below
	EstimateParForSubsample( subImageDefs, safeBounds, inputWidth, inputHeight, subImg, diameterPix );
	if (id_retinaPoint<outputDataSize)
	{
		output[id_retinaPoint] = 0; // default value
		// Mask stores relative offsets; scale by the patch diameter.
		float x_mask = (retinaMask[id_retinaPoint*retinaMaskColHint]*diameterPix);
		float y_mask = (retinaMask[id_retinaPoint*retinaMaskColHint+1]*diameterPix);
		int x = subImg.x + x_mask;
		int y = subImg.y + y_mask;
		if (x<inputWidth && y<inputHeight && x>=0 && y>=0)
		{
			float val = input[x+y*inputWidth];
			output[id_retinaPoint] = val;
			atomicAdd(output + id_retinaPoint , val);
			atomicAdd(retinaDataInserted + id_retinaPoint , 1);
		}
	}
}
10,366 | #include "test.cuh"
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
// Each launched thread prints one line from the device; output is flushed
// when the kernel completes (at the next host synchronization).
__global__ void kernelPrint(){
printf("GPU run!\n");
}
// Host entry point: prints from the CPU, then launches one block of 5 threads
// that each print from the GPU, and waits for them to finish.
void CudaRun(){
printf("cpu run!\n");
kernelPrint<<<1,5>>>();
// A kernel launch returns no status; query it explicitly so a bad
// configuration or missing device is reported instead of silently ignored.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("kernel launch failed: %s\n", cudaGetErrorString(err));
return;
}
// Device-side printf output only appears once the kernel has completed.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
printf("kernel execution failed: %s\n", cudaGetErrorString(err));
}
}
|
10,367 | #include <iostream>
#include <cuda_runtime.h>
// Writes the constant 42 into n[0]. Every launched thread performs the same
// store, so the write-write race is benign here.
__global__ void foo_device(int * n){
//int i = threadIdx.x;
n[0] = 42;
}
// Minimal device round-trip demo: allocate an int on the GPU, have a kernel
// write 42 into it, copy it back and print it.
// Fixes vs. original: the device allocation is now freed, and the
// "cudaMemcyp" typo in the error message is corrected.
int main(int argc, char const *argv[])
{
int * device;
int host;
cudaError_t error;
error = cudaMalloc( (void **) &device, sizeof(int));
if (error != cudaSuccess)
{
std::cout << "cudaMalloc returned error " << cudaGetErrorString(error) << "\n";
}
// All 20 threads store the same value; the race is benign.
foo_device<<<1,20>>>(device);
error = cudaGetLastError();
if (error != cudaSuccess)
{
std::cout << "kernel returned error " << cudaGetErrorString(error) << "\n";
}
// cudaMemcpy (D2H) synchronizes with the kernel before copying.
error = cudaMemcpy(&host, device, sizeof(int), cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
std::cout << "cudaMemcpy returned error " << cudaGetErrorString(error) << "\n";
}
std::cout << "the cuda number is: " << host << std::endl;
cudaFree(device); // was leaked before
return 0;
}
|
10,368 | /* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdio.h>
// Debug helper: prints an N*N*N cube stored as a triple-pointer array,
// one N x N layer at a time, from device code.
__device__ void print_matrix2(double*** A, int N){
for (int layer = 0; layer < N; layer++){
printf("\n %d -th Layer \n", layer);
for (int row = 0; row < N; row++){
for (int col = 0; col < N; col++){
printf("%lf \t", A[layer][row][col]);
}
printf("\n");
}
}
}
// One Jacobi sweep over the interior of an N x N x N grid: each thread
// updates one interior point u[i+1][j+1][k+1] from the six face neighbours
// in prev_u plus the source term f, scaled by denominator (presumably 1/6 —
// confirm at call site). Thread mapping: x->k, y->j, z->i; the +1 offsets
// with the (index < N-2) guards restrict updates to interior points, so
// boundary values stay fixed.
__global__ void jacobi_gpu2(double*** u, double***prev_u, double*** f, int N, double step_width, double denominator) {
double temp;
int j_index=threadIdx.y + blockIdx.y*blockDim.y;
int k_index= threadIdx.x + blockIdx.x*blockDim.x;
int i_index=threadIdx.z + blockIdx.z*blockDim.z;
//printf("%d %d %d \n", j_index, k_index, i_index);
if ((j_index<N-2) && (k_index<N-2) && (i_index<N-2)){
// sum of the six face neighbours plus h^2 * f at the centre point
temp=prev_u[i_index][j_index+1][k_index+1] + prev_u[i_index+2][j_index+1][k_index+1]+
prev_u[i_index+1][j_index][k_index+1] + prev_u[i_index+1][j_index+2][k_index+1] +
prev_u[i_index+1][j_index+1][k_index]+ prev_u[i_index+1][j_index+1][k_index+2] + step_width*step_width*f[i_index+1][j_index+1][k_index+1];
u[i_index+1][j_index+1][k_index+1]=temp*denominator;
}
//printf("On the GPU we now have matrix:\n");
//print_matrix2(u,N);
}
|
10,369 | //---------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
//---------------------------------------------------------------------------------
static const int N = 1000001; //Number of rows in input matrix
static const int M = 100; //Number of columns in input matrix
using namespace std;
//---------------------------------------------------------------------------------
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//---------------------------------------------------------------------------------
// Transposes A_d (rowCount x colCount, row-major) into T_d (colCount x
// rowCount). One thread per element: x indexes the source column, y the
// source row.
__global__ void matrixTranspose(unsigned int* A_d, unsigned int *T_d, int rowCount, int colCount) {
const int srcCol = blockIdx.x * blockDim.x + threadIdx.x;
const int srcRow = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail: the launch grid may overshoot the matrix edges.
if (srcRow >= rowCount || srcCol >= colCount)
return;
T_d[srcCol * rowCount + srcRow] = A_d[srcRow * colCount + srcCol];
}
//---------------------------------------------------------------------------------
// Allocates an N x M matrix, transposes it on the GPU, and verifies the
// result against a host-side check. Fixes vs. original: the deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize(), and all
// copy sizes use sizeof(unsigned int) to match the element type.
int main(void) {
unsigned int **A ;
unsigned int **T ;
unsigned int *A_h;
unsigned int *A_d;
unsigned int *T_h;
unsigned int *T_d;
//Set Device
CUDA_CHECK_RETURN(cudaSetDevice(0));
//Seed random number generator
srand(time(NULL));
//Clear command prompt
cout << "\033[2J\033[1;1H";
cout << "Allocating arrays on host ... ";
A_h = new unsigned int[N*M];
T_h = new unsigned int[N*M];
A = new unsigned int* [N];
for (int i = 0; i < N; ++i) {
A[i] = new unsigned int[M];
}
T = new unsigned int* [M];
for (int i = 0; i < M; ++i) {
T[i] = new unsigned int[N];
}
cout << "done.\nPopluating input matrix on host ... ";
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
A[i][j] = rand();
}
}
cout << "done.\nConverting 2-dimensional input matrix to 1-dimensional array on host ... ";
// Flatten A row-major into A_h for the device.
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
A_h[i*M+j] = A[i][j];
}
}
cout << "done.\nAllocating arrays on device ... ";
CUDA_CHECK_RETURN(
cudaMalloc((void** ) &A_d, sizeof(unsigned int) * N*M));
CUDA_CHECK_RETURN(
cudaMalloc((void** ) &T_d, sizeof(unsigned int) * N*M));
cout << "done.\nCopying arrays from host to device ... ";
CUDA_CHECK_RETURN(
cudaMemcpy(A_d, A_h, sizeof(unsigned int) * N*M,
cudaMemcpyHostToDevice));
cout << "done.\nLaunching kernel ... ";
// 32x32 thread blocks; grid sized by ceil-division to cover the matrix.
dim3 dimBlock(32,32);
dim3 dimGrid(ceil((double)M/32), ceil((double)N/32));
//Time kernel launch with CUDA events
cudaEvent_t start, stop;
CUDA_CHECK_RETURN(cudaEventCreate(&start));
CUDA_CHECK_RETURN(cudaEventCreate(&stop));
float elapsedTime;
CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
matrixTranspose<<< dimGrid, dimBlock >>>(A_d, T_d, N, M);
CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
// cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError()); //Check if an error occurred in device code
CUDA_CHECK_RETURN(cudaEventDestroy(start));
CUDA_CHECK_RETURN(cudaEventDestroy(stop));
cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";
cout << "Copying results back to host .... ";
CUDA_CHECK_RETURN(
cudaMemcpy(T_h, T_d, sizeof(unsigned int) * N*M,
cudaMemcpyDeviceToHost));
cout << "done.\nConverting 1-dimensional output array to 2-dimensional matrix on host ... ";
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
T[i][j] = T_h[i*N+j];
}
}
cout << "done.\nVerifying results on host ... ";
//Time host-side validation
clock_t st, ed;
st = clock();
bool valid = true;
for (int i = 0; i < M; i++){
for(int j = 0; j < N; j++){
if (T[i][j] != A[j][i])
{
cout << "done.\n***GPU results are incorrect***";
valid = false;
break;
}
}
if(!valid){
break;
}
}
cout << "done\n";
if (valid) {
cout << "GPU results are valid.\n";
}
ed = clock() - st;
cout << "Elapsed time on host: " << ((float) ed) / CLOCKS_PER_SEC * 1000
<< " ms" << endl;
cout << "Freeing memory on device ... ";
CUDA_CHECK_RETURN(cudaFree((void* ) A_d));
CUDA_CHECK_RETURN(cudaFree((void* ) T_d));
CUDA_CHECK_RETURN(cudaDeviceReset());
cout << "done.\nFreeing memory on host ... ";
delete[] A_h;
delete[] T_h;
for (int i = 0; i < N; ++i) {
delete[] A[i];
}
delete[] A;
for (int i = 0; i < M; ++i) {
delete[] T[i];
}
delete[] T;
cout << "done.\nExiting program.\n";
return 0;
}
|
10,370 | //----------------------------------------------------------------------
/*!\file gpu_algorithmsn/BasicComplexMath.cu
*
* \author Felix Laufer
*
*
* CUDA: Output methods for debugging
*
*/
//----------------------------------------------------------------------
#include <math.h>
#include <cufft.h>
#include <stdio.h>
namespace gpu_algorithms
{
namespace cuda
{
typedef cufftComplex Complex;
typedef cufftReal Real;
// Convert a real data stream to a complex data stream (imaginary part 0).
// Grid-stride loop: works for any grid size covering stream_size elements.
static __global__ void Real2Complex(const Real *idata, Complex *odata, const unsigned int stream_size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < stream_size; i += numThreads)
{
odata[i].x = idata[i];
odata[i].y = 0.0f;
}
}
// Convert a complex data stream to a real data stream (keeps the real part).
static __global__ void Complex2Real(const Complex *idata, Real *odata, const unsigned int stream_size)
{
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = threadID; i < stream_size; i += numThreads)
{
odata[i] = idata[i].x;
}
}
// Print a real data stream located in host memory as an ny x nx matrix.
static __host__ void PrintHostMatrix(const Real* idata, const unsigned int nx, const unsigned ny)
{
for(unsigned int i = 0; i < nx * ny; ++i)
{
printf("%10.8lf ", idata[i]);
if ((i+1) % nx == 0)
{
printf("%s\n", "");
}
}
printf("%s\n", "-----------------------------------------------");
}
// Print a complex data stream located in device memory in matrix form:
// strips the imaginary parts on the device, copies to host and prints.
static __host__ void PrintDeviceComplexMatrix(const Complex* idata, const unsigned int nx, const unsigned int ny)
{
unsigned int stream_size = nx * ny;
unsigned int stream_size_real = stream_size * sizeof(Real);
Real* result;
cudaMalloc((void**)&result, stream_size_real);
const dim3 grid(ceil(stream_size / 256.0f));
const dim3 block(256); // block dimension is integral
Complex2Real<<<grid, block>>>(idata, result, stream_size);
Real* result_host = new Real[stream_size];
// cudaMemcpy (D2H) synchronizes with the kernel before copying.
cudaMemcpy(result_host, result, stream_size_real, cudaMemcpyDeviceToHost);
PrintHostMatrix(result_host, nx, ny);
cudaFree(result);
delete[] result_host; // new[] must be released with delete[] (was delete)
}
// Print a real data stream located in device memory in matrix form.
static __host__ void PrintDeviceRealMatrix(const Real* idata, const unsigned int nx, const unsigned int ny)
{
unsigned int stream_size = nx * ny;
unsigned int stream_size_real = stream_size * sizeof(Real);
Real* result_host = new Real[stream_size];
cudaMemcpy(result_host, idata, stream_size_real, cudaMemcpyDeviceToHost);
PrintHostMatrix(result_host, nx, ny);
delete[] result_host; // new[] must be released with delete[] (was delete)
}
}
}
|
10,371 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <assert.h>
#define N 2//64
// Adds a fixed offset of 2 to its argument.
__device__ int f(int x) {
const int kOffset = 2;
return x + kOffset;
}
// Stores f(x) = x + 2 into *y; every launched thread performs the same store.
__global__ void foo(int *y, int x) {
*y = f(x);
}
|
10,372 | #include "includes.h"
// Block-wise max reduction: each block writes the maximum of its slice of
// d_logLum into d_out[blockIdx.x]. Requires dynamic shared memory of
// blockDim.x * sizeof(float), and the halving loop assumes blockDim.x is a
// power of two.
// NOTE(review): out-of-range lanes use -10000000 as the identity; inputs
// smaller than that would be lost — -FLT_MAX would be safer.
__global__ void reduce_max_kernel(float *d_out, const float *d_logLum, int size) {
int tid = threadIdx.x; // Local thread index
int myId = blockIdx.x * blockDim.x + threadIdx.x; // Global thread index
extern __shared__ float temp[];
// --- Loading data to shared memory. All the threads contribute to loading the data to shared memory.
temp[tid] = (myId < size) ? d_logLum[myId] : -10000000;
// --- Your solution
// if (myId < size) { temp[tid] = d_logLum[myId]; } else { temp[tid] = d_logLum[tid]; }
// --- Before going further, we have to make sure that all the shared memory loads have been completed
__syncthreads();
// --- Reduction in shared memory. Only half of the threads contribute to reduction.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s) { temp[tid] = fmaxf(temp[tid], temp[tid + s]); }
// --- At the end of each iteration loop, we have to make sure that all memory operations have been completed
__syncthreads();
}
// --- Your solution
//for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
// if (tid < s) { if (myId < size) { temp[tid] = fmaxf(d_logLum[myId + s], d_logLum[myId]); } else { temp[tid] = d_logLum[tid]; } }
// __syncthreads();
//}
// --- Thread 0 publishes the block's result.
if (tid == 0) {
d_out[blockIdx.x] = temp[0];
}
}
10,373 | #include <iostream>
#include <time.h>
#include <math.h>
//#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
using std::cout; using std::cin;
//void ImpError(cudaError_t err);
// Prints the human-readable text for a CUDA error code to stdout.
// Deliberately does not exit (the exit call was left commented out).
void ImpError(cudaError_t err)
{
cout << cudaGetErrorString(err); // << " en " << __FILE__ << __LINE__;
//exit(EXIT_FAILURE);
}
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, n).
__global__
void vecAddKernel(float* A, float* B, float* C, int n)
{
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
// grid-tail guard: the launch may cover more threads than elements
if (idx >= n)
return;
C[idx] = A[idx] + B[idx];
}
// Host wrapper: copies A and B (n floats each) to the device, launches
// vecAddKernel, and copies the sum back into C. Errors are reported via
// ImpError but execution continues (matching the original best-effort style).
// Fix vs. original: the kernel launch status is now checked with
// cudaGetLastError() — launches return no status by themselves.
void vecAdd(float* A, float* B, float* C, int n)
{
int size = n * sizeof(float);
float* d_A, * d_B, * d_C;
cudaError_t err = cudaSuccess;
err = cudaMalloc((void**)& d_A, size);
if (err != cudaSuccess)
{
cout << "d_A";
ImpError(err);
}
err = cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
ImpError(err);
err = cudaMalloc((void**)& d_B, size);
if (err != cudaSuccess)
ImpError(err);
err = cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
ImpError(err);
err = cudaMalloc((void**)& d_C, size);
if (err != cudaSuccess)
ImpError(err);
// <<<blocks, threads-per-block>>>: ceil so the grid covers all n elements
vecAddKernel<<<ceil(n / 512.0), 512>>>(d_A, d_B, d_C, n);
err = cudaGetLastError(); // catch bad launch configurations
if (err != cudaSuccess)
ImpError(err);
err = cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
ImpError(err);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
// Prints the first n elements of A separated by spaces, then a newline.
// Fix vs. original: removed the redundant "if (i < n)" inside the loop —
// the loop bound already guarantees it.
void Imprimir(float* A, int n)
{
for (int i = 0; i < n; ++i)
std::cout << A[i] << " ";
std::cout << "\n";
}
// Fills A[0..n) with pseudo-random floats; note RAND_MAX / n is an integer
// division, exactly as in the original scaling.
void GenVector(float* A, int n)
{
for (int idx = 0; idx < n; ++idx) {
const float sample = static_cast<float>(rand()) / static_cast<float>(RAND_MAX / n);
A[idx] = sample;
}
}
// Builds two random vectors, adds them on the GPU via vecAdd, and prints all
// three. Fix vs. original: the host arrays allocated with new[] are now
// released (they were leaked).
int main(int argc, char** argv)
{
int array_size = 10;
float* A, * B, * C;
srand(time(NULL));
A = new float[array_size];
B = new float[array_size];
C = new float[array_size];
GenVector(A, array_size);
GenVector(B, array_size);
vecAdd(A, B, C, array_size);
Imprimir(A, array_size);
Imprimir(B, array_size);
Imprimir(C, array_size);
// release host buffers (were leaked before)
delete[] A;
delete[] B;
delete[] C;
return 0;
}
10,374 | #include <stdio.h>
#include <cuda.h>
// Placeholder kernel: accepts the pitched 3D allocation but performs no work.
__global__ void fill(cudaPitchedPtr U){
}
// Probes cudaMalloc3D for extents 500..600 in each dimension and prints the
// pitch/xsize/ysize the runtime chose for each allocation.
// Fixes vs. original: cudaExtent fields and cudaPitchedPtr members are
// size_t, so they are printed with %zu (printing them with %d is undefined
// behaviour on LP64); the allocation result is checked; the dead initial
// 40x40x40 extent stores are removed (the loops overwrite them).
int main(int argc, char *argv[]){
struct cudaPitchedPtr U;
struct cudaExtent ext;
printf("%15s pitch xsize ysize\n", "extent");
for(ext.depth=500; ext.depth < 700; ext.depth+=100){
for(ext.height=500; ext.height < 700; ext.height+=100){
for(ext.width=500; ext.width < 700; ext.width+=100){
cudaError_t err = cudaMalloc3D(&U, ext);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc3D failed: %s\n", cudaGetErrorString(err));
continue;
}
printf("[%3zu][%3zu][%3zu] %5zu %5zu %5zu\n", ext.depth, ext.height, ext.width, U.pitch, U.xsize, U.ysize);
cudaFree(U.ptr);
}
}
}
return 0;
}
|
10,375 | // CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cstring>
#include <math.h>
#define MAX_DISTANCE 164025000000.0
#define NO_HEAD -1
extern "C"
//Adds two arrays
float * runCudaPart(float *h_in, float *h_out);
double runCalcClusterHeadsAndTotalEnergy(int *h_out, float *h_in_x, float * h_in_y, int *h_in_ch, int arr_size_ch, int arr_size, int base_x, int base_y);
// Squares each element: d_out[i] = d_in[i]^2. One thread per element;
// indexing uses threadIdx.x only, so it assumes a single-block launch.
__global__ void square(float *d_out, float *d_in)
{
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f; //node to ch d^2
}
// For node idx (one thread per node, single-block launch): scans the cluster
// head list d_in_ch (indices into the coordinate arrays; NO_HEAD entries are
// skipped), keeps the nearest head's index in d_out[idx], and writes the
// radio energy for a 2000-bit message over that squared distance to
// d_out_float[idx]. A node located exactly at the base station assigns
// itself with zero cost (the loop still runs but never beats distance 0).
// NOTE(review): if every entry of d_in_ch is NO_HEAD and the node is not at
// the base, d_out[idx] and d_out_float[idx] are never written — confirm the
// caller zeroes the buffers or guarantees at least one head.
__global__ void calcToCh(float *d_out_float, int *d_out, float *d_in_x, float * d_in_y, int * d_in_ch, int arr_size_ch, int base_x, int base_y)
{
int idx = threadIdx.x;
float dist2 = MAX_DISTANCE, cur_dist2;
for(int i = 0; i < arr_size_ch; i++)
{
// this node IS the base station: zero-cost self-assignment
if(d_in_x[idx] == base_x && d_in_y[idx] == base_y)
{
dist2 = 0.0;
d_out[idx] = idx;
d_out_float[idx] = 0.0;
continue;
}
// skip empty cluster-head slots
if (d_in_ch[i] == NO_HEAD)
{
continue;
}
cur_dist2 = ( powf(fabs(d_in_x[d_in_ch[i]]-d_in_x[idx]), 2) + powf(fabs(d_in_y[d_in_ch[i]]-d_in_y[idx]), 2) );
if( cur_dist2 < dist2)
{
// energy model: tx electronics + amplifier * d^2 + rx electronics
dist2 = cur_dist2;
d_out[idx] = d_in_ch[i];
d_out_float[idx] = (50e-9 * 2000) + ((100e-12) * 2000 * dist2) + (50e-9 * 2000);
}
}
}
// Assigns every node to its nearest cluster head (indices written to h_out)
// and returns the total radio energy: each node's cost to its head (computed
// on the GPU by calcToCh) plus each head's distance-squared cost to the base
// station at (base_x, base_y). Single-block launch: assumes arr_size does
// not exceed the per-block thread limit.
// Fixes vs. original: d_out_float is now cudaFree'd (it was leaked),
// the non-standard variable-length array h_out_float is heap-allocated, and
// int-element buffers are sized with sizeof(int) instead of sizeof(float).
double runCalcClusterHeadsAndTotalEnergy(int *h_out, float *h_in_x, float * h_in_y, int *h_in_ch, int arr_size_ch, int arr_size, int base_x, int base_y)
{
const int ARRAY_SIZE_POINTS = arr_size;
const int ARRAY_BYTES_POINTS = ARRAY_SIZE_POINTS * sizeof(float);
const int ARRAY_SIZE_CH = arr_size_ch;
const int ARRAY_BYTES_CH = ARRAY_SIZE_CH * sizeof(int); // element type is int
int * d_in_ch;//declare GPU memory pointers
float * d_in_y;
float * d_in_x;
int * d_out_int;
float * d_out_float;
// heap allocation instead of a VLA (VLAs are non-standard C++)
float * h_out_float = new float[ARRAY_SIZE_POINTS];
cudaMalloc((void**) &d_in_ch, ARRAY_BYTES_CH); // allocate GPU memory
cudaMalloc((void**) &d_out_int, ARRAY_SIZE_POINTS * sizeof(int));
cudaMalloc((void**) &d_out_float, ARRAY_BYTES_POINTS);
cudaMalloc((void**) &d_in_x, ARRAY_BYTES_POINTS);
cudaMalloc((void**) &d_in_y, ARRAY_BYTES_POINTS);
cudaMemcpy(d_in_ch, h_in_ch, ARRAY_BYTES_CH, cudaMemcpyHostToDevice);// Transfer the arrays to GPU
cudaMemcpy(d_in_y, h_in_y, ARRAY_BYTES_POINTS, cudaMemcpyHostToDevice);
cudaMemcpy(d_in_x, h_in_x, ARRAY_BYTES_POINTS, cudaMemcpyHostToDevice);
calcToCh <<< 1, ARRAY_SIZE_POINTS >>> (d_out_float, d_out_int, d_in_x, d_in_y, d_in_ch, ARRAY_SIZE_CH, base_x, base_y);// Launch the Kernel
cudaMemcpy(h_out, d_out_int, ARRAY_SIZE_POINTS * sizeof(int), cudaMemcpyDeviceToHost);// copy back the result arrays to the CPU
cudaMemcpy(h_out_float, d_out_float, ARRAY_BYTES_POINTS, cudaMemcpyDeviceToHost);
// node-to-head costs (index 0 skipped, as in the original)
double total = 0.0;
for (int i = 1; i < ARRAY_SIZE_POINTS; i++)
{
total += h_out_float[i];
}
float bd2 = 0.0;
for (int i = 1; i < ARRAY_SIZE_CH; i++)
{
if (h_in_ch[i] == NO_HEAD) continue;
bd2 = ( powf(fabs(h_in_x[h_in_ch[i]]-base_x), 2) + powf(fabs(h_in_y[h_in_ch[i]]-base_y), 2) );
total += (50e-9 * 2000) + ((100e-12) * 2000 * bd2);// cluster heads to base (d^2)
}
delete[] h_out_float;
cudaFree(d_in_ch);
cudaFree(d_in_x);
cudaFree(d_in_y);
cudaFree(d_out_int);
cudaFree(d_out_float); // was leaked before
return total;
}
// Main cuda function
// Copies h_in (64 floats) to the device, squares every element with one
// 64-thread block, and copies the results back into h_out. Returns h_out.
float * runCudaPart(float *h_in, float *h_out)
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// device buffers
float * dev_src = 0;
float * dev_dst = 0;
cudaMalloc((void**) &dev_src, ARRAY_BYTES);
cudaMalloc((void**) &dev_dst, ARRAY_BYTES);
// host -> device
cudaMemcpy(dev_src, h_in, ARRAY_BYTES , cudaMemcpyHostToDevice);
// one block, one thread per element
square <<< 1, ARRAY_SIZE >>> (dev_dst, dev_src);
// device -> host (this copy synchronizes with the kernel)
cudaMemcpy(h_out, dev_dst, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaFree(dev_src);
cudaFree(dev_dst);
return h_out;
}
//__global__ void calcEnergyToCh_d2(float *d_out, float *d_in_x, float * d_in_y)
//{
// int idx = threadIdx.x;
// float f = d_in_x[idx];
// // dizi başında ch var. diğer elemanların toplamını
// d_out[idx] = ( powf(fabs(d_in_x[0]-d_in_x[idx]), 2) + powf(fabs(d_in_y[0]-d_in_y[idx]), 2) ); //node to ch d^2
//}
//double runCalcChTotalEnergy(float *h_out, float *h_in_x, float * h_in_y, int arr_size, int base_x, int base_y)
//{
// const int ARRAY_SIZE = arr_size;
// const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// float * d_in_y;//declare GPU memory pointers
// float * d_in_x;//declare GPU memory pointers
// float * d_out;//declare GPU memory pointers
// cudaMalloc((void**) &d_out, ARRAY_BYTES); // allocate GPU memory
// cudaMalloc((void**) &d_in_x, ARRAY_BYTES); // allocate GPU memory
// cudaMalloc((void**) &d_in_y, ARRAY_BYTES); // allocate GPU memory
// cudaMemcpy(d_in_y, h_in_y, ARRAY_BYTES , cudaMemcpyHostToDevice);// Transfer the array to GPU
// cudaMemcpy(d_in_x, h_in_x, ARRAY_BYTES , cudaMemcpyHostToDevice);// Transfer the array to GPU
// calcDistToCh_d2 <<< 1, ARRAY_SIZE >>> (d_out, d_in_x, d_in_y);// Launch the Kernel
// cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);// copy back the result array to the CPU
// cudaFree(d_in_x);
// cudaFree(d_in_y);
// cudaFree(d_out);
// double total = 0.0;
// for (int i = 1; i < ARRAY_SIZE; i++)
// {
// total += h_out[i];
// }
// total += ( pow(abs(h_in_x[0]-base_x), 2) + pow(abs(h_in_y[0]-base_y), 2) ); // ch to base (d^2)
// printf("\n=====%f=======\n", total);
// return total;
//}
|
10,376 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
//#include <common.h>
#define M 10
#define NR_BLOCK 1024
// 5-point stencil: b[idx] = 0.2 * (left + centre + right + above + below),
// where a holds three consecutive M-element rows (row above at a[0..M),
// centre row at a[M..2M), row below at a[2M..3M)) and b receives the centre
// row. One block per base index i; each block strides by NR_BLOCK.
// NOTE(review): with M = 10 and NR_BLOCK = 1024 only j = 0 with i in
// 1..M-1 ever passes the guard, and the "> 0" test excludes index 0 —
// confirm both are intended.
__global__ void compute(const float * a, float * b)
{
int i = blockIdx.x;
int j;
for (j = 0; j < M; j++) {
if ((i + j * NR_BLOCK) > 0 && (i + j * NR_BLOCK) < M) {
b[i + j * NR_BLOCK] = 0.2 * (a[M+((i+j*NR_BLOCK)-1)] + a[M+(i+j*NR_BLOCK)] + a[M+((i+j*NR_BLOCK)+1)] + a[(i+j*NR_BLOCK)] + a[2*M+(i+j*NR_BLOCK)]);
}
}
}
// Per-worker argument bundle for the stencil driver foo().
struct params {
float ** a; // local input rows (host)
float ** b; // local output rows (host)
float * c; // row used above the first local row when up != -1 (halo from the neighbour above — confirm with caller)
float * d; // row used below the last local row when down != -1 (halo from the neighbour below — confirm with caller)
float * c_a; // device input buffer holding 3 rows: above / centre / below
float * c_b; // device output buffer for the centre row
int up, down, j; // neighbour ids (-1 = none, per foo's tests) and current row index
int stop; // number of local rows (j == stop - 1 is the last)
int num_pes; // total worker count (read but unused in foo)
};
typedef struct params params_t;
// Runs one stencil update for local row j: assembles a 3-row window (row
// above, row j, row below) in the device buffer c_a — substituting the halo
// rows c/d at the partition edges — launches compute(), and copies the
// updated row back into b[j].
// NOTE(review): the edge tests look suspicious — `up != -1 && j == 0` uses
// the halo only when a neighbour above exists, but the else branch then
// reads a[j - 1] even when j == 0 and up == -1 (out of bounds). Confirm foo
// is never called in that configuration, or that the condition should be
// `up == -1 || j != 0`-style. Same concern for the `down` branch.
void foo(params_t * param)
{
int j = param->j;
int up = param->up;
int down = param->down;
int num_pes = param->num_pes;
// above: halo row at the top edge, otherwise previous local row
if (up != -1 && j == 0) {
cudaMemcpy(param->c_a,
param->c,
M * sizeof(float),
cudaMemcpyHostToDevice);
} else {
cudaMemcpy(param->c_a,
param->a[j - 1],
M * sizeof(float),
cudaMemcpyHostToDevice);
}
// middle: the row being updated
cudaMemcpy(&(param->c_a[M]),
param->a[j],
M * sizeof(float),
cudaMemcpyHostToDevice);
// below: halo row at the bottom edge, otherwise next local row
if (down != -1 && j == param->stop - 1) {
cudaMemcpy(&(param->c_a[2 * M]),
param->d,
M * sizeof(float),
cudaMemcpyHostToDevice);
} else {
cudaMemcpy(&(param->c_a[2 * M]),
param->a[j + 1],
M * sizeof(float),
cudaMemcpyHostToDevice);
}
/*if (!(down != -1 && j == (M / num_pes - 2))) {
cudaMemcpy(&(param->c_a[2 * M]),
param->a[j + 1],
M * sizeof(float),
cudaMemcpyHostToDevice);
} else {
cudaMemcpy(&(param->c_a[2 * M]),
param->d,
M * sizeof(float),
cudaMemcpyHostToDevice);
}*/
// seed the output row with its current contents, then run the stencil
cudaMemcpy(param->c_b,
param->b[j],
M * sizeof(float),
cudaMemcpyHostToDevice);
compute<<<NR_BLOCK, 1>>>(param->c_a, param->c_b);
cudaMemcpy(param->b[j], param->c_b, M * sizeof(float), cudaMemcpyDeviceToHost);
}
/*cudaMemcpy(c_a, a[j - 1], M * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&c_a[M], a[j], M * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&c_a[2*M], a[j+1], M * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b[j], c_b, M * sizeof(float), cudaMemcpyDeviceToHost); */
//}
// printf("[debug] updating a with b\n");
/* for (j = 1; j < M - 1; j++) {
for (k = 1; k < M - 1; k++) {
a[j][k] = b[j][k];
}
}*/
/* #ifdef DEBUG
printf("[debug output of b]\n");
for (j = 0; j < M; j++) {
for (k = 0; k < M; k++) {
printf("%5.5g ", a[j][k]);
}
printf("\n");
}
printf("\n\n");
#endif * DEBUG * */
//}
//}
|
10,377 | #include <stdio.h>
#include <math.h>
// Reads a matrix file: first line "rows cols", followed by 4 * rows * cols
// bit values stored as four planes. Plane j is loaded into every 4th slot
// starting at offset j, then each cell's four plane bits are packed into a
// single int (plane j contributes bit j: weights 1, 2, 4, 8). *Aos receives
// a malloc'd rows*cols array the caller must free().
// Fixes vs. original: the file is now closed, the temporary plane buffer is
// freed (both were leaked), and open/scan failures are reported instead of
// dereferencing NULL.
void readInput(const char *filename, int **Aos, int *rows, int *cols) {
FILE *file;
file = fopen(filename, "r");
if (file == NULL) {
fprintf(stderr, "readInput: cannot open %s\n", filename);
*Aos = NULL;
return;
}
if (fscanf(file, "%d %d", rows, cols) != 2) {
fprintf(stderr, "readInput: malformed header in %s\n", filename);
fclose(file);
*Aos = NULL;
return;
}
int * A_F1 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
int * A_F2 = (int *) malloc(*rows * (*cols) * sizeof(int));
for(int j = 0; j < 4; j++) {
int counter = 0;
for(int i = 0; i < *cols*(*rows); i++){
fscanf(file, "%d ", &A_F1[counter +j]);
counter = counter + 4;
}
}
// pack the four interleaved plane bits of each cell into one value
int counter = 0;
for(int j = 0; j < *cols*(*rows);j++){
A_F2[j] = A_F1[counter]*1 + A_F1[counter+1]*2 + A_F1[counter+2]*2*2 + A_F1[counter+3]*2*2*2;
counter = counter +4;
}
fclose(file); // was never closed
free(A_F1); // temporary buffer was leaked
*Aos = A_F2;
}
// Debug helper: prints all rows*cols packed cell states on one line,
// space-separated, followed by a newline.
void printMatrix(int *A, int rows, int cols) {
for(int i = 0; i < rows*cols; i++){
printf("%i ", A[i]);
}
printf("\n");
};
// One synchronous update step of a 16-state lattice-gas-style automaton with
// periodic boundaries: each cell rebuilds its 4-bit state in `array` from
// the states of its left/right/down/up neighbours in `buffer`. Each
// direction contributes one bit (1, 4, 2, 8 respectively) when the
// neighbour's state is in the corresponding hard-coded state list.
// NOTE(review): `y = tId / rows` mixes rows and cols — for non-square grids
// this is probably meant to be tId / cols; confirm with the data layout.
// NOTE(review): in each `if (reject ==1 && A || B || ...)`, && binds tighter
// than ||, so the `reject == 1` guard applies only to the first comparison;
// verify the intended grouping against the wrap-around else-if branches.
__global__ void step_periodic(int * array,int *buffer,int rows, int cols){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if (tId < rows*cols){
int reject = 1;
int x = tId%(cols);
int y = (int) tId/rows;
int total = 0;
// --- left neighbour (x-1, wrapping to cols-1) ---
int c_aux = x -1;
if (c_aux < 0){
c_aux = cols-1;
reject = 0;
}
if (reject ==1 && buffer[(y*cols + c_aux)] == 1 || buffer[(y*cols + c_aux)] == 3 || buffer[(y*cols + c_aux)] == 10 ||
buffer[(y*cols + c_aux)] == 9 || buffer[(y*cols + c_aux)] == 7 || buffer[(y*cols + c_aux)] == 11 ||
buffer[(y*cols + c_aux)] == 13 || buffer[(y*cols + c_aux)] == 15){
total = total + 1;
}else if(c_aux == 0){
if (buffer[(y*cols + c_aux)] == 4 || buffer[(y*cols + c_aux)] == 10 || buffer[(y*cols + c_aux)] == 6 ||
buffer[(y*cols + c_aux)] == 12 || buffer[(y*cols + c_aux)] == 7 || buffer[(y*cols + c_aux)] == 13 ||
buffer[(y*cols + c_aux)] == 14 || buffer[(y*cols + c_aux)] == 15){
total = total + 1;
}
}else {
total = total + 0;
}
// --- right neighbour (x+1, wrapping to 0) ---
reject = 1;
c_aux = x + 1;
if (c_aux == cols){
c_aux = 0;
reject = 0;
}
if (reject ==1 && buffer[(y*cols + c_aux)] == 4 || buffer[(y*cols + c_aux)] == 10 || buffer[(y*cols + c_aux)] == 6 ||
buffer[(y*cols + c_aux)] == 12 || buffer[(y*cols + c_aux)] == 7 || buffer[(y*cols + c_aux)] == 13 ||
buffer[(y*cols + c_aux)] == 14 || buffer[(y*cols + c_aux)] == 15){
total = total + 4;
}else if(c_aux == cols-1){
if (buffer[(y*cols + c_aux)] == 1 || buffer[(y*cols + c_aux)] == 3 || buffer[(y*cols + c_aux)] == 10 ||
buffer[(y*cols + c_aux)] == 9 || buffer[(y*cols + c_aux)] == 7 || buffer[(y*cols + c_aux)] == 11 ||
buffer[(y*cols + c_aux)] == 13 || buffer[(y*cols + c_aux)] == 15){
total = total + 4;
}
}else {
total = total + 0;
}
// --- row below (y+1, wrapping to 0) ---
reject = 1;
c_aux = y + 1;
if (c_aux == rows){
c_aux = 0;
reject = 0;
}
int g = (((y+1)%rows)*cols);
if (reject ==1 && buffer[(g + x)] == 2 || buffer[(g + x)] == 3 || buffer[(g + x)] == 6 ||
buffer[(g + x)] == 5 || buffer[(g + x)] == 7 || buffer[(g + x)] == 11 ||
buffer[(g + x)] == 14 || buffer[(g + x)] == 15){
total = total + 2;
}else if(c_aux == rows-1){
if (buffer[(g + x)] == 8 || buffer[(g + x)] == 12 || buffer[(g + x)] == 5 ||
buffer[(g + x)] == 9 || buffer[(g + x)] == 14 || buffer[(g + x)] == 13 ||
buffer[(g + x)] == 11 || buffer[(g + x)] == 15){
total = total + 2;
}
}else {
total = total + 0;
}
// --- row above (y-1, wrapping via the ((y-1)%rows+rows)%rows trick) ---
reject = 1;
c_aux = y - 1;
if (c_aux <0){
c_aux = ((rows-1)%rows)*cols;
reject = 0;
}
g = (((y-1)%rows)+rows)%rows;
c_aux = g*cols;
if (reject ==1 && buffer[(c_aux + x)] == 8 || buffer[(c_aux + x)] == 12 || buffer[(c_aux + x)] == 5 ||
buffer[(c_aux + x)] == 9 || buffer[(c_aux + x)] == 14 || buffer[(c_aux + x)] == 13 ||
buffer[(c_aux + x)] == 11 || buffer[(c_aux + x)] == 15){
total = total + 8;
}else if(c_aux == 0){
if (buffer[(c_aux + x)] == 2 || buffer[(c_aux + x)] == 3 || buffer[(c_aux + x)] == 6 ||
buffer[(c_aux + x)] == 5 || buffer[(c_aux + x)] == 7 || buffer[(c_aux + x)] == 11 ||
buffer[(c_aux + x)] == 14 || buffer[(c_aux + x)] == 15 ){
total = total + 8;
}
}else{
total = total + 0;
}
array[tId] = total;
}
}
// Loads the initial automaton state from disk, runs 1000 update steps on
// the GPU (writing into d_array, then copying back into d_buffer as next
// step's input), and copies the final state back to the host.
// Fix vs. original: the host array allocated by readInput is now freed.
int main(int argc, char const *argv[])
{
int rows, cols;
int *array;
int *d_array;
int *d_buffer;
readInput("../initial.txt", &array, &rows, &cols);
//printMatrix(array,rows,cols);
int n = (int)(rows*cols);
int block_size = 256;
int grid_size = (int) ceil((float) n/ block_size);
cudaMalloc(&d_array ,rows * cols * sizeof(int));
cudaMalloc(&d_buffer,rows*cols*sizeof(int));
cudaMemcpy(d_array, array, rows * cols * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_buffer, array, rows * cols * sizeof(int), cudaMemcpyHostToDevice);
for(int k = 0; k < 1000; k++){
step_periodic<<<grid_size, block_size>>>(d_array, d_buffer, rows, cols);
// publish this step's result as next step's input
cudaMemcpy(d_buffer,d_array,rows*cols * sizeof(int), cudaMemcpyDeviceToDevice);
}
cudaMemcpy(array, d_array, rows * cols * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_array);
cudaFree(d_buffer);
free(array); // was leaked (allocated by readInput)
return(0);
}
|
10,378 | #include <iostream>
#include <cmath>
#include <algorithm>
#include <fstream>
using namespace std;
#define MAXTHREAD 1024
#define DIM 1000
void print(float* matrix, int row_dim, int col_dim);
/* CUDA Variable */
float *input_device,
*result_device,
*temp_row,
*device_pivot;
int *temp_index, *pindex_device;
__constant__ int device_i[1];
__constant__ int device_j[1];
// Divides row device_i of both the input matrix and the accumulating result
// matrix by the pivot value, one column per thread.
// Fix vs. original: removed the unused read of device_j.
// NOTE: not called by inv(), which uses cuda_do_math instead.
__global__ void normalize(float* input_device, float* result_device, float* device_pivot){
int k = blockIdx.x*MAXTHREAD + threadIdx.x;
int col_dim = DIM;
int d_i = device_i[0];
if(k<DIM)
{
result_device[d_i*col_dim+k] /= device_pivot[0];
input_device[d_i*col_dim+k] /= device_pivot[0];
}
}
// Row elimination: subtracts device_pivot[0] times row device_j from row
// device_i in both matrices, one column per thread.
// NOTE: not called by inv(), which uses cuda_do_math instead.
__global__ void eliminate(float* input_device, float* result_device, float* device_pivot){
int k = blockIdx.x*MAXTHREAD + threadIdx.x;
int col_dim = DIM;
int d_i = device_i[0];
int d_j = device_j[0];
if(k<DIM)
{
input_device [d_i*col_dim+k] -= (device_pivot[0] * input_device [d_j*col_dim+k]);
result_device[d_i*col_dim+k] -= (device_pivot[0] * result_device[d_j*col_dim+k]);
}
}
// Fused Gauss-Jordan row step for pivot column d_j = device_j[0] applied to
// row d_i = device_i[0]: normalises the pivot row when d_i == d_j, otherwise
// eliminates row d_i using the (already normalised) pivot row.
// NOTE(review): every thread writes device_pivot[0], and the element
// input_device[d_i*col_dim+d_j] is rewritten by thread k == d_j while other
// threads read it to compute their pivot factor — this relies on reads
// happening before that write and is a data race in general; results may be
// nondeterministic across blocks.
__global__ void cuda_do_math(float* input_device, float* result_device, float* device_pivot){
int k = blockIdx.x*MAXTHREAD + threadIdx.x;
int col_dim = DIM;
int d_i = device_i[0];
int d_j = device_j[0];
if(k<DIM)
{
if(d_i==d_j)
{
// pivot row: divide through by the diagonal element
device_pivot[0] = input_device[d_i*col_dim+d_j];
result_device[d_i*col_dim+k] /= device_pivot[0];
input_device[d_i*col_dim+k] /= device_pivot[0];
}else{
// other rows: subtract the scaled pivot row
device_pivot[0] = input_device[d_i*col_dim+d_j]/input_device[d_j*col_dim+d_j];
input_device [d_i*col_dim+k] -= (device_pivot[0] * input_device [d_j*col_dim+k]);
result_device[d_i*col_dim+k] -= (device_pivot[0] * result_device[d_j*col_dim+k]);
}
}
}
// Argmax-style pivot search: finds the row index k in [d_j, DIM) whose
// element in column d_j has the largest magnitude, and writes it to
// pindex_device[0]. temp_index is a DIM-int scratch buffer of candidate
// indices, halved each round.
// NOTE(review): the __syncthreads() inside the halving loop is commented
// out — threads in different warps can read temp_index[k+range] while it is
// being updated (data race). Also, with multiple blocks launched, all blocks
// repeat the same work on the same global scratch concurrently.
// NOTE(review): the first odd-range fixup reads temp_index[range] with
// range == DIM, one past the end of the buffer (unexercised while DIM is
// even, e.g. 1000).
__global__ void reduce_max(float* input_device, float* result_device, int* temp_index, int* pindex_device){
int k = blockIdx.x*MAXTHREAD + threadIdx.x;
int col_dim = DIM;
int range = DIM;
int d_j = device_j[0];
temp_index[k] = 0;
// candidates: rows at or below the current pivot row
if(k>=d_j && k<col_dim)
{
temp_index[k] = k;
}
__syncthreads();
if(range%2 && k==0)
{
if( abs(input_device[ (temp_index[0]*DIM+d_j) ] )> abs(input_device[ (temp_index[range]*DIM+d_j) ]) )
{
temp_index[0] = temp_index[0];
}else{
temp_index[0] = temp_index[range];
}
}
__syncthreads();
// pairwise tournament: keep the index with the larger |element| per round
while(range>1){
range /= 2;
if(k<range)
{
if( abs(input_device[ (temp_index[k]*DIM+d_j) ] )> abs(input_device[ (temp_index[k+range]*DIM+d_j) ]) )
{
temp_index[k] = temp_index[k];
}else{
temp_index[k] = temp_index[k+range];
}
}
//__syncthreads();
if(range%2 && k==0)
{
if( abs(input_device[ (temp_index[0]*DIM+d_j) ] )> abs(input_device[ (temp_index[range-1]*DIM+d_j) ]) )
{
temp_index[0] = temp_index[0];
}else{
temp_index[0] = temp_index[range-1];
}
}
__syncthreads();
}
// thread 0 publishes the winning row index
if(k==0){
pindex_device[0] = temp_index[0];
}
}
// Swaps rows device_j and device_i of both matrices (pivoting), one column
// per thread, using temp_row as global scratch for the element in flight.
// Fix vs. original: removed the unused local `tmp`.
__global__ void copy_row(float* input_device, float* result_device, float* temp_row){
int k = blockIdx.x*MAXTHREAD + threadIdx.x;
int col_dim = DIM;
int d_i = device_i[0];
int d_j = device_j[0];
if(k<DIM)
{
temp_row[k] = input_device[d_j*col_dim+k];
input_device[d_j*col_dim+k] = input_device[d_i*col_dim+k];
input_device[d_i*col_dim+k] = temp_row[k];
temp_row[k] = result_device[d_j*col_dim+k];
result_device[d_j*col_dim+k] = result_device[d_i*col_dim+k];
result_device[d_i*col_dim+k] = temp_row[k];
}
}
/** matrix inverse */
void inv(float* input, int row_dim, int col_dim, float* output)
{
int size = sizeof(float)*row_dim*col_dim;
// check square matrix
if(col_dim == row_dim)
{
cudaMalloc(&temp_row, col_dim*sizeof( float ));
cudaMalloc(&temp_index, col_dim*sizeof( float ));
cudaMalloc(&device_pivot, sizeof( float ));
cudaMalloc(&pindex_device, sizeof( int ));
cudaMemcpy(input_device, input, size, cudaMemcpyHostToDevice);
cudaMemcpy(result_device, output, size, cudaMemcpyHostToDevice);
for(int j = 0;j < col_dim; j++)
{
//find max magnitude
int p = -1;
/*
float tmp = 0;
for(int i = j; i < row_dim; i++)
{
if(abs(input[i*col_dim+j]) > tmp)
{
tmp = abs(input[i*col_dim+j]);
p = i;
}
}
*/
cudaMemcpyToSymbol(device_j, &j, sizeof( int ));
reduce_max<<<col_dim/MAXTHREAD+1, MAXTHREAD>>>(input_device, result_device, temp_index, pindex_device);
if (cudaGetLastError() != cudaSuccess) {cout<< "error"<<endl;}
//int tt[1];
//tt[0]=-1;
//cudaMemcpyFromSymbol(&tt, device_pivot, sizeof( int ));
cudaMemcpy(&p, pindex_device, sizeof( int ), cudaMemcpyDeviceToHost);
//cout<<p<<"p "<<tt[0]<<endl;
// have zero row
if(p == -1)
{
cout << "it's singular";
return;
}
cudaMemcpyToSymbol(device_i, &p, sizeof( int )); //Actually is p
copy_row<<<col_dim/MAXTHREAD+1, MAXTHREAD>>>(input_device, result_device, temp_row);
//row operation
for (int i = 0; i < row_dim; i++)
{
cudaMemcpyToSymbol(device_i, &i, sizeof( int ));
if (cudaGetLastError() != cudaSuccess) {cout<< "error"<<endl;}
cuda_do_math<<<col_dim/MAXTHREAD+1, MAXTHREAD>>>(input_device, result_device, device_pivot);
if (cudaGetLastError() != cudaSuccess) {cout<< "error"<<endl;}
//print(output, row_dim, col_dim);
//cout << "----------------------\n";
}
}
cudaMemcpy(input, input_device, size, cudaMemcpyDeviceToHost);
cudaMemcpy(output, result_device, size, cudaMemcpyDeviceToHost);
cudaFree(temp_row);
cudaFree(temp_index);
cudaFree(pindex_device);
cudaFree(device_pivot);
}
else
{
cout << "it isn't sqare matrix";
return;
}
}
/** matrix print */
// Prints a row_dim x col_dim row-major matrix, one row per line, each row
// terminated by ";".
// BUG FIX: the inner loop iterated j < row_dim; for non-square matrices that
// printed the wrong number of columns.  It now iterates j < col_dim.
void print(float* matrix, int row_dim, int col_dim)
{
    for(int i=0; i < row_dim; i++)
    {
        for(int j=0; j < col_dim; j++)
        {
            cout << matrix[i*col_dim+j]<<" ";
        }
        cout<<";"<<endl;
    }
}
/** matrix save */
// Writes a row_dim x col_dim row-major matrix to "inMatrix.txt", one row per
// line, values space-separated.
// BUG FIX: the inner loop iterated j < row_dim; for non-square matrices that
// wrote the wrong number of columns.  It now iterates j < col_dim.
void fprint(float* matrix, int row_dim, int col_dim)
{
    fstream file;
    file.open("inMatrix.txt",ios::out);
    for(int i=0; i < row_dim; i++)
    {
        for(int j=0; j < col_dim; j++)
        {
            file << matrix[i*col_dim+j]<<" ";
        }
        file<<""<<endl;
    }
    file.close();
}
// Driver: builds a random DIM x DIM matrix plus an identity matrix, runs the
// GPU inversion, and saves the input for inspection.
int main ()
{
    float* input;
    float* result;
    //random seed
    srand(0);
    //set dimention
    int row_dim = DIM;
    int col_dim = DIM;
    /* CUDA */
    int size = sizeof(float)*row_dim*col_dim;   // byte count for cudaMalloc/cudaMemcpy
    //initial array
    // BUG FIX: the original did `new float[size]`, allocating `size` FLOATS
    // where `size` is already a BYTE count (4x over-allocation).
    input = new float [row_dim*col_dim];
    result = new float [row_dim*col_dim];
    for(int i = 0; i < row_dim; i++)
    {
        for(int j = 0;j < col_dim; j++)
        {
            input[i*col_dim+j] = (float)(rand()%9);
            result[i*col_dim+j] = (i == j)?1.0f:0.0f;   // identity seed for the inverse
        }
    }
    //fprint(input, row_dim, col_dim);
    /* CUDA */
    cudaMalloc(&input_device, size);
    cudaMalloc(&result_device, size);
    //check input
    fprint(input, row_dim, col_dim);
    cout << "----------------------\n";
    //test inverse
    inv(input, row_dim, col_dim, result);
    //check result
    //fprint(result, row_dim, col_dim);
    //print(input, row_dim, col_dim);
    /* CUDA */
    cudaFree(input_device);
    cudaFree(result_device);
    // BUG FIX: new[] allocations must be released with delete[].
    delete [] input;
    delete [] result;
    return 0;
}
|
10,379 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
// 1gb = 1073741824 bytes; float = 4 bytes; => size = 268435456 size_t
const size_t size = 1024 * 1024 * 256;
const size_t count = 25;
// Measures aggregate H2D + D2H PCIe bandwidth by ping-ponging a 1 GB buffer
// `count` times and timing the loop with CUDA events.
int main()
{
    // initialize clocks (CUDA events give device-side millisecond timing)
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float* dev_tmp;
    float* tmp = (float*)malloc( size* sizeof(float));
    if (tmp == NULL) {   // FIX: a 1 GB host allocation can legitimately fail
        std::cout << "host allocation failed";
        return 1;
    }
    cudaMalloc((void**)&dev_tmp,size * sizeof(float));
    cudaEventRecord(start, 0);
    for (size_t iter = 0; iter < count; iter++) {
        cudaMemcpy(dev_tmp, tmp, size * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(tmp, dev_tmp, size * sizeof(float), cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // FIX: use `count` instead of the hard-coded 25 so the constant above
    // stays the single source of truth.  Each iteration moves 1 GB up and
    // 1 GB down; elapsedTime is in ms, hence the factor 1000.
    float speed = 2.f * 1000 * count / elapsedTime; // GB per sec
    std::cout << speed << " GB/sec";
    cudaFree(dev_tmp);
    free(tmp);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
10,380 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: executes a fixed chain of
// single-precision operations on its scalar arguments and prints the final
// value of `comp` with full precision.  Launched <<<1,1>>> from main().
// The exact printed value is the test oracle, so the arithmetic (including
// deliberate oddities such as +0.0f / -0.0f) must not be reordered or
// "simplified".
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float* var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
if (comp == var_2 + (var_3 * ceilf(-1.9789E3f))) {
if (comp <= (var_4 * floorf(-0.0f))) {
// main stress loop; trip count comes from argv[2]
for (int i=0; i < var_1; ++i) {
comp = (var_6 * var_7 / (var_8 / var_9));
float tmp_1 = +1.9321E-36f;
var_5[i] = var_10 - (var_11 * -1.8622E-28f);
comp = var_5[i] / tmp_1 + +1.4103E-2f + var_12;
if (comp >= (-1.5870E34f / sinf(-1.3204E-42f + (+1.9652E-36f * var_13 / +1.1403E26f - var_14)))) {
float tmp_2 = var_15 - (+0.0f / -0.0f * -1.5701E-30f);
float tmp_3 = -0.0f;
float tmp_4 = -1.7256E-36f;
comp += tmp_4 - tmp_3 * tmp_2 / fmodf((+1.3540E35f * expf(cosf(+1.2175E34f))), var_16 * logf((var_17 / sinf(-1.6336E10f))));
}
if (comp <= var_18 * -1.6615E24f) {
float tmp_5 = (+1.1237E36f - atanf((var_19 - +1.5109E-1f / +1.0057E-44f)));
comp = tmp_5 - +1.4897E-37f * +0.0f / (var_20 + +1.6520E34f);
comp = var_21 + var_22;
comp = sqrtf((var_23 - -1.8748E-41f));
}
}
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array filled with v; the caller owns (and the
// generated harness never frees) the memory.
// FIX: the malloc result was used unchecked; fail fast on OOM instead of
// dereferencing NULL.
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    if (ret == NULL) {
        fprintf(stderr, "initPointer: out of memory\n");
        exit(1);
    }
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}
// Harness for the generated kernel: parses 24 numeric arguments and launches
// compute<<<1,1>>>.
int main(int argc, char** argv) {
    /* Program variables */
    // FIX: the harness reads argv[1..24]; guard instead of invoking undefined
    // behavior when fewer arguments are supplied.
    if (argc < 25) {
        fprintf(stderr, "usage: %s <24 numeric args>\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float* tmp_6 = initPointer( atof(argv[6]) );
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
    cudaDeviceSynchronize();
    return 0;
}
|
10,381 | namespace Schelling {
const int neighbourhoodSize = 2;
}
// True when cell (i, j) holds the same non-empty group id as cell inElementId.
__device__ inline bool isSimilarNeighbour(int* inFlatPosTab, const int inElementId, const int i, const int j, const int inTabSide) {
    const int own = inFlatPosTab[inElementId];
    return (own != 0) && (inFlatPosTab[i * inTabSide + j] == own);
}
// For each cell, counts neighbours of the same group within the Moore
// neighbourhood of radius Schelling::neighbourhoodSize.
// BUG FIXES: (1) neighboursCount was read uninitialized; (2) the loop
// offsets i,j (-2..2) were passed to isSimilarNeighbour as ABSOLUTE
// coordinates, indexing near (and before) the start of the array.  They are
// now converted to absolute row/col, clipped at the grid border, and the
// cell itself is excluded from its own count.
__global__ void movingKernel (int* inFlatPosTab, int* outMovingTab, const int inTabSide) {
    int globalId = threadIdx.x + blockDim.x * blockIdx.x;
    if (globalId < inTabSide * inTabSide) {
        int row = globalId / inTabSide;
        int col = globalId % inTabSide;
        int neighboursCount = 0;
        for (int i = -Schelling::neighbourhoodSize; i <= Schelling::neighbourhoodSize; ++i) {
            for (int j = -Schelling::neighbourhoodSize; j <= Schelling::neighbourhoodSize; ++j) {
                int r = row + i;
                int c = col + j;
                if (r < 0 || r >= inTabSide || c < 0 || c >= inTabSide) continue;
                if (i == 0 && j == 0) continue;   // a cell is not its own neighbour
                if ( isSimilarNeighbour(inFlatPosTab, globalId, r, c, inTabSide) ) ++neighboursCount;
            }
        }
        outMovingTab[globalId] = neighboursCount;
    }
}
|
10,382 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cuda.h>
float *a, *b; // host data
float *c, *c2; // results
// GPU element-wise vector add: C[i] = A[i] + B[i] for i < N, one thread per
// element.
__global__ void vecAdd(float *A,float *B,float *C, int N){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < N) {
        C[gid] = A[gid] + B[gid];
    }
}
// CPU reference implementation of the element-wise vector add.
void vecAdd_h(float *A1,float *B1, float *C1, int N){
    for (int idx = 0; idx < N; ++idx)
        C1[idx] = A1[idx] + B1[idx];
}
// Compares GPU and CPU vector addition timings on n elements.
int main(int argc,char **argv){
    printf("Begin \n");
    //Declaring number of elements in the Vector
    long int n=10000000 ; //100, 10000,1000000 and 10000000
    long int nBytes = n*sizeof(float);
    int block_size, block_no;
    //Memory allocating for the vector arrays
    a = (float *)malloc(nBytes);
    b = (float *)malloc(nBytes);
    c = (float *)malloc(nBytes);
    c2 = (float *)malloc(nBytes);
    //Memory pointers and other required parameters for GPU function
    float *a_d,*b_d,*c_d;
    block_size=1024;
    block_no = (n/block_size) + 1;
    dim3 dimBlock(block_size,1,1);
    dim3 dimGrid(block_no,1,1);
    //Assigning values to the created matrices
    for(int i = 0; i < n; i++ ){
        a[i] = sin(i)*sin(i);
        b[i] = cos(i)*cos(i);
    }
    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d, n*sizeof(float));
    cudaMalloc((void **)&b_d, n*sizeof(float));
    cudaMalloc((void **)&c_d, n*sizeof(float));
    printf("Copying to device..\n");
    // BUG FIX: the arrays are float, but the copies used sizeof(int) — a
    // latent bug on any platform where the sizes differ.
    cudaMemcpy(a_d, a, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, n*sizeof(float), cudaMemcpyHostToDevice);
    printf("Doing GPU Vector add\n");
    clock_t start_d=clock();
    vecAdd<<<dimGrid, dimBlock>>>(a_d, b_d, c_d, n);
    cudaDeviceSynchronize();
    clock_t end_d = clock();
    cudaMemcpy(c, c_d, n*sizeof(float), cudaMemcpyDeviceToHost);
    printf("\nDoing CPU Vector add\n");
    clock_t start_h = clock();
    vecAdd_h(a,b,c2,n);
    clock_t end_h = clock();
    double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
    double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC;
    printf("Number of elements: %li GPU Time: %f CPU Time: %f\n",n, time_d, time_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    // FIX: release the host buffers as well.
    free(a);
    free(b);
    free(c);
    free(c2);
    return 0;
}
10,383 | // n*n
// OpenMP
// CUDA
// n = 1024:8192 1024
// n
// OpenMP |
10,384 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cfloat>
#include <cstdio>
#define HANDLE_ERROR(func,message) if((func)!=cudaSuccess) { printf("%s \n",message); return; }
// Brute-force nearest neighbour: one thread per query point.  `points` is
// packed xyz (3 floats per point), as allocated and copied by
// findNearestPoints, so point i lives at offset 3*i.
// BUG FIX: the original indexed points[c_idx]/[c_idx+1]/[c_idx+2] (and the
// same for s_idx), ignoring the 3-float stride and therefore comparing
// overlapping, misaligned triples.
__global__
void findNearest(float* points,float* dists,unsigned int* idxs,int point_num)
{
    int c_idx = threadIdx.x + blockIdx.x*blockDim.x;
    if(c_idx>=point_num)
        return;
    float c_x=points[3*c_idx];
    float c_y=points[3*c_idx+1];
    float c_z=points[3*c_idx+2];
    float min_dist=FLT_MAX;
    unsigned int min_idx=0;
    for(unsigned int s_idx=0;s_idx<point_num;s_idx++)
    {
        if(s_idx==c_idx) continue;   // don't match a point with itself
        float s_x=points[3*s_idx];
        float s_y=points[3*s_idx+1];
        float s_z=points[3*s_idx+2];
        // squared distance; sqrt deferred to the single winning candidate
        float dist=(c_x-s_x)*(c_x-s_x)+(c_y-s_y)*(c_y-s_y)+(c_z-s_z)*(c_z-s_z);
        if(dist<min_dist)
        {
            min_dist=dist;
            min_idx=s_idx;
        }
    }
    dists[c_idx]=sqrtf(min_dist);   // sqrtf keeps the math in single precision
    idxs[c_idx]=min_idx;
}
// Host wrapper: uploads `point_num` xyz points, runs findNearest with
// 1024-thread blocks, and downloads per-point nearest distances and indices.
// FIX: the three device buffers were never freed, and the kernel launch was
// never checked for errors.
void findNearestPoints(
    float* points,
    unsigned int point_num,
    float* dists,
    unsigned int* idxs
)
{
    int block_num=point_num/1024;
    if(point_num%1024>0) block_num++;
    float* d_points;
    HANDLE_ERROR(cudaMalloc((void**)&d_points, point_num * 3 * sizeof(float)),"allocate error")
    HANDLE_ERROR(cudaMemcpy(d_points, points, point_num * 3 * sizeof(float), cudaMemcpyHostToDevice),"points copy error")
    float* d_dists;
    HANDLE_ERROR(cudaMalloc((void**)&d_dists, point_num * sizeof(float)),"allocate error")
    unsigned int* d_idxs;
    HANDLE_ERROR(cudaMalloc((void**)&d_idxs, point_num * sizeof(unsigned int)),"allocate error")
    findNearest<<<block_num,1024>>>(d_points,d_dists,d_idxs,point_num);
    HANDLE_ERROR(cudaGetLastError(),"kernel launch error")
    HANDLE_ERROR(cudaMemcpy(dists, d_dists, point_num * sizeof(float), cudaMemcpyDeviceToHost),"dists copy error")
    HANDLE_ERROR(cudaMemcpy(idxs, d_idxs, point_num * sizeof(unsigned int), cudaMemcpyDeviceToHost),"idxs copy error")
    cudaFree(d_points);
    cudaFree(d_dists);
    cudaFree(d_idxs);
}
10,385 | __global__
// Element-wise product y[i] = x[i] * y[i].
// FIX: the original had every thread loop over the whole array, so any launch
// with more than one thread raced on y[i] (read-modify-write of already
// multiplied elements).  A grid-stride loop is correct for any launch
// configuration, including the original's <<<1,1>>> case.
void multiply(int n, float *x, float *y)
{
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        y[i] = x[i] * y[i];
}
|
10,386 | //#include "device_math.cuh"
//#include "slamtypes.h"
//#include <vector>
//#include <cutil.h>
//#include "phdfilter.h"
//#include "rng.h"
//#include "disparity.h"
|
10,387 | #include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#define CHANNELS 3
#define BLUR_SIZE 3
#define SIZE 12
#define PI 3.14
float h_image[SIZE*SIZE*CHANNELS],h_filter[BLUR_SIZE*BLUR_SIZE],h_blurredimage[SIZE*SIZE*CHANNELS];
// Convolves a CHANNELS-deep SIZE x SIZE interleaved image with a
// BLUR_SIZE x BLUR_SIZE filter; out-of-range taps are clamped to the image
// edge.  One thread per output pixel, all channels handled by that thread.
__global__ void gaussian_blur(float *d_image,float *d_blurredimage,float* d_filter)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    for (int ch = 0; ch < CHANNELS; ++ch)
    {
        float acc = 0.0f;
        for (int fx = 0; fx < BLUR_SIZE; ++fx)
        {
            for (int fy = 0; fy < BLUR_SIZE; ++fy)
            {
                // clamp-to-edge addressing
                int ix = min(max(px + fx - BLUR_SIZE/2, 0), SIZE-1);
                int iy = min(max(py + fy - BLUR_SIZE/2, 0), SIZE-1);
                acc += d_filter[fy*BLUR_SIZE+fx] * d_image[iy*SIZE*CHANNELS + ix*CHANNELS + ch];
            }
        }
        d_blurredimage[py*SIZE*CHANNELS + px*CHANNELS + ch] = acc;
    }
}
// Launch helper: 4x4 thread blocks tiling the SIZE x SIZE image.
void blur(float *d_image,float *d_blurredimage,float *d_filter)
{
    const dim3 threads(4, 4, 1);
    const dim3 blocks(SIZE / threads.x, SIZE / threads.y);
    gaussian_blur<<<blocks, threads>>>(d_image, d_blurredimage, d_filter);
}
// Fills the global h_image with pseudo-random byte values (0..255).  The
// array is contiguous, so one flat loop visits the elements in exactly the
// same order (and consumes rand() in the same order) as nested i/j/k loops.
void initialise_image()
{
    for (int n = 0; n < SIZE * SIZE * CHANNELS; ++n)
        h_image[n] = rand() % 256;
}
// Builds a normalized (sums to 1) BLUR_SIZE x BLUR_SIZE Gaussian-like kernel
// in the global h_filter.
// BUG FIX: the recentring offset was hard-coded as +2, writing indices up to
// h_filter[12] — past the end of the 9-element array.  The correct offset is
// BLUR_SIZE/2.
// NOTE(review): r = exp(-(i*i+j*j))/2 halves the exponential rather than the
// exponent; normalization cancels the constant factor but the fall-off shape
// differs from exp(-(i*i+j*j)/2) — confirm the intended sigma.
void initialise_filter()
{
    int i,j;
    double sum = 0.0,r,s=1/(2*PI);
    for(i=-BLUR_SIZE/2;i<=BLUR_SIZE/2;i++)
    {
        for(j=-BLUR_SIZE/2;j<=BLUR_SIZE/2;j++)
        {
            r = exp(-(i*i+j*j))/2;
            h_filter[(i+BLUR_SIZE/2)*BLUR_SIZE+(j+BLUR_SIZE/2)] = r*s;
            sum +=h_filter[(i+BLUR_SIZE/2)*BLUR_SIZE+(j+BLUR_SIZE/2)];
        }
    }
    // normalize so the filter preserves overall brightness
    for(i=0;i<BLUR_SIZE;i++)
    {
        for(j=0;j<BLUR_SIZE;j++)
            h_filter[i*BLUR_SIZE+j]/=sum;
    }
}
// Driver: initializes image and filter, runs the blur kernel once, and
// reports the kernel time measured with CUDA events.
int main(int argc, char *argv[])
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if(!deviceCount){
        fprintf(stderr,"No devices supporting cuda\n");
        exit(EXIT_FAILURE);
    }
    int deviceId = 0;
    cudaSetDevice(deviceId);
    printf("Initialising\n");
    initialise_image();
    initialise_filter();
    float *d_image,*d_blurredimage,*d_filter;
    const int ARRAY_BYTES = SIZE*SIZE*CHANNELS*sizeof(float);
    const int FILTER_BYTES = BLUR_SIZE*BLUR_SIZE*sizeof(float);
    cudaMalloc((void**)&d_image,ARRAY_BYTES);
    cudaMalloc((void**)&d_blurredimage,ARRAY_BYTES);
    cudaMalloc((void**)&d_filter,FILTER_BYTES);
    cudaMemcpy(d_image,h_image,ARRAY_BYTES,cudaMemcpyHostToDevice);
    cudaMemcpy(d_filter,h_filter,FILTER_BYTES,cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    blur(d_image,d_blurredimage,d_filter);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cudaMemcpy(h_blurredimage,d_blurredimage,ARRAY_BYTES,cudaMemcpyDeviceToHost);
    printf("Elapsed time is %f\n",elapsedTime);
    // FIX: d_filter was leaked and the events were never destroyed.
    cudaFree(d_image);
    cudaFree(d_blurredimage);
    cudaFree(d_filter);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
10,388 | /*
============================================================================
Name : backpropagate.cu
Author : Christophoros Bekos (mpekchri@auth.gr)
Version :
Copyright : @ copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#define work_per_block 100
#define threads_per_warp 32
#define threads_per_warp 32
// In-place logistic function z := 1 / (1 + e^-z).
// FIX: use expf and float literals so the computation stays in single
// precision instead of silently promoting to double.
__device__ void sigmoid(float& z) {
    z = 1.0f / (1.0f + expf(-z));
}
// Per-thread slice of an element-wise product:
//   sh_res[i*mult] = sh_a[i] * sh_b[i]   for this thread's `multiplier`
// consecutive elements; entries with i >= size are forced to 0 by the
// (i < size) mask.
// FIX: removed the unused locals block_id/cnt and the stray trailing
// backslash that line-continued one comment into the next.
__device__ void hadamard_product_small(float* sh_a, float* sh_b, float* sh_res,
        int multiplier, int size, int mult) {
    int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
    for (int i = thread_id * multiplier;
            i < thread_id * multiplier + multiplier; i++) {
        sh_res[i * mult] = sh_b[i] * sh_a[i] * ((int) (i < size));
    }
}
// Block-level tree reduction of sha[] into `result`, accumulated by thread 0.
// Pairs of warp-sized segments are folded together each iteration (stride
// doubles from threads_per_warp up to work_per_block); elements whose global
// position start+thread_id+i reaches `size` are masked out.  Both switches
// are outside any divergent region as far as __syncthreads() is concerned:
// every thread executes the same loop trip count, and the barriers sit at
// statement level, so all threads reach them.
// NOTE(review): only thread 0 accumulates into `result` at the end, so
// `result` must be pre-initialized by the caller — confirm.
__device__ void array_sum_small(float* sha, float& result, int size,
int start) {
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// start the computations
for (int i = threads_per_warp; i < work_per_block; i = i * 2) {
// switch 1 : even warps add their's neighbors contents
switch ((int) floor(thread_id / (double) i) % 2) {
case 0:
// thread_id % i == even
// add the "more next vector"
sha[thread_id] = sha[thread_id]
+ sha[i + thread_id]
* ((int) (start + thread_id + i < size));
break;
default:
// thread_id % i == odd
// do nothing
break;
}
__syncthreads();
// switch2 : odd warps clean up their content
switch ((int) floor(thread_id / (double) i) % 2) {
case 0:
// thread_id % i == even
// do nothing
break;
default:
// thread_id % i == odd
// clean up
sha[thread_id] = 0;
//__syncthreads();
break;
}
__syncthreads();
}
// loop ended, sha[0:threads_per_warp] got the sum
if (thread_id == 0) {
for (int i = 0; i < threads_per_warp; i++) {
result = result + sha[i];
sha[i] = 0;
}
}
}
// Computes, for this block's slice of rows,
//   result[t] = (sum_i matrix[t, i] * vector[i]) * sigm_der[t]
// i.e. one step of backpropagation (weighted delta times the sigmoid
// derivative).  Dynamic shared memory layout (set up by the caller's third
// launch argument): [temp | m | v | res] with the sizes used below.
// NOTE(review): `last_block`, `size` and `res` are accepted but never used,
// and v[thread_id]/result[thread_id] are written without a bounds guard —
// presumably blockDim equals rows_per_block; confirm against the launch in
// main().
__device__ void backpropagate_some_cols(float* result, int rows_per_block,
int col_length, float* matrix, float* vector, int last_block, int size,
float* sigm_der) {
// README :
// each block uses rows threads
// each block modifies rows columns ( cols columns per block)
// each thread modifies one column , column's length is col_length
// cols : number of columns that this block will modify
// one last block has less job to do, this one takes parameter last_block == 1
// and size (after index exceeds size in last block, no computation must be made)
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
int block_id = blockIdx.x;
extern __shared__ float shared[];
// carve the dynamic shared buffer into four regions
float* temp = shared;
float* m = &temp[rows_per_block];
float* v = &m[col_length * rows_per_block];
float* res = &v[col_length * rows_per_block];
// move data in shared memory
for (int i = thread_id * col_length;
i < thread_id * col_length + col_length; i++) {
m[i] = matrix[i];
}
v[thread_id] = 0;
// mask keeps threads beyond col_length from contributing stale data
v[thread_id] = vector[thread_id] * (thread_id < col_length);
__syncthreads();
// element-wise multiply this thread's row by the (shared) vector
int cnt = 0;
for (int i = thread_id * col_length;
i < thread_id * col_length + col_length; i++) {
m[i] = m[i] * v[cnt];
cnt++;
}
__syncthreads();
// row-wise sum of the products
temp[thread_id] = 0;
for (int i = thread_id * col_length;
i < thread_id * col_length + col_length; i++) {
temp[thread_id] += m[i];
}
__syncthreads();
// scale by the sigmoid derivative and publish
result[thread_id] = temp[thread_id] * sigm_der[thread_id];
}
// Kernel entry point: each block delegates its slice of rows (offset
// block_id * rows_per_block) to backpropagate_some_cols; the last block is
// flagged so the device code can mask its shorter workload.
// FIX: removed the unused local thread_id.
__global__ void backpropagate(float* result, int rows_per_block, int col_length,
        float* matrix, float* vector, int last_block, int size,
        float* sigm_der) {
    int block_id = blockIdx.y * gridDim.x + blockIdx.x;
    backpropagate_some_cols(&result[block_id * rows_per_block], rows_per_block,
            col_length, &matrix[block_id * rows_per_block], vector,
            (block_id == last_block), size,
            &sigm_der[block_id * rows_per_block]);
}
// Fills data[0..size) with the constant `arg`.
void initialize(float *data, unsigned size, float arg) {
    for (unsigned idx = 0; idx < size; ++idx)
        data[idx] = arg;
}
void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new,
float* sigm_der, float* w);
// Runs one backpropagation step on the GPU and validates it against the CPU
// reference implementation.
int main(void) {
    int rows = 783;
    int cols = 30;
    float *w = new float[rows * cols];
    float *d_old = new float[cols];
    // FIX: cpu_backpropagate allocates the result itself; the original
    // pre-allocated `delta` and leaked that first buffer.
    float *delta = NULL;
    float *delta_gpu = new float[rows];
    float* sigm_der = new float[rows];
    float *m, *v, *new_delta, *sigm_der_gpu;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            w[i * cols + j] = 1.2;
        }
    }
    initialize(d_old, cols, 1.5);
    initialize(sigm_der, rows, 1.6);
    cudaMalloc((void**) &m, sizeof(float) * (rows * cols));
    cudaMalloc((void**) &v, sizeof(float) * cols);
    cudaMalloc((void**) &new_delta, sizeof(float) * rows);
    cudaMalloc((void**) &sigm_der_gpu, sizeof(float) * rows);
    cudaMemcpy(m, w, sizeof(float) * (rows * cols), cudaMemcpyHostToDevice);
    cudaMemcpy(v, d_old, sizeof(float) * cols, cudaMemcpyHostToDevice);
    cudaMemcpy(sigm_der_gpu, sigm_der, sizeof(float) * rows,
            cudaMemcpyHostToDevice);
    int numofthreads = work_per_block;
    int rows_per_block = numofthreads;
    int col_length = cols;
    int last_block = floor(rows / work_per_block);
    size_t cache = 11000 * sizeof(float);   // dynamic shared memory, bytes (was a float)
    int num_of_blocks = floor(rows / work_per_block) + 1;
    int size_for_last_block = rows
            - floor(rows / work_per_block) * numofthreads;
    // BACKPROPAGATE FOR 1 ITERATION
    // IN GPU
    backpropagate<<<num_of_blocks, rows_per_block, cache>>>(new_delta,
            rows_per_block, col_length, m, v, last_block, size_for_last_block,
            sigm_der_gpu);
    cudaDeviceSynchronize();
    cudaMemcpy(delta_gpu, new_delta, sizeof(float) * rows,
            cudaMemcpyDeviceToHost);
    // IN CPU
    cpu_backpropagate(d_old, rows, cols, &delta, sigm_der, w);
    // COMPARE RESULTS
    // FIX: use a tolerance — GPU and CPU sum in different orders, so
    // bit-exact float equality is not a meaningful check.
    int success = 1;
    for (int i = 0; i < rows; i++) {
        if (fabs(delta[i] - delta_gpu[i]) > 1e-4f * fabs(delta[i]) + 1e-5f) {
            printf("ERROR in a, cpu = %f, gpu = %f\n", delta[i], delta_gpu[i]);
            success = 0;
        }
    }
    /* Free memory */
    cudaFree(new_delta);
    cudaFree(m);
    cudaFree(v);
    cudaFree(sigm_der_gpu);   // FIX: was leaked
    delete[] w;               // FIX: all host buffers were leaked
    delete[] d_old;
    delete[] delta;
    delete[] delta_gpu;
    delete[] sigm_der;
    if (success) {
        printf("SUCCESS \n");
    }
    return 0;
}
// Element-wise (Hadamard) product of two length-`size` vectors; returns a
// newly allocated array (caller owns it), equivalent to a.*b in MATLAB.
float* hadamard_product(int size, float* a, float* b) {
    float* out = new float[size];
    for (int idx = 0; idx < size; ++idx)
        out[idx] = a[idx] * b[idx];
    return out;
}
// TESTED
// Returns a newly allocated "rows x 1" vector whose j-th entry is the dot
// product of matrix row j with `vector` (caller owns the result).
// FIX: the original allocated and freed a temporary array per row via
// hadamard_product; the product-and-sum is now fused, removing `rows` heap
// allocations while accumulating in the same order.
float* mull_backpropagate(int rows, int cols, float* matrix, float* vector) {
    float* res = new float[rows];
    for (int j = 0; j < rows; j++) {
        float acc = 0.0f;
        for (int i = 0; i < cols; i++)
            acc += matrix[j * cols + i] * vector[i];
        res[j] = acc;
    }
    return res;
}
// CPU reference for one backpropagation step: *d_new = (W .row-dot d_L) .*
// sigm_der.  The result is freshly allocated; the intermediate is released.
void cpu_backpropagate(float* d_L, int rows, int cols, float** d_new,
        float* sigm_der, float* w) {
    float* weighted = mull_backpropagate(rows, cols, w, d_L);
    d_new[0] = hadamard_product(rows, weighted, sigm_der);
    delete[] weighted;
}
10,389 | //pass
//--blockDim=[32,1] --gridDim=[1,1]
#include <cuda.h>
#define NUM 32
// In-place bitonic sort of NUM values in shared memory (single block of NUM
// threads).  The MUTATION ifdefs belong to the bug-injection harness and are
// preserved verbatim.
// BUG FIX: both compare-and-swap branches computed `tmp` but then wrote
// `shared[ixj] = shared[tid]` — i.e. after shared[tid] had already been
// overwritten — duplicating one element instead of swapping.  The swap now
// completes with `shared[ixj] = tmp`.
__global__ void BitonicKernel(int * values)
{
    __shared__ int shared[NUM];
    unsigned int tid = threadIdx.x;
    // Copy input to shared mem.
    shared[tid] = values[tid];
#ifdef MUTATION
    if (threadIdx.x == 0) {
#endif
    __syncthreads();
#ifdef MUTATION
    /* BUGINJECT: NON_UNIFORM_CONTROL_FLOW, UP */
    }
#endif
    // Parallel bitonic sort.
    for (unsigned int k = 2;
            k <= NUM; k *= 2)
    {
        // Bitonic merge:
        for (unsigned int j = k / 2;
                j>0; j /= 2)
        {
            unsigned int ixj = tid ^ j;
            if (ixj > tid)
            {
                if ((tid & k) == 0)
                {
                    // ascending half: keep the smaller value at tid
                    if (shared[tid] > shared[ixj])
                    {
                        unsigned int tmp = shared[tid];
                        shared[tid] = shared[ixj];
                        shared[ixj] = tmp;
                    }
                }
                else
                {
                    // descending half: keep the larger value at tid
                    if (shared[tid] < shared[ixj])
                    {
                        unsigned int tmp = shared[tid];
                        shared[tid] = shared[ixj];
                        shared[ixj] = tmp;
                    }
                }
            }
            __syncthreads();
        }
    }
    // Write result.
    values[tid] = shared[tid];
}
|
10,390 | /*
* HxUpdater.cpp
*
* Created on: 25 янв. 2016 г.
* Author: aleksandr
*/
#include "HxUpdater.h"
#include "SmartIndex.h"
// indx - index along the right or left boundary, in y, from firstX to lastX
__host__ __device__
// Applies the Hx boundary correction for column `indx`: add the 1-D incident
// field Ez1D along the bottom row (firstY-1), subtract it along the top row
// (lastY) — see the reference loops kept below.
// FIX: 377.0f (free-space impedance) replaces the double literal 377.0,
// which forced a double-precision division before truncating back to float.
void HxUpdater::operator() (const int indx) {
    // correct Hx along the bottom
    // nn = firstY - 1;
    // for (mm = firstX; mm <= lastX; mm++)
    // Hx(mm, nn) += Chxe(mm, nn) * Ez1G(g1, mm);
    //
    // // correct Hx along the top
    // nn = lastY;
    // for (mm = firstX; mm <= lastX; mm++)
    // Hx(mm, nn) -= Chxe(mm, nn) * Ez1G(g1, mm);
    float Chxe = S/377.0f;
    Hx(indx, firstY - 1) = Hx(indx, firstY - 1) + Chxe * Ez1D[indx];
    Hx(indx, lastY) = Hx(indx, lastY) - Chxe * Ez1D[indx];
}
|
10,391 | #include "includes.h"
const int Nthreads = 1024, NrankMax = 3, nt0max = 71, NchanMax = 1024;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// For block bid, fills the nt0 x nt0 matrix wtw[.,.,bid] with pairwise dot
// products over channels: wtw[r,c] = sum_k dWU[r,k,bid] * dWU[c,k,bid].
// threadIdx.x fixes the row; threadIdx.y strides across columns.
__global__ void getwtw(const double *Params, const double *dWU, double *wtw){
    const int nt0   = (int) Params[4];
    const int Nchan = (int) Params[9];
    const int bid   = blockIdx.x;
    const int row   = threadIdx.x;
    for (int col = threadIdx.y; col < nt0; col += blockDim.y) {
        double acc = 0.0f;
        for (int k = 0; k < Nchan; k++)
            acc += dWU[row + k*nt0 + bid * Nchan*nt0] *
                   dWU[col + k*nt0 + bid * Nchan*nt0];
        wtw[row + col*nt0 + bid * nt0*nt0] = acc;
    }
}
10,392 | // Tests executing two kernels, with host code between kernel launches.
#include <cstdio>
#define N 100
// out[i] = in[i] + 1 for i < N; one thread per element.
__global__ void kernel1(int* in, int* out)
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid < N)
        out[gid] = in[gid] + 1;
}
// out[i] = in[i]^2 for i < N; one thread per element.
__global__ void kernel2(int*in, int*out)
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid < N)
        out[gid] = in[gid] * in[gid];
}
// Exercises two kernel launches with host-side work in between (the
// decrement is intentionally overwritten by the second device->host copy —
// that interleaving is what this test is about).
int main()
{
    int* in = (int*) malloc(N*sizeof(int));
    for(int i = 0; i < N; i++)
        in[i] = i;
    int* din;
    int* dout;
    cudaMalloc((void**)&din, N*sizeof(int));
    cudaMalloc((void**)&dout, N*sizeof(int));
    cudaMemcpy(din, in, N*sizeof(int), cudaMemcpyHostToDevice);
    kernel1<<<1,N>>>(din, dout);
    cudaMemcpy(in, dout, N*sizeof(int), cudaMemcpyDeviceToHost);
    // host code between the launches; kernel2 still reads the original din
    for(int i = 0; i < N; i++)
        in[i]--;
    kernel2<<<1,N>>>(din, dout);
    cudaMemcpy(in, dout, N*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 1; i < N; i++)
    {
        in[i] = in[i]/i;   // i^2 / i == i; starting at 1 avoids dividing by 0
        printf("%d ", in[i]);
    }
    printf("\n");
    // FIX: release device and host memory and return a status.
    cudaFree(din);
    cudaFree(dout);
    free(in);
    return 0;
}
10,393 | #include "includes.h"
// Reduces 64-element partial sums per constraint: each (blockIdx.x, threadIdx.y)
// pair owns one constraint, sums its gridDim_x_normal_equations chunks, then
// tree-reduces the 64 lanes and writes one scalar per constraint.
// Requires blockDim = (64, 4).
__global__ void reduce_normal_eqs_64_mult_constr_GPU(float *d_C_reduced, const float *d_C, int gridDim_x_normal_equations, int n_constraints) {
    // check if there are constraints left to be processed
    int constraint_ind = blockIdx.x * 4 + threadIdx.y;
    if (constraint_ind < n_constraints) {
        int tid = 64 * threadIdx.y + threadIdx.x;
        // put data in shared memory
        int ind = blockIdx.y * n_constraints * gridDim_x_normal_equations * 64 +
                constraint_ind * gridDim_x_normal_equations * 64 + threadIdx.x;
        __shared__ float DATA[64 * 4];
        // load and sum the first gridDim_x_normal_equations elements
        float tmp = 0.0f;
        for (int i = 0; i < gridDim_x_normal_equations; i++)
            tmp += d_C[ind + i * 64];
        DATA[tid] = tmp;
        __syncthreads(); // ensure reading stage has finished
        if ((tid - 64 * threadIdx.y) < 32) {
            // Warp tree reduction.  With blockDim.x == 64 and x-major warp
            // layout, the threads entering this branch (threadIdx.x < 32)
            // form exactly one full warp, so __syncwarp() is the correct
            // barrier between dependent steps.
            // FIX: the original called __syncthreads() INSIDE this divergent
            // branch — undefined behavior, since the second warp of each row
            // never reaches those barriers.
            DATA[tid] += DATA[tid + 32];
            __syncwarp();
            DATA[tid] += DATA[tid + 16];
            __syncwarp();
            DATA[tid] += DATA[tid + 8];
            __syncwarp();
            DATA[tid] += DATA[tid + 4];
            __syncwarp();
            DATA[tid] += DATA[tid + 2];
            __syncwarp();
            DATA[tid] += DATA[tid + 1];
        }
        // write results
        if (threadIdx.x == 0)
            d_C_reduced[blockIdx.y * n_constraints + constraint_ind] = DATA[tid];
    }
}
10,394 | #include "includes.h"
// Rescales each of the nx columns (one thread per column, pitch_x floats
// apart) to the range [0, knot_max] using that column's min/max.
// FIX: a constant column made delta == 0 and the original divided by zero,
// filling the column with NaN/Inf; such columns are now mapped to all zeros.
__global__ void scale(float knot_max, int nx, int nsamples, float * x, int pitch_x)
{
    int
    col_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(col_idx >= nx) return;
    float
    min, max,
    * col = x + col_idx * pitch_x;
    // find the min and the max
    min = max = col[0];
    for(int i = 1; i < nsamples; i++) {
        if(col[i] < min) min = col[i];
        if(col[i] > max) max = col[i];
    }
    float delta = max - min;
    if (delta > 0.0f) {
        for(int i = 0; i < nsamples; i++)
            col[i] = (knot_max * (col[i] - min)) / delta;
    } else {
        for(int i = 0; i < nsamples; i++)
            col[i] = 0.0f;
    }
}
10,395 | #include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define SIZE 2048
#define THREADS_PER_BLOCK 512
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double seconds(){
    struct timeval now;
    gettimeofday( &now, (struct timezone *)0 );
    return (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
}
// Naive O(N^3) CPU reference GEMM: C = A * B for SIZE x SIZE row-major
// matrices, parallelized over output rows with OpenMP.
void matmul_cpu( double *A, double *B, double *C ){
    int r, c, k;
    double acc;
    #pragma omp parallel for private( r, c, k, acc )
    for( r = 0; r < SIZE; r++ ){
        for( c = 0; c < SIZE; c++ ){
            acc = 0.0;
            for( k = 0; k < SIZE; k++ )
                acc += A[ (SIZE * r) + k ] * B[ (SIZE * k) + c ];
            C[ (SIZE * r) + c ] = acc;
        }
    }
}
// GPU GEMM using shared memory: one block per output row.  The row of A is
// staged once into shared memory, so every thread's inner product reads A
// from on-chip storage instead of re-reading global memory SIZE times.
// Assumes the launch configuration used by matmul_gpu():
// <<< SIZE, THREADS_PER_BLOCK >>>.  (Previously an empty stub.)
__global__ void CUDA_matmul_shared( double *A, double *B, double *C ){
    __shared__ double Arow[ SIZE ];   // SIZE doubles = 16 KB for SIZE == 2048
    int i = blockIdx.x;
    if( i >= SIZE ) return;
    // cooperative load of row i of A
    for( int k = threadIdx.x; k < SIZE; k += blockDim.x )
        Arow[k] = A[ (SIZE * i) + k ];
    __syncthreads();
    // each thread produces a strided subset of the row's output columns
    for( int j = threadIdx.x; j < SIZE; j += blockDim.x ){
        double loc_sum = 0.0;
        for( int k = 0; k < SIZE; k++ )
            loc_sum += Arow[k] * B[ (SIZE * k) + j ];
        C[ (SIZE * i) + j ] = loc_sum;
    }
}
// Naive GPU GEMM: block r computes output row r; threads stride across the
// row's columns.
__global__ void CUDA_matmul( double *A, double *B, double *C ){
    const int row = blockIdx.x;
    if( row < SIZE ){
        for( int col = threadIdx.x; col < SIZE; col += blockDim.x ){
            double acc = 0.0;
            for( int k = 0; k < SIZE; k++ )
                acc += A[ (SIZE * row) + k ] * B[ (SIZE * k) + col ];
            C[ (SIZE * row) + col ] = acc;
        }
    }
}
/* do matrix multiplication on the GPU here */
// Uploads A and B, runs the (shared-memory or naive) GEMM kernel, and copies
// C back.  FIX: allocation failures were silently ignored, device memory
// leaked on the kernel-error path, and cudaDeviceReset() tore down the whole
// CUDA context (breaking any later device use by the caller) — all corrected.
void matmul_gpu(double *A, double *B, double *C ){
    size_t size_in_bytes = sizeof(double) * SIZE * SIZE;
    cudaError_t err;
    double *d_A = NULL, *d_B = NULL, *d_C = NULL;
    if( cudaMalloc( (void **) &d_A, size_in_bytes ) != cudaSuccess ||
        cudaMalloc( (void **) &d_B, size_in_bytes ) != cudaSuccess ||
        cudaMalloc( (void **) &d_C, size_in_bytes ) != cudaSuccess ){
        printf( "failed to allocate device memory\n" );
        cudaFree( d_A ); cudaFree( d_B ); cudaFree( d_C );
        return;
    }
    cudaMemcpy( d_A, A, size_in_bytes, cudaMemcpyHostToDevice );
    cudaMemcpy( d_B, B, size_in_bytes, cudaMemcpyHostToDevice );
#ifdef __SHARED
    CUDA_matmul_shared<<< SIZE, THREADS_PER_BLOCK >>>( d_A, d_B, d_C );
#else
    CUDA_matmul<<< SIZE, THREADS_PER_BLOCK >>>( d_A, d_B, d_C );
#endif
    /* check if the kernel launch was successful */
    err = cudaGetLastError();
    if( err != cudaSuccess ){
        printf( "failed to lauch GPU kernel:\n%s\n", cudaGetErrorString(err) );
        cudaFree( d_A ); cudaFree( d_B ); cudaFree( d_C );
        return;
    }
    cudaMemcpy( C, d_C, size_in_bytes, cudaMemcpyDeviceToHost );
    cudaFree( d_A );
    cudaFree( d_B );
    cudaFree( d_C );
}
// Fills A and B with random values, times the CPU and GPU GEMMs, and (under
// __DEBUG) cross-checks the results with a relative tolerance.
int main(int argc, char **argv){
    int flag = 0;
    double time, time_cpu, time_gpu;
    double *A, *B, *C, *C_GPU;
    size_t size_in_bytes;
    int i = 0;
    size_in_bytes = SIZE* SIZE * sizeof(double);
    A = (double *) malloc( size_in_bytes );
    B = (double *) malloc( size_in_bytes );
    C = (double *) malloc( size_in_bytes );
    C_GPU = (double *) malloc( size_in_bytes );
    /* fill matrix A & B on cpu */
    #pragma omp parallel for private( i )
    for( i = 0; i < SIZE * SIZE; i++ ){
        A[i] = rand() / (double) RAND_MAX;
        B[i] = rand() / (double) RAND_MAX;
    }
    time = seconds();
    matmul_cpu( A, B , C );
    time_cpu = seconds() - time;
    time = seconds();
    matmul_gpu( A, B, C_GPU );
    time_gpu = seconds() - time;
    /* check result */
#ifdef __DEBUG
    #pragma omp parallel for private( i ) reduction( +:flag )
    for( i = 0; i < SIZE * SIZE; i++ )
        if( fabs( ( C[i] - C_GPU[i] ) / C[i] ) > 0.000001){
            fprintf( stdout, "\nC[%d] = %.3g\t%.3g", i, C[i], C_GPU[i] );
            flag += 1;
        }
#endif
    if( !flag ){
        /* FIX: the status line previously ran into the timing line (missing \n) */
        fprintf( stdout, "Program completed successfully!\n" );
        fprintf( stdout, "Time for CPU code: %g seconds\n", time_cpu );
        fprintf( stdout, "Time for GPU code: %g seconds\n", time_gpu );
    }
    else{
        fprintf( stdout, "Program completed unsuccessfully!\n" );
    }
    /* FIX: host matrices were leaked */
    free( A );
    free( B );
    free( C );
    free( C_GPU );
    return 0;
}
|
10,396 | #include <cuda_runtime_api.h>
#include <stdint.h>
// Planar-layout texel fetch (channel-major: pixels[u + w*(v + h*c)]) with
// clamp-to-edge addressing on u and v.
__device__ float tex2d_clamp_uvc(const float *pixels, int width, int height, int u, int v, int c) {
    const int cu = min(max(0, u), width - 1);
    const int cv = min(max(0, v), height - 1);
    return pixels[cu + width * (cv + height * c)];
}
// Interleaved-layout texel fetch (channel fastest: pixels[c + C*(u + w*v)])
// with clamp-to-edge addressing on u and v.
__device__ float tex2d_clamp_cuv(const float *pixels, int channels, int width, int height, int c, int u, int v) {
    const int cu = min(max(0, u), width - 1);
    const int cv = min(max(0, v), height - 1);
    return pixels[c + channels * (cu + width * cv)];
}
// Linear interpolation from a (t=0) to b (t=1).
// BUG FIX: the original computed a + t*(a - b), which moves AWAY from b
// (lerp(a, b, 1) gave 2a - b instead of b).
__device__ float lerp_filter(float a, float b, float t)
{
    return a + t * (b - a);
}
// Cubic B-spline basis weight for tap 0, Horner form.
__device__ float bicubic_w0(float t) {
    return (1.0f/6.0f)*(t*(t*(-t + 3.0f) - 3.0f) + 1.0f);
}
// Cubic B-spline basis weight for tap 1, Horner form.
__device__ float bicubic_w1(float t) {
    return (1.0f/6.0f)*(t*t*(3.0f*t - 6.0f) + 4.0f);
}
// Cubic B-spline basis weight for tap 2, Horner form.
__device__ float bicubic_w2(float t) {
    return (1.0f/6.0f)*(t*(t*(-3.0f*t + 3.0f) + 3.0f) + 1.0f);
}
// Cubic B-spline basis weight for tap 3.
__device__ float bicubic_w3(float t) {
    return (1.0f/6.0f)*(t*t*t);
}
// Catmull-Rom spline weight for tap 0 (Horner form of -t/2 + t^2 - t^3/2).
__device__ float catrom_w0(float t) {
    return t*(-0.5f + t*(1.0f - 0.5f*t));
}
// Catmull-Rom spline weight for tap 1 (Horner form of 1 - 2.5t^2 + 1.5t^3).
__device__ float catrom_w1(float t) {
    return 1.0f + t*t*(-2.5f + 1.5f*t);
}
// Catmull-Rom spline weight for tap 2 (Horner form of t/2 + 2t^2 - 1.5t^3).
__device__ float catrom_w2(float t) {
    return t*(0.5f + t*(2.0f - 1.5f*t));
}
// Catmull-Rom spline weight for tap 3 (Horner form of -t^2/2 + t^3/2).
__device__ float catrom_w3(float t) {
    return t*t*(-0.5f + 0.5f*t);
}
/*__device__ float mitchell_w0(float a) {
float b = absf(a);
return
(b < 1.0f) * () +
(b >= 1.0f) * (b < 2.0f) * ();
}*/
// Weighted sum of four taps with cubic B-spline weights evaluated at x,
// accumulated tap-by-tap in the same order as the weights.
__device__ float interpolate_bicubic_filter(
    float x,
    float a0,
    float a1,
    float a2,
    float a3)
{
    float acc = a0 * bicubic_w0(x);
    acc += a1 * bicubic_w1(x);
    acc += a2 * bicubic_w2(x);
    acc += a3 * bicubic_w3(x);
    return acc;
}
// Samples a planar (channel-major) image at continuous coordinate (u, v) in
// channel c with separable cubic B-spline filtering over a 4x4 clamp-to-edge
// neighbourhood: each row is filtered horizontally, then the four row results
// are filtered vertically.
__device__ float interpolate_bicubic_interpolate(
    const float *pixels,
    int width,
    int height,
    float u,
    float v,
    int c)
{
    // shift by half a texel so integer coordinates address texel centers
    const float su = u - 0.5f;
    const float sv = v - 0.5f;
    const float bx = floorf(su);
    const float by = floorf(sv);
    const float fx = su - bx;       // horizontal fraction within the cell
    const float fy = sv - by;       // vertical fraction within the cell
    const int ipx = (int)bx;
    const int ipy = (int)by;
    float rows[4];
    for (int r = 0; r < 4; ++r) {
        const int y = ipy - 1 + r;
        rows[r] = interpolate_bicubic_filter(fx,
            tex2d_clamp_uvc(pixels, width, height, ipx-1, y, c),
            tex2d_clamp_uvc(pixels, width, height, ipx,   y, c),
            tex2d_clamp_uvc(pixels, width, height, ipx+1, y, c),
            tex2d_clamp_uvc(pixels, width, height, ipx+2, y, c));
    }
    return interpolate_bicubic_filter(fy, rows[0], rows[1], rows[2], rows[3]);
}
// One thread per output element, linearized x-fastest then y then channel.
__global__ void interpolate_bicubic_kernel(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    float *out_pixels,
    int out_width,
    int out_height)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    const int x = gid % out_width;
    const int y = (gid / out_width) % out_height;
    const int c = gid / (out_width * out_height);
    if ((x < out_width) && (y < out_height) && (c < channels)) {
        // map the output pixel to input coordinates (plain scale, no
        // half-pixel offset — matches the original behavior)
        const float u = ((float)x) / ((float)out_width) * ((float)in_width);
        const float v = ((float)y) / ((float)out_height) * ((float)in_height);
        out_pixels[x + y * out_width + c * out_width * out_height] =
            interpolate_bicubic_interpolate(in_pixels, in_width, in_height, u, v, c);
    }
}
// Launches the bicubic resampler on `stream`: one thread per output
// element (out_width * out_height * channels total).
extern "C" void neuralops_cuda_interpolate2d_bicubic(
    const float *in_pixels,
    size_t in_width,
    size_t in_height,
    size_t channels,
    float *out_pixels,
    size_t out_width,
    size_t out_height,
    cudaStream_t stream)
{
    const int threads = 1024;
    const int n = out_width * out_height * channels;
    const int blocks = (n + threads - 1) / threads;  // ceil-div
    interpolate_bicubic_kernel<<<blocks, threads, 0, stream>>>(
        in_pixels, in_width, in_height, channels,
        out_pixels, out_width, out_height);
}
// Weighted sum of four taps with the Catmull-Rom weights at offset x.
__device__ float catmullrom_filter(
    float x,
    float a0,
    float a1,
    float a2,
    float a3)
{
    return a0 * catrom_w0(x)
         + a1 * catrom_w1(x)
         + a2 * catrom_w2(x)
         + a3 * catrom_w3(x);
}
// Catmull-Rom sample of channel c at continuous coordinates (u, v).
// Filters each of the four clamped source rows horizontally, then
// filters the row results vertically.
__device__ float catmullrom_filter2d(
    const float *pixels,
    int width,
    int height,
    float u,
    float v,
    int c)
{
    // Shift to texel-center coordinates.
    u -= 0.5f;
    v -= 0.5f;
    const float px = floorf(u);
    const float py = floorf(v);
    const float fx = u - px;
    const float fy = v - py;
    const int ix = (int)px;
    const int iy = (int)py;
    float rows[4];
    for (int r = 0; r < 4; ++r) {
        const int yy = iy + r - 1;  // rows iy-1 .. iy+2
        rows[r] = catmullrom_filter(fx,
            tex2d_clamp_uvc(pixels, width, height, ix - 1, yy, c),
            tex2d_clamp_uvc(pixels, width, height, ix,     yy, c),
            tex2d_clamp_uvc(pixels, width, height, ix + 1, yy, c),
            tex2d_clamp_uvc(pixels, width, height, ix + 2, yy, c));
    }
    return catmullrom_filter(fy, rows[0], rows[1], rows[2], rows[3]);
}
// One thread per output element; a flat 1-D launch is decoded as (x, y, c).
__global__ void catmullrom_kernel(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    float *out_pixels,
    int out_width,
    int out_height)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const int x = idx % out_width;
    const int y = (idx / out_width) % out_height;
    const int c = idx / (out_width * out_height);
    if (x >= out_width || y >= out_height || c >= channels) {
        return;
    }
    // Map the output pixel to continuous input coordinates.
    const float u = ((float)x) / ((float)out_width) * ((float)in_width);
    const float v = ((float)y) / ((float)out_height) * ((float)in_height);
    out_pixels[x + y * out_width + c * out_width * out_height] =
        catmullrom_filter2d(in_pixels, in_width, in_height, u, v, c);
}
// Launches the Catmull-Rom resampler on `stream`: one thread per output
// element (out_width * out_height * channels total).
extern "C" void neuralops_cuda_interpolate2d_catmullrom(
    const float *in_pixels,
    size_t in_width,
    size_t in_height,
    size_t channels,
    float *out_pixels,
    size_t out_width,
    size_t out_height,
    cudaStream_t stream)
{
    const int threads = 1024;
    const int n = out_width * out_height * channels;
    const int blocks = (n + threads - 1) / threads;  // ceil-div
    catmullrom_kernel<<<blocks, threads, 0, stream>>>(
        in_pixels, in_width, in_height, channels,
        out_pixels, out_width, out_height);
}
// Unweighted average of the 2x2 texel neighbourhood around (u, v) for
// channel c. Coordinates are in input-pixel units; border sampling is
// clamped by tex2d_clamp_uvc.
__device__ float interpolate_2x2_bilinear_interpolate(
    const float *pixels,
    int width,
    int height,
    float u,
    float v,
    int c)
{
    // Shift to texel-center coordinates.
    u -= 0.5f;
    v -= 0.5f;
    float px = floorf(u);
    float py = floorf(v);
    int ipx = (int)px;
    int ipy = (int)py;
    // 0.25f, not 0.25: the double literal promoted the whole sum to
    // double precision on every thread for no benefit.
    return 0.25f * (
        tex2d_clamp_uvc(pixels, width, height, ipx,     ipy,     c) +
        tex2d_clamp_uvc(pixels, width, height, ipx + 1, ipy,     c) +
        tex2d_clamp_uvc(pixels, width, height, ipx,     ipy + 1, c) +
        tex2d_clamp_uvc(pixels, width, height, ipx + 1, ipy + 1, c));
}
// One thread per output element; a flat 1-D launch is decoded as (x, y, c).
__global__ void interpolate_2x2_bilinear_kernel(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    float *out_pixels,
    int out_width,
    int out_height)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const int x = idx % out_width;
    const int y = (idx / out_width) % out_height;
    const int c = idx / (out_width * out_height);
    if (x >= out_width || y >= out_height || c >= channels) {
        return;
    }
    // Map the output pixel to continuous input coordinates.
    const float u = ((float)x) / ((float)out_width) * ((float)in_width);
    const float v = ((float)y) / ((float)out_height) * ((float)in_height);
    out_pixels[x + y * out_width + c * out_width * out_height] =
        interpolate_2x2_bilinear_interpolate(in_pixels, in_width, in_height, u, v, c);
}
// Launches the 2x2-average resampler on `stream`: one thread per output
// element (out_width * out_height * channels total).
extern "C" void neuralops_cuda_interpolate2d_2x2_bilinear(
    const float *in_pixels,
    size_t in_width,
    size_t in_height,
    size_t channels,
    float *out_pixels,
    size_t out_width,
    size_t out_height,
    cudaStream_t stream)
{
    const int threads = 1024;
    const int n = out_width * out_height * channels;
    const int blocks = (n + threads - 1) / threads;  // ceil-div
    interpolate_2x2_bilinear_kernel<<<blocks, threads, 0, stream>>>(
        in_pixels, in_width, in_height, channels,
        out_pixels, out_width, out_height);
}
// Bilinear sample of channel c at continuous coordinates (u, v):
// horizontal lerp on the two clamped source rows, then a vertical lerp
// between the row results.
__device__ float interpolate_bilinear_interpolate(
    const float *pixels,
    int width,
    int height,
    float u,
    float v,
    int c)
{
    // Shift to texel-center coordinates.
    u -= 0.5f;
    v -= 0.5f;
    const float px = floorf(u);
    const float py = floorf(v);
    const float fx = u - px;
    const float fy = v - py;
    const int ix = (int)px;
    const int iy = (int)py;
    const float top = lerp_filter(
        tex2d_clamp_uvc(pixels, width, height, ix,     iy, c),
        tex2d_clamp_uvc(pixels, width, height, ix + 1, iy, c),
        fx);
    const float bottom = lerp_filter(
        tex2d_clamp_uvc(pixels, width, height, ix,     iy + 1, c),
        tex2d_clamp_uvc(pixels, width, height, ix + 1, iy + 1, c),
        fx);
    return lerp_filter(top, bottom, fy);
}
// One thread per output element; a flat 1-D launch is decoded as (x, y, c).
__global__ void interpolate_bilinear_kernel(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    float *out_pixels,
    int out_width,
    int out_height)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const int x = idx % out_width;
    const int y = (idx / out_width) % out_height;
    const int c = idx / (out_width * out_height);
    if (x >= out_width || y >= out_height || c >= channels) {
        return;
    }
    // Map the output pixel to continuous input coordinates.
    const float u = ((float)x) / ((float)out_width) * ((float)in_width);
    const float v = ((float)y) / ((float)out_height) * ((float)in_height);
    out_pixels[x + y * out_width + c * out_width * out_height] =
        interpolate_bilinear_interpolate(in_pixels, in_width, in_height, u, v, c);
}
// Launches the bilinear resampler on `stream`: one thread per output
// element (out_width * out_height * channels total).
extern "C" void neuralops_cuda_interpolate2d_bilinear(
    const float *in_pixels,
    size_t in_width,
    size_t in_height,
    size_t channels,
    float *out_pixels,
    size_t out_width,
    size_t out_height,
    cudaStream_t stream)
{
    const int threads = 1024;
    const int n = out_width * out_height * channels;
    const int blocks = (n + threads - 1) / threads;  // ceil-div
    interpolate_bilinear_kernel<<<blocks, threads, 0, stream>>>(
        in_pixels, in_width, in_height, channels,
        out_pixels, out_width, out_height);
}
|
10,397 | #include <stdio.h>
// Enumerates the CUDA devices on this machine and prints their key
// properties. Returns nonzero if the device count cannot be queried.
int main() {
    int nDevices = 0;
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf("Device name: %s\n", prop.name);
        // totalGlobalMem / sharedMemPerBlock are size_t: %d truncated the
        // value on cards with >= 4 GB of memory — use %zu.
        printf("TotalGlobalMem: %zu kB\n", prop.totalGlobalMem / 1024);
        printf("SharedMemPerBlock: %zu kB\n", prop.sharedMemPerBlock / 1024);
        printf("MaxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
        printf("MaxThreadsDim [x]: %d [y]: %d [z]: %d\n",
            prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("MaxGridSize [x]: %d [y]: %d [z]: %d\n",
            prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("MultiProcessorCount: %d\n", prop.multiProcessorCount);
    }
    return 0;
}
|
10,398 | #include "includes.h"
#define index(i, j, w) ((i)*(w)) + (j)
// Box blur: each thread averages the (2*blurSize+1)^2 window around its
// pixel, counting only in-bounds neighbours, and writes the mean.
__global__ void blurKernel (unsigned char * d_inputArray, unsigned char * d_outputArray, int w, int h, int blurSize){
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= h || col >= w) {
        return;
    }
    int sum = 0;
    int count = 0;  // in-bounds pixels actually accumulated
    for (int dy = -blurSize; dy <= blurSize; ++dy) {
        for (int dx = -blurSize; dx <= blurSize; ++dx) {
            const int r = row + dy;
            const int c = col + dx;
            if (r > -1 && r < h && c > -1 && c < w) {
                sum += d_inputArray[r * w + c];
                ++count;
            }
        }
    }
    // count >= 1 always: the center pixel itself is in bounds.
    d_outputArray[row * w + col] = (unsigned char)(sum / count);
}
10,399 | //Desription: Image processing algorithm for "pyramid generation". See "Pyramidal Implementation of the Lucas Kanade Feature Tracker Description of the algorithm" for a detailed description
//It uses a “tiled convolution” structure, where each block is responsible for generating a corresponding “tile” on the pyramid image.
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
//Time stamp function in seconds
// Wall-clock timestamp in seconds with microsecond resolution.
double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
}
//When generating multiple levels of pyramids, i corresponds to the pyramid level the current kernel call is generating
__global__ void generatePyramid (float *pyramids, int i, int origImgWidth, int origImgHeight) {
int ix = threadIdx.x + blockIdx.x*blockDim.x;
int iy = threadIdx.y + blockIdx.y*blockDim.y ;
int origImgSize = origImgWidth*origImgHeight;
int imgOffset = int(origImgSize * (1-pow(0.25, i-1))/(1-0.25));
int pyramidOffset = int(origImgSize * (1-pow(0.25, i))/(1-0.25));
int imgHeight = origImgHeight >> (i-1);
int imgWidth = origImgWidth >> (i-1);
int pyrmHeight = imgHeight >> 1;
int pyrmWidth = imgWidth >> 1;
int idx = iy* pyrmWidth + ix ;
extern __shared__ float sImg [];
//Move data block uses to shared memory for faster reads
//center
sImg[threadIdx.x*2 + 1 + (threadIdx.y*2 + 1)*(blockDim.x*2 + 1)] = pyramids[imgOffset + iy*2*imgWidth + ix*2];
//center right
sImg[threadIdx.x*2 + 2 + (threadIdx.y*2 + 1)*(blockDim.x*2 + 1)] = pyramids[imgOffset + iy*2*imgWidth + ix*2 + 1];
//bottom center
sImg[threadIdx.x*2 + 1 + (threadIdx.y*2 + 2)*(blockDim.x*2 + 1)] = pyramids[imgOffset + (iy*2 + 1)*imgWidth + ix*2];
//bottom right
sImg[threadIdx.x*2 + 2 + (threadIdx.y*2 + 2)*(blockDim.x*2 + 1)] = pyramids[imgOffset + (iy*2 + 1)*imgWidth + ix*2 + 1];
if (threadIdx.y == 0) {
//top center
sImg[threadIdx.x*2 + 1 + (threadIdx.y*2)*(blockDim.x*2 + 1)] = blockIdx.y == 0 ? pyramids[imgOffset + iy*2*imgWidth + ix*2]:pyramids[imgOffset + (iy*2-1)*imgWidth + ix*2];
//top right
sImg[threadIdx.x*2 + 2 + (threadIdx.y*2)*(blockDim.x*2 + 1)] = blockIdx.y == 0 ? pyramids[imgOffset + iy*2*imgWidth + ix*2 + 1]:pyramids[imgOffset + (iy*2-1)*imgWidth + ix*2 + 1];
}
if (threadIdx.x == 0) {
//center left
sImg[threadIdx.x*2 + (threadIdx.y*2 + 1)*(blockDim.x*2 + 1)] = blockIdx.x == 0 ? pyramids[imgOffset + iy*2*imgWidth + ix*2]:pyramids[imgOffset + iy*2*imgWidth + ix*2 - 1];
//bottom left
sImg[threadIdx.x*2 + (threadIdx.y*2 + 2)*(blockDim.x*2 + 1)] = blockIdx.x == 0 ? pyramids[imgOffset + (iy*2 + 1)*imgWidth + ix*2]:pyramids[imgOffset + (iy*2 + 1)*imgWidth + ix*2 - 1];
}
if (threadIdx.x == 0 && threadIdx.y == 0)
//top left
sImg[threadIdx.x*2 + (threadIdx.y*2)*(blockDim.x*2 + 1)] = (blockIdx.x == 0 || blockIdx.y == 0) ? pyramids[imgOffset + iy*2*imgWidth + ix*2]:pyramids[imgOffset + (iy*2-1)*imgWidth + ix*2 - 1];
__syncthreads();
if( (ix<pyrmWidth) && (iy<pyrmHeight) ) {
#ifdef DEBUG
int centerX = min(max(2*ix, 0), imgWidth);
int centerY = min(max(2*iy, 0), imgHeight);
int left = min(max(2*ix - 1, 0), imgWidth);
int down = min(max(2*iy - 1, 0), imgHeight);
int right = min(max(2*ix + 1, 0), imgWidth);
int up = min(max(2*iy + 1, 0), imgHeight);
printf("Index: (%d, %d)\n", ix, iy);
printf("Center (%d, %d): %lf\n", centerX, centerY, pyramids[centerX + centerY*imgWidth + imgOffset]);
printf("Center Left (%d, %d): %lf\n", left, centerY, pyramids[left + centerY*imgWidth + imgOffset]);
printf("Center Right (%d, %d): %lf\n", right, centerY, pyramids[right + centerY*imgWidth + imgOffset]);
printf("Up Left (%d, %d): %lf\n", left, up, pyramids[left + up*imgWidth + imgOffset]);
printf("Up Center (%d, %d): %lf\n", centerX, up, pyramids[centerX + up*imgWidth + imgOffset]);
printf("Up Right (%d, %d): %lf\n", right, up, pyramids[right + up*imgWidth + imgOffset]);
printf("Down Left (%d, %d): %lf\n", left, down, pyramids[left + down*imgWidth + imgOffset]);
printf("Down Center (%d, %d): %lf\n", centerX, down, pyramids[centerX + down*imgWidth + imgOffset]);
printf("Down Right (%d, %d): %lf\n", right, down, pyramids[right + down*imgWidth + imgOffset]);
#endif
float pValue = 0;
pValue += sImg[threadIdx.x*2 + 1 + (threadIdx.y*2 + 1)*(blockDim.x*2 + 1)]/4.0;
pValue += 1/8*sImg[threadIdx.x*2 + 2 + (threadIdx.y*2 + 1)*(blockDim.x*2 + 1)]/8.0;
pValue += 1/8*sImg[threadIdx.x*2 + 1 + (threadIdx.y*2 + 2)*(blockDim.x*2 + 1)]/8.0;
pValue += 1/16*sImg[threadIdx.x*2 + 2 + (threadIdx.y*2 + 2)*(blockDim.x*2 + 1)]/16.0;
pValue += 1/8*sImg[threadIdx.x*2 + 1 + (threadIdx.y*2)*(blockDim.x*2 + 1)]/8.0;
pValue += 1/16*sImg[threadIdx.x*2 + 2 + (threadIdx.y*2)*(blockDim.x*2 + 1)]/16.0;
pValue += 1/8*sImg[threadIdx.x*2 + (threadIdx.y*2 + 1)*(blockDim.x*2 + 1)]/8.0;
pValue += 1/16*sImg[threadIdx.x*2 + (threadIdx.y*2 + 2)*(blockDim.x*2 + 1)]/16.0;
pValue += 1/16*sImg[threadIdx.x*2 + (threadIdx.y*2)*(blockDim.x*2 + 1)]/16.0;
pyramids[idx + pyramidOffset] = pValue;
}
}
// Loads img.bmp, builds a 3-level image pyramid on the GPU from its red
// channel, and prints copy/compute timings.
int main() {
    FILE *img = fopen("img.bmp", "rb");
    if (img == NULL) {
        printf("Could not open img.bmp\n");
        return -1;
    }
    unsigned char info[54];
    fread(info, sizeof(unsigned char), 54, img); // read the 54-byte header
    //Extract image height and width from header
    int imgWidth = *(int*)&info[18];
    int imgHeight = *(int*)&info[22];
    printf("Size: %d %d\n", imgWidth, imgHeight);
    int imgSize = imgWidth * imgHeight;
    unsigned char* data = (unsigned char*)malloc(3*imgSize*sizeof(unsigned char)); // allocate 3 bytes per pixel
    fread(data, sizeof(unsigned char), 3*imgSize, img); // read the rest of the data at once
    fclose(img);
    int pyramidLevels = 3;
    //In terms of #elements
    //Geometric series formula
    int pyramidsSize = int(imgSize * (1-pow(0.25, pyramidLevels+1))/(1-0.25));
    float* h_pyramids;
    cudaHostAlloc( (void **) &h_pyramids, pyramidsSize*sizeof(float), 0);
    // Level 0 = red channel; BMP rows are stored bottom-up, so flip vertically.
    for (int i = 0; i < imgHeight; i++) {
        for (int j = 0; j < imgWidth; j++) {
            h_pyramids[i*imgWidth + j] = data[((imgHeight - 1 - i)*imgWidth + j)*3];
        }
    }
    free(data);
    float* d_pyramids;
    cudaMalloc((void**) &d_pyramids, pyramidsSize*sizeof(float));
    double timeStampA = getTimeStamp();
    cudaMemcpy(d_pyramids, h_pyramids, imgSize*sizeof(float), cudaMemcpyHostToDevice);
    double timeStampB = getTimeStamp();
    dim3 block(16, 16);
    for (int i = 1; i <= pyramidLevels; i++) {
        // BUG FIXES: (1) ceil() was applied to an already-truncated integer
        // quotient, under-launching whenever the level size is not a
        // multiple of the block size — use integer ceil-division;
        // (2) gridY was derived from the width instead of the height.
        int levelW = imgWidth >> i;
        int levelH = imgHeight >> i;
        int gridX = (levelW + block.x - 1) / block.x;
        int gridY = (levelH + block.y - 1) / block.y;
        dim3 grid(gridX, gridY);
        generatePyramid<<<grid, block, (2*block.x + 1) * (2*block.y + 1)*sizeof(float)>>> (d_pyramids, i, imgWidth, imgHeight);
    }
    cudaDeviceSynchronize();
    double timeStampC = getTimeStamp();
    cudaMemcpy (h_pyramids, d_pyramids, pyramidsSize*sizeof(float), cudaMemcpyDeviceToHost);
    double timeStampD = getTimeStamp();
    cudaError_t err = cudaPeekAtLastError();
    if (err != cudaSuccess) {
        printf("Error: %s", cudaGetErrorString(err));
        exit(-1);
    }
    cudaFree(d_pyramids);
    cudaFreeHost(h_pyramids);
    cudaDeviceReset();
    printf("\n\n\n%.6f\n", timeStampB-timeStampA);
    printf("%.6f\n", timeStampC-timeStampA);
    printf("%.6f\n", timeStampD-timeStampC);
}
|
10,400 | #include "includes.h"
#define N 50
#define NewN 100
#define LifeN 500
#define numofthreads 512
// Host-side problem state: item count loaded so far and knapsack capacity.
int numofeles=0,capacity;
// One candidate solution for the (GA-style) knapsack search:
// chromo[i] marks whether item i is included; weight/value accumulate
// the totals for the included set (see the evaluate kernel below).
struct chromosome
{
long long weight=0, value=0;
bool chromo[100003];
};
// Populations (host copies and device pointers), scratch individuals,
// and the per-item weight/value tables with their device mirrors.
chromosome chromoele[N],*cudaChromo,*cudaNewpopulation,newpopulation[NewN],res,x[2];
int weight[100001],value[100001],*devValue,*devWeight,*devnumeles;
// Accumulates total value and weight of each chromosome's selected items.
// One thread per chromosome; numele is the number of items per chromosome.
// NOTE(review): there is no guard on idx against the population size, so
// any launch with more threads than chromosomes indexes cudaChromo out of
// bounds — confirm the launch configuration. Also assumes the weight/value
// fields were zeroed before launch, since the kernel only adds to them.
__global__ void evaluate(chromosome *cudaChromo,int *devValue,int *devWeight, int numele)
{
int idx = threadIdx.x+blockDim.x*blockIdx.x;
for (int i = 0; i < numele; i++){
if (cudaChromo[idx].chromo[i])
cudaChromo[idx].value += devValue[i];
// chromo[i] acts as a 0/1 mask on this item's weight
cudaChromo[idx].weight += (cudaChromo[idx].chromo[i] ? 1 : 0)*devWeight[i];
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.