serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
6,401 | #include <stdio.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <math.h>
struct Matrix {
float* addr;
int height;
int width;
};
// Tile edge (in threads) used by the shared-memory distance kernel.
#define TILE_WIDTH 32
// Number of rows each block of findMinDistIdx processes.
#define BLOCK_WIDTH 32
// Abort with file/line context when a CUDA runtime call fails.
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
assert(false); \
} \
}
void read_args(char* arg_file, int* ppsi, int* pnum);
struct Matrix readMatrix(char* file_name);
void writeMatrix(char* fileName, Matrix M, bool numeric);
void writeIndexMatrix(char* file_name, Matrix M);
void writeNumericMatrix(char* file_name, Matrix M);
__global__ void calcMatrixDist(Matrix M, Matrix N, Matrix P);
__global__ void findMinDistIdx(Matrix dist, Matrix ret, int psi, int num);
int main(int argc, char **argv) {
assert(argc == 5);
char *arg_file = argv[1], *model_file = argv[2], *input_file = argv[3], *output_file = argv[4];
int psi, num;
read_args(arg_file, &psi, &num);
struct Matrix model = readMatrix(model_file);
struct Matrix input = readMatrix(input_file);
printf("Loading matrixs from %s and %s completed\n", model_file, input_file);
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
float *deModel = NULL, *deInput = NULL, *deDist = NULL, *deRet = NULL;
cudaMalloc((void **)&deModel, model.height*model.width*sizeof(float));
cudaMalloc((void **)&deInput, input.height*input.width*sizeof(float));
cudaMalloc((void **)&deDist, model.height*input.height*sizeof(float));
cudaMalloc((void **)&deRet, input.height*num*sizeof(int));
cudaMemcpy(deModel, model.addr, model.height*model.width*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deInput, input.addr, input.height*input.width*sizeof(float), cudaMemcpyHostToDevice);
free(model.addr);
free(input.addr);
model.addr = deModel;
input.addr = deInput;
struct Matrix dist;
dist.height = input.height;
dist.width = model.height;
dist.addr = deDist;
int dx = (int)(ceil((float)model.height/TILE_WIDTH)),
dy = (int)(ceil((float)input.height/TILE_WIDTH));
dim3 dimGrid(dx, dy);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
printf("Calculating distance matrix...... Grid: (%d, %d), Block: (%d,%d)\n", dx, dy, TILE_WIDTH, TILE_WIDTH);
calcMatrixDist<<<dimGrid, dimBlock>>>(input, model, dist);
/*
输出中间结果-距离矩阵,验证calcMatrixDist正确性
*/
/*
struct Matrix toWrite;
toWrite.height = dist.height;
toWrite.width = model.height;
toWrite.addr = (float*)malloc(toWrite.height*toWrite.width*sizeof(float));
cudaMemcpy(toWrite.addr, deDist, toWrite.height*toWrite.width*sizeof(float), cudaMemcpyDeviceToHost);
writeNumericMatrix("bin/dist.csv", toWrite);
free(toWrite.addr);
*/
struct Matrix ret;
ret.height = input.height;
ret.width = num;
ret.addr = deRet;
int bx = (int)(ceil((float)ret.height/BLOCK_WIDTH));
printf("Finding index with minimum distance...... Grid: %d, Block: %d\n", bx, num);
findMinDistIdx<<<bx, num>>>(dist, ret, psi, num);
ret.addr = (float*)malloc(ret.height*num*sizeof(float));
cudaMemcpy(ret.addr, deRet, ret.height*num*sizeof(float), cudaMemcpyDeviceToHost);
writeIndexMatrix(output_file, ret);
free(ret.addr);
cudaFree(deModel);
cudaFree(deInput);
cudaFree(deDist);
cudaFree(deRet);
cudaDeviceReset();
return 0;
}
// Reads two whitespace-separated integers (psi, num) from arg_file into
// *ppsi and *pnum. Aborts on open or parse failure.
void read_args(char* arg_file, int* ppsi, int* pnum) {
    FILE* f = fopen(arg_file, "r");
    if (f == NULL) {
        printf("Fail to read args\n");
        assert(false);
    }
    // Fix: require BOTH integers to parse (original accepted any fscanf
    // result >= 0, i.e. even zero conversions).
    assert(fscanf(f, "%d %d", ppsi, pnum) == 2);
    // Fix: the file handle was leaked.
    fclose(f);
}
// Reads a matrix from a text file laid out as "height<TAB>width" followed
// by height*width whitespace-separated floats. The returned Matrix owns a
// malloc'd buffer; the caller frees it. Aborts on any open/parse failure.
struct Matrix readMatrix(char* matrix_file) {
    struct Matrix matrix;
    FILE* f = fopen(matrix_file, "r");
    if (f == NULL) {
        printf("Fail to read model\n");
        assert(false);
    }
    int height, width;
    // Fix: require both dimensions to parse (was >= 0, which accepts
    // zero successful conversions and leaves height/width uninitialized).
    assert(fscanf(f, "%d\t%d", &height, &width) == 2);
    matrix.addr = (float*)malloc(height*width*sizeof(float));
    assert(matrix.addr != NULL);  // fix: malloc result was unchecked
    matrix.height = height;
    matrix.width = width;
    float* ptr = matrix.addr;
    for (int i = 0; i < height*width; ++i, ++ptr)
        assert(fscanf(f, "%f", ptr) == 1);  // fix: each element must parse
    // Fix: the file handle was leaked.
    fclose(f);
    return matrix;
}
// Writes M to file_name: a "height<TAB>width" header line, then one
// tab-separated row per line. With numeric=true values are written as
// floats; otherwise they are truncated to ints (index output).
void writeMatrix(char* file_name, Matrix M, bool numeric) {
    FILE* f = fopen(file_name, "w");
    if (f == NULL) {
        printf("Fail to write result\n");
        assert(false);
    }
    fprintf(f, "%d\t%d\n", M.height, M.width);
    float* ptr = M.addr;
    for (int i = 0; i < M.height; ++i) {
        for (int j = 0; j < M.width; ++j, ++ptr) {
            if (numeric)
                fprintf(f, "%f\t", *ptr);
            else
                fprintf(f, "%d\t", (int)(*ptr));
        }
        fprintf(f, "\n");
    }
    // Fix: the stream was never closed, so buffered output could be lost
    // and the handle leaked.
    fclose(f);
}
// Convenience wrapper: write M with full float precision.
void writeNumericMatrix(char* fileName, Matrix M) {
writeMatrix(fileName, M, true);
}
// Convenience wrapper: write M with each value truncated to an int index.
void writeIndexMatrix(char* fileName, Matrix M) {
writeMatrix(fileName, M, false);
}
// Pairwise squared Euclidean distances between the rows of M and the rows
// of N: P[i][j] = sum_k (M[i][k] - N[j][k])^2.
// Launch: TILE_WIDTH x TILE_WIDTH blocks; blockIdx.y tiles rows of M
// (= rows of P), blockIdx.x tiles rows of N (= columns of P). Both inputs
// are staged through shared memory one TILE_WIDTH-wide feature chunk at a
// time, with out-of-range entries zero-filled so partial tiles are safe.
__global__ void calcMatrixDist(Matrix M, Matrix N, Matrix P) {
assert(M.width == N.width);
assert(P.height == M.height && P.width == N.height);
__shared__ float sharedM[TILE_WIDTH][TILE_WIDTH];
__shared__ float sharedN[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
float v = 0;
// rowM: the M row this thread accumulates; rowN: the N row this thread LOADS
// (each thread loads one element per tile but reads a whole sharedN row below).
int rowM = by*TILE_WIDTH + ty, rowN = bx*TILE_WIDTH + ty;
// Walk the feature dimension in TILE_WIDTH chunks.
for (int i = 0; i < (int)(ceil((float)M.width/TILE_WIDTH)); ++i) {
int offM = rowM*M.width + i*TILE_WIDTH + tx;
if (i*TILE_WIDTH + tx < M.width && rowM < M.height)
sharedM[ty][tx] = M.addr[offM];
else
sharedM[ty][tx] = 0;
int offN = rowN*N.width + i*TILE_WIDTH + tx;
if (i*TILE_WIDTH + tx < N.width && rowN < N.height)
sharedN[ty][tx] = N.addr[offN];
else
sharedN[ty][tx] = 0;
// Barrier: tiles fully loaded before any thread reads them.
__syncthreads();
// Accumulate squared differences for this chunk; zero padding contributes
// zero only because BOTH tiles are zero-filled in the same positions.
for (int j = 0; j < TILE_WIDTH; ++j)
v += (sharedM[ty][j] - sharedN[tx][j])*(sharedM[ty][j] - sharedN[tx][j]);
// Barrier: all reads done before the next iteration overwrites the tiles.
__syncthreads();
}
int row = by*TILE_WIDTH + ty, col = bx*TILE_WIDTH + tx;
if (row < P.height && col < P.width)
P.addr[row*P.width + col] = v;
/*
printf("block: (%d,%d) thread: (%d,%d), write into: (%d, %d): %f\n",
bx, by, tx, ty, row, col, v);
*/
}
// For each row of the distance matrix, the row is split into `num` groups
// of `psi` consecutive entries; thread tx finds the argmin WITHIN its group
// and stores that local index (0..psi-1) as a float in ret[row][tx].
// Launch: <<<ceil(rows/BLOCK_WIDTH), num>>> — each block walks BLOCK_WIDTH
// consecutive rows; blockDim.x must equal num.
__global__ void findMinDistIdx(Matrix dist, Matrix ret, int psi, int num) {
assert(dist.width == psi*num);
assert(ret.height == dist.height);
assert(ret.width == num);
int bx = blockIdx.x, tx = threadIdx.x;
for (int i = 0; i < BLOCK_WIDTH; ++i) {
int row = bx*BLOCK_WIDTH + i;
// Past the last row: the remaining iterations are also out of range.
if (row >= ret.height)
break;
// offset: start of this thread's psi-wide group within the row.
int offset = row*dist.width + tx*psi, idx = 0;
double min = dist.addr[offset];
for (int j = 1; j < psi; ++j) {
if (dist.addr[offset + j] < min) {
min = dist.addr[offset + j];
idx = j;
}
}
// Index stored as float because Matrix carries float* storage.
ret.addr[row*ret.width + tx] = idx;
}
}
|
6,402 | #include<iostream>
#include<cstdio>
using namespace std;
// One thread per block: block k scans a[256*k .. min(256*k+256, n)) and
// writes the largest value seen (starting from 0) into b[k].
__global__ void maxi(int *a,int *b,int n)
{
    const int start = 256 * blockIdx.x;
    const int stop = min(start + 256, n);
    int best = 0;
    for (int idx = start; idx < stop; idx++)
        best = (a[idx] > best) ? a[idx] : best;
    b[blockIdx.x] = best;
}
// Reads n, fills an array with random values, then finds the maximum both
// sequentially on the host and by repeated 256-to-1 block reduction on the
// GPU, reporting both results and timings.
int main()
{
    cout<<"Enter the size of array: ";
    int n;
    cin>>n;
    // Fix: was a variable-length stack array `int a[n]` — a non-standard
    // C++ extension that overflows the stack for large n.
    int *a = new int[n];
    cudaEvent_t start,end,start1,end1;
    for(int i=0;i<n;i++)
    {
        a[i]=rand()%n;
    }
    for(int i=0;i<n;i++)
    {
        printf("%d\t",a[i]);
    }
    // Time the sequential scan (CUDA events measure wall time here).
    cudaEventCreate(&start1);
    cudaEventCreate(&end1);
    cudaEventRecord(start1);
    int max=0;
    for(int i=0;i<n;i++)
    {
        if(a[i]>max)
        {
            max=a[i];
        }
    }
    cudaEventRecord(end1);
    cudaEventSynchronize(end1);
    float time1=0;
    cudaEventElapsedTime(&time1,start1,end1);
    cout<<"\nSequential Processing:";
    cout<<"\nMax="<<max;
    cout<<"\nSequential time="<<time1;
    int *ad,*bd;
    int size=n*sizeof(int);
    cudaMalloc(&ad,size);
    cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
    // bd is sized for the FIRST pass, which is the largest it ever needs.
    int grids=ceil(n*1.0f/256.0f);
    cudaMalloc(&bd,grids*sizeof(int));
    dim3 block(1,1); // maxi runs one thread per block
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    while(n>1)
    {
        // Fix: recompute the block count for the current n (the original
        // relaunched the initial grid size every pass, doing redundant work
        // on the already-shrunken array).
        grids=ceil(n*1.0f/256.0f);
        maxi<<<grids,block>>>(ad,bd,n);
        n=grids; // one partial max per block survives
        cudaMemcpy(ad,bd,n*sizeof(int),cudaMemcpyDeviceToDevice);
    }
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    float time=0;
    cudaEventElapsedTime(&time,start,end);
    // Fix: copy exactly one int (was a magic "4" into an int[2]).
    int ans;
    cudaMemcpy(&ans,ad,sizeof(int),cudaMemcpyDeviceToHost);
    cout<<"\nParallel Processing:\nMax="<<ans<<endl;
    cout<<"Parallel Time=";
    cout<<time<<endl;
    // Fix: release GPU buffers, events, and the host array (were leaked).
    cudaFree(ad);
    cudaFree(bd);
    cudaEventDestroy(start);  cudaEventDestroy(end);
    cudaEventDestroy(start1); cudaEventDestroy(end1);
    delete[] a;
}
|
6,403 | //headers
#include <stdio.h>
#include <cuda.h>
// Host-side min and the closed-form sum of squares x(x+1)(2x+1)/6, used to
// check the dot-product result analytically.
#define imin(a, b) ((a < b) ? a : b)
#define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6)
//global variables (allocated in main, released/nulled by cleanup())
float *hostA = NULL;
float *hostB = NULL;
float *partial_hostC = NULL;
float *deviceA = NULL;
float *deviceB = NULL;
float *partial_deviceC = NULL;
const int iNumberOfArrayElements = 33 * 1024;
const int threadsPerBlock = 256;
// Grid is capped at 32 blocks; the kernel strides over the array to cover
// the remainder (see the tid += blockDim.x * gridDim.x loop).
const int blocksPerGrid = imin(32, (iNumberOfArrayElements + threadsPerBlock - 1) / threadsPerBlock);
// *** CUDA KERNEL DEFINITION ***
__global__ void vecDotProduct(float *input1, float *input2, float *output)
{
//variable declaration
//shared across all threads within block
__shared__ float cache[threadsPerBlock];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float temp = 0;
//code
while(tid < iNumberOfArrayElements)
{
temp += input1[tid] * input2[tid];
tid += blockDim.x * gridDim.x;
}
//set the cache values
cache[cacheIndex] = temp;
//synchronize threads in the block
__syncthreads();
//summation reduction
int i = blockDim.x / 2;
while(i != 0)
{
if(cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
//copy to output memory
if(cacheIndex == 0)
output[blockIdx.x] = cache[0];
}
// Computes the dot product of two generated vectors on the GPU (per-block
// partials reduced on the host) and checks it against the closed-form sum.
int main(void)
{
    //function declaration
    void cleanup(void);

    // All CUDA calls below funnel through this macro so a failure releases
    // whatever was allocated before exiting (the original repeated this
    // block inline six times; behavior is unchanged).
#define CHECK_CUDA(stmt)                                                                                                                              \
    {                                                                                                                                                 \
        cudaError_t err = (stmt);                                                                                                                     \
        if(err != cudaSuccess)                                                                                                                        \
        {                                                                                                                                             \
            printf("GPU Memory Fatal Error - %s In The File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__); \
            cleanup();                                                                                                                                \
            exit(EXIT_FAILURE);                                                                                                                       \
        }                                                                                                                                             \
    }

    //allocate memory on host
    hostA = (float *)malloc(iNumberOfArrayElements * sizeof(float));
    if(hostA == NULL)
    {
        printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 1.\nExiting Now ...\n");
        exit(EXIT_FAILURE);
    }
    hostB = (float *)malloc(iNumberOfArrayElements * sizeof(float));
    if(hostB == NULL)
    {
        printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Input Array 2.\nExiting Now ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }
    partial_hostC = (float *)malloc(blocksPerGrid * sizeof(float));
    if(partial_hostC == NULL)
    {
        printf("CPU Memory Fatal Error - Can Not Allocate Enough Memory For Host Output Array.\nExiting Now ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }

    //allocate memory on device
    CHECK_CUDA(cudaMalloc((void **)&deviceA, iNumberOfArrayElements * sizeof(float)));
    CHECK_CUDA(cudaMalloc((void **)&deviceB, iNumberOfArrayElements * sizeof(float)));
    CHECK_CUDA(cudaMalloc((void **)&partial_deviceC, blocksPerGrid * sizeof(float)));

    //fill the host input arrays: A[i] = i, B[i] = 2i, so A.B = 2*sum(i^2)
    for(int i = 0; i < iNumberOfArrayElements; i++)
    {
        hostA[i] = i;
        hostB[i] = i * 2;
    }

    //copy the host input arrays to device memory
    CHECK_CUDA(cudaMemcpy(deviceA, hostA, iNumberOfArrayElements * sizeof(float), cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(deviceB, hostB, iNumberOfArrayElements * sizeof(float), cudaMemcpyHostToDevice));

    //launch the kernel
    vecDotProduct<<<blocksPerGrid, threadsPerBlock>>>(deviceA, deviceB, partial_deviceC);
    CHECK_CUDA(cudaGetLastError());  // fix: launch errors were never checked

    //copy the per-block partial sums back to host (synchronizes the launch)
    CHECK_CUDA(cudaMemcpy(partial_hostC, partial_deviceC, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost));

    //final reduction over the per-block partials on the host
    float finalC = 0.0f;
    for(int i = 0; i < blocksPerGrid; i++)
    {
        finalC += partial_hostC[i];
    }

    //check against the closed-form value (exact float equality, as in the
    //original; float rounding can legitimately report "Not Accurate")
    if(finalC == (2 * sum_squares((float)(iNumberOfArrayElements - 1))))
        printf("Dot Product Calculated On Device Is Accurate.\n");
    else
        printf("Dot Product Calculated On Device Is Not Accurate.\n");
    printf("Dot Product = %0.6f.\n", finalC);  // fix: format was malformed "%0.6."
    printf("Expected Product = %0.6f.\n", 2 * sum_squares((float)(iNumberOfArrayElements - 1)));

    //total cleanup
    cleanup();
    return (0);
#undef CHECK_CUDA
}
// Release device buffers (reverse allocation order), then host buffers.
// Each pointer is nulled after being freed so calling cleanup() more than
// once — or before some allocations happened — is harmless.
void cleanup(void)
{
    if(partial_deviceC) { cudaFree(partial_deviceC); partial_deviceC = NULL; }
    if(deviceB)         { cudaFree(deviceB);         deviceB = NULL; }
    if(deviceA)         { cudaFree(deviceA);         deviceA = NULL; }
    if(partial_hostC)   { free(partial_hostC);       partial_hostC = NULL; }
    if(hostB)           { free(hostB);               hostB = NULL; }
    if(hostA)           { free(hostA);               hostA = NULL; }
}
|
6,404 | #include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <iostream>
#include <cuda_runtime.h>
#include <algorithm>
using namespace std;
// Fills 1 Gi (2^30) random 64-bit keys in managed memory, sorts them with
// thrust on the device, and reports the elapsed time.
int main(void)
{
    uint64_t number_of_elements = 1024L*1024*1024;
    uint64_t *h_key_array;
    // Fix: this ~8 GiB managed allocation was unchecked; on failure the
    // fill loop below dereferenced an invalid pointer.
    cudaError_t err = cudaMallocManaged(&h_key_array, number_of_elements*sizeof(uint64_t));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // rand() yields at most 31 random bits, so combine two draws per key.
    for (uint64_t i = 0; i < number_of_elements; i++) {
        h_key_array[i] = ((uint64_t)rand()) << 32 | (uint64_t)rand();
    }
    printf("size : %lu\n", sizeof(uint64_t));
    thrust::device_ptr<uint64_t> th_key_array( h_key_array );
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    float totalseconds = 0;
    int iterations = 1;
    for(int i = 0; i < iterations; i++)
    {
        cudaEventRecord(start, 0);
        //thrust::sort_by_key( th_key_array, th_key_array+number_of_elements, th_value_array );
        thrust::sort( th_key_array, th_key_array+number_of_elements );
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&milliseconds, start, stop);
        totalseconds = totalseconds + milliseconds;
        if(i == iterations - 1) break;
    }
    printf("Elapsed time: %f s.\n", totalseconds/(iterations*1000));
    //std::sort(h_key_ref.begin(), h_key_ref.end());
    //bool result = compareAB(h_key_array, h_key_ref);
    //printf("Test: %s\n", result == true ? "SUCCESS" : "FAIL");
    // Fix: release the events and the managed buffer (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(h_key_array);
    return 0;
}
|
6,405 | #include "includes.h"
// Inverse DWT along Y with periodic boundary extension, pass 1: combines
// the approximation (src_A) and detail (src_D) subbands into the upsampled
// destination using the reconstruction filter pair c_lpr/c_hpr (defined
// elsewhere, presumably in __constant__ memory; filt_len taps, l2 = filt_len/2).
// Grid layout: blockIdx.x tiles columns; blockIdx.y tiles groups of
// I_Y_RESULT_STEPS row-chunks, with `halo` extra chunks loaded above and
// below for filter support. Shared memory holds one column-strip of src_A
// followed by the matching strip of src_D (x index offset by I_Y_BLOCKDIM_X).
// NOTE(review): caller must size the extern shared buffer as
// 2*I_Y_BLOCKDIM_X * (I_Y_RESULT_STEPS + 2*halo) * I_Y_BLOCKDIM_Y floats,
// judging from the largest index used below — confirm against the launch.
__global__ void idwt_per_Y_1(float *d_dst, float *src_A, float *src_D, int rows, int cols, int next_rows, int filt_len, int halo) {
extern __shared__ float s_Data[];
//Offset to the upper halo edge
const int baseX = blockIdx.x * I_Y_BLOCKDIM_X + threadIdx.x;
const int baseY = ((blockIdx.y * I_Y_RESULT_STEPS) - halo) * I_Y_BLOCKDIM_Y + threadIdx.y;
int l2 = filt_len / 2;
if (baseX < cols)
{
// Rebase the pointers so all later accesses are relative offsets.
src_A += baseY * cols + baseX;
src_D += baseY * cols + baseX;
d_dst += (2 * baseY - l2 + 1) * cols + baseX;
//Loading data to shared memory
//Upper halo: rows above the tile wrap around to the bottom (periodic).
#pragma unroll
for (int i = 0; i < halo; i++)
{
s_Data[(threadIdx.x*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i * I_Y_BLOCKDIM_Y] = (baseY + i * I_Y_BLOCKDIM_Y >= 0) ? src_A[i * I_Y_BLOCKDIM_Y * cols] : src_A[(i * I_Y_BLOCKDIM_Y * cols) + (rows*cols)];
s_Data[((threadIdx.x + I_Y_BLOCKDIM_X)*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i * I_Y_BLOCKDIM_Y] = (baseY + i * I_Y_BLOCKDIM_Y >= 0) ? src_D[i * I_Y_BLOCKDIM_Y * cols] : src_D[(i * I_Y_BLOCKDIM_Y * cols) + (rows*cols)];
}
//Lower halo + Main data: rows below the image wrap to the top (periodic).
#pragma unroll
for (int i = halo; i < halo + I_Y_RESULT_STEPS + halo; i++)
{
s_Data[(threadIdx.x*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i * I_Y_BLOCKDIM_Y] = (baseY + i * I_Y_BLOCKDIM_Y < rows) ? src_A[i * I_Y_BLOCKDIM_Y * cols] : src_A[(i * I_Y_BLOCKDIM_Y * cols) - (rows*cols)];
s_Data[((threadIdx.x + I_Y_BLOCKDIM_X)*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i * I_Y_BLOCKDIM_Y] = (baseY + i * I_Y_BLOCKDIM_Y < rows) ? src_D[i * I_Y_BLOCKDIM_Y * cols] : src_D[(i * I_Y_BLOCKDIM_Y * cols) - (rows*cols)];
}
// All tile + halo data loaded before any thread starts filtering.
__syncthreads();
// Each input row produces two output rows (even/odd filter phases):
// temp_1 uses even-index taps, temp_2 uses odd-index taps.
#pragma unroll
for (int i = halo; i < I_Y_RESULT_STEPS + halo; i++)
{
int pos_y = 2 * baseY + 2 * i * I_Y_BLOCKDIM_Y;
if (pos_y + 1 < (2 * rows + filt_len - 2)) {
float temp_1 = 0, temp_2 = 0;
for (int l = 0; l < l2; ++l)
{
int t = 2 * l;
temp_1 += c_lpr[t] * s_Data[(threadIdx.x*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i* I_Y_BLOCKDIM_Y - l]
+ c_hpr[t] * s_Data[((threadIdx.x + I_Y_BLOCKDIM_X)*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i * I_Y_BLOCKDIM_Y - l];
temp_2 += c_lpr[t + 1] * s_Data[(threadIdx.x*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i * I_Y_BLOCKDIM_Y - l]
+ c_hpr[t + 1] * s_Data[((threadIdx.x + I_Y_BLOCKDIM_X)*(I_Y_RESULT_STEPS + 2 * halo) *I_Y_BLOCKDIM_Y) + threadIdx.y + i * I_Y_BLOCKDIM_Y - l];
}
// Trim the filter-delay margin: only rows inside [l2-1, next_rows+l2-1) land.
if ((pos_y >= l2 - 1) && (pos_y < next_rows + l2 - 1)) d_dst[2 * i * I_Y_BLOCKDIM_Y * cols] = temp_1;
if ((pos_y + 1 >= l2 - 1) && (pos_y + 1 < next_rows + l2 - 1)) d_dst[(2 * i * I_Y_BLOCKDIM_Y + 1) * cols] = temp_2;
}
}
}
} |
6,406 | #include "Sha2.cu"
#include "Sha2.cuh"
// Byte sizes for SHA-256 output and input block, and the salt length.
#define SHA256_DIGESTSIZE 32
#define SHA256_BLOCKSIZE 64
#define SALT_SIZE 16
// PBKDF2 iteration count.
__constant__ int ITERATIONS = 100000;
// HMAC inner/outer pad bytes (the standard 0x36/0x5C values).
__constant__ unsigned char SHA256_IPAD_CONST = (unsigned char) 0x36;
__constant__ unsigned char SHA256_OPAD_CONST = (unsigned char) 0x5C;
// 16 salt bytes followed by a 4-byte block index, then zero padding.
// NOTE(review): 1>>24, 1>>16 and 1>>8 all evaluate to 0, so bytes 16..19
// are {0,0,0,1} — apparently the big-endian PBKDF2 block index 1. The
// shift expressions look like a garbled attempt at serializing a counter;
// confirm the intent before touching these values.
__constant__ __device__ unsigned char cuda_init_derive[32] = {
230, 88, 20, 228, 56, 39, 89, 248,
85, 80, 2, 158, 114, 61, 199, 231,
1>>24,1>>16,1>>8, 0x01, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
// One HMAC-SHA256 evaluation given pre-keyed contexts.
// actx: inner context already primed with (key XOR ipad); bctx: outer
// context primed with (key XOR opad) — see cuda_derive_key_sha256. Both
// are passed BY VALUE so the caller's primed contexts survive reuse.
// d/ld: message bytes and length; out: receives the 32-byte MAC.
__device__ void hmac_sha256_c(sha256_ctx actx, sha256_ctx bctx, unsigned char *d, int ld, unsigned char *out) {
// sha256_ctx octx;
sha256_hash(d, ld, &actx);
sha256_end(out, &actx); // out = inner hash
memcpy(&actx, &bctx, sizeof(sha256_ctx)); // reuse actx as the outer context
sha256_hash(out, SHA256_DIGESTSIZE, &actx);
sha256_end ((unsigned char *) out, &actx); // out = outer hash = HMAC
}
// PBKDF2-style key derivation producing one 32-byte block in u:
// u = U1 ^ U2 ^ ... ^ U_ITERATIONS, where U1 = HMAC-SHA256(pwd,
// salt || block-index) using the fixed cuda_init_derive constant
// (SALT_SIZE + 4 bytes) and U_c = HMAC-SHA256(pwd, U_{c-1}).
// NOTE(review): ibuf/obuf are only written from index pwd_len onward, so
// pwd_len must satisfy 0 <= pwd_len <= SHA256_BLOCKSIZE (64); a longer
// password would pass a negative size to memset and overrun the pads —
// confirm callers enforce this.
__device__ void cuda_derive_key_sha256 (unsigned char *pwd, int pwd_len, unsigned char *u){
unsigned char j[SHA256_DIGESTSIZE];
unsigned char ibuf[SHA256_BLOCKSIZE], obuf[SHA256_BLOCKSIZE];
int i, c;
sha256_ctx ictx, octx;
// Build the HMAC key pads: pad bytes beyond the password, then XOR the
// password bytes in.
memset(ibuf + pwd_len, SHA256_IPAD_CONST, SHA256_BLOCKSIZE - pwd_len);
memset(obuf + pwd_len, SHA256_OPAD_CONST, SHA256_BLOCKSIZE - pwd_len);
for(i = 0; i < pwd_len; ++i) {
ibuf[i] = (unsigned char) (pwd[i] ^ SHA256_IPAD_CONST);
obuf[i] = (unsigned char) (pwd[i] ^ SHA256_OPAD_CONST);
}
// Prime one inner and one outer context; hmac_sha256_c copies them per call.
sha256_begin(&ictx);
sha256_begin(&octx);
sha256_hash((unsigned char*)ibuf, SHA256_BLOCKSIZE, &ictx);
sha256_hash((unsigned char*)obuf, SHA256_BLOCKSIZE, &octx);
// U1 from the salt||index constant; u starts as U1.
hmac_sha256_c(ictx, octx, (unsigned char * )cuda_init_derive, SALT_SIZE+4, j);
memcpy(u, j, SHA256_DIGESTSIZE);
// Chain the remaining iterations, XOR-folding each U_c into u.
for(c = 1; c < ITERATIONS; c++) {
hmac_sha256_c(ictx, octx, j, SHA256_DIGESTSIZE, j);
for(i = 0; i<SHA256_DIGESTSIZE;i++) {
u[i] ^= j[i];
}
}
}
|
6,407 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <sys/time.h>
#define CUDA_CHECK(cmd) {cudaError_t error = cmd; if(error!=cudaSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}}
const int blocksize=16;
const int N=256;
// only works for squared blocks and grids
// only works for squared blocks and grids
// Tiled matrix multiply c = a * b for N x N matrices (N is the file-level
// constant). Each block computes one blocksize x blocksize tile of c,
// staging matching tiles of a and b through shared memory.
// NOTE(review): there are no bounds checks, so this assumes N is an exact
// multiple of blocksize and the grid exactly covers the matrix — true for
// the constants in this file (N=256, blocksize=16) but not general.
__global__ void mm_gpu(float *a, float *b, float *c){
const int cx=blockIdx.x*blockDim.x+threadIdx.x;
const int cy=blockIdx.y*blockDim.y+threadIdx.y;
const int tx=threadIdx.x;
const int ty=threadIdx.y;
__shared__ float as[blocksize][blocksize];
__shared__ float bs[blocksize][blocksize];
float c_temp=0.0f;
// loop over blocks
for (int l=0;l<gridDim.x; l++)
{
// copy data to shared mem
as[ty][tx]=a[cy*N+l*blocksize+tx];
bs[ty][tx]=b[(l*blocksize+ty)*N+cx];
__syncthreads();
// now loop over shared mem
for (int k=0;k<blocksize;k++)
c_temp+=as[ty][k]*bs[k][tx];
__syncthreads();
}
c[cy*N+cx]=c_temp;
}
// Reference host implementation: c = a * b for N x N row-major matrices,
// using the textbook triple loop.
void mm_cpu(float *a, float *b, float *c, int N){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            float acc = 0.0f;
            for (int k = 0; k < N; k++)
                acc += a[row*N + k] * b[k*N + col];
            c[row*N + col] = acc;
        }
    }
}
// Multiplies two N x N matrices on the GPU and on the host, compares the
// results within a tolerance, and reports both timings.
int main(void)
{
    float *a_h, *b_h, *c_h ,*c2_h; // host data
    float *a_d, *b_d, *c_d; // device data
    float delta = 0.1f; // comparison tolerance
    int nBytes, i;
    dim3 dimBlock(blocksize,blocksize);
    dim3 dimGrid(ceil(N/(float)blocksize),ceil(N/(float)blocksize));
    struct timeval t1, t2, t3;
    long cgpu, chost;
    nBytes = N*N*sizeof(float);
    a_h = (float *)malloc(nBytes);
    b_h = (float *)malloc(nBytes);
    c_h = (float *)malloc(nBytes);
    c2_h = (float *)malloc(nBytes);
    CUDA_CHECK(cudaMalloc((void **) &a_d, nBytes));
    CUDA_CHECK(cudaMalloc((void **) &b_d, nBytes));
    CUDA_CHECK(cudaMalloc((void **) &c_d, nBytes));
    for (i=0; i<N*N; i++) {
        a_h[i] = 1.0f + 0.001*i;
        b_h[i] = 5.0f + 0.0001*i;
    }
    gettimeofday(&t1,NULL);
    CUDA_CHECK(cudaMemcpy(a_d, a_h, nBytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(b_d, b_h, nBytes, cudaMemcpyHostToDevice));
    mm_gpu<<<dimGrid,dimBlock>>>(a_d,b_d,c_d);
    CUDA_CHECK(cudaGetLastError()); // fix: launch-configuration errors were unchecked
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaMemcpy(c_h, c_d, nBytes, cudaMemcpyDeviceToHost));
    gettimeofday(&t2,NULL);
    mm_cpu(a_h,b_h,c2_h,N);
    gettimeofday(&t3,NULL);
    // for (i=0; i< N*N; i++) printf("%d,%d: %f %f\n",i/N,i%N,c_h[i],c2_h[i]);
    // Fix: abs() is the integer overload, which truncates the float
    // difference toward zero and masks sub-1.0 mismatches; use fabsf.
    for (i=0; i< N*N; i++) if (fabsf(c_h[i]-c2_h[i])>delta) printf("Result incorrect! %d,%d: %f %f\n",i/N,i%N,c_h[i],c2_h[i]);
    free(a_h); free(b_h); free(c_h); free(c2_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d);
    cgpu=(t2.tv_sec - t1.tv_sec)*1000000 + (t2.tv_usec - t1.tv_usec);
    chost = (t3.tv_sec - t2.tv_sec)*1000000 + (t3.tv_usec - t2.tv_usec);
    printf( "%13ld microseconds on GPU\n", cgpu );
    printf( "%13ld microseconds on host\n", chost );
    return 0;
}
|
6,408 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: the nested loops accumulate
// assorted float expressions into `comp` and print the final value.
// NOTE: every for-loop reuses the name `i`; each inner declaration shadows
// the outer one, so the loops nest exactly as written. This is the
// generator's intended output, not a target for "fixing".
__global__
void compute(float comp, int var_1,int var_2,int var_3,int var_4,int var_5,int var_6,float var_7,float var_8,float* var_9,float* var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
for (int i=0; i < var_4; ++i) {
float tmp_1 = -0.0f;
comp += tmp_1 + +1.7959E5f - -0.0f;
comp += -1.4499E34f + var_7 - coshf(+1.2568E-11f * (+1.5309E-41f - var_8));
// var_9/var_10 must point to at least var_5 device-accessible floats.
for (int i=0; i < var_5; ++i) {
var_9[i] = atan2f(-1.3134E-27f - (+1.0253E-36f / -0.0f * var_11 + var_12), var_13 - (+1.5368E35f - var_14 + -1.3070E-27f));
var_10[i] = var_15 + (-1.7340E28f / (-1.8622E35f * (-1.3255E36f - +1.6146E-37f)));
comp += var_10[i] - var_9[i] - floorf((-1.8950E8f * (+1.8632E34f / var_16)));
comp = -1.4070E-42f - (-1.1692E-42f * +1.1446E-35f * (+0.0f + (var_17 - -1.3687E34f)));
}
for (int i=0; i < var_6; ++i) {
float tmp_2 = +1.2110E-3f;
comp = tmp_2 + -1.9469E-14f / sqrtf(expf(+1.7079E35f));
comp += +1.5617E-41f + var_18 / (var_19 / (+1.3114E-44f + var_20));
comp += (var_21 * -0.0f + (var_22 * (var_23 - +1.9176E-41f / +1.8814E35f)));
}
}
}
}
}
// Device-side printf; output appears after the host synchronizes.
printf("%.17g\n", comp);
}
// Allocates a 10-element float buffer with every slot set to v.
// The caller owns (and must free) the returned pointer.
float* initPointer(float v) {
    const int count = 10;
    float *buf = (float*) malloc(sizeof(float) * count);
    for (int idx = 0; idx < count; ++idx)
        buf[idx] = v;
    return buf;
}
// Parses 24 numeric CLI arguments and launches the generated compute kernel.
int main(int argc, char** argv) {
    /* Program variables */
    // Fix: the kernel needs argv[1]..argv[24]; the original read them
    // unconditionally and crashed when fewer were supplied.
    if (argc < 25) {
        fprintf(stderr, "usage: %s <24 numeric arguments>\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    int tmp_4 = atoi(argv[4]);
    int tmp_5 = atoi(argv[5]);
    int tmp_6 = atoi(argv[6]);
    int tmp_7 = atoi(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float* tmp_10 = initPointer( atof(argv[10]) );
    float* tmp_11 = initPointer( atof(argv[11]) );
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    // Fix: tmp_10/tmp_11 are host malloc() buffers; dereferencing them in
    // the kernel is an invalid device access. Copy them to device memory.
    float *d_tmp_10 = NULL, *d_tmp_11 = NULL;
    cudaMalloc((void**)&d_tmp_10, 10 * sizeof(float));
    cudaMalloc((void**)&d_tmp_11, 10 * sizeof(float));
    cudaMemcpy(d_tmp_10, tmp_10, 10 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_tmp_11, tmp_11, 10 * sizeof(float), cudaMemcpyHostToDevice);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,d_tmp_10,d_tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
    cudaDeviceSynchronize(); // flush the kernel's printf before exiting
    cudaFree(d_tmp_10);
    cudaFree(d_tmp_11);
    free(tmp_10);
    free(tmp_11);
    return 0;
}
|
6,409 | #include "includes.h"
// Element-wise copy B[i] = A[i] for i < size, flattening a 2D grid of 1D
// blocks into a single linear index.
__global__ void Copy_A_to_B (float * A , float * B , int size){
    const int idx = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    if (idx < size)
        B[idx] = A[idx];
}
6,410 | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <thrust/scan.h>
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
typedef unsigned long long int size_int;
using namespace std;
/* this GPU kernel function is used to initialize the random states */
/* this GPU kernel function is used to initialize the random states */
// One block per RNG state: the seed is shared, the block index serves as
// the per-state sequence number, and the sequence offset is zero.
__global__ void init(unsigned int seed, curandState_t* states) {
    curand_init(seed, blockIdx.x, 0, &states[blockIdx.x]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
/* this GPU kernel takes an array of states and fills numbers with one
   uniform (0,1] sample per block, drawn from that block's own state */
__global__ void randoms(curandState_t* states, float* numbers) {
    const int slot = blockIdx.x;
    numbers[slot] = curand_uniform(&states[slot]);
}
// Lower-bound binary search per thread: for each of the N random numbers,
// find the first index i in the length-n prefix_sum array with
// prefix_sum[i] >= numbers[tid], and record it in sample_idx[tid].
__global__ void binary_search_id(size_int *sample_idx, float *numbers, float *prefix_sum, unsigned int N, size_int n){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;
    const float key = numbers[tid];
    int lo = 0;
    int hi = n - 1;
    while (lo < hi) {
        const int mid = (lo + hi) / 2;
        if (prefix_sum[mid] < key)
            lo = mid + 1;
        else
            hi = mid;
    }
    sample_idx[tid] = hi;
}
/*
void random_generator(unsigned int N, float *cpu_nums)
{
//CUDA's random number library uses curandState_t to keep track of the seed value we will store a random state for every thread
curandState_t* states;
// allocate space on the GPU for the random states
cudaMalloc((void**) &states, N * sizeof(curandState_t));
// invoke the GPU to initialize all of the random states
init<<<N, 1>>>(time(0), states);
// allocate an array of unsigned ints on the CPU and GPU
float* gpu_nums;
cudaMalloc((void**) &gpu_nums, N * sizeof(float));
// invoke the kernel to get some random numbers
randoms<<<N, 1>>>(states, gpu_nums, 100);
// copy the random numbers back
cudaMemcpy(cpu_nums, gpu_nums, N * sizeof(float), cudaMemcpyDeviceToHost);
// free the memory we allocated for the states and numbers
cudaFree(states);
cudaFree(gpu_nums);
}
*/
// Draws N indices from [0, n) with probability proportional to weights[i]:
// builds an inclusive prefix sum of the weights on the host, generates N
// uniform samples on the GPU (seeded from time(0), so runs are not
// reproducible), and binary-searches each sample into the prefix sums.
// Results are written to the host array sample_idx.
void random_weight_sample_cuda(unsigned int N, size_int *sample_idx, float *weights, size_int n){
    //Compute the prefix sum of weights
    // Fix: was a variable-length stack array `float prefix_sum_weights[n]`,
    // a non-standard extension that overflows the stack for 64-bit n.
    float *prefix_sum_weights = new float[n];
    thrust::inclusive_scan(weights, weights + n, prefix_sum_weights); // out-place scan
    // Generate N random numbers, between (0,1]
    curandState_t* states;
    /* allocate space on the GPU for the random states */
    cudaMalloc((void**) &states, N * sizeof(curandState_t));
    /* invoke the GPU to initialize all of the random states */
    init<<<N, 1>>>(time(0), states);
    /* allocate an array of floats on the GPU */
    float* gpu_nums;
    cudaMalloc((void**) &gpu_nums, N * sizeof(float));
    /* invoke the kernel to get some random numbers */
    randoms<<<N, 1>>>(states, gpu_nums);
    //allocate gpu arrays for the prefix sums and the result indices
    float* d_weights;
    cudaMalloc((void**) &d_weights, n * sizeof(float));
    size_int* d_sample_idx;
    cudaMalloc((void**) &d_sample_idx, N * sizeof(size_int));
    //copy the prefix-sum array to the device
    cudaMemcpy(d_weights, prefix_sum_weights, sizeof(float) * n, cudaMemcpyHostToDevice);
    int block_size = 256;
    int grid_size = (N + block_size - 1)/block_size; // ensure that we call enough threads
    binary_search_id<<<grid_size, block_size>>>(d_sample_idx, gpu_nums, d_weights, N, n);
    //copy d_sample_idx back to CPU (blocking copy; synchronizes the kernel)
    cudaMemcpy(sample_idx, d_sample_idx, N * sizeof(size_int), cudaMemcpyDeviceToHost);
    /* free the memory we allocated for the states and numbers */
    cudaFree(states);
    cudaFree(gpu_nums);
    cudaFree(d_weights);
    cudaFree(d_sample_idx);
    delete[] prefix_sum_weights;
}
|
6,411 | // file: memoryCheck.cu
// Intentionally-faulting kernel for memory-checker demos: the caller passes
// a pointer that was never allocated with cudaMalloc, so this store raises
// an illegal-address error (visible under compute-sanitizer / cuda-memcheck).
__global__ void badMemoryReference(int *A) {
A[threadIdx.x] = 0; // line 3 - faulting store
}
// Demo driver: deliberately launches the kernel with a bogus device address
// so it faults. validPtr shows what a proper allocation looks like but is
// intentionally left unused — do not "fix" this by passing validPtr.
int main() {
/*
int *invalidPtr = 0x0234; // pointer arbitrarily chosen,
// not allocated via cudaMalloc()
*/
int *invalidPtr = reinterpret_cast<int *>(0x0234);
int *validPtr = 0;
cudaMalloc((void **)&validPtr, sizeof(int)*64);
badMemoryReference<<< dim3(1,1), dim3(64, 1) >>>( invalidPtr );
return 0;
}
|
6,412 | /*MIT License
Copyright (c) 2019 Xavier Martinez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Minimal host/device vector-math helpers: componentwise arithmetic for
// float3/int3/uint3 plus componentwise min/max. All are pure functions
// returning a new value; scalar overloads broadcast the scalar across all
// three components.
inline __host__ __device__ float3 operator-(const float3 &a, const float3 &b) {
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
}
inline __host__ __device__ float3 operator+(const float3 &a, const float3 &b) {
return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
}
inline __host__ __device__ float3 operator/(float3 a, float3 b)
{
return make_float3(a.x / b.x, a.y / b.y, a.z / b.z);
}
inline __host__ __device__ float3 operator/(float3 a, float b)
{
return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __host__ __device__ float3 operator/(float b, float3 a)
{
return make_float3(b / a.x, b / a.y, b / a.z);
}
inline __host__ __device__ float3 operator*(float3 a, float3 b)
{
return make_float3(a.x * b.x, a.y * b.y, a.z * b.z);
}
inline __host__ __device__ float3 operator*(float3 a, float b)
{
return make_float3(a.x * b, a.y * b, a.z * b);
}
inline __host__ __device__ float3 operator*(float b, float3 a)
{
return make_float3(b * a.x, b * a.y, b * a.z);
}
// Unary negation.
inline __host__ __device__ float3 operator-(float3 &a)
{
return make_float3(-a.x, -a.y, -a.z);
}
inline __host__ __device__ int3 operator-(int3 a, int b)
{
return make_int3(a.x - b, a.y - b, a.z - b);
}
// Componentwise max/min (float and int variants).
inline __host__ __device__ float3 fmaxf(float3 a, float3 b)
{
return make_float3(fmaxf(a.x,b.x), fmaxf(a.y,b.y), fmaxf(a.z,b.z));
}
inline __host__ __device__ float3 fminf(float3 a, float3 b)
{
return make_float3(fminf(a.x,b.x), fminf(a.y,b.y), fminf(a.z,b.z));
}
inline __host__ __device__ int3 max(int3 a, int3 b)
{
return make_int3(max(a.x,b.x), max(a.y,b.y), max(a.z,b.z));
}
inline __host__ __device__ int3 operator+(int3 a, int b)
{
return make_int3(a.x + b, a.y + b, a.z + b);
}
inline __host__ __device__ uint3 operator+(uint3 a, uint3 b)
{
return make_uint3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __host__ __device__ int3 min(int3 a, int3 b)
{
return make_int3(min(a.x,b.x), min(a.y,b.y), min(a.z,b.z));
} |
6,413 | #include "includes.h"
// Fills a planar RGB image (3 * inputWidth * inputHeight floats) with a
// solid color. Grid layout: blockIdx.x = row, blockIdx.y = channel
// (0=R, 1=G, 2=B), blockIdx.z * blockDim.x + threadIdx.x = column.
__global__ void DrawRgbBackgroundKernel(float *target, int inputWidth, int inputHeight, float r, float g, float b)
{
    const int column = threadIdx.x + blockDim.x * blockIdx.z;
    if (column >= inputWidth)
        return;
    const int id = inputWidth * (blockIdx.y * gridDim.x + blockIdx.x) + column;
    const int imagePixels = inputWidth * inputHeight;
    if (id >= 3 * imagePixels) // 3 planes for RGB
        return;
    // Pick the channel's color; anything past channel 2 writes 0 (matches
    // the original switch's fall-through default).
    float color = 0.0f;
    if (blockIdx.y == 0)      color = r;
    else if (blockIdx.y == 1) color = g;
    else if (blockIdx.y == 2) color = b;
    target[id] = color;
}
6,414 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Checks the Collatz conjecture for every integer in this block's slice of
// [0, n]. Each block handles a contiguous chunk of ceil(n/blockSize) values;
// intended launch is <<<blockSize, 1>>>. A value whose orbit has not reached
// 1 within 1000 steps is treated as a suspected counterexample and written to
// *counterEx (last writer wins if several blocks find one concurrently).
__global__ void testCollatz(long n, long blockSize, long* counterEx) {
long lowRange = ceil(n * 1.0 / blockSize) * blockIdx.x;
long highRange = ceil(n * 1.0 / blockSize) * (blockIdx.x + 1);
long i;
for (i = lowRange; i < highRange && i <= n; i++) {
long temp = i;
int iteration = 0;
// 0 never reaches 1; skip it.
if (temp == 0) continue;
while (temp != 1) {
iteration++;
// Give up after 1000 steps and record i. NOTE(review): this breaks only
// the inner while loop; the block keeps testing the rest of its range.
if (iteration >= 1000) {
*counterEx = i;
break;
}
// One Collatz step: halve if even, else 3n+1.
if (temp % 2 == 0) temp /= 2;
else temp = (3 * temp) + 1;
}
}
}
// Driver: verifies Collatz up to N (argv[1]) using B blocks (argv[2]) and
// reports either the elapsed time or the first counterexample found.
int main(int argc, char**argv){
    long N, B;                       // N: verification range, B: block count
    long* h_counterEx, *d_counterEx;
    // Both argv[1] and argv[2] are required; the original checked argc >= 2
    // and then read argv[2] out of bounds when only one argument was given.
    if (argc >= 3) {
        N = strtol(argv[1], NULL, 10);
        B = strtol(argv[2], NULL, 10);
    } else {
        return -1;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    h_counterEx = (long*)malloc(sizeof(long));
    *h_counterEx = -1;               // -1 means "no counterexample found"
    cudaMalloc((void**) &d_counterEx, sizeof(long));
    cudaMemcpy(d_counterEx, h_counterEx, sizeof(long), cudaMemcpyHostToDevice);
    testCollatz<<<B,1>>>(N, B, d_counterEx);
    // The blocking copy also synchronizes with the kernel before we read the flag.
    cudaMemcpy(h_counterEx, d_counterEx, sizeof(long), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime = -1;
    cudaEventElapsedTime(&elapsedTime,start, stop);
    if (*h_counterEx == -1) {
        printf("Verifying %ld took %f s\n", N, elapsedTime / 1000.0);
    } else {
        printf("Found a counterexample: %ld\n", *h_counterEx);
    }
    // Release resources (the original leaked both buffers).
    cudaFree(d_counterEx);
    free(h_counterEx);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
6,415 | #include "includes.h"
// Copies the walker state xxC into xxCP, one thread per element.
// i indexes the dimension (fast axis), j indexes the walker; kr is unused here.
__global__ void TestpermuteWalkers ( const int dim, const int nwl, const int *kr, const float *xxC, float *xxCP ) {
    const int i = threadIdx.x + blockDim.x * blockIdx.x;   // dimension index
    const int j = threadIdx.y + blockDim.y * blockIdx.y;   // walker index
    if ( i >= dim || j >= nwl )
        return;
    const int idx = i + j * dim;
    xxCP[idx] = xxC[idx];
} |
6,416 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, N).
__global__ void vecAdd( float* A, float* B, float* C, int N )
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx >= N )
        return;
    C[idx] = A[idx] + B[idx];
}
// Host driver: fills two random vectors, adds them on the GPU through one
// dedicated stream (async H2D copies, kernel, async D2H copy), then prints
// the first few results as a sanity check.
// NOTE(review): no CUDA error checking anywhere; a failed malloc or launch
// would go unnoticed. The host buffers come from malloc (pageable), so the
// "async" copies fall back to staged, mostly synchronous transfers;
// cudaMallocHost would enable real overlap.
int main(void)
{
srand(time(0));
int N = 1024*1024;
size_t sz = N*sizeof(float);
float* hA = (float*)malloc(sz);
float* hB = (float*)malloc(sz);
float* hC = (float*)malloc(sz);
// Byte-sized random values keep the printed sums easy to eyeball.
for( int i=0; i<N; i++ )
{
hA[i] = rand() & 0xff;
hB[i] = rand() & 0xff;
hC[i] = 0;
}
float* dA, *dB, *dC;
cudaMalloc( (void **)&dA, sz );
cudaMalloc( (void **)&dB, sz );
cudaMalloc( (void **)&dC, sz );
cudaStream_t stream;
cudaStreamCreate(&stream);
// All work is queued on one stream, so copies and kernel are implicitly
// ordered; only the final synchronize blocks the host.
cudaMemcpyAsync( dA, hA, sz, cudaMemcpyHostToDevice, stream );
cudaMemcpyAsync( dB, hB, sz, cudaMemcpyHostToDevice, stream );
// N is an exact multiple of 1024, so this grid covers every element.
vecAdd<<<N/1024, 1024, 0, stream>>>(dA, dB, dC, N);
cudaMemcpyAsync( hC, dC, sz, cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize( stream );
for( int i=0; i<10; i++ )
{
printf("%8f + %8f = %8f\n", hA[i], hB[i], hC[i]);
}
free(hA);
free(hB);
free(hC);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
cudaStreamDestroy(stream);
return 0;
}
|
6,417 | /*------------------------------------------------------------------------------
Copyright © 2015 by Nicola Bombieri
H-BF is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* Univerity of Verona, Dept. of Computer Science
* federico.busato@univr.it
*/
#include <cuda_runtime.h>
#include <fstream>
#include <iostream>
#include <string>
#include <stdlib.h>
#include <fstream>
// Queries device 0 and prints its SM count and compute capability. If the
// NUM_OF_SM environment variable is not already set, appends export lines
// for NUM_OF_SM and CUDA_ARCH to the user's ~/.bashrc.
int main() {
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
std::cout << std::endl << "Number of Streaming Multiprocessors:\t" << devProp.multiProcessorCount
<< std::endl << " Compute Cabability:\t" << devProp.major << devProp.minor << '0'
<< std::endl << std::endl;
if (std::getenv("NUM_OF_SM") == NULL) {
std::ofstream file;
// NOTE(review): std::ofstream does not throw on open failure unless
// exceptions() is enabled, so this catch is effectively dead and a
// missing/unwritable ~/.bashrc is silently ignored (the writes below
// then go to a failed stream).
try {
file.open(std::string(std::getenv("HOME")).append("/.bashrc").c_str(), std::fstream::app);
}
catch (std::ios_base::failure &fail) {
std::cout << "An exception occurred: bashrc not found" << std::endl;
}
file << std::endl << "export NUM_OF_SM=" << devProp.multiProcessorCount
<< std::endl << "export CUDA_ARCH=" << devProp.major << devProp.minor << "0" << std::endl;
file.close();
std::cout << "please exec: source ~/.bashrc" << std::endl << std::endl;
}
}
|
6,418 | #include "includes.h"
// Copies row `row_id` (passed as a float, truncated to int) of the row-major
// matrix A into the vector out0. One thread per column.
__global__ void Matrix_getRow_FloatId_naive(const float * A , int Acount, int Acols, float * out0 , int out0count, int out0cols, const float row_id)
{
    const int col = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
    if (col >= Acols)
        return;
    const int row = (int)row_id;   // float id truncated toward zero, as before
    out0[col] = A[row * Acols + col];
} |
6,419 | #include "includes.h"
// localbuf[idx] = ptrd[offset_0 + idx] + ptrd[offset_1 + idx], added lane by
// lane over the float4 components, for idx in [0, N).
__global__ void sum_2( float4 *localbuf, float4 *ptrd, int offset_0, int offset_1, int N ) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx >= N )
        return;
    const float4 lhs = ptrd[ offset_0 + idx ];
    const float4 rhs = ptrd[ offset_1 + idx ];
    float4 sum;
    sum.x = lhs.x + rhs.x;
    sum.y = lhs.y + rhs.y;
    sum.z = lhs.z + rhs.z;
    sum.w = lhs.w + rhs.w;
    localbuf[ idx ] = sum;
} |
6,420 | #ifndef _SHERMANMORRISON_KERNEL_
#define _SHERMANMORRISON_KERNEL_
#endif |
// Machine-generated (compiler/transpiler output): copies one element per
// thread of a matrix with a fixed row stride of 2048 floats, staging each
// value through a 128-float shared buffer indexed by threadIdx.x.
// NOTE(review): there is no __syncthreads() between the shared write and the
// read-back, but each thread only reads the slot it wrote itself, so no
// cross-thread exchange occurs.
// The p*-prefixed variables and `lNNNN:` labels are SSA/phi artifacts of the
// generating tool; do not hand-edit the dataflow.
extern "C" {
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x() { return blockIdx.x; }
__device__ inline int blockIdx_y() { return blockIdx.y; }
__device__ inline int blockIdx_z() { return blockIdx.z; }
__device__ inline int blockDim_x() { return blockDim.x; }
__device__ inline int blockDim_y() { return blockDim.y; }
__device__ inline int blockDim_z() { return blockDim.z; }
__device__ inline int gridDim_x() { return gridDim.x; }
__device__ inline int gridDim_y() { return gridDim.y; }
__device__ inline int gridDim_z() { return gridDim.z; }
__global__ void lambda_11973(float*, float*);
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_11973(float* _11976_13141, float* _11977_13142) {
float* shared_13150;
float* pshared_13150;
int threadIdx_x_13156;
int pthreadIdx_x_13156;
int blockDim_x_13162;
int pblockDim_x_13162;
int blockIdx_x_13168;
int pblockIdx_x_13168;
int threadIdx_y_13174;
int pthreadIdx_y_13174;
int blockDim_y_13180;
int pblockDim_y_13180;
int blockIdx_y_13186;
int pblockIdx_y_13186;
int _13200;
int p_13200;
int threadIdx_x_13207;
int pthreadIdx_x_13207;
int blockDim_x_13210;
int pblockDim_x_13210;
int blockIdx_x_13213;
int pblockIdx_x_13213;
int threadIdx_y_13216;
int pthreadIdx_y_13216;
int blockDim_y_13219;
int pblockDim_y_13219;
int blockIdx_y_13222;
int pblockIdx_y_13222;
int _13225;
int p_13225;
// 128-float staging buffer, one slot per thread of the 128-thread block.
__shared__ float reserver_shared_13150[128];
pshared_13150 = reserver_shared_13150;
l13148: ;
shared_13150 = pshared_13150;
threadIdx_x_13156 = threadIdx_x();
pthreadIdx_x_13156 = threadIdx_x_13156;
l13154: ;
threadIdx_x_13156 = pthreadIdx_x_13156;
blockDim_x_13162 = blockDim_x();
pblockDim_x_13162 = blockDim_x_13162;
l13160: ;
blockDim_x_13162 = pblockDim_x_13162;
blockIdx_x_13168 = blockIdx_x();
pblockIdx_x_13168 = blockIdx_x_13168;
l13166: ;
blockIdx_x_13168 = pblockIdx_x_13168;
threadIdx_y_13174 = threadIdx_y();
pthreadIdx_y_13174 = threadIdx_y_13174;
l13172: ;
threadIdx_y_13174 = pthreadIdx_y_13174;
blockDim_y_13180 = blockDim_y();
pblockDim_y_13180 = blockDim_y_13180;
l13178: ;
blockDim_y_13180 = pblockDim_y_13180;
blockIdx_y_13186 = blockIdx_y();
pblockIdx_y_13186 = blockIdx_y_13186;
l13184: ;
blockIdx_y_13186 = pblockIdx_y_13186;
// Compute global (row, col) and load src[row * 2048 + col].
int _13188;
_13188 = blockDim_y_13180 * blockIdx_y_13186;
int _13191;
_13191 = blockDim_x_13162 * blockIdx_x_13168;
int _13189;
_13189 = threadIdx_y_13174 + _13188;
int _13192;
_13192 = threadIdx_x_13156 + _13191;
int _13190;
_13190 = 2048 * _13189;
int _13193;
_13193 = _13190 + _13192;
float* _13194;
_13194 = _11976_13141 + _13193;
float _13195;
_13195 = *_13194;
// Stage the loaded value in shared[threadIdx.x].
_13200 = threadIdx_x();
p_13200 = _13200;
l13198: ;
_13200 = p_13200;
float* _13201;
_13201 = shared_13150 + _13200;
float _13203;
_13203 = _13195;
*_13201 = _13203;
// Recompute the same (row, col) and store the staged value to dst.
threadIdx_x_13207 = threadIdx_x();
pthreadIdx_x_13207 = threadIdx_x_13207;
l13205: ;
threadIdx_x_13207 = pthreadIdx_x_13207;
blockDim_x_13210 = blockDim_x();
pblockDim_x_13210 = blockDim_x_13210;
l13208: ;
blockDim_x_13210 = pblockDim_x_13210;
blockIdx_x_13213 = blockIdx_x();
pblockIdx_x_13213 = blockIdx_x_13213;
l13211: ;
blockIdx_x_13213 = pblockIdx_x_13213;
threadIdx_y_13216 = threadIdx_y();
pthreadIdx_y_13216 = threadIdx_y_13216;
l13214: ;
threadIdx_y_13216 = pthreadIdx_y_13216;
blockDim_y_13219 = blockDim_y();
pblockDim_y_13219 = blockDim_y_13219;
l13217: ;
blockDim_y_13219 = pblockDim_y_13219;
blockIdx_y_13222 = blockIdx_y();
pblockIdx_y_13222 = blockIdx_y_13222;
l13220: ;
blockIdx_y_13222 = pblockIdx_y_13222;
_13225 = threadIdx_x();
p_13225 = _13225;
l13223: ;
_13225 = p_13225;
int _13229;
_13229 = blockDim_y_13219 * blockIdx_y_13222;
float* _13226;
_13226 = shared_13150 + _13225;
int _13230;
_13230 = threadIdx_y_13216 + _13229;
int _13232;
_13232 = blockDim_x_13210 * blockIdx_x_13213;
float _13227;
_13227 = *_13226;
int _13231;
_13231 = 2048 * _13230;
int _13233;
_13233 = threadIdx_x_13207 + _13232;
float _13236;
_13236 = _13227;
int _13234;
_13234 = _13231 + _13233;
float* _13235;
_13235 = _11977_13142 + _13234;
*_13235 = _13236;
return ;
}
} |
6,422 | #include "mode.hh"
#include <cstdlib>
#include <cstring>
#include "../cpu/kernels.hh"
namespace
{
// Maps the RT_MODE environment variable onto a ProgramMode. Unset or
// unrecognized values (including "CPU") fall back to single-threaded CPU.
ProgramMode compute_mode()
{
    const char* mode = getenv("RT_MODE");
    if (mode == nullptr)
        return ProgramMode::MONOTHREAD;
    if (strcmp(mode, "MCPU") == 0)
    {
        //cpu::kernels_init();
        return ProgramMode::MULTITHREAD;
    }
    if (strcmp(mode, "GPU") == 0)
        return ProgramMode::GPU;
    // "CPU" and anything else both mean mono-threaded.
    return ProgramMode::MONOTHREAD;
}
}
// Lazily computes the program mode on first call and caches it afterwards.
ProgramMode program_mode()
{
    static ProgramMode cached = ProgramMode::UNDEFINED;
    if (cached == ProgramMode::UNDEFINED)
        cached = compute_mode();
    return cached;
}
|
6,423 | #include "includes.h"
using namespace std;
// Single-thread scalar addition: stores *a + *b into *c.
__global__ void addition(int *a, int *b, int *c)
{
    c[0] = a[0] + b[0];
} |
6,424 | #include <chrono>
#include <iostream>
//Kernel definition
// Grid-stride copy of N elements from `in` to `out`; correct for any
// grid/block configuration.
template<typename T>
__global__
void copyKernel (T* out,
                 T* in,
                 const unsigned int N)
{
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride)
    {
        out[i] = in[i];   // the (T*) casts in the original were redundant
    }
}
// Fills out[i] = i, one element per thread, using the global thread index.
// Fix: the original indexed with threadIdx.x only, so every block wrote the
// same first blockDim.x slots; for any previously valid single-block launch
// (blockIdx.x == 0) behavior is unchanged.
template<typename T>
__global__
void initKernel (T* out)
{
    const unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    out[i] = i;
}
// Initializes a device buffer of 2^29 ints, copies it back to pinned host
// memory, and prints every element.
int main () {
    using namespace std::chrono;
    unsigned int N = 1<<29;
    void* out;   // pinned host buffer
    void* in;    // device buffer
    auto err1 = cudaMallocHost(&out, N*4);
    auto err2 = cudaMalloc(&in, N*4);
    if (err1 != cudaSuccess || err2 != cudaSuccess) {
        std::cout << "allocation failed" << std::endl;
        return 1;
    }
    // Fix: the original launched <<<1, N>>>, which exceeds the 1024-threads-
    // per-block limit and fails to launch; cover N with a proper grid.
    initKernel<<<N/1024, 1024>>> (static_cast<int*> (in));
    cudaDeviceSynchronize();
    // Fix: the original call was cudaMemcpy(in, out, ...) — dst/src swapped —
    // so the host buffer was never filled and uninitialized memory was
    // printed. Copy device -> host as the kind argument states.
    cudaMemcpy(out, in, N*4, cudaMemcpyDeviceToHost);
    for(unsigned int i = 0; i < N; i++) {
        std::cout<< i << "," << static_cast<int*>(out)[i] << std::endl;
    }
    return 0;
}
|
6,425 | //#include <iostream>
//#include <assert.h>
//
//#include "Device.h"
//#include "RayTracing.h"
//#include "Sphere.h"
//#include "cudaTools.h"
//
//#include <limits>
//
//using std::cout;
//using std::endl;
//
///* ========== DECLARATION ========== */
//
//extern __global__ void rayTracing(uchar4* ptrDevPixels, uint w, uint h, float t, uint nbSphere);
//
///* ---------- PUBLIC ---------- */
//
//RayTracing::RayTracing(const Grid &grid, uint width, uint height, float dt, uint nbSphere) :
// Animable_I<uchar4>(grid, width, height, "RayTracing Roulin")
//{
// // time
// this->t = 0;
// this->dt = dt;
//
// // Inputs
// this->nbSphere = nbSphere;
// this->spheres = new Sphere[nbSphere];
//
// // Init spheres
// float margin = 200.f;
// for(int i = 0; i < this->nbSphere; i++)
// {
// float3 center;
// center.x = randomFloat(margin, width - margin);
// center.y = randomFloat(margin, height - margin);
// center.z = randomFloat(10.f, 2.f * width);
//
// float radius = randomFloat(20.f, w / 10.f);
// float hue = randomFloat(0.f, 1.f);
//
// this->spheres[i] = Sphere(center, radius, hue);
// }
//}
//
//RayTracing::~RayTracing(void)
//{
// delete[] spheres;
//}
//
///* ~~~~~~~~~~ METHODS ~~~~~~~~~~ */
///**
// * Override
// * Call periodicly by the API
// *
// * Note : domaineMath pas use car pas zoomable
// */
//void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
//{
// Device::lastCudaError("raytracing rgba uchar4 (before)"); // facultatif, for debug only, remove for release
//
// // start kernel
// rayTracing<<<dg, db>>>(ptrDevPixels, w, h, t, nbSphere);
//
// Device::lastCudaError("raytracing rgba uchar4 (after)"); // facultatif, for debug only, remove for release
//}
//
///* ~~~~~~~~~~ OVERRIDES ~~~~~~~~~~ */
//
//void RayTracing::animationStep()
//{
// this->t += dt;
//}
//
//
//float RayTracing::randomFloat(float min, float max)
//{
// float random = ((float) rand()) / (float) RAND_MAX;
// float diff = max - min;
// float r = random * diff;
// return min + r;
//}
///**
// * Override (code naturel omp)
// */
|
6,426 | #include <iostream>
#include <string>
#include <sstream>
#include <fstream>
#include <algorithm>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <cuda.h>
#include <unistd.h>
//#define _DEBUG_
//#define _TIME_MEASURE_
#ifdef _DEBUG_
#include <string>
#include <sstream>
int __print_step = 0;
void __pt_log(const char *h_, const char *f_, ...){
std::stringstream ss;
ss << h_ << f_ << '\n';
std::string format = ss.str();
va_list va;
va_start(va, f_);
vprintf(format.c_str(), va);
va_end(va);
__print_step++;
}
#define VA_ARGS(...) , ##__VA_ARGS__
#define LOG(f_, ...) __pt_log(\
"[LOG] Step %3d: ", (f_), \
__print_step VA_ARGS(__VA_ARGS__))
#else
#define LOG(f_, ...)
#endif
#define INF 1000000000
#define CEIL(a, b) (( (a) - 1 ) / (b) + 1 )
int **Dist;
int *data;
int block_size;
int vert, edge;
int vert2;
// Allocates the vert x vert distance matrix (row pointers into one flat
// buffer), initialized to INF with a zero diagonal, and clamps the global
// block_size to the matrix dimension.
inline void init(){
    vert2 = vert*vert;
    data = new int[vert2];
    Dist = new int*[vert];
    std::fill(data, data + vert2, INF);
    for(int r = 0; r < vert; ++r){
        Dist[r] = data + r*vert;
        Dist[r][r] = 0;            // zero distance to self
    }
    if(block_size > vert){
        block_size = vert;
    }
}
// Releases the distance-matrix storage allocated by init().
inline void finalize(){
    delete[] data;
    delete[] Dist;
}
// Reads the graph from `file` (a "vert edge" header followed by `edge`
// triples "i j w"), allocates the matrix via init(), and fills in the edges.
void dump_from_file_and_init(const char *file){
    std::ifstream fin(file);
    std::stringstream ss;
    ss << fin.rdbuf();          // slurp the whole file, then parse in memory
    fin.close();
    ss >> vert >> edge;
    LOG("vert: %d, edge: %d", vert, edge);
    init();
    int i, j, w;
    while(--edge >=0){
        ss >> i >> j >> w;
        Dist[i][j] = w;
    }
}
// Writes the distance matrix to `file` as raw binary int values.
void dump_to_file(const char *file){
    std::ofstream fout(file);
    fout.write(reinterpret_cast<const char*>(data), sizeof(int) * vert2);
    fout.close();
}
// Kernel that intentionally does no work beyond the bounds check.
// NOTE(review): it is not launched anywhere in this file; presumably meant
// as a context warm-up so later timings exclude driver init — confirm.
__global__ void init_gpu(int reps){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= reps) return;
}
// Phase 1 of blocked Floyd-Warshall: relaxes the pivot block (round, round)
// in place. Launched with a single block_size x block_size thread block.
// Padding cells beyond `vert` are set to INF so they never win a min.
// NOTE(review): within each k-iteration dist[cell] is read and written by
// different threads with only the trailing __syncthreads() as ordering;
// correctness relies on the blocked-FW property that the pivot row/column
// values read at step `low` are not reduced during that same step.
__global__ void phase_one(int32_t* const dist, int block_size, int round, int width, int vert){
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int c = block_size * round + ty;
const int r = block_size * round + tx;
const int cell = c*width+r;
if(c >= vert || r >= vert){ //out of bounds, filled in with INF for each element
dist[cell] = INF;
}
__syncthreads();
int low = block_size * round;
int up = low + block_size;
int n;
for( ; low<up ; ++low){
// min(dist[c][r], dist[c][low] + dist[low][r])
n = dist[ c*width+low ] + dist[ low*width+r ];
if(n < dist[cell]){
dist[cell] = n;
}
__syncthreads();
}
}
// Phase 2 of blocked Floyd-Warshall: relaxes the pivot row (blockIdx.y == 0)
// and pivot column (blockIdx.y == 1) blocks against the already-finished
// pivot block. blockIdx.x enumerates the Round-1 non-pivot blocks; the
// `++bx` shift skips over the pivot index itself.
__global__ void phase_two(int32_t* const dist, int block_size, int round, int width, int vert){
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int c;
int r;
if(bx >= round)++bx; //shift past the pivot block index
if(by == 0){ //horizontal: a block in the pivot row
c = block_size * round + ty;
r = block_size * bx + tx;
}else{ //vertical: a block in the pivot column
c = block_size * bx + ty;
r = block_size * round + tx;
}
int cell = c * width + r;
if(c >= vert || r >= vert){ //out of bounds, filled in with INF for each element
dist[cell] = INF;
}
__syncthreads();
int low = round*block_size;
int up = low + block_size;
int n;
for( ; low<up ; ++low){
// min(dist[c][r], dist[c][low] + dist[low][r])
n = dist[ c*width+low ] + dist[ low*width+r ];
if(n < dist[cell]){
dist[cell] = n;
}
__syncthreads();
}
}
// Phase 3 of blocked Floyd-Warshall: relaxes every remaining block against
// the pivot row and column finished in phases 1-2. Both block indices are
// shifted past the pivot index.
__global__ void phase_three(int32_t* const dist, int block_size, int round, int width, int vert){
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
if(bx >= round)++bx; //shift x past the pivot column of blocks
if(by >= round)++by; //shift y past the pivot row of blocks
const int c = block_size * by + ty;
const int r = block_size * bx + tx;
const int cell = c*width + r;
if(c >= vert || r >= vert){ //out of bounds, filled in with INF for each element
dist[cell] = INF;
}
__syncthreads();
int low = round * block_size;
int up = low + block_size;
int n;
for( ; low<up ; ++low){
// min(dist[c][r], dist[c][low] + dist[low][r])
n = dist[ c*width+low ] + dist[ low*width+r ];
if(n < dist[cell]){
dist[cell] = n;
}
__syncthreads();
}
}
extern __shared__ int S[];
// Runs blocked Floyd-Warshall on the GPU. The matrix is padded to a multiple
// of block_size (padding cells are written as INF by the kernels), copied to
// the device with a 2D pitch copy, processed in Round pivot rounds of three
// phases, and copied back into `data`.
void block_FW(){
#ifdef _TIME_MEASURE_
    auto start = std::chrono::high_resolution_clock::now();
#endif
    int Round = CEIL(vert, block_size);
    int padded_size = Round * block_size;
    size_t vert_w_bytes = vert * sizeof(int);
    size_t padded_w_bytes = padded_size * sizeof(int);
    int32_t *device_ptr;
    dim3 p2b(Round-1, 2, 1);              // phase 2: pivot-row and pivot-column blocks
    dim3 p3b(Round-1, Round-1, 1);        // phase 3: all remaining blocks
    dim3 dimt(block_size, block_size, 1); // threads per block
    cudaMalloc(&device_ptr, padded_w_bytes * padded_size);
    LOG("the number of blocks: %d", Round);
    // 2D copy handles the vert-row -> padded-row pitch difference.
    cudaMemcpy2D(device_ptr, padded_w_bytes, data, vert_w_bytes,
    vert_w_bytes, vert, cudaMemcpyHostToDevice);
    for(int r=0; r < Round; ++r){
        LOG("Round %d/%d", r+1, Round);
        phase_one<<< 1 , dimt >>>(device_ptr, block_size, r, padded_size, vert);
        phase_two<<< p2b , dimt >>>(device_ptr, block_size, r, padded_size, vert);
        phase_three<<< p3b , dimt >>>(device_ptr, block_size, r, padded_size, vert);
    }
    cudaMemcpy2D(data, vert_w_bytes, device_ptr, padded_w_bytes,
    vert_w_bytes, vert, cudaMemcpyDeviceToHost);
    cudaFree(device_ptr);
#ifdef _TIME_MEASURE_
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> diff = end - start;
    double elapsed_time = diff.count() * 1000;
    // Fix: compute the FLOP count in double — the old 2*vert*vert*vert
    // overflowed int for vert larger than ~1024.
    printf("Total time: %f ms (%f GFLOPS)\n", elapsed_time, 2.0*vert*vert*vert / (elapsed_time * 1e6));
#endif
}
// Usage: <prog> <input_graph> <output_file> <block_size>
int main(int argc, char **argv){
    if(argc < 4){
        fprintf(stderr, "usage: %s input output block_size\n", argv[0]);
        return 1;
    }
    // Fix: block_size must be set before dump_from_file_and_init() runs,
    // because init() clamps block_size against vert. The original assigned
    // it from argv[3] only afterwards, so the clamp always saw the
    // zero-initialized global and never took effect.
    block_size = std::atoi(argv[3]);
    dump_from_file_and_init(argv[1]);
    block_FW();
    dump_to_file(argv[2]);
    finalize();
    return 0;
}
|
6,427 | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <stdexcept>
#include <vector>
#include <chrono>
#include <time.h>
double gpu_time_used;
#define I(row, col, ncols) (row * ncols + col)
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
// Squared Euclidean distance from every point to every centroid.
// Launch: <<<n, k>>> — blockIdx.x selects the point, threadIdx.x the centroid.
__global__ void get_dst(float *dst, float *x, float *y,
float *mu_x, float *mu_y){
    const int pt = blockIdx.x;
    const int c = threadIdx.x;
    const float dx = x[pt] - mu_x[c];
    const float dy = y[pt] - mu_y[c];
    dst[I(pt, c, blockDim.x)] = dx * dx + dy * dy;
}
// Assigns each point (blockIdx.x) to its nearest centroid.
// Group ids are 1-based (cluster j is stored as j + 1).
__global__ void regroup(int *group, float *dst, int k){
    const int pt = blockIdx.x;
    float best = dst[I(pt, 0, k)];
    int bestId = 1;
    for(int c = 1; c < k; ++c){
        const float d = dst[I(pt, c, k)];
        if(d < best){
            best = d;
            bestId = c + 1;
        }
    }
    group[pt] = bestId;
}
// Zeroes the per-cluster accumulators; one thread per cluster.
__global__ void clear(float *sum_x, float *sum_y, int *nx, int *ny){
    const int c = threadIdx.x;
    sum_x[c] = 0;
    sum_y[c] = 0;
    nx[c] = 0;
    ny[c] = 0;
}
// Accumulates coordinate sums and member counts per cluster. Each thread
// owns one cluster and scans all n points sequentially.
__global__ void recenter_step1(float *sum_x, float *sum_y, int *nx, int *ny,
float *x, float *y, int *group, int n){
    const int c = threadIdx.x;
    for(int pt = 0; pt < n; ++pt){
        if(group[pt] == (c + 1)){
            sum_x[c] += x[pt];
            sum_y[c] += y[pt];
            nx[c]++;
            ny[c]++;
        }
    }
}
// New centroid = coordinate sum / member count.
// NOTE(review): an empty cluster (nx == 0) produces a 0/0 NaN centroid.
__global__ void recenter_step2(float *mu_x, float *mu_y, float *sum_x,
float *sum_y, int *nx, int *ny){
    const int c = threadIdx.x;
    mu_x[c] = sum_x[c]/nx[c];
    mu_y[c] = sum_y[c]/ny[c];
}
// Runs nreps k-means iterations: distances -> assignment -> accumulator
// reset -> accumulation -> centroid update. All launches go to the default
// stream, so the kernels execute strictly in order.
void kmeans(int nreps, int n, int k,
float *x_d, float *y_d, float *mu_x_d, float *mu_y_d,
int *group_d, int *nx_d, int *ny_d,
float *sum_x_d, float *sum_y_d, float *dst_d){
    for(int rep = 0; rep < nreps; ++rep){
        get_dst<<<n,k>>>(dst_d, x_d, y_d, mu_x_d, mu_y_d);
        regroup<<<n,1>>>(group_d, dst_d, k);
        clear<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d);
        recenter_step1<<<1,k>>>(sum_x_d, sum_y_d, nx_d, ny_d, x_d, y_d, group_d, n);
        recenter_step2<<<1,k>>>(mu_x_d, mu_y_d, sum_x_d, sum_y_d, nx_d, ny_d);
    }
}
void read_data(float **x, float **y, float **mu_x, float **mu_y, int *n, int *k,char* arg);
void print_results(int *group, float *mu_x, float *mu_y, int n, int k,char* argv);
// Driver: loads points (argv[2]) and initial centroids, runs 100 k-means
// iterations on the GPU via managed memory, reports the elapsed time, and
// writes the results (argv[3] names the output files).
int main(int argc,char* argv[]){
    /* cpu variables */
    int n; /* number of points */
    int k; /* number of clusters */
    float *x = NULL, *y = NULL, *mu_x = NULL, *mu_y = NULL;
    /* gpu variables (unified/managed memory, visible to host and device) */
    int *group_d, *nx_d, *ny_d;
    float *x_d, *y_d, *mu_x_d, *mu_y_d, *sum_x_d, *sum_y_d, *dst_d;
    /* read data from files on cpu */
    read_data(&x, &y, &mu_x, &mu_y, &n, &k,argv[2]);
    /* allocate gpu memory */
    cudaMallocManaged(&group_d,n*sizeof(int));
    cudaMallocManaged(&nx_d, k*sizeof(int));
    cudaMallocManaged(&ny_d, k*sizeof(int));
    cudaMallocManaged(&x_d, n*sizeof(float));
    cudaMallocManaged(&y_d, n*sizeof(float));
    cudaMallocManaged(&mu_x_d, k*sizeof(float));
    cudaMallocManaged(&mu_y_d, k*sizeof(float));
    cudaMallocManaged(&sum_x_d, k*sizeof(float));
    cudaMallocManaged(&sum_y_d, k*sizeof(float));
    cudaMallocManaged(&dst_d, n*k*sizeof(float));
    memcpy(x_d, x, n*sizeof(float));
    /* Fix: the original copied x into y_d, so the y coordinates were never
     * used and clustering effectively ran on (x, x). */
    memcpy(y_d, y, n*sizeof(float));
    memcpy(mu_x_d, mu_x, k*sizeof(float));
    memcpy(mu_y_d, mu_y, k*sizeof(float));
    /* perform kmeans */
    const auto start = std::chrono::high_resolution_clock::now();
    kmeans(100, n, k, x_d, y_d, mu_x_d, mu_y_d, group_d, nx_d, ny_d, sum_x_d, sum_y_d, dst_d);
    /* Fix: kernel launches are asynchronous — wait for them before stopping
     * the clock and before the host reads the managed buffers below. */
    cudaDeviceSynchronize();
    const auto end = std::chrono::high_resolution_clock::now();
    const auto duration =
    std::chrono::duration_cast<std::chrono::duration<float>>(end - start);
    std::cerr << "CUDA Took: " << duration.count() << "s" << " for "<<argv[3]<<" points." << std::endl;
    gpu_time_used = duration.count();
    /* print results and clean up */
    print_results(group_d, mu_x_d, mu_y_d, n, k,argv[3]);
    free(x);
    free(y);
    free(mu_x);
    free(mu_y);
    CUDA_CALL(cudaFree(x_d));
    CUDA_CALL(cudaFree(y_d));
    CUDA_CALL(cudaFree(mu_x_d));
    CUDA_CALL(cudaFree(mu_y_d));
    CUDA_CALL(cudaFree(group_d));
    CUDA_CALL(cudaFree(nx_d));
    CUDA_CALL(cudaFree(ny_d));
    CUDA_CALL(cudaFree(sum_x_d));
    CUDA_CALL(cudaFree(sum_y_d));
    CUDA_CALL(cudaFree(dst_d));
    return 0;
}
/* Reads point coordinates (one "x y" pair per line) from `arg` into *x / *y
 * and the initial centroids from ../../data/kmeans/initCoord.txt into
 * *mu_x / *mu_y. Outputs the point count in *n and cluster count in *k.
 * The arrays are grown with realloc, so the caller passes NULL pointers. */
void read_data(float **x, float **y, float **mu_x, float **mu_y, int *n, int *k,char* arg){
    FILE *fp;
    char buf[64];
    *n = 0;
    fp = fopen(arg, "r");
    if(fp == NULL){                       /* was unchecked: crashed on fgets */
        fprintf(stderr, "cannot open %s\n", arg);
        exit(EXIT_FAILURE);
    }
    while(fgets(buf, 64, fp) != NULL){
        *n += 1;
        *x = (float*) realloc(*x, (*n)*sizeof(float));
        *y = (float*) realloc(*y, (*n)*sizeof(float));
        std::istringstream line_stream(buf);
        float x1,y1;
        line_stream >> x1 >> y1;
        (*x)[*n - 1] = x1;
        (*y)[*n - 1] = y1;
    }
    fclose(fp);
    *k = 0;
    fp = fopen("../../data/kmeans/initCoord.txt", "r");
    if(fp == NULL){
        fprintf(stderr, "cannot open ../../data/kmeans/initCoord.txt\n");
        exit(EXIT_FAILURE);
    }
    while(fgets(buf, 64, fp) != NULL){
        *k += 1;
        *mu_x = (float*) realloc(*mu_x, (*k)*sizeof(float));
        *mu_y = (float*) realloc(*mu_y, (*k)*sizeof(float));
        std::istringstream line_stream(buf);
        float x1,y1;
        line_stream >> x1 >> y1;
        (*mu_x)[*k - 1] = x1;
        /* Fix: the original stored x1 here, giving every initial centroid a
         * y coordinate equal to its x coordinate. */
        (*mu_y)[*k - 1] = y1;
    }
    fclose(fp);
}
/* Writes per-point cluster assignments and final centroids to
 * result/cuda/<arg>_*.txt and appends the measured GPU time to CUDAtimes.txt.
 * Assumes the result/cuda directory exists; fopen results are not checked.
 * NOTE(review): when called with managed-memory pointers, the caller must
 * synchronize the device first so host reads see the kernels' writes. */
void print_results(int *group, float *mu_x, float *mu_y, int n, int k,char* arg){
FILE *fp;
int i;
std::string str(arg),str1,str2;
str = "result/cuda/" + str;
str1 = str + "_group_members.txt";
fp = fopen(str1.c_str(), "w");
for(i = 0; i < n; ++i){
fprintf(fp, "%d\n", group[i]);
}
fclose(fp);
str2 = str + "_centroids.txt";
fp = fopen(str2.c_str(), "w");
for(i = 0; i < k; ++i){
fprintf(fp, "%0.6f %0.6f\n", mu_x[i], mu_y[i]);
}
fclose(fp);
fp = fopen("CUDAtimes.txt", "a");
fprintf(fp, "%0.6f\n", gpu_time_used);
fclose(fp);
}
|
6,428 | #include <stdio.h>
#include <curand_kernel.h>
extern "C"
// Device helper: larger of two ints.
__device__ int get_max(int x,int y){
    return (x > y) ? x : y;
}
extern "C"
// Device helper: smaller of two ints.
__device__ int get_min(int x,int y){
    return (x < y) ? x : y;
}
extern "C"
// dest = 2 * a * b, element-wise over an interleaved rows x cols x channels
// image. One thread per pixel; each thread walks its channels.
__global__ void multiply_them(float *dest, float *a, float *b,int rows,int cols , int channels)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= rows || col >= cols)
        return;
    const int base = channels * (row * cols + col);
    for (int ch = 0; ch < channels; ch++){
        // 2.0 is a double literal, matching the original's arithmetic exactly
        dest[base + ch] = 2.0 * a[base + ch] * b[base + ch];
    }
}
extern "C"
// Debug kernel: fills every in-range cell of a rows x cols matrix with 15.0.
__global__ void multiply_them_2d(float *dest, float *a, float *b,int rows,int cols)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= rows || col >= cols)
        return;
    dest[row * cols + col] = 15.0;
}
extern "C"
// Debug helper: prints a marker from device code.
__device__ void testing(){
    printf("OK");
}
extern "C"
// Bidirectional patch distance between patch (ax, ay) of image pair (a, aa)
// and patch (bx, by) of image pair (b, bb): sums squared differences of
// a-vs-bb and aa-vs-b over a patch_size x patch_size window, averaged over
// the pixels where both patches are in bounds.
// NOTE(review): the bounds tests use `> 0`, which excludes row 0 and
// column 0 — possibly an off-by-one (`>= 0` would include them); confirm
// against the intended border handling.
// NOTE(review): if the two windows never overlap the images, num_points
// stays 0 and the return value is 0/0 (NaN).
__device__ float compute_distance(float *a,
float *aa,
float *b,
float *bb,
int rows,
int cols,
int channels,
int patch_size,
int ax,
int ay,
int bx,
int by){
int num_points = 0;
float pixel_sum = 0;
float temp_distance = 0;
int curr_pix_ax = 0;
int curr_pix_ay = 0;
int curr_pix_bx = 0;
int curr_pix_by = 0;
for(int y = -patch_size/2 ; y <= patch_size/2 ; y++ ){
for(int x = -patch_size/2 ; x <= patch_size/2 ; x++){
curr_pix_ax = ax + x;
curr_pix_ay = ay + y;
curr_pix_bx = bx + x;
curr_pix_by = by + y;
// Only accumulate where both patch pixels are inside their images.
if ( curr_pix_ax > 0 && curr_pix_ax < cols && curr_pix_ay > 0 && curr_pix_ay < rows
&&
curr_pix_bx > 0 && curr_pix_bx < cols && curr_pix_by > 0 && curr_pix_by < rows ){
for(int ch = 0 ; ch < channels ; ch++){
temp_distance = a[channels*(curr_pix_ay*cols + curr_pix_ax ) +ch]
- bb[channels*(curr_pix_by*cols + curr_pix_bx ) +ch] ;
pixel_sum += temp_distance * temp_distance;
temp_distance = aa[channels*(curr_pix_ay*cols + curr_pix_ax ) +ch]
- b[channels*(curr_pix_by*cols + curr_pix_bx ) +ch] ;
pixel_sum += temp_distance * temp_distance;
}
num_points ++;
}
}
}
return pixel_sum / num_points;
}
extern "C"
// Evaluates candidate NNF offset (bx_new, by_new) for patch (x, y) and, if it
// beats the current best distance, updates *best_x / *best_y / *best_d.
// nnf and nnd are accepted for signature compatibility but not used here;
// the caller commits the winning candidate itself.
__device__ void compare_and_update(float *a,
float *aa,
float *b,
float *bb,
int rows,
int cols ,
int channels,
int patch_size,
int *nnf,
float *nnd,
int x,
int y,
int bx_new,
int by_new,
int *best_x,
int *best_y,
float *best_d)
{
    const float candidate = compute_distance(a, aa, b, bb, rows, cols, channels, patch_size, x, y, bx_new, by_new);
    if (candidate < *best_d) {
        *best_x = bx_new;
        *best_y = by_new;
        *best_d = candidate;
    }
}
extern "C"
// Returns a uniform random float in (0, 1].
// NOTE(review): calling curand_init on every invocation is very expensive,
// and seeding with clock() makes runs non-reproducible. Persisting one
// curandState per thread and seeding once would be the usual pattern.
__device__ float get_rand(){
int tId = threadIdx.x + (blockIdx.x * blockDim.x);
curandState state;
curand_init((unsigned long long)clock() + tId, 0, 0, &state);
float rand1 = (float)curand_uniform_double(&state);
return rand1;
}
extern "C"
// PatchMatch-style nearest-neighbour-field refinement. Each thread owns one
// pixel (row = blockIdx.x..., col = blockIdx.y...), seeds nnd from its
// current NNF entry, then for `iters` iterations alternates jump tests
// (perturbing its own NNF entry by +/-jump) with a shrinking random search,
// committing the best candidate back into nnf/nnd.
// NOTE(review): there is no row/col bounds guard — the launch grid must
// exactly cover rows x cols or the nnf/nnd indexing reads out of range.
// NOTE(review): the four "test" branches below perturb this pixel's own NNF
// entry rather than reading a neighbour's, so the up/down/left/right labels
// are approximate.
__global__ void patch_match(float *a,
float *aa,
float *b,
float *bb,
int *nnf,
float *nnd,
int rows,
int cols ,
int channels,
int patch_size,
int iters,
int jump_size)
{
printf("%f",get_rand());
int xmin, xmax, ymin, ymax;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
int init_x = nnf[2*(row*cols + col) + 0 ];
int init_y = nnf[2*(row*cols + col) + 1 ];
nnd[row*cols + col] = compute_distance(a,aa,b,bb,rows,cols,channels,patch_size,col,row,init_x,init_y) ;
for(int i = 0 ; i < iters; i++){
for(int jump = jump_size ; jump >0 ; jump /=2){
int best_x = nnf[2*(row*cols + col) + 0 ];
int best_y = nnf[2*(row*cols + col) + 1 ];
float best_d = nnd[row*cols + col];
//test up
if (row - jump >=0){
int test_x = nnf[2*(row*cols + col) + 0 ];
int test_y = nnf[2*(row*cols + col) + 1 ] + jump;
if(test_y < rows)
{
compare_and_update(a,
aa,
b,
bb,
rows,
cols ,
channels,
patch_size,
nnf,
nnd,
col,
row,
test_x,
test_y,
&best_x,
&best_y,
&best_d);
}
}
// test down (offset perturbed by -jump)
if (row + jump < rows){
int test_x = nnf[2*(row*cols + col) + 0 ];
int test_y = nnf[2*(row*cols + col) + 1 ] - jump;
if(test_y >=0)
{
compare_and_update(a,
aa,
b,
bb,
rows,
cols ,
channels,
patch_size,
nnf,
nnd,
col,
row,
test_x,
test_y,
&best_x,
&best_y,
&best_d);
}
}
//test left
if (col - jump >=0){
int test_x = nnf[2*(row*cols + col) + 0 ] +jump;
int test_y = nnf[2*(row*cols + col) + 1 ];
if(test_x < cols)
{
compare_and_update(a,
aa,
b,
bb,
rows,
cols ,
channels,
patch_size,
nnf,
nnd,
col,
row,
test_x,
test_y,
&best_x,
&best_y,
&best_d);
}
}
//test right
if (col + jump < cols){
int test_x = nnf[2*(row*cols + col) + 0 ] -jump;
int test_y = nnf[2*(row*cols + col) + 1 ];
if(test_x >=0)
{
compare_and_update(a,
aa,
b,
bb,
rows,
cols ,
channels,
patch_size,
nnf,
nnd,
col,
row,
test_x,
test_y,
&best_x,
&best_y,
&best_d);
}
}
// Random search: sample candidates in windows that halve each round,
// clamped to the image via get_max/get_min.
int rs_start = 500;
if (rs_start > get_max(cols, rows)) {
rs_start = get_max(cols, rows);
}
for (int mag = rs_start; mag >= 1; mag /= 2) {
xmin = get_max(best_x - mag, 0), xmax = get_min(best_x + mag + 1, cols);
ymin = get_max(best_y - mag, 0), ymax = get_min(best_y + mag + 1, rows);
int test_x = xmin + (int)(get_rand()*(xmax - xmin)) % (xmax - xmin);
int test_y = ymin + (int)(get_rand()*(ymax - ymin)) % (ymax - ymin);
compare_and_update(a,
aa,
b,
bb,
rows,
cols ,
channels,
patch_size,
nnf,
nnd,
col,
row,
test_x,
test_y,
&best_x,
&best_y,
&best_d);
}
// Commit the winning candidate for this jump level.
nnf[2*(row*cols + col) + 0] = best_x;
nnf[2*(row*cols + col) + 1] = best_y;
nnd[1*(row*cols + col) ] = best_d;
// All threads iterate identical loop bounds, so this barrier is uniform.
__syncthreads();
}
}
}
|
6,429 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <algorithm>
#include <curand.h>
#define MAXTHREADS 512u
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
using namespace std;
// Prints a readable message for a failed CUDA call and optionally aborts
// with the error code as the exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Prints n unsigned ints separated by spaces, followed by a newline.
void printArray(unsigned int* arr, int n){
    for(int i = 0; i < n; ++i)
        printf("%u ", arr[i]);
    printf("\n");
}
// Debug helper: copies `length` uints from device memory and prints them.
void printFromDevice(unsigned int* d_array, int length){
    unsigned int *host = new unsigned int[length];
    cudaMemcpy(host, d_array, length*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printArray(host, length);
    delete[] host;
}
// Smallest power of two >= n; returns n unchanged when it is already a
// power of two. nextPowerOf2(0) returns 1.
unsigned int nextPowerOf2(unsigned int n){
    if (n != 0u && (n & (n - 1u)) == 0u)
        return n;                  // already a power of two
    unsigned int bits = 0u;
    while (n != 0u) {
        n >>= 1;
        ++bits;
    }
    return 1u << bits;
}
// Adds the scanned per-chunk offsets (d_sum) back onto the partial scan
// results in d_in. `grid` is the number of chunks d_sum was built over;
// blockIdx.x*grid/gridDim.x maps this block to the chunk whose offset
// applies to its elements.
__global__
void addIncrements(unsigned int* const d_in, unsigned int* const d_sum, const size_t length, const int grid){
int gid=blockIdx.x*blockDim.x+threadIdx.x;
if(gid<length){
d_in[gid]+=d_sum[blockIdx.x*grid/gridDim.x];
}
}
// Work-efficient (Blelloch) exclusive scan of one 2*blockDim.x chunk in
// dynamic shared memory. Each thread loads two elements (zero-padded past
// n), the block's total is written to d_sum[blockIdx.x], and the exclusive
// prefix sums are written back to d_out.
__global__
void exclusiveSum(unsigned int *d_out, unsigned int *d_in, unsigned int *d_sum, int n){
extern __shared__ unsigned int temp[];
int tid=threadIdx.x;
int gid=blockDim.x*blockIdx.x+tid;
temp[2*tid]=(2*gid<n)? d_in[2*gid]: 0u;
temp[2*tid+1]=(2*gid+1<n)? d_in[2*gid+1]: 0u;
unsigned int offset=1u;
unsigned int p=2u*blockDim.x;
// up-sweep (reduce): build partial sums bottom-up
// (the original comment said "downsweep" — the labels were swapped
// relative to Blelloch's terminology)
for(unsigned d=p>>1; d>0; d>>=1){
__syncthreads();
if(tid<d){
int ai=offset*(2*tid+1)-1;
int bi=offset*(2*tid+2)-1;
temp[bi]+=temp[ai];
}
offset<<=1;
}
// record the chunk total, then clear the root for the exclusive scan
if(tid==0){
d_sum[blockIdx.x]=temp[p-1];
temp[p-1]=0;
}
// down-sweep: distribute prefix sums back down the tree
for(unsigned d=1; d<p; d<<=1){
offset>>=1;
__syncthreads();
if(tid<d){
int ai=offset*(2*tid+1)-1;
int bi=offset*(2*tid+2)-1;
unsigned int t=temp[ai];
temp[ai]=temp[bi];
temp[bi]+=t;
}
}
__syncthreads();
//write results to device memory
if(2*gid<n){
d_out[2*gid]=temp[2*tid];
}
if(2*gid+1<n){
d_out[2*gid+1]=temp[2*tid+1];
}
}
// In-place exclusive scan of d_in[0..length-1] on the device.
// Each exclusiveSum block scans a chunk of 2*block elements and emits its
// chunk total into d_sum; when more than one chunk exists, the totals are
// scanned recursively and added back onto the chunks by addIncrements.
// NOTE(review): the kernel launches and recursion depth are unchecked —
// a launch failure inside the recursion would go unnoticed.
void exclusiveScan(unsigned int* const d_in, const size_t length){
unsigned int *d_sum;
int n=(length+1)/2; // each thread of exclusiveSum handles two elements
int block=min(MAXTHREADS, nextPowerOf2(n));
int grid=(n+block-1)/block;
checkCudaErrors(cudaMalloc((void**)&d_sum, grid*sizeof(unsigned int)));
exclusiveSum<<<grid, block, 2*block*sizeof(unsigned int)>>>(d_in, d_in, d_sum, length);
if(grid>1){
exclusiveScan(d_sum, grid); // scan the per-chunk totals
int b=min(MAXTHREADS, nextPowerOf2(length));
int g=(length+b-1)/b;
addIncrements<<<g, b>>>(d_in, d_sum, length, grid);
}
cudaFree(d_sum);
}
// Mark a 1 in the scatter table for each element's digit: the table is a
// radix-by-length matrix laid out row-major as d_scatter[digit*length + i].
__global__
void getPredicate(unsigned int *d_in, unsigned int *d_scatter, size_t length, int radix, int shift){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= length) return;
    const int digit = (d_in[idx] >> shift) & (radix - 1);
    d_scatter[digit * length + idx] = 1u;
}
// Move each element to its sorted position for this digit pass: after the
// exclusive scan, d_scatter[digit*length + i] holds the destination index.
__global__
void radixScatter(unsigned int *d_in, unsigned int *d_out, unsigned int *d_scatter, size_t length, int radix, int shift){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= length) return;
    const unsigned int key = d_in[idx];
    const int digit = (key >> shift) & (radix - 1);
    d_out[d_scatter[digit * length + idx]] = key;
}
// LSD radix sort of d_in[0..length-1] on the device, consuming
// log2(radix) bits of the 32-bit keys per pass.
// Requires radix to be a power of two (the digit mask is radix-1).
void radixSort(unsigned int *d_in, size_t length, int radix){
unsigned int *d_out, *d_scatter;
checkCudaErrors(cudaMalloc((void**)&d_out, length*sizeof(unsigned int)));
// one predicate row per digit value
checkCudaErrors(cudaMalloc((void**)&d_scatter, radix*length*sizeof(unsigned int)));
int block=min(MAXTHREADS, nextPowerOf2(length));
int grid=(length+block-1)/block;
int jump=(int)log2(radix); // bits consumed per pass
for(int i=0; i<8*sizeof(unsigned int); i+=jump){
checkCudaErrors(cudaMemset(d_scatter, 0u, radix*length*sizeof(unsigned int)));
getPredicate<<<grid, block>>>(d_in, d_scatter, length, radix, i);
exclusiveScan(d_scatter, radix*length); // predicate -> destination index
radixScatter<<<grid, block>>>(d_in, d_out, d_scatter, length, radix, i);
checkCudaErrors(cudaMemcpy(d_in, d_out, length*sizeof(unsigned int), cudaMemcpyDeviceToDevice));
}
cudaFree(d_out);
cudaFree(d_scatter);
}
// Histogram of a SORTED input array: each thread walks a contiguous run of
// ept elements and issues one atomicAdd per run of equal values rather than
// one per element (run-length compression of the atomics).
// NOTE(review): assumes n is divisible by gridDim.x*blockDim.x; if ept were
// 0, `val` would be read uninitialized at i==ept — confirm launch config.
__global__
void histogram(unsigned int* d_in, unsigned int* d_hist, int n, int numBins){
int epb=n/gridDim.x; // elements per block
int ept=epb/blockDim.x; // elements per thread
int gid=blockIdx.x*epb+threadIdx.x*ept; // start of this thread's run
unsigned int val, old=d_in[gid], count=0;
for(int i=0; i<=ept; i++){
if(i<ept){
val=d_in[gid+i];
}if(val!=old || i==ept){ // run ended (or final flush at i==ept)
atomicAdd(&(d_hist[old]), i-count); // i-count = length of the run
old=val;
count=i;
}
}
}
// Map every element into [0, n) by taking it modulo n (n = value bound,
// not the element count).
// NOTE(review): there is no bounds guard — the launch in randset rounds the
// grid up, so threads past the end of d_in write out of bounds unless the
// element count is a multiple of MAXTHREADS; consider adding a length
// parameter and an `if (gid < length)` guard.
__global__
void modMap(unsigned int* d_in, int n){
int gid=blockDim.x*blockIdx.x+threadIdx.x;
d_in[gid]%=n;
}
// Fill d_in with n pseudo-random unsigned ints in [0, m) using cuRAND.
// m: exclusive upper bound (number of bins); n: element count.
void randset(unsigned int* d_in, int m, size_t n){
curandGenerator_t generator;
curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
curandGenerate(generator, d_in, n);
modMap<<<(n+MAXTHREADS-1)/MAXTHREADS, MAXTHREADS>>>(d_in, m);
// Fix: release the generator — it was leaked on every call.
curandDestroyGenerator(generator);
}
// Driver: generate n random values in [0, numBins), radix-sort them, then
// time the run-length-compressed histogram kernel and print the bin counts.
int main(){
//Input parameters
int n=1024*10000;
int radix=2; // bits per radix pass = log2(radix) = 1
int numBins=1024;
//Generate a random device array of size n
unsigned int *d_input;
checkCudaErrors(cudaMalloc((void**)&d_input, n*sizeof(unsigned int)));
randset(d_input, numBins, n);
//Allocate a histogram on device
//Fix: check these calls like every other allocation in this file.
unsigned int *d_hist;
checkCudaErrors(cudaMalloc((void**)&d_hist, numBins*sizeof(unsigned int)));
checkCudaErrors(cudaMemset(d_hist, 0, numBins*sizeof(unsigned int)));
//Sort the array (the histogram kernel requires sorted input)
radixSort(d_input, n, radix);
//Set timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
int grid=2;
histogram<<<grid, MAXTHREADS>>>(d_input, d_hist, n, numBins);
checkCudaErrors(cudaGetLastError()); // fix: surface launch-config errors
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
printf("Time for the histogram: %f ms\n", time);
printFromDevice(d_hist, numBins);
//Fix: destroy the timing events — they were leaked.
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_input);
cudaFree(d_hist);
return 0;
}
6,430 | #include "pgm.cuh"
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
/*
Source for some of the parsing code:
http://ugurkoltuk.wordpress.com/2010/03/04/an-extreme-simple-pgm-io-api/
*/
void skipFileComments(FILE *fp);
// Load a binary (P5) PGM image into a newly malloc'd float array, row-major.
// 8-bit pixels are normalised to [0,1]; 16-bit pixels keep raw values
// (NOTE(review): inconsistent with the 8-bit branch — confirm intent).
// width/height are output parameters. Exits the process on open/format errors.
float* loadPGM(const char *filename, int *width, int *height) {
    printf("Loading image %s\n", filename);
    FILE *pgmFile = fopen(filename, "rb");
    if (pgmFile == NULL) {
        printf("pgmparse error: can't open file!\n");
        exit(-1);
    }
    char version[3];
    fgets(version, sizeof(version), pgmFile);
    if (strcmp(version, "P5")) {
        fprintf(stderr, "Wrong filetype?\n");
        exit(-1);
    }
    int rows, cols;
    int maxGrey = 0;
    skipFileComments(pgmFile);
    fscanf(pgmFile, "%d", &cols);
    skipFileComments(pgmFile);
    fscanf(pgmFile, "%d", &rows);
    skipFileComments(pgmFile);
    fscanf(pgmFile, "%d", &maxGrey);
    fgetc(pgmFile); // Skip the single whitespace byte before the pixel data
    int bytesNeeded = sizeof(float)*rows*cols;
    printf("Rows: %d and Cols: %d\n", rows, cols);
    printf("Bytes needed: %d\n", bytesNeeded);
    printf("Max greyscale color: %d\n", maxGrey);
    float *imageData = (float*) malloc(bytesNeeded);
    int i, j, lo, hi;
    if (maxGrey > 255) {
        // Two bytes per pixel, big-endian (high byte first).
        for (i = 0; i < rows; ++i) {
            for (j = 0; j < cols; ++j) {
                hi = fgetc(pgmFile);
                lo = fgetc(pgmFile);
                // Fix: index was i*rows+j, which scrambles non-square images.
                // (Also removed a stray per-row debug printf("\n").)
                imageData[i*cols+j] = (float)((hi<<8)+lo);
            }
        }
    } else {
        for (i = 0; i < rows; ++i) {
            for (j = 0; j < cols; ++j) {
                lo = fgetc(pgmFile);
                imageData[i*cols+j] = lo/255.0;
            }
        }
    }
    fclose(pgmFile);
    *width = cols;
    *height = rows;
    return imageData;
}
// Write imageData (floats expected in [0,1], row-major) to `filename`
// as a binary P5 PGM with max grey level 255.
void savePGM(const char *filename, float* imageData, int width, int height) {
    printf("Saving image %s\n", filename);
    FILE *file = fopen(filename, "w");
    if (file == NULL) {
        printf("Error creating file\n");
        exit(-1);
    }
    fprintf(file,"P5\n%d %d\n255\n", width, height);
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            fputc(255*imageData[row*width + col], file);
        }
    }
    fclose(file);
}
// Skip whitespace and '#'-comment lines in a PGM header, leaving the file
// position on the first byte of the next real token (iterative form of the
// original recursive implementation).
void skipFileComments(FILE *fp)
{
    char line[100];
    for (;;) {
        int ch = fgetc(fp);
        while (ch != EOF && isspace(ch))
            ch = fgetc(fp);
        if (ch != '#') {
            fseek(fp, -1, SEEK_CUR); // put the non-comment byte back
            return;
        }
        fgets(line, sizeof(line), fp); // discard the rest of the comment line
    }
}
|
6,431 | #include "includes.h"
// Parallel sum reduction over v[0..n-1]; the total ends up in v[0].
// Fix: the original let EVERY thread do v[i] += v[i+step], so a thread
// could read an element while its owner was still updating it in the same
// step (data race, nondeterministic result). Restricting writers to indices
// that are multiples of 2*step makes each step race-free — the element read
// at index i+step is never written during that step — and the barrier
// orders consecutive steps.
// Assumes a single-block launch with blockDim.x >= n.
__global__ void scan(int *v, const int n)
{
    int tIdx = threadIdx.x;
    int step = 1;
    while (step < n) {
        int indiceDroite = tIdx;
        int indiceGauche = indiceDroite + step;
        if ((tIdx % (2 * step)) == 0 && indiceGauche < n) {
            v[indiceDroite] = v[indiceDroite] + v[indiceGauche];
        }
        step = step * 2;
        __syncthreads(); // uniform trip count: safe for the whole block
    }
}
6,432 | #include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define NUM 1024//4096//256//1024//256//844800
#define NUM2 39//1213//4000000//250000//1212
#define batch 375//206//1//825//16//3300//1
// Pipeline: read NUM2+1 chunks of NUM*batch bytes from a binary input file,
// forward-FFT each chunk (batched 1-D complex-to-complex cuFFT), copy the
// spectrum unchanged (the dispersion-removal step is currently commented
// out), inverse-FFT, and append the normalised real part to an output file.
// Prompts (in Japanese) ask for the input and output file names on stdin.
int main(int argc,char *argv[])
{
FILE *fp;
FILE *file;
unsigned char *f_h;
int h,i,t1,t2,f,k,c;
float *f_hf;
cufftComplex *f_hc;
cufftComplex *f_Inverse;
//double s0 = 3.28281192*pow(10.0,-26.0); //e^2/mc
//double v0 = 1420; //centre frequency
//double a,n; //alpha, frequency band (MHz): -15 <= n <= 15
//double s;
float real[NUM*batch]; //REAL component of the signal after dispersion removal
//float DM[1]; //dispersion measure
char infile_name[100], outfile_name[100];
printf("入力ファイル名を入力してください。\n");
scanf("%s", infile_name);
/*
printf("DMの値を入力してください。\n");
scanf("%f", DM);
*/
printf("出力ファイル名を入力してください。\n");
scanf("%s", outfile_name);
//open the input file
fp = fopen(infile_name,"rb");
//bail out if the file could not be opened
if(fp == NULL){
printf("ファイルをオープンできませんでした。\n");
return 1;
}
//process the stream one 6.6 ms chunk (NUM*batch bytes) at a time
for(h = 0;h <= NUM2;h++){
//allocate host memory
f_h = (unsigned char *)malloc(sizeof(unsigned char)*NUM*batch);
//allocate the float staging buffer
f_hf = (float *)malloc(sizeof(float)*NUM*batch);
//allocate the cufftComplex buffers
f_hc = (cufftComplex *)malloc(sizeof(cufftComplex)*NUM*batch);
f_Inverse = (cufftComplex *)malloc(sizeof(cufftComplex)*NUM*batch);
//f_Inverser = (cufftReal *)malloc(sizeof(cufftReal)*NUM*batch);
t1 = 0;
t2 = 0;
t1 = NUM*batch*h;
t2 = NUM*batch*h + NUM*batch;
//read this chunk of binary data into the arrays
for(i = t1;i < t2;i++){
fseek(fp, i * sizeof(unsigned char), SEEK_SET);
fread(&f_h[i-t1],sizeof(unsigned char),1,fp);
f_hf[i-t1] = f_h[i-t1]; //widen unsigned char to float
f_hc[i-t1].x = f_hf[i-t1]; //float -> cufftComplex (real part)
f_hc[i-t1].y = 0;
}
free(f_hf);
free(f_h);
//fclose(fp);
cufftComplex *f_d;
//allocate device memory
cudaMalloc((void **)&f_d, sizeof(cufftComplex)*NUM*batch);
//copy from host to device
cudaMemcpy(f_d, f_hc, sizeof(cufftComplex)*NUM*batch, cudaMemcpyHostToDevice);
cufftHandle plan;
//prepare the batched 1-D FFT (batch transforms of length NUM)
cufftPlan1d(&plan, NUM, CUFFT_C2C, batch);
//run the forward transform in place
cufftExecC2C(plan, f_d, f_d, CUFFT_FORWARD);
//copy from device back to host
cudaMemcpy(f_hc, f_d, sizeof(cufftComplex)*NUM*batch, cudaMemcpyDeviceToHost);
cudaFree(f_d);
cufftDestroy(plan);
/* dispersion removal (currently disabled) */
/*
int e = 0;
s = s0/(v0*v0*v0);
a = s*DM[0];
n = -15+(t - 216)*0.125;
//store the data and cut to the band width
*/
for(c = 0; c < NUM*batch; c++){
f_Inverse[c].x = f_hc[c].x;
f_Inverse[c].y = f_hc[c].y;
//printf("%d\n", t-216+240*e);
}
free(f_hc);
cufftComplex *f_dI;
//cufftReal *f_drI;
cudaMalloc((void **)&f_dI, sizeof(cufftComplex)*NUM*batch);
//cudaMalloc((void **)&f_drI, sizeof(cufftReal)*NUM*batch);
cudaMemcpy(f_dI, f_Inverse, sizeof(cufftComplex)*NUM*batch, cudaMemcpyHostToDevice);
cufftHandle planI;
cufftPlan1d(&planI, NUM, CUFFT_C2C, batch);
cufftExecC2C(planI, f_dI, f_dI, CUFFT_INVERSE);
cudaMemcpy(f_Inverse, f_dI, sizeof(cufftComplex)*NUM*batch, cudaMemcpyDeviceToHost);
cudaFree(f_dI);
//free(f_Inverse);
cufftDestroy(planI);
for(f = 0;f < NUM*batch;f++){
real[f] = (f_Inverse[f].x)/NUM; //cuFFT's inverse is unnormalised: divide by NUM
//data[f].y = f_I
//real[f] = data[f].x;
}
free(f_Inverse);
file = fopen(outfile_name,"ab");
if(file == NULL){
printf("ファイルをオープンできませんでした。\n");
return 1;
}
for(k = 0; k < NUM*batch; k++){
fwrite(&real[k],sizeof(float),1,file);
}
fclose(file);
}
fclose(fp);
return 0;
}
|
6,433 | #include "includes.h"
# define MAX(a, b) ((a) > (b) ? (a) : (b))
# define GAUSSIAN_KERNEL_SIZE 3
# define SOBEL_KERNEL_SIZE 5
# define TILE_WIDTH 32
# define SMEM_SIZE 128
// Block-level sum reduction with 4x-unrolled loads: each block consumes
// blockDim.x*4 consecutive floats of d_filteredImage and writes its partial
// sum to d_imageSumGrid[blockIdx.x] (finish the reduction on the host or
// with a second pass).
// NOTE(review): smem has SMEM_SIZE (128) entries, so blockDim.x must be a
// power of two with 64 <= blockDim.x <= 128 — the >=1024/512/256 branches
// below are dead at that size but kept for generality.
__global__ void computeSum(float *d_filteredImage, float *d_imageSumGrid, unsigned int n)
{
    __shared__ float smem[SMEM_SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
    // Fix: the original only summed when ALL four loads were in range, so up
    // to 4*blockDim.x - 1 tail elements were silently dropped from the sum.
    // Guard each of the four loads individually instead.
    float localSum = 0;
    if (idx < n)                  localSum += d_filteredImage[idx];
    if (idx + blockDim.x < n)     localSum += d_filteredImage[idx + blockDim.x];
    if (idx + 2 * blockDim.x < n) localSum += d_filteredImage[idx + 2 * blockDim.x];
    if (idx + 3 * blockDim.x < n) localSum += d_filteredImage[idx + 3 * blockDim.x];
    smem[tid] = localSum;
    __syncthreads();
    // Tree reduction in shared memory (each guard is uniform per block).
    if (blockDim.x >= 1024 && tid < 512)
        smem[tid] += smem[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256)
        smem[tid] += smem[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128)
        smem[tid] += smem[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64)
        smem[tid] += smem[tid + 64];
    __syncthreads();
    if (tid < 32)
    {
        // Warp-synchronous tail; volatile keeps values out of registers.
        volatile float *vsmem = smem;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    if (tid == 0) d_imageSumGrid[blockIdx.x] = smem[0];
}
6,434 | #include "includes.h"
// create an image buffer. return host ptr, pass out device pointer through pointer to pointer
// Per-pixel resultant of two gradient images: c = sqrt(a^2 + b^2), computed
// via the identity (a+b)^2 - 2ab, then thresholded to a binary edge value
// (148 or 0); a pixel is suppressed when its left neighbour is already 148.
// NOTE(review): reading c[idx-1] while other threads write c concurrently is
// racy — the neighbour test observes an unpredictable mix of old/new values.
__global__ void resultant(unsigned char *a, unsigned char *b, unsigned char *c)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    float opposite_side = float(a[idx]);
    float adjacent_side = float(b[idx]);
    // Figure out the hypotenuse
    c[idx] = (unsigned char) sqrtf((opposite_side + adjacent_side)*(opposite_side + adjacent_side ) - (2 * opposite_side * adjacent_side));
    // Fix: guard idx == 0 — the original read c[-1] out of bounds there.
    if ( c[idx] > 15 && (idx == 0 || c[idx - 1] != 148) )
        c[idx] = 148;
    else
        c[idx] = 0;
}
6,435 | #include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <cuda_runtime.h>
#include <stdio.h>
#define DIM 128
#include "csv.hpp"
#include "timer.h"
using namespace std;
extern __shared__ int dsmem[];
// Host-side reference reduction: repeatedly fold the upper half of the
// array onto the lower half and return the final data[0]. For odd sizes the
// middle element is dropped, matching the original recursive version.
int recursiveReduce(int *data, int const size)
{
    int remaining = size;
    while (remaining > 1)
    {
        int const half = remaining / 2;
        for (int i = 0; i < half; i++)
            data[i] += data[i + half];
        remaining = half;
    }
    return data[0];
}
// unroll4 + complete unroll for loop + gmem
// Block-wide sum reduction working directly on global memory: each block
// folds its own blockDim.x-element slice of g_idata in place (destructive)
// and writes the slice total to g_odata[blockIdx.x].
// NOTE(review): the fixed tree assumes blockDim.x is a power of two >= 64
// and that n is a multiple of blockDim.x — a partial final block would read
// past the end of the array. Confirm against the launch in main (DIM = 128).
__global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp (volatile forces re-reads from memory each step)
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Block-wide sum reduction staged through static shared memory: each block
// copies its blockDim.x-element slice into smem, reduces there (leaving
// g_idata intact), and writes the slice total to g_odata[blockIdx.x].
// NOTE(review): requires blockDim.x == DIM and, like reduceGmem, a full
// final slice (n a multiple of blockDim.x) — confirm against the launch.
__global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp (volatile forces re-reads from shared memory each step)
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// Despite the name, this SUBTRACTS the scalar B from each element:
// C[i] = A[i] - B (used by main to centre the samples around their mean).
__global__ void sumArraysOnGPU(int *A, int B, int *C, const int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    C[idx] = A[idx] - B;
}
// Read integer samples from column 1 of a CSV, reduce them on the GPU
// (global-memory and shared-memory variants), subtract the (integer) mean
// from every sample, and write "<value>,<value-mean>" rows to file "tmp".
// Usage: prog <csv_file> <N>
int main(int argc, char **argv)
{
    // Robustness fix: the original indexed argv[1]/argv[2] without checking.
    if (argc < 3)
    {
        fprintf(stderr, "usage: %s <csv_file> <N>\n", argv[0]);
        return EXIT_FAILURE;
    }
    int N = atoi(argv[2]);
    unsigned int t, travdirtime;
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("%s starting reduction at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    cudaSetDevice(dev);
    // initialization
    int size = N;   // total number of elements to reduce
    // execution configuration
    int blocksize = DIM; // initial block size
    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1) / block.x, 1);
    printf("grid %d block %d\n", grid.x, block.x);
    // allocate host memory
    size_t bytes = size * sizeof(int);
    int *h_idata = (int *) malloc(bytes);
    int *h_odata = (int *) malloc(grid.x * sizeof(int));
    int *tmp = (int *) malloc(bytes);
    int *h_stddev = (int *) malloc(bytes);
    // read the samples (column 1 of each CSV row)
    const string csv_file = std::string(argv[1]);
    vector<vector<string>> data;
    Csv objCsv(csv_file);
    if (!objCsv.getCsv(data)) {
        std::cout << "read ERROR" << std::endl;
        return 1;
    }
    // Fix: clamp to the allocated size — the original wrote past h_idata
    // whenever the CSV contained more than N rows.
    for (int row = 0; row < (int)data.size() && row < size; row++) {
        vector<string> rec = data[row];
        h_idata[row] = atoi( rec[1].c_str());
    }
    memcpy (tmp, h_idata, bytes);
    int gpu_sum = 0;
    // allocate device memory
    int *d_idata = NULL;
    int *d_odata = NULL;
    int *d_stddev = NULL;
    cudaMalloc((void **) &d_idata, bytes);
    cudaMalloc((void **) &d_odata, grid.x * sizeof(int));
    cudaMalloc((void **) &d_stddev, bytes);
    // reduce gmem
    start_timer(&t);
    cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
    reduceGmem<<<grid.x, block>>>(d_idata, d_odata, size);
    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    travdirtime = stop_timer(&t);
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("reduceGmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
           block.x);
    print_timer(travdirtime);
    // reduce smem
    start_timer(&t);
    cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
    reduceSmem<<<grid.x, block>>>(d_idata, d_odata, size);
    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    travdirtime = stop_timer(&t);
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("reduceSmem: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
    print_timer(travdirtime);
    // integer (truncating) mean, matching the kernel's int scalar parameter
    float avg = gpu_sum / N;
    start_timer(&t);
    cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
    sumArraysOnGPU<<<grid.x, block>>>(d_idata, avg, d_stddev, N);
    cudaMemcpy(h_stddev, d_stddev, bytes, cudaMemcpyDeviceToHost);
    printf("reduceArray: %f <<<grid %d block %d>>>\n", avg, grid.x, block.x);
    travdirtime = stop_timer(&t);
    print_timer(travdirtime);
    cout << "writing file..." << endl;
    std::remove("tmp");
    ofstream outputfile("tmp");
    start_timer(&t);
    for(int i = 0; i < N; i++)
        outputfile << h_idata[i] << "," << h_stddev[i] << std::endl;
    outputfile.close();
    travdirtime = stop_timer(&t);
    print_timer(travdirtime);
    // free host memory (fix: tmp and h_stddev were leaked)
    free(h_idata);
    free(h_odata);
    free(tmp);
    free(h_stddev);
    // free device memory (fix: d_stddev was leaked)
    cudaFree(d_idata);
    cudaFree(d_odata);
    cudaFree(d_stddev);
    // reset device
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
|
6,436 | #define NUM_ELEMENTS 512
// CUDA kernel to perform the reduction in parallel on the GPU
//! @param g_idata input data in global memory
// result is expected in index 0 of g_idata
//! @param n input number of elements to scan from input data
// Block-level sum reduction demonstrating three classroom reduction schemes
// selected by `selectorEsquema`; the block total is left in g_data[0].
// NOTE(review): the second-half load guards with `threadIdx.x + blockDim.x
// < n` but offsets by n/2 — this only lines up when the launch is one block
// of NUM_ELEMENTS/2 threads with n == NUM_ELEMENTS; confirm with the caller.
__global__ void reduction(float *g_data, int n)
{
int stride;
// Define shared memory
__shared__ float scratch[NUM_ELEMENTS];
// Load the shared memory (two elements per thread)
int gindex = blockIdx.x * blockDim.x + threadIdx.x;
int lindex = threadIdx.x;
scratch[ lindex ] = g_data[ gindex ];
if(threadIdx.x + blockDim.x < n)
scratch[ lindex + n/2 ] = g_data[ gindex + n/2];
__syncthreads();
int selectorEsquema = 3; // uniform constant, so the branches do not diverge
if (selectorEsquema == 1) { // SCHEME 1: sequential addressing, stride halving
// Do sum reduction on the shared memory data
for( stride=NUM_ELEMENTS/2; stride>=1; stride = stride/2 ) /* EXERCISE 2 (the reduction loop): scheme 1 is given as the worked example; change it for scheme 2 or 3 */
{
/* EXERCISE 2 (the reduction body on the next two lines, per the chosen scheme 1, 2 or 3) */
if ( lindex < stride )
scratch[ lindex ] += scratch[ lindex + stride ];
__syncthreads();
}
} else if (selectorEsquema == 2) { // SCHEME 2: interleaved addressing, stride doubling
for (stride = 1; stride < NUM_ELEMENTS; stride *= 2)
{
int i = 2 * stride * lindex;
if ( i < NUM_ELEMENTS )
scratch[ i ] += scratch[ i + stride ];
__syncthreads();
}
} else if (selectorEsquema == 3) { // SCHEME 3: fold the mirrored upper half onto the lower half
for( stride=NUM_ELEMENTS; stride>=1; stride = stride/2 )
{
if ( lindex < stride / 2 )
scratch[ lindex ] += scratch[ stride - lindex - 1 ]; // pair i with stride-1-i
__syncthreads();
}
}
// Store results back to global memory
if(threadIdx.x == 0)
g_data[0] = scratch[0];
return;
}
|
6,437 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <iostream>
// Define kernel function.
// Single-thread kernel: store the sum of the two device scalars in *device_c.
__global__ void gpuAdd(int *device_a, int *device_b, int *device_c)
{
    int sum = *device_a + *device_b;
    *device_c = sum;
}
// Add two host integers on the GPU, passing operands and result by device
// pointer, with a goto-based error path that releases all GPU buffers.
int main(int argc, char **argv)
{
    // Define host variables and device pointers.
    int host_a, host_b, host_c;
    // Fix: initialise to NULL — if an early cudaMalloc fails, the Error path
    // used to cudaFree uninitialised (garbage) pointers. cudaFree(NULL) is
    // a safe no-op.
    int *device_a = NULL, *device_b = NULL, *device_c = NULL;
    // Initialize host variables.
    host_a = 11;
    host_b = 13;
    // CUDA error handling status.
    cudaError_t cudaStatus;
    // Allocate GPU buffers for the three scalars (two input, one output).
    cudaStatus = cudaMalloc((void**)&device_c, sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed.\n");
        // Jump to the Error label.
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&device_a, sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed.\n");
        // Jump to the Error label.
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&device_b, sizeof(int));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed.\n");
        // Jump to the Error label.
        goto Error;
    }
    // Copy input values from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(device_a, &host_a, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed.\n");
        // Jump to the Error label.
        goto Error;
    }
    cudaStatus = cudaMemcpy(device_b, &host_b, sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed.\n");
        // Jump to the Error label.
        goto Error;
    }
    // Launch the kernel on the GPU with a single thread.
    gpuAdd <<< 1, 1 >>> (device_a, device_b, device_c);
    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        // Jump to the Error label.
        goto Error;
    }
    // Copy the result from GPU device to host memory (synchronises).
    cudaStatus = cudaMemcpy(&host_c, device_c, sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy failed.\n");
        // Jump to the Error label.
        goto Error;
    }
    printf("Passing parameters by reference output: %d + %d = %d\n", host_a, host_b, host_c);
Error:
    // Shared cleanup: release device memory and exit.
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
6,438 | #include <stdio.h>
#include <sys/time.h>
//////////////////////////////////////////////////////
// Simple vector addition in CUDA
//////////////////////////////////////////////////////
#define N 1024*1024 //Number of elements in the vector
// Definition of the kernel that will be executed by all threads on the GPU
// Each thread sums a contiguous chunk of n elements starting at its own
// offset (chunk-per-thread layout; note these accesses are not coalesced —
// adjacent threads touch addresses n floats apart).
__global__ void add(float *a, float *b, float *c, int n){
    int first = (blockDim.x * blockIdx.x + threadIdx.x) * n;
    int last = first + n;
    for (int i = first; i < last; ++i)
        c[i] = a[i] + b[i];
}
// Vector-addition benchmark: time the host-to-device copies and the kernel
// separately with gettimeofday, then print results and timings.
int main(void) {
    float *A, *B, *C;
    float *d_A, *d_B, *d_C;
    int size = N * sizeof(float);
    int numBlocks, numThreadsPerBlock;
    struct timeval copy_start, copy_end, process_start, process_end;
    int t_copy, t_process;
    // Memory allocation on the HOST
    A = (float*)malloc(size);
    B = (float*)malloc(size);
    C = (float*)malloc(size);
    // Initial values
    for (int i=0; i<N; i++)
    {
        A[i] = i+1;
        B[i] = (i+1)*2;
    }
    //Memory allocation on the GPU
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);
    //Copy data from HOST to GPU
    // Fix: "&copy_start"/"&copy_end" had been mangled into "©_start"/
    // "©_end" by an HTML-entity round trip ("&copy" -> "©"), which
    // does not compile.
    gettimeofday(&copy_start, NULL);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    gettimeofday(&copy_end, NULL);
    // Number of threads and blocks used to compute the kernel
    numThreadsPerBlock = 1024;
    numBlocks = 256;
    //Executing kernel function
    gettimeofday(&process_start, NULL);
    add<<<numBlocks,numThreadsPerBlock>>>(d_A,d_B,d_C,N/(numBlocks*numThreadsPerBlock));
    // Fix: kernel launches are asynchronous — without this sync the timer
    // only measured the launch overhead, not the kernel itself.
    cudaDeviceSynchronize();
    gettimeofday(&process_end, NULL);
    //Copy result from GPU to HOST
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    //Compute process and copy time (microseconds)
    t_copy = (copy_end.tv_sec - copy_start.tv_sec)*1000000 + copy_end.tv_usec - copy_start.tv_usec;
    t_process = (process_end.tv_sec - process_start.tv_sec)*1000000 + process_end.tv_usec - process_start.tv_usec;
    //Display results
    printf("\n#########################\n");
    printf("Calculation results\n");
    printf("#########################\n");
    printf("Vector A : [%f,%f, ...,%f] \n",A[0],A[1],A[N-1]);
    printf("Vector B : [%f,%f, ...,%f] \n",B[0],B[1],B[N-1]);
    printf("Vector C (result A+B) : [%f,%f, ...,%f] \n",C[0],C[1],C[N-1]);
    printf("\n#########################\n");
    printf("Performance results\n");
    printf("#########################\n");
    printf("Copy HOST to GPU time : %d uS\n", t_copy);
    printf("Kernel process time : %d uS\n", t_process);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
    return 0;
}
|
6,439 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <vector>
// Concatenate several device vectors into a single device vector,
// preserving the input order.
template<typename T>
thrust::device_vector<T> concatInSingleVector(std::vector<thrust::device_vector<T>> const& vectors)
{
    // total number of elements across all inputs
    size_t total = 0;
    for (size_t i = 0; i < vectors.size(); ++i)
        total += vectors[i].size();
    thrust::device_vector<T> result(total);
    // append each input at the running output position
    auto out = result.begin();
    for (auto const& v : vectors)
    {
        thrust::copy(v.begin(), v.end(), out);
        out += v.size();
    }
    return result;
}
// Smoke test: concatenate a vector of three 1s with a vector of four 2s.
int main()
{
    thrust::device_vector<int> threeOnes(3, 1);
    thrust::device_vector<int> fourTwos(4, 2);
    std::vector<thrust::device_vector<int>> inputs{threeOnes, fourTwos};
    auto merged = concatInSingleVector<int>(inputs);
    int size = merged.size(); // expected 7; unused beyond this check
    return 0;
}
6,440 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Print each thread's global id, its warp id within the block, and the
// flattened (row-major) block id of a 2-D grid.
__global__ void prt_details_wrp()
{
    int blockFlat = blockIdx.y * gridDim.x + blockIdx.x;
    int globalId = blockFlat * blockDim.x + threadIdx.x;
    int warpId = threadIdx.x / 32;
    printf("gid : %d, warpid : %d, flattened bid : %d\n", globalId, warpId, blockFlat);
}
// Launch the warp-detail kernel on a 2x2 grid of 42-thread blocks
// (42 is deliberately not a multiple of 32, so the last warp is partial).
int main()
{
    dim3 threads(42);
    dim3 blocks(2, 2);
    prt_details_wrp <<<blocks, threads>>> ();
    cudaDeviceSynchronize(); // wait so device printf output is flushed
    cudaDeviceReset();
    return 0;
}
6,441 | #include "includes.h"
// Row-major linear index into a matrix with `width` columns.
__device__ inline unsigned int RM_Index(unsigned int row, unsigned int col, unsigned int width) {
    return col + row * width;
}
// Normalisation step of Bernoulli naive-Bayes training, one feature column
// per thread (1-D launch): divide each per-class feature count by the class
// count to get conditional probabilities, and let the first column's thread
// also convert class counts into class priors (count / n_samples).
// NOTE(review): d_row_sums is unused here (the commented-out divisor) —
// confirm whether class_count_ is the intended denominator.
__global__ void BernoulliNBLearnKernel(float *feature_probs, float *class_count_, const float *d_row_sums, unsigned int n_samples_, unsigned int n_classes_, unsigned int n_features_) {
// Each thread will take one term
unsigned int tidx = threadIdx.x;
unsigned int feat_col = tidx + (blockIdx.x * blockDim.x);
unsigned int i = 0;
if (feat_col < n_features_) { // End condition check
// For each label
for (i = 0; i < n_classes_; ++i) {
feature_probs[RM_Index(i, feat_col, n_features_)] /=
class_count_[i]; // d_row_sums[i];
if (feat_col == 0) {
// only one thread updates the priors, so the division is not repeated
class_count_[i] = class_count_[i] / (float)n_samples_;
}
}
}
}
6,442 | /*
Authors
- Dibyadarshan Hota 16CO154
- Omkar Prabhu 16CO233
*/
#include <iostream>
#include <string>
#include <sstream>
#include <cuda.h>
#include<stdio.h>
#include <ctime>
#include <iomanip>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/device_free.h>
#define ll long long
using namespace std;
/**
* Kernel for computing Betweenness Centrality
* res: Stored in global memory variable bc
*/
// Betweenness centrality via Brandes' algorithm, one source vertex per block
// at a time. The graph is in CSR form (V = row offsets, E = adjacency).
// Per-block scratch (distance/sigma/delta, BFS queues, depth partitions)
// lives in global memory, addressed by blockIdx.x times the pitch-like
// `scale_*` byte strides. Results accumulate into bc[] with atomicAdd.
// Launch with one block per SM; *next_source must start at the block count.
__global__
void betweenness_centrality_kernel (float *bc, int nodes, int edges, const int *V, const int *E, int *Queue_curr, int *Queue_next, int *Depth_Nodes, int *Depth_Points, unsigned long long *sigma, int *distance, float *delta, int *next_source, size_t scale_distance, size_t scale_sigma, size_t scale_delta, size_t scale_Queue_curr, size_t scale_Queue_next, size_t scale_depthnodes, size_t scale_depthpoints) {
// ================================== VARIABLES INIT ============================================
// common global delta, sigma and distance pointers for
// offset by blockId times scale for each block to get its storing space
int *s_distance = (int*)((char*)distance + blockIdx.x*scale_distance);
unsigned long long *s_sigma = (unsigned long long*)((char*)sigma + blockIdx.x*scale_sigma);
float *s_delta = (float*)((char*)delta + blockIdx.x*scale_delta);
// printf ("Thread number %d\n", threadIdx.x);
// shared pointers local to block
// pointing to global memory as entire shared memory space is not enough to hold for all blocks
__shared__ int *s_queue_curr;
__shared__ int *s_queue_next;
__shared__ int *s_depthnodes;
__shared__ int *s_depthpoints;
__shared__ int curr_source;
__shared__ int block_source;
// lengths
__shared__ int len_queue_curr;
__shared__ int len_queue_next;
__shared__ int len_depthnodes;
__shared__ int len_depthpoints;
__shared__ int depth;
// current thread
int tid = threadIdx.x;
// init (thread 0 publishes the shared pointers/state for the block)
if (tid == 0) {
// block source
block_source = blockIdx.x;
// current source operation
curr_source = block_source;
// offset by blockId time scale for getting the space for each block
s_queue_curr = (int*)((char*)Queue_curr + blockIdx.x*scale_Queue_curr);
s_queue_next = (int*)((char*)Queue_next + blockIdx.x*scale_Queue_next);
s_depthnodes = (int*)((char*)Depth_Nodes + blockIdx.x*scale_depthnodes);
s_depthpoints = (int*)((char*)Depth_Points + blockIdx.x*scale_depthpoints);
// Check point
// for (int i = 0; i < nodes; i++) {
// printf("%d: %d", i, s_distance[i]);
// }
// printf("Block %d I %d", blockIdx, );
}
// wait for init to complete
__syncthreads();
// ================================== MAIN ============================================
// check if the block is operating on a valid source i.e < total nodes
while (block_source < nodes) {
// ============================== distance, delta and sigma INIT ============================================
// In parallel (block-stride loop over all vertices)
for(int k = threadIdx.x; k < nodes; k += blockDim.x) {
if(k == curr_source) {
s_distance[k] = 0;
s_sigma[k] = 1;
}
else {
s_distance[k] = INT_MAX;
s_sigma[k] = 0;
}
s_delta[k] = 0;
}
// wait for completion
__syncthreads();
// ============================== Shortest Path Calculation using curr source ======================
// ============================== Using Work Efficiency ============================================
// init lenghts (frontier = just the source at depth 0)
if(tid == 0) {
s_queue_curr[0] = curr_source;
len_queue_curr = 1;
len_queue_next = 0;
s_depthnodes[0] = curr_source;
len_depthnodes = 1;
s_depthpoints[0] = 0;
s_depthpoints[1] = 1;
len_depthpoints = 2;
depth = 0;
// Check point
// printf("Block: %d Root: %d\n", blockIdx.x, curr_source);
}
__syncthreads();
// start (level-synchronous BFS from curr_source)
while(1) {
// In parallel for current queue elements
for(int k = threadIdx.x; k < len_queue_curr; k += blockDim.x) {
// get vertex at current depth
int v = s_queue_curr[k];
// traverse neighbours
for(int r = V[v]; r < V[v+1]; r++) {
int w = E[r];
// update if not already updated (CAS claims the vertex exactly once)
if(atomicCAS(&s_distance[w], INT_MAX, s_distance[v]+1) == INT_MAX) {
int ii = atomicAdd(&len_queue_next, 1);
s_queue_next[ii] = w;
}
// add the total paths possible to sigma
if(s_distance[w] == (s_distance[v]+1)) {
atomicAdd(&s_sigma[w], s_sigma[v]);
}
}
}
__syncthreads();
// check if completely traversed
if(len_queue_next == 0) {
break;
}
else {
// move next traversal elements from Queue next to Queue curr
for(int i = threadIdx.x; i < len_queue_next; i += blockDim.x) {
s_queue_curr[i] = s_queue_next[i];
s_depthnodes[i+len_depthnodes] = s_queue_next[i];
}
__syncthreads();
// Set variables (record where this depth level ends in s_depthpoints)
if(tid == 0) {
s_depthpoints[len_depthpoints] = s_depthpoints[len_depthpoints-1] + len_queue_next;
len_depthpoints++;
len_queue_curr = len_queue_next;
len_depthnodes += len_queue_next;
len_queue_next = 0;
depth++;
}
__syncthreads();
}
}
// get depth for traversal (deepest level that has predecessors to update)
if(tid == 0) {
depth = s_distance[s_depthnodes[len_depthnodes-1]] - 1;
}
__syncthreads();
// ============================== BC calculation using Brande's Algorithm ============================================
// In parallel
// go from depth (dependency accumulation, deepest level first)
while(depth > 0) {
for(int k = threadIdx.x + s_depthpoints[depth]; k < s_depthpoints[depth+1]; k += blockDim.x) {
// get elements at this depth
int w = s_depthnodes[k];
// init
float dsw = 0;
float sw = (float)s_sigma[w];
// check neighbours
for(int r = V[w]; r < V[w+1]; r++) {
int v = E[r];
// neighbour within 1 distance (v is a successor of w in the BFS DAG)
if(s_distance[v] == (s_distance[w]+1)) {
// accumulate sigma
dsw += (sw/(float)s_sigma[v])*(1.0f+s_delta[v]);
}
}
// update delta using dsw
s_delta[w] = dsw;
}
// move to higher depth
__syncthreads();
if(tid == 0) {
depth--;
}
__syncthreads();
}
// fold this source's dependencies into the global bc scores
for(int k = threadIdx.x; k < nodes; k += blockDim.x) {
atomicAdd(&bc[k], s_delta[k]);
}
__syncthreads();
// ============================== NEXT OPERATING SOURCE FOR BLOCK ============================================
// get a block's next operating source
// using number of blocks launched stored in next source variable
if(tid == 0) {
block_source = atomicAdd(next_source, 1);
curr_source = block_source;
}
__syncthreads();
}
}
/**
* Main function
*/
int main () {
// Reads a graph in CSR form from stdin, runs the betweenness-centrality
// kernel (one persistent block per SM, work distributed via d_next_source),
// and reports the execution time and maximum BC value.
// ================================ READ INPUT AND MAKE Compressed Adjacency List ====================================
// nodes and edges
int nodes, edges;
cin>>nodes>>edges;
// compressed adjacency list: V holds per-node offsets into E
int * V = new int[nodes + 1];
int * E = new int[2 * edges];
// read graph data in CSR format: one whitespace-separated neighbour list per node
string line;
int node = 0;
int counter = 0;
getline(cin, line); // consume remainder of the header line
for (int i = 0; i < nodes; ++i) {
getline(cin, line);
V[node] = counter;
istringstream is(line);
int tmp;
while (is >> tmp) {
E[counter] = tmp;
counter += 1;
}
++node;
}
V[node] = counter; // sentinel so V[v+1] is valid for the last node
// ================================ DECLARE AND INIT VARIABLES ====================================
// host pointer bc holds the final result
float *bc = new float[nodes];
// device pointers explained in kernel
float *d_bc, *d_delta;
int *d_V, *d_E, *d_distance, *d_Queue1, *d_Queue2, *d_Depth_Nodes, *d_Depth_Points, *d_next_source;
unsigned long long *d_sigma;
// row pitches (in bytes) returned by cudaMallocPitch, filled below
size_t scale_distance, scale_sigma, scale_delta, scale_Q1, scale_Q2, scale_depthnodes, scale_depthpoints;
// to get next source vertex after a block finishes computing the BC using the current source
int *next_source = new int;
// set to number of SMs as each block is assigned to 1 SM
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
next_source[0] = prop.multiProcessorCount;
// Allocate space on device
cudaMalloc((void**)&d_bc, sizeof(float) * nodes);
cudaMalloc((void**)&d_V, sizeof(int) * (nodes + 1));
cudaMalloc((void**)&d_E, sizeof(int) * (2*edges));
// Allocate the per-block working arrays as pitched 2D buffers: one row per
// resident block, since shared memory cannot hold these O(nodes) arrays
cudaMallocPitch((void**)&d_Queue1, &scale_Q1, sizeof(int) * nodes, next_source[0]);
cudaMallocPitch((void**)&d_Queue2, &scale_Q2, sizeof(int) * nodes, next_source[0]);
cudaMallocPitch((void**)&d_Depth_Nodes, &scale_depthnodes, sizeof(int) * nodes, next_source[0]);
cudaMallocPitch((void**)&d_Depth_Points, &scale_depthpoints, sizeof(int) * (nodes + 1), next_source[0]);
cudaMallocPitch((void**)&d_sigma, &scale_sigma, sizeof(unsigned long long) * nodes, next_source[0]);
cudaMallocPitch((void**)&d_distance, &scale_distance, sizeof(int) * nodes, next_source[0]);
cudaMallocPitch((void**)&d_delta, &scale_delta, sizeof(float) * nodes, next_source[0]);
cudaMalloc((void**)&d_next_source, sizeof(int));
// Copy required items from host to device
cudaMemcpy(d_V, V, sizeof(int) * (nodes+1), cudaMemcpyHostToDevice);
cudaMemcpy(d_E, E, sizeof(int) * (2*edges), cudaMemcpyHostToDevice);
cudaMemset(d_bc, 0, sizeof(float) * nodes);
cudaMemcpy(d_next_source, next_source, sizeof(int), cudaMemcpyHostToDevice);
// ================================ KERNEL PARAMS AND CALL ====================================
// one block per SM, 64 threads per block
dim3 cudaGrid, cudaBlock;
cudaGrid.x = next_source[0];cudaGrid.y = 1;cudaGrid.z = 1;
cudaBlock.x = 64;cudaBlock.y = 1;cudaBlock.z = 1;
float elapsed_time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// KERNEL CALL
betweenness_centrality_kernel <<<cudaGrid, cudaBlock>>> (d_bc, nodes, edges, d_V, d_E, d_Queue1, d_Queue2, d_Depth_Nodes, d_Depth_Points, d_sigma, d_distance, d_delta, d_next_source, scale_distance, scale_sigma, scale_delta, scale_Q1, scale_Q2, scale_depthnodes, scale_depthpoints);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
// ================================ RESULT ====================================
cudaMemcpy(bc, d_bc, sizeof(float) * nodes, cudaMemcpyDeviceToHost);
cout<<"Result: \n";
cout<<"\n";
// Print the time for execution
cout<<"Execution time: "<<elapsed_time/1000.0<<endl;
// Maximum BC value (reported halved, matching the per-node printout the
// original had commented out)
float max_bc = 0.0;
for (int i = 0; i < nodes; ++i) {
max_bc = (bc[i] > max_bc) ? bc[i] : max_bc;
}
cout<<"Max BC value: "<<max_bc/2.0<<endl;
// ================================ MEMORY RELEASE ====================================
// destroy timing events (they were previously leaked)
cudaEventDestroy(start);
cudaEventDestroy(stop);
// free device variables
cudaFree(d_bc);
cudaFree(d_V);
cudaFree(d_E);
cudaFree(d_Queue1);
cudaFree(d_Queue2);
cudaFree(d_Depth_Nodes);
cudaFree(d_Depth_Points);
cudaFree(d_sigma);
cudaFree(d_distance);
cudaFree(d_delta);
cudaFree(d_next_source);
// BUG FIX: V, E and bc come from new[] (and next_source from new), so the
// original free() calls were undefined behavior; use delete[]/delete.
delete[] V;
delete[] E;
delete[] bc;
delete next_source;
return 0;
}
|
6,443 | #include "includes.h"
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
// Naive GEMM: one thread per output element.
// C[row][col] = dot(row `row` of A, column `col` of B).
// Precondition: numAColumns == numBRows (inner dimension) and C is
// numCRows x numCColumns with numCRows == numARows, numCColumns == numBColumns.
// Row index of the C element this thread computes
int row = blockIdx.y*blockDim.y+threadIdx.y;
// Column index of the C element this thread computes
int col = blockIdx.x*blockDim.x+threadIdx.x;
if ((row < numARows) && (col < numBColumns)) {
float Cval = 0.0f;
for (int k = 0; k < numBRows; ++k) {
// BUG FIX: A is row-major with row stride numAColumns; the original
// indexed A with numBRows, which is only correct because the matmul
// precondition makes numBRows == numAColumns. Use the true stride.
Cval += A[row*numAColumns+k]*B[k*numBColumns+col];
}
// Store with C's own row stride rather than B's column count.
C[row*numCColumns+col] = Cval;
}
}
6,444 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void strongestNeighborScan_gpu(int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges) {
// One pass of a strided segmented max-scan over numEdges edge slots.
// Each slot i compares itself with slot i-distance; when both belong to the
// same source-vertex segment, the heavier candidate (dst, weight) wins and
// *madeChanges is raised on a strict improvement. Ties keep the earlier
// slot's destination (it already resolved to the smaller vertex id).
int stride = blockDim.x * gridDim.x; // total threads cooperating
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < numEdges; i += stride) {
// default: carry the current candidate forward unchanged
int bestDst = oldDst[i];
int bestWeight = oldWeight[i];
int partner = i - distance;
// merge only when the partner slot exists and shares our segment
if (partner >= 0 && src[i] == src[partner]) {
if (oldWeight[partner] > bestWeight) {
// strictly heavier neighbour found upstream: adopt it and flag progress
bestDst = oldDst[partner];
bestWeight = oldWeight[partner];
(*madeChanges) = 1;
} else if (oldWeight[partner] == bestWeight) {
// equal weight: take the upstream destination, keep our weight
bestDst = oldDst[partner];
}
}
newDst[i] = bestDst;
newWeight[i] = bestWeight;
}
}
6,445 | #include<stdio.h>
#include<time.h>
#include<time.h>
#include<stdlib.h>
#include<math.h>
__global__ void func1(int *c,int *a,int *b, int n)
{
// Device-side initialization: a[i] = 2, b[i] = 3 for every i < n.
// (c is unused here; it is filled by func2.)
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n)
return; // guard the grid tail
a[idx] = 2;
b[idx] = 3;
}
__global__ void func2(int *c,int *a,int *b, int n)
{
// Element-wise vector addition: c[i] = a[i] + b[i] for every i < n.
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n)
return; // guard the grid tail
c[idx] = a[idx] + b[idx];
}
int main()
{
// Times a fill (a=2, b=3) and an add (c=a+b) on the GPU, then the same
// work on the CPU, and prints the speedup.
float timespentGPU,timespentGPU1,timespentGPU2;
float timespentCPU,timespentCPU1,timespentCPU2;
cudaEvent_t start, stop;
cudaEventCreate(&start); //Creates an event object
cudaEventCreate(&stop);
int *d_c;
int *d_a;
int *d_b;
int n=2*1000000 ;
int *h_a=(int*) malloc(n* sizeof(int));
int *h_b=(int*) malloc(n * sizeof(int));
int *h_c=(int*) malloc(n * sizeof(int));
int i ;
int threads= 1024;
// BUG FIX: the original hard-coded 1024 blocks, which covers only
// 1024*1024 = 1,048,576 threads and leaves the upper half of the
// n = 2,000,000 arrays untouched. Size the grid from n (ceil-div).
int blocks = (n + threads - 1) / threads;
cudaMalloc((void **)&d_c, n*sizeof(int));
cudaMalloc((void **)&d_a, n*sizeof(int));
cudaMalloc((void **)&d_b, n*sizeof(int));
// (The original also copied the uninitialized host arrays to the device
// and freed/reallocated the device buffers between kernels; both were
// pointless round-trips and have been removed — the data stays on device.)
// Time func1: device-side initialization of a and b.
cudaEventRecord(start, 0);
func1<<<blocks, threads>>>(d_c,d_a,d_b,n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// BUG FIX: the original read "cudaEventElapsedTime(×pentGPU1, ...)" —
// a mangled HTML escape of "&timespentGPU1" that cannot compile.
cudaEventElapsedTime(&timespentGPU1, start, stop);
// Time func2: c = a + b on the device.
cudaEventRecord(start, 0);
func2<<<blocks, threads>>>(d_c,d_a,d_b, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timespentGPU2, start, stop);
// Bring the results back for printing/verification.
cudaMemcpy(h_c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_c);
cudaFree(d_a);
cudaFree(d_b);
for (i=0;i<n;i++)
{
printf("c =%d\n",h_c[i]);
}
timespentGPU = timespentGPU1+timespentGPU2;
printf("\n timespent on GPU=%f ms",timespentGPU);
// CPU reference: same fill + add, timed with the same CUDA events.
cudaEventRecord(start, 0);
for(i = 0;i<n;i++)
{
h_a[i] = 2;
h_b[i] = 3;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timespentCPU1, start, stop);
cudaEventRecord(start, 0);
for (i=0;i<n;i++)
{
h_c[i] = h_a[i] + h_b[i];
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timespentCPU2, start, stop);
timespentCPU = timespentCPU1+timespentCPU2;
printf("\n timespent on CPU=%f ms",timespentCPU);
printf("\n Speedup = %f",timespentCPU/timespentGPU);
// Release remaining resources (events and host buffers were leaked before).
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
6,446 | #include "includes.h"
__global__ void process_coarseness_ek_pix(double * output_ak, double *output_ekh, double *output_ekv,int colsize, int rowsize,long lenOf_ek)
{
// For each pixel, write the absolute difference of the averaged image
// output_ak between the two neighbours lenOf_ek away — horizontally into
// output_ekh and vertically into output_ekv. Pixels whose neighbour pair
// would fall outside the image get 0.
// Note the axis mapping: thread .x walks columns of size `colsize` (y),
// thread .y walks rows of size `rowsize` (x); arrays are y*rowsize + x.
const int y = threadIdx.x + blockIdx.x * blockDim.x;
const int x = threadIdx.y + blockIdx.y * blockDim.y;
if (y >= colsize || x >= rowsize)
return; // out of image
const int idx = y * rowsize + x;
const int right = x + lenOf_ek;
const int left = x - lenOf_ek;
const int below = y + lenOf_ek;
const int above = y - lenOf_ek;
// horizontal difference, or 0 when either horizontal neighbour is outside
output_ekh[idx] = (right < rowsize && left >= 0)
? fabs(output_ak[y * rowsize + right] - output_ak[y * rowsize + left])
: 0;
// vertical difference, or 0 when either vertical neighbour is outside
output_ekv[idx] = (below < colsize && above >= 0)
? fabs(output_ak[below * rowsize + x] - output_ak[above * rowsize + x])
: 0;
}
6,447 | #include "includes.h"
__global__ void STREAM_Add(float *a, float *b, float *c, size_t len)
{
// STREAM "add" benchmark kernel: c = a + b, element-wise, using a
// grid-stride loop so any launch configuration covers all len elements.
const size_t stride = (size_t)blockDim.x * gridDim.x;
for (size_t i = threadIdx.x + (size_t)blockIdx.x * blockDim.x; i < len; i += stride) {
c[i] = a[i] + b[i];
}
}
6,448 | #include "includes.h"
// Single-block tree reduction: sums the first blockDim.x elements of g_data
// into g_data[0] via pairwise halving in shared memory.
// NOTE(review): assumes a one-block launch with blockDim.x equal to the
// number of elements (a power of two) and NUM_ELEMENTS >= blockDim.x
// (NUM_ELEMENTS comes from includes.h) — confirm at the call site.
// The parameter n is unused by the visible code.
__global__ void reduction(float *g_data, int n)
{
__shared__ float partialSum[NUM_ELEMENTS]; // block-local partial sums
unsigned int t = threadIdx.x;
// stage this thread's element into shared memory
partialSum[t] = g_data[t];
// halve the active range each pass; barrier before reading peers' partials
for (int i = blockDim.x/2; i > 0; i>>=1)
{
__syncthreads();
if(t<i)
{
partialSum[t] += partialSum[t + i];
}
}
// thread 0 owns the final total
if(t==0)
{
g_data[0] = partialSum[0];
}
}
6,449 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cdist.cuh"
__global__ void sqdistKernel(float* x, float* y, int dim, float* z) {
// All-pairs squared distance: z[i*dim + j] = (x[i] - y[j])^2 for a
// dim x dim output; one thread per output element.
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= dim * dim)
return; // beyond the output matrix
const int row = tid / dim; // index into x
const int col = tid % dim; // index into y
const float d = x[row] - y[col];
z[tid] = d * d;
}
// Host wrapper: copies x and y to the device, launches sqdistKernel, and
// returns a freshly new[]-allocated xdim*ydim result matrix (caller owns it;
// release with delete[]). Returns 0 on GPU allocation/copy failure.
// NOTE(review): the launch <<<xdim, ydim>>> passes xdim as the kernel's
// `dim`, so the pairwise indexing is only consistent when xdim == ydim —
// confirm callers never pass unequal sizes.
// NOTE(review): the kernel launch and the final device-to-host copy (err_z)
// are not error-checked; on failure the returned buffer is unspecified.
float* sqdistWrapper(float* x, float* y, int xdim, int ydim)
{
int x_size = xdim * sizeof(float);
int y_size = ydim * sizeof(float);
// xdim * ydim * sizeof(float), expressed via the two byte counts
int z_size = x_size * y_size / sizeof(float);
float* x_gpu = 0;
float* y_gpu = 0;
float* z_gpu = 0;
cudaError_t err_x = cudaMalloc((void **) &x_gpu, x_size);
cudaError_t err_y = cudaMalloc((void **) &y_gpu, y_size);
cudaError_t err_z = cudaMalloc((void **) &z_gpu, z_size);
if ((err_x != cudaSuccess) ||
(err_y != cudaSuccess) ||
(err_z != cudaSuccess))
{
// release whatever succeeded before bailing out
if (x_gpu) cudaFree(x_gpu);
if (y_gpu) cudaFree(y_gpu);
if (z_gpu) cudaFree(z_gpu);
fprintf(stderr, "!!!! GPU memory allocation error\n");
return 0;
}
err_x = cudaMemcpy(x_gpu, x, x_size, cudaMemcpyHostToDevice);
err_y = cudaMemcpy(y_gpu, y, y_size, cudaMemcpyHostToDevice);
if ((err_x != cudaSuccess) ||
(err_y != cudaSuccess))
{
if (x_gpu) cudaFree(x_gpu);
if (y_gpu) cudaFree(y_gpu);
if (z_gpu) cudaFree(z_gpu);
fprintf(stderr, "!!!! GPU memory allocation error\n");
return 0;
}
// one thread per output element: xdim blocks of ydim threads
sqdistKernel<<<xdim, ydim>>>(x_gpu, y_gpu, xdim, z_gpu);
float* z = new float[xdim*ydim];
// blocking copy; also serves as the implicit kernel synchronization
err_z = cudaMemcpy(z, z_gpu, z_size, cudaMemcpyDeviceToHost);
if (x_gpu) cudaFree(x_gpu);
if (y_gpu) cudaFree(y_gpu);
if (z_gpu) cudaFree(z_gpu);
return z;
}
int main(void)
{
// Smoke test: 10-point vector 0..9, all-pairs squared distances, printed.
float* x = new float[10];
for(int i = 0; i < 10; ++i)
x[i] = (float) i;
float* z = sqdistWrapper(x, x, 10, 10);
// Robustness: sqdistWrapper returns 0 on GPU failure; the original
// dereferenced it unconditionally.
if (z != 0) {
for(int i = 0; i < 10; ++i){
for(int j = 0; j < 10; ++j)
printf("z[%i, %i] = %2f", i, j, z[i*10 + j]);
printf("\n");
}
}
// BUG FIX: x comes from new[], so the original free(x) was undefined
// behavior; pair new[] with delete[].
delete[] x;
delete[] z; // delete[] on a null pointer is a safe no-op
return 0;
}
|
6,450 | #include <chrono>
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
static int SLEEP_TIME = 50000;
static int GENERATION_STEP = 1;
__global__ void singleBlockLifeKernel(uint32_t *cols, int numGenerations) {
// Runs numGenerations steps of Life on a 32x32 torus with a single block
// of 32 threads; thread t owns column t. Cells are unpacked into shared
// memory one byte each: bit 0 holds the current state, bit 1 the next.
__shared__ uint8_t grid[1024]; // TODO Should this be uint32_t?
int colIdx = threadIdx.x;
// Copy data from global memory to shared memory
uint32_t colData = cols[colIdx];
// Split the data out into an easy to handle array
for (int i = 0; i < 32; ++i) {
grid[i * 32 + colIdx] = ((colData & 1 << i)) >> i;
}
// BUG FIX: barrier before any thread reads neighbouring columns written by
// other threads — implicit warp lockstep cannot be assumed (independent
// thread scheduling on Volta+ makes the unsynchronized read a race).
__syncthreads();
// The bit mask is a quick and dirty way of computing the positive bounded %32
uint8_t leftIdx = ((colIdx - 1) & 0x1f);
uint8_t rightIdx = ((colIdx + 1) & 0x1f);
for (int g = 0; g < numGenerations; ++g) {
uint8_t lastSides = 0, lastMiddle = 0, thisSides = 0, thisMiddle = 0,
nextSides = 0, nextMiddle = 0;
// Get the nieghbors from the row below (torus wrap: row 31)
lastSides = grid[31 * 32 + leftIdx] & 1;
lastSides += grid[31 * 32 + rightIdx] & 1;
lastMiddle = grid[31 * 32 + colIdx];
// Get the neighbors in this row and the cell itself
thisSides = grid[leftIdx] & 1;
thisSides += grid[rightIdx] & 1;
thisMiddle = grid[colIdx];
// Perform cellular automata
for (int i = 0; i < 31; ++i) {
// Get the neighbors in the next row
nextSides = grid[(i + 1) * 32 + leftIdx] & 1;
nextSides += grid[(i + 1) * 32 + rightIdx] & 1;
nextMiddle = grid[(i + 1) * 32 + colIdx];
// Calculate the numbers of neighbors still alive
uint8_t neighbors =
lastSides + lastMiddle + thisSides + nextSides + nextMiddle;
// Write the next state directly to the memory location already allocated
// for this square, just in a differnt bit
// TODO Maybe just make this a macro?
grid[i * 32 + colIdx] |=
(~neighbors >> 1 & neighbors & (thisMiddle | neighbors) << 1) & 2;
// The current row becomes the last row, mutatis mutandis for the next row
lastSides = thisSides;
lastMiddle = thisMiddle;
thisSides = nextSides;
thisMiddle = nextMiddle;
}
// The next row for the last row in the cell will be the dame as the first
// row
nextSides = grid[leftIdx] & 1;
nextSides += grid[rightIdx] & 1;
nextMiddle = grid[colIdx] & 1;
// Compute the number of neighbors for this row
uint8_t neighbors =
lastSides + lastMiddle + thisSides + nextSides + nextMiddle;
// Write the next state directly to the memory location already allocated
// for this square, just in a differnt bit
grid[31 * 32 + colIdx] |=
(~neighbors >> 1 & neighbors & (thisMiddle | neighbors) << 1) & 2;
// Make sure all threads have finished the current generation before starting the next generation
__syncthreads();
// Shift the next state of the cell into the current state of the cell
for (int i = 0; i < 32; ++i) {
grid[i * 32 + colIdx] >>= 1;
}
// BUG FIX: barrier so no thread starts reading neighbour columns for the
// next generation (or the final repack) while another is still shifting.
__syncthreads();
}
// Clear the register to store compacted data
colData = 0;
// Cram the data back into a single value
for (int i = 0; i < 32; ++i) {
colData |= ((grid[i * 32 + colIdx]) & 1) << i;
}
// Copy the data back into global memory
cols[colIdx] = colData;
}
void generateGrid(uint32_t *&cols) {
// Fill the 32 board columns with random bits, seeding rand() from the
// current wall-clock time in milliseconds so each run differs.
const auto sinceEpoch = std::chrono::system_clock::now().time_since_epoch();
uint32_t seed =
std::chrono::duration_cast<std::chrono::milliseconds>(sinceEpoch).count();
srand(seed);
// AND two rand() draws together: biases the board toward dead cells.
for (int c = 0; c < 32; ++c)
cols[c] = rand() & rand() & 0xFFFFFFFF;
}
// Renders the 32x32 board to the terminal. Moves the cursor to the top-left
// (ESC[H) so successive frames overdraw in place, prints one glyph per cell
// (column x, row y = bit y of col[x]), then the generation number, and
// sleeps SLEEP_TIME microseconds to pace the animation.
void drawGrid(uint32_t *col, int generation) {
printf("\033[H");
for (int y = 0; y < 32; ++y) {
// printf("\n\033[1;%dH", y+1);
for (int x = 0; x < 32; ++x)
printf((col[x] & (1l << y)) ? "██" : "  ");
printf("\n");
}
printf("%d ", generation);
usleep(SLEEP_TIME);
}
// Entry point: optional argv[1] = generations per kernel launch, argv[2] =
// per-frame sleep in microseconds. Allocates the board in managed memory so
// host drawing and device stepping share it, then animates forever.
int main(int argc, char **argv) {
if (argc > 1)
GENERATION_STEP = std::stoi(argv[1]);
if (argc > 2)
SLEEP_TIME = std::stoi(argv[2]);
uint32_t *cols;
uint32_t generation = 0;
// managed allocation: visible to both drawGrid (host) and the kernel
cudaMallocManaged(&cols, sizeof(uint32_t) * 32);
generateGrid(cols);
drawGrid(cols, generation);
while (true) {
// advance GENERATION_STEP generations on the device, then redraw
singleBlockLifeKernel<<<1, 32>>>(cols, GENERATION_STEP);
generation += GENERATION_STEP;
cudaDeviceSynchronize(); // wait before the host reads the managed board
drawGrid(cols, generation);
}
// NOTE(review): unreachable — the loop above never exits, so this free
// never runs; the process relies on exit-time cleanup.
cudaFree(cols);
return 0;
}
6,451 | #include <stdio.h>
#include <stdlib.h>
// these are just for timing measurments
#include <time.h>
// Computes minimum in a 3D volume, at each output point
// To compile it with nvcc execute: nvcc -O2 -o grid3d grid3d.cu
//define the window size (cubic volume) and the data set size
#define WSIZE 6
#define DATAXSIZE 100
#define DATAYSIZE 100
#define DATAZSIZE 20
//define the chunk sizes that each threadblock will work on
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
// for cuda error checking
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
return 1; \
} \
} while (0)
// device function to compute 3D volume minimum at each output point
// device function to compute 3D volume minimum at each output point
// Each output cell knode[z][y][x] becomes the minimum of itself and the
// WSIZE^3 window of kcell anchored at (z,y,x). The block first stages its
// (BLK+WSIZE-1)^3 tile of kcell — interior plus halo — into shared memory;
// the seven extra loads below fill the +x/+y/+z halo faces, edges and the
// corner, done only by the threads nearest each face.
__global__ void cmp_win(int knode[][DATAYSIZE][DATAXSIZE], const int kcell[][DATAYSIZE+(WSIZE-1)][DATAXSIZE+(WSIZE-1)])
{
__shared__ int smem[(BLKZSIZE + (WSIZE-1))][(BLKYSIZE + (WSIZE-1))][(BLKXSIZE + (WSIZE-1))];
int tempnode, i, j, k;
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
int idz = blockIdx.z*blockDim.z + threadIdx.z;
// stage the tile: every in-range thread loads its own cell, and threads in
// the last WSIZE-1 rows/columns/planes of the block additionally load the
// shifted halo cells
if ((idx < (DATAXSIZE+WSIZE-1)) && (idy < (DATAYSIZE+WSIZE-1)) && (idz < (DATAZSIZE+WSIZE-1))){
smem[threadIdx.z][threadIdx.y][threadIdx.x]=kcell[idz][idy][idx];
if ((threadIdx.z > (BLKZSIZE - WSIZE)) && (idz < DATAZSIZE))
smem[threadIdx.z + (WSIZE-1)][threadIdx.y][threadIdx.x] = kcell[idz + (WSIZE-1)][idy][idx];
if ((threadIdx.y > (BLKYSIZE - WSIZE)) && (idy < DATAYSIZE))
smem[threadIdx.z][threadIdx.y + (WSIZE-1)][threadIdx.x] = kcell[idz][idy+(WSIZE-1)][idx];
if ((threadIdx.x > (BLKXSIZE - WSIZE)) && (idx < DATAXSIZE))
smem[threadIdx.z][threadIdx.y][threadIdx.x + (WSIZE-1)] = kcell[idz][idy][idx+(WSIZE-1)];
if ((threadIdx.z > (BLKZSIZE - WSIZE)) && (threadIdx.y > (BLKYSIZE - WSIZE)) && (idz < DATAZSIZE) && (idy < DATAYSIZE))
smem[threadIdx.z + (WSIZE-1)][threadIdx.y + (WSIZE-1)][threadIdx.x] = kcell[idz+(WSIZE-1)][idy+(WSIZE-1)][idx];
if ((threadIdx.z > (BLKZSIZE - WSIZE)) && (threadIdx.x > (BLKXSIZE - WSIZE)) && (idz < DATAZSIZE) && (idx < DATAXSIZE))
smem[threadIdx.z + (WSIZE-1)][threadIdx.y][threadIdx.x + (WSIZE-1)] = kcell[idz+(WSIZE-1)][idy][idx+(WSIZE-1)];
if ((threadIdx.y > (BLKYSIZE - WSIZE)) && (threadIdx.x > (BLKXSIZE - WSIZE)) && (idy < DATAYSIZE) && (idx < DATAXSIZE))
smem[threadIdx.z][threadIdx.y + (WSIZE-1)][threadIdx.x + (WSIZE-1)] = kcell[idz][idy+(WSIZE-1)][idx+(WSIZE-1)];
if ((threadIdx.z > (BLKZSIZE - WSIZE)) && (threadIdx.y > (BLKYSIZE - WSIZE)) && (threadIdx.x > (BLKXSIZE - WSIZE)) && (idz < DATAZSIZE) && (idy < DATAYSIZE) && (idx < DATAXSIZE))
smem[threadIdx.z+(WSIZE-1)][threadIdx.y+(WSIZE-1)][threadIdx.x+(WSIZE-1)] = kcell[idz+(WSIZE-1)][idy+(WSIZE-1)][idx+(WSIZE-1)];
}
// all halo loads must land before any thread scans its window
__syncthreads();
// scan the WSIZE^3 window in shared memory and keep the minimum
if ((idx < DATAXSIZE) && (idy < DATAYSIZE) && (idz < DATAZSIZE)){
tempnode = knode[idz][idy][idx];
for (i=0; i<WSIZE; i++)
for (j=0; j<WSIZE; j++)
for (k=0; k<WSIZE; k++)
if (smem[threadIdx.z + i][threadIdx.y + j][threadIdx.x + k] < tempnode)
tempnode = smem[threadIdx.z + i][threadIdx.y + j][threadIdx.x + k];
knode[idz][idy][idx] = tempnode;
}
}
int main(int argc, char *argv[])
{
// Builds a random padded 3D volume, computes the windowed minimum on the
// GPU (cmp_win) and on the host, compares the two results, and reports
// per-phase timings.
typedef int cRarray[DATAYSIZE+WSIZE-1][DATAXSIZE+WSIZE-1];
typedef int nRarray[DATAYSIZE][DATAXSIZE];
int i, j, k, u, v, w, temphnode;
const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE);
const dim3 gridSize(((DATAXSIZE+BLKXSIZE-1)/BLKXSIZE), ((DATAYSIZE+BLKYSIZE-1)/BLKYSIZE), ((DATAZSIZE+BLKZSIZE-1)/BLKZSIZE));
// these are just for timing
clock_t t0, t1, t2, t3;
double t1sum=0.0;
double t2sum=0.0;
double t3sum=0.0;
// overall data set sizes
const int nx = DATAXSIZE;
const int ny = DATAYSIZE;
const int nz = DATAZSIZE;
// window (cubic minimization volume) dimensions
const int wx = WSIZE;
const int wy = WSIZE;
const int wz = WSIZE;
// pointers for data set storage via malloc
nRarray *hnode; // storage for result computed on host
nRarray *node, *d_node; // storage for result computed on device
cRarray *cell, *d_cell; // storage for input (padded by the window size)
// start timing
t0 = clock();
// allocate storage for data set
if ((cell = (cRarray *)malloc(((nx+(wx-1))*(ny+(wy-1))*(nz+(wz-1)))*sizeof(int))) == 0) {fprintf(stderr,"malloc Fail \n"); return 1;}
if ((node = (nRarray *)malloc((nx*ny*nz)*sizeof(int))) == 0) {fprintf(stderr,"malloc Fail \n"); return 1; }
if ((hnode = (nRarray *)malloc((nx*ny*nz)*sizeof(int))) == 0) {fprintf(stderr, "malloc Fail \n"); return 1; }
// synthesize data: random input, results primed to RAND_MAX so any window
// minimum will replace them
for(i=0; i<(nz+(wz-1)); i++)
for(j=0; j<(ny+(wy-1)); j++)
for (k=0; k<(nx+(wx-1)); k++){
cell[i][j][k] = rand(); // unless we use a seed this will produce the same sequence all the time
if ((i<nz) && (j<ny) && (k<nx)) {
node[i][j][k] = RAND_MAX;
hnode[i][j][k] = RAND_MAX;
}
}
t1 = clock();
t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
printf("Init took %3.2f seconds. Begin compute\n", t1sum);
// allocate GPU device buffers
cudaMalloc((void **) &d_cell, (((nx+(wx-1))*(ny+(wy-1))*(nz+(wz-1)))*sizeof(int)));
cudaCheckErrors("Failed to allocate device buffer");
cudaMalloc((void **) &d_node, ((nx*ny*nz)*sizeof(int)));
cudaCheckErrors("Failed to allocate device buffer2");
// copy data to GPU
cudaMemcpy(d_node, node, ((nx*ny*nz)*sizeof(int)), cudaMemcpyHostToDevice);
cudaCheckErrors("CUDA memcpy failure");
cudaMemcpy(d_cell, cell, (((nx+(wx-1))*(ny+(wy-1))*(nz+(wz-1)))*sizeof(int)), cudaMemcpyHostToDevice);
cudaCheckErrors("CUDA memcpy2 failure");
cmp_win<<<gridSize,blockSize>>>(d_node, d_cell);
cudaCheckErrors("Kernel launch failure");
// copy output data back to host (blocking copy also syncs the kernel)
cudaMemcpy(node, d_node, ((nx*ny*nz)*sizeof(int)), cudaMemcpyDeviceToHost);
cudaCheckErrors("CUDA memcpy3 failure");
t2 = clock();
t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
printf(" Device compute took %3.2f seconds. Beginning host compute.\n", t2sum);
// now compute the same result on the host
for (u=0; u<nz; u++)
for (v=0; v<ny; v++)
for (w=0; w<nx; w++){
temphnode = hnode[u][v][w];
for (i=0; i<wz; i++)
for (j=0; j<wy; j++)
for (k=0; k<wx; k++)
if (temphnode > cell[i+u][j+v][k+w]) temphnode = cell[i+u][j+v][k+w];
hnode[u][v][w] = temphnode;
}
t3 = clock();
t3sum = ((double)(t3-t2))/CLOCKS_PER_SEC;
printf(" Host compute took %3.2f seconds. Comparing results.\n", t3sum);
// and compare for accuracy
for (i=0; i<nz; i++)
for (j=0; j<ny; j++)
for (k=0; k<nx; k++)
if (hnode[i][j][k] != node[i][j][k]) {
printf("Mismatch at x= %d, y= %d, z= %d Host= %d, Device = %d\n", i, j, k, hnode[i][j][k], node[i][j][k]);
return 1;
}
printf("Results match!\n");
free(cell);
free(node);
free(hnode); // BUG FIX: hnode was leaked in the original
cudaFree(d_cell);
cudaCheckErrors("cudaFree fail");
cudaFree(d_node);
cudaCheckErrors("cudaFree fail");
return 0;
}
|
6,452 | ////
//// Created by Chen on 11/8/2020.
////
#include <cufft.h>
#include <cstdio>
#include "common.cuh"
#define PI 3.14159265358979324f
__global__
void waveSliceTransmitKernel(cufftComplex *wave, cufftReal const *slice, unsigned nPix,
float waveLength, float relativityGamma,
cufftComplex *waveOut)
/*
* transmit the wave function in real space through one single slice:
* waveOut[i] = wave[i] * exp(i * slice[i] * waveLength * relativityGamma).
* waveLength is in Angstroms. Element-wise, so in-place use (waveOut == wave)
* is safe. The whole grid slides rightwards over the nPix elements.
*/
{
unsigned batch = gridDim.x * blockDim.x; // total threads launched
unsigned ii; // the global index of 2D array
float t_real, t_imag; // transmission function
float w_real, w_imag; // wave
float factor = waveLength * relativityGamma;
//slide rightwards
unsigned gridStartIdx = 0;
while(gridStartIdx < nPix) {
ii = gridStartIdx + blockDim.x * blockIdx.x + threadIdx.x;
// BUG FIX: removed a leftover debug printf here that fired for every
// element beyond the first grid pass and flooded stdout.
if (ii < nPix) {
// transmission t = exp(i * slice * factor)
t_real = cosf(slice[ii] * factor);
t_imag = sinf(slice[ii] * factor);
// complex multiply: wave * t
w_real = wave[ii].x * t_real - wave[ii].y * t_imag;
w_imag = wave[ii].x * t_imag + wave[ii].y * t_real;
waveOut[ii].x = w_real;
waveOut[ii].y = w_imag;
}
gridStartIdx += batch;
}
}
__global__
void waveSpacePropagateFourierKernel(cufftComplex *waveFourier,
int n1, int n2, float dz, float d1, float d2,
float waveLength,
cufftComplex *waveOut)
/*
* Free-space propagation over distance dz, applied in Fourier space:
* each frequency component is multiplied by exp(-i*pi*waveLength*dz*f^2),
* components above the 2/3-Nyquist anti-aliasing filter are zeroed, and
* the result is divided by nPix to undo the unnormalized FFT.
* n1 x n2 is the grid shape, d1/d2 the real-space pixel sizes.
* Element-wise, so in-place use (waveOut == waveFourier) is safe.
*/
{
unsigned batch = gridDim.x * blockDim.x;
unsigned nPix = n1 * n2;
float dfx = 1.0f / d1 / float(n1);
float dfy = 1.0f / d2 / float(n2);
//Nyquist frequency and 1/3 filter
float fMax = 0.5f / (d1 >= d2 ? d1 : d2);
float filter = 0.6667f * fMax;
unsigned ii; // the global index of 2D array
int i, j; // the dual index
int is, js; // the ifftshifted indexes, signed
float fx, fy; // the corresponding spatial frequency to is, js
float f2; // the squared spatial frequency f2 = fx*fx + fy*fy
float p_real, p_imag; // spatial propagator
float w_real, w_imag; // wave
//slide rightwards
unsigned gridStartIdx = 0;
while(gridStartIdx < nPix) {
ii = gridStartIdx + blockDim.x * blockIdx.x + threadIdx.x;
if (ii < nPix) {
i = (int)ii / n2;
j = (int)ii % n2;
// ifftshift the indices so frequency 0 maps to array element 0
is = i + (i<(n1+1)/2? n1/2: -(n1+1)/2);
js = j + (j<(n2+1)/2? n2/2: -(n2+1)/2);
fx = ((float)is - floorf((float)n1/2.0f)) * dfx;
fy = ((float)js - floorf((float)n2/2.0f)) * dfy;
f2 = fx*fx + fy*fy;
if (f2 <= filter * filter) {
// propagator exp(-i*pi*lambda*dz*f^2)
p_real = cosf(waveLength * PI * dz * f2);
p_imag = -sinf(waveLength * PI * dz * f2);
w_real = waveFourier[ii].x * p_real - waveFourier[ii].y * p_imag;
w_imag = waveFourier[ii].x * p_imag + waveFourier[ii].y * p_real;
// divide by nPix: cuFFT forward+inverse scales by nPix
waveOut[ii].x = w_real / (float)nPix;
waveOut[ii].y = w_imag / (float)nPix;
} else {
// beyond the anti-aliasing band limit: zero out
waveOut[ii].x = 0;
waveOut[ii].y = 0;
}
}
gridStartIdx += batch;
}
}
__global__
void waveLensPropagateKernel(cufftComplex *waveFourier, int n1, int n2, float d1, float d2,
float waveLength, float cs_mm, float defocus, float aperture,
cufftComplex *waveOut)
/*
* Objective-lens transfer applied in Fourier space: inside the aperture
* (f <= aperture/waveLength) each component is multiplied by exp(-i*chi)
* with aberration chi(f) = 0.5*pi*Cs*lambda^3*f^4 - pi*defocus*lambda*f^2
* (Cs given in mm, converted via 1e7; lengths in Angstroms); components
* outside the aperture are zeroed, and the result is divided by nPix to
* undo the unnormalized FFT. Element-wise, so in-place use is safe.
*/
{
unsigned batch = gridDim.x * blockDim.x;
unsigned nPix = n1 * n2;
float dfx = 1.0f / d1 / float(n1);
float dfy = 1.0f / d2 / float(n2);
// aperture half-angle converted to a spatial-frequency cutoff
float fAper = aperture / waveLength;
float c1 = 0.5f * PI * cs_mm * 1e7f * waveLength * waveLength * waveLength;
float c2 = PI * defocus * waveLength;
unsigned ii; // the global index of 2D array
int i, j; // the dual index
int is, js; // the ifftshifted indexes
float fx, fy; // the corresponding spatial frequency to is, js
float f2; // the squared spatial frequency f2 = fx*fx + fy*fy
float h_real, h_imag; // modulation transfer function
float w_real, w_imag; // wave
float aberr; // optic aberration
unsigned gridStartIdx = 0;
while(gridStartIdx < nPix) {
ii = gridStartIdx + blockDim.x * blockIdx.x + threadIdx.x;
if (ii < nPix) {
i = (int)ii / n2;
j = (int)ii % n2;
// ifftshift the indices so frequency 0 maps to array element 0
is = i + (i<(n1+1)/2? n1/2: -(n1+1)/2);
js = j + (j<(n2+1)/2? n2/2: -(n2+1)/2);
fx = ((float)is - floorf((float)n1/2.0f)) * dfx;
fy = ((float)js - floorf((float)n2/2.0f)) * dfy;
f2 = fx*fx + fy*fy;
if (f2 <= fAper * fAper) {
// chi(f): spherical aberration term minus defocus term
aberr = c1 * f2 * f2 - c2 * f2;
h_real = cosf(aberr);
h_imag = -sinf(aberr);
w_real = waveFourier[ii].x * h_real - waveFourier[ii].y * h_imag;
w_imag = waveFourier[ii].x * h_imag + waveFourier[ii].y * h_real;
// divide by nPix: cuFFT forward+inverse scales by nPix
waveOut[ii].x = w_real / (float)nPix;
waveOut[ii].y = w_imag / (float)nPix;
} else {
// blocked by the objective aperture
waveOut[ii].x = 0;
waveOut[ii].y = 0;
}
}
gridStartIdx += batch;
}
}
namespace emsim { namespace cuda {
/*
* Host-side launchers for the wave-optics kernels above. Each picks
* blockDim = min(maxThreadsPerBlock, nPix) (maxThreadsPerBlock comes from
* common.cuh), grid = ceil(nPix / blockDim) clamped to the 2^31-1 grid
* dimension limit; the kernels internally slide the grid over any
* remaining elements.
*/
// Launch waveSliceTransmitKernel: waveOut = wave * exp(i*sigma*slice).
void waveSliceTransmit(cufftComplex *wave, cufftReal const *slice, int nPix,
float waveLength, float relativityGamma,
cufftComplex *waveOut) {
unsigned blockDimX = maxThreadsPerBlock;
if (blockDimX > nPix) blockDimX = nPix;
auto gridDimX = (unsigned) ceilf((float) nPix / (float) blockDimX);
gridDimX = gridDimX > 2147483647 ? 2147483647 : gridDimX;
waveSliceTransmitKernel<<<gridDimX, blockDimX>>>(wave, slice, nPix, waveLength, relativityGamma, waveOut);
}
// Launch waveSpacePropagateFourierKernel: free-space propagation by dz.
void waveSpacePropagateFourier(cufftComplex *waveFourier,
int n1, int n2, float dz, float d1, float d2,
float waveLength,
cufftComplex *waveOut) {
unsigned nPix = n1 * n2;
unsigned blockDimX = maxThreadsPerBlock;
if (blockDimX > nPix) blockDimX = nPix;
auto gridDimX = (unsigned) ceilf((float) nPix / (float) blockDimX);
gridDimX = gridDimX > 2147483647 ? 2147483647 : gridDimX;
waveSpacePropagateFourierKernel<<<gridDimX, blockDimX>>>(waveFourier, n1, n2,
dz, d1, d2, waveLength,
waveOut);
}
// Launch waveLensPropagateKernel: objective-lens transfer function.
void waveLensPropagate(cufftComplex *waveFourier, int n1, int n2, float d1, float d2,
float waveLength, float cs_mm, float defocus, float aperture,
cufftComplex *waveOut) {
unsigned nPix = n1 * n2;
unsigned blockDimX = maxThreadsPerBlock;
if (blockDimX > nPix) blockDimX = nPix;
auto gridDimX = (unsigned) ceilf((float) nPix / (float) blockDimX);
gridDimX = gridDimX > 2147483647 ? 2147483647 : gridDimX;
waveLensPropagateKernel<<<gridDimX, blockDimX>>>(waveFourier, n1, n2,
d1, d2, waveLength, cs_mm, defocus, aperture,
waveOut);
}
} }
|
6,453 | #include "definitions.cuh"
//Performs CFD calculation on global memory. This code does not use any advance optimization technique on GPU
// But still acheives many fold performance gain
__global__ void calculateCFD_V1( float* input, float* output, unsigned int Ni, unsigned int Nj,
float h)
{
// One Jacobi-style update of an Ni x Nj grid straight from global memory:
// each interior point becomes the average of its four neighbours minus the
// source term 4*h*h scaled by 1/4. Boundary points are left untouched.
const unsigned int row = blockDim.x * blockIdx.x + threadIdx.x; // Y - ID
const unsigned int col = blockDim.y * blockIdx.y + threadIdx.y; // X - ID
// skip the outer boundary and anything beyond the grid
if (row == 0 || col == 0 || row >= (Ni-1) || col >= (Nj-1))
return;
const unsigned int center = row * Nj + col;
const float north = input[(row-1) * Nj + col];
const float south = input[(row+1) * Nj + col];
const float west = input[center - 1];
const float east = input[center + 1];
output[center] = 0.25f * (north + south + west + east - 4*h*h);
}
|
6,454 | #include <cuda_runtime.h>
#include <cstdio>
#include <iostream>
/**
* @property 图像饱和度降低
* @func 将图像转换为几种HSL图像
* @param_out out_image 转换后的图像
* @param_in in_image 待转换图像
* @param_in pixel_amount 像素点个数
* @param_in type 亮度类型
* @param_in alpha 是否有alpha通道
*/
void desaturate_by_cuda(float * const out_image,float const *in_image,const int pixel_amount, const int type,const bool alpha);
/****************************************************************************************************************************/
// Desaturate RGBA pixels: converts interleaved 4-channel input to
// (gray, alpha) output, two pixels per thread (strides `stride` and
// `stride1` address the two pixels' shared-memory RGBA quads).
// `type` selects the luminance formula:
//   0 = maximum channel, 1 = lightness (max+min)/2,
//   2 = 0.21R+0.72G+0.07B, 3 = 0.30R+0.59G+0.11B, 4 = channel average.
// Requires 8*blockDim.x*sizeof(float) dynamic shared memory per block.
// NOTE(review): the alpha writes at out[out_idx+tid+1] advance with a
// different stride than the gray writes at out[out_idx] — verify the
// intended output layout against the host-side launcher.
__global__ void kernel_desaturate_alpha(float *out,float const *in, const int size,const int type)
{
extern __shared__ float s[];
// each block stages 8*blockDim.x input floats (2 RGBA pixels per thread)
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 8 ;
int out_idx = threadIdx.x+ blockIdx.x * blockDim.x * 4 ;
int tid=threadIdx.x;
int stride=tid*4; // RGBA quad of this thread's first pixel
int stride1=stride+blockDim.x*4; // RGBA quad of this thread's second pixel
if (in_idx< size * 4)
{
// coalesced staging: consecutive threads load consecutive floats
s[tid]=in[in_idx];
s[tid+blockDim.x]=in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=in[in_idx+blockDim.x*5];
s[tid+blockDim.x*6]=in[in_idx+blockDim.x*6];
s[tid+blockDim.x*7]=in[in_idx+blockDim.x*7];
}
// staging must finish: each thread now reads quads written by others
__syncthreads();
if(type==0)
{
// maximum channel
out[out_idx]=max(s[stride+0],max(s[stride+1],s[stride+2]));
out[out_idx+blockDim.x*2]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(type==1)
{
// lightness: midpoint of the channel extremes
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
out[out_idx+blockDim.x*2]=0.5f*(max_s+min_s);
}
if(type==2)
{
// weighted sum, 0.21/0.72/0.07 coefficients
out[out_idx]=0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
out[out_idx+blockDim.x*2]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(type==3)
{
// weighted sum, 0.30/0.59/0.11 coefficients
out[out_idx]=0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
out[out_idx+blockDim.x*2]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(type==4)
{
// plain channel average
out[out_idx]=((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
out[out_idx+blockDim.x*2]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
// pass the alpha channel through for both pixels
out[out_idx+tid+1]=s[stride+3];
out[out_idx+blockDim.x*2+tid+1]=s[stride1+3];
}
// Desaturate an RGB image (3 floats/pixel, no alpha) to single-channel gray.
// Each thread stages two RGB pixels (6 floats) in dynamic shared memory and
// emits two gray values selected by `type` (same codes as the alpha variant:
// 0=max, 1=lightness, 2=luminosity, 3=luma, 4=average).
// Requires blockDim.x*6*sizeof(float) of dynamic shared memory.
__global__ void kernel_desaturate(float *out,float const *in, const int size,const int type)
{
extern __shared__ float s[];
// Each block consumes blockDim.x*6 input floats and produces blockDim.x*2
// output floats (two gray pixels per thread).
int in_idx = threadIdx.x + blockIdx.x * blockDim.x * 6 ;
int out_idx = threadIdx.x+ blockIdx.x * blockDim.x * 2 ;
int tid=threadIdx.x;
// Base offsets of this thread's first and second staged RGB pixel.
int stride=tid*3;
int stride1=stride+blockDim.x*3;
if (in_idx< size * 3)
{
// Coalesced staging of 6 floats per thread.
s[tid]=in[in_idx];
s[tid+blockDim.x]=in[in_idx+blockDim.x];
s[tid+blockDim.x*2]=in[in_idx+blockDim.x*2];
s[tid+blockDim.x*3]=in[in_idx+blockDim.x*3];
s[tid+blockDim.x*4]=in[in_idx+blockDim.x*4];
s[tid+blockDim.x*5]=in[in_idx+blockDim.x*5];
}
__syncthreads();
if(type==0)
{
// Maximum channel.
out[out_idx]=max(s[stride+0],max(s[stride+1],s[stride+2]));
out[out_idx+blockDim.x]=max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
}
if(type==1)
{
// Lightness: midpoint of extreme channels.
float const max_v = max(s[stride+0],max(s[stride+1],s[stride+2]));
float const min_v = min(s[stride+0],min(s[stride+1],s[stride+2]));
out[out_idx]=0.5f*(max_v+min_v);
float const max_s = max(s[stride1+0],max(s[stride1+1],s[stride1+2]));
float const min_s = min(s[stride1+0],min(s[stride1+1],s[stride1+2]));
out[out_idx+blockDim.x]=0.5f*(max_s+min_s);
}
if(type==2)
{
// Luminosity (Rec.709 weights).
out[out_idx]=0.21f * s[stride+0] + 0.72f * s[stride+1] + 0.07f * s[stride+2];
out[out_idx+blockDim.x]=0.21f * s[stride1+0] + 0.72f * s[stride1+1] + 0.07f * s[stride1+2];
}
if(type==3)
{
// Luma (Rec.601 weights).
out[out_idx]=0.30f * s[stride+0] + 0.59f * s[stride+1] + 0.11f * s[stride+2];
out[out_idx+blockDim.x]=0.30f * s[stride1+0] + 0.59f * s[stride1+1] + 0.11f * s[stride1+2];
}
if(type==4)
{
// Plain channel average.
out[out_idx]=((float)(s[stride+0] + s[stride+1] + s[stride+2])) / 3.0f;
out[out_idx+blockDim.x]=((float)(s[stride1+0] + s[stride1+1] + s[stride1+2])) / 3.0f;
}
}
// Host wrapper: upload the image, run the matching desaturation kernel,
// download the result. `pixel_amount` pixels with 3 (RGB) or 4 (RGBA)
// channels in; 1 or 2 channels out. `type` selects the gray formula.
void desaturate_by_cuda(float * const out_image,float const *in_image,const int pixel_amount, const int type,const bool alpha)
{
    float *d_in=NULL;
    float *d_out=NULL;
    // 3 or 4 input channels, 1 or 2 output channels per pixel.
    int bytes_in=pixel_amount*(3+alpha)*sizeof(float);
    int bytes_out=pixel_amount*(1+alpha)* sizeof(float);
    const int blocksize=256;
    dim3 block(blocksize,1,1);
    // Each thread produces two output pixels, hence blocksize*2 pixels/block
    // (ceil division).
    dim3 grid((pixel_amount-1+blocksize*2)/(blocksize*2),1,1);
    cudaMalloc(&d_in,bytes_in);
    cudaMalloc(&d_out,bytes_out);
    cudaMemcpy(d_in,in_image,bytes_in,cudaMemcpyHostToDevice);
    if(alpha)
    {
        // BUGFIX: kernel_desaturate_alpha stages 8 floats per thread in
        // shared memory (it indexes up to s[tid+blockDim.x*7]), so it needs
        // blocksize*8 floats of dynamic shared memory, not blocksize*4 —
        // the old size caused out-of-bounds shared-memory accesses.
        kernel_desaturate_alpha<<<grid,block,blocksize*8* sizeof(float)>>>(d_out,d_in,pixel_amount,type);
    }
    else
    {
        // Non-alpha kernel stages 6 floats per thread.
        kernel_desaturate<<<grid,block,blocksize*6* sizeof(float)>>>(d_out,d_in,pixel_amount,type);
    }
    // The blocking memcpy also synchronises with the kernel.
    cudaMemcpy(out_image,d_out,bytes_out,cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
|
6,455 | /******************************************************************************
LICENSE
Copyright (c) 2015 Codeplay Software Ltd.
Copyright (c) 2006-2008 Kevin Beason (kevin.beason@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Modified version for SYCL of Kevin Beason smallpt
* http://www.kevinbeason.com/smallpt/
*
*
*
* Modified version using CUDA
* Add the original License in the source file
* The number of samples is set at 40
* spheres_glob is not declared in the global space
*
* Zheming Jin
*
******************************************************************************/
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda.h>
// Minimal per-thread xorshift PRNG used by the path tracer.
class RNG {
public:
  unsigned int x;                        // xorshift state
  const uint32_t fmask = (1 << 23) - 1;  // float mantissa mask

  __device__
  RNG(const unsigned int seed) { x = seed; }

  // Advance the xorshift(6, 17, 9) state and return it.
  __device__
  uint32_t next() {
    x ^= x >> 6;
    x ^= x << 17;
    x ^= x >> 9;
    return uint32_t(x);
  }

  // Uniform float in [0, 1): place 23 random mantissa bits into a float
  // with exponent bits for [1, 2), then subtract 1.
  __device__
  float operator()(void) {
    union {
      float f;
      uint32_t i;
    } bits;
    bits.i = (next() & fmask) | 0x3f800000;
    return bits.f - 1.f;
  }
};
// 3-component vector used both as a position/direction and as an RGB colour.
struct Vec { // Usage: time ./smallpt 5000 && xv image.ppm
  float x, y, z; // position, also color (r,g,b)
  __host__ __device__
  Vec(float x_ = 0, float y_ = 0, float z_ = 0) : x(x_), y(y_), z(z_) {}
  // Component-wise addition.
  __device__
  Vec operator+(const Vec &b) const { return Vec(x + b.x, y + b.y, z + b.z); }
  // Component-wise subtraction.
  __device__
  Vec operator-(const Vec &b) const { return Vec(x - b.x, y - b.y, z - b.z); }
  // Scalar multiplication.
  __host__ __device__
  Vec operator*(float b) const { return Vec(x * b, y * b, z * b); }
  // Component-wise (Hadamard) product — used for colour attenuation.
  __device__
  Vec mult(const Vec &b) const { return Vec(x * b.x, y * b.y, z * b.z); }
  // Normalise in place and return *this.
  __device__
  Vec &norm() {
    return *this = *this * (1 / sqrt(x * x + y * y + z * z));
  }
  __device__
  float dot(const Vec &b) const {
    return x * b.x + y * b.y + z * b.z;
  } // cross:
  // Cross product (operator% by smallpt convention).
  __device__
  Vec operator%(Vec &b) {
    return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x);
  }
};
// A ray: origin o, direction d (callers pass a normalised direction).
struct Ray {
  Vec o, d;
  __device__
  Ray(Vec o_, Vec d_) : o(o_), d(d_) {}
};
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance()
// Scene primitive: a sphere with emission, colour and a material type.
struct Sphere {
  float rad; // radius
  Vec p, e, c; // position, emission, color
  Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
  __host__ __device__
  Sphere(float rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_)
      : rad(rad_), p(p_), e(e_), c(c_), refl(refl_) {}
  // Ray-sphere intersection via the quadratic formula; returns the nearest
  // positive hit distance beyond eps (which rejects self-intersections),
  // or 0 on a miss.
  __device__
  inline float intersect(const Ray &r) const { // returns distance, 0 if nohit
    Vec op = p - r.o; // Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
    float t, eps = 1.5e-2f, b = op.dot(r.d),
             det = b * b - op.dot(op) + rad * rad;
    if (det < 0)
      return 0;
    else
      det = sqrt(det);
    // Prefer the near root; fall back to the far root if the near one is
    // behind (or too close to) the origin.
    return (t = b - det) > eps ? t : ((t = b + det) > eps ? t : 0);
  }
};
// Clamp x into [0, 1].
__host__ __device__
inline float clamp(float x) {
  if (x < 0) return 0;
  if (x > 1) return 1;
  return x;
}
// Gamma-correct (1/2.2) a [0,1] colour value and convert to a 0-255 int.
inline int toInt(float x) {
  const float gammaCorrected = pow(clamp(x), 1 / 2.2f);
  return int(gammaCorrected * 255 + .5f);
}
// Find the nearest sphere hit by ray r. On a hit, t holds the distance and
// id the sphere index; returns true iff anything was hit.
// Generalized: `count` is the number of spheres to test; it defaults to 9
// (the hard-coded scene size) so existing call sites are unchanged.
template<typename T>
__device__
inline bool intersect(const Ray &r, float &t, int &id,
                      T spheres, int count = 9) {
  float d, inf = t = 1e20f;
  for (int i = count; i--;)
    if ((d = spheres[i].intersect(r)) && d < t) {
      t = d;
      id = i;
    }
  return t < inf;
}
// Iterative path tracer: follow r_ through the scene, accumulating emitted
// light (cl) attenuated by the product of surface reflectances (cf).
// After 5 bounces, Russian roulette terminates the path with probability
// tied to the surface's maximum reflectance. Handles diffuse, mirror and
// dielectric (glass) materials.
template<typename T>
__device__
Vec radiance(const Ray &r_, int depth_, T spheres, RNG &rng) {
  float t;
  int id = 0;
  Ray r = r_;
  int depth = depth_;
  Vec cl(0, 0, 0); // accumulated color
  Vec cf(1, 1, 1); // accumulated reflectance
  while (1) {
    if (!intersect(r, t, id, spheres))
      return cl; // if miss, return black
    const Sphere &obj = spheres[id]; // the hit object
    // x = hit point, n = outward normal, nl = normal facing the ray.
    Vec x = r.o + r.d * t, n = (x - obj.p).norm(),
        nl = n.dot(r.d) < 0 ? n : n * -1, f = obj.c;
    float p = f.x > f.y && f.x > f.z ? f.x : f.y > f.z ? f.y : f.z; // max refl
    cl = cl + cf.mult(obj.e);
    // Russian roulette after 5 bounces: survive with probability p,
    // boosting the reflectance to keep the estimate unbiased.
    if (++depth > 5) {
      if (rng() < p) {
        f = f * (1 / p);
      } else {
        return cl;
      }
    } // R.R.
    cf = cf.mult(f);
    if (obj.refl == DIFF) { // Ideal DIFFUSE reflection
      // Sample a cosine-weighted direction in the hemisphere around nl
      // using an orthonormal basis (u, v, w).
      float r1 = 2 * M_PI * rng(), r2 = rng(), r2s = sqrt(r2);
      Vec w = nl,
          u = ((fabs(w.x) > .1 ? Vec(0, 1) : Vec(1)) % w).norm(),
          v = w % u;
      Vec d = (u * cos(r1) * r2s + v * sin(r1) * r2s +
               w * sqrt(1 - r2)).norm();
      r = Ray(x, d);
      continue;
    } else if (obj.refl == SPEC) { // Ideal SPECULAR reflection
      r = Ray(x, r.d - n * 2 * n.dot(r.d));
      continue;
    }
    Ray reflRay(x, r.d - n * 2 * n.dot(r.d)); // Ideal dielectric REFRACTION
    bool into = n.dot(nl) > 0; // Ray from outside going in?
    float nc = 1, nt = 1.5, nnt = into ? nc / nt : nt / nc, ddn = r.d.dot(nl),
          cos2t;
    if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) <
        0) { // Total internal reflection
      r = reflRay;
      continue;
    }
    Vec tdir =
        (r.d * nnt -
         n * ((into ? 1 : -1) * (ddn * nnt + sqrt(cos2t)))).norm();
    // Schlick's approximation of the Fresnel reflectance Re; choose
    // reflection or refraction probabilistically (weights RP/TP keep the
    // estimator unbiased).
    float a = nt - nc, b = nt + nc, R0 = a * a / (b * b),
          c = 1 - (into ? -ddn : tdir.dot(n));
    float Re = R0 + (1 - R0) * c * c * c * c * c, Tr = 1 - Re,
          P = .25 + .5 * Re, RP = Re / P, TP = Tr / (1 - P);
    if (rng() < P) {
      cf = cf * RP;
      r = reflRay;
    } else {
      cf = cf * TP;
      r = Ray(x, tdir);
    }
    continue;
  }
}
// One thread per pixel: trace `samps` jittered rays through each of the
// 2x2 subpixels, average their radiance, and accumulate the clamped result
// (weighted 1/4 per subpixel) into c at the vertically-flipped pixel index.
// Expects a 2-D launch covering the w x h image exactly (no bounds guard).
__global__ void raytrace(Vec* c, const Sphere* spheres_, const int w, const int h, const int samps) {
  int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;
  Vec r;
  const Sphere *spheres = &spheres_[0];
  Ray cam(Vec(50, 52, 295.6), Vec(0, -0.042612, -1).norm()); // cam pos, dir
  // cx/cy span the image plane; .5135 sets the field of view.
  Vec cx = Vec(w * .5135 / h), cy = (cx % cam.d).norm() * .5135;
  RNG rng(1 + (y * w) + x); // initialise our own rng with rand() seed
  // i flips y so row 0 of the output is the top of the image.
  for (int sy = 0, i = (h - y - 1) * w + x; sy < 2; sy++) // 2x2 subpixel rows
    for (int sx = 0; sx < 2; sx++, r = Vec()) { // 2x2 subpixel cols
      for (int s = 0; s < samps; s++) {
        // Tent-filter jitter in [-1, 1] for antialiasing.
        float r1 = 2 * rng(), dx = r1 < 1 ? sqrt(r1) - 1
                                          : 1 - sqrt(2 - r1);
        float r2 = 2 * rng(), dy = r2 < 1 ? sqrt(r2) - 1
                                          : 1 - sqrt(2 - r2);
        Vec d = cx * (((sx + .5 + dx) / 2 + x) / w - .5) +
                cy * (((sy + .5 + dy) / 2 + y) / h - .5) + cam.d;
        r = r + radiance(Ray(cam.o + d * 140, d.norm()), 0, spheres, rng) *
                    (1. / samps);
      } // Camera rays are pushed ^^^^^ forward to start in interior
      c[i] = c[i] + Vec(clamp(r.x), clamp(r.y), clamp(r.z)) * .25;
    }
};
// Build the scene, render it on the GPU, and write image.ppm.
int main(int argc, char *argv[]) {
  const Sphere spheres_glob[] = {
      // Scene: radius, position, emission, color, material
      Sphere(1e4, Vec(1e4 + 1, 40.8, 81.6), Vec(), Vec(.75, .25, .25),
             DIFF), // Left
      Sphere(1e4, Vec(-1e4 + 99, 40.8, 81.6), Vec(), Vec(.25, .25, .75),
             DIFF), // Rght
      Sphere(1e4, Vec(50, 40.8, 1e4), Vec(), Vec(.75, .75, .75), DIFF), // Back
      Sphere(1e4, Vec(50, 40.8, -1e4 + 170), Vec(), Vec(), DIFF), // Frnt
      Sphere(1e4, Vec(50, 1e4, 81.6), Vec(), Vec(.75, .75, .75), DIFF), // Botm
      Sphere(1e4, Vec(50, -1e4 + 81.6, 81.6), Vec(), Vec(.75, .75, .75),
             DIFF), // Top
      Sphere(16.5, Vec(27, 16.5, 47), Vec(), Vec(1, 1, 1) * .999, SPEC), // Mirr
      Sphere(16.5, Vec(73, 16.5, 78), Vec(), Vec(1, 1, 1) * .999, REFR), // Glas
      Sphere(600, Vec(50, 681.6 - .27, 81.6), Vec(12, 12, 12), Vec(),
             DIFF) // Lite
  };
  int w = 1024, h = 768;
  int samps = 40; // # samples per subpixel
  Vec *c = (Vec*) malloc(sizeof(Vec) * w * h);
  Vec *color_buffer;
  Sphere *spheres_buffer;
  cudaMalloc((void**)&color_buffer, sizeof(Vec) * w * h);
  cudaMalloc((void**)&spheres_buffer, sizeof(Sphere) * 9);
  // BUGFIX: the kernel accumulates with `c[i] = c[i] + ...`, so the device
  // buffer must start zeroed — cudaMalloc memory is uninitialised.
  cudaMemset(color_buffer, 0, sizeof(Vec) * w * h);
  cudaMemcpy(spheres_buffer, &spheres_glob[0], sizeof(Sphere) * 9, cudaMemcpyHostToDevice);
  raytrace<<< dim3(w/8, h/8), dim3(8, 8) >>>(color_buffer, spheres_buffer, w, h, samps);
  // Blocking copy also synchronises with the kernel.
  cudaMemcpy(c, color_buffer, sizeof(Vec) * w * h, cudaMemcpyDeviceToHost);
  FILE *f = fopen("image.ppm", "w"); // Write image to PPM file.
  if (f != NULL) {
    fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
    for (int i = 0; i < w * h; i++)
      fprintf(f, "%d %d %d ", toInt(c[i].x), toInt(c[i].y), toInt(c[i].z));
    fclose(f); // BUGFIX: the file was never closed/flushed
  }
  free(c);
  cudaFree(color_buffer);
  cudaFree(spheres_buffer);
  return 0;
}
|
6,456 | #include <stdio.h>
#include <ctime>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <climits>
using namespace std;
// Recursively collect all regular files under `dir` into `files` as absolute
// paths. Returns false if any directory cannot be opened. Note: uses chdir,
// so the process working directory moves during the scan (restored level by
// level via chdir("..")).
bool scan_dir(const char* dir, vector<string> &files)
{
    DIR *dp;               // directory stream
    struct dirent *entry;  // current directory entry
    struct stat statbuf;   // attributes of the current entry
    if((dp = opendir(dir)) == NULL) return false; // open the directory stream
    chdir (dir);           // enter the directory so d_name resolves
    while((entry = readdir(dp)) != NULL)
    {
        // BUGFIX: check lstat's return — on failure statbuf is stale/garbage.
        if (lstat(entry->d_name, &statbuf) != 0) continue;
        // BUGFIX: use S_ISDIR; the old bit-test `S_IFDIR & st_mode` also
        // matches other file types whose mode bits overlap S_IFDIR
        // (e.g. sockets: S_IFSOCK & S_IFDIR != 0).
        if(S_ISDIR(statbuf.st_mode))
        {
            if (strcmp(".", entry->d_name) == 0 || strcmp("..", entry->d_name) == 0)
                continue;
            if (!scan_dir(entry->d_name, files)) return false; // recurse
        }
        else
        {
            // BUGFIX: getcwd(NULL, 0) allocates; free it to avoid a leak
            // per file.
            char *cwd = getcwd(NULL, 0);
            files.push_back(string(cwd) + "/" + string(entry->d_name));
            free(cwd);
        }
    }
    chdir("..");   // back to the parent directory
    closedir(dp);  // close the directory stream
    return true;
}
// Parse one integer per line from every file in `files`, appending the
// values to `data`. Unreadable files are reported and skipped.
void read_data(vector<string> &files, vector<int> &data)
{
    char line[256];
    for (size_t idx = 0; idx < files.size(); ++idx)
    {
        ifstream fin(files[idx].c_str(), ios::in);
        if (!fin.is_open())
        {
            cout<<"load "<<files[idx]<<" fail."<<endl;
            continue;
        }
        cout<<"loading "<<files[idx]<<endl;
        while (fin.getline(line, 256))
            data.push_back(atoi(line));
        fin.close();
    }
}
// Print the CUDA error (with source location) and abort; no-op on success.
// Invoked through the HANDLE_ERROR macro below.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Branchless comparison: returns 1 when a < b, 0 when a >= b.
// (Relies on the sign bit of a-b; assumes no signed overflow in a-b.)
__device__ int cmp_without_if(int a, int b)
{
    return ((a - b) >> 31) & 0x1;
}
// Branchless max: a - (a-b) == b when a < b, otherwise a.
// (Relies on the sign bit of a-b; assumes no signed overflow in a-b.)
__device__ int max_without_if(int a, int b)
{
    int diff = a - b;
    int isLess = (diff >> 31) & 0x1; // 1 iff a < b
    return a - isLess * diff;
}
// Branchless min, derived from the identity min(a,b) = a + b - max(a,b).
__device__ int min_without_if(int a, int b)
{
    int sum = a + b;
    return sum - max_without_if(a, b);
}
// Single-block sum reduction: each thread strides over d_arr accumulating a
// partial sum in shared memory, then a tree reduction combines them and
// thread 0 writes the total to *d_sum.
// Launch with ONE block; assumes blockDim.x is a power of two <= 1024.
__global__ void reduce_sum(int *d_arr, int len, int *d_sum)
{
    __shared__ int cache[1024];
    int tid = threadIdx.x;
    cache[tid] = 0;
    // Strided accumulation of this thread's share of the array.
    for(int i=tid; i<len; i+=blockDim.x)
        cache[tid] += d_arr[i];
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    int i = blockDim.x/2;
    while(i)
    {
        if (tid < i) cache[tid] += cache[tid+i];
        __syncthreads();
        i /= 2;
    }
    if (tid == 0) *d_sum = cache[0];
}
// Single-block reduction of per-thread partial minima/maxima (as produced by
// partition_min_max) down to a single min and max.
// Launch with ONE block; assumes blockDim.x is a power of two <= 1024.
// Sentinels +/-9999999 assume |values| < 9999999 — TODO confirm data range.
__global__ void reduce_min_max(int *d_min_arr, int *d_max_arr, int len, int *d_min, int *d_max)
{
    __shared__ int mincache[1024];
    __shared__ int maxcache[1024];
    int tid = threadIdx.x;
    mincache[tid] = 9999999;
    maxcache[tid] = -9999999;
    // Strided pass over the partial arrays.
    for(int i=tid; i<len; i+=blockDim.x)
    {
        mincache[tid] = min_without_if(mincache[tid], d_min_arr[i]);
        maxcache[tid] = max_without_if(maxcache[tid], d_max_arr[i]);
    }
    __syncthreads();
    // Tree reduction over both caches simultaneously.
    int i = blockDim.x/2;
    while(i)
    {
        if (tid < i)
        {
            mincache[tid] = min_without_if(mincache[tid], mincache[tid+i]);
            maxcache[tid] = max_without_if(maxcache[tid], maxcache[tid+i]);
        }
        __syncthreads();
        i /= 2;
    }
    if (tid == 0)
    {
        *d_min = mincache[0];
        *d_max = maxcache[0];
    }
}
// Grid-stride pass computing one partial min and max per thread, written to
// d_min_arr/d_max_arr at the thread's flat index (arrays must hold
// gridDim.x*blockDim.x elements; reduce_min_max combines them).
// Sentinels +/-9999999 assume |values| < 9999999 — TODO confirm data range.
__global__ void partition_min_max(int *d_arr, int len, int *d_min_arr, int *d_max_arr)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int max = -9999999, min = 9999999;
    for (int i=tid; i<len; i+=blockDim.x*gridDim.x)
    {
        min = min_without_if(min, d_arr[i]);
        max = max_without_if(max, d_arr[i]);
    }
    d_min_arr[tid] = min;
    d_max_arr[tid] = max;
}
// Grid-stride count, per thread, of elements strictly less than `pivot`.
// Results land at the thread's flat index in d_lt_num_arr (reduce_sum adds
// them up afterwards).
__global__ void partition_less_num(int pivot,int *d_arr,int len,int *d_lt_num_arr)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int count = 0;
    for (int i=tid; i<len; i+=blockDim.x*gridDim.x)
        count += cmp_without_if(d_arr[i], pivot); // 1 iff d_arr[i] < pivot
    d_lt_num_arr[tid] = count;
}
__host__ int nth_order_stat(int target_order_stat, int *h_arr, int *d_arr, int len);
// Quickselect step: pick a random pivot, count (on the GPU) how many
// elements are below it, and either return the pivot or recurse into the
// matching partition (mutually recursive with nth_order_stat).
__host__ int partition_on_pivot(int target_order_stat, int *h_arr, int *d_arr, int len)
{
    int pivot = h_arr[rand()%len];
    int threadNum = 1024;
    int blockNum = (len + threadNum - 1) / threadNum; // ceil-div
    blockNum = blockNum <= 1024 ? blockNum : 1024;
    int *d_lt_num_arr, *d_lt_num, lt_num;
    HANDLE_ERROR( cudaMalloc((void**)&d_lt_num_arr,sizeof(int)*blockNum*threadNum) );
    HANDLE_ERROR( cudaMalloc((void**)&d_lt_num,sizeof(int)) );
    partition_less_num<<<blockNum,threadNum>>>(pivot,d_arr,len,d_lt_num_arr);
    reduce_sum<<<1,1024>>>(d_lt_num_arr,blockNum*threadNum,d_lt_num);
    // BUGFIX: the destination was mangled to `<_num` (a stray HTML-escaped
    // `&lt_num`); copy into the address of the host variable.
    HANDLE_ERROR( cudaMemcpy(&lt_num,d_lt_num,sizeof(int),cudaMemcpyDeviceToHost) );
    HANDLE_ERROR( cudaFree(d_lt_num_arr) );
    HANDLE_ERROR( cudaFree(d_lt_num) );
    // The pivot is the (lt_num+1)-th smallest element.
    int pivot_order_stat = lt_num + 1;
    if (pivot_order_stat == target_order_stat)
        return pivot;
    else if (pivot_order_stat == 1)
        // Pivot was the minimum: re-roll rather than recurse on the same set.
        return partition_on_pivot(target_order_stat, h_arr, d_arr, len);
    else if (pivot_order_stat < target_order_stat)
    {
        // Target lies in the >= pivot partition.
        target_order_stat -= lt_num;
        int *h_subarr = new int[len-lt_num];
        int subarr_num = 0;
        for (int i=0; i<len; i++)
        {
            if (h_arr[i] >= pivot) h_subarr[subarr_num++] = h_arr[i];
        }
        int *d_subarr;
        HANDLE_ERROR( cudaMalloc((void**)&d_subarr,sizeof(int)*subarr_num) );
        HANDLE_ERROR( cudaMemcpy(d_subarr,h_subarr,sizeof(int)*subarr_num,cudaMemcpyHostToDevice) );
        int res = nth_order_stat(target_order_stat, h_subarr, d_subarr, subarr_num);
        delete[] h_subarr;
        HANDLE_ERROR( cudaFree(d_subarr) );
        return res;
    }
    else
    {
        // Target lies in the < pivot partition.
        int *h_subarr = new int[lt_num];
        int subarr_num = 0;
        for (int i=0; i<len; i++)
        {
            if (h_arr[i] < pivot) h_subarr[subarr_num++] = h_arr[i];
        }
        int *d_subarr;
        HANDLE_ERROR( cudaMalloc((void**)&d_subarr,sizeof(int)*subarr_num) );
        HANDLE_ERROR( cudaMemcpy(d_subarr,h_subarr,sizeof(int)*subarr_num,cudaMemcpyHostToDevice) );
        int res = nth_order_stat(target_order_stat, h_subarr, d_subarr, subarr_num);
        delete[] h_subarr;
        HANDLE_ERROR( cudaFree(d_subarr) );
        return res;
    }
}
// Return the target_order_stat-th smallest element of h_arr/d_arr (both hold
// the same len values). First computes the global min/max on the GPU to
// short-circuit the trivial cases, otherwise falls through to the random
// quickselect in partition_on_pivot.
__host__ int nth_order_stat(int target_order_stat, int *h_arr, int *d_arr, int len)
{
    int threadNum = 1024;
    int blockNum = (len + threadNum - 1) / threadNum; // ceil-div, capped below
    blockNum = blockNum <= 1024 ? blockNum : 1024;
    int *d_min_arr, *d_max_arr;
    int h_min, h_max, *d_min, *d_max;
    HANDLE_ERROR( cudaMalloc((void**)&d_min_arr,sizeof(int)*blockNum*threadNum) );
    HANDLE_ERROR( cudaMalloc((void**)&d_max_arr,sizeof(int)*blockNum*threadNum) );
    HANDLE_ERROR( cudaMalloc((void**)&d_min,sizeof(int)) );
    HANDLE_ERROR( cudaMalloc((void**)&d_max,sizeof(int)) );
    // Two-phase reduction: per-thread partials, then a single-block combine.
    partition_min_max<<<blockNum,threadNum>>>(d_arr,len,d_min_arr,d_max_arr);
    reduce_min_max<<<1,1024>>>(d_min_arr,d_max_arr,blockNum*threadNum,d_min,d_max);
    HANDLE_ERROR( cudaMemcpy(&h_min,d_min,sizeof(int),cudaMemcpyDeviceToHost) );
    HANDLE_ERROR( cudaMemcpy(&h_max,d_max,sizeof(int),cudaMemcpyDeviceToHost) );
    HANDLE_ERROR (cudaFree(d_min_arr));
    HANDLE_ERROR (cudaFree(d_max_arr));
    HANDLE_ERROR (cudaFree(d_min));
    HANDLE_ERROR (cudaFree(d_max));
    // Trivial cases: all-equal array, smallest, or largest element.
    if (h_max == h_min || target_order_stat == 1)
        return h_min;
    else if (target_order_stat == len)
        return h_max;
    else
        return partition_on_pivot(target_order_stat, h_arr, d_arr, len);
}
// Median of arr[0..len): uploads the data and runs the GPU order-statistic
// selection. The rank (int)((len+0.5)/2) matches the original behaviour
// (e.g. len=5 -> rank 2, len=4 -> rank 2).
__host__ int median(int *arr, int len)
{
    int *d_arr;
    // Consistency fix: route CUDA calls through HANDLE_ERROR like the rest
    // of this file instead of silently ignoring failures.
    HANDLE_ERROR( cudaMalloc((void**)&d_arr,sizeof(int)*len) );
    HANDLE_ERROR( cudaMemcpy(d_arr,arr,sizeof(int)*len,cudaMemcpyHostToDevice) );
    int res = nth_order_stat((int)((len+0.5)/2), arr, d_arr, len);
    HANDLE_ERROR( cudaFree(d_arr) );
    return res;
}
// Load all integers under ./data40M, compute their median on the GPU, and
// report IO / algorithm / total wall times.
int main()
{
    // Timestamp: start.
    clock_t h_t1 = clock();
    // Load the input data.
    vector<string> files;
    scan_dir("./data40M", files);
    vector<int> h_vec;
    read_data(files,h_vec);
    // Timestamp: data loaded.
    clock_t h_t2 = clock();
    // GPU timing: start event.
    cudaEvent_t d_start, d_stop;
    cudaEventCreate( &d_start );
    cudaEventCreate( &d_stop );
    cudaEventRecord( d_start, 0 );
    // Run the median algorithm.
    int res = median(h_vec.data(), h_vec.size());
    // GPU timing: stop event.
    cudaEventRecord( d_stop, 0 );
    cudaEventSynchronize( d_stop );
    float elapsedTime;
    // cudaEventElapsedTime reports milliseconds.
    cudaEventElapsedTime( &elapsedTime, d_start, d_stop );
    printf( "Time on GPU: %.3f s\n", elapsedTime/1000 );
    // Timestamp: end.
    clock_t h_end = clock();
    cout<<"Median: "<<res<<endl;
    cout<<"IO Time: "<<(double)(h_t2-h_t1)/CLOCKS_PER_SEC<<"s"<<endl;
    cout<<"Algorithm Time: "<<(double)(h_end-h_t2)/CLOCKS_PER_SEC<<"s"<<endl;
    cout<<"Total Time: "<<(double)(h_end-h_t1)/CLOCKS_PER_SEC<<"s"<<endl;
    cudaEventDestroy( d_start );
    cudaEventDestroy( d_stop );
    return 0;
}
|
6,457 | #include <stdio.h>
#define MAX_BUF 100000000
typedef unsigned int UINT;
UINT buffer[MAX_BUF];
// Kernels all use this unified name; the parameter list may be customised.
// Placeholder: the sorting kernel body is not implemented yet.
__global__ void kernel()
{
}
// Read a binary file whose first UINT is the element count, followed by that
// many UINTs, into data[]. Returns the element count, or 0 on any failure
// (missing file, truncated header, or count exceeding MAX_BUF).
UINT ReadFile(const char *szFile, UINT data[])
{
    UINT len;
    FILE *fp;
    fp = fopen(szFile, "rb");
    if (fp == NULL)
        return 0;            // BUGFIX: fopen failure previously crashed in fread
    if (fread(&len, sizeof(UINT), 1, fp) != 1 || len > MAX_BUF)
    {
        fclose(fp);
        return 0;            // truncated header or oversized payload
    }
    fread(data, sizeof(UINT), len, fp);
    fclose(fp);
    return len;
}
// Write `len` followed by data[0..len) as raw UINTs to szFile.
// Silently does nothing when len exceeds MAX_BUF or the file cannot be
// created (matches the original best-effort contract).
void WriteFile(const char *szFile, UINT data[], UINT len)
{
    FILE *fp;
    if (len > MAX_BUF)
        return;
    fp = fopen(szFile, "wb");
    if (fp == NULL)
        return;              // BUGFIX: fopen failure previously crashed in fwrite
    fwrite(&len, sizeof(UINT), 1, fp);
    fwrite(data, sizeof(UINT), len, fp);
    fclose(fp);
}
// Harness: read the input file named on the command line, (eventually) sort
// its contents, and write the result to output.bin.
int main(int argc, char *argv[])
{
	UINT length;
	// Exactly one argument expected: the input file path.
	if (argc != 2)
		return 1;
	// length is 0 when the read fails; WriteFile then emits an empty payload.
	length = ReadFile(argv[1], buffer);
	//sorting code
	//....
	WriteFile("output.bin", buffer, length);
	return 0;
}
6,458 | #include <cmath>
// Element-wise exponential: value[i] = exp(value[i]) for this thread's slot.
// Launch with a single 1-D block whose size equals the array length — there
// is no bounds guard.
__global__ void myexp(float* value)
{
    // expf is the idiomatic single-precision device overload; std::exp on a
    // float resolves to the same precision, so behaviour is unchanged.
    value[threadIdx.x] = expf(value[threadIdx.x]);
}
|
6,459 | #include "includes.h"
// Among columns still alive (deleted_cols[j] > 0), find the LARGEST column
// index where the rows of conflict edges a and b both have a set bit,
// accumulating into *conflict_col_id with atomicMax. Threads of the block
// stride across the columns; call from a single block per matrix.
__device__ void get_conflict_col_id(bool *dl_matrix, short *deleted_cols, int *conflict_col_id, int *conflict_edge, int total_dl_matrix_col_num, int vertex_num) {
// if(threadIdx.x==0){
// printf("conflict edge a %d edge b
// %d\n",conflict_edge[0],conflict_edge[1]);
// }
// Row pointers for the two conflicting edges.
bool *edge_a_dlmatrix =
dl_matrix + conflict_edge[0] * total_dl_matrix_col_num;
bool *edge_b_dlmatrix =
dl_matrix + conflict_edge[1] * total_dl_matrix_col_num;
for (int j = threadIdx.x; j < total_dl_matrix_col_num; j = j + blockDim.x) {
// Column counts when both edges have the bit set (==1) and the column
// has not been removed from the matrix.
if (edge_a_dlmatrix[j] == edge_b_dlmatrix[j] && deleted_cols[j] > 0 &&
edge_b_dlmatrix[j] == 1) {
atomicMax(conflict_col_id, j);
}
}
}
// Kernel overload of get_conflict_col_id for an int matrix: same search for
// the largest live column where both conflict-edge rows contain 1,
// accumulated via atomicMax. Launch with one block (threads stride columns).
__global__ void get_conflict_col_id(int *dl_matrix, int *deleted_cols, int *conflict_col_id, int *conflict_edge, int total_dl_matrix_col_num, int vertex_num){
//if(threadIdx.x==0){
// printf("conflict edge a %d edge b %d\n",conflict_edge[0],conflict_edge[1]);
// }
for (int j = threadIdx.x; j < total_dl_matrix_col_num;
j = j + blockDim.x) {
if (dl_matrix[conflict_edge[0] * total_dl_matrix_col_num + j]
== dl_matrix[conflict_edge[1] * total_dl_matrix_col_num + j] &&
deleted_cols[j] > 0 && dl_matrix[conflict_edge[1] * total_dl_matrix_col_num + j]==1) {
atomicMax(conflict_col_id, j);
}
}
// NOTE(review): this barrier before kernel exit has no observable effect;
// it is harmless but likely unnecessary.
__syncthreads();
}
// Fern-based keypoint classification: one thread per keypoint. Extracts the
// pixel patch around the keypoint (clamped to the image, column-major
// access in[column*height + row]), evaluates every fern against the
// per-class probability table, and writes the argmax class (0..249).
__global__ void matching(int *keypoints ,const unsigned char *in, int *allProbablities, int *allIndexList, int *matchingResult , int width, int height, int lenght, int fernNum, int fernSize, int patchLenght){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int patchSize =(int)(patchLenght /2);
    int x = keypoints[index*2];
    int y = keypoints[index*2+1];
    // Clamp the patch window to the image borders.
    int startX = x - patchSize;
    int endX = x + patchSize;
    int startY = y - patchSize;
    int endY = y + patchSize;
    if(startX < 0 ){
        startX = 0;
    }
    if (endX >= width ){
        endX = width -1;
    }
    if(startY < 0 ){
        startY = 0;
    }
    if (endY >= height){
        endY = height -1;
    }
    int patchHeight = endY - startY;
    // BUGFIX: the patch width is endX - startX; the original endX - endY
    // mixed the axes and produced a bogus (often negative) width.
    int patcWidth = endX - startX;
    int size = patchHeight*patcWidth;
    if (size > 1024) size = 1024; // patch[] capacity guard (was unchecked UB)
    int patch[1024];
    int count = 0;
    // BUGFIX: iterate over the patch's columns (patcWidth) rather than its
    // height, so count ends at size = patcWidth*patchHeight.
    for(int j= 0; j < patcWidth && count < size; j++){
        for(int i = startY ; i < endY && count < size; i++){
            patch[count] = in[startX*height+i];
            count++;
        }
        startX = startX +1;
    }
    int result[250];
    // BUGFIX: result[] was read-modify-written without initialisation.
    for(int j = 0; j < 250; j++) result[j] = 0;
    int I1, I2,num, decimalNum, index2;
    for(int i = 0; i< fernNum ; i++){
        // Build the fern's binary code from pixel-pair comparisons.
        decimalNum = 0;
        num = lenght/2;
        for(int j = 0; j < fernSize; j++){
            index2 = (fernSize*i*2)+(j*2);
            I1 = allIndexList[index2];
            I2 = allIndexList[index2+1];
            if(I1 < size && I2 < size){
                if(patch[I1] < patch[I2]){
                    decimalNum = decimalNum +num;
                }
                num = num /2;
            }
        }
        // Accumulate log-probabilities for all 250 classes.
        for(int j = 0; j< 250; j++){
            result[j] = result[j] + logf(allProbablities[j*lenght+decimalNum]);
        }
    }
    // Argmax over classes.
    num = result[0];
    index2 = 0;
    for(int k = 1; k < 250; k++){
        decimalNum = result[k];
        if( decimalNum> num ){
            num = decimalNum;
            index2 = k;
        }
    }
    matchingResult[index] = index2;
}
6,461 | #include "includes.h"
// Jim Samson
// CSF441 Computer Architecture
// Assignment 4
// Most code is written by Dr. Mock
// This HW Assignment uses cuda and the Sobel filter to convert an image.
/***********************************************************************
* sobel-cpu.cu
*
* Implements a Sobel filter on the image that is hard-coded in main.
* You might add the image name as a command line option if you were
* to use this more than as a one-off assignment.
*
* See https://stackoverflow.com/questions/17815687/image-processing-implementing-sobel-filter
* or https://blog.saush.com/2011/04/20/edge-detection-with-the-sobel-operator-in-ruby/
* for info on how the filter is implemented.
*
* Compile/run with: nvcc sobel-cpu.cu -lfreeimage
*
***********************************************************************/
#define threadsPerBlock 22
// Flatten a 2-D (x, y) coordinate into the 1-D pixel array index for an
// image `width` pixels wide.
__device__ int pixelIndex(int x, int y, int width)
{
    return x + y * width;
}
// Sobel edge filter: each thread computes the gradient magnitude at its
// (x, y) pixel from the 3x3 neighbourhood and writes it to returnPixels.
// NOTE(review): there is no border guard — at x==0, y==0, x==width-1 or the
// bottom row the stencil reads out of bounds; the launch must exclude the
// border pixels, or a guard needs the image height (not a parameter here).
// NOTE(review): `pixels` is plain char; where char is signed, values > 127
// read as negative — confirm input range or switch to unsigned char.
__global__ void sobel(char *returnPixels, int width, char *pixels)
{
 int x = threadIdx.x + blockIdx.x * blockDim.x;
 int y = threadIdx.y + blockIdx.y * blockDim.y;
 // Horizontal kernel Gx coefficients (left/right columns).
 int x00 = -1; int x20 = 1;
 int x01 = -2; int x21 = 2;
 int x02 = -1; int x22 = 1;
 x00 *= pixels[pixelIndex(x-1,y-1,width)];
 x01 *= pixels[pixelIndex(x-1,y,width)];
 x02 *= pixels[pixelIndex(x-1,y+1,width)];
 x20 *= pixels[pixelIndex(x+1,y-1,width)];
 x21 *= pixels[pixelIndex(x+1,y,width)];
 x22 *= pixels[pixelIndex(x+1,y+1,width)];
 // Vertical kernel Gy coefficients (top/bottom rows).
 int y00 = -1; int y10 = -2; int y20 = -1;
 int y02 = 1; int y12 = 2; int y22 = 1;
 y00 *= pixels[pixelIndex(x-1,y-1,width)];
 y10 *= pixels[pixelIndex(x,y-1,width)];
 y20 *= pixels[pixelIndex(x+1,y-1,width)];
 y02 *= pixels[pixelIndex(x-1,y+1,width)];
 y12 *= pixels[pixelIndex(x,y+1,width)];
 y22 *= pixels[pixelIndex(x+1,y+1,width)];
 int px = x00 + x01 + x02 + x20 + x21 + x22;
 int py = y00 + y10 + y20 + y02 + y12 + y22;
 // Gradient magnitude sqrt(Gx^2 + Gy^2), truncated into the char output.
 returnPixels[pixelIndex(x,y,width)] = sqrt(float(px*px + py*py));
}
6,462 | #include <stdio.h>
// Print basic properties of CUDA device 0.
int main(int argc, char **argv) {
  int nDevices = 0;
  // Fail cleanly when no device/driver is present rather than printing the
  // contents of an uninitialised cudaDeviceProp (also gives the previously
  // unused nDevices a purpose).
  if (cudaGetDeviceCount(&nDevices) != cudaSuccess || nDevices == 0) {
    printf("No CUDA devices found\n");
    return 1;
  }
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  printf("Device name: %s\n", prop.name);
  printf("Capabilities: %d.%d\n", prop.major, prop.minor);
  printf("Global mem: %lu\n", prop.totalGlobalMem / 1024 / 1024 / 1024);
  printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
  return 0;
}
|
6,463 | #include "includes.h"
// Numerically-stable softmax over input[0..n) with temperature `temp`,
// written to output[0..n): shifts by the maximum before exponentiating,
// then normalises by the sum.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for (i = 0; i < n; ++i) {
        // BUGFIX: `val` was declared int, truncating each input before the
        // max scan — that broke the stability shift (and the result) for
        // any non-integral inputs.
        float val = input[i];
        largest = (val > largest) ? val : largest;
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i] /= sum;
    }
}
// One thread per batch element: run softmax on that element's
// `offset`-strided slice of input/output.
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
    int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (b < batch)
        softmax_device(n, input + b*offset, temp, output + b*offset);
}
6,464 | /*
CUDA kernels and functions
Kurt Kaminski 2016
*/
#ifndef __FLUID_KERNELS__
#define __FLUID_KERNELS__
#include <cuda_runtime.h>
//__device__ const int BLOCK_SIZE = 8;
//__device__ const int GRID_SIZE = 64;
// Clamp an int into the displayable byte range [0, 255].
__device__ int
clamp(int i)
{
	return (i < 0) ? 0 : ((i > 255) ? 255 : i);
}
// Clamp a float into [min, max].
__device__ float
clamp(float i, float min, float max)
{
	return (i < min) ? min : ((i > max) ? max : i);
}
// Linearly remap valueIn from [baseMin, baseMax] into [limitMin, limitMax].
__device__ float
fitRange(float valueIn, float baseMin, float baseMax, float limitMin, float limitMax)
{
	return ((limitMax - limitMin) * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin;
}
// Get 1d index from 2d coords; the row stride is the full launch width
// (blockDim.x * gridDim.x), so the grid must exactly span the field width.
__device__ int
IX(int x, int y)
{
	return x + (y * blockDim.x * gridDim.x);
	//return x + (y * BLOCK_SIZE * GRID_SIZE);
}
// Global X coordinate of this thread; coordinates past the field width w
// wrap to 0 (so out-of-range threads alias cell column 0).
__device__ int
getX(int w)
{
	int x = threadIdx.x + (blockIdx.x * blockDim.x);
	return (x >= w) ? 0 : x;
}
// Global Y coordinate of this thread; coordinates past the field height h
// wrap to 0 (so out-of-range threads alias cell row 0).
__device__ int
getY(int h)
{
	int y = threadIdx.y + (blockIdx.y * blockDim.y);
	return (y >= h) ? 0 : y;
}
// Returns true if (x, y) is inside the container interior (2-cell margin)
// AND outside the user-defined boundary mask (red channel of _boundary < 1).
__device__ bool
checkBounds(float *_boundary, int x, int y, int w, int h)
{
	return x > 1 && x < w-2 && y > 1 && y < h-2 && _boundary[4*IX(x,y)+0] < 1;
}
// Returns true if (x, y) is inside the container interior (2-cell margin),
// ignoring any user boundary mask.
__device__ bool
checkBounds(int x, int y, int w, int h)
{
	return x > 1 && x < w-2 && y > 1 && y < h-2;
}
// Pack four 0.0-1.0 channel values into one int (one byte per channel,
// layout A|R|G|B from high to low byte — i.e. BGRA in increasing-address
// order, matching how the data is stored on the GPU).
__device__ int
rgbaToInt(float r, float g, float b, float a)
{
	int A = clamp((int)(a * 255.0f));
	int R = clamp((int)(r * 255.0f));
	int G = clamp((int)(g * 255.0f));
	int B = clamp((int)(b * 255.0f));
	return (A << 24) | (R << 16) | (G << 8) | (B << 0);
}
// Unpack a packed pixel into four 0.0-1.0 floats; byte layout is
// B (lowest byte), G, R, A (highest byte) — the inverse of rgbaToInt.
__device__ void
intToRgba(int pixel, float &r, float &g, float &b, float &a)
{
	b = float(pixel&0xff) / 255.0f;
	g = float((pixel>>8)&0xff) / 255.0f;
	r = float((pixel>>16)&0xff) / 255.0f;
	a = float((pixel>>24)&0xff) / 255.0f;
}
// Store one pixel's channels into a float colour buffer at cell `id`,
// in BGRA order (4 floats per pixel).
__device__ void
rgbaToColor(float *dest, int id, float r, float g, float b, float a)
{
	dest[4*id+0] = b;
	dest[4*id+1] = g;
	dest[4*id+2] = r;
	dest[4*id+3] = a;
}
// Set boundary conditions for `field` at cell (x, y): edge cells — and any
// cell flagged by the user boundary mask (red channel > 0) — take the
// negated value of the adjacent interior cell; the four corner cells take
// the average of their two neighbours.
// NOTE(review): in each ternary below both branches are `-1 * field[...]`,
// so parameter `b` currently has no effect; judging by the commented-out
// lines, the non-matching branch was probably meant to copy WITHOUT
// negation — confirm before relying on `b`.
__device__ void
set_bnd( int b, int x, int y, float *field, float *boundary, int w, int h) {
	int sz = w*h;
	int id = IX(x,y);
	// Inside the user-defined obstacle/boundary mask?
	bool outOfBnd = boundary[4*id+0] > 0.0 ? true : false;
	//if (boundary[4*id+0] > 0.0) outOfBnd = true;
	//if (x==0) field[id] = b==1 ? -1*field[IX(1,y)] : field[IX(1,y)];
	//if (x==w-1) field[id] = b==1 ? -1*field[IX(w-2,y)] : field[IX(w-2,y)];
	//if (y==0) field[id] = b==2 ? -1*field[IX(x,1)] : field[IX(x,1)];
	//if (y==h-1) field[id] = b==2 ? -1*field[IX(x,h-2)] : field[IX(x,h-2)];
	if (x==0 || outOfBnd) field[id] = b==1 ? -1*field[IX(1,y)] : -1 * field[IX(1,y)];
	if (x==w-1 || outOfBnd) field[id] = b==1 ? -1*field[IX(w-2,y)] : -1 * field[IX(w-2,y)];
	if (y==0 || outOfBnd) field[id] = b==2 ? -1*field[IX(x,1)] : -1 * field[IX(x,1)];
	if (y==h-1 || outOfBnd) field[id] = b==2 ? -1*field[IX(x,h-2)] : -1 * field[IX(x,h-2)];
	//if (outOfBnd){
	//	field[id] = -1*field[id];
	//	field[IX(x+1,y)] = -1*field[IX(x+1,y)];
	//	field[IX(x-1,y)] = -1*field[IX(x-1,y)];
	//	field[IX(x,y+1)] = -1*field[IX(x,y+1)];
	//	field[IX(x,y-1)] = -1*field[IX(x,y-1)];
	//}
	// Corner cells: average of the two adjacent edge cells.
	if (id == 0) field[id] = 0.5*(field[IX(1,0)]+field[IX(0,1)]); // southwest
	if (id == sz-w) field[id] = 0.5*(field[IX(1,h-1)]+field[IX(0, h-2)]); // northwest
	if (id == w-1) field[id] = 0.5*(field[IX(w-2,0)]+field[IX(w-1,1)]); // southeast
	if (id == sz-1) field[id] = 0.5*(field[IX(w-2,h-1)]+field[IX(w-1,h-2)]); // northeast
}
// Write `value` into a hard-coded rectangle of the field, expressed in
// normalised coordinates: x in (0.45, 0.92), y in (0.495, 0.51) — a thin
// horizontal bar used as an emitter/test pattern.
__global__ void
DrawSquare( float *field, float value, int w, int h ) {
	int x = getX(w);
	int y = getY(h);
	int id = IX(x,y);
	// Normalised position of this cell.
	float posX = (float)x/w;
	float posY = (float)y/h;
	if ( posX < .92 && posX > .45 && posY < .51 && posY > .495 ) {
		field[id] = value;
	}
}
// Kernel wrapper: apply set_bnd boundary conditions to every cell.
__global__ void
SetBoundary( int b, float *field, float *boundary, int w, int h ) {
	int x = getX(w);
	int y = getY(h);
	set_bnd(b, x, y, field, boundary, w, h);
}
// NOTE(review): this kernel has no observable effect — `_sum` is passed BY
// VALUE, so the accumulation below is discarded when the kernel returns,
// and the unsynchronised += from every thread would race regardless.
// Fixing it requires an interface change (device pointer + atomicAdd, or a
// proper reduction), so it is only flagged here.
__global__ void
getSum( float *_data, float _sum, int w, int h ) {
	int x = getX(w);
	int y = getY(h);
	_sum += _data[IX(x,y)];
}
// Fill every cell of a float field with `value`.
__global__ void
ClearArray(float *field, float value, int w, int h)
{
	field[IX(getX(w), getY(h))] = value;
}
// Fill every cell of an int field with `value` (note: the float argument is
// implicitly truncated to int on store, as in the original).
__global__ void
ClearArray(int *field, float value, int w, int h)
{
	field[IX(getX(w), getY(h))] = value;
}
// Fill the field with a horizontal ramp: cell (x, y) gets x * value.
__global__ void
MapArray(float *field, float value, int w, int h)
{
	int x = getX(w);
	field[IX(x, getY(h))] = float(x) * value;
}
// How can I template these?
// Deposit dt-scaled `value` into the 9x9 cell neighbourhood centred on the
// UI cursor position (x_coord, y_coord).
__global__ void
AddFromUI ( float *field, float value, float dt, int x_coord, int y_coord, int w, int h ) {
	int x = getX(w);
	int y = getY(h);
	// Guard-clause form of the original window test: only cells strictly
	// within +/-4 of the cursor receive the deposit.
	if (x <= x_coord-5 || x >= x_coord+5 || y <= y_coord-5 || y >= y_coord+5)
		return;
	field[IX(x,y)] += value * dt;
}
__global__ void
AddFromUI ( float *field, float *valueUI, int index, float dt, int w, int h ) {
    // Accumulate one channel (`index` into the 4-wide RGBA layout) of the
    // UI texture into the field, scaled by the timestep.
    int cell = IX(getX(w), getY(h));
    field[cell] += valueUI[4*cell + index] * dt;
}
__global__ void
AddObstacleVelocity ( float *u, float *v, float *obstacle, float dt, int w, int h ) {
    // Inject obstacle-carried velocity into the flow: the obstacle texture
    // stores the u contribution in its red channel (+2) and the v
    // contribution in its green channel (+1).
    int cell = IX(getX(w), getY(h));
    u[cell] += obstacle[4*cell + 2] * dt; // red
    v[cell] += obstacle[4*cell + 1] * dt; // green
}
__global__ void
SetFromUI ( float *A, float *B, float *valueUI, int w, int h ) {
    // Overwrite A/B from the UI texture wherever the red/green channel is
    // positive; zero or negative channels leave the fields untouched.
    int cell = IX(getX(w), getY(h));
    float red   = valueUI[4*cell + 2];
    float green = valueUI[4*cell + 1];
    if (red > 0.0)
        A[cell] = red;
    if (green > 0.0)
        B[cell] = green;
}
__global__ void
MakeSource(int *src, float *dest, int w, int h)
{
    // Unpack the packed-int pixel and keep only the red channel as the
    // (float) source strength for this cell.
    int cell = IX(getX(w), getY(h));
    float r, g, b, a;
    intToRgba(src[cell], r, g, b, a);
    dest[cell] = r;
}
// *!* This is currently only grabbing the red channel *!*
__global__ void
MakeSource(int *src, int *dest, int w, int h)
{
    // Extract the red channel (low byte) of the packed pixel and normalize
    // it to 0/1.
    // Bug fix: the original `src[id]&0xff/255` parsed as
    // `src[id] & (0xff/255)` == `src[id] & 1` because `/` binds tighter
    // than `&` -- it extracted bit 0, not the red channel. The mask is now
    // applied before the divide. (The unused intToRgba unpack was removed.)
    int x = getX(w);
    int y = getY(h);
    int id = IX(x,y);
    dest[id] = (src[id] & 0xff) / 255;
}
__global__ void
AddSource(float *field, float *source, float dt, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
field[id] += (dt * source[id]);
}
__global__ void
MakeColor(float *src, int *dest, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
dest[id] = rgbaToInt(src[id], src[id], src[id], 1.0);
//dest[id] = rgbaToInt(1.0, src[id], src[id], 1.0);
}
__global__ void
MakeColor(float *src0, float *src1, float *src2, float *src3, float *dest, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
rgbaToColor(dest, id, src0[id], src1[id], src2[id], src3[id]);
}
__device__ float
bilerp(float *src, float _i, float _j, int w, int h)
{
    // Bilinearly sample `src` at the fractional coordinate (_i, _j). The
    // point is clamped into [0.5, dim-1.5] so the 2x2 read stencil stays
    // inside the grid interior.
    //
    // Bug fix: the original clamped with two independent tests on the
    // ORIGINAL values (`i = (_i < lo) ? lo : _i; i = (_i > hi) ? hi : _i;`),
    // so the second assignment discarded the first clamp -- e.g. _i < 0.5
    // ended up unclamped and indexed out of bounds. Clamp sequentially on
    // the working value instead.
    float lo  = 0.5f;
    float hiI = float(w) - 2.0f + 0.5f;
    float hiJ = float(h) - 2.0f + 0.5f;
    float i = _i;
    float j = _j;
    if (i < lo)  i = lo;
    if (i > hiI) i = hiI;
    if (j < lo)  j = lo;
    if (j > hiJ) j = hiJ;
    // Integer corners and interpolation weights.
    int i0 = int(i), i1 = i0 + 1;
    int j0 = int(j), j1 = j0 + 1;
    float s1 = i - i0, s0 = 1.0f - s1;
    float t1 = j - j0, t0 = 1.0f - t1;
    return s0*(t0*src[IX(i0,j0)] + t1*src[IX(i0,j1)]) +
           s1*(t0*src[IX(i1,j0)] + t1*src[IX(i1,j1)]);
}
// Semi-Lagrangian advection of a 2-component (velocity) field: trace the
// velocity at each cell backwards one timestep and bilinearly sample the
// source field at that departure point. `diff` scales the sampled value
// (dissipation factor). Cells failing checkBounds get zero velocity.
__global__ void
Advect (float *vel_u, float *vel_v, float *src_u, float *src_v,
float *boundary, float *dest_u, float *dest_v,
float timeStep, float diff, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
//if (x > 1 && x < w-1 && y > 1 && y < h-1){
if (checkBounds(boundary, x, y, w, h)) {
// Timestep scaled to grid units; uses the width only -- presumably the
// grid is square-ish. TODO confirm for non-square grids.
float dt0 = (float)timeStep * float(w-2);
float i = float(x) - dt0 * vel_u[id];
float j = float(y) - dt0 * vel_v[id];
dest_u[id] = diff * bilerp(src_u, i, j, w, h);
dest_v[id] = diff * bilerp(src_v, i, j, w, h);
}
else {
// Boundary / obstacle cells carry no advected velocity.
dest_u[id] = 0.0;
dest_v[id] = 0.0;
}
}
__global__ void
Advect (float *vel_u, float *vel_v, float *src, float *boundary, float *dest,
float timeStep, float diff, int w, int h)
{
    // Scalar overload of Advect: backtrace along the velocity field and
    // bilinearly sample the source at the departure point, scaled by `diff`.
    int x = getX(w);
    int y = getY(h);
    int cell = IX(x,y);
    if (!checkBounds(boundary, x, y, w, h)) {
        dest[cell] = 0.0;   // boundary / obstacle cells carry nothing
        return;
    }
    float scale = (float)timeStep * float(w-2);
    float px = float(x) - scale * vel_u[cell];
    float py = float(y) - scale * vel_v[cell];
    dest[cell] = diff * bilerp(src, px, py, w, h);
}
__device__ float
curl(int i, int j, float *u, float *v)
{
    // Discrete 2D curl via central differences: du/dy - dv/dx.
    return 0.5f * (u[IX(i, j+1)] - u[IX(i, j-1)])
         - 0.5f * (v[IX(i+1, j)] - v[IX(i-1, j)]);
}
__global__ void
vorticityConfinement(float *u, float *v, float *Fvc_x, float *Fvc_y, float *_boundary,
float curlAmt, float dt, int w, int h)
{
    // Vorticity confinement: re-inject small-scale rotational motion lost to
    // numerical dissipation. Computes N = grad|w| / |grad|w|| and accumulates
    // curlAmt * dt * (N x w) into the force field (Fvc_x, Fvc_y).
    int x = getX(w);
    int y = getY(h);
    int id = IX(x,y);
    if (checkBounds(_boundary, x, y, w, h)) {
        // Central difference of the curl magnitude (n = del |w|).
        // Fix: fabsf/sqrtf instead of abs/sqrt -- without <cmath> overloads,
        // abs(float) can resolve to the integer ::abs (truncating), and
        // sqrt on a float promotes the computation to double.
        float dw_dx = ( fabsf(curl(x+1,y, u, v)) - fabsf(curl(x-1,y, u, v)) ) * 0.5f;
        float dw_dy = ( fabsf(curl(x,y+1, u, v)) - fabsf(curl(x,y-1, u, v)) ) * 0.5f;
        // Gradient length; small epsilon guards the divide below.
        float length = sqrtf(dw_dx * dw_dx + dw_dy * dw_dy) + 0.000001f;
        float vel = curl(x, y, u, v);
        // N x w, scaled by dt and the confinement strength.
        Fvc_y[id] = Fvc_y[id] + ((dw_dx/length) * vel * dt * curlAmt);
        Fvc_x[id] = Fvc_x[id] + ((dw_dy/length) * -vel * dt * curlAmt);
    }
    else {
        Fvc_x[id] = 0.0;
        Fvc_y[id] = 0.0;
    }
}
// Boussinesq-style buoyancy: copy the velocity through, then where the cell
// is hotter than ambient add lift proportional to the temperature excess
// (buoy), opposed by a density/smoke-weight term (weight).
__global__ void
ApplyBuoyancy( float *vel_u, float *vel_v, float *temp, float *dens,
float *dest_u, float *dest_v, float ambientTemp, float buoy, float weight,
float dt, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
if (checkBounds(x, y, w, h)) {
dest_u[id] = vel_u[id];
dest_v[id] = vel_v[id];
float T = temp[id];
float Sigma = buoy;   // buoyancy coefficient
float Kappa = weight; // smoke-weight coefficient
if (T > ambientTemp) {
float D = dens[id];
// The horizontal term is deliberately multiplied by 0 (buoyancy acts
// only vertically here); .1 scales the vertical contribution.
dest_u[id] += (dt * (T - ambientTemp) * Sigma - D * Kappa) * 0;
dest_v[id] += (dt * (T - ambientTemp) * Sigma - D * Kappa) * .1;
}
else {
return;
}
}
}
// Velocity divergence for the pressure solve. The boundary texture is RGBA
// per cell: channel 0 flags a solid, channels 1/2 hold the obstacle's own
// v/u velocity, which replaces the fluid velocity inside solid neighbours.
__global__ void
ComputeDivergence( float *u, float *v, float *boundary, float *dest, int w, int h )
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
//if (x > 2 && x < w-2 && y > 2 && y < h-2){
if (checkBounds(x, y, w, h)){
float vN, vS, vE, vW;
// Find neighboring obstacles:
float oN = boundary[4 * IX(x, y+1) + 0];
float oS = boundary[4 * IX(x, y-1) + 0];
float oE = boundary[4 * IX(x+1, y) + 0];
float oW = boundary[4 * IX(x-1, y) + 0];
// Find neighboring velocities, substituting the obstacle's velocity for solid cells:
vN = (oN > 0.0) ? boundary[4 * IX(x, y+1) + 1] : v[IX(x, y+1)];
vS = (oS > 0.0) ? boundary[4 * IX(x, y-1) + 1] : v[IX(x, y-1)];
vE = (oE > 0.0) ? boundary[4 * IX(x+1, y) + 2] : u[IX(x+1, y)];
vW = (oW > 0.0) ? boundary[4 * IX(x-1, y) + 2] : u[IX(x-1, y)];
// Central difference, scaled to grid units.
dest[id] = 0.5 * ( vE - vW + vN - vS ) / float(w-2);
}
else {
return;
}
}
__global__ void
Jacobi( float *p, float *divergence, float *boundary, float *dest, int w, int h )
{
    // One Jacobi iteration of the pressure Poisson solve:
    //   p' = (pW + pE + pS + pN - div) / 4
    // Solid neighbours (boundary channel 0 > 0) contribute the centre cell's
    // own pressure, enforcing a zero pressure gradient into obstacles.
    // Cleanup: removed the unused tx/ty locals from the original.
    // NOTE(review): `id` is derived from blockDim.x * gridDim.x, which matches
    // IX(x,y) only if the launch grid exactly spans the field width -- confirm
    // against the launch configuration.
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    int y = threadIdx.y + (blockIdx.y * blockDim.y);
    int id = x + (y * blockDim.x * gridDim.x);
    if (checkBounds(x, y, w, h)){
        // Neighbouring obstacle flags:
        float oN = boundary[4 * IX(x, y+1) + 0];
        float oS = boundary[4 * IX(x, y-1) + 0];
        float oE = boundary[4 * IX(x+1, y) + 0];
        float oW = boundary[4 * IX(x-1, y) + 0];
        // Neighbouring pressures, substituting the centre value inside solids:
        float pN = (oN > 0.0) ? p[id] : p[IX(x, y+1)];
        float pS = (oS > 0.0) ? p[id] : p[IX(x, y-1)];
        float pE = (oE > 0.0) ? p[id] : p[IX(x+1, y)];
        float pW = (oW > 0.0) ? p[id] : p[IX(x-1, y)];
        float Alpha = -1.0;        // -cellSize^2 with cellSize == 1
        float bC = divergence[id];
        float InverseBeta = .25;   // 1 / (number of neighbours)
        dest[id] = (pW + pE + pS + pN + Alpha * bC) * InverseBeta;
    }
    else {
        return;
    }
}
__global__ void
SubtractGradient( float *vel_u, float *vel_v, float *p, float *boundary,
float *dest_u, float *dest_v, int w, int h)
{
    // Projection step: subtract the pressure gradient from the velocity so
    // the result is (approximately) divergence free. Cells adjacent to an
    // obstacle are masked to zero velocity.
    int x = getX(w);
    int y = getY(h);
    int id = IX(x,y);
    if (checkBounds(x, y, w, h)){
        // Neighbouring obstacle flags:
        float oN = boundary[4 * IX(x, y+1) + 0];
        float oS = boundary[4 * IX(x, y-1) + 0];
        float oE = boundary[4 * IX(x+1, y) + 0];
        float oW = boundary[4 * IX(x-1, y) + 0];
        // Neighbouring pressures, using the centre value inside solids:
        float pN = (oN > 0.0) ? p[id] : p[IX(x, y+1)];
        float pS = (oS > 0.0) ? p[id] : p[IX(x, y-1)];
        float pE = (oE > 0.0) ? p[id] : p[IX(x+1, y)];
        float pW = (oW > 0.0) ? p[id] : p[IX(x-1, y)];
        // Obstacle velocity from the neighbouring solid cell.
        // Bug fix: the oW branch of obstU read IX(x+1,y) (copy/paste from the
        // oE branch); it must sample the WEST neighbour IX(x-1,y). The fix is
        // currently latent because obstU/obstV are forced to 0 below.
        float obstV = (oN > 0.0) ? boundary[4 * IX(x, y+1) + 1] :
                      (oS > 0.0) ? boundary[4 * IX(x, y-1) + 1] : 0.0;
        float obstU = (oE > 0.0) ? boundary[4 * IX(x+1, y) + 2] :
                      (oW > 0.0) ? boundary[4 * IX(x-1, y) + 2] : 0.0;
        float vMask = (oN > 0.0 || oS > 0.0 || oE > 0.0 || oW > 0.0) ? 0.0 : 1.0;
        // Enforce the free-slip boundary condition:
        float old_u = vel_u[id];
        float old_v = vel_v[id];
        float GradientScale = 0.5 * float(w-2);
        float grad_u = (pE - pW) * GradientScale;
        float grad_v = (pN - pS) * GradientScale;
        float new_u = old_u - grad_u;
        float new_v = old_v - grad_v;
        // Obstacle velocity injection is disabled in the original; preserved.
        obstU = 0;
        obstV = 0;
        dest_u[id] = (vMask * new_u) + obstU;
        dest_v[id] = (vMask * new_v) + obstV;
    }
    else {
        dest_u[id] = 0.0;
        dest_v[id] = 0.0;
    }
}
#endif |
6,465 | #include <iostream>
#include <stdexcept>
#include <stdint.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
// Converts a pitched half-precision image to float, two pixels per thread.
// Not very optimized, but it's just for the test/example.
// Pitches are applied in ELEMENTS (the pointer arithmetic below is typed);
// confirm callers do not pass byte pitches.
__global__ void half2float_kernel(half* input, size_t input_pitch,
uint16_t width, uint16_t height,
float* output, size_t output_pitch) {
// Each thread handles the horizontally adjacent pixel pair (x, x+1);
// the host wrapper rejects odd widths.
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
auto in = input + y*input_pitch + x;
auto out = output + y*output_pitch + x;
// Vectorized pair conversion -- presumably rows are half2/float2 aligned
// (x is even); TODO confirm the pitch parity.
*reinterpret_cast<float2*>(out) =
__half22float2(*reinterpret_cast<half2*>(in));
}
// Ceiling division: the number of `grain`-sized chunks needed to cover `total`.
int divUp(int total, int grain) {
    const int rounded = total + grain - 1;
    return rounded / grain;
}
// Host launcher for half2float_kernel. Pitches are in elements (see kernel).
// Fix: validate the width BEFORE computing the launch grid -- the original
// computed `grid` from width/2 first, which was misleading (though harmless,
// since the throw still preceded the launch).
void half2float(half* input, size_t input_pitch,
                uint16_t width, uint16_t height,
                float* output, size_t output_pitch) {
    if (width % 2 == 1) {
        throw std::runtime_error("Width must be a multiple of 2.");
    }
    dim3 block(32, 8);
    // Each thread converts two pixels, so the x dimension covers width/2.
    dim3 grid(divUp(width/2, block.x), divUp(height, block.y));
    half2float_kernel<<<grid, block>>>
        (input, input_pitch, width, height, output, output_pitch);
    // The synchronize also surfaces launch-configuration errors.
    auto e = cudaDeviceSynchronize();
    if (e != cudaSuccess) {
        std::cerr << "CUDA runtime error converting half to float: "
                  << cudaGetErrorString(e)
                  << std::endl;
    }
}
|
6,466 | #include "includes.h"
// Softmax backward pass, one row per block:
//   dX = Y * (dY - dot(dY, Y))
// blockDim.x must be <= SOFTMAX_NUM_THREADS (size of the reduction buffer).
__global__ void softmax_gradient_kernel( const int dim, const float* Y, const float* dY, float* dX) {
// Advance all pointers to this block's row.
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
// Thread 0 serially folds the per-thread partials into slot 0.
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
} |
6,467 | #include "includes.h"
// Blelloch-style work-efficient EXCLUSIVE prefix sum over one `height`-long
// segment per block, using `temp` as global-memory scratch (the shared-memory
// variant is commented out). Each thread handles two elements, so blockDim.x
// must cover height/2; height is presumably a power of two -- TODO confirm.
// NOTE(review): the __syncthreads() calls sit inside `if (2*thid < height)`,
// which is a divergent barrier (undefined behaviour) whenever blockDim.x
// exceeds height/2 -- restructure so all threads reach the barriers.
__global__ void kCumsum(float *mat, float *target, float *temp, unsigned int height) {
// extern __shared__ float temp[];// allocated on invocation
const int thid = threadIdx.x;
if (2*thid < height) {
const int super_offset = blockIdx.x * height;
target += super_offset;
mat += super_offset;
temp += super_offset;
int offset = 1;
//float s = 0.0;
temp[2*thid] = mat[2*thid]; // load input into shared memory
temp[2*thid+1] = mat[2*thid+1];
// Up-sweep phase.
for (int d = height>>1; d > 0; d >>= 1) {// build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
} else if (thid == d && thid % 2 == 1) {
//int bi = offset*(2*thid+2)-1;
//temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[height - 1] = 0; } // clear the last element
// Down-sweep phase.
for (int d = 1; d < height; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// write results to device memory
// if (thid == -1) {
// target[0] = temp[1];
// target[height-1] = s;
// } else {
target[2*thid] = temp[2*thid];
target[2*thid+1] = temp[2*thid+1];
// }
}
} |
6,468 | #include <stdio.h>
#include <stdlib.h>
__global__ void hello_kernel(void)
{
    // Each thread prints its block id and its global thread id.
    int global_id = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Hello from block : %d, threadId : %d\n", blockIdx.x, global_id);
}
int main()
{
    // 4 blocks x 16 threads. Device printf output is buffered, so sync
    // before exiting to flush it.
    hello_kernel<<< 4, 16>>>();
    cudaDeviceSynchronize();
    exit(EXIT_SUCCESS);
}
|
6,469 | __global__ void cost_value(double * * result,
double * * Il,
double * * Ir,
double Tc,
double Tg,
double Tb)
{
// Stub: no cost computation implemented yet. Parameter names suggest a
// stereo-matching cost over left/right image planes (Il/Ir) with three
// thresholds (Tc/Tg/Tb) -- presumably colour/gradient/census; TODO implement
// and confirm the intended semantics with the caller.
} |
6,470 | #include <stdio.h>
__global__
void hello_kernel() {
    // One line per thread; threadIdx.x is cast to int to match %d.
    printf("hello world from cuda thread %d\n", int(threadIdx.x));
}
int main(void) {
    // Launch 32 printing threads, then exercise device alloc/free, checking
    // for errors after each synchronizing point.
    hello_kernel<<<1, 32>>>();
    cudaError_t cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    double* ptr;
    auto size = 10 * sizeof(double);
    double *ptr_host = (double*)malloc(size);
    cudaMalloc(&ptr, size);
    cudaFree(ptr);
    cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    free(ptr_host);  // bug fix: the host buffer was leaked in the original
    return 0;
}
|
6,471 | //~ #include <half.hpp>
__device__ void Vec_add(float *x, float *y , float* z, float gaaa[], int n) {
    /* Per-thread elementwise combine: z[i] = x[i] + y[i+2] + gaaa[2*3+i+456].
       blockDim.x = threads_per_block; the first block handles the first
       threads_per_block components, the second block the next, etc.
       NOTE(review): reads y[i+2] and gaaa at a 462-element offset -- callers
       must guarantee those stay in bounds; only i < n is checked here.
       Cleanup: removed the unused test_val/test_val2 locals and the
       redundant trailing return. */
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    /* block_count*threads_per_block may be >= n */
    if (i < n) {
        z[i] = x[i] + y[i+2] + gaaa[(2*3+i)+456];
    }
} /* Vec_add */
|
6,472 | /*
example to show how to use stream and async method to make the data
transfer and kernel function executed concurrently.
*/
#include <iostream>
using namespace std;
static void HandleError( cudaError_t err,const char *file, int line ) {
    // Abort the program with file/line context when a CUDA call fails.
    if (err == cudaSuccess)
        return;
    cout << cudaGetErrorString(err) << file << line << endl;
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define DIM 16
__shared__ int share[256];
__global__ void fun(int * in, int * out)
{
    // Single-element transform: *out = *in + 1000 * global thread id.
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    *out = *in + 1000 * gid;
}
int main()
{
// Demonstrates per-element H2D copy -> kernel -> D2H copy issued on one
// non-default stream.
// NOTE(review): tmp/tmp2 are pageable stack arrays; cudaMemcpyAsync on
// pageable host memory is NOT actually asynchronous, so the intended
// copy/compute overlap will not occur -- use cudaMallocHost/cudaHostAlloc
// for pinned buffers. The memcpy/launch calls are also unchecked.
int * inGlobe;
int * outGlobe;
int tmp[DIM*DIM];
int tmp2[DIM*DIM];
cudaStream_t stream;
HANDLE_ERROR( cudaStreamCreate( &stream ) );
HANDLE_ERROR(cudaMalloc((void**)&inGlobe, DIM * DIM * sizeof (int)));
HANDLE_ERROR(cudaMalloc((void**)&outGlobe, DIM * DIM * sizeof (int)));
for (int i = 0; i < DIM*DIM; ++i)
tmp[i] = i;
for (int i = 0; i < DIM*DIM; ++i)
{
cudaMemcpyAsync(inGlobe+i,tmp+i,sizeof(int),cudaMemcpyHostToDevice,stream ) ;
// Launch config <<<1,1,0,stream>>>: 0 bytes dynamic shared memory,
// executed on our stream.
fun<<<1,1,0,stream>>>(inGlobe+i,outGlobe+i);
cudaMemcpyAsync(tmp2+i,outGlobe+i,sizeof(int),cudaMemcpyDeviceToHost,stream);
}
HANDLE_ERROR( cudaStreamSynchronize( stream ) );
for (int i = 0; i < DIM * DIM; ++i)
cout << tmp2[i] << " " ;
// Blocking read keeps the console window open.
int k;
cin >> k;
return 0;
}
|
6,473 | #include "includes.h"
__global__ void diffKernel( float *in, float *out, int n )
{
    // Forward difference: out[i] = in[i+1] - in[i] for i in [0, n-2].
    int gid = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (gid < n - 1)
        out[gid] = in[gid+1] - in[gid];
} |
6,474 | #include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
int main()
{
    // Enumerate CUDA devices and print each one's name and global memory.
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    printf("gpu count: %d\n", device_count);
    cudaDeviceProp device_prop;
    for (int dev = 0; dev < device_count; ++dev)
    {
        cudaGetDeviceProperties(&device_prop, dev);
        printf("\n\n\n");
        printf("gpu model: %s\n", device_prop.name);
        printf("gpu memory capacity(MB): %f\n", float(device_prop.totalGlobalMem) / (1024.0*1024.0));
    }
    return 0;
}
|
6,475 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
// In-place tournament max: each thread owns start = tid*gap and repeatedly
// absorbs the larger of itself and its partner `i` positions away, doubling
// `i` each round, so num[0] ends up holding the window's maximum.
// NOTE(review): __syncthreads() sits inside data-dependent control flow
// (threads return early on odd `start`, and leave the while loop at
// different rounds), which is undefined behaviour for a block barrier --
// this may happen to work on some hardware but should be restructured so
// every thread reaches the barrier each round.
__global__ void getmaxcu(unsigned int num[], unsigned int size, unsigned int gap)
{
unsigned int i=gap, //loop variables
start = (threadIdx.x)*gap;
if(start%2!=0 || size<=0)
return;
else{
//bottom-up tree search
while(start+i<size && start%(2*i)==0){
if(num[start]<num[start+i])
num[start]=num[start+i];
i*=2;
__syncthreads();
}
}
}
int main(int argc, char *argv[])
{
    // Fill an array with random values and find its maximum on the GPU,
    // chunk by chunk (see diagram below).
    if(argc !=2)
    {
        printf("usage: maxgpu num\n");
        printf("num = size of the array\n");
        exit(1);
    }
    cudaError_t err;                        // CUDA error status
    unsigned int size = 0, backup_size,     // size of the array
    thread_num,
    i;                                      // loop index
    struct cudaDeviceProp prop;             // device specs
    unsigned int *numbers, *cudanumbers,    // host / device arrays
    *max=(unsigned int *)malloc(sizeof(unsigned int)); // result holder
    size = atol(argv[1]);
    backup_size=size;
    numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
    if(!numbers)
    {
        printf("Unable to allocate mem for an array of size %u\n", size);
        exit(1);
    }
    srand(time(NULL)); // setting a seed for the random number generator
    // Fill-up the array with random numbers from 0 to size-1
    for( i = 0; i < size; i++)
        numbers[i] = rand() % size;
    cudaGetDeviceProperties(&prop, 0);
    cudaMalloc((void**)&cudanumbers, size * sizeof(unsigned int));
    cudaMemcpy(cudanumbers, numbers, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
    // Round the per-block thread count to a multiple of the warp size (32).
    thread_num=ceil(prop.maxThreadsPerBlock/32)*32;
    unsigned int offset=0;
    i=0;
    // Search thread_num elements at once, then fold each chunk's max into
    // num[0]; structure: |----1024----| |----1024----| ... |size%1024|
    while(offset<backup_size){
        if(backup_size-offset>=thread_num){
            getmaxcu<<<1, thread_num>>>((cudanumbers+offset), thread_num, 1);
        }
        else{
            getmaxcu<<<1, backup_size-offset>>>((cudanumbers+offset), backup_size-offset, 1);
        }
        getmaxcu<<<1, 1>>>((cudanumbers), backup_size, offset);
        offset+=thread_num;
    }
    // Fixes: use cudaDeviceSynchronize (cudaThreadSynchronize is deprecated)
    // and synchronize + check errors BEFORE copying and printing the result;
    // the original synced after the copy, too late to matter.
    cudaDeviceSynchronize();
    err=cudaGetLastError();
    if(err!=cudaSuccess){
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    cudaMemcpy(max, cudanumbers, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(cudanumbers);
    printf("CUDA returns: %d\n", *max);
    //printf("Sequential returns: %u\n", getmax(numbers, backup_size));
    free(numbers);
    free(max);
    exit(0);
}
/*
unsigned int getmax(unsigned int num[], unsigned int size)
{
unsigned int i, j;
unsigned int max = num[0];
for(i = 1; i < size; i++)
if(num[i] > max){
max = num[i];
j=i;
}
//printf("Sequential found max at %d\n", j);
return max;
}
__global__ void printArray(unsigned int arr[], unsigned int length, unsigned int jump){
for(int i = 0; i+jump<length; i++)
printf("%d: %d\n", i+jump, arr[i+jump]);
}
void search(unsigned int arr[], unsigned int size, unsigned int target){
printf("CUDA found max at");
for(int i = 1; i < size; i++)
if(arr[i] == target)
printf(" %d ", i);
printf("\n");
}
*/ |
6,476 | #include "includes.h"
const int Nthreads = 1024, maxFR = 5000, NrankMax = 6;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// Non-maximum suppression over the detection trace `err`: sample t0 is kept
// as a spike only if err[t0] exceeds Th^2 and is a strict local maximum
// within +/- lockout samples. Accepted spikes append (time, type, amplitude)
// into st/id/x via an atomic counter, capped at maxFR entries.
// Params layout used here: [0]=NT (trace length), [2]=Th, [4]=lockout+1.
__global__ void cleanup_spikes(const double *Params, const float *err, const int *ftype, float *x, int *st, int *id, int *counter){
int lockout, indx, tid, bid, NT, tid0, j, t0;
volatile __shared__ float sdata[Nthreads+2*81+1];
bool flag=0;
float err0, Th;
lockout = (int) Params[4] - 1;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
tid0 = bid * blockDim.x ;
Th = (float) Params[2];
// Grid-stride over windows of blockDim.x samples; each window is staged
// into shared memory together with a 2*lockout halo.
while(tid0<NT-Nthreads-lockout+1){
if (tid<2*lockout)
sdata[tid] = err[tid0 + tid];
if (tid0+tid+2*lockout<NT)
sdata[tid+2*lockout] = err[2*lockout + tid0 + tid];
else
sdata[tid+2*lockout] = 0.0f;
__syncthreads();
err0 = sdata[tid+lockout];
t0 = tid+lockout + tid0;
if(err0 > Th*Th && t0<NT-lockout-1){
// Reject if any neighbour within the lockout window is larger.
flag = 0;
for(j=-lockout;j<=lockout;j++)
if(sdata[tid+lockout+j]>err0){
flag = 1;
break;
}
if(flag==0){
// Local maximum: claim an output slot (bounded by maxFR).
indx = atomicAdd(&counter[0], 1);
if (indx<maxFR){
st[indx] = t0;
id[indx] = ftype[t0];
x[indx] = err0;
}
}
}
tid0 = tid0 + blockDim.x * gridDim.x;
}
} |
6,477 | # include <bits/stdc++.h>
# include <cuda.h>
#define TILE_WIDTH 32 //(TITLE_WIDTH = BLOCKSIZE)
using namespace std;
// ::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
__global__ void KernelNormalMul(float *Mat1,float *Mat2,float *Mat3,int m,int n,int p){
    // Naive GEMM: Mat3 (m x p) = Mat1 (m x n) * Mat2 (n x p), one output
    // element per thread, all operands row-major in global memory.
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= m || col >= p)
        return;
    float acc = 0.0;
    for (int k = 0; k < n; ++k)
        acc += Mat1[n*row + k] * Mat2[p*k + col];
    Mat3[p*row + col] = acc;
}
__global__ void KernelTilesMul(float *Mat1,float *Mat2,float *Mat3,int rowM1,int colM1,int colM2){
    // Tiled GEMM: each block computes one TILE_WIDTH x TILE_WIDTH output
    // tile, staging operand tiles through shared memory phase by phase.
    // (The original reused `k` for both loop levels; renamed for clarity.)
    __shared__ float tileA[TILE_WIDTH][TILE_WIDTH];
    __shared__ float tileB[TILE_WIDTH][TILE_WIDTH];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int row = blockIdx.y * TILE_WIDTH + ty;
    const int col = blockIdx.x * TILE_WIDTH + tx;
    const int numPhases = (colM1 + TILE_WIDTH - 1) / TILE_WIDTH;
    float acc = 0.0;
    for (int phase = 0; phase < numPhases; ++phase) {
        // Stage one tile of each operand; out-of-range entries become 0 so
        // the inner product below needs no bounds checks.
        const int aCol = phase * TILE_WIDTH + tx;
        const int bRow = phase * TILE_WIDTH + ty;
        tileA[ty][tx] = (aCol < colM1 && row < rowM1) ? Mat1[row*colM1 + aCol] : 0.0;
        tileB[ty][tx] = (bRow < colM1 && col < colM2) ? Mat2[bRow*colM2 + col] : 0.0;
        __syncthreads();   // tiles fully loaded before anyone reads them
        for (int t = 0; t < TILE_WIDTH; ++t)
            acc += tileA[ty][t] * tileB[t][tx];
        __syncthreads();   // done reading before the next phase overwrites
    }
    if (row < rowM1 && col < colM2)
        Mat3[row*colM2 + col] = acc;
}
void d_MatrixMult(float *Mat1,float *Mat2,float *Mat3,int rowM1,int colM1,int colM2, int op ){
    // Host wrapper: copy the operands to the device, multiply with either the
    // naive kernel (op==1) or the tiled kernel (otherwise), and copy the
    // result back into Mat3.
    float * d_Mat1;
    float * d_Mat2;
    float * d_Mat3;
    float Blocksize=TILE_WIDTH;   // 2-D blocks of TILE_WIDTH x TILE_WIDTH threads
    int size1=rowM1*colM1;
    int size2=colM1*colM2;
    int size3=rowM1*colM2;
    // 1. Allocate device memory.
    cudaMalloc(&d_Mat1,size1*sizeof(float));
    cudaMalloc(&d_Mat2,size2*sizeof(float));
    cudaMalloc(&d_Mat3,size3*sizeof(float));
    // 2. Copy operands host -> device.
    cudaMemcpy(d_Mat1, Mat1,size1*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(d_Mat2, Mat2,size2*sizeof(float),cudaMemcpyHostToDevice);
    // 3. Launch geometry: enough blocks to cover the colM2 x rowM1 output.
    dim3 dimGrid(ceil(colM2/Blocksize),ceil(rowM1/Blocksize),1);
    dim3 dimBlock(Blocksize,Blocksize,1);
    // 4. Kernel selection and launch.
    if(op==1){KernelNormalMul<<<dimGrid,dimBlock>>>(d_Mat1,d_Mat2,d_Mat3,rowM1,colM1,colM2);}else{
        KernelTilesMul<<<dimGrid,dimBlock>>>(d_Mat1,d_Mat2,d_Mat3,rowM1,colM1,colM2);
    }
    // 5. Copy the result back (this cudaMemcpy synchronizes with the kernel).
    cudaMemcpy (Mat3,d_Mat3,size3*sizeof(float),cudaMemcpyDeviceToHost);
    // 6. Free ALL device buffers -- the original leaked d_Mat1 and d_Mat2.
    cudaFree(d_Mat1);
    cudaFree(d_Mat2);
    cudaFree(d_Mat3);
}
// :::::::::::::::::::::::::::::::::::::::Normal::::::::::::::::::::::::::::::::
void h_Mul_Mat(float *Mat1,float *Mat2, float *Mat3,int m,int n,int p){
    // CPU reference GEMM: Mat3 (m x p) = Mat1 (m x n) * Mat2 (n x p),
    // row-major; used to validate the GPU kernels.
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < p; ++col) {
            float acc = 0.0;
            for (int k = 0; k < n; ++k)
                acc += Mat1[n*row + k] * Mat2[p*k + col];
            Mat3[p*row + col] = acc;
        }
    }
}
void llena_mat(float *Mat, float Value,int m,int n){
    // Fill the m x n matrix (stored as a flat row-major array) with a constant.
    const int total = n * m;
    for (int idx = 0; idx < total; ++idx)
        Mat[idx] = Value;
}
void mostrar_mat(float *Mat,int m,int n){
    // Print the flat m x n matrix, starting each row on a new line.
    // (When n == 0 the loop is empty, so the modulo is never reached.)
    const int total = n * m;
    for (int idx = 0; idx < total; ++idx) {
        const bool rowStart = (n != 0) && (idx % n == 0);
        if (rowStart)
            cout << endl;
        cout << "[" << Mat[idx] << "] ";
    }
    cout << endl;
}
int check_mat(float *Mat1,float *Mat2,int m,int p){
    // Exact element-wise comparison of two m x p matrices.
    // Returns 1 when equal, 0 on the first mismatch. Fix: the original
    // returned 0 in BOTH cases, making the result unusable; callers in this
    // file only rely on the printed message, so the change is backward
    // compatible.
    for(int i=0; i<(m*p);++i){
        if(Mat1[i]!=Mat2[i]){
            cout<<"Error, Las matrices no son iguales"<<endl;
            return 0;
        }
    }
    cout<<"Las Matrices son iguales"<<endl;
    return 1;
}
int check_mat_float(float *Mat1,float *Mat2,int m,int p){
    // Tolerance-based comparison (|a-b| <= 0.1) for GPU results, where float
    // rounding makes exact equality too strict.
    // Returns 1 when all elements match, 0 on the first mismatch. Fix: the
    // original returned 0 in both cases; callers ignore the value, so the
    // change is backward compatible.
    for(int i=0; i<(m*p);++i){
        if(fabs(Mat1[i]-Mat2[i]) > 0.1){
            cout<<"Error, Las matrices no son iguales"<<endl;
            return 0;
        }
    }
    cout<<"Las Matrices son iguales"<<endl;
    return 1;
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
double diffclock(clock_t clock1,clock_t clock2){
    // Elapsed time between two clock() samples, converted to seconds.
    double ticks = clock2 - clock1;
    return ticks / (CLOCKS_PER_SEC/1); // divide by 1000 here for milliseconds
}
// :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::.
int main(){
// Benchmark: sequential CPU GEMM vs naive CUDA GEMM vs tiled CUDA GEMM on
// fixed 2048x1024 * 1024x1200 matrices, timing each variant and
// cross-checking the results.
double T1,T2,T3; // elapsed-time measurements
int rowM1=2048;
int colM1=1024;
int colM2=1200;
float *Mat1 = (float*)malloc((rowM1*colM1)*sizeof(float));
float *Mat2 = (float*)malloc((colM1*colM2)*sizeof(float));
float *Mat3 = (float*)malloc((rowM1*colM2)*sizeof(float));
float *Mat4 = (float*)malloc((rowM1*colM2)*sizeof(float));
float *Mat5 = (float*)malloc((rowM1*colM2)*sizeof(float));
llena_mat(Mat1,1.0,rowM1,colM1);
llena_mat(Mat2,2.0,colM1,colM2);
llena_mat(Mat3,0.0,rowM1,colM2);
llena_mat(Mat4,0.0,rowM1,colM2);
llena_mat(Mat5,0.0,rowM1,colM2);
// Sequential reference.
clock_t start = clock();
h_Mul_Mat(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
clock_t end = clock();
T1=diffclock(start,end);
cout <<"Tiempo secuencial: "<<T1<<endl;
//mostrar_mat(Mat3,rowM1,colM2);
clock_t start2 = clock();
d_MatrixMult(Mat1,Mat2,Mat4,rowM1,colM1,colM2,1); // parallel (naive kernel)
clock_t end2 = clock();
//mostrar_mat(Mat4,rowM1,colM2);
T2=diffclock(start2,end2);
cout <<"Tiempo Paralelo: "<<T2<<endl;
cout<<"Aceleración lograda: "<<T1/T2<<endl;
check_mat_float(Mat3,Mat4,rowM1,colM2);
clock_t start3 = clock();
d_MatrixMult(Mat1,Mat2,Mat5,rowM1,colM1,colM2,2); // tiled kernel
//mostrar_mat(Mat5,rowM1,colM2);
clock_t end3 = clock();
T3=diffclock(start3,end3);
cout <<"Tiempo Paralelo con Tiles: "<<T3<<endl;
cout<<"Aceleración lograda Respecto a el tiempo paralelo: "<<T2/T3<<endl;
check_mat_float(Mat4,Mat5,rowM1,colM2);
free(Mat1);
free(Mat2);
free(Mat3);
free(Mat4);
free(Mat5);
return 0;
}
// http://www.techdarting.com/2014/03/matrix-multiplication-in-cuda-using.html
|
6,478 | #include <algorithm>
#include <iostream>
#include <cassert>
#include <cmath>
#include <chrono>
using namespace std;
#define TIMER_SET(t0) std::chrono::time_point<std::chrono::steady_clock> t0 = std::chrono::steady_clock::now()
#define TIMER_DIFF(t0, t1) std::chrono::duration_cast<std::chrono::microseconds> (t1 - t0).count()
__global__
void kernel(float * A, int N) {
    // A[i] = sqrt(pi^i), each element computed independently.
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < N)
        A[gid] = sqrtf( powf(3.1415926f, gid) );
}
int main(int argc, char * argv[]) {
    // Times A[i] = sqrt(pi^i) over 10M elements, on CPU (-DCPU_DEMO) or GPU.
    const int N = 10000000;
    float * A;
    TIMER_SET(t0);
#ifdef CPU_DEMO
    A = new float[N];
#else
    cudaMallocManaged(&A, sizeof(float) * N);
#endif
    // Calculate A[i] = sqrtf( powf(PI, i) )
#ifdef CPU_DEMO
    for (int i = 0; i < N; ++i) {
        A[i] = sqrtf( powf(3.1415926f, i) );
    }
#else
    int dimBlock = 64, dimGrid = (N + dimBlock - 1) / dimBlock;
    kernel<<<dimGrid, dimBlock>>>(A, N);
    cudaDeviceSynchronize();   // managed memory: sync before any host access
#endif
    TIMER_SET(t1);
    cout << "Time (microsecond): " << TIMER_DIFF(t0, t1) << endl;
#ifdef CPU_DEMO
    delete[] A;   // bug fix: `delete A` on a new[] array is undefined behaviour
#else
    cudaFree(A);
#endif
    return 0;
}
|
6,479 | #include <cuda_runtime.h>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
    // All-pairs force on particle `idx`: accumulate dr/|dr|^3 over every
    // particle. The 1e-150 softening keeps the self term (dr == 0) finite.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    double3 acc{0.0, 0.0, 0.0};
    for (int j = 0; j < N; ++j) {
        const double dx = p[j].x - p[idx].x;
        const double dy = p[j].y - p[idx].y;
        const double dz = p[j].z - p[idx].z;
        const double inv_r = 1 / sqrt(1e-150 + dx * dx + dy * dy + dz * dz);
        const double w = inv_r * inv_r * inv_r;
        acc.x += dx * w;
        acc.y += dy * w;
        acc.z += dz * w;
    }
    f[idx] = acc;
}
void computeForces(int N, const double3 *p, double3 *f) {
    // Launch one thread per particle, 1024 threads per block.
    constexpr int threadsPerBlock = 1024;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    computeForcesKernel<<<blocks, threadsPerBlock>>>(N, p, f);
}
|
6,480 | #include <stdio.h>
#include <iostream>
#include <math.h>
#include "cuda.h"
#include <time.h>
#define BLOCK_DIM 16
__global__ void computeDistance(float* A, int wA, int pA, float* B, int wB, int pB, int dim, float* AB) {
// Computes the pairwise SQUARED Euclidean distance matrix AB between the
// column points of A (wA reference points, row pitch pA) and B (wB query
// points, row pitch pB), each point having `dim` coordinates stored one
// per row. Tiled: BLOCK_DIM x BLOCK_DIM sub-matrices staged in shared memory.
// NOTE(review): begin_A/step_A/... are __shared__ scalars written by every
// thread of the block; all threads store identical values so this appears
// benign, but plain per-thread locals would be cleaner and race-free.
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
// They will contain, for each thread, the current coordinates of A and B - block_dim in each step
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x; // Local query point index
int ty = threadIdx.y; // Local ref point index
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y; // Each block has its own start on ref points
begin_B = BLOCK_DIM * blockIdx.x; // Each block has its own start on query points
step_A = BLOCK_DIM * pA; // next step = next row of the big matrix
step_B = BLOCK_DIM * pB;
end_A = begin_A + (dim-1) * pA; // Each submatrix treated by given block has BLOCK_DIM columns and dim rows
// Conditions
int cond0 = (begin_A + tx < wA); // current column is out of A
int cond1 = (begin_B + tx < wB); // current column is out of B
int cond2 = (begin_A + ty < wA); // ty is column number in A
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
// ty corresponds to row, tx to column in the resulting matrix, as well as ref and query points in input,
// but when copying to local memory, they work just as numbers for indeces (tx is column number in both cases)
// a/pA + ty is the row number in A corresponding to this thread in this block
if (a/pA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + pA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + pB * ty + tx] : 0;
}
else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k){
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp*tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if(cond2 && cond1)
AB[ (begin_A + ty) * pB + begin_B + tx ] = ssd;
}
// Selection sort, partial (k passes) and per column: for each of the `width`
// columns of the pitched distance matrix, move the k smallest values to the
// top k rows in ascending order; `ind` records, for each of those rows, the
// row index the value was swapped from.
// NOTE(review): min_index is the row AFTER earlier swaps, not the original
// row of the point -- verify against the caller if true original indices of
// the k nearest neighbours are expected.
__global__ void sort(float *dist, int *ind, int width, int pitch, int ind_pitch, int height, int k){
// Variables
int l, i, min_index;
float *p_dist;
int *p_ind;
float min_value, tmp;
// xIndex is column in the sorted matrix
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
p_dist = dist+xIndex;
p_ind = ind+xIndex;
min_value = *p_dist;
for (l = 0; l < k; l++) {
// Find the minimum of rows l..height-1.
min_index = l;
min_value = *(p_dist+l*pitch);
for (i=l+1; i < height; i++) {
if (*(p_dist+i*pitch) < min_value) {
min_index = i;
min_value = *(p_dist+i*pitch);
}
}
// Swap it into row l and record where it came from.
if (min_index != l) {
tmp = *(p_dist+min_index*pitch);
*(p_dist+min_index*pitch) = *(p_dist+l*pitch);
*(p_dist+l*pitch) = tmp;
}
p_ind[l*ind_pitch] = min_index;
}
}
}
__global__ void parallelSqrt(float *dist, int width, int pitch, int k) {
    // In-place square root over the first k rows of the pitched distance
    // matrix (distances are accumulated as squared values upstream).
    // Fix: sqrtf instead of sqrt -- avoids promoting every float element
    // through double.
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    if (xIndex < width && yIndex < k)
        dist[yIndex*pitch + xIndex] = sqrtf(dist[yIndex*pitch + xIndex]);
}
// Compute the mean of the first k elements of each column.
__global__ void mean(float *dist, int width, int pitch, float *res, int k) {
    // One thread per column of the pitched matrix.
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= width)
        return;
    float *cursor = dist + col;
    float total = 0;
    for (int step = 0; step < k*pitch; step += pitch)
        total += *(cursor + step);
    res[col] = total / k;
}
void printErrorMessage(cudaError_t error, int memorySize){
    // Report a failed device allocation together with the requested size.
    const char *bar = "==================================================\n";
    printf("%s", bar);
    printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
    printf("Wished allocated memory : %d\n", memorySize);
    printf("%s", bar);
}
// k-nearest-neighbour search on the GPU.
//
// ref_host:   height x ref_width reference points, row-major (one coordinate
//             dimension per row, one point per column)
// query_host: height x query_width query points, same layout
// k:          number of neighbours to keep per query
// Outputs (host buffers allocated by the caller):
//   dist_host: k x query_width ascending Euclidean distances
//   res_host:  1 x query_width mean of the first k distances per query
//   ind_host:  k x query_width indices produced by the sort kernel
//
// Device buffers are pitched; byte pitches are converted to element pitches
// before the kernel launches. On any allocation failure every buffer
// allocated so far is released and the function returns without output.
void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host, float* res_host, int *ind_host) {
    // NULL-initialised so cleanup is safe on every path (cudaFree(NULL) is a no-op).
    float *ref_dev = NULL;
    float *query_dev = NULL;
    float *dist_dev = NULL;
    int *ind_dev = NULL;
    float *res_dev = NULL;
    size_t ref_pitch_in_bytes;
    size_t query_pitch_in_bytes;
    size_t res_pitch_in_bytes;
    size_t ind_pitch_in_bytes;
    size_t dist_pitch_in_bytes;
    size_t ref_pitch;
    size_t query_pitch;
    size_t ind_pitch;
    cudaError_t result;
    // Allocate device memory. FIX: each failure path now frees everything
    // allocated before it (the original freed only the pointer that failed,
    // leaking all earlier allocations).
    result = cudaMallocPitch((void **) &ref_dev, &ref_pitch_in_bytes, ref_width * sizeof(float), height);
    if (result){
        printErrorMessage(result, ref_width*sizeof(float)*height);
        return;
    }
    result = cudaMallocPitch((void **) &query_dev, &query_pitch_in_bytes, query_width*sizeof(float), height);
    if (result){
        cudaFree(ref_dev);
        // FIX: the reported size now matches the requested allocation (was *k).
        printErrorMessage(result, query_width*sizeof(float)*height);
        return;
    }
    result = cudaMallocPitch((void **) &dist_dev, &dist_pitch_in_bytes, query_width*sizeof(float), ref_width);
    if (result){
        cudaFree(ref_dev); cudaFree(query_dev);
        printErrorMessage(result, query_width*sizeof(float)*ref_width);
        return;
    }
    result = cudaMallocPitch((void **) &ind_dev, &ind_pitch_in_bytes, query_width*sizeof(int), k);
    if (result){
        cudaFree(ref_dev); cudaFree(query_dev); cudaFree(dist_dev);
        printErrorMessage(result, query_width*sizeof(int)*k);
        return;
    }
    result = cudaMallocPitch((void **) &res_dev, &res_pitch_in_bytes, query_width*sizeof(float), 1);
    if (result){
        cudaFree(ref_dev); cudaFree(query_dev); cudaFree(dist_dev); cudaFree(ind_dev);
        printErrorMessage(result, query_width*sizeof(float));
        return;
    }
    // Copy reference and query points to global memory
    cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*sizeof(float), ref_width*sizeof(float), height, cudaMemcpyHostToDevice);
    cudaMemcpy2D(query_dev, query_pitch_in_bytes, query_host, query_width*sizeof(float), query_width*sizeof(float), height, cudaMemcpyHostToDevice);
    // Convert byte pitches to element pitches for kernel indexing
    ref_pitch = ref_pitch_in_bytes/sizeof(float);
    query_pitch = query_pitch_in_bytes/sizeof(float);
    ind_pitch = ind_pitch_in_bytes/sizeof(int);
    // Launch configuration: 2-D blocks tile the query x ref distance matrix;
    // 1-D launches run one thread per query column.
    dim3 threads_per_block_2D(BLOCK_DIM, BLOCK_DIM, 1);
    dim3 threads_per_block_1D(BLOCK_DIM * BLOCK_DIM, 1, 1);
    dim3 blocks_2D(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) ref_width/BLOCK_DIM), 1);
    dim3 blocks_2D_k(std::ceil((float) query_width/BLOCK_DIM), std::ceil((float) k/BLOCK_DIM), 1);
    dim3 blocks_1D(std::ceil((float) query_width/(BLOCK_DIM*BLOCK_DIM)), 1, 1);
    // Pipeline: squared distances -> per-column partial sort -> sqrt of the
    // top k rows -> per-column mean of the first k distances.
    computeDistance<<<blocks_2D, threads_per_block_2D>>>(ref_dev, ref_width, ref_pitch, query_dev, query_width, query_pitch, height, dist_dev);
    sort<<<blocks_1D, threads_per_block_1D>>>(dist_dev, ind_dev, query_width, query_pitch, ind_pitch, ref_width, k);
    parallelSqrt<<<blocks_2D_k, threads_per_block_2D>>>(dist_dev, query_width, query_pitch, k);
    mean<<<blocks_1D, threads_per_block_1D>>>(dist_dev, query_width, query_pitch, res_dev, k);
    // Copy results back to the host (blocking copies, so they also
    // synchronise with the kernels above).
    cudaMemcpy2D(res_host, query_width*sizeof(float), res_dev, res_pitch_in_bytes, query_width*sizeof(float), 1, cudaMemcpyDeviceToHost);
    cudaMemcpy2D(dist_host, query_width*sizeof(float), dist_dev, dist_pitch_in_bytes, query_width*sizeof(float), k, cudaMemcpyDeviceToHost);
    cudaMemcpy2D(ind_host, query_width*sizeof(int), ind_dev, ind_pitch_in_bytes, query_width*sizeof(int), k, cudaMemcpyDeviceToHost);
    // FIX: dist_dev was previously leaked here.
    cudaFree(ref_dev); cudaFree(query_dev); cudaFree(dist_dev); cudaFree(res_dev); cudaFree(ind_dev);
}
// Smoke test: build random reference/query sets, run knn, print the first few
// query points with their mean distance, neighbour indices and distances.
int main() {
    int ref_nb = 4096;   // number of reference points
    int query_nb = 4096; // number of query points
    int dim = 32;        // point dimensionality
    int k = 20;          // neighbours kept per query
    // Host buffers (layouts match knn's expectations: dim x count for the
    // point sets, k x query_nb for the per-neighbour outputs).
    float *ref = (float *) malloc(ref_nb * dim * sizeof(float));
    float *query = (float *) malloc(query_nb * dim * sizeof(float));
    float *dist = (float *) malloc(query_nb * k * sizeof(float));
    float *res = (float *) malloc(query_nb * 1 * sizeof(float)); // mean of the first k distances
    int *ind = (int *) malloc(query_nb * k * sizeof(int));
    // Generate random data
    srand(time(NULL));
    for (int i = 0; i < ref_nb * dim; i++) ref[i] = (float) (rand() % 100);
    for (int i = 0; i < query_nb * dim; i++) query[i] = (float) (rand() % 100);
    knn(ref, ref_nb, query, query_nb, dim, k, dist, res, ind);
    // Print the first 10 query points, their mean distance and neighbour ids.
    for (int j = 0; j < 10; j++) {
        std::cout << "( ";
        for (int i = 0; i < dim; i++) std::cout << query[i*query_nb+j] << " ";
        std::cout << ")" << std::endl;
        std::cout << res[j] << std::endl;
        for (int i = 0; i < k; i++) std::cout << ind[i*query_nb+j] << " ";
        std::cout << std::endl << std::endl;
    }
    for (int i = 0; i < k; i++) {
        for (int j = 0; j < 10; j++) {
            std::cout << dist[i*query_nb + j] << " ";
        }
        std::cout << std::endl;
    }
    // FIX: `res` was previously leaked.
    free(ref); free(query); free(dist); free(res); free(ind);
    return 0;
}
|
6,481 | /*
*
* test.c
* tim.burgess@noaa.gov
*
* A place for trying out various code
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <cuda.h>
#define NPIXELS 100
static char daytab[2][13] = {
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
{0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};
int day_of_year(int year, int month, int day);
unsigned char *landmask_d;
// Allocate and release a device buffer for the landmask, reporting its size.
int main() {
    // allocate device memory for landmask
    size_t landMemSize = NPIXELS * sizeof(char);
    cudaMalloc((void **)&landmask_d, landMemSize);
    // FIX: size_t must be printed with %zu; %ld is wrong on LLP64 platforms
    // (e.g. 64-bit Windows) where long is 32 bits.
    printf("allocating %zu device Kbytes for landmask\n", landMemSize/1024);
    cudaFree(landmask_d);
    //printf("day of year:%d\n", day_of_year(2010, 8, 9) );
    return 0;
}
// Return the 1-based day-of-year for a date, accumulating month lengths from
// the daytab table (row 1 is used for leap years).
int day_of_year(int year, int month, int day) {
    int leap = (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
    for (int m = 1; m < month; m++)
        day += daytab[leap][m];
    return day;
}
|
6,482 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright 2021 Brian Hamilton //
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated //
// documentation files (the "Software"), to deal in the Software without restriction, including without //
// limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of //
// the Software, and to permit persons to whom the Software is furnished to do so, subject to the following //
// conditions: //
// //
// The above copyright notice and this permission notice shall be included in all copies or substantial //
// portions of the Software. //
// //
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT //
// LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO //
// EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN //
// AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE //
// OR OTHER DEALINGS IN THE SOFTWARE. //
// //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// //
// FDTD tutorial for 180th ASA meeting - CUDA Kernels to accompany Matlab code //
// //
// Compiles to PTX from Matlab, but can be compiled to PTX with 'nvcc --ptx kernel_2d.cu' //
// //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//air update
// 2-D FDTD interior ("air") update: leapfrog step with the 4-neighbour
// Laplacian; u0 = next state, u1 = current, u2 = previous. in_mask zeroes
// cells that are not air. Only interior points (one-cell halo) are touched.
__global__ void air_update(double *u0, const double * __restrict__ u1, const double * __restrict__ u2, int Nx, int Ny, bool * in_mask)
{
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y*blockDim.y + threadIdx.y;
    bool interior = (ix > 0) && (ix < Nx-1) && (iy > 0) && (iy < Ny-1);
    if (!interior)
        return;
    int ii = iy*Nx + ix;
    double neighbours = u1[ii-1] + u1[ii+1] + u1[ii-Nx] + u1[ii+Nx];
    u0[ii] = (0.5*neighbours - u2[ii]) * in_mask[ii];
}
//rigid boundary update
// Rigid-boundary update for the Nb boundary nodes listed in ib (1-based
// Matlab indices); Kib[ix] is the per-node neighbour count used in the
// boundary stencil.
__global__ void rigid_update(double *u0, const double * __restrict__ u1, const double * __restrict__ u2, int Nx, int Nb, int * ib, int * Kib)
{
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    if (ix >= Nb)
        return;
    int ii = ib[ix] - 1; // convert from Matlab 1-based indexing
    double neighbours = u1[ii-1] + u1[ii+1] + u1[ii-Nx] + u1[ii+Nx];
    u0[ii] = (2 - 0.5*Kib[ix])*u1[ii] + 0.5*neighbours - u2[ii];
}
//add loss to boundary nodes
// Apply loss (loss factor lf) to the Nb boundary nodes listed in ib (1-based
// Matlab indices), weighting by 4 - Kib, the number of missing neighbours.
__global__ void apply_loss(double *u0, const double * __restrict__ u2, int Nx, int Nb, int * ib, int * Kib, double lf)
{
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    if (ix >= Nb)
        return;
    int ii = ib[ix] - 1; // convert from Matlab 1-based indexing
    double g = lf*(4 - Kib[ix]);
    u0[ii] = (u0[ii] + g*u2[ii]) / (1.0 + g);
}
|
6,483 | #include <stdio.h>
#include <time.h>
#include <cuda.h>
// kernel
// 3x3 box blur with edge clamping: each thread averages the valid neighbours
// (including the cell itself) of element (row, col) in the a x b matrix
// `mat`, writing the integer mean into `res`.
__global__ void antialiasingDevice(int *mat, int a, int b, int *res)
{
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= a || col >= b)
        return;
    int total = 0;
    int count = 0;
    for (int dr = -1; dr <= 1; ++dr) {
        for (int dc = -1; dc <= 1; ++dc) {
            int r = row + dr;
            int c = col + dc;
            if (r >= 0 && r < a && c >= 0 && c < b) {
                count++;
                total += mat[r * b + c];
            }
        }
    }
    res[row * b + col] = total / count;
}
// Read an a x b integer matrix from stdin, blur it on the GPU with
// antialiasingDevice, time the kernel with CUDA events, and print the result.
int main(void)
{
    int *mat_h, *res_h; // pointers to host memory
    int *mat_d, *res_d; // pointers to device memory
    cudaError_t err;
    int a, b;
    scanf("%d %d", &a, &b);
    // allocate arrays on host and read the input matrix
    size_t size = a*b*sizeof(int);
    mat_h = (int *)malloc(size);
    res_h = (int *)malloc(size);
    for (int i = 0; i < a; ++i) {
        for (int j = 0; j < b; ++j) {
            scanf("%d", mat_h + (i * b + j));
        }
    }
    // allocate arrays on device
    err = cudaMalloc((void **) &mat_d, size);
    if (err != cudaSuccess)
        fprintf(stderr,"Problemas solicitando memoria para mat_d\n");
    err = cudaMalloc((void **) &res_d, size);
    if (err != cudaSuccess)
        fprintf(stderr,"Problemas solicitando memoria para res_d\n");
    // copy data from host to device
    err = cudaMemcpy(mat_d, mat_h, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        fprintf(stderr,"Problemas copiando memoria a device mat_d mat_h\n");
    // FIX: the original swapped grid and block — it launched a 1024x1024 grid
    // of nearly-empty blocks. Use 32x32 = 1024 threads per block (the
    // per-block hardware limit) and just enough blocks to cover the matrix.
    dim3 dimBlock(32, 32, 1);
    dim3 dimGrid((b + dimBlock.x - 1) / dimBlock.x, (a + dimBlock.y - 1) / dimBlock.y, 1);
    float elapsed = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    antialiasingDevice <<< dimGrid, dimBlock >>> (mat_d, a, b, res_d);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    fprintf(stderr,"The elapsed time in gpu was %.8f ms\n", elapsed);
    // Retrieve result from device
    err = cudaMemcpy(res_h, res_d, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
        fprintf(stderr,"Problemas copiando de device a host\n");
    // print results
    for (int i = 0; i < a; ++i) {
        for (int j = 0; j < b; ++j) {
            printf("%d ", res_h[i*b + j]);
        }
        printf("\n");
    }
    // cleanup
    free(mat_h); free(res_h); cudaFree(res_d); cudaFree(mat_d);
    return 0;
}
|
6,484 | #include "includes.h"
// NOTE(review): despite the name "sub3", this kernel ADDS: val1[i] += val2[i] + 1.
// num_elem is accepted but never used, and there is no bounds check, so it
// must be launched with one block of exactly (element count) threads.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
    int idx = threadIdx.x;
    float increment = val2[idx] + 1;
    val1[idx] += increment;
}
6,485 | #include "includes.h"
// One binary search per thread: outputArray[t] receives the index of
// inputArray[t] within the ascending-sorted databaseArray, or -1 when absent.
// Supports 2-D grids of 1-D blocks; threads past `limit` do nothing.
__global__ void binarySearch( const int limit, const int databaseSize, const long* databaseArray, const long* inputArray, int* outputArray) {
    const int blockNum = gridDim.x * blockIdx.y + blockIdx.x;
    const int tid = blockDim.x * blockNum + threadIdx.x;
    if (tid >= limit)
        return;
    const long needle = inputArray[tid];
    int lo = 0;
    int hi = databaseSize - 1;
    int found = -1;
    while (lo <= hi) {
        const int mid = (lo + hi) >> 1;
        const long probe = databaseArray[mid];
        if (probe < needle) {
            lo = mid + 1;
        } else if (probe > needle) {
            hi = mid - 1;
        } else {
            found = mid;
            break;
        }
    }
    outputArray[tid] = found;
}
6,486 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define SIZE 1024
#define THREADS 1024
#define BLOCKS SIZE / THREADS
#define CHECK
double a[SIZE][SIZE];
double b[SIZE][SIZE];
double c[SIZE];
// Sum each row of a SIZE x SIZE matrix into vec[row].
// FIXES vs the original:
//  - index with blockDim.y (the actual launch geometry) instead of the
//    BLOCKS macro, so the kernel is correct for any 1-wide 2-D launch;
//  - accumulate in a register and store once: the original `vec[y] += ...`
//    read device memory that was never initialised by the caller.
__global__ void sum_matrix_lines(double *matrix, double *vec) {
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y >= SIZE)
        return;
    double row_sum = 0.0;
    for (int x = 0; x < SIZE; x++)
        row_sum += matrix[y * SIZE + x];
    vec[y] = row_sum;
}
// Build matrix[y][x] = vec[x] + vec[y] for a SIZE x SIZE matrix.
// FIX: index with blockDim.y instead of the BLOCKS macro, which only
// coincidentally matched the launch configuration; also guard against
// overshooting grids.
__global__ void gen_matrix_from_lines(double *matrix, double *vec) {
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y >= SIZE)
        return;
    double own = vec[y]; // loop-invariant read hoisted out of the loop
    for (int x = 0; x < SIZE; x++)
        matrix[y * SIZE + x] = vec[x] + own;
}
// Fill b with a constant, sum its rows on the GPU, rebuild a matrix from the
// row sums, and verify that every element of the result is identical.
int main() {
    for (int i = 0; i < SIZE; i++) {
        for (int j = 0; j < SIZE; j++) {
            b[i][j] = 20.19;
        }
    }
    cudaDeviceReset();
    double *matrix_1;
    double *matrix_2;
    double *matrix_sum;
    cudaMalloc(&matrix_1, SIZE * SIZE * sizeof(*matrix_1));
    cudaMalloc(&matrix_2, SIZE * SIZE * sizeof(*matrix_2));
    cudaMalloc(&matrix_sum, SIZE * sizeof(*matrix_sum));
    // FIX: the row-sum kernel accumulates with `+=`, but matrix_sum was never
    // zeroed — clear it so the accumulation starts from 0.
    cudaMemset(matrix_sum, 0, SIZE * sizeof(*matrix_sum));
    cudaMemcpy(matrix_2, b, SIZE * SIZE * sizeof(*matrix_2), cudaMemcpyHostToDevice);
    dim3 grid_size = {1, BLOCKS, 1};
    dim3 block_size = {1, THREADS, 1};
    // FIX: grid and block were passed in the wrong order (<<<grid, block>>>).
    sum_matrix_lines<<<grid_size, block_size, 0, 0>>>(matrix_2, matrix_sum);
    cudaDeviceSynchronize();
    gen_matrix_from_lines<<<grid_size, block_size, 0, 0>>>(matrix_1, matrix_sum);
    cudaDeviceSynchronize();
    // FIX: size the copy from the matrix element type, not from `c`.
    cudaMemcpy(a, matrix_1, SIZE * SIZE * sizeof(*matrix_1), cudaMemcpyDeviceToHost);
#ifdef CHECK
    // FIX: the original check had a ?: precedence bug — the tolerance was
    // only compared in the else branch, so any deviation below temp always
    // reported an error. Compare |a[i][j] - temp| against the tolerance.
    double temp = a[0][0];
    for (int i = 0; i < SIZE; i++) {
        for (int j = 0; j < SIZE; j++) {
            double diff = a[i][j] - temp;
            if (diff < 0) diff = -diff;
            if (diff > 0.000000000001) {
                printf("НУ ДА НУ ДА, ПОШЁЛ Я НАХЕР\n");
                break;
            }
        }
    }
    printf("%lf\n", a[0][0]);
#endif
    cudaFree(matrix_1);
    cudaFree(matrix_2);
    cudaFree(matrix_sum);
    return 0;
}
|
6,487 | /*
* Do not change this file
*/
#include <iostream>
#include <fstream>
#include <cassert>
#include <cstring>
#include <string>
#include <chrono>
#include <cstdlib>
#include <ctime>
#define MAX_LENGTH 4096
/**
* Read file, save edges to array (x_x) and
* record the size of each type of edge array (x_x_count).
*/
namespace utils {
int target;
int len;
int *list;
// Read whitespace-separated ints from `filename` into the namespace-global
// `list` (capacity MAX_LENGTH); `len` receives the element count.
// Returns 0 on success, -1 if the file cannot be opened.
int read_file(std::string filename) {
    std::ifstream inputf(filename, std::ifstream::in);
    len = 0;
    list = (int*)malloc(sizeof(int) * MAX_LENGTH);
    if (!inputf) {
        return -1;
    }
    // FIX: loop on successful extraction instead of `!eof()` — the old loop
    // counted one extra garbage element after the final read failed. Also
    // guard against overflowing the fixed-capacity buffer.
    while (len < MAX_LENGTH && (inputf >> list[len])) {
        len++;
    }
    inputf.close();
    return 0;
}
// Prompt for the target value n on stdin and explain how to verify out.txt
// against the shipped ground-truth file. Always returns 0.
int read_target() {
    std::cout << "Your Input Target n : ";
    std::cout << std::endl;
    std::cin >> utils::target;
    std::cout << "FrequencyPrefixSum of target ";
    std::cout << utils::target;
    std::cout << " array is output to out.txt." << std::endl;
    std::cout << "Please compare the out.txt with the groundtruth answer_for_n_is_2.txt by command:" << std::endl;
    std::cout << "\tdiff out.txt answer_for_n_is_2.txt" << std::endl;
    return 0;
}
// Write the first `len` values of `out` to `filename`, one per line.
// Returns 0 on success, -1 if the file cannot be opened.
int write_file(std::string filename, int *out){
    std::ofstream outputf(filename, std::ofstream::out);
    if (!outputf.is_open())
        return -1;
    for (int i = 0; i < len; i++)
        outputf << out[i] << std::endl;
    outputf.close();
    return 0;
}
}
/**
* Global function: prefix sum
* d_in: original array
* d_out: prefix sum array (need to be allocated before)
* numElems: the sum of the array.
*/
// Naive O(n^2) inclusive prefix sum: thread t independently re-sums
// d_in[0..t] and writes the total to d_out[t] (preallocated, numElems wide).
__global__ void prefix_sum_kernel(int *d_out, int *d_in, int numElems) {
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= numElems)
        return;
    unsigned int running = 0;
    for (unsigned int i = 0; i <= tid; ++i)
        running += d_in[i];
    d_out[tid] = running;
}
// Work-efficient (Blelloch) scan in shared memory, converted to an INCLUSIVE
// scan by storing the exclusive result shifted left by one and patching the
// last element. Preconditions implied by the code: single-block launch with
// n == 2 * blockDim.x, n a power of two, n <= 2048 (temp holds 2049 ints).
// NOTE(review): several hazards to confirm/fix before reuse:
//  - if n < 2*blockDim.x, the loads below read in[] out of bounds;
//  - the thread handling the top elements stores temp[n] (never written) into
//    out[n-1] before thread 0 patches it;
//  - there is no __syncthreads() between the shifted stores to out[] and
//    thread 0 reading out[n-2], which is a block-level race.
__global__ void improved_prefix_sum_kernel(int *out, int *in, int n)
{
__shared__ int temp[2049];
int threadId = threadIdx.x;
int offset = 1;
//load input into shared memory
temp[2 * threadId] = in[2 * threadId];
temp[2 * threadId + 1] = in[2 * threadId + 1];
__syncthreads();
for(int d = n/2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if(threadId < d)
{
int ai = offset * (2 * threadId + 1) - 1;
int bi = offset * (2 * threadId + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if(threadId == 0) // clear the last element
temp[n-1] = 0;
// Down-sweep: traverse back down the tree distributing partial sums.
for(int d = 1; d < n; d *= 2)
{
offset /= 2;
__syncthreads();
if(threadId < d)
{
int ai = offset * (2 * threadId + 1) - 1;
int bi = offset * (2 * threadId + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// Shifted store: exclusive scan value at i+1 is the inclusive value at i.
out[2 * threadId] = temp[2 * threadId + 1];
out[2 * threadId + 1] = temp[2 * threadId + 2];
if (threadId == 0) {
out[n - 1] = out[n - 2] + in[n - 1];
}
}
// Flag elements equal to the target: out[i] = (in[i] == target_n) ? 1 : 0.
// Grid-stride loop, so any launch geometry covers all numElems elements.
__global__ void map_kernel(int *out, int *in, int numElems, int target_n){
    unsigned int start = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = start; i < numElems; i += stride) {
        out[i] = (in[i] == target_n) ? 1 : 0;
    }
}
// Read ints from argv[1], count the running frequency of a user-chosen
// target (map to 0/1 flags, then prefix-sum), and write the result to argv[2].
int main(int argc, char **argv) {
    assert(argc == 3 && "Input format error!");
    std::string filename = argv[1];
    std::string out_filename = argv[2];
    assert(utils::read_file(filename) == 0);
    assert(utils::read_target() == 0);
    // Single-block launch: the scan kernel is block-local.
    dim3 grid(1);
    dim3 block(1024);
    int numElems = utils::len;
    int *h_in = utils::list;
    int *h_out = (int*)malloc(sizeof(int) * numElems);
    int *d_in;
    int *d_out;
    int *d_map_out;
    cudaMalloc((void**)&d_in, sizeof(int) * numElems);
    cudaMalloc((void**)&d_out, sizeof(int) * numElems);
    cudaMalloc((void**)&d_map_out, sizeof(int) * numElems);
    cudaMemcpy(d_in, h_in, sizeof(int) * numElems, cudaMemcpyHostToDevice);
    // Map each element to a 0/1 equality flag, then prefix-sum the flags to
    // obtain the running frequency of the target.
    map_kernel<<<grid, block>>>(d_map_out, d_in, numElems, utils::target);
    cudaDeviceSynchronize();
    improved_prefix_sum_kernel<<<grid, block>>>(d_out, d_map_out, numElems);
    cudaMemcpy(h_out, d_out, sizeof(int) * numElems, cudaMemcpyDeviceToHost);
    assert(utils::write_file(out_filename, h_out) == 0);
    // FIX: release everything — d_map_out and utils::list were previously
    // leaked, and an unused h_map_out host buffer has been removed entirely.
    cudaFree(d_out);
    cudaFree(d_in);
    cudaFree(d_map_out);
    free(h_out);
    free(utils::list);
    return 0;
}
6,488 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Single-thread scalar addition on the device: *c = *a + *b.
__global__ void add(int *a, int *b,int *c)
{
    int lhs = *a;
    int rhs = *b;
    *c = lhs + rhs;
}
// Add two integers on the GPU with a 1x1 launch and print the sum.
int main()
{
    // host copies of a, b, c and their device mirrors
    int a = 26754;
    int b = 73456;
    int c;
    int *d_a, *d_b, *d_c;
    const int size = sizeof(int);
    // Allocate space for device copies of a, b, c
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    // Copy inputs to device
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
    // Launch add() kernel on GPU
    add<<<1, 1>>>(d_a, d_b, d_c);
    // Copy result back to host
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
    // Cleanup
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    printf("toplam sonucu : %d\n", c);
    return 0;
}
|
6,489 | #include <stdio.h>
#define Width 32
#define TILE_WIDTH 16
// Naive dense matrix multiply: Pd = Md * Nd for square ncols x ncols
// row-major matrices, one thread per output element.
// FIX: added a row/col bounds guard so the kernel is safe for launch grids
// that overshoot the matrix size (the original wrote out of bounds then).
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int ncols){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= ncols || col >= ncols)
        return;
    float PValue = 0; // accumulator for the output element
    for (int k = 0; k < ncols; k++) {
        float Melement = Md[row * ncols + k];
        float Nelement = Nd[k * ncols + col];
        PValue += Melement * Nelement;
    }
    Pd[row * ncols + col] = PValue;
}
// Multiply a matrix of ones by a matrix of twos on the GPU and print the
// Width x Width product (every element should be 2 * Width).
int main(int argc, char **argv){
    const int size = Width * Width * sizeof(float);
    float M[Width][Width], N[Width][Width], P[Width][Width];
    float *Md, *Nd, *Pd;
    for (int i = 0; i < Width; i++){
        for (int j = 0; j < Width; j++){
            M[i][j] = 1;
            N[i][j] = 2;
        }
    }
    cudaMalloc((void**)&Md, size);
    cudaMalloc((void**)&Nd, size);
    cudaMalloc((void**)&Pd, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
    // One thread per output element, TILE_WIDTH x TILE_WIDTH blocks.
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 dimGrid(Width/TILE_WIDTH, Width/TILE_WIDTH);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
    // Read the product back and release device memory.
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
    for (int i = 0; i < Width; i++){
        for (int j = 0; j < Width; j++){
            printf("%.2f ", P[i][j]);
        }
        printf("\n");
    }
    return 0;
}
|
6,490 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <inttypes.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCK_WIDTH 32
#define TAILLE 2048
#define gettime(t) clock_gettime(CLOCK_MONOTONIC_RAW, t)
#define get_sub_seconde(t) (1e-9*(double)t.tv_nsec)
/** return time in second
*/
/** return time in second
 */
double get_elapsedtime(void)
{
    struct timespec st;
    if (gettime(&st) != 0)
        return 0;
    return (double)st.tv_sec + get_sub_seconde(st);
}
// Fill A and B with pseudo-random values (fixed seed 2019 for
// reproducibility) and zero C. All matrices are size x size row-pointer views.
void init(double** A, double** B, double** C, int size)
{
    srand(2019);
    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            A[row][col] = rand();
            B[row][col] = rand();
            C[row][col] = 0.0;
        }
    }
}
// Reference CPU matrix multiply: C = A * B, all size x size matrices given
// as arrays of row pointers. O(size^3), used to validate the GPU kernel.
void mult(double** A, double** B, double** C, int size)
{
    for (int i = 0; i < size; i++)
    {
        for (int j = 0; j < size; j++)
        {
            double acc = 0.;
            for (int k = 0; k < size; k++)
                acc += A[i][k] * B[k][j];
            C[i][j] = acc;
        }
    }
}
// QUESTION 4
// QUESTION 4
// Tiled (shared-memory) matrix multiply: C = A * B, N x N row-major.
// Precondition: N is a multiple of BLOCK_WIDTH and the launch uses
// BLOCK_WIDTH x BLOCK_WIDTH blocks on an N/BLOCK_WIDTH-wide square grid
// (full tiles only — there is no partial-tile guard).
__global__
void MulMatrixKernel(double* A, double* B, double* C, int N)
{
    __shared__ double share_A[BLOCK_WIDTH][BLOCK_WIDTH];
    __shared__ double share_B[BLOCK_WIDTH][BLOCK_WIDTH];
    double sum = 0;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int line = threadIdx.y + blockDim.y * blockIdx.y;
    for(int tile = 0; tile < gridDim.x; tile++)
    {
        // Stage one tile of A's row band and of B's column band.
        share_A[threadIdx.x][threadIdx.y] = A[line * N + tile * blockDim.x + threadIdx.x];
        // FIX: the tile row offset is tile * blockDim.y; the original used
        // tile * blockIdx.y, which loaded the wrong rows of B for every
        // block off the grid diagonal.
        share_B[threadIdx.x][threadIdx.y] = B[(tile * blockDim.y + threadIdx.y) * N + col];
        __syncthreads();
        for(int i = 0; i < BLOCK_WIDTH; i++)
        {
            sum += share_A[i][threadIdx.y] * share_B[threadIdx.x][i];
        }
        // Barrier before the next tile overwrites the shared buffers.
        __syncthreads();
    }
    C[line * N + col] = sum;
}
// FIN QUESTION 4
// Benchmark the tiled GPU matrix multiply for an N x N problem (N from
// argv[1], default TAILLE), timing the kernel with CUDA events.
int main(int argc, char** argv){
    int N, i;
    double *A_data;
    double *B_data;
    double *C_data;
    double **A;
    double **B;
    double **C;
    double t0 = 0., t1 = 0., duration = 0.;
    N = (argc < 2)?TAILLE:atoi(argv[1]);
    fprintf(stdout, "Matrix Multiplication\n Size: %dx%d\n", N, N);
    // Host memory: one contiguous buffer per matrix plus row-pointer views.
    A_data = (double*) malloc(sizeof(double) * N * N);
    B_data = (double*) malloc(sizeof(double) * N * N);
    C_data = (double*) malloc(sizeof(double) * N * N);
    A = (double**) malloc(sizeof(double *) * N);
    B = (double**) malloc(sizeof(double *) * N);
    C = (double**) malloc(sizeof(double *) * N);
    for(i = 0; i < N; i++)
    {
        A[i] = &A_data[i * N];
        B[i] = &B_data[i * N];
        C[i] = &C_data[i * N];
    }
    // Value initialization
    init(A, B, C, N);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Device buffers and input transfer
    double *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, sizeof(double) * N * N);
    cudaMalloc(&d_B, sizeof(double) * N * N);
    cudaMalloc(&d_C, sizeof(double) * N * N);
    cudaMemcpy(d_A, A_data, sizeof(double) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B_data, sizeof(double) * N * N, cudaMemcpyHostToDevice);
    // Launch configuration: BLOCK_WIDTH x BLOCK_WIDTH threads per block.
    int nbBlocks = N / BLOCK_WIDTH;
    if(N % BLOCK_WIDTH) nbBlocks++;
    dim3 gridSize(nbBlocks, nbBlocks);
    dim3 blockSize(BLOCK_WIDTH, BLOCK_WIDTH);
    cudaEventRecord(start);
    MulMatrixKernel<<<gridSize, blockSize>>>(d_A, d_B, d_C, N);
    cudaEventRecord(stop);
    cudaMemcpy(C_data, d_C, sizeof(double) * N * N, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    // FIX: compute the operation count in 64-bit; N*N*N overflows int for
    // N >= 1291 and the default TAILLE is 2048.
    uint64_t nb_op = (uint64_t)N * N * N;
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Matrice %dx%d\n\tTemps: %f s\n\tMFlops: %.2f\n", N, N, milliseconds/1000, (nb_op / (milliseconds/1000))*1E-6);
    // CPU reference timing (the multiply itself is commented out, so this
    // measures ~0 s and the MFlops line below is not meaningful).
    t0 = get_elapsedtime();
    // mult(A, B, C, N);
    t1 = get_elapsedtime();
    duration = (t1 - t0);
    fprintf(stdout, "Performance results: \n");
    fprintf(stdout, "  Time: %lf s\n", duration);
    fprintf(stdout, "  MFlops: %.2f\n", (nb_op / duration)*1E-6);
    // FIX: release device buffers, events and host memory (all were leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(A); free(B); free(C);
    free(A_data); free(B_data); free(C_data);
    return 0;
}
|
6,491 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void check_handshaking_gpu(int * strongNeighbor, int * matches, int numNodes) {
    /** YOUR CODE GOES BELOW **/
    // Grid-stride loop over all nodes: an unmatched node i is matched with
    // its strongest neighbour when that choice is mutual (the "handshake",
    // i.e. strongNeighbor[strongNeighbor[i]] == i).
    int stride = blockDim.x * gridDim.x;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    __syncthreads(); // kept from the original; unconditional, so every thread reaches it
    for (int node = tid; node < numNodes; node += stride) {
        if (matches[node] != -1)
            continue; // already matched
        int sn = strongNeighbor[node];
        if (sn != -1 && strongNeighbor[sn] == node) {
            matches[node] = sn;
        }
    }
    /** YOUR CODE GOES ABOVE **/
}
|
6,492 | // includes
#include <stdio.h>
#include <stdlib.h>
//-------------Funcion llenar "velocidad"
//-------------"Fill velocity" function
// Load the 9-velocity distribution-function table from
// "matriz_con_func_dist.txt" into pmat: row*colum nodes, 9 floats per node,
// flattened row-major. Exits the whole program if the file cannot be opened.
void llenarVelocidad(float * pmat, int row, int colum){
    const int nvel = 9;
    int node = row * colum;
    FILE *fichero = fopen("matriz_con_func_dist.txt", "r");
    if (fichero == NULL)
    {
        printf( "No se puede abrir el fichero.\n" );
        system("pause");
        exit (EXIT_FAILURE);
    }
    for (int i = 0; i < node; i++) {
        for (int j = 0; j < nvel; j++) {
            float leer;
            fscanf(fichero, "%f", &leer);
            pmat[i * nvel + j] = leer;
        }
        fscanf(fichero, "\n");
    }
    fclose(fichero);
}
|
6,493 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "constants.cuh"
#include "mesh.cuh"
#include "matrix_functions.cuh"
#include "material.cuh"
#include "sys.cuh"
// Density filter: xphys = (H * x_lin) ./ hs, where H is the sparse filter
// matrix (1-based row/col indices) and x is the nely x nelx design field.
void compute_xphys(struct sparse *h ,double *hs, double*x, double *xphys, struct mesh *mesh) {
    int n = mesh->nelx * mesh->nely;
    double *x_lin = (double*)malloc(n * sizeof(double));
    double *accum = (double*)malloc(n * sizeof(double));
    for (int i = 0; i < n; i++) {
        accum[i] = 0;
        xphys[i] = 0;
    }
    linearize_matrix(x, x_lin, mesh->nely, mesh->nelx);
    // Sparse matrix-vector product accum = H * x_lin (indices are 1-based).
    for (int i = 0; i < h->nnz; i++) {
        accum[h->row[i] - 1] += h->val[i] * x_lin[h->col[i] - 1];
    }
    // Normalise by the filter row sums hs.
    for (int i = 0; i < n; i++) {
        xphys[i] = accum[i] / hs[i];
    }
    free(accum);
    free(x_lin);
}
// Assemble per-element stiffness contributions: for every element e,
// sk[e*64 .. e*64+63] = ke_lin * E(x_e), using the modified SIMP law
// E = emin + x^penal * (e0 - emin). *sk is allocated here (caller frees).
// For filter types 0/1 the density field is stored as a nely x nelx matrix
// and is linearised first; otherwise it is already linear.
void compute_sk(double **sk, double *ke, double *xphys, struct mesh *mesh, struct material *material) {
    int n = mesh->nelx * mesh->nely;
    (*sk) = (double*)malloc(n * 64 * sizeof(double));
    double *ke_lin = (double*)malloc(KEROW * KECOL * sizeof(double));
    double *xphys_lin = (double*)malloc(n * sizeof(double));
    double *scale = (double*)malloc(n * sizeof(double));
    linearize_matrix(ke, ke_lin, KEROW, KECOL);
    const double *field = xphys;
    if (mesh->ft == 0 || mesh->ft == 1) {
        linearize_matrix(xphys, xphys_lin, mesh->nely, mesh->nelx);
        field = xphys_lin;
    }
    for (int i = 0; i < n; i++) {
        scale[i] = pow(field[i], mesh->penal)*(material->e0 - material->emin) + material->emin;
    }
    int k = 0;
    for (int e = 0; e < n; e++) {
        for (int i = 0; i < 64; i++) {
            (*sk)[k++] = ke_lin[i] * scale[e];
        }
    }
    free(scale);
    free(ke_lin);
    free(xphys_lin);
}
// Assemble per-element mass contributions: for every element e,
// sm[e*64 .. e*64+63] = (me_lin * rho(x_e)) * (rho0 - rhomin), with the mass
// interpolation rho = x * sin(pi/2 * x^2) + rhomin. *sm is allocated here
// (caller frees). For filter types 0/1 the density field is linearised first.
void compute_sm(double **sm, double *me, double *xphys, struct mesh *mesh, struct material *material) {
    double pi = 3.141592653589;
    int n = mesh->nelx * mesh->nely;
    (*sm) = (double*)malloc(n * 64 * sizeof(double));
    double *me_lin = (double*)malloc(MEROW * MECOL * sizeof(double));
    double *xphys_lin = (double*)malloc(n * sizeof(double));
    double *density = (double*)malloc(n * sizeof(double));
    linearize_matrix(me, me_lin, MEROW, MECOL);
    const double *field = xphys;
    if ((mesh->ft == 0) || (mesh->ft == 1)) {
        linearize_matrix(xphys, xphys_lin, mesh->nely, mesh->nelx);
        field = xphys_lin;
    }
    for (int i = 0; i < n; i++) {
        density[i] = (field[i] * sin((pi / 2)*pow(field[i], 2))) + material->rhomin;
    }
    int k = 0;
    for (int e = 0; e < n; e++) {
        for (int i = 0; i < 64; i++) {
            (*sm)[k++] = (me_lin[i] * density[e])*(material->rho0 - material->rhomin);
        }
    }
    free(density);
    free(me_lin);
    free(xphys_lin);
}
// Symmetrise a sparse matrix in place (MAT = (MAT + MAT')/2), restrict it to
// the free degrees of freedom (MAT = MAT(freedofs, freedofs)) and renumber
// the surviving 1-based indices so they are contiguous after the fixed dofs
// are removed. mat->row/col/val are reallocated; mat->nnz is updated last.
// NOTE(review): every pass is a brute-force O(nnz^2) / O(nnz * ndof^2) scan —
// acceptable only for small meshes; confirm before scaling up.
void modify_m_k(struct sparse *mat, struct mesh *mesh, int *freedofs, int *fixeddofs) {
int *ik_tmp, *jk_tmp, index = 0, found, count, c;
int n_freedofs = (2 * (mesh->nelx + 1)*(mesh->nely + 1)) - mesh->fixed_count;
double *vk_tmp, value;
// Temporaries for the symmetrised triplets (one entry per original entry).
ik_tmp = (int*)malloc(mat->nnz * sizeof(int));
jk_tmp = (int*)malloc(mat->nnz * sizeof(int));
vk_tmp = (double*)malloc(mat->nnz * sizeof(double));
for (int i = 0; i < mat->nnz; i++) { //MAT=(MAT+MAT')/2
found = 0;
// Look for the transposed partner (col,row) of entry i.
for (int j = 0;j<mat->nnz; j++) {
if ((mat->row[i] == mat->col[j]) && (mat->col[i] == mat->row[j])) {
value = (mat->val[i] + mat->val[j]);
if (fabs(value) > pow(10, -9)) {
found = 1;
ik_tmp[index] = mat->row[i];
jk_tmp[index] = mat->col[i];
vk_tmp[index] = value / 2;
index++;
break;
}
}
}
// No significant transposed partner: keep half of the original value.
if (found == 0) {
ik_tmp[index] = mat->row[i];
jk_tmp[index] = mat->col[i];
vk_tmp[index] = mat->val[i] / 2;
index++;
}
}
count = 0; //MAT=MAT(freedofs,freedofs)
// First pass: count entries whose row AND column are both free dofs.
for (int i = 0; i < n_freedofs;i++) {
for (int j = 0; j < mat->nnz;j++) {
if (ik_tmp[j] == freedofs[i]) {
for (int k = 0; k < n_freedofs;k++) {
if (jk_tmp[j] == freedofs[k]) {
count++;
break;
}
}
}
}
}
// Reallocate the matrix storage at the restricted size.
free(mat->row);
free(mat->col);
free(mat->val);
mat->row = (int*)malloc(count * sizeof(int));
mat->col = (int*)malloc(count * sizeof(int));
mat->val = (double*)malloc(count * sizeof(double));
count = 0;
// Second pass: copy the surviving entries (mat->nnz still holds the OLD
// count here, which is the valid bound for the ik/jk/vk temporaries).
for (int i = 0; i < n_freedofs; i++) {
for (int j = 0; j < mat->nnz; j++) {
if (ik_tmp[j] == freedofs[i]) {
for (int k = 0; k < n_freedofs; k++) {
if (jk_tmp[j] == freedofs[k]) {
mat->row[count] = ik_tmp[j];
mat->col[count] = jk_tmp[j];
mat->val[count] = vk_tmp[j];
count++;
//printf("(%d,%d) %f\n", ik_tmp[j], jk_tmp[j], vk_tmp[j]);
break;
}
}
}
}
}
mat->nnz = count;
// Shift each surviving index down by the number of removed (fixed) dofs
// that precede it, making the numbering contiguous again.
for (int i = 0; i < mat->nnz; i++) { //correct indexes
c = 0;
for (int j = 0; j < mesh->fixed_count;j++) {
if (mat->row[i] > fixeddofs[j]) {
c++;
}
}
mat->row[i] -= c;
c = 0;
for (int j = 0; j < mesh->fixed_count; j++) {
if (mat->col[i] > fixeddofs[j]) {
c++;
}
}
mat->col[i] -= c;
//printf("(%d,%d) %f\n", (*ik)[i], (*jk)[i], (*vk)[i]);
}
free(ik_tmp);
free(jk_tmp);
free(vk_tmp);
}
// Build the Rayleigh damping matrix S = beta*K + alpha*M as a sparse triplet
// union of K's and M's sparsity patterns. s->row/col/val are allocated here.
// NOTE(review): the counting pass uses `(k+m) != 0` while the filling pass
// uses `fabs(k+m) > 1e-9`, so s->nnz can exceed the number of entries
// actually written, leaving trailing triplets uninitialised — confirm that
// downstream consumers tolerate this (or tighten the count to match).
void compute_s(struct sparse *k, struct sparse *m, struct sparse *s, struct mesh *mesh) {
int found, count = 0;
// Pass 1a: count K entries (shared entries count once, unless cancelled).
for (int i = 0; i < k->nnz;i++) {
found = 0;
for (int j = 0; j < m->nnz; j++) {
if ((k->row[i] == m->row[j]) && (k->col[i] == m->col[j])) {
found = 1;
if ((k->val[i] + m->val[j]) != 0)
{
count++;
}
break;
}
}
if (found == 0) {
count++;
}
}
// Pass 1b: count M entries that have no K partner.
for (int i = 0; i < m->nnz; i++) {
found = 0;
for (int j = 0; j < k->nnz; j++) {
if ((m->row[i] == k->row[j]) && (m->col[i] == k->col[j])) {
found = 1;
}
}
if (found == 0) {
count++;
}
}
s->nnz = count;
s->row = (int*)malloc(s->nnz * sizeof(int));
s->col = (int*)malloc(s->nnz * sizeof(int));
s->val = (double*)malloc(s->nnz * sizeof(double));
count = 0;
// Pass 2a: fill entries present in K (combined with M where shared).
for (int i = 0; i < k->nnz; i++) {
found = 0;
for (int j = 0; j < m->nnz; j++) {
if ((k->row[i] == m->row[j]) && (k->col[i] == m->col[j])) {
found = 1;
if (fabs(k->val[i] + m->val[j]) > pow(10, -9))
{
s->row[count] = k->row[i];
s->col[count] = k->col[i];
s->val[count] = (mesh->beta*k->val[i]) + (mesh->alpha*m->val[j]);
count++;
}
break;
}
}
if (found == 0) {
s->row[count] = k->row[i];
s->col[count] = k->col[i];
s->val[count] = (mesh->beta*k->val[i]);
count++;
}
}
// Pass 2b: fill M-only entries.
for (int i = 0; i < m->nnz; i++) {
found = 0;
for (int j = 0; j < k->nnz; j++) {
if ((m->row[i] == k->row[j]) && (m->col[i] == k->col[j])) {
found = 1;
}
}
if (found == 0) {
s->row[count] = m->row[i];
s->col[count] = m->col[i];
s->val[count] = mesh->alpha*m->val[i];
count++;
}
}
/*for (int i = 0; i < (*s_nnz); i++) {
printf("(%d,%d) %f\n", (*is)[i], (*js)[i], (*vs)[i]);
}*/
}
/*
 * g_matrix_init: assembles the descriptor state-space system
 *     E*x' = A*x + B*u,   y = C*x,   D = 0
 * from stiffness K, damping S and mass M (COO sparse, 1-based indices).
 * NINP/NOUT are compile-time input/output counts defined elsewhere.
 *
 * Fix vs. original: the "-S" block of ASTATO iterated over s->nnz entries
 * but read k->row[i]/k->col[i] -- an out-of-bounds read whenever
 * s->nnz > k->nnz, and S values placed at K's coordinates.  It now uses
 * s->row[i]/s->col[i], matching the MATLAB comment ASTATO(...) = [-K -S].
 */
void g_matrix_init(int ndof, int nstate, struct sparse *a, struct sparse *b, struct sparse *c, double *dstato, struct sparse *e, struct sparse *k, struct sparse *m, struct sparse *s, int *freedofs, struct sparse *f) {
    int found;
    //-------estato init-----------
    /* capacity ndof^2 + ndof: identity block plus an (at most dense) M block */
    e->val = (double*)malloc((int)(pow(ndof, 2) + ndof) * sizeof(double));
    e->row = (int*)malloc((int)(pow(ndof, 2) + ndof) * sizeof(int));
    e->col = (int*)malloc((int)(pow(ndof, 2) + ndof) * sizeof(int));
    e->nnz = 0;
    e->nrow = nstate;
    e->ncol = nstate;
    for (int i = 0; i < ndof;i++) { //ESTATO(1:NDOF,1:NDOF)=speye(NDOF);
        e->row[i] = i + 1;
        e->col[i] = i + 1;
        e->val[i] = 1;
        e->nnz++;
    }
    for (int i = 0; i < m->nnz; i++) { //ESTATO(NDOF+1:NSTATE,NDOF+1:NSTATE) = M;
        e->row[i + ndof] = m->row[i] + ndof;
        e->col[i + ndof] = m->col[i] + ndof;
        e->val[i + ndof] = m->val[i];
        e->nnz++;
    }
    //-------astato init----------
    /* capacity 2*ndof^2 + ndof: identity block plus dense -K and -S blocks */
    a->val = (double*)malloc((int)(2 * pow(ndof, 2) + ndof) * sizeof(double));
    a->row = (int*)malloc((int)(2 * pow(ndof, 2) + ndof) * sizeof(int));
    a->col = (int*)malloc((int)(2 * pow(ndof, 2) + ndof) * sizeof(int));
    a->nnz = 0;
    a->nrow = nstate;
    a->ncol = nstate;
    for (int i = 0; i < ndof; i++) { //ASTATO(1:NDOF,NDOF+1:NSTATE) = speye(NDOF);
        a->row[i] = i + 1;
        a->col[i] = i + ndof + 1;
        a->val[i] = 1;
        a->nnz++;
    }
    for (int i = 0; i < k->nnz; i++) { //ASTATO(NDOF+1:NSTATE,:) = [-K -S];
        a->row[i + ndof] = k->row[i] + ndof;
        a->col[i + ndof] = k->col[i];
        a->val[i + ndof] = -k->val[i];
        a->nnz++;
    }
    for (int i = 0; i < s->nnz; i++) {
        /* fixed: use S's own coordinates, not K's (see header comment) */
        a->row[i + ndof + k->nnz] = s->row[i] + ndof;
        a->col[i + ndof + k->nnz] = s->col[i] + ndof;
        a->val[i + ndof + k->nnz] = -s->val[i];
        a->nnz++;
    }
    //-------bstato init----------
    /* NOTE(review): capacity is ndof entries; assumes the force vector F has
     * at most ndof matching free-dof entries -- TODO confirm upstream. */
    b->val = (double*)malloc(ndof * sizeof(double));
    b->row = (int*)malloc(ndof * sizeof(int));
    b->col = (int*)malloc(ndof * sizeof(int));
    b->nnz = 0;
    b->nrow = nstate;
    b->ncol = NINP;
    for (int i = 0; i < f->nnz; i++) { //BSTATO(NDOF+1:NSTATE,1) = F(freedofs);
        found = 0;
        for (int j = 0; j < ndof;j++) {
            if (f->row[i] == freedofs[j]) {
                found = 1;
                b->row[b->nnz] = b->nnz + ndof + 1;
                b->col[b->nnz] = f->col[i];
                b->val[b->nnz] = f->val[i];
                b->nnz++;
                break;
            }
        }
    }
    //-----cstato init ------
    c->val = (double*)malloc(nstate * sizeof(double));
    c->row = (int*)malloc(nstate * sizeof(int));
    c->col = (int*)malloc(nstate * sizeof(int));
    c->nnz = 0;
    c->nrow = NOUT;
    c->ncol = nstate;
    for (int i = 0; i < f->nnz; i++) { //CSTATO: transpose of the B pattern over the free dofs
        found = 0;
        for (int j = 0; j < ndof;j++) {
            if (f->row[i] == freedofs[j]) {
                found = 1;
                c->row[c->nnz] = f->col[i];
                c->col[c->nnz] = c->nnz + 1;
                c->val[c->nnz] = f->val[i];
                c->nnz++;
                break;
            }
        }
    }
    //-------dstato init-----
    (*dstato) = 0; /* no direct feed-through term */
}
/*
 * filt_mod_sensitivities: applies the chosen filter (mesh->ft) to the
 * objective sensitivities df0dx in place.
 *   ft == 1: sensitivity filter  df0dx = (H*(x(:).*df0dx)) ./ Hs ./ max(1e-3, x(:))
 *   ft == 2: density filter      df0dx = H*(df0dx ./ Hs)
 * H is a sparse COO weight matrix with 1-based indices; hs holds its row sums.
 */
void filt_mod_sensitivities(struct mesh *mesh, double *x, double *df0dx,struct sparse *h, double *hs) {
    const int numel = mesh->nelx * mesh->nely;
    double *scratch = (double*)malloc(numel * sizeof(double));
    if (mesh->ft == 1) {
        double *x_flat = (double*)malloc(numel * sizeof(double));
        double *accum = (double*)malloc(numel * sizeof(double));
        linearize_matrix(x, x_flat, mesh->nely, mesh->nelx); /* x_flat = x(:) */
        for (int el = 0; el < numel; el++) {
            scratch[el] = x_flat[el] * df0dx[el]; /* x(:).*df0dx(:) */
            accum[el] = 0;
        }
        for (int nz = 0; nz < h->nnz; nz++) { /* accum = H*scratch */
            accum[h->row[nz] - 1] += h->val[nz] * scratch[h->col[nz] - 1];
        }
        for (int el = 0; el < numel; el++) { /* ./Hs./max(1e-3,x(:)) */
            df0dx[el] = (accum[el] / hs[el]) / max((pow(10, -3)), x_flat[el]);
        }
        free(x_flat);
        free(accum);
    }
    else if (mesh->ft == 2) {
        for (int el = 0; el < numel; el++) { /* scratch = df0dx(:)./Hs */
            scratch[el] = df0dx[el] / hs[el];
            df0dx[el] = 0;
        }
        for (int nz = 0; nz < h->nnz; nz++) { /* df0dx = H*scratch */
            df0dx[h->row[nz] - 1] += h->val[nz] * scratch[h->col[nz] - 1];
        }
    }
    free(scratch);
}
/*
 * volume_constraint: evaluates the volume constraint value(s) *fval and
 * gradient(s) *dfdx for the current design field x.
 * IVOLUME == 0: one upper-bound constraint; otherwise a two-sided pair
 * (upper and lower), in which case *fval must hold 2 entries and *dfdx
 * must hold 2*nelx*nely entries -- caller-owned sizing, not checked here.
 * mesh->ft == 2 additionally pushes the gradients through the density
 * filter H (COO, 1-based indices) with row sums hs.
 */
void volume_constraint(double **fval, double **dfdx, double *x, struct mesh *mesh, struct sparse *h, double *hs) {
double epsi, epsi2, sum;
double *x_lin, *tmp, *tmp2, *dfdx2;
int numel;
numel = mesh->nelx*mesh->nely;
x_lin = (double*)malloc(numel * sizeof(double));
epsi = 0.01; /* slack on the upper volume bound */
linearize_matrix(x, x_lin, mesh->nelx ? mesh->nely : mesh->nely, mesh->nelx); // NOTE(review): original call below kept verbatim
/*
 * free_sparse: releases the row/col/val triplet arrays of every sparse
 * matrix used by the solver.  The struct objects themselves are not freed.
 */
void free_sparse(struct sparse *a, struct sparse *b, struct sparse *c, struct sparse *e, struct sparse *k, struct sparse *m, struct sparse *s, struct sparse *invu) {
    struct sparse *mats[] = { a, b, c, e, k, m, s, invu };
    const int nmats = (int)(sizeof(mats) / sizeof(mats[0]));
    for (int i = 0; i < nmats; i++) {
        free(mats[i]->row);
        free(mats[i]->col);
        free(mats[i]->val);
    }
}
|
6,494 | #include "includes.h"
// In-place element-wise update: x += deltaX, y += deltaY, z += deltaZ.
// One block per element (blockIdx.x is the element index); threads beyond
// N do nothing.  The three original identical guards are merged into one.
__global__ void add( float *x, float *y, float *z, float *deltaX, float *deltaY, float *deltaZ ) {
    const int tid = blockIdx.x; // this block handles element tid
    if (tid < N) {
        x[tid] += deltaX[tid];
        y[tid] += deltaY[tid];
        z[tid] += deltaZ[tid];
    }
}
6,495 | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 1e-4
/*
 * InverseMat4x4: inverts a 4x4 double matrix by explicit cofactor
 * expansion (adjugate / determinant).  Returns false -- leaving inv_out
 * UNTOUCHED -- when |det| < 1e-9; returns true and writes inv_out
 * otherwise.  Callers should pre-initialize inv_out if they rely on a
 * fallback value on failure.
 */
__device__ bool InverseMat4x4(double m_in[4][4], double inv_out[4][4]) {
/* flatten to row-major 1-D for the cofactor formulas below */
double m[16], inv[16];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
m[i * 4 + j] = m_in[i][j];
}
}
/* inv[k] below are the cofactors (adjugate entries, pre-division) */
inv[0] = m[5] * m[10] * m[15] -
m[5] * m[11] * m[14] -
m[9] * m[6] * m[15] +
m[9] * m[7] * m[14] +
m[13] * m[6] * m[11] -
m[13] * m[7] * m[10];
inv[4] = -m[4] * m[10] * m[15] +
m[4] * m[11] * m[14] +
m[8] * m[6] * m[15] -
m[8] * m[7] * m[14] -
m[12] * m[6] * m[11] +
m[12] * m[7] * m[10];
inv[8] = m[4] * m[9] * m[15] -
m[4] * m[11] * m[13] -
m[8] * m[5] * m[15] +
m[8] * m[7] * m[13] +
m[12] * m[5] * m[11] -
m[12] * m[7] * m[9];
inv[12] = -m[4] * m[9] * m[14] +
m[4] * m[10] * m[13] +
m[8] * m[5] * m[14] -
m[8] * m[6] * m[13] -
m[12] * m[5] * m[10] +
m[12] * m[6] * m[9];
inv[1] = -m[1] * m[10] * m[15] +
m[1] * m[11] * m[14] +
m[9] * m[2] * m[15] -
m[9] * m[3] * m[14] -
m[13] * m[2] * m[11] +
m[13] * m[3] * m[10];
inv[5] = m[0] * m[10] * m[15] -
m[0] * m[11] * m[14] -
m[8] * m[2] * m[15] +
m[8] * m[3] * m[14] +
m[12] * m[2] * m[11] -
m[12] * m[3] * m[10];
inv[9] = -m[0] * m[9] * m[15] +
m[0] * m[11] * m[13] +
m[8] * m[1] * m[15] -
m[8] * m[3] * m[13] -
m[12] * m[1] * m[11] +
m[12] * m[3] * m[9];
inv[13] = m[0] * m[9] * m[14] -
m[0] * m[10] * m[13] -
m[8] * m[1] * m[14] +
m[8] * m[2] * m[13] +
m[12] * m[1] * m[10] -
m[12] * m[2] * m[9];
inv[2] = m[1] * m[6] * m[15] -
m[1] * m[7] * m[14] -
m[5] * m[2] * m[15] +
m[5] * m[3] * m[14] +
m[13] * m[2] * m[7] -
m[13] * m[3] * m[6];
inv[6] = -m[0] * m[6] * m[15] +
m[0] * m[7] * m[14] +
m[4] * m[2] * m[15] -
m[4] * m[3] * m[14] -
m[12] * m[2] * m[7] +
m[12] * m[3] * m[6];
inv[10] = m[0] * m[5] * m[15] -
m[0] * m[7] * m[13] -
m[4] * m[1] * m[15] +
m[4] * m[3] * m[13] +
m[12] * m[1] * m[7] -
m[12] * m[3] * m[5];
inv[14] = -m[0] * m[5] * m[14] +
m[0] * m[6] * m[13] +
m[4] * m[1] * m[14] -
m[4] * m[2] * m[13] -
m[12] * m[1] * m[6] +
m[12] * m[2] * m[5];
inv[3] = -m[1] * m[6] * m[11] +
m[1] * m[7] * m[10] +
m[5] * m[2] * m[11] -
m[5] * m[3] * m[10] -
m[9] * m[2] * m[7] +
m[9] * m[3] * m[6];
inv[7] = m[0] * m[6] * m[11] -
m[0] * m[7] * m[10] -
m[4] * m[2] * m[11] +
m[4] * m[3] * m[10] +
m[8] * m[2] * m[7] -
m[8] * m[3] * m[6];
inv[11] = -m[0] * m[5] * m[11] +
m[0] * m[7] * m[9] +
m[4] * m[1] * m[11] -
m[4] * m[3] * m[9] -
m[8] * m[1] * m[7] +
m[8] * m[3] * m[5];
inv[15] = m[0] * m[5] * m[10] -
m[0] * m[6] * m[9] -
m[4] * m[1] * m[10] +
m[4] * m[2] * m[9] +
m[8] * m[1] * m[6] -
m[8] * m[2] * m[5];
/* determinant via expansion along the first row */
double det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12];
/* NOTE(review): `abs` on a double relies on the C++ overload being in
 * scope in device code; `fabs` would be unambiguous -- confirm. */
if (abs(det) < 1e-9) {
return false;
}
det = 1.0 / det;
/* scale the adjugate by 1/det and unflatten back to 2-D */
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
inv_out[i][j] = inv[i * 4 + j] * det;
}
}
return true;
}
/*
 * best_local_affine_kernel: one thread per pixel (flat id < h*w).  For each
 * pixel it fits, by least squares over a (2*kernel_radius+1)^2 window, a
 * 3x4 affine model A mapping the input pixel [in_r, in_g, in_b, 1] to the
 * output pixel, and stores A row-major as 12 floats at affine_model[12*id].
 * `input` and `output` are laid out as 3 planes of size h*w each; the code
 * reads plane offsets id2 + 2*size, id2 + size, id2 (channel order as
 * stored by the caller).  Mt_M accumulates M^T*M with a 1e-3 Tikhonov term
 * on the diagonal; Mt_S accumulates M^T*S per output channel.
 * NOTE(review): the `epsilon` parameter is unused -- presumably it was
 * meant to be the 1e-3 regularizer; confirm against callers.
 */
__global__ void best_local_affine_kernel( float *output, float *input, float *affine_model, int h, int w, float epsilon, int kernel_radius )
{
int size = h * w;
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
int x = id % w, y = id / w;
double Mt_M[4][4] = {}; // 4x4
double invMt_M[4][4] = {};
double Mt_S[3][4] = {}; // RGB -> 1x4
double A[3][4] = {};
/* zero everything; seed Mt_M's diagonal with 1e-3 regularization */
for (int i = 0; i < 4; i++)
for (int j = 0; j < 4; j++) {
Mt_M[i][j] = 0, invMt_M[i][j] = 0;
if (i != 3) {
Mt_S[i][j] = 0, A[i][j] = 0;
if (i == j)
Mt_M[i][j] = 1e-3;
}
}
/* accumulate normal equations over the window, skipping out-of-image taps */
for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
int xx = x + dx, yy = y + dy;
int id2 = yy * w + xx;
if (0 <= xx && xx < w && 0 <= yy && yy < h) {
Mt_M[0][0] += input[id2 + 2*size] * input[id2 + 2*size];
Mt_M[0][1] += input[id2 + 2*size] * input[id2 + size];
Mt_M[0][2] += input[id2 + 2*size] * input[id2];
Mt_M[0][3] += input[id2 + 2*size];
Mt_M[1][0] += input[id2 + size] * input[id2 + 2*size];
Mt_M[1][1] += input[id2 + size] * input[id2 + size];
Mt_M[1][2] += input[id2 + size] * input[id2];
Mt_M[1][3] += input[id2 + size];
Mt_M[2][0] += input[id2] * input[id2 + 2*size];
Mt_M[2][1] += input[id2] * input[id2 + size];
Mt_M[2][2] += input[id2] * input[id2];
Mt_M[2][3] += input[id2];
Mt_M[3][0] += input[id2 + 2*size];
Mt_M[3][1] += input[id2 + size];
Mt_M[3][2] += input[id2];
Mt_M[3][3] += 1;
Mt_S[0][0] += input[id2 + 2*size] * output[id2 + 2*size];
Mt_S[0][1] += input[id2 + size] * output[id2 + 2*size];
Mt_S[0][2] += input[id2] * output[id2 + 2*size];
Mt_S[0][3] += output[id2 + 2*size];
Mt_S[1][0] += input[id2 + 2*size] * output[id2 + size];
Mt_S[1][1] += input[id2 + size] * output[id2 + size];
Mt_S[1][2] += input[id2] * output[id2 + size];
Mt_S[1][3] += output[id2 + size];
Mt_S[2][0] += input[id2 + 2*size] * output[id2];
Mt_S[2][1] += input[id2 + size] * output[id2];
Mt_S[2][2] += input[id2] * output[id2];
Mt_S[2][3] += output[id2];
}
}
}
/* if inversion fails, InverseMat4x4 leaves invMt_M at zero, so A (and the
 * stored model) degrade to all-zeros; `success` is deliberately unused. */
bool success = InverseMat4x4(Mt_M, invMt_M);
/* A = Mt_S * inv(Mt_M), computed row by row */
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
for (int k = 0; k < 4; k++) {
A[i][j] += invMt_M[j][k] * Mt_S[i][k];
}
}
}
/* write the 12 model coefficients for this pixel */
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 4; j++) {
int affine_id = i * 4 + j;
affine_model[12 * id + affine_id] = A[i][j];
}
}
}
return ;
}
6,496 | #include <math.h>
#include <float.h>
#include <cuda.h>
#define BLOCK_SIZE 256
// Tree-sum reduction over strided elements of s: each block sums the
// values s[g*skip] for its threads (zero-padding past N) and thread 0
// writes the block total back to its own strided slot.
// Precondition: blockDim.x <= BLOCK_SIZE and blockDim.x is a power of two.
__global__ void gpu_Reduce (float *s, int N, int skip) {
    __shared__ float partial[BLOCK_SIZE];
    const int gidx = (blockIdx.x * blockDim.x + threadIdx.x) * skip;
    // Stage this thread's element (or 0 past the end) into shared memory.
    partial[threadIdx.x] = (gidx < N) ? s[gidx] : 0.0;
    __syncthreads();
    // Interleaved-addressing tree reduction in shared memory.
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        const int dst = 2 * stride * threadIdx.x;
        if (dst < blockDim.x) {
            partial[dst] += partial[dst + stride];
        }
        __syncthreads();
    }
    // Thread 0 publishes the block sum at its own (strided) position.
    if (threadIdx.x == 0) s[gidx] = partial[0];
}
// One Jacobi sweep of the 2-D heat stencil: each interior point of g
// becomes the average of its four neighbours in h.  Boundary rows/columns
// (index 0 and N-1) are never written; the +1 offsets skip them.
__global__ void gpu_Heat (float *h, float *g, int N) {
    const int col = threadIdx.x + blockDim.x * blockIdx.x + 1; // skip column 0
    const int row = threadIdx.y + blockDim.y * blockIdx.y + 1; // skip row 0
    if (row < N - 1 && col < N - 1) {
        const float west  = h[ row*N + (col-1) ];
        const float east  = h[ row*N + (col+1) ];
        const float north = h[ (row-1)*N + col ];
        const float south = h[ (row+1)*N + col ];
        g[row*N + col] = 0.25 * (west + east + north + south);
    }
}
// Block-level helper: each thread squares its interior-point residual
// (g - h) and the block tree-reduces the squares into sdata[0].
// sdata must hold blockDim.x floats; callers read sdata[0] afterwards.
// Precondition: blockDim.x is a power of two (matches the tree loop).
__device__ void gpu_Diff_Reduce_Aux(float *h, float *g, float *sdata, int N) {
    const int lin = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = lin / N;
    const int col = lin - row * N;
    // Interior points contribute (g-h)^2; boundaries and overshoot give 0.
    float sq = 0.0;
    if (row > 0 && row < N - 1 && col > 0 && col < N - 1) {
        const float diff = g[row*N + col] - h[row*N + col];
        sq = diff * diff;
    }
    sdata[threadIdx.x] = sq;
    __syncthreads(); // all stores visible before the reduction reads them
    // Interleaved-addressing tree reduction in shared memory.
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        const int dst = 2 * stride * threadIdx.x;
        if (dst < blockDim.x) {
            sdata[dst] += sdata[dst + stride];
        }
        __syncthreads();
    }
}
// Sum of squared residuals (g-h)^2 over interior grid points: one partial
// sum per block, written to s[blockIdx.x] for a later host/second-pass sum.
__global__ void gpu_Diff_Reduce(float *h, float *g, float *s, int N) {
    __shared__ float sdata[BLOCK_SIZE];
    gpu_Diff_Reduce_Aux(h, g, sdata, N);
    if (threadIdx.x == 0) {
        s[blockIdx.x] = sdata[0]; // publish this block's partial sum
    }
}
// Same residual reduction as gpu_Diff_Reduce, but blocks accumulate their
// partial sums into a single scalar *s with one atomicAdd per block.
// Caller must zero *s before launch.
__global__ void gpu_Diff_Reduce_Atomic(float *h, float *g, float *s, int N) {
    __shared__ float sdata[BLOCK_SIZE];
    gpu_Diff_Reduce_Aux(h, g, sdata, N);
    if (threadIdx.x == 0) {
        atomicAdd(s, sdata[0]); // one atomic per block, not per thread
    }
}
|
6,497 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
unsigned int getmax(unsigned int *, unsigned int);
//my function to get the max number in the array using Nvdia parallel reduction techniques.
/*
 * getmaxcu: grid-wide maximum of numbersDevice[0..size-1], accumulated into
 * *max.  *max must be initialized (the host seeds it with 0) before launch;
 * the launch must cover all `size` elements (host uses ceil(size/threads)
 * blocks).  numbersDevice is no longer clobbered.
 *
 * Fixes vs. original:
 *  - out-of-bounds accesses: numbersDevice[uniqueID + stride] was read and
 *    written with no bound check, and tail threads past `size` indexed past
 *    the array; every access is now guarded by uniqueID < size.
 *  - cross-block data race: *max was updated with plain loads/stores, and
 *    __syncthreads() only synchronizes within a block, so the multi-block
 *    tree reduction read values other blocks were still writing.  atomicMax
 *    makes the accumulation race-free with the same launch configuration.
 */
__global__ void getmaxcu(unsigned int* numbersDevice, unsigned int size, unsigned int* max){
    unsigned int uniqueID = threadIdx.x + (blockDim.x * blockIdx.x);
    // Guard the grid tail: the host rounds the block count up.
    if (uniqueID < size) {
        atomicMax(max, numbersDevice[uniqueID]);
    }
}
/*
 * Driver: fills an array of `size` random values in [0, size-1], copies it
 * to the GPU, runs getmaxcu to find the maximum, and prints the result.
 * Usage: maxseq <size>.
 */
int main(int argc, char *argv[])
{
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
unsigned int result[1]; // host-side copy of the device max (seeded to 0)
result[0] = 0;
int numOfThreads;
int numOfBlocks;
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
// NOTE(review): atol returns long; values above UINT_MAX silently truncate.
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers )
{
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++){
numbers[i] = rand() % size;
// printf("The number at numbers[%d]=%d.\n",i, numbers[i]);
}
//INFO ABOUT THE CURRENT DEVICE I AM USING
cudaDeviceProp dev;
cudaGetDeviceProperties(&dev, 0);
// Block size = hardware max; grid rounded up to cover `size` elements.
numOfThreads = dev.maxThreadsPerBlock;
//printf("size:%d and maxThreads:%d.\n", size, numOfThreads);
if(size <= numOfThreads){
numOfThreads = size;
numOfBlocks = 1;
}
else{
numOfBlocks = (int)ceil((double)size / (double)numOfThreads);
}
printf("Num of threads:%d and num of blocks:%d.\n", numOfThreads, numOfBlocks);
unsigned int* numbersDevice; // device copy of the input array
unsigned int* max; // device scalar accumulating the maximum
cudaError_t error;
error = cudaMalloc((void**)&numbersDevice, size * sizeof(unsigned int));
if(error != cudaSuccess){
printf("Error in cudaMalloc!!!!\n");
exit(1);
}
error = cudaMemcpy(numbersDevice, numbers, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error in cudaMemcpy!!!!!\n");
exit(1);
}
error = cudaMalloc((void**)&max, sizeof(unsigned int));
if(error != cudaSuccess){
printf("Error in cudaMalloc for max.\n");
exit(1);
}
// Seed the device max with 0 (valid lower bound: inputs are unsigned).
error = cudaMemcpy(max, result, sizeof(unsigned int), cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error in cudaMemcpy for max.\n");
exit(1);
}
// Launch; the blocking cudaMemcpy below also serves as the synchronization
// point on the default stream.  NOTE(review): the kernel launch itself is
// never checked with cudaGetLastError.
getmaxcu<<<numOfBlocks, numOfThreads>>>(numbersDevice, size, max);
cudaMemcpy(result, max, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("Successfully finished the getmaxcu method and got max = %d.\n", *result);
/*int nDevices;
cudaGetDeviceCount(&nDevices);
for(int k = 0; k < nDevices; k++){
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, k);
printf("Device Number: %d\n", k);
printf("Device Name: %s\n", prop.name);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Warp size: %d\n", prop.warpSize);
printf("Num of Mps: %d\n", prop.multiProcessorCount);
for(int z = 0; z < 3; z++){
printf("MaxthreadsDim: %d maxGridSize: %d\n", prop.maxThreadsDim[z], prop.maxGridSize[z]);
}
}*/
// printf("The maximum number in the array is: %u\n",
// getmax(numbers, size));
cudaFree(numbersDevice);
cudaFree(max);
free(numbers);
exit(0);
}
/*
input: pointer to an array of long int
number of elements in the array
output: the maximum number of the array
*/
/*
 * getmax: sequential reference implementation.
 * input:  num  -- array of unsigned ints (must have at least one element)
 *         size -- number of elements in num
 * output: the largest value in num[0..size-1]
 */
unsigned int getmax(unsigned int num[], unsigned int size)
{
    unsigned int best = num[0];
    for (unsigned int idx = 1; idx < size; idx++) {
        if (num[idx] > best) {
            best = num[idx];
        }
    }
    return best;
}
|
6,498 | #include <cstdio>
#define N 64
#define TPB 16
// Map index i in [0, n-1] linearly onto [0.0, 1.0].
__device__ float scale(int i, int n)
{
    return static_cast<float>(i) / (n - 1);
}
// 1-D Euclidean distance, i.e. |x2 - x1|.
// Fix: the original computed sqrt((x2-x1)*(x2-x1)); the intermediate square
// can overflow to inf for large inputs and the sqrt round-trips through a
// needless (and potentially double-precision) call.  fabsf is exact,
// single-precision, and cannot overflow.
__device__ float distance(float x1, float x2)
{
    return fabsf(x2 - x1);
}
// For each index i < len: compute the i-th sample point x = scale(i, len),
// its distance to `ref`, store it in d_out[i], and print a debug line.
// Fix: added the i < len bounds guard -- the original wrote d_out[i]
// unconditionally, which reads/writes out of bounds whenever the grid is
// larger than len (e.g. a rounded-up block count).
__global__ void distanceKernel(float *d_out, float ref, int len)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= len) return; // guard the grid tail
    // calculate i-th point
    const float x = scale(i, len);
    // Calculate distance
    d_out[i] = distance(x, ref);
    printf("%2d %f %f %f\n", i, ref, x, d_out[i]);
}
// Demo driver: launches distanceKernel over N points (results are only
// observed via the kernel's device-side printf).
// Fix: added cudaDeviceSynchronize() after the launch -- kernel launches
// are asynchronous, so without it the process could free d_out and exit
// before the kernel runs/flushes its printf output, and any execution
// error would never surface.
int main()
{
    const float ref = 0.5f;
    float *d_out = 0;
    cudaMalloc(&d_out, N*sizeof(float));
    distanceKernel<<<N/TPB, TPB>>>(d_out, ref, N);
    cudaDeviceSynchronize(); // wait for the kernel; flushes device printf
    cudaFree(d_out);
    return 0;
}
6,499 | #include<stdio.h>
#include<cuda.h>
// Deliberately empty kernel: exists only to demonstrate a minimal <<<>>> launch.
__global__ void kernel( void ) {
}
// Hello-world host driver: launches the empty kernel, then prints.
// Fix: added cudaDeviceSynchronize() -- the launch is asynchronous and the
// original gave the kernel no guaranteed chance to execute (or report a
// launch error) before the process exited.
int main( void ) {
    kernel<<<1,1>>>();
    cudaDeviceSynchronize(); // ensure the launch completes before exit
    printf( "Hello, World!" );
    return 0;
}
6,500 | #include <cuda.h>
#include <cuda_runtime.h>
#include <fstream>
#include <iostream>
#include "cuda_kernel.cuh"
// Convert one 2x2 Bayer cell (layout per the comment below: G B / R G) into
// one luma byte, using Rec.709-style weights with the two greens averaged.
// One thread per output pixel; gpu_bayer is (2*width) x (2*height) bytes,
// gpu_gray is width x height bytes.
// Fix: added the bounds guard -- the host launcher rounds the grid up to
// multiples of the block size, so without it threads past the image edge
// read and wrote out of bounds whenever width or height is not a multiple
// of the block dimension.
__global__ void raw2gray_kernal(int width, int height, unsigned char *gpu_bayer,
                                unsigned char *gpu_gray) {
    int index_x = blockIdx.x * blockDim.x + threadIdx.x;
    int index_y = blockIdx.y * blockDim.y + threadIdx.y;
    if (index_x >= width || index_y >= height) return; // grid is rounded up
    // G B
    // R G
    // NOTE(review): the bare 0.5 promotes this term to double -- presumably
    // unintentional; left as-is to keep output bit-identical.
    float pixel =
        0.7152f * 0.5 *
        (gpu_bayer[2 * index_x + 2 * index_y * (2 * width)] +
         gpu_bayer[2 * index_x + 1 + (2 * index_y + 1) * (2 * width)]) +
        0.2126f * gpu_bayer[2 * index_x + 1 + 2 * index_y * (2 * width)] +
        0.0722f * gpu_bayer[2 * index_x + (2 * index_y + 1) * (2 * width)];
    gpu_gray[index_x + index_y * width] = (unsigned char)pixel;
}
// Host wrapper around raw2gray_kernal: uploads the 2x-resolution Bayer
// buffer `img` (4*width*height bytes), launches the kernel over a grid
// rounded up to 24x24 blocks, downloads the width x height grayscale
// result into `res`, and reports any pending CUDA error.
// Returns true on success, false if a CUDA error was recorded.
bool cuda_raw2gray(int width, int height, unsigned char *img,
                   unsigned char *res) {
    const int kBlock = 24;
    const dim3 threads(kBlock, kBlock);
    const dim3 blocks((width + (kBlock - 1)) / kBlock,
                      (height + (kBlock - 1)) / kBlock); // ceil-div grid
    unsigned char *dev_in = NULL;
    unsigned char *dev_out = NULL;
    cudaMalloc(&dev_in, 4 * width * height * sizeof(unsigned char));
    cudaMalloc(&dev_out, width * height * sizeof(unsigned char));
    cudaMemcpy(dev_in, img, 4 * width * height * sizeof(unsigned char),
               cudaMemcpyHostToDevice);
    raw2gray_kernal<<<blocks, threads>>>(width, height, dev_in, dev_out);
    // Blocking copy on the default stream: also synchronizes with the kernel.
    cudaMemcpy(res, dev_out, width * height * sizeof(unsigned char),
               cudaMemcpyDeviceToHost);
    cudaFree(dev_in);
    cudaFree(dev_out);
    cudaError_t cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "raw2gray_kernal failed: %s\n",
                cudaGetErrorString(cudaStatus));
        return false;
    }
    return true;
}
// init cuda enviroment.
int32_t cuda_init(void) {
int32_t count;
cudaGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return 0;
}
std::cout
<< "-------------------------- GPU dev info --------------------------"
<< std::endl;
int32_t i;
for (i = 0; i < count; i++) {
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
printf("Identify: %s\n", prop.name);
printf("Host Memory: %d\n", (int32_t)prop.canMapHostMemory);
printf("Clock Rate: %d khz\n", (int32_t)prop.clockRate);
printf("Compute Mode: %d\n", (int32_t)prop.computeMode);
printf("Device Overlap: %d\n", (int32_t)prop.deviceOverlap);
printf("Integrated: %d\n", (int32_t)prop.integrated);
printf("Kernel Exec Timeout Enabled: %d\n",
(int32_t)prop.kernelExecTimeoutEnabled);
printf("Max Grid Size: %d * %d * %d\n", (int32_t)prop.maxGridSize[0],
(int32_t)prop.maxGridSize[1], (int32_t)prop.maxGridSize[2]);
printf("Max Threads Dim: %d * %d * %d\n",
(int32_t)prop.maxThreadsDim[0], (int32_t)prop.maxThreadsDim[1],
(int32_t)prop.maxThreadsDim[2]);
printf("Max Threads per Block: %d\n", (int32_t)prop.maxThreadsPerBlock);
printf("Maximum Pitch: %d bytes\n", (int32_t)prop.memPitch);
printf("Minor Compute Capability: %d\n", (int32_t)prop.minor);
printf("Number of Multiprocessors: %d\n",
(int32_t)prop.multiProcessorCount);
printf("32bit Registers Availble per Block: %d\n",
(int32_t)prop.regsPerBlock);
printf("Shared Memory Available per Block: %d bytes\n",
(int32_t)prop.sharedMemPerBlock);
printf("Alignment Requirement for Textures: %d\n",
(int32_t)prop.textureAlignment);
printf("Constant Memory Available: %d bytes\n",
(int32_t)prop.totalConstMem);
printf("Global Memory Available: %d bytes\n",
(int32_t)prop.totalGlobalMem);
printf("Warp Size: %d threads\n", (int32_t)prop.warpSize);
break;
}
}
}
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA.\n");
return 0;
}
cudaSetDevice(i);
return 1;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.