serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,101 | #include "includes.h"
// Edge-relaxation step of an SSSP (Bellman-Ford/Dijkstra-style) sweep over a
// dense adjacency matrix. One thread per vertex.
//   U      : per-vertex "unsettled" flags (only unsettled targets are relaxed)
//   F      : per-vertex "frontier" flags (only frontier sources relax edges)
//   d      : tentative distances, updated with atomicMin
//   adjMat : gSize x gSize weight matrix, 0 == no edge
// NOTE(review): d[globalThreadId] is read non-atomically while other threads
// may be atomicMin-ing it; this is safe only if frontier vertices' distances
// are already settled for this iteration — confirm against the host driver.
__global__ void relax(int* U, int* F, int* d, size_t gSize, int* adjMat) {
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
if (globalThreadId < gSize) {
if (F[globalThreadId]) {
// Scan the full adjacency row of this frontier vertex.
for (int i = 0; i < gSize; i++) {
if(adjMat[globalThreadId*gSize + i] && i != globalThreadId && U[i]) {
// Relax edge (tid -> i): keep the smaller tentative distance.
atomicMin(&d[i], d[globalThreadId] + adjMat[globalThreadId * gSize + i]);
}
}
}
}
}
22,102 | /*
@Author: 3sne ( Mukur Panchani )
@FileName: q1FindSubstring.cu
@Task: CUDA program that finds a substring in a given string.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
/* Zero out the first blen bytes of buffer b. */
void resetBuf(char* b, int blen) {
    int idx = 0;
    while (idx < blen) {
        b[idx] = '\0';
        idx++;
    }
}
/* One thread per candidate start index: thread tid checks whether pattern ss
   (length ssLen) occurs in text at offset tid and writes a 1/0 flag into
   retArr[tid]. Launched with a single block of (tLen - ssLen + 1) threads, so
   text[tid .. tid+ssLen-1] is always in range. */
__global__ void findSs(char *text, char *ss, int *retArr, int ssLen) {
int tid = threadIdx.x;
int match = 1;
for (int k = 0; k < ssLen; k++) {
if (text[tid + k] != ss[k]) {
match = 0;
break;
}
}
retArr[tid] = match;
}
/* Read a text and a pattern from stdin, then launch one GPU thread per
 * candidate start position to flag every occurrence of the pattern.
 * Fixes vs. original: the text/subStr buffers were allocated without room
 * for the terminating NUL (strcpy overflowed by one byte), the scanf reads
 * were unbounded into a 10240-byte buffer, the "%[^\n]s" format carried a
 * stray literal 's', retArr was leaked, and ssLen > tLen was not guarded. */
int main() {
char *buf = (char*)malloc(sizeof(char) * 10240);
char *text, *subStr, *dtext, *dsubStr;
int *retArr, *dretarr;
printf("[IN] Enter Text >> ");
scanf("%10239[^\n]", buf);                  /* bounded; was unbounded "%[^\n]s" */
int tLen = strlen(buf);
text = (char*)malloc(sizeof(char) * (tLen + 1));   /* +1: strcpy writes the NUL */
strcpy(text, buf);
resetBuf(buf, 10240);
printf("[IN] Enter Sub-String >> ");
scanf("%10239s", buf);                      /* bounded; was unbounded "%s" */
int ssLen = strlen(buf);
subStr = (char*)malloc(sizeof(char) * (ssLen + 1)); /* +1 for the NUL */
strcpy(subStr, buf);
free(buf);
if (ssLen > tLen) {                         /* no candidate positions at all */
printf("Substring un-found in Text :(\n");
free(text);
free(subStr);
return 0;
}
int nPos = tLen - ssLen + 1;                /* number of candidate start indexes */
retArr = (int*)malloc(sizeof(int) * nPos);
for (int i = 0; i < nPos; i++) {
retArr[i] = 0;
}
cudaMalloc((void **)&dtext, sizeof(char) * tLen);
cudaMalloc((void **)&dsubStr, sizeof(char) * ssLen);
cudaMalloc((void **)&dretarr, sizeof(int) * nPos);
cudaMemcpy(dtext, text, sizeof(char) * tLen, cudaMemcpyHostToDevice);
cudaMemcpy(dsubStr, subStr, sizeof(char) * ssLen, cudaMemcpyHostToDevice);
cudaMemcpy(dretarr, retArr, sizeof(int) * nPos, cudaMemcpyHostToDevice);
/* One thread per candidate start position, single block. */
dim3 block_conf (nPos, 1);
findSs<<<1, block_conf>>>(dtext, dsubStr, dretarr, ssLen);
cudaMemcpy(retArr, dretarr, sizeof(int) * nPos, cudaMemcpyDeviceToHost);
int yay = 0;
for (int i = 0; i < nPos; i++) {
if (retArr[i] == 1) {
yay = 1;
break;
}
}
if (yay) {
printf("Substring found in Text @ index(es) ");
for (int i = 0; i < nPos; i++) {
if (retArr[i])
printf("%d ", i);
}
} else {
printf("Substring un-found in Text :(");
}
printf("\n");
cudaFree(dtext);
cudaFree(dsubStr);
cudaFree(dretarr);
free(text);
free(subStr);
free(retArr);                               /* was leaked */
return 0;
}
// CPU reference: banded matrix-vector product b = A*x for an m-row matrix
// whose nonzeros lie within (Bandwidth-1) of the diagonal. The coefficient is
// synthesized on the fly as A = Bandwidth - |j - Bandwidth + 1| (peaks at the
// diagonal) rather than stored; n is the width of the band window.
// NOTE(review): if m < 2*Bandwidth-1 none of the three cases may fire for
// some rows, leaving j_start/j_end uninitialized — confirm callers guarantee
// m >= 2*Bandwidth-1.
void MatrixMultiply_Banded(double *x,double *b,int m,int n,int Bandwidth)
{
int i,j;
int j_start,j_end;
double A;
for(i=0;i<m;i++)
{
// Clip the band window for rows near the top/bottom boundary.
if(i>=0 && i<Bandwidth-1)
{j_start = Bandwidth-1-i;j_end = n;}
else if(i>=Bandwidth-1 && i<m-Bandwidth+1)
{j_start = 0;j_end = n;}
else if(i>=m-Bandwidth+1 && i<m)
{j_start = 0;j_end = Bandwidth-1+m-i;}
b[i] = 0.0;
for(j=j_start;j<j_end;j++)
{
// Synthesized band coefficient for column offset j.
A = Bandwidth - abs(j-Bandwidth+1);
b[i] += A*x[i-(Bandwidth-1)+j];
}
}
}
// GPU version of MatrixMultiply_Banded: each thread owns rows via a
// grid-stride loop (tid += gridDim.x*blockDim.x), computing
// b[tid] = sum of synthesized band coefficients times x entries.
// NOTE(review): same caveat as the CPU version — j_start/j_end may be left
// uninitialized when m < 2*Bandwidth-1; confirm the launch precondition.
__global__ void MatrixMultiply_GPU(double *x,double *b,int m,int n,int Bandwidth)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j,j_start,j_end;
double A;
while(tid<m)
{
// Clip the band window for boundary rows.
if(tid>=0 && tid<Bandwidth-1)
{j_start = Bandwidth-1-tid;j_end = n;}
else if(tid>=Bandwidth-1 && tid<m-Bandwidth+1)
{j_start = 0;j_end = n;}
else if(tid>=m-Bandwidth+1 && tid<m)
{j_start = 0;j_end = Bandwidth-1+m-tid;}
b[tid] = 0.0;
for(j=j_start;j<j_end;j++)
{
A = Bandwidth - abs(j-Bandwidth+1);
b[tid] += A*x[tid-(Bandwidth-1)+j];
}
tid += gridDim.x*blockDim.x;
}
}
// Element-wise product c[i] = a[i] * b[i] over Dim entries, grid-stride.
__global__ void Dotproduct(double *a,double *b,double *c,int Dim)
{
const int stride = gridDim.x * blockDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < Dim; idx += stride)
{
c[idx] = a[idx] * b[idx];
}
}
// Dot-product partial sums: each thread accumulates a grid-stride partial,
// stores it in shared memory, then thread 0 of each block serially sums the
// 256 slots and writes one partial per block to PartialSum[blockIdx.x].
// Precondition: must be launched with blockDim.x == 256 — the serial sum
// reads all 256 cache slots, so fewer threads would leave slots
// uninitialized.
__global__ void Dotproduct_Shared(double *a,double *b,double *PartialSum,int Dim)
{
__shared__ double cache[256];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
double temp = 0.0;
while(tid<Dim)
{
temp += a[tid] * b[tid];
tid += gridDim.x*blockDim.x;
}
cache[cacheIndex] = temp;
// Barrier: all partials must be in shared memory before thread 0 reads them.
__syncthreads();
if(cacheIndex == 0)
{
temp = 0.0;
for(int i=0;i<256;i++)
{
temp += cache[i];
}
PartialSum[blockIdx.x] = temp;
}
}
// Dot-product partial sums with a shared-memory tree reduction instead of
// the serial sum in Dotproduct_Shared. Precondition: blockDim.x == 256
// (cache size and the halving loop both assume a full power-of-two block).
// The __syncthreads() inside the while loop is safe: `i` is uniform across
// the block, so all threads execute the same number of iterations.
__global__ void Dotproduct_Shared_Reduction(double *a,double *b,double *PartialSum,int Dim)
{
__shared__ double cache[256];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
double temp = 0.0;
while(tid<Dim)
{
temp += a[tid] * b[tid];
tid += gridDim.x*blockDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
// Pairwise halving: each pass folds the upper half onto the lower half.
int i = 256;
while(i>1)
{
if(cacheIndex < i/2)
{
cache[cacheIndex] += cache[cacheIndex + i/2];
}
i /= 2;
__syncthreads();
}
if(cacheIndex == 0)
{
PartialSum[blockIdx.x] = cache[cacheIndex];
}
}
// Per-block partial sums of the alternating series
//   sum_k [ 1/(4k+1) - 1/(4k+3) ]  (converges to pi/4),
// accumulated grid-stride and reduced with the same shared-memory tree as
// Dotproduct_Shared_Reduction. Precondition: blockDim.x == 256.
__global__ void Pi_Shared_Reduction(double *PartialSum,int Dim)
{
__shared__ double cache[256];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
double temp = 0.0;
while(tid<Dim)
{
temp += 1.0 / (4.0*tid + 1.0) - 1.0 / (4.0*tid + 3.0);
tid += gridDim.x*blockDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
// Tree reduction over the 256 shared slots; `i` is uniform so the barrier
// inside the loop is reached by all threads.
int i = 256;
while(i>1)
{
if(cacheIndex < i/2)
{
cache[cacheIndex] += cache[cacheIndex + i/2];
}
i /= 2;
__syncthreads();
}
if(cacheIndex == 0)
{
PartialSum[blockIdx.x] = cache[cacheIndex];
}
}
// Conjugate-gradient update step, grid-stride over Dim entries:
//   x += alpha * p   (solution)
//   r -= alpha * Ax  (residual)
__global__ void UpdateSolution(double *x,double *p,double *r,double *Ax,double alpha,int Dim)
{
const int stride = gridDim.x * blockDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < Dim; idx += stride)
{
x[idx] = x[idx] + alpha * p[idx];
r[idx] = r[idx] - alpha * Ax[idx];
}
}
// Conjugate-gradient search-direction update, grid-stride:
//   p = r + beta * p
__global__ void UpdateSearchDirection(double *p,double *r,double beta,int Dim)
{
const int stride = gridDim.x * blockDim.x;
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < Dim; idx += stride)
{
p[idx] = r[idx] + beta * p[idx];
}
}
|
22,104 | #include "cuda.h"
// Sum `length` samples from an interleaved buffer, stepping by
// channel_amount so only a single channel's values are accumulated.
__device__ float integration(float *data, int length, int channel_amount)
{
float total = 0.0f;
int idx = 0;
while (idx < length) {
total += data[idx * channel_amount];
idx++;
}
return total;
}
// For each anchor box (blockIdx.x) and channel (threadIdx.x), integrate the
// `view` values inside the clipped box and accumulate the per-channel total
// into inds[anchor_id * channels + channel]. blockIdx.y selects one x-line
// of the box, so several blocks contribute to the same output cell.
// FIX: that accumulation was a plain `+=`, racing across blocks with
// different blockIdx.y — it must be an atomicAdd.
// Preconditions (unchanged): inds is zero-initialized by the caller;
// anchors holds [y1, x1, y2, x2] per anchor; view is (X, Y, C) row-major
// with view_shape = {X, Y, C}.
__global__ void remove_empty(float *inds, int *anchors, float *view, int *anchors_shape, int *view_shape)
{
int anchor_id = blockIdx.x;
if (anchor_id >= anchors_shape[0]) return;
int *anchor_base = anchors + 4*anchor_id;
int y1 = anchor_base[0];
int x1 = anchor_base[1];
int y2 = anchor_base[2];
int x2 = anchor_base[3];
// Clamp the box to the view extents.
if (x1 < 0) x1 = 0;
if (x1 >= view_shape[0]) x1 = view_shape[0]-1;
if (x2 < 0) x2 = 0;
if (x2 >= view_shape[0]) x2 = view_shape[0]-1;
if (y1 < 0) y1 = 0;
if (y1 >= view_shape[1]) y1 = view_shape[1]-1;
if (y2 < 0) y2 = 0;
if (y2 >= view_shape[1]) y2 = view_shape[1]-1;
int anchor_w = x2 - x1;
int anchor_l = y2 - y1;
int channel = threadIdx.x;
if (channel >= view_shape[2]) return;
int line_id = blockIdx.y;
if (line_id >= anchor_w) return;
// Start of this box line for this channel, in the interleaved layout.
float *pos = view + ((x1 + line_id)*view_shape[1] + y1)*view_shape[2] + channel;
int length = anchor_l;
int channel_amount = view_shape[2];
// Multiple blocks (one per line_id) target the same output element, so the
// accumulation must be atomic; the original non-atomic += lost updates.
atomicAdd(inds + channel_amount*anchor_id + channel, integration(pos, length, channel_amount));
}
|
22,105 | /*
* Copyright (c) 2022 Mohamed Khaled <Mohamed_Khaled_Kamal@outlook.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "cuda/vector_helpers.cuh"
extern "C"
{
/**
* @brief calculated squared norm difference between two 3-dimension vecors ||first_vector-second_vector||^2
* used float4 for better performance
*
* @param first_yuv first color vector
* @param second_yuv second color vecotr
* @return answer of squared norm difference
*/
/**
 * @brief squared Euclidean distance between two 3-component color vectors
 * (the .w components are ignored)
 *
 * @param first_yuv first color vector
 * @param second_yuv second color vector
 * @return squared norm of the difference
 */
__device__ static inline float norm_squared(float4 first_yuv, float4 second_yuv)
{
float dx = first_yuv.x - second_yuv.x;
float dy = first_yuv.y - second_yuv.y;
float dz = first_yuv.z - second_yuv.z;
return dx * dx + dy * dy + dz * dz;
}
/**
 * @brief calculate the bilateral filter weight w for one neighbor pixel:
 * w = exp( -spatial_dist^2 / (2*sigma_space^2) - color_dist^2 / (2*sigma_color^2) )
 *
 * @param x x coordinate of the center pixel
 * @param y y coordinate of the center pixel
 * @param r x coordinate of the neighbor pixel
 * @param c y coordinate of the neighbor pixel
 * @param pixel_value color vector of the center pixel
 * @param neighbor_value color vector of the neighbor pixel
 * @param sigma_space spatial standard deviation
 * @param sigma_color color-range standard deviation
 * @return the calculated w
 */
__device__ static inline float calculate_w(int x, int y, int r, int c,
float4 pixel_value, float4 neighbor_value,
float sigma_space, float sigma_color)
{
float first_term, second_term;
first_term = (((x - r) * (x - r)) + ((y - c) * (y - c))) / (2 * sigma_space * sigma_space);
second_term = norm_squared(pixel_value, neighbor_value) / (2 * sigma_color * sigma_color);
// __expf: fast-math exponential, adequate for filter weights.
return __expf(-first_term - second_term);
}
/**
* @brief apply the bilateral filter on the pixel sent
*
* @param src_tex_Y Y channel of source image
* @param src_tex U channel of source image if yuv, or UV channels if format is nv12
* @param src_tex_V V channel of source image
* @param dst_Y Y channel of destination image
* @param dst_U U channel of destination image if format is in yuv
* @param dst_V V channel of destination image if format is in yuv
* @param dst_UV UV channels of destination image if format is in nv12
* @param width width of Y channel
* @param height height of Y channel
* @param width_uv width of UV channels
* @param height_uv height of UV channels
* @param pitch pitch of Y channel
* @param pitch_uv pitch of UV channels
* @param x x coordinate of pixel to be filtered
* @param y y coordinate of pixel to be filtered
* @param sigma_space sigma space parameter
* @param sigma_color sigma color parameter
* @param window_size window size parameter
* @return void
*/
__device__ static inline void apply_biltaeral(
cudaTextureObject_t src_tex_Y, cudaTextureObject_t src_tex, cudaTextureObject_t src_tex_V,
uchar *dst_Y, uchar *dst_U, uchar *dst_V, uchar2 *dst_UV,
int width, int height, int width_uv, int height_uv, int pitch, int pitch_uv,
int x, int y,
float sigma_space, float sigma_color, int window_size)
{
// Top-left corner of the (window_size x window_size) neighborhood.
int start_r = x - window_size / 2;
int start_c = y - window_size / 2;
float4 neighbor_pixel = make_float4(0.f, 0.f, 0.f, 0.f);
float Wp = 0.f;
float4 new_pixel_value = make_float4(0.f, 0.f, 0.f, 0.f);
float w = 0.f;
int channel_ratio = width / width_uv; // ratio between Y channel and UV channels
float4 currrent_pixel;
// A zero src_tex_V texture handle signals the semi-planar (nv12) layout.
if (!src_tex_V) { // format is in nv12
float2 temp_uv = tex2D<float2>(src_tex, x/channel_ratio, y/channel_ratio) * 255.f;
currrent_pixel.x = tex2D<float>(src_tex_Y, x, y) * 255.f;
currrent_pixel.y = temp_uv.x;
currrent_pixel.z = temp_uv.y;
currrent_pixel.w = 0.f;
} else { // format is fully planar
currrent_pixel = make_float4(tex2D<float>(src_tex_Y, x, y) * 255.f,
tex2D<float>(src_tex, x/channel_ratio, y/channel_ratio) * 255.f,
tex2D<float>(src_tex_V, x/channel_ratio, y/channel_ratio) * 255.f,
0.f);
}
// Accumulate the weighted sum of in-bounds neighbors; the center pixel is
// always in bounds, so Wp > 0 and the division below is safe.
for (int i=0; i < window_size; i++)
{
for (int j=0; j < window_size; j++)
{
int r=start_r+i;
int c=start_c+j;
bool in_bounds=r>=0 && r<width && c>=0 && c<height;
if (in_bounds)
{
if (!src_tex_V){
float2 temp_uv = tex2D<float2>(src_tex, r/channel_ratio, c/channel_ratio);
neighbor_pixel=make_float4(tex2D<float>(src_tex_Y, r, c) * 255.f,
temp_uv.x * 255.f,
temp_uv.y * 255.f, 0.f);
} else {
neighbor_pixel=make_float4(tex2D<float>(src_tex_Y, r, c) * 255.f,
tex2D<float>(src_tex, r/channel_ratio, c/channel_ratio) * 255.f,
tex2D<float>(src_tex_V, r/channel_ratio, c/channel_ratio) * 255.f, 0.f);
}
w=calculate_w(x,y,r,c,currrent_pixel,neighbor_pixel,sigma_space,sigma_color);
Wp+=w;
new_pixel_value+= neighbor_pixel*w;
}
}
}
// Normalize and store; chroma is written at subsampled resolution, so
// channel_ratio threads share each UV destination cell.
new_pixel_value = new_pixel_value / Wp;
dst_Y[y*pitch + x] = new_pixel_value.x;
if (!src_tex_V) {
dst_UV[(y/channel_ratio) * pitch_uv + (x/channel_ratio)] = make_uchar2(new_pixel_value.y, new_pixel_value.z);
} else {
dst_U[(y/channel_ratio) * pitch_uv + (x/channel_ratio)] = new_pixel_value.y;
dst_V[(y/channel_ratio) * pitch_uv + (x/channel_ratio)] = new_pixel_value.z;
}
return;
}
// Bilateral-filter entry point for fully planar YUV formats: one thread per
// Y-plane pixel, guarded against out-of-range coordinates.
__global__ void Process_uchar(cudaTextureObject_t src_tex_Y, cudaTextureObject_t src_tex_U, cudaTextureObject_t src_tex_V,
uchar *dst_Y, uchar *dst_U, uchar *dst_V,
int width, int height, int pitch,
int width_uv, int height_uv, int pitch_uv,
int window_size, float sigmaS, float sigmaR)
{
const int px = blockIdx.x * blockDim.x + threadIdx.x;
const int py = blockIdx.y * blockDim.y + threadIdx.y;
if (px >= width || py >= height)
return;
apply_biltaeral(src_tex_Y, src_tex_U, src_tex_V,
dst_Y, dst_U, dst_V, (uchar2*)nullptr,
width, height, width_uv, height_uv, pitch, pitch_uv,
px, py,
sigmaS, sigmaR, window_size);
}
// Bilateral-filter entry point for the semi-planar nv12 format: the V
// texture slot is passed as 0 so apply_biltaeral takes its nv12 path and
// writes interleaved UV into dst_UV.
__global__ void Process_uchar2(cudaTextureObject_t src_tex_Y, cudaTextureObject_t src_tex_UV, cudaTextureObject_t unused1,
uchar *dst_Y, uchar2 *dst_UV, uchar *unused2,
int width, int height, int pitch,
int width_uv, int height_uv, int pitch_uv,
int window_size, float sigmaS, float sigmaR)
{
const int px = blockIdx.x * blockDim.x + threadIdx.x;
const int py = blockIdx.y * blockDim.y + threadIdx.y;
if (px >= width || py >= height)
return;
apply_biltaeral(src_tex_Y, src_tex_UV, (cudaTextureObject_t)nullptr,
dst_Y, (uchar*)nullptr, (uchar*)nullptr, dst_UV,
width, height, width_uv, height_uv, pitch, pitch_uv,
px, py,
sigmaS, sigmaR, window_size);
}
}
|
22,106 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>//Time heading
//Password Cracking using CUDA
// Expand a 4-char candidate (2 letters + 2 digits) into its 10-char
// "encrypted" form: each input character contributes 2-3 output characters
// shifted by fixed offsets, then out-of-range results are reflected back
// into range (letters a-z for slots 0-5, digits 0-9 for slots 6-9).
// NOTE(review): the returned buffer comes from device-side malloc and the
// caller (crack) never frees it — leaks device heap per launch; confirm the
// heap is large enough for 26*26*100 allocations.
__device__ char* encryptDecrypt(char* tempPassword){
char * generatedPwd = (char *) malloc(sizeof(char) * 11);
generatedPwd[0] = tempPassword[0] + 2;
generatedPwd[1] = tempPassword[0] - 2;
generatedPwd[2] = tempPassword[0] + 1;
generatedPwd[3] = tempPassword[1] + 3;
generatedPwd[4] = tempPassword[1] - 3;
generatedPwd[5] = tempPassword[1] - 1;
generatedPwd[6] = tempPassword[2] + 2;
generatedPwd[7] = tempPassword[2] - 2;
generatedPwd[8] = tempPassword[3] + 4;
generatedPwd[9] = tempPassword[3] - 4;
generatedPwd[10] = '\0';
// Wrap out-of-range characters back into the valid alphabet:
// slots 0-5 are letters ('a'..'z' = 97..122), slots 6-9 digits ('0'..'9').
for(int i =0; i<10; i++){
if(i >= 0 && i < 6){
if(generatedPwd[i] > 122){
generatedPwd[i] = (generatedPwd[i] - 122) + 97;
}else if(generatedPwd[i] < 97){
generatedPwd[i] = (97 - generatedPwd[i]) + 97;
}
}else{
if(generatedPwd[i] > 57){
generatedPwd[i] = (generatedPwd[i] - 57) + 48;
}else if(generatedPwd[i] < 48){
generatedPwd[i] = (48 - generatedPwd[i]) + 48;
}
}
}
return generatedPwd;
}
// Brute-force search: each of the 26*26 blocks picks a letter pair
// (blockIdx.x/y) and each of the 10*10 threads a digit pair (threadIdx.x/y),
// encrypts the candidate and compares it with the hard-coded target.
__global__ void crack(char * alphabet, char * numbers){
// matchedPwd is NOT NUL-terminated — encryptDecrypt only reads indices 0-3.
char matchedPwd[4];
matchedPwd[0] = alphabet[blockIdx.x];
matchedPwd[1] = alphabet[blockIdx.y];
matchedPwd[2] = numbers[threadIdx.x];
matchedPwd[3] = numbers[threadIdx.y];
char* encryptedPwd = "plodwy3171"; //nz13
char* search = encryptDecrypt(matchedPwd);
int iter = 0;
// Despite the name, is_match == 0 means the strings are EQUAL: the loop
// exits with 0 only when both strings end simultaneously with every
// character matched, and sets 1 on the first mismatch or length difference.
int is_match = 0;
while (*encryptedPwd != '\0' || *search != '\0') {
if (*encryptedPwd == *search) {
encryptedPwd++;
search++;
} else if ((*encryptedPwd == '\0' && *search != '\0') || (*encryptedPwd != '\0' && *search == '\0') || *encryptedPwd != *search) {
is_match = 1;
break;
}
}
if (is_match == 0) {
printf("Password found successfully: %c%c%c%c \n", matchedPwd[0],matchedPwd[1],matchedPwd[2],matchedPwd[3]);
}
}
/* Compute (end - start) in nanoseconds into *diff.
   Returns 0 when the difference is positive, 1 otherwise. */
int time_count(struct timespec *start, struct timespec *end,
long long int *diff)
{
long long int secs = end->tv_sec - start->tv_sec;
long long int nanos = end->tv_nsec - start->tv_nsec;
if (nanos < 0)
{
/* borrow one second */
secs -= 1;
nanos += 1000000000;
}
*diff = secs * 1000000000 + nanos;
return (*diff > 0) ? 0 : 1;
}
/* Launch the brute-force crack over all letter-pair blocks and digit-pair
 * threads, timing the kernel with CLOCK_MONOTONIC_RAW.
 * Fixes vs. original: cudaThreadSynchronize() is deprecated (replaced with
 * cudaDeviceSynchronize()), and both device buffers were leaked. */
int main(int argc, char ** argv){
struct timespec start, end;
long long int time_used;
// Candidate alphabets. cpuDigits deliberately keeps its original size of
// 26 (entries 10..25 are zero-filled and never indexed by the 10x10
// thread block) so the 26-byte device copies below stay in bounds.
char cpuCharacter[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'};
char cpuDigits[26] = {'0','1','2','3','4','5','6','7','8','9'};
char * gpuCharacter;
cudaMalloc( (void**) &gpuCharacter, sizeof(char) * 26);
cudaMemcpy(gpuCharacter, cpuCharacter, sizeof(char) * 26, cudaMemcpyHostToDevice);
char * gpuDigits;
cudaMalloc( (void**) &gpuDigits, sizeof(char) * 26);
cudaMemcpy(gpuDigits, cpuDigits, sizeof(char) * 26, cudaMemcpyHostToDevice);
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
// 26*26 blocks (letter pairs) x 10*10 threads (digit pairs).
crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuCharacter, gpuDigits );
cudaDeviceSynchronize();   /* cudaThreadSynchronize() is deprecated */
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
time_count(&start, &end, &time_used);
printf("Time taken: %f seconds OR %lld Nano Seconds\n", (time_used / 1.0e9), (time_used));
cudaFree(gpuCharacter);    /* was leaked */
cudaFree(gpuDigits);       /* was leaked */
return 0;
}
|
22,107 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
//#define VERBOSE
//#define PROF
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort-on-error wrapper for CUDA runtime calls; invoked through the
// CudaSafeCall macro so the failing file/line are reported. Compiles to a
// no-op when CUDA_ERROR_CHECK is not defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
// Post-launch error check, invoked through the CudaCheckError macro:
// first picks up launch-configuration errors via cudaGetLastError, then
// synchronizes to surface asynchronous in-kernel failures (which costs
// performance — see the inline note).
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    // More careful checking. However, this will affect performance.
    // Comment away if needed.
    err = cudaDeviceSynchronize();
    if( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Element copy kernel: dA[i] = dB[i] for the N valid elements.
__global__ void vc(float *dA, float *dB, int N) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    dA[idx] = dB[idx];
}
extern "C" {
// Host wrapper: copy B[start..end] into A[start..end] via the GPU.
// Allocates device buffers, stages B up, runs the `vc` copy kernel, and
// copies the result back. All CUDA calls abort on error via CudaSafeCall.
// GPUN must equal end - start + 1; a non-positive GPUN is a no-op.
void vcCUDA(float* A, float *B, int start, int end, int GPUN) {
  float *dA, *dB;
  if (GPUN > 0) {
    assert(end - start + 1 == GPUN);
#ifdef VERBOSE
    printf("In vcCUDA\n");
    printf("\t GPUN: %d\n", GPUN);
    printf("\t range: %d..%d\n", start, end);
#endif
#ifdef PROF
    cudaEvent_t startCudaKernelEvent, endCudaKernelEvent;
    CudaSafeCall(cudaEventCreate(&startCudaKernelEvent));
    CudaSafeCall(cudaEventCreate(&endCudaKernelEvent));
#endif
    CudaSafeCall(cudaMalloc(&dA, sizeof(float) * GPUN));
    CudaSafeCall(cudaMalloc(&dB, sizeof(float) * GPUN));
    CudaSafeCall(cudaMemcpy(dB, B + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice));
#ifdef PROF
    CudaSafeCall(cudaEventRecord(startCudaKernelEvent));
#endif
    // Ceil-divide the element count into THREADS_PER_BLOCK-sized blocks.
    vc<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dA, dB, GPUN);
#ifdef PROF
    CudaSafeCall(cudaEventRecord(endCudaKernelEvent));
    CudaSafeCall(cudaEventSynchronize(endCudaKernelEvent));
#endif
    CudaCheckError();
    CudaSafeCall(cudaDeviceSynchronize());
    CudaSafeCall(cudaMemcpy(A + start, dA, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
#ifdef PROF
    float msecKernel;
    CudaSafeCall(cudaEventElapsedTime(&msecKernel, startCudaKernelEvent, endCudaKernelEvent));
    printf("CUDA kernel: %lf msec\n", msecKernel);
#endif
    CudaSafeCall(cudaFree(dA));
    CudaSafeCall(cudaFree(dB));
  }
}
}
|
22,108 | #include <iostream>
#include <stdio.h>
#include <math.h>
#define kx 3
#define ky 3
#define nx 224
#define ny 224
#define ni 64
#define nn 64
#define batch 64
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error-check helper behind the gpuErrchk macro: print the CUDA error with
// its call site and (by default) exit with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
/* Fill a[0..N-1] with pseudo-random values from rand(). */
void random_ints(int* a, int N)
{
    for (int idx = 0; idx < N; idx++)
        a[idx] = rand();
}
/* Set the first N entries of a to zero. */
void zeros(int* a, int N)
{
    for (int idx = N - 1; idx >= 0; idx--)
        a[idx] = 0;
}
// CURRENT MEMORY PERFORMANCE = 5.77 GB/s
// perform a single application (matrix-vector multiply) of 1 weights matrix to a subset of a single input feature map
// the batch size (batch) determines the size of the subset
// the dimensions of the weights matrix are (kx, ky)
// the dimensions of all input and output feature maps are (nx, ny)
// the number of input feature maps is ni
// the number of output feature maps is nn
// the input and output feature maps are thus represented as 3D arrays (logically)
// the corresponding weights matrices are thus represented as a 4D array (logically)
// this is what is done in a 3D convolution layer
// this method utilizes a scratchpad memory for better thread block performance
// 3D-convolution-style kernel: each block processes one batch-slice of one
// input feature map, applying all nn kernel matrices and accumulating into
// the corresponding output maps through shared-memory staging.
// Launch contract (from main): grid = ni*batch blocks, block = nx*ny/batch
// threads; blockDim.x must equal nx*ny/batch so temp_inp is fully populated.
// NOTE(review): k_start advances by kx*ky per input map while temp_kern is
// filled with nn*kx*ky entries — for a [nn][ni][kx][ky] weight layout the
// per-map stride looks like it should be nn*kx*ky; verify against the
// intended kern layout.
__global__
void matrix_vector_mult(int *inp, int *outp, int *kern)
{
	// scratchpad memory used for shared variables
	// NOTE: must batch enough such that this data can fit in the shared memory
	__shared__ int temp_kern[nn * kx * ky]; // all kernel matrices for given input feature map
	__shared__ int temp_inp[nx * ny / batch]; // batched subset of given input feature map
	// only 1 thread in block needs to populate all shared variables but temp_ind
	if (threadIdx.x == 0) {
		int hold = nn* kx * ky;
		int k_start = (blockIdx.x/batch) * kx * ky; // every (batch) thread blocks use the same weights matrices
		for (int j = 0; j < hold; j++) { // populate temp_kern
			int t = k_start + j;
			temp_kern[j] = kern[t];
		}
	}
	int i_index = (blockIdx.x * (nx * ny / batch)) + threadIdx.x; // 1 thread block per subset of each feature map
	temp_inp[threadIdx.x] = inp[i_index]; // piecemeal load in the input feature map
	__syncthreads(); // sync all threads to this point - input feature map loaded
	// Top-left offset of the kx x ky stencil around this thread's element,
	// expressed in the batched-slice index space.
	int l_start = threadIdx.x - ky/2 - (ny/(batch/2) * (kx/2));
	for (int i=0; i<nn; i++) {
		int out = 0;
		for (int j=0; j<kx; j++) {
			for (int k=0; k<ky; k++) {
				int curr = l_start + (ny/(batch/2)*j) + k;
				int k_index = (i*kx*ky) + (j*ky) + k;
				if ((curr >= 0) && (curr <= (nx*ny/batch-1))) { // check against barriers of input feature map
					out += temp_inp[curr] * temp_kern[k_index];
				}
			}
		}
		// store output
		// NOTE(review): this global += races across the ni*batch blocks that
		// target the same output element — confirm whether an atomicAdd is
		// intended here.
		int n_index = (i * nx * ny) + threadIdx.x; // rotate through output feature maps constantly
		outp[n_index] += out;
	}
}
/* Driver: allocate host/device buffers, run the convolution-style kernel,
 * and copy the result back.
 * Fix vs. original: the final cudaMemcpy copied only `o_size` BYTES instead
 * of `o_size * sizeof(int)`, truncating the returned output to a quarter. */
int main(void)
{
	// declare host + device pointers
	int *inp, *outp, *kern;
	int *d_inp, *d_outp, *d_kern;
	// compute array sizes (element counts, not bytes)
	int i_size = ni*nx*ny;
	int o_size = nn*nx*ny;
	int k_size = nn*ni*kx*ky;
	// allocate space for each array on the device
	gpuErrchk( cudaMalloc(&d_inp, i_size*sizeof(int)) );
	gpuErrchk( cudaMalloc(&d_outp, o_size*sizeof(int)) );
	gpuErrchk( cudaMalloc(&d_kern, k_size*sizeof(int)) );
	// allocate space and populate each array on the host
	inp = (int*)malloc(i_size*sizeof(int));
	outp = (int*)malloc(o_size*sizeof(int));
	kern = (int*)malloc(k_size*sizeof(int));
	random_ints(inp, i_size);
	zeros(outp, o_size);
	random_ints(kern, k_size);
	// copy populated host arrays to corresponding device arrays
	gpuErrchk( cudaMemcpy(d_inp, inp, i_size*sizeof(int), cudaMemcpyHostToDevice) );
	gpuErrchk( cudaMemcpy(d_outp, outp, o_size*sizeof(int), cudaMemcpyHostToDevice) );
	gpuErrchk( cudaMemcpy(d_kern, kern, k_size*sizeof(int), cudaMemcpyHostToDevice) );
	// launch all threads on device
	// # blocks = # input feature maps * # batches / input feature map
	// # threads / block = # elements in each batch
	matrix_vector_mult<<<ni*batch, nx*ny/batch>>>(d_inp, d_outp, d_kern);
	// determine if run succeeded
	gpuErrchk( cudaPeekAtLastError() );
	gpuErrchk( cudaDeviceSynchronize() );
	// copy output array back to host (size in BYTES — was missing sizeof(int))
	gpuErrchk( cudaMemcpy(outp, d_outp, o_size*sizeof(int), cudaMemcpyDeviceToHost) );
	// free all memory
	free(inp); free(outp); free(kern);
	gpuErrchk( cudaFree(d_inp) ); gpuErrchk( cudaFree(d_outp) ); gpuErrchk( cudaFree(d_kern) );
	return 0;
}
22,109 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <limits.h>
#define M_PI 3.1415926535897
#define VECTOR_COUNT 2
cudaError_t computeElementsHelper(int* a, int* b, int* lengthNoSqrt, int* dotProduct, int N, int blockSize);
// Single-pass accumulation via global atomics, one thread per element:
//   *product        += a[i] * b[i]   (dot product)
//   lengthNoSqrt[0] += a[i]^2        (squared length of a)
//   lengthNoSqrt[1] += b[i]^2        (squared length of b)
__global__ void computeElementsKernel(int* lengthNoSqrt, int* product, int* a, int* b, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    int av = a[idx];
    int bv = b[idx];
    atomicAdd(product, av * bv);
    atomicAdd(&lengthNoSqrt[0], av * av);
    atomicAdd(&lengthNoSqrt[1], bv * bv);
}
// One thread per element: c[i] = a[i] + b[i] (single-block launch assumed).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
/* Allocate and return an N-element vector of random ints in [-10, 10). */
int* genVector(int N) {
    int* vec = (int*)malloc(sizeof(int) * N);
    for (int idx = 0; idx < N; idx++)
        vec[idx] = rand() % 20 - 10;
    return vec;
}
/* Serial dot product of two N-element int vectors. */
int findDotProduct(int* a, int* b, int N) {
    int acc = 0;
    for (int idx = 0; idx < N; idx++)
        acc += a[idx] * b[idx];
    return acc;
}
/* Debug helper: print each element as "arr[i] = v", one per line. */
void printArray(int* x, int size) {
    int idx = 0;
    while (idx < size) {
        printf("arr[%d] = %d\n", idx, x[idx]);
        idx++;
    }
}
/* Euclidean length of an N-element int vector.
 * Uses integer multiplication instead of pow(x[i], 2) — avoids a pointless
 * int->double->int round trip per element while producing the same values
 * (the accumulator stays an int, matching the original truncation).
 * Returns 0.0 for N == 0. */
double findVectorLength(int* x, int N) {
    int sumSquares = 0;
    for (int i = 0; i < N; i++) {
        sumSquares += x[i] * x[i];
    }
    double distance = sqrt((double)sumSquares);
    return distance;
}
/* Convert radians to degrees. */
double convertToDegrees(double rad) {
    const double degPerRad = 180 / M_PI;
    return rad * degPerRad;
}
// Enumerate every CUDA device and print name, memory clock, bus width, and
// the derived peak memory bandwidth (DDR: 2 transfers/clock, bits -> bytes).
void printDeviceProperties() {
	printf("--------------------DEVICE PROPERTIES----------------------\n\n");
	int nDevices;
	cudaGetDeviceCount(&nDevices);
	for (int i = 0; i < nDevices; i++) {
		cudaDeviceProp prop;
		cudaGetDeviceProperties(&prop, i);
		printf("Device Number: %d\n", i);
		printf("Device name: %s\n", prop.name);
		printf("Memory Clock Rate (KHz): %d\n",
			prop.memoryClockRate);
		printf("Memory Bus Width (bits): %d\n",
			prop.memoryBusWidth);
		// peak GB/s = 2 (DDR) * clock(kHz) * bus_bytes / 1e6
		printf("Peak Memory Bandwidth (GB/s): %f\n\n",
			2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
	}
}
// CPU reference: angle in degrees between two int vectors, via
// acos( dot(a,b) / (|a| * |b|) ).
double doTheSerialThing(int* vectorOne, int* vectorTwo, int SIZE) {
	double dot = (double)findDotProduct(vectorOne, vectorTwo, SIZE);
	double lenOne = findVectorLength(vectorOne, SIZE);
	double lenTwo = findVectorLength(vectorTwo, SIZE);
	double cosine = dot / (lenOne * lenTwo);
	double radians = acos(cosine);
	return convertToDegrees(radians);
}
// Driver: build two int vectors (random or file-sourced), compute the angle
// between them on CPU and GPU, and report timings plus both results.
// Usage: prog SIZE BLOCK_SIZE [input_file]
int main(int argc, char** argv)
{
	//Before beginning, print device properties.
	//printDeviceProperties();
	srand(time(NULL));
	clock_t start, end;
	double cpu_time_used;
	// NOTE(review): argv[1]/argv[2] are read BEFORE the argc check below —
	// running with fewer than 3 arguments dereferences missing argv slots.
	int SIZE = atoi(argv[1]);
	int BLOCK_SIZE = atoi(argv[2]);
	int* vectorOne = NULL;
	int* vectorTwo = NULL;
	// Result slots filled by computeElementsHelper (squared lengths + dot).
	int lengthsNoSqrt[VECTOR_COUNT] = { 0 };
	int dotProduct[1] = { 0 };
	double angleSerial = 0;
	int numberBlocks = 0;
	// Ceiling division: one extra block when SIZE is not a multiple.
	if (SIZE % BLOCK_SIZE == 0)
		numberBlocks = SIZE / BLOCK_SIZE;
	else
		numberBlocks = (SIZE / BLOCK_SIZE) + 1;
	printf("Info\n------------------\n");
	printf("Number of elements: %d\n", SIZE);
	printf("Number of threads per block: %d\n", BLOCK_SIZE);
	printf("Number of blocks will be created: %d\n\n", numberBlocks);
	//arrays will be generated
	if (argc == 3) {
		printf("Time\n------------------\n");
		start = clock();
		vectorOne = genVector(SIZE);
		vectorTwo = genVector(SIZE);
		end = clock();
		cpu_time_used = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
		printf("Time for the array generation : %f ms\n", cpu_time_used);
	}
	//arrays will be read from file.
	else if (argc == 4) {
		// File format: first line = per-vector element count, followed by
		// 2*count values (vectorOne then vectorTwo), one per line.
		char const* const fileName = argv[3]; /* should check that argc > 1 */
		FILE* file = fopen(fileName, "r"); /* should check the result */
		char line[256];
		fgets(line, sizeof(line), file);
		int count = atoi(line);
		int* allArray = (int*)malloc(sizeof(int) * count * 2);
		vectorOne = (int*)malloc(sizeof(int) * count);
		vectorTwo = (int*)malloc(sizeof(int) * count);
		int i = 0;
		//printf("COUNT: %d\n", count);
		// NOTE(review): no bound check against count*2 while reading, and if
		// the file holds fewer than 2*count numbers the copies below read
		// uninitialized memory. allArray is also never freed.
		while (fgets(line, sizeof(line), file)) {
			/* note that fgets don't strip the terminating \n, checking its
			   presence would allow to handle lines longer that sizeof(line) */
			int number = atoi(line);
			allArray[i] = number;
			i++;
		}
		/* may check feof here to make a difference between eof and io failure -- network
		   timeout for instance */
		/*
		for (int i = 0; i < count; i++) {
			printf("allArray[%d] = %d\n", i, allArray[i]);
		}
		*/
		// Split the flat array into the two vectors.
		for (int i = 0; i < count; i++) {
			vectorOne[i] = allArray[i];
		}
		for (int i = count; i < count * 2; i++) {
			vectorTwo[i - count] = allArray[i];
		}
		fclose(file);
	}
	else {
		printf("GIVE APPROPRIATE NUMBER OF ARGUMENTS PLEASE!!!\n");
		return 0;
	}
	start = clock();
	angleSerial = doTheSerialThing(vectorOne, vectorTwo, SIZE);
	end = clock();
	cpu_time_used = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
	printf("Time for the CPU function: %f ms\n", cpu_time_used);
	//printf("---------------------PARALLEL IMPLEMENTATION-----------------\n\n");
	// Calculate angle with CUDA.
	cudaError_t cudaStatus = computeElementsHelper(vectorOne, vectorTwo, lengthsNoSqrt, dotProduct, SIZE, BLOCK_SIZE);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "computeElements failed!");
		return 1;
	}
	// find the angle here.
	double lenOne = sqrt( (double) lengthsNoSqrt[0]);
	double lenTwo = sqrt( (double) lengthsNoSqrt[1]);
	double cosTheta = ( ((double) (dotProduct[0])) / (lenOne * lenTwo));
	double angleInRadians = acos(cosTheta);
	double angle = convertToDegrees(angleInRadians);
	printf("\n");
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}
	printf("Results\n-----------------\n");
	printf("CPU Result: %0.3f\n", angleSerial);
	printf("GPU Result: %0.3f\n", angle);
	return 0;
}
// Host wrapper for computeElementsKernel: stage both vectors on the device,
// run the atomic accumulation kernel, and copy back the two squared lengths
// (lengthNoSqrt[0..1]) and the dot product. Also times each phase with
// clock(). Returns the last cudaError_t observed; individual failures are
// reported to stderr but do not abort (subsequent calls may then fail too).
cudaError_t computeElementsHelper(int* a, int* b, int* lengthNoSqrt, int* dotProduct, int N, int blockSize)
{
	int* dev_a = 0;
	int* dev_b = 0;
	int* dev_lengthNoSqrt = 0;
	int* dev_product = 0;
	cudaError_t cudaStatus;
	clock_t start, end;
	double timeUsed;
	double totalGpuTime = 0;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?\n");
	}
	cudaStatus = cudaMalloc((void**)&dev_a, N * sizeof(int));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc dev a failed!\n");
	}
	cudaStatus = cudaMalloc((void**)&dev_b, N * sizeof(int));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc dev b failed!\n");
	}
	cudaStatus = cudaMalloc((void**)&dev_lengthNoSqrt, VECTOR_COUNT * sizeof(int));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc dev length failed!\n");
	}
	cudaStatus = cudaMalloc((void**)&dev_product, sizeof(int));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc dev product failed!\n");
	}
	// NOTE(review): dev_lengthNoSqrt and dev_product are never explicitly
	// zeroed on the device before the kernel's atomicAdds — confirm
	// zero-initialization is guaranteed by the caller's copies/allocation.
	// Copy input vectors from host memory to GPU buffers.
	start = clock();
	cudaStatus = cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!\n");
	}
	cudaStatus = cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!\n");
	}
	end = clock();
	timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
	totalGpuTime += timeUsed;
	printf("Time for the Host to Device transfer: %f ms\n", timeUsed);
	// Launch a kernel on the GPU with one thread for each element.
	// Ceiling division so the tail partial block is still launched.
	int numberBlocks = 0;
	if (N % blockSize == 0)
		numberBlocks = N / blockSize;
	else
		numberBlocks = (N / blockSize) + 1;
	start = clock();
	computeElementsKernel <<< numberBlocks, blockSize >>> (dev_lengthNoSqrt, dev_product, dev_a, dev_b, N);
	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "computeElementsKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	}
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
	}
	end = clock();
	timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
	totalGpuTime += timeUsed;
	printf("Time for the kernel execution: %f ms\n", timeUsed);
	start = clock();
	cudaStatus = cudaMemcpy(lengthNoSqrt, dev_lengthNoSqrt, VECTOR_COUNT * sizeof(int), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy for dev_lengths failed!\n");
	}
	cudaStatus = cudaMemcpy(dotProduct, dev_product, sizeof(int), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy for dotProduct failed!\n");
	}
	end = clock();
	timeUsed = ((double)(end - start) * 1000) / CLOCKS_PER_SEC;
	printf("Time for the Device to Host transfer: %f ms\n", timeUsed);
	totalGpuTime += timeUsed;
	printf("Total execution time for GPU: %f ms\n", totalGpuTime);
	cudaFree(dev_product);
	cudaFree(dev_lengthNoSqrt);
	cudaFree(dev_a);
	cudaFree(dev_b);
	return cudaStatus;
}
22,110 | template<typename Destination, typename Data>
// Element-wise absolute value: dst[i] = abs(src[i]), one element per thread.
// Expects a 1-D launch with at least `elements` total threads; extra threads
// are masked by the bounds guard.
// NOTE(review): which abs() overload binds depends on Data — for floating
// types fabs/fabsf would be the conventional choice; confirm Data is integral.
__global__ void absArrays(size_t elements, Destination *dst, Data *src) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = abs(src[kernelIndex]); }
}
|
22,111 | extern "C" {
// Converts an imgWidth x imgHeight uchar3 image to single-channel grayscale
// using the 0.299/0.587/0.114 luma weights. One pixel per thread; expects a
// 2-D launch covering the image, with the guard masking the overhang.
// NOTE(review): .x/.y/.z are weighted as R/G/B — confirm the uchar3 buffer is
// actually packed RGB and not BGR (e.g. OpenCV default).
__global__ void rgb2gray(uchar3 *dataIn, unsigned char *dataOut, int imgHeight, int imgWidth)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
if (xIndex < imgWidth && yIndex < imgHeight)
{
uchar3 rgb = dataIn[yIndex * imgWidth + xIndex];
// Float weighted sum is truncated on assignment to unsigned char.
dataOut[yIndex * imgWidth + xIndex] = 0.299f * rgb.x + 0.587f * rgb.y + 0.114f * rgb.z;
}
}
} |
22,112 | #include<iostream>
// Single-thread kernel: stores the sum of the two scalar arguments into *c.
__global__ void add(int a, int b, int *c)
{
    const int sum = a + b;
    *c = sum;
}
// Launches add<<<1,1>>>(2, 7) and prints the result. Previously no CUDA call
// was checked, so a missing device silently printed garbage.
int main()
{
    int c = 0;              // host copy of the result
    int *dev_c = nullptr;   // device buffer for one int

    cudaError_t err = cudaMalloc((void **)&dev_c, sizeof(int));
    if (err != cudaSuccess) {
        std::cerr << "cudaMalloc failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    add<<<1,1>>>(2, 7, dev_c);
    err = cudaGetLastError();   // catches launch-configuration errors
    if (err != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
        cudaFree(dev_c);
        return 1;
    }
    // cudaMemcpy synchronizes with the kernel, so no explicit sync is needed.
    err = cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        std::cerr << "cudaMemcpy failed: " << cudaGetErrorString(err) << std::endl;
        cudaFree(dev_c);
        return 1;
    }
    std::cout << c << std::endl;
    cudaFree(dev_c);
    return 0;
}
|
22,113 | #include <stdio.h>
#include <stdlib.h>
#define SIZE 1000
// Deliberate demonstration of a missing bounds guard: every launched thread
// increments its own element, so any launch with more threads than p has
// elements reads and writes past the end of the array (see comments below).
__global__ void demo(int * p){
int tx=threadIdx.x;
int bx=blockIdx.x;
int thid = tx+bx*blockDim.x;
// Some of the threads try to access memory out of array boundary.
// The program may not get any error message, but will pose a potential bug.
p[thid]=thid+p[thid];
}
// Allocates SIZE ints on host and device, launches demo() with the grid
// rounded up to whole 512-thread blocks (so 1024 threads for SIZE=1000 — the
// intentional out-of-bounds demo), and now actually checks the launch and
// execution status so the demonstrated fault is reported.
int main(int argc , char **argv){
    int * p_cpu;
    int * p_gpu;
    cudaError_t err;
    err=cudaMalloc((void**)&p_gpu,SIZE*sizeof(int));
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    p_cpu=(int *)malloc(SIZE*sizeof(int));
    int i;
    for(i=0;i<SIZE;i++){
        p_cpu[i]=1;
    }
    err=cudaMemcpy( p_gpu, p_cpu, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    dim3 dimGrid((SIZE-1)/512+1,1);   // ceil(SIZE/512) blocks
    dim3 dimBlock(512,1);
    demo<<<dimGrid,dimBlock>>>(p_gpu);
    // Kernel launches return no status directly: a bad configuration shows up
    // in cudaGetLastError(), an in-kernel fault at the next synchronization.
    err=cudaGetLastError();
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    err=cudaDeviceSynchronize();
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    free(p_cpu);
    err=cudaFree(p_gpu);
    if( err != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return 0;
}
|
22,114 | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
// Host-side record that is deep-copied to the device in launch(); Kernel()
// mutates both fields on the device and launch() copies them back.
struct DataElement
{
char *name;   // heap-allocated, NUL-terminated string
int value;
};
// Prints the element it received, then mutates it in device memory:
// first character of name becomes 'd' and value is incremented.
__global__
void Kernel(DataElement *elem) {
printf("On device: name=%s, value=%d\n", elem->name, elem->value);
elem->name[0] = 'd';
elem->value++;
}
// Manual deep copy of a DataElement to the device, kernel launch, and copy
// back of both mutated fields. The struct and the string it points to are
// separate allocations, so three uploads are required.
void launch(DataElement *elem) {
DataElement *d_elem;
char *d_name;
int namelen = strlen(elem->name) + 1;
// Allocate storage for struct and text
cudaMalloc(&d_elem, sizeof(DataElement));
cudaMalloc(&d_name, namelen);
// Copy up each piece separately, including new “text” pointer value
cudaMemcpy(d_elem, elem, sizeof(DataElement), cudaMemcpyHostToDevice);
cudaMemcpy(d_name, elem->name, namelen, cudaMemcpyHostToDevice);
// &(d_elem->name) only computes an address from the device pointer — legal on
// the host — and patches the device struct's name field to point at d_name.
cudaMemcpy(&(d_elem->name), &d_name, sizeof(char*), cudaMemcpyHostToDevice);
// Finally we can launch our kernel, but CPU & GPU use different copies of “elem”
Kernel<<< 1, 1 >>>(d_elem);
// These cudaMemcpy calls synchronize with the kernel before reading results.
cudaMemcpy(&(elem->value), &(d_elem->value), sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(elem->name, d_name, namelen, cudaMemcpyDeviceToHost);
cudaFree(d_name);
cudaFree(d_elem);
}
int main(void)
{
    // Build a host element, round-trip it through the GPU, and print the
    // mutated result: Kernel() sets name[0] to 'd' and bumps value.
    const char *initial = "hello";
    DataElement *e = (DataElement*)malloc(sizeof(DataElement));
    e->value = 10;
    e->name = (char*)malloc(strlen(initial) + 1);
    strcpy(e->name, initial);

    launch(e);

    printf("On host: name=%s, value=%d\n", e->name, e->value);

    free(e->name);
    free(e);
    cudaDeviceReset();
} |
22,115 | #include "includes.h"
// Tiled (shared-memory) matrix multiply: matC = matA * matB, computed one
// TILE_DIM x TILE_DIM output tile per block. Edge tiles are zero-padded so the
// inner product needs no per-element guards.
// BUG FIX: the row bound when staging the B tile used cRows (= A's row count
// for a valid multiply) instead of bRows, so non-square problems read past
// the end of matB.
__global__ void tileMatMul(float* matA, float* matB, float* matC, int aRows, int aCols, int bRows, int bCols, int cRows, int cCols)
{
    // Output element coordinates owned by this thread.
    int Row = blockIdx.y * TILE_DIM + threadIdx.y;
    int Col = blockIdx.x * TILE_DIM + threadIdx.x;
    __shared__ float sharedMatA[TILE_DIM][TILE_DIM];
    __shared__ float sharedMatB[TILE_DIM][TILE_DIM];
    float cResultValue = 0.0f;
    // March across the K dimension (aCols == bRows) one tile at a time.
    for (int i = 0; i < (aCols - 1) / TILE_DIM + 1; ++i)
    {
        // Stage a tile of A, zero-padding outside the matrix.
        if (Row < aRows && i * TILE_DIM + threadIdx.x < aCols)
            sharedMatA[threadIdx.y][threadIdx.x] = matA[Row * aCols + i * TILE_DIM + threadIdx.x];
        else
            sharedMatA[threadIdx.y][threadIdx.x] = 0.0f;
        // Stage a tile of B; bound the row index by bRows (was cRows).
        if (Col < bCols && i * TILE_DIM + threadIdx.y < bRows)
            sharedMatB[threadIdx.y][threadIdx.x] = matB[(i * TILE_DIM + threadIdx.y) * bCols + Col];
        else
            sharedMatB[threadIdx.y][threadIdx.x] = 0.0f;
        __syncthreads();   // both tiles fully staged before anyone reads
        for (int j = 0; j < TILE_DIM; ++j)
            cResultValue += sharedMatA[threadIdx.y][j] * sharedMatB[j][threadIdx.x];
        __syncthreads();   // done reading before the next iteration overwrites
    }
    // Guarded store of the accumulated dot product.
    if (Row < cRows && Col < cCols)
        matC[Row * cCols + Col] = cResultValue;
} |
22,116 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
#define dT 0.2f
#define G 0.6f
//#define BLOCK_SIZE 32
//#define BLOCK_SIZE 64
//#define BLOCK_SIZE 128
//#define BLOCK_SIZE 256
#define BLOCK_SIZE 512
// Global variables
int num_planets;
int num_timesteps;
// Host arrays
float2* velocities;
float4* planets;
// Device arrays
float2* velocities_d;
float4* planets_d;
// Parse command line arguments
// Parse command line arguments: exactly one argument (the timestep count) is
// required; stores it in the global num_timesteps or exits with usage text.
// Fixed the "Useage" typo in the user-facing message.
void parse_args(int argc, char** argv){
    if(argc != 2){
        printf("Usage: nbody num_timesteps\n");
        exit(-1);
    }
    num_timesteps = strtol(argv[1], 0, 10);
}
// Reads planets from planets.txt
void read_planets(){
FILE* file = fopen("planets256.txt", "r");
//FILE* file = fopen("planets1024.txt", "r");
//FILE* file = fopen("planets4096.txt", "r");
if(file == NULL){
printf("'planets.txt' not found. Exiting\n");
exit(-1);
}
char line[200];
fgets(line, 200, file);
sscanf(line, "%d", &num_planets);
planets = (float4*)malloc(sizeof(float4)*num_planets);
velocities = (float2*)malloc(sizeof(float2)*num_planets);
for(int p = 0; p < num_planets; p++){
fgets(line, 200, file);
sscanf(line, "%f %f %f %f %f",
&planets[p].x,
&planets[p].y,
&velocities[p].x,
&velocities[p].y,
&planets[p].z);
}
fclose(file);
}
// Writes planets to file
void write_planets(int timestep){
char name[20];
int n = sprintf(name, "planets_out.txt");
FILE* file = fopen(name, "wr+");
for(int p = 0; p < num_planets; p++){
fprintf(file, "%f %f %f %f %f\n",
planets[p].x,
planets[p].y,
velocities[p].x,
velocities[p].y,
planets[p].z);
}
fclose(file);
}
// TODO 7. Calculate the change in velocity for p, caused by the interaction with q
__device__ float2 calculate_velocity_change_planet(float4 p, float4 q){
float2 r;
r.x = q.x - p.x;
r.y = q.y - p.y;
if(r.x == 0 && r.y == 0){
float2 v = {0.0f, 0.0f};
return v;
}
float abs_dist = sqrt(r.x*r.x + r.y*r.y);
float dist_cubed = abs_dist*abs_dist*abs_dist;
float2 dv;
dv.x = dT*G*q.z/dist_cubed * r.x;
dv.y = dT*G*q.z/dist_cubed * r.y;
return dv;
}
// TODO 5. Calculate the change in velocity for my_planet, caused by the interactions with a block of planets
__device__ float2 calculate_velocity_change_block(float4 my_planet, float4* shared_planets) {
float2 velocity = {0.0f, 0.0f};
for(int i = 0; i < blockDim.x; i++) {
float2 tempv = calculate_velocity_change_planet(my_planet, shared_planets[i]);
velocity.x += tempv.x;
velocity.y += tempv.y;
}
return velocity;
}
// TODO 4. Update the velocities by calculating the planet interactions ==> DONE!
__global__ void update_velocities(float4* planets, float2* velocities, int num_planets){
// Step 1: Overall declarations and setup for the the update-velocity function
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
float4 my_planet = planets[thread_id];
// Step 2: How the planets get distributed into groups of BLOCK_SIZE, and
__shared__ float4 shared_planets[BLOCK_SIZE];
for(int i = 0; i < num_planets; i+=blockDim.x) {
shared_planets[threadIdx.x] = planets[i + threadIdx.x];
__syncthreads();
// Step 3: The call to the parallel routine calculate_velocity_change_block which help update the velocities for each time-step.
float2 tempv = calculate_velocity_change_block(my_planet, shared_planets);
velocities[thread_id].x += tempv.x;
velocities[thread_id].y += tempv.y;
__syncthreads();
}
}
// TODO 7. Update the positions of the planets using the new velocities
__global__ void update_positions(float4* planets, float2* velocities, int num_planets){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
planets[thread_id].x += velocities[thread_id].x * dT;
planets[thread_id].y += velocities[thread_id].y * dT;
}
// Driver: load planets, upload, run num_timesteps velocity+position updates,
// download, write output; reports copy-in / compute / copy-out times.
// Fixes: velocities_d was never freed (device memory leak), and the
// "Calculation-Time" clock stopped before the asynchronous kernels finished,
// so it measured launch overhead only — a cudaDeviceSynchronize() now closes
// the timed region.
int main(int argc, char** argv){
    clock_t begin = clock();
    parse_args(argc, argv);
    read_planets();
    // Device buffers and upload of the initial state.
    cudaMalloc(&planets_d, sizeof(float4)*num_planets);
    cudaMalloc(&velocities_d, sizeof(float2)*num_planets);
    cudaMemcpy(planets_d, planets, sizeof(float4)*num_planets, cudaMemcpyHostToDevice);
    cudaMemcpy(velocities_d, velocities, sizeof(float2)*num_planets, cudaMemcpyHostToDevice);
    clock_t first = clock();
    double time_spent_to_copy_to_device = (double)(first - begin) / CLOCKS_PER_SEC;
    printf("Copy-to-Device-Time: %f\n", time_spent_to_copy_to_device);
    // Round the grid up so every planet is covered.
    int num_blocks = num_planets/BLOCK_SIZE + ((num_planets%BLOCK_SIZE == 0) ? 0 : 1);
    // Main loop: velocities first, then positions, once per timestep.
    for(int t = 0; t < num_timesteps; t++) {
        update_velocities<<<num_blocks, BLOCK_SIZE>>>(planets_d, velocities_d, num_planets);
        update_positions<<<num_blocks, BLOCK_SIZE>>>(planets_d, velocities_d, num_planets);
    }
    // Kernel launches are asynchronous; wait before stopping the clock.
    cudaDeviceSynchronize();
    clock_t second = clock();
    double time_spent_to_calculate = (double)(second - first) / CLOCKS_PER_SEC;
    printf("Calculation-Time: %f\n", time_spent_to_calculate);
    // Download the final state.
    cudaMemcpy(velocities, velocities_d, sizeof(float2)*num_planets, cudaMemcpyDeviceToHost);
    cudaMemcpy(planets, planets_d, sizeof(float4)*num_planets, cudaMemcpyDeviceToHost);
    write_planets(num_timesteps);
    // Release host and device memory (velocities_d was previously leaked).
    free(velocities);
    free(planets);
    cudaFree(planets_d);
    cudaFree(velocities_d);
    clock_t end = clock();
    double time_spent_to_copy_to_host = (double)(end - second) / CLOCKS_PER_SEC;
    printf("Copy-to-Host-Time: %f\n", time_spent_to_copy_to_host);
    double time_all = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("All-Time: %f\n", time_all);
}
/*
REPORT #####################################################################################################
-> New PC on Tulpan (i7 7700k / GTX 1080 ti)
a)
Serial Version: (1 Thread, 21000 Timesteps)
- 256 Planets: 24.623289s
- 1024 Planets: 395.421915s (~6min 35s)
- 4069 Planets: takes too much time
Cuda Version: (BLOCK_SIZE 64)
- 256 Planets:
Copy-to-Device-Time: 0.160985s
Calculation-Time: 1.113486s
Copy-to-Host-Time: 0.026647s
-> All-Time: 1.301118s
- 1024 Planets:
Copy-to-Device-Time: 0.155725s
Calculation-Time: 4.020773s
Copy-to-Host-Time: 0.101277s
-> All-Time: 4.277775s
- 4069 Planets:
Copy-to-Device-Time: 0.151724s
Calculation-Time: 16.207968s
Copy-to-Host-Time: 0.409272s
-> All-Time: 16.768964s
==> SPEEDUP:
- 256 18.924716x
- 1024 92.436352x
b)
Cuda Version: (BLOCK_SIZE 32)
- 256 Planets:
Copy-to-Device-Time: 0.140635
Calculation-Time: 1.073448
Copy-to-Host-Time: 0.029205
-> All-Time: 1.243288 <-- FASTEST (256 Planets)
- 1024 Planets:
Copy-to-Device-Time: 0.148540
Calculation-Time: 4.160538
Copy-to-Host-Time: 0.105188
-> All-Time: 4.414266
- 4069 Planets:
Copy-to-Device-Time: 0.167685
Calculation-Time: 16.654356
Copy-to-Host-Time: 0.421628
-> All-Time: 17.243669 //slowest (4096)
Cuda Version: (BLOCK_SIZE 256)
- 256 Planets:
Copy-to-Device-Time: 0.153521
Calculation-Time: 1.120654
Copy-to-Host-Time: 0.026555
-> All-Time: 1.300730
- 1024 Planets:
Copy-to-Device-Time: 0.144414
Calculation-Time: 4.000408
Copy-to-Host-Time: 0.100507
-> All-Time: 4.245329 <-- FASTEST (1024 Planets)
- 4069 Planets:
Copy-to-Device-Time: 0.169759
Calculation-Time: 15.745258
Copy-to-Host-Time: 0.397490
-> All-Time: 16.312507 <-- FASTEST (4096 Planets)
Cuda Version: (BLOCK_SIZE 512)
- 256 Planets:
Copy-to-Device-Time: 0.162534
Calculation-Time: 5.162525
Copy-to-Host-Time: 0.143631
-> All-Time: 5.468690 //slowest (256)
- 1024 Planets:
Copy-to-Device-Time: 0.141358
Calculation-Time: 4.169906
Copy-to-Host-Time: 0.104564
-> All-Time: 4.415828 //slowest (1024)
- 4069 Planets:
Copy-to-Device-Time: 0.143752
Calculation-Time: 16.378144
Copy-to-Host-Time: 0.414755
-> All-Time: 16.936651
There are some small differences between the different BLOCK_SIZEs.
The overall fastest one is 256 (except for the run with 256 planets).
Really slow is the run with BLOCK_SIZE 512 and 256 planets.
In general the differences are really small.
The differences seem to come from a combination of problem size and BLOCK_SIZE.
It also depends on the hardware used (which was the same for all the runs).
-> There is also Occupancy in CUDA, which defined as a ratio of active warps per SM to max. warps that can be active at once.
This can help to pick a good BLOCK_SIZE.
*/
|
22,117 | #include "includes.h"
// HSV-style saturation of an RGB triple: (max - min) / max, or 0 when max is 0.
__device__ float Sat(float r, float g, float b){
float min = fmin(fmin(r, g), b);
float max = fmax(fmax(r, g), b);
float delta = max - min;
float S = max != 0.0f ? delta / max : 0.0f;
return S;
}
// In-place contrast-about-pivot followed by per-channel saturation mixing on
// an RGBA float buffer (4 floats per pixel; alpha untouched). 2-D launch, one
// pixel per thread.
__global__ void FilmGradeKernelC( float* p_Input, int p_Width, int p_Height, float p_ContR, float p_ContG, float p_ContB, float p_SatR, float p_SatG, float p_SatB, float p_ContP) {
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < p_Width && y < p_Height) {
        const int index = (y * p_Width + x) * 4;   // RGBA-packed pixel offset
        // Contrast: scale each channel's distance from the pivot p_ContP.
        const float contR = (p_Input[index] - p_ContP) * p_ContR + p_ContP;
        const float contG = (p_Input[index + 1] - p_ContP) * p_ContG + p_ContP;
        const float contB = (p_Input[index + 2] - p_ContP) * p_ContB + p_ContP;
        // Rec.709 luma of the contrast-adjusted pixel.
        const float luma = contR * 0.2126f + contG * 0.7152f + contB * 0.0722f;
        // The luma weight is identical for all three channels — compute once.
        const float lumaWeight = 1.0f - (p_SatR * 0.2126f + p_SatG * 0.7152f + p_SatB * 0.0722f);
        p_Input[index] = lumaWeight * luma + contR * p_SatR;
        p_Input[index + 1] = lumaWeight * luma + contG * p_SatG;
        p_Input[index + 2] = lumaWeight * luma + contB * p_SatB;
}} |
22,118 | #include "includes.h"
// 2-D convolution of an n x n matrix with the MASK_LEN x MASK_LEN mask
// (in constant/global `mask`); out-of-image taps are treated as zero.
// BUG FIX: the store res[row*n+col] was unguarded — grids rounded up to
// whole blocks wrote out of bounds for threads past the image edge.
__global__ void conv_2d(int* Mat, int* res, int n) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n) return;   // guard the unguarded store below
    int start_r = row - MASK_OFFSET;    // top-left corner of the mask window
    int start_c = col - MASK_OFFSET;
    int temp = 0;
    for (int i = 0; i < MASK_LEN; i++)
    {
        for (int j = 0; j < MASK_LEN; j++)
        {
            // Accumulate only taps landing inside the matrix.
            if ((start_r + i >= 0) && (start_r + i < n) &&
                (start_c + j >= 0) && (start_c + j < n))
            {
                temp += Mat[(start_r + i) * n + (start_c + j)] * mask[i * MASK_LEN + j];
            }
        }
    }
    res[row * n + col] = temp;
} |
22,119 | #include <iostream>
#include <cuda.h>
using namespace std;
///usr/local/bin/nvcc mult-matriz-vector.cu -o mult.out
// Row-per-block matrix-vector product: C[row] = sum_j A[row][j] * B[j].
// Launched as <<<n, 1>>>: one block per row, a single thread each.
// BUG FIXES: `sum` was read without ever being initialized, and the result
// was accumulated (+=) into device memory the caller never zeroed — both
// produced garbage output.
__global__ void MultMatrizVectKernel(float *A, float *B, float *C, int n)
{
    int i = n * blockIdx.x;   // offset of this block's row in the flat matrix
    if(i < n*n)
    {
        float sum = 0.0f;     // was uninitialized
        for(int j = 0; j < n ; ++j)
        {
            sum += A[i + j] * B[j];
        }
        C[blockIdx.x] = sum;  // plain store instead of += into garbage
    }
}
// Host wrapper: uploads A (n x n, row-major) and B (n), runs the kernel with
// one block per row, and downloads the n-element result into C.
// Fix: cudaMalloc does not zero memory, and the kernel accumulates into d_C,
// so d_C must be cleared before the launch.
void MultMatrizVector(float *A, float *B, float *C, int n)
{
    float *d_A, *d_B, *d_C;
    size_t size_A = n*n * sizeof(float);
    size_t size_B = n * sizeof(float);
    size_t size_C = n * sizeof(float);
    cudaMalloc((void **) &d_A, size_A);
    cudaMalloc((void **) &d_B, size_B);
    cudaMalloc((void **) &d_C, size_C);
    cudaMemcpy(d_A, A, size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size_B, cudaMemcpyHostToDevice);
    cudaMemset(d_C, 0, size_C);   // kernel accumulates; start from zero
    MultMatrizVectKernel<<< n, 1 >>>(d_A, d_B, d_C, n);
    // This blocking copy also synchronizes with the kernel.
    cudaMemcpy(C, d_C, size_C, cudaMemcpyDeviceToHost);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
// Builds A = [0..n*n) row-major and B = [0..n), multiplies on the GPU, and
// prints the resulting vector.
// Fix: the three new[] buffers were leaked — delete[] added.
int main(void)
{
    float *A, *B , *C;
    int n = 10 ;
    A = new float[n*n];
    B = new float[n];
    C = new float[n];
    for (int i = 0; i < n*n; ++i)
    {
        A[i] = i;
    }
    for (int i = 0; i < n; ++i)
    {
        B[i] = i;
    }
    MultMatrizVector(A, B, C ,n);
    for(int i = 0; i < n ; ++i)
    {
        cout << C[i] << " ";
    }
    // Release the host buffers (previously leaked).
    delete[] A;
    delete[] B;
    delete[] C;
    return 0;
}
|
22,120 | #include <fstream>
#include <iomanip>
#include <string>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <thrust/tuple.h>
#include <thrust/iterator/zip_iterator.h>
typedef thrust::tuple<double, double, double> CVec3;
// Unary functor for thrust::for_each over a zip iterator: writes one
// (x, y, z) tuple per call to the borrowed stream, space-separated with 5
// decimal places, one tuple per line.
struct functor_output_tuple : public thrust::unary_function<CVec3, void> {
std::ofstream& ofs;   // borrowed output stream; caller owns and closes it
__host__
functor_output_tuple(std::ofstream& _ofs) : ofs(_ofs) {}
// Host-only operator: unpacks the tuple and appends "x y z \n".
__host__
void operator()(const CVec3& vec) {
double vec_x = thrust::get<0>(vec);
double vec_y = thrust::get<1>(vec);
double vec_z = thrust::get<2>(vec);
ofs << std::setprecision(5) << std::fixed <<
vec_x << " " <<
vec_y << " " <<
vec_z << " " << '\n' << std::fixed;
}
};
int main() {
    // Four sample points, stored structure-of-arrays style.
    const double xs[4] = {0.2, 1.4, 2.2, 13.2};
    const double ys[4] = {0.2, 1.4, 22.2, 13.2};
    const double zs[4] = {111110.2, 11.4, 2.222222222, 3.141592};
    thrust::host_vector<double> pos_x(xs, xs + 4);
    thrust::host_vector<double> pos_y(ys, ys + 4);
    thrust::host_vector<double> pos_z(zs, zs + 4);

    std::string test = "TestingFileName.vtk";
    std::ofstream ofs;
    ofs.open(test.c_str());
    // Stream every (x, y, z) tuple through the output functor into the file.
    thrust::for_each(
        thrust::make_zip_iterator(
            thrust::make_tuple(pos_x.begin(), pos_y.begin(), pos_z.begin())),
        thrust::make_zip_iterator(
            thrust::make_tuple(pos_x.end(), pos_y.end(), pos_z.end())),
        functor_output_tuple(ofs));
    ofs.close();
    return 0;
} |
22,121 | //Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
static const int CYPHER_OFFSET = 3;
// Element-wise c[i] = a[i] + b[i], one element per thread. No bounds guard:
// the launch must supply exactly one thread per array element.
__global__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Element-wise c[i] = a[i] - b[i], one element per thread; no bounds guard.
__global__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Element-wise c[i] = a[i] * b[i], one element per thread; no bounds guard.
__global__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Element-wise c[i] = a[i] % b[i], one element per thread; no bounds guard.
// NOTE(review): b[i] == 0 is not guarded here — the host counterpart's
// comment claims the GPU "sets result to 0", but integer division by zero on
// the device is not defined behavior; confirm inputs exclude zero divisors.
__global__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Shifts each character DOWN by `offset`, wrapping within A-Z (i.e. the
// decryption direction of a Caesar cypher). One character per thread, no
// bounds guard: the launch must cover exactly the buffer length.
__global__ void caesarCypher(char * textToEncrypt, const int offset)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
char c;
c = textToEncrypt[thread_idx] - offset;
// This assumes the input array is all capital letters.
if (c < 'A')
{
c += 'Z' - 'A' + 1;
}
else if (c > 'Z')
{
c -= 'Z' - 'A' + 1;
}
textToEncrypt[thread_idx] = c;
}
// CPU reference: element-wise sum, c[i] = a[i] + b[i] for i in [0, size).
void hostAdd(int * a, int * b, int *c, const int size)
{
    int i = 0;
    while (i < size)
    {
        c[i] = a[i] + b[i];
        ++i;
    }
}
// CPU reference: element-wise difference, c[i] = a[i] - b[i] for i in [0, size).
// Elements are independent, so iteration order is irrelevant.
void hostSub(int * a, int * b, int *c, const int size)
{
    for (int i = size; i-- > 0; )
    {
        c[i] = a[i] - b[i];
    }
}
// CPU reference: element-wise product, c[i] = a[i] * b[i] for i in [0, size).
void hostMult(int * a, int * b, int *c, const int size)
{
    int *end = c + size;
    while (c != end)
    {
        *c++ = *a++ * *b++;
    }
}
// CPU reference: element-wise modulo, c[i] = a[i] % b[i] for i in [0, size),
// with a zero divisor mapped to 0 instead of dividing by zero.
void hostMod(int * a, int * b, int *c, const int size)
{
    for (int i = 0; i < size; ++i)
    {
        // Guard the divide-by-zero case explicitly; result defined as 0.
        c[i] = (b[i] == 0) ? 0 : (a[i] % b[i]);
    }
}
// CPU reference for the Caesar cypher kernel: shifts each (uppercase A-Z)
// character DOWN by `offset`, wrapping within the alphabet.
void hostCaesarCypher(char * textToEncrypt, const int offset, const int size)
{
    const int span = 'Z' - 'A' + 1;   // 26 letters
    for (int i = 0; i < size; ++i)
    {
        char shifted = textToEncrypt[i] - offset;
        // Wrap back into [A, Z]; input is assumed to be capital letters only.
        if (shifted < 'A')
        {
            shifted += span;
        }
        else if (shifted > 'Z')
        {
            shifted -= span;
        }
        textToEncrypt[i] = shifted;
    }
}
// Runs the CPU reference path: builds deterministic pseudo-random inputs and
// exercises each host arithmetic routine plus the Caesar cypher.
// BUG FIX: the hostCaesarCypher call passed (text, size, offset) against a
// (text, offset, size) signature — shifting every character by totalThreads
// and only touching CYPHER_OFFSET characters.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
    int a[totalThreads], b[totalThreads], c[totalThreads];
    // Fixed seed so output is deterministic across runs.
    unsigned seed = 12345;
    std::default_random_engine gen(seed);
    std::uniform_int_distribution<int> dist(0,4);
    for (size_t i = 0; i < totalThreads; ++i)
    {
        a[i] = i;
        b[i] = dist(gen);
    }
    hostAdd(a,b,c, totalThreads);   // c[i] = a[i] + b[i]
    hostSub(a,b,c, totalThreads);   // c[i] = a[i] - b[i]
    hostMult(a,b,c, totalThreads);  // c[i] = a[i] * b[i]
    hostMod(a,b,c, totalThreads);   // c[i] = a[i] % b[i]
    // Random capital-letter text for the cypher.
    int minChar = 'A';
    int maxChar = 'Z';
    std::uniform_int_distribution<int> charDist(minChar, maxChar);
    char textToEncrypt[totalThreads];
    for (int i = 0; i < totalThreads; ++i)
    {
        textToEncrypt[i] = charDist(gen);
    }
    // Arguments in signature order: (text, offset, size).
    hostCaesarCypher(textToEncrypt, CYPHER_OFFSET, totalThreads);
}
// Runs the GPU path on the same deterministic inputs as the host test; each
// operation is a kernel launch with the result copied back afterwards.
// BUG FIXES: (1) the cypher upload copied the int array `a` instead of
// textToEncrypt; (2) the download used sizeof(int) per element, overflowing
// the char buffer fourfold; (3) cudaFree was called on the host array,
// leaking gpuTextToEncrypt.
void executeGPUTest(const int totalThreads, const int blockSize, const int numBlocks)
{
    int a[totalThreads], b[totalThreads], c[totalThreads];
    int *gpu_a, *gpu_b, *gpu_c;
    cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
    cudaMalloc((void**)&gpu_c, totalThreads * sizeof(int));
    // Fixed seed so output is deterministic across runs.
    unsigned seed = 12345;
    std::default_random_engine gen(seed);
    std::uniform_int_distribution<int> dist(0,4);
    for (size_t i = 0; i < totalThreads; ++i)
    {
        a[i] = i;
        b[i] = dist(gen);
    }
    cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
    // Each blocking copy-back also synchronizes with the preceding kernel.
    add<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    subtract<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    mult<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    mod<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_c);
    cudaMemcpy(c, gpu_c, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    // Random capital-letter text for the cypher.
    int minChar = 'A';
    int maxChar = 'Z';
    std::uniform_int_distribution<int> charDist(minChar, maxChar);
    char textToEncrypt[totalThreads];
    for (int i = 0; i < totalThreads; ++i)
    {
        textToEncrypt[i] = charDist(gen);
    }
    char * gpuTextToEncrypt;
    cudaMalloc((void**)&gpuTextToEncrypt, totalThreads * sizeof(char));
    // Upload the text itself (previously uploaded the int array `a`).
    cudaMemcpy(gpuTextToEncrypt, textToEncrypt, totalThreads * sizeof(char), cudaMemcpyHostToDevice);
    caesarCypher<<<numBlocks, blockSize>>>(gpuTextToEncrypt, CYPHER_OFFSET);
    // Copy back sizeof(char) per element (sizeof(int) overflowed the buffer).
    cudaMemcpy(textToEncrypt, gpuTextToEncrypt, totalThreads*sizeof(char), cudaMemcpyDeviceToHost);
    // Free the device buffer (cudaFree(textToEncrypt) passed a host pointer).
    cudaFree(gpuTextToEncrypt);
}
// Prints a row-major xSize x ySize matrix, one row per line, elements
// space-separated, flushing once at the end.
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
// Parses (totalThreads, blockSize) from argv, rounds totalThreads up to a
// whole number of blocks, then times the host and GPU test suites.
// NOTE(review): the GPU timing includes one-time CUDA context creation on the
// first API call, so it overstates steady-state kernel cost.
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeGPUTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "GPU execution took: " << totalTime.count() << " seconds." << std::endl;
return 0;
}
|
22,122 | // PGPGU Class: Hello World
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
// Writes "Hello CUDA!" into odata, one character per thread; num is the
// buffer length (12, including the terminating NUL).
__global__ void hello_kernel(char *odata, int num)
{
char hello_str[12]="Hello CUDA!";   // per-thread local copy of the message
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < num)
odata[idx]=hello_str[idx];
}
// Round-trips a 12-byte buffer through the GPU: the kernel fills it with
// "Hello CUDA!" and the host prints the copied-back string.
int main(void)
{
char *h_data,*d_data;
// NOTE: this local shadows ::strlen from <string.h> within main.
const int strlen = 12;
size_t strsize = strlen*sizeof(char);
h_data = (char*)malloc(strsize);
memset(h_data,0,strlen);
cudaMalloc((void**)&d_data,strsize);
cudaMemcpy(d_data,h_data,strsize,cudaMemcpyHostToDevice);
// Grid rounded up so 12 characters are covered by 8-thread blocks.
int blocksize = 8;
int nblock = strlen/blocksize + (strlen % blocksize == 0? 0:1);
hello_kernel<<<nblock,blocksize>>>(d_data,strlen);
// Blocking copy synchronizes with the kernel before printing.
cudaMemcpy(h_data,d_data,sizeof(char)*strlen,cudaMemcpyDeviceToHost);
printf("%s\n",h_data);
free(h_data);
cudaFree(d_data);
}
|
22,123 | #include "includes.h"
// Broadcasts each row's label across all k columns of the output matrix:
// output[row][0..k) = labels[row]. One row per thread, guarded against
// grids larger than n_rows.
__global__ void build_expected_output(int *output, int n_rows, int k, const int *labels) {
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < n_rows) {
        const int label = labels[row];
        int *dst = output + row * k;   // start of this thread's row
        for (int col = 0; col < k; ++col) {
            dst[col] = label;
        }
    }
} |
22,124 | /*----------------------------------------------------------------------
Program pdf0.c computes a pair distribution function for n atoms
given the 3D coordinates of the atoms.
----------------------------------------------------------------------*/
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#define NHBIN 2000 // Histogram size
float al[3]; // Simulation box lengths
int n; // Number of atoms
float *r; // Atomic position array
FILE *fp;
__constant__ float DALTH[3];
__constant__ int DN;
__constant__ float DDRH;
// Transfer-of-sign helper (FORTRAN SIGN-style): returns +v if x > 0, else -v.
__device__ float d_SignR(float v,float x) {if (x > 0) return v; else return -v;}
// Builds the pair-distance histogram: each (blockIdx.x, blockIdx.y) tile owns
// an i-range x j-range of atoms and its threads stride that tile. Only i < j
// pairs are binned (each pair counted once), with minimum-image wrapping via
// the half-box lengths in DALTH. Bins are bumped with atomicAdd since many
// threads can hit the same bin concurrently.
__global__ void gpu_histogram_kernel(float *r,float *nhis) {
int i,j,a,ih;
float rij,dr;
int iBlockBegin = (DN/gridDim.x)*blockIdx.x;
int iBlockEnd = min((DN/gridDim.x)*(blockIdx.x+1),DN);
int jBlockBegin = (DN/gridDim.y)*blockIdx.y;
int jBlockEnd = min((DN/gridDim.y)*(blockIdx.y+1),DN);
for (i=iBlockBegin+threadIdx.x; i<iBlockEnd; i+=blockDim.x) {
for (j=jBlockBegin+threadIdx.y; j<jBlockEnd; j+=blockDim.y) {
if (i<j) {
// Process (i,j) atom pair
rij = 0.0;
for (a=0; a<3; a++) {
dr = r[3*i+a]-r[3*j+a];
/* Periodic boundary condition */
dr = dr-d_SignR(DALTH[a],dr-DALTH[a])-d_SignR(DALTH[a],dr+DALTH[a]);
rij += dr*dr;
}
rij = sqrt(rij); /* Pair distance */
ih = rij/DDRH;
// nhis[ih] += 1.0; /* Entry to the histogram */
atomicAdd(&nhis[ih],1.0);
} // end if i<j
} // end for j
} // end for i
}
/*--------------------------------------------------------------------*/
/*----------------------------------------------------------------------
Constructs a histogram NHIS for atomic-pair distribution on the GPU and
writes the normalized pair distribution function g(r) to pdf_gpu.d.
Fixes: dev_r and dev_nhis were leaked; cudaMemset was passed the double
literal 0.0 where an int byte value is expected; fopen was unchecked.
----------------------------------------------------------------------*/
void histogram() {
    float alth[3];                // half the simulation box lengths
    float* nhis;                  // host histogram
    float rhmax,drh,density,gr;
    int a,ih;
    float* dev_r;                 // device atomic positions
    float* dev_nhis;              // device histogram
    /* Half the simulation box size */
    for (a=0; a<3; a++) alth[a] = 0.5*al[a];
    /* Max. pair distance RHMAX & histogram bin size DRH */
    rhmax = sqrt(alth[0]*alth[0]+alth[1]*alth[1]+alth[2]*alth[2]);
    drh = rhmax/NHBIN;
    nhis = (float*)malloc(sizeof(float)*NHBIN);
    cudaMalloc((void**)&dev_r,sizeof(float)*3*n);
    cudaMalloc((void**)&dev_nhis,sizeof(float)*NHBIN);
    cudaMemcpy(dev_r,r,3*n*sizeof(float),cudaMemcpyHostToDevice);
    // cudaMemset takes an int byte value; 0 correctly zeroes float bins.
    cudaMemset(dev_nhis,0,NHBIN*sizeof(float));
    // Push kernel parameters into constant memory.
    cudaMemcpyToSymbol(DALTH,alth,sizeof(float)*3,0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(DN,&n,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(DDRH,&drh,sizeof(float),0,cudaMemcpyHostToDevice);
    dim3 numBlocks(8,8,1);
    dim3 threads_per_block(16,16,1);
    // Compute dev_nhis on GPU from dev_r.
    gpu_histogram_kernel<<<numBlocks,threads_per_block>>>(dev_r,dev_nhis);
    // Blocking copy synchronizes with the kernel.
    cudaMemcpy(nhis,dev_nhis,NHBIN*sizeof(float),cudaMemcpyDeviceToHost);
    // Release device buffers (previously leaked).
    cudaFree(dev_r);
    cudaFree(dev_nhis);
    density = n/(al[0]*al[1]*al[2]);
    /* Print out the normalized histogram */
    fp = fopen("pdf_gpu.d","w");
    if (fp == NULL) { free(nhis); return; }   // nowhere to write
    for (ih=0; ih<NHBIN; ih++) {
        gr = nhis[ih]/(2*M_PI*pow((ih+0.5)*drh,2)*drh*density*n);
        fprintf(fp,"%e %e\n",(ih+0.5)*drh,gr);
    }
    fclose(fp);
    free(nhis);
}
|
22,125 | #include "includes.h"
// String-matching automaton pass over d_text. The goto/failure/output tables
// suggest an Aho-Corasick-style automaton — TODO confirm against the host
// code that builds them. The text is split into per-block, then per-thread
// chunks; each thread scans its chunk extended by m-1 characters so patterns
// straddling a chunk boundary are still found, and accumulates the output
// table value of each visited state into d_out[idx].
// NOTE(review): characters are offset by 'A', so the alphabet is assumed to
// start at 'A'; inputs outside that range would index the tables out of
// bounds — confirm the caller sanitizes the text.
__global__ void global_memory_kernel(int *d_go_to_state, unsigned int *d_failure_state, unsigned int *d_output_state, unsigned char *d_text, unsigned int *d_out, size_t pitch, int m, int n, int p_size, int alphabet, int num_blocks ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Pitch is in bytes; convert to an element stride for the int table.
int effective_pitch = pitch / sizeof ( int );
int chars_per_block = n / num_blocks;
int start_block = blockIdx.x * chars_per_block;
int stop_block = start_block + chars_per_block;
int chars_per_thread = ( stop_block - start_block ) / blockDim.x;
int start_thread = start_block + chars_per_thread * threadIdx.x;
int stop_thread;
// The very last thread runs to the end of the text; all others overlap the
// next chunk by m-1 characters to catch boundary-straddling matches.
if( blockIdx.x == num_blocks -1 && threadIdx.x==blockDim.x-1)
stop_thread = n - 1;
else stop_thread = start_thread + chars_per_thread + m-1;
int r = 0, s;
int column;
for ( column = start_thread; ( column < stop_thread && column < n ); column++ ) {
// Follow failure links until a goto transition exists (-1 = no transition).
while ( ( s = d_go_to_state[r * effective_pitch + (d_text[column]-(unsigned char)'A')] ) == -1 )
r = d_failure_state[r];
r = s;
d_out[idx] += d_output_state[r];
}
} |
22,126 | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
/* Wall-clock time in seconds (microsecond resolution) from gettimeofday. */
double seconds(){
    struct timeval now;
    struct timezone tz;
    (void)gettimeofday(&now, &tz);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
/* Fill ip[0..size) with pseudo-random floats in [0.0, 2.55] (rand & 0xFF / 100). */
void initialData(float *ip, int size){
    for (int k = 0; k < size; ++k)
        ip[k] = (float)(rand() & 0xFF) / 100.0f;
}
/*
 * Compare host and GPU result arrays element-wise and report the first
 * element whose absolute difference exceeds a small tolerance.
 * Fix: the original used abs(), which resolves to the integer overload
 * and truncates any |difference| < 1 to 0 — real floating-point
 * mismatches were therefore never reported.  The magnitude is now taken
 * without truncation (no new header needed).
 */
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0e-8;
    for (int i = 0; i < N; i++){
        double diff = (double)hostRef[i] - (double)gpuRef[i];
        if (diff < 0.0) diff = -diff;          // |hostRef[i] - gpuRef[i]|
        if (diff > epsilon){
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
            break;
        }
    }
    return;
}
/* C[i] = A[i+offset] + B[i+offset]; threads whose shifted index reaches n
   do nothing.  The offset load is the point of the benchmark (misaligned
   global reads). */
__global__ void readOffset(float *A,float *B,float *C,const int n,int offset){
    unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int src = gid + offset;   // shifted read index
    if (src < n)
        C[gid] = A[src] + B[src];
}
/* Host reference: C[k] = A[k+offset] + B[k+offset] for k in [0, n-offset). */
void sumArraysOnHost(float *A, float *B, float *C, const int n, int offset){
    int out = 0;
    for (int in = offset; in < n; ++in, ++out)
        C[out] = A[in] + B[in];
}
/*
 * Driver for the readOffset benchmark: builds two identical float arrays,
 * computes the offset sum on host and device, times the kernel, and
 * verifies the device result.
 * Usage: prog [offset] [blocksize]
 */
int main(int argc, char **argv){
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp,dev);
    printf("%s starting reduction at ",argv[0]);
    printf("device %d: %s ",dev,deviceProp.name);
    cudaSetDevice(dev);
    // set up array size
    int nElem = 1 << 20; // total number of elements to reduce
    printf(" with array size %d \n",nElem);
    size_t nBytes = nElem * sizeof(float);
    // set up offset (argv[1]) and kernel block size (argv[2])
    int blocksize = 512;
    int offset = 0;
    if (argc > 1) offset = atoi(argv[1]);
    // Fix: argv[2] selects the block size; the original assigned it to
    // `offset` a second time, so blocksize could never be overridden.
    if (argc > 2) blocksize = atoi(argv[2]);
    // execution configuration
    dim3 block(blocksize,1);
    dim3 grid((nElem+block.x-1)/block.x,1);
    // allocate host mem
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);
    // initialize host array (h_B is a byte copy of h_A)
    initialData(h_A,nElem);
    memcpy(h_B,h_A,nBytes);
    // reference sum at host side
    sumArraysOnHost(h_A,h_B,hostRef,nElem,offset);
    // allocate device memory
    float *d_A, *d_B, *d_C;
    cudaMalloc((float **)&d_A,nBytes);
    cudaMalloc((float **)&d_B,nBytes);
    cudaMalloc((float **)&d_C,nBytes);
    // copy data from host to device (both buffers get h_A's values,
    // which matches the host reference since h_B == h_A)
    cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_A,nBytes,cudaMemcpyHostToDevice);
    // launch and time the kernel
    double iStart = seconds();
    readOffset<<<grid,block>>>(d_A,d_B,d_C,nElem,offset);
    cudaDeviceSynchronize();
    double iElaps = seconds() - iStart;
    printf("readoffset <<<%4d,%4d>>> offset %4d elapsed %f sec\n",grid.x,block.x,offset,iElaps);
    cudaGetLastError();
    // copy kernel result back to host side and check results
    cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
    checkResult(hostRef,gpuRef,nElem-offset);
    // free host and device mem
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(hostRef);  // fix: previously leaked
    free(gpuRef);   // fix: previously leaked
    // reset device
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
|
22,127 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N (1024)
/*
 * Repeatedly (1,000,000 outer iterations) multiplies s[] by d[]
 * element-wise over each thread's assigned range.
 * NOTE(review): the partitioning looks wrong — idx_start for threadIdx.x
 * and threadIdx.x+1 differ by only 1 while each range spans `part`
 * elements, so ranges of threads in the same block overlap; presumably
 * `part * (blockDim.x * blockIdx.x + threadIdx.x)` was intended — confirm.
 * NOTE(review): `s[i] /= 3` executes after the inner loop with
 * i == idx_end, i.e. it divides one element OUTSIDE this thread's range
 * each outer iteration — confirm whether it belongs inside the inner loop.
 */
__global__ void inc(int *s, int *d, int len)
{
int i,j;
int part; // number of elements each thread handles
int idx_start, idx_end; // this thread's range [idx_start, idx_end)
part = len / (gridDim.x * blockDim.x); // blockDim: threads per block
idx_start = part * blockDim.x * blockIdx.x + threadIdx.x; // threadIdx: own thread number
idx_end = idx_start + part;
for (j=0; j<1000000; j++) {
for (i = idx_start; i < idx_end; i++)
s[i] *= d[i];
s[i] /= 3;
}
return;
}
int dat0[N], dat1[N];
/*
 * Host driver: fills the global dat0/dat1 arrays, runs the inc kernel
 * with 32 blocks x 32 threads, and reports the results plus the elapsed
 * clock() ticks.  Any command-line argument suppresses the verbose dumps.
 * Fix: the device buffers s and d were never released.
 */
int main(int argc, char *argv[])
{
    bool dout;
    int i;
    int c_time;
    int *s, *d;
    size_t array_size;
    if (argc > 1)
        dout = false;
    else dout = true;
    for (i=0; i<N;i++) {
        dat0[i] = i+100;
        dat1[i] = N-i;
    }
    if (dout) {
        printf("input:");
        for (i=0; i<N;i++)
            printf("[%d*%d]", dat0[i], dat1[i]);
        printf("\n");
    }
    array_size = sizeof(int) * N;
    cudaMalloc((void **)&s, array_size);
    cudaMalloc((void **)&d, array_size);
    cudaMemcpy(s, dat0, array_size,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d, dat1, array_size,
               cudaMemcpyHostToDevice);
    // time the kernel with the host clock
    c_time = (int)clock();
    inc<<<32, 32>>>(s, d, N);
    cudaDeviceSynchronize();
    c_time = (int)clock() - c_time;
    cudaMemcpy(dat0, s, array_size,
               cudaMemcpyDeviceToHost);
    cudaMemcpy(dat1, d, array_size,
               cudaMemcpyDeviceToHost);
    // Fix: free the device buffers (previously leaked).
    cudaFree(s);
    cudaFree(d);
    if (dout) {
        printf("output:");
        for (i=0; i<N; i++)
            printf("%d ", dat0[i]);
        printf("\n");
    }
    printf("Time:- %d\n",c_time);
    return 0;
}
|
22,128 |
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <time.h>
#include <sys/time.h>
#include "imageFilter_kernel.cu"
#define IMG_DATA_OFFSET_POS 10
#define BITS_PER_PIXEL_POS 28
int swap;
void test_endianess();
void swap_bytes(char *bytes, int num_bytes);
/*
 * Image-filter driver: loads a 24-bit BMP, copies its pixel data to the
 * device, runs one of three filter kernels (part a/b/c from
 * imageFilter_kernel.cu), and writes the filtered BMP back out.
 * Fixes: close the input fd, free host/device buffers (all previously
 * leaked), write the output with one fwrite instead of a per-byte
 * fprintf loop, and use cudaDeviceSynchronize (cudaThreadSynchronize is
 * deprecated).
 */
int main(int argc, char *argv[])
{
    int fd;
    char *fdata;
    struct stat finfo;
    char * inputfname;
    char * outputfname;
    if (argc < 4)
    {
        printf("USAGE: %s <bitmap input filename> <bitmap output file name> <part specifier>\n", argv[0]);
        exit(1);
    }
    inputfname = argv[1];
    outputfname = argv[2];
    char partId = argv[3][0];
    if(partId != 'a' && partId != 'b' && partId != 'c')
    {
        printf("Please provide a part specifier: a, b, or c\n");
        exit(1);
    }
    printf("Image filter: Running...\n");
    // Read the whole bitmap into memory.
    fd = open(inputfname, O_RDONLY);
    fstat(fd, &finfo);
    fdata = (char*) malloc(finfo.st_size);
    read (fd, fdata, finfo.st_size);
    close(fd);   // fix: descriptor was leaked
    if ((fdata[0] != 'B') || (fdata[1] != 'M'))
    {
        printf("File is not a valid bitmap file. Terminating the program\n");
        exit(1);
    }
    test_endianess(); // will set the variable "swap"
    unsigned short *bitsperpixel = (unsigned short *)(&(fdata[BITS_PER_PIXEL_POS]));
    if (swap)
    {
        printf("swapping\n");
        swap_bytes((char *)(bitsperpixel), sizeof(*bitsperpixel));
    }
    // ensure its 3 bytes per pixel
    if (*bitsperpixel != 24)
    {
        printf("Error: Invalid bitmap format - ");
        printf("This application only accepts 24-bit pictures. Exiting\n");
        exit(1);
    }
    unsigned short *data_pos = (unsigned short *)(&(fdata[IMG_DATA_OFFSET_POS]));
    if (swap)
    {
        swap_bytes((char *)(data_pos), sizeof(*data_pos));
    }
    int imgdata_bytes = (int)finfo.st_size - (int)(*(data_pos));
    printf("This file has %d bytes of image data, %d pixels\n", imgdata_bytes, imgdata_bytes / 3);
    int width = *((int*)&fdata[18]);
    printf("Width: %d\n", width);
    int height = *((int*)&fdata[22]);
    printf("Height: %d\n", height);
    int fileSize = (int) finfo.st_size;
    // p points to the first pixel
    char* p = &(fdata[*data_pos]);
    // Launch configuration: 12 blocks of 1024 threads.
    dim3 grid(12, 1, 1);
    dim3 block(1024, 1, 1);
    char* d_inputPixels;
    cudaMalloc((void**) &d_inputPixels, width * height * 3);
    cudaMemcpy(d_inputPixels, p, width * height * 3, cudaMemcpyHostToDevice);
    char* d_outputPixels;
    cudaMalloc((void**) &d_outputPixels, width * height * 3);
    cudaMemset(d_outputPixels, 0, width * height * 3);
    struct timeval start_tv, end_tv;
    time_t sec;
    time_t ms;
    time_t diff;
    gettimeofday(&start_tv, NULL);
    // Work decomposition used by the kernels (per-thread pixel counts and
    // the 120x120 tile grid for part c).
    int numberOfthread = grid.x * block.x;
    int pixelPerthread = (width * height) / numberOfthread;
    if((width * height) % numberOfthread != 0)
        pixelPerthread += 1;
    int blocksRows = (width - 8) / 120;
    int blocksCols = (height - 8) / 120;
    if((width - 8) % 120 != 0)
        blocksRows += 1;
    if((height - 8) % 120 != 0)
        blocksCols += 1;
    int times = (blocksRows * blocksCols) / 12;
    if((blocksRows * blocksCols) % 12 != 0)
        times += 1;
    if(partId == 'a')
    {
        imageFilterKernelPartA<<<grid, block>>>((char3*) d_inputPixels, (char3*) d_outputPixels, width, height , pixelPerthread);
    }
    else if(partId == 'b')
    {
        imageFilterKernelPartB<<<grid, block>>>((char3*) d_inputPixels, (char3*) d_outputPixels, width, height , pixelPerthread, numberOfthread);
    }
    else if(partId == 'c')
    {
        imageFilterKernelPartC<<<grid, block>>>((char3*) d_inputPixels, (char3*) d_outputPixels, width, height, blocksRows, blocksCols, times);
    }
    cudaDeviceSynchronize();   // fix: cudaThreadSynchronize is deprecated
    gettimeofday(&end_tv, NULL);
    sec = end_tv.tv_sec - start_tv.tv_sec;
    ms = end_tv.tv_usec - start_tv.tv_usec;
    diff = sec * 1000000 + ms;
    printf("%10s:\t\t%fms\n", "Time elapsed", (double)((double)diff/1000.0));
    char* outputPixels = (char*) malloc(height * width * 3);
    cudaMemcpy(outputPixels, d_outputPixels, height * width * 3, cudaMemcpyDeviceToHost);
    memcpy(&(fdata[*data_pos]), outputPixels, height * width * 3);
    // Fix: write the buffer in one call instead of a per-byte fprintf loop.
    FILE *writeFile;
    writeFile = fopen(outputfname,"w+");
    fwrite(fdata, 1, fileSize, writeFile);
    fclose(writeFile);
    // Fix: release device and host buffers (previously leaked).
    cudaFree(d_inputPixels);
    cudaFree(d_outputPixels);
    free(outputPixels);
    free(fdata);
    return 0;
}
/* Probe this host's byte order and set the global `swap` flag:
   0 = little-endian (no byte swapping needed), 1 = big-endian. */
void test_endianess() {
    unsigned int probe = 0x12345678;
    char low_byte = *(char *)(&probe);
    if (low_byte == 0x78) {
        swap = 0;        // little-endian
    }
    else if (low_byte == 0x12) {
        swap = 1;        // big-endian
    }
    else {
        printf("Error: Invalid value found in memory\n");
        exit(1);
    }
}
/* Reverse num_bytes bytes of `bytes` in place (endianness conversion). */
void swap_bytes(char *bytes, int num_bytes)
{
    int lo = 0;
    int hi = num_bytes - 1;
    while (lo < hi) {
        char tmp = bytes[lo];
        bytes[lo] = bytes[hi];
        bytes[hi] = tmp;
        ++lo;
        --hi;
    }
}
|
22,129 | #include "conv2d-bias-add.hh"
#include "conv2d-bias-add-grad.hh"
#include "graph.hh"
#include "../runtime/graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
#include "ops-builder.hh"
#include <cassert>
#include <stdexcept>
namespace ops
{
// Graph op that adds a bias to a conv2d output. The output shape is
// copied verbatim from z's 4-D shape; the bias is broadcast-added along
// the last dimension (see compile() below).
Conv2DBiasAdd::Conv2DBiasAdd(Op* z, Op* bias)
: Op("conv2d_bias_add",
Shape({z->shape_get()[0], z->shape_get()[1],
z->shape_get()[2], z->shape_get()[3]}),
{z, bias})
{}
// Lower this op into the runtime graph: allocate the output tensor and
// emit a matrix/row-vector add node.
void Conv2DBiasAdd::compile()
{
auto& g = Graph::instance();
// compiled() yields each predecessor's runtime node and output buffer:
// preds()[0] = conv output z, preds()[1] = bias vector.
auto& cz = g.compiled(preds()[0]);
auto& cbias = g.compiled(preds()[1]);
Shape out_shape({int(cz.out_shape[0]), int(cz.out_shape[1]),
int(cz.out_shape[2]), int(cz.out_shape[3])});
dbl_t* out_data = tensor_alloc(out_shape.total());
int input_size[4] = { cz.out_shape[0], cz.out_shape[1],
cz.out_shape[2], cz.out_shape[3]};
// The 4-D tensor is treated as a (d0*d1*d2) x d3 matrix and the bias is
// added as a row vector to every row, i.e. broadcast over the first
// three dimensions.
auto out_node = rt::Node::op_mat_rvect_add(cz.out_data, cbias.out_data, out_data,
input_size[0] * input_size[1] * input_size[2],
input_size[3],
{cz.out_node, cbias.out_node});
/*
auto out_node = rt::Node::op_conv2d_bias_add(cz.out_data, cbias.out_data,
out_data, input_size,
{cz.out_node, cbias.out_node});
*/
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
// Gradient w.r.t. child `index`: an add passes dout through unchanged to
// z (index 0); the bias gradient (index 1) is the reduction of dout over
// the broadcast axes, delegated to conv2d_bias_add_grad.
Op* Conv2DBiasAdd::child_grad(std::size_t index, Op* dout)
{
assert(index < 2);
if (dout == nullptr)
throw std::runtime_error {"conv2d_bias_add must not be the final node of the gradient"};
auto& builder = OpsBuilder::instance();
if (index == 0)
return dout;
else
return builder.conv2d_bias_add_grad(dout);
}
}
|
22,130 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<cuda_runtime.h>
#define THREAD_NUM 256
#define MATRIX_SIZE 1000
int blocks_num = (MATRIX_SIZE + THREAD_NUM - 1) / THREAD_NUM;
/* Fill a (MATRIX_SIZE x MATRIX_SIZE, row-major) with 0,1,2,... and the
   vector b (length MATRIX_SIZE) with k*k. */
void generateMatrix(float *a, float *b) //a for matrix b for vector
{
    const int total = MATRIX_SIZE * MATRIX_SIZE;
    for (int k = 0; k < total; ++k)
        a[k] = k;
    for (int k = 0; k < MATRIX_SIZE; ++k)
        b[k] = k * k;
}
/* Matrix-vector product: c[row] = dot(a[row, :], b), one thread per
   output row; threads with row >= n do nothing. */
__global__ static void CUDAkernal(const float *a, const float *b, float *c, int n)
{
    const int row = blockIdx.x * THREAD_NUM + threadIdx.x;
    if (row >= n)
        return;
    float acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row * n + k] * b[k];
    c[row] = acc;
}
/*
 * Matrix-vector multiply driver: builds a MATRIX_SIZE x MATRIX_SIZE
 * matrix and a MATRIX_SIZE vector, multiplies them on the GPU, and
 * copies the result back into c.
 * Fix: host buffers were leaked and main had no return statement.
 */
int main()
{
    float *a, *b, *c;
    float *cuda_a, *cuda_b, *cuda_c;
    int n = MATRIX_SIZE;
    // host allocations
    a = (float*)malloc(sizeof(float)*n*n);
    b = (float*)malloc(sizeof(float)*n);
    c = (float*)malloc(sizeof(float)*n);
    // device allocations
    cudaMalloc((void**)&cuda_a, sizeof(float)*n*n);
    cudaMalloc((void**)&cuda_b, sizeof(float)*n);
    cudaMalloc((void**)&cuda_c, sizeof(float)*n);
    generateMatrix(a, b);
    // copy inputs, launch one thread per output row, copy result back
    cudaMemcpy(cuda_a, a, sizeof(float)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_b, b, sizeof(float)*n, cudaMemcpyHostToDevice);
    CUDAkernal<<<blocks_num, THREAD_NUM, 0>>>(cuda_a, cuda_b, cuda_c, n);
    cudaMemcpy(c, cuda_c, sizeof(float)*n, cudaMemcpyDeviceToHost);
    cudaFree(cuda_a);
    cudaFree(cuda_b);
    cudaFree(cuda_c);
    // Fix: free host buffers (previously leaked).
    free(a);
    free(b);
    free(c);
    return 0;
}
|
22,131 | #include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <iostream>
using namespace std;
/*structure of the nodes of the tree*/
/*
 * Device/host strcmp: compares NUL-terminated strings, returning <0, 0,
 * or >0 like the C library strcmp.
 * Fix: the loop guard tested the POINTER `ptr1` (never NULL) instead of
 * the character `*ptr1`; for equal strings the original ran past both
 * terminators, reading out of bounds until the bytes happened to differ.
 */
__host__ __device__ int strcmp_(char* str1,char* str2){
    const unsigned char* ptr1 = (const unsigned char*)str1;
    const unsigned char* ptr2 = (const unsigned char*)str2;
    while (*ptr1 && *ptr1 == *ptr2)
        ++ptr1, ++ptr2;
    return *ptr1 - *ptr2;
}
// AVL tree node stored in a flat array: every link (parent/left/right and
// the pred/succ in-order threading used by search_2) is an INDEX into that
// array, with INT_MIN as the null index.  Keys are fixed 80-char buffers.
template<class T>
class Node{
public:
int parent;
T key[80];
int height;
int left;
int right;
int succ,pred;
// Construct from a NUL-terminated key; all links and the height start at
// INT_MIN (unset).
// NOTE(review): key_ longer than 79 characters overflows `key` — confirm
// callers bound their input.
__host__ __device__ Node(T* key_){
strcpy(key,key_);
parent=height=left=right=INT_MIN;
succ=pred=INT_MIN;
}
// Default-construct as an empty-key node by re-running the key
// constructor via placement new.
__host__ __device__ Node(){
new(this)Node("");
}
// Stream a node as its key (host-side debugging output).
friend ostream &operator<<(ostream &os, Node& node){
os << node.key;
return os;
}
// Ordering/equality delegate to the device-safe string compare strcmp_.
__host__ __device__ bool operator<(Node &node){
return strcmp_(key,node.key)<0;
}
__host__ __device__ bool operator==(Node& node){
return strcmp_(key,node.key)==0;
}
__host__ __device__ bool operator>(Node &node){
return strcmp_(key,node.key)>0;
}
};
/*Definition of the tree*/
// Array-backed AVL tree of string keys usable from host and device.
// arr[0] is a sentinel; arr[root] (index 1) is initialized with a
// near-maximal key so every real key inserts below it.  next_index is the
// next free slot in arr.
template<class T>
class Tree{
public:
int root,next_index;
int size;
/*container of the tree*/
//node at index 0 is sentinel node with key INT_MIN
// initialized root with key INT_MAX and index=1
Node<T>* arr;
// Build a tree with capacity `size_`; wires the sentinel (index 0) and
// the pseudo-root (index 1) together via child and pred/succ links.
// NOTE(review): str_[79] is never written before strcpy reads it — the
// buffer may lack a NUL terminator; confirm.
__host__ __device__ Tree(int size_):root(1),next_index(2),size(size_){
arr= new Node<T>[size];
char str_[80];
for(int i=0;i<79;++i)
str_[i]=(char)127;
strcpy(arr[root].key,str_);
arr[root].parent = 0;
arr[0].right= root;
arr[root].pred= 0;
arr[0].succ= root;
arr[root].height=0;
};
// Trivial default constructor: leaves all members uninitialized.
__host__ __device__ Tree(){}
void inorder(int);
void preorder(int);
void postorder(int);
__host__ __device__ int search(Node<T>&);
__host__ __device__ int search(T*);
__host__ __device__ int height(int);
__host__ __device__ void insert(T*);
__host__ __device__ void delete_(T*);
__host__ __device__ void delete_rebalance(int);
__host__ __device__ void init_node(Node<T>&, int, int, int, int);
__host__ __device__ int tri_node_restructure(int, int, int);
__host__ __device__ void recompute_height(int);
__host__ __device__ int taller_child(int);
__host__ __device__ bool search_2(T*);
void insert_nodes(char*,int);
};
// In-order traversal from `node`: prints keys in sorted order, one per
// line.  The INT_MIN null index and empty-key slots stop the recursion.
template<class T>
void Tree<T>::inorder(int node){
    if(node == INT_MIN)
        return;
    if(strcmp_(arr[node].key, "") == 0)
        return;
    inorder(arr[node].left);
    cout << arr[node] << "\n";
    inorder(arr[node].right);
}
/*
 * Pre-order traversal from `index`: prints each key before its subtrees.
 * Fix: the empty-key guard passed the Node object (arr[index]) to
 * strcmp_, which takes char* — it must pass the key field, matching
 * inorder()/postorder().  The original could not compile once this
 * template was instantiated.
 */
template<class T>
void Tree<T>::preorder(int index){
    if(index==INT_MIN || strcmp_(arr[index].key,"")==0)
        return;
    cout << arr[index] <<" ";
    preorder(arr[index].left);
    preorder(arr[index].right);
}
// Post-order traversal from `node`: both subtrees first, then the key.
// The INT_MIN null index and empty-key slots stop the recursion.
template<class T>
void Tree<T>::postorder(int node){
    if(node == INT_MIN)
        return;
    if(strcmp_(arr[node].key, "") == 0)
        return;
    postorder(arr[node].left);
    postorder(arr[node].right);
    cout << arr[node] << " ";
}
// Descend from the root comparing node keys; returns the index of the
// matching node if present, otherwise the index of the leaf that would
// become the new node's parent (INT_MIN only when the tree is empty).
template<typename T> __host__ __device__
int Tree<T>::search(Node<T> &node){
int temp=root;
while(temp!=INT_MIN){
if(arr[temp]==node)
return temp;
// go right when the current key is smaller than the target, else left
int child= (arr[temp]<node? arr[temp].right:
arr[temp].left);
if(child==INT_MIN)
return temp;
temp= child;
}
return temp;
}
/*
 * Descend from the root comparing string keys; returns the index of the
 * node holding key_ if present, otherwise the index of the leaf that
 * would be its parent (INT_MIN only for an empty tree).
 * Fix: the original compared `arr[temp].key == key_` / `< key_`, which
 * compares POINTERS (the key array decays to char*), not string contents,
 * so lookups never matched.  Compare with strcmp_ like the Node
 * operators and the search(Node&) overload do.
 */
template<typename T> __host__ __device__
int Tree<T>::search(T* key_){
    int temp= root;
    while(temp!=INT_MIN){
        int cmp = strcmp_(arr[temp].key, key_);
        if(cmp == 0)
            return temp;
        // go right when the current key is smaller than the target
        int child = (cmp < 0 ? arr[temp].right : arr[temp].left);
        if(child==INT_MIN)
            return temp;
        temp=child;
    }
    return temp;
}
// Walk from x up towards the root refreshing cached heights after a
// structural change.  height(child) already adds 1 for the edge, so each
// node's height is the max over its children's height()+0.
// NOTE(review): the loop exits when x reaches the root, so the root's own
// height is never refreshed here — confirm this is compensated elsewhere
// (the constructor pins arr[root].height = 0).
template<typename T> __host__ __device__
void Tree<T>::recompute_height(int x){
while(x!=root){
arr[x].height = max(height(arr[x].right), height(arr[x].left));
x= arr[x].parent;
}
}
// Height contribution of the subtree rooted at `node`: 0 for the INT_MIN
// null index, otherwise the cached node height plus 1 for the edge.
template<typename T> __host__ __device__
int Tree<T>::height(int node){
    return node == INT_MIN ? 0 : arr[node].height + 1;
}
// Classic AVL tri-node restructuring: given grandparent x, child y and
// grandchild z (x = parent(y), y = parent(z)), relabel them a < b < c in
// key order, hoist b into x's place, make a and c its children, and
// reattach the four orphaned subtrees t0..t3 left-to-right.  Returns the
// index of the new subtree root b.
template<typename T> __host__ __device__
int Tree<T>::tri_node_restructure(int x, int y, int z){
/*
x= parent(y)
y= parent(z)
*/
bool z_is_left_child= (arr[y].left ==z);
bool y_is_left_child = (arr[x].left== y);
int a=INT_MIN,b=INT_MIN,c=INT_MIN;
int t0=INT_MIN,t1=INT_MIN,t2=INT_MIN,t3=INT_MIN;
// Four cases by which side z and y hang from their parents (LL, LR, RL, RR).
if(z_is_left_child && y_is_left_child){
a= z; b = y; c= x;
t0 = arr[z].left; t1= arr[z].right;
t2= arr[y].right; t3= arr[x].right;
// printf("first if: %d %d %d %d\n",t0,t1,t2,t3);
}else if(!z_is_left_child && y_is_left_child){
a= y; b=z; c= x;
t0= arr[y].left; t1= arr[z].left;
t2= arr[z].right; t3= arr[x].right;
// printf("second if: %d %d %d %d\n",t0,t1,t2,t3);
}else if(z_is_left_child && !y_is_left_child){
a=x; c= y; b=z;
t0= arr[x].left; t1= arr[z].left;
t2= arr[z].right; t3= arr[y].right;
// printf("third if: %d %d %d %d\n",t0,t1,t2,t3);
}else{
a=x; b=y; c=z;
t0= arr[x].left; t1= arr[y].left;
t2= arr[z].left; t3= arr[z].right;
// printf("fourth if:%d %d %d %d %d\n",arr[z].left,t0,t1,t2,t3);
}
// attach b to the parent of x (or make b the new root)
if(x==root){
root= b;
arr[b].parent= INT_MIN;
}else{
int parent_x= arr[x].parent;
arr[b].parent=parent_x;
if(arr[parent_x].left == x)
arr[parent_x].left = b;
else arr[parent_x].right = b;
}
/* make b
/ \
a c */
arr[b].left= a;
arr[a].parent= b;
arr[b].right = c;
arr[c].parent =b;
/*attach t0, t1, t2 and t3*/
arr[a].left = t0;
if(t0!=INT_MIN) arr[t0].parent = a;
arr[a].right = t1;
if(t1!=INT_MIN) arr[t1].parent = a;
arr[c].left= t2;
if(t2!=INT_MIN) arr[t2].parent = c;
arr[c].right = t3;
if(t3!=INT_MIN) arr[t3].parent = c;
// Heights changed along both new child paths; refresh them upwards.
recompute_height(a);
recompute_height(c);
return b;
}
// Wire a freshly written node at index curr_ind into the tree: set its
// parent and height, splice it into the pred/succ in-order threading
// between pred_ and succ_, and attach it as the correct child of parent_.
template<typename T> __host__ __device__
void Tree<T>::init_node(Node<T>& node,int curr_ind, int pred_, int succ_, int parent_){
arr[curr_ind].parent= parent_;
arr[curr_ind].height=0;
arr[curr_ind].pred= pred_;
arr[curr_ind].succ= succ_;
arr[succ_].pred= curr_ind;
arr[pred_].succ= curr_ind;
// right child when the parent's key is smaller than the new node's key
if(arr[parent_] < node)
arr[parent_].right= curr_ind;
else arr[parent_].left= curr_ind;
}
// Insert key_ into slot next_index: stage the key in place, locate the
// attachment point, splice the node in, then climb to the first
// unbalanced ancestor and fix it with one tri-node restructure.
// Duplicate keys are rejected (the staged slot is reset to the empty key
// and next_index is not advanced).
template<typename T> __host__ __device__
void Tree<T>::insert(T* key_){
strcpy(arr[next_index].key,key_);
int p= search(arr[next_index]);
if(arr[p] == arr[next_index]){
// duplicate key: clear the staged slot and bail out
strcpy(arr[next_index].key,"");
return;
}
// in-order predecessor: p itself when p's key is smaller, else p's pred
int pred_= arr[p] < arr[next_index]?p: arr[p].pred;
int succ_= arr[pred_].succ;
init_node(arr[next_index],next_index,pred_,succ_,p);
// // after insert maximum one node will get imbalanced
recompute_height(p);
int x,y,z;
x=y=z= next_index;
// climb until the first node whose children differ in height by > 1;
// y and z trail one and two steps behind x.
// NOTE(review): the root itself is never balance-checked — confirm.
while(x!=root){
if(abs(height(arr[x].left) - height(arr[x].right))<=1){
z=y;
y=x;
x= arr[x].parent;
}else break;
}
if(x!=root)
tri_node_restructure(x,y,z);
++next_index;
}
// Index of x's child with the greater height (the right child wins ties).
template<typename T> __host__ __device__
int Tree<T>::taller_child(int x){
    int l = arr[x].left;
    int r = arr[x].right;
    if(height(l) > height(r))
        return l;
    return r;
}
// Walk from p up to the root; wherever a node's children differ in height
// by more than 1, restructure around its taller child and grandchild —
// the standard AVL delete fix-up, which may cascade all the way up.
template<typename T> __host__ __device__
void Tree<T>::delete_rebalance(int p){
int x,y,z;
while(p!=root){
if(abs(height(arr[p].left)-height(arr[p].right))>1){
x=p;
y= taller_child(x);
z= taller_child(y);
// continue the climb from the new subtree root
p= tri_node_restructure(x,y,z);
}
p=arr[p].parent;
}
}
/*
 * Remove key_ from the tree: standard BST delete (leaf / one child / two
 * children via in-order successor) followed by an AVL rebalance walk.
 * Fix: the not-found guard was inverted — the original returned when the
 * key WAS found (strcmp_ == 0) and fell through to mangle an unrelated
 * node when it was absent.
 * NOTE(review): the pred/succ threading links are not unlinked here —
 * confirm search_2 is expected to tolerate deleted nodes.
 */
template<typename T> __host__ __device__
void Tree<T>::delete_(T* key_){
    int p;
    int parent_;
    int succ_;
    p= search(key_);
    if(strcmp_(arr[p].key,key_)!=0)   // key not present: nothing to delete
        return;
    parent_ = arr[p].parent;
    if(arr[p].left==INT_MIN && arr[p].right == INT_MIN){
        // leaf: simply detach from the parent
        if(arr[parent_].right == p)
            arr[parent_].right= INT_MIN;
        else arr[parent_].left = INT_MIN;
        recompute_height(parent_);
        delete_rebalance(parent_);
        return;
    }else if(arr[p].left==INT_MIN){ // when deleted node has only right child
        if(arr[parent_].left==p)
            arr[parent_].left= arr[p].right;
        else arr[parent_].right = arr[p].right;
        recompute_height(parent_);
        delete_rebalance(parent_);
        return;
    }else if(arr[p].right == INT_MIN){ // when deleted node has only left child
        if(arr[parent_].left==p)
            arr[parent_].left= arr[p].left;
        else arr[parent_].right= arr[p].left;
        recompute_height(parent_);
        delete_rebalance(parent_);
        return;
    } // when deleted node has both children
    // copy the in-order successor's key into p, then unlink the successor
    succ_ = arr[p].right;
    while(arr[succ_].left!=INT_MIN)
        succ_ = arr[succ_].left;
    strcpy(arr[p].key,arr[succ_].key);
    parent_= arr[succ_].parent;
    arr[parent_].left = arr[succ_].right;
    recompute_height(parent_);
    delete_rebalance(parent_);
    return;
}
// Membership test that tolerates an imprecise search(): from the landing
// node, walk the pred threading backwards past any larger keys and the
// succ threading forwards past any smaller keys, then check both
// stopping points for an exact match.
template<typename T> __host__ __device__
bool Tree<T>::search_2(T* key_){
int p=search(key_);
Node<char> node= Node<char>(key_);
while(arr[p]> node)
p= arr[p].pred;
int x= p;
while(arr[p]< node)
p= arr[p].succ;
int y=p;
return (arr[x] == node ||arr[y] == node);
}
// Bulk-load keys from `filename`: the first line holds the node count,
// each following line is a '$'-separated record whose first field is the
// key.  Builds a fresh tree sized for size+max nodes and copies it over
// *this.
// NOTE(review): the temporary Tree object is leaked (only the shallow
// copy of its members survives, sharing tree->arr), and temp2 is parsed
// but unused — confirm intent.
template<typename T>
void Tree<T>::insert_nodes(char* filename,int max){
FILE* fp= fopen(filename,"r");
char buff[200];
fgets(buff,200,fp);
int size= atoi(buff);
Tree<char>*tree= new Tree(size+max);
while(fgets(buff,200,fp)!=NULL){
buff[strlen(buff)-1]='\0'; // strip the trailing newline
char* temp1= strtok(buff,"$");
char* temp2= strtok(NULL,"$");
// // Node<char> *node=new Node<char>(temp1);
tree->insert(temp1);
}
fclose(fp);
*this = *tree; // shallow copy: shares tree->arr
}
|
22,132 | #define M_PI 3.14159265358979323846
#include <cstdio>
#include <cstdlib>
#include <ctgmath>
#include <ctime>
//#include <complex>
// For the CUDA runtime routines (prefixed with "cuda_")
//#include <cuda.h>
#include <curand_kernel.h>
#include <curand.h>
#include <cuComplex.h>
//#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
#define NUM_OF_THREADS 10000
#define THREADS_PER_BLOCK 256
#define TURNS 25000 /*number of revolution*/
#define NE 10000 /*number of electron*/
#define NUMOFZPT 300 /*number of segmentation of phase*/
//double numofzptdb = numofzpt;
//#define _C_m_s (2.99792458e8) /*c-m/s*/
//#define R56 (0.0512e-3) /*dispersion-m*/
#define ES (0.44e-3) /*energy spread*/
#define DAMPRATE (1.45e-4) /*damping rate*/
//#define EBE (0.629e9) /*beam energy-GeV*/
//#define MANONOR (0.3e6) /*modulation amplitude-GeV*/
#define MA (0.00047694753577106518) /*normalized modulation amplitude*/
//double my_gamma = EBE/(0.511e6); /*Lorentz factor*/
#define NUMMB (20.0) /*number of microbunch*/
#define QNEP (7.4929300010076163e-006) /*for quantum excitation*/
#define MODWL (1.0e-6) /*modulation wavelength-m*/
#define NORR56 (321.69908772759482) /*normalized dispersion*/
#define DN (1.3646097851959425e-005)
#define LPRANGE (125.66370614359172) /*phase range in longitudinal phase space*/
//double zposegdb;
__constant__ int seeding = 1;
// Double-precision atomic add built from 64-bit atomicCAS — the pattern
// from the CUDA C Programming Guide (hardware atomicAdd(double*) needs
// SM60+).  Returns the value at *address from before the add.
__device__ double atomicDoubleAdd(double* address, double val){
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old); // retry until no other thread intervened
return __longlong_as_double(old);
}
// One discrete Fourier coefficient of x[0..NUMOFZPT) at harmonic 20
// (phase step y = -2*pi*20/NUMOFZPT per bin).
// Side effect: each x[l] is zeroed after being read, resetting the shared
// histogram for the next turn (relied upon by CalKernel).
// NOTE(review): sincos stores sin into tmp.x and cos into tmp.y, so the
// .x/.y accumulators are swapped relative to the usual re/im convention;
// only the magnitude is used downstream (Calculate), so this is harmless
// — confirm if phase is ever needed.
__device__ cuDoubleComplex ft(double *x){
cuDoubleComplex value = make_cuDoubleComplex(0.0, 0.0);//, I = make_cuDoubleComplex(0.0, 1.0);
int l;
double y = -2.0*M_PI/(double)NUMOFZPT*20.0;
cuDoubleComplex tmp;
for(l = 0; l < NUMOFZPT; l++){
sincos(y*(double)l, &tmp.x, &tmp.y);
value.x += x[l]*tmp.x;
value.y += x[l]*tmp.y;
//printf("%f\n",x[l]);
x[l] = 0;
}
return value;
}
// Kernel
/*
 * Tracks NE electrons through TURNS synchrotron revolutions; one thread
 * per electron (phase l_lp, relative energy l_lpth).  Every turn a
 * per-block shared phase histogram zdis[] is built, Fourier-transformed
 * by thread 0 of the block, and accumulated into bf (real parts in
 * bf[0..TURNS), imaginary parts in bf[TURNS..2*TURNS)).
 * NOTE(review): zdis is zero-initialized only when the GLOBAL id is 0,
 * i.e. only in block 0 — other blocks start from uninitialized shared
 * memory; presumably threadIdx.x == 0 was intended.  Confirm.
 * NOTE(review): __syncthreads() sits inside `if(id < NE)`; with
 * NUM_OF_THREADS (10000) rounded up to 40*256 = 10240 launched threads,
 * the last block has threads that skip the barrier — undefined behavior.
 * Confirm launch geometry.
 */
__global__ void CalKernel(double *lp, double *lpth, double *bf, curandStateXORWOW_t *states){
int tid = threadIdx.x, id = tid + blockIdx.x * THREADS_PER_BLOCK;
curandStateXORWOW_t localState = states[id];
__shared__ double zdis[NUMOFZPT]; // per-block phase histogram
double l_lp, l_lpth;
cuDoubleComplex l_bf;
int zposeg;
/*energy array*/
double iniu; /*for Gaussian random number*/
double iniv; /*for Gaussian random number*/
double err;
if(id < NE){
// initial phase uniform in [0, LPRANGE); initial energy deviation
// Gaussian with sigma ES via the Box-Muller transform
iniu = curand_uniform_double(&localState);
iniv = curand_uniform_double(&localState);
l_lp = curand_uniform_double(&localState)*LPRANGE;
l_lpth = sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv)*ES;
//if(id == 0) printf("\nlp[id] = %f, lpth[id] = %f\n",lp[id],lpth[id]);
int i;
if(id == 0){
for(i = 0; i < NUMOFZPT; i++){
zdis[i] = 0;
}
}
for(i = 0; i < TURNS; i++){
__syncthreads();
//if(id == 0 && i < 10) printf("\n**TURN: %d**\n",i);
// modulation kick, then damping + quantum excitation on the energy
l_lp += MA*sin(l_lp);
iniu = curand_uniform_double(&localState);
iniv = curand_uniform_double(&localState);
err = -DAMPRATE*l_lpth + QNEP*sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv);
//if(id == 0 && i < 10) printf("err = %f\n",err);
iniu = curand_uniform_double(&localState);
iniv = curand_uniform_double(&localState);
// phase advance through the dispersive section
l_lp += NORR56*l_lpth+ (err + DN*sqrt(-2.0*log(iniu))*cos(2.0*M_PI*iniv))*NORR56/2.0;
l_lpth = l_lpth + err;
//if(id == 0 && i < 10) printf("lp[id] = %f, lpth[id] = %f\n",lp[id],lpth[id]);
// wrap the phase and bin it into the shared histogram
l_lp = fmod(l_lp,LPRANGE);
zposeg = (l_lp/(LPRANGE/(double)NUMOFZPT));
if(zposeg >= 0) {atomicDoubleAdd(&zdis[zposeg], 1.0);}
__syncthreads();
if(threadIdx.x == 0){
// ft() also zeroes zdis for the next turn
l_bf = ft(zdis);
atomicDoubleAdd(&bf[i], l_bf.x);
atomicDoubleAdd(&bf[i+TURNS], l_bf.y);
}
}
if(l_lp < 0.0) l_lp = l_lp + LPRANGE;
lp[id] = l_lp;
lpth[id] = l_lpth;
}
}
/* Initialize one curand XORWOW state per electron: common seed, the
   thread id as the sequence number. */
__global__ void SetupKernel(curandStateXORWOW_t *states){
    const int id = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    if(id >= NE)
        return;
    curand_init(seeding, id, 0, &states[id]);
}
/*
 * Allocate device buffers, run CalKernel, and reduce the per-turn complex
 * bunching-factor sums into magnitudes on the host.  lp/lpth receive the
 * final phase / energy deviation of each electron; bf receives TURNS
 * magnitudes.
 * Fix: bf_tmp was a double[TURNS*2] STACK array (~400 KB), which risks
 * stack overflow; it is now heap-allocated and freed.
 */
void Calculate(double *lp, double *lpth, double *bf, int blocksPerGrid, curandStateXORWOW_t *states){
    cudaError_t error;
    double *d_lp, *d_lpth, *d_bf;
    double *bf_tmp = (double*)malloc(sizeof(double)*TURNS*2);
    //cudaMemcpyToSymbol(blockcounter, &counter, sizeof(int));
    // Allocate memory for result on Device
    cudaMalloc(&d_lp, sizeof(double)*NE);
    cudaMalloc(&d_lpth, sizeof(double)*NE);
    cudaMalloc(&d_bf, sizeof(double)*TURNS*2);
    cudaMemset(d_bf, 0, sizeof(double)*TURNS*2);
    // Launch Kernel
    CalKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(d_lp, d_lpth, d_bf, states);
    // check for launch error
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Copy result to Host (real sums in [0,TURNS), imaginary in [TURNS,2*TURNS))
    error = cudaMemcpy(lp, d_lp, sizeof(double)*NE, cudaMemcpyDeviceToHost);
    //printf("Error NO:%d\n", error);
    printf("CUDA error: %s\n", cudaGetErrorString(error));
    cudaMemcpy(lpth, d_lpth, sizeof(double)*NE, cudaMemcpyDeviceToHost);
    cudaMemcpy(bf_tmp, d_bf, sizeof(double)*TURNS*2, cudaMemcpyDeviceToHost);
    int i;
    for(i = 0; i < TURNS; i++){
        // |bf| = sqrt((re/NE)^2 + (im/NE)^2)
        bf[i] = (bf_tmp[i]/NE)*(bf_tmp[i]/NE) + (bf_tmp[i+TURNS]/NE)*(bf_tmp[i+TURNS]/NE);
        bf[i] = sqrt(bf[i]);
    }
    // Free Memory
    cudaFree(d_lp);
    cudaFree(d_lpth);
    cudaFree(d_bf);
    free(bf_tmp);
}
/*void SetupConstant(){
// Calculate constant value
double l_ma = MANONOR/EBE;
double l_qnep = ES*sqrt(2.0*DAMPRATE);
double l_norr56 = 2.0*M_PI*R56/MODWL;
double l_dn = 1.0/sqrt(M_PI*my_gamma/137.0)*DAMPRATE/2.0;
double l_lprange = 2.0*M_PI*NUMMB;
printf("\nma = %f\nqnep = %f\nnorr56 = %f\ndn = %f\nlprange = %f\n",l_ma,l_qnep,l_norr56,l_dn,l_lprange);
// Copy constant value to device
cudaMemcpyToSymbol(ma, &l_ma, sizeof(double));
cudaMemcpyToSymbol(qnep, &l_qnep, sizeof(double));
cudaMemcpyToSymbol(norr56, &l_norr56, sizeof(double));
cudaMemcpyToSymbol(dn, &l_dn, sizeof(double));
cudaMemcpyToSymbol(lprange, &l_lprange, sizeof(double));
}*/
/* Top-level device driver: allocate one RNG state per launched thread,
   seed them, then run the tracking simulation filling lp, lpth and bf. */
void CalOnDevice(double *lp, double *lpth, double *bf){
    const int blocksPerGrid = (NUM_OF_THREADS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    curandStateXORWOW_t *rngStates;
    cudaMalloc((void **)&rngStates, THREADS_PER_BLOCK * blocksPerGrid * sizeof(curandStateXORWOW_t));
    printf("Setup Random Generator State...");
    SetupKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(rngStates);
    printf("Complete.\n");
    printf("Start Calculation...");
    Calculate(lp, lpth, bf, blocksPerGrid, rngStates);
    printf("Complete.\n");
    cudaFree(rngStates);
}
/* Entry point: run the simulation and dump the bunching factor (out.txt),
   final phases (outt.txt) and final energy deviations (outtt.txt).
   NOTE(review): sqrt is applied to bf[j] here although Calculate already
   applied one — presumably intentional; confirm against the original
   analysis scripts. */
int main() {
    FILE *fbf = fopen("out.txt","w");
    FILE *fphase = fopen("outt.txt","w");
    FILE *fenergy = fopen("outtt.txt","w");
    double lp[NE];    /* final phase per electron */
    double lpth[NE];  /* final energy deviation per electron */
    double bf[TURNS]; /* bunching factor magnitude per turn */
    printf("Execute calculation on the device.\n");
    CalOnDevice(lp, lpth, bf);
    printf("Output: out.txt\n");
    for (int j = 0; j < TURNS; ++j)
        fprintf(fbf,"%f\n",sqrt(bf[j]));
    printf("Output: outt.txt\n");
    for (int j = 0; j < NE; ++j)
        fprintf(fphase,"%f\n",lp[j]);
    printf("Output: outtt.txt\n");
    for (int j = 0; j < NE; ++j)
        fprintf(fenergy,"%f\n",lpth[j]);
    fclose(fbf);
    fclose(fphase);
    fclose(fenergy);
    return 0;
}
|
22,133 | #include "includes.h"
/* Memtest write pass: each block fills its BLOCKSIZE-byte slice of
   [_ptr, end_ptr) with `pattern`, one unsigned int at a time.  Blocks
   whose slice starts at or past end_ptr write nothing. */
__global__ void kernel_move_inv_write(char* _ptr, char* end_ptr, unsigned int pattern)
{
    unsigned int* word = (unsigned int*)(_ptr + blockIdx.x * BLOCKSIZE);
    if (word >= (unsigned int*)end_ptr) {
        return;
    }
    const unsigned int nwords = BLOCKSIZE / sizeof(unsigned int);
    for (unsigned int w = 0; w < nwords; ++w) {
        word[w] = pattern;
    }
}
22,134 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated stress-test kernel (launched <<<1,1>>> by main): loops
// var_1 * var_2 times over a chain of float comparisons and updates on
// `comp`, then prints the final value.  Generated code — kept verbatim
// per the file's "Do not modify" header.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
if (comp < +1.4268E-35f + var_3 - fmodf(var_4 + (-1.2361E-18f * (var_5 * var_6)), var_7 - var_8)) {
if (comp >= -1.7135E29f / var_9 - +1.1539E27f) {
float tmp_1 = -0.0f;
comp += tmp_1 - +1.2048E34f / +1.6050E21f;
if (comp == sqrtf(-1.1915E14f)) {
float tmp_2 = -1.5838E-22f;
comp += tmp_2 + log10f(var_10 + (var_11 - +1.1871E-43f - -1.0763E-37f * (+0.0f / var_12)));
}
if (comp > +1.2736E35f - (var_13 - (var_14 + (var_15 - (var_16 + var_17))))) {
comp = -1.3103E-36f - +1.1597E-36f * (var_18 / var_19 + var_20);
comp += (var_21 - +1.0217E-27f / var_22 / (var_23 * +1.8086E-41f));
comp += (var_24 / var_25);
}
if (comp < var_26 - fmodf((var_27 * cosf((var_28 + var_29 - var_30 + (var_31 / var_32 * var_33)))), -1.5462E-4f / fabsf(asinf(-1.8932E-36f)))) {
comp = (+1.7096E9f / -0.0f * +1.3261E11f);
}
}
}
}
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array with every slot set to v.
   The caller owns the returned buffer (release with free()). */
float* initPointer(float v) {
    float* buf = (float*) malloc(sizeof(float) * 10);
    int i = 0;
    while (i < 10) {
        buf[i] = v;
        ++i;
    }
    return buf;
}
/* Harness for the auto-generated kernel: parse the 34 command-line values
   (argv[2] and argv[3] are ints, the rest floats) and launch compute once
   on a single thread. */
int main(int argc, char** argv) {
    float f[35];
    for (int k = 1; k <= 34; ++k)
        f[k] = atof(argv[k]);
    int n1 = atoi(argv[2]);
    int n2 = atoi(argv[3]);
    compute<<<1,1>>>(f[1], n1, n2, f[4], f[5], f[6], f[7], f[8], f[9],
                     f[10], f[11], f[12], f[13], f[14], f[15], f[16],
                     f[17], f[18], f[19], f[20], f[21], f[22], f[23],
                     f[24], f[25], f[26], f[27], f[28], f[29], f[30],
                     f[31], f[32], f[33], f[34]);
    cudaDeviceSynchronize();
    return 0;
}
|
22,135 | #include <stdio.h>
/* Single-thread kernel: store a[0] + b[0] into c[0]. */
__global__ void add(int *a, int *b, int *c){
    c[0] = a[0] + b[0];
}
/* Add two ints on the GPU: copy 3 and 5 to the device, launch a
   single-thread kernel, copy the sum back and print it. */
int main(void){
    const int nbytes = sizeof(int);
    int host_a = 3;
    int host_b = 5;
    int host_c;
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, nbytes);
    cudaMalloc((void **)&dev_b, nbytes);
    cudaMalloc((void **)&dev_c, nbytes);
    cudaMemcpy(dev_a, &host_a, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, nbytes, cudaMemcpyHostToDevice);
    add<<<1,1>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(&host_c, dev_c, nbytes, cudaMemcpyDeviceToHost);
    printf("Result: %d", host_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
22,136 | /**
* @file collateSegments.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
/**
* Scans input in parallel and collates the indecies with important data
*
* @param src the original unfiltered array
* @param scanResult the output array of strongestNeighborScan.cu
* @param output the array to be modified
* @param numEdges the number of edges/elements in the above arrays
*/
/*
 * Collate per-segment results: for each element that is the LAST of its
 * segment in src[], copy the scanned value into output[src[i]].
 * Grid-stride walk over all numEdges elements using a flattened 2D
 * thread id, so any launch geometry covers the whole array.
 * Fix: the original read src[curTID+1] unconditionally, going one past
 * the end at curTID == numEdges-1; the final element is by definition
 * the last of its segment, so it is handled without that read.
 */
__global__ void collateSegments_gpu(
    int * src,
    int * scanResult,
    int * output,
    int numEdges
) {
    // Get Thread ID
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    // Each thread handles roughly numEdges / NUM_THREADS elements.
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // The i-th element ends its segment when the next element belongs
        // to a different segment — or when it is the final element overall.
        if(curTID == numEdges - 1 || src[curTID] != src[curTID+1]) {
            output[src[curTID]] = scanResult[curTID];
        }
    }
}
/**
* After the previous step, the maximum-weight neighbors are placed as the last element within each segment in the output array(s).
* Most elements in these two arrays do not contain useful information.
* We want to collate the useful parts of this array,
* in order to produce an output array that has the same number of elements as the number of segments,
* and only store the maximum-weight neighbor information.
*/
|
22,137 | #include<iostream>
#include<stdlib.h>
#include<time.h>
#define N 999999
#define nblocks 100
using namespace std;
// Block-level max reduction, launched as <<<nblocks, nblocks>>>: each thread
// strides through a[] collecting a private maximum, parks it in shared
// memory, and after the barrier every thread of the block scans the shared
// array and writes the block's maximum to b[blockIdx.x]. The host finishes
// the reduction over the nblocks partial results.
__global__ void cudaArrayMax(float *a, float *b)
{
int id = threadIdx.x + blockDim.x *blockIdx.x;
// NOTE(review): stride is nblocks (100), not the total thread count
// blockDim.x * gridDim.x (10000), so each element is examined by many
// threads. The maximum is still correct, just computed redundantly —
// confirm whether this was intentional.
int stride = nblocks;
__shared__ float cache[nblocks];
float thmax = a[id];
for (int i = id; i < N; i += stride)
if (a[i] > thmax)
thmax = a[i];
cache[threadIdx.x] = thmax;
__syncthreads();
// Every thread performs the same serial scan of the shared array; all then
// store the identical value, so the unguarded final write is benign.
float max = cache[0];
for (int i = 0; i < nblocks; i++)
if (cache[i] > max)
max = cache[i];
b[blockIdx.x] = max;
}
// Host driver: fills an N-element array with uniform random floats, finds the
// maximum on the CPU, then on the GPU (per-block reduction in cudaArrayMax
// plus a host pass over the nblocks partial results), and reports both
// timings and the speedup.
int main()
{
srand(time(0));
float *ha, *ht, hmax;
float *da, *dt, dmax;
unsigned int size = N *sizeof(float);
ha = (float *) malloc(size);
// ht receives the per-block partial maxima copied back from the device.
ht = (float *) malloc(nblocks *sizeof(float));
for (int i = 0; i < N; i++)
ha[i] = rand() / (float) RAND_MAX;
/* ----- BEGIN CPU ----- */
clock_t cpustart = clock();
hmax = ha[0];
for (int i = 0; i < N; i++)
if (ha[i] > hmax)
hmax = ha[i];
clock_t cpuend = clock();
float cputime = 1000 *(cpuend - cpustart) / (float) CLOCKS_PER_SEC;
cout << "cpu max is " << hmax << " in " << cputime << endl;
/* ----- END CPU ----- */
/* ----- BEGIN GPU ----- */
cudaMalloc((void **) &da, size);
cudaMalloc((void **) &dt, nblocks *sizeof(float));
// The GPU timing includes both host<->device copies, so it measures the
// end-to-end offload cost rather than the kernel alone.
clock_t gpustart = clock();
cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
cudaArrayMax <<<nblocks, nblocks>>> (da, dt);
// Blocking copy — also synchronises with the kernel launch above.
cudaMemcpy(ht, dt, nblocks *sizeof(float), cudaMemcpyDeviceToHost);
// Final reduction over the per-block maxima happens on the host.
dmax = ht[0];
for (int i = 0; i < nblocks; i++)
if (ht[i] > dmax)
dmax = ht[i];
clock_t gpuend = clock();
float gputime = 1000 *(gpuend - gpustart) / (float) CLOCKS_PER_SEC;
cout << "gpu max is " << dmax << " in " << gputime << endl;
/* ----- END GPU ----- */
cout << "speedup = " << cputime / gputime;
// NOTE(review): ha and ht are never free()d — harmless at process exit but
// worth fixing if this main is ever reused as a function.
cudaFree(da);
cudaFree(dt);
return 0;
}
|
22,138 | // tests cuEventCreate
#include <iostream>
#include <memory>
using namespace std;
#include <cuda.h>
// Adds `value` to each of the N floats at `data`. Note: the loop does not use
// the thread index, so every launched thread performs the full N-element
// sweep — the kernel exists to generate sustained device work, not a result.
__global__ void longKernel(float *data, int N, float value) {
    for (int idx = 0; idx < N; idx++) {
        data[idx] += value;
    }
}
// Exercises driver-API streams/allocations around a runtime-API kernel launch.
int main(int argc, char *argv[]) {
    int N = 102400; // * 1024;

    // Fix: the driver API requires cuInit() before any other cu* call; the
    // original skipped it, so cuStreamCreate and friends would fail with
    // CUDA_ERROR_NOT_INITIALIZED. Retaining the device's primary context and
    // making it current lets these driver-API calls share the context the
    // runtime (<<<>>>) launch uses.
    cuInit(0);
    CUdevice dev;
    cuDeviceGet(&dev, 0);
    CUcontext ctx;
    cuDevicePrimaryCtxRetain(&ctx, dev);
    cuCtxSetCurrent(ctx);

    CUstream stream;
    cuStreamCreate(&stream, 0);
    // Pinned host buffer so the async H2D copy below can be truly async.
    float *hostfloats;
    cuMemHostAlloc((void **)&hostfloats, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
    CUdeviceptr devicefloats;
    cuMemAlloc(&devicefloats, N * sizeof(float));
    // floats[2] = 4.0f;
    cuMemcpyHtoDAsync(devicefloats, hostfloats, N * sizeof(float), stream);
    // Runtime-API launch: one 32-thread block per 32 elements.
    longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1)>>>((float *)devicefloats, N, 3.0f);
    cout << "queued kernel x" << endl;
    cuCtxSynchronize();
    cout << "finished" << endl;
    cuMemFreeHost(hostfloats);
    cuMemFree(devicefloats);
    cuStreamDestroy(stream);
    cuDevicePrimaryCtxRelease(dev);
    return 0;
}
|
22,139 | // mul 2 arr(2D) on device-GPU
|
22,140 | #include<stdio.h>
// Increments every element of the arrays reachable through the device-side
// pointer table dA. The bounds are hard-coded: exactly 2 arrays, of 256 and
// 512 elements respectively, walked by a single thread.
// NOTE(review): callers (kernelLOW/kernelMIDLOW) pass N arrays whose sizes
// come from hPtrSizes — confirm the hard-coded 2 / 256*(i+1) assumption
// matches the data actually supplied.
__global__ void myKernel(int64_t **dA) {
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 256*(i+1); j++) {
dA[i][j] = dA[i][j] + 1;
}
}
}
extern "C" {
// Copies N host arrays to the device, runs myKernel over the device-side
// pointer table, and copies the (incremented) data back into hPtrs.
// hPtrSizes[i] gives the element count of array i.
void kernelLOW(int64_t **hPtrs, size_t *hPtrSizes, int64_t N) {
    // Host-side table of device pointers, one device buffer per input array.
    int64_t **dA = (int64_t**)malloc(sizeof(int64_t*)*N);
    for (int i = 0; i < N; i++) {
        cudaMalloc(&dA[i], hPtrSizes[i]*sizeof(int64_t));
        cudaMemcpy(dA[i], hPtrs[i], hPtrSizes[i]*sizeof(int64_t), cudaMemcpyHostToDevice);
    }
    // Device copy of the pointer table so the kernel can index dA[i][j].
    int64_t **dAs;
    cudaMalloc(&dAs, sizeof(int64_t*)*N);
    cudaMemcpy(dAs, dA, sizeof(int64_t*)*N, cudaMemcpyHostToDevice);
    myKernel<<<1,1>>>(dAs);
    cudaDeviceSynchronize();
    for (int i = 0; i < N; i++) {
        cudaMemcpy(hPtrs[i], dA[i], hPtrSizes[i]*sizeof(int64_t), cudaMemcpyDeviceToHost);
    }
    // Fix: the original leaked every device buffer, the device pointer table
    // and the host pointer table on each call.
    for (int i = 0; i < N; i++) {
        cudaFree(dA[i]);
    }
    cudaFree(dAs);
    free(dA);
}
// Launches myKernel on an already-device-resident pointer table and waits for
// completion. N is accepted for interface symmetry with kernelLOW but is not
// used (the kernel's loop bounds are hard-coded).
void kernelMIDLOW(int64_t **dAs, int64_t N) {
    myKernel<<<1, 1>>>(dAs);
    cudaDeviceSynchronize();
}
} |
22,141 |
#include <iostream>
using namespace std;
// Prints the compute capability (major.minor) of every visible CUDA device,
// one per line. Returns 0 on success, -1 when the device count cannot be
// queried or no device is present.
int print_cuda_version()
{
    int device_count = 0;
    if (cudaGetDeviceCount(&device_count) != cudaSuccess) {
        return -1;
    }
    if (device_count == 0) {
        return -1;
    }
    for (int dev = 0; dev < device_count; ++dev) {
        cudaDeviceProp props;
        if (cudaSuccess == cudaGetDeviceProperties(&props, dev)) {
            std::cout << props.major << "." << props.minor << std::endl;
        }
    }
    return 0;
}
// Entry point: exit status mirrors print_cuda_version's result (0 on
// success, -1 when no CUDA device is available).
int main(int argc, char ** argv)
{
return print_cuda_version();
}
|
22,142 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
#define SHARED_ARRAY_SIZE 128
// Copies in[] to out[] by staging each element through a statically sized
// shared-memory buffer (one slot per thread). Demonstrates static __shared__
// allocation; no inter-thread exchange occurs, so no __syncthreads() needed.
__global__ void smem_static_test(int * in, int * out, int size)
{
    const int local = threadIdx.x;
    const int global = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ int staging[SHARED_ARRAY_SIZE];
    if (global < size)
    {
        staging[local] = in[global];
        out[global] = staging[local];
    }
}
// Same element-wise copy as smem_static_test, but the staging buffer is
// dynamically sized shared memory: its byte count comes from the third
// kernel-launch parameter (must be at least blockDim.x * sizeof(int)).
__global__ void smem_dynamic_test(int * in, int * out, int size)
{
    extern __shared__ int staging[];
    const int local = threadIdx.x;
    const int global = blockIdx.x * blockDim.x + threadIdx.x;
    if (global < size)
    {
        staging[local] = in[global];
        out[global] = staging[local];
    }
}
//int main(int argc, char ** argv)
//{
// int size = 1 << 22;
// int block_size = SHARED_ARRAY_SIZE;
// bool dynamic = false;
//
// if (argc > 1)
// {
// dynamic = atoi(argv[1]);
// }
//
// //number of bytes needed to hold element count
// size_t NO_BYTES = size * sizeof(int);
//
// // host pointers
// int *h_in, *h_ref, *d_in, *d_out;
//
// //allocate memory for host size pointers
// h_in = (int *)malloc(NO_BYTES);
// h_ref = (int *)malloc(NO_BYTES);
//
// initialize(h_in, size, INIT_ONE_TO_TEN);
//
// cudaMalloc((int **)&d_in, NO_BYTES);
// cudaMalloc((int **)&d_out, NO_BYTES);
//
// //kernel launch parameters
// dim3 block(block_size);
// dim3 grid((size / block.x) + 1);
//
// cudaMemcpy(d_in, h_in, NO_BYTES, cudaMemcpyHostToDevice);
//
// if (!dynamic)
// {
// printf("Static smem kernel \n");
// smem_static_test << <grid, block >> > (d_in, d_out, size);
// }
// else
// {
// printf("Dynamic smem kernel \n");
// smem_dynamic_test << <grid, block, sizeof(int)* SHARED_ARRAY_SIZE >> > (d_in, d_out, size);
// }
// cudaDeviceSynchronize();
//
// cudaMemcpy(h_ref, d_out, NO_BYTES, cudaMemcpyDeviceToHost);
//
// cudaFree(d_in);
// cudaFree(d_out);
//
// free(h_in);
// free(h_ref);
//
// cudaDeviceReset();
// return EXIT_SUCCESS;
//} |
22,143 | #include "includes.h"
/****************************************************************************
Floyd - Warshall Algorithm developed using CUDA. A 2011-2012 assignement for
Parallel Programming Course of Electrical and Computer Engineering Department
in the Aristotle Faculty of Enginnering - Thessaloniki.
*****************************************************************************/
#define INF 100000000
#define BLOCKSIZE 128
#define BITSFT 7 //log2(BLOCKSIZE)
/*****************************************
Array Generator - filling weight matrices
according to Floyd-Warshall theory.
******************************************/
/*
 One relaxation step of parallel Floyd-Warshall for intermediate vertex k.
 D is the flattened distance matrix and Q the predecessor matrix. The
 `vertices` parameter is the log2 of the matrix dimension (used purely as a
 shift amount — see the inline comment); k2 is presumably k's precomputed row
 offset (k << vertices) — confirm at the call site. Each thread relaxes one
 (i, j) pair: row i from blockIdx.x, column j from blockIdx.y/threadIdx.x.
 NOTE(review): there is no bounds check on j, so the launch grid must tile
 the matrix width exactly.
*/
__global__ void pFloyd(float *D,float *Q,int vertices,int k,int k2)
{
int i,j,index;
i= blockIdx.x;
j=(blockIdx.y << BITSFT) + threadIdx.x;
index=(i << vertices)+j; //vertices equals log2(vertices).
if((D[(i << vertices)+k]+D[(k2)+j])<D[index])
{
D[index]=D[(i << vertices)+k]+D[(k2)+j];
Q[index]=k;
}
} |
22,144 | /**
* Adds up 1,000,000 times of the block ID to
* a variable.
* What to observe/ponder:
* - Any difference between shared and global memory?
* - Does the result differ between runs?
*/
#include <stdio.h>
__device__ __managed__ volatile int global_counter[2];
// Reports (and clears, per cudaGetLastError semantics) the most recent CUDA
// runtime error, if any.
void check_cuda_errors()
{
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        printf("Last CUDA error %s\n", cudaGetErrorString(status));
    }
}
// Accumulates blockIdx.x into a per-thread shared-memory counter `times`
// times, then prints the result. Shared memory is per-block, so blocks do
// not interfere with each other (contrast with global_mem below). Launch
// with 2 threads per block so threadIdx.x indexes the two counter slots.
__global__ void shared_mem(int times)
{
    __shared__ int shared_counter[2];
    // Each thread owns one slot; clear it before accumulating.
    shared_counter[threadIdx.x] = 0;
    for (int iter = 0; iter < times; iter++) {
        shared_counter[threadIdx.x] += blockIdx.x;
    }
    printf("Shared (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, shared_counter[threadIdx.x]);
}
// Global-memory counterpart of shared_mem: accumulates blockIdx.x into a
// per-thread slot of the managed, volatile global_counter array. All blocks
// share the same two slots, so the blocks' zeroing and accumulation
// interleave — the point of the exercise is to observe that interference.
__global__ void global_mem(int times)
{
    // Each thread clears its (globally shared) slot before accumulating.
    global_counter[threadIdx.x] = 0;
    for (int iter = 0; iter < times; iter++) {
        global_counter[threadIdx.x] += blockIdx.x;
    }
    printf("Global (Blk: %d, Th: %d): %d\n", blockIdx.x, threadIdx.x, global_counter[threadIdx.x]);
}
// Runs the shared-memory variant, then the global-memory variant; each launch
// is followed by a sync (flushes device printf) and an error check.
int main(int argc, char **argv)
{
    shared_mem<<<10, 2>>>(1000000);
    cudaDeviceSynchronize();
    check_cuda_errors();

    global_mem<<<10, 2>>>(1000000);
    cudaDeviceSynchronize();
    check_cuda_errors();
    return 0;
}
22,145 | #include <math.h>
#include <cuda_runtime.h>
// the rbf kernel function
// RBF (Gaussian) kernel value between row tx of a and row ty of b, each row
// holding `len` floats; sigma is fixed at 10. invert==0 compares the rows
// position-by-position; invert==1 compares the first half of one row against
// the second half of the other (and vice versa). Any other invert value
// leaves the distance at 0, yielding exp(0) = 1.
__host__ __device__ float rbf_kernel(int tx, int ty, float *a, float *b, int len, int invert)
{
    float sigma = 10.;
    float beta = 0.5/sigma/sigma;
    float dist = 0;
    float va, vb;
    if(invert == 0)
    {
        // Straight pairwise squared Euclidean distance.
        for(int i=0;i<len;i++)
        {
            va = a[tx*len+i];
            vb = b[ty*len+i];
            dist += (va-vb)*(va-vb);
        }
    }
    else if (invert == 1)
    {
        // Cross-compare the two halves of the feature vectors.
        int half = len/2;
        for(int i=0;i<half;i++)
        {
            va = a[tx*len+i+half];
            vb = b[ty*len+i];
            dist += (va-vb)*(va-vb);
            va = a[tx*len+i];
            vb = b[ty*len+i+half];
            dist += (va-vb)*(va-vb);
        }
    }
    return exp(-beta*dist);
}
// Builds the Kronecker-product edge weights/indices between two graphs: one
// thread per (edge_1, edge_2) pair. Each edge carries a 40-float PSSM
// feature row; the weight is the RBF similarity (rbf_kernel), computed once
// aligned (invert=0, first half of the output arrays) and once with the
// feature halves crossed (invert=1, second half). The output arrays must
// therefore hold 2 * n_edges_1 * n_edges_2 entries (x2 for index pairs).
__global__ void create_kron_mat( int *edges_index_1, int *edges_index_2,
float *edges_pssm_1, float *edges_pssm_2,
int *edges_index_product, float *edges_weight_product,
int n_edges_1, int n_edges_2,
int n_nodes_2)
{
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int ty = threadIdx.y + blockDim.y * blockIdx.y;
int ind=0, len = 40;
float w;
int invert;
if ( (tx < n_edges_1) && (ty < n_edges_2) ){
////////////////////////////////////
// first pass
// i-j VS a-b
////////////////////////////////////
// get the index of the element
ind = tx * n_edges_2 + ty;
// get its weight
invert=0;
w = rbf_kernel(tx,ty,edges_pssm_1,edges_pssm_2,len,invert);
// store it
edges_weight_product[ind] = w;
edges_index_product[2*ind] = edges_index_1[2*tx] * n_nodes_2 + edges_index_2[2*ty] ;
edges_index_product[2*ind + 1] = edges_index_1[2*tx+1] * n_nodes_2 + edges_index_2[2*ty+1] ;
////////////////////////////////////
// second pass
// j-i VS a-b
////////////////////////////////////
// get the index element
ind = ind + n_edges_1 * n_edges_2;
// get the weight
invert=1;
w = rbf_kernel(tx,ty,edges_pssm_1,edges_pssm_2,len,invert);
// store it
edges_weight_product[ind] = w;
edges_index_product[2*ind] = edges_index_1[2*tx+1] * n_nodes_2 + edges_index_2[2*ty];
edges_index_product[2*ind + 1] = edges_index_1[2*tx] * n_nodes_2 + edges_index_2[2*ty+1];
}
}
// the rbf kernel function
// Shared-memory variant of rbf_kernel: both feature rows are read from the
// block's staged tile. Rows 0..(halfsdimx-1) of the tile hold graph-1
// features and rows halfsdimx.. hold graph-2 features, so tx/ty here are
// *thread-local* indices (threadIdx.x/y), not global edge indices.
// Semantics of invert match rbf_kernel (0 = aligned, 1 = halves crossed).
__host__ __device__ float rbf_kernel_shared(int tx, int ty, float *tile, int len, int invert, const int halfsdimx)
{
float sigma = 10.;
float beta = 0.5/sigma/sigma;
float d = 0;
float k = 0;
float x=0,y=0;
int i;
if(invert == 0)
{
for(i=0;i<len;++i)
{
// ind = tx*len+i;
// if (ind <640)
// x = tile[ind];
// else{
// x = 0;
// printf("Wrong X index for thread %d %d\n", tx,ty);
// }
// ind = (ty+halfsdimx)*len + i;
// if (ind <640)
// y = tile[ind];
// else{
// y = 0;
// printf("Wrong Y index for thread %d %d i=%d : ind = %d\n", tx,ty,i, ind);
// }
x = tile[tx*len+i];
y = tile[ (ty+halfsdimx)*len + i];
d += (x-y)*(x-y);
}
}
else if (invert == 1)
{
int half = len/2;
for(int i=0;i<half;i++)
{
x = tile[tx*len+i+half];
y = tile[(ty+halfsdimx)*len+i];
d += (x-y)*(x-y);
x = tile[tx*len+i];
y = tile[(ty+halfsdimx)*len + i + half];
d += (x-y)*(x-y);
}
}
k = exp(-beta*d);
return k;
}
// Shared-memory version of create_kron_mat: each thread stages its two
// 40-float PSSM rows in the block tile, then computes the aligned (invert=0)
// and crossed (invert=1) RBF weights from shared memory.
// NOTE(review): the tile layout assumes blockDim.x and blockDim.y are at most
// HALF_SDIMX (8) — confirm against the launch configuration.
__global__ void create_kron_mat_shared( int *edges_index_1, int *edges_index_2,
float *edges_pssm_1, float *edges_pssm_2,
int *edges_index_product, float *edges_weight_product,
int n_edges_1, int n_edges_2,
int n_nodes_2)
{
    int tx = threadIdx.x + blockDim.x * blockIdx.x;
    int ty = threadIdx.y + blockDim.y * blockIdx.y;
    int ind = 0, len = 40;
    float w = 0;
    int invert;
    const int SDIMX = 16;
    const int HALF_SDIMX = 8;
    const int SDIMY = 40;
    __shared__ float tile[SDIMX*SDIMY];

    const bool active = (tx < n_edges_1) && (ty < n_edges_2);

    // Stage the two PSSM rows this thread needs in shared memory.
    if (active) {
        for (int i = 0; i < len; i++) {
            tile[threadIdx.x*len + i] = edges_pssm_1[tx*len+i];
            tile[(threadIdx.y+HALF_SDIMX)*len+i] = edges_pssm_2[ty*len+i];
        }
    }
    // Fix: __syncthreads() is a block-wide barrier and must be reached by
    // every thread of the block. The original called it inside the (tx, ty)
    // bounds check, which is undefined behaviour whenever a block straddles
    // the edge of the index space.
    __syncthreads();

    if (active) {
        // First pass: i-j VS a-b.
        ind = tx * n_edges_2 + ty;
        invert = 0;
        w = rbf_kernel_shared(threadIdx.x, threadIdx.y, tile, len, invert, HALF_SDIMX);
        edges_weight_product[ind] = w;
        edges_index_product[2*ind] = edges_index_1[2*tx] * n_nodes_2 + edges_index_2[2*ty];
        edges_index_product[2*ind + 1] = edges_index_1[2*tx+1] * n_nodes_2 + edges_index_2[2*ty+1];

        // Second pass: j-i VS a-b, stored in the second half of the output.
        ind = ind + n_edges_1 * n_edges_2;
        invert = 1;
        w = rbf_kernel_shared(threadIdx.x, threadIdx.y, tile, len, invert, HALF_SDIMX);
        edges_weight_product[ind] = w;
        edges_index_product[2*ind] = edges_index_1[2*tx+1] * n_nodes_2 + edges_index_2[2*ty];
        edges_index_product[2*ind + 1] = edges_index_1[2*tx] * n_nodes_2 + edges_index_2[2*ty+1];
    }
}
// Node-similarity matrix: W0[i * n_nodes_2 + j] = RBF similarity between
// node i of graph 1 and node j of graph 2, over 20-float PSSM feature rows.
__global__ void create_nodesim_mat(float *nodes_pssm_1, float *nodes_pssm_2, float *W0, int n_nodes_1, int n_nodes_2)
{
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    const int j = threadIdx.y + blockDim.y * blockIdx.y;
    if (i < n_nodes_1 && j < n_nodes_2)
    {
        const int len = 20;           // PSSM feature width for nodes
        const int invert = 0;         // aligned comparison only
        const int idx = i * n_nodes_2 + j;
        W0[idx] = rbf_kernel(i, j, nodes_pssm_1, nodes_pssm_2, len, invert);
    }
}
// Pairwise node-info product: p[i * n_nodes_2 + j] = info1[i] * info2[j],
// zeroed when BOTH values fall below the 0.5 cutoff.
__global__ void create_p_vect(float *node_info1, float* node_info2, float *p, int n_nodes_1, int n_nodes_2)
{
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    const int j = threadIdx.y + blockDim.y * blockIdx.y;
    const float cutoff = 0.5;
    if (i < n_nodes_1 && j < n_nodes_2)
    {
        const int idx = i * n_nodes_2 + j;
        const bool bothBelow = (node_info1[i] < cutoff) && (node_info2[j] < cutoff);
        p[idx] = bothBelow ? 0 : node_info1[i] * node_info2[j];
    }
}
|
22,146 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define err 0.000001
// Device evaluation of the target function whose root newtraph seeks:
// f(x) = (70 + 1.463/x^2) * (x - 0.0394) - 0.08314 * 215.
// The result is written through y. (The commented line is the original
// slide example this exercise was adapted from.)
__device__
void f(float x, float *y)
{
// *y = exp(x)-5*pow(x,2); // slide
*y = ((70 + 1.463/pow(x, 2)) * (x - 0.0394)) - (0.08314 * 215);
}
// Derivative f'(x) of the function in f(), used as the Newton-Raphson slope.
// With f(x) = (70 + 1.463*x^-2)(x - 0.0394) - const, the product rule gives
// f'(x) = (70 + 1.463*x^-2) + (x - 0.0394) * (-2 * 1.463 * x^-3).
// Fix: the original returned the constant 70 - 1.463 + 2*1.463*0.0394, which
// is f'(x) evaluated only at x = 1; using that fixed slope for every iterate
// made the loop a constant-slope iteration rather than Newton-Raphson.
__device__
void g(float x, float *y)
{
// *y = exp(x)-10*x; // slide
*y = (70 + 1.463/pow(x, 2)) + (x - 0.0394) * (-2 * 1.463 / pow(x, 3));
}
// Single-thread Newton-Raphson root finder: iterates x <- x - f(x)/g(x)
// starting from x = 1, printing each iterate, until successive iterates
// differ by less than err. Runs entirely on the device via printf.
__global__
void newtraph()
{
float x,xS,fx,gx,fS,gS;
x = 1;
printf("%11s %11s %11s %11s\n",
"x", "f(x)", "f'(x)", "x");
do
{
// Keep the previous iterate for the convergence test and the log line.
xS=x;
f(x, &fx);
g(x, &gx);
// fS/gS re-evaluate at xS == x; used only for printing.
f(xS, &fS);
g(xS, &gS);
x=x-fx/gx;
printf("%11.6f %11.6f %11.6f %11.6f\n",
xS,fS,gS,x);
}
while(fabs(x-xS)>err);
// "Hasil" is Indonesian for "result".
printf("Hasil = %.6f\n",x);
}
// Launches the single-thread Newton-Raphson kernel; the synchronisation
// flushes the device-side printf output before the process exits.
int main(int argc,char **argv) {
newtraph<<<1, 1>>>();
cudaDeviceSynchronize();
return 0;
}
|
22,147 | /** @file
* Name: Parallel LU Decomposition - CUDA Version
* Authored by: Team Segfault
* Description: This program performs Lower/Upper decomposition on a square matrix and
* subsequently solves the associated system of equations with Forward and Backward substitution.
* Implementation Date: 11/23/2020
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
bool GetUserInput(int, char *[], int&,int&);
void InitializeMatrices(float **&, float **&, float **&, int);
void InitializeVectors(float *&, float*&, float*&, int);
void PrintMatrix(float **, int);
void PrintVector(float*, int);
void DeleteMatrix(float**,int);
void DeleteVector(float *);
void LUDecomp(float *, float *, int);
__global__ void RowOperations(float *, float *, int, int);
__global__ void ForwardSubstitution(float *, float *, float* , int);
__global__ void BackwardSubstitution(float *, float *, float* , int);
//------------------------------------------------------------------
// Main Program
//------------------------------------------------------------------
// Drives the full pipeline: build random A (duplicated into `upper`), zeroed
// `lower`, and random b; LU-factorise on the GPU; solve Ly = b then Ux = y
// with one kernel each; copy everything back and optionally print.
int main(int argc, char *argv[]){
srand(time(NULL)); //set the seed
float **a, **lower, **upper; //Matrices
float *b, *x, *y; //Vectors
float *d_lower, *d_upper, *d_b, *d_x, *d_y; //Device pointers
int n,isPrint;
float runtime;
if (!GetUserInput(argc,argv,n,isPrint)) return 1;
// a == upper and lower -> 0
InitializeMatrices(a, lower, upper, n);
InitializeVectors(x, y, b, n);
//Get start time in clock ticks.
// Fix: the original stored clock()/CLOCKS_PER_SEC (seconds) here but later
// computed clock() - runtime (ticks), mixing units and printing a bogus
// elapsed time. Keep everything in ticks and divide once when printing.
runtime = (float)clock();
// ######################### BEGIN LU Decomp ##############################3
// Each matrix is one contiguous block (row 0 points at the whole storage),
// so a single n*n memcpy moves it.
cudaMalloc((void**)&d_lower, n*n*sizeof(float));
cudaMalloc((void**)&d_upper, n*n*sizeof(float));
cudaMemcpy(d_upper, upper[0], n*n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_lower, lower[0], n*n*sizeof(float), cudaMemcpyHostToDevice);
LUDecomp(d_lower, d_upper, n);
cudaDeviceSynchronize();
// ######################### END LU Decomp ##############################3
// ######################### BEGIN Substitution ##############################
cudaMalloc((void**)&d_b, n*sizeof(float));
cudaMalloc((void**)&d_x, n*sizeof(float));
cudaMalloc((void**)&d_y, n*sizeof(float));
cudaMemcpy(d_b, b, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, n*sizeof(float), cudaMemcpyHostToDevice);
// One block per unknown, n threads per block.
dim3 dimGrid(n,1);
dim3 dimBlock(n,1);
ForwardSubstitution<<<dimGrid, dimBlock>>>(d_lower, d_y, d_b, n);
BackwardSubstitution<<<dimGrid, dimBlock>>>(d_upper, d_x, d_y, n);
cudaThreadSynchronize();
// ######################### END Substitution ##############################3
// ######################### BEGIN Copy Back ##############################
cudaMemcpy(lower[0],d_lower, n*n*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(upper[0],d_upper, n*n*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(x,d_x, n*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(y,d_y, n*sizeof(float),cudaMemcpyDeviceToHost);
// ######################### END Copy Back ##############################
runtime = clock() - runtime; //Elapsed ticks
if(isPrint == 1){
printf("A:\n");
PrintMatrix(a,n);
printf("B:\n");
PrintVector(b,n);
printf("--------------------------------------------------\n");
printf("Lower:\n");
PrintMatrix(lower,n);
printf("Upper:\n");
PrintMatrix(upper,n);
printf("Y:\n");
PrintVector(y,n);
printf("X:\n");
PrintVector(x,n);
}
printf("LU Decomposition and Forward/Backward substitution to solve Ax=B ran in %.2f seconds\n", (runtime)/float(CLOCKS_PER_SEC));
cudaFree(d_lower);
cudaFree(d_upper);
cudaFree(d_b);
cudaFree(d_x);
cudaFree(d_y);
DeleteMatrix(upper,n);
DeleteMatrix(lower,n);
DeleteMatrix(a,n);
DeleteVector(x);
DeleteVector(y);
DeleteVector(b);
return 0;
}
//------------------------------------------------------------------
// KERNEL DRIVERS
//------------------------------------------------------------------
// Host driver for the GPU LU factorisation: one RowOperations sweep per
// pivot column. The active trailing submatrix shrinks with the pivot index,
// so the grid (rows below the pivot) and block (columns) shrink with it.
void LUDecomp(float *d_lower, float *d_upper, int thicness){
    for (int pivot = 0; pivot < thicness; ++pivot) {
        const int extent = thicness - pivot;  // rows/cols still active
        dim3 grid(extent, 1);
        dim3 block(extent, 1);
        RowOperations<<<grid, block>>>(d_lower, d_upper, pivot, thicness);
    }
}
//------------------------------------------------------------------
// KERNELS
//------------------------------------------------------------------
// One elimination sweep for pivot column i: block b eliminates row
// k = blockIdx.x + i + 1, and thread j = threadIdx.x + i updates that row's
// column j. Records the multiplier in `lower` and sets lower's unit
// diagonal. Matrices are indexed column-major (see inline comment).
// NOTE(review): out-of-range threads `return` *before* the __syncthreads()
// barrier — barrier divergence within a block is undefined behaviour.
// Also, the thread with j == i updates upper[k + thicness*i] while other
// threads of the block read that same element; confirm the intended order.
__global__ void RowOperations(float *lower, float *upper, int i, int thicness){
// Let us get this diagonal thing out of the way
if(blockIdx.x * blockDim.x + threadIdx.x == 0)
lower[ i*thicness + i ] = 1;
int k = blockIdx.x + i + 1;
int j = threadIdx.x + i;
if( !( k < thicness && j < thicness) ) return; // Whoops
__shared__ float pivot;
// And get one pivot per block
if(threadIdx.x == 0)
pivot = -1.0/upper[ i*thicness + i ];
// Hey guys! Wait up!
__syncthreads();
// It is worth noting that the matrices are column major here
lower[k + thicness*i] = upper[k + thicness*i]/upper[i + thicness*i];
upper[k + thicness*j] = upper[k + thicness*j] + pivot*upper[k + thicness*i] * upper[i + thicness*j];
}
// Forward substitution for L*y = b. Block i is meant to compute y[i+1]
// (i = blockIdx.x + 1); its threads each subtract one lower[i][j]*y[j] term
// from a shared accumulator seeded with b[i].
// NOTE(review): `temp = temp - ...` is a non-atomic read-modify-write on
// shared memory performed concurrently by every thread of the block, and
// each block reads y[j] values produced by other blocks with no grid-wide
// ordering guarantee — this kernel looks racy; verify its results against a
// serial reference before trusting them.
__global__ void ForwardSubstitution(float *lower, float *y, float* b, int thicness){
if(blockIdx.x * blockDim.x + threadIdx.x == 0) // Last Element
y[0] = b[0] / lower[0];
int i = blockIdx.x + 1;
int j = i - threadIdx.x;
if( !( i < thicness && j < thicness) || ( j < 0 ) ) return; // Whoops
__shared__ float temp;
if(threadIdx.x == 0)
temp = b[i];
// Hey guys! Wait up!
__syncthreads();
temp = temp - lower[ i + thicness*j] * y[j];
// Hey guys! Wait up!
__syncthreads();
y[i] = temp/lower[i + thicness*i];
}
// Backward substitution for U*x = y, mirroring ForwardSubstitution but
// walking rows from the bottom (i = thicness - blockIdx.x - 2).
// NOTE(review): shares the same concerns as ForwardSubstitution — the shared
// `temp` read-modify-write is concurrent and unsynchronised across threads,
// and inter-block ordering of x[j] reads is not guaranteed; verify results
// against a serial reference.
__global__ void BackwardSubstitution(float *upper, float *x, float* y, int thicness){
if(blockIdx.x * blockDim.x + threadIdx.x == 0)
x[thicness - 1] = y[thicness - 1] / upper[(thicness - 1) + thicness*(thicness-1)]; // Last Element
int i = thicness - blockIdx.x - 2;
int j = thicness - i - threadIdx.x - 1;
if( !( i < thicness && j < thicness) || ( j < 0 ) ) return; // Whoops
__shared__ float temp;
if(threadIdx.x == 0)
temp = y[i];
// Hey guys! Wait up!
__syncthreads();
temp = temp - upper[ i + thicness*j] * x[j];
// Hey guys! Wait up!
__syncthreads();
x[i] = temp/upper[i + thicness*i];
}
//------------------------------------------------------------------
// UTILITIES
//------------------------------------------------------------------
// Get user input of matrix dimension and printing option
// Parses the command line: argv[1] is the matrix dimension X (must be > 0);
// optional argv[2] == 1 requests printing, honoured only when X <= 9.
// Prints usage/diagnostics and returns false when the arguments are unusable.
bool GetUserInput(int argc, char *argv[],int& n,int& isPrint)
{
    if (argc < 2)
    {
        printf("Arguments:<X> [<Y>]");
        printf("X : Matrix size [X x X]");
        printf("Y = 1: print the input/output matrix if X < 10");
        printf("Y <> 1 or missing: does not print the input/output matrix");
        return false;
    }
    // Matrix size must be a positive integer.
    n = atoi(argv[1]);
    bool usable = true;
    if (n <= 0)
    {
        printf("Matrix size must be larger than 0");
        usable = false;
    }
    // Printing flag (set even on failure, matching the original behaviour).
    if (argc >= 3)
        isPrint = (atoi(argv[2]) == 1 && n <= 9) ? 1 : 0;
    else
        isPrint = 0;
    return usable;
}
//delete matrix matrix a[n x n]
// Frees a matrix allocated as one contiguous element block plus a
// row-pointer table (see InitializeMatrices). n is unused but kept for
// interface compatibility.
void DeleteMatrix(float **a,int n)
{
    delete[] a[0];  // contiguous element storage
    delete[] a;     // row-pointer table
}
// Releases a vector that was allocated with new[].
void DeleteVector(float* x)
{
    delete[] x;
}
//Fills matrix A with random values, upper and lower is filled with 0's except for their diagonals
// Allocates three size x size matrices, each as one contiguous block with a
// row-pointer table. a and upper receive identical random entries in
// [1, 11]; lower is zero-filled (its unit diagonal is set later, on the
// device, by RowOperations).
void InitializeMatrices(float **&a, float **&lower, float **&upper, int size){
    a = new float*[size];
    lower = new float*[size];
    upper = new float*[size];
    a[0] = new float[size * size];
    lower[0] = new float[size * size];
    upper[0] = new float[size * size];
    // Row pointers stride through the contiguous storage.
    for (int r = 1; r < size; ++r) {
        a[r] = a[r-1] + size;
        lower[r] = lower[r-1] + size;
        upper[r] = upper[r-1] + size;
    }
    for (int r = 0; r < size; ++r) {
        for (int c = 0; c < size; ++c) {
            upper[r][c] = a[r][c] = (rand() % 11) + 1;
            lower[r][c] = 0;
        }
    }
}
// Allocates three length-n vectors: b gets random entries in [1, 11];
// x and y start at zero (they receive the substitution results later).
void InitializeVectors(float *&x, float *&y, float*& b, int n) {
    x = new float[n];
    y = new float[n];
    b = new float[n];
    for (int i = 0; i < n; ++i) {
        b[i] = (float)(rand() % 11) + 1;
        x[i] = 0;
        y[i] = 0;
    }
}
//Print the matrix that was passed to it
// Prints a size x size matrix, tab-separated, two decimals per entry.
// Note: indexes matrix[c][r], i.e. prints the transpose of the row-major
// host storage — presumably to display the device's column-major data in
// natural row order (see the column-major comment in RowOperations);
// confirm before changing.
void PrintMatrix(float **matrix, int size)
{
    for (int r = 0 ; r < size ; r++){
        for (int c = 0 ; c < size ; c++){
            printf("%.2f\t", matrix[c][r]);
        }
        printf("\n");
    }
}
// Prints the n entries of x, one per line, two decimals each.
void PrintVector(float* x, int n)
{
    for (int j = 0 ; j < n ; j++) {
        printf("%.2f\n", x[j]);
    }
} |
22,148 | #include <stdio.h>
#include <stdlib.h>
// cuda include
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
typedef struct{
int *gene;
int fitness;
}Indiv;
// Draws one uniform random float for the calling thread. Per the cuRAND
// documentation, curand_uniform returns values in (0.0, 1.0] — it excludes
// 0 and *includes* 1. The local state copy is written back so the next call
// from this thread continues the sequence.
__device__ float Grand(curandState *state){
//get index
int index = blockIdx.x * blockDim.x + threadIdx.x;
//gen local_state
curandState local_state = state[index];
//get rand_num
float rand_num = curand_uniform(&local_state);
//write back rand status
state[index] = local_state;
//return rand_num
return rand_num;
}
// Uniform random integer in [0, limit] drawn from the per-thread curand
// state. Fix: curand_uniform returns values in (0, 1] — *inclusive* of 1.0 —
// so (int)(u * (limit + 1)) could yield limit + 1; clamp to keep the result
// in range.
__device__ int randLimit(int limit, curandState *state){
float f_rand = Grand(state) * (limit + 1);
int result = (int)f_rand;
return (result > limit) ? limit : result;
}
// Seeds this thread's curand state; the global thread id is used as the
// subsequence number so each thread gets an independent stream.
__device__ void GSrand(curandState *state, unsigned int seed){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, tid, 0, &state[tid]);
}
// Parent-selection stub: intended to choose an individual index from
// source_space (CCE presumably parameterises the selection — confirm with
// the author), but the implementation is incomplete and currently always
// returns index 0.
__device__ int getParent(Indiv *source_space, int CCE){
int ans = 0;
return ans;
}
// Builds a child individual from two selected parents. Currently incomplete:
// the parent gene pointers are fetched but never used, and the child's gene
// buffer is allocated (device-heap malloc — it must eventually be released
// with device-side free(), and the device heap has a configurable size
// limit) but never filled, so the gene data is returned uninitialised.
__device__ Indiv newIndiv(Indiv *source_space, int CCE, int gene_size){
Indiv new_indiv;
// get parents
int *father = source_space[getParent(source_space, CCE)].gene;
int *mother = source_space[getParent(source_space, CCE)].gene;
// get gene space
new_indiv.gene = (int*)malloc(sizeof(int) * gene_size);
// gen new gene
return new_indiv;
}
// Placeholder for the per-generation evolution kernel; no work is done yet.
__global__ void newGeneration(Indiv *pre_generation, Indiv *now_generation){
;
}
|
22,149 | //fail: assertion
//--blockDim=64 --gridDim=64 --no-inline
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <assert.h>
#define N 2//64
// Device helper under test: returns its argument plus one.
__device__ int f(int x) {
    return 1 + x;
}
// Stores f(2) == 3 through y. With a multi-thread launch every thread writes
// the same value, so the final contents are well defined.
__global__ void foo(int *y) {
    y[0] = f(2);
}
|
22,150 | #include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
// Writes each element's own global index: a[i] = i for all i < n.
__global__ void fillKernel(int *a,int n)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n) {
        a[gid] = gid;
    }
}
// Launches fillKernel with enough 512-thread blocks to cover n elements
// (ceiling division on the block count).
void fill(int * d_a, int n)
{
    const int threadsPerBlock = 512;
    const int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
    fillKernel<<<blocks, threadsPerBlock>>>(d_a, n);
}
// Fills a device vector with 0..N-1 via the raw-pointer kernel, sums it with
// thrust::reduce, and checks the total against a host-side loop.
int main()
{
    const int N = 50000;
    thrust::device_vector<int> a(N);
    fill(thrust::raw_pointer_cast(&a[0]), N);
    int sumA = thrust::reduce(a.begin(), a.end(), 0);

    // Host reference: sum of the first N integers.
    int sumCheck = 0;
    for (int i = 0; i < N; ++i)
        sumCheck += i;

    if (sumA == sumCheck) {
        cout << "Test Succeeded!" << endl;
    } else {
        cerr << "Test FAILED!" << endl;
        return 1;
    }
    return 0;
} |
22,151 | #include "includes.h"
// Naive dense matrix product: Mat3 (m x p) = Mat1 (m x n) * Mat2 (n x p),
// row-major storage, one thread per output element (row from the y index,
// column from the x index).
__global__ void KernelNormalMul(float *Mat1,float *Mat2,float *Mat3,int m,int n,int p){
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    if ((row < m) && (col < p)) {
        float acc = 0.0;
        for (int k = 0; k < n; ++k) {
            acc += Mat1[n * row + k] * Mat2[p * k + col];
        }
        Mat3[p * row + col] = acc;
    }
} |
22,152 |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define NUM 10000
#define SEED 18
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if(err!=cudaSuccess){\
printf("Cuda Error: %s for %s \n",cudaGetErrorString(err),str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
// One XOR-folding step of a tree reduction: every element whose index is a
// multiple of merge_size absorbs the element merge_size/2 ahead of it.
// After log2(num) rounds d_arr[0] holds the XOR of the whole array.
__global__ void calculate(int *d_arr,unsigned long num,int merge_size){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx % merge_size != 0 || idx > num)
        return;
    const int partner = idx + merge_size / 2;
    if (partner < num)
        d_arr[idx] = d_arr[idx] ^ d_arr[partner];
    return;
}
// Host driver: fills an array of `num` random ints (argv[1] = count,
// argv[2] = seed), XOR-reduces it on the GPU via repeated `calculate`
// sweeps, and prints the grand XOR left in element 0.
int main(int argc, char **argv)
{
    struct timeval start, end, t_start, t_end;
    if(argc!=3)
    {
        printf("Insufficient number of arguments");
        exit(-1);
    }
    // Fix: atoi() returns int and silently truncates counts/seeds that do
    // not fit; parse the full unsigned long range instead.
    unsigned long num = strtoul(argv[1], NULL, 10);
    unsigned long seed = strtoul(argv[2], NULL, 10);
    srand(seed);
    int *h_arr;
    int *d_arr;
    // Fix: the original requested num*sizeof(num) bytes — the size of the
    // unsigned long counter, not of the int elements actually stored.
    h_arr = (int *)malloc(num * sizeof(int));
    for (unsigned long i = 0; i < num; i++)
        h_arr[i] = rand();
    gettimeofday(&t_start, NULL);
    cudaMalloc(&d_arr, num * sizeof(int));
    CUDA_ERROR_EXIT("cudamalloc");
    cudaMemcpy(d_arr, h_arr, num * sizeof(int), cudaMemcpyHostToDevice);
    CUDA_ERROR_EXIT("memcpy");
    gettimeofday(&start, NULL);
    // Tree reduction: segment size doubles each round until it covers num.
    int merge_size = 1;
    while (merge_size < num) {
        merge_size *= 2;
        calculate<<<(1023 + num) / 1024, 1024>>>(d_arr, num, merge_size);
    }
    gettimeofday(&end, NULL);
    cudaMemcpy(h_arr, d_arr, num * sizeof(int), cudaMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy");
    gettimeofday(&t_end, NULL);
    // printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
    printf("%d\n", h_arr[0]);
    cudaFree(d_arr);
    free(h_arr);
    return 0;
}
|
22,153 | #include "includes.h"
// Intentionally empty kernel (useful e.g. for measuring bare launch overhead).
__global__ void kernel()
{
} |
22,154 | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <time.h>
#include <stdbool.h>
/*
References
- https://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/208801731?pgno=2
*/
/*
reverseArray - reverses an array in kernel
@params int*A, int dim_a
@return void
*/
/*
 reverseArray - reverses A (length dim_a) in place on the device.
 Threads 0 .. dim_a/2 - 1 each swap one element with its mirror; the middle
 element of an odd-length array is left untouched.
 @params int*A, int dim_a
 @return void
*/
__global__ void reverseArray(int *A, int dim_a){
    const int tid = blockIdx.x* blockDim.x+ threadIdx.x;
    if (tid < dim_a / 2) {
        const int mirror = dim_a - 1 - tid;
        const int held = A[tid];
        A[tid] = A[mirror];
        A[mirror] = held;
    }
}
/*
main program
*/
// Fills a 16M-int array with random values (keeping a host-side reference
// copy), reverses it on the GPU, and spot-checks the result against the
// reversed reference.
int main(int argc, char** argv){
// pointer for host memory and size
int *h_a;
int dim_a = 16*1024*1024; // 16 MB
// array to compare results
int *check;
// pointer for device memory
int *d_a;
// define grid and block size
int num_th_per_blk = 8;
int num_blocks = dim_a / num_th_per_blk;
// allocate host and device memory
// num_blocks * num_th_per_blk == dim_a exactly, so memSize covers the array.
size_t memSize = num_blocks * num_th_per_blk * sizeof(int);
h_a = (int *) malloc(memSize);
check = (int *) malloc(memSize);
cudaMalloc((void **) &d_a, memSize);
// Initialize input array on host
int val;
srand(time(NULL));
for (int i = 0; i < dim_a; i++){
val = rand();
h_a[i] = val;
check[i] = val;
}
// Copy host array to device array
cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice );
// launch kernel
dim3 dimGrid(num_blocks);
dim3 dimBlock(num_th_per_blk);
reverseArray<<< dimGrid, dimBlock >>>(d_a, dim_a);
// device to host copy (blocking, so it also synchronises with the kernel)
cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost );
printf("Verifying program correctness.... ");
// verify the data returned to the host is correct
// NOTE(review): only the first 100 positions are checked — the loop breaks
// at i == 100.
for (int i = 0; i < dim_a; i++){
if(i == 100) break;
assert(h_a[i] == check[dim_a - 1 - i]);
}
printf("Everthing checks out!\n");
// free device memory
cudaFree(d_a);
// free host memory
free(h_a);
free(check);
return 0;
} //qsub hw10.sh -q UI-GPU -I ngpus=1 |
22,155 | /*
* File: mandel.c
* Author: davidr
*
* Created on May 22, 2013, 9:42 AM
*/
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include <time.h>
# define NPOINTS 2000
# define MAXITER 2000
struct complex{
double real;
double imag;
};
void checkCUDAError(const char*);
// One thread per sample point (i, j) of the NPOINTS x NPOINTS grid over the
// complex rectangle [-2, 0.5] x [0, 1.125]. Iterates z <- z^2 + c up to
// MAXITER times; d_np[i + NPOINTS*j] is set to 1 when the orbit escapes
// (|z|^2 > 4), i.e. the point is OUTSIDE the Mandelbrot set, and is left at
// its initial 0 otherwise. The 1.0e-7 offsets nudge samples slightly,
// presumably to avoid landing exactly on the axes — confirm.
__global__ void mandel_numpoints(int *d_np){
double ztemp;
int iter;
struct complex z, c;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
c.real = -2.0+2.5*(double)(i)/(double)(NPOINTS)+1.0e-7;
c.imag = 1.125*(double)(j)/(double)(NPOINTS)+1.0e-7;
z=c;
d_np[i + NPOINTS*j] = 0;
for (iter=0; iter<MAXITER; iter++){
ztemp=(z.real*z.real)-(z.imag*z.imag)+c.real;
z.imag=z.real*z.imag*2+c.imag;
z.real=ztemp;
if ((z.real*z.real+z.imag*z.imag)>4.0e0) {
d_np[i + NPOINTS*j] = 1;
break;
}
}
}
/*
*
*/
/*
 * Host driver: zeroes the NPOINTS*NPOINTS escape-flag grid, launches the
 * Mandelbrot kernel over a 100x100 grid of 20x20 blocks (one thread per
 * sample point), counts escaped points, and reports the resulting area
 * estimate with a crude error bound plus the elapsed wall time.
 */
int main(int argc, char** argv) {
int numoutside = 0;
double area, error;//, ztemp;
//struct complex z, c;
time_t t1,t2;
int *h_np; /* Array to save numpoints in host */
int *d_np;/* Array to save numpoints in device */
size_t sz = NPOINTS * NPOINTS * sizeof(int);
h_np = (int *) malloc(sz);
cudaMalloc((void**) &d_np, sz);
for(int i = 0; i < NPOINTS*NPOINTS; i++){
h_np[i] = 0;
}
cudaMemcpy(d_np, h_np, sz, cudaMemcpyHostToDevice);
// 100 blocks * 20 threads = 2000 per dimension — exactly NPOINTS, so the
// kernel needs no bounds check.
dim3 dimGrid(100,100);
dim3 dimBlock(20,20);
t1 = time(NULL);
mandel_numpoints<<<dimGrid,dimBlock>>>(d_np);
// NOTE(review): cudaThreadSynchronize() is deprecated in favour of
// cudaDeviceSynchronize().
cudaThreadSynchronize();
checkCUDAError("kernel invocation");
cudaMemcpy(h_np,d_np,sz,cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
t2 = time(NULL);
// Points flagged 1 escaped, i.e. lie outside the set.
for(int i=0; i < NPOINTS*NPOINTS; i++){
if(h_np[i] > 0){
numoutside++;
}
}
area=2.0*2.5*1.125*(double)(NPOINTS*NPOINTS-numoutside)/(double)(NPOINTS*NPOINTS);
error=area/(double)NPOINTS;
printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n",area,error);
printf("Tiempo de ejecución: %f segundos \n",difftime(t2,t1));
cudaFree(d_np);
free(h_np);
return 0;
}
/* Utility function to check for and report CUDA errors */
/* Utility function: if the most recent CUDA runtime call (or kernel launch)
   failed, report it on stderr prefixed with msg and abort the process. */
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (cudaSuccess != status)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
|
22,156 | /*
To query the number of CUDA-capable GPUs
in a host and the capabilities of each GPU.
Run it on the Hummingbird GPU node and
report the results.
*/
#include<stdio.h>
int main() {
    // Enumerate CUDA devices and report memory/compute characteristics.
    int nDevices = 0;
    // fix: check the API result — on failure nDevices would be indeterminate.
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("GPU Num: %d\n", i);
        printf(" - GPU Name: %s\n", prop.name);
        printf(" - Mem Speed: %d\n", prop.memoryClockRate);
        printf(" - Mem Bus Width (bits): %d\n", prop.memoryBusWidth);
        // memoryClockRate is in kHz; x2 for DDR; /8 converts bits to bytes;
        // /1e6 converts kB/s to GB/s.
        float MemPeakBandWidth = 2.0 * prop.memoryClockRate * (prop.memoryBusWidth/8) / 1.0e6;
        printf(" - Mem Peak Bandwidth(GB/s): %f\n", MemPeakBandWidth);
        // fix: report the full compute capability (major.minor), not just major.
        printf("*** Compute Capability: %d.%d\n", prop.major, prop.minor);
    }
    return 0;
}
|
22,157 | /*
1.Input Data
2.What Need to be calculated
3.Design your threads and thread blocks
4. Implementation on CPU and GPU
5. Built in check points
6. Output data
*/
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<time.h>
#define NumberOfELements 100000
#define PI 3.14159265
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
int line_length(FILE *input_file){
    // Count the lines in the stream, then rewind so the caller can re-read it.
    char buffer[100];
    int count = 0;
    while (fgets(buffer, 100, input_file) != NULL)
        ++count;
    rewind(input_file);
    return count;
}
void get_data(FILE *input_file, int n_lines, float *asc, float *decl){
    /*
     * Parse up to n_lines "<right ascension> <declination>" pairs (arc
     * minutes) from input_file, convert them to radians, and close the file.
     * fix: the original ignored n_lines entirely, so a file with more rows
     * than the caller allocated for overran asc/decl; malformed rows also
     * stored uninitialized values and are now skipped.
     */
    char read_lines[100];
    float right_asc, declin;
    int i = 0;
    while (i < n_lines && fgets(read_lines, 100, input_file) != NULL){
        if (sscanf(read_lines, "%f %f", &right_asc, &declin) != 2)
            continue; // skip malformed rows instead of storing garbage
        asc[i] = right_asc * PI / (60 * 180);  // arc minutes -> radians
        decl[i] = declin * PI / (60 * 180);
        ++i;
    }
    fclose(input_file);
}
__global__ void histogram_calc(float *rt_rl, float *decn_rl, float *rt_syc, float *decn_syc, float pi, unsigned long long int *histogram){
    // One thread per galaxy of the first catalog: accumulate its angular
    // separation to every galaxy of the second catalog into quarter-degree bins.
    float galxs_rdns;
    float galxs_dgrs;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= NumberOfELements)
        return;
    // fix: the original called __syncthreads() inside this divergent region
    // (threads with index >= NumberOfELements never reached the barrier,
    // which is undefined behavior); no shared memory is used here, so the
    // barrier is simply removed.
    const int max_bin = 4 * 180;  // quarter-degree bins over [0, 180] degrees
    for (int i = 0; i < NumberOfELements; ++i){
        galxs_rdns = acos(sin(decn_rl[index]) * sin(decn_syc[i]) + cos(decn_rl[index]) * cos(decn_syc[i]) * cos(rt_rl[index] - rt_syc[i]));
        galxs_dgrs = galxs_rdns * (180 /pi);
        int bin = (int)(galxs_dgrs * 4);
        // fix: guard the index — rounding can push |cos| slightly above 1,
        // making acos return NaN; 180 degrees maps to the last (inclusive) bin.
        if (bin >= 0 && bin <= max_bin)
            atomicAdd(&histogram[bin], 1);
    }
}
int main(int argc, char *argv[]) {
    // Compute DD, DR and RR angular-separation histograms for two galaxy
    // catalogs and write the (DD - 2DR + RR)/RR estimator per bin.
    FILE *input_file, *output_file;
    unsigned long long int *DD, *DR, *RR;
    int total_lines_r, total_lines_s;
    float *right_ascension_real, *declination_real, *right_ascension_synthetic, *declination_synthetic;
    long int sum_DD, sum_DR, sum_RR;
    float *d_DC, *d_DR, *d_RR, *d_RC;
    double omg = 0.00;
    int bin_width = 4;
    int degrees = 180;
    // fix: was a dangling "int num_of_bins =" spilling onto the next line
    // (a self-referencing initializer); define it in a single statement.
    int num_of_bins = bin_width * degrees;
    time_t start, stop;
    /* Check that we have 4 command line arguments */
    if ( argc != 4 ) {
        printf("Usage: %s real_data synthetic_data output_file\n", argv[0]);
        return(0);
    }
    start = clock();
    //open real data file
    input_file = fopen(argv[1], "r");
    if (input_file == NULL){
        printf("file does not exist%s\n", argv[1]);
        return 0;
    }
    // count lines in a real file
    total_lines_r = line_length(input_file);
    //alocate memory for real data on host
    right_ascension_real = (float *)calloc(total_lines_r, sizeof(float));
    declination_real = (float *)calloc(total_lines_r, sizeof(float));
    //get data (also closes the file)
    get_data(input_file, total_lines_r, right_ascension_real, declination_real);
    //open synthetic data
    input_file = fopen(argv[2], "r");
    if (input_file == NULL){
        printf("file does not exist%s\n", argv[2]);
        return 0;
    }
    //count lines in synthetic file
    total_lines_s = line_length(input_file);
    //alocate memory for the synthetic data on host
    right_ascension_synthetic = (float *)calloc(total_lines_s, sizeof(float));
    declination_synthetic = (float *)calloc(total_lines_s, sizeof(float));
    //get second data
    get_data(input_file, total_lines_s,right_ascension_synthetic, declination_synthetic);
    // fix: the copies and kernels below assume exactly NumberOfELements
    // entries per catalog; with fewer rows the host-to-device copies would
    // read past the allocated buffers.
    if (total_lines_r < NumberOfELements || total_lines_s < NumberOfELements) {
        printf("Input files must contain at least %d entries\n", NumberOfELements);
        return 0;
    }
    // Host histograms (one extra slot for the inclusive 180-degree bin)
    long int *host_DD;
    long int *host_DR;
    long int *host_RR;
    host_DD = (long int *)malloc((num_of_bins+1) * sizeof(long int));
    host_DR = (long int *)malloc((num_of_bins+1) * sizeof(long int));
    host_RR = (long int *)malloc((num_of_bins+1) * sizeof(long int));
    for (int i = 0; i <= num_of_bins; ++i ) {
        host_DD[i] = 0L;
        host_DR[i] = 0L;
        host_RR[i] = 0L;
    }
    //Allocate device memory (histograms kept at the original, oversized length)
    cudaMalloc((void **)&DD, (NumberOfELements+1) * sizeof(unsigned long long int));
    cudaMalloc((void **)&DR, (NumberOfELements+1) * sizeof(unsigned long long int));
    cudaMalloc((void **)&RR, (NumberOfELements+1) * sizeof(unsigned long long int));
    // fix: cudaMalloc does not zero memory and the kernel only increments,
    // so the device histograms must start from zero.
    cudaMemset(DD, 0, (NumberOfELements+1) * sizeof(unsigned long long int));
    cudaMemset(DR, 0, (NumberOfELements+1) * sizeof(unsigned long long int));
    cudaMemset(RR, 0, (NumberOfELements+1) * sizeof(unsigned long long int));
    cudaMalloc((void **)&d_DR, (NumberOfELements+1) * sizeof(float));
    cudaMalloc((void **)&d_DC, (NumberOfELements+1) * sizeof(float));
    cudaMalloc((void **)&d_RR, (NumberOfELements+1) * sizeof(float));
    cudaMalloc((void **)&d_RC, (NumberOfELements+1) * sizeof(float));
    //copy the data from host memory to device memory
    cudaMemcpy(d_DR, right_ascension_real, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_DC, declination_real, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_RR, right_ascension_synthetic, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_RC, declination_synthetic, (NumberOfELements) * sizeof(float), cudaMemcpyHostToDevice);
    //Launch the kernel for DD (real vs real)
    int blockSize = 256;
    int numBlocks = (NumberOfELements + blockSize - 1) / blockSize;  // ceil-divide
    histogram_calc <<<numBlocks, blockSize>>>(d_DR, d_DC, d_DR, d_DC,PI, DD);
    cudaDeviceSynchronize();
    //copy the results back to the host
    // fix: copy num_of_bins+1 entries so the inclusive sums below never read
    // a slot that was not refreshed from the device.
    cudaMemcpy(host_DD, DD, (num_of_bins+1) * sizeof(long int), cudaMemcpyDeviceToHost);
    sum_DD = 0L;
    for (int i = 0; i <= (num_of_bins); ++i )
        sum_DD += host_DD[i];
    printf("histograms DD = %ld\n", sum_DD);
    //Launch the kernel DR (real vs synthetic)
    histogram_calc <<<numBlocks, blockSize>>>(d_DR, d_DC, d_RR, d_RC,PI, DR);
    cudaDeviceSynchronize();
    cudaMemcpy(host_DR, DR, (num_of_bins+1) * sizeof(long int), cudaMemcpyDeviceToHost);
    sum_DR = 0L;
    for (int i = 0; i <= num_of_bins; ++i )
        sum_DR += host_DR[i];
    printf("histograms DR = %ld\n", sum_DR);
    //Launch the kernel RR (synthetic vs synthetic)
    histogram_calc <<<numBlocks, blockSize>>>(d_RR, d_RC, d_RR, d_RC, PI, RR);
    cudaDeviceSynchronize();  // fix: was missing after the third launch
    cudaMemcpy(host_RR, RR, (num_of_bins+1) * sizeof(long int), cudaMemcpyDeviceToHost);
    sum_RR = 0L;
    for (int i = 0; i <= num_of_bins; ++i )
        sum_RR += host_RR[i];
    printf("histograms RR = %ld\n", sum_RR);
    /* Open the output file */
    output_file = fopen(argv[3],"w");
    if ( output_file == NULL ) {
        printf("Unable to open %s\n",argv[3]);
        return(-1);
    }
    // Per-bin estimator (DD - 2DR + RR)/RR, only where RR has counts
    for(int i = 0; i < num_of_bins; ++i){
        if (host_RR[i] > 0 ) {
            omg = ((double)host_DD[i]/(double)(host_RR[i])) - ((2.0*host_DR[i])/(double)(host_RR[i])) + ((double)host_RR[i]/(double)(host_RR[i]));
            printf("Omega = %6.3f\n", omg);
            fprintf(output_file, "%6.3f\n", omg);
        }
    }
    fclose(output_file);
    free(right_ascension_synthetic);
    free(declination_synthetic);
    free(right_ascension_real);
    free(declination_real);
    free(host_DD);
    free(host_DR);
    free(host_RR);
    cudaFree(DD);
    cudaFree(DR);
    cudaFree(RR);
    cudaFree(d_RR);
    cudaFree(d_RC);
    cudaFree(d_DR);
    cudaFree(d_DC);
    stop = clock();
    printf("\nExcution time = %6.1f seconds\n",
           ((double) (stop-start))/ CLOCKS_PER_SEC);
    return (0);
}
|
22,158 | #include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <string.h>
int log2 (int i)
{
    // Floor of the base-2 logarithm: number of right-shifts until the
    // value reaches zero (returns 0 for inputs of 0 or 1).
    int shifts;
    for (shifts = 0; (i >>= 1) != 0; ++shifts) {}
    return shifts;
}
int bit_reverse (int w, int bits)
{
    // Reverse the lowest `bits` bits of w (bit 0 becomes bit bits-1, etc.).
    int result = 0;
    for (int src = 0; src < bits; src++)
    {
        if (w & (1 << src))
            result |= 1 << (bits - 1 - src);
    }
    return result;
}
__global__ void simple_histo (int* d_bins, const int* d_in, const int BIN_COUNT)
{
    // One thread per input element; its bin is the value modulo BIN_COUNT.
    // The launch grid is assumed to cover the input array exactly.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    atomicAdd(d_bins + (d_in[gid] % BIN_COUNT), 1);
}
int main (int argc, char **argv)
{
    // Build a histogram of bit-reversed indices on the GPU
    // (bin = value mod BIN_COUNT) and print the per-bin counts.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp devProps;
    if (cudaGetDeviceProperties(&devProps, dev) == cudaSuccess)
    {
        printf("Using device %d:\n", dev);
        // fix: %zd expects size_t — pass totalGlobalMem as size_t, not int.
        printf("%s; global mem: %zuB; compute v%d.%d; clock: %d kHz\n",
               devProps.name, (size_t)devProps.totalGlobalMem,
               (int)devProps.major, (int)devProps.minor,
               (int)devProps.clockRate);
    }
    const int ARRAY_SIZE = 65536;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    const int BIN_COUNT = 16;
    const int BIN_BYTES = BIN_COUNT * sizeof(int);
    int h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = bit_reverse(i, log2(ARRAY_SIZE));
    }
    int h_bins[BIN_COUNT];
    memset(h_bins, 0, BIN_BYTES);
    // declare GPU memory pointers
    int *d_in = NULL, *d_bins = NULL;
    // allocate GPU memory
    // fix: cudaMalloc reports failure via its return code; the old code
    // tested the (uninitialized) pointer against NULL, which is unreliable.
    if (cudaMalloc(&d_in, ARRAY_BYTES) != cudaSuccess) {
        fprintf(stderr, "Failed to alloc GPU mem\n");
        exit(EXIT_FAILURE);
    }
    if (cudaMalloc(&d_bins, BIN_BYTES) != cudaSuccess) {
        fprintf(stderr, "Failed to alloc GPU mem\n");
        exit(EXIT_FAILURE);
    }
    // transfer the arrays to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice);
    simple_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT);
    // fix: surface launch/runtime errors instead of printing garbage bins
    if (cudaGetLastError() != cudaSuccess || cudaDeviceSynchronize() != cudaSuccess) {
        fprintf(stderr, "kernel execution failed\n");
        exit(EXIT_FAILURE);
    }
    // copy back the histogram from the GPU
    cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);
    for(int i = 0; i < BIN_COUNT; i++) {
        printf("bin %d: count %d\n", i, h_bins[i]);
    }
    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_bins);
    return 0;
}
|
22,159 | #include "includes.h"
#define INF 2147483647
extern "C" {
}
__global__ void oneMove(int * tab, int dist, int pow, int blocksPerTask, int period) {
    // One compare-exchange pass over element pairs separated by `dist`.
    // Each block stages 512 pairs at a time in shared memory. The sort
    // direction alternates every `period` blocks; since it depends only on
    // blockIdx it is uniform within a block, so every __syncthreads() below
    // is reached by all threads of the block.
    __shared__ int stage[1024];
    __shared__ int begin;
    const int tid = threadIdx.x;
    if (tid == 0)
        begin = (blockIdx.x / blocksPerTask) * dist * 2 + (blockIdx.x % blocksPerTask) * 512 * pow;
    __syncthreads();
    const bool ascending = ((blockIdx.x / period) % 2 == 0);
    for (int base = begin; base < begin + pow * 512; base += 512) {
        // Lower half of the pair goes to stage[0..511], upper half (offset
        // by dist in global memory) to stage[512..1023].
        if (tid < 512) stage[tid] = tab[base + tid];
        else stage[tid] = tab[base + tid - 512 + dist];
        __syncthreads();
        if (tid < 512) {
            const int lo = stage[tid];
            const int hi = stage[tid + 512];
            // Swap out-of-order pairs according to the block's direction.
            if (ascending ? (lo > hi) : (lo < hi)) {
                stage[tid] = hi;
                stage[tid + 512] = lo;
            }
        }
        __syncthreads();
        if (tid < 512) tab[base + tid] = stage[tid];
        else tab[base + tid - 512 + dist] = stage[tid];
        __syncthreads();
    }
}
22,160 | /*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
* Refer to "Detailed Numerical Analyses of the Aliev-Panfilov Model on GPGPU"
* https://www.simula.no/publications/detailed-numerical-analyses-aliev-panfilov-model-gpgpu
* by Xing Cai, Didem Unat and Scott Baden
*
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <getopt.h>
using namespace std;
#define TILE_DIM 32
#define kk 8.0
#define a 0.1
#define epsilon 0.01
#define M1 0.07
#define M2 0.3
#define b 0.1
#define d 5e-5
// For Command Line Args
void cmdLine(int argc, char *argv[], double &T, int &n, int &px, int &py, int &plot_freq, int &no_comm, int &num_threads);
// Timer: Make successive calls and take a difference to get the elapsed time.
double getTime();
// Allocate a 2D array
double **alloc2D(int m, int n);
// Mirror Ghost Boundries
void mirrorBoundries(double *E_prev_1D, const int n, const int m, const int WIDTH);
void mirrorBoundries(double *E_prev_1D, double* d_E_prev_1D, const int n, const int m, const int WIDTH);
/*
Reports statistics about the computation
These values should not vary (except to within roundoff)
when we use different numbers of processes to solve the problem
*/
double stats(double **E, int m, int n, double *_mx);
double stats1D(double *E, int m, int n, double *_mx, int WIDTH);
// ============================== Kernels ===========================
__global__ void mirrorkernel(double *E_prev_1D, const int n, const int m, const int WIDTH);
void simV1(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D);
void simV2(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D);
void simV3(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D);
void simV4(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D);
void simV5(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D);
// ============================= Exp 1= ===============
// Main Refined -- Versioin 4 Refined --
// Main program
int main(int argc, char **argv)
{
/*
* Solution arrays
* E is the "Excitation" variable, a voltage
* R is the "Recovery" variable
* E_prev is the Excitation variable for the previous timestep,
* and is used in time integration
*/
// For Serial Version
double **E, **R, **E_prev;
// For Host and GPU
double *E_1D, *R_1D, *E_prev_1D;
double *d_E_1D, *d_E_prev_1D, *d_R_1D;
// Various constants - these definitions shouldn't change
double T = 1000.0;
int m = 200, n = 200;
int plot_freq = 0;
int px = 1, py = 1;
int no_comm = 0;
int version = 4;
int WIDTH;
double time_elapsed = 0.0;
// int version = 4;
// NOTE: the last cmdLine argument slot is "num_threads" (flag -o); it is
// reused here to select the GPU kernel version (1..4).
cmdLine(argc, argv, T, n, px, py, plot_freq, no_comm, version);
m = n;
// Allocate contiguous memory for solution arrays
// The computational box is defined on [1:m+1,1:n+1]
// We pad the arrays in order to facilitate differencing on the
// boundaries of the computation box
int Total_Bytes = (m + 2) * (n + 2) * sizeof(double);
WIDTH = m + 2;
E = alloc2D(m + 2, n + 2);
E_prev = alloc2D(m + 2, n + 2);
R = alloc2D(m + 2, n + 2);
// Allocate space on the host (PINNED Memory)
// NOTE(review): only the last cudaMallocHost status is checked; failures of
// the first two allocations go unreported.
cudaError_t status = cudaMallocHost(&E_1D, Total_Bytes);
status = cudaMallocHost(&E_prev_1D, Total_Bytes);
status = cudaMallocHost(&R_1D, Total_Bytes);
if (status != cudaSuccess) {
printf("Error allocating pinned host memory\n");
}
// Allocate space on the GPU
cudaMalloc(&d_E_1D, Total_Bytes);
cudaMalloc(&d_E_prev_1D, Total_Bytes);
cudaMalloc(&d_R_1D, Total_Bytes);
int col, row;
// Initialization
for (row = 1; row <= m; row++)
{
for (col = 1; col <= n; col++)
{
E_prev[row][col] = 0;
R[row][col] = 0;
E_prev_1D[row * WIDTH + col] = 0;
R_1D[row * WIDTH + col] = 0;
}
}
// Right half of the domain starts excited (E_prev = 1)
for (row = 1; row <= m; row++)
{
for (col = n / 2 + 1; col <= n; col++)
{
E_prev[row][col] = 1.0;
E_prev_1D[row * WIDTH + col] = 1.0;
}
}
// Bottom half of the domain starts recovered (R = 1)
for (row = m / 2 + 1; row <= m; row++)
{
for (col = 1; col <= n; col++)
{
R[row][col] = 1.0;
R_1D[row * WIDTH + col] = 1.0;
}
}
double dx = 1.0 / n;
// For time integration, these values shouldn't change
double rp = kk * (b + 1) * (b + 1) / 4;
double dte = (dx * dx) / (d * 4 + ((dx * dx)) * (rp + kk));
double dtr = 1 / (epsilon + ((M1 / M2) * rp));
double dt = (dte < dtr) ? 0.95 * dte : 0.95 * dtr;
double alpha = d * dt / (dx * dx);
int devId = 0;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, devId);
printf("\n ******** Device : %s **********\n", prop.name);
cout << "Simulation Version : " << version<<endl;
cout << "Block Size :"<< TILE_DIM <<endl;
cout << "Grid Size : " << n << endl;
cout << "Duration of Sim : " << T << endl;
cout << "Time step dt : " << dt << endl;
cout << "Process geometry: " << px << " x " << py << endl;
if (no_comm)
{
cout << "Communication : DISABLED" << endl;
}
cout << endl;
// Start the timer
//double t0 = getTime();
// Simulated time is different from the integer timestep number
// Simulated time
double t = 0.0;
// Integer timestep number
int niter = 0;
const dim3 block_size(TILE_DIM, TILE_DIM);
// NOTE(review): WIDTH / TILE_DIM truncates; when WIDTH (= n + 2) is not a
// multiple of TILE_DIM the last partial tile of the domain is never
// launched (e.g. n = 200 -> WIDTH = 202 leaves rows/cols 193..201
// uncovered) — confirm intended grid sizes.
const dim3 num_blocks(WIDTH / block_size.x, WIDTH / block_size.y);
cudaMemcpy(d_R_1D, R_1D, Total_Bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_E_prev_1D, E_prev_1D, Total_Bytes, cudaMemcpyHostToDevice);
// very well done
//simV5(alpha, n, m, dt, WIDTH, &time_elapsed, d_E_1D, d_E_prev_1D, d_R_1D);
while (t < T)
{
t += dt;
niter++;
//mirrorBoundries(E_prev_1D, n, m, WIDTH);
//mirrorBoundries(E_prev_1D, d_E_prev_1D,n, m, WIDTH);
// Refresh the ghost cells on the device before each step
mirrorkernel<<<num_blocks, block_size>>>(d_E_prev_1D, n, m ,WIDTH);
cudaStreamSynchronize(0);
//cudaMemcpy(E_prev_1D, d_E_prev_1D, Total_Bytes, cudaMemcpyDeviceToHost);
switch (version){
case 1:
simV1(alpha, n, m, dt, WIDTH, &time_elapsed, d_E_1D, d_E_prev_1D, d_R_1D);
break;
case 2:
simV2(alpha, n, m, dt, WIDTH, &time_elapsed, d_E_1D, d_E_prev_1D, d_R_1D);
break;
case 3:
simV3(alpha, n, m, dt, WIDTH, &time_elapsed, d_E_1D, d_E_prev_1D, d_R_1D);
break;
case 4:
simV4(alpha, n, m, dt, WIDTH, &time_elapsed, d_E_1D, d_E_prev_1D, d_R_1D);
break;
// case 5:
// break;
case 0:
cout<<"\n Implement the Serial Version"<<endl;
break;
default:
cout<<"\nPlease Enter the Correct version"<<endl;
return 0;
}
//cudaMemcpy(d_E_prev_1D, d_E_1D, Total_Bytes, cudaMemcpyDeviceToDevice);
//swap current E with previous E
double **tmp = E;
E = E_prev;
E_prev = tmp;
// Pointer swap on the device buffers avoids a device-to-device copy
double *tmp2 = d_E_1D;
d_E_1D = d_E_prev_1D;
d_E_prev_1D = tmp2;
} //end of while loop
cudaMemcpy(E_prev_1D, d_E_prev_1D, Total_Bytes, cudaMemcpyDeviceToHost);
//double time_elapsed = getTime() - t0;
double Gflops = (double)(niter * (1E-9 * n * n) * 28.0) / time_elapsed;
double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0)) / time_elapsed;
cout << "Number of Iterations : " << niter << endl;
cout << "Elapsed Time (sec) : " << time_elapsed << endl;
cout << "Sustained Gflops Rate : " << Gflops << endl;
cout << "Sustained Bandwidth (GB/sec): " << BW << endl<< endl;
double mx;
double l2norm = stats(E_prev, m, n, &mx);
cout << "Max: " << mx << " L2norm: " << l2norm << endl;
l2norm = stats1D(E_prev_1D, m, n, &mx, WIDTH);
cout << "Max: " << mx << " L2norm: " << l2norm << " (1D or GPU)" <<endl;
// alloc2D packs the row-pointer table and the data into one malloc,
// so a single free() per matrix is correct.
free(E);
free(E_prev);
free(R);
cudaFreeHost(E_1D);
cudaFreeHost(E_prev_1D);
cudaFreeHost(R_1D);
cudaFree(d_E_1D);
cudaFree(d_E_prev_1D);
cudaFree(d_R_1D);
return 0;
}
// ************************************************ Kernels Start ***************************************
__global__ void mirrorkernel(double *E_prev_1D, const int n, const int m, const int WIDTH){
    /*
     * Fill the one-cell ghost padding with mirror boundary values so the
     * 5-point stencil can difference on the boundary of the computational box.
     * fix: originally every thread of a row (resp. column) wrote the same
     * ghost cell with the same value; restricting to col == 1 (resp. row == 1)
     * keeps exactly one writer per ghost cell and removes the redundant
     * global-memory traffic. Results are unchanged.
     */
    size_t row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    size_t col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row <= m && col == 1) {
        E_prev_1D[row * WIDTH + 0] = E_prev_1D[row * WIDTH + 2];
        E_prev_1D[row * WIDTH + (n + 1)] = E_prev_1D[row * WIDTH + (n - 1)];
    }
    if (col <= n && row == 1) {
        E_prev_1D[0 * WIDTH + col] = E_prev_1D[2 * WIDTH + col];
        E_prev_1D[(m + 1) * WIDTH + col] = E_prev_1D[(m - 1) * WIDTH + col];
    }
}
__global__ void simulate_version1_PDE(const double alpha, const int n, const int m, const double dt, double *E_1D, double *E_prev_1D, double *R_1D, const int WIDTH)
{
    // Diffusion (PDE) half-step: 5-point stencil on E_prev, result into E.
    // One thread per interior cell; extra threads past the domain bail out.
    const int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > m || col > n)
        return;
    const int idx = row * WIDTH + col;
    const double center = E_prev_1D[idx];
    E_1D[idx] = center + alpha * (E_prev_1D[idx + 1] + E_prev_1D[idx - 1] - 4 * center + E_prev_1D[idx + WIDTH] + E_prev_1D[idx - WIDTH]);
}
__global__ void simulate_version1_ODE(const double alpha, const int n, const int m, const double dt, double *E_1D, double *E_prev_1D, double *R_1D, const int WIDTH)
{
    // Reaction (ODE) half-step: updates E in place, then R using the new E
    // (matching the original's store-then-reload ordering).
    const int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > m || col > n)
        return;
    const int idx = row * WIDTH + col;
    const double rOld = R_1D[idx];
    const double eOld = E_1D[idx];
    const double eNew = eOld - dt * (kk * eOld * (eOld - a) * (eOld - 1) + eOld * rOld);
    E_1D[idx] = eNew;
    R_1D[idx] = rOld + dt * (epsilon + M1 * rOld / (eNew + M2)) * (-rOld - kk * eNew * (eNew - b - 1));
}
// checkpoint 2
__global__ void simulate_version2(const double alpha, const int n, const int m, const double dt, double *E_1D, double *E_prev_1D, double *R_1D, const int WIDTH)
{
    // Fused step: 5-point diffusion on E_prev followed by the reaction ODEs,
    // one cell per thread.
    const int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > m || col > n)
        return;
    const int idx = row * WIDTH + col;
    // PDE (diffusion) on the previous excitation field
    double eNew = E_prev_1D[idx] + alpha * (E_prev_1D[idx + 1] + E_prev_1D[idx - 1] - 4 * E_prev_1D[idx] + E_prev_1D[idx + WIDTH] + E_prev_1D[idx - WIDTH]);
    E_1D[idx] = eNew;
    // ODE (reaction) terms, reusing the freshly computed E value
    eNew = eNew - dt * (kk * eNew * (eNew - a) * (eNew - 1) + eNew * R_1D[idx]);
    E_1D[idx] = eNew;
    R_1D[idx] = R_1D[idx] + dt * (epsilon + M1 * R_1D[idx] / (eNew + M2)) * (-R_1D[idx] - kk * eNew * (eNew - b - 1));
}
// checkpoint 1
__global__ void simulate_version3(const double alpha, const int n, const int m, const double dt, double *E_1D, double *E_prev_1D, double *R_1D, const int WIDTH)
{
    // Fused PDE+ODE step keeping all per-cell temporaries in registers;
    // each global cell is read/written exactly once.
    const int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (row > m || col > n)
        return;
    const int idx = row * WIDTH + col;
    const double ePrev = E_prev_1D[idx];
    double rVal = R_1D[idx];
    // Diffusion (PDE) on the previous excitation field
    double eVal = ePrev + alpha * (E_prev_1D[idx + 1] + E_prev_1D[idx - 1] - 4 * ePrev + E_prev_1D[idx + WIDTH] + E_prev_1D[idx - WIDTH]);
    // Reaction (ODE) terms
    eVal = eVal - dt * (kk * eVal * (eVal - a) * (eVal - 1) + eVal * rVal);
    rVal = rVal + dt * (epsilon + M1 * rVal / (eVal + M2)) * (-rVal - kk * eVal * (eVal - b - 1));
    E_1D[idx] = eVal;
    R_1D[idx] = rVal;
}
// Version 4: fused PDE+ODE update that stages the E_prev tile plus a one-cell
// halo in shared memory. Each block loads a (TILE_DIM+2)^2 tile; the last
// thread column/row of each block additionally fetches the left/right and
// top/bottom halo cells for the whole tile.
// NOTE(review): the halo loads below are NOT bounds-checked against the
// global array, so the launch grid must not extend past WIDTH (the callers
// use WIDTH / TILE_DIM blocks, which truncates) — confirm before enlarging
// the grid.
__global__ void simulate_version4(const double alpha, const int n, const int m, const double dt, double *E_1D, double *E_prev_1D, double *R_1D, const int WIDTH)
{
// __shared__ double tempR[(TILE_DIM + 2)*(TILE_DIM + 2)];
__shared__ double tempE_prev[(TILE_DIM + 2)*(TILE_DIM + 2)];
size_t LocalWidth = TILE_DIM + 2;
// Global Indexing
size_t row = blockIdx.y * blockDim.y + threadIdx.y + 1;
size_t col = blockIdx.x * blockDim.x + threadIdx.x + 1;
size_t index = row * WIDTH + col;
size_t local_index = (threadIdx.y + 1)* LocalWidth + threadIdx.x + 1;
// copy all
if (row >= 1 && row <= m && col >= 1 && col <= n ){
tempE_prev[local_index] = E_prev_1D[index];
}
// copy Right & Left
if (threadIdx.x + 1 == TILE_DIM){
tempE_prev[local_index+1] = E_prev_1D[index+1];
tempE_prev[local_index-TILE_DIM] = E_prev_1D[index-TILE_DIM];
}
// copy Up & Down
if (threadIdx.y + 1== TILE_DIM){
tempE_prev[local_index + LocalWidth] = E_prev_1D[index + WIDTH];
tempE_prev[local_index - TILE_DIM*LocalWidth] = E_prev_1D[index - TILE_DIM*WIDTH];
}
// Make sure all threads get to this point before proceeding!
__syncthreads(); // This will syncronize threads in a block
if (row >= 1 && row <= m && col >= 1 && col <= n)
{
double e_temp;
double r_temp = R_1D[index];
// PDE
e_temp = tempE_prev[local_index] + alpha * (tempE_prev[local_index + 1] + tempE_prev[local_index- 1] - 4 * tempE_prev[local_index] + tempE_prev[local_index + LocalWidth] + tempE_prev[local_index- LocalWidth]);
//ODE
e_temp = e_temp - dt * (kk * e_temp * (e_temp - a) * (e_temp - 1) + e_temp * r_temp);
r_temp = r_temp + dt * (epsilon + M1 * r_temp / (e_temp + M2)) * (-r_temp - kk * e_temp * (e_temp - b - 1));
E_1D[index] = e_temp;
R_1D[index] = r_temp;
}
}
void simV1(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D)
{
    // Version 1: separate PDE and ODE kernels. Accumulates wall time into *time.
    const dim3 block_size(TILE_DIM, TILE_DIM);
    // fix: ceil-divide so the grid covers the full WIDTH x WIDTH domain even
    // when WIDTH (= n + 2) is not a multiple of TILE_DIM; plain division left
    // the last partial tile (e.g. n = 200 -> rows/cols 193..201) unprocessed.
    // The kernels bounds-check every access, so over-coverage is safe.
    const dim3 num_blocks((WIDTH + TILE_DIM - 1) / TILE_DIM, (WIDTH + TILE_DIM - 1) / TILE_DIM);
    // Start the timer
    double t0 = getTime();
    simulate_version1_PDE<<<num_blocks, block_size>>>(alpha, n, m, dt, d_E_1D, d_E_prev_1D, d_R_1D, WIDTH);
    simulate_version1_ODE<<<num_blocks, block_size>>>(alpha, n, m, dt, d_E_1D, d_E_prev_1D, d_R_1D, WIDTH);
    cudaStreamSynchronize(0);
    // end timer
    *time += getTime() - t0;
}
void simV2(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D)
{
    // Version 2: single fused PDE+ODE kernel. Accumulates wall time into *time.
    const dim3 block_size(TILE_DIM, TILE_DIM);
    // fix: ceil-divide so the last partial tile is launched when WIDTH is not
    // a multiple of TILE_DIM (the kernel bounds-checks all accesses).
    const dim3 num_blocks((WIDTH + TILE_DIM - 1) / TILE_DIM, (WIDTH + TILE_DIM - 1) / TILE_DIM);
    // Start the timer
    double t0 = getTime();
    simulate_version2<<<num_blocks, block_size>>>(alpha, n, m, dt, d_E_1D, d_E_prev_1D, d_R_1D, WIDTH);
    cudaStreamSynchronize(0);
    *time += getTime() - t0;
}
void simV3(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D)
{
    // Version 3: fused kernel with register temporaries. Accumulates wall time.
    const dim3 block_size(TILE_DIM, TILE_DIM);
    // fix: ceil-divide so the last partial tile is launched when WIDTH is not
    // a multiple of TILE_DIM (the kernel bounds-checks all accesses).
    const dim3 num_blocks((WIDTH + TILE_DIM - 1) / TILE_DIM, (WIDTH + TILE_DIM - 1) / TILE_DIM);
    // Start the timer
    double t0 = getTime();
    simulate_version3<<<num_blocks, block_size>>>(alpha, n, m, dt, d_E_1D, d_E_prev_1D, d_R_1D, WIDTH);
    cudaStreamSynchronize(0);
    *time += getTime() - t0;
}
void simV4(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D)
{
    // Version 4: fused kernel with a shared-memory tile. Accumulates wall time.
    // NOTE(review): WIDTH / TILE_DIM truncates, so the last partial tile is
    // skipped when WIDTH is not a multiple of TILE_DIM; the grid cannot simply
    // be rounded up here because simulate_version4's halo loads are not
    // bounds-checked — confirm intended sizes before changing this.
    const dim3 threadsPerBlock(TILE_DIM, TILE_DIM);
    const dim3 blocks(WIDTH / threadsPerBlock.x, WIDTH / threadsPerBlock.y);
    const double start = getTime();
    simulate_version4<<<blocks, threadsPerBlock>>>(alpha, n, m, dt, d_E_1D, d_E_prev_1D, d_R_1D, WIDTH);
    cudaStreamSynchronize(0);
    *time += getTime() - start;
}
//************************************************* Kernels End *****************************************
// --------------------------------------------- Optimaztion Start-------------------------------------------------
// Version 5 (experimental): runs the ENTIRE time loop inside one kernel launch.
// NOTE(review): __syncthreads() only synchronizes threads within a block, so
// the cross-block halo dependencies between iterations are NOT ordered here —
// results are only reliable for single-block launches. The call site in main
// is commented out, so this kernel appears unused; keep it that way unless a
// grid-wide barrier (cooperative launch) is introduced.
__global__ void simulate_version5(const double alpha, const int n, const int m, const double dt, double *E_1D, double *E_prev_1D, double *R_1D, const int WIDTH)
{
int RADIUS = 1;
int row = blockIdx.y * blockDim.y + threadIdx.y + RADIUS;
int col = blockIdx.x * blockDim.x + threadIdx.x + RADIUS;
int index = row * WIDTH + col;
double t = 0.0;
int niter = 0;
// Simulation duration is hard-coded to match the host driver's default T.
double T = 1000.0;
while (t < T) {
t += dt;
niter++;
// Mirror ghost cells (same scheme as mirrorkernel)
if (row <= m) {
E_prev_1D[row * WIDTH + 0] = E_prev_1D[row * WIDTH + 2];
E_prev_1D[row * WIDTH + (n + 1)] = E_prev_1D[row * WIDTH + (n - 1)];
}
if (col <= n) {
E_prev_1D[0 * WIDTH + col] = E_prev_1D[2 * WIDTH + col];
E_prev_1D[(m + 1) * WIDTH + col] = E_prev_1D[(m - 1) * WIDTH + col];
}
__syncthreads();
if (row >= 1 && row <= m && col >= 1 && col <= n)
{
// PDE
E_1D[row * WIDTH + col] = E_prev_1D[row * WIDTH + col] + alpha * (E_prev_1D[row * WIDTH + (col + 1)] + E_prev_1D[row * WIDTH + (col - 1)] - 4 * E_prev_1D[row * WIDTH + col] + E_prev_1D[(row + 1) * WIDTH + col] + E_prev_1D[(row - 1) * WIDTH + col]);
//ODE
E_1D[index] = E_1D[index] - dt * (kk * E_1D[index] * (E_1D[index] - a) * (E_1D[index] - 1) + E_1D[index] * R_1D[index]);
R_1D[index] = R_1D[index] + dt * (epsilon + M1 * R_1D[index] / (E_1D[index] + M2)) * (-R_1D[index] - kk * E_1D[index] * (E_1D[index] - b - 1));
// double *tmp2 = E_1D;
// E_1D = E_prev_1D;
// E_prev_1D = tmp2;
// Copy-back stands in for the host-side pointer swap
E_prev_1D[index] = E_1D[index];
}
//E_prev_1D[index] = E_1D[index];
//if (row == 1 && col == 1) {
//double *tmp2 = E_1D;
//E_1D = E_prev_1D;
//E_prev_1D = tmp2;
//}
__syncthreads();
}
}
void simV5(const double alpha, const int n, const int m, const double dt, int WIDTH, double* time, double *d_E_1D, double *d_E_prev_1D, double *d_R_1D)
{
    // Version 5: the whole time loop runs inside a single kernel launch.
    const dim3 block_size(TILE_DIM, TILE_DIM);
    // fix: ceil-divide so the grid covers the full WIDTH x WIDTH domain when
    // WIDTH is not a multiple of TILE_DIM (the kernel bounds-checks accesses).
    const dim3 num_blocks((WIDTH + TILE_DIM - 1) / TILE_DIM, (WIDTH + TILE_DIM - 1) / TILE_DIM);
    // Start the timer
    double t0 = getTime();
    simulate_version5<<<num_blocks, block_size>>>(alpha, n, m, dt, d_E_1D, d_E_prev_1D, d_R_1D, WIDTH);
    cudaStreamSynchronize(0);
    *time += getTime() - t0;
}
// --------------------------------------------- Optimation End -------------------------------------------------
//================================================== Utilities =========================================
// Mirror Ghost Boundries
void mirrorBoundries(double *E_prev_1D, double* d_E_prev_1D, const int n, const int m, const int WIDTH){
    // Host wrapper: push E_prev to the device, mirror the ghost cells there,
    // and pull the result back (round-trip variant kept for debugging).
    const dim3 block_size(TILE_DIM, TILE_DIM);
    // fix: ceil-divide so rows/columns in the last partial tile are mirrored
    // too when WIDTH is not a multiple of TILE_DIM.
    const dim3 num_blocks((WIDTH + TILE_DIM - 1) / TILE_DIM, (WIDTH + TILE_DIM - 1) / TILE_DIM);
    int Total_Bytes = WIDTH * WIDTH * sizeof(double);
    // Copy to GPU
    cudaMemcpy(d_E_prev_1D, E_prev_1D, Total_Bytes, cudaMemcpyHostToDevice);
    mirrorkernel<<<num_blocks, block_size>>>(d_E_prev_1D, n, m ,WIDTH);
    cudaMemcpy(E_prev_1D, d_E_prev_1D, Total_Bytes, cudaMemcpyDeviceToHost);
}
// Mirror Ghost Boundries
void mirrorBoundries(double *E_prev_1D, const int n, const int m, const int WIDTH)
{
    /*
     * CPU fallback: copy data from the boundary of the computational box into
     * the one-cell ghost padding using mirror boundaries, so the stencil can
     * difference on the box boundary.
     */
    // Left/right ghost columns mirror interior columns 2 and n-1.
    for (int row = 1; row <= m; row++)
    {
        E_prev_1D[row * WIDTH + 0] = E_prev_1D[row * WIDTH + 2];
        E_prev_1D[row * WIDTH + (n + 1)] = E_prev_1D[row * WIDTH + (n - 1)];
    }
    // Top/bottom ghost rows mirror interior rows 2 and m-1.
    for (int col = 1; col <= n; col++)
    {
        E_prev_1D[0 * WIDTH + col] = E_prev_1D[2 * WIDTH + col];
        E_prev_1D[(m + 1) * WIDTH + col] = E_prev_1D[(m - 1) * WIDTH + col];
    }
}
// Allocate a 2D array
// Allocate a 2D array
double **alloc2D(int m, int n)
{
    // Allocate an m x n matrix as one slab: the row-pointer table and the
    // data share a single malloc, so one free() releases everything.
    const int rows = m, cols = n;
    double **mat = (double **)malloc(sizeof(double *) * rows + sizeof(double) * rows * cols);
    assert(mat);
    double *data = (double *)(mat + rows);  // data region starts right after the pointer table
    for (int r = 0; r < rows; r++)
        mat[r] = data + r * cols;
    return (mat);
}
/* Reports statistics about the computation
These values should not vary (except to within roundoff)
when we use different numbers of processes to solve the problem
*/
double stats(double **E, int m, int n, double *_mx)
{
    // Normalized L2 norm and maximum over the interior region [1..m][1..n];
    // the max is returned through _mx, the norm as the return value.
    double maxVal = -1;
    double sumSq = 0;
    for (int r = 1; r <= m; r++)
    {
        for (int c = 1; c <= n; c++)
        {
            const double v = E[r][c];
            sumSq += v * v;
            if (v > maxVal)
                maxVal = v;
        }
    }
    *_mx = maxVal;
    return sqrt(sumSq / (double)(m * n));
}
double stats1D(double *E, int m, int n, double *_mx, int WIDTH)
{
    // Flat-layout counterpart of stats(): normalized L2 norm and max over the
    // interior cells of a row-major (WIDTH-strided) array.
    double maxVal = -1;
    double sumSq = 0;
    for (int r = 1; r <= m; r++)
    {
        for (int c = 1; c <= n; c++)
        {
            const double v = E[r * WIDTH + c];
            sumSq += v * v;
            if (v > maxVal)
                maxVal = v;
        }
    }
    *_mx = maxVal;
    return sqrt(sumSq / (double)(m * n));
}
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
double getTime()
{
    // Wall-clock time in seconds (microsecond resolution via gettimeofday);
    // returns -1 if the system call fails.
    struct timeval tv;
    struct timezone tz;
    if (gettimeofday(&tv, &tz) == -1)
    {
        std::cerr << "ERROR: Bad call to gettimeofday" << std::endl;
        return (-1);
    }
    return (double)tv.tv_sec + kMicro * (double)tv.tv_usec;
}
// Parse command-line options (short and long forms) into the reference
// parameters:
//   -n/--n         domain size
//   -x/--px        X processor geometry
//   -y/--py        Y processor geometry
//   -t/--tfinal    length of simulation, in simulated time units
//   -p/--plot      plot frequency for the excitation variable
//   -k/--nocomm    turn off communication
//   -o/--numthreads  number of OpenMP threads
void cmdLine(int argc, char *argv[], double &T, int &n, int &px, int &py, int &plot_freq, int &no_comm, int &num_threads)
{
    static struct option long_options[] = {
        {"n", required_argument, 0, 'n'},
        {"px", required_argument, 0, 'x'},
        {"py", required_argument, 0, 'y'},
        {"tfinal", required_argument, 0, 't'},
        {"plot", required_argument, 0, 'p'},
        {"nocomm", no_argument, 0, 'k'},
        {"numthreads", required_argument, 0, 'o'},
        {0, 0, 0, 0}, // BUG FIX: getopt_long requires a zero-filled terminator
    };
    // Process command line arguments. (The former outer for-loop over argc
    // was redundant: a single getopt_long loop consumes every option.)
    int c;
    while ((c = getopt_long(argc, argv, "n:x:y:t:kp:o:", long_options, NULL)) != -1)
    {
        switch (c)
        {
        // Size of the computational box
        case 'n':
            n = atoi(optarg);
            break;
        // X processor geometry
        case 'x':
            px = atoi(optarg);
            break; // BUG FIX: missing break made -x also overwrite py and T
        // Y processor geometry
        case 'y':
            py = atoi(optarg);
            break; // BUG FIX: missing break made -y also overwrite T
        // Length of simulation, in simulated time units
        case 't':
            T = atof(optarg);
            break;
        // Turn off communication
        case 'k':
            no_comm = 1;
            break;
        // Plot the excitation variable
        case 'p':
            plot_freq = atoi(optarg);
            break;
        // Number of OpenMP threads
        case 'o':
            num_threads = atoi(optarg);
            break;
        // Error
        default:
            printf("Usage: a.out [-n <domain size>] [-t <final time >]\n\t [-p <plot frequency>]\n\t[-px <x processor geometry> [-py <y proc. geometry] [-k turn off communication] [-o <Number of OpenMP threads>]\n");
            exit(-1);
        }
    }
}
22,161 | #include<bits/stdc++.h>
using namespace std;
// Element-wise sum of two matrices stored as arrays of device row
// pointers: one thread per row (threadIdx.x), one block per column.
__global__ void add(int * dev_a[], int * dev_b[], int * dev_c[])
{
    const int row = threadIdx.x;
    const int col = blockIdx.x;
    dev_c[row][col] = dev_a[row][col] + dev_b[row][col];
}
// Element-wise sum of two flattened matrices: one thread per element.
__global__ void add2(int * dev_a, int * dev_b, int * dev_c)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    dev_c[idx] = dev_a[idx] + dev_b[idx];
}
// Print a readable message (and optionally abort) when a CUDA call fails.
// BUG FIX: 'file' receives __FILE__, a string literal; binding a string
// literal to a non-const char* is ill-formed in modern C++, so the
// parameter is now const char*. Backward compatible for all callers.
inline void GPUassert(cudaError_t code, const char * file, int line, bool Abort=true)
{
    if (code != 0) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (Abort) exit(code);
    }
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
#define N 60
int aa[N][N];
int bb[N][N];
int cc[N][N];
// Compare two GPU matrix-addition layouts on N x N matrices:
//   1) array-of-device-row-pointers (one cudaMalloc per row, kernel 'add')
//   2) a single flattened N*N allocation (kernel 'add2')
// The reported times cover allocation + copies + launch on the host clock.
int main(void)
{
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++) aa[i][j] = 1, bb[i][j] = 2;
    int ts1 = clock();
    // Host-side table of device row pointers for A, then copied to d_a.
    int ** h_a = (int **)malloc(N * sizeof(int *));
    for(int i = 0; i < N; i++){
        GPUerrchk(cudaMalloc((void**)&h_a[i], N*sizeof(int)));
        GPUerrchk(cudaMemcpy(h_a[i], &aa[i][0], N*sizeof(int), cudaMemcpyHostToDevice));
    }
    int **d_a;
    GPUerrchk(cudaMalloc((void ***)&d_a, N * sizeof(int *)));
    GPUerrchk(cudaMemcpy(d_a, h_a, N*sizeof(int *), cudaMemcpyHostToDevice));
    int ** h_b = (int **)malloc(N * sizeof(int *));
    for(int i = 0; i < N; i++){
        GPUerrchk(cudaMalloc((void**)&h_b[i], N*sizeof(int)));
        GPUerrchk(cudaMemcpy(h_b[i], &bb[i][0], N*sizeof(int), cudaMemcpyHostToDevice));
    }
    int ** d_b;
    GPUerrchk(cudaMalloc((void ***)&d_b, N * sizeof(int *)));
    GPUerrchk(cudaMemcpy(d_b, h_b, N*sizeof(int *), cudaMemcpyHostToDevice));
    int ** h_c = (int **)malloc(N * sizeof(int *));
    for(int i = 0; i < N; i++){
        GPUerrchk(cudaMalloc((void**)&h_c[i], N*sizeof(int)));
    }
    int ** d_c;
    GPUerrchk(cudaMalloc((void ***)&d_c, N * sizeof(int *)));
    GPUerrchk(cudaMemcpy(d_c, h_c, N*sizeof(int *), cudaMemcpyHostToDevice));
    add<<<N,N>>>(d_a,d_b,d_c);
    int tf1 = clock();
    printf("time1: %.5lf\n", (tf1-ts1)/double(CLOCKS_PER_SEC)*1000);
    GPUerrchk(cudaPeekAtLastError());
    for(int i = 0; i < N; i++){
        GPUerrchk(cudaMemcpy(&cc[i][0], h_c[i], N*sizeof(int), cudaMemcpyDeviceToHost));
    }
    int ts2 = clock();
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **) &dev_a, N * N * sizeof(int));
    cudaMalloc((void **) &dev_b, N * N * sizeof(int));
    cudaMalloc((void **) &dev_c, N * N * sizeof(int));
    // BUG FIX: these buffers hold N*N elements (written as a[i*N+j] below)
    // but were allocated with 'new int[N]', overflowing the heap by
    // N*(N-1) ints each. Allocate the full N*N.
    int *a = new int[N * N], *b = new int[N * N], *c = new int[N * N];
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++) a[i * N + j] = aa[i][j], b[i * N + j] = bb[i][j];
    cudaMemcpy(dev_a, a, N * N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * N * sizeof(int), cudaMemcpyHostToDevice);
    add2<<<N,N>>>(dev_a,dev_b,dev_c);
    int tf2 = clock();
    printf("time2: %.5lf\n", (tf2-ts2)/double(CLOCKS_PER_SEC)*1000);
    GPUerrchk(cudaMemcpy(c, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost));
    // Release device and host resources before exiting.
    for(int i = 0; i < N; i++){
        cudaFree(h_a[i]); cudaFree(h_b[i]); cudaFree(h_c[i]);
    }
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    free(h_a); free(h_b); free(h_c);
    delete[] a; delete[] b; delete[] c;
    return cudaThreadExit();
}
|
22,162 | /*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the extension scheme
******************************************************************************/
// Pair of one-sided ENO derivatives at a grid node:
// sR is the right (forward)-biased derivative, sL the left (backward)-biased one.
typedef struct
{
    double sR;
    double sL;
} double_eno_derivative;
// Larger of two doubles.
__device__ inline
double max2(double x, double y)
{
    if (x < y)
        return y;
    return x;
}
// Smaller of two doubles.
__device__ inline
double min2(double x, double y)
{
    if (x < y)
        return x;
    return y;
}
// Minmod limiter: 0 on a sign change, otherwise the argument of
// smaller magnitude.
__device__ inline
double min_mod(double x, double y)
{
    if (x * y < 0)
        return 0.0;
    return (fabs(x) < fabs(y)) ? x : y;
}
// Sign of x as +/-1.0 (note: sign(0) is -1.0, matching the original).
__device__ inline
double sign(double x)
{
    if (x > 0)
        return 1.0;
    return -1.0;
}
// Convert a (row, col, page) subscript to a column-major linear index;
// out-of-range subscripts are clamped to the nearest valid boundary node.
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
    const int r = min2(rows - 1, max2(0, row_idx));
    const int c = min2(cols - 1, max2(0, col_idx));
    const int p = min2(pges - 1, max2(0, pge_idx));
    return p * rows * cols + c * rows + r;
}
/******************************************************************************
* calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
*
******************************************************************************/
/*
 * One-sided second-order ENO derivatives of a scalar field at node v0 of
 * the 5-point stencil [v4, v1, v0, v2, v3] (v1/v4 backward, v2/v3 forward).
 *   ds      : uniform grid spacing
 *   dis_f/b : distance to the forward/backward neighbor or to the boundary;
 *             a value different from ds means a boundary lies between the
 *             nodes, in which case the supplied boundary value
 *             v_forward/v_backward is used instead of the neighbor.
 * Returns sR (forward-biased) and sL (backward-biased) derivatives.
 */
__device__ inline
double_eno_derivative eno_derivative_field( double v4, double v1, double v0, double v2, double v3, double ds, double dis_f, double dis_b, double v_forward, double v_backward)
{
    double p2m;
    double_eno_derivative eno_d;
    // Second differences; minmod picks the smoother parabola.
    double p2 = v1 - 2.0 * v0 + v2;
    double p2r = v0 - 2.0 * v2 + v3;
    p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
    double v_f = (dis_f==ds) ? v2 : v_forward;   // boundary-aware forward value
    eno_d.sR = (v_f - v0) / dis_f - dis_f * p2m;
    double p2l = v0 - 2.0 * v1 + v4;
    p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
    double v_b = (dis_b==ds) ? v1 : v_backward;  // boundary-aware backward value
    eno_d.sL = (v0 - v_b) / dis_b + dis_b * p2m;
    return eno_d;
}
/*******************************************************************************
* calculate upwind normal with ENO scheme at a single point
* along a single direction
*******************************************************************************/
__device__ inline
double upwind_normal_point( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
    // One-sided ENO derivatives at v0 along one axis, with boundary-crossing
    // distances pr (right) and pl (left); the boundary value is taken as 0
    // when a crossing is present (pr/pl != ds).
    double p2m;
    double p2 = v1 - 2.0 * v0 + v2;
    double p2r = v0 - 2.0 * v2 + v3;
    p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
    double vr = (pr==ds) ? v2 : 0;
    double sR = (vr - v0) / pr - pr * p2m;
    double p2l = v0 - 2.0 * v1 + v4;
    p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
    double vl = (pl==ds) ? v1 : 0;
    double sL = (v0 - vl) / pl + pl * p2m;
    // NOTE(review): the upwind choice compares |vr| vs |vl| (the substituted
    // stencil values), not |sR| vs |sL| — confirm this is the intended
    // selection rule before modifying.
    return (fabs(vr) < fabs(vl)) ? sR : sL;
}
// Compute the upwind gradient components (nx, ny, nz) of the level-set
// field 'lsf' on a rows x cols x pges grid, one thread per node.
// xpr/xpl, ypf/ypb, zpu/zpd hold the distance to the neighbor or to the
// interface in each direction (== dx/dy/dz when no interface is nearby).
// Expects a 3-D launch covering the grid; out-of-range threads exit early.
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    // x-direction 5-point stencil (sub2ind clamps at the domain edges).
    int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
    int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
    nx[ind] = upwind_normal_point( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
    // y-direction stencil.
    int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
    int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
    ny[ind] = upwind_normal_point( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
    // z-direction stencil.
    int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
    int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
    int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
    nz[ind] = upwind_normal_point( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
}
// modify forward/backward(c_f/b) values at v0:[v4,v1,v0,v2,v3]
// if dis_b/f!=ds, then there is boundary nearby, backward/forward ENO is then constructed
// C(x) = c2*x^2 + c1*x + c0, c(-ds/2) = v0, c(ds/2) = v2 assuming boundary is between v0,v2
// and used to calculate c_b/f at boundary crossing nodes
// Interpolate field values at interface crossings adjacent to v0 using a
// minmod-limited quadratic C(x) = c2*x^2 + c1*x + c0 fitted through the
// stencil [v4,v1,v0,v2,v3]. Results are written through c_forward /
// c_backward; each stays 0 when no interface lies in that direction
// (dis_f/dis_b == ds).
__device__ inline
void cubic_eno_interp(double & c_forward, double & c_backward, double dis_f, double dis_b, double ds, double v4, double v1, double v0, double v2, double v3)
{
    c_forward = 0;
    c_backward = 0;
    // If there is a boundary in the forward direction: fit C so that
    // C(-ds/2) = v0 and C(ds/2) = v2 (boundary assumed between v0 and v2).
    if(dis_f!=ds){
        double p2 = v2 - 2.0 * v0 + v1;
        double p2r = v3 - 2.0 * v2 + v0;
        double c2 = 0.5 * min_mod(p2,p2r) / pow(ds,2); // coefficient for second order term
        double c1 = (v2 - v0) / ds; // coefficient for the linear term
        double c0 = (v2 + v0) / 2 - c2 * pow(ds,2) / 4; // constant term
        double x = dis_f - ds / 2; // coordinate of the boundary point
        c_forward = c2 * x * x + c1 * x + c0; // interpolated value at the right boundary
        // The formula degrades gracefully to linear interpolation when c2
        // is very small.
    }
    // If there is a boundary in the backward direction (between v1 and v0).
    if(dis_b!=ds){
        double p2 = v2 - 2.0 * v0 + v1;
        double p2l = v0 - 2.0 * v1 + v4;
        double c2 = 0.5 * min_mod(p2,p2l) / pow(ds,2);
        double c1 = (v0 - v1) / ds;
        double c0 = (v1 + v0) / 2 - c2 * pow(ds,2) / 4;
        double x = ds / 2 - dis_b;
        c_backward = c2 * x * x + c1 * x + c0;
    }
}
// interpolate values at boundary points
// For every grid node, interpolate the field value at any nearby interface
// crossing in all six directions (right/left, front/back, up/down) via
// cubic_eno_interp, writing the results into cpr/cpl/cpf/cpb/cpu/cpd.
// Expects a 3-D launch covering the rows x cols x pges grid.
__global__
void boundary_interpolate(double * cpr, double * cpl, double * cpf, double * cpb, double * cpu, double * cpd, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    // x-direction stencil (indices clamped at domain edges by sub2ind).
    int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
    int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
    cubic_eno_interp(cpr[ind],cpl[ind],xpr[ind],xpl[ind],dx,lsf[left2],lsf[left],lsf[ind],lsf[right],lsf[right2]);
    // y-direction stencil.
    int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
    int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
    cubic_eno_interp(cpf[ind],cpb[ind],ypf[ind],ypb[ind],dy,lsf[back2],lsf[back],lsf[ind],lsf[front],lsf[front2]);
    // z-direction stencil.
    int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
    int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
    int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
    cubic_eno_interp(cpu[ind],cpd[ind],zpu[ind],zpd[ind],dz,lsf[down2],lsf[down],lsf[ind],lsf[up],lsf[up2]);
}
// calculate extend step
// now lsf represents a scalar field (not the level set function)
// One upwind advection step for extending a scalar field 'lsf' along the
// velocity (vx, vy, vz): step[ind] = deltat * (v . grad lsf) with
// direction-by-direction upwinding (min2/max2 select the one-sided ENO
// derivative that matches the sign of each velocity component).
// Expects a 3-D launch covering the rows x cols x pges grid.
__global__
void extend_step(double * step, double const * deltat, double const * lsf, double const * vx, double const * vy, double const * vz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * cpr, double const * cpl, double const * cpf, double const * cpb, double const * cpu, double const * cpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    // x-direction one-sided derivatives (boundary-aware via xpr/xpl, cpr/cpl).
    int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
    int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dx = eno_derivative_field( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], dx, xpr[ind], xpl[ind], cpr[ind], cpl[ind]);
    double xR = eno_dx.sR;
    double xL = eno_dx.sL;
    // y-direction.
    int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
    int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dy = eno_derivative_field( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], dy, ypf[ind], ypb[ind], cpf[ind], cpb[ind]);
    double yF = eno_dy.sR;
    double yB = eno_dy.sL;
    // z-direction.
    int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
    int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
    int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
    double_eno_derivative eno_dz = eno_derivative_field( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], dz, zpu[ind], zpd[ind], cpu[ind], cpd[ind]);
    double zU = eno_dz.sR;
    double zD = eno_dz.sL;
    // Upwind combination: negative velocity takes the forward derivative,
    // positive velocity the backward one.
    step[ind] = (min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
                 min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
                 min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD ) * deltat[ind];
}
|
22,163 | #include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
// Each thread prints the index of the block it belongs to.
__global__
void hello()
{
    printf("hello world, I am a thread in block %d\n",blockIdx.x);
}
int main(int argc, char **argv)
{
    // Launch the kernel: NUM_BLOCKS blocks of BLOCK_WIDTH threads each.
    hello<<<NUM_BLOCKS,BLOCK_WIDTH>>>();
    // Device printf output is only flushed once the host synchronizes.
    cudaDeviceSynchronize();
    printf("That's all!\n");
    return 0;
}
|
22,164 | // Testing class objects passing
// Author: alpha74
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// Simple aggregate holding two mark components, default-initialized to zero.
class Marks
{
public:
    int m1;
    int m2;
    // Default ctor: member-initializer list instead of body assignments.
    Marks() : m1(0), m2(0) {}
};
const int N = 10;
// One block per element: store m1 + m2 of A[tid] into sum[tid].
__global__ void add(Marks *A, int * sum)
{
    const int tid = blockIdx.x;
    printf("\n blockIdx.x = %d", tid);
    if (tid >= N)
        return; // guard clause instead of the nested if
    printf("\n tid: %d", tid);
    sum[tid] = A[tid].m1 + A[tid].m2;
}
// Host driver: builds N Marks objects, copies them to the device, launches
// one block per element to sum m1+m2, and prints the per-element sums.
int main()
{
    Marks A[ N ];
    Marks *dev_A;
    int sum[N];
    int *dev_sum; // Pointer to hold sum
    // Device memory allocation
    cudaMalloc((void**)&dev_A, N * sizeof(Marks));
    cudaMalloc((void**)&dev_sum, N * sizeof(int));
    // Initializing marks
    for (int i = 0; i < N; i++)
    {
        A[i].m1 = 9;
        A[i].m2 = 8;
    }
    // Copy contents of A to dev_A. Copying the objects byte-wise is safe
    // here because Marks holds only plain ints (no virtual functions).
    cudaMemcpy(dev_A, A, N * sizeof(Marks), cudaMemcpyHostToDevice);
    add <<< N, 1 >>> (dev_A, dev_sum);
    // The blocking cudaMemcpy also synchronizes with the kernel above.
    cudaMemcpy(sum, dev_sum, N * sizeof(int), cudaMemcpyDeviceToHost);
    cout << "\n Sums: \n ";
    for (int i = 0; i < N; i++)
        cout << "\n " << i + 1 << ": " << sum[i];
    // Freeing device memory
    cudaFree(dev_A);
    cudaFree(dev_sum);
}
|
22,165 | #include<stdio.h>
// Host-only function; runs on the CPU.
void CPUFunction()
{
    printf("This function is defined to run on the CPU.\n");
}
// Kernel: every launched thread executes all three printf calls.
__global__ void GPUFunction()
{
    printf("This function is defined to run on the GPU.\n");
    printf("This function is defined to run on the GPU.\n");
    printf("This function is defined to run on the GPU.\n");
}
int main()
{
    CPUFunction();
    // 10 blocks x 120 threads; each of the 1200 threads runs the kernel body.
    GPUFunction <<<10, 120>>>();
    // Block until the device finishes so its printf output is flushed.
    cudaDeviceSynchronize();
}
|
22,166 | #include <cstdio>
// Kernel: each thread prints a greeting.
__global__ void helloWorldKernel() {
    printf("Hello World from GPU\n");
}
// Kernel: each thread prints its block and thread indices.
__global__ void helloWorldwithThreadKernel() {
    printf("Hello World from GPU block: %d thread: %d\n", blockIdx.x, threadIdx.x);
}
int main() {
    std::printf("Hello World from CPU\n");
    std::printf("--------------------------------\n");
    // 1 block of 10 threads.
    helloWorldKernel<<<1, 10>>>();
    // Sync so device printf output appears before the next separator.
    cudaDeviceSynchronize();
    std::printf("--------------------------------\n");
    // 4 blocks of 2 threads (8 lines, block/thread order unspecified).
    helloWorldwithThreadKernel<<<4, 2>>>();
    cudaDeviceSynchronize();
    std::printf("--------------------------------\n");
    return 0;
}
|
22,167 | #include <stdio.h>
#include "VecAdd_kernel.cu"
// Exercise skeleton: host arrays are initialized; the student fills in the
// device allocation, transfers, launch configuration, and cleanup (marked
// "YOUR TASK"). Verification sums h_C and compares against 3*N.
int main(int argc, char *argv[])
{
    int N = 100;
    unsigned int size;
    float *d_A, *d_B, *d_C;
    float *h_A, *h_B, *h_C;
    /****************************
     * Initialization of memory *
     ****************************/
    size = N * sizeof(float);
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);
    h_C = (float *) malloc(size);
    for (unsigned i=0; i<N; i++) {
        h_A[i] = 1.0f;
        h_B[i] = 2.0f;
        h_C[i] = 0.0f;
    }
    // YOUR TASKS:
    // - Allocate below device arrays d_A, d_B and d_C
    // - Transfer array data from host to device arrays
    // Insert code below this line.
    /****************************
     * GPU execution            *
     ****************************/
    // YOUR TASK:
    // - Define below the number of threads per block and blocks per grid
    // Update the two lines below this line.
    int threadsPerBlock = 0;
    int blocksPerGrid = 0;
    VecAdd_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B,d_C,N);
    cudaThreadSynchronize();
    // YOUR TASK:
    // - Transfer data results stored in d_C to host array
    // Insert code below this line.
    /****************************
     * Verification             *
     ****************************/
    float sum = 0.0f;
    for (unsigned i=0; i<N; i++) {
        sum += h_C[i];
    }
    printf("Vector addition\n");
    // BUG FIX: the old check used abs(), which is the *integer* overload
    // here — it truncated the float difference toward zero and could report
    // PASSED for wrong results. Compute the absolute difference in float.
    float diff = sum - 3.0f * (float) N;
    if (diff < 0.0f) diff = -diff;
    if (diff <= 1e-10)
    {
        printf("PASSED!\n");
    }
    else
    {
        printf("FAILED!\n");
    }
    /****************************
     * Cleaning memory          *
     ****************************/
    // YOUR TASK:
    // - Free device memory for the allocated d_A, d_B and d_C arrays
    // Insert code below this line.
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
22,168 | #include "includes.h"
// Multivariate sliding-window Euclidean distance between windows of S and
// the template T, one thread per (training sample, window offset) pair.
// gm == 0: stage T into dynamic shared memory (launch with
// dimensions*window_size floats of shared memory); otherwise read T
// directly from global memory.
__global__ void rMD_ED_D(float *S, float *T, int window_size, int dimensions, float *data_out, int trainSize, int gm) {
    long long int i, j, p;
    float sumErr = 0, dd = 0;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (gm == 0) {
        extern __shared__ float T2[];
        // offset into the training set for this thread's window
        int s = dimensions * 2 * window_size * (idx / window_size);
        int t = s + idx % window_size;
        // Thread 0 of each block stages the template into shared memory.
        if (threadIdx.x == 0) {
            for (i = 0; i < dimensions; i++)
                for (j = 0; j < window_size; j++)
                    T2[window_size * i + j] = T[window_size * i + j];
        }
        // BUG FIX: the out-of-range guard used to 'return' BEFORE this
        // barrier, so tail threads skipped __syncthreads() while in-range
        // threads waited — a divergent barrier, which is undefined
        // behavior. All threads now reach the barrier, then the tail exits.
        __syncthreads();
        if (idx >= (trainSize * window_size))
            return;
        for (j = 0; j < window_size; j++) {
            dd = 0;
            for (p = 0; p < dimensions; p++)
                dd += (S[(t + p * 2 * window_size) + j] - T2[(p * window_size) + j]) *
                      (S[(t + p * 2 * window_size) + j] - T2[(p * window_size) + j]);
            sumErr += dd;
        }
        data_out[idx] = sqrt(sumErr);
    } else {
        int s = dimensions * 2 * window_size * (idx / window_size);
        int t = s + idx % window_size;
        if (idx >= (trainSize * window_size))
            return;
        for (j = 0; j < window_size; j++) {
            dd = 0;
            for (p = 0; p < dimensions; p++)
                dd += (S[(t + p * 2 * window_size) + j] - T[(p * window_size) + j]) *
                      (S[(t + p * 2 * window_size) + j] - T[(p * window_size) + j]);
            sumErr += dd;
        }
        data_out[idx] = sqrt(sumErr);
    }
}
22,169 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
#include <iostream>
// nvcc -std=c++14 -O3 tarefa2.cu -o t2 && ./t2
// Functor that replaces each element with a draw from 'dist' using its own
// copy of the engine 'rng'.
// NOTE(review): thrust copies the functor for device execution, so every
// invocation may start from the same engine state and produce identical
// values for all elements — verify the output, and consider deriving the
// per-element state from a counting iterator if distinct values are needed.
struct fillRand
{
    thrust::uniform_real_distribution<double> dist;
    thrust::minstd_rand rng;
    fillRand(thrust::uniform_real_distribution<double> dist, thrust::minstd_rand rng) : dist(dist), rng(rng) {}
    __host__ __device__ double operator()(const double &x)
    {
        return dist(rng);
    }
};
// Fill a device vector with uniform(25, 40) draws seeded from stdin, then
// copy it to the host and print it.
int main()
{
    int seed;
    std::cin >> seed;
    // default_random_engine is currently an alias for minstd_rand, and may change in a future version.
    thrust::minstd_rand rng(seed);
    // thrust::uniform_int_distribution<int> dist(-7, 13);
    thrust::uniform_real_distribution<double> dist(25, 40);
    thrust::device_vector<double> vetor(10, 0);
    thrust::transform(vetor.begin(), vetor.end(), vetor.begin(), fillRand(dist, rng));
    // for (auto i = vetor.begin(); i != vetor.end(); i++)
    //     std::cout << *i << " "; // this access is slow! -- element-wise reads from GPU memory
    // printf("\n");
    thrust::host_vector<double> host(vetor);
    for (auto i = host.begin(); i != host.end(); i++)
        std::cout << *i << " "; // this access is fast -- data already on the CPU
    printf("\n");
}
|
22,170 | #include "includes.h"
// Accumulate the first-layer weight gradient for one (input j, hidden k)
// pair. The launch is 2-D: x covers input units, y covers hidden units.
// The backpropagated error for hidden unit k sums over the 10 output units;
// delta applies the sigmoid derivative at the hidden activation.
// PERF FIX: the minibatch scale factor was the double expression
// (60000.0/10.0), which promoted the whole update to double precision;
// 6000.0f is exactly equal and keeps the arithmetic in float.
__global__ void backProp1(float* in, float* dsyn1, float* layer1, float* syn2, float* label, float* out)
{
    int j = blockDim.x*blockIdx.x + threadIdx.x;
    int k = blockDim.y*blockIdx.y + threadIdx.y;
    float error = 0.0f;
    #pragma unroll
    for (int l=0; l < 10; ++l)
        error += (label[l] - out[l]) * syn2[k*10 + l];
    float delta = error * (layer1[k]*(1.0f-layer1[k]));
    // Row stride 128 — assumes the hidden layer has 128 units; TODO confirm
    // against the launch configuration.
    dsyn1[j*128 + k] += delta * in[j] / 6000.0f;
}
22,171 | #include "includes.h"
// Advance each body's position by vel * dt (explicit Euler step).
// BUG FIX: the guard was 'index > numBodies', which let the thread with
// index == numBodies read and write one element past the end of both
// arrays; the correct out-of-range test is >=.
__global__ void r_step( float4 *__restrict__ devPos, float4 *__restrict__ deviceVel, unsigned int numBodies, float dt)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if ((unsigned int)index >= numBodies) {return;};
    devPos[index].x += deviceVel[index].x * dt;
    devPos[index].y += deviceVel[index].y * dt;
    devPos[index].z += deviceVel[index].z * dt;
}
22,172 | #if __CUDA_ARCH__ < 600
#define atomicMin_block(X,Y) atomicMin(X,Y)
#define atomicAdd_block(X,Y) atomicAdd(X,Y)
#endif
// Micro-benchmark: device-scope atomic add on global memory.
__global__
void glo(int * x, int * y) {
    atomicAdd(x+3,1);
}
// Micro-benchmark: block-scope atomic add on global memory
// (falls back to atomicAdd on pre-SM60 via the macro above).
__global__
void blo(int * x, int * y) {
    atomicAdd_block(x+3,1);
}
// Micro-benchmark: device-scope atomic add on shared memory.
// NOTE(review): c is uninitialized and c[3] is read back without a
// barrier after the atomic — presumably only the atomic's throughput is
// being measured, not the stored value; confirm before relying on x[3].
__global__
void sha(int * x, int * y) {
    __shared__ int c[1024];
    atomicAdd(c+3,1);
    x[3] = c[3];
}
// Micro-benchmark: block-scope atomic add on shared memory.
// NOTE(review): same caveat as sha() — c is uninitialized and read
// without a barrier; the stored x[3] value is not meaningful.
__global__
void shablo(int * x, int * y) {
    __shared__ int c[1024];
    atomicAdd_block(c+3,1);
    x[3] = c[3];
}
|
22,173 | #include "includes.h"
// Pairwise softened potential energy over the field particles: grid-stride
// over j, inner sum over i < j so each pair is counted once. Each block
// reduces its threads' partial sums in dynamic shared memory (launch with
// blockDim.x floats) and writes one partial result per block; the host is
// expected to sum partial_results afterwards.
// NOTE(review): the tree reduction halves blockDim.x each step, so it
// assumes blockDim.x is a power of two — confirm at the launch site.
__global__ void dev_get_potential_energy( float *partial_results, float eps2, float *field_m, float *fxh, float *fyh, float *fzh, float *fxt, float *fyt, float *fzt, int n_field) {
    extern __shared__ float thread_results[];
    unsigned int i, j;
    float dx, dy, dz, r, dr2, potential_energy = 0;
    for (j=threadIdx.x + blockIdx.x*blockDim.x; j < n_field; j += blockDim.x*gridDim.x){
        for (i=0; i<j; i++){
            // Positions are stored as head + tail parts; sum both.
            dx = (fxh[i] - fxh[j]) + (fxt[i] - fxt[j]);
            dy = (fyh[i] - fyh[j]) + (fyt[i] - fyt[j]);
            dz = (fzh[i] - fzh[j]) + (fzt[i] - fzt[j]);
            dr2 = dx*dx + dy*dy + dz*dz;
            r = sqrt(eps2 + dr2);   // eps2 softens the 1/r singularity
            potential_energy -= field_m[i]*field_m[j] / r;
        }
    }
    // Reduce results from all threads within this block.
    thread_results[threadIdx.x] = potential_energy;
    __syncthreads();
    for (i = blockDim.x/2; i>0; i>>=1) {
        if (threadIdx.x < i) {
            thread_results[threadIdx.x] += thread_results[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        partial_results[blockIdx.x] = thread_results[0];
    }
}
22,174 | #include <iostream>
#include <set>
#include <algorithm>
#include <assert.h>
#include "cuda_runtime.h"
using namespace std;
#define ITERATIONS (10000) //times of memory visit for each thread
#define KB (1024/sizeof(int))
#define MB (KB*1024)
#define MAX_NUM_THREADS (1024) // a block has maximal thread size
#define EXPER_TIME (10) //experiments are repeated 10 times
//kernel function
__global__ void strided_access(unsigned *arr, int length, int stride, bool record, unsigned *duration, unsigned *help); //used to attain the average cycle of the multi-threaded kernel
void TLB_latency(int N, int stride);
void generate_strided(unsigned *arr, int length, int stride);
//global variables
int numThreadsGlobal;
int numBlocksGlobal;
int dataSizeGlobal; //in MB
int pageSizeGlobal; //in KB
/*
* TLB latency: ./tlb_GPU blockSize gridSize pageSize_KB dataSize_MB
*
* others for TLB latency.
*/
int main(int argc, char* argv[]){
    // BUG FIX: argv[4] (dataSize_MB) is read below, so five argv entries
    // are required; the old check 'argc < 4' allowed a four-argument run
    // to read past the end of argv.
    if (argc < 5) {
        cerr<<"Shall provide the blockSize, gridSize used and page size."<<endl;
        cerr<<"Eg.: ./tlb_GPU bSize gSize pageSize_KB dataSize_MB"<<endl;
        exit(0);
    }
    numThreadsGlobal = atoi(argv[1]);
    numBlocksGlobal = atoi(argv[2]);
    pageSizeGlobal = atoi(argv[3]) * KB;   // KB/MB convert to element counts
    dataSizeGlobal = atoi(argv[4]) * MB;
    cudaSetDevice(0);
    cout<<"Latency: Data size: "<<(float)dataSizeGlobal/MB<<"MB\tbsize: "<<numThreadsGlobal<<"\tgsize: "<<numBlocksGlobal<<'\t';
    TLB_latency(dataSizeGlobal, pageSizeGlobal);
    cudaDeviceReset();
    return 0;
}
//multi-threaded kernels
// Pointer-chasing kernel: each thread follows the index chain in arr[] for
// ITERATIONS hops, accumulating the visited indices in 'anc' so the loads
// cannot be optimized away. When 'record' is set, the average cycles per
// access (minus the 16-cycle register-add overhead measured on K40m) is
// written to duration[gid]; 'help' receives the accumulator to keep it live.
__global__ void strided_access(unsigned *arr, int length, int stride, bool record, unsigned *duration, unsigned *help) {
    unsigned long timestamp;
    unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned curIdx = (gid* (stride+100)) % length; //adding an offset in case that only a few elements are accessed
    unsigned anc = 0;
    double total = 0;
    // Repeated visit over a fixed number of iterations, timed with clock64.
    timestamp = clock64();
    for (int i = 0; i < ITERATIONS; i++) {
        curIdx = arr[curIdx];
        anc += curIdx; //to ensure the curIdx has been read, this instruction is 16-cycle long on K40m
    }
    timestamp = clock64() - timestamp;
    total += timestamp;
    if (record) {
        duration[gid] = total/ITERATIONS-16; //deduce the register add instruction overhead
        help[gid] = anc;
    }
}
/*
* N: number of data elements
* stride: stride for strided-access, set as the page size
*/
/*
 * Measure average memory-access latency for a pointer-chasing array.
 * N: number of data elements; stride: intended chase stride (set to the
 * page size; see NOTE in generate_strided about the hard-coded step).
 * Runs EXPER_TIME repetitions of a warm-up launch plus a recording launch,
 * averages per-thread cycle counts on the host, and prints the result.
 */
void TLB_latency(int N, int stride) {
    cudaDeviceReset();
    cudaError_t error_id;
    unsigned *h_a, *d_a;
    unsigned *h_timeinfo, *d_timeinfo;
    unsigned *help;
    h_a = (unsigned*)malloc(sizeof(unsigned)*N);
    error_id = cudaMalloc ((void **) &d_a, sizeof(unsigned)*N);
    if (error_id != cudaSuccess) cerr<<"Error 1.0 is "<<cudaGetErrorString(error_id)<<endl;
    /* initialize array elements on CPU with pointers into d_a. */
    generate_strided(h_a,N,stride);
    /* copy array elements from CPU to GPU */
    error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned)*N, cudaMemcpyHostToDevice);
    if (error_id != cudaSuccess) cerr<<"Error 1.1 is "<<cudaGetErrorString(error_id)<<endl;
    h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
    error_id = cudaMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
    if (error_id != cudaSuccess) cerr << "Error 1.2 is " << cudaGetErrorString(error_id) << endl;
    error_id = cudaMalloc((void **) &help, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
    if (error_id != cudaSuccess) cerr << "Error 1.3 is " << cudaGetErrorString(error_id) << endl;
    cudaThreadSynchronize();
    dim3 Db = dim3(numThreadsGlobal);
    dim3 Dg = dim3(numBlocksGlobal);
    double total = 0;
    /* launch kernel */
    for (int e = 0; e < EXPER_TIME; e++) {
        // Warm-up pass (record=false), then the timed recording pass.
        strided_access<<<Dg, Db>>>(d_a, N, stride, false, NULL, NULL); //warp up
        strided_access<<<Dg, Db>>>(d_a, N, stride, true, d_timeinfo, help); //recording
        cudaThreadSynchronize();
        error_id = cudaGetLastError();
        if (error_id != cudaSuccess) cerr<<"Error kernel is "<<cudaGetErrorString(error_id)<<endl;
        /* copy results from GPU to CPU */
        cudaThreadSynchronize ();
        error_id = cudaMemcpy((void *)h_timeinfo, (void *)d_timeinfo, sizeof(unsigned)*numThreadsGlobal * numBlocksGlobal, cudaMemcpyDeviceToHost);
        if (error_id != cudaSuccess) cerr<<"Error 2.2 is "<<cudaGetErrorString(error_id)<<endl;
        double temp = 0; //here we use double, otherwise it will overflow
        for(int i = 0; i < numThreadsGlobal*numBlocksGlobal; i++) {
            temp += h_timeinfo[i];
        }
        temp /= (numThreadsGlobal*numBlocksGlobal);  // mean cycles per thread
        total += temp;
        cudaThreadSynchronize();
    }
    total /= EXPER_TIME;  // mean over repetitions
    cout<<"cycle: "<<total<<endl;
    /* free memory on GPU */
    cudaFree(help);
    cudaFree(d_a);
    cudaFree(d_timeinfo);
    /* free memory on CPU */
    free(h_a);
    free(h_timeinfo);
    cudaDeviceReset();
}
// Build the pointer-chasing array: arr[i] holds the next index to visit
// after i, wrapping modulo 'length'.
// NOTE(review): the 'stride' parameter is ignored — the commented-out line
// shows the original intent, but the step is hard-coded to 2048*1024/256
// (= 8192 elements). Confirm this override is an intentional experiment
// before relying on the parameter.
void generate_strided(unsigned *arr, int length, int stride) {
    for (int i = 0; i < length; i++) {
        //arr[i] = (i + stride) % length;
        arr[i] = (i + 2048*1024/256) % length;
    }
}
22,175 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define MAXBLOCKS 1
#define MAXTHREADS 10
//Helper method
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//__global__ (paralellized method)
// One thread per element: c[i] = a[i] + b[i].
// Assumes a single-block launch, so the thread index is the element index.
__global__ void VectorAdd(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Host driver: builds two MAXTHREADS-element vectors, adds them on the GPU
// via addWithCuda, prints the element-wise sums, and resets the device.
int main()
{
    int *a, *b, *c; //CPU
    //Allocate CPU memory
    a = (int*)malloc(MAXTHREADS*sizeof(int));
    b = (int*)malloc(MAXTHREADS*sizeof(int));
    c = (int*)malloc(MAXTHREADS*sizeof(int));
    for (int i = 0; i < MAXTHREADS; ++i) //Populate array
    {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    //Call "surrogate" method that owns all device-side work
    cudaError_t cudaStatus = addWithCuda(c, a, b, MAXTHREADS);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    //Display result
    printf("\nMAXTHREADS (%d) VECTOR ADDITION USING CUDA\n\n", MAXTHREADS);
    printf("c[i] = a[i] + b[i]\n");
    printf("======================================\n");
    for (int i = 0; i < MAXTHREADS; ++i)
        printf("a[%d] = %d, b[%d] = %d, c[%d] = %d\n", i, a[i], i, b[i], i, c[i]);
    //Free CPU memory
    free(a);
    free(b);
    free(c);
    //cudaDeviceReset must be called before exiting in order for profiling and
    //tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
//Helper/"surrogate" method for using CUDA to add vectors in parallel.
// Helper/"surrogate" method for using CUDA to add vectors in parallel:
// allocates device buffers, copies a and b to the device, launches
// VectorAdd with one block of 'size' threads, copies the result into c,
// and frees the device buffers. Returns the first CUDA error encountered
// (cudaSuccess on success); device memory is released on every path via
// the Error label.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *d_a = 0;//GPU
    int *d_b = 0;//GPU
    int *d_c = 0;//GPU
    cudaError_t cudaStatus;
    //Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    //Allocate GPU memory
    cudaStatus = cudaMalloc((void**)&d_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&d_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&d_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    //Copy data to GPU
    cudaStatus = cudaMemcpy(d_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(d_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    //Run GPU using MAXBLOCK number of blocks and size number of threads
    VectorAdd<<<MAXBLOCKS, size>>>(d_c, d_a, d_b);
    //Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    //cudaDeviceSynchronize waits for the kernel to finish, and returns
    //any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    //Copy result back to CPU
    cudaStatus = cudaMemcpy(c, d_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    //Free GPU memory (cudaFree(0) is a harmless no-op for unallocated ptrs)
    cudaFree(d_c);
    cudaFree(d_a);
    cudaFree(d_b);
    return cudaStatus;
}
|
22,176 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define IDX(i,j,cols,mbc) i*cols + j + mbc
__global__ void heat2d_update(int Nx, int Ny, int mbc, double dx, double dy,
double dt, double ***q, double*** qp);
__global__ void setup_arrays2d_cuda(int Nx, int Ny, int mbc,
double *qmem, double** qrows, double ***q);
/* Allocate a 2D array of (N+1+2*mbc) x (M+1+2*mbc) doubles in one
 * contiguous slab, plus a row-pointer table offset by mbc, so the caller
 * can index q[i][j] with i in [-mbc, N+mbc] and j in [-mbc, M+mbc]. */
void allocate_2d(int N, int M, int mbc, double ***q)
{
    int nrows = N + 1 + 2*mbc;
    int ncols = M + 1 + 2*mbc;

    /* One slab for the data, one table of row pointers into it. */
    double *slab = (double*) malloc(nrows*ncols*sizeof(double));
    double **rowtab = (double**) malloc(nrows*sizeof(double*));

    for(int r = 0; r < nrows; r++)
    {
        rowtab[r] = slab + ncols*r + mbc;  /* shift so column -mbc is valid */
    }
    *q = rowtab + mbc;                     /* shift so row -mbc is valid */
}
/* Release storage created by allocate_2d and null the caller's pointer.
 * The data slab starts at (*q)[-mbc][-mbc]; the row table at &(*q)[-mbc]. */
void delete_2d(int mbc, double ***q)
{
    double **rowtab = *q - mbc;   /* undo the row offset */
    free(rowtab[0] - mbc);        /* contiguous data slab */
    free(rowtab);                 /* row-pointer table */
    *q = NULL;
}
/* Initial condition */
/* Initial condition: Gaussian bump centered at the origin. */
double init(double x, double y)
{
    double r2 = x*x + y*y;
    return exp(-10*r2);
}
/* Driver for an explicit 2D heat-equation solver with ghost cells and a
 * CUDA update kernel. Reads Nx, Ny, nout from the command line, writes a
 * binary results file "heat2d.out" (metadata, then nout snapshots). */
int main(int argc, char** argv)
{
/* ------------------------------ Input parameters -------------------------------- */
/* NOTE(review): argv[1..3] are used without checking argc — running with
   fewer than 3 arguments is undefined behavior; verify callers. */
int Nx = atoi(argv[1]);
int Ny = atoi(argv[2]);
int nout = atoi(argv[3]);
/* Always print first and last time step, at a minimum */
nout = (nout < 2) ? 2 : nout;
double L = 1;
double Tfinal = 0.1;
/* --------------------------- Numerical parameters ------------------------------- */
/* Domain is the square [-L, L] x [-L, L]; dx, dy are the grid spacings. */
double ax = -L;
double bx = L;
double ay = -L;
double by = L;
double dx = (bx-ax)/Nx;
double dy = (by-ay)/Ny;
double dx2 = dx*dx;
double dy2 = dy*dy;
/* Write out meta data */
/* NOTE(review): fwrite's size/count arguments are swapped relative to the
   usual (ptr, size, nmemb) convention — here it writes sizeof(T) items of
   1 byte each, so the bytes emitted are identical. */
FILE *fout = fopen("heat2d.out","w");
fwrite(&Nx,1,sizeof(int),fout);
fwrite(&Ny,1,sizeof(int),fout);
fwrite(&ax,1,sizeof(double),fout);
fwrite(&bx,1,sizeof(double),fout);
fwrite(&ay,1,sizeof(double),fout);
fwrite(&by,1,sizeof(double),fout);
/* ---------------------------- Initialize solution ------------------------------- */
/* mbc = number of ghost-cell layers; q is indexed [-mbc..Nx+mbc][-mbc..Ny+mbc]. */
int mbc = 1;
double **q;
allocate_2d(Nx,Ny,mbc,&q);
/* Fill interior and ghost cells with the initial Gaussian profile. */
for(int i = -1; i <= Nx+1; i++)
{
double x = ax + i*dx;
for(int j = -1; j <= Ny+1; j++)
{
double y = ay + j*dy;
q[i][j] = init(x,y);
}
}
/* ----------------------------- Compute time step ---------------------------------*/
/* Compute a stable time step
1. Estimate a stable time step 'dt_stable'. This may not evenly divide Tfinal.
2. Compute a minimum number M of time steps we need to take.
3. Divide Tfinal by M to get get a dt that is guaranteed smaller than dt_est and
satisfies M*dt = Tfinal.
*/
double dsmin = (dx < dy) ? dx : dy;
double ds2 = dsmin*dsmin;
double dt_stable = 0.95*ds2/4; /* Stable time step */
int M = ceil(Tfinal/dt_stable) + 1; /* Compute M to guarantee we hit Tfinal */
double dt = Tfinal/M; /* dt <= dt_stable; M*dt = Tfinal */
/* More meta data */
fwrite(&M,1,sizeof(int),fout);
/*
Set up an array of 'n' values that tell us when to save our solution
so that we save exactly nout time steps at roughly equally spaced times.
*/
int *noutsteps = (int*) malloc(nout*sizeof(int));
double dM = ((double) M-1)/(nout-1);
dM = (dM < 1) ? 1 : dM;
for(int m = 0; m <= nout-1; m++)
{
noutsteps[m] = (int) floor(m*dM);
}
/* Output initial condition */
double t = 0;
int k = 0;
fwrite(&t,1,sizeof(double),fout);
/* Each row write relies on q's row-contiguous layout from allocate_2d. */
for(int i = 0; i <= Nx; i++)
{
fwrite(&q[i][0],Ny+1,sizeof(double),fout);
}
k++; /* Number of output files created */
/* ---------------------------- Setup CUDA arrays ----------------------------------*/
/* Mirror the host layout on the device: a flat slab, a row-pointer table,
   and a triple pointer, wired together by setup_arrays2d_cuda (device
   pointers can only be linked on the device, hence the <<<1,1>>> launches). */
int rows = (Nx + 1 + 2*mbc);
int cols = (Ny + 1 + 2*mbc);
double *dev_qmem, **dev_qrows, ***dev_q;
cudaMalloc( (void**) &dev_qmem, rows*cols*sizeof(double));
cudaMalloc( (void***) &dev_qrows, rows*sizeof(double*));
cudaMalloc( (void****) &dev_q, sizeof(double**));
setup_arrays2d_cuda<<<1,1>>>(Nx,Ny,mbc,dev_qmem, dev_qrows,dev_q);
double *dev_qpmem, **dev_qprows, ***dev_qp;
cudaMalloc( (void**) &dev_qpmem, rows*cols*sizeof(double));
cudaMalloc( (void***) &dev_qprows, rows*sizeof(double*));
cudaMalloc( (void****) &dev_qp, sizeof(double**));
setup_arrays2d_cuda<<<1,1>>>(Nx,Ny,mbc,dev_qpmem, dev_qprows,dev_qp);
/* --------------------------- Start time stepping ---------------------------------*/
/* Store q^{n+1} */
double **qp;
allocate_2d(Nx,Ny,mbc,&qp);
/* Time loop; compute q^{n+1} at each time step */
for(int n = 0; n <= M-1; n++)
{
t += dt;
/* No-flux boundary conditions */
/* Mirror interior values into the ghost layers before each update. */
for(int j = 0; j <= Ny; j++)
{
q[-1][j] = q[1][j];
q[Nx+1][j] = q[Nx-1][j];
}
for(int i = 0; i <= Nx; i++)
{
q[i][-1] = q[i][1];
q[i][Ny+1] = q[i][Ny-1];
}
/* ------------------------------ CUDA Kernel call -----------------------------*/
/* Push the full slab (including ghost cells) to the device each step. */
int qsize = rows*cols;
cudaMemcpy(dev_qmem, &q[-mbc][-mbc], qsize*sizeof(double),
cudaMemcpyHostToDevice);
int msize = Nx + 1;
int nsize = Ny + 1;
int gx = 8, gy = 8;
dim3 block(gx,gy);
dim3 grid((msize+block.x - 1)/block.x, (nsize+block.y - 1)/block.y);
/* These inner rows/cols deliberately shadow the outer ones: they size the
   per-block shared-memory tile (gx x gy plus ghost cells and a row table). */
int rows = (gx + 1 + 2*mbc);
int cols = (gy + 1 + 2*mbc);
size_t bytes_per_block = rows*(cols*sizeof(double) + sizeof(double*));
/* NOTE(review): the prototype names these parameters dx, dy but the call
   passes the squared spacings dx2, dy2 — presumably the kernel expects
   squares; confirm against heat2d_update's definition. */
heat2d_update<<<grid,block,bytes_per_block>>>(Nx, Ny, mbc, dx2, dy2, dt, dev_q, dev_qp);
cudaDeviceSynchronize();
cudaMemcpy(&qp[-mbc][-mbc], dev_qpmem, qsize*sizeof(double), cudaMemcpyDeviceToHost);
/* -------------------------------- Write output -------------------------------*/
if (n == noutsteps[k])
{
fwrite(&t,1,sizeof(double),fout);
for(int i = 0; i <= Nx; i++)
{
fwrite(&qp[i][0],Ny+1,sizeof(double),fout);
}
k++;
}
/* Swap host buffers so q always holds the latest solution. */
double **qtmp = q;
q = qp;
qp = qtmp;
}
fclose(fout);
delete_2d(mbc,&q);
delete_2d(mbc,&qp);
free(noutsteps);
return 0;
}
22,177 | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
/* One explicit-Euler step of y' = 4*t - y + 3 + id, one thread per entry.
 * N is the number of valid entries in y; surplus threads do nothing. */
__global__ void GPUEuler2(float *y, float t_i, float delta, int N) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N) return;
    float slope = 4 * t_i - y[idx] + 3 + idx;
    y[idx] += delta * slope;
}
/* Benchmark GPUEuler2 across four block sizes, timing 10^3+1 Euler steps
 * over 10^8+1 entries with CUDA events; prints one elapsed time (ms) per
 * configuration. */
int main(int argc, char** argv) {
    int hilos2c[] = {64,128,256,512}, bloque2c[4];  /* threads per block / blocks */
    float tiempoGPU2c, t_i2c;
    float *dev_e2c, *hst_y2c;
    cudaEvent_t start2c, end2c;
    printf("seccion 2.c\n");
    for (int j = 8; j < 9; j++) {
        int m = pow(10, j);  /* problem size: m+1 entries */
        for (int w = 0; w < 4; w++) {
            /* BUG FIX: the original allocated sizeof(float)*m + 1 BYTES
             * (operator precedence), one float short of the m+1 elements
             * written below. */
            hst_y2c = (float*) malloc(sizeof(float) * (m + 1));
            cudaMalloc((void**) &dev_e2c, (m + 1) * sizeof(float));
            for (int i = 0; i < m + 1; i++) {
                hst_y2c[i] = i;  /* initial condition y_i(0) = i */
            }
            /* Enough blocks to cover all m+1 elements at this thread count. */
            bloque2c[w] = (int) ceil((float) (m + 1) / hilos2c[w]);
            cudaEventCreate(&start2c);
            cudaEventCreate(&end2c);
            cudaEventRecord(start2c, 0);
            cudaMemcpy(dev_e2c, hst_y2c, (m + 1) * sizeof(float), cudaMemcpyHostToDevice);
            float n = powf(10, 3);  /* number of Euler steps */
            for (int i = 0; i < n + 1; i++) {
                t_i2c = i / n;
                GPUEuler2<<<bloque2c[w], hilos2c[w]>>>(dev_e2c, t_i2c, 1 / n, m + 1);
            }
            cudaEventRecord(end2c, 0);
            cudaEventSynchronize(end2c);
            cudaEventElapsedTime(&tiempoGPU2c, start2c, end2c);
            printf("%f\n", tiempoGPU2c);
            /* Release per-configuration resources (events were leaked before). */
            cudaEventDestroy(start2c);
            cudaEventDestroy(end2c);
            cudaFree(dev_e2c);
            free(hst_y2c);
        }
    }
    return 0;
}
22,178 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o linear_regression_ques_a linear_regression_ques_a.cu -lm
*
* To run:
* ./linear_regression_ques_a
*
*****************************************************************************/
/* One (x, y) data sample for the linear-regression fit:
   a is the independent variable, y the observed value. */
typedef struct point_t {
double a;
double y;
} point_t;
/* Number of samples in the data[] table below; kept in a host copy and a
   device copy (the kernel indexes exactly this many entries). */
int n_data = 1000;
__device__ int d_n_data = 1000;
//actual data
point_t data[] = {
{77.98,130.36},{79.09,148.43},{72.34,123.95},{65.93,116.99},
{77.57,132.91},{79.38,148.35},{85.45,141.04},{68.12,112.26},
{83.96,140.95},{65.32,107.96},{73.70,144.54},{68.51,110.97},
{23.70,43.30},{32.60,75.51},{97.45,167.12},{24.44,62.51},
{35.26,73.73},{42.05,87.45},{80.60,154.01},{42.19,78.14},
{11.57,36.87},{89.39,138.89},{79.04,151.16},{75.37,140.63},
{16.37,48.59},{85.73,138.70},{19.40,60.34},{ 1.87,30.33},
{64.62,100.67},{82.50,149.78},{69.86,126.38},{76.07,111.45},
{75.30,138.59},{ 8.55,34.53},{76.84,150.23},{88.69,149.26},
{69.72,143.26},{37.04,75.54},{84.39,168.66},{42.23,83.86},
{ 9.89,36.55},{74.04,123.28},{75.72,123.75},{19.76,61.01},
{ 7.21,24.85},{47.72,87.46},{46.74,99.94},{87.50,141.68},
{29.77,84.23},{58.59,112.53},{56.40,110.57},{72.64,133.51},
{18.77,73.65},{50.47,92.54},{17.67,50.48},{22.96,50.27},
{93.80,157.23},{28.78,62.97},{70.77,137.07},{18.46,48.76},
{78.73,147.81},{28.64,64.04},{15.87,71.24},{20.60,58.87},
{24.04,71.30},{29.82,64.70},{74.92,119.91},{57.65,128.03},
{ 8.60,17.53},{64.15,114.03},{54.12,111.55},{46.26,86.85},
{69.34,128.76},{62.32,123.58},{35.54,91.65},{63.94,108.18},
{ 8.79,26.44},{94.79,158.97},{69.39,127.07},{54.95,85.68},
{80.80,123.50},{ 5.04,28.59},{60.70,122.70},{64.93,116.81},
{75.21,140.82},{94.73,158.77},{70.97,119.67},{18.60,49.57},
{35.65,90.07},{51.29,106.23},{99.39,164.53},{68.69,128.54},
{43.27,97.46},{54.00,103.11},{98.43,155.75},{85.94,149.47},
{77.76,130.00},{ 9.66,21.48},{65.38,113.14},{86.03,135.26},
{52.91,109.09},{40.51,88.36},{96.33,178.71},{65.01,112.30},
{22.21,63.56},{92.23,155.97},{ 5.44,50.68},{47.24,91.65},
{70.34,134.08},{91.42,134.44},{ 6.25,45.93},{30.84,78.59},
{59.59,93.17},{59.66,116.00},{56.91,124.37},{26.40,80.75},
{41.75,104.14},{92.10,151.34},{24.18,73.47},{71.32,139.54},
{94.73,165.99},{54.05,102.18},{58.35,108.14},{54.49,84.38},
{60.06,111.97},{72.99,135.35},{74.88,134.12},{55.44,112.28},
{99.26,172.77},{15.34,42.67},{53.88,93.18},{35.90,73.13},
{79.41,137.27},{52.71,100.10},{14.90,64.25},{77.27,143.40},
{94.85,161.63},{27.95,67.70},{22.65,69.15},{19.92,75.99},
{21.63,58.86},{63.89,125.04},{31.18,68.30},{85.21,142.92},
{ 6.99,41.71},{65.13,123.67},{43.22,91.87},{14.54,51.08},
{71.03,120.54},{85.32,139.23},{ 1.65,15.42},{68.44,114.18},
{24.89,43.34},{44.09,71.91},{52.78,100.42},{39.00,104.71},
{30.45,68.40},{93.79,152.03},{43.67,82.38},{60.03,110.47},
{72.81,133.44},{ 3.36,39.56},{23.63,51.23},{ 2.24,35.53},
{69.37,120.48},{21.85,53.01},{99.40,148.97},{48.26,95.05},
{19.43,47.20},{18.54,58.80},{59.71,130.46},{83.76,146.14},
{61.24,108.80},{59.79,118.94},{32.18,74.26},{69.93,107.25},
{18.80,65.71},{77.62,135.24},{36.38,80.62},{22.11,61.87},
{87.67,159.19},{18.27,42.13},{59.46,113.12},{27.86,76.32},
{44.58,81.58},{51.48,91.53},{34.66,68.59},{24.31,60.65},
{77.44,155.88},{89.86,165.12},{14.31,40.08},{ 8.99,63.09},
{62.05,109.52},{99.16,166.58},{16.23,62.72},{21.00,57.58},
{64.70,110.52},{36.50,70.82},{13.60,32.41},{72.48,132.93},
{16.55,48.30},{72.33,146.17},{74.80,144.98},{86.52,145.51},
{18.74,53.20},{42.46,84.17},{34.14,83.02},{48.40,105.26},
{94.58,164.10},{54.73,97.10},{15.98,54.69},{41.41,82.26},
{54.70,107.38},{78.82,140.70},{18.53,70.95},{ 7.02,48.19},
{36.99,81.18},{11.80,22.97},{89.96,163.81},{18.02,55.52},
{70.09,123.95},{84.84,138.33},{70.15,139.19},{17.89,44.33},
{91.76,146.52},{35.98,77.33},{71.59,112.49},{44.29,103.12},
{ 6.47,33.93},{ 2.33,39.95},{64.72,113.89},{48.82,88.91},
{71.34,127.29},{99.75,165.06},{51.38,91.86},{76.72,118.85},
{ 9.09,53.80},{44.40,95.91},{55.88,112.71},{27.00,65.93},
{43.09,89.10},{15.77,58.36},{ 6.66,52.56},{ 0.72,15.70},
{ 9.10,32.67},{73.31,146.06},{80.63,145.39},{89.34,151.93},
{70.99,109.10},{18.48,40.16},{13.53,60.15},{ 3.62,35.30},
{ 0.67,22.84},{10.18,34.71},{62.39,115.13},{16.83,50.51},
{ 4.95,34.83},{17.56,65.63},{20.46,71.24},{ 3.24,22.13},
{62.62,108.31},{83.41,159.53},{13.94,62.74},{39.53,75.26},
{70.08,127.75},{96.18,154.40},{ 4.70,51.92},{ 4.83,34.24},
{86.68,155.95},{39.14,95.10},{54.83,101.35},{45.90,113.05},
{83.84,164.08},{91.32,141.46},{99.38,151.63},{57.80,88.41},
{15.70,64.96},{50.13,86.85},{ 5.49,49.01},{46.72,94.05},
{89.37,154.06},{30.59,63.82},{71.14,121.90},{17.65,61.37},
{17.22,71.53},{63.80,131.31},{48.19,93.75},{46.52,99.40},
{93.86,171.03},{23.64,68.51},{89.26,157.22},{49.78,104.78},
{85.35,137.00},{72.36,124.10},{82.00,138.39},{13.84,48.03},
{89.78,154.55},{90.40,143.48},{ 3.06,22.06},{51.56,99.83},
{61.82,112.08},{38.70,87.78},{31.97,74.39},{99.69,176.25},
{78.62,152.64},{52.43,102.53},{92.86,145.69},{81.64,144.55},
{76.06,122.34},{ 4.95,32.38},{87.52,171.59},{95.04,149.43},
{60.28,110.39},{82.25,169.12},{22.81,57.02},{ 6.91,40.13},
{53.36,106.12},{96.26,156.75},{15.49,45.17},{36.79,83.03},
{82.18,147.65},{31.99,74.58},{44.35,86.62},{35.53,75.38},
{41.16,96.88},{98.23,160.31},{48.38,84.37},{86.01,161.33},
{ 7.43,31.88},{24.37,44.83},{11.01,41.83},{16.86,44.57},
{13.72,32.74},{61.53,119.79},{48.45,82.98},{34.80,74.74},
{73.82,131.76},{45.49,87.37},{44.29,99.77},{85.94,167.42},
{38.49,75.27},{30.29,70.78},{68.66,114.49},{27.31,67.54},
{67.58,121.97},{66.72,123.44},{61.54,95.46},{43.92,85.74},
{43.87,109.10},{56.28,107.24},{86.68,137.08},{ 0.81,20.16},
{18.28,49.52},{51.27,112.06},{43.31,65.28},{29.32,52.29},
{98.77,169.88},{17.18,52.57},{76.85,145.82},{82.70,135.08},
{83.64,131.25},{12.33,38.01},{76.06,153.75},{83.89,135.10},
{29.57,65.30},{47.52,108.75},{ 8.76,39.70},{20.95,67.72},
{ 5.41,42.36},{61.13,90.13},{ 3.08,14.51},{38.82,95.19},
{17.75,66.51},{ 1.83,29.54},{60.84,110.57},{44.30,95.26},
{58.12,115.47},{43.36,84.41},{74.23,122.77},{46.90,93.52},
{95.44,148.73},{ 7.39,45.61},{52.81,104.35},{37.16,60.15},
{ 9.81,46.92},{15.75,52.64},{ 7.55,55.70},{80.65,123.61},
{70.31,144.36},{56.05,118.04},{41.85,90.23},{48.33,100.55},
{61.36,129.79},{30.99,63.92},{97.88,165.01},{88.06,163.98},
{ 3.94,42.78},{51.10,94.74},{25.92,56.47},{32.24,56.10},
{56.42,105.70},{35.94,65.57},{88.44,140.75},{18.42,50.02},
{92.97,165.67},{93.14,152.64},{84.46,142.58},{59.79,101.62},
{47.70,93.97},{85.61,150.83},{75.95,125.79},{68.98,124.25},
{12.19,46.92},{64.71,111.41},{65.35,117.94},{56.03,117.75},
{33.65,80.22},{99.83,154.54},{42.11,91.90},{29.26,71.92},
{12.45,42.87},{31.86,58.42},{67.32,131.56},{62.65,119.19},
{21.02,60.23},{36.42,63.61},{21.46,68.57},{87.10,154.60},
{54.01,111.59},{79.76,131.26},{20.26,40.04},{92.15,144.67},
{18.69,56.39},{77.38,133.16},{34.32,90.42},{66.53,107.16},
{32.44,72.36},{57.18,99.61},{17.61,67.09},{19.97,67.47},
{60.67,108.28},{46.88,87.00},{97.25,170.60},{80.52,149.02},
{37.36,79.89},{94.27,169.69},{80.94,147.55},{30.75,71.59},
{66.58,128.72},{63.16,95.21},{ 4.90,18.07},{50.47,91.47},
{73.25,120.81},{57.26,113.72},{98.04,168.70},{ 6.88,43.58},
{52.69,84.44},{86.30,154.21},{63.61,125.07},{44.90,77.52},
{13.76,44.88},{88.55,157.17},{39.41,85.60},{56.27,99.27},
{93.04,146.91},{ 1.71,16.08},{84.43,171.48},{90.37,147.40},
{32.91,74.15},{96.28,183.66},{48.10,93.15},{86.94,140.01},
{20.19,46.78},{54.74,118.64},{93.47,156.25},{83.56,159.37},
{58.91,104.24},{17.17,55.02},{61.22,119.69},{45.82,94.79},
{17.17,74.27},{21.22,47.78},{17.64,50.62},{16.56,56.21},
{32.37,76.59},{66.74,114.96},{26.23,60.03},{27.07,75.08},
{16.26,45.80},{46.20,97.82},{ 9.14,45.88},{13.38,43.76},
{27.19,59.57},{34.86,88.57},{85.63,144.87},{55.71,110.49},
{55.62,101.77},{76.12,137.16},{53.24,109.80},{61.48,121.39},
{ 6.40,25.24},{46.74,92.45},{71.59,132.39},{28.43,77.52},
{68.73,131.25},{44.27,107.42},{65.88,128.88},{32.63,73.60},
{ 5.98,46.75},{ 8.52,36.36},{80.13,150.13},{99.95,172.25},
{49.04,82.13},{74.27,131.66},{ 2.55,21.35},{37.91,72.65},
{58.32,98.33},{78.65,138.32},{64.44,142.24},{39.76,102.27},
{34.24,88.03},{34.36,86.23},{58.79,107.80},{27.45,68.17},
{85.03,134.11},{43.05,102.99},{73.68,124.75},{59.46,111.47},
{25.43,70.54},{78.96,141.05},{52.15,117.19},{83.16,143.72},
{82.92,147.05},{24.77,54.57},{ 4.47,13.97},{20.28,47.25},
{59.33,116.38},{42.32,88.75},{29.10,72.39},{ 3.09,34.29},
{89.79,156.34},{16.88,42.49},{29.57,57.84},{20.46,70.34},
{ 3.32,30.40},{50.04,115.30},{ 8.70,34.66},{56.89,127.48},
{44.98,110.33},{78.79,142.46},{75.98,125.64},{67.87,132.39},
{12.65,55.67},{29.76,68.30},{23.04,68.87},{ 1.39,46.55},
{11.93,47.74},{76.81,130.63},{36.23,60.87},{82.17,148.30},
{57.96,98.37},{55.25,110.04},{90.64,157.35},{ 6.46,53.56},
{37.06,76.23},{ 0.78,20.01},{18.00,57.24},{39.01,86.87},
{81.30,137.64},{93.07,160.70},{46.12,107.39},{28.41,54.49},
{ 3.98,12.01},{ 1.40,26.15},{17.50,42.51},{63.86,122.40},
{34.02,58.71},{26.39,74.74},{84.32,133.79},{43.45,94.44},
{65.57,109.22},{87.73,148.77},{ 6.60,22.61},{25.00,57.85},
{78.06,141.20},{59.36,120.65},{40.58,82.33},{60.34,94.44},
{28.54,59.24},{28.87,62.43},{16.30,58.16},{15.93,46.27},
{81.31,145.87},{ 4.11,32.68},{ 4.59,26.34},{46.27,77.29},
{76.18,141.01},{54.57,106.15},{50.88,91.60},{29.43,70.00},
{40.28,71.90},{ 7.40,54.73},{47.47,96.96},{37.73,81.98},
{50.41,98.64},{51.27,75.77},{18.77,57.85},{89.78,149.43},
{52.15,97.04},{96.49,149.33},{10.02,31.91},{66.31,117.85},
{23.53,46.57},{13.75,58.44},{79.11,126.53},{88.69,156.62},
{48.84,95.55},{16.67,59.50},{38.77,78.61},{ 1.34,17.60},
{42.10,92.77},{16.13,59.32},{39.91,52.59},{29.80,73.67},
{32.05,78.76},{79.15,138.73},{ 4.87,38.81},{39.19,78.12},
{91.91,151.01},{ 6.42,30.39},{75.87,144.60},{31.41,70.40},
{53.11,97.77},{95.82,150.10},{81.34,142.82},{43.06,82.20},
{48.51,119.24},{33.72,96.27},{39.97,84.44},{73.14,151.85},
{88.82,160.36},{20.75,47.84},{27.20,78.42},{90.60,146.84},
{29.83,59.96},{89.87,146.86},{13.72,51.99},{99.15,167.18},
{52.08,100.57},{82.43,144.09},{11.42,35.59},{ 5.44,17.65},
{80.93,125.60},{33.03,80.04},{85.31,146.26},{ 3.22,44.89},
{23.00,52.64},{ 7.39,47.54},{48.59,98.05},{10.55,51.61},
{69.16,123.97},{87.24,155.52},{92.00,168.71},{ 6.61,45.05},
{32.89,66.39},{55.80,119.58},{43.54,111.85},{68.67,135.21},
{ 0.24,30.71},{57.59,114.41},{16.17,58.04},{ 6.25,41.72},
{ 9.16,46.64},{91.22,162.13},{46.47,104.68},{18.86,49.65},
{52.75,108.61},{85.46,133.74},{73.60,138.14},{50.55,118.86},
{30.54,66.35},{50.85,111.86},{70.25,126.00},{49.48,102.17},
{24.93,66.08},{ 2.63,34.63},{62.07,118.69},{62.25,108.45},
{20.28,61.81},{32.06,67.70},{54.64,97.24},{48.12,90.16},
{19.27,44.36},{80.24,141.84},{65.21,113.97},{53.61,115.71},
{84.83,148.51},{34.33,75.45},{93.22,152.04},{91.56,151.73},
{10.38,49.46},{39.41,92.74},{26.89,56.87},{95.22,146.87},
{41.17,86.91},{81.89,147.34},{14.58,45.46},{18.39,54.05},
{71.89,104.40},{ 0.30,30.11},{63.96,110.11},{44.40,92.86},
{83.48,154.12},{14.82,50.29},{36.78,76.46},{51.66,100.51},
{88.52,152.67},{93.61,159.05},{16.01,50.41},{16.07,48.57},
{68.77,130.06},{32.32,67.18},{33.34,89.63},{37.47,77.02},
{44.11,89.78},{40.36,80.08},{50.29,99.98},{65.33,109.83},
{24.20,58.45},{13.68,51.23},{54.82,103.81},{98.53,163.98},
{72.10,125.16},{ 5.72,29.97},{ 4.33,37.36},{13.79,49.50},
{72.88,133.72},{35.89,57.79},{24.93,66.44},{65.67,123.16},
{10.63,52.88},{82.36,150.47},{ 0.62,27.36},{16.79,58.06},
{ 3.84,26.35},{85.22,153.42},{74.02,107.95},{32.04,83.35},
{71.72,122.28},{54.89,104.07},{74.45,141.35},{ 1.42,26.12},
{36.63,61.69},{42.20,92.21},{26.41,71.01},{99.70,173.70},
{81.19,154.01},{ 5.16,18.63},{54.40,94.12},{15.33,59.58},
{70.31,127.16},{11.42,44.79},{35.26,99.03},{18.03,48.41},
{22.02,70.90},{73.79,131.75},{71.15,135.36},{23.14,33.04},
{86.06,160.02},{70.34,123.21},{45.58,78.57},{87.12,151.37},
{67.94,115.34},{69.98,133.49},{15.68,59.43},{71.70,151.09},
{51.82,102.37},{ 0.58,15.36},{ 2.90,47.52},{44.80,92.48},
{82.32,132.50},{ 4.63,14.38},{31.96,48.86},{46.05,84.79},
{13.51,51.36},{51.24,107.59},{44.03,90.54},{51.97,108.04},
{68.32,116.35},{62.22,102.36},{61.33,123.14},{69.45,108.62},
{34.29,68.38},{94.12,159.76},{88.61,164.29},{21.54,61.18},
{80.70,148.58},{32.74,70.20},{91.67,148.53},{67.20,129.49},
{47.66,89.84},{20.00,42.76},{ 4.79,28.06},{40.06,81.69},
{ 9.27,20.83},{73.38,145.35},{86.29,149.36},{68.78,139.89},
{ 9.72,52.80},{34.60,73.98},{84.87,152.58},{23.98,60.25},
{20.35,57.12},{53.22,101.34},{78.28,147.59},{74.65,135.40},
{69.74,121.56},{86.11,141.16},{52.67,93.22},{36.21,73.36},
{62.41,119.51},{96.71,171.05},{48.40,115.07},{77.92,128.50},
{38.72,94.44},{19.42,60.45},{27.74,67.47},{65.43,136.98},
{48.30,100.65},{22.79,41.37},{50.96,87.94},{69.21,128.50},
{58.99,119.90},{ 6.56,37.55},{58.15,93.18},{75.73,133.38},
{63.81,138.63},{19.75,58.04},{63.51,112.74},{92.64,158.71},
{73.19,119.83},{74.40,120.50},{80.13,153.79},{98.06,176.77},
{26.53,71.35},{39.94,89.47},{10.63,46.96},{22.47,70.86},
{23.26,50.74},{76.09,143.68},{79.01,149.60},{75.69,115.59},
{60.72,114.92},{ 8.69,34.04},{66.46,127.71},{85.83,144.31},
{91.27,152.15},{ 2.99,30.02},{37.62,71.00},{60.92,102.47},
{44.12,101.36},{37.63,62.04},{68.97,120.90},{53.24,112.70},
{ 4.07,47.04},{60.77,110.59},{87.05,154.03},{92.55,155.02},
{73.87,130.02},{22.58,59.37},{ 3.30,26.74},{48.73,102.89},
{ 5.67,29.37},{83.16,143.34},{65.46,115.06},{37.49,78.43},
{11.22,42.48},{32.22,88.20},{68.33,125.75},{86.67,144.24},
{34.74,74.45},{95.02,159.49},{73.56,125.83},{72.81,144.78},
{29.15,50.65},{ 8.90,31.43},{85.79,142.73},{56.29,115.86},
{50.87,101.85},{74.48,131.71},{ 9.10,50.93},{65.98,128.12},
{53.17,96.22},{95.92,160.00},{51.40,105.43},{49.84,99.95},
{12.84,43.78},{26.81,78.07},{35.84,67.05},{51.20,106.73},
{52.67,99.15},{ 2.97,22.17},{87.10,158.04},{97.92,150.43},
{19.36,66.09},{26.94,81.91},{74.10,139.62},{48.95,95.97},
{73.76,131.04},{43.02,100.53},{52.41,98.19},{ 0.03,32.02},
{94.16,161.53},{42.19,92.10},{ 0.09,37.47},{33.99,72.70},
{99.79,162.51},{33.87,64.33},{80.79,143.75},{32.37,84.16},
{92.68,162.80},{48.63,95.91},{79.88,160.76},{55.73,103.59},
{97.68,174.76},{91.55,150.78},{61.67,96.86},{62.10,104.76},
{10.15,30.28},{99.29,174.74},{77.47,128.21},{79.09,137.80},
{97.86,168.94},{87.41,152.14},{47.77,100.51},{ 7.12,31.81},
{74.21,128.46},{98.33,161.20},{31.56,65.88},{ 8.51,60.46},
{29.91,78.70},{25.98,54.92},{89.41,143.94},{87.04,143.13},
{40.64,67.19},{58.30,119.17},{47.49,98.34},{13.46,52.97},
{15.42,38.65},{99.33,171.47},{ 1.32,41.87},{ 0.76,29.01},
{94.15,167.71},{42.90,104.51},{51.04,112.91},{43.17,76.83},
{75.58,141.83},{76.09,127.74},{78.98,147.69},{40.88,76.51},
{ 3.39,34.13},{47.86,98.78},{60.23,116.36},{27.28,85.94},
{67.71,118.19},{29.86,61.94},{72.12,131.79},{97.82,178.48},
{38.60,70.05},{75.57,116.03},{38.59,89.37},{59.33,119.02},
{26.31,58.70},{80.91,149.89},{ 9.16,29.56},{ 3.76,25.74},
{41.59,89.75},{86.21,158.11},{84.75,154.34},{21.36,56.39},
{56.08,99.09},{42.90,79.09},{56.57,95.10},{61.51,129.85},
{34.02,79.24},{ 0.78,17.70},{98.72,175.73},{30.08,77.88},
{50.75,100.00},{92.19,157.85},{49.00,84.27},{79.00,142.36},
{75.20,137.48},{57.53,105.12},{84.52,151.03},{34.09,88.44},
{49.31,109.97},{59.81,105.11},{ 8.99,26.04},{15.12,46.94}
};
/* Squared residual of the line y = m*a + c at one data point. */
double residual_error(double a, double y, double m, double c) {
  double diff = m * a + c - y;
  return diff * diff;
}
/* Device-side squared residual of the line y = m*a + c at one data point
 * (mirrors the host residual_error). */
__device__ double d_residual_error(double a, double y, double m, double c) {
  double diff = m * a + c - y;
  return diff * diff;
}
/* Host RMS error of the candidate line (m, c) over the full data[] table. */
double rms_error(double m, double c) {
  double total = 0;
  for(int i = 0; i < n_data; i++) {
    total += residual_error(data[i].a, data[i].y, m, c);
  }
  return sqrt(total / n_data);
}
//Kernel: one thread per data point. Each thread computes the squared
//residual of the candidate line (*m, *c) at its sample and stores it in
//error_sum_arr[i]; the host sums the array afterwards.
//Expects grid*block == number of data points (launched as <<<100,10>>>
//for the 1000-entry table); there is no bounds check on i.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
/*
Calculate the current index by using:
- The thread id
- The block id
- The number of threads per block
*/
int i = threadIdx.x + blockIdx.x * blockDim.x;
//Work out the error sum 1000 times and store them in an array.
error_sum_arr[i] = d_residual_error(d_data[i].a, d_data[i].y, *m, *c);
}
/* Nanosecond difference finish - start, stored in *difference.
 * Returns 0 when the difference is positive, 1 otherwise (same
 * !(diff > 0) convention as before). */
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int secs = finish->tv_sec - start->tv_sec;
  long long int nsecs = finish->tv_nsec - start->tv_nsec;
  /* Borrow a second when the nanosecond part went negative. */
  if (nsecs < 0) {
    secs -= 1;
    nsecs += 1000000000;
  }
  *difference = secs * 1000000000 + nsecs;
  return (*difference > 0) ? 0 : 1;
}
/* Gradient search for the (m, c) minimizing RMS error over data[]:
 * evaluates 8 neighboring steps in m-c space per iteration (error sums on
 * the GPU), moves to the best neighbor, stops when no neighbor improves.
 * Prints the minimum found and the elapsed wall time. */
int main() {
  int i;
  double bm = 1.3;                  /* base estimate of slope m */
  double bc = 10;                   /* base estimate of intercept c */
  double be;                        /* RMS error at the base estimate */
  double dm[8];
  double dc[8];
  double e[8];
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;
  /* The 8 compass-point step directions in (m, c) space. */
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  //Get the system time before we begin the linear regression.
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  //Device variables
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  //Allocate memory for d_dm
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  //Allocate memory for d_dc
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  //Allocate memory for d_error_sum_arr
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  //Allocate memory for d_data
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  while(!minimum_found) {
    /* Build the 8 candidate (m, c) pairs around the current base. */
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    //Copy memory for dm to d_dm
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    //Copy memory for dc to d_dc
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    //Copy memory for data to d_data
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      //Host variable storing the array returned from the kernel function.
      double h_error_sum_arr[1000];
      /* BUG FIX: error_sum_total was read before ever being assigned
       * (undefined behavior); it must start at zero for each direction. */
      double error_sum_total = 0;
      //Stores the mean of the total sum of the error sums.
      double error_sum_mean;
      //Call the rms_error function using 100 blocks and 10 threads.
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      /* cudaThreadSynchronize() is deprecated; use the runtime equivalent. */
      cudaDeviceSynchronize();
      //Copy memory for d_error_sum_arr
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
          cudaGetErrorString(error));
      }
      //Loop through the error sum array returned from the kernel function
      for(int j=0; j<n_data; j++) {
        //Add each error sum to the error sum total.
        error_sum_total += h_error_sum_arr[j];
      }
      //Calculate the mean for the error sum.
      error_sum_mean = error_sum_total / n_data;
      //Calculate the square root for the error sum mean.
      e[i] = sqrt(error_sum_mean);
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    /* Move the base to the best neighbor, or stop if none improves. */
    if(best_error < be) {
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  //Free memory for d_dm
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  //Free memory for d_dc
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  //Free memory for d_data
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  //Free memory for d_error_sum_arr
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  //Get the system time after we have run the linear regression function.
  clock_gettime(CLOCK_MONOTONIC, &finish);
  //Calculate the time spent between the start time and end time.
  time_difference(&start, &finish, &time_elapsed);
  //Output the time spent running the program.
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
    (time_elapsed/1.0e9));
  return 0;
}
|
22,179 | #include <iostream>
// Minimal smoke-test kernel: each launched thread prints one "test" line.
// Device printf output is flushed at the next synchronization point.
__global__ void kernel() {
printf("test\n");
}
// Launch the smoke-test kernel with 2 threads, wait for it, then print a
// host-side message. Returns 1 on any CUDA error (the original ignored
// both launch and execution errors).
int main(int, char**) {
    kernel<<<1,2>>>();
    cudaError_t err = cudaGetLastError();   // launch-configuration errors
    if (err == cudaSuccess) {
        err = cudaDeviceSynchronize();      // in-kernel execution errors
    }
    if (err != cudaSuccess) {
        std::cerr << "CUDA error: " << cudaGetErrorString(err) << '\n';
        return 1;
    }
    std::cout << "Hello, world!\n";
}
|
22,180 | #include "includes.h"
// Element-wise vector add: y[i] = x[i] + y[i] for i in [0, n).
// Rewritten as a grid-stride loop: the original made EVERY thread loop
// over all n elements, so multi-thread launches did redundant work (and
// redundant global writes). The grid-stride form produces the same result
// for any launch configuration, including <<<1,1>>>.
__global__ void add(int n, float *x, float *y) {
  int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    y[i] = x[i] + y[i];
  }
}
22,181 | /*******************************
*** *** TASK-3
*** NAME: - SOAIBUZZAMAN
*** Matrikel Number: 613488
*********************************/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
const int N = 200;
const int block_size = 32;
const int num_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
// Device Function
// Block-wise maximum over vec[0..N).
// Each thread scans a grid-strided subset of vec, then a shared-memory
// tree reduction (blockDim.x must be a power of two) leaves the block's
// maximum in grid_results[blockIdx.x]. The local maximum is seeded with 0,
// so this assumes non-negative inputs (the host fills vec with rand()%100).
__global__ void calc_max_device(int *vec, int N, int *grid_results)
{
    __shared__ int part_prod[block_size];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    // Local maximum of this thread's grid-strided elements.
    // BUG FIX: the original compared against vec[tid*block_size + i],
    // which reads far out of bounds and ignores the strided element vec[i].
    int max_t = 0;
    for (int i = tid; i < N; i += blockDim.x * gridDim.x)
    {
        if (vec[i] > max_t)
        {
            max_t = vec[i];
        }
    }
    part_prod[threadIdx.x] = max_t;
    __syncthreads();

    // Tree reduction in shared memory.
    // BUG FIX: the original compared against part_prod[threadIdx.x + 1]
    // instead of part_prod[threadIdx.x + size], so most partial maxima
    // never participated in the reduction.
    for (int size = blockDim.x / 2; size > 0; size /= 2)
    {
        if (threadIdx.x < size)
        {
            if (part_prod[threadIdx.x] < part_prod[threadIdx.x + size])
                part_prod[threadIdx.x] = part_prod[threadIdx.x + size];
        }
        __syncthreads();  // barrier outside the divergent branch
    }

    // One element per block: thread 0 publishes the block maximum.
    if (threadIdx.x == 0)
    {
        grid_results[blockIdx.x] = part_prod[0];
    }
}
// Host function
// Fill vec with random values in [0, 100), compute its maximum on the GPU,
// and return it. Takes ownership of vec (frees it before returning, as the
// original did, so callers must not use vec afterwards).
int calc_max(int *vec, int N, int block_size, int num_blocks)
{
    int *vec_d, *grid_results_d;
    cudaMalloc((void **)&vec_d, sizeof(int) * N);
    // BUG FIX: the kernel writes one partial maximum PER BLOCK, so the
    // result buffer needs num_blocks ints; the original allocated a single
    // int and also memcpy'd sizeof(int)*N into it (heap overflow).
    cudaMalloc((void **)&grid_results_d, sizeof(int) * num_blocks);

    // initialize the vector for testing.
    for (int i = 0; i < N; i++)
    {
        vec[i] = rand() % 100;
    }
    cudaMemcpy(vec_d, vec, sizeof(int) * N, cudaMemcpyHostToDevice);

    // calling the device funtion
    calc_max_device<<<num_blocks, block_size>>>(vec_d, N, grid_results_d);

    // BUG FIX: finish the reduction on the host — the original copied back
    // only grid_results[0] and ignored the other blocks' maxima.
    int *grid_results = (int *)malloc(sizeof(int) * num_blocks);
    cudaMemcpy(grid_results, grid_results_d, sizeof(int) * num_blocks,
               cudaMemcpyDeviceToHost);
    int max_v = 0;
    for (int b = 0; b < num_blocks; b++)
    {
        if (grid_results[b] > max_v)
            max_v = grid_results[b];
    }

    free(grid_results);
    free(vec);
    cudaFree(vec_d);
    cudaFree(grid_results_d);
    return max_v;
}
// Driver: build a random N-element vector, find its maximum on the GPU via
// calc_max (which takes ownership of the vector), and print the result.
int main(void)
{
    int *values = (int *)malloc(sizeof(int) * N);
    int *result = (int *)malloc(sizeof(int));
    *result = calc_max(values, N, block_size, num_blocks);
    printf("%d\n", *result);
    free(result);
    return 0;
}
|
22,182 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// RG*RG*MAXN must fit within mytype
#define MAXN 100000
#define RG 10
#define USECPSEC 1000000ULL
#define nTPB 256
typedef double mytype;

/* Reference CPU full convolution: out[k] += sum over i+j==k of A[i]*B[j].
 * out must hold at least 2*N-1 entries and be zero-initialized by the
 * caller; results are accumulated into it. */
void conv(const mytype *A, const mytype *B, mytype* out, int N) {
  for (int k = 0; k < N; ++k) {
    const mytype a = A[k];
    for (int m = 0; m < N; ++m) {
      out[k + m] += a * B[m];
    }
  }
}
/* Microseconds since the epoch minus prev. Pass 0 to obtain a timestamp,
 * then pass that timestamp back to measure an elapsed interval. */
unsigned long long dtime_usec(unsigned long long prev){
  timeval now;
  gettimeofday(&now, 0);
  /* 1000000ULL microseconds per second (same value as the USECPSEC macro). */
  unsigned long long stamp = (now.tv_sec * 1000000ULL) + now.tv_usec;
  return stamp - prev;
}
//GPU full convolution using __ldg() read-only-cache loads (requires
//compute capability 3.5+). One thread per output element idx in [0, 2N-1):
//out[idx] = sum over valid i of A[i]*B[idx-i].
__global__ void conv_Kernel2(const mytype * A, const mytype * B, mytype *out, const int N){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < (2*N)-1){
mytype my_sum = 0;
for (int i = 0; i < N; i++)
//Guard keeps B's index (idx - i) inside [0, N) for both halves of out.
if (((idx < N) && (i <= idx)) || ((idx >= N) && (i > (idx-N)))) {
my_sum += __ldg(A + i)*__ldg(B + idx - i);
}
out[idx] = my_sum;
}
}
/* GPU full convolution with plain global loads (baseline for conv_Kernel2).
 * One thread per output element idx in [0, 2N-1):
 * out[idx] = sum over valid i of A[i]*B[idx-i]. */
__global__ void conv_Kernel1(const mytype * A, const mytype * B, mytype *out, const int N){
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx >= (2*N)-1) return;
  mytype acc = 0;
  for (int i = 0; i < N; i++) {
    /* Keep B's index (idx - i) inside [0, N) for both halves of out. */
    bool head = (idx < N) && (i <= idx);
    bool tail = (idx >= N) && (i > (idx - N));
    if (head || tail) {
      acc += A[i] * B[idx - i];
    }
  }
  out[idx] = acc;
}
/* Benchmark driver: compares conv_Kernel1 (plain loads) against
 * conv_Kernel2 (__ldg loads) over 100 timed iterations each, after
 * computing a CPU reference result. */
int main(int argc, char *argv[]){
    mytype *h_A, *d_A, *h_result, *d_result, *result, *h_B, *d_B, *A, *B;
    if (argc != 2) {printf("must specify N on the command line\n"); return 1;}
    int my_N = atoi(argv[1]);
    if ((my_N < 1) || (my_N > MAXN)) {printf("N out of range\n"); return 1;}
    // A/B/result feed the CPU reference; h_* mirror them for the GPU path
    B = (mytype *)malloc(my_N*sizeof(mytype));
    A = (mytype *)malloc(my_N*sizeof(mytype));
    h_A = (mytype *)malloc(my_N*sizeof(mytype));
    h_B = (mytype *)malloc(my_N*sizeof(mytype));
    h_result = (mytype *)malloc(2*my_N*sizeof(mytype));
    result = (mytype *)malloc(2*my_N*sizeof(mytype));
    cudaMalloc(&d_B, my_N*sizeof(mytype));
    cudaMalloc(&d_A, my_N*sizeof(mytype));
    cudaMalloc(&d_result, 2*my_N*sizeof(mytype));
    for (int i=0; i < my_N; i++){
        A[i] = rand()%RG;
        B[i] = rand()%RG;
        h_A[i] = A[i];
        h_B[i] = B[i];}
    for (int i=0; i < 2*my_N; i++){
        result[i] = 0;
        h_result[i] = 0;}
    unsigned long long cpu_time = dtime_usec(0);
    conv(A, B, result, my_N);
    cpu_time = dtime_usec(cpu_time);
    cudaMemset(d_result, 0, 2*my_N*sizeof(mytype));
    // The convolution produces 2*my_N-1 outputs.  Fix: the original sized
    // the grid for 2*(my_N-1) threads — one short — so whenever nTPB
    // divided 2*my_N-2 the last output element was never computed.
    const int nOut = 2*my_N - 1;
    const int nBlocks = (nOut + nTPB - 1)/nTPB;
    int loop = 100;
    unsigned long long k1_time = 0;
    for (int i = 0; i < loop; i++) {
        unsigned long long gpu_time = dtime_usec(0);
        cudaMemcpy(d_A, h_A, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
        cudaMemcpy(d_B, h_B, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
        conv_Kernel1<<<nBlocks,nTPB>>>(d_A, d_B, d_result, my_N);
        cudaDeviceSynchronize();
        cudaMemcpy(h_result, d_result, 2*my_N*sizeof(mytype), cudaMemcpyDeviceToHost);
        gpu_time = dtime_usec(gpu_time);
        k1_time += gpu_time;
    }
    unsigned long long k2_time = 0;
    for (int i = 0; i < loop; i++) {
        unsigned long long gpu_time = dtime_usec(0);
        cudaMemcpy(d_A, h_A, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
        cudaMemcpy(d_B, h_B, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
        conv_Kernel2<<<nBlocks,nTPB>>>(d_A, d_B, d_result, my_N);
        cudaDeviceSynchronize();
        cudaMemcpy(h_result, d_result, 2*my_N*sizeof(mytype), cudaMemcpyDeviceToHost);
        gpu_time = dtime_usec(gpu_time);
        k2_time += gpu_time;
    }
#if 0
    for (int i = 0; i < 2*my_N; i++)
        if (result[i] != h_result[i])
        {
            // Fix: %d with double arguments was undefined behavior; use %f.
            printf("mismatch2 at %d, cpu: %f, gpu %f\n", i, result[i], h_result[i]);
            return 1;
        }
#endif
    // Fix: the accumulators are unsigned long long; %ld was a format
    // mismatch.  Also divide by `loop` instead of a duplicated literal 100.
    printf("Finished. conv1(without ldg) time: %lluus, conv2(with ldg) time: %lluus\n", k1_time/loop, k2_time/loop);
    // Fix: nothing was freed before; release host and device buffers.
    free(A); free(B); free(h_A); free(h_B); free(h_result); free(result);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_result);
    return 0;
}
|
22,183 | #include <stdio.h>
#include <stdlib.h>
#define block_size 32
#define vector_size 1000000
// Element-wise vector addition: c[i] = a[i] + b[i] for every i < vector_size.
__global__ void add( int *a, int *b, int *c ) {
    const int idx = (blockIdx.x*blockDim.x) + threadIdx.x;  // global element index
    if (idx >= vector_size) return;  // tail guard: the grid may overshoot
    c[idx] = a[idx] + b[idx];
}
/* Compares CPU and GPU vector addition of vector_size elements,
 * timing both with CUDA events and verifying the results match. */
int main( void ) {
    // Select the device to use for our CUDA code (0 or 1)
    cudaSetDevice(0);
    // Timing via CUDA events (used for both the CPU and GPU sections)
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    // Host arrays — allocated with new[], so released with delete[] below
    int *a = new int [vector_size];
    int *b = new int [vector_size];
    int *c_cpu = new int [vector_size];
    int *c_gpu = new int [vector_size];
    // Pointers in GPU memory
    int *dev_a;
    int *dev_b;
    int *dev_c;
    // fill the arrays 'a' and 'b' on the CPU
    for (int i = 0; i < vector_size; i++) {
        a[i] = rand()%10;
        b[i] = rand()%10;
    }
    //
    // CPU Calculation
    //////////////////
    printf("Running sequential job.\n");
    cudaEventRecord(start,0);
    // Calculate C in the CPU
    for (int i = 0; i < vector_size; i++) {
        c_cpu[i] = a[i] + b[i];
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("\tSequential Job Time: %.2f ms\n", time);
    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, vector_size * sizeof(int) );
    cudaMalloc( (void**)&dev_b, vector_size * sizeof(int) );
    cudaMalloc( (void**)&dev_c, vector_size * sizeof(int) );
    // copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, vector_size * sizeof(int),
                cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, vector_size * sizeof(int),
                cudaMemcpyHostToDevice );
    //
    // GPU Calculation
    ////////////////////////
    printf("Running parallel job.\n");
    int grid_size = (vector_size-1)/block_size;
    grid_size++;   // ceiling division: enough blocks to cover every element
    cudaEventRecord(start,0);
    add<<<grid_size,block_size>>>( dev_a, dev_b, dev_c);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("\tParallel Job Time: %.2f ms\n", time);
    // copy the result back (default-stream memcpy also syncs with the kernel)
    cudaMemcpy( c_gpu, dev_c, vector_size * sizeof(int),
                cudaMemcpyDeviceToHost );
    // compare the results
    int error = 0;
    for (int i = 0; i < vector_size; i++) {
        if (c_cpu[i] != c_gpu[i]){
            error = 1;
            printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
        }
        if (error) break;
    }
    if (error == 0){
        printf ("Correct result. No errors were found.\n");
    }
    // free the memory allocated on the GPU
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    // Fix: these arrays were allocated with new[]; freeing them with
    // free() is undefined behavior.  Use delete[].
    delete[] a;
    delete[] b;
    delete[] c_cpu;
    delete[] c_gpu;
    return 0;
}
|
22,184 | #include <stdio.h>
// Function that catches the error
// Aborts on any CUDA error, reporting the location AND the error itself.
// Fix: the original printed only file/line and discarded the actual
// error, making failures needlessly hard to diagnose.
void testCUDA(cudaError_t error, const char *file, int line){
    if (error != cudaSuccess){
        printf("Error in file %s at line %d \n", file , line);
        printf("  %s\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
}
// Has to be define in the compilation in order to get the correct value of
// of the values __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__,__LINE__))
// Empty kernel; launched once from main before the device queries,
// presumably to force CUDA context creation — TODO confirm intent.
__global__ void empty_k(void){
}
/* Queries and prints the properties of the last CUDA device found. */
int main (void){
    int count;
    cudaDeviceProp prop;
    empty_k<<<1,1>>>();   // warm-up launch before the queries
    testCUDA(cudaGetDeviceCount(&count));
    printf("The number of devices available is %i GPUs \n", count);
    testCUDA(cudaGetDeviceProperties(&prop, count-1));   // queries the LAST device
    printf("Name %s\n", prop.name);
    // Fix: totalGlobalMem, sharedMemPerBlock and totalConstMem are size_t;
    // printing them with "%1d"/"%i" was a format mismatch ("%1d" itself
    // looks like a typo for "%ld").  Use %zu for all size_t fields.
    printf("Global memory size in octet (bytes): %zu \n", prop.totalGlobalMem);
    printf("Shared memory size per block: %zu\n", prop.sharedMemPerBlock);
    printf("Number of registers per block: %i\n", prop.regsPerBlock);
    printf("Number of threads in a warp: %i\n", prop.warpSize);
    printf("Maximum number of threads that can be launched per block: %i \n",
           prop.maxThreadsPerBlock);
    printf("Maximum number of threads that can be launched: %i X %i X %i\n",
           prop.maxThreadsDim[0],prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("Maximum grid size: %i X %i X %i\n", prop.maxGridSize[0],
           prop.maxGridSize[1],prop.maxGridSize[2]);
    printf("Total Constant Memory Size: %zu\n", prop.totalConstMem);
    printf("Major Compute capability: %i\n", prop.major);
    printf("Minor Compute capability: %i\n", prop.minor);
    printf("Clock Rate : %i\n", prop.clockRate);
    printf("Maximum 1D texture memory: %i\n", prop.maxTexture1D);
    printf("Could we overlap? %i \n", prop.deviceOverlap);
    printf("Number of multiprocessors: %i \n", prop.multiProcessorCount);
    printf("Is there a limit for kernel execution? %i \n",
           prop.kernelExecTimeoutEnabled);
    printf("Is my GPU a chipset? %i\n", prop.integrated);
    printf("Can we map the host memory? %i \n", prop.canMapHostMemory);
    printf("Can we launch concurrent kernels: %i\n", prop.concurrentKernels);
    printf("Do we have ECC memory %i\n", prop.ECCEnabled);
    return 0;
}
|
22,185 | #include <stdio.h>
#include <cuda.h>
// Enumerates every CUDA-capable device and prints a property report
// for each (labels are in Russian, preserved verbatim).
int main() {
    /* Get Device Num */
    int cudaDeviceNum = 0;
    cudaGetDeviceCount(&cudaDeviceNum);
    printf("%d devices found supporting CUDA\n", cudaDeviceNum);
    if ( cudaDeviceNum == 0 ) {
        printf("No GPU\n");
        return 0;
    }
    for (int dev = 0; dev < cudaDeviceNum; dev++) {
        cudaDeviceProp deviceInfo;
        cudaGetDeviceProperties(&deviceInfo, dev);
        printf("----------------------------------\n");
        printf("Device %s\n", deviceInfo.name);
        printf("----------------------------------\n");
        printf(" Device memory: \t%zu\n", deviceInfo.totalGlobalMem);
        printf(" Memory per-block: \t%zu\n", deviceInfo.sharedMemPerBlock);
        printf(" Register per-block: \t%d\n", deviceInfo.regsPerBlock);
        printf(" Warp size: \t\t%d\n", deviceInfo.warpSize);
        printf(" Memory pitch: \t\t%zu\n", deviceInfo.memPitch);
        printf(" Constant Memory: \t%zu\n", deviceInfo.totalConstMem);
        printf(" Max thread per-block: \t%d\n", deviceInfo.maxThreadsPerBlock);
        printf(" Max thread dim: \t%d / %d / %d\n", deviceInfo.maxThreadsDim[0], deviceInfo.maxThreadsDim[1], deviceInfo.maxThreadsDim[2]);
        printf(" Max grid size: \t%d / %d / %d\n", deviceInfo.maxGridSize[0], deviceInfo.maxGridSize[1], deviceInfo.maxGridSize[2]);
        printf(" Ver: \t\t\t%d.%d\n", deviceInfo.major, deviceInfo.minor);
        printf(" Clock: \t\t%d\n", deviceInfo.clockRate);
        printf(" Texture Alignment: \t%zu\n", deviceInfo.textureAlignment);
    }
    return 0;
}
22,186 | #include "includes.h"
/*
* get_da_peaks is a gpu_accelerated local maxima finder
* [iprod] = get_da_peaks(i1, r, thresh);
* Written by Andrew Nelson 7/20/17
*
*
*
*
*/
// includes, project
// main
/* Local-maximum detector over 5x5 neighborhoods of each column-major
 * m x n image in a stack of o images.  An interior pixel becomes 1 when
 * it is >= thresh and >= every pixel of its 5x5 window, else 0; pixels
 * within 2 of an edge are cleared to 0.
 * Fix: the original "clear" branch ran for ANY thread whose interior test
 * failed, including threads with row_output >= m or col_output >= n,
 * which wrote outside the image.  The branch is now bounds-checked.
 * NOTE(review): results are written back into d_i1 in place while other
 * blocks may still be reading their neighborhoods, so concurrent blocks
 * can observe already-overwritten pixels — a separate output buffer
 * would make the result deterministic; confirm caller expectations. */
__global__ void da_peaks(float *d_i1, float thresh, int m, int n, int o)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float d_i2[25];   // the thread's private copy of its 5x5 window
    // location of the output pixel being analyzed
    int row_output = blockIdx.y*blockDim.y + ty;
    int col_output = blockIdx.x*blockDim.x + tx;
    int imnum = blockIdx.z;   // which image in the stack
    if (imnum < o && row_output >= 2 && row_output < m-2 && col_output >= 2 && col_output < n-2)
    {
        // buffer the 5x5 window centered on (row_output, col_output)
        for(int i = 0; i < 5; i++){
            for(int j = 0; j < 5; j++)
            {
                d_i2[i*5 + j] = d_i1[(row_output - 2 + i) + (col_output - 2 + j)*m + imnum*m*n];
            }
        }
        float me = d_i2[12];   // center pixel of the window
        int maxi = 1;
        if(me < thresh){maxi = 0;}       // below threshold: not a peak
        for(int k = 0; k < 25; k++)
        {
            if(d_i2[k] > me){maxi = 0;}  // any strictly larger neighbor: not a peak
        }
        d_i1[row_output + col_output*m + imnum*m*n] = maxi;
    }
    else if(imnum < o && row_output < m && col_output < n)
    {
        // border pixel: clear it (now guarded against out-of-image threads)
        d_i1[row_output + col_output*m + imnum*m*n] = 0;
    }
}
22,187 | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <cuda_runtime.h>
#define N (1 << 12)
#define tile_size 32
#define block_size tile_size
// Aborts with a labeled message if the most recent CUDA call failed.
void checkCUDAError(const char *msg) {
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(EXIT_FAILURE);
}
// Fills all N*N entries with pseudo-random values in [0, 100).
void fillArray(float *arr){
    for(int idx = 0; idx < N*N; idx++)
        arr[idx] = rand() % 100;
}
// CPU reference: aout = a1 * a2 for N x N row-major float matrices.
void seqMatrixMul(float *a1, float *a2, float *aout){
    for(int row = 0; row < N; row++){
        for(int col = 0; col < N; col++){
            // accumulate in the same k-order (and float precision) as before
            float acc = 0.0f;
            for(int k = 0; k < N; k++)
                acc += a1[N*row + k] * a2[N*k + col];
            aout[N*row + col] = acc;
        }
    }
}
// Counts and reports how many of the N*N elements differ between a1 and a2.
void wrongNumberCheck(float *a1, float *a2){
    int mismatches = 0;
    for(int idx = 0; idx < N*N; idx++)
        if(a1[idx] != a2[idx])
            mismatches = mismatches + 1;
    printf("Number of wrong multiplications = %i\n", mismatches);
}
// Exact-equality comparison of two N x N matrices; on the first mismatch
// prints diagnostics (including the following element) plus a mismatch
// count, and returns 1.  Returns 0 when everything matches.
int mulCheck(float *a1, float *a2){
    for(int idx = 0; idx < N*N; idx++){
        if(a1[idx] == a2[idx]) continue;
        printf("Matrix Multiplication Failed!\n");
        printf("index = %i \n", idx);
        printf("expected = %f\nreceived = %f\n", a1[idx], a2[idx]);
        printf("Next element...\n");
        printf("expected = %f\nreceived = %f\n", a1[idx+1], a2[idx+1]);
        printf("Checking for number of wrong multiplications...\n");
        wrongNumberCheck(a1, a2);
        return 1;
    }
    printf("Matrix Multiplication Successful!\n");
    return 0;
}
/* Tolerance-based comparison of two N x N matrices.  Returns 0 when all
 * elements agree within the tolerance; otherwise prints diagnostics for
 * the first mismatch plus a mismatch count and returns 1.
 * Fix: the original used unqualified abs() on a float difference; with
 * these headers that can resolve to the int overload, truncating any
 * sub-1.0 difference to 0 and hiding real mismatches.  Use fabs(). */
int mulCheck2(float *a1, float *a2){
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            if(fabs(a1[N*i + j] - a2[N*i + j]) > 1e-8){
                printf("Matrix Multiplication Failed!\n");
                printf("index = %i \n", N*i + j);
                printf("row = %i, col = %i\n", i, j);
                printf("expected = %f\nreceived = %f\n", a1[N*i + j], a2[N*i+j]);
                printf("Next element...\n");
                printf("expected = %f\nreceived = %f\n", a1[N*i + j+1], a2[N*i+j+1]);
                printf("Checking for number of wrong multiplications...\n");
                wrongNumberCheck(a1, a2);
                return 1;
            }
        }
    }
    printf("Matrix Multiplication Successful!\n");
    return 0;
}
/* Tiled matrix multiply: aout = a1 * a2 for N x N row-major floats.
 * Requires blockDim == (tile_size, tile_size) and N a multiple of
 * tile_size: there are no tail guards on the tile loads or final store. */
__global__ void gpuMatMul(float *a1, float *a2, float *aout){
__shared__ float A[tile_size][tile_size];
__shared__ float B[tile_size][tile_size];
int tc = threadIdx.x;
int tr = threadIdx.y;
int c = blockIdx.x*tile_size + threadIdx.x;   // output column
int r = blockIdx.y*tile_size + threadIdx.y;   // output row
float sum_val = 0;
// march the A and B tiles across the shared (k) dimension
for(int i = 0; i < N; i += tile_size){
A[tr][tc] = a1[N*r + i + tc];
B[tr][tc] = a2[c + N*(i + tr)];
__syncthreads();   // both tiles fully loaded before any thread uses them
for(int j = 0; j < tile_size; j++){
sum_val += A[tr][j]*B[j][tc];
}
__syncthreads();   // everyone done reading before the next load overwrites
}
aout[N*r + c] = sum_val;
}
/* Benchmark driver: times an N x N float matrix multiply on the CPU and
 * on the GPU (tiled kernel), verifies the results agree, and prints
 * timings, throughputs, and a CSV summary line. */
int main(void){
    // timing (ms) and throughput (Gflop/s) figures
    float timecpu = 0;
    float timegpu = 0;
    float tpcpu = 0;
    float tpgpu = 0;
    cudaEvent_t launch_begin_seq, launch_end_seq;
    // Host buffers
    float *h_arr1 = (float*)malloc(N*N*sizeof(float));
    float *h_arr2 = (float*)malloc(N*N*sizeof(float));
    float *h_out = (float*)malloc(N*N*sizeof(float));
    float *h_save = (float*)malloc(N*N*sizeof(float));
    // Device buffers
    float *d_arr1, *d_arr2, *d_out;
    cudaMalloc((void**)&d_arr1, N*N*sizeof(float));
    cudaMalloc((void**)&d_arr2, N*N*sizeof(float));
    cudaMalloc((void**)&d_out, N*N*sizeof(float));
    // Check Memory Allocation
    if(h_arr1 == 0 || h_arr2 == 0 || h_out == 0 || h_save == 0 || d_arr1 == 0 || d_arr2 == 0 || d_out == 0){
        printf("Memory Allocation Failed!\n");
        return 1;
    }
    // Fill inputs with random data; clear the outputs
    fillArray(h_arr1);
    fillArray(h_arr2);
    memset(h_out, 0, N*N*sizeof(float));
    memset(h_save, 0, N*N*sizeof(float));
    cudaEventCreate(&launch_begin_seq);
    cudaEventCreate(&launch_end_seq);
    // CPU reference multiplication, timed with CUDA events
    cudaEventRecord(launch_begin_seq,0);
    seqMatrixMul(h_arr1, h_arr2, h_save);
    cudaEventRecord(launch_end_seq,0);
    cudaEventSynchronize(launch_end_seq);
    cudaEventElapsedTime(&timecpu, launch_begin_seq, launch_end_seq);
    printf("CPU time: %f ms\n", timecpu);
    // Fix: an N x N matrix multiply performs 2*N^3 flops, not 2*N; the old
    // formula under-reported throughput by a factor of N^2.  Computed in
    // double to avoid integer overflow (2*4096^3 exceeds INT_MAX).
    tpcpu = 1e-9*2.0*N*N*N/(timecpu*1e-3);
    printf("Throughput = %f Gflops/s\n\n", tpcpu);
    // One tile_size x tile_size block per output tile
    dim3 dimGrid(N/tile_size, N/tile_size, 1);
    dim3 dimBlock(tile_size, block_size, 1);
    // Prep device memory
    cudaMemset(d_arr1, 0, N*N*sizeof(float));
    cudaMemset(d_arr2, 0, N*N*sizeof(float));
    cudaMemcpy(d_arr1, h_arr1, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_arr2, h_arr2, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, N*N*sizeof(float));
    cudaEventCreate(&launch_begin_seq);
    cudaEventCreate(&launch_end_seq);
    // Timed GPU multiplication
    cudaEventRecord(launch_begin_seq,0);
    gpuMatMul<<<dimGrid, dimBlock>>>(d_arr1, d_arr2, d_out);
    cudaEventRecord(launch_end_seq,0);
    cudaEventSynchronize(launch_end_seq);
    // Copy result back to host (also synchronizes with the kernel)
    cudaMemcpy(h_out, d_out, N*N*sizeof(float), cudaMemcpyDeviceToHost);
    // Check For Cuda Errors
    checkCUDAError("gpuMatMul");
    if(mulCheck2(h_save, h_out) == 0){
        cudaEventElapsedTime(&timegpu, launch_begin_seq, launch_end_seq);
        printf("GPU time: %f ms\n", timegpu);
        tpgpu = 1e-9*2.0*N*N*N/(timegpu*1e-3);   // same 2*N^3 fix as above
        printf("Throughput = %f Gflops/s\n\n", tpgpu);
    }
    printf("Speed up = %f \n", timecpu/timegpu);
    printf("ratio = %f \n\n", tpgpu/tpcpu);
    printf("CSV output:\n");
    printf("%i,%i,%i,%f,%f,%f,%f,%f,%f", N, tile_size, block_size, timecpu, timegpu, tpcpu, tpgpu, timecpu/timegpu, tpgpu/tpcpu);
    free(h_arr1);
    free(h_arr2);
    free(h_out);
    free(h_save);
    cudaFree(d_arr1);
    cudaFree(d_arr2);
    cudaFree(d_out);
    return 0;
}
|
22,188 | #include <stdio.h>
#include <time.h>
#define N 10000000 //Job Size = 1K, 10K, 100K, 1M and 10M
#define M 128 //Threads per block = 128
#define R 2 //Radius = 2,4,8,16
// CUDA API error checking macro
// Prints the CUDA error string plus its source location, then aborts.
// Used through the cudaCheck() macro defined below.
static void handleError(cudaError_t err,
                        const char *file,
                        int line ) {
    if (err == cudaSuccess) return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err),
           file, line );
    exit(EXIT_FAILURE);
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))
/* 1D stencil of radius R: out[i] = sum of the 2R+1 padded-input values
 * around position i.  `in` must carry R halo cells on each side.
 * Fix: the grid ((N+M-1)/M blocks of M threads) can overshoot N and the
 * original had no bounds guard, so surplus tail threads read and wrote
 * past the ends of both arrays. */
__global__ void stencil_1d(int *in, int *out)
{
    // index of this thread across all threads, shifted past the left halo
    int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + R;
    if (gindex >= N + R) return;   // surplus tail threads do nothing
    // Apply the stencil
    int result = 0;
    for (int offset = -R ; offset <= R ; offset++)
        result += in[gindex + offset];
    // Store the result
    out[gindex - R] = result;
}
/* Driver: fills a padded input with ones, runs the 1D stencil on the GPU,
 * and reports the kernel execution time. */
int main()
{
    unsigned int i;
    // CUDA-event timing
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Fix: with N = 10M the original stack arrays (int h_in[N + 2*R]) needed
    // roughly 40 MB of stack and crashed on startup; use heap allocations.
    int *h_in = (int *)malloc((N + 2*R) * sizeof(int));
    int *h_out = (int *)malloc(N * sizeof(int));
    if (h_in == NULL || h_out == NULL) {
        printf("host allocation failed\n");
        exit(EXIT_FAILURE);
    }
    // GPU array copies
    int *d_in, *d_out;
    for( i = 0; i < (N + 2*R); ++i )
        h_in[i] = 1;
    // Allocate device memory
    cudaCheck( cudaMalloc( &d_in, (N + 2*R) * sizeof(int)) );
    cudaCheck( cudaMalloc( &d_out, N * sizeof(int)) );
    // copy from CPU to GPU memory
    cudaCheck( cudaMemcpy( d_in, h_in, (N + 2*R) * sizeof(int), cudaMemcpyHostToDevice) );
    cudaEventRecord( start, 0 );
    // Call stencil kernel
    stencil_1d<<< (N + M - 1)/M, M >>> (d_in, d_out);
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize(stop);
    cudaEventElapsedTime( &time, start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    printf(" GPU Execution Time = %f\n",time);
    // Copy results from device memory to host
    cudaCheck( cudaMemcpy( h_out, d_out, N * sizeof(int), cudaMemcpyDeviceToHost) );
    // Cleanup
    cudaFree(d_in);
    cudaFree(d_out);
    free(h_in);
    free(h_out);
    return 0;
}
|
22,189 | #include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define N 16
// Allocates three N-float device buffers and frees only C.  The frees of
// A and B are deliberately commented out — NOTE(review): this looks like
// an intentional leak (e.g. to exercise a leak checker); confirm before
// "fixing" it.  Always returns 0.
int testfunc()
{
float* A;
float* B;
float* C;
cudaMalloc((void**)&A, sizeof(float)*N);
cudaMalloc((void**)&B, sizeof(float)*N);
cudaMalloc((void**)&C, sizeof(float)*N);
//cudaFree(A);
//cudaFree(B);
cudaFree(C);
return 0;
}
// Entry point: runs the allocation test once.
int main()
{
    testfunc();   // return value (always 0) deliberately ignored
    return 0;
}
|
// Stores a[0] mod d[0] into r[0].  The quotient store into q[0] is
// currently disabled (kept commented out, matching the original design).
__global__ void divmod(int *a, int *q, int *r, int *d){
    const int dividend = a[0];
    /* q[0] = dividend/d[0]; */
    r[0] = dividend%d[0];
}
|
22,191 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
// Converts each a[tid] into its base-8 digit string rendered as a decimal
// number (e.g. 9 -> 11, 64 -> 100) and stores it in b[tid].
// One thread per element; launched as <<<1, n>>>.
__global__ void add(int *a,int *b){
    const int tid = threadIdx.x;
    int rendered = 0;   // octal digits accumulated as a decimal number
    int place = 1;      // decimal place value of the next digit
    int value = a[tid];
    while (value != 0) {
        rendered += (value % 8) * place;
        place = place * 10;
        value = value / 8;
    }
    b[tid] = rendered;
}
/* Driver: generates n random integers, converts each to octal on the GPU,
 * and prints inputs and results. */
int main(void){
    int n,a[1000],b[1000],i,size,*d_a,*d_b;
    printf("Enter no. of elements:\n");
    scanf("%d",&n);
    // Fix: the original never validated n, so n > 1000 overflowed the
    // fixed stack arrays (and <<<1,n>>> silently fails past 1024 threads).
    if (n < 1 || n > 1000) {
        printf("n must be between 1 and 1000\n");
        return 1;
    }
    for(i=0;i<n;i++){
        a[i] = rand()/100000;   // sample inputs instead of reading them
        printf("%d\t",a[i]);
    }
    printf("\n");
    size = sizeof(int);
    cudaMalloc((void **)&d_a,size*n);
    cudaMalloc((void **)&d_b,size*n);
    cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice);
    add <<<1,n>>> (d_a,d_b);   // one thread per element (n <= 1000 <= 1024)
    cudaMemcpy(b,d_b,size*n,cudaMemcpyDeviceToHost);
    for(i=0;i<n;i++)
        printf("%d\t",b[i]);
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
22,192 | #include "includes.h"
#define N 1200
#define THREADS 1024
/* c = a * b for n x n row-major double matrices: one thread per output
 * element, with bounds guards so any grid/block shape is safe. */
__global__ void matrixMultKernel (double *a, double *b, double *c, int n)
{
    const int i = blockIdx.y * blockDim.y + threadIdx.y;   // output row
    const int j = blockIdx.x * blockDim.x + threadIdx.x;   // output column
    if (i >= n || j >= n) return;   // grid-tail guard
    double acc = 0;
    for (int k = 0; k < n; k++)
        acc += a[i * n + k] * b[k * n + j];
    c[i * n + j] = acc;
}
22,193 |
/* Stub: intended to launch a kernel that inserts `num_sigs` signatures
 * from the device-resident d_sig_cache into the device-resident bloom
 * filter.  Currently unimplemented and always returns -1 (error). */
int launch_bf_sig_insert(unsigned char *d_sig_cache, size_t num_sigs,
unsigned char *d_bloom_filter)
{
/* TODO: Call kernel that inserts signatures into bloom filter */
return -1;
}
|
22,194 | #include <iostream>
using namespace std;
// Enumerates CUDA devices and prints a property report for each
// (report labels are in Russian; output text preserved verbatim).
int main () {
    int device_count;
    cudaGetDeviceCount(&device_count);
    cudaDeviceProp dp;
    cout << "CUDA device count: " << device_count << endl;
    for(int dev = 0; dev < device_count; dev++) {
        cudaGetDeviceProperties(&dp, dev);
        cout << dev << ": " << dp.name << " with CUDA compute compatibility " << dp.major << "." << dp.minor << endl;
        cout << dev << ": Тактовая частота ядра = " << dp.clockRate << endl;
        cout << "Память" << endl;
        cout << dev << ": Общий объем графической памяти = " << dp.totalGlobalMem / 1024 / 1024 << endl;
        cout << dev << ": Объем памяти констант = " << dp.totalConstMem << endl;
        cout << dev << ": Максимальный шаг = " << dp.memPitch << endl;
        cout << "Мультипроцессоры" << endl;
        cout << dev << ": Число потоковых мультипроцессоров = " << dp.multiProcessorCount << endl;
        cout << dev << ": Объем разделяемой памяти в пределах блока = " << dp.sharedMemPerBlock << endl;
        cout << dev << ": Число регистров в пределах блока = " << dp.regsPerBlock << endl;
        cout << dev << ": Размер WARP’а (нитей в варпе) = " << dp.warpSize << endl;
        cout << dev << ": Максимально допустимое число нитей в блоке = " << dp.maxThreadsPerBlock << endl;
        cout << dev << ": Mаксимальную размерность при конфигурации нитей в блоке = " << dp.maxThreadsDim[0] << " " << dp.maxThreadsDim[1] << " " << dp.maxThreadsDim[2] << endl;
        cout << dev << ": Максимальную размерность при конфигурации блоков в сетке = " << dp.maxGridSize[0] << " " << dp.maxGridSize[1] << " " << dp.maxGridSize[2] << endl;
    }
    return 0;
}
/* c = a * b for n x n row-major float matrices; one thread per output
 * element, guarded against grid overshoot.
 * Fix: the accumulator was declared int, so every float product was
 * truncated before being added — results were wrong for any
 * non-integral (or overflowing) data.  It is now a float, matching c. */
__global__ void matmul(float *a, float *b, float *c, int n) {
    // compute each thread's row
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // compute each thread's column
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float temp_sum = 0;
    if((row < n) && (col < n)) {
        // walk across the row of a and down the column of b
        for(int k = 0; k < n; k++) {
            temp_sum += a[row*n + k] * b[k*n + col];
        }
        c[row*n + col] = temp_sum;
    }
}
22,196 | #include<iostream>
#include<time.h>
#include<cstdlib>
#include<stdlib.h>
using namespace std;
__global__ void matrixMultiplication(int* A,int* B,int* C,int N);
void mm(int* A,int* B,int* C,int N);
/* Driver: multiplies two random ROWS x COLS int matrices on the GPU,
 * recomputes the product on the CPU, and reports per-element results,
 * the accumulated error, and both timings.
 * NOTE(review): the CPU reference indexes with N = ROWS for both
 * dimensions, so it is only correct for square inputs — confirm. */
int main()
{
    cudaEvent_t start,end,start1,end1;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventCreate(&start1);
    cudaEventCreate(&end1);
    int ROWS = 1<<2;
    int COLS = 1<<2;
    cout<<"\nEnter number of rows:";
    cin>>ROWS;
    cout<<"\nEnter number of cols:";
    cin>>COLS;
    // host matrices — allocated with malloc(), so released with free() below
    int* hostA = (int*)malloc(sizeof(int)*ROWS*COLS);
    int* hostB = (int*)malloc(sizeof(int)*ROWS*COLS);
    int* hostC = (int*)malloc(sizeof(int)*ROWS*COLS);
    srand(time(0));
    int i,j;
    for(i=0;i<ROWS;i++)
    {
        for(j=0;j<COLS;j++)
        {
            hostB[i*COLS+j] = rand()%30;
            hostA[i*COLS+j] = rand()%20;
        }
    }
    cout<<"\nMatrix A:\n";
    for(i=0;i<ROWS;i++)
    {
        for(j=0;j<COLS;j++)
        {
            //cout<<hostA[i*COLS+j]<<"\t";
        }
        //cout<<"\n";
    }
    cout<<"\nMatrix B:\n";
    for(i=0;i<ROWS;i++)
    {
        for(j=0;j<COLS;j++)
        {
            //cout<<hostB[i*COLS+j]<<"\t";
        }
        //cout<<"\n";
    }
    int* deviceA,*deviceB,*deviceC;
    cudaMalloc(&deviceA,sizeof(int)*ROWS*COLS);
    cudaMalloc(&deviceB,sizeof(int)*ROWS*COLS);
    cudaMalloc(&deviceC,sizeof(int)*ROWS*COLS);
    cudaMemcpy(deviceA,hostA,sizeof(int)*ROWS*COLS,cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB,hostB,sizeof(int)*ROWS*COLS,cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    mm(deviceA,deviceB,deviceC,ROWS);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    float t=0;
    cudaEventElapsedTime(&t,start,end);
    cudaError_t e=cudaGetLastError();
    if(e!=cudaSuccess)
    {
        printf("Cuda failure %s: ",cudaGetErrorString(e));
    }
    cudaDeviceSynchronize();
    cudaMemcpy(hostC,deviceC,ROWS*COLS*sizeof(int),cudaMemcpyDeviceToHost);
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    cudaEventRecord(start1);
    int N = ROWS;
    int* actual = (int*)malloc(sizeof(int)*ROWS*COLS);
    int sum;
    // CPU reference product (timed between start1/end1)
    for (int row=0; row<ROWS; row++)
    {
        for (int col=0; col<COLS; col++)
        {
            sum=0;
            for (int n=0; n<N; n++)
            {
                sum += hostA[row*N+n]*hostB[n*N+col];
            }
            actual[row*N+col] = sum;
        }
    }
    cudaEventRecord(end1);
    cudaEventSynchronize(end1);
    float t1=0;
    cudaEventElapsedTime(&t1,start1,end1);
    double error = 0;
    for(int k=0;k<ROWS*COLS;k++)
    {
        cout<<k<<")"<< "Expected value = "<<actual[k]<<"\tActual value = "<<hostC[k]<<"\n";
        error += double(abs(actual[k]-hostC[k]));
    }
    error=sqrt(error);
    cout<<"error = "<<error<<"\n";
    // Fix: hostA/hostB/hostC were allocated with malloc(); releasing them
    // with delete[] was undefined behavior.  Use free(), and also release
    // the reference matrix, which previously leaked.
    free(hostA);
    free(hostB);
    free(hostC);
    free(actual);
    cout<<"\nSequential time="<<t1;
    cout<<"\nParallel time="<<t<<endl;
}
/* C = A * B for N x N row-major int matrices; one thread per output cell.
 * Fix: the original called __syncthreads() inside a branch taken only by
 * in-range threads; __syncthreads() must be reached by every thread of
 * the block or behavior is undefined.  The barrier served no purpose
 * here (no shared memory is used), so it has been removed. */
__global__ void matrixMultiplication(int* A,int* B,int* C,int N)
{
    int ROW = blockIdx.y*blockDim.y+threadIdx.y;
    int COL = blockIdx.x*blockDim.x+threadIdx.x;
    int sum = 0;
    if(ROW<N && COL<N)
    {
        for(int i=0;i<N;i++)
        {
            sum+=A[ROW*N+i]*B[i*N+COL];
        }
        C[ROW*N+COL]=sum;
    }
}
/* Host launcher for matrixMultiplication over N x N matrices.
 * Small problems (N*N <= 512) run as a single N x N block; larger ones
 * use a tiled grid.
 * Fix: the original set a 512 x 512 thread block (262,144 threads) for
 * the large case, far beyond the 1024-threads-per-block hardware limit,
 * so every large launch failed.  Use a 16 x 16 block instead. */
void mm(int* A,int* B,int* C,int N)
{
    dim3 threadsPerblock(N,N);
    dim3 blocksPerGrid(1,1);
    if(N*N>512)
    {
        threadsPerblock.x = 16;
        threadsPerblock.y = 16;
        blocksPerGrid.x = ceil(double(N)/double(threadsPerblock.x));
        blocksPerGrid.y = ceil(double(N)/double(threadsPerblock.y));
    }
    matrixMultiplication<<<blocksPerGrid,threadsPerblock>>>(A,B,C,N);
}
|
22,197 | //
// Created by root on 2020/11/12.
//
#include "cuda_runtime.h"
#include "stdio.h"
// Increments *count 20 times, with the loop unrolled in groups of 4 by
// the pragma.  Plain (non-atomic) increments — safe only because the
// host launches it with a single thread (<<<1,1>>>).
__global__ void unrollTestKernel(int *count) {
#pragma unroll 4
for (int i = 0; i < 20; i++) {
(*count)++;
}
}
// Host driver: zero-initializes a device counter, runs the unroll test
// kernel once with a single thread, and prints the resulting count (20).
int main() {
    int *hostCount = (int *) malloc(sizeof(int ) );
    *hostCount = 0;
    int *devCount;
    cudaMalloc(&devCount, sizeof(int ));
    cudaMemcpy(devCount, hostCount, sizeof(int ), cudaMemcpyHostToDevice);
    unrollTestKernel<<<1, 1>>>(devCount);
    cudaMemcpy(hostCount, devCount, sizeof(int ), cudaMemcpyDeviceToHost);
    printf("count = %d\n", *hostCount); // expected: 20
    cudaFree(devCount);
    free(hostCount);
    return 0;
}
|
22,198 | /** Thrust Library **/
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
/** Std library **/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <random>
#include <time.h>
#include <chrono>
#include <fstream>
#include <string>
#include <iomanip>
/** performMults(double * arr, double * b, const int N, const int SIZE)
* For every ith row in matrix a, multiply a[i,j] by b[j]
*/
/* Element-wise scale: a[i] *= b[i % ROW_SIZE].  Treats `a` as a flattened
 * matrix whose rows have ROW_SIZE entries and multiplies each row
 * element-wise by the vector `b`.  SIZE is the total element count of `a`;
 * launch with at least SIZE threads. */
__global__ void performMults(double * a, double * b, int ROW_SIZE, int SIZE)
{
int a_index = blockIdx.x * blockDim.x + threadIdx.x;
int b_index = a_index % ROW_SIZE;   // column within the row -> index into b
if (a_index >= SIZE) return;   // grid-tail guard
// The multiplication stage must be done before the mapping and reduction stage
// all of these tasks can be done in parallel
a[a_index] *= b[b_index];
}
using namespace std;
/** sumRows(double * arr, double * c, const int N, const int SIZE)
* Expects arr to be a matrix, and c a result vector
* c[i] = sum(a[i,j] * b[i])
*
*/
/* Row reduction: c[row] = sum_j a[row*ROW_SIZE + j] * b[j], where `a` is a
 * flattened matrix of SIZE elements with rows of length ROW_SIZE.  The one
 * thread whose global index lands on column 0 of a row computes that whole
 * row serially; the other threads of the row idle.
 * Fixes:
 *  - the accumulator was declared int, truncating every double product
 *    before it was added, corrupting c for non-integral data;
 *  - there was no SIZE guard, so tail threads past the data (whose index
 *    happened to hit column 0) read past `a` and wrote past `c`.
 * NOTE(review): per the author's own comment, per-thread work grows with
 * ROW_SIZE; a parallel reduction would scale better. */
__global__ void sumRows(double * a, double * b, double * c, const int ROW_SIZE, const int SIZE)
{
    int a_index = blockIdx.x * blockDim.x + threadIdx.x;
    int c_index = a_index / ROW_SIZE;   // which output row this thread belongs to
    int b_index = a_index % ROW_SIZE;   // position within the row
    if (a_index >= SIZE) return;        // grid-tail guard (also bounds c_index)
    if (b_index == 0)                   // first thread of each row does the work
    {
        double local_c_sum = 0;
        for (int i = 0; i < ROW_SIZE; i++)
            local_c_sum += a[c_index * ROW_SIZE + i] * b[i];
        c[c_index] = local_c_sum;
    }
}
const int INCORRECT_NUM_ARGS_ERROR = 1;
const unsigned THREADS = 512;
void usage();
using namespace std;
/**** MAIN ***********************/
/*********************************/
/* Driver: builds (or loads) an N x N matrix and length-N vector, computes
 * the matrix-vector product on the GPU via sumRows, and prints either the
 * result vector (mode 'v') or the elapsed nanoseconds (mode 't').
 * Args: <row size> [mode] [values] [threads]; see usage(). */
int main( int argc, char* argv[] )
{
int N = 0; // row size
char mode = 'v'; // what to print
int threads = 0; // total amount of threads if 0 defaults to 512 per block
char values = '1'; // what to fill vectors with
// NOTE: fall-through is intentional — each extra argument layers onto the defaults.
switch ( argc )
{
case 5:
threads = atoi(argv[4]);
case 4:
values = argv[3][0];
case 3:
mode = argv[2][0];
case 2:
N = atoi(argv[1]);
break;
default:
usage();
}
const int SIZE = N * N; // square matrix N by N
thrust::host_vector<double> h_a(SIZE);
thrust::host_vector<double> h_b(N);
thrust::device_vector<double> d_a(SIZE, 1);
thrust::device_vector<double> d_b(N, 1);
thrust::device_vector<double> c(N);
// if mode is load, load vectors from file, otherwise generate them ourselves
if (values != 'l')
{
// 'r': uniform integers in [0,10); otherwise all ones
bool random = values == 'r';
double lowerlimit = random ? 0 : 1;
double upperlimit = random ? 10 : 1;
#ifdef DEBUG
printf("upperLimit: %f lowerLimit: %f\n", upperlimit, lowerlimit);
#endif
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine re(seed);
std::uniform_real_distribution<double> unif(lowerlimit,upperlimit);
for (int i = 0; i < SIZE; i++)
h_a[i] = floor(unif(re));
for (int i = 0; i < N; i++)
h_b[i] = floor(unif(re));
}
else // load vectors from file
{
ifstream myfile("input.txt");
for (int i = 0; i < SIZE; i++)
myfile >> h_a[i];
for (int i = 0; i < N; i++)
myfile >> h_b[i];
myfile.close();
}
/* thrust handles the copying of memory from host vectors to
device vectors with a simple assignment. */
// record action time (includes the host-to-device transfer below)
auto start = chrono::steady_clock::now();
d_a = h_a;
d_b = h_b;
// NOTE(review): `transfer` is captured but never reported — dead timestamp.
auto transfer = chrono::steady_clock::now();
#ifdef DEBUG
cout << "Matrix values:" << endl;
for (int i = 0; i < SIZE; i++)
{
cout << h_a[i] << " ";
if ((i + 1) % N == 0) cout << endl;
}
cout << "\n\n";
cout << "Vector values:" << endl;
for (int i = 0; i < N; i++)
cout << h_b[i] << " ";
cout << endl;
#endif
// vectors are unfortunatly not available on cuda device
// but you can get the memory address, pass it to the device,
// and treat it as a normal array.
double * p_a = thrust::raw_pointer_cast(&d_a[0]);
double * p_b = thrust::raw_pointer_cast(&d_b[0]);
double * p_c = thrust::raw_pointer_cast(&c[0]);
unsigned blocks;
// one thread per block, if doing the Karp-Flatt Metric
// if we were given a set amount of threads
// set to it
if ( threads )
{
#ifdef DEBUG
if (N > threads)
cout << "Warning! incorrect number of threads will not perform correctly." << endl;
#endif
// assume threads is a multiple of 32
blocks = threads/32; // ensures that there are exactly as many given threads on the problem
threads = 32;
}
else
{
// default: 512-thread blocks, enough blocks to cover SIZE elements
threads = THREADS;
blocks = (SIZE / THREADS) + 1;
}
#ifdef DEBUG
cout << "blocks: " << blocks << " threads: " << threads << endl;
#endif
// record action time
//auto start = chrono::steady_clock::now();
//performMults<<<blocks, threads>>>(p_a, p_b, N, SIZE);
//cudaDeviceSynchronize();
#ifdef DEBUG
h_a = d_a;
cout << "Matrix values after mulltiplication:" << endl;
for (int i = 0; i < SIZE; i++)
{
cout << h_a[i] << " ";
if ((i + 1) % N == 0) cout << endl;
}
#endif
// sumRows multiplies and reduces in one pass (performMults is disabled above)
sumRows<<<blocks, threads>>>(p_a, p_b, p_c, N, SIZE);
cudaDeviceSynchronize();
auto end = chrono::steady_clock::now();
// print out time took if requested
#ifndef DEBUG // if debug dont check just print
if (mode == 't')
#endif
#ifdef DEBUG // with a title too
cout << "time ns:\n";
#endif
cout << chrono::duration_cast<chrono::nanoseconds>(end - start).count();
thrust::host_vector<double> result = c;
#ifdef DEBUG
printf("\n\nresult:\n");
#endif
#ifndef DEBUG
if (mode == 'v')
#endif
for (int i = 0; i < N; i++)
cout << fixed << setprecision(2) << result[i] << " ";
#ifdef DEBUG
cout << endl;
#endif
return 0;
}
// Prints command-line usage and terminates with the argument-error code.
// Called when argc doesn't match an accepted form; does not return.
void usage()
{
printf("./main <row size> <mode> <values> <threads>\n");
printf("<row size> : required\n<mode> : v to print result, t to print time nanoseconds\n<values> : 1 all 1 values, r all random, l load from file.\n");
exit(INCORRECT_NUM_ARGS_ERROR);
}
|
22,199 | #include "includes.h"
/* Gathers point features by index: for each of b batches, m query points
 * and nsample neighbors per query, copies the c features of point
 * idx[j*nsample+k] from `points` (n x c per batch) into `out`
 * (m x nsample x c per batch).  One block per batch; threads stride
 * over the m query points. */
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
    // advance all base pointers to this block's batch element
    const int batch = blockIdx.x;
    const float *batchPoints = points + n * c * batch;
    const int *batchIdx = idx + m * nsample * batch;
    float *batchOut = out + m * nsample * c * batch;
    for (int j = threadIdx.x; j < m; j += blockDim.x) {
        for (int k = 0; k < nsample; ++k) {
            const int src = batchIdx[j * nsample + k];   // source point index
            for (int l = 0; l < c; ++l) {
                batchOut[j * nsample * c + k * c + l] = batchPoints[src * c + l];
            }
        }
    }
}
22,200 | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*
*
* This sample illustrates a 3D stencil computation over a uniform grid, a
* computation common in finite difference codes. The kernel advances 2D
* threadblocks along the slowest-varying dimension of the 3D data set.
* Data is kept in registers and shared memory for each computation, thus
* effectively streaming the input. Data ends up being read twice, due to
* the halos (16x16 output region for each threadblock, 4 halo regions, each
* 16x4). For more details please refer to:
* P. Micikevicius, 3D finite difference computation on GPUs using CUDA. In
* Proceedings of 2nd Workshop on General Purpose Processing on Graphics
* Processing Units (Washington, D.C., March 08 - 08, 2009). GPGPU-2,
* vol. 383. ACM, New York, NY, 79-84.
*
* CUDA Optimization slides, Supercomputing 08 CUDA totorial
* http://gpgpu.org/static/sc2008/M02-04_Optimization.pdf
*
*/
#include <stdio.h>
#include "reference.h"
#define BLOCK_DIMX 16
#define BLOCK_DIMY 16
#define RADIUS 4
__constant__ float c_coeff[RADIUS+1];
// 8th-order (RADIUS=4) 3D stencil over a dimx x dimy x dimz grid.
// Launch layout: 2D grid of BLOCK_DIMX x BLOCK_DIMY (16x16) threadblocks tiling
// the x/y plane; each block marches along z, keeping the 2*RADIUS+1 z-neighbors
// of the current slice in registers (infront1..4 / current / behind1..4) and the
// current slice's x/y neighborhood in shared memory.
// Coefficients come from the __constant__ array c_coeff[RADIUS+1] (set on the
// host via cudaMemcpyToSymbol before launch).
// NOTE(review): there is no bounds guard on ix/iy, so dimx and dimy must be
// exact multiples of BLOCK_DIMX/BLOCK_DIMY — the host code launches
// grid(dimx/16, dimy/16), which enforces this only when the dims divide evenly.
// NOTE(review): the halo loads below (out_idx-RADIUS*dimx, out_idx-RADIUS, etc.)
// read outside the block's tile; for tiles on the domain boundary these indices
// can fall outside the allocation. The sample's correctness check skips a
// RADIUS-wide border, but the loads themselves look out-of-bounds at the
// edges — TODO confirm against the original whitepaper's padding assumptions.
__global__ void stencil_3D_16x16_order8(float *g_output, float *g_input, const int dimx, const int dimy, const int dimz)
{
// Shared tile for the current slice, including a RADIUS-wide halo on all 4 sides.
__shared__ float s_data[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS];
// Global x/y coordinates of this thread's output column.
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int in_idx = iy*dimx + ix;
int out_idx = 0;
// One z-slice worth of elements: advancing by `stride` moves one step in z.
int stride = dimx*dimy;
// Register pipeline holding the 2*RADIUS z-neighbors of the current point.
float infront1, infront2, infront3, infront4;
float behind1, behind2, behind3, behind4;
float current;
// Coordinates of this thread's element inside the shared tile (offset past the halo).
int tx = threadIdx.x + RADIUS;
int ty = threadIdx.y + RADIUS;
// fill the "in-front" and "behind" data
// Prime the pipeline: read the first 2*RADIUS slices so that `current`
// lands on slice z=RADIUS-1; behind4 is filled on the first loop iteration.
behind3 = g_input[in_idx]; in_idx += stride;
behind2 = g_input[in_idx]; in_idx += stride;
behind1 = g_input[in_idx]; in_idx += stride;
current = g_input[in_idx]; out_idx = in_idx; in_idx += stride;
infront1 = g_input[in_idx]; in_idx += stride;
infront2 = g_input[in_idx]; in_idx += stride;
infront3 = g_input[in_idx]; in_idx += stride;
infront4 = g_input[in_idx]; in_idx += stride;
// Interior slices only: the RADIUS-wide z-borders are never written.
for(int i=RADIUS; i<dimz-RADIUS; i++)
{
//////////////////////////////////////////
// advance the slice (move the thread-front)
// Rotate the register pipeline one step in +z; only one new global
// load (infront4) is needed per output point.
behind4 = behind3;
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = infront1;
infront1 = infront2;
infront2 = infront3;
infront3 = infront4;
infront4 = g_input[in_idx];
in_idx += stride;
out_idx += stride;
// Barrier before refilling the shared tile: ensures no thread is still
// reading the previous iteration's s_data contents.
__syncthreads();
/////////////////////////////////////////
// update the data slice in smem
// Threads in the first RADIUS rows/columns of the block additionally
// fetch the halo regions. Divergent, but __syncthreads() stays outside
// the conditionals, so all threads reach the same barriers.
if(threadIdx.y<RADIUS) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[out_idx-RADIUS*dimx];
s_data[threadIdx.y+BLOCK_DIMY+RADIUS][tx] = g_input[out_idx+BLOCK_DIMY*dimx];
}
if(threadIdx.x<RADIUS) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[out_idx-RADIUS];
s_data[ty][threadIdx.x+BLOCK_DIMX+RADIUS] = g_input[out_idx+BLOCK_DIMX];
}
// update the slice in smem
s_data[ty][tx] = current;
// Barrier after the fill: the tile (interior + halos) must be complete
// before any thread starts the stencil reads below.
__syncthreads();
/////////////////////////////////////////
// compute the output value
// Symmetric 8th-order stencil: center term plus, per distance d=1..4,
// the six axis neighbors (z pairs from registers, x/y pairs from smem).
float value = c_coeff[0] * current;
value += c_coeff[1]*( infront1 + behind1 + s_data[ty-1][tx]+ s_data[ty+1][tx]+ s_data[ty][tx-1]+ s_data[ty][tx+1] );
value += c_coeff[2]*( infront2 + behind2 + s_data[ty-2][tx]+ s_data[ty+2][tx]+ s_data[ty][tx-2]+ s_data[ty][tx+2] );
value += c_coeff[3]*( infront3 + behind3 + s_data[ty-3][tx]+ s_data[ty+3][tx]+ s_data[ty][tx-3]+ s_data[ty][tx+3] );
value += c_coeff[4]*( infront4 + behind4 + s_data[ty-4][tx]+ s_data[ty+4][tx]+ s_data[ty][tx-4]+ s_data[ty][tx+4] );
g_output[out_idx] = value;
}
}
/*
 * Host driver for the 3D stencil sample.
 *
 * Usage: 3DFD [dimx dimy dimz [nreps [check_correctness]]]
 * Selects device 0, allocates input/output volumes, uploads data and the
 * symmetric stencil coefficients, times `nreps` kernel launches with CUDA
 * events, optionally validates against the CPU reference, and releases all
 * resources. Returns EXIT_SUCCESS in all non-error paths (allocation
 * failures deliberately print "TEST PASSED!" and exit cleanly so the sample
 * is treated as skipped, not failed, on small devices).
 *
 * Fixes vs. the original:
 *  - deprecated cudaThreadSynchronize()/cudaThreadExit() replaced by
 *    cudaEventSynchronize(stop)/cudaDeviceReset() (the old thread-API names
 *    were removed in recent CUDA toolkits);
 *  - nbytes widened to size_t to avoid int overflow for large volumes;
 *  - nreps clamped to >= 1 to avoid division by zero in the timing math;
 *  - unreachable statement after exit() removed.
 */
int main(int argc, char *argv[])
{
    /////////////////////////////////////////////
    // choose the GPU for execution
    //
    int device = 0;
    cudaSetDevice(device);
    cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, device);
    printf("3DFD running on: %s\n", properties.name);
    if (properties.totalGlobalMem >= 1024*1024*1024) {
        printf("Total GPU Memory: %.4f GB\n", properties.totalGlobalMem/(1024.f*1024.f*1024.f) );
    } else {
        printf("Total GPU Memory: %.4f MB\n", properties.totalGlobalMem/(1024.f*1024.f) );
    }

    /////////////////////////////////////////////
    // process command-line arguments,
    // set execution parameters
    //
    int pad = 0;
    int dimx = 48+pad;          // NOTE: kernel assumes dimx, dimy are multiples of 16
    int dimy = 48;
    int dimz = 40;
    int nreps = 1;              // number of time-steps, over which performance is averaged
    int check_correctness = 1;  // 1=check correctness, 0=don't. Note that CPU code is very
                                // naive and not optimized, so many steps will take a
                                // long time on CPU
    if( argc >= 4 )
    {
        dimx = atoi(argv[1]);
        dimy = atoi(argv[2]);
        dimz = atoi(argv[3]);
    }
    if( argc >= 5)
        nreps = atoi(argv[4]);
    if( nreps < 1 )             // guard: atoi can return 0/negative -> division by zero below
        nreps = 1;
    if( argc >= 6)
        check_correctness = atoi(argv[5]);
    printf("%dx%dx%d\n", dimx, dimy, dimz);

    /////////////////////////////////////////////
    // setup data
    //
    // allocate CPU and GPU memory (size_t: int would overflow for large volumes)
    float *d_input=0, *d_output=0;
    size_t nbytes = (size_t)dimx*dimy*dimz*sizeof(float);
    cudaMalloc( (void**)&d_input, nbytes);
    cudaMalloc( (void**)&d_output, nbytes);
    if( 0==d_input || 0==d_output )
    {
        // Treat out-of-memory as a skip, not a failure (intentional in this sample).
        printf("Unable to allocate %.4f Mbytes of GPU memory\n", (float)nbytes/(1024.0f*1024.0f) );
        printf("  TEST PASSED!\n");
        exit(EXIT_SUCCESS);
    }
    printf("allocated %.1f MB on device\n", (2.f*nbytes)/(1024.f*1024.f));

    // initialize data
    float *h_data=0, *h_reference=0;
    h_data = (float*)malloc(nbytes);
    h_reference = (float*)malloc(nbytes);
    if( 0==h_data || 0==h_reference )
    {
        printf("couldn't allocate CPU memory\n");
        printf("  TEST PASSED!\n");
        exit(EXIT_SUCCESS);
    }
    random_data( h_data, dimx,dimy,dimz, 1, 5 );
    cudaMemcpy( d_input, h_data, nbytes, cudaMemcpyHostToDevice );
    if( cudaGetLastError() != cudaSuccess )
    {
        printf("data upload to GPU failed\n");
        printf("  TEST FAILED!\n");
        exit(-2);
    }

    // setup coefficients (all-ones symmetric stencil, mirrored in the CPU reference)
    float h_coeff_symmetric[RADIUS+1] = {1.f, 1.f, 1.f, 1.f, 1.f};
    cudaMemcpyToSymbol( c_coeff, h_coeff_symmetric, (RADIUS+1)*sizeof(float) );
    if( cudaGetLastError() != cudaSuccess )
    {
        printf("coefficient upload to GPU failed\n");
        printf("  TEST FAILED!\n");
        exit(-3);
    }

    // kernel launch configuration (grid covers the x/y plane exactly; requires
    // dimx % BLOCK_DIMX == 0 and dimy % BLOCK_DIMY == 0)
    dim3 block(BLOCK_DIMX,BLOCK_DIMY);
    dim3 grid( dimx/block.x, dimy/block.y );
    printf("(%d,%d)x(%d,%d) grid\n", grid.x,grid.y, block.x,block.y);

    // variables for measuring performance
    float elapsed_time_ms=0.0f, throughput_mpoints=0.0f;
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );

    /////////////////////////////////////////////
    // kernel execution
    //
    cudaEventRecord( start, 0 );
    for(int i=0; i<nreps; i++)
        stencil_3D_16x16_order8<<<grid,block>>>(d_output, d_input, dimx, dimy, dimz);
    cudaEventRecord( stop, 0 );
    // Block until the stop event has been recorded; this replaces the
    // deprecated cudaThreadSynchronize() and is required before reading
    // the elapsed time.
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsed_time_ms, start, stop );
    elapsed_time_ms /= nreps;
    // points / microsecond == MPoints/s (only interior z-slices are computed)
    throughput_mpoints = (dimx*dimy*(dimz-2*RADIUS))/(elapsed_time_ms*1e3f);
    printf("-------------------------------\n");
    printf("time:        %8.2f ms\n", elapsed_time_ms );
    printf("throughput:  %8.2f MPoints/s\n", throughput_mpoints );
    printf("CUDA: %s\n", cudaGetErrorString(cudaGetLastError()) );

    /////////////////////////////////////////////
    // check the correctness
    //
    if( check_correctness)
    {
        printf("-------------------------------\n");
        printf("comparing to CPU result...\n");
        reference_3D( h_reference, h_data, h_coeff_symmetric, dimx,dimy,dimz, RADIUS );
        // h_data is reused as the download buffer for the GPU result.
        cudaMemcpy( h_data, d_output, nbytes, cudaMemcpyDeviceToHost );
        // RADIUS*nreps: each repetition invalidates another RADIUS-wide border.
        if( within_epsilon( h_data, h_reference, dimx,dimy,dimz, RADIUS*nreps, 0.0001f ) ) {
            printf("  Result within epsilon\n");
            printf("  TEST PASSED!\n");
        } else {
            printf("  Incorrect result\n");
            printf("  TEST FAILED!\n");
        }
    }

    /////////////////////////////////////////////
    // release the resources
    //
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    if( d_input )
        cudaFree( d_input );
    if( d_output )
        cudaFree( d_output );
    if( h_data )
        free( h_data );
    if( h_reference )
        free( h_reference );
    // cudaDeviceReset() replaces the deprecated cudaThreadExit().
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.