serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,401 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-07-31
*/
#include <math.h>
#include "../../XDevice.h"
#include "../../XName.h"
#include "../shape/IsSameShaped.h"
#include "Unary.h"
#include "Unary.cuh"
#include<cuda_runtime.h>
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/* Device-side elementwise helpers used by the SIMPLE_UNARY_FUNCTION_GPU
   kernels below. Each math helper routes through float, so double inputs
   lose precision to single precision by design of this file. */
/* ceil(x), computed in float */
template<class T>
__device__
T UnaryCudaCeil(T x)
{
return (T)ceil((float)x);
}
/* exp(x), computed in float */
template<class T>
__device__
T UnaryCudaExp(T x)
{
return (T)exp((float)x);
}
/* |x|, computed in float */
template<class T>
__device__
T UnaryCudaFabs(T x)
{
return (T)fabs((float)x);
}
/* floor(x), computed in float */
template<class T>
__device__
T UnaryCudaFloor(T x)
{
return (T)floor((float)x);
}
/* 1 if r != 0, else 0 */
template<class T>
__device__
T UnaryCudaIsNonZero(T r)
{
return (r != (T)0.0) ? (T)1.0 : (T)0.0;
}
/* 1 if r == 0, else 0 */
template<class T>
__device__
T UnaryCudaIsZero(T r)
{
return (r == (T)0.0) ? (T)1.0 : (T)0.0;
}
/* natural log(x), computed in float */
template<class T>
__device__
T UnaryCudaLog(T x)
{
return (T)log((float)x);
}
/* -x */
template<class T>
__device__
T UnaryCudaNegate(T x)
{
return -x;
}
/* sign of r: +1, 0 or -1 (double literals are implicitly converted to T) */
template<class T>
__device__
T UnaryCudaSign(T r)
{
if (r > (T)0)
return 1.0;
else if (r == (T)0)
return 0.0;
else
return -1.0;
}
/* sqrt(x), computed in float */
template<class T>
__device__
T UnaryCudaSqrt(T x)
{
return (T)sqrt((float)x);
}
/* x squared */
template<class T>
__device__
T UnaryCudaSquare(T x)
{
return x * x;
}
/* round half away from zero, built on the floor/ceil helpers above */
template<class T>
__device__
T UnaryCudaRound(T r)
{
return (r > (T)0.0) ? (T)UnaryCudaFloor(r + (T)0.5) : (T)UnaryCudaCeil(r - (T)0.5);
}
/* sin(x), computed in float */
template<class T>
__device__
T UnaryCudaSin(T x)
{
return (T)sin((float)x);
}
/* cos(x), computed in float */
template<class T>
__device__
T UnaryCudaCos(T x)
{
return (T)cos((float)x);
}
/* tan(x), computed in float */
template<class T>
__device__
T UnaryCudaTan(T x)
{
return (T)tan((float)x);
}
/* 1/x; division by zero is not guarded (the check below was disabled) */
template<class T>
__device__
T UnaryCudaReciprocal(T x)
{
//if (x == 0)
//ShowNTErrors("Zero does not have reciprocal value.");
return (T)(1 / x);
}
/*
 * SIMPLE_UNARY_FUNCTION_GPU(funcName, origFunc) expands to two definitions:
 *  - KernelfuncName<T>: one thread per element, bounds-checked against
 *    size, applying origFunc elementwise from a to b;
 *  - _CudafuncName: host wrapper that validates shapes (and rejects
 *    sparse tensors), asks GDevs for a 1-D launch configuration, switches
 *    the current device (ProtectCudaDev / BacktoCudaDev), and dispatches
 *    on a->dataType for float / double / int data.
 * No comments are placed inside the macro body: a // comment would
 * swallow the line-continuation backslash.
 */
#define SIMPLE_UNARY_FUNCTION_GPU(funcName, origFunc) \
template<class T> \
__global__ \
void Kernel##funcName(T * a, T * b, int size) \
{ \
int i = blockDim.x * blockIdx.x + threadIdx.x; \
\
if (i < size) \
b[i] = (T)origFunc(a[i]); \
} \
void _Cuda##funcName(const XTensor * a, XTensor * b) \
{ \
CheckNTErrors((_IsSameShaped(a, b)), \
"Input tensors should have the same type!"); \
CheckNTErrors(a->isSparse == false, "TODO!"); \
\
int gridSize[3]; \
int blockSize[3]; \
\
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize); \
\
dim3 blocks(gridSize[0]); \
dim3 threads(blockSize[0]); \
\
int devIDBackup; \
ProtectCudaDev(a->devID, devIDBackup); \
\
if (a->dataType == X_FLOAT) { \
Kernel##funcName<<<blocks, threads>>> \
((float*)a->data, (float*)b->data, a->unitNum); \
} \
else if (a->dataType == X_DOUBLE) { \
Kernel##funcName<<<blocks, threads>>> \
((double*)a->data, (double*)b->data, a->unitNum); \
} \
else if (a->dataType == X_INT) { \
Kernel##funcName<<<blocks, threads>>> \
((int*)a->data, (int*)b->data, a->unitNum); \
} \
else { \
ShowNTErrors("TODO!"); \
} \
\
BacktoCudaDev(a->devID, devIDBackup); \
}
/* Instantiate the elementwise GPU kernel + host wrapper for each unary op. */
SIMPLE_UNARY_FUNCTION_GPU(Absolute, UnaryCudaFabs)
SIMPLE_UNARY_FUNCTION_GPU(Ceil, UnaryCudaCeil)
SIMPLE_UNARY_FUNCTION_GPU(Exp, UnaryCudaExp)
SIMPLE_UNARY_FUNCTION_GPU(Floor, UnaryCudaFloor)
SIMPLE_UNARY_FUNCTION_GPU(IsNonZero, UnaryCudaIsNonZero)
SIMPLE_UNARY_FUNCTION_GPU(IsZero, UnaryCudaIsZero)
SIMPLE_UNARY_FUNCTION_GPU(Log, UnaryCudaLog)
SIMPLE_UNARY_FUNCTION_GPU(Negate, UnaryCudaNegate)
SIMPLE_UNARY_FUNCTION_GPU(Round, UnaryCudaRound)
SIMPLE_UNARY_FUNCTION_GPU(Sign, UnaryCudaSign)
SIMPLE_UNARY_FUNCTION_GPU(Sqrt, UnaryCudaSqrt)
SIMPLE_UNARY_FUNCTION_GPU(Square, UnaryCudaSquare)
SIMPLE_UNARY_FUNCTION_GPU(Sin, UnaryCudaSin)
SIMPLE_UNARY_FUNCTION_GPU(Cos, UnaryCudaCos)
SIMPLE_UNARY_FUNCTION_GPU(Tan, UnaryCudaTan)
SIMPLE_UNARY_FUNCTION_GPU(Reciprocal, UnaryCudaReciprocal)
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
20,402 |
/*
void cureTest() {
float sqrt
}*/ |
20,403 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#define BLOCK_SIZE 8
#define GRID_SIZE 8
//struct timespec start, finish;
//double elapsed;
/*
 * One sweep of the Ising model on an n x n lattice with periodic
 * boundaries. Launch: GRID_SIZE x GRID_SIZE blocks of
 * BLOCK_SIZE x BLOCK_SIZE threads; each thread walks multiple lattice
 * tiles when n > BLOCK_SIZE*GRID_SIZE. Reads spins from G, writes the
 * updated spins to newG; w is the 5x5 influence-weight stencil.
 * NOTE(review): every thread redundantly copies all 25 weights into
 * shared_w, and there is no __syncthreads() at the end of a tile
 * iteration before shared_G is refilled for the next tile — a potential
 * race when iterations > 1; verify with compute-sanitizer racecheck.
 */
__global__ void ising_kernel(int *G,int *newG,double *w,int n){
int x,y;
__shared__ double shared_w[25];
__shared__ int shared_G[(BLOCK_SIZE+4)*(BLOCK_SIZE+4)];
/* all threads write identical values, so no barrier precedes the reads */
for(int i=0;i<25;i++){
shared_w[i]=w[i];
}
unsigned int xBlock = blockDim.x * blockIdx.x;
unsigned int yBlock = blockDim.y * blockIdx.y;
unsigned int xIndex = xBlock + threadIdx.x;
unsigned int yIndex = yBlock + threadIdx.y;
unsigned int tempX = xBlock + threadIdx.x;
unsigned int tempY = yBlock + threadIdx.y;
int iterations;
/* iterations = ceil(n / (BLOCK_SIZE*GRID_SIZE)): tiles per dimension */
if (n%(BLOCK_SIZE*GRID_SIZE)==0){
iterations=n/(BLOCK_SIZE*GRID_SIZE);
}else{
iterations=n/(BLOCK_SIZE*GRID_SIZE)+1;
}
for(int i=0;i<iterations;i++){
xIndex=tempX+GRID_SIZE*BLOCK_SIZE*(i);
for(int j=0;j<iterations;j++){
yIndex=tempY+GRID_SIZE*BLOCK_SIZE*(j);
/* stage this thread's spin into the centre of the haloed tile;
   (idx+n)%n wraps the periodic boundary */
shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2]=G[((xIndex+n)%n)*n+(yIndex+n)%n];
/* border threads additionally load the 2-cell halo around the tile */
if(threadIdx.x==0){
if (threadIdx.y==0){
/* top-left corner: 3x3 patch above-left (minus the centre, already loaded) */
for(int k=0;k<3;k++){
for(int l=0;l<3;l++){
if(!(k==0&&l==0)){
shared_G[(2-k)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex-k+n)%n)*n+(yIndex-l+n)%n];
}
}
}
}else if(threadIdx.y==BLOCK_SIZE-1){
/* top-right corner */
for(int k=0;k<3;k++){
for(int l=0;l<3;l++){
if(!(k==0&&l==0)){
shared_G[(2-k)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex-k+n)%n)*n+(yIndex+l+n)%n];
}
}
}
}else{
/* top edge: two halo rows above */
shared_G[(2-1)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-1+n)%n)*n+(yIndex+n)%n];
shared_G[(2-2)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex-2+n)%n)*n+(yIndex+n)%n];
}
}else if(threadIdx.x==BLOCK_SIZE-1){
if (threadIdx.y==0){
/* bottom-left corner */
for(int k=0;k<3;k++){
for(int l=0;l<3;l++){
if(!(k==0&&l==0)){
shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2-l)]=G[((xIndex+k+n)%n)*n+(yIndex-l+n)%n];
}
}
}
}else if(threadIdx.y==BLOCK_SIZE-1){
/* bottom-right corner */
for(int k=0;k<3;k++){
for(int l=0;l<3;l++){
if(!(k==0&&l==0)){
shared_G[(2+k+threadIdx.x)*(BLOCK_SIZE+4)+(2+l+threadIdx.y)]=G[((xIndex+k+n)%n)*n+(yIndex+l+n)%n];
}
}
}
}else {
/* bottom edge: two halo rows below */
shared_G[(2+1+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+1+n)%n)*n+(yIndex+n)%n];
shared_G[(2+2+threadIdx.x)*(BLOCK_SIZE+4)+(2+threadIdx.y)]=G[((xIndex+2+n)%n)*n+(yIndex+n)%n];
}
}else{
if(threadIdx.y==0){
/* left edge: two halo columns */
shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-1)]=G[((xIndex+n)%n)*n+(yIndex-1+n)%n];
shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2-2)]=G[((xIndex+n)%n)*n+(yIndex-2+n)%n];
}else if(threadIdx.y==BLOCK_SIZE-1){
/* right edge: two halo columns */
shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+1+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+1+n)%n];
shared_G[(2+threadIdx.x)*(BLOCK_SIZE+4)+(2+2+threadIdx.y)]=G[((xIndex+n)%n)*n+(yIndex+2+n)%n];
}
}
/* tile (plus halo) fully staged; uniform across the block, so safe here */
__syncthreads();
if(xIndex<n&&yIndex<n){
double weight=0;
//printf("BLOCK IDX X: %d\n",blockIdx.x);
//printf("BLOCK IDX Y: %d\n",blockIdx.y);
/* weighted sum of the 5x5 neighbourhood from shared memory */
for(int ibor=-2;ibor<3;ibor++){
for(int jbor=-2;jbor<3;jbor++){
weight+=shared_w[(ibor+2)*5+jbor+2]*shared_G[(threadIdx.x+2+ibor)*(BLOCK_SIZE+4) +(threadIdx.y+2+jbor) ];
}
}
/* |weight| < 1e-4 is treated as zero: spin keeps its value;
   otherwise align the spin with the sign of the influence sum */
if(weight<1e-4&&weight>-(1e-4)){
// newG[xIndex*n+yIndex]=G[xIndex*n+yIndex];
newG[xIndex*n+yIndex]=shared_G[(threadIdx.x+2)*(BLOCK_SIZE+4)+threadIdx.y+2];
}else if(weight>0){
newG[xIndex*n+yIndex]=1;
}else{
newG[xIndex*n+yIndex]=-1;
}
}
}
}
}
/*
 * Runs at most k Ising sweeps of the n x n spin lattice G (updated in
 * place on the host) using the 5x5 weight stencil w (25 doubles).
 * Stops early when a sweep changes nothing (fixed point).
 * Fixes vs. original: the three managed allocations were never freed
 * (leaked every call); unused locals (grid_dimension, swapG,
 * total_time) removed.
 */
void ising( int *G, double *w, int k, int n){
    int *newG, *G2;
    double *w2;
    /* managed memory: the device kernel and the host swap/compare loop
       both touch these buffers */
    cudaMallocManaged(&newG, n*n*sizeof(int));
    cudaMallocManaged(&G2, n*n*sizeof(int));
    cudaMallocManaged(&w2, 25*sizeof(double));
    cudaMemcpy(w2, w, 25*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(G2, G, n*n*sizeof(int), cudaMemcpyHostToDevice);
    for (int iter = 0; iter < k; iter++) {
        bool repeat = true;
        dim3 grid(GRID_SIZE, GRID_SIZE);
        dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        ising_kernel<<<grid, block>>>(G2, newG, w2, n);
        cudaDeviceSynchronize();
        /* swap newG into G2 element-by-element and detect any change */
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                if (repeat && newG[i*n+j] != G2[i*n+j]) {
                    repeat = false;
                }
                int temp = newG[i*n+j];
                newG[i*n+j] = G2[i*n+j];
                G2[i*n+j] = temp;
            }
        }
        if (repeat) {
            break;  /* fixed point: no spin flipped this sweep */
        }
    }
    cudaMemcpy(G, G2, n*n*sizeof(int), cudaMemcpyDeviceToHost);
    /* fix: release the managed buffers (the original leaked them) */
    cudaFree(newG);
    cudaFree(G2);
    cudaFree(w2);
}
/*
 * Loads the initial lattice from conf-init.bin, runs k Ising sweeps via
 * ising(), and verifies the result against the reference file refname.
 * Prints CORRECT/WRONG IMPLEMENTATION; returns true on a full match.
 * Fixes vs. original main: lattices are heap-allocated (the original
 * kept six ~1 MB variable-length arrays on the stack), fopen() results
 * are checked, and the triplicated load/run/compare logic is factored
 * into this helper.
 */
static bool checkCase(const char *refname, int k, int n, double *weight) {
    int *X = (int *) malloc(n * n * sizeof(int));
    int *ref = (int *) malloc(n * n * sizeof(int));
    if (X == NULL || ref == NULL) exit(EXIT_FAILURE);
    FILE *fin = fopen("conf-init.bin", "rb");
    if (fin == NULL) exit(EXIT_FAILURE);
    if (fread(X, sizeof(int), n * n, fin) != (size_t)(n * n)) exit(EXIT_FAILURE);
    fclose(fin);
    FILE *fref = fopen(refname, "rb");
    if (fref == NULL) exit(EXIT_FAILURE);
    if (fread(ref, sizeof(int), n * n, fref) != (size_t)(n * n)) exit(EXIT_FAILURE);
    fclose(fref);
    ising(X, weight, k, n);
    bool ok = true;
    for (int i = 0; i < n * n && ok; i++) {
        if (X[i] != ref[i]) ok = false;
    }
    if (ok) printf("\nCORRECT IMPLEMENTATION\n");
    else printf("\nWRONG IMPLEMENTATION\n");
    free(X);
    free(ref);
    return ok;
}
/* Test harness: verifies the GPU Ising implementation for k = 1, 4, 11
   against precomputed reference lattices (n = 517). */
int main()
{
    printf("==========================START=============================\n");
    /* 5x5 influence weights; centre is 0 (a spin ignores itself) */
    double weight[] = {0.004,0.016,0.026,0.016,0.004,
                       0.016,0.071,0.117,0.071,0.016,
                       0.026,0.117,0,    0.117,0.026,
                       0.016,0.071,0.117,0.071,0.016,
                       0.004,0.016,0.026,0.016,0.004};
    int n = 517;
    printf("k=1:\n");
    checkCase("conf-1.bin", 1, n, weight);
    printf("k=4:\n");
    checkCase("conf-4.bin", 4, n, weight);
    printf("k=11:\n");
    checkCase("conf-11.bin", 11, n, weight);
    printf("\n=========================END==========================");
    return 0;
}
20,404 | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
#include <locale.h>
#include <cuda.h>
#define BLOCK_SIZE 250
#define GRID_SIZE 4
#define THREAD_SIZE 1000
#define CUDA_FLOAT float
/*
 * Each of the BLOCK_SIZE*GRID_SIZE threads adds ONE trapezoid of width
 * dx under y = sqrt(1 - x^2), starting at x0 = n/(BLOCK_SIZE*GRID_SIZE),
 * into res[n].
 * NOTE(review): dx is 1/(BLOCK_SIZE*GRID_SIZE*THREAD_SIZE) but there is
 * no loop over THREAD_SIZE steps, so only a 1/THREAD_SIZE fraction of
 * each thread's sub-interval is integrated — presumably a per-thread
 * loop of THREAD_SIZE trapezoids was intended (the dead x0=x1; y0=y1;
 * statements look like leftover loop tail). Verify against the
 * sum/THREAD_SIZE averaging in main().
 */
__global__ void pi_kern(double *res)
{
int n = threadIdx.x + blockIdx.x * BLOCK_SIZE;
CUDA_FLOAT x0 = n * 1.f / (BLOCK_SIZE * GRID_SIZE);
CUDA_FLOAT y0 = sqrtf(1 - x0 * x0);
CUDA_FLOAT dx = 1.f / (1.f * BLOCK_SIZE * GRID_SIZE * THREAD_SIZE);
CUDA_FLOAT s = 0;
CUDA_FLOAT x1, y1;
x1 = x0 + dx;
y1 = sqrtf(1 - x1 * x1);
/* trapezoid area between (x0,y0) and (x1,y1) */
s += (y0 + y1) * dx / 2.f;
x0 = x1;
y0 = y1;
res[n] = s;
}
/*
 * Host driver: launches pi_kern over GRID_SIZE*BLOCK_SIZE threads, sums
 * the per-thread trapezoid areas and prints the average.
 * NOTE(review): res_h is copied to the device uninitialized (harmless —
 * the kernel overwrites every element, so the copy is simply
 * redundant); no CUDA call is error-checked; and given the kernel as
 * written the printed value is not an approximation of pi — see the
 * note on pi_kern.
 */
int main() {
double *res_h, *res_d;
res_h = (double *)malloc(sizeof(double)*THREAD_SIZE);
cudaMalloc((void **) &res_d, sizeof(double)*THREAD_SIZE);
cudaMemcpy(res_d, res_h, sizeof(double)*THREAD_SIZE, cudaMemcpyHostToDevice);
pi_kern<<< GRID_SIZE, BLOCK_SIZE >>>(res_d);
/* blocking copy doubles as synchronization before reading results */
cudaMemcpy(res_h, res_d, sizeof(double)*THREAD_SIZE, cudaMemcpyDeviceToHost);
double sum = 0.0;
for(int i = 0; i < THREAD_SIZE; i++){
sum += res_h[i];
}
printf("%0.8f\n", sum / THREAD_SIZE);
printf("%d", 2);
free(res_h);
cudaFree(res_d);
return 0;
}
|
20,405 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <thrust/host_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <sstream>
#include <cuda.h>
using namespace std;
#define SIZE 16
#define MAX_ITERATIONS 1024
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
/*
 * Deliberately race-y shared-memory kernel: its run time jitters with
 * the unsynchronized accesses, and that timing jitter is the entropy
 * source used by generate_raw_numbers(). Launched as
 * <<<1, S, (2*S+1)*sizeof(int)>>>: s occupies shared[0..S] and out
 * starts at shared[S+1]. Each thread writes s[id] and reads s[id+1]
 * with no barrier, repeated M times — the data race is intentional.
 */
__global__ void TRNGkernel(int M, int S)
{
extern __shared__ int shared[];
int *s = &shared[0];
int *out = &shared[S+1];
int k=0;
int id =threadIdx.x;
do{
s[id] = id;
out[id]=s[id+1];
k++;
} while(k<M);
}
/*
 * Fills *RN with N raw floats in [0, 1). Each sample is the fractional
 * part of (kernel wall time * CL): TRNGkernel's deliberate data races
 * make its elapsed time jitter, which is used as an entropy source.
 *   S  - threads in the single block (and half the shared buffer)
 *   M  - iterations inside the kernel
 *   CL - compression level scaling the measured time
 * Fix vs. original: cudaThreadSynchronize() is deprecated (and removed
 * in recent toolkits); cudaDeviceSynchronize() is the supported call.
 */
void generate_raw_numbers(thrust::host_vector<float> *RN , int S, int M, int N, float CL){
    int i;
    cudaEvent_t start, stop;
    float kernelTime;
    for(i=0;i<N;i++)
    {
        /* time the kernel with GPU events */
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord( start, 0 );
        /* kernel with the race conditions; 2*S+1 ints of dynamic shared memory */
        TRNGkernel<<<1,S,(2*S+1)*sizeof(int)>>>(M, S);
        cudaDeviceSynchronize(); /* wait for the GPU launched work to complete */
        cudaEventRecord( stop, 0 );
        cudaEventSynchronize( stop );
        cudaEventElapsedTime( &kernelTime, start, stop );
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        /* store the raw number: fractional part of the scaled elapsed time */
        (*RN)[i] = fmod(kernelTime*CL,float(1.0));
    }
    cudaDeviceReset();
}
/*
 * Usage: prog <n_repetitions>
 * Generates n_repetitions race-timing-based raw random numbers and
 * prints them space-separated on one line.
 * Fixes vs. original: argv[1] was read without checking argc (crash
 * when run with no arguments); cudaThreadExit() is deprecated and
 * redundant here — generate_raw_numbers() already calls
 * cudaDeviceReset().
 */
int main(int argc, char *argv[]) {
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <n_repetitions>" << endl;
        return EXIT_FAILURE;
    }
    int N_REPETITIONS = atoi(argv[1]);
    thrust::host_vector<float> RawNumbers(N_REPETITIONS);
    float CompressionLevel = 100.00;
    generate_raw_numbers(&RawNumbers,SIZE,MAX_ITERATIONS,N_REPETITIONS, CompressionLevel);
    for (int k=0;k<N_REPETITIONS;k++)
        cout<< RawNumbers[k] << " ";
    cout << endl;
    return 0;
}
|
20,406 | #include <stdio.h>
#include <stdlib.h>
#define CONFIGURATION_COUNT 250000
/* One market-data bar plus precomputed indicator values.
   Field meanings inferred from names (OHLC prices, SMA/EMA averages,
   RSI, stochastic %K/%D, price-channel bounds) — confirm with the data
   producer. */
struct Tick {
long timestamp;
double open;
double high;
double low;
double close;
double sma13;
double ema50;
double ema100;
double ema200;
double rsi;
double stochK;
double stochD;
double prcUpper;
double prcLower;
};
/* A trading strategy: running profit/loss plus a per-tick callback.
   The function pointer is assigned inside the initializeStrategies
   kernel (on the device) — a __device__ function address taken on the
   host would not be callable from GPU code. */
struct Strategy {
double profitLoss;
void (*backtest)(Strategy*, Tick*);
};
/* Placeholder per-tick strategy evaluation: burns a fixed 50-iteration
   loop and leaves the strategy untouched.
   TODO: replace with real backtesting logic. */
__device__ void backtest(Strategy *self, Tick *tick) {
    int counter = 0;
    for (int step = 0; step < 50; step++) {
        counter++;
    }
}
/* One thread per configuration: seed the P&L with 10000+index and bind
   the device-side backtest callback. */
__global__ void initializeStrategies(Strategy *strategies) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= CONFIGURATION_COUNT) {
        return;
    }
    strategies[idx].profitLoss = 10000 + idx;
    strategies[idx].backtest = backtest;
}
/* Invoke each strategy's backtest callback on its own tick, one thread
   per configuration. */
__global__ void backtestStrategies(Strategy *strategies, Tick *ticks) {
    // TODO: Harness multiple dimensions?
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= CONFIGURATION_COUNT) {
        return;
    }
    strategies[idx].backtest(&strategies[idx], &ticks[idx]);
}
/*
 * Drives a k-fold backtest: initializes CONFIGURATION_COUNT strategies
 * on the GPU, then for every fold and every tick uploads a synthetic
 * bar for each configuration and launches the backtest kernel; finally
 * copies the strategies back and prints each P&L.
 * Fixes vs. original: the Strategy array is heap-allocated (a 250,000
 * element stack array risks stack overflow); host mallocs are checked;
 * ticks and strategies are freed.
 */
int main() {
    int threadsPerBlock = 1000;
    int blockCount = CONFIGURATION_COUNT / threadsPerBlock;
    /* fix: heap, not stack — CONFIGURATION_COUNT structs are ~4 MB */
    Strategy *strategies = (Strategy*) malloc(CONFIGURATION_COUNT * sizeof(Strategy));
    Strategy *devStrategies;
    int tickCount = 1000000;
    Tick *ticks = (Tick*) malloc(CONFIGURATION_COUNT * sizeof(Tick));
    Tick *devTicks;
    int kFoldCount = 10;
    void (*backtester)(Strategy*, Tick*);
    backtester = &backtestStrategies;
    if (strategies == NULL || ticks == NULL) {
        printf("host allocation failed\n");
        return 1;
    }
    cudaSetDevice(0);
    // Allocate memory on the GPU for the strategies.
    cudaMalloc((void**)&devStrategies, CONFIGURATION_COUNT * sizeof(Strategy));
    // Allocate memory on the GPU for the ticks.
    cudaMalloc((void**)&devTicks, CONFIGURATION_COUNT * sizeof(Tick));
    // Initialize strategies on the GPU.
    initializeStrategies<<<blockCount, threadsPerBlock>>>(devStrategies);
    // Run through each k-fold step.
    for (int i=0; i<kFoldCount; i++) {
        // Run through every tick.
        for (int j=0; j<tickCount; j++) {
            printf("%i\n", j);
            // Same synthetic bar for every configuration.
            for (int k=0; k<CONFIGURATION_COUNT; k++) {
                ticks[k].timestamp = 1460611103;
                ticks[k].open = 89.5;
                ticks[k].high = 89.5;
                ticks[k].low = 89.5;
                ticks[k].close = 89.5;
                ticks[k].sma13 = 89.5;
                ticks[k].ema50 = 89.5;
                ticks[k].ema100 = 89.5;
                ticks[k].ema200 = 89.5;
                ticks[k].rsi = 89.5;
                ticks[k].stochK = 89.5;
                ticks[k].stochD = 89.5;
                ticks[k].prcUpper = 89.5;
                ticks[k].prcLower = 89.5;
            }
            // Copy ticks to the GPU.
            cudaMemcpy(devTicks, ticks, CONFIGURATION_COUNT * sizeof(Tick), cudaMemcpyHostToDevice);
            // Run backtests for all strategy configurations.
            (*backtester)<<<blockCount, threadsPerBlock>>>(devStrategies, devTicks);
            // Wait for currently-running kernels to finish.
            cudaDeviceSynchronize();
        }
    }
    // Free memory for the tick data from the GPU.
    cudaFree(devTicks);
    // Copy strategies from the GPU.
    cudaMemcpy(strategies, devStrategies, CONFIGURATION_COUNT * sizeof(Strategy), cudaMemcpyDeviceToHost);
    // Display results.
    for (int i=0; i<CONFIGURATION_COUNT; i++) {
        printf("%f\n", strategies[i].profitLoss);
    }
    // Free memory for the strategies on the GPU and the host buffers.
    cudaFree(devStrategies);
    free(ticks);
    free(strategies);
    return 0;
}
|
20,407 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
typedef struct bmpFileHeaderStruct {
/* the 2 identifying bytes ("BM") are read separately, before this struct */
uint32_t size; /* file size */
uint16_t resv1; /* reserved */
uint16_t resv2; /* reserved */
uint32_t offset; /* offset to the start of the pixel data */
} bmpFileHeader;
typedef struct bmpInfoHeaderStruct {
uint32_t headersize; /* header size */
uint32_t width; /* width in pixels */
uint32_t height; /* height in pixels */
uint16_t planes; /* colour planes (always 1) */
uint16_t bpp; /* bits per pixel */
uint32_t compress; /* compression */
uint32_t imgsize; /* size of the image data */
uint32_t bpmx; /* horizontal resolution, bits per metre */
uint32_t bpmy; /* vertical resolution, bits per metre */
uint32_t colors; /* colours used in the palette */
uint32_t imxtcolors; /* important colours; 0 means all */
} bmpInfoHeader;
/*
 * In-place grayscale conversion on the CPU (host reference for
 * BWkernel). Replaces each pixel's three bytes with a weighted luma
 * (0.114*B + 0.587*G + 0.299*R) divided by 3.
 * Fix vs. original: `color` was never reset between pixels (and was
 * read uninitialized on the first pixel), so values accumulated across
 * the whole image.
 * NOTE(review): dividing the luma by 3 darkens the image — the usual
 * formula omits it — but it is kept to mirror BWkernel.
 */
void BW(unsigned char *imgdata, bmpInfoHeader *bInfoHeader) {
    float color;
    int x, y;
    for (x = 0; x < bInfoHeader->width; x++)
    {
        for (y = 0; y < bInfoHeader->height; y++)
        {
            color = 0.0f; /* fix: reset the accumulator for every pixel */
            color += imgdata[x*3 + y*bInfoHeader->width*3 + 0] * 0.114;
            color += imgdata[x*3 + y*bInfoHeader->width*3 + 1] * 0.587;
            color += imgdata[x*3 + y*bInfoHeader->width*3 + 2] * 0.299;
            color /= 3;
            imgdata[x*3 + y*bInfoHeader->width*3 + 0] = color;
            imgdata[x*3 + y*bInfoHeader->width*3 + 1] = color;
            imgdata[x*3 + y*bInfoHeader->width*3 + 2] = color;
        }
    }
}
/*
 * 3x3 convolution of a 24-bpp BGR image: reads img_device, writes
 * img_device2, using the row-major 3x3 coefficient matrix mat.
 * One thread per pixel on a 2-D grid; border pixels clamp the window
 * instead of wrapping. The parameter j is unused.
 * NOTE(review): channel sums are not clamped to [0,255] before the
 * store into unsigned char — sharpen/edge matrices can overflow; and
 * rows are assumed unpadded (width*3 bytes), which holds only when
 * width*3 is a multiple of 4.
 */
__global__ void ConvMatKernel(unsigned char *img_device, unsigned char *img_device2, uint32_t width_image, uint32_t height_image, int j, float *mat) {
//Hay que pasarle la matriz
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i = width_image * row + col;
float avgB, avgG, avgR;
int x, y;
avgB = avgG = avgR = 0;
if (i < (width_image * height_image)) {
/* x,y in [-1,1] walk the 3x3 window; edge checks shrink it at borders */
for(x = -1; x < 2; x++) {
if (row == 0 && x == -1) {
x = 0;
}
else if (row == height_image - 1) {
if (x > 0) break;
}
for(y = -1; y < 2; y++) {
if (col == 0 && y == -1) y = 0;
if (col == width_image - 1 && y == 1) break;
avgB += img_device[(col + y)*3 + (x + row) * width_image*3 + 0] * mat[((x + 1) * 3) + y + 1];
avgG += img_device[(col + y)*3 + (x + row) * width_image*3 + 1] * mat[((x + 1) * 3) + y + 1];
avgR += img_device[(col + y)*3 + (x + row) * width_image*3 + 2] * mat[((x + 1) * 3) + y + 1];
}
}
img_device2[col*3 + row*width_image*3 + 0] = avgB;
img_device2[col*3 + row*width_image*3 + 1] = avgG;
img_device2[col*3 + row*width_image*3 + 2] = avgR;
}
}
/*
 * Box-style blur of a 24-bpp BGR image: each output pixel in img_device
 * is the average of the blurSize x blurSize window whose top-left
 * corner is the pixel itself, read from img_device2 (clamped at the
 * right/bottom edges). One thread per pixel on a 2-D grid.
 * NOTE(review): the window looks only down/right, so the result is
 * shifted relative to a centred blur; rows are assumed unpadded
 * (width*3 bytes per row).
 */
__global__ void blurKernel(unsigned char *img_device, unsigned char *img_device2, uint32_t width_image, uint32_t height_image) {
int x,y,ile, avgR,avgB,avgG;
int blurSize = 10;
avgB = avgG = avgR = 0;
ile = 0; /* number of pixels actually accumulated (smaller near edges) */
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int i = width_image * row + col;
if (i < (width_image * height_image)) {
for(x = col; x < width_image && x < col + blurSize; x++)
{
for(y = row; y < height_image && y < row + blurSize; y++)
{
avgB += img_device2[x*3 + y*width_image*3 + 0];
avgG += img_device2[x*3 + y*width_image*3 + 1];
avgR += img_device2[x*3 + y*width_image*3 + 2];
ile++;
}
}
avgB = avgB / ile;
avgG = avgG / ile;
avgR = avgR / ile;
img_device[col*3 + row*width_image*3 + 0] = avgB;
img_device[col*3 + row*width_image*3 + 1] = avgG;
img_device[col*3 + row*width_image*3 + 2] = avgR;
}
}
//Kernel BW
/* Grayscale kernel: one thread per pixel, replaces the pixel's three
   bytes in place with (0.114*B + 0.587*G + 0.299*R) / 3.
   n is the pixel count (width * height).
   NOTE(review): the /3 matches the host BW() routine but darkens the
   image relative to the standard luma formula — confirm intent. */
__global__ void BWkernel(unsigned char *img_device, uint32_t n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    float gray = 0.0f;
    gray += img_device[idx*3 + 0] * 0.114;
    gray += img_device[idx*3 + 1] * 0.587;
    gray += img_device[idx*3 + 2] * 0.299;
    gray /= 3;
    img_device[idx*3 + 0] = gray;
    img_device[idx*3 + 1] = gray;
    img_device[idx*3 + 2] = gray;
}
void CheckCudaError(char sms[], int line);
/*
 * Allocates and fills a BMP info header for a width x height, 24 bpp
 * image at ppp pixels-per-inch. Returns NULL on allocation failure;
 * the caller owns the returned buffer (free()).
 * Fix vs. original: the malloc was commented out, so the function
 * dereferenced an uninitialized pointer; the commented-out call was
 * also wrong (sizeof(InfoHeader) is the size of a pointer, not of the
 * struct).
 */
bmpInfoHeader *createInfoHeader(uint32_t width, uint32_t height, uint32_t ppp) {
    bmpInfoHeader *InfoHeader = (bmpInfoHeader *) malloc(sizeof(bmpInfoHeader));
    if (InfoHeader == NULL) return NULL;
    InfoHeader->headersize = sizeof(bmpInfoHeader);
    InfoHeader->width = width;
    InfoHeader->height = height;
    InfoHeader->planes = 1;
    InfoHeader->bpp = 24;
    InfoHeader->compress = 0;
    /* 3 bytes per pixel, width*height pixels; each row padded to a multiple of 4 bytes */
    InfoHeader->imgsize = ((width*3 + 3) / 4) * 4 * height;
    /* pixels-per-inch -> pixels-per-metre */
    InfoHeader->bpmx = (unsigned) ((double)ppp*100/2.54);
    InfoHeader->bpmy = InfoHeader->bpmx; /* same vertical and horizontal resolution */
    InfoHeader->colors = 0;
    InfoHeader->imxtcolors = 0;
    return InfoHeader;
}
/* Abort the program with context (message, CUDA error string, file and
   line) if the most recent CUDA runtime call failed. */
void CheckCudaError(char sms[], int line) {
    cudaError_t status = cudaGetLastError();
    if (!status) {
        return;
    }
    printf("(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(status), __FILE__, line);
    exit(EXIT_FAILURE);
}
/*
 * Loads the BMP file `filename`, fills *bInfoHeader, applies the GPU
 * filter selected by i (1 = blur, 2 = black & white, 3 = 3x3
 * convolution after a CPU grayscale pass) and returns the processed
 * pixel data (caller frees). Returns NULL if the file cannot be read
 * or is not a BMP.
 * NOTE(review): imgdata2_h is allocated but never used or freed;
 * nBlocks is printed uninitialized in cases 1 and 3; fread() results
 * are unchecked; the file is opened "r" rather than "rb" (matters on
 * Windows); the GFLOPS figures use an N^3 formula unrelated to these
 * kernels; every banner says "KERNEL BlackAndWhiteFilter" even for the
 * blur and convolution paths.
 */
unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader, int i) {
FILE *f;
bmpFileHeader header; /* file header */
unsigned char *imgdata_h; /* image data (host) */
unsigned char *imgdata2_h;
unsigned char *imgdata_d;
unsigned char *imgdata2_d;
uint16_t type; /* 2 identifying bytes */
/* kernel launch bookkeeping */
unsigned int N;
unsigned int numBytes;
unsigned int nBlocks, nThreads;
float TiempoTotal, TiempoKernel;
cudaEvent_t E0, E1, E2, E3;
f=fopen (filename, "r");
if (!f) { /* cannot read: there is no image */
printf("NO se puede abrir el fichero %s\n", filename);
return NULL;
}
/* read the first two bytes and check the "BM" signature */
fread(&type, sizeof(uint16_t), 1, f);
if (type !=0x4D42) {
fclose(f);
printf("%s NO es una imagen BMP\n", filename);
return NULL;
}
/* read the file header */
fread(&header, sizeof(bmpFileHeader), 1, f);
printf("File size: %u\n", header.size);
printf("Reservado: %u\n", header.resv1);
printf("Reservado: %u\n", header.resv2);
printf("Offset: %u\n", header.offset);
/* read the BMP info header */
fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f);
/* reserve memory for the image, as indicated by imgsize */
if (bInfoHeader->imgsize == 0) bInfoHeader->imgsize = ((bInfoHeader->width*3 +3) / 4) * 4 * bInfoHeader->height;
imgdata_h = (unsigned char*) malloc(bInfoHeader->imgsize);
imgdata2_h = (unsigned char*) malloc(bInfoHeader->imgsize);
if (imgdata_h == NULL) {
printf("Fallo en el malloc, del fichero %s\n", filename);
exit(0);
}
/* seek to where the image data starts (offset from the file header) */
fseek(f, header.offset, SEEK_SET);
/* read the image data, imgsize bytes */
fread(imgdata_h, bInfoHeader->imgsize,1, f);
N = bInfoHeader->imgsize;
if (i == 1) {
/* case 1: GPU box blur on a 2-D grid of 32x32 thread blocks */
nThreads = 32;
/* number of blocks in each dimension */
uint32_t nBlocksWidth = bInfoHeader->width / nThreads;
uint32_t nBlocksHeight = bInfoHeader->height / nThreads;
dim3 dimGrid(nBlocksWidth, nBlocksHeight, 1);
dim3 dimBlock(nThreads, nThreads, 1);
cudaEventCreate(&E0);
cudaEventCreate(&E1);
cudaEventCreate(&E2);
cudaEventCreate(&E3);
cudaEventRecord(E0, 0);
cudaEventSynchronize(E0);
cudaMalloc((unsigned char**)&imgdata_d, bInfoHeader->imgsize);
cudaMalloc((unsigned char**)&imgdata2_d, bInfoHeader->imgsize);
CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
/* both device buffers start as the source image (blur reads imgdata2_d) */
cudaMemcpy(imgdata_d, imgdata_h, bInfoHeader->imgsize, cudaMemcpyHostToDevice);
cudaMemcpy(imgdata2_d, imgdata_h, bInfoHeader->imgsize, cudaMemcpyHostToDevice);
CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
cudaEventRecord(E1, 0);
cudaEventSynchronize(E1);
blurKernel<<<dimGrid, dimBlock>>>(imgdata_d, imgdata2_d, bInfoHeader->width, bInfoHeader->height);
CheckCudaError((char *) "Invocar Kernel", __LINE__);
cudaEventRecord(E2, 0);
cudaEventSynchronize(E2);
cudaMemcpy(imgdata_h, imgdata_d, bInfoHeader->imgsize, cudaMemcpyDeviceToHost);
CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__);
cudaEventRecord(E3, 0);
cudaEventSynchronize(E3);
cudaEventElapsedTime(&TiempoTotal, E0, E3);
cudaEventElapsedTime(&TiempoKernel, E1, E2);
/* NOTE(review): nBlocks is uninitialized in the prints below */
printf("\nKERNEL BlackAndWhiteFilter\n");
printf("Dimensiones: %d\n",N);
printf("nThreads: %dx%d (%d)\n", nThreads, nBlocks, nThreads * nBlocks);
printf("nBlocks: %d\n", nBlocks);
printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
printf("Rendimiento Global: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoTotal));
printf("Rendimiento Kernel: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoKernel));
cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3);
cudaFree(imgdata_d);
cudaFree(imgdata2_d);
}
else if (i == 2) {
/* case 2: GPU grayscale on a 1-D grid */
nThreads = 1024;
/* number of blocks in each dimension */
nBlocks = N / nThreads;
dim3 dimGrid(nBlocks, 1, 1);
dim3 dimBlock(nThreads, 1, 1);
cudaEventCreate(&E0);
cudaEventCreate(&E1);
cudaEventCreate(&E2);
cudaEventCreate(&E3);
cudaEventRecord(E0, 0);
cudaEventSynchronize(E0);
cudaMalloc((unsigned char**)&imgdata_d, bInfoHeader->imgsize);
CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
//cudaMalloc((unsigned char**)&imgdata2_d, bInfoHeader->imgsize);
cudaMemcpy(imgdata_d, imgdata_h, bInfoHeader->imgsize, cudaMemcpyHostToDevice);
CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
//cudaMemcpy(imgdata2_d, imgdata2_h, bInfoHeader->imgsize, cudaMemcpyHostToDevice);
cudaEventRecord(E1, 0);
cudaEventSynchronize(E1);
BWkernel<<<nBlocks, nThreads>>>(imgdata_d, (bInfoHeader->width * bInfoHeader->height));
CheckCudaError((char *) "Invocar Kernel", __LINE__);
//BW(imgdata_h, bInfoHeader);
cudaEventRecord(E2, 0);
cudaEventSynchronize(E2);
cudaMemcpy(imgdata_h, imgdata_d, bInfoHeader->imgsize, cudaMemcpyDeviceToHost);
CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__);
cudaEventRecord(E3, 0);
cudaEventSynchronize(E3);
cudaEventElapsedTime(&TiempoTotal, E0, E3);
cudaEventElapsedTime(&TiempoKernel, E1, E2);
printf("\nKERNEL BlackAndWhiteFilter\n");
printf("Dimensiones: %d\n",N);
printf("nThreads: %dx%d (%d)\n", nThreads, nBlocks, nThreads * nBlocks);
printf("nBlocks: %d\n", nBlocks);
printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
printf("Rendimiento Global: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoTotal));
printf("Rendimiento Kernel: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoKernel));
cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3);
cudaFree(imgdata_d);
//printf("Black&White took %f seconds to execute \n", time_taken);
}
else if (i == 3) {
/* case 3: CPU grayscale pass, then GPU 3x3 convolution (matrix picked by j) */
nThreads = 32;
float mat[9];
float *mat_d;
BW(imgdata_h, bInfoHeader);
int j = 2;
if (j == 1) {
/* edge detection */
mat[0] = -1.;
mat[1] = -1.;
mat[2] = -1.;
mat[3] = -1.;
mat[4] = 8.;
mat[5] = -1.;
mat[6] = -1.;
mat[7] = -1.;
mat[8] = -1.;
}
else if (j == 2) {
/* sharpen */
mat[0] = 0.;
mat[1] = -1.;
mat[2] = 0.;
mat[3] = -1.;
mat[4] = 5.;
mat[5] = -1.;
mat[6] = 0.;
mat[7] = -1.;
mat[8] = 0.;
}
else if (j == 3) {
/* Gaussian smoothing */
mat[0] = 1./16.;
mat[1] = 2./16.;
mat[2] = 1./16.;
mat[3] = 2./16.;
mat[4] = 4./16.;
mat[5] = 2./16.;
mat[6] = 1./16.;
mat[7] = 2./16.;
mat[8] = 1./16.;
}
else {
/* identity */
mat[0] = 0.;
mat[1] = 0.;
mat[2] = 0.;
mat[3] = 0.;
mat[4] = 1.;
mat[5] = 0.;
mat[6] = 0.;
mat[7] = 0.;
mat[8] = 0.;
}
/* number of blocks in each dimension */
uint32_t nBlocksWidth = bInfoHeader->width / nThreads;
uint32_t nBlocksHeight = bInfoHeader->height / nThreads;
dim3 dimGrid(nBlocksWidth, nBlocksHeight, 1);
dim3 dimBlock(nThreads, nThreads, 1);
cudaEventCreate(&E0);
cudaEventCreate(&E1);
cudaEventCreate(&E2);
cudaEventCreate(&E3);
cudaEventRecord(E0, 0);
cudaEventSynchronize(E0);
cudaMalloc((unsigned char**)&imgdata_d, bInfoHeader->imgsize);
cudaMalloc((unsigned char**)&imgdata2_d, bInfoHeader->imgsize);
cudaMalloc((float**)&mat_d, 9 * sizeof(float));
CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
cudaMemcpy(imgdata_d, imgdata_h, bInfoHeader->imgsize, cudaMemcpyHostToDevice);
cudaMemcpy(imgdata2_d, imgdata_h, bInfoHeader->imgsize, cudaMemcpyHostToDevice);
cudaMemcpy(mat_d, mat, 9 * sizeof(float), cudaMemcpyHostToDevice);
CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
cudaEventRecord(E1, 0);
cudaEventSynchronize(E1);
ConvMatKernel<<<dimGrid, dimBlock>>>(imgdata_d, imgdata2_d, bInfoHeader->width, bInfoHeader->height, j, mat_d);
CheckCudaError((char *) "Invocar Kernel", __LINE__);
cudaEventRecord(E2, 0);
cudaEventSynchronize(E2);
/* convolution writes into imgdata2_d; copy that one back */
cudaMemcpy(imgdata_h, imgdata2_d, bInfoHeader->imgsize, cudaMemcpyDeviceToHost);
CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__);
cudaEventRecord(E3, 0);
cudaEventSynchronize(E3);
cudaEventElapsedTime(&TiempoTotal, E0, E3);
cudaEventElapsedTime(&TiempoKernel, E1, E2);
printf("\nKERNEL BlackAndWhiteFilter\n");
printf("Dimensiones: %d\n",N);
printf("nThreads: %dx%d (%d)\n", nThreads, nBlocks, nThreads * nBlocks);
printf("nBlocks: %d\n", nBlocks);
printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel);
printf("Rendimiento Global: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoTotal));
printf("Rendimiento Kernel: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoKernel));
cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3);
cudaFree(imgdata_d);
cudaFree(imgdata2_d);
cudaFree(mat_d);
}
/* close the file */
fclose(f);
printf("el resultado se encuentra en el fichero aux.bmp");
/* return the image */
return imgdata_h;
}
/*
 * Writes imgdata as a BMP file: 2-byte "BM" signature, file header,
 * info header, then InfoHeader->imgsize bytes of pixel data.
 * Fixes vs. original: printf used %f for the uint32_t imgsize
 * (undefined behavior) — now %u; fopen() is checked before use.
 * NOTE(review): mode "w+" is text mode on Windows; "wb+" would be
 * safer, kept as-is to match LoadBMP's "r".
 */
void SaveBMP(char *filename, bmpInfoHeader *InfoHeader, unsigned char *imgdata) {
    bmpFileHeader header;
    FILE *f;
    uint16_t type;
    f = fopen(filename, "w+");
    if (f == NULL) {
        printf("NO se puede abrir el fichero %s\n", filename);
        return;
    }
    header.size = InfoHeader->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) + 2;
    header.resv1 = 0;
    header.resv2 = 0;
    /* the offset is the size of both headers + 2 (file signature) */
    header.offset = sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) + 2;
    /* write the file signature "BM" */
    type = 0x4D42;
    fwrite(&type, sizeof(type), 1, f);
    /* write the file header */
    fwrite(&header, sizeof(bmpFileHeader), 1, f);
    /* write the basic image information */
    fwrite(InfoHeader, sizeof(bmpInfoHeader), 1, f);
    /* write the image data (fix: %u, imgsize is uint32_t) */
    printf("%u \n", InfoHeader->imgsize);
    fwrite(imgdata, InfoHeader->imgsize, 1, f);
    fclose(f);
}
//kernel Function, para esta función, necesitamos tanto la fila como la columna en la que actuará nuestro thread en cuestion.
//Ademas tenemos que tener dos copias de imgdata, en uno tendramos los nuevos datos, y la otra la utilizaremos para calcular la primera.
/* Dumps every field of the BMP info header to stdout, then substitutes a
 * default pixel density (24*100/2.54 ~ 944 px/m) when the header reports 0.
 * NOTE(review): the defaults are assigned AFTER the resolutions are printed,
 * so a zeroed header still prints 0 — confirm whether the fix-up was meant
 * to happen before the printfs. */
void DisplayInfo(char *FileName, bmpInfoHeader *InfoHeader)
{
printf("\n");
printf("Informacion de %s\n", FileName);
printf("Tamaño de la cabecera: %u bytes\n", InfoHeader->headersize);
printf("Anchura: %d pixels\n", InfoHeader->width);
printf("Altura: %d pixels\n", InfoHeader->height);
printf("Planos (1): %d\n", InfoHeader->planes);
printf("Bits por pixel: %d\n", InfoHeader->bpp);
printf("Compresion: %d\n", InfoHeader->compress);
printf("Tamaño de la imagen: %u bytes\n", InfoHeader->imgsize);
printf("Resolucion horizontal: %u px/m\n", InfoHeader->bpmx);
printf("Resolucion vertical: %u px/m\n", InfoHeader->bpmy);
// Patch missing densities so later consumers see a sane value.
if (InfoHeader->bpmx == 0)
InfoHeader->bpmx = (unsigned) ((double)24*100/2.54);
if (InfoHeader->bpmy == 0)
InfoHeader->bpmy = (unsigned) ((double)24*100/2.54);
printf("Colores en paleta: %d\n", InfoHeader->colors);
printf("Colores importantes: %d\n", InfoHeader->imxtcolors);
}
// Entry point: loads canicas.bmp, applies filter mode 1 (blur, per the
// LoadBMP mode argument) on the GPU, and writes the result to auxBlur.bmp.
// The mode-2 (black & white) and mode-3 (matrix) variants are kept disabled.
int main(int argc, char** argv) {
bmpInfoHeader header;
unsigned char *image;
//scanf("%d", &i);
/*image = LoadBMP("./canicas.bmp", &header, 2);
//DisplayInfo("./canicas.bmp", &header);
SaveBMP("./auxBW.bmp", &header, image);*/
image = LoadBMP("./canicas.bmp", &header, 1);
//DisplayInfo("./canicas.bmp", &header);
SaveBMP("./auxBlur.bmp", &header, image);
/*image = LoadBMP("./canicas.bmp", &header, 3);
//DisplayInfo("./canicas.bmp", &header);
SaveBMP("./auxMat.bmp", &header, image);*/
// NOTE(review): `image` is never released — confirm how LoadBMP allocates
// imgdata_h (malloc vs cudaMallocHost) and free it with the matching call.
}
|
20,408 | #include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double micros  = (double) now.tv_usec;
    return seconds + micros * 1.e-6;
}
/* Abort immediately — printing file/line and the CUDA error string — when a
 * CUDA runtime call fails.  Wrap every cudaXxx() call site; note that kernel
 * launches do not return a cudaError_t, so they still need a separate
 * cudaGetLastError() check. */
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Fill I[0..nElem) with the identity mapping: I[k] == k.
void identityData(int* I, int nElem) {
    int k = 0;
    while (k < nElem) {
        I[k] = k;
        ++k;
    }
}
// Fill ip[0..size) with pseudo-random floats in [0.0, 25.5]
// ((rand() & 0xFF) / 10).  Re-seeds from the wall clock on every call.
void initialData(float *ip, int size){
    time_t seed;
    srand((unsigned int) time(&seed));
    int k = 0;
    for (; k < size; ++k)
        ip[k] = (float)(rand() & 0xFF) / 10.0f;
}
// Fill ip[0..size) with pseudo-random integers in [0, 25]
// (floor of a random float in [0, 25.5]).  Re-seeds from the clock each call.
void initialDataInt(int *ip, int size){
    time_t seed;
    srand((unsigned int) time(&seed));
    for (int k = 0; k < size; ++k) {
        float v = (rand() & 0xFF) / 10.0f;
        ip[k] = (int) floor(v);
    }
}
// Element-wise reference sum on the CPU: C[k] = A[k] + B[k] for k in [0, N).
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
    for (int k = 0; k < N; ++k)
        *(C + k) = *(A + k) + *(B + k);
}
// C[i] = A[i] + B[i] + a shared-memory value read with an 8-way strided
// index — a deliberate bank-conflict experiment.  I, R and strike are
// currently unused (kept for signature compatibility with callers).
// Requires blockDim.x <= 512 (static shared-memory size).
__global__ void sumArraysOnGpu(float *A, float *B, float *C, int* I, int* R, int strike, const int N) {
    __shared__ float smem[512];
    // Stride used to provoke shared-memory bank conflicts.
    int conflicts = 8;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: the original accumulated into UNINITIALIZED shared memory
    // ("smem[tid] += i"); write a defined value instead.
    smem[threadIdx.x] = (float) i;
    // Bug fix: each thread below reads a slot written by a *different*
    // thread, so a block-wide barrier is required before the reads.  It is
    // placed outside the divergent tail guard so every thread reaches it.
    __syncthreads();
    if (i < N) {
        C[i] = A[i] + B[i] + smem[(threadIdx.x * conflicts) % blockDim.x];
    }
}
// Driver for the bank-conflict vector-add experiment: fills host arrays,
// copies A/B to the device, launches sumArraysOnGpu and copies the result back.
int main(int argc, char**argv) {
    // Vector length: 100 million elements (~400 MB per float array).
    int nElem = 100 * 1.e6;
    int strike = 1;
    // Host allocations.
    size_t nBytes = nElem * sizeof(float);
    // Bug fix: the int arrays were sized with sizeof(float) — only correct
    // by accident because sizeof(int) == sizeof(float) on common ABIs.
    size_t nBytesInt = nElem * sizeof(int);
    float *h_A, *h_B, *hostRef, *gpuRef;
    int *R, *I;
    h_A = (float *) malloc(nBytes);
    h_B = (float *) malloc(nBytes);
    R = (int *) malloc(nBytesInt);
    I = (int *) malloc(nBytesInt);
    hostRef = (float *) malloc(nBytes);
    gpuRef = (float *) malloc(nBytes);
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    initialDataInt(R, nElem);
    identityData(I, nElem);
    // Device (global-memory) allocations.
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float **)&d_A, nBytes));
    CHECK(cudaMalloc((float **)&d_B, nBytes));
    CHECK(cudaMalloc((float **)&d_C, nBytes));
    // Host -> device transfers.
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    // Kernel launch.
    // NOTE(review): I and R are HOST pointers; the kernel never dereferences
    // them today, but they must be copied into device buffers before the
    // kernel can legally use them.
    int iLen = 512;
    dim3 block(iLen);
    dim3 grid((nElem + block.x - 1) / block.x);
    sumArraysOnGpu<<<grid, block>>>(d_A, d_B, d_C, I, R, strike, nElem);
    // Launch-configuration errors are only reported via cudaGetLastError().
    CHECK(cudaGetLastError());
    // Device -> host transfer of the result (implicitly synchronizes).
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    // Release device memory.
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    // Release host memory.
    free(h_A);
    free(h_B);
    free(R);
    free(I);
    free(hostRef);
    free(gpuRef);
    cudaDeviceReset();
    return 0;
}
20,409 | #include <stdio.h>
// Tree reduction v0 — interleaved addressing (naive): each block sums its
// blockDim.x-element slice of g_idata into g_odata[blockIdx.x].
// Assumes gridDim.x*blockDim.x == input length (no tail guard) and a
// power-of-two blockDim.x.  Dynamic shared memory: blockDim.x * sizeof(int).
__global__ void reduce0(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// `tid % (2*s)` keeps only every (2s)-th thread active: highly divergent
// within each warp, and uses the slow modulo operator.
if (tid % (2*s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Tree reduction v1 — interleaved addressing with strided indexing: keeps
// active threads contiguous (no intra-warp divergence, no modulo), but the
// 2*s stride introduces shared-memory bank conflicts.
// Same preconditions as reduce0.
__global__ void reduce1(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// Consecutive tids map onto the active element pairs.
int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Tree reduction v2 — sequential addressing: the active half of the block
// always reads contiguous shared-memory slots, so the loop is both
// divergence-free and bank-conflict-free.  Half the threads idle after the
// initial load.  Same preconditions as reduce0.
__global__ void reduce2(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Tree reduction v3 — first add during the global load: each block now
// reduces 2*blockDim.x elements, halving the grid size.
// The g_idata[i+blockDim.x] read is unguarded, so the input length must be
// exactly 2*blockDim.x*gridDim.x.
__global__ void reduce3(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Final intra-warp reduction for the last 64 shared-memory elements, done
// without __syncthreads(); `volatile` forces every intermediate store to be
// re-read from shared memory instead of a register.
// blockSize is a compile-time constant, so unreachable steps are elided.
// NOTE(review): this relies on pre-Volta implicit warp-synchronous
// execution; on Volta+ (independent thread scheduling) it needs
// __syncwarp() between steps — confirm the target architecture.
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// Tree reduction v4 — reduce3 plus an unrolled, barrier-free last warp
// (warpReduce).  The loop stops at s=32; the final 64 elements are folded
// by the first warp.
// NOTE(review): warpReduce<512> hard-codes the step set; correctness still
// requires blockDim.x >= 64 (and the usual power-of-two / exact-length
// preconditions of reduce3) — confirm the launch configuration.
__global__ void reduce4(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>32; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid < 32) warpReduce<512>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Tree reduction v5 — completely unrolled via the blockSize template
// parameter, which must equal blockDim.x (<= 512).  Because blockSize is a
// compile-time constant, the outer `if`s are uniform across the block, so
// the __syncthreads() inside them is reached by every thread.
template <unsigned int blockSize>
__global__ void reduce5(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Tree reduction v6 — grid-stride accumulation (algorithm cascading): each
// thread sums multiple element pairs before the shared-memory tree, so any
// grid size is correct.  blockSize must equal blockDim.x (power of two,
// <= 512).  Dynamic shared memory: blockSize * sizeof(int).
template <unsigned int blockSize>
__global__ void reduce6(int *g_idata, int *g_odata, int n) {
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;
    while (i < n) {
        sdata[tid] += g_idata[i];
        // Bug fix: the second load was unguarded, reading g_idata out of
        // bounds whenever n is not a multiple of 2*blockSize*gridDim.x.
        if (i + blockSize < (unsigned int) n)
            sdata[tid] += g_idata[i + blockSize];
        i += gridSize;
    }
    __syncthreads();
    // Fully unrolled shared-memory tree (uniform branches: blockSize is a
    // compile-time constant, so the barriers are reached by all threads).
    if (blockSize >= 512) {
        if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
    }
    if (blockSize >= 256) {
        if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
    }
    if (blockSize >= 128) {
        if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
    }
    if (tid < 32) warpReduce<blockSize>(sdata, tid);
    // One partial sum per block.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Driver: sums 2^22 ints on the CPU, then with reduce6 on the GPU, and
// compares the two results.
int main(void)
{
    long int size = 1 << 22;
    long int s;
    int sizeByte = size*sizeof(int);
    int* h_data = (int*) malloc(sizeByte);
    for(int i = 0; i < size; i++) {
        h_data[i] = i % 10;
    }
    // CPU reference.
    long long int sum = 0;
    for(int i = 0; i < size; i++) sum += h_data[i];
    printf("CPU results = %lld \n", sum);
    int* d_idata = NULL;
    int* d_odata = NULL;
    cudaMalloc(&d_idata, sizeByte);
    cudaMalloc(&d_odata, sizeByte);
    cudaMemcpy(d_idata, h_data, sizeByte, cudaMemcpyHostToDevice);
    // Each 512-thread block consumes 2*512 elements per grid-stride step.
    s = size >> 2;
    int blocks = (s+512-1)/512;
    int numBlocks = blocks/2;
    reduce6<512><<<numBlocks, 512, 512*sizeof(int)>>>(d_idata, d_odata, size);
    cudaDeviceSynchronize();
    printf("The size of array is %ld and it is processed on # of Blocks: %d \n", s, numBlocks);
    // Bug fix: the kernel writes ONE PARTIAL SUM PER BLOCK; the original read
    // only d_odata[0] and therefore reported block 0's partial sum as the
    // total.  Copy all partials back and finish the reduction on the host.
    int* h_partial = (int*) malloc(numBlocks * sizeof(int));
    cudaMemcpy(h_partial, d_odata, numBlocks * sizeof(int), cudaMemcpyDeviceToHost);
    long long int gpuSum = 0;
    for (int i = 0; i < numBlocks; i++) gpuSum += h_partial[i];
    printf("GPU result = %lld\n", gpuSum);
    free(h_partial);
    cudaFree(d_idata);
    cudaFree(d_odata);
    free(h_data);
    return 0;
}
|
20,410 | // This example demonstrates a parallel sum reduction
// using two kernel launches
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
#include <time.h>
// CPU reference: sample standard deviation of a[0..dim),
// sqrt( sum((a[i]-mean)^2) / (dim-1) ).  When verbose, traces the running
// sum, the mean and the squared-deviation sum, exactly as before.
double secuential(const double a[] , int dim,bool verbose){
    double total = 0;
    for (int k = 0; k < dim; ++k)
        total += a[k];
    if (verbose) printf("cpu sum %f\t", total);
    double avg = total / dim;
    if (verbose) printf("cpu mean %f\t", avg);
    double sq = 0;
    for (int k = 0; k < dim; ++k) {
        double d = a[k] - avg;
        sq += d * d;
    }
    if (verbose) printf("cpu sigma %f\n", sq);
    return sqrt(sq / (dim - 1));
}
// One block-level sum-reduction pass: block b writes the sum of its
// blockDim.x-element slice of `a` into a_out[b].  Out-of-range lanes load 0,
// so partial tail blocks are handled.  Dynamic shared memory:
// blockDim.x * sizeof(double).
// NOTE(review): the halving loop assumes blockDim.x is a power of two; main
// accepts an arbitrary block_size, where odd sizes would drop elements —
// confirm the intended launch configurations.
__global__ void reduccion(const double *a,double *a_out,const size_t dim)
{
extern __shared__ double shared[];
unsigned int global_id = blockIdx.x * blockDim.x + threadIdx.x;
// Guarded load: pad the tail with zeros so the tree sum stays correct.
shared[threadIdx.x]= (global_id < dim) ? a[global_id]: 0;
__syncthreads();
for(int i = blockDim.x / 2; i > 0; i /= 2)
{
if(threadIdx.x < i)
shared[threadIdx.x] += shared[threadIdx.x + i];
__syncthreads();
}
if(threadIdx.x == 0)
a_out[blockIdx.x] = shared[0];
}
// In place, per element: a[g] = (a[g] - mean)^2 for every g < dim.
// Prepares the squared deviations that a later reduction turns into sigma.
__global__ void pre_sigma( double a[], const int dim, const double mean)
{
    unsigned long int g = blockIdx.x * blockDim.x + threadIdx.x;
    if (g >= dim)
        return;
    double centered = a[g] - mean;
    a[g] = centered * centered;
}
// Driver: computes the sample standard deviation of a random array on the
// CPU (secuential) and on the GPU (two reduction passes + pre_sigma),
// printing both timings and results.
// argv[1] = array size, argv[2] = block size, argv[3] starting with 'v'
// enables verbose tracing.
int main(int argc, char *argv[])
{
    clock_t time_begin;
    unsigned long int size_array = (argc > 1)? atoi (argv[1]): 4;
    unsigned int block_size = (argc > 2)? atoi (argv[2]): 2;
    bool verbose= (argc>3)? (argv[3][0]=='v'): false;
    double *host_array=(double*)malloc( size_array * sizeof(double));
    for(unsigned int i = 0; i < size_array; ++i)
    {
        host_array[i] =rand()%10;
        if(verbose) printf("%f\t", host_array[i]);
    }
    if(verbose) printf("\n");
    // CPU pass.
    time_begin=clock();
    double cpu_result=secuential(host_array, size_array,verbose);
    printf("CPU time: %f seconds\t", (((double)clock() - (double)time_begin) / 1000000.0F ) * 1000 );
    printf("cpu result: %f\n", cpu_result);
    // GPU pass: sum -> mean, square deviations, sum again -> sigma.
    double *device_array = 0;
    time_begin=clock();
    cudaMalloc((void**)&device_array, sizeof(double) * size_array);
    cudaMemcpy(device_array, &host_array[0], sizeof(double) * size_array, cudaMemcpyHostToDevice);
    const size_t bloques = (size_array/block_size) + ((size_array%block_size) ? 1 : 0);
    double *device_array_out = 0;
    cudaMalloc((void**)&device_array_out, sizeof(double) * (bloques + 1));
    // NOTE(review): the single-block second pass requires bloques <= 1024
    // (max threads per block) — confirm for large inputs.
    reduccion<<<bloques,block_size,block_size * sizeof(double)>>>(device_array, device_array_out, size_array);
    reduccion<<<1,bloques,bloques * sizeof(double)>>>(device_array_out, device_array_out + bloques, bloques);
    double device_result = 0;
    cudaMemcpy(&device_result, device_array_out + bloques, sizeof(double), cudaMemcpyDeviceToHost);
    printf("gpu sum: %f\t", device_result);
    double gpu_mean=device_result /size_array;
    printf("gpu mean: %f\n", gpu_mean);
    //---------------
    dim3 bloque2(block_size);
    dim3 grid2((size_array + bloque2.x - 1) / bloque2.x);
    // Bug fix: the launch configuration was reversed (<<<bloque2, grid2>>>),
    // i.e. the block count and thread count were swapped; that launch fails
    // outright once size_array/block_size exceeds 1024 threads per block.
    pre_sigma<<<grid2, bloque2>>>(device_array, size_array, gpu_mean);
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
    cudaMemcpy(host_array, device_array, sizeof(double)*size_array, cudaMemcpyDeviceToHost);
    if(verbose){
        for(unsigned int j=0; j<size_array; j++)
            printf("%f\t", host_array[j]);
        printf("\n");
    }
    //------------
    reduccion<<<bloques,block_size,block_size * sizeof(double)>>>(device_array, device_array_out, size_array);
    reduccion<<<1,bloques,bloques * sizeof(double)>>>(device_array_out, device_array_out + bloques, bloques);
    cudaMemcpy(&device_result, device_array_out + bloques, sizeof(double), cudaMemcpyDeviceToHost);
    printf("gpu sigma: %f\n", device_result);
    double final_res= sqrt(device_result/(size_array-1));
    printf("GPU time: %f seconds\t", (((double)clock() - (double)time_begin) / 1000000.0F ) * 1000 );
    printf("gpu result: %f\n", final_res);
    // Release host and device memory (host_array previously leaked).
    free(host_array);
    cudaFree(device_array);
    cudaFree(device_array_out);
    return 0;
}
|
20,411 | #include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <time.h>
const int N = 1024; // Edge length of the square matrices (N x N)
const int BLOCK = 16; // CUDA thread-block edge length (BLOCK x BLOCK threads)
double cpuSecond();
// One thread per output element: C[row][col] = dot(A row, B col) for the
// N x N matrices stored as flattened 1-D arrays.
// Bug fix: the original kernel body was EMPTY (only the index setup and a
// "perform the matrix computation" comment), so dMatC was never written and
// the CPU/GPU comparison in main compared against garbage.
__global__ void matrixMul(int *dMatA, int *dMatB, int *dMatC)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int scan;
    int target = 0;
    // N (=1024) is divisible by BLOCK (=16), so the grid covers the matrix
    // exactly; keep the guard anyway for safety.
    if (row < N && col < N) {
        for (scan = 0; scan < N; scan++) {
            target += dMatA[row * N + scan] * dMatB[scan * N + col];
        }
        dMatC[row * N + col] = target;
    }
}
// Benchmark: multiplies two random N x N int matrices on the CPU and on the
// GPU, times both, and compares one element (index 52) of the results.
int main(int argc, char** argv)
{
// Matrix size in bytes
int matrixSize = sizeof(unsigned int) * N * N;
int test1, test2;
double start;
double gpucalctime;
double cpucalctime;
// Host-side matrix pointers
int* hMatA;
int* hMatB;
int* hMatC;
// Allocate the host matrices
hMatA = (int*)malloc(matrixSize);
hMatB = (int*)malloc(matrixSize);
hMatC = (int*)malloc(matrixSize);
// Initialize the matrices
// mat[row][col] is stored flattened in a 1-D array
// NOTE(review): entries go up to N*N-1 (~1M), so the int dot products can
// overflow for N=1024; the CPU/GPU comparison is still like-for-like.
int row, col, scan;
srand((unsigned)time(NULL));
for (row = 0; row < N; row++){
for (col = 0; col < N; col++){
hMatA[row * N + col] = rand() % (N * N);
hMatB[row * N + col] = rand() % (N * N);
hMatC[row * N + col] = 0;
}
}
/* Time the CPU-side computation */
start = cpuSecond();
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
for (scan = 0; scan < N; scan++) {
hMatC[row * N + col] += hMatA[row * N + scan] * hMatB[scan * N + col];
}
}
}
cpucalctime = cpuSecond() - start;
test1 = hMatC[52];
/* Time the GPU-side computation */
// Device-side matrix pointers
int* dMatA;
int* dMatB;
int* dMatC;
// Allocate device memory
cudaMalloc((void**)&dMatA, matrixSize);
cudaMalloc((void**)&dMatB, matrixSize);
cudaMalloc((void**)&dMatC, matrixSize);
// GPU multiply, timed including the transfers
start = cpuSecond();
// Copy the inputs from host to device
cudaMemcpy(dMatA, hMatA, matrixSize, cudaMemcpyHostToDevice);
cudaMemcpy(dMatB, hMatB, matrixSize, cudaMemcpyHostToDevice);
// Configure block and grid sizes (N is a multiple of BLOCK)
dim3 block(BLOCK, BLOCK);
dim3 grid( N / BLOCK, N / BLOCK);
// Launch the kernel
matrixMul<<<grid, block>>>(dMatA, dMatB, dMatC);
cudaDeviceSynchronize();
// Copy the result back from the device
cudaMemcpy(hMatC, dMatC, matrixSize, cudaMemcpyDeviceToHost);
gpucalctime = cpuSecond() - start;
test2 = hMatC[52];
// Print the results
printf("[CPU]calc exetime : %f s.\n", cpucalctime);
printf("[GPU]calc exetime : %f s.\n", gpucalctime);
printf("GPUはCPUの処理を %f 倍高速化 \n",cpucalctime/gpucalctime);
printf("配列52をみて演算結果の確認\n");
printf("cpu %d , gpu %d \n", test1, test2);
// Release host and device memory
free(hMatA);
free(hMatB);
free(hMatC);
cudaFree(dMatA);
cudaFree(dMatB);
cudaFree(dMatC);
// Cleanup
cudaDeviceReset();
return 0;
}
/* Current wall-clock time in seconds (nanosecond resolution). */
double cpuSecond()
{
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    double secs = (double) now.tv_sec;
    return secs + (double) now.tv_nsec * 1.e-9;
}
|
20,412 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the program when the most recent CUDA runtime call failed, printing
// the caller-supplied context string plus CUDA's own error message.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// 81-point (9x9) stencil: out[j][i] = sum over dy,dx in [0,9) of
// w[dy][dx] * in[j+dy][i+dx].  Each thread computes 4 vertically adjacent
// outputs (rows j0..j0+3); arrays are viewed with a fixed row stride of 8200.
//
// This replaces ~450 lines of machine-generated, fully-unrolled code.  The
// weight table below was reconstructed by enumerating every term group of
// the original: the stencil is symmetric (w[dy][dx] == w[8-dy][8-dx] ==
// w[dx][dy]), so rows 5..8 mirror rows 3..0.
//
// Floating-point note: the accumulation order differs from the original
// factored form, so results can differ in the last few ulps.
// NOTE(review): the guard (j0 <= N-9) is kept identical to the original,
// which means the r=1..3 outputs read up to row j0+11 (> N-1 for
// j0 > N-12) — the original had the same boundary over-read; confirm the
// intended valid region against the caller's allocation.
__global__ void j2d81pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
    const double w[9][9] = {
        { 3.18622,     4.5339,     -0.000357,  0.002856, -0.00508225,  0.002856, -0.000357,  4.5339,      3.18622    },
        { 4.5339,      0.00064516, -0.00508,   0.04064,  -0.0723189,   0.04064,  -0.00508,   0.00064516,  4.5339     },
        {-0.000357,   -0.00508,     0.04,     -0.32,      0.56944,    -0.32,      0.04,     -0.00508,    -0.000357   },
        { 0.002856,    0.04064,    -0.32,      2.56,     -4.55552,     2.56,     -0.32,      0.04064,     0.002856   },
        {-0.00508225, -0.0723189,   0.56944,  -4.55552,   8.10655,    -4.55552,   0.56944,  -0.0723189,  -0.00508225 },
        { 0.002856,    0.04064,    -0.32,      2.56,     -4.55552,     2.56,     -0.32,      0.04064,     0.002856   },
        {-0.000357,   -0.00508,     0.04,     -0.32,      0.56944,    -0.32,      0.04,     -0.00508,    -0.000357   },
        { 4.5339,      0.00064516, -0.00508,   0.04064,  -0.0723189,   0.04064,  -0.00508,   0.00064516,  4.5339     },
        { 3.18622,     4.5339,     -0.000357,  0.002856, -0.00508225,  0.002856, -0.000357,  4.5339,      3.18622    }
    };
    // Same index computation as the original: i spans columns, each thread's
    // base row j0 is 4 * (global y index).
    int i  = (int)(blockIdx.x) * (int)(blockDim.x) + (int)(threadIdx.x);
    int j0 = 4 * ((int)(blockIdx.y) * (int)(blockDim.y) + (int)(threadIdx.y));
    double (*in)[8200] = (double (*)[8200]) l_in;
    double (*out)[8200] = (double (*)[8200]) l_out;
    if (i >= 0 && j0 >= 0 && i <= N-9 && j0 <= N-9) {
        for (int r = 0; r < 4; r++) {
            int j = j0 + r;
            double acc = 0.0;
            #pragma unroll
            for (int dy = 0; dy < 9; dy++) {
                #pragma unroll
                for (int dx = 0; dx < 9; dx++) {
                    acc += w[dy][dx] * in[j+dy][i+dx];
                }
            }
            out[j][i] = acc;
        }
    }
}
// Host wrapper: copies h_in to the device, runs j2d81pt over the N x N grid
// (each thread covers 4 rows, hence the 4* in the grid-y divisor), and
// copies the result back into h_out.
extern "C" void host_code (double *h_in, double *h_out, int N) {
    double *in;
    cudaMalloc (&in, sizeof(double)*N*N);
    check_error ("Failed to allocate device memory for in\n");
    cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice);
    check_error ("Failed to copy in to device\n");
    double *out;
    cudaMalloc (&out, sizeof(double)*N*N);
    check_error ("Failed to allocate device memory for out\n");
    dim3 blockconfig (16, 8);
    dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
    j2d81pt<<<gridconfig, blockconfig>>> (in, out, N);
    // Fix: the launch was unchecked although check_error exists exactly for
    // this; this catches launch-configuration errors immediately.
    check_error ("j2d81pt kernel launch failed\n");
    // The blocking D2H copy synchronizes, so this check also surfaces
    // asynchronous execution errors from the kernel.
    cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
    check_error ("Failed to copy out back to host\n");
    cudaFree (in);
    cudaFree (out);
}
|
20,413 | #include<iostream>
#include<fstream>
#include<string>
#include<cstdlib>
#include<cstring>
#include<vector>
#include<iterator>
#include<ctime>
#include<limits>
using namespace std;
/* One weighted edge of the (undirected) input graph.
 * vertex1 / vertex2 hold 0-based endpoint ids (extract_data converts from
 * the file's 1-based ids); weight is the edge cost. */
struct info_edge
{
int vertex1,vertex2;
int weight;
};
// Parses one arc line of the form "a <v1> <v2> <w>" (DIMACS-style —
// presumably; confirm against the input files) and appends the edge with
// 0-based endpoints.  Lines where v1 >= v2 are skipped, so each undirected
// edge is stored once.
void extract_data(vector<info_edge> &adjacency,char* str)
{
    int i,m=0;   // dropped unused local `n` from the original
    int vertex[2];
    int weight;
    // Fixes two defects: (1) strlen() was re-evaluated on every loop
    // iteration (O(n^2) parsing); (2) `i < strlen(str)-1` compared a signed
    // int against size_t, so an EMPTY line (which the eof getline in main
    // produces) made strlen-1 wrap to SIZE_MAX and read out of bounds.
    // With a signed length, len-1 == -1 and the loop simply does not run.
    const int len = (int) strlen(str);
    vertex[0]=0;
    vertex[1]=0;
    // Accumulate the two vertex ids digit by digit starting after "a ";
    // the first space switches to vertex[1], the second ends the scan.
    for(i=2;i<len-1;i++)
    {
        if(str[i]!=' ')
        {
            vertex[m]*=10;
            vertex[m]+=(int)str[i]-48;
        }
        else if(m<1)
        {
            m++;
        }
        else
            break;
    }
    info_edge edge;
    if(vertex[0]<vertex[1])
    {
        edge.vertex1=--vertex[0];   // convert 1-based file ids to 0-based
        edge.vertex2=--vertex[1];
    }
    else
        return;
    // The remainder of the line is the weight.
    weight=0;
    i++;
    while(i<len)
    {
        weight*=10;
        weight+=(int)str[i]-48;
        i++;
    }
    edge.weight=weight;
    adjacency.push_back(edge);
}
void sssp(vector<info_edge>adjacency,int* dist)
{
vector<info_edge>::iterator iter;
bool change=true;
int i=1;
while(change)
{
change=false;
for(iter=adjacency.begin();iter<adjacency.end();iter++)
{
if(dist[iter->vertex1] != dist[iter->vertex2])
{
int t=dist[iter->vertex1]-dist[iter->vertex2];
if(t>iter->weight)
{
change=true;
dist[iter->vertex1]=dist[iter->vertex2]+iter->weight;
}
else if(-t>iter->weight)
{
change=true;
dist[iter->vertex2]=dist[iter->vertex1]+iter->weight;
}
}
}
i++;
}
cout<<"Number of iterations :"<<--i<<"\n";
}
// Driver: reads a DIMACS-style graph file (argv[2]), runs sssp from the
// source vertex argv[1], prints the CPU time and writes the distances
// (1-based vertex ids) to output1.txt.
int main(int argc,char** argv)
{
    struct timespec start,finish;
    // Robustness fix: the original dereferenced argv[1]/argv[2] unchecked.
    if(argc<3)
    {
        cout<<"usage: "<<argv[0]<<" <source-vertex> <graph-file>\n";
        return 1;
    }
    ifstream in(argv[2]);
    if(!in)
    {
        cout<<"cannot open "<<argv[2]<<"\n";
        return 1;
    }
    string str;
    int vertex;
    int edge;
    vector<info_edge> adjacency;
    int* dist;
    // Skip the 4 leading comment lines of the header.
    for(int i=0;i<4;i++)
    {
        getline(in,str);
    }
    // Problem line, e.g. "p sp <#vertices> <#edges>": parse the two counts.
    getline(in,str);
    vertex=0;
    edge=0;
    int i=5;
    while(str[i]!=' ')
    {
        vertex*=10;
        vertex+=(int)str[i]-48;
        i++;
    }
    i++;
    while(i<(int)strlen(&str[0])-1)
    {
        edge*=10;
        edge+=(int)str[i]-48;
        i++;
    }
    for(i=0;i<2;i++)
        getline(in,str);
    // All distances start "unreached" (INT_MAX); sssp relaxes them.
    dist=new int[vertex];
    for(int i=0;i<vertex;i++)
        dist[i]=std::numeric_limits<int>::max();
    while(!in.eof())
    {
        getline(in,str);
        extract_data(adjacency,&str[0]);
    }
    int source=atoi(argv[1]);
    dist[source]=0;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&start);
    sssp(adjacency,dist);
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&finish);
    cout<<"Time taken\t"<<(finish.tv_sec-start.tv_sec)+(finish.tv_nsec-start.tv_nsec)/1e09<<"\n";
    ofstream out("output1.txt");
    for(int i=0;i<vertex;i++)
        out<<i+1<<"\t"<<dist[i]<<"\n";
    // Leak fix: dist was allocated with new[] but never released.
    delete[] dist;
    return 0;
}
|
20,414 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// If the most recent CUDA runtime call failed, print the caller's context
// string together with CUDA's description of the failure and exit.
void check_error (const char* message) {
    const cudaError_t err = cudaGetLastError ();
    if (err != cudaSuccess) {
        printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (err));
        exit(-1);
    }
}
/*
 * sw4_1 — updates the first acceleration component (uacc_0) of the SW4
 * seismic-wave stencil.  One thread per interior (i,j) column; the k-loop
 * walks the z dimension two planes at a time (a_* terms for plane k,
 * b_* terms for plane k+1) to increase instruction-level parallelism.
 *
 * Launch expectations (see host_code): 2-D thread blocks (16x8 = 128
 * threads, matching __launch_bounds__(128,2)) tiling an N x N (i,j) grid;
 * only interior points 2..N-3 are updated.
 *
 * NOTE(review): the flat buffers are reinterpreted as [304][304][304]
 * volumes, so the plane/row stride is hard-coded to 304 regardless of N —
 * this assumes the arrays were allocated for a 304^3 layout and N <= 304;
 * confirm against the caller.  uacc_1/uacc_2 are unused here (they are
 * updated by sw4_2/sw4_3).
 */
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determine this thread's (i,j) indices from the block/thread IDs.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat device pointers as 3-D volumes with a fixed
// 304x304 plane layout (see header note).
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r1, b_r1;
// Interior-point guard.  '&' is a bitwise AND of 0/1 comparison results,
// equivalent to '&&' here (and branch-free).
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
// Stride-2 loop: each iteration computes planes k (a_* terms) and
// k+1 (b_* terms).
for (int k=2; k<=N-3; k+=2) {
// Stretched one-sided mu averages along x, y, z for plane k.
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
// Second-derivative (variable-coefficient) terms for plane k.
a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
(2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
(2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
(2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
+ stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
// Mixed-derivative (cross-term) contributions for plane k.
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
// Accumulate plane k: uacc += cof * r (a1 == 1, so uacc is kept).
uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
// Same computation shifted to plane k+1 (b_* terms).
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
(2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
(2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
(2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
+ stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
}
}
}
/*
 * sw4_2 — updates the second acceleration component (uacc_1) of the SW4
 * seismic-wave stencil.  Same thread layout as sw4_1: one thread per
 * interior (i,j) column, k-loop strided by 2 with a_* terms for plane k
 * and b_* terms for plane k+1.
 *
 * NOTE(review): the buffers are cast to fixed [304][304][304] volumes —
 * assumes a 304^3 allocation and N <= 304; confirm against the caller.
 * uacc_0/uacc_2 are unused here.  The "#pragma begin/end stencil2" lines
 * look like annotations for a source-to-source stencil tool; they are
 * inert to nvcc — verify before removing.
 */
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determine this thread's (i,j) indices from the block/thread IDs.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r2, b_r2;
// Interior-point guard ('&' acts as logical AND on 0/1 operands).
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
#pragma begin stencil2 unroll k=1,j=1,i=1
// Stretched one-sided mu averages for plane k.
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
// Second-derivative terms for plane k (lambda-weighted along y).
a_r2 = 1e0 / 6 * (strx[i] * (a_mux1 * (u_1[k][j][i-2] - u_1[k][j][i]) + a_mux2 * (u_1[k][j][i-1] - u_1[k][j][i]) + a_mux3 * (u_1[k][j][i+1] - u_1[k][j][i]) + a_mux4 * (u_1[k][j][i+2] - u_1[k][j][i])) +
stry[j] * ((2 * a_muy1 + la[k][j-1][i] * stry[j-1] - 3e0 / 4 * la[k][j][i] * stry[j] - 3e0 / 4 * la[k][j-2][i] * stry[j-2]) * (u_1[k][j-2][i] - u_1[k][j][i]) +
(2 * a_muy2 + la[k][j-2][i] * stry[j-2] + la[k][j+1][i] * stry[j+1] + 3 * la[k][j][i] * stry[j] + 3 * la[k][j-1][i] * stry[j-1]) * (u_1[k][j-1][i] - u_1[k][j][i]) +
(2 * a_muy3 + la[k][j-1][i] * stry[j-1] + la[k][j+2][i] * stry[j+2] + 3 * la[k][j+1][i] * stry[j+1] + 3 * la[k][j][i] * stry[j]) * (u_1[k][j+1][i] - u_1[k][j][i]) +
(2 * a_muy4 + la[k][j+1][i] * stry[j+1] - 3e0 / 4 * la[k][j][i] * stry[j] - 3e0 / 4 * la[k][j+2][i] * stry[j+2]) * (u_1[k][j+2][i] - u_1[k][j][i])) +
strz[k] * (a_muz1 * (u_1[k-2][j][i] - u_1[k][j][i]) + a_muz2 * (u_1[k-1][j][i] - u_1[k][j][i]) + a_muz3 * (u_1[k+1][j][i] - u_1[k][j][i]) + a_muz4 * (u_1[k+2][j][i] - u_1[k][j][i])));
// All four mixed-derivative contributions for plane k, fused.
a_r2 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k][j-2][i-2] - u_0[k][j+2][i-2] + 8 * (-u_0[k][j-1][i-2] + u_0[k][j+1][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k][j-2][i-1] - u_0[k][j+2][i-1] + 8 * (-u_0[k][j-1][i-1] + u_0[k][j+1][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k][j-2][i+1] - u_0[k][j+2][i+1] + 8 * (-u_0[k][j-1][i+1] + u_0[k][j+1][i+1]))) - (mu[k][j][i+2] * (u_0[k][j-2][i+2] - u_0[k][j+2][i+2] + 8 * (-u_0[k][j-1][i+2] + u_0[k][j+1][i+2])))) + strx[i] * stry[j] * (1e0 / 144) * (la[k][j-2][i] * (u_0[k][j-2][i-2] - u_0[k][j-2][i+2] + 8 * (-u_0[k][j-2][i-1] + u_0[k][j-2][i+1])) - 8 * (la[k][j-1][i] * (u_0[k][j-1][i-2] - u_0[k][j-1][i+2] + 8 * (-u_0[k][j-1][i-1] + u_0[k][j-1][i+1]))) + 8 * (la[k][j+1][i] * (u_0[k][j+1][i-2] - u_0[k][j+1][i+2] + 8 * (-u_0[k][j+1][i-1] + u_0[k][j+1][i+1]))) - (la[k][j+2][i] * (u_0[k][j+2][i-2] - u_0[k][j+2][i+2] + 8 * (-u_0[k][j+2][i-1] + u_0[k][j+2][i+1])))) + stry[j] * strz[k] * (1e0 / 144) * (la[k][j-2][i] * (u_2[k-2][j-2][i] - u_2[k+2][j-2][i] + 8 * (-u_2[k-1][j-2][i] + u_2[k+1][j-2][i])) - 8 * (la[k][j-1][i] * (u_2[k-2][j-1][i] - u_2[k+2][j-1][i] + 8 * (-u_2[k-1][j-1][i] + u_2[k+1][j-1][i]))) + 8 * (la[k][j+1][i] * (u_2[k-2][j+1][i] - u_2[k+2][j+1][i] + 8 * (-u_2[k-1][j+1][i] + u_2[k+1][j+1][i]))) - (la[k][j+2][i] * (u_2[k-2][j+2][i] - u_2[k+2][j+2][i] + 8 * (-u_2[k-1][j+2][i] + u_2[k+1][j+2][i])))) + stry[j] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j-2][i] - u_2[k-2][j+2][i] + 8 * (-u_2[k-2][j-1][i] + u_2[k-2][j+1][i])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j-2][i] - u_2[k-1][j+2][i] + 8 * (-u_2[k-1][j-1][i] + u_2[k-1][j+1][i]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j-2][i] - u_2[k+1][j+2][i] + 8 * (-u_2[k+1][j-1][i] + u_2[k+1][j+1][i]))) - (mu[k+2][j][i] * (u_2[k+2][j-2][i] - u_2[k+2][j+2][i] + 8 * (-u_2[k+2][j-1][i] + u_2[k+2][j+1][i]))));
uacc_1[k][j][i] = a1 * uacc_1[k][j][i] + cof * a_r2;
// Same computation shifted to plane k+1 (b_* terms).
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r2 = 1e0 / 6 * (strx[i] * (b_mux1 * (u_1[k+1][j][i-2] - u_1[k+1][j][i]) + b_mux2 * (u_1[k+1][j][i-1] - u_1[k+1][j][i]) + b_mux3 * (u_1[k+1][j][i+1] - u_1[k+1][j][i]) + b_mux4 * (u_1[k+1][j][i+2] - u_1[k+1][j][i])) +
stry[j] * ((2 * b_muy1 + la[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * la[k+1][j][i] * stry[j] - 3e0 / 4 * la[k+1][j-2][i] * stry[j-2]) * (u_1[k+1][j-2][i] - u_1[k+1][j][i]) +
(2 * b_muy2 + la[k+1][j-2][i] * stry[j-2] + la[k+1][j+1][i] * stry[j+1] + 3 * la[k+1][j][i] * stry[j] + 3 * la[k+1][j-1][i] * stry[j-1]) * (u_1[k+1][j-1][i] - u_1[k+1][j][i]) +
(2 * b_muy3 + la[k+1][j-1][i] * stry[j-1] + la[k+1][j+2][i] * stry[j+2] + 3 * la[k+1][j+1][i] * stry[j+1] + 3 * la[k+1][j][i] * stry[j]) * (u_1[k+1][j+1][i] - u_1[k+1][j][i]) +
(2 * b_muy4 + la[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * la[k+1][j][i] * stry[j] - 3e0 / 4 * la[k+1][j+2][i] * stry[j+2]) * (u_1[k+1][j+2][i] - u_1[k+1][j][i])) +
strz[k+1] * (b_muz1 * (u_1[k+1-2][j][i] - u_1[k+1][j][i]) + b_muz2 * (u_1[k+1-1][j][i] - u_1[k+1][j][i]) + b_muz3 * (u_1[k+1+1][j][i] - u_1[k+1][j][i]) + b_muz4 * (u_1[k+1+2][j][i] - u_1[k+1][j][i])));
b_r2 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j][i-2] * (u_0[k+1][j-2][i-2] - u_0[k+1][j+2][i-2] + 8 * (-u_0[k+1][j-1][i-2] + u_0[k+1][j+1][i-2])) - 8 * (mu[k+1][j][i-1] * (u_0[k+1][j-2][i-1] - u_0[k+1][j+2][i-1] + 8 * (-u_0[k+1][j-1][i-1] + u_0[k+1][j+1][i-1]))) + 8 * (mu[k+1][j][i+1] * (u_0[k+1][j-2][i+1] - u_0[k+1][j+2][i+1] + 8 * (-u_0[k+1][j-1][i+1] + u_0[k+1][j+1][i+1]))) - (mu[k+1][j][i+2] * (u_0[k+1][j-2][i+2] - u_0[k+1][j+2][i+2] + 8 * (-u_0[k+1][j-1][i+2] + u_0[k+1][j+1][i+2])))) + strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j-2][i] * (u_0[k+1][j-2][i-2] - u_0[k+1][j-2][i+2] + 8 * (-u_0[k+1][j-2][i-1] + u_0[k+1][j-2][i+1])) - 8 * (la[k+1][j-1][i] * (u_0[k+1][j-1][i-2] - u_0[k+1][j-1][i+2] + 8 * (-u_0[k+1][j-1][i-1] + u_0[k+1][j-1][i+1]))) + 8 * (la[k+1][j+1][i] * (u_0[k+1][j+1][i-2] - u_0[k+1][j+1][i+2] + 8 * (-u_0[k+1][j+1][i-1] + u_0[k+1][j+1][i+1]))) - (la[k+1][j+2][i] * (u_0[k+1][j+2][i-2] - u_0[k+1][j+2][i+2] + 8 * (-u_0[k+1][j+2][i-1] + u_0[k+1][j+2][i+1])))) + stry[j] * strz[k+1] * (1e0 / 144) * (la[k+1][j-2][i] * (u_2[k+1-2][j-2][i] - u_2[k+1+2][j-2][i] + 8 * (-u_2[k+1-1][j-2][i] + u_2[k+1+1][j-2][i])) - 8 * (la[k+1][j-1][i] * (u_2[k+1-2][j-1][i] - u_2[k+1+2][j-1][i] + 8 * (-u_2[k+1-1][j-1][i] + u_2[k+1+1][j-1][i]))) + 8 * (la[k+1][j+1][i] * (u_2[k+1-2][j+1][i] - u_2[k+1+2][j+1][i] + 8 * (-u_2[k+1-1][j+1][i] + u_2[k+1+1][j+1][i]))) - (la[k+1][j+2][i] * (u_2[k+1-2][j+2][i] - u_2[k+1+2][j+2][i] + 8 * (-u_2[k+1-1][j+2][i] + u_2[k+1+1][j+2][i])))) + stry[j] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j-2][i] - u_2[k+1-2][j+2][i] + 8 * (-u_2[k+1-2][j-1][i] + u_2[k+1-2][j+1][i])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j-2][i] - u_2[k+1-1][j+2][i] + 8 * (-u_2[k+1-1][j-1][i] + u_2[k+1-1][j+1][i]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j-2][i] - u_2[k+1+1][j+2][i] + 8 * (-u_2[k+1+1][j-1][i] + u_2[k+1+1][j+1][i]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j-2][i] - u_2[k+1+2][j+2][i] + 8 * (-u_2[k+1+2][j-1][i] + u_2[k+1+2][j+1][i]))));
uacc_1[k+1][j][i] = a1 * uacc_1[k+1][j][i] + cof * b_r2;
#pragma end stencil2
}
}
}
/*
 * sw4_3 — updates the third acceleration component (uacc_2) of the SW4
 * seismic-wave stencil.  Unlike sw4_1/sw4_2, the k-loop advances one
 * plane per iteration (k++) and relies on "#pragma unroll 10".
 *
 * NOTE(review): buffers are cast to fixed [304][304][304] volumes —
 * assumes a 304^3 allocation and N <= 304; confirm against the caller.
 * r1 and r2 are declared but never used here; uacc_0/uacc_1 are unused.
 */
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determine this thread's (i,j) indices from the block/thread IDs.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
// Interior-point guard ('&' acts as logical AND on 0/1 operands).
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
// Stretched one-sided mu averages along x, y, z.
mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
// Second-derivative terms (lambda-weighted along z for this component).
r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
(2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
(2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
(2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
// Mixed-derivative (cross-term) contributions.
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
// Accumulate (a1 == 1, so this is uacc += cof * r3).
uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
}
}
}
/*
 * Uploads one host buffer of 'count' doubles to freshly allocated device
 * memory.  'err_msg' is reported by check_error() if the allocation (or a
 * prior call) failed.  Returns the device pointer.
 */
static double *device_upload (const double *host, size_t count, const char *err_msg) {
  double *dev = NULL;
  cudaMalloc (&dev, sizeof(double) * count);
  check_error (err_msg);
  cudaMemcpy (dev, host, sizeof(double) * count, cudaMemcpyHostToDevice);
  return dev;
}
/*
 * Host driver for the SW4 stencil: uploads the N^3 input volumes and the
 * length-N stretch arrays, launches the three component kernels, and
 * copies the updated accelerations back into h_uacc_{0,1,2}.
 *
 * Fixes over the original: kernel launches are now followed by
 * check_error() (launch-configuration errors are otherwise silent), and
 * the repetitive alloc+copy sequence is factored into device_upload().
 */
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
  // Volumes hold N^3 doubles; size_t arithmetic avoids int overflow.
  size_t vol = (size_t) N * N * N;
  double *uacc_0 = device_upload (h_uacc_0, vol, "Failed to allocate device memory for uacc_0\n");
  double *uacc_1 = device_upload (h_uacc_1, vol, "Failed to allocate device memory for uacc_1\n");
  double *uacc_2 = device_upload (h_uacc_2, vol, "Failed to allocate device memory for uacc_2\n");
  double *u_0 = device_upload (h_u_0, vol, "Failed to allocate device memory for u_0\n");
  double *u_1 = device_upload (h_u_1, vol, "Failed to allocate device memory for u_1\n");
  double *u_2 = device_upload (h_u_2, vol, "Failed to allocate device memory for u_2\n");
  double *mu = device_upload (h_mu, vol, "Failed to allocate device memory for mu\n");
  double *la = device_upload (h_la, vol, "Failed to allocate device memory for la\n");
  double *strx = device_upload (h_strx, (size_t) N, "Failed to allocate device memory for strx\n");
  double *stry = device_upload (h_stry, (size_t) N, "Failed to allocate device memory for stry\n");
  double *strz = device_upload (h_strz, (size_t) N, "Failed to allocate device memory for strz\n");
  // 16x8 = 128 threads/block, matching the kernels' __launch_bounds__(128,2).
  dim3 blockconfig (16, 8);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
  sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("sw4_1 kernel launch failed\n"); // launches do not return errors directly
  sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("sw4_2 kernel launch failed\n");
  sw4_3 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("sw4_3 kernel launch failed\n");
  // Blocking copies; these also synchronize with the kernels above.
  cudaMemcpy (h_uacc_0, uacc_0, sizeof(double) * vol, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_uacc_1, uacc_1, sizeof(double) * vol, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_uacc_2, uacc_2, sizeof(double) * vol, cudaMemcpyDeviceToHost);
  cudaFree (uacc_0);
  cudaFree (uacc_1);
  cudaFree (uacc_2);
  cudaFree (u_0);
  cudaFree (u_1);
  cudaFree (u_2);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (strx);
  cudaFree (stry);
  cudaFree (strz);
}
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define thread_num 16
/*inline void CUDA_ERROR_CHECK(const cudaError_t &err){
if(err != cudaSuccess){
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}*/
/*
 * Escape-time iteration for one point c = (x, y) of the Mandelbrot set.
 * Iterates z <- z^2 + c starting from z = c and returns the number of
 * iterations completed before |z|^2 exceeds 4 (or maxIterations if the
 * point never escapes).
 */
__device__ int mandel(float x, float y, int maxIterations){
    float re = x;
    float im = y;
    int iter;
    for (iter = 0; iter < maxIterations; ++iter) {
        float re2 = re * re;
        float im2 = im * im;
        // Escaped the radius-2 disk: stop counting.
        if (re2 + im2 > 4.0f)
            break;
        // z^2 + c, computed with the old (re, im) before overwriting.
        float nextIm = 2.f * re * im + y;
        re = re2 - im2 + x;
        im = nextIm;
    }
    return iter;
}
/*
 * Renders Mandelbrot iteration counts into d_res.  Each thread fills an
 * x_pixels-by-y_pixels patch of the image starting at (now_x, now_y);
 * the image spans [lowerX, lowerX + resX*stepX) x [lowerY, lowerY + resY*stepY).
 *
 * NOTE(review): d_res is indexed as j*resX + i, i.e. assumed tightly
 * packed with row stride resX — but the host allocates it with
 * cudaMallocPitch and ignores the pitch.  Confirm rows are actually
 * contiguous (pitch == resX*sizeof(int)) or switch to linear allocation.
 */
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY, int *d_res, int resX, int resY, int maxIterations, size_t x_pixels, size_t y_pixels){
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int now_x, now_y, i, j, idx;
// Top-left pixel of this thread's patch.
now_x = (blockIdx.x * blockDim.x + threadIdx.x)*x_pixels;
now_y = (blockIdx.y * blockDim.y + threadIdx.y)*y_pixels;
//now_x = (blockIdx.x * blockDim.x + threadIdx.x);
//now_y = (blockIdx.y * blockDim.y + threadIdx.y);
float x, y;
/*for(i = now_y ; i < resY/thread_num ; i += blockDim.y*y_pixels){
for(j = now_x ; j < resX/thread_num ; j += blockDim.x*x_pixels){
x = lowerX + i * resY;
y = lowerY + j * resY;
idx = j*resX+i;
d_res[idx] = mandel(x, y, maxIterations);
}
}*/
// Whole patch outside the image: nothing to do.
if(now_x >= resX || now_y >= resY) return;
for(j = now_y ; j < now_y+y_pixels ; j++){
// Rows below the image: remaining rows are also out of range.
if(j >= resY) return;
for(i = now_x ; i < now_x+x_pixels ; i++){
// Columns past the right edge are skipped.
if(i >= resX) continue;
// Map pixel (i, j) to a point in the complex plane.
x = lowerX + i * stepX;
y = lowerY + j * stepY;
idx = j*resX+i;
d_res[idx] = mandel(x, y, maxIterations);
}
}
}
/*
 * Host front-end: allocates device/pinned-host buffers, launches
 * mandelKernel over the full resX x resY image, and copies the iteration
 * counts into img.
 *
 * Fix over the original: d_res was allocated with cudaMallocPitch but the
 * kernel indexes it as j*resX+i and the copy-back used a linear
 * cudaMemcpy, both ignoring the returned pitch — on devices where
 * pitch != resX*sizeof(int) every row after the first was corrupted.
 * A plain cudaMalloc matches the tightly-packed layout the kernel assumes.
 */
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    // size_t avoids int overflow for large images.
    size_t size = (size_t)resX * resY * sizeof(int);
    // Each thread renders a 2x2 patch of pixels.
    int x_pixels = 2, y_pixels = 2;
    int *d_res, *h;
    // Linear allocation: row stride is exactly resX ints, matching the
    // kernel's j*resX+i indexing.
    cudaMalloc((void**)&d_res, size);
    // A thread_num x thread_num block of threads covers a
    // thread_num x thread_num pixel tile (each thread does a patch).
    int blocksX = (int) ceil(resX/(float)thread_num);
    int blocksY = (int) ceil(resY/(float)thread_num);
    dim3 block(thread_num/x_pixels, thread_num/y_pixels);
    dim3 grid(blocksX, blocksY);
    // Pinned staging buffer for the device-to-host copy.
    cudaHostAlloc((void**)&h, size, cudaHostAllocMapped);
    mandelKernel <<< grid, block >>> (lowerX, lowerY, stepX, stepY, d_res, resX, resY, maxIterations, x_pixels, y_pixels);
    // The blocking cudaMemcpy below would synchronize anyway; the explicit
    // sync keeps kernel errors from being misattributed to the copy.
    cudaDeviceSynchronize();
    cudaMemcpy(h, d_res, size, cudaMemcpyDeviceToHost);
    memcpy(img, h, size);
    cudaFreeHost(h);
    cudaFree(d_res);
}
#include<cuda.h>
#include<stdio.h>
#include<cuda_runtime.h>
#include <cuda_profiler_api.h>
#define NSTREAM 8
/*
 * CPU reference matrix multiply: C += A * B for square size x size
 * row-major matrices (caller must zero C beforehand — results are
 * accumulated, not assigned).
 */
void matricMul(int *A, int *B, int *C, int size) {
    for (int r = 0; r < size; ++r) {
        const int *rowA = A + r * size;
        for (int c = 0; c < size; ++c) {
            // Dot product of row r of A with column c of B.
            int acc = 0;
            for (int k = 0; k < size; ++k)
                acc += rowA[k] * B[k * size + c];
            C[r * size + c] += acc;
        }
    }
}
// Compare the first `size` entries of CPU and GPU results; report the
// first mismatch (if any) and an overall verdict.
void matrixMulCheck(int *C_cpu, int *C_gpu, int size) {
    bool ResultFlag = true;
    int i = 0;
    while (i < size) {
        if (C_cpu[i] != C_gpu[i]) {
            ResultFlag = false;
            printf("Error: C_cpu[%d] = %d; C_gpu[%d] = %d;\n", i, C_cpu[i], i, C_gpu[i]);
            break;
        }
        ++i;
    }
    if (ResultFlag == true) printf("Matrix Multiplication OK!\n");
    else printf("Matrix Multiplication Error!\n");
}
// Naive matrix multiply: each thread computes one element of C, where
// `size` is both the row length of A/C and the column count of B.
// Expects a 2D launch: tx selects the output column, ty the output row.
__global__ void matrixMulDepth(int *A, int *B, int *C, int size) {
int tid, tx, ty;
tx = threadIdx.x + blockDim.x * blockIdx.x; // output column
ty = threadIdx.y + blockDim.y * blockIdx.y; // output row within this chunk
tid = size * ty + tx; // flat output index
int Aval = 0;
int Bval = 0;
int Cval = 0; // dot-product accumulator
for (int i = 0; i < size; i++) {
Aval = A[ty * size + i];
Bval = B[i * size + tx];
Cval += Aval * Bval;
}
// NOTE(review): no bounds guard on tx/ty — a grid that overshoots the
// data writes out of range; confirm the caller's grid exactly covers
// each chunk it launches on.
C[tid] = Cval;
}
int main() {
    // Multiply two nx x ny matrices on the GPU, split row-wise across
    // NSTREAM streams to overlap copies with compute, then verify on CPU.
    int nx = 1600;
    int ny = 1600;
    int dimx = 32;
    int dimy = 16;
    dim3 block(dimx, dimy); // Block dimension 32x16
    dim3 grid((nx+block.x-1)/block.x, (ny/NSTREAM+block.y-1)/block.y);

    // Raise the number of hardware work queues before any CUDA call.
    const char *iname = "CUDA_DEVICE_MAX_CONNECTIONS";
    setenv(iname, "4", 1);

    int MatrixSize = nx * ny;
    int BufferSize = MatrixSize * sizeof(int);
    int iElem = MatrixSize / NSTREAM;      // elements per stream chunk
    int ibytes = iElem * sizeof(int);
    int *h_A, *h_B, *h_C;
    int *C_cpu;

    // Create streams
    cudaStream_t *stream = (cudaStream_t*)malloc(NSTREAM*sizeof(cudaStream_t));
    for (int i = 0; i < NSTREAM; i++) {
        cudaStreamCreate(&stream[i]);
    }

    // Pinned host memory so the async copies below can truly overlap.
    cudaHostAlloc((void**)&h_A, BufferSize, cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_B, BufferSize, cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_C, BufferSize, cudaHostAllocDefault);
    cudaHostAlloc((void**)&C_cpu, BufferSize, cudaHostAllocDefault);

    // Data input
    for (int i = 0; i < MatrixSize; i++) {
        h_A[i] = i % 100;
        h_B[i] = i % 100;
        h_C[i] = 0;
        C_cpu[i] = 0;
    }

    int *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, BufferSize);
    cudaMalloc((void**)&d_B, BufferSize);
    cudaMalloc((void**)&d_C, BufferSize);

    // BUG FIX: h_B was allocated with cudaHostAllocDefault (not Mapped), so
    // cudaHostGetDevicePointer on it was invalid, and overwriting d_B also
    // leaked the allocation above while B was never copied to the device.
    // Copy B explicitly; every stream reads all of it.
    cudaMemcpy(d_B, h_B, BufferSize, cudaMemcpyHostToDevice);

    for (int i = 0; i < NSTREAM; i++) {
        int ioffset = i * iElem;
        cudaMemcpyAsync(&d_A[ioffset], &h_A[ioffset], ibytes, cudaMemcpyHostToDevice, stream[i]);
        matrixMulDepth<<<grid, block, 0, stream[i]>>>(&d_A[ioffset], d_B, &d_C[ioffset], nx);
        cudaMemcpyAsync(&h_C[ioffset], &d_C[ioffset], ibytes, cudaMemcpyDeviceToHost, stream[i]);
    }
    // BUG FIX: synchronizing inside the launch loop serialized the streams
    // and defeated the copy/compute overlap; wait for all of them here.
    for (int i = 0; i < NSTREAM; i++) {
        cudaStreamSynchronize(stream[i]);
    }
    cudaProfilerStop();

    // Check result
    matricMul(h_A, h_B, C_cpu, nx);
    matrixMulCheck(C_cpu, h_C, nx);

    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // BUG FIX: memory from cudaHostAlloc must be released with
    // cudaFreeHost, not free().
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(h_C);
    cudaFreeHost(C_cpu);

    // Destroy streams (previously leaked).
    for (int i = 0; i < NSTREAM; i++) {
        cudaStreamDestroy(stream[i]);
    }
    free(stream);
    return 0;
}
20,417 | #include "includes.h"
// Copies a square sub-window of `input` into `output`, resampling it with
// nearest-neighbour sampling to the output resolution. The window centre is
// given in normalised coordinates <-1,1> and its side as a fraction <0,1>
// of the smaller input dimension. One thread per output pixel.
__global__ void CutSubImageKernel_SingleParams(float *input, float *output, float subImageX, float subImageY, float subImageDiameter, bool safeBounds, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
if (id < size)
{
float subImgCX = subImageX; // <-1, 1>
float subImgCY = subImageY; // <-1, 1>
float subImgDiameter = subImageDiameter; // <0,1>
// Window side in pixels, clamped to [1, maxDiameter].
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
int diameterPix = (int)(subImgDiameter * maxDiameter);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
// Top-left corner of the window in input pixel coordinates.
int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2;
int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;
if (safeBounds)
{
// Clamp the window fully inside the input, keeping a 1-pixel margin.
subImgX = max(subImgX, 1);
subImgY = max(subImgY, 1);
subImgX = min(subImgX, inputWidth - diameterPix - 1);
subImgY = min(subImgY, inputHeight - diameterPix - 1);
}
int px = id % outputWidth;
int py = id / outputWidth;
// Nearest-neighbour scale factors from output pixels to window pixels.
float xRatio = (float)(diameterPix - 1) / (outputWidth - 1);
float yRatio = (float)(diameterPix - 1) / (outputHeight - 1);
//
int x = (int)(xRatio * px);
int y = (int)(yRatio * py);
// NOTE(review): when safeBounds is false, out-of-range source pixels are
// skipped, leaving the corresponding output pixels untouched.
if (x + subImgX >= 0 && y + subImgY >= 0 &&
x + subImgX < inputWidth && y + subImgY < inputHeight)
{
output[py * outputWidth + px] = input[(y + subImgY) * inputWidth + x + subImgX];
}
}
}
20,418 | #include "includes.h"
// In-place element-wise accumulation: dest += src over pointDim entries.
__device__ void devVecAdd(size_t pointDim, double* dest, double* src) {
    for (size_t d = 0; d < pointDim; ++d)
        dest[d] += src[d];
}
// Accumulates src into dest, one pointDim-sized vector per thread.
// Called to standardize arrays to be a power of two.
// Assumes a 2D grid of 1D blocks.
__global__ void kernElementWiseSum(const size_t numPoints, const size_t pointDim, double* dest, double* src) {
    int blockId = blockIdx.y * gridDim.x + blockIdx.x;
    int point = blockId * blockDim.x + threadIdx.x;
    if (point < numPoints)
        devVecAdd(pointDim, dest + point * pointDim, src + point * pointDim);
}
20,419 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
// this is the program that is to be run on the device for a
// large number of threads, in our example 100
// each thread takes care of one entry in the number array,
// so in order for the thread to know which number to manipulate,
// a scheme has to be utilized in order to assign each thread a
// unique number
// Each thread owns exactly one array slot: derive a unique global index
// from the block coordinates plus the thread's position inside its block,
// then bump that slot by one. Threads past the end do nothing.
__global__ void incrementArrayViaCUDAdevice(int *numberArray, int N)
{
    int slot = blockIdx.x * blockDim.x + threadIdx.x;
    if (slot < N)
        numberArray[slot] += 1;
}
// this is the "normal" function to be run on the CPU
// it does the exact same thing as the CUDA function above
// CPU reference: bump every one of the N elements by one.
// Does the exact same thing as the CUDA kernel above.
void incrementArray(int *numberArray, int N){
    int *p = numberArray;
    int *stop = numberArray + N;
    while (p != stop)
        *p++ += 1;
}
int main(int argc, const char* argv[] )
{
    // Increment an array on the GPU and on the CPU, then compare results.
    int numberOfNumbers = 100;
    int *numbers1, *numbers2;
    cudaError_t err;

    // Unified (managed) memory: visible to both host and device.
    // BUG FIX: allocation results were ignored; a failed alloc would have
    // crashed on the first host write below.
    err = cudaMallocManaged(&numbers1, sizeof(int)*numberOfNumbers);
    if (err != cudaSuccess) {
        printf("allocation failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaMallocManaged(&numbers2, sizeof(int)*numberOfNumbers);
    if (err != cudaSuccess) {
        printf("allocation failed: %s\n", cudaGetErrorString(err));
        cudaFree(numbers1);
        return 1;
    }

    // fill the input arrays with some numbers
    for(int i=0;i<numberOfNumbers;i++)
    {
        numbers1[i] = i; // manipulated by the CUDA device (GPU)
        numbers2[i] = i; // manipulated by the CPU
    }

    // tell the device (GPU) to do its magic
    incrementArrayViaCUDAdevice<<<1, numberOfNumbers>>>(numbers1, numberOfNumbers);
    // Launch-configuration errors are only reported via cudaGetLastError.
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    // wait for the device to finish working
    cudaDeviceSynchronize();

    // compute the same function "normally" on the CPU
    incrementArray(numbers2, numberOfNumbers);

    // check if the GPU did the same as the CPU
    bool workedCorrectly = true;
    for(int i=0;i<numberOfNumbers;i++)
    {
        if (numbers1[i] != numbers2[i])
            workedCorrectly = false;   // was `0`: keep bool values boolean
    }
    if (workedCorrectly)
        printf("The device performed well!\n");
    else
        printf("Something went wrong. The output numbers are not what was to be expected...\n");

    // free the space that has been used by our arrays
    cudaFree(numbers1);
    cudaFree(numbers2);
    return 0;
}
|
20,420 | #include "includes.h"
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
// Element-wise sum c = a + b; one thread per element, indexed by the
// thread id alone (single-block launch assumed, no bounds guard).
__global__ void addArrays(int* a, int* b, int* c)
{
    int tid = threadIdx.x;
    c[tid] = b[tid] + a[tid];
}
20,421 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#define ROW 5000
#define COL 5000
// Flattened element-wise addition c = a + b with a tail guard.
__global__ void matrixAddition(float *a, float *b, float *c, int N){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
        c[tid] = b[tid] + a[tid];
}
// Increments one cell of a device-side 2D pointer-of-pointers array;
// thread (x, y) touches data[x][y]. No bounds guard: the launch shape
// must not exceed the array dimensions.
__global__ void test(int ** data){
int indexx = threadIdx.x;
int indexy = threadIdx.y;
// printf("index = %d\n", threadIdx.x);
// printf("%d\n", data[threadIdx.x][threadIdx.y]);
data[indexx][indexy] += 1;
}
// Debug kernel: every launched thread prints a greeting. N is unused.
__global__ void test2(int N){
    printf("CUDA said: hello world\n");
}
// Initialise one cuRAND state per thread: shared seed 1234, the flat
// thread id as the subsequence, so each thread gets an independent stream.
__global__ void setup_kernel(curandState * state){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    curand_init(1234, tid, 0, &state[tid]);
}
// Draws one uniform integer per thread from its preallocated cuRAND state
// and prints it (debug helper).
__global__ void testrand(curandState * state){
int id = blockIdx.x * blockDim.x + threadIdx.x;
curandState localState = state[id];
// curand_uniform returns (0,1]; ceilf(u*30)-1 maps it onto 0..29.
unsigned int num = ceilf(curand_uniform(&localState) * 30) - 1;
printf("rand = %u\n", num);
}
// Device-side memcpy demo: copy the first (length - 5) doubles of src
// into dest starting at offset 5.
__global__ void testmemcpy(double * dest, double * src, unsigned int length){
    memcpy(&dest[5], src, (length - 5) * sizeof(double));
}
int main(){
    // Build a 10x10 host matrix of i+j values and print it.
    int ** array = (int **)malloc(10*sizeof(int*));
    for(int i = 0; i < 10; ++i){
        array[i] = (int *)malloc(10*sizeof(int));
        for(int j = 0; j < 10; ++j){
            array[i][j] = i + j;
            printf("%d ", array[i][j]);
        }
        printf("\n");
    }

    // Device-side array-of-row-pointers mirror of `array`.
    int ** dev_array;
    cudaMalloc((void **)&dev_array, sizeof(int*) *10);
    int * dev_temp;
    int ** dev_temp_array = (int **)malloc(sizeof(int *)*10);
    for(int i = 0; i < 10; ++i){
        cudaMalloc((void **)&dev_temp, sizeof(int)*10);
        cudaMemcpy(dev_temp, array[i], 10*sizeof(int), cudaMemcpyHostToDevice);
        dev_temp_array[i] = dev_temp;   // kept so the rows can be freed later
        cudaMemcpy(&(dev_array[i]), &dev_temp, sizeof(dev_temp), cudaMemcpyHostToDevice);
    }

    dim3 threadsPerBlock(10, 10);
    curandState * d_state;
    // BUG FIX: setup_kernel runs with 10*10 = 100 threads and each writes
    // state[idx]; allocating a single curandState sent every thread but the
    // first out of bounds. Size the buffer for the whole block.
    cudaMalloc(&d_state, sizeof(curandState) * threadsPerBlock.x * threadsPerBlock.y);
    setup_kernel<<<1, threadsPerBlock>>>(d_state);
    // testrand<<<1, threadsPerBlock>>>(d_state);

    // Round-trip a small double array through the device memcpy demo.
    double testArray[10];
    double resultArray[10];
    double * dev_test_array_src;
    double * dev_test_array_dest;
    cudaMalloc((void**)&dev_test_array_src, sizeof(double)*10);
    cudaMalloc((void**)&dev_test_array_dest, sizeof(double)*10);
    for(double i = 0; i < 10; i+=1){
        testArray[(int)i] = i / 10.0;
    }
    cudaMemcpy(dev_test_array_src, testArray, sizeof(double)*10, cudaMemcpyHostToDevice);
    testmemcpy<<<1, 1>>>(dev_test_array_dest, dev_test_array_src, 10);
    cudaMemcpy(resultArray, dev_test_array_dest, sizeof(double)*10, cudaMemcpyDeviceToHost);

    // Copy the rows back from the device and print them.
    int * temp;
    for(int i = 0; i < 10; ++i){
        cudaMemcpy(&temp, &(dev_array[i]), sizeof(int *), cudaMemcpyDeviceToHost);
        cudaMemcpy(array[i], (temp), sizeof(int)*10, cudaMemcpyDeviceToHost);
        for(int j = 0; j < 10; ++j){
            printf("%d ", array[i][j]);
        }
        printf("\n");
    }

    // BUG FIX: host allocations were leaked; device rows are released
    // explicitly before the reset reclaims everything else.
    for(int i = 0; i < 10; ++i){
        cudaFree(dev_temp_array[i]);
        free(array[i]);
    }
    free(array);
    free(dev_temp_array);
    cudaFree(dev_array);
    cudaFree(d_state);
    cudaFree(dev_test_array_src);
    cudaFree(dev_test_array_dest);
    cudaDeviceReset();
    return 0;
}
|
20,422 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include<time.h>
#define BLOCK_SIZE 32
// One thread per element of the m x k product c = a (m x n) * b (n x k),
// all matrices row-major.
__global__ void gpu_matrix_mult(long *a, long *b, long *c, int m, int n, int k)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < m && col < k)
    {
        // BUG FIX: the accumulator was an int while the matrices hold long,
        // silently truncating large dot products.
        long sum = 0;
        for (int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}
// CPU reference for the m x k product h_result = h_a (m x n) * h_b (n x k).
void cpu_matrix_mult(long *h_a, long *h_b, long *h_result, int m, int n, int k) {
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            // BUG FIX: the accumulator was `int tmp = 0.0` — an int holding
            // products of longs, truncating large sums (and a double literal
            // initialising an int). Use long.
            long tmp = 0;
            for (int h = 0; h < n; ++h)
            {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}
// Print an m x n matrix of long values, one row per line, surrounded by
// blank lines.
void DisplayMatrix(long * h_a , int m,int n)
{
    printf("\n");
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            // BUG FIX: "%d" with a long argument is undefined behaviour on
            // LP64 platforms; use "%ld".
            printf("%ld ", h_a[i*n + j]);
        }
        printf("\n");
    }
    printf("\n");
}
int main(int argc, char const *argv[])
{
    // Read m, n, k and two matrices from files, multiply on CPU and GPU,
    // and report both results and timings.
    long *dev_a, *dev_b, *dev_c,*dev_d;
    float ms;
    clock_t startc, end;
    double cpu_time_used;
    cudaEvent_t start,stop;

    FILE *myFile_mnk;
    myFile_mnk = fopen("C:\\Users\\k1636\\Documents\\Visual Studio 2010\\Projects\\abc\\abc\\mnk.txt", "r");
    if (myFile_mnk == NULL) {
        printf("cannot open mnk.txt\n");
        return 1;
    }
    int i,j;
    int m,n,k;
    fscanf(myFile_mnk, "%d", &m);
    fscanf(myFile_mnk, "%d", &n);
    fscanf(myFile_mnk, "%d", &k);
    fclose(myFile_mnk);
    printf("%d %d %d",m,n,k);

    // BUG FIX: every buffer holds `long` but was sized with sizeof(int),
    // under-allocating by half on LP64 platforms.
    long *a=(long*)malloc(m*n*sizeof(long));
    long *b=(long*)malloc(n*k*sizeof(long));
    long *c=(long*)malloc(m*k*sizeof(long));
    long *d=(long*)malloc(m*k*sizeof(long));

    // BUG FIX: "%d" scanning into a long* is undefined; use "%ld".
    FILE *myFile_input1;
    myFile_input1 = fopen("C:\\Users\\k1636\\Documents\\Visual Studio 2010\\Projects\\abc\\abc\\input1.txt", "r");
    for (i = 0; i < m; i++)
    {
        for(j=0;j<n;j++){
            fscanf(myFile_input1, "%ld", &a[i*n+j]);
        }
    }
    fclose(myFile_input1);
    FILE *myFile_input2;
    myFile_input2 = fopen("C:\\Users\\k1636\\Documents\\Visual Studio 2010\\Projects\\abc\\abc\\input2.txt", "r");
    for (i = 0; i < n; i++)
    {
        for(j=0;j<k;j++){
            fscanf(myFile_input2, "%ld", &b[i*k+j]);
        }
    }
    fclose(myFile_input2);

    printf("\nA\n");
    DisplayMatrix(a,m,n);
    printf("\nB\n");
    DisplayMatrix(b,n,k);

    cudaMalloc((void **) &dev_a, m*n*sizeof(long));
    cudaMalloc((void **) &dev_b, n*k*sizeof(long));
    cudaMalloc((void **) &dev_c, m*k*sizeof(long));
    cudaMalloc((void **) &dev_d, m*k*sizeof(long));

    startc = clock();
    cpu_matrix_mult(a, b, c, m, n, k) ;
    end = clock();
    printf("\n\n CPU RESULT \n\n");
    DisplayMatrix(c,m,k);
    cpu_time_used = ((float) (end - startc)) /(float) CLOCKS_PER_SEC;
    cpu_time_used*=1000;

    cudaMemcpy(dev_a, a, m*n*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, n*k*sizeof(long), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_d, d, m*k*sizeof(long), cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // BUG FIX: `stop` was recorded before the kernel launch, so the elapsed
    // time never covered the GPU work. Bracket the launch instead.
    cudaEventRecord(start, 0);
    dim3 dimGrid ((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    gpu_matrix_mult<<<dimGrid,dim3(BLOCK_SIZE,BLOCK_SIZE)>>>(dev_a, dev_b, dev_c,m,n,k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);

    printf("\n\n GPU RESULT \n\n");
    cudaMemcpy(d, dev_c, m*k*sizeof(long),cudaMemcpyDeviceToHost);
    DisplayMatrix(d,m,k);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU: %f ms",ms);
    printf("\n CPU : %f ms",cpu_time_used);

    // Release host and device memory (previously leaked).
    free(a); free(b); free(c); free(d);
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaFree(dev_d);
    system("pause");
    return 0;
}
20,423 | #include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<float.h>
#define CUDA_CALL(x) do { cudaError_t err=(x); \
if(err!=cudaSuccess) { \
printf("Error %s at %s: %d",cudaGetErrorString(err),__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define W 64 // Input DIM
#define D 3 // Input and Kernel Depth
#define T 5 // Kernel DIM
#define N 128 // Number of kernels
#define TILE_W 8 //input tile width
#define n1 3 //Range for weights(log2) from INQ
#define n2 1 //where n1 > n2
#define BAND 3 // range for weights
#define STRIDE_LENGTH 1 //STRIDE_LENGTH
#define OWS (W- T + 1) // Output DIM
#define OW (((W - T)/STRIDE_LENGTH) + 1) //output width
// INQ-style convolution: weights are stored as (|log2 w|, sign) pairs, so
// each output accumulates input sums per weight magnitude band and applies
// the power-of-two scaling once at the end.
__global__ void cudaConvolve(float* output, int* kernel, unsigned char *matrix){
/*
one block loads its required tile from the matrix collaboritively
and calculates the values for the number of kernels equalling to blockdim.x
*/
__shared__ float shmatrix[TILE_W+T-1][TILE_W+T-1][D];
__shared__ int shkernel[D][T][T][D][2];
float Sum[BAND];
float ds=0.0;
long i=0,j=0,k=0,m=0;
long ty = threadIdx.y;
long tx = threadIdx.x;
long tz = threadIdx.z;
// (z, y) walk the input plane; x selects the kernel handled by this lane.
long z = blockIdx.z*TILE_W+tz;
long y = blockIdx.y*TILE_W+ty;
long x = blockIdx.x*blockDim.x + tx;
//kernel contains the abs log of weight and the sign
if (ty<T && tz<T){
for(k=0;k<D;++k){
shkernel[k][tz][ty][tx][0] = kernel[(x-tx+k)*2*D*T*T + tz*2*D*T+ ty*2*D+ 2*tx];
shkernel[k][tz][ty][tx][1] = kernel[(x-tx+k)*2*D*T*T + tz*2*D*T+ ty*2*D+ 2*tx+1];
}
}
__syncthreads();
// Stage the input tile (with halo) into shared memory, zero-padding edges.
if ( z>=0 && z <W && y>=0 && y <W){
shmatrix[tz][ty][tx] = matrix[z*D*W + y* D+ tx];
}
else
shmatrix[tz][ty][tx] = 0.0f;
__syncthreads();
//sum array stores the sum of matrix element sharing the same weights
for(m=0;m<BAND;m++){
Sum[m]=0.0;
}
if(y%STRIDE_LENGTH == 0 && z%STRIDE_LENGTH == 0){
if (ty<TILE_W && tz<TILE_W){
// Bucket each input value by its weight's log-magnitude band, with sign.
for(k=0;k<D;++k){
for(i=0;i<T;++i){
for(j=0;j<T;++j){
if(shkernel[tx][i][j][k][1] > 0){
Sum[shkernel[tx][i][j][k][0] - n2] += shmatrix[i+tz][ty+j][k];
}
if(shkernel[tx][i][j][k][1] < 0){
Sum[shkernel[tx][i][j][k][0] - n2] -= shmatrix[i+tz][ty+j][k];
}
}
}
}
}
// Apply 2^(band) scaling once per band instead of per weight.
for(m=0;m<BAND;m++){
if(m + n2 > 0){
ds+=Sum[m]*(1<<(m + n2));
}
else{
ds+=Sum[m]/(1<<((-1)*(m + n2)));
}
}
__syncthreads();
if (z<OWS && y<OWS && ty<TILE_W && tz<TILE_W){
output[x*OW*OW + (z/STRIDE_LENGTH)*OW + (y/STRIDE_LENGTH)] = ds;
}
}
}
// Deterministic pseudo-pattern fill of the W x W x D input image.
void fillMatrix(unsigned char *matrix){
    unsigned char (*m)[W][D] = (unsigned char (*)[W][D])matrix;
    for (int r = 0; r < W; ++r)
        for (int c = 0; c < W; ++c)
            for (int d = 0; d < D; ++d)
                m[r][c][d] = (r*c + c*d + r*d + r*2 + c*3 + d*4) % 255;
}
// Deterministic INQ-style kernel fill: slot 0 holds the quantised
// log2-magnitude in [n2, n2 + n1), slot 1 the alternating sign (+1/-1).
void fillKernel(int *kernel){
    int (*t)[T][T][D][2] = (int (*)[T][T][D][2])kernel;
    for (int f = 0; f < N; ++f)
        for (int r = 0; r < T; ++r)
            for (int c = 0; c < T; ++c)
                for (int d = 0; d < D; ++d) {
                    t[f][r][c][d][0] = (f + r + T + D) % n1 + n2;
                    t[f][r][c][d][1] = (int)pow(-1, f + r);
                }
}
// Dump the N x OW x OW output volume to the text file "GPU_TAST",
// one row per line, feature maps separated by blank lines.
void printtofile(float *m){
    const char *fname = "GPU_TAST";
    FILE *f = fopen(fname, "w");
    float (*mat)[OW][OW] = (float (*)[OW][OW])m;
    for (unsigned filt = 0; filt < N; ++filt) {
        for (unsigned row = 0; row < OW; ++row) {
            for (unsigned col = 0; col < OW; ++col)
                fprintf(f, "%4.4f ", mat[filt][row][col]);
            fprintf(f, "\n");
        }
        fprintf(f, "\n");
    }
    fclose(f);
}
int main()
{
    // Host buffers: input image, INQ-quantised kernels, output feature maps.
    unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
    int *kernel=(int*)malloc(sizeof(int)*T*T*D*N*2);
    float *output=(float *)malloc(sizeof(float)*N*OW*OW);
    fillMatrix(matrix);
    fillKernel(kernel);

    unsigned char *Dmatrix;
    cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D);
    int *Dkernel;
    cudaMalloc(&Dkernel,sizeof(int)*N*T*T*D*2);
    float *Doutput;
    cudaMalloc(&Doutput,sizeof(float)*N*OW*OW);

    cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice);
    cudaMemcpy(Dkernel, kernel, sizeof(int)*T*T*D*N*2,cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start,0);

    // Kernel call: one z-layer of blocks per group of D filters; tiles
    // (with halo) cover the image.
    dim3 threads(D,TILE_W+T-1,TILE_W+T-1);
    dim3 blocks(N/D, (W+TILE_W-1)/TILE_W , (W+TILE_W-1)/TILE_W );
    cudaConvolve<<< blocks, threads >>>(Doutput, Dkernel, Dmatrix);
    CUDA_CALL(cudaGetLastError());
    cudaDeviceSynchronize();

    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("%f\n",milliseconds);

    cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost);
    //printtofile(output);

    // BUG FIX: events plus every host/device buffer were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(Dmatrix);
    cudaFree(Dkernel);
    cudaFree(Doutput);
    free(matrix);
    free(kernel);
    free(output);
    return 0;
}
|
20,424 | #include <iostream>
#include <thrust/sort.h>
using namespace std;
// Returns true when some entry i holds the reversed pair of entry k,
// i.e. (array[i], array2[i]) == (array2[k], array[k]). `array` is assumed
// sorted ascending, so the scan stops as soon as values pass the target.
bool searchFunction(int *array, int *array2, int k, int m){
    const int target_start = array2[k];
    const int target_end = array[k];
    for (int i = 0; i < m; ++i) {
        if (array[i] > target_start)
            return false;            // sorted: no later match possible
        if (array[i] == target_start && array2[i] == target_end)
            return true;
    }
    return false;
}
int main(int argc, char const *argv[])
{
    // Read two labels, n, and m (start, end) pairs from stdin, sort the
    // pairs, then count entries that are either forward intervals or have
    // no matching reversed pair.
    string a, b;
    int n, m;
    cin>>a>>b>>n>>m;
    int *array = new int [m];
    int *array2 = new int [m];
    cout<<a<<" "<<b<<" "<<n<<" "<<m<<endl;
    for (int i = 0; i < m; ++i)
    {
        int start;
        int end;
        cin>>start>>end;
        array[i]=start;
        array2[i]=end;
    }
    // Sort by end first, then by start, so equal starts keep end order.
    thrust::sort_by_key(array2, array2 + m, array);
    thrust::sort_by_key(array, array + m, array2);
    int count = 1;
    for (int i=1; i<m; i++){
        if (array[i] < array2[i]){
            count++;
        }
        else if (!searchFunction(array, array2, i, m)){
            count++;
        }
    }
    cout<<"m "<<count<<endl;
    // BUG FIX: the two heap arrays were leaked.
    delete[] array;
    delete[] array2;
    return 0;
}
|
20,425 | /*
* model.c
*
*
*/
#include <math.h>
#include <cuda.h>
struct model_data_
{
double mygamma;
double *theta;
int N_samples;
int N_sensors;
} model_data;
extern "C"
// One thread per (sample ix, sensor iy) entry of g:
//   g[ix][iy] = gamma*y / (2*pi*((x_s + iy*h - x)^2 + y^2))
// where theta[ix*2+0] = x, theta[ix*2+1] = y and d = [x_s, h].
__global__ void GPU_model(double *g, double *d,double *theta,double mygamma,int N_samples,int N_sensors)
{
    int ix= blockDim.x*blockIdx.x+threadIdx.x;
    int iy= blockDim.y*blockIdx.y+threadIdx.y;
    if(ix<N_samples && iy<N_sensors)
    {
        // The previous zero-store here was dead code: this assignment
        // always overwrites it.
        g[ix*N_sensors+iy] = mygamma*theta[ix*2+1]/(2*M_PI*(pow((d[0]+iy*d[1])-theta[ix*2+0],2.0) + pow(theta[ix*2+1],2)));
    }
}
extern "C"
// Evaluates the vortex sensor-placement model on the GPU and writes the
// N_samples x N_sensors prediction matrix into g. d = [x_s, h]; the
// per-sample parameters come from the module-level model_data.
void model(double *g, double *d)
{
    double mygamma = model_data.mygamma;
    double *theta = model_data.theta;
    int N_samples = model_data.N_samples;
    int N_sensors = model_data.N_sensors;

    // Allocate GPU memory.
    double *d_g;
    double *d_d;
    double *d_theta;
    cudaMalloc(&d_g,N_samples*N_sensors*sizeof(double));
    cudaMalloc(&d_d,2*sizeof(double));
    cudaMalloc(&d_theta,2*N_samples*sizeof(double));

    // Copy the inputs only; d_g is fully overwritten by the kernel, so the
    // previous host-to-device copy of g was unnecessary.
    cudaMemcpy(d_d,d,2*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(d_theta,theta,2*N_samples*sizeof(double),cudaMemcpyHostToDevice);

    // BUG FIX: the grid was hard-coded to blocks(40,1) x threads(100,5),
    // which only covered N_samples <= 4000 and N_sensors <= 5. Derive it
    // from the problem size instead.
    dim3 threads(100,5);
    dim3 blocks((N_samples + threads.x - 1) / threads.x,
                (N_sensors + threads.y - 1) / threads.y);
    GPU_model<<<blocks,threads>>>(d_g,d_d,d_theta,mygamma,N_samples,N_sensors);

    cudaMemcpy(g,d_g,N_samples*N_sensors*sizeof(double),cudaMemcpyDeviceToHost);
    cudaFree(d_g);
    cudaFree(d_d);
    cudaFree(d_theta);
    return;
}
|
20,426 | //
// http://forums.nvidia.com/index.php?showtopic=34309
//
#include <stdio.h>
// called from host, run on device
// Element-wise sum; one thread per element in a flat single-block launch.
__global__ void add_arrays_gpu(float *in1,float *in2,float *out)
{
    int i = threadIdx.x;
    out[i] = in2[i] + in1[i];
}
int main()
{
    // Query device 0 and print a few of its capability limits.
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    printf("%s\n", props.name);
    printf("%d\n", props.maxThreadsPerBlock);
    printf("%d, %d, %d\n", props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]);
    printf("%d, %d, %d\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
}
|
20,427 | #include <stdio.h>
// Single-value demo kernel: *b = sqrt(*a), computed on the device.
__global__ void math_sqrt(float *a, float *b) {
    *b = sqrtf(*a);
}
int main() {
    // Round-trip a single float through the device sqrt kernel.
    // b starts at 0 so a failed copy is visible rather than printing
    // uninitialized memory.
    float a = 4, b = 0;
    float *d_a, *d_b;
    cudaMalloc((void **) &d_a, sizeof(float));
    cudaMalloc((void **) &d_b, sizeof(float));
    cudaMemcpy(d_a, &a, sizeof(float), cudaMemcpyHostToDevice);
    math_sqrt<<<1, 1>>>(d_a, d_b);
    // Launch-configuration errors are only reported via cudaGetLastError.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(&b, d_b, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    printf("%f\n", b);
    return 0;
}
|
20,428 | #include "includes.h"
long N = 6400000000;
int doPrint = 0;
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// HELPER CODE TO INITIALIZE, PRINT AND TIME
struct timeval start, end;
// In-place elementwise square root with a tail guard.
__global__ void gpu_sqrt(float* a, long N) {
    // BUG FIX: blockIdx.x*blockDim.x is evaluated in 32-bit int; with
    // N = 6.4e9 elements the flat index overflows before being widened to
    // long. Promote the first operand so the whole expression is 64-bit.
    long element = (long)blockIdx.x*blockDim.x + threadIdx.x;
    if (element < N) a[element] = sqrt(a[element]);
}
20,429 | #include <cuda_runtime.h>
// Computes x / y on the device with IEEE round-to-nearest-even.
__global__ void fdiv_rn_global(float x, float y, float *r)
{
    *r = __fdiv_rn(x, y);
}
// Host wrapper: run __fdiv_rn(x, y) on the device and return the result.
float cuda_fdiv_rn(float x, float y)
{
    float *gpu_result, result = 0.0f;
    cudaMalloc((void **)&gpu_result, sizeof(float));
    fdiv_rn_global<<<1, 1>>>(x, y, gpu_result);
    // BUG FIX: the copy size was sizeof(int); it only worked because
    // sizeof(int) == sizeof(float) on common platforms. Use the element type.
    cudaMemcpy(&result, gpu_result, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(gpu_result);
    // cudaDeviceReset(); // force printf flush
    return result;
}
// Computes the fast (lower-precision) device division x / y.
__global__ void fdividef_global(float x, float y, float *r)
{
    *r = __fdividef(x, y);
}
// Host wrapper: run __fdividef(x, y) on the device and return the result.
float cuda_fdividef(float x, float y)
{
    float *gpu_result, result = 0.0f;
    cudaMalloc((void **)&gpu_result, sizeof(float));
    fdividef_global<<<1, 1>>>(x, y, gpu_result);
    // BUG FIX: the copy size was sizeof(int); use the element type.
    cudaMemcpy(&result, gpu_result, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(gpu_result);
    // cudaDeviceReset(); // force printf flush
    return result;
}
|
20,430 | #include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <stdlib.h>
#define N 1024
#define HEADER_SIZE (54)
#define LENGTH (3*N*N)
#define screenh N
#define screenw N
typedef unsigned char byte_t;
// Dump the in-memory bitmap (header + pixel data) to "cuda.bmp".
void BMPwrite(byte_t* bmp)
{
    // BUG FIX: the file was opened in text mode ("w+"), which corrupts
    // binary output on platforms that translate '\n'. Also, only LENGTH
    // bytes were written starting at the buffer head, so the 54-byte
    // header displaced the last 54 pixel bytes. Write the whole buffer
    // in binary mode.
    FILE *file = fopen("cuda.bmp", "wb");
    if (file == NULL)
        return;
    fwrite(bmp, 1, HEADER_SIZE + LENGTH, file);
    fclose(file);
}
// Fill in a 54-byte BMP header (24-bit, N x N) and zero the pixel area.
void BMPmake(byte_t* bitmap)
{
    // bitmap signature
    bitmap[0] = 'B';
    bitmap[1] = 'M';
    // file size (little-endian)
    bitmap[2] = (HEADER_SIZE + LENGTH) & 0xFF; // 40 + 14 + 12
    bitmap[3] = ((HEADER_SIZE + LENGTH) >> 8) & 0xFF;
    bitmap[4] = ((HEADER_SIZE + LENGTH) >> 16) & 0xFF;
    bitmap[5] = ((HEADER_SIZE + LENGTH) >> 24) & 0xFF;
    // reserved field (in hex. 00 00 00 00)
    int i;
    for( i = 6; i < 10; i++) bitmap[i] = 0;
    // offset of pixel data inside the image
    bitmap[10] = 54;
    for( i = 11; i < 14; i++) bitmap[i] = 0;
    // -- BITMAP HEADER -- //
    // header size
    bitmap[14] = 40;
    for( i = 15; i < 18; i++) bitmap[i] = 0;
    // width and height, each little-endian
    bitmap[18] = N & 0xFF;
    bitmap[19] = (N >> 8) & 0xFF;
    bitmap[20] = (N >> 16) & 0xFF;
    bitmap[21] = (N >> 24) & 0xFF;
    bitmap[22] = N & 0xFF;
    bitmap[23] = (N >> 8) & 0xFF;
    bitmap[24] = (N >> 16) & 0xFF;
    bitmap[25] = (N >> 24) & 0xFF;
    // number of color planes (must be 1)
    bitmap[26] = 1;
    bitmap[27] = 0;
    // number of bits per pixel
    bitmap[28] = 24; // 3 byte
    bitmap[29] = 0;
    // compression method (no compression here)
    for( i = 30; i < 34; i++) bitmap[i] = 0;
    // size of pixel data
    // NOTE(review): 255 does not match LENGTH; most readers ignore this
    // field when compression is 0 — confirm before relying on it.
    bitmap[34] = 255;
    bitmap[35] = 0;
    bitmap[36] = 0;
    bitmap[37] = 0;
    // horizontal resolution of the image - pixels per meter (2835)
    bitmap[38] = 0;
    bitmap[39] = 0;
    bitmap[40] = 48;
    bitmap[41] = 177;
    // vertical resolution of the image - pixels per meter (2835)
    bitmap[42] = 0;
    bitmap[43] = 0;
    bitmap[44] = 48;
    bitmap[45] = 177;
    // color pallette information
    for(i = 46; i < 50; i++) bitmap[i] = 0;
    // number of important colors
    for( i = 50; i < 54; i++) bitmap[i] = 0;
    // BUG FIX: the memset arguments were swapped —
    // memset(ptr, LENGTH, 0) zeroes zero bytes, leaving the pixel area
    // uninitialized. Zero all LENGTH pixel bytes.
    memset(bitmap + HEADER_SIZE, 0, LENGTH);
}
// should be consuming:
// - an array of chars, that will be the image
// - a FILE struct
// kernel forces every thread to color one character
// to the FILE object
// Paints the first byte of each pixel: rows alternate in 32-row bands
// between 235 and 64 (bit 5 of the row index selects the band).
// One thread per pixel; bmp points at the pixel area (header skipped).
__global__ void cudaColor (byte_t* bmp)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    bmp[3 * (x + y * N)] = (y & 0x20) ? 235 : 64;
}
int main()
{
    byte_t *bmp, *dev_bmp;
    // Host buffer: 54-byte header + 3*N*N pixel bytes.
    bmp = (byte_t*)malloc ((HEADER_SIZE + LENGTH) * sizeof (byte_t));
    BMPmake (bmp);
    cudaError_t err;
    err= cudaMalloc ((void**)&dev_bmp, (HEADER_SIZE + LENGTH) * sizeof (byte_t));
    printf("Cuda malloc bmp:%s \n", cudaGetErrorString(err));
    err = cudaMemcpy (dev_bmp, bmp, (HEADER_SIZE + LENGTH) * sizeof (byte_t),
    cudaMemcpyHostToDevice);
    printf("Cuda memcpy to device bmp:%s \n", cudaGetErrorString(err));
    // One thread per pixel.
    dim3 dimBlock (32, 32);
    dim3 dimGrid (N / dimBlock.x, N / dimBlock.y);
    struct timeval begin, end;
    gettimeofday (&begin, NULL);
    cudaColor <<< dimGrid, dimBlock >>> (dev_bmp + HEADER_SIZE);
    err = cudaPeekAtLastError();
    printf ("Cuda kernel:%s \n", cudaGetErrorString(err));
    // BUG FIX: kernel launches are asynchronous; without this sync the
    // second timestamp only measured the launch overhead.
    cudaDeviceSynchronize ();
    gettimeofday (&end, NULL);
    err = cudaMemcpy (bmp, dev_bmp, (HEADER_SIZE + LENGTH) * sizeof (byte_t),
    cudaMemcpyDeviceToHost);
    printf("Cuda memcpy to host bmp:%s \n", cudaGetErrorString(err));
    BMPwrite(bmp);
    // Half the rows should carry the 235 band in their first channel.
    int verify = 0,j;
    for(j = 0; j < LENGTH; j++)
        verify += (bmp [j + HEADER_SIZE] == 235);
    printf ("Verify count: %d\n", verify);
    if (verify == (N * N) / 2){
        printf ("Verified!\n");
    } else {
        printf ("pixels not correct\n");
    }
    fprintf (stdout, "time = %lf\n", (end.tv_sec - begin.tv_sec) + (end.tv_usec - begin.tv_usec) * 1.0 / 1000000);
    // BUG FIX: sizeof(bmp) is the size of the pointer, not the image.
    printf ("size of the image: %d\n", (int)((HEADER_SIZE + LENGTH) * sizeof (byte_t)));
    cudaFree(dev_bmp);
    free(bmp);
    return 0;
}
|
20,431 | #include "includes.h"
// One thread per (pixel i, image j, channel c) entry of A_ch:
//   A_ch[c][j][i] = rho[c][i] / dz[i] * s_a[c][j]  (flattened indexing).
__global__ void calculate_A_ch_3(float* rho, float* dz, float* s_a, int npix, int nchannels, int nimages, float* A_ch) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int c = blockIdx.z*blockDim.z + threadIdx.z;
    // BUG FIX: the z coordinate was unguarded, so any launch whose grid
    // overshoots nchannels wrote past the end of A_ch; nchannels was a
    // parameter but never used.
    if (i < npix && j < nimages && c < nchannels) {
        A_ch[c*npix*nimages + j*npix + i] = (rho[c*npix + i] / dz[i])*(s_a[c * nimages * 3 + j]);
    }
}
20,432 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: threads `comp` through a
// fixed chain of transcendental operations on the 32 inputs and prints the
// final value. Marked "Do not modify" by its generator — comments only.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31) {
if (comp == (+1.6324E-35f / +1.7344E-22f + +1.5046E34f)) {
comp = var_2 * -1.2741E-35f;
comp += (-1.2420E-37f / -1.5003E-44f);
comp = (-1.2703E23f - -1.4485E35f - (-1.0363E-25f / -0.0f));
for (int i=0; i < var_1; ++i) {
float tmp_1 = (-1.8322E36f - +0.0f + var_3 - +1.4168E35f);
comp = tmp_1 * ceilf((var_4 / var_5 + var_6 / var_7));
}
if (comp < (var_8 - -1.5338E-37f * tanhf(var_9 - -1.7752E-41f + var_10))) {
comp = -1.6042E-44f + (var_11 + +1.4285E34f);
comp += -1.1709E-44f / atanf((-1.9398E34f * (-1.1727E-43f + (var_12 + fabsf(powf((var_13 * (var_14 + fabsf((var_15 / var_16 - (var_17 + var_18))))), +1.3976E36f))))));
comp += (-1.2307E-37f - (+0.0f / var_19 - coshf((-1.5122E-19f * atan2f(+1.9070E-36f, -0.0f)))));
}
if (comp > -1.3863E6f / -1.8961E34f - cosf(var_20 / ceilf((var_21 * var_22 * (var_23 * (var_24 / -0.0f)))))) {
float tmp_2 = -1.8267E36f;
float tmp_3 = fabsf(-1.2176E22f * tanhf((var_25 / var_26)));
comp += tmp_3 / tmp_2 + -1.3408E-37f + floorf((-1.4715E-42f + logf(-1.9427E13f - powf(var_27 - var_28 - ceilf(-1.1814E16f * (+1.9660E7f * -1.6151E-1f / cosf((var_29 - (var_30 + var_31))))), coshf(-1.9162E36f)))));
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer with every slot set to v.
// Caller owns the returned memory.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    int i = 0;
    while (i < 10) {
        buf[i] = v;
        ++i;
    }
    return buf;
}
int main(int argc, char** argv) {
    /* Program variables: argv[1] and argv[3..32] are floats, argv[2] an int. */
    // BUG FIX: the kernel needs 32 command-line values; indexing argv
    // without this guard segfaulted on shorter invocations.
    if (argc < 33) {
        fprintf(stderr, "usage: %s v1 ... v32\n", argv[0]);
        return 1;
    }
    float v[33];
    for (int i = 1; i <= 32; ++i)
        v[i] = atof(argv[i]);
    int var_1 = atoi(argv[2]);
    compute<<<1,1>>>(v[1], var_1, v[3], v[4], v[5], v[6], v[7], v[8], v[9], v[10],
                     v[11], v[12], v[13], v[14], v[15], v[16], v[17], v[18], v[19], v[20],
                     v[21], v[22], v[23], v[24], v[25], v[26], v[27], v[28], v[29], v[30],
                     v[31], v[32]);
    cudaDeviceSynchronize();
    return 0;
}
20,433 | #include <thrust/device_vector.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Deliberately racy demo: every launched thread performs a non-atomic
// read-modify-write on the same counter, so most increments are lost
// (contrast with at_increment, which uses atomicAdd).
__global__ void increment(int *data_ptr)
{
(*data_ptr)++;
}
// Race-free counterpart of increment(): serializes updates via atomicAdd.
__global__ void at_increment(int *data_ptr)
{
    atomicAdd(data_ptr, 1);
}
int main(void)
{
    // A single device-resident counter, incremented (racily) by
    // 100 blocks x 100 threads; the printed "Actual" is expected to fall
    // far short of 10000 because increment() is not atomic.
    thrust::device_vector<int> counter(1);
    counter[0] = 0;
    increment<<<100,100>>>(thrust::raw_pointer_cast(&counter[0]));
    //at_increment<<<100,100>>>(thrust::raw_pointer_cast(&counter[0]));
    cudaDeviceSynchronize();
    std::cout<<"Actual "<<counter[0]<<std::endl;
    std::cout<<"Expected "<<10*1000<<std::endl;
    return 0;
}
|
20,434 | #include <string>
#include <iostream>
#include <cstdlib>
#include <fstream>
#include <cmath>
#include <iomanip>
#include <cstring>
#include <chrono>
#define mu 0
#define pi 3.141
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) // useful MACRO to check for errors
using namespace std;
// Reports a CUDA error with its source location and terminates the process.
// Fix: the original only printed (without even a newline) and kept running,
// letting one failed call cascade into later, harder-to-diagnose failures.
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
cout<<cudaGetErrorString( err )<<" in "<< file <<" at line "<< line <<endl;
exit(EXIT_FAILURE);
}
}
struct Pixel {float r, g, b; };
// Horizontal (x) pass of the separable blur: one thread per pixel, each
// thread sums k kernel taps to its RIGHT and writes the intermediate image.
// `row` is made 1-based so the guard `indx <= row*width - k` reduces to
// "column <= width - k", matching the CPU reference loop's bounds.
__global__ void convolution_gpu_x(const Pixel *in, Pixel *mid, const float *K, const int width, const int height, const int k, const int N)
{
int indx = threadIdx.x + blockIdx.x * blockDim.x; // flat pixel index of this thread
int row = indx / width; // 0-based row this pixel lives in
row++; // make it 1-based for the guard below
if(indx < N && indx <= ((row*width)-k)) // inside the image AND at least k taps
{ // remain before the end of this row
float temp1=0, temp2=0, temp3=0; // per-channel accumulators
for(int ref = 0; ref<k; ref++) // walk the k kernel taps
{
int findx = indx + ref;
temp1 += K[ref] * in[findx].r; // K[0]*in[0] + K[1]*in[1] + ... per channel
temp2 += K[ref] * in[findx].g;
temp3 += K[ref] * in[findx].b;
}
mid[indx].r = temp1; // write to the intermediate image
mid[indx].g = temp2;
mid[indx].b = temp3;
}
/*
Kept for reference: the vertical pass cannot live in the same kernel because
__syncthreads() only provides a barrier within a block, not across the grid.
if(indx < N-((k-1)*width)) //&& indx <= ((row*width)-k))
{
float temp1=0, temp2=0, temp3=0;
for(int ref = 0; ref<k; ref++)
{
temp1 += K[ref] * mid[indx+(ref*width)].r;
temp2 += K[ref] * mid[indx+(ref*width)].g;
temp3 += K[ref] * mid[indx+(ref*width)].b;
}
out[indx].r = temp1;
out[indx].g = temp2;
out[indx].b = temp3;
}
*/
}
// Vertical (y) pass of the separable blur: one thread per pixel, each thread
// sums k kernel taps BELOW it in the intermediate image and writes `out`.
// Runs as a second kernel launch so the whole grid has finished the x pass.
__global__ void convolution_gpu_y(const Pixel *mid, Pixel *out, const float *K, const int width, const int height, const int k, const int N)
{
int indx = threadIdx.x + blockIdx.x * blockDim.x; // flat pixel index
int row = indx / width; // 0-based row
row++; // 1-based, see x pass
/*
Kept for reference: single-kernel version abandoned because __syncthreads()
is only a block-level barrier.
if(indx < N && indx <= ((row*width)-k))
{
float temp1=0, temp2=0, temp3=0;
for(int ref = 0; ref<k; ref++)
{
temp1 += K[ref] * in[indx+ref].r;
temp2 += K[ref] * in[indx+ref].g;
temp3 += K[ref] * in[indx+ref].b;
}
mid[indx].r = temp1;
mid[indx].g = temp2;
mid[indx].b = temp3;
}
__syncthreads();
*/
if(indx < N-((k-1)*width) && indx <= ((row*width)-k)) // at least k rows below AND k columns right
{
float temp1=0, temp2=0, temp3=0; // per-channel accumulators
for(int ref = 0; ref<k; ref++) // walk the k kernel taps down the column
{
int findx = indx + (ref*width);
temp1 += K[ref] * mid[findx].r; // K[0]*mid[0] + K[1]*mid[1] + ...
temp2 += K[ref] * mid[findx].g; // stepping one full row per tap
temp3 += K[ref] * mid[findx].b; // (width 5 -> mid[0], mid[5], mid[10], ...)
}
out[indx].r = temp1; // final blurred pixel
out[indx].g = temp2;
out[indx].b = temp3;
}
}
// Reads a binary PPM (P6) image, Gaussian-blurs it with a separable kernel
// on the CPU and on the GPU, writes both results to disk and reports the
// speed-up.  Fixes over the original:
//  * `new Pixel[pixel_size]` allocated pixel_size ELEMENTS (bytes * 12);
//    images are now sized in elements and pixel_size is a byte count.
//  * `delete[] a, b, c;` (comma operator) freed only the first pointer;
//    each buffer is now released individually.
//  * device buffers were released with delete[] / not at all; they now go
//    through cudaFree, and the CUDA events are destroyed.
//  * kernel launches are followed by cudaGetLastError checks.
int main(int argc, char* argv[])
{
if(argc !=3) //Checking if there are 3 arguments
{
cout<<"Please enter 3 arguments"<<endl; //returns this message if 3 arguments not present
cout<<"USAGE:./executable image_file_name.ppm kernal_size "<<endl;
return 1;
}
string filename = argv[1]; // stores the ppm image name to filename
int sigma = atoi(argv[2]); // stores the second argument to sigma
/***************************READ FILE*****************************************/
ifstream file(filename.c_str(), ios::binary);
string type; // string to store file type eg. P6
string comment; // string to store comments in file
string dustbin; // unused dustbin variable to discard comments
file >> type; // read file type
file >> comment; // read next word after type
while(comment.compare(0,1,"#") == 0) // see if first character is a #
{
getline(file,dustbin); // then read the entire line
file >> comment; // read the next word
}
int width, height, range; // variables to store the width, height and range
width = stoi(comment); // value after comment store it in width
file >> height >> range; // continue reading for height and range
int N = height*width; // N to store total number of pixels available
cout << endl;
cout << "Type : " << type << endl; // display the headers
cout << "height : " << height << endl;
cout << "width : " << width << endl;
cout << "range : " << range << endl;
cout << "N : " << N << endl << endl;
size_t buffer_size = 3 * height * width * sizeof(unsigned char) + 1; // raw RGB bytes (+1 for the whitespace byte after the header)
unsigned char *buffer = new unsigned char[buffer_size]; // allocate pointer for the buffer
file.read((char *)buffer, N*3+1); // read the data into buffer using the pointer
file.close(); // close the file
size_t pixel_size = height * width * sizeof(Pixel); // size of one float image in BYTES
// Allocate by element count (the original passed the byte count to new[],
// over-allocating by a factor of sizeof(Pixel)).
Pixel *pixel_in = new Pixel[height * width]; // input, intermediate and output images
Pixel *pixel_mid = new Pixel[height * width];
Pixel *pixel_out = new Pixel[height * width];
memset(pixel_in, 0, pixel_size); // initialize everything to 0
memset(pixel_mid, 0, pixel_size);
memset(pixel_out, 0, pixel_size);
for (int i=0; i<N; i++) // store the rgb values in pixel data type
{
pixel_in[i].r = buffer[i*3+1];
pixel_in[i].g = buffer[i*3+2];
pixel_in[i].b = buffer[i*3+3];
}
delete[] buffer; // raw buffer no longer needed
/*****************************CREATE KERNEL***********************************/
double coeff; // Gaussian normalization coefficient
coeff = 1/sqrt(2*sigma*sigma*pi); // calculate the coefficient
cout << "coefficient is : " << coeff << endl << endl; // display the coefficient value
int k = 6 * sigma; // number of taps (~ +/- 3 sigma)
if(k%2==0) k++; // force an odd tap count so the kernel has a center
int k_half = k/2; // half-width of the kernel
float* K = new float[k]; // the 1-D Gaussian kernel
float sum = 0; // running sum used for normalization
for(int i=-k_half; i<=k_half; i++) // loop over from -k/2 to +k/2
{
K[i+k_half]=coeff*exp(-(((i-mu)*(i-mu))/(2*sigma*sigma))); // K[i], index offset into [0, k)
sum += K[i+k_half]; // accumulate for normalization
}
float sum2 = 0;
for(int i=-k_half; i<=k_half; i++)
{
K[i+k_half]/=sum; // normalize K so its taps sum to 1
sum2+=K[i+k_half]; // sanity sum (should be ~1)
}
/**************************CONVOLUTION ROW WISE*******************************/
chrono::high_resolution_clock::time_point start,stop; // CPU timers
start = chrono::high_resolution_clock::now(); // record the start point
float temp1, temp2, temp3; // per-channel accumulators
for(int j=0; j<height; j++) // loop till the end height
{
int ofset = j*width;
for(int i=0; i<=width-k; i++) // stop early so the k taps stay inside the row
{
int indx = i+ofset;
temp1 = 0, temp2 = 0, temp3 = 0; // reset accumulators per pixel
for(int ref=0; ref<k; ref++ )
{
int findx = indx + ref; // findx gives [(i+j*width)+ref]
temp1 += K[ref] * pixel_in[findx].r; // K[0]*in[0] + K[1]*in[1] + ...
temp2 += K[ref] * pixel_in[findx].g;
temp3 += K[ref] * pixel_in[findx].b;
}
pixel_mid[indx].r = temp1; // write the intermediate image
pixel_mid[indx].g = temp2;
pixel_mid[indx].b = temp3;
}
}
/***********************CONVOLUTION COLUMN WISE******************************/
for(int j=0; j<=height-k; j++) // stop early so the k taps stay inside the image
{
int ofset = j*width;
for(int i=0; i<=width-k; i++)
{
int indx = i+ofset;
temp1 = 0; temp2 = 0 ; temp3 = 0; // reset accumulators per pixel
for(int ref=0; ref<k; ref++ )
{
int findx = indx+(ref*width); // step one row per tap
temp1 += K[ref] * pixel_mid[findx].r;
temp2 += K[ref] * pixel_mid[findx].g;
temp3 += K[ref] * pixel_mid[findx].b;
}
pixel_out[indx].r = temp1; // final blurred pixel
pixel_out[indx].g = temp2;
pixel_out[indx].b = temp3;
}
}
stop = chrono::high_resolution_clock::now(); // record the stop point
chrono::milliseconds cpu_time; // elapsed CPU time in ms
cpu_time = chrono::duration_cast<chrono::milliseconds>(stop - start);
cout << "cpu time taken : " << cpu_time.count() << " ms" << endl;
/******************************WRITE FILE*************************************/
ofstream wfile("output_image_cpu.ppm", ios::binary); // CPU result image
wfile << type << endl; // write file type
wfile << width << " " << height << endl << range << endl; // write the header
unsigned char *out_buffer = new unsigned char[buffer_size]; // staging buffer for binary write
for(int i = 0; i < N; i++)
{
out_buffer[i*3+0] = (unsigned char)pixel_out[i].r;
out_buffer[i*3+1] = (unsigned char)pixel_out[i].g;
out_buffer[i*3+2] = (unsigned char)pixel_out[i].b;
}
wfile.write(reinterpret_cast<char *>(&out_buffer[0]), N*3);
wfile.close();
cout << "\ndone writing cpu image" << endl << endl;
delete[] out_buffer; // release the staging buffer
/******************************GPU KERNAL*************************************/
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop,0)); // query device limits
Pixel *pixel_gpu_in, *pixel_gpu_mid, *pixel_gpu_out; // device images
float *K_gpu; // device copy of the kernel
HANDLE_ERROR(cudaMalloc(&pixel_gpu_in , pixel_size));
HANDLE_ERROR(cudaMalloc(&pixel_gpu_mid, pixel_size));
HANDLE_ERROR(cudaMalloc(&pixel_gpu_out, pixel_size));
HANDLE_ERROR(cudaMalloc(&K_gpu, k*sizeof(float)));
HANDLE_ERROR(cudaMemset(pixel_gpu_in , 0, pixel_size)); // zero the device buffers
HANDLE_ERROR(cudaMemset(pixel_gpu_mid, 0, pixel_size));
HANDLE_ERROR(cudaMemset(pixel_gpu_out, 0, pixel_size));
HANDLE_ERROR(cudaMemset(K_gpu, 0, k*sizeof(float)));
memset(pixel_out, 0, pixel_size); // reuse pixel_out for the GPU result
HANDLE_ERROR(cudaMemcpy(pixel_gpu_in, pixel_in, pixel_size, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(K_gpu, K, k*sizeof(float), cudaMemcpyHostToDevice));
int blockDim = prop.maxThreadsDim[0]; // use max threads per block
int gridDim = N / blockDim + 1; // enough blocks to cover N pixels
cout << "blockDim : " << blockDim << endl;
cout << "gridDim : " << gridDim << endl;
cout << "Num threads : "<< blockDim * gridDim << endl << endl;
cudaEvent_t begin, end; // events for GPU timing
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaEventRecord(begin); // start timing
// Two separate kernels: __syncthreads() only syncs within a block, so the
// x and y passes must be split across launches.
convolution_gpu_x<<<gridDim, blockDim>>>(pixel_gpu_in, pixel_gpu_mid, K_gpu, width, height, k, N);
HANDLE_ERROR(cudaGetLastError()); // catch bad launch configurations
HANDLE_ERROR(cudaMemcpy(pixel_mid, pixel_gpu_mid, pixel_size, cudaMemcpyDeviceToHost)); // copy back the intermediate image
convolution_gpu_y<<<gridDim, blockDim>>>(pixel_gpu_mid, pixel_gpu_out, K_gpu, width, height, k, N);
HANDLE_ERROR(cudaGetLastError());
cudaEventRecord(end); // stop timing
cudaEventSynchronize(end); // wait until `end` has occurred
float gpu_time = 0; // elapsed GPU time in ms
cudaEventElapsedTime(&gpu_time, begin, end);
cout << "gpu time taken : " << gpu_time <<" ms includes one data copy back to the host" << endl << endl;
float speedup = (float)cpu_time.count() / (float)gpu_time;
cout <<"*********************************************************" << endl;
cout <<"Speed up of GPU over CPU : " << speedup << " times" << endl;
cout <<"*********************************************************" << endl;
HANDLE_ERROR(cudaMemcpy(pixel_out, pixel_gpu_out, pixel_size, cudaMemcpyDeviceToHost)); // copy back the final output
/******************************WRITE FILE*************************************/
ofstream gfile("output_image_gpu.ppm", ios::binary); // GPU result image
gfile << type << endl;
gfile << width << " " << height << endl << range << endl;
unsigned char *gpu_buffer = new unsigned char[buffer_size];
for(int i = 0; i < N; i++)
{
gpu_buffer[i*3+0] = (unsigned char)pixel_out[i].r;
gpu_buffer[i*3+1] = (unsigned char)pixel_out[i].g;
gpu_buffer[i*3+2] = (unsigned char)pixel_out[i].b;
}
gfile.write(reinterpret_cast<char *>(&gpu_buffer[0]), N*3);
gfile.close();
cout << "\ndone writing gpu image" << endl << endl;
// Release everything.  (The original `delete[] a, b, c;` is the comma
// operator and only deleted the first pointer; K_gpu is device memory and
// must go through cudaFree, not delete[].)
delete[] pixel_in;
delete[] pixel_mid;
delete[] pixel_out;
delete[] gpu_buffer;
delete[] K;
HANDLE_ERROR(cudaFree(pixel_gpu_in));
HANDLE_ERROR(cudaFree(pixel_gpu_mid));
HANDLE_ERROR(cudaFree(pixel_gpu_out));
HANDLE_ERROR(cudaFree(K_gpu));
cudaEventDestroy(begin);
cudaEventDestroy(end);
return 0;
}
|
20,435 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// adds array elements in a loop like normal
// Sequential CPU reference: element-wise sum c[i] = a[i] + b[i] for i in [0, count).
void addArrays(int* a, int* b, int* c, int count) {
    for (int idx = count; idx-- > 0; ) {
        c[idx] = a[idx] + b[idx];
    }
}
// simulates adding each element in a separate thread indexed by `i`
// Simulates a single kernel "thread": computes only element i of the sum.
void addArraysThread(int* a, int* b, int* c, int i) {
    *(c + i) = *(a + i) + *(b + i);
}
// CUDA kernel: each thread adds exactly one element pair.
// Launched as <<<1, count>>> below, so threadIdx.x alone is a valid index -
// this only covers a single block (no blockIdx term).
__global__ void addArraysCUDA(int* a, int* b, int* c) {
int i = threadIdx.x; // thread index available "globally"
c[i] = a[i] + b[i];
}
// Minimal CUDA vector-add walkthrough: copy two 5-element arrays to the
// device, launch one block of `count` threads, copy the sums back and print
// them.  Fix: the original never released the device allocations.
int main() {
    const int count = 5;
    const int size = count * sizeof(int);
    // `h` prefix: host (CPU) memory
    int ha[] = {1,2,3,4,5};
    int hb[] = {10,20,30,40,50};
    int hc[count];
    // `d` prefix: device (GPU) memory
    int *da, *db, *dc;
    // cudaMalloc writes the new device address INTO the pointer, hence &da.
    cudaMalloc(&da, size);
    cudaMalloc(&db, size);
    cudaMalloc(&dc, size);
    // copy inputs host -> device
    cudaMemcpy(da, ha, size, cudaMemcpyKind::cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, size, cudaMemcpyKind::cudaMemcpyHostToDevice);
    // launch 1 block with `count` threads
    addArraysCUDA<<<1, count>>>(da, db, dc);
    // copy result device -> host (this blocking copy also waits for the kernel)
    cudaMemcpy(hc, dc, size, cudaMemcpyKind::cudaMemcpyDeviceToHost);
    // print results
    for (int i=0; i < count; i++) {
        printf("%d ", hc[i]);
    }
    printf("\n");
    // release device memory (leaked in the original)
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
20,436 |
// Generates a micro-benchmark kernel named fences_kernel_<ID>: a load/store,
// then the given PTX fence instruction via inline asm, then a second
// load/store that overlaps the first.  Used to compare the cost/ordering
// behavior of the different fence scopes and semantics.
#define FENCE_KERNEL(ID,ASM_STR)\
extern "C" __global__ void fences_kernel_ ## ID(\
volatile float *OUT, volatile float *IN)\
{\
int id = blockDim.x * blockIdx.x + threadIdx.x;\
OUT[id] = IN[id+1] + 1.0f;\
asm(ASM_STR);\
OUT[id+1] = IN[id] + 2.0f;\
}
// same as .acq_rel (since that's default)
FENCE_KERNEL(fence__cta, "fence.cta;")
FENCE_KERNEL(fence__gpu, "fence.gpu;")
FENCE_KERNEL(fence__sys, "fence.sys;")
// sequentially-consistent variants
FENCE_KERNEL(fence_sc_cta, "fence.sc.cta;")
FENCE_KERNEL(fence_sc_gpu, "fence.sc.gpu;")
FENCE_KERNEL(fence_sc_sys, "fence.sc.sys;")
// explicit acquire-release variants
FENCE_KERNEL(fence_ar_cta, "fence.acq_rel.cta;")
FENCE_KERNEL(fence_ar_gpu, "fence.acq_rel.gpu;")
FENCE_KERNEL(fence_ar_sys, "fence.acq_rel.sys;")
// Same access pattern as the FENCE_KERNEL variants, but using the
// __threadfence() intrinsic (device-scope memory fence) between the two
// overlapping load/store pairs.
extern "C" __global__ void fences_kernel_threadfence(
volatile float *OUT, volatile float *IN)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
OUT[id] = IN[id+1] + 1.0f;
__threadfence();
OUT[id+1] = IN[id] + 2.0f;
}
// Same access pattern, but with __threadfence_block() (block-scope fence).
extern "C" __global__ void fences_kernel_threadfence_block(
volatile float *OUT, volatile float *IN)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
OUT[id] = IN[id+1] + 1.0f;
__threadfence_block();
OUT[id+1] = IN[id] + 2.0f;
}
// Same access pattern, but with __threadfence_system() (system-scope fence,
// ordering visible to the host and peer devices as well).
extern "C" __global__ void fences_kernel_threadfence_system(
volatile float *OUT, volatile float *IN)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
OUT[id] = IN[id+1] + 1.0f;
__threadfence_system();
OUT[id+1] = IN[id] + 2.0f;
}
|
20,437 | /*
* Copyright (c) 2016, Ville Timonen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those
* of the authors and should not be interpreted as representing official policies,
* either expressed or implied, of the FreeBSD Project.
*/
// Actually, there are no rounding errors due to results being accumulated in an arbitrary order..
// Therefore EPSILON = 0.0f is OK
#define EPSILON 0.001f
#define EPSILOND 0.0000001
// Stress-test consistency check (float): C holds `iters` copies of the same
// result laid out iterStep elements apart.  Each thread compares its element
// in copy 0 against every later copy and atomically adds the number of
// mismatches (difference > EPSILON) into *faultyElems.
extern "C" __global__ void compare(float *C, int *faultyElems, size_t iters) {
size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
gridDim.x*blockDim.x + // W
blockIdx.x*blockDim.x + threadIdx.x; // X
int myFaulty = 0;
for (size_t i = 1; i < iters; ++i)
if (fabsf(C[myIndex] - C[myIndex + i*iterStep]) > EPSILON)
myFaulty++;
atomicAdd(faultyElems, myFaulty);
}
// Double-precision variant of compare(): same layout and counting scheme,
// using fabs and the tighter EPSILOND tolerance.
extern "C" __global__ void compareD(double *C, int *faultyElems, size_t iters) {
size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
gridDim.x*blockDim.x + // W
blockIdx.x*blockDim.x + threadIdx.x; // X
int myFaulty = 0;
for (size_t i = 1; i < iters; ++i)
if (fabs(C[myIndex] - C[myIndex + i*iterStep]) > EPSILOND)
myFaulty++;
atomicAdd(faultyElems, myFaulty);
}
|
20,438 | #include "includes.h"
// Element-wise reciprocal: y[i] = 1.0 / x[i] for every i < size.
// Grid-stride loop (stride = blockDim.x * gridDim.x), so any launch
// configuration covers the whole array.
__global__ void cudaDinv_kernel(unsigned int size, const double *x, double *y)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    unsigned int i = first;
    while (i < size) {
        y[i] = 1.0 / x[i];
        i += step;
    }
}
20,439 | #include "includes.h"
// Scales each active patch's 6 gradient components by 1/sqrt(norm).
// Layout (from the indexing below): component j of patch p is stored at
// gradient[j*patches + p]; slot j == 6 holds the norm value.
// NOTE(review): when norm <= 0 the scale stays the RAW norm value, so the
// components are zeroed for norm == 0 (and sign-flipped/scaled for a
// negative norm) - presumably norm can never be negative; confirm upstream.
__global__ void normalizeGradient(float* gradient, int* activeMask, int activePatches, int patches)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= activePatches)
return;
int patch = activeMask[i];
float norm = gradient[6 * patches + patch];
if (norm > 0)
norm = 1.0f / sqrtf(norm);
for (int j = 0; j < 6; ++j)
gradient[j*patches + patch] *= norm;
}
20,440 | #include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#define MIN(a, b) (a<b?a:b)
#define BLOCK_SIZE 32
// Row-major matrix view over an int buffer.  `stride` is the row pitch of
// the UNDERLYING allocation, so cutMatrix can hand out BLOCK_SIZE x
// BLOCK_SIZE sub-views that alias the parent's storage.
// NOTE(review): the (h, w, stride) constructor leaves `el` uninitialized -
// callers must assign it (main uses cudaMallocManaged) before use.
struct Matrix {
int height;
int width;
int *el;
int stride;
__host__ __device__
Matrix(int height, int width, int stride ): height(height), width(width),stride(stride){}
__host__ __device__
Matrix(const Matrix &a): height(a.height), width(a.width),el(a.el),stride(a.stride){}
// Reads element (row, col).  NOTE(review): storage is int but the return
// type is float - the value is implicitly converted; confirm intended.
__device__
float getElement(int row, int col){
return el[row * stride + col];
}
__host__ __device__
void operator =(const Matrix &a){height = a.height; width = a.width; el = a.el; stride = a.stride;}
// Writes element (row, col) of this view.
__device__
void setElement(int row, int col, int val){
el[row * stride + col] = val;
}
// Returns a BLOCK_SIZE x BLOCK_SIZE sub-view whose top-left corner is tile
// (row, col); shares storage with (aliases) this matrix.
__device__
Matrix cutMatrix(int row, int col){
Matrix tmp(BLOCK_SIZE, BLOCK_SIZE, stride);
tmp.el = &el[stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return tmp;
}
// Prints the matrix.  NOTE(review): indexes with `width`, not `stride`, so
// this is only correct for a full (non-sub-view) matrix.
__host__
void writeOut(){
for(int i = 0; i < height; i++){
std::cout<<"| ";
for(int j = 0; j < width; j++){
std::cout<<el[i * width + j]<<" ";
}
std::cout<<"|"<<std::endl;
}
std::cout<<"\n";
}
};
// Tiled matrix multiply c = a * b using BLOCK_SIZE x BLOCK_SIZE shared-memory
// tiles.  Launch with blockDim = (BLOCK_SIZE, BLOCK_SIZE).
// Fix: the tile-load guards compared the LOCAL thread coordinates (row/col)
// against the matrix dimensions, so edge blocks read past the end of the
// buffers; the guards now test the GLOBAL element coordinates.
__global__
void MatrixMulKernel(Matrix a,Matrix b, Matrix c) {
int cutRow = blockIdx.y ;
int cutCol = blockIdx.x;
int fRow = blockIdx.y * blockDim.y + threadIdx.y; // global row in c
int fCol = blockIdx.x * blockDim.x + threadIdx.x; // global col in c
int row = threadIdx.y; // row within the tile
int col = threadIdx.x; // col within the tile
int temp = 0;
Matrix cutMatC = c.cutMatrix(cutRow, cutCol);
for( int v = 0; v < ((a.width + BLOCK_SIZE - 1)/BLOCK_SIZE); ++v){
Matrix cutMatA = a.cutMatrix(cutRow, v); // tile of a for this step
Matrix cutMatB = b.cutMatrix(v, cutCol); // tile of b for this step
__shared__ int A[BLOCK_SIZE][BLOCK_SIZE]; // tiles staged in shared memory
__shared__ int B[BLOCK_SIZE][BLOCK_SIZE];
// Guard with GLOBAL coordinates; out-of-range slots are zero-padded so
// they contribute nothing to the dot product.
if((fRow < a.height) && ((col + v * BLOCK_SIZE) < a.width)){
A[row][col] = cutMatA.getElement(row, col);
}
else{
A[row][col] = 0;
}
if((fCol < b.width) && ((row + v * BLOCK_SIZE) < b.height)){
B[row][col] = cutMatB.getElement(row, col);
}
else{
B[row][col] = 0;
}
__syncthreads(); // both tiles fully loaded before anyone reads them
for (int i = 0; i < BLOCK_SIZE; ++i){
temp += A[row][i] * B[i][col];
}
__syncthreads(); // all reads done before the tiles are overwritten
}
if(fRow < c.height && fCol < c.width)
c.setElement(fRow, fCol, temp);
}
// Multiplies two 37x37 managed-memory matrices on the GPU and prints the
// product (a is all 1s, g all 2s, so every entry of ag should be 2*N).
int main(){
int N = 37;
Matrix a(N, N, N), g(N, N, N), ag(N, N, N);
cudaError_t err = cudaSuccess;
dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE ,(N + BLOCK_SIZE - 1) / BLOCK_SIZE );
cudaMallocManaged(&a.el,N * N * sizeof(int));
cudaMallocManaged(&g.el, N * N * sizeof(int));
cudaMallocManaged(&ag.el, N * N * sizeof(int));
for(int i = 0; i < N; i++){
for(int j = 0; j<N; j++){
a.el[i*N+j] = 1;
g.el[i*N+j] = 2;
}
}
MatrixMulKernel<<<blocksPerGrid, threadsPerBlock>>>( a, g, ag);
cudaDeviceSynchronize();
// Fix: actually fetch the launch/execution status - the original never
// assigned err after its initialization, so this check could never fire.
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch MatrixMulKernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
ag.writeOut();
cudaFree(a.el);
cudaFree(g.el);
cudaFree(ag.el);
}
|
20,441 | #include<stdio.h>
// Prints a greeting tagged with the index of the stream that launched it.
__global__ void kernel(int i){
printf("hello world %d \n", i);
};
// Launches one tiny printf kernel on each of five independent streams
// (so they may execute concurrently), waits for all of them, then tears
// the streams down.  Fix: the stream array was allocated with new[] but
// never deleted.
int main(){
    int const n_stream = 5;
    cudaStream_t *ls_stream = new cudaStream_t[n_stream];
    for (int i = 0; i < n_stream; i++){
        cudaStreamCreate(&ls_stream[i]);
    }
    // Each launch targets its own stream.
    for(int i = 0; i < n_stream; i++){
        kernel<<<1, 1, 0, ls_stream[i]>>>(i);
    }
    cudaDeviceSynchronize();
    for(int i = 0; i < n_stream; i++){
        cudaStreamDestroy(ls_stream[i]);
    }
    delete[] ls_stream; // leaked in the original
    return 0;
}
|
20,442 | #include <stdio.h>
// Element-wise vector add: c[i] = a[i] + b[i].
// Block-stride loop: index/stride use only threadIdx.x/blockDim.x, so EVERY
// launched block walks the ENTIRE array and redundantly writes the same
// values.  Results are correct, but all blocks past the first do duplicate
// work - the commented-out lines show the intended grid-stride indexing.
__global__
void add_vectors(int *c, int *a, int *b, int n){
// printf("Add vectors function\n");
// printf("n value: %i \n", n);
int index = threadIdx.x;
int stride = blockDim.x;
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
// printf("%i \n", index);
for(int i = index; i < n; i += stride){
// printf("thead %i working with index: %i \n", index, i);
c[i] = a[i] + b[i];
}
}
// Parses <n> <threads-per-block> from the command line, adds two n-element
// vectors on the GPU, and releases all host and device memory.
// Fixes: cudaFree was called on the HOST pointers a/b/c (and d_a/d_b/d_c
// leaked); blocks = n/nt truncated and launched 0 blocks when n < nt;
// argv[1]/argv[2] were read without an argc check.
int main(int argc, char *argv[]){
    if(argc < 3){
        printf("usage: %s <n> <threads_per_block>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    int nt = atoi(argv[2]);
    // Ceiling division: enough blocks even when n is not a multiple of nt.
    int blocks = (n + nt - 1) / nt;
    printf("blocks: %i \n", blocks);
    printf("works: %i\n", n);
    printf("threads: %i \n", nt);
    // Host buffers
    int *a = (int *) malloc(sizeof(int)*n);
    int *b = (int *) malloc(sizeof(int)*n);
    int *c = (int *) malloc(sizeof(int)*n);
    // Device buffers
    int *d_a;
    int *d_b;
    int *d_c;
    cudaMalloc((void**)&d_a, sizeof(int) * n);
    cudaMalloc((void**)&d_b, sizeof(int) * n);
    cudaMalloc((void**)&d_c, sizeof(int) * n);
    // Initialize inputs
    for(int i = 0; i < n; ++i){
        a[i] = 1;
        b[i] = 2;
    }
    // Transfer inputs host -> device (d_c is fully overwritten by the
    // kernel, so the original's copy of the uninitialized c was dropped).
    cudaMemcpy(d_a, a, sizeof(int) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(int) * n, cudaMemcpyHostToDevice);
    // Launch kernel and wait for it
    add_vectors<<<blocks,nt>>>(d_c, d_a, d_b, n);
    cudaDeviceSynchronize();
    // Copy the result back
    cudaMemcpy(c, d_c, sizeof(int) * n, cudaMemcpyDeviceToHost);
    // Release device memory with cudaFree and host memory with free
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
// #include <stdio.h>
// #include <stdlib.h>
// #include <math.h>
// #include <assert.h>
// // #include <cuda.h>
// // #include <cuda_runtime.h>
// #define N 10000000
// #define MAX_ERR 1e-6
// __global__
// void vector_add(float *out, float *a, float *b, int n) {
// printf("running in GPU \n" );
// for(int i = 0; i < n; i ++){
// out[i] = a[i] + b[i];
// }
// }
// int main(){
// float *a, *b, *out;
// float *d_a, *d_b, *d_out;
// // Allocate host memory
// a = (float*)malloc(sizeof(float) * N);
// b = (float*)malloc(sizeof(float) * N);
// out = (float*)malloc(sizeof(float) * N);
// // Initialize host arrays
// for(int i = 0; i < N; i++){
// a[i] = 1.0f;
// b[i] = 2.0f;
// }
// // Allocate device memory
// cudaMalloc((void**)&d_a, sizeof(float) * N);
// cudaMalloc((void**)&d_b, sizeof(float) * N);
// cudaMalloc((void**)&d_out, sizeof(float) * N);
// // Transfer data from host to device memory
// cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
// // Executing kernel
// vector_add<<<1,10>>>(d_out, d_a, d_b, N);
// // Transfer data back to host memory
// cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
// // Verification
// for(int i = 0; i < N; i++){
// assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
// }
// printf("out[0] = %f\n", out[0]);
// printf("PASSED\n");
// // Deallocate device memory
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_out);
// // Deallocate host memory
// free(a);
// free(b);
// free(out);
// } |
20,443 | #include <cuda.h>
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#define PI 3.14159265358979323846
#define WINDOW 6
#define NUM_ITER 64
// 2D convolution of grayscale image A with a (2*window+1)^2 kernel B; taps
// falling outside the image are skipped (border pixels get a partial sum).
// The result is truncated to uint8_t and written to C.  Launch: thread.x
// spans image columns (width), thread.y spans image rows (height).
__global__
void conv2(float *A, float *B,uint8_t *C, int height, int width, int window){
    int x = threadIdx.x + blockIdx.x*blockDim.x;   // column (grid x covers width)
    int y = threadIdx.y + blockIdx.y*blockDim.y;   // row (grid y covers height)
    int i = x + y*width;                           // flat pixel index
    float sum = 0;
    if (i < height*width){
        for (int p = -window; p <= window; ++p){       // kernel rows
            for (int q = -window; q <= window; ++q){   // kernel columns
                int pixel = i + p*width + q;
                if ((pixel < 0) | (pixel >= height*width))   // off the top/bottom
                    continue;
                int xpos = i % width;
                if ((xpos + q < 0) | (xpos + q >= width))    // wrapped off a row edge
                    continue;
                sum += A[pixel]*B[(p+window)*(2*window+1)+(q+window)];
            }
        }
        C[i] = (uint8_t)((int)sum);
    }
}
// Host wrapper: copies the grayscale image and the (2*window+1)^2 kernel to
// the device, runs conv2 over every pixel, and copies the uint8 result back
// into finalimg (which is also returned for convenience).
// NOTE(review): no error checking; the blocking cudaMemcpy at the end is
// what synchronizes with the kernel.
extern uint8_t* launch_cuda(float *grayimg, float *scalekernel, uint8_t *finalimg, int height, int width, int window){
//initialize and allocate device variables
int winsize = (2*window + 1)*(2*window + 1);
float *d_gray, *d_scalekernel;
uint8_t *d_finalimg;
cudaMalloc((void**)&d_gray,height*width*sizeof(float));
cudaMalloc((void**)&d_scalekernel,winsize*sizeof(float));
cudaMalloc((void**)&d_finalimg,width*height*sizeof(uint8_t));
//copy inputs to device
cudaMemcpy(d_gray,grayimg,height*width*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_scalekernel,scalekernel,winsize*sizeof(float),cudaMemcpyHostToDevice);
//grid sized to cover the whole image (ceiling division per axis)
dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y);
dim3 dimGrid(ceil(width/(float)dimBlock.x),ceil(height/(float)dimBlock.y));
// one thread per pixel
conv2<<<dimGrid,dimBlock>>>(d_gray, d_scalekernel, d_finalimg, height, width, window);
//copy result to host (blocking; waits for the kernel)
cudaMemcpy(finalimg,d_finalimg,height*width*sizeof(uint8_t),cudaMemcpyDeviceToHost);
//free device buffers
cudaFree(d_gray);
cudaFree(d_scalekernel);
cudaFree(d_finalimg);
return finalimg;
}
|
20,444 | #include <iostream>
#include <chrono>
// Bandwidth micro-benchmark: copies an n-float buffer nbiter times and
// prints "<kilobytes> <bytes-per-second>".
// NOTE(review): d_array is allocated with cudaMallocHost, i.e. it is PINNED
// HOST memory, not device memory - despite the name and the
// cudaMemcpyHostToDevice kind, this measures host-to-pinned-host copies;
// confirm that is the intended benchmark.
// NOTE(review): argv[1]/argv[2] are read without an argc check.
int main(int argc, char *argv[])
{
int n = atoi(argv[1]); //TODO: atoi is an unsafe function
int nbiter = atoi(argv[2]);
float *array = new float[n];
for(int i = 0; i < n; ++i)
array[i] = 1.;
float *d_array;
cudaMallocHost((void **)&d_array, n * sizeof(float));
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
// cudaMemcpy is synchronous, so wall-clock timing brackets real work.
for(int iter = 0; iter < nbiter; ++iter)
cudaMemcpy(d_array, array, n * sizeof(float), cudaMemcpyHostToDevice);
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end - begin);
cudaFreeHost(d_array);
std::cout << n*sizeof(float)/1000 <<" "<< (n*sizeof(float))/(totaltime.count()*nbiter) << std::endl;
delete[] array;
return 0;
}
|
20,445 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define TAM 10
#define TAMBLOCK 2
// Sums the whole input vector into *dest.
// Only block 0 participates, and EVERY thread of block 0 redundantly
// computes the same full sum and writes the same value - correct but
// entirely serial (no parallel reduction).
// NOTE(review): mixes blockDim.x and the TAMBLOCK constant; they are equal
// under the launch in main (thread = TAMBLOCK) but this is fragile.
__global__ void reducirVector(float *dest, float *origin, int *BLOCKS){
if(blockIdx.x==0){
float counter=0.0f;
for(int i=0;i<*BLOCKS;++i){
for(int j=0;j<TAMBLOCK;++j){
counter+=origin[i*blockDim.x+j];
}
}
*dest=counter;
}
}
/* Fills the TAM-element vector with pseudo-random integers in [0, 100),
   stored as floats (consumes the global rand() stream). */
void inicializarVector(float *vector){
    for(float *p = vector; p != vector + TAM; ++p)
        *p = (float)(rand() % 100);
}
/* Prints the TAM elements, each followed by ", ", then a newline. */
void mostrarVector(float *vector){
    for(int idx = 0; idx < TAM; ++idx)
        printf("%f, ", vector[idx]);
    printf("\n");
}
// Fills a TAM-element vector with random values, sums it on the GPU and
// prints both the vector and the reduced value.
// Fix: d_block was never cudaFree'd.
int main() {
    int memsize = sizeof(float)*TAM;
    float *h_vector, *h_aux;
    h_vector = (float *) malloc(memsize);
    h_aux = (float *) malloc(memsize);
    inicializarVector(h_vector);
    mostrarVector(h_vector);
    float *d_vector, *d_aux;
    int *d_block;
    cudaMalloc(&d_vector, memsize);
    cudaMalloc(&d_aux, memsize);
    cudaMalloc(&d_block, sizeof(int));
    cudaMemcpy(d_vector, h_vector, memsize, cudaMemcpyHostToDevice);
    // One block per TAMBLOCK elements (ceiling division).
    int block = ceilf((float)TAM/TAMBLOCK);
    int thread = TAMBLOCK;
    cudaMemcpy(d_block, &block, sizeof(int), cudaMemcpyHostToDevice);
    reducirVector <<<block,thread>>> (d_aux, d_vector, d_block);
    // Blocking copy doubles as synchronization; only d_aux[0] is meaningful.
    cudaMemcpy(h_aux, d_aux, memsize, cudaMemcpyDeviceToHost);
    printf("Vector reducido, valor: %f\n", *(h_aux));
    free(h_vector);
    free(h_aux);
    cudaFree(d_vector);
    cudaFree(d_aux);
    cudaFree(d_block); // leaked in the original
    return 0;
}
|
20,446 | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cassert>
using namespace std;
// One explicit-Euler step per thread: advance position using the current
// velocity, then advance velocity using the acceleration. `box` is unused.
__global__ void euler1 (float2 *pos, float2* vel, float2 *acc, float dt, float box) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    const float2 v = vel[idx];
    const float2 a = acc[idx];
    pos[idx].x += v.x * dt;
    pos[idx].y += v.y * dt;
    vel[idx].x += a.x * dt;
    vel[idx].y += a.y * dt;
}
// CPU reference for euler1: one explicit-Euler step for all nParticles.
void Euler_Serial ( float2 *pos, float2* vel, float2 *acc, float dt, float box, int nParticles)
{
    for (int p = 0; p < nParticles; ++p) {
        const float2 v = vel[p];
        const float2 a = acc[p];
        pos[p].x += v.x * dt;
        pos[p].y += v.y * dt;
        vel[p].x += a.x * dt;
        vel[p].y += a.y * dt;
    }
}
// Aborts the process with `msg` if the last CUDA call left an error behind.
inline void checkCUDAError(const char *msg) {
    const cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg,
            cudaGetErrorString( err) );
    exit(EXIT_FAILURE);
}
/*
 * Benchmarks the Euler step on GPU (euler1) against the CPU (Euler_Serial)
 * over nTests iterations and reports per-step times and the speedup.
 */
int main() {
    cudaEvent_t start, stop;
    float time1, time2;
    int nTests = 99999;
    float box = 40.0f;
    float dt =0.0001f;
    int nBlocks = 49;
    int nThreads = 192;
    int N=nBlocks*nThreads;
    int buffer_size = N*sizeof(float2);
    srand(time(NULL));
    // Heap allocations replace the original runtime-sized stack arrays
    // (a non-standard VLA extension, ~450 KB of stack in total).
    float2 *pos = new float2[N];
    float2 *vel = new float2[N];
    float2 *acc = new float2[N];
    float2 *posserial = new float2[N];
    float2 *velserial = new float2[N];
    float2 *accserial = new float2[N];
    for (int i=0; i<N; i++){
        pos[i].x = 5*(float)rand()/RAND_MAX;
        pos[i].y = 5*(float)rand()/RAND_MAX;
        vel[i].x = 5*(float)rand()/RAND_MAX;
        vel[i].y = 5*(float)rand()/RAND_MAX;
        acc[i].x = (float)rand()/RAND_MAX;
        acc[i].y = (float)rand()/RAND_MAX;
    }
    // Keep an identical copy for the serial run so both paths see the
    // same initial conditions.
    for (int i=0; i<N; i++){
        posserial[i] = pos[i];
        velserial[i] = vel[i];
        accserial[i] = acc[i];
    }
    //Declares the CUDA pointers
    float2 *pos_d=0, *vel_d=0, *acc_d=0;
    //Allocates the device pointers
    cudaMalloc((void**)&pos_d,buffer_size);
    cudaMalloc((void**)&vel_d,buffer_size);
    cudaMalloc((void**)&acc_d,buffer_size);
    //Copies the contents of the vectors to the device
    cudaMemcpy( pos_d, pos, buffer_size, cudaMemcpyHostToDevice );
    cudaMemcpy( vel_d, vel, buffer_size, cudaMemcpyHostToDevice );
    cudaMemcpy( acc_d, acc, buffer_size, cudaMemcpyHostToDevice );
    //Creates the timing events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // --- GPU timing ---
    cudaEventRecord(start, 0);
    checkCUDAError("Start1");
    for (int i=0; i<nTests; i++){
        euler1 <<<nBlocks, nThreads>>> (pos_d, vel_d, acc_d, dt, box);
        checkCUDAError("Euler1");
    }
    cudaEventRecord( stop, 0 );
    checkCUDAError("Stop1");
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time1, start, stop );
    checkCUDAError("Time1");   // label fixed: was "Time2" for both sections
    // --- CPU timing --- (the pointless GPU-error check inside the serial
    // loop was removed; it could only report stale GPU errors)
    cudaEventRecord(start, 0);
    checkCUDAError("Start2");
    for (int i=0; i<nTests; i++){
        Euler_Serial (posserial, velserial, accserial, dt, box, N);
    }
    cudaEventRecord( stop, 0 );
    checkCUDAError("Stop2");
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time2, start, stop );
    checkCUDAError("Time2");
    cout <<"Tempo em CUDA: " << time1/nTests << "ms\n";
    cout <<"Tempo Serial: " << time2/nTests << "ms\n";
    cout <<"Aumento de " << time2/time1 << "x\n";
    // Release GPU and host resources (all were leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(pos_d);
    cudaFree(vel_d);
    cudaFree(acc_d);
    delete[] pos; delete[] vel; delete[] acc;
    delete[] posserial; delete[] velserial; delete[] accserial;
    return 0;
}
|
20,447 | #include "includes.h"
#define FIBER 32
#define MATRIX_SIZE 2048
#define DATA_SIZE MATRIX_SIZE * MATRIX_SIZE * sizeof(int)
#define MAX_MATRIX_SIZE (MATRIX_SIZE * MATRIX_SIZE)
using namespace std;
// Computes result = (A + A) * B^T - C element-wise, staging a FIBER x FIBER
// tile of B through shared memory and reading it back transposed.
// Assumes blockDim == (FIBER, FIBER) so the tile indices line up with
// threadIdx — TODO confirm against the launch configuration (not visible
// in this file).
__global__ void kernel_shared(int *A, int *C, int *B, int *result) {
    __shared__ int shared_memory[FIBER][FIBER];
    // First index pair: threadIdx.y walks rows, .x walks columns, so the
    // B load below is row-contiguous per warp.
    int i = blockIdx.x * blockDim.x + threadIdx.y;
    int j = blockIdx.y * blockDim.y + threadIdx.x;
    shared_memory[threadIdx.y][threadIdx.x] = B[i * MATRIX_SIZE + j];
    // Barrier: every element of the tile must be written before any thread
    // reads it back with swapped indices below.
    __syncthreads();
    // Re-derive indices with .x/.y swapped relative to the load phase.
    i = blockIdx.x * blockDim.x + threadIdx.x;
    j = blockIdx.y * blockDim.y + threadIdx.y;
    int first_index = i + j * MATRIX_SIZE;
    int second_index = j + i * MATRIX_SIZE;
    // NOTE(review): the B load above has no bounds check, so the grid must
    // tile MATRIX_SIZE exactly; only the store is guarded here.
    if (first_index < MAX_MATRIX_SIZE && second_index < MAX_MATRIX_SIZE)
    {
        // smem is read with [x][y] after being written with [y][x]: the
        // in-tile transpose of B.
        result[first_index] = (A[first_index] + A[first_index]) * shared_memory[threadIdx.x][threadIdx.y] - C[first_index];
    }
}
extern "C"
__global__
void sigmoid(float* a, int n)
{
    // In-place element-wise logistic: a[i] = 1 / (e^(-a[i]) + 1).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    a[idx] = 1.0f / (expf(-a[idx]) + 1.0f);
}
20,449 |
// Doubles the given value.
__device__ float
multiplyByTwo(float number){
    return 2.0f * number;
}
// Halves the given value (division by the exactly-representable 2.0f is
// bit-identical to multiplying by 0.5f).
__device__ float
divideByTwo(float number){
    return number / 2.0f;
}
|
20,450 | #include "includes.h"
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// Element-wise C = A + B over N elements.
// The original ignored N entirely, so any thread past the array end wrote
// out of bounds whenever the grid over-covered N.
__global__ void vecAdd(int *A,int *B,int *C,int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}
20,451 | // Copyright (c) 2019-2020, NVIDIA CORPORATION.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
// SOSFILT //
///////////////////////////////////////////////////////////////////////////////
constexpr int sos_width = 6;
// In-place cascaded second-order-section (biquad) filter, direct form II
// transposed. One block per signal (blockIdx.x), one thread per section
// (threadIdx.x); sections are pipelined through the shared-memory ring
// s_out, so sample n leaves the cascade load_size samples after it enters.
// s_buffer layout: [0, n_sections) = s_out, then n_sections*sos_width
// coefficients.
template<typename T>
__device__ void _cupy_sosfilt( const int n_signals,
                               const int n_samples,
                               const int n_sections,
                               const int zi_width,
                               const T *__restrict__ sos,
                               const T *__restrict__ zi,
                               T *__restrict__ x_in,
                               T *s_buffer ) {
    T *s_out { s_buffer };
    T *s_sos { reinterpret_cast<T *>( &s_out[n_sections] ) };
    const int tx { static_cast<int>( threadIdx.x ) };
    const int bx { static_cast<int>( blockIdx.x ) };
    // Reset shared memory
    s_out[tx] = 0;
    // Load SOS
    // b is in s_sos[tx * sos_width + [0-2]]
    // a is in s_sos[tx * sos_width + [3-5]]
#pragma unroll sos_width
    for ( int i = 0; i < sos_width; i++ ) {
        s_sos[tx * sos_width + i] = sos[tx * sos_width + i];
    }
    // NOTE(review): this barrier is commented out upstream. Each thread only
    // reads the s_sos entries it wrote itself, and every s_out read below is
    // preceded by a __syncthreads — presumably why skipping it is safe;
    // confirm before reordering any shared-memory accesses.
    // __syncthreads( );
    // Per-section filter state (two delay registers per section).
    T zi0 = zi[bx * n_sections * zi_width + tx * zi_width + 0];
    T zi1 = zi[bx * n_sections * zi_width + tx * zi_width + 1];
    const int load_size { n_sections - 1 };
    const int unload_size { n_samples - load_size };
    T temp {};
    T x_n {};
    if ( bx < n_signals ) {
        // Loading phase: prime the pipeline; no output is written yet.
        for ( int n = 0; n < load_size; n++ ) {
            __syncthreads( );
            // Section 0 consumes raw input; section k consumes section
            // k-1's output from the previous step.
            if ( tx == 0 ) {
                x_n = x_in[bx * n_samples + n];
            } else {
                x_n = s_out[tx - 1];
            }
            // Use direct II transposed structure
            temp = s_sos[tx * sos_width + 0] * x_n + zi0;
            zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1;
            zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp;
            s_out[tx] = temp;
        }
        // Processing phase: pipeline full; the last section writes its
        // output back into x_in (shifted left by the pipeline latency).
        for ( int n = load_size; n < n_samples; n++ ) {
            __syncthreads( );
            if ( tx == 0 ) {
                x_n = x_in[bx * n_samples + n];
            } else {
                x_n = s_out[tx - 1];
            }
            // Use direct II transposed structure
            temp = s_sos[tx * sos_width + 0] * x_n + zi0;
            zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1;
            zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp;
            if ( tx < load_size ) {
                s_out[tx] = temp;
            } else {
                x_in[bx * n_samples + ( n - load_size )] = temp;
            }
        }
        // Unloading phase: drain the remaining load_size samples out of
        // the pipeline, retiring one section per iteration.
        for ( int n = 0; n < n_sections; n++ ) {
            __syncthreads( );
            // retire threads that are less than n
            if ( tx > n ) {
                x_n = s_out[tx - 1];
                // Use direct II transposed structure
                temp = s_sos[tx * sos_width + 0] * x_n + zi0;
                zi0 = s_sos[tx * sos_width + 1] * x_n - s_sos[tx * sos_width + 4] * temp + zi1;
                zi1 = s_sos[tx * sos_width + 2] * x_n - s_sos[tx * sos_width + 5] * temp;
                if ( tx < load_size ) {
                    s_out[tx] = temp;
                } else {
                    x_in[bx * n_samples + ( n + unload_size )] = temp;
                }
            }
        }
    }
}
// float32 entry point: forwards to the templated SOS filter using the
// dynamically-sized shared-memory buffer supplied at launch.
extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_sosfilt_float32( const int n_signals,
                                                                            const int n_samples,
                                                                            const int n_sections,
                                                                            const int zi_width,
                                                                            const float *__restrict__ sos,
                                                                            const float *__restrict__ zi,
                                                                            float *__restrict__ x_in ) {
    extern __shared__ float smem_f32[];
    _cupy_sosfilt<float>( n_signals, n_samples, n_sections, zi_width, sos, zi, x_in, smem_f32 );
}
// float64 entry point: forwards to the templated SOS filter using the
// dynamically-sized shared-memory buffer supplied at launch.
extern "C" __global__ void __launch_bounds__( 1024 ) _cupy_sosfilt_float64( const int n_signals,
                                                                            const int n_samples,
                                                                            const int n_sections,
                                                                            const int zi_width,
                                                                            const double *__restrict__ sos,
                                                                            const double *__restrict__ zi,
                                                                            double *__restrict__ x_in ) {
    extern __shared__ double smem_f64[];
    _cupy_sosfilt<double>( n_signals, n_samples, n_sections, zi_width, sos, zi, x_in, smem_f64 );
}
|
20,452 | #include <iostream>
#include <cuda.h>
#include <chrono>
using namespace std;
// Out-of-place matrix transpose, one element per thread.
// NOTE(review): the caller passes (row, col) = (sz_y, sz_x) and this
// program only uses square matrices, where the mapping below is correct;
// verify the row/col roles before reusing on non-square input. There is no
// bounds check, so the grid must tile the matrix exactly.
__global__ void transpose(double *in_d, double * out_d, int row, int col)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    out_d[y+col*x] = in_d[x+row*y];
}
/*
 * Times a GPU matrix transpose against an in-place CPU transpose, then
 * verifies the round trip restores the original values.
 */
int main(int argc,char **argv)
{
    int sz_x=32*300;
    int sz_y=32*300;
    int nBytes = sz_x*sz_y*sizeof(double);
    int block_size;
    double *m_h = (double *)malloc(nBytes);
    if (m_h == NULL) {   // 69 MB allocation can fail
        std::cout << "Host allocation failed\n";
        return 1;
    }
    double * in_d;
    double * out_d;
    int count = 0;
    // Fill with 0..N-1 so a transpose-then-transpose check is trivial.
    for (int i=0; i < sz_x*sz_y; i++){
        m_h[i] = count;
        count++;
    }
    std::cout << "Allocating device memory on host..\n";
    auto start_d = std::chrono::high_resolution_clock::now();
    cudaMalloc((void **)&in_d,nBytes);
    cudaMalloc((void **)&out_d,nBytes);
    //Set up blocks (sizes are multiples of 32, so the grid tiles exactly)
    block_size=32;
    dim3 dimBlock(block_size,block_size,1);
    dim3 dimGrid(sz_x/block_size,sz_y/block_size,1);
    std::cout << "Doing GPU Transpose\n";
    cudaMemcpy(in_d,m_h,nBytes,cudaMemcpyHostToDevice);
    transpose<<<dimGrid,dimBlock>>>(in_d,out_d,sz_y,sz_x);
    // Blocking D2H copy doubles as synchronization for the timing below.
    cudaMemcpy(m_h,out_d,nBytes,cudaMemcpyDeviceToHost);
    auto end_d = std::chrono::high_resolution_clock::now();
    std::cout << "Doing CPU Transpose\n";
    auto start_h = std::chrono::high_resolution_clock::now();
    // In-place transpose of the GPU result; should restore the original.
    for (int y=0; y < sz_y; y++){
        for (int x=y; x < sz_x; x++){
            double temp = m_h[x+sz_x*y];
            m_h[x+sz_x*y] = m_h[y+sz_y*x];
            m_h[y+sz_y*x] = temp;
        }
    }
    auto end_h = std::chrono::high_resolution_clock::now();
    //Checking errors (should be same values as start)
    count = 0;
    int errors = 0;
    for (int i=0; i < sz_x*sz_y; i++){
        if (m_h[i] != count)
            errors++;
        count++;
    }
    std::cout << errors << " Errors found in transpose\n";
    //Print Timing
    std::chrono::duration<double> time_d = end_d - start_d;
    std::cout << "Device time: " << time_d.count() << " s\n";
    std::chrono::duration<double> time_h = end_h - start_h;
    std::cout << "Host time: " << time_h.count() << " s\n";
    cudaFree(in_d);
    cudaFree(out_d);
    free(m_h);   // was leaked in the original
    return 0;
}
20,453 | #include "includes.h"
// Misaligned-write benchmark kernel, 4x unrolled: reads A/B aligned at i
// and writes C at i + offset, striding by blockDim.x between the four
// unrolled elements. The single guard covers the furthest write.
__global__ void writeOffsetUnroll4(float *A, float *B, float *C, const int n, int offset)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int k = i + offset;
    const unsigned int stride = blockDim.x;
    if (k + 3 * stride < n)
    {
        C[k]              = A[i]              + B[i];
        C[k + stride]     = A[i + stride]     + B[i + stride];
        C[k + 2 * stride] = A[i + 2 * stride] + B[i + 2 * stride];
        C[k + 3 * stride] = A[i + 3 * stride] + B[i + 3 * stride];
    }
}
20,454 | /*
rkrish11 Rahul Krishna
*/
#include "cuda.h"
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define SEED 35791246
// Seeds one curandState per thread (same seed, per-thread subsequence).
// NOTE(review): the file defines SEED (35791246) but the literal 1337 is
// used here — presumably SEED was intended; confirm before changing, as it
// alters the random stream.
__global__ void init_stuff(curandState *state, int count) {
    // This sets a random number seed for all the threads
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx<count)
        curand_init(1337, idx, 0, &state[idx]);
}
// Monte-Carlo step: each thread draws one (x, y) point in the unit square
// and records 1 in pi[id] if it lands inside the quarter circle.
// NOTE(review): the summation loop below is executed by EVERY thread and
// reads pi[] entries written by other blocks; __syncthreads() only
// synchronizes within a block, so this is a grid-wide data race and pi[0]
// is not reliable. The reduction belongs on the host or in a separate
// kernel — flagged here rather than fixed to keep the interface intact.
__global__ void cudaMonte(double* pi, int count, curandState* state) {
    // Perfome MC simulation on the threads
    int id=blockIdx.x*blockDim.x+threadIdx.x;
    double x,y,z;
    if (id<count) {
        x = (double)curand_uniform(&state[id]);
        y = (double)curand_uniform(&state[id]);
        z = x*x+y*y;
        if (z<=1) pi[id]=1;
        else pi[id]=0;
    }
    __syncthreads();
    // Find the total number of points that lie inside the quadrant of the cirle
    for (int i=1; i<count;i++) {
        pi[0]+=pi[i];
    }
}
/*
 * Estimates pi by GPU Monte Carlo: niter samples, one per thread.
 */
int main(int argc, char** argv) {
    int niter=0;
    double pi;
    double* d_pi;
    curandState *d_state;
    printf("Enter the number of iterations used to estimate pi: ");
    // Validate input: an unchecked scanf left niter indeterminate.
    if (scanf("%d",&niter) != 1 || niter <= 0) {
        printf("Invalid iteration count.\n");
        return 0;
    }
    // Zero-initialise so the device tally starts clean (the original
    // copied uninitialised host memory to the device).
    double* h_pi = new double[niter]();
    // sizeof(double): the original allocated and copied sizeof(int)*niter
    // for a double array — only half the buffer was ever transferred.
    if (cudaMalloc(&d_pi, sizeof(double)*niter) != cudaSuccess) {
        printf("Error in memory allocation.\n");
        delete[] h_pi;
        return 0;
    }
    if (cudaMalloc(&d_state, sizeof(curandState)*niter) != cudaSuccess) {
        printf("Error in memory allocation for random state.\n");
        delete[] h_pi;
        cudaFree(d_pi);
        return 0;
    }
    if (cudaMemcpy (d_pi, h_pi, sizeof(double)*niter, cudaMemcpyHostToDevice) != cudaSuccess) {
        printf("Error in copy from host to device.\n");
        delete[] h_pi;
        cudaFree(d_pi);
        cudaFree(d_state);
        return 0;
    }
    // Number of threads = 1024, number of blocks = (int) (niter/threads)+1
    init_stuff<<<(int) niter/1024+1, 1024>>>(d_state, niter);
    cudaMonte<<<(int) niter/1024+1, 1024>>>(d_pi, niter, d_state);
    if (cudaMemcpy (h_pi, d_pi, sizeof(double)*niter, cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("Error in copy from device to host.\n");
        delete[] h_pi;
        cudaFree(d_pi);
        cudaFree(d_state);
        return 0;
    }
    // Final Estimate of pi (kernel leaves the inside-count in pi[0])
    pi= (double) h_pi[0]/niter*4;
    printf("# of trials= %d , estimate of pi is %g \n",niter,pi);
    delete[] h_pi;      // was leaked
    cudaFree(d_pi);     // was leaked on the success path
    cudaFree(d_state);  // was never freed
    return 0;
}
|
20,455 | #include "includes.h"
// Copies a 2D region from srcStart into destStart transposed, staging a
// COPY_BLOCK_SIZE tile through padded shared memory (the +1 column avoids
// shared-memory bank conflicts on the transposed read).
// NOTE(review): the original bounds check is commented out, so the launch
// grid must tile srcCopyWidth/srcCopyHeight exactly — confirm at call
// sites. Also note smem is written [x][y] and read [y][x] while the
// usual transpose idiom writes [y][x]; the net result still swaps block
// coordinates via destWriteIdx, but verify against a reference transpose
// before reuse.
__global__ void kCopyToTransDestFast(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcCopyHeight, unsigned int srcJumpSize, unsigned int destJumpSize) {
    // const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
    // const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
    // if(idxX < srcCopyWidth && idxY < srcCopyHeight) {
    // Row-major read from the source; jump sizes are the row pitches.
    const unsigned int srcReadIdx = (blockIdx.y * blockDim.y + threadIdx.y) * srcJumpSize + blockIdx.x * blockDim.x + threadIdx.x;
    // Destination index with block x/y swapped: the transposed position.
    const unsigned int destWriteIdx = (blockIdx.x * blockDim.x + threadIdx.y) * destJumpSize + blockIdx.y * blockDim.y + threadIdx.x;
    __shared__ float smem[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE + 1];
    smem[threadIdx.x][threadIdx.y] = srcStart[srcReadIdx];
    __syncthreads();
    destStart[destWriteIdx] = smem[threadIdx.y][threadIdx.x];
    // }
}
20,456 | #include "add.cuh"
/**
* CUDA kernel
*/
/**
 * CUDA kernel: element-wise c[i] = a[i] + b[i], one element per thread
 * (single-block launch; threadIdx.x is the element index).
 */
__global__ void kernel(int* a, int* b, int* c) {
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
/**
* Function adds two numbers and stores the result in c
*/
/**
 * Function adds two numbers and stores the result in c:
 * copies *a and *b to the device, runs a single-thread kernel, and
 * copies the sum back into *c.
 */
void addTwoNum(int* a, int* b, int* c) {
    int* d_a, * d_b, * d_c;
    cudaMalloc((void**)&d_a, sizeof(int));
    cudaMalloc((void**)&d_b, sizeof(int));
    cudaMalloc((void**)&d_c, sizeof(int));
    cudaMemcpy(d_a, a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(int), cudaMemcpyHostToDevice);
    // d_c is write-only on the device, so the original host->device copy
    // of c was a wasted transfer and has been removed.
    kernel << <1, 1 >> > (d_a, d_b, d_c);
    cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
20,457 | /*
This program is written to find the nearest neighbour of each point in 3 deminsional
space by implementing the brute force algorithm.
The brute force approach can easily be converted into a embarassingly parallel algorithm for
the GPU where there is no interaction between the threads.
Benchmarking is done to compare the CPU and GPU computational approaches to the problem.
*/
/*
Note that there is a considerable dependency of the ratio of execution times of the CPU and GPU on the
hardware which is being used to execute the run the program.
*/
// Importing the required headers
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<time.h>
struct position
{
int x,y,z; //odd number of parameters in the structure helps reducing bank conflicts in shared memory(if used).
};
// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
double t;
t = (end->tv_sec - start->tv_sec); // diff in seconds
t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
return t;
}
// GPU Kernel
// GPU brute-force nearest neighbour: one thread per query point i, writes
// the index of the closest other point to nearest[i].
// Fixes vs original: squared distances are computed in 64-bit (coordinates
// reach 100000, so a squared difference up to ~1e10 overflowed int), and
// the 1<<22 sentinel — smaller than many real squared distances, which
// could leave nearest[i] unset — is replaced by a "no candidate yet" flag.
__global__ void GPU_Find(struct position *points, int *nearest, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i >= n) return;
    long long best = -1;   // -1 = no candidate seen yet
    for(int j = 0; j < n; j++)
    {
        if(i == j) continue;
        long long dx = points[i].x - points[j].x;
        long long dy = points[i].y - points[j].y;
        long long dz = points[i].z - points[j].z;
        long long d2 = dx*dx + dy*dy + dz*dz;
        if(best < 0 || d2 < best)
        {
            best = d2;
            nearest[i] = j;
        }
    }
    return;
}
// CPU Function
// CPU brute-force nearest neighbour (reference for GPU_Find).
// Fixes vs original: the squared distance overflowed int before sqrt ever
// ran; distances are now 64-bit and the monotonic sqrt is dropped (it
// cannot change the argmin, and its (int) truncation only blurred
// near-ties). The sentinel is a "no candidate yet" flag instead of 1<<22.
void CPU_Find(struct position *points, int *nearest, int n)
{
    for(int i = 0; i < n; i++)
    {
        long long best = -1;   // -1 = no candidate seen yet
        for(int j = 0; j < n; j++)
        {
            if(i == j) continue;
            long long dx = points[i].x - points[j].x;
            long long dy = points[i].y - points[j].y;
            long long dz = points[i].z - points[j].z;
            long long d2 = dx*dx + dy*dy + dz*dz;
            if(best < 0 || d2 < best)
            {
                best = d2;
                nearest[i] = j;
            }
        }
    }
    return;
}
// Code execution begins here
// Code execution begins here: benchmarks GPU vs CPU nearest-neighbour
// search over n random points in unified memory.
int main()
{
    struct timespec start1, end1;
    struct timespec start2, end2;
    int n;
    printf("Enter the value of n: ");
    // Validate input: the original used n uninitialised if scanf failed.
    if(scanf("%d", &n) != 1 || n <= 0)
    {
        printf("Invalid value of n.\n");
        return 1;
    }
    struct position *points;
    int *nearest1;
    int *nearest2;
    cudaMallocManaged(&points, n*sizeof(struct position));
    cudaMallocManaged(&nearest1, n*sizeof(int));
    cudaMallocManaged(&nearest2, n*sizeof(int));
    for(int i = 0; i < n; i++)
    {
        points[i].x = rand()%100000;
        points[i].y = rand()%100000;
        points[i].z = rand()%10000;
        nearest1[i] = -1;
        nearest2[i] = -1;
    }
    clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
    GPU_Find<<<(n/128+1), 128>>>(points, nearest1, n);
    cudaDeviceSynchronize();    // kernel launch is async; wait before timing
    clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
    clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
    CPU_Find(points, nearest2, n);
    clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
    printf("\nTime taken by GPU is: %lf\n", time_elapsed(&start1, &end1)); //print result for GPU
    printf("Time taken by CPU is: %lf\n", time_elapsed(&start2, &end2)); //print result for CPU
    cudaFree(points);
    cudaFree(nearest1);
    cudaFree(nearest2);
    return 0;
}
/*
The results obtained by the CPU and GPU may differ. Why so?
*/
|
20,458 | #include "motionPlanningSolution.cuh"
// Container for the result of one motion-planning query.
typedef struct MotionPlanningSolution {
    std::vector<int> path; // list of path indexes
    float cost;  // NOTE(review): presumably the accumulated path cost — confirm with the solver
    float cp;    // NOTE(review): meaning not evident from this file — confirm (collision probability?)
    float time;  // NOTE(review): presumably solve time — units not evident here, confirm
} MotionPlanningSolution;
20,459 | #include <math.h>
// Euclidean distance between (x1, y1) and (x2, y2).
__device__ double dist(double x1, double y1, double x2, double y2){
    const double dx = x1 - x2;
    const double dy = y1 - y2;
    return sqrt(dx*dx + dy*dy);
}
// Smoke-test kernel: one distance computation per block.
// NOTE(review): the argument order passes points (xs[i], 1.0) and
// (ys[i], 1.0), i.e. b = |xs[i] - ys[i]| — possibly dist(xs[i], ys[i],
// 1.0, 1.0) was intended; confirm against whatever checks b.
__global__ void testKernel(double *xs, double *ys, double *b){
    b[blockIdx.x] = dist(xs[blockIdx.x], 1.0, ys[blockIdx.x], 1.0);
}
/* r^3 */
/* Radial basis function phi(r) = r^3 for the two given points. */
__device__ double rbf(double x1, double y1, double x2, double y2){
    const double r = dist(x1, y1, x2, y2);
    return pow(r, 3);
}
/* 6*r */
/* Second radial derivative of r^3, i.e. 6*r, for the two given points. */
__device__ double rbfd2(double x1, double y1, double x2, double y2){
    const double r = dist(x1, y1, x2, y2);
    return 6*r;
}
/* Gaussian Elimination */
__device__ void gauss_elim(double *A, double *b, double *x, int n){
int i, j, k;
int idxi, idxj, idxij, idxik, idxjk;
double m, diff;
// Swap first and second rows
int r1 = 0;
int r2 = 1;
double mtemp, vtemp;
int idx1;
int idx2;
for (i = 0; i < n; ++i)
{
// matrix swap
idx1 = r1*n + i;
idx2 = r2*n + i;
mtemp = A[idx1];
A[idx1] = A[idx2];
A[idx2] = mtemp;
}
// RHS vector swap
vtemp = b[1];
b[1] = b[0];
b[0] = vtemp;
// Gauss-Jordan Forward Elimination to Upper triangular matrix
for (j = 0; j < n-1; j++){
for (i = j+1; i < n; i++){
idxij = i*n + j;
idxj = j*n + j;
m = A[idxij]/A[idxj];
for (k = 0; k < n; k++){
idxik = i*n + k;
idxjk = j*n + k;
A[idxik] = A[idxik] - m*A[idxjk];
}
b[i] = b[i] - m*b[j];
}
}
// Back substituion
for (i = n-1; i >= 0; i--){
diff = b[i];
for (j = i+1; j < n; j++){
idxij = i*n + j;
diff = diff - x[j]*A[idxij];
}
idxi = i*n + i;
x[i] = diff/A[idxi];
}
}
/* Assembles the augmented RBF-FD system for stencil point k:
     [ A  P ] [w]   [RHS]
     [ P' 0 ] [c] = [ 0 ]
   A is the r^3 RBF matrix over the l stencil neighbours, P the polynomial
   constraint block up to total degree `deg` (pdim terms), and RHS carries
   the target operator applied to the RBF (rbfd2 = 6r). nn holds neighbour
   index lists with stride l_max; neighbour 0 is the stencil centre.
   BUG FIX: the P-block y-coordinate read nn[k+l_max+0] (addition) instead
   of nn[k*l_max+0], taking the centre y from the wrong stencil. */
__device__ void build_stencil_matrix(double* xs, double* ys,
        int* nn, double* full_mat1, double* RHS1,
        int l_max, int l, int deg, int k){
    int pdim = (deg+1)*(deg+2)/2;   // number of polynomial basis terms
    int i, j;
    // Make matrix 0
    for(i = 0; i < l + pdim; i++){
        for(j = 0; j < l + pdim; j++){
            full_mat1[i*(l+pdim) + j] = 0.0;
        }
    }
    // Build A (pairwise RBF values) and the zero lower-right block
    for(i = 0; i < l + pdim; i++){
        for(j = 0; j < l + pdim; j++){
            if(i < l && j < l){
                full_mat1[i*(l+pdim)+j] = rbf(
                    xs[nn[k*l_max+i]], ys[nn[k*l_max+i]],
                    xs[nn[k*l_max+j]], ys[nn[k*l_max+j]]);
            }
            else if(i >= l && j>= l){
                full_mat1[i*(l+pdim) + j] = 0.0;
            }
        }
    }
    // Build P matrix: monomials (x-x0)^xp * (y-y0)^yp centred on the
    // stencil centre, enumerated from highest total degree downward.
    int d = deg;
    int xp = 0;
    int yp = d;
    for(j = l+pdim - 1; j >= l; j--){
        for(i = 0; i < l; i++){
            full_mat1[i*(l+pdim) + j] =
                pow(xs[nn[k*l_max+i]] - xs[nn[k*l_max+0]], xp) *
                pow(ys[nn[k*l_max+i]] - ys[nn[k*l_max+0]], yp);   // was nn[k+l_max+0]
        }
        if(yp - 1 < 0){   // exhausted this total degree; drop to the next
            --d;
            yp = d;
            xp = 0;
        }
        else{
            xp++;
            yp--;
        }
    }
    // Build P transpose matrix by mirroring the P block
    d = deg;
    xp = 0;
    yp = d;
    for(i = l+pdim - 1; i >= l; i--){
        for(j = 0; j < l; j++){
            //full_mat1[i*(l+pdim) + j] = pow(xs[nn[k*l+j]],xp)*pow(ys[nn[k*l+j]],yp);
            full_mat1[i*(l+pdim) + j] = full_mat1[j*(l+pdim) + i];
        }
        if(yp - 1 < 0){
            --d;
            yp = d;
            xp = 0;
        }
        else{
            xp++;
            yp--;
        }
    }
    // RHS vector: operator values at the neighbours, then the polynomial
    // consistency entries (2.0 at l+3 and l+5 — presumably the operator
    // applied to the x^2 and y^2 monomials; confirm against the operator).
    for(i = 0; i < l + pdim; i++){
        if(i < l){
            RHS1[i] = rbfd2(
                xs[nn[k*l_max+0]], ys[nn[k*l_max+0]],
                xs[nn[k*l_max+i]], ys[nn[k*l_max+i]]);
        }
        else if(i==l+3 || i==l+5){
            RHS1[i] = 2.0;
        }
        else{
            RHS1[i] = 0.0;
        }
    }
}
// One thread per stencil point: assembles that point's local RBF-FD
// system in its slice of the scratch buffers and solves it for the
// differentiation weights.
__global__ void genDMatrix(int n, double* xs, double* ys,
        int* nn, double* weights_root,
        double* full_mat1_root, double* RHS1_root,
        int l_max, int l, int deg){
    const int tid = blockDim.x*blockIdx.x + threadIdx.x;
    const int pdim = (deg+1)*(deg+2)/2;
    if(tid >= n)
        return;
    const int sys = l + pdim;   // dimension of the local linear system
    double* A = &full_mat1_root[tid * sys * sys];
    double* rhs = &RHS1_root[tid * sys];
    double* w = &weights_root[tid * sys];
    build_stencil_matrix(xs, ys, nn, A, rhs, l_max, l, deg, tid);
    gauss_elim(A, rhs, w, sys);
}
|
20,460 | #include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<curand.h>
#include<curand_kernel.h>
#include<math.h>
//Declare all needed extern variables and functions
// Result from last compute of world.
extern unsigned char *g_resultData;
// Current state of world.
extern unsigned char *g_data;
// Current width of world.
extern size_t g_worldWidth;
/// Current height of world.
extern size_t g_worldHeight;
/// Current data length (product of width and height)
extern size_t g_dataLength; // g_worldWidth * g_worldHeight
// Timing variables
extern size_t s_time;
extern size_t e_time;
//Global storage of total casses and deaths
extern unsigned int totalCases;
extern unsigned int totalDeaths;
extern "C" //Extern functions in C for understandability by mpi file
{
void initMaster( int myrank, int numranks, unsigned int pattern, size_t worldSize, size_t caseSeed, size_t deathSeed );
bool kernalLaunch(int myrank, int numranks,
unsigned char** d_data, unsigned char** d_resultData,
size_t worldWidth, size_t worldHeight, size_t iterationsCount, ushort threadsCount, size_t pattern,
unsigned int infectRate, unsigned int deathRate, unsigned char** recv);
void recvData(size_t myrank, size_t numranks, size_t worldLength, unsigned char** *recv);
void sendData(size_t myrank, size_t numranks, size_t worldLength, unsigned char* sData);
void exportStats(unsigned char** data, int myrank, int numranks, int day);
void freeData();
void finishCuda();
}
// Initialize each space in the data grids after initialization
// Seeds both world buffers: every cell of g_data gets `fill`, and the
// result grid is cleared to zero.
static inline void gol_initData( unsigned char fill )
{
    for(int idx = 0; idx < g_dataLength; ++idx){
        g_data[idx] = fill;
        g_resultData[idx] = 0;
    }
}
// Pattern 0: infect every other cell in a band of rows centred vertically
// in the world. Cell values 1..20 are presumably "days of infection
// remaining" (see the %20+1 seeding used by every init) — confirm against
// the kernel's status functions. numranks is accepted but unused here.
static inline void gol_initEveryOther( size_t worldWidth, size_t worldHeight, size_t infected, size_t numranks, unsigned int* rands )
{
    size_t local = infected;
    size_t num_rows = local/(worldWidth/2); //Number of rows with infected people
    size_t start = (worldHeight/2) - (num_rows/2); //Row to start on
    size_t current = start*worldWidth; //Starting cell
    //Global variable init
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    //Allocate data for main grid and result grid (unified memory)
    cudaMallocManaged( &g_data, (g_dataLength * sizeof(unsigned char)));
    cudaMallocManaged( &g_resultData, (g_dataLength * sizeof(unsigned char)));
    gol_initData(0); //Initialize grid to zero for input of argv[1] = 0
    while(local > 0 && current < g_dataLength){ //Initialize every other spot in the middle of the grid
        g_data[current] = (rands[current]%20)+1;
        local-=1; current+=2;
    }
}
// Pattern 1: scatter clusters of 4 infected cells with maximal spacing,
// then place any remainder at spaced-out free positions.
// BUG FIX: the wrap check read `current&g_worldWidth == 0`, which parses
// as `current & (g_worldWidth == 0)` — always 0, so the branch was dead.
// It now tests `current % g_worldWidth == 0` (skip the wrap column),
// matching the other modulo checks in this function.
static inline void gol_initClustered( size_t worldWidth, size_t worldHeight, size_t infected, size_t numranks, unsigned int* rands )
{
    size_t local = infected;
    size_t current = 0;
    size_t spacing = 0;
    size_t clusters = local/4;   // groups of four cells
    unsigned int i;
    // Set all global values for later use
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    //Allocate data for main grid and result grid
    cudaMallocManaged( &g_data, (g_dataLength * sizeof(unsigned char)));
    cudaMallocManaged( &g_resultData, (g_dataLength * sizeof(unsigned char)));
    gol_initData(0); //Start from an all-healthy grid
    current = g_worldWidth+1;   // skip the first (border) row
    spacing = (g_dataLength-(2*g_worldWidth))/clusters;
    while(local>3){ //Initiate maximum spacing between groups of 4
        if(current % g_worldWidth == 0){ current++;}   // step off the wrap column
        if(current%g_worldWidth < g_worldWidth-5){
            // Room for a full group of 4 before the row edge
            for(i = 0; i < 4; i++, local--, current++){
                g_data[current] = (rands[current]%20)+1;
            }
            current+= (spacing/2);
        }
        else{
            // Too close to the row edge: advance to the next row start
            while(current%g_worldWidth > 0){
                current++;
            }
        }
    }
    // Place the remaining (<4) infections at isolated positions
    while(local > 0){
        if(current >= g_dataLength){
            current = 1;   // wrap, avoiding index -1 in the check below
        }
        if(g_data[current-1] == 0 && g_data[current] == 0 && g_data[current+1] == 0){
            g_data[current] = (rands[current]%20)+1;
        }
        local--; current += 3;
    }
}
// Pattern 2: infect a roughly sqrt(infected) x sqrt(infected) square of
// cells centred in the world. Because dim is truncated, fewer than
// `infected` cells may be seeded when infected is not a perfect square.
// NOTE(review): i/j/begin are int while indices elsewhere are size_t —
// fine for the sizes used here, but would overflow on very large worlds.
static inline void gol_initMiddle( size_t worldWidth, size_t worldHeight, size_t infected, size_t numranks, unsigned int* rands )
{
    size_t local = infected;
    size_t dim = sqrt(local);   // side length of the infected square
    size_t start = (worldHeight/2) - (dim/2); //Row to start on
    size_t current = (start*worldWidth)+((worldWidth/2)-(dim/2)); //Starting cell
    int i, j, begin;
    // Set all global values for later use
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    // Allocate data for main grid and result grid
    cudaMallocManaged( &g_data, (g_dataLength * sizeof(unsigned char)));
    cudaMallocManaged( &g_resultData, (g_dataLength * sizeof(unsigned char)));
    gol_initData(0); //Initialize grids to zero
    // Create clustered square in the middle of the grid
    for( i = 0; i < dim; i++){
        begin = current + (i*worldWidth);
        for( j = begin; j < begin + dim; j++){
            g_data[j] = (rands[j]%20)+1;
        }
    }
}
// Pattern 3 ("social distancing"): spread the infected cells as evenly as
// possible across the interior of the grid, splitting the per-cell gap
// into a front and back margin around each seeded cell.
void gol_initDistancing( size_t worldWidth, size_t worldHeight, size_t infected, size_t numranks, unsigned int* rands )
{
    size_t local = infected;
    size_t current = worldWidth;   // start after the first (border) row
    // Set all global values for later use
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    //Allocate data for main grid and result grid
    cudaMallocManaged( &g_data, (g_dataLength * sizeof(unsigned char)));
    cudaMallocManaged( &g_resultData, (g_dataLength * sizeof(unsigned char)));
    gol_initData(0); //Initialize grids to zero
    // Interior cell count excludes the first and last rows
    size_t available_space = g_dataLength-(2* g_worldWidth);
    size_t distance = available_space/local;   // gap per infected cell
    size_t front = distance/2;
    size_t back = distance-front;
    //Initialize with maximum distancing possible
    while(local > 0 && current < g_dataLength){
        current += front;
        g_data[current] = (rands[current]%20)+1;
        local-=1; current+=back;
    }
}
// Pattern 4: sweep the grid repeatedly, infecting each visited healthy
// cell with ~5% probability until `infected` seeds are placed.
// NOTE(review): the already-infected check only tests for the value 1,
// but seeded cells hold 1..20 — a cell with value 2..20 that passes the
// 5% check is re-randomized and counted again, slightly skewing the
// total; confirm whether that is intended.
void gol_initRandom( size_t worldWidth, size_t worldHeight, size_t infected, size_t numranks, unsigned int* rands)
{
    size_t local = infected;
    size_t current = worldWidth;   // start after the first (border) row
    // Set all global values for later use
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    //Allocate data for main grid and result grid
    cudaMallocManaged( &g_data, (g_dataLength * sizeof(unsigned char)));
    cudaMallocManaged( &g_resultData, (g_dataLength * sizeof(unsigned char)));
    gol_initData(0); //Initialize grids to zero
    while(local > 0){ //Randomly initialize spaces in the grid
        if(current == g_dataLength){ current = 0; }   // wrap for another pass
        if(g_data[current] == 1){
            current++;
            continue;
        }
        else if(rands[current]%100 <= 5){   // ~5% infection chance per visit
            g_data[current] = (rands[current]%20)+1;
            local-=1;
        }
        current++;
    }
}
//Initiate the cuda world
// Per-rank world setup: binds this MPI rank to a CUDA device (round-robin
// over the visible devices), seeds the global case/death counters,
// generates one random word per cell with cuRAND (rank-dependent seed so
// ranks differ), and builds the initial world with the selected pattern.
// Exits the process on any CUDA/cuRAND failure.
void initMaster( int myrank, int numranks, unsigned int pattern, size_t worldSize, size_t caseSeed, size_t deathSeed )
{
    int cudaDeviceCount = -1;
    cudaError_t cE = cudaSuccess;
    unsigned int *rands;
    curandGenerator_t gen;
    //Initialize cuda devices
    if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess )
    {
        printf(" Unable to determine cuda device count, error is %d, count is %d\n",
               cE, cudaDeviceCount );
        exit(-1);
    }
    if( (cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess )
    {
        printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
               myrank, (myrank % cudaDeviceCount), cE);
        exit(-1);
    }
    totalCases = caseSeed;
    totalDeaths = deathSeed;
    //Alocate and create the host random number generator with curand
    cudaMallocManaged( &rands, (worldSize*worldSize * sizeof(unsigned int)));
    // Pre-zero so the rands[0] sanity check below can detect a failed fill.
    for(int i = 0; i < worldSize*worldSize; i++){
        rands[i] = 0;
    }
    curandStatus_t stat1, stat2, stat4;
    stat1 = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937);
    stat2 = curandSetPseudoRandomGeneratorSeed(gen, 1234ULL*(unsigned long long) myrank);
    stat4 = curandGenerate( gen, rands, worldSize*worldSize);
    cudaDeviceSynchronize();   // curandGenerate is asynchronous
    if(stat1 != CURAND_STATUS_SUCCESS){
        printf("ERROR: Generator creation failed\n");
        exit(-1);
    }
    else if(stat2 != CURAND_STATUS_SUCCESS){
        printf("ERROR: Generator seeding failed\n");
        exit(-1);
    }
    else if(stat4 != CURAND_STATUS_SUCCESS){
        printf("ERROR: RNG Production failed(%d)\n", stat4);
        exit(-1);
    }
    else if(rands[0] == 0){
        // NOTE(review): a legitimately zero first draw also trips this —
        // a 1-in-2^32 false positive, tolerated here.
        printf("ERROR: Rands did not generate!\n");
        exit(-1);
    }
    printf("Rank %d, of size %ldx%ld, is running GOL kernel on device %d with %ld infected\n", myrank, worldSize, worldSize, (myrank%cudaDeviceCount), caseSeed);
    //Initialize the world based on the given pattern
    switch(pattern)
    {
        case 0:
            gol_initEveryOther( worldSize, worldSize, caseSeed, numranks, rands);
            break;
        case 1:
            gol_initClustered( worldSize, worldSize, caseSeed, numranks, rands);
            break;
        case 2:
            gol_initMiddle( worldSize, worldSize, caseSeed, numranks, rands);
            break;
        case 3:
            gol_initDistancing( worldSize, worldSize, caseSeed, numranks, rands);
            break;
        case 4:
            gol_initRandom( worldSize, worldSize, caseSeed, numranks, rands);
            break;
        default:
            printf("Pattern %u has not been implemented \n", pattern);
            exit(-1);
    }
    cudaFree(rands);   // the init functions have consumed the randoms
    curandDestroyGenerator(gen);
}
//Swap the information in the given arrays
//Swap the information in the given arrays (exchanges the two pointers)
static inline void gol_swap( unsigned char **pA, unsigned char **pB)
{
    unsigned char *hold = *pB;
    *pB = *pA;
    *pA = hold;
}
//Cout the number of alive cells in the current world
__device__
static inline unsigned int gol_countAliveCells(unsigned char* data,
                                               size_t x0, size_t x1, size_t x2,
                                               size_t y0, size_t y1,size_t y2, size_t width)
{
    // Counts the 8 neighbours whose value lies in 1..16 (infected and
    // contagious). x0/x1/x2 are the left/centre/right column offsets and
    // y0/y1/y2 the upper/centre/lower row offsets, all pre-computed by
    // the caller. `width` is unused.
    const size_t neighbour[8] = {
        x0 + y0, x1 + y0, x2 + y0,   // upper-left, upper, upper-right
        x0 + y1,          x2 + y1,   // left, right
        x0 + y2, x1 + y2, x2 + y2    // lower-left, lower, lower-right
    };
    unsigned int infected = 0;
    for(int k = 0; k < 8; ++k){
        const int v = data[neighbour[k]];
        if(v > 0 && v < 17)
            ++infected;
    }
    return infected;
}
__device__
unsigned int getDefaultStatus(unsigned int current, int alive, unsigned int *tCases, unsigned int *tDeaths){
    // Default infection model. `current` is the days of infection left
    // (0 = healthy); `alive` the number of infected neighbours.
    if(current > 0){
        // Heavy exposure (>4) stalls recovery except on the final day;
        // otherwise the countdown ticks toward healthy.
        return (alive > 4) ? (current == 1 ? 0 : current) : current - 1;
    }
    // Healthy cell: outcome scales with exposure.
    if(alive < 2)
        return 0;                  // too little contact to infect
    (*tCases) += 1;                // a new case either way below
    return (alive < 4) ? 14 : 21;  // mild: 14-day course, severe: 21-day
}
//Statistical update rule: each infected neighbour gets an independent
//`rate`-percent chance of infecting a healthy cell (21-day infection).
//Infected cells simply move one day closer to recovery.
//NOTE(review): *tCases is incremented without an atomic here, as in the
//other rules — racy if multiple threads share the counter; confirm intent.
__device__
unsigned int getStatsStatus(unsigned int current, int alive, int rate, unsigned int *tCases, unsigned int *tDeaths, curandState_t* state){
    if(current > 0){
        return current-1; // one day closer to healthy
    }
    if(alive == 0){ return 0; } // no exposure, stays healthy
    while(alive > 0){
        // curand() already returns an unsigned int. The original wrapped it in
        // fabsf(), round-tripping through float: a 24-bit mantissa destroys the
        // low-order bits (biasing `% 100`) and converting values > INT_MAX back
        // to a signed int is undefined behavior.
        unsigned int r = curand(state);
        if(r % 100 < (unsigned int)rate){ //Spread rate
            (*tCases)++;
            return 21;
        }
        alive--;
    }
    return 0;
}
//Worst-case update rule: any exposure beyond one neighbour infects a healthy
//cell for 21 days; crowded sick cells cannot finish recovering.
__device__
unsigned int getWorstStatus(unsigned int current, int alive, unsigned int *tCases, unsigned int *tDeaths){
    if (current == 0) { // healthy cell
        if (alive <= 1)
            return 0; // not enough exposure
        (*tCases)++;
        return 21; // full-length infection
    }
    // Infected cell: surrounded cells stay sick unless on their last day
    if (alive > 5)
        return (current == 1) ? 0 : current;
    return current - 1; // one day closer to recovery
}
//Best-case update rule: only very heavy exposure (7+ infected neighbours)
//infects a healthy cell, and then only for 14 days. Sick cells always heal.
__device__
unsigned int getBestStatus(unsigned int current, int alive, unsigned int *tCases, unsigned int *tDeaths){
    if (current != 0)
        return current - 1; // always progresses toward recovery
    if (alive <= 6)
        return 0; // exposure threshold not met
    (*tCases)++;
    return 14; // short infection
}
/*
CUDA kernel that advances the infection world one simulation step.
Grid-stride loop: any <<<blocks, threads>>> geometry covers all worldLength
cells. `pattern` selects the per-cell update rule; tCases/tDeaths accumulate
statistics.
NOTE(review): tCases/tDeaths are incremented non-atomically here and in the
helper rules — counts are approximate under concurrency; confirm intent.
*/
__global__ void gol_kernal(unsigned int myrank, unsigned int numranks,
                           unsigned char* d_data,
                           unsigned int worldWidth, unsigned int worldHeight,
                           unsigned char* d_resultData, unsigned int pattern,
                           unsigned int infectRate, unsigned int deathRate,
                           unsigned int *tCases, unsigned int *tDeaths, size_t worldLength)
{
    unsigned int index, x0, x2, y0, y1, y2, y, x;
    index = blockIdx.x * blockDim.x + threadIdx.x;
    curandState_t state;
    while(index < worldLength){
        // Recompute the cell's coordinates every iteration. The original
        // computed x/y once before the loop, so any launch smaller than the
        // world used stale neighbour coordinates on later iterations.
        x = index % worldWidth;  // column of this cell
        y = index / worldWidth;  // row of this cell
        curand_init(2020, index, 0, &state); // per-cell RNG stream
        // Row offsets of the neighbouring rows (torus wrap-around)
        y0 = ((y+worldHeight-1)%worldHeight)*worldWidth;
        y1 = y*worldWidth;
        y2 = ((y + 1) % worldHeight) * worldWidth;
        // Columns of the neighbouring cells (torus wrap-around)
        x0 = (x + worldWidth - 1) % worldWidth;
        x2 = (x + 1) % worldWidth;
        // Number of infected neighbours of this cell
        int alive = gol_countAliveCells(d_data, x0, x, x2, y0, y1, y2, worldWidth);
        if(d_data[index] > 0){ // infected cells may die this step
            // curand() already yields an unsigned int; the original fabsf()
            // round-trip through float destroyed low-order bits (24-bit
            // mantissa), biasing `% 100`.
            unsigned int rand = curand(&state);
            if(rand%100 < deathRate){
                d_resultData[index] = 0;
                (*tDeaths)++;
                index += (blockDim.x * gridDim.x);
                continue;
            }
        }
        switch(pattern){
        case(0):
            d_resultData[index] = getDefaultStatus(d_data[index], alive, tCases, tDeaths);
            break;
        case(1):
            d_resultData[index] = getStatsStatus(d_data[index], alive, infectRate, tCases, tDeaths, &state);
            break;
        case(2):
            d_resultData[index] = getWorstStatus(d_data[index], alive, tCases, tDeaths);
            break;
        case(3):
            d_resultData[index] = getBestStatus(d_data[index], alive, tCases, tDeaths);
            break;
        default:
            printf("Pattern %u has not been implemented \n", pattern);
            return;
        }
        index += (blockDim.x * gridDim.x); //Advance by one full grid of threads
    }
}
// Host driver: runs `iterationsCount` simulation days, exchanging halo/state
// with other MPI ranks between days and exporting per-day statistics.
// NOTE(review): totalCases/totalDeaths are passed by address into a kernel,
// so they are presumably __managed__/__device__ file-scope symbols declared
// outside this excerpt — confirm.
// NOTE(review): the launch uses N/threadsCount blocks; if N < threadsCount
// this is a zero-block launch, and if N % threadsCount != 0 the tail relies
// on the kernel's grid-stride loop — verify N is a multiple of threadsCount.
bool kernalLaunch(int myrank, int numranks,
unsigned char** d_data, unsigned char** d_resultData,
size_t worldWidth, size_t worldHeight, size_t iterationsCount, ushort threadsCount, size_t pattern,
unsigned int infectRate, unsigned int deathRate, unsigned char** recv)
{
int i;
size_t N; //Initialize needed variables
N = worldWidth * worldHeight; //N equals total grid size
for(i = 0; i < iterationsCount; i++){
gol_kernal<<<N/threadsCount,threadsCount>>>(myrank, numranks, *d_data, worldWidth, worldHeight, *d_resultData, pattern, infectRate, deathRate, &totalCases, &totalDeaths, N); //Call the Parallel kernel and specify the number of blocks and threads per block.
gol_swap(d_data, d_resultData);// Swap the current data with the result data
//printf("\tRank %d, Day %d- Cases: %u | Deaths: %u\n", myrank, i, totalCases, totalDeaths);
// Keep a handle to the freshly-computed world for the MPI exchange below
unsigned char* cpy = *d_data;
if(numranks > 1){
cudaDeviceSynchronize(); //Synchronize before full exchange
recvData(myrank, numranks, N, &recv);
sendData(myrank, numranks, N, cpy);
cudaDeviceSynchronize();//Synchronize before export
}
recv[myrank] = cpy;
exportStats(recv, myrank, numranks, i);
}
return true;
}
// Blocking barrier for the host: waits for all queued device work to finish.
// Exposed as a plain function so the MPI host code can synchronize with the GPU.
void finishCuda(){
cudaDeviceSynchronize(); // Function to synchronize cuda since this call must happen from the MPI code base
}
void freeData(){ //Function to free all CUDA memory allocated
// g_data / g_resultData are file-scope device buffers allocated elsewhere
// in this translation unit (not visible in this excerpt).
cudaFree(g_data);
cudaFree(g_resultData);
}
|
20,461 | //#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
//
//__global__
//void GPU_MATHEMATICS::dot(float* store, unsigned s_LD, const float* m1, unsigned m1_r, unsigned m1_c, unsigned m1_LD,
// const float* m2, unsigned m2_r, unsigned m2_c, unsigned m2_LD)
//{
//// cublasHandle_t h;
//// cublasCreate(&h);
//// cublasSetPointerMode(h, CUBLAS_POINTER_MODE_DEVICE);
//// cublasSgemm(h, CUBLAS_OP_N, CUBLAS_OP_N,
//// m1_r, m2_c, m1_c,
//// 1, m1, m1_LD,
//// m2, m2_LD, 1,
//// store, s_LD);
//}
//
|
20,462 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-09-17
*/
#ifndef __CROSSENTROPY_CUH__
#define __CROSSENTROPY_CUH__
#include "../XTensor.h"
#include "../XDevice.h"
#include "CrossEntropy.cuh"
#include "CrossEntropy.h"
#include "../core/arithmetic/Div.h"
#include "../core/arithmetic/Multiply.h"
#include "../core/arithmetic/MultiplyDim.h"
#include "../core/math/Unary.h"
#include "../core/math/ScaleAndShift.h"
#include "../core/reduce/ReduceSum.h"
#include "../core/reduce/ReduceSumAll.h"
#include "../core/shape/Transpose.h"
#include "../core/shape/Unsqueeze.h"
#include "../core/shape/IsSameShaped.h"
namespace nts{ // namespace nts(NiuTrans.Tensor)
/*
compute the cross entropy loss (cuda version)
loss = sum_{i} (-gold_i * log(output_i))
where gold and output are distributions
>> output - model prediction
>> gold - gold standard
>> loss - compute loss
>> weight - a rescaling weight given to each class
>> padding - specify a target value that is ignored and does not contribute to the loss computation
>> leadingDim - the leading dimension for the output
*/
void _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
XTensor * loss, const XTensor * weight,
const XTensor * padding, int leadingDim)
{
// Default to the innermost dimension when no leading dimension is given
int n = leadingDim < 0 ? output->order - 1 : leadingDim;
// Lock the memory pool while the two scratch buffers below are alive
if (output->mem != NULL) {
output->mem->LockBuf();
}
XTensor * interBuf1 = NewTensorBufV2(output, output->devID, output->mem);
XTensor * interBuf2 = NewTensorBufV2(output, output->devID, output->mem);
// interBuf2 = -(gold * log(output)), optionally rescaled per class
_Log(output, interBuf1);
_Multiply(gold, interBuf1, interBuf2);
if(weight != NULL)
_MultiplyDimMe(interBuf2, weight, n);
_NegateMe(interBuf2);
// Reduce along the leading dimension to get the per-position loss
_ReduceSum(interBuf2, loss, n);
// Zero out the positions masked by padding
if(padding != NULL)
_MultiplyMe(loss, padding);
// Release in reverse order of allocation (buffer pool is stack-like)
DelTensorBuf(interBuf2);
DelTensorBuf(interBuf1);
if (output->mem != NULL) {
output->mem->UnlockBuf();
}
}
/*
compute the cross entropy loss (scalar version)
loss = sum_{i} (-gold_i * log(output_i))
where gold and output are distributions
>> output - model prediction
>> gold - gold standard
>> reduceWay - loss compute way, sum or mean
>> weight - a rescaling weight given to each class
>> padding - specify a target value that is ignored and does not contribute to the loss computation
>> leadingDim - the leading dimension for the output
<< return - the cross entropy loss that is a scalar
*/
DTYPE _CudaCrossEntropyFast(const XTensor * output, const XTensor * gold,
LOSS_COMPUTE_WAY reduceWay, const XTensor * weight,
const XTensor * padding, int leadingDim)
{
DTYPE loss = 0;
int order = output->order;
// Default to the innermost dimension when no leading dimension is given
int n = leadingDim < 0 ? output->order - 1 : leadingDim;
int leadingDimSize = output->GetDim(n);
CheckNTErrors(n >= 0 && n < output->order,
"Wrong leadingDim!");
CheckNTErrors(_IsSameShaped(output, gold),
"The output tensor and gold tensor must be of the same size!");
CheckNTErrors(weight == NULL || weight->unitNum == leadingDimSize,
"Wrong weight tensor!");
CheckNTErrors(padding == NULL || padding->order == output->order - 1,
"Wrong padding tensor!");
CheckNTErrors(gold->dataType == DEFAULT_DTYPE && output->dataType == DEFAULT_DTYPE,
"TODO!");
// Shape of the per-position loss tensor: output's shape with dim n removed
int * dimSize = new int[output->order - 1];
for (int i = 0; i < order; i++) {
if(i < n)
dimSize[i] = output->dimSize[i];
else if(i > n)
dimSize[i - 1] = output->dimSize[i];
}
if (output->mem != NULL) {
output->mem->LockBuf();
}
XTensor * lossBuf = NewTensorBufV2(output->order - 1, dimSize, output->dataType, output->denseRatio,
output->devID, output->mem);
// Per-position losses, then a full reduction to a scalar
_CudaCrossEntropyFast(output, gold, lossBuf, weight, padding, leadingDim);
_ReduceSumAll(lossBuf, &loss);
if(reduceWay == REDUCE_MEAN) {
// Mean is taken over non-padded positions only
DTYPE nonZeroNum;
if(padding == NULL) {
nonZeroNum = (DTYPE)lossBuf->unitNum;
}
else {
// Guard padding's pool separately; it may live in a different pool
// than output's (which is already locked above)
if ((padding->mem != NULL) && (padding->mem != output->mem)) {
padding->mem->LockBuf();
}
XTensor * tmp = NewTensorBufV2(padding, padding->devID, padding->mem);
// Count the non-zero (i.e. unpadded) positions
_IsNonZero(padding, tmp);
_ReduceSumAll(tmp, &nonZeroNum);
DelTensorBuf(tmp);
if ((padding->mem != NULL) && (padding->mem != output->mem)) {
padding->mem->UnlockBuf();
}
}
loss = loss / nonZeroNum;
}
else if(reduceWay == REDUCE_SUM) {
/* don't need to do anything */
}
else {
ShowNTErrors("TODO");
}
delete[] dimSize;
DelTensorBuf(lossBuf);
if (output->mem != NULL) {
output->mem->UnlockBuf();
}
return loss;
}
/*
backward computation of cross entropy function
loss = sum_{i} (-t_i * log(y_i))
dE/dy_i = -t_i / y_i
where E is the error(loss) function that measure the errors in y
with respect to gold standard, and y this the model output
>> dedy - dE/dy (for return)
>> output - model prediction
>> gold - gold standard
>> weight - a rescaling weight given to each class
>> padding - specify a target value that is ignored and does not contribute to the loss computation
>> leadingDim - the leading dimension for the output
*/
void _CudaCrossEntropyBackward(XTensor * dedy, const XTensor * output,
const XTensor * gold, const XTensor * weight,
XTensor * padding, int leadingDim)
{
// Default to the innermost dimension when no leading dimension is given
int n = leadingDim < 0 ? output->order - 1 : leadingDim;
// dedy = -(gold / output), optionally rescaled per class
_Div(gold, output, dedy);
_NegateMe(dedy);
if(weight != NULL)
_MultiplyDimMe(dedy, weight, n);
if(padding != NULL) {
// Temporarily flatten both tensors so padding (shape = output minus dim n)
// can be broadcast-multiplied along dim 0; shapes are restored afterwards.
int paddingOrder = padding->order;
int * paddingDims = new int[paddingOrder];
memcpy(paddingDims, padding->dimSize, padding->order * sizeof(int));
padding->Reshape(padding->unitNum);
int order = dedy->order;
int * dims = new int[order];
memcpy(dims, dedy->dimSize, dedy->order * sizeof(int));
dedy->Reshape(dedy->unitNum/dedy->GetDim(n), dedy->GetDim(n));
_MultiplyDimMe(dedy, padding, 0);
padding->Reshape(paddingOrder, paddingDims);
dedy->Reshape(order, dims);
delete[] paddingDims;
delete[] dims;
}
// Scale the gradient by 1 / (number of contributing positions)
if(padding != NULL) {
XTensor * tmp = NewTensor(padding);
// Count unpadded positions
_IsNonZero(padding, tmp);
DTYPE nonZeroNum;
_ReduceSumAll(tmp, &nonZeroNum);
_ScaleAndShiftMe(dedy, (DTYPE)1.0/nonZeroNum);
delete tmp;
}
else {
int num = dedy->unitNum / dedy->GetDim(n);
_ScaleAndShiftMe(dedy, (DTYPE)1.0/(DTYPE)num);
}
}
} // namespace nts(NiuTrans.Tensor)
#endif // __CROSSENTROPY_CUH__ |
20,463 | #include <stdio.h>
// pitch: the row stride (number of cols); size: the number of rows.
// Computes mat3 += mat1 * mat2 for row-major matrices.
// Each thread accumulates a disjoint set of output elements via a
// grid-stride loop. The original had EVERY thread execute the full triple
// loop with an unguarded `+=` on shared output elements, so with the
// <<<3, 3>>> launch in main() each result was racily accumulated ~9 times.
__global__ void matmul_kernel(
    const float* const mat1, const float* const mat2, float* const mat3,
    const size_t pitch1, const size_t pitch2, const size_t pitch3,
    const size_t size1, const size_t size2, const size_t size3
){
    const size_t total  = size1 * size3;
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    for(size_t e = (size_t)blockIdx.x * blockDim.x + threadIdx.x; e < total; e += stride){
        const size_t i = e / size3;   // output row
        const size_t j = e % size3;   // output column
        float acc = mat3[i*pitch3+j]; // preserve the original's accumulate-into-mat3 contract
        for(size_t k = 0; k < size2; ++k){
            acc += mat1[i*pitch1+k] * mat2[k*pitch2+j];
        }
        mat3[i*pitch3+j] = acc;
    }
}
// Print a size x pitch row-major matrix to stdout, one row per line.
void print_array(float* arr, size_t pitch, size_t size){
    for(size_t i = 0; i < size; ++i){
        for(size_t j = 0; j < pitch; ++j){
            // Row stride is `pitch`, not `size`; the original indexed with
            // i*size+j, which only worked for square matrices.
            printf("%f ", arr[i * pitch + j]);
        }
        printf("\n");
    }
}
// Demo: multiply two 3x3 matrices on the device and print the result.
int main(){
    const size_t pitch1 = 3;
    const size_t pitch2 = 3;
    const size_t size1 = 3;
    const size_t size2 = pitch1;   // inner dimension must match
    const size_t pitch3 = pitch2;
    const size_t size3 = size1;
    const size_t n1 = pitch1 * size1;
    const size_t n2 = pitch2 * size2;
    const size_t n3 = pitch3 * size3;
    float a[9] = {5, 2, 1, 5, 2, 1, 5, 2, 1};
    float b[9] = {5, 2, 1, 5, 2, 1, 5, 2, 1};
    float c[9] = {0};
    float *a_d, *b_d, *c_d;
    cudaMalloc((void**) &a_d, n1 * sizeof(float));
    cudaMalloc((void**) &b_d, n2 * sizeof(float));
    cudaMalloc((void**) &c_d, n3 * sizeof(float));
    cudaMemcpy(a_d, a, n1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, n2 * sizeof(float), cudaMemcpyHostToDevice);
    // c_d must hold the initial accumulator values (zeros) before the kernel runs
    cudaMemcpy(c_d, c, n3 * sizeof(float), cudaMemcpyHostToDevice);
    matmul_kernel<<<3, 3>>>(a_d, b_d, c_d, pitch1, pitch2, pitch3, size1, size2, size3);
    // Blocking copy also synchronizes with the kernel
    cudaMemcpy(c, c_d, n3 * sizeof(float), cudaMemcpyDeviceToHost);
    print_array(c, pitch3, size3);
    // Release device buffers (the original leaked all three)
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    return 0;
}
|
20,464 | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
using namespace std;
//For nodes in adjacency list
typedef struct node {
int val;
struct node* next;
}node;
//Stores visit array's old and new values
typedef struct node1
{
int oldval,newval;
}node1;
//qsort comparator: orders node1 records by DECREASING oldval.
//Returns <0 / 0 / >0 exactly as the original (-1, 0, 1 mapping).
int cmpfunc(const void* a,const void* b)
{
    const node1* x = (const node1*)a;
    const node1* y = (const node1*)b;
    if (x->oldval < y->oldval)
        return 1;   // a sorts after b
    if (x->oldval > y->oldval)
        return -1;  // a sorts before b
    return 0;       // equal keys keep relative order (qsort is unstable anyway)
}
//Function to update depth of nodes in next level of k-bfs.
//Grid-stride loop over all vertices: every vertex whose oldval equals the
//current depth expands its CSR neighbour range and stamps unvisited
//neighbours (oldval == -1) with depth+1 in newval.
//NOTE(review): several threads may write the same neighbour's newval
//concurrently — benign here since they all write the identical value
//d_depth[0]+1, but worth confirming against the memory model in use.
__global__
void Updatenextlevel(int *d_g_edges,int *d_g_edgepos,int *d_depth,node1 *d_visit,int *d_n)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int i, j;
for(i=index;i<d_n[0];i+=stride)
{
if(d_visit[i].oldval==d_depth[0])
{
// Expand i's neighbours: d_g_edgepos[i] .. d_g_edgepos[i+1]-1 index into d_g_edges
for(j = d_g_edgepos[i];j<d_g_edgepos[i+1];j++)
{
if(d_visit[d_g_edges[j]].oldval==-1)
d_visit[d_g_edges[j]].newval = d_depth[0]+1;
}
}
}
}
//Commit the next-level visit values after one iteration of k-bfs and count
//how many vertices joined the new frontier (written into d_sz[0]).
__global__
void UpdateVisit(node1 *visit,int *d_n,int *d_depth,int *d_sz)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int j=index;j<d_n[0];j+=stride)
    {
        if(visit[j].newval==d_depth[0]+1)
            // The original used `d_sz[0]+=1`, an unguarded read-modify-write:
            // concurrent threads lose increments, so the host saw a too-small
            // frontier size and could terminate the BFS early.
            atomicAdd(&d_sz[0], 1);
        visit[j].oldval = visit[j].newval;
        visit[j].newval = 0;
    }
}
//Array of vectors to store input graph in host memory
node *head[1157828];
//Prepend the directed edge a->b to a's adjacency list (head[a]).
void insert(int a,int b)
{
    node* entry = (node*)malloc(sizeof(node));
    entry->val  = b;        // neighbour endpoint
    entry->next = head[a];  // link in front of the existing list
    head[a] = entry;
}
// K-BFS eccentricity approximation:
// reads an undirected graph (n vertices, m edges) from stdin, converts it to
// CSR, and for each connected component runs (1) a discovery BFS from one
// source, (2) a BFS from k random component vertices, (3) a BFS from the k
// deepest vertices of step 2, keeping the maximum observed depth per vertex
// as its approximated eccentricity.
int main()
{
    int n,m,i,a,b,start,end,j,k,q,t,K,num_blocks=1,num_threads=64;
    scanf("%d %d",&n,&m);
    //Initialising all the edge lists as NULL
    for(i=0;i<n+1;i++)
        head[i]=NULL;
    //Scanning Input graph (undirected: insert both directions)
    for(i=0;i<m;i++)
    {
        scanf("%d %d",&a,&b);
        insert(a,b);
        insert(b,a);
    }
    //Value of K in K-bfs algorithm
    K = 32;
    /*G_edges: edge endpoints in CSR format (host)
      G_edgepos: per-vertex prefix offsets into G_edges (host)
      G_ecc: approximated eccentricity per vertex (host)
      visit: per-vertex visit state (host)*/
    int *G_edges,*G_edgepos,*G_ecc;
    node1 *visit;
    G_ecc = (int*)malloc((n+1)*sizeof(int));
    visit = (node1*)malloc((n+1)*sizeof(node1));
    G_edges = (int*)malloc((2*m)*sizeof(int));
    G_edgepos=(int*) malloc((n+2)*sizeof(int));
    G_edgepos[0] = 0 ;
    G_edgepos[1] = 0 ;
    //Converting graph to CSR format
    j = 0;
    for(i=1;i<=n;i++)
    {
        G_ecc[i] = 0;
        visit[i].oldval = -1;
        visit[i].newval = -1;
        node *temp = head[i];
        start = j;
        while(temp!=NULL)
        {
            G_edges[j] = temp->val;
            j++;
            temp=temp->next;
        }
        end = j-1;
        G_edgepos[i+1] = G_edgepos[i]+(end-start+1);
    }
    //Device-side mirrors of the CSR arrays and the visit state
    int *d_g_edges,*d_g_edgepos,*d_sz,*d_n;
    node1 *d_visit;
    cudaMalloc( (void**) &d_g_edges, sizeof(int)*(2*m)) ;
    cudaMemcpy( d_g_edges,G_edges, sizeof(int)*(2*m), cudaMemcpyHostToDevice) ;
    cudaMalloc( (void**) &d_g_edgepos, sizeof(int)*(n+2)) ;
    cudaMemcpy( d_g_edgepos,G_edgepos, sizeof(int)*(n+2), cudaMemcpyHostToDevice) ;
    cudaMalloc( (void**) &d_visit, sizeof(node1)*(n+1)) ;
    int *d_depth;
    cudaMalloc( (void**) &d_depth, sizeof(int)) ;
    cudaMalloc( (void**) &d_sz, sizeof(int)) ;
    cudaMalloc( (void**) &d_n, sizeof(int)) ;
    cudaMemcpy( d_n, &n, sizeof(int), cudaMemcpyHostToDevice) ;
    for(i=1;i<=n;i++)
    {
        /*If vertex i has no eccentricity yet, approximate the eccentricity of
          every vertex in its component, using i as the discovery source */
        if(G_ecc[i]==0)
        {
            //Skip isolated vertices
            if(head[i]==NULL)
                continue;
            //Reset K and the host visit array before the discovery bfs
            k = K;
            for(j=1;j<=n;j++)
            {
                visit[j].oldval=-1;
                visit[j].newval=-1;
            }
            //The discovery source starts at depth 0
            visit[i].oldval=0;
            //Copying host visit array to device visit array
            cudaMemcpy(d_visit,visit, sizeof(node1)*(n+1), cudaMemcpyHostToDevice) ;
            int sz=1,depth=0,comp_size=0;
            cudaMemcpy(d_depth, &depth, sizeof(int), cudaMemcpyHostToDevice) ;
            //Level-synchronous BFS; stop when the current frontier is empty
            while(sz>0)
            {
                comp_size+=sz;
                //Update next level in k-bfs
                Updatenextlevel<<<num_blocks,num_threads>>>(d_g_edges,d_g_edgepos,d_depth,d_visit,d_n);
                sz=0;
                //Reset the device-side frontier counter
                cudaMemcpy(d_sz, &sz, sizeof(int), cudaMemcpyHostToDevice) ;
                //Commit the new level into the visit array on the device
                UpdateVisit<<<num_blocks,num_threads>>>(d_visit,d_n,d_depth,d_sz);
                depth++;
                cudaMemcpy(d_depth, &depth, sizeof(int), cudaMemcpyHostToDevice) ;
                //Fetch the size of the freshly-built frontier
                cudaMemcpy(&sz, d_sz, sizeof(int), cudaMemcpyDeviceToHost) ;
            }
            //Copying the visit array (distances from the source) back to host
            int *comp;
            cudaMemcpy(visit,d_visit, (n+1)*sizeof(node1), cudaMemcpyDeviceToHost) ;
            int l=0;
            //Getting number of nodes in current component
            for(j=1;j<=n;j++)
            {
                if(visit[j].oldval!=-1)
                {
                    l++;
                }
            }
            //Adding values of nodes in current component to comp array
            comp_size = l;
            comp = (int*)malloc(l*sizeof(int));
            l = 0;
            for(j=1;j<=n;j++)
            {
                if(visit[j].oldval!=-1)
                {
                    comp[l]=j;
                    l++;
                }
            }
            //Clamp k to the component size. The original had a stray ';' after
            //this 'if', which made the assignment unconditional and shrank k
            //to the component size for every component.
            if(comp_size<k)
                k=comp_size;
            //Selecting k random nodes from component array (partial shuffle)
            for(j=0;j<k;j++)
            {
                q=rand()%comp_size;
                t = comp[j];
                comp[j]=comp[q];
                comp[q] = t;
            }
            //Initialise visit for all vertices as -1
            for(j=1;j<=n;j++)
            {
                visit[j].oldval = -1;
                visit[j].newval = -1;
            }
            //Mark the k selected sources as visited at depth 0
            for(j=0;j<k;j++)
            {
                visit[comp[j]].oldval = 0;
                visit[comp[j]].newval = 0;
            }
            //Copy visit array from host memory to device memory
            cudaMemcpy(d_visit,visit, sizeof(node1)*(n+1), cudaMemcpyHostToDevice) ;
            //The first level holds the k sources at depth 0
            sz=k,depth=0;
            cudaMemcpy(d_depth,&depth, sizeof(int), cudaMemcpyHostToDevice) ;
            //Running bfs with above selected k nodes in first level
            while(sz>0)
            {
                Updatenextlevel<<<num_blocks,num_threads>>>(d_g_edges,d_g_edgepos,d_depth,d_visit,d_n);
                sz=0;
                cudaMemcpy(d_sz,&sz, sizeof(int), cudaMemcpyHostToDevice) ;
                UpdateVisit<<<num_blocks,num_threads>>>(d_visit,d_n,d_depth,d_sz);
                depth++;
                cudaMemcpy(d_depth,&depth, sizeof(int), cudaMemcpyHostToDevice) ;
                cudaMemcpy(&sz,d_sz, sizeof(int), cudaMemcpyDeviceToHost) ;
            }
            //Copying the visit array back to host
            cudaMemcpy(visit,d_visit, (n+1)*sizeof(node1), cudaMemcpyDeviceToHost) ;
            //First eccentricity estimate for the component
            for(j=0;j<comp_size;j++)
            {
                G_ecc[comp[j]] = visit[comp[j]].oldval;
            }
            //Pair each component vertex with its depth so the deepest can be picked
            node1* newd;
            //The original allocated room for a SINGLE node1 here and then wrote
            //comp_size entries — a heap buffer overflow.
            newd = (node1*)malloc(comp_size*sizeof(node1));
            for(j=0;j<comp_size;j++)
            {
                newd[j].oldval = visit[comp[j]].oldval;
                newd[j].newval = comp[j];
            }
            //Sort newd array based on decreasing order of depth
            qsort(newd,comp_size,sizeof(node1),cmpfunc);
            //Initialise visit for all vertices as -1
            for(j=1;j<=n;j++)
            {
                visit[j].oldval=-1;
                visit[j].newval=-1;
            }
            //The k deepest vertices become the next round of BFS sources
            for(j=0;j<k;j++)
            {
                visit[newd[j].newval].oldval=0;
                visit[newd[j].newval].newval=0;
            }
            //Copy visit array from host memory to device memory
            cudaMemcpy(d_visit,visit, sizeof(node1)*(n+1), cudaMemcpyHostToDevice) ;
            sz=k,depth=0;
            cudaMemcpy(d_depth,&depth, sizeof(int), cudaMemcpyHostToDevice) ;
            //Running bfs with the k deepest nodes in the first level
            while(sz>0)
            {
                Updatenextlevel<<<num_blocks,num_threads>>>(d_g_edges,d_g_edgepos,d_depth,d_visit,d_n);
                sz=0;
                cudaMemcpy(d_sz,&sz, sizeof(int), cudaMemcpyHostToDevice) ;
                UpdateVisit<<<num_blocks,num_threads>>>(d_visit,d_n,d_depth,d_sz);
                depth++;
                cudaMemcpy(d_depth,&depth, sizeof(int), cudaMemcpyHostToDevice) ;
                cudaMemcpy(&sz,d_sz, sizeof(int), cudaMemcpyDeviceToHost) ;
            }
            //Copying the visit array back to host
            cudaMemcpy(visit,d_visit, (n+1)*sizeof(node1), cudaMemcpyDeviceToHost) ;
            /*Compare the value in visit array (depth) with the previous
              approximated eccentricity and keep the larger one*/
            for(j=0;j<comp_size;j++)
            {
                if(visit[comp[j]].oldval>G_ecc[comp[j]])
                    G_ecc[comp[j]] = visit[comp[j]].oldval;
            }
            //Per-component scratch buffers (the original leaked both each iteration)
            free(newd);
            free(comp);
        }
    }
    //Release device and host memory before exiting
    cudaFree(d_g_edges);
    cudaFree(d_g_edgepos);
    cudaFree(d_visit);
    cudaFree(d_depth);
    cudaFree(d_sz);
    cudaFree(d_n);
    free(G_edges);
    free(G_edgepos);
    free(G_ecc);
    free(visit);
    return 0;
}
|
20,465 | #include "includes.h"
// Sum all `size` elements of data_in into data_out[0] via atomics.
// data_out[0] must be zeroed by the caller before launch.
__global__ void atomic_reduction_kernel(float *data_out, float *data_in, int size)
{
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the tail: the original ignored `size` entirely, reading out of
    // bounds whenever the grid did not exactly match the array length.
    if (idx_x < size)
        atomicAdd(&data_out[0], data_in[idx_x]);
}
20,466 | #include <stdio.h>
// Add the two scalars pointed to by a and b, storing the sum through c.
// Intended for a <<<1, 1>>> launch.
__global__ void add( int *a, int *b, int *c) {
    c[0] = a[0] + b[0];
}
// Element-wise c[i] = a[i] + b[i] for i in [0, sz); one element per thread.
__global__ void array_add (int *a, int *b, int *c, int sz) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= sz)
        return; // tail threads of the last block do nothing
    c[i] = a[i] + b[i];
}
// Demo driver: first a scalar add on the device, then a 4096-element array add.
int main(void) {
int a, b, c;
int *dev_a, *dev_b, *dev_c;
int size = sizeof(int);
cudaMalloc((void **)&dev_a, size);
cudaMalloc((void **)&dev_b, size);
cudaMalloc((void **)&dev_c, size);
a = 2;
b = 7;
cudaMemcpy(dev_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, &b, size, cudaMemcpyHostToDevice);
add<<< 1, 1 >>>(dev_a, dev_b, dev_c);
// Blocking copy also synchronizes with the kernel above
cudaMemcpy(&c, dev_c, size, cudaMemcpyDeviceToHost);
printf("%d + %d = %d\n", a, b, c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// Second experiment: element-wise add of two 4096-int arrays
size = sizeof(int) * 4096;
int *x = (int *)malloc(size);
int *y = (int *)malloc(size);
int *z = (int *)malloc(size);
// NOTE(review): memset fills BYTES, so these ints become 0x01010101 and
// 0x02020202, not 1 and 2 — confirm whether that byte-pattern fill is the
// intended background value for this demo.
memset(x, 1, size);
memset(y, 2, size);
cudaMalloc((void **)&dev_a, size);
cudaMalloc((void **)&dev_b, size);
cudaMalloc((void **)&dev_c, size);
// A few hand-picked positions get small values to make the output readable
x[0] = 5;
y[0] = 5;
x[1024] = 2;
y[1024] = 2;
x[2047] = 1;
y[2047] = 1;
cudaMemcpy(dev_a, x, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, y, size, cudaMemcpyHostToDevice);
int threadsPerBlock = 1024;
// Ceil-divide so the grid covers all 4096 elements
int blocksPerGrid =
(4096 + threadsPerBlock - 1) / threadsPerBlock;
array_add<<< blocksPerGrid, threadsPerBlock >>>(dev_a, dev_b, dev_c, 4096);
cudaMemcpy(z, dev_c, size, cudaMemcpyDeviceToHost);
for (int j = 0; j < 4096; j++) {
printf("%d ", z[j]);
}
printf("\n");
free(x);
free(y);
free(z);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
}
|
20,467 | /**
* @device matview_transopse
* Create on:Apr 17 2018
* @author: haili
* the size of tensor is m×n×k×l
*/
__global__ void d_batch_transpose(float* A,float* T,const int m,
const int n,const int batch){
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int t_n=blockDim.x*gridDim.x;
while(tid<m*n*batch){
A[(tid/(m*n))*n*m+(tid%(m*n))/n+((tid%(m*n))%n)*m]=T[tid];
tid=tid+t_n;
__syncthreads();
}
}
|
20,468 | #include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <stdexcept>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <string>
double std_time_used;
// Owning pair of unified-memory float arrays (x/y point coordinates).
// NOTE(review): copy construction/assignment are compiler-generated and
// would double-free x/y (rule of three) — confirm instances are never copied.
struct Data {
  // Allocate `size` floats per axis, zero-initialised.
  Data(int size) : size(size), bytes(size * sizeof(float)) {
    cudaMallocManaged(&x, bytes);
    cudaMallocManaged(&y, bytes);
    cudaMemset(x, 0, bytes);
    cudaMemset(y, 0, bytes);
  }
  // Allocate and fill from host vectors; h_x/h_y must hold >= size floats.
  // Plain memcpy suffices because the buffers are managed (host-accessible).
  Data(int size, std::vector<float>& h_x, std::vector<float>& h_y)
  : size(size), bytes(size * sizeof(float)) {
    cudaMallocManaged(&x, bytes);
    cudaMallocManaged(&y, bytes);
    // cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice);
    // cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice);
    memcpy(x, h_x.data(), bytes);
    memcpy(y, h_y.data(), bytes);
  }
  ~Data() {
    cudaFree(x);
    cudaFree(y);
  }
  float* x{nullptr};
  float* y{nullptr};
  int size{0};    // element count per axis
  int bytes{0};   // byte size per axis
};
// Squared Euclidean distance between 2-D points (x_1, y_1) and (x_2, y_2).
__device__ float
squared_l2_distance(float x_1, float y_1, float x_2, float y_2) {
  const float dx = x_1 - x_2;
  const float dy = y_1 - y_2;
  return dx * dx + dy * dy;
}
// Per-block partial reduction for k-means: for every cluster, each block
// emits the partial sum of its assigned x/y values and a member count into
// new_sums_x/new_sums_y/counts at index blockIdx.x * k + cluster.
// Shared layout: [0,k) mean_x and [k,2k) mean_y during assignment, then the
// buffer is reused as a 3*blockDim.x reduction scratchpad. Requires dynamic
// shared memory of 3 * blockDim.x * sizeof(float), k <= blockDim.x, and a
// power-of-two blockDim.x.
__global__ void fine_reduce(const float* __restrict__ data_x,
                            const float* __restrict__ data_y,
                            int data_size,
                            const float* __restrict__ means_x,
                            const float* __restrict__ means_y,
                            float* __restrict__ new_sums_x,
                            float* __restrict__ new_sums_y,
                            int k,
                            int* __restrict__ counts) {
  extern __shared__ float shared_data[];

  const int local_index = threadIdx.x;
  const int global_index = blockIdx.x * blockDim.x + threadIdx.x;
  // The original returned early for out-of-range threads, making them skip
  // the __syncthreads() calls below — undefined behavior whenever data_size
  // is not a multiple of blockDim.x, and the reduction then read stale /
  // uninitialized shared memory. Keep every thread alive and let the
  // out-of-range ones contribute zeros instead.
  const bool in_range = global_index < data_size;

  // Stage the current means in shared memory.
  if (local_index < k) {
    shared_data[local_index] = means_x[local_index];
    shared_data[k + local_index] = means_y[local_index];
  }
  __syncthreads();

  // Assign this thread's point to its nearest cluster.
  float x_value = 0.0f;
  float y_value = 0.0f;
  int best_cluster = -1;  // -1 => contributes nothing to any cluster
  if (in_range) {
    x_value = data_x[global_index];
    y_value = data_y[global_index];
    float best_distance = FLT_MAX;
    for (int cluster = 0; cluster < k; ++cluster) {
      const float distance = squared_l2_distance(x_value,
                                                 y_value,
                                                 shared_data[cluster],
                                                 shared_data[k + cluster]);
      if (distance < best_distance) {
        best_distance = distance;
        best_cluster = cluster;
      }
    }
  }
  __syncthreads();

  // Tree-reduce sums and counts for each cluster in turn, reusing the
  // shared buffer as three blockDim.x-wide lanes (x, y, count).
  const int x = local_index;
  const int y = local_index + blockDim.x;
  const int count = local_index + blockDim.x + blockDim.x;
  for (int cluster = 0; cluster < k; ++cluster) {
    shared_data[x] = (best_cluster == cluster) ? x_value : 0;
    shared_data[y] = (best_cluster == cluster) ? y_value : 0;
    shared_data[count] = (best_cluster == cluster) ? 1 : 0;
    __syncthreads();

    for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
      if (local_index < stride) {
        shared_data[x] += shared_data[x + stride];
        shared_data[y] += shared_data[y + stride];
        shared_data[count] += shared_data[count + stride];
      }
      __syncthreads();
    }

    // Lane 0 publishes this block's partials for the cluster.
    if (local_index == 0) {
      const int cluster_index = blockIdx.x * k + cluster;
      new_sums_x[cluster_index] = shared_data[x];
      new_sums_y[cluster_index] = shared_data[y];
      counts[cluster_index] = shared_data[count];
    }
    __syncthreads();
  }
}
// Final reduction for k-means: collapses the per-block partial sums produced
// by fine_reduce into the new means, then resets the scratch sums/counts.
// Launched as a single block of k * (number of fine blocks) threads with
// 2 * k * blocks floats of dynamic shared memory.
// NOTE(review): the strided tree assumes blockDim.x is a power of two and a
// multiple of k (so partials for cluster c land on index c) — confirm the
// launch in main() always satisfies this.
__global__ void coarse_reduce(float* __restrict__ means_x,
                              float* __restrict__ means_y,
                              float* __restrict__ new_sum_x,
                              float* __restrict__ new_sum_y,
                              int k,
                              int* __restrict__ counts) {
  extern __shared__ float shared_data[];
  const int index = threadIdx.x;
  const int y_offset = blockDim.x;  // second half of shared memory holds y sums
  // Stage this thread's partial sums
  shared_data[index] = new_sum_x[index];
  shared_data[y_offset + index] = new_sum_y[index];
  __syncthreads();
  // Tree reduction, stopping at width k so each cluster keeps its own total
  for (int stride = blockDim.x / 2; stride >= k; stride /= 2) {
    if (index < stride) {
      shared_data[index] += shared_data[index + stride];
      shared_data[y_offset + index] += shared_data[y_offset + index + stride];
    }
    __syncthreads();
  }
  if (index < k) {
    // max(1, count) guards the division when a cluster went empty
    const int count = max(1, counts[index]);
    means_x[index] = new_sum_x[index] / count;
    means_y[index] = new_sum_y[index] / count;
    // Reset scratch buffers for the next iteration
    new_sum_y[index] = 0;
    new_sum_x[index] = 0;
    counts[index] = 0;
  }
}
// k-means driver: reads "x y label" lines from a file, runs the two-stage
// reduction for a fixed number of iterations, times it, and writes the final
// centroids plus the elapsed time to result files.
// NOTE(review): the usage string claims "<data-file> <k> [iterations]" but the
// code reads the file from argv[2], k from argv[3] and iterations from
// argv[4] (with argc < 4 as the error case) — so argv[1] is unused and the
// usage message does not match the actual invocation; confirm which is intended.
int main(int argc, const char* argv[]) {
if (argc < 4) {
std::cerr << "usage: k-means <data-file> <k> [iterations]" << std::endl;
std::exit(EXIT_FAILURE);
}
const auto k = std::atoi(argv[3]);
const auto number_of_iterations = (argc == 5) ? std::atoi(argv[4]) : 300;
std::vector<float> h_x;
std::vector<float> h_y;
// Parse "x y label" records; the label is read but discarded
std::ifstream stream(argv[2]);
std::string line;
while (std::getline(stream, line)) {
std::istringstream line_stream(line);
float x, y;
uint16_t label;
line_stream >> x >> y >> label;
h_x.push_back(x);
h_y.push_back(y);
}
const size_t number_of_elements = h_x.size();
Data d_data(number_of_elements, h_x, h_y);
// Shuffle so the first k points used as initial means are random
std::mt19937 rng(std::random_device{}());
std::shuffle(h_x.begin(), h_x.end(), rng);
std::shuffle(h_y.begin(), h_y.end(), rng);
Data d_means(k, h_x, h_y);
const int threads = 1024;
const int blocks = (number_of_elements + threads - 1) / threads;
//std::cerr << "Processing " << number_of_elements << " points on " << blocks
//          << " blocks x " << threads << " threads" << std::endl;
// * 3 for x, y and counts.
const int fine_shared_memory = 3 * threads * sizeof(float);
// * 2 for x and y. Will have k * blocks threads for the coarse reduction.
const int coarse_shared_memory = 2 * k * blocks * sizeof(float);
// Per-block partial sums (k entries per fine block) and member counts
Data d_sums(k * blocks);
int* d_counts;
cudaMalloc(&d_counts, k * blocks * sizeof(int));
cudaMemset(d_counts, 0, k * blocks * sizeof(int));
const auto start = std::chrono::high_resolution_clock::now();
for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) {
fine_reduce<<<blocks, threads, fine_shared_memory>>>(d_data.x,
d_data.y,
d_data.size,
d_means.x,
d_means.y,
d_sums.x,
d_sums.y,
k,
d_counts);
cudaDeviceSynchronize();
coarse_reduce<<<1, k * blocks, coarse_shared_memory>>>(d_means.x,
d_means.y,
d_sums.x,
d_sums.y,
k,
d_counts);
cudaDeviceSynchronize();
}
const auto end = std::chrono::high_resolution_clock::now();
const auto duration =
std::chrono::duration_cast<std::chrono::duration<float>>(end - start);
std::cerr << "Standard CUDA implementation Took: " << duration.count() << "s" << " for "<<h_x.size()<<" points."<<std::endl;
std_time_used = duration.count();
cudaFree(d_counts);
std::vector<float> mean_x(k, 0);
std::vector<float> mean_y(k, 0);
// cudaMemcpy(mean_x.data(), d_means.x, d_means.bytes, cudaMemcpyDeviceToHost);
// cudaMemcpy(mean_y.data(), d_means.y, d_means.bytes, cudaMemcpyDeviceToHost);
// Managed memory: plain memcpy is valid after the syncs above
memcpy(mean_x.data(), d_means.x, d_means.bytes);
memcpy(mean_y.data(), d_means.y, d_means.bytes);
for (size_t cluster = 0; cluster < k; ++cluster) {
//std::cout << mean_x[cluster] << " " << mean_y[cluster] << std::endl;
}
// Append the timing and dump the final centroids to the results directory
FILE *fp;
int i;
fp = fopen("Standardtimes.txt", "a");
fprintf(fp, "%0.6f\n", std_time_used);
fclose(fp);
std::string str(std::to_string(h_x.size())),str1,str2;
str = "results/standard/" + str;
str2 = str + "_centroids.txt";
fp = fopen(str2.c_str(), "w");
for(i = 0; i < k; ++i){
fprintf(fp, "%0.6f %0.6f\n", mean_x[i], mean_y[i]);
}
fclose(fp);
}
|
// Stripped-down backward-convolution stub: every thread writes a constant
// into static shared memory, then copies a mirrored element out through the
// two scalar output pointers.  The long param_* list mirrors the signature
// of the full sconv backward kernel; the parameters are unused here.
// NOTE(review): shared[31 - t] is out of bounds for t > 31, the mirrored
// read is not separated from the writes by any barrier, and all threads
// store to the same *param_I / *param_test addresses — this only behaves
// sensibly when launched with a single warp; confirm the launch config.
extern "C"
__global__ void sconv_bprop_C1_N64 (
    float* param_test,
    float* param_I,
    const float* param_F,
    const float* param_E,
    float param_alpha,
    int param_N,
    int param_K,
    int param_D,
    int param_H,
    int param_W,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_C,
    int param_CRST,
    int param_RST,
    int param_magic_RST,
    int param_shift_RST,
    int param_RS,
    int param_magic_RS,
    int param_shift_RS,
    int param_S,
    int param_magic_S,
    int param_shift_S,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_Q,
    int param_PQ,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_magic_Q,
    int param_shift_Q,
    int param_magic_PQ,
    int param_shift_PQ,
    int param_CRST8,
    int param_MPQN8) {
    // 64 * 8 * 4 * 2 = 4096 floats of static shared memory.
    __shared__ float shared[64 * 8 * 4 * 2];
    const int t = threadIdx.x;
    shared[t] = 1;
    *param_I = shared[31 - t];
    *param_test = shared[31 - t];
}
|
20,470 | #include "includes.h"
/* Copies the (rind_L, cind_L) index pairs into the (iRow, jCol) output
   arrays, one nonzero entry per thread.  Only threadIdx.x is used, so the
   kernel expects a single block with at least nnz_L threads. */
__global__ void fill_lower_left_gpu(int *iRow, int *jCol, unsigned int *rind_L, unsigned int *cind_L, const int nnz_L) {
    const int idx = threadIdx.x;
    if (idx >= nnz_L)
        return;                    // excess threads do nothing
    iRow[idx] = rind_L[idx];
    jCol[idx] = cind_L[idx];
}
20,471 | #include "includes.h"
// Intentionally empty stub: the multi-block BFS kernel body has been elided.
// All parameters are accepted but ignored.
__global__ void BFS_kernel_multi_block( volatile unsigned int *frontier, volatile unsigned int *frontier2, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, unsigned int NUM_P_PER_MP, unsigned int W_Q_SIZE)
{
    // no-op
}
20,472 | #include "includes.h"
// Determinant of the 3x3 matrix [[e00,e01,e02],[e10,e11,e12],[e20,e21,e22]]
// by direct expansion.  Kept as one flat expression so the floating-point
// evaluation order (and therefore rounding) stays exactly as written.
__device__ float computeDeterminant (float e00, float e01, float e02, float e10, float e11, float e12, float e20, float e21, float e22)
{
return e00*e11*e22-e00*e12*e21+e10*e21*e02-e10*e01*e22+e20*e01*e12-e20*e11*e02;
}
/*
 * Largest eigenvalue (scaled by sigma^2) of the negated 3x3 Hessian at each
 * voxel, using the closed form for symmetric 3x3 matrices
 * (O.K. Smith, Commun. ACM 4(4):168, 1961).
 *
 * Grid layout: blockIdx.x encodes both x-tile and z-slice (x-major,
 * imageW/blockDim.x blocks per slice); blockIdx.y/threadIdx.y give y.
 * NOTE(review): assumes imageW >= blockDim.x (otherwise the per-slice
 * block count is 0 and the z division is undefined) — confirm launch.
 *
 * Fixes vs. original: added an explicit bounds guard (the original wrote
 * out of bounds whenever the image dims were not multiples of the block
 * dims); dropped the ceilf() around the z computation, which was a no-op
 * because the integer division truncates before ceilf runs; use logical
 * && instead of bitwise & on the comparisons (same result, clearer).
 * Safe: this kernel contains no barriers, so early return is fine.
 */
__global__ void hessianKernel ( float *d_output, const float *d_gxx, const float *d_gxy, const float *d_gxz, const float *d_gyy, const float *d_gyz, const float *d_gzz, float sigma, int imageW, int imageH, int imageD )
{
    int n_blocks_per_width = imageW/blockDim.x;
    int z = blockIdx.x / n_blocks_per_width;           // slice index (floor)
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int x = (blockIdx.x - z*n_blocks_per_width)*blockDim.x + threadIdx.x;
    if (x >= imageW || y >= imageH || z >= imageD)
        return;                                        // out-of-image thread
    int i = z*imageW*imageH + y*imageW + x;
    // Negated Hessian entries (symmetric 3x3).
    float a0, b0, c0, e0, f0, k0;
    a0 = -d_gxx[i]; b0 = -d_gxy[i]; c0 = -d_gxz[i];
    e0 = -d_gyy[i]; f0 = -d_gyz[i]; k0 = -d_gzz[i];
    // http://en.wikipedia.org/wiki/Eigenvalue_algorithm
    // Oliver K. Smith: Eigenvalues of a symmetric 3x3 matrix.
    float m = (a0+e0+k0)/3;                            // mean of eigenvalues
    float q = computeDeterminant
        (a0-m, b0, c0, b0, e0-m, f0, c0, f0, k0-m)/2;  // det(A - mI)/2
    float p = (a0-m)*(a0-m) + b0*b0 + c0*c0 + b0*b0 + (e0-m)*(e0-m) +
        f0*f0 + c0*c0 + f0*f0 + (k0-m)*(k0-m);
    p = p / 6;
    // NOTE(review): p*p*p - q*q can go slightly negative from rounding,
    // yielding NaN from sqrt — kept as in the original.
    float phi = 1.f/3.f*atan(sqrt(p*p*p-q*q)/q);
    if(phi<0)
        phi=phi+3.14159f/3;
    float eig1 = m + 2*sqrt(p)*cos(phi);
    float eig2 = m - sqrt(p)*(cos(phi) + sqrt(3.0f)*sin(phi));
    float eig3 = m - sqrt(p)*(cos(phi) - sqrt(3.0f)*sin(phi));
    // Write the strictly largest eigenvalue; if two are equal no branch
    // fires and the voxel keeps its previous value (as in the original).
    if( (eig1 > eig2) && (eig1 > eig3))
        d_output[i] = eig1*sigma*sigma;
    if( (eig2 > eig1) && (eig2 > eig3))
        d_output[i] = eig2*sigma*sigma;
    if( (eig3 > eig2) && (eig3 > eig1))
        d_output[i] = eig3*sigma*sigma;
}
20,473 | #include "direct_gpu_computation.cuh"
#include <cmath>
// block width for computation - arbitrary parameter - can be changed
#define BW 512
/**
 * Backward pass of the Softmax / Cross-Entropy loss.  Assuming `diff`
 * already holds the forward-pass softmax probabilities, subtracts 1 from
 * the entry belonging to each sample's ground-truth label — one thread
 * per batch element.
 *
 * @param label      Ground-truth label per batch element (stored as float).
 * @param num_labels Number of classes (row stride of `diff`).
 * @param batch_size Number of samples in the batch.
 * @param diff       In/out gradient buffer, shape [batch_size x num_labels].
 */
__global__ void SoftmaxLoss(const float *label, int num_labels, int batch_size, float *diff) // or also Cross-Entropy
{
    const int sample = blockIdx.x * blockDim.x + threadIdx.x;
    if (sample < batch_size) {
        const int target = static_cast<int>(label[sample]);
        diff[sample * num_labels + target] -= 1.0f;
    }
}
/**
 * Computes ceil(nominator / denominator) for integral nonnegative values.
 * Implemented as div+mod instead of (n + d - 1) / d so the intermediate
 * sum cannot wrap around for nominator values close to UINT_MAX.
 */
static inline unsigned int RoundUp(unsigned int nominator, unsigned int denominator)
{
    return nominator / denominator + (nominator % denominator != 0u ? 1u : 0u);
}
// Launches SoftmaxLoss over the batch: one thread per sample, BW threads
// per block.  Asynchronous — the caller is responsible for synchronizing.
void calculate_loss_with_gpu(float * d_label, int label_size, int batch_size, float * d_loss)
{
    const unsigned int grid = RoundUp(batch_size, BW);
    SoftmaxLoss<<<grid, BW>>>(d_label, label_size, batch_size, d_loss);
}
// --- element-wise helper kernels and their host launchers -----------------
// Each kernel handles one element per thread.  All launchers use 512-thread
// blocks and an exact integer ceil-division for the grid size; the original
// (int)ceil((float)size / blockSize) can under-count blocks once size
// exceeds ~2^24 (float mantissa limit), silently skipping tail elements.

// c[i] = a[i] * b[i]
__global__ void ewm(float *a, float *b, float *c, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;  // global thread ID
    if (id < size)
        c[id] = a[id] * b[id];
}
// Element-wise product of two device arrays of length `size`.
void elementwise_multiplication(float *d_a, float * d_b, float * d_c, int size)
{
    const int blockSize = 512;
    const int gridSize = (size + blockSize - 1) / blockSize;  // exact ceil-div
    ewm<<<gridSize, blockSize>>>(d_a, d_b, d_c, size);
}
// c[i] = sqrt(a[i])
__global__ void ew_sqrt(float *a, float *c, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size)
        c[id] = sqrtf(a[id]);  // sqrtf: std::sqrt is not guaranteed __device__
}
// Element-wise square root of a device array of length `size`.
void elementwise_sqrt(float *d_a, float * d_c, int size)
{
    const int blockSize = 512;
    const int gridSize = (size + blockSize - 1) / blockSize;
    ew_sqrt<<<gridSize, blockSize>>>(d_a, d_c, size);
}
// c[i] = a[i] + b  (scalar offset)
__global__ void ew_add(float *a, float *c, float b, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size)
        c[id] = a[id] + b;
}
// Adds scalar `b` to every element of a device array of length `size`.
void elementwise_add(float *d_a, float * d_c, float b, int size)
{
    const int blockSize = 512;
    const int gridSize = (size + blockSize - 1) / blockSize;
    ew_add<<<gridSize, blockSize>>>(d_a, d_c, b, size);
}
// c[i] = a[i] / b[i]  (function name spelling kept for existing callers)
__global__ void ew_devision(float *a, float *b, float *c, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size)
        c[id] = a[id] / b[id];
}
// Element-wise division of two device arrays of length `size`.
void elementwise_devision(float *d_a, float * d_b, float * d_c, int size)
{
    const int blockSize = 512;
    const int gridSize = (size + blockSize - 1) / blockSize;
    ew_devision<<<gridSize, blockSize>>>(d_a, d_b, d_c, size);
}
// Per-element smoothed-sign gradient: (d - l) / sqrt((d - l)^2 + eps).
// All math is kept in single precision (the original used std::pow and
// std::sqrt, which promote to double inside a float kernel).
__global__ void CompLoss(const float * data, const float *label, int size, float *loss)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    const float d = data[idx] - label[idx];
    loss[idx] = d / sqrtf(d * d + 1.0e-12f);
}
// Launches CompLoss over `size` elements (BW threads per block).
void comp_loss_with_gpu(float * d_data, float* d_label, int size, float * d_loss)
{
    const int blockSize = BW;
    // Exact integer ceil-division (float-based ceil can under-count for
    // sizes above ~2^24).
    const int gridSize = (size + blockSize - 1) / blockSize;
    CompLoss<<<gridSize, blockSize>>>(d_data, d_label, size, d_loss);
}
// Same gradient, but computed only where the first of 4 interleaved
// channels in `original` equals 0.5 (the "unknown" area).
// NOTE(review): elements that fail the test keep whatever was in `loss`
// before — the caller presumably zeroes the buffer first; confirm.
__global__ void CompLossUnknown(const float *original, const float * data, const float *label, int batch_size, int size, float *loss)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size * batch_size)
        return;
    const int batch_num = idx / size;   // which image in the batch
    const int pix_num = idx % size;     // pixel within the image
    if (original[batch_num * size * 4 + pix_num * 4] != 0.5f)
        return;
    const float d = data[idx] - label[idx];
    loss[idx] = d / sqrtf(d * d + 1.0e-12f);
}
// Launches CompLossUnknown over the whole batch.
void comp_loss_from_unknown_area(float * original, float * d_data, float* d_label, int batch_size, int size, float * d_loss)
{
    const int blockSize = BW;
    const int gridSize = (size * batch_size + blockSize - 1) / blockSize;
    CompLossUnknown<<<gridSize, blockSize>>>(original, d_data, d_label, batch_size, size, d_loss);
}
20,474 | #include "includes.h"
// Swap two floats in place (device helper).
__device__ void exchange(float &a, float &b){
    const float saved = a;
    a = b;
    b = saved;
}
// In-place flips of a two-part coordinate buffer of dim_y x dim_x entries:
// if do_x, mirrors columns in the second half (coords[total ..)); if do_y,
// mirrors rows in the first half (coords[0 .. total)).  One thread per
// element; each swap pair is handled by exactly one thread.
//
// FIX: the original called __syncthreads() inside thread-divergent
// branches, which is undefined behavior.  The barriers are removed — they
// were unnecessary, since the two flips touch disjoint halves of the
// buffer and no swap pair is shared between threads.
__global__ void flip_2D(float* coords, size_t dim_y, size_t dim_x, int do_y, int do_x){
    const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t total = dim_x * dim_y;
    if (index >= total)
        return;                                   // tail threads idle
    const size_t id_x = index % dim_x;
    const size_t id_y = index / dim_x;
    // Horizontal mirror of the second half of the buffer.
    if (do_x && id_x < (dim_x / 2)) {
        exchange(coords[total + id_y * dim_x + id_x],
                 coords[total + id_y * dim_x + (dim_x - 1 - id_x)]);
    }
    // Vertical mirror of the first half of the buffer.
    if (do_y && id_y < (dim_y / 2)) {
        exchange(coords[id_y * dim_x + id_x],
                 coords[(dim_y - 1 - id_y) * dim_x + id_x]);
    }
}
20,475 | #include <iostream>
#include <cuda_runtime.h>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <ctime>
#define TILE_WIDTH 20
#define WIDTH 10000
#define MATSIZE 256
#define SIZE (WIDTH * WIDTH * sizeof(float))
float A[WIDTH][WIDTH], B[WIDTH][WIDTH], C[WIDTH][WIDTH];
float *dev_A, *dev_B, *dev_C;
// Tiled matrix multiply C = A * B for WIDTH x WIDTH row-major matrices.
// Launch with TILE_WIDTH x TILE_WIDTH thread blocks tiling the matrix
// (WIDTH is a multiple of TILE_WIDTH, so the tile loads are in bounds).
//
// Fixes vs. original: the accumulator was declared int, truncating every
// float partial product; the bounds check used `>` instead of `>=` and
// returned early, which would skip __syncthreads() divergently; the
// barrier belongs after the tile load and after the product loop, not
// inside the product loop.
__global__ void matmul(float *A, float *B, float *C) {
    __shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float sh_B[TILE_WIDTH][TILE_WIDTH];
    const int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    const int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    float acc = 0.0f;   // FIX: was int, which truncated float products
    for (int tile_id = 0; tile_id < WIDTH / TILE_WIDTH; ++tile_id) {
        // Each thread stages one element of each tile.
        sh_A[threadIdx.y][threadIdx.x] = A[row * WIDTH + (tile_id * TILE_WIDTH + threadIdx.x)];
        sh_B[threadIdx.y][threadIdx.x] = B[col + (tile_id * TILE_WIDTH + threadIdx.y) * WIDTH];
        __syncthreads();   // tiles fully loaded before anyone reads them
        for (int i = 0; i < TILE_WIDTH; ++i)
            acc += sh_A[threadIdx.y][i] * sh_B[i][threadIdx.x];
        __syncthreads();   // everyone done reading before the next overwrite
    }
    // Guard only the final store, so all threads reach the barriers above.
    if (row < WIDTH && col < WIDTH)
        C[row * WIDTH + col] = acc;
}
// Formats launch dimensions, e.g. "BLOCKS: (500, 500) THREADS: (20, 20)".
// Only the x/y components are reported.
std::string getDimString(dim3 blocks, dim3 threads) {
    std::ostringstream oss;
    oss << "BLOCKS: (" << blocks.x << ", " << blocks.y << ")"
        << " THREADS: (" << threads.x << ", " << threads.y << ")";
    return oss.str();
}
// Fills A and B with a constant, runs the tiled GPU multiply, times it,
// and reports the launch configuration and elapsed time.
// FIX: time_logs.log was opened for append but never written to — the
// timing line now goes to the log file as well as stdout.
int main () {
    std::cout << "Not Done" << std::endl;
    for (int i = 0; i < WIDTH; ++i) {
        for (int j = 0; j < WIDTH; ++j) {
            A[i][j] = B[i][j] = 2.5;
        }
    }
    cudaMalloc(&dev_A, SIZE);
    cudaMalloc(&dev_B, SIZE);
    cudaMalloc(&dev_C, SIZE);
    cudaMemcpy(dev_A, A, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, B, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_C, C, SIZE, cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 blocksPerGrid(WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
    std::cout << "Calculating GPU" << std::endl;
    std::clock_t t = std::clock();
    matmul<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_B, dev_C);
    cudaDeviceSynchronize();   // launch is async; wait before stopping the clock
    t = std::clock() - t;
    std::ofstream out{"time_logs.log", std::ios_base::app};
    const std::string dims = getDimString(blocksPerGrid, threadsPerBlock);
    const double seconds = double(t) / double(CLOCKS_PER_SEC);
    std::cout << std::setprecision(5) << dims << ", TIME: " << seconds << std::endl;
    out << std::setprecision(5) << dims << ", TIME: " << seconds << std::endl;  // FIX: actually log
    cudaMemcpy(C, dev_C, SIZE, cudaMemcpyDeviceToHost);
    out.close();
    cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C);
    std::cout << "Done!" << std::endl;
    return 0;
}
|
// Loads one float4 per thread, increments each component, stores it back.
__global__
void f1( float4* __restrict__ ptr ) {
    float4 val = ptr[threadIdx.x];
    val.x += 1;
    val.y += 1;
    val.z += 1;
    val.w += 1;
    ptr[threadIdx.x] = val;
}
// The same per-element increment work, expressed as four scalar streams.
__global__
void f2( float* __restrict__ ptr1, float* __restrict__ ptr2, float* __restrict__ ptr3, float* __restrict__ ptr4 ) {
    ptr1[threadIdx.x] += 1;
    ptr2[threadIdx.x] += 1;
    ptr3[threadIdx.x] += 1;
    ptr4[threadIdx.x] += 1;
}
// Driver: one 128-float buffer, touched once as 32 float4s (f1) and once
// as four 32-float scalar slices (f2).
int main() {
    float *some_ptr;
    cudaMalloc(&some_ptr, 128 * sizeof(float));
    f1<<<1, 32>>>(reinterpret_cast<float4*>(some_ptr));
    f2<<<1, 32>>>(some_ptr, some_ptr + 32, some_ptr + 64, some_ptr + 96);
}
|
20,477 | #include <cmath>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include "metropolis_cuda.cuh"
/*
Randomly permute the interior cities of a path (first and last entries
are never moved).
FIX: the curandState is now taken by reference; the original took it by
value, so every call consumed a private copy of the generator state and
repeated calls replayed the identical "random" sequence.  Call sites pass
an lvalue state, so they compile unchanged.
NOTE(review): j is always drawn from roughly [1, path_length-3], so this
is not an unbiased Fisher-Yates shuffle — kept as-is.
*/
__device__ void shuffle(int *path, curandState &localState,
                        int path_length) {
    for (int i = 1; i < (path_length - 1); i++) {
        int j = int(curand_uniform(&localState) * float(path_length - 3.0) + 1.0);
        int temp = path[j];
        path[j] = path[i];
        path[i] = temp;
    }
}
/*
Calculate the energy (total tour cost) of a given path with the given
distance/cost matrix.  dist_matrix is row-major, num_cities wide.
*/
__device__ float calculate_energy(int *path, float *dist_matrix,
int num_cities, int path_length) {
float E = 0.0;
for (int i = 0; i < (path_length - 1); i++) {
E += dist_matrix[path[i] * num_cities + path[i+1]];
}
return E;
}
/*
Given a path, distance/cost matrix, the old energy of this path, the
two indices between which all cities will be reversed, the number of
cities, and the path_length, compute the new energy if we were to perform
the reversal.
Only the two boundary edges change: (idx1-1, idx1) and (idx2, idx2+1) are
replaced by (idx1-1, idx2) and (idx1, idx2+1).
NOTE(review): this delta is only valid for a symmetric distance matrix
(the reversed interior edges must keep their cost) — confirm the matrix
is symmetric.  Also assumes 1 <= idx1 <= idx2 <= path_length-2 so the
idx1-1 / idx2+1 accesses stay in bounds.
*/
__device__ float update_energy(int *path, float *dist_matrix,
float E_old, int idx1, int idx2, int num_cities,
int path_length) {
float E_new = E_old;
if (idx1 != idx2) {
int start_city = path[idx1];
int end_city = path[idx2];
E_new -= dist_matrix[path[idx1-1] * num_cities + start_city];
E_new -= dist_matrix[end_city * num_cities + path[idx2+1]];
E_new += dist_matrix[path[idx1-1] * num_cities + path[idx2]];
E_new += dist_matrix[path[idx1] * num_cities + path[idx2+1]];
}
return E_new;
}
/* Metropolis acceptance ratio exp(-(E_new - E_old)/temp).
   The double literal -1.0 promotes the arithmetic (and exp) to double
   before the float return conversion. */
__device__ float calc_pi_ratio(float E_old, float E_new, float temp) {
return exp(-1.0 / temp * (E_new - E_old));
}
/*
Arguments:
path is an integer array of size num_simulations * num_cities, that
contains the paths for each of the simulations we are running
(NOTE(review): the indexing below strides by path_length, not num_cities —
confirm which one sizes the array)
dist_matrix is a float array of size num_cities * num_cities, that
contains all of the distance information between cities
states is a curandState array of size num_simulations that contains the
states of all of the PRNGs
energies is a float array of size num_simulations that contains the
energy/cost values for all of our simulations
temperatures is a float array of size num_simulations that contains the
current temperature for each simulation
num_cities is how many cities we have
path_length is the length of our path
num_simulations is how many simulations we are running
This kernel performs one iteration of the simulated annealing algorithm for
each of our simulations: propose a 2-opt segment reversal, accept with the
Metropolis probability, and (only on acceptance) cool the temperature by
a factor 0.9999 and apply the reversal.
*/
__global__ void
cudaParallelMetropolisKernel(int *path, float *dist_matrix,
curandState *states, float *energies, float *temperatures,
int num_cities, int path_length, int num_simulations) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
// grid-stride loop: one simulated-annealing chain per index
while (index < num_simulations) {
float E_old = energies[index];
float temp = temperatures[index];
int *path_new = path + index * path_length;
// Figure out which two indices in the path are going to be reversed
// (both drawn from the interior so the endpoints stay fixed)
int idx1 = int(curand_uniform(&states[index]) * float(path_length - 3.0) + 1.0);
int idx2 = int(curand_uniform(&states[index]) * float(path_length - 3.0) + 1.0);
int a = min(idx1, idx2);
int b = max(idx1, idx2);
// Calculate energy of new path with hypothetical reversal
float E_new = update_energy(path_new, dist_matrix, E_old,
a, b, num_cities, path_length);
// Calculate energy ratio of old & new path (clamped at 1)
float check = min(float(1.0), calc_pi_ratio(E_old, E_new, temp));
// Generate random number to see if we accept
float u = curand_uniform(&states[index]);
if (u < check) {
// If accept, change temperature & energy & actually change path
temperatures[index] = 0.9999 * temp;
energies[index] = E_new;
// Reverse portion of path (two-pointer in-place reversal)
int t;
while (a < b) {
t = path_new[a];
path_new[a] = path_new[b];
path_new[b] = t;
a++;
b--;
}
}
index += blockDim.x * gridDim.x;
}
}
/*
Given a start and end index, initializes a random path:
fixed start, the remaining city indices in order, fixed end, followed by
50 random shuffles of the interior.
FIX: localState is now taken by reference; the by-value original handed
every shuffle call a copy of the same generator state, so all 50 shuffles
produced the identical permutation.
NOTE(review): the fill loop runs over path_length indices, which assumes
num_cities == path_length — confirm against the caller.
*/
__device__ void initializePath(int *path, int start, int end,
                               curandState &localState, int path_length,
                               int num_simulations) {
    // Sequential seed path: start, then every other index, then end.
    path[0] = start;
    int actual_counter = 1;
    for (int i = 0; i < path_length; i++) {
        if (i != start && i != end) {
            path[actual_counter] = i;
            actual_counter++;
        }
    }
    path[path_length - 1] = end;
    // Shuffle the interior repeatedly to randomize the order.
    for (int i = 0; i < 50; i++) {
        shuffle(path, localState, path_length);
    }
}
// Initializes, for every simulation (grid-stride loop): its curand state,
// a random initial path, the energy of that path, and the starting
// annealing temperature.
__global__ void cudaInitializeDataKernel(int *path, float *dist_matrix,
                                         curandState *states, float *energies,
                                         float *temperatures, float init_temp,
                                         int start, int end, int num_cities,
                                         int path_length, int num_simulations) {
    for (unsigned int sim = blockIdx.x * blockDim.x + threadIdx.x;
         sim < num_simulations;
         sim += blockDim.x * gridDim.x) {
        // Per-simulation PRNG, seeded by the simulation index.
        curand_init(sim, 0, 0, &states[sim]);
        int *my_path = path + sim * path_length;
        // Random initial tour with fixed endpoints.
        initializePath(my_path, start, end,
                       states[sim], path_length, num_simulations);
        // Cost of that tour.
        energies[sim] = calculate_energy(my_path,
                                         dist_matrix, num_cities, path_length);
        // Common starting temperature.
        temperatures[sim] = init_temp;
    }
}
// Host-side wrapper: runs one annealing step across all simulations.
// Asynchronous; the caller synchronizes when it needs the results.
void cudaCallParallelMetropolisKernel(const unsigned int blocks,
                                      const unsigned int threadsPerBlock,
                                      int *path, float *dist_matrix,
                                      curandState *states,
                                      float *energies, float *temperatures,
                                      int num_cities, int path_length,
                                      int num_simulations) {
    cudaParallelMetropolisKernel<<<blocks, threadsPerBlock>>>(
        path, dist_matrix, states, energies, temperatures,
        num_cities, path_length, num_simulations);
}
// Host-side wrapper: device-side initialization of all simulation state.
void cudaCallInitializeDataKernel(const unsigned int blocks,
                                  const unsigned int threadsPerBlock,
                                  int *path, float *dist_matrix,
                                  curandState *states, float *energies,
                                  float *temperatures, float init_temp,
                                  int start, int end, int num_cities,
                                  int path_length, int num_simulations) {
    cudaInitializeDataKernel<<<blocks, threadsPerBlock>>>(
        path, dist_matrix, states, energies, temperatures,
        init_temp, start, end, num_cities, path_length,
        num_simulations);
}
20,478 | // source : https://gist.github.com/dpiponi/1502434
#include <stdio.h>
//
// Nearly minimal CUDA example.
// Compile with:
//
// nvcc -o minimal minimal.cu
//
#define N 1000
//
// A function marked __global__
// runs on the GPU but can be called from
// the CPU.
//
// This function multiplies the elements of an array
// of ints by 2.
//
// The entire computation can be thought of as running
// with one thread per array element with blockIdx.x
// identifying the thread.
//
// The comparison i<N is because often it isn't convenient
// to have an exact 1-1 correspondence between threads
// and array elements. Not strictly necessary here.
//
// Note how we're mixing GPU and CPU code in the same source
// file. An alternative way to use CUDA is to keep
// C/C++ code separate from CUDA code and dynamically
// compile and load the CUDA code at runtime, a little
// like how you compile and load OpenGL shaders from
// C/C++ code.
//
//
// Doubles each element of a into b; launched with one block per element
// (blockIdx.x is the element index). The i < N guard tolerates launches
// larger than the array.
//
__global__
void add(int *a, int *b) {
    int idx = blockIdx.x;
    if (idx < N)
        b[idx] = 2 * a[idx];
}
// Minimal end-to-end CUDA example: upload 0..N-1, double on the GPU with
// one block per element, download, print, and free the device buffers.
int main() {
    // Host-side buffers ('h' = host).
    int ha[N], hb[N];
    // Device-side buffers ('d' = device).
    int *da, *db;
    cudaMalloc((void **)&da, N * sizeof(int));
    cudaMalloc((void **)&db, N * sizeof(int));
    // Fill the input with 0..N-1.
    for (int i = 0; i < N; ++i)
        ha[i] = i;
    // Upload, compute (one thread per element via blockIdx), download.
    cudaMemcpy(da, ha, N * sizeof(int), cudaMemcpyHostToDevice);
    add<<<N, 1>>>(da, db);
    cudaMemcpy(hb, db, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Print the doubled values.
    for (int i = 0; i < N; ++i)
        printf("%d\n", hb[i]);
    // Release device memory.
    cudaFree(da);
    cudaFree(db);
    return 0;
}
|
20,479 | #ifndef __CUDA_KERNELHEADER__
#define __CUDA_KERNELHEADER__
/********************************************/
/* Added codes for OpenACC2CUDA translation */
/********************************************/
#ifdef __cplusplus
#define restrict __restrict__
#endif
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38
#endif
#ifndef FLT_MIN
#define FLT_MIN 1.175494351e-38
#endif
#ifndef DBL_MAX
#define DBL_MAX 1.7976931348623158e+308
#endif
#ifndef DBL_MIN
#define DBL_MIN 2.2250738585072014e-308
#endif
#endif
extern "C" __global__ void accLog_kernel_kernel0(float * in, float * out, float k_val, float n_val, int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*64));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=((n_val*log10(in[lwpriv__i]))+k_val);
}
}
/* Single-precision complex value as two floats. */
struct FComplexStruct
{
float real;
float imag;
};
typedef struct FComplexStruct FComplex;
/*
 * Table-driven approximation of atan2f(y, x) in radians.
 * fast_atan_table is a 256+1 entry lookup table covering the 0..45 degree
 * range (indexed by the ratio of the smaller to the larger of |x|,|y|,
 * scaled by 255, with linear interpolation); the quadrant logic below maps
 * the base angle back to the full -pi..pi range.  Returns 0 when both
 * inputs are (near) zero.
 */
__device__ static float dev__fast_atan2f_GP0_TU0_CT0(float y, float x, float fast_atan_table[])
{
float x_abs;
float y_abs;
float z;
float alpha;
float angle;
float base_angle;
int index;
/* normalize to +- 45 degree range */
float _ret_val_0;
y_abs=fabsf(y);
x_abs=fabsf(x);
/* don't divide by zero! */
if ( ! ((y_abs>0.0F)||(x_abs>0.0F)))
{
_ret_val_0=0.0;
return _ret_val_0;
}
/* z = ratio of smaller to larger magnitude, so z is in [0, 1] */
if (y_abs<x_abs)
{
z=(y_abs/x_abs);
}
else
{
z=(x_abs/y_abs);
}
/* when ratio approaches the table resolution, the angle is */
/* best approximated with the argument itself... */
/* (smallest non-zero value in table) */
if (z<0.003921569)
{
base_angle=z;
}
else
{
/* find index and interpolation value */
alpha=(z*((float)255));
index=(((int)alpha)&0xff);
alpha-=((float)index);
/* determine base angle based on quadrant and */
/* add or subtract table value from base angle based on quadrant */
base_angle=fast_atan_table[index];
base_angle+=((fast_atan_table[(index+1)]-fast_atan_table[index])*alpha);
}
if (x_abs>y_abs)
{
/* -45 -> 45 or 135 -> 225 */
if (x>=0.0)
{
/* -45 -> 45 */
/* 0 -> 45, angle OK */
if (y>=0.0)
{
angle=base_angle;
}
else
{
angle=( - base_angle);
}
/* -45 -> 0, angle = -angle */
}
else
{
/* 135 -> 180 or 180 -> -135 */
angle=3.141592653589793;
/* 135 -> 180, angle = 180 - angle */
if (y>=0.0)
{
angle-=base_angle;
}
else
{
angle=(base_angle-angle);
}
/* 180 -> -135, angle = angle - 180 */
}
}
else
{
/* 45 -> 135 or -135 -> -45 */
if (y>=0.0)
{
/* 45 -> 135 */
angle=1.5707963267948966;
/* 45 -> 90, angle = 90 - angle */
if (x>=0.0)
{
angle-=base_angle;
}
else
{
angle+=base_angle;
}
/* 90 -> 135, angle = 90 + angle */
}
else
{
/* -135 -> -45 */
angle=( - 1.5707963267948966);
/* -90 -> -45, angle = -90 + angle */
if (x>=0.0)
{
angle+=base_angle;
}
else
{
angle-=base_angle;
}
/* -135 -> -90, angle = -90 - angle */
}
}
return angle;
}
extern "C" __global__ void accComplexToArg_kernel_kernel0(FComplex * in, float * out, float fast_atan_table[], int noutput_items)
{
int lwpriv__i;
lwpriv__i=(threadIdx.x+(blockIdx.x*64));
if (lwpriv__i<noutput_items)
{
out[lwpriv__i]=dev__fast_atan2f_GP0_TU0_CT0(in[lwpriv__i].imag, in[lwpriv__i].real, fast_atan_table);
}
}
extern "C" __global__ void accComplexToMag_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*64));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
}
}
extern "C" __global__ void accComplexToMagPhase_kernel_kernel0(FComplex * in, float * out0, float * out1, float fast_atan_table[], int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*64));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out0[lwpriv__i]=sqrt(((aval*aval)+(bval*bval)));
out1[lwpriv__i]=dev__fast_atan2f_GP0_TU0_CT0(aval, bval, fast_atan_table);
}
}
extern "C" __global__ void accComplexToMagSquared_kernel_kernel0(FComplex * in, float * out, int noutput_items)
{
int lwpriv__i;
float aval;
float bval;
lwpriv__i=(threadIdx.x+(blockIdx.x*64));
if (lwpriv__i<noutput_items)
{
aval=in[lwpriv__i].imag;
bval=in[lwpriv__i].real;
out[lwpriv__i]=((aval*aval)+(bval*bval));
}
}
extern "C" __global__ void accMagPhaseToComplex_kernel_kernel0(float * a, float * b, FComplex * c, int noutput_items)
{
int lwpriv__i;
float mag;
float phase;
float real;
float imag;
lwpriv__i=(threadIdx.x+(blockIdx.x*64));
if (lwpriv__i<noutput_items)
{
mag=a[lwpriv__i];
phase=b[lwpriv__i];
real=mag*cos(phase);
imag=mag*sin(phase);
c[lwpriv__i].real=real;
c[lwpriv__i].imag=imag;
}
}
|
20,480 | #include <fstream>
#include <iostream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/binary_search.h>
#include <thrust/pair.h>
#define IGNORE_FIRST_N 1 //ignore the first n halo? host?
using namespace std;
string index_file = "/home/lyang/data/vl2b.00400.r200.index";
string ahf_part_file = "/home/lyang/halodata/vl_400_rhovesc.z0.000.AHF_particles";
string output_file = "vl2b.00400.r200.ahf.haloflags";
int * sorted_key_;
//get flags
// For each particle, sets flags_[i] = 1 if the particle appears in any AHF
// halo past the first IGNORE_FIRST_N halos, else 0.  Uses a device-side
// sorted copy of the particle ids plus binary search per halo member.
// NOTE(review): the global sorted_key_ is aliased onto particles_ below, so
// particles_ is overwritten with an identity permutation — this is only
// correct because dev_key snapshots particles_ first; do not reorder.
void getFlag(int * particles_, char * flags_, int numParts_){
int numHalos = 0;
// Snapshot of the particle ids on the device (taken BEFORE the overwrite).
thrust::device_vector<int> dev_key(particles_, particles_ + numParts_);
//sorted_key_ = new int[numParts_];
sorted_key_ = particles_;
// Clear flags and build the identity permutation 0..numParts_-1.
for(int i = 0; i < numParts_; i++){
flags_[i] = 0;
sorted_key_[i] = i;
}
//thrust::device_vector<int> dev_key(particles_, particles_+ numParts_);
printf("ok2.5\n");
thrust::device_vector<int> dev_val(sorted_key_, sorted_key_ + numParts_);
printf("ok3\n");
// Sort ids, carrying the original positions along as values.
thrust::sort_by_key(dev_key.begin(), dev_key.end(), dev_val.begin());
printf("ok4\n");
// sorted_key_[rank] = original index of the rank-th smallest id.
thrust::copy(dev_val.begin(), dev_val.end(), sorted_key_);
ifstream haloInputFile_(ahf_part_file.c_str());
haloInputFile_ >> numHalos;
for(int i = 0; i < numHalos; i ++){
int numHaloParts;
haloInputFile_ >> numHaloParts;
for(int j = 0; j < numHaloParts; j++){
int partindex;
haloInputFile_ >> partindex;
if(i >= IGNORE_FIRST_N){
// Binary search for this halo member among the sorted particle ids.
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> ret
= thrust::equal_range(dev_key.begin(), dev_key.end(), partindex);
if(ret.first != ret.second){
int ind = (ret.first - dev_key.begin());
flags_[sorted_key_[ind]] = 1;
}
}
}
// Progress dot every 100 halos.
if(i % 100 == 0){
printf(".");
}
}
printf("\n");
//delete sorted_key_;
haloInputFile_.close();
}
// Reads the particle index file, flags particles that belong to AHF halos,
// and writes the flag array out.
// FIX: the option parser used independent if-statements followed by a
// trailing else, so the else bound only to the last test and ANY flag other
// than -output printed the usage text and exited.  Chained else-if restores
// the intended behavior.  Also: new[] allocations are now released with
// delete[] (the original used scalar delete, which is undefined behavior).
int main(int argc, const char **argv){
    int m=1;
    //bool verbose = false;
    int * particles_;
    char * flags_;
    int numParts_ = 0;
    while (m<argc)
    {
        string arg = argv[m];
        if (arg == "-index") { index_file = argv[m+1]; m+=1;}
        else if (arg == "-ahf") { ahf_part_file = argv[m+1]; m+=1;}
        else if (arg == "-output") { output_file = argv[m+1]; m+=1;}
        //else if (arg == "-verbose") { verbose = true;}
        else{
            cout << "Usage:" << endl;
            exit(0);
        }
        m++;
    }
    // Binary layout of the index file: int particle count, then the ids.
    ifstream dataInputFile_;
    dataInputFile_.open(index_file.c_str(), ios::binary);
    if(!dataInputFile_.good()){
        printf("Datafile error: %s !\n", index_file.c_str());
        exit(1);
    }
    dataInputFile_.read((char*)&numParts_, sizeof(int));
    cout << "Particles: " << numParts_ << endl;
    particles_ = new int[numParts_];
    printf("ok\n");
    flags_ = new char[numParts_];
    printf("ok1\n");
    dataInputFile_.read((char *) particles_, sizeof(int) * numParts_);
    dataInputFile_.close();
    getFlag(particles_, flags_, numParts_);
    //output: count followed by one flag byte per particle
    printf("Output the result...\n");
    ofstream dataOutputStream_(output_file.c_str(), ios::binary);
    dataOutputStream_.write((char *) &numParts_, sizeof(int));
    dataOutputStream_.write((char *) flags_, sizeof(char) * numParts_);
    dataOutputStream_.close();
    printf("Finished...\n");
    delete[] particles_;
    delete[] flags_;
}
|
20,481 | #include "includes.h"
// Fourier-space viscous diffusion + projection onto the divergence-free
// part for a 2D velocity field (vx, vy hold complex spectral coefficients).
// Each thread handles one x column and a strip of lb consecutive y rows.
__global__ void diffuseProject_k(float2 *vx, float2 *vy, int dx, int dy, float dt, float visc, int lb) {
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
float2 xterm, yterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx) {
for (p = 0; p < lb; p++) {
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy) {
int fj = fi * dx + gtidx;
xterm = vx[fj];
yterm = vy[fj];
// Compute the index of the wavenumber based on the
// data order produced by a standard NN FFT
// (frequencies above dy/2 are the negative wavenumbers).
int iix = gtidx;
int iiy = (fi>dy/2)?(fi-(dy)):fi;
// Velocity diffusion: implicit damping 1/(1 + visc*dt*k^2)
float kk = (float)(iix * iix + iiy * iiy); // k^2
float diff = 1.f / (1.f + visc * dt * kk);
xterm.x *= diff; xterm.y *= diff;
yterm.x *= diff; yterm.y *= diff;
// Velocity projection: subtract (k.v/k^2) k to remove the
// compressible component (skipped for the k=0 mean mode).
if (kk > 0.f) {
float rkk = 1.f / kk;
// Real portion of velocity projection
float rkp = (iix * xterm.x + iiy * yterm.x);
// Imaginary portion of velocity projection
float ikp = (iix * xterm.y + iiy * yterm.y);
xterm.x -= rkk * rkp * iix;
xterm.y -= rkk * ikp * iix;
yterm.x -= rkk * rkp * iiy;
yterm.y -= rkk * ikp * iiy;
}
vx[fj] = xterm;
vy[fj] = yterm;
}
}
}
}
20,482 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
// Synthetic volume-RHS micro-benchmark: for each of the Nq z-slabs of an
// element, stages one Nq x Nq slice of Q in shared memory and accumulates
// four row/column sweeps into per-thread registers, then adds the result
// into rhs.  Launch: one block per element with an (Nq, Nq, 1) block shape
// (threadIdx.x/y index the slice; k loops over the third dimension).
template <int Nq, int Np>
__global__ void volumerhs(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const int nelem){
__shared__ dfloat s_F[Nq][Nq];
// One register accumulator per z-slab.
dfloat r_rhsR[Nq];
int e = blockIdx.x;
int j = threadIdx.y;
int i = threadIdx.x;
#pragma unroll Nq
for(int k=0;k<Nq;++k) r_rhsR[k] = 0;
#pragma unroll Nq
for(int k=0;k<Nq;++k){
// Barrier before the store: previous iteration must be done reading.
__syncthreads();
int qid = i + j*Nq + k*Nq*Nq + e*Np;
s_F[i][j] = Q[qid];
// Barrier after the store: slice fully staged before the sweeps read it.
__syncthreads();
#pragma unroll Nq
for(int n=0;n<Nq;++n){
r_rhsR[k] += s_F[n][j];
r_rhsR[k] += s_F[n][i];
r_rhsR[k] += s_F[j][n];
r_rhsR[k] += s_F[i][n];
}
}
#pragma unroll Nq
for(int k=0;k<Nq;++k){
int qid = i + j*Nq + k*Nq*Nq + e*Np;
rhs[qid] += r_rhsR[k];
}
}
// Allocates an N-element host buffer (*q) and matching device buffer (*c_q),
// fills the host copy with uniform values in [base, base + range), and
// uploads it to the device.  Ownership of both buffers passes to the caller.
void randArray(int N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
    *q = (dfloat*) calloc(N, sizeof(dfloat));
    cudaMalloc(c_q, N*sizeof(dfloat));
    dfloat *host = *q;
    for(int n = 0; n < N; ++n)
        host[n] = base + drand48()*range;
    cudaMemcpy(*c_q, host, N*sizeof(dfloat), cudaMemcpyHostToDevice);
}
// Driver: builds random Q and rhs fields for nelem spectral elements of
// order POLYNOMIAL_ORDER and runs the volumerhs kernel once.
int main(int argc, char **argv){
    srand48(1234);
    const int N = POLYNOMIAL_ORDER;
    const int nelem = 4000;
    const int Nq = N + 1;               // nodes per direction
    const int Np = Nq * Nq * Nq;        // nodes per element
    const int Ntotal = Np * nelem;
    // Q in [0,1) on host and device.
    dfloat *Q, *c_Q;
    randArray(Ntotal, 0., 1., &Q, &c_Q);
    cudaMemcpy(c_Q, Q, nelem*Np*sizeof(dfloat), cudaMemcpyHostToDevice);  // redundant re-upload, kept
    // rhs in [1,2); reseed so it reuses the same random stream.
    dfloat *rhs, *c_rhs;
    srand48(1234);
    randArray(Ntotal, 1., 1., &rhs, &c_rhs);
    dim3 gridShape(nelem, 1, 1);
    dim3 blockCube(Nq, Nq, Nq);         // unused alternative shape, kept
    dim3 blockSlab(Nq, Nq, 1);
    volumerhs<Nq, Np> <<< gridShape, blockSlab >>> (c_rhs, c_Q, nelem);
    cudaDeviceSynchronize();
    exit(0);
    return 0;
}
|
20,483 | #include <math.h>
#include <iostream>
#include <time.h>
#include <sys/time.h>
#include <stdio.h>
// modifiable
typedef float ft;
const int chunks = 64;
const size_t ds = 1024*1024*chunks;
const int count = 22;
const int num_gpus = 4;
// not modifiable
const float sqrt_2PIf = 2.5066282747946493232942230134974f;
const double sqrt_2PI = 2.5066282747946493232942230134974;
// Gaussian pdf value, single precision: exp(-v^2/2) / (sigma * sqrt(2*pi)).
__device__ float gpdf(float val, float sigma) {
    const float numer = expf(-0.5f * val * val);
    return numer / (sigma * sqrt_2PIf);
}
// Double-precision overload of the same density.
__device__ double gpdf(double val, double sigma) {
    const double numer = exp(-0.5 * val * val);
    return numer / (sigma * sqrt_2PI);
}
// Compute the average Gaussian pdf value over a `count`-point window
// (0.01 spacing) centered on each input sample; one thread per sample.
__global__ void gaussian_pdf(const ft * __restrict__ x, ft * __restrict__ y, const ft mean, const ft sigma, const int n) {
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= n)
        return;
    // (count / 2) is integer division — matches the original window offset.
    ft sample = x[gid] - (count / 2) * 0.01f;
    ft acc = 0;
    for (int w = 0; w < count; w++) {
        acc += gpdf((sample - mean) / sigma, sigma);
        sample += 0.01f;
    }
    y[gid] = acc / count;
}
// error check macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
// host-based timing
#define USECPSEC 1000000ULL
// Microseconds since the epoch minus `start`; pass 0 for an absolute
// timestamp, or a previous result to get an elapsed interval.
unsigned long long dtime_usec(unsigned long long start) {
  timeval now;
  gettimeofday(&now, 0);
  const unsigned long long abs_us =
      (unsigned long long)now.tv_sec * USECPSEC + now.tv_usec;
  return abs_us - start;
}
/* Times the gaussian_pdf kernel running concurrently on num_gpus devices. */
int main() {
  ft *h_x, *d_x[num_gpus], *d_y[num_gpus];
  h_x = (ft *)malloc(ds * sizeof(ft));
  for (int i = 0; i < num_gpus; i++) {
    cudaSetDevice(i);
    cudaMalloc(&d_x[i], ds * sizeof(ft));
    cudaMalloc(&d_y[i], ds * sizeof(ft));
  }
  cudaCheckErrors("allocation error");
  for (int i = 0; i < num_gpus; i++) {
    for (size_t j = 0; j < ds; j++) {
      h_x[j] = rand() / (ft)RAND_MAX;
    }
    cudaSetDevice(i);
    cudaMemcpy(d_x[i], h_x, ds * sizeof(ft), cudaMemcpyHostToDevice);
  }
  cudaCheckErrors("copy error");
  unsigned long long et1 = dtime_usec(0);
  for (int i = 0; i < num_gpus; i++) {
    cudaSetDevice(i);
    gaussian_pdf<<<(ds+255)/256, 256>>>(d_x[i], d_y[i], 0.0, 1.0, ds);
  }
  // BUG FIX: cudaDeviceSynchronize() waits only on the *current* device,
  // so the timer previously stopped after the last-set GPU alone. Wait on
  // every device before reading the clock.
  for (int i = 0; i < num_gpus; i++) {
    cudaSetDevice(i);
    cudaDeviceSynchronize();
  }
  cudaCheckErrors("execution error");
  et1 = dtime_usec(et1);
  std::cout << "elapsed time: " << et1/(float)USECPSEC << std::endl;
  // Release device and host buffers (previously leaked).
  for (int i = 0; i < num_gpus; i++) {
    cudaSetDevice(i);
    cudaFree(d_x[i]);
    cudaFree(d_y[i]);
  }
  free(h_x);
  return 0;
}
|
20,484 | //xfail:BUGLE_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
//error: Unsupported function pointer
typedef double(*funcType)(double);
// Trivial device function used as one target of the function pointer below.
__device__ double bar(double x) {
return sin(x);
}
// GPUVerify regression case (see the xfail header above): the indirect
// call through a device function pointer is *intentionally* unsupported
// by the tool under test — do not "fix" this kernel.
__global__ void foo(double x, int i)
{
funcType f;
if (i == 0)
f = bar;
else
f = cos;
f(x);
}
|
20,485 | #include "includes.h"
/* Micro-benchmark: times (in clock64() cycles) a single shared-memory
 * load with a configurable stride, to expose bank conflicts.
 * NOTE(review): s_memoryA is read uninitialized — the loaded *value* is
 * meaningless (only the latency matters), but threadIdx.x*iStride must
 * stay below 2024 or the read goes out of bounds; confirm callers. */
__global__ void bankConflictsRead(float *outFloat, int iStride, unsigned long long *ullTime)
{
/* Static size of shared memory */
__shared__ float s_memoryA[2024];
/* Variable in register */
float r_var;
/* Start measure clock cycles */
unsigned long long startTime = clock64();
/* Access data from shared memory to register */
r_var = s_memoryA[threadIdx.x*iStride];
/* End measure clock cycles */
*ullTime = clock64() - startTime;
/* Conditionally assign register var, so it won't get optimized */
if(threadIdx.x == 0) outFloat[0] = r_var;
}
20,486 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <sys/time.h>
#define SIZE 102400
#define MOD 102399
#define STEP 1
/* ARRAY A INITIALIZER: every element starts at 1. */
void init_a(int * a)
{
for(int k = 0; k < SIZE; k++)
a[k] = 1;
}
/* ARRAY B INITIALIZER: writes the loop counter at slots that advance by
   STEP modulo MOD, then pins the final element to SIZE-1. */
void init_b(int * b)
{
int slot = 0;
for(int k = 0; k < SIZE-1; k++)
{
b[slot] = k;
slot = (slot + STEP) % MOD;
}
b[SIZE-1] = SIZE-1;
}
/* CHECKING A VALUES: returns 1 iff a[i] == i+1 for all i.
 * Improvement: exit on the first mismatch instead of always scanning
 * the whole array (result is identical). */
int check_a(int * a)
{
int i;
for(i=0; i<SIZE; i++)
{
if(a[i] != (i+1))
return 0;
}
return 1;
}
/* CUDA FUNCTION: scatter-add — each thread walks the array with a
 * blockDim.x stride and does a[b[i]] += b[i].
 * NOTE(review): if b[] contains duplicate values, two threads can update
 * the same a[] slot without atomics (init_b's wrap-around modulo MOD can
 * revisit slots) — confirm the index pattern is race-free. */
__global__ void mykernel(int * a, int * b, int N)
{
int index = threadIdx.x;
int i;
for(i=index; i<N; i+=blockDim.x)
{
a[b[i]] = a[b[i]] + b[i];
}
}
/* Host driver: initialize a and b, run mykernel on one 1024-thread
 * block, verify, and report the elapsed wall-clock time. */
int main(int argc, char * argv[])
{
int * a = (int *)malloc(sizeof(int)*SIZE);
int * b = (int *)malloc(sizeof(int)*SIZE);
init_a(a);
init_b(b);
/* device allocation and host->device copies */
int * d_a, * d_b;
cudaMalloc(&d_a, sizeof(int)*SIZE);
cudaMalloc(&d_b, sizeof(int)*SIZE);
cudaMemcpy(d_a, a, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
dim3 nBlocks;
dim3 nThperBlock;
nBlocks.x = 1;
nThperBlock.x = 1024;
struct timeval tv_start, tv_stop;
gettimeofday(&tv_start, NULL);
mykernel<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE);
cudaDeviceSynchronize();
gettimeofday(&tv_stop, NULL);
/* device->host copy of the result */
cudaMemcpy(a, d_a, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
int correct = check_a(a);  /* fixed: stray double semicolon removed */
if(0 == correct)
{
printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n");
}
else
{
printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n");
}
int nsec = tv_stop.tv_sec - tv_start.tv_sec;
int nusec = tv_stop.tv_usec - tv_start.tv_usec;
if(nusec < 0)
{
nusec = nusec + 1000000;
nsec = nsec - 1;
}
printf("time = %d s %d us\n", nsec, nusec);
/* BUG FIX: free device and host memory (previously leaked) and return
 * 0 on success — the old code always returned 1, which reads as
 * failure to the shell. */
cudaFree(d_a);
cudaFree(d_b);
free(a);
free(b);
return correct ? 0 : 1;
}
|
20,487 | #include <cuda_runtime.h>
extern "C"
{
/* 1-D grayscale dilation (running max filter of width p).
 * Two shared-memory scans are built per window: threads with tx==0 build
 * a suffix-max, the others build a prefix-max of the shifted window; the
 * final pass merges the two (looks like the van Herk / Gil-Werman
 * scheme — TODO confirm). Layout assumptions (blockDim, smem size
 * 2*n_window*p ints) come from the host launcher, not visible here. */
__global__ void dilation(int * src, int * dst, int p, int window_size, int n_window, int image_shape)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
if (tx == 0)
{
/* suffix max of window ty, right-to-left */
for (int i = p - 1; i >= 0; i--)
{
if (i == p - 1)
smem[ty * p + i] = src[bx * p * n_window + ty * p + i];
else
smem[ty * p + i] = max(src[bx * p * n_window + ty * p + i], smem[ty * p + (i + 1)]);
}
}
else
{
/* prefix max of the window shifted right by p-1, left-to-right */
for (int i = 0; i <= p - 1; i++)
{
if (i == 0)
smem[n_window * p + (ty * p) + i] = src[bx * p * n_window + ty * p + (i + p - 1)];
else
smem[n_window * p + (ty * p) + i] = max(src[bx * p *n_window + ty * p + (i + p - 1)],
smem[n_window * p + (ty * p) + (i - 1)]);
}
}
/* barrier is outside the divergent branch — all threads reach it */
__syncthreads();
if (tx == 0)
{
for (int i = 0; i < p; i++)
{
// Skip first p-1 / 2 because of padding
int original_index = bx * p * n_window + ty * p + i + ((p - 1)/2);
if (original_index < image_shape)
{
dst[original_index] = max(smem[ty * p + i], smem[n_window * p + (ty * p) + i]);
}
}
}
}
}
extern "C"
{
/* 1-D grayscale erosion (running min filter of width p) — identical
 * structure to dilation above with max replaced by min: suffix-min and
 * shifted prefix-min scans in shared memory, merged in the final pass. */
__global__ void erosion(int * src, int * dst, int p, int window_size, int n_window, int image_shape)
{
extern __shared__ int smem[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
if (tx == 0)
{
/* suffix min of window ty, right-to-left */
for (int i = p - 1; i >= 0; i--)
{
if (i == p - 1)
smem[ty * p + i] = src[bx * p * n_window + ty * p + i];
else
smem[ty * p + i] = min(src[bx * p * n_window + ty * p + i], smem[ty * p + (i + 1)]);
}
}
else
{
/* prefix min of the window shifted right by p-1, left-to-right */
for (int i = 0; i <= p - 1; i++)
{
if (i == 0)
smem[n_window * p + (ty * p) + i] = src[bx * p * n_window + ty * p + (i + p - 1)];
else
smem[n_window * p + (ty * p) + i] = min(src[bx * p *n_window + ty * p + (i + p - 1)],
smem[n_window * p + (ty * p) + (i - 1)]);
}
}
__syncthreads();
if (tx == 0)
{
for (int i = 0; i < p; i++)
{
/* skip first (p-1)/2 because of padding, as in dilation */
int original_index = bx * p * n_window + ty * p + i + ((p - 1)/2);
if (original_index < image_shape)
{
dst[original_index] = min(smem[ty * p + i], smem[n_window * p + (ty * p) + i]);
}
}
}
}
} |
20,488 | #include <cstdio>
#include <cstdlib>
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
#define IDX(row, col, LDA) ((row)*(LDA)+(col))
//computes c(i,j) = a(i,j) + b(i,j)
//Thread x-dimension maps to the ROW index i, so neighboring threads in a
//warp touch addresses M apart (stride-M, uncoalesced — compare add_v2).
__global__ void add_v1(int *a, int *b, int *c, int N, int M) {
  int row = blockIdx.x*blockDim.x + threadIdx.x;
  int col = blockIdx.y*blockDim.y + threadIdx.y;
  if (row >= N || col >= M)
    return;
  int k = IDX(row, col, M);
  c[k] = a[k] + b[k];
}
//computes c(i,j) = a(i,j) + b(i,j)
//Thread x-dimension maps to the COLUMN index j (fastest-varying), so a
//warp reads consecutive addresses (coalesced).
__global__ void add_v2(int *a, int *b, int *c, int N, int M) {
  int row = blockIdx.y*blockDim.y + threadIdx.y;
  int col = blockIdx.x*blockDim.x + threadIdx.x;
  if (row >= N || col >= M)
    return;
  int k = IDX(row, col, M);
  c[k] = a[k] + b[k];
}
/* Launches both indexing variants over a 2K x 2K managed array. */
int main() {
  int N=2*1024;
  int M=2*1024;
  int *a, *b, *c;
  dim3 threads(32,32);
  dim3 blocks(N/threads.x,M/threads.y);  // N, M are multiples of 32
  cudaMallocManaged(&a,N*M*sizeof(int));
  cudaMallocManaged(&b,N*M*sizeof(int));
  cudaMallocManaged(&c,N*M*sizeof(int));
  cudaCheckError();
  // NOTE(review): a and b are never initialized, so c holds indeterminate
  // sums — presumably this file only compares the two access patterns;
  // confirm intent.
  add_v1<<<blocks,threads>>>(a,b,c,N,M);
  add_v2<<<blocks,threads>>>(a,b,c,N,M);
  // BUG FIX: synchronize and check for kernel errors BEFORE freeing —
  // the old order freed the buffers first, so a kernel failure would be
  // reported against (or masked by) the frees.
  cudaDeviceSynchronize();
  cudaCheckError();
  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
  return 0;
}
|
20,489 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <stdbool.h>
#include <unistd.h>
#include <pthread.h>
#include <cuda.h>
#define NUM_THREADs 5
#define BLOCK_SIZE 16
#define PI 3.141592654
#define MEGEXTRA 1000000
typedef struct Matrix
{
int width;
int height;
double* elements;
} Matrix;
// One block per row: block b serially sums row b of the n-column,
// row-major matrix m into rs[b] (the launcher uses 1 thread per block).
__global__ void find1elt(double *m, double *rs, int n)
{
  const int row = blockIdx.x;
  double acc = 0;
  for (int col = 0; col < n; col++)
    acc += m[row*n + col];
  rs[row] = acc;
}
// Weighted element-wise difference: dev_c[i] = dev_a[i] - 0.5*dev_b[i]
// for i < n; threads past n do nothing.
__global__ void Vector_Sub( double *dev_a , double *dev_b , double *dev_c,int n)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  dev_c[tid] = (dev_a[tid]*1.0) - (dev_b[tid]*0.5);
}
/* Computes Host_c[i] = rowsum(A)[i] - 0.5 * rowsum(D)[i] for i in [0, n)
 * on the GPU. A and D are n-by-m row-major matrices; find1elt and
 * Vector_Sub (defined above) do the row sums and the weighted difference.
 * dev_no selects which device runs the job (reverse-mapped into the
 * available range, as before). */
extern "C" void get_host(Matrix A, Matrix D,double* Host_c , int n, int m,int dev_no){
    int nDevices = 0;
    cudaGetDeviceCount(&nDevices);
    /* BUG FIX: the old code forced nDevices to 6, so cudaSetDevice could
     * be handed an id that does not exist on this machine. Use the real
     * device count and bail out when there is no GPU at all. */
    if (nDevices < 1)
        return;
    dev_no = dev_no % nDevices;
    dev_no = nDevices - dev_no - 1;   /* reverse mapping, as before */
    cudaSetDevice(dev_no);
    size_t msize  = (size_t)n * m * sizeof(double); /* bytes per matrix */
    size_t rssize = (size_t)n * sizeof(double);     /* bytes per row-sum vector */
    double *dD, *dDsums, *dA, *dAsums, *dDiff;
    cudaMalloc((void **)&dD, msize);
    cudaMalloc((void **)&dDsums, rssize);
    cudaMalloc((void **)&dA, msize);
    cudaMalloc((void **)&dAsums, rssize);
    cudaMalloc((void **)&dDiff, rssize);
    cudaMemcpy(dD, D.elements, msize, cudaMemcpyHostToDevice);
    cudaMemcpy(dA, A.elements, msize, cudaMemcpyHostToDevice);
    /* one block per row, one thread per block (layout unchanged) */
    dim3 dimGrid(n,1);
    dim3 dimBlock(1,1,1);
    find1elt<<<dimGrid,dimBlock>>>(dD, dDsums, m);
    find1elt<<<dimGrid,dimBlock>>>(dA, dAsums, m);
    /* BUG FIX: cudaThreadSynchronize() is deprecated — use
     * cudaDeviceSynchronize(). (The kernels below are stream-ordered
     * anyway, and the old intermediate host copies of the row sums were
     * dead work — they were never read — so they are gone.) */
    cudaDeviceSynchronize();
    /* Diff = rowsum(A)*1.0 - rowsum(D)*0.5 */
    Vector_Sub<<<dimGrid,dimBlock>>>(dAsums, dDsums, dDiff, n);
    cudaMemcpy(Host_c, dDiff, rssize, cudaMemcpyDeviceToHost);
    cudaFree(dDiff);
    cudaFree(dD);
    cudaFree(dDsums);
    cudaFree(dA);
    cudaFree(dAsums);
    return;
}
20,490 | #include "includes.h"
// Casts a slice of the int array y into the float array x:
// x[idxf*N + i] = (float) y[(idxi-1)*N + i] for every i in [0, N).
// Grid-stride loop, so any launch geometry covers the slice.
__global__ void Float(float * x, int* y, size_t idxf, size_t idxi, size_t N)
{
    // BUG FIX: the loop index was a signed int compared against the
    // size_t N, overflowing (UB) for N > INT_MAX; use size_t throughout.
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
        x[(idxf)*N + i] = float(y[(idxi-1)*N + i]);
    return;
}
20,491 | #include <bits/stdc++.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
using namespace std;
/* Benchmark: generate 2^25 random ints on the host, then sort them on the
 * GPU with thrust::sort, printing sample elements and CPU clock() timings.
 * NOTE(review): clock() measures CPU time, and the "sorting" interval also
 * includes the host->device transfer of d_vec — confirm that is intended. */
int main(){
int N = 1<<25, mod = 1E6;   // mod truncated from the double literal 1E6
srand(0);
vector<int> testing(N);
//thrust::host_vector<int> nums(N);
clock_t start_time = clock(), end_time;
/* Generating data testing*/
generate(testing.begin(), testing.end(), [&](){return rand()%mod; });
for(int i=0;i<N;i+=N/10) cout<<testing[i]<<' ';
cout<<endl;
thrust::host_vector<int> nums(testing.begin(), testing.end());
end_time = clock();
cout<<"=====================Generating Data Time Usage========================"<<endl<<endl;
cout<<"\t\t"<<double(end_time-start_time)/CLOCKS_PER_SEC<<" s\t\t"<<endl<<endl;
cout<<"======================================================================="<<endl;
for(int i=0, block = N/10;i<N; i+=block) cout<<nums[i]<<' ';
cout<<endl<<endl<<endl;
/* Sorting testing*/
start_time = clock();
thrust::device_vector<int> d_vec = testing;   // includes H2D copy
thrust::sort(d_vec.begin(), d_vec.end());
end_time = clock();
cout<<"===========================Sorting Time Usage=========================="<<endl<<endl;
cout<<"\t\t"<<double(end_time-start_time)/CLOCKS_PER_SEC<<" s\t\t"<<endl<<endl;
thrust::copy(d_vec.begin(), d_vec.end(), testing.begin());
cout<<"======================================================================="<<endl;
for(int i=0, block = N/10;i<N; i+=block) cout<<testing[i]<<' ';
cout<<endl<<endl<<endl;
return 0;
}
|
20,492 | /*
FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU
Copyright (C) 2012. Rama Hoetzlein, http://fluids3.com
Fluids-ZLib license (* see part 1 below)
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. Acknowledgement of the
original author is required if you publish this in a paper, or use it
in a product. (See fluids3.com for details)
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
|
20,493 | #include<iostream>
#include<stdio.h>
// Prints each thread's global 2D coordinates and a flattened id assuming
// an 8-wide row (2x2 grid of 4x4 blocks in main).
__global__ void kern(void){
  const int gx = threadIdx.x + blockIdx.x*blockDim.x;
  const int gy = threadIdx.y + blockIdx.y*blockDim.y;
  printf("%d %d %d\n", gx, gy, (gx + 8*gy));
  printf("Id%d %d %d\n", blockIdx.x, blockIdx.y, (gx + 8*gy));
}
// Launches kern on a 2x2 grid of 4x4 blocks and reports both launch-time
// and execution-time errors.
int main(){
  const dim3 grid(2,2);
  const dim3 block(4,4);
  kern<<<grid, block>>>();
  const cudaError_t eSync = cudaGetLastError();      // launch-config errors
  const cudaError_t eAsync = cudaDeviceSynchronize(); // in-kernel errors
  if (eSync != cudaSuccess)
    printf("Sync kernel error: %s\n", cudaGetErrorString(eSync));
  if (eAsync != cudaSuccess)
    printf("Async kernel error: %s\n", cudaGetErrorString(eAsync));
  return 0;
}
|
20,494 | #include "includes.h"
/* Per-block serial reduction: thread 0 of each block sums its block's
 * segment of `input` into output[blockIdx.x]. */
__global__ void totalSequential(float *input, float *output, int len) {
  //@@ Compute reduction for a segment of the input vector
  int tid = threadIdx.x, base = blockIdx.x * blockDim.x;
  if(tid == 0) {
    // BUG FIX: the accumulator was `int`, truncating every float input.
    float sum = 0.0f;
    for(unsigned int j = 0; j < blockDim.x; j++)
    {
      // BUG FIX: guard against reading past `len` in the last block
      // (the parameter was previously unused).
      if (base + j < (unsigned int)len)
        sum += input[base + j];
    }
    output[blockIdx.x] = sum;
  }
}
20,495 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define N 10000000 //job size = 1K, 10K, 100K, 1M and 10M
#define M 128 //Threads per block =128
#define R 16 //radius = 2,4,8,16
// CUDA API error checking macro
static void handleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define cudaCheck( err ) (handleError( err, __FILE__, __LINE__ ))
/* 1-D stencil: out[g] = sum of in[g-R .. g+R], staged through shared
 * memory with a halo of R on each side. Assumes blockDim.x == M.
 * NOTE(review): block 0 reads in[gindex - R] (negative index) and the
 * last block reads in[gindex + M] past the last valid element — the host
 * buffer is N+2R long but the kernel never offsets gindex by R, so the
 * halo convention looks off; confirm against the launcher. */
__global__ void stencil_1d(int *in, int *out)
{
__shared__ int temp[M + 2 * R];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + R;
temp[lindex] = in[gindex]; // Read input elements into shared memory
if (threadIdx.x < R)
{
/* first R threads also fetch the left and right halos */
temp[lindex - R] = in[gindex - R];
temp[lindex + M] = in[gindex + M];
}
// Synchronize (ensure all the data is available)
__syncthreads();
int result = 0;
// Apply the stencil
for (int offset = -R ; offset <= R ; offset++)
{
result += temp[lindex + offset];
}
// Store the result
out[gindex] = result;
}
/* Host driver: all-ones input, run the stencil once, time it with CUDA
 * events. */
int main()
{
unsigned int i;
/* BUG FIX: with N = 10M, the previous stack arrays h_in/h_out were about
 * 80 MB combined and overflowed the stack; allocate them on the heap. */
int *h_in = (int *)malloc((N + 2 * R) * sizeof(int));
int *h_out = (int *)malloc(N * sizeof(int));
if (h_in == NULL || h_out == NULL)
{
printf("host allocation failed\n");
exit(EXIT_FAILURE);
}
int *d_in, *d_out;
//time start and stop
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for( i = 0; i < (N + 2*R); ++i )
h_in[i] = 1;
// Allocate device memory
cudaCheck( cudaMalloc( &d_in, (N + 2*R) * sizeof(int)) );
cudaCheck( cudaMalloc( &d_out, N * sizeof(int)) );
//copy fro CPU to GPU memory
cudaCheck( cudaMemcpy( d_in, h_in, (N + 2*R) * sizeof(int), cudaMemcpyHostToDevice) );
cudaEventRecord( start, 0 );
//Call stencil kernel
stencil_1d<<< (N + M - 1)/M, M >>> (d_in, d_out);
cudaEventRecord( stop, 0 );
cudaEventSynchronize(stop);
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf("GPU Execution Time = %f\n",time);
//copy from device to host
cudaCheck( cudaMemcpy( h_out, d_out, N * sizeof(int), cudaMemcpyDeviceToHost) );
// Cleanup (host buffers now freed as well)
cudaFree(d_in);
cudaFree(d_out);
free(h_in);
free(h_out);
return 0;
}
|
20,496 | #include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define BLOCK_SIZE 16
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Matrix multiplication kernel called by MatMul(): one thread per output
// element, C[row][col] = dot(row of A, column of B). Threads from the
// rounded-up grid that fall outside C do nothing.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A.height || col >= B.width)
        return;
    float acc = 0;
    for (int k = 0; k < A.width; k++)
        acc += A.elements[row * A.width + k] * B.elements[k * B.width + col];
    C.elements[row * C.width + col] = acc;
}
/* Host wrapper: copies A and B to the device, runs MatMulKernel, and
 * copies the product back into C.elements. No error checking (matches
 * the rest of the file). */
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // Invoke kernel: grid rounded up to cover all of C
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x, (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // BUG FIX: cudaThreadSynchronize() is deprecated; use
    // cudaDeviceSynchronize() (same semantics).
    cudaDeviceSynchronize();
    // Read C from device memory
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
/* Seed the C PRNG: seed from the clock, then re-seed from the first
 * draw (adds no entropy, but preserved behavior). */
void startSeed()
{
    srand(time(NULL));
    srand(rand());
}
/* Fill mat with random digits 0..9 stored as floats. */
void draw_random(Matrix mat) {
    const int total = mat.height * mat.width;
    for (int k = 0; k < total; k++)
        mat.elements[k] = (float) (rand() % 10);
}
/* Print the matrix in 5-wide zero-decimal columns, one row per line,
 * followed by a blank line. */
void disp_img(Matrix mat) {
    for (int r = 0; r < mat.height; r++)
    {
        for (int c = 0; c < mat.width; c++)
            printf("%5.0f", mat.elements[r*mat.width + c]);
        printf("\n");
    }
    printf("\n");
}
/* Allocate a height-by-width matrix zero-initialized.
 * Improvement: calloc zero-fills in one call (all-zero bytes are 0.0f in
 * IEEE-754), replacing the explicit double loop; the size_t product also
 * avoids int overflow for large dimensions. */
Matrix createMatrix(int height, int width)
{
    Matrix mat;
    mat.width = width;
    mat.height = height;
    mat.elements = (float*) calloc((size_t)width * height, sizeof(float));
    return mat;
}
/* Demo: multiply a random 4x4 by a random 4x8 matrix and print all three. */
int main(int argc, char* argv[])
{
    int a = 4;   /* rows of A */
    int b = 4;   /* cols of A == rows of B */
    int c = 8;   /* cols of B */
    Matrix A = createMatrix(a, b);
    Matrix B = createMatrix(b, c);
    Matrix C = createMatrix(A.height, B.width);
    startSeed();
    draw_random(A);
    draw_random(B);
    printf(" Matriz A \n");
    disp_img(A);
    printf(" Matriz B \n");
    disp_img(B);
    MatMul(A, B, C);
    printf(" Matriz C \n");
    disp_img(C);
    /* BUG FIX: the host matrices were leaked and main fell off the end
     * without an explicit return. */
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
20,497 | #include "cuda_runtime.h"
#include <math.h>
// CPU reference: c[i] = sinf(a[i]) + sinf(b[i]). Note this deliberately
// uses single-precision sinf on double inputs, matching the GPU kernels.
void SimpleSummator(double* a, double* b, double* c, int length){
    for (int k = 0; k < length; ++k)
        c[k] = sinf(a[k]) + sinf(b[k]);
}
// c[i] = sinf(a[i]) + sinf(b[i]); no bounds check, so the launch grid
// must cover the arrays exactly.
__global__ void CUDASummator(double* a, double* b, double* c){
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    c[gid] = sinf(a[gid]) + sinf(b[gid]);
}
// result[i] = sinf(a[i]); no bounds check — the grid must cover the
// array exactly.
__global__ void CUDASinusator(double* a, double* result){
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    result[gid] = sinf(a[gid]);
}
// NOTE(review): despite the name and signature, this writes
// c[i] = sinf(a[i]) only — `b` and `bottomB` are ignored. Looks
// unfinished; confirm the intended range-sum behavior before use.
__global__ void RangeSummator(double* a, double* b, double* c, int bottomB){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = sinf(a[i]);
}
/* Computes result[i] = sinf(a[i]) on the GPU and returns the sentinel
 * 9999. NOTE(review): the launch <<<100, length/100>>> requires `length`
 * to be a multiple of 100 — the kernel has no bounds check, so a
 * non-divisible length leaves the tail elements untouched. No CUDA error
 * checking anywhere in this path. */
int GPU_Sinusator(double* a, double* result, int length){
double* dev_a;
double* dev_result;
cudaMalloc((void**)&dev_a, length * sizeof(double));
cudaMalloc((void**)&dev_result, length * sizeof(double));
cudaMemcpy(dev_a, a, length * sizeof(double), cudaMemcpyHostToDevice);
CUDASinusator<<<100,length/100>>>(dev_a,dev_result);
cudaMemcpy(result,dev_result,length * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_result);
return 9999;
}
int count = 100;
/* Element-wise c = sin(a) + sin(b), dispatched by `type`:
 *   1 — CPU path (SimpleSummator), returns sentinel 11111;
 *   2 — GPU path using the module-global `count` as block count, returns
 *       elapsed time in microseconds-ish ((int)(ms*1000));
 *   3 — autotune: times block counts 10..990 in steps of 10 (4 repeats
 *       each), stores the best in `count`, returns it;
 *   otherwise returns sentinel 99999.
 * NOTE(review): launches use <<<blocks, length/blocks>>> with no bounds
 * check in the kernel, so any `length` not divisible by the block count
 * silently skips the tail elements. The event timing also brackets the
 * allocations and copies, not just the kernel — confirm intended. */
int GPU_Summator(double* a, double* b, double* c, int length, int type){
if (type == 1) {
SimpleSummator(a,b,c,length);
return 11111;
}
if (type == 2) {
double* dev_a;
double* dev_b;
double* dev_c;
float elapsed=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc((void**)&dev_a, length * sizeof(double));
cudaMalloc((void**)&dev_b, length * sizeof(double));
cudaMalloc((void**)&dev_c, length * sizeof(double));
cudaMemcpy(dev_a, a, length * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, length * sizeof(double), cudaMemcpyHostToDevice);
CUDASummator<<<count,length/count>>>(dev_a,dev_b,dev_c);
cudaMemcpy(c,dev_c,length * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventRecord(stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return (int)(elapsed*1000);
}
int repeatC = 4;
float el;
if (type == 3){
float bestT = 10000.0;
float bestC = 10;
for (int i = 10; i < 1000; i+=10){
el = 0.0;
for (int r = 0; r < repeatC; r++){
double* dev_a;
double* dev_b;
double* dev_c;
float elapsed=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc((void**)&dev_a, length * sizeof(double));
cudaMalloc((void**)&dev_b, length * sizeof(double));
cudaMalloc((void**)&dev_c, length * sizeof(double));
cudaMemcpy(dev_a, a, length * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, length * sizeof(double), cudaMemcpyHostToDevice);
CUDASummator<<<i,length/i>>>(dev_a,dev_b,dev_c);
cudaMemcpy(c,dev_c,length * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventRecord(stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
el += elapsed;
}
if (el/repeatC < bestT) {
bestT = el/repeatC;
bestC = i;
}
}
count = bestC;
return bestC;
}
return 99999;
};
20,498 | #include <sys/time.h>
#include <cuda.h>
#include <stdio.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
// Print a fatal CUDA error with its source location and abort the
// process; no-op on cudaSuccess. Used via the HANDLE_ERROR macro above.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf( "Error: %s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
// time stamp function in seconds: wall-clock time since the epoch with
// microsecond resolution.
double getTimeStamp() {
    struct timeval tv;
    gettimeofday( &tv, NULL );
    return tv.tv_sec + (double) tv.tv_usec / 1000000;
}
// host side matrix addition: C = A + B element-wise over an nx-by-ny
// row-major matrix (CPU reference for the device kernel).
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    const int total = nx * ny;
    for (int k = 0; k < total; k++)
        C[k] = A[k] + B[k];
}
// device-side matrix addition: each thread adds FOUR consecutive
// elements, so only (nx/2)*(ny/2) threads are needed for nx*ny elements.
// NOTE(review): full coverage requires nx*ny to be a multiple of 4 and
// the quarter-grid mapping to tile the flat array exactly — confirm for
// odd dimensions (the launcher swaps args so nx >= ny but does not force
// evenness).
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
int ix = threadIdx.x + blockIdx.x*(blockDim.x) ;
int iy = threadIdx.y + blockIdx.y*(blockDim.y) ;
//printf("In add\n");
if( (ix<nx/2) && (iy<ny/2) ){
int idx = (iy*nx/2 + ix)*4;
C[idx] = A[idx] + B[idx] ;
C[idx+1] = A[idx+1] + B[idx+1] ;
C[idx+2] = A[idx+2] + B[idx+2] ;
C[idx+3] = A[idx+3] + B[idx+3] ;
//printf("Thread %d %d\n",ix,iy);
}
}
// Fill the x-by-y row-major matrix M with a deterministic pattern:
// flag != 0 -> (i+j)/3.0 (the "A" matrix), flag == 0 -> 3.14*(i+j) ("B").
void initData(float *M, int x, int y, int flag ){
    for (int i = 0; i < x; i++){
        for (int j = 0; j < y; j++){
            if (flag)
                M[i*y+j] = (float)(i+j)/3.0;
            else
                M[i*y+j] = (float)3.14*(i+j);
        }
    }
}
/* Timing harness: adds two nx-by-ny matrices on the GPU (pinned host
 * memory, 4-elements-per-thread kernel) and on the CPU, verifies the
 * results match, and prints total / H2D / kernel / D2H times. */
int main( int argc, char *argv[] ) {
if (argc!=3){
printf("Error: Invalid number of arguments.\n");
exit(1);
}
int nx = atoi( argv[1] ) ; // should check validity
int ny = atoi( argv[2] ) ; // should check validity
if(nx <=0 || ny <=0){
printf("Error: Dimension lessThanOrEqualto Zero.\n");
exit(1);
}
/* swap so nx >= ny (XOR trick — works, but std::swap would be clearer) */
if(ny>nx)
{
nx=nx^ny;
ny=nx^ny;
nx=nx^ny;
}
// NOTE(review): `bytes` is an int — overflows for nx*ny >= 2^29 floats.
int noElems = (nx)*(ny) ;
int bytes = noElems * sizeof(float) ;
// GPU and CPU memory Allocations
float *d_A, *d_B, *d_C ;
HANDLE_ERROR(cudaMalloc( (float **) &d_A, bytes )) ;
HANDLE_ERROR(cudaMalloc( (float **) &d_B, bytes )) ;
HANDLE_ERROR(cudaMalloc( (float **) &d_C, bytes )) ;
float *h_hC = (float *) malloc( bytes ) ; // host result
float *h_Ap, *h_Bp, *h_dCp;
/* pinned host buffers for faster transfers */
HANDLE_ERROR(cudaMallocHost( (float **) &h_Ap, bytes )) ;
HANDLE_ERROR(cudaMallocHost( (float **) &h_Bp, bytes )) ;
HANDLE_ERROR(cudaMallocHost( (float **) &h_dCp, bytes )) ;
// init matrices with random data
initData(h_Ap,nx,ny,1);
initData(h_Bp,nx,ny,0);
double timeStampA = getTimeStamp() ;
//transfer data to dev
HANDLE_ERROR (cudaMemcpy( d_A, h_Ap, bytes, cudaMemcpyHostToDevice )) ;
HANDLE_ERROR (cudaMemcpy( d_B, h_Bp, bytes, cudaMemcpyHostToDevice )) ;
double timeStampB = getTimeStamp() ;
// invoke Kernel: f_addmat handles 4 elements per thread, so this grid
// over-provisions ~4x and the kernel's internal guard trims the excess.
dim3 block( 1024, 1) ; // you will want to configure this
dim3 grid( (nx+block.x-1)/block.x, (ny+block.y-1)/block.y) ;
//printf("reached add %d %d %d %d %lu %d %d \n",(nx+block.x-1)/block.x, (ny+block.y-1)/block.y, nx, ny, sizeof(float), noElems, bytes);
f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ;
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
HANDLE_ERROR(cudaDeviceSynchronize()) ;
double timeStampC = getTimeStamp() ;
//copy data back
HANDLE_ERROR(cudaMemcpy(h_dCp, d_C, bytes, cudaMemcpyDeviceToHost));
double timeStampD = getTimeStamp() ;
// free GPU resources
cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ;
// CPU Matrix add
h_addmat( h_Ap, h_Bp, h_hC, nx, ny ) ;
// Check results — exact float == comparison; host and device perform
// the same single additions, so results should be bit-identical.
int flag = 0;
for(int i=0;i<(nx);i++){
for(int j=0;j<(ny);j++){
if(h_hC[i*(ny)+j] != h_dCp[i*(ny)+j])
flag++;
}
}
if (flag == 0){
printf("%.6f %.6f %.6f %.6f\n",(timeStampD-timeStampA),(timeStampB-timeStampA),(timeStampC-timeStampB),(timeStampD-timeStampC));
}
else printf("host result is not the same as the device result!");
//free other resourses
cudaFreeHost(h_Ap); cudaFreeHost(h_Bp); cudaFreeHost(h_dCp);
free(h_hC);
cudaDeviceReset() ;
}
|
20,499 | #include <stdio.h>
/* Enumerates every CUDA device and prints its key properties. */
int main()
{
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    printf("device count: %d\n", devCount);
    for (int i = 0; i < devCount; ++i) {
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        printf("ver: %d.%d\n", devProp.major, devProp.minor);
        printf("max threads per block: %d\n", (int)devProp.maxThreadsPerBlock);
        printf("number of SMs: %d\n", (int)devProp.multiProcessorCount);
        printf("warp size: %d\n", (int)devProp.warpSize);
        printf("max warps per SM: %d\n", devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
        printf("registers available per SM: %d\n", (int)devProp.regsPerBlock);
        printf("shared memory available per SM: %d\n", (int)devProp.sharedMemPerBlock);
        printf("clock frequency: %d\n", (int)devProp.clockRate);
        printf("total const memory: %d\n", (int)devProp.totalConstMem);
        printf("max threads dimBlock.x: %d\n", (int)devProp.maxThreadsDim[0]);
        printf("max threads dimBlock.y: %d\n", (int)devProp.maxThreadsDim[1]);
        printf("max threads dimBlock.z: %d\n", (int)devProp.maxThreadsDim[2]);
        printf("max blocks dimGrid.x: %d\n", (int)devProp.maxGridSize[0]);
        printf("max blocks dimGrid.y: %d\n", (int)devProp.maxGridSize[1]);
        printf("max blocks dimGrid.z: %d\n", (int)devProp.maxGridSize[2]);
        printf("memory clock rate: %d\n", (int)devProp.memoryClockRate);
        printf("memory bus width: %d\n", (int)devProp.memoryBusWidth);
        // BUG FIX: this line printed memoryBusWidth (an int) with %lld —
        // wrong field AND a format/argument mismatch (UB). Print the
        // actual memPitch field, widened to match the format.
        printf("memory pitch: %lld\n", (long long)devProp.memPitch);
        printf("\n");
    }
    return 0;
}
|
20,500 | #include <iostream>
#include <cmath>
#include <algorithm>
#include <fstream>
#define N 1000
#define nrange 20
#define bkgd 3
#define CL 0.9
__global__ void kernel(double*, int*, double*);
__device__ double poissonP(double, double);
__device__ double factorial(double n);
/* One block per mu value, one thread per candidate count n in [0,nrange).
 * Each thread computes a likelihood ratio R = P(n|mu)/P(n|best-fit mu)
 * and the probability P(n|mu); thread 0 then selection-sorts all three
 * caches in DESCENDING R order; finally each thread accepts its (sorted)
 * slot if the cumulative probability of the better-ranked slots is still
 * <= CL. NOTE(review): this looks like a Feldman–Cousins confidence-belt
 * construction with a background of `bkgd` — confirm.
 * Requires blockDim.x == nrange (shared caches are sized to nrange). */
__global__ void kernel(double* mu, int* n, double* P) {
int thId = threadIdx.x;
int blId = blockIdx.x;
int atId = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ double cacheR[nrange];
__shared__ double cacheP[nrange];
__shared__ int cacheI[nrange];
// ratio vs. the best-fit signal max(0, n - bkgd) for this n
cacheR[thId] = poissonP(mu[blId], thId)/poissonP(max(0, thId - bkgd), thId);
cacheP[thId] = poissonP(mu[blId], thId);
cacheI[thId] = thId;
__syncthreads();
// serial selection sort (descending by R) performed by thread 0 alone
if (thId == 0) {
for (int i = 0; i < nrange; i++) {
double rpRValTemp = cacheR[i];
double rpPValTemp = cacheP[i];
int rpIValTemp = cacheI[i];
double maxValTemp = cacheR[i];
int maxIdxTemp = i;
for (int j = i + 1; j < nrange; j++) {
if (cacheR[j] > maxValTemp) {
maxValTemp = cacheR[j];
maxIdxTemp = j;
}
}
cacheR[i] = cacheR[maxIdxTemp];
cacheP[i] = cacheP[maxIdxTemp];
cacheI[i] = cacheI[maxIdxTemp];
cacheR[maxIdxTemp] = rpRValTemp;
cacheP[maxIdxTemp] = rpPValTemp;
cacheI[maxIdxTemp] = rpIValTemp;
}
}
__syncthreads();
// emit sorted n and a 0/1 acceptance flag per rank
n[atId] = cacheI[thId];
double sumPTemp = 0.;
for (int i = 0; i < thId; i++) {
sumPTemp += cacheP[i];
}
if (sumPTemp <= CL) {
P[atId] = 1;
} else {
P[atId] = 0;
}
}
// Poisson probability of n counts given mean mu plus a fixed background
// of 3 events (matches the bkgd=3 used in the kernel above).
__device__ double poissonP(double mu, double n) {
    const double lambda = mu + 3.;
    return pow(lambda, n) * exp(-lambda) / factorial(n);
}
// Iterative n! for a double n holding a non-negative (integer) count.
__device__ double factorial(double n) {
    if (n == 0) {
        return 1.;
    }
    double acc = 1.;
    for (int k = 1; k < n + 1; k++) {
        acc *= (double)k;
    }
    return acc;
}
/* Scans mu over [0, 10) in N steps, runs the confidence-belt kernel, and
 * appends (mu, n, P) rows to ulUL.dat. */
int main() {
    double* mu = new double[N];
    double* P = new double[N*nrange];
    int* n = new int[N*nrange];
    double* dev_mu;
    double* dev_P;
    int* dev_n;
    cudaMalloc((void**)&dev_mu, N*sizeof(double));
    cudaMalloc((void**)&dev_P, N*nrange*sizeof(double));
    cudaMalloc((void**)&dev_n, N*nrange*sizeof(int));
    double muMax = 10;
    double muMin = 0;
    double step = (muMax - muMin)/N;
    for (int i = 0; i < N; i++) {
        mu[i] = muMin + (double)i * step;
    }
    cudaMemcpy(dev_mu, mu, N*sizeof(double), cudaMemcpyHostToDevice);
    kernel<<<N,nrange>>>(dev_mu, dev_n, dev_P);
    // BUG FIX: P is a double array but was copied with sizeof(int),
    // fetching only half of the results (and leaving the rest garbage).
    cudaMemcpy(P, dev_P, N*nrange*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(n, dev_n, N*nrange*sizeof(int), cudaMemcpyDeviceToHost);
    std::ofstream ofs;
    ofs.open ("ulUL.dat", std::ofstream::out | std::ofstream::app);
    for (int i = 0; i < N; i++) {
        ofs << mu[i];
        for (int j = 0; j < nrange; j++) {
            ofs << "," << n[j + i*nrange] << "," << P[j + i * nrange];
        }
        ofs << std::endl;
    }
    ofs.close();
    cudaFree(dev_mu);
    cudaFree(dev_n);
    cudaFree(dev_P);
    // Release the host arrays (previously leaked).
    delete [] mu;
    delete [] P;
    delete [] n;
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.