serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
3,601 | #include <assert.h>
#include <pthread.h>
#include <stdio.h>
#define THREADS 4
int intervalsT = 100000000;
double store, base;
double partialStore[] = {0.0, 0.0, 0.0, 0.0};
/* Worker body for one pthread: integrates 4/(1+x^2) over this thread's
 * slice of [0,1] and stores the partial sum in partialStore[id].
 * param points at the worker's integer id (0 .. THREADS-1). */
void* threadRoutine(void* param) {
  int* id = (int*)param;
  int chunk = intervalsT / THREADS;
  int begin = (*id) * chunk;
  int end = begin + chunk;
  partialStore[*id] = 0.0;
  for (int k = begin; k < end; k++) {
    double x = k * base;
    partialStore[*id] += base * (4 / (1 + x * x));
  }
  return 0;
}
/* Computes pi with THREADS pthreads: each worker integrates its own
 * sub-range of [0,1]; the partial sums are joined and combined here. */
void calculatePIHostMultiple() {
  pthread_t threads[THREADS];
  int threadId[THREADS];
  int i;
  base = (double)(1.0 / intervalsT);
  for (i = 0; i < THREADS; i++) {
    threadId[i] = i;  /* each worker gets a stable pointer to its id */
    pthread_create(&threads[i], NULL, threadRoutine, &threadId[i]);
  }
  for (i = 0; i < THREADS; i++)
    pthread_join(threads[i], NULL);
  store = 0.0;
  for (i = 0; i < THREADS; i++)
    store += partialStore[i];
  printf("PI (multiple th) =%f\n", store);
}
/* Single-threaded Riemann sum of 4/(1+x^2) over [0,1] (left endpoints),
 * printed as an approximation of pi. */
void calculatePIHostSingle() {
  int intervals = 100000000;
  double width = (double)(1.0 / intervals);
  double acc = 0.0;
  for (int i = 0; i < intervals; i++) {
    double x = i * width;
    acc += width * (4 / (1 + x * x));
  }
  printf("PI (single th) =%f \n", acc);
}
/* Runs the single-threaded baseline, then the pthread version. */
int main() {
  calculatePIHostSingle();
  calculatePIHostMultiple();
  return 0;
}
|
3,602 | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
/*1-20*/
#define BLOCK_WIDTH 2
#define BLOCK_SIZE 4
using namespace std;
/*
//BlockTranspose
__global__
void BlockTranspose(float *A_elements, int A_width, int A_height) {
__shared__ float blockA[BLOCK_WIDTH][BLOCK_WIDTH];
int baseIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
baseIdx += (blockIdx.y * BLOCK_SIZE + threadIdx.y) * A_width;
blockA[threadIdx.y][threadIdx.x] = A_elements[baseIdx];
A_elements[baseIdx] = blockA[threadIdx.x][threadIdx.y];
}
*/
/*BlockTranspose_Kernel*/
/* In-place per-tile transpose via a shared-memory staging tile.
 * Expects blockDim == (BLOCK_WIDTH, BLOCK_WIDTH) and a grid of
 * (A_width/BLOCK_WIDTH) x (A_height/BLOCK_WIDTH) blocks.
 * Fix: the original scaled blockIdx by BLOCK_SIZE (4) although blocks
 * are only BLOCK_WIDTH (2) threads wide, so tiles skipped columns/rows
 * and indexed past the end of the matrix.  Indexing now uses blockDim
 * so it always matches the actual launch shape. */
__global__
void BlockTranspose_Kernel(float *A_elements, int A_width, int A_height) {
__shared__ float blockA[BLOCK_WIDTH][BLOCK_WIDTH];
int baseIdx = blockIdx.x * blockDim.x + threadIdx.x;
baseIdx += (blockIdx.y * blockDim.y + threadIdx.y) * A_width;
blockA[threadIdx.y][threadIdx.x] = A_elements[baseIdx];
// barrier: every element of the tile must be staged before any thread
// reads its transposed partner back out
__syncthreads();
A_elements[baseIdx] = blockA[threadIdx.x][threadIdx.y];
}
/*BlockTranspose_GPU*/
/* Host wrapper: copies h_A to the device, transposes each
 * BLOCK_WIDTH x BLOCK_WIDTH tile in place, and copies the result back.
 * Fix: the kernel was launched with the HOST pointer h_A instead of the
 * device buffer d_A, so the GPU dereferenced an invalid pointer and the
 * copy-back simply returned the untouched input. */
void BlockTranspose_GPU(float* h_A, int A_width, int A_height) {
int size = A_width * A_height * sizeof(float);
float *d_A;
cudaMalloc(&d_A, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
dim3 blockDim(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 gridDim(A_width / blockDim.x, A_height / blockDim.y);
BlockTranspose_Kernel <<< gridDim, blockDim >>> (d_A, A_width, A_height);
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
}
// Helper used only by main(): prints an 8x8 (or width x height) matrix
// row-major via cout, followed by a blank line.
static void printMatrix(const float* M, int width, int height) {
    for (int r = 0; r < height; r++) {
        for (int c = 0; c < width; c++) {
            cout << M[r * width + c] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
/* Fills an 8x8 matrix with 1..64, prints it, runs the GPU block
 * transpose (timed with chrono), and prints the result. */
int main() {
    const int A_width = 8;
    const int A_height = 8;
    float* h_A = (float*)malloc(A_width * A_height * sizeof(float));
    for (int i = 0; i < A_width * A_height; i++)
        h_A[i] = i + 1.0f;
    printMatrix(h_A, A_width, A_height);
    chrono::time_point<chrono::system_clock> t0, t1;
    t0 = chrono::system_clock::now();
    BlockTranspose_GPU(h_A, A_width, A_height);
    t1 = chrono::system_clock::now();
    cout << "BlockTranspose_GPU: " << chrono::duration_cast<chrono::nanoseconds>(t1 - t0).count() << "ns." << endl;
    printMatrix(h_A, A_width, A_height);
    free(h_A);
    return 0;
}
|
3,603 | #include "includes.h"
/* Element-wise square: b[i] = a[i]^2 for the first N elements.
 * One thread per element; excess threads are masked out by the guard. */
__global__ void add(int N, double *a, double *b)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    b[idx] = a[idx] * a[idx];
}
3,604 | #include <stdio.h>
// indica que é uma função que vai rodar no device (marks a function that runs on the device)
// Device kernel: each thread prints its own thread/block coordinates.
__global__ void hello()
{
    int t = threadIdx.x;
    int b = blockIdx.x;
    printf("Oi mundo! De thread: %d De: bloco %d\n", t, b);
}
/* Launches the hello kernel with 5 blocks of 5 threads and waits for
 * all device-side printf output to be produced. */
int main(void)
{
    const int num_blocks = 5;
    const int num_threads = 5;
    // launch shape: <<<blocks, threads-per-block>>>
    hello<<<num_blocks, num_threads>>>();
    // kernel launches are asynchronous: block until the GPU is done
    cudaDeviceSynchronize();
    return 0;
}
|
3,605 | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
/* b[i] = sin(a[i]): one thread per element.  No bounds guard, so the
 * launch configuration must cover exactly the array length. */
__global__ void add(float *a, float *b) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    b[i] = sinf(a[i]);
}
/* Reads n values, computes sin of each on the GPU, and prints them.
 * Fix: the original never validated n, so n > 10 overflowed the
 * fixed-size stack arrays and a failed/negative read was undefined
 * behaviour.  The redundant host->device copy of the uninitialized
 * output array b was also dropped. */
int main(){
    int n;
    float a[10], b[10];
    printf("Enter n:");
    if (scanf("%d", &n) != 1 || n < 1 || n > 10) {
        printf("n must be between 1 and 10\n");
        return 1;
    }
    printf("Enter A:\n");
    for (int i = 0; i < n; i++)
        scanf("%f", &a[i]);
    float *d_a, *d_b;
    int size = sizeof(float) * n;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    // one thread per element: n blocks of 1 thread
    add<<<n, 1>>>(d_a, d_b);
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
        printf("%f ", b[i]);
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
3,606 | #include <iostream>
#include <assert.h>
#include <limits.h>
#include <vector>
#include <curand.h>
#include <curand_kernel.h>
#include <algorithm>
using namespace std;
// #define RNG_DEF int& rx
// #define RNG_ARGS rx
// #define MY_RAND_MAX ((1U << 31) - 1)
// Command line arguments that get set below (these give default values)
float sdscale = 1.0; // scale the SDs by this
int N = 1<<20;
int steps = 1000;
int outer = 100;
int thin = 1; // how many outer blocks to skip?
int seed = -1;
int burn = 0;
int QUIET = 0;
int WHICH_GPU = 0;
int FIRST_HALF_DATA = 0;
int EVEN_HALF_DATA = 0;
int SHOW_CONSTANTS = 0;
string in_file_path = "data.txt";
string out_path = "out/";
const unsigned int nBlocks = 10 ;
const unsigned int BLOCK_SIZE = 64;
// const unsigned int HARDARE_MAX_X_BLOCKS = 4096;
// const unsigned int HARDWARE_MAX_THREADS_PER_BLOCK = 1024;
// __device__ __host__ int cuda_rand(RNG_DEF) {
// //http://rosettacode.org/wiki/Linear_congruential_generator#C
// return rx = (rx * 1103515245 + 12345) & MY_RAND_MAX;
// }
// __device__ int random_int(int n) {
// // number in [0,(n-1)]
// int divisor = MY_RAND_MAX/(n+1);
// int retval;
//
// do {
// retval = cuda_rand(RNG_ARGS) / divisor;
// } while (retval >= n);
//
// return retval;
// }
/* Boltzmann-style acceptance weight for a state of cost `cost` at
 * inverse temperature `beta`.
 * NOTE(review): exp2f is base-2 (returns 2^(-beta*cost)), not the
 * natural expf — looks deliberate here, but confirm against the math. */
__device__ float density_function(float beta, float cost) {
    float exponent = -beta * cost;
    return exp2f(exponent);
}
/* Returns a pseudo-random value in [0, maxLimit) derived from `seed`.
 * A fresh curand state is initialised on every call, so the result is
 * a pure function of the seed — callers vary the seed to vary the
 * output. */
__device__ float get_randomNum(unsigned int seed, int maxLimit) {
    curandState_t state;
    // sequence 0, offset 0: determinism per seed is intentional here
    curand_init(seed, 0, 0, &state);
    return curand(&state) % maxLimit;
}
/* Selects GPU WHICH_GPU and resets it; exits if the index is invalid.
 * Fix: the original accepted WHICH_GPU == deviceCount (and negative
 * values), but valid device ordinals are 0 .. deviceCount-1, so
 * cudaSetDevice failed for those cases. */
void setUpDevices(){
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (WHICH_GPU >= 0 && WHICH_GPU < deviceCount) {
        cudaError_t err = cudaSetDevice(WHICH_GPU);
        if (err != cudaSuccess)
            cout << "CUDA error:" << cudaGetErrorString(err) << endl;
    } else {
        cout << "Invalid GPU device " << WHICH_GPU << endl;
        exit(-1);
    }
    int wgpu;
    cudaGetDevice(&wgpu);
    // NOTE(review): resetting right after selection discards any prior
    // context on the device — confirm this is intended.
    cudaDeviceReset();
}
// Grid-stride element-wise add: y[i] += x[i] for every i in [0, n),
// valid for any launch configuration.
__global__
void add(int n, float *x, float *y){
    int stride = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += stride)
        y[i] += x[i];
}
/* Smoke test for add(): fills x=1 and y=2 in unified memory, runs the
 * kernel over N elements, then reports the maximum deviation from the
 * expected value 3. */
void debug_add(){
    float *x = NULL;
    float *y = NULL;
    // unified memory: visible to both host and device
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // ceil(N / BLOCK_SIZE) blocks so the whole range is covered
    int numOfBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    add<<<numOfBlocks, BLOCK_SIZE>>>(N, x, y);
    // must sync before the host touches managed memory again
    cudaDeviceSynchronize();
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;
    cudaFree(x);
    cudaFree(y);
}
/* Placeholder cost: the sum of all entries, scaled down by 1000. */
__device__ float cost_function(float * data, int length){
    float total = 0.0f;
    int i = 0;
    while (i < length) {
        total += data[i];
        ++i;
    }
    return total / 1000;
}
/* Swaps the temperatures of two distinct, randomly chosen chains
 * (parallel-tempering style move).
 * Fix: the retry loop re-drew t2 from get_randomNum with the SAME seed
 * (seed + 100) on every pass; since that function is deterministic per
 * seed, a first collision (t2 == t1) meant an infinite loop.  The seed
 * is now perturbed on each retry so every attempt is a fresh draw. */
__device__
void changeTemparature(float * temparature, unsigned int seed){
    int t1 = get_randomNum(seed, nBlocks);
    int t2 = t1;
    unsigned int retrySeed = seed + 100;
    while (t2 == t1) {
        t2 = get_randomNum(retrySeed, nBlocks);
        retrySeed += 1;   // new seed => new value on the next attempt
    }
    float tmp = temparature[t1];
    temparature[t1] = temparature[t2];
    temparature[t2] = tmp;
}
/* One annealing sweep: at each step, the thread whose index matches the
 * pre-drawn pickedIdAddr[t] proposes a new random value for its slot in
 * sArray and accepts/rejects it based on the cost densities.
 * Fix: the acceptance threshold was declared as `float t` inside the
 * loop body, shadowing the int loop counter `t` — legal, but highly
 * error-prone; renamed to `threshold`. */
__device__
void ActualHW(int randTimes, int numofObjs, unsigned int seed, int* pickedIdAddr, float*sArray, float * cost, float *temparature){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    for (int t = 0; t < randTimes; t++) {
        if (pickedIdAddr[t] != threadIdx.x)
            continue;
        // every 10th step: swap two chain temperatures
        if (t % 10 == 0)
            changeTemparature(temparature, seed + index);
        float cost_pri = cost_function(sArray, numofObjs);
        float p0 = density_function(temparature[blockIdx.x], cost_pri);
        // propose: replace this thread's value with a random draw
        float tmpKeep = sArray[threadIdx.x];
        sArray[threadIdx.x] = get_randomNum(seed + index, 1000);
        float cost_post = cost_function(sArray, numofObjs);
        float p = density_function(temparature[blockIdx.x], cost_post);
        float alpha = min(1.0f, p / p0);
        float threshold = 0.8f;
        // NOTE(review): the proposal is REVERTED when alpha > threshold,
        // which inverts the usual Metropolis acceptance rule — confirm.
        if (alpha > threshold)
            sArray[threadIdx.x] = tmpKeep;
        else {
            if (sArray[threadIdx.x] > tmpKeep)
                printf("%f - %f\n", tmpKeep, sArray[threadIdx.x]);
            cost[blockIdx.x] = cost_post;
        }
    }
}
/* Per-block annealing driver.  Launch as <<<nBlocks, numofObjs, S>>>
 * where S = nBlocks*numofObjs*sizeof(float) + 2*nBlocks*sizeof(float):
 * the dynamic shared memory is carved into three regions below.
 * NOTE(review): shared memory is per-block, yet sArray/lastSumUp/
 * temparature are indexed by blockIdx — each block only ever touches
 * its own copy, so most of each block's shared allocation is unused;
 * confirm this layout is intended. */
__global__
void simpleHW(int numofObjs, float * gValues, float* gArray,unsigned int seed,int*pickedIdxs, int randTimes){
// carve the dynamic shared-memory blob into three sub-arrays:
// sArray[nBlocks*numofObjs] | lastSumUp[nBlocks] | temparature[nBlocks]
extern __shared__ float sharedMem[];
float * sArray = sharedMem;
float * lastSumUp = (float *) & sArray[nBlocks*numofObjs];
float * temparature = (float *) & lastSumUp[nBlocks];
// initialize this block's slice from the global seed values
int startIdx = blockIdx.x * numofObjs;
int idx = startIdx+ threadIdx.x;
sArray[idx] = gValues[threadIdx.x];
// negative temperature drawn per block; get_randomNum returns float,
// so /10 is a float division (no truncation)
temparature[blockIdx.x] = -get_randomNum(seed+blockIdx.x, 100) / 10;
// NOTE(review): every thread in the block repeats this serial sum into
// the same shared slot — redundant but consistent writes.
lastSumUp[blockIdx.x] = 0;
for(int i = 0;i<numofObjs; i++)
lastSumUp[blockIdx.x] += gValues[i];
// each block consumes its own pre-drawn sequence of picked indices
int* pickedIdAddr = &pickedIdxs[blockIdx.x * randTimes];
ActualHW(randTimes, numofObjs, seed, pickedIdAddr, &sArray[startIdx], lastSumUp, temparature);
__syncthreads();
// publish the block's final state back to global memory
gArray[idx] = sArray[idx];
}
/* Host driver: seeds gValues with 1000s, pre-draws the per-step picked
 * indices on the host, launches simpleHW with the required dynamic
 * shared memory, then prints each block's resulting state. */
void simpleStructure(){
    int numofObjs = 5;
    int nTimes = 20000;
    int totalSize = nBlocks * numofObjs * sizeof(float);
    float *gValues = NULL;
    float *gArray = NULL;
    int *pickedIdxs = NULL;
    cudaMallocManaged(&gValues, numofObjs * sizeof(float));
    for (int i = 0; i < numofObjs; i++)
        gValues[i] = 1000;
    cudaMallocManaged(&pickedIdxs, nBlocks * nTimes * sizeof(int));
    for (int i = 0; i < nBlocks * nTimes; i++)
        pickedIdxs[i] = rand() % numofObjs;
    cudaMallocManaged(&gArray, totalSize);
    // third launch argument = dynamic shared memory: the state array
    // plus the per-block sum and temperature arrays
    simpleHW<<<nBlocks, numofObjs, totalSize + 2*nBlocks*sizeof(float)>>>(numofObjs, gValues, gArray,time(NULL),pickedIdxs,nTimes);
    // managed memory: sync before the host reads the results
    cudaDeviceSynchronize();
    for (int i = 0; i < nBlocks; i++) {
        for (int j = 0; j < numofObjs; j++)
            cout << gArray[i * numofObjs + j] << " ";
        cout << endl;
    }
    cudaFree(gValues);
    cudaFree(gArray);
    cudaFree(pickedIdxs);
}
/* Entry point: selects a GPU, seeds the host RNG (reproducibly when a
 * fixed seed was configured), and runs the annealing demo. */
int main(int argc, char** argv){
    setUpDevices();
    // seed == -1 means "no fixed seed": fall back to wall-clock time
    if (seed == -1)
        seed = time(NULL);
    srand(seed);
    simpleStructure();
    return 0;
}
|
3,607 | #include <stdio.h>
#include <stdlib.h>
#define KNZ_LEN 20
#define DIM_COUNT 3
#define DIM_SIZE 10000
#define FACT_SIZE 250000
// Datenstruktur
typedef struct _dim {
long id;
char knz[KNZ_LEN];
} DimTable;
typedef struct _factIn {
char knz[DIM_COUNT][KNZ_LEN];
} FactTableIn;
typedef struct _factOut {
long id[DIM_COUNT];
} FactTableOut;
/* Fixed-length byte comparison for device code.  Unlike strncmp, this
 * compares exactly n bytes and does NOT stop at a NUL terminator —
 * callers pass zero-padded fixed-size key buffers. */
__device__ int nvstrncmp(const char* s1, const char* s2, size_t n)
{
    for (size_t k = 0; k < n; ++k) {
        unsigned char c1 = (unsigned char)s1[k];
        unsigned char c2 = (unsigned char)s2[k];
        if (c1 != c2)
            return c1 - c2;
    }
    return 0;
}
/* Hash-less join: for each fact row in this thread's chunk, linearly
 * scans the dimension table for a matching key and records its id in
 * out[i].id[idx].  Launch with one block; each thread handles a
 * contiguous chunk of fact_len/blockDim.x rows.
 * Fix: the original loop ran `i` from `start` to `len` (the chunk SIZE,
 * not the chunk END), so only thread 0 did any work and most rows were
 * never joined.  The bound is now start + len, and the last thread also
 * covers the remainder when fact_len is not divisible by blockDim.x. */
__global__ void join(DimTable *dim, int dim_len,
FactTableIn *in, FactTableOut *out, int fact_len,
int idx)
{
    int t = threadIdx.x;
    int max = blockDim.x;
    int len = (fact_len / max);
    int start = len * t;
    int end = (t == max - 1) ? fact_len : start + len;
    for (int i = start; i < end; i++) {
        for (int j = 0; j < dim_len; j++) {
            if (nvstrncmp(dim[j].knz, in[i].knz[idx], KNZ_LEN) == 0) {
                out[i].id[idx] = dim[j].id;
                break;
            }
        }
    }
}
/* Builds synthetic dimension and fact tables, joins each dimension
 * against the facts on the GPU, and prints the first five joined rows.
 * NOTE(review): calloc() is called as calloc(size, count) throughout —
 * harmless since the product is the same, but the argument order is
 * swapped relative to the standard signature. */
int main(int argc, char *argv[]) {
// Prepare dimension data (each with exactly DIM_SIZE entries)
// (unrealistic, but convenient here)
DimTable **dims = (DimTable**)calloc(sizeof(DimTable*), DIM_COUNT);
for(int i = 0; i < DIM_COUNT; i++) {
dims[i] = (DimTable*)calloc(sizeof(DimTable), DIM_SIZE);
for(int j = 0; j < DIM_SIZE; j++) {
dims[i][j].id = j;
sprintf(dims[i][j].knz, "KNZ%d-%d", i, j);
}
}
// Prepare fact data: every fact row references key j % DIM_SIZE
FactTableIn *factIn = (FactTableIn*)calloc(sizeof(FactTableIn), FACT_SIZE);
FactTableOut *factOut = (FactTableOut*)calloc(sizeof(FactTableOut), FACT_SIZE);
for(int i = 0; i < DIM_COUNT; i++) {
for(int j = 0; j < FACT_SIZE; j++) {
sprintf(factIn[j].knz[i], "KNZ%d-%d", i % DIM_COUNT, j % DIM_SIZE);
}
}
// Copy data to the device (dimension buffer is reused per iteration)
DimTable *dev_dim;
FactTableIn *dev_factIn;
FactTableOut *dev_factOut;
cudaMalloc(&dev_dim, sizeof(DimTable) * DIM_SIZE);
cudaMalloc(&dev_factIn, sizeof(FactTableIn) * FACT_SIZE);
cudaMalloc(&dev_factOut, sizeof(FactTableOut) * FACT_SIZE);
cudaMemcpy(dev_factIn, factIn, sizeof(FactTableIn) * FACT_SIZE, cudaMemcpyHostToDevice);
// Compute the join: one kernel launch per dimension column,
// single block of 200 threads (see join() for the chunking)
printf("Fakten und Dimmensionen vorbereitet\n");
for(int i = 0; i < DIM_COUNT; i++) {
cudaMemcpy(dev_dim, dims[i], sizeof(DimTable) * DIM_SIZE, cudaMemcpyHostToDevice);
join<<<1, 200>>>(dev_dim, DIM_SIZE,
dev_factIn, dev_factOut, FACT_SIZE, i);
cudaDeviceSynchronize();
}
printf("Join abgeschlossen\n");
// Copy the joined ids back to the host
cudaMemcpy(factOut, dev_factOut, sizeof(FactTableOut) * FACT_SIZE, cudaMemcpyDeviceToHost);
cudaFree(dev_dim);
cudaFree(dev_factIn);
cudaFree(dev_factOut);
// Print the result (first five fact rows, all dimension columns)
printf("Top 5:\n");
for(int i = 0; i < 5; i++) {
for(int j = 0; j < DIM_COUNT; j++) {
printf("%s->%ld | ", factIn[i].knz[j], factOut[i].id[j]);
}
printf("\n");
}
free(factIn);
for(int i = 0; i < DIM_COUNT; i++) {
free(dims[i]);
}
free(dims);
free(factOut);
return 0;
}
3,608 | #include "includes.h"
#define INTERVALS 1000000
// Max number of threads per block
#define THREADS 512
#define BLOCKS 64
double calculatePiCPU();
// NOTE: refers to a synchronous error-checking helper (enabled with nvcc -DDEBUG) that is not present in this excerpt.
/* Final reduction stage: collapses the per-block partial sums stored at
 * g_sum[0], g_sum[THREADS], g_sum[2*THREADS], ... into s_sum and
 * tree-reduces them; launch with ONE block of THREADS threads.
 * Fix: the shared-memory stores were not followed by a barrier before
 * the first cross-thread read in the reduction loop — a data race; a
 * __syncthreads() is now issued before the loop begins. */
__global__ static void sumReduce(int *n, float *g_sum)
{
int tx = threadIdx.x;
__shared__ float s_sum[THREADS];
// stage the BLOCKS partial sums; pad the rest with zero
if (tx < BLOCKS)
s_sum[tx] = g_sum[tx * THREADS];
else
{
s_sum[tx] = 0.0f;
}
__syncthreads();   // all partial sums must be staged before any read
// tree reduction: halve the active range each pass
for (int i = blockDim.x / 2; i > 0; i >>= 1)
{
if (tx < i)
{
s_sum[tx] += s_sum[tx + i];
}
__syncthreads();
}
g_sum[tx] = s_sum[tx];
}
3,609 | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR"�) (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
// Iteration count
#define ITER 20
void print_vectorf(float *vector, int num);
/* PageRank driver: parses a graph into CSR form, uploads it, runs ITER
 * iterations of the two-phase pagerank kernels, and writes the ranks to
 * result.out.
 * Fixes: the row_d cudaMemcpy error message was corrupted by a stray
 * "#endif", and the col_d message reported num_nodes for a buffer sized
 * by num_edges. */
int main(int argc, char **argv)
{
    char *tmpchar;
    int num_nodes;
    int num_edges;
    int file_format = 1;
    bool directed = 0;
    cudaError_t err = cudaSuccess;
    // Exactly two arguments: <graph file> <format code>
    if (argc == 3) {
        tmpchar = argv[1]; // Graph inputfile
        file_format = atoi(argv[2]); // File format
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }
    // Allocate the csr structure
    csr_array *csr;
    // Parse graph files into csr structure
    if (file_format == 1) {
        // Metis
        csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 0) {
        // Dimacs9
        csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
    } else if (file_format == 2) {
        // Matrix market
        csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
    } else {
        printf("reserve for future");
        exit(1);
    }
    // Host-side result buffer
    float *rank_array = (float *)malloc(num_nodes * sizeof(float));
    if (!rank_array) {
        fprintf(stderr, "rank array not allocated successfully\n");
        return -1;
    }
    int *row_d;
    int *col_d;
    int *data_d;
    float *pagerank1_d;
    float *pagerank2_d;
    // Create device-side buffers for the graph
    err = cudaMalloc(&row_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&col_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    // NOTE(review): data_d is allocated but never written by the host;
    // the kernels receive it uninitialized — confirm kernel.cu ignores it.
    err = cudaMalloc(&data_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc data_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    // Create buffers for pagerank
    err = cudaMalloc(&pagerank1_d, num_nodes * sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc pagerank1_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&pagerank2_d, num_nodes * sizeof(float));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc pagerank2_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    double timer1 = gettime();
#ifdef GEM5_FUSION
    m5_work_begin(0, 0);
#endif
    // Copy the data to the device-side buffers
    err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        // fix: message was corrupted by a stray "#endif" in the original
        fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        // fix: message previously reported num_nodes for an edges-sized buffer
        fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    // Set up work dimensions: 1-D grid, 256 threads per block
    int block_size = 256;
    int num_blocks = (num_nodes + block_size - 1) / block_size;
    dim3 threads(block_size, 1, 1);
    dim3 grid(num_blocks, 1, 1);
    double timer3 = gettime();
    // Launch the initialization kernel
    inibuffer <<<grid, threads>>>(row_d, pagerank1_d, pagerank2_d, num_nodes,
                                  num_edges);
    cudaThreadSynchronize();  // NOTE(review): deprecated; cudaDeviceSynchronize is the modern form
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }
    // Run PageRank for a fixed number of iterations. TODO: convergence test
    for (int i = 0; i < ITER; i++) {
        // Launch pagerank kernel 1
        pagerank1 <<<grid, threads>>>(row_d, col_d, data_d, pagerank1_d,
                                      pagerank2_d, num_nodes, num_edges);
        // Launch pagerank kernel 2
        pagerank2 <<<grid, threads>>>(row_d, col_d, data_d, pagerank1_d,
                                      pagerank2_d, num_nodes, num_edges);
    }
    cudaThreadSynchronize();
    double timer4 = gettime();
    // Copy the rank buffer back
    err = cudaMemcpy(rank_array, pagerank1_d, num_nodes * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }
#ifdef GEM5_FUSION
    m5_work_end(0, 0);
#endif
    double timer2 = gettime();
    // Report timing characteristics
    printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
    printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
    // Print rank array
    print_vectorf(rank_array, num_nodes);
#endif
    // Free the host-side arrays
    free(rank_array);
    csr->freeArrays();
    free(csr);
    // Free the device buffers
    cudaFree(row_d);
    cudaFree(col_d);
    cudaFree(data_d);
    cudaFree(pagerank1_d);
    cudaFree(pagerank2_d);
    return 0;
}
/* Writes `num` floats, one per line, to "result.out".
 * Fixes: the original continued after a failed fopen and called
 * fprintf/fclose on a NULL stream (undefined behaviour), and its error
 * message named the wrong file ("result.txt"). */
void print_vectorf(float *vector, int num)
{
    FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }
    for (int i = 0; i < num; i++) {
        fprintf(fp, "%f\n", vector[i]);
    }
    fclose(fp);
}
|
3,610 | #include <assert.h>
#include <cuda.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
static char* program_name;
// Definition only — this kernel is never launched in this file.
// NOTE(review): it looks buggy and should be confirmed before use:
//  - b[i + 1][j] and b[i][j + 1] each appear TWICE; a 4-point Jacobi
//    stencil would normally use b[i-1][j] and b[i][j-1] as well.
//  - i + 1 / j + 1 reach N at the upper boundary, reading past the
//    last row/column when i == N-1 or j == N-1.
//  - a is int**, so the double result of 0.8 * (...) is truncated on
//    assignment.
__global__ void Jacobi(int** a, const int** b, const int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < N && j < N) {
a[i][j] = 0.8 * (b[i + 1][j] + b[i + 1][j] + b[i][j + 1] + b[i][j + 1]);
}
}
// Usage
/* Prints the command-line usage text for this program to `stream`,
 * then terminates the process with `exit_code`.  The option list must
 * be kept in sync with the getopt_long table in main(). */
void print_usage(FILE* stream, int exit_code) {
fprintf(stream, "Usage: %s options\n", program_name);
fprintf(stream,
" -h --help Display this usage information.\n"
" -f --file filename File containing coefficient matrix.\n"
" -i --Ni int Number of elements in Y direction "
"(default=512).\n"
" -j --Nj int Number of elements in X direction "
"(default=512).\n"
" -n --iterations int Number of iterations (default=10000).\n"
" -k --kernel [1,2] 1: unoptimized, 2: optimized kernel "
"(default).\n"
" -t --tilesize int Size of each thread block in kernel 2 "
"(default=4).\n");
exit(exit_code);
}
// Host version of the Jacobi method
/* CPU reference: one Jacobi sweep.  For each row i,
 *   x_next[i] = (b[i] - sum_{j != i} A[i][j] * x_now[j]) / A[i][i]
 * where A is a row-major Ni x Nj matrix. */
void jacobiOnHost(float* x_next, float* A, float* x_now, float* b, int Ni,
                  int Nj) {
  for (int row = 0; row < Ni; row++) {
    const float* Arow = A + row * Nj;   // base of this row, hoisted
    float sigma = 0.0f;
    for (int col = 0; col < Nj; col++) {
      if (row != col) sigma += Arow[col] * x_now[col];
    }
    x_next[row] = (b[row] - sigma) / Arow[row];
  }
}
// Device version of the Jacobi method
/* Naive kernel: one thread per row (threadIdx.x == row), single block.
 * No bounds guard, so the launch must use exactly Ni threads. */
__global__ void jacobiOnDevice(float* x_next, float* A, float* x_now, float* b,
                               int Ni, int Nj) {
  int row = threadIdx.x;
  float sigma = 0.0f;
  for (int col = 0; col < Nj; col++) {
    if (row != col) sigma += A[row * Nj + col] * x_now[col];
  }
  x_next[row] = (b[row] - sigma) / A[row * Nj + row];
}
// Optimized device version of the Jacobi method
/* Tiled kernel: global thread index selects the row, guarded against
 * Ni not being a multiple of the block size.  The row base offset is
 * computed once and kept in a register so the index multiply is not
 * repeated inside the inner loop. */
__global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now,
                                        float* b, int Ni, int Nj) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= Ni) return;
  int rowBase = row * Nj;   // hoisted row offset (register)
  float sigma = 0.0f;
  for (int j = 0; j < Nj; j++) {
    if (row != j) sigma += A[rowBase + j] * x_now[j];
  }
  x_next[row] = (b[row] - sigma) / A[rowBase + row];
}
// device selection (copied from previous assignment)
/* Picks the CUDA device with the most multiprocessors.
 * gpu_num:  in = default choice; out = best device (only changed when
 *           more than one device is present).
 * num_devs: out = number of devices found. */
static void selectGpu(int* gpu_num, int* num_devs) {
  cudaGetDeviceCount(num_devs);
  if (*num_devs <= 1) return;   // single device: keep the default
  int best = *gpu_num;
  int max_cores = 0;
  for (int dev = 0; dev < *num_devs; dev++) {
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, dev);
    if (props.multiProcessorCount > max_cores) {
      max_cores = props.multiProcessorCount;
      best = dev;
    }
  }
  *gpu_num = best;
}
// device test (copied from previous assignment)
/* Verifies that devID refers to real CUDA hardware; the emulated
 * device reports compute capability 9999.9999. */
static void testDevice(int devID) {
  cudaDeviceProp deviceProp;
  cudaGetDeviceProperties(&deviceProp, devID);
  bool emulated = (deviceProp.major == 9999 && deviceProp.minor == 9999);
  if (emulated)
    printf("There is no device supporting CUDA.\n");
  else
    printf("Using GPU device number %d.\n", devID);
}
/* Entry point: parses options, reads the coefficient matrix, runs
 * `iter` Jacobi sweeps on the host and on the device, and reports
 * timings plus the mean host/device discrepancy.
 * Fixes relative to the original:
 *  - `line` passed to getline() was uninitialized — getline requires
 *    NULL or a malloc'd buffer, so this was undefined behaviour; it is
 *    now NULL-initialized and freed afterwards.
 *  - `fname` was used unchecked when -f was omitted; now guarded.
 *  - the input file handle and the x_h/x_d buffers were leaked. */
int main(int argc, char* argv[]) {
    // timing variables (clock() ticks)
    time_t start, end, start_h, end_h, start_d, end_d;
    float t_full, t_host, t_dev;
    start = clock();
    // host and device data buffers
    float *x_now, *x_next, *A, *b, *x_h, *x_d;
    float *x_now_d, *x_next_d, *A_d, *b_d;
    // problem parameters
    int N, Ni, Nj, iter, kernel, tileSize;
    int ch;
    int i, k;
    char* fname = NULL;  // fix: initialize so a missing -f is detectable
    FILE* file;
    // Argument parsing
    static struct option long_options[] = {
        {"file", required_argument, NULL, 'f'},
        {"Ni", optional_argument, NULL, 'i'},
        {"Nj", optional_argument, NULL, 'j'},
        {"iterations", optional_argument, NULL, 'n'},
        {"kernel", optional_argument, NULL, 'k'},
        {"tilesize", optional_argument, NULL, 't'},
        {"help", optional_argument, NULL, 'h'},
        {NULL, 0, NULL, 0}};
    program_name = argv[0];
    Ni = 512, Nj = 512, iter = 10000, kernel = 2, tileSize = 4;
    ch = 0;
    while ((ch = getopt_long(argc, argv, "f:i:j:n:k:h", long_options, NULL)) !=
           -1) {
        switch (ch) {
            case 'f':
                fname = optarg;
                break;
            case 'i':
                Ni = atoi(optarg);
                break;
            case 'j':
                Nj = atoi(optarg);
                break;
            case 'n':
                iter = atoi(optarg);
                break;
            case 'k':
                kernel = atoi(optarg);
                break;
            case 't':
                tileSize = atoi(optarg);
                break;
            case 'h':
                print_usage(stderr, 1);
                exit(EXIT_FAILURE);
            case '?':
                print_usage(stderr, 1);
                exit(EXIT_FAILURE);
            default:
                abort();
        }
    }
    // fix: fail loudly when no coefficient file was given
    if (fname == NULL) {
        fprintf(stderr, "Missing required -f <file> argument.\n");
        print_usage(stderr, 1);  // exits
    }
    N = Ni * Nj;
    printf("\nRunning Jacobi method:\n");
    printf("======================\n\n");
    printf("Coefficient matrix given in file: \n%s\n\n", fname);
    printf("Parameters:\n");
    printf("N=%d, Ni=%d, Nj=%d, ", N, Ni, Nj);
    printf("iterations=%d, kernel=%d, tilesize=%d\n", iter, kernel, tileSize);
    // Allocate memory on host
    x_next = (float*)malloc(Ni * sizeof(float));
    A = (float*)malloc(N * sizeof(float));
    x_now = (float*)malloc(Ni * sizeof(float));
    b = (float*)malloc(Ni * sizeof(float));
    x_h = (float*)malloc(Ni * sizeof(float));
    x_d = (float*)malloc(Ni * sizeof(float));
    // Initialize result vector x
    for (i = 0; i < Ni; i++) {
        x_now[i] = 0;
        x_next[i] = 0;
    }
    // Read the N matrix values, then the b vector, one value per line
    file = fopen(fname, "r");
    if (file == NULL) exit(EXIT_FAILURE);
    char* line = NULL;  // fix: getline() requires NULL or a malloc'd buffer
    size_t len = 0;
    i = 0;
    while ((getline(&line, &len, file)) != -1) {
        if (i < N)
            A[i] = atof(line);
        else
            b[i - N] = atof(line);
        i++;
    }
    free(line);    // fix: buffer allocated by getline was leaked
    fclose(file);  // fix: file handle was leaked
    start_h = clock();
    // Run "iter" iterations of the Jacobi method on HOST,
    // ping-ponging between x_now and x_next.
    for (k = 0; k < iter; k++) {
        if (k % 2)
            jacobiOnHost(x_now, A, x_next, b, Ni, Nj);
        else
            jacobiOnHost(x_next, A, x_now, b, Ni, Nj);
    }
    end_h = clock();
    // Save result from host in x_h
    for (i = 0; i < Nj; i++) x_h[i] = x_next[i];
    // Re-initialize result vector x for device computation
    for (i = 0; i < Ni; i++) {
        x_now[i] = 0;
        x_next[i] = 0;
    }
    // Check available device.
    int devID = 0, num_devs = 1;
    selectGpu(&devID, &num_devs);
    testDevice(devID);
    // Allocate memory on the device
    assert(cudaSuccess == cudaMalloc((void**)&x_next_d, Ni * sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void**)&A_d, N * sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void**)&x_now_d, Ni * sizeof(float)));
    assert(cudaSuccess == cudaMalloc((void**)&b_d, Ni * sizeof(float)));
    // Copy data -> device
    cudaMemcpy(x_next_d, x_next, sizeof(float) * Ni, cudaMemcpyHostToDevice);
    cudaMemcpy(A_d, A, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(x_now_d, x_now, sizeof(float) * Ni, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, sizeof(float) * Ni, cudaMemcpyHostToDevice);
    // Launch configuration.
    // Un-optimized kernel: a single block of Ni threads.
    int blockSize = Ni;
    int nBlocks = 1;
    // Optimized kernel: ceil(Ni / tileSize) blocks of tileSize threads.
    int nTiles = Ni / tileSize + (Ni % tileSize == 0 ? 0 : 1);
    int gridHeight = Nj / tileSize + (Nj % tileSize == 0 ? 0 : 1);
    int gridWidth = Ni / tileSize + (Ni % tileSize == 0 ? 0 : 1);
    printf("w=%d, h=%d\n", gridWidth, gridHeight);
    dim3 dGrid(gridHeight, gridWidth), dBlock(tileSize, tileSize);
    start_d = clock();
    // Run "iter" iterations of the Jacobi method on DEVICE
    if (kernel == 1) {
        printf("Using un-optimized kernel.\n");
        for (k = 0; k < iter; k++) {
            if (k % 2)
                jacobiOnDevice<<<nBlocks, blockSize>>>(x_now_d, A_d, x_next_d, b_d, Ni,
                                                       Nj);
            else
                jacobiOnDevice<<<nBlocks, blockSize>>>(x_next_d, A_d, x_now_d, b_d, Ni,
                                                       Nj);
        }
    } else {
        printf("Using optimized kernel.\n");
        for (k = 0; k < iter; k++) {
            if (k % 2)
                jacobiOptimizedOnDevice<<<nTiles, tileSize>>>(x_now_d, A_d, x_next_d,
                                                              b_d, Ni, Nj);
            else
                jacobiOptimizedOnDevice<<<nTiles, tileSize>>>(x_next_d, A_d, x_now_d,
                                                              b_d, Ni, Nj);
        }
    }
    end_d = clock();
    // Data <- device (blocking copy doubles as synchronization)
    cudaMemcpy(x_d, x_next_d, sizeof(float) * Ni, cudaMemcpyDeviceToHost);
    // Free memory
    free(x_next);
    free(A);
    free(x_now);
    free(b);
    cudaFree(x_next_d);
    cudaFree(A_d);
    cudaFree(x_now_d);
    cudaFree(b_d);
    end = clock();
    printf("\nResult after %d iterations:\n", iter);
    // mean absolute host/device difference
    float err = 0.0;
    for (i = 0; i < Ni; i++) {
        err += abs(x_h[i] - x_d[i]) / Ni;
    }
    printf("x_h[%d]=%f\n", 0, x_h[0]);
    printf("x_d[%d]=%f\n", 0, x_d[0]);
    t_full = ((float)end - (float)start) / CLOCKS_PER_SEC;
    t_host = ((float)end_h - (float)start_h) / CLOCKS_PER_SEC;
    t_dev = ((float)end_d - (float)start_d) / CLOCKS_PER_SEC;
    printf("\nTiming:\nFull: %f\nHost: %f\nDevice: %f\n\n", t_full, t_host,
           t_dev);
    printf("Relative error: %f\n", err);
    free(x_h);  // fix: leaked in the original
    free(x_d);  // fix: leaked in the original
    printf("\nProgram terminated successfully.\n");
    return 0;
}
3,611 | #include <cuda.h>
#define THRESHOLD 10010000
// Device-side in-place bubble sort of `length` ints at `pixelsToSort`.
// O(length^2); intended for the short pixel runs produced by sortRows.
__device__ void bubbleSort(int *pixelsToSort, int length){
    for (int pass = 0; pass < length; ++pass)
    {
        for (int idx = 0; idx + 1 < length; ++idx)
        {
            int lhs = pixelsToSort[idx];
            int rhs = pixelsToSort[idx + 1];
            if (lhs > rhs)
            {
                pixelsToSort[idx] = rhs;
                pixelsToSort[idx + 1] = lhs;
            }
        }
    }
}
// Device-side in-place bubble sort of `length` ints.
// Fix: the original used the XOR-swap trick; it has no correctness or
// performance benefit here (the two slots are distinct), performs three
// dependent read-modify-writes per swap, and is a well-known anti-idiom
// that blocks compiler optimization — replaced with a plain temp swap.
__device__ void optimizedBubbleSort(int *pixelsToSort, int length){
    for (int i = 0; i < length; i++)
    {
        for (int j = 0; j < length - 1; j++)
        {
            if (pixelsToSort[j] > pixelsToSort[j + 1])
            {
                int tmp = pixelsToSort[j];
                pixelsToSort[j] = pixelsToSort[j + 1];
                pixelsToSort[j + 1] = tmp;
            }
        }
    }
}
// Scan row `row` of `image` starting at column `x`; return the first
// column whose pixel differs from `color` by more than THRESHOLD
// (i.e. is "not in color"), or -1 if none exists before the row end.
__device__ int cudaGetFirstNotInColor(int *image, int x, int row, int imageWidth, int color){
    const int *rowBase = image + row * imageWidth;
    int col = x;
    while (col < imageWidth)
    {
        if (color - rowBase[col] > THRESHOLD)
            return col;
        ++col;
    }
    return -1;
}
// Starting just after column `x` in row `row`, return the column
// preceding the first pixel that is back "in color" (difference from
// `color` at most THRESHOLD).  If the out-of-color run extends to the
// end of the row, return the last column index.
__device__ int cudaGetNextInColor(int *image, int x, int row, int imageWidth, int color){
    const int *rowBase = image + row * imageWidth;
    for (int col = x + 1; col < imageWidth; ++col)
    {
        if (color - rowBase[col] <= THRESHOLD)
            return col - 1;
    }
    return imageWidth - 1;
}
// One thread per image row: locate each run of out-of-color pixels and
// bubble-sort it in place ("pixel sorting" effect).
// Fix: the original passed startingX to cudaGetNextInColor *before*
// checking the -1 "no run found" sentinel; the guard now comes first.
// NOTE(review): the sorted span is [startingX, finishX) — the run's
// last pixel is left in place, matching the original behavior.
__global__ void sortRows(int *image, int imageHeight, int imageWidth, int colorMode){
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= imageHeight)
        return;
    int startingX = 0;
    int finishX = 0;
    while (finishX < imageWidth)
    {
        startingX = cudaGetFirstNotInColor(image, startingX, row, imageWidth, colorMode);
        if (startingX < 0)
            break;  // no further out-of-color run in this row
        finishX = cudaGetNextInColor(image, startingX, row, imageWidth, colorMode);
        bubbleSort(&image[row * imageWidth + startingX], finishX - startingX);
        startingX = finishX + 1;
    }
}
// Like sortRows, but each thread stages a run (capped at 1024 pixels)
// into a private device-heap buffer, sorts the copy, and writes it back.
// Fixes vs. the original:
//  * memory obtained with device `new[]` was released with `free()`,
//    which is undefined behavior — now paired with `delete[]`;
//  * the device heap can be exhausted, so a failed allocation is now
//    detected instead of dereferencing a null pointer;
//  * the -1 sentinel from cudaGetFirstNotInColor is checked before the
//    value is passed on to cudaGetNextInColor.
__global__ void optimizedSortRows(int *image, int imageHeight, int imageWidth, int colorMode){
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= imageHeight)
        return;
    int *pixelsToSort = new int[1024];
    if (pixelsToSort == NULL)
        return;  // device heap exhausted; skip this row rather than fault
    int startingX = 0;
    int finishX = 0;
    while (finishX < imageWidth)
    {
        startingX = cudaGetFirstNotInColor(image, startingX, row, imageWidth, colorMode);
        if (startingX < 0)
            break;
        finishX = cudaGetNextInColor(image, startingX, row, imageWidth, colorMode);
        int runLength = finishX - startingX;
        int pixelsToSortLength = (runLength < 1024) ? runLength : 1024;
        for (int i = 0; i < pixelsToSortLength; ++i)
        {
            pixelsToSort[i] = image[row * imageWidth + startingX + i];
        }
        optimizedBubbleSort(pixelsToSort, pixelsToSortLength);
        for (int i = 0; i < pixelsToSortLength; ++i)
        {
            image[row * imageWidth + startingX + i] = pixelsToSort[i];
        }
        startingX = finishX + 1;
    }
    delete[] pixelsToSort;
}
3,612 | #include "includes.h"
// Stamp each centroid into the visualization field: round the (x, y)
// pair stored at centroidCoordinates[2*i], [2*i+1] and write the
// marker value -1.0f at that pixel of visField.  One thread per
// centroid over a 2D grid of 1D blocks.
// NOTE(review): the rounded coordinates are not clamped to the image
// bounds — assumes every centroid lies inside the image; confirm.
__global__ void MarkCentroidsKernel( float *centroidCoordinates, float *visField, int imgWidth, int imgHeight, int centroids )
{
    // Flatten the 2D grid of 1D blocks into one linear thread id.
    int threadId = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (threadId >= centroids)
        return;
    int px = lrintf(centroidCoordinates[threadId * 2]);      // centroid x
    int py = lrintf(centroidCoordinates[threadId * 2 + 1]);  // centroid y
    visField[py * imgWidth + px] = -1.00f;
}
3,613 | //This is a generated CUDA code
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
//f_alu = 1
//f_mad =8
//f_sfu =6
//d_alu =8
//d_mad =6
//b_alu =5
__constant__ float kconst[115] = {
2640.27049419,496.788317279,755.85277182,4070.19741521,6510.34703498,2039.14289025,3704.61925152,7755.66914948,
1861.26002473,1253.44798987,2629.46951817,7940.16158665,7767.42297282,5605.65608719,5373.03014677,3891.71871656,
366.965906878,8379.0600811,3930.37788456,8751.34651216,6043.60764794,9682.30745623,6178.41261724,4564.3931025,
374.255925925,4577.84509961,1671.08086172,9345.83987891,2901.2040663,4154.98833481,8208.38540292,4352.1328191,
9573.36924331,3580.73886635,269.083439352,352.007680208,5305.30563288,3596.20743502,438.919179395,9456.31492571,
8206.88128285,7265.8087816,8790.45564504,2464.18248902,4064.78800258,6832.31591731,9795.16429237,5786.85994295,
2010.51460232,4034.59881526,6585.45210739,8957.74158847,6403.29279037,3070.06110398,6175.24851691,1461.27925187,
3336.50691137,6910.80070577,9967.11557409,3549.38634683,2606.03554605,1575.68922804,4906.98800552,679.32387231,
5310.51577039,6347.74789192,4980.14693502,7079.55612995,2304.94310865,253.435257548,6597.60226307,9160.92905888,
2656.51142121,1532.34098062,9800.24073027,4697.5946278,4549.56735803,895.369414414,81.2862597505,954.653076872,
9802.40869968,3068.94073212,4343.28386194,4288.43198471,9454.29190141,687.820715136,8555.10227894,1020.62527779,
8887.51891817,1622.30149751,5517.20514654,2660.16395818,9482.94902673,8842.14459278,3834.4145655,700.939818034,
8816.58299456,4351.4402913,3601.03531056,2832.71861204,5178.80878256,3393.39850047,3436.36898163,2336.76169531,
3094.84512625,9409.78159435,1337.62467857,5439.82313727,7485.84090711,3687.98225492,3607.41760755,4976.65341527,
7191.02440951,4980.12665621,3064.46805377
};
//this is an unused kernel
// Unused helper kernel: reads one element of the constant bank so the
// `kconst` array is referenced and not discarded.
__global__ void kk(float * d)
{
    d[0] = kconst[0];
}
__global__ void kernel(float*,float*,float*,int*,int*,int *,int,float *,int *);
// Draw a pseudo-random float uniformly in [start, end] using rand().
// Resolution is limited to RAND_MAX + 1 distinct values; seed with
// srand() for reproducibility.
float uniform(float start,float end)
{
    float span = end - start;
    return start + span * rand() / RAND_MAX;
}
// Driver for the generated instruction-mix microbenchmark kernel:
// allocates and randomizes three float and three int parameter arrays
// plus large "global" scratch buffers, copies everything to the device,
// launches the PTX-based kernel once, and copies two result arrays back.
int main(int argc, char* argv[])
{
// Launch geometry: 179 blocks of 115 threads (y/z extents unused).
int gridx = 179;
int gridy = 1;
int gridz = 1;
int bx = 115;
int by = 1;
int bz = 1;
dim3 gridDim(gridx,gridy,gridz);
dim3 blockDim(bx,by,bz);
//srand( (unsigned)time( NULL ) );
srand(4);
//declare variables
int N=64,size=0,loop=0,globalN=1024*1024*128,globalspace;
//initialize the variables
N=gridx*gridy*gridz*bx*by*bz;
loop=16;
// NOTE(review): N is grown 17x — presumably so the kernel's hard-coded
// PTX "distance" strides stay inside the buffers; confirm against the
// generator before changing.
N=gridx*gridy*gridz*bx*by*bz*17;
size=(N)*sizeof(float);
globalspace=(globalN)*sizeof(float);
//declare the float pointer variables
float * f0,*fd0,*f1,*fd1,*f2,*fd2,*gf,*gf0;
//parameter f0
f0=(float*)malloc(size);
memset(f0,0,size);
cudaMalloc((void**)&fd0,size);
for(int i=0;i<N;i++)
f0[i]=uniform(1,10000);
cudaMemcpy(fd0,f0,size,cudaMemcpyHostToDevice);
//parameter f1
f1=(float*)malloc(size);
memset(f1,0,size);
cudaMalloc((void**)&fd1,size);
for(int i=0;i<N;i++)
f1[i]=uniform(1,10000);
cudaMemcpy(fd1,f1,size,cudaMemcpyHostToDevice);
//parameter f2
f2=(float*)malloc(size);
memset(f2,0,size);
cudaMalloc((void**)&fd2,size);
for(int i=0;i<N;i++)
f2[i]=uniform(1,10000);
cudaMemcpy(fd2,f2,size,cudaMemcpyHostToDevice);
//global memory declare(float)
gf=(float*)malloc(globalspace);
memset(gf,0,globalspace);
cudaMalloc((void**)&gf0,globalspace);
for(int i=0;i<globalN;i++)
gf[i]=uniform(1,10000);
cudaMemcpy(gf0,gf,globalspace,cudaMemcpyHostToDevice);
//declare the int pointer variables
int * d0,*dd0,*d1,*dd1,*d2,*dd2,*gd,*gd0;
// `size` and `globalspace` are re-purposed for the int arrays from here.
size=(N)*sizeof(int);
globalspace=(globalN)*sizeof(int);
//parameter d0
d0=(int*)malloc(size);
memset(d0,0,size);
cudaMalloc((void**)&dd0,size);
for(int i=0;i<N;i++)
d0[i]=uniform(1,10000);
cudaMemcpy(dd0,d0,size,cudaMemcpyHostToDevice);
//parameter d1
d1=(int*)malloc(size);
memset(d1,0,size);
cudaMalloc((void**)&dd1,size);
for(int i=0;i<N;i++)
d1[i]=uniform(1,10000);
cudaMemcpy(dd1,d1,size,cudaMemcpyHostToDevice);
//parameter d2
d2=(int*)malloc(size);
memset(d2,0,size);
cudaMalloc((void**)&dd2,size);
for(int i=0;i<N;i++)
d2[i]=uniform(1,10000);
cudaMemcpy(dd2,d2,size,cudaMemcpyHostToDevice);
//global memory declare(int)
gd=(int*)malloc(globalspace);
memset(gd,0,globalspace);
cudaMalloc((void**)&gd0,globalspace);
for(int i=0;i<globalN;i++)
gd[i]=uniform(1,10000);
cudaMemcpy(gd0,gd,globalspace,cudaMemcpyHostToDevice);
kernel<<<gridx*gridy,bx*by*bz>>>(fd0,fd1,fd2,dd0,dd1,dd2,loop,gf0,gd0);
cudaMemcpy(d2,dd2, size, cudaMemcpyDeviceToHost);
/*for(int i=0;i<N;i++)
printf("%d ",d2[i]);
printf("\n");*/
// NOTE(review): this float copy-back reuses `size`, which now holds
// N*sizeof(int); correct only because sizeof(int) == sizeof(float).
cudaMemcpy(f2,fd2, size, cudaMemcpyDeviceToHost);
/*for(int i=0;i<N;i++)
printf("%f ",f2[i]);
printf("\n");*/
// NOTE(review): host and device allocations are never freed; harmless
// at process exit but worth fixing if this main is ever reused.
}
// Generated instruction-mix microbenchmark kernel.  All real work is in
// hand-written inline PTX: the C parameters are only read via ld.param
// by mangled name, and the register values are deliberately meaningless
// — the kernel exists to execute a fixed mix of ALU/FMA/SFU/memory
// instructions `loop` times.
// NOTE(review): the PTX loads _param_0.._param_2 as the "d" (int) base
// pointers and _param_3.._param_5 as the "f" (float) bases — the
// reverse of the C signature order.  Presumably intentional for a
// throughput benchmark (only the addresses matter); confirm against
// the generator before reusing any results.
__global__ void kernel(float* f0,float* f1,float* f2,int *d0,int *d1,int *d2,int loop,float* gf,int* gd)
{
//declear the regs
asm volatile(".reg .u32 %rt<4>;\n\
.reg .u32 %rnt<4>;\n\
.reg .u32 %rc<4>;\n\
.reg .u32 %rnc<4>;\n\
.reg .u32 %rg<14>;\n\
.reg .u32 %rgtid;\n\
.reg .u32 %rbtid;");
//compute global threadID
asm volatile("mov.u32 %rt1,%tid.x;\n\
mov.u32 %rt2,%tid.y;\n\
mov.u32 %rt3,%tid.z;\n\
mov.u32 %rnt1,%ntid.x;\n\
mov.u32 %rnt2,%ntid.y;\n\
mov.u32 %rnt3,%ntid.z;\n\
mov.u32 %rc1,%ctaid.x;\n\
mov.u32 %rc2,%ctaid.y;\n\
mov.u32 %rc3,%ctaid.z;\n\
mov.u32 %rnc1,%nctaid.x;\n\
mov.u32 %rnc2,%nctaid.y;\n\
mov.u32 %rnc3,%nctaid.z;"); //compute threadID in a block
asm volatile("mul.lo.u32 %rg1,%rnt1,%rnt2;\n\
mul.lo.u32 %rg2,%rg1,%rt3;\n\
mul.lo.u32 %rg3,%rt2,%rnt1;\n\
add.u32 %rg4,%rt1,%rg3;\n\
add.u32 %rg5,%rg4,%rg2;\n\
mov.u32 %rbtid,%rg5;");
//compute blockid in a grid
asm volatile("mul.lo.u32 %rg6,%rnc1,%rnc2;\n\
mul.lo.u32 %rg7,%rg6,%rc3;\n\
mul.lo.u32 %rg8,%rc2,%rnc1;\n\
add.u32 %rg9,%rc1,%rg8;\n\
add.u32 %rg10,%rg9,%rg7;");
//compute blocksize
asm volatile("mul.lo.u32 %rg11,%rnt1,%rnt2;\n\
mul.lo.u32 %rg12,%rg11,%rnt3;\n\
mul.lo.u32 %rg13,%rg10,%rg12;\n\
add.u32 %rgtid,%rg13,%rbtid;");
// Working registers: parameter base/data pointers, predicates,
// float/int scratch, loop bookkeeping, and stride constants.
asm volatile(".reg .u64 %rdf<3>;\n\
.reg .u64 %rpf<3>;\n\
.reg .u64 %rdd<3>;\n\
.reg .u64 %rpd<3>;\n\
.reg .pred %p_<10>;\n\
.reg .f32 %f_<3>;\n\
.reg .s32 %d_<3>;\n\
.reg .u64 %offset;\n\
.reg .u64 %g_distance;\n\
.reg .u32 %loop;\n\
.reg .u32 %pass;\n\
.reg .u64 %distance;\n\
.reg .u64 %distance_mem;\n\
.reg .u64 %gfp;\n\
.reg .u64 %gdp;\n\
.reg .u32 %counter;");
asm volatile(".reg .u32 %rd<15>;\n\
.reg .f32 %rf<16>;\n\
.reg .f32 %rgf<32>;\n\
.reg .u32 %rgd<32>;\n\
.reg .b32 %rb<5>;\n\
.reg .f64 %rdouble<8>;\n\
.reg .f64 %rmadf<3>;\n\
.reg .s32 %ropd3;\n\
.reg .b32 %ropb<2>;\n\
mov .b32 %ropb0,652;\n\
mov .b32 %ropb1,256;\n\
mov .u64 %distance,20585;\n\
mov .u64 %distance_mem,262144;\n\
mov .u64 %g_distance,65536;\n\
mov .s32 %ropd3,5786;\n\
mov .f64 %rmadf1,19.5923428225;\n\
mov .f64 %rmadf2,23.9162632576;\n\
mov .f64 %rmadf0,86.2923868728;");
// Per-thread pointer into the __constant__ kconst table (4-byte stride
// by thread-id-within-block).
asm volatile(".reg .u64 %c_base;\n\
.reg .u64 %c_offset;\n\
.reg .u64 %c_mem;\n\
mov.u64 %c_base,kconst;\n\
mul.wide.u32 %c_offset,%rbtid,4;\n\
add.u64 %c_mem,%c_base,%c_offset;");
//get the loop number
asm volatile("ld.param.u32 %loop, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_6];");//loop
asm volatile("mul.wide.u32 %offset,%rgtid,4;");//get the address offset
//d0 data
asm volatile("ld.param.u64 %rpd0, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_0];\n\
add.u64 %rdd0,%rpd0,%offset;");
//d1 data
asm volatile("ld.param.u64 %rpd1, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_1];\n\
add.u64 %rdd1,%rpd1,%offset;");
//d2 data
asm volatile("ld.param.u64 %rpd2, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_2];\n\
add.u64 %rdd2,%rpd2,%offset;");
//f0 data
asm volatile("ld.param.u64 %rpf0, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_3];\n\
add.u64 %rdf0,%rpf0,%offset;");
//f1 data
asm volatile("ld.param.u64 %rpf1, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_4];\n\
add.u64 %rdf1,%rpf1,%offset;");
//f2 data
asm volatile("ld.param.u64 %rpf2, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_5];\n\
add.u64 %rdf2,%rpf2,%offset;");
asm volatile("ld.param.u64 %gfp, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_7];\n\
add.u64 %gfp,%gfp,%offset;");
asm volatile("ld.param.u64 %gdp, [_Z6kernelPfS_S_PiS0_S0_iS_S0__param_8];\n\
add.u64 %gdp,%gdp,%offset;");
//load the values of parameters
asm volatile("ld.global.s32 %d_0,[%rdd0+0];");
asm volatile("ld.global.s32 %d_1,[%rdd1+0];");
asm volatile("ld.global.f32 %f_0,[%rdf0+0];");
asm volatile("ld.global.f32 %f_1,[%rdf1+0];");
// Advance the input pointers by a fixed element stride before the loop.
asm volatile("mul.wide.u32 %distance,20585,4;\n\
add.u64 %rdd0,%rdd0,%distance;\n\
add.u64 %rdd1,%rdd1,%distance;\n\
add.u64 %rdf0,%rdf0,%distance;\n\
add.u64 %rdf1,%rdf1,%distance;");
asm volatile("mov.u32 %counter,0;");
// Main timed loop: one basic block executed %loop times.
asm volatile("$BB_LABEL0:");
// instruction mix
asm volatile("mov.u32 %pass,0;\n\
fma.rn.f64 %rdouble0, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf0,%rdouble0;\n\
lg2.approx.f32 %rf1, %f_1;\n\
mad.lo.s32 %rd0, %d_0, %d_1, %ropd3;\n\
mad.rn.f64 %rdouble1, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf2,%rdouble1;\n\
lg2.approx.f32 %rf3, %f_0;\n\
sqrt.rn.f32 %rf4, %f_1;\n\
st.global.f32 [%gfp+0],%rgf19;\n\
add.u64 %gfp,%gfp,%g_distance;\n\
mul.f32 %rf5, %rf0, %rf1;\n\
ld.const.f32 %rf0, [%c_mem+0];\n\
cvt.f64.f32 %rmadf0, %rf0;\n\
cvt.f64.f32 %rmadf1, %rf1;\n\
cvt.f64.f32 %rmadf2, %rf2;\n\
fma.rn.f64 %rdouble2, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf6,%rdouble2;\n\
ld.const.f32 %rf3, [%c_mem+0];\n\
xor.b32 %rb0, %ropb0, %ropb1;\n\
ld.global.f32 %rgf11, [%gfp+0];\n\
add.u64 %gfp,%gfp,%g_distance;\n\
ld.global.f32 %rgf17, [%gfp+0];\n\
add.u64 %gfp,%gfp,%g_distance;\n\
mad.lo.s32 %rd1, %d_0, %d_1, %ropd3;\n\
mad.lo.s32 %rd2, %d_0, %d_1, %ropd3;\n\
ld.global.f32 %rgf8, [%gfp+0];\n\
add.u64 %gfp,%gfp,%g_distance;\n\
cvt.f64.f32 %rmadf0, %rf1;\n\
cvt.f64.f32 %rmadf1, %rf2;\n\
cvt.f64.f32 %rmadf2, %rf3;\n\
mad.rn.f64 %rdouble3, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf7,%rdouble3;\n\
mul.lo.s32 %rd3, %d_0, %d_1;\n\
ld.const.f32 %rf1, [%c_mem+0];\n\
neg.s32 %rd4, %d_0;\n\
not.b32 %rb1, %ropb0;\n\
cvt.f64.f32 %rmadf0, %rf2;\n\
cvt.f64.f32 %rmadf1, %rf3;\n\
cvt.f64.f32 %rmadf2, %rf4;\n\
fma.rn.f64 %rdouble4, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf8,%rdouble4;\n\
mad.lo.s32 %rd5, %rd0, %rd1, %rd2;\n\
ld.global.f32 %rgf2, [%gfp+0];\n\
add.u64 %gfp,%gfp,%g_distance;\n\
cvt.f64.f32 %rmadf0, %rf3;\n\
cvt.f64.f32 %rmadf1, %rf4;\n\
cvt.f64.f32 %rmadf2, %rf5;\n\
mad.rn.f64 %rdouble5, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf9,%rdouble5;\n\
and.b32 %rb2, %ropb0, %ropb1;\n\
cvt.f64.f32 %rmadf0, %rf4;\n\
cvt.f64.f32 %rmadf1, %rf5;\n\
cvt.f64.f32 %rmadf2, %rf6;\n\
mad.rn.f64 %rdouble6, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf10,%rdouble6;\n\
lg2.approx.f32 %rf11, %rf6;\n\
cos.approx.f32 %rf12, %rf7;\n\
rem.s32 %rd6, %rd1, %d_0;\n\
rem.s32 %rd7, %rd2, %d_0;\n\
mad.lo.s32 %rd8, %rd3, %rd4, %rd5;\n\
cvt.f64.f32 %rmadf0, %rf7;\n\
cvt.f64.f32 %rmadf1, %rf8;\n\
cvt.f64.f32 %rmadf2, %rf9;\n\
fma.rn.f64 %rdouble7, %rmadf0, %rmadf1, %rmadf2;\n\
cvt.rn.f32.f64 %rf13,%rdouble7;\n\
xor.b32 %rb3, %ropb0, %ropb1;\n\
ex2.approx.f32 %rf14, %rf9;\n\
ld.global.f32 %rgf31, [%gfp+0];\n\
add.u64 %gfp,%gfp,%g_distance;\n\
ld.global.f32 %rgf25, [%gfp+0];\n\
add.u64 %gfp,%gfp,%g_distance;\n\
shl.b32 %rb4, %ropb0, %ropb1;\n\
max.s32 %rd9, %rd4, %rd5;\n\
sad.s32 %rd10, %rd5, %rd6, %rd7;\n\
ld.global.f32 %rgf20, [%gfp+0];\n\
add.u64 %gfp,%gfp,%g_distance;\n\
div.s32 %rd11, %rd6, %d_0;\n\
mad.lo.s32 %rd12, %rd7, %rd8, %rd9;\n\
min.s32 %rd13, %rd8, %rd9;\n\
");
asm volatile("add.u32 %counter,%counter,1;\n\
setp.ge.u32 %p_0, %counter, %loop;\n\
@!%p_0 bra $BB_LABEL0;");
// NOTE(review): %rf15 and %rd14 are stored below without ever being
// written in the loop body — the stored values are indeterminate.
// Benchmark-only; the host never validates these outputs.
asm volatile("st.global.f32 [%rdf2+0],%rf15;\n\
st.global.s32 [%rdd2+0],%rd14;");
asm volatile("add.u64 %rdf2,%rdf2,%distance;\n\
add.u64 %rdd2,%rdd2,%distance;");
//end basic block;
asm volatile("$BB_LABEL1:");
asm volatile("exit;");
}
|
3,614 | #include<iostream>
#include<fstream>
#include<string>
#include<cmath>
#include<assert.h>
#include<stdio.h>
#include<cuda.h>
#include<sys/time.h>
//using namespace std;
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double getSeconds()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
typedef double real;
std::string part_input_file,part_out_name_base,vtk_out_name_base;
real timestep_length,time_end,epsilon,sigma;
int part_out_freq,vtk_out_freq,cl_workgroup_1dsize;
// Terminate the process with a readable message when a CUDA API call
// reported an error; no-op on cudaSuccess.
void checkError (cudaError_t err)
{
    if (err == cudaSuccess)
        return;
    std::cout << cudaGetErrorString(err) << std::endl;
    exit(-1);
}
__host__ void fileread(std::string file);
//void force_update(real* pos_x, real* pos_y, real* pos_z, real* F_x, real* F_y, real* F_z, real sigma, real epsilon, unsigned int N);
////////////////////////////*************************************************************************************************************************************
// Lennard-Jones force on particle `id` from every other particle
// (O(N^2), one thread per particle).  Writes the net force into
// d_F_{x,y,z}[id].  The velocity and old-force arrays are part of the
// shared kernel signature but are not touched here.
// Fix: added the `id >= N` bounds guard the original lacked — with a
// launch rounded up to whole blocks, excess threads indexed past the
// arrays.
__global__ void Force_update_1D(
	real *d_pos_x,
	real *d_pos_y,
	real *d_pos_z,
	real *d_vel_x,
	real *d_vel_y,
	real *d_vel_z,
	real *d_F_x,
	real *d_F_y,
	real *d_F_z,
	real *d_F_old_x,
	real *d_F_old_y,
	real *d_F_old_z,
	real sigma,
	real epsilon,
	unsigned int N)
{
	int id = threadIdx.x + blockIdx.x*blockDim.x;
	if (id >= (int)N)
		return;
	const real c1 = 24 * epsilon;        // common LJ prefactor
	const real x_i = d_pos_x[id];
	const real y_i = d_pos_y[id];
	const real z_i = d_pos_z[id];
	real tempx = 0.0, tempy = 0.0, tempz = 0.0;
	for (unsigned int j = 0; j < N; ++j) {
		if (id == (int)j)
			continue;                    // no self-interaction
		real dx = x_i - d_pos_x[j];
		real dy = y_i - d_pos_y[j];
		real dz = z_i - d_pos_z[j];
		real d_2 = dx*dx + dy*dy + dz*dz;
		real d = sqrt(d_2);
		assert(d != 0);                  // coincident particles are invalid input
		real sig_abs = sigma / d;
		real t_pow = pow(sig_abs, 6);    // (sigma/d)^6
		// dU/dr resolved along each axis; same grouping as the original
		// expression so results are bit-identical for in-range threads.
		tempx = tempx + ((c1/(d_2) * t_pow * (2*t_pow - 1)) * dx);
		tempy = tempy + ((c1/(d_2) * t_pow * (2*t_pow - 1)) * dy);
		tempz = tempz + ((c1/(d_2) * t_pow * (2*t_pow - 1)) * dz);
	}
	d_F_x[id] = tempx;
	d_F_y[id] = tempy;
	d_F_z[id] = tempz;
}
// Velocity-Verlet position step: x += dt*v + dt^2/(2m) * F for each
// axis, then archive the current force into the *_old arrays for the
// subsequent velocity half-step.  One thread per particle; the launch
// must cover exactly N threads (no bounds guard, as in the original).
__global__ void pos_update_1D(
	real *d_pos_x,
	real *d_pos_y,
	real *d_pos_z,
	real *d_vel_x,
	real *d_vel_y,
	real *d_vel_z,
	real *d_F_x,
	real *d_F_y,
	real *d_F_z,
	real *d_F_old_x,
	real *d_F_old_y,
	real *d_F_old_z,
	real *mass,
	real timestep_length){
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	real del_T = timestep_length * timestep_length;   // dt^2
	real coeff = del_T / (2 * mass[i]);               // dt^2 / (2m)
	d_pos_x[i] = d_pos_x[i] + timestep_length * d_vel_x[i] + coeff * d_F_x[i];
	d_pos_y[i] = d_pos_y[i] + timestep_length * d_vel_y[i] + coeff * d_F_y[i];
	d_pos_z[i] = d_pos_z[i] + timestep_length * d_vel_z[i] + coeff * d_F_z[i];
	// Keep the force used for this step so vel_update_1D can average
	// the old and new forces.
	d_F_old_x[i] = d_F_x[i];
	d_F_old_y[i] = d_F_y[i];
	d_F_old_z[i] = d_F_z[i];
}
// Velocity-Verlet velocity step: v += (dt/2) * (F_new + F_old) / m.
// One thread per particle; no bounds guard (matches the original).
__global__ void vel_update_1D(
	real *d_vel_x,
	real *d_vel_y,
	real *d_vel_z,
	real *d_F_x,
	real *d_F_y,
	real *d_F_z,
	real *d_F_old_x,
	real *d_F_old_y,
	real *d_F_old_z,
	real timestep_length,
	real *mass){
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	real half_dt = timestep_length * 0.5;
	d_vel_x[i] = d_vel_x[i] + half_dt * (d_F_x[i] + d_F_old_x[i]) / mass[i];
	d_vel_y[i] = d_vel_y[i] + half_dt * (d_F_y[i] + d_F_old_y[i]) / mass[i];
	d_vel_z[i] = d_vel_z[i] + half_dt * (d_F_z[i] + d_F_old_z[i]) / mass[i];
}
// Debug kernel: each thread prints one entry of d_pos_x.
// (N is unused; the launch configuration decides how many print.)
__global__ void print(real *d_pos_x, unsigned int N){
	printf("%f\n", d_pos_x[threadIdx.x]);
}
////////////////////**********************************************************************************************************************************************
// Molecular-dynamics driver: reads the parameter and particle files,
// runs a velocity-Verlet time loop on the GPU (one thread per particle,
// single block), and writes one VTK snapshot per step.
//
// Fixes vs. the original:
//  * arrays allocated with new[] are now released with delete[]
//    (plain delete on a new[] pointer is undefined behavior);
//  * the VTK "VECTORS v" section writes vel_z as the third component
//    (it previously wrote vel_y twice);
//  * d_mass is cudaFree'd along with the other device buffers.
int main(int argc,char *argv[])
{
	std::cout.precision(4);
	std::string para_file;
	if(argc==1)
	{
		std::cout<< "Para file name not given" << std::endl;
		return 0;
	}
	para_file = argv[1];
	double wcTimeStart= 0.0, wcTimeEnd=0.0;
	fileread(para_file);              // fill the global simulation parameters
	unsigned int N=0;                 // number of particles
	// ---- read particle count, allocate host and device arrays ----
	std::ifstream f;
	f.open(part_input_file);
	f>>N;
	real *mass = new real[N];
	real *d_mass;
	real *pos_x = new real[N];
	real *d_pos_x;
	real *pos_y = new real[N];
	real *d_pos_y;
	real *pos_z = new real[N];
	real *d_pos_z;
	real *vel_x = new real[N];
	real *d_vel_x;
	real *vel_y = new real[N];
	real *d_vel_y;
	real *vel_z = new real[N];
	real *d_vel_z;
	real *F_x = new real[N];
	real *d_F_x;
	real *F_y = new real[N];
	real *d_F_y;
	real *F_z = new real[N];
	real *d_F_z;
	real *F_old_x = new real[N];
	real *d_F_old_x;
	real *F_old_y = new real[N];
	real *d_F_old_y;
	real *F_old_z = new real[N];
	real *d_F_old_z;
	int count = 0;                    // VTK snapshot counter
	cudaMalloc((void**)&d_mass, N*sizeof(real));
	cudaMalloc((void**)&d_pos_x, N*sizeof(real));
	cudaMalloc((void**)&d_pos_y, N*sizeof(real));
	cudaMalloc((void**)&d_pos_z, N*sizeof(real));
	cudaMalloc((void**)&d_vel_x, N*sizeof(real));
	cudaMalloc((void**)&d_vel_y, N*sizeof(real));
	cudaMalloc((void**)&d_vel_z, N*sizeof(real));
	cudaMalloc((void**)&d_F_x, N*sizeof(real));
	cudaMalloc((void**)&d_F_y, N*sizeof(real));
	cudaMalloc((void**)&d_F_z, N*sizeof(real));
	cudaMalloc((void**)&d_F_old_x, N*sizeof(real));
	cudaMalloc((void**)&d_F_old_y, N*sizeof(real));
	cudaMalloc((void**)&d_F_old_z, N*sizeof(real));
	real t = 0;
	int iter =0;
	// Read mass / position / velocity per particle until EOF.
	while (true) {
		f >> mass[iter] >> pos_x[iter] >> pos_y[iter]>> pos_z[iter] >> vel_x[iter] >> vel_y[iter] >> vel_z[iter];
		if( f.eof() ) break;
		++iter;
	}
	f.close();
	std::string vtk_file =" ";
	std::ofstream vtk;
	// Copy the initial state (the still-uninitialized force arrays are
	// overwritten by the first Force_update_1D) to the device.
	cudaMemcpy(d_mass,mass, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_pos_x,pos_x, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_pos_y,pos_y, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_pos_z,pos_z, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_vel_x,vel_x, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_vel_y,vel_y, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_vel_z,vel_z, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_F_x,F_x, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_F_y,F_y, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_F_z,F_z, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_F_old_x,F_old_x, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_F_old_y,F_old_y, (N*sizeof(real)),cudaMemcpyHostToDevice);
	cudaMemcpy(d_F_old_z,F_old_z, (N*sizeof(real)),cudaMemcpyHostToDevice);
	wcTimeStart = getSeconds();       // start of timed region
	// Initial force evaluation so the first position step has F.
	Force_update_1D<<<1,N>>>(d_pos_x, d_pos_y, d_pos_z, d_vel_x, d_vel_y, d_vel_z, d_F_x, d_F_y, d_F_z,d_F_old_x,d_F_old_y,d_F_old_z,sigma,epsilon,N);
	cudaError_t errSync = cudaGetLastError();
	cudaError_t errAsync = cudaDeviceSynchronize();
	if (errSync != cudaSuccess)
		printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
	if (errAsync != cudaSuccess)
		printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
	do{
		// Position update (also archives F into F_old), new forces,
		// then the velocity half-step.
		pos_update_1D<<<1,N>>>(d_pos_x, d_pos_y, d_pos_z, d_vel_x, d_vel_y, d_vel_z, d_F_x, d_F_y, d_F_z,d_F_old_x,d_F_old_y,d_F_old_z,d_mass,timestep_length);
		Force_update_1D<<<1,N>>>(d_pos_x, d_pos_y, d_pos_z, d_vel_x, d_vel_y, d_vel_z, d_F_x, d_F_y, d_F_z,d_F_old_x,d_F_old_y,d_F_old_z,sigma,epsilon,N);
		cudaDeviceSynchronize();
		vel_update_1D<<<1,N>>>(d_vel_x, d_vel_y, d_vel_z, d_F_x, d_F_y, d_F_z,d_F_old_x,d_F_old_y,d_F_old_z,timestep_length,d_mass);
		// Copy the state back for this step's VTK snapshot.
		cudaMemcpy(pos_x,d_pos_x, (N*sizeof(real)),cudaMemcpyDeviceToHost);
		cudaMemcpy(pos_y,d_pos_y, (N*sizeof(real)),cudaMemcpyDeviceToHost);
		cudaMemcpy(pos_z,d_pos_z, (N*sizeof(real)),cudaMemcpyDeviceToHost);
		cudaMemcpy(vel_x,d_vel_x, (N*sizeof(real)),cudaMemcpyDeviceToHost);
		cudaMemcpy(vel_y,d_vel_y, (N*sizeof(real)),cudaMemcpyDeviceToHost);
		cudaMemcpy(vel_z,d_vel_z, (N*sizeof(real)),cudaMemcpyDeviceToHost);
		// ---- write one VTK snapshot ----
		vtk_file = "tmp/" + vtk_out_name_base + std::to_string(count) +".vtk";
		vtk.open(vtk_file);
		vtk << "# vtk DataFile Version 4.0" << "\n" << "hesp visualization file" << "\n" << "ASCII" << "\n" << "DATASET UNSTRUCTURED_GRID" << "\n" << "POINTS "<<N<<" double" << "\n";
		vtk<< std::fixed;
		for(int j =0; j<N; ++j)
			vtk<<pos_x[j] << " "<< pos_y[j]<< " " << pos_z[j] << "\n";
		vtk << "CELLS 0 0" << "\n";
		vtk << "CELL_TYPES 0" << "\n";
		vtk << "POINT_DATA "<< N<< "\n";
		vtk << "SCALARS m double" << "\n";
		vtk << "LOOKUP_TABLE default" << "\n";
		vtk<< std::fixed;
		for(int j =0; j<N; ++j)
			vtk<< mass[j] << "\n";
		vtk <<"VECTORS v double" << "\n";
		vtk<< std::fixed;
		for(int j =0; j<N; ++j)
			vtk<<vel_x[j] << " "<< vel_y[j]<< " " << vel_z[j] << "\n";  // fix: third component is vel_z
		vtk.close();
		vtk_file =" ";
		count++;
		// NOTE(review): the clock advances by a hard-coded 0.01 rather
		// than timestep_length — kept to preserve behavior, but these
		// should probably agree.
		t = t + 0.01;
	}while(t<time_end);
	wcTimeEnd = getSeconds();         // end of timed region
	std::cout << "Time Taken for computation: " << wcTimeEnd-wcTimeStart << " sec" << std::endl;
	// new[] allocations must be paired with delete[].
	delete[] mass;
	delete[] pos_x;
	delete[] pos_y;
	delete[] pos_z;
	delete[] vel_x;
	delete[] vel_y;
	delete[] vel_z;
	delete[] F_x;
	delete[] F_y;
	delete[] F_z;
	delete[] F_old_x;
	delete[] F_old_y;
	delete[] F_old_z;
	cudaFree(d_mass);
	cudaFree(d_pos_x);
	cudaFree(d_pos_y);
	cudaFree(d_pos_z);
	cudaFree(d_vel_x);
	cudaFree(d_vel_y);
	cudaFree(d_vel_z);
	cudaFree(d_F_x);
	cudaFree(d_F_y);
	cudaFree(d_F_z);
	cudaFree(d_F_old_x);
	cudaFree(d_F_old_y);
	cudaFree(d_F_old_z);
	return 0;
}
// Parse the 10 "name value" pairs of the parameter file into the
// corresponding global configuration variables.  Unknown keys are
// silently ignored; numeric values go through std::stod / std::stoi.
void fileread(std::string file){
	std::ifstream ff;
	ff.open(file);
	std::string::size_type sz;
	for (int entry = 0; entry < 10; ++entry)
	{
		std::string para_name;
		std::string value;
		ff >> para_name >> value;
		if (para_name == "part_input_file")
			part_input_file = value;
		else if (para_name == "timestep_length")
			timestep_length = std::stod(value, &sz);
		else if (para_name == "time_end")
			time_end = std::stod(value, &sz);
		else if (para_name == "sigma")
			sigma = std::stod(value, &sz);
		else if (para_name == "epsilon")
			epsilon = std::stod(value, &sz);
		else if (para_name == "part_out_freq")
			part_out_freq = std::stoi(value, &sz);
		else if (para_name == "part_out_name_base")
			part_out_name_base = value;
		else if (para_name == "vtk_out_freq")
			vtk_out_freq = std::stoi(value, &sz);
		else if (para_name == "vtk_out_name_base")
			vtk_out_name_base = value;
		else if (para_name == "cl_workgroup_1dsize")
			cl_workgroup_1dsize = std::stoi(value, &sz);
	}
	ff.close();
}
|
3,615 | #include "MarkovChain.cuh"
/**
* Characters (26)
* Start of word (1)
* End of word (1)
*/
#define CHARACTERS 27
#define BUFFERSIZE 20
#define START 'S'
#define ENDINDEX 0
// Map a character to its index in the Markov matrix: the END state
// ('\0' or newline) is ENDINDEX (0), letters 'a'..'z' map to 1..26,
// and the START marker maps to CHARACTERS (27, the extra row).
int getCharacterIndex(char character) {
	if (character == START)
		return CHARACTERS;
	if (character == '\0' || character == '\n')
		return ENDINDEX;
	return (int(character) - int('a') + 1);
}
// Convert one row of raw transition counts into probabilities summing
// to 1.  Fixes vs. the original: the accumulator is now a double (the
// old `int sum` truncated the double-valued entries), and an all-zero
// row is left untouched instead of being divided by zero, which filled
// the row with NaNs.
void normalizeRow(double** markovMatrix, int row) {
	double sum = 0.0;
	// Total number of transitions observed out of this state.
	for (int col = 0; col < CHARACTERS; col++) {
		sum += markovMatrix[row][col];
	}
	if (sum == 0.0) {
		return;  // no observed transitions from this state
	}
	// Scale each count to a probability.
	for (int col = 0; col < CHARACTERS; col++) {
		markovMatrix[row][col] /= sum;
	}
}
// Build a (CHARACTERS+1) x CHARACTERS first-order Markov transition
// matrix from a file of newline-separated lowercase names: count every
// character-to-character transition, then normalize each row to
// probabilities.  Caller frees the result with freeMarkov.
// Fixes vs. the original: the FILE handle and the line buffer were
// leaked, and a missing input file caused fgets(NULL stream) instead of
// a clean fallback.
double** markovFromFile(char* filename) {
	// One row per source state (letters + end + start), one column per
	// destination state (letters + end); calloc zero-initializes.
	double** markovMatrix=(double**)(calloc(CHARACTERS+1, sizeof(double*)));
	for (int i = 0; i < CHARACTERS+1; i++) {
		markovMatrix[i] = (double*)(calloc(CHARACTERS, sizeof(double)));
	}
	FILE* f = fopen(filename, "r");
	if (f == NULL) {
		// Nothing to count; return the zeroed matrix rather than crash.
		return markovMatrix;
	}
	char* buffer=(char*)(calloc(BUFFERSIZE, sizeof(char)));
	char current = START;
	// Each fgets yields one name (up to BUFFERSIZE-2 chars + NUL).
	while (fgets(buffer, BUFFERSIZE-1, f)!=NULL) {
		for (int c = 0; c < BUFFERSIZE-1; c++) {
			char next = buffer[c];
			// Fold uppercase into lowercase before indexing.
			if (next>=int('A') && next<=int('Z')) {
				next = (next-int('A'))+int('a');
			}
			// Count the transition current -> next.
			markovMatrix[getCharacterIndex(current)][getCharacterIndex(next)]++;
			if (next=='\n' || next== '\0') {
				current = START;  // name ended; restart from the START state
				break;
			}
			else {
				current = next;
			}
		}
	}
	free(buffer);
	fclose(f);
	// Turn counts into per-row probability distributions.
	for (int r = 0; r < CHARACTERS + 1; r++) {
		normalizeRow(markovMatrix, r);
	}
	return markovMatrix;
}
// Release a matrix allocated by markovFromFile: the rows first, then
// the row-pointer spine.
void freeMarkov(double** markovMatrix) {
	int row = 0;
	while (row < CHARACTERS + 1) {
		free(markovMatrix[row]);
		++row;
	}
	free(markovMatrix);
}
// Sample the next character from currentChar's row of the Markov
// matrix by inverse-CDF sampling on one uniform random number.
// Fixes vs. the original:
//  * the END state is detected from the *sampled* column
//    (i == ENDINDEX), not from currentChar — currentChar can never be
//    '\0' here, so names previously never terminated via this branch;
//  * column i encodes letter 'a' + (i - 1) (getCharacterIndex maps
//    'a' -> 1), so decoding subtracts 1 — the old char(i + 'a') was
//    shifted by one letter.
char getChar(char currentChar, double** markovMatrix) {
	// Uniform draw in [0, 1].
	double randNum = rand()/double(RAND_MAX);
	double sum=0;
	// Walk the cumulative distribution until it covers randNum.
	for (int i = 0; i < CHARACTERS; i++) {
		sum += markovMatrix[getCharacterIndex(currentChar)][i];
		if (sum>=randNum) {
			if (i == ENDINDEX) {
				return '\0';  // transition into the end-of-name state
			}
			return char(i - 1 + int('a'));
		}
	}
	// Row probabilities did not reach randNum (e.g. empty row): end name.
	return '\0';
}
// Fill emp->name with a Markov-generated name of at most BUFFERSIZE-1
// characters (calloc guarantees NUL termination) and print it.
void generateEmpireName(double** markovMatrix, empire* emp) {
	emp->name = (char*)(calloc(BUFFERSIZE, sizeof(char)));
	char current = START;
	int pos = 0;
	// Stop when the chain emits the terminator or the buffer is full.
	while (pos < BUFFERSIZE - 1 && current != '\0') {
		current = getChar(current, markovMatrix);
		emp->name[pos] = current;
		++pos;
	}
	printf("Empire name=%s\n", emp->name);
}
3,616 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
int main() {
    // Five zero-initialized doubles on the host; set the last slot.
    thrust::host_vector<double> host(5, 0);
    host[4] = 35;
    // Constructing a device_vector from a host_vector copies the data to the GPU.
    thrust::device_vector<double> dev(host);
    // This write only touches the CPU-side copy; the device copy is unchanged.
    host[2] = 12;
    printf("Host vector: ");
    for (auto it = host.begin(); it != host.end(); ++it) {
        std::cout << *it << " "; // fast: plain host memory access
    }
    printf("\n");
    printf("Device vector: ");
    for (auto it = dev.begin(); it != dev.end(); ++it) {
        std::cout << *it << " "; // slow: each read is a device-to-host transfer
    }
    printf("\n");
}
|
3,617 | #include<stdio.h>
#include<cuda.h>
#include<iostream>
#include<fstream>
#include<chrono>
using namespace std;
// Reference reduction: a single device thread sums the whole array and
// prints the total (launched <<<1,1>>> by the host wrapper).
__global__ void serialReduction(int *d_array, int numberOfElements)
{
    int total = 0;
    for (int idx = 0; idx < numberOfElements; ++idx)
        total += d_array[idx];
    printf("%d", total);
}
// Host wrapper: run the one-thread reference reduction on the device and wait
// for its printf output. h_array is unused here.
void serialReduceHost(int *h_array,int *d_array, int numberOfElements)
{
    serialReduction<<<1,1>>>(d_array,numberOfElements);
    // The kernel prints from the device; synchronize so its output appears now.
    cudaDeviceSynchronize();
    fflush(stdout);
}
// Block-level partial reduction: each thread sums 'elementsPerThread'
// consecutive elements, partials are combined in shared memory by thread 0,
// and each block writes one value to d_global[blockIdx.x].
// Requires numberOfThreadsPerBlock * sizeof(int) bytes of dynamic shared memory.
__global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x ;
    index = index * elementsPerThread;
    // Fix: clamp this thread's range to the array instead of returning early.
    // The original used 'index > numberOfElements' (off-by-one), could read
    // past the end of d_array, and let some threads of a block return before
    // __syncthreads(), which is undefined behavior.
    int end = index + elementsPerThread;
    if (end > numberOfElements)
        end = numberOfElements;
    int sum = 0;
    for(int i=index;i<end;i++)
    {
        sum = sum + d_array[i];
    }
    extern __shared__ int d_blockMemmory[];
    d_blockMemmory[threadIdx.x] = sum;
    __syncthreads();   // all threads reach this barrier now
    if(threadIdx.x == 0)
    {
        // Thread 0 serially folds the block's partial sums.
        sum = 0;
        for(int i =0; i<numberOfThreadsPerBlock;i++)
        {
            sum = sum+ d_blockMemmory[i];
        }
        d_global[blockIdx.x] = sum;
    }
}
// Launch the block-level partial reduction, then finish the sum on the host
// and print it. h_array is unused here.
void parallelReduceHost(int *h_array ,int *d_array ,int numberOfElements,int elementsPerThread , int numberOfThreadsPerBlock , int numberOfBlocks)
{
    int *d_global;
    cudaMalloc(&d_global, sizeof(int)*numberOfBlocks);
    parallelReduction<<<numberOfBlocks,numberOfThreadsPerBlock,numberOfThreadsPerBlock*sizeof(int)>>> (d_array,numberOfElements,elementsPerThread,numberOfThreadsPerBlock,numberOfBlocks,d_global);
    int *h_global = new int[numberOfBlocks];
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(h_global,d_global,sizeof(int)*numberOfBlocks,cudaMemcpyDeviceToHost);
    int sum=0;
    for(int i=0;i<numberOfBlocks;i++)
    {
        sum = sum + h_global[i];
    }
    printf("\n%d",sum);
    // Fix: the original leaked both buffers.
    delete[] h_global;
    cudaFree(d_global);
}
// CPU reference sum: walks the (host-resident) array and prints the total.
void serialReduceCpu(int *d_array , int numberOfElements)
{
    int total = 0;
    int idx = 0;
    while (idx < numberOfElements)
        total += d_array[idx++];
    cout<<"\n"<<total;
}
// Read an element count then the elements from file "random", reduce on the
// GPU (parallel) and on the CPU (reference), printing both sums.
int main()
{
    int numberOfElements;
    ifstream inFile;
    inFile.open("random");
    // Fix: the original read from an unopened stream without checking.
    if(!inFile)
    {
        cout<<"Could not open input file 'random'\n";
        return 1;
    }
    int x;
    int i=0;
    inFile >>x ;
    numberOfElements = x;
    int *h_array = new int[numberOfElements];
    // Fix: also bound by numberOfElements so a longer file cannot overrun the buffer.
    while(i < numberOfElements && inFile >> x)
    {
        h_array[i] = x;
        i++;
    }
    int *d_array;
    cudaMalloc(&d_array , sizeof(int)*numberOfElements);
    cudaMemcpy(d_array, h_array , sizeof(int)*numberOfElements, cudaMemcpyHostToDevice);
    //serialReduceHost(h_array, d_array ,numberOfElements);
    int elementsPerThread, numberOfBlocks , numberOfThreadsPerBlock;
    elementsPerThread = 256 ;
    numberOfThreadsPerBlock = 64;
    numberOfBlocks =6400;
    parallelReduceHost(h_array,d_array,numberOfElements,elementsPerThread,numberOfThreadsPerBlock ,numberOfBlocks);
    serialReduceCpu(h_array , numberOfElements);
    // Fix: the original leaked both the device and host buffers.
    cudaFree(d_array);
    delete[] h_array;
}
// freopen("random", "r", stdin); |
3,618 | #include <cstdlib>
#include <cstdio>
#include <ctime>
#include <chrono>
// Element-wise add of one stream's chunk: 'offset' is the chunk's start index.
// No bounds check: the launch configuration must cover exactly the chunk.
__global__ void cuda_vecAdd(float *v1, float *v2, float *v3, int offset)
{
    int idx = offset + blockDim.x * blockIdx.x + threadIdx.x;
    v3[idx] = v1[idx] + v2[idx];
}
// Overlapped H2D copy / kernel / D2H copy across nStreams CUDA streams,
// then verify and time the whole pipeline.
int main(void)
{
    typedef std::chrono::high_resolution_clock Clock;
    typedef std::chrono::time_point<Clock> TimePoint;
    typedef std::chrono::duration<float> Time;   // float seconds
    constexpr int n = 256*1000000;
    constexpr int nStreams = 32;
    constexpr int blockSize = 256;
    constexpr int streamSize = n / nStreams;     // n divides evenly by nStreams
    constexpr int gridSize = streamSize / blockSize;
    printf("%d %d\n", streamSize, gridSize);
    TimePoint tstart, tstop;
    cudaStream_t streams[nStreams];
    float *v1 = nullptr, *v2 = nullptr, *v3 = nullptr;
    float *dv1 = nullptr, *dv2 = nullptr, *dv3 = nullptr;
    for(int i=0; i<nStreams; i++)
        cudaStreamCreate(&streams[i]);
    // Pinned host buffers so cudaMemcpyAsync can genuinely overlap with compute.
    cudaMallocHost((void**)&v1, sizeof(float) * n);
    cudaMallocHost((void**)&v2, sizeof(float) * n);
    cudaMallocHost((void**)&v3, sizeof(float) * n);
    cudaMalloc((void**)&dv1, sizeof(float) * n);
    cudaMalloc((void**)&dv2, sizeof(float) * n);
    cudaMalloc((void**)&dv3, sizeof(float) * n);
    for(int i=0; i<n; i++)
    {
        v1[i] = (float)i;
        v2[i] = (float)-i+1;
    }
    for(int i=0; i<n; i++)
        v3[i] = 0.0f;
    tstart = Clock::now();
    // Pipeline: each stream copies its chunk in, runs the kernel, copies it out.
    for(int i=0; i<nStreams; i++)
    {
        int offset = i * streamSize;
        cudaMemcpyAsync(&dv1[offset], &v1[offset], sizeof(float) * streamSize, cudaMemcpyHostToDevice, streams[i]);
        cudaMemcpyAsync(&dv2[offset], &v2[offset], sizeof(float) * streamSize, cudaMemcpyHostToDevice, streams[i]);
        cuda_vecAdd<<<gridSize, blockSize, 0, streams[i]>>>(dv1, dv2, dv3, offset);
        cudaMemcpyAsync(&v3[offset], &dv3[offset], sizeof(float) * streamSize, cudaMemcpyDeviceToHost, streams[i]);
    }
    for(int i=0; i<nStreams; i++)
        cudaStreamSynchronize(streams[i]);
    tstop = Clock::now();
    // Fix: duration<float> counts SECONDS; convert to milliseconds so the
    // printed value matches the "ms" label (the original was off by 1000x).
    float elaspedTimeGPU = std::chrono::duration_cast<Time>(tstop-tstart).count() * 1000.0f;
    float checkGPU = 0.0f;
    for(int i=0; i<n; i++)
        checkGPU += v3[i];
    printf("CheckGPU: %f -- TimeGPU: %fms\n", checkGPU, elaspedTimeGPU);
    cudaFree(dv1);
    cudaFree(dv2);
    cudaFree(dv3);
    cudaFreeHost(v1);
    cudaFreeHost(v2);
    cudaFreeHost(v3);
    for(int i=0; i<nStreams; i++)
        cudaStreamDestroy(streams[i]);
    return 0;
}
3,619 |
#include <stdio.h>
/**
* CPU version of our CUDA Hello World!
*/
/**
 * CPU version of our CUDA Hello World! Prints a single greeting line.
 */
void cpu_helloworld()
{
    fputs("Hello from the CPU!\n", stdout);
}
/**
* GPU version of our CUDA Hello World!
*/
/**
 * GPU version of our CUDA Hello World! Each thread prints its x-index
 * within the block.
 */
__global__ void gpu_helloworld()
{
    int tid = threadIdx.x;
    printf("Hello from the GPU! My threadId is %d\n", tid);
}
int main(int argc, char **argv)
{
    // Launch configuration: a single block of 32 threads (one warp).
    dim3 grid(1);
    dim3 block(32);
    // Host-side greeting first.
    cpu_helloworld();
    // Then the device-side greeting.
    gpu_helloworld<<<grid, block>>>();
    // Block until the kernel finishes so its printf output is produced
    // before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
|
3,620 | #include <iostream>
#include <cstdlib>
#include <vector>
// Element-wise c = a + b; the bounds check guards the partially-filled last block.
__global__ void vectorAdd(int* a, int* b, int* c, int n) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if(gid < n)
        c[gid] = a[gid] + b[gid];
}
// Add two 2^20-element vectors on the GPU and verify every element is 5.
int main() {
    int n = 1 << 20;
    // Host arrays
    int* h_a;
    int* h_b;
    int* h_c;
    int bytes = sizeof(int)*n;
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);
    for(int i = 0; i < n; ++i) {
        h_a[i] = 2;
        h_b[i] = 3;
    }
    //Device array
    int* d_a;
    int* d_b;
    int* d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    int block_size = 1024;
    int grid_size = (int)ceil((float)n/block_size);
    std::cout<<"grid size: "<<grid_size<<"\n";
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    std::cout<<"Mem copy successfull\n";
    vectorAdd<<<grid_size, block_size>>>(d_a, d_b, d_c, n);
    // Blocking copy synchronizes with the kernel above.
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    // Fix: only report success when every element verified -- the original
    // printed "Success!" even after detecting a wrong answer.
    bool ok = true;
    for(int i = 0; i < n; ++i) {
        if(h_c[i] != 5) {
            std::cout<<h_c[i]<<" "<<i<<std::endl;
            std::cout<<"Fucking wrong answer\n";
            ok = false;
            break;
        }
    }
    if(ok)
        std::cout<<"Success!\n";
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Fix: the original leaked the host buffers.
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
3,621 | /**
* Demo code of Cuda programming lecture
*
* This programme illustrates how warp divergence may influence the performance of CUDA programme
*
*
*/
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
#define HALF_BLOCK_SIZE 512
#define BLOCK_SIZE 1024
#define LOOP_NUM 1024
//Kernel1 (has warp divergence)
// Demonstrates warp divergence: even and odd lanes of every warp take
// different branches, so within each warp both paths execute serially.
__global__ void kernel1(int *A, int *B)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i % 2 == 0)
    {
        /*Execution Path 1: thread 0, 2, 4, 6...30... reach here*/
        int lower_bound = B[i];
        int tmp = 0;
        //do some computation to make this execution path long enough
        for (int j = lower_bound; j < lower_bound+LOOP_NUM; j += 2)
            tmp += j;
        A[i] += tmp;
    }
    else
    {
        /*Execution Path 2: thread 1, 3, 5, 7...31... reach here*/
        int lower_bound = B[i];
        int tmp = 0;
        //do some computation to make this execution path long enough
        for (int j = lower_bound; j < lower_bound+LOOP_NUM; j += 2)
            tmp += j;
        A[i] -= tmp;
    }
    /*even threads and odd threads go back to the same execution path*/
}
//Kernel2 (does not have warp divergence): the branch splits the block at
//HALF_BLOCK_SIZE, so each 32-thread warp falls entirely on one side of the
//condition and executes a single path; index arithmetic recreates the
//even/odd assignment of kernel1.
__global__ void kernel2(int *A, int *B)
{
    int base = blockIdx.x*blockDim.x;
    if (threadIdx.x < HALF_BLOCK_SIZE)
    {
        /*Execution Path 1: the first half threads of a block reach here*/
        int even_index = base + threadIdx.x*2;
        int lower_bound = B[even_index];
        int tmp = 0;
        //Do some computation
        for (int j = lower_bound; j < lower_bound+LOOP_NUM; j += 2)
            tmp += j;
        A[even_index] += tmp;
    }
    else
    {
        /*Execution Path 2: the second half threads of a block reach here*/
        int odd_index = base + (threadIdx.x - HALF_BLOCK_SIZE)*2 +1;
        int lower_bound = B[odd_index];
        int tmp = 0;
        //Do some computation
        for (int j = lower_bound; j < lower_bound+LOOP_NUM; j += 2)
            tmp += j;
        A[odd_index] -= tmp;
    }
}
// Times kernel1 (divergent) against kernel2 (non-divergent) on the same data
// and prints both elapsed times in milliseconds.
int main()
{
    //Device and host memory pointers
    int *h_A, *h_B, *d_A, *d_B;
    int N = 33554432;   // 2^25 elements
    int data_size = N*(sizeof(int));
    //Kernel configuration parameter
    int threads_per_block = BLOCK_SIZE;
    int blocks_per_grid = N / threads_per_block;   // N is an exact multiple of BLOCK_SIZE
    //Time measurement
    timeval k1_start, k1_end, k2_start, k2_end;
    float k1_elapsed_time, k2_elapsed_time;
    //Allocate Host Memory
    h_A = (int*)malloc(data_size);
    h_B = (int*)malloc(data_size);
    //Allocate Device Memory
    cudaMalloc((void**)&d_A, data_size);
    cudaMalloc((void**)&d_B, data_size);
    //Initialization
    for (int i = 0; i < N; i++)
    {
        h_A[i] = i;
        h_B[i] = i;
    }
    //Memory copy from host to device
    cudaMemcpy(d_A, h_A, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, data_size, cudaMemcpyHostToDevice);
    gettimeofday(&k1_start, NULL);
    //Invoke kernel1(has warp divergence)
    kernel1<<<blocks_per_grid, threads_per_block>>>(d_A, d_B);
    // Launches are asynchronous; synchronize so the timing brackets the work.
    cudaDeviceSynchronize();
    gettimeofday(&k1_end, NULL);
    gettimeofday(&k2_start, NULL);
    //Invoke kernel2(does not have warp divergence)
    kernel2<<<blocks_per_grid, threads_per_block>>>(d_A, d_B);
    cudaDeviceSynchronize();
    gettimeofday(&k2_end, NULL);
    //Copy result back from device to host
    cudaMemcpy(h_A, d_A, data_size, cudaMemcpyDeviceToHost);
    // Convert timeval deltas to milliseconds.
    k1_elapsed_time = 1000*(k1_end.tv_sec - k1_start.tv_sec) + (float)(k1_end.tv_usec - k1_start.tv_usec)/1000;
    k2_elapsed_time = 1000*(k2_end.tv_sec - k2_start.tv_sec) + (float)(k2_end.tv_usec - k2_start.tv_usec)/1000;
    printf("elapsed time of kernel function which has warp divergence: %.2f ms\n", k1_elapsed_time);
    printf("elapsed time of kernel function which has no warp divergence: %.2f ms\n", k2_elapsed_time);
    //Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    //Free host memory
    free(h_A);
    free(h_B);
    return 0;
}
|
3,622 | #include <iostream>
#include <cmath>
#include <cstdio>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define BLOCK_SIZE 32
#define ERROR 1.0e-9
typedef unsigned long long int LONG;
// Abort with a diagnostic (source line + CUDA error string) when a runtime
// call did not return cudaSuccess; used through the CUDA_SAFE_CALL macro.
void safe_call(cudaError_t ret, int line)
{
    if (cudaSuccess == ret)
        return;
    cout << "Error at line " << line << " : " << cudaGetErrorString(ret) << endl;
    exit(-1);
}
void printMat(double *A, LONG N)
{
LONG i,j;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout<<endl;
}
}
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
    // Matrix multiplication for NxN matrices C=A*B
    // Each thread computes a single element of C
    // NOTE(review): 'row' is threadIdx.y only (no blockIdx.y term). The host
    // loop in main() feeds this kernel K-row slices of A and C at a time with
    // blockDim.y == K, so A/C here are K x N slices while B is the full N x N.
    LONG row = threadIdx.y;
    LONG col = blockIdx.x*blockDim.x + threadIdx.x;
    double sum = 0.f;
    for (LONG n = 0; n < N; n++)
        sum += A[row*N+n]*B[n*N+col];
    C[row*N+col] = sum;
}
// Multiply two random NxN matrices (N = K*BLOCK_SIZE, K read from stdin) by
// streaming K-row slices of A through the device, and report GFLOPS.
int main(int argc, char *argv[])
{
    struct timeval t1,t2, tnp;
    double tt, gflops;
    // Perform matrix multiplication C = A*B
    // where A, B and C are NxN matrices
    // Restricted to matrices where N = K*BLOCK_SIZE;
    LONG N,K;
    cin >> K;
    N = K*BLOCK_SIZE;
    CUDA_SAFE_CALL(cudaSetDevice(0));
    cout << "Executing Matrix Multiplcation" << endl;
    cout << "Matrix size: " << N << "x" << N << endl;
    // Allocate memory on the host
    double *hA,*hB,*hC;
    hA = new double[N*N];
    hB = new double[N*N];
    hC = new double[N*N];
    // Initialize matrices on the host
    srand(time(NULL));
    for (LONG j=0; j<N; j++){
        for (LONG i=0; i<N; i++){
            hA[j*N+i] = drand48();
            hB[j*N+i] = drand48();
        }
    }
    // Allocate memory on the device
    LONG size = N*N*sizeof(double); // Size of the memory in bytes
    double *dA,*dB,*dC;
    // Allocate memory to store the GPU answer on the host
    double *C;
    C = new double[N*N];
    CUDA_SAFE_CALL(cudaMalloc(&dB,size));
    // A and C travel through the device in K-row slices of K*size/N bytes each.
    CUDA_SAFE_CALL(cudaMalloc(&dA,(K*size/N)));
    CUDA_SAFE_CALL(cudaMalloc(&dC,(K*size/N)));
    dim3 threadBlock(BLOCK_SIZE,K);   // NOTE: requires BLOCK_SIZE*K <= max threads/block
    dim3 grid(K);
    gettimeofday(&t1,0);
    CUDA_SAFE_CALL(cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice));
    for(LONG i=0; i< (N/K); i++){
        //cout << "Iteration " << i << endl;
        CUDA_SAFE_CALL(cudaMemcpy(dA,hA+i*N*K,(K*size/N),cudaMemcpyHostToDevice));
        //Execute the matrix multiplication kernel
        gpuMM<<<grid,threadBlock>>>(dA,dB,dC,N);
        // Now copy the GPU result back to CPU
        CUDA_SAFE_CALL(cudaMemcpy(C+i*N*K,dC,(K*size/N),cudaMemcpyDeviceToHost));
    }
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    gettimeofday(&t2,0);
    timersub(&t2,&t1,&tnp);
    tt = (double) tnp.tv_sec + ((double) tnp.tv_usec/1.0e6);
    gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
    cout << "Without Prefetch : " << gflops << endl;
    cout << "Device operations done." << endl;
    CUDA_SAFE_CALL(cudaFree(dB));
    CUDA_SAFE_CALL(cudaFree(dA));
    CUDA_SAFE_CALL(cudaFree(dC));
    // Fix: release the host allocations (the original leaked hA, hB, hC and C).
    delete[] hA;
    delete[] hB;
    delete[] hC;
    delete[] C;
    cout << "Finished." << endl;
    return 0;
}
|
3,623 | __global__ void fillOneIntegerArrayKernel(
    int numberRows,
    int numberEntries,
    int* array,
    int constant) {
    // Write 'constant' into one slot of 'array' per thread.
    // NOTE(review): the index mixes blockIdx.x * numberEntries with
    // blockIdx.y * numberRows + threadIdx.x; the intended layout (2D grid over
    // column panels?) cannot be confirmed from this file -- verify against the
    // host-side launch configuration.
    int index = blockIdx.x * numberEntries + blockIdx.y * numberRows + threadIdx.x;
    array[index] = constant;
}
3,624 |
/*
// Cython function from 'thinc' library
class NumpyOps(Ops):
def mean_pool(self, float[:, ::1] X, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = X.shape[1]
cdef int T = X.shape[0]
cdef Pool mem = Pool()
means = <float*>mem.alloc(B * O, sizeof(float))
cpu_mean_pool(means,
&X[0, 0], &lengths[0], B, T, O)
return cpu_floats_ptr2array(means, (B, O))
cdef void cpu_mean_pool(float* means__bo,
const float* X__to, const int* lengths__b,
int B, int T, int O) nogil:
'''Compute means of a batch of concatenated sequences, using the lengths.'''
cdef float scale = 0.
for length in lengths__b[:B]:
scale = 1. / length
for _ in range(length):
VecVec.add_i(means__bo,
X__to, scale, O)
X__to += O
means__bo += O
*/
// hardcoded the shared memory to 256 but we can easily change the host to invoke
// the kernel to dynamically allocate the shared memory (according to vector dimensions)
// Mean-pool word vectors per document: means[doc] = average of that document's
// word vectors. Blocks grid-stride over documents; thread t owns dimension t.
// NOTE(review): assumes blockDim.x == dims and dims <= 256 (size of the
// fixed shared buffer) -- confirm at the launch site.
void __global__ mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
    int bid = blockIdx.x;
    __shared__ float local_means[256];
    for(int step = bid; step < numdocs; step += gridDim.x )
    {
        int wordsInDoc = lengths[step];            // word count of this document
        int blockStarts = prevLengths[step]*dims;  // flat offset of the doc's first word vector
        local_means[threadIdx.x] = 0.0;
        // Stride by dims: thread t sums component t of every word in the document.
        for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
            local_means[threadIdx.x] += words[i];
        __syncthreads();
        means[step*dims + threadIdx.x] = local_means[threadIdx.x]/(float)wordsInDoc;
    }
}
|
3,625 | //#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
#include <iostream>
#include "diffraction.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <math_constants.h>
//#define THREADS_PER_BLOCK 200
/*
double cuda_func(double ang) {
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// H and D are automatically deleted when the function returns
return ang;
}
int cuda_func1(int *raw_ptr, int N) {
// wrap raw pointer with a device_ptr
thrust::device_ptr<int> dev_ptr(raw_ptr);
// use device_ptr in thrust algorithms
thrust::fill(dev_ptr, dev_ptr + N, (int) 9);
// access device memory through device_ptr
dev_ptr[0] = 1;
return dev_ptr[2];
}
*/
// Element-wise integer add; one block per element (launched <<<N,1>>>).
__global__ void addVec(int *a, int *b, int *c) {
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Element-wise float add over a flattened matrix; one block per element.
__global__ void addMat(float *a, float *b, float *c) {
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Element-wise float add over a flattened 3-D array; one block per element.
__global__ void addCube(float *a, float *b, float *c) {
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Collapse (re, im) into the squared amplitude re^2 + im^2, in place in F.
__global__ void matAmp(float *F,float *Fim, int numPix){
    int idx = ((blockIdx.y*blockDim.y + threadIdx.y)*gridDim.x + blockIdx.x )*blockDim.x + threadIdx.x;
    if (idx >= numPix)
        return;
    float re = F[idx];
    float im = Fim[idx];
    F[idx] = re*re + im*im;
}
/*
__global__ void structureFactor(float *F, float *f, float *q, float *p, int numPix, int numAtoms){
//int index = blockIdx.x;
int index = ((blockIdx.y*blockDim.y + threadIdx.y)*gridDim.x + blockIdx.x )*blockDim.x + threadIdx.x;
if (index < numPix) {
float sf_real = 0;
float sf_imag = 0;
// p (Nx3)
// q (py x px x 3)
float map = 0;
int f_ind = 0;
for (int n = 0; n < numAtoms; n++) {
map = 6.283185307F * (p[n]*q[index] + p[n+numAtoms]*q[index+numPix] + p[n+(2*numAtoms)]*q[index+(2*numPix)]);
f_ind = index+(n*numPix);
sf_real += f[f_ind] * cos(map);
sf_imag += f[f_ind] * sin(map);
}
F[index] = sf_real * sf_real + sf_imag * sf_imag;
}
}
*/
//structureFactor<<<dim3(0x6,1),dim3(0xB,1)>>>(d_F,d_f,d_q,d_p,d_i,numPix,chunkSize);
// Accumulate one chunk of atoms into the complex structure factor (Fre, Fim).
// Each thread owns one detector pixel; the host calls this repeatedly, once
// per atom chunk, so Fre/Fim must be zeroed before the first launch.
__global__ void structureFactor(float *Fre, float *Fim, float *f, float *q, \
    float *p, int *i, int numPix, int chunkSize) {
    int index = ( (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x \
        + blockIdx.x ) * blockDim.x + threadIdx.x;
    if (index<numPix){
        // F (py x px) [re & im]
        // f (py x px x numAtomTypes)
        // q (py x px x 3)
        // i (1 x chunkSize)
        // p (chunkSize x 3)
        float map = 0;
        int f_ind = 0;
        for (int n = 0; n < chunkSize; n++) {
            // 2*pi * (p . q): scattering phase of atom n at this pixel.
            map = 6.283185307F * (p[n] * q[index] + p[n+chunkSize] \
                * q[index+(numPix)] + p[n+(2*chunkSize)]*q[index+(2*numPix)]);
            // i[n] selects the atom-type plane of the form-factor array f.
            f_ind = index + i[n]*numPix;
            Fre[index] += f[f_ind] * cos(map);
            Fim[index] += f[f_ind] * sin(map);
        }
    }
}
/*
__global__ void structureFactorChunk(float *sf_real, float *sf_imag, float *f, float *q, int *i, float *p, int numAtomTypes, int numPix, int chunkSize){
int index = ((blockIdx.y*blockDim.y + threadIdx.y)*gridDim.x + blockIdx.x )*blockDim.x + threadIdx.x;
if (index<numPix){
// F (py x px)
// f (py x px x numAtomTypes)
// q (py x px x 3)
// i (1 x chunkSize)
// p (chunkSize x 3)
float map = 0;
int f_ind = 0;
for (int n = 0; n < chunkSize; n++) {
map = 6.283185307F * (p[n]*q[index] + p[n+chunkSize]*q[index+(numPix)] + p[n+(2*chunkSize)]*q[index+(2*numPix)]);
f_ind = index + i[n]*numPix;
sf_real[index] += f[f_ind] * cos(map);
sf_imag[index] += f[f_ind] * sin(map);
}
}
}
__global__ void structureFactorChunkParallel(float *pad_real, float *pad_imag, float *f, float *q, int *i, float *p, int numAtomTypes, int numPix, int chunkSize){
int pixelId = blockIdx.x + blockIdx.y * gridDim.x;
int chunkId = threadIdx.x;
int index = pixelId + chunkId * numPix;
if (pixelId < numPix && chunkId < chunkSize) {
// F (py x px)
// f (py x px x numAtomTypes)
// q (py x px x 3)
// i (1 x chunkSize)
// p (chunkSize x 3)
float map = 6.283185307F * (p[chunkId]*q[pixelId] + p[chunkId+chunkSize]*q[pixelId+(numPix)] + p[chunkId+(2*chunkSize)]*q[pixelId+(2*numPix)]);
int f_ind = pixelId + i[chunkId]*numPix;
pad_real[index] = f[f_ind] * cos(map);
pad_imag[index] = f[f_ind] * sin(map);
}
}
*/
// Fill a[0..N) with pseudo-random values in [0, 100).
void random_ints(int* a, int N)
{
    for (int k = 0; k < N; ++k)
        a[k] = rand() % 100;
}
// Host wrapper: integer vector addition c = a + b on the GPU (N elements).
void cuda_funcVec(int *a, int *b, int *c, int N) {
    const int size = N * sizeof(int);
    int *d_a, *d_b, *d_c;
    // Device copies of the three vectors.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Push the inputs to the device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // One block per element.
    addVec<<<N,1>>>(d_a, d_b, d_c);
    // Pull the result back (blocking, so it also synchronizes).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
// Host wrapper: element-wise float matrix addition c = a + b (H x W).
void cuda_funcMat(float *a, float *b, float *c, int H, int W) {
    float *d_a, *d_b, *d_c;
    // Fix: the buffers hold floats, so size them with sizeof(float), not
    // sizeof(int) -- the original only worked because the two sizes coincide.
    int size = H*W*sizeof(float);
    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch add() kernel on GPU, one block per element
    addMat<<<H*W,1>>>(d_a, d_b, d_c);
    // Copy result back to host (blocking, so it also synchronizes)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Cleanup
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
}
// Host wrapper: element-wise float addition c = a + b over an H x W x Z array.
void cuda_funcCube(float *a, float *b, float *c, int H, int W, int Z) {
    float *d_a, *d_b, *d_c;
    // Fix: the buffers hold floats, so size them with sizeof(float), not
    // sizeof(int) -- the original only worked because the two sizes coincide.
    int size = H*W*Z*sizeof(float);
    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch add() kernel on GPU, one block per element
    addCube<<<H*W*Z,1>>>(d_a, d_b, d_c);
    // Copy result back to host (blocking, so it also synchronizes)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Cleanup
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
}
/*
void cuda_structureFactor(float *F, float *f, float *q, float *p, int py, int px, int numAtoms) {
float *d_f, *d_q, *d_p;
float *d_F;//, *d_F_real, *d_F_imag;
int size_f = py*px*numAtoms*sizeof(float);
int size_q = py*px*3*sizeof(float);
int size_p = numAtoms*3*sizeof(float);
int size_F = py*px*sizeof(float);
// Malloc
//float *F_real = (float *)malloc(size_F);
//float *F_imag = (float *)malloc(size_F);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_f, size_f);
cudaMalloc((void **)&d_q, size_q);
cudaMalloc((void **)&d_p, size_p);
cudaMalloc((void **)&d_F, size_F);
//cudaMalloc((void **)&d_F_imag, size_F);
// Copy inputs to device
cudaMemcpy(d_f, f, size_f, cudaMemcpyHostToDevice);
cudaMemcpy(d_q, q, size_q, cudaMemcpyHostToDevice);
cudaMemcpy(d_p, p, size_p, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
structureFactor<<<py*px,1>>>(d_F, d_f, d_q, d_p, py, px, numAtoms);
// Copy result back to host
cudaMemcpy(F, d_F, size_F, cudaMemcpyDeviceToHost);
//cudaMemcpy(F_imag, d_F_imag, size_F, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_f); cudaFree(d_q); cudaFree(d_p), cudaFree(d_F); //cudaFree(d_F_imag);
//free(F_real); free(F_imag);
}
*/
/*
void cuda_structureFactor(float *F, float *f, float *q, float *p, int numPix, int numAtoms) {
float *d_F, *d_f, *d_q, *d_p;
int size_F = numPix*sizeof(float);
int size_f = numPix*numAtoms*sizeof(float);
int size_q = numPix*3*sizeof(float);
int size_p = numAtoms*3*sizeof(float);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_F, size_F);
cudaMalloc((void **)&d_f, size_f);
cudaMalloc((void **)&d_q, size_q);
cudaMalloc((void **)&d_p, size_p);
// Copy inputs to device
cudaMemcpy(d_f, f, size_f, cudaMemcpyHostToDevice);
cudaMemcpy(d_q, q, size_q, cudaMemcpyHostToDevice);
cudaMemcpy(d_p, p, size_p, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
dim3 threads_per_block(20,10); // Maximum number of threads per block
dim3 number_of_blocks(20,10,1);
structureFactor<<<number_of_blocks,threads_per_block>>>(d_F, d_f, d_q, d_p, numPix, numAtoms);
//cudaThreadSynchronize();
//structureFactor<<<py*px,1>>>(d_F, d_f, d_q, d_p, py, px, numAtoms);
// Copy result back to host
cudaMemcpy(F, d_F, size_F, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_F); cudaFree(d_f); cudaFree(d_q); cudaFree(d_p);
}
*/
// Compute per-pixel |F|^2 on device 'deviceID' by streaming the atom list
// through device memory in chunks sized to fit the card's free global memory.
// F receives the squared amplitude; f are per-type form factors, q pixel
// momentum vectors, p atom coordinates (three planes of numAtoms: x,y,z),
// i atom-type indices.
void cuda_structureFactor(float *F, float *f, float *q, float *p, int *i, \
    int numPix, int numAtoms, int numAtomTypes, \
    int deviceID){
    int device,numBlocks;
    size_t size_F,size_f,size_q,size_i,size_p;
    size_t globalMem,fixedMem;
    int chunk,chunkSize,chunkSizeMax;
    float *d_F,*d_f,*d_p,*d_q,*d_Fim;
    int *d_i;
    struct cudaDeviceProp prop;
    dim3 dimG,dimB;
    size_F = sizeof(float) * numPix;
    size_f = sizeof(float) * numPix * numAtomTypes;
    size_q = sizeof(float) * numPix * 3;
    // Memory that stays resident for the whole run: F, Fim, f, q.
    fixedMem = size_F*2 + size_f + size_q;
    dimB.x = CUDA_BLOCK_SIZE;
    dimG.x = numBlocks = (numPix+dimB.x-1)/dimB.x;
    // Fold an oversized 1-D grid into 2-D when it exceeds the per-dimension limit.
    if (dimG.x > CUDA_GROUP_LIMIT){
        dimG.y = (numBlocks + CUDA_GROUP_LIMIT -1) / CUDA_GROUP_LIMIT;
        dimG.x = (numBlocks + dimG.y -1) / dimG.y;
    }
    //cudaGetDevice(&device);
    device = deviceID;
    cudaSetDevice(device);
    cudaGetDeviceProperties(&prop,device);
    globalMem = prop.totalGlobalMem - CUDA_RESERVE_MEM;
    if (globalMem <= fixedMem){
        printf("Device memory[%lu] not enough to hold all data[>%lu]!\n", \
            globalMem,fixedMem);
        exit(EXIT_FAILURE);
    }
    // Each atom needs three coordinate floats plus one int type index.
    chunkSizeMax = (globalMem - fixedMem) / (3 * sizeof(float) + sizeof(int));
    if (chunkSizeMax>numAtoms)
        chunkSizeMax=numAtoms;
    size_p = sizeof(float) * chunkSizeMax * 3;
    size_i = sizeof(int) * chunkSizeMax;
    cudaMalloc((void **)&d_F, size_F);
    cudaMalloc((void **)&d_Fim, size_F);
    cudaMalloc((void **)&d_f, size_f);
    cudaMalloc((void **)&d_q, size_q);
    cudaMalloc((void **)&d_p, size_p);
    cudaMalloc((void **)&d_i, size_i);
    cudaMemcpy(d_f, f, size_f, cudaMemcpyHostToDevice);
    cudaMemcpy(d_q, q, size_q, cudaMemcpyHostToDevice);
    // The kernel accumulates into F/Fim, so both must start at zero.
    cudaMemset(d_F,0,size_F);
    cudaMemset(d_Fim,0,size_F);
    chunkSize=chunkSizeMax;
    for(chunk=0;chunk<numAtoms;chunk+=chunkSizeMax){
        // Last chunk may be short.
        if (chunkSize+chunk>=numAtoms)
            chunkSize=numAtoms-chunk;
        cudaMemcpy(d_i, i + chunk, sizeof(int)* chunkSize, \
            cudaMemcpyHostToDevice);
        // p is stored as three planes of numAtoms (x, y, z); repack this chunk
        // as three planes of chunkSize so the kernel can index p[n + c*chunkSize].
        cudaMemcpy(d_p, p + chunk, sizeof(float)*chunkSize, \
            cudaMemcpyHostToDevice);
        cudaMemcpy(d_p + chunkSize, p + chunk + numAtoms, \
            sizeof(float)*chunkSize, cudaMemcpyHostToDevice);
        cudaMemcpy(d_p + 2*chunkSize, p + chunk + 2*numAtoms, \
            sizeof(float)*chunkSize, cudaMemcpyHostToDevice);
        structureFactor<<<dimG,dimB>>>(d_F, d_Fim, d_f, d_q, d_p, d_i, numPix, \
            chunkSize);
    }
    // Collapse (re, im) into squared amplitude in place, then copy back.
    matAmp<<<dimG,dimB>>>(d_F,d_Fim,numPix);
    cudaMemcpy(F, d_F, size_F, cudaMemcpyDeviceToHost);
    cudaFree(d_F);
    cudaFree(d_Fim);
    cudaFree(d_f);
    cudaFree(d_q);
    cudaFree(d_p);
    cudaFree(d_i);
}
// Number of CUDA devices on this host, or 0 when the query itself fails.
int cuda_getDeviceCount(){
    int count = 0;
    return (cudaGetDeviceCount(&count) == cudaSuccess) ? count : 0;
}
/*
void cuda_structureFactorChunk(float *sf_real, float *sf_imag, float *f, float *q, int *i, float *p, int numAtomTypes, int numPix, int chunkSize) {
float *d_sf_real, *d_sf_imag, *d_f, *d_q, *d_p; // Pointer to device memory
int *d_i;
int size_sf = numPix*sizeof(float);
int size_f = numPix*numAtomTypes*sizeof(float);
int size_q = numPix*3*sizeof(float);
int size_i = chunkSize*sizeof(int);
int size_p = chunkSize*3*sizeof(float);
// Allocate space for device copies
cudaMalloc((void **)&d_sf_real, size_sf);
cudaMalloc((void **)&d_sf_imag, size_sf);
cudaMalloc((void **)&d_f, size_f);
cudaMalloc((void **)&d_q, size_q);
cudaMalloc((void **)&d_i, size_i);
cudaMalloc((void **)&d_p, size_p);
// Copy inputs to device
cudaMemcpy(d_sf_real, sf_real, size_sf, cudaMemcpyHostToDevice);
cudaMemcpy(d_sf_imag, sf_imag, size_sf, cudaMemcpyHostToDevice);
cudaMemcpy(d_f, f, size_f, cudaMemcpyHostToDevice);
cudaMemcpy(d_q, q, size_q, cudaMemcpyHostToDevice);
cudaMemcpy(d_i, i, size_i, cudaMemcpyHostToDevice);
cudaMemcpy(d_p, p, size_p, cudaMemcpyHostToDevice);
// Launch kernel on GPU
dim3 threads_per_block(512); // Maximum number of threads per block
dim3 number_of_blocks(2048,2048,1);
//structureFactorChunk<<<number_of_blocks,threads_per_block>>>(d_sf_real, d_sf_imag, d_f, d_q, d_i, d_p, numAtomTypes, numPix, chunkSize);
structureFactorChunkParallel<<<number_of_blocks,threads_per_block>>>(d_sf_real, d_sf_imag, d_f, d_q, d_i, d_p, numAtomTypes, numPix, chunkSize);
//cudaThreadSynchronize();
// Copy result back to host
cudaMemcpy(sf_real, d_sf_real, size_sf, cudaMemcpyDeviceToHost);
cudaMemcpy(sf_imag, d_sf_imag, size_sf, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_sf_real); cudaFree(d_sf_imag); cudaFree(d_f); cudaFree(d_q); cudaFree(d_i); cudaFree(d_p);
}
void cuda_structureFactorChunkParallel(float *pad_real, float *pad_imag, float *f, float *q, int *i, float *p, int numAtomTypes, int numPix, int chunkSize) {
float *d_pad_real, *d_pad_imag, *d_f, *d_q, *d_p; // Pointer to device memory
int *d_i;
int size_pad = numPix*chunkSize*sizeof(float);
int size_f = numPix*numAtomTypes*sizeof(float);
int size_q = numPix*3*sizeof(float);
int size_i = chunkSize*sizeof(int);
int size_p = chunkSize*3*sizeof(float);
// Allocate space for device copies
cudaMalloc((void **)&d_pad_real, size_pad);
cudaMalloc((void **)&d_pad_imag, size_pad);
cudaMalloc((void **)&d_f, size_f);
cudaMalloc((void **)&d_q, size_q);
cudaMalloc((void **)&d_i, size_i);
cudaMalloc((void **)&d_p, size_p);
// Copy inputs to device
cudaMemcpy(d_f, f, size_f, cudaMemcpyHostToDevice);
cudaMemcpy(d_q, q, size_q, cudaMemcpyHostToDevice);
cudaMemcpy(d_i, i, size_i, cudaMemcpyHostToDevice);
cudaMemcpy(d_p, p, size_p, cudaMemcpyHostToDevice);
// Launch kernel on GPU
dim3 threads_per_block(chunkSize); // Maximum number of threads per block
dim3 number_of_blocks(numPix);
structureFactorChunkParallel<<<number_of_blocks,threads_per_block>>>(d_pad_real, d_pad_imag, d_f, d_q, d_i, d_p, numAtomTypes, numPix, chunkSize);
// Copy result back to host
cudaMemcpy(pad_real, d_pad_real, size_pad, cudaMemcpyDeviceToHost);
cudaMemcpy(pad_imag, d_pad_imag, size_pad, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_pad_real); cudaFree(d_pad_imag); cudaFree(d_f); cudaFree(d_q); cudaFree(d_i); cudaFree(d_p);
}
*/
|
3,626 | #include <cuda.h>
#include <cuda_runtime.h>
// Expose the most recent CUDA runtime error as a plain int
// (cudaGetLastError also clears the sticky error state).
int get_cuda_error_code()
{
    cudaError_t err = cudaGetLastError();
    return (int) err;
}
|
3,627 | #include <iostream>
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#define t1 4096
#define t2 4096
#define N 1
#define ITERATIONS 10
#define BLOCK_SIZE 32
using namespace std;
float A[N * N], B[N * N], C[N * N], C_cmp[N * N];
// Copy the four n x n quadrants out of the 2n x 2n matrix C.
__global__ void split(float *C11, float *C12, float *C21, float *C22, float *C, int n) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if(r >= n || c >= n)
        return;
    C11[r * n + c] = C[r * 2 * n + c];
    C12[r * n + c] = C[r * 2 * n + c + n];
    C21[r * n + c] = C[(r + n) * 2 * n + c];
    C22[r * n + c] = C[(r + n) * 2 * n + c + n];
}
// Write the four n x n quadrants back into the 2n x 2n matrix C.
__global__ void merge(float *C11, float *C12, float *C21, float *C22, float *C, int n) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if(r >= n || c >= n)
        return;
    C[r * 2 * n + c] = C11[r * n + c];
    C[r * 2 * n + c + n] = C12[r * n + c];
    C[(r + n) * 2 * n + c] = C21[r * n + c];
    C[(r + n) * 2 * n + c + n] = C22[r * n + c];
}
// Element-wise C = A + B for n x n matrices; one thread per element.
__global__ void add(float *A, float *B, float *C, int n) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if(r >= n || c >= n)
        return;
    C[r * n + c] = A[r * n + c] + B[r * n + c];
}
// Element-wise C = A - B for n x n matrices; one thread per element.
__global__ void sub(float *A, float *B, float *C, int n) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if(r >= n || c >= n)
        return;
    C[r * n + c] = A[r * n + c] - B[r * n + c];
}
// Naive n x n matrix product C = A * B; one thread per output element.
// x maps to the column so the B accesses are coalesced across a warp.
__global__ void mul(float *A, float *B, float *C, int n) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(row >= n || col >= n)
        return;
    float acc = 0;
    for(int k = 0; k < n; k++)
        acc += A[row * n + k] * B[k * n + col];
    C[row * n + col] = acc;
}
// Fused multiply-add: C = T + A * B; one thread per output element.
__global__ void mul_add(float *A, float *B, float *T, float *C, int n) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(row >= n || col >= n)
        return;
    float acc = T[row * n + col];
    for(int k = 0; k < n; k++)
        acc += A[row * n + k] * B[k * n + col];
    C[row * n + col] = acc;
}
// Fused Strassen step: C1 = T - A*B and, afterwards, C2 += T.
// One thread per element; statement order within a thread matters.
__global__ void mul_sub_inc(float *A, float *B, float *T, float *C1, float *C2, int n) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    if(i < n && j < n) {
        // Accumulate the product into C1 first...
        C1[i * n + j] = 0;
        for(int k = 0; k < n; k++) {
            C1[i * n + j] += A[i * n + k] * B[k * n + j];
        }
        // ...then flip it to T - product, and add T into C2.
        C1[i * n + j] = T[i * n + j] - C1[i * n + j];
        C2[i * n + j] += T[i * n + j];
    }
}
// Fused Strassen step: C = A*B; then C1 += C; then C2 += C1 (using the
// just-updated C1); finally C1 += T. One thread per element; the update
// order is load-bearing for the Strassen recombination.
__global__ void mul_inc_inc_inc(float *A, float *B, float *C, float *T, float *C1, float *C2, int n) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    if(i < n && j < n) {
        C[i * n + j] = 0;
        for(int k = 0; k < n; k++) {
            C[i * n + j] += A[i * n + k] * B[k * n + j];
        }
        C1[i * n + j] += C[i * n + j];
        C2[i * n + j] += C1[i * n + j];
        C1[i * n + j] += T[i * n + j];
    }
}
// Strassen-Winograd multiply on the GPU: C = A * B for n x n row-major
// host matrices (n is assumed to be a power of two).
//  - n <= t1 : plain cubic kernel.
//  - n <= t2 : one Strassen level executed entirely with fused kernels.
//  - else    : recurse on the half-size quadrants.
// t1/t2, split and merge are defined elsewhere in this file.
// NOTE(review): the recursive calls pass *device* pointers (T1, quadrant
// buffers) into a routine that treats A/B as host memory and does a
// HostToDevice memcpy from them — this relies on unified addressing;
// confirm against the target platform.
// Fixes over the original:
//  - C11/C12/C21/C22 device buffers are now freed (they were leaked).
//  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
void strassen(float *A, float *B, float *C, int n) {
    float *A_gpu, *B_gpu, *C_gpu;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    cudaMalloc((void **)&A_gpu, sizeof(float) * n * n);
    cudaMalloc((void **)&B_gpu, sizeof(float) * n * n);
    cudaMalloc((void **)&C_gpu, sizeof(float) * n * n);
    cudaMemcpy(A_gpu, A, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(B_gpu, B, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    if(n <= t1) {
        // Base case: single naive kernel over the full matrix.
        dim3 grid((size_t)ceil((float)n / (float)block.x), (size_t)ceil((float)n / (float)block.y));
        mul<<<grid, block>>>(A_gpu, B_gpu, C_gpu, n);
        cudaDeviceSynchronize();
    }
    else{
        int m = n >> 1;  // quadrant dimension
        dim3 grid((size_t)ceil((float)m / (float)block.x), (size_t)ceil((float)m / (float)block.y));
        float *A11, *A12, *A21, *A22, *B11, *B12, *B21, *B22, *C11, *C12, *C21, *C22, *T1, *T2;
        cudaMalloc((void **)&A11, sizeof(float) * m * m);
        cudaMalloc((void **)&A12, sizeof(float) * m * m);
        cudaMalloc((void **)&A21, sizeof(float) * m * m);
        cudaMalloc((void **)&A22, sizeof(float) * m * m);
        cudaMalloc((void **)&B11, sizeof(float) * m * m);
        cudaMalloc((void **)&B12, sizeof(float) * m * m);
        cudaMalloc((void **)&B21, sizeof(float) * m * m);
        cudaMalloc((void **)&B22, sizeof(float) * m * m);
        cudaMalloc((void **)&C11, sizeof(float) * m * m);
        cudaMalloc((void **)&C12, sizeof(float) * m * m);
        cudaMalloc((void **)&C21, sizeof(float) * m * m);
        cudaMalloc((void **)&C22, sizeof(float) * m * m);
        cudaMalloc((void **)&T1, sizeof(float) * m * m);
        cudaMalloc((void **)&T2, sizeof(float) * m * m);
        if(n <= t2) {
            // One Strassen-Winograd level, all on the GPU with fused kernels.
            // The kernel ordering below is the Winograd schedule and must not
            // be reordered.
            split<<<grid, block>>>(A11, A12, A21, A22, A_gpu, m);
            cudaDeviceSynchronize();
            split<<<grid, block>>>(B11, B12, B21, B22, B_gpu, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(A11, A21, T1, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(B22, B12, T2, m);
            cudaDeviceSynchronize();
            mul<<<grid, block>>>(T1, T2, C21, m);
            cudaDeviceSynchronize();
            add<<<grid, block>>>(A21, A22, T1, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(B12, B11, T2, m);
            cudaDeviceSynchronize();
            mul<<<grid, block>>>(T1, T2, C22, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(T1, A11, T1, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(B22, T2, T2, m);
            cudaDeviceSynchronize();
            mul<<<grid, block>>>(T1, T2, C11, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(A12, T1, T1, m);
            cudaDeviceSynchronize();
            mul_add<<<grid, block>>>(T1, B22, C22, C12, m);
            cudaDeviceSynchronize();
            mul_inc_inc_inc<<<grid, block>>>(A11, B11, T1, C21, C11, C12, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(T2, B21, T2, m);
            cudaDeviceSynchronize();
            mul_sub_inc<<<grid, block>>>(A22, T2, C11, C21, C22, m);
            cudaDeviceSynchronize();
            mul_add<<<grid, block>>>(A12, B21, T1, C11, m);
            cudaDeviceSynchronize();
            merge<<<grid, block>>>(C11, C12, C21, C22, C_gpu, m);
            cudaDeviceSynchronize();
        }
        else{
            // Recursive level: quadrant products delegated back to strassen().
            split<<<grid, block>>>(A11, A12, A21, A22, A_gpu, m);
            cudaDeviceSynchronize();
            split<<<grid, block>>>(B11, B12, B21, B22, B_gpu, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(A11, A21, T1, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(B22, B12, T2, m);
            cudaDeviceSynchronize();
            strassen(T1, T2, C21, m);
            add<<<grid, block>>>(A21, A22, T1, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(B12, B11, T2, m);
            cudaDeviceSynchronize();
            strassen(T1, T2, C22, m);
            sub<<<grid, block>>>(T1, A11, T1, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(B22, T2, T2, m);
            cudaDeviceSynchronize();
            strassen(T1, T2, C11, m);
            sub<<<grid, block>>>(A12, T1, T1, m);
            cudaDeviceSynchronize();
            strassen(T1, B22, C12, m);
            add<<<grid, block>>>(C12, C22, C12, m);
            cudaDeviceSynchronize();
            strassen(A11, B11, T1, m);
            add<<<grid, block>>>(C11, C12, C12, m);
            cudaDeviceSynchronize();
            add<<<grid, block>>>(C12, T1, C12, m);
            cudaDeviceSynchronize();
            add<<<grid, block>>>(C11, C21, C11, m);
            cudaDeviceSynchronize();
            add<<<grid, block>>>(C11, T1, C11, m);
            cudaDeviceSynchronize();
            sub<<<grid, block>>>(T2, B21, T2, m);
            cudaDeviceSynchronize();
            strassen(A22, T2, C21, m);
            sub<<<grid, block>>>(C11, C21, C21, m);
            cudaDeviceSynchronize();
            add<<<grid, block>>>(C11, C22, C22, m);
            cudaDeviceSynchronize();
            strassen(A12, B21, C11, m);
            add<<<grid, block>>>(C11, T1, C11, m);
            cudaDeviceSynchronize();
            merge<<<grid, block>>>(C11, C12, C21, C22, C_gpu, m);
            cudaDeviceSynchronize();
        }
        cudaFree(A11);
        cudaFree(A12);
        cudaFree(A21);
        cudaFree(A22);
        cudaFree(B11);
        cudaFree(B12);
        cudaFree(B21);
        cudaFree(B22);
        // Fix: the result quadrants were leaked in the original.
        cudaFree(C11);
        cudaFree(C12);
        cudaFree(C21);
        cudaFree(C22);
        cudaFree(T1);
        cudaFree(T2);
    }
    cudaMemcpy(C, C_gpu, sizeof(float) * n * n, cudaMemcpyDeviceToHost);
    cudaFree(A_gpu);
    cudaFree(B_gpu);
    cudaFree(C_gpu);
}
// Elementwise comparison of two float buffers of length n.
// Pairs where both magnitudes are below 0.01 are ignored; the first ten
// surviving pairs are echoed for inspection; relative differences above
// 0.05% are counted and the total is reported.
void compare(float *res1, float *res2, int n) {
    int errors = 0;
    for (int i = 0; i < n; i++) {
        float a = res1[i];
        if (a < 0) a = -a;
        float b = res2[i];
        if (b < 0) b = -b;
        // Both values negligible: not worth comparing.
        if (a < 0.01 && b < 0.01)
            continue;
        if (i < 10)
            printf("i = %d\t%lf\t%lf\n", i, a, b);
        // Relative error with a small epsilon to dodge division by zero.
        float diff = (a - b) / (a + 0.000001);
        if (diff < 0)
            diff = -diff;
        if (diff > 0.0005)
            errors++;
    }
    printf("Number of errors: %d\n", errors);
}
// Wall-clock time in seconds since the Unix epoch, sub-microsecond capable.
// Fix: the original used gettimeofday()/struct timeval, which require
// <sys/time.h> — a header this file never includes. Reimplemented on
// <chrono>, which the file already includes, with identical semantics.
double timestamp(){
    using namespace std::chrono;
    return duration_cast<duration<double>>(
        system_clock::now().time_since_epoch()).count();
}
// Driver: fills random N x N inputs, validates the GPU Strassen result
// against a CPU cubic reference, then times ITERATIONS repeated GPU runs
// and reports throughput figures. N, ITERATIONS and the A/B/C/C_cmp
// buffers are defined earlier in this file.
int main() {
    // Random inputs in [0,1); outputs zeroed. The rand() call order
    // (A then B per element) is preserved for reproducibility.
    for(int row = 0; row < N; row++) {
        for(int col = 0; col < N; col++) {
            A[row * N + col] = (float)rand() / (float)RAND_MAX;
            B[row * N + col] = (float)rand() / (float)RAND_MAX;
            C[row * N + col] = 0;
            C_cmp[row * N + col] = 0;
        }
    }
    // CPU reference product (inner k ascending, same summation order).
    for(int row = 0; row < N; row++) {
        for(int col = 0; col < N; col++) {
            for(int k = 0; k < N; k++) {
                C_cmp[row * N + col] += A[row * N + k] * B[k * N + col];
            }
        }
    }
    // Correctness check, then timed runs.
    strassen(A, B, C, N);
    compare(C, C_cmp, N * N);
    double tBegin = timestamp();
    for(int rep = 0; rep < ITERATIONS; rep++) {
        strassen(A, B, C, N);
    }
    double tEnd = timestamp();
    double time = (tEnd - tBegin) / ITERATIONS;
    double flops = 2.0 * N * N * N;
    double gflopsPerSecond = flops / 1000000000 / time;
    double GB = 4.0 * N * N / 1000000000;
    double GBpS = 4.0 * N * N / 1000000000 / time;
    printf("GFLOPS/s = %lf\n", gflopsPerSecond);
    printf("GB/s = %lf\n", GBpS);
    printf("GFLOPS = %lf\n", flops / 1000000000);
    printf("GB = %lf\n", GB);
    printf("time(s) = %lf\n", time);
    return 0;
}
|
3,628 | #include <stdlib.h>
#include <stdio.h>
// Busy-loop "burner" kernel whose only job is to keep the GPU occupied.
// NOTE(review): for cid >= 1 the loop never terminates — val is reset to 0
// each time it reaches cid, so it can never reach cid+10; only thread 0 of
// each block exits. Combined with the endless relaunch loop in main(),
// this appears to be an intentional stress/hang test — confirm.
__global__ void run(void)
{
int cid = threadIdx.x;
int val = 0;
while(val<(cid+10)){
// do some "work" so the loop can't be compiled away
val++;
if(val == cid){
val = 0;
}
}
}
// Host driver: relaunches the burner kernel forever with a 1024-block x
// 1024-thread grid. The for(;;) never exits, so the return statement is
// unreachable — consistent with a deliberate GPU stress test.
int main(int argc, char** argv)
{
for(;;){
run<<<1024,1024>>>();
// NOTE(review): err is captured but never inspected, so launch or
// execution failures are silently ignored.
cudaError_t err = cudaThreadSynchronize();
}
return 0;
}
3,629 | #include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
// Feature maps dimensionality descriptions and assumptions:
// : Height : Width : Channels : Number :
// INPUT / A | H | W | C | ------------------------- |
// KERNELS / F | P = K | Q = K | R = C | D = number of kernels = 1 |
// OUTPUT / B | L = H - (K - 1) | M = W - (K - 1) | N = D = 1 | ------------------------- |
// [!] K must be odd number.
// [!] Data layout for INPUT/OUTPUT: C x H x W.
// [!] Data layout for KERNELS: D x R(=C) x P(=K) x Q(=K)
// Turn on/off debug mode
// #define DEBUG
// #define FUNCTEST
#define PERFTEST
#ifdef DEBUG
#define LOG(...) printf(__VA_ARGS__); fflush(stdout);
#else
#define LOG(...) ;
#endif
const unsigned int H = 256, W = 256, C = 80, K = 3;
// HOST FUNCTION
// Takes matrix A [float *matA] and transforms it
// into column representation [float *matAc]
// HOST FUNCTION
// Expands matrix A (layout C x H x W, H read from the file-level constant)
// into its column ("im2col") representation matAc for a K x K x C filter.
// radiusF = (K-1)/2, countF = K*K*C, L/M = output height/width.
void im2colOnHost(float *matA, float *matAc, int radiusF, int countF, int L, int M, int K, int C)
{
    // Walk every output spatial position (l, m)...
    for (int m = 0; m < M; m++) {
        const int w = m + radiusF;
        for (int l = 0; l < L; l++) {
            const int h = l + radiusF;
            // Progress..
            LOG("\r[i] Calculation on CPU %3d%%...", ((m * L + l) * 100 / (M * L)));
            // ...and copy its K x K x C receptive field into one column.
            for (int q = 0; q < K; q++) {
                const int oq = q - radiusF;  // vertical kernel offset
                for (int p = 0; p < K; p++) {
                    const int op = p - radiusF;  // horizontal kernel offset
                    for (int r = 0; r < C; r++) {
                        matAc[(r + C * (p + K * q)) + countF * (l + L * m)] = matA[r + C * ((h + op) + H * (w + oq))];
                    }
                }
            }
        }
    }
    LOG("\n");
}
// DEVICE KERNEL
// Takes matrix A [float *matA] and transforms it
// into column representation [float *matAc] on GPU
// DEVICE KERNEL
// im2col on the GPU: each linear index idx encodes one (channel, output
// row, output column) triple; its K x K receptive field is copied into the
// corresponding column of matAc. A grid-stride loop makes the kernel
// correct for any launch configuration covering n = L*M*C elements.
// https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
__global__
void im2colOnDevice(unsigned int n, float *matAc, float *matA, int radiusF, int countF, int L, int M, int K, int C)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
    {
        const int r = idx % C;        // channel
        const int l = (idx / C) % L;  // output row
        const int m = (idx / C) / L;  // output column
        if (m < M && l < L && r < C) {
            const int w = m + radiusF;
            const int h = l + radiusF;
            // Copy this position's K x K window for channel r.
            for (int q = 0; q < K; q++) {
                const int oq = q - radiusF;
                for (int p = 0; p < K; p++) {
                    const int op = p - radiusF;
                    matAc[(r + C * (p + K * q)) + countF * (l + L * m)] = matA[r + C * ((h + op) + H * (w + oq))];
                }
            }
        }
    }
}
// DEVICE KERNEL
// Takes matrix A [float *matA] and transforms it
// into column representation [float *matAc] on GPU
// DEVICE KERNEL
// Inverse of im2col: scatters the column representation matAc back into the
// C x H x W image matA. One linear index per (channel, output row, output
// column) triple; grid-stride loop covers n = L*M*C elements.
// NOTE(review): overlapping receptive fields mean several idx values write
// the same matA element concurrently. That is only benign here because
// matAc was produced by im2col of a consistent matA, so all racing writes
// store the same value; a true gradient-style col2im would need += with
// atomics — confirm intended usage.
__global__
void col2imOnDevice(unsigned int n, float *matA, float *matAc, int radiusF, int countF, int L, int M, int K, int C)
{
// Using grid-stride loop if too big problem size.
// https://devblogs.nvidia.com/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < n;
idx += blockDim.x * gridDim.x)
{
// Decode idx into channel r, output row l, output column m.
int m = (idx / C) / L;
int l = (idx / C) % L;
int r = idx % C;
// For each spatial position in output...
if (m < M) {
int w = m + radiusF;
if (l < L) {
int h = l + radiusF;
// For each kernel weight...
for (int q = 0, oq = -1 * radiusF; oq <= radiusF; q++, oq++) {
for (int p = 0, op = -1 * radiusF; op <= radiusF; p++, op++) {
if (r < C) {
matA[r + C * ((h + op) + H * (w + oq))] = matAc[(r + C * (p + K * q)) + countF * (l + L * m)];
}
}
}
}
}
}
}
// One full benchmark pass: generates an input volume, runs im2col and
// col2im on the device with the given launch configuration, and (under
// FUNCTEST) validates both against the CPU reference / original input.
// gridSize == 0 means "derive the grid from the problem size".
// NOTE(review): no CUDA API call here is error-checked; a failed malloc or
// launch would silently produce garbage results — consider adding checks.
void program(unsigned int blockSize, unsigned int gridSize = 0)
{
// CONSTS AND VARIABLES
// Input/kernel/output counts and sizes
const unsigned int countA = H*W*C;
const size_t sizeA = countA*sizeof(float);
LOG("[i] INPUT PARAMS: %u height, %u width, %u channels, %u elems, %u bytes\n", H, W, C, countA, sizeA);
const unsigned int radiusF = (K - 1) / 2;
const unsigned int countF = K*K*C;
LOG("[i] FILTER PARAMS: %u radius, %u elems, %u bytes\n", radiusF, countF, countF*sizeof(float));
// Valid-convolution output dimensions.
const unsigned int L = H - (K - 1);
const unsigned int M = W - (K - 1);
LOG("[i] OUTPUT PARAMS: %u height, %u width, %u channels\n", L, M, 1);
const unsigned int countLR = L * M;
const unsigned int countAc = countF * countLR;
const size_t sizeAc = countAc*sizeof(float);
LOG("[i] INPUT IN COL PARAMS: %u elems, %u bytes\n", countAc, sizeAc);
// PREPARE DATA
// Generate input data (deterministic ramp 0,1,2,... so runs are comparable).
float *matA = (float *)malloc(sizeA);
for (int i = 0; i < countA; i++) {
matA[i] = i;
}
LOG(" [!] FINISHED GENERATING INPUT\n");
#ifdef FUNCTEST
// Calculate im2col result on the CPU as the ground truth.
float *matAc = (float *)malloc(sizeAc);
im2colOnHost(matA, matAc, radiusF, countF, L, M, K, C);
LOG(" [!] FINISHED CALCULATING im2col RESULT ON CPU\n");
#endif
// Alloc memory and copy data to device
float *devA, *devAc, *retAc;
cudaMalloc((void**)&devA, sizeA);
cudaMalloc((void**)&devAc, sizeAc);
retAc = (float *)malloc(sizeAc);
cudaMemcpy(devA, matA, sizeA, cudaMemcpyHostToDevice);
// Compute default grid size if it wasn't passed (one thread per element).
const unsigned int KERNELS_NUM = L * M * C;
if (gridSize == 0)
gridSize = (KERNELS_NUM + blockSize - 1) / blockSize;
// Run im2col computation on device and copy results
im2colOnDevice<<<gridSize, blockSize>>>(KERNELS_NUM, devAc, devA, radiusF, countF, L, M, K, C);
LOG(" [!] FINISHED CALCULATING im2col ON DEVICE\n");
// Blocking copy doubles as synchronization with the kernel above.
cudaMemcpy(retAc, devAc, sizeAc, cudaMemcpyDeviceToHost);
#ifdef FUNCTEST
// Compare GPU im2col against the CPU reference (exact match expected:
// the operation is a pure copy/reindex, no arithmetic).
int success = 1;
for (int i = 0; i < countAc; i++) {
if (retAc[i] != matAc[i]) {
success = 0;
printf("TEST FAILED: im2col device kernel...\n");
break;
}
}
if (success) {
printf("TEST PASSED: im2col device kernel!\n");
}
#endif
// Allocate memory for return value
float *retA;
retA = (float *)malloc(sizeA);
// Clear devA so col2im rebuilds it from scratch.
cudaMemset(devA, 0, sizeA);
// Run col2im computation on device and copy results
col2imOnDevice<<<gridSize, blockSize>>>(KERNELS_NUM, devA, devAc, radiusF, countF, L, M, K, C);
LOG(" [!] FINISHED CALCULATING col2im ON DEVICE\n");
cudaMemcpy(retA, devA, sizeA, cudaMemcpyDeviceToHost);
#ifdef FUNCTEST
// Round-trip check: col2im(im2col(A)) should reproduce A exactly for the
// interior elements written by the kernel.
success = 1;
for (int i = 0; i < countA; i++) {
if (retA[i] != matA[i]) {
success = 0;
printf("TEST FAILED: col2im device kernel...\n");
break;
}
}
if (success) {
printf("TEST PASSED: col2im device kernel!\n");
}
#endif
// CLEAN UP
cudaFree(devA);
cudaFree(devAc);
free(matA);
#ifdef FUNCTEST
free(matAc);
#endif
free(retA);
free(retAc);
}
// Benchmark driver. After a warm-up run, PERFTEST sweeps block sizes
// (powers of two) and grid sizes (powers of two up to 8192), averaging the
// whole-program wall time over 10 runs per configuration, and writes
// "blockSize,gridSize,elapsedMs" rows to perflog.csv (-1 marks
// configurations with more blocks than the data can use).
// The #ifdef blocks below deliberately open/close braces across
// preprocessor boundaries so the same timing body compiles both with and
// without PERFTEST — edit with care.
// NOTE(review): MAX_BLOCK_SIZE is 2048, but CUDA caps threads-per-block at
// 1024; the 2048 configurations will fail to launch, and since program()
// checks no errors they are silently recorded as (meaningless) timings.
int main()
{
// Enforce default block and grid sizes
unsigned int blockSize = 256;
unsigned int gridSize = 0;
// Calculate max needed kernels/threads number
const unsigned int L = H - (K - 1);
const unsigned int M = W - (K - 1);
const unsigned int KERNELS_NUM = L * M * C;
// Prepare variables for time measurement
struct timeval t1, t2;
double elapsedTime, totalTime = 0;
int totalRuns = 1;
// First warm-up run (excluded from the measurements)
LOG("--------- WARM-UP ---------\n");
program(256);
LOG("--------- WARM-UP ---------\n\n");
#ifdef PERFTEST
// Average over 10 runs
totalRuns = 10;
// Open file for perf logs
std::fstream fperflog("perflog.csv", std::ios::out);
if (fperflog.good())
{
// Measure effect of different block sizes
const unsigned int MAX_BLOCK_SIZE = 2048;
for (blockSize = 1; blockSize <= MAX_BLOCK_SIZE; blockSize *= 2) {
const unsigned int MAX_GRID_SIZE = (KERNELS_NUM + blockSize - 1) / blockSize;
LOG(" [!] For %d blocks, max grid size is %d\n", blockSize, MAX_GRID_SIZE);
for (gridSize = 1; gridSize <= 8192; gridSize *= 2) {
if (gridSize <= MAX_GRID_SIZE) {
totalTime = 0;
for (int i = 0; i < totalRuns; i++)
#endif
{
// Start timer
gettimeofday(&t1, NULL);
// WORK HARD!
program(blockSize, gridSize);
// Stop timer
gettimeofday(&t2, NULL);
// Compute the elapsed time in millisec
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
totalTime += elapsedTime;
}
LOG(" [!] Whole program took %.3fms averaged over %d runs\n", totalTime / totalRuns, totalRuns);
#ifdef PERFTEST
// NOTE(review): this logs the last run's elapsedTime, not the
// average totalTime/totalRuns — confirm which was intended.
fperflog << blockSize << "," << gridSize << "," << elapsedTime << std::endl;
} else {
// Meaningless configuration: more grid blocks than the data can utilize.
fperflog << blockSize << "," << gridSize << "," << -1 << std::endl;
}
}
}
// Close file
fperflog.close();
}
#endif
return EXIT_SUCCESS;
}
|
3,630 | #include "includes.h"
// Box blur over a (2*BLUR_SIZE+1)^2 window on a w x h uchar3 image.
// Note: all three channels are summed into ONE accumulator, so every
// output channel receives the same combined average (grayscale-like
// result) — this mirrors the original behavior, where the per-channel
// accumulators were commented out.
__global__ void blurKernel(uchar3 *in, uchar3 *out, int w, int h)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= w || row >= h)
        return;
    int sum = 0;     // combined x+y+z accumulator
    int samples = 0; // number of channel samples folded into sum
    for (int dy = -BLUR_SIZE; dy <= BLUR_SIZE; ++dy) {
        int y = row + dy;
        if (y <= -1 || y >= h)
            continue;  // clip the window at the top/bottom edge
        for (int dx = -BLUR_SIZE; dx <= BLUR_SIZE; ++dx) {
            int x = col + dx;
            if (x <= -1 || x >= w)
                continue;  // clip at the left/right edge
            uchar3 pix = in[y * w + x];
            sum += static_cast<int>(pix.x);
            sum += static_cast<int>(pix.y);
            sum += static_cast<int>(pix.z);
            samples += 3;
        }
    }
    // samples >= 3 always: the center pixel itself is in range.
    unsigned char avg = static_cast<unsigned char>(sum / samples);
    out[row * w + col].x = avg;
    out[row * w + col].y = avg;
    out[row * w + col].z = avg;
}
3,631 |
typedef long long LL;
__device__
int cuda_field_modulus;
// Modular inverse of a modulo p via a recursive extended-Euclid style
// identity, rearranged so all intermediates stay non-negative. Assumes
// gcd(a, p) == 1 and 0 < a < p; recursion depth is O(log p), so the
// default device stack suffices for 32-bit moduli.
__device__
int inverse(int a, int p){
return a == 1 ? 1 : ((LL)(a-inverse(p%a, a))*p+1)/a;
}
// Sets the device-global prime modulus used by all cuda_field_element
// arithmetic. Must run (from device code) before any element is built.
__device__
void cuda_field_init(int m){
cuda_field_modulus = m;
}
// Element of the prime field Z/p, p = cuda_field_modulus (set via
// cuda_field_init before use). The invariant is value in [0, p); all
// operators below rely on it.
struct cuda_field_element {
    __device__
    cuda_field_element(){}  // uninitialized, like a raw int
    __device__
    cuda_field_element(const cuda_field_element &v) : value(v.value) {}
    // Construct from an int, reducing mod p.
    // Fix: a negative v is now normalized into [0, p); the original left it
    // negative, breaking the class invariant that +=, -=, == depend on.
    __device__
    cuda_field_element(int v) : value(v) {
        value %= cuda_field_modulus;
        if (value < 0) value += cuda_field_modulus;
    }
    __device__
    cuda_field_element& operator=(const cuda_field_element &v){ value = v.value; return *this; }
    // Addition without a divide: a single conditional subtraction suffices
    // because both operands are already in [0, p).
    __device__
    cuda_field_element& operator+=(const cuda_field_element &v){
        value += v.value; if(value >= cuda_field_modulus) value -= cuda_field_modulus; return *this;
    }
    __device__
    cuda_field_element& operator-=(const cuda_field_element &v){
        value -= v.value; if(value < 0) value += cuda_field_modulus; return *this;
    }
    // Widen to 64-bit before the product so it cannot overflow.
    __device__
    cuda_field_element& operator*=(const cuda_field_element &v){ value = (LL)value * v.value % cuda_field_modulus; return *this; }
    __device__
    cuda_field_element operator+(const cuda_field_element &v) const { return cuda_field_element(*this) += v; }
    __device__
    cuda_field_element operator-(const cuda_field_element &v) const { return cuda_field_element(*this) -= v; }
    __device__
    cuda_field_element operator*(const cuda_field_element &v) const { return cuda_field_element(*this) *= v; }
    // Additive inverse: p - value, with 0 mapped to 0.
    __device__
    cuda_field_element operator-() const { return value == 0 ? 0 : cuda_field_modulus - value; }
    // Multiplicative inverse (requires value != 0).
    __device__
    cuda_field_element operator~() const { return inverse(value, cuda_field_modulus); }
    __device__
    bool operator==(const cuda_field_element &v) const { return value == v.value; }
    __device__
    bool operator!=(const cuda_field_element &v) const { return value != v.value; }
    __device__
    int get_value() const { return value; }
private:
    int value;  // canonical representative in [0, cuda_field_modulus)
};
|
3,632 | #include "assert.h"
#include "real.h"
#include <iostream>
#include "gpuerrchk.cuh"
#include "math.h"
#define MAX_MASK_WIDTH 10
#define TILE_SIZE 1000
__device__ __constant__ float d_M[1000];
// Tiled 1D convolution: P[i] = sum_j A[i - mask_width/2 + j] * d_M[j],
// skipping taps outside [0, width). Each block stages its tile of A in
// shared memory (TILE_SIZE >= blockDim.x required); halo taps that fall in
// a neighboring tile are read straight from global memory.
// mask_width is assumed odd, so taps span [-mask_width/2, mask_width/2].
// Fix over the original: the shared-memory load and the final store are
// now bounds-guarded, so the last (partial) block no longer reads past the
// end of A or writes past the end of P when width % blockDim.x != 0.
__global__ void share_conv_kernel(real* A, real* P, int mask_width, int width){
    __shared__ real A_s[TILE_SIZE];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < width)
        A_s[threadIdx.x] = A[i];
    __syncthreads();   // barrier is outside the guard: all threads reach it
    if (i >= width)
        return;        // tail threads exit only after the barrier
    int this_tile_start_point = blockIdx.x * blockDim.x;
    int next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
    int mask_start_point = i - mask_width / 2;
    real Pvalue = 0;
    for (int j = 0; j < mask_width; j++){
        int mask_index = mask_start_point + j;
        // Taps outside the signal contribute nothing.
        if (mask_index < 0 || mask_index >= width)
            continue;
        if (mask_index >= this_tile_start_point && mask_index < next_tile_start_point)
            // In-tile tap: serve it from shared memory. The slot is valid
            // because mask_index < width implies its loader ran.
            Pvalue += A_s[threadIdx.x + j - mask_width / 2] * d_M[j];
        else
            // Halo tap in a neighboring tile: fall back to global memory.
            Pvalue += A[mask_index] * d_M[j];
    }
    P[i] = Pvalue;
}
// Host wrapper: copies the signal A and mask M to the device, launches the
// shared-memory convolution kernel, and copies the result back into P.
// Fix over the original: cudaPeekAtLastError() now runs immediately after
// the kernel launch (it previously ran after the device-to-host copy), so
// launch-configuration failures are attributed to the launch itself.
void share_conv(real* A, float* M, real* P, int mask_width, int width){
    real* d_A;
    real* d_P;
    gpuErrchk(cudaMalloc((void**)&d_A, sizeof(real)*width ));
    gpuErrchk(cudaMemcpy(d_A, A, sizeof(real)*width, cudaMemcpyHostToDevice ) );
    // Mask lives in constant memory (d_M), broadcast-friendly for the kernel.
    gpuErrchk(cudaMemcpyToSymbol(d_M, M, sizeof(real)*mask_width ) );
    gpuErrchk(cudaMalloc((void**)&d_P, sizeof(real)*width ));
    int blocksize = 512;
    // ceil-divide so a partial final block covers the tail of the signal.
    share_conv_kernel<<<ceil(width/ (real)blocksize),blocksize >>>(d_A, d_P, mask_width, width);
    gpuErrchk( cudaPeekAtLastError() );
    // Blocking copy also synchronizes with the kernel.
    gpuErrchk(cudaMemcpy(P, d_P, sizeof(real)*width, cudaMemcpyDeviceToHost ) );
    gpuErrchk(cudaFree(d_A ) );
    gpuErrchk(cudaFree(d_P ) );
}
/*void trial(){
constexpr int asize=10^5;
constexpr int bsize=1000;
real A[asize];
for(int i=0; i< asize; i++){
A[i]=1;
}
real M[bsize];
for (int i=0; i<bsize; ++i){
M[i]=i;
}
real P[asize];
share_conv(A,M,P,bsize,asize);
}
*/
|
// FDTD E-field update for one timestep on a 3D grid stored flat with
// strides Nz (z) and Nyz (y*z). fidx remaps the dense thread index onto
// interior grid points, skipping one boundary layer; Nyzm is presumably
// (Ny-1)*(Nz-1) — TODO confirm against the launch code. CEx/CEy/CEz are
// precomputed update coefficients. No bounds guard: the launch must supply
// exactly the interior point count.
__global__ void update_e( int Nz, int Nyz, int Nyzm, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Skip one cell per z-row and one row per y-slab, plus a leading offset,
// so only interior nodes are updated.
int fidx = idx + idx/(Nz-1) + idx/Nyzm*Nz + Nyz + Nz + 1;
// Curl of H (forward differences along x: +Nyz, y: +Nz, z: +1).
Ex[fidx] += CEx[fidx]*( Hz[fidx+Nz] - Hz[fidx] - Hy[fidx+1] + Hy[fidx] );
Ey[fidx] += CEy[fidx]*( Hx[fidx+1] - Hx[fidx] - Hz[fidx+Nyz] + Hz[fidx] );
Ez[fidx] += CEz[fidx]*( Hy[fidx+Nyz] - Hy[fidx] - Hx[fidx+Nz] + Hx[fidx] );
}
// FDTD H-field update for one timestep; mirror of update_e but using
// backward differences (-Nyz/-Nz/-1) and a hard-coded 0.5 update
// coefficient. Same interior-only index mapping and launch-size
// assumptions as update_e.
__global__ void update_h( int Nz, int Nyz, int Nyzm, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int fidx = idx + idx/(Nz-1) + idx/Nyzm*Nz + Nyz + Nz + 1;
// Curl of E with fixed coefficient 0.5 (normalized units, presumably —
// TODO confirm).
Hx[fidx] -= 0.5*( Ez[fidx] - Ez[fidx-Nz] - Ey[fidx] + Ey[fidx-1] );
Hy[fidx] -= 0.5*( Ex[fidx] - Ex[fidx-1] - Ez[fidx] + Ez[fidx-Nyz] );
Hz[fidx] -= 0.5*( Ey[fidx] - Ey[fidx-Nyz] - Ex[fidx] + Ex[fidx-Nz] );
}
|
3,634 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
// Row-major matrix view: element (row, col) lives at
// elements[row * width + col]. `elements` is owned by the caller and may
// point to host or device memory depending on context.
typedef struct {
int width;        // number of columns (row stride)
int height;       // number of rows
float* elements;  // row-major data, length width * height
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C);
// Demo driver: multiplies a fixed 4x4 A by a 4x8 B on the GPU via MatMul
// and prints the resulting 4x8 C row by row.
int main() {
    float A[16] = { 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4 };
    float B[32] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8 };
    int rowsA, colsA, rowsB, colsB;
    rowsA = 4;
    colsA = 4;
    rowsB = 4;
    colsB = 8;
    float *C = (float*)calloc(rowsA * colsB, sizeof(float));
    Matrix Am, Bm, Cm;
    Am.height = rowsA;
    Am.width = colsA;
    Am.elements = A;
    Bm.height = rowsB;
    Bm.width = colsB;
    Bm.elements = B;
    Cm.height = rowsA;
    Cm.width = colsB;
    Cm.elements = C;
    MatMul(Am, Bm, Cm);
    // Row-major printout.
    // Fix: index with the row stride Cm.width — the original used
    // Cm.height (4) as the stride on the 4x8 result and printed the
    // wrong elements.
    for (int i = 0; i < Cm.height; ++i){
        for (int j = 0; j < Cm.width; ++j){
            printf("%f ", Cm.elements[i * Cm.width + j]);
        }
        printf("\n");
    }
    printf("\n");
    free(C);  // fix: release the host result buffer (was leaked)
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
// NOTE(review): the grid uses integer division (B.width / BLOCK_SIZE,
// A.height / BLOCK_SIZE). For dimensions smaller than BLOCK_SIZE (e.g. the
// 4x8 matrices in main above) this yields a 0x0 grid: the kernel never
// runs and C comes back all zeros. A ceil-divide grid plus a bounds guard
// in MatMulKernel would fix this — confirm with the call sites.
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A; d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B; d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C; d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel (one thread per element of C)
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory (blocking copy synchronizes with the kernel)
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements); }
// Matrix multiplication kernel called by MatMul()
// Matrix multiplication kernel called by MatMul(): each thread accumulates
// one element of C = A * B (row-major) into a register.
// Fix: a bounds guard is added so launches whose grid overshoots C's
// dimensions (C not a multiple of BLOCK_SIZE) no longer read or write out
// of bounds.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= C.height || col >= C.width)
        return;
    float Cvalue = 0;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}
|
3,635 | /* This code implements the serial solution and CUDA version for finding the maximal burst in a time series;
How to compile:
nvcc compare.cu
How to run:
./a.out n k //n is the length of the time series and k is the minimum lenght of a subsequence
Results to see:
The burst found by two methods are printed out: "burst start from .. end at ..; max-me is .."
Notes:
The serial takes long time for a large n (e.g n=3000)
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// kernel function
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n, int k);
// max_burst calls the kernel, return three arrays, one for the maxval, one for startpoint, one for the endpoint in each block;
void max_burst(float *x, int n, int k, int *startend, float *bigmax);
//find the maximum from the returned arrays from the kernel. This function is called by max_burst
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax);
//serial solution
void s_max_burst(float *arr, int n, int k);
float mean(float *y, int s, int e);
// Entry point: ./a.out n k  (n = series length, k = minimum burst length).
// Generates a pseudo-random series and runs both the CUDA solver and the
// serial reference on it so the printed answers can be compared.
int main(int argc, char **argv) {
    // Fix: validate the argument count before touching argv (the original
    // dereferenced argv[1]/argv[2] unconditionally and crashed without args).
    if (argc < 3) {
        fprintf(stderr, "usage: %s n k\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    int k = atoi(argv[2]);
    // Generate a 1-D array of values in [0, 80); rand() is unseeded, so
    // runs are reproducible.
    float *arr = (float*) malloc(n*sizeof(float));
    int i;
    for (i = n; i > 0; i--) {
        arr[n-i] = (float)(rand() % 80);
    }
    // Cuda solution
    int startend[2];
    float bigmax;
    max_burst(arr, n,k, startend, &bigmax);
    // serial solution
    s_max_burst(arr, n,k);
    free(arr);  // fix: release the series buffer (was leaked)
    return 0;
}
// Per-block maximal-burst search. Each thread owns one candidate start
// index and scans all subsequences of length >= k from it, tracking the
// best mean incrementally; a shared-memory tree reduction then picks the
// block winner. Requirements established by the launch in max_burst:
//   - dynamic shared memory of 3 * blockDim.x * 4 bytes (floats + 2 int
//     arrays share one allocation),
//   - blockDim.x a power of two (the reduction halves it each step).
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
// declare three array for the maximum found by each thread
// learning material for shared memory: https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
extern __shared__ float sh[];
float *mymaxvals = sh;
int *mystartmaxes = (int *)&mymaxvals[blockDim.x];
int *myendmaxes = (int *)&mystartmaxes[blockDim.x];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporary variable used when computing mean of subsequence
int i, tid = threadIdx.x;
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
// Mean of the first window is computed directly; longer windows
// reuse it via the incremental update below (O(1) per extension).
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += darr[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + darr[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next subsequence in a thread has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
// Thread has no candidate start: neutral value for the max reduction.
// (Assumes the series is non-negative, as generated by main.)
mymaxvals[tid] = 0;//initialize it with the smallest number
}
__syncthreads(); //sync to make sure each thread in this block has done with the for loop
// Tree reduction over shared memory: winner migrates to index 0.
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
//put the maximum among the mymaxvals in this block to dmaxarr
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
// GPU driver for the maximal-burst search: launches max_each_block over
// the series x, gathers each block's best (value, start, end), reduces
// them on the host, and prints/returns the overall winner via
// startend[0..1] and *bigmax.
// Fixes over the original:
//  - the three host-side result arrays are now freed (they were leaked),
//  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
void max_burst(float *x, int n, int k, int *startend, float *bigmax) {
    const int numthreadsBlock = 1024;  // power of two, required by the kernel's reduction
    int numBlock = ( n + numthreadsBlock - 1)/numthreadsBlock;
    // Host-side buffers for the per-block results.
    float *maxarr = (float *)malloc(numBlock * sizeof(float));
    int *maxstart = (int *)malloc(numBlock * sizeof(int));
    int *maxend = (int *)malloc(numBlock * sizeof(int));
    // Device memory: input series plus per-block outputs.
    float *darr, * dmaxarr;
    int *dmaxstart, *dmaxend;
    cudaMalloc((void **)&darr, n*sizeof(float));
    cudaMalloc((void **)&dmaxarr, numBlock*sizeof(float));
    cudaMalloc((void **)&dmaxstart, numBlock*sizeof(int));
    cudaMalloc((void **)&dmaxend, numBlock*sizeof(int));
    cudaMemcpy(darr, x, n*sizeof(float), cudaMemcpyHostToDevice);
    // Execution configuration; the third launch argument sizes the dynamic
    // shared memory the kernel carves into one float and two int arrays.
    dim3 dimGrid(numBlock,1);
    dim3 dimBlock(numthreadsBlock,1,1);
    max_each_block<<<dimGrid,dimBlock,(3*numthreadsBlock)*sizeof(float)>>>(dmaxarr,dmaxstart,dmaxend, darr, n, k);
    cudaDeviceSynchronize();
    // Copy per-block winners back to the host.
    cudaMemcpy(maxarr, dmaxarr, numBlock*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(maxstart, dmaxstart, numBlock*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(maxend, dmaxend, numBlock*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(darr);
    cudaFree(dmaxarr);
    cudaFree(dmaxstart);
    cudaFree(dmaxend);
    find_max_from_blocks( maxarr, maxstart, maxend, numBlock,startend, bigmax);
    // Fix: release the host-side staging arrays (were leaked).
    free(maxarr);
    free(maxstart);
    free(maxend);
    printf("burst start from %d end at %d; max-mean is %f\n", startend[0], startend[1], *bigmax);
}
// Host-side reduction over the per-block winners: picks the strictly
// largest maxarr entry and reports its bounds through startend[0..1] and
// its value through *bigmax. *bigmax starts at 0, so block values are
// expected to be non-negative; if none exceed 0, startend is left as-is.
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax) {
    float best = 0;
    int bestIdx = -1;
    for (int b = 0; b < numBlock; b++) {
        if (maxarr[b] > best) {
            best = maxarr[b];
            bestIdx = b;
        }
    }
    *bigmax = best;
    if (bestIdx >= 0) {
        startend[0] = maxstart[bestIdx];
        startend[1] = maxend[bestIdx];
    }
}
// Serial reference: exhaustively scans every subsequence of length >= k
// and prints the one with the highest mean.
// Fixes over the original:
//  - the mean is updated incrementally when the window grows by one
//    element (same scheme as the GPU kernel, with a double accumulator),
//    turning the O(n^3) recompute-every-mean version into O(n^2);
//  - mystart/myend are initialized so the printf is defined even if n < k
//    (the loops then never run).
void s_max_burst(float *arr, int n, int k) {
    float mymaxval = -1;
    int perstart, perlen, perend;
    int mystart = 0, myend = 0;
    double xbar;
    for (perstart = 0; perstart <= n-k; perstart++) {
        // Seed with the mean of the first k-length window...
        xbar = 0;
        for (perend = perstart; perend < perstart + k; perend++)
            xbar += arr[perend];
        xbar /= k;
        if (xbar > mymaxval) {
            mymaxval = xbar;
            mystart = perstart;
            myend = perstart + k - 1;
        }
        // ...then extend one element at a time, updating the mean in O(1).
        for (perlen = k + 1; perlen <= n - perstart; perlen++) {
            perend = perstart + perlen - 1;
            xbar = ((perlen - 1) * xbar + arr[perend]) / perlen;
            if (xbar > mymaxval) {
                mymaxval = xbar;
                mystart = perstart;
                myend = perend;
            }
        }
    }
    printf("\nburst start from %d end %d, max-mean is %f\n", mystart, myend,mymaxval);
}
// Arithmetic mean of y[s..e], both endpoints inclusive.
float mean(float *y, int s, int e){
    float sum = 0;
    const int count = e - s + 1;
    for (int i = s; i <= e; i++)
        sum += y[i];
    return sum / count;
}
|
3,636 | #include "includes.h"
// 3D (scale-space) local extremum detector: for each pixel of the middle
// layer d_Data2, checks whether it is a minimum or maximum over its 3x3x3
// neighborhood across d_Data1/d_Data2/d_Data3, subject to the device
// thresholds d_Threshold[0..1] (defined elsewhere). Results are packed one
// bit per row into d_Result (bit y of the output word corresponds to row
// y of the 32-row band this block covers). Uses a 3-row rolling cache per
// layer in shared memory; MINMAX_W and WARP_SIZE come from includes.h.
// Block layout assumption: blockDim.x >= MINMAX_W + WARP_SIZE (the first
// WARP_SIZE-1 threads only help load the apron) — confirm with launcher.
__global__ void Find3DMinMax(int *d_Result, float *d_Data1, float *d_Data2, float *d_Data3, int width, int pitch, int height)
{
// Data cache: 3 rows per layer, plus per-row vertical min/max scratch.
__shared__ float data1[3*(MINMAX_W + 2)];
__shared__ float data2[3*(MINMAX_W + 2)];
__shared__ float data3[3*(MINMAX_W + 2)];
__shared__ float ymin1[(MINMAX_W + 2)];
__shared__ float ymin2[(MINMAX_W + 2)];
__shared__ float ymin3[(MINMAX_W + 2)];
__shared__ float ymax1[(MINMAX_W + 2)];
__shared__ float ymax2[(MINMAX_W + 2)];
__shared__ float ymax3[(MINMAX_W + 2)];
// Current tile and apron limits, relative to row start
const int tx = threadIdx.x;
const int xStart = blockIdx.x*MINMAX_W;
const int xEnd = xStart + MINMAX_W - 1;
const int xReadPos = xStart + tx - WARP_SIZE;
const int xWritePos = xStart + tx;
const int xEndClamped = min(xEnd, width - 1);
int memWid = MINMAX_W + 2;
// Rolling indices of the previous two rows in the 3-row cache.
int memPos0 = (tx - WARP_SIZE + 1);
int memPos1 = (tx - WARP_SIZE + 1);
int yq = 0;            // which of the 3 cache rows is being filled
unsigned int output = 0;  // per-row result bits, shifted in LSB-first
for (int y=0;y<32+2;y++) {
output >>= 1;
int memPos = yq*memWid + (tx - WARP_SIZE + 1);
// Source row, clamped to the image so the borders replicate.
int yp = 32*blockIdx.y + y - 1;
yp = max(yp, 0);
yp = min(yp, height-1);
int readStart = yp*pitch;
// Set the entire data cache contents: load one row of each layer,
// zero-padding outside [0, width).
if (tx>=(WARP_SIZE-1)) {
if (xReadPos<0) {
data1[memPos] = 0;
data2[memPos] = 0;
data3[memPos] = 0;
} else if (xReadPos>=width) {
data1[memPos] = 0;
data2[memPos] = 0;
data3[memPos] = 0;
} else {
data1[memPos] = d_Data1[readStart + xReadPos];
data2[memPos] = d_Data2[readStart + xReadPos];
data3[memPos] = d_Data3[readStart + xReadPos];
}
}
__syncthreads();
int memPos2 = yq*memWid + tx;
if (y>1) {
// Vertical 3-row min/max per column; ymin2/ymax2 additionally fold
// in the other two layers (full 3x3 vertical x scale extent).
if (tx<memWid) {
float min1 = fminf(fminf(data1[memPos0], data1[memPos1]), data1[memPos2]);
float min2 = fminf(fminf(data2[memPos0], data2[memPos1]), data2[memPos2]);
float min3 = fminf(fminf(data3[memPos0], data3[memPos1]), data3[memPos2]);
float max1 = fmaxf(fmaxf(data1[memPos0], data1[memPos1]), data1[memPos2]);
float max2 = fmaxf(fmaxf(data2[memPos0], data2[memPos1]), data2[memPos2]);
float max3 = fmaxf(fmaxf(data3[memPos0], data3[memPos1]), data3[memPos2]);
ymin1[tx] = min1;
ymin2[tx] = fminf(fminf(min1, min2), min3);
ymin3[tx] = min3;
ymax1[tx] = max1;
ymax2[tx] = fmaxf(fmaxf(max1, max2), max3);
ymax3[tx] = max3;
}
}
__syncthreads();
if (y>1) {
if (tx<MINMAX_W) {
if (xWritePos<=xEndClamped) {
// Horizontal fold completes the 3x3x3 neighborhood (excluding
// the center column's middle layer, re-added via data2 terms);
// d_Threshold clamps what counts as an extremum.
float minv = fminf(fminf(fminf(fminf(fminf(ymin2[tx], ymin2[tx+2]), ymin1[tx+1]), ymin3[tx+1]), data2[memPos0+1]), data2[memPos2+1]);
minv = fminf(minv, d_Threshold[1]);
float maxv = fmaxf(fmaxf(fmaxf(fmaxf(fmaxf(ymax2[tx], ymax2[tx+2]), ymax1[tx+1]), ymax3[tx+1]), data2[memPos0+1]), data2[memPos2+1]);
maxv = fmaxf(maxv, d_Threshold[0]);
// Center strictly below all neighbors' min or above their max
// => local extremum: record a bit for this row.
if (data2[memPos1+1]<minv || data2[memPos1+1]>maxv)
output |= 0x80000000;
}
}
}
__syncthreads();
// Rotate the rolling row indices.
memPos0 = memPos1;
memPos1 = memPos2;
yq = (yq<2 ? yq+1 : 0);
}
// One packed 32-bit word per (block-row, column).
if (tx<MINMAX_W && xWritePos<width) {
int writeStart = blockIdx.y*pitch + xWritePos;
d_Result[writeStart] = output;
}
}
3,637 | #include "includes.h"
// Elementwise difference of two arrays: output[i] = m1[i] - m2[i] for
// i in [0, nThreads). Grid-stride loop, so any 1D launch geometry covers
// the whole range. (Name typo "Martix" kept: callers depend on it.)
__global__ void kMartixSubstractMatrix(const int nThreads, const float *m1, const float *m2, float *output) {
    const int stride = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < nThreads) {
        output[i] = m1[i] - m2[i];
        i += stride;
    }
}
3,638 | #include "includes.h"
// Squared magnitude of a complex vector stored as split real/imaginary
// arrays: phiMag[k] = phiR[k]^2 + phiI[k]^2 for k < numK.
// Indexing assumes blocks of KERNEL_PHI_MAG_THREADS_PER_BLOCK threads.
__global__ void ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) {
    const int k = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x;
    if (k >= numK)
        return;
    const float re = phiR[k];
    const float im = phiI[k];
    phiMag[k] = re*re + im*im;
}
3,639 | /*
* cSumSquares.cu
*
* Copyright 2021 mike <mike@fedora33>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* =====================================================================
* Function g(n) is defined as greatest perfect square which divides n.
* Consider n = 10^9. 31623^2 = 1000014129
* To build a table of useful perfect squares: for x in range(1, 31624) calc x^2
* Requires 976577 blocks of 1024 threads
*
* Host memory approx 14Gb free, 10^9
*
* Available device memory approx. 1.6GiB
* Using a maximum of 190000 * 1024 * sizeof(long) requires 1.56GiB
* For a given value of N:
* calc lines required (N/1024) + 1
* pagecount = (lines/190000) + 1
*
* Launch the kernel pagecount times, summing partial results
*
* =====================================================================
*/
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#define DEBUG 1
// Fill d_squares[i] = (i+1)^2 for i in [0, n_squares); one thread per entry.
// The original cast the first factor to int, which was misleading and would
// truncate if i+1 ever exceeded INT_MAX; the product is now computed
// unambiguously in 64-bit.
__global__ void set_squares(long *d_squares, long n_squares) {
    long i = threadIdx.x + (blockIdx.x * blockDim.x);
    if (i < n_squares) d_squares[i] = (i+1)*(i+1);
}
// For each index of the current device page, find g(target) = the largest
// entry of d_squares[] that divides target, storing it in d_sums[i].
// d_squares must be sorted ascending; the scan runs from the largest square
// down and stops at the first divisor.
// NOTE(review): the scan stops before index 0 (square 1), so a target > 3
// divisible by no square > 1 leaves d_sums[i] unwritten — confirm intended.
__global__ void func_g(long* d_squares, const long n_squares, long *d_sums, long N, long page_size, int page_idx) {
    // Calc the index of result in device results page
    long i = threadIdx.x + (blockDim.x * blockIdx.x);
    // Calc actual target
    long target = i + (page_size * page_idx);
    if(target <= N) {
        // scan in reverse the squares array
        // save first square which divides target in d_sums[i]
        if(target > 3) {
            for(long x = n_squares-1; x > 0; x -= 1) {
                if((target % d_squares[x]) == 0) {
                    d_sums[i] = d_squares[x];
                    // Fix: x is a long, so it must be printed with %ld
                    // (the original %d is undefined behavior in varargs).
                    printf("x: %ld target: %ld square: %ld\n", x, target, d_squares[x]);
                    break;
                }
            } // for...
        } else {
            d_sums[i] = i;
        }
    } // if target...
}
/* Driver: computes g(n) (largest perfect square divisor) for n up to N,
 * page by page on the device, then sums the results.
 * NOTE(review): the code is mid-debugging — the accumulation loop is
 * disabled (#if(0)) and a goto exits after printing the first page, so the
 * reported S(N) is always 0. That flow is preserved; only the printf
 * format-string bug (%d for a long) is fixed.
 */
int main(int argc, char **argv)
{
    cudaError_t error_id;
    long *d_squares = NULL;
    // These values based on 1.56GiB available on device
    const int PageY = 190000;
    const int PageX = 1024;
    const int PageSize = PageX*PageY;
    long *h_sums = NULL; // large page of partial results
    long *d_sums = NULL;
    // extract target N from the command line
    long x = 0;
    if(argc == 2) {
        x = atol(argv[1]);
    } else {
        printf("usage: css target (< 1e9)\n");
        exit(1);
    }
    const long N = x;
    if(N <= 1e9L) {
        printf("target: %ld\n", N);
    } else {
        printf("target: %ld exceeds program limitations (1e9)\n", N);
        exit(2);
    }
    // determine array dimensions
    // NOTE(review): this is the fourth root of N, not the square root the
    // file header describes — confirm the intended table size.
    long limit = (long)(sqrt(sqrt(N)) + 1); // defines size of array
#if(DEBUG)
    printf("target: %ld limit: %ld\n", N, limit);
#endif
    // Allocate space on device for the table of squares
    error_id = cudaMalloc(&d_squares, sizeof(long )*limit);
    if(error_id != cudaSuccess) {
        printf("cudaMalloc squares failed with %d\n", error_id);
        exit(1);
    }
    // launch the generator on kernel
    printf("\nGenerating squares\n");
    set_squares<<<1,limit>>>(d_squares, limit);
    cudaDeviceSynchronize();
#if(DEBUG)
    // allocate space on host and copy device squares
    long *h_squares = (long *)malloc(sizeof(long )*limit);
    cudaMemcpy(h_squares, d_squares, sizeof(long )*limit, cudaMemcpyDeviceToHost);
    // print long array of squares
    // Fix: loop variable x is a long — print it with %ld (was %d, UB).
    for(long x = 0; x < limit; ++x) printf("%ld:%ld ", x, h_squares[x]); printf("\n");
    // clear host array
    free(h_squares);
#endif
    // Allocate a results page on device
    error_id = cudaMalloc(&d_sums, sizeof(long )*PageSize);
    if(error_id != cudaSuccess) {
        printf("cudaMalloc d_sums failed with %d\n", error_id);
        exit(1);
    }
    // Allocate a results page on host
    h_sums = (long*)malloc(sizeof(long)*PageSize);
    if(h_sums == NULL) {
        printf("Failed to malloc h_sums.");
        exit(1);
    }
    // initialise to zero
    for(int x = 0; x < PageSize; ++x) h_sums[x] = 0L;
    int rows = (N / 1024) + 1;
    int pages = (rows / 190000) + 1;
    printf("N: %ld rows: %d pages: %d\n", N, rows, pages);
    long Sum = 0;
    long counted = 0;
    for(int pg = 0; pg < pages; ++pg) {
        // launch kernel with appropriate parameters
        func_g<<<rows,1024>>>(d_squares, limit, d_sums, N, PageSize, pg);
        // device sync and test for errors
        error_id = cudaDeviceSynchronize();
        if(error_id != cudaSuccess) {
            printf("cudaDeviceSync returned %d\n", error_id);
            exit(0);
        }
        // copy device sums to host
        error_id = cudaMemcpy(h_sums, d_sums, sizeof(long)*PageSize, cudaMemcpyDeviceToHost);
        // DEBUG
        for(int x = 0; ((x<PageSize)&&(counted < N)); ++x,++counted) {
            printf("%d:%ld ",x,h_sums[x]);
        }
        printf("\n"); goto exit;
        // END DEBUG
#if(0)
        // Update S by summing last returned page page
        for(int x = 0; ((x<20)&&(counted < N)); ++x,++counted) {
            Sum += h_sums[x];
        }
#endif
    }
exit:
    // Output Result as S(N) = S
    printf("S(%ld) = %ld.\n", N, Sum);
    // CleanUp
    free(h_sums);
    cudaFree(d_sums);
    return 0;
}
|
3,640 | #include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include <stdlib.h>
#include <fstream>
#include <sstream>
#define PI 3.14159265358979323846
__device__ double density(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
	// Normal density of the increment Xnew - Xold over a step of length
	// delta_t with drift (r - delta - sigma^2/2)*delta_t and standard
	// deviation sigma*sqrt(delta_t): standardize, then evaluate the pdf.
	double z = (1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t);
	double pdf = (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*z*z);
	return pdf;
}
|
3,641 | #include "GOL_runner.cuh"
#include <stdio.h>
#define threadWidth 16
#define threadHeight 16
__device__ int horizCheck(bool* board, int width, int height, int x, int y) {
	// Count the live left/right neighbors of cell (x, y), wrapping at the
	// horizontal edges (toroidal board). Returns 0..2.
	int count = 0;
	int rightCol = (x + 1 == width) ? 0 : x + 1;
	if (board[width * y + rightCol]) { count++; }
	int leftCol = (x == 0) ? width - 1 : x - 1;
	if (board[width * y + leftCol]) { count++; }
	return count;
}
__device__ int vertCheck(bool* board, int width, int height, int x, int y) {
	// Count the live up/down neighbors of cell (x, y), wrapping at the
	// vertical edges (toroidal board). Returns 0..2.
	int count = 0;
	int rowBelow = (y + 1 == height) ? 0 : y + 1;
	if (board[width * rowBelow + x]) { count++; }
	int rowAbove = (y == 0) ? height - 1 : y - 1;
	if (board[width * rowAbove + x]) { count++; }
	return count;
}
__device__ int cornerCheck(bool* board, int width, int height, int x, int y) {
	// Count the live diagonal neighbors of cell (x, y), wrapping at all
	// four edges (toroidal board). Returns 0..4.
	int xRight = (x + 1 == width) ? 0 : x + 1;
	int xLeft = (x == 0) ? width - 1 : x - 1;
	int yBelow = (y + 1 == height) ? 0 : y + 1;
	int yAbove = (y == 0) ? height - 1 : y - 1;
	int count = 0;
	if (board[width * yBelow + xRight]) count++;
	if (board[width * yBelow + xLeft]) count++;
	if (board[width * yAbove + xLeft]) count++;
	if (board[width * yAbove + xRight]) count++;
	return count;
}
// One Game of Life generation: each thread updates cell (x, y) of the
// width*height toroidal board, writing the next state into newBoard.
// Launch with a 2-D grid covering at least width x height threads.
// width/height are passed as device pointers (see GOL::init).
__global__ void stepper(bool* board, bool* newBoard, int* width, int* height) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	if (x < (*width) && y < (*height)) {
		// Live neighbors: left/right + up/down + diagonals, all wrapping.
		int count = horizCheck(board, *width, *height, x, y);
		count += vertCheck(board, *width, *height, x, y);
		count += cornerCheck(board, *width, *height, x, y);
		int realIndex = y * (*width) + x;
		bool cellState = board[realIndex];
		// Conway rules: a live cell survives with 2 or 3 neighbors,
		// a dead cell is born with exactly 3; everything else is dead.
		if (cellState && count < 2) { newBoard[realIndex] = false; }
		else if (cellState && count > 3) { newBoard[realIndex] = false; }
		else if (cellState) { newBoard[realIndex] = true; }
		else if (count == 3) { newBoard[realIndex] = true; }
		// Fix: the original never wrote dead cells with count != 3, so on
		// the first generation newBoard kept whatever cudaMalloc left there.
		else { newBoard[realIndex] = false; }
	}
}
// Construct a Game of Life simulation over a width x height cell grid.
// `data` is the caller-owned host board; only the pointer is kept here.
GOL::GOL(int width, int height, bool* data) {
	this->width = width;
	this->height = height;
	this->board = data;
	this->size = sizeof(bool) * width * height;
}
// Allocate device buffers, upload the board and its dimensions, and derive
// the launch grid as ceil(width/threadWidth) x ceil(height/threadHeight).
void GOL::init() {
	cudaMalloc((void **)&d_board, size);
	cudaMalloc((void **)&d_boardNew, size);
	size = sizeof(int);
	cudaMalloc((void**)&d_width, size); cudaMalloc((void**)&d_height, size);
	cudaMemcpy(d_width, &width, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_height, &height, size, cudaMemcpyHostToDevice);
	size = sizeof(bool) * width * height;
	cudaMemcpy(d_board, board, size, cudaMemcpyHostToDevice);
	numBlockVert = height / threadHeight;
	if (height % threadHeight > 0) numBlockVert++;
	numBlockHoriz = width / threadWidth;
	// Fix: the horizontal remainder must come from width, not height.
	if (width % threadWidth > 0) numBlockHoriz++;
}
// Advance the simulation one generation: run the stepper kernel, promote
// the freshly computed board to be the current device board, then mirror
// it back into the caller-owned host `board`.
// NOTE(review): the blocking cudaMemcpy calls serialize after the kernel
// on the default stream, so no explicit synchronize is needed; kernel
// launch errors are never checked, though.
bool GOL::step() {
dim3 dimGrid(numBlockHoriz, numBlockVert);
dim3 dimBlock(threadWidth, threadHeight);
stepper << <dimGrid, dimBlock >> > (d_board, d_boardNew, d_width, d_height);
cudaMemcpy(d_board, d_boardNew, size, cudaMemcpyDeviceToDevice);
cudaMemcpy(board, d_board, size, cudaMemcpyDeviceToHost);
return true;
}
// Release all device allocations.
// Fix: cudaFree takes the device pointer itself; the original passed the
// address of each pointer variable (&d_board), which frees nothing valid.
GOL::~GOL() {
	cudaFree(d_board); cudaFree(d_boardNew);
	cudaFree(d_width); cudaFree(d_height);
}
3,642 | /**
This example is based on the article titled "CUDA Pro Tip: Occupancy API Simplifies Launch Configuration".
More info on https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/
*/
#include "stdio.h"
// Square each element of `array` in place: one thread per element, with a
// bounds guard for the grid tail.
__global__ void VectorMultiplicationKernel(int *array, int arrayCount)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= arrayCount)
        return;
    array[i] = array[i] * array[i];
}
// Launch VectorMultiplicationKernel with a block size chosen by the CUDA
// occupancy API, then report the theoretical occupancy achieved.
// `array` must be a device pointer holding at least arrayCount ints.
void launchMaxOccupancyKernel(int *array, int arrayCount)
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
int gridSize; // The actual grid size needed, based on input size
cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, VectorMultiplicationKernel, 0, 0);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
printf("Grid size is %d, array count is %d, min grid size is %d\n", gridSize, arrayCount, minGridSize);
VectorMultiplicationKernel<<< gridSize, blockSize >>>(array, arrayCount);
cudaDeviceSynchronize();
// calculate theoretical occupancy
int maxActiveBlocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks,
VectorMultiplicationKernel,
blockSize, 0);
int device;
cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
// Occupancy = resident warps / max warps per SM; the inner integer
// division converts resident threads to whole warps before the cast.
float occupancy = (float) (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor / props.warpSize);
printf("Device maxThreadsPerMultiProcessor %d\n", props.maxThreadsPerMultiProcessor);
printf("Device warpSize %d\n", props.warpSize);
printf("Launched blocks of size %d. Theoretical occupancy: %f\n", blockSize, occupancy);
}
// Fill array[i] = i for every i in [0, count).
void initializeData(int *array, int count){
    int i = 0;
    while (i < count) {
        array[i] = i;
        ++i;
    }
}
// Zero the first `count` entries of array.
void resetData(int *array, int count){
    for (int i = count - 1; i >= 0; --i) {
        array[i] = 0;
    }
}
// Check that array[i] == i*i for every i, printing each mismatch; prints a
// confirmation line when everything matches. The array is not modified.
void verifyData(int *array, int count){
    bool allCorrect = true;
    for (int i = 0; i < count; ++i) {
        int expected = i * i;
        if (array[i] != expected) {
            printf("Element %d expected: %d actual %d", i, expected, array[i]);
            allCorrect = false;
        }
    }
    if (allCorrect) printf("Data is correct\n");
}
// Demo driver: square 1M ints on the device via the occupancy-tuned
// launcher, then verify the results on the host.
int main()
{
const int count = 1000000;
int *hostArray;
int *deviceArray;
int size = count * sizeof(int);
hostArray = new int[count];
initializeData(hostArray, count);
cudaMalloc(&deviceArray, size);
cudaMemcpy(deviceArray, hostArray, size, cudaMemcpyHostToDevice);
// Zero the host copy so verification can only pass on device results.
resetData(hostArray, count);
launchMaxOccupancyKernel(deviceArray, count);
cudaMemcpy(hostArray, deviceArray, size, cudaMemcpyDeviceToHost);
verifyData(hostArray, count);
cudaFree(deviceArray);
delete[] hostArray;
return 0;
}
|
3,643 | #include "includes.h"
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i < len.
// One thread per element, with a guard for the grid tail.
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= len)
        return;
    out[i] = in1[i] + in2[i];
}
3,644 | #include <iostream>
const long int IMAGE_SIZE = 8192;
const int BLOCK_SIZE = 32;
const float alpha = 2.f;
const float beta = 2.f;
// Naive SGEMM: C = alpha * A*B + beta * C for square N x N row-major
// matrices. One thread per output element; no bounds guard, so the launch
// grid must cover the matrix exactly.
__global__ void sgemmNaive(float* A, float* B, float* C, int N)
{
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0.f;
    for (int k = 0; k < N; k++)
        acc += A[r * N + k] * B[k * N + c];
    C[r * N + c] = alpha * acc + beta * C[r * N + c];
}
// Tiled SGEMM using shared memory: C = alpha * A*B + beta * C.
// Each BLOCK_SIZE x BLOCK_SIZE thread block stages one tile of A and one
// of B at a time. Assumes N is a multiple of BLOCK_SIZE and blockDim is
// BLOCK_SIZE x BLOCK_SIZE (no bounds guards anywhere).
__global__ void sgemmSHM(float* A, float* B, float* C, int N)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float val = 0.f;
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
for (int i = 0; i < N / BLOCK_SIZE; i++) {
// Each thread loads one element of the current A tile and B tile.
As[threadIdx.y][threadIdx.x] = A[row * N + BLOCK_SIZE * i + threadIdx.x];
Bs[threadIdx.y][threadIdx.x] = B[(threadIdx.y + i * BLOCK_SIZE) * N + col];
// Tiles must be fully staged before any thread reads them.
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; j++)
val += As[threadIdx.y][j] * Bs[j][threadIdx.x];
// All reads must finish before the next iteration overwrites the tiles.
__syncthreads();
}
C[row * N + col] = alpha * val + beta * C[row * N + col];
}
// Benchmark driver: run the naive and shared-memory SGEMM kernels on
// 8192 x 8192 matrices of ones and report runtime and TFLOPS for each.
// Timing brackets only the kernel (events recorded around the launch);
// host buffers are pinned so the copies are fast.
int main()
{
float *A, *A_d, *B, *B_d, *C, *C_d;
const int data_size = IMAGE_SIZE * IMAGE_SIZE * sizeof(float);
cudaMallocHost(&A, data_size);
cudaMallocHost(&B, data_size);
cudaMallocHost(&C, data_size);
cudaMalloc(&A_d, data_size);
cudaMalloc(&B_d, data_size);
cudaMalloc(&C_d, data_size);
const int grid_size = IMAGE_SIZE / BLOCK_SIZE; // 8192 / 32 = 256
dim3 grid(grid_size, grid_size); // 256 * 256
dim3 block(BLOCK_SIZE, BLOCK_SIZE); // 32 x 32 = 1024
for (int i = 0; i < IMAGE_SIZE * IMAGE_SIZE; i++) {
A[i] = 1.f;
B[i] = 1.f;
C[i] = 1.f;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(A_d, A, data_size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, data_size, cudaMemcpyHostToDevice);
cudaMemcpy(C_d, C, data_size, cudaMemcpyHostToDevice);
cudaEventRecord(start);
sgemmNaive<<<grid, block>>>(A_d, B_d, C_d, IMAGE_SIZE);
cudaEventRecord(stop);
cudaMemcpy(C, C_d, data_size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
// print runtime and FLOP rate info (2*N^3 flops for an N^3 GEMM)
float milliseconds = 0.f;
cudaEventElapsedTime(&milliseconds, start, stop);
double seconds = static_cast<double>(milliseconds) / 1000.;
std::cout << "sgemmNaive runtime: " << seconds << "\n";
std::cout << "Performance (TFLOPS/s): "
<< (IMAGE_SIZE * IMAGE_SIZE * IMAGE_SIZE) * 2.0 / seconds / 1e12 << "\n\n";
// Re-upload C (the kernel accumulates beta*C) before the second run.
cudaMemcpy(A_d, A, data_size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, data_size, cudaMemcpyHostToDevice);
cudaMemcpy(C_d, C, data_size, cudaMemcpyHostToDevice);
cudaEventRecord(start);
sgemmSHM<<<grid, block>>>(A_d, B_d, C_d, IMAGE_SIZE);
cudaEventRecord(stop);
cudaMemcpy(C, C_d, data_size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
// print runtime and FLOP rate
milliseconds = 0.f;
cudaEventElapsedTime(&milliseconds, start, stop);
seconds = static_cast<double>(milliseconds) / 1000.;
std::cout << "sgemmSHM runtime: " << seconds << "\n";
std::cout << "Performance (TFLOPS/s): "
<< (IMAGE_SIZE * IMAGE_SIZE * IMAGE_SIZE) * 2.0 / seconds / 1e12 << "\n\n";
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
cudaFreeHost(A);
cudaFreeHost(B);
cudaFreeHost(C);
}
|
3,645 | //#include <helper_cuda.h>
#include "project_kernel.cuh"
#include <stdio.h>
__constant__ float K[3][3];
__constant__ float Kinv[3][3];
__constant__ float eps2;
__constant__ int npoints;
__device__ float image[480*640];
// Project one surfel (position d_x/d_y/d_z, normal d_nx/d_ny/d_nz, radius
// d_r, color d_rgba) per thread into the fixed 640x480 image using the
// constant camera matrices K / Kinv. The surfel disc is treated as a
// quadric; pixels inside its projected ellipse (disc < 0) are written to
// the global `image` buffer.
// NOTE(review): multiple surfels can cover the same pixel — writes race
// with no depth test, so the surviving color is arbitrary. Confirm this
// is acceptable for the renderer.
__global__ void project_kernel(float *d_x, float *d_y, float *d_z, float *d_nx,
float *d_ny, float *d_nz, float *d_r, float *d_rgba)
{
// each kernel function gets a surfel in the cloud
// assume global K, assume points have already been transformed into camera frame?
// now, based on normal and radius determine the pixels covered
// we could probably use the projection matrix on the ellipse somehow
//int tid = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.x + blockDim.x*blockIdx.x;
// `index` selects a single surfel for debug printing.
int index = 30000;
if (i == index) {
printf("npoints: %d\n", npoints);
printf("eps2: %f\n", eps2);
}
// Skip out-of-range threads and surfels behind the camera.
if (i >= npoints || d_z[i] < 0.0f) {
if (i == index) {
printf("Index: %d, Depth: %f\n", i, d_z[i]);
printf("K: [%f, %f, %f; %f, %f, %f; %f, %f, %f]\n", K[0][0], K[0][1], K[0][2], K[1][0], K[1][1], K[1][2], K[2][0], K[2][1], K[2][2]);
}
return;
}
// Pinhole projection of the surfel center; discard if off-screen.
float px = K[0][0]*d_x[i]/d_z[i] + K[0][2];
float py = K[1][1]*d_y[i]/d_z[i] + K[1][2];
if (px < 0 || px > 640 || py < 0 || py > 480) {
return;
}
// Build the disc quadric A = r^-2 * I + (eps2 - r^-2) * n n^T.
float r2 = 1.0f/(d_r[i]*d_r[i]);
float NN[3][3];
NN[0][0] = d_nx[i]*d_nx[i]; NN[0][1] = d_nx[i]*d_ny[i]; NN[0][2] = d_nx[i]*d_nz[i];
NN[1][0] = d_ny[i]*d_nx[i]; NN[1][1] = d_ny[i]*d_ny[i]; NN[1][2] = d_ny[i]*d_nz[i];
NN[2][0] = d_nz[i]*d_nx[i]; NN[2][1] = d_nz[i]*d_ny[i]; NN[2][2] = d_nz[i]*d_nz[i];
float A[3][3];
for (int row = 0; row < 3; ++row) {
for (int col = 0; col < 3; ++col) {
A[row][col] = (eps2 - r2)*NN[row][col];
}
A[row][row] += r2;
}
// c = surfel center; Ac and cAc are intermediates for the dual quadric.
float c[3] = {d_x[i], d_y[i], d_z[i]};
float Ac[3];
for (int row = 0; row < 3; ++row) {
Ac[row] = 0.0f;
for (int col = 0; col < 3; ++col) {
Ac[row] += A[row][col]*c[col];
}
}
float cAc = 0.0f;
for (int row = 0; row < 3; ++row) {
cAc += c[row]*Ac[row];
}
// now we reuse A to compute AA
for (int row = 0; row < 3; ++row) {
for (int col = 0; col < 3; ++col) {
A[row][col] = (cAc - 1.0f)*A[row][col] - Ac[row]*Ac[col];
}
}
// and finally, let's reuse NN and A to compute the AA with camera matrix
for (int row = 0; row < 3; ++row) {
for (int col = 0; col < 3; ++col) {
// A*Kinv
NN[row][col] = A[row][0]*Kinv[0][col] + A[row][1]*Kinv[1][col] + A[row][2]*Kinv[2][col];
}
}
for (int row = 0; row < 3; ++row) {
for (int col = 0; col < 3; ++col) {
// this time Kinv^T*A
A[row][col] = Kinv[0][row]*NN[0][col] + Kinv[1][row]*NN[1][col] + Kinv[2][row]*NN[2][col];
}
}
// Conservative pixel bounding box: project the four axis-aligned offsets
// of radius r around the center.
int minx = 640; int maxx = 0; int miny = 480; int maxy = 0;
float ox[4] = {d_r[i], -d_r[i], 0.0f, 0.0f};
float oy[4] = {0.0f, 0.0f, d_r[i], -d_r[i]};
for (int col = 0; col < 4; ++col) {
px = K[0][0]*(d_x[i]+ox[col])/d_z[i] + K[0][2];
py = K[1][1]*(d_y[i]+oy[col])/d_z[i] + K[1][2];
if (px < minx) {
minx = px;
}
if (py < miny) {
miny = py;
}
if (px > maxx) {
maxx = px;
}
if (py > maxy) {
maxy = py;
}
}
// Rasterize: evaluate the projected quadric at each pixel in the box and
// color those inside the ellipse (negative discriminant).
float x, y, disc;
for (int row = miny; row < maxy; ++row) {
for (int col = minx; col < maxx; ++col) {
x = col; y = row;
// C2 + B2.transpose()*v + v.transpose()*A2*v
disc = A[2][2] + 2.0f*(A[0][2]*x + A[1][2]*y) + A[0][0]*x*x + 2.0f*A[0][1]*x*y + A[1][1]*y*y;
if (disc < 0.0f) {
if (i == index) {
printf("disc: %f!", disc);
}
x = 1.0f;
image[640*row+col] = d_rgba[i];
}
}
}
}
// Host wrapper: upload the camera constants (K, Kinv, eps2, npoints) to
// constant memory, launch project_kernel, and copy the rendered device
// image symbol back into the host buffer `himage` (640*480 floats).
// The blocking cudaMemcpyFromSymbol also serializes after the kernel, so
// no explicit synchronize is needed; no CUDA errors are checked, though.
void project(float *d_x, float *d_y, float *d_z, float *d_nx,
float *d_ny, float *d_nz, float *d_r, float *d_rgba,
int nblocks, int nthreads, float neps2, float* nK, float* nKinv, float* himage,
int hnpoints)
{
cudaMemcpyToSymbol(K, nK, 9*sizeof(float));
cudaMemcpyToSymbol(Kinv, nKinv, 9*sizeof(float));
cudaMemcpyToSymbol(eps2, &neps2, sizeof(float));
cudaMemcpyToSymbol(npoints, &hnpoints, sizeof(int));
project_kernel<<<nblocks,nthreads>>>(d_x, d_y, d_z, d_nx, d_ny, d_nz, d_r, d_rgba);
//float *my_image;
//cudaGetSymbolAddress((void **)&my_image, image);
cudaMemcpyFromSymbol(himage, image, 480*640*sizeof(float));
}
|
3,646 | #include "includes.h"
/*
* SpaceTime Simulator
* Curso Deep Learning y Cuda - 2020
* Autor: Oscar Noel Amaya Garcia
* email: dbanshee@gmail.com
*/
#define RUN_MODE_SIM 0
#define RUN_MODE_BENCH 1
#define SP_FILENAME "sp.json"
#define SP_FILENAME_BUFF1 "sp_0.json"
#define SP_FILENAME_BUFF2 "sp_1.json"
#define SP_FILENAME_BENCH "sp_bench.json"
#define MAX_BLACK_HOLES 10
#define SOFTENING 1e-9f
#define DT 0.05f
#define MAX_ASTEROIDS 30
#define AST_FILENAME "ast.json"
#define AST_FILENAME_BUFF1 "ast_0.json"
#define AST_FILENAME_BUFF2 "ast_1.json"
#define AST_FILENAME_BENCH "ast_bench.json"
#define MAX_BENCHMARKS 128
#define BENCH_FILENAME "benchmark.json"
#define BENCH_TIME_SECS 10
#define BENCH_CPU 0
#define BENCH_GPU 1
#define BENCH_REGEN_BH_STEPS 5
#define BENCH_FILE_ACCESS_STEPS 3
#define CUDA_OPT_NLEVELS 4
#define CUDA_OPT_LEVEL_0 0
#define CUDA_OPT_LEVEL_1 1
#define CUDA_OPT_LEVEL_2 2
#define CUDA_OPT_LEVEL_3 3
#define MAX_TIME_SIMULATION_SEC 360
#define REGEN_BLACK_HOLES_SEC 20
#define NUM_BECHMARKS 10
typedef struct blackHole {
float x, y, g;
} blackHole;
typedef struct spacePoint {
float x, y, g;
} spacePoint;
typedef struct asteroid {
float x, y, vx, vy;
} asteroid;
typedef struct benchmark {
char name[1024];
int number;
int config;
int type; // CPU = 0, GPU = 1
long time; // millis
int steps;
} benchmark;
///////////////
// Global Vars
///////////////
// Runtime
int runMode = RUN_MODE_SIM;
int spCurrentBuff = 0;
int astCurrentBuff = 0;
int nBlackHoles = 0;
int nAsteroids = MAX_ASTEROIDS;
blackHole* blackHoles = NULL;
int bhSize;
asteroid* asteroids = NULL;
int astSize;
spacePoint* SPBox = NULL;
int spSize;
float top = 2, left = -2, bottom = -2, right = 2;
float spStep = 0.1;
int nelems;
int rows, cols;
int cudaOptLevel = CUDA_OPT_LEVEL_3;
// BenchMark
int nBenchmark;
int bechmarkRegenBHSteps = MAX_TIME_SIMULATION_SEC;
int bechmarkRegenWriteFileSteps = REGEN_BLACK_HOLES_SEC;
benchmark BENCHS[MAX_BENCHMARKS];
char benchName[1024];
int benchNum;
int benchConfig;
int benchType;
//////////////////
// Error Handling
//////////////////
// Evaluate the gravitational potential field over an nRows x nCols grid
// mapped onto the rectangle [left,right] x [bottom,top]. Each block first
// stages the black-hole array (bhSize bytes) into dynamic shared memory,
// then every thread fills one grid point of SPBox.
// Fix: the original guarded the shared-memory copy with the GLOBAL thread
// indices (i == 0 && j == 0), so only block (0,0) ever populated its
// cache and every other block read uninitialized shared memory. The copy
// must run once per block, guarded by threadIdx.
__global__ void calculateSpaceTimeKSHM(spacePoint* SPBox, int nRows, int nCols, float left, float right, float top, float bottom, blackHole* BH, int nBlackHoles, int bhSize) {
    extern __shared__ float s[];
    blackHole* bhCache = (blackHole*) s;
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    // One thread per block copies the black holes into this block's cache.
    if (threadIdx.x == 0 && threadIdx.y == 0)
        memcpy(bhCache, BH, bhSize);
    __syncthreads();
    if (i < nRows && j < nCols) {
        float x = (i / (float) nRows * (right-left)) + left;
        float y = (j / (float) nCols * (top-bottom)) + bottom;
        // NOTE(review): the row stride here is nRows; this is only a
        // correct flat index when nRows == nCols — confirm intended layout.
        int idx = i*nRows+j;
        float g = 0.0f;
        for (int b = 0; b < nBlackHoles; b++){
            float dx = x - bhCache[b].x;
            float dy = y - bhCache[b].y;
            float dist = sqrt(dx*dx + dy*dy);
            if (dist == 0.0f) {
                dist = 0.000000001;   // avoid division by zero at a hole's center
            }
            float invDist = 1 / (pow((float)dist, (float)0.05));
            g += (bhCache[b].g * invDist);
        }
        SPBox[idx].x = x;
        SPBox[idx].y = y;
        SPBox[idx].g = g;
    }
}
3,647 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Abort the program with a diagnostic if any earlier CUDA call left a
// pending error (callers use the CUDA_CHECK macro to pass file/line).
void cuda_check(string file, int line)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    cout << endl << file << ", line " << line << ": " << cudaGetErrorString(err) << " (" << err << ")" << endl;
    exit(1);
}
// Square each element of cuda_a in place.
// NOTE(review): the element index is the SUM of the x, y and z global
// indices, which only identifies a unique element under a 1-D launch
// (t_numy = t_numz = 0); a genuinely 2-D/3-D launch would make distinct
// threads collide on the same element — confirm 1-D usage only.
__global__ void square(float* cuda_a, int n){
int t_numx = threadIdx.x + blockIdx.x*blockDim.x;
int t_numy = threadIdx.y + blockIdx.y*blockDim.y;
int t_numz = threadIdx.z + blockIdx.z*blockDim.z;
if (t_numx + t_numy + t_numz < n){
cuda_a[t_numx + t_numy + t_numz] = cuda_a[t_numx + t_numy + t_numz]*cuda_a[t_numx + t_numy + t_numz];
}
}
// Square n floats on CPU and GPU and print both results for comparison.
int main(int argc,char **argv)
{
    // alloc and init input arrays on host (CPU)
    int n = 10;
    float *a = new float[n];
    for(int i=0; i<n; i++) a[i] = i;
    // CPU reference computation
    for(int i=0; i<n; i++)
    {
        float val = a[i];
        val = val*val;
        a[i] = val;
    }
    // print result
    cout << "CPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;
    // GPU computation
    // reinit data
    for(int i=0; i<n; i++) a[i] = i;
    float *cuda_a;
    cudaMalloc((void**)&cuda_a, n*sizeof(float));
    cudaMemcpy(cuda_a, a, n*sizeof(float), cudaMemcpyHostToDevice);
    dim3 block = dim3(128,1,1);
    // Fix: ceil-division is (n + block.x - 1) / block.x; the original
    // added block.x + 1, which can launch one spurious extra block.
    int grid_x = ((n + block.x - 1)/block.x);
    int grid_y = 1;
    int grid_z = 1;
    dim3 grid = dim3(grid_x, grid_y, grid_z );
    // Fix: launch syntax is <<<grid, block>>>; the original had the two
    // arguments swapped (128 blocks of grid_x threads each).
    square <<<grid,block>>> (cuda_a, n);
    // Fix: copy back n floats, not n ints (same byte count, wrong type).
    cudaMemcpy(a, cuda_a, n*sizeof(float), cudaMemcpyDeviceToHost);
    // print result
    cout << "GPU:" << endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;
    // free CPU and GPU arrays
    delete[] a;
    cudaFree(cuda_a);
}
|
3,648 | #include<bits/stdc++.h>
using namespace std;
#define BLOCK_SIZE 16
// Tiled matrix multiply: dev_c = dev_a * dev_b for n x n row-major int
// matrices. Each BLOCK_SIZE x BLOCK_SIZE block walks the tiles along its
// row/column strip; out-of-range tile elements are staged as 0 so partial
// edge tiles contribute nothing.
__global__ void matrix_multiplication(int *dev_a, int *dev_b, int *dev_c, int n){
    __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int temp = 0;
    for (int i=0; i<gridDim.x; ++i){
        // Fix: guard each load on the element's own row AND column being
        // in range. The original compared the flat index against n*n,
        // which let edge threads read an element from the next row
        // (e.g. row*n + aCol with aCol >= n is still < n*n).
        int aCol = i*BLOCK_SIZE + threadIdx.x;
        if (row < n && aCol < n){
            tile_a[threadIdx.y][threadIdx.x] = dev_a[row*n + aCol];
        }
        else{
            tile_a[threadIdx.y][threadIdx.x] = 0;
        }
        int bRow = i*BLOCK_SIZE + threadIdx.y;
        if (bRow < n && col < n){
            tile_b[threadIdx.y][threadIdx.x] = dev_b[bRow*n + col];
        }
        else{
            tile_b[threadIdx.y][threadIdx.x] = 0;
        }
        __syncthreads();   // tiles fully staged before use
        for (int j=0; j<BLOCK_SIZE; ++j){
            temp += tile_a[threadIdx.y][j]*tile_b[j][threadIdx.x];
        }
        __syncthreads();   // all reads done before the next overwrite
    }
    if (row<n && col<n){
        dev_c[row*n+col] = temp;
    }
}
// Benchmark driver: multiply two random 10000 x 10000 int matrices on the
// GPU and report the elapsed time (events bracket alloc + copies + kernel).
int main(int argc, char const *argv[]){
    int n;
    srand(1);   // fixed seed for reproducible inputs
    int *a, *b, *c;
    n=10000;
    // Pinned host buffers for faster transfers.
    cudaMallocHost((void **) &a, sizeof(int)*n*n);
    cudaMallocHost((void **) &b, sizeof(int)*n*n);
    cudaMallocHost((void **) &c, sizeof(int)*n*n);
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            a[i * n + j] = rand() % n;
            b[i * n + j] = rand() % n;
        }
    }
    float time_taken;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **) &dev_a, sizeof(int)*n*n);
    cudaMalloc((void **) &dev_b, sizeof(int)*n*n);
    cudaMalloc((void **) &dev_c, sizeof(int)*n*n);
    cudaMemcpy(dev_a, a, sizeof(int)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(int)*n*n, cudaMemcpyHostToDevice);
    unsigned int grid_rows = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
    unsigned int grid_cols = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    matrix_multiplication<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, n);
    cudaMemcpy(c, dev_c, sizeof(int)*n*n, cudaMemcpyDeviceToHost);
    // Fix: cudaThreadSynchronize() is deprecated; use the modern
    // cudaDeviceSynchronize() (same semantics).
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_taken, start, stop);
    printf("Time elapsed in matrix multiplication on GPU: %f ms.\n",time_taken);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
3,649 | // Sorting reference, Odd-Even Algorithm using CUDA
// Block-local odd-even transposition sort in global memory. Each thread
// owns one adjacent pair within its block's span of 2*blockDim.x elements;
// the loop alternates even and odd phases until a full pass performs no
// swaps (__syncthreads_count barriers the block AND totals num_swaps
// across all of its threads).
// NOTE(review): sorting is per-block only — with multiple blocks each
// 2*blockDim.x segment ends up sorted independently.
__global__ void odd_even_sort_gpu_kernel_gmem(int * const data, const int num_elem) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
int tid_idx;
int offset = 0; // Start off with even, then odd
int num_swaps;
// Calculation maximum index for a given block
// Last block it is number of elements minus one
// Other blocks to end of block minus one
const int tid_idx_max = min( (((blockIdx.x+1) * (blockDim.x*2))-1), (num_elem-1) );
do {
// Reset number of swaps
num_swaps = 0;
// Work out index of data
tid_idx =(tid * 2) + offset;
// If no array or block overrun
if (tid_idx < tid_idx_max) {
// Read values into registers
const int d0 = data[tid_idx];
const int d1 = data[tid_idx+1];
// Compare registers
if ( d0 > d1 ) {
// Swap values if needed
data[tid_idx] = d1;
data[tid_idx+1] = d0;
// Keep track that we did a swap
num_swaps++;
}
}
// Switch from even to odd, or odd to even
if (offset == 0)
offset = 1;
else
offset = 0;
} while (__syncthreads_count(num_swaps) != 0);
}
|
3,650 | // only kernel, not fully executable
#define RADIUS 7
#define BLOCK_SIZE 512
// 1-D stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS], staged through
// shared memory with a RADIUS-element halo on each side of the block.
// Requires blockDim.x == BLOCK_SIZE (the halo load assumes it).
// NOTE(review): for the first block the halo load reads in[gindex-RADIUS]
// with gindex < RADIUS (negative index), and the last block reads past the
// end — the caller must pad `in` by RADIUS on both sides, otherwise this
// is out-of-bounds.
__global__ void stencil(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex];
// At both end of a block, the sliding window moves beyond the block boundary.
// E.g, for thread id = 512, we wiil read in[505] and in[1030] into temp.
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// All halo and body loads must land before any thread reads temp.
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
}
|
3,651 | #include "includes.h"
// Tree reduction (sum): each block sums its blockDim.x-element slice of
// d_in into d_out[blockIdx.x]. The halving loop assumes blockDim.x is a
// power of two; launch with blockDim.x * sizeof(float) dynamic shared
// memory and enough threads to cover d_in exactly.
__global__ void reduce(float* d_out, float* d_in) {
    extern __shared__ float sdata[];
    const int tid = threadIdx.x;
    // Stage this block's slice into shared memory.
    sdata[tid] = d_in[blockDim.x * blockIdx.x + tid];
    __syncthreads();
    // Pairwise halving: active threads fold the upper half onto the lower.
    for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
        if (tid < stride) {
            sdata[tid] += sdata[tid + stride];
        }
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if (tid == 0) {
        d_out[blockIdx.x] = sdata[0];
    }
}
3,652 | #include <stdio.h>
#include <time.h>
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
//Threads per block is capped at 1024 for hardware reasons
//In some cases using a smaller number of threads per block will be more efficient
#define threadsPerBlock 1024
//Max grid points is to defined in order to allocate shared memory for each block
#define MaxGridPoints 6144
/*
NOTE:
grid is technically a 3-dimensional variable length array. In order to copy
this 3d array, it needed to be compressed into a single dimension.
Rather than decompressing this and wasting valuable resources all computation is done in line.
i.e. all blocks will write to adjacent memory
the position in memory is determined by three factors: the block numnber, the r
segment value and the z segment value
Each block is given an initial offset. This offset gives the block a unique space inside
of the overall shared array in which to write its data. Each block's grid is
r driven meaning grid(r,z) = grid[r*zMax + z], thus the position of each point in the
overall array is given by grids[offset + r*zMax + z]
*/
//This function will be called to initialize all data in the grid, Each thread will
//calculate the initial condion for a variable number of points and each block will have a different
//set of initial conditions all passed to device memory already
__global__ void init(double *grids, double *Ils, double *dIs, double *ldrs, double *rlengths, long int *grid_sizes, int *zsegs, long int *offsets){
// Launch layout: one block per problem instance (blockIdx.x selects that
// problem's parameters); the block's threads split the problem's grid points
// evenly among themselves. All parameter arrays are indexed by problem.
// get needed data from memory, all memory is block adjacent
//block index determines which problem you are working on
//Current on the left side of the grid
double Il = Ils[blockIdx.x];
//amount the current increments per segment in the z direction
//This model assumes linear change
double dI = dIs[blockIdx.x];
//Physical distance change between r segments
double ldr = ldrs[blockIdx.x];
//total r length
double rlength = rlengths[blockIdx.x];
//Total number of grid points
long int grid_size = grid_sizes[blockIdx.x];
//number of z segmnents
int zseg = zsegs[blockIdx.x];
//Offset of the current grid
//Because data needs to be copied to the gpu, it is compressed into one dimension
//thus all grid data lives in the same array and the offset for that grid determines the satrt of this grid
//This is explained in more detail above
long int gridStart = offsets[blockIdx.x];
//Find how many points per thread will be executing
//remainder tells you how many threads will have one extra point to cover
int rem = grid_size%threadsPerBlock;
//division tells you how many points ALL theads will cover
int divi = grid_size/threadsPerBlock;
long int start, fin;
//If threadIdx.x is less than rem the current thread will recieve one extra point to calculate
if(threadIdx.x<rem){
start = threadIdx.x*(divi+1);
fin = start + divi + 1;
}
else{
//rem is added here because at this point a "rem" number of threads will have one extra point to compute
//overall this is "rem" extra points and this is accounted for by adding rem
start = threadIdx.x*divi + rem;
fin = start + divi;
}
//get initial conditions for all points per thread
//Loops over each point a thread takes care of
//NOTE: If a thread is not supposed to execute anything it will have the same
//value for "start" and "fin" and will not enter the loop
for(int i = start; i<fin; i++){
//Gets current r and z segmenty values
//Because grid is "r-driven", dividing by zMax = the r value and moding by
//zMax gves the current z value
//For more info check out the "note" at the top
//(zseg+2 is the number of z points including the two boundary points)
long int r_seg_val = i/(zseg+2);
long int z_seg_val = i%(zseg+2);
//Sets initial condition at the given point based on an equation given by Dr. Sankaran
//NOTE(review): physical correctness of this formula is taken on trust from
//the original author; it is not verifiable from this file alone.
grids[i + gridStart] = (1-(r_seg_val*r_seg_val*ldr*ldr/(3*rlength*rlength)))*3*mu0*(Il + z_seg_val*dI)*r_seg_val*ldr/(4*PI*rlength*rlength);
}
}
//This function funs until the number of timesteps needed is completed
//again this function passes data by memory copy in the form of arrays
//each block excercises a different set of input params
__global__ void run(double *grids, double *r_augs, double *z_augs, long int *allMaxSteps, long int *grid_sizes, int *rsegs, int *zsegs, long int *offsets){
// Time-steps the diffusion update for each problem (one block per problem)
// until that problem's maxSteps is reached, double-buffering the grid in
// shared memory, then writes the final grid back to global memory.
//get data from initialization arrays
//block index determines the problem that is being worked on
//r_aug and z_aug are constants that multiple values need to update to 1 timestep ahead
//r_aug is used for changes in r and z_aug is used for changes in z
double r_aug = r_augs[blockIdx.x];
double z_aug = z_augs[blockIdx.x];
//max steps is the number of steps needed to be completed for the simulation
//to be finished
long int maxSteps = allMaxSteps[blockIdx.x];
//total grid size for this block's problem
long int grid_size = grid_sizes[blockIdx.x];
//number of segments in r and z directions
int rseg = rsegs[blockIdx.x];
int zseg = zsegs[blockIdx.x];
//offset for block's memory locations
//More info at top
long int gridStart = offsets[blockIdx.x];
//find how many points each thread will execute for
//rem represents the number of threads that will have one more than divi points
//to take care of
int rem = grid_size%threadsPerBlock;
//divi is the amount of points each thread will have to take care of at a minimum
int divi = grid_size/threadsPerBlock;
int start, fin;
if(threadIdx.x<rem){
start = threadIdx.x*(divi+1);
fin = start + divi + 1;
}
else{
//rem is added here because at this point a "rem" number of threads will have one extra point to compute
//overall this is "rem" extra points and this is accounted for by adding rem
start = threadIdx.x*divi + rem;
fin = start + divi;
}
long int steps = 0;
//FIX: CUDA places every `extern __shared__` array in a kernel at the SAME
//base address, so the original separate declarations of grid_new_s and
//grid_old_s aliased one buffer: the old<-new copy below was a no-op and the
//stencil read partially-updated values instead of the previous timestep.
//Partition a single dynamic shared buffer into two halves instead.
//NOTE(review): the launch must supply at least 2*grid_size doubles of
//dynamic shared memory, i.e. grid_size <= MaxGridPoints/2 with the current
//launch of MaxGridPoints*sizeof(double) — confirm input sizes respect this.
extern __shared__ double smem[];
double *grid_new_s = smem;
double *grid_old_s = smem + grid_size;
//Initialize shared memory
//gridStart is the offset for this block
for(int i = start; i<fin; i++){
grid_new_s[i] = grids[i + gridStart];
}
//wait for all threads
__syncthreads();
while(steps<maxSteps){
//copy all points from new to old
for(int i = start; i<fin; i++){
grid_old_s[i] = grid_new_s[i];
}
//wait for all threads
__syncthreads();
for(int i = start; i<fin; i++){
//because the grid is row driven
//i / zMax = rvalue
//i % zMax = zvalue
int r = i/(zseg+2);
int z = i%(zseg+2);
//leave boundary conditions alone as specified by the problem
if(z != 0 && z != zseg+1){
if(r!= 0 && r!= rseg+1){
//when r = 1 a phantom point needs to be used as the slope change from
//0 -> 1 is too large
//in this case the B field at -1 is assumed to be of the same magnitude as at 1
//but in the opposite direction
//this yields the following equation
if(r==1){
grid_new_s[i] += r_aug*(2*grid_old_s[i+(zseg+2)] - 4*grid_old_s[i]) +
z_aug * (grid_old_s[i+1] - 2*grid_old_s[i] + grid_old_s[i-1]);
}
//normal update function
else{
grid_new_s[i] += r_aug*((1+(1/(2*r)))*grid_old_s[i+(zseg+2)] + (-2-(1/(r*r)))*grid_old_s[i] + (1-(1/(2*r)))*grid_old_s[i-(zseg+2)])
+z_aug*(grid_old_s[i+1] - 2*grid_old_s[i] + grid_old_s[i-1]);
}
}
}
}
steps++;
//wait for all threads before the next old<-new copy
__syncthreads();
}
//copy all values back to main memory
for(int i = start; i<fin; i++){
grids[i + gridStart] = grid_new_s[i];
}
}
int main(){
// Driver: reads `testcases` problem descriptions from stdin, initializes
// each problem's grid on the GPU, dumps initial conditions to initXX.txt,
// runs the timed diffusion kernel, and dumps results to resXX.txt.
// NOTE(review): the XX filename digits support at most 100 test cases.
//string label in input FILE
//no use just for clarity
char label[256];
double *Il_h, *Ir_h, *rlength_h, *eta_h, *tstep_h, *ldr_h, *ldz_h, *zlength_h, *dI_h, *r_aug_h, *z_aug_h, *grids_h;
int *rseg_h, *zseg_h;
long int *totsteps_h, *grid_size_h, *offsets_h;
double *Il_d, *rlength_d, *ldr_d, *dI_d, *r_aug_d, *z_aug_d, *grids_d;
int *rseg_d, *zseg_d;
long int *totsteps_d, *grid_size_d, *offsets_d;
int testcases;
//printf("How many test cases? ");
//gets number of test cases
scanf("%d", &testcases);
//these sizes are used for malloc ops
//the size is based on the amount of test cases and the
//type of data this array holds
size_t doubleSize = testcases*sizeof(double);
size_t intSize = testcases*sizeof(int);
size_t longIntSize = testcases*sizeof(long int);
//host memory allocation
Il_h = (double*)malloc(doubleSize);
Ir_h = (double*)malloc(doubleSize);
rlength_h = (double*)malloc(doubleSize);
eta_h = (double*)malloc(doubleSize);
tstep_h = (double*)malloc(doubleSize);
ldr_h = (double*)malloc(doubleSize);
ldz_h = (double*)malloc(doubleSize);
zlength_h = (double*)malloc(doubleSize);
dI_h = (double*)malloc(doubleSize);
r_aug_h = (double*)malloc(doubleSize);
z_aug_h = (double*)malloc(doubleSize);
rseg_h = (int*)malloc(intSize);
zseg_h = (int*)malloc(intSize);
totsteps_h = (long int*)malloc(longIntSize);
grid_size_h = (long int*)malloc(longIntSize);
offsets_h = (long int*)malloc(longIntSize);
//device memory allocation
cudaMalloc(&Il_d, doubleSize);
cudaMalloc(&rlength_d, doubleSize);
cudaMalloc(&ldr_d, doubleSize);
cudaMalloc(&dI_d, doubleSize);
cudaMalloc(&r_aug_d, doubleSize);
cudaMalloc(&z_aug_d, doubleSize);
cudaMalloc(&rseg_d, intSize);
cudaMalloc(&zseg_d, intSize);
cudaMalloc(&totsteps_d, longIntSize);
cudaMalloc(&grid_size_d, longIntSize);
cudaMalloc(&offsets_d, longIntSize);
long int total_grid_size = 0;
for(int counter = 0; counter < testcases; counter++){
//label is unused
//FIX: was scanf("%s", &label) — `&label` has type char(*)[256], not the
//char* that %s requires, and the unbounded %s could overflow the buffer.
scanf("%255s", label);
//value of current when z = 0
//printf("What is your left I? ");
scanf("%lf", Il_h + counter);
//value of current when z = zMax
//printf("What is your right I? ");
scanf("%lf", Ir_h + counter);
//total r value
//printf("What is the radius of your rod? ");
scanf("%lf", rlength_h + counter);
//total z value
//printf("What is the length of your rod? ");
scanf("%lf", zlength_h + counter);
//value of eta (proportional to diffusivity)
//printf("What is eta? ");
scanf("%lf", eta_h + counter);
//number of r segments
//printf("How many segments would you like per radius? ");
scanf("%d", rseg_h + counter);
//number of z segments
//printf("How many segments would you like per length? ");
scanf("%d", zseg_h + counter);
//length of each r segment
ldr_h[counter] = rlength_h[counter]/(rseg_h[counter]+1);
//length of each z segment
ldz_h[counter] = zlength_h[counter]/(zseg_h[counter]+1);
double smallest = ldr_h[counter];
if(ldz_h[counter] < smallest)
smallest = ldz_h[counter];
//determines tstep that ensures stability
//0.125 is derived from 2^-(num dimensions + 1)
tstep_h[counter] = 0.125*smallest*smallest*mu0/eta_h[counter];
//gets total run time
double tottime;
//printf("How long would you like to run? ");
scanf("%lf", &tottime);
//total steps is an integer that truncates remainder
totsteps_h[counter] = tottime/tstep_h[counter];
//dI is change in I per each z segment
//zseg + 2 to account for boundary conditions
dI_h[counter] = (Ir_h[counter] - Il_h[counter]) / (zseg_h[counter]+2);
//r and z aug are used for updating values for increasing time
//as per numerical update
r_aug_h[counter] = eta_h[counter]*tstep_h[counter]/(mu0*ldr_h[counter]*ldr_h[counter]);
z_aug_h[counter] = eta_h[counter]*tstep_h[counter]/(mu0*ldz_h[counter]*ldz_h[counter]);
//grid size accounts for boundary conditions
grid_size_h[counter] = (rseg_h[counter] + 2)*(zseg_h[counter] + 2);
//offsets says where in memory this problems data starts
//more info above
offsets_h[counter] = total_grid_size;
//Total grid size is the size of all needed grid memory
total_grid_size += grid_size_h[counter];
}
size_t gridsSize = total_grid_size*sizeof(double);
//allocate variable amount of grid memory on both host and device
grids_h = (double*)malloc(gridsSize);
cudaMalloc(&grids_d, gridsSize);
//copy memory down to device
cudaMemcpy(Il_d, Il_h, doubleSize, cudaMemcpyHostToDevice);
cudaMemcpy(rlength_d, rlength_h, doubleSize, cudaMemcpyHostToDevice);
cudaMemcpy(ldr_d, ldr_h, doubleSize, cudaMemcpyHostToDevice);
cudaMemcpy(dI_d, dI_h, doubleSize, cudaMemcpyHostToDevice);
cudaMemcpy(r_aug_d, r_aug_h, doubleSize, cudaMemcpyHostToDevice);
cudaMemcpy(z_aug_d, z_aug_h, doubleSize, cudaMemcpyHostToDevice);
cudaMemcpy(rseg_d, rseg_h, intSize, cudaMemcpyHostToDevice);
cudaMemcpy(zseg_d, zseg_h, intSize, cudaMemcpyHostToDevice);
cudaMemcpy(totsteps_d, totsteps_h, longIntSize, cudaMemcpyHostToDevice);
cudaMemcpy(grid_size_d, grid_size_h, longIntSize, cudaMemcpyHostToDevice);
cudaMemcpy(offsets_d, offsets_h, longIntSize, cudaMemcpyHostToDevice);
//initialize all problems with initial values as per Dr. Sankaran's instructions
init<<<testcases,threadsPerBlock>>>(grids_d, Il_d, dI_d, ldr_d, rlength_d, grid_size_d, zseg_d, offsets_d);
//wait for all device implementation to finish
cudaDeviceSynchronize();
//copy initial grid data back to host
cudaMemcpy(grids_h, grids_d, gridsSize, cudaMemcpyDeviceToHost);
//outputs all problems initial conditions
FILE *myfile;
long int i;
for(int counter = 0; counter < testcases; counter++){
long int gridStart = offsets_h[counter];
char init[] = "initXX.txt";
int tens = counter / 10;
int ones = counter % 10;
init[4] = tens + '0';
init[5] = ones + '0';
myfile = fopen(init, "w");
for(i = 0; i< zseg_h[counter]+1; i++)
fprintf(myfile, "%lf ", i*ldz_h[counter]);
fprintf(myfile, "%lf\n", i*ldz_h[counter]);
for(i = 0; i< rseg_h[counter]+1; i++)
fprintf(myfile, "%lf ", i*ldr_h[counter]);
fprintf(myfile, "%lf\n", i*ldr_h[counter]);
for(i = 0; i< (rseg_h[counter] + 2)*(zseg_h[counter]+2); i++){
if(i%(zseg_h[counter]+2)==zseg_h[counter]+1)
fprintf(myfile, "%lf\n", grids_h[i + gridStart]);
else
fprintf(myfile, "%lf ", grids_h[i + gridStart]);
}
fclose(myfile);
}
clock_t begin, end;
double time_spent;
begin = clock();
//run
//size capped at MaxGridPoints, grid points per problem
run<<<testcases,threadsPerBlock, MaxGridPoints*sizeof(double)>>>(grids_d, r_aug_d, z_aug_d, totsteps_d, grid_size_d, rseg_d, zseg_d, offsets_d);
//wait for all device implementation to finish
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
//copy final grid data back to host
cudaMemcpy(grids_h, grids_d, gridsSize, cudaMemcpyDeviceToHost);
//output all problems final grid data
for(int counter = 0; counter < testcases; counter++){
long int gridStart = offsets_h[counter];
char init[] = "resXX.txt";
int tens = counter / 10;
int ones = counter % 10;
init[3] = tens + '0';
init[4] = ones + '0';
myfile = fopen(init, "w");
for(i = 0; i< zseg_h[counter]+1; i++)
fprintf(myfile, "%lf ", i*ldz_h[counter]);
fprintf(myfile, "%lf\n", i*ldz_h[counter]);
for(i = 0; i< rseg_h[counter]+1; i++)
fprintf(myfile, "%lf ", i*ldr_h[counter]);
fprintf(myfile, "%lf\n", i*ldr_h[counter]);
for(i = 0; i< (rseg_h[counter] + 2)*(zseg_h[counter]+2); i++){
if(i%(zseg_h[counter]+2)==zseg_h[counter]+1)
fprintf(myfile, "%lf\n", grids_h[i + gridStart]);
else
fprintf(myfile, "%lf ", grids_h[i + gridStart]);
}
fclose(myfile);
}
//free host memory
free(Il_h);
free(Ir_h);
free(rlength_h);
free(eta_h);
free(tstep_h);
free(ldr_h);
free(ldz_h);
free(zlength_h);
free(dI_h);
free(r_aug_h);
free(z_aug_h);
free(rseg_h);
free(zseg_h);
free(totsteps_h);
free(grid_size_h);
free(offsets_h);
//free device memory
cudaFree(Il_d);
cudaFree(rlength_d);
cudaFree(ldr_d);
cudaFree(dI_d);
cudaFree(r_aug_d);
cudaFree(z_aug_d);
cudaFree(rseg_d);
cudaFree(zseg_d);
cudaFree(totsteps_d);
cudaFree(grid_size_d);
cudaFree(offsets_d);
printf("\n------------------------------------\nExecution took: %lf sec\n", time_spent);
return 0;
}
|
3,653 | #include <stdio.h>
#include <cuda.h>
void test(int* C, int length);
/***********************/
/* TODO, write KERNEL */
/***********************/
__global__ void VecAdd(int* A, int* B, int* C, int N) {
    // Element-wise vector addition C = A + B; one thread per element,
    // with a guard for the ragged tail of the last block.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
int main(int argc, char* argv[]){
// Reads three rounds of (size, A[], B[]) from stdin, adds the vectors on
// the GPU, and prints each round's checksum via test().
int i,z;
int* h_A;
int* h_B;
int* h_C;
/* going 3 rounds */
for(z=0;z<3;++z){
/*******************/
/** READING INPUT **/
/*******************/
int size = 0; //length of input vectors
scanf("%d", &size);
/* Allocate host memory */
h_A = (int*)malloc(sizeof(int)*size);
h_B = (int*)malloc(sizeof(int)*size);
h_C = (int*)malloc(sizeof(int)*size);
for(i=0;i<size;++i){ scanf("%d", &h_A[i]); }
for(i=0;i<size;++i){ scanf("%d", &h_B[i]); }
/********************/
/** FINISHED INPUT **/
/********************/
int* d_A;
int* d_B;
int* d_C;
/*************************/
/* allocate device */
/* memory for A,B,C */
/*************************/
cudaMalloc(&d_A, sizeof(int)*size);
cudaMalloc(&d_B, sizeof(int)*size);
cudaMalloc(&d_C, sizeof(int)*size);
/***********************************/
/* TODO copy vectors A&B to device */
/***********************************/
cudaMemcpy(d_A, h_A, size*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size*sizeof(int), cudaMemcpyHostToDevice);
/*********************/
/* call kernel */
/*********************/
int threadsPerBlock = 256;
/* ceil-division so a partially filled last block covers the tail */
int blocksPerGrid = (size+threadsPerBlock-1)/threadsPerBlock;
VecAdd<<<blocksPerGrid,threadsPerBlock>>>(d_A, d_B, d_C, size);
/**************************/
/* TODO, copy result back */
/**************************/
/* blocking cudaMemcpy also synchronizes with the kernel above */
/* NOTE(review): no cudaGetLastError() after the launch — kernel/launch
   failures would go unnoticed here */
cudaMemcpy(h_C, d_C, sizeof(int)*size, cudaMemcpyDeviceToHost);
/*******************************************/
/** Testing output, don't change anything! */
/*******************************************/
test(h_C, size);
//free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
return 0;
}
//function to test the input, don't change anything!
void test(int* C, int length){
// Prints the sum of the first `length` elements of C as the round's
// checkable output. Marked "don't change anything!" by the assignment, so
// the code is left byte-identical.
int i=0;
// NOTE(review): `result` is an int initialized from a double literal (0.0);
// harmless (converts to 0) but unidiomatic — left as-is per the banner above.
int result = 0.0;
for(i=0;i<length;++i){
result += C[i];
}
printf("%d\n", result);
}
3,654 |
#include <cstdio>
#include <cmath>
#define BLOCKDIM 1024
// device kernel def
__global__ void Action_noImage_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, int Nmols , int NAtoms, int active_size);
__global__ void Action_noImage_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, int Nmols , int NAtoms,int NSAtoms , int active_size);
////////////////////////
void Action_NoImage_Center(double *SolventMols_,double *D_, double maskCenter[3],double maxD,int NMols, int NAtoms, float &time_gpu)
{
// Host wrapper: uploads the 3-component mask center and the NMols*NAtoms*3
// solvent coordinates, times Action_noImage_center_GPU with CUDA events,
// downloads the NMols per-molecule results into D_, and reports the kernel
// time through time_gpu (milliseconds, per cudaEventElapsedTime).
// Fixes vs. original: removed unused locals (t4, t2, Dist, solventMol,
// solventAtom); replaced deprecated cudaThreadSynchronize() with
// cudaDeviceSynchronize(); destroy the CUDA events (they were leaked).
cudaEvent_t start_event, stop_event;
float elapsed_time_gpu;
double *devI2Ptr;
double *devI1Ptr;
double *devO1Ptr;
cudaMalloc(((void **)(&devO1Ptr)),NMols * sizeof(double ));
cudaMalloc(((void **)(&devI1Ptr)),3 * sizeof(double ));
cudaMemcpy(devI1Ptr,maskCenter,3 * sizeof(double ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI2Ptr)),NMols * NAtoms * 3 * sizeof(double ));
cudaMemcpy(devI2Ptr,SolventMols_,NMols * NAtoms * 3 * sizeof(double ),cudaMemcpyHostToDevice);
//figure out the decomposition here; we need to pad as well
//active threads per block: the largest multiple of NAtoms that fits in BLOCKDIM
int active_size = BLOCKDIM/NAtoms * NAtoms;
int NBlocks = ceil(NMols * NAtoms / float(active_size));
dim3 dimGrid0 = dim3(NBlocks,1);
dim3 dimBlock0 = dim3(BLOCKDIM,1);
printf("NMols = %d, NAtoms = %d\n", NMols, NAtoms);
printf("About to launch kernel.\n");
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
Action_noImage_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr,devI1Ptr, devI2Ptr, maxD, NMols, NAtoms,active_size);
cudaDeviceSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_gpu,start_event, stop_event );
printf("Done with kernel CUDA Kernel Time: %.2f\n", elapsed_time_gpu);
time_gpu = elapsed_time_gpu;
cudaMemcpy(D_,devO1Ptr,NMols * sizeof(double ),cudaMemcpyDeviceToHost);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFree(devO1Ptr);
cudaFree(devI1Ptr);
cudaFree(devI2Ptr);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Action_NoImage_no_Center(double *SolventMols_,double *D_, double *Solute_atoms,double maxD,int NMols, int NAtoms,int NSAtoms, float &time_gpu)
{
// Host wrapper (no mask center variant): uploads solvent coordinates and
// NSAtoms*3 solute atom coordinates, times Action_noImage_no_center_GPU
// with CUDA events, and downloads the NMols results into D_; time_gpu gets
// the kernel time in milliseconds.
// Fixes vs. original: the original declared devI1Ptr but never allocated it
// (the cudaMalloc/cudaMemcpy were commented out) and then called
// cudaFree(devI1Ptr) on the uninitialized pointer — undefined behavior;
// devI1Ptr is removed entirely. Also removed unused locals, replaced
// deprecated cudaThreadSynchronize() with cudaDeviceSynchronize(), and
// destroy the CUDA events (they were leaked).
cudaEvent_t start_event, stop_event;
float elapsed_time_gpu;
double *devI3Ptr;
double *devI2Ptr;
double *devO1Ptr;
cudaMalloc(((void **)(&devO1Ptr)),NMols * sizeof(double ));
cudaMalloc(((void **)(&devI2Ptr)),NMols * NAtoms * 3 * sizeof(double ));
cudaMemcpy(devI2Ptr,SolventMols_,NMols * NAtoms * 3 * sizeof(double ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI3Ptr)), NSAtoms * 3 * sizeof(double ));
cudaMemcpy(devI3Ptr,Solute_atoms,NSAtoms * 3 * sizeof(double ),cudaMemcpyHostToDevice);
//figure out the decomposition here; we need to pad as well
//active threads per block: the largest multiple of NAtoms that fits in BLOCKDIM
int active_size = BLOCKDIM/NAtoms * NAtoms;
int NBlocks = ceil(NMols * NAtoms / float(active_size));
dim3 dimGrid0 = dim3(NBlocks,1);
dim3 dimBlock0 = dim3(BLOCKDIM,1);
printf("NMols = %d, NAtoms = %d\n", NMols, NAtoms);
printf("About to launch kernel.\n");
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
Action_noImage_no_center_GPU<<<dimGrid0,dimBlock0>>>(devO1Ptr, devI2Ptr,devI3Ptr, maxD, NMols, NAtoms,NSAtoms,active_size);
cudaDeviceSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time_gpu,start_event, stop_event );
printf("Done with kernel CUDA Kernel Time: %.2f\n", elapsed_time_gpu);
time_gpu = elapsed_time_gpu;
cudaMemcpy(D_,devO1Ptr,NMols * sizeof(double ),cudaMemcpyDeviceToHost);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFree(devO1Ptr);
cudaFree(devI2Ptr);
cudaFree(devI3Ptr);
}
3,655 | #include <cuda.h>
__device__ void lock(int *mutex) {
// Spin until this thread atomically flips *mutex from 0 (free) to 1 (held).
// NOTE(review): on pre-Volta SIMT scheduling, threads of the SAME warp
// spinning on one mutex can livelock — confirm callers avoid intra-warp
// contention on the target architecture.
while (atomicCAS(mutex, 0, 1));
}
__device__ void unlock(int *mutex) {
// Release the mutex by atomically writing 0; atomicExch keeps the store
// visible/ordered with respect to the acquiring atomicCAS in lock().
atomicExch(mutex, 0);
}
__device__ long getThreadID() {
// Flat global thread index across a 3-D grid of 3-D blocks.
// FIX: the original computed blockId and threadId in `int` and only widened
// at the return, so grids with more than INT_MAX total threads overflowed
// before the conversion. All intermediate arithmetic is now done in `long`.
long blockId = blockIdx.x
             + (long)blockIdx.y * gridDim.x
             + (long)gridDim.x * gridDim.y * blockIdx.z;
long threadId = blockId * ((long)blockDim.x * blockDim.y * blockDim.z)
              + (long)threadIdx.z * (blockDim.x * blockDim.y)
              + (long)threadIdx.y * blockDim.x
              + threadIdx.x;
return threadId;
}
template<class T>
struct greater
{
// Device-side binary predicate: true when a > b (analogue of std::greater,
// usable from device code).
__device__ bool operator()(T a, T b)
{
return a > b;
}
};
template<class T>
struct less
{
// Device-side binary predicate: true when a < b (analogue of std::less,
// usable from device code).
__device__ bool operator()(T a, T b)
{
return a < b;
}
};
|
3,656 | #include "includes.h"
__global__ void absolute_deriviative_upd_kernel( float4 * __restrict input_errors, const float4 * __restrict output_errors, const float4 * __restrict input_neurons, bool add_update_to_destination, int elem_count)
{
// Backward pass of |x|: d|x|/dx is +1 for x >= 0 and -1 for x < 0, so each
// output-error component's sign is flipped where the corresponding input
// neuron is negative. Optionally accumulates into the existing
// input_errors instead of overwriting.
// Data is processed four floats at a time; elem_count is in float4 units.
// Indexing uses a 2-D grid flattened with gridDim.x (blockIdx.y major).
// (Name keeps the original "deriviative" spelling — renaming would break
// callers.)
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 inp = input_neurons[elem_id];
float4 current_error = output_errors[elem_id];
// Flip the error sign component-wise where the forward input was negative.
if (inp.x < 0.0F)
current_error.x = -current_error.x;
if (inp.y < 0.0F)
current_error.y = -current_error.y;
if (inp.z < 0.0F)
current_error.z = -current_error.z;
if (inp.w < 0.0F)
current_error.w = -current_error.w;
float4 current_dst;
if (add_update_to_destination)
{
// Accumulate onto the previous contents of input_errors.
current_dst = input_errors[elem_id];
current_error.x += current_dst.x;
current_error.y += current_dst.y;
current_error.z += current_dst.z;
current_error.w += current_dst.w;
}
input_errors[elem_id] = current_error;
}
}
3,657 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <float.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <string>
#include <algorithm>
/***all macros**/
#define E_INIT 5 // in joules
#define E_ELEC 50e-9 //in nj = 1e-9j
#define E_AMP 100e-12 // in pj = 1e-12j
#define ALPHA 0.7
#define BETA 0.3
#define DELTA 0.0
#define MAX_NODE 100000000
#define DEBUG 1
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    // Report any non-success CUDA status with its source location; when
    // `abort` is true (the default, as used by gpuErrchk) the process exits
    // with the error code.
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/**all type declaration***/
using namespace std;
class Node{
public:
unsigned int val;
vector<unsigned int> weights;
vector<Node*> Edges;
Node(int val){
this->val = val;
}
void addEdge(Node* v,unsigned int w){
this->Edges.push_back(v);
this->weights.push_back(w);
}
};
//all fn declarations here
void calc_hx(int* offset,int* edges,float* hx,float* cord_x,float* cord_y,int N,int E,int sink);
void update_energy(int* parent,float* res_energy,int* Nt,int* Nr,float* cord_x,float* cord_y,int N,int sink);
void check_failure(float* res_energy,int N, int* flag);
void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop);
/**** device Code *******/
// __device__ volatile int Cx[MAX_NODE];
__device__ volatile int PQ[MAX_NODE];
//K in parallel
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,float* Cx,int* openList,int N,int K){
// One thread per priority queue (K queues total). The device-global PQ
// array is partitioned into K chunks of ceil(N/K) entries; each chunk is a
// binary min-heap keyed by Cx. Each non-empty queue pops its root, repairs
// the heap by sifting the last element down (indices are RELATIVE to
// `front`), removes the node from openList, and appends it to expandNodes
// via an atomic cursor.
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap: move last element to the root, shrink, sift down
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
// only a left child exists
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
// both children exist: swap with the smaller violating child
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next (atomic cursor: safe across the K threads)
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
__global__ void A_star_expand(int* off,int* edge,float* Hx,int* parent,volatile float* Cx,
int* expandNodes,int* expandNodes_size, int* lock,int* openList,
int N,int E, int K,int* nVFlag,int* PQ_size,
float* res_energy,int* Nt,int* Nr){
// One thread per frontier node: walks the node's CSR edge list and relaxes
// each child's cost Cx under a per-child spinlock (unit edge weight, i.e.
// hop count). Children whose cost improves and are not in any open list
// are flagged in nVFlag for the subsequent setNV/insertPQ passes.
// NOTE(review): res_energy, Nt, Nr are accepted but never used here —
// presumably for a later variant; confirm before removing.
// NOTE(review): the __syncthreads() below sits inside divergent control
// flow (both the `if(id < *expandNodes_size)` guard and the lock-spin
// loop), which is undefined behavior if threads of a block diverge here.
// It appears to serve as a scheduling nudge so warp-mates holding the lock
// can make progress — confirm behavior on the target architecture.
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
// expand: CSR neighbor range is [off[node], off[node+1]) — or E for the
// last node
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges are marked with negative ids
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section: relax child cost; Cx stores g + h, so g(node) is
//recovered as Cx[node] - Hx[node]
float exp_weight = 1 ;
if( Cx[child] > (Cx[node] - Hx[node])+ exp_weight + Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ exp_weight + Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}//end
}
//K in parallel -- O(N)
__global__ void keepHeapPQ(int* PQ_size,float* Cx,int N,int K){
// One thread per queue: scans each of the K heap partitions and repairs
// min-heap violations (heap keys are the float costs in Cx) after Cx may
// have changed during expansion.
// FIX: costs were read into `int` locals, truncating the float keys before
// comparison (e.g. 3.2 and 3.9 compared equal), so real violations could be
// missed; the locals are now float to match the keys used everywhere else.
// NOTE(review): child/parent offsets here use ABSOLUTE indices (2*i+1,
// (index-1)/2) while extractMin/insertPQ compute offsets RELATIVE to
// `front`; for id > 0 the two conventions disagree — confirm which is
// intended before relying on partitions beyond the first.
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
float cost = Cx[PQ[i]];
float costLeft = Cx[PQ[2*i+1]];
float costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
// sift the smaller child up until the heap property holds again
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
// node has only a left child
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
    // Stream compaction: every vertex whose flag is set gets appended to
    // nextV, claiming its slot through an atomic cursor (*nvSize).
    const int v = blockDim.x * blockIdx.x + threadIdx.x;
    if (v >= N)
        return;
    if (nextFlag[v] == 1) {
        const int slot = atomicAdd(nvSize, 1);
        nextV[slot] = v;
    }
}
//for K in parallel
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,float* Cx,int K,int N,int* openList){
// One thread per queue: the *nVsize new vertices are dealt round-robin
// across the K heaps (thread id takes nextV[id], nextV[id+K], ...). Each
// accepted vertex is appended to the queue's heap segment and sifted up by
// its Cx key (offsets RELATIVE to `front`), and openList records which
// queue now owns it.
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present in some open list, skip it
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
//append at the end of this queue's heap segment
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList (value = owning queue id)
openList[nextV[i]] = id;
if(PQS[id]>1){
//sift the new element up while it beats its parent
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
int main(){
//seed
srand(42);
//the K PQ
int K ;
scanf("%d",&K);
FILE* fgraph = fopen("graph_op.txt","r");
FILE* fgraph_rev = fopen("graph.txt","r");
int N,E;
fscanf(fgraph,"%d %d\n",&N,&E);
fscanf(fgraph_rev,"%d %d\n",&N,&E);
int startNode = N-1;
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*E);
//rev graph
int* H_rev_offset = (int*)malloc(sizeof(int)*N);
int* H_rev_edges = (int*)malloc(sizeof(int)*E);
//weight is hop count =1
float* H_hx = (float*)malloc(sizeof(float)*N);
float* H_cx = (float*)malloc(sizeof(float)*N);
int* H_parent = (int*)malloc(sizeof(int)*N);
int* H_openList = (int*)malloc(sizeof(int)*N);
int* H_PQ_size = (int*)malloc(sizeof(int)*K);
//to compute distance
float* H_cord_x = (float*)malloc(sizeof(float)*N);
float* H_cord_y = (float*)malloc(sizeof(float)*N);
float* H_res_energy = (float*)malloc(sizeof(float)*N);
int* H_packet_recv = (int*)malloc(sizeof(int)*N);
int* H_packet_sent = (int*)malloc(sizeof(int)*N);
memset(H_PQ_size,0,sizeof(int)*K);
memset(H_openList,-1,sizeof(int)*N);
memset(H_parent,-1,sizeof(int)*N);
for(int i=0;i<N;i++){
H_cx[i] = FLT_MAX;
H_res_energy[i] = E_INIT;
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%d",&H_edges[i]);
fscanf(fgraph_rev,"%d",&H_rev_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
fscanf(fgraph_rev,"%d",&H_rev_offset[i]);
}
fclose(fgraph_rev);
fclose(fgraph);
FILE* f_cord = fopen("Cord.txt","r");
for(int i=0;i<N;i++){
float x,y;
fscanf(f_cord,"%f %f\n",&x,&y);
H_cord_x[i]= x;
H_cord_y[i] = y;
}
fclose(f_cord);
calc_hx(H_rev_offset,H_rev_edges,H_hx,H_cord_x,H_cord_y,N,E,startNode);
printf("[INFO] completed taking input\n");
//init Host var
int* H_flagEnd = (int*)malloc(sizeof(int));
int* H_flagfound = (int*)malloc(sizeof(int));
int* H_a0 = (int*)malloc(sizeof(int));
int* H_nV_size = (int*)malloc(sizeof(int));
int* H_nV = (int*)malloc(sizeof(int)*N);
//required coz if many tries to add same in diff threads high low lower
int* H_nVFlag = (int*)malloc(sizeof(int)*N);
memset(H_nVFlag,-1,sizeof(int)*N);
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
int* H_end_A_star = (int*)malloc(sizeof(int));
*H_end_A_star = 0;
//device var
//graph struture
float run_time = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int* D_offset;
int* D_edges ;
float* D_hx;
float* D_Cx;
int* D_parent;
//Priority queue size
int* D_PQ_size;
//flag if in openList(contains which PQ)
int* D_openList;
//lock for nodes
int* D_lock;
//next nodes flag
int* D_nVFlag;
//next nodes array to insert PQ
int* D_nV;
int* D_nV_size;
//nodes to be expanded ( extracted from PQ )
int* D_expandNodes;
int* D_expandNodes_size;
//flag to end while loop and found the destination
int* D_flagEnd;
int* D_flagfound;
float* D_cord_x;
float* D_cord_y;
float* D_res_energy;
int* D_packet_recv;
int* D_packet_sent;
gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_hx,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_Cx,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) );
gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) );
gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) );
//energy and cords
gpuErrchk ( cudaMalloc(&D_res_energy,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_cord_x,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_cord_y,sizeof(float)*N) );
gpuErrchk( cudaMalloc(&D_packet_recv,sizeof(int)*N) );
gpuErrchk( cudaMalloc(&D_packet_sent,sizeof(int)*N) );
//copy
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) );
gpuErrchk ( cudaMemcpy(D_res_energy,H_res_energy,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_cord_x,H_cord_x,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_cord_y,H_cord_y,sizeof(float)*N,cudaMemcpyHostToDevice) );
//1 TO EVADE DIVIDE BY 0 ERROR
gpuErrchk ( cudaMemset(D_packet_recv,1,sizeof(int)*N) );
gpuErrchk ( cudaMemset(D_packet_sent,1,sizeof(int)*N) );
int count_round = 0;
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
//A* algo
H_cx[startNode]=H_hx[startNode];
// H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
//reset
gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(PQ,&startNode, sizeof(int), 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//DO A* initailly on whole graph
while(flag_PQ_not_empty == 1){
//extract min
cudaEventRecord(start);
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_hx,D_parent,D_Cx,
D_expandNodes,D_expandNodes_size, D_lock,D_openList,
N,E,K,D_nVFlag,D_PQ_size,
D_res_energy,D_packet_sent,D_packet_recv);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_Cx,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//gen from flag D_nV
//for N in parallel
cudaEventRecord(start);
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
}
//broadcasted sol
// for(int i=0;i<N;i++){
// int p = i;
// printf("PATH: %d ",i);
// while(H_parent[p]!=-1){
// p = H_parent[p];
// printf("%d ",p);
// if(p==startNode)
// break;
// }
// printf("\n");
// }
// update energy
update_energy(H_parent,H_res_energy,H_packet_sent,H_packet_recv,H_cord_x,H_cord_y,N,startNode);
gpuErrchk ( cudaMemcpy(D_packet_recv,H_packet_recv,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_packet_sent,H_packet_sent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_res_energy,H_res_energy,sizeof(float)*N,cudaMemcpyHostToDevice) );
//check for end
check_failure(H_res_energy,N,H_end_A_star);
count_round++;
while(*H_end_A_star==0){
update_energy(H_parent,H_res_energy,H_packet_sent,H_packet_recv,H_cord_x,H_cord_y,N,startNode);
check_failure(H_res_energy,N,H_end_A_star);
count_round++;
}
printf("rounds: %d\n",count_round);
printf("time: %f\n",run_time);
return 0;
}
// Compute a hop-count-style heuristic h(x) for every node of a CSR graph.
// For node i: h(i) = (euclidean distance from i to sink) * degree(i) /
// (sum of edge lengths to i's children), i.e. the distance expressed in
// units of the node's average edge length.
// offset/edges form CSR adjacency with N nodes and E edges; cord_x/cord_y
// are node coordinates; results are written into hx[0..N).
void calc_hx(int* offset,int* edges,float* hx,float* cord_x,float* cord_y,int N,int E,int sink){
    float dx,dy;
    dx = cord_x[sink];
    dy = cord_y[sink];
    for(int i=0;i<N;i++){
        float x,y;
        x = cord_x[i];
        y = cord_y[i];
        int start = offset[i];
        // BUG FIX: the edge range of the LAST node ends at E (total edge
        // count), not at N (node count) as the original had it.
        int end = E;
        if(i!=N-1)
            end = offset[i+1];
        float sum = 0;
        int count = 0;
        while(start < end){
            int child = edges[start];
            sum+= sqrtf( (x-cord_x[child])*(x-cord_x[child]) + (y-cord_y[child])*(y-cord_y[child]) );
            start++;
            count++;
        }
        float dist = sqrtf( (x-dx)*(x-dx) + (y-dy)*(y-dy) );
        //hop counts: distance to sink divided by average edge length.
        //Guard sum>0 so an isolated node (no edges) cannot divide by zero.
        if(dist!=0 && sum>0){
            hx[i] = (dist * count )/sum ;
        }
        else
            hx[i] = 0;
    }
}
// Simulate one communication round: a random number of random source nodes
// each push one packet up the stored parent chain towards the sink, draining
// receive/transmit energy along the way (first-order radio model).
// Nt/Nr accumulate per-node sent/received packet counts.
void update_energy(int* parent,float* res_energy,int* Nt,int* Nr,float* cord_x,float* cord_y,int N,int sink){
    const int packet_bits = 2048; //256 bit packet length (as configured)
    int rounds = rand()%N;
    for(int r=0;r<rounds;r++){
        int cur = rand()%N;
        // Walk up the routing tree until the root (parent == -1).
        while(parent[cur]!=-1){
            int up = parent[cur];
            // Receiver side: count the packet, pay the electronics cost.
            Nr[up]+=1;
            res_energy[up]-= packet_bits*E_ELEC;
            // Sender side: electronics + amplifier cost over squared distance.
            Nt[cur]+=1;
            float ddx = cord_x[cur]-cord_x[up];
            float ddy = cord_y[cur]-cord_y[up];
            float dist_sq = ddx*ddx + ddy*ddy;
            float tx_energy = packet_bits*(E_ELEC + E_AMP*dist_sq);
            res_energy[cur]-=tx_energy*100;
            cur = up;
        }
        // if(cur != sink)
        //     printf("ERROR; invalid path\n");
    }
}
// Scan residual energies and raise *flag once the first depleted node
// (energy below zero) is found, printing its id.
// Note: *flag is only ever set to 1, never cleared, by this function.
void check_failure(float* res_energy,int N, int* flag){
    for(int i = 0; i < N; i++){
        if(res_energy[i] >= 0)
            continue;
        *flag = 1;
        printf("dead: %d\n", i);
        break;
    }
}
// Accumulate the elapsed time (milliseconds) between two recorded CUDA
// events into the running total referenced by `time`.
void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop){
    float elapsed_ms = 0.0f;
    cudaEventElapsedTime(&elapsed_ms, start, stop);
    time += elapsed_ms;
}
3,658 | #include<iostream>
using namespace std;
// Matrix-multiply kernel (despite the name "add"): computes one element
// c[row][col] = sum_i a[row][i] * b[i][col] of the n x n product.
// Expects a 2D launch where blockIdx*blockDim + threadIdx covers [0,n)x[0,n).
__global__ void add(int *a,int*b,int *c,int n)
{
    int row=blockIdx.y*blockDim.y+threadIdx.y;
    int col=blockIdx.x*blockDim.x+threadIdx.x;
    // Bounds guard: the original wrote out of range whenever the grid
    // overshot n in either dimension.
    if(row>=n || col>=n)
        return;
    int sum=0;
    for(int i=0;i<n;i++)
    {
        sum=sum+a[row*n+i]*b[i*n+col];
    }
    c[row*n+col]=sum;
}
// Read two n x n matrices from stdin, multiply them on the GPU (one thread
// per output element) and print the result plus the kernel time in ms.
int main()
{
    cout<<"Enter size of matrix";
    int n;
    cin>>n;
    int a[n][n],b[n][n],c[n][n];
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            cin>>a[i][j];
        }
    }
    cout<<"Enter the 2nd matrix";
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            cin>>b[i][j];
        }
    }
    int *ad,*bd,*cd;
    int size;
    size=n*n*sizeof(int);
    cudaMalloc(&ad,size);
    cudaMalloc(&bd,size);
    cudaMalloc(&cd,size);
    cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(bd,b,size,cudaMemcpyHostToDevice);
    cudaEvent_t start,end;
    // One thread per output element: n x n grid of single-thread blocks.
    // (The original launched <<<grid,size>>>, passing the BYTE count as the
    // block size — far above the 1024-thread limit — and used an unused
    // z grid dimension.)
    dim3 grid(n,n,1);
    dim3 block(1,1,1);
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    add <<<grid,block>>>(ad,bd,cd,n);
    cudaEventRecord(end);
    // The stop event must complete before its timestamp can be read.
    cudaEventSynchronize(end);
    float time=0;
    cudaEventElapsedTime(&time,start,end);
    cudaMemcpy(c,cd,size,cudaMemcpyDeviceToHost);
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            cout<<c[i][j]<<" ";
        }
        cout<<endl;
    }
    cout<<"The time required is"<<time<<endl;
    // Release GPU resources (the original leaked all of them).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
}
|
3,659 | #include <stdio.h>
#include <cuda.h>
#include <iostream>
#include <cooperative_groups.h>
#define TYPE int
using namespace cooperative_groups;
// Trivial kernel used only as a subject for the occupancy calculator below:
// each thread zeroes one element of `a` at its flat global index.
// NOTE(review): no bounds check — `a` must hold at least
// gridDim.x * blockDim.x ints if this kernel is ever actually launched.
__global__ void my_kernel(int* a){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    a[tid]=0;
}
// Sweep candidate block sizes (32..1024, step 8) and print, for each, the
// number of resident blocks per SM and the total concurrent threads the
// device could sustain for my_kernel (an occupancy table).
int main(int argc, char **argv){
    int dev = 1;
    int numBlocksPerSm = 0;
    cudaDeviceProp deviceProp;
    // Check the result: an invalid device index (device 1 is hard-coded and
    // may not exist) would otherwise leave deviceProp uninitialised and the
    // whole table full of garbage.
    cudaError_t err = cudaGetDeviceProperties(&deviceProp, dev);
    if (err != cudaSuccess){
        std::cout << "cudaGetDeviceProperties failed: "
                  << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    std::cout << "BS SM BpSM TB TTHREADS \"SMs\""<<std::endl;
    for (int i = 32; i < 1025; i +=8){
        // 1024 bytes of dynamic shared memory are assumed per block.
        // NOTE(review): my_kernel declares none — 0 may have been intended.
        cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, my_kernel, i, 1024);
        std::cout << i << ": ";
        std::cout << deviceProp.multiProcessorCount <<" * "<< numBlocksPerSm <<" = " << deviceProp.multiProcessorCount * numBlocksPerSm;
        std::cout << " " << deviceProp.multiProcessorCount * numBlocksPerSm * i;
        std::cout << " " << (deviceProp.multiProcessorCount * numBlocksPerSm * i)/1024 << std::endl;
        //std::cout << i << "," << deviceProp.multiProcessorCount * numBlocksPerSm * i << std::endl;
    }
}
|
3,660 | // filename: gax.cu
// a simple CUDA kernel to add two vectors
extern "C" // keep the unmangled symbol name exactly "gax"
{
    // Scale vector b by the scalar stored in a[0]: c[i] = a[0] * b[i]
    // for i in [0, lengthC). One thread per element; excess threads exit.
    __global__ void gax(const int lengthC, const double *a, const double *b, double *c)
    {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx >= lengthC)
            return;
        c[idx] = a[0]*b[idx]; // REMEMBER ZERO INDEXING IN C LANGUAGE!!
    }
}
3,661 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <cuda.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
// Dump the first n ints of h_data to stdout, space separated,
// framed by leading and trailing newlines.
void print(int* h_data, int n) {
    std::cout << "\n";
    for (int* p = h_data; p != h_data + n; ++p) {
        std::cout << *p << " ";
    }
    std::cout << "\n";
}
// Abort the program with a descriptive message when a CUDA runtime call
// did not return cudaSuccess.
// NOTE(review): __LINE__ here reports this function's own line, not the
// failing call site; a wrapper macro would capture the caller's line.
void cudaTest(cudaError_t error) {
    if (error == cudaSuccess)
        return;
    printf("cuda returned error %s (code %d), line(%d)\n",
            cudaGetErrorString(error), error, __LINE__);
    exit(EXIT_FAILURE);
}
// Per-segment LSD binary radix sort.
// One thread sorts one segment [d_seg[t], d_seg[t+1]) of d_vec in place over
// BITS_NUMBER single-bit passes, using d_aux as scratch. Each pass is a
// stable counting sort on the current bit.
// NOTE(review): the default BITS_NUMBER=64 exceeds the width of int; the
// caller instantiates it with EXP_BITS_SIZE (10), which keeps the shift
// well defined.
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_aux, int *d_seg,
		int num_segments) {
	int bx = blockIdx.x;
	int tx = threadIdx.x;
	if (bx * blockDim.x + tx < num_segments) {
		int begin = d_seg[bx * blockDim.x + tx];
		int end = d_seg[bx * blockDim.x + tx + 1];
		int i, exp = 0;
		for (exp = 0; exp < BITS_NUMBER; exp++) {
			// Count keys with a 0/1 in the current bit position.
			int one = 0;
			int zero = 0;
			for (i = begin; i < end; i++) {
				int x = (d_vec[i] >> exp) & 1;
				one += x;
				zero += (1 - x);
			}
			// After this, `one` is the 1-based end of the 1-bucket and
			// `zero` the 1-based end of the 0-bucket within the segment.
			one += zero;
			// Stable placement: walk backwards, dropping each key at the
			// current tail of its bucket and shrinking that bucket.
			for (i = end - 1; i >= begin; i--) {
				int x = (d_vec[i] >> exp) & 1;
				int index = begin + x * (one - 1) + (1 - x) * (zero - 1);
				d_aux[index] = d_vec[i];
				one -= x;
				zero -= (1 - x);
			}
			// Copy the pass result back so the next bit reads d_vec.
			for (i = begin; i < end; i++)
				d_vec[i] = d_aux[i];
		}
	}
}
// Read a segmented int array from stdin (segment offsets, then values), sort
// every segment on the GPU with the per-segment radix sort, then either print
// the sorted values or (ELAPSED_TIME==1) the kernel time in milliseconds.
int main(int argc, char **argv) {
	int num_of_segments;
	int num_of_elements;
	int i;
	scanf("%d", &num_of_segments);
	int mem_size_seg = sizeof(int) * (num_of_segments + 1);
	int *h_seg = (int *) malloc(mem_size_seg);
	for (i = 0; i < num_of_segments + 1; i++)
		scanf("%d", &h_seg[i]);
	scanf("%d", &num_of_elements);
	int mem_size_vec = sizeof(int) * num_of_elements;
	int *h_vec = (int *) malloc(mem_size_vec);
	for (i = 0; i < num_of_elements; i++)
		scanf("%d", &h_vec[i]);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// Allocate device memory
	int *d_seg, *d_vec, *d_aux;
	cudaTest(cudaMalloc((void **) &d_seg, mem_size_seg));
	cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
	cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
	cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_seg, cudaMemcpyHostToDevice));
	cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
	int blocksize = BLOCK_SIZE;
	dim3 threads(blocksize, 1);
	dim3 grid((num_of_segments - 1) / blocksize + 1, 1);
	cudaEventRecord(start);
	radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_aux, d_seg,
			num_of_segments);
	cudaEventRecord(stop);
	// Surface launch-configuration and asynchronous kernel errors instead of
	// silently copying back garbage (these checks were commented out).
	cudaTest(cudaPeekAtLastError());
	cudaTest(cudaDeviceSynchronize());
	cudaTest(cudaMemcpy(h_seg, d_seg, mem_size_seg, cudaMemcpyDeviceToHost));
	cudaTest(cudaMemcpy(h_vec, d_vec, mem_size_vec, cudaMemcpyDeviceToHost));
	if (ELAPSED_TIME == 1) {
		cudaEventSynchronize(stop);
		float milliseconds = 0;
		cudaEventElapsedTime(&milliseconds, start, stop);
		std::cout << milliseconds << "\n";
	} else
		print(h_vec, num_of_elements);
	free(h_seg);
	free(h_vec);
	cudaFree(d_seg);
	cudaFree(d_vec);
	cudaFree(d_aux);
	return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/
|
3,662 | #define MAX_BLOCKS 65535
#define MAX_THREADS 512
#include <iostream>
using namespace std;
/*
__global__ void harmonic_sum_kernel(float *d_idata,int gulp_index, int size, int stretch_factor)
{
//float* d_idata_float = (float*)d_idata;
int Index = blockIdx.x * blockDim.x + threadIdx.x;
if(Index<size/stretch_factor)
{
for(int i = 0;i<stretch_factor;i++)
{
d_idata[gulp_index+stretch_factor*Index+i] =
d_idata[gulp_index+stretch_factor*Index+i]
+d_idata[gulp_index+Index];
}
}
return;
}
__global__ void harmonic_sum_kernel_16(float *d_idata, float *d_odata,int gulp_index, int size)
{
//float* d_idata_float = (float*)d_idata;
int Index = blockIdx.x * blockDim.x + threadIdx.x;
if(Index<size)
{
d_odata[gulp_index+Index] =
d_idata[1*(gulp_index+Index)/2]
+d_idata[gulp_index+Index]
+d_idata[1*(gulp_index+Index)/4]
+d_idata[3*(gulp_index+Index)/4]
+d_idata[1*(gulp_index+Index)/8]
+d_idata[3*(gulp_index+Index)/8]
+d_idata[5*(gulp_index+Index)/8]
+d_idata[7*(gulp_index+Index)/8]
+d_idata[(gulp_index+Index)/16]
+d_idata[3*(gulp_index+Index)/16]
+d_idata[5*(gulp_index+Index)/16]
+d_idata[7*(gulp_index+Index)/16]
+d_idata[9*(gulp_index+Index)/16]
+d_idata[11*(gulp_index+Index)/16]
+d_idata[13*(gulp_index+Index)/16]
+d_idata[15*(gulp_index+Index)/16];
}
return;
}
__global__ void harmonic_sum_kernel_8(float *d_idata, float *d_odata,int gulp_index, int size)
{
//float* d_idata_float = (float*)d_idata;
int Index = blockIdx.x * blockDim.x + threadIdx.x;
if(Index<size)
{
d_odata[gulp_index+Index] = d_idata[(gulp_index+Index)/8]
+d_idata[2*(gulp_index+Index)/8]
+d_idata[3*(gulp_index+Index)/8]
+d_idata[4*(gulp_index+Index)/8]
+d_idata[5*(gulp_index+Index)/8]
+d_idata[6*(gulp_index+Index)/8]
+d_idata[7*(gulp_index+Index)/8]
+d_idata[gulp_index+Index];
}
return;
}
__global__ void harmonic_sum_kernel_4(float *d_idata, float *d_odata,int gulp_index, int size)
{
//float* d_idata_float = (float*)d_idata;
int Index = blockIdx.x * blockDim.x + threadIdx.x;
if(Index<size)
{
d_odata[gulp_index+Index] = d_idata[(gulp_index+Index)/4]
+d_idata[2*(gulp_index+Index)/4]
+d_idata[3*(gulp_index+Index)/4]
+d_idata[gulp_index+Index];
}
return;
}
__global__ void harmonic_sum_kernel_2(float *d_idata, float *d_odata,int gulp_index, int size)
{
//float* d_idata_float = (float*)d_idata;
int Index = blockIdx.x * blockDim.x + threadIdx.x;
if(Index<size)
{
d_odata[gulp_index+Index] = d_idata[(gulp_index+Index)/2]
+d_idata[gulp_index+Index];
}
return;
}
void call_harmonic_sum_kernel_generic(float *d_idata, float *d_odata, int gulp_index, int size, int harmonic)
{
harmonic_sum_kernel_generic(d_idata, d_odata, gulp_index, size, harmonic);
}
*/
// Harmonic summing: for each element, add the input at the fractional index
// positions i*(index)/harmonic (i = 1..harmonic-1) onto the fundamental, then
// divide by sqrt(harmonic) so the noise level stays comparable between
// harmonic folds. `gulp_index` offsets this launch into the full array;
// `size` is the element count of the current gulp.
__global__ void harmonic_sum_kernel_generic(float *d_idata, float *d_odata,int gulp_index, int size,int harmonic)
{
    //float* d_idata_float = (float*)d_idata;
    int Index = blockIdx.x * blockDim.x + threadIdx.x;
    if(Index<size)
    {
        d_odata[gulp_index+Index] = d_idata[gulp_index+Index];
        for(int i = 1; i < harmonic; i++)
        {
            d_odata[gulp_index+Index] += d_idata[(i*(gulp_index+Index))/harmonic];
        }
        // NOTE(review): the author flagged "ERROR HERE" on the original —
        // presumably about this normalisation; confirm against the intended
        // algorithm before relying on absolute output values.
        d_odata[gulp_index+Index] = d_odata[gulp_index+Index]/sqrt((float)harmonic); // can use *rsqrt to optimise further
    }
    return;
}
// Drive harmonic_sum_kernel_generic over an array that may exceed the largest
// single launch (MAX_BLOCKS * MAX_THREADS elements), splitting the work into
// sequential "gulps".
void GPU_harmonic_sum(float* d_input_array, float* d_output_array, int original_size, int harmonic)
{
    int gulps;
    int gulp_counter;
    int gulp_index = 0;
    int gulp_size;
    const int max_gulp = MAX_BLOCKS*MAX_THREADS;
    gulps = original_size/max_gulp+1;
    for(gulp_counter = 0; gulp_counter<gulps; gulp_counter++)
    {
        if(gulp_counter<gulps-1)
        {
            gulp_size = max_gulp;
        }
        else
        {
            // Remainder gulp; 0 when original_size divides max_gulp evenly.
            gulp_size = original_size - gulp_counter*max_gulp;
        }
        if(gulp_size > 0)
        {
            // Launch only the blocks the gulp actually needs: the original
            // always launched MAX_BLOCKS (wasting ~65k idle blocks on small
            // tails) and even launched a kernel when gulp_size was 0.
            int blocks = (gulp_size + MAX_THREADS - 1)/MAX_THREADS;
            harmonic_sum_kernel_generic<<<blocks,MAX_THREADS>>>(d_input_array,d_output_array,gulp_index,gulp_size,harmonic);
        }
        gulp_index = gulp_index + max_gulp;
    }
    return;
}
|
3,663 | #include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/copy.h>
// Predicate functor: true when x is odd.
// Uses != 0 rather than == 1 so negative odd values (where x % 2 evaluates
// to -1 in C++) are classified correctly.
struct is_odd
{
  __host__ __device__
  bool operator()(int x)
  {
    return (x % 2) != 0;
  }
};
// Demo: count the odd values in a small device vector with count_if, then
// gather them into a second device vector with copy_if and print them.
int main(void)
{
    thrust::device_vector<int> data(8);
    data[0] = 6;
    data[1] = 3;
    data[2] = 7;
    data[3] = 5;
    data[4] = 9;
    data[5] = 0;
    data[6] = 8;
    data[7] = 1;
    const int N = thrust::count_if(data.begin(), data.end(), is_odd());
    std::cout<<"counted"<<N<<"odd values"<<std::endl;
    // Size the destination exactly, then stream-compact the odd values in.
    thrust::device_vector<int> odds(N);
    thrust::copy_if(data.begin(), data.end(), odds.begin(), is_odd());
    for ( int i = 0; i < odds.size(); i++)
        std::cout<<odds[i]<<std::endl;
    return 0;
}
|
3,664 | // Checks that cuda compilation does the right thing when passed
// -fcuda-flush-denormals-to-zero. This should be translated to
// -fdenormal-fp-math-f32=preserve-sign
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_20 -fcuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=FTZ %s
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_20 -fno-cuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=NOFTZ %s
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_70 -fcuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=FTZ %s
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_70 -fno-cuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=NOFTZ %s
// Test explicit argument.
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=gfx803 -fcuda-flush-denormals-to-zero -nocudainc -nogpulib %s 2>&1 | FileCheck -check-prefix=FTZ %s
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=gfx803 -fno-cuda-flush-denormals-to-zero -nocudainc -nogpulib %s 2>&1 | FileCheck -check-prefix=NOFTZ %s
// RUN: %clang -x hip -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=gfx900 -fcuda-flush-denormals-to-zero -nocudainc -nogpulib %s 2>&1 | FileCheck -check-prefix=FTZ %s
// RUN: %clang -x hip -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=gfx900 -fno-cuda-flush-denormals-to-zero -nocudainc -nogpulib %s 2>&1 | FileCheck -check-prefix=NOFTZ %s
// Test the default changing with no argument based on the subtarget.
// RUN: %clang -x hip -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=gfx803 -nocudainc -nogpulib %s 2>&1 | FileCheck -check-prefix=FTZ %s
// RUN: %clang -x hip -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=gfx900 -nocudainc -nogpulib %s 2>&1 | FileCheck -check-prefix=NOFTZ %s
// CPUFTZ-NOT: -fdenormal-fp-math
// FTZ-NOT: -fdenormal-fp-math-f32=
// FTZ: "-fdenormal-fp-math-f32=preserve-sign,preserve-sign"
// The default of ieee is omitted
// NOFTZ-NOT: "-fdenormal-fp-math"
// NOFTZ-NOT: "-fdenormal-fp-math-f32"
|
3,665 | /*
* a simple test of the scan kernel.
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void cudaScan(float* out, float *in, int size);
void startClock(char*);
void stopClock(char*);
void printClock(char*);
// Exercise the single-block scan kernel: fill an array with 1.0f, scan it on
// the GPU, and print each element next to a CPU running sum for comparison.
// The array size (argv[1]) must fit in one block of threads.
int main(int argc, char** argv) {
  if (argc < 2) {
    printf("Usage: %s size-of-array\n",argv[0]);
    exit(1);
  }
  int size = atoi(argv[1]);
  cudaDeviceProp props;
  cudaGetDeviceProperties (&props,0);
  if (size > props.maxThreadsPerBlock) {
    fprintf(stderr,"At most %d elements for small version\n",
	    props.maxThreadsPerBlock);
    exit(1);
  }
  printf("size = %d\n",size);
  /* host based data */
  float *h_in;
  float *h_out;
  /* device based data */
  float *d_in;
  float *d_out;
  h_in = (float*) malloc(size*sizeof(float));
  h_out =(float*) malloc(size*sizeof(float));
  cudaMalloc(&d_in,size*sizeof(float));
  cudaMalloc(&d_out,size*sizeof(float));
  for (int i = 0; i < size; i++) {
    h_in[i] = 1.0;
  }
  startClock("copy data to device");
  cudaMemcpy(d_in,h_in,size*sizeof(float),cudaMemcpyHostToDevice);
  stopClock("copy data to device");
  startClock("compute");
  /* one block, `size` threads, double-buffered dynamic shared memory */
  cudaScan<<<1,size,2*size*sizeof(float)>>>(d_out,d_in,size);
  /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
     supported replacement */
  cudaDeviceSynchronize();
  stopClock("compute");
  startClock("copy data from device");
  cudaMemcpy(h_out,d_out,size*sizeof(float),cudaMemcpyDeviceToHost);
  stopClock("copy data from device");
  float sum = 0.0f;
  for (int i = 0; i < size; i++) {
    sum += h_in[i];
    printf("%d %f -> %f (%f)\n",i,h_in[i],h_out[i],sum);
  }
  free(h_in);
  free(h_out);
  cudaFree(d_in);
  cudaFree(d_out);
  printClock("copy data to device");
  printClock("compute");
  printClock("copy data from device");
}
|
3,666 | #include "includes.h"
// Convert an 8-bit, 3-channel pitched image to float4 (RGBA-like) with each
// channel scaled into [0,1] and alpha fixed at 1.0.
// NOTE(review): mul_constant / add_constant are currently ignored — the
// commented-out tail suggests they were meant to be applied; confirm the
// intended behaviour with callers before changing it.
// Assumes src_pitch is in bytes per source row and dst_stride in float4
// elements per destination row — TODO confirm at the call site.
__global__ void cuConvert8uC3To32fC4Kernel(const unsigned char *src, size_t src_pitch, float4* dst, size_t dst_stride, float mul_constant, float add_constant, int width, int height)
{
	const int x = blockIdx.x*blockDim.x + threadIdx.x;
	const int y = blockIdx.y*blockDim.y + threadIdx.y;
	int src_c = y*src_pitch + x*3;
	int dst_c = y*dst_stride + x;
	// Guard before any memory access so overshooting threads do nothing.
	if (x<width && y<height)
	{
		dst[dst_c] = make_float4(src[src_c]/255.0f, src[src_c+1]/255.0f, src[src_c+2]/255.0f, 1.0f);// * mul_constant + add_constant;
	}
}
3,667 | //
// Created by daniel on 10/23/20.
//
#include "brdf.cuh"
|
3,668 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Simple device-side spin lock backed by a single int in global memory
// (0 = free, 1 = held).
// NOTE(review): the implicit copy constructor copies the raw `mutex`
// pointer while each destructor calls cudaFree — passing a Lock by value
// (as the kernel below does) relies on the kernel-argument copy never being
// destroyed host-side; any additional host copy would double-free.
struct Lock {
	int *mutex;  // device pointer to the lock word
	Lock( void ) {
		int state = 0;
		// NOTE(review): cudaMalloc/cudaMemcpy return values are unchecked.
		cudaMalloc( (void**)& mutex, sizeof(int) );
		cudaMemcpy( mutex, &state, sizeof(int), cudaMemcpyHostToDevice );
	}
	~Lock( void ) {
		cudaFree( mutex );
	}
	// Spin until the lock word changes 0 -> 1 atomically.
	__device__ void lock( void ) {
		while( atomicCAS( mutex, 0, 1 ) != 0 );
	}
	// Release by atomically storing 0.
	__device__ void unlock( void ) {
		atomicExch( mutex, 0 );
	}
};
// Increment *nblocks once per block WITHOUT synchronisation: the
// read-modify-write by thread 0 of every block races, so the final count is
// typically far below the number of launched blocks. This is the
// intentionally broken half of the lock demo.
__global__ void blockCounterUnlocked(int *nblocks) {
	if (threadIdx.x == 0) {
		*nblocks = *nblocks + 1;
	}
}
// Increment *nblocks once per block, serialised through the spin lock so
// every block's increment is observed (contrast with blockCounterUnlocked).
__global__ void blockCounterLocked(Lock lock, int *nblocks) {
	if (threadIdx.x == 0) {
		lock.lock();
		*nblocks = *nblocks + 1;
		lock.unlock();
	}
}
// Demo of the device spin lock: run both counter kernels over 512 blocks of
// 1024 threads and print the surviving increment counts (the unlocked race
// typically loses most of them; the locked version reports all 512).
int main(){
	int host_count, *dev_count;
	Lock lock;
	cudaMalloc((void**) &dev_count, sizeof(int));
	// --- unsynchronised counter ---
	host_count = 0;
	cudaMemcpy(dev_count, &host_count, sizeof(int), cudaMemcpyHostToDevice);
	blockCounterUnlocked<<<512,1024>>>(dev_count);
	cudaMemcpy(&host_count, dev_count, sizeof(int), cudaMemcpyDeviceToHost);
	printf("blockCountUnlocked counted %d blocks\n", host_count);
	// --- lock-protected counter ---
	host_count = 0;
	cudaMemcpy(dev_count, &host_count, sizeof(int), cudaMemcpyHostToDevice);
	blockCounterLocked<<<512,1024>>>(lock, dev_count);
	cudaMemcpy(&host_count, dev_count, sizeof(int), cudaMemcpyDeviceToHost);
	printf("blockCountLocked counted %d blocks\n", host_count);
	cudaFree(dev_count);
}
|
3,669 | // MIT License
//
// Copyright (c) 2019 Miikka Väisälä
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <cstdio>
#include <cmath>
#define DEBUG 0
#define PI 3.14159265
// Paint one voxel of the "happy face" test sphere.
// rr is normalised by half the domain size: outside radius1 -> 0 (vacuum),
// inside radius2 -> 1 (core). In the shell between the radii, angular bands
// carve the eyes (value 10) and mouth (value 5); everything else is 1.
void
happy_face(float* domain, int idx, float xx, float yy, float zz, float size,
           float radius1, float radius2)
{
    float rr = sqrt(xx*xx + yy*yy + zz*zz);
    float theta = acos(xx/rr) * 180.0/PI;
    float phi = atan2(yy, zz) * 180.0/PI + 45.0;
    domain[idx] = 1.0;
    float rmax = (size/2.0);
    rr = rr/rmax;
    if (rr > radius1) {            // outside the sphere
        domain[idx] = 0.0;
    } else if (rr < radius2) {     // inner core
        domain[idx] = 1.0;
    } else {                       // shell: eyes and mouth live here
        float value = 1.0;
        const bool eye_band   = (theta > 40.0 && theta < 80.0);
        const bool mouth_band = (theta > 120.0 && theta < 150.0);
        if (eye_band) {
            if ((phi > 0.0 && phi < 40.0) || (phi > 50.0 && phi < 90.0))
                value = 10.0;
        } else if (mouth_band) {
            if (phi > 0.0 && phi < 90.0)
                value = 5.0;
        }
        domain[idx] = value;
    }
#if DEBUG == 1
    if (rr >= radius2 && rr <= radius1)
        printf("SMILE %.3e %.3e %.3e, %1.1f %1.1f, %4.1f %4.1f %4.1f, %1.1f \n",
               xx, yy, zz, radius1, radius2, rr, theta, phi, domain[idx]);
#endif
}
// Map grid indices (i,j,k) of an NX^3 lattice with spacing dx to physical
// coordinates centred on the domain midpoint, plus the flat array index.
// All results are written through the pointer out-parameters.
void
coordinate(int* idx, float* xx, float* yy, float* zz, float* size,
           int i, int j, int k, int NX, float dx)
{
    const float extent = float(NX)*dx;
    const float centre = extent/2.0;
    *size = extent;
    *xx = float(i)*dx - centre;
    *yy = float(j)*dx - centre;
    *zz = float(k)*dx - centre;
    *idx = i + NX*(j + NX*k);   // same flat index as i + j*NX + k*NX*NX
}
// Fill `domain` with the happy-face test pattern and zero `domain_buffer`,
// printing coarse progress after each completed z-slab.
void
init_domain(float* domain, float* domain_buffer, int NX, float dx,
            float radius1, float radius2)
{
    int idx;
    float xx, yy, zz, size;
    //Make the Happy Face
    for (int k=0; k < NX; k++ ) {
        for (int j=0; j < NX; j++ ) {
            for (int i=0; i < NX; i++ ) {
                coordinate(&idx, &xx, &yy, &zz, &size, i, j, k, NX, dx);
                happy_face(domain, idx, xx, yy, zz, size, radius1, radius2);
                domain_buffer[i + j*NX + k*NX*NX] = 0.0;
            }
        }
        // (k+1) of NX slabs are done. The original formula
        // (NX + NX*NX + k*NX*NX)/NX^3 could report slightly over 100%.
        printf("...%.1f percent", 100.0*float(k + 1)/float(NX));
    }
    printf("\n");
}
// Write one column-density map to "happy_face_<angle>.map" in raw binary:
// first the rotation angle as a float, then NX*NX image floats.
// Returns without writing (with a message) if the file cannot be opened.
void
write_map(float* h_image, int NX, size_t image_size, float angle)
{
    const size_t n = NX*NX;
    // Build the filename with one bounded call instead of the original
    // sprintf/strcat into fixed buffers (which also relied on <string.h>
    // that this file never includes).
    char filename[80];
    snprintf(filename, sizeof(filename), "happy_face_%d.map", int(angle));
    printf("Savefile %s \n", filename);
    FILE* save_ptr = fopen(filename,"wb");
    if (save_ptr == NULL) {
        // The original dereferenced a NULL stream on open failure.
        printf("ERROR: could not open %s for writing\n", filename);
        return;
    }
    float write_buf = (float) angle;
    fwrite(&write_buf, sizeof(float), 1, save_ptr);
    //Map data
    for (size_t i = 0; i < n; ++i) {
        write_buf = h_image[i];
        fwrite(&write_buf, sizeof(float), 1, save_ptr);
    }
    printf(" \n ");
    fclose(save_ptr);
}
//
//
//
//////////////////////////////////////////////////////
// ALL GPU RELATED PART BELLOW THIS POINT ======> //
// (Everything above is just auxuliary C code fluff)//
//////////////////////////////////////////////////////
//
//
//
// Swap pointers arround the recycle buffer
// Exchange the float* values referenced by aa and bb — used to recycle the
// rotation buffers each frame without copying any voxel data.
void
swap_pointers(float** aa, float** bb)
{
    float* const saved = *bb;
    *bb = *aa;
    *aa = saved;
}
// Nearest-neighbour (no interpolation) backward-rotation lookup: for target
// cell (ii,jj,kk), rotate the (j,k) plane by `stride` degrees about the x
// axis and return the flat index of the source cell whose value should be
// copied there, or -1 when the source falls outside the domain. The lack of
// interpolation is deliberate ("stupid") and causes visible artefacts —
// acceptable for this demo.
__device__ int
get_ind_source_stupid(const int ii, const int jj, const int kk, const int NX,
                      const float stride)
{
    float radians = stride * PI/180.0;
    // Inverse-rotate the target coordinates (centred on the domain midpoint)
    // to locate where the data originates.
    float x_tgt = (float(jj)-float(NX)/2)*cos(radians)
                - (float(kk)-float(NX)/2)*sin(radians);
    float z_tgt = (float(jj)-float(NX)/2)*sin(radians)
                + (float(kk)-float(NX)/2)*cos(radians);
    int jj_tgt = floor(x_tgt)+NX/2;
    int kk_tgt = floor(z_tgt)+NX/2;
    int ind_source;
    if (jj_tgt >= 0 && jj_tgt < NX && kk_tgt >= 0 && kk_tgt < NX) {
        ind_source = ii + jj_tgt*NX + kk_tgt*NX*NX;
    } else {
        // Out-of-bounds sentinel (note: -1.0 is a double silently
        // converted to int -1).
        ind_source = -1.0;
    }
    return ind_source;
}
// Rotate the sphere on the GPU by copying each target voxel from its rotated
// source voxel into a separate buffer array — writing to a second buffer
// avoids the read/write races an in-place rotation would have.
// One thread per voxel; requires a 3D launch covering NX^3 exactly
// (the host code checks divisibility before launching).
__global__ void
rotate(float* d_domain, float* d_domain_buffer, const int NX, const float dx,
       const float stride)
{
    int ii = threadIdx.x + blockIdx.x*blockDim.x;
    int jj = threadIdx.y + blockIdx.y*blockDim.y;
    int kk = threadIdx.z + blockIdx.z*blockDim.z;
    int ind_target = ii + jj*NX + kk*NX*NX;
    int ind_source;
    ind_source = get_ind_source_stupid(ii, jj, kk, NX, stride);
    if (ind_source >= 0) {
        d_domain_buffer[ind_target] = d_domain[ind_source];
    } else {
        // Zero if trying to access outside of domain bounds.
        d_domain_buffer[ind_target] = 0.0;
    }
}
// Integrate density along the depth axis for one detector pixel.
// ind_pix indexes the NX*NX image plane; the domain is NX^3 with the depth
// axis strided by NX*NX. The explicit loop is needed because the depth axis
// is not mapped to a thread index.
__device__ void
integrate_column_density(float* d_domain, float* d_image, const int ind_pix,
                         const int NX, const float dx)
{
    // Accumulate in a register and store once, instead of the original
    // read-modify-write of global memory on every depth step (same
    // summation order, so results are identical).
    float column = 0.0;
    for (int kk = 0; kk<NX; kk++) {
        int ind = ind_pix + kk*NX*NX;
        column += dx*d_domain[ind];
    }
    d_image[ind_pix] = column;
}
// Build the 2D column-density image: one thread per (x,z) detector pixel,
// each integrating its full depth column via integrate_column_density.
// Requires a 2D launch covering NX*NX pixels exactly (checked by the host).
__global__ void
make_map(float* d_domain, float* d_image, int NX, float dx)
{
    int ii = threadIdx.x + blockIdx.x*blockDim.x;
    int kk = threadIdx.y + blockIdx.y*blockDim.y;
    int ind_pix = ii + kk*NX;
    integrate_column_density(d_domain, d_image, ind_pix, NX, dx);
}
//////////////////////////////////////////////////////////////
// //
// DOWNLOAD FROM: https://github.com/miikkavaisala/perkeles //
// //
//////////////////////////////////////////////////////////////
// **BASICS OF GPU PROGRAMMING WITH CUDA**
// 1. Increasing demands of computation require exotic approaches. Graphics
// processing units (GPUs) are a popular tool for accelerating computation.
// --> A multi-GPU node can be really compact and affordable for its
// computing power; assuming you write GPU compatible code (a big issue
// for most users).
// 2. Popular uses for GPUs:
// * Graphics (e.g. almost all computer games.)
// * Blockchain / Cryptocurrencies (e.g. Dogecoin)
// * Machine learning (E.g. with Tensorflow)
// * Radiative transfer. (E.g. this demo here, SOC)
// * Astronomical data reduction/analysis (??? glCLEAN, FADRA, FATBOY)
// * Fluid dynamics / MHD (E.g. Astaroth, Fargo3D, Gamer-2)
// 3. GPUs are made for computing a humongous number of floating point
// operations in parallel.
// --> Good tool when performance is bound by computation. Bad tool when
// bound by memory. Some uses are more equal than others.
// 4. Known API:
// * CUDA, by Nvidia (most popular, used here)
// * OpenCL, by Khronos Group (open standard)
// * OpenACC, by OpenACC.org (directive based, like OpenMP)
// 5. Programming GPUs is not that difficult, if you are writing C code anyway.
// 6. Challenges:
// * Porting old things takes time - exitisting tool limited.
// * Debugging is challenging.
// * For demanding optimizations memory require special attention.
// CODE TIME!
// The main() is run on the HOST, from which the CUDA kernels are called.
// Driver: builds a 3-D "happy face" density cube on the host, uploads it to
// the GPU, and then for each degree of a full rotation
//   1. integrates the cube into a 2-D column-density map (make_map),
//   2. copies the map back and writes it to disk (write_map),
//   3. rotates the cube by `stride` degrees into a buffer (rotate) and
//      swaps the buffer pointers.
// Returns 0 on success, 666 when NX is not divisible by a thread count.
int
main()
{
    //Define pointers for HOST and DEVICE arrays.
    float *h_domain, *d_domain;
    float *h_domain_buffer, *d_domain_buffer;
    float *h_image, *d_image;
    //Domain settings
    float dx = 1.0, radius1 = 0.9, radius2 = 0.6;
    int NX = 512;
    //Rotation settings
    float max_rot = 360.0; //in deg
    float stride = 1.0; //deg
    //Determine the required memory
    size_t domain_size = sizeof(float) * NX*NX*NX;
    size_t image_size = sizeof(float) * NX*NX;
    //Integrator thread and block settings (2-D launch over the image plane)
    int RAD_NumThreads = 16; //MAXIMUM 32 (32^2 = 1024 threads per block)
    int RAD_Blocks = NX/RAD_NumThreads;
    dim3 RAD_threadsPerBlock(RAD_NumThreads, RAD_NumThreads);
    dim3 RAD_numBlocks(RAD_Blocks, RAD_Blocks);
    //Rotator thread and block settings (3-D launch over the cube)
    int ROT_NumThreads = 8; //MAXIMUM 10 (10^3 = 1000 <= 1024 threads per block)
    int ROT_Blocks = NX/ROT_NumThreads;
    dim3 ROT_threadsPerBlock(ROT_NumThreads, ROT_NumThreads, ROT_NumThreads);
    dim3 ROT_numBlocks(ROT_Blocks, ROT_Blocks, ROT_Blocks);
    //To prevent mismatch with threads
    if (NX % RAD_NumThreads != 0) {
        printf("NX should div by RAD_NumThreads! Now %i / %i = %f \n",
               NX, RAD_NumThreads, float(NX) / float(RAD_NumThreads));
        return 666;
    }
    if (NX % ROT_NumThreads != 0) {
        printf("NX should div by ROT_NumThreads! Now %i / %i = %f \n",
               NX, ROT_NumThreads, float(NX) / float(ROT_NumThreads));
        return 666;
    }
    //Allocate HOST memory
    h_domain = (float*)malloc(domain_size);
    h_domain_buffer = (float*)malloc(domain_size);
    h_image = (float*)malloc(image_size);
    // Init density field on HOST
    // Make the Happy Face
    printf("Making Happy Face... \n");
    init_domain(h_domain, h_domain_buffer, NX, dx, radius1, radius2);
    printf("Happy day! \n");
    //Allocate DEVICE memory
    cudaMalloc(&d_domain, domain_size);
    cudaMalloc(&d_domain_buffer, domain_size);
    cudaMalloc(&d_image, image_size);
    //Transfer domain (happy face) from HOST to DEVICE
    cudaMemcpy(d_domain, h_domain, domain_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_domain_buffer, h_domain_buffer, domain_size, cudaMemcpyHostToDevice);
    float angle = 0.0;
    while (angle <= max_rot) {
        //Calculate column density
        make_map<<<RAD_numBlocks, RAD_threadsPerBlock>>>(d_domain, d_image, NX, dx);
        cudaDeviceSynchronize();
        //Send image to host memory (blocking copy also orders the kernel)
        cudaMemcpy(h_image, d_image, image_size, cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();
        //Save result
        write_map(h_image, NX, image_size, angle);
        //Make rotation
        rotate<<<ROT_numBlocks, ROT_threadsPerBlock>>>(d_domain, d_domain_buffer,
                                                       NX, dx, stride);
        cudaDeviceSynchronize();
        //Swap buffer pointers.
        swap_pointers(&d_domain, &d_domain_buffer);
        angle += stride;
    }
    //Free allocated gpu DEVICE memory
    cudaFree(d_domain);
    cudaFree(d_domain_buffer);
    cudaFree(d_image );
    //Free HOST memory (the original leaked h_domain_buffer)
    free(h_domain);
    free(h_domain_buffer);
    free(h_image );
    return 0;
}
|
3,670 | // Based on: https://gist.github.com/1392067
#include <cuda.h>
#include <stdio.h>
#define NBLOCKS 4
#define NTHREADS 4
#define N (NTHREADS * NBLOCKS)
#define NBYTES (N * sizeof(unsigned))
#define SWAP(a, b) { unsigned tmp = (a); (a) = (b); (b) = tmp; }
/* One compare-exchange pass of a bitonic sort. Launch with one thread per
 * array element; (j, k) select the stage exactly as in Batcher's network. */
__global__ void bitonic_sort_step(unsigned* values, unsigned j, unsigned k) {
    /* Global thread id == element index. */
    const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
    /* Partner element for this stage. */
    const unsigned ixj = tid ^ j;
    /* Only the lower-indexed thread of each pair performs the exchange. */
    if (ixj <= tid) return;
    /* (tid & k) picks the direction of the subsequence containing tid:
     * nonzero -> descending, zero -> ascending. */
    const bool descending = (tid & k) != 0;
    const bool out_of_order = descending ? (values[tid] < values[ixj])
                                         : (values[tid] > values[ixj]);
    if (out_of_order) {
        SWAP(values[tid], values[ixj]);
    }
}
// Sorts a fixed 16-element array on the GPU with a bitonic sorting network
// and prints it before and after. N (= NTHREADS * NBLOCKS) must be a power
// of two for the network to be valid.
int main(int argc, char** argv) {
    unsigned host[N] = {
        5, 19, 3, 15, 2, 0, 1, 6, 8, 7, 11, 10, 13, 12, 4, 4
    };
    unsigned* device;
    unsigned i, j, k;
    puts("Before sort:");
    for (i = 0; i != N; ++i) {
        printf("%u ", host[i]);
    }
    puts("");
    // NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked.
    cudaMalloc(&device, NBYTES);
    cudaMemcpy(device, host, NBYTES, cudaMemcpyHostToDevice);
    /* Major step */
    for (k = 2; k <= N; k <<= 1) {
        /* Minor step */
        for (j = k >> 1; j > 0; j >>= 1) {
            // Kernels on the same (default) stream execute in launch order,
            // so no explicit synchronization is needed between passes.
            bitonic_sort_step<<<NBLOCKS, NTHREADS>>>(device, j, k);
        }
    }
    // A blocking device-to-host copy also waits for the queued kernels.
    cudaMemcpy(host, device, NBYTES, cudaMemcpyDeviceToHost);
    cudaFree(device);
    puts("After sort:");
    for (i = 0; i != N; ++i) {
        printf("%u ", host[i]);
    }
    puts("");
    return 0;
}
|
3,671 | /*
Ye Wang
CPEG655
lab2 problem 1.b
*/
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <sys/time.h>
__global__ void
matrixMul_1b(int BLOCK_SIZE, float *C, float *A, float *B, int N);
void mm(float * C, float * A, float * B, int N);
float GetRand(int seed);
void randomInit(float *data, int size, float val);
void constantInit(float *data, int size, float val);
int matrixMultiply(int block_size, dim3 &dimsA, dim3 &dimsB);
// Benchmark driver: runs matrixMultiply for every combination of
// block size {8, 16} and square-matrix size {512, 1024}.
int main()
{
    for (int bi = 0; bi < 2; ++bi) {
        const int block_size = 8 << bi;      // 8, then 16
        for (int ni = 0; ni < 2; ++ni) {
            const int N = 512 << ni;         // 512, then 1024
            dim3 dimsA(N, N);
            dim3 dimsB(N, N);
            matrixMultiply(block_size, dimsA, dimsB);
        }
    }
    return 0;
}
// Naive dense matrix multiply C = A * B for N x N row-major matrices.
// Launch with BLOCK_SIZE x BLOCK_SIZE threads per block and enough blocks
// to cover the matrix; each thread computes one output element.
__global__ void
matrixMul_1b(int BLOCK_SIZE, float *C, float *A, float *B, int N)
{
    // Global column / row handled by this thread.
    const int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    const int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    // Dot product of A's row with B's column, straight from global memory.
    float acc = 0;
    for (int k = 0; k < N; ++k)
        acc += A[row * N + k] * B[k * N + col];
    C[N * row + col] = acc;
}
// Host-side reference matrix multiply: C = A * B for N x N row-major
// matrices. Used to verify the GPU kernel's output.
void mm(float * C, float * A, float * B, int N)
{
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            float acc = 0;
            for (int k = 0; k < N; ++k)
                acc += A[row * N + k] * B[k * N + col];
            C[row * N + col] = acc;
        }
    }
}
// Returns a pseudo-random float in roughly [0, 978.4] ((rand() % 1000) / 1.02).
// NOTE(review): srand() is re-seeded on EVERY call from the current
// microsecond (mod 17) plus 'seed', so calls landing in the same microsecond
// with the same seed return the same value — confirm this is intended before
// relying on the distribution.
float GetRand(int seed)
{
    struct timeval tv;
    gettimeofday(&tv,NULL);
    srand(tv.tv_usec%17+seed);
    return((rand()% 1000) / 1.02);
}
// Fills 'data' with 'size' pseudo-random samples from GetRand.
// 'val' is unused; the parameter is kept so the signature matches
// constantInit and existing callers.
void randomInit(float *data, int size, float val)
{
    for (int idx = 0; idx < size; ++idx)
        data[idx] = GetRand(idx);
}
// Broadcasts a single value 'val' across all 'size' elements of 'data'.
void constantInit(float *data, int size, float val)
{
    float *end = data + size;
    for (float *p = data; p != end; ++p)
        *p = val;
}
// Allocates A, B, C on host and device, times nIter launches of the naive
// matrixMul_1b kernel with CUDA events, reports GFlop/s, and verifies the
// result against the host reference mm(). Returns true on match, else false.
// Assumes dimsA/dimsB dimensions are multiples of block_size (checked by the
// caller's choice of sizes, not here).
int matrixMultiply( int block_size, dim3 &dimsA, dim3 &dimsB)
{
    printf("START: Block[%d,%d], Matrix[%d,%d]\n",block_size,block_size,dimsB.x,dimsA.y);
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    // Initialize host memory (randomInit ignores its third argument)
    randomInit(h_A, size_A, 2.1f);
    randomInit(h_B, size_B, 1.f);
    unsigned int size_C = dimsB.x * dimsA.y;
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    float *test_C = (float *) malloc(mem_size_C);
    constantInit(test_C, size_C, 0.f);
    constantInit(h_C, size_C, 0.f);
    // NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked.
    cudaMalloc((void **) &d_A, mem_size_A);
    cudaMalloc((void **) &d_B, mem_size_B);
    cudaMalloc((void **) &d_C, mem_size_C);
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, mem_size_C, cudaMemcpyHostToDevice);
    // Setup execution parameters
    dim3 threads(block_size, block_size);
    dim3 grid(dimsA.y/block_size, dimsB.x/block_size);
    // Drain pending work so the event timing covers only the kernels below.
    cudaDeviceSynchronize();
    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    // Record the start event
    cudaEventRecord(start, NULL);
    // Execute the kernel nIter times and average the elapsed time.
    int nIter = 100;
    for (int j = 0; j < nIter; j++)
    {
        matrixMul_1b<<< grid, threads >>>(block_size, d_C, d_A, d_B, dimsA.x);
    }
    // Record the stop event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    // Compute and print the performance (2*N^3 flops per multiply)
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf( "REPORT:\n Performance= %.2f GFlop/s\n Time= %.3f msec\n Size= %.0f Ops\n WorkgroupSize= %u threads/block\n",
            gigaFlops,
            msecPerMatrixMul,
            flopsPerMatrixMul,
            threads.x * threads.y);
    // Copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    // Compute the CPU reference and compare with a relative tolerance of 1e-6.
    mm(test_C,h_A,h_B,dimsA.x);
    int verify=1;
    // mem_size_C/4 == element count (sizeof(float) == 4)
    for (int i=0;i<mem_size_C/4;i++)
    {
        if(h_C[i]!=test_C[i]&&(fabs(h_C[i]-test_C[i])/test_C[i])>1E-6){
            printf("Matrix[A:%d,B:%d,C:%d] C[%d]=%f, Expect= %f\n",mem_size_A,mem_size_B,mem_size_C,i,h_C[i],test_C[i]);
            verify=0;
            break;
        }
    }
    free(h_A);
    free(test_C);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Destroys the context; all remaining device resources are released.
    cudaDeviceReset();
    if (verify) {
        printf("SUCCESS!\n\n");
        return true;
    }else{
        printf("WRONG RESULT!\n\n");
        return false;
    }
}
|
3,672 | #include "includes.h"
extern "C"
// Ripple effect: each thread computes one pixel's brightness from a sinusoid
// radiating from (centerX, centerY).
//   tab     - output pixel values, roughly in [127-127*A, 127+127*A]
//   rowSize - pixels per row; A - amplitude; lambda - wavelength;
//   time    - animation time; fi - phase offset; N - total pixel count.
// NOTE(review): pow/sqrt here are the double-precision overloads; powf/sqrtf
// (or dx*dx + dy*dy) would keep the math in float.
__global__ void wavee(int* tab, unsigned int rowSize, unsigned int centerX, unsigned int centerY, float A, float lambda, float time, float fi, unsigned int N)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    // w = row, h = column of this thread's pixel.
    int w = int(index/rowSize);
    int h = index%rowSize;
    if ( w*rowSize+h < N ) {
        // |centerX - w|, branching to avoid unsigned wrap-around.
        float dx = 0;
        if(centerX > w) {
            dx = centerX - w;
        } else {
            dx = w - centerX;
        }
        // |centerY - h|
        float dy = 0;
        if(centerY > h) {
            dy = centerY - h;
        } else {
            dy = h - centerY;
        }
        // Euclidean distance from the wave center.
        float distance = pow(dx,2) + pow(dy,2);
        distance = sqrt(distance);
        float pi = 3.1415f;
        float v = 1.0f;            // propagation speed
        float T = lambda/v;        // period
        float ww = 2.0f*pi/T;      // angular frequency
        float k = 2.0f*pi/lambda;  // wave number
        // f in [-A, A]; remapped around mid-gray 127.
        float f = A * sin( ww*time - k*distance + fi );
        float res = f * 127 + 127;
        tab[index] = int(res);
    }
}
3,673 | // setup variables for calculation
__shared__ unsigned int iBAM;
#define ASK 1
#define MID 2
#define BID 3
#define TOLX 4
__device__ struct {
int vol[200];
int errmap[200];
} optout;
// Block-wide broadcast via the shared variable iBAM: on each pass thread 0
// selects the record type (ASK, MID, BID), a barrier publishes it to the
// block, then thread 0 writes into the global 'optout' struct.
__global__ void myfunc(void)
{
    int tid = threadIdx.x;
    // going through each type, ASK, MID, and BID
    for (unsigned int ii = 0; ii < 3; ii++)
    {
        // Order the previous iteration's writes before iBAM is reassigned.
        __syncthreads();
        switch(ii)
        {
        case 0:
            if (tid == 0)
            {
                iBAM = ASK;
            }
            break;
        case 1:
            if (tid == 0)
            {
                iBAM = MID;
            }
            break;
        case 2:
            // optout.vol[MID] was written by thread 0 in the previous pass;
            // the barrier above makes it visible block-wide. The condition is
            // uniform across the block, so the 'continue' (which skips the
            // __syncthreads() below) is taken by all threads together — no
            // divergent barrier.
            if (optout.vol[MID] > TOLX) // should always be true here
            {
                if (tid == 0)
                {
                    iBAM = BID;
                }
            }
            else
                continue;
            break;
        } // end switch
        // Publish the new iBAM to every thread before it is used.
        __syncthreads();
        if (tid == 0)
        {
            // NOTE(review): vol[] is int, so the float iBAM + 1.0f is
            // truncated back to an integer — confirm this is intended.
            optout.vol[iBAM] = iBAM + 1.0f;
            optout.errmap[iBAM] = iBAM + 11;
        }
        __syncthreads();
    } // end for
}
3,674 | #include "includes.h"
// Writes outputs[i] = 1 - input[i] for every element of a 'size'-element
// array. Supports a 2-D grid of 1-D blocks, flattened to a linear index.
__global__ void InvertValuesKernel(float *input, float* outputs, int size)
{
    // Linearize (blockIdx.y, blockIdx.x, threadIdx.x) into one element index.
    const int id = threadIdx.x
                 + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    // Tail guard: the grid may provide more threads than elements.
    if (id >= size)
        return;
    outputs[id] = 1.00f - input[id];
}
3,675 | #include <iostream>
#include <cmath>
#include <stdio.h>
#include <string.h>
__device__ __constant__ float D_H[ 3*3 ];
// Normalizes a coordinate in [0, length] to roughly [-1, 1], using
// length/2 (integer division) as both the mean and the scale.
__device__ float norm(float val, int length) {
    const float half = length/2;
    return (val - half) / half;
}
// Inverse of norm(): maps a value from roughly [-1, 1] back to [0, length],
// using length/2 (integer division) as both the scale and the offset.
__device__ float unorm(float val, int length) {
    const float half = length/2;
    return val * half + half;
}
// Applies the 3x3 homography stored in constant memory (D_H, row-major) to
// pixel (x, y): normalize to ~[-1,1], multiply by H, perspective-divide,
// then de-normalize. Results are returned through *xp / *yp (truncated to int).
__device__ void projectedCoord(int x, int y, int *xp, int *yp, int xlen, int ylen) {
    //printf("%d, %d \n", x, y);
    //NORMALIZE INPUT
    float nx = norm(x,xlen);
    float ny = norm(y,ylen);
    //printf("%f, %f \n", nx, ny);
    int sH = 3;  // row stride of the 3x3 matrix
    float w = 1; //Assume that the projection starts from y=1
    // Homogeneous coordinates: (hx, hy, hw) = H * (nx, ny, 1)
    float hx = nx*D_H[ sH*0+0] + ny*D_H[ sH*0+1 ] + w*D_H[ sH*0+2 ];
    float hy = nx*D_H[ sH*1+0] + ny*D_H[ sH*1+1 ] + w*D_H[ sH*1+2 ];
    float hw = nx*D_H[ sH*2+0] + ny*D_H[ sH*2+1 ] + w*D_H[ sH*2+2 ];
    //printf("%f, %f, %f\n",D_H[ sH*0+0], D_H[ sH*0+1], D_H[ sH*0+2]);
    //printf("%f, %f, %f\n",D_H[ sH*1+0], D_H[ sH*1+1], D_H[ sH*1+2]);
    //printf("%f, %f, %f\n",D_H[ sH*2+0], D_H[ sH*2+1], D_H[ sH*2+2]);
    //printf("%f %f %f \n", hx, hy, hw);
    //Unormalize Output (after the perspective divide by hw)
    *xp = unorm(hx/hw, xlen);
    *yp = unorm(hy/hw, ylen);
    //printf("%d, %d \n", *xp, *yp);
}
// Linear index of pixel (r, c) in an interleaved image with 'channels'
// samples per pixel and 'width' pixels per row.
__device__ int im_idx(int r, int c, int width, int channels) {
    const int pixel = r * width + c;
    return pixel * channels;
}
// True when (r, c) lies inside a width x height image.
__device__ bool val_rc(int r, int c, int width, int height) {
    if (r < 0 || c < 0)
        return false;
    return r < height && c < width;
}
// Background subtraction with a homography warp: warps img0's coordinates
// through D_H, takes the per-channel absolute difference against img1,
// converts it to a luma value, and writes that value to all three channels
// of out_img where it exceeds the threshold. One thread per pixel; here
// threadIdx/blockIdx .x maps to rows and .y to columns.
__global__ void proj_sub_tresh(unsigned char* img0, unsigned char* img1, unsigned char* out_img, int Width, int Height) {
    const unsigned int c = ( (blockDim.y * blockIdx.y) + threadIdx.y );
    const unsigned int r = ( (blockDim.x * blockIdx.x) + threadIdx.x );
    const unsigned int treshold = 60;
    const unsigned int ch = 3; //Channel
    const unsigned int s = sizeof(unsigned char);  // NOTE(review): unused
    const unsigned int W = Width;                  // NOTE(review): unused
    int o_img_idx;
    int i_img_idx;
    unsigned int subval, subval0, subval1, subval2;
    int rp;
    int cp;
    //Projection, Background Sub, Treshold
    // Not sure why I wrote the matrix in this manner where the r column is reversed using x,y notation
    // Need to look deeper into and be fixed
    projectedCoord(c,r, &cp, &rp, Width, Height);
    //printf("%d, %d \n", rp, cp);
    // Only proceed when both the source and the projected pixel are in bounds.
    if ( val_rc(rp,cp, Width, Height) && val_rc(r,c, Width, Height) ) {
        o_img_idx = im_idx(r,c, Width,ch);
        i_img_idx = im_idx(rp,cp, Width,ch);
        // Per-channel absolute difference (operands promote to int, so the
        // unsigned char subtraction is safe here).
        subval0 = abs( img1[ o_img_idx+0 ] - img0[ i_img_idx+0 ] );
        subval1 = abs( img1[ o_img_idx+1 ] - img0[ i_img_idx+1 ] );
        subval2 = abs( img1[ o_img_idx+2 ] - img0[ i_img_idx+2 ] );
        // Rec.709-style luma weights; the float result is truncated into the
        // unsigned int subval.
        subval = .21265*subval0 + .7152*subval1 + .0722*subval2;
        if (subval > treshold) {
            out_img[ o_img_idx+0 ] = subval;
            out_img[ o_img_idx+1 ] = subval;
            out_img[ o_img_idx+2 ] = subval;
        }
        //out_img[ o_img_idx+0 ] = img0[ i_img_idx+0];
        //out_img[ o_img_idx+1 ] = img0[ i_img_idx+1];
        //out_img[ o_img_idx+2 ] = img0[ i_img_idx+2];
    }
}
|
3,676 | #include <stdio.h>
#include <assert.h>
#define N 2048 * 2048 // Number of elements in each vector
// Error helper: prints the CUDA error string and aborts (via assert) on any
// failure; passes the status through unchanged on success.
inline cudaError_t checkCuda(cudaError_t result) {
    if (result == cudaSuccess)
        return result;
    printf("Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);  // always fires here; halts the program
    return result;
}
// Initialize memory
// Grid-stride initialization of all N elements: a[i]=2, b[i]=1, c[i]=0.
// Correct for any launch configuration.
__global__ void initVectors(int * a, int * b, int * c) {
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < N; idx += step) {
        a[idx] = 2;
        b[idx] = 1;
        c[idx] = 0;
    }
}
/*
* Optimize this already-accelerated codebase. Work iteratively,
* and use nsys to support your work.
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 20us.
*
* EDIT: I made it run under 77 us :)
*
* Some bugs have been placed in this codebase for your edification.
*/
// Grid-stride SAXPY-style kernel: c[i] = 2 * a[i] + b[i] for all N elements.
// Valid for any grid size, including a single block.
__global__ void saxpy(int * a, int * b, int * c)
{
    const int step = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step)
        c[idx] = 2 * a[idx] + b[idx];
}
// Allocates three managed vectors, initializes them on the GPU, runs the
// saxpy kernel, and spot-checks the first and last five results on the host.
int main()
{
    int *a, *b, *c;
    int size = N * sizeof (int); // The total number of bytes per vector
    int deviceId;
    cudaDeviceProp props;
    cudaGetDevice(&deviceId);
    cudaGetDeviceProperties(&props, deviceId);
    // Unified (managed) memory: accessible from both host and device.
    checkCuda(cudaMallocManaged(&a, size));
    checkCuda(cudaMallocManaged(&b, size));
    checkCuda(cudaMallocManaged(&c, size));
    // Launch geometry sized from the device: warpSize*8 threads per block,
    // 32 blocks per SM; the grid-stride kernels cover any remainder.
    int threads_per_block = props.warpSize * 8;
    int number_of_blocks = props.multiProcessorCount * 32; // using stride is better than arbitrary blocks
    // prefetch to gpu so the init kernel doesn't page-fault on first touch
    checkCuda(cudaMemPrefetchAsync(a, size, deviceId));
    checkCuda(cudaMemPrefetchAsync(b, size, deviceId));
    checkCuda(cudaMemPrefetchAsync(c, size, deviceId));
    initVectors <<< number_of_blocks, threads_per_block >>>( a, b, c );
    checkCuda(cudaGetLastError());       // catch launch-configuration errors
    checkCuda(cudaDeviceSynchronize());  // catch async execution errors
    saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c );
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    // Print out the first and last 5 values of c for a quality check.
    // NOTE(review): c is not prefetched back to the host, so these reads
    // page-fault the data back — fine for a spot check, slow for bulk reads.
    for( int i = 0; i < 5; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    for( int i = N-5; i < N; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    cudaFree( a ); cudaFree( b ); cudaFree( c );
}
|
3,677 | #include <stdio.h>
#define START 32
#define END 126
#define NBR 68
// Grid-stride character histogram with compacted bins:
//   bytes 32..96   -> histo[c-32]   (punctuation/digits/uppercase)
//   bytes 97..122  -> histo[c-64]   (lowercase folded onto uppercase bins)
//   everything else-> histo[c-90]
// NOTE(review): the final else has no lower bound, so bytes < 90 that reach
// it (e.g. control characters < 32) would index histo with a negative value
// — confirm the input is printable ASCII only.
__global__ void histo_kernel(unsigned char *buffer,long size, unsigned int *histo){
    int dt = 32;  // offset of the first printable ASCII character
    int i = threadIdx.x + blockIdx.x *blockDim.x;
    int stride = blockDim.x *gridDim.x;
    while(i<size){
        /*
        if (buffer[i] >= 32 && buffer[i] < 97)
        atomicAdd(&(histo[buffer[i]-dt]),1);
        if (buffer[i] >=97 && buffer[i] <= 122)
        atomicAdd(&(histo[buffer[i]-dt-32]),1);
        if (buffer[i] > 122 && buffer[i] <= 127 )
        atomicAdd(&(histo[buffer[i]-dt -32 -26]),1);
        */
        // Atomic adds: many threads may hit the same bin concurrently.
        if (buffer[i] >= 32 && buffer[i] < 97)
        {
            atomicAdd(&(histo[buffer[i]-dt]),1);
        }else if (buffer[i] >=97 && buffer[i] <= 122)
        {   atomicAdd(&(histo[buffer[i]-dt-32]),1);
        }
        else{
            atomicAdd(&(histo[buffer[i]-dt -32 -26]),1);
        }
        i+=stride;
    }
}
// Reads the file named in argv[1], builds a 68-bin character histogram on
// the GPU, and writes "char:count" lines to the file named in argv[2].
// Also reports the GPU elapsed time measured with CUDA events.
int main(int argc, char *argv[]){
    if(argc <= 2){
        fprintf(stderr, "Arguments non valide");
        return 1;
    }
    FILE *f_input;
    FILE *f_output;
    long lSize;
    char *buffer;
    f_input = fopen ( argv[1] , "r" );
    f_output = fopen( argv[2],"w");
    if( !f_input ) perror(argv[1]),exit(1);
    // Determine the file size by seeking to the end.
    fseek( f_input , 0L , SEEK_END);
    lSize = ftell( f_input );
    rewind( f_input );
    printf("The size is : %li", lSize);
    //buffer = calloc( 1, lSize+1 );
    buffer =(char*) malloc(lSize);
    if( !buffer ) fclose(f_input),fputs("memory alloc fails",stderr),exit(1);
    if( 1!=fread( buffer , lSize, 1 , f_input) )
        fclose(f_input),free(buffer),fputs("entire read fails",stderr),exit(1);
    // Events to time the whole GPU section (transfers + kernel).
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0);
    unsigned char *dev_buffer;
    unsigned int *dev_histo;
    /*Allocate device memory*/
    cudaMalloc( (void**)&dev_buffer, lSize);
    cudaMemcpy( dev_buffer, buffer, lSize, cudaMemcpyHostToDevice );
    // NOTE(review): allocated as 256 longs but zeroed/used as ints (only the
    // first NBR=68 bins are ever read) — over-allocation, but inconsistent.
    cudaMalloc( (void**)&dev_histo, 256 * sizeof( long ));
    cudaMemset( dev_histo, 0, 256 * sizeof( int ));
    cudaDeviceProp prop;
    cudaGetDeviceProperties( &prop, 0 );
    // Two blocks per SM, NBR threads per block; the kernel grid-strides.
    int multiproc = prop.multiProcessorCount;
    dim3 blocks(multiproc*2,1,1);
    dim3 threads(NBR, 1, 1);
    histo_kernel<<<blocks,threads>>>( dev_buffer, lSize, dev_histo );
    unsigned int histo[NBR];
    // Blocking copy also waits for the kernel to finish.
    cudaMemcpy( histo, dev_histo,NBR * sizeof( int ),cudaMemcpyDeviceToHost);
    int dt =32;
    // Reverse the kernel's bin compaction to recover the character codes.
    for(int i =0;i< 68;i++){
        /*
        if((i>=0 && i<= 31&& (i+dt != 42) && (i+dt != 36))|| (i>57 && i<=64) ){
        printf("%c:%d\n",i+dt,histo[i]);
        }
        if(i>31 && i<= 57 ){
        printf("%c:%d\n",i+dt+32,histo[i]);
        }
        if(i>64)
        printf("%c:%d\n",i+dt+26,histo[i]);
        */
        if((i>=0 && i<= 31&& (i+dt != 42) && (i+dt != 36))|| (i>57 && i<=64) ){
            fprintf(f_output, "%c:%d\n",i+dt,histo[i]);
            // printf("%c:%d\n",i+dt,histo[i]);
        }else if (i>31 && i<= 57 ){
            fprintf(f_output, "%c:%d\n",i+dt+32,histo[i]);
            // printf("%c:%d\n",i+dt+32,histo[i]);
        }else
            fprintf(f_output, "%c:%d\n",i+dt+26,histo[i]);
            // printf("%c:%d\n",i+dt+26,histo[i]);
    }
    cudaEventRecord( stop, 0 ) ;
    cudaEventSynchronize( stop );
    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, stop );
    printf( "Time to generate: %3.1f ms\n", elapsedTime );
    /*Delete event use for to get running time*/
    cudaEventDestroy( start ) ;
    cudaEventDestroy( stop );
    /*Free space*/
    cudaFree( dev_histo );
    cudaFree( dev_buffer );
    fclose(f_input);
    fclose(f_output);
    free(buffer);
    return 0;
}
|
3,678 | #include <stdio.h>
// One thread per element; launched as a single block, so threadIdx.x alone
// is a valid index.
// NOTE(review): despite the name, this computes the CUBE of each input
// (f * f * f) — confirm whether square (f * f) was intended.
__global__ void square(float *d_out,float *d_in)
{
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f * f *f;
}
// Fills an array with 0..95, runs the `square` kernel (one block of 96
// threads, one element per thread), and prints the results.
int main(int argc, char **argv)
{
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    float h_in[ARRAY_SIZE];
    for(int i = 0; i < ARRAY_SIZE; ++i)
    {
        h_in[i] = float(i);
    }
    float h_out[ARRAY_SIZE];
    //declare GPU memory pointers
    float *d_in;
    float *d_out;
    //allocate GPU memory (return codes unchecked)
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);
    //cuda memcpy to GPU
    cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice);
    //kernel launch: 1 block, ARRAY_SIZE threads
    square<<<1,ARRAY_SIZE>>> (d_out,d_in);
    // Blocking copy back also waits for the kernel to finish.
    cudaMemcpy(h_out,d_out,ARRAY_BYTES,cudaMemcpyDeviceToHost);
    for(int i =0 ; i < ARRAY_SIZE; ++i)
    {
        printf("%.2f \n",h_out[i]);
    }
    cudaFree(d_in);
    cudaFree(d_out);
}
|
3,679 | #include <iostream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <math.h>
#include <vector>
#include <time.h>
using namespace std;
// Event-recombination kernel: one thread per frame. Thread g walks the hits
// of its frame (rows d_inx[g] .. d_inx[g+1] of the x/y/energy arrays) and
// merges single / double / triple / quad pixel clusters into d_xrec, d_yrec,
// d_engrec, writing compacted results starting at the frame's first row.
// 'blocks' is the threads-per-block count used to build the global index.
// NOTE(review): the cluster tests read up to d_xx[j+3]/d_yy[j+3] and
// d_inx[globalIndex+1] without bounds checks — relies on the caller padding
// the arrays; confirm for the last frame/hits.
__global__ void tryy(float *d_engrec,float *d_xrec,float *d_yrec, float *d_xx, float *d_yy, float *d_engg, float *d_inx, int blocks){
    int is,il;           // first and one-past-last hit of this frame
    int count2;          // next output row
    int globalIndex= (blockIdx.x * blocks) + threadIdx.x;
    is= d_inx[globalIndex];
    il= d_inx[globalIndex+1];
    count2=is;
    for(int j=is;j<il;j++){
        // Single: isolated hit (no neighbor on the next row/column).
        if((d_yy[j]<(d_yy[j+1]-1))||(d_yy[j]==d_yy[j+1]&& d_xx[j]<(d_xx[j+1]-2))){
            d_xrec[count2]=d_xx[j];
            d_yrec[count2]=d_yy[j];
            d_engrec[count2]=d_engg[j];
            count2++;
        }
        //================================================double
        // Two adjacent hits: sum the energies, keep the higher-energy pixel.
        else if (((d_yy[j]== d_yy[j+1]) && (d_xx[j+1]== (d_xx[j]+1)) && ((d_xx[j+2]>d_xx[j+1]+1))) || ((((d_yy[j+2]>d_yy[j+1]+1)) && ( ((d_yy[j+1]+1== d_yy[j+1]) && (d_xx[j]== (d_xx[j+1]))) ))))
        {   d_engrec[count2]= d_engg[j]+ d_engg[j+1];
            if(d_xx[j]== d_xx[j+1] ){
                if(d_engg[j]> d_engg[j+1]){
                    d_xrec[count2]=d_xx[j];
                    d_yrec[count2]=d_yy[j];
                }
                else{
                    d_xrec[count2]=d_xx[j+1];
                    d_yrec[count2]=d_yy[j+1];
                }}
            if(d_yy[j]== d_yy[j+1]){
                if(d_engg[j]> d_engg[j+1]){
                    d_yrec[count2]=d_yy[j];
                    d_xrec[count2]=d_xx[j];
                }
                else{
                    d_yrec[count2]=d_yy[j+1];
                    d_xrec[count2]=d_xx[j+1];
                }}
            j++;         // consumed two hits
            count2++;
        }
        ///========================================triple event recombination
        // Three hits merged; position taken from the third hit.
        else if ((d_yy[j]==d_yy[j+2]&& d_yy[j+3]>(d_yy[j+2]+1))||( d_yy[j]==(d_yy[j+2]+1)&& d_xx[j]<(d_xx[j+3]+1)))
        {
            d_engrec[count2]= d_engg[j]+ d_engg[j+1]+ d_engg[j+2];
            d_yrec[count2]=d_yy[j+2];
            d_xrec[count2]=d_xx[j+2];
            j++;
            j++;         // consumed three hits
            count2++;
        }
        ///==========================================quad event recombination
        // Four hits (2x2 cluster); position taken from the third hit.
        else if(d_yy[j]== d_yy[j+1] && d_yy[j+2]== d_yy[j+3]/*&& y[o]<(y[o+4]-1)*/&&d_xx[j]==(d_xx[j+1]+1) ) {
            d_engrec[count2]= d_engg[j]+ d_engg[j+1]+ d_engg[j+2]+ d_engg[j+3];
            d_yrec[count2]=d_yy[j+2];
            d_xrec[count2]=d_xx[j+2];
            // if ((eng.at(o)>eng.at(o+1))&&(eng.at(o)>eng.at(o+2))&&(eng.at(o)>eng.at(o+3))){
            // x_rec.push_back(x.at(o));
            // y_rec.push_back(y.at(o));
            // }
            // if ((eng.at(o+1)>eng.at(o))&&(eng.at(o+1)>eng.at(o+2))&&(eng.at(o+1)>eng.at(o+3))){
            // x_rec.push_back(x.at(o+1));
            // y_rec.push_back(y.at(o+1));
            // }
            // if ((eng.at(o+2)>eng.at(o+1))&&(eng.at(o+2)>eng.at(o))&&(eng.at(o+2)>eng.at(o+3))){
            // x_rec.push_back(x.at(o+2));
            // y_rec.push_back(y.at(o+2));
            // }
            // if ((eng.at(o+3)>eng.at(o+1))&&(eng.at(o+3)>eng.at(o+2))&&(eng.at(o+3)>eng.at(o))){
            // x_rec.push_back(x.at(o+3));
            // y_rec.push_back(y.at(o+3));
            // }
            //cout << "quad"<< endl;
            //cout << x_rec.at(o)<< y_rec.at(o)<<endl;
            j++;
            j++;
            j++;         // consumed four hits
            count2++;
        }
    }}
// Pipeline: (1) scan the detector data file to count hits ("HT") and frames
// ("SF"); (2) re-read the first `rf` frames into flat x/y/energy arrays plus
// a per-frame start-index table; (3) run the GPU recombination kernel and
// time it; (4) run a frame-matrix CPU reference implementation and time it;
// (5) report counts, both timings and a (scaled) speed-up; free all memory.
int main(){
    ifstream file( "c12_siegen_19072017_01", ios::in );
    string part1,part2;
    string dd= "HT";   // hit record tag
    string dd2= "SF";  // start-of-frame record tag
    int num1, num2, num3;
    int numb=0 ;     // total hit count
    int subnumb=0 ;  // hits in the current frame
    int nframe=0;    // total frame count
    int cou=0;
    if( !file )
        cerr << "Cant open " << endl;
    // Pass 1: count hits and frames to size the arrays.
    while( file >> part1 )
    {
        if (part1 == dd){
            numb++;
        }
        if (part1 == dd2){
            nframe++;
        }
    }
    //===========================================================================================================================
    // frameIndexr[f] = index of frame f's first hit in the flat arrays.
    float frameIndexr[nframe+1];//x[numb], y[numb] , eng[numb],
    float *x= new float[numb];
    float *y= new float[numb];
    float *eng= new float[numb];
    frameIndexr[0]=0;
    int cou2=1;
    int rf=1000;  // only the first rf frames are processed
    //cout<<"i am here "<< numb<<endl;
    // Pass 2: load hit coordinates/energies and build the frame index table.
    ifstream file2( "c12_siegen_19072017_01", ios::in );
    while( file2 >> part2 >> num1 >> num2>> num3 )
    {   if (cou2>rf)break;
        if (part2 == dd){
            x[cou]= num1;
            y[cou]=num2;
            eng[cou]=num3;
            // cout<<eng[cou]<<endl;
            cou++;
            subnumb++;
        }
        if (part2 == dd2){
            frameIndexr[cou2]=frameIndexr[cou2-1]+subnumb;
            //cout<<frameIndexr[cou2]<<endl;
            subnumb=0;
            cou2++;
        }
    }
    //===================================================================================
    int sizeFrame=nframe+1;
    //cout<<" "<<sizeFrame<<" "<< nframe<<endl;
    //int x[numb],y[numb],eng[numb],frameIndex[sizeFrame];
    // for (int i=0 ; i<numb ; i++){
    // x[i]=xr[i];
    // y[i]=yr[i];
    // eng[i]=engr[i];
    // }
    // int count=0;
    // for (int i2=1 ; i2<sizeFrame ; i2++){
    // count=count+frameIndexr[i2-1];
    // frameIndexr[i2]=count;
    // //cout<<frameIndex[i2]<<endl;
    // }
    const int data_bytes= numb* sizeof(float); //the required memory
    const int data_bytes_2= sizeFrame * sizeof(float);
    ///===========================Declaration===============================
    // int h_engres[numb]; // CPU array for results
    // int h_xres[numb];
    // int h_yres[numb];
    //cout<<"i am here "<<endl;
    //=====================declaration of GPU
    float *d_yin;
    float *d_xin;
    float *d_engin;
    float *d_engres;
    float *d_xres;
    float *d_yres;
    float *d_ind;
    ///=================== allocate GPU mem===============
    cudaMalloc((void **) &d_engin, data_bytes);
    cudaMalloc((void **) &d_engres, data_bytes);
    cudaMalloc((void **) &d_xres, data_bytes);
    cudaMalloc((void **) &d_yres, data_bytes);
    cudaMalloc((void **) &d_xin, data_bytes);
    cudaMalloc((void **) &d_yin, data_bytes);
    cudaMalloc((void **) &d_ind, data_bytes_2);
    ///================== define number of blocks with constant 1024 threads per block===========
    int nthreadsperblock=32; //number of threads per block
    int nblock; //number of blocks
    // One thread per frame; round the block count up.
    if(sizeFrame%nthreadsperblock == 0){
        nblock=sizeFrame/nthreadsperblock;
    }
    else{nblock=(sizeFrame/nthreadsperblock)+1;}
    //cout<< nblock << " "<< nthreadsperblock<<endl;
    ///===================== copy the data to the GPU=============
    cudaMemcpy(d_xin, x, data_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_yin, y, data_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_engin, eng, data_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_ind,frameIndexr, data_bytes_2, cudaMemcpyHostToDevice);
    // Time the kernel plus the result copies with wall-clock (the blocking
    // cudaMemcpy calls also wait for the kernel to finish).
    clock_t tG0=clock();
    tryy<<<nblock,nthreadsperblock>>>(d_engres,d_xres,d_yres,d_xin,d_yin,d_engin,d_ind,nthreadsperblock);
    cudaMemcpy(eng,d_engres, data_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(x,d_xres, data_bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(y,d_yres, data_bytes, cudaMemcpyDeviceToHost);
    clock_t tGf=clock();
    int single=0;
    for (int i2=0 ; i2<numb ; i2++){
        if(eng[i2]>0){
            //cout<<eng[i2]<<" "<<x[i2]<<" "<<y[i2]<<endl;
            single++;
        }}
    ///=====================================================CPU=================================================================================================
    //==========================================================================================================================================================
    // CPU reference: rebuild each frame as a 384x384 matrix and classify
    // single/double/quad/triple clusters against a per-pixel background.
    int frame[384][384]={{}};
    int bg[384][384]={{}};
    vector<int> xc;
    vector<int> yc;
    vector<int> engc;
    vector<int> x_rec;
    vector<int> y_rec;
    vector<int> eng_rec;
    clock_t t1=clock();
    numb=0;
    nframe=0;
    int thres =50;
    ifstream file3( "c12_siegen_19072017_01", ios::in );
    if( !file3 ){
        cerr << "Cant open " << endl;
    }
    while( file3 >> part1 >> num1 >> num2>> num3 )
    {if (nframe>rf)break;
        if (part1 == dd){
            xc.push_back( num1);
            yc.push_back( num2);
            engc.push_back( num3);
            numb++;}
        if (part1 == dd2){
            nframe++;
            // Clear the frame matrix before filling it with this frame's hits.
            for (int k2=0;k2<384;k2++){
                for(int j2=0;j2<384;j2++){
                    frame[j2][k2]=0;
                }}
            ///================================starting recombination ======================================================================
            for (int i=0;i<xc.size();i++)///filling the frame matrix
            {
                frame[xc[i]][yc[i]]=engc[i];
                bg[xc[i]][yc[i]]=50;
            }
            for (int kk=1;kk<384;kk++){
                for(int jj=1;jj<384;jj++){
                    int k= jj, j=kk;
                    if (frame[j][k]>bg[j][k]){
                        ///================================single=======================
                        // NOTE(review): the last term compares frame[j][k-1]
                        // against bg[j-1][k] (mismatched indices) — looks like
                        // it should be bg[j][k-1]; confirm.
                        if(frame[j+1][k]<bg[j+1][k] && frame[j][k+1]<bg[j][k+1] &&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j-1][k] ){
                            x_rec.push_back(j);
                            y_rec.push_back(k);
                            eng_rec.push_back(frame[j][k]);
                            frame[j][k]=0;
                        }
                        ///================================double=======================
                        /////==========horizontal double============================================
                        else if(frame[j+1][k]>bg[j+1][k] &&frame[j+2][k]<bg[j+2][k]&&frame[j][k+1]<bg[j][k+1] &&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j][k-1]&&frame[j+1][k+1]<bg[j+1][k+1]&&frame[j+1][k-1]<bg[j+1][k-1] ) {
                            eng_rec.push_back((frame[j][k]+frame[j+1][k]));
                            if(frame[j][k]>frame[j+1][k]){
                                x_rec.push_back(j);
                                y_rec.push_back(k);
                            }
                            else{
                                x_rec.push_back(j+1);
                                y_rec.push_back(k);
                            }
                            frame[j][k]=0;
                            frame[j+1][k]=0;}
                        ////===============================vertical double ========================================
                        else if(frame[j][k+1]>bg[j][k+1]&&frame[j+1][k]<bg[j+1][k] &&frame[j][k+2]<bg[j][k+2] && frame[j+1][k+1]<bg[j+1][k+1]&&frame[j-1][k]<bg[j-1][k]&&frame[j-1][k+1]<bg[j-1][k+1]&&frame[j][k-1]<bg[j][k-1]) {
                            eng_rec.push_back((frame[j][k]+frame[j][k+1]));
                            if(frame[j][k]>frame[j][k+1]){
                                x_rec.push_back(j);
                                y_rec.push_back(k);
                            }
                            else{
                                x_rec.push_back(j);
                                y_rec.push_back(k+1);
                            }
                            frame[j][k]=0;
                            frame[j][k+1]=0;}
                        ///================================quadrad=======================
                        else if(frame[j+1][k]>bg[j+1][k]&&frame[j+1][k+1]>bg[j+1][k+1]&&frame[j][k+1]>bg[j][k+1]&&frame[j+2][k]<bg[j+2][k]&&frame[j-1][k]<bg[j-1][k]&&frame[j][k-1]<bg[j][k-1]&&frame[j+1][k-1]<bg[j+1][k-1]
                            && frame[j+2][k+1]<bg[j+2][k+1] && frame[j-1][k+1]<bg[j-1][k+1] && frame[j][k+2]<bg[j][k+2] && frame[j+1][k+2]<bg[j+1][k+2] )
                        {
                            eng_rec.push_back((frame[j][k]+frame[j][k+1]+frame[j+1][k]+frame[j+1][k+1]));
                            // Keep the position of the highest-energy pixel.
                            if(frame[j][k]>frame[j+1][k]&&frame[j][k]>frame[j][k+1]&&frame[j][k]>frame[j+1][k+1]){
                                x_rec.push_back(j);
                                y_rec.push_back(k);
                            }
                            else if(frame[j+1][k]>frame[j][k]&&frame[j+1][k]>frame[j][k+1]&&frame[j+1][k]>frame[j+1][k+1]){
                                x_rec.push_back(j+1);
                                y_rec.push_back(k);}
                            else if(frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j+1][k]&&frame[j][k+1]>frame[j+1][k+1]){
                                x_rec.push_back(j);
                                y_rec.push_back(k+1);
                            }
                            else{
                                x_rec.push_back(j+1);
                                y_rec.push_back(k+1);
                            }
                            //cout<< frame[j][k]<<" "<<frame[j][k+1]<<" "<<frame[j+1][k]<<" "<<frame[j+1][k+1]<<endl;
                            frame[j][k]=0;
                            frame[j][k+1]=0;
                            frame[j+1][k]=0;
                            frame[j+1][k+1]=0;
                        }
                        //==================================================================
                        ///================================triple L=======================
                        else if(frame[j+1][k+1]>thres && frame[j][k+1]>thres &&frame[j+1][k]<thres&&frame[j][k+2]<thres&&frame[j+1][k+2]<thres&&frame[j][k-1]<thres&&frame[j-1][k]<thres&&frame[j-1][k+1]<thres&&frame[j+2][k+1]<thres&&frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j+1][k+1])
                        {
                            eng_rec.push_back((frame[j][k]+frame[j][k+1]+frame[j+1][k+1]));
                            x_rec.push_back(j);
                            y_rec.push_back(k+1);
                            frame[j][k]=0;
                            frame[j][k+1]=0;
                            frame[j+1][k+1]=0;
                        }
                        ///============================triple J========================================================
                        else if (frame[j-1][k+1]>thres && frame[j][k+1]>thres&&frame[j+1][k]<thres &&frame[j-1][k]<thres&&frame[j][k-1]<thres&&frame[j-2][k+1]<thres&&frame[j-1][k+2]<thres
                                 &&frame[j][k+2]<thres&&frame[j+1][k+1]<thres&&frame[j][k+1]>frame[j][k]&&frame[j][k+1]>frame[j-1][k+1] )
                        {
                            eng_rec.push_back((frame[j][k]+frame[j-1][k+1]+frame[j][k+1]));
                            x_rec.push_back(j);
                            y_rec.push_back(k+1);
                            frame[j][k]=0;
                            frame[j-1][k+1]=0;
                            frame[j][k+1]=0;
                        }
                        ///================================== triple F ===================================
                        else if(frame[j][k+1]>thres &&frame[j+1][k]>thres&&frame[j+2][k]<thres &&frame[j][k+2]<thres&&frame[j+1][k+1]<thres&&frame[j][k-1]<thres&&
                                frame[j+1][k-1]<thres&&frame[j-1][k]<thres&&frame[j-1][k+1]<thres&&frame[j][k]>frame[j+1][k]&&frame[j][k]>frame[j][k+1])
                        {
                            eng_rec.push_back((frame[j][k]+frame[j+1][k]+frame[j][k+1]));
                            x_rec.push_back(j);
                            y_rec.push_back(k);
                            frame[j][k]=0;
                            frame[j][k+1]=0;
                            frame[j+1][k]=0;
                        }
                        ///====================================== triple 7 ====================================================
                        else if(frame[j+1][k]>thres &&frame[j+1][k+1]>thres&&frame[j-1][k]<thres&&frame[j][k-1]<thres&&frame[j][k+1]<thres&&frame[j+1][k+2]<thres&&frame[j+1][k-1]<thres
                                &&frame[j+2][k]<thres &&frame[j+2][k+1]<thres&&frame[j+1][k]>frame[j][k]&&frame[j+1][k]>frame[j+1][k+1]
                        )
                        {
                            eng_rec.push_back((frame[j][k]+frame[j+1][k+1]+frame[j+1][k]));
                            x_rec.push_back(j+1);
                            y_rec.push_back(k);
                            frame[j][k]=0;
                            frame[j+1][k]=0;
                            frame[j+1][k+1]=0;
                        }
                    }}}
            xc.clear();
            yc.clear();
            engc.clear();
        }}
    clock_t t=clock();
    cout<<"The total number of frames= "<<nframe<<endl;
    cout<<"The total number of frames= "<<cou2<<endl;
    float gpu_time =((float)(tGf-tG0))/(CLOCKS_PER_SEC);
    printf ("The GPU (%f sec).\n",gpu_time);
    float cpu_time =((float)(t-t1))/(CLOCKS_PER_SEC);
    printf ("The CPU (%f sec).\n",cpu_time);
    // NOTE(review): the speed-up is divided by a hard-coded 75 — confirm the
    // rationale for this scaling factor.
    float speed_up = (cpu_time/gpu_time)/75;
    printf ("SU (%f ).\n", ceil(speed_up));
    cudaFree(d_yin);
    cudaFree(d_xin);
    cudaFree(d_engin);
    cudaFree(d_engres);
    cudaFree(d_xres);
    cudaFree(d_yres);
    cudaFree(d_ind);
    delete[] x;
    delete[] y;
    delete[] eng;
    return 0 ;
}
|
3,680 | #include <stdio.h>
// Each launched GPU thread prints a greeting tagged with its thread index.
__global__ void helloFromGPU() {
    const auto a = threadIdx.x;
    printf("Hello World From GPU thread %d!\n", a);
}
// Demonstrates host/device interleaving: the kernel launch is asynchronous,
// so "CPU2" normally prints before any GPU output.
int main() {
    printf("Hello World From CPU1!\n");
    helloFromGPU<<<1, 100>>>();
    printf("Hello World From CPU2!\n");
    // Wait for the kernel so its printf buffer is flushed before the device
    // is reset; the original relied on cudaDeviceReset() alone to flush.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    printf("Hello World From CPU3!\n");
    return 0;
}
3,681 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
extern "C" void max_stride(float* src, float*dst, int stride, int src_ldx, int dst_ldx, int step, int size,int batch_size,int num_stride, int *mask);
// Exercises the externally-defined max_stride() GPU routine on a small
// buffer, then prints the (partially overwritten) input region, the pooled
// output region, and the first four entries of the index mask.
// Fix over the original: every allocation is now released before returning.
int main()
{
    int i;
    float *x;
    float *x_gpu;
    int *mask;
    int *mask_cpu;
    x = (float *)malloc(sizeof(float) * 50);
    mask_cpu = (int *)malloc(sizeof(int) * 8);
    for (i=0;i<50;i++)
        x[i] = (i*50) % 9;
    cudaMalloc((void**)&x_gpu, sizeof(float) * 50);
    cudaMalloc((void**)&mask, sizeof(int) * 8);
    // NOTE(review): only the first 20 of the 50 initialized floats are
    // uploaded — confirm max_stride() never reads past x_gpu[19].
    cudaMemcpy(x_gpu,x,sizeof(float) * 20, cudaMemcpyHostToDevice);
    max_stride(x_gpu,x_gpu+20,2,10,4,2,2,2,2,mask);
    cudaMemcpy(x, x_gpu, sizeof(float) * 28, cudaMemcpyDeviceToHost);
    cudaMemcpy(mask_cpu, mask, sizeof(int) * 8, cudaMemcpyDeviceToHost);
    for (i=0; i<20;i++)
        printf("%f ",x[i]);
    printf("\n");
    for (i=20; i<28;i++)
        printf("%f ",x[i]);
    printf("\n");
    for (i = 0; i < 4; i++)
        printf("%d ",mask_cpu[i]);
    // Release device and host memory (the original leaked all four buffers).
    cudaFree(x_gpu);
    cudaFree(mask);
    free(x);
    free(mask_cpu);
    return 0;
}
|
3,682 | // From Appendix B.15 of the CUDA-C Programming Guide.
#include <assert.h>
#include <cuda.h>
// assert() is only supported
// for devices of compute capability 2.0 and higher
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#undef assert
#define assert(arg)
#endif
// Demonstrates device-side assert: a nonzero argument is a no-op, a zero
// argument traps and halts the kernel (reported at the next sync point).
__global__ void testAssert(void) {
    int passes = 1;   // nonzero: assert has no effect
    int fails  = 0;   // zero: assert halts kernel execution
    assert(passes);
    assert(fails);
}
// Launches the single-thread assert demo; the failing device assert is
// surfaced by the synchronize call that follows the launch.
int main(int argc, char* argv[]) {
    const dim3 grid(1);
    const dim3 block(1);
    testAssert<<<grid, block>>>();
    cudaDeviceSynchronize();  // device assert failure is reported here
    cudaDeviceReset();
    return 0;
}
|
3,683 | #include "includes.h"
/*
Modified from
https://github.com/zhxfl/CUDA-CNN
*/
// Element-wise product z = x * y over a rows x cols matrix stored row-major.
// Expects a 2D launch: x-dimension covers columns, y-dimension covers rows;
// out-of-range threads exit without touching memory.
__global__ void elementwiseMul(float *x, float *y, float *z, int rows, int cols) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < rows && col < cols) {
        const int idx = row * cols + col;
        z[idx] = x[idx] * y[idx];
    }
}
3,684 | // runSim2.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <assert.h>
// Executes the A1 operator optimized
/// @brief Executes the A1 step of the algorithm. Updates the positions of the particles according to their velocities.
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable dt: The time-step used for the simulation
__global__ void A1_kernel(double* r, double* v, double dt) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
// No bounds guard: the launch must cover exactly 3*numParticles elements.
// NOTE(review): host code launches <<<N/512, 512>>>, so N = 3*numParticles
// must be a multiple of 512 or the tail is never updated -- confirm.
r[id] += v[id] * dt; // update positions
}
// Coalesced memory A2
/// @brief Executes the A2 step of the algorithm. Updates the velocites of all particles due to the force from the
/// central planet (particle 0) and the velocity of the central planet due to the forces from the other particles.
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable dt: The time-step used for the simulation
/// @variable varr: A 1D array of size 3n used to store the effects on particle 0 due to the others for the reduction routine
/// @variable status: A 1D array of size n originally initiated to all 1's, later changed to 0's if a particle is absorbed or ejected
/// @variable numParticles: The number of particles in the simulation: n
__global__ void A2_kernel(double *r, double *v, double *m, double dt, double *varr, double *status, int numParticles) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1; // Starts at 1
double invdist;
double dirvec[3];
if (id < numParticles) {
// Vector that points along particle id to particle 0
dirvec[0] = r[0] - r[id];
dirvec[1] = r[numParticles] - r[numParticles+id];
dirvec[2] = r[2*numParticles] - r[2*numParticles+id];
// Despite the name, invdist is the dt-scaled inverse-cube factor
// dt / d^3 (product of three reciprocal norms), i.e. the gravitational
// acceleration kernel without the mass.
invdist = dt * rnorm3d(dirvec[0], dirvec[1], dirvec[2])*\
rnorm3d(dirvec[0], dirvec[1], dirvec[2])*\
rnorm3d(dirvec[0], dirvec[1], dirvec[2]);
// If particle's status has previously been set to 0, it has no effect
if (status[id] == 0) {
// Particle id remains at 0
v[id] += 0;
v[numParticles+id] += 0;
v[2*numParticles+id] += 0;
// Particle 0 has no interaction
varr[id] = 0;
varr[numParticles+id] = 0;
varr[2*numParticles+id] = 0;
}
else {
// Update velocities of particles 1 through N-1
v[id] += m[0] * invdist * dirvec[0];
v[numParticles+id] += m[0] * invdist * dirvec[1];
v[2*numParticles+id] += m[0] * invdist * dirvec[2];
// Store forces on particle 0 (reaction terms, summed later by reduce())
varr[id] = -m[id] * invdist * dirvec[0];
varr[numParticles+id] = -m[id] * invdist * dirvec[1];
varr[2*numParticles+id] = -m[id] * invdist * dirvec[2];
}
// Unfortunately, this is executed by each thread but there is no race condition
// (every thread writes the same value, so the duplicate writes are benign).
// Adds the previous values of particle 0's velocity to varr to include them in the reduction
varr[0] = v[0];
varr[numParticles] = v[numParticles];
varr[2*numParticles] = v[2*numParticles];
}
}
// Coalesced B operator
/// @brief Executes the B step of the algorithm. Updates the velocity of the planetary embryo due to the forces of the
/// the particles (other than the central planet). Normally, it would calculate the effect of all the other
/// inter-particle interactions but those effects are neglected in this simulation.
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable dt: The time-step used for the simulation
/// @variable varr: A 1D array of size 3n used to store the effects on particle 0 due to the others for the reduction routine
/// @variable status: A 1D array of size n originally initiated to all 1's, later changed to 0's if a particle is absorbed or ejected
/// @variable numParticles: The number of particles in the simulation: n
/// @variable eps: The gravitational softening parameter
__global__ void B_kernel(double *r, double *v, double *m, double *varr, double dt, int numParticles, double *status, double eps) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2; // Starts at 2: skips central planet (0) and embryo (1)
double dirvec[3];
double invdist;
if (id < numParticles) {
// Vector that points from particle id to the embryo (particle 1)
dirvec[0] = r[1] - r[id];
dirvec[1] = r[numParticles+1] - r[numParticles+id];
dirvec[2] = r[2*numParticles+1] - r[2*numParticles+id];
// Softened, dt-scaled inverse-cube factor: status * dt / (d^2 + eps^2)^(3/2).
// A zero status makes the whole interaction vanish for this particle.
invdist = status[id] * dt * rsqrt((dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*\
(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*\
(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps));
// Update id'th satellitesimal
v[id] += m[1] * invdist * dirvec[0];
v[numParticles+id] += m[1] * invdist * dirvec[1];
v[2*numParticles+id] += m[1] * invdist * dirvec[2];
// Update embryo
// Store forces on embryo for reduction. Reaction terms are written one slot
// down (index id-1) so the embryo's own slot lands at index 0 of each
// component block; the vacated last slot of each block is zero padding.
// NOTE(review): varr[0], varr[numParticles], varr[2*numParticles] and the
// padding slots are written by every active thread with identical values,
// so the duplicate writes appear benign -- confirm.
varr[0] = v[1];
varr[numParticles-1] = 0; // These 0s are padding for the reduction
varr[numParticles] = v[numParticles+1];
varr[2*numParticles-1] = 0;
varr[2*numParticles] = v[2*numParticles+1];
varr[3*numParticles-1] = 0;
varr[id-1] = -m[id] * invdist * dirvec[0];
varr[numParticles+id-1] = -m[id] * invdist * dirvec[1];
varr[2*numParticles+id-1] = -m[id] * invdist * dirvec[2];
}
}
// Coalesced merge and eject
/// @brief Check if particle is still within the satellitesimal disc. If a particle is below 0.03*rH, it merges with the central planet.
/// If it is above rH, it is ejected from the disc.
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable status: A 1D array of size n originally initiated to all 1's, later changed to 0's if a particle is absorbed or ejected
/// @variable numParticles: The number of particles in the simulation: n
/// @variable rH: The Hill radius of of particle 0 (Jupiter)
// Classifies each satellitesimal by its distance to the central planet:
// inside 0.03*rH it is flagged for merger (status 2), outside rH it is
// flagged as ejected (status 3). Particles already deactivated (status 0)
// are left untouched; consMomentum resolves the flags afterwards.
__global__ void mergeEject(double *r, double *status, int numParticles, double rH) {
    size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2; // skip planet (0) and embryo (1)
    if (id >= numParticles)
        return;
    // Distance from the central planet (particle 0) to particle id.
    double d = norm3d(r[0]-r[id],
                      r[numParticles]-r[numParticles+id],
                      r[2*numParticles]-r[2*numParticles+id]);
    if (status[id] != 0) {
        if (d < 0.03*rH)
            status[id] = 2;   // flag: absorbed by the central planet
        else if (d > rH)
            status[id] = 3;   // flag: ejected; momentum bookkeeping skips it
    }
}
/// @brief If a particle merges with the central planet or a collision occurs with the embryo, momentum conservation
/// is needed to find the resulting velocity of the central planet or embryo (the other particle no longer exists).
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable status: A 1D array of size n originally initiated to all 1's, later changed to 0's if a particle is absorbed or ejected
/// @variable numParticles: The number of particles in the simulation: n
/// @variable rSatellites: A 1D array of size 2 where the 0th entry is the embryo radius and the 1st entry is the radius of the satellitesimals
__global__ void consMomentum(double *v, double *m, double *status, int numParticles, double *rSatellites) {
// Serial sweep over all satellitesimals (intended for a single-thread launch):
// resolves flags set by mergeEject (2 = merged with planet, 3 = ejected) and
// collision (4 = hit the embryo), conserving momentum and mass.
for (int id = 2; id < numParticles; id++) {
if (status[id] == 2) {
status[id] = 0;
// use conservation of momentum to update central velocity
v[0] = 1./(m[0] + m[id]) * (m[0]*v[0] + m[id]*v[id]);
v[numParticles] = 1./(m[0] + m[id]) * (m[0]*v[numParticles] + m[id]*v[numParticles+id]);
v[2*numParticles] = 1./(m[0] + m[id]) * (m[0]*v[2*numParticles] + m[id]*v[2*numParticles+id]);
// conservation of mass
m[0] += m[id];
}
else if (status[id] == 4) {
status[id] = 0;
// Grow the embryo's radius after absorbing a satellitesimal.
// NOTE(review): the formula uses m[2] as the absorbed mass -- presumably
// it should use m[id] (and m[1]+m[id]); confirm the intended growth law.
rSatellites[0] = cbrt((m[1]+m[2])/m[2])*rSatellites[1];
// use conservation of momentum to update velocity
v[1] = 1./(m[1] + m[id]) * (m[1]*v[1] + m[id]*v[id]);
v[numParticles+1] = 1./(m[1] + m[id]) * (m[1]*v[numParticles+1] + m[id]*v[numParticles+id]);
v[2*numParticles+1] = 1./(m[1] + m[id]) * (m[1]*v[2*numParticles+1] + m[id]*v[2*numParticles+id]);
// conservation of mass
m[1] += m[id];
}
else if (status[id] == 3)
status[id] = 0;
else
continue; // status 0 or 1: nothing to resolve
}
}
/// @brief Each particle has a status initially set to 1. If a particle merges with the central planet or collides with
/// the embryo, its status is set to 0 so that it no longer interacts in the simulation. This sets its mass, position
/// and velocity to 0.
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable status: A 1D array of size n originally initiated to all 1's, later changed to 0's if a particle is absorbed or ejected
/// @variable numParticles: The number of particles in the simulation: n
__global__ void statusUpdate(double *r, double *v, double *m, double *status, int numParticles) {
// Launched with one thread per coordinate (3*numParticles threads total).
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
// id/3 maps three consecutive threads to the same particle, so each mass is
// multiplied by its status three times concurrently; since status is expected
// to be 0 or 1 here (consMomentum clears codes 2/3/4 first), the repeated
// multiply is idempotent and the write race is benign.
m[id/3] *= status[id/3];
// id % numParticles recovers the particle index from the coordinate index
// (x slots: i, y slots: numParticles+i, z slots: 2*numParticles+i).
r[id] *= status[id % numParticles];
v[id] *= status[id % numParticles];
}
/// @brief Function to find cross product of two 3-vectors, vect_A and vect_B, and stores the result in cross_P (on the device)
/// @variable vect_A: Any 3-vector
/// @variable vect_B: Any 3-vector
/// @variable cross_P: Any 3-vector, holds the result of the cross product
__device__ void crossProduct(double *vect_A, double *vect_B, double *cross_P) {
    // Unpack the operands once; then apply the right-handed cross product.
    const double ax = vect_A[0], ay = vect_A[1], az = vect_A[2];
    const double bx = vect_B[0], by = vect_B[1], bz = vect_B[2];
    cross_P[0] = ay * bz - az * by;
    cross_P[1] = az * bx - ax * bz;
    cross_P[2] = ax * by - ay * bx;
}
/// @brief Function used for resolving collisiions with the satellitesimal and the embryo. First goes to the rest frame of the embryo,
/// then evolves each particle according to the A1 function while the embryo remains fixed. If the distance between the embryo
/// and the line traced out by the other particles is less than their combined radii, a collision occurs.
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable status: A 1D array of size n originally initiated to all 1's, later changed to 0's if a particle is absorbed or ejected
/// @variable rSatellites: A 1D array of size 2 where the 0th entry is the embryo radius and the 1st entry is the radius of the satellitesimals
/// @variable numParticles: The number of particles in the simulation: n
/// @variable dt: The time-step used for the simulation
// Fixes in this kernel:
//  (1) `if (0 < t < 1)` parsed as `(0 < t) < 1`, which is true only for
//      t <= 0 and left `dist` UNINITIALIZED for t in (0, 1].
//  (2) The closest-approach parameter used (r - r1).(rTemp - r); the standard
//      point-to-segment formula requires (r1 - r).(rTemp - r), so t had the
//      wrong sign and the wrong branch was selected.
__global__ void collision(double* r, double* v, double* status, double* rSatellites, int numParticles, double dt) {
    size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2; // skip planet (0) and embryo (1)
    double rTemp[3];  // satellitesimal position advanced by dt/4 in the embryo rest frame
    double vTemp[3];  // satellitesimal velocity in the embryo rest frame
    double crossP[3];
    double vecA[3];
    double vecB[3];
    double t;         // parameter of the closest point on segment [r, rTemp] to the embryo
    double dist;      // minimum distance between embryo and the swept segment
    double d1;
    double d2;
    if (id < numParticles) {
        // Go to the rest frame of the embryo (particle 1).
        vTemp[0] = v[id] - v[1];
        vTemp[1] = v[numParticles+id] - v[numParticles+1];
        vTemp[2] = v[2*numParticles+id] - v[2*numParticles+1];
        // Evolve the satellitesimal by a quarter step.
        rTemp[0] = r[id] + vTemp[0] * dt/4.;
        rTemp[1] = r[numParticles+id] + vTemp[1] * dt/4.;
        rTemp[2] = r[2*numParticles+id] + vTemp[2] * dt/4.;
        // t = ((r1 - r) . (rTemp - r)) / |rTemp - r|^2  -- the parameter that
        // minimizes the distance from point r1 to the segment r + t*(rTemp - r).
        t = ((r[1]-r[id]) *(rTemp[0]-r[id]) +\
             (r[numParticles+1]-r[numParticles+id]) *(rTemp[1]-r[numParticles+id]) +\
             (r[2*numParticles+1]-r[2*numParticles+id])*(rTemp[2]-r[2*numParticles+id])) /\
            ((rTemp[0]-r[id]) *(rTemp[0]-r[id]) +\
             (rTemp[1]-r[numParticles+id]) *(rTemp[1]-r[numParticles+id]) +\
             (rTemp[2]-r[2*numParticles+id]) *(rTemp[2]-r[2*numParticles+id]));
        if (t > 0. && t < 1.) {
            // Closest point lies inside the segment: perpendicular distance
            // |(r1 - r) x (r1 - rTemp)| / |rTemp - r|
            vecA[0] = r[1]-r[id], vecA[1] = r[numParticles+1]-r[numParticles+id], vecA[2] = r[2*numParticles+1]-r[2*numParticles+id];
            vecB[0] = r[1]-rTemp[0], vecB[1] = r[numParticles+1]-rTemp[1], vecB[2] = r[2*numParticles+1]-rTemp[2];
            crossProduct(vecA, vecB, crossP);
            dist = norm3d(crossP[0],crossP[1],crossP[2])*rnorm3d(rTemp[0]-r[id], rTemp[1]-r[numParticles+id], rTemp[2]-r[2*numParticles+id]);
        }
        else {
            // Closest point is an endpoint; take the nearer of the two.
            // A plain `else` guarantees dist is always initialized.
            d1 = norm3d(r[id]-r[1], r[numParticles+id]-r[numParticles+1], r[2*numParticles+id]-r[2*numParticles+1]);
            d2 = norm3d(rTemp[0]-r[1], rTemp[1]-r[numParticles+1], rTemp[2]-r[2*numParticles+1]);
            dist = fmin(d1, d2);
        }
        // Collision if the swept path comes within the sum of the radii.
        if (dist < rSatellites[0] + rSatellites[1])
            status[id] = 4;
    }
}
/// @brief Function to find cross product of two 3-vectors, vect_A and vect_B, and stores the result in cross_P
/// @variable vect_A: Any 3-vector
/// @variable vect_B: Any 3-vector
/// @variable cross_P: Any 3-vector, holds the result of the cross product
void crossProduct2(double *vect_A, double *vect_B, double *cross_P) {
    // Unpack operands once for readability.
    const double ax = vect_A[0], ay = vect_A[1], az = vect_A[2];
    const double bx = vect_B[0], by = vect_B[1], bz = vect_B[2];
    // Standard right-handed cross product.
    cross_P[0] = ay * bz - az * by;
    cross_P[1] = az * bx - ax * bz;
    cross_P[2] = ax * by - ay * bx;
}
/// @brief Finds the eccentricity of all particles
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable ecc: A 1D array of size n that holds the eccentricity of each particle
/// @variable numParticles: The number of particles in the simulation: n
// BUG FIX: L[1] previously computed (z-offset)*vy - (y-offset relative to
// x0)*vz; the y-component of r x v is rz*vx - rx*vz, as written below
// (L[0] and L[2] already followed the correct pattern).
__global__ void calcEccentricity(double *r, double *v, double *m, double *ecc, int numParticles) {
    size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1; // skip central planet (0)
    double L[3]; // angular momentum, L = (r - r0) x v
    double eccTemp[3]; // components of the eccentricity vector, e = (v x L)/mu - rhat
    double mu; // standard gravitational parameter (m0 + m in code units)
    double invdist; // inverse distance between particle and central planet
    // NOTE(review): v is the absolute velocity, not relative to particle 0 --
    // consistent with the rest of this file; confirm the intended frame.
    if (id < numParticles) {
        mu = m[0] + m[id];
        invdist = rnorm3d(r[id]-r[0], r[numParticles+id]-r[numParticles], r[2*numParticles+id]-r[2*numParticles]);
        L[0] = (r[numParticles+id]-r[numParticles])*v[2*numParticles+id] - (r[2*numParticles+id]-r[2*numParticles])*v[numParticles+id];
        L[1] = (r[2*numParticles+id]-r[2*numParticles])*v[id] - (r[id]-r[0])*v[2*numParticles+id];
        L[2] = (r[id]-r[0])*v[numParticles+id] - (r[numParticles+id]-r[numParticles])*v[id];
        eccTemp[0] = (1./mu) * (v[numParticles+id]*L[2] - v[2*numParticles+id]*L[1]) - (r[id]-r[0]) * invdist;
        eccTemp[1] = (1./mu) * (v[2*numParticles+id]*L[0] - v[id]*L[2]) - (r[numParticles+id]-r[numParticles]) * invdist;
        eccTemp[2] = (1./mu) * (v[id]*L[1] - v[numParticles+id]*L[0]) - (r[2*numParticles+id]-r[2*numParticles]) * invdist;
        ecc[id] = norm3d(eccTemp[0], eccTemp[1], eccTemp[2]); // scalar eccentricity
    }
}
/// @brief Reduce last warp (unrolled) in reduction for the A2 operator and B operator. For more details,
/// visit https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
template <unsigned int blockSize>
__device__ void warpReduce(volatile double* sdata, int tid) {
// Final unrolled warp stage of the shared-memory tree reduction; `volatile`
// forces the intermediate shared-memory stores/loads between steps.
// NOTE(review): this classic pattern relies on implicit warp-synchronous
// execution (no __syncwarp between steps), which is only safe on pre-Volta
// architectures -- confirm the build target.
// All statements evaluated at compile time
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
/// @brief Reduction kernel for A2 operator for particle 0 and B operator for particle 1. For more details,
/// visit https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
template <unsigned int blockSize>
__global__ void reduce(double *g_idata, double *g_odata, unsigned int n) {
// Sums n doubles from g_idata; each block writes its partial sum to
// g_odata[blockIdx.x]. Needs blockDim.x doubles of dynamic shared memory.
// NOTE(review): the template argument must match blockDim.x for the staged
// guards below to be correct; host code launches reduce<512><<<1, numParticles/2>>>,
// which is only consistent when numParticles == 1024 -- confirm.
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
// Grid-stride accumulation, two elements per thread per pass.
// NOTE(review): g_idata[i+blockSize] is read without a bounds check, so the
// input must extend to i+blockSize on every pass -- confirm callers pad varr.
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// Staged shared-memory halving; the blockSize branches resolve at compile time.
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) warpReduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/// @brief Used to calculate the total linear momentum of the system
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable numParticles: The number of particles in the simulation: n
/// @variable P: Holds the total momentum
/// @brief Computes the magnitude of the system's total linear momentum.
/// BUG FIX: the component accumulators were previously read uninitialized
/// (undefined behavior); they are now zero-initialized, and the magnitude is
/// computed once after the loop instead of every iteration.
void linMomentum(double* v, double* m, int numParticles, double *P) {
    double plin[3] = {0.0, 0.0, 0.0}; // accumulated Px, Py, Pz
    for (int i = 0; i < numParticles; i++) {
        plin[0] += m[i]*v[i];
        plin[1] += m[i]*v[numParticles+i];
        plin[2] += m[i]*v[2*numParticles+i];
    }
    // Magnitude of the total momentum vector.
    *P = sqrt(plin[0]*plin[0] + plin[1]*plin[1] + plin[2]*plin[2]);
}
/// @brief Used to calculate the total mass of the system
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable numParticles: The number of particles in the simulation: n
/// @variable M: Total mass of the system
// Sums the particle masses and stores the total in *M.
void totalMass(double *m, int numParticles, double* M) {
    double sum = 0.0;
    for (int i = 0; i < numParticles; ++i)
        sum += m[i];
    *M = sum;
}
/// @brief Used to calculate the total angular momentum of the system
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable numParticles: The number of particles in the simulation: n
/// @variable L: Total angular momenum of the system
/// @brief Computes the magnitude of the total angular momentum about the
/// central planet (particle 0 contributes nothing, its lever arm is zero).
/// BUG FIX: the component accumulators were previously read uninitialized
/// (undefined behavior); they are now zero-initialized.
void angMomentum(double* r, double* v, double* m, int numParticles, double *L) {
    double Ltemp[3] = {0.0, 0.0, 0.0}; // accumulated Lx, Ly, Lz
    double crossP[3]; // cross product result
    double dirvec[3]; // displacement from the central planet
    double p[3]; // linear momentum of particle i
    for (int i = 1; i < numParticles; i++) {
        dirvec[0] = -r[0]+r[i], dirvec[1] = -r[numParticles]+r[numParticles+i], dirvec[2] = -r[2*numParticles]+r[2*numParticles+i];
        p[0] = m[i]*v[i], p[1] = m[i]*v[numParticles+i], p[2] = m[i]*v[2*numParticles+i];
        crossProduct2(dirvec, p, crossP);
        Ltemp[0] += crossP[0], Ltemp[1] += crossP[1], Ltemp[2] += crossP[2];
    }
    // Magnitude of the total angular momentum vector.
    *L = sqrt(Ltemp[0]*Ltemp[0] + Ltemp[1]*Ltemp[1] + Ltemp[2]*Ltemp[2]);
}
/// @brief Calculates the total energy of the system: kinetic energy plus potential energy
/// @variable r: A 1D array of size 3n of the particle positions indexed as [x0,x1,...,x(n-1),y0,y1,...,y(n-1),z0,z1,...,z(n-1)]
/// @variable v: A 1D array of size 3n of the particle velocities indexed as [v0x,v1x,...,v(n-1)x,v0y,v1y,...,v(n-1)y,v0z,v1z,...,v(n-1)z]
/// @variable m: A 1D array of size n of the particle masses indexed as [m0,m1,...,mn]
/// @variable numParticles: The number of particles in the simulation: n
/// @variable eps: The gravitational softening parameter
/// @brief Total energy: kinetic plus the potential terms actually modeled
/// (each particle with the central planet, and each satellitesimal with the
/// softened embryo -- matching A2_kernel and B_kernel).
/// BUG FIX: the embryo term (i > 1) previously measured the distance to
/// particle 0 instead of particle 1; B_kernel uses r[1]-r[id] with eps, so
/// the energy must too. Also replaces the per-call heap allocation with a
/// stack array and pow(x,2) with plain multiplication.
double energynew(double* r, double* v, double* m, int numParticles, double eps) {
    double T = 0; // kinetic energy
    double U = 0; // potential energy
    double invdist;
    double dirvec[3]; // displacement between a particle pair
    for (int i = 0; i < numParticles; i++) {
        // Kinetic term: 0.5 * m * |v|^2
        T += 0.5 * m[i] * (v[i]*v[i] + v[numParticles+i]*v[numParticles+i] + v[2*numParticles+i]*v[2*numParticles+i]);
        if (i > 0) {
            // Unsoftened interaction with the central planet (particle 0).
            dirvec[0] = -r[0]+r[i], dirvec[1] = -r[numParticles]+r[numParticles+i], dirvec[2] = -r[2*numParticles]+r[2*numParticles+i];
            invdist = m[i] / sqrt(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2]);
            U -= m[0] * invdist;
        }
        if (i > 1) {
            // Softened interaction with the embryo (particle 1).
            dirvec[0] = -r[1]+r[i], dirvec[1] = -r[numParticles+1]+r[numParticles+i], dirvec[2] = -r[2*numParticles+1]+r[2*numParticles+i];
            invdist = m[i] / sqrt(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps);
            U -= m[1] * invdist;
        }
    }
    return T + U;
}
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
// In DEBUG/_DEBUG builds: print the CUDA error string and assert on any
// failure. In release builds the whole check compiles away.
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
// Always return the original code so the call can be used inline.
return result;
}
// Perform the simulation
extern "C" {
// Host driver: advances the disc simulation for numSteps outer steps using a
// symmetric operator splitting (n A1/A2 substeps, one B step, n more A1/A2
// substeps per outer step), then copies results back and prints a summary.
// The commented-out kernel calls are the collision/merge bookkeeping path.
void runSim2(double *r_h, double *v_h, double *m_h, double dt, int numParticles, int n, double eps, int numSteps, double *ecc_h, double *status_h, double *rSatellites_h) {
// Declare useful variables
size_t i, j;
const unsigned int warpSize = 32;
size_t N = 3 * numParticles;
size_t N_bytes = N * sizeof(double);
double rH = 5.37e10/8.8605e9; // scaled
double L; double P; double M; double K; // final angular momentum, linear momentum, mass and energy
double L0; double P0; double M0; double K0; // initial angular momentum, linear momentum, mass and energy
double semMjrAxis;
/*cudaError_t stat1 = cudaMallocHost((void**)&r_h, N_bytes);
if (stat1 != cudaSuccess)
printf("Error allocating pinned host memory for position\n");
cudaError_t stat2 = cudaMallocHost((void**)&v_h, N_bytes);
if (stat2 != cudaSuccess)
printf("Error allocating pinned host memory for velocity\n");
cudaError_t stat3 = cudaMallocHost((void**)&m_h, N_bytes/3);
if (stat3 != cudaSuccess)
printf("Error allocating pinned host memory for mass\n");*/
// Make sure the number of particles is multiple of twice the warp size (2*32)
// for efficiency and reduction
if (numParticles % (2*warpSize) != 0) {
printf("Error: The number of particles must be a multiple of two times the warp size (2*32).\n");
return;
}
// Allocate arrays on device
double *r_d, *v_d, *m_d, *ecc_d, *varr_d, *rSatellites_d, *status_d, *vTemp_d;
cudaMalloc((void**) &r_d, N_bytes);
cudaMalloc((void**) &v_d, N_bytes);
cudaMalloc((void**) &m_d, N_bytes/3);
cudaMalloc((void**) &varr_d, N_bytes);
cudaMalloc((void**) &status_d, N_bytes/3);
cudaMalloc((void**) &ecc_d, N_bytes/3);
cudaMalloc((void**) &rSatellites_d, 2*sizeof(double));
// NOTE(review): vTemp_d is allocated here but never used in the active code
// path, and it is not released in the cudaFree list below.
cudaMalloc((void**) &vTemp_d, numParticles/512*sizeof(double));
// Copy arrays from host to device
cudaMemcpy(r_d, r_h, N_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(v_d, v_h, N_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(m_d, m_h, N_bytes/3, cudaMemcpyHostToDevice);
cudaMemcpy(status_d, status_h, N_bytes/3, cudaMemcpyHostToDevice);
cudaMemcpy(rSatellites_d, rSatellites_h, 2*sizeof(double), cudaMemcpyHostToDevice);
// calculate initial quantities
/*angMomentum(r_h, v_h, m_h, numParticles, &L0);
linMomentum(v_h, m_h, numParticles, &P0);
totalMass(m_h, numParticles, &M0);
K0 = energynew(r_h, v_h, m_h, numParticles, eps);*/
// NOTE(review): the reduce<512> launches below use numParticles/2 threads per
// block; the template parameter must equal blockDim.x, which holds only when
// numParticles == 1024 -- confirm.
for (i = 0; i < numSteps; i++) {
// One time step
for (j = 0; j < n; j++) {
//collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); // check for collision (must do before A1)
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); // if any status got set to a different value, conserve momentum
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); // change updated status value to 0 and remove it from the simulation
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); // update positions
//mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); // check for merger
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); // conserve momentum
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); // change updated status value to 0 and remove it from the simulation
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); // update velocities due to particle 0
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles); // reduction for x-component of v0
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[numParticles], numParticles); // reduction for y-component of v0
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2*numParticles], numParticles); // reduction for z-component of v0
//collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); // same as above
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
//mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
B_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps); // update velocities of other particles due to embryo
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[1], numParticles); // reduction for x-component of v1
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[numParticles+1], numParticles); // reduction for y-component of v0
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2*numParticles+1], numParticles); // reduction for z-component of v0
for (j = 0; j < n; j++) {
//collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
//mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[numParticles], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2*numParticles], numParticles);
//collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
//mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
//consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
//statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
// after each time-step, copy arrays from device back to host to calculate quantities if you wish
/*cudaMemcpy(r_h, r_d, N_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(v_h, v_d, N_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(m_h, m_d, N_bytes/3, cudaMemcpyDeviceToHost);
cudaMemcpy(status_h, status_d, N_bytes/3, cudaMemcpyDeviceToHost);
cudaMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), cudaMemcpyDeviceToHost);
// would be easier to make these device functions so only 4 numbers need to be copied back each time-step
angMomentum(r_h, v_h, m_h, numParticles, &L);
linMomentum(v_h, m_h, numParticles, &P);
totalMass(m_h, numParticles, &M);
K = energynew(r_h, v_h, m_h, numParticles, eps);
calcEccentricity<<<numParticles/64, 64>>>(r_d, v_d, m_d, ecc_d, numParticles);
cudaMemcpy(ecc_h, ecc_d, N_bytes/3, cudaMemcpyDeviceToHost);
semMjrAxis = (m_h[0]+m_h[1])*sqrt(r_h[0]*r_h[0]+r_h[1]*r_h[1]+r_h[2]*r_h[2])/(2*(m_h[0]+m_h[1])-sqrt((r_h[0]-r_h[3])*(r_h[0]-r_h[3])+(r_h[1]-r_h[4])*(r_h[1]-r_h[4])+\
(r_h[2]-r_h[5])*(r_h[2]-r_h[5]))*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5])*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5]));
printf("%.15lf %.15lf %.15lf %.15lf %.15lf %.15lf\n", abs((L-L0)/L0), abs((P-P0)/P0), abs((M-M0)/M0), abs((K-K0)/K0), ecc_h[1], semMjrAxis);*/
}
// Copy arrays from device back to host
cudaMemcpy(r_h, r_d, N_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(v_h, v_d, N_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(m_h, m_d, N_bytes/3, cudaMemcpyDeviceToHost);
cudaMemcpy(status_h, status_d, N_bytes/3, cudaMemcpyDeviceToHost);
cudaMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), cudaMemcpyDeviceToHost);
// Print relevant information
int h = 0;
printf("Embryo radius = %.16lf\n", rSatellites_h[0]);
// List every deactivated particle (status 0) with its final state.
for (int kk = 0; kk < numParticles; kk++) {
if (status_h[kk] == 0) {
printf("Index: %d\n", kk);
printf("New Position\n");
printf("%.16lf %.16lf %.16lf\n", r_h[kk], r_h[numParticles+kk], r_h[2*numParticles+kk]);
printf("New Velocity\n");
printf("%.16lf %.16lf %.16lf\n", v_h[kk], v_h[numParticles+kk], v_h[2*numParticles+kk]);
h += 1;
}
}
printf("%d\n", h);
printf("New Mass Planet\n");
printf("%.16lf\n", m_h[0]);
printf("New Velocity Planet\n");
printf("%.16lf %.16lf %.16lf\n", v_h[0], v_h[numParticles], v_h[2*numParticles]);
printf("New Mass Embryo\n");
printf("%.16lf\n", m_h[1]);
printf("New Velocity Embryo\n");
printf("%.16lf %.16lf %.16lf\n", v_h[1], v_h[numParticles+1], v_h[2*numParticles+1]);
printf("After %d time step(s):\n", numSteps);
printf("r\n");
for (i = 0; i < 3; i ++)
printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[numParticles+i], r_h[2*numParticles+i]);
printf("...\n");
for (i = numParticles - 3; i < numParticles; i++)
printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[numParticles+i], r_h[2*numParticles+i]);
printf("\n");
printf("v\n");
for (i = 0; i < 3; i ++)
printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[numParticles+i], v_h[2*numParticles+i]);
printf("\n");
printf("...\n");
for (i = numParticles - 3; i < numParticles; i ++)
printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[numParticles+i], v_h[2*numParticles+i]);
// Free allocated memory on host and device
cudaFree(r_d);
cudaFree(v_d);
cudaFree(m_d);
cudaFree(varr_d);
cudaFree(status_d);
cudaFree(ecc_d);
cudaFree(rSatellites_d);
}
}
|
3,685 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define TILE_WIDTH 16
//M and N number of threads (grid and block)
void secuential(const int a[] ,const int b[], unsigned long int c[], const int sqrt_dim);
__global__ void multiply( const int A[] ,const int B[], unsigned long int C[] , const int width,const int thread_number)
{
    // Computes one element of C = B + A*B^t + A^t, matching both the CPU
    // reference `secuential` and the label printed by main
    // ("Array C=B + AB^t + A^t"). The previous version computed plain A*B,
    // so the GPU/CPU comparison could never succeed.
    // `thread_number` is kept for interface compatibility (unused, as before).
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    if (col < width && row < width) {
        // Accumulate in the output element's type to match C[].
        unsigned long int acc = B[row * width + col];   // C  = B
        acc += A[col * width + row];                    // C += A^t
        for (int k = 0; k < width; k++)                 // C += A * B^t
            acc += A[row * width + k] * B[col * width + k];
        C[row * width + col] = acc;
    }
}
int main(int argc, char *argv[]){
// Host driver: builds two all-ones square matrices, runs the GPU kernel and the
// CPU reference, times both with clock(), and compares the results element-wise.
// Optional argv: [1] = matrix side length, [2] = N (threads-derived launch size).
//Measure time
clock_t time_begin;
// pointers to host & device arrays
int *d_array1 = 0,*d_array2 = 0; unsigned long int *d_array3 = 0;
int *h_array1 = 0,*h_array2 = 0;unsigned long int*h_array3 = 0;
unsigned long int *h_array_sec= 0;
// size_array is the total element count; it must be a perfect square.
unsigned int size_array=512*512; //here, size_array =L has to be a square
int N=512;
if(argc == 3){
size_array=atoi(argv[1]) * atoi(argv[1]) ;
N=atoi(argv[2]);
}
// malloc columns of host arrays
h_array1 = (int*)malloc( size_array * sizeof(int));
h_array_sec= (unsigned long int*)malloc( size_array * sizeof(unsigned long int));
h_array2 = (int*)malloc( size_array * sizeof(int));
h_array3 = (unsigned long int*)malloc( size_array * sizeof(unsigned long int));
// Fill both inputs with 1 (rand() left commented out for deterministic checks).
//printf("Array A:\n");
for(unsigned long int i=0; i<size_array; i++){
h_array1[i]=1;//rand()%10;
// printf("%i\t", h_array1[i]);
//if((i+1)%(int)sqrt((float)size_array)==0)
// printf("\n");
}
//printf("\n");
//printf("Array B:\n");
for(int i=0; i<size_array; i++){
h_array2[i]=1;//rand()%10;
//printf("%i\t", h_array2[i]);
//if((i+1)%(int)sqrt((float)size_array)==0)
// printf("\n");
}
//printf("\n");
// cudaMalloc a device array
cudaMalloc(&d_array1,size_array * sizeof(int));
cudaMalloc(&d_array2,size_array * sizeof(int));
cudaMalloc(&d_array3,size_array * sizeof(unsigned long int));
// download and inspect the result on the host:
cudaMemcpy(d_array1, h_array1, sizeof(int)*size_array, cudaMemcpyHostToDevice);
cudaMemcpy(d_array2, h_array2, sizeof(int)*size_array, cudaMemcpyHostToDevice);
//dim3 bloque(N,N); //Bloque bidimensional de N*N hilos (max 512 threads in a block)
//dim3 grid(1,1); //Grid bidimensional de M*M bloques
// 2-D launch: TILE_WIDTH x TILE_WIDTH threads per block, enough blocks to
// cover an N x N matrix (ceil division).
dim3 bloque(TILE_WIDTH, TILE_WIDTH);
dim3 grid((int)ceil(double(N)/double(bloque.x)), ceil(double(N)/double(bloque.y)));
int thread_number= N*N;
/*if (N*N > 512){
bloque.x = 512;
bloque.y = 512;
grid.x = ceil(double(N)/double(bloque.x));
grid.y = ceil(double(N)/double(bloque.y));
}*/
printf("%i threads, %ix%i matrix\n", thread_number, (int)sqrt((float)size_array), (int)sqrt((float)size_array));
time_begin=clock();
// NOTE(review): width is passed as sqrt(size_array), which is assumed to equal N — confirm.
multiply<<<grid, bloque>>>(d_array1, d_array2 , d_array3,sqrt((float)size_array), thread_number);
// cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the modern call.
cudaThreadSynchronize();
// download and inspect the result on the host:
cudaMemcpy(h_array3, d_array3, sizeof(unsigned long int)*size_array, cudaMemcpyDeviceToHost);
//printf("GPU time: %f seconds\n", clock() - time_begin);
//windows time
printf("GPU time, %i threads: %f seconds\n", thread_number,(((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
printf("Array C=B + AB^t + A^t :\n");
for(int i=0; i<size_array; i++){
printf("%i\t", h_array3[i]);
if((i+1)%(int)(sqrt((float)size_array))==0)
printf("\n");
}
printf("\n");
time_begin=clock();
// CPU reference computation over the same inputs.
secuential(h_array1, h_array2, h_array_sec, sqrt((float)size_array));
//printf("CPU time: %f seconds\n", clock() - time_begin);
//windows time
printf("CPU time: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
// Element-wise verification of GPU result against the CPU reference.
bool b=true;
for(int i=0; i<size_array; i++){
if(h_array_sec[i] != h_array3[i]){
printf("GPU and CPU have different results (at least) at position %i\n", i);
b=false;
break;
}
}
if(b)
printf("GPU and CPU have the same results\n");
// deallocate memory
free(h_array3); free(h_array2); free(h_array1); free(h_array_sec);
cudaFree(d_array3);cudaFree(d_array2);cudaFree(d_array1);
}
void secuential(const int a[] ,const int b[], unsigned long int c[], const int sqrt_dim){
    // CPU reference: for square sqrt_dim x sqrt_dim matrices stored row-major,
    // compute C = B + A^t + A*B^t, flattened into c[].
    for (int row = 0; row < sqrt_dim; row++) {
        for (int col = 0; col < sqrt_dim; col++) {
            int idx = row * sqrt_dim + col;
            unsigned long int acc = b[idx];          // C  = B
            acc += a[col * sqrt_dim + row];          // C += A^t
            for (int k = 0; k < sqrt_dim; k++)       // C += A * B^t (row of A dot row of B)
                acc += a[row * sqrt_dim + k] * b[col * sqrt_dim + k];
            c[idx] = acc;
        }
    }
}
|
3,686 | #include <stdio.h>
const int N = 20;
const int MAX_WORD_SIZE = 1024;
__global__
void hello(char *a, char *b, int *c, int size, int msize)
{
    // One thread per word: `a` packs `size` NUL-terminated words in fixed
    // N-char rows, `b` is the msize-char query. Sets c[w] = 1 on an exact
    // word match, 0 otherwise. Assumes word_count <= max threads per block
    // (launched as a single block by main).
    int w = threadIdx.x;
    int base = N * w;
    c[w] = 0;
    for (int i = 0; i < msize; i++) {
        if (a[base + i] != b[i])
            return;                       // mismatch: c[w] stays 0
    }
    // BUG FIX: previously any word with the query as a PREFIX counted as a
    // match (e.g. "inside" matched "in"). Require the word to end here too;
    // a word that exactly fills its N-char row has no stored terminator.
    if (msize >= N || a[base + msize] == '\0')
        c[w] = 1;
}
int main()
{
// Tokenizes input_text.txt into fixed-width rows of `words`, then counts on
// the GPU how many words exactly equal `match_word` (one thread per word).
char words[MAX_WORD_SIZE][N];
FILE *fp;
char *fname = "input_text.txt";
int c;
int i = 0;
int word_count = 0;
// File Load
fp = fopen( fname, "r" );
if( fp == NULL ){
printf( "%sファイルが開けません¥n", fname );
return -1;
}
// Split on common delimiters; each token is NUL-terminated in its row.
// NOTE(review): no bounds check against MAX_WORD_SIZE words or N chars per
// word — a long input overflows `words`; confirm input constraints.
while( (c = fgetc( fp )) != EOF ){
if(c != ',' && c != '.' && c != ' ' && c != '\n' && c != '\t' && c != '\0'){
//printf("%c", c);
words[word_count][i] = c;
i++;
}
else{
if(i != 0){
//printf("\n");
words[word_count][i] = '\0';
word_count++;
i = 0;
}
}
}
/*
printf("Input: \n");
for(i = 0; i < word_count; i++){
printf("%s\n", words[i]);
}
*/
char match_word[N] = {"in"};
int count[word_count];
char *ad, *bd;
int *cd;
// csize: all word rows; bsize: query length in chars; isize: per-word flags.
const int csize = word_count*N*sizeof(char);
const int bsize = strlen(match_word)*sizeof(char);
const int isize = word_count*sizeof(int);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, bsize );
cudaMalloc( (void**)&cd, isize );
cudaMemcpy( ad, words, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, match_word, bsize, cudaMemcpyHostToDevice );
// One block with one thread per word. NOTE(review): breaks if word_count
// exceeds the device's max threads per block — confirm input size.
dim3 dimBlock( word_count, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd, cd, word_count, bsize);
// Blocking copy also synchronizes with the kernel before reading results.
cudaMemcpy( count, cd, isize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
cudaFree( cd );
// Sum the per-word 0/1 match flags on the host.
int ans = 0;
for(i = 0; i < word_count; i++){
ans += count[i];
//printf("%s, %d\n", words[i], count[i]);
}
printf("%s: %d\n", match_word, ans);
return EXIT_SUCCESS;
}
|
3,687 | // Program for Parallel Vector Addition in CUDA
// For Hadoop-CUDA Lab
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#define N 1024 // size of array
__global__ void add(int *a,int *b, int *c) {
    // One thread per element: c[i] = a[i] + b[i], guarded against
    // over-provisioned grids (N is the fixed array size).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        c[idx] = a[idx] + b[idx];
}
int main(int argc, char *argv[]) {
// Interactive driver: asks for a threads-per-block / blocks-per-grid split of
// the fixed N-element vector addition, times the GPU work with CUDA events,
// and prints every element of the result.
int T = 10, B = 1; // threads per block and blocks per grid, taking default values
int a[N],b[N],c[N];
int *dev_a, *dev_b, *dev_c;
printf("Size of array = %d\n", N);
// Keep prompting until T*B exactly covers the N elements (one thread each).
do {
printf("Enter number of threads per block: ");
scanf("%d",&T);
printf("\nEnter nuumber of blocks per grid: ");
scanf("%d",&B);
if (T * B != N) printf("Error T x B != N, try again");
} while (T * B != N);
cudaEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
cudaMalloc((void**)&dev_a,N * sizeof(int));
cudaMalloc((void**)&dev_b,N * sizeof(int));
cudaMalloc((void**)&dev_c,N * sizeof(int));
for(int i=0;i<N;i++) { // load arrays with some numbers
a[i] = i;
b[i] = i*1;
}
cudaMemcpy(dev_a, a , N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b , N*sizeof(int),cudaMemcpyHostToDevice);
// Copying uninitialized c to the device is harmless (kernel overwrites it).
cudaMemcpy(dev_c, c , N*sizeof(int),cudaMemcpyHostToDevice);
cudaEventCreate( &start ); // instrument code to measure start time
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
add<<<B,T>>>(dev_a,dev_b,dev_c);
// The blocking memcpy is inside the timed region, so the measurement
// includes the device-to-host transfer as well as the kernel.
cudaMemcpy(c,dev_c,N*sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0 ); // instrument code to measue end time
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsed_time_ms, start, stop );
for(int i=0;i<N;i++) {
printf("%d+%d=%d\n",a[i],b[i],c[i]);
}
printf("Time to calculate results: %f ms.\n", elapsed_time_ms); // print out execution time
// clean up
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
3,688 | #include<fstream>
#include<iostream>
#include<vector>
#include<ctime>
#include<cuda.h>
using namespace std;
int N,M;
#define THREADS_PER_BLOCK 512
vector<int> readVector(ifstream &fin)
{
    // Reads one length-prefixed list of ints from the stream and returns it.
    // Side effect: records the length in the file-global N for the caller.
    int count;
    fin >> count;
    vector<int> values;
    for (int idx = 0; idx < count; idx++) {
        int value;
        fin >> value;
        values.push_back(value);
    }
    N = count;
    return values;
}
__global__ void add(int*a, int*b, int*c,int n) {
    // Despite the name, this computes the element-wise PRODUCT:
    // c[i] = a[i] * b[i] for i < n, one thread per element.
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid < n)
        c[gid] = a[gid] * b[gid];
}
void doIt(int* sample,ofstream &fout){
    // Times one GPU element-wise multiply of the N-element sample with itself
    // (the `add` kernel actually multiplies) and logs "{N, seconds}," to fout.
    // Globals: N = element count (set by readVector), M = threads per block.
    clock_t begin = clock();
    int *a, *b, *c;        // host views
    int *d_a, *d_b, *d_c;  // device buffers
    int size = N * sizeof(int);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    a = sample;            // both operands alias the same host sample
    b = sample;
    c = (int *)malloc(size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // BUG FIX: the element count passed to the kernel must be N (total
    // elements), not M (threads per block). Passing M meant only the first
    // M elements were ever computed when N > M.
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);
    // Blocking copy; the extra synchronize lets cudaGetLastError surface
    // any asynchronous kernel failure.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("Error: %s\n", cudaGetErrorString(error));
    }
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout<<". Elapsed time: "<<elapsed_secs<<endl;
    fout<<"{"<<N<<", "<<elapsed_secs<<"},";
}
int main(int argc, char ** argv)
{
// Reads `sample_count` length-prefixed int vectors from argv[1] and times a
// GPU element-wise multiply of each, appending timings to result.txt.
// NOTE(review): argv[1]/argv[2] are dereferenced without an argc check —
// running with fewer than 2 arguments crashes; confirm intended usage.
cout<<"file name: "<<argv[1]<<endl;
cout<<"Sample count: "<<argv[2]<<endl;
cout<<"Threads per block"<<THREADS_PER_BLOCK<<endl;
string fileName=argv[1];
int sample_count=stoi(argv[2]);
// M = threads per block used by doIt (the THREADS_PER_BLOCK macro is unused here).
M=1024;
//cout<<"Sample count: "<<sample_count<<endl;
vector<int> sample;
ifstream fin(fileName);
ofstream fout("result.txt");
for (int i=0;i<sample_count;i++){
cout<<"Sample №"<<i;
sample=readVector(fin);
doIt(&sample[0],fout);
}
fout.close();
return 0;
}
3,689 | #include <assert.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <cstring>
#ifndef gpuAssert
#include <stdio.h>
#define gpuAssert( condition ) { \
if( (condition) != 0 ) { \
fprintf( stderr, "\n FAILURE %s in %s, line %d\n", \
cudaGetErrorString(condition), __FILE__, __LINE__ ) ; \
exit( 1 ); } }
#endif
enum boid_type {PREY_BOID, PREDATOR_BOID, FOOD_BOID};
// Device-global pointer; the host stores an allocation into it via
// cudaMemcpyToSymbol before launching kernels that dereference it.
__device__ float* data;
// Writes each thread's index into data[tid]. Assumes `data` points at an
// allocation with at least blockDim.x elements.
__global__ void test_kernel()
{
const int tid = threadIdx.x;
data[tid]=tid;
}
// Demo: allocates NUMBER floats on the device, publishes the pointer through
// the __device__ symbol `data`, fills it on the GPU, and prints it on the host.
int main1(void)
{
boid_type type = PREY_BOID;
printf("enum: %d\n", type);
const int NUMBER = 1024;
const size_t size = NUMBER*sizeof(float);
float *data_h = (float*)malloc(size);
float *data_d;
float *data_symbol;
cudaMalloc(&data_d, size);
//gpuAssert( cudaGetSymbolAddress((void**)&data_symbol, "data"));
// Copy the device POINTER VALUE into the __device__ symbol (not the buffer).
gpuAssert( cudaMemcpyToSymbol( data, &data_d, sizeof(data_d), 0, cudaMemcpyHostToDevice ) );
test_kernel<<<1, 1024>>>();
// Blocking copy synchronizes with the kernel before the host reads results.
gpuAssert( cudaMemcpy( data_h, data_d, size, cudaMemcpyDeviceToHost ) );
for (int i = 0; i < NUMBER; i++)
printf("%f ", data_h[i]);
return (int)cudaThreadExit();
}
3,690 | #include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
#include <fstream>
#include <iostream>
#define N_ROWS 5
#define N_COLUMNS 6
#define INF 99999
#define K 60000000
#define SERIAL_DEPTH 10
#define GPU_DEPTH 2
#define at(table, i, j) ((table[1] & (1LL << ((i) * N_COLUMNS + j))) ? ( ((table[0] & (1LL << ((i) * N_COLUMNS + j)))!=0LL) + 1 ) : 0)
// error checking for CUDA calls: use this around ALL your calls!
#define GPU_CHECKERROR( err ) (gpuCheckError( err, __FILE__, __LINE__ ))
// Aborts the process with file/line context if a CUDA call returned an error.
// Used through the GPU_CHECKERROR macro above.
static void gpuCheckError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
// Places marker x (1 or 2) at cell (i, j) in the 2-bitmask board encoding
// used by the `at` macro: table[1] bit set = cell occupied; table[0] bit set
// = occupied by player 2. NOTE: the indentation is misleading but the code is
// correct — only the table[0] update is conditional on x==2; the table[1]
// "occupied" bit is set unconditionally for either player.
__host__ __device__ void set_at(long long int table[2], int i, int j, int x) {
if(x==2)
table[0] |= (1LL << (i * N_COLUMNS + j));
table[1] |= (1LL << (i * N_COLUMNS + j));
}
int table_size = N_ROWS * N_COLUMNS;
long long int table[2] = {0LL, 0LL};
// Either 1 or 2
int current_player = 1;
int max_player = 1;
// Each player's score
int score1 = 0;
int score2 = 0;
// current column picked by current player
int current_move;
// current row picked by current player
int current_row;
/*
Prints table and score
*/
// Renders the board to stdout (X = player 1, 0 = player 2, . = empty),
// followed by the column numbers and the global score tally.
void print_table(long long int t[2]) {
printf("~* CONNECT 4 *~ \n \n");
// Print table
for (int i = 0; i < N_ROWS; i++) {
printf("|");
for (int j = 0; j < N_COLUMNS; j++) {
// `at` decodes the 2-bitmask encoding: 0 empty, 1 player 1, 2 player 2.
if (at(t, i, j) == 0)
printf(" . ");
if (at(t, i, j) == 1)
printf(" X ");
if (at(t, i, j) == 2)
printf(" 0 ");
printf("|");
}
printf("\n");
}
// Print numbers
printf("\n+ ");
for (int j=0; j < N_COLUMNS; j++)
printf("%d ",j);
printf("+ \n \n");
// Score
printf("SCORE: \n Player 1 (X, Human) = %d \n Player 2 (0, Computer) = %d \n \n", score1, score2);
}
/*
Checks if player won by making a sequence of 4 markers either
horizontally, vertically or diagonally.
*/
// Returns true if the marker just placed at (current_row, current_move) by
// current_player completes a run of 4+ vertically, horizontally, or on either
// diagonal. Only lines through the last move are checked.
__device__ __host__ int current_player_won(long long int table[2], int current_row, int current_move, int current_player){
// Check for vertical sequence
// Look at last marker placed and compare with the 3 markers below it
if ((current_row < N_ROWS-3)
&& (at(table,current_row,current_move) == at(table,current_row + 1,current_move))
&& (at(table,current_row + 1,current_move) == at(table,current_row + 2,current_move))
&& (at(table,current_row+ 2,current_move) == at(table,current_row + 3,current_move)))
return true;
// Check for horizontal sequence
// Count matching markers left then right of the move, plus the move itself.
int sequence_length = 1;
int j = 1;
while ((current_move - j >= 0) && (at(table,current_row,current_move - j) == current_player)){
j++; sequence_length++;
}
j = 1;
while ((current_move + j < N_COLUMNS) && (at(table,current_row,current_move + j) == current_player)){
j++; sequence_length++;
}
if (sequence_length >= 4)
return true;
//Check for diagonal sequence
// Up-left then down-right from the move.
sequence_length = 1;
j = 1;
while((current_move - j >= 0) && (current_row - j >= 0) && (at(table,current_row - j,current_move - j) == current_player)){
j++; sequence_length++;
}
j = 1;
while ((current_move + j < N_COLUMNS) && (current_row + j <= 5) && (at(table,current_row + j,current_move + j) == current_player)){
j++; sequence_length++;
}
if (sequence_length >= 4)
return true;
//Check for inverted diagonal sequence
// Down-left then up-right from the move.
sequence_length = 1;
j = 1;
while((current_move - j >= 0) && (current_row + j < N_ROWS) && (at(table,current_row + j,current_move - j) == current_player)){
j++; sequence_length++;
}
j = 1;
while ((current_move + j < N_COLUMNS) && (current_row - j >= 0) && (at(table,current_row - j,current_move + j) == current_player)){
j++; sequence_length++;
}
if (sequence_length >= 4)
return true;
return false;
}
// A column is full exactly when its top cell (row 0) is occupied.
__device__ __host__ int column_is_full (long long int table[2], int column_j) {
    return at(table, 0, column_j) != 0;
}
// The board is full exactly when every column's top cell is occupied.
__device__ __host__ int table_is_full(long long int table[2]) {
    for (int col = 0; col < N_COLUMNS; col++)
        if (at(table, 0, col) == 0)
            return false;   // found an open column
    return true;
}
/*
Structures for maintaining the state of the recursion.
*/
// One node of the iterative minimax search.
typedef struct state
{
long long int table[2]; //board state
int current_move; // column played to reach this node (-1 for the origin)
int parent_index; // index of the parent node in the search stack
int node_value; // current best minimax value for this node
int child_count; // -1 means children havent been generated yet, non negative numbers mean how many children are left to be checked for minmax values
int depth; // ply depth below the origin node
} state;
// Value-constructs a state; plain assignment keeps it usable in device code.
__device__ __host__ state new_state(long long int t[2], int current_move, int parent_index, int node_value, int child_count, int depth){
state s;
s.table[0] = t[0];
s.table[1] = t[1];
s.current_move = current_move;
s.parent_index = parent_index;
s.node_value = node_value;
s.child_count = child_count;
s.depth = depth;
return s;
}
// Debug helper: dumps a node's fields (board printing left commented out).
__device__ __host__ void print_state(state s){
// print_table(s.table);
printf("current move %d\n", s.current_move);
printf("parent index %d\n", s.parent_index);
printf("node value %d\n", s.node_value);
printf("child count %d\n", s.child_count);
printf("depth %d\n", s.depth);
}
// Fixed-capacity explicit stack used to run minimax without recursion
// (recursion depth is unsuitable for device code).
typedef struct stack
{
int last_i; // index of the top element; -1 when empty
state data[600]; // capacity must cover the deepest search frontier
} stack;
// No overflow check: callers rely on the 600-slot capacity being sufficient.
__device__ __host__ void stack_push(stack &s, state some_state){
s.last_i = s.last_i + 1;
s.data[s.last_i] = some_state;
}
__device__ __host__ int stack_is_empty(stack &s){
return (s.last_i == -1);
}
__device__ __host__ state stack_pop(stack &s){
state st = s.data[s.last_i];
s.last_i--;
return st;
}
// Returns the top element without removing it.
__device__ __host__ state stack_peek(stack &s){
// printf("last i: %d\n",s.last_i);
return s.data[s.last_i];
}
__device__ __host__ stack new_stack(){
stack s;
s.last_i = -1;
return s;
}
// Index of the last stored frontier node; -1 when empty. Shared by the two
// h_minmax passes: pass 1 fills kTables via add_table, the GPU fills kValues,
// pass 2 drains kValues via retrieve_value in reverse (LIFO) order, which
// matches the stack-based traversal order of both passes.
int k = -1;
// For storing the k nodes in SERIAL DEPTH
long long int * kTables = new long long int [K * 2];
// For getting the k values back from the GPU
int * kValues = new int [K];
// Appends one board (two bitmask words) to the frontier buffer.
void add_table(long long int t[2]){
k++;
kTables[k*2] = t[0];
kTables[k*2+1] = t[1];
}
// Pops the GPU-computed value for the most recently visited frontier node.
int retrieve_value(){
int value = kValues[k];
k--;
return value;
}
// Minmax that runs on the GPU
// Return value of the origin node
// Iterative minimax with alpha-beta pruning over an explicit stack, searching
// GPU_DEPTH plies below current_table. origin_is_max selects whether the
// origin node maximizes. Returns the minimax value of the origin node
// (+1/-1/0 leaf scoring: win for the searching side / loss / neither).
__device__ __host__ int d_minmax(long long int current_table[2], int origin_is_max){
int value = INF;
if (origin_is_max) value = -INF;
state origin;
origin = new_state(current_table, -1, -1, value, -1, 0);
stack s = new_stack();
stack_push(s, origin);
// int best_move = -1;
int final_value = -1;
// Each stack entry is visited twice: once to expand children, once (when
// child_count reaches 0) to fold its value into its parent.
while(!stack_is_empty(s)){
state current_state = stack_peek(s);
int current_index = s.last_i;
int current_depth = current_state.depth;
int is_max = (current_depth % 2 == 0);
if (!origin_is_max) is_max = !(is_max);
// last_player = the player whose move produced this node.
int last_player = 1;
if (is_max) last_player = 2;
int current_move = current_state.current_move;
int parent_index = current_state.parent_index;
// printf("current state:\n");
// print_state(current_state);
int lose = 0, full_table = 0, all_children_accounted_for = 0;
// If not the origin node, then test for win and full table situations
if (current_move != -1){
// retrieve row index of current state's move
int row = 0;
while (at(current_state.table,row, current_move) == 0)
row++;
//check if player wins in this move
lose = current_player_won(current_state.table, row, current_move, last_player);
//check if table gets full
full_table = table_is_full(current_state.table);
}
all_children_accounted_for = current_state.child_count == 0;
int at_max_depth = (current_depth == GPU_DEPTH);
//if node is terminal (leaf) or all children have been computed
if (lose || full_table || all_children_accounted_for || at_max_depth){//
int value = 0;
if (lose)
value = -1;
if (!is_max)
value = -value;
if (all_children_accounted_for)
value = current_state.node_value;
// If origin node, then end search
if (current_move == -1){
final_value = value;
stack_pop(s);
continue;
}
state parent_state = s.data[parent_index];
int parent_value = parent_state.node_value;
int parents_parent_index = s.data[parent_index].parent_index;
//If current state is max, the parent state is min
if(is_max){
// if parent state has bigger value, give it the smaller value
if (parent_value > value){
s.data[parent_index].node_value = value;
// if(current_depth == 1)
// best_move = current_move;
if (parents_parent_index != -1){
int alpha = s.data[parents_parent_index].node_value;
int beta = value;
// Alpha-beta cut: discard the parent's remaining children by
// truncating the stack back to the parent.
if (alpha >= beta){
s.last_i = parent_index;
s.data[parent_index].child_count = 0;
continue;
}
}
}
}//otherwise, parent state is max
else{
if (parent_value < value){
s.data[parent_index].node_value = value;
// if(current_depth == 1)
// best_move = current_move;
if (parents_parent_index != -1){
int beta = s.data[parents_parent_index].node_value;
int alpha = value;
if (alpha >= beta){
s.last_i = parent_index;
s.data[parent_index].child_count = 0;
continue;
}
}
}
}
s.data[parent_index].child_count--;
stack_pop(s);
continue;
}
// Generate children
// One child per non-full column; the marker lands on the lowest empty row.
int child_count = 0;
int child_value = -INF;
if (is_max) child_value = INF;
for(int j = 0; j < N_COLUMNS; j++ ){
if (column_is_full(current_state.table, j)) continue;
child_count++;
long long int child_table[2];
child_table[0] = current_state.table[0];
child_table[1] = current_state.table[1];
// Find row where we can play next
int row = N_ROWS-1;
while (at(child_table,row, j) != 0)
row--;
if (is_max)
set_at(child_table, row, j, 1);
else
set_at(child_table, row, j, 2);
state child;
child = new_state(child_table, j, current_index, child_value, -1, current_depth+1);
stack_push(s, child);
}
s.data[current_index].child_count = child_count;
}
// printf("best move %d\n", best_move);
return final_value;
}
// Kernel function
// Receives all tables on the SERIAL DEPTH level of the tree and returns all the computed values
// One thread per frontier board: loads board x from the packed `tables`
// buffer (two long longs per board), runs the bounded-depth minimax, and
// writes the result into values[x]. `k` is the index of the LAST board, so
// threads with x > k are out of range and return early.
__global__ void minmax_gpu (long long int * tables, int * values, int origin_is_max, int k)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x > k) return;
long long int table[2];
table[0] = tables[x*2];
table[1] = tables[x*2+1];
int value = d_minmax(table, origin_is_max);
values[x] = value;
}
// CPU minmax. Called twice per turn, once to store all the nodes in the SERIAL DEPTH level of the tree
// and another one after the GPU part has returned with the computer values for those nodes
// Host-side iterative minimax down to SERIAL_DEPTH plies. With first_pass=1
// it only COLLECTS the frontier boards at SERIAL_DEPTH into kTables (no
// values yet); with first_pass=0 it re-runs the same traversal, treating
// SERIAL_DEPTH nodes as leaves whose values come from kValues (filled by the
// GPU between the passes, consumed in LIFO order — the two passes visit the
// frontier in the same order). Returns the best first move (column index).
__host__ int h_minmax(long long int current_table[2], int origin_is_max, int first_pass){
int breadth=0;
int value = INF;
if (origin_is_max) value = -INF;
state origin;
origin = new_state(current_table, -1, -1, value, -1, 0);
stack s = new_stack();
stack_push(s, origin);
int best_move = -1;
if (first_pass){
k = -1;
}
while(!stack_is_empty(s)){
state current_state = stack_peek(s);
int current_index = s.last_i;
int current_depth = current_state.depth;
int is_max = (current_depth % 2 == 0);
if (!origin_is_max) is_max = !(is_max);
int last_player = 1;
if (is_max) last_player = 2;
int current_move = current_state.current_move;
int parent_index = current_state.parent_index;
// printf("current state:\n");
// print_state(current_state);
int lose = 0, full_table = 0, all_children_accounted_for = 0;
// If not the origin node, then test for win and full table situations
if (current_move != -1){
// retrieve row index of current state's move
int row = 0;
while (at(current_state.table,row, current_move) == 0)
row++;
//check if player wins in this move
lose = current_player_won(current_state.table, row, current_move, last_player);
//check if table gets full
full_table = table_is_full(current_state.table);
}
all_children_accounted_for = current_state.child_count == 0;
int second_pass_leaf = (current_depth == SERIAL_DEPTH && !first_pass);
//if node is terminal (leaf) or all children have been computed
if (lose || full_table || all_children_accounted_for || second_pass_leaf){//
// If origin node, then end search
if (current_move == -1){
stack_pop(s);
continue;
}
int value = 0;
if (lose){
value = -1;
if (!is_max)
value = -value;
}
else if (all_children_accounted_for)
value = current_state.node_value;
else if (second_pass_leaf){
// Leaf value was computed on the GPU for this frontier board.
value = retrieve_value();
// printf("value %d\n", value);
}
state parent_state = s.data[parent_index];
int parent_value = parent_state.node_value;
//If current state is max, the parent state is min
if(is_max){
// if parent state has bigger value, give it the smaller value
if (parent_value > value){
s.data[parent_index].node_value = value;
// Depth-1 nodes are the root's direct moves; remember the best one.
if(current_depth == 1)
best_move = current_move;
}
}//otherwise, parent state is max
else{
if (parent_value < value){
s.data[parent_index].node_value = value;
if(current_depth == 1)
best_move = current_move;
}
}
s.data[parent_index].child_count--;
stack_pop(s);
continue;
}
// First pass: frontier node — record its board for the GPU and stop descending.
if (current_depth == SERIAL_DEPTH && first_pass){
add_table(current_state.table);
breadth++;
// printf("breadth %d\n", breadth);
stack_pop(s);
s.data[parent_index].child_count--;
continue;
}
// Generate children
int child_count = 0;
int child_value = -INF;
if (is_max) child_value = INF;
for(int j = 0; j < N_COLUMNS; j++ ){
if (column_is_full(current_state.table, j)) continue;
child_count++;
long long int child_table[2];
child_table[0] = current_state.table[0];
child_table[1] = current_state.table[1];
// Find row where we can play next
int row = N_ROWS-1;
while (at(child_table,row, j) != 0)
row--;
if (is_max)
set_at(child_table, row, j, 1);
else
set_at(child_table, row, j, 2);
state child;
child = new_state(child_table, j, current_index, child_value, -1, current_depth+1);
stack_push(s, child);
}
s.data[current_index].child_count = child_count;
}
// printf("best move %d\n", best_move);
// printf("breadth0 %d\n", k);
return best_move;
}
// Persistent device buffers (allocated once in main, sized for K boards).
long long int *d_tables;
int *d_values;
// Main min max, encapsulates all the CPU and GPU dymanics:
// pass 1 on the CPU collects the SERIAL_DEPTH frontier, the GPU evaluates
// every frontier board in parallel, pass 2 on the CPU folds the GPU values
// back up the tree. Returns the chosen column for the current board.
int minmax(long long int current_table[2], int origin_is_max){
// Computer tables at SERIAL DEPTH level
printf("First CPU pass \n");
h_minmax(current_table, origin_is_max, 1);
printf("Number of nodes found at SERIAL DEPTH level %d\n", k+1);
// Whether the frontier nodes themselves are max nodes.
int is_max = (SERIAL_DEPTH % 2 == 0);
if (!origin_is_max) is_max = !(is_max);
// k == -1 means the whole subtree terminated above the frontier: skip the GPU.
if (k > -1){
// printf("Bytes sent to GPUK*2 * sizeof (long long int) %lu\n", (K*2 * sizeof (long long int)));
printf("K*2 * sizeof (long long int) %lu\n", (K*2 * sizeof (long long int)));
printf("k*2 * sizeof (long long int) %lu\n", (k*2 * sizeof (long long int)));
GPU_CHECKERROR(
cudaMemcpy ((void *) d_tables,
(void *) kTables,
(k+1)*2 * (sizeof(long long int)),
cudaMemcpyHostToDevice)
);
unsigned int threads_per_block = 512;
unsigned int num_blocks = ceil ((k+1) / (1.0*threads_per_block) );
// launch the kernel:
printf("Launching Kernel \n");
minmax_gpu<<<num_blocks, threads_per_block>>>
(d_tables,
d_values,is_max,k);
// get back the move:
GPU_CHECKERROR(
cudaMemcpy ((void *) kValues,
(void *) d_values,
K*sizeof(int),
cudaMemcpyDeviceToHost)
);
// make sure the GPU is finished doing everything!
GPU_CHECKERROR(
cudaDeviceSynchronize()
);
printf( "Errors?: %s \n", cudaGetErrorString(cudaPeekAtLastError()));
}
printf("Second CPU pass \n");
int move = h_minmax(current_table, origin_is_max, 0);
printf("Best move computed: %d\n",move);
// printf("k %d\n", k);
return move;
}
// Resets both board bitmask words to an empty table.
void clear_table(){
table[0]= 0LL;
table[1]= 0LL;
}
/*
Ask player which column to pick and change table accordingly
*/
/*
 Ask the human (player 1) for a column, or let the minimax AI (player 2)
 choose one, then drop the marker into the lowest empty cell of that column.
 Updates globals: current_move, current_row, and the board `table`.
*/
void pick_column() {
    if (current_player == 1){
        current_move = -1;
        printf("Pick a column, then press enter, player %d:\n", current_player);
        // BUG FIX: the scanf arguments were corrupted by HTML-entity mangling
        // ("&curren" -> '¤'); restored to &current_move so the input compiles
        // and is actually stored.
        scanf ("%d", &current_move);
        // while move is invalid, keep asking
        while(current_move < 0 || current_move > N_COLUMNS-1 || column_is_full(table, current_move)){
            printf("invalid move, pick another column:\n");
            scanf ("%d", &current_move);
        }
    }else{
        // The computer searches from the current board; player 2 maximizes
        // iff it was the starting (max) player this match.
        current_move = minmax(table,max_player==2);
    }
    // Find row where his move will be performed:
    // scan up from the bottom row past occupied cells to the first empty one.
    int row = N_ROWS-1;
    while (at(table,row,current_move) != 0)
        row--;
    // Change table accordingly
    set_at(table, row, current_move, current_player);
    // Store row where player just placed his marker
    // Used to check if current player won the game
    current_row = row;
}
/*
Switch current player
*/
// Toggles the global current_player between 1 and 2.
void switch_player(){
    current_player = (current_player == 1) ? 2 : 1;
}
/*
Increase current player's score
*/
// Increments the global score tally of the player who just won.
void update_score(){
    int *winner = (current_player == 1) ? &score1 : &score2;
    (*winner)++;
}
// Game driver: allocates the persistent GPU buffers once, then loops over
// matches (human vs. minimax AI) until the user declines a rematch.
int main (int argc, char *argv[])
{
// One-time device allocations reused by every minmax() call.
GPU_CHECKERROR(
cudaMalloc ((void **) &d_tables, K * 2 * sizeof (long long int))
);
GPU_CHECKERROR(
cudaMalloc ((void **) &d_values, K * sizeof (int))
);
// game loop
while (true) {
clear_table();
print_table(table);
// The player who starts is the max player
max_player = current_player;
// match loop
while (true) {
pick_column();
print_table(table);
if (current_player_won(table, current_row, current_move, current_player)){
update_score();
print_table(table);
printf("Congratulations, Player %d! \n", current_player);
// leave match
break;
}
// If nobody wins and table is full, we come to a draw
else if (table_is_full(table)){
printf("Draw! \n");
// leave match
break;
}
switch_player();
}
printf("Do you wish to play again?(y/n) \n");
char play_again;
// Leading space in the format skips leftover whitespace/newlines.
scanf (" %c",&play_again);
if (!(play_again == 'y' || play_again == 'Y'))
break;
}
cudaFree (d_tables);
cudaFree (d_values);
delete[] kValues;
delete[] kTables;
return 0;
}
3,691 | #include <cufft.h>
#include <stdio.h>
#include <malloc.h>
#define NX 64
#define BATCH 1
#define pi 3.141592
// Fills data[i] with the test signal cos(x) - 3*sin(x) sampled at
// x = i * 2*pi/NX (imaginary part 0). Launched with exactly NX threads by
// main; the added bounds guard makes over-provisioned grids safe too.
__global__ void gInitData(cufftComplex *data){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= NX)
        return;   // guard: never write past the NX-element buffer
    float x = i * 2.0f * pi / (NX);
    data[i].x = cosf(x) - 3.0f * sinf(x);
    data[i].y = 0.0f;
}
// Generates a known test signal on the GPU, runs a forward complex-to-complex
// FFT with cuFFT in place, and prints the NX spectrum bins.
int main(){
// initialize the data (emulating acquisition of experimental samples):
cufftHandle plan;
cufftComplex *data;
cufftComplex *data_h=(cufftComplex*)calloc(NX,sizeof(cufftComplex));;
cudaMalloc((void**)&data, sizeof(cufftComplex)*NX*BATCH);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Cuda error: Failed to allocate\n");
return -1;
}
gInitData<<<1, NX>>>(data);
cudaDeviceSynchronize();
// configure and execute the cuFFT plan:
if (cufftPlan1d(&plan, NX, CUFFT_C2C, BATCH) != CUFFT_SUCCESS){
fprintf(stderr, "CUFFT error: Plan creation failed");
return -1;
}
// In-place forward transform (input and output both point at `data`).
if (cufftExecC2C(plan, data, data, CUFFT_FORWARD) != CUFFT_SUCCESS){
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
return -1;
}
if (cudaDeviceSynchronize() != cudaSuccess){
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return -1;
}
cudaMemcpy(data_h, data, NX*sizeof(cufftComplex),
cudaMemcpyDeviceToHost);
// Print real and imaginary parts of each bin.
for(int i=0;i<NX;i++)
printf("%g\t%g\n", data_h[i].x, data_h[i].y);
cufftDestroy(plan);
cudaFree(data);
free(data_h);
return 0;
}
|
3,692 | #include "includes.h"
// Computes the squared Euclidean distance between every (reference, query)
// point pair: one thread per pair, x indexing the query point and y the
// reference point. Reference points are read column-wise from a 2D texture;
// query points are read from a pitched row-major array (`query_pitch` in
// elements). dist[y * query_pitch + x] receives the sum over `height`
// dimensions. NOTE(review): dist is indexed with the QUERY pitch — assumes
// dist was allocated with the same pitch as query; confirm at the call site.
__global__ void compute_distance_texture(cudaTextureObject_t ref, int ref_width, float * query, int query_width, int query_pitch, int height, float* dist) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ( xIndex<query_width && yIndex<ref_width) {
float ssd = 0.f;
// Accumulate squared per-dimension differences.
for (int i=0; i<height; i++) {
float tmp = tex2D<float>(ref, (float)yIndex, (float)i) - query[i * query_pitch + xIndex];
ssd += tmp * tmp;
}
dist[yIndex * query_pitch + xIndex] = ssd;
}
}
3,693 | #include "includes.h"
#define THREADS_PER_BLOCK 1024
#define TIME 3600000
// Stencil step: b[i] = (a[i-1] + a[i+1]) / 2 for interior elements
// (0 < i < arraySize-1); boundary elements are left untouched. c_d is
// unused but kept for interface compatibility.
// BUG FIX: __syncthreads() previously sat inside the divergent `if`, which
// is undefined behavior when some threads of a block skip the branch. The
// barrier is hoisted so every thread reaches it; the float literal 2.0f
// also avoids a silent promotion to double arithmetic.
__global__ void compute(float *a_d, float *b_d, float *c_d, int arraySize)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    bool interior = (ix > 0 && ix < arraySize - 1);
    float temp = 0.0f;
    if (interior)
        temp = (a_d[ix + 1] + a_d[ix - 1]) / 2.0f;
    __syncthreads();   // block-wide barrier between the reads and the writes
    if (interior)
        b_d[ix] = temp;
}
3,694 | //
// Created by lidan on 26/10/2020.
//
|
3,695 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <thrust/fill.h>
// Binary SAXPY functor for thrust::transform: given (x, y), yields a*x + y.
// Usable from both host and device code paths.
struct saxpy_functor
{
    const float a; // scale factor fixed at construction
    saxpy_functor(float coeff) : a(coeff) {}
    __host__ __device__
    float operator()(const float& x, const float& y) const
    {
        return x * a + y;
    }
};
// Computes Y <- A * X + Y in a single fused device pass (no temporary).
void saxpy_fast(float A, thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
    const saxpy_functor op(A);
    thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), op);
}
// Computes Y <- A * X + Y in three passes with an explicit temporary
// (deliberately unfused, as a contrast to saxpy_fast).
void saxpy_slow(float A, thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
    thrust::device_vector<float> scaled(X.size());
    // scaled <- A
    thrust::fill(scaled.begin(), scaled.end(), A);
    // scaled <- A * X
    thrust::transform(X.begin(), X.end(), scaled.begin(), scaled.begin(),
                      thrust::multiplies<float>());
    // Y <- scaled + Y
    thrust::transform(scaled.begin(), scaled.end(), Y.begin(), Y.begin(),
                      thrust::plus<float>());
}
int main()
{
    // Host-side inputs.
    const float A = 2.0;
    std::vector<float> h_x = { 1.0, 2.0, 3.0 };
    std::vector<float> h_y = { 4.0, 5.0, 6.0 };
    // Mirror the inputs onto the device and run the unfused SAXPY.
    thrust::device_vector<float> X(h_x.begin(), h_x.end());
    thrust::device_vector<float> Y(h_y.begin(), h_y.end());
    saxpy_slow(A, X, Y);
    // Bring the result back to the host.
    thrust::host_vector<float> result = Y;
    // Print inputs and the computed A*X + Y.
    std::cout << "A = " << A << std::endl;
    std::cout << "X = ";
    for (float v : h_x) {
        std::cout << v << ", ";
    }
    std::cout << std::endl;
    std::cout << "Y = ";
    for (float v : h_y) {
        std::cout << v << ", ";
    }
    std::cout << std::endl;
    std::cout << "A * X + Y = ";
    for (std::size_t i = 0; i < result.size(); ++i) {
        std::cout << result[i] << ", ";
    }
    std::cout << std::endl;
    return 0;
}
|
3,696 | #include "cuda.h"
#include "stdio.h"
#include "stdlib.h" /* atoi, atof */
// Host-to-device bandwidth micro-benchmark. Versions 0/1 time H2D copies
// (pageable vs pinned); versions 2/3 additionally time a D2H copy back.
// For each power-of-two size in [2^log2N_min, 2^log2N_max] the copy is
// repeated until dur_max ms have accumulated, then the mean is printed.
int main(int argc, char *argv[]) {
int version, log2N_min, log2N_max;
float dur_max;
if (argc == 5) {
version = atoi(argv[1]);
log2N_min = atoi(argv[2]);
log2N_max = atoi(argv[3]);
dur_max = atof(argv[4]) * 1000.f; // CLI takes seconds; work in ms
} else {
printf("Usage: ./p1 <version> <log2N_min> <log2N_max> <time>\n");
printf("Version 0: Copy non-pinned memory from host to device\n");
printf("Version 1: Copy pinned memory from host to device\n");
printf("Version 2: Copy non-pinned memory from host to device and back again\n");
printf("Version 3: Copy pinned memory from host to device and back again\n");
printf("Time in ms\n");
return 0;
}
// Reject unknown versions up front: the branches below would otherwise
// leave h (and d2) uninitialized and pass garbage pointers to the runtime.
if (version < 0 || version > 3) {
fprintf(stderr, "Invalid version %d (expected 0-3)\n", version);
return 1;
}
int N_min = 1 << log2N_min;
int N_max = 1 << log2N_max;
float dur, dur_total;
int num_runs;
// Allocate host resources (pinned for versions 1 and 3)
int *h;
if (version == 0 || version == 2)
h = (int *)malloc(N_max);
else
cudaMallocHost(&h, N_max);
// Allocate device resources
int *d1, *d2 = NULL;
cudaMalloc(&d1, N_max);
if (version == 2 || version == 3) cudaMalloc(&d2, N_max);
printf("%8s %8s %12s %8s %12s\n", "Version", "log2N", "Bytes", "Runs",
"Time");
int log2N = log2N_min;
int N = N_min;
while (log2N <= log2N_max) {
dur_total = 0.f;
num_runs = 0;
while (dur_total < dur_max) {
num_runs++;
// Setup timing
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
if (version == 0 || version == 1) {
// Copy to device
cudaEventRecord(start, 0);
cudaMemcpy(d1, h, N, cudaMemcpyHostToDevice);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
} else {
// Copy to device and back again (d2 is never written; the
// benchmark only cares about transfer time, not contents)
cudaEventRecord(start, 0);
cudaMemcpy(d1, h, N, cudaMemcpyHostToDevice);
cudaMemcpy(h, d2, N, cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
}
// Calculate duration
cudaEventElapsedTime(&dur, start, end);
dur_total += dur;
// Events were (re)created every iteration; destroy them so the
// benchmark does not leak thousands of event handles.
cudaEventDestroy(start);
cudaEventDestroy(end);
}
dur = dur_total / num_runs;
printf("%8d %8d %12d %8d %12.6f\n", version, log2N, N, num_runs, dur);
log2N++;
N *= 2;
}
printf("\n");
// Free resources (matching the allocation flavour used above)
if (version == 0 || version == 2)
free(h);
else
cudaFree(h);
cudaFree(d1);
if (version == 2 || version == 3) cudaFree(d2);
return 0;
}
|
3,697 | extern "C"
// Scatter pass of a tree-partitioning step: each GPU block processes one
// logical tree block (idBl[blockIdx.x]) and redistributes its dots into up to
// four child blocks (Neg, Pos, Mid0, Mid1). Per child, whatToDo selects the
// destination family: 0 = split (blKeep_*), 1 = keep final (blFinal_*),
// 2 = discard (blDiscard_*), 3 = skip. rkBl* hold per-dot ranks inside each
// child (-1 = dot absent); addrPt/addrBloc give the child's point and block
// base addresses. Thread 0 of the first GPU block of each logical block also
// writes the child block descriptors.
// Launch requirement: at least 19 ints of dynamic shared memory
// (7 header slots + 3 arrays of 4).
__global__ void dispatchDots(
//Tree specs
// per Block In
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
int* blLevel,
// per GPU Block In
int* idBl,
int* offsBl,
// input values, per dot
int* rkBlPos,
int* rkBlNeg,
int* rkBlMid0,
int* rkBlMid1,
// input value, per Blocks Out
int* nPtBlPos,
int* nPtBlNeg,
int* nPtBlMid0,
int* nPtBlMid1,
int nBlocks,
int nDots,
int* whatToDo,
int* addrPt,
int* addrBloc,
int* newBlockLvl,
// bloc split
int* blKeep_dotIndexes,
int* blKeep_stBl0, int* blKeep_nPtBl0,
int* blKeep_stBl1, int* blKeep_nPtBl1,
int* blKeep_blLevel,
//bloc keep
int* blFinal_dotIndexes,
int* blFinal_stBl0, int* blFinal_nPtBl0,
int* blFinal_stBl1, int* blFinal_nPtBl1,
int* blFinal_blLevel,
// bloc discard
int* blDiscard_dotIndexes,
int* blDiscard_stBl0, int* blDiscard_nPtBl0,
int* blDiscard_stBl1, int* blDiscard_nPtBl1,
int* blDiscard_blLevel
)
{
// Shared layout: array[0..6] = block header, then whatTD[4], addrPtSh[4],
// addrBlSh[4].
extern __shared__ int array[];
int* whatTD = (int*)&array[7];
int* addrPtSh = (int*)&whatTD[4];
int* addrBlSh = (int*)&addrPtSh[4];
// Fetch block data (single reader, then broadcast through shared memory)
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
array[6]=idBloc;
for (int i=0;i<4;i++) {
whatTD[i]=whatToDo[4*idBloc+i];
addrPtSh[i]=addrPt[4*idBloc+i];
addrBlSh[i]=addrBloc[4*idBloc+i];
/*if (array[0]==0) {
printf("BlocIni= %i; NBloc= %i; AddrPt= %i; AddrBl= %i; WTd= %i \n",idBloc, i, addrPtSh[i], addrBlSh[i], whatTD[i] );
}*/
}
}
__syncthreads();
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int blockLevel = array[5];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
idBloc = array[6];
if (ptToBeComputed<nPts) {
// Oki, copy dots at the proper location
int addr_pt = startIndexBl0+ptToBeComputed;
int id_pt=dotIndexes[addr_pt];
//if (id_pt<0) {printf("Ca joue pas \n");}
int inBloc1 = (ptToBeComputed>=nPtBlock0);
int rK, wTD;
//int* tabDest;
//int isSet=0;
//int shouldBeSomeWhere = 0;
// Child 0: bloc Neg. For each child, the dot may belong to sub-array 0
// (rank at addr_pt) and/or sub-array 1 (rank at addr_pt+nDots); a rank of
// -1 means the dot is not in that sub-array.
wTD=whatTD[0];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlNeg[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
rK=rkBlNeg[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut \n", (addrPtSh[0]+rK+inBloc1*nPtBlNeg[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[0]+rK+nPtBlNeg[idBloc]]=id_pt;//+inBloc1*nPtBlNeg[idBloc]]=id_pt;
}
}
// Child 1: bloc Pos
wTD=whatTD[1];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlPos[addr_pt];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK]=id_pt;
}
rK=rkBlPos[addr_pt+nDots];//+inBloc1*nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[1]+rK+inBloc1*nPtBlPos[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[1]+rK+nPtBlPos[idBloc]]=id_pt;
}
}
// Child 2: bloc Mid0
wTD=whatTD[2];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid0[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n",(addrPtSh[2]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK]=id_pt;
}
rK=rkBlMid0[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[2]+rK+nPtBlMid0[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[2]+rK+nPtBlMid0[idBloc]]=id_pt;
}
}
// Child 3: bloc Mid1
wTD=whatTD[3];
if (wTD!=3) {
//shouldBeSomeWhere=1;
rK=rkBlMid1[addr_pt];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK]=id_pt;
}
rK=rkBlMid1[addr_pt+nDots];
if (rK!=-1) {
//printf("on a mis %i qui vaut %i\n", (addrPtSh[3]+rK+nPtBlMid1[idBloc]), id_pt);
//isSet=1;
if (wTD==0) blKeep_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==1) blFinal_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
if (wTD==2) blDiscard_dotIndexes[addrPtSh[3]+rK+nPtBlMid1[idBloc]]=id_pt;
}
}
//if ((isSet==0)&&(shouldBeSomeWhere==1)) {printf("De bleu! Le point %i n'a été mis nulle part! [%i, %i, %i, %i]\n", id_pt, whatTD[0], whatTD[1], whatTD[2], whatTD[3]);}
}
// Exactly one thread in the whole grid satisfies this per logical block
// (thread 0 of the GPU block with point offset 0): it emits the four child
// block descriptors.
if ((iThread==0)&&(offsPt==0)) {
// needs to fill bloc properties
for (int i=0;i<4;i++) {
int wTD = whatTD[i]; // fixed: original read "int wTD = wTD=whatTD[i];" (redundant self-assignment)
int idNewBloc = addrBlSh[i];
int nPtInBloc0, nPtInBloc1;
if (i==0) {
nPtInBloc0 = nPtBlNeg[idBloc];
nPtInBloc1 = nPtBlNeg[idBloc+nBlocks];
}
if (i==1) {
nPtInBloc0 = nPtBlPos[idBloc];
nPtInBloc1 = nPtBlPos[idBloc+nBlocks];
}
if (i==2) {
nPtInBloc0 = nPtBlMid0[idBloc];
nPtInBloc1 = nPtBlMid0[idBloc+nBlocks];
}
if (i==3) {
nPtInBloc0 = nPtBlMid1[idBloc];
nPtInBloc1 = nPtBlMid1[idBloc+nBlocks];
}
//printf("\n idNewBloc = %i, on en fait %i \n nPtInBloc0 = %i, nPtInBloc1 = %i , addrPtSh = %i \n",idNewBloc, wTD,nPtInBloc0,nPtInBloc1, addrPtSh[i]);
if (wTD==0) {
//SPLIT
//printf("SPLIT!!\n");
blKeep_stBl0[idNewBloc]=addrPtSh[i];
blKeep_nPtBl0[idNewBloc]=nPtInBloc0;
blKeep_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blKeep_nPtBl1[idNewBloc]=nPtInBloc1;
blKeep_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==1) {
//KEEP
blFinal_stBl0[idNewBloc]=addrPtSh[i];
blFinal_nPtBl0[idNewBloc]=nPtInBloc0;
blFinal_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blFinal_nPtBl1[idNewBloc]=nPtInBloc1;
blFinal_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
if (wTD==2) {
//DISCARD
blDiscard_stBl0[idNewBloc]=addrPtSh[i];
blDiscard_nPtBl0[idNewBloc]=nPtInBloc0;
blDiscard_stBl1[idNewBloc]=addrPtSh[i]+nPtInBloc0;
blDiscard_nPtBl1[idNewBloc]=nPtInBloc1;
blDiscard_blLevel[idNewBloc]=newBlockLvl[4*idBloc+i];
}
}
}
} |
3,698 | #include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
const long long tdelay = 1000000LL;
const int hdelay = 1000;
// Busy-waits on the device for roughly tdelay clock cycles, keeping the GPU
// occupied without touching memory.
__global__ void dkern(){
    const long long t0 = clock64();
    while (clock64() < t0 + tdelay) {
        /* spin */
    }
}
int main(int argc, char *argv[]){
    // Optional argv[1] overrides the host-side sleep between launches (µs).
    int pause_us = hdelay;
    if (argc > 1) pause_us = atoi(argv[1]);
    // Deliberately endless: keep re-launching the spin kernel so the GPU
    // always has pending work.
    for (;;) {
        dkern<<<1, 1>>>();
        usleep(pause_us);
    }
    return 0;
}
|
3,699 |
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
/* Program Parameters */
#define MAXN 15000 /* Max value of N */
#define TILE_WIDTH 32 /* Width of each block */
int N; /* Matrix size */
/* Matrices */
float overall; /* accumulated elapsed time (ms) over all input lines */
char buffer[10000]; /* one line of the intermediate file */
char *pbuff; /* parse cursor into buffer */
/* Fixed-capacity host arrays for the parsed timetable data.
 * NOTE(review): dynamic initializers calling malloc at file scope are
 * valid C++ only (not C), and the hard-coded capacities (26 classes,
 * 5 rooms, 11 groups) overflow silently if the input file has more
 * entries — confirm against the file format. */
int *classIdArray = (int *)malloc(sizeof(int)*26);
int *groupIdArray = (int *)malloc(sizeof(int)*26);
int *roomIdArray =(int *) malloc(sizeof(int)*26);
int *roomSizeAsArray = (int *)malloc(sizeof(int)*5);
int *groupSizeAsArray = (int *)malloc(sizeof(int)*11);
int *timeSlotIdArray = (int *)malloc(sizeof(int)*26);
int *profIdArray = (int *)malloc(sizeof(int)*26);
int *clashes=(int *)calloc(26*26,sizeof(int)); /* per-pair clash counters */
/* device-side mirrors of the arrays above */
int *dclassIdArray,*dgroupIdArray,*droomIdArray,*droomSizeAsArray,*dgroupSizeAsArray,*dtimeSlotIdArray,*dprofIdArray,*dclashes;
/* junk */
#define randm() 4|2[uid]&3
/* returns a seed for srand based on the time */
/* Returns a seed for srand: the microsecond field of the current time. */
unsigned int time_seed() {
  struct timeval now;
  struct timezone tz_unused;
  gettimeofday(&now, &tz_unused);
  return (unsigned int)now.tv_usec;
}
//Kernel to calculate fitness
// One thread per (i = threadIdx.x, j = blockIdx.x) pair of timetable slots;
// each clash condition increments that pair's counter, and the host sums the
// whole clashes array afterwards.
// Fixes vs. the original:
//  * clashes was indexed with i*j+i, which maps many (i,j) pairs to the same
//    cell (e.g. every block wrote cell 0 for i==0); since ++ on global memory
//    is not atomic, concurrent blocks lost updates. Each pair now owns the
//    unique cell j*blockDim.x + i, so no increment races. The host-side total
//    (sum over all cells) is unchanged in meaning.
//  * the professor-clash test compared profIds[classIds[j]] with itself
//    (always true); it now compares slot i against slot j.
__global__ void calculateFitness(int *classIds, int *roomIds, int *roomCapacities, int *groupIds, int *groupSizes, int *timeSlotIds, int *profIds, int *clashes)
{
int i = threadIdx.x;
int j = blockIdx.x ;
int cell = j * blockDim.x + i; // unique counter per (i, j) pair
// Room too small for the group taking this class.
if(classIds[i]==classIds[j] && roomCapacities[classIds[i]]<groupSizes[groupIds[classIds[i]]])
clashes[cell]++;
// Two different classes booked into the same room at the same time.
if (roomIds[classIds[j]] == roomIds[classIds[i]] && timeSlotIds[classIds[i]] == timeSlotIds[classIds[j]]
&& classIds[i] != classIds[j]) {
clashes[cell]++;
}
// Two different classes taught by the same professor at the same time.
if (profIds[classIds[i]] == profIds[classIds[j]] && timeSlotIds[classIds[i]] == timeSlotIds[classIds[j]]
&& classIds[i] != classIds[j]) {
clashes[cell]++;
}
}
//Main function to execute fitness function
// Reads the intermediate timetable file line-group by line-group, ships each
// dataset to the GPU, launches calculateFitness, and accumulates the host-
// measured elapsed time.
// NOTE(review): fopen's result is never checked (NULL crashes fgets); device
// buffers are cudaMalloc'ed/cudaFree'd once per input group (expensive — they
// could be hoisted out of the loop); the stop clock is taken right after the
// asynchronous kernel launch without a sync, so it measures launch overhead,
// not kernel runtime; and z (the clash total) is computed but never printed.
int main(int argc, char **argv) {
/* Timing variables */
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
srand(time_seed());
int size = N * N * sizeof( int ); /* NOTE(review): N is never set; size is unused below */
/* --------Reading frm intermediate data ----------*/
FILE *fp;
int i=0;
fp=fopen("file.txt","r");
/* Each iteration consumes 7 consecutive lines: classIds, groupIds, roomIds,
 * roomSizes, groupSizes, timeSlotIds, profIds. Values are whitespace-
 * separated ints; a line ends at CR (13) or LF (10). */
while (1) {
if (!fgets(buffer, sizeof buffer, fp)||feof(fp)) break;
pbuff = buffer;
i=0;
/* copying from intermediate file*/
while (1) {
if (*pbuff == 13 || *pbuff == 10) break;
classIdArray[i] = strtol(pbuff, &pbuff, 10);
//printf(" %d", classIdArray[i]);
i++;
}
//printf("\n");
i=0;
//printf("groupIdArray\n");
fgets(buffer, sizeof buffer, fp);
pbuff = buffer;
while (1) {
if (*pbuff == 13 || *pbuff == 10) break;
groupIdArray[i] = strtol(pbuff, &pbuff, 10);
//printf(" %d", groupIdArray[i]);
i++;
}
fgets(buffer, sizeof buffer, fp);
pbuff = buffer;
i=0;
while (1) {
if (*pbuff == 13 || *pbuff == 10) break;
roomIdArray[i] = strtol(pbuff, &pbuff, 10);
//printf(" %d", roomIdArray[i]);
i++;
}
fgets(buffer, sizeof buffer, fp);
pbuff = buffer;
i=0;
while (1) {
if (*pbuff == 13 || *pbuff == 10) break;
roomSizeAsArray[i] = strtol(pbuff, &pbuff, 10);
//printf(" %d", roomSizeAsArray[i]);
i++;
}
fgets(buffer, sizeof buffer, fp);
pbuff = buffer;
i=0;
while (1) {
if (*pbuff == 13 || *pbuff == 10) break;
groupSizeAsArray[i] = strtol(pbuff, &pbuff, 10);
i++;
}
i=0;
fgets(buffer, sizeof buffer, fp);
pbuff = buffer;
while (1) {
if (*pbuff == 13 || *pbuff == 10) break;
timeSlotIdArray[i] = strtol(pbuff, &pbuff, 10);
i++;
}
fgets(buffer, sizeof buffer, fp);
pbuff = buffer;
i=0;
while (1) {
if (*pbuff == 13 || *pbuff == 10) break;
profIdArray[i] = strtol(pbuff, &pbuff, 10);
i++;
}
/* Device allocations + uploads, once per parsed group. */
cudaMalloc( (void **) &dclashes, sizeof(int)*26*26 );
cudaMalloc( (void **) &dclassIdArray, sizeof(int)*26 );
cudaMalloc( (void **) &dgroupIdArray, sizeof(int)*26 );
cudaMalloc( (void **) &droomIdArray, sizeof(int)*26 );
cudaMalloc( (void **) &droomSizeAsArray, sizeof(int)*5 );
cudaMalloc( (void **) &dgroupSizeAsArray, sizeof(int)*11 );
cudaMalloc( (void **) &dtimeSlotIdArray, sizeof(int)*26 );
cudaMalloc( (void **) &dprofIdArray, sizeof(int)*26 );
/* 26 blocks x 26 threads: one thread per (class-slot i, class-slot j) pair */
dim3 dimGrid(26,1,1);
dim3 dimBlock(26, 1,1);
cudaMemcpy( dclassIdArray, classIdArray, sizeof(int)*26, cudaMemcpyHostToDevice );
cudaMemcpy( dgroupIdArray, groupIdArray, sizeof(int)*26, cudaMemcpyHostToDevice );
cudaMemcpy( droomIdArray, roomIdArray, sizeof(int)*26, cudaMemcpyHostToDevice );
cudaMemcpy( droomSizeAsArray, roomSizeAsArray, sizeof(int)*5, cudaMemcpyHostToDevice );
cudaMemcpy( dgroupSizeAsArray, groupSizeAsArray, sizeof(int)*11, cudaMemcpyHostToDevice );
cudaMemcpy( dtimeSlotIdArray, timeSlotIdArray, sizeof(int)*26, cudaMemcpyHostToDevice );
cudaMemcpy( dprofIdArray, profIdArray, sizeof(int)*26, cudaMemcpyHostToDevice );
/* Start Clock */
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
calculateFitness<<<dimGrid,dimBlock>>>(dclassIdArray,droomIdArray,droomSizeAsArray,dgroupIdArray,dgroupSizeAsArray,dtimeSlotIdArray,dprofIdArray,dclashes);
/* Stop Clock */
/* NOTE(review): no cudaDeviceSynchronize before the stop clock — the launch
 * is asynchronous, so this interval excludes the kernel's execution time
 * (the blocking cudaMemcpy below is what actually waits for it). */
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
cudaMemcpy( clashes, dclashes, sizeof(int)*26*26, cudaMemcpyDeviceToHost );
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Total clash count; computed but never reported. */
int x=0;
int y=26*26;
int z=0;
for(x=0;x<y;x++)
{
z=z+clashes[x];
}
cudaFree(dclashes);
cudaFree(dclassIdArray);
cudaFree(dgroupIdArray);
cudaFree(droomIdArray);
cudaFree(droomSizeAsArray);
cudaFree(dgroupSizeAsArray);
cudaFree(dtimeSlotIdArray);
cudaFree(dprofIdArray);
overall=overall+(float)(usecstop - usecstart)/(float)1000;
}
free(classIdArray);
free(groupIdArray);
free(roomIdArray);
free(roomSizeAsArray);
free(clashes);
free(groupSizeAsArray);
free(timeSlotIdArray);
free(profIdArray);
printf("\nElapsed time = %g ms.\n",overall);
exit(0);
}
|
3,700 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
int main() {
// Reads 2518 price pairs from stdin, then computes the mean and variance of
// the element-wise spread AAPL - MSFT on the device.
thrust::device_vector<double> AAPL;
thrust::device_vector<double> MSFT;
thrust::device_vector<double> MEAN_DIF(2518,0);
double px_aapl, px_msft;
for (int day = 0; day < 2518; ++day) {
std::cin >> px_aapl >> px_msft;
AAPL.push_back(px_aapl);
MSFT.push_back(px_msft);
}
// Spread: MEAN_DIF <- AAPL - MSFT, element-wise.
thrust::transform(AAPL.begin(), AAPL.end(), MSFT.begin(), MEAN_DIF.begin(), thrust::minus<double>());
// NOTE(review): divides by 2517 (N-1) rather than 2518 — an (N-1) divisor is
// unusual for a mean; confirm the intent before "fixing".
const double sum_dif = thrust::reduce(MEAN_DIF.begin(), MEAN_DIF.end());
const double mean = sum_dif / 2517;
std::cout << "Média: " << fabs(mean) << "\n";
// Deviations from the mean, squared, then averaged with the same divisor.
thrust::device_vector<double> means(2518, mean);
thrust::device_vector<double> dps(2518, 0);
thrust::device_vector<double> vars(2518, 0);
thrust::transform(means.begin(), means.end(), MEAN_DIF.begin(), dps.begin(), thrust::minus<double>());
thrust::transform(dps.begin(), dps.end(), vars.begin(), thrust::square<double>());
const double sum_sq = thrust::reduce(vars.begin(), vars.end());
const double var = sum_sq / 2517;
std::cout << "variancia : " << fabs(var) << "\n";
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.