serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
14,101 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
/*
 * Reads the next comma-separated integer from f into *val.
 * Returns 1 when EOF was reached (the parsed value is still stored), 0 otherwise.
 *
 * Fixes over the original:
 *  - c is an int so EOF (-1) is distinguishable from the byte 0xFF
 *    (with plain char the comparison is unreliable / platform-dependent);
 *  - digits beyond the buffer are dropped instead of overflowing num[6];
 *  - the character after a ',' is no longer read and discarded, so inputs
 *    like "1,2,3" parse correctly (atoi skips leading spaces anyway, so
 *    ", "-separated input still works).
 */
int getnextnum(FILE* f, int* val){
    char num[16];
    int idx = 0;
    int c;
    int ret = 0;
    while(1) {
        c = getc(f);
        if(c == EOF) {
            ret = 1;
            break;
        }
        if(c == ',')
            break;
        if(idx < (int)sizeof(num) - 1)  /* keep room for the terminator */
            num[idx++] = (char)c;
    }
    num[idx] = '\0';
    *val = atoi(num);
    return ret;
}
/*
 * Reads all comma-separated integers from ./inp.txt into a heap array.
 * Stores the element count in *arrlen and returns the array (caller frees).
 * Returns NULL with *arrlen == 0 if the file cannot be opened or memory
 * runs out (the original dereferenced a NULL FILE* on a missing file).
 *
 * Growth now uses realloc, which also removes the original's undeclared
 * dependency on memcpy (<string.h> was never included).
 */
int* getarr(int* arrlen) {
    FILE* inp = fopen("./inp.txt", "r");
    if (inp == NULL) {
        *arrlen = 0;
        return NULL;
    }
    int val;
    int count = 0;
    int len = 0;
    int* arr = NULL;
    int end = 0;
    while (!end) {
        if (count == len) {
            len += 10;  /* grow in chunks of 10 */
            int* grown = (int*)realloc(arr, len * sizeof(int));
            if (grown == NULL) {
                free(arr);
                fclose(inp);
                *arrlen = 0;
                return NULL;
            }
            arr = grown;
        }
        end = getnextnum(inp, &val);
        arr[count++] = val;
    }
    fclose(inp);
    /* shrink to the exact size; on failure keep the larger buffer */
    int* shrunk = (int*)realloc(arr, count * sizeof(int));
    if (shrunk != NULL)
        arr = shrunk;
    *arrlen = count;
    return arr;
}
|
14,102 | #include "use_GMRES.cuh"
#include "matrix.cuh"
#include "vector.cuh"
#include <cstdlib>
#include <ctime>
#include <cmath>
// Demo: solve a dense random-looking 5x5 system with GMRES and
// cross-check the answer against A*x and the "backslash" solve b/A.
int useGMRES()
{
    // Matrix entries as a literal table, loaded row by row.
    static const double entries[5][5] = {
        {0.8780, 0.8316, 0.2663, 0.9787, 0.0239},
        {0.1159, 0.2926, 0.2626, 0.7914, 0.2085},
        {0.9857, 0.5109, 0.5826, 0.2115, 0.2943},
        {0.8573, 0.7512, 0.4431, 0.9486, 0.3660},
        {0.4416, 0.3803, 0.4465, 0.0586, 0.8501}
    };
    matrix A(5, 5);
    for (int r = 1; r <= 5; ++r)
        for (int c = 1; c <= 5; ++c)
            A.set(r, c, entries[r - 1][c - 1]);
    // Right-hand side of all ones; x0 is the (default) initial guess.
    vector b(5);
    for (int k = 1; k <= 5; ++k)
        b.set(k, 1);
    vector x0(5);
    std::cout << "matrix A: \n\n" << A;
    std::cout << "vector b: \n\n" << b;
    std::cout << "initial guess vector, x0: \n\n" << x0;
    vector x(5);
    int iter_count;
    GMRES(A, b, x0, x, iter_count);
    std::cout << "GMRES solution, x is: \n\n" << x;
    std::cout << "Check: Ax = \n\n" << A * x;
    std::cout << "Backslash solution b/A \n\n" << b / A;
    return 1;
}
// Demo: same as useGMRES but on the leading 4x4 submatrix.
int useGMRES2()
{
    static const double entries[4][4] = {
        {0.8780, 0.8316, 0.2663, 0.9787},
        {0.1159, 0.2926, 0.2626, 0.7914},
        {0.9857, 0.5109, 0.5826, 0.2115},
        {0.8573, 0.7512, 0.4431, 0.9486}
    };
    matrix A(4, 4);
    for (int r = 1; r <= 4; ++r)
        for (int c = 1; c <= 4; ++c)
            A.set(r, c, entries[r - 1][c - 1]);
    // Ones right-hand side and default-initialized guess.
    vector b(4);
    for (int k = 1; k <= 4; ++k)
        b.set(k, 1);
    vector x0(4);
    std::cout << "matrix A: \n\n" << A;
    std::cout << "vector b: \n\n" << b;
    std::cout << "initial guess vector, x0: \n\n" << x0;
    vector x(4);
    int iter_count;
    GMRES(A, b, x0, x, iter_count);
    std::cout << "GMRES solution, x is: \n\n" << x;
    std::cout << "Check: Ax = \n\n" << A*x;
    std::cout << "Backslash solution b/A \n\n" << b/A;
    return 1;
}
// Demo: tiny 3x3 permutation-like system (easy to verify by eye).
int useGMRES3()
{
    static const double entries[3][3] = {
        {0, 1, 0},
        {1, 0, 0},
        {0, 0, 1}
    };
    matrix A(3, 3);
    for (int r = 1; r <= 3; ++r)
        for (int c = 1; c <= 3; ++c)
            A.set(r, c, entries[r - 1][c - 1]);
    // Right-hand side of twos; default-initialized guess.
    vector b(3);
    for (int k = 1; k <= 3; ++k)
        b.set(k, 2);
    vector x0(3);
    std::cout << "matrix A: \n\n" << A;
    std::cout << "vector b: \n\n" << b;
    std::cout << "initial guess vector, x0: \n\n" << x0;
    vector x(3);
    int iter_count;
    GMRES(A, b, x0, x, iter_count);
    std::cout << "GMRES solution, x is: \n\n" << x;
    std::cout << "Check: Ax = \n\n" << A * x;
    return 1;
}
// Scaling experiment: solve a random dense n x n system (n = 2^pow_)
// with GMRES and print A*x as a sanity check.
int useGMRES_n256()
{
    // Seed once; A's entries below are uniform in [0, 1].
    srand (time(NULL));
    int n ;
    // for this experiment, pow_ = 8 would take too long!
    for (int pow_ = 5; pow_ <= 5; ++pow_){
        n = pow(2, pow_);
        matrix A(n, n);
        for (int i = 1; i <= n; ++i)
            for (int j = 1; j <= n; ++j)
                A.set(i, j, (double)rand() / (double) RAND_MAX);
        // Right-hand side of ones; default-initialized guess.
        vector b(n);
        for (int i = 1; i <= n; ++i)
            b.set(i, 1);
        vector x0(n);
        vector x(n);
        int iter_count;
        GMRES(A, b, x0, x, iter_count);
        std::cout << "Check: Ax = \n\n" << A * x;
    }
    return 1;
}
|
14,103 | #include <stdio.h>
#include <time.h>
#include <malloc.h>
// Wraps a CUDA runtime call: on any error, print the error string with the
// call site (__LINE__/__FILE__) to stderr and terminate the program.
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__),\
exit(1);\
}\
} // macro for error handling
// Number of elements in each benchmark vector (2^20).
const int N = 1 << 20;
// Fills both vectors with the values 0..N-1.
// Rewritten as a grid-stride loop: in the original every launched thread
// wrote the entire arrays (same values, but N redundant racy writes per
// thread); now each element is written by exactly one thread, and any
// launch configuration covers all N elements.
__global__ void gInitVectors(double* vector1, double* vector2) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
         i += blockDim.x * gridDim.x) {
        vector1[i] = (double)i; //rand();
        vector2[i] = (double)i;
    }
}
// Element-wise vectorSum = vector1 + vector2, one element per thread.
// threads_cnt is not used inside the kernel; it is kept so existing
// launch sites compile unchanged.
__global__ void gVectorAddition(double* vector1, double* vector2, double* vectorSum, int threads_cnt) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N)
        vectorSum[idx] = vector1[idx] + vector2[idx];
}
/*
 * Runs one vector-addition timing experiment.
 *   threads_cnt: threads per block (N is a power of two and must divide evenly)
 *   type_time:   1 = wall clock via clock_gettime, anything else = CUDA events
 * Returns the measured time in milliseconds.
 *
 * BUGFIX: in the clock_gettime branch the original added whole seconds to a
 * millisecond fraction (sec + nsec/1e6); seconds are now scaled by 1000 so
 * both terms are milliseconds, matching the printed label.
 */
float testingThreadsOfDevice(int threads_cnt, int type_time) {
    double *vectorSum_d;
    CUDA_CHECK_RETURN(cudaMalloc((void**)&vectorSum_d, N * sizeof(double)));
    double *vector1_d, *vector2_d;
    CUDA_CHECK_RETURN(cudaMalloc((void**)&vector1_d, N * sizeof(double)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&vector2_d, N * sizeof(double)));
    gInitVectors <<< 1, 32 >>> (vector1_d, vector2_d);
    CUDA_CHECK_RETURN(cudaGetLastError());
    float elapsedTime;
    struct timespec mt1, mt2;  // used when type_time == 1
    cudaEvent_t start, stop;   // used otherwise
    if (type_time == 1) {
        clock_gettime(CLOCK_REALTIME, &mt1);
    }
    else {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);  // register the start event
    }
    gVectorAddition <<< N / threads_cnt, threads_cnt >>>
        (vector1_d, vector2_d, vectorSum_d, threads_cnt); // run the addition on the GPU
    cudaDeviceSynchronize(); // wait for the kernel before reading the clock
    if (type_time == 1) {
        clock_gettime(CLOCK_REALTIME, &mt2);
        // milliseconds = seconds * 1000 + nanoseconds / 1e6
        elapsedTime = (float)(mt2.tv_sec - mt1.tv_sec) * 1000.0f +
                      (float)(mt2.tv_nsec - mt1.tv_nsec) / 1e6f;
    }
    else {
        cudaEventRecord(stop, 0);      // register the stop event
        cudaEventSynchronize(stop);    // wait until it has actually happened
        CUDA_CHECK_RETURN(cudaGetLastError());
        cudaEventElapsedTime(&elapsedTime, start, stop); // ms between the events
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        CUDA_CHECK_RETURN(cudaGetLastError());
    }
    printf("blocks = %d, threads per block = %d milliseconds = %e \n",
           N / threads_cnt, threads_cnt, elapsedTime);
    cudaFree(vector1_d);
    cudaFree(vector2_d);
    cudaFree(vectorSum_d);
    return elapsedTime;
}
// Benchmarks the vector addition for 32..1024 threads per block, twice:
// once timed with clock_gettime (type 1) and once with CUDA events (type 2).
int main() {
    for (int type_time = 1; type_time <= 2; type_time++) {
        float cnt_tests = 1;
        // First configuration (32 threads per block) seeds min/max/avg.
        float first = testingThreadsOfDevice(32, type_time);
        float min_time = first;
        float max_time = first;
        float avg_time = first;
        for (int threads = 64; threads <= 1024; threads *= 2) {
            float t = testingThreadsOfDevice(threads, type_time);
            if (t > max_time) max_time = t;
            if (t < min_time) min_time = t;
            avg_time += t;
            cnt_tests++;
        }
        avg_time = avg_time / cnt_tests;
        if (type_time == 1)
            printf("\n time in milliseconds by clock_gettime:\n");
        else
            printf("\n time in milliseconds by Events:\n");
        printf("\t avg_time = %e min_time = %e max_time = %e\n\n", avg_time, min_time, max_time);
    }
    return 0;
}
|
14,104 | /*
* Copyright 2016 Alexander Terenin
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
/*
* Function : nuInvXiInv
* Purpose : draws Exp(1 + theta^2) random variables
* Argument n : size of sampler
* Argument *u : pointer to array of uniforms
* Argument *thetaSq: pointer to parameter
* Output : mutates u and stores result in its place
*/
// C linkage so the host runtime can locate the kernel by its unmangled name.
extern "C"
// Inverse-CDF exponential sampling: overwrites each uniform draw u[i]
// (assumed in (0, 1] — TODO confirm with caller) with a sample from
// Exp(rate = 1 + thetaSq[i]), i.e. -log(u) / (1 + thetaSq[i]).
__global__ void cuda_nuInvXiInv(int n, float *u, float *thetaSq) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n) {
u[i] = (-1.0f/(1.0f + thetaSq[i])) * logf(u[i]);
}
} |
14,105 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// Algorithm switch
#define UNIFORM_CROSSOVER 1
#define ISLAND_MIGRATION 1
// Constants
#define CHROMOSOME_LENGTH 30
#define GENERATIONS 100
#define GRID_DIMENSION 1
#define BLOCK_DIMENSION 256
#define POPULATION_SIZE BLOCK_DIMENSION*GRID_DIMENSION
#define MIGRATION_INTERVAL 20
// Probability and percents
#define SELECTION_FITTEST_PERCENT 20
#define MIGRATION_PERCENT 10
#define CROSSOVER_PROBABILITY 45
#define MUTATION_PROBABILITY 5
// Functions
#define CURAND(x) (int)floor(curand_uniform(&localState)*(x-1))
// Error checking in cuda functions
#define CUDA_ERROR_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its call site and terminates the program.
// BUGFIX: the original only printed and then returned; CUDA errors are
// sticky, so continuing just produces a cascade of misleading follow-on
// failures. Aborting at the first error matches the CUDA_ERROR_CHECK intent.
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        exit((int)code);
    }
}
// Valid Genes
// Alphabet a chromosome may contain. GENES/GENE_LENGTH are the host copies;
// GENES_D/GENE_LENGTH_D are the device copies (NOTE(review): GENES_D places
// the *pointer* in __constant__ memory, not the string bytes themselves).
const char *GENES = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890, .-;:_!\"#%&/\\()=?@${[]}";
const int GENE_LENGTH = 86;
__constant__ const char *GENES_D = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890, .-;:_!\"#%&/\\()=?@${[]}";
__constant__ const int GENE_LENGTH_D = 86;
// Structure for individual genome
// One candidate solution: a fixed-length string of genes plus its fitness
// (number of positions differing from the target string; 0 = solved).
typedef struct Genotype{
char chromosome[CHROMOSOME_LENGTH];
int fitness;
} Individual;
// Methods for critical section
// Methods for critical section
// Single device-wide spin lock used to serialize inter-block migration.
__device__ volatile int sem = 0;
// Spins with atomic compare-and-swap until the lock is taken (0 -> 1).
// NOTE(review): there is no memory fence on acquire; the kernel appears to
// rely on the __syncthreads() calls around the critical section — confirm.
__device__ void acquireSemaphore(volatile int *lock){
while (atomicCAS((int *)lock, 0, 1) != 0);
}
// Releases the lock, then issues a device-wide fence.
// NOTE(review): conventionally the __threadfence() goes *before* the store
// that releases the lock, so protected writes are visible first — confirm.
__device__ void releaseSemaphore(volatile int *lock){
*lock = 0;
__threadfence();
}
/*
 * Runs the entire GA loop (GENERATIONS iterations) on the device.
 * One thread per individual; each block is an "island" that keeps its slice
 * of the population in shared memory and occasionally migrates individuals.
 *   population         - POPULATION_SIZE individuals in global memory (in/out)
 *   targetStr          - device copy of the string being evolved towards
 *   targetStringLength - unused here; CHROMOSOME_LENGTH is used instead
 *   state              - per-thread curand states (from setup_kernel)
 *   convergeGen        - convergeGen[0] records the earliest generation at
 *                        which any individual reached fitness 0
 * The __syncthreads() calls inside "if(i<POPULATION_SIZE)" are safe only
 * because POPULATION_SIZE == BLOCK_DIMENSION*GRID_DIMENSION, so every
 * launched thread enters the branch and reaches each barrier.
 * NOTE(review): convergeGen[0] is updated with a plain read/compare/write
 * from many threads — racy; atomicMin would make it deterministic. Confirm.
 */
__global__ void geneticAlgorithmKernel(Individual *population, char *targetStr, int targetStringLength, curandState *state, int *convergeGen){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<POPULATION_SIZE){
// Copy population from global to shared memory of each block
__shared__ Individual sharedPop[BLOCK_DIMENSION];
int j = threadIdx.x;
if(j < BLOCK_DIMENSION){
sharedPop[j] = population[i];
}
__syncthreads();
// Getting state for random number generation
curandState localState = state[i];
int generation = 0;
while(1){
generation++;
// Fitness calculation: number of characters differing from the target
int tmpFitness = 0;
for (int k=0; k<CHROMOSOME_LENGTH; k++){
if(sharedPop[j].chromosome[k]!=targetStr[k]) tmpFitness++;
}
sharedPop[j].fitness = tmpFitness;
if(tmpFitness == 0 && generation < convergeGen[0]) convergeGen[0] = generation;
if(generation>=GENERATIONS) break;
// Sort based on fitness
// Odd-even transposition sort: BLOCK_DIMENSION/2 passes of an
// even-phase then odd-phase neighbour swap fully sort the block
// by ascending fitness (fittest individuals end up at low indices).
for (int k=0; k<BLOCK_DIMENSION/2; k++){
if(j%2==0 && j<BLOCK_DIMENSION-1){
if(sharedPop[j+1].fitness < sharedPop[j].fitness){
Individual temp = sharedPop[j];
sharedPop[j] = sharedPop[j+1];
sharedPop[j+1] = temp;
}
}
__syncthreads();
if(j%2!=0 && j<BLOCK_DIMENSION-1){
if(sharedPop[j+1].fitness < sharedPop[j].fitness){
Individual temp = sharedPop[j];
sharedPop[j] = sharedPop[j+1];
sharedPop[j+1] = temp;
}
}
__syncthreads();
}
// Migration
// Every MIGRATION_INTERVAL generations the top MIGRATION_PERCENT of
// this island is published to global memory and the next slots are
// refilled from the neighbouring block's slice; the global spin lock
// serializes blocks during the exchange.
#if ISLAND_MIGRATION
if(generation % MIGRATION_INTERVAL == 0){
__syncthreads();
if(j == 0) acquireSemaphore(&sem);
__syncthreads();
int migrationPop = (int) ceil((float) blockDim.x * (float)MIGRATION_PERCENT/(float)100);
if(j<migrationPop){
population[i] = sharedPop[j];
}
else if(j<2*migrationPop) {
int k = (blockDim.x * (blockIdx.x + 1) + threadIdx.x - migrationPop) % POPULATION_SIZE;
sharedPop[j] = population[k];
}
__syncthreads();
if(j == 0) releaseSemaphore(&sem);
__syncthreads();
}
#endif
// Selection
// Threads ranked below the fittest cutoff keep their genome; the rest
// replace theirs by recombining two randomly chosen fit parents.
int fittest = (int)ceil((float) blockDim.x * (float)SELECTION_FITTEST_PERCENT/(float)100);
if(j > fittest){
int p1 = CURAND(fittest);
int p2 = CURAND(fittest);
// Crossover and mutation
#if UNIFORM_CROSSOVER
for(int k=0; k<CHROMOSOME_LENGTH; k++){ // Uniform crossover
int prob = CURAND(100);
if(prob<45) sharedPop[j].chromosome[k] = sharedPop[p1].chromosome[k];
else if(prob<90) sharedPop[j].chromosome[k] = sharedPop[p2].chromosome[k];
else sharedPop[j].chromosome[k] = GENES_D[CURAND(GENE_LENGTH_D)];
}
#else
int partition = CURAND(CHROMOSOME_LENGTH); // Single partition crossover
for(int k=0; k<partition; k++){
sharedPop[j].chromosome[k] = sharedPop[p1].chromosome[k];
if(CURAND(100) < MUTATION_PROBABILITY){
sharedPop[j].chromosome[k] = GENES_D[CURAND(GENE_LENGTH_D)];
}
}
for(int k=partition; k<CHROMOSOME_LENGTH; k++){
sharedPop[j].chromosome[k] = sharedPop[p2].chromosome[k];
if(CURAND(100) < MUTATION_PROBABILITY){
sharedPop[j].chromosome[k] = GENES_D[CURAND(GENE_LENGTH_D)];
}
}
#endif
}
}
// Copy results back to global memory
if(j < BLOCK_DIMENSION){
population[i] = sharedPop[j];
//printf("%d ", i);
}
}
}
// Initializes one curand state per thread, all derived from seed 1234
// with the thread's global index as the sequence number.
__global__ void setup_kernel(curandState *state){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(1234, tid, 0, &state[tid]);
}
// Drives the GPU genetic algorithm: builds a random target string and a
// random initial population, runs the kernel, and prints CSV output:
// convergence generation, H2D ms, kernel ms, D2H ms.
int main(int argc, char* argv[]){
    // Time measurement variables (milliseconds, via CUDA events)
    float h2dTime, d2hTime, kernelTime;
    // BUGFIX: seed the RNG *before* its first use — the original called
    // srand() only after generating the target string, so the target was
    // identical on every run.
    srand(time(0));
    // Host variable initialization: the random target to evolve towards
    int targetStringLength = CHROMOSOME_LENGTH;
    char targetString[CHROMOSOME_LENGTH];
    for(int i=0; i<CHROMOSOME_LENGTH; i++){
        targetString[i] = GENES[rand()%GENE_LENGTH];
    }
    // Starts one past the last generation, meaning "never converged".
    int convergeMin[] = {GENERATIONS+1};
    int population_memory_size = POPULATION_SIZE * sizeof(Individual);
    Individual population[POPULATION_SIZE];
    // Random population initialization
    for(int i=0; i<POPULATION_SIZE; i++){
        for(int j=0; j<targetStringLength; j++){
            population[i].chromosome[j] = GENES[rand()%GENE_LENGTH];
        }
    }
    // Random number generator states for the device
    curandState *devStates;
    CUDA_ERROR_CHECK( cudaMalloc((void **)&devStates, BLOCK_DIMENSION * GRID_DIMENSION * sizeof(curandState)) );
    setup_kernel<<<GRID_DIMENSION, BLOCK_DIMENSION>>>(devStates);
    CUDA_ERROR_CHECK( cudaPeekAtLastError() );
    // Device time measurement events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate memory in device
    Individual *population_d;
    char *targetString_d;
    int *convergeGen;
    CUDA_ERROR_CHECK( cudaMalloc((void **)&population_d, population_memory_size) );
    CUDA_ERROR_CHECK( cudaMalloc((void **)&targetString_d, targetStringLength * sizeof(char)) );
    CUDA_ERROR_CHECK( cudaMalloc((void **)&convergeGen, sizeof(int)) );
    // Copy data from host to device (timed)
    cudaEventRecord(start);
    CUDA_ERROR_CHECK( cudaMemcpy(population_d, population, population_memory_size, cudaMemcpyHostToDevice) );
    CUDA_ERROR_CHECK( cudaMemcpy(targetString_d, targetString, targetStringLength * sizeof(char), cudaMemcpyHostToDevice) );
    CUDA_ERROR_CHECK( cudaMemcpy(convergeGen, convergeMin, sizeof(int), cudaMemcpyHostToDevice) );
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&h2dTime, start, stop);
    // Launch the GA kernel (timed)
    cudaEventRecord(start);
    geneticAlgorithmKernel<<<GRID_DIMENSION, BLOCK_DIMENSION>>>(population_d, targetString_d, targetStringLength, devStates, convergeGen);
    CUDA_ERROR_CHECK( cudaPeekAtLastError() );
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&kernelTime, start, stop);
    // Fetch results back from the device (timed)
    cudaEventRecord(start);
    CUDA_ERROR_CHECK( cudaMemcpy(population, population_d, population_memory_size, cudaMemcpyDeviceToHost) );
    CUDA_ERROR_CHECK( cudaMemcpy(convergeMin, convergeGen, sizeof(int), cudaMemcpyDeviceToHost) );
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&d2hTime, start, stop);
    // Print output values
    printf("%d,%f,%f,%f\n", convergeMin[0], h2dTime, kernelTime, d2hTime);
    // Release device resources — the original leaked convergeGen and both events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    CUDA_ERROR_CHECK( cudaFree(devStates) );
    CUDA_ERROR_CHECK( cudaFree(population_d) );
    CUDA_ERROR_CHECK( cudaFree(targetString_d) );
    CUDA_ERROR_CHECK( cudaFree(convergeGen) );
    return 0;
}
|
14,106 | // Version 20180131-01: added version number.
#include <iostream>
#include <iomanip>
// Picks which CUDA device to use. With zero or one device it selects
// device 0 directly; otherwise it lists every device's key properties and
// asks the user for a device number on stdin.
// BUGFIX: the user's input is now validated — non-numeric or out-of-range
// input falls back to device 0 instead of being handed to cudaSetDevice.
void SelectDevice () {
    int nDevices = 0;
    int selectedDevice = 0;
    cudaGetDeviceCount(&nDevices);
    if (nDevices <= 1) {
        cudaSetDevice(selectedDevice);
    } else {
        std::cout << "\n=============================================\n";
        std::cout << "|| ||";
        std::cout << "\n|| There are " << nDevices << " CUDA compatible devices. ||\n";
        std::cout << "|| ||\n";
        for (int i = 0; i < nDevices; i++) {
            cudaDeviceProp prop;
            cudaGetDeviceProperties(&prop, i);
            std::cout << "||=========================================||\n";
            std::cout << "|| ||\n";
            std::cout << "|| Device Number: " << std::setw(25) << std::left << i <<"||\n";
            std::cout << "|| Device name: " << std::setw(25) << std::left << prop.name << "||\n";
            std::cout << "|| Memory Clock Rate (MHz): " << std::setw(13) << std::left << prop.memoryClockRate/1000 << "||\n";
            std::cout << "|| Memory Bus Width (bits): " << std::setw(13) << std::left << prop.memoryBusWidth << "||\n";
            std::cout << "|| Peak Memory Bandwidth (GB/s): " << std::setw(7) << std::left << 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6 << " ||\n";
            std::cout << "|| ||\n";
        }
        std::cout << "=============================================\n";
        std::cout << "\nPlease enter a Device Number: ";
        std::cin >> selectedDevice;
        if (!std::cin || selectedDevice < 0 || selectedDevice >= nDevices) {
            std::cin.clear();    // recover the stream for any later readers
            selectedDevice = 0;  // safe default
        }
        cudaSetDevice(selectedDevice);
    }
}
14,107 | #include <stdio.h>
#include <stdlib.h>
// template <unsigned int blockSize>
// Final warp-level stage of a shared-memory tree reduction: lane tid folds
// in sdata[tid+32], sdata[tid+16], ..., sdata[tid+1].
// The `volatile` qualifier forces each store to memory so other lanes see
// it without __syncthreads(); this relies on pre-Volta implicit
// warp-synchronous execution — NOTE(review): on Volta+ this pattern needs
// __syncwarp() between steps to be safe. Assumes sdata holds at least
// tid+64 valid entries and callers invoke it only for tid < 32.
__device__ void warpReduce(volatile double* sdata, int tid){
// if (blockSize >= 64) sdata[tid] += sdata[tid+32];
// if (blockSize >= 32) sdata[tid] += sdata[tid+16];
// if (blockSize >= 16) sdata[tid] += sdata[tid+8];
// if (blockSize >= 8) sdata[tid] += sdata[tid+4];
// if (blockSize >= 4) sdata[tid] += sdata[tid+2];
// if (blockSize >= 2) sdata[tid] += sdata[tid+1];
sdata[tid] += sdata[tid+32];
sdata[tid] += sdata[tid+16];
sdata[tid] += sdata[tid+8];
sdata[tid] += sdata[tid+4];
sdata[tid] += sdata[tid+2];
sdata[tid] += sdata[tid+1];
}
// Sums the first 16 elements of g_idata into g_odata[0].
// Intended for a single-block launch (shared memory is per block).
//
// Fixes over the original:
//  - the stride loop used `s *= 2` (growing until unsigned wrap-around)
//    where the commented-out reference code used `s >>= 1`; it now halves,
//    giving a standard tree reduction;
//  - the reduction starts at N/2, not blockDim.x/2, so all 16 elements are
//    combined even when blockDim.x < N;
//  - the result is actually written to g_odata (the original left all the
//    output stores commented out, so the caller always printed 0);
//  - __syncthreads() is no longer inside divergent per-thread loop bounds.
__global__ void reduce(double* g_idata, double* g_odata){
    const int N = 16;
    __shared__ double sdata[16];
    // Load the input into shared memory (grid-stride so any block size works).
    for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
        sdata[i] = g_idata[i];
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    for (unsigned int s = N/2; s > 0; s >>= 1){
        for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < (int)s; i += blockDim.x * gridDim.x)
            sdata[i] += sdata[i + s];
        __syncthreads();
    }
    // Thread 0 publishes the block's sum.
    if (blockIdx.x == 0 && threadIdx.x == 0)
        g_odata[0] = sdata[0];
}
// Host driver: builds the input 0.0, 0.1, ..., 1.5, runs the reduction
// kernel with one block of four threads, and prints the first output slot.
int main(int argc, char** argv){
    const int N = 16;
    const int bytes = N * sizeof(double);
    // Host buffers: zero-initialized, then the input is filled with i*0.1.
    double* array_in = (double*) calloc(1, bytes);
    double* array_out = (double*) calloc(1, bytes);
    for (int k = 0; k < N; ++k)
        array_in[k] = k * 0.1;
    // Mirror both buffers on the device.
    double *d_array_in, *d_array_out;
    cudaMalloc((void**)&d_array_in, bytes);
    cudaMalloc((void**)&d_array_out, bytes);
    cudaMemcpy(d_array_in, array_in, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_array_out, array_out, bytes, cudaMemcpyHostToDevice);
    // One block of four threads, as in the original experiment.
    reduce<<<1,4>>>(d_array_in, d_array_out);
    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy(array_out, d_array_out, bytes, cudaMemcpyDeviceToHost);
    printf("sum = %.6f\n", array_out[0]);
    free(array_out);
    free(array_in);
    cudaFree(d_array_in);
    cudaFree(d_array_out);
    return 0;
}
14,108 | #include <stdio.h>
// Prints each thread's 2D coordinates and its flattened global thread ID,
// for a 2D grid of 2D blocks.
// BUGFIX: threads-per-block is blockDim.x * blockDim.y; the original
// multiplied the block *indices* (blockIdx.y * blockIdx.x), which is 0 for
// every block in row or column 0 and wrong elsewhere, so the printed
// global IDs collided.
__global__ void hello2D()
{
    int threadsPerBlock = blockDim.x * blockDim.y;
    int blockId = gridDim.x * blockIdx.y + blockIdx.x;
    int tid = blockId * threadsPerBlock + blockDim.x * threadIdx.y + threadIdx.x;
    printf("I am thread (%d, %d) in block (%d, %d). Global thread ID = %d\n", threadIdx.y, threadIdx.x, blockIdx.y, blockIdx.x, tid);
}
// Launches a 3x2 grid of 3x4-thread blocks so every thread prints itself.
__host__ int main(void)
{
    // 2D blocks in a 2D grid (z dimensions default to 1).
    dim3 gridsize(3, 2);
    dim3 blocksize(3, 4);
    hello2D<<<gridsize, blocksize>>>();
    // Block until the kernel's printf output has been flushed.
    cudaDeviceSynchronize();
    return 0;
}
|
14,109 | //pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
// GPUVerify benchmark kernel (see the pass/--blockDim annotations above):
// each thread t computes B[t] = A[t+1] + B[t+1] and A[t] = B[t+1] - A[t+1].
// NOTE(review): assumes A and B hold at least blockDim.x + 1 elements, and
// that each thread's reads of element t+1 complete before its neighbour's
// writes of that element — as checked by the verifier for this single-warp
// configuration. The N parameter is unused.
__global__ void test_Prog(int *A,int *B, int N) {
const int tid = threadIdx.x;
int tmp=A[tid+1];
int tmp2=B[tid+1];
B[tid]=tmp2+tmp;
A[tid]=tmp2-tmp;
} |
14,110 | #include "includes.h"
/*
blockIdx: block index
threadIdx: thread index within block
blockDim: threads per block (2)
gridDim: blocks per launch (N/2)
*/
#define N 10
// Element-wise c = a + b over N elements, one element per thread.
// Added the standard bounds guard: the header comment describes a launch of
// exactly N threads (N/2 blocks of 2), but any launch whose grid rounds up
// past N would otherwise write out of bounds.
__global__ void sum(int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        c[i] = a[i] + b[i];
}
14,111 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define XSIZE 1024
#define YSIZE 1024
#define ITERATION 100
#define NUMBER(n,i,j) (((n)&0x1? XSIZE*YSIZE :0) + (i)*YSIZE + (j))
// Dumps buffer 0 of the field as an XSIZE x YSIZE table of %.1e values.
void debug_print( float* u ){
    for (int r = 0; r < XSIZE; ++r) {
        for (int c = 0; c < YSIZE; ++c)
            printf("%.1e ", u[NUMBER(0, r, c)]);
        printf("\n");
    }
    return ;
}
/*
 * One explicit time step of the 2D heat-equation stencil on the interior
 * points. Launched as iter<<<XSIZE-2, YSIZE-2>>>: blockIdx picks the row i,
 * threadIdx picks the column j, and the +1 offsets skip the boundary.
 * Reads generation n and writes generation n+1 — the NUMBER macro selects
 * one of the two buffers based on the parity bit (n & 1).
 */
__global__ void iter(float *u,float r,int n){
int i = blockIdx.x+1;
int j = threadIdx.x+1;
u[NUMBER(n+1,i,j)]
= (1.0-4.0*r)*u[NUMBER(n,i,j)]
+ r*(u[NUMBER(n,i+1,j)]+u[NUMBER(n,i-1,j)]+u[NUMBER(n,i,j+1)]+u[NUMBER(n,i,j-1)]);
return ;
}
// Runs ITERATION ping-pong heat-equation steps on the GPU and reports the
// elapsed time and FLOP rate.
// Fixes over the original: malloc is checked, the deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize(), and both
// the host and device buffers are released before exit.
int main(){
    // Two XSIZE x YSIZE buffers (generations n and n+1), selected by NUMBER's n&1 bit.
    int array_size = 2 * XSIZE * YSIZE * sizeof(float) ;
    float r = 0.05;
    float* u = (float*)malloc(array_size);
    if (u == NULL) {
        printf("malloc failed\n");
        return 1;
    }
    int i,j,n;
    // initialize: 0 on the boundary, 1 in the interior, in both buffers
    for( i = 0 ; i < XSIZE ; ++i ){
        for( j = 0 ; j < YSIZE ; ++j ){
            u[NUMBER(0,i,j)] = ( i==0 || i==XSIZE-1 || j==0 || j==YSIZE-1 ? 0.0 : 1.0 );
            u[NUMBER(1,i,j)] = ( i==0 || i==XSIZE-1 || j==0 || j==YSIZE-1 ? 0.0 : 1.0 );
        }
    }
    // malloc in device
    float *device_u;
    cudaMalloc((void**)&device_u,array_size);
    // copy to device
    cudaMemcpy(device_u,u,array_size,cudaMemcpyHostToDevice);
    // get time
    struct timeval t_begin,t_end;
    gettimeofday(&t_begin,NULL);
    for( n = 0 ; n < ITERATION ; ++n )
        iter<<<XSIZE-2,YSIZE-2>>>(device_u,r,n);
    // wait for all kernels before reading the clock
    cudaDeviceSynchronize();
    gettimeofday(&t_end,NULL);
    double elapsed = (double)(t_end.tv_sec-t_begin.tv_sec) + (double)(t_end.tv_usec-t_begin.tv_usec) / (1000.0*1000.0);
    printf("Elapsed time = %lf(sec)\n", elapsed );
    printf("FLOPS = %g\n" , 6.0*ITERATION*(XSIZE-2)*(YSIZE-2)/elapsed );
    // copy from device
    cudaMemcpy(u,device_u,array_size,cudaMemcpyDeviceToHost);
    // debug_print(u);
    // release buffers (the original leaked both)
    cudaFree(device_u);
    free(u);
    return 0;
}
|
14,112 | #include "includes.h"
#define HISTOGRAM_LENGTH 256
// Converts an interleaved RGB uchar image to single-channel grayscale,
// one thread per pixel over a 2D launch; out-of-range threads do nothing.
__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= height || col >= width)
        return;
    int pixel = row * width + col;
    // Weighted channel sum (same coefficients and evaluation order as before).
    grayImg[pixel] = (unsigned char) (0.21*ucharImg[pixel*3] + 0.71*ucharImg[pixel*3 + 1] + 0.07*ucharImg[pixel*3 + 2]);
}
14,113 | #include <iostream>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
using namespace std;
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Initialization of arrays for storing Primitive Variables
// Zero-fills a row x col field stored row-major in phi.
void InitializeField(double *phi, int row, int col){
    const int total = row * col;
    for (int k = 0; k < total; ++k)
        phi[k] = 0.0;
}
// Initialization of arrays for storing Primitive Variables
// Device-side zero-fill of a row*col field, one element per thread.
__global__ void InitializeFieldGPU(double *phi, int row, int col){
    // Flat global index; threads past the end of the field do nothing.
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx < row * col)
        phi[idx] = 0.0;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Serial boundary-condition update for a row-major col-wide grid:
// the north edge gets ux = 1 (moving lid style), all other edges get
// zero velocity, and pressure copies its zero-gradient value from the
// adjacent interior cell. Corner cells are visited twice; the later
// (west/east) loops win, matching the original update order N, S, W, E.
void updateBoundaryCondition(double* ux, double *uy, double *p,
int col,int totCell){
    int i;
    // North edge: first row.
    for (i = 0; i < col; ++i) {
        ux[i] = 1;
        uy[i] = 0;
        p[i] = p[i + col];
    }
    // South edge: last row.
    for (i = totCell - col; i < totCell; ++i) {
        ux[i] = 0;
        uy[i] = 0;
        p[i] = p[i - col];
    }
    // West edge: first column.
    for (i = 0; i < totCell; i += col) {
        ux[i] = 0;
        uy[i] = 0;
        p[i] = p[i + 1];
    }
    // East edge: last column.
    for (i = col - 1; i < totCell; i += col) {
        ux[i] = 0;
        uy[i] = 0;
        p[i] = p[i - 1];
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Subtracts the reference cell's value from every interior cell, anchoring
// a field that is defined only up to an additive constant.
// BUGFIX: the reference value is captured once before the sweep. The old
// code re-read Phi[refCell] in every iteration, so once the reference cell
// itself was visited (becoming 0), all later cells were shifted by 0
// instead of the true reference value.
void refValueUpdate(double* Phi, int row, int col, int refCell){
    const double refVal = Phi[refCell];
    for (int i = 1; i < (row - 1); ++i) {
        for (int j = 1; j < (col - 1); ++j) {
            Phi[i * col + j] -= refVal;
        }
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Copies the current field into the "old" buffer: phiOld <- phinew.
void storeOldValue(double *phinew, double *phiOld,int totCell){
    int k = 0;
    while (k < totCell) {
        phiOld[k] = phinew[k];
        ++k;
    }
}
// Device-side copy phiOld <- phinew, one element per thread; threads past
// totCell do nothing.
__global__ void storeOldValueGPU(double *phinew, double *phiOld,int totCell){
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx < totCell)
        phiOld[idx] = phinew[idx];
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Scales every interior cell of Cn by 1/dt; boundary cells are untouched.
void updateCn(double* Cn,double dt, int col,int row){
    for (int i = 1; i + 1 < row; ++i) {
        double* rowPtr = Cn + i * col;
        for (int j = 1; j + 1 < col; ++j)
            rowPtr[j] /= dt;
    }
}
// Device version of updateCn: each thread owns one cell of the row-major
// grid and divides it by dt only if the cell is interior. Threads past the
// grid compute a row index >= row and therefore fall outside the guard.
__global__ void updateCnGPU(double* Cn,double dt, int col,int row){
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    int j = idx % col;  // column
    int i = idx / col;  // row
    bool interior = (j > 0) && (j < col - 1) && (i > 0) && (i < row - 1);
    if (interior)
        Cn[idx] /= dt;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Root-mean-square difference between two fields of totCell entries,
// written to *L2Phi.
void L2norm(double *Phinew, double *Phiold,double *L2Phi,int totCell){
    double acc = 0.0;
    for (int k = 0; k < totCell; ++k) {
        const double d = Phiold[k] - Phinew[k];
        acc += pow(d, 2);
    }
    *L2Phi = sqrt(acc / totCell);
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Accumulates the (unrooted) mean-squared differences of three field pairs
// into L2Phi[0..2]. NOTE: L2Phi is NOT zeroed here — any value already in
// it is folded into the average, so the caller is expected to reset it.
// The square root is also deliberately left to the caller (apply it when
// printing).
void normL2(double *Phi1new, double *Phi1old,
double *Phi2new, double *Phi2old,
double *Phi3new, double *Phi3old,
double *L2Phi, int totCell){
    for (int k = 0; k < totCell; ++k) {
        const double d1 = Phi1old[k] - Phi1new[k];
        const double d2 = Phi2old[k] - Phi2new[k];
        const double d3 = Phi3old[k] - Phi3new[k];
        L2Phi[0] += pow(d1, 2);
        L2Phi[1] += pow(d2, 2);
        L2Phi[2] += pow(d3, 2);
    }
    L2Phi[0] = L2Phi[0] / totCell;
    L2Phi[1] = L2Phi[1] / totCell;
    L2Phi[2] = L2Phi[2] / totCell;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Mean-squared difference over *interior* cells only, for three field
// pairs at once; results go to L2Phi[0..2]. The square root is left to the
// caller (apply it when printing).
void L2norm1(double *Phi1new, double *Phi1old,
double *Phi2new, double *Phi2old,
double *Phi3new, double *Phi3old,
double *L2Phi, int col,int row){
    double s1 = 0.0, s2 = 0.0, s3 = 0.0;
    for (int i = 1; i + 1 < row; ++i) {
        for (int j = 1; j + 1 < col; ++j) {
            const int k = i * col + j;
            const double d1 = Phi1old[k] - Phi1new[k];
            const double d2 = Phi2old[k] - Phi2new[k];
            const double d3 = Phi3old[k] - Phi3new[k];
            s1 += d1 * d1;
            s2 += d2 * d2;
            s3 += d3 * d3;
        }
    }
    const double interiorCells = (double)((col - 2) * (row - 2));
    L2Phi[0] = s1 / interiorCells;
    L2Phi[1] = s2 / interiorCells;
    L2Phi[2] = s3 / interiorCells;
}
|
14,114 | #include "includes.h"
// Reflective (wall-type) boundary update for the three 1D state arrays:
// the ghost cells at both ends mirror their interior neighbour, with u2
// negated. NOTE(review): every thread that calls this writes the same
// ghost cells (same values, redundant stores).
__device__ void d_boundaryCondition(const int nbrOfGrids, double *d_u1, double *d_u2, double *d_u3) {
    const int first = 0;
    const int last = nbrOfGrids - 1;
    d_u1[first] = d_u1[first + 1];
    d_u2[first] = -d_u2[first + 1];
    d_u3[first] = d_u3[first + 1];
    d_u1[last] = d_u1[last - 1];
    d_u2[last] = -d_u2[last - 1];
    d_u3[last] = d_u3[last - 1];
}
/*
 * One Roe-solver time step for a 1D conservation system, fused into a
 * single kernel. d_u1..d_u3 hold the conserved state (updated in place),
 * d_f1..d_f3 the computed wall fluxes; the many w*, fc*, fl*, fr*,
 * fludif*, a*, ac* pointers are preallocated global scratch buffers, one
 * slot per grid cell. The constants tiny, sbpar1 and sbpar2 are expected
 * to come from includes.h.
 * NOTE(review): stride is blockDim.x only — with gridDim.x > 1 every block
 * iterates the same i range, and __syncthreads() synchronizes only within
 * a block while the [i-1]/[i+1] neighbours may belong to another block.
 * This kernel therefore appears to assume a single-block launch; confirm
 * with the caller before changing the launch configuration.
 */
__global__ void RoeStep(const int nbrOfGrids, double *d_u1, double *d_u2, double *d_u3, const double *d_vol, double *d_f1, double *d_f2, double *d_f3, const double *d_tau, const double *d_h, const double *d_gama, double *w1,double *w2,double *w3,double *w4, double *fc1,double *fc2,double *fc3, double *fr1,double *fr2,double *fr3, double *fl1,double *fl2,double *fl3, double *fludif1,double *fludif2,double *fludif3, double *rsumr, double *utilde, double *htilde, double *uvdif, double *absvt, double *ssc, double *vsc, double *eiglam1,double *eiglam2,double *eiglam3, double *sgn1,double *sgn2,double *sgn3, int *isb1,int *isb2,int *isb3, double *a1,double *a2,double *a3, double *ac11,double *ac12,double *ac13, double *ac21,double *ac22,double *ac23) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < nbrOfGrids; i += stride) {
// find parameter vector w
{
w1[i] = sqrt(d_vol[i] * d_u1[i]);
w2[i] = w1[i] * d_u2[i] / d_u1[i];
w4[i] = (*d_gama - 1) * (d_u3[i] - 0.5 * d_u2[i] * d_u2[i] / d_u1[i]);
w3[i] = w1[i] * (d_u3[i] + w4[i]) / d_u1[i];
}
// calculate the fluxes at the cell center
{
fc1[i] = w1[i] * w2[i];
fc2[i] = w2[i] * w2[i] + d_vol[i] * w4[i];
fc3[i] = w2[i] * w3[i];
}
__syncthreads(); // because of the [i - 1] index below
// calculate the fluxes at the cell walls
if (i > 0) {
fl1[i] = fc1[i - 1]; fr1[i] = fc1[i];
fl2[i] = fc2[i - 1]; fr2[i] = fc2[i];
fl3[i] = fc3[i - 1]; fr3[i] = fc3[i];
}
// calculate the flux differences at the cell walls
if (i > 0) {
fludif1[i] = fr1[i] - fl1[i];
fludif2[i] = fr2[i] - fl2[i];
fludif3[i] = fr3[i] - fl3[i];
}
__syncthreads(); // because of the [i - 1] index below
// calculate the tilded state variables = mean values at the interfaces
if (i > 0) {
rsumr[i] = 1 / (w1[i - 1] + w1[i]);
utilde[i] = (w2[i - 1] + w2[i]) * rsumr[i];
htilde[i] = (w3[i - 1] + w3[i]) * rsumr[i];
absvt[i] = 0.5 * utilde[i] * utilde[i];
uvdif[i] = utilde[i] * fludif2[i];
ssc[i] = (*d_gama - 1) * (htilde[i] - absvt[i]);
// guard against a negative argument to sqrt below
if (ssc[i] > 0.0)
vsc[i] = sqrt(ssc[i]);
else {
vsc[i] = sqrt(abs(ssc[i]));
}
}
// calculate the eigenvalues and projection coefficients for each eigenvector
if (i > 0) {
eiglam1[i] = utilde[i] - vsc[i];
eiglam2[i] = utilde[i];
eiglam3[i] = utilde[i] + vsc[i];
sgn1[i] = eiglam1[i] < 0.0 ? -1 : 1;
sgn2[i] = eiglam2[i] < 0.0 ? -1 : 1;
sgn3[i] = eiglam3[i] < 0.0 ? -1 : 1;
a1[i] = 0.5 * ((*d_gama - 1) * (absvt[i] * fludif1[i] + fludif3[i]
- uvdif[i]) - vsc[i] * (fludif2[i] - utilde[i]
* fludif1[i])) / ssc[i];
a2[i] = (*d_gama - 1) * ((htilde[i] - 2 * absvt[i]) * fludif1[i]
+ uvdif[i] - fludif3[i]) / ssc[i];
a3[i] = 0.5 * ((*d_gama - 1) * (absvt[i] * fludif1[i] + fludif3[i]
- uvdif[i]) + vsc[i] * (fludif2[i] - utilde[i]
* fludif1[i])) / ssc[i];
}
// divide the projection coefficients by the wave speeds to evade expansion correction
// (tiny avoids division by a zero eigenvalue)
if (i > 0) {
a1[i] /= eiglam1[i] + tiny;
a2[i] /= eiglam2[i] + tiny;
a3[i] /= eiglam3[i] + tiny;
}
// calculate the first order projection coefficients ac1
if (i > 0) {
ac11[i] = -sgn1[i] * a1[i] * eiglam1[i];
ac12[i] = -sgn2[i] * a2[i] * eiglam2[i];
ac13[i] = -sgn3[i] * a3[i] * eiglam3[i];
}
// apply the 'superbee' flux correction to made 2nd order projection coefficients ac2
// NOTE(review): the boundary assignments below are executed by every
// iterating thread (same values, redundant stores).
{
ac21[1] = ac11[1];
ac21[nbrOfGrids - 1] = ac11[nbrOfGrids - 1];
ac22[1] = ac12[1];
ac22[nbrOfGrids - 1] = ac12[nbrOfGrids - 1];
ac23[1] = ac13[1];
ac23[nbrOfGrids - 1] = ac13[nbrOfGrids - 1];
double dtdx = *d_tau / *d_h;
if ((i > 1) && (i < nbrOfGrids - 1)) {
isb1[i] = i - int(sgn1[i]);
ac21[i] = ac11[i] + eiglam1[i] *
((fmax(0.0, fmin(sbpar1 * a1[isb1[i]], fmax(a1[i], fmin(a1[isb1[i]], sbpar2 * a1[i])))) +
fmin(0.0, fmax(sbpar1 * a1[isb1[i]], fmin(a1[i], fmax(a1[isb1[i]], sbpar2 * a1[i]))))) *
(sgn1[i] - dtdx * eiglam1[i]));
isb2[i] = i - int(sgn2[i]);
ac22[i] = ac12[i] + eiglam2[i] *
((fmax(0.0, fmin(sbpar1 * a2[isb2[i]], fmax(a2[i], fmin(a2[isb2[i]], sbpar2 * a2[i])))) +
fmin(0.0, fmax(sbpar1 * a2[isb2[i]], fmin(a2[i], fmax(a2[isb2[i]], sbpar2 * a2[i]))))) *
(sgn2[i] - dtdx * eiglam2[i]));
isb3[i] = i - int(sgn3[i]);
ac23[i] = ac13[i] + eiglam3[i] *
((fmax(0.0, fmin(sbpar1 * a3[isb3[i]], fmax(a3[i], fmin(a3[isb3[i]], sbpar2 * a3[i])))) +
fmin(0.0, fmax(sbpar1 * a3[isb3[i]], fmin(a3[i], fmax(a3[isb3[i]], sbpar2 * a3[i]))))) *
(sgn3[i] - dtdx * eiglam3[i]));
}
}
// calculate the final fluxes
if (i > 0) {
d_f1[i] = 0.5 * (fl1[i] + fr1[i] + ac21[i] + ac22[i] + ac23[i]);
d_f2[i] = 0.5 * (fl2[i] + fr2[i] + eiglam1[i] * ac21[i]
+ eiglam2[i] * ac22[i] + eiglam3[i] * ac23[i]);
d_f3[i] = 0.5 * (fl3[i] + fr3[i] + (htilde[i] - utilde[i] * vsc[i]) * ac21[i]
+ absvt[i] * ac22[i] + (htilde[i] + utilde[i] * vsc[i]) * ac23[i]);
}
__syncthreads(); // because of the [i + 1] index below
// update U
if (i > 0 && i < nbrOfGrids - 1) {
d_u1[i] -= *d_tau / *d_h * (d_f1[i + 1] - d_f1[i]);
d_u2[i] -= *d_tau / *d_h * (d_f2[i + 1] - d_f2[i]);
d_u3[i] -= *d_tau / *d_h * (d_f3[i + 1] - d_f3[i]);
}
// refresh the ghost cells after the interior update
d_boundaryCondition(nbrOfGrids, d_u1, d_u2, d_u3);
}
} |
14,115 | /*
Problem 1: initialize array of size 32 to 0
Problem 2: change array size to 1024
Problem 3: create another kernel that adds i to array[ i ]
Problem 4: change array size 8000 (check answer to Problem 3 still works)
*/
#include <stdio.h>
#include <cuda.h>
//initialize array to 0
//initialize array to 0
//Grid-stride loop: each thread clears a disjoint subset of elements.
//FIX: previously every one of the gridDim.x*blockDim.x threads wrote the
//entire array (all threads looped i=0..N-1), doing redundant global stores.
__global__ void kernel1( int N, int *d_array ){
    for( int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += gridDim.x * blockDim.x ){
        d_array[ i ] = 0;
    }
}
//add i to array[ i ]
//set array[ i ] = i
//Grid-stride loop: each thread fills a disjoint subset of elements.
//FIX: previously every thread redundantly wrote the whole array.
__global__ void kernel2( int N, int *d_array ){
    for( int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += gridDim.x * blockDim.x ){
        d_array[ i ] = i;
    }
}
//Driver for the four exercise problems; only Problem 4 is currently active,
//the earlier problems are kept as commented-out reference solutions.
//NOTE(review): no cudaError_t checks anywhere and the Problem-4 path reuses
//array/d_array without freeing earlier allocations if the commented blocks
//are re-enabled together.
int main(){
int *array, *d_array;
//DO NOT COMMENT OUT ANYTHING ABOVE THIS LINE
/*
printf( "**********PROBLEM 1**********\n" );
int N = 32;
array = (int*)malloc( N*sizeof(int) );
cudaMalloc( &d_array, N*sizeof( int ) );
kernel1<<<8, 128>>>( N, d_array );
cudaMemcpy( array, d_array, N*sizeof(int), cudaMemcpyDeviceToHost );
for( int i = 0; i < N; i++ ){
printf( "array[ %d ] = %d ", i, array[ i ] );
if( (i+1) % 4 == 0 ){
printf( "\n" );
}
}
//**********end problem 1**********
*/
/*
printf( "**********PROBLEM 2**********\n" );
int N2 = 1024;
array = (int*)malloc( N2*sizeof(int) );
cudaMalloc( &d_array, N2*sizeof(int) );
kernel1<<<8, 128>>>( N2, d_array );
cudaMemcpy( array, d_array, N2*sizeof(int), cudaMemcpyDeviceToHost );
for( int i = 0; i < N2; i++ ){
printf( "array[ %d ] = %d ", i, array[ i ] );
if( (i+1) % 8 == 0 ){
printf( "\n" );
}
}
//**********end problem 2**********
*/
/*
printf( "**********PROBLEM 3**********\n" );
int N2 = 1024;
array = (int*)malloc( N2*sizeof(int) );
cudaMalloc( &d_array, N2*sizeof(int) );
kernel2<<<8, 128>>>( N2, d_array );
cudaMemcpy( array, d_array, N2*sizeof(int), cudaMemcpyDeviceToHost );
for( int i = 0; i < N2; i++ ){
printf( "array[ %d ] = %d ", i, array[ i ] );
if( (i+1) % 8 == 0 ){
printf( "\n" );
}
}
//**********end problem 3**********
*/
printf( "**********PROBLEM 4**********\n" );
//N3 (8000) is not a multiple of the 8*128=1024 launched threads; the kernel
//must therefore handle arbitrary N.
int N3 = 8000;
array = (int*)malloc( N3*sizeof(int) );
cudaMalloc( &d_array, N3*sizeof(int) );
kernel2<<<8, 128>>>( N3, d_array );
//cudaMemcpy on the default stream blocks until the kernel has finished,
//so no explicit cudaDeviceSynchronize is needed before reading results.
cudaMemcpy( array, d_array, N3*sizeof(int), cudaMemcpyDeviceToHost );
for( int i = 0; i < N3; i++ ){
printf( "array[ %d ] = %d ", i, array[ i ] );
if( (i+1) % 8 == 0 ){
printf( "\n" );
}
}
//**********end problem 4**********
//DO NOT COMMENT ANYTHING BELOW THIS LINE
cudaFree( d_array );
free( array );
return 0;
}
|
14,116 | #include <stdio.h>
/*Kernel for matrix outer product*/
/*
 * Tiled GEMM kernel: C += alpha * A * B.
 * Each thread block computes a TILE_WIDTH x (TILE_WIDTH*VEC_SIZE) tile of C,
 * staging transposed TILE_WIDTH x TILE_WIDTH blocks of A in shared memory.
 * Assumes all dimensions are multiples of the tile sizes (no bounds checks).
 * NOTE(review): the hard-coded 4 in the A-load loop appears to assume a
 * 16x4 thread layout (blockDim = (16,4)) -- confirm against call sites.
 */
__global__
void k_gemm_f32(float alpha, const float* A, int stride_row_a, int stride_col_a,const float* B, int stride_row_b, int stride_col_b,float* C, int stride_row_c, int stride_col_c){
const int TILE_WIDTH=16;
const int VEC_SIZE=4; //multiplies TILE_WIDTH for the b row vectors. 4 means one thread calculated C's entries with a rowlength of 4*16=64 numbers in B. Must be multiple of TILE_WIDTH
float Cc[TILE_WIDTH]={0}; //initializes all elements to zero
__shared__ float Ac[TILE_WIDTH*TILE_WIDTH]; //buffer that holds columns of a
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
int a_begin=by*TILE_WIDTH*stride_col_a;
int a_end=a_begin+stride_col_a;//check if correct
int b_begin=bx*TILE_WIDTH*VEC_SIZE*stride_row_b; //we multiply by VEC_SIZE because B's tiles have length TILEWIDTH*VEC_SIZE
for (;a_begin < a_end;a_begin+=TILE_WIDTH*stride_row_a){
//Load elements of A into shared memory
for (int i=0; i< 4;i++){
Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a];
}
__syncthreads();
const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b];
float* ptrA=Ac;
#pragma unroll
for (int i=0;i<TILE_WIDTH;i++){
float bv=alpha*ptrB[0];
//this loop could be unrolled
for (int j=0;j<TILE_WIDTH;j++){
Cc[j]+=ptrA[j]*bv;
}
ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A)
ptrB+=stride_col_b;
}
b_begin+=TILE_WIDTH*stride_col_b;
__syncthreads();
}
//Write-back: each thread adds its private column of partial sums into C.
int c=stride_col_c*TILE_WIDTH*by+(TILE_WIDTH*VEC_SIZE*bx+tx+TILE_WIDTH*ty)*stride_row_c;
for (int i=0;i<TILE_WIDTH;i++){
C[c]+=Cc[i];
c+=stride_col_c;
}
}
//Todo!!
/*Kernel for matrix outer product. This version does not require A,B,C to be multiples of the blocksizes*/
/*
 * Tiled GEMM kernel (C += alpha * A * B) for an m x k A and k x n B whose
 * dimensions need NOT be multiples of the tile sizes: out-of-range loads of
 * A are replaced by zeros and out-of-range columns of B/C are skipped.
 * Same tiling scheme as k_gemm_f32; the full k/TILE_WIDTH tiles are handled
 * first, then a remainder tile of qk columns.
 */
__global__
void k_gemm_f32_nonblockmultiple(const int m, const int n, const int k,float alpha, const float* A, int stride_row_a, int stride_col_a,const float* B, int stride_row_b, int stride_col_b,float* C, int stride_row_c, int stride_col_c){
const int TILE_WIDTH=16;
const int VEC_SIZE=4; //multiplies TILE_WIDTH for the b row vectors. 4 means one thread calculated with a rowlength of 4*16=64 numbers in B. Must be multiple of TILE_WIDTH
float Cc[TILE_WIDTH]={0}; //initializes all elements to zero
__shared__ float Ac[TILE_WIDTH*TILE_WIDTH]; //buffer that holds columns of a
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
int qm=m%TILE_WIDTH;
//int qn=(VEC_SIZE*TILE_WIDTH)%n;
int qk=k%TILE_WIDTH;
int rowA=by*TILE_WIDTH;
int colB=bx*TILE_WIDTH*VEC_SIZE+TILE_WIDTH*ty+tx;
int a_begin=by*TILE_WIDTH*stride_col_a;
int b_begin=bx*TILE_WIDTH*VEC_SIZE*stride_row_b; //we multiply by VEC_SIZE because B's tiles have length TILEWIDTH*VEC_SIZE
bool does_compute=false;
//printf("qk:%d\n",qk);
int rk=k/TILE_WIDTH;
//Main loop over the rk complete TILE_WIDTH-wide tiles of A/B.
for (int q=0;q<rk;q++){
//Load elements of A into shared memory
//printf("i: %d\n",a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a);
if ((tx<k)&&((rowA+TILE_WIDTH-1)<m)){
for (int i=0; i< 4;i++){
//printf("Aci: %d, i: %d and A:%f\n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a,A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]);
Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a];
}
}
else{
//Edge tile: load element-wise with bounds checks, zero-fill the rest.
for (int i=0; i< 4;i++){
if((rowA+i*4+ty)<m && (tx<k)){
//printf("is: %f\n",A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]);
Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a];
// printf("is:Ac index: %d, index: %d and A:%f\n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a,A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]);
}
else{
Ac[i*4+ty+TILE_WIDTH*tx]=0.0;
}
}
}
/*
for (int i=0;i<TILE_WIDTH*TILE_WIDTH;i++){
Ac[i]=-7;
}
*/
__syncthreads();
/*
if (tx==0 && ty==0){
for (int i=0;i<TILE_WIDTH*TILE_WIDTH;i++){
printf("%f\t",Ac[i]);
}
}
*/
if (colB>=n){
//This thread's column is outside C; keep its partial results at zero.
for (int j=0;j<TILE_WIDTH;j++){
Cc[j]=0.0;
}
}
else{
//printf("Id: %d,%d,%d,%d\n",by,ty,bx,tx);
const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b];
float* ptrA=Ac;
does_compute=true;
#pragma unroll
for (int i=0;i<TILE_WIDTH;i++){
float bv=alpha*ptrB[0];
//this loop could be unrolled
for (int j=0;j<TILE_WIDTH;j++){
Cc[j]+=ptrA[j]*bv;
/* if (ptrA[j]!=0){
printf("%f vs. %f\n",ptrA[j],bv);
}
*/
}
ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A)
ptrB+=stride_col_b;
}
b_begin+=TILE_WIDTH*stride_col_b;
}
a_begin+=TILE_WIDTH*stride_row_a;
__syncthreads();
}
//Remainder tile: process the last qk (< TILE_WIDTH) columns of A, if any.
if (qk>0){
if (tx<qk){
//printf("rowA:%d, ty:%d\n",rowA,ty);
a_begin=(by*TILE_WIDTH*stride_col_a)+rk*TILE_WIDTH*stride_row_a;
for (int i=0; i< 4;i++){
if((rowA+i*4+ty)<m){
Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a];
//printf("Ac index2: %d, index: %d and \n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a);
}
else{
Ac[i*4+ty+TILE_WIDTH*tx]=0.0;
}
}
}
else{
for (int i=0; i< 4;i++){
Ac[i*4+ty+TILE_WIDTH*tx]=0.0;
}
}
__syncthreads();
//return;
if (colB<n){
// printf("Id: %d,%d,%d,%d\n",by,ty,bx,tx);
const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b];
float* ptrA=Ac;
does_compute=true;
for (int i=0;i<qk;i++){
float bv=alpha*ptrB[0];
//this loop could be unrolled
for (int j=0;j<TILE_WIDTH;j++){
Cc[j]+=ptrA[j]*bv;
/*if (ptrA[j]!=0){
printf("%f vs2. %f\n",ptrA[j],bv);
}
*/
}
ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A)
ptrB+=stride_col_b;
}
}
}
__syncthreads(); //maybe redundant
//Write-back: only threads whose column exists in C; clip the row count of
//the bottom tile to qm so we never store past row m.
if (does_compute){
int c=stride_col_c*TILE_WIDTH*by+(TILE_WIDTH*VEC_SIZE*bx+tx+TILE_WIDTH*ty)*stride_row_c;
int c_length=((rowA+TILE_WIDTH)<=m)?TILE_WIDTH:qm;
for (int i=0;i<c_length;i++){
C[c]+=Cc[i];
c+=stride_col_c;
}
}
}
/*
 * Scales the m x n matrix C (given row/column strides) by beta.
 * Each thread scales a TILE_WIDTH x TILE_WIDTH sub-block; a thread block
 * covers BLOCK_WIDTH x BLOCK_WIDTH entries, which implies a 4x4 thread
 * layout (256/64). NOTE(review): some call sites launch this with other
 * thread layouts (e.g. 16x4) -- confirm the intended launch geometry.
 */
__global__
void k_scal_f32(int m, int n, float beta, float* C, int stride_row_c, int stride_col_c){
const int BLOCK_WIDTH=256; //size of a block
const int TILE_WIDTH=64; //size of block per single thread
int tx=threadIdx.x;
int ty=threadIdx.y;
int bx=blockIdx.x;
int by=blockIdx.y;
//printf("Bin drin mit : bx %d, tx %d, by %d, ty %d \n",bx,tx,by,ty);
float* c_begin=&C[(by*BLOCK_WIDTH+ty*TILE_WIDTH)*stride_col_c+(bx*BLOCK_WIDTH+tx*TILE_WIDTH)*stride_row_c];
//Fast path: the whole BLOCK_WIDTH x BLOCK_WIDTH region is inside C.
if ((((by+1)*BLOCK_WIDTH)<=m) && (((bx+1)*BLOCK_WIDTH)<=n)){
for (int i=0;i<TILE_WIDTH;i++){
for (int j=0;j<TILE_WIDTH;j++){
c_begin[i*stride_col_c+j*stride_row_c]*=beta;
}
}
}
else{
//Edge path: per-element bounds checks against m (rows) and n (columns).
int column=by*BLOCK_WIDTH+ty*TILE_WIDTH;
for (int i=0;i<TILE_WIDTH;i++){
if (column<m){
int row=bx*BLOCK_WIDTH+tx*TILE_WIDTH;
for (int j=0;j<TILE_WIDTH;j++){
if (row<n){
c_begin[i*stride_col_c+j*stride_row_c]*=beta;
//printf("Bin hier drin mit %d und %d mit by %d ty %d bx %d tx %d\n",i,j,by,ty,bx,tx);
}
row=row+1;
}
}
column=column+1;
}
}
}
//matrix matrix multiplication
//Host wrapper computing C = alpha*A*B + beta*C for row-major matrices whose
//dimensions are multiples of the kernel tile sizes (m % 16 == 0,
//n % 64 == 0). Allocates device buffers, copies operands in, launches the
//kernels and copies C back.
//NOTE(review): cudaMalloc and kernel-launch results are unchecked, and the
//beta path launches k_scal_f32 with the 16x4 GEMM thread layout although
//that kernel's indexing implies a 4x4 layout -- verify.
__host__
void gemm_f32_blockmultiple(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){
float* A_d;
float* B_d;
float* C_d;
int sizeA=sizeof(float)*m*k;
int sizeB=sizeof(float)*n*k;
int sizeC=sizeof(float)*m*n;
float bsmx=16;
float bsmy=4;
dim3 threadLayout=dim3(bsmx,bsmy,1);
dim3 grid=dim3(ceil(n/(4.0*bsmx)),ceil(m/bsmx),1);
cudaMalloc((void**) &C_d,sizeC);
if (beta==0){
//beta == 0: C's prior contents are irrelevant, just zero the device copy.
cudaMemset(C_d, 0, sizeC);
}
else{
cudaMemcpy((void*) C_d, (void*) C_h, sizeC,cudaMemcpyHostToDevice);
k_scal_f32<<<grid,threadLayout>>>(m,n,beta,C_d,1,n);
}
if (alpha!=0.0){
cudaMalloc((void**) &A_d,sizeA);
cudaMalloc((void**) &B_d,sizeB);
cudaError_t copy1=cudaMemcpy((void*) A_d, (void*) A_h, sizeA, cudaMemcpyHostToDevice);
cudaError_t copy2=cudaMemcpy((void*) B_d, (void*) B_h, sizeB, cudaMemcpyHostToDevice);
if ((copy1==cudaSuccess)&& (copy2==cudaSuccess)){
k_gemm_f32<<<grid,threadLayout>>> (alpha, A_d, 1, k,B_d,1,n,C_d,1,n);
cudaMemcpy((void*) C_h, (void*) C_d, sizeC, cudaMemcpyDeviceToHost);
cudaFree(A_d);
cudaFree(B_d);
}
}
cudaFree(C_d);
}
//General matrix-to-matrix multiplication for 32 bit floats. Input matrices are padded if they are not a multiple of block size bsmx and bsmy
//Host wrapper computing C = alpha*A*B + beta*C for arbitrary dimensions by
//padding the device copies of A, B and C up to tile-size multiples
//(mB, nB, kB), running the block-multiple kernel on the padded buffers, and
//copying only the valid n-wide rows of C back.
//NOTE(review): `copy`/`copy1`/`copy2` are read uninitialized when the
//corresponding loop runs zero times (m == 0 or k == 0), and the row-by-row
//cudaMemcpy loops are slow -- cudaMemcpy2D would do this in one call.
__host__
void gemm_f32_nonblockmultiple(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){
float* A_d;
float* B_d;
float* C_d;
float bsmx=16; //blocksize x
float bsmy=4; //blocksize y
int mB=ceil(m/bsmx)*bsmx;
int nB=ceil(n/(4.0*bsmx))*(4.0*bsmx);
int kB=ceil(k/bsmx)*bsmx;
int sizeCb=sizeof(float)*mB*nB;
cudaMalloc((void**) &C_d, sizeCb);
dim3 threadLayout=dim3(bsmx,bsmy,1);
dim3 grid=dim3(ceil(nB/(4.0*bsmx)),ceil(mB/bsmx),1);
if (beta==0){
cudaMemset(C_d, 0, sizeCb);
}
else{
cudaError_t copy;
//Copy C row by row so each host row of length n lands at the start of a
//padded device row of length nB.
for (int i=0;i<m;i++){
copy=cudaMemcpy((void*) (C_d+i*nB), (void*) (C_h+i*n), sizeof(float)*n,cudaMemcpyHostToDevice);
}
if (copy!=cudaSuccess){
printf("Copy fehlgeschlagen\n");
}
// printf("Starte nun den Kernel\n");
dim3 threadsize=dim3(4,4,1);
dim3 blocksize=dim3(ceil(n/256.0),ceil(m/256.0),1);
k_scal_f32<<<blocksize,threadsize>>>(m,n,beta,C_d,1,nB);
//cudaDeviceSynchronize();
}
if (alpha!=0.0){
int sizeAb=sizeof(float)*mB*kB;
int sizeBb=sizeof(float)*kB*nB;
cudaMalloc((void**) &A_d,sizeAb);
cudaMalloc((void**) &B_d,sizeBb);
//Zero the padded device buffers so the padding contributes nothing.
cudaMemset(A_d,0.0,sizeAb);
cudaMemset(B_d,0.0,sizeBb);
cudaError_t copy1;
cudaError_t copy2;
for (int i=0;i<m;i++){
copy1=cudaMemcpy((void*) (A_d+i*kB), (void*) (A_h+i*k), sizeof(float)*k,cudaMemcpyHostToDevice);
}
for (int i=0;i<k;i++){
copy2=cudaMemcpy((void*) (B_d+i*nB), (void*) (B_h+i*n), sizeof(float)*n, cudaMemcpyHostToDevice);
}
if ((copy1==cudaSuccess)&& (copy2==cudaSuccess)){
k_gemm_f32<<<grid,threadLayout>>> (alpha, A_d, 1, kB,B_d,1,nB,C_d,1,nB);
cudaFree(A_d);
cudaFree(B_d);
}
}
//Copy back only the valid n columns of each of the m valid rows.
for (int i=0;i<m;i++){
cudaError_t copy=cudaMemcpy((void*) (C_h+i*n), (void*) (C_d+i*nB),sizeof(float)*n,cudaMemcpyDeviceToHost);
if (copy!=cudaSuccess){
printf("Copy fehlgeschlagen\n");
}
}
cudaFree(C_d);
}
//General matrix-to-matrix multiplication for 32 bit floats. Input matrices are padded if they are not a multiple of block size bsmx and bsmy
//Dispatcher for C = alpha*A*B + beta*C: picks the fast block-multiple path
//when the dimensions match the kernel tile sizes, otherwise the padded path.
__host__
void gemm_f32(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){
    // alpha == 0 and beta == 1 leaves C unchanged -- nothing to do.
    if ((alpha==0.0) && (beta==1.0)){
        return;
    }
    // The block-multiple kernel tiles m by TILE_WIDTH (16) and n by
    // VEC_SIZE*TILE_WIDTH (64), so both remainders must be zero.
    // BUG FIX: res2 previously used division, n/(4*4), which is zero only
    // for n < 16 -- the fast path was selected for exactly the wrong sizes.
    int res1=m%16;
    int res2=n%(4*16);
    if ((res1==0)&&(res2==0)){
        gemm_f32_blockmultiple(m,n,k,alpha,A_h,B_h,beta,C_h);
    }
    else{
        // printf("nonblockmultiple\n");
        gemm_f32_nonblockmultiple(m,n,k,alpha,A_h,B_h,beta,C_h);
    }
}
//General matrix-to-matrix multiplication for 32 bit floats. This assumes that the input parameters are already allocated in device memory
//General matrix-to-matrix multiplication for 32 bit floats. This assumes
//that A_d, B_d and C_d are already allocated in device memory; strides
//describe the element spacing of rows and columns for each operand.
//NOTE(review): the beta scaling kernel is launched unconditionally (even
//for beta == 1), and res2 tests n % 16 although the block-multiple kernel
//tiles n in chunks of 64 -- confirm the intended dispatch condition.
__host__
void gemm_f32_device(int m, int n, int k, float alpha, const float* A_d, int stride_row_a, int stride_col_a, const float* B_d, int stride_row_b, int stride_col_b, float beta, float* C_d,int stride_row_c, int stride_col_c){
if ((alpha==0.0) && (beta==1.0)){
return;
}
float bsmx=16;
float bsmy=4;
dim3 threadLayout=dim3(bsmx,bsmy,1);
dim3 grid=dim3(ceil(n/(4.0*bsmx)),ceil(m/bsmx),1);
k_scal_f32<<<grid,threadLayout>>>(m,n,beta,C_d,stride_row_c,stride_col_c);
if (alpha!=0){
int res1=m%(int)bsmx;
int res2=n%(int)bsmx;
if ((res1==0)&&(res2==0)){
// printf("gemm blockmultiple\n");
k_gemm_f32<<<grid,threadLayout>>>(alpha, A_d, stride_row_a, stride_col_a,B_d,stride_row_b,stride_col_b,C_d,stride_row_c,stride_col_c);
}
else{
//printf("gemm nonblockmultiple\n");
k_gemm_f32_nonblockmultiple<<<grid,threadLayout>>>(m,n,k,alpha, A_d, stride_row_a, stride_col_a,B_d,stride_row_b,stride_col_b,C_d,stride_row_c,stride_col_c);
}
}
}
|
14,117 | #include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include<cstdlib>
#include<cstdio>
using namespace std;
// Generic helper: stream any printable value to stdout followed by a
// flushed newline.
template<class T>
void print(T value){
    cout << value << endl;
}
//all kernels have the 'void' return type
//global --> 1) runs on the device 2) called from the host code
// nvcc separates src code into host and device components
// host functions processed by the standard host compiler
// device funtions processed by the nvcc
//No-op kernel used only to exercise the <<< >>> launch syntax.
__global__ void mykernel(void){
return;
}
//Single-thread kernel: writes *a + *b into *c (all device pointers).
__global__ void addInt(int *a, int *b, int *c){
*c = *a + *b;
}
//Adds two ints on the GPU via addInt<<<1,1>>> and prints the result (9).
void testAddInt(){
    int a, b, c;                // host scalars
    int *d_a, *d_b, *d_c;       // device scalars
    // BUG FIX: size was sizeof(double) (8 bytes). The variables are 4-byte
    // ints, so every cudaMemcpy read/wrote 4 bytes past the host scalars.
    int size = sizeof(int);     //allocate space
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    a = 7, b = 2;
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
    addInt<<<1, 1>>>(d_a, d_b, d_c);
    // blocking copy also synchronizes with the kernel on the default stream
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cout << c << endl;
}
//Element-wise vector add: one block per element, c[b] = a[b] + b[b].
//NOTE(review): despite the "Int" in the name this kernel operates on doubles.
__global__ void addIntBlks(double* a, double *b, double *c){
// __syncthreads();
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
printf("BlockIdx.x: %d\n", blockIdx.x);
}
//Fills two random vectors, adds them on the GPU (one block per element),
//spot-checks one index and prints the summed residual (should be 0).
void testAddIntBlks(){
    // size_t N = 1<<25;
    size_t N = 10;
    double *d_a, *d_b, *d_c;
    size_t size = sizeof(double) * N;
    double *a = (double *)malloc(size);
    double *b = (double *)malloc(size);
    double *c = (double *)malloc(size);
    for (size_t i = 0; i < N; i++){
        a[i] = drand48();
        b[i] = drand48();
    }
    //allocate space
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    addIntBlks<<<N, 1>>>(d_a, d_b, d_c);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    srand(1);
    cout << rand() << " " << rand() % N << endl;
    size_t i = rand() % N;
    cout << N << " " << i << endl;
    cout << "a[" << i << "] + b[" << i << "] = " << a[i] + b[i] << endl;
    cout << "c[" << i << "] =" << c[i] << endl;
    // residual over all elements: 0 iff every c[i] == a[i] + b[i]
    double res = 0;
    for (size_t i = 0; i < N; i++){
        res += a[i] + b[i] - c[i];
    }
    cout << res << endl;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // BUG FIX: the host buffers were leaked on every call.
    free(a);
    free(b);
    free(c);
}
//Smoke-test driver: host printing, a no-op kernel launch, then the two
//GPU addition demos.
int main(){
cout << "hello world!" << endl;
print<string>(string("shabi"));
mykernel<<<1,1>>>();
testAddInt();
testAddIntBlks();
return 0;
}
14,118 | /*
Problem 1 for Project 3
Program written by Nathan Sanford
Performs Monte Carlo Integration trials in a parallel cuda environment.
The integral being approximated is
 inf
  /
  |  exp(-x)g(x) dx
  /
 0
where here g(x)=cos(x) but this can be changed in the kernel function mcquad.
Performs T trials of N samples per trial by throwing each trial onto a
processor that draws all the samples for that trial.
Inputs: Names of 2 text files. The first is an input file which consists
of the number of samples per trial and the number of trials. The
second is for an output file to which the result of each trial is
written.
Outputs: Writes the result of T trials to the
output file. Additionally, writes the total simulation time to
the terminal at the end of simulation.
Edit history:
5/15/2016 Initial draft
6/3/2016 Added comments
6/7/2016 Number of trials no longer written to output file
11/6/2019 Edited filename to differentiate
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
/*
void init(int T, unsigned int seed, curandState_t* states)
Initializes the vector of states where each state represents a trial. Uses
the curand built-in curand_init.
Inputs:
T is the number of trials, used as a guard so that only T things are
initialized and seed is an integer which represents a seed
as in many RNGs.
Outputs:
The vector states which is T elements long, is initialized using the built-in
curand capability curand_init.
*/
//Seeds one curand state per trial: same seed for all, subsequence = thread
//id, so each trial draws an independent stream.
__global__ void init(int T, unsigned int seed, curandState_t* states) {
int id=threadIdx.x+blockIdx.x*blockDim.x;
// we have to initialize the state on each gpu
if (id<T)
curand_init(seed,id,0,&states[id]);
}
/*
void mcquad(int T, int N, curandState_t* states, double* results)
Performs the mcquad trials. Performs T trials by drawing N samples in
each trial and calculating the mean. Uses the curand built-in
curand_uniform to draw a RN between 0 and 1 and then transform into the
exponential distribution. Makes the states local to each processor to
speed up the process as the states are updated with the drawing of each sample.
The function that we're averaging is cos(x) and it is specified within this
kernel.
Inputs:
T is the number of trials, used as a guard so that only T things are
initialized, N is the number of samples per trial, and states is the state
of the RNG for each trial, where the states are assumed to have been
initialized previously.
Outputs:
The vector results which is T elements long, where each element is the result
of a trial.
*/
//One Monte Carlo trial per thread: draws N exponential samples x ~ Exp(1)
//via inverse transform of curand_uniform, averages g(x) = cos(x), and
//stores the trial mean in results[id].
__global__ void mcquad(int T, int N, curandState_t* states, double* results){
    int id=threadIdx.x+blockIdx.x*blockDim.x;
    // ------------------variable declarations-------------------
    double z, y, x; // temporary variables used in drawing samples
    double mean=0;  // running sum of samples, then their mean
    // ----------------------------------------------------------
    // loop through samples
    if (id<T){ // guard so that we only perform T trials
        // local copy avoids a global read/write per sample
        curandState_t localState = states[id];
        for (int n=1; n<=N; n++){
            z=curand_uniform(&localState); //uniform RN between 0 and 1
            y=-log(z);                     //exponential RN (inverse CDF)
            x=cos(y);                      //sample of g
            mean+=x;
        }
        mean*=(1.0/N);
        // BUG FIX: persist the advanced RNG state so any later kernel that
        // reuses `states` continues the sequence instead of replaying it.
        states[id]=localState;
        results[id]=mean;
    }
}
/*
int main( int argc , char* argv [])
The main program gets the number of trials and the number of samples and then
throws the trials onto the gpu where each processor performs one trial. It
chooses the K20c card and then uses the minimal number of blocks to get to
T trials by having each thread on each block perform a trial.
Inputs: should be 3, just function name and input and output text file names
Outputs: Prints the results of each trial to the
output file. Additionally prints to the terminal screen the
time elapsed using the cuda timing stuff.
*/
//Reads N (samples/trial) and T (trials) from the input file, runs T trials
//on the GPU (one thread per trial), writes the T trial means as raw binary
//doubles to the output file, and prints the elapsed time.
int main(int argc, char* argv[])
{
// timing the CUDA way
cudaEvent_t start , stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
int N,T;
// read parameters for simulation from input file
if (argc != 3) {
printf("Incorrect usage: enter the input and output data file names\n");
return 0;
}
FILE* inputfile = fopen(argv[1], "r"); // inputfile only needed temporarily
if (!inputfile) {
printf("Unable to open input file\n");
return 0;
}
// start reading input data using function fscanf here
fscanf(inputfile, "%d", &N); // read an integer N for number of samples
fscanf(inputfile, "%d", &T); // read an integer T for number of trials
fclose(inputfile);
// Choose the GPU card
// (requests a device with at least 13 SMs, i.e. the K20c mentioned above)
cudaDeviceProp prop;
int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.multiProcessorCount = 13;
cudaChooseDevice(&dev,&prop);
cudaSetDevice(dev);
// Get maximum thread count for the device
cudaGetDeviceProperties(&prop,dev);
int num_threads = prop.maxThreadsPerBlock;
// ceil(T / num_threads) blocks so every trial gets a thread
int num_blocks = T/num_threads + (T%num_threads ? 1:0);
// ---------------------------Variable Declarations---------------------
// for storing results of all the trials
double* results=(double*)malloc(T*sizeof(double));
double* dev_results; // results of all trials on device
curandState_t* states;
// ---------------------------------------------------------------------
// allocate memory on device
cudaMalloc((void**)&states,T*sizeof(curandState_t));
cudaMalloc((void**)&dev_results, T*sizeof(double));
// set initial seeds (time(NULL) => different stream every run)
init<<<num_blocks,num_threads>>>(T,time(NULL),states);
// do the monte carlo simulations on device
mcquad<<<num_blocks,num_threads>>>(T,N,states,dev_results);
// copy output data from device
// (blocking copy on the default stream also synchronizes with the kernels)
cudaMemcpy(results,dev_results,T*sizeof(double),cudaMemcpyDeviceToHost);
// write output of trials to file
FILE *fileid=fopen(argv[2],"w"); // open output file
fwrite(results,sizeof(double),T,fileid); // write output (raw binary doubles)
fclose(fileid);
// free memory
free(results);
cudaFree(states);
cudaFree(dev_results);
// timing the cuda way
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
// cudaEventElapsedTime reports milliseconds; divided by 1000 below
cudaEventElapsedTime(&elapsedTime, start, stop);
// print final execution time to terminal screen
printf("Cuda time elapsed: %g seconds \n",elapsedTime/1000.0);
return 0;
}
|
14,119 | #include <stdio.h>
#include <stdlib.h>
//In this code, each thread prints one letter to the screen.
__device__ const char *STR = "Hello from GPU";
const char STR_LENGHT = 14;
//Device helper: prints the calling thread's x/y indices within its block.
__device__ void teste(){
printf("thread x = %d e y = %d\n", threadIdx.x, threadIdx.y);
}
//Each thread prints one character of STR, cycling over its 14 letters
//(only threadIdx.x selects the letter; y/z indices are ignored here).
__global__ void cuda_hello(void){
printf("%c\n", STR[threadIdx.x % STR_LENGHT]);
teste();
}
//Launches cuda_hello on a 32x32 grid of 16x16 blocks and waits for it.
int main(void){
//int num_threads = STR_LENGHT;
//int num_blocks = 2;
//dim3 - variable type used to configure the launch; it has three fields (x,y,z)
dim3 dimBlock(16,16);
dim3 dimGrid(32,32);
//Left of the comma in <<< >>> is the grid (block) configuration; to the right is the per-block thread configuration.
cuda_hello<<<dimGrid,dimBlock>>>(); // "<<< >>>()" is the GPU launch notation
//kernel launches are asynchronous; synchronize so the device printf output
//appears before "Fim"
cudaDeviceSynchronize();
printf("Fim\n");
return 0;
}
|
14,120 | #include <stdio.h>
//Pairwise Euclidean distances: C[ix*nx2 + iy] = ||row ix of A - row iy of B||,
//where A is nx1 x dim and B is nx2 x dim (row-major, inferred from indexing).
__global__ void kernel_matrix_dist(float *A, float *B, float *C, const int nx1, const int nx2, const int dim) {
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int idx = iy + nx2 * ix; // index in C
    if((ix < nx1) && (iy < nx2)) {
        // FIX: accumulate in a register instead of read-modify-writing
        // global C on every iteration, which also silently required the
        // caller to pre-zero C.
        float acc = 0.0f;
        for(int i = 0; i < dim; i++) {
            float diff = A[ix * dim + i] - B[iy * dim + i];
            acc += diff * diff;
        }
        C[idx] = sqrtf(acc);
    }
}
//Shared-memory variant of the pairwise-distance kernel: tiles of B are
//staged in dynamic shared memory (one float per thread, size == blockDim.x)
//and each thread contributes its A-dimension's squared difference to C via
//atomicAdd. Assumes C is zero-initialized by the caller.
//NOTE(review): the final sqrtf pass only block-synchronizes; atomicAdds
//from OTHER blocks into the same C rows are not ordered by it -- confirm
//a single-block launch or a separate sqrt pass is intended.
__global__ void kernel_matrix_dist_sharedMem(float *A, float *B, float *C, const int nx1, const int nx2, const int dim) {
    extern __shared__ float sharedPoints[]; // length == blockSize (i.e. blockDim.x here)
    int idx = threadIdx.x + blockIdx.x * blockDim.x; // index in A
    int numInA = idx / dim;
    int dimInA = idx % dim;
    // trip count is uniform across the block, so barriers inside are safe
    for(int currentBlock = 0; currentBlock < (nx2*dim/blockDim.x)+1; currentBlock++) {
        // move a block of elements from B to shared memory in each iteration
        if((threadIdx.x + currentBlock * blockDim.x) < (nx2 * dim)) {
            sharedPoints[threadIdx.x] = B[threadIdx.x + currentBlock * blockDim.x];
        }
        __syncthreads(); // wait for finishing moving to shared memory
        if(idx < (nx1 * dim)) {
            // compute distance in corresponding dimension between this A_point to all buffered B_points in shared memory
            for(int i = 0; i < blockDim.x; i++) {
                int idxInB = i + currentBlock * blockDim.x;
                if(idxInB >= (nx2 * dim)) break;
                int numInB = idxInB / dim;
                int dimInB = idxInB % dim;
                if(dimInA == dimInB) {
                    int idxInC = numInB + nx2 * numInA;
                    // necessary to have atomic operation here otherwise random errors introduced
                    atomicAdd(&C[idxInC], (A[idx] - sharedPoints[i]) * (A[idx] - sharedPoints[i]));
                }
            }
        }
        // BUG FIX: barrier before the next iteration overwrites sharedPoints;
        // without it, fast threads reload the tile while slower threads of
        // the same block are still reading the previous one (data race).
        __syncthreads();
    }
    __syncthreads(); // wait for finishing adding all dimensions for all points in C array
    // thread with dimInA==0 do sqrtf() for the corresponding row in C
    if(idx < (nx1 * dim) && dimInA == 0) {
        for(int i = 0; i < nx2; i++) {
            C[i + numInA * nx2] = sqrtf(C[i + numInA * nx2]);
        }
    }
}
14,121 | #include "includes.h"
//Shifts every coordinate by half the grid extent, recentering the origin.
//Layout assumption: coords holds two planes of dim_x*dim_y values, the
//first shifted by dim_y/2 and the second by dim_x/2 -- TODO(review):
//confirm which plane holds y vs. x coordinates against the callers.
__global__ void recenter_2D(float* coords, size_t dim_y, size_t dim_x){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < dim_x * dim_y){
        coords[index] += (float)dim_y/2.0;
        coords[index + dim_x*dim_y] += (float)dim_x/2.0;
    }
    // FIX: removed the trailing __syncthreads(); a barrier as the last
    // statement of a kernel has no observable effect.
}
14,122 | #include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#define TILE_SIZE 512
#define WARP_SIZE 32
extern "C" void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized);
extern "C" void ELLmatvecmult(int N, int num_cols_per_row , int * indices, float * data , float * x , float * y);
/**
* Custom CUDA error check wrapper.
*/
#define checkCUDAError() do { \
cudaError_t error = cudaGetLastError(); \
if (error != cudaSuccess) { \
printf("(CUDA) %s", cudaGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
/*
 * Scalar CSR SpMV kernel: y = A*x with one thread per matrix row.
 * starts[row]..starts[row+1] delimits row `row`'s entries in column/data.
 * Expects a 1-D launch with blockDim.x == TILE_SIZE.
 */
__global__ void k_csr_mat_vec_mm(int *starts, int* column, float *data, int num_rows, float *x, float* y) {
    int row = blockIdx.x * TILE_SIZE + threadIdx.x;
    if (row < num_rows)
    {
        int start = starts[row];
        int end = starts[row+1];
        // BUG FIX: the accumulator was declared `int`, truncating every
        // float partial product; accumulate in float.
        float res = 0.0f;
        for (int j=start; j<end; ++j)
        {
            res += data[j] * x[column[j]];
        }
        y[row] = res;
    }
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
//Unimplemented stub: intended to assign one warp per row (vectorized CSR)
//with a warp-level reduction of the partial products. Currently leaves y
//untouched.
__global__ void k_csr2_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the vectorized CSR kernel
}
/**
* Cuda kernel for: ELL(A)x = y
*/
//Unimplemented stub for ELLPACK SpMV (y = A*x); currently leaves y untouched.
__global__ void k_ell_mat_vec_mm ( int N, int num_cols_per_row , int * indices, float * data , float * x , float * y ) {
//NYI: ellpack kernel
}
/**
* Perform: CSR(A)x = y
*/
//Host wrapper for CSR SpMV y = A*x: copies the CSR arrays (N rows, nnz
//non-zeros) and the vectors to the device, launches the scalar or the
//vectorized kernel, and copies y back.
//NOTE(review): the vectorized branch still launches with dim3(0,0,0) --
//that launch fails (caught by checkCUDAError) until the TODO is resolved.
void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized) {
int *ptr_d, *J_d;
float *Val_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
cudaMalloc((void **) &ptr_d, (N+1) * sizeof(int));
checkCUDAError();
cudaMemcpy(ptr_d, ptr, (N+1) * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &J_d, nnz * sizeof(int));
checkCUDAError();
cudaMemcpy(J_d, J, nnz * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &Val_d, nnz * sizeof(float));
checkCUDAError();
cudaMemcpy(Val_d, Val, nnz * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
//y is fully overwritten by the kernels; this H2D copy is not strictly needed
cudaMemcpy(y_d, y, N * sizeof(float) , cudaMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
if (bVectorized) {
//TODO: define grid and block size correctly
dim3 grid(0, 0, 0);
dim3 block(0, 0, 0);
k_csr2_mat_vec_mm <<< grid, block >>> (ptr_d, J_d, Val_d, N, x_d, y_d);
} else {
//one thread per row, TILE_SIZE threads per block, ceil(N/TILE_SIZE) blocks
dim3 grid((N - 1)/TILE_SIZE + 1, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr_mat_vec_mm <<< grid, block >>> (ptr_d, J_d, Val_d, N, x_d, y_d);
}
checkCUDAError();
/************************/
/* copy back */
/************************/
cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
cudaFree(ptr_d);
cudaFree(J_d);
cudaFree(Val_d);
cudaFree(x_d);
cudaFree(y_d);
}
/**
* Perform: ELL(A)x = y
*/
//Host wrapper for ELLPACK SpMV y = A*x (N rows, num_cols_per_row entries
//per row in indices/data). The kernel launch is still commented out, so
//this currently round-trips y through the device unchanged.
void ELLmatvecmult(int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y) {
int *indices_d;
float *data_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
cudaMalloc((void **) &indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
cudaMemcpy(indices_d, indices, N * num_cols_per_row * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
cudaMemcpy(data_d, data, N * num_cols_per_row * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(y_d, y, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
//NYI: define grid and block size
//k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
/************************/
/* copy back */
/************************/
cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
cudaFree(indices_d);
cudaFree(data_d);
cudaFree(x_d);
cudaFree(y_d);
}
|
14,123 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// CUDA includes
#include <cuda.h>
#include <cuda_runtime.h>
//Input size
#define SIZE 16384
//2d index to 1d index
#define idx(x,y,z) x*y + z
//TOTAL size of a matrix
size_t TOTAL_SIZE = SIZE * SIZE;
//Device allocated matrices
double *d_mat, *d_matT, *d_matSym;
//Host allocated matrix
double *h_mat;
//Copies the SIZE x SIZE device matrix into the global host buffer h_mat and
//prints it row by row. `num` is unused; kept for interface compatibility.
void printMatrix(double *d_mat, int num) {
    if (SIZE > 16) {
        printf("Too big of an input to be printed!\n");
        return;
    }
    cudaMemcpy(h_mat, d_mat, sizeof(double) * TOTAL_SIZE, cudaMemcpyDeviceToHost);
    int i, j;
    printf("MAT:\n");
    for (i=0; i<SIZE; ++i) {
        for (j=0; j<SIZE; ++j) {
            printf("%f ", h_mat[i * SIZE + j]);
        }
        // BUG FIX: terminate each row; previously the whole matrix ran
        // together on a single output line.
        printf("\n");
    }
}
// Allocates the three device matrices and the single host staging buffer.
void allocate() {
    const size_t bytes = sizeof(double) * TOTAL_SIZE;
    cudaMalloc((void **)&d_mat, bytes);
    cudaMalloc((void **)&d_matT, bytes);
    cudaMalloc((void **)&d_matSym, bytes);
    h_mat = (double *)malloc(bytes);
}
// Fills d_mat so that every element holds its own flattened row-major index.
__global__ void initialize(double *d_mat) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= SIZE || col >= SIZE) return;
    d_mat[idx(row, SIZE, col)] = idx(row, SIZE, col);
}
// d_matT = transpose(d_mat), one element per thread.
__global__ void transpose(double *d_mat, double *d_matT) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= SIZE || col >= SIZE) return;
    d_matT[idx(col, SIZE, row)] = d_mat[idx(row, SIZE, col)];
}
// Naive dense product d_matSym = d_mat * d_matT; each thread computes one
// output element by a full row-by-column dot product.
__global__ void matrixMultiply(double *d_mat, double *d_matT, double *d_matSym) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= SIZE || col >= SIZE) return;
    double acc = 0.0;
    for (int k = 0; k < SIZE; ++k) {
        acc += d_mat[idx(row, SIZE, k)] * d_matT[idx(k, SIZE, col)];
    }
    d_matSym[idx(row, SIZE, col)] = acc;
}
// Builds a symmetric matrix as mat * mat^T: initialize -> transpose -> multiply.
// TILE_DIM is the square thread-block edge length.
void calculateSymmetricMatrix(int TILE_DIM) {
    // fix: the original grid used SIZE/TILE_DIM, which truncates and leaves the
    // tail uncovered whenever SIZE is not a multiple of TILE_DIM; round up
    // instead (the kernels already bounds-check).
    int blocks = (SIZE + TILE_DIM - 1) / TILE_DIM;
    dim3 grid_dim(blocks, blocks, 1);
    dim3 blk_dim(TILE_DIM, TILE_DIM, 1);
    // This will generate a symmetric matrix where mat(i,j) = mat(j,i).
    initialize<<<grid_dim, blk_dim>>>(d_mat);
    cudaDeviceSynchronize();
    transpose<<<grid_dim, blk_dim>>>(d_mat, d_matT);
    cudaDeviceSynchronize();
    matrixMultiply<<<grid_dim, blk_dim>>>(d_mat, d_matT, d_matSym);
    cudaDeviceSynchronize();
}
// Releases every buffer created by allocate().
void deallocate() {
    cudaFree(d_mat);
    cudaFree(d_matT);
    cudaFree(d_matSym);
    free(h_mat);
}
// Runs N timed iterations of the symmetric-matrix pipeline.
// argv[1] (optional): iteration count; defaults to 1.
int main(int argc, char **argv) {
    int N = 1, TILE_DIM = 32;
    struct timeval start, stop;
    double execTime = 0.0;
    if (argc > 1) { // Number of iterations
        N = atoi(argv[1]);
    }
    printf("\n%d x %d Matrix\n\n", SIZE, SIZE);
    allocate();
    printf("\nExecution times(sec)\n");
    for (int i = 0; i < N; ++i) {
        gettimeofday(&start, NULL);
        calculateSymmetricMatrix(TILE_DIM);
        gettimeofday(&stop, NULL);
        // Accumulate wall-clock seconds; the printed value is cumulative.
        execTime += (double)(stop.tv_usec - start.tv_usec) / 1000000
                  + (double)(stop.tv_sec - start.tv_sec);
        printf("At %d\t%.8f s\n", i, execTime);
    }
    deallocate();
    return 0;
}
|
14,124 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <cstdlib>
// Holds a 2-D coordinate table as a pointer-to-pointer array.
// NOTE(review): unused in this translation unit — presumably consumed elsewhere.
struct Test
{
    double** Cord;
};
// Writes (a + b)^2 into *c; intended for a single-thread launch.
__global__ void kernel(int a, int b, int *c)
{
    const int s = a + b;
    *c = s * s;
}
// Accumulates the dot product of A and B into *C, one term per thread.
// fix: the original `*C += A[i] * B[i]` is a read-modify-write on the same
// global address from many threads at once — a data race that drops terms.
// atomicAdd makes the accumulation correct (atomicAdd on double requires SM60+).
__global__ void VecAdd(const double* A, const double* B, double *C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        atomicAdd(C, A[i] * B[i]);
}
// Accumulates length0 slices of B (each length1 x length2, stored back to
// back) element-wise into the single length1 x length2 matrix A.
// NOTE(review): every launched thread executes the full triple loop and writes
// the same A elements — this is only safe when launched <<<1,1>>>; any wider
// launch both duplicates work and races on A. Confirm the launch configuration.
__global__ void SumMatrix(double *A, double* B, int length0, int length1, int length2)
{
    /*const int N = 30000;
    int idx = blockDim.x * blockIdx.x + threadIdx.x;*/
    const int blockSize = length1 * length2; // elements per slice of B
    //printf(" %d \n", 2);
    //printf("blockSize \r\n");
    //if (idx < N) {
    for (int i = 0; i < length0; i++) {
        for (int y = 0; y < length1; y++) {
            for (int x = 0; x < length2; x++) {
                //*A[y * length2 + x] += B[(i * blockSize) + (y * length2 + x)];
                A[y * length2 + x] += B[(i * blockSize) + (y * length2 + x)];
            }
        }
    }
    //}
}
// NOTE: despite the name, this kernel performs element-wise ADDITION,
// C = A + B over an N x N matrix — the row-by-column multiply loop was left
// commented out in the original.
__global__ void matrixMultiplicationKernel(double* A, double* B, double* C, int N) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < N && col < N) {
        const int at = row * N + col;
        C[at] = A[at] + B[at];
    }
}
// Reduces Z stacked X*Y matrices in A into the single X*Y matrix C:
// C[row][col] += sum over slice i of A[i][row][col].
// fix: the original loop never used its index `i` and offset by ROW instead of
// the slice number, so it added the SAME element Z times. Each slice is now
// addressed by i * (X * Y). TODO(review): confirm the intended slice stride.
__global__ void matrixSum(double* A, double* C, int X, int Y, int Z) {
    int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    int COL = blockIdx.x * blockDim.x + threadIdx.x;
    if (ROW < Y && COL < X) {
        double acc = 0.0;
        for (int i = 0; i < Z; i++) {
            acc += A[(i * X * Y) + (ROW * X + COL)];
        }
        C[ROW * X + COL] += acc;
    }
}
// Adds (2/(1+exp(-2*a[block])) - 1) — the tanh of a[block] in sigmoid form —
// into b[block], repeated z times. One array element per thread block.
__global__ void add_threads(double *a, double *b, int z) {
    const unsigned int slot = blockIdx.x;
    for (unsigned int pass = 0; pass < z; ++pass) {
        b[slot] += (2 / (1 + exp(-2 * a[slot]))) - 1;
    }
}
#define N 1000
// b[i] = 2 * a[i] for the first N elements, one element per block.
__global__
void add(int *a, int *b) {
    const int i = blockIdx.x;
    if (i >= N) return;
    b[i] = 2 * a[i];
}
// The original demo body — allocate two int arrays on host and device, launch
// add<<<N, 1>>> to double each element, copy the result back, print it, and
// free the device buffers — was entirely commented out upstream. This program
// therefore intentionally does nothing.
int main()
{
    return 0;
} |
14,125 | #include "includes.h"
#ifdef TIME
#define COMM 1
#elif NOTIME
#define COMM 0
#endif
#define MASK_WIDTH 5
#define TILE_WIDTH 32
#define GPU 1
#define COMMENT "skeletization_GPU"
#define RGB_COMPONENT_COLOR 255
// One 24-bit RGB pixel.
typedef struct {
    unsigned char red, green, blue;
} PPMPixel;
// A PPM image: width (x), height (y), and a flat pixel buffer.
typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;
// A simple integer coordinate pair.
typedef struct {
    int x, y;
} Par;
double time_total;
// Zeroes GrayScale_ wherever the change mask d_changing1 is 1; the one-pixel
// image border is left untouched.
__global__ void Update(int *GrayScale_, int *d_changing1, int linhas, int colunas)
{
    const int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    const int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    const int at = row * colunas + col;
    if (row > 0 && col > 0 && row < linhas - 1 && col < colunas - 1)
    {
        if (d_changing1[at] == 1)
            GrayScale_[at] = 0;
    }
} |
14,126 | #include<cstdio>
#include<vector>
#include<string>
#include<cuda_runtime.h>
// #include <thrust/sequence.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h> // thrust::host/device
#define BLOCK_SIZE 32
using namespace std;
float average(const vector<float> &timing) {
double avg = 0;
for(vector<float>::const_iterator it = timing.begin(); it != timing.end(); it++) avg += *it;
avg /= timing.size();
//return us
avg /= 1000;
return avg;
}
// Prints the first ten and last ten entries of `data` labelled by `flag`.
// Assumes len >= 10.
void print_info(int *data, int len, std::string flag) {
    const char *label = flag.c_str();
    printf("%s frist ten:\n", label);
    for (int k = 0; k < 10; k++) {
        printf("%d ", data[k]);
    }
    printf("\n");
    printf("%s last ten:\n", label);
    for (int k = len - 10; k < len; k++) {
        printf("%d ", data[k]);
    }
    printf("\n");
}
// In-place Blelloch exclusive scan of A[0..N-1].
// Preconditions: N is a power of two and the kernel is launched as a SINGLE
// block of at least N threads (__syncthreads cannot order work across blocks).
// fix: the original made threads with thx >= N `return` before the barriers —
// undefined behavior, since every thread in the block must reach each
// __syncthreads(); the work is now guarded instead of the thread exiting.
// fix: powf/log2f float math replaced with exact integer shifts.
__global__ void exclusive_scan(int *A, int N) {
    int thx = blockDim.x * blockIdx.x + threadIdx.x;
    // floor(log2(N)) via shifts — exact, no float rounding.
    int logN = 0;
    while ((1 << (logN + 1)) <= N) logN++;
    // up-sweep (reduce) phase
    for (int i = 0; i < logN; i++) {
        int d2 = 1 << (i + 1);
        int dd2 = d2 >> 1;
        if (thx < N && thx % d2 == d2 - 1) {
            A[thx] += A[thx - dd2];
        }
        __syncthreads();
    }
    // clear the root before the down-sweep (exclusive scan)
    if (thx == 0) {
        A[N - 1] = 0;
    }
    __syncthreads();
    // down-sweep phase
    for (int i = logN - 1; i >= 0; i--) {
        int d2 = 1 << (i + 1);
        int dd2 = d2 >> 1;
        if (thx < N && thx % d2 == d2 - 1) {
            int tmp = A[thx - dd2];
            A[thx - dd2] = A[thx];
            A[thx] += tmp;
        }
        __syncthreads();
    }
}
// Compares the hand-rolled single-block exclusive scan against thrust's
// exclusive_scan and reports the average kernel time.
int main() {
    int len_a = BLOCK_SIZE;
    int numThreads = BLOCK_SIZE;
    int numBlocks = (len_a + numThreads - 1) / numThreads;
    int* A = (int*) malloc(len_a * sizeof(int));
    int* th_h_A = (int*) malloc(len_a * sizeof(int));
    // fill inputs 0..len_a-1
    for (int i = 0; i < len_a; i++) A[i] = i;
    for (int i = 0; i < len_a; i++) th_h_A[i] = i;
    print_info(A, len_a, "A");
    int *d_A, *th_A;
    cudaMalloc((void**)&d_A, len_a * sizeof(int));
    cudaMalloc((void**)&th_A, len_a * sizeof(int));
    cudaMemcpy(d_A, A, len_a * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(th_A, A, len_a * sizeof(int), cudaMemcpyHostToDevice);
    // reference result via thrust
    thrust::exclusive_scan(thrust::device, th_A, th_A + len_a, th_A, 0);
    cudaMemcpy(th_h_A, th_A, len_a * sizeof(int), cudaMemcpyDeviceToHost);
    print_info(th_h_A, len_a, "th_h_A");
    vector<float> times;
    int loops = 1;
    for (int i = 0; i < loops; i++) {
        cudaEvent_t start, end;
        cudaEventCreate(&start);
        cudaEventCreate(&end);
        cudaEventRecord(start, NULL);
        exclusive_scan<<<numBlocks, numThreads>>>(d_A, len_a);
        cudaEventRecord(end, NULL);
        cudaEventSynchronize(end);
        float time = 0;
        cudaEventElapsedTime(&time, start, end);
        times.push_back(time);
        // fix: events were leaked each iteration
        cudaEventDestroy(start);
        cudaEventDestroy(end);
    }
    printf("exclusive scan avg time:%lf\n", average(times));
    // fix: the copy size used sizeof(float) for an int buffer (same width on
    // common platforms, but type-incorrect and fragile).
    cudaMemcpy(A, d_A, len_a * sizeof(int), cudaMemcpyDeviceToHost);
    print_info(A, len_a, "A");
    // fix: release all allocations before exit
    cudaFree(d_A);
    cudaFree(th_A);
    free(A);
    free(th_h_A);
    return 0;
} |
14,127 | // CS 4402 - Dana Zagar - 250790176
#include <cstdio>
#include <ctime>
using namespace std;
// A small prime number to prevent overflow and make verification feasible.
const int MAX_COEFF = 103;
// Print polynomial output.
// Prints `range` coefficients two columns wide, followed by a blank line.
void print_polynomial(int* poly, int range)
{
    for (int k = 0; k < range; k++)
        printf("%2d ", poly[k]);
    printf("\n\n");
}
// Generates a random polynomial of size n.
// Fills p[0..n-1] with random coefficients in [0, MAX_COEFF).
void random_polynomial(int* p, int n)
{
    for (int k = 0; k < n; ++k)
        p[k] = rand() % MAX_COEFF;
}
// Serial C function to find reduced polynomial product.
// For verification purposes.
// Reference CPU product of two degree (size-1) polynomials with coefficients
// reduced mod MAX_COEFF. `ans` must hold 2*size-1 zero-initialized entries.
// Used to verify the GPU result.
void multiply_polynomials_serial(int *x, int *y, int size, int *ans)
{
    for (int a = 0; a < size; a++)
        for (int b = 0; b < size; b++)
            ans[a + b] = (ans[a + b] + x[a] * y[b]) % MAX_COEFF;
}
// First CUDA kernel to calculate the product terms over two given polynomials
// of size n, given n thread-blocks and n threads per.
// prods[b*n + t] = x[b] * y[t] mod MAX_COEFF — one block per x term, one
// thread per y term (launch <<<n, n>>>).
__global__ void calculate_products(int *prods, int *x, int *y, size_t n)
{
    const int cell = blockIdx.x * blockDim.x + threadIdx.x;
    prods[cell] = (x[blockIdx.x] * y[threadIdx.x]) % MAX_COEFF;
}
// Second CUDA kernel to reduce the products by combining like terms on each
// diagonal of the "2d" product matrix.
// Sums each anti-diagonal of the n x n product matrix into one output
// coefficient, mod MAX_COEFF. Launched <<<2n-1, 1>>>: one single-threaded
// block per diagonal, i.e. per output coefficient.
__global__ void reduce_polynomial(int *prods, int *ans, size_t n)
{
    int i, j;
    // Envision the product array as a 2d matrix tilted like a diamond.
    // Each block represents a row of the diamond, i.e. a diagonal.
    // If the block index is within the first half of the diamond, the
    // block index dictates the row index.
    if (blockIdx.x <= (2*n-2)/2)
    {
        i = blockIdx.x, j = 0;
    }
    // Otherwise, the block index dictates the column index.
    else
    {
        i = n-1, j = (blockIdx.x % n) + 1;
    }
    // Sum over the diagonal given by the block index (walk up-right until
    // either edge of the matrix is reached).
    while (i >= 0 && j < n)
    {
        ans[blockIdx.x] = (ans[blockIdx.x] + prods[i*n + j]) % MAX_COEFF;
        i--;
        j++;
    }
}
// Driver: multiplies two random degree n-1 polynomials mod MAX_COEFF on the
// GPU (product kernel + diagonal reduction) and verifies against the serial
// CPU implementation.
int main() {
    srand(time(NULL));
    int exponent;
    // Input the number of terms.
    printf("Input the desired number of terms in the polynomials. Enter an exponent on 2 [valid from 1-10] to define 2^input terms: ");
    scanf("%d", &exponent);
    if (exponent < 1 || exponent > 10)
    {
        printf("Invalid input. Program will terminate.\n\n");
        return 0;
    }
    int n = 1 << exponent; // Number of terms is 2^exponent.
    printf("%d terms; input polynomials are of degree %d.\n\n", n, n-1);
    int *X = NULL;     // First polynomial of degree n-1.
    int *Y = NULL;     // Second polynomial of degree n-1.
    int *P = NULL;     // Interim products (n x n).
    int *Poly = NULL;  // Final reduced product (2n-1 terms).
    int *PolyV = NULL; // Verification answer.
    X = new int[n];
    Y = new int[n];
    P = new int[n*n];
    Poly = new int[2*n-1];
    PolyV = new int[2*n-1];
    // Initialize values.
    random_polynomial(X, n);
    random_polynomial(Y, n);
    for (int i = 0; i < n*n; i++)
    {
        P[i] = 0;
    }
    for (int i = 0; i < 2*n-1; i++)
    {
        Poly[i] = 0;
        PolyV[i] = 0;
    }
    // Step 1: Calculating products.
    int *Xd, *Yd, *Pd;
    cudaMalloc((void **)&Xd, sizeof(int)*n);
    cudaMalloc((void **)&Yd, sizeof(int)*n);
    cudaMalloc((void **)&Pd, sizeof(int)*n*n);
    cudaMemcpy(Xd, X, sizeof(int)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(Yd, Y, sizeof(int)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(Pd, P, sizeof(int)*n*n, cudaMemcpyHostToDevice);
    calculate_products<<<n, n>>>(Pd, Xd, Yd, n);
    // Step 2: Reducing like terms.
    int *Polyd;
    // fix: `sizeof(int)*2*n-1` parses as (sizeof(int)*2*n) - 1, which made the
    // memcpys below read/write past the end of the (2*n-1)-element host
    // arrays. Parenthesize to get the intended sizeof(int)*(2*n-1) bytes.
    cudaMalloc((void **)&Polyd, sizeof(int)*(2*n-1));
    cudaMemcpy(Polyd, Poly, sizeof(int)*(2*n-1), cudaMemcpyHostToDevice);
    reduce_polynomial<<<2*n-1, 1>>>(Pd, Polyd, n);
    cudaMemcpy(Poly, Polyd, sizeof(int)*(2*n-1), cudaMemcpyDeviceToHost);
    // Print input, output.
    printf("CUDA Program Output\n\n");
    printf("First input polynomial:\n");
    print_polynomial(X, n);
    printf("Second input polynomial:\n");
    print_polynomial(Y, n);
    printf("Result:\n");
    print_polynomial(Poly, 2*n-1);
    // Step 3: Verify using serial C function.
    printf("Verification with Serial C Output\n\n");
    multiply_polynomials_serial(X, Y, n, PolyV);
    printf("Result:\n");
    print_polynomial(PolyV, 2*n-1);
    // Free memory.
    delete [] X;
    delete [] Y;
    delete [] P;
    delete [] Poly;
    delete [] PolyV;
    cudaFree(Xd);
    cudaFree(Yd);
    cudaFree(Pd);
    cudaFree(Polyd);
    return 0;
}
|
14,128 | //#include "../common/common.h"
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <time.h>
extern "C" void sumArraysOnGPU1(float*d_A, float*d_B, float*d_C, float *h_A, float *h_B, size_t nBytes, float *gpuRef, float *hostRef);
clock_t t1, t2, t3;
// Element-wise C = A + B over N floats, one element per thread.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N) return;
    C[tid] = A[tid] + B[tid];
}
// Aborts the program when a CUDA status code is nonzero.
// NOTE(review): not wrapped in do { } while (0), so `if (...) CHECK(x); else`
// at a call site would mis-parse; `status` is also evaluated twice.
#define CHECK(status) \
{ \
if (status != 0) \
{ \
std::cout << "Cuda failure: " << status; \
abort(); \
} \
}
// Launches sumArraysOnGPU over 1<<24 elements using 1024-thread blocks and
// prints wall-clock timings around the launch.
// NOTE(review): the kernel launch is asynchronous and the synchronize call is
// commented out, so t3-t2 measures launch overhead rather than kernel
// runtime — confirm whether that is intentional.
// NOTE(review): h_A/h_B/gpuRef/hostRef/nBytes are unused here; d_A/d_B/d_C
// must have been allocated by the caller for at least 1<<24 floats.
void sumArraysOnGPU1(float*d_A, float*d_B, float*d_C, float *h_A, float *h_B, size_t nBytes, float *gpuRef, float *hostRef)
{
    t1=clock();
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // set up data size of vectors
    int nElem = 1 << 24;//24
    printf("Vector size %d\n", nElem);
    int iLen = 1024;//512
    dim3 block (iLen);
    dim3 grid ((nElem + block.x - 1) / block.x);
    t2=clock();
    sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
    //sumArraysOnGPU<<<32768, block>>>(d_A, d_B, d_C, nElem);
    //CHECK(cudaDeviceSynchronize());
    //cudaDeviceSynchronize();
    t3=clock();
    // check kernel error
    CHECK(cudaGetLastError()) ;
    printf("sumArraysOnGPU1 total Time: %f sec\n", (double)(t3-t1)/(CLOCKS_PER_SEC));
    printf("sumArraysOnGPU Time elapsed: %f sec\n", (double)(t3-t2)/(CLOCKS_PER_SEC));
}
|
14,129 | #include "includes.h"
// Pairwise comparison matrix over the first nd entries of a:
// sm[i + j*nd] = 1.0f if a[i] > a[j], else 0.0f.
__global__ void sortMatrix ( const int nd, const float *a, float *sm ) {
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    const int col = threadIdx.y + blockDim.y * blockIdx.y;
    if ( row < nd && col < nd ) {
        sm[row + col * nd] = ( a[row] > a[col] );
    }
} |
14,130 | //pass
//--gridDim=6624 --blockDim=256
typedef unsigned int uint;
// Uniform-add step of a multi-block scan: adds the per-block offset
// d_Buffer[blockIdx.x] to every component of the uint4 owned by this thread.
__global__ void uniformUpdate(
    uint4 *d_Data,
    uint *d_Buffer
)
{
    __shared__ uint buf;
    uint pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (threadIdx.x == 0)
    {
        buf = d_Buffer[blockIdx.x];
    }
    // fix: the barrier was commented out, letting threads other than 0 read
    // `buf` before it was written — a shared-memory data race.
    __syncthreads();
    uint4 data4 = d_Data[pos];
    data4.x += buf;
    data4.y += buf;
    data4.z += buf;
    data4.w += buf;
    d_Data[pos] = data4;
}
|
14,131 | #include "includes.h"
// Pass-through gradient: dZ = dA elementwise over Z_x_dim * Z_y_dim entries.
// Z itself is not read by this kernel.
__global__ void softmaxActivationBackprop(float* Z, float* dA, float* dZ, int Z_x_dim, int Z_y_dim){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= Z_x_dim * Z_y_dim) return;
    dZ[i] = dA[i];
} |
14,132 |
// Integer ceiling of x / y (y must be nonzero).
// fix: the usual (x + y - 1) / y form wraps around for x near UINT_MAX;
// the div/mod form is exact for all inputs.
inline __device__ unsigned int ceilInt(unsigned int x, unsigned int y)
{
    return x / y + (x % y != 0u ? 1u : 0u);
}
// Cross-block fix-up for a two-level scan: adds the preceding block's total
// (blockSum[blockIdx.x - 1]) to both scan elements this thread owns.
// Block 0 needs no offset and does nothing.
__global__ void blockAdd(unsigned int *prefixSum, unsigned int *blockSum,
unsigned int arraySize)
{
    const unsigned int block = blockIdx.x;
    if (block == 0u) return;
    const unsigned int width = blockDim.x;
    const unsigned int lo = 2u * block * width + threadIdx.x;
    const unsigned int hi = lo + width;
    const unsigned int offset = blockSum[block - 1u];
    if (lo < arraySize)
    {
        prefixSum[lo] += offset;
        if (hi < arraySize)
        {
            prefixSum[hi] += offset;
        }
    }
}
// Two-level Blelloch prefix sum: each block scans its tile of 2*blockDim.x
// elements of `input` into `output` and records the tile total in
// blockSum[blockIdx.x] for a later cross-block pass (see blockAdd).
// The shared buffer is addressed as idx + idx/nrMB, i.e. padded every nrMB
// entries — presumably to sidestep shared-memory bank conflicts; confirm nrMB
// matches the hardware bank count before changing it.
__global__ void prefixSum(unsigned int *input, unsigned int *output, unsigned int *blockSum,
unsigned int arraySize)
{
    unsigned int idx = threadIdx.x;
    unsigned int dix = blockDim.x;
    unsigned int idg = 2u*blockIdx.x*dix + idx;
    const unsigned int nrMB = 32u;
    unsigned int idxDouble = 2u*idx;
    unsigned int idgL, idgH, idxL, idxH;
    unsigned int blockOffsetL, blockOffsetH;
    __shared__ unsigned int buffer[1024u + 2u*nrMB];
    // Each thread handles two elements: a low one and one blockDim.x higher.
    idgL = idg;
    idgH = idg + dix;
    idxL = idx;
    idxH = idx + dix;
    blockOffsetL = idxL/nrMB;
    blockOffsetH = idxH/nrMB;
    // Stage the tile into (padded) shared memory; out-of-range slots unused.
    if (idgL < arraySize)
    {
        buffer[idxL + blockOffsetL] = input[idgL];
        if (idgH < arraySize)
        {
            buffer[idxH + blockOffsetH] = input[idgH];
        }
    }
    __syncthreads();
    // Up-sweep (reduce) phase.
    for (unsigned int offset = 1u; offset < 2u*dix; offset *= 2u)
    {
        idxH = offset*(idxDouble + 2u) - 1u;
        if (idxH < 2u*dix)
        {
            idxL = offset*(idxDouble + 1u) - 1u;
            idxH += idxH/nrMB;
            idxL += idxL/nrMB;
            buffer[idxH] += buffer[idxL];
        }
        __syncthreads();
    }
    // Down-sweep phase (inclusive-style propagation across partial sums).
    for (unsigned int offset = powf(2.0f, ceilf(log2f(ceilInt(dix, 2u)))); offset >= 1u; offset /= 2u)
    {
        idxH = offset*(idxDouble + 3u) - 1u;
        if (idxH < 2u*dix)
        {
            idxL = offset*(idxDouble + 2u) - 1u;
            idxH += idxH/nrMB;
            idxL += idxL/nrMB;
            buffer[idxH] += buffer[idxL];
        }
        __syncthreads();
    }
    idxL = idx;
    idxH = idx + dix;
    // Write the scanned tile back out.
    if (idgL < arraySize)
    {
        output[idgL] = buffer[idxL + blockOffsetL];
        if (idgH < arraySize)
        {
            output[idgH] = buffer[idxH + blockOffsetH];
        }
    }
    // Record this tile's total (last valid element) for the fix-up pass.
    if (idx == dix - 1u && blockIdx.x < gridDim.x - 1u)
    {
        blockSum[blockIdx.x] = buffer[idxH + blockOffsetH];
    }
    else if (idgH == arraySize - 1u)
    {
        blockSum[blockIdx.x] = buffer[idxH + blockOffsetH];
    }
    else if (idgL == arraySize - 1u)
    {
        blockSum[blockIdx.x] = buffer[idxL + blockOffsetL];
    }
}
|
// A 3-component real vector stored as a plain array.
struct Real3
{
    double value[3];
};
// Pair of Real3 values returned together from device helpers.
struct ReturnType
{
    Real3 first, second;
};
// Bundles the two inputs into a ReturnType (pure pass-through copy).
__device__ ReturnType copy(const Real3& in1, const Real3& in2)
{
    ReturnType out = {in1, in2};
    return out;
}
// Copies inputs[idx] / inputs[idx+1] to outputs[idx-1] / outputs[idx], where
// idx = offsets[threadIdx.x].
// NOTE(review): as the original comment admits, the offsets are bogus —
// idx == 0 writes outputs[-1] and idx+1 can run past the input; callers must
// supply offsets that keep all four accesses in range.
__global__ void call_min(int* offsets, const Real3* inputs, Real3* outputs)
{
    int idx = offsets[threadIdx.x];
    // Copy with some bogus offsets
    auto result = copy(inputs[idx], inputs[idx + 1]);
    outputs[idx - 1] = result.first;
    outputs[idx] = result.second;
}
|
14,134 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel: repeatedly overwrites `comp`
   with expressions of the scalar inputs and prints the final value.
   The file header says "Do not modify", so only comments are added here. */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
    for (int i=0; i < var_1; ++i) {
        float tmp_1 = var_3 / var_4 - fabsf(fabsf((var_5 * (var_6 * (-1.6306E34f / +1.2050E-43f)))));
        comp = tmp_1 + (var_7 - +1.8558E-43f);
        comp += (+1.7264E-36f * expf(+1.8319E-41f + (var_8 / -1.9151E27f)));
        comp = (var_9 - +1.0505E-36f + (+1.2561E-15f - var_10 - +1.9349E-44f));
        if (comp >= (var_11 / var_12 * var_13)) {
            comp += (var_14 * (-0.0f - var_15 - (var_16 / var_17)));
            comp = +1.1422E-36f - var_18 + -1.9082E-36f;
        }
        for (int i=0; i < var_2; ++i) {
            comp = var_19 * coshf(+1.3859E35f / +1.6905E36f);
        }
        if (comp <= -1.5568E-42f / cosf((-1.5948E-37f * var_20 * var_21))) {
            comp += var_22 * var_23 / (var_24 - floorf((var_25 * -1.0570E34f)));
            float tmp_2 = +1.7322E-36f;
            comp += tmp_2 - +1.7309E35f + (+1.8079E34f / var_26);
            comp = (-1.3137E35f * expf(+1.8844E36f));
        }
    }
    printf("%.17g\n", comp);
}
// Allocates a 10-element float array with every slot set to v.
// The caller owns the returned buffer and must free() it.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    for (int k = 0; k < 10; ++k) {
        buf[k] = v;
    }
    return buf;
}
/* Parses 27 numeric command-line arguments and launches compute<<<1,1>>>.
   NOTE(review): argc is never validated — fewer than 27 arguments reads argv
   out of bounds (auto-generated harness; callers must supply every argument). */
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
    cudaDeviceSynchronize();
    return 0;
}
|
14,135 | __device__ void MD5Transform( unsigned long *s, unsigned long *d );
__constant__ unsigned int nPassword[32];
__constant__ unsigned int nPasswordLen;
__constant__ unsigned int cCharset[256];
__constant__ unsigned int nCharsetLen;
__constant__ unsigned long bHash[4];
// One brute-force candidate per thread: tid is decoded as a mixed-radix
// counter (base nCharsetLen, biased by the nPassword[] starting offsets) into
// a candidate password, a single-block MD5 is computed, and tid is written to
// *pdwResult only on a full 128-bit match with bHash.
// NOTE(review): only the low byte of the bit length is stored at Block[56],
// so this is only correct while nPasswordLen * 8 fits in one byte.
__global__ void MD5_Brute_GPU( unsigned long *pdwResult )
{
    unsigned int tid = blockIdx.x * 256 + threadIdx.x; // 256 threads/block, matches launcher
    unsigned int t, q, r;
    unsigned int MD5[16] = { 0 ,0,0}; // 64-byte message block, zero-initialized
    unsigned long State[4] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476 };
    unsigned char *Block = (unsigned char *)MD5;
    q = tid;
    int i = 0;
    // Decode tid digit by digit into charset characters.
    for( i = 0; i < nPasswordLen; i++ )
    {
        t = q + nPassword[i];
        q = t/nCharsetLen;
        r = t - q*nCharsetLen;
        Block[i] = cCharset[r];
    }
    // MD5 padding
    Block[ i] = 0x80;
    // MD5 length
    Block[56] = nPasswordLen * 8;
    MD5Transform( State, (unsigned long*) Block );
    // Nested ifs short-circuit word-by-word against the target digest.
    if( State[0] == bHash[0] )
    if( State[1] == bHash[1] )
    if( State[2] == bHash[2] )
    if( State[3] == bHash[3] )
    *pdwResult = tid;
}
// Host-callable wrapper: launches the brute-force kernel with `grid` blocks
// of 256 threads (the 256 matches the tid computation inside the kernel).
extern "C" void RunKernel_MD5( int grid, unsigned long *pdwResult )
{
    MD5_Brute_GPU<<< grid, 256 >>>( pdwResult );
}
/* Per-round left-rotation amounts, as specified by RFC 1321. */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
/* F, G, H and I are basic MD5 functions.
*/
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits.
*/
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
Rotation is separate from addition to prevent recomputation.
*/
#define FF(a, b, c, d, x, s, ac) { \
(a) += F ((b), (c), (d)) + (x) + (unsigned long)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) { \
(a) += G ((b), (c), (d)) + (x) + (unsigned long)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) { \
(a) += H ((b), (c), (d)) + (x) + (unsigned long)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) { \
(a) += I ((b), (c), (d)) + (x) + (unsigned long)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
/* MD5 basic transformation. Transforms state based on block.
*/
/* MD5 basic transformation. Transforms state based on block.
   Implements the four rounds of RFC 1321 over one 64-byte block `x`
   (16 little-endian 32-bit words), accumulating into state[0..3].
   NOTE(review): `unsigned long` is 64-bit on LP64 platforms while MD5 relies
   on 32-bit wraparound — confirm this only builds where unsigned long is
   32 bits, or that results are masked appropriately. */
__device__ void MD5Transform ( unsigned long *state, unsigned long *x )
{
    unsigned long a = state[0], b = state[1], c = state[2], d = state[3];
    /* Round 1 */
    FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */
    FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */
    FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */
    FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */
    FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */
    FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */
    FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */
    FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */
    FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */
    FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */
    FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */
    FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */
    FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */
    FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */
    FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */
    FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */
    /* Round 2 */
    GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */
    GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */
    GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
    GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */
    GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */
    GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */
    GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
    GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */
    GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */
    GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
    GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */
    GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */
    GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
    GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */
    GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */
    GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
    /* Round 3 */
    HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */
    HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */
    HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
    HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
    HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */
    HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */
    HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */
    HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
    HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
    HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */
    HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */
    HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */
    HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */
    HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
    HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
    HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */
    /* Round 4 */
    II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */
    II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */
    II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
    II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */
    II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
    II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */
    II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
    II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */
    II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */
    II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
    II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */
    II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
    II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */
    II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
    II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */
    II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */
    state[0] += a; state[1] += b; state[2] += c; state[3] += d;
}
|
14,136 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/time.h>
#define BLOCK_SIZE 512
// Checks the most recent CUDA error and exits with file/line context on
// failure. Call after each kernel launch or CUDA API call.
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
// Wall-clock interval timer built on gettimeofday.
typedef struct {
    struct timeval startTime;
    struct timeval endTime;
} Timer;
// Records the start of the interval.
void startTime(Timer* timer) {
    gettimeofday(&timer->startTime, NULL);
}
// Records the end of the interval.
void stopTime(Timer* timer) {
    gettimeofday(&timer->endTime, NULL);
}
// Seconds elapsed between startTime() and stopTime().
float elapsedTime(Timer timer) {
    double secs = (double)(timer.endTime.tv_sec - timer.startTime.tv_sec);
    double usecs = (double)(timer.endTime.tv_usec - timer.startTime.tv_usec);
    return (float)(secs + usecs / 1.0e6);
}
//Split based on each bit
__global__ void split(unsigned int*in_d, unsigned int *out_d, unsigned int in_size,int bit_shift) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int bit = 0;
if (index < in_size) {
bit = in_d[index] & (1 << bit_shift);// get the value on each bit
bit = (bit > 0) ? 1 : 0;
out_d[index] = 1 - bit;
}
}
// Block-local Blelloch exclusive scan: each block scans its own tile of
// 2*BLOCK_SIZE elements of `in` into `out`. The tile's total is written to
// sum[blockIdx.x] (when sum != NULL) so preScan can recursively turn the
// per-block scans into a global one via mergeScanBlocks.
__global__ void exclusiveScan(unsigned int *out, unsigned int* in, unsigned int*sum, unsigned int inputSize) {
    __shared__ unsigned int temp[2 * BLOCK_SIZE];
    int start = 2 * blockIdx.x * blockDim.x;
    int tx = threadIdx.x;
    int index = 0;
    // Stage the tile into shared memory, zero-padding past the input end.
    if (start + tx < inputSize) {
        temp[tx] = in[start + tx];
    } else {
        temp[tx] = 0;
    }
    if (start + tx + blockDim.x < inputSize) {
        temp[tx + blockDim.x] = in[start + tx + blockDim.x];
    } else {
        temp[tx + blockDim.x] = 0;
    }
    __syncthreads();
    // up-sweep phase
    int stride = 1;
    while(stride <= blockDim.x) {
        index = (tx + 1) * 2 * stride - 1;
        if (index < (2 * blockDim.x)) {
            temp[index] += temp[index - stride];
        }
        stride *= 2;
        __syncthreads();
    }
    // first store the reduction sum in sum array
    // make it zero since it is exclusive scan
    if (tx == 0) {
        // sum array contains the prefix sum of each
        // 2*blockDim blocks of element..
        if (sum != NULL) {
            sum[blockIdx.x] = temp[2*blockDim.x - 1];
        }
        temp[2*blockDim.x - 1] = 0;
    }
    //wait for thread zero to write
    __syncthreads();
    // down-sweep phase
    stride = blockDim.x;
    index = 0;
    unsigned int var = 0;
    while(stride > 0) {
        index = (2 * stride * (tx + 1)) - 1;
        if (index < 2 * blockDim.x) {
            var = temp[index];
            temp[index] += temp[index - stride];
            temp[index-stride] = var;
        }
        stride >>= 1;
        __syncthreads();
    }
    // write the temp array to output
    if (start + tx < inputSize) {
        out[start + tx] = temp[tx];
    }
    if(start + tx + blockDim.x < inputSize) {
        out[start + tx + blockDim.x] = temp[tx + blockDim.x];
    }
}
// merge the scan blocks
__global__ void mergeScanBlocks(unsigned int *sum, unsigned int* output, int opSize) {
int index = 2 * blockDim.x * blockIdx.x + threadIdx.x;
if (index < opSize) {
output[index] += sum[blockIdx.x];
}
if (index + blockDim.x < opSize) {
output[index + blockDim.x] += sum[blockIdx.x];
}
}
// Host driver: exclusive scan of in[0..in_size-1] into out. Small inputs
// (<= 2*BLOCK_SIZE) are handled by a single exclusiveScan launch; larger
// inputs recurse on the per-block totals and merge them back in.
void preScan(unsigned int *out, unsigned int *in, unsigned int in_size)
{
    // Each scan block covers 2*BLOCK_SIZE elements, hence the two roundings.
    unsigned int numBlocks1 = in_size / BLOCK_SIZE;
    if (in_size % BLOCK_SIZE) numBlocks1++;
    int numBlocks2 = numBlocks1 / 2;
    if(numBlocks1 % 2) numBlocks2++;
    dim3 dimThreadBlock;
    dimThreadBlock.x = BLOCK_SIZE;
    dimThreadBlock.y = 1;
    dimThreadBlock.z = 1;
    dim3 dimGrid;
    dimGrid.x = numBlocks2;
    dimGrid.y = 1;
    dimGrid.z = 1;
    unsigned int*sumArr_d = NULL;
    if (in_size > (2*BLOCK_SIZE)) {
        // we need the sum auxilarry array only if numblocks2 > 1
        cudaMalloc((void**)&sumArr_d, numBlocks2 * sizeof(unsigned int));
        cudaCheckError();
    }
    // sumArr_d stays NULL on the single-block path; exclusiveScan skips it.
    exclusiveScan<<<dimGrid, dimThreadBlock>>>(out, in, sumArr_d, in_size);
    cudaDeviceSynchronize();
    cudaCheckError();
    if (in_size <= (2*BLOCK_SIZE)) {
        // out has proper exclusive scan. return
        return;
    } else {
        // now we need to perform exclusive scan on the auxilliary sum array
        unsigned int *sumArr_scan_d;
        cudaMalloc((void**)&sumArr_scan_d, numBlocks2 * sizeof(unsigned int));
        cudaCheckError();
        preScan(sumArr_scan_d, sumArr_d, numBlocks2);
        // sumAdd_scan_d now contains the exclusive scan op of individual blocks
        // now just do a one-one addition of blocks
        mergeScanBlocks<<<dimGrid,dimThreadBlock>>>(sumArr_scan_d, out, in_size);
        cudaDeviceSynchronize();
        cudaCheckError();
        cudaFree(sumArr_d);
        cudaFree(sumArr_scan_d);
    }
}
//Define the destination index
__global__ void indexDefine(unsigned int *in_d, unsigned int *rev_bit_d,
unsigned int in_size, unsigned int last_input) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
int total_falses = in_d[in_size - 1] + last_input;
__syncthreads();
if (index < in_size) {
if (rev_bit_d[index] == 0) {
int val = in_d[index];
in_d[index] = index + 1 - val + total_falses;
}
}
}
//Scatter input using in_d address
// Scatters each element to its computed destination: out_d[index_d[i]] =
// in_d[i]. Destinations outside [0, in_size) are dropped defensively.
__global__ void scatterElements(unsigned int *in_d, unsigned int *index_d, unsigned int *out_d, unsigned int in_size) {
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= in_size) return;
    const unsigned int dest = index_d[gid];
    if (dest < in_size) {
        out_d[dest] = in_d[gid];
    }
}
// 32-pass LSB-first radix sort of in_d on the device, using out_d and
// out_scan_d as scratch. Each pass: split flags -> exclusive scan ->
// destination indices -> scatter, then the in/out pointers are swapped.
// NOTE(review): the pointer swap is local to this function — after the 32nd
// (even) pass the latest scatter result sits in the buffer the CALLER passed
// as in_d, yet main reads out_d; verify which buffer holds the final output.
// NOTE(review): in_h[num_elements - 1] (the ORIGINAL last host value) is fed
// to indexDefine on every pass, but the array order changes per pass — confirm
// this is the intended "last element" input.
void radix_sort(unsigned int *in_d, unsigned int *out_d, unsigned int *out_scan_d, unsigned int *in_h,unsigned int *out_scan_h, int num_elements) {
    unsigned int *temp;
    dim3 dimThreadBlock;
    dimThreadBlock.x = BLOCK_SIZE;
    dimThreadBlock.y = 1;
    dimThreadBlock.z = 1;
    dim3 dimGrid;
    dimGrid.x =(int)(ceil(num_elements/(1.0 * dimThreadBlock.x)));
    dimGrid.y = 1;
    dimGrid.z = 1;
    // One pass per bit of a 32-bit key, least significant first.
    for (int i =0;i<32;i++) {
        split<<<dimGrid, dimThreadBlock>>>(in_d,out_d,num_elements,i);
        cudaDeviceSynchronize();
        cudaCheckError();
        preScan(out_scan_d, out_d, num_elements);
        cudaDeviceSynchronize();
        cudaCheckError();
        indexDefine<<<dimGrid, dimThreadBlock>>>(out_scan_d, out_d, num_elements, in_h[num_elements - 1]);
        cudaDeviceSynchronize();
        cudaCheckError();
        scatterElements<<<dimGrid, dimThreadBlock>>>(in_d, out_scan_d, out_d, num_elements);
        cudaDeviceSynchronize();
        cudaCheckError();
        // swap pointers
        temp = in_d;
        in_d = out_d;
        out_d = temp;
    }
}
// qsort comparator for unsigned ints.
// BUG FIX: the array elements are unsigned; the old code stored them in
// signed int, mis-ordering any value >= 2^31.
int compare(const void *a, const void *b) {
    unsigned int a1 = *((const unsigned int*)a);
    unsigned int b1 = *((const unsigned int*)b);
    if (a1 == b1) return 0;
    else if (a1 < b1) return -1;
    else return 1;
}
// Driver: fill an array with a descending sequence, radix-sort it on the GPU,
// and verify the result against host qsort.
int main(){
    Timer timer;
    unsigned int *in_h;
    unsigned int *out_h;
    unsigned int *out_d;
    unsigned int *in_d;
    unsigned int *out_scan_d;
    unsigned int num_elements = 1000000;
    // host buffers
    in_h = (unsigned int*) malloc(num_elements*sizeof(unsigned int));
    out_h = (unsigned int*) malloc(num_elements*sizeof(unsigned int));
    unsigned int *out_scan_h = (unsigned int *)malloc(num_elements * sizeof(unsigned int));
    // device buffers: input, output, and scratch for the per-pass scan
    cudaMalloc((void**)&in_d, num_elements * sizeof(unsigned int));
    cudaCheckError();
    cudaMalloc((void**)&out_d, num_elements * sizeof(unsigned int));
    cudaCheckError();
    cudaMalloc((void**)&out_scan_d, num_elements * sizeof(unsigned int ));
    cudaCheckError();
    cudaDeviceSynchronize();
    // init array with a strictly descending sequence
    for(unsigned int i = 0;i < num_elements;i++) {
        in_h[i] = num_elements - 1 - i;
    }
    // Copy host variables to device ------------------------------------------
    cudaMemcpy(in_d, in_h, num_elements * sizeof(unsigned int),
        cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaDeviceSynchronize();
    // Launch and time the GPU sort -------------------------------------------
    startTime(&timer);
    radix_sort(in_d, out_d, out_scan_d, in_h, out_scan_h, num_elements);
    cudaDeviceSynchronize();
    cudaCheckError();
    stopTime(&timer); printf("GPU Sort time: %f s\n", elapsedTime(timer));
    cudaCheckError();
    // Copy device variables back to host --------------------------------------
    cudaMemcpy(out_h, out_d, num_elements * sizeof(unsigned int),
        cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaDeviceSynchronize();
    // Verify correctness against the host reference sort ----------------------
    qsort(in_h, num_elements, sizeof(unsigned int),compare);
    int flag = 0;
    for (unsigned int i = 0;i < num_elements;i++) {
        if (in_h[i] != out_h[i]) {
            flag = 1;
            break;
        }
    }
    if (flag == 1) {
        printf("test failed\n");
    } else
        printf("test passed\n");
    // Free memory
    cudaFree(in_d);
    cudaFree(out_scan_d);
    cudaFree(out_d);
    free(in_h);
    free(out_h);
    free(out_scan_h);  // BUG FIX: this buffer was leaked
    return 0;
}
|
14,137 | #include "includes.h"
// Exercise unsigned integer division and modulo on the device: each thread
// serially computes qs[i] = ns[i] / ds[i] and rs[i] = ns[i] % ds[i] for every
// test case. All threads redundantly do the same work — presumably a codegen
// or timing harness; confirm intent with the caller.
// NOTE(review): assumes ds[i] != 0 for all i (division by zero otherwise).
__global__ void unsignedGPU(int numTests, unsigned* ns, unsigned* ds, unsigned* qs, unsigned* rs) {
for (int i = 0; i < numTests; ++i) {
unsigned n = ns[i];
unsigned d = ds[i];
qs[i] = n / d;
rs[i] = n % d;
}
} |
14,138 | #include <cuda_runtime_api.h>
#include <stdint.h>
// Weighted squared-error loss, one thread per batch element:
//   loss[b] = 0.5 * weights[b] * (x[b] - targets[b])^2
__global__ void lst_sq_fwd_kernel(
    const float *x,
    uint32_t batch_sz,
    const float *targets,
    const float *weights,
    float *loss)
{
  uint32_t b = threadIdx.x + blockIdx.x * blockDim.x;
  if (b >= batch_sz) {
    return;
  }
  float diff = x[b] - targets[b];
  loss[b] = 0.5f * weights[b] * diff * diff;
}
// Launch the weighted squared-error forward kernel on `stream`,
// one thread per batch element (1024 threads per block, ceil division).
extern "C" void neuralops_cuda_lst_sq_fwd(
    const float *x,
    size_t batch_sz,
    const float *targets,
    const float *weights,
    float *loss,
    cudaStream_t stream)
{
  size_t num_blocks = (batch_sz + 1024 - 1) / 1024;
  lst_sq_fwd_kernel<<<num_blocks, 1024, 0, stream>>>(
      x, batch_sz, targets, weights, loss);
}
// Gradient of the weighted squared-error loss w.r.t. x:
//   grad[b] = weights[b] * (x[b] - targets[b])
__global__ void lst_sq_bwd_kernel(
    const float *x,
    uint32_t batch_sz,
    const float *targets,
    const float *weights,
    float *grad)
{
  uint32_t b = threadIdx.x + blockIdx.x * blockDim.x;
  if (b >= batch_sz) {
    return;
  }
  grad[b] = weights[b] * (x[b] - targets[b]);
}
// Launch the squared-error backward kernel on `stream`,
// one thread per batch element (1024 threads per block, ceil division).
extern "C" void neuralops_cuda_lst_sq_bwd(
    const float *x,
    size_t batch_sz,
    const float *targets,
    const float *weights,
    float *grad,
    cudaStream_t stream)
{
  size_t num_blocks = (batch_sz + 1024 - 1) / 1024;
  lst_sq_bwd_kernel<<<num_blocks, 1024, 0, stream>>>(
      x, batch_sz, targets, weights, grad);
}
// R-forward (directional derivative) of the squared-error loss:
//   r_loss[b] = jac_targ[b] * (x[b] - targets[b]) * rx[b]
__global__ void lst_sq_rfwd_kernel(
    const float *x,
    uint32_t batch_sz,
    const float *rx,
    const float *targets,
    const float *jac_targ,
    float *r_loss)
{
  uint32_t b = threadIdx.x + blockIdx.x * blockDim.x;
  if (b >= batch_sz) {
    return;
  }
  r_loss[b] = jac_targ[b] * (x[b] - targets[b]) * rx[b];
}
// Launch the R-forward squared-error kernel on `stream`,
// one thread per batch element (1024 threads per block, ceil division).
extern "C" void neuralops_cuda_lst_sq_rfwd(
    const float *x,
    size_t batch_sz,
    const float *rx,
    const float *targets,
    const float *jac_targ,
    float *r_loss,
    cudaStream_t stream)
{
  size_t num_blocks = (batch_sz + 1024 - 1) / 1024;
  lst_sq_rfwd_kernel<<<num_blocks, 1024, 0, stream>>>(
      x, batch_sz, rx, targets, jac_targ, r_loss);
}
// Squared-error loss on a single labelled component per batch element:
//   loss[b] = 0.5 * (x[labels[b] + dim*b] - targets[b])^2
// Out-of-range labels contribute zero loss.
// NOTE: `weights` is accepted for interface compatibility but is unused
// (the weighted variant is disabled).
__global__ void ind_lst_sq_fwd_kernel(
    const float *x,
    uint32_t dim,
    uint32_t batch_sz,
    const float *targets,
    const uint32_t *labels,
    const float *weights,
    float *loss)
{
  uint32_t b = threadIdx.x + blockIdx.x * blockDim.x;
  if (b >= batch_sz) {
    return;
  }
  uint32_t label_k = labels[b];
  if (label_k >= dim) {
    loss[b] = 0.0f;
    return;
  }
  float diff = x[label_k + dim * b] - targets[b];
  loss[b] = 0.5f * diff * diff;
}
// Launch the indexed squared-error forward kernel on `stream`,
// one thread per batch element (1024 threads per block, ceil division).
extern "C" void neuralops_cuda_ind_lst_sq_fwd(
    const float *x,
    size_t dim,
    size_t batch_sz,
    const float *targets,
    const uint32_t *labels,
    const float *weights,
    float *loss,
    cudaStream_t stream)
{
  size_t num_blocks = (batch_sz + 1024 - 1) / 1024;
  ind_lst_sq_fwd_kernel<<<num_blocks, 1024, 0, stream>>>(
      x, dim, batch_sz, targets, labels, weights, loss);
}
// Gradient of the indexed squared-error loss: one thread per (batch, dim)
// entry; only the labelled component of each row gets a non-zero gradient.
// NOTE: `weights` is accepted for interface compatibility but is unused.
__global__ void ind_lst_sq_bwd_kernel(
    const float *x,
    uint32_t dim,
    uint32_t batch_sz,
    const float *targets,
    const uint32_t *labels,
    const float *weights,
    float *grad)
{
  uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
  uint32_t b = idx / dim;
  uint32_t k = idx % dim;
  if (b >= batch_sz || k >= dim) {
    return;
  }
  grad[idx] = (k == labels[b]) ? (x[idx] - targets[b]) : 0.0f;
}
// Launch the indexed squared-error backward kernel on `stream`,
// one thread per gradient entry (dim * batch_sz of them).
extern "C" void neuralops_cuda_ind_lst_sq_bwd(
    const float *x,
    size_t dim,
    size_t batch_sz,
    const float *targets,
    const uint32_t *labels,
    const float *weights,
    float *grad,
    cudaStream_t stream)
{
  uint32_t n = dim * batch_sz;
  uint32_t num_blocks = (n + 1024 - 1) / 1024;
  ind_lst_sq_bwd_kernel<<<num_blocks, 1024, 0, stream>>>(
      x, dim, batch_sz, targets, labels, weights, grad);
}
|
14,139 | // This is a generated file, do not edit it!
#pragma once
#include <stdint.h>
// One sample's value for a single attribute, together with its class label
// and sample weight (field meanings inferred from names — generated code;
// confirm against the generator's schema).
typedef struct AttributeDataPoint {
int32_t DataPointId;
float Weight;
uint8_t Class;
float Attribute;
} AttributeDataPoint;
|
14,140 | /*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaSched.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2013-06-27 V0.43
Developer: Hui Li (lihui@indiana.edu)
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#ifndef _PANDASCHED_CU_
#define _PANDASCHED_CU_
#endif // _PANDASCHED_CU_
|
14,141 | //#include <iostream>
//#include<cuda.h>
//#include <cudnn.h>
//#include <Windows.h>
//#include <cublas.h>
//#include <cublas_v2.h>
//
//using namespace std;
//
//void checkCUDNN(cudnnStatus_t status)
//{
// if (status != CUDNN_STATUS_SUCCESS)
// cout << "[ERROR] CUDNN" << status << endl;
//}
//
//void checkCUDA(cudaError_t error)
//{
// if (error != CUDA_SUCCESS)
// cout << "[ERROR] CUDA" << error << endl;
//}
//
//void print(char* title, float* src, int filter_num, int h, int w)
//{
// cout << title << endl;
// for (int i = 0; i < filter_num; i++) {
// for (int y = 0; y < h; y++) {
// for (int x = 0; x < w; x++) {
// printf("%.0f ", src[i*h*w + y*w + x]);
// }
// cout << endl;
// }
// cout << endl;
// }
//}
//
//int main()
//{
// const int batch_count = 1; //Է , ġ
// const int in_channel = 2; //Է ä
// const int in_height = 4;// Է
// const int in_width = 4;// Է
// const int out_channel = 2; // Ŭ
// const int filter_width = 3; //
// const int filter_height = 3; //
// const int filter_num = 1; //
// const int padding_w = 1; // е.
// const int padding_h = 1;
// const int stride_horizontal = 1;
// const int stride_vertical = 1;
// const int pool_window_w = 2;
// const int pool_window_h = 2;
// const int pool_padding_horizontal = 0;
// const int pool_padding_vertical = 0;
// const int pool_stride_horizontal = 2;
// const int pool_stride_vertical = 2;
// const int pool_w = in_width / pool_stride_horizontal;
// const int pool_h = in_height / pool_stride_vertical;
// const int src_len = batch_count*filter_num*in_height*in_width;
// const int pool_len = batch_count*filter_num*pool_h*pool_w;
// float inData_NCHW[batch_count][in_channel][in_height][in_width];
// float inData_NHWC[batch_count][in_height][in_width][in_channel];
// float outData[batch_count][filter_num][in_height][in_width];
// float *inData_d;
// float *outData_d, *outData1_d;
// float *filterData_d; // device
// float *filterData2_d; // device FCN
// float *biasData_d;
// float *hostArray = new float[src_len];
// void* workSpace; //cuDNN ۾ .
//
//
//
// //Է
// for (int i = 0; i < in_channel; i++) {
// for (int y = 0; y < in_height; y++) {
// for (int x = 0; x < in_width; x++) {
// inData_NCHW[0][i][y][x] = i * in_channel * in_width*in_height + y*in_height + x;
// }
// }
// }
//
// //Է ȯ
// for (int i = 0; i < in_channel; i++) {
// for (int y = 0; y < in_height; y++) {
// for (int x = 0; x < in_width; x++) {
// inData_NHWC[0][y][x][i] = inData_NCHW[0][i][y][x];
// }
// }
// }
//
// //
// float filterData[filter_num][in_channel][filter_height][filter_width] = {
// { { { 0.0f, 0.0f, 0.0f },{ 0.0f, 1.0f, 0.0f },{ 0.0f, 0.0f, 0.0f } },
// { { 0.0f, 0.0f, 0.0f },{ 0.0f, 0.0f, 1.0f },{ 0.0f, 0.0f, 0.0f } }
// }
// };
//
// //FCN ġ
// //float filterData2[out_channel][filter_num][pool_h][pool_w] = { { { { 0.1f, 0.1f },{ 0.1f, 0.1f } } } ,{ { { 0.2f, 0.2f },{ 0.2f, 0.2f } } } };
//
// float filterData2[out_channel * pool_h * pool_w] = { 0.1f, 0.1f, 0.1f, 0.1f, 0.2f, 0.2f, 0.2f, 0.2f };
//
// float biasData[filter_num] = { -20 };
//
// cout << "in_NCHW" << endl;
// for (int i = 0; i < in_channel; i++) {
// for (int y = 0; y < in_height; y++) {
// for (int x = 0; x < in_width; x++) {
// printf("%.1f ", inData_NCHW[0][i][y][x]);
// } cout << endl;
// } cout << endl;
// }
//
// cout << "in_NHWC" << endl;
// for (int y = 0; y < in_height; y++) {
// for (int x = 0; x < in_width; x++) {
// for (int i = 0; i < in_channel; i++) {
// printf("%.1f ", inData_NHWC[0][y][x][i]);
// } cout << endl;
// } cout << endl;
// }
//
// cout << "weights" << endl;
// for (int i = 0; i < in_channel; i++) {
// for (int y = 0; y < filter_height; y++) {
// for (int x = 0; x < filter_width; x++) {
// printf("%.1f ", filterData[0][i][y][x]);
// } cout << endl;
// }cout << endl;
// }
//
// //GPU Ҵ
// checkCUDA(cudaMalloc((void**)&inData_d, sizeof(inData_NHWC)));
// checkCUDA(cudaMalloc((void**)&outData_d, sizeof(outData)));
// checkCUDA(cudaMalloc((void**)&filterData_d, sizeof(filterData)));
// checkCUDA(cudaMalloc((void**)&filterData2_d, sizeof(filterData2)));
// checkCUDA(cudaMalloc((void**)&outData1_d, sizeof(outData)));
// checkCUDA(cudaMalloc((void**)&biasData_d, sizeof(biasData)));
//
// //CPU GPU
// checkCUDA(cudaMemcpy(inData_d, inData_NHWC, sizeof(inData_NHWC), cudaMemcpyHostToDevice));
// checkCUDA(cudaMemcpy(filterData_d, filterData, sizeof(filterData), cudaMemcpyHostToDevice));
// checkCUDA(cudaMemcpy(filterData2_d, filterData2, sizeof(filterData2), cudaMemcpyHostToDevice));
// checkCUDA(cudaMemcpy(biasData_d, biasData, sizeof(biasData), cudaMemcpyHostToDevice));
//
//
//
// //CUDNN 迭
// cudnnHandle_t cudnnHandle; // cuDNN ڵ鷯
// cudnnTensorDescriptor_t inTensorDesc, outTensorDesc, biasTensorDesc, poolOutTensorDesc, sftTensorDesc; // ü
// cudnnFilterDescriptor_t filterDesc, filterDesc2;
// cudnnConvolutionDescriptor_t convDesc, convDesc2;
// cudnnPoolingDescriptor_t poolDesc;
// cudnnActivationDescriptor_t actDesc; //ȰԼ ü
//
// cublasHandle_t cublasHandle;
//
// //Ҵ
// checkCUDNN(cudnnCreate(&cudnnHandle));
// checkCUDNN(cudnnCreateTensorDescriptor(&inTensorDesc));
// checkCUDNN(cudnnCreateTensorDescriptor(&outTensorDesc));
// checkCUDNN(cudnnCreateTensorDescriptor(&biasTensorDesc));
// checkCUDNN(cudnnCreateTensorDescriptor(&poolOutTensorDesc));
// checkCUDNN(cudnnCreateTensorDescriptor(&sftTensorDesc));
// checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc));
// checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc2));
// checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc));
// checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc2));
// checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc));
// checkCUDNN(cudnnCreateActivationDescriptor(&actDesc));
//
// cublasCreate(&cublasHandle);
//
// cublasSetMatrix(out_channel, pool_h * pool_w, sizeof(*filterData2), filterData2, 2, filterData2_d, 2);
//
// //ʱȭ
// //inData_NHWC
// checkCUDNN(cudnnSetTensor4dDescriptor(inTensorDesc, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, batch_count, in_channel, in_height, in_width));
// checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filter_num, in_channel, filter_height, filter_width));
// checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc2, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, out_channel, filter_num, pool_h, pool_w));
// checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, padding_h, padding_w, stride_vertical, stride_horizontal, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// //FCN
// checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc2, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, pool_window_h, pool_window_w, pool_padding_vertical, pool_padding_horizontal, pool_stride_vertical
// , pool_stride_horizontal));
// checkCUDNN(cudnnSetTensor4dDescriptor(biasTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, filter_num, 1, 1));
// checkCUDNN(cudnnSetActivationDescriptor(actDesc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));
//
// int out_n, out_c, out_h, out_w;
// //Էµ ô
// checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc, inTensorDesc, filterDesc, &out_n, &out_c, &out_h, &out_w));
// printf("conv out shape (n x c x h x w) = (%d x %d x %d x %d)\n", out_n, out_c, out_h, out_w);
// checkCUDNN(cudnnSetTensor4dDescriptor(outTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, out_n, out_c, out_h, out_w));
//
// //Ǯ Ȯ
// checkCUDNN(cudnnGetPooling2dForwardOutputDim(poolDesc, outTensorDesc, &out_n, &out_c, &out_h, &out_w));
// printf("pool out shape (n x c x h x w) = (%d x %d x %d x %d)\n", out_n, out_c, out_h, out_w);
// //Ǯ ¾
// checkCUDNN(cudnnSetTensor4dDescriptor(poolOutTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, out_n, out_c, out_h, out_w));
//
// //FCN Ȯ
// checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc2, poolOutTensorDesc, filterDesc2, &out_n, &out_c, &out_h, &out_w));
// printf("FCN out shape (n x c x h x w)= (%d x %d x %d x %d)\n", out_n, out_c, out_h, out_w);
// checkCUDNN(cudnnSetTensor4dDescriptor(sftTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, out_n, out_c, out_h, out_w));
//
// //Է° , е, Ʈ̵尡 ־ ˰ ã
// cudnnConvolutionFwdAlgo_t algo;
// checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle,
// inTensorDesc,
// filterDesc,
// convDesc,
// outTensorDesc,
// CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
// 0,
// &algo));
//
// cout << "Fatest algorithm for conv0 = " << algo << endl;
//
// cudnnConvolutionFwdAlgo_t algo2;
// checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle,
// poolOutTensorDesc,
// filterDesc2,
// convDesc2,
// sftTensorDesc,
// CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
// 0,
// &algo2));
//
// cout << "Fatest algorithm for conv1 = " << algo2 << endl;
//
// // ˰ ʿ ũ ˾Ƴ
// size_t sizeinBytes = 0;
// checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle,
// inTensorDesc,
// filterDesc,
// convDesc,
// outTensorDesc,
// algo,
// &sizeinBytes));
//
// cout << "workspace size (sizeinbytes):" << sizeinBytes << endl;
// // ʿ Ҵ
// if (sizeinBytes != 0) checkCUDA(cudaMalloc(&workSpace, sizeinBytes));
//
// float alpha = 1.0f;
// float beta = 0.0f;
// //
// //"output = alpha * Op(input) + beta* output
// // "output = 1 * Op(inpuit) + 0 * output"
//
//
// checkCUDNN(cudnnConvolutionForward(cudnnHandle,
// &alpha,
// inTensorDesc,
// inData_d,
// filterDesc,
// filterData_d,
// convDesc,
// algo,
// workSpace,
// sizeinBytes,
// &beta,
// outTensorDesc,
// outData_d));
//
// checkCUDA(cudaMemcpy(hostArray, outData_d, sizeof(float)* src_len, cudaMemcpyDeviceToHost));
// print("conv out", hostArray, filter_num, in_height, in_width);
//
// //add bias
// beta = 1.0f;
// checkCUDNN(cudnnAddTensor(cudnnHandle,
// &alpha,
// biasTensorDesc,
// biasData_d,
// &beta,
// outTensorDesc,
// outData_d));
// checkCUDA(cudaMemcpy(hostArray, outData_d, sizeof(float)* src_len, cudaMemcpyDeviceToHost));
// print("add bias out", hostArray, filter_num, in_height, in_width);
//
// //activation_RELU
// beta = 0.0f;
// checkCUDNN(cudnnActivationForward(cudnnHandle,
// actDesc,
// &alpha,
// outTensorDesc,
// outData_d,
// &beta,
// outTensorDesc,
// outData1_d));
// //checkCUDA(cudaMemcpy(hostArray, outData1_d, sizeof(float)* src_len, cudaMemcpyDeviceToHost));
// //print("RELU out", hostArray, filter_num, in_height, in_width);
//
// //pooling
// checkCUDNN(cudnnPoolingForward(cudnnHandle,
// poolDesc,
// &alpha,
// outTensorDesc,
// outData1_d,
// &beta,
// poolOutTensorDesc,
// outData_d));
// //checkCUDA(cudaMemcpy(hostArray, outData_d, sizeof(float)*pool_len, cudaMemcpyDeviceToHost));
// //print("POOLING Out", hostArray, filter_num, pool_h, pool_w);
//
// cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 4, 4, out_channel, );
//
// checkCUDA(cudaMemcpy(hostArray, outData1_d, sizeof(float)*out_channel, cudaMemcpyDeviceToHost));
// print("FC Out:", hostArray, out_channel, 1, 1);
//
// //softmax
// checkCUDNN(cudnnSoftmaxForward(cudnnHandle,
// CUDNN_SOFTMAX_ACCURATE,
// CUDNN_SOFTMAX_MODE_CHANNEL,
// &alpha,
// sftTensorDesc,
// outData1_d,
// &beta,
// sftTensorDesc,
// outData_d));
//
// checkCUDA(cudaMemcpy(hostArray, outData_d, sizeof(float)*out_channel, cudaMemcpyDeviceToHost));
// print("softmax out", hostArray, out_channel, 1, 1);
//
//
// Sleep(1000000);
//
// //
// checkCUDNN(cudnnDestroyTensorDescriptor(inTensorDesc));
// checkCUDNN(cudnnDestroyTensorDescriptor(outTensorDesc));
// checkCUDNN(cudnnDestroyFilterDescriptor(filterDesc));
// checkCUDNN(cudnnDestroyFilterDescriptor(filterDesc2));
// checkCUDNN(cudnnDestroyConvolutionDescriptor(convDesc));
// checkCUDNN(cudnnDestroyConvolutionDescriptor(convDesc2));
// checkCUDNN(cudnnDestroyTensorDescriptor(biasTensorDesc));
// checkCUDNN(cudnnDestroyTensorDescriptor(poolOutTensorDesc));
// checkCUDNN(cudnnDestroyTensorDescriptor(sftTensorDesc));
// checkCUDNN(cudnnDestroyPoolingDescriptor(poolDesc));
// checkCUDNN(cudnnDestroyActivationDescriptor(actDesc));
// checkCUDNN(cudnnDestroy(cudnnHandle));
//
// checkCUDA(cudaFree(inData_d));
// checkCUDA(cudaFree(outData_d));
// checkCUDA(cudaFree(outData1_d));
// checkCUDA(cudaFree(filterData2_d));
// checkCUDA(cudaFree(filterData_d));
// checkCUDA(cudaFree(biasData_d));
// cublasDestroy(cublasHandle);
//
// checkCUDA(cudaThreadSynchronize());
// return 0;
//
//} |
14,142 | extern "C"
// Set every element of a[0..size) to `value`; one thread per element.
__global__ void fill(unsigned int *a, unsigned int value, unsigned int size)
{
    const unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size)
        a[i] = value;
}
|
14,143 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// NOTE(review): despite the name, this kernel performs no SAXPY. Each of the
// participating threads issues a fixed sequence of shared-memory loads via
// inline PTX from whatever address register %r1 happens to hold — presumably
// a memory-stream stress/trace generator for a simulator; confirm intent.
// The addr*/stream-length parameters are accepted but never read here.
// NOTE(review): `id <= 8` admits 9 threads (0..8), though the launch in main
// uses only 8 — off-by-one or intentional? verify.
__global__ void saxpy(unsigned num_rd_streams, unsigned addr1, unsigned addr2, unsigned addr3, unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7, unsigned addr8, unsigned rd_stream_length, unsigned num_wr_streams, unsigned wr_stream_length)
{
unsigned id = threadIdx.x;
if (id <= 8) {
// 8 raw shared-memory loads per iteration, ~1000 loads per thread in total
for (int i = 0; i < 1000 - 8; i += 8) {
unsigned a;
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
asm ("ld.shared.u32 %0, [%%r1];" : "=r"(a));
}
}
}
int main(int argc, char *argv[])
{
int N = 1000;
// Build a 1000-element buffer and launch the load-stream kernel.
unsigned *h_x = (unsigned *)malloc(N*sizeof(unsigned));
// NOTE(review): d_x is a raw fixed device address (100), not an allocation —
// invalid on real hardware; presumably this targets a simulator with a flat
// address space (see the commented-out cudaMalloc below). Confirm.
unsigned *d_x = (unsigned *)100;
unsigned *d_x_copy;
cudaMalloc(&d_x_copy, N*sizeof(unsigned));
// cudaMalloc(&d_x, 2*sizeof(unsigned));
for (int i = 1 ; i <= N ; i++)
h_x[i-1] = (unsigned)i;
cudaMemcpy(d_x, h_x, N*sizeof(unsigned), cudaMemcpyHostToDevice);
// NOTE(review): no synchronization or error check after launch, and h_x /
// d_x_copy are never freed — acceptable only as a throwaway harness.
saxpy<<<1, 8>>>(8, 100, 100, 100, 100, 100, 100, 100, 100, 1000, 0, 1000);
}
|
14,144 | /*
matNormInf.cu
Finds infinity-norm of a matrix
April 2013
Nicolas Sawaya
*/
//For matrix infinity-norm, sum all rows
//Remember this is in column format, so sum
//Btw with lanczos this can be made much less complicated
// Per-row sums of an m x m column-major matrix: one thread per row
// (launched with blockDim.x == m), result[row] = sum_i mat[i*m + row].
// NOTE(review): a true infinity-norm needs fabs() on each entry and a max
// over the row sums; presumably both happen in a later step (or the matrix
// is known non-negative) — confirm against the caller.
__global__ void matNormInf(double* mat, double* result) {
int row = threadIdx.x;
int m = blockDim.x;
double rowSum = 0.;
int i;
for(i=0;i<m;i++){
//Matrix is in column-major format
rowSum = rowSum + mat[i*m + row];
}
result[row] = rowSum;
}
|
14,145 | /* Molecular dynamics simulation linear code for binary Lennard-Jones liquid under NVE ensemble;
Author: You-Liang Zhu, Email: youliangzhu@ciac.ac.cn
Copyright: You-Liang Zhu
This code is free: you can redistribute it and/or modify it under the terms of the GNU General Public License.*/
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// periodic boundary condition
// Abort the program with a message if any prior CUDA call left an error.
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( status) );
        exit(-1);
    }
}
// Wrap a coordinate/displacement into the primary periodic image
// [-box_len/2, box_len/2]. Handles a single wrap only, i.e. assumes
// |x| < 1.5 * box_len, which holds for displacements of in-box coordinates.
__host__ __device__ float pbc(float x, float box_len)
{
    float box_half = box_len * 0.5f;  // 0.5f keeps the arithmetic in float
    if (x > box_half) x -= box_len;
    else if (x < -box_half) x += box_len;
    return x;
}
// randome number generator [0.0-1.0)
// Pseudo-random float based on rand(): uniform in [0, 1]
// (inclusive of 1.0 when rand() returns RAND_MAX).
float R2S()
{
    return (float)rand() / (float)RAND_MAX;
}
// initially generate the position and mass of particles
// Randomly place np particles in the box via rejection sampling, enforcing a
// minimum pair separation min_dis (with periodic boundaries). Each particle
// gets a random type in r[i].w (1.0 = A, 2.0 = B) and unit mass in v[i].w.
void init(unsigned int np, float4* r, float4* v, float3 box, float min_dis)
{
    for (unsigned int i = 0; i < np; i++)
    {
        float4 cand;
        bool placed = false;
        while (!placed)
        {
            cand.x = ( R2S() - 0.5 ) * box.x;
            cand.y = ( R2S() - 0.5 ) * box.y;
            cand.z = ( R2S() - 0.5 ) * box.z;
            placed = true;
            // reject the candidate if it overlaps any particle placed so far
            for (unsigned int j = 0; j < i; j++)
            {
                float dx = pbc(cand.x - r[j].x, box.x);
                float dy = pbc(cand.y - r[j].y, box.y);
                float dz = pbc(cand.z - r[j].z, box.z);
                float dist = sqrt(dx*dx + dy*dy + dz*dz);
                if (dist < min_dis)
                {
                    placed = false;
                    break;
                }
            }
        }
        // randomly assign the particle type: 1.0 -> A, 2.0 -> B
        if (R2S() > 0.5)
            cand.w = 1.0;
        else
            cand.w = 2.0;
        r[i] = cand;
        v[i].w = 1.0;  // unit mass
    }
}
// first step integration of velocity verlet algorithm
// Velocity-Verlet step 1: half-kick the velocity with the current force,
// then drift the position by dt and wrap it back into the periodic box.
// Mass is stored in v[i].w.
extern "C" __global__ void first_integration_kernel(unsigned int np, float dt, float3 box, float4* r, float4* v, float4* f)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= np)
        return;
    float mass = v[i].w;
    // half-kick
    v[i].x += 0.5 * dt * f[i].x / mass;
    v[i].y += 0.5 * dt * f[i].y / mass;
    v[i].z += 0.5 * dt * f[i].z / mass;
    // drift, then wrap into the box
    float4 pos = r[i];
    pos.x += dt * v[i].x;
    pos.y += dt * v[i].y;
    pos.z += dt * v[i].z;
    r[i].x = pbc(pos.x, box.x);
    r[i].y = pbc(pos.y, box.y);
    r[i].z = pbc(pos.z, box.z);
}
// Launch first_integration_kernel over all particles, then block and check
// for launch/execution errors.
void first_integration(unsigned int np, float dt, float3 box, float4* r, float4* v, float4* f, unsigned int block_size)
{
    dim3 block(block_size, 1, 1);
    dim3 grid(np/block_size + 1, 1, 1);  // the extra block covers the remainder
    first_integration_kernel<<< grid, block >>>(np, dt, box, r, v, f);
    cudaThreadSynchronize();             // wait for the kernel to finish
    checkCUDAError("kernel execution");
}
// non-bonded force calculation
// All-pairs (O(np^2)) Lennard-Jones force/energy: one thread per particle i
// sums the force from every other particle j within rcut, and accumulates
// half the pair potential in f[i].w (halved to avoid double counting).
// The pair parameters are selected by the sum of the two type codes stored
// in r.w: 2.0 -> (epsilon.x, sigma.x), 3.0 -> (.y), 4.0 -> (.z).
// NOTE(review): epsilonij/sigmaij stay uninitialized for any other type sum;
// safe only while init() restricts types to 1.0 and 2.0 — confirm.
extern "C" __global__ void force_calculation_kernel(unsigned int np, float3 box, float3 epsilon, float3 sigma, float4* r, float4* f, float rcut)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<np)
{
float4 force = make_float4(0.0, 0.0, 0.0, 0.0);
for(unsigned int j=0; j<np; j++)
{
/* particles have no interactions with themselves */
if (i==j) continue;
/* calculated the shortest distance between particle i and j */
float dx = pbc(r[i].x - r[j].x, box.x);
float dy = pbc(r[i].y - r[j].y, box.y);
float dz = pbc(r[i].z - r[j].z, box.z);
float type = r[i].w + r[j].w;
// NOTE: this local scalar `r` shadows the float4* parameter `r` from
// here to the end of the loop body
float r = sqrt(dx*dx + dy*dy + dz*dz);
/* compute force and energy if within cutoff */
if (r < rcut)
{
float epsilonij, sigmaij;
if(type==2.0) // i=1.0, j=1.0
{
epsilonij = epsilon.x;
sigmaij = sigma.x;
}
else if(type==3.0) // i=1.0, j=2.0; or i=2.0, j=1.0
{
epsilonij = epsilon.y;
sigmaij = sigma.y;
}
else if(type==4.0) // i=2.0, j=2.0
{
epsilonij = epsilon.z;
sigmaij = sigma.z;
}
float ffac = -4.0*epsilonij*(-12.0*__powf(sigmaij/r,12.0)/r + 6.0*__powf(sigmaij/r,6.0)/r); // force between particle i and j
float epot = 0.5*4.0*epsilonij*(__powf(sigmaij/r,12.0) - __powf(sigmaij/r,6.0)); // potential between particle i and j
force.x += ffac*dx/r;
force.y += ffac*dy/r;
force.z += ffac*dz/r;
force.w += epot;
}
}
f[i] = force;
// printf("%d %f %f %f %f \n", i, force.x, force.y, force.z, force.w);
}
}
// Launch force_calculation_kernel over all particles, then block and check
// for launch/execution errors.
void force_calculation(unsigned int np, float3 box, float3 epsilon, float3 sigma, float4* r, float4* f, float rcut, unsigned int block_size)
{
    dim3 block(block_size, 1, 1);
    dim3 grid(np/block_size + 1, 1, 1);  // the extra block covers the remainder
    force_calculation_kernel<<< grid, block >>>(np, box, epsilon, sigma, r, f, rcut);
    cudaThreadSynchronize();             // wait for the kernel to finish
    checkCUDAError("kernel execution");
}
// second step integration of velocity verlet algorithm
// Velocity-Verlet step 2: the final half-kick of the velocities using the
// freshly computed forces. Mass is stored in v[i].w.
extern "C" __global__ void second_integration_kernel(unsigned int np, float dt, float4* v, float4* f)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= np)
        return;
    float mass = v[i].w;
    v[i].x += 0.5 * dt * f[i].x / mass;
    v[i].y += 0.5 * dt * f[i].y / mass;
    v[i].z += 0.5 * dt * f[i].z / mass;
}
// Launch second_integration_kernel (final half-kick) over all particles,
// then block and check for launch/execution errors.
void second_integration(unsigned int np, float dt, float4* v, float4* f, unsigned int block_size)
{
    dim3 block(block_size, 1, 1);
    dim3 grid(np/block_size + 1, 1, 1);  // the extra block covers the remainder
    second_integration_kernel<<< grid, block >>>(np, dt, v, f);
    cudaThreadSynchronize();             // wait for the kernel to finish
    checkCUDAError("kernel execution");
}
// system information collection for temperature, kinetic energy, potential and total energy
// Dynamically sized shared buffer used by both reduction kernels below;
// its byte size is supplied as the third kernel-launch argument.
extern __shared__ float2 sdata[];
// Per-block partial sums: for each particle accumulate
//   .x = m*|v|^2 (twice the kinetic energy), .y = potential (f[i].w),
// reduce within the block in shared memory, and write one float2 per block
// into scratch. The halving loop assumes blockDim.x is a power of two.
__global__ void compute_info_sums_kernel(unsigned int np, float4* v, float4* f, float2* scratch)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float2 tempo;
if (i < np)
{
float4 vi = v[i];
float mass = vi.w;
tempo.x = mass * (vi.x*vi.x + vi.y*vi.y + vi.z*vi.z);
tempo.y = f[i].w;
}
else
{
// out-of-range lanes contribute zero to both sums
tempo.x = float(0.0);
tempo.y = float(0.0);
}
sdata[threadIdx.x] = tempo;
__syncthreads();
// tree reduction in shared memory
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
sdata[threadIdx.x].x += sdata[threadIdx.x + offs].x;
sdata[threadIdx.x].y += sdata[threadIdx.x + offs].y;
}
offs >>= 1;
__syncthreads();
}
// thread 0 publishes this block's partial sum
if (threadIdx.x == 0)
{
scratch[blockIdx.x].x = sdata[0].x;
scratch[blockIdx.x].y = sdata[0].y;
}
}
// Single-block kernel that folds the per-block partials in scratch into the
// final thermodynamic quantities and writes them to info:
//   info[0] = temperature = 2*ekin/nfreedom with nfreedom = 3*np - 3,
//   info[1] = potential,  info[2] = total energy (ekin + potential).
// Processes the partials in chunks of blockDim.x; the halving loop assumes
// blockDim.x is a power of two. Uses the file-scope extern shared sdata[].
__global__ void compute_info_final_kernel(unsigned int np, float* info, float2* scratch, unsigned int num_partial_sums)
{
float2 final_sum = make_float2(0.0, 0.0);
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
// load this chunk of partial sums (zero-pad the tail)
if (start + threadIdx.x < num_partial_sums)
{
float2 scr = scratch[start + threadIdx.x];
sdata[threadIdx.x].x = scr.x;
sdata[threadIdx.x].y = scr.y;
}
else
{
sdata[threadIdx.x].x = float(0.0);
sdata[threadIdx.x].y = float(0.0);
}
__syncthreads();
// tree reduction of the chunk in shared memory
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
sdata[threadIdx.x].x += sdata[threadIdx.x + offs].x;
sdata[threadIdx.x].y += sdata[threadIdx.x + offs].y;
}
offs >>= 1;
__syncthreads();
}
// thread 0 accumulates the chunk totals across iterations
if (threadIdx.x == 0)
{
final_sum.x += sdata[0].x;
final_sum.y += sdata[0].y;
}
}
if (threadIdx.x == 0)
{
// final_sum.x holds sum of m*|v|^2, i.e. twice the kinetic energy
float ekin = 0.5*final_sum.x;
float potential = final_sum.y;
unsigned int nfreedom = 3 * np - 3;  // 3N minus center-of-mass motion
float temp = 2.0*ekin/float(nfreedom);
float energy = ekin + potential;
info[0] = temp;
info[1] = potential;
info[2] = energy;
}
}
// Reduce the kinetic-energy and potential sums over all particles and write
// [temperature, potential, total energy] into info[0..2] on the device.
// NOTE: both reduction kernels assume their block size is a power of two.
void compute_info(unsigned int np, float4* v, float4* f, float2* scratch, float* info, unsigned int block_size)
{
    // pass 1: one partial sum per block into scratch
    unsigned int n_blocks = (int)ceil((float)np / (float)block_size);
    dim3 grid(n_blocks, 1, 1);
    dim3 threads(block_size, 1, 1);
    unsigned int shared_bytes = sizeof(float2)*block_size;
    compute_info_sums_kernel<<<grid,threads, shared_bytes>>>(np, v, f, scratch);
    cudaThreadSynchronize();             // wait for the kernel to finish
    checkCUDAError("kernel execution");
    // pass 2: a single block folds the partials into the final quantities
    const int final_block_size = 512;
    grid = dim3(1, 1, 1);
    threads = dim3(final_block_size, 1, 1);
    shared_bytes = sizeof(float2)*final_block_size;
    compute_info_final_kernel<<<grid, threads, shared_bytes>>>(np, info, scratch, n_blocks);
    cudaThreadSynchronize();             // wait for the kernel to finish
    checkCUDAError("kernel execution");
}
// output system information and frame in XYZ formation which can be read by VMD
// Append one frame to the trajectory file in XYZ format (readable by VMD):
// a header line with step/temperature/potential/energy, then one line per
// particle. r[i].w encodes the species: 1.0 -> "A", 2.0 -> "B".
void output(FILE *traj, unsigned int step, float* info, float4* r, unsigned int np)
{
    float temp = info[0];
    float potential = info[1];
    float energy = info[2];
    fprintf(traj,"%d\n step=%d temp=%20.8f pot=%20.8f ener=%20.8f\n", np, step, temp, potential, energy);
    for (unsigned int i = 0; i < np; i++)
    {
        float4 ri = r[i];
        if (ri.w == 1.0)
            fprintf(traj, "A %20.8f %20.8f %20.8f\n", ri.x, ri.y, ri.z);
        else if (ri.w == 2.0)
            fprintf(traj, "B %20.8f %20.8f %20.8f\n", ri.x, ri.y, ri.z);
    }
}
// main function
// Build a random binary Lennard-Jones system, run NVE velocity-Verlet MD on
// the GPU for nsteps steps, and periodically dump XYZ frames + thermo data.
int main(int argc, char **argv)
{
    //running parameters
    unsigned int np = 2700;            // the number of particles
    unsigned int nsteps = 500;         // the number of time steps
    float dt = 0.001;                  // integration time step
    float rcut = 3.0;                  // the cutoff radius of interactions
    unsigned int nprint = 100;         // period for data output
    unsigned int block_size = 256;     // the number of threads in a block
    timeval start;                     // start time
    timeval end;                       // end time
    float3 box = make_float3(15.0, 15.0, 15.0);   // box size in x, y, and z directions
    float3 epsilon = make_float3(1.0, 0.5, 1.0);  // pair epsilons: AA (.x), AB (.y), BB (.z)
    float3 sigma = make_float3(1.0, 1.0, 1.0);    // pair sigmas:   AA (.x), AB (.y), BB (.z)
    float min_dis = sigma.x*0.9;       // minimum distance between generated particles
    //host memory allocation
    float4* h_r = (float4 *)malloc(np*sizeof(float4)); // rx, ry, rz, type(0, 1, 2 ...)
    float4* h_v = (float4 *)malloc(np*sizeof(float4)); // vx, vy, vz, mass
    float4* h_f = (float4 *)malloc(np*sizeof(float4)); // fx, fy, fz, potential
    float* h_info = (float *)malloc(16*sizeof(float)); // temperature, potential, energy ...
    // BUG FIX: init() only fills the mass component (w) of h_v, so the
    // velocity components were previously copied to the device uninitialized.
    // Start the system at rest instead.
    for (unsigned int i = 0; i < np; i++)
    {
        h_v[i].x = 0.0f;
        h_v[i].y = 0.0f;
        h_v[i].z = 0.0f;
        h_v[i].w = 0.0f; // mass is assigned by init()
    }
    //device memory allocation
    float4* d_r;
    float4* d_v;
    float4* d_f;
    float* d_info;
    float2* d_scratch;
    cudaMalloc( (void **) &d_r, np*sizeof(float4) );    // rx, ry, rz, type(0, 1, 2 ...)
    cudaMalloc( (void **) &d_v, np*sizeof(float4) );    // vx, vy, vz, mass
    cudaMalloc( (void **) &d_f, np*sizeof(float4) );    // fx, fy, fz, potential
    cudaMalloc( (void **) &d_info, 16*sizeof(float) );  // temperature, potential, energy ...
    cudaMalloc( (void **) &d_scratch, (np/block_size + 1)*sizeof(float2) ); // per-block partial sums
    FILE *traj=fopen("traj.xyz","w"); // trajectory file in XYZ format that can be open by VMD
    /* generate system information */
    printf("Starting simulation with %d atoms for %d steps.\n", np, nsteps);
    printf("Generating system.\n", np, nsteps);
    init(np, h_r, h_v, box, min_dis);
    cudaMemcpy( d_r, h_r, np*sizeof(float4), cudaMemcpyHostToDevice );
    cudaMemcpy( d_v, h_v, np*sizeof(float4), cudaMemcpyHostToDevice );
    // BUG FIX: the first half-kick reads d_f before any force calculation;
    // zero the forces so step 0 does not integrate garbage.
    cudaMemset( d_f, 0, np*sizeof(float4) );
    gettimeofday(&start,NULL); //get start time
    /* main MD loop */
    printf("Running simulation.\n", np, nsteps);
    for(unsigned int step =0; step <= nsteps; step++) //running simulation loop
    {
        /* first integration for velverlet */
        first_integration(np, dt, box, d_r, d_v, d_f, block_size);
        /* force calculation */
        force_calculation(np, box, epsilon, sigma, d_r, d_f, rcut, block_size);
        /* compute temperature and potential */
        compute_info(np, d_v, d_f, d_scratch, d_info, block_size);
        /* second integration for velverlet */
        second_integration(np, dt, d_v, d_f, block_size);
        /* write output frames and system information, if requested */
        if ((step % nprint) == 0)
        {
            cudaMemcpy(h_r, d_r, np*sizeof(float4), cudaMemcpyDeviceToHost);
            cudaMemcpy(h_info, d_info, 16*sizeof(float), cudaMemcpyDeviceToHost);
            output(traj, step, h_info, h_r, np);
            printf("time step %d \n", step);
        }
    }
    gettimeofday(&end,NULL); // get end time
    long timeusr=(end.tv_sec-start.tv_sec)*1000000+(end.tv_usec-start.tv_usec);
    printf("time is %ld microseconds\n",timeusr); // the spending time on simulation in microseconds
    fclose(traj);
    free(h_r);
    free(h_v);
    free(h_f);
    free(h_info);
    cudaFree(d_r);
    cudaFree(d_v);
    cudaFree(d_f);
    cudaFree(d_info);
    cudaFree(d_scratch); // BUG FIX: this buffer was leaked
    return 0;
}
|
14,146 | // Test using -x cuda -fopenmp does not clash integrated headers.
// Reported in https://bugs.llvm.org/show_bug.cgi?id=48014
///==========================================================================///
// REQUIRES: nvptx-registered-target
// RUN: %clang -x cuda -fopenmp -c %s -o - --cuda-path=%S/../Driver/Inputs/CUDA/usr/local/cuda -nocudalib -isystem %S/Inputs/include -isystem %S/../../lib/Headers -fsyntax-only
|
14,147 | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
// Uniform pseudo-random float in [0, 1], driven by the C rand() PRNG
// (seed with srand() for reproducibility).
float random_float(void)
{
    return rand() / static_cast<float>(RAND_MAX);
}
// Part 1 of 6: implement the kernel
// Sum-reduce `input[0..n)` into per_block_results[0].
// Each block loads blockDim.x elements into shared memory, repeatedly folds
// the mirrored upper half onto the lower half, then thread 0 atomically adds
// the block's partial sum to the global accumulator.
// Preconditions: blockDim.x is a power of two and <= 1024;
// per_block_results[0] must be zeroed before launch (atomicAdd accumulates).
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
int sizeVec = blockDim.x;
__shared__ float sdata[1024];
int g_index = threadIdx.x + blockDim.x * blockIdx.x;
int s_index = threadIdx.x;
// Tail guard: threads past the end contribute 0, so a grid that
// over-covers n still computes the correct sum.
sdata[s_index] = (g_index < (int)n) ? input[g_index] : 0.0f;
__syncthreads();
while (sizeVec != 1){
// fold the mirrored upper half onto the lower half
if (s_index < sizeVec/2)
sdata[s_index] += sdata[sizeVec - 1 - s_index];
__syncthreads(); // outside the divergent if: every thread reaches it
sizeVec /= 2;
}
if(s_index == 0)
atomicAdd(per_block_results, sdata[0]);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Sum 2^18 random floats on the GPU and compare against a CPU reference.
int main(void)
{
// create array of 256ki elements
const int num_elements = 1<<18;
srand(time(NULL));
// generate random input on the host
std::vector<float> h_input(num_elements);
for(size_t i = 0; i < h_input.size(); ++i)
{
h_input[i] = random_float();
}
// CPU reference used to sanity-check the device result
const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
std::cerr << "Host sum: " << host_result << std::endl;
int block_size = 1024;
int num_blocks = (num_elements + block_size - 1)/block_size; // ceil-div
// move input to device memory
float *d_input = 0;
cudaMalloc(&d_input, num_elements * sizeof(float));
cudaMemcpy(d_input, &h_input[0], num_elements * sizeof(float), cudaMemcpyHostToDevice);
// single-float accumulator: every block atomicAdds its partial sum here
float *d_partial_sums_and_total = 0;
cudaMalloc(&d_partial_sums_and_total, sizeof(float));
// BUG FIX: cudaMalloc does not initialize memory, so the accumulator
// must be zeroed before the kernel atomicAdds into it.
cudaMemset(d_partial_sums_and_total, 0, sizeof(float));
// launch one kernel to compute, per-block, a partial sum
block_sum<<<num_blocks, block_size, block_size * sizeof(float)>>>(d_input, d_partial_sums_and_total, num_elements);
// surface launch-configuration errors immediately
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << std::endl;
// blocking copy: also synchronizes with the kernel
float device_result = 0;
cudaMemcpy(&device_result, d_partial_sums_and_total, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "Device sum: " << device_result << std::endl;
// deallocate device memory
cudaFree(d_input);
cudaFree(d_partial_sums_and_total);
return 0;
}
|
14,148 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "complex_h.cuh"
// Convert polar coordinates (r, theta) to a rectangular complex value.
__host__ __device__ complex complex_from_polar(double r, double theta_radians) {
    complex rect;
    rect.re = r * cos(theta_radians);
    rect.im = r * sin(theta_radians);
    return rect;
}
// Component-wise complex addition.
__host__ __device__ complex complex_add(complex left, complex right) {
    complex sum;
    sum.re = left.re + right.re;
    sum.im = left.im + right.im;
    return sum;
}
// Component-wise complex subtraction (left - right).
__host__ __device__ complex complex_sub(complex left, complex right) {
    complex diff;
    diff.re = left.re - right.re;
    diff.im = left.im - right.im;
    return diff;
}
// Complex product: (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
__host__ __device__ complex complex_mult(complex left, complex right) {
    complex prod;
    prod.re = left.re * right.re - left.im * right.im;
    prod.im = left.re * right.im + left.im * right.re;
    return prod;
}
// Print one complex number as "re+imi " (negative imaginary parts render
// as e.g. "1+-2i" because of the fixed '+' in the format string).
__host__ void stampaj(complex x) {
printf("%g+%gi ", x.re, x.im);
}
// Print an array of N complex numbers, wrapping the line at indices
// divisible by 7 (7, 14, ...).
__host__ void stampajNiz(complex *A, int N) {
int i;
for (i = 0; i < N; i++) {
stampaj(A[i]);
putchar(' ');
// line break after every multiple-of-7 index (skipping i == 0)
if (i && !(i % 7))
putchar('\n');
}
// printf("end of row!\n");
}
// Print an N x N complex matrix stored row-major in m_A, one row at a time.
__host__ void stampajMatricu(complex *m_A, int N) {
int i;
for (i = 0; i < N; i++) {
stampajNiz(m_A + i * N, N);
}
}
|
14,149 | /*************************************************************************
> File Name: hello.cu
> Author: anryyang
> Mail: anryyang@gmail.com
> Created Time: Mon 26 Feb 2018 04:50:59 PM SGT
************************************************************************/
#include<stdio.h>
// Empty kernel: exists only to demonstrate a minimal <<<...>>> launch.
__global__ void mykernel(void) {
}
// Launch the (empty) kernel and print a greeting from the host.
int main(void) {
// one block of one thread
mykernel<<<1,1>>>();
// Wait for the kernel so any launch/execution error is surfaced
// before the process exits (launches are asynchronous).
cudaDeviceSynchronize();
printf("Hello World!\n");
return 0;
}
|
14,150 | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 1e-4
// One thread per pixel: apply a per-pixel 3x4 affine model to the three
// planar channels of `input` (channel c lives at [id + c*size]; channels
// are read in the order 2, 1, 0) and write the three transformed channels
// into `filtered_best_output` with the same planar layout.
// filtered_affine_model stores 12 floats per pixel: a row-major 3x4 matrix A.
__global__ void reconstruction_best_kernel( float *input, float *filtered_affine_model, float *filtered_best_output, int h, int w )
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int size = h * w;
if (id < size) {
double out1 =
input[id + 2*size] * filtered_affine_model[id*12 + 0] + // A[0][0] +
input[id + size] * filtered_affine_model[id*12 + 1] + // A[0][1] +
input[id] * filtered_affine_model[id*12 + 2] + // A[0][2] +
filtered_affine_model[id*12 + 3]; //A[0][3];
double out2 =
input[id + 2*size] * filtered_affine_model[id*12 + 4] + //A[1][0] +
input[id + size] * filtered_affine_model[id*12 + 5] + //A[1][1] +
input[id] * filtered_affine_model[id*12 + 6] + //A[1][2] +
filtered_affine_model[id*12 + 7]; //A[1][3];
double out3 =
input[id + 2*size] * filtered_affine_model[id*12 + 8] + //A[2][0] +
input[id + size] * filtered_affine_model[id*12 + 9] + //A[2][1] +
input[id] * filtered_affine_model[id*12 + 10] + //A[2][2] +
filtered_affine_model[id*12 + 11]; // A[2][3];
filtered_best_output[id] = out1;
filtered_best_output[id + size] = out2;
filtered_best_output[id + 2*size] = out3;
}
return ;
} |
14,151 | #include "merge_sort.cuh"
// Recursively merge-sort `arr` (ascending) into `out`; `arr` is unmodified.
// Fixes over the previous version:
//  - guards length <= 0 (no out-of-bounds read on empty input)
//  - merges directly into `out` (drops the redundant out_temp copy)
//  - releases every temporary buffer (the old code leaked all of them)
void mergeSortAsc(int* arr, int length, int *out)
{
    if (length <= 0) {
        return; // nothing to sort
    }
    if (length < 2) {
        out[0] = arr[0];
        return;
    }
    // split the input into two halves
    int halfSize = length / 2;
    int length_left = halfSize;
    int length_right = length - halfSize;
    int *leftPart = new int[length_left];
    int *rightPart = new int[length_right];
    for (int i = 0; i < length; i++)
    {
        if (i < halfSize)
            leftPart[i] = arr[i];              // copy left half
        else
            rightPart[i - halfSize] = arr[i];  // copy right half
    }
    // sort each half independently
    int* out_left = new int[length_left];
    int* out_right = new int[length_right];
    mergeSortAsc(leftPart, length_left, out_left);
    mergeSortAsc(rightPart, length_right, out_right);
    // merge the sorted halves directly into the caller's buffer
    mergeArraysAsc(out_left, out_right, length_left, length_right, out);
    // free all temporaries (previously leaked on every recursion level)
    delete[] leftPart;
    delete[] rightPart;
    delete[] out_left;
    delete[] out_right;
}
// Merge two ascending arrays into `out` (stable: ties are taken from the
// left array first). `out` must hold length_left + length_right elements.
void mergeArraysAsc(int* arr_left, int* arr_right, int length_left, int length_right, int* out)
{
    int li = 0, ri = 0, oi = 0;
    // take the smaller head element until one side is exhausted
    while (li < length_left && ri < length_right)
        out[oi++] = (arr_left[li] <= arr_right[ri]) ? arr_left[li++] : arr_right[ri++];
    // drain whichever side still has elements (at most one loop runs)
    while (li < length_left)
        out[oi++] = arr_left[li++];
    while (ri < length_right)
        out[oi++] = arr_right[ri++];
}
14,152 | #include <stdio.h>
#include <cuda_runtime.h>
#define CUDA_CALL(cmd) do { \
if((err = cmd) != cudaSuccess) { \
printf("(%d) Cuda Error: %s\n", __LINE__, cudaGetErrorString(err) ); \
} \
} while(0)
// Kernel definition
// Element-wise C = A + B, one thread per element within a single block.
// No bounds check: the launch must supply exactly one thread per element.
__global__ void VecAdd(float* A, float* B, float* C)
{
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
// Add two 4-element vectors on the GPU and print the result.
int main()
{
//Test vars
cudaError_t err;
float h_a[4] = {1,2,3,4};
float h_b[4] = {1,2,3,4};
float h_c[4] = {0,0,0,0};
float *d_a,*d_b,*d_c;
// allocate and upload the two input vectors
CUDA_CALL(cudaMalloc((void**)&d_a, 4 * sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&d_b, 4 * sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&d_c, 4 * sizeof(float)));
CUDA_CALL(cudaMemcpy(d_a, h_a, 4 * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(d_b, h_b, 4 * sizeof(float), cudaMemcpyHostToDevice));
// Kernel invocation with 4 threads (one per element)
VecAdd<<<1,4>>>(d_a, d_b, d_c);
CUDA_CALL(cudaGetLastError()); // surface launch-configuration errors
// blocking copy also synchronizes with the kernel (was unchecked before)
CUDA_CALL(cudaMemcpy(h_c, d_c, 4 * sizeof(float), cudaMemcpyDeviceToHost));
printf("\n Vector sum is %f,%f,%f,%f. ",h_c[0],h_c[1],h_c[2],h_c[3]);
// release device memory (previously leaked)
CUDA_CALL(cudaFree(d_a));
CUDA_CALL(cudaFree(d_b));
CUDA_CALL(cudaFree(d_c));
return 0;
}
|
14,153 | #include "includes.h"
// One thread per nonzero of a sparse COO matrix (val/row_ind/col_ind).
// Stores the absolute per-row prediction error in mat_err and accumulates
// a learning-rate-scaled gradient contribution into the weight vector w.
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err, int nnz, float *act, float *label, float *w, float learning_rate) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
// fabsf: explicit float absolute value (plain `abs` resolves to the
// integer overload in C contexts and would truncate the error)
mat_err[tid] = fabsf(label[r] - act[r]);
float err = v * (label[r] - act[r]);
// atomic: multiple nonzeros can share the same column c
atomicAdd(&w[c], learning_rate * err);
}
}
14,154 | #define HANDLE_ERROR(err) if(err != cudaSuccess) { printf("Error\n"); exit(1); }
#include <stdio.h>
#include <stdlib.h>
#define N 32
// One single-thread block per element (launched <<<N,1>>>): c = a + b.
// blockIdx.x indexes the element; the guard keeps excess blocks in bounds.
__global__ void add(int *a, int *b, int *c){
int tid = blockIdx.x;
if(tid < N)
c[tid] = a[tid] + b[tid];
}
// Add two N-element vectors on a user-selectable GPU and print the sums.
int main(int argc, char *argv[]){
int num_gpu = 0;
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// optional first argument selects the GPU (defaults to device 0)
if(argc == 2) num_gpu = atoi(argv[1]);
// host-side test data: a[i] = i, b[i] = i^2
for (int i = 0; i< N; i++){
a[i] = i;
b[i] = i * i;
}
// was unchecked: an invalid device id silently broke every later call
HANDLE_ERROR(cudaSetDevice(num_gpu));
HANDLE_ERROR(cudaMalloc((void **)&dev_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void **)&dev_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void **)&dev_c, N * sizeof(int)));
HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
add <<< N, 1 >>> (dev_a, dev_b, dev_c); // one single-thread block per element
HANDLE_ERROR(cudaGetLastError()); // surface launch errors
HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));
for(int i = 0; i < N; i++)
printf("%d + %d = %d \n", a[i], b[i], c[i]);
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_c));
return 0;
}
|
14,155 | #include "GeometricRestartsManager.cuh"
// Initialize the geometric restart policy: restart after
// `initial_conflicts_until_restart` conflicts, then grow the threshold by
// `increase_factor` after every restart.
__device__ GeometricRestartsManager::GeometricRestartsManager(
int initial_conflicts_until_restart, float increase_factor)
{
conflicts_until_restart = initial_conflicts_until_restart;
this->increase_factor = increase_factor;
n_current_conflicts = 0;
}
// Record one conflict toward the current restart threshold.
__device__ void GeometricRestartsManager::signal_conflict()
{
n_current_conflicts++;
}
// Reset the conflict counter and geometrically grow the threshold
// (truncated to int) for the next restart interval.
__device__ void GeometricRestartsManager::handle_restart()
{
n_current_conflicts = 0;
conflicts_until_restart = (int) (conflicts_until_restart * increase_factor);
}
// True exactly when the conflict count has reached the threshold; in that
// case the counter and threshold are updated as a side effect.
__device__ bool GeometricRestartsManager::should_restart()
{
if (n_current_conflicts >= conflicts_until_restart) {
handle_restart();
return true;
}
else {
return false;
}
}
|
14,156 | #include <iostream>
using namespace std;
#define TYPE float
typedef TYPE T;
__constant__ float dev_box[4];
__constant__ int dev_threads[1];
__constant__ int dev_blocks[1];
__constant__ int dev_n_of_ints[1];
__constant__ int dev_n_of_func = 4;
// Minimal closed-interval [low, up] type usable on both host and device.
template<class T>
class interval_gpu
{
public:
__device__ __host__ interval_gpu();
__device__ __host__ interval_gpu(T const &v);
__device__ __host__ interval_gpu(T const &l, T const &u);
__device__ __host__ T const &lower() const;
__device__ __host__ T const &upper() const;
static __device__ __host__ interval_gpu empty();
// stream output as "[low:up]" (host-only)
friend ostream& operator<<(ostream& os, const interval_gpu<T> &x){
os<<"["<<x.lower()<<":"<<x.upper()<<"]";return os;
}
private: T low; T up;
};
// Constructors
// Default: endpoints left uninitialized (cheap device-side array creation).
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(){}
// Degenerate (point) interval [v, v].
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &v) :
low(v), up(v){}
// General interval [l, u]; l <= u is expected but not checked.
template<class T> inline __device__ __host__
interval_gpu<T>::interval_gpu(T const &l, T const &u) :
low(l), up(u){}
// Lower endpoint accessor.
template<class T> inline __device__ __host__
T const &interval_gpu<T>::lower() const
{return low;}
// Upper endpoint accessor.
template<class T> inline __device__ __host__
T const &interval_gpu<T>::upper() const
{return up;}
//OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD OVERLOAD
// Interval addition: [a,b] + [c,d] = [a+c, b+d].
template<class T> inline __host__ __device__
interval_gpu<T> operator+(interval_gpu<T> const &x, interval_gpu<T> const &y)
{
return interval_gpu<T>(x.lower() + y.lower(), x.upper() + y.upper());
}
// Interval subtraction: [a,b] - [c,d] = [a-d, b-c].
template<class T> inline __host__ __device__
interval_gpu<T> operator-(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(x.lower() - y.upper(), x.upper() - y.lower());}
// Interval multiplication: bounds are the min/max of all four endpoint products.
template<class T> inline __host__ __device__
interval_gpu<T> operator*(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()*y.lower(),x.lower()*y.upper()),
min(x.upper()*y.lower(),x.upper()*y.upper())),
max(max(x.lower()*y.lower(),x.lower()*y.upper()),
max(x.upper()*y.lower(),x.upper()*y.upper())));}
// Interval division via min/max of the four endpoint quotients.
// NOTE(review): no handling for 0 contained in y -- a divisor interval
// straddling zero yields meaningless bounds here; confirm callers avoid it.
template<class T> inline __host__ __device__
interval_gpu<T> operator/(interval_gpu<T> const &x, interval_gpu<T> const &y)
{return interval_gpu<T>(min(min(x.lower()/y.lower(),x.lower()/y.upper()),
min(x.upper()/y.lower(),x.upper()/y.upper())),
max(max(x.lower()/y.lower(),x.lower()/y.upper()),
max(x.upper()/y.lower(),x.upper()/y.upper())));}
// Constraint x0^2 + x1^2 < 12^2 (inside radius 12) in interval arithmetic.
// Returns 2 if the whole box satisfies it, 1 if f straddles 0 (boundary
// crosses the box), 0 if no point of the box can satisfy it.
__device__ __forceinline__ int g1(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
interval_gpu<T> f(x[0]*x[0] + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
// Constraint 8^2 - x0^2 - x1^2 < 0 (outside radius 8); same 0/1/2 coding as g1.
__device__ __forceinline__ int g2(interval_gpu<T> *x){
interval_gpu<T> l(8);
interval_gpu<T> f(l*l - x[0]*x[0] - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
// Constraint (x0-5)^2 + x1^2 < 12^2 (inside radius 12 about (5,0));
// same 0/1/2 coding as g1.
__device__ __forceinline__ int g3(interval_gpu<T> *x){
interval_gpu<T> lmax(12);
interval_gpu<T> l0(5);
interval_gpu<T> f((x[0]-l0)*(x[0]-l0) + x[1]*x[1] - lmax*lmax);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
// Constraint 8^2 - (x0-5)^2 - x1^2 < 0 (outside radius 8 about (5,0));
// same 0/1/2 coding as g1.
__device__ __forceinline__ int g4(interval_gpu<T> *x){
interval_gpu<T> l(8);
interval_gpu<T> l0(5);
interval_gpu<T> f(l*l - (x[0]-l0)*(x[0]-l0) - x[1]*x[1]);
return int(bool(f.upper() < 0) + bool(f.lower() < 0));
}
__constant__ int(*dev_func_pp[4])(interval_gpu<T>*) = {&g1,&g2,&g3,&g4};
// Refine one coarse cell (index corner[0]) on a finer sub-grid: each thread
// classifies one sub-box against every constraint in dev_func_pp and writes
// the product of the per-constraint codes into detail_res.
template<class T>
__global__ void second_grid(int* detail_res,int* corner){
// lower-left corner of the coarse cell being refined
double x1_low = dev_box[0] + int(corner[0] % dev_threads[0])*(dev_box[1] - dev_box[0])/dev_threads[0];
double x2_low = dev_box[2] + int(corner[0] / dev_threads[0])*(dev_box[3] - dev_box[2])/dev_blocks[0];
interval_gpu<T>* x = new interval_gpu<T>[dev_n_of_ints[0]];
x[0] = interval_gpu<T>(x1_low + (threadIdx.x) * ((dev_box[1] - dev_box[0])/dev_threads[0])/blockDim.x,
x1_low +(1+threadIdx.x) * ((dev_box[1] - dev_box[0])/dev_threads[0])/blockDim.x);
x[1] = interval_gpu<T>(x2_low + (blockIdx.x) * ((dev_box[3] - dev_box[2])/dev_blocks[0])/gridDim.x,
x2_low + (1+blockIdx.x) * ((dev_box[3] - dev_box[2])/dev_blocks[0])/gridDim.x);
int cell = blockIdx.x*blockDim.x + threadIdx.x;
detail_res[cell] = 1;
for(int i = 0; i < dev_n_of_func; i++){
detail_res[cell] *= (*dev_func_pp[i])(x);
}
delete[] x; // device-heap allocation was previously leaked by every thread
if(cell == 0){
printf("corner = %d\n",corner[0]);
}
}
//1 thread to up, in for loop to the end
// Coarse classification pass: thread (blockIdx, threadIdx) owns one cell of
// the gridDim x blockDim partition of dev_box and stores the product of all
// constraint codes in res (0 means some constraint excludes the whole cell).
template<class T>
__global__ void large_grid(int* res){
interval_gpu<T>* x = new interval_gpu<T>[dev_n_of_ints[0]];
x[0] = interval_gpu<T>(dev_box[0] + (threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x,
dev_box[0] +(1+threadIdx.x) * (dev_box[1] - dev_box[0])/blockDim.x);
x[1] = interval_gpu<T>(dev_box[2] + (blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x,
dev_box[2] + (1+blockIdx.x) * (dev_box[3] - dev_box[2])/gridDim.x);
int cell = blockIdx.x*blockDim.x + threadIdx.x;
res[cell] = 1;
for(int i = 0; i < dev_n_of_func; i++){
res[cell] *= (*dev_func_pp[i])(x);
}
delete[] x; // was leaked: in-kernel new[] is never reclaimed automatically
}
//в уточнении нуждаются только граничные ячейки.
//возвращается 2048 индексов номеров крупной сетки
//launch kernell fromkernell cudalaunchkernel
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with file/line context if a CUDA runtime call returned an error.
// Compiled out unless CUDA_ERROR_CHECK is defined; use via CudaSafeCall().
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Check cudaGetLastError() and abort with file/line context on failure;
// then (debug-grade, costly) synchronize and re-check to surface async
// kernel errors. Compiled out unless CUDA_ERROR_CHECK is defined.
// Use via CudaCheckError() after kernel launches.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Two-level interval classification of the box host_box against g1..g4:
// a coarse large_grid pass, then a second_grid refinement launch for every
// coarse cell whose code marks it as a boundary cell.
int main(){
int n_of_ints = 2;
float host_box[4] = {-15.0,0.0,0.0,7.5}; // {x1_min, x1_max, x2_min, x2_max}
int lb = 64;      // blocks per launch (x2 cells)
int lt = lb*2;    // threads per block (x1 cells)
int * res;        // coarse-grid classification codes
int * detail_res; // fine-grid classification codes
int * corner;     // flat index of the coarse cell being refined
cudaMallocManaged(&corner, sizeof(int));
cudaMallocManaged(&res, sizeof(int)*lb*lt);
cudaMallocManaged(&detail_res, sizeof(int)*lb*lb);
cudaMemcpyToSymbol(dev_n_of_ints, &n_of_ints, sizeof(int));
// BUG FIX: the source argument here had been mangled to a bare "<";
// it must be the address of lt, mirroring the dev_blocks copy below.
cudaMemcpyToSymbol(dev_threads, &lt, sizeof(int));
cudaMemcpyToSymbol(dev_blocks, &lb, sizeof(int));
cudaMemcpyToSymbol(dev_box, &host_box, sizeof(float)*4);
large_grid<T><<<lb, lt>>>(res);
cudaDeviceSynchronize();
int counter = 0;
for(int i = 0; i < lb; i++){
for(int j = 0; j < lt; j++){
// a nonzero low nibble marks a boundary cell that needs refinement
if(int(res[(i*lt+j)])%16>0){
counter++;
corner[0] = (i*lt+j);
cout<<"counter = "<<counter<<"\n";
second_grid<T><<<lb,lb>>>(detail_res,corner);
CudaCheckError();
cudaDeviceSynchronize();
for(int k = 0; k < lb; k++){
for(int m = 0; m < lb; m++){
// (the fine-cell interval bounds can be reconstructed here
//  from host_box, i, j, k, m when output is wanted)
detail_res[k*lb+m] = 0; // reset for the next refinement launch
}
}
}
}
}
cudaFree(res);
cudaFree(detail_res);
cudaFree(corner);
// BUG FIX: the previous version also called cudaFree() on the __constant__
// symbols dev_blocks/dev_threads/dev_n_of_ints/dev_box; constant memory is
// not heap-allocated and must never be passed to cudaFree.
return 0;
}
|
14,157 | #include <iostream>
// Placeholder kernel: intentionally empty, used only to time a bare launch.
__global__ void matrix_mult(){
}
// Time a (placeholder) kernel launch with CUDA events.
int main(int argc, char **argv)
{
int devID = 0;
cudaError_t error;
cudaDeviceProp deviceProp;
// BUG FIX: the old code overwrote the cudaGetDevice() status with the
// cudaGetDeviceProperties() status before ever inspecting it, and its
// error branch was empty -- check each call as it happens.
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
std::cerr << "cudaGetDevice failed: " << cudaGetErrorString(error) << std::endl;
return 1;
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
std::cerr << "cudaGetDeviceProperties failed: " << cudaGetErrorString(error) << std::endl;
return 1;
}
// devices in prohibited compute mode cannot run kernels at all
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
return 1;
}
// bracket the launch with events and measure elapsed device time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
matrix_mult<<< 1,1 >>> ();
cudaEventRecord(stop);
cudaEventSynchronize(stop); // block until `stop` is reached on the device
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "Matrixmultiplikation (" << milliseconds << " ms)" << std::endl;
// release event resources (previously leaked)
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
14,158 | #include "includes.h"
// Per-thread fetch-and-add: atomically adds *pIn to sum[threadIdx.x] and
// stores the value sum[threadIdx.x] held BEFORE the addition into out
// (atomicAdd returns the old value).
__global__ void Return32( int *sum, int *out, const int *pIn )
{
out[threadIdx.x] = atomicAdd( &sum[threadIdx.x], *pIn );
} |
14,159 | #include <cuda_runtime.h>
#include <iostream>
// Radix-sort bit pass: the masked bit of each element is stored INVERTED in
// out_bits (1 when the bit is 0) and directly in reverse_out_bits, so that an
// exclusive scan of out_bits yields destinations for the zero-bit group.
// nbOnes counts the elements with out_bits == 1 (i.e. masked bit == 0),
// which is the size of the leading group after scatter.
__global__ void getBit(int* input, int* out_bits, int* reverse_out_bits, int* nbOnes, int mask) {
int tid = threadIdx.x;
if((input[tid] & mask) == mask) {
out_bits[tid] = 0;
reverse_out_bits[tid] = 1;
}
else {
out_bits[tid] = 1;
reverse_out_bits[tid] = 0;
atomicAdd(nbOnes, 1); // Count the number of ones
}
}
// Blelloch Scan !
__global__ void blelloch_scan(int *output_data, int n) {
int tid = threadIdx.x;
int offset = 1;
for (int d = n>>1; d > 0; d >>= 1){ // build sum in place up the tree
__syncthreads();
if (tid < d) {
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
output_data[bi] += output_data[ai];
}
offset *= 2;
}
if (tid == 0) {
output_data[n - 1] = 0; // clear the last element
}
//down
for (int d = 1; d < n; d *= 2){ // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (tid < d) {
int ai = offset*(2*tid+1)-1;
int bi = offset*(2*tid+2)-1;
float t = output_data[ai];
output_data[ai] = output_data[bi];
output_data[bi] += t;
}
}
}
// rearrange the array so as to put the correct values at the position indicated by the blelloch scan algorithm
__global__ void sort_array(int* input, int* output, int* bits, int* result_scan, int* second_scan, int* num_ones) {
int tid = threadIdx.x;
if(bits[tid] == 1)
{
int idx = result_scan[tid]; // get the position in the first scan
output[idx] = input[tid]; // put the value in the good position
}
else {
int idx = second_scan[tid] + *num_ones; // value whose low weight bit is 1 therefore we shift a good number of 1 and we place from this index
output[idx] = input[tid]; // put the value in the good position
}
}
// 3-bit LSD radix sort of 8 integers on the GPU, printing every stage.
int main(int argc, char const* argv[]) {
// Host buffers are pinned (cudaMallocHost) and must be released with
// cudaFreeHost; device buffers come from cudaMalloc/cudaFree.
int* test_input_host; // input host
int* test_input_device; // input device
int* test_output_host; // output host
int* test_output_device; // output device
int* test_bits_host; // bits host
int* test_bits_device; // bits device
int* test_reverse_bits_host; // bits reverse from test_bits_host
int* test_reverse_bits_device; // bits reverse from test_bits_device
int* nb_ones_host; // number of ones host
int* nb_ones_device; // number of ones device
int* result_scan_host; // result scan host
int* result_scan_device; // result scan device
int* result_second_scan_host; // result second scan host
int* result_second_scan_device; // result second scan device
cudaMallocHost((void**)&test_input_host, 8*sizeof(int));
cudaMallocHost((void**)&test_output_host, 8*sizeof(int));
cudaMallocHost((void**)&test_bits_host, 8*sizeof(int));
cudaMallocHost((void**)&nb_ones_host, sizeof(int));
cudaMallocHost((void**)&result_scan_host, 8*sizeof(int));
cudaMallocHost((void**)&test_reverse_bits_host, 8*sizeof(int));
cudaMallocHost((void**)&result_second_scan_host, 8*sizeof(int));
cudaMalloc(&test_input_device, 8*sizeof(int));
cudaMalloc(&test_output_device, 8*sizeof(int));
cudaMalloc(&test_bits_device, 8*sizeof(int));
cudaMalloc(&nb_ones_device, sizeof(int));
cudaMalloc(&result_scan_device, 8*sizeof(int));
cudaMalloc(&test_reverse_bits_device, 8*sizeof(int));
cudaMalloc(&result_second_scan_device, 8*sizeof(int));
// ------------------- INITIALISATION ------------------ //
test_input_host[0] =0; // 000
test_input_host[1] =5; // 101
test_input_host[2] =2; // 010
test_input_host[3] =7; // 111
test_input_host[4] =1; // 001
test_input_host[5] =3; // 011
test_input_host[6] =6; // 110
test_input_host[7] =4; // 100
cudaMemcpy(test_input_device, test_input_host, sizeof(int)*8, cudaMemcpyHostToDevice);
int mask = 1;
// one pass per bit, least-significant first
for(int x = 0; x < 3; x++){
std::cout << "Orignal Array : ";
for(int i = 0; i < 8; i++) {
std::cout << test_input_host[i] << " ";
}
std::cout << std::endl ;
cudaMemset(nb_ones_device, 0, sizeof(int)); // reset the ones counter
getBit<<<1, 8>>>(test_input_device, test_bits_device, test_reverse_bits_device, nb_ones_device, mask); // Kernel Invoke
cudaMemcpy(test_bits_host, test_bits_device, 8*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(nb_ones_host, nb_ones_device, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(test_reverse_bits_host, test_reverse_bits_device, sizeof(int)*8, cudaMemcpyDeviceToHost);
std::cout << "Bit of value : " ;
for(int i = 0; i < 8; i++) {
std::cout << test_bits_host[i] << " ";
}
std::cout << "\nBits reverse : " ;
for(int i = 0; i < 8; i++) {
std::cout << test_reverse_bits_host[i] << " ";
}
std::cout << "\nNum of ones : " << *nb_ones_host << std::endl ;
cudaMemcpy(result_scan_device, test_bits_host, 8*sizeof(int), cudaMemcpyHostToDevice);
blelloch_scan<<<1, 8>>>(result_scan_device, 8); // scan of the inverted-bit array
cudaMemcpy(result_scan_host, result_scan_device, 8*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "First scan : " ;
for(int i = 0; i < 8; i++) {
std::cout << result_scan_host[i] << " ";
}
std::cout << std::endl;
cudaMemcpy(result_second_scan_device, test_reverse_bits_host, 8*sizeof(int), cudaMemcpyHostToDevice);
blelloch_scan<<<1, 8>>>(result_second_scan_device, 8); // scan of the direct-bit array
cudaMemcpy(result_second_scan_host, result_second_scan_device, 8*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "Second scan : " ;
for(int i = 0; i < 8; i++) {
std::cout << result_second_scan_host[i] << " ";
}
std::cout << std::endl ;
sort_array<<<1, 8>>>(test_input_device, test_output_device, test_bits_device, result_scan_device, result_second_scan_device, nb_ones_device);
cudaMemcpy(test_output_host, test_output_device, sizeof(int)*8, cudaMemcpyDeviceToHost);
std::cout << "\nStep [" << x + 1 << "/3] \nSorted GPU : ";
for(int i = 0; i < 8; i++) {
std::cout << test_output_host[i] << " ";
}
std::cout << std::endl << std::endl;
mask <<= 1; // change bit !
cudaMemcpy(test_input_device, test_output_host,sizeof(int)*8, cudaMemcpyHostToDevice);
}
// BUG FIX: pinned host buffers were previously released with cudaFree
// (mismatched API), result_scan_device was freed twice, and the
// second-scan buffers were never freed. Release each allocation exactly
// once with the matching deallocator.
cudaFree(result_scan_device);
cudaFree(result_second_scan_device);
cudaFree(test_reverse_bits_device);
cudaFree(test_bits_device);
cudaFree(nb_ones_device);
cudaFree(test_output_device);
cudaFree(test_input_device);
cudaFreeHost(result_scan_host);
cudaFreeHost(result_second_scan_host);
cudaFreeHost(test_reverse_bits_host);
cudaFreeHost(test_bits_host);
cudaFreeHost(nb_ones_host);
cudaFreeHost(test_output_host);
cudaFreeHost(test_input_host);
return 0;
}
14,160 | /* now let's drop some CUDA in there!
* instead of running all on the CPU, we will hand off the task of adding floats to the GPU
* for now, we'll stick with one thread
* compile with CUDA compiler:
nvcc cuda1.cu -o cuda1
*/
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@//
#include <cstdio>
#include <cmath>
int arraySize = 1<<20; // basically a million
// function to add them together
// __global__ makes addArrays() launchable on the GPU (a "kernel").
// Single-thread version: the one launched thread loops over the whole
// array and writes sum[i] = add1[i] + add2[i] for every element.
__global__ /*/*/ // this makes addArrays() accessible to the GPU
void addArrays (int arraySize, float *add1, float *add2, float *sum){ // addArrays() is now considered a kernel
for (int i=0; i<arraySize; i++){
sum[i] = add1[i] + add2[i];
}
}
// all the action
// Driver: allocate unified (managed) memory, fill two input arrays on the
// host, launch a single-thread kernel to add them, then check the result.
int main(){
// three arrays; we will add the first two to sum[]
printf("initializing arrays\n");
float *add1, *add2, *sum; /*/*/ // CUDA allows us to set up a memory space
cudaMallocManaged( &add1, arraySize*sizeof(float) ); /*/*/ // accessible by the CPU (HOST) and GPU (DEVICE) alike
cudaMallocManaged( &add2, arraySize*sizeof(float) ); /*/*/ // cudaMallocManaged(), like malloc(),
cudaMallocManaged( &sum, arraySize*sizeof(float) ); /*/*/ // returns pointers usable by both devices
/* All that was a lie- cudaMallocManaged() does not actually allocate memory on a RAM card accessed by both host and device
 * ...but that is what appears to happen in the source code, so when learning the command, we talk of this imaginary memory like it is real
 * What is really happening? Well, when you give a kernel to the GPU, it needs a copy of the instructions and variables in local memory
 * ...so CUDA copies everyting relevant from the host memory to the device memory.
 * We previously had to manage this copying manually with commands like cudaMemcpy*().
 * cudaMallocManaged() makes our code more concise and readable by copying for us.
 * Unfortunately, we will still have latency from memory transfer.
 */
// fill first two arrays before the CUDA starts
for (int i=0; i<arraySize; i++){
add1[i] = 1.0;
add2[i] = 2.0;
}
printf("arrays done. prepare for adding\n");
// parallelization happens here
addArrays<<<1,1>>>(arraySize, add1,add2,sum); /*/*/ // <<<1,1>>> tells CPU to give task to the GPU
/*/*/ // the 1's will be explained later, but in
// wait for all threads to complete on the GPU // this case it means just one thread
cudaDeviceSynchronize(); /*/*/ // then we wait for all GPU threads to finish calculating
printf("adding complete.\t"); // now the CPU is back in charge
// check for accuracy- what's the biggest mistake?
// every element should be exactly 1.0 + 2.0 = 3.0
float maxError = 0.0;
for (int i=0; i<arraySize; i++){
// check each array index for value and store the greater deviation from 3.0
maxError = fmax(maxError, fabs(sum[i]-3.0));
}
printf("max error = %f\n",maxError);
// free memory
cudaFree(add1); /*/*/ // we need to use cudaFree()
cudaFree(add2); /*/*/ // instead of delete []
cudaFree(sum); /*/*/ // because it's shared CUDA memory
return 0;
}
|
14,161 | /* Voxel sampling GPU implementation
* Author Zhaoyu SU
* All Rights Reserved. Sep., 2019.
*/
#include <stdio.h>
#include <iostream>
#include <float.h>
// Binary search for target_voxel_id in the sorted slice
// input_voxel_idx[start_id..stop_id]; returns the matching index or -1.
// The leading range check cheaply rejects targets outside [first, last].
__device__ int binary_search(const long long* input_voxel_idx,
int start_id,
int stop_id,
long long target_voxel_id) {
if (input_voxel_idx[start_id] > target_voxel_id || input_voxel_idx[stop_id] < target_voxel_id)
return -1;
while (start_id <= stop_id) {
// overflow-safe midpoint
int m = start_id + (stop_id - start_id) / 2;
if (input_voxel_idx[m] == target_voxel_id)
return m;
if (input_voxel_idx[m] < target_voxel_id)
start_id = m + 1;
else
stop_id = m - 1;
}
return -1;
}
// Map a flat element id to its batch index using accumulated start offsets
// (accu_list[b] = first id belonging to batch b); ids at or past the last
// offset fall into the final batch.
__device__ int get_batch_id(int* accu_list, int batch_size, int id) {
for (int b=0; b<batch_size-1; b++) {
if (id >= accu_list[b]) {
if(id < accu_list[b+1])
return b;
}
}
return batch_size - 1;
}
// One thread per (center, kernel-cell) output slot: mark the slot unused
// (index -1) and fill all of its feature channels with the padding value.
__global__ void output_init_gpu_kernel(int batch_size, int center_num, int kernel_num,
float padding, int channels,
float* output_features,
int* output_idx) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id < center_num * kernel_num) {
output_idx[thread_id] = -1;
for (int c=0; c<channels; c++) {
output_features[thread_id*channels + c] = padding;
}
}
}
// Scatter each input point into a per-batch occupancy grid: the cell
// covering the point records one of the point ids that fall inside it
// (atomicExch: the last writer wins when several points share a cell).
// NOTE(review): coordinates are assumed non-negative and inside the grid
// extents -- no clamp/bounds check is applied here; confirm with callers.
__global__ void grid_buffer_init_gpu_kernel(int batch_size, int input_point_num, float resolution,
int grid_dim_w, int grid_dim_l, int grid_dim_h,
const float* input_coors,
int* input_accu_list,
int* grid_buffer) {
const int grid_dim_size = grid_dim_w * grid_dim_h * grid_dim_l;
int point_id = threadIdx.x + blockIdx.x * blockDim.x;
if (point_id < input_point_num) {
int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution);
int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution);
int center_grid_coor_z = (int)floor(input_coors[point_id*3 + 2] / resolution);
int batch_id = get_batch_id(input_accu_list, batch_size, point_id);
// flatten (batch, x, y, z) into the buffer index
int grid_buffer_idx = batch_id * grid_dim_size + center_grid_coor_x * grid_dim_l * grid_dim_h + center_grid_coor_y * grid_dim_h + center_grid_coor_z;
atomicExch(&grid_buffer[grid_buffer_idx], point_id);
}
}
// Gather input point features into per-center voxel kernels.
// One thread per (center, search-cell) pair: each thread probes one cell of a
// (kernel_size+1)^3 search window around its center, reads the point id cached
// there by grid_buffer_init_gpu_kernel, and if that point lies within
// 1.5 * resolution of the center on every axis, copies its features into the
// matching cell of the center's kernel_size^3 output voxel grid.
// NOTE(review): the output_idx check-then-set below is not atomic; two search
// cells clamped to the same grid cell could both pass the guard. Presumably
// either candidate point is acceptable — confirm intended semantics.
__global__ void voxel_sampling_gpu_kernel(int batch_size, int center_num, int channels,
                                          int kernel_size,
                                          int grid_dim_w, int grid_dim_l, int grid_dim_h,
                                          float resolution,
                                          const float* input_coors,
                                          const float* input_features,
                                          const float* center_coors,
                                          int* center_accu_list,
                                          int* grid_buffer,
                                          float* output_features,
                                          int* output_idx) {
    const int kernel_num = kernel_size * kernel_size * kernel_size;
    const int half_kernel_size = (kernel_size - 1) / 2;
    // Flat offset of the kernel's center cell within a kernel_num block.
    const int half_kernel_num = kernel_size * kernel_size * half_kernel_size + \
                                kernel_size * half_kernel_size + \
                                half_kernel_size;
    // Search window is one cell wider than the kernel to cover fractional
    // center positions inside their grid cell.
    const int search_kernel_size = kernel_size + 1;
    const int search_kernel_num = search_kernel_size * search_kernel_size * search_kernel_size;
    const int grid_dim_size = grid_dim_w * grid_dim_l * grid_dim_h;
    const float radius = 1.5 * resolution;
    const float r2 = radius * radius;
    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread_id < center_num * search_kernel_num) {
        int center_id = thread_id / search_kernel_num;
        int search_grid_id = thread_id % search_kernel_num;
        int batch_id = get_batch_id(center_accu_list, batch_size, center_id);
        float center_coor_x = center_coors[center_id*3 + 0];
        float center_coor_y = center_coors[center_id*3 + 1];
        float center_coor_z = center_coors[center_id*3 + 2];
        // Truncate-toward-zero grid cell of the center.
        int center_grid_coor_x = __float2int_rz(center_coor_x / resolution);
        int center_grid_coor_y = __float2int_rz(center_coor_y / resolution);
        int center_grid_coor_z = __float2int_rz(center_coor_z / resolution);
        // Decompose this thread's cell index within the search window.
        int search_grid_x = search_grid_id / (search_kernel_size * search_kernel_size);
        int search_grid_y = search_grid_id % (search_kernel_size * search_kernel_size) / search_kernel_size;
        int search_grid_z = search_grid_id % search_kernel_size;
        // Shift the window by the center's sub-cell position (round() recenters
        // the window toward the side of the cell the center sits on).
        int search_offset_x = -2 + round(center_coor_x / resolution - center_grid_coor_x) + search_grid_x;
        int search_offset_y = -2 + round(center_coor_y / resolution - center_grid_coor_y) + search_grid_y;
        int search_offset_z = -2 + round(center_coor_z / resolution - center_grid_coor_z) + search_grid_z;
        // Clamp to the grid bounds.
        int target_grid_x = max(0, min(center_grid_coor_x + search_offset_x, grid_dim_w - 1));
        int target_grid_y = max(0, min(center_grid_coor_y + search_offset_y, grid_dim_l - 1));
        int target_grid_z = max(0, min(center_grid_coor_z + search_offset_z, grid_dim_h - 1));
        int target_grid_id = batch_id * grid_dim_size + target_grid_x * grid_dim_l * grid_dim_h + target_grid_y * grid_dim_h + target_grid_z;
        int point_id = grid_buffer[target_grid_id];
        if (point_id>=0) {  // -1 means the cell holds no point
            float coor_x = input_coors[point_id*3 +0];
            float coor_y = input_coors[point_id*3 +1];
            float coor_z = input_coors[point_id*3 +2];
            // FLT_EPSILON keeps dx/dy/dz non-zero for the sign trick below.
            float dx = coor_x - center_coor_x + FLT_EPSILON;
            float dy = coor_y - center_coor_y + FLT_EPSILON;
            float dz = coor_z - center_coor_z + FLT_EPSILON;
            float dx2 = dx * dx;
            float dy2 = dy * dy;
            float dz2 = dz * dz;
            // Per-axis distance test against the 1.5*resolution radius.
            if (dx2 < r2 && dy2 < r2 && dz2 < r2) {
                // Signed rounding to the nearest kernel cell:
                // 0.5 * |d|/d adds +/-0.5 before truncation.
                int kernel_coor_x = __float2int_rz(dx / resolution + 0.5 * fabsf(dx) / dx);
                int kernel_coor_y = __float2int_rz(dy / resolution + 0.5 * fabsf(dy) / dy);
                int kernel_coor_z = __float2int_rz(dz / resolution + 0.5 * fabsf(dz) / dz);
                // Offset from the kernel's center cell to the target cell.
                int voxel_coor = center_id * kernel_num + half_kernel_num + \
                                 kernel_size * kernel_size * kernel_coor_x + \
                                 kernel_size * kernel_coor_y + \
                                 kernel_coor_z;
                if (output_idx[voxel_coor] < 0) {  // first writer wins
                    output_idx[voxel_coor] = point_id;
                    for (int c=0; c<channels; c++) {
                        output_features[voxel_coor * channels + c] = input_features[point_id * channels + c];
                    }
                }
            }
        }
    }
}
// Backward pass of voxel sampling: scatter each voxel's output-feature
// gradient back onto the input point that filled it. atomicAdd is required
// because one input point may appear in several voxels.
// Launch: one thread per center (kernel_number threads total).
__global__ void voxel_sampling_grad_gpu_kernel(int kernel_number, int ngrid, int channels,
                                               const int* output_idx,
                                               const float* output_features_grad,
                                               float* input_features_grad) {
    const int center = threadIdx.x + blockIdx.x * blockDim.x;
    if (center >= kernel_number)
        return;
    for (int g = 0; g < ngrid; ++g) {
        const int voxel = center * ngrid + g;
        const int point = output_idx[voxel];
        if (point < 0)
            continue;  // voxel was never filled; nothing to propagate
        for (int c = 0; c < channels; ++c)
            atomicAdd(&input_features_grad[point * channels + c],
                      output_features_grad[voxel * channels + c]);
    }
}
// Host launcher for the forward voxel-sampling pipeline:
//  1) initialize output voxels to (-1, padding),
//  2) scatter input point ids into the per-batch grid buffer,
//  3) gather point features into per-center voxel kernels.
// Block sizes are chosen per-kernel with the occupancy API.
// NOTE(review): no cudaGetLastError()/sync checks after launches — kernel
// failures will surface silently at a later CUDA call.
void voxel_sampling_gpu_launcher(int batch_size, int input_point_num, int channels,
                                 int center_num, int kernel_size,
                                 int grid_dim_w, int grid_dim_l, int grid_dim_h,
                                 float resolution, float padding,
                                 const float* input_coors,
                                 const float* input_features,
                                 const int* input_num_list,
                                 const float* center_coors,
                                 const int* center_num_list,
                                 int* input_accu_list,
                                 int* center_accu_list,
                                 int* grid_buffer,
                                 float* output_features,
                                 int* output_idx) {
    // Reject degenerate shapes before touching the device.
    if (batch_size*input_point_num <=0 || center_num * channels <= 0) {
        printf("VoxelSampleOp ERROR: Invalid CUDA input dimensions.\n");
        return;
    }
    int kernel_num = kernel_size * kernel_size * kernel_size;
    // Search window is one cell wider per axis than the kernel.
    int search_kernel_num = (kernel_size + 1) * (kernel_size + 1) * (kernel_size + 1);
    int blockSize; // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize; // The actual grid size needed, based on input size
    // Step 1: one thread per output voxel slot.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, output_init_gpu_kernel, 0, center_num * kernel_num);
    gridSize = (center_num * kernel_num + blockSize - 1) / blockSize;
    output_init_gpu_kernel<<<gridSize, blockSize>>>(batch_size, center_num, kernel_num,
                                                    padding, channels,
                                                    output_features,
                                                    output_idx);
    // Step 2: one thread per input point.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, grid_buffer_init_gpu_kernel, 0, input_point_num);
    gridSize = (input_point_num + blockSize - 1) / blockSize;
    grid_buffer_init_gpu_kernel<<<gridSize, blockSize>>>(batch_size, input_point_num, resolution,
                                                         grid_dim_w, grid_dim_l, grid_dim_h,
                                                         input_coors,
                                                         input_accu_list,
                                                         grid_buffer);
    // Step 3: one thread per (center, search-cell) pair.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, voxel_sampling_gpu_kernel, 0, center_num * search_kernel_num);
    gridSize = (center_num * search_kernel_num + blockSize - 1) / blockSize;
    voxel_sampling_gpu_kernel<<<gridSize, blockSize>>>(batch_size, center_num, channels,
                                                       kernel_size,
                                                       grid_dim_w, grid_dim_l, grid_dim_h, resolution,
                                                       input_coors,
                                                       input_features,
                                                       center_coors,
                                                       center_accu_list,
                                                       grid_buffer,
                                                       output_features,
                                                       output_idx);
}
// Host launcher for the voxel-sampling backward pass: scatters output-feature
// gradients back onto input features (one thread per center).
void voxel_sampling_grad_gpu_launcher(int kernel_number, int ngrid, int channels,
                                      const int* output_idx,
                                      const float* output_features_grad,
                                      float* input_features_grad) {
    // Reject degenerate shapes before launching.
    if (kernel_number==0 || ngrid*channels == 0) {
        printf("VoxelSampleGradOp ERROR: Invalid CUDA input dimensions.\n");
        return;
    }
    int blockSize; // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize; // The actual grid size needed, based on input size
    // BUG FIX: the occupancy query previously targeted output_init_gpu_kernel;
    // it must describe the kernel actually being launched here, otherwise the
    // chosen block size may be invalid/suboptimal for the grad kernel.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, voxel_sampling_grad_gpu_kernel, 0, kernel_number);
    gridSize = (kernel_number + blockSize - 1) / blockSize;
    voxel_sampling_grad_gpu_kernel<<<gridSize, blockSize>>>(kernel_number, ngrid, channels,
                                                            output_idx,
                                                            output_features_grad,
                                                            input_features_grad);
}
14,162 | #include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<cmath>
#define TILE_SIZE 4
__device__ void store_full(float*,float*,int);
__device__ void load_full(float*,float*,int);
__device__ void potrf_tile(float*,int,int);
__device__ void trsm_tile(float*,int,int,int);
__device__ void syrk_tile(float*,int,int,int,int);
__global__ void right_looking_launch_kernel(float*,int);
// Copy the N x N matrix from shared memory (read_data) back into this
// block's N*N slice of global memory (write_data), tile by tile.
// Assumes blockDim == (TILE_SIZE, TILE_SIZE).
__device__ void store_full(float* read_data,float* write_data,int N)
{
    const int block_offset = N * N * blockIdx.x;
    const int tiles = N / TILE_SIZE;
    for (int ti = 0; ti < tiles; ++ti)
    {
        for (int tj = 0; tj < tiles; ++tj)
        {
            const int idx = (ti * TILE_SIZE + threadIdx.y) * N + tj * TILE_SIZE + threadIdx.x;
            write_data[idx + block_offset] = read_data[idx];
        }
    }
    __syncthreads();
}
// Load this block's N*N slice of global memory (read_data) into shared
// memory (write_data), tile by tile. Assumes blockDim == (TILE_SIZE, TILE_SIZE).
__device__ void load_full(float* read_data,float* write_data,int N)
{
    const int block_offset = N * N * blockIdx.x;
    const int tiles = N / TILE_SIZE;
    for (int ti = 0; ti < tiles; ++ti)
    {
        for (int tj = 0; tj < tiles; ++tj)
        {
            const int idx = (ti * TILE_SIZE + threadIdx.y) * N + tj * TILE_SIZE + threadIdx.x;
            write_data[idx] = read_data[idx + block_offset];
        }
    }
    __syncthreads();
}
// In-place Cholesky factorization of the i-th diagonal TILE_SIZE x TILE_SIZE
// tile of the N x N matrix t_A (lower-triangular result).
// Barrier-sensitive: every __syncthreads() is reached by all threads of the
// block (the divergent work sits before each barrier), so do not restructure.
__device__ void potrf_tile(float* t_A,int i,int N)
{
    int t_x = threadIdx.x;
    int t_y = threadIdx.y;
    for(int k=0;k<TILE_SIZE;k++)
    {
        // Step 1: the single diagonal thread takes the square root.
        if(t_x==t_y && t_x==k)
        {
            t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x] = sqrtf(t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x]);
        }
        __syncthreads();
        // Step 2: scale column k below the diagonal by the new pivot.
        if(t_x<t_y && t_x == k)
        {
            t_A[i*TILE_SIZE*(1+N) + t_y*N + t_x]/= t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x];
        }
        __syncthreads();
        // Step 3: rank-1 update of the trailing lower triangle.
        if(k<t_y && k<t_x && t_x<=t_y)
        {
            t_A[i*TILE_SIZE*(1+N) + t_y*N + t_x]-= t_A[i*TILE_SIZE*(1+N) + t_x*N + k]*t_A[i*TILE_SIZE*(1+N) + t_y*N + k];
        }
        __syncthreads();
    }
}
// Triangular solve: update off-diagonal tile (j, i) against the freshly
// factored diagonal tile i (forward substitution, one column step per s).
// NOTE(review): the two trailing `...*N + s` reads below lack the
// `i*TILE_SIZE` column offset used everywhere else in this file; that looks
// like it only addresses tile-column 0 correctly when i == 0. Verify against
// a reference right-looking Cholesky — possible latent bug, left unchanged.
__device__ void trsm_tile(float *row_data,int i,int j,int N)
{
    int t_x = threadIdx.x;
    int t_y = threadIdx.y;
    for(int s=0;s<TILE_SIZE;s++)
    {
        // Divide column s of the target tile by the diagonal pivot.
        if(t_x==s)
        {
            row_data[(t_y + j*TILE_SIZE)*N + t_x + i*TILE_SIZE]/= row_data[i*TILE_SIZE*(1+N) + t_x*(1+N)];
        }
        __syncthreads();
        // Eliminate the solved column from the remaining columns.
        if(t_x > s)
        {
            row_data[(t_y + j*TILE_SIZE)*N + t_x + i*TILE_SIZE]-= row_data[(t_x + i*TILE_SIZE)*N + s]*row_data[(t_y + j*TILE_SIZE)*N + s];
        }
        __syncthreads();
    }
}
// Symmetric rank-TILE_SIZE update: subtract the product of panel tiles
// (k, i) x (j, i)^T from tile (j, k) of the trailing matrix.
// Reads come from tile-column i while writes go to tile-column k (k > i in
// the caller), so the read and write regions never overlap within one call.
__device__ void syrk_tile(float* row_data,int i,int j,int k,int N)
{
    int t_y = threadIdx.y;
    int t_x = threadIdx.x;
    float valueToSubtract = 0.0;
    // Inner product over the TILE_SIZE columns of panel tile i.
    for(int r=0;r<TILE_SIZE;r++)
    {
        valueToSubtract+= row_data[(t_x + k*TILE_SIZE)*N + i*TILE_SIZE + r]*row_data[(t_y + j*TILE_SIZE)*N + i*TILE_SIZE + r];
    }
    row_data[(t_y + j*TILE_SIZE)*N + t_x + k*TILE_SIZE]-= valueToSubtract;
    __syncthreads();
}
// Right-looking blocked Cholesky of one N x N matrix per thread block, done
// entirely in dynamic shared memory (caller must pass N*N*sizeof(float) as
// the launch's shared-memory size and blockDim == (TILE_SIZE, TILE_SIZE)).
// For each diagonal tile i: factor it, solve the tiles below it, then update
// the trailing submatrix.
__global__ void right_looking_launch_kernel(float* read_data,int N)
{
    extern __shared__ float data[];
    int i,j,k;
    // Stage the whole matrix into shared memory.
    load_full(read_data,data,N);
    for(i=0;i<N/TILE_SIZE;i++)
    {
        potrf_tile(data,i,N);                 // factor diagonal tile i
        for(j=i+1;j<N/TILE_SIZE;j++)
        {
            trsm_tile(data,i,j,N);            // solve panel tile (j, i)
            for(k=i+1;k<=j;k++)
            {
                syrk_tile(data,i,j,k,N);      // update trailing tile (j, k)
            }
        }
    }
    // Write the factor back to this block's slice of global memory.
    store_full(data,read_data,N);
}
14,163 | #include <stdio.h>
#include <locale.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
long getmax(long *, long);
// Per-block max reduction: each block reduces its slice of `in` in shared
// memory and writes the block maximum to out[blockIdx.x] (the host then
// reduces the per-block results).
// Fixes vs. the previous version:
//  * results were written at out[blockIdx.x*blocks_d + tid], far past the
//    `blocks`-element buffer the host allocates -> out-of-bounds writes and a
//    result layout the host loop never read correctly;
//  * __syncthreads() was called inside a divergent branch (undefined behavior);
//  * the halving loop silently dropped elements for non-power-of-two blockDim.
// `blocks_d` is retained for interface compatibility but no longer needed.
__global__ void getMaxNum( long *in, long size, long blocks_d, long *out) {
    (void)blocks_d;
    __shared__ long sdata[1024];   // assumes blockDim.x <= 1024 (true for maxThreadsPerBlock on current GPUs)
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Out-of-range threads contribute the identity element for max.
    sdata[tid] = (i < size) ? in[i] : LONG_MIN;
    __syncthreads();
    // Tree reduction that also handles odd active counts.
    for (unsigned int n = blockDim.x; n > 1; ) {
        unsigned int half = (n + 1) / 2;
        if (tid + half < n && sdata[tid + half] > sdata[tid]) {
            sdata[tid] = sdata[tid + half];
        }
        __syncthreads();   // uniform barrier, outside the divergent branch
        n = half;
    }
    if (tid == 0) {
        out[blockIdx.x] = sdata[0];
    }
}
// Driver: fill an array with random longs, find the maximum on the GPU with a
// per-block reduction kernel, then finish the reduction on the host.
// NOTE(review): the cudaError_t values e, v, s are captured but never
// inspected; numsSize is an int and overflows for size > ~268M longs;
// printf's %'ld grouping flag needs setlocale(LC_NUMERIC, ...), which is
// never called despite locale.h being included — confirm intent.
int main(int argc, char *argv[])
{
    long size = 0; // The size of the array
    long i; // loop index
    long max;
    long * numbers; //pointer to the array
    if(argc !=2)
    {
        printf("usage: maxseq num\n");
        printf("num = size of the array\n");
        exit(1);
    }
    size = atol(argv[1]);
    numbers = (long *)malloc(size * sizeof(long));
    if( !numbers )
    {
        printf("Unable to allocate mem for an array of size %ld\n", size);
        exit(1);
    }
    srand(time(NULL)); // setting a seed for the random number generator
    // Fill-up the array with random numbers from 0 to size-1
    for( i = 0; i < size; i++){
        numbers[i] = rand() % size;
    }
    int numsSize = size * sizeof(long);
    // Get the number of threads per block but getting the device's
    // maximum threads per block
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    long THREADS_PER_BLOCK = devProp.maxThreadsPerBlock;
    //Create nums array that we will be sending to the device
    long * nums;
    // Get number of blocks by rounding up the size of the array / threads per block
    // so, the amount of blocks needed for the max threads per block for this device
    long blocks = ((size + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK);
    long numOfThreads;
    if (size > THREADS_PER_BLOCK){
        numOfThreads = THREADS_PER_BLOCK;
    }
    else{
        numOfThreads = size;
    }
    //Transfer and copy numbers array from the host to the device
    cudaMalloc((void **) &nums, numsSize);
    cudaError_t e = cudaMemcpy(nums, numbers, numsSize, cudaMemcpyHostToDevice);
    // Create array that will store the result - sending this from device to host
    long * maxResult;
    long resultSize = blocks * sizeof(long);
    // Transfer maxResult array to device
    cudaMalloc((void **) &maxResult, resultSize);
    cudaError_t v = cudaGetLastError();
    //launch kernel function
    getMaxNum<<<blocks, numOfThreads>>>(nums, size, blocks, maxResult);
    // Copy the array from the device to the host so we can get result
    // (the input buffer is reused to hold the `blocks` per-block maxima).
    cudaError_t s = cudaMemcpy(numbers, maxResult, resultSize, cudaMemcpyDeviceToHost);
    long l;
    // Final host-side reduction over the per-block results.
    max = numbers[0];
    for(l = 1; l < blocks; l++){
        if(numbers[l] > max){
            max = numbers[l];
        }
    }
    printf("The maximum number in the array is %'ld \n", max);
    cudaFree(nums);
    cudaFree(maxResult);
    free(numbers);
    cudaDeviceReset();
    exit(0);
}
|
14,164 | #include "includes.h"
#define NUM_THREADS 512
// SMO-style optimality-condition update: fold the change of the two active
// alpha multipliers (high/low working set) into every training case's
// optimality value, weighted by the cached kernel rows.
// Launch: >= num_train_cases threads, 1-D grid.
__global__ void opt_cond_itr(int num_train_cases, double *opt_cond, double alpha_high, double alpha_high_prev, int high_label, int high_indx, double alpha_low, double alpha_low_prev, int low_label, int low_indx, double *kernel_val_mat){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= num_train_cases)
        return;
    const double high_delta = (alpha_high - alpha_high_prev) * high_label;
    const double low_delta = (alpha_low - alpha_low_prev) * low_label;
    opt_cond[gid] += high_delta * kernel_val_mat[high_indx*num_train_cases+gid]
                   + low_delta * kernel_val_mat[low_indx*num_train_cases+gid];
}
14,165 | #include <stdio.h>
// Minimal device-side hello: each launched thread prints once.
__global__ void Final_Hello()
{
    printf("Final_Hello");
}
int main()
{
    // Launch a single thread and block until its printf output is flushed.
    Final_Hello<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
14,166 | #include <iostream>
using namespace std;
// Saturating log: clamp tiny (or non-positive) inputs to a floor of -10
// instead of letting logf underflow toward -inf / produce NaN.
__device__ float slog(float x) {
    return (x < 4.6e-5f) ? -10.0f : logf(x);
}
// Logarithmic tone-mapping kernel: per pixel, compute luminance from RGB,
// derive a scaling factor from the log-luminance relative to the scene
// average, and scale all three channels by it in place.
// One thread per pixel; ptr is interleaved RGB floats, row-major.
__global__ void logMapKernel(float *ptr, int width, int height, float av, float logAv, float power, float innerMult, float mult)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if(x < width && y < height)
    {
        int idx = 3 * (width * y + x);
        ptr += idx;  // point at this pixel's R channel
        // Rec. 709 luma weights.
        float lum = 0.2125f * ptr[0] + 0.7154f * ptr[1] + 0.0721f * ptr[2];
        // Tone-map ratio; dividing by lum turns the mapped luminance into a
        // per-channel gain. NOTE(review): lum == 0 (pure black) divides by
        // zero here — presumably inputs are strictly positive; confirm.
        float map = (slog(lum + av) - logAv) / slog(2.0f + powf(lum, power) * innerMult) * mult / lum;
        ptr[0] *= map;
        ptr[1] *= map;
        ptr[2] *= map;
    }
}
extern "C"
// Host wrapper: round-trip the interleaved RGB image through device memory
// and run the tone-mapping kernel over it in 16x16 pixel tiles.
void logMapApply(float *ptr, int width, int height, float av, float logAv, float power, float innerMult, float mult)
{
    const int imageBytes = width * height * 3 * sizeof(*ptr);
    float *devImage = NULL;
    cudaMalloc((void**) &devImage, imageBytes);
    cudaMemcpy(devImage, ptr, imageBytes, cudaMemcpyHostToDevice);
    const dim3 block(16, 16);
    const dim3 grid((width + block.x - 1) / block.x,
                    (height + block.y - 1) / block.y);
    logMapKernel<<<grid, block>>>(devImage, width, height, av, logAv, power, innerMult, mult);
    cudaMemcpy(ptr, devImage, imageBytes, cudaMemcpyDeviceToHost);
    cudaFree(devImage);
}
|
14,167 | // Rishabh Agarwal - 18je0676
#include <bits/stdc++.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
using namespace std;
// Error-check helper for CUDA API calls: on failure, logs the source line and
// the CUDA error string, then returns -1 from the enclosing function (so it
// is only usable inside functions returning int, e.g. main()).
#define check(statement) do {\
    cudaError_t error = statement;\
    if (error != cudaSuccess) {\
        cout << "Failed to run stmt " << __LINE__ << "\n";\
        cout << "Got CUDA error ... " << cudaGetErrorString(error) << "\n";\
        return -1;\
    }\
} while(0)
// kernel function
// 1-D convolution with zero-padded borders: c[i] = sum_j a[i - mw/2 + j] * b[j].
// a: input of length `width`; b: mask of length `maskWidth`; c: output.
__global__ void convolutionKernel(float *a,float *b,float *c,int maskWidth,int width) {
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    // BUG FIX: the grid (ceil(width/maskWidth) blocks of maskWidth threads)
    // over-provisions threads, and the unguarded write c[i] ran past the end
    // of the output buffer. Guard the whole computation on i < width.
    if (i >= width) {
        return;
    }
    float cvalue=0.0;
    int start_point=i-(maskWidth/2);  // left edge of the mask window
    for(int j = 0;j < maskWidth; j++) {
        // Skip taps that fall outside the input (implicit zero padding).
        if((start_point + j) >= 0 && (start_point+j) < width) {
            cvalue += a[start_point + j] * b[j];
        }
    }
    c[i]=cvalue;
}
// main function
// Driver: build a ones input and ones mask, run the 1-D convolution kernel,
// and print input, mask, and result.
int main() {
    float * input;
    float * mask;
    float * output;
    float * dinput;   // device copies
    float * dmask;
    float * doutput;
    int maskWidth=3;
    int width=5;
    // allocating memomry to input, mask and output
    input = (float *)malloc(sizeof(float) * width);
    mask = (float *)malloc(sizeof(float) * maskWidth);
    output = (float *)malloc(sizeof(float) * width);
    // assigning values to input, mask and output
    for(int i=0;i<width;i++) {
        input[i]=1.0;
    }
    for(int i=0;i < maskWidth;i++) {
        mask[i]=1.0;
    }
    cout << "\nInput: \n";
    for(int i=0; i<width; i++) {
        cout << input[i] << " ";
    }
    cout << "\n";
    cout << "\nMask: \n";
    for(int i=0; i < maskWidth; i++) {
        cout << mask[i] << " ";
    }
    cout << "\n";
    // allocating device memory (check() aborts main with -1 on failure)
    check(cudaMalloc((void **)&dinput, sizeof(float) * width));
    check(cudaMalloc((void **)&dmask, sizeof(float) * maskWidth));
    check(cudaMalloc((void **)&doutput, sizeof(float) * width));
    // copying memory from host to device
    check(cudaMemcpy(dinput, input, sizeof(float) * width, cudaMemcpyHostToDevice));
    check(cudaMemcpy(dmask, mask, sizeof(float) * maskWidth, cudaMemcpyHostToDevice));
    // kernel dimensions: ceil(width / maskWidth) blocks of maskWidth threads
    // (may over-provision threads past `width`).
    dim3 dimGrid(((width-1)/maskWidth) + 1, 1,1);
    dim3 dimBlock(maskWidth,1, 1);
    // calling kernel
    convolutionKernel<<<dimGrid,dimBlock>>>(dinput, dmask, doutput, maskWidth, width);
    cudaDeviceSynchronize();
    // copying memory back from device to host
    check(cudaMemcpy(output, doutput, sizeof(float) * width, cudaMemcpyDeviceToHost));
    cout << "\nOutput: \n";
    for(int i=0; i < width; i++) {
        cout << output[i] << " ";
    }
    cudaFree(dinput);
    cudaFree(dmask);
    cudaFree(doutput);
    free(input);
    free(output);
    free(mask);
    return 0;
}
|
14,168 | /***********************************************************
tissueGPU1.cu
GPU kernel to accumulate contributions of tissue source
strengths qt to tissue solute levels pt.
TWS December 2011
Cuda 10.1 Version, August 2019
************************************************************/
// Accumulate tissue solute levels: for each tissue point itp, sum the source
// strengths qt of every other point weighted by a precomputed distance
// lookup table d_dtt000 indexed by Manhattan grid distance.
// d_tisspoints is laid out as three nnt-length planes: x, y, z coordinates.
// One thread per tissue point; O(nnt) work per thread.
__global__ void tissueGPU1Kernel(int *d_tisspoints, float *d_dtt000, float *d_pt000, float *d_qt000, int nnt)
{
    int itp = blockDim.x * blockIdx.x + threadIdx.x;
    int jtp,ixyz,ix,iy,iz,jx,jy,jz,nnt2=2*nnt;
    float p = 0.;
    if(itp < nnt){
        // This point's integer grid coordinates.
        ix = d_tisspoints[itp];
        iy = d_tisspoints[itp+nnt];
        iz = d_tisspoints[itp+nnt2];
        for(jtp=0; jtp<nnt; jtp++){
            jx = d_tisspoints[jtp];
            jy = d_tisspoints[jtp+nnt];
            jz = d_tisspoints[jtp+nnt2];
            // Manhattan distance indexes the distance-weight table.
            ixyz = abs(jx-ix) + abs(jy-iy) + abs(jz-iz);
            p += d_qt000[jtp]*d_dtt000[ixyz];
        }
        d_pt000[itp] = p;
    }
}
// Host launcher for tissueGPU1Kernel: one thread per tissue point in blocks
// of 256. The useGPU flag is accepted for interface compatibility but unused.
extern "C" void tissueGPU1(int *d_tisspoints, float *d_dtt000, float *d_pt000, float *d_qt000, int nnt, int useGPU)
{
    const int threadsPerBlock = 256;
    const int blocksPerGrid = (nnt + threadsPerBlock - 1) / threadsPerBlock;
    tissueGPU1Kernel<<<blocksPerGrid, threadsPerBlock>>>(d_tisspoints, d_dtt000, d_pt000, d_qt000, nnt);
}
|
14,169 | #include <cuda.h>
#include <iostream>
#include <cstdlib>
#include <vector>
#define QNUM 19
__constant__ int NX;
__constant__ int NY;
__constant__ int NZ;
__constant__ int SIZE;
__constant__ float WW[QNUM];
__constant__ char CX[QNUM];
__constant__ char CY[QNUM];
__constant__ char CZ[QNUM];
__constant__ char OPPQ[QNUM];
static const char Cx [] = {0, 1, -1, 0, 0, 0, 0, 1, -1, -1, 1, 1, 1, -1, -1, 0, 0, 0, 0};
static const char Cy [] = {0, 0, 0, 1, -1, 0, 0, 1, 1, -1, -1, 0, 0, 0, 0, 1, -1, -1, 1};
static const char Cz [] = {0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 1, -1, -1, 1, 1, 1, -1, -1};
static const float W [] = {
12.0 / 36.0,
2.0 / 36.0, 2.0 / 36.0, 2.0 / 36.0, 2.0 / 36.0, 2.0 / 36.0, 2.0 / 36.0,
1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0,
1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0
};
static const char OppQ [] = {0, 2, 1, 4, 3, 6, 5, 9, 10, 7, 8, 13, 14, 11, 12, 17, 18, 15, 16};
// Upload D3Q19 lattice constants to __constant__ memory and initialize the
// distribution functions f to their equilibrium values for every fluid cell
// (raw[index] == 0). Solid cells are left untouched.
void Init(unsigned char *raw, float *f, float *rho, float *ux, float *uy, float *uz, int nx, int ny, int nz) {
    cudaMemcpyToSymbol(NX, &nx, sizeof(int));
    cudaMemcpyToSymbol(NY, &ny, sizeof(int));
    cudaMemcpyToSymbol(NZ, &nz, sizeof(int));
    const size_t size = nx * ny * nz;
    // BUG FIX: SIZE is declared as a 4-byte __constant__ int, but the previous
    // code copied sizeof(size_t) (8) bytes into it, overflowing the symbol.
    const int sizeAsInt = (int)size;
    cudaMemcpyToSymbol(SIZE, &sizeAsInt, sizeof(int));
    cudaMemcpyToSymbol(CX, &Cx, QNUM * sizeof(char));
    cudaMemcpyToSymbol(CY, &Cy, QNUM * sizeof(char));
    cudaMemcpyToSymbol(CZ, &Cz, QNUM * sizeof(char));
    cudaMemcpyToSymbol(WW, &W, QNUM * sizeof(float));
    cudaMemcpyToSymbol(OPPQ, &OppQ, QNUM * sizeof(char));
    for (int z = 0; z < nz; ++z) {
        for (int y = 0; y < ny; ++y) {
            for (int x = 0; x < nx; ++x) {
                size_t index = x + y * nx + z * nx * ny;
                if (raw[index] == 0) {  // fluid cell
                    for (size_t q = 0; q < QNUM; ++q) {
                        // Standard LBM equilibrium distribution.
                        float cu = (Cx[q] * ux[index] + Cy[q] * uy[index] + Cz[q] * uz[index]);
                        float u2 = ux[index] * ux[index] + uy[index] * uy[index] + uz[index] * uz[index];
                        float feq = W[q] * rho[index] * (1 + 3.0 * cu + 4.5 * cu * cu - 1.5 * u2);
                        f[q * size + index] = feq;
                    }
                }
            }
        }
    }
}
// One LBM time step (single-relaxation-time BGK): per cell, compute the local
// macroscopic moments, relax the distributions toward equilibrium, and stream
// into fNew with periodic wrapping; streaming into a solid neighbor instead
// bounces back into the opposite direction at the current cell.
// Launch must cover the full (NX, NY, NZ) grid exactly (no bounds guard).
// NOTE(review): localUz is seeded with 0.00001 — presumably a small constant
// body-force/perturbation along z; confirm intent.
__global__ void SRTKernel(unsigned char *raw, float *f, float *fNew, float tau = 1.0) {
    size_t x = threadIdx.x + blockIdx.x * blockDim.x;
    size_t y = threadIdx.y + blockIdx.y * blockDim.y;
    size_t z = threadIdx.z + blockIdx.z * blockDim.z;
    size_t index = x + y * NX + z * NX * NY;
    float localRho = 0;
    float localUx = 0;
    float localUy = 0;
    float localUz = 0.00001;
    if (raw[index] == 0) {  // fluid cell
        // Accumulate density and momentum from the 19 populations.
        for (size_t q = 0; q < QNUM; ++q) {
            localRho += f[q * SIZE + index];
            localUx += CX[q] * f[q * SIZE + index];
            localUy += CY[q] * f[q * SIZE + index];
            localUz += CZ[q] * f[q * SIZE + index];
        }
        for (size_t q = 0; q < QNUM; ++q) {
            // Periodic destination cell for direction q.
            int newX = (x + CX[q] + NX) % NX;
            int newY = (y + CY[q] + NY) % NY;
            int newZ = (z + CZ[q] + NZ) % NZ;
            size_t newIndex = newX + newY * NX + newZ * NX * NY;
            // BGK collision: relax toward the equilibrium distribution.
            float cu = (CX[q] * localUx + CY[q] * localUy + CZ[q] * localUz);
            float u2 = localUx * localUx + localUy * localUy + localUz * localUz;
            float feq = WW[q] * localRho * (1 + 3.0 * cu + 4.5 * cu * cu - 1.5 * u2);
            float tmpF = f[q * SIZE + index] + (1.0 / tau) * (feq - f[q * SIZE + index]);
            if (raw[newIndex] == 0) {
                fNew[q * SIZE + newIndex] = tmpF;          // stream to neighbor
            } else {
                fNew[OPPQ[q] * SIZE + index] = tmpF;       // bounce-back at wall
            }
        }
    } else {
        // Solid cell: zero out all populations.
        for (size_t q = 0; q < QNUM; ++q) {
            fNew[q * SIZE + index] = 0;
        }
    }
}
// Driver: set up a 128^3 domain with a cylindrical fluid region (radius R
// about the x/y center, infinite along z), run 1000 SRT LBM steps with
// ping-pong distribution buffers, then print the z-velocity profile along
// the mid-plane x axis.
int main(int /*argc*/, char** /*argv*/) {
    const int nx = 128;
    const int ny = 128;
    const int nz = 128;
    const int size = nx * ny * nz;
    std::vector<float> rho, ux, uy, uz, f;
    rho.resize(size);
    ux.resize(size);
    uy.resize(size);
    uz.resize(size);
    f.resize(QNUM * size);
    std::vector<unsigned char> raw;  // 0 = fluid, 1 = solid
    raw.resize(size);
    const int R = 50;
    int index = 0;
    for (int z = 0; z < nz; ++z) {
        for (int y = 0; y < ny; ++y) {
            for (int x = 0; x < nx; ++x) {
                index = x + y * nx + z * nx * ny;
                ux[index] = uy[index] = uz[index] = 0;
                // Cylinder of fluid along the z axis.
                if ((x-nx/2)*(x-nx/2) + (y-ny/2)*(y-ny/2) < R*R) {
                    raw[index] = 0;
                    rho[index] = 1.f;
                } else {
                    raw[index] = 1;
                    rho[index] = 0.f;
                }
            }
        }
    }
    // Upload constants and fill f with equilibrium distributions.
    Init(&raw[0], &f[0], &rho[0], &ux[0], &uy[0], &uz[0], nx, ny, nz);
    float *d_f, *d_fNew;
    cudaMalloc(&d_f, sizeof(float) * QNUM * size);
    cudaMalloc(&d_fNew, sizeof(float) * QNUM * size);
    cudaMemcpy(d_f, &f[0], sizeof(float) * QNUM * size, cudaMemcpyHostToDevice);
    unsigned char *d_raw;
    cudaMalloc(&d_raw, sizeof(unsigned char) * size);
    cudaMemcpy(d_raw, &raw[0], sizeof(unsigned char) * size, cudaMemcpyHostToDevice);
    // Grid dimensions divide the domain exactly (128 multiples of 32/4/4).
    dim3 threadsPerBlock(32, 4, 4);
    dim3 blocksPerGrid(nx/threadsPerBlock.x, ny/threadsPerBlock.y, nz/threadsPerBlock.z);
    float *fTmp = NULL;
    // Time-stepping loop: ping-pong the two distribution buffers.
    for (int it = 0; it < 1000; ++it) {
        SRTKernel<<<blocksPerGrid, threadsPerBlock>>>(d_raw, d_f, d_fNew);
        fTmp = d_f;
        d_f = d_fNew;
        d_fNew = fTmp;
    }
    cudaMemcpy(&f[0], d_f, sizeof(float) * QNUM * size, cudaMemcpyDeviceToHost);
    // Print uz along the x axis at the domain mid-plane.
    for (int x = 0; x < nx; ++x) {
        int y = ny / 2;
        int z = nz / 2;
        float locUz = 0;
        for (size_t q = 0; q < QNUM; ++q) {
            locUz += Cz[q] * f[q * size + x + y * nx + z * nx * ny];
        }
        std::cout << locUz << std::endl;
    }
    cudaFree(d_f);
    cudaFree(d_fNew);
    cudaFree(d_raw);
    return 0;
}
|
14,170 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#define MAX_LENGTH 1000000000
// Debug kernel: each in-range thread prints every column string of the
// record matching its thread index (device-side printf, one record/thread).
__global__ void print(char ***strs, int record_num, int col_num) {
    const int t_id = threadIdx.x;
    if (t_id >= record_num)
        return;
    printf("---t_id %d---\n", t_id);
    for (int col = 0; col < col_num; ++col) {
        printf("\tattr %d: %s\n", col, strs[t_id][col]);
    }
}
// Build a record_num x col_num table of strings in unified (managed) memory
// and print a couple of records from the device.
int main(int argc, char *argv[]) {
    char ***records;
    int i, j, record_num = 102400, col_num = 12;
    cudaMallocManaged(&records, sizeof(char**) * record_num);
    int str_len;
    for (i = 0; i < record_num; i ++) {
        cudaMallocManaged(&records[i], sizeof(char*) * col_num);
        for (j = 0; j < col_num; j ++) {
            // BUG FIX: strlen() excludes the terminating NUL, so the previous
            // allocation was one byte short and strcpy wrote past the buffer.
            str_len = strlen("Hello world\n") + 1;
            cudaMallocManaged(&records[i][j], sizeof(char) * str_len);
            strcpy(records[i][j], "Hello world\n");
        }
    }
    cudaDeviceSynchronize();
    // Only 2 threads launched: prints records 0 and 1 (kernel guards t_id).
    print<<<1, 2>>>(records, record_num, col_num);
    cudaDeviceReset();
    return 0;
}
|
14,171 | #include <iostream>
#define N (32768)
#define THREADS_PER_BLOCK 1024
// Block-level dot product: each thread multiplies one element pair into
// shared memory; thread 0 serially sums the block's products and atomically
// adds the partial sum into *d. Assumes gridDim.x * blockDim.x == N exactly
// (no tail guard) and *d pre-initialized to 0.
__global__ void dot( int *a, int *b, int *d ) {
    __shared__ int temp[THREADS_PER_BLOCK];
    const int index = threadIdx.x + blockIdx.x * blockDim.x;
    temp[threadIdx.x] = a[index] * b[index];
    __syncthreads();
    if (threadIdx.x == 0) {
        int blockSum = 0;
        for (int i = 0; i < THREADS_PER_BLOCK; ++i) {
            blockSum += temp[i];
        }
        atomicAdd(d, blockSum);
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
// Driver: dot product of two all-ones vectors of length N on the GPU;
// expected printed result is d == N.
int main( void ) {
    int *a, *b, *d ; // host copies of a,b,c
    int *dev_a, *dev_b, *dev_d; // device copies of a, b, c
    int size = N *sizeof( int); // we need space for N integers
    // allocate device copies of a, b, c
    cudaMalloc( (void**)&dev_a, size );
    cudaMalloc( (void**)&dev_b, size );
    cudaMalloc( (void**)&dev_d, sizeof(int) );
    a = (int*)malloc( size );
    b = (int*)malloc( size );
    d = (int*)malloc( sizeof(int) );
    for (int i=0; i<N; i++)
    { a[i] = 1;
    };
    for (int i=0; i<N; i++)
    { b[i] = 1;
    };
    *d = 0;  // accumulator must start at zero (kernel atomically adds into it)
    // copy inputs to device
    cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy( dev_d, d, sizeof(int), cudaMemcpyHostToDevice);
    // launch dot() kernel with N parallel blocks
    // (N is a multiple of THREADS_PER_BLOCK, so the grid covers N exactly)
    dot<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_d);
    // copy device result back to host copy of d
    // (blocking memcpy also synchronizes with the kernel)
    cudaMemcpy( d, dev_d, sizeof(int) , cudaMemcpyDeviceToHost);
    printf("a %i b %i ; d %i; \n ",a[0],b[0],*d);
    free( a ); free( b ); free (d );
    cudaFree( dev_a);
    cudaFree( dev_b);
    cudaFree( dev_d);
    return 0;
}
|
14,172 | // CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
const u_int16_t THREAD_COUNT = 64;
const u_int16_t THREAD_SIZE = 8;
const u_int16_t Y_TILE_SIZE = 64;
/**
* suitable only for matrices of X % (64*8) = 0; Y % 64 = 0
*/
/**
 * Tiled row/column sum kernel. Each block processes `rows_per_thread` rows of
 * a (threads * cols_per_thread)-wide column stripe: it stages each row tile in
 * shared memory, accumulates a per-row total (added atomically to avg_row)
 * and per-column partials (added atomically to avg_col at the end).
 * Suitable only for matrices of X % (64*8) = 0; Y % 64 = 0.
 * NOTE(review): accumulators are u_int16_t — presumably entries are small
 * (e.g. 0/1 answers) so sums cannot overflow 65535; confirm.
 */
template <u_int16_t threads, u_int16_t cols_per_thread,
          u_int16_t rows_per_thread>
__global__ void parallel_col_sum(const int *in, float *avg_row, float *avg_col,
                                 const u_int16_t cols) {
  u_int16_t tx = u_int16_t(threadIdx.x);
  u_int16_t blocks_row = u_int16_t(blockIdx.y) * rows_per_thread;
  __shared__ u_int16_t tile[threads * cols_per_thread];
  __shared__ int tile_accum;
  // FIX: this array only ever uses cols_per_thread entries (it was
  // over-allocated as [threads], wasting registers/local memory).
  u_int16_t col_res_accum[cols_per_thread] = {0}; // per-thread, in registers
  u_int16_t col_offset = u_int16_t(blockIdx.x) * (threads * cols_per_thread);
  // Iterate over ROWS
  for (u_int16_t row = blocks_row; row < blocks_row + rows_per_thread; ++row) {
    // Stage this row's stripe into shared memory, threads cooperating.
#pragma unroll
    for (u_int16_t i = 0; i < cols_per_thread; ++i) {
      tile[i * threads + tx] =
          u_int16_t(in[row * cols + col_offset + i * threads + tx]);
    }
    tile_accum = 0;  // every thread writes 0: benign, same value
    __syncthreads();
    u_int16_t row_accum_thread = 0; // per thread
#pragma unroll
    for (u_int16_t i = 0; i < cols_per_thread; ++i) {
      row_accum_thread += tile[i * threads + tx];
      col_res_accum[i] += tile[i * threads + tx];
    }
    atomicAdd(&tile_accum, row_accum_thread);
    __syncthreads();
    if (tx == 0)
      atomicAdd((avg_row + row), tile_accum);
    // BUG FIX: without this barrier, other threads could start the next
    // iteration and reset tile_accum to 0 while thread 0 is still reading it
    // above — a data race on shared memory (the barrier had been commented
    // out).
    __syncthreads();
  }
  // Flush per-thread column partials to the global column sums.
  for (u_int16_t i = 0; i < cols_per_thread; ++i) {
    atomicAdd((avg_col + col_offset + i * threads + tx), col_res_accum[i]);
  }
}
// Turn sums into averages by dividing every element by `delimiter`.
// No bounds guard: the caller launches exactly one thread per element.
__global__ void avg_div(float *array, const u_int16_t delimiter) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    array[idx] = array[idx] / float(delimiter);
}
// Fallback column-average kernel: one thread per question, serial walk down
// the student axis (no shared-memory tiling).
__global__ void col_sum(const int *in, float *avg_que, const int students,
                        const int questions) {
    const int question = blockIdx.x * blockDim.x + threadIdx.x;
    int total = 0;
    for (int student = 0; student < students; ++student) {
        total += in[questions * student + question];
    }
    avg_que[question] = float(total) / float(students);
}
// Fallback row-average kernel: one thread per student, serial walk across
// that student's question scores.
__global__ void row_sum(const int *in, float *avg_stud, const int questions) {
    const int student = blockIdx.x * blockDim.x + threadIdx.x;
    int total = 0;
    for (int question = 0; question < questions; ++question) {
        total += in[questions * student + question];
    }
    avg_stud[student] = float(total) / float(questions);
}
// Compute per-student and per-question averages of `results` (students x
// questions, row-major). Uses the tiled kernel when both dimensions satisfy
// its divisibility requirements, otherwise the simple one-thread-per-line
// kernels (which compute the averages directly).
// NOTE(review): the fallback path still assumes students and questions are
// multiples of THREAD_COUNT (no tail blocks) — confirm caller guarantees.
void solveGPU(const int *results, float *avg_stud, float *avg_que,
              const unsigned int students, const unsigned int questions) {
    // cudaMemset is byte-wise; 0.0 converts to byte 0, which is a valid
    // all-zero float fill. The tiled path accumulates into these buffers.
    cudaMemset(avg_stud, 0.0, students * 4);
    cudaMemset(avg_que, 0.0, questions * 4);
    dim3 threadsPerBlock(THREAD_COUNT, 1);
    dim3 numBlocks(questions / (THREAD_COUNT * THREAD_SIZE),
                   students / Y_TILE_SIZE);
    if ((questions % (64 * 8) == 0) && (students % 64 == 0)) {
        // Tiled kernel produces sums; the avg_div launches divide them down.
        parallel_col_sum<THREAD_COUNT, THREAD_SIZE,
                         Y_TILE_SIZE><<<numBlocks, threadsPerBlock>>>(
            results, avg_stud, avg_que, u_int16_t(questions));
        avg_div<<<students / THREAD_COUNT, THREAD_COUNT>>>(avg_stud, u_int16_t(questions));
        avg_div<<<questions / THREAD_COUNT, THREAD_COUNT>>>(avg_que, u_int16_t(students));
    } else {
        col_sum<<<questions / THREAD_COUNT, THREAD_COUNT>>>(results, avg_que, students, questions);
        row_sum<<<students / THREAD_COUNT, THREAD_COUNT>>>(results, avg_stud, questions);
    }
    // if input is students << questions, switch to rowsum without caching
}
|
14,173 | #ifndef UTILS
#define UTILS
#include "utils.cuh"
#endif
// One thread per matrix row
// CSR sparse matrix-vector product: x = A * x_old.
// row_ptr/col_ind/values hold the CSR structure; each thread dots its sparse
// row with the previous iterate x_array_d_old and writes x_array_d[row].
__global__ void mmult_kernel(// First row of file
                             int rows, int columns, int num_of_non_zero_entries,
                             int num_repetitions,
                             // Return variables
                             int* row_ptr_array_d, int* col_ind_array_d,
                             double* values_array_d, double* x_array_d,
                             double* x_array_d_old)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= rows)
        return;
    const int begin = row_ptr_array_d[row];
    const int end = row_ptr_array_d[row + 1];
    double acc = 0;
    // Walk the non-zeros of this row.
    for (int k = begin; k < end; ++k)
        acc += values_array_d[k] * x_array_d_old[col_ind_array_d[k]];
    x_array_d[row] = acc;
}
|
14,174 | // Author: Sudnya Padalikar
// Date: 01/18/2014
// Brief: vector addition kernel in cuda
#include <stdio.h>
// Kernel that executes on the CUDA device
// Elementwise vector addition: c[i] = a[i] + b[i] for i in [0, N).
// One thread per element with a tail guard.
__global__ void vector_add(float *c, float *a, float *b, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Driver: add two small float vectors on the device and print the result.
int main()
{
    float *a_h;
    float *a_d; // Pointer to host & device arrays
    float *b_h;
    float *b_d;
    float *c_h;
    float *c_d;
    const int N = 10; // Number of elements in arrays
    size_t bytes = N * sizeof(float);
    a_h = (float *)malloc(bytes); // Allocate array on host
    cudaMalloc((void **) &a_d, bytes); // Allocate array on device
    b_h = (float *)malloc(bytes); // Allocate array on host
    cudaMalloc((void **) &b_d, bytes); // Allocate array on device
    c_h = (float *)malloc(bytes); // Allocate array on host
    cudaMalloc((void **) &c_d, bytes); // Allocate array on device
    // Initialize host array and copy it to CUDA device
    for (int i = 0; i < N; i++) {
        a_h[i] = (float)i;
        b_h[i] = (float)i/10;
    }
    cudaMemcpy(a_d, a_h, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, bytes, cudaMemcpyHostToDevice);
    // Do calculation on device:
    int block_size = 256;
    // BUG FIX: `N-1/block_size + 1` parsed as N - (1/block_size) + 1 == N + 1
    // due to operator precedence, launching N+1 blocks instead of
    // ceil(N/block_size). Use the standard ceiling-division form.
    int n_blocks = (N + block_size - 1) / block_size;
    vector_add <<< n_blocks, block_size >>> (c_d, a_d, b_d, N);
    // Retrieve result from device and store it in host array
    cudaMemcpy(c_h, c_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    // Print results
    for (int i=0; i<N; i++) {
        printf("%d %f\n", i, c_h[i]);
    }
    // Cleanup
    free(a_h);
    cudaFree(a_d);
    free(b_h);
    cudaFree(b_d);
    free(c_h);
    cudaFree(c_d);
    return 0;
}
|
14,175 | #include "../headers/kernels.cuh"
// For each site, find all other sites within `radius` under periodic boundary
// conditions spanned by the lattice vectors base1/2/3 scaled by `dimensions`.
// Tiled all-pairs search: sites are staged through shared memory in
// blockDim.x-sized tiles; each thread tests its own site against every tile
// entry under all 27 periodic images and records up to `offset` neighbours as
// int4(-k, -l, -m, neighbour_index) in neighbours[id*offset + n].
// Caller supplies blockDim.x * sizeof(float4) dynamic shared memory.
// NOTE(review): threads with id >= size still execute (only `beginFrom`
// offsets the index) — presumably the launch covers exactly `size` sites;
// confirm, otherwise sites[id] reads out of bounds.
__global__ void findNeigboursXyz(
    const float4* const sites,
    int4* const neighbours,
    float3 base1,
    float3 base2,
    float3 base3,
    int3 dimensions,
    float radius,
    int offset,
    int beginFrom,
    int size)
{
    const int id = blockDim.x * blockIdx.x + threadIdx.x + beginFrom;
    // TODO pass square of radius as input parameter
    const float squareRadius = radius * radius;
    // TODO store as float3 structures
    extern __shared__ float4 shared_sites[];
    // load site data assigned for this thread
    float4 currentSite = sites[id];
    int neighbourCounter = 0;
    // Sweep the site list one shared-memory tile at a time.
    for (int i = 0, tile = 0; i < size; i += blockDim.x, ++tile)
    {
        if(tile * blockDim.x + threadIdx.x < size)
        {
            float4 site = sites[tile * blockDim.x + threadIdx.x];
            shared_sites[threadIdx.x] = site;
        }
        else
        {
            // Pad the tail tile; padded entries are filtered out below.
            shared_sites[threadIdx.x] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
        }
        __syncthreads();
        for (int isite = 0; isite < blockDim.x; ++isite)
        {
            // Skip self-pairs and the padded tail entries.
            if ((tile * blockDim.x + isite != id) && (tile * blockDim.x + isite < size))
            {
                float4 site = shared_sites[isite];
                float xp = site.x - currentSite.x;
                float yp = site.y - currentSite.y;
                float zp = site.z - currentSite.z;
                // Test all 27 periodic images of the candidate site.
#pragma unroll
                for (int k = -1; k <= 1; ++k)
                {
#pragma unroll
                    for (int l = -1; l <= 1; ++l)
                    {
#pragma unroll
                        for (int m = -1; m <= 1; ++m)
                        {
                            // Lattice translation of image (k, l, m).
                            float lx = k * dimensions.x * base1.x + l * dimensions.y * base2.x + m * dimensions.z * base3.x;
                            float ly = k * dimensions.x * base1.y + l * dimensions.y * base2.y + m * dimensions.z * base3.y;
                            float lz = k * dimensions.x * base1.z + l * dimensions.y * base2.z + m * dimensions.z * base3.z;
                            float squareDistance = (xp + lx) * (xp + lx) + (yp + ly) * (yp + ly) + (zp + lz) * (zp + lz);
                            // TODO use fabs
                            // Record the neighbour (capped at `offset` slots);
                            // the stored triple is the inverse image shift.
                            if ((neighbourCounter < offset) && (squareDistance < squareRadius))
                            {
                                neighbours[id * offset + neighbourCounter] = make_int4(-k, -l, -m, tile * blockDim.x + isite);
                                ++neighbourCounter;
                            }
                        }
                    }
                }
            }
        }
        // Barrier before the next tile overwrites shared_sites.
        __syncthreads();
    }
}
/*
* set float4 fields
*/
/* Element-wise setters: write `value` into one component of input[index].
 * Launched over the whole array; only the thread matching `index` writes. */
__global__ void setFloat4x(int index, float value, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == index && tid < size)
        input[tid].x = value;
}
__global__ void setFloat4y(int index, float value, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == index && tid < size)
        input[tid].y = value;
}
__global__ void setFloat4z(int index, float value, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == index && tid < size)
        input[tid].z = value;
}
__global__ void setFloat4w(int index, float value, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == index && tid < size)
        input[tid].w = value;
}
/*
* Exchange values in Float4 fields
*/
/* Component swap: the single thread whose id equals index1 exchanges its
 * component with element index2.
 * NOTE(review): index2 is not range-checked against `size` — presumably
 * callers guarantee it is valid; confirm. */
__global__ void exchangeFloat4x(int index1, int index2, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid != index1 || tid >= size)
        return;
    const float held = input[tid].x;
    input[tid].x = input[index2].x;
    input[index2].x = held;
}
__global__ void exchangeFloat4y(int index1, int index2, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid != index1 || tid >= size)
        return;
    const float held = input[tid].y;
    input[tid].y = input[index2].y;
    input[index2].y = held;
}
__global__ void exchangeFloat4z(int index1, int index2, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid != index1 || tid >= size)
        return;
    const float held = input[tid].z;
    input[tid].z = input[index2].z;
    input[index2].z = held;
}
__global__ void exchangeFloat4w(int index1, int index2, float4* input, int size) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid != index1 || tid >= size)
        return;
    const float held = input[tid].w;
    input[tid].w = input[index2].w;
    input[index2].w = held;
}
/* To jest wersja działająca - nie jest w inej używana pamięć shared*/
#ifdef DEBUG
/*
 * Reference (global-memory only) neighbour search kept for DEBUG builds:
 * same contract as findNeigboursXyz, without shared-memory tiling.
 */
__global__ void findNeigboursXyzGlobal(const float4 * const sites,
        int4 * neigbours, float3 base1, float3 base2, float3 base3,
        int3 dimensions, float radius, int offset, int beginFrom, int size) {
    const int id = blockDim.x * blockIdx.x + threadIdx.x + beginFrom;
    if (id >= size)
        return;
    const float x = sites[id].x;
    const float y = sites[id].y;
    const float z = sites[id].z;
    int found = 0;
    for (int other = beginFrom; other < size; ++other) {
        if (other == id)
            continue;
        const float xp = sites[other].x - x;
        const float yp = sites[other].y - y;
        const float zp = sites[other].z - z;
        // Test the candidate against all 27 periodic images.
#pragma unroll
        for (int k = -1; k <= 1; ++k) {
#pragma unroll
            for (int l = -1; l <= 1; ++l) {
#pragma unroll
                for (int m = -1; m <= 1; ++m) {
                    const float lx = k * dimensions.x * base1.x
                            + l * dimensions.y * base2.x
                            + m * dimensions.z * base3.x;
                    const float ly = k * dimensions.x * base1.y
                            + l * dimensions.y * base2.y
                            + m * dimensions.z * base3.y;
                    const float lz = k * dimensions.x * base1.z
                            + l * dimensions.y * base2.z
                            + m * dimensions.z * base3.z;
                    const float distance = sqrt(
                            (xp + lx) * (xp + lx)
                            + (yp + ly) * (yp + ly)
                            + (zp + lz) * (zp + lz));
                    if (distance < radius && found < offset) {
                        neigbours[id * offset + found].x = -k;
                        neigbours[id * offset + found].y = -l;
                        neigbours[id * offset + found].z = -m;
                        neigbours[id * offset + found].w = other;
                        found++;
                    }
                }
            }
        }
    }
}
#endif
|
14,176 | #include <stdint.h>
#include <stdio.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// RAII wrapper around a pair of CUDA events for millisecond GPU timing.
// Usage: Start(); ...enqueue work on the default stream...; Stop();
// float ms = Elapsed();
struct GpuTimer {
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer() {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Record the start event on the default stream and wait until it is reached.
void Start() {
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
// Record the stop event on the default stream (does not block the host).
void Stop() { cudaEventRecord(stop, 0); }
// Block until the stop event completes, then return the elapsed time in ms.
float Elapsed() {
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
/** Print the first n elements of `a`, space-separated, followed by a newline. */
void printArray(const uint32_t *a, int n) {
    int i = 0;
    while (i < n) {
        printf("%2i ", a[i]);
        ++i;
    }
    printf("\n");
}
// Sort `in` (n elements) ascending into `out` with Thrust on the device.
// `nBits` is unused here; it only mirrors sortByHost's signature.
void sortByThrust(const uint32_t *in, int n, uint32_t *out, int nBits) {
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Sequential radix sort (paper)
// scan in counting sort Assume: nBits (k in slides) in {1, 2, 4, 8, 16} Why
// "int * blockSizes"? Because we may want different block sizes for diffrent
// kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
/*
 * Sequential LSD radix sort over `nBits`-bit digits, organised exactly like
 * the GPU pipeline it models: per-"block" digit histograms, an exclusive
 * scan over (digit, block) pairs, then a stable scatter.
 * blockSizes[0] sets the simulated histogram block size; blockSizes[1]
 * (reserved for a scan kernel) is currently unused.
 * Sorts `in` (n elements) ascending into `out`.
 */
void sortByHost(const uint32_t *in, int n, uint32_t *out, int nBits,
                int *blockSizes) {
  const int numBins = 1 << nBits;
  const int simBlockSize = blockSizes[0];
  const int numBlocks = (n - 1) / simBlockSize + 1;

  uint32_t *counts =
      (uint32_t *)malloc(numBins * numBlocks * sizeof(uint32_t));
  uint32_t *offsets =
      (uint32_t *)malloc(numBins * numBlocks * sizeof(uint32_t));
  uint32_t *current = (uint32_t *)malloc(n * sizeof(uint32_t));
  memcpy(current, in, n * sizeof(uint32_t));
  uint32_t *firstBuffer = current; // remember the malloc'd buffer for free()
  uint32_t *scratch = out;

  for (int bit = 0; bit < 8 * (int)sizeof(uint32_t); bit += nBits) {
    // Step 1: per-block digit histograms for the current digit position.
    memset(counts, 0, numBins * numBlocks * sizeof(uint32_t));
    for (int blk = 0; blk < numBlocks; blk++) {
      for (int th = 0; th < simBlockSize; th++) {
        int pos = blk * simBlockSize + th;
        if (pos < n) {
          int digit = (current[pos] >> bit) & (numBins - 1);
          counts[blk * numBins + digit]++;
        }
      }
    }
    // Step 2: exclusive scan in (digit-major, block-minor) order — this
    // yields the global output offset of each (block, digit) bucket.
    uint32_t running = 0;
    for (int digit = 0; digit < numBins; digit++) {
      for (int blk = 0; blk < numBlocks; blk++) {
        offsets[blk * numBins + digit] = running;
        running += counts[blk * numBins + digit];
      }
    }
    // Step 3: stable scatter into `scratch`.
    for (int blk = 0; blk < numBlocks; blk++) {
      for (int th = 0; th < simBlockSize; th++) {
        int pos = blk * simBlockSize + th;
        if (pos < n) {
          int bucket = blk * numBins + ((current[pos] >> bit) & (numBins - 1));
          scratch[offsets[bucket]] = current[pos];
          offsets[bucket]++;
        }
      }
    }
    // Ping-pong the buffers for the next digit.
    uint32_t *tmp = current;
    current = scratch;
    scratch = tmp;
  }
  // After the final pass `current` holds the fully sorted data; publish it.
  memcpy(out, current, n * sizeof(uint32_t));
  free(counts);
  free(offsets);
  free(firstBuffer);
}
// Radix sort
// Radix-sort dispatcher with timing.
// NOTE(review): the `useThrust` flag is inverted relative to its name —
// useThrust == false runs the Thrust path and useThrust == true runs the
// host (sequential) path. The printed labels match the actual behaviour and
// main() relies on these semantics, so they are documented rather than
// changed here.
void sort(const uint32_t *in, int n, uint32_t *out, int nBits,
bool useThrust = false, int *blockSizes = NULL) {
GpuTimer timer;
timer.Start();
if (useThrust == false) {
printf("\nRadix sort by thrust\n");
sortByThrust(in, n, out, nBits);
} else // use device
{
printf("\nRadix sort by host (#paper: sequential radix sort)\n");
sortByHost(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
// Query device 0 and print its basic hardware characteristics.
// Aborts (via the CHECK macro) if no CUDA device is available.
void printDeviceInfo() {
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n",
devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
/** Element-wise comparison of `out` against `correctOut`; prints a verdict. */
void checkCorrectness(uint32_t *out, uint32_t *correctOut, int n) {
  bool ok = true;
  for (int i = 0; i < n; i++) {
    if (out[i] != correctOut[i]) {
      ok = false;
      break;
    }
  }
  if (!ok) {
    printf("INCORRECT :(\n");
    return;
  }
  printf("CORRECT :)\n");
}
// Driver: print device info, generate random input, sort it twice (Thrust
// reference and host radix sort) and compare the results.
// argv[1] = bits per digit, argv[2]/argv[3] = histogram/scan block sizes.
int main(int argc, char **argv) {
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 24) + 1;
// int n = 10;
/* n = 17; */
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t *in = (uint32_t *)malloc(bytes);
uint32_t *out = (uint32_t *)malloc(bytes); // Device result
uint32_t *correctOut = (uint32_t *)malloc(bytes); // Thrust result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
/* in[i] = rand() % 16; */
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4) {
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0],
blockSizes[1]);
// SORT BY THRUST
// NOTE(review): sort()'s useThrust flag is inverted vs its name — the
// default (false) runs the Thrust path, `true` runs the host path.
sort(in, n, correctOut, nBits);
// SORT BY DEVICE
sort(in, n, out, nBits, true, blockSizes);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
14,177 | #include "includes.h"
// Block-level sum reduction with 8-way unrolling: each block first folds its
// eight blockDim.x-wide input segments into one (in place, in global memory),
// then reduces that segment with a fully unrolled tree and writes the block
// total to g_odata[blockIdx.x].
// NOTE: g_idata is modified in place; elements whose 8-segment window runs
// past n are not folded by the first step (tail handling is the caller's
// responsibility).
__global__ void reduceCompleteUnrollWarp8(int *g_idata, int *g_odata, unsigned int n){
// thread id
int idx = threadIdx.x + blockIdx.x * blockDim.x * 8;
// data pointer of this block(s)
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling blocks
if (idx + 7 * blockDim.x < n) {
int el0 = g_idata[idx];
int el1 = g_idata[idx + blockDim.x];
int el2 = g_idata[idx + 2*blockDim.x];
int el3 = g_idata[idx + 3*blockDim.x];
int el4 = g_idata[idx + 4*blockDim.x];
int el5 = g_idata[idx + 5*blockDim.x];
int el6 = g_idata[idx + 6*blockDim.x];
int el7 = g_idata[idx + 7*blockDim.x];
g_idata[idx] = el0+el1+el2+el3+el4+el5+el6+el7;
}
__syncthreads();
// unrolling in blocks
// this optimization requires blockDim.x to be a power of two, at most 1024
if (blockDim.x >= 1024 && threadIdx.x < 512) idata[threadIdx.x] += idata[threadIdx.x + 512];
__syncthreads();
if (blockDim.x >= 512 && threadIdx.x < 256) idata[threadIdx.x] += idata[threadIdx.x + 256];
__syncthreads();
if (blockDim.x >= 256 && threadIdx.x < 128) idata[threadIdx.x] += idata[threadIdx.x + 128];
__syncthreads();
if (blockDim.x >= 128 && threadIdx.x < 64) idata[threadIdx.x] += idata[threadIdx.x + 64];
__syncthreads();
// unrolling sync in thread cluster(stride less than 32)
// NOTE(review): this volatile tail relies on implicit warp-synchronous
// execution (pre-Volta behaviour); on newer architectures __syncwarp()
// would be required between the steps — confirm the target architecture.
if (threadIdx.x < 32){
volatile int *vmem = idata;
vmem[threadIdx.x] += vmem[threadIdx.x + 32];
vmem[threadIdx.x] += vmem[threadIdx.x + 16];
vmem[threadIdx.x] += vmem[threadIdx.x + 8];
vmem[threadIdx.x] += vmem[threadIdx.x + 4];
vmem[threadIdx.x] += vmem[threadIdx.x + 2];
vmem[threadIdx.x] += vmem[threadIdx.x + 1];
}
// Thread 0 publishes the per-block partial sum.
if (threadIdx.x == 0){
g_odata[blockIdx.x] = idata[0];
}
}
14,178 | #include <stdio.h>
#include <stdlib.h>
/** Element-wise c = a + b; one thread per element (launched as <<<1,N>>>). */
__global__ void add(int *a, int *b, int *c) {
    const int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Minimal two-element vector-add demo: copies a and b to the device, runs
// `add` with one block of two threads, and copies all three buffers back.
int main(void) {
int *d_a, *d_b, *d_c;
size_t size = 2*sizeof(int);
int a[2] = {1,2};
int b[2] = {1,2};
int c[2] = {0,0};
int test;
// Query the device count (the value is only used to force CUDA init here).
cudaGetDeviceCount(&test);
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
printf("c[0]%d\n",c[0]);
printf("c[1]%d\n",c[1]);
add<<<1,2>>>(d_a, d_b, d_c);
// Host-side write after the asynchronous launch: d_a was copied earlier,
// so this does not affect the kernel's input.
a[1] = 6;
// Blocking copies: the first one also synchronizes with the kernel.
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
cudaMemcpy(&b, d_b, size, cudaMemcpyDeviceToHost);
cudaMemcpy(&a, d_a, size, cudaMemcpyDeviceToHost);
printf("c[0]%d\n",c[0]);
printf("c[1]%d\n",c[1]);
printf("%p\n", &a);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
14,179 | #define N 10000000
// Serial vector add executed by every launched thread: each thread loops
// over all n elements, so this is intended for a <<<1,1>>> launch
// (tutorial baseline before parallelisation).
__global__ void vector_add(float *out, float *a, float *b, int n) {
for(int i = 0; i < n; i++){
out[i] = a[i] + b[i];
}
}
/*
 * Driver for the single-thread vector_add kernel.
 * Fix: the original launched the kernel with `out` and `b` as uninitialized
 * host pointers, which dereferences garbage on the device (illegal address).
 * All three operands are now allocated on both host and device and copied
 * properly; the result is copied back before cleanup.
 */
int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    a   = (float*)malloc(sizeof(float) * N);
    b   = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    for (int i = 0; i < N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // Allocate device memory for all three operands
    cudaMalloc((void**)&d_a,   sizeof(float) * N);
    cudaMalloc((void**)&d_b,   sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);

    // Transfer data from host to device memory
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);

    vector_add<<<1,1>>>(d_out, d_a, d_b, N);

    // Copy the result back (cudaMemcpy also synchronizes with the kernel)
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);

    // Cleanup after kernel execution
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    free(a);
    free(b);
    free(out);
    return 0;
}
|
14,180 | //pass
//--gridDim=4096 --blockDim=512
#include "common_merge.h"
template<uint sortDir> __global__ void mergeSortSharedKernel(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength);
template __global__ void mergeSortSharedKernel<1>(uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength);
/*
 * Sort one SHARED_SIZE_LIMIT-element tile of (key, value) pairs in shared
 * memory by iterative pairwise merging; sortDir selects the direction via
 * the binarySearch helpers from common_merge.h.
 * Launch contract (from the indexing below): each block owns one tile and
 * each thread handles two elements, i.e. blockDim.x == SHARED_SIZE_LIMIT/2;
 * arrayLength must not exceed SHARED_SIZE_LIMIT.
 */
template<uint sortDir> __global__ void mergeSortSharedKernel(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength
)
{
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
// Rebase the pointers to this block's tile, offset by this thread.
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
// Each thread stages two elements: its own slot and the one half a tile away.
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
// Merge sorted runs of length `stride` pairwise until arrayLength is reached.
for (uint stride = 1; stride < arrayLength; stride <<= 1)
{
uint lPos = threadIdx.x & (stride - 1);
uint *baseKey = s_key + 2 * (threadIdx.x - lPos);
uint *baseVal = s_val + 2 * (threadIdx.x - lPos);
__syncthreads();
uint keyA = baseKey[lPos + 0];
uint valA = baseVal[lPos + 0];
uint keyB = baseKey[lPos + stride];
uint valB = baseVal[lPos + stride];
// Rank each element in the opposite run to find its merged position.
uint posA = binarySearchExclusive<sortDir>(keyA, baseKey + stride, stride, stride) + lPos;
uint posB = binarySearchInclusive<sortDir>(keyB, baseKey + 0, stride, stride) + lPos;
__syncthreads();
baseKey[posA] = keyA;
baseVal[posA] = valA;
baseKey[posB] = keyB;
baseVal[posB] = valB;
}
__syncthreads();
// Write the sorted tile back, two elements per thread as on load.
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
|
14,181 | #include "includes.h"
/*
 * Generate a width x height grid of vertices animated by a sine/cosine wave;
 * uv in [-1,1]^2, height w = sin(u*f+t)*cos(v*f+t)/2, output (u, w, v, 1).
 * Note the transposed mapping: x comes from the .y launch dimensions and y
 * from .x, as in the original.
 * Fixes: bounds guard added (the original wrote out of bounds whenever the
 * grid overshot width/height), and the double literals `2.0` replaced with
 * `2.0f` to keep the arithmetic in single precision.
 */
__global__ void computeVertices_kernel(float4* pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int y = blockIdx.x * blockDim.x + threadIdx.x;
    if (x >= width || y >= height)
        return; // grid may overshoot the mesh dimensions
    // calculate uv coordinates
    float u = x / (float) width;
    float v = y / (float) height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;
    // calculate simple sine wave pattern
    float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
    // write output vertex
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
}
14,182 | #include "includes.h"
// Streaming per-channel mean/stddev update over one block of 2*NACCUMULATE
// time samples; one thread per channel (threadIdx.x indexes the channel).
//   indata  - newest samples, channel-major within each time sample
//   base    - running mean per channel (updated in place)
//   stdev   - running standard deviation per channel (updated in place)
//   factors - per-count update weights, indexed by total samples seen
//   processed - number of samples folded in before this call
// NOTE(review): the update assumes factors[processed + isamp + 1] holds the
// reciprocal sample count (Welford-style weight) — confirm against the host
// code that fills `factors`.
__global__ void GetScaleFactorsKernel(float *indata, float *base, float *stdev, float *factors, int nchans, int processed) {
// NOTE: Filterbank file format coming in
//float mean = indata[threadIdx.x];
float mean = 0.0f;
// NOTE: Depending whether I save STD or VAR at the end of every run
// float estd = stdev[threadIdx.x];
// Recover the running sum of squared deviations from the stored stddev.
float estd = stdev[threadIdx.x] * stdev[threadIdx.x] * (processed - 1.0f);
float oldmean = base[threadIdx.x];
//float estd = 0.0f;
//float oldmean = 0.0;
float val = 0.0f;
float diff = 0.0;
// Fold each new sample into the running mean and squared-deviation sum.
for (int isamp = 0; isamp < 2 * NACCUMULATE; ++isamp) {
val = indata[isamp * nchans + threadIdx.x];
diff = val - oldmean;
mean = oldmean + diff * factors[processed + isamp + 1];
estd += diff * (val - mean);
oldmean = mean;
}
base[threadIdx.x] = mean;
stdev[threadIdx.x] = sqrtf(estd / (float)(processed + 2 * NACCUMULATE - 1.0f));
// stdev[threadIdx.x] = estd;
}
14,183 | #include <stdio.h>
// Empty placeholder kernel: launching it only verifies that a kernel launch
// succeeds on the current device.
__global__ void mykernel(void){
}
/*
 * Launch a no-op kernel and report the number of CUDA devices.
 * Fix: the original never synchronized after the launch, so the process
 * could exit before the kernel ran and any launch error went unreported.
 */
int main(void){
    int deviceCount;
    mykernel<<<1,1>>>();
    // Wait for the kernel and surface any launch/execution error.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaGetDeviceCount(&deviceCount);
    printf("Hello World! Total Device: %d\n", deviceCount);
    return 0;
}
|
14,184 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
// Parse the input file: "<N> <iter>", an N x N integer matrix A, the token
// "solution" (skipped), then N integers for b. Allocates *A (N*N floats)
// and *b (N floats) with malloc; on a bad handle sets *N = *iter = 0.
// NOTE(review): fscanf and malloc results inside the loops are unchecked —
// a truncated or malformed file yields partially-initialized data.
void readFile(char* fname, int* N, int* iter, float** A, float** b)
{
FILE *fp;
char buf[100];
int i, j;
fp = fopen(fname, "r");
if(!fp)
{
*N = 0;
*iter = 0;
printf("Stale File Handle\n");
return;
}
if(fscanf(fp, "%s", buf) > 0) *N = atoi(buf);
if(fscanf(fp, "%s", buf) > 0) *iter = atoi(buf);
printf("N = %d\nIterations = %d\n", *N, *iter);
*b = (float*) malloc(*N*sizeof(float));
*A = (float*) malloc((*N)*(*N)*sizeof(float));
// Matrix entries, row-major.
for(i = 0; i < *N; i++)
{
for(j = 0; j < *N; j++)
{
fscanf(fp, "%s", buf);
(*A)[ ((*N*i)+j) ] = (float)atoi(buf);
}
}
fscanf(fp, "%s", buf); // Ignore the "solution" in the text
for(i = 0; i < *N; i++)
{
fscanf(fp, "%s", buf);
(*b)[i] = (float)atoi(buf);
}
fclose(fp);
}
/*
 * CPU reference: solve A x = b with `iter` Jacobi sweeps from a zero initial
 * guess. *x is allocated here (N floats, caller frees).
 */
void jacobi(float* A, float* b, int N, int iter, float** x)
{
    *x = (float*) malloc(N*sizeof(float));
    float* next = (float*) malloc(N*sizeof(float));
    for (int row = 0; row < N; row++)
        (*x)[row] = 0.0; // Initial Guess
    for (int sweep = 0; sweep < iter; sweep++)
    {
        // next[row] = (b[row] - sum_{col != row} A[row][col]*x[col]) / A[row][row]
        for (int row = 0; row < N; row++)
        {
            float acc = 0;
            for (int col = 0; col < N; col++)
            {
                if (col != row)
                    acc += A[ ((N*row)+col) ] * (*x)[col];
            }
            next[row] = (b[row] - acc) / A[ ((N*row)+row) ];
        }
        // Commit the sweep (classic Jacobi: update from the previous iterate).
        for (int row = 0; row < N; row++)
            (*x)[row] = next[row];
    }
    free(next);
}
/*
 * One Jacobi sweep over rows [offset, offset + streamsize), grid-stride:
 * y[i] = (b[i] - sum_{j != i} A[i][j]*x[j]) / A[i][i].
 */
__global__
void iloop(float* A, float* b, int N, float* x, float* y, int offset, int streamsize)
{
    const int first = offset + (blockIdx.x * blockDim.x) + threadIdx.x;
    const int step = gridDim.x * blockDim.x;
    const int last = offset + streamsize;
    //printf("<<< %d, %d, %d >>>\n", first, last, step);
    for (int row = first; row < last; row += step)
    {
        float acc = 0.0;
        for (int col = 0; col < N; col++)
        {
            if (row != col)
                acc = acc + (A[ ((N*row)+col) ] * x[col]);
        }
        y[row] = ((b[row] - acc) / (A[ ((N*row)+row) ]));
    }
}
/* b[i] += a[i] for i in [0, N): serial loop, intended for a single thread. */
__global__
void add(float* a, float* b, int N)
{
    int i = 0;
    while (i < N)
    {
        b[i] = a[i] + b[i];
        ++i;
    }
}
// Driver: read A and b, run `iter` Jacobi iterations on the GPU split across
// two CUDA streams, time the solve with events, and report max|A*x - b|.
// argv[1] = input file (default ../inputs/8.txt), argv[2] = block size cap.
// NOTE(review): streamsize = N/nstreams truncates — rows beyond
// nstreams*streamsize are never updated when N is not divisible by nstreams;
// confirm inputs always satisfy this.
int main(int argc, char* argv[])
{
float time = 0.0;
float maxError = 0.0;
float* d_A;
float* d_b;
float* d_x;
float* d_y;
int k;
int blocksize;
int numblocks;
int nstreams;
int streamsize;
int devID;
float* A;
float* b;
float* x;
float* y;
float* c;
char* fname;
int N, iter, i, j, M;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(argc >= 2) fname = argv[1];
else fname = "../inputs/8.txt";
if(argc >= 3) M = atoi(argv[2]);
else M = 32;
//if(argc >= 3) devID = atoi(argv[2]);
//else devID = 1;
//cudaDeviceProp prop;
//if( cudaGetDeviceProperties(&prop, devID) != cudaSuccess) printf("CUDA : Error with Device Properties\n");
//else printf("Device : %d\n", prop.id);
//if( cudaSetDevice(devID) != cudaSuccess ) printf("CUDA : Error with Device ID\n");
readFile(fname, &N, &iter, &A, &b);
printf("CUDA : Parsed file %s\n", fname);
x = (float*) malloc(N*sizeof(float));
y = (float*) malloc(N*sizeof(float));
memset(x, 0.0, N*sizeof(float));
memset(y, 0.0, N*sizeof(float));
//for(i = 0; i < N; i++)
//{
// x[i] = 0.0;
// y[i] = 0.0;
//}
if( cudaMalloc(&d_A, N*N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
if( cudaMalloc(&d_b, N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
if( cudaMalloc(&d_x, N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
if( cudaMalloc(&d_y, N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
if( cudaMemcpy(d_A, A, N*N*sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess ) printf("CUDA : memory copy error!\n");
if( cudaMemcpy(d_b, b, N*sizeof(float) , cudaMemcpyHostToDevice) != cudaSuccess ) printf("CUDA : memory copy error!\n");
if( cudaMemcpy(d_y, y, N*sizeof(float) , cudaMemcpyHostToDevice) != cudaSuccess ) printf("CUDA : memory copy error!\n")<
nstreams = 2;
streamsize = N/nstreams;
blocksize = (int) fmin(M, streamsize);
numblocks = streamsize/blocksize;
//numblocks = (N+blocksize-1)/blocksize;
printf("CUDA : Streams %d, Grid Size %d, Block size %d\n", nstreams, numblocks, blocksize);
cudaStream_t streams[nstreams];
for(i = 0; i < nstreams; i++)
{
if( cudaStreamCreate(&streams[i]) != cudaSuccess ) printf("CUDA : Error with Stream Creation\n");
}
cudaEventRecord(start);
//if( cudaMemcpy(d_x, x, N*sizeof(float) , cudaMemcpyHostToDevice) != cudaSuccess ) printf("CUDA : memory copy error!\n");
for(k = 0; k < iter; k++)
{
// Per iteration: copy last sweep's y into x (device-to-device, per stream),
// then run one Jacobi sweep on each half of the rows.
for(i = 0; i < nstreams; i++)
{
int offset = i*streamsize;
if( cudaMemcpyAsync(&d_x[offset], &d_y[offset], streamsize*sizeof(float), cudaMemcpyDeviceToDevice, streams[i])!= cudaSuccess ) printf("CUDA : memory copy error!\n");
//}
//for(i = 0; i < nstreams; i++)
//{
// int offset = i*streamsize;
iloop<<< numblocks, blocksize, 0, streams[i] >>>(d_A, d_b, N, d_x, d_y, offset, streamsize); // kernel launch on GPU
}
}
if( cudaMemcpy(x, d_y, N*sizeof(float) , cudaMemcpyDeviceToHost) != cudaSuccess ) printf("CUDA : memory copy error!\n");
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("CUDA : Done Computing Jacobi on GPU\n");
// Residual check on the host: maxError = max_i |(A*x)_i - b_i|.
c = (float*) malloc(N*sizeof(float));
for(i = 0; i < N; i++)
{
c[i] = 0;
for(j = 0; j < N; j++)
{
c[i] += A[ ((N*i)+j) ] * x[j];
}
//printf("%0.2f\n", c[i]);
maxError = fmax(maxError, fabs(c[i] - b[i]));
}
printf("\nCUDA : Time %f ms\n", time);
printf("CUDA : MaxError = %f\n\n\n", maxError);
for(i = 0; i < nstreams; i++)
{
if( cudaStreamDestroy(streams[i]) != cudaSuccess ) printf("CUDA : Error with Stream Deletion\n");
}
cudaFree(d_A);
cudaFree(d_b);
cudaFree(d_x);
cudaFree(d_y);
free(A);
free(b);
free(x);
free(y);
free(c);
return 0;
}
|
14,185 |
/*
 * Grid-stride combination: output[i] = real[i]*cos(ANGLE) + imag[i]*sin(ANGLE).
 * Fix: cosf(ANGLE)/sinf(ANGLE) are loop-invariant (ANGLE is a constant
 * parameter) and were recomputed on every iteration; they are now hoisted
 * out of the loop.
 */
__global__ void cuda_Euler(const float * __restrict__ real, float *imag, float *output, const float ANGLE, const int LENGTH)
{
	int tid = blockDim.x * blockIdx.x + threadIdx.x,
		offset = gridDim.x * blockDim.x;
	const float c = cosf(ANGLE);
	const float s = sinf(ANGLE);
	while(tid < LENGTH) {
		output[tid] = real[tid] * c + imag[tid] * s;
		tid += offset;
	}
}
|
14,186 | /*#include <boost/random.hpp>
#include <cstdlib>
#include <cstdio>
#include <vector>
#include <chrono>
#include <algorithm>
#include <numeric>
#define N 5000
using namespace std ; */
#include <stdio.h>
/*
 * Enumerate CUDA devices and print basic memory characteristics.
 * Fix: "No device :(" was printed unconditionally — even when devices were
 * found and listed. It is now printed only when the count is zero, and main
 * returns an explicit status.
 */
int main() {
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n",
               prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n",
               prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
    if (nDevices == 0)
        printf("No device :(") ;
    return 0;
}
/*
int main (int argc, char *argv[])
{
boost::random::mt19937 rng;
boost::random::uniform_01<boost::mt19937> rand(rng) ;
auto start = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::high_resolution_clock::now()-start;
vector <vector <double> > particles (N, vector<double>(3,0)) ;
for (auto & v : particles)
for (auto & w: v)
w = rand() ;
double Dsqr=0.05 ;
start = std::chrono::high_resolution_clock::now();
double sum ;
int count=0 ;
for (int i=0 ; i<N ; i++)
for (int j=i+1 ; j<N ; j++)
{
sum=0 ;
for (int k=0 ; k<3 ; k++)
sum += (particles[i][k]-particles[j][k])*(particles[i][k]-particles[j][k]) ;
if (sum < Dsqr)
count ++ ;
}
elapsed = std::chrono::high_resolution_clock::now()-start;
auto duration= std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count() ;
printf("%g %d\n", duration/1000000., count) ;
}
*/
|
14,187 | /***************************************************************************
*
* Authors: David Strelak (davidstrelak@gmail.com)
*
* Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*
* All comments concerning this program package may be sent to the
* e-mail address 'xmipp@cnb.csic.es'
***************************************************************************/
#ifndef CUDA_GPU_MUTLIDIM_ARRAY_CU
#define CUDA_GPU_MUTLIDIM_ARRAY_CU
#define LOOKUP_TABLE_LEN 6
// Evaluate a cubic (degree-3) B-spline interpolation of the 2D coefficient
// image `data` (xdim x ydim, row-major) at fractional position (x, y).
// Uses mirror boundary handling and caches the x-direction indices/weights
// computed on the first row pass (they are identical for every m).
// Relies on the device helper bspline03() for the basis weights.
template<typename T>
__device__
T interpolatedElementBSpline2D_Degree3(T x, T y, int xdim, int ydim, T* data)
{
bool firstTime=true; // Inner loop first time execution flag.
T *ref;
// 4x4 support window: columns l1..l2, rows m1..m2.
int l1 = (int)ceil(x - 2);
int l2 = l1 + 3;
int m1 = (int)ceil(y - 2);
int m2 = m1 + 3;
T columns = 0.0;
T aux;
// Caches for the x-direction mirrored indices and basis weights.
int equivalent_l_Array[LOOKUP_TABLE_LEN];
T aux_Array[LOOKUP_TABLE_LEN];
for (int m = m1; m <= m2; m++)
{
// Mirror the row index at the image borders.
int equivalent_m=m;
if (m<0)
equivalent_m=-m-1;
else if (m>=ydim)
equivalent_m=2*ydim-m-1;
T rows = 0.0;
int index=0;
ref = data + (equivalent_m*xdim);
for (int l = l1; l <= l2; l++)
{
int equivalent_l;
// Check if it is first time executing inner loop.
if (firstTime)
{
T xminusl = x - (T) l;
// Mirror the column index at the image borders.
equivalent_l=l;
if (l<0)
{
equivalent_l=-l-1;
}
else if (l>=xdim)
{
equivalent_l=2*xdim-l-1;
}
equivalent_l_Array[index] = equivalent_l;
aux = bspline03(xminusl);
aux_Array[index] = aux;
index++;
}
else
{
// Reuse the indices/weights cached during the first row pass.
equivalent_l = equivalent_l_Array[index];
aux = aux_Array[index];
index++;
}
T Coeff = ref[equivalent_l];
rows += Coeff * aux;
}
// Set first time inner flag is executed to false.
firstTime = false;
// Accumulate the row sum weighted by the y-direction basis value.
T yminusm = y - (T) m;
aux = bspline03(yminusm);
columns += rows * aux;
}
return columns;
}
#undef LOOKUP_TABLE_LEN
#endif //* CUDA_GPU_MUTLIDIM_ARRAY_CU */
|
14,188 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <vector>
using namespace std;
const int INF = 10000000;
const int V = 10010;
const int MAX_THREAD_DIM2 = 32;
void input(char *inFileName, int B);
void output(char *outFileName);
void block_FW_2GPU(int B);
int ceil(int a, int b);
void calAsync(int gpuId, int B, int Round, int bi0, int bi1, int bj0, int bj1, int half = 0);
int realn;
int n, m; // Number of vertices, edges
int* Dist; // n * n, on host
int* dDist[2]; // n * n, on device
int streamSize[2];
vector<cudaStream_t> streams[2];
/** Return the index of the currently selected CUDA device. */
int getGPUId ()
{
    int id = -1;
    cudaGetDevice(&id);
    return id;
}
/*
 * Hand out an idle stream from the per-GPU pool, creating a new one when
 * every pooled stream is already in use. streamSize[gpuId] counts the
 * streams handed out since the last syncAllStreams().
 */
cudaStream_t getIdleStream (int gpuId)
{
    cudaSetDevice(gpuId);
    vector<cudaStream_t>& pool = streams[gpuId];
    if ((int)pool.size() > streamSize[gpuId])
        return pool[streamSize[gpuId]++];
    // Pool exhausted: create a stream and remember it for reuse.
    cudaStream_t fresh;
    cudaStreamCreate(&fresh);
    pool.push_back(fresh);
    ++streamSize[gpuId];
    return fresh;
}
/*
 * Block the host until all queued work on both GPUs has finished, then mark
 * every pooled stream as idle again.
 * Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
 * documented replacement with identical semantics.
 */
void syncAllStreams ()
{
    cudaSetDevice(0);
    cudaDeviceSynchronize();
    cudaSetDevice(1);
    cudaDeviceSynchronize();
    streamSize[0] = 0;
    streamSize[1] = 0;
}
/*
 * Record a fresh event at the current tail of `stm` and return it.
 * NOTE(review): callers never destroy the returned event, so each call
 * leaks one cudaEvent_t — consider pooling events like the streams; confirm.
 */
cudaEvent_t appendEvent (cudaStream_t stm)
{
    cudaEvent_t marker;
    cudaEventCreate(&marker);
    cudaEventRecord(marker, stm);
    return marker;
}
/*
 * Fork a new stream on `gpuId` that starts only after everything currently
 * queued on `stm` has completed (via an event dependency).
 */
cudaStream_t newBranchStream (int gpuId, cudaStream_t stm)
{
    // cudaSetDevice(gpuId);
    cudaStream_t branch = getIdleStream(gpuId);
    cudaStreamWaitEvent(branch, appendEvent(stm), 0);
    return branch;
}
const bool H2D = true;
const bool D2H = false;
// Asynchronously copy the block-rectangle [bi0,bi1) x [bj0,bj1) (in B-sized
// block units) of the distance matrix between host (Dist) and the device
// copy dDist[gpuId], one matrix row per cudaMemcpyAsync, all on one pooled
// stream which is returned to the caller.
// half == 0: full rectangle; half == 1: only columns with bj >= bi (upper
// part); half == 2: only columns with bj < bi (lower part).
cudaStream_t blockCopyAsync (int gpuId, bool h2d, int B, int bi0, int bi1, int bj0, int bj1, int half = 0)
{
// cudaSetDevice(gpuId);
cudaStream_t stream = getIdleStream(gpuId);
int *dst = dDist[gpuId];
int *src = Dist;
if(!h2d) swap(dst, src);
cudaMemcpyKind kind = h2d? cudaMemcpyHostToDevice: cudaMemcpyDeviceToHost;
for(int i = bi0 * B; i < bi1 * B; ++i)
{
int bi = i / B;
int offset = i * n + bj0 * B;
int size = (bj1 - bj0) * B * sizeof(int);
// Triangle trimming: note the comma expression updates offset AND size
// for half == 1, while half == 2 only shortens the row.
if(half == 1)
offset = i * n + max(bi, bj0) * B,
size = (bj1 - max(bi, bj0)) * B * sizeof(int);
else if(half == 2)
size = (min(bi, bj1) - bj0) * B * sizeof(int);
cudaMemcpyAsync(dst + offset, src + offset, size, kind, stream);
}
return stream;
}
/*
 * Blocked Floyd-Warshall on two GPUs.
 * Usage: <prog> <input file> <output file> <block size>
 * Fix: argv[1..3] were dereferenced without checking argc, crashing on any
 * missing argument; a usage message is printed instead.
 */
int main(int argc, char* argv[])
{
    if (argc < 4)
    {
        fprintf(stderr, "usage: %s <input file> <output file> <block size>\n", argv[0]);
        return 1;
    }
    int B = atoi(argv[3]);
    input(argv[1], B);
    // if(B > n)
    // {
    // B = n;
    // cerr << "Warning: B > n. Set B = n.";
    // }
    block_FW_2GPU(B);
    output(argv[2]);
    return 0;
}
/*
 * Read the graph: a header "n m" followed by m edges "a b v" (1-based
 * endpoints). n is padded up to a multiple of B; the dense distance matrix
 * is built in pinned host memory (0 on the diagonal, INF elsewhere).
 * Fixes: the FILE* was never closed (handle leak), and fopen/fscanf results
 * were unchecked, so a bad path or malformed file silently produced garbage.
 */
void input(char *inFileName, int B)
{
    FILE *infile = fopen(inFileName, "r");
    if (!infile) {
        fprintf(stderr, "cannot open input file %s\n", inFileName);
        exit(1);
    }
    if (fscanf(infile, "%d %d", &realn, &m) != 2) {
        fprintf(stderr, "malformed header in %s\n", inFileName);
        exit(1);
    }
    n = ceil(realn, B) * B;
    cudaMallocHost(&Dist, n * n * sizeof(int));
    for (int i = 0, k = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j, ++k) {
            if (i == j) Dist[k] = 0;
            else Dist[k] = INF;
        }
    }
    while (--m >= 0) {
        int a, b, v;
        if (fscanf(infile, "%d %d %d", &a, &b, &v) != 3) {
            fprintf(stderr, "malformed edge in %s\n", inFileName);
            exit(1);
        }
        --a, --b;
        Dist[a * n + b] = v;
    }
    fclose(infile); // the original leaked this handle
}
/*
 * Write the realn x realn distance matrix ("INF " for unreachable pairs)
 * and release the pinned host buffer.
 * Fixes: the output FILE* was never closed (buffered data could be lost)
 * and fopen was unchecked.
 */
void output(char *outFileName)
{
    FILE *outfile = fopen(outFileName, "w");
    if (!outfile) {
        fprintf(stderr, "cannot open output file %s\n", outFileName);
        exit(1);
    }
    for (int i = 0; i < realn; ++i) {
        for (int j = 0; j < realn; ++j) {
            int d = Dist[i * n + j];
            if (d >= INF) fprintf(outfile, "INF ");
            else fprintf(outfile, "%d ", d);
        }
        fprintf(outfile, "\n");
    }
    fclose(outfile); // flush and close — missing in the original
    cudaFreeHost(Dist);
}
/** Debug dump of the realn x realn matrix to stderr ("INF " for INF). */
void print ()
{
    for (int row = 0; row < realn; ++row) {
        for (int col = 0; col < realn; ++col) {
            const int d = Dist[row * n + col];
            if (d >= INF)
                fprintf(stderr, "INF ");
            else
                fprintf(stderr, "%d ", d);
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
/** Ceiling division for positive operands: smallest q with q*b >= a. */
int ceil(int a, int b)
{
    const int biased = a + b - 1;
    return biased / b;
}
/*
 * Blocked Floyd-Warshall across two GPUs: GPU 0 owns the lower-left region,
 * GPU 1 the upper-right; after each phase the modified blocks are exchanged
 * through the pinned host matrix.
 * Fix: the cleanup called cudaFree(&dDist[t]) — the address of the host
 * pointer variable, not the device allocation — so the device memory was
 * never freed. It now frees dDist[t] itself.
 */
void block_FW_2GPU(int B)
{
    // Mirror the full matrix onto both devices once up front.
    for(int t=0; t<2; ++t)
    {
        cudaSetDevice(t);
        cudaMalloc(&dDist[t], sizeof(int) * n * n);
        cudaMemcpy(dDist[t], Dist, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    }
    int round = ceil(n, B);
    for (int r = 0; r < round; ++r) {
        // Block-rectangle shorthands (bi0, bi1, bj0, bj1) for round r.
#define PIVOT r, r+1, r, r+1
#define LPR r, r+1, 0, round
#define LP r, r+1, 0, r+1
#define UPD 0, round, r, r+1
#define LEFT r, r+1, 0, r
#define RIGHT r, r+1, r+1, round
#define UP 0, r, r, r+1
#define DOWN r+1, round, r, r+1
#define LU 0, r, 0, r
#define RD r+1, round, r+1, round
#define LD r+1, round, 0, r
#define RU 0, r, r+1, round
        // fprintf(stderr, "Round: %d\n", r);
        /* Phase 1: relax the pivot block on both GPUs. */
        calAsync(0, B, r, PIVOT);
        calAsync(1, B, r, PIVOT);
        syncAllStreams();
        /* Phase 2: pivot row/column, split between the GPUs. */
        calAsync(0, B, r, LEFT);
        calAsync(0, B, r, DOWN);
        calAsync(1, B, r, UP);
        calAsync(1, B, r, RIGHT);
        syncAllStreams();
        // Exchange the freshly computed row/column via the host copy.
        calAsync(0, B, r, LD);
        calAsync(1, B, r, RU);
        blockCopyAsync(0, D2H, B, LP);
        blockCopyAsync(0, D2H, B, DOWN);
        blockCopyAsync(1, D2H, B, UP);
        blockCopyAsync(1, D2H, B, RIGHT);
        syncAllStreams();
        blockCopyAsync(1, H2D, B, LEFT);
        blockCopyAsync(1, H2D, B, DOWN);
        blockCopyAsync(0, H2D, B, UP);
        blockCopyAsync(0, H2D, B, RIGHT);
        syncAllStreams();
        /* Phase 3: remaining quadrants, one per GPU. */
        calAsync(0, B, r, LU);
        calAsync(1, B, r, RD);
        blockCopyAsync(0, D2H, B, LD);
        blockCopyAsync(1, D2H, B, RU);
        syncAllStreams();
        blockCopyAsync(0, D2H, B, LU);
        blockCopyAsync(1, D2H, B, RD);
        syncAllStreams();
        if(r == round - 1)
            break;
        // Refresh each GPU with the quadrants the other one just updated.
        blockCopyAsync(1, H2D, B, LU);
        blockCopyAsync(0, H2D, B, RD);
        blockCopyAsync(1, H2D, B, LD);
        blockCopyAsync(0, H2D, B, RU);
    }
    syncAllStreams();
    // cudaMemcpy(Dist, dDist, sizeof(int) * n * n, cudaMemcpyDeviceToHost);
    for(int t=0; t<2; ++t)
    {
        cudaSetDevice(t);
        cudaFree(dDist[t]); // was cudaFree(&dDist[t]): freed the wrong address
    }
}
/*
 * One Floyd-Warshall relaxation for a fixed k over the tile
 * [i0,i1) x [j0,j1): D(i,j) = min(D(i,j), D(i,k) + D(k,j)).
 */
__global__
void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
    const int row = blockDim.x * blockIdx.x + threadIdx.x + i0;
    const int col = blockDim.y * blockIdx.y + threadIdx.y + j0;
    if (row >= i1 || col >= j1)
        return;
    const int viaK = D(row, k) + D(k, col);
    if (viaK < D(row, col))
        D(row, col) = viaK;
}
__global__
// Floyd-Warshall relaxation over k in [k0,k1) for a tile whose rows and
// columns do NOT intersect the pivot block, so all k-iterations are
// independent and the pivot row/column can be cached in shared memory.
void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int di = blockDim.x * blockIdx.x + tx;
int dj = blockDim.y * blockIdx.y + ty;
int i = i0 + di;
int j = j0 + dj;
bool valid = i < i1 && j < j1;
// Si caches a strip of the pivot column (D(i, k..k+cacheSize-1)),
// Sj a strip of the pivot row (D(k..k+cacheSize-1, j)).
__shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
const int cacheSize = MAX_THREAD_DIM2;
// Keep the running minimum in a register; write back once at the end.
int Dij = valid? D(i, j): 0;
int dkmod = 0;
for(int k = k0; k < k1; ++k)
{
// Refill the shared cache every cacheSize iterations; the barriers keep
// readers of the previous strip and writers of the next one apart.
if(dkmod == 0)
{
__syncthreads();
if(i < i1 && k+ty < k1)
Si[ty][tx] = D(i, k+ty);
if(j < j1 && k+tx < k1)
Sj[tx][ty] = D(k+tx, j);
__syncthreads();
}
if(valid)
{
// assert(Si[tx][dkmod] == D(i,k));
// assert(Sj[dkmod][ty] == D(k,j));
// int Dik = D(i, k);
// int Dkj = D(k, j);
// Entry loaded at strip base k-dkmod, offset dkmod == D(i,k) / D(k,j).
int Dik = Si[dkmod][tx];
int Dkj = Sj[dkmod][ty];
int D1 = Dik + Dkj;
if (D1 < Dij)
Dij = D1;
}
dkmod = (dkmod + 1) % cacheSize;
}
if(valid)
D(i, j) = Dij;
}
// Launch, asynchronously on device gpuId, the FW update for round `Round`
// over every B x B block in the block-rectangle [bi0,bi1) x [bj0,bj1).
// half: 0 = all blocks, 1 = keep only bi <= bj (upper incl. diagonal),
// 2 = keep only bi > bj (strict lower).
// NOTE(review): call sites pass only 7 arguments, so `half` presumably has a
// default value in a forward declaration elsewhere in the file — TODO confirm.
void calAsync(int gpuId, int B, int Round, int bi0, int bi1, int bj0, int bj1, int half)
{
cudaSetDevice(gpuId);
for(int bi = bi0; bi < bi1; ++bi)
for(int bj = bj0; bj < bj1; ++bj)
{
if(half == 1 && bi > bj)
continue;
if(half == 2 && bi <= bj)
continue;
// Element ranges of this block, clipped to the matrix edge.
int i0 = bi * B;
int i1 = min((bi +1) * B, n);
int j0 = bj * B;
int j1 = min((bj +1) * B, n);
int k0 = Round * B;
int k1 = min((Round +1) * B, n);
// A block sharing its row or column with the pivot block must serialize
// over k (each k reads values written by k-1 in the same tile).
bool iDepends = i0 == k0;
bool jDepends = j0 == k0;
int threadDim = MAX_THREAD_DIM2;//std::min(B, MAX_THREAD_DIM2);
int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2;
dim3 grid(blockDim, blockDim), block(threadDim, threadDim);
cudaStream_t stm = getIdleStream(gpuId);
if(iDepends || jDepends)
{
// Dependent tile: one kernel launch per k, in stream order.
for(int k=k0; k<k1; ++k)
Update<<<grid, block, 0, stm>>>(k, i0, j0, i1, j1, dDist[gpuId], n);
}
else
// Independent tile: single kernel iterates k internally with caching.
UpdateIndependent<<<grid, block, 0, stm>>>(k0, k1, i0, j0, i1, j1, dDist[gpuId], n);
}
}
|
14,189 | #include <stdio.h>
#define SIZE 1024
// Element-wise c[i] = a[i] + b[i] for a single-block launch: the thread
// index is the element index, guarded against n < blockDim.x.
__global__ void VectorAdd(int *a, int *b, int *c, int n)
{
    const int idx = threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Demo driver: fills two SIZE-element vectors, adds them on the GPU with a
// single block of SIZE threads, and prints the head/tail of each vector.
int main()
{
    printf("\n------------------------------\nSUMA VECTORIAL\n------------------------------\n");
    // ALLOCATE AND INITIALIZE DATA ON CPU
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    a = (int *)malloc(SIZE*sizeof(int));
    b = (int *)malloc(SIZE*sizeof(int));
    c = (int *)malloc(SIZE*sizeof(int));
    // ALOCATE DATA ON GPU
    cudaMalloc( &d_a, SIZE*sizeof(int));
    cudaMalloc( &d_b, SIZE*sizeof(int));
    cudaMalloc( &d_c, SIZE*sizeof(int));
    for( int i=0;i<SIZE;++i)
    {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    printf("\n Vector A\n \n");
    for( int i=0;i<3;++i)
        printf("a[%d] = %d\n", i, a[i]);
    printf(".\n.\n.\n");
    for( int i=SIZE-3;i<SIZE;++i)
        printf("a[%d] = %d\n", i, a[i]);
    printf("\n Vector B\n \n");
    for( int i=0;i<3;++i)
        printf("b[%d] = %d\n", i, b[i]);
    printf(".\n.\n.\n");
    for( int i=SIZE-3;i<SIZE;++i)
        printf("b[%d] = %d\n", i, b[i]);
    // TRANSFER DATA FROM CPU TO GPU.
    // FIX: d_c is not copied up any more — the kernel overwrites every
    // element of it, so the old H2D copy of the zeroed c was wasted work.
    cudaMemcpy( d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice );
    // RUN KERNEL (one block of SIZE threads; SIZE=1024 fits the per-block limit)
    VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
    // Launch-configuration errors only surface through cudaGetLastError.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    // TRANSFER DATA FROM GPU TO CPU (blocking copy also synchronizes)
    cudaMemcpy( c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost );
    printf("\n A[i] + B[i] = C[i] \n");
    printf("\n Vector C\n \n");
    for( int i=0;i<3;++i)
        printf("c[%d] = %d\n", i, c[i]);
    printf(".\n.\n.\n");
    for( int i=SIZE-3;i<SIZE;++i)
        printf("c[%d] = %d\n", i, c[i]);
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
14,190 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
// Shared loop bound for the reduction below; written by thread 0 only.
__device__ int count;
// Block-wide tree reduction: folds partial_sum[0 .. dummy-1] into
// partial_sum[0]. Expects >= dummy threads and partial_sum in shared memory.
__device__ void sum(double *partial_sum, int dummy) {
if(threadIdx.x == 0) {
count = dummy;
// Pad an odd element count to even with a zero so the halving loop works.
if(count %2 != 0) {
count++;
partial_sum[count-1] = 0;
}
}
__syncthreads();
for(int i = count/2; i > 0; i = i/2) {
if(threadIdx.x < i)
partial_sum[threadIdx.x] += partial_sum[threadIdx.x + i];
__syncthreads();
if(threadIdx.x == 0) {
// Odd stride: thread 0 folds the stray element and decrements its own i.
// Since (i-1)/2 == i/2 for odd i, every thread still computes the same
// next stride, so the __syncthreads() calls stay aligned across the block.
if(i%2 != 0 && i != 1) {
partial_sum[0] += partial_sum[--i];
}
}
__syncthreads();
}
__syncthreads();
return;
}
// Fill x/y with the coordinates of an m x m interior grid of the unit
// square: point (i, j) sits at ((i+1)h, (j+1)h) with h = 1/(m+1),
// stored row-major at index i*m + j.
void init_grid_points(double * x, double * y, int m)
{
    const double h = (double)1/(m + 1);
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < m; ++j)
        {
            const int idx = i*m + j;
            x[idx] = (i + 1)*h;
            y[idx] = (j + 1)*h;
        }
}
// Synthetic observations: a paraboloid peaking at (0.5, 0.5) plus uniform
// noise in [-0.05, 0.05) drawn from rand() (seed set by the caller).
void init_observed_data_vector(double * f, double * x, double * y, int size)
{
    for (int i = 0; i < size; i++)
    {
        const double noise = 0.1 * ((double)rand() / (double)RAND_MAX - 0.5);
        f[i] = 1.0 - pow(x[i] - 0.5, 2) - pow(y[i] - 0.5, 2) + noise;
    }
}
// Build the regularized kernel matrix A = t*I + K (row-major, n x n), where
// K[i][j] = exp(-||r_i - r_j||^2) is a squared-exponential kernel over the
// points (x[i], y[i]) and t = 0.01 is the ridge/regularization constant.
void compute_A( double * A, double * x, double * y, int n)
{
    int i, j;
    double d;
    const double t = 0.01;    // regularization constant (was assigned later)
    // Initialize K
    for (i = 0; i < n; i ++)
    {
        for (j = 0; j < n; j++)
        {
            d = pow(x[i] - x[j], 2) + pow(y[i] - y[j], 2);
            A[i*n + j] = exp(-d);
        }
    }
    // Compute A = tI + K
    for (i = 0; i < n; i ++)
    {
        A[i*n + i] += t;
    }
}
// Cross-covariance vector between the query point rstar = (rstar[0], rstar[1])
// and the n training points: k[i] = exp(-||rstar - r_i||^2).
void compute_k(double * k, double * x, double * y, double * rstar, int n)
{
    for (int i = 0; i < n; ++i)
    {
        const double d = pow(rstar[0]-x[i], 2) + pow(rstar[1]-y[i], 2);
        k[i] = exp(-d);
    }
}
// In-place LU factorization (Doolittle, no pivoting) of the row-major n x n
// matrix A. On return the strict lower triangle holds the L multipliers and
// the upper triangle (incl. diagonal) holds U. Assumes nonzero pivots.
void compute_LU_factors(double * A, int n)
{
    for (int k = 0; k < n - 1; ++k)
    {
        const double pivot = A[k*n + k];
        for (int i = k + 1; i < n; ++i)
        {
            const double m = A[i*n + k] / pivot;
            for (int j = k + 1; j < n; ++j)
                A[i*n + j] = A[i*n + j] - m * A[k*n + j];
            A[i*n + k] = m;   // store the multiplier in the eliminated slot
        }
    }
}
// Single-block parallel LU factorization (no pivoting) of the row-major
// n x n matrix A. N is the number of threads in the block; work in each
// elimination step is strided across them, with barriers between the
// multiplier computation and the trailing-submatrix update.
__global__ void compute_LU_factors(int N, double * A, int n)
{
int k, i, j;
int m;
for (k = 0; k < n - 1; k ++)
{
// Column k below the pivot becomes the L multipliers (in place).
for (i = k + 1 + threadIdx.x; i < n; i += N)
{
A[i*n + k] = A[i*n + k] / A[k*n + k];
}
__syncthreads();
// Flatten the (n-k-1)^2 trailing submatrix and stride the updates.
// NOTE(review): m is int, so (n-k-1)^2 overflows for n > ~46341 — TODO confirm intended n range.
for (m = threadIdx.x; m < (n - k - 1)*(n - k - 1); m += N )
{
i = k + 1 + m / (n - k - 1);
j = k + 1 + m % (n - k - 1);
A[i*n + j] -= A[i*n + k] * A[k*n + j];
}
__syncthreads();
}
return;
}
// Solve A z = f where A holds packed LU factors from compute_LU_factors
// (unit-diagonal L in the strict lower triangle, U in the upper triangle).
void solve_triangular_systems(double * z, double * A, double * f, int n)
{
    // Forward substitution, L y = f (y stored into z).
    for (int i = 0; i < n; ++i)
    {
        double s = 0;
        for (int j = 0; j < i; ++j)
            s += A[i*n + j] * z[j];
        z[i] = f[i] - s;
    }
    // Back substitution, U z = y.
    for (int i = n - 1; i >= 0; --i)
    {
        double s = 0;
        for (int j = i + 1; j < n; ++j)
            s += A[i*n + j] * z[j];
        z[i] = (z[i] - s) / A[i*n + i];
    }
}
// Single-block parallel forward/back substitution for A z = f, with A in
// packed LU form. N threads cooperate on each row's dot product via the
// shared-memory reduction helper sum(); requires N doubles of dynamic
// shared memory (partial_sum).
__global__ void solve_triangular_systems(int N, double * z, double * A, double * f, int n)
{
extern __shared__ double partial_sum[];
int i, j;
//Solve Az = f by LUz = f
//1. Solve Ly = f for y
for (i = 0; i < n; i ++)
{
// Each thread accumulates a strided slice of sum_j A[i][j] * z[j], j < i.
partial_sum[threadIdx.x] = 0;
for (j = threadIdx.x; j < i; j += N)
{
partial_sum[threadIdx.x] += A[i*n + j] * z[j];
}
// Reduce over min(N, i) non-zero slots; result lands in partial_sum[0].
sum (partial_sum, (N<i)?N:i);
if (threadIdx.x == 0){
z[i] = f[i] - partial_sum[0];
}
__syncthreads();
}
__syncthreads();
//2. Solve Uz = y for z
for (i = n - 1; i >= 0; i --)
{
partial_sum[threadIdx.x] = 0;
for (j = i + 1 + threadIdx.x; j < n; j += N)
{
partial_sum[threadIdx.x] += A[i*n + j] * z[j];
}
__syncthreads();
sum(partial_sum, (N < (n-1-i))? N:(n-1-i));
if(threadIdx.x == 0) {
z[i] = (z[i]-partial_sum[0])/A[i*n + i];
}
__syncthreads();
}
return;
}
// Predicted value at the query point: the dot product k' * z.
double compute_fstar(double * k, double * z, int n)
{
    double fstar = 0.0;
    for (int idx = 0; idx < n; ++idx)
        fstar += k[idx] * z[idx];
    return fstar;
}
// Print n doubles on one line, two decimals each, then a newline.
void print_array(double * array, int n)
{
    for (int i = 0; i < n; ++i)
        printf("%.2f ", array[i]);
    printf("\n");
}
// Print an m x n row-major matrix, two decimals per entry, one row per line.
void print_matrix(double * matrix, int m, int n)
{
    for (int i = 0; i < m; i++)
    {
        for (int j = 0; j < n; j++)
        {
            // FIX: the row stride of a row-major m x n matrix is the column
            // count n; the old i*m + j misindexed whenever m != n.
            printf("%.2f ", matrix[i*n + j]);
        }
        printf("\n");
    }
}
// GP-regression demo: builds A = tI + K over an m x m grid, LU-factors A and
// solves A z = f on the GPU (single block), then predicts f* = k' z on the
// host. Usage: prog <grid size m> <rstar.x> <rstar.y>
int main(int argc, char** argv)
{
    // Host Data
    double * hGx; // host grid x-coordinate array
    double * hGy; // host grid y-coordinate array
    double * hA;  // host tI+K
    double * hLU; // host LU factorization of A
    double * hf;  // host observed data vector f
    double * hk;  // host vector k
    double * hz;  // host triangular systems solution
    // Device Data
    double * dA;  // device tI+K (overwritten in place by the LU kernel)
    double * df;  // device observed data vector f
    double * dz;  // device triangular systems solution
    // Grid size m, grid points n
    int m = 4, n;
    // Coordinate of r*
    double * rstar;
    rstar = (double *) malloc(2 * sizeof(double));
    // Timing variables
    float LU_time, solver_time, total_time;
    cudaEvent_t start, stop; // GPU timing variables
    // Other variables
    double fstar;
    int size;
    cudaError_t err;
    // Timing initializations
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Check input
    if (argc > 3){
        m = atoi(argv[1]);
        rstar[0] = atof(argv[2]);
        rstar[1] = atof(argv[3]);
        printf("r*=(%lf, %lf)\n", rstar[0], rstar[1]);
    }else{
        printf("Please indicate grid size and coordinate of r*");
        free(rstar);   // FIX: was leaked on the early-exit path
        return -1;
    }
    // Allocate host coordinate arrays
    n = m * m;
    size = n * sizeof(double);
    hGx = (double *) malloc(size);
    hGy = (double *) malloc(size);
    hf = (double *) malloc(size);
    hk = (double *) malloc(size);
    hz = (double *) malloc(size);
    size = n * n * sizeof(double);
    hA = (double *) malloc(size);
    hLU = (double *) malloc(size);
    init_grid_points(hGx, hGy, m);
    srand(time(0));
    init_observed_data_vector(hf, hGx, hGy, n);
    compute_A(hA, hGx, hGy, n); // tI+K
    compute_k(hk, hGx, hGy, rstar, n);
    // Allocate device arrays and upload f and A
    size = n * sizeof(double);
    cudaMalloc(&df, size);
    cudaMemcpy(df, hf, size, cudaMemcpyHostToDevice);
    cudaMalloc(&dz, size);
    size = n * n * sizeof(double);
    cudaMalloc(&dA, size);
    cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
    // Invoke kernel
    printf("GPU version\n");
    int threads = 192;
    printf("Number of threads %d\n", threads);
    cudaEventRecord(start, 0);
    compute_LU_factors<<<1, threads>>>(threads, dA, n);
    // FIX: kernel launches fail silently without an explicit error check.
    err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "LU kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&LU_time, start, stop);
    size = n * n * sizeof(double);
    cudaMemcpy(hLU, dA, size, cudaMemcpyDeviceToHost);
    printf("LU time = %f ms\n", LU_time);
    cudaEventRecord(start, 0);
    // Dynamic shared memory: one double of reduction scratch per thread.
    solve_triangular_systems<<<1, threads, threads * sizeof(double)>>>(threads, dz, dA, df, n);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "solver kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&solver_time, start, stop);
    size = n * sizeof(double);
    cudaMemcpy(hz, dz, size, cudaMemcpyDeviceToHost);
    printf("Solver time = %f ms\n", solver_time);
    total_time = LU_time + solver_time;
    fstar = compute_fstar(hk, hz, n);
    printf("Total time = %f ms, Predicted value = %lf\n", total_time, fstar);
    cudaFree(df);
    cudaFree(dz);
    cudaFree(dA);
    free(hGx);
    free(hGy);
    free(hA);
    free(hLU);
    free(hf);
    free(hk);
    free(hz);
    free(rstar);   // FIX: was leaked
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
14,191 | #define BlockDim 16
#include <stdio.h>
#include <stdlib.h>
//*********************************************************************
//Function to multiply two matrices using GPU without any optimisation
//*********************************************************************
// Naive (unoptimized) dense matmul C = A * B for N x N row-major
// matrices: one thread computes one output element.
__global__ void mul(float *a,float *b, float *c,int N)
{
    const int row = blockIdx.y * BlockDim + threadIdx.y;
    const int col = blockIdx.x * BlockDim + threadIdx.x;
    if (row >= N || col >= N)
        return;   // tail threads of the rounded-up grid do nothing
    float acc = 0;
    for (int k = 0; k < N; ++k)
        acc += a[row * N + k] * b[k * N + col];
    c[row * N + col] = acc;
}
//*********************************************************************
// Main Function
//*********************************************************************
// Main Function: random N x N matmul on the GPU, printing inputs and the
// result for small N. Usage: prog [N] (default N = 4).
int main(int argc, char *argv[])
{
    int N = 4;
    // FIX: argv[1] was dereferenced unconditionally, crashing when the
    // program was run without arguments despite the N=4 default.
    if (argc > 1)
        N = atoi(argv[1]);
    // allocate memory in cpu
    float *cpu_a, *cpu_b, *cpu_c;
    cpu_a = (float *)malloc(sizeof(float)*N*N);
    cpu_b = (float *)malloc(sizeof(float)*N*N);
    cpu_c = (float *)malloc(sizeof(float)*N*N);
    // Allocate memory on the gpu
    float *gpu_a, *gpu_b, *gpu_c;
    cudaMalloc(&gpu_a, sizeof(float)*N*N);
    cudaMalloc(&gpu_b, sizeof(float)*N*N);
    cudaMalloc(&gpu_c, sizeof(float)*N*N);
    // Initialize mat A and B with uniform values in [0, 1]
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            cpu_a[i * N + j] = (float)rand()/(float)(RAND_MAX);
            cpu_b[i * N + j] = (float)rand()/(float)(RAND_MAX);
        }
    }
    if(N<10){
        //displays Matrix 1 only for N<10
        printf("Matrix 1 is \n");
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++)
                printf("%f ", cpu_a[i * N + j]);
            printf("\n");
        }
        //displays Matrix 2 only for N<10
        printf("Matrix 2 is \n");
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++)
                printf("%f ", cpu_b[i * N + j]);
            printf("\n");
        }
    }
    // copy matrix A and B from cpu to gpu memory
    cudaMemcpy(gpu_a, cpu_a, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, cpu_b, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    // Grid rounds up so every element is covered; kernel bounds-checks.
    dim3 dimBlock(BlockDim, BlockDim,1);
    dim3 dimGrid((unsigned int) (N-1)/BlockDim +1,(unsigned int) (N-1)/BlockDim +1,1);
    // Kernel
    mul<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c, N);
    // FIX: surface launch errors (bad config etc.) instead of silence.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "mul launch failed: %s\n", cudaGetErrorString(err));
    // Transfer results from gpu to cpu (blocking copy also synchronizes)
    cudaMemcpy(cpu_c, gpu_c, sizeof(float)*N*N, cudaMemcpyDeviceToHost);
    // Print if N<10
    if (N<10) {
        printf("Done, Matrix Result is\n");
        for (int i = 0; i < N; ++i) {
            for (int j = 0; j < N; ++j) {
                printf("%f ", cpu_c[i * N + j]);
            }
            printf("\n");
        }
    }
    // free memory
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    free(cpu_a);
    free(cpu_b);
    free(cpu_c);
    return 0;
}
|
14,192 | #include <stdio.h>
// Enumerate CUDA devices and print per-SM core counts and launch limits.
int main() {
    int devices;
    cudaGetDeviceCount(&devices);
    for (int d = 0; d < devices; ++d) {
        cudaDeviceProp p;
        cudaGetDeviceProperties(&p, d);
        // FP32 cores per SM by compute capability (per the CUDA C Programming
        // Guide arch tables). The old table stopped at CC 5.x and reported 0
        // for every newer GPU.
        int mp = p.multiProcessorCount, sp = 0;
        if (p.major == 2) {                       // Fermi
            if (p.minor == 1) sp = 48;
            else sp = 32;
        } else if (p.major == 3) {                // Kepler
            sp = 192;
        } else if (p.major == 5) {                // Maxwell
            sp = 128;
        } else if (p.major == 6) {                // Pascal: GP100=64, GP10x=128
            sp = (p.minor == 0) ? 64 : 128;
        } else if (p.major == 7) {                // Volta / Turing
            sp = 64;
        } else if (p.major == 8) {                // Ampere: GA100=64, GA10x/Ada=128
            sp = (p.minor == 0) ? 64 : 128;
        } else if (p.major == 9) {                // Hopper
            sp = 128;
        }
        printf("Device %d: %s\n", d, p.name);
        printf(" -> multiprocessor count: %d\n", mp);
        printf(" -> stream processor count: %d (total %d)\n", sp, sp * mp);
        printf(" -> warp size: %d\n", p.warpSize);
        printf(" -> max threads per block: %d\n", p.maxThreadsPerBlock);
        printf(" -> max block dimensions: %d x %d x %d\n", p.maxThreadsDim[0], p.maxThreadsDim[1], p.maxThreadsDim[2]);
        printf(" -> max grid dimensions: %d x %d x %d\n", p.maxGridSize[0], p.maxGridSize[1], p.maxGridSize[2]);
        puts("");
    }
    return 0;
}
14,193 | #include "includes.h"
#define VERBOSE 0
#define INTEGER_SCALE_FACTOR 100
// Command line argument definitions
#define DEFAULT_NUM_REPEATS 1
#define DEFAULT_NUM_ITERATIONS 1
#define DEFAULT_NUM_ELEMENTS 128
#define DEFAULT_SEED 0
#define DEFAULT_DEVICE 0
#define MIN_ARGS 1
#define MAX_ARGS 6
#define ARG_EXECUTABLE 0
#define ARG_REPEATS 1
#define ARG_ITERATIONS 2
#define ARG_ELEMENTS 3
#define ARG_SEED 4
#define ARG_DEVICE 5
#define MAX 10
// Lazy CUDA Error handling
// Fill the first numInputs slots of d_quantity with `value`,
// one element per thread (grid assumed to cover numInputs).
__global__ void setQuantities( unsigned int numInputs, unsigned int value, unsigned int * d_quantity ){
    const unsigned int gid = threadIdx.x + (blockDim.x * blockIdx.x);
    if (gid >= numInputs)
        return;
    d_quantity[gid] = value;
}
14,194 | #ifdef GALAX_MODEL_GPU
#include "cuda.h"
#include "kernel.cuh"
#include <mipp.h>
#define DIFF_T (0.1f)
#define EPS (1.0f)
// Component-wise difference a - b.
inline __host__ __device__ float3 sub(float3 a, float3 b)
{
    float3 r;
    r.x = a.x - b.x;
    r.y = a.y - b.y;
    r.z = a.z - b.z;
    return r;
}
// Component-wise sum a + b.
inline __host__ __device__ float3 add(float3 a, float3 b)
{
    float3 r;
    r.x = a.x + b.x;
    r.y = a.y + b.y;
    r.z = a.z + b.z;
    return r;
}
// Component-wise (Hadamard) product a * b.
inline __host__ __device__ float3 multi1(float3 a, float3 b)
{
    float3 r;
    r.x = a.x * b.x;
    r.y = a.y * b.y;
    r.z = a.z * b.z;
    return r;
}
// Scale vector a by scalar b.
inline __host__ __device__ float3 multi2(float3 a, float b)
{
    float3 r;
    r.x = a.x * b;
    r.y = a.y * b;
    r.z = a.z * b;
    return r;
}
// Accumulate onto accelerationsGPU[i] the gravitational pull of every other
// particle j. velocitiesGPU is unused here; it is kept so the launch
// signature matches maj_pos. One thread per particle.
__global__ void compute_acc(float3 * positionsGPU, float3 * velocitiesGPU, float3 * accelerationsGPU, float* massesGPU, int n_particles)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: the caller rounds the grid up, so tail threads used to read and
    // write past the end of the particle arrays (out-of-bounds access).
    if (i >= (unsigned int)n_particles)
        return;
    for (int j = 0; j < n_particles; j++)
    {
        if (i == (unsigned int)j)
            continue;   // no self-interaction
        const float3 diff = sub(positionsGPU[j], positionsGPU[i]);
        const float3 d2 = multi1(diff, diff);
        float dij = d2.x + d2.y + d2.z;   // squared distance
        if (dij < 1.0f)
        {
            // Clamp very close encounters to avoid a force blow-up.
            dij = 10.0f;
        }
        else
        {
            // FIX: float literals + sqrtf avoid silent double promotion
            // (the old std::sqrt / 10.0 forced double math per pair).
            dij = sqrtf(dij);
            dij = 10.0f / (dij * dij * dij);
        }
        const float3 dir = multi2(diff, dij);
        const float3 contrib = multi2(dir, massesGPU[j]);
        accelerationsGPU[i] = add(accelerationsGPU[i], contrib);
    }
}
// Euler step: v += a * 2.0f, then x += v * 0.1f (0.1f matches DIFF_T; the
// 2.0f velocity factor has no named constant here).
// NOTE(review): there is no bounds check, yet update_position_cu launches a
// rounded-up grid — tail threads write past n_particles. Fixing it needs an
// n_particles parameter added at the call site — TODO confirm and fix together.
__global__ void maj_pos(float3 * positionsGPU, float3 * velocitiesGPU, float3 * accelerationsGPU)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
velocitiesGPU[i] = add(velocitiesGPU[i], multi2(accelerationsGPU[i], 2.0f));
positionsGPU[i] = add(positionsGPU[i], multi2(velocitiesGPU[i], 0.1f));
}
// One simulation step: compute accelerations, then integrate velocities and
// positions. Grid is rounded up to cover all n_particles.
void update_position_cu(float3* positionsGPU, float3* velocitiesGPU, float3* accelerationsGPU, float* massesGPU, int n_particles)
{
    const int threadsPerBlock = 128;
    const int blocks = (n_particles + (threadsPerBlock - 1)) / threadsPerBlock;
    compute_acc<<<blocks, threadsPerBlock>>>(positionsGPU, velocitiesGPU, accelerationsGPU, massesGPU, n_particles);
    maj_pos   <<<blocks, threadsPerBlock>>>(positionsGPU, velocitiesGPU, accelerationsGPU);
}
#endif // GALAX_MODEL_GPU |
14,195 | #include "includes.h"
// SAXPY (y = a*x + y) staged through shared memory with double buffering:
// buffer 0 and buffer 1 alternate so the load of the next chunk overlaps the
// compute/store of the previous one. Each iteration pair advances idx0/idx1
// by two grid-widths; the loop is expected to run NUM_ITERS chunks per CTA.
// NOTE(review): timer_vals is never used in this kernel — TODO confirm.
__global__ void saxpy_shmem_doublebuffer ( float* y, float* x, float a, clock_t * timer_vals)
{
volatile __shared__ float sdata_x0 [COMPUTE_THREADS_PER_CTA];
volatile __shared__ float sdata_y0 [COMPUTE_THREADS_PER_CTA];
volatile __shared__ float sdata_x1 [COMPUTE_THREADS_PER_CTA];
volatile __shared__ float sdata_y1 [COMPUTE_THREADS_PER_CTA];
int tid = threadIdx.x ;
unsigned int idx0, idx1;
// idx0 addresses even chunks, idx1 odd chunks (one grid-width apart).
idx0 = blockIdx.x * COMPUTE_THREADS_PER_CTA + tid;
idx1 = COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + tid;
for (int i=0; i < NUM_ITERS; i+=2) {
__syncthreads();
// Load even chunk into buffer 0.
sdata_x0[tid] = x[idx0];
sdata_y0[tid] = y[idx0];
// Drain buffer 1 from the previous iteration (nothing to drain on i==0).
if (i!=0) {
y[idx1] = a * sdata_x1[tid] + sdata_y1[tid];
idx1 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ;
}
__syncthreads();
// Load odd chunk into buffer 1, then drain buffer 0.
sdata_x1[tid] = x[idx1];
sdata_y1[tid] = y[idx1];
y[idx0] = a * sdata_x0[tid] + sdata_y0[tid];
idx0 += 2 * COMPUTE_THREADS_PER_CTA * CTA_COUNT ;
}
__syncthreads();
// Final drain of the last odd chunk left in buffer 1.
y[idx1] = a * sdata_x1[tid] + sdata_y1[tid];
}
14,196 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
// Auto-generated fuzzing kernel (launched <<<1,1>>> by main): exercises a
// fixed expression tree over the scalar/array arguments and prints the final
// accumulator. Not intended to be meaningful numerics — do not "fix" values.
void compute(float comp, float var_1,float var_2,int var_3,float var_4,float var_5,float var_6,float* var_7,float* var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
if (comp <= var_1 + var_2 + -0.0f) {
comp = sinf((+1.4539E-41f * (var_4 + var_5 * (+0.0f * var_6))));
// var_7/var_8 are 10-element buffers from initPointer; var_3 is expected
// to be <= 10 by the generator — NOTE(review): no bounds guard here.
for (int i=0; i < var_3; ++i) {
var_7[i] = cosf((var_9 * sinf(-1.0267E26f / +1.2468E23f)));
var_8[i] = -1.2872E-42f;
comp += var_8[i] * var_7[i] - -0.0f - var_10;
}
if (comp == (var_11 * logf(+1.6407E19f))) {
float tmp_1 = (var_12 / var_13 - sqrtf((+1.0377E-37f - +1.5482E-44f + -1.5813E-35f * (-0.0f - -0.0f))));
float tmp_2 = (-1.6053E26f * +1.1100E-37f - ceilf(+1.9220E36f / ceilf(var_14 + (+1.6144E-36f / var_15 * (-1.8990E34f / (+1.5194E24f - -0.0f))))));
float tmp_3 = +0.0f;
comp = tmp_3 / tmp_2 / tmp_1 / var_16 / var_17 / var_18 + -1.0775E35f / sinhf(var_19 * -1.0626E-35f);
}
if (comp < var_20 * -1.6531E-42f * -0.0f) {
float tmp_4 = -0.0f;
comp = tmp_4 - +0.0f / (var_21 / -1.5219E-44f - var_22);
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer with every slot set to v.
// Ownership passes to the caller (free with free()).
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    for (int i = 0; i < 10; ++i) {
        buf[i] = v;
    }
    return buf;
}
// Driver for the generated kernel: reads 23 values from the command line,
// launches compute<<<1,1>>>, and waits for its printf output.
int main(int argc, char** argv) {
    /* Program variables */
    // FIX: argv[1..23] were dereferenced without checking argc, crashing
    // whenever fewer than 23 arguments were supplied.
    if (argc < 24) {
        fprintf(stderr, "usage: %s v1 v2 ... v23\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    int tmp_4 = atoi(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float* tmp_8 = initPointer( atof(argv[8]) );
    float* tmp_9 = initPointer( atof(argv[9]) );
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
    cudaDeviceSynchronize();
    // FIX: the two initPointer buffers were leaked. NOTE: the kernel receives
    // these HOST pointers directly; with non-unified memory that is an
    // illegal device access — presumably relies on managed/UVA memory.
    free(tmp_8);
    free(tmp_9);
    return 0;
}
|
14,197 | #include "includes.h"
// Forward pass of a dense 784 -> 256 layer (no bias, no activation):
// one block per input sample (blockIdx.x), one thread per output neuron
// (threadIdx.x in [0,256)). syn1 is 784 x 256 row-major.
__global__ void Fprop1(const float* in, const float* syn1, float* layer1)
{
    const int neuron = threadIdx.x;   // 256
    const int sample = blockIdx.x;    // Data.count
    const float* pixels = in + sample * 28 * 28;
    float acc = 0.0;
    for (int j = 0; j < 28 * 28; ++j)
        acc += pixels[j] * syn1[j * 256 + neuron];
    layer1[sample * 256 + neuron] = acc;
}
14,198 | #include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cuda.h>
// Error-check wrapper for CUDA runtime calls: evaluates `op`, and on any
// failure prints file/line plus the CUDA error string and exits.
// (Comments must stay outside the macro body — a // inside would break the
// backslash line continuations.)
#define cudaCheck(op) \
do { \
cudaError_t err = op; \
if (cudaSuccess != err) \
{ \
fprintf(stderr, \
"%s:%d CUDA operation failed: %s\n", \
__FILE__, __LINE__, \
cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while(0)
// Alternative no-op definition, kept for quickly disabling the checks:
//#define cudaCheck(op) op
// Fill the N x N matrix M (column-major in i) with
// M[i + j*N] = g1[N + i - j] * (K[i] + k) * (K[j] + k).
// g1 must hold 2*N entries so N + i - j stays in range for i, j in [0, N).
__global__ void kernel(float *K, float *g1, float *M, int N, float k)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    const int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i >= N || j >= N)
        return;
    M[i+j*N] = g1[N+i-j] * (K[i]+k) * (K[j]+k);
}
// Build an N x N matrix on the GPU from random vectors K and g1, copy it
// back, and report the wall time. Usage: prog [N] (default 1000).
int main(int argc, char **argv)
{
    using namespace std::chrono;
    int N = 1000;
    if (argc == 2) {
        N = atoi(argv[1]);
    }
    printf("Using %d x %d\n", N, N);
    auto t0 = high_resolution_clock::now();
    float *M = new float[N*N];
    float *K = new float[N];
    float *g1 = new float[2*N];
    float k = 1.3;
    for (int i=0; i<N; i++)
        K[i] = rand() / (float)RAND_MAX;
    for (int i=0; i<2*N; i++)
        g1[i] = rand() / (float)RAND_MAX;
    float *d_M = NULL;
    float *d_K = NULL;
    float *d_g1 = NULL;
    cudaCheck(cudaMalloc((void**)&d_M, sizeof(float)*N*N));
    cudaCheck(cudaMalloc((void**)&d_K, sizeof(float)*N ));
    cudaCheck(cudaMalloc((void**)&d_g1, sizeof(float)*2*N));
    // FIX: the old H2D copy of M uploaded N*N uninitialized floats for
    // nothing — the kernel overwrites every element of d_M.
    cudaCheck(cudaMemcpy(d_K, K, sizeof(float)*N , cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_g1, g1, sizeof(float)*2*N, cudaMemcpyHostToDevice));
    // 128x1 blocks: j advances one row per grid-y block (blockDim.y == 1).
    dim3 dimBlock(128);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x,
                 (N+dimBlock.y-1)/dimBlock.y);
    kernel<<<dimGrid,dimBlock>>>(d_K,d_g1,d_M,N,k);
    cudaCheck(cudaGetLastError());
    cudaCheck(cudaMemcpy(M, d_M, sizeof(float)*N*N, cudaMemcpyDeviceToHost));
    cudaCheck(cudaFree(d_M));
    cudaCheck(cudaFree(d_K));
    cudaCheck(cudaFree(d_g1));
    delete[] M;
    delete[] K;
    delete[] g1;
    duration<float> t = high_resolution_clock::now() - t0;
    printf("GPU took %f seconds\n", t.count());
    return 0;
}
|
14,199 | /*
============================================================================
Name : Pomentale.cu
Author : stomo
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <cassert>
#include <vector>
#include <algorithm>
#include <thrust/complex.h>
#define EPS 0.000001 // convergence tolerance (stopping criterion)
#define MAXIT 16 // maximum number of iterations
int P; // order of the Pomentale method (set from argv in main)
// Zeros of the test polynomial
std::vector< thrust::complex<double> > Zrs
{
thrust::complex<double> ( 1.0, 1.0 ), // z1
thrust::complex<double> ( -1.0, 1.0 ), // z2
thrust::complex<double> ( 0.0, -1.0 ) // z3
};
// Multiplicity of each zero (parallel to Zrs)
std::vector<double> Mul
{
1.0, // n1
2.0, // n2
3.0 // n3
};
// Polynomial function value
template<typename T> thrust::complex<T> Pval( thrust::complex<T> z )
{
thrust::complex<T> tmp;
tmp = thrust::complex<T> (1.0,0.0);
for (int i=0; i<Zrs.size(); i++)
{
tmp *= pow( z - Zrs[i], Mul[i] );
}
return tmp;
}
// Evaluate the test polynomial at z0 read from the command line.
// Usage: a.out [Order] [Re(z0)] [Im(z0)] — Order must be a power of two <= 32.
int main(int argc, char *argv[])
{
    if (argc<4)
    {
        std::cerr << "Usage: a.out [Order] [Real(z0)] [Imag(z0)]\n";
        exit(EXIT_FAILURE);
    }
    P = atoi(argv[1]); // order of the Pomentale method
    // FIX: was bitwise `|` between the comparisons; logical || states the
    // intent (accept exactly one of the listed orders) and short-circuits.
    assert( (P==2) || (P==4) || (P==8) || (P==16) || (P==32) );
    double rez0 = atof(argv[2]);
    double imz0 = atof(argv[3]);
    thrust::complex<double> z0 = thrust::complex<double>( rez0, imz0 );
    thrust::complex<double> z = Pval(z0);
    std::cout << "z0 = (" << z0.real() << ", " << z0.imag() << ")\n";
    std::cout << "z = (" << z.real() << ", " << z.imag() << ")\n";
    return 0;
}
|
14,200 | #include<iostream>
#include<ctime>
using namespace std;
typedef unsigned int index_t;
// Out-of-place transpose: target[ix][iy] = src[iy][ix] for an ny x nx
// row-major src. Block coordinates are remapped before computing ix/iy —
// NOTE(review): blk_y = blockIdx.x and blk_x = (blockIdx.x + blockIdx.y)
// % gridDim.x looks like diagonal block reordering (partition-camping
// avoidance) and appears to assume a square grid (gridDim.x == gridDim.y)
// — TODO confirm; with a non-square grid some tiles are never visited.
void __global__ transpose(float *src, float *target, index_t nx, index_t ny) {
index_t blk_y = blockIdx.x;
index_t blk_x = (blockIdx.x + blockIdx.y) % gridDim.x;
index_t ix = blockDim.x * blk_x + threadIdx.x;
index_t iy = blockDim.y * blk_y + threadIdx.y;
if(ix < nx && iy < ny) {
target[ix * ny + iy] = src[iy * nx + ix];
}
}
// CPU reference transpose: src is row x column (row-major); target receives
// the column x row transpose.
// FIX: the strides were swapped (src indexed with `row`, target with
// `column`), which misindexed any non-square matrix. For square inputs the
// old and new code are identical, so existing callers are unaffected.
void __device__ __host__ transposeHost(float *src, float *target, index_t row ,index_t column) {
    for (index_t i = 0;i < row;++i) {
        for (index_t j = 0;j < column;++j) {
            target[j * row + i] = src[i * column + j];
        }
    }
}
// Compare a CPU transpose against the GPU kernel on a 512 x 512 matrix and
// print rough clock() timings for each.
int main() {
    int N = 1 << 18;
    cout << N;
    int nx = 1 << 9;
    int ny = 1 << 9;
    int blockx = 32;
    int blocky = 32;
    clock_t start, end;
    // FIX: `float a[N], b[N]` put 2 MiB on the stack — a stack-overflow risk;
    // heap-allocate instead (and size copies explicitly, not via sizeof).
    size_t bytes = (size_t)N * sizeof(float);
    float *a = new float[N];
    float *b = new float[N];
    for(int i = 0;i < N;i++) {
        a[i] = i;
    }
    start = clock();
    transposeHost(a, b, nx,ny);
    end = clock();
    cout << "cpu time:" << end -start <<endl;
    float *a_dev, *b_dev;
    cudaMalloc((void**)&a_dev, bytes);
    cudaMalloc((void**)&b_dev, bytes);
    cudaMemcpy(a_dev, a, bytes, cudaMemcpyHostToDevice);
    dim3 block(blockx, blocky);
    dim3 grid((nx + blockx - 1)/blockx,(ny + blocky - 1)/blocky);
    start = clock();
    // FIX: the launch arguments were swapped (<<<block, grid>>>), which only
    // worked by accident for this square power-of-two size.
    transpose<<<grid, block>>>(a_dev, b_dev, nx, ny);
    cudaDeviceSynchronize();
    end = clock();
    cudaMemcpy(a,b_dev, bytes, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cout << "gpu time:" << end -start <<endl;
    // a now holds the GPU result; b the CPU result. Report mismatches.
    for(int i = 0;i < N;i++) {
        if(a[i] != b[i]) {
            cout << a[i] << " "<<b[i] <<endl;
        }
    }
    delete[] a;
    delete[] b;
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.