serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
21,701 | #include <cstdio>
// Aborts the process with a file/line diagnostic if the most recent
// CUDA API call or kernel launch left an error in the runtime.
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
// Computes the 1D index of a value in a contiguous buffer
// given its 2D coordinates and the width of a row.
#define IDX(row, col, width) ((row)*(width)+(col)) // row-major linearization
// Element-wise 2D addition: c(i,j) = a(i,j) + b(i,j) for an N x M
// row-major matrix. Expects a 2D launch grid; threads falling outside
// the matrix do nothing.
__global__ void add(int *a, int *b, int *c, int N, int M) {
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= N || col >= M) return; // guard the grid tail
    int pos = IDX(row, col, M);
    c[pos] = a[pos] + b[pos];
}
// Allocates three N x M matrices in unified memory, initializes the
// inputs, runs the add kernel, and verifies every output element.
// Returns 0 on success, 1 if any element mismatched.
int main() {
    // 2048 rows and cols
    int N = 2 * 1024;
    int M = 2 * 1024;
    int *a, *b, *c;
    dim3 threads(32, 32);
    dim3 blocks(N / threads.x, M / threads.y); // N, M are multiples of 32
    // Unified memory allocation
    cudaMallocManaged(&a, N * M * sizeof(int));
    cudaMallocManaged(&b, N * M * sizeof(int));
    cudaMallocManaged(&c, N * M * sizeof(int));
    cudaCheckError();
    // Fix: the original never initialized a and b — cudaMallocManaged
    // returns indeterminate data. Give the inputs deterministic values.
    for (int i = 0; i < N * M; i++) {
        a[i] = i;
        b[i] = 2 * i;
    }
    // Kernel launch
    add<<<blocks, threads>>>(a, b, c, N, M);
    cudaDeviceSynchronize();
    cudaCheckError();
    // Check the results on the host
    bool error = false;
    for (int i = 0; i < N * M; i++) {
        if ((error = (a[i] + b[i] != c[i]))) { // parenthesized: assignment intended
            printf("ERROR at index %d.\n", i);
            break;
        }
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    cudaCheckError();
    if (!error) {
        printf("Test completed successfully.\n");
        return 0;
    } else {
        printf("WARNING there were some errors.\n");
        return 1;
    }
}
|
21,702 | #include "includes.h"
// Writes the first `savedCoeffs` coefficients of one dimsize x dimsize
// matrix along an anti-diagonal zig-zag path (JPEG-style layout —
// presumably DCT coefficients; confirm with the caller). One thread
// handles one matrix: `matrices` holds dense dimsize*dimsize blocks back
// to back, `coeffArray` holds savedCoeffs values per matrix.
// NOTE(review): only visited cells are written; the rest of each matrix
// is left untouched — presumably zeroed elsewhere. TODO confirm.
__global__ void implantCoeffs(float* matrices, float *coeffArray, int savedCoeffs, int dimsize){
// Flat thread id over a 3D block layout; one matrix per thread.
int id = blockIdx.x * blockDim.x * blockDim.y * blockDim.z
+ threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// (x, y): cell being written; (x_n, y_n): start cell of the current diagonal.
int offsetMatrix = id * dimsize * dimsize,
offsetCoeff = id * savedCoeffs,
coeffsLeft = savedCoeffs,
x, y, y_n = 0, x_n = 1,
numberinrow, tmp;
// The first coefficient always lands at the top-left corner (0,0).
matrices[offsetMatrix] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
coeffsLeft -= 1;
while (coeffsLeft > 0){
// Work out number in row (cells on the current anti-diagonal)
x = x_n;
y = y_n;
if (x_n < dimsize - 1){
numberinrow = x_n + 1;
}
else{
numberinrow = x_n - (y_n - 1);
}
if (numberinrow % 2 == 0){
// Even-length diagonal: alternate a plain swap with a swap-and-step.
while (numberinrow > 0 && coeffsLeft > 0){
matrices[offsetMatrix + x + y * dimsize] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
numberinrow--;
coeffsLeft--;
if ((numberinrow + 1) % 2 == 0){
// Swap x and y (mirror across the diagonal)
tmp = x;
x = y;
y = tmp;
}
else{
// Swap x and y, then step to the next cell on the diagonal
tmp = x;
x = y;
y = tmp;
x--;
y++;
}
}
}
else{
// Odd-length diagonal: handle pairs first, then the leftover cell.
while (numberinrow > 1 && coeffsLeft > 0){
matrices[offsetMatrix + x + y * dimsize] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
numberinrow--;
coeffsLeft--;
if ((numberinrow + 1) % 2 == 1){
// Swap x and y (mirror across the diagonal)
tmp = x;
x = y;
y = tmp;
}
else{
// Swap x and y, then step to the next cell on the diagonal
tmp = x;
x = y;
y = tmp;
x--;
y++;
}
}
if (coeffsLeft > 0){
// add the odd one (the unpaired middle cell of this diagonal)
matrices[offsetMatrix + x + y * dimsize] = coeffArray[offsetCoeff + (savedCoeffs - coeffsLeft)];
numberinrow--;
coeffsLeft--;
}
}
// Advance the diagonal start: walk right along the top edge, then down.
if (x_n == dimsize - 1){
y_n++;
}
else{
x_n++;
}
}
}
21,703 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
// Total number of elements; a multiple of THREAD_PER_BLOCK so the
// one-dimensional launch grid covers the array exactly.
#define N (2048*2048)
// Threads per block used for the reverse kernel launch.
#define THREAD_PER_BLOCK 512
// Copies array a into b in reverse order, one element per thread.
// The launch grid must cover the array exactly (no bounds check).
__global__ void reverse(int* a, int* b){
    int src = threadIdx.x + blockIdx.x * blockDim.x;
    int dst = gridDim.x * blockDim.x - src - 1;
    b[dst] = a[src];
}
// Despite the name, fills p[0..n-1] with the sequence 0, 1, ..., n-1;
// deterministic values make the reversal check easy to verify.
void random_ints(int *p, int n) {
    for (int k = 0; k < n; ++k)
        p[k] = k;
}
// Reverses an N-element array on the GPU and validates the result
// against a host-side reversal. Prints "correct!" when all elements match.
int main(void) {
    int *in, *out, *test;
    int *dev_in, *dev_out;
    int size = N*sizeof(int);
    int i;
    cudaMalloc( (void**)&dev_in, size );
    cudaMalloc( (void**)&dev_out, size );
    in = (int*)malloc( size );
    out = (int*)malloc( size );
    test = (int*)malloc( size );
    random_ints(in, N );
    cudaMemcpy( dev_in, in, size, cudaMemcpyHostToDevice );
    reverse<<< N/THREAD_PER_BLOCK, THREAD_PER_BLOCK >>>(dev_in, dev_out);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(out, dev_out, size, cudaMemcpyDeviceToHost);
    // Build the expected reversal on the host.
    for(i = 0; i < N; i++){
        test[N-i-1] = in[i];
    }
    for(i = 0; i < N; i++){
        if(test[i]!= out[i]){
            printf("error: expected %d, got %d!\n",test[i], out[i]);
            break;
        }
    }
    if(i==N) {
        printf("correct!\n");
    }
    free(in); free(out);
    free(test); // fix: the original leaked this buffer
    cudaFree(dev_in);
    cudaFree(dev_out);
    return 0;
}
|
// Converts ASCII lowercase letters of a to uppercase in b; every other
// byte is copied unchanged. One byte per thread; the launch size must not
// exceed the buffer length (there is no bounds check).
extern "C" __global__ void uppercase(char* b, char* a) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    char ch = a[i];
    b[i] = (ch >= 'a' && ch <= 'z') ? ch - ('a' - 'A') : ch;
}
21,705 | /*
mini EP 11
NOME: Your name here
NUSP: Your NUSP here
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
// Returns the current wall-clock time (CLOCK_REALTIME) in milliseconds.
long getMS() {
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    return now.tv_sec * 1000 + now.tv_nsec / 1000000;
}
// number of benchmark repetitions averaged in main
#define NTESTS 10
// fixed RNG seed so every run benchmarks identical data
#define SEED 123456
// number of rows (one GPU thread per row)
#define SIZE 1024
// elements summed per row
#define WIDTH 512
// kernel-internal repetitions to amplify the timed work
#define ROUNDS 1000
// forward declarations; each returns the elapsed milliseconds of one run
long cudaSum(int *);
long cudaIfSum(int *);
// Benchmark driver: repeatedly fills a SIZE x WIDTH buffer with seeded
// pseudo-random values, times both kernels, and prints the averages.
int main() {
    srand(SEED);
    long cudaTime = 0, cudaIfTime = 0;
    int *reference = (int *)malloc(sizeof(int) * SIZE * WIDTH);
    for (int t = 0; t < NTESTS; t++) {
        for (int j = 0; j < SIZE * WIDTH; j++)
            reference[j] = rand() % 5096;
        cudaTime += cudaSum(reference);
        cudaIfTime += cudaIfSum(reference);
    }
    free(reference);
    printf("Average cudaTime %ldms\nAvarage cudaIfTime %ldms\n", cudaTime/NTESTS, cudaIfTime/NTESTS);
}
// Conditional Vector Sum: one thread per row (off), summing the integer
// square roots of the row's WIDTH values, ROUNDS times.
// Note: both branch arms are textually identical — presumably the point
// of the benchmark is to measure the cost of the data-dependent branch
// itself versus cudaSumGPU's branch-free loop. TODO confirm intent.
// The per-row result is stored just past the input region at
// ints[WIDTH*SIZE + off].
__global__ void cudaIfSumGPU(int *ints) {
int sum = 0;
int off = blockIdx.x*32 + threadIdx.x;
for(int j = 0; j < ROUNDS; j++) {
for(int i = 0; i < WIDTH; i++) {
if(ints[WIDTH*off+i] % 2)
sum += (int) sqrt((double)ints[WIDTH*off+i]);
else
sum += (int) sqrt((double)ints[WIDTH*off+i]);
}
sum = sum/128;
}
ints[WIDTH*SIZE+off] = sum;
}
// Host driver for the branchy kernel: uploads refs, launches the kernel,
// reads back and accumulates the per-row results, and returns the elapsed
// wall-clock milliseconds (kernel + readback + host accumulation).
long cudaIfSum(int *refs) {
    int *devBuf;
    int partial[SIZE];
    // One extra "row" of SIZE ints holds the per-thread results.
    cudaMalloc(&devBuf, sizeof(int)*SIZE*(WIDTH+1));
    cudaMemcpy(devBuf, refs, sizeof(int)*WIDTH*SIZE, cudaMemcpyHostToDevice);
    long started = getMS();
    cudaIfSumGPU<<<32,SIZE/32>>>(devBuf);
    cudaMemcpy(partial, devBuf+(WIDTH*SIZE), sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
    int total = 0;
    for (int k = 0; k < SIZE; k++)
        total += partial[k];
    long finished = getMS();
    cudaFree(devBuf);
    printf("CUDA IF SUM: %d\n", total);
    return finished - started;
}
// Non Conditional Vector Sum: branch-free counterpart of cudaIfSumGPU.
// One thread per row; the result goes to ints[WIDTH*SIZE + off].
__global__ void cudaSumGPU(int *ints) {
    int off = blockIdx.x*32 + threadIdx.x;
    int acc = 0;
    for (int r = 0; r < ROUNDS; r++) {
        for (int k = 0; k < WIDTH; k++)
            acc += (int) sqrt((double) ints[WIDTH*off+k]);
        acc = acc/128;
    }
    ints[WIDTH*SIZE+off] = acc;
}
// Host driver for the branch-free kernel; mirrors cudaIfSum and returns
// the elapsed wall-clock milliseconds of kernel + readback + accumulation.
long cudaSum(int *refs) {
    int *devBuf;
    int partial[SIZE];
    // One extra "row" of SIZE ints holds the per-thread results.
    cudaMalloc(&devBuf, sizeof(int)*SIZE*(WIDTH+1));
    cudaMemcpy(devBuf, refs, sizeof(int)*WIDTH*SIZE, cudaMemcpyHostToDevice);
    long started = getMS();
    cudaSumGPU<<<32,SIZE/32>>>(devBuf);
    cudaMemcpy(partial, devBuf+(WIDTH*SIZE), sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
    int total = 0;
    for (int k = 0; k < SIZE; k++)
        total += partial[k];
    long finished = getMS();
    cudaFree(devBuf);
    printf("CUDA SUM: %d\n", total);
    return finished - started;
}
|
21,706 | #include "includes.h"
// Splits an interleaved float3 image into three planar channels stored
// back to back in dst: all x components, then all y, then all z.
__global__ void imageSplitKernel(float3 *ptr, float *dst, int width, int height)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= width || row >= height) {
        return; // guard the grid tail
    }
    int pixel = row * width + col;
    int plane = width * height;
    float3 rgb = ptr[pixel];
    dst[pixel] = rgb.x;
    dst[pixel + plane] = rgb.y;
    dst[pixel + 2 * plane] = rgb.z;
}
21,707 | #include "includes.h"
// Leaky-ReLU over float4-packed data: components below zero are scaled by
// negative_slope, the rest pass through. elem_count is in float4 units.
__global__ void rectified_linear_kernel( float4 * __restrict output, const float4 * __restrict input, float negative_slope, int elem_count)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= elem_count)
        return;
    float4 v = input[idx];
    v.x = (v.x < 0.0F) ? v.x * negative_slope : v.x;
    v.y = (v.y < 0.0F) ? v.y * negative_slope : v.y;
    v.z = (v.z < 0.0F) ? v.z * negative_slope : v.z;
    v.w = (v.w < 0.0F) ? v.w * negative_slope : v.w;
    output[idx] = v;
}
21,708 | #include "includes.h"
// Sets element [i, j] of a pitched 2D float array to change_to.
// Launch shape: one block per row (blockIdx.x) and one thread per column
// (threadIdx.x). pitch is the row stride in bytes; width and height are
// unused here.
__global__ void modify_i_j( int width, int height, int pitch, float *d_array, int i, int j, float change_to ){
    int row = blockIdx.x;
    int col = threadIdx.x;
    if (row != i || col != j)
        return; // only the one matching thread writes
    // pitch is in bytes — convert to a float-element stride for indexing.
    d_array[row * (pitch / sizeof(float)) + col] = change_to;
}
21,709 | #include <cstdio>
// Extracts bit (k-1) of a — the key bit examined in pass k.
#define getPos(a,k) (((a)>>(k-1))&1)
extern "C" {
// Per-block inclusive scan of key bit k over input_T, 1024 threads/block.
// Each thread walks blockPower chunks spaced 1024*gridDim.x apart.
// prefix_T receives the within-block inclusive prefix sums;
// prefix_helper_T[chunk*gridDim.x + block + 1] gets each block's total so
// a later pass can build global offsets (presumably scanned by the caller
// before `replace` runs — TODO confirm).
__global__ void prefixSum(int * input_T, int * prefix_T, int * prefix_helper_T, int n, int k, int blockPower) {
__shared__ int tmp_T[1024];
for(int i = 0; i<blockPower; i++) {
if(threadIdx.x + 1024*blockIdx.x + i*1024*gridDim.x >= n) return;
tmp_T[threadIdx.x] = input_T[threadIdx.x + 1024*blockIdx.x + i*1024*gridDim.x];
tmp_T[threadIdx.x] = getPos(tmp_T[threadIdx.x],k);
// Hillis-Steele style doubling scan over the 1024-entry shared buffer;
// the two barriers per step separate the read from the accumulate.
int val,kk = 1;
while(kk <= 512) {
__syncthreads();
if(kk <= threadIdx.x) val = tmp_T[threadIdx.x - kk];
__syncthreads();
if(kk <= threadIdx.x) tmp_T[threadIdx.x] += val;
kk *= 2;
}
__syncthreads();
prefix_T[threadIdx.x + 1024*blockIdx.x + i*1024*gridDim.x] = tmp_T[threadIdx.x];
// The last active thread of the chunk publishes the block total.
if(threadIdx.x == 1023 || threadIdx.x + 1024*blockIdx.x + i*1024*gridDim.x == n-1) prefix_helper_T[i*gridDim.x + blockIdx.x + 1] = tmp_T[threadIdx.x];
}
}
// Stable scatter by bit k (one radix-sort pass): elements whose bit is 0
// keep relative order at the front, elements whose bit is 1 follow after
// all zeros, using the scan results to compute destinations.
__global__ void replace(int * input_T, int * output_T, int * prefix_T, int * prefix_helper_T, int n, int k, int blockPower) {
for(int i = 0; i<blockPower; i++) {
int oldpos = threadIdx.x + 1024*blockIdx.x + i*1024*gridDim.x;
if(oldpos >= n) return ;
// Number of 1-bits at or before oldpos: block scan + block offset.
int newpos = prefix_T[oldpos] + prefix_helper_T[blockIdx.x + i*gridDim.x];
if(getPos(input_T[oldpos],k) == 0) {
newpos = oldpos - newpos;
} else {
// prefix_helper_T[(n+1023)/1024] holds the total count of zeros' complement
// base — the start of the ones region. TODO confirm against the host code.
newpos = prefix_helper_T[(n+1023)/1024] + newpos - 1;
}
output_T[newpos] = input_T[oldpos];
}
}
}
|
21,710 | #include "includes.h"
// For every cell t connected to `cell` (connection[cell*maxCells+t] == 1),
// increments the link age symmetrically in both the [cell][t] and
// [t][cell] slots of the maxCells x maxCells age matrix.
__global__ void IncrementConnectionAgeKernel( int cell, int *connection, int *age, int maxCells )
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x  // rows preceding current row in grid
                 + blockDim.x*blockIdx.x            // blocks preceding current block
                 + threadIdx.x;
    if (threadId >= maxCells)
        return; // guard the grid tail
    int forward = cell * maxCells + threadId;
    int backward = threadId * maxCells + cell;
    if (connection[forward] == 1)
    {
        age[forward] += 1;
        age[backward] += 1;
    }
}
21,711 | #ifndef _UTIL_CU
#define _UTIL_CU
// Fills the N-word buffer x with zeros.
template<int N> __device__ void zero(unsigned int *x)
{
#pragma unroll
    for(int k = N; k-- > 0; ) {
        x[k] = 0;
    }
}
// Copies the N-word buffer a into b.
template<int N> __device__ void copy(const unsigned int *a, unsigned int *b)
{
#pragma unroll
    for(int k = 0; k != N; ++k) {
        b[k] = a[k];
    }
}
#endif |
21,712 | /* CSCI 563 Programming Assignment 2 Part 2
Clayton Kramp
*/
#include <iostream>
#include <fstream>
using namespace std;
// Device kernel to transpose a matrix: B (col x row) = A^T where A is
// row x col, both row-major. One element per thread; out-of-range
// threads exit early.
__global__ void transpose(int* A, int* B, int row, int col) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < row && c < col)
        B[c*row + r] = A[r*col + c];
}
// Reads a col x row header and a row x col matrix from the file named in
// argv[1], transposes it on the GPU, and prints the transposed matrix.
// Returns 0 on success, -1 on argument or file errors.
int main(int argc, char* argv[]) {
    if (argc != 2) {
        cerr << "Arguments error" << endl;
        return -1;
    }
    ifstream file(argv[1]);
    if (!file.good()) {
        cerr << "Bad input" << endl;
        return -1;
    }
    int row, col;
    file >> col >> row;
    // Matrix A: one contiguous row*col block plus row pointers into it.
    int** A = new int*[row];
    A[0] = new int[row*col];
    for (int i = 1; i < row; i++) A[i] = A[i-1] + col;
    // Fill in matrix A in host
    for (int i = 0; i < row; i++) {
        for (int j = 0; j < col; j++) {
            int element;
            file >> element;
            A[i][j] = element;
        }
    }
    file.close();
    // Copy matrix to device memory
    int* deviceA;
    int bytes = row * col * sizeof(int);
    cudaMalloc(&deviceA, bytes);
    cudaMemcpy(deviceA, A[0], bytes, cudaMemcpyHostToDevice);
    // Create the fill-in matrix B (col rows of length row)
    int** B = new int*[col];
    B[0] = new int[row*col];
    for (int i = 1; i < col; i++) B[i] = B[i-1] + row;
    int* deviceB;
    cudaMalloc(&deviceB, bytes);
    dim3 threadsPerBlock(8, 8, 1);
    dim3 numBlocks((col + threadsPerBlock.x-1) / threadsPerBlock.x,
                   (row + threadsPerBlock.y-1) / threadsPerBlock.y, 1);
    // Call the actual function
    transpose<<<numBlocks, threadsPerBlock>>>(deviceA, deviceB, row, col);
    // The blocking device-to-host copy below also synchronizes the kernel.
    cudaMemcpy(B[0], deviceB, bytes, cudaMemcpyDeviceToHost);
    // Print out the info to console
    cout << row << " " << col << endl;
    for (int i = 0; i < col; i++) {
        for (int j = 0; j < row; j++) {
            cout << B[i][j] << " ";
        }
        cout << endl;
    }
    // Fix: arrays allocated with new[] must be released with delete[]
    // (plain delete was undefined behavior). The original also leaked an
    // unused `int* count = new int;` allocation, removed here.
    delete[] A[0];
    delete[] A;
    delete[] B[0];
    delete[] B;
    cudaFree(deviceA);
    cudaFree(deviceB);
    return 0;
}
|
21,713 | /*
* Matrix multiplication based on modified NVIDIA samples code
* Copyright (C) 2014 René Oertel (rene.oertel@cs.tu-chemnitz.de)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* TODO: Add commandline arguments for matrix sizes
*
* Last modifications: oere, 2014-05-21, 11:05
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <getopt.h>
// Runtime configuration: CUDA device ordinal and matrix dimensions.
typedef struct config config_t;
struct config {
int device; // CUDA device ordinal to run on
int wA; // width of matrix A
int hA; // height of matrix A
int wB; // width of matrix B
int hB; // height of matrix B
};
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B
 * wA is A's width and wB is B's width
 *
 * Naive (non-tiled) version: each thread computes one C element by
 * walking a full row of A and column of B in global memory. There is no
 * bounds check, so the launch grid must cover C exactly — the caller
 * launches with dimensions divisible by BLOCK_SIZE.
 * NOTE(review): the BLOCK_SIZE template parameter is currently unused in
 * the body (no shared-memory tiling yet).
 */
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Accumulate the dot product of A's row and B's column in a register.
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < wA; ++e) {
Cvalue += A[row * wA + e] * B[e * wB + col];
}
C[row * wB + col] = Cvalue;
}
// Fills data[0..size-1] with the constant val.
void constantInit(float *data, int size, float val)
{
    int i = 0;
    while (i < size)
        data[i++] = val;
}
/**
 * Allocates A, B, C on host and device, runs matrixMulCUDA nIter times,
 * reports performance figures, and validates the result against the
 * analytic value dimsA.x * valB (A is all 1.0f, B is all valB).
 * Returns EXIT_SUCCESS when every element is within tolerance.
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
printf("=====================================\n");
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory with constants so the result is analytic
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C (dimsC = width of B x height of A)
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters; the grid assumes the matrix dimensions
// are exact multiples of block_size (no remainder blocks are launched).
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
cudaDeviceSynchronize();
// Create and start timer
printf("Computing result using CUDA Kernel ... ");
fflush(NULL);
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel nIter times and average the elapsed time
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("done\n");
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
// NOTE(review): gF multiplies a rate by a duration — the units of this
// "overall GFlops" figure look inconsistent; verify the intended metric.
double gF = gigaFlops * msecPerMatrixMul * 1000;
printf("> overall GFlops = %.2f\n", gF);
printf(
"Performance = %.2f GFlops/sec\nTime = %.3f msec\nSize = %.0f Ops\nWorkgroupSize = %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
// Every C element should equal dimsA.x * valB given the constant inputs.
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "PASS" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaDeviceReset();
printf("=====================================\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/* Prints the command-line usage banner to stdout and terminates the
 * process with status 0. Never returns. */
void usage(int argc, char *argv[])
{
    printf("\nUsage: %s [OPTION]...\n"
           "Available arguments:\n\n"
           "\t-d/--device=<ID> Choose device (Default: 0)\n",
           argv[0]);
    exit(0);
}
/* Parses commandline arguments with getopt_long, overriding fields of
 * *config. Supports -d/--device=<ID> (stores the device id and announces
 * it) and -h/--help (prints usage and exits). Unknown options are
 * silently ignored. */
void parse_opts(int argc, char *argv[], config_t *config)
{
    const char *short_options = "d:h";
    const struct option long_options[] = {
        {"device", 1, NULL, 'd'},
        {"help", 0, NULL, 'h'},
        {0, 0, 0, 0}
    };
    int option_index = 0;
    for (;;) {
        int option = getopt_long(argc, argv, short_options,
                                 long_options, &option_index);
        if (-1 == option)
            break; // all arguments consumed
        switch (option) {
        case 'd':
            config->device = atoi(optarg);
            printf("Device %d selected\n", config->device);
            break;
        case 'h': /* -h or --help */
            usage(argc, argv);
            break;
        default:
            break;
        }
    }
}
// Program entry point: selects and validates the CUDA device, picks a
// block size from the compute capability, sets the (fixed 1024x1024)
// matrix dimensions, and runs the benchmark. Exits with the benchmark's
// status code.
int main(int argc, char **argv)
{
cudaError_t error;
cudaDeviceProp deviceProp;
dim3 dimsA(0, 0, 1);
dim3 dimsB(0, 0, 1);
// Set default configuration, i.e. device id and matrix sizes
config_t config = { .device = 0, .wA = 1024, .hA = 1024, .wB = 1024, .hB = 1024 };
// Parse commandline arguments and override default configuration
parse_opts(argc, argv, &config);
// Set device to use
error = cudaSetDevice(config.device);
if (error != cudaSuccess)
{
printf("Error: cudaSetDevice(%d) returned error code %d, line(%d): %s\n", config.device, error, __LINE__, cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Check if device is usable
error = cudaGetDeviceProperties(&deviceProp, config.device);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("Error: cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", config.device, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// Set matrix sizes
dimsA.x = config.wA;
dimsA.y = config.hA;
dimsB.x = config.wB;
dimsB.y = config.hB;
// A's width must match B's height for A * B to be defined.
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d x %d), MatrixB(%d x %d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
21,714 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
/* For each run of equal values in src, the last edge of the run carries
 * that segment's final scan value: output[src[i]] = scanResult[i].
 * Grid-stride loop, so any launch configuration covers all numEdges
 * edges. */
__global__ void collateSegments_gpu(int * src, int * scanResult, int * output, int numEdges) {
	int stride = blockDim.x * gridDim.x;
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	for (int i = tid; i < numEdges; i += stride) {
		// Checking i == numEdges - 1 first short-circuits the comparison and
		// fixes the out-of-bounds read src[numEdges] the original performed
		// on the final element. (The original's trailing dead `return` block
		// has been removed.)
		if (i == numEdges - 1 || src[i] != src[i + 1]) {
			output[src[i]] = scanResult[i];
		}
	}
}
|
21,715 | #include <stdio.h>
#include <vector>
#include <string>
#include <fstream>
#include <cmath>
#include <ctime>
#include <stack>
#include <sstream>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <algorithm>
#include <climits>
#include <bitset>
#include <set>
#include <sys/time.h>
#include <bits/stdc++.h>
using namespace std;
// Debug helper: prints an expression's source text and value to stderr.
#define print_to_err(x) do { std::cerr << #x << ": " << x << std::endl; } while (0)
// Number of set bits in x (GCC/Clang builtin).
#define bitcount(x) __builtin_popcount(x)
// Formats num in base 10 and returns the decimal string.
string int2str(int num)
{
    ostringstream out;
    out << num;
    return out.str();
}
// Parses a leading base-10 integer from str. The result is unspecified
// when str does not begin with a number (the extraction fails and the
// uninitialized value is returned, matching the original behavior).
int str2int(string str)
{
    istringstream parser(str);
    int value;
    parser >> value;
    return value;
}
// Paths set from argv in main before readfile()/outfile() run.
string input_file="";
string output_file="";
// Matrix A as COO triplets (row, col, value), filled by readfile().
vector<long long int> matrix_A_row;
vector<long long int> matrix_A_col;
vector<long long int> matrix_A_data;
// Dense input vector and result vector (y = A * B).
vector<long long int> matrix_B;
vector<long long int> matrix_C;
// CSR-style row-pointer vector of length dimension+1 built by readfile().
vector<long long int> ptr_vec;
// Square matrix dimension parsed from the input file header.
long long int dimension;
// Parses input_file into the module-level globals: matrix A as COO
// triplets plus the CSR row-pointer vector ptr_vec, and the dense vector
// matrix_B. Expected layout: a name line, a dimension line whose number
// starts after a ~10-character label, a header line, "row col value"
// triplets until a literal "B" token, then `dimension` vector values.
// matrix_C is reset to `dimension` zeros.
void readfile()
{
ifstream infile;
// Build a NUL-terminated copy of the path for ifstream::open.
char input_file_array[input_file.length()+1];
for(int i=0;i<input_file.length();i++)
{
input_file_array[i]=input_file.at(i);
}
input_file_array[input_file.length()]='\0';
infile.open(input_file_array);
//Name Line
string name_line;
getline(infile,name_line);
//Dimension Line
string dimension_line;
getline(infile,dimension_line);
// print_to_err(dimension_line);
// Skip the label prefix (first 10 chars) and padding spaces before the
// dimension number.
int dimension_line_length=int(dimension_line.length());
int start_dimension_line;
for(int i=10;i<dimension_line_length;i++)
{
if(dimension_line.at(i)!=' ')
{
start_dimension_line=i;
break;
}
}
string dimension_string=dimension_line.substr(start_dimension_line,(dimension_line_length- start_dimension_line));
// print_to_err(dimension_string);
istringstream(dimension_string)>>dimension;
ptr_vec.resize(dimension+1);
//A
string garbage_line;
getline(infile,garbage_line);
long long int temp_inp_a;
string b_detect;
long long int curr_row=(-1);
long long int count_ptr=0;
while(true)
{
infile>>b_detect;
if(b_detect=="B")
{
// End of the triplet section: close out row pointers for any
// trailing rows without nonzeros.
for(long long int j=(curr_row+1);j<=dimension;j++)
{
//check this
ptr_vec[j]=count_ptr;
}
break;
}
istringstream (b_detect)>>temp_inp_a;
matrix_A_row.push_back(temp_inp_a);
// A new row (or rows) started: record where each begins in the data.
if(curr_row<temp_inp_a)
{
for(long long int j=(curr_row+1);j<=temp_inp_a;j++)
{
ptr_vec[j]=count_ptr;
}
curr_row=temp_inp_a;
}
infile>>temp_inp_a;
matrix_A_col.push_back(temp_inp_a);
infile>>temp_inp_a;
matrix_A_data.push_back(temp_inp_a);
count_ptr+=1;
}
// Read the dense vector B, one value per entry.
matrix_B.resize(dimension);
for(int i=0;i<dimension;i++)
{
infile>>matrix_B[i];
}
matrix_C.clear();
matrix_C.resize(dimension,0);
infile.close();
}
// Writes the result vector matrix_C, one value per line, to output_file.
void outfile()
{
    ofstream result;
    // Build a NUL-terminated copy of the configured output path.
    char path[output_file.length()+1];
    for (int i = 0; i < output_file.length(); i++)
    {
        path[i] = output_file.at(i);
    }
    path[output_file.length()] = '\0';
    result.open(path);
    for (int i = 0; i < dimension; i++)
    {
        result << matrix_C[i] << endl;
    }
    result.close();
}
// CSR sparse-matrix / dense-vector product, one 32-thread warp per row:
// y[row] += sum over the row's nonzeros of data[jj] * x[indices[jj]].
// The caller launches 32 threads per block, so each block is one warp
// and blockIdx.x selects the row.
// NOTE(review): the shared-memory reduction below has no __syncwarp or
// volatile qualifier — it relies on implicit warp-synchronous execution,
// which is not guaranteed under Volta+ independent thread scheduling.
// Verify on the target architecture.
__global__ void spmv_csr_vector_kernel ( long long int num_rows ,long long int * ptr ,long long int * indices ,long long int * data ,long long int * x ,long long int * y)
{
__shared__ long long int vals [32];
long long int thread_id = blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
long long int warp_id = thread_id / 32; // global warp index
int lane = thread_id & (32 - 1); // thread index within the warp
// one warp per row
long long int row = warp_id ;
if ( row < num_rows ){
long long int row_start = ptr [ row ];
long long int row_end = ptr [ row +1];
// compute running sum per thread: each lane strides the row by 32
vals [ threadIdx.x ] = 0;
for ( long long int jj = row_start + lane ; jj < row_end ; jj += 32)
vals [ threadIdx.x ] += data [ jj ] * x [ indices [ jj ]];
// parallel reduction in shared memory (tree halving: 16, 8, 4, 2, 1)
if ( lane < 16) vals [ threadIdx.x ] += vals [ threadIdx.x + 16];
if ( lane < 8) vals [ threadIdx.x ] += vals [ threadIdx.x + 8];
if ( lane < 4) vals [ threadIdx.x ] += vals [ threadIdx.x + 4];
if ( lane < 2) vals [ threadIdx.x ] += vals [ threadIdx.x + 2];
if ( lane < 1) vals [ threadIdx.x ] += vals [ threadIdx.x + 1];
// first thread writes the result (accumulates into y, not overwrite)
if ( lane == 0)
y[ row ] += vals [ threadIdx.x ];
}
}
// Entry point: reads a sparse matrix and a vector from argv[1], runs the
// CSR SpMV kernel (one 32-thread warp per row), and writes the result
// vector to argv[2].
int main(int argc, char *argv[])
{
    // Fix: the original dereferenced argv[1]/argv[2] without checking argc.
    if (argc < 3)
    {
        cerr << "usage: " << argv[0] << " <input-file> <output-file>" << endl;
        return 1;
    }
    string temp_inpfile(argv[1]);
    input_file = temp_inpfile;
    string temp_outfile(argv[2]);
    output_file = temp_outfile;
    readfile();
    long long int matrix_A_nonzero_elem = (long long int)(matrix_A_data.size());
    long long int *d_ptr_vector, *d_matrixA_col, *d_matrixA_data, *d_matrixB, *d_multiply_answer;
    long long int nonzero_size = matrix_A_nonzero_elem * sizeof(long long int);
    long long int dimension_size = (dimension * sizeof(long long int));
    cudaMalloc((void **)&d_ptr_vector, ((dimension + 1) * sizeof(long long int)));
    cudaMalloc((void **)&d_matrixA_col, nonzero_size);
    cudaMalloc((void **)&d_matrixA_data, nonzero_size);
    cudaMalloc((void **)&d_matrixB, dimension_size);
    cudaMalloc((void **)&d_multiply_answer, dimension_size);
    cudaMemcpy(d_ptr_vector, (&ptr_vec[0]), ((dimension + 1) * sizeof(long long int)), cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrixA_col, (&matrix_A_col[0]), nonzero_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrixA_data, (&matrix_A_data[0]), nonzero_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrixB, (&matrix_B[0]), dimension_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_multiply_answer, (&matrix_C[0]), dimension_size, cudaMemcpyHostToDevice);
    // One 32-thread block (= one warp) per matrix row.
    spmv_csr_vector_kernel<<<dimension, 32>>>(dimension, d_ptr_vector, d_matrixA_col, d_matrixA_data, d_matrixB, d_multiply_answer);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy((&matrix_C[0]), d_multiply_answer, dimension_size, cudaMemcpyDeviceToHost);
    outfile();
    // Fix: release the device buffers (the original leaked all five).
    cudaFree(d_ptr_vector);
    cudaFree(d_matrixA_col);
    cudaFree(d_matrixA_data);
    cudaFree(d_matrixB);
    cudaFree(d_multiply_answer);
    return 0;
}
|
21,716 | #include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <iostream>
// Cell storage type: one byte per Game-of-Life cell (0 = dead, 1 = alive).
typedef unsigned char CELL_DT;
// Square tile edge for the 2D thread blocks.
#define CUDA_BLOCK_SIZE 32
using namespace std;
// Wraps a CUDA call and aborts with file/line context on failure.
#define cudaErrchk(ans) cudaAssert((ans), __FILE__, __LINE__)
inline void cudaAssert(cudaError_t code, string file, int line){
if (code != cudaSuccess){
cerr << "CUDA Error: " << cudaGetErrorString(code) << "; file: " << file << ", line:" << line << endl;
exit(-1);
}
}
/*
*********************************************************************
* func name: gol_live_neighbor_cnt
* description: Add up all live neighbors and return the number of
* live neighbors
* parameters :
* none
* return: none
*********************************************************************
*/
/*
 * One 3D Game-of-Life step. Each thread owns one (x, y) column and
 * marches along z, keeping three z-slices (behind/current/infront) in
 * shared memory. b2r_i shrinks the updated region by one halo ring per
 * outer iteration of the caller's loop.
 * Rules applied (26-neighbor): <=1 live neighbors -> dies; exactly 5
 * around an empty cell -> born; >=8 -> dies of overcrowding.
 * NOTE(review): live_cnt is only assigned when update_flag is true, yet
 * it is read unconditionally in the rule checks at the bottom of the
 * loop — threads with update_flag == false branch on an uninitialized
 * value and may write halo cells. Confirm intended behavior.
 */
__global__ void gol_3d_kernel(CELL_DT* gol_grid_in, CELL_DT* gol_grid_out, int Ngx, int Ngy, int Ngz, int b2r_i)
{
__shared__ CELL_DT behind[CUDA_BLOCK_SIZE+2][CUDA_BLOCK_SIZE+2];
__shared__ CELL_DT current[CUDA_BLOCK_SIZE+2][CUDA_BLOCK_SIZE+2];
__shared__ CELL_DT infront[CUDA_BLOCK_SIZE+2][CUDA_BLOCK_SIZE+2];
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
// Threads outside the shrinking active region exit immediately.
if(ix >= Ngx-b2r_i || iy >= Ngy-b2r_i || ix < b2r_i || iy < b2r_i)
{
return;
}
int tx = threadIdx.x + 1; // physical id_x (due to halo storage)
int ty = threadIdx.y + 1; // physical id_y (due to halo storage)
int stride = Ngx*Ngy; // elements per z-slice
int in_2d = stride*b2r_i + iy*Ngx + ix;
int out_2d;
int live_cnt;
// True only for threads strictly inside the updatable interior.
bool update_flag = ix >= b2r_i+1 && ix <= (Ngx-2-b2r_i) && iy >= 1+b2r_i && iy <= (Ngy-2-b2r_i);
// Prime the slice pipeline: load the first two z-slices.
current[ty][tx] = gol_grid_in[in_2d];
out_2d = in_2d; // current
in_2d += stride; // one layer ahead
infront[ty][tx] = gol_grid_in[in_2d];
in_2d += stride; // two layers ahead
for(int i=b2r_i+1; i<=Ngz-2-b2r_i; i++)
{
// Shift the three-slice window one step along z.
behind[ty][tx] = current[ty][tx];
current[ty][tx]= infront[ty][tx];
infront[ty][tx]= gol_grid_in[in_2d];
in_2d += stride;
out_2d += stride;
__syncthreads();
if (update_flag)
{
// Border threads of the block fetch the halo ring from global memory.
if(threadIdx.x == 0){ // Halo left
current[ty][tx-1] = gol_grid_in[out_2d - 1];
}
if(threadIdx.x == CUDA_BLOCK_SIZE-1){ // Halo right
current[ty][tx+1] = gol_grid_in[out_2d + 1];
}
if(threadIdx.y == 0){ // Halo bottom
current[ty-1][tx] = gol_grid_in[out_2d - Ngx];
}
if(threadIdx.y == CUDA_BLOCK_SIZE-1){ // Halo top
current[ty+1][tx] = gol_grid_in[out_2d + Ngx];
}
}
__syncthreads();
if (update_flag){ // the update_flag limitted edge, ±1 will not exceed border
// Sum all 26 neighbors across the three cached slices.
live_cnt = infront[ty-1][tx-1] + infront[ty-1][tx] + infront[ty-1][tx+1]
+ infront[ty][tx-1] + infront[ty][tx] + infront[ty][tx+1]
+ infront[ty+1][tx-1] + infront[ty+1][tx] + infront[ty+1][tx+1]
+ current[ty-1][tx-1] + current[ty-1][tx] + current[ty-1][tx+1]
+ current[ty][tx-1] + current[ty][tx+1]
+ current[ty+1][tx-1] + current[ty+1][tx] + current[ty+1][tx+1]
+ behind[ty-1][tx-1] + behind[ty-1][tx] + behind[ty-1][tx+1]
+ behind[ty][tx-1] + behind[ty][tx] + behind[ty][tx+1]
+ behind[ty+1][tx-1] + behind[ty+1][tx] + behind[ty+1][tx+1];
}
if(current[ty][tx] && live_cnt <= 1){ // with only 1 or less neighbours die, as if by lonliness.
gol_grid_out[out_2d] = 0;
}else if(0 == current[ty][tx] && live_cnt == 5){ // If 5 cells surround an empty cell, they breed
gol_grid_out[out_2d] = 1;
}else if(current[ty][tx] && live_cnt >= 8){ // If a cell has 8 or more neighbours, it dies from overcrowding.
gol_grid_out[out_2d] = 0;
}
__syncthreads();
}
}
/*
*********************************************************************
* func name: gol_main
* description: update cells, rules include:
(1) Cells (in this case, cubes) with only 1
or less neighbours die, as if by lonliness.
(2) If 5 cells surround an empty cell, they breed
and fill it.
(3) If a cell has 8 or more neighbours,
it dies from overcrowding.
* parameters :
* none
* return: none
*********************************************************************
*/
double gol_3D_gpu_main(CELL_DT * h_in, int b2r_R, char *ef, int Ngx, int Ngy, int Ngz)
{
    // Runs b2r_R steps of gol_3d_kernel on the GPU and returns the kernel-loop
    // time in seconds. h_in (Ngx*Ngy*Ngz cells) is updated in place.
    int in_grid_size = sizeof(CELL_DT)*Ngx*Ngy*Ngz;
    CELL_DT *d_temp1, *d_temp2;
    CELL_DT *pd_temp;
    cudaErrchk( cudaMalloc((void**)&d_temp1, in_grid_size) );
    cudaErrchk( cudaMalloc((void**)&d_temp2, in_grid_size) );
    cudaErrchk( cudaMemcpy((void *)d_temp1, (void *)h_in, in_grid_size, cudaMemcpyHostToDevice) );
    // d_temp2 also needs the initial contents: the halo (surface) cells are
    // never rewritten by the kernel, yet d_temp2 is the next step's input.
    cudaErrchk( cudaMemcpy((void *)d_temp2, (void *)d_temp1, in_grid_size, cudaMemcpyDeviceToDevice) );
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0); // start of the timed computation
    // Launch configuration: a 2-D grid of 2-D blocks covering the x/y plane.
    dim3 dimBlock(CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE, 1);
    dim3 dimGrid(ceil((float)Ngx/CUDA_BLOCK_SIZE), ceil((float)Ngy/CUDA_BLOCK_SIZE), 1);
    for (int i = 0; i < b2r_R; i++)
    {
        gol_3d_kernel<<<dimGrid, dimBlock>>>(d_temp1, d_temp2, Ngx, Ngy, Ngz, i);
        // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
        // the supported equivalent.
        cudaErrchk( cudaDeviceSynchronize() );
        // Ping-pong the buffers: this step's output becomes the next input.
        pd_temp = d_temp1;
        d_temp1 = d_temp2;
        d_temp2 = pd_temp;
    }
    cudaEventRecord(stop, 0); // computation finished
    cudaEventSynchronize(stop);
    // After the final swap the latest state is in d_temp1; copy it back.
    cudaErrchk( cudaMemcpy((void*) h_in, (void*) d_temp1, in_grid_size, cudaMemcpyDeviceToHost) );
    float gpu_compu_elapsed_time_ms;
    cudaEventElapsedTime(&gpu_compu_elapsed_time_ms, start, stop);
    cudaFree(d_temp1);
    cudaFree(d_temp2);
    // Destroy the timing events (they were leaked before).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return (double)gpu_compu_elapsed_time_ms / 1000.0;
}
21,717 |
// Babak Poursartip
// 01/28/2021
// CUDA
//topic:
#include "cstdio"
#include <algorithm>
#include <functional>
// =================================
__global__ void addCuda( int *a, int *b, int *c)
{
    // One thread per element: c[i] = a[i] + b[i].
    // Indexing ignores blockIdx, so this is valid only for a single block.
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// =================================
void addIndex( int *a, int *b, int *c, int i)
{
    // Element-wise sum at one index: c[i] = a[i] + b[i].
    const int sum = a[i] + b[i];
    c[i] = sum;
}
// =================================
void add( int *a, int *b, int *c, const int &count)
{
    // Element-wise sum of two arrays of `count` ints: c[i] = a[i] + b[i].
    for (int i = 0; i < count; ++i)
        c[i] = a[i] + b[i];
}
// =================================
int main(){
    // Adds two 6-element arrays three ways (STL transform, indexed loop,
    // CUDA kernel) and prints each result.
    printf("\n starts ... \n");
    const int count = 6;
    int ha[]={5,2,5,8,7,6};
    int hb[]={15,12,15,18,17,16};
    // serial adding
    int hc[count];
    add(ha, hb, hc, count);
    printf(" 1 ================\n");
    std::for_each(hc,hc+count, [](int&c){printf("%d, ",c);});
    printf("\n");
    // add index
    for (int i = 0; i < count; ++i)
        addIndex(ha, hb, hc, i);
    printf(" 2 ================\n");
    std::for_each(hc,hc+count, [](int&c){printf("%d, ",c);});
    printf("\n");
    // cuda adding
    int size = count * sizeof(int);
    int * da, *db, * dc;
    cudaMalloc(&da, size);
    cudaMalloc(&db, size);
    cudaMalloc(&dc, size);
    cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
    addCuda<<<1,count>>>(da, db, dc);
    // BUG FIX: the kernel writes the sums into dc; the original copied db
    // back, so section 3 printed the second input instead of the result.
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    printf(" 3 ================\n");
    std::for_each(hc,hc+count, [](int&c){printf("%d, ",c);});
    printf("\n");
    // Release device buffers before tearing down the context.
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    cudaDeviceReset();
    printf("\n finished. \n");
    return 0;
}
|
21,718 | #include "includes.h"
// Divide each right-hand-side entry by the matrix's diagonal element:
// x[n] = b[n] / mat[n][n] for each row n < rows.
// `threads1D` comes from includes.h — NOTE(review): presumably equals the
// launch's blockDim.x; confirm against the caller.
__global__ void solve(float* mat, float* b, float* x, int rows, int cols)
{
    int n = blockIdx.x*threads1D + threadIdx.x;
    if (n < rows) //Ensure bounds
        x[n] = b[n] / mat[n * cols + n];
}
21,719 | #include "matrix.cuh"
#include <cstring> // memset
#define BLOCK_SIZE 1024
#define DIVIDE(A,B) (((A)+(B)-1)/(B))
#define BLOCKS(N) DIVIDE(N,BLOCK_SIZE)
#define TILE_DIM 32 // total shared memory usage 32*32 * 2(matrices) * 4(sizeof(float)) = 8kB
///////////////////////////////////////////////////////////////////////////
// Comparison
/* PARELLEL FUNCTION
* Objective: Compare two matrices and store the answer in bool answer
* Requirements:
* 1) The matrices must be the same size.
* 2) The answer must be a pointer in DEVICE memory
*
* Shared Memory Requirements: NONE
* Runtime: Linear Parellel
*/
__global__ void compare(float* a, float* b, const int size, bool* answer)
{
    // One thread per element: any mismatch clears *answer to false.
    // The caller must preset *answer to true; no thread ever sets it back,
    // so concurrent writes are benign (all write the same value).
    if (!answer)
        return;
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < size && a[idx] != b[idx])
        *answer = false;
}
/* Return true if the matrices are identical to each other and false otherwise */
/* Return true if the matrices are identical to each other and false otherwise.
 * Both matrices must live on the same side (GPU vs CPU); differing shapes
 * compare unequal without touching element data. */
bool Matrix::operator== (const Matrix& other) const
{
    if (gpu_enabled != other.is_gpu())
        yeet "cannot compare gpu matrix with cpu matrix\n";
    if (dim1 != other.get_dim1() || dim2 != other.get_dim2())
        return false;
    float* other_matrix = other.get_matrix();
    if (gpu_enabled)
    {
        bool host_ans;
        cudaMemset(dummy, 1, sizeof(bool)); // preset the device-side answer to TRUE (non-zero byte); the kernel only ever clears it to false on a mismatch
        compare <<<BLOCKS(dim1*dim2),BLOCK_SIZE>>> (matrix, other_matrix, dim1*dim2, (bool*)dummy);
        // cudaMemcpy on the default stream synchronizes with the kernel above.
        cudaMemcpy(&host_ans, dummy, sizeof(bool), cudaMemcpyDeviceToHost);
        return host_ans;
    }
    else
    {
        // CPU path: plain element-wise scan.
        for (int i = 0; i < dim1*dim2; ++i)
            if (matrix[i]!=other_matrix[i])
                return false;
        return true;
    }
}
/* Return false if the matrices are identical to each other and true otherwise */
/* Return false if the matrices are identical to each other and true otherwise */
bool Matrix::operator!= (const Matrix& other) const
{
    const bool equal = (*this == other);
    return !equal;
}
///////////////////////////////////////////////////////////////////////////
// Addition, subtraction, scalar multiplication
/* PARELLEL FUNCTION
* Objective: Add two matrices and stores it in a third matrix ans.
* Requirements:
* 1) The matrices must be the same size.
* 2) The answer must be a pointer in DEVICE memory and a block sizeof(float)*size must be allocated
*
* Shared Memory Requirements: NONE
* Runtime: Linear Parellel
*/
/* PARALLEL FUNCTION
 * Element-wise sum: ans[i] = a[i] + b[i] for every i < size.
 * ans must point to device memory of at least size floats. */
__global__ void add(float* a, float* b, const int size, float* ans)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    ans[idx] = a[idx] + b[idx];
}
/* PARELLEL FUNCTION
* Objective: Subtract two matrices and stores it in a third matrix ans.
* Requirements:
* 1) The matrices must be the same size.
*
* Shared Memory Requirements: NONE
* Runtime: Linear Parellel
*/
/* PARALLEL FUNCTION
 * Element-wise difference: ans[i] = a[i] - b[i] for every i < size. */
__global__ void subtract(float* a, float* b, const int size, float* ans)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    ans[idx] = a[idx] - b[idx];
}
/* PARELLEL FUNCTION
* Objective: Multiplies a matrix by a scalar and stores it in a third matrix.
*
* Shared Memory Requirements: NONE
* Runtime: Linear Parellel
*/
/* PARALLEL FUNCTION
 * Scalar multiplication: ans[i] = a[i] * scalar for every i < size. */
__global__ void scale(float* a, const float scalar, const int size, float* ans)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    ans[idx] = a[idx] * scalar;
}
/* Element-wise sum of two GPU matrices of equal shape; returns a new matrix. */
Matrix Matrix::operator+ (const Matrix& other) const
{
    if (!(gpu_enabled && other.is_gpu()))
        yeet "cannot add because one or more of the matrices are not gpu\n";
    if (dim1 != other.get_dim1() || dim2 != other.get_dim2())
        yeet "cannot add two matrices of different dimensions\n";
    const int size = dim1 * dim2;
    Matrix result (dim1, dim2, 0.0f);
    add <<<BLOCKS(size), BLOCK_SIZE>>> (matrix, other.get_matrix(), size, result.get_matrix());
    return result;
}
/* In-place element-wise sum: this->matrix serves as both input and output. */
void Matrix::operator+= (const Matrix& other)
{
    if (!(gpu_enabled && other.is_gpu()))
        yeet "cannot add because one or more of the matrices are not gpu\n";
    if (dim1 != other.get_dim1() || dim2 != other.get_dim2())
        yeet "cannot add two matrices of different dimensions\n";
    const int size = dim1 * dim2;
    add <<<BLOCKS(size), BLOCK_SIZE>>> (matrix, other.get_matrix(), size, matrix);
}
/* Element-wise difference of two GPU matrices of equal shape. */
Matrix Matrix::operator- (const Matrix& other) const
{
    // BUG FIX: the error messages said "cannot add" (copy-pasted from
    // operator+), which misreported which operation failed.
    if (!(gpu_enabled && other.is_gpu()))
        yeet "cannot subtract because one or more of the matrices are not gpu\n";
    if (dim1 != other.get_dim1() || dim2 != other.get_dim2())
        yeet "cannot subtract two matrices of different dimensions\n";
    Matrix ans (dim1, dim2, 0.0f);
    subtract <<<BLOCKS(dim1*dim2), BLOCK_SIZE>>> (matrix, other.get_matrix(), dim1*dim2, ans.get_matrix());
    return ans;
}
/* In-place element-wise difference: this->matrix is both input and output. */
void Matrix::operator-= (const Matrix& other)
{
    // BUG FIX: corrected copy-pasted "cannot add" error messages.
    if (!(gpu_enabled && other.is_gpu()))
        yeet "cannot subtract because one or more of the matrices are not gpu\n";
    if (dim1 != other.get_dim1() || dim2 != other.get_dim2())
        yeet "cannot subtract two matrices of different dimensions\n";
    subtract <<<BLOCKS(dim1*dim2), BLOCK_SIZE>>> (matrix, other.get_matrix(), dim1*dim2, matrix);
}
/* Scalar multiplication: returns a new matrix equal to (*this) * scalar. */
Matrix Matrix::operator* (const float scalar) const
{
    if (!gpu_enabled)
        yeet "cannot multiply because the matrices is not gpu enabled\n";
    const int size = dim1 * dim2;
    Matrix result (dim1, dim2, 0.0f);
    scale <<<BLOCKS(size), BLOCK_SIZE>>> (matrix, scalar, size, result.get_matrix());
    return result;
}
/* In-place scalar multiplication of this matrix. */
void Matrix::operator*= (const float scalar)
{
    if (!gpu_enabled)
        yeet "cannot multiply because the matrices is not gpu enabled\n";
    const int size = dim1 * dim2;
    scale <<<BLOCKS(size), BLOCK_SIZE>>> (matrix, scalar, size, matrix);
}
/* Division by a scalar, implemented as multiplication by its reciprocal. */
Matrix Matrix::operator/ (const float scalar) const
{
    if (!scalar)
        yeet "You're tryin to divide by zero, idiot!\n";
    return *this * (1.0f / scalar);
}
/* In-place division by a scalar (multiplies by the reciprocal). */
void Matrix::operator/= (const float scalar)
{
    if (!scalar)
        yeet "You're tryin to divide by zero, idiot!\n";
    *this *= (1.0f / scalar);
}
//////////////////////////////////////////////////////////////////////////
// Matrix multiplication
/* PARELLEL FUNCTION
* Objective: Does matrix multiplication between two matrices and stores it in a third. ans = ab.
* Requirements:
* 1) The matrices dimensions must match, including the ans matrix.
*
* Shared Memory Requirements: 8kb
* Runtime: O(a_r * a_c__b_r * b_c / THREADS)
*/
/* Tiled matrix multiplication C = A * B using TILE_DIM x TILE_DIM shared-
 * memory tiles of both operands. Requires blockDim == (TILE_DIM, TILE_DIM);
 * the grid must tile C: gridDim.x covers CCols, gridDim.y covers CRows.
 * Out-of-range tile elements are zero-filled so partial edge tiles are safe. */
__global__ void matmul(float* A, float* B, float* C, int ARows, int ACols, int BRows,
                       int BCols, int CRows, int CCols)
{
    float CValue = 0.0f;
    // Row/Col: this thread's output coordinate in C.
    int Row = blockIdx.y*TILE_DIM + threadIdx.y;
    int Col = blockIdx.x*TILE_DIM + threadIdx.x;
    __shared__ float As[TILE_DIM][TILE_DIM];
    __shared__ float Bs[TILE_DIM][TILE_DIM];
    #pragma unroll
    // March the tile window across A's columns / B's rows (ceil(ACols/TILE_DIM) steps).
    for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
        if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)
            As[threadIdx.y][threadIdx.x] = A[INDEX(Row,k*TILE_DIM+threadIdx.x,ARows,ACols)];
        else
            As[threadIdx.y][threadIdx.x] = 0.0;  // zero-pad outside A
        if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)
            Bs[threadIdx.y][threadIdx.x] = B[INDEX(k*TILE_DIM+threadIdx.y,Col,BRows,BCols)];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0;  // zero-pad outside B
        __syncthreads();  // tiles fully loaded before use
        for (int n = 0; n < TILE_DIM; ++n)
            CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
        __syncthreads();  // finish reads before the next load overwrites the tiles
    }
    if (Row < CRows && Col < CCols)
        // blockIdx.y*blockDim.y+threadIdx.y equals Row (and likewise Col),
        // since blockDim == TILE_DIM here.
        C[INDEX(blockIdx.y * blockDim.y + threadIdx.y, (blockIdx.x * blockDim.x)+ threadIdx.x, CRows,CCols)] = CValue;
}
/* Matrix product: ans(dim1 x other.dim2) = (*this) * other. */
Matrix Matrix::operator* (const Matrix& other) const
{
    if (!(gpu_enabled && other.is_gpu()))
        yeet "cannot multiply because one or more of the matrices are not gpu\n";
    if (dim2 != other.get_dim1())
        yeet "matrix dimensions are not compatiable.";
    Matrix ans(dim1, other.get_dim2(), 0.0f);
    // BUG FIX: the grid must tile the OUTPUT — x spans its columns
    // (other.get_dim2()), y its rows (dim1). The old grid used (dim1, dim2)
    // and left part of a non-square result uncomputed.
    dim3 grid (DIVIDE(other.get_dim2(),TILE_DIM), DIVIDE(dim1,TILE_DIM), 1);
    dim3 block (TILE_DIM,TILE_DIM,1);
    // matmul declares its tiles statically, so no dynamic shared size needed.
    matmul<<<grid, block>>> (matrix, other.get_matrix(), ans.get_matrix(), dim1, dim2, other.get_dim1(), other.get_dim2(), ans.get_dim1(), ans.get_dim2());
    return ans;
}
/* Zero every element, using the memset that matches where the buffer lives
 * (device memory when gpu_enabled, host memory otherwise). */
void Matrix::rezero()
{
    const size_t bytes = dim1 * dim2 * sizeof(float);
    if (gpu_enabled)
        cudaMemset(matrix, 0, bytes);
    else
        std::memset(matrix, 0, bytes);
}
/* Element-wise (Hadamard) product: ans[i] = a[i] * b[i] for every i < size. */
__global__ void had(float* a, float* b, const int size, float* ans)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    ans[idx] = a[idx] * b[idx];
}
/* Hadamard (element-wise) product of two GPU matrices of equal shape. */
Matrix Matrix::o(const Matrix& other) const
{
    if (!(gpu_enabled && other.is_gpu()))
        yeet "cannot hadamard because one or more of the matrices are not gpu\n";
    if (dim1 != other.get_dim1() || dim2 != other.get_dim2())
        yeet "cannot perform hadamard product on matrices of different dimensions";
    const int size = dim1 * dim2;
    Matrix result(dim1, dim2, 0.0f);
    had <<<BLOCKS(size), BLOCK_SIZE>>> (matrix, other.get_matrix(), size, result.get_matrix());
    return result;
}
/* Out-of-place transpose: ans (dim2 x dim1) = a (dim1 x dim2) transposed.
 * Expects a 2-D launch whose x extent covers dim1 and y extent covers dim2. */
__global__ void transpose (float* a, float* ans, const int dim1, const int dim2)
{
    const int r = threadIdx.x + blockDim.x * blockIdx.x;
    const int c = threadIdx.y + blockDim.y * blockIdx.y;
    if (r >= dim1 || c >= dim2)
        return;
    ans[INDEX(c,r,dim2,dim1)] = a[INDEX(r,c,dim1,dim2)];
}
/* In-place transpose of a square dim x dim matrix: each thread with x < y
 * swaps one element pair across the main diagonal, so every pair is swapped
 * exactly once and the diagonal is untouched. Expects a 2-D launch. */
__global__ void transpose (float* a, const int dim)
{
    const int x = threadIdx.x + blockDim.x * blockIdx.x;
    const int y = threadIdx.y + blockDim.y * blockIdx.y;
    if (x >= dim || y >= dim || x >= y)
        return;
    const float upper = a[INDEX(x,y,dim,dim)];
    a[INDEX(x,y,dim,dim)] = a[INDEX(y,x,dim,dim)];
    a[INDEX(y,x,dim,dim)] = upper;
}
/* Return the transpose as a new (dim2 x dim1) GPU matrix. */
Matrix Matrix::T(void) const
{
    if (!gpu_enabled)
        yeet "cannot transpose a non-gpu matrix";
    Matrix ans(dim2, dim1, 0.0f);
    // BUG FIX: the transpose kernel indexes in 2-D (x and y), but the old
    // launch was 1-D (<<<BLOCKS(dim1*dim2), BLOCK_SIZE>>>), so the y
    // coordinate was always 0 and only one row was ever transposed.
    dim3 block (TILE_DIM, TILE_DIM, 1);
    dim3 grid (DIVIDE(dim1, TILE_DIM), DIVIDE(dim2, TILE_DIM), 1);
    transpose<<<grid, block>>>(matrix, ans.get_matrix(), dim1, dim2);
    return ans;
}
/* Transpose this matrix in place. Non-square matrices fall back to the
 * out-of-place path; square matrices swap across the diagonal on device. */
void Matrix::T_inplace(void)
{
    if (dim1 != dim2)
        *this = this->T();
    else
    {
        // BUG FIX: the in-place transpose kernel is 2-D; the old 1-D launch
        // left threadIdx.y/blockIdx.y at 0, so nothing past row 0 was swapped.
        dim3 block (TILE_DIM, TILE_DIM, 1);
        dim3 grid (DIVIDE(dim1, TILE_DIM), DIVIDE(dim1, TILE_DIM), 1);
        transpose <<<grid, block>>> (matrix, dim1);
    }
}
/* Dot product of two vectors (matrices with one unit dimension).
 * NOTE(review): this function is unfinished — after validating the shapes and
 * allocating a partial-sum buffer it unconditionally throws, and the thrown
 * message ("the dimensions don't match.") does not describe the real state.
 * The cudaMalloc'd partial_ans is also leaked on that throw, and `reqs` is
 * never used. Needs a reduction kernel plus cleanup before it can be used. */
float Matrix::dot(const Matrix& other) const
{
    // Reject anything that is not a row or column vector on either side.
    if ((dim1 > 1 && dim2 > 1) || (other.get_dim1() > 1) && (other.get_dim2() > 1))
        yeet "one cannot take the dot product of two matrices\n";
    // Length check: for vectors, dim1 + dim2 == length + 1 on each side.
    if (dim1 + dim2 != other.get_dim2() + other.get_dim1())
        yeet "the dimensions don't match.\n";
    float* partial_ans;
    cudaMalloc((void**)&partial_ans, sizeof(float)*(dim1+dim2-2));
    int reqs = dim1 + dim2 - 1;
    yeet "the dimensions don't match.\n";
}
|
21,720 | #include <cstdlib>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#define BIN_COUNT 10
#define N_THREADS 512
#define N_TOTAL 1024
#define RANGE 100
using namespace std;
// GPU kernel for computing a histogram
// GPU kernel for computing a histogram: one thread per input element maps its
// value to a bin (value / DIV) and increments that bin atomically, since many
// threads may land in the same bin concurrently.
__global__ void kernel(int *input, int *bins, int N, int N_bins, int DIV){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    const int bin = input[gid] / DIV;
    atomicAdd(&bins[bin], 1);
}
// Initializes our input array
// Takes:
// a: array of integers
// N: Length of the array
// takes random number from 0-99
// Fill a[0..N) with pseudo-random values in [0, RANGE).
// Takes:
//  a: array of integers
//  N: length of the array
void init_array(int *a, int N){
    for(int idx = 0; idx < N; ++idx)
        a[idx] = rand() % RANGE;
}
int main(){
    // Builds a BIN_COUNT-bin histogram of N_TOTAL random ints in [0, RANGE)
    // on the GPU and writes the bin counts to histogram.dat for gnuplot.
    int N = N_TOTAL;
    size_t bytes = N * sizeof(int);
    int N_bins = BIN_COUNT;
    size_t bytes_bins = N_bins * sizeof(int);
    // Allocate unified memory. BUG FIX: the previous `new int[...]`
    // allocations were immediately overwritten by cudaMallocManaged and
    // therefore leaked.
    int *input = nullptr;
    int *bins = nullptr;
    cudaMallocManaged(&input, bytes);
    cudaMallocManaged(&bins, bytes_bins);
    // Initialize the input array
    init_array(input, N);
    // Divisor mapping a value to its bin (ceiling of RANGE / N_bins).
    int DIV = (RANGE + N_bins - 1) / N_bins;
    // Zero the bin counters before the kernel accumulates into them.
    for(int i=0; i < N_bins; i++){
        bins[i] = 0;
    }
    // Launch configuration: one thread per element, ceiling division.
    int THREADS = N_THREADS;
    int BLOCKS = (N + THREADS - 1) / THREADS;
    kernel<<<BLOCKS, THREADS>>>(input, bins, N, N_bins, DIV);
    // The launch is asynchronous; wait before reading bins on the host.
    cudaDeviceSynchronize();
    // Write the data out for gnuplot
    ofstream output_file;
    output_file.open("histogram.dat", ios::out | ios::trunc);
    for(int i = 0; i < N_bins; i++){
        output_file << bins[i] << " \n";
    }
    output_file.close();
    // Release the managed allocations (previously never freed).
    cudaFree(input);
    cudaFree(bins);
    return 0;
}
|
21,721 | #include "includes.h"
using namespace std;
struct pixel //to store RGB values
{
unsigned char r;
unsigned char g;
unsigned char b;
};
// Zero-padded pixel fetch: return the pixel at (x_coord, y_coord), or an
// all-zero pixel when the coordinates fall outside the img_width x img_height
// image.
__device__ pixel padding(pixel* Pixel_val, int x_coord, int y_coord, int img_width, int img_height)
{
    pixel result;
    result.r = 0;
    result.g = 0;
    result.b = 0;
    const bool inside = x_coord >= 0 && y_coord >= 0 &&
                        x_coord < img_width && y_coord < img_height;
    if (inside)
        result = Pixel_val[y_coord * img_width + x_coord];
    return result;
}
// 1-D vertical convolution: each thread produces one output pixel by
// accumulating k_v vertically adjacent input pixels weighted by kernel_v.
// Taps outside the image are zero-filled by padding().
__global__ void vertical_conv(pixel* Pixel_in_v, pixel* Pixel_out_v,int img_wd_v, int img_ht_v, float* kernel_v, int k_v)
{
    const size_t col = blockIdx.x*blockDim.x + threadIdx.x;
    const size_t row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= img_ht_v || col >= img_wd_v)
        return;
    float acc_r = 0, acc_g = 0, acc_b = 0;
    for (int l = 0; l < k_v; l++)
    {
        // Tap l sits (l - (k_v-1)/2) rows away from the output pixel.
        pixel sample = padding(Pixel_in_v, col, (row + l - (k_v - 1) / 2), img_wd_v, img_ht_v);
        acc_r += sample.r * kernel_v[l];
        acc_b += sample.b * kernel_v[l];
        acc_g += sample.g * kernel_v[l];
    }
    const size_t out_idx = row * img_wd_v + col;
    // Narrowing to unsigned char matches the original behaviour.
    Pixel_out_v[out_idx].r = acc_r;
    Pixel_out_v[out_idx].g = acc_g;
    Pixel_out_v[out_idx].b = acc_b;
}
21,722 | #include "includes.h"
// Image resize kernel: each thread samples the source texture at its output
// coordinate scaled by scale_factor and writes one byte of the destination.
// `width` is the destination row pitch in pixels.
// NOTE(review): there is no bounds check against the destination extent —
// this assumes the launch grid exactly covers width x height; confirm at the
// launch site or threads past the edge write out of bounds.
__global__ void resized(unsigned char *imgData, int width, float scale_factor, cudaTextureObject_t texObj) {
    const unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int tidY = blockIdx.y * blockDim.y + threadIdx.y;
    // Row-major index into the destination buffer.
    const unsigned idx = tidY * width + tidX;
    //Read texture mem to CUDA Kernel
    imgData[idx] = tex2D<unsigned char>(texObj,(float)(tidX*scale_factor),(float)(tidY*scale_factor));
}
21,723 | // parallel HelloWorld using GPUs
// Simple starting example for CUDA program : this only works on arch 2 or higher
// Cong Xiao and Senlei Wang, Modified on Sep 2018
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N_THRDS 4 // Nr of threads in a block (blockDim)
#define N_BLKS 4 // Nr of blocks in a kernel (gridDim)
// Abort the program with the supplied message if any preceding CUDA call
// left an error behind.
void checkCudaError(const char *error)
{
    if (cudaGetLastError() == cudaSuccess)
        return;
    fprintf (stderr, "Cuda : %s\n",error);
    exit(EXIT_FAILURE);
}
// Query device 0 and refuse to run on hardware older than compute
// capability 2.x (required for device-side printf in this demo).
void checkCardVersion()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    checkCudaError("cudaGetDeviceProperties failed");
    fprintf(stderr,"This GPU has major architecture %d, minor %d \n",prop.major,prop.minor);
    if(prop.major >= 2)
        return;
    fprintf(stderr,"Need compute capability 2 or higher.\n");
    exit(1);
}
// Each thread computes its global id across the 1-D grid and prints a
// greeting identifying itself.
__global__ void HelloworldOnGPU(void)
{
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    printf( "Hello World, I am thread %d in block with index %d, my thread index is %d \n",
            tid, blockIdx.x, threadIdx.x);
}
int main(void)
{
    // Refuse to run on hardware below compute capability 2.
    checkCardVersion();
    // Launch N_BLKS blocks of N_THRDS threads; each prints one line.
    HelloworldOnGPU <<< N_BLKS, N_THRDS >>> ();
    cudaDeviceSynchronize(); // without using synchronization, output won't be shown
    return 0;
}
|
21,724 | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
//#define __DEBUG
#define element_addr(a, m, n, d) (a + ((m) * (d) + n))
#define element(a, m, n, d) (((m >= 0)&&(m < d)&&(n >= 0)&&(n < d))? (a[(m) * (d) + n]) : 0)
#define CUDA_CALL(cmd) do { \
if((err = cmd) != cudaSuccess) { \
printf("(%d) Cuda Error:(%d) %s\n", __LINE__,int(err), cudaGetErrorString(err) ); \
} \
} while(0)
#define BLK_SZ 256
#define BLK_SIDE 16
/*__global__ void computeKernel(int *living, float *honeys[2], float *honeyr, int d, float rbee, float rflow) {
//honeyr[threadIdx.x] = honeys[0][threadIdx.x];
//honeyr[threadIdx.x] = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
//honeyr[i*d + j] = i+j;
*(element_addr(honeyr, i, j, d)) = element(honeyr,i-1,j-1,d);
}*/
// One thread per cell (i, j) of the d x d grid: applies an 8-neighbour
// diffusion stencil weighted by rflow plus a source term rbee * living(i, j).
// The element() macro above returns 0 for out-of-range neighbours, which
// zero-pads the border.
// NOTE(review): there is no bounds check on (i, j) — correct only when d is
// an exact multiple of the launch's block side (see calculateGPU); confirm.
__global__ void computeKernelReal(int *living, float *honeyin,float *honeyout, int d, float rbee, float rflow) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    *(element_addr(honeyout, i, j, d)) = rflow * (element(honeyin, i-1, j-1, d) + element(honeyin, i-1, j, d) + element(honeyin, i-1, j+1, d) + element(honeyin, i, j-1, d) + element(honeyin, i, j+1, d) + element(honeyin, i+1, j-1, d) + element(honeyin, i+1, j, d) + element(honeyin, i+1, j+1, d) ) + (1.0 - 8.0 * rflow) * element(honeyin, i, j, d) + rbee * element(living, i, j, d);
}
/* Runs n steps of computeKernelReal on a d x d grid.
 * living: host bee map; honey[0]: initial honey level (input);
 * honey[1]: final honey level (output). Prints kernel and end-to-end
 * timings and returns 1.
 * Assumes d is an exact multiple of BLK_SIDE (grid uses integer division
 * and the kernel has no bounds check). */
int calculateGPU(const int *living, float *honey[2], int d, int n, float rbee, float rflow)
{
    cudaError_t err;
    clock_t start, end;
    cudaEvent_t kstart, kstop;
    float ktime;
    double time;
    int i;
    /* Device buffers: bee map plus two ping-pong honey grids */
    int *living_d;
    float *honeyin_d;
    float *honey_r;
    float *swap_tmp;
    /* Set up device timers */
    CUDA_CALL(cudaSetDevice(0));
    CUDA_CALL(cudaEventCreate(&kstart));
    CUDA_CALL(cudaEventCreate(&kstop));
    /* Start GPU end-to-end timer */
    start = clock();
    CUDA_CALL(cudaMalloc((void **)&living_d, d * d * sizeof(int)));
    CUDA_CALL(cudaMalloc((void **)&honeyin_d, d * d * sizeof(float)));
    CUDA_CALL(cudaMalloc((void **)&honey_r, d * d * sizeof(float)));
    CUDA_CALL(cudaMemcpy(living_d, living, d * d * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(honeyin_d, honey[0], d * d * sizeof(float), cudaMemcpyHostToDevice));
    /* Start GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstart, 0));
    /* Main honey level simulation loop */
    dim3 dimGrid(d/BLK_SIDE,d/BLK_SIDE);
    dim3 dimBlock(BLK_SIDE,BLK_SIDE);
    for (i=0;i< n;i++)
    {
        computeKernelReal<<<dimGrid,dimBlock>>>(living_d,honeyin_d,honey_r,d,rbee,rflow);
        /* PERF FIX: ping-pong by swapping pointers instead of copying the
           whole grid device-to-device every iteration. */
        swap_tmp = honeyin_d;
        honeyin_d = honey_r;
        honey_r = swap_tmp;
    }
    /* Stop GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstop, 0));
    CUDA_CALL(cudaEventSynchronize(kstop));
    CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);
    /* After the final swap honeyin_d holds the latest result (the initial
       data when n == 0). */
    CUDA_CALL(cudaMemcpy(honey[1],honeyin_d,d * d * sizeof(float),cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaFree(living_d));
    CUDA_CALL(cudaFree(honeyin_d));
    CUDA_CALL(cudaFree(honey_r));
    /* Stop GPU end-to-end timer and timer cleanup */
    end = clock();
    CUDA_CALL(cudaEventDestroy(kstart));
    CUDA_CALL(cudaEventDestroy(kstop));
    time = ((double)(end-start))/CLOCKS_PER_SEC;
    printf("GPU end-to-end: %lf sec\n", time);
    return 1;
}
|
21,725 | #include <stdio.h>
#include <cuda_runtime.h>
// Each thread prints its thread index and block index.
__global__ void kernel() {
    printf("%d, %d\n", threadIdx.x, blockIdx.x);
}
int main() {
    // Launch 16 blocks of 4 threads; each thread prints its indices.
    kernel <<<16, 4, 0>>>();
    // BUG FIX: device-side printf output is buffered; without synchronizing
    // before exit the process may terminate before anything is flushed.
    cudaDeviceSynchronize();
    return 0;
}
/**
* Dans cette démo on appelle 4 threads par block et on appelle 16 blocks.
*/ |
21,726 | #define t_max 1
#define t 1
/*
(u[0][0][0][1][0]=(a*((((u[-3][0][0][0][0]+(u[0][-3][0][0][0]+u[0][0][-3][0][0]))*-2.0)+(((u[-2][0][0][0][0]+(u[0][-2][0][0][0]+u[0][0][-2][0][0]))*15.0)+((u[-1][0][0][0][0]+(u[0][-1][0][0][0]+u[0][0][-1][0][0]))*-60.0)))+((u[0][0][0][0][0]*20.0)+(((u[1][0][0][0][0]+(u[0][1][0][0][0]+u[0][0][1][0][0]))*30.0)+((u[2][0][0][0][0]+(u[0][2][0][0][0]+u[0][0][2][0][0]))*-3.0))))))
*/
/*
 * Machine-generated 5th-order upstream 3-D stencil step (the commented
 * expression above the kernel is the reference stencil). Reads u_0_0 and
 * writes u_0_1 at the centre tap _idx9; u_0_1_out is never used in this body.
 * Each thread handles a run of cbx consecutive x points at one (y, z)
 * coordinate. The _idxN variables are incrementally-built flattened offsets
 * of the 16 stencil taps; t and t_max are fixed to 1 by the macros at the
 * top of the file, so the t-dependent padding terms are constants.
 */
__global__ void upstream_5_3d(double * * u_0_1_out, double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int cbx)
{
    // double * const u__u_0[16] = { u_0_0, u_0_1 } ;
    int _idx0;
    int _idx1;
    int _idx10;
    int _idx11;
    int _idx12;
    int _idx13;
    int _idx14;
    int _idx15;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int _idx6;
    int _idx7;
    int _idx8;
    int _idx9;
    int idx_1_2;
    int pt_idx_x;
    int pt_idx_y;
    int pt_idx_z;
    int size_1_1;
    int size_1_2;
    // int t;
    int tmp;
    int v_idx_x;
    int v_idx_x_max;
    int v_idx_y;
    int v_idx_y_max;
    int v_idx_z;
    int v_idx_z_max;
    /*
    Initializations
    */
    // blockIdx.y is folded to carry both the y and z block coordinates.
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
    v_idx_x_max=(v_idx_x+cbx);
    v_idx_y=(threadIdx.y+(tmp*blockDim.y));
    v_idx_y_max=(v_idx_y+1);
    v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    v_idx_z_max=(v_idx_z+1);
    /*
    Implementation
    */
    /*
    for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
    */
    // for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
        /*
        for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... }
        */
        {
            /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
            pt_idx_z=v_idx_z;
            pt_idx_y=v_idx_y;
            for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
            {
                /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
                /*
                v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0])
                */
                /* _idx0 = ((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x) */
                _idx0=((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x);
                /* _idx1 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+(((((5*pt_idx_z)+15)*t)+pt_idx_y)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+((5*pt_idx_y)*t))+pt_idx_x)+3) */
                _idx1=(((_idx0-(3*x_max))-(15*t))+3);
                /* _idx2 = ((((((((pt_idx_z*x_max)+((5*pt_idx_z)*t))*y_max)+(((((5*pt_idx_z)*t)+pt_idx_y)+3)*x_max))+((25*pt_idx_z)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx2=((((_idx1+(((-3*x_max)-(15*t))*y_max))+((3-(15*t))*x_max))-(75*(t*t)))+(15*t));
                /* _idx3 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+1) */
                _idx3=(_idx0+1);
                /* _idx4 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+1)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+5)*t))+pt_idx_x)+3) */
                _idx4=((_idx1+x_max)+(5*t));
                /* _idx5 = (((((((((pt_idx_z+1)*x_max)+(((5*pt_idx_z)+5)*t))*y_max)+((((((5*pt_idx_z)+5)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+25)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx5=(((_idx2+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                /* _idx6 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+2) */
                _idx6=(_idx0+2);
                /* _idx7 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+2)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+10)*t))+pt_idx_x)+3) */
                _idx7=((_idx4+x_max)+(5*t));
                /* _idx8 = (((((((((pt_idx_z+2)*x_max)+(((5*pt_idx_z)+10)*t))*y_max)+((((((5*pt_idx_z)+10)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+50)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx8=(((_idx5+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                /* _idx9 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx9=(_idx0+3);
                /* _idx10 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+4) */
                _idx10=(_idx0+4);
                /* _idx11 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+4)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+20)*t))+pt_idx_x)+3) */
                _idx11=((_idx9+x_max)+(5*t));
                /* _idx12 = (((((((((pt_idx_z+4)*x_max)+(((5*pt_idx_z)+20)*t))*y_max)+((((((5*pt_idx_z)+20)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+100)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx12=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                /* _idx13 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+5) */
                _idx13=(_idx0+5);
                /* _idx14 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+5)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+25)*t))+pt_idx_x)+3) */
                _idx14=((_idx11+x_max)+(5*t));
                /* _idx15 = (((((((((pt_idx_z+5)*x_max)+(((5*pt_idx_z)+25)*t))*y_max)+((((((5*pt_idx_z)+25)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+125)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx15=(((_idx12+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                u_0_1[_idx9]=(a*((((u_0_0[_idx0]+(u_0_0[_idx1]+u_0_0[_idx2]))*-2.0)+(((u_0_0[_idx3]+(u_0_0[_idx4]+u_0_0[_idx5]))*15.0)+((u_0_0[_idx6]+(u_0_0[_idx7]+u_0_0[_idx8]))*-60.0)))+((u_0_0[_idx9]*20.0)+(((u_0_0[_idx10]+(u_0_0[_idx11]+u_0_0[_idx12]))*30.0)+((u_0_0[_idx13]+(u_0_0[_idx14]+u_0_0[_idx15]))*-3.0)))));
            }
        }
    }
}
/*
 * Machine-generated initializer matching upstream_5_3d's thread/index layout:
 * each thread writes 0.1 into u_0_0 at all 16 stencil-tap offsets of its
 * points and 1.1 into u_0_1 at the centre tap (_idx9). The _idxN offset
 * algebra mirrors the compute kernel's; t and t_max are fixed to 1 by the
 * macros at the top of the file.
 */
__global__ void initialize(double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int cbx)
{
    double * const u__u_0[16] = { u_0_0, u_0_1 } ;
    int _idx0;
    int _idx1;
    int _idx10;
    int _idx11;
    int _idx12;
    int _idx13;
    int _idx14;
    int _idx15;
    int _idx2;
    int _idx3;
    int _idx4;
    int _idx5;
    int _idx6;
    int _idx7;
    int _idx8;
    int _idx9;
    int idx_1_2;
    int pt_idx_x;
    int pt_idx_y;
    int pt_idx_z;
    int size_1_1;
    int size_1_2;
    //int t;
    int tmp;
    int v_idx_x;
    int v_idx_x_max;
    int v_idx_y;
    int v_idx_y_max;
    int v_idx_z;
    int v_idx_z_max;
    /*
    Initializations
    */
    // blockIdx.y is folded to carry both the y and z block coordinates.
    size_1_1=(y_max/blockDim.y);
    size_1_2=(z_max/blockDim.z);
    idx_1_2=(blockIdx.y/size_1_2);
    tmp=(blockIdx.y-(idx_1_2*size_1_2));
    v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
    v_idx_x_max=(v_idx_x+cbx);
    v_idx_y=(threadIdx.y+(tmp*blockDim.y));
    v_idx_y_max=(v_idx_y+1);
    v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
    v_idx_z_max=(v_idx_z+1);
    /*
    Implementation
    */
    /*
    for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
    */
    //for (t=1; t<=t_max; t+=1)
    {
        /* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
        /*
        for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... }
        */
        {
            /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
            pt_idx_z=v_idx_z;
            pt_idx_y=v_idx_y;
            for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
            {
                /* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
                /*
                v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0])
                */
                /* _idx0 = ((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x) */
                _idx0=((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x);
                u_0_0[_idx0]=0.1;
                /* _idx1 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+1) */
                _idx1=(_idx0+1);
                u_0_0[_idx1]=0.1;
                /* _idx2 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+2) */
                _idx2=(_idx0+2);
                u_0_0[_idx2]=0.1;
                /* _idx3 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+(((((5*pt_idx_z)+15)*t)+pt_idx_y)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+((5*pt_idx_y)*t))+pt_idx_x)+3) */
                _idx3=(((_idx0-(3*x_max))-(15*t))+3);
                u_0_0[_idx3]=0.1;
                /* _idx4 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+1)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+5)*t))+pt_idx_x)+3) */
                _idx4=((_idx3+x_max)+(5*t));
                u_0_0[_idx4]=0.1;
                /* _idx5 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+2)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+10)*t))+pt_idx_x)+3) */
                _idx5=((_idx4+x_max)+(5*t));
                u_0_0[_idx5]=0.1;
                /* _idx6 = ((((((((pt_idx_z*x_max)+((5*pt_idx_z)*t))*y_max)+(((((5*pt_idx_z)*t)+pt_idx_y)+3)*x_max))+((25*pt_idx_z)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx6=((((_idx1+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+2);
                u_0_0[_idx6]=0.1;
                /* _idx7 = (((((((((pt_idx_z+1)*x_max)+(((5*pt_idx_z)+5)*t))*y_max)+((((((5*pt_idx_z)+5)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+25)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx7=(((_idx6+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                u_0_0[_idx7]=0.1;
                /* _idx8 = (((((((((pt_idx_z+2)*x_max)+(((5*pt_idx_z)+10)*t))*y_max)+((((((5*pt_idx_z)+10)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+50)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx8=(((_idx7+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                u_0_0[_idx8]=0.1;
                /* _idx9 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx9=(_idx0+3);
                u_0_0[_idx9]=0.1;
                /* _idx10 = (((((((((pt_idx_z+4)*x_max)+(((5*pt_idx_z)+20)*t))*y_max)+((((((5*pt_idx_z)+20)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+100)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx10=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                u_0_0[_idx10]=0.1;
                /* _idx11 = (((((((((pt_idx_z+5)*x_max)+(((5*pt_idx_z)+25)*t))*y_max)+((((((5*pt_idx_z)+25)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+125)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+3) */
                _idx11=(((_idx10+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
                u_0_0[_idx11]=0.1;
                /* _idx12 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+4)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+20)*t))+pt_idx_x)+3) */
                _idx12=((_idx9+x_max)+(5*t));
                u_0_0[_idx12]=0.1;
                /* _idx13 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+5)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+25)*t))+pt_idx_x)+3) */
                _idx13=((_idx12+x_max)+(5*t));
                u_0_0[_idx13]=0.1;
                /* _idx14 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+4) */
                _idx14=(_idx0+4);
                u_0_0[_idx14]=0.1;
                /* _idx15 = (((((((((pt_idx_z+3)*x_max)+(((5*pt_idx_z)+15)*t))*y_max)+((((((5*pt_idx_z)+15)*t)+pt_idx_y)+3)*x_max))+(((25*pt_idx_z)+75)*(t*t)))+(((5*pt_idx_y)+15)*t))+pt_idx_x)+5) */
                _idx15=(_idx0+5);
                u_0_0[_idx15]=0.1;
                u_0_1[_idx9]=1.1;
            }
        }
    }
}
|
21,727 | #include <iostream>
const int image_size = 4096;
const int filter_size = 3;
// Tiled 2D convolution: each 16x16 block stages its tile of image A in
// shared memory, convolves it with the n x n filter C, and writes to B.
// N = image width/height in pixels, n = filter width (offset = n/2 radius).
// NOTE(review): the tile is loaded without a halo, so window positions that
// fall outside the 16x16 tile are skipped -- pixels near tile borders get a
// partial sum rather than a full convolution. Also assumes blockDim is
// exactly 16x16 and N is a multiple of 16 (no row/col bounds check);
// confirm against the launch site.
__global__ void conv2d(int* A, int* B, int* C, int N, int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y; // global pixel row
int col = blockIdx.x * blockDim.x + threadIdx.x; // global pixel col
const int offset = n / 2; // filter radius
int row_i = threadIdx.y - offset; // window origin within the tile (row)
int col_i = threadIdx.x - offset; // window origin within the tile (col)
__shared__ int shm[16][16]; // one input tile per block
shm[threadIdx.y][threadIdx.x] = A[row * N + col];
__syncthreads(); // tile must be fully loaded before any thread reads it
int val = 0;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
if ((0 <= (i + col_i) && (i + col_i) < 16)) // clamp window to tile bounds
if ((0 <= (j + row_i) && (j + row_i) < 16)) val += shm[j + row_i][i + col_i] * C[j * n + i];
B[row * N + col] = val;
}
// Host driver: builds a 4096x4096 image of 1s and a 3x3 filter of 2s in
// pinned host memory, runs the tiled conv2d kernel, prints the top-left
// 10x10 corner of the result and the event-timed kernel duration.
// Fixes vs. original: kernel launch errors are now surfaced, and the CUDA
// events are destroyed (they were leaked).
int main()
{
    int *A, *A_d, *B, *B_d, *C, *C_d;
    const int data_size = image_size * image_size * sizeof(int);
    const int kernel_size = filter_size * filter_size * sizeof(int);
    // Pinned host buffers: faster, async-capable transfers.
    cudaMallocHost(&A, data_size);
    cudaMallocHost(&B, data_size);
    cudaMallocHost(&C, kernel_size);
    for (int i = 0; i < image_size * image_size; i++)
        A[i] = 1;
    memset(B, 0, data_size);
    for (int i = 0; i < filter_size * filter_size; i++)
        C[i] = 2;
    cudaMalloc(&A_d, data_size);
    cudaMalloc(&B_d, data_size);
    cudaMalloc(&C_d, kernel_size);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMemcpy(A_d, A, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, kernel_size, cudaMemcpyHostToDevice);
    const int block_size = 16;
    const int grid_size = (image_size + block_size - 1) / block_size; // ceil-div
    dim3 grid(grid_size, grid_size);
    dim3 block(block_size, block_size);
    cudaEventRecord(start);
    conv2d<<<grid, block>>>(A_d, B_d, C_d, image_size, filter_size);
    // Surface launch-configuration errors, which the original ignored.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "conv2d launch failed: " << cudaGetErrorString(err) << "\n";
        return 1;
    }
    cudaEventRecord(stop);
    cudaMemcpy(B, B_d, data_size, cudaMemcpyDeviceToHost); // blocks until kernel done
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++)
            std::cout << B[i * image_size + j] << " ";
        std::cout << "\n";
    }
    std::cout << "Kernel run time: " << milliseconds << " ms\n";
    cudaEventDestroy(start); // events were created but never destroyed
    cudaEventDestroy(stop);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
}
|
21,728 | #include "includes.h"
// Element-wise cube: each thread reads one double from d_in and writes its
// third power to d_out. Indexed by threadIdx.x only, so a single-block
// launch covering the whole array is assumed.
__global__ void cube(double* d_out, double* d_in)
{
    const int i = threadIdx.x;
    const double x = d_in[i];
    d_out[i] = x * x * x;
}
21,729 | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdlib>
#include <iostream>
// Deliberately warp-divergent kernel used for timing comparisons: even
// lanes scale their element by (lane + 1), odd lanes scale by (lane % 5).
// NOTE(review): no i < N guard -- assumes the grid exactly covers the array.
__global__ void DivergencyKernel(float* a, int N) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int lane = threadIdx.x;
    if (lane % 2 != 0) {
        a[i] = a[i] * (lane % 5);
    } else {
        a[i] = a[i] * (lane + 1);
    }
}
// Divergence-free reference kernel: every thread unconditionally writes its
// lane index into its element. N is accepted for signature symmetry with
// DivergencyKernel but is unused; no bounds guard, matching that variant.
__global__ void NoDivergencyKernel(float* a, int N) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    a[i] = threadIdx.x;
}
// Times the divergent vs. divergence-free kernel on an N-element array
// (N from argv[1]) using CUDA events.
// Fix vs. original: the launches passed <<<BLOCK_SIZE, GRID_SIZE>>>, i.e.
// 1024 blocks of ceil(N/1024) threads. That over-runs the array whenever N
// is not a multiple of 1024 (neither kernel bounds-checks) and destroys the
// intra-warp divergence pattern the benchmark is meant to measure. The
// execution configuration is <<<grid, block>>>.
int main(int argc, char** argv) {
    if (argc == 2) {
        int N = atoi(argv[1]);
        size_t size = N * sizeof(float);
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        float* h_A = (float*)malloc(size);
        if (h_A == NULL) {
            std::cerr << "Failed malloc for h_A!\n";
            return 1;
        }
        for (int i = 0; i < N; i++) {
            h_A[i] = i + 1;
        }
        float* d_A = NULL;
        cudaMalloc((void**)&d_A, size);
        cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
        const int BLOCK_SIZE = 1024;
        const int GRID_SIZE = (N - 1) / BLOCK_SIZE + 1;  // ceil(N / BLOCK_SIZE)
        cudaEventRecord(start);
        DivergencyKernel<<<GRID_SIZE, BLOCK_SIZE>>>(d_A, N);  // was <<<block, grid>>>
        cudaDeviceSynchronize();
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float msecs = 0;
        cudaEventElapsedTime(&msecs, start, stop);
        std::cout << "(Divergency) Kernel Time: " << msecs << " ms.\n";
        cudaEventRecord(start);
        NoDivergencyKernel<<<GRID_SIZE, BLOCK_SIZE>>>(d_A, N);  // was <<<block, grid>>>
        cudaDeviceSynchronize();
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&msecs, start, stop);
        std::cout << "(Non-Divergency) Kernel Time: " << msecs << " ms.\n";
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaFree(d_A);
        free(h_A);
    }
    return 0;
}
21,730 | /* This is a somewhat naive matrix-multiplication implementation. The most
* glaring shortcoming is the lack of shared-memory usage.
*
* Compile with `nvcc matrix_multiplication.cu`.
*
* Author: Christopher Mitchell <chrism@lclark.edu>
* Date: 2011-07-15
*/
#include <stdio.h>
#include <stdlib.h>
typedef struct {
int width;
int height;
// This should point to a row-major array of elements. We use a 1D array
// instead of a 2d array to represent our matrix because copying 2D data to
// the device is more complex.
float *elements;
} Matrix;
/* Matrix multiply A by B on the CPU, storing the product into result.
 * Assumes result is A->height x B->width and A->width == B->height.
 * Fixes vs. original: the accumulator is now a float (it was an int, which
 * truncated every fractional product), and the loop bounds use the correct
 * dimensions -- the column loop runs over B->width and the inner loop over
 * the shared dimension A->width (both previously used A's dimensions, which
 * only happened to work for square matrices). */
void mat_mult_host(Matrix *result, Matrix *A, Matrix *B) {
    int row, col, idx;
    float sum;
    for (col=0; col < (B->width); col++) {
        for (row=0; row < (A->height); row++) {
            sum = 0.0f;
            for (idx=0; idx < (A->width); idx++) {
                sum += A->elements[row * A->width + idx] * B->elements[idx * B->width + col];
            }
            result->elements[row * result->width + col] = sum;
        }
    }
}
/* The matrix multiplication kernel used by the mat_mult_dev function.
 * One thread computes one cell of `result`. Fixes vs. original: the
 * accumulator is a float (it was an int, truncating fractional products),
 * the inner loop runs over the shared dimension A.width, and a bounds guard
 * protects against launches that overshoot the result matrix. */
__global__ void kernel_mat_mult(Matrix result, Matrix A, Matrix B) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= result.height || col >= result.width)
        return;
    float sum = 0.0f;
    for (int idx = 0; idx < A.width; idx++) {
        sum += A.elements[row * A.width + idx] * B.elements[idx * B.width + col];
    }
    result.elements[row * result.width + col] = sum;
}
/* Matrix multiply A by B on the GPU, storing the product into result. */
/* Steps: copy A's and B's element buffers to the device, launch a single
 * block of result->width x result->height threads (one thread per output
 * cell), copy the product back, and release the device buffers.
 * NOTE(review): none of the cudaMalloc/cudaMemcpy/launch calls are checked,
 * so a failure would only show up as a wrong (or stale) result. */
void mat_mult_dev(Matrix *result, Matrix *A, Matrix *B) {
int size;
Matrix A_dev, B_dev, C_dev;
/* When we copy the input matrices to the device, we only copy the elements
* and not the entire structure. We do this to avoid the complexity of
* having to find the size structures with variable length arrays.
*/
A_dev.width = A->width;
A_dev.height = A->height;
size = sizeof(float) * A->width * A->height;
cudaMalloc(&A_dev.elements, size);
cudaMemcpy(A_dev.elements, A->elements, size, cudaMemcpyHostToDevice);
B_dev.width = B->width;
B_dev.height = B->height;
size = sizeof(float) * B->width * B->height;
cudaMalloc(&B_dev.elements, size);
cudaMemcpy(B_dev.elements, B->elements, size, cudaMemcpyHostToDevice);
C_dev.width = result->width;
C_dev.height = result->height;
size = sizeof(float) * result->width * result->height;
cudaMalloc(&C_dev.elements, size);
/* Since the kernel only uses one block, once the number of cells in the
* result matrix exceeds the number of threads that can be in a block,
* things will fail. Thouh we don't do so in this program, one can find the
* max number of threads in a block that our card supports by:
* cudaDeviceProp devProps = cudaGetDeviceProperties(&devProps, 0);
* printf("Max threads per block: %d", devProps.maxThreadsPerBlock);
*/
dim3 dimGrid(1,1,1);
dim3 dimBlock(result->width, result->height, 1);
kernel_mat_mult<<<dimGrid,dimBlock>>>(C_dev, A_dev, B_dev);
// retrieve the result from the GPU
// (synchronous cudaMemcpy implicitly waits for the kernel to finish)
cudaMemcpy(result->elements, C_dev.elements, size, cudaMemcpyDeviceToHost);
// free GPU memory
cudaFree(A_dev.elements);
cudaFree(B_dev.elements);
cudaFree(C_dev.elements);
}
/* Print the matrix so that each row is on a new line, and each value in a row
 * is separated by a space. A blank line terminates the matrix. */
void print_mat(Matrix *mat) {
    for (int r = 0; r < mat->height; r++) {
        for (int c = 0; c < mat->width; c++) {
            printf("%lf ", mat->elements[r * mat->width + c]);
        }
        printf("\n");
    }
    printf("\n");
}
/* Multiply an 8x8 test matrix by itself on both CPU and GPU and print the
 * two products for visual comparison.
 * Fix vs. original: the two result buffers were leaked. */
int main(void) {
    /* Initialize the matrix that we will be multiplying with itself */
    Matrix matrix = {8, 8}; // Set the width and height
    float elements[] = {1, 2, 3, 4, 5, 6, 7, 8,
                        2, 3, 4, 5, 6, 7, 8, 9,
                        3, 4, 5, 6, 7, 8, 9, 10,
                        4, 5, 6, 7, 8, 9, 10, 11,
                        5, 6, 7, 8, 9, 10, 11, 12,
                        6, 7, 8, 9, 10, 11, 12, 13,
                        7, 8, 9, 10, 11, 12, 13, 14,
                        8, 9, 10, 11, 12, 13, 14, 15};
    matrix.elements = elements; // Set the elements
    /* Allocate space for the result matrices. */
    Matrix cpu_result = {8, 8};
    Matrix gpu_result = {8, 8};
    cpu_result.elements = (float *) malloc(sizeof(float) * cpu_result.width * cpu_result.height);
    gpu_result.elements = (float *) malloc(sizeof(float) * gpu_result.width * gpu_result.height);
    printf("CPU result:\n");
    mat_mult_host(&cpu_result, &matrix, &matrix);
    print_mat(&cpu_result);
    printf("GPU result:\n");
    mat_mult_dev(&gpu_result, &matrix, &matrix);
    print_mat(&gpu_result);
    free(cpu_result.elements); // previously leaked
    free(gpu_result.elements);
    return 0;
}
|
21,731 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#define ITER 100000
#define THREAD_PER_BLOCK 10
#define PI 3.1415926535
#define RAD(X) X *(PI / 180.0)
/* Fill sin_arr, cos_arr and tan_arr (each ITER entries) with the sine,
 * cosine and tangent of 0..ITER-1, where each index is interpreted as
 * degrees and converted to radians via the RAD macro. */
void calculator(float *sin_arr, float *cos_arr, float *tan_arr)
{
    for (int deg = 0; deg < ITER; ++deg)
    {
        const float r = RAD(deg);
        sin_arr[deg] = sinf(r);
        cos_arr[deg] = cosf(r);
        tan_arr[deg] = tanf(r);
    }
}
/* Host-only driver bracketed by CUDA profiler start/stop markers: allocate
 * the three ITER-long result arrays, fill them via calculator(), print
 * every entry, then release the buffers. */
int main()
{
    cudaProfilerStart();
    float *sin_arr = (float *)malloc(sizeof(float) * ITER);
    float *cos_arr = (float *)malloc(sizeof(float) * ITER);
    float *tan_arr = (float *)malloc(sizeof(float) * ITER);
    calculator(sin_arr, cos_arr, tan_arr);
    for (int i = 0; i < ITER; i++)
    {
        printf("sin (%d) = %f cos (%d) = %f tan (%d) = %f\n", i, sin_arr[i], i, cos_arr[i], i, tan_arr[i]);
    }
    free(sin_arr);
    free(cos_arr);
    free(tan_arr);
    cudaProfilerStop();
    return 0;
}
21,732 |
#include <time.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
void loadData( char* fileName , int n , float* x , float* y , float* mass , int* actual ) ;
float getVal( char* str , int start , int subLen ) ;
__global__ void iter( int n , float* xVel , float* yVel , float* x , float* y , float* mass , float G , float delt , float* xOut , float* yOut ) ;
/* N-body driver: loads up to argv[2] bodies from argv[1], runs argv[3]
 * Euler iterations on the GPU (ping-ponging position buffers between the
 * two iter<<<>>> launches), and prints final positions.
 * Fixes vs. original: m is initialized to 0 (it was read uninitialized if
 * loadData bailed out before setting *actual); the stray semicolon in
 * `if( status == cudaSuccess ) ;` made that guard a no-op, so the
 * synchronize ran and clobbered `status` even after an earlier failure;
 * and the host buffers (x, y, mass, zeros) are now freed. */
int main( int argc , char** argv )
{
    if( argc < 4 )
    {
        printf( "REQUIRED: Please provide (arg1) a file to load, (arg2) number of bodies, and (arg3) the number of iterations\n" ) ;
        printf( "OPTIONAL: (arg4) time-step size in seconds (default=1000.0)\n" ) ;
        return( 0 ) ;
    }
    float G = 6.67384 * ((float) pow( 10.0 , -11.0 )) ; // Newton's gravitational constant
    int maxIter = atoi( argv[3] ) ;
    float delt = 1000.0 ;
    if( argc > 4 )
        delt = atof( argv[4] ) ;
    int n = atoi( argv[2] ) ;
    int m = 0 ; // actual body count; stays 0 if the data file fails to load
    float* x = (float*) malloc( n * sizeof(float) ) ;
    float* y = (float*) malloc( n * sizeof(float) ) ;
    float* mass = (float*) malloc( n * sizeof(float) ) ;
    loadData( argv[1] , n , x , y , mass , &m ) ;
    struct timeval startALL, endALL ;
    gettimeofday( &startALL , NULL ) ;
    ////////////////// ALLOCATE MEMORY ON DEVICE PRIOR to COMPUTATION
    float* d_x ;
    float* d_y ;
    float* d_mass ;
    float* d_xVel ;
    float* d_yVel ;
    float* d_xTemp ;
    float* d_yTemp ;
    float* zeros = (float*) malloc( n * sizeof(float) ) ; // initial velocities
    int i ;
    for( i = 0 ; i < n ; i ++ )
        zeros[i] = 0.0 ;
    // Each step only proceeds if every previous CUDA call succeeded.
    cudaError_t status = cudaMalloc( &d_x , n * sizeof(float) ) ;
    if( status == cudaSuccess )
        status = cudaMalloc( &d_y , n * sizeof(float) ) ;
    if( status == cudaSuccess )
        status = cudaMalloc( &d_mass , n * sizeof(float) ) ;
    if( status == cudaSuccess )
        status = cudaMalloc( &d_xVel , n * sizeof(float) ) ;
    if( status == cudaSuccess )
        status = cudaMalloc( &d_yVel , n * sizeof(float) ) ;
    if( status == cudaSuccess )
        status = cudaMalloc( &d_xTemp , n * sizeof(float) ) ;
    if( status == cudaSuccess )
        status = cudaMalloc( &d_yTemp , n * sizeof(float) ) ;
    if( status == cudaSuccess )
        status = cudaMemcpy( d_x , x , n * sizeof(float) , cudaMemcpyHostToDevice ) ;
    if( status == cudaSuccess )
        status = cudaMemcpy( d_y , y , n * sizeof(float) , cudaMemcpyHostToDevice ) ;
    if( status == cudaSuccess )
        status = cudaMemcpy( d_mass , mass , n * sizeof(float) , cudaMemcpyHostToDevice ) ;
    if( status == cudaSuccess )
        status = cudaMemcpy( d_xVel , zeros , n * sizeof(float) , cudaMemcpyHostToDevice ) ;
    if( status == cudaSuccess )
        status = cudaMemcpy( d_yVel , zeros , n * sizeof(float) , cudaMemcpyHostToDevice ) ;
    //////////////// ASYNCHRONOUS QUEUE JOBS ON DEVICE
    struct timeval startKERNEL , endKERNEL ;
    gettimeofday( &startKERNEL , NULL ) ;
    for( i = 0 ; i < maxIter ; i+=2 )
    {
        // Ping-pong the position buffers: x/y -> temp, then temp -> x/y.
        iter<<< m/32+1 , 32 >>>( m , d_xVel , d_yVel , d_x , d_y , d_mass , G , delt , d_xTemp , d_yTemp ) ;
        iter<<< m/32+1 , 32 >>>( m , d_xVel , d_yVel , d_xTemp , d_yTemp , d_mass , G , delt , d_x , d_y ) ;
    }
    if( status == cudaSuccess ) // stray ';' removed: guard now applies
        status = cudaDeviceSynchronize() ;
    gettimeofday( &endKERNEL , NULL ) ;
    fprintf( stderr , "GPU kernel time: %ld microseconds\n", ((endKERNEL.tv_sec * 1000000 + endKERNEL.tv_usec)
        - (startKERNEL.tv_sec * 1000000 + startKERNEL.tv_usec)));
    //////////////// COPY MEMORY TO HOST
    if( status == cudaSuccess )
        status = cudaMemcpy( x , d_x , n * sizeof(float) , cudaMemcpyDeviceToHost ) ;
    if( status == cudaSuccess )
        status = cudaMemcpy( y , d_y , n * sizeof(float) , cudaMemcpyDeviceToHost ) ;
    if( status != cudaSuccess )
        printf( "ERROR: %s\n" , cudaGetErrorString(status) ) ;
    cudaFree( d_x ) ;
    cudaFree( d_y ) ;
    cudaFree( d_mass ) ;
    cudaFree( d_xVel ) ;
    cudaFree( d_yVel ) ;
    cudaFree( d_xTemp ) ;
    cudaFree( d_yTemp ) ;
    gettimeofday( &endALL , NULL ) ;
    fprintf( stderr , "GPU kernel and comm time: %ld microseconds\n", ((endALL.tv_sec * 1000000 + endALL.tv_usec)
        - (startALL.tv_sec * 1000000 + startALL.tv_usec)));
    cudaDeviceReset() ;
    for( i = 0 ; i < m ; i++ )
        printf( "%f\t%f\n" , x[i] , y[i] ) ;
    free( zeros ) ; // host buffers were previously leaked
    free( x ) ;
    free( y ) ;
    free( mass ) ;
    return( 0 ) ;
}
/* One explicit-Euler n-body step. Thread `rank` owns one body: it sums the
 * gravitational force exerted on it by every other body, updates its
 * velocity in place (xVel/yVel), and writes its advanced position into
 * xOut/yOut -- x/y are left untouched so the host can ping-pong position
 * buffers between launches.
 * NOTE(review): exp/log/sqrt here are the double-precision math functions
 * applied to float data; coincident bodies would make log(0) = -inf, but
 * the strict >/< guards skip the axis whenever coordinates are equal. */
__global__ void iter( int n , float* xVel , float* yVel , float* x , float* y , float* mass , float G , float delt , float* xOut , float* yOut )
{
int rank = threadIdx.x + blockIdx.x * blockDim.x ;
if( rank >= n )
return ;
int i ;
float r ;
float xForce = 0.0 ;
float yForce = 0.0 ;
for( i = 0 ; i < n ; i++ )
{
if( i != rank )
{
// Calculations are done in exponentiated logs to reduce roundoffs
r = sqrt( (x[i] - x[rank])*(x[i] - x[rank]) + (y[i] - y[rank])*(y[i] - y[rank]) ) ;
if( x[i] > x[rank] )
xForce = xForce + exp( log(G) + log(mass[i]) + log(mass[rank]) + log( x[i] - x[rank] ) - 3.0*log( r ) ) ;
if( x[i] < x[rank] )
xForce = xForce - exp( log(G) + log(mass[i]) + log(mass[rank]) + log( x[rank] - x[i] ) - 3.0*log( r ) ) ;
// case: x[i] == x[rank] : do nothing
if( y[i] > y[rank] )
yForce = yForce + exp( log(G) + log(mass[i]) + log(mass[rank]) + log( y[i] - y[rank] ) - 3.0*log( r ) ) ;
if( y[i] < y[rank] )
yForce = yForce - exp( log(G) + log(mass[i]) + log(mass[rank]) + log( y[rank] - y[i] ) - 3.0*log( r ) ) ;
// case: y[i] == y[rank] : do nothing
}
}
// F = ma  =>  dv = F * dt / m ; then advance the position by the new velocity.
xVel[rank] = xVel[rank] + xForce * delt / mass[rank] ;
yVel[rank] = yVel[rank] + yForce * delt / mass[rank] ;
xOut[rank] = x[rank] + xVel[rank] * delt ;
yOut[rank] = y[rank] + yVel[rank] * delt ;
}
/* Parse up to n lines of "x y mass" (space-separated) from fileName into
 * the x/y/mass arrays; *actual receives the number of rows actually read
 * (left at whatever it held if the file cannot be opened -- callers should
 * initialize it). Stops early on a malformed line.
 * Fix vs. original: the y token was extracted with length delim3 - delim1
 * (measured from the wrong token start, overreading into token 3); the
 * correct span is delim3 - delim2. */
void loadData( char* fileName , int n , float* x , float* y , float* mass , int* actual )
{
    char temp[1000] ;
    FILE *file ;
    file = fopen( fileName , "r" ) ;
    if( file == NULL )
    {
        printf( "File failed to open!\n" ) ;
        return ;
    }
    *actual = 0 ;
    int delim1 , delim2 , delim3 ; // start indices of the three tokens
    int len , i , j , flag ;
    for( j = 0 ; fgets( temp , 1000 , file ) != NULL && j < n ; j++ )
    {
        len = strlen( temp ) ;
        /* delim1: first non-space character (start of token 1) */
        delim1 = -1 ;
        for( i = 0 ; i < len && delim1 < 0 ; i++ )
        {
            if( temp[i] != ' ' )
                delim1 = i ;
        }
        /* delim2: first non-space after at least one space (start of token 2) */
        delim2 = -1 ;
        flag = -1 ;
        for( i = delim1 + 1 ; i < len && delim2 < 0 ; i++ )
        {
            if( temp[i] == ' ' )
                flag = 1 ;
            if( temp[i] != ' ' && flag > 0 )
                delim2 = i ;
        }
        /* delim3: start of token 3, found the same way */
        delim3 = -1 ;
        flag = -1 ;
        for( i = delim2 + 1 ; i < len && delim3 < 0 ; i++ )
        {
            if( temp[i] == ' ' )
                flag = 1 ;
            if( temp[i] != ' ' && flag > 0 )
                delim3 = i ;
        }
        if( delim1 < 0 || delim2 < 0 || delim3 < 0 )
        {
            printf( "Input data formatting error\n" ) ;
            return ;
        }
        x[j] = getVal( temp , delim1 , delim2 - delim1 ) ;
        y[j] = getVal( temp , delim2 , delim3 - delim2 ) ; // was delim3 - delim1
        mass[j] = getVal( temp , delim3 , -1 ) ;
        *actual = *actual + 1 ;
    }
    fclose( file ) ;
}
/* Parse a float from str beginning at index start. A negative subLen means
 * "to the end of the string"; otherwise subLen extra characters are taken.
 * The span is copied into a NUL-terminated local buffer and fed to atof,
 * which stops at the first non-numeric character. */
float getVal( char* str , int start , int subLen )
{
    const int len = strlen( str ) ;
    const int count = ( subLen < 0 ) ? ( len - start + 1 ) : ( subLen + 1 ) ;
    char buf[count] ;
    for( int k = 0 ; k < count - 1 ; k++ )
        buf[k] = str[k + start] ;
    buf[count - 1] = '\0' ;
    return( atof( buf ) ) ;
}
|
21,733 | #include <stdio.h>
#define NUM_BLOCKS 1
#define BLOCKS_WIDTH 256
// Kernel: every launched thread prints a greeting with its index within the
// block. Device-side printf output only becomes visible after the host
// synchronizes (see main's cudaDeviceSynchronize call).
__global__ void hello()
{
printf("Hello world! I am thread %d\n", threadIdx.x);
}
/* Launch NUM_BLOCKS blocks of BLOCKS_WIDTH greeting threads, then wait so
 * the buffered device-side printf output is flushed before the host's
 * closing message. */
int main(int argc, char** argv)
{
    hello<<<NUM_BLOCKS, BLOCKS_WIDTH>>>();
    cudaDeviceSynchronize(); // flush device printf buffers
    printf("That's all!\n");
    return 0;
}
|
21,734 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <sys/time.h>
#define N 10240000
#define ThreadPerBlock 128
#define NSTREAM 4
// Element-wise vector ADD: output[i] = a[i] + b[i] for i < length.
// (The name says "multiply", but both the body and the host-side check
// c[i] == 2*i implement addition.)
__global__ void multiply(double * a, double *b , double * output, int length)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= length)
        return;
    output[gid] = a[gid] + b[gid];
}
/* Compare one bulk launch of the element-wise add kernel against NSTREAM
 * chunked launches on separate streams, then verify c[i] == 2*i.
 * Fixes vs. original: the reported times are converted to the milliseconds
 * the labels claim (they were printed in microseconds), the deprecated
 * cudaThreadSynchronize() is replaced by cudaDeviceSynchronize(), and all
 * three host mallocs are checked (only `a` was). */
int main()
{
    cudaSetDevice(1);
    int nbytes = N * sizeof (double);
    double *a, *b, *c;
    double *dev_A, *dev_B, *dev_C;
    a = (double *) malloc( N*sizeof(double));
    b = (double *) malloc( N*sizeof(double));
    c = (double *) malloc( N*sizeof(double));
    if(a == NULL || b == NULL || c == NULL){
        printf("Error malloc \n");
        exit(0);
    }
    int i;
    for (i = 0; i < N ; i++)
    {
        a[i] = i;
        b[i] = i;
    }
    assert(cudaMalloc((void**) &dev_A, nbytes) == cudaSuccess);
    assert(cudaMalloc((void**) &dev_B, nbytes) == cudaSuccess);
    assert(cudaMalloc((void**) &dev_C, nbytes) == cudaSuccess);
    assert(cudaMemcpy(dev_A, a, nbytes, cudaMemcpyHostToDevice) == cudaSuccess);
    assert(cudaMemcpy(dev_B, b, nbytes, cudaMemcpyHostToDevice) == cudaSuccess);
    struct timeval begin, end;
    gettimeofday(&begin, NULL);
    int nblock = N/ThreadPerBlock;
    if ( N % ThreadPerBlock) nblock ++;          // ceil-div for the tail
    multiply<<<nblock, ThreadPerBlock>>>(dev_A, dev_B, dev_C,N);
    assert( cudaDeviceSynchronize() == cudaSuccess ) ;
    gettimeofday(&end, NULL);
    double time = (1000000*(end.tv_sec - begin.tv_sec) + (end.tv_usec - begin.tv_usec)) / 1000.0;
    printf("One Stream time: %lf ms \n", time);
    cudaStream_t stream[NSTREAM];
    int n = N/NSTREAM;                           // elements per stream chunk
    printf("%d\n", n);
    assert( cudaDeviceSynchronize() == cudaSuccess ) ;
    gettimeofday(&begin, NULL);
    for (i = 0; i < NSTREAM; i++)
    {
        nblock = n/ThreadPerBlock;
        if(n % ThreadPerBlock) nblock++;
        assert(cudaStreamCreate(&stream[i])== cudaSuccess);
        multiply<<<nblock, ThreadPerBlock, 0, stream[i]>>>(&dev_A[i*n], &dev_B[i*n], &dev_C[i*n], n);
        // Destroying a stream is safe here: the runtime waits for the
        // stream's queued work before releasing it.
        assert(cudaStreamDestroy(stream[i])== cudaSuccess);
    }
    assert( cudaDeviceSynchronize() == cudaSuccess ) ;
    gettimeofday(&end, NULL);
    time = (1000000*(end.tv_sec - begin.tv_sec) + (end.tv_usec - begin.tv_usec)) / 1000.0;
    printf("%d Stream time: %lf ms \n",NSTREAM, time);
    assert(cudaMemcpy(c, dev_C, nbytes, cudaMemcpyDeviceToHost) == cudaSuccess);
    for (i = 0; i < N; i++)
    {
        int d = (int) c[i];
        int e = 2*i;
        if( d != e)
        {
            printf("Error, %d, %lf\n", i, c[i]);
            exit(0);
        }
    }
    printf("Passed!!\n");
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
    free(a);
    free(b);
    free(c);
}
|
21,735 | #include "includes.h"
// Each thread decodes one 101-character encoded record: every character
// that immediately follows a comma within the first 100 characters of the
// record is appended to that thread's 4-character slot of decodedText.
// Hard-coded for 15360 records; surplus threads exit via the guard.
__global__ void encode(char *encodedText, char *decodedText)
{
    const int rec = blockIdx.x*blockDim.x+threadIdx.x;
    if (rec >= 15360)
        return;
    const int src = rec * 101;   // start of this record in the encoded buffer
    int dst = rec * 4;           // next free slot in this record's output
    for (int p = src; p < src + 100; p++)
    {
        if (encodedText[p] == ',')
        {
            decodedText[dst] = encodedText[p + 1];
            dst++;
        }
    }
}
21,736 | #include "includes.h"
// Grid-stride initialization: set every element of clause_output and
// all_exclude (both CLAUSES long) to 1, regardless of launch geometry.
__global__ void initialize_clause_output_predict(int *clause_output, int *all_exclude)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int j = first; j < CLAUSES; j += step) {
        clause_output[j] = 1;
        all_exclude[j] = 1;
    }
}
21,737 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated single-thread stress kernel: repeatedly overwrites `comp`
 * through a fixed sequence of float math intrinsics driven by the 30 float
 * inputs and the two loop counts var_1/var_2, then prints the final value
 * with 17 significant digits. Earlier assignments exist to exercise the
 * compiler/hardware; comments below are descriptive only -- the file header
 * says "Do not modify". */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31) {
comp = +1.0262E-21f + tanhf((var_3 - -1.8370E6f + (+1.3846E36f - expf(+1.7265E-37f + -1.3232E36f))));
float tmp_1 = -1.2115E-17f;
comp = tmp_1 / var_4 / (var_5 + (var_6 - var_7));
if (comp == expf(-1.0314E-37f * (var_8 / +1.2603E-35f))) {
float tmp_2 = -1.2533E36f;
float tmp_3 = (var_9 * asinf((var_10 - var_11 * (var_12 - +0.0f))));
comp = tmp_3 + tmp_2 * (+1.9068E-44f * +0.0f * -1.3626E34f);
comp += sinf(ldexpf(+1.2663E-29f * powf(-0.0f, var_13 - +1.7495E29f + -1.5401E-37f + (var_14 + (var_15 * var_16))), 2));
}
// var_1 iterations; only the last pass's value of comp survives.
for (int i=0; i < var_1; ++i) {
comp = acosf(-1.2047E-37f * var_17 - var_18);
comp = cosf(sinhf(+1.8500E25f));
comp = acosf((var_19 + +1.7544E35f));
}
// var_2 iterations; likewise only the final fmodf result is kept.
for (int i=0; i < var_2; ++i) {
comp = var_20 - (-1.8848E35f - sqrtf((+1.4556E35f - (+1.6375E-37f - var_21 - -1.9039E34f + var_22 + var_23))));
comp = fmodf(+0.0f + var_24, (+1.0279E-37f / (var_25 - (var_26 + atan2f((var_27 - logf((var_28 / -0.0f * (var_29 + (var_30 * (+1.5087E34f * var_31)))))), +1.9838E21f)))));
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array on the host and fill every slot with v.
 * Ownership passes to the caller, which is responsible for free(). */
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    int k = 0;
    while (k < 10) {
        buf[k] = v;
        ++k;
    }
    return buf;
}
/* Parse 32 command-line values (all floats except argv[2]/argv[3], which
 * are the integer loop counts) and launch the generated kernel on a single
 * thread, waiting for its printed result.
 * NOTE(review): argc is never validated -- running with fewer than 32
 * arguments dereferences missing argv entries. The harness that generated
 * this file presumably always supplies them; confirm before reuse. */
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32);
// Wait for the kernel so its device-side printf is flushed before exit.
cudaDeviceSynchronize();
return 0;
}
|
21,738 | #include "includes.h"
const float REAL_VALUE_MAX = 1000000.0f;
const int NUM_THREADS = 32;
const int SIZE = 10000;
const int DIMENSION = 2;
// Clamp v into [mn, mx]; the defaults span the full +/-REAL_VALUE_MAX range.
__device__ float clamp(float v, float mn = -REAL_VALUE_MAX, float mx = REAL_VALUE_MAX) {
    if (v < mn) return mn;
    if (v > mx) return mx;
    return v;
}
// One particle-swarm update step over the flattened SIZE x DIMENSION arrays
// P (positions), V (velocities) and PB (per-particle bests); GB is the
// single DIMENSION-long global-best point. Position is advanced by the
// current velocity, then velocity is pulled toward the personal and global
// bests; both results are clamped into [clamp_min, clamp_max].
__global__ void updateParticleKernel(float* P, float* V, float* PB, float* GB, float momentum, float introvert, float extrovert, float clamp_min, float clamp_max) {
    const int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= SIZE * DIMENSION)
        return;
    // Position update uses the pre-update velocity; the velocity update
    // below then sees the already-moved position (as in the original).
    P[k] = clamp(P[k] + V[k], clamp_min, clamp_max);
    const float toward_self = introvert * (PB[k] - P[k]);
    const float toward_swarm = extrovert * (GB[k % DIMENSION] - P[k]);
    V[k] = clamp(momentum * V[k] + toward_self + toward_swarm, clamp_min, clamp_max);
}
21,739 | #include "includes.h"
// Expand per-(prism, reflection-slice) ray counts into flat per-ray lookup
// tables: for pair `gid`, the rays at positions [prefixSum[gid],
// prefixSum[gid] + raysPerPrism[gid]) receive that pair's prism index and
// reflection slice. One thread per pair; surplus threads exit immediately.
__global__ void mapPrefixSumToPrisms( const unsigned numberOfPrisms, const unsigned raysPerSample, const unsigned reflectionSlices, const unsigned* raysPerPrism, const unsigned* prefixSum, unsigned *indicesOfPrisms, unsigned *numberOfReflections ){
    const int gid = threadIdx.x + (blockIdx.x * blockDim.x);
    // Break if we have too many threads (this is likely).
    if (gid >= numberOfPrisms*reflectionSlices) return;
    const unsigned rayCount = raysPerPrism[gid];
    const unsigned base = prefixSum[gid];
    const unsigned slice = gid / numberOfPrisms;   // which reflection slice
    const unsigned prism = gid % numberOfPrisms;   // which prism in that slice
    for (unsigned ray = 0; ray < rayCount; ++ray) {
        indicesOfPrisms[base + ray] = prism;
        numberOfReflections[base + ray] = slice;
    }
}
21,740 | #include "includes.h"
// Each thread writes the square root of one input element to output.
// There is no length parameter, so the launch must cover the buffers
// exactly; any surplus thread would read/write out of bounds.
__global__ void simpleMPIKernel(float *input, float *output) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // sqrtf keeps the computation in single precision; the original called
    // the double-precision sqrt() on a float, forcing a widen/narrow round trip.
    output[tid] = sqrtf(input[tid]);
}
21,741 | //SAXPY - Single-Precision A*X Plus Y
#include <math.h>
#include <stdio.h>
#define TPB 256
#define ARRAY_SIZE 10000
/*
__device__ float ax(float x, float a){
return a*x;
}
*/
/* SAXPY kernel: y[i] += a * x[i] for i < n.
 * Fix vs. original: the host launches ceil(ARRAY_SIZE/TPB) blocks, so the
 * last block carries surplus threads -- without a bounds check they wrote
 * up to 240 elements past the arrays (10240 threads for 10000 elements).
 * `n` defaults to 10000 (== ARRAY_SIZE) so the existing three-argument
 * launch keeps working unchanged. */
__global__ void saxpyKernel(float *x, float *y, float a, int n = 10000){
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n)
        y[i] += x[i]*a;
}
__host__ void verify(float *x, float *y, float a, int N){
    /* CPU reference computation: apply y[i] += a*x[i] in place, mirroring
     * the device kernel so the caller can compare host and device output. */
    int k = 0;
    while (k < N){
        y[k] = y[k] + x[k]*a;
        ++k;
    }
}
/* SAXPY end-to-end check: run y += a*x on both device and host over
 * ARRAY_SIZE elements and compare the results within 1e-6.
 * Fix vs. original: the comparison called abs(), which with only <stdio.h>
 * included binds to the integer overload -- the float difference was
 * truncated toward zero, so any mismatch smaller than 1.0 passed silently.
 * fabsf() performs the intended single-precision comparison. */
int main(){
    //Host addresses
    float *xh = 0, *yh = 0;
    //Device addresses
    float *xd = 0, *yd = 0;
    const float a = 0.5;
    // Allocate device memory
    cudaMalloc(&xd, ARRAY_SIZE*sizeof(float));
    cudaMalloc(&yd, ARRAY_SIZE*sizeof(float));
    // Allocate host memory
    xh = (float *) malloc(ARRAY_SIZE*sizeof(float));
    yh = (float *) malloc(ARRAY_SIZE*sizeof(float));
    //Set some values
    for(int i = 0; i < ARRAY_SIZE; i++){
        xh[i] = i;
        yh[i] = i + 1;
    }
    //Copy memory to device
    cudaMemcpy(xd, xh, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(yd, yh, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    //Launch kernel
    printf("Computing SAXPY on the GPU...\n");
    saxpyKernel <<<(ARRAY_SIZE + TPB-1)/TPB, TPB>>>(xd, yd, a);
    //Sync
    cudaDeviceSynchronize();
    printf("Done!\n");
    //Run host verification
    printf("Computing SAXPY on the CPU...\n");
    verify(xh, yh, a, ARRAY_SIZE);
    printf("Done!\n");
    //Copy yd from device to xh, then compare that xh and yh are equal
    printf("Copying from device and comparing the output for device and host\n");
    cudaMemcpy(xh, yd, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
    bool eq = true;
    for(int i = 0; i < ARRAY_SIZE; i++){
        if (fabsf(xh[i] - yh[i]) >= 1e-6){  // was abs(): integer truncation
            eq = false;
            break;
        }
    }
    if (eq){
        printf("Successfully compared everything within a 1e-6 error margin.\n");
    }
    else{
        printf("Comparison failed for 1e-6 error margin.\n");
    }
    //Free memory
    cudaFree(xd); cudaFree(yd);
    free(xh); free(yh);
    return 0;
}
|
21,742 | #include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
#include <iostream>
#include <cstring>
#include <fstream>
#include <sstream>
using namespace std;
#define NO_OF_CHARS 256
#define SHAREDMEMPERBLOCK 32768
#define NUMTHREADSPERBLOCK 1024
//int n_blocks = n/block_size + (n%block_size==0?0:1);
int total=0;
/* GPU Boyer-Moore search. Each block stages a SHAREDMEMPERBLOCK-byte chunk
 * of the text into shared memory (each thread copies `offset` characters;
 * positions past the end of the text are filled with '*'), then each thread
 * scans its own `offset`-wide slice (extended by m so matches that straddle
 * slice boundaries are found) using the delta1 (good-suffix) and delta2
 * (bad-character) shift tables.
 * NOTE(review): a match only breaks the thread's local scan -- d_total is
 * never incremented, so the host's count stays at its initial value; the
 * commented-out printf suggests counting was never wired up. */
__global__ void boyer_moore (char *d_string, int n, const char* __restrict__ pat, int m,
const int * __restrict__ delta1, const int * __restrict__ delta2, int offset, int cblockSize, int *d_total){
int i;
int gid = cblockSize*blockIdx.x;
__shared__ char s_string[SHAREDMEMPERBLOCK];
int idx = threadIdx.x;
int sharedIndex;
int globalIndex;
// Cooperative load: thread idx copies characters idx, idx+blockDim.x, ...
for(i=0;i<offset;i++){
sharedIndex = idx + i*blockDim.x;
globalIndex = sharedIndex+blockIdx.x*cblockSize;
if(globalIndex<n)
s_string[sharedIndex] = d_string[globalIndex];
else
s_string[sharedIndex] = '*'; //assume * not in d_string
}
__syncthreads();
int beg = idx*offset;
int end = min (beg+offset+m, SHAREDMEMPERBLOCK);
i = beg;
while (i < end) {
// Compare the pattern right-to-left at position i.
int j = m-1;
while (j >= 0&&(s_string[i+j] == pat[j])) {
--j;
}
if (j < 0) {
//printf("\nFound at: %d %d %d %d \n",gid+i+1,beg,end,*d_total);
break;
}
// Advance by the larger of the good-suffix and bad-character shifts.
i += max(delta1[j+1] , j - delta2[s_string[i+j]]);
}
}
/* Build the Boyer-Moore bad-character table: badchar[c] holds the index of
 * the last occurrence of byte c in str, or -1 if c does not appear. */
void badCharHeuristic( char *str, int size,
int badchar[NO_OF_CHARS])
{
    for (int c = 0; c < NO_OF_CHARS; c++)
        badchar[c] = -1;
    for (int k = 0; k < size; k++)
        badchar[(int) str[k]] = k;
}
/* Boyer-Moore good-suffix preprocessing (case 1): fills bpos[0..m] with the
 * start position of the widest border of each suffix of pat, and records
 * shift distances in shift[] wherever the border search fails.
 * Preconditions: both arrays have m+1 entries and shift[] is
 * zero-initialized by the caller (see main, which zeroes delta1). */
void preprocess_strong_suffix(int *shift, int *bpos,
char *pat, int m)
{
// m is the length of pattern
int i=m, j=m+1;
bpos[i]=j;
while(i>0)
{
/*if character at position i-1 is not equivalent to
character at j-1, then continue searching to right
of the pattern for border */
while(j<=m && pat[i-1] != pat[j-1])
{
/* the character preceding the occurence of t in
pattern P is different than mismatching character in P,
we stop skipping the occurences and shift the pattern
from i to j */
if (shift[j]==0)
shift[j] = j-i;
//Update the position of next border
j = bpos[j];
}
/* p[i-1] matched with p[j-1], border is found.
store the beginning position of border */
i--;j--;
bpos[i] = j;
}
}
//Preprocessing for case 2
/* Boyer-Moore good-suffix preprocessing (case 2): every shift[] entry left
 * at 0 by preprocess_strong_suffix is filled with the width of the
 * pattern's widest border, so a safe shift always exists. Must be called
 * after preprocess_strong_suffix on the same shift/bpos arrays. */
void preprocess_case2(int *shift, int *bpos,
char *pat, int m)
{
int i, j;
j = bpos[0];
for(i=0; i<=m; i++)
{
/* set the border postion of first character of pattern
to all indices in array shift having shift[i] = 0 */
if(shift[i]==0)
shift[i] = j;
/* suffix become shorter than bpos[0], use the position of
next widest border as value of j */
if (i==j)
j = bpos[j];
}
}
//char h_string[200];
//char h_pat[4];
/* Driver: reads the whole of data.txt into a string, builds the Boyer-Moore
 * shift tables for the fixed pattern "ATC", copies text/pattern/tables to
 * the device, and launches one block per conceptual text chunk.
 * NOTE(review): h_string aliases text.c_str() via a const-cast -- safe here
 * only because the buffer is never written; d_total is copied in and out
 * but the kernel never writes it, so the printed count is always its
 * initial value (1); no CUDA call is error-checked. */
int main(int argc, char const *argv[]){
cout<<"1.\n";
char *d_s, *d_p;
int *d_d1, *d_d2;
ifstream t("data.txt");
stringstream buffer;
buffer << t.rdbuf();
string text = buffer.str();
char *h_string = ( char*)text.c_str();
char h_pat[]={"ATC"};
//cin>>h_string>>h_pat;
/*for(int i=0;i<100;i++)
h_string[i] = 'a'+(i%26);
for(int i=0;i<10;i++)
h_pat[i] = 'a' + (i%26);
*/
cout<<"2.\n";
int stringlen = strlen(h_string);
int patlen = strlen(h_pat);
// delta1 = good-suffix shifts (zeroed before preprocessing), delta2 = bad-character table.
int *delta1 = (int*)malloc(sizeof(int)*(patlen+1));for(int i=0;i<patlen+1;i++) delta1[i]=0;
int *bpos = (int*)malloc(sizeof(int)*(patlen+1));
int delta2[NO_OF_CHARS];
//cout<<h_string<<" "<<h_pat<<endl;
preprocess_strong_suffix(delta1, bpos, h_pat, patlen);
preprocess_case2(delta1, bpos, h_pat, patlen);
badCharHeuristic(h_pat, patlen, delta2);
cudaMalloc(&d_s, stringlen*sizeof(char));
cudaMemcpy(d_s, h_string,stringlen*sizeof(char),cudaMemcpyHostToDevice);
cudaMalloc(&d_p, patlen*sizeof(char));
cudaMemcpy(d_p, h_pat,patlen*sizeof(char),cudaMemcpyHostToDevice);
cudaMalloc(&d_d1, (patlen+1)*sizeof(int));
cudaMemcpy(d_d1, delta1,(patlen+1)*sizeof(int),cudaMemcpyHostToDevice);
cudaMalloc(&d_d2, NO_OF_CHARS*sizeof(int));
cudaMemcpy(d_d2, delta2,NO_OF_CHARS*sizeof(int),cudaMemcpyHostToDevice);
int& n=stringlen;
int& m=patlen;
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,0);
int sm_size = SHAREDMEMPERBLOCK;//devProp.sharedMemPerBlock/2; //so that atleast 2 blocks can be scheduled simultaneously
// Chunks overlap by m-1 chars so matches across block edges are found.
int conceptualBlockSize = SHAREDMEMPERBLOCK- m + 1;
int n_blocks = (n-1)/(conceptualBlockSize) + 1;//number of blocks
int threadsPerBlock = NUMTHREADSPERBLOCK;//devProp.maxThreadsPerBlock;//max threads
int offset = sm_size/threadsPerBlock;// number of characters each thread loads into shared mem =D
int *d_total;
int h_total[2];
h_total[0]=1;
h_total[1]=1;
cudaMalloc((void**)&d_total,2*sizeof(int));
cudaMemcpy(d_total,&h_total,2*sizeof(int),cudaMemcpyHostToDevice);
boyer_moore<<<n_blocks,threadsPerBlock>>>(d_s, n, d_p, m, d_d1, d_d2,
offset,conceptualBlockSize,d_total);
cudaMemcpy(h_total,d_total,2*sizeof(int),cudaMemcpyDeviceToHost);
printf("\n%d\n",h_total[0]);
cout<<"\nlol\n";
return 0;
}
//things to care of
//1. Number of Blocks fixed to 2000
//2. |
21,743 | #include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
//code that does the main job on GPU
// Trial-division primality test. With the 1D launch used by main (row is
// always 0), the thread with flattened index t tests the number i = t + 2
// and sets is_prime_d[i] when no divisor in [2, i) exists. n_array_d is
// accepted but never read.
// NOTE(review): there is no upper-bound guard -- a grid covering n threads
// writes is_prime_d[n] and is_prime_d[n+1], two slots past an n-element
// buffer. The caller must over-allocate, or the kernel needs an explicit
// limit parameter.
__global__ void countNumOfPrimerKernel(int* n_array_d, bool* is_prime_d)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int i = row * blockDim.x + col + 2;
// printf("gridDim: %d,block.x: %d, block.y: %d, thread.x: %d, thread:y: %d,\
// col: %d, row: %d, i: %d\n",gridDim.x, blockIdx.x, blockIdx.y, threadIdx.x,\
// threadIdx.y, col, row, i);
bool has_factor = false;
for (int j = 2; j < i; ++j)
{
if (i % j == 0)
{
has_factor = true;
break;
}
}
if (!has_factor)
{
is_prime_d[i] = true;
}
}
/* Count the primes below n (argv[2]) on the GPU and report the wall time.
 * Fixes vs. original: the device-pointer variables were first `new[]`-ed on
 * the host and then overwritten by cudaMalloc, leaking both blocks; the
 * device is_prime buffer is over-allocated by 2 because the kernel writes
 * indices up to n+1; the deprecated cudaThreadSynchronize() is replaced;
 * and the reported time divides microseconds by 1e6 (the original divided
 * by 1e5, overstating the runtime tenfold while labeling it seconds). */
int main(int argc, char** argv)
{
    struct timeval start, end;
    gettimeofday(&start, NULL);
    if (argc != 3)
    {
        std::cout << "USAGE: num_prime <number of blocks> <integer>"
            << std::endl;
        return -1;
    }
    int num_blocks = atoi(argv[1]);
    int n = atoi(argv[2]);
    cudaSetDevice(0);
    int* n_array = new int[n];
    int* n_array_d = NULL;   // filled by cudaMalloc below (was leaked new[])
    bool* is_prime = new bool[n];
    bool* is_prime_d = NULL; // filled by cudaMalloc below (was leaked new[])
    for (int i = 0; i < n; ++i)
    {
        n_array[i] = i + 1;
        is_prime[i] = false;
    }
    cudaMalloc((void**)&n_array_d, n * sizeof(int));
    cudaMemcpy(n_array_d, n_array, n * sizeof(int), cudaMemcpyHostToDevice);
    // +2: the kernel tests i = threadIndex + 2 and can write up to index n+1.
    cudaMalloc((void**)&is_prime_d, (n + 2) * sizeof(bool));
    cudaMemcpy(is_prime_d, is_prime, n * sizeof(bool), cudaMemcpyHostToDevice);
    num_blocks = 1000 ; // NOTE: overrides the argv[1] value, as before
    // use fixed number of thread in each block for convience
    int num_thread = n / num_blocks;
    printf("num_blocks: %d, num_thread: %d\n", num_blocks, num_thread);
    countNumOfPrimerKernel<<< dim3(num_blocks), dim3(num_thread)>>>(n_array_d, is_prime_d);
    cudaDeviceSynchronize();
    cudaMemcpy(is_prime, is_prime_d, n * sizeof(bool), cudaMemcpyDeviceToHost);
    int sum_num_prime = 0;
    for (int i = 0; i < n; ++i)
    {
        if (is_prime[i])
        {
            ++sum_num_prime;
            std::cout << "prime: " << i << std::endl;
        }
    }
    std::cout << "Number of primes between 0 and " << n << " is: "
        << sum_num_prime << std::endl;
    cudaFree(n_array_d);
    cudaFree(is_prime_d);
    delete[] n_array;
    delete[] is_prime;
    gettimeofday(&end, NULL);
    double time_gap = (end.tv_sec - start.tv_sec) * 1000000u
        + end.tv_usec - start.tv_usec;
    printf("Time cost: %.2lf s.\n", time_gap / 1000000.0); // was /1e5
    return 0;
}
|
21,744 | #include "includes.h"
const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// THIS UPDATE DOES NOT UPDATE ELOSS?
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// For every newly detected spike (indices counter[1]..counter[0]-1) whose
// filter overlaps this block's filter (UtU test), recomputes the spatial
// projections of the data onto this filter's Nrank spatial components and
// writes them into dprod at all nt0 time offsets around the spike time.
// One block per filter (bid); tid spans time offsets [-nt0, blockDim.x-nt0).
// Params packs sizes: [0]=NT, [1]=Nfilt, [4]=nt0, [6]=Nrank, [9]=Nchan,
// [10]=NchanU (only these slots are read here).
// NOTE(review): assumes blockDim.x >= NchanU so the shared-memory staging
// below covers all NchanU channels -- confirm against the launch config.
__global__ void spaceFilterUpdate(const double *Params, const float *data, const float *U, const bool *UtU, const int *iC, const int *iW, float *dprod, const int *st, const int *id, const int *counter){
// Spatial components of this filter, staged in shared memory.
volatile __shared__ float sU[32*NrankMax];
// Channel indices this filter touches.
volatile __shared__ int iU[32];
float x;
int tid, bid, ind, nt0, i, t, k, Nrank, NT, Nfilt, NchanU, Nchan;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
Nrank = (int) Params[6];
NchanU = (int) Params[10];
nt0 = (int) Params[4];
Nchan = (int) Params[9];
// just need to do this for all filters that have overlap with id[bid] and st[id]
// tidx still represents time, from -nt0 to nt0
// tidy loops through all filters that have overlap
if (tid<NchanU)
iU[tid] = iC[tid + NchanU * iW[bid]];
__syncthreads();
if (tid<NchanU)
for (k=0;k<Nrank;k++)
sU[tid + k * NchanU] = U[iU[tid] + Nchan * bid + Nchan * Nfilt * k];
__syncthreads();
// Scan the new spikes; only spikes on overlapping filters matter.
for(ind=counter[1];ind<counter[0];ind++)
if (UtU[id[ind] + Nfilt *bid]){
t = st[ind] + tid - nt0;
// if this is a hit, threads compute all time offsets
// (bitwise & below acts as logical AND on the two 0/1 comparisons)
if (t>=0 & t<NT){
for (k=0;k<Nrank;k++){
x = 0.0f;
// Dot product of the data snippet with spatial component k.
for(i=0;i<NchanU;i++)
x += sU[i + NchanU*k] * data[t + NT * iU[i]];
dprod[t + NT*bid + k*NT*Nfilt] = x;
}
}
}
} |
21,745 | #include "includes.h"
// Element-wise cube: thread i writes d_out[i] = d_in[i]^3.
// Only threadIdx.x is used, so this is correct for single-block launches.
__global__ void cube(float* d_out, float* d_in) {
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v * v;
} |
21,746 | #include <stdio.h>
#include <stdlib.h>
// 3x3 weighted blur: one thread per output pixel of the N1 x N2 image.
// The normaliser is the full filter weight sum even at image borders
// (out-of-image taps are skipped but still counted in the denominator),
// matching the original behaviour.
__global__ void blur_kernel(int *image_d,float *filter_d,int *blurimage_d,int N1,int N2) {
    int r = threadIdx.x + blockDim.x * blockIdx.x;
    int c = threadIdx.y + blockDim.y * blockIdx.y;
    if (r >= N1 || c >= N2)
        return;
    // Total filter weight (same for every pixel).
    float wsum = 0.0f;
    for (int k = 0; k < 9; ++k)
        wsum += filter_d[k];
    // Weighted sum over the 3x3 neighbourhood, skipping taps outside the image.
    float acc = 0.0f;
    for (int dr = -1; dr <= 1; ++dr) {
        for (int dc = -1; dc <= 1; ++dc) {
            int rr = r + dr;
            int cc = c + dc;
            if (rr >= 0 && cc >= 0 && rr < N1 && cc < N2)
                acc += image_d[rr * N2 + cc] * filter_d[(dr + 1) * 3 + (dc + 1)];
        }
    }
    blurimage_d[r * N2 + c] = (int)(acc / wsum);
}
// Demo driver: builds a random 100x100 image, blurs it on the GPU with a
// fixed 3x3 weighted filter, and prints the image before and after.
int main() {
int N1,N2;
N1 = 100;
N2 = 100;
// Host image filled with random 8-bit intensities.
int *image_h = (int*)malloc(N1*N2*sizeof(int));
int i,j;
for(i=0;i<N1;i++) {
for(j=0;j<N2;j++) {
*(image_h + i*N2 + j) = rand()%256;
}
}
// 3x3 blur weights, flattened row-major for the device.
float f[3][3] = {{1.0,2.0,1.0},{2.0,3.0,2.0},{1.0,2.0,1.0}};
float *filter_h = (float*)malloc(3*3*sizeof(float));
for(i=0;i<3;i++) {
for(j=0;j<3;j++) {
*(filter_h + i*3 + j) = f[i][j];
}
}
printf("Original Image:\n");
for(i=0;i<N1;i++) {
for(j=0;j<N2;j++) {
printf("%d ",*(image_h + i*N2 + j));
}
printf("\n");
}
printf("\n");
int *image_d,*blurimage_d;
float *filter_d;
cudaMalloc((void**)&image_d,N1*N2*sizeof(int));
cudaMalloc((void**)&filter_d,3*3*sizeof(float));
cudaMalloc((void**)&blurimage_d,N1*N2*sizeof(int));
cudaMemcpy(image_d,image_h,N1*N2*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(filter_d,filter_h,3*3*sizeof(float),cudaMemcpyHostToDevice);
// 10x10 blocks of 10x10 threads: exactly one thread per pixel.
dim3 grid(10,10);
dim3 block(10,10);
blur_kernel<<<grid,block>>>(image_d,filter_d,blurimage_d,N1,N2);
// Blocking D2H copy doubles as synchronization; the blurred result
// overwrites the original host image.
cudaMemcpy(image_h,blurimage_d,N1*N2*sizeof(int),cudaMemcpyDeviceToHost);
printf("Blurred Image:\n");
for(i=0;i<N1;i++) {
for(j=0;j<N2;j++) {
printf("%d ",*(image_h + i*N2 + j));
}
printf("\n");
}
printf("\n");
free(image_h);
free(filter_h);
cudaFree(image_d);
cudaFree(filter_d);
cudaFree(blurimage_d);
}
|
21,747 | /******************************************************
* CUDA Sum Reduction
* By: Sairam Krishnan
* Date: May 6, 2014
* Compile command: nvcc -arch=sm_20 reduction.cu
******************************************************/
#include <cuda.h>
#include <stdio.h>
#define N 100
#define NTHRDS 8
#define NBLKS (((N) + (NTHRDS-1)) / (NTHRDS))
// Returns the smallest power of two >= n (n >= 1).
// The original always doubled one extra time, returning 2n when n was
// already a power of two, which made the stage-2 launch allocate twice
// the threads and shared memory it needed.
int getNearestPowerOfTwo(int n) {
    int p = 1;
    while (p < n) {
        p <<= 1;
    }
    return p;
}
// Block-level tree sum: each block reduces its blockDim.x-sized slice of
// input[] (clipped to the first N elements) in dynamic shared memory and
// writes the partial sum to output[blockIdx.x].
// Launch with blockDim.x a power of two and blockDim.x * sizeof(int) bytes
// of dynamic shared memory.
__global__ void sumReduction(int *input, int *output) {
    int i, tidx = threadIdx.x, index = blockIdx.x*blockDim.x + tidx;
    extern __shared__ int temp[];
    // BUG FIX: the original returned early for index >= N, so in the last,
    // partially filled block not all threads reached the __syncthreads()
    // below -- undefined behaviour.  Loading the additive identity instead
    // keeps every thread at the barrier and leaves the sum unchanged (it
    // also makes the old "index+i < N" guard unnecessary).
    temp[tidx] = (index < N) ? input[index] : 0;
    __syncthreads();
    for (i = blockDim.x/2; i > 0; i >>= 1) {
        if (tidx < i) {
            temp[tidx] += temp[tidx + i];
        }
        __syncthreads();
    }
    if (tidx == 0) {
        output[blockIdx.x] = temp[0];
    }
}
// Two-stage parallel sum of {1..N}: stage 1 produces NBLKS partial sums,
// stage 2 reduces them with a single block of nb (power-of-two) threads.
int main() {
int nb = getNearestPowerOfTwo(NBLKS), i, sum;
int input[N], output[nb];
int *devInput, *devOutput1, *devOutput2;
//Sizes
int INT_SIZE = sizeof(int);
int N_SIZE = N * INT_SIZE;
int NBLKS_SIZE = NBLKS * INT_SIZE;
int NTHRDS_SIZE = NTHRDS * INT_SIZE;
int NB_SIZE = nb * INT_SIZE;
//Load host input array with values {1....N}. Clear output array for stage 1.
for (i = 0; i<N; i++)
input[i] = i+1;
memset(output, 0, NB_SIZE);
//Allocate memory for device pointers.
cudaMalloc(&devInput, N_SIZE);
cudaMalloc(&devOutput1, NB_SIZE);
cudaMalloc(&devOutput2, INT_SIZE);
//Load input array into input device pointer.
//Clear stage 1 output device pointer.
// (the zero padding matters: stage 2 launches nb >= NBLKS threads and
// reads the untouched tail of devOutput1, which must be zero)
cudaMemcpy(devInput, input, N_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(devOutput1, output, NB_SIZE, cudaMemcpyHostToDevice);
//Execute stage 1 reduction. Partial sums will be stored in devOutput1.
//Copy partial sums from devOutput1 to output for debugging purposes.
sumReduction <<<NBLKS, NTHRDS, NTHRDS_SIZE>>>(devInput, devOutput1);
cudaMemcpy(output, devOutput1, NBLKS_SIZE, cudaMemcpyDeviceToHost);
//Stage 2 reduction - add up the partial sums and store final result in sum
sumReduction <<<1, nb, NB_SIZE>>>(devOutput1, devOutput2);
cudaMemcpy(&sum, devOutput2, INT_SIZE, cudaMemcpyDeviceToHost);
//Print out partial sums and final sum.
for (i = 0; i<nb; i++) {
printf("%d ", output[i]);
}
printf("\n%d\n", sum);
//Free allocated device memory.
cudaFree(devOutput2);
cudaFree(devOutput1);
cudaFree(devInput);
return 0;
}
|
21,748 | // This is a generated file, do not edit it!
#pragma once
#include <stdint.h>
// NOTE(review): the file header says this is generated -- fold any comment
// changes into the generator template, or they will be lost.
typedef struct DataPoint DataPoint;   // opaque; defined elsewhere
typedef struct Node Node;             // opaque; defined elsewhere
// Working state for a decision learner (presumably a decision-tree trainer;
// confirm against the generator): the training set, the node pool, and the
// frontier of nodes still being expanded level by level.
typedef struct DecisionLearnerContext {
DataPoint *DataPoints;        // training samples
int32_t NumDataPoints;
float TotalWeight;            // sum of sample weights -- TODO confirm
int32_t NumAttributeAxes;
int32_t NumCategoricalAxes;
int32_t *DataPointIds;        // per-sample ids/permutation -- TODO confirm
Node *Nodes;                  // node pool
int32_t CurrentLevel;
int32_t *OpenNodeIds;         // frontier at the current level
int32_t NumOpenNodes;
int32_t *NextOpenNodeIds;     // frontier being built for the next level
int32_t NumNextOpenNodes;
int32_t MaxOpenNodes;         // capacity of the two frontier arrays
} DecisionLearnerContext;
|
21,749 | /* Parallel Best Band Selection Algorithm */
/*
Max Bands searched: 45 -> Taking Approximately a Day or so to complete
Value returned: 0.094479
Band Returning Max: UNKNOWN
author: Michael C Estwanick
*/
#include <thrust/extrema.h>
#include <math.h>
#include <stdio.h>
#include <time.h>
#include <sys/timeb.h>
#define ARRAYSIZE 45 //Number of Bands to check
#define TOTAL powf(2,ARRAYSIZE)
//Cuda Error Check
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Kernel that Performs the best band selection algorithm
// One thread evaluates one band combination: the ARRAYSIZE-bit binary
// expansion of (threadId + jump) selects which of the 169 bands take part,
// and the spectral angle acos(a.b / (|a||b|)) over the selected bands is
// written to cc[threadId].  NaN results (e.g. the empty combination) are
// stored as -2 so the host-side max ignores them.
__global__ void kernel(float *cc, long long int jump, float threadCount){
    // BUG FIX: the combination number and flat thread id must be 64-bit.
    // jump grows toward 2^ARRAYSIZE (2^45 here), so the original int
    // decimalNumber/quotient truncated it and re-tested wrong combinations.
    long long int N = (long long int)threadCount * threadCount; // threads per grid row
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    long long int threadId = col + row * N; // two-dimensional thread index, flattened
    long long int decimalNumber, quotient;
    int binaryNumber[ARRAYSIZE]; //Holds the binary number in an array
    //Spectra Data Set
    int a[169] = {1192, 1315, 1462, 1484, 1476, 1443, 1508, 1489, 1470, 1537, 1633, 1539, 1600, 1707, 1701, 1682, 1688, 1681, 1694, 1728, 1786, 1821, 1830, 1881, 1893, 1816, 1692, 1675, 1651, 1579, 1514, 1600, 1576, 1543, 1465, 1440, 1452, 1483, 1944, 2303, 2616, 3118, 3861, 4054, 3915, 4790, 5543, 4539, 4679, 5574, 5365, 5080, 4186, 4272, 4934, 5057, 5000, 4867, 3872, 2992, 2519, 1203, 1092, 1979, 3005, 3886, 4121, 4134, 4168, 4014, 3612, 3391, 2712, 1324, 473, 556, 1099, 1769, 1979, 2063, 2289, 2494, 2553, 2196, 2125, 2147, 1749, 1221, 667, 517, 732, 885, 988, 1051, 1001, 984, 997, 965, 1008, 1022, 992, 993, 982, 946, 850, 698, 562, 446, 334, 278, 226, 161, 99, 58, 125, 139, 101, 93, 115, 151, 167, 171, 178, 172, 180, 176, 163, 152, 143, 134, 129, 130, 139, 148, 151, 146, 137, 123, 106, 98, 79, 65, 58, 70, 60, 62, 51};
    int b[169] = {1162, 1337, 1282, 1491, 1508, 1517, 1488, 1513, 1539, 1576, 1626, 1634, 1573, 1786, 1741, 1782, 1755, 1669, 1700, 1826, 1832, 1895, 1920, 1938, 1933, 1852, 1808, 1806, 1747, 1718, 1628, 1659, 1639, 1621, 1589, 1525, 1526, 1583, 2118, 2549, 2900, 3411, 4237, 4340, 4126, 4985, 5760, 4716, 4840, 5793, 5616, 5326, 4416, 4485, 5197, 5322, 5315, 5166, 4107, 3158, 2664, 1286, 1149, 2093, 3197, 4157, 4413, 4422, 4444, 4287, 3842, 3620, 2892, 1415, 498, 591, 1164, 1892, 2110, 2215, 2441, 2663, 2721, 2351, 2286, 2296, 1872, 1318, 714, 568, 805, 977, 1084, 1143, 1094, 1071, 1085, 1044, 1092, 1116, 1070, 1076, 1068, 1031, 928, 766, 617, 481, 370, 305, 250, 181, 108, 64, 139, 153, 109, 101, 122, 162, 180, 189, 192, 191, 195, 192, 178, 164, 153, 145, 141, 139, 148, 158, 163, 151, 148, 131, 120, 107, 91, 71, 72, 81, 65, 66, 62};
    //Holds Product from the dot product
    int c[ARRAYSIZE];
    //Arrays to hold integers summed
    int aSumArr[ARRAYSIZE];
    int bSumArr[ARRAYSIZE];
    //Initialize arrays
    for(int i = 0; i < ARRAYSIZE; i++){
        c[i] = 0;
        aSumArr[i] = 0;
        bSumArr[i] = 0;
        binaryNumber[i] = 0;
    }
    int dotSum = 0; //value for the dot product
    int aSum = 0;   //sum of valid array positions for array a
    int bSum = 0;   //sum of valid array positions for array b
    int i = 0;
    float finalValue = 0; //arcCos of dot product / (sqrt(|a|) * sqrt(|b|))
    //Add jump to decimal to avoid running combinations that have already been calculated
    decimalNumber = threadId + jump;
    quotient = decimalNumber;
    //Convert the combination number to binary, least-significant bit first.
    while(quotient!=0){
        binaryNumber[i++] = quotient % 2;
        quotient = quotient / 2;
    }
    //Only selected bands contribute to the dot product and the norms.
    for(int x = ARRAYSIZE-1; x >= 0; x--){
        if(binaryNumber[x] == 1){
            c[x] = a[x] * b[x];
            aSumArr[x] = a[x];
            bSumArr[x] = b[x];
        }
    }
    //Sums up the product array to complete dot product
    for(int j = 0; j < ARRAYSIZE; ++j){
        dotSum += c[j];                // Dot Product
        aSum += powf( aSumArr[j], 2 ); // Euclidean Norm on vector A
        bSum += powf( bSumArr[j], 2 ); // Euclidean Norm on vector B
    }
    float sqSum1 = sqrtf(aSum); //Finish Euclidean Norm on vector A
    float sqSum2 = sqrtf(bSum); //Finish Euclidean Norm on vector B
    float sqSum = sqSum1 * sqSum2;
    float div = dotSum / sqSum;
    finalValue = acosf( div );
    //finalValue == finalValue is a NaN test (NaN compares unequal to itself).
    if(finalValue == finalValue){
        cc[threadId] = finalValue;
    }else{
        cc[threadId] = -2; //If the value return is NaN set result = -2
    }
}//End kernel
float getFreeMem();
void deviceProperties();
float kernelCount(float freeMem, float totalMem);
// Drives the exhaustive band-combination search: splits the 2^ARRAYSIZE
// combinations into as many kernel launches as fit in free device memory,
// takes the per-launch maximum with Thrust, and reports the overall max.
int main( void ) {
printf("------------------------------------------------------ \n");
printf("2 ^ %d bands \n", ARRAYSIZE);
cudaDeviceReset();
float freeMem = getFreeMem(); // Get available free memory
float kernels = kernelCount( freeMem, TOTAL); // get number of kernels to launch
//Number of elements for each kernel
float threadCount = ( TOTAL / kernels );
printf("threadCount: Total thread Count: %lf \n", threadCount);
//number of threads per kernel
// 4th root: the grid is (d x d) blocks of (d x d) threads, d^4 = threadCount.
float threadsPerDim = ceil( powf(threadCount,(.25f)) );
printf("threadPerDim: Total threads per dimension: %lf \n", threadsPerDim);
long long int jump = 0;
float *h_c = (float *)malloc(sizeof(float)*threadCount); //Host Vector
float *d_c; //Device Vector
//Collection of individual kernel max
float *maxCollection = (float *)malloc(sizeof(float)*kernels);
float totalTime = 0.0;
//CPU Timer start
// NOTE(review): "start"/"end" (struct timeb) are shadowed by the
// cudaEvent_t start/stop declared inside the loop below.
struct timeb start, end;
int diff;
ftime(&start);
//Loop Through the kernel as many times needed to execute all bands,
//When The GPU is out of memory the loop will execute again storing
//The max from each subset of bands in the maxCollection array
for(int i = 0; i < kernels; i++){
// NOTE(review): resetting the device every iteration also destroys the
// context; all allocations/events are re-created afterwards, so this
// works but adds per-iteration overhead -- confirm it is intentional.
cudaDeviceReset();
//Setup Thread & Block Grid
dim3 blocks (threadsPerDim, threadsPerDim);
dim3 threads (threadsPerDim, threadsPerDim);
//Allocate Device Memory
HANDLE_ERROR( cudaMalloc((void**)&d_c, sizeof(float)*(threadCount)) );
//Timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//Execute Kernel
kernel<<<blocks, threads>>>(d_c, jump, threadsPerDim);
//Timer stuff
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
//printf("GPU: Kernel: %d Time: %f \n", i ,milliseconds/1000);
totalTime += milliseconds;
//Retrieve vector from device holding the max value from each subset of bands
HANDLE_ERROR( cudaMemcpy(h_c, d_c, sizeof(float)*(threadCount), cudaMemcpyDeviceToHost) );
//Get the max value from the current subset of bands executed using THRUST Library
float *result = thrust::max_element(h_c, h_c + (int)threadCount);
//Store the max in the maxCollection array
maxCollection[i] = *result;
//printf(" \t Jump Size: %ld \n", jump);
jump = jump + threadCount; //Increment jump to avoid checking completed bands
HANDLE_ERROR( cudaFree(d_c) );
}
//Get max of all kernels
float *result = thrust::max_element(maxCollection, maxCollection + (int)kernels);
//Print the maximum of all bands executed from all the kernels combined
printf("Total Max: is: %f \n", *result);
//Stop timer
ftime(&end);
diff = (int) (1000.0 * (end.time - start.time)
+ (end.millitm - start.millitm));
printf("\nOperation took %u milliseconds\n", diff);
//printf("Total GPU Time: %f \n", totalTime/1000 );
return 0;
}
//Return the number of kernels
// Given free device memory (bytes) and the total number of float results,
// returns how many kernel launches are needed so each launch's output
// buffer fits in free memory.
float kernelCount(float freeMem, float totalMem){
    float bytesNeeded = sizeof(float) * totalMem;
    float launches = ceil( bytesNeeded / freeMem );
    printf("Total array size %lf || free mem %lf \n", bytesNeeded, freeMem);
    printf("Kernels: %lf \n ", launches);
    return launches;
}
//Get available free memory
// Queries the current device for its available global memory, in bytes.
float getFreeMem(){
    size_t freeBytes, totalBytes;
    cudaMemGetInfo(&freeBytes, &totalBytes);
    //fprintf(stderr, "Free = %ld, Total = %ld\n", freeBytes, totalBytes);
    return (float)freeBytes;
}
//Get total memory of device
// Prints the total global memory of every CUDA device present.
void deviceProperties(){
    int deviceCount = 0;
    HANDLE_ERROR( cudaGetDeviceCount( &deviceCount ) );
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp prop;
        HANDLE_ERROR( cudaGetDeviceProperties( &prop, dev ) );
        printf( " --- Memory Information for device %d ---\n", dev );
        printf( "Total global mem: %ld\n", prop.totalGlobalMem );
    }
}
|
21,750 | #include <stdio.h>
#include <stdlib.h>
#define N 16384
// Element-wise vector add c = a + b over the first N elements.
// One thread per element; threads with ids past N do nothing.
__global__ void addVecGrande(int *a, int *b, int *c)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Times one launch configuration of addVecGrande and prints three sample
// results.  Output bytes are identical to the original triplicated code.
static void runConfig(int blocks, int threads,
                      int *dev_a, int *dev_b, int *dev_c,
                      int *a, int *b, int *c,
                      cudaEvent_t start, cudaEvent_t stop)
{
    float elapsedTime;
    printf("Se van a ejecutar %d bloques con %d hilos\n", blocks, threads);
    cudaEventRecord(start,0);
    addVecGrande<<<blocks,threads>>>(dev_a,dev_b,dev_c);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    printf("El tiempo tomado fue de %3.3f ms\n",elapsedTime);
    cudaMemcpy(c,dev_c,N*sizeof(int),cudaMemcpyDeviceToHost);
    printf("En el renglon 0: \t%d\t+\t%d\t=\t%d\n",a[0],b[0],c[0]);
    printf("En el renglon 10: \t%d\t+\t%d\t=\t%d\n",a[10],b[10],c[10]);
    printf("En el renglon %d: \t%d\t+\t%d\t=\t%d\n",N,a[N-1],b[N-1],c[N-1]);
}
// Benchmarks the same N-element vector add under three block/thread
// configurations.  Improvements vs. the original: the triplicated
// launch/timing/report code is factored into runConfig(), and the CUDA
// events are destroyed before exit (they previously leaked).
int main (void)
{
    int *dev_a, *dev_b, *dev_c, *a, *b, *c;
    // host buffers
    a=(int *)malloc(N*sizeof(int));
    b=(int *)malloc(N*sizeof(int));
    c=(int *)malloc(N*sizeof(int));
    // device buffers
    cudaMalloc((void**)&dev_a,N*sizeof(int));
    cudaMalloc((void**)&dev_b,N*sizeof(int));
    cudaMalloc((void**)&dev_c,N*sizeof(int));
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    for(int i=0; i<N;i++)
    {
        a[i]=i;
        b[i]=i+1;
    }
    // copy 'a' and 'b' to the GPU once; all three runs reuse them
    cudaMemcpy(dev_a,a,N*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,b,N*sizeof(int),cudaMemcpyHostToDevice);
    runConfig(128, 128, dev_a, dev_b, dev_c, a, b, c, start, stop);
    runConfig(256, 64,  dev_a, dev_b, dev_c, a, b, c, start, stop);
    runConfig(32,  512, dev_a, dev_b, dev_c, a, b, c, start, stop);
    free(a);
    free(b);
    free(c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
} |
21,751 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
// Parallel BFS labelling over an adjacency matrix mat (n x n), launched as
// <<<n, n>>>: thread (id_y = block, id_x = thread) watches edge (id_y,id_x).
// index[] encodes the discovery path as concatenated decimal vertex ids,
// depth[] the BFS level; both start at -1 ("unvisited") except the start
// vertex.  The loop propagates labels across edges in both directions until
// this thread's column vertex is labelled; the final fixed 4-iteration pass
// canonicalises paths so shallower/smaller paths win.
// NOTE(review): index[]/depth[] are read and written concurrently by many
// blocks without atomics, and the __syncthreads() calls sit inside
// data-dependent control flow (the while loop and the depth[...] branches),
// which is undefined behaviour when threads of a block diverge -- this
// kernel relies on racy convergence; verify results on the target device.
// NOTE(review): the decimal path encoding overflows int for graphs of any
// real size (each hop multiplies by 10/100/1000).
__global__ void depth(int *mat, int *stack, int *index, int start, int n, int *depth)
{
int id_x = threadIdx.x;
int id_y = blockIdx.x;
// Seed the start vertex (done redundantly by every thread).
depth[start] = 0;
index[start] = start;
stack[start] = 1;
// Propagate until this thread's column vertex has been discovered.
while(index[id_x]<0)
{
// Pass 1: labelled row vertex discovers unlabelled column vertex.
if(mat[id_y*n+id_x]==1)
{
if(index[id_x]==-1)
{
if(index[id_y]!=-1)
{
index[id_x] = index[id_y]*10 + id_x;
depth[id_x] = depth[id_y] +1;
stack[id_x] = 1;
}
}
}
__syncthreads();
// Pass 2: symmetric direction, column discovers row.
if(mat[id_y*n+id_x]==1)
{
if(index[id_y]==-1)
{
if(index[id_x]!=-1)
{
index[id_y] = index[id_x]*10 + id_y;
depth[id_y] = depth[id_x] +1;
stack[id_y] = 1;
}
}
}
__syncthreads();
}
__syncthreads();
// Fixed number of relaxation sweeps: prefer paths through shallower
// neighbours; the 10/100/1000 multiplier appends a 1-3 digit vertex id.
for(int i=0;i<4;i++)
if(mat[id_y*n+id_x]==1)
{
if(depth[id_x]!=depth[id_y])
{
if(depth[id_x] < depth[id_y] && index[id_x] < index[id_y]/10)
{
if(id_y>99 && id_y<1000) index[id_y]=index[id_x]*1000+id_y;
else if(id_y>9 && id_y<100) index[id_y]=index[id_x]*100+id_y;
else
index[id_y]=index[id_x]*10+id_y;
}
__syncthreads();
if(depth[id_y] < depth[id_x] && index[id_y] < index[id_x]/10)
{
if(id_x>99 && id_x<1000) index[id_x]=index[id_y]*1000+id_x;
else if(id_x>9 && id_x<100) index[id_x]=index[id_y]*100+id_x;
else
index[id_x]=index[id_y]*10+id_x;
}
__syncthreads();
}
}
}
// Interactive driver: reads an undirected-ish adjacency list from stdin,
// runs the depth<<<n,n>>> labelling kernel, and prints each vertex's path
// label plus a rank-based BFS ordering.
// NOTE(review): several review findings are flagged inline -- the 4x
// over-sized host allocations, the duplicated d_stack memset, and the
// never-freed d_index / host arrays.
int main()
{
int n;
int i,j,k,l;
cout<<"Enter the number of vertices: ";
cin>>n;
// NOTE(review): new int[n*n*sizeof(int)] allocates 4x the needed ints
// (sizeof is applied twice); n*n would suffice.  Same pattern below.
int *h_mat = new int[n*n*sizeof(int)];
int *d_mat;
int *d_stack;
if(cudaMalloc(&d_mat,n*n*sizeof(int)) != cudaSuccess)
{
cout<<"Memory allocation failed.";
cin>>i;
return 0;
}
if(cudaMalloc(&d_stack,n*sizeof(int)) != cudaSuccess)
{
cout<<"Memory allocation failed.";
cin>>i;
cudaFree(d_mat);
return 0;
}
// NOTE(review): d_stack is memset twice; the second call is redundant
// (possibly intended for another buffer).
cudaMemset(d_stack, -1, n*sizeof(int));
cudaMemset(d_stack, -1, n*sizeof(int));
for(i=0;i<(n*n);i++)
h_mat[i]=0;
// Build the adjacency matrix row by row from user input.
for(i=0;i<n;i++)
{
cout<<"\nEnter the number of connections of "<<i<<" vertex: ";
cin>>j;
cout<<"\nEnter the vertices which are connected to "<<i<<" :\n";
for(k=0;k<j;k++)
{
cin>>l;
h_mat[(i * n) + l] = 1;
}
}
// Echo the matrix back for verification.
for(i=0;i<n;i++)
{
for(j=0;j<n;j++)
cout<<h_mat[i * n + j]<<" ";
cout<<endl;
}
int *h_output = new int[sizeof(int)*n];
int *d_depth;
if(cudaMalloc(&d_depth,n*sizeof(int)) != cudaSuccess)
{
cout<<"Memory allocation failed.";
cin>>i;
cudaFree(d_mat);
cudaFree(d_stack);
return 0;
}
int *d_index;
if(cudaMalloc(&d_index,n*sizeof(int)) != cudaSuccess)
{
cout<<"Memory allocation failed.";
cin>>i;
cudaFree(d_mat);
cudaFree(d_stack);
return 0;
}
cudaMemset(d_depth, -1, n*sizeof(int));
cudaMemset(d_index, -1, n*sizeof(int));
int h_vertex;
cout << "Enter the starting vertex: ";
cin >> h_vertex;
if(cudaMemcpy(d_mat, h_mat, n*n*sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess)
{
cout<<"Memory copy from host to device failed.88";
cin>>i;
cudaFree(d_mat);
cudaFree(d_stack);
cudaFree(d_depth);
return 0;
}
// One block per vertex, one thread per vertex within each block.
depth<<<n,n>>>(d_mat, d_stack, d_index,h_vertex,n, d_depth);
// NOTE(review): despite the name, h_depth receives d_index (the encoded
// path labels), not d_depth; the ranking below orders by path label.
int *h_depth = new int[n*sizeof(int)];
if(cudaMemcpy(h_depth,d_index,n*sizeof(int),cudaMemcpyDeviceToHost) != cudaSuccess)
{
cout<<"Memory copy from device to host failed.126";
cin>>i;
cudaFree(d_mat);
cudaFree(d_stack);
cudaFree(d_depth);
return 0;
}
cout<<"depth is:\n";
for(i=0;i<n;i++)
{
cout<<h_depth[i]<< " for " << i << endl;
}
// h_output[i] = rank of vertex i when sorted by its label.
for(i=0;i<n;i++)
{
h_output[i]=0;
for(j=0;j<n;j++)
{
if(h_depth[i]>h_depth[j]) h_output[i]++;
}
}
cout<<"Output bfs is: ";
for(i=0;i<n;i++)
cout<<h_output[i]<<" ";
cin >> i;
// NOTE(review): d_index is never cudaFree'd and the host new[] buffers
// are never delete[]'d (freed by process exit).
cudaFree(d_depth);
cudaFree(d_mat);
cudaFree(d_stack);
return 0;
} |
21,752 | #include "includes.h"
// Parallel rank ("enumeration") sort: thread i counts how many elements
// precede array[i] in sorted order -- strictly smaller values, plus equal
// values at earlier positions (which keeps duplicates stable) -- and writes
// array[i] to that rank in temp[].
__global__ void counting_sort(int* array, int *temp, int size) {
    int i = threadIdx.x + (blockIdx.x * blockDim.x);
    if (i >= size)
        return;
    int rank = 0;
    for (int j = 0; j < size; j++) {
        if (array[j] < array[i] || (array[j] == array[i] && j < i))
            rank++;
    }
    temp[rank] = array[i];
} |
21,753 | #include "iostream"
// Prints one greeting per thread, tagged with the launch number n and the
// global thread id.
__global__ void hello_fromGPU(int n)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Hello World from thread %d-%d\n", n, gid);
}
// Host-side greeting, to contrast with the GPU output above.
void hello_fromCPU()
{
printf("Hello World from CPU\n");
}
// Launches two rounds of GPU greetings (2 blocks x 3 threads each),
// waits for them to finish, then greets from the host.
int main()
{
    for (int launch = 0; launch < 2; ++launch)
        hello_fromGPU<<<2,3>>>(launch);
    cudaDeviceSynchronize();
    hello_fromCPU();
    return 0;
}
|
21,754 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// c[i] = a[i] + b[i] for i < nn; one element per thread, extras idle.
__global__ void add_gpu(int nn, double *a, double *b, double *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= nn)
        return;
    c[i] = a[i] + b[i];
}
// C-linkage wrapper (callable from Fortran): computes c = a + b for an
// nx*ny array of doubles on the GPU and prints the result.
// Device buffers are allocated on the first call and reused, sized for the
// first call's nx*ny (intentionally never freed for the process lifetime).
// BUG FIX: the original launched <<<1, nx*ny>>>, which exceeds the
// 1024-threads-per-block hardware limit for any grid larger than 32x32 and
// silently computes nothing; a ceil-divided multi-block grid is used now.
extern "C" void add(int nx, int ny, void *ap, void *bp, void *cp) {
    int i, j, idx;
    int nn = nx * ny;
    // arrays in the GPU
    static int call_count=1;
    static double *a_gpu, *b_gpu, *c_gpu;
    if (call_count == 1) {
        printf("******************** call count %d ********************\n", call_count);
        cudaMalloc((void**)&a_gpu, nn*sizeof(double));
        cudaMalloc((void**)&b_gpu, nn*sizeof(double));
        cudaMalloc((void**)&c_gpu, nn*sizeof(double));
    }
    // arrays from Fortran
    double *a, *b, *c;
    a = (double *)ap;
    b = (double *)bp;
    c = (double *)cp;
    printf("#C address: a %p, b %p, c %p\n", a, b, c);
    cudaMemcpy(a_gpu, a, nn*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, nn*sizeof(double), cudaMemcpyHostToDevice);
    // call the kernel with a grid that covers all nn elements
    if (nn > 0) {
        int threads = 256;
        int blocks = (nn + threads - 1) / threads;
        add_gpu<<<blocks, threads>>>(nn, a_gpu, b_gpu, c_gpu);
    }
    // copy array 'c' back from the GPU (blocking copy = synchronization)
    cudaMemcpy(c, c_gpu, nn*sizeof(double), cudaMemcpyDeviceToHost);
    // print result
    for (i=0; i<nx; i++) {
        for (j=0; j<ny; j++) {
            idx = i*ny + j;
            printf("c[%d,%d]= %g\n", i, j, c[idx]);
        }
    }
    call_count += 1;
}
|
21,755 | #include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
#define HANDLE_ERROR(err) \
do { if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); exit(0);} } while (0)
__constant__ double AVG[32][3];
__constant__ double COV[32][3][3];
__constant__ double COV_INV[32][3][3];
__constant__ double DETS[32];
// Per-class discriminant for one pixel and class i:
//   -(p - AVG[i])^T * COV_INV[i] * (p - AVG[i]) - log|DETS[i]|
// i.e. the sample-dependent part of a Gaussian log-likelihood; larger means
// a better match.  Only the .x/.y/.z channels of the pixel are used.
__device__ double func(uchar4 p, int i)
{
double res = 0.0, p_avg[3], tmp[3];
for (int j = 0; j < 3; ++j) {
p_avg[j] = 0.0;
tmp[j] = 0.0;
}
// Centre the pixel on the class mean.
p_avg[0] = p.x - AVG[i][0];
p_avg[1] = p.y - AVG[i][1];
p_avg[2] = p.z - AVG[i][2];
// res = -(p_avg)^T * COV_INV[i] * p_avg, accumulated column by column.
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
tmp[j] += -p_avg[k] * COV_INV[i][k][j];
}
res += tmp[j] * p_avg[j];
}
res -= std::log(std::abs(DETS[i]));
return res;
}
// Returns the index (0..nc-1) of the class whose discriminant func() is
// largest for this pixel; ties keep the earliest class.
__device__ int getClass(uchar4 pixel, int nc)
{
    double best = func(pixel, 0);
    int bestIdx = 0;
    for (int c = 1; c < nc; ++c) {
        double score = func(pixel, c);
        if (score > best) {
            best = score;
            bestIdx = c;
        }
    }
    return bestIdx;
}
// Classifies every pixel of the w x h image in place: the winning class id
// is written into the alpha (.w) channel.  A 2-D grid-stride loop lets any
// launch shape cover the whole image.
__global__ void kernel(uchar4 *dst, int w, int h, int nc)
{
    int x0 = blockDim.x * blockIdx.x + threadIdx.x;
    int y0 = blockDim.y * blockIdx.y + threadIdx.y;
    int strideX = blockDim.x * gridDim.x;
    int strideY = blockDim.y * gridDim.y;
    for (int y = y0; y < h; y += strideY) {
        for (int x = x0; x < w; x += strideX) {
            uchar4 px = dst[x + y * w];
            dst[x + y * w].w = getClass(px, nc);
        }
    }
}
// Supervised pixel classifier: reads an image plus training samples per
// class from stdin, estimates per-class mean / covariance / inverse /
// determinant on the host, uploads them to constant memory, classifies
// every pixel on the GPU, and writes the image back out.
int main()
{
std::string input, output;
int w, h, nc, np;
uchar4 *data;
// Input format: input path, output path, class count, then for each class
// the sample count followed by (x, y) sample coordinates.
std::cin >> input >> output >> nc;
std::vector<std::vector<int2>> classes(nc);
for (int i = 0; i < nc; ++i) {
std::cin >> np;
classes[i].resize(np);
for (int j = 0; j < np; ++j) {
std::cin >> classes[i][j].x >> classes[i][j].y;
}
}
// Binary image file: int width, int height, then w*h RGBA bytes.
std::ifstream fsIn(input, std::ios::in | std::ios::binary);
if (fsIn.is_open()) {
fsIn.read((char *)&w, sizeof(w));
fsIn.read((char *)&h, sizeof(h));
data = new uchar4[w * h];
fsIn.read((char *)data, w * h * sizeof(data[0]));
fsIn.close();
} else {
return 1;
}
// here the interesting part begins
// Per-class mean colour of the training samples.
double avg[32][3];
for (int i = 0; i < 32; ++i) {
for (int j = 0; j < 3; ++j) {
avg[i][j] = 0.0;
}
}
for (int i = 0; i < nc; ++i) {
int np = classes[i].size();
for (int j = 0; j < np; ++j) {
int x = classes[i][j].x;
int y = classes[i][j].y;
uchar4 curPixel = data[x + y * w];
avg[i][0] += curPixel.x;
avg[i][1] += curPixel.y;
avg[i][2] += curPixel.z;
}
for (int k = 0; k < 3; ++k) {
avg[i][k] /= np;
}
}
// Per-class 3x3 sample covariance (unbiased, divides by np - 1).
// NOTE(review): a class with a single sample divides by zero here, and a
// singular covariance yields det == 0 and a division by zero below --
// confirm the input guarantees >= 2 non-degenerate samples per class.
double cov[32][3][3];
for (int i = 0; i < 32; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
cov[i][j][k] = 0.0;
}
}
}
for (int i = 0; i < nc; ++i) {
np = classes[i].size();
for (int j = 0; j < np; ++j) {
double tmp[3];
int x = classes[i][j].x;
int y = classes[i][j].y;
uchar4 curPixel = data[x + y * w];
tmp[0] = curPixel.x - avg[i][0];
tmp[1] = curPixel.y - avg[i][1];
tmp[2] = curPixel.z - avg[i][2];
for (int k = 0; k < 3; ++k) {
for (int l = 0; l < 3; ++l) {
cov[i][k][l] += tmp[k] * tmp[l];
}
}
}
for (int k = 0; k < 3; ++k) {
for (int l = 0; l < 3; ++l) {
cov[i][k][l] /= np - 1;
}
}
}
double cov_inv[32][3][3];
for (int i = 0; i < 32; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
cov_inv[i][j][k] = 0.0;
}
}
}
// Determinant via the rule of Sarrus / cofactor expansion.
double dets[32];
for (int i = 0; i < nc; ++i) {
double det = 0;
for (int j = 0; j < 3; ++j) {
det += cov[i][0][j] * (cov[i][1][(j + 1) % 3] * cov[i][2][(j + 2) % 3] - cov[i][1][(j + 2) % 3] * cov[i][2][(j + 1) % 3]);
}
dets[i] = det;
}
// sorry -- hand-unrolled 3x3 adjugate / determinant inverse
for (int i = 0; i < nc; ++i) {
cov_inv[i][0][0] = (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) / dets[i];
cov_inv[i][0][1] = (cov[i][0][2] * cov[i][2][1] - cov[i][0][1] * cov[i][2][2]) / dets[i];
cov_inv[i][0][2] = (cov[i][0][1] * cov[i][1][2] - cov[i][0][2] * cov[i][1][1]) / dets[i];
cov_inv[i][1][0] = (cov[i][1][2] * cov[i][2][0] - cov[i][1][0] * cov[i][2][2]) / dets[i];
cov_inv[i][1][1] = (cov[i][0][0] * cov[i][2][2] - cov[i][0][2] * cov[i][2][0]) / dets[i];
cov_inv[i][1][2] = (cov[i][1][0] * cov[i][0][2] - cov[i][0][0] * cov[i][1][2]) / dets[i];
cov_inv[i][2][0] = (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]) / dets[i];
cov_inv[i][2][1] = (cov[i][2][0] * cov[i][0][1] - cov[i][0][0] * cov[i][2][1]) / dets[i];
cov_inv[i][2][2] = (cov[i][0][0] * cov[i][1][1] - cov[i][1][0] * cov[i][0][1]) / dets[i];
}
// Upload the per-class statistics to constant memory for the kernel.
HANDLE_ERROR(cudaMemcpyToSymbol(AVG, avg, sizeof(double) * 32 * 3));
HANDLE_ERROR(cudaMemcpyToSymbol(COV, cov, sizeof(double) * 32 * 3 * 3));
HANDLE_ERROR(cudaMemcpyToSymbol(COV_INV, cov_inv, sizeof(double) * 32 * 3 * 3));
HANDLE_ERROR(cudaMemcpyToSymbol(DETS, dets, sizeof(double) * 32));
uchar4 *dev_data;
HANDLE_ERROR(cudaMalloc(&dev_data, sizeof(uchar4) * h * w));
HANDLE_ERROR(cudaMemcpy(dev_data, data, sizeof(uchar4) * h * w, cudaMemcpyHostToDevice));
// Grid-stride kernel: 16x16 blocks of 16x16 threads cover any image size.
kernel<<<dim3(16, 16), dim3(16, 16)>>>(dev_data, w, h, nc);
// Blocking D2H copy synchronizes with the kernel.
HANDLE_ERROR(cudaMemcpy(data, dev_data, sizeof(uchar4) * h * w, cudaMemcpyDeviceToHost));
std::ofstream fsOut(output, std::ios::out | std::ios::binary);
if (fsOut.is_open()) {
fsOut.write((char *)&w, sizeof(w));
fsOut.write((char *)&h, sizeof(h));
fsOut.write((char *)data, w * h * sizeof(data[0]));
fsOut.close();
} else {
return 1;
}
HANDLE_ERROR(cudaFree(dev_data));
delete[] data;
return 0;
} |
21,756 | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
// Prints every "sum-product number" equal to its own global thread id:
// values n with digit_sum(n) * digit_product(n) == n.
// (tid == 0 prints: the empty digit loop leaves sum = 0, prod = 1.)
__global__ void kernel() {
    uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
    uint32_t digitSum = 0;
    uint32_t digitProd = 1;
    for (uint32_t rest = tid; rest != 0; rest /= 10) {
        uint32_t d = rest % 10;
        digitSum += d;
        digitProd *= d;
    }
    if (digitSum * digitProd == tid) printf("%u\n", tid);
    return;
}
// Launches enough threads to test every integer in [0, range) for the
// sum-product property (the kernel derives the value from its thread id).
void checkrange(uint32_t range){
    double dim = sqrt(range);
    uint32_t thread_number = (uint32_t)ceil(range/(dim));
    if ( thread_number > 1024 ) {
        printf("Impossible to run more threads than 1024.\nNumber reduce to 1024. \n");
        thread_number = 1024;
    }
    // BUG FIX: derive the block count from the (possibly clamped) thread
    // count with a ceil-divide.  The original kept (uint32_t)dim blocks, so
    // whenever the 1024-thread clamp fired (range > ~2^20) the grid covered
    // only a fraction of the range and most candidates were never tested.
    uint32_t block_number = (range + thread_number - 1) / thread_number;
    printf("Checking %u for sum-product numbers\n", range);
    kernel<<<block_number, thread_number, 0>>>();
    cudaDeviceSynchronize();
}
// Scans two ranges (2^10 and 2^24) for sum-product numbers.
int main() {
    const uint32_t ranges[2] = { 1024u, 16777216u };
    for (int k = 0; k < 2; ++k)
        checkrange(ranges[k]);
    return 0;
} |
21,757 | #include <stdio.h>
#include <assert.h>
const int DIM = 32;
// print GB/s
// Prints the effective bandwidth (GB/s) for moving n doubles in ms
// milliseconds.  Counts a single pass over the data; multiply by 2 to
// account for both reading and writing the matrix.
void postprocess(int n, float ms)
{
    double gbPerSec = n * sizeof(double) * 1e-6 / ms;
    printf("%21f\t", gbPerSec);
}
//Read the in matrix using pieces of 32 items.
// naive transpose
// Naive transpose: each thread copies DIM/BLOCK elements straight from in
// to out.  Reads are coalesced (consecutive x across a warp), but writes
// are strided by the row length, so write transactions are uncoalesced.
// Launch with blockDim = (DIM, BLOCK); the matrix width is taken as
// gridDim.x * DIM, so a square matrix is assumed.
__global__ void transposeNaive(double *out,double *in,int BLOCK)
{
int x = blockIdx.x * DIM + threadIdx.x;
int y = blockIdx.y * DIM + threadIdx.y;
for (int j = 0; j < DIM; j+= BLOCK)
out[x*(gridDim.x * DIM) + (y+j)] = in[(y+j)*(gridDim.x * DIM) + x];
//each thread executing transpose DIM/BLOCK_i elements from in column into out row.
}
// Tiled transpose through shared memory: a DIM x DIM tile is staged so both
// the global read and the global write are coalesced.  Launch with
// blockDim = (DIM, BLOCK); width is taken as gridDim.x * DIM (square matrix
// assumed); each thread moves DIM/BLOCK elements.
__global__ void transposeImproved(double *out, double *in,int BLOCK)
{
    // PERF FIX: +1 column of padding so the column-wise reads in the second
    // loop hit distinct shared-memory banks; the original [DIM][DIM] layout
    // made all lanes of a warp hit the same bank and serialise.  Results
    // are unchanged.
    __shared__ double aux_mat[DIM][DIM + 1];
    int x = blockIdx.x * DIM + threadIdx.x;
    int y = blockIdx.y * DIM + threadIdx.y;
    for (int j = 0; j < DIM; j += BLOCK)
        aux_mat[threadIdx.y+j][threadIdx.x] = in[(y+j)*(gridDim.x * DIM) + x];
    __syncthreads(); //needed in order to ensure that all the writes are performed.
    x = blockIdx.y * DIM + threadIdx.x; // transpose block offset
    y = blockIdx.x * DIM + threadIdx.y;
    for (int j = 0; j < DIM; j += BLOCK)
        out[(y+j)*(gridDim.x * DIM) + x] = aux_mat[threadIdx.x][threadIdx.y + j];
}
// Benchmarks both transpose kernels on an nx x ny matrix of doubles (`size`
// bytes) with blockDim = (DIM, BLOCK), printing: thread count, naive time and
// bandwidth, improved time and bandwidth.
// NOTE(review): the transposed output is copied back but never verified, and
// CUDA API return codes are not checked -- consider adding both.
void RunTest(int BLOCK,const int nx,const int ny,const int size){
// One block per DIM x DIM tile; assumes nx and ny are multiples of DIM.
dim3 dimGrid(nx/DIM, ny/DIM, 1);
dim3 dimBlock(DIM, BLOCK, 1);
printf("%d\t",DIM*BLOCK);
double *h_in = (double*)malloc(size);
double *h_out = (double*)malloc(size);
double *d_in, *d_out;
cudaMalloc(&d_in, size);
cudaMalloc(&d_out, size);
// host
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
h_in[j*nx + i] = i;
// device
cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
// events for timing
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
float time_m;
//time measures might contain overhead due to kernel lauch
cudaMemset(d_out, 0, size);
cudaEventRecord(startEvent, 0);
transposeNaive<<<dimGrid, dimBlock>>>(d_out, d_in,BLOCK);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&time_m, startEvent, stopEvent); //milliseconds
printf("%21f\t",time_m);
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
postprocess(nx * ny, time_m);
cudaMemset(d_out, 0, size); //Reset matrix so i don't have to allocate a new one
cudaEventRecord(startEvent, 0);
transposeImproved<<<dimGrid, dimBlock>>>(d_out, d_in,BLOCK);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&time_m, startEvent, stopEvent);
printf("%21f\t",time_m);
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
postprocess(nx * ny, time_m);
printf("\n");
// cleanup
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
cudaFree(d_out);
cudaFree(d_in);
free(h_in);
free(h_out);
}
int main(int argc, char **argv)
{
    printf("# Threads Naive Transpose(ms) Naive Bandwidth (GB/s) Improved Transpose(ms) Improved Bandwidth (GB/s)\n");
    // 8192 x 8192 doubles, processed in DIM x DIM tiles. Each benchmarked
    // block height makes a thread transpose DIM/BLOCK elements, so it is
    // convenient to keep threads-per-block <= elements-per-tile.
    const int nx = 8192;
    const int ny = 8192;
    const int size = nx * ny * sizeof(double);
    const int blockHeights[3] = {2, 16, 32};
    for (int t = 0; t < 3; t++)
        RunTest(blockHeights[t], nx, ny, size);
}
|
21,758 | #include <iostream>
// Element-wise vector sum c[i] = a[i] + b[i]; the guard lets surplus threads
// in the final block fall through harmlessly.
__global__ void add(int *a, int *b, int *c, int n){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if(gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
// Fills a[0..N-1] with values from rand(). The generator is not seeded here,
// so the sequence depends on the process-wide C RNG state.
void random_ints(int * a, int N){
    int i = 0;
    while(i < N){
        a[i] = rand();
        ++i;
    }
}
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
using namespace std;
// Adds two N-element vectors on the GPU and prints a small sample of the result.
int main(){
    int *a,*b,*c;      // host buffers
    int *da,*db,*dc;   // device buffers
    int size = N * sizeof(int);
    cudaMalloc((void **)&da, size);
    cudaMalloc((void **)&db, size);
    cudaMalloc((void **)&dc, size);
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);
    cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(db,b,size,cudaMemcpyHostToDevice);
    // Ceiling division so a trailing partial block is launched if N is ever
    // not a multiple of THREADS_PER_BLOCK (the kernel bounds-checks).
    add<<<(N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(da,db,dc, N);
    cudaMemcpy(c,dc,size,cudaMemcpyDeviceToHost); // blocking copy also syncs the kernel
    // Bug fix: the original loop bound was sizeof(c) -- the size of a
    // *pointer* (8 on 64-bit builds), not the element count. Print an
    // explicit small sample instead.
    for(int i = 0; i < 8; i++){
        cout << c[i] << endl;
    }
    free(a);
    free(b);
    free(c);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
|
21,759 | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <time.h>
#define random(a, b) (rand() % (b - a) + a)
void FillMatrix(float *matrix, int row, int col);
void PrintMatrix(float *A, float *B, float *C, int m, int n, int k);
// One thread computes one element of C = A(mxn) * B(nxk).
// Launch layout: blockIdx.x selects the row, blockIdx.y tiles the columns in
// chunks of ThreadBlockSize, and threadIdx.x is the offset inside a chunk.
__global__ void MatrixMulCUDA(const float *A, const float *B, float *C, int m, int n, int k, int ThreadBlockSize)
{
    const int row = blockIdx.x;                                   // output row
    const int col = blockIdx.y * ThreadBlockSize + threadIdx.x;   // output column
    if (row >= m || col >= k)
        return;
    float acc = 0;
    for (int i = 0; i < n; ++i)
        acc += A[row * n + i] * B[i * k + col];
    C[row * k + col] = acc;
}
// Usage: prog m n k ThreadBlockSize -- times C(mxk) = A(mxn) * B(nxk) on the GPU.
int main(int argc, char **argv)
{
    if (argc != 5)
    {
        printf("Wrong Input!\n");
        return 1;
    }
    int m = atoi(argv[1]);
    int n = atoi(argv[2]);
    int k = atoi(argv[3]);
    int ThreadBlockSize = atoi(argv[4]);
    float *A, *B, *C;
    A = new float[m * n];
    B = new float[n * k];
    C = new float[m * k];
    FillMatrix(A, m, n);
    FillMatrix(B, n, k);
    float *cuda_A, *cuda_B, *cuda_C;
    // Timing with CUDA events; the window covers allocation, copies and the kernel.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Device allocations
    cudaMalloc((void **)&cuda_A, sizeof(float) * m * n);
    cudaMalloc((void **)&cuda_B, sizeof(float) * n * k);
    cudaMalloc((void **)&cuda_C, sizeof(float) * m * k);
    // Copy A and B from host to device
    cudaMemcpy(cuda_A, A, sizeof(float) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_B, B, sizeof(float) * n * k, cudaMemcpyHostToDevice);
    // Bug fix: the original grid used k / ThreadBlockSize, which truncates and
    // leaves the last (k % ThreadBlockSize) columns of C uncomputed whenever k
    // is not a multiple of the block size. Round up instead; the kernel's
    // (row < m && col < k) guard makes the surplus threads harmless.
    dim3 grid(m, (k + ThreadBlockSize - 1) / ThreadBlockSize);
    // Matrix multiplication: one thread per element of C.
    MatrixMulCUDA<<<grid, ThreadBlockSize>>>(cuda_A, cuda_B, cuda_C, m, n, k, ThreadBlockSize);
    // Copy C back to the host (blocking, so it also syncs the kernel).
    cudaMemcpy(C, cuda_C, sizeof(float) * m * k, cudaMemcpyDeviceToHost);
    cudaFree(cuda_A);
    cudaFree(cuda_B);
    cudaFree(cuda_C);
    // Stop the clock
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Matrix Size is %d\nThreadBlockSize is %d\n", m,ThreadBlockSize);
    printf("Calculation time is %.10f ms\n", elapsedTime);
    // PrintMatrix(A, B, C, m, n, k);
    delete[] A;
    delete[] C;
    delete[] B;
    return 0;
}
// Fills a row x col matrix (row-major) with random values produced by the
// random(a, b) macro, i.e. integers in [0, 9) stored as floats.
void FillMatrix(float *matrix, int row, int col)
{
    for (int i = 0; i < row; ++i)
    {
        float *rowPtr = matrix + i * col;
        for (int j = 0; j < col; ++j)
            rowPtr[j] = random(0, 9);
    }
}
void PrintMatrix(float *A, float *B, float *C, int m, int n, int k)
{
printf("Matrix A:\n");
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < n; ++j)
printf("%f ", A[i * n + j]);
printf("\n");
}
printf("Matrix B:\n");
for (int i = 0; i < n; ++i)
{
for (int j = 0; j < k; ++j)
printf("%f ", B[i * k + j]);
printf("\n");
}
printf("Matrix C:\n");
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < k; ++j)
printf("%f ", C[i * k + j]);
printf("\n");
}
} |
21,760 |
#include "cuda.h"
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <stdio.h>
|
21,761 | // kernels from http://ppc.cs.aalto.fi/ch4 (2018)
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <iostream>
#include <limits>
#include <numeric>
#include <random>
#include <cstdio>
#include <cuda_runtime.h>
// Aborts the process with a readable message when a CUDA API call failed.
// `context` is the stringified call expression supplied by the CHECK macro.
inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
#define BLOCKSIZE 32
// Returns a uniformly distributed float in [0, 1), from a process-wide engine
// seeded once via std::random_device.
float next_float() {
    static std::random_device seed_source;
    static std::default_random_engine engine(seed_source());
    static std::uniform_real_distribution<float> dist(0.0, 1.0);
    return dist(engine);
}
// Ceiling division for non-negative a and positive b.
inline int static divup(int a, int b) {
    int q = a / b;
    if (a % b != 0) ++q;
    return q;
}
// Rounds a up to the next multiple of b (b > 0).
inline int static roundup(int a, int b) {
    return ((a + b - 1) / b) * b;
}
// Min-plus "shortcut" step: out[i][j] = min over k of in[i][k] + in[k][j].
// One thread per output element; i comes from the x launch dimension, j from y,
// so writes out[n*i + j] are strided across a warp (see kernel_v1 for the
// swapped, coalesced variant).
// NOTE(review): no i/j bounds guard -- n must be a multiple of the block
// dimensions (the host code launches 16x16 blocks over n = 4096).
__global__ void kernel_v0(const float *in, float *out, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
float v = HUGE_VALF;
for (int k = 0; k < n; ++k) {
float x = in[n*i + k];
float y = in[n*k + j];
float z = x + y;
v = min(v, z);
}
out[n*i + j] = v;
}
// Same min-plus step as kernel_v0 with the roles of the launch axes swapped:
// the x-axis now indexes columns (i), so adjacent threads of a warp write
// adjacent addresses out[n*j + i] -- coalesced stores.
// NOTE(review): no bounds guard; n must be a multiple of the block dimensions.
__global__ void kernel_v1(const float *in, float *out, int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
float v = HUGE_VALF;
for (int k = 0; k < n; ++k) {
float x = in[n*j + k];
float y = in[n*k + i];
float z = x + y;
v = min(v, z);
}
out[n*j + i] = v;
}
__global__ void kernel_v2(float* r, const float* d, int n, int nn) {
int ia = threadIdx.x;
int ja = threadIdx.y;
int ic = blockIdx.x;
int jc = blockIdx.y;
const float* t = d + nn * nn;
float v[8][8];
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
v[ib][jb] = HUGE_VALF;
}
}
for (int k = 0; k < n; ++k) {
float x[8];
float y[8];
for (int ib = 0; ib < 8; ++ib) {
int i = ic * 64 + ib * 8 + ia;
x[ib] = t[nn*k + i];
}
for (int jb = 0; jb < 8; ++jb) {
int j = jc * 64 + jb * 8 + ja;
y[jb] = d[nn*k + j];
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
v[ib][jb] = min(v[ib][jb], x[ib] + y[jb]);
}
}
}
for (int ib = 0; ib < 8; ++ib) {
for (int jb = 0; jb < 8; ++jb) {
int i = ic * 64 + ib * 8 + ia;
int j = jc * 64 + jb * 8 + ja;
if (i < n && j < n) {
r[n*i + j] = v[ib][jb];
}
}
}
}
// Copies the n x n input r into an nn x nn padded buffer d (padding filled
// with HUGE_VALF, the identity of min) and simultaneously builds its
// transpose t, stored immediately after d in the same allocation.
// Launch geometry used by step_v2: grid (1, nn), block (64, 1); each thread
// walks one padded row in strides of 64.
__global__ void add_padding_v2(const float* r, float* d, int n, int nn) {
int ja = threadIdx.x;
int i = blockIdx.y;
float* t = d + nn * nn;
for (int jb = 0; jb < nn; jb += 64) {
int j = jb + ja;
float v = (i < n && j < n) ? r[n*i + j] : HUGE_VALF;
d[nn*i + j] = v;
t[nn*j + i] = v;
}
}
// Host wrapper for kernel_v0: computes r[i][j] = min_k d[i][k] + d[k][j]
// for an n x n matrix. Requires n to be a multiple of 16 (the kernel has no
// bounds guard).
void step_v0(float* r, const float* d, int n) {
    // Allocate memory & copy data to GPU
    float* dGPU = NULL;
    float* rGPU = NULL;
    CHECK(cudaMalloc(&dGPU, n * n * sizeof(float)));
    CHECK(cudaMalloc(&rGPU, n * n * sizeof(float)));
    CHECK(cudaMemcpy(dGPU, d, n * n * sizeof(float), cudaMemcpyHostToDevice));
    // Run kernel
    dim3 dimBlock(16, 16);
    dim3 dimGrid(divup(n, dimBlock.x), divup(n, dimBlock.y));
    // Bug fix: kernel_v0's parameter order is (in, out); the original call
    // passed (rGPU, dGPU), i.e. it read the *uninitialized* result buffer,
    // wrote over the input copy, and then returned the untouched rGPU.
    kernel_v0<<<dimGrid, dimBlock>>>(dGPU, rGPU, n);
    CHECK(cudaGetLastError());
    // Copy data back to CPU & release memory
    CHECK(cudaMemcpy(r, rGPU, n * n * sizeof(float), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(dGPU));
    CHECK(cudaFree(rGPU));
}
// Host wrapper for kernel_v1 (coalesced-write variant of step_v0).
// Requires n to be a multiple of 16 (no bounds guard in the kernel).
void step_v1(float* r, const float* d, int n) {
    float* dGPU = NULL;
    float* rGPU = NULL;
    CHECK(cudaMalloc(&dGPU, n * n * sizeof(float)));
    CHECK(cudaMalloc(&rGPU, n * n * sizeof(float)));
    CHECK(cudaMemcpy(dGPU, d, n * n * sizeof(float), cudaMemcpyHostToDevice));
    dim3 dimBlock(16, 16);
    dim3 dimGrid(divup(n, dimBlock.x), divup(n, dimBlock.y));
    // Bug fix: kernel_v1 takes (in, out); the original passed (rGPU, dGPU),
    // reading the uninitialized result buffer (same defect as step_v0).
    kernel_v1<<<dimGrid, dimBlock>>>(dGPU, rGPU, n);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(r, rGPU, n * n * sizeof(float), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(dGPU));
    CHECK(cudaFree(rGPU));
}
void step_v2(float* r, const float* d, int n) {
int nn = roundup(n, 64);
float* dGPU = NULL;
float* rGPU = NULL;
CHECK(cudaMalloc(&dGPU, 2 * nn * nn * sizeof(float)));
CHECK(cudaMalloc(&rGPU, n * n * sizeof(float)));
CHECK(cudaMemcpy(dGPU, d, n * n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(rGPU, d, n * n * sizeof(float), cudaMemcpyHostToDevice));
{
dim3 dimBlock(64, 1);
dim3 dimGrid(1, nn);
add_padding_v2<<<dimGrid, dimBlock>>>(rGPU, dGPU, n, nn);
CHECK(cudaGetLastError());
}
{
dim3 dimBlock(8, 8);
dim3 dimGrid(nn / 64, nn / 64);
kernel_v2<<<dimGrid, dimBlock>>>(rGPU, dGPU, n, nn);
CHECK(cudaGetLastError());
}
CHECK(cudaMemcpy(r, rGPU, n * n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaFree(dGPU));
CHECK(cudaFree(rGPU));
}
// Prints the column headings of the benchmark table.
void print_header() {
    std::cout << std::setw(12) << "function" << std::setw(12) << "iteration"
              << std::setw(12) << "input size" << std::setw(12) << "time (μs)"
              << std::endl;
}
// Prints one result row: function name, 1-based iteration number, total
// element count (n*n), and the elapsed time converted from seconds to whole
// microseconds.
void print_row(const char* name, int i, size_t n, double time) {
    const int micros = (int)(1e6 * time);
    std::cout << std::setw(12) << name;
    std::cout << std::setw(12) << i;
    std::cout << std::setw(12) << n*n;
    std::cout << std::setw(12) << micros;
    std::cout << std::endl;
}
// One benchmark entry: display name, the step implementation to run, and the
// matrix side length n it is benchmarked with.
struct FunctionData {
const char* name;
void (*callable)(float*, const float*, int);
const size_t n;
};
int main(int argc, char** argv) {
int iterations = 1;
if (argc > 1) {
iterations = std::stoi(argv[1]);
}
std::vector<FunctionData> functions = {
{"step_v0", step_v0, BLOCKSIZE << 7},
{"step_v1", step_v1, BLOCKSIZE << 7},
{"step_v2", step_v2, BLOCKSIZE << 7},
};
print_header();
for (auto func : functions) {
const size_t n = func.n;
for (auto i = 0; i < iterations; ++i) {
std::vector<float> data(n*n);
std::generate(data.begin(), data.end(), next_float);
std::vector<float> result(n*n);
const auto time_start = std::chrono::high_resolution_clock::now();
func.callable(result.data(), data.data(), n);
const auto time_end = std::chrono::high_resolution_clock::now();
const std::chrono::duration<float> delta_seconds = time_end - time_start;
print_row(func.name, i + 1, n, delta_seconds.count());
}
}
}
|
21,762 | #include <iostream>
using namespace std;
// Element-wise vector sum: vetorC[i] = vetorA[i] + vetorB[i], guarded so the
// partial block at the end of the range does nothing out of bounds.
__global__ void SomaVetores(int* vetorA, int* vetorB, int* vetorC, int tamanho)
{
    int indice = blockIdx.x * blockDim.x + threadIdx.x;
    if (indice >= tamanho)
        return;
    vetorC[indice] = vetorA[indice] + vetorB[indice];
}
// Adds two 100M-element vectors on the GPU.
int main()
{
    int tamanho = 100000000;
    size_t totalBytes = tamanho * sizeof(int);
    // Host buffers
    int* vetorA = (int*) malloc(totalBytes);
    int* vetorB = (int*) malloc(totalBytes);
    int* vetorC = (int*) malloc(totalBytes);
    if(vetorA == NULL || vetorB == NULL || vetorC == NULL)
    {
        cout << "Memoria insuficiente!" << endl;
        return 0;
    }
    // A[i] = B[i] = i, C cleared
    for(int index = 0; index < tamanho; index++)
    {
        vetorA[index] = vetorB[index] = index;
        vetorC[index] = 0;
    }
    int* cudaVetorA;
    int* cudaVetorB;
    int* cudaVetorC;
    cudaMalloc(&cudaVetorA, totalBytes);
    cudaMalloc(&cudaVetorB, totalBytes);
    cudaMalloc(&cudaVetorC, totalBytes);
    cudaMemcpy(cudaVetorA, vetorA, totalBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaVetorB, vetorB, totalBytes, cudaMemcpyHostToDevice);
    // Bug fix: the original launched <<<1, tamanho>>>, but a block is capped
    // at 1024 threads, so the launch failed silently and vetorC was never
    // computed. Use a fixed block size and enough blocks to cover the whole
    // vector (the kernel already guards i < tamanho).
    int threadsPorBloco = 256;
    int blocos = (tamanho + threadsPorBloco - 1) / threadsPorBloco;
    SomaVetores<<<blocos, threadsPorBloco>>>(cudaVetorA, cudaVetorB, cudaVetorC, tamanho);
    cudaMemcpy(vetorC, cudaVetorC, totalBytes, cudaMemcpyDeviceToHost);
    cudaFree(cudaVetorA);
    cudaFree(cudaVetorB);
    cudaFree(cudaVetorC);
    /*
    for(int index = 0; index < tamanho; index++)
    {
        cout << "C = " << vetorC[index] << endl;
    }
    */
    free(vetorA);
    free(vetorB);
    free(vetorC);
    cout << "200 OK" << endl;
    return 0;
}
|
// Appends a batch of particles: writes xx/yy/zz[i] into x/y/z at index
// i + size, i.e. the destination arrays already hold `size` particles and the
// new batch goes after them.
// NOTE(review): there is no bounds guard, so the launch must supply exactly
// one thread per new particle and the destination arrays must have room for
// size + (gridDim.x * blockDim.x) entries -- confirm at the call site.
extern "C" __global__ void
add_particles(float* x, float* y, float* z, const float* xx, const float* yy, const float* zz, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i + size] = xx[i];
y[i + size] = yy[i];
z[i + size] = zz[i];
}
21,764 | #include<stdio.h>
__device__ const char *STR = "Hello World!\n";
const char STR_LENGTH = 12;
// Each of the launched threads prints one character of the device string
// (index threadIdx.x modulo STR_LENGTH), one character per line.
__global__ void hello(){
printf("%c\n", STR[threadIdx.x % STR_LENGTH]);
}
int main(void){
    // One block, one thread per character of the device string.
    hello<<<1, STR_LENGTH>>>();
    // Block until the kernel finishes so device-side printf output is flushed.
    cudaDeviceSynchronize();
    return 0;
}
|
21,765 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <curand_kernel.h>
__global__ void generate_map(curandState* devState, int n_maps, int* grid, int width, int height);
__global__ void setup_rnd_kernel (curandState* state, unsigned long seed);
__device__ void apply_cave_generation_rule();
__device__ void create_random_initial_population(curandState* devState, int* grid, int width, int height, int fill_percent);
__device__ int rand(curandState* localRand, int max);
__device__ void pretty_print_map(int* map, int width, int height);
__global__ void generate_map(curandState* devState, int n_maps, int* grid, int width, int height)
{
int i;
/* Literature says so... */
int n_iterations = 5;
/*
This is the kernel function
*/
create_random_initial_population(devState, grid, width, height, 45);
pretty_print_map(grid, width, height);
/*pthread_params *params = (pthread_params*) thread_params;
int n_elements = CELL_COUNT / N_THREADS;
int start = n_elements * (params->id);
int end = start + n_elements;
apply_cave_generation_rule(current_gen, start, end);
free(params);
pthread_exit(NULL);*/
// printf("[BlockId]: %d, [ThreadId]: %d\n", blockIdx.x, threadIdx.x);
}
// Initializes one curandState per thread from a common seed; the global
// thread id is used as the curand sequence number so per-thread streams are
// decorrelated.
__global__ void setup_rnd_kernel (curandState* state, unsigned long seed)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
curand_init ( seed, id, 0, &state[id] );
}
__device__ void apply_cave_generation_rule()
{
/* int i;
int n_count = 0;
for(i=startPos; i<endPos; i++)
{
n_count = count_neighbors(grid, i);
// Applying the B678 rule.
if(grid[i] == 0)
{
if((n_count == 6) || (n_count == 7) || (n_count == 8))
next_gen[i] = 1;
else
next_gen[i] = grid[i];
}
// Applying the S345678
if(grid[i] == 1)
{
if((n_count == 0) || (n_count == 1) || (n_count == 2))
next_gen[i] = 0;
else
next_gen[i] = grid[i];
}
}*/
}
// Fills `grid` (width x height cells) with a random initial population:
// each cell becomes alive (1) with probability fill_percent/100, else dead (0).
// Cleanup: the original also kept a `count` of live cells that was never
// read; it has been removed.
__device__ void create_random_initial_population(curandState* devState, int* grid, int width, int height, int fill_percent)
{
    int cell_count = width * height;
    for(int i = 0; i < cell_count; i++)
    {
        // rand(devState, 100) yields an int scaled into [0, 100]; comparing
        // against the fill percentage realizes the Bernoulli draw.
        grid[i] = (rand(devState, 100) < fill_percent) ? 1 : 0;
    }
}
// Returns a pseudo-random int scaled by `max`, advancing the caller's curand
// state (copied to a register, used, written back).
// NOTE(review): the state array is indexed by threadIdx.x only, so threads
// with the same threadIdx.x in different blocks share -- and race on -- one
// state slot; fine for the current 1x1 launch, verify before scaling up.
// NOTE(review): curand_uniform is documented to return values in (0, 1], so
// this can occasionally return `max` itself rather than max-1.
__device__ int rand(curandState* localRand, int max)
{
int ind = threadIdx.x;
curandState localState = localRand[ind];
float rnd = curand_uniform( &localState );
localRand[ind] = localState;
return int(rnd * max);
}
// Dumps the grid as ASCII art: '.' for a dead cell (0), '@' for a live one
// (1), one row per line. Device-side printf, so interleaving across threads
// is unspecified.
__device__ void pretty_print_map(int* map, int width, int height)
{
    int total = width * height;
    for (int idx = 0; idx < total; idx++)
    {
        if (idx % width == 0)
            printf("\n");
        if (map[idx] == 0)
            printf(".");
        if (map[idx] == 1)
            printf("@");
    }
    printf("\n");
}
int main(int argc, char* argv[])
{
int *cudaGrid, *grid;
curandState* devStates;
srand(time(NULL));
/*int i, n, t, s;
struct timeval inicio, fim;
tsc_counter tsc1, tsc2;
long long unsigned int clock;
double tempo, tempo_total;*/
int N_THREADS, N_BLOCKS;
N_THREADS = 1;
N_BLOCKS = 1;
int n_maps = 5;
int width, height;
width = 10;
height = 10;
cudaMalloc(&devStates, N_THREADS * N_BLOCKS * sizeof( curandState ));
setup_rnd_kernel <<<N_THREADS, N_BLOCKS>>> ( devStates, time(NULL) );
if (cudaMalloc (&cudaGrid, sizeof(int) * width * height) != cudaSuccess)
{ printf("Erro cudaMalloc\n"); return -1; }
grid = (int*) malloc(width * height * sizeof(int));
generate_map <<<N_THREADS, N_BLOCKS>>>(devStates, n_maps, cudaGrid, width, height);
cudaDeviceSynchronize();
if (cudaMemcpy(grid, cudaGrid, sizeof(int) * width*height, cudaMemcpyDeviceToHost) != cudaSuccess)
{ printf("Erro cudaMemcpy\n"); return -1; }
printf("Depois de chamar!\n");
int i;
for(i=0; i<width*height; i++)
{
if(i%width == 0)
printf("\n");
if(grid[i] == 0)
printf(".");
if(grid[i] == 1)
printf("@");
}
printf("\n");
/*for(s=0; s<N_SIM; s++)
{
gettimeofday(&inicio, NULL);
RDTSC(tsc1);
for(i=0; i<N_MAPS; i++)
{
current_gen = create_random_initial_population();
next_gen = malloc(CELL_COUNT * sizeof(int));
fill_borders(current_gen);
for(n=0; n<N_ITER; n++)
{
for(t=0; t<N_THREADS; t++)
{
pthread_params *params;
params = malloc(sizeof(pthread_params));
params->id = t;
pthread_create(&tid[t], NULL,
transition_cells, (void*) params);
}
for(t=0; t<N_THREADS; t++)
{
pthread_join(tid[t], NULL);
}
current_gen = next_gen;
fill_borders(current_gen);
//print_grid(current_gen);
}
//printf("Mapa #%d:\n", i+1);
//print_grid(current_gen);
free(current_gen);
//free(next_gen);
}
RDTSC(tsc2);
gettimeofday(&fim, NULL);
printf("Run #%d\n", s);
tempo = (fim.tv_sec - inicio.tv_sec) * 1000 + (fim.tv_sec - inicio.tv_sec)/1000;
tempo_total += tempo;
printf("Tempo: %.2lf\n", tempo);
clock = tsc2.int64 - tsc1.int64;
printf("Tempo: %.2lf(ms) Clocks: %.2e\n", tempo/N_MAPS, (double)clock/N_ITER);
printf("Clock/tempo: %.2e\n\n", clock/tempo);
}
printf("Tempo Total: %.2lf\n", tempo_total);
printf("Tempo médio: %.2lf\n", (double) tempo_total/N_SIM);
pthread_exit(NULL);*/
return 0;
} |
21,766 | // Copyright (C) 2018 NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#define TO_NEXT_MULT_P2(x,p) (((x)+((p)-1)) & ~(p-1))
// 128-bit logical left shift of a ulonglong2 (x = low word, y = high word).
// Bug fix: the original condition was `l > 64`, so l == 64 fell into the else
// branch and performed 64-bit shifts by 64 (and by 8*sizeof - 64 == 0 -> 64),
// which is undefined behaviour in C++. l == 64 now takes the word-move path,
// yielding (0, a.x) as expected.
// Precondition: 0 < l < 128 (l == 0 would still shift a.x right by 64 below).
__host__ __device__ ulonglong2 operator<<(ulonglong2 a, int l) {
  ulonglong2 b;
  if (l >= 64) {
    b = make_ulonglong2(0ull, a.x << (l-64));
  } else {
    b = make_ulonglong2(a.x << l, (a.y << l) | (a.x >> (8*sizeof(a.x)-l)));
  }
  return b;
}
// 128-bit logical right shift of a ulonglong2 (x = low word, y = high word).
// Bug fix: as with operator<<, `l > 64` let l == 64 execute undefined 64-bit
// shifts; l == 64 now takes the word-move path, yielding (a.y, 0).
// Precondition: 0 < l < 128.
__host__ __device__ ulonglong2 operator>>(ulonglong2 a, int l) {
  ulonglong2 b;
  if (l >= 64) {
    b = make_ulonglong2(a.y >> (l-64), 0ull);
  } else {
    b = make_ulonglong2((a.x >> l) | (a.y << (8*sizeof(a.y)-l)), a.y >> l);
  }
  return b;
}
// Component-wise bitwise OR of two 128-bit values stored as ulonglong2.
__host__ __device__ ulonglong2 operator|(ulonglong2 a, ulonglong2 b) {
  ulonglong2 r;
  r.x = a.x | b.x;
  r.y = a.y | b.y;
  return r;
}
template<int BDIM_X,
int MAXIOB,
int SH_BYTE_X_BL,
typename LDST_T>
__device__ void memcpy_d(const size_t n,
const unsigned char *__restrict__ src,
unsigned char *__restrict__ dst,
unsigned char *__restrict__ __sh) {
const int tid = threadIdx.x;
const unsigned long long srcULL = reinterpret_cast<unsigned long long>(src);
const unsigned long long dstULL = reinterpret_cast<unsigned long long>(dst);
int srcOff = (MAXIOB - srcULL) & (MAXIOB-1);
int dstOff = (MAXIOB - dstULL) & (MAXIOB-1);
const int ELXTH = SH_BYTE_X_BL/(BDIM_X*MAXIOB);
LDST_T *__ptrSH = reinterpret_cast<LDST_T *>(__sh);
if (srcOff == dstOff) {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = (n-srcOff) / sizeof(*__ptrLDG);
int remBytes = (n-srcOff) % sizeof(*__ptrLDG);
LDST_T __loc[ELXTH];
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (j*BDIM_X+tid < nread) {
__loc[j] = __ptrLDG[j*BDIM_X+tid];
}
}
for(int i = 0; i < nread; i += BDIM_X*ELXTH) {
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
__ptrSH[j*BDIM_X+tid] = __loc[j];
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + BDIM_X*ELXTH + j*BDIM_X + tid < nread) {
__loc[j] = __ptrLDG[i + BDIM_X*ELXTH + j*BDIM_X + tid];
}
}
#pragma unroll
for(int j = 0; j < ELXTH; j++) {
if (i + j*BDIM_X + tid < nread) {
__ptrSTG[i + j*BDIM_X + tid] = __ptrSH[j*BDIM_X+tid];
}
}
}
if (tid < srcOff+remBytes) {
const int off = (tid < srcOff) ? tid : n-remBytes+tid-srcOff;
dst[off] = src[off];
}
} else {
const LDST_T *__restrict__ __ptrLDG = reinterpret_cast<const LDST_T *>(src + srcOff);
LDST_T *__restrict__ __ptrSTG = reinterpret_cast< LDST_T *>(dst + dstOff);
int nread = ((n-srcOff) / sizeof(*__ptrLDG));
int remBytes = ((n-srcOff) % sizeof(*__ptrLDG));
int lowShft, uppShft;
if (srcOff > dstOff) {
uppShft = (srcOff-dstOff)*8;
lowShft = (8*sizeof(*__ptrLDG)) - uppShft;
__ptrSTG++;
} else {
lowShft = (dstOff-srcOff)*8;
uppShft = (8*sizeof(*__ptrLDG)) - lowShft;
}
for(int i = 0; i < nread-1; i += BDIM_X) {
if (i+tid < nread-1) {
const LDST_T low = __ptrLDG[i+tid];
const LDST_T upp = __ptrLDG[i+tid+1];
__ptrSTG[i+tid] = (low >> lowShft) | (upp << uppShft);
}
}
remBytes += sizeof(*__ptrLDG);
if (srcOff > dstOff) {
dstOff += sizeof(*__ptrLDG);
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
} else {
if (tid < dstOff+remBytes) {
const int off = (tid < dstOff) ? tid : n-remBytes + tid-dstOff;
dst[off] = src[off];
}
}
}
}
template<int BDIM_X,
int MAXIOB>
__global__ void memcpy_k(const size_t *sizes,
const unsigned char *const __restrict__ *__restrict__ in,
unsigned char *__restrict__ *__restrict__ out) {
const int SH_BYTE_X_BL = 32768;
__shared__ unsigned char __sh[SH_BYTE_X_BL];
switch(MAXIOB) {
case 4:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned int>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 8:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, unsigned long long>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
case 16:
memcpy_d<BDIM_X, MAXIOB, SH_BYTE_X_BL, ulonglong2>(sizes[blockIdx.x],
in[blockIdx.x],
out[blockIdx.x],
__sh);
break;
}
return;
}
#define NTHREADS 1024
// Public entry point: performs `num_copies` independent device-to-device
// copies on `stream`, one thread block (NTHREADS threads) per copy.
// `sizes`, `in_ptrs` and `out_ptrs` must be device-accessible arrays of
// length num_copies. MAXIOB is fixed at 16, selecting the widest
// (ulonglong2) load/store path in memcpy_k.
void batched_d2d_memcpy(void** out_ptrs, void** in_ptrs, size_t* sizes, int num_copies, cudaStream_t stream)
{
memcpy_k<NTHREADS, 16><<<num_copies, NTHREADS, 0, stream>>>(sizes, (unsigned char**) in_ptrs, (unsigned char**) out_ptrs);
}
|
21,767 | #include "Map.cuh"
// Allocates a height x width map in unified (managed) memory as an array of
// row pointers, so host and device code can both index map[y][x].
// NOTE(review): cudaMallocManaged return codes are not checked; on failure a
// row pointer may be left unset -- consider adding error handling.
float** generateMap(int width, int height) {
float** map;
cudaMallocManaged(&map, height*sizeof(float*));
for (int y = 0; y < height; y++) {
cudaMallocManaged(&map[y], width * sizeof(float));
}
return map;
}
// Releases a map created by generateMap: every managed row first, then the
// managed array of row pointers.
void freeMap(float** map, int height) {
    for (int y = 0; y < height; y++) {
        // Bug fix: the original called cudaFree(&map[y]) -- the address of
        // the pointer slot inside the pointer array, not the allocation it
        // points to. For y == 0 that address equals `map` itself, freeing the
        // whole pointer array on the first iteration and leaking every row.
        cudaFree(map[y]);
    }
    // Same fix for the pointer array (was cudaFree(&map), the address of the
    // local parameter).
    cudaFree(map);
}
// Accumulates mapFragment into map element-wise over a width x height region.
// Uses a 2D grid-stride loop (stride = gridDim * blockDim per axis), so any
// launch geometry covers the whole map.
__global__ void addMapFragments(float** map, float** mapFragment, int width, int height) {
//Gets the thread numbers
int threadX = threadIdx.x + blockIdx.x * blockDim.x;
int threadY = threadIdx.y + blockIdx.y * blockDim.y;
//Gets the stride to increase
int strideX = gridDim.x*blockDim.x;
int strideY = gridDim.y*blockDim.y;
for (int y = threadY; y < height; y+=strideY) {
for (int x = threadX; x < width; x+=strideX) {
map[y][x] += mapFragment[y][x];
}
}
}
21,768 | #include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <stdio.h>
// 2D launch: one thread per element of the nx x ny (row-major) matrices,
// MatC = MatA + MatB.
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny){
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= nx || row >= ny)
        return;
    unsigned int flat = row * nx + col;
    MatC[flat] = MatA[flat] + MatB[flat];
}
// 1D launch over columns: each thread walks its whole column, adding one
// element per row.
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny){
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= nx)
        return;
    for (int row = 0; row < ny; ++row) {
        int flat = row * nx + col;
        MatC[flat] = MatA[flat] + MatB[flat];
    }
}
// Mixed launch: blockIdx.y selects the row directly while threads cover the
// columns, one element per thread.
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny){
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y;
    if (col < nx && row < ny) {
        unsigned int flat = row * nx + col;
        MatC[flat] = MatA[flat] + MatB[flat];
    }
}
21,769 | #include <stdio.h>
#include <stdlib.h>
#define N 512
// Fills a[0..q-1] with pseudo-random values in [0, 100).
void random_ints(int * a, int q)
{
    int i = 0;
    while (i < q) {
        a[i] = rand() % 100;
        ++i;
    }
}
// Dot product of two N-element vectors using one block of N threads: every
// thread writes one pairwise product into shared memory; after the barrier,
// thread 0 serially accumulates the N partials into *c.
// NOTE(review): correctness requires the launch to be exactly <<<1, N>>> --
// there is no bounds guard and no cross-block combination step.
__global__ void dot( int *a, int *b, int *c ) {
__shared__ int temp[N];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads();
if( 0 == threadIdx.x ) {
int sum = 0;
for( int i = 0; i < N; i++ )
sum += temp[i];
*c = sum;
}
}
// Host reference implementation: dot product of a and b over q elements.
int cpu(int*a,int*b,int q)
{
    int acc = 0;
    for (int idx = 0; idx < q; ++idx)
        acc += a[idx] * b[idx];
    return acc;
}
// Computes the dot product of two random N-vectors on the GPU (single block
// of N threads) and prints it next to the CPU reference for comparison.
int main( void ) {
int *a, *b, *c; // copies of a, b, c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for 512 integers
int i;
// allocate device copies of a, b, c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, sizeof( int ) );
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( sizeof( int ) );
random_ints( a, N );
random_ints( b, N );
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
// launch dot() kernel with 1 block and N threads
dot<<< 1, N >>>( dev_a, dev_b, dev_c );
// copy device result back to host copy of c (blocking, so it syncs the kernel)
cudaMemcpy( c, dev_c, sizeof( int ) , cudaMemcpyDeviceToHost );
printf("GPU:%d CPU:%d\n",c[0],cpu(a,b,N));
free( a ); free( b ); free( c );
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
21,770 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// C = A * B for square N x N integer matrices; one thread per output
// element, guarded so surplus threads in edge blocks do nothing.
__global__ void matrixMultGPU(int *a, int *b, int *c, int N){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int fil = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= N || fil >= N)
        return;
    int sum = 0;
    for (int k = 0; k < N; k++)
        sum += a[fil * N + k] * b[k * N + col];
    c[fil * N + col] = sum;
}
int main (void){
//Creación de variables del sistema
int *a, *b, *c, *dev_a, *dev_b, *dev_c, N;
int i,j;
int T,div=1, iteraciones=100,ind=0;
float elapsedTime;
printf("Ingrese el tamano deseado para las matrices:\n");
scanf("%d",&N);
//Creación de variables de tiempo
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Creando espacio e inicializando matrices...\n");
//Asignación e inicialización de memoria
a=(int*)malloc(N*N*sizeof(int));
b=(int*)malloc(N*N*sizeof(int));
c=(int*)malloc(N*N*sizeof(int));
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
a[i*N+j]=i*j;
b[i*N+j]=i*j;
}
}
if(cudaMalloc(&dev_a,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dev_b,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dev_c,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
printf("Asignacion de memoria correcta\n");
//Copia de memoria a GPU
if(cudaMemcpy(dev_a,a,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(dev_b,b,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
//Cálculo de bloques e hilos
while((float)N/(float)div>32)
{
div++;
}
float f_N=(float)N,f_div=(float)div;
T=(int)ceil(f_N/f_div);
dim3 ThreadsBloque(T,T);
dim3 Bloques(div, div);
printf("Se va a realizar la suma con %d bloques y %d hilos\n",div,T);
printf("Se va a realizar %d iteraciones de matrices %dx%d\n",iteraciones,N,N);
//Ejecución de kernel
cudaEventRecord(start,0);
while(ind<iteraciones)
{
matrixMultGPU<<<Bloques, ThreadsBloque>>>(dev_a,dev_b,dev_c,N);
ind++;
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("El tiempo tomado para %d iteraciones fue de %3.5f ms\n",iteraciones,elapsedTime);
cudaMemcpy(c,dev_c,N*N*sizeof(int),cudaMemcpyDeviceToHost);
printf("Por ejemplo %d deberia ser 0\n",c[3*N]);
printf("Por ejemplo %d deberia ser 0\n",c[(int)N/2]);
printf("Por ejemplo %d deberia ser %d\n",c[N+1],(int)((2*pow(N-1,3)+3*pow(N-1,2)+N-1)/6));
/*
for(i=0;i<N;i++)
{
printf("\n");
for(j=0;j<N;j++)
{
printf("\t%d",a[i*N+j]);
}
//printf("\t");
for(j=0;j<N;j++)
{
printf("\t%d",b[i*N+j]);
}
//printf("\t");
for(j=0;j<N;j++)
{
printf("\t%d",c[i*N+j]);
}
}
*/
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} |
21,771 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <float.h>
__global__ void maxpooling_kernel(float *output, float *input,
int batch, int channel, int height, int width,
int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width, int total_size)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int kH = kernel_height;
int kW = kernel_width;
int pH = pad_height;
int pW = pad_width;
int sH = stride_height;
int sW = stride_width;
int P = ((H + 2 * pH - kH) / sH) + 1;
int Q = ((W + 2 * pW - kW) / sW) + 1;
//tid : thread id
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= total_size)
return;
//q_idx : output w-index
int q_idx = tid % Q;
int idx = tid / Q;
//p_idx : output h-index
int p_idx = idx % P;
idx /= P;
//k_idx : output channel-index
int k_idx = idx % C;
//n_idx : output batch-index
int n_idx = idx / C;
//output(n_idx, k_idx, p_idx, q_idx)
float max = -FLT_MAX;
for (int kh = 0; kh < kH; kh++) {
int h_idx = p_idx * sH + kh - pH;
if (h_idx >= 0 && h_idx < H) {
for (int kw = 0; kw < kW; kw++) {
int w_idx = q_idx * sW + kw - pW;
if (w_idx >= 0 && w_idx < W) {
int input_index = n_idx * C * H * W + k_idx * H * W + h_idx * W + w_idx;
if (input[input_index] > max) {
max = input[input_index];
}
//int input_index = n_idx * C * H * W + c * H * W + h_idx * W + w_idx;
//int weight_index = k_idx * C * kH * kW + c * kH * kW + kh * kW + kw;
//sum += input[input_index] * weight[weight_index];
}
}
}
}
output[tid] = max;
//if (tid < 5)
// printf("%dth thread : %f\n", tid, output[tid]);
}
// Host launcher for maxpooling_kernel: derives the output extent P x Q from
// the standard pooling formula, then launches one thread per output element
// (N*C*P*Q total, 256 threads per block, grid rounded up).
// `output` and `input` must be device pointers.
void maxpooling(float *output, float *input,
int batch, int channel, int height, int width,
int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int kH = kernel_height;
int kW = kernel_width;
int pH = pad_height;
int pW = pad_width;
int sH = stride_height;
int sW = stride_width;
// Output spatial extent: floor((in + 2*pad - kernel) / stride) + 1.
int P = (H + 2 * pH - kH) / sH + 1;
int Q = (W + 2 * pW - kW) / sW + 1;
int THREADS_PER_BLOCK = 256;
int TOTAL_SIZE = N * C * P * Q;
// Ceiling division so the tail elements get a (guarded) block.
int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
maxpooling_kernel <<< NUMBER_OF_BLOCKS, THREADS_PER_BLOCK >>> (output, input, N, C, H, W, kH, kW, pH, pW, sH, sW, TOTAL_SIZE);
}
21,772 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/*bool InitCUDA(){
int count;
cudaGetDeviceCount(&count);
if(count == 0){
fprintf(stderr, "There is no device.\n");
return false;
}
int i;
for(i = 0; i < count; i++){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i) == cudaSuccess){
if(prop.major >= 1){
break;
}
}
}
if(i == count){
fprintf(stderr,"There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i);
return true;
}*/
// Report how many CUDA-capable devices the runtime can see.
int main(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("There are %d devices.\n", deviceCount);
    return 0;
}
21,773 | #include <stdio.h>
#include <stdlib.h>
#define IN_SIZE 32
#define TH_X_BLK 32
// It's not work-efficient (O(n log n) adds), and it computes an INCLUSIVE scan,
// so its results must be adjusted before comparing with the exclusive
// work-efficient version below.
// Naive (Hillis-Steele) inclusive prefix sum of one block's worth of data.
// Each of the TH_X_BLK threads owns one shared-memory slot.
__global__ void prefixSumNaive(int *in,int *out){
    __shared__ int smem[TH_X_BLK];
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    // Zero-fill the tail so no thread ever reads an uninitialized slot
    // (the original only wrote slots with x < IN_SIZE).
    smem[threadIdx.x] = (x < IN_SIZE) ? in[x] : 0;
    for(int i = 1; i < TH_X_BLK; i *= 2){
        __syncthreads();
        // BUGFIX: read the partner value BEFORE anyone overwrites it.
        // The original did `smem[tid] += smem[tid-i]` in one step, racing with
        // the thread that is simultaneously updating smem[tid-i].
        int addend = (threadIdx.x >= i) ? smem[threadIdx.x - i] : 0;
        __syncthreads();
        smem[threadIdx.x] += addend;
    }
    if(x < IN_SIZE)
        out[x] = smem[threadIdx.x];
}
//work-efficient implementation of the algorithm
// Work-efficient (Blelchoch-style) EXCLUSIVE scan: up-sweep reduction followed
// by a down-sweep, O(n) additions for one block of TH_X_BLK elements.
__global__ void prefixSumWE(int *in,int *out){
    __shared__ int smem[TH_X_BLK];
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int i;
    int temp;
    // Zero-fill the tail so no uninitialized shared memory is read.
    smem[threadIdx.x] = (x < IN_SIZE) ? in[x] : 0;
    // Reduction (up-sweep) phase: build partial sums up the implicit tree.
    for(i = 1; i < TH_X_BLK; i *= 2){
        __syncthreads();
        if(threadIdx.x < TH_X_BLK / (i * 2))
            smem[(threadIdx.x + 1) * i * 2 - 1] += smem[(threadIdx.x + 1) * i * 2 - 1 - i];
    }
    // Down-sweep phase: seed the root with the identity (exclusive scan).
    if(threadIdx.x == 0)
        smem[blockDim.x - 1] = 0;
    for(i = TH_X_BLK / 2; i > 0; i /= 2){
        __syncthreads();
        if(threadIdx.x < TH_X_BLK / (i * 2)){
            // Save the current root, push root+left down, move old root left.
            temp = smem[(threadIdx.x + 1) * i * 2 - 1];
            smem[(threadIdx.x + 1) * i * 2 - 1] += smem[(threadIdx.x + 1) * i * 2 - 1 - i];
            smem[(threadIdx.x + 1) * i * 2 - 1 - i] = temp;
        }
    }
    // BUGFIX: the final down-sweep writes smem[2t] / smem[2t+1] from thread t,
    // so thread tid reads a slot written by ANOTHER thread -- a barrier is
    // required before the read-back (missing in the original).
    __syncthreads();
    if(x < IN_SIZE)
        out[x] = smem[threadIdx.x];
}
// Host driver: scans an all-ones array with both kernels and prints the
// input, the naive (inclusive) result, and the work-efficient result.
int main(){
    bool log = true;
    size_t const bytes = IN_SIZE * sizeof(int);
    int *in = (int*)malloc(bytes);
    int *out = (int*)malloc(bytes);
    int *gpu_in, *gpu_out;
    cudaMalloc((void**)&gpu_in, bytes);
    cudaMalloc((void**)&gpu_out, bytes);
    // All-ones input: the inclusive scan should read 1, 2, 3, ...
    for(int i = 0; i < IN_SIZE; i++)
        in[i] = 1;
    if(log){
        printf("Input array\n");
        for(int i = 0; i < IN_SIZE; i++)
            printf("%d ", in[i]);
        printf("\n");
    }
    cudaMemcpy(gpu_in, in, bytes, cudaMemcpyHostToDevice);
    int const num_blocks = (IN_SIZE + TH_X_BLK - 1) / TH_X_BLK;  // ceil-div
    prefixSumNaive<<<num_blocks, TH_X_BLK>>>(gpu_in, gpu_out);
    cudaMemcpy(out, gpu_out, bytes, cudaMemcpyDeviceToHost);
    if(log){
        printf("Output array(naive version)\n");
        for(int i = 0; i < IN_SIZE; i++)
            printf("%d ", out[i]);
        printf("\n");
    }
    prefixSumWE<<<num_blocks, TH_X_BLK>>>(gpu_in, gpu_out);
    cudaMemcpy(out, gpu_out, bytes, cudaMemcpyDeviceToHost);
    if(log){
        printf("Output array\n");
        for(int i = 0; i < IN_SIZE; i++)
            printf("%d ", out[i]);
        printf("\n");
    }
    cudaFree(gpu_in);
    cudaFree(gpu_out);
    free(in);
    free(out);
}
21,774 | #include "includes.h"
// Strided fill: arr[i*step] = val for every i in [0, n_elem).
// Launch with at least n_elem threads; extra threads exit immediately.
__global__ void BaseNeuronSetFloatArray(float *arr, int n_elem, int step, float val)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n_elem)
        return;          // guard: grid may overshoot n_elem
    arr[idx * step] = val;
}
21,775 | #include <stdio.h>
// Multiplies two square matrices packed into one buffer of floats:
//   input[0]                  = matrix width W (stored as a float)
//   input[1 .. W*W]           = A (row-major)
//   input[1+W*W .. 1+2*W*W]   = B (row-major)
//   input[1+2*W*W .. ]        = Out = A.B (row-major)
// Intended to be executed by the lanes of one warp: lane t computes rows
// t, t+32, t+64, ... of the result.
__device__ void MatrixMultiply(void *input)
{
    float* inputIn = (float*)input;
    int matrixWidth = inputIn[0];          // width travels in-band as a float
    float *matrixA = inputIn + 1;
    float *matrixB = matrixA + matrixWidth * matrixWidth;
    float *matrixOut = matrixA + 2 * matrixWidth * matrixWidth;
    int warp_size = 32;
    int thread = threadIdx.x % warp_size;
    // CONSISTENCY FIX: stride by warp_size instead of the duplicated magic 32
    // (same value, but now the constant lives in one place).
    for (unsigned int i = thread; i < matrixWidth; i = i + warp_size)
    {
        for (unsigned int j = 0; j < matrixWidth; j++) {
            float sum = 0;
            for (unsigned int k = 0; k < matrixWidth; k++) {
                float a = matrixA[i * matrixWidth + k];
                float b = matrixB[k * matrixWidth + j];
                sum += a * b;
            }
            matrixOut[i * matrixWidth + j ] = sum;
        }
    }
}
|
21,776 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <math.h>
// this is a function the instructor came up with
// Single-block tree reduction: sums 2*blockDim.x ints of d into d[0].
// Launch with count/2 threads for an array of `count` elements.
__global__ void sumSingleBlock(int* d) {
    int tid = threadIdx.x;
    // `tc` = number of threads doing useful work this step; it halves each
    // step while the operand stride doubles.  (Note: `>>=` is a RIGHT shift;
    // the original comment called it a left shift.)
    for (int tc=blockDim.x, stepSize=1; tc>0; tc>>=1, stepSize<<=1) {
        if (tid < tc) {
            int pa = tid * stepSize * 2;
            int pb = pa + stepSize;
            d[pa] += d[pb];
        }
        // BUGFIX: without a barrier the next step can read d[pb] before the
        // thread responsible has written its partial sum (data race).
        // Placed outside the divergent `if` so every thread reaches it.
        __syncthreads();
    }
}
// Same single-block tree reduction, written with explicit /2 and *2 updates.
__global__ void mySumSingleBlock(int* d) {
    int tid = threadIdx.x;
    // iterate over aggregation steps; `tc` is the number of threads still
    // doing useful additions, `step` the distance between the two operands.
    for (int tc=blockDim.x, step=1; tc > 0; tc/=2, step*=2) {
        if (tid < tc) { // only have participating threads do useful work
            // map thread to the pair of array positions it sums together
            int pa = tid * step * 2;
            int pb = pa + step;
            d[pa] += d[pb];
        }
        // BUGFIX: barrier after each step -- later steps consume partial sums
        // produced by OTHER threads, so they must be published first.
        __syncthreads();
    }
}
int main() {
// Max threads/block on this GPU (GTX 1080) is 1024 (blockDim.x limit).
// NOTE(review): the original comment worried about int overflow, but `int`
// here is 32-bit (max 2^31-1 ~ 2.1e9), not 16-bit (2^15-1 = 32767).  The
// sums 1..512 = 512*513/2 = 131328 and 1..1024 = 1024*1025/2 = 524800 both
// fit easily -- which is why the output is correct; nothing
// compiler-specific about it.
const int count = 512;
printf("%d elements\n", count);
const int size = count * sizeof(int);
// Host input is 1..count, so the expected sum is count*(count+1)/2.
int h[count];
for (int i=0; i<count; i++) {
h[i] = i+1;
}
int *d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
// One block of count/2 threads: each thread folds one pair per step.
// sumSingleBlock<<<1,count/2>>>(d);
mySumSingleBlock<<<1,count/2>>>(d);
// Only the reduced sum at d[0] is copied back (a single int, by design).
cudaMemcpy(h, d, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d);
printf("Sum is %d\n", h[0]);
}
21,777 | #include <iostream>
#include <cstdlib>
#include <ctime>
#define NUM_POINTS 33554432 // 2^25 floats = 128 MiB of 32-bit float data (not 1 GB)
float cpu_dataset[NUM_POINTS];
float cpu2_dataset[NUM_POINTS];
// All threads hammer one shared counter: worst-case atomic contention.
__global__ void hitAtomic(float* where) {
    atomicAdd(where, 1.0f);   // 1.0f: float literal (1.0 was a double, narrowed at the call)
}
// Each lane hits its own one-of-32 slot, so lanes within a warp never
// collide; contention only occurs across warps/blocks.
__global__ void hitAtomicBy32(float* where) {
    atomicAdd(&where[threadIdx.x % 32], 1.0f);   // 1.0f: keep the literal in float
}
// One slot per threadIdx.x: contention only between blocks sharing an index.
__global__ void hitAtomicThreadLocal(float* where) {
    atomicAdd(&where[threadIdx.x], 1.0f);   // 1.0f: keep the literal in float
}
// Non-atomic increment: deliberately racy across blocks that share a slot.
// Lost updates are the point of this "naive" baseline, so the race stays.
__global__ void hitNaiveThreadLocal(float* where) {
    where[threadIdx.x] += 1.0f;   // 1.0f avoids a float->double->float round trip
}
// data[i] = 1 - data[i], in place.  No bounds guard: callers launch exactly
// NUM_POINTS threads (grid * block divides the buffer evenly).
__global__ void inplaceOperation(float* data) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    data[id] = 1.0f - data[id];   // 1.0f: the 1.0 double literal promoted every element
}
// dataout[i] = 1 - datain[i] with distinct input/output buffers.
__global__ void immutableOperation(float* datain, float* dataout) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    dataout[id] = 1.0f - datain[id];   // 1.0f keeps the arithmetic in float
}
// Same as immutableOperation but with a const input pointer, to compare
// whether constness affects codegen/bandwidth in the benchmark.
__global__ void constImmutableOperation(const float* datain, float* dataout) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    dataout[id] = 1.0f - datain[id];   // 1.0f keeps the arithmetic in float
}
int main(int argc, char** argv) {
// Memory-bandwidth / atomic-contention micro-benchmark.
// NOTE(review): NUM_POINTS * 4 is 2^25 floats = 128 MiB, not the "1 GB" the
// labels claim, and (clock()-start)/CLOCKS_PER_SEC yields SECONDS of CPU
// time, not the " ms" printed.  Relative comparisons remain meaningful.
srand(12345);
for (int i = 0; i < NUM_POINTS; i++)
cpu_dataset[i] = ((float)rand()) / RAND_MAX;
// Dump device-0 properties so the benchmark log is self-describing.
struct cudaDeviceProp cdp;
cudaGetDeviceProperties(&cdp, 0);
std::cout << "Device at 0:" << std::endl;
std::cout << " name: " << cdp.name << std::endl;
std::cout << " totalGlobalMem: " << cdp.totalGlobalMem / 1024.0 / 1024.0 / 1024.0 << " GB" << std::endl;
std::cout << " sharedMemPerBlock: " << cdp.sharedMemPerBlock / 1024.0 << " kB" << std::endl;
std::cout << " regsPerBlock: " << cdp.regsPerBlock << std::endl;
std::cout << " warpSize: " << cdp.warpSize << std::endl;
std::cout << " memPitch: " << cdp.memPitch / 1024.0 / 1024.0 / 1024.0 << " GB" << std::endl;
std::cout << " maxThreadsPerBlock: " << cdp.maxThreadsPerBlock << std::endl;
std::cout << " maxThreadsDim: " << cdp.maxThreadsDim[0] << " " << cdp.maxThreadsDim[1] << " " << cdp.maxThreadsDim[2] << " " << std::endl;
std::cout << " maxGridSize: " << cdp.maxGridSize[0] << " " << cdp.maxGridSize[1] << " " << cdp.maxGridSize[2] << " " << std::endl;
std::cout << " totalConstMem: " << cdp.totalConstMem / 1024.0 << " kB" << std::endl;
std::cout << " version: " << cdp.major << "." << cdp.minor << std::endl;
std::cout << " clockRate: " << cdp.clockRate / 1000.0 << " MHz" << std::endl;
std::cout << " textureAlignment: " << cdp.textureAlignment << std::endl;
std::cout << " deviceOverlap: " << (cdp.deviceOverlap ? "true" : "false") << std::endl;
std::cout << " multiProcessorCount: " << cdp.multiProcessorCount << std::endl;
std::cout << " kernelExecTimeoutEnabled: " << (cdp.kernelExecTimeoutEnabled ? "true" : "false") << std::endl;
std::cout << " integrated: " << (cdp.integrated ? "true" : "false") << std::endl;
std::cout << " canMapHostMemory: " << (cdp.canMapHostMemory ? "true" : "false") << std::endl;
std::cout << " computeMode: " << (cdp.computeMode == cudaComputeModeDefault ? "cudaComputeModeDefault" : (cdp.computeMode == cudaComputeModeExclusive ? "cudaComputeModeExclusive" : (cdp.computeMode == cudaComputeModeProhibited ? "cudaComputeModeProhibited" : "unknown"))) << std::endl;
std::cout << " concurrentKernels: " << (cdp.concurrentKernels ? "true" : "false") << std::endl;
std::cout << " ECCEnabled: " << (cdp.ECCEnabled ? "true" : "false") << std::endl;
std::cout << " pciBusID: " << cdp.pciBusID << std::endl;
std::cout << " pciDeviceID: " << cdp.pciDeviceID << std::endl;
std::cout << " tccDriver: " << (cdp.tccDriver ? "true" : "false") << std::endl;
std::cout << std::endl;
// Baseline: host-to-host memcpy bandwidth (5 runs of 1000 copies each).
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
memcpy(cpu2_dataset, cpu_dataset, NUM_POINTS * 4);
}
std::cout << "1 GB host -> host: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
// Device buffers (the sizes hard-code sizeof(float) == 4 throughout).
float* gpu_dataset;
float* gpu2_dataset;
cudaMalloc((void**)&gpu_dataset, NUM_POINTS * 4);
cudaMalloc((void**)&gpu2_dataset, NUM_POINTS * 4);
std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl;
// Pageable host -> device transfer bandwidth.
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
cudaMemcpy(gpu_dataset, cpu_dataset, NUM_POINTS * 4, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
}
std::cout << "1 GB host -> device: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
// Kernel-side bandwidth: in-place vs out-of-place vs const-input variants.
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
inplaceOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(gpu_dataset);
cudaDeviceSynchronize();
}
std::cout << "1 GB device in-place operation: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
immutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(gpu_dataset, gpu2_dataset);
cudaDeviceSynchronize();
}
std::cout << "1 GB device immutable operation: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
constImmutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(gpu_dataset, gpu2_dataset);
cudaDeviceSynchronize();
}
std::cout << "1 GB device const immutable operation: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
// Device -> pageable host transfer bandwidth.
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
cudaMemcpy(cpu_dataset, gpu_dataset, NUM_POINTS * 4, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
}
std::cout << "1 GB device -> host: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl;
// Pinned (page-locked) host buffer experiments.
float* pinned_dataset;
cudaMallocHost((void**)&pinned_dataset, NUM_POINTS * 4);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
memcpy(pinned_dataset, cpu_dataset, NUM_POINTS * 4);
}
std::cout << "1 GB host -> pinned: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
// NOTE(review): mapping a cudaMallocHost buffer with cudaHostGetDevicePointer
// relies on mapped/UVA support; no cudaSetDeviceFlags(cudaDeviceMapHost)
// precedes it and the return code is not checked -- verify on target system.
float* mapped_dataset;
cudaHostGetDevicePointer((void**)&mapped_dataset, (void*)pinned_dataset, 0);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
inplaceOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(mapped_dataset);
cudaDeviceSynchronize();
}
std::cout << "1 GB device in-place operation: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
immutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(mapped_dataset, gpu2_dataset);
cudaDeviceSynchronize();
}
std::cout << "1 GB device immutable operation: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
constImmutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(mapped_dataset, gpu2_dataset);
cudaDeviceSynchronize();
}
std::cout << "1 GB device const immutable operation: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
memcpy(cpu_dataset, pinned_dataset, NUM_POINTS * 4);
}
std::cout << "1 GB pinned -> host: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl;
// Device -> device copy bandwidth.
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
cudaMemcpy(gpu2_dataset, gpu_dataset, NUM_POINTS * 4, cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
}
std::cout << "1 GB device -> device: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
// Atomic-contention experiments on a 32-slot float array, with three
// launch shapes each; the "check for" lines print the expected counts.
const float atomic_init[32] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
float* global_atomics;
cudaMalloc((void**)&global_atomics, 32 * 4);
float cpu_atomics[32];
cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitAtomic<<<32768, 32>>>(global_atomics);
cudaDeviceSynchronize();
}
std::cout << "hit atomics on global <<<32768, 32>>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * 32 * 5 << ": ";
for (int i = 0; i < 32; i++)
std::cout << cpu_atomics[i] << " ";
std::cout << std::endl;
cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitAtomic<<<32768, 64>>>(global_atomics);
cudaDeviceSynchronize();
}
std::cout << "hit atomics on global <<<32768, 64>>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * 64 * 5 << ": ";
for (int i = 0; i < 32; i++)
std::cout << cpu_atomics[i] << " ";
std::cout << std::endl;
cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitAtomic<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics);
cudaDeviceSynchronize();
}
std::cout << "hit atomics on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * cdp.maxThreadsPerBlock * 5 << ": ";
for (int i = 0; i < 32; i++)
std::cout << cpu_atomics[i] << " ";
std::cout << std::endl;
cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitAtomicBy32<<<32768, 32>>>(global_atomics);
cudaDeviceSynchronize();
}
std::cout << "hit atomics by 32 on global <<<32768, 32>>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * 32 * 5 / 32 << ": ";
for (int i = 0; i < 32; i++)
std::cout << cpu_atomics[i] << " ";
std::cout << std::endl;
cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitAtomicBy32<<<32768, 64>>>(global_atomics);
cudaDeviceSynchronize();
}
std::cout << "hit atomics by 32 on global <<<32768, 64>>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * 64 * 5 / 32 << ": ";
for (int i = 0; i < 32; i++)
std::cout << cpu_atomics[i] << " ";
std::cout << std::endl;
cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitAtomicBy32<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics);
cudaDeviceSynchronize();
}
std::cout << "hit atomics by 32 on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * cdp.maxThreadsPerBlock * 5 / 32 << ": ";
for (int i = 0; i < 32; i++)
std::cout << cpu_atomics[i] << " ";
std::cout << std::endl;
// Per-threadIdx slot experiments (one slot per thread index in the block):
// naive (racy) increments vs atomic increments.
float* atomic_init2 = (float*)malloc(cdp.maxThreadsPerBlock * 4);
float* global_atomics2;
cudaMalloc((void**)&global_atomics2, cdp.maxThreadsPerBlock * 4);
float* cpu_atomics2 = new float[cdp.maxThreadsPerBlock];
for (int i = 0; i < cdp.maxThreadsPerBlock; i++)
atomic_init2[i] = 0.0;
cudaMemcpy(global_atomics2, atomic_init2, cdp.maxThreadsPerBlock * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitNaiveThreadLocal<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics2);
cudaDeviceSynchronize();
}
std::cout << "hit naive thread local by " << cdp.maxThreadsPerBlock << " on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics2, global_atomics2, cdp.maxThreadsPerBlock * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * 5 << ": ";
for (int i = 0; i < cdp.maxThreadsPerBlock; i++)
std::cout << cpu_atomics2[i] << " ";
std::cout << std::endl;
for (int i = 0; i < cdp.maxThreadsPerBlock; i++)
atomic_init2[i] = 0.0;
cudaMemcpy(global_atomics2, atomic_init2, cdp.maxThreadsPerBlock * 4, cudaMemcpyHostToDevice);
for (int i = 0; i < 5; i++) {
std::clock_t startTime = std::clock();
for (int j = 0; j < 1000; j++) {
hitAtomicThreadLocal<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics2);
cudaDeviceSynchronize();
}
std::cout << "hit atomic thread local by " << cdp.maxThreadsPerBlock << " on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl;
}
cudaMemcpy(cpu_atomics2, global_atomics2, cdp.maxThreadsPerBlock * 4, cudaMemcpyDeviceToHost);
std::cout << "check for " << 32768 * 5 << ": ";
for (int i = 0; i < cdp.maxThreadsPerBlock; i++)
std::cout << cpu_atomics2[i] << " ";
std::cout << std::endl;
return 0;
}
|
21,778 | /*
Simulated Annealing algorithm for Traveling Salesman Problem
@@ CUDA version: no parallel optimization, single thread
Input: xxx.tsp file
Output: optimal value (total distance)
& solution route: permutation of {1, 2, ..., N}
*/
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <sys/time.h>
#include <pthread.h>
#include <curand_kernel.h>
#define MAXITER 20 // Proposal 20 routes and then select the best one
#define THRESH1 0.1 // Threshold 1 for the strategy
#define THRESH2 0.89 // Threshold 2 for the strategy
#define RELAX 400 // The times of relaxation of the same temperature
#define ALPHA 0.999 // Cooling rate
#define INITEMP 99.0 // Initial temperature
#define STOPTEMP 0.001 // Termination temperature
#define MAXLAST 3 // Stop if the tour length keeps unchanged for MAXLAST consecutive temperature
#define MAXN 250 // only support N <= 250
#define THREADITER 200
using namespace std;
float minTourDist = -1; // The distance of shortest path
int *minTour = NULL; // The shortest path
int N = 0; // Number of cities
float *dist = NULL; // The distance matrix, use (i-1) instead of i
int *currTour = NULL;
int blockNum = 1; // block number
int threadNum = 1; // thread number
int globalIter = -1; // global iteration count
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
// Reentrant uniform integer generator: operator()(limit) draws from
// [0, limit] without modulo bias, using rand_r on per-instance state.
class rand_x {
    unsigned int seed;  // private PRNG state consumed by rand_r
public:
    rand_x(int init) : seed(init) {}
    int operator()(int limit) {
        // Partition [0, RAND_MAX] into limit+1 equal buckets and reject
        // draws that fall past the last full bucket (removes bias).
        int const bucket = RAND_MAX / (limit + 1);
        int candidate;
        do {
            candidate = rand_r(&seed) / bucket;
        } while (candidate > limit);
        return candidate;
    }
};
/* load the data */
// Parses a TSPLIB-style .tsp file: header lines, then either 2-D node
// coordinates (EDGE_WEIGHT_TYPE EUC_2D) or an explicit lower-triangular
// distance matrix (EXPLICIT).  Fills the globals N and dist (N*N floats).
// NOTE(review): N is never checked against MAXN (nodeCoord would overflow
// for larger instances) and pf is never fclose()d -- confirm/fix upstream.
void loadFile(char* filename) {
FILE *pf;
pf = fopen(filename, "r");
if (pf == NULL) {
printf("Cannot open the file!\n");
exit(1);
}
char buff[200];
fscanf(pf, "NAME: %[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nTYPE: TSP%[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nCOMMENT: %[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nDIMENSION: %d", &N);
printf("The N is: %d\n", N);
fscanf(pf, "\nEDGE_WEIGHT_TYPE: %[^\n]s", buff);
printf("the type is: %s\n", buff);
dist = (float *)malloc(sizeof(float) * N * N);
memset(dist, 0, sizeof(float) * N * N);
if (strcmp(buff, "EUC_2D") == 0) {
// Euclidean instance: read (id, x, y) per city, then build the full
// symmetric distance matrix from pairwise distances.
fscanf(pf, "\nNODE_COORD_SECTION");
float nodeCoord[MAXN][2] = {};
int nid;
float xx, yy;
for (int i = 0; i < N; ++i) {
fscanf(pf, "\n%d %f %f", &nid, &xx, &yy);
nodeCoord[i][0] = xx;
nodeCoord[i][1] = yy;
}
float xi, yi, xj, yj;
for (int i = 0; i < N; ++i) {
for (int j = i + 1; j < N; ++j) {
xi = nodeCoord[i][0];
yi = nodeCoord[i][1];
xj = nodeCoord[j][0];
yj = nodeCoord[j][1];
dist[i*N + j] = (float)sqrt((xi - xj) * (xi - xj) + (yi - yj) * (yi - yj));
dist[j*N + i] = dist[i*N + j];
}
}
}
else if (strcmp(buff, "EXPLICIT") == 0) {
// Explicit instance: weights read directly, lower triangle row by row,
// mirrored into the upper triangle.
fscanf(pf, "\nEDGE_WEIGHT_FORMAT: %[^\n]s", buff);
fscanf(pf, "\n%[^\n]s", buff);
char *disps = strstr(buff, "DISPLAY_DATA_TYPE");
if (disps != NULL) {
fscanf(pf, "\nEDGE_WEIGHT_SECTION");
}
float weight;
for (int i = 0; i < N; ++i) {
for (int j = 0; j <= i; ++j) {
fscanf(pf, "%f", &weight);
dist[i*N + j] = weight;
dist[j*N + i] = weight;
}
}
}
return;
}
/* Calculate the length of the tour */
/* Total length of the closed tour in `tour` (a permutation of 0..N-1),
   using the global city count N and distance matrix dist. */
float tourLen(int *tour) {
    if (tour == NULL) {
        printf("tour not exist!\n");
        return -1;
    }
    float cnt = 0;
    // Sum the consecutive edges, then close the loop back to the start.
    for (int i = 1; i < N; ++i)
        cnt += dist[tour[i-1]*N + tour[i]];
    cnt += dist[tour[N-1]*N + tour[0]];
    return cnt;
}
/* the main simulated annealing function */
__global__ void saTSP(int cityCnt, int* globalTour, curandState *randStates, float *dev_dist, float temperature, int relaxiter) {
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
int *tour = &globalTour[thid * cityCnt];
float currLen = 0;
for (int i = 0; i < cityCnt - 1; ++i) {
currLen += dev_dist[tour[i]*cityCnt + tour[i+1]];
}
currLen += dev_dist[tour[cityCnt-1]*cityCnt + tour[0]];
//float temperature = INITEMP;
//float lastLen = currLen;
//int contCnt = 0; // the continuous same length times
int iterCnt = 0;
while (temperature > STOPTEMP) {
temperature *= ALPHA;
iterCnt += 1;
/* stay in the same temperature for RELAX times */
for (int i = 0; i < relaxiter; ++i) {
/* Proposal 1: Block Reverse between p and q */
int p = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt + 10)) % cityCnt;
int q = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt + 10)) % cityCnt;
// If will occur error if p=0 q=N-1...
if (abs(p - q) == cityCnt - 1) {
p = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt - 3));
q = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt - 2));
}
if (p == q) {
q = (q + 2) % cityCnt;
}
if (p > q) {
int tmp = p;
p = q;
q = tmp;
}
int p1 = (p - 1 + cityCnt) % cityCnt;
int q1 = (q + 1) % cityCnt;
int tp = tour[p], tq = tour[q], tp1 = tour[p1], tq1 = tour[q1];
float delta = dev_dist[tp*cityCnt + tq1] + dev_dist[tp1*cityCnt + tq] - dev_dist[tp*cityCnt + tp1] - dev_dist[tq*cityCnt + tq1];
/* whether to accept the change */
if ((delta < 0) || ((delta > 0) &&
(expf(-delta/temperature) > curand_uniform(&(randStates[thid]))))) {
currLen = currLen + delta;
int mid = (q - p) >> 1;
int tmp;
for (int k = 0; k <= mid; ++k) {
tmp = tour[p+k];
tour[p+k] = tour[q-k];
tour[q-k] = tmp;
}
//currLen = tourLen(tour);
}
}
/*
if ((currLen - lastLen < 1e-2) && (currLen - lastLen > -1e-2)) {
contCnt += 1;
if (contCnt >= MAXLAST) {
//printf("unchanged for %d times1!\n", contCnt);
break;
}
}
else
contCnt = 0;
lastLen = currLen;
*/
}
return;
}
// Initializes one curandState per thread.  Uses a COMMON seed with the
// thread id as the sequence number -- the documented cuRAND pattern for
// independent per-thread streams.  The original passed seed*s_id as the
// seed, which collapses to seed 0 for thread 0 regardless of `seed`.
__global__ void setup_kernel_randomness(curandState * state, unsigned long seed)
{
    int s_id = (blockIdx.x*blockDim.x) + threadIdx.x;
    curand_init(seed, s_id, 0, &state[s_id]);
}
int main(int argc, char **argv) {
// Host driver: load the instance, copy the distance matrix to the device,
// build one random tour per thread, run a single annealing launch, then
// pick the best resulting tour on the host.
cudaError_t err = cudaSuccess;
float *dev_dist;
if (argc < 2) {
printf("Usage: ./cuda_tsp <filename> <blockNum> <threadNum>\n");
return 0;
}
else {
loadFile(argv[1]);
err = cudaMalloc((void **)&dev_dist, sizeof(float) * N * N);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed\n");
exit(1);
}
cudaMemcpy((void *)dev_dist, dist, sizeof(float) * N * N, cudaMemcpyHostToDevice);
}
if (argc == 4) {
blockNum = atoi(argv[2]);
threadNum = atoi(argv[3]);
}
printf("blockNum is: %d, threadNum is: %d\n", blockNum, threadNum);
struct timeval start, stop;
gettimeofday(&start, NULL);
// NOTE(review): both srandom() here and srand() below seed host RNGs;
// only rand()/random_shuffle are used afterwards -- one call is redundant.
srandom(time(0));
int *dev_currTour; // currTour on device;
int itersCnt = blockNum * threadNum; // total iterations
err = cudaMalloc((void **)&dev_currTour, sizeof(int)*N*itersCnt);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed\n");
exit(1);
}
srand(time(0));
// One independent random starting tour (permutation of 0..N-1) per thread.
currTour = (int *)malloc(sizeof(int) * N * itersCnt);
for (int i = 0; i < itersCnt; ++i) {
for (int j = 0; j < N; ++j) {
currTour[i*N + j] = j;
}
random_shuffle(currTour+i*N, currTour+(i+1)*N);
/*for (int j = 0; j < N; ++j) {
printf("%d ", currTour[i*N + j]);
}
printf("%d before: %f\n", i, tourLen(currTour + i*N));*/
}
err = cudaMemcpy(dev_currTour, currTour, itersCnt * N * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc() for dev_currTour failed\n");
exit(1);
}
// allocate random seed for each thread
curandState *devStates;
cudaMalloc((void **)&devStates, itersCnt * sizeof(curandState));
setup_kernel_randomness<<<blockNum, threadNum>>>(devStates, time(0));
cudaDeviceSynchronize();
float currLen = 0;
float temperature = INITEMP;
int contCnt = 0;
// tempstep is only needed by the commented-out multi-launch loop below;
// the kernel itself cools from `temperature` down to STOPTEMP internally,
// so one launch suffices.
float tempstep = pow(ALPHA, THREADITER);
//while (temperature > STOPTEMP) {
//printf("%.06f \n", temperature);
saTSP<<<blockNum, threadNum>>>(N, dev_currTour, devStates, dev_dist, temperature, RELAX);
cudaDeviceSynchronize();
// temperature *= tempstep;
//}
minTour = (int *)malloc(sizeof(int) * N);
memset(currTour, 0, itersCnt * N * sizeof(int));
err = cudaMemcpy(currTour, dev_currTour, itersCnt * N * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMemcpyc(Device to Host) failed with %d\n", err);
exit(1);
}
/* find the minimal answer: recompute each thread's tour length on the host
   and keep the shortest (minTourDist starts at -1, so the first length
   always wins). */
int minidx = 0;
for (int i = 0; i < itersCnt; ++i) {
currLen = tourLen(&currTour[i * N]);
/*for (int j = 0; j < N; ++j) {
printf("%d ", currTour[i*N + j]);
}
printf("%d after: %f\n", i, currLen);*/
if ((currLen < minTourDist) || (minTourDist < 0)) {
minTourDist = currLen;
minidx = i;
}
}
for (int i = 0; i < N; ++i) {
minTour[i] = currTour[minidx * N + i];
}
gettimeofday(&stop, NULL);
// ------------- Print the result! -----------------
int tottime = stop.tv_sec - start.tv_sec;
int timemin = tottime / 60;
int timesec = tottime % 60;
printf("Total time usage: %d min %d sec. \n", timemin, timesec);
printf("N is %d, The shortest length is: %f\n And the tour is: \n", N, minTourDist);
// Cities are printed 1-based (TSPLIB convention), hence the +1.
for (int i = 0; i < N; ++i) {
printf("%d \n", minTour[i]+1);
}
free(dist);
free(minTour);
free(currTour);
return 0;
}
|
21,779 | #include "includes.h"
// Column-wise maximum of an n x n row-major matrix m: cs[col] = max of
// column `col`.  One thread per column.
__global__ void findMax(int *m, int *cs, int n)
{
    int colnum = blockDim.x * blockIdx.x + threadIdx.x;
    if (colnum >= n)          // BUGFIX: guard -- the grid may overshoot n
        return;
    // BUGFIX: seed the max with this column's first element; the original
    // used m[0] (row 0 of column 0), which is wrong whenever every element
    // of this column is smaller than m[0].
    int max = m[colnum];
    for (int k = 1; k < n; k++){
        if (m[colnum + n*k] > max)
            max = m[colnum + n*k];
    }
    cs[colnum] = max;
}
21,780 | //xfail:BOOGIE_ERROR
//--blockDim=8 --gridDim=1 --no-inline
// The statically given values for A are not preserved when we translate CUDA
// since the host is free to change the contents of A.
// cf. testsuite/OpenCL/globalarray/pass2
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define N 2//8
#define THREAD_CHANGE 1
__constant__ float A[8] = {0,1,2,3,4,5,6,7};
// Verification test kernel (expected-failure case, see the xfail header):
// it deliberately writes into __constant__ A, which the host is otherwise
// free to change, to defeat the statically-known initializer.
__global__ void globalarray(float* p) {
int i = threadIdx.x;
A[THREAD_CHANGE] = 0; // forcing entry into the branch below by altering a "constant"! (the intentional bug)
int a = A[i];
if(a != threadIdx.x) {
p[0] = threadIdx.x; //only the thread with i == THREAD_CHANGE takes this branch, so there is no data race on p[0]
}
}
|
21,781 |
#include "magma_dsyev_batch_functions.cuh"
|
21,782 | #include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <assert.h>
#define ID(i,j,k) (( i * k ) + j)
#define ARG(i,j) <<< dim3(i,j,1), dim3(1,1,1) >>>
// computes the sum of matrices: c = (a + b)
// a : n->k, b : n->k, c : n->k
// call it with n x k blocks
// Elementwise matrix sum: c = a + b, all n x k row-major.
// Launch with an n x k grid of 1x1 blocks (one block per element).
__global__ void matrixSum(float* a, float* b, size_t const n, size_t const k, float* c){
    size_t const row{ blockIdx.x };
    size_t const col{ blockIdx.y };
    if (row < n && col < k)
        c[ID(row, col, k)] = a[ID(row, col, k)] + b[ID(row, col, k)];
}
// computes the product of matrices: c = (a.b)
// a : n->k, b : k->p, c : n->p
// call it with n x p blocks
// we assume c != a, c != b
// Matrix product c = a.b; a : n x k, b : k x p, c : n x p (row-major).
// Launch with an n x p grid of 1x1 blocks.  We assume c != a, c != b.
__global__ void matrixMultiply(float* a, float* b, size_t const n, size_t const k, size_t const p, float* c){
    assert((a != c) && (b != c));
    size_t i{ blockIdx.x };
    size_t j{ blockIdx.y };
    if ((i >= n) || (j >= p))
        return;
    // Accumulate in a register instead of the original's read-modify-write
    // on global c[ID(i,j,p)] each iteration (same addition order, one store).
    float acc = 0.0f;
    for (size_t z = 0; z < k; ++z)
        acc += (a[ID(i, z, k)] * b[ID(z, j, p)]);
    c[ID(i, j, p)] = acc;
}
// computes the product of matrix a and scalar b: c = b*a
// a : n->k, b : 1->1
// call it with n x k blocks
// Scalar scaling: c = b * a for an n x k row-major matrix a.
// Launch with an n x k grid of 1x1 blocks.
__global__ void matrixScalarMultiply(float* a, float b, size_t const n, size_t const k, float* c){
    size_t const row{ blockIdx.x };
    size_t const col{ blockIdx.y };
    if (row >= n || col >= k)
        return;
    c[ID(row, col, k)] = b * a[ID(row, col, k)];
}
// computes c = (a * b), where * is elementwise multiplication
// a : n->k, b : n->k, c : n->k
// call it with n x k blocks
// Hadamard product: c = a * b elementwise, all n x k row-major.
// Launch with an n x k grid of 1x1 blocks.
__global__ void matrixElementwiseMultiply(float* a, float* b, size_t const n, size_t const k, float* c){
    size_t const row{ blockIdx.x };
    size_t const col{ blockIdx.y };
    if (row < n && col < k)
        c[ID(row, col, k)] = a[ID(row, col, k)] * b[ID(row, col, k)];
}
// computes the sigmoid function: c = ( 1 / (1 + e^(-a)) ) elementwise on matrices
// a : n -> k, c : n ->k
// call it with n x p blocks
// Elementwise logistic function: c = 1 / (1 + e^(-a)); a, c are n x k.
// Launch with an n x k grid of 1x1 blocks.
__global__ void sigmoid(float* a, size_t const n, size_t const k, float* c){
    size_t const row{ blockIdx.x };
    size_t const col{ blockIdx.y };
    if (row >= n || col >= k)
        return;
    float const neg = (-1.0f) * a[ID(row, col, k)];
    c[ID(row, col, k)] = 1.0f / (1.0f + expf(neg));
}
// computes transpose of a:n->p
// returns it in c:p->n
// call it with n x p blocks
// we assume a != c
// Out-of-place transpose: c = a^T where a is n x p and c is p x n.
// Launch with an n x p grid of 1x1 blocks; assumes a != c.
__global__ void transpose(float* a, size_t n, size_t p, float* c){
    assert(a != c);
    size_t const row{ blockIdx.x };
    size_t const col{ blockIdx.y };
    if (row >= n || col >= p)
        return;
    c[ID(col, row, n)] = a[ID(row, col, p)];
}
// return the zero matrix c:n->k
// call it with n x k blocks
// Sets every entry of the n x k matrix c to zero.
// Launch with an n x k grid of 1x1 blocks.
__global__ void zeroMatrix(float* c, size_t n, size_t k){
    size_t const row{ blockIdx.x };
    size_t const col{ blockIdx.y };
    if (row < n && col < k)
        c[ID(row, col, k)] = 0.0f;
}
// *adds* the scalar x to every diagonal entry of the square matrix a : n->n
// call it with n x 1 blocks (only blockIdx.x is used)
__global__ void addDiagonal(float* a, size_t n, float x){
	size_t row{ blockIdx.x };
	if (row < n)
		a[ID(row, row, n)] += x;
}
// zeroes out column j, except for its j'th row
// using rank-preserving transformations
// c is the extension matrix of a
// call it with n x n blocks
// NOTE(review): blocks are not synchronized with each other, yet the block
// with k == j overwrites a[ID(i,j,n)] while sibling blocks of the same row i
// read a[ID(i,j,n)] to form 'ratio' -- a cross-block data race; the result
// can depend on block scheduling. Confirm whether the ratios need to be
// computed in a separate pass before the updates.
__global__ void column_zeroer(float *a, float *c, size_t n, size_t j){
	size_t i{ blockIdx.x };  // row being eliminated
	size_t k{ blockIdx.y };  // column being updated
	// skip the pivot row itself and any out-of-range block
	if ((j == i) || ((i >= n) || (k >= n)))
		return;
	// multiplier that cancels a[i][j] against the pivot a[j][j]
	float ratio{ a[ID(i, j, n)] / a[ID(j, j, n)] };
	a[ID(i, k, n)] -= (ratio * a[ID(j, k, n)]); // unstable
	c[ID(i, k, n)] -= (ratio * c[ID(j, k, n)]);
}
// divides row j with divisor
// it is a rank-preserving transformation
// c is the extension matrix of a
// call it with n x 1 blocks
// NOTE(review): the block with k == j rewrites a[ID(j,j,n)] while the other
// blocks read the same element as 'divisor' -- a cross-block data race;
// whether a sibling block observes the old or the updated pivot value is
// scheduling-dependent. Verify against a two-pass formulation.
__global__ void row_divider(float *a, float *c, size_t n, size_t j){
	size_t k{ blockIdx.x };  // column index within row j
	if (k >= n)
		return;
	float divisor{ a[ID(j, j, n)] }; // pivot element of row j
	a[ID(j, k, n)] /= divisor; // unstable
	c[ID(j, k, n)] /= divisor;
}
// computes the inverse of a:n->n
// using rank-preserving transformations (Gauss-Jordan elimination
// without pivoting -- a (near-)zero pivot yields inf/nan entries)
// numerically unstable
// returns the inverse in c:n->n
// note: 'a' is modified in place (damping is added to its diagonal and it
// is then reduced), so the caller loses the original matrix contents
// we assume a != c
inline void inverse(float* a, size_t n, float* c, float damping){
	assert(a != c);
	zeroMatrix ARG(n, n) (c, n, n);        // c <- 0
	addDiagonal ARG(n, 1) (c, n, 1.0f);    // c <- identity
	addDiagonal ARG(n, 1) (a, n, damping); // damping
	for (size_t j = 0; j < n; ++j)
		column_zeroer ARG(n, n) (a, c, n, j);
	for (size_t j = 0; j < n; ++j)
		row_divider ARG(n, 1) (a, c, n, j);
}
// debugging helper: copies an n x k device matrix to the host and dumps it
// to stderr, one row per line, followed by a blank line
inline void printCudaMatrix(float* m, size_t n, size_t k){
	float *host_copy = new float[n*k];
	cudaMemcpy(host_copy, m, sizeof(float)*n*k, cudaMemcpyDeviceToHost);
	for (size_t row = 0; row < n; ++row){
		for (size_t col = 0; col < k; ++col)
			std::cerr << host_copy[ID(row, col, k)] << " ";
		std::cerr << std::endl;
	}
	std::cerr << std::endl;
	delete[] host_copy;
}
// training the network
// -------------------
// input matrix : x : d->n, target matrix : t : c->n
//
// the set of learning samples consists of the input-output pairs: (x_i, t_i),
// where x_i is the i'th column of x, t_i is the i'th column of t, respectively.
//
// weight matrices of layers: w : d->l, u : l->c
//
// rho : ( initial ) learning rate
//
// number_of_iterations : number of iterations to be performed
//
// note: relies on the matrixSum kernel and the ARG launch macro defined
// earlier in this file; kernel launches are not error-checked here
inline void train(size_t d, size_t l, size_t c, size_t n, float* x, float* t, float* w, float* u, float rho, size_t number_of_iterations) {
	float *w_t, *h, *t_t, *temp_lxl, *temp_lxl_inv, *temp_lxn_1, *temp_lxn_2, *u_t, *e; // device pointers
	float const damping{ 0.0f };
	// memory allocation on device
	cudaMalloc(&w_t, sizeof(float) * l * d);
	cudaMalloc(&h, sizeof(float) * l * n);
	cudaMalloc(&u_t, sizeof(float) * c * l);
	cudaMalloc(&t_t, sizeof(float) * c * n);
	cudaMalloc(&temp_lxl, sizeof(float) * l * l);
	cudaMalloc(&temp_lxl_inv, sizeof(float) * l * l);
	cudaMalloc(&temp_lxn_1, sizeof(float) * l * n);
	cudaMalloc(&temp_lxn_2, sizeof(float) * l * n);
	cudaMalloc(&e, sizeof(float) * d * l);
	// computing t_t
	transpose ARG(c,n) (t, c, n, t_t);
	// now t_t = Transpose(t)
	for (size_t j = 0; j < number_of_iterations; ++j) {
		// computing w_t
		transpose ARG(d,l) (w, d, l, w_t);
		// now w_t = Transpose(w)
		// computing h
		matrixMultiply ARG(l,n) (w_t, x, l, d, n, h);
		// now h = Transpose(w).x
		sigmoid ARG(l,n) (h, l, n, h);
		// now h = sigmoid(Transpose(w).x)
		// computing h_t
		transpose ARG(l,n) (h, l, n, temp_lxn_1);
		// now temp_lxn_1 = Transpose(h)
		// computing h.Transpose(h)
		matrixMultiply ARG(l,l) (h, temp_lxn_1, l, n, l, temp_lxl);
		// now temp_lxl = h.Transpose(h)
		// printCudaMatrix(temp_lxl, l, l);
		inverse(temp_lxl, l, temp_lxl_inv, damping);
		// printCudaMatrix(temp_lxl_inv, l, l);
		// now temp_lxl_inv = inverse(h.Transpose(h))
		matrixMultiply ARG(l,c) (h, t_t, l, n, c, u_t);
		// now u_t = h.Transpose(t) (u_t used as scratch here)
		matrixMultiply ARG(l,c) (temp_lxl_inv, u_t, l, l, c, u);
		// now u = inverse(h.Transpose(h)).h.Transpose(t)
		transpose ARG(l, c) (u, l, c, u_t);
		// now u_t = Transpose(u)
		// computing e
		matrixMultiply ARG(l,l) (u, u_t, l, c, l, temp_lxl);
		matrixMultiply ARG(l,n) (temp_lxl, h, l, l, n, temp_lxn_1);
		matrixMultiply ARG(l,n) (u, t, l, c, n, temp_lxn_2);
		matrixScalarMultiply ARG(l,n) (temp_lxn_2, -1.0f, l, n, temp_lxn_2);
		matrixSum ARG(l,n) (temp_lxn_1, temp_lxn_2, l, n, temp_lxn_1);
		// now temp_lxn_1 contains the third part
		zeroMatrix ARG(l,n) (temp_lxn_2, l, n);
		matrixSum ARG(l,n) (temp_lxn_2, h, l, n, temp_lxn_2);
		matrixScalarMultiply ARG(l,n) (temp_lxn_2, -1.0f, l, n, temp_lxn_2);
		// FIX: addDiagonal is documented "call it with n x 1 blocks" and only
		// reads blockIdx.x; the previous min x min grid made every y-block
		// repeat the same non-atomic += on a diagonal element concurrently
		// (a data race with a nondeterministic result)
		addDiagonal ARG((l < n) ? l : n, 1) (temp_lxn_2, (l < n) ? l : n, 1.0f);
		// now temp_lxn_2 contains the second part
		// and h contains the first part
		matrixElementwiseMultiply ARG(l, n) (h, temp_lxn_2, l, n, temp_lxn_2);
		matrixElementwiseMultiply ARG(l, n) (temp_lxn_2, temp_lxn_1, l, n, temp_lxn_1);
		transpose ARG(l, n) (temp_lxn_1, l, n, temp_lxn_2);
		matrixScalarMultiply ARG(n, l) (temp_lxn_2, (rho*(-2.0f)), n, l, temp_lxn_2);
		matrixMultiply ARG(d, l) (x, temp_lxn_2, d, n, l, e);
		// now e is computed
		matrixSum ARG(d, l) (w, e, d, l, w);
	}
	// freeing memory on device
	cudaFree(w_t);
	cudaFree(h);
	cudaFree(u_t);
	cudaFree(t_t);
	cudaFree(temp_lxl);
	cudaFree(temp_lxl_inv);
	cudaFree(temp_lxn_1);
	cudaFree(temp_lxn_2);
	cudaFree(e);
}
// processing the network
// ----------------------
// input matrix : x : d->n, output matrix : y : c->n,
//
// weight matrices of layers: w : d->l, u : l->c
//
// forward pass only: y = Transpose(u) . sigmoid(Transpose(w) . x)
// all pointers are device pointers; the caller owns x, w, u, y
inline void compute(size_t d, size_t l, size_t c, size_t n, float* x, float* w, float* u, float* y){
	float *w_t, *h, *u_t; // device pointers
	// memory allocation on device (temporaries freed before return)
	cudaMalloc(&w_t, sizeof(float) * l * d);
	cudaMalloc(&h, sizeof(float) * l * n);
	cudaMalloc(&u_t, sizeof(float) * c * l);
	// computing w_t
	transpose ARG(d, l) (w, d, l, w_t);
	// now w_t = Transpose(w)
	// computing h
	matrixMultiply ARG(l, n) (w_t, x, l, d, n, h);
	// now h = Transpose(w).x
	sigmoid ARG(l, n) (h, l, n, h);
	// now h = sigmoid(Transpose(w).x)
	// computing u_t
	transpose ARG(l, c) (u, l, c, u_t);
	// now u_t = Transpose(u)
	// computing y
	matrixMultiply ARG(c, n) (u_t, h, c, l, n, y);
	// now y = u_t.h
	// freeing memory on device
	cudaFree(w_t);
	cudaFree(h);
	cudaFree(u_t);
}
// reads 'matrix' : n->k from 'file'
// first line contains n k
// the further lines contain n x k floats
// we assume matrix == nullptr; on success the caller owns the new[] buffer
// returns false (and allocates nothing) when the file cannot be opened
bool read_matrix_from_file(std::string const &file, float* &matrix, size_t &n, size_t &k) {
	assert(matrix == nullptr);
	std::string line;
	std::ifstream input(file);
	size_t i{ 0 };
	float temp{ 0.0f };
	if (input.is_open()) {
		std::getline(input, line);
		std::istringstream in(line);
		in >> n;
		in >> k;
		matrix = new float[n * k];
		size_t const capacity{ n * k };
		while (std::getline(input, line)){
			std::istringstream in(line);
			// stop at capacity so a file with extra values cannot write
			// past the end of the buffer (previously an overflow)
			while ((i < capacity) && (in >> temp))
				matrix[i++] = temp;
		}
		input.close();
	}
	else {
		std::cerr << "Unable to open file " << file << "\n";
		return false;
	}
	return true;
}
// writes 'matrix' : n->k into 'file'
// first line contains n k
// the further lines contain n x k floats
// we assume matrix != nullptr
// returns false when the file cannot be opened or a dimension is zero
bool write_matrix_into_file(std::string const &file, float* const matrix, size_t const &n, size_t const &k) {
	assert(matrix != nullptr);
	// guard degenerate sizes: with k == 0 the loop bound (k - 1)
	// underflows size_t and the writer would read far out of bounds
	if ((n == 0) || (k == 0))
		return false;
	std::string line;
	std::ofstream output(file);
	size_t i{ 0 };
	size_t j{ 0 };
	if (output.is_open()) {
		output << n << " " << k << std::endl;
		for (j = 0; j < n; ++j){
			for (i = 0; i < (k - 1); ++i)
				output << matrix[ID(j, i, k)] << " ";
			output << matrix[ID(j, (k - 1), k)] << std::endl;
		}
		output.close();
	}
	else {
		std::cerr << "Unable to open file " << file << "\n";
		return false;
	}
	return true;
}
// main: loads x/w/u, uploads them to the device, then either trains the
// network (no y.txt present) and writes updated weights back, or runs the
// forward pass and writes y.txt
int main(){
	size_t d, l, c, n;
	float *x{ nullptr }, *d_x{ nullptr }, *t{ nullptr }, *d_t{ nullptr }, *w{ nullptr };
	float *d_w{ nullptr }, *u{ nullptr }, *d_u{ nullptr }, *y{ nullptr }, *d_y{ nullptr };
	// setting up constants
	// ---------------------
	// rho : initial learning rate
	float rho{ 1.0f };
	// number_of_iterations : iterations performed while learning
	size_t number_of_iterations{ 1 };
	// reading input
	// NOTE(review): the return values of read_matrix_from_file are ignored;
	// if a file cannot be opened the corresponding dimensions stay
	// uninitialized (the pointer asserts below only fire in debug builds)
	read_matrix_from_file("x.txt", x, d, n);
	assert(x != nullptr);
	// reading weights of first layer
	read_matrix_from_file("w.txt", w, d, l);
	assert(w != nullptr);
	// reading weights of second layer
	read_matrix_from_file("u.txt", u, l, c);
	assert(u != nullptr);
	// memory allocation on the device
	cudaMalloc(&d_x, sizeof(float) * d * n);
	cudaMalloc(&d_w, sizeof(float) * d * l);
	cudaMalloc(&d_u, sizeof(float) * l * c);
	// copy x, w, u to the device
	cudaMemcpy(d_x, x, sizeof(float) * d * n, cudaMemcpyHostToDevice);
	cudaMemcpy(d_w, w, sizeof(float) * d * l, cudaMemcpyHostToDevice);
	cudaMemcpy(d_u, u, sizeof(float) * l * c, cudaMemcpyHostToDevice);
	delete[] x;
	// //
	// at this point, the network is on the device //
	// //
	if (!(std::ifstream("y.txt").good())){
		// if y.txt does not exist, then
		// we are going to train the network
		// NOTE(review): t is not checked; a missing t.txt leaves t == nullptr
		// and the cudaMemcpy below copies from a null host pointer
		read_matrix_from_file("t.txt", t, c, n);
		cudaMalloc(&d_t, sizeof(float) * c * n);
		cudaMemcpy(d_t, t, sizeof(float) * c * n, cudaMemcpyHostToDevice);
		train(d, l, c, n, d_x, d_t, d_w, d_u, rho, number_of_iterations);
		cudaMemcpy(w, d_w, sizeof(float) * d * l, cudaMemcpyDeviceToHost);
		cudaMemcpy(u, d_u, sizeof(float) * l * c, cudaMemcpyDeviceToHost);
		write_matrix_into_file("w.txt", w, d, l);
		write_matrix_into_file("u.txt", u, l, c);
		cudaFree(d_t);
		delete[] t;
		delete[] w;
		delete[] u;
	} else {
		// y.txt exists, so we are going to
		// process the network on the inputs
		// we will not need either w or u
		// (host copies only -- the device copies d_w/d_u remain valid)
		delete[] w;
		delete[] u;
		cudaMalloc(&d_y, sizeof(float) * c * n);
		compute (d, l, c, n, d_x, d_w, d_u, d_y);
		y = new float[c * n];
		cudaMemcpy(y, d_y, sizeof(float) * c * n, cudaMemcpyDeviceToHost);
		write_matrix_into_file("y.txt", y, c, n);
		cudaFree(d_y);
		delete[] y;
	}
	// freeing memory on device
	cudaFree(d_x);
	cudaFree(d_w);
	cudaFree(d_u);
	return 0;
}
|
21,783 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated single-thread stress kernel (launched <<<1,1>>>): nested
// loops and data-dependent branches over deliberately extreme float
// constants (denormals, x/0, floorf(+0.0f) divisors) to exercise
// inf/nan propagation; prints the final value of 'comp' from the device.
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
for (int i=0; i < var_1; ++i) {
	for (int i=0; i < var_2; ++i) {
		for (int i=0; i < var_3; ++i) {
			if (comp == asinf(+1.6052E-29f * (var_4 - (+1.3418E-44f * -1.1849E-36f)))) {
				float tmp_1 = +1.4940E-23f;
				float tmp_2 = -1.6240E35f + -1.2672E-41f + +1.0623E34f + atan2f(logf(var_6 * -0.0f), (-1.4919E-42f * atan2f((-1.9619E-37f * (-1.6127E-37f + acosf(var_7 * -1.0424E-41f / -1.8312E-36f * var_8))), var_9 * (-1.5333E-43f * var_10 / powf(var_11 - coshf((var_12 - coshf(var_13 / -1.9055E20f))), -1.0315E9f)))));
				comp = tmp_2 * tmp_1 - +1.6798E36f * (+1.1643E34f - (var_14 * +1.4241E36f * (var_15 + var_16)));
				comp += var_17 / floorf(+0.0f);
				for (int i=0; i < var_5; ++i) {
					comp += +1.6689E-43f - asinf(-1.4086E35f / +0.0f * var_18 + var_19);
				}
				if (comp == (+1.3292E34f - var_20 * (-1.9025E-42f - var_21))) {
					comp = +1.9010E19f * var_22 * -1.4243E-5f * tanhf((-1.4812E-35f * +1.5328E36f));
				}
			}
		}
	}
}
	printf("%.17g\n", comp);
}
/* Allocates a 10-element float buffer filled with v.
 * Returns NULL when the allocation fails (previously the fill loop would
 * dereference the null pointer). The caller owns the returned memory. */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  if (ret == NULL)
    return NULL;
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
/* Driver for the generated kernel: parses 23 numeric command-line values
 * and forwards them to compute<<<1,1>>>. */
int main(int argc, char** argv) {
/* Program variables */
  /* validate the argument count before indexing argv (previously a short
   * invocation dereferenced argv past its end and crashed) */
  if (argc < 24) {
    fprintf(stderr, "Usage: %s <23 numeric arguments>\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  int tmp_6 = atoi(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  cudaDeviceSynchronize();
  return 0;
}
|
21,784 | /**
Cで学ぶアルゴリズムとデータ構造
ステップバイステップでN−クイーン問題を最適化
一般社団法人 共同通信社 情報技術局 鈴木 維一郎(suzuki.iichiro@kyodonews.jp)
コンパイル
$ nvcc CUDA01_N-Queen.cu -o CUDA01_N-Queen
実行
$ ./CUDA01_N-Queen
1. ブルートフォース 力任せ探索
全ての可能性のある解の候補を体系的に数え上げ、それぞれの解候補が問題の解とな
るかをチェックする方法
(※)各行に1個の王妃を配置する組み合わせを再帰的に列挙組み合わせを生成するだ
けであって8王妃問題を解いているわけではありません
実行結果
:
:
16777209: 7 7 7 7 7 7 7 0
16777210: 7 7 7 7 7 7 7 1
16777211: 7 7 7 7 7 7 7 2
16777212: 7 7 7 7 7 7 7 3
16777213: 7 7 7 7 7 7 7 4
16777214: 7 7 7 7 7 7 7 5
16777215: 7 7 7 7 7 7 7 6
16777216: 7 7 7 7 7 7 7 7
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
// global state shared by the host solver and the output helpers
long Total=0 ; // total number of solutions
long Unique=0; // unique solutions (not computed in this step)
int down[2*MAX-1]; //down:flagA column-occupied flags
int left[2*MAX-1]; //left:flagB diagonal-occupied flags
int right[2*MAX-1]; //right:flagC anti-diagonal-occupied flags
int SIZE=8; //N is fixed at 8
int COUNT=0; //running counter for printed boards
int aBoard[MAX]; //board array: aBoard[row] = chosen column
//
// GPU backtracking + bitmap N-Queens counter.
// Each thread resumes one partial placement (its down/left/right occupancy
// masks were produced on the host) and exhausts that subtree with an
// explicit shared-memory stack of depth <= 10, counting boards that reach
// depth 'mark'. Per-thread counts are then tree-reduced in shared memory to
// one value per block. The first reduction step assumes
// 64 < THREAD_NUM <= 128; every step is preceded by __syncthreads(), so no
// implicit warp synchrony is relied upon.
__global__ void solve_nqueen_cuda_kernel_bt_bm(
	int n,int mark,
	unsigned int* totalDown,unsigned int* totalLeft,unsigned int* totalRight,
	unsigned int* results,int totalCond){
	const int tid=threadIdx.x,bid=blockIdx.x,idx=bid*blockDim.x+tid;
	// per-thread explicit stacks (depth 10) in shared memory
	__shared__ unsigned int down[THREAD_NUM][10],left[THREAD_NUM][10],right[THREAD_NUM][10],
	bitmap[THREAD_NUM][10],sum[THREAD_NUM];
	const unsigned int mask=(1<<n)-1;int total=0,i=0;unsigned int bit;
	if(idx<totalCond){
		down[tid][i]=totalDown[idx];
		left[tid][i]=totalLeft[idx];
		right[tid][i]=totalRight[idx];
		bitmap[tid][i]=down[tid][i]|left[tid][i]|right[tid][i];
		while(i>=0){
			if((bitmap[tid][i]&mask)==mask){i--;} // row exhausted: backtrack
			else{
				// pick the lowest still-free column of this row
				bit=(bitmap[tid][i]+1)&~bitmap[tid][i];
				bitmap[tid][i]|=bit;
				if((bit&mask)!=0){
					if(i+1==mark){total++;i--;} // reached target depth: count it
					else{
						down[tid][i+1]=down[tid][i]|bit;
						left[tid][i+1]=(left[tid][i]|bit)<<1;
						right[tid][i+1]=(right[tid][i]|bit)>>1;
						bitmap[tid][i+1]=(down[tid][i+1]|left[tid][i+1]|right[tid][i+1]);
						i++;
					}
				}else{i--;}
			}
		}
		sum[tid]=total;
	}else{sum[tid]=0;} // idle threads contribute zero
	// shared-memory tree reduction of the per-thread counts
	__syncthreads();if(tid<64&&tid+64<THREAD_NUM){sum[tid]+=sum[tid+64];}
	__syncthreads();if(tid<32){sum[tid]+=sum[tid+32];}
	__syncthreads();if(tid<16){sum[tid]+=sum[tid+16];}
	__syncthreads();if(tid<8){sum[tid]+=sum[tid+8];}
	__syncthreads();if(tid<4){sum[tid]+=sum[tid+4];}
	__syncthreads();if(tid<2){sum[tid]+=sum[tid+2];}
	__syncthreads();if(tid<1){sum[tid]+=sum[tid+1];}
	__syncthreads();if(tid==0){results[bid]=sum[0];}
}
//
// Host driver for the GPU N-Queens count.
// Expands the search tree on the host down to depth 'mark', buffering up to
// 'steps' frontier states (down/left/right masks); each full buffer is
// launched on the GPU, which finishes the subtrees, and the per-block sums
// are accumulated into 'total'. Only the left half of the first row is
// enumerated and the count doubled (mirror symmetry); for odd n the middle
// first-row column is handled by a second, structurally identical pass.
long long solve_nqueen_cuda(int n,int steps) {
	unsigned int down[32];unsigned int left[32];unsigned int right[32];
	unsigned int m[32];unsigned int bit;
	if(n<=0||n>32){return 0;}
	// host-side staging buffers for the frontier states
	unsigned int* totalDown=new unsigned int[steps];
	unsigned int* totalLeft=new unsigned int[steps];
	unsigned int* totalRight=new unsigned int[steps];
	unsigned int* results=new unsigned int[steps];
	unsigned int* downCuda;unsigned int* leftCuda;unsigned int* rightCuda;
	unsigned int* resultsCuda;
	cudaMalloc((void**) &downCuda,sizeof(int)*steps);
	cudaMalloc((void**) &leftCuda,sizeof(int)*steps);
	cudaMalloc((void**) &rightCuda,sizeof(int)*steps);
	cudaMalloc((void**) &resultsCuda,sizeof(int)*steps/THREAD_NUM);
	const unsigned int mask=(1<<n)-1;
	// mark: host expansion depth; keeps GPU-side stack depth <= 10
	const unsigned int mark=n>11?n-10:2;
	long long total=0;int totalCond=0;
	int i=0,j;down[0]=0;left[0]=0;right[0]=0;m[0]=0;bool computed=false;
	// first pass: left half of the first row (mirror-symmetric solutions)
	for(j=0;j<n/2;j++){
		bit=(1<<j);m[0]|=bit;
		down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
		m[1]=(down[1]|left[1]|right[1]);
		i=1;
		while(i>0){
			if((m[i]&mask)==mask){i--;}
			else{
				bit=(m[i]+1)&~m[i];m[i]|=bit;
				if((bit&mask)!=0){
					down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
					m[i+1]=(down[i+1]|left[i+1]|right[i+1]);
					i++;
					if(i==mark){
						// frontier state reached: stage it for the GPU
						totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i];
						totalCond++;
						if(totalCond==steps){
							// buffer full: drain the previous launch, then launch this batch
							if(computed){
								cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
								for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
								computed=false;
							}
							cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
							cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
							cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
							/** backTrack+bitmap*/
							solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
							computed=true;totalCond=0;
						}
						i--;
					}
				}else{i --;}
			}
		}
	}
	// drain the last in-flight launch, then flush the remaining partial batch
	if(computed){
		cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
		for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
		computed=false;
	}
	cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
	cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
	cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
	/** backTrack+bitmap*/
	solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
	cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
	for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
	total*=2; // mirror symmetry: each left-half solution has a reflection
	// second pass (odd n only): the middle column of the first row
	if(n%2==1){
		computed=false;totalCond=0;bit=(1<<(n-1)/2);m[0]|=bit;
		down[1]=bit;left[1]=bit<<1;right[1]=bit>>1;
		m[1]=(down[1]|left[1]|right[1]);
		i=1;
		while(i>0){
			if((m[i]&mask)==mask){i--;}
			else{
				bit=(m[i]+1)&~m[i];m[i]|=bit;
				if((bit&mask)!=0){
					down[i+1]=down[i]|bit;left[i+1]=(left[i]|bit)<<1;right[i+1]=(right[i]|bit)>>1;
					m[i+1]=(down[i+1]|left[i+1]|right[i+1]);
					i++;
					if(i==mark){
						totalDown[totalCond]=down[i];totalLeft[totalCond]=left[i];totalRight[totalCond]=right[i];
						totalCond++;
						if(totalCond==steps){
							if(computed){
								cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
								for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
								computed=false;
							}
							cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
							cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
							cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
							/** backTrack+bitmap*/
							solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
							computed=true;totalCond=0;
						}
						i--;
					}
				}else{i --;}
			}
		}
		if(computed){
			cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
			for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
			computed=false;
		}
		cudaMemcpy(downCuda,totalDown,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
		cudaMemcpy(leftCuda,totalLeft,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
		cudaMemcpy(rightCuda,totalRight,sizeof(int)*totalCond,cudaMemcpyHostToDevice);
		/** backTrack+bitmap*/
		solve_nqueen_cuda_kernel_bt_bm<<<steps/THREAD_NUM,THREAD_NUM>>>(n,n-mark,downCuda,leftCuda,rightCuda,resultsCuda,totalCond);
		cudaMemcpy(results,resultsCuda,sizeof(int)*steps/THREAD_NUM,cudaMemcpyDeviceToHost);
		for(int j=0;j<steps/THREAD_NUM;j++){total+=results[j];}
	}
	cudaFree(downCuda);cudaFree(leftCuda);cudaFree(rightCuda);cudaFree(resultsCuda);
	delete[] totalDown;delete[] totalLeft;delete[] totalRight;delete[] results;
	return total;
}
/** CUDA initialization: select the first device reporting compute capability >= 1.x */
bool InitCUDA(){
	int deviceCount;
	cudaGetDeviceCount(&deviceCount);
	if(deviceCount==0){fprintf(stderr,"There is no device.\n");return false;}
	int dev;
	for(dev=0;dev<deviceCount;dev++){
		cudaDeviceProp prop;
		if(cudaGetDeviceProperties(&prop,dev)==cudaSuccess){if(prop.major>=1){break;} }
	}
	if(dev==deviceCount){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
	cudaSetDevice(dev);
	return true;
}
// output helper: prints a running counter followed by the column chosen
// for each row (reads globals COUNT, SIZE, aBoard)
void print(){
	printf("%d: ",++COUNT);
	for(int col=0;col<SIZE;col++){
		printf("%d ",aBoard[col]);
	}
	printf("\n");
}
// brute-force enumeration: recursively assigns every column to every row
// and prints each complete assignment (no attack checks are performed,
// so this lists all SIZE^SIZE placements rather than solving N-Queens)
void NQueen(int row){
	if(row==SIZE){ // SIZE is fixed at 8
		print();   // a full assignment: emit it
		return;
	}
	for(int col=0;col<SIZE;col++){
		aBoard[row]=col;
		NQueen(row+1); // recurse into the next row
	}
}
// main entry point: selects CPU / recursive-CPU / GPU runs via -c/-r/-g
int main(int argc,char** argv) {
	bool cpu=true,cpur=true,gpu=true;
	int argstart=1,steps=24576;
	/** command-line parameter handling */
	if(argc>=2&&argv[1][0]=='-'){
		if(argv[1][1]=='c'||argv[1][1]=='C'){gpu=false;cpur=false;}
		else if(argv[1][1]=='r'||argv[1][1]=='R'){cpu=false;gpu=false;}
		else if(argv[1][1]=='g'||argv[1][1]=='G'){cpu=false;cpur=false;}
		argstart=2;
	}
	// NOTE(review): argc < argstart can never hold here (argc >= 1 when
	// argstart == 1, and argc >= 2 whenever argstart was set to 2), so this
	// usage message is unreachable -- presumably argc <= argstart was meant
	if(argc<argstart){
		printf("Usage: %s [-c|-g|-r] n steps\n",argv[0]);
		printf("  -c: CPU only\n");
		printf("  -r: CPUR only\n");
		printf("  -g: GPU only\n");
		printf("Default to 8 queen\n");
	}
	/** output and execution */
	/** CPU (banner only in this step) */
	if(cpu){
		printf("\n\n1. ブルートフォース 力任せ探索");
	}
	/** CPUR (recursive brute force on the host) */
	if(cpur){
		printf("\n\n1. ブルートフォース 力任せ探索");
		NQueen(0); // start the recursive enumeration at row 0
	}
	/** GPU */
	if(gpu){
		if(!InitCUDA()){return 0;}
		int min=4;int targetN=18;
		struct timeval t0;struct timeval t1;int ss;int ms;int dd;
		printf("%s\n"," N:          Total        Unique                 dd:hh:mm:ss.ms");
		for(int i=min;i<=targetN;i++){
			gettimeofday(&t0,NULL); // start timing
			Total=solve_nqueen_cuda(i,steps);
			gettimeofday(&t1,NULL); // stop timing
			// split the elapsed time into days / h:m:s / centiseconds
			if (t1.tv_usec<t0.tv_usec) {
				dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
				ss=(t1.tv_sec-t0.tv_sec-1)%86400;
				ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
			} else {
				dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
				ss=(t1.tv_sec-t0.tv_sec)%86400;
				ms=(t1.tv_usec-t0.tv_usec+500)/10000;
			}
			int hh=ss/3600;
			int mm=(ss-hh*3600)/60;
			ss%=60;
			printf("%2d:%18ld%18ld%12.2d:%02d:%02d:%02d.%02d\n", i,Total,Unique,dd,hh,mm,ss,ms);
		}
	}
	return 0;
}
|
21,785 | /*
icc propagate-toz-test.C -o propagate-toz-test.exe -fopenmp -O3
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <iostream>
#include <chrono>
#include <iomanip>
//#define DUMP_OUTPUT
#define FIXED_RSEED
//#define USE_ASYNC
#ifndef USE_ASYNC
#define num_streams 1
#endif
#ifndef nevts
#define nevts 100
#endif
#ifndef bsize
#define bsize 32
#endif
#ifndef ntrks
#define ntrks 9600 //122880
#endif
#define nb (ntrks/bsize)
#define smear 0.1
#ifndef NITER
#define NITER 5
#endif
#ifndef nlayer
#define nlayer 20
#endif
#ifndef num_streams
#define num_streams 10
#endif
#ifndef threadsperblockx
#define threadsperblockx bsize
#endif
#define threadsperblocky 1
#ifndef blockspergrid
#define blockspergrid nevts*nb/num_streams
#endif
// compiles helpers for both host and device
#define HOSTDEV __host__ __device__
// gathers one track's slice out of a batched buffer laid out as
// src[ip*bsize + tid] into the thread-local array dst[itrsize]
#define loadData(dst, src, tid, itrsize) \
  _Pragma("unroll") \
  for(int ip=0; ip<itrsize; ++ip) { \
    dst[ip] = src[ip*bsize + tid]; \
  }
// inverse of loadData: scatters the thread-local array src[itrsize] back
// into the batched buffer dst[ip*bsize + tid]
#define saveData(dst, src, tid, itrsize) \
  _Pragma("unroll") \
  for(int ip=0; ip<itrsize; ++ip) { \
    dst[ip*bsize + tid] = src[ip]; \
  }
// indices of the six track parameters inside MP6F / MP6F_
#define iparX     0
#define iparY     1
#define iparZ     2
#define iparIpt   3
#define iparPhi   4
#define iparTheta 5
// flattens (row i, col j) of a DxD row-major matrix to a linear index
HOSTDEV size_t PosInMtrx(size_t i, size_t j, size_t D) {
  return i*D+j;
}
// maps a linear 3x3 index to its packed symmetric-storage offset (6 values)
HOSTDEV size_t SymOffsets33(size_t i) {
  const size_t offs[9] = {0, 1, 3, 1, 2, 4, 3, 4, 5};
  return offs[i];
}
// maps a linear 6x6 index to its packed symmetric-storage offset (21 values)
HOSTDEV size_t SymOffsets66(size_t i) {
  const size_t offs[36] = {0, 1, 3, 6, 10, 15, 1, 2, 4, 7, 11, 16, 3, 4, 5, 8, 12, 17, 6, 7, 8, 9, 13, 18, 10, 11, 12, 13, 14, 19, 15, 16, 17, 18, 19, 20};
  return offs[i];
}
// host-side seed for one track: 6 parameters, packed symmetric 6x6
// covariance (21 values), and integer charge
struct ATRK {
  float par[6];
  float cov[21];
  int q;
//  int hitidx[22];
};
// host-side seed for one hit: 3D position and packed symmetric 3x3 covariance
struct AHIT {
  float pos[3];
  float cov[6];
};
// batched containers: each interleaves 'bsize' tracks/hits in a
// structure-of-arrays layout, data[ip*bsize + it] (see par()/setpar())
struct MP1I {
  int data[1*bsize];
};
struct MP22I {
  int data[22*bsize];
};
struct MP3F {
  float data[3*bsize];
};
struct MP6F {
  float data[6*bsize];
};
struct MP3x3 {
  float data[9*bsize];
};
struct MP3x6 {
  float data[18*bsize];
};
// SF = packed symmetric storage: 6 values for a 3x3, 21 for a 6x6
struct MP3x3SF {
  float data[6*bsize];
};
struct MP6x6SF {
  float data[21*bsize];
};
struct MP6x6F {
  float data[36*bsize];
};
// one batch of bsize tracks: parameters, covariance, charge
struct MPTRK {
  MP6F    par;
  MP6x6SF cov;
  MP1I    q;
//  MP22I   hitidx;
};
// one batch of bsize hits: position and covariance
struct MPHIT {
  MP3F    pos;
  MP3x3SF cov;
};
// trailing-underscore variants: single-element (per-thread) versions of the
// batched containers above, used for thread-local working copies
struct MP1I_ {
  int data[1];
};
struct MP22I_ {
  int data[22];
};
struct MP3F_ {
  float data[3];
};
struct MP6F_ {
  float data[6];
};
struct MP3x3_ {
  float data[9];
};
struct MP3x6_ {
  float data[18];
};
struct MP3x3SF_ {
  float data[6];
};
struct MP6x6SF_ {
  float data[21];
};
struct MP6x6F_ {
  float data[36];
};
struct MPTRK_ {
  MP6F_    par;
  MP6x6SF_ cov;
  MP1I_    q;
//  MP22I_   hitidx;
};
struct MPHIT_ {
  MP3F_    pos;
  MP3x3SF_ cov;
};
// Box-Muller sampler: returns a normally distributed value with mean mu and
// standard deviation sigma. The second deviate of each pair is cached in
// static state, so alternating calls reuse it.
// NOTE: depends on rand() and static locals -- host-only, not thread-safe.
float randn(float mu, float sigma) {
  static float cached1, cached2; // last generated pair of deviates
  static int have_cached = 0;    // 1 when cached2 is still unconsumed
  if (have_cached == 1) {
    have_cached = !have_cached;
    return (mu + sigma * (float) cached2);
  }
  float u1, u2, w, scale;
  do {
    // draw a point uniformly inside [-1,1]^2, rejecting the unit-disk exterior
    u1 = -1 + ((float) rand () / RAND_MAX) * 2;
    u2 = -1 + ((float) rand () / RAND_MAX) * 2;
    w = pow (u1, 2) + pow (u2, 2);
  } while (w >= 1 || w == 0);
  scale = sqrt ((-2 * log (w)) / w);
  cached1 = u1 * scale;
  cached2 = u2 * scale;
  have_cached = !have_cached;
  return (mu + sigma * (float) cached1);
}
// Builds nevts*nb managed MPTRK batches by smearing the single seed track:
// every slot gets par/cov jittered multiplicatively by (1 + smear*N(0,1))
// and a charge that randomly keeps or flips the seed's sign.
// (The randn()/rand() call order is part of the reproducible sequence.)
MPTRK* prepareTracks(ATRK inputtrk) {
  MPTRK* result;
  cudaMallocManaged((void**)&result,nevts*nb*sizeof(MPTRK));
  for (size_t ev=0;ev<nevts;++ev) {
    for (size_t blk=0;blk<nb;++blk) {
      MPTRK &batch = result[blk + nb*ev];
      for (size_t slot=0;slot<bsize;++slot) {
        // smeared track parameters
        for (size_t ip=0;ip<6;++ip) {
          batch.par.data[slot + ip*bsize] = (1+smear*randn(0,1))*inputtrk.par[ip];
        }
        // smeared covariance
        for (size_t ip=0;ip<21;++ip) {
          batch.cov.data[slot + ip*bsize] = (1+smear*randn(0,1))*inputtrk.cov[ip];
        }
        // charge: seed charge, randomly flipped
        batch.q.data[slot] = inputtrk.q-2*ceil(-0.5 + (float)rand() / RAND_MAX);//fixme check
      }
    }
  }
  return result;
}
// Builds nlayer*nevts*nb managed MPHIT batches by smearing the single seed
// hit: pos and cov of every slot are jittered by (1 + smear*N(0,1)).
// Layout: layer index is innermost, [lay + nlayer*(ib + nb*ie)].
MPHIT* prepareHits(AHIT inputhit) {
  MPHIT* result;
  cudaMallocManaged((void**)&result,nlayer*nevts*nb*sizeof(MPHIT));
  for (size_t lay=0;lay<nlayer;++lay) {
    for (size_t ev=0;ev<nevts;++ev) {
      for (size_t blk=0;blk<nb;++blk) {
        MPHIT &batch = result[lay+nlayer*(blk + nb*ev)];
        for (size_t slot=0;slot<bsize;++slot) {
          // smeared position
          for (size_t ip=0;ip<3;++ip) {
            batch.pos.data[slot + ip*bsize] = (1+smear*randn(0,1))*inputhit.pos[ip];
          }
          // smeared covariance
          for (size_t ip=0;ip<6;++ip) {
            batch.cov.data[slot + ip*bsize] = (1+smear*randn(0,1))*inputhit.cov[ip];
          }
        }
      }
    }
  }
  return result;
}
// ---- read accessors for batched track data ----
// bTk: pointer to the ib'th batch of event ev
HOSTDEV MPTRK* bTk(MPTRK* tracks, size_t ev, size_t ib) {
  return &(tracks[ib + nb*ev]);
}
HOSTDEV const MPTRK* bTk(const MPTRK* tracks, size_t ev, size_t ib) {
  return &(tracks[ib + nb*ev]);
}
// q: charge of slot 'it' (stored as int, returned as float)
HOSTDEV float q(const MP1I* bq, size_t it){
  return (*bq).data[it];
}
// par: parameter 'ipar' of slot 'it' (AoSoA layout: it + ipar*bsize)
HOSTDEV float par(const MP6F* bpars, size_t it, size_t ipar){
  return (*bpars).data[it + ipar*bsize];
}
// named shortcuts for the six parameters (see ipar* defines)
HOSTDEV float x    (const MP6F* bpars, size_t it){ return par(bpars, it, 0); }
HOSTDEV float y    (const MP6F* bpars, size_t it){ return par(bpars, it, 1); }
HOSTDEV float z    (const MP6F* bpars, size_t it){ return par(bpars, it, 2); }
HOSTDEV float ipt  (const MP6F* bpars, size_t it){ return par(bpars, it, 3); }
HOSTDEV float phi  (const MP6F* bpars, size_t it){ return par(bpars, it, 4); }
HOSTDEV float theta(const MP6F* bpars, size_t it){ return par(bpars, it, 5); }
// same accessors, addressed through an MPTRK batch
HOSTDEV float par(const MPTRK* btracks, size_t it, size_t ipar){
  return par(&(*btracks).par,it,ipar);
}
HOSTDEV float x    (const MPTRK* btracks, size_t it){ return par(btracks, it, 0); }
HOSTDEV float y    (const MPTRK* btracks, size_t it){ return par(btracks, it, 1); }
HOSTDEV float z    (const MPTRK* btracks, size_t it){ return par(btracks, it, 2); }
HOSTDEV float ipt  (const MPTRK* btracks, size_t it){ return par(btracks, it, 3); }
HOSTDEV float phi  (const MPTRK* btracks, size_t it){ return par(btracks, it, 4); }
HOSTDEV float theta(const MPTRK* btracks, size_t it){ return par(btracks, it, 5); }
// same accessors, addressed by (event, global track index)
HOSTDEV float par(const MPTRK* tracks, size_t ev, size_t tk, size_t ipar){
  size_t ib = tk/bsize;          // which batch
  const MPTRK* btracks = bTk(tracks, ev, ib);
  size_t it = tk % bsize;        // slot within the batch
  return par(btracks, it, ipar);
}
HOSTDEV float x    (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 0); }
HOSTDEV float y    (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 1); }
HOSTDEV float z    (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 2); }
HOSTDEV float ipt  (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 3); }
HOSTDEV float phi  (const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 4); }
HOSTDEV float theta(const MPTRK* tracks, size_t ev, size_t tk){ return par(tracks, ev, tk, 5); }
// ---- write accessors: mirror of the par() readers above ----
// setpar: stores parameter 'ipar' of slot 'it' (AoSoA layout: it + ipar*bsize)
HOSTDEV void setpar(MP6F* bpars, size_t it, size_t ipar, float val){
  (*bpars).data[it + ipar*bsize] = val;
}
HOSTDEV void setx    (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 0, val); }
HOSTDEV void sety    (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 1, val); }
HOSTDEV void setz    (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 2, val); }
HOSTDEV void setipt  (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 3, val); }
HOSTDEV void setphi  (MP6F* bpars, size_t it, float val){ setpar(bpars, it, 4, val); }
HOSTDEV void settheta(MP6F* bpars, size_t it, float val){ setpar(bpars, it, 5, val); }
// same setters, addressed through an MPTRK batch
HOSTDEV void setpar(MPTRK* btracks, size_t it, size_t ipar, float val){
  setpar(&(*btracks).par,it,ipar,val);
}
HOSTDEV void setx    (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 0, val); }
HOSTDEV void sety    (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 1, val); }
HOSTDEV void setz    (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 2, val); }
HOSTDEV void setipt  (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 3, val); }
HOSTDEV void setphi  (MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 4, val); }
HOSTDEV void settheta(MPTRK* btracks, size_t it, float val){ setpar(btracks, it, 5, val); }
// ---- accessors for batched hit data ----
// NOTE(review): the 2-argument bHit overloads index without the layer
// dimension, i.e. as if nlayer == 1; the 4-argument overload matches the
// [lay + ib*nlayer + ev*nlayer*nb] layout used by prepareHits -- confirm
// the 2-argument forms are only used on single-layer buffers.
HOSTDEV MPHIT* bHit(MPHIT* hits, size_t ev, size_t ib) {
  return &(hits[ib + nb*ev]);
}
HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib) {
  return &(hits[ib + nb*ev]);
}
// layer-aware batch lookup
HOSTDEV const MPHIT* bHit(const MPHIT* hits, size_t ev, size_t ib,int lay) {
return &(hits[lay + (ib*nlayer) +(ev*nlayer*nb)]);
}
// pos: coordinate 'ipar' of slot 'it' (AoSoA layout: it + ipar*bsize)
HOSTDEV float pos(const MP3F* hpos, size_t it, size_t ipar){
  return (*hpos).data[it + ipar*bsize];
}
HOSTDEV float x(const MP3F* hpos, size_t it)    { return pos(hpos, it, 0); }
HOSTDEV float y(const MP3F* hpos, size_t it)    { return pos(hpos, it, 1); }
HOSTDEV float z(const MP3F* hpos, size_t it)    { return pos(hpos, it, 2); }
// same accessors, addressed through an MPHIT batch
HOSTDEV float pos(const MPHIT* hits, size_t it, size_t ipar){
  return pos(&(*hits).pos,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t it)    { return pos(hits, it, 0); }
HOSTDEV float y(const MPHIT* hits, size_t it)    { return pos(hits, it, 1); }
HOSTDEV float z(const MPHIT* hits, size_t it)    { return pos(hits, it, 2); }
// addressed by (event, global track index); reads the last layer's hit
HOSTDEV float pos(const MPHIT* hits, size_t ev, size_t tk, size_t ipar){
  size_t ib = tk/bsize;
  //[DEBUG by Seyong on Dec. 28, 2020] add 4th argument(nlayer-1) to bHit() below.
  const MPHIT* bhits = bHit(hits, ev, ib, nlayer-1);
  size_t it = tk % bsize;
  return pos(bhits,it,ipar);
}
HOSTDEV float x(const MPHIT* hits, size_t ev, size_t tk)    { return pos(hits, ev, tk, 0); }
HOSTDEV float y(const MPHIT* hits, size_t ev, size_t tk)    { return pos(hits, ev, tk, 1); }
HOSTDEV float z(const MPHIT* hits, size_t ev, size_t tk)    { return pos(hits, ev, tk, 2); }
#define N bsize
// Generated matrix product for the endcap propagation step:
// A is a full 6x6 Jacobian (MP6x6F_, 36 floats) whose known zero/one
// pattern has been folded into the expressions (e.g. rows 3 and 5 of C copy
// B directly, row 2 of C is all zero), B is a packed symmetric 6x6
// (MP6x6SF_, 21 floats), C receives the full 6x6 result.
// Per-thread (underscore) types: pure register math, no batching, no sync.
// NOTE(review): generated code -- do not hand-edit individual terms.
__forceinline__ __device__ void MultHelixPropEndcap(const MP6x6F_* A, const MP6x6SF_* B, MP6x6F_* C) {
  const float *a = A->data; //ASSUME_ALIGNED(a, 64);
  const float *b = B->data; //ASSUME_ALIGNED(b, 64);
  float *c = C->data;       //ASSUME_ALIGNED(c, 64);
  {
    c[ 0] = b[ 0] + a[ 2]*b[ 3] + a[ 3]*b[ 6] + a[ 4]*b[10] + a[ 5]*b[15];
    c[ 1] = b[ 1] + a[ 2]*b[ 4] + a[ 3]*b[ 7] + a[ 4]*b[11] + a[ 5]*b[16];
    c[ 2] = b[ 3] + a[ 2]*b[ 5] + a[ 3]*b[ 8] + a[ 4]*b[12] + a[ 5]*b[17];
    c[ 3] = b[ 6] + a[ 2]*b[ 8] + a[ 3]*b[ 9] + a[ 4]*b[13] + a[ 5]*b[18];
    c[ 4] = b[10] + a[ 2]*b[12] + a[ 3]*b[13] + a[ 4]*b[14] + a[ 5]*b[19];
    c[ 5] = b[15] + a[ 2]*b[17] + a[ 3]*b[18] + a[ 4]*b[19] + a[ 5]*b[20];
    c[ 6] = b[ 1] + a[ 8]*b[ 3] + a[ 9]*b[ 6] + a[10]*b[10] + a[11]*b[15];
    c[ 7] = b[ 2] + a[ 8]*b[ 4] + a[ 9]*b[ 7] + a[10]*b[11] + a[11]*b[16];
    c[ 8] = b[ 4] + a[ 8]*b[ 5] + a[ 9]*b[ 8] + a[10]*b[12] + a[11]*b[17];
    c[ 9] = b[ 7] + a[ 8]*b[ 8] + a[ 9]*b[ 9] + a[10]*b[13] + a[11]*b[18];
    c[10] = b[11] + a[ 8]*b[12] + a[ 9]*b[13] + a[10]*b[14] + a[11]*b[19];
    c[11] = b[16] + a[ 8]*b[17] + a[ 9]*b[18] + a[10]*b[19] + a[11]*b[20];
    c[12] = 0;
    c[13] = 0;
    c[14] = 0;
    c[15] = 0;
    c[16] = 0;
    c[17] = 0;
    c[18] = b[ 6];
    c[19] = b[ 7];
    c[20] = b[ 8];
    c[21] = b[ 9];
    c[22] = b[13];
    c[23] = b[18];
    c[24] = a[26]*b[ 3] + a[27]*b[ 6] + b[10] + a[29]*b[15];
    c[25] = a[26]*b[ 4] + a[27]*b[ 7] + b[11] + a[29]*b[16];
    c[26] = a[26]*b[ 5] + a[27]*b[ 8] + b[12] + a[29]*b[17];
    c[27] = a[26]*b[ 8] + a[27]*b[ 9] + b[13] + a[29]*b[18];
    c[28] = a[26]*b[12] + a[27]*b[13] + b[14] + a[29]*b[19];
    c[29] = a[26]*b[17] + a[27]*b[18] + b[19] + a[29]*b[20];
    c[30] = b[15];
    c[31] = b[16];
    c[32] = b[17];
    c[33] = b[18];
    c[34] = b[19];
    c[35] = b[20];
  }
}
// Companion to MultHelixPropEndcap: folds the intermediate full 6x6 B
// against the Jacobian A (presumably C = B * Transpose(A) -- generated
// code, verify against the generator) and writes the result directly into
// packed symmetric storage C (MP6x6SF_, 21 floats), exploiting the same
// zero/one structure of A.
// NOTE(review): generated code -- do not hand-edit individual terms.
__forceinline__ __device__ void MultHelixPropTranspEndcap(MP6x6F_* A, MP6x6F_* B, MP6x6SF_* C) {
  const float *a = A->data; //ASSUME_ALIGNED(a, 64);
  const float *b = B->data; //ASSUME_ALIGNED(b, 64);
  float *c = C->data;       //ASSUME_ALIGNED(c, 64);
  {
    c[ 0] = b[ 0] + b[ 2]*a[ 2] + b[ 3]*a[ 3] + b[ 4]*a[ 4] + b[ 5]*a[ 5];
    c[ 1] = b[ 6] + b[ 8]*a[ 2] + b[ 9]*a[ 3] + b[10]*a[ 4] + b[11]*a[ 5];
    c[ 2] = b[ 7] + b[ 8]*a[ 8] + b[ 9]*a[ 9] + b[10]*a[10] + b[11]*a[11];
    c[ 3] = b[12] + b[14]*a[ 2] + b[15]*a[ 3] + b[16]*a[ 4] + b[17]*a[ 5];
    c[ 4] = b[13] + b[14]*a[ 8] + b[15]*a[ 9] + b[16]*a[10] + b[17]*a[11];
    c[ 5] = 0;
    c[ 6] = b[18] + b[20]*a[ 2] + b[21]*a[ 3] + b[22]*a[ 4] + b[23]*a[ 5];
    c[ 7] = b[19] + b[20]*a[ 8] + b[21]*a[ 9] + b[22]*a[10] + b[23]*a[11];
    c[ 8] = 0;
    c[ 9] = b[21];
    c[10] = b[24] + b[26]*a[ 2] + b[27]*a[ 3] + b[28]*a[ 4] + b[29]*a[ 5];
    c[11] = b[25] + b[26]*a[ 8] + b[27]*a[ 9] + b[28]*a[10] + b[29]*a[11];
    c[12] = 0;
    c[13] = b[27];
    c[14] = b[26]*a[26] + b[27]*a[27] + b[28] + b[29]*a[29];
    c[15] = b[30] + b[32]*a[ 2] + b[33]*a[ 3] + b[34]*a[ 4] + b[35]*a[ 5];
    c[16] = b[31] + b[32]*a[ 8] + b[33]*a[ 9] + b[34]*a[10] + b[35]*a[11];
    c[17] = 0;
    c[18] = b[33];
    c[19] = b[32]*a[26] + b[33]*a[27] + b[34] + b[35]*a[29];
    c[20] = b[35];
  }
}
// Computes C = (H P H^T + R)^-1, the 3x3 inverse needed for the Kalman
// gain K = P H^T (H P H^T + R)^-1.  With H selecting x,y,z, H P H^T is the
// upper-left 3x3 of the packed symmetric track covariance A
// (a[0],a[1],a[2]; a[6],a[7]; a[11]) and R is the packed symmetric hit
// covariance B (b[0..5]).  C is written as a dense row-major 3x3.
// Inversion is by adjugate/cofactor expansion of the symmetric matrix
//   M = [[m00,m01,m02],[m01,m11,m12],[m02,m12,m22]]
// with m00=a0+b0, m01=a1+b1, m02=a2+b2, m11=a6+b3, m12=a7+b4, m22=a11+b5.
__forceinline__ __device__ void KalmanGainInv(const MP6x6SF_* A, const MP3x3SF_* B, MP3x3_* C) {
// k = P Ht(HPHt + R)^-1
// HpHt -> cov of x,y,z. take upper 3x3 matrix of P
// This calculates the inverse of HpHt +R
const float *a = A->data; //ASSUME_ALIGNED(a, 64);
const float *b = B->data; //ASSUME_ALIGNED(b, 64);
float *c = C->data; //ASSUME_ALIGNED(c, 64);
{
double det =
((a[0]+b[0])*(((a[ 6]+b[ 3]) *(a[11]+b[5])) - ((a[7]+b[4]) *(a[7]+b[4])))) -
((a[1]+b[1])*(((a[ 1]+b[ 1]) *(a[11]+b[5])) - ((a[7]+b[4]) *(a[2]+b[2])))) +
((a[2]+b[2])*(((a[ 1]+b[ 1]) *(a[7]+b[4])) - ((a[2]+b[2]) *(a[6]+b[3]))));
double invdet = 1.0/det;
c[ 0] = invdet*(((a[ 6]+b[ 3]) *(a[11]+b[5])) - ((a[7]+b[4]) *(a[7]+b[4])));
c[ 1] = -invdet*(((a[ 1]+b[ 1]) *(a[11]+b[5])) - ((a[2]+b[2]) *(a[7]+b[4])));
// BUGFIX: the (0,2) cofactor is m01*m12 - m02*m11.  The original used
// m12 (a[7]+b[4]) in place of m11 (a[6]+b[3]), which broke the symmetry
// of the inverse — compare with c[6] below, its transpose partner.
c[ 2] = invdet*(((a[ 1]+b[ 1]) *(a[7]+b[4])) - ((a[2]+b[2]) *(a[6]+b[3])));
c[ 3] = -invdet*(((a[ 1]+b[ 1]) *(a[11]+b[5])) - ((a[7]+b[4]) *(a[2]+b[2])));
c[ 4] = invdet*(((a[ 0]+b[ 0]) *(a[11]+b[5])) - ((a[2]+b[2]) *(a[2]+b[2])));
c[ 5] = -invdet*(((a[ 0]+b[ 0]) *(a[7]+b[4])) - ((a[2]+b[2]) *(a[1]+b[1])));
c[ 6] = invdet*(((a[ 1]+b[ 1]) *(a[7]+b[4])) - ((a[2]+b[2]) *(a[6]+b[3])));
c[ 7] = -invdet*(((a[ 0]+b[ 0]) *(a[7]+b[4])) - ((a[2]+b[2]) *(a[1]+b[1])));
c[ 8] = invdet*(((a[ 0]+b[ 0]) *(a[6]+b[3])) - ((a[1]+b[1]) *(a[1]+b[1])));
}
}
// Computes the Kalman gain K = P H^T * B, where B is the 3x3 inverse from
// KalmanGainInv.  With H selecting x,y,z, P H^T is the first three columns
// of the packed symmetric 6x6 covariance A (upper-triangular packing:
// a[0..5] row 0, a[6..10] row 1, a[11..14] row 2, etc.), so row i of C
// uses the packed entries of column 0..2 of row i.  C is 6x3 row-major.
__forceinline__ __device__ void KalmanGain(const MP6x6SF_* A, const MP3x3_* B, MP3x6_* C) {
// k = P Ht(HPHt + R)^-1
// HpHt -> cov of x,y,z. take upper 3x3 matrix of P
// This calculates the kalman gain
const float *a = A->data; //ASSUME_ALIGNED(a, 64);
const float *b = B->data; //ASSUME_ALIGNED(b, 64);
float *c = C->data; //ASSUME_ALIGNED(c, 64);
{
c[ 0] = a[0]*b[0] + a[1]*b[3] + a[2]*b[6];
c[ 1] = a[0]*b[1] + a[1]*b[4] + a[2]*b[7];
c[ 2] = a[0]*b[2] + a[1]*b[5] + a[2]*b[8];
c[ 3] = a[1]*b[0] + a[6]*b[3] + a[7]*b[6];
c[ 4] = a[1]*b[1] + a[6]*b[4] + a[7]*b[7];
c[ 5] = a[1]*b[2] + a[6]*b[5] + a[7]*b[8];
c[ 6] = a[2]*b[0] + a[7]*b[3] + a[11]*b[6];
c[ 7] = a[2]*b[1] + a[7]*b[4] + a[11]*b[7];
c[ 8] = a[2]*b[2] + a[7]*b[5] + a[11]*b[8];
c[ 9] = a[3]*b[0] + a[8]*b[3] + a[12]*b[6];
c[ 10] = a[3]*b[1] + a[8]*b[4] + a[12]*b[7];
c[ 11] = a[3]*b[2] + a[8]*b[5] + a[12]*b[8];
c[ 12] = a[4]*b[0] + a[9]*b[3] + a[13]*b[6];
c[ 13] = a[4]*b[1] + a[9]*b[4] + a[13]*b[7];
c[ 14] = a[4]*b[2] + a[9]*b[5] + a[13]*b[8];
c[ 15] = a[5]*b[0] + a[10]*b[3] + a[14]*b[6];
c[ 16] = a[5]*b[1] + a[10]*b[4] + a[14]*b[7];
c[ 17] = a[5]*b[2] + a[10]*b[5] + a[14]*b[8];
}
}
// Kalman measurement update: blends the propagated track state (inPar) and
// covariance (trkErr, packed symmetric 21 floats) with one hit (msP with
// covariance hitErr).  Only the x and y residuals are applied to the state
// below (the z column of the gain is used for the covariance update only).
// Updates inPar and trkErr in place.
__forceinline__ __device__ void KalmanUpdate(MP6x6SF_* trkErr, MP6F_* inPar, const MP3x3SF_* hitErr, const MP3F_* msP){
MP3x3_ inverse_temp;
MP3x6_ kGain;
MP6x6SF_ newErr;
// K = trkErr * H^T * (H trkErr H^T + hitErr)^-1, split into two steps.
KalmanGainInv(trkErr,hitErr,&inverse_temp);
KalmanGain(trkErr,&inverse_temp,&kGain);
{
float *inParData = inPar->data;
float *trkErrData = trkErr->data;
const float xin = inParData[iparX];
const float yin = inParData[iparY];
const float zin = inParData[iparZ];
const float ptin = 1.0f/inParData[iparIpt]; // is this pt or ipt?
const float phiin = inParData[iparPhi];
const float thetain = inParData[iparTheta];
const float xout = msP->data[iparX];
const float yout = msP->data[iparY];
//const float zout = msP->data[iparZ];
// State update: p_new = p + K * (measurement - prediction), x/y only.
float xnew = xin + (kGain.data[0]*(xout-xin)) +(kGain.data[1]*(yout-yin));
float ynew = yin + (kGain.data[3]*(xout-xin)) +(kGain.data[4]*(yout-yin));
float znew = zin + (kGain.data[6]*(xout-xin)) +(kGain.data[7]*(yout-yin));
float ptnew = ptin + (kGain.data[9]*(xout-xin)) +(kGain.data[10]*(yout-yin));
float phinew = phiin + (kGain.data[12]*(xout-xin)) +(kGain.data[13]*(yout-yin));
float thetanew = thetain + (kGain.data[15]*(xout-xin)) +(kGain.data[16]*(yout-yin));
// newErr = K * H * trkErr; subtracted from trkErr at the end so that
// trkErr becomes (I - K H) * trkErr, in packed symmetric layout.
newErr.data[0] = trkErrData[0] - (kGain.data[0]*trkErrData[0]+kGain.data[1]*trkErrData[1]+kGain.data[2]*trkErrData[2]);
newErr.data[1] = trkErrData[1] - (kGain.data[0]*trkErrData[1]+kGain.data[1]*trkErrData[6]+kGain.data[2]*trkErrData[7]);
newErr.data[2] = trkErrData[2] - (kGain.data[0]*trkErrData[2]+kGain.data[1]*trkErrData[7]+kGain.data[2]*trkErrData[11]);
newErr.data[3] = trkErrData[3] - (kGain.data[0]*trkErrData[3]+kGain.data[1]*trkErrData[8]+kGain.data[2]*trkErrData[12]);
newErr.data[4] = trkErrData[4] - (kGain.data[0]*trkErrData[4]+kGain.data[1]*trkErrData[9]+kGain.data[2]*trkErrData[13]);
newErr.data[5] = trkErrData[5] - (kGain.data[0]*trkErrData[5]+kGain.data[1]*trkErrData[10]+kGain.data[2]*trkErrData[14]);
newErr.data[6] = trkErrData[6] - (kGain.data[3]*trkErrData[1]+kGain.data[4]*trkErrData[6]+kGain.data[5]*trkErrData[7]);
newErr.data[7] = trkErrData[7] - (kGain.data[3]*trkErrData[2]+kGain.data[4]*trkErrData[7]+kGain.data[5]*trkErrData[11]);
newErr.data[8] = trkErrData[8] - (kGain.data[3]*trkErrData[3]+kGain.data[4]*trkErrData[8]+kGain.data[5]*trkErrData[12]);
newErr.data[9] = trkErrData[9] - (kGain.data[3]*trkErrData[4]+kGain.data[4]*trkErrData[9]+kGain.data[5]*trkErrData[13]);
newErr.data[10] = trkErrData[10] - (kGain.data[3]*trkErrData[5]+kGain.data[4]*trkErrData[10]+kGain.data[5]*trkErrData[14]);
newErr.data[11] = trkErrData[11] - (kGain.data[6]*trkErrData[2]+kGain.data[7]*trkErrData[7]+kGain.data[8]*trkErrData[11]);
newErr.data[12] = trkErrData[12] - (kGain.data[6]*trkErrData[3]+kGain.data[7]*trkErrData[8]+kGain.data[8]*trkErrData[12]);
newErr.data[13] = trkErrData[13] - (kGain.data[6]*trkErrData[4]+kGain.data[7]*trkErrData[9]+kGain.data[8]*trkErrData[13]);
newErr.data[14] = trkErrData[14] - (kGain.data[6]*trkErrData[5]+kGain.data[7]*trkErrData[10]+kGain.data[8]*trkErrData[14]);
newErr.data[15] = trkErrData[15] - (kGain.data[9]*trkErrData[3]+kGain.data[10]*trkErrData[8]+kGain.data[11]*trkErrData[12]);
newErr.data[16] = trkErrData[16] - (kGain.data[9]*trkErrData[4]+kGain.data[10]*trkErrData[9]+kGain.data[11]*trkErrData[13]);
newErr.data[17] = trkErrData[17] - (kGain.data[9]*trkErrData[5]+kGain.data[10]*trkErrData[10]+kGain.data[11]*trkErrData[14]);
newErr.data[18] = trkErrData[18] - (kGain.data[12]*trkErrData[4]+kGain.data[13]*trkErrData[9]+kGain.data[14]*trkErrData[13]);
newErr.data[19] = trkErrData[19] - (kGain.data[12]*trkErrData[5]+kGain.data[13]*trkErrData[10]+kGain.data[14]*trkErrData[14]);
newErr.data[20] = trkErrData[20] - (kGain.data[15]*trkErrData[5]+kGain.data[16]*trkErrData[10]+kGain.data[17]*trkErrData[14]);
inParData[iparX] = xnew;
inParData[iparY] = ynew;
inParData[iparZ] = znew;
// NOTE(review): ptin/ptnew are in pt (1/ipt) units yet ptnew is written
// back into the iparIpt slot — kept as in the reference implementation;
// confirm whether this is intended.
inParData[iparIpt] = ptnew;
inParData[iparPhi] = phinew;
inParData[iparTheta] = thetanew;
#pragma unroll
for (int i = 0; i < 21; i++){
trkErrData[ i] = trkErrData[ i] - newErr.data[ i];
}
}
}
// Helix curvature-to-momentum conversion factor (100/3.8 — presumably
// 1/(0.01 * B) with B = 3.8 T solenoid field; TODO confirm units).
// Evaluated in double, then truncated to float at initialization.
__device__ __constant__ float kfact = 100/3.8;
// Propagates track parameters (inPar, charge inChg) along a helix to the
// z-plane of the measurement msP, writing the propagated state to outPar
// and transforming the covariance as outErr = J * inErr * J^T, where J is
// the analytic Jacobian assembled in errorProp.
__device__ __forceinline__ void propagateToZ(const MP6x6SF_* inErr, const MP6F_* inPar, const MP1I_* inChg,const MP3F_* msP,
MP6x6SF_* outErr, MP6F_* outPar) {
struct MP6x6F_ errorProp, temp;
{
const float *inParData = inPar->data;
float *outParData = outPar->data;
const float zout = msP->data[iparZ];
// k: signed curvature factor (charge * 100/3.8).
const float k = inChg->data[0]*kfact;//*100/3.8;
const float deltaZ = zout - inParData[iparZ];
const float ipt_ = inParData[iparIpt];
const float pt = 1.0f/ipt_;
const float phi_ = inParData[iparPhi];
const float cosP = cosf(phi_);
const float sinP = sinf(phi_);
const float theta_ = inParData[iparTheta];
const float cosT = cosf(theta_);
const float sinT = sinf(theta_);
const float pxin = cosP*pt;
const float pyin = sinP*pt;
const float icosT = 1.0f/cosT;
const float icosTk = icosT/k;
// alpha: bending angle accumulated while travelling deltaZ in z.
const float alpha = deltaZ*sinT*ipt_*icosTk;
const float sina = sinf(alpha); // this can be approximated;
const float cosa = cosf(alpha); // this can be approximated;
// Helix advance in the transverse plane; z jumps straight to zout.
outParData[iparX] = inParData[iparX] + k*(pxin*sina - pyin*(1.0f-cosa));
outParData[iparY] = inParData[iparY] + k*(pyin*sina + pxin*(1.0f-cosa));
outParData[iparZ] = zout;
outParData[iparIpt] = ipt_;
outParData[iparPhi] = phi_+alpha;
outParData[iparTheta] = theta_;
// NOTE(review): sin/cos of (cosP*sina) matches the reference p2z code.
const float sCosPsina = sinf(cosP*sina);
const float cCosPsina = cosf(cosP*sina);
//for (size_t i=0;i<6;++i) errorProp.data[PosInMtrx(i,i,6) + it] = 1.f;
errorProp.data[PosInMtrx(0,0,6)] = 1.0f;
errorProp.data[PosInMtrx(1,1,6)] = 1.0f;
errorProp.data[PosInMtrx(2,2,6)] = 1.0f;
errorProp.data[PosInMtrx(3,3,6)] = 1.0f;
errorProp.data[PosInMtrx(4,4,6)] = 1.0f;
errorProp.data[PosInMtrx(5,5,6)] = 1.0f;
//[Dec. 21, 2022] Added to have the same pattern as the cudauvm version.
errorProp.data[PosInMtrx(0,1,6)] = 0.0f;
errorProp.data[PosInMtrx(0,2,6)] = cosP*sinT*(sinP*cosa*sCosPsina-cosa)*icosT;
errorProp.data[PosInMtrx(0,3,6)] = cosP*sinT*deltaZ*cosa*(1.0f-sinP*sCosPsina)*(icosT*pt)-k*(cosP*sina-sinP*(1.0f-cCosPsina))*(pt*pt);
errorProp.data[PosInMtrx(0,4,6)] = (k*pt)*(-sinP*sina+sinP*sinP*sina*sCosPsina-cosP*(1.0f-cCosPsina));
errorProp.data[PosInMtrx(0,5,6)] = cosP*deltaZ*cosa*(1.0f-sinP*sCosPsina)*(icosT*icosT);
errorProp.data[PosInMtrx(1,2,6)] = cosa*sinT*(cosP*cosP*sCosPsina-sinP)*icosT;
errorProp.data[PosInMtrx(1,3,6)] = sinT*deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*pt)-k*(sinP*sina+cosP*(1.0f-cCosPsina))*(pt*pt);
errorProp.data[PosInMtrx(1,4,6)] = (k*pt)*(-sinP*(1.0f-cCosPsina)-sinP*cosP*sina*sCosPsina+cosP*sina);
errorProp.data[PosInMtrx(1,5,6)] = deltaZ*cosa*(cosP*cosP*sCosPsina+sinP)*(icosT*icosT);
errorProp.data[PosInMtrx(4,2,6)] = -ipt_*sinT*(icosTk);
errorProp.data[PosInMtrx(4,3,6)] = sinT*deltaZ*(icosTk);
errorProp.data[PosInMtrx(4,5,6)] = ipt_*deltaZ*(icosT*icosTk);
}
// Similarity transform: outErr = errorProp * inErr * errorProp^T.
MultHelixPropEndcap(&errorProp, inErr, &temp);
MultHelixPropTranspEndcap(&errorProp, &temp, outErr);
}
// Per-stream event counts: each of the num_streams streams processes
// nevts/num_streams events (ie_range); the remainder (ie_rangeR) is
// handled by the extra GPUsequenceR launch.
__device__ __constant__ int ie_range = (int) nevts/num_streams;
__device__ __constant__ int ie_rangeR = (int) nevts%num_streams;
</device__ __constant__ int ie_rangeR = (int) nevts%num_streams;
// Main tracking kernel: one block per MPTRK batch, iterated with a
// block-stride loop over ie_range*nb batches.  Each batch is staged into
// block-local MPTRK_/MPHIT_ structs via loadData (indexed by threadIdx.x),
// propagated through nlayer layers with propagateToZ + KalmanUpdate, and
// the result written back with saveData.
// NOTE(review): btracks is loaded once and never refreshed from obtracks,
// so each layer propagates from the original input state — confirm this
// matches the reference implementation's intent.
__global__ void GPUsequence(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){
for (int ti = blockIdx.x; ti<ie_range*nb; ti+=gridDim.x){
struct MPTRK_ obtracks;
struct MPTRK_ btracks;
float *dstPtr = btracks.par.data;
float *srcPtr = trk[ti].par.data;
loadData(dstPtr,srcPtr,threadIdx.x,6);
dstPtr = btracks.cov.data;
srcPtr = trk[ti].cov.data;
loadData(dstPtr,srcPtr,threadIdx.x,21);
int *dstPtrI = btracks.q.data;
int *srcPtrI = trk[ti].q.data;
loadData(dstPtrI,srcPtrI,threadIdx.x,1);
#pragma unroll
for (int layer=0;layer<nlayer;++layer){
struct MPHIT_ bhits;
float *dstPtr2 = bhits.pos.data;
float *srcPtr2 = hit[layer+ti*nlayer].pos.data;
loadData(dstPtr2,srcPtr2,threadIdx.x,3);
dstPtr2 = bhits.cov.data;
srcPtr2 = hit[layer+ti*nlayer].cov.data;
loadData(dstPtr2,srcPtr2,threadIdx.x,6);
propagateToZ(&(btracks.cov), &(btracks.par), &(btracks.q), &(bhits.pos),
&(obtracks.cov), &(obtracks.par));
KalmanUpdate(&(obtracks.cov),&(obtracks.par),&(bhits.cov),&(bhits.pos));
}
// Store the final (last-layer) updated state for this batch.
dstPtr = outtrk[ti].par.data;
srcPtr = obtracks.par.data;
saveData(dstPtr,srcPtr,threadIdx.x,6);
dstPtr = outtrk[ti].cov.data;
srcPtr = obtracks.cov.data;
saveData(dstPtr,srcPtr,threadIdx.x,21);
dstPtrI = outtrk[ti].q.data;
srcPtrI = obtracks.q.data;
saveData(dstPtrI,srcPtrI,threadIdx.x,1);
}
}
// Remainder variant of GPUsequence: identical body except the loop bound
// uses ie_rangeR (nevts % num_streams events) instead of ie_range.  Used
// for the single extra launch covering events not divisible among streams.
__global__ void GPUsequenceR(MPTRK* trk, MPHIT* hit, MPTRK* outtrk, const int stream){
for (int ti = blockIdx.x; ti<ie_rangeR*nb; ti+=gridDim.x){
struct MPTRK_ obtracks;
struct MPTRK_ btracks;
float *dstPtr = btracks.par.data;
float *srcPtr = trk[ti].par.data;
loadData(dstPtr,srcPtr,threadIdx.x,6);
dstPtr = btracks.cov.data;
srcPtr = trk[ti].cov.data;
loadData(dstPtr,srcPtr,threadIdx.x,21);
int *dstPtrI = btracks.q.data;
int *srcPtrI = trk[ti].q.data;
loadData(dstPtrI,srcPtrI,threadIdx.x,1);
#pragma unroll
for (int layer=0;layer<nlayer;++layer){
struct MPHIT_ bhits;
float *dstPtr2 = bhits.pos.data;
float *srcPtr2 = hit[layer+ti*nlayer].pos.data;
loadData(dstPtr2,srcPtr2,threadIdx.x,3);
dstPtr2 = bhits.cov.data;
srcPtr2 = hit[layer+ti*nlayer].cov.data;
loadData(dstPtr2,srcPtr2,threadIdx.x,6);
propagateToZ(&(btracks.cov), &(btracks.par), &(btracks.q), &(bhits.pos),
&(obtracks.cov), &(obtracks.par));
KalmanUpdate(&(obtracks.cov),&(obtracks.par),&(bhits.cov),&(bhits.pos));
}
dstPtr = outtrk[ti].par.data;
srcPtr = obtracks.par.data;
saveData(dstPtr,srcPtr,threadIdx.x,6);
dstPtr = outtrk[ti].cov.data;
srcPtr = obtracks.cov.data;
saveData(dstPtr,srcPtr,threadIdx.x,21);
dstPtrI = outtrk[ti].q.data;
srcPtrI = obtracks.q.data;
saveData(dstPtrI,srcPtrI,threadIdx.x,1);
}
}
// Prefetches the managed trk/hit buffers to GPU `device`, one chunk per
// stream plus an optional remainder chunk.  With USE_ASYNC each chunk is
// queued on its own stream; otherwise everything goes to the default stream.
void prefetch_device(MPTRK* trk, MPHIT* hit, cudaStream_t* streams, int stream_chunk, int stream_remainder, int device) {
  for (int s = 0; s < num_streams; s++) {
#ifdef USE_ASYNC
    cudaStream_t q = streams[s];
#else
    cudaStream_t q = 0;
#endif
    cudaMemPrefetchAsync(trk+(s*stream_chunk), stream_chunk*sizeof(MPTRK), device, q);
    cudaMemPrefetchAsync(hit+(s*stream_chunk*nlayer), nlayer*stream_chunk*sizeof(MPHIT), device, q);
  }
  if (stream_remainder != 0) {
#ifdef USE_ASYNC
    cudaStream_t q = streams[num_streams];
#else
    cudaStream_t q = 0;
#endif
    cudaMemPrefetchAsync(trk+(num_streams*stream_chunk), stream_remainder*sizeof(MPTRK), device, q);
    cudaMemPrefetchAsync(hit+(num_streams*stream_chunk*nlayer), nlayer*stream_remainder*sizeof(MPHIT), device, q);
  }
}
// Prefetches the managed outtrk buffer back to host memory, one chunk per
// stream plus an optional remainder chunk (mirrors prefetch_device).
void prefetch_host(MPTRK* outtrk, cudaStream_t* streams, int stream_chunk, int stream_remainder) {
  for (int s = 0; s < num_streams; s++) {
#ifdef USE_ASYNC
    cudaStream_t q = streams[s];
#else
    cudaStream_t q = 0;
#endif
    cudaMemPrefetchAsync(outtrk+(s*stream_chunk), stream_chunk*sizeof(MPTRK), cudaCpuDeviceId, q);
  }
  if (stream_remainder != 0) {
#ifdef USE_ASYNC
    cudaStream_t q = streams[num_streams];
#else
    cudaStream_t q = 0;
#endif
    cudaMemPrefetchAsync(outtrk+(num_streams*stream_chunk), stream_remainder*sizeof(MPTRK), cudaCpuDeviceId, q);
  }
}
// Benchmark driver: builds smeared input tracks/hits from one reference
// track/hit, runs NITER iterations of the propagate+Kalman-update kernel
// across num_streams streams, then prints timing and validation statistics
// (means and standard deviations of positions and relative residuals).
int main (int argc, char* argv[]) {
#ifdef USE_ASYNC
printf("RUNNING CUDA Async Version!!\n");
#else
printf("RUNNING CUDA Sync Version!!\n");
#endif
#ifdef include_data
printf("Measure Both Memory Transfer Times and Compute Times!\n");
#else
printf("Measure Compute Times Only!\n");
#endif
printf("Streams: %d, blocks: %d, threads(x,y): (%d,%d)\n",num_streams,blockspergrid,threadsperblockx,threadsperblocky);
int itr;
// Reference track: parameters, packed 21-float covariance, charge.
ATRK inputtrk = {
{-12.806846618652344, -7.723824977874756, 38.13014221191406,0.23732035065189902, -2.613372802734375, 0.35594117641448975},
{6.290299552347278e-07,4.1375109560704004e-08,7.526661534029699e-07,2.0973730840978533e-07,1.5431574240665213e-07,9.626245400795597e-08,-2.804026640189443e-06,
6.219111130687595e-06,2.649119409845118e-07,0.00253512163402557,-2.419662877381737e-07,4.3124190760040646e-07,3.1068903991780678e-09,0.000923913115050627,
0.00040678296006807003,-7.755406890332818e-07,1.68539375883925e-06,6.676875566525437e-08,0.0008420574605423793,7.356584799406111e-05,0.0002306247719158348},
1
};
// Reference hit: position and packed 6-float covariance.
AHIT inputhit = {
{-20.7824649810791, -12.24150276184082, 57.8067626953125},
{2.545517190810642e-06,-2.6680759219743777e-06,2.8030024168401724e-06,0.00014160551654640585,0.00012282167153898627,11.385087966918945}
};
printf("track in pos: %f, %f, %f \n", inputtrk.par[0], inputtrk.par[1], inputtrk.par[2]);
printf("track in cov: %.2e, %.2e, %.2e \n", inputtrk.cov[SymOffsets66(PosInMtrx(0,0,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(1,1,6))],
inputtrk.cov[SymOffsets66(PosInMtrx(2,2,6))]);
printf("hit in pos: %f %f %f \n", inputhit.pos[0], inputhit.pos[1], inputhit.pos[2]);
printf("produce nevts=%i ntrks=%i smearing by=%f \n", nevts, ntrks, smear);
printf("NITER=%d\n", NITER);
// Setup-phase wall clock (milliseconds).
long setup_start, setup_stop;
struct timeval timecheck;
gettimeofday(&timecheck, NULL);
setup_start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
#ifdef FIXED_RSEED
//[DEBUG by Seyong on Dec. 28, 2020] add an explicit srand(1) call to generate fixed inputs for better debugging.
srand(1);
#endif
// cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
// cudaFuncSetCacheConfig(GPUsequence,cudaFuncCachePreferL1);
// cudaFuncSetCacheConfig(GPUsequenceR,cudaFuncCachePreferL1);
// Replicate/smear the reference track and hit into managed buffers.
MPTRK* trk = prepareTracks(inputtrk);
MPHIT* hit = prepareHits(inputhit);
MPTRK* outtrk;
cudaMallocManaged((void**)&outtrk,nevts*nb*sizeof(MPTRK));
dim3 grid(blockspergrid,1,1);
dim3 block(threadsperblockx,threadsperblocky,1);
int device = -1;
cudaGetDevice(&device);
// Partition the event batches across streams; the remainder (if any) is
// handled by one extra stream/launch.
int stream_chunk = ((int)(nevts/num_streams))*nb;//*sizeof(MPTRK);
int stream_remainder = ((int)(nevts%num_streams))*nb;//*sizeof(MPTRK);
int stream_range;
if (stream_remainder == 0){ stream_range =num_streams;}
else{stream_range = num_streams+1;}
cudaStream_t streams[stream_range];
for (int s = 0; s<stream_range;s++){
//cudaStreamCreateWithFlags(&streams[s],cudaStreamNonBlocking);
cudaStreamCreate(&streams[s]);
}
// Without include_data, transfers happen once here (outside timed region).
#ifndef include_data
prefetch_device(trk, hit, streams, stream_chunk, stream_remainder, device);
#ifdef USE_ASYNC
cudaDeviceSynchronize();
#endif
#endif
gettimeofday(&timecheck, NULL);
setup_stop = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000;
printf("done preparing!\n");
printf("Size of struct MPTRK trk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct MPTRK outtrk[] = %ld\n", nevts*nb*sizeof(struct MPTRK));
printf("Size of struct struct MPHIT hit[] = %ld\n", nevts*nb*sizeof(struct MPHIT));
// Timed region: NITER iterations of (optional prefetch +) kernel launches.
auto wall_start = std::chrono::high_resolution_clock::now();
for(itr=0; itr<NITER; itr++){
#ifdef include_data
prefetch_device(trk, hit, streams, stream_chunk, stream_remainder, device);
#endif
for (int s = 0; s<num_streams;++s){
//printf("stream = %d, grid (%d, %d, %d), block(%d, %d, %d), stream_chunk = %d\n",s, grid.x, grid.y, grid.z, block.x, block.y, block.z, stream_chunk);
#ifdef USE_ASYNC
GPUsequence<<<grid,block,0,streams[s]>>>(trk+(s*stream_chunk),hit+(s*stream_chunk*nlayer),outtrk+(s*stream_chunk),s);
#else
GPUsequence<<<grid,block,0,0>>>(trk+(s*stream_chunk),hit+(s*stream_chunk*nlayer),outtrk+(s*stream_chunk),s);
#endif
}
if(stream_remainder != 0){
#ifdef USE_ASYNC
GPUsequenceR<<<grid,block,0,streams[num_streams]>>>(trk+(num_streams*stream_chunk),hit+(num_streams*stream_chunk*nlayer),outtrk+(num_streams*stream_chunk),num_streams);
#else
GPUsequenceR<<<grid,block,0,0>>>(trk+(num_streams*stream_chunk),hit+(num_streams*stream_chunk*nlayer),outtrk+(num_streams*stream_chunk),num_streams);
#endif
}
#ifdef include_data
prefetch_host(outtrk, streams, stream_chunk, stream_remainder);
#endif
} //end itr loop
#ifdef USE_ASYNC
cudaDeviceSynchronize();
#endif
auto wall_stop = std::chrono::high_resolution_clock::now();
// Without include_data, results are copied back outside the timed region.
#ifndef include_data
prefetch_host(outtrk, streams, stream_chunk, stream_remainder);
#ifdef USE_ASYNC
cudaDeviceSynchronize();
#endif
#endif
for (int s = 0; s<stream_range;s++){
cudaStreamDestroy(streams[s]);
}
auto wall_diff = wall_stop - wall_start;
auto wall_time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(wall_diff).count()) / 1e6;
printf("setup time time=%f (s)\n", (setup_stop-setup_start)*0.001);
printf("done ntracks=%i tot time=%f (s) time/trk=%e (s)\n", nevts*ntrks*int(NITER), wall_time, wall_time/(nevts*ntrks*int(NITER)));
printf("formatted %i %i %i %i %i %f 0 %f %i\n",int(NITER),nevts, ntrks, bsize, nb, wall_time, (setup_stop-setup_start)*0.001, num_streams);
#ifdef DUMP_OUTPUT
FILE *fp_x;
FILE *fp_y;
FILE *fp_z;
fp_x = fopen("output_x.txt", "w");
fp_y = fopen("output_y.txt", "w");
fp_z = fopen("output_z.txt", "w");
#endif
// Validation pass 1: accumulate means of output parameters and of the
// relative residuals against the hit positions.
double avgx = 0, avgy = 0, avgz = 0;
double avgpt = 0, avgphi = 0, avgtheta = 0;
double avgdx = 0, avgdy = 0, avgdz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
float pt_ = 1./ipt(outtrk,ie,it);
float phi_ = phi(outtrk,ie,it);
float theta_ = theta(outtrk,ie,it);
#ifdef DUMP_OUTPUT
fprintf(fp_x, "ie=%lu, it=%lu, %f\n",ie, it, x_);
fprintf(fp_y, "%f\n", y_);
fprintf(fp_z, "%f\n", z_);
#endif
//if(x_ ==0 || y_==0||z_==0){
//printf("x: %f,y: %f,z: %f, ie: %d, it: %f\n",x_,y_,z_,ie,it);
//continue;
//}
avgpt += pt_;
avgphi += phi_;
avgtheta += theta_;
avgx += x_;
avgy += y_;
avgz += z_;
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
//if(x_ ==0 || y_==0 || z_==0){continue;}
avgdx += (x_-hx_)/x_;
avgdy += (y_-hy_)/y_;
avgdz += (z_-hz_)/z_;
}
}
#ifdef DUMP_OUTPUT
fclose(fp_x);
fclose(fp_y);
fclose(fp_z);
fp_x = fopen("input_x.txt", "w");
fp_y = fopen("input_y.txt", "w");
fp_z = fopen("input_z.txt", "w");
#endif
avgpt = avgpt/double(nevts*ntrks);
avgphi = avgphi/double(nevts*ntrks);
avgtheta = avgtheta/double(nevts*ntrks);
avgx = avgx/double(nevts*ntrks);
avgy = avgy/double(nevts*ntrks);
avgz = avgz/double(nevts*ntrks);
avgdx = avgdx/double(nevts*ntrks);
avgdy = avgdy/double(nevts*ntrks);
avgdz = avgdz/double(nevts*ntrks);
// Validation pass 2: standard deviations around the means computed above.
double stdx = 0, stdy = 0, stdz = 0;
double stddx = 0, stddy = 0, stddz = 0;
for (size_t ie=0;ie<nevts;++ie) {
for (size_t it=0;it<ntrks;++it) {
float x_ = x(outtrk,ie,it);
float y_ = y(outtrk,ie,it);
float z_ = z(outtrk,ie,it);
stdx += (x_-avgx)*(x_-avgx);
stdy += (y_-avgy)*(y_-avgy);
stdz += (z_-avgz)*(z_-avgz);
float hx_ = x(hit,ie,it);
float hy_ = y(hit,ie,it);
float hz_ = z(hit,ie,it);
stddx += ((x_-hx_)/x_-avgdx)*((x_-hx_)/x_-avgdx);
stddy += ((y_-hy_)/y_-avgdy)*((y_-hy_)/y_-avgdy);
stddz += ((z_-hz_)/z_-avgdz)*((z_-hz_)/z_-avgdz);
#ifdef DUMP_OUTPUT
x_ = x(trk,ie,it);
y_ = y(trk,ie,it);
z_ = z(trk,ie,it);
fprintf(fp_x, "%f\n", x_);
fprintf(fp_y, "%f\n", y_);
fprintf(fp_z, "%f\n", z_);
#endif
}
}
#ifdef DUMP_OUTPUT
fclose(fp_x);
fclose(fp_y);
fclose(fp_z);
#endif
stdx = sqrtf(stdx/double(nevts*ntrks));
stdy = sqrtf(stdy/double(nevts*ntrks));
stdz = sqrtf(stdz/double(nevts*ntrks));
stddx = sqrtf(stddx/double(nevts*ntrks));
stddy = sqrtf(stddy/double(nevts*ntrks));
stddz = sqrtf(stddz/double(nevts*ntrks));
printf("track x avg=%f std/avg=%f\n", avgx, fabs(stdx/avgx));
printf("track y avg=%f std/avg=%f\n", avgy, fabs(stdy/avgy));
printf("track z avg=%f std/avg=%f\n", avgz, fabs(stdz/avgz));
printf("track dx/x avg=%f std=%f\n", avgdx, stddx);
printf("track dy/y avg=%f std=%f\n", avgdy, stddy);
printf("track dz/z avg=%f std=%f\n", avgdz, stddz);
printf("track pt avg=%f\n", avgpt);
printf("track phi avg=%f\n", avgphi);
printf("track theta avg=%f\n", avgtheta);
//free(trk);
//free(hit);
//free(outtrk);
// Buffers were allocated with cudaMallocManaged, so cudaFree is correct.
cudaFree(trk);
cudaFree(hit);
cudaFree(outtrk);
return 0;
}
|
21,786 | #include "includes.h"
// Reduces one difference vector to its Euclidean norm:
//   twoNodesDistance[0] = || twoNodesDifference[0 .. vectorLength-1] ||_2
// Only the global thread with id 0 does any work (the `tid < 1` guard
// idles every other thread of the launch), so the sum is computed serially.
__global__ void TwoNodesDistanceKernel( float *twoNodesDifference, float *twoNodesDistance, int vectorLength )
{
    // Flatten the (gridDim.x x gridDim.y) grid of 1-D blocks to a global id.
    int tid = blockDim.x * blockIdx.y * gridDim.x
            + blockDim.x * blockIdx.x
            + threadIdx.x;
    if (tid >= 1)
        return;
    float sumOfSquares = 0.0f;
    for (int i = 0; i < vectorLength; i++)
    {
        float component = twoNodesDifference[tid * vectorLength + i];
        sumOfSquares += component * component;
    }
    twoNodesDistance[tid] = sqrtf(sumOfSquares);
}
21,787 | #include <stdio.h>
// Prints one greeting per thread of the launch.
// BUGFIX: the original passed threadIdx.x for the "block" label and
// blockIdx.x for the "thread" label — the arguments were swapped.
__global__ void hello(){
    printf("Hello from block: %u, thread: %u\n", blockIdx.x, threadIdx.x);
}
int main(){
    hello<<<2,1>>>();                      // 2 blocks x 1 thread each
    // Kernel launches do not return errors directly; check explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();               // flush device-side printf output
    return 0;
}
|
21,788 | #include "includes.h"
// Unpacks the `ghost` buffer into column 1 of a padded row-major
// (block_size+2)-wide temperature grid: element j lands at row j+1,
// column 1 (i.e. the first interior column, next to the halo).
__global__ void leftUnpackingKernel(double* temperature, double* ghost, int block_size) {
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= block_size)
        return;
    const int width = block_size + 2;            // padded row width
    temperature[width * (row + 1) + 1] = ghost[row];
}
21,789 | /*** Calculating a derivative with CD ***/
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
// Reports any pending CUDA error to stderr, tagged with `label`.
// Synchronises first so that errors from asynchronous operations (kernel
// launches, async copies) are surfaced here rather than at a later call.
// Fixes: `label` is now const char* (a string literal cannot bind to
// char* in standard C++ — the caller passes "update"), and the deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
void checkErrors(const char *label)
{
    cudaError_t err;
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "CUDA Error: %s (at %s)\n", cudaGetErrorString(err), label);
    }
    err = cudaGetLastError();   // also clears the sticky error state
    if (err != cudaSuccess)
    {
        fprintf(stderr, "CUDA Error: %s (at %s)\n", cudaGetErrorString(err), label);
    }
}
// Returns wall-clock time in seconds.  Synchronises the device first so
// that timings bracket all previously launched GPU work.
// Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
double get_time()
{   struct timeval tim;
    cudaDeviceSynchronize();
    gettimeofday(&tim, NULL);
    return (double) tim.tv_sec+(tim.tv_usec/1000000.0);
}
// u_prev <- u, one element per thread.  The launch tiles an N x N
// row-major array with BSZ x BSZ blocks; threads mapping past the end of
// the array do nothing.
__global__ void copy_array(float *u, float *u_prev, int N, int BSZ)
{
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int idx = blockIdx.y*BSZ*N + blockIdx.x*BSZ + ty*N + tx;
    if (idx < N*N)
        u_prev[idx] = u[idx];
}
// GPU kernel
// One explicit (FTCS) time step of the 2-D heat equation on an N x N
// row-major grid:
//   u[I] += alpha*dt/h^2 * (u_prev[E] + u_prev[W] + u_prev[S] + u_prev[N] - 4*u_prev[I])
// Boundary rows/columns are excluded by the interior test, so boundary
// values act as fixed (Dirichlet) conditions.
__global__ void update (float *u, float *u_prev, int N, float h, float dt, float alpha, int BSZ)
{
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
//if (()>=N || j>){return;}
// Interior test: not in the first/last row (I>N, I<N*N-1-N) and not in
// the first/last column (I%N checks).
if ( (I>N) && (I< N*N-1-N) && (I%N!=0) && (I%N!=N-1))
{ u[I] = u_prev[I] + alpha*dt/(h*h) * (u_prev[I+1] + u_prev[I-1] + u_prev[I+N] + u_prev[I-N] - 4*u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
// Driver: explicit heat-equation solve on a 128x128 grid, then dump the
// temperature field to "temperature_global.txt".
// Fixes vs. original: the host arrays allocated with new[] were never
// freed (memory leak), and the host-side u_prev array was allocated but
// never used — it has been removed.
int main()
{
    // Problem setup: N x N grid, BLOCKSIZE x BLOCKSIZE thread blocks.
    int N = 128;
    int BLOCKSIZE = 16;
    cudaSetDevice(0);
    float xmin = 0.0f;
    float xmax = 3.5f;
    float ymin = 0.0f;
    //float ymax = 2.0f;
    float h = (xmax-xmin)/(N-1);   // grid spacing
    float dt = 0.00001f;           // time step
    float alpha = 0.645f;          // diffusivity
    float time = 0.4f;             // total simulated time
    int steps = ceil(time/dt);
    int I;
    float *x = new float[N*N];
    float *y = new float[N*N];
    float *u = new float[N*N];
    // Generate mesh and initial condition: u = 200 on the left/bottom edges.
    for (int j=0; j<N; j++)
    {   for (int i=0; i<N; i++)
        {   I = N*j + i;
            x[I] = xmin + h*i;
            y[I] = ymin + h*j;
            u[I] = 0.0f;
            if ( (i==0) || (j==0))
            {u[I] = 200.0f;}
        }
    }
    // Allocate in GPU
    float *u_d, *u_prev_d;
    cudaMalloc( (void**) &u_d, N*N*sizeof(float));
    cudaMalloc( (void**) &u_prev_d, N*N*sizeof(float));
    // Copy initial condition to GPU
    cudaMemcpy(u_d, u, N*N*sizeof(float), cudaMemcpyHostToDevice);
    // Time loop: snapshot u -> u_prev, then apply one stencil update.
    dim3 dimGrid(int((N-0.5)/BLOCKSIZE)+1, int((N-0.5)/BLOCKSIZE)+1);
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
    double start = get_time();
    for (int t=0; t<steps; t++)
    {   copy_array <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, BLOCKSIZE);
        update <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, h, dt, alpha, BLOCKSIZE);
    }
    double stop = get_time();
    checkErrors("update");
    double elapsed = stop - start;
    std::cout<<"time = "<<elapsed<<std::endl;
    // Copy result back to host and write it out as "x y u" triples.
    cudaMemcpy(u, u_d, N*N*sizeof(float), cudaMemcpyDeviceToHost);
    std::ofstream temperature("temperature_global.txt");
    for (int j=0; j<N; j++)
    {   for (int i=0; i<N; i++)
        {   I = N*j + i;
            temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[I]<<std::endl;
        }
        temperature<<"\n";
    }
    temperature.close();
    // Free device and host memory (the host arrays previously leaked).
    cudaFree(u_d);
    cudaFree(u_prev_d);
    delete[] x;
    delete[] y;
    delete[] u;
}
|
21,790 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
// Busy-work kernel: each in-range element is incremented by 10 a hundred
// thousand times (net +1,000,000) to generate measurable GPU load.
// (Despite the name, this kernel adds — it does not multiply.)
__global__ void vecMultiply(int *arr, int size){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    for (int iter = 0; iter < 100000; ++iter) {
        arr[idx] += 10;
    }
}
// Driver: fills a managed array with 0..63, runs the busy-work kernel, and
// prints the results.  Fix vs. original: CUDA API calls and the kernel
// launch were completely unchecked — failures would surface as a host-side
// crash or silently wrong output.
int main(int argc, char *argv[]){
    // Launch configuration: one thread per element (ceil-division).
    int elementSize = 64;
    int threadsPerBlock = 32;
    int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
    int *host_input_arr;
    // Managed (unified) memory: accessible from both host and device.
    cudaError_t err = cudaMallocManaged((void**)&host_input_arr, sizeof(int) * elementSize);
    if (err != cudaSuccess){
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for(int i = 0;i<elementSize;i++){
        host_input_arr[i] = i;
    }
    vecMultiply<<<blockSize, threadsPerBlock>>>(host_input_arr, elementSize);
    err = cudaGetLastError();            // launch-configuration errors
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();   // execution errors
    if (err != cudaSuccess){
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        cudaFree(host_input_arr);
        return 1;
    }
    for(int i = 0;i<elementSize;i++){
        printf("%d ", host_input_arr[i]);
    }
    printf("\n");
    cudaFree(host_input_arr);
    return 0;
}
21,791 |
// Element-wise forward projection placeholder: d_b[i] = d_a[i] + 0.6.
// NOTE(review): no bounds check — the launch must cover exactly the buffer.
__global__ void kernel_forward_projection(float *d_a, float *d_b)
{
    const int i = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x) + threadIdx.x;
    d_b[i] = d_a[i] + 0.6f;
}
// Element-wise back projection placeholder: d_a[i] = d_b[i] / 2.
// NOTE(review): no bounds check — the launch must cover exactly the buffer.
__global__ void kernel_back_projection(float *d_a, float *d_b)
{
    const int i = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x) + threadIdx.x;
    d_a[i] = d_b[i] / 2.0f;
}
// Element-wise in-place accumulation: d_a[i] += d_b[i].
// NOTE(review): no bounds check — the launch must cover exactly the buffer.
__global__ void kernel_add(float *d_a, float *d_b)
{
    const int i = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x) + threadIdx.x;
    d_a[i] += d_b[i];
}
21,792 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <time.h>
#include <math.h>
#include <algorithm>
//For the given architecture, we do not require strides in avePooling layer. each N*M matrix is converted into one single value and stored into a memory location
//It is a simple reduction kernel applied to smaller segments of an array < 32 generally. Even if the layer is large, say 16*16, we can launch that many threads per block
/*_device__ void warpReduce ( int * sdata , int tid )
{
sdata [tid] += sdata [tid + 32];
sdata [tid] += sdata [tid + 16];
sdata [tid] += sdata [tid + 8];
sdata [tid] += sdata [tid + 4];
sdata [tid] += sdata [tid + 2];
sdata [tid] += sdata [tid + 1];
}*/
//Misaligned data acceses are not as problematic in modern GPUs due to larger L1 Cache width
//https://developer.nvidia.com/blog/how-access-global-memory-efficiently-cuda-c-kernels/#:~:text=Misaligned%20Data%20Accesses&text=Arrays%20allocated%20in%20device%20memory,are%20aligned%20to%20their%20size.
// Average pooling: block b reduces the threadsPerMat contiguous elements
// of A belonging to matrix b down to their mean, written to B[b].
// Launch contract (from the host wrapper): blockDim.x == nearest_2, the
// largest power of two <= threadsPerMat, and the dynamic shared memory
// holds nearest_2 floats.  The (threadsPerMat - nearest_2) tail elements
// are folded in during the initial load so the tree reduction can assume
// a power-of-two count.
__global__ void avgPoolKernel(float*A, float*B, int threadsPerMat, int nearest_2)
{
    extern __shared__ float tile[];   // nearest_2 floats; uninitialized!
    const unsigned int lane = threadIdx.x;
    const unsigned int src  = lane + blockIdx.x * threadsPerMat;
    // Load my element plus (for the low lanes) one tail element beyond the
    // power-of-two window; other lanes contribute 0 for the tail slot.
    float folded = 0.0f;
    if (lane < (unsigned int)(threadsPerMat - nearest_2))
        folded = A[src + nearest_2];
    tile[lane] = folded + A[src];
    __syncthreads();
    // Standard shared-memory tree reduction over nearest_2 elements.
    for (unsigned int stride = nearest_2/2; stride > 0; stride >>= 1)
    {
        if (lane < stride)
            tile[lane] += tile[lane + stride];
        __syncthreads();
    }
    // Lane 0 holds the sum; divide by the true element count for the mean.
    if (lane == 0)
        B[blockIdx.x] = tile[0] / threadsPerMat;
}
//We try to access memory in strides, but we have to put an if condition to write in proper locations.
//i.e. we did not have too many if conditions before, but uncoalesced memory accesses.
//Now we will have too many if else conditions initially.
//We can assume that the inputs are of small size, <12X12 or even 15X15. In this case we will try to club as many blocks as we can such that we get full 32.
// __global__ avgPoolKernelV2(float*A, float*B, int threadsPerMat, int nearest_2)
// {
// }
//From now on it would be assumed that each function must provide the input matrices already in the device.
//The weight matrices shall be provided copied into the device memory as well.
//If they are not present in the memory already, special functions shall exist to do so.
// Average-pools each of `channels` matrices of size width*height down to a
// single mean per channel using avgPoolKernel (one block per channel).
//
// Input may live on host or device:
//   hA/dA : input  (if dA == NULL the data is taken from hA and uploaded;
//           at least one of the two must be non-NULL)
//   hB/dB : output (a device buffer is always allocated here; the result is
//           copied back to hB when hB != NULL)
//
// NOTE(review): dA and dB are passed by value, so any cudaMalloc done here
// is invisible to the caller and those device buffers leak unless the
// caller already owned them.  Fixing that needs float** parameters (an
// interface change), so it is only flagged here.
void avgPool(int width, int height, int channels, float* hA, float* hB, float* dA, float* dB)
{
    int threadsPerMat = width*height;
    // Largest power of two <= threadsPerMat; avgPoolKernel folds the tail in
    // during its load phase, so one block of nearest_2 threads per channel.
    int nearest_2 = pow(2,floor(log2(threadsPerMat)));
    dim3 gridSize(channels,1,1);
    dim3 blockSize(nearest_2,1);
    cudaError_t err = cudaSuccess;
    if(dA == NULL)
    {
        // Fix: validate the host source *before* allocating device memory
        // (previously the check ran after cudaMalloc, leaking on error).
        if(hA == NULL)
        {
            fprintf(stderr, "No input A (device nor host) Given to avgPool\n");
            exit(0);
        }
        err = cudaMalloc((void**)&dA,height*width*channels*sizeof(float));
        if(err !=cudaSuccess)
        {
            fprintf(stderr, "avgPool: Failed to allocate device vector A (code:%s)\n",cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        err = cudaMemcpy(dA, hA, height*width*channels*sizeof(float) , cudaMemcpyHostToDevice);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }
    err = cudaMalloc((void**)&dB,channels*sizeof(float));
    if(err !=cudaSuccess)
    {
        fprintf(stderr, "avgPool: Failed to allocate device vector B (code:%s)\n",cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    avgPoolKernel<<<gridSize,blockSize,nearest_2*sizeof(float)>>>(dA,dB,threadsPerMat,nearest_2);
    // Fix: kernel launches do not return an error directly; a bad launch
    // configuration (e.g. nearest_2 exceeding the device's thread limit for
    // large matrices) was previously silent.
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "avgPool: kernel launch failed (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    if(hB!=NULL)
    {
        // Blocking copy: also synchronizes with the kernel above.
        err = cudaMemcpy(hB, dB, channels*sizeof(float) , cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy device vector B from device to host (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }
}
// Driver: builds a 1024-channel 7x7 test volume where element (k,i,j) has
// value j*(k+1), then runs avgPool on it.  hB receives the per-channel
// means (printing them is left commented out).
int main(int argc, char const *argv[])
{
//Driver Code only
int channels = 1024, height = 7, width = 7;
float *hA = (float *)malloc(width*height*channels*sizeof(float));
float *hB = (float *)malloc(channels*sizeof(float));
float *dA = NULL; // NULL => avgPool allocates the device buffer and uploads hA
float *dB = NULL; // NULL on entry; avgPool allocates the device output itself
// Fill the test volume: channel k, row i, column j -> j*(k+1).
for (int k = 0; k < channels; ++k)
{
for (int i = 0; i < height; ++i)
{
for (int j = 0; j < width; ++j)
{
hA[k*width*height + i*width + j] = j*(k+1);
}
}
}
avgPool(width,height,channels,hA,hB,dA,dB);
// Uncomment to print the per-channel means:
// for (int k = 0; k < channels; ++k)
// {
// printf("%f\n", *(hB+k));
// }
// NOTE(review): hA/hB are never freed and the device buffers allocated
// inside avgPool are unreachable here -- tolerable for a one-shot driver,
// reclaimed at process exit.
printf("DONE!!!\n");
return 0;
} |
21,793 | /*
SU Project -- Taniya -- Cuda
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <cuda_runtime.h>
/* Bounds of the Mandelbrot set */
#define X_MIN -1.78
#define X_MAX 0.78
#define Y_MIN -0.961
#define Y_MAX 0.961
__global__ void pixel_calculation(double dx, double dy, char * pixels, int nb_iter, double x_min, double y_max, int columns);
typedef struct {
int nb_rows, nb_columns; /* Dimensions */
char * pixels; /* Linearized matrix of pixels */
} Image;
/* Prints the usage summary on stderr and terminates the program.
 * The default values shown must stay in sync with those set in analyzis(). */
static void error_options () {
    fprintf (stderr, "Use : ./mandel [options]\n\n");
    fprintf (stderr, "Options \t Meaning \t\t Default val.\n\n");
    fprintf (stderr, "-n \t\t Nb iter. \t\t 100\n");
    fprintf (stderr, "-b \t\t Bounds \t\t -1.78 0.78 -0.961 0.961\n");
    fprintf (stderr, "-d \t\t Dimensions \t\t 1024 768\n");
    /* Fix: the help text advertised /tmp/mandel.ppm, but the actual default
     * set in analyzis() is "mandel.ppm". */
    fprintf (stderr, "-f \t\t File \t\t mandel.ppm\n");
    exit (1);
}
/* Parses the command line into the output parameters.
 *   -n <iters>                       number of Mandelbrot iterations
 *   -b <xmin> <xmax> <ymin> <ymax>   bounds of the represented region
 *   -d <width> <height>              image dimensions in pixels
 *   -f <path>                        output PPM file
 * Any unrecognised option prints the usage text and exits. */
static void analyzis (int argc, char * * argv, int * nb_iter, double * x_min, double * x_max, double * y_min, double * y_max, int * width, int * height, char * * path) {
const char * opt = "b:d:n:f:" ;
int c ;
/* Default values */
* nb_iter = 100;
* x_min = X_MIN;
* x_max = X_MAX;
* y_min = Y_MIN;
* y_max = Y_MAX;
* width = 1024;
* height = 768;
* path = "mandel.ppm"; /* NOTE(review): string literal stored through char** -- callers must never modify or free *path */
/* Analysis of arguments */
while ((c = getopt (argc, argv, opt)) != EOF) {
switch (c) {
case 'b':
/* getopt consumes only the first bound; the remaining three are read
   straight from argv.  NOTE(review): optind is never checked against
   argc, so "-b" with fewer than 4 numbers reads past the end of argv. */
sscanf (optarg, "%lf", x_min);
sscanf (argv [optind ++], "%lf", x_max);
sscanf (argv [optind ++], "%lf", y_min);
sscanf (argv [optind ++], "%lf", y_max);
break ;
case 'd': /* width then height; the second value is taken from argv directly (same caveat as -b) */
sscanf (optarg, "%d", width);
sscanf (argv [optind ++], "%d", height);
break;
case 'n': /* Number of iterations */
* nb_iter = atoi (optarg);
break;
case 'f': /* Output file */
* path = optarg;
break;
default :
error_options ();
};
}
}
/* Allocates the pixel buffer of an Image of nb_columns x nb_rows and stores
 * the dimensions.  Aborts on allocation failure. */
static void initialization (Image * im, int nb_columns, int nb_rows) {
    im -> nb_rows = nb_rows;
    im -> nb_columns = nb_columns;
    im -> pixels = (char *) malloc (sizeof (char) * nb_rows * nb_columns);
    /* Fix: the malloc result was previously used unchecked. */
    if (im -> pixels == NULL) {
        fprintf (stderr, "initialization: out of memory for %d x %d pixels\n", nb_columns, nb_rows);
        exit (1);
    }
}
/* Saves the image to `path` as a binary (P6) PPM file, replicating each
 * monochrome pixel value onto the R, G and B channels. */
static void save (const Image * im, const char * path) {
    unsigned i;
    /* Fix: P6 is a binary format -- open in "wb" so platforms with CRLF
     * translation (Windows) do not corrupt 0x0A pixel bytes.  Also check
     * fopen, which previously fed a NULL stream to fprintf on failure. */
    FILE * f = fopen (path, "wb");
    if (f == NULL) {
        fprintf (stderr, "save: cannot open '%s' for writing\n", path);
        exit (1);
    }
    fprintf (f, "P6\n%d %d\n255\n", im -> nb_columns, im -> nb_rows);
    for (i = 0; i < im -> nb_columns * im -> nb_rows; i ++) {
        char c = im -> pixels [i];
        fprintf (f, "%c%c%c", c, c, c); /* Monochrome weight */
    }
    fclose (f);
}
/* Computes the Mandelbrot image on the GPU: discretizes the bounded region
 * over the pixel grid, uploads the pixel buffer, launches one thread per
 * pixel, and copies the result back.
 * NOTE(review): the grid uses truncating integer division, so rows and
 * columns must be multiples of 16 or the right/bottom edge pixels are never
 * computed (the kernel has no bounds check either).  The default 1024x768
 * satisfies this; verify before changing -d handling. */
static void Compute (Image * im, int nb_iter, double x_min, double x_max, double y_min, double y_max) {
double dx = (x_max - x_min) / im -> nb_columns, dy = (y_max - y_min) / im -> nb_rows; /* Discretization */
int rownum = im->nb_rows, colnum = im-> nb_columns;
dim3 blocksize(16,16,1); // 16x16 = 256 threads per block
dim3 nblocks(rownum/16, colnum/16, 1); // grid x spans rows, y spans columns (matches the kernel's transposed convention)
char * im_pixels_d;
cudaMalloc(&im_pixels_d, sizeof(char)*rownum*colnum); // NOTE(review): CUDA return codes below are unchecked
cudaMemcpy(im_pixels_d, im->pixels , sizeof(char) * rownum * colnum,cudaMemcpyHostToDevice); // upload is redundant (every pixel is overwritten) but harmless
pixel_calculation<<< nblocks, blocksize >>> (dx, dy, im_pixels_d, nb_iter, x_min, y_max, colnum);
cudaMemcpy(im -> pixels, im_pixels_d, sizeof(char)*rownum*colnum,cudaMemcpyDeviceToHost); /* blocking copy also synchronizes with the kernel */
cudaFree(im_pixels_d);
}
/* One thread per pixel of the Mandelbrot image.  id_x indexes rows and id_y
 * indexes columns (note the transposed convention); the launch grid must
 * cover the image exactly, as there is no bounds check. */
__global__ void pixel_calculation(double dx, double dy, char * pixels, int nb_iter, double x_min, double y_max, int colnum)
{
    const int id_x = blockIdx.x * blockDim.x + threadIdx.x; /* row */
    const int id_y = blockIdx.y * blockDim.y + threadIdx.y; /* column */
    /* Map the pixel to its point c = a + i*b of the complex plane. */
    const double a = x_min + id_y * dx;
    const double b = y_max - id_x * dy;
    double x = 0, y = 0;
    int i;
    /* Iterate z <- z^2 + c until divergence (|z|^2 > 4) or nb_iter steps. */
    for (i = 0; i < nb_iter; ++i) {
        const double x_new = x * x - y * y + a;
        y = 2 * x * y + b;      /* uses the previous x */
        x = x_new;
        if (x * x + y * y > 4) /* Divergence ! */
            break;
    }
    /* Grey level proportional to the number of surviving iterations. */
    pixels [id_x*colnum+id_y] = (double) i / nb_iter * 255;
}
/* Entry point: parse the options, allocate the image, render the Mandelbrot
 * set on the GPU, and save it as a PPM file. */
int main (int argc, char * * argv) {
int nb_iter, width, height; /* Degree of precision, dimensions of the image */
double x_min, x_max, y_min, y_max; /* Bounds of representation */
char * path; /* File destination */
Image im;
analyzis(argc, argv, & nb_iter, & x_min, & x_max, & y_min, & y_max, & width, & height, & path);
initialization (& im, width, height);
Compute (& im, nb_iter, x_min, x_max, y_min, y_max);
save (& im, path);
return 0 ; /* im.pixels is reclaimed at process exit */
}
|
21,794 | #include <cstdlib>
#include <cstdio>
// Writes the constant 5 into the first n elements of arr.
// One thread per element; surplus threads fall through the guard.
__global__ void kernel(int* arr,int n){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        arr[i] = 5;
    }
}
// Fills a device array of n ints with 5 via the kernel above, copies it back
// and prints it.  Returns non-zero on any allocation/CUDA failure.
__host__ int main(int argc,char* argv[]){
    const int n=100;
    const size_t size=n*sizeof(int);
    int* arr=(int*)malloc(size);
    // Fix: malloc result was previously used unchecked.
    if(arr==NULL){
        fprintf(stderr,"malloc of %zu bytes failed\n",size);
        return 1;
    }
    int* cuArr=NULL;
    // Fix: CUDA API return codes were previously ignored throughout.
    cudaError_t err=cudaMalloc((void**)&cuArr,size);
    if(err!=cudaSuccess){
        fprintf(stderr,"cudaMalloc: %s\n",cudaGetErrorString(err));
        free(arr);
        return 1;
    }
    // Fix: ceil-division launch covers any n (the block count was hard-coded
    // to 2, which only happened to work for n=100 with 64 threads).
    const int threadsPerBlock=64;
    const int blocks=(n+threadsPerBlock-1)/threadsPerBlock;
    kernel<<<blocks,threadsPerBlock>>>(cuArr,n);
    err=cudaGetLastError();     // launch-configuration errors are not returned directly
    if(err!=cudaSuccess){
        fprintf(stderr,"kernel launch: %s\n",cudaGetErrorString(err));
        cudaFree(cuArr);
        free(arr);
        return 1;
    }
    // Blocking copy: also synchronizes with the kernel.
    err=cudaMemcpy(arr,cuArr,size,cudaMemcpyDeviceToHost);
    if(err!=cudaSuccess){
        fprintf(stderr,"cudaMemcpy: %s\n",cudaGetErrorString(err));
        cudaFree(cuArr);
        free(arr);
        return 1;
    }
    cudaFree(cuArr);
    for(int i=0;i<n;i++){
        printf("%d ",arr[i]);
    }
    printf("\n");
    free(arr);
    return 0;
}
|
// 1-D correlation with zero padding:
//   res[i] = sum_{k=0}^{m-1} mask[k] * arr[i - c + k]
// where out-of-range source elements contribute 0.  c is the mask centre
// offset; one thread (indexed along y) per output element.
__global__ void conv1D(int *arr,int *mask,int *res,int n,int m,int c){
    const int i = blockIdx.y*blockDim.y+threadIdx.y;
    if (i >= n) {
        return;
    }
    int acc = 0;                 // accumulate in a register, write once
    int src = i - c;             // first source index touched by the mask
    for (int k = 0; k < m; ++k, ++src) {
        if (src >= 0 && src < n) {
            acc += mask[k] * arr[src];
        }
    }
    res[i] = acc;
}
// 2-D correlation with zero padding.
//   arr  : n1 x n2 input, row-major (n2 columns per row)
//   mask : m1 x m2 kernel, row-major (m2 columns per row)
//   res  : n1 x n2 output, row-major
//   c1/c2: row/column offset of the mask centre
// One thread per output element; idi (y) indexes rows, idj (x) columns.
__global__ void conv2D(int *arr,float *mask,float *res,int n1,int n2, int m1,int m2,int c1,int c2){
    int idi = blockIdx.y*blockDim.y+threadIdx.y;  // row,    bounded by n1
    int idj = blockIdx.x*blockDim.x+threadIdx.x;  // column, bounded by n2
    if(idi<n1 && idj<n2){
        float acc = 0.0f;       // accumulate in a register, write once
        int srcRow = idi - c1;
        for(int a=0;a<m1;a++,srcRow++){
            int srcCol = idj - c2;
            for(int b=0;b<m2;b++,srcCol++){
                if(srcRow>=0 && srcRow<n1 && srcCol>=0 && srcCol<n2){
                    // Fix: row strides.  Rows of arr/res are n2 elements wide
                    // and rows of mask are m2 wide; the original indexed with
                    // n1/m1, which is only correct for square shapes.
                    acc += mask[a*m2+b]*arr[srcRow*n2+srcCol];
                }
            }
        }
        res[idi*n2+idj] = acc;
    }
}
|
21,796 | #include "includes.h"
// Accumulates the squared distance of sample s1 into localError[s1].
// Exactly one thread performs the update, so no atomics are needed.
// NOTE(review): the predicate ignores threadIdx.y/z -- assumes 1-D thread
// blocks; with blockDim.y > 1 several threads would race on the update.
__global__ void AddLocalErrorKernel( int s1, float *distance, float *localError )
{
    // Equivalent to the flattened-id test "threadId < 1": true only for
    // thread 0 of block (0, 0).
    const bool firstThread =
        (blockIdx.x == 0) && (blockIdx.y == 0) && (threadIdx.x == 0);
    if (firstThread)
    {
        const float d = distance[s1];
        localError[s1] += d * d;
    }
}
21,797 | #include "includes.h"
// Attempted inclusive prefix sum: a Kogge-Stone scan within each block,
// followed by an ad-hoc cross-block combination pass.
// NOTE(review): this kernel has several correctness hazards, flagged inline
// below -- it depends on BLOCK_SIZE from includes.h, reads other blocks'
// global results without any grid-wide synchronization, performs unguarded
// global writes for i >= N, and its in-block scan uses a single barrier per
// level.  Treat it as broken until those are resolved.
__global__ void inclusive_scan(const unsigned int *X, unsigned int *Y, int N)
{
extern __shared__ int XY[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
if(i<N)
{
XY[threadIdx.x] =X[i];
}
// NOTE(review): XY[threadIdx.x] stays uninitialized when i >= N, yet the
// scan below still reads it.
/* Kogge-Stone step: threads with threadIdx.x < stride already hold their
   final value and drop out of the loop. */
for(unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
// NOTE(review): the read of XY[threadIdx.x - stride] races with that
// slot's owner writing it in the same phase; a correct Kogge-Stone step
// needs a temporary plus a second __syncthreads per level.
XY[threadIdx.x]+= XY[threadIdx.x - stride];
}
/* Store each thread's in-block inclusive prefix sum to global memory. */
Y[i]=XY[threadIdx.x]; // NOTE(review): unguarded write when i >= N
// wait until all threads of this block have written their block-local sums
__syncthreads();
if (threadIdx.x < blockIdx.x) //for 1st block onwards
{
// Cache the last element (block total) of each preceding block.
// NOTE(review): BLOCK_SIZE (from includes.h) must equal blockDim.x, and
// the preceding blocks' Y values are only valid if those blocks already
// finished -- CUDA gives no such ordering guarantee without a grid sync.
XY[threadIdx.x] = Y[threadIdx.x * blockDim.x + BLOCK_SIZE - 1];
}
__syncthreads();
for (int stride = 0; stride < blockIdx.x; stride++)
{ // add all previous blocks' last elements to this block's elements
Y[threadIdx.x + blockDim.x * blockIdx.x] += XY[stride];
__syncthreads();
}
} |
21,798 | #include "includes.h"
#pragma comment(lib,"cublas.lib")
using namespace std;
//==============================Function Prototypes================================
double getRand();
// Weight update: W[j][k] += N * D[k] * N_out[j] with a hard-coded learning
// rate of 0.1.  Expects a 2-D launch where (pos.x, pos.y) spans the weight
// matrix; there is no bounds check, so the grid must match the matrix
// dimensions exactly.
__global__ void weightUpdate(float *d_W,float *d_D,float *d_N){
int2 pos;
pos.x = blockIdx.x*blockDim.x + threadIdx.x;//row j
pos.y = blockIdx.y*blockDim.y + threadIdx.y;//column k
// Row-major flatten of (pos.x, pos.y).  NOTE(review): this takes the row
// stride to be blockDim.x*gridDim.y; a row-major layout would normally use
// the column count blockDim.y*gridDim.y -- confirm against the launch
// configuration before relying on this indexing.
int n = pos.x*blockDim.x*gridDim.y + pos.y;
float N = 0.1; // learning rate (unfortunately named: easily confused with the d_N array)
d_W[n] = d_W[n] + N*d_D[pos.y] * d_N[pos.x];
} |
21,799 | #include <cuda_runtime.h>
#include <cstddef>
#include <sys/time.h>
#include <iostream>
#include <vector>
// Terminates the process with the CUDA error message when err reports a
// failure; a success status falls straight through.
void checkError( cudaError_t err)
{
    if (err == cudaSuccess)
        return;
    std::cout << cudaGetErrorString(err) << std::endl;
    exit(-1);
}
//global is a kernel: global cannot be called from host, but can be called from functions
// Element-wise device copy: C[idx] = A[idx] for idx < N.
// One thread per element; surplus threads fall through the guard.
// NOTE(review): B is accepted and uploaded by main() but never read here --
// this is a plain copy kernel, consistent with main() verifying C against
// A's fill value (1).
__global__ void kernel(int*A, int* B, int* C, long long N)
{
// flat global thread index; all of blockIdx/blockDim/threadIdx start at zero
long long idx = blockIdx.x * blockDim.x+ threadIdx.x; //all these indexes start at zero;
//cuda launching kernels is possible in 2D/3D as well;
if(idx < N)
C[idx] = A[idx];
}
// Wall-clock time in seconds since the Unix epoch, with microsecond
// resolution (gettimeofday based).
double getSeconds()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + (double) now.tv_usec * 1e-6;
}
// Allocates three 2^24-element int vectors, uploads A and B, launches a copy
// kernel (C[i] = A[i]), times launch + synchronize, then verifies every
// element of C on the host.
int main()
{
const long long nElem = 1 << 24;
//std::cout << nElem << std::endl;
std::vector<int> A(nElem, 1);
std::vector<int> B(nElem, 1); // uploaded even though the copy kernel never reads it
std::vector<int> C(nElem, 0);
double start, end;
const long long nBytes = nElem * sizeof(int);
std::cout << nBytes * 1e-6 << std::endl; // buffer size in MB
/* Allocate device memory.  cudaMalloc behaves like malloc: it hands back a
** pointer to the first byte of the allocation; device and host pointers are
** syntactically identical, hence the d_ naming convention. */
int* d_A;
int* d_B;
int* d_C;
checkError(cudaMalloc(&d_A, nBytes));
checkError(cudaMalloc(&d_B, nBytes));
checkError(cudaMalloc(&d_C, nBytes));
checkError(cudaMemcpy(d_A, &A[0], nBytes, cudaMemcpyHostToDevice)); // A/B/C live in host memory
checkError(cudaMemcpy(d_B, &B[0], nBytes, cudaMemcpyHostToDevice));
start = getSeconds();
kernel<<< (1 << 14), (1 << 10) >>> (d_A, d_B, d_C, nElem); // 2^14 blocks x 2^10 threads = 2^24 = nElem exactly
checkError( cudaPeekAtLastError() ); // launch-configuration errors (peek keeps the error pending)
checkError(cudaDeviceSynchronize()); // without this sync, only the (async) launch time would be measured
end = getSeconds();
std::cout << "time is " << end - start << std::endl; // kernel wall time, thanks to the synchronize above
// checkError should wrap every CUDA call.
checkError(cudaMemcpy(&C[0], d_C, nBytes, cudaMemcpyDeviceToHost)); // destination : source : size : direction
// Host-side verification: every element must equal A's fill value.
for (auto c : C)
{
if (c!=1)
{
std::cout << "error" << std::endl;
exit(123);
}
}
checkError(cudaFree(d_A));
checkError(cudaFree(d_B));
checkError(cudaFree(d_C));
}
//cuda: nvidia compiler: compiles the kernel
//()nvcc -{to be safe}std=c++11 -{compute-capability}arch=sm_20 source.cu && ./a.out
|
21,800 | #include <bits/stdc++.h>
using namespace std;
const int MAXX = 1e8;
__constant__ int4 avg_dev[32];
__constant__ double cov_inv_dev[32][3][3];
__constant__ double dets_dev[32];
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
struct pnt {
int x, y;
};
// Per-pixel maximum-likelihood classification of a w x h RGBA image.
// For each pixel, scores every one of the nc classes from the statistics
// staged in __constant__ memory (avg_dev: per-class RGB means, cov_inv_dev:
// inverse covariances, dets_dev: covariance determinants) and stores the
// index of the best-scoring class in the pixel's alpha channel.
// Covers the image with a 2-D grid-stride loop, so any grid/block shape is
// valid.
__global__ void kernel(uchar4 *data, int w, int h, int nc) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int offsetx = blockDim.x * gridDim.x; // grid stride along x
int offsety = blockDim.y * gridDim.y; // grid stride along y
int x, y, i, j, k;
uchar4 ps;
for (y = idy; y < h; y += offsety) {
for (x = idx; x < w; x += offsetx) {
ps = data[y * w + x];
double mx = -MAXX; // best score so far
int idx = -1; // best class index -- NOTE(review): shadows the outer idx
for (i = 0; i < nc; ++i) {
// diff = pixel - class mean (per RGB channel)
int diff[3];
diff[0] = ps.x - avg_dev[i].x;
diff[1] = ps.y - avg_dev[i].y;
diff[2] = ps.z - avg_dev[i].z;
// tmp = diff^T * Sigma^{-1}
double tmp[3];
for (j = 0; j < 3; ++j) {
tmp[j] = 0;
for (k = 0; k < 3; ++k) {
tmp[j] += (diff[k] * cov_inv_dev[i][k][j]);
}
}
// ans = diff^T * Sigma^{-1} * diff (squared Mahalanobis distance)
double ans = 0;
for (j = 0; j < 3; ++j) {
ans += (tmp[j] * diff[j]);
}
// Log-likelihood up to an additive constant: -distance - log|Sigma|
ans = -ans - log(abs(dets_dev[i]));
if (ans > mx) {
mx = ans;
idx = i;
}
}
data[y * w + x].w = idx; // winning class id goes into the alpha channel
}
}
}
// Reads a binary image (int w, int h, then w*h uchar4 pixels), reads nc
// classes of labelled training points from stdin, estimates per-class RGB
// mean / covariance / inverse covariance on the host, stages the statistics
// in __constant__ memory, classifies every pixel on the GPU (class id in the
// alpha channel, kernel timed with CUDA events) and writes the result back
// out in the same binary format.
int main() {
    int w, h;
    char inputFile[256], outputFile[256];
    cin >> inputFile >> outputFile;
    FILE *fp = fopen(inputFile, "rb");
    // Fix: fread on a NULL stream crashed when the input path was bad.
    if (fp == NULL) {
        fprintf(stderr, "ERROR: cannot open %s\n", inputFile);
        return 1;
    }
    fread(&w, sizeof(int), 1, fp);
    fread(&h, sizeof(int), 1, fp);
    uchar4 *data = (uchar4 *) malloc(sizeof(uchar4) * w * h);
    fread(data, sizeof(uchar4), w * h, fp);
    fclose(fp);
    int nc, np;
    cin >> nc;
    vector<vector<pnt>> classes(nc);
    int4 avg[32];
    // Fix: cov is accumulated with += below, so it must start zeroed; it was
    // previously read uninitialized (undefined behavior).
    double cov[32][3][3] = {};
    double cov_inv[32][3][3];
    double dets[32];
    for (int i = 0; i < nc; ++i) {
        cin >> np;
        classes[i].resize(np);
        // Read the class's training points and accumulate the channel sums
        // for the per-class RGB mean.
        long long xx = 0, yy = 0, zz = 0;
        for (int j = 0; j < np; ++j) {
            cin >> classes[i][j].x >> classes[i][j].y;
            uchar4 ps = data[classes[i][j].y * w + classes[i][j].x];
            xx += ps.x;
            yy += ps.y;
            zz += ps.z;
        }
        xx /= np;
        avg[i].x = xx;
        yy /= np;
        avg[i].y = yy;
        zz /= np;
        avg[i].z = zz;
        cout << avg[i].x << " " << avg[i].y << " " << avg[i].z << "\n";
        // Sample covariance of the class (unbiased, divided by np - 1).
        for (int j = 0; j < np; ++j) {
            uchar4 ps = data[classes[i][j].y * w + classes[i][j].x];
            int diff[3];
            diff[0] = ps.x - avg[i].x;
            diff[1] = ps.y - avg[i].y;
            diff[2] = ps.z - avg[i].z;
            for (int k = 0; k < 3; ++k) {
                for (int m = 0; m < 3; ++m) {
                    cov[i][k][m] += diff[k] * diff[m];
                }
            }
        }
        for (int k = 0; k < 3; ++k) {
            for (int m = 0; m < 3; ++m) {
                cov[i][k][m] /= (np - 1);
            }
        }
        // Inverse covariance via the adjugate / determinant of the 3x3.
        double det = cov[i][0][0] * (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2])
            - cov[i][0][1] * (cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2])
            + cov[i][0][2] * (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]);
        cov_inv[i][0][0] = (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) / det;
        cov_inv[i][1][0] = -(cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2]) / det;
        cov_inv[i][2][0] = (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]) / det;
        cov_inv[i][0][1] = -(cov[i][0][1] * cov[i][2][2] - cov[i][2][1] * cov[i][0][2]) / det;
        cov_inv[i][1][1] = (cov[i][0][0] * cov[i][2][2] - cov[i][2][0] * cov[i][0][2]) / det;
        cov_inv[i][2][1] = -(cov[i][0][0] * cov[i][2][1] - cov[i][2][0] * cov[i][0][1]) / det;
        cov_inv[i][0][2] = (cov[i][0][1] * cov[i][1][2] - cov[i][1][1] * cov[i][0][2]) / det;
        cov_inv[i][1][2] = -(cov[i][0][0] * cov[i][1][2] - cov[i][1][0] * cov[i][0][2]) / det;
        cov_inv[i][2][2] = (cov[i][0][0] * cov[i][1][1] - cov[i][1][0] * cov[i][0][1]) / det;
        dets[i] = det;
    }
    // Stage the image and the class statistics on the device.
    uchar4 *dev_data;
    CSC(cudaMalloc(&dev_data, sizeof(uchar4) * w * h));
    CSC(cudaMemcpy(dev_data, data, sizeof(uchar4) * w * h, cudaMemcpyHostToDevice));
    // Fix: avg_dev is int4[32] (512 bytes); the previous size expression
    // sizeof(double) * 32 * 3 (768 bytes) overflowed the symbol, making
    // cudaMemcpyToSymbol fail with invalid-value.
    CSC(cudaMemcpyToSymbol(avg_dev, avg, sizeof(int4) * 32));
    CSC(cudaMemcpyToSymbol(cov_inv_dev, cov_inv, sizeof(double) * 32 * 3 * 3));
    CSC(cudaMemcpyToSymbol(dets_dev, dets, sizeof(double) * 32));
    // Classify, timing the kernel with events.
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel<<<dim3(16, 16), dim3(16, 16)>>>(dev_data, w, h, nc);
    CSC(cudaGetLastError()); // fix: kernel launch errors were previously silent
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    fprintf(stderr, "%.2f\n", time);
    cudaEventDestroy(stop);
    cudaEventDestroy(start);
    CSC(cudaMemcpy(data, dev_data, sizeof(uchar4) * h * w, cudaMemcpyDeviceToHost));
    // Write the classified image in the same binary format.
    fp = fopen(outputFile, "wb");
    if (fp == NULL) {
        fprintf(stderr, "ERROR: cannot open %s\n", outputFile);
        return 1;
    }
    fwrite(&w, sizeof(int), 1, fp);
    fwrite(&h, sizeof(int), 1, fp);
    fwrite(data, sizeof(uchar4), w * h, fp);
    fclose(fp);
    cudaFree(dev_data);
    free(data);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.