serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
14,201 | #include <iostream>
#include <chrono>
#include "png.cuh"
#include "constants.cuh"
#include "mandelbrot.cuh"
#define ZOOM_FACTOR 1.01
using namespace std::chrono; // Prevent the hassle of having this long prefix
// Render an animated Mandelbrot zoom: each frame translates a SIZE x SIZE
// grid of complex coordinates, runs the escape-time kernel, colorizes the
// iteration counts and writes the frame as a PNG.
int main() {
    // Escape-time iteration budget; grown every frame so deep zooms keep detail.
    int max_loop = 100;
    // Storage for iteration result out of the Mandelbrot function
    int *iteration_table;
    // Storage for pixel representation (3 bytes per pixel: RGB)
    unsigned char *pixel_table;
    // Storages for real and imaginary coordinates to process
    double *r, *i;
    // Unified-memory buffers shared between host (PNG writer) and device kernels.
    cudaMallocManaged(&iteration_table, SIZE * SIZE * sizeof(int));
    cudaMallocManaged(&pixel_table, 3 * SIZE * SIZE * sizeof(unsigned char));
    cudaMallocManaged(&r, SIZE * SIZE * sizeof(double));
    cudaMallocManaged(&i, SIZE * SIZE * sizeof(double));
    // SIZE x SIZE grid of single-thread blocks for the GPU kernels.
    dim3 blockSize = dim3(SIZE, SIZE);
    // Width of the real view
    double zoom = 2.;
    unsigned int frame = 0;
    // Fast-forward the zoom / iteration state over the skipped frames
    // without rendering them.
    steady_clock::time_point preframe = steady_clock::now();
    for (; frame < 8000; frame++) {
        zoom = zoom / ZOOM_FACTOR;
        max_loop += 2;
    }
    std::cout << "Prepared frame to " << frame << " in "
              << duration_cast<microseconds>(steady_clock::now() - preframe).count() << "us" << std::endl;
    // Render frames until the max frame allowed
    for (; frame < 100000; frame++) {
        steady_clock::time_point begin = steady_clock::now();
        // Fill the table with initial coordinates
        // translate<<<blockSize, 1>>>(r, i, zoom, zoom, -1.7499, 0.); // Another interesting zoom point
        // translate<<<blockSize, 1>>>(r, i, zoom, zoom, -0.16070135, 1.0375665); // Another interesting zoom point
        translate<<<blockSize, 1>>>(r, i, zoom, zoom, 0.281717921930775, 0.5771052841488505);
        cudaDeviceSynchronize();
        steady_clock::time_point translated = steady_clock::now();
        // Compute Mandelbrot for each pixel
        mandelbrot<<<blockSize, 1>>>(iteration_table, r, i, max_loop);
        cudaDeviceSynchronize();
        steady_clock::time_point mandelbrotT = steady_clock::now();
        // Transform the iterations into beautiful colors
        colorize<<<blockSize, 1>>>(pixel_table, iteration_table, max_loop);
        cudaDeviceSynchronize();
        steady_clock::time_point color = steady_clock::now();
        // Store the image into a PNG
        write_frame(pixel_table, frame, SIZE);
        steady_clock::time_point end = steady_clock::now();
        // Print timings
        std::cout << "\rDone at zoom " << zoom << " frame " << frame
                  << ". Rendered in " << duration_cast<milliseconds>( end - begin ).count() << "ms"
                  << ", translated in " << duration_cast<microseconds>( translated - begin ).count() << "us"
                  << ", mandelbrot in " << duration_cast<milliseconds>( mandelbrotT - translated ).count() << "ms"
                  << ", colorized in " << duration_cast<microseconds>( color - mandelbrotT ).count() << "us"
                  << ", stored in " << duration_cast<milliseconds>( end - color ).count() << "ms"
                  << std::flush;
        // Zoom more
        zoom = zoom / ZOOM_FACTOR;
        max_loop += 2;
    }
    // Write 3 more frames duplicating the last rendered image.
    write_frame(pixel_table, ++frame, SIZE);
    write_frame(pixel_table, ++frame, SIZE);
    write_frame(pixel_table, frame + 1, SIZE);
    // BUG FIX: release the managed allocations (previously leaked).
    cudaFree(iteration_table);
    cudaFree(pixel_table);
    cudaFree(r);
    cudaFree(i);
    return 0;
}
|
14,202 | #pragma once
#include <stdlib.h>
#include <math.h>
#include <iostream>
__device__ const double PI = 3.141592653589793238462643; // pi, double precision
// Minimal complex-number type. The W/operator members are device-side; the
// default constructor and the static GetComplex/GetRandom* factories have no
// __device__ qualifier and are host-side helpers built on rand().
class Complex {
public:
double real;
double imag;
// Wn: nth root of unity, cos(2*pi/n) + i*sin(2*pi/n)
__device__ static Complex W(int n) {
Complex res(cos(2.0 * PI / n), sin(2.0 * PI / n));
return res;
}
// Wn^k: kth power of the nth root of unity
__device__ static Complex W(int n, int k) {
Complex res(cos(2.0 * PI * k / n), sin(2.0 * PI * k / n));
return res;
}
// Default: members left uninitialized (used by the host factories below).
Complex() {
}
// Host factory: build a complex from explicit parts.
static Complex GetComplex(double real, double imag) {
Complex r;
r.real = real;
r.imag = imag;
return r;
}
// Host factory: random complex. NOTE(review): rand() in the denominator may
// return 0, producing a division by zero — confirm callers tolerate this.
static Complex GetRandomComplex() {
Complex r;
r.real = (double)rand() / rand();
r.imag = (double)rand() / rand();
return r;
}
// Host factory: random value on the real axis (imag = 0).
static Complex GetRandomReal() {
Complex r;
r.real = (double)rand() / rand();
r.imag = 0;
return r;
}
// Host factory: random value on the imaginary axis (real = 0).
static Complex GetRandomPureImag() {
Complex r;
r.real = 0;
r.imag = (double)rand() / rand();
return r;
}
__device__ Complex(double real, double imag) {
this->real = real;
this->imag = imag;
}
// Component-wise complex addition.
__device__ Complex operator+(const Complex &other) {
Complex res(this->real + other.real, this->imag + other.imag);
return res;
}
// Component-wise complex subtraction.
__device__ Complex operator-(const Complex &other) {
Complex res(this->real - other.real, this->imag - other.imag);
return res;
}
// Complex product: (a+bi)(c+di) = (ac-bd) + (bc+ad)i.
__device__ Complex operator*(const Complex &other) {
Complex res(this->real * other.real - this->imag * other.imag, this->imag * other.real + this->real * other.imag);
return res;
}
};
|
14,203 | #include "includes.h"
// Fill x[0..n) with the descending sequence n, n-1, ..., 1.
// Expects a 1-D launch; out-of-range threads return immediately.
__global__ void init_data_kernel( int n, double* x)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    x[idx] = n - idx;
}
14,204 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Kernel that executes on the CUDA device
//__global__ void square_array(float *a, int N)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// if (idx<N) a[idx] = a[idx] * a[idx];
//}
// main routine that executes on the host
// Host-only prototype of randomized coordinate descent for an L1-regularized
// least-squares problem on a random sparse matrix A (n columns, p non-zeros
// each, m rows). The CUDA copies are staged but unused by the computation.
int main(void) {
    float lambda = 20;   // L1 regularization weight
    int n = 10;          // number of columns / coordinates
    int p = 2;           // non-zeros per column
    int m = 10;          // rows (gradient length)
    int i, j;
    float *A_h[n];       // host A matrix pointers (length-p value vector per column)
    int *IDX_h[n];       // host A index pointers (row index of each non-zero)
    float *A_d[n];       // device copies of the value vectors
    int *IDX_d[n];       // device copies of the index vectors
    for (i = 0; i < n; i++) {
        float *vec = (float *) malloc(p * sizeof(float));
        int *vec_idx = (int *) malloc(p * sizeof(int));
        // Random sparse column: p (row, value) pairs.
        for (j = 0; j < p; j++) {
            int idx = (int) (m * (rand() / (RAND_MAX + 1.0)));
            float val = (float) rand() / RAND_MAX;
            vec[j] = val;
            vec_idx[j] = idx;
        }
        A_h[i] = vec;
        IDX_h[i] = vec_idx;
        float *vec_d;
        int *vec_idx_d;
        cudaMalloc((void **) &vec_d, p * sizeof(float));
        cudaMalloc((void **) &vec_idx_d, p * sizeof(int));
        cudaMemcpy(vec_d, vec, sizeof(float) * p, cudaMemcpyHostToDevice);
        cudaMemcpy(vec_idx_d, vec_idx, sizeof(int) * p, cudaMemcpyHostToDevice);
        // BUG FIX: the device pointers were dropped (leaked) before; keep them
        // so they can be freed below.
        A_d[i] = vec_d;
        IDX_d[i] = vec_idx_d;
    }
    puts("idem generovat x_0\n"); /* prints !!!Hello World!!! */
    float x_h[n];    // current iterate
    float L_h[n];    // per-coordinate Lipschitz constants ||A_i||^2
    float Li_h[n];   // their inverses
    for (i = 0; i < n; i++) {
        float val = (float) rand() / RAND_MAX;
        x_h[i] = val;
        L_h[i] = 0;
    }
    float gradient_h[m];
    for (i = 0; i < m; i++) {
        gradient_h[i] = 0;
    }
    // Accumulate L_i = sum_j A_ij^2 and the initial gradient g = A x.
    for (i = 0; i < n; i++) {
        float *vector = A_h[i];
        int *vector_idx = IDX_h[i];
        for (j = 0; j < p; j++) {
            L_h[i] += vector[j] * vector[j];
            gradient_h[vector_idx[j]] = gradient_h[vector_idx[j]] + (x_h[i])
                    * (vector[j]);
        }
    }
    for (i = 0; i < m; i++) {
        printf("gradient value: %d %f\n", i, gradient_h[i]);
    }
    // Compute Li (inverse Lipschitz constants).
    for (i = 0; i < n; i++) {
        Li_h[i] = 1 / L_h[i];
        printf("Li: %d %f xi: %f\n", i, Li_h[i], x_h[i]);
    }
    for (i = 0; i < n; i++) {
        printf("x[%d] = %f \n", i, x_h[i]);
    }
    // 10 iterations of randomized coordinate descent with soft-threshold step.
    for (i = 0; i < 10; i++) {
        int coordinate = (int) (n * (rand() / (RAND_MAX + 1.0)));
        float *vector = A_h[coordinate];
        int *vector_idx = IDX_h[coordinate];
        float alpha = 0;
        // BUG FIX: the column values were indexed with vector[vector_idx[j]]
        // (row indices up to m-1) although vector has only p entries — an
        // out-of-bounds read. The directional derivative is A_i . g restricted
        // to the column's non-zeros: vector[j] * g[vector_idx[j]].
        for (j = 0; j < p; j++) {
            alpha += vector[j] * gradient_h[vector_idx[j]];
        }
        // Minimize alpha*d + 0.5*L*d^2 + lambda*|x+d| (soft-threshold cases).
        double delta = -(alpha + lambda) * Li_h[coordinate];
        if (x_h[coordinate] + delta < 0) {
            delta = -(alpha - lambda) * Li_h[coordinate];
            if (x_h[coordinate] + delta > 0) {
                delta = -x_h[coordinate];
            }
        }
        // Maintain g += delta * A_i and apply the step.
        for (j = 0; j < p; j++) {
            gradient_h[vector_idx[j]] += delta * vector[j];
        }
        x_h[coordinate] += delta;
    }
    for (i = 0; i < n; i++) {
        printf("x[%d] = %f \n", i, x_h[i]);
    }
    // BUG FIX: cudaFree(A_d)/cudaFree(IDX_d) passed host stack arrays to the
    // CUDA allocator; free the individual device vectors and the host buffers.
    for (i = 0; i < n; i++) {
        cudaFree(A_d[i]);
        cudaFree(IDX_d[i]);
        free(A_h[i]);
        free(IDX_h[i]);
    }
}
|
14,205 | #include <cuda.h>
#define SIZE 16
#define BLOCKSIZE 4
extern "C" void outer_compute(int *in_arr, int *out_arr);
// Equality indicator: 1 when the two values match, 0 otherwise.
__device__ int compare(int a, int b) {
    return (a == b) ? 1 : 0;
}
// Each of the BLOCKSIZE threads scans a strided subset of d_in (stride
// BLOCKSIZE) and writes to d_out[tid] the count of elements equal to 6.
__global__ void compute(int *d_in,int *d_out) {
    int tid = threadIdx.x;
    d_out[tid] = 0;
    for (int chunk = 0; chunk < SIZE / BLOCKSIZE; chunk++)
        d_out[tid] += compare(d_in[chunk * BLOCKSIZE + tid], 6);
}
// Host wrapper: copies SIZE ints to the device, launches one block of
// BLOCKSIZE threads to count occurrences of 6, copies the per-thread counts
// back into h_out_array (BLOCKSIZE entries).
void outer_compute(int *h_in_array, int *h_out_array) {
    int *d_in_array, *d_out_array;
    /* allocate memory for device copies, and copy input to device */
    cudaMalloc((void **) &d_in_array, SIZE*sizeof(int));
    cudaMalloc((void **) &d_out_array, BLOCKSIZE*sizeof(int));
    cudaMemcpy(d_in_array, h_in_array, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    /* count appearances of 6 for a subset of data in each thread
       (FIX: the old comment said 8, but the kernel compares against 6) */
    compute<<<1,BLOCKSIZE,0>>>(d_in_array, d_out_array);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    cudaMemcpy(h_out_array, d_out_array, BLOCKSIZE*sizeof(int), cudaMemcpyDeviceToHost);
    // FIX: release the device buffers (previously leaked on every call).
    cudaFree(d_in_array);
    cudaFree(d_out_array);
}
|
14,206 | #include <iostream>
using namespace std;
// Naive dense matrix multiply c = a * b for n x n row-major int matrices.
// One thread per output element: row from the y dimensions, col from x.
// No bounds check — the launch must cover exactly n x n threads.
__global__ void matMul(int *a, int *b, int *c, int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int acc = 0;
    for (int k = 0; k < n; k++)
        acc += a[row * n + k] * b[k * n + col];
    c[n * row + col] = acc;
}
// Read n, build two n x n test matrices (a: constant rows, b: constant
// columns), multiply them on the GPU and print the result.
int main(){
    int n;
    cin >> n;
    int *a = new int[n*n];
    int *b = new int[n*n];
    int *c = new int[n*n];
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            a[i*n+j] = i+1;
            b[i*n+j] = j+1;
        }
    }
    int *ad, *bd, *cd;
    cudaMalloc(&ad, n*n*sizeof(int));
    cudaMalloc(&bd, n*n*sizeof(int));
    cudaMalloc(&cd, n*n*sizeof(int));
    cudaMemcpy(ad, a, n*n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, n*n*sizeof(int), cudaMemcpyHostToDevice);
    // n x n grid of single-thread blocks: one thread per output element.
    dim3 grids(n, n, 1);
    matMul<<<grids, 1>>>(ad, bd, cd, n);
    // Blocking copy — also synchronizes with the kernel.
    cudaMemcpy(c, cd, n*n*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            cout<<c[i*n+j]<<" ";
        }
        cout<<endl;
    }
    // BUG FIX: release device and host allocations (previously leaked).
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
14,207 | /*
* Geom.cc
*
* Created on: Feb 1, 2014
* Author: reid
*/
#include "Geom.h"
// Test a point (pX, pY) against the circle at (cOrgX, cOrgY) with radius cRad,
// with tolerance eps.
template <typename T> __host__ __device__ bool withinCircle(T cOrgX, T cOrgY, T cRad, T pX, T pY, T eps) {
// Squared component distances from the circle origin to the point.
T distX = cOrgX - pX;
T distY = cOrgY - pY;
distX *= distX;
distY *= distY;
// NOTE(review): this is true when eps^2 > r^2 - d^2, i.e. for points near or
// OUTSIDE the boundary — the name suggests d^2 <= r^2 (+tolerance) was
// intended. Confirm against callers before changing.
return eps * eps > cRad * cRad - distX - distY;
}
|
14,208 | /*
* Copyright 2019 Australian National University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either or express implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* keywordfinder.c - finds key words in a large file
*
* Created on: 19 Feb. 2019
* Author: Eric McCreath
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
// this macro checks for errors in cuda calls
#define Err(ans) \
{ gpucheck((ans), __FILE__, __LINE__); }
// Abort the process with a readable diagnostic when a CUDA API call fails.
// Used via the Err(...) macro, which supplies __FILE__/__LINE__.
inline void gpucheck(cudaError_t code, const char *file, int line) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPU Err: %s %s %d\n", cudaGetErrorString(code), file,
            line);
    exit(code);
}
// err - print a fatal diagnostic and terminate with failure status.
void err(const char *str) {
printf("error : %s\n", str);
exit(1);
}
// get the size of a file, allocate pinned memory, and load the file into memory
// get the size of a file, allocate pinned memory, and load the file into memory.
// Returns the file size in bytes; *data receives a cudaMallocHost'd buffer
// (caller frees with cudaFreeHost).
int loadfile(char *str, char **data) {
    struct stat filestat;
    if (stat(str, &filestat) == -1)
        err("problem stating file");
    FILE *file;
    if ((file = fopen(str, "r")) == NULL)
        err("problem opening file");
    Err(cudaMallocHost(data, filestat.st_size));
    // BUG FIX: the fread result was ignored and the stream was never closed
    // (descriptor leak). A short read now aborts with a diagnostic.
    if (filestat.st_size > 0 && fread(*data, filestat.st_size, 1, file) != 1)
        err("problem reading file");
    fclose(file);
    return filestat.st_size;
}
// count - used for counting the number of "marker"s in some text
// count - number of occurrences of "marker" in the first "size" bytes of "data".
int count(char marker, char *data, int size) {
    int total = 0;
    for (int k = 0; k < size; k++)
        total += (data[k] == marker) ? 1 : 0;
    return total;
}
struct find {
int pos;
int word;
};
// check - determine if "word" matches within "data" starting at "pos".
// the "word" is assumed to be null terminated
// check - determine if "word" matches within "data" starting at "pos".
// The "word" is assumed to be null terminated. Returns 1 on a match.
__device__ int check(char *data, int datasize, int pos, char *word) {
    int i = 0;
    // BUG FIX: the bounds test was evaluated AFTER data[pos + i] was read,
    // so the comparison could read one byte past the end of "data".
    // Testing pos + i < datasize first prevents the out-of-bounds access
    // while leaving the match result unchanged.
    while (pos + i < datasize && word[i] != 0 && data[pos + i] == word[i]) {
        i++;
    }
    if (word[i] == 0)
        return 1;
    return 0;
}
// findkeywords - search for the keywords within the "data"
__global__ void findkeywords(char *data, int datasize, char *keywords,
int *wordsindex, int numwords,
struct find *finds, int maxfinds,
int *findcount) {
// NOTE: launched <<<1,1>>> by main; *findcount is updated without atomics,
// so this kernel is only correct when run single-threaded.
int pos;
int i;
char *word;
// Try every keyword at every position of the text.
for (pos = 0; pos < datasize; pos++) {
for (i = 0; i < numwords; i++) {
word = keywords + wordsindex[i];
if (check(data, datasize, pos, word)) {
// Record the find while there is room; keep counting past maxfinds so
// the host can detect truncation.
if (*findcount < maxfinds) {
finds[*findcount].pos = pos;
finds[*findcount].word = i;
}
(*findcount)++;
}
}
}
}
// keywordfinder: load a text file and a newline-separated keyword file,
// search for every keyword occurrence on the GPU and print "<word> : <pos>".
int main(int argc, char *argv[]) {
    if (argc != 3)
        err("usage: keywordfinder textfile keywords");
    // load the text files into memory
    char *data_h, *data_d;
    int datasize = loadfile(argv[1], &data_h);
    Err(cudaMalloc(&data_d, datasize));
    char *keywords_h, *keywords_d;
    int keywordssize = loadfile(argv[2], &keywords_h);
    Err(cudaMalloc(&keywords_d, keywordssize));
    // Build the keyword index: wordsindex[i] is the offset in "keywords"
    // where keyword i starts; newlines become NUL terminators.
    int numwords = count('\n', keywords_h, keywordssize);
    int *wordsindex_h, *wordsindex_d;
    Err(cudaMallocHost(&wordsindex_h, sizeof(int) * numwords));
    Err(cudaMalloc(&wordsindex_d, sizeof(int) * numwords));
    int i;
    int pos = 0;
    wordsindex_h[pos++] = 0;
    for (i = 0; i < keywordssize; i++) {
        if (keywords_h[i] == '\n') {
            keywords_h[i] = 0;
            if (pos < numwords)
                wordsindex_h[pos++] = i + 1;
        }
    }
    // Fixed-capacity result buffer: each find maps a keyword index to a
    // position in the text.
    int maxfinds = 2000;
    struct find *finds_h, *finds_d;
    Err(cudaMallocHost(&finds_h, maxfinds * sizeof(struct find)));
    Err(cudaMalloc(&finds_d, maxfinds * sizeof(struct find)));
    // copy the data across to the device
    Err(cudaMemcpy(data_d, data_h, datasize, cudaMemcpyHostToDevice));
    Err(cudaMemcpy(keywords_d, keywords_h, keywordssize,
                   cudaMemcpyHostToDevice));
    Err(cudaMemcpy(wordsindex_d, wordsindex_h, sizeof(int) * numwords,
                   cudaMemcpyHostToDevice));
    int *findcount_d;
    int findcount = 0;
    Err(cudaMalloc(&findcount_d, sizeof(int)));
    Err(cudaMemcpy(findcount_d, &findcount, sizeof(int),
                   cudaMemcpyHostToDevice));
    // find the keywords (single-threaded kernel; findcount is not atomic)
    findkeywords<<<1, 1>>>(data_d, datasize, keywords_d, wordsindex_d, numwords,
                           finds_d, maxfinds, findcount_d);
    Err(cudaGetLastError());
    // copy the results back
    Err(cudaMemcpy(&findcount, findcount_d, sizeof(int),
                   cudaMemcpyDeviceToHost));
    // BUG FIX: the kernel keeps incrementing findcount past maxfinds to signal
    // truncation, but the copy used findcount directly — reading past the end
    // of finds_d and writing past the end of finds_h. Clamp to the capacity.
    int stored = findcount < maxfinds ? findcount : maxfinds;
    Err(cudaMemcpy(finds_h, finds_d, sizeof(struct find) * stored,
                   cudaMemcpyDeviceToHost));
    // display the (stored) results
    for (int k = 0; k < stored; k++) {
        printf("%s : %d\n", &keywords_h[wordsindex_h[finds_h[k].word]],
               finds_h[k].pos);
    }
    // FIX: release device and pinned-host allocations (previously leaked).
    Err(cudaFree(findcount_d));
    Err(cudaFree(finds_d));
    Err(cudaFree(wordsindex_d));
    Err(cudaFree(keywords_d));
    Err(cudaFree(data_d));
    Err(cudaFreeHost(finds_h));
    Err(cudaFreeHost(wordsindex_h));
    Err(cudaFreeHost(keywords_h));
    Err(cudaFreeHost(data_h));
    return 0;
}
|
14,209 | #include "includes.h"
// One thread per aggregate: sorts its slice of the permuted adjacency in
// place (descending), compacts out duplicates and self references, and writes
// the resulting neighbor count to adjIndexesOut[idx].
__global__ void getInducedGraphNeighborCountsKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *permutedAdjIndexes, int *permutedAdjacencyIn)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
// Slice [Begin, End) of the permuted adjacency owned by this aggregate.
// NOTE(review): reads aggregateIdx[idx + 1] — aggregateIdx must have
// size + 1 entries; confirm with the caller.
int Begin = permutedAdjIndexes[ aggregateIdx[idx] ];
int End = permutedAdjIndexes[ aggregateIdx[idx + 1] ];
// Sort each section of the adjacency (in-place exchange sort, descending):
for(int i = Begin; i < End - 1; i++)
{
for(int ii = i + 1; ii < End; ii++)
{
if(permutedAdjacencyIn[i] < permutedAdjacencyIn[ii])
{
int temp = permutedAdjacencyIn[i];
permutedAdjacencyIn[i] = permutedAdjacencyIn[ii];
permutedAdjacencyIn[ii] = temp;
}
}
}
// Scan through the sorted adjacency to get the condensed adjacency:
// skip consecutive duplicates and entries equal to idx (self edges),
// compacting survivors to the front of the slice.
int neighborCount = 1;
if(permutedAdjacencyIn[Begin] == idx)
neighborCount = 0;
for(int i = Begin + 1; i < End; i++)
{
if(permutedAdjacencyIn[i] != permutedAdjacencyIn[i - 1] && permutedAdjacencyIn[i] != idx)
{
permutedAdjacencyIn[neighborCount + Begin] = permutedAdjacencyIn[i];
neighborCount++;
}
}
// Store the size
adjIndexesOut[idx] = neighborCount;
}
}
14,210 | #include <stdio.h>
// Debug kernel: print the launch geometry and the flat global index of the
// calling thread.
__global__ void mykernel(void) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    printf("***blockDim=%d\n",blockDim.x);
    printf("***blockIdx=%d\n",blockIdx.x);
    printf("***threadIdx=%d\n",threadIdx.x);
    printf("***index=%d\n",idx);
}
// Launch one thread of the debug kernel and wait for it to finish.
int main() {
    mykernel<<<1,1>>>();
    // BUG FIX: kernel launches are asynchronous — without a synchronize the
    // process can exit before the device runs, losing the printf output.
    cudaDeviceSynchronize();
    return 0;
}
|
14,211 | #include "includes.h"
// For every upper-triangle entry of the maxCells x maxCells connection matrix
// that equals 1, append the 3-D coordinates of both endpoints (6 floats) to
// vertexData. connectionCount[0] is the shared output cursor (atomic).
__global__ void CopyConnectionsCoordinatesKernel( int *connectionMatrix, float *pointsCoordinates, float *vertexData, int *connectionCount, int maxCells )
{
// Flat thread id over a 2-D grid of 1-D blocks.
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells * maxCells)
{
if(connectionMatrix[threadId] == 1)
{
// Decode the (from, to) cell pair from the flat matrix index.
int from = threadId / maxCells;
int to = threadId % maxCells;
// Only the upper triangle, so each undirected connection is emitted once.
if(to > from)
{
//int vertexDataOffset = maxCells * 3;
int vertexDataOffset = 0;
// Atomically claim an output slot; slot order is nondeterministic.
int connIdx = atomicAdd( &connectionCount[0], 1);
vertexData[vertexDataOffset + connIdx * 6] = pointsCoordinates[from * 3];
vertexData[vertexDataOffset + connIdx * 6 + 1] = pointsCoordinates[from * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 2] = pointsCoordinates[from * 3 + 2];
vertexData[vertexDataOffset + connIdx * 6 + 3] = pointsCoordinates[to * 3];
vertexData[vertexDataOffset + connIdx * 6 + 4] = pointsCoordinates[to * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 5] = pointsCoordinates[to * 3 + 2];
}
}
}
}
14,212 | #include <stdio.h>
// Each launched thread prints a greeting tagged with its threadIdx.
__global__ void helloFromGPU() {
printf("Hello World from GPU thread %d!\n",threadIdx.x);
}
int main() {
printf("Hello World from CPU!\n");
// 1 block of 10 threads; device printf order is scheduler-dependent.
helloFromGPU <<<1, 10 >>> ();
// Implicitly synchronizes so the buffered device printf output is flushed.
cudaDeviceReset();
//cudaDeviceSynchronize();
// Without cudaDeviceReset() or cudaDeviceSynchronize() the kernel messages are not printed.
// In addition, the .exe file handle is still held by malwarebytes... sometimes.
// Maybe only after Malwarebytes has been running a long time.
// Restarting Malwarebytes fixes things.
return 0;
}
14,213 | #include "EmpireGenerator.cuh"
// Pseudo-random 8-bit colour channel in [0, 255].
int generateRandomColor() {
    int channel = rand();
    return channel % 256;
}
// Populate one empire record: sequence number, markov-generated name, and a
// random RGB colour. markovMatrix is the transition table consumed by
// generateEmpireName.
void generateEmpire(empire* emp, int empNum, double** markovMatrix) {
//Generates the empire's number
emp->number = empNum;
//Generates the empire's name
generateEmpireName(markovMatrix, emp);
//Generates the color for the empire
emp->r = generateRandomColor();
emp->g = generateRandomColor();
emp->b = generateRandomColor();
}
// Allocate NUMEMPIRES empire records in managed memory and initialise each
// one from a markov name model loaded from "filename".
// NOTE(review): map, width and height are unused here — presumably kept for
// interface compatibility with callers; confirm before removing.
empire* generateEmpires(float** map, int width, int height, char* filename) {
//Allocates the empires for GPU processing (unified memory)
empire* empires;
cudaMallocManaged(&empires, NUMEMPIRES*sizeof(empire));
//The matrix for the markov chain
double** markovMatrix = markovFromFile(filename);
//Generates all the empire structs and their names
for (int i = 0; i < NUMEMPIRES; i++) {
//The number is 1 more than i reaching from [1-NUMEMPIRES]
generateEmpire(&empires[i], i+1, markovMatrix);
}
freeMarkov(markovMatrix);
return empires;
}
|
14,214 | #include <iostream>
#include <stdio.h>
using namespace std;
// In-place pairwise max reduction over input within a single block; the
// result ends up in input[0]. Intended launch: <<<1, (count/2)+1>>>.
// NOTE(review): `first`/`second` can index past the array for thread counts
// that are not an exact power-of-two pairing of the data — confirm callers
// size the buffer accordingly.
__global__ void max(int* input)
{
    int tid = threadIdx.x;
    float number_of_threads = blockDim.x;
    int step_size = 1;
    while(number_of_threads > 0){
        if(tid < number_of_threads)
        {
            int first = tid*step_size*2;
            int second = first + step_size;
            if(input[first] < input[second] && input[second] > 0)
                input[first] = input[second];
        }
        // BUG FIX: each round reads results written by other threads in the
        // previous round; without a barrier this is a data race. The loop
        // condition is uniform across the block, so all threads reach this
        // __syncthreads() together.
        __syncthreads();
        step_size *= 2;
        number_of_threads = number_of_threads!=1 ? (int)ceil(number_of_threads/2) : 0;
    }
}
// Read a size, fill a host array with random values in [1, count], reduce it
// on the GPU and print the array plus its maximum (left in element 0).
int main(int argc, char const *argv[])
{
    // User input
    int count;
    cout << "Enter size : ";
    cin >> count;
    // Host array
    int hostArray[count];
    for (int i = 0; i < count; i++)
        hostArray[i] = rand()%count+1;
    // Device array
    int *deviceArray;
    cudaMalloc(&deviceArray, count*sizeof(int));
    cudaMemcpy(deviceArray, hostArray, count*sizeof(int), cudaMemcpyHostToDevice);
    // One block; each thread folds one pair per round.
    max<<<1, (count/2)+1>>>(deviceArray);
    int result;
    // Blocking copy of the reduced maximum — also synchronizes the kernel.
    cudaMemcpy(&result, deviceArray, sizeof(int), cudaMemcpyDeviceToHost);
    // BUG FIX: release the device allocation (previously leaked).
    cudaFree(deviceArray);
    cout << "Elements : ";
    for(int i = 0; i < count; i++)
        cout << hostArray[i] << " ";
    cout << "\nMaximum element: " << result;
}
/*
Enter size : 30
Elements : 14 17 28 26 24 26 17 13 10 2 3 8 21 20 24 17 1 7 23 17 12 9 28 10 3 21 3 14 8 26
Maximum element: 28
*/
|
14,215 | #include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
using namespace std;
#define V 1024
#define E 1024
// CSR-style vertex record: [begin, end) is this vertex's slice of the global
// edge array. A global array of V records is defined alongside the type.
struct NODE
{
int begin;
int end;
}node[V];
// One BFS expansion step: each thread owns one vertex. A frontier vertex
// retires itself, marks itself visited, and pushes its unvisited neighbours
// onto the next frontier with cost+1.
__global__ void bfs(NODE * node,int * edge,int * cost,bool * frontier,bool * visited)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// NOTE(review): no tid < nv guard — the launch must supply exactly nv threads.
if(frontier[tid])
{
frontier[tid] = false;
visited[tid] = true;
for(int i = node[tid].begin; i < node[tid].end; ++i)
{
if(!visited[edge[i]])
{
// NOTE(review): neighbour cost/frontier writes are unsynchronised across
// threads — verify all concurrent writers carry the same level value.
cost[edge[i]] = cost[tid] + 1;
frontier[edge[i]] = true;
}
}
}
return;
}
// Driver: reads a graph in adjacency-list form from "input_bfs", uploads it
// in CSR layout, then repeatedly launches one BFS step while any frontier
// vertex remains. Prints per-vertex distances (-1 byte-pattern = unreached).
int main()
{
freopen("input_bfs","r",stdin);
int nv,ne;
scanf("%d%d",&nv,&ne);
int edge[2*E],cost[V];
bool frontier[V],visited[V];
int *dev_edge,*dev_cost;
bool *dev_frontier,*dev_visited;
NODE *dev_node;
// memset(-1) gives cost[i] == -1 for ints; frontier/visited start false.
memset(cost,-1,nv * sizeof(int));
memset(frontier,false,nv * sizeof(bool));
memset(visited,false,nv * sizeof(bool));
// Build the CSR offsets while reading each vertex's edge list.
for(int i = 0; i < nv;++i)
{
int edge_num;
scanf("%d",&edge_num);
if(i == 0)
node[i].begin = 0;
else
node[i].begin = node[i-1].end;
node[i].end = node[i].begin;
//read edges of this vertex
for(int j = 0; j < edge_num; ++j)
{
int vertex;
scanf("%d",&vertex);
edge[node[i].begin + j] = vertex;
node[i].end++;
}
}
// Echo the edge array for debugging.
for(int i = 0; i < 2*ne; ++i)
cout << edge[i] << " ";
cout << endl;
cudaMalloc((void**)&dev_edge,2*ne*sizeof(int));
cudaMalloc((void**)&dev_cost,nv*sizeof(int));
cudaMalloc((void**)&dev_frontier,nv*sizeof(bool));
cudaMalloc((void**)&dev_visited,nv*sizeof(bool));
cudaMalloc((void**)&dev_node,nv*sizeof(NODE));
cudaMemcpy(dev_edge,edge,2*ne*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_visited,visited,nv*sizeof(bool),cudaMemcpyHostToDevice);
cudaMemcpy(dev_node,node,nv*sizeof(NODE),cudaMemcpyHostToDevice);
// Seed the search from the requested source vertex.
int source;
scanf("%d",&source);
frontier[source] = true;
cost[source] = 0;
cudaMemcpy(dev_cost,cost,nv*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_frontier,frontier,nv*sizeof(bool),cudaMemcpyHostToDevice);
// Relaunch a BFS step while the (host copy of the) frontier is non-empty.
bool flag = true;
while(flag)
{
flag = false;
for(int i = 0; i < nv;++i)
{
if(frontier[i])
{
cout << "frontier " << i << endl;
flag = true;
// Single block of nv threads — one thread per vertex.
bfs<<<1,nv>>>(dev_node,dev_edge,dev_cost,dev_frontier,dev_visited);
cudaMemcpy(frontier,dev_frontier,nv*sizeof(bool),cudaMemcpyDeviceToHost);
break;
}
}
}
cudaMemcpy(cost,dev_cost,nv*sizeof(int),cudaMemcpyDeviceToHost);
for(int i = 0; i < nv; ++i)
printf("%d %d\n",i,cost[i]);
return 0;
}
|
14,216 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it acheives this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __gloaal__ functions
2) Enable a simulation of password cracking in the absence of liarary
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o password_raj password_crack_raj.cu
To Run:
./password_raj > resultpwd_cuda_raj.txt
Dr Kevan auckley, University of Wolverhampton, 2018
*****************************************************************************/
// Compare "attempt" against four hardcoded plaintext passwords, printing any
// that match fully. Returns 1 only when the fourth password matches (the
// first three just print and break out of their comparison loop).
__device__ int is_a_match(char *attempt) {
char plain_password1[] = "RA7852";
char plain_password2[] = "JG3524";
char plain_password3[] = "HA1234";
char plain_password4[] = "LE3254";
// Four independent cursors over the same attempt string.
char *r = attempt;
char *a = attempt;
char *j = attempt;
char *g = attempt;
char *r1 = plain_password1;
char *r2 = plain_password2;
char *r3 = plain_password3;
char *r4 = plain_password4;
// Walk while characters agree; reaching the NUL means a full match.
while(*r == *r1) {
if(*r == '\0')
{
printf("Password: %s\n",plain_password1);
break;
}
r++;
r1++;
}
while(*a == *r2) {
if(*a == '\0')
{
printf("Password: %s\n",plain_password2);
break;
}
a++;
r2++;
}
while(*j == *r3) {
if(*j == '\0')
{
printf("Password: %s\n",plain_password3);
break;
}
j++;
r3++;
}
while(*g == *r4) {
if(*g == '\0')
{
printf("Password: %s\n",plain_password4);
return 1;
}
g++;
r4++;
}
return 0;
}
// Brute-force kernel: blockIdx/threadIdx (+65 => 'A'..'Z') choose the two
// letter positions; each thread then enumerates all 10^4 digit suffixes and
// tests each candidate with is_a_match.
__global__ void kernel() {
char l,o,v,e;
char password[7];
password[6] = '\0';
int i = blockIdx.x+65; // first letter: 'A' + blockIdx.x
int j = threadIdx.x+65; // second letter: 'A' + threadIdx.x
char firstValue = i;
char secondValue = j;
password[0] = firstValue;
password[1] = secondValue;
for(l='0'; l<='9'; l++){
for(o='0'; o<='9'; o++){
for(v='0';v<='9';v++){
for(e='0';e<='9';e++){
password[2] = l;
password[3] = o;
password[4]= v;
password[5]=e;
// is_a_match prints any hit itself; the branches are placeholders.
if(is_a_match(password)) {
//printf("Success");
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
// Nanosecond difference finish - start, written to *difference.
// Returns 0 when the difference is positive, 1 otherwise.
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
    long long int sec_delta = finish->tv_sec - start->tv_sec;
    long long int nsec_delta = finish->tv_nsec - start->tv_nsec;
    // Borrow a second when the nanosecond part went negative.
    if (nsec_delta < 0) {
        sec_delta--;
        nsec_delta += 1000000000;
    }
    *difference = sec_delta * 1000000000 + nsec_delta;
    return !(*difference > 0);
}
// Time the brute-force search: 26x26 launch covers every two-letter prefix
// (blockIdx picks the first letter, threadIdx the second; see kernel()).
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26,26>>>();
// Wait for the asynchronous kernel before stopping the clock.
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
14,217 | // CUDA programming
// Exercise n. 07
#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#define BLOCKS 512
#define THREADS 256
// Prototype
__global__ void saxpy(float a, float *x, float *y, float *z, int N);
__host__ void ints(float *m, int N);
__host__ void print_performance(float time_ms, int N);
// SAXPY driver: initialise two length-N float arrays, run z = a*x + y on the
// GPU, and report timing/bandwidth figures.
int main(void)
{
    float *x, *y, *z, a; // host copies of x, y, a
    float *d_x, *d_y, *d_z; // device copies of x, y
    int N = 1 << 20;
    int size = N * sizeof(float);
    // Allocate space for host copies of x, y
    x = (float *)malloc(size);
    y = (float *)malloc(size);
    z = (float *)malloc(size);
    // Setup input values
    ints(x, N);
    ints(y, N);
    a = 3.0/2.5;
    // Allocate space for device copies of x, y
    cudaMalloc((void **)&d_x, size);
    cudaMalloc((void **)&d_y, size);
    cudaMalloc((void **)&d_z, size);
    // Create CUDA events for performance evaluation purposes
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy inputs to device
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    // BUG FIX: the fixed BLOCKS(512) x THREADS(256) launch supplies only
    // 131072 threads, so with N = 2^20 seven eighths of z were never
    // computed. Size the grid from N instead (ceil-division).
    int blocks = (N + THREADS - 1) / THREADS;
    // Call the kernel on GPU
    cudaEventRecord(start);
    saxpy<<< blocks, THREADS >>>(a, d_x, d_y, d_z, N);
    cudaEventRecord(stop);
    // Copy result back to host
    cudaMemcpy(z, d_z, size, cudaMemcpyDeviceToHost);
    // Compute the elapsed time in milliseconds
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    print_performance(milliseconds, N);
    // Cleanup
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(x);
    free(y);
    free(z);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    return(EXIT_SUCCESS);
}
// Single-precision A*X Plus Y (on device)
// Single-precision A*X Plus Y (on device): z[i] = a*x[i] + y[i] for i < N.
__global__ void saxpy(float a, float *x, float *y, float *z, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail so we never access beyond the arrays.
    if(i >= N)
        return;
    z[i] = a * x[i] + y[i];
}
// Initialisation
// Initialisation: m[i] = i/(i+1), values in [0, 1) approaching 1.
__host__ void ints(float *m, int N)
{
    for (int k = 0; k < N; k++)
        m[k] = k / (k + 1.0);
}
}
// Report effective bandwidth and computational throughput for the saxpy run.
// time_ms: kernel time in milliseconds (from cudaEventElapsedTime);
// N: number of array elements.
__host__ void print_performance(float time_ms, int N)
{
    // Effective bandwidth: BW = (Rb + Wb)/(t*1e9)
    float RbWb, BW;
    // BUG FIX: a float is sizeof(float) = 4 bytes, not 5.
    RbWb = N * (float)sizeof(float); // bytes per array read or write
    RbWb *= 3.0; // 3 arrays touched: read x, read y, write z
    BW = RbWb/(time_ms*1e6); // bandwidth in GB/s
    // Computational throughput: saxpy does 2 FLOPs (mul+add) per element.
    float GFLOP = 2.0*N/(time_ms*1e6); // throughput in GFLOP/s
    // BUG FIX: the elapsed time is in milliseconds — the label said seconds.
    printf("Device performance\n"
           "Elapsed time (ms): %.3f\n"
           "Effective Bandwidth (GB/s): %.3f\n"
           "Effective computational throughput (GFLOP/s): %.3f\n", time_ms, BW, GFLOP);
}
|
14,218 | /* From: https://github.com/mvx24
* Find the sum of all primes below 2 million (Project Euler #10).
* This can take a while! *spoiler* 142913828922
* For below 2k: 277050 (0.09s via nvcc, 19 hours via kcc!)
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 512
#define START_NUMBER 1414
#define TOTAL_THREADS ((2002-START_NUMBER)/2)
// Kernel that executes on the CUDA device
// Each thread trial-divides one odd candidate (START_NUMBER-1 + 2*idx)
// against the precomputed small primes; primes are gathered into shared
// memory and thread 0 serially sums the block's hits into blockSums.
__global__ void sum_primes(int* firstPrimes, size_t n, unsigned long long* blockSums) {
__shared__ int blockPrimes[THREADS_PER_BLOCK];
int i;
int idx;
int num;
idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < TOTAL_THREADS) {
// The number to test
num = (START_NUMBER - 1) + (idx * 2);
for (i = 0; i < n; ++i) {
if(!(num % firstPrimes[i])) break;
}
// Surviving all divisors means num is prime; otherwise contribute 0.
if (i == n)
blockPrimes[threadIdx.x] = num;
else
blockPrimes[threadIdx.x] = 0;
} else {
// Tail threads beyond the candidate range contribute 0.
blockPrimes[threadIdx.x] = 0;
}
// All shared-memory writes must land before thread 0 reads them.
__syncthreads();
if (threadIdx.x == 0) {
// sum all the results from the block
blockSums[blockIdx.x] = 0;
for (i = 0; i < blockDim.x; ++i)
blockSums[blockIdx.x] += blockPrimes[i];
}
}
// main routine that executes on the host
// main routine that executes on the host: sieve small primes serially, hand
// them to the GPU to test the remaining odd candidates, then fold the
// per-block partial sums into the final total.
int main(int argc, char *argv[]) {
//host
int primes[1024];
unsigned long long *primeSums;
int i, j, index;
int blockSize, nblocks;
unsigned long long sum;
size_t len;
//device
int* primesDevice;
unsigned long long* primeSumsDevice;
// Find all the primes less than the square root of 2 million ~1414
// (trial division against the primes found so far), summing them as we go.
primes[0] = 2;
index = 1;
sum = 2;
for (i = 3; i != START_NUMBER; ++i) {
for (j = 0; j != index; ++j) {
if (!(i % primes[j])) break;
}
if (j == index) {
primes[index++] = i;
sum += i;
}
}
len = index;
cudaMalloc((void**) &primesDevice, len * sizeof(int));
cudaMemcpy(primesDevice, primes, len * sizeof(int), cudaMemcpyHostToDevice);
// Test the all odd numbers between 1414 and 2000000
blockSize = THREADS_PER_BLOCK;
nblocks = TOTAL_THREADS/blockSize + !!(TOTAL_THREADS % blockSize); // ceil-div
cudaMalloc((void**) &primeSumsDevice, nblocks * sizeof(unsigned long long));
sum_primes <<< nblocks, blockSize >>> (primesDevice, index, primeSumsDevice);
// C invocation
// do {
// dim3 gridDim;
// dim3 blockDim;
// cudaError_t error;
// gridDim.x = nblocks;
// blockDim.x = blockSize;
// gridDim.y = gridDim.z = blockDim.y = blockDim.z = 1;
// error = cudaConfigureCall(gridDim, blockDim, 0, NULL);
// if(error != cudaSuccess)
// {
// printf("%s\n", cudaGetErrorString(error));
// break;
// }
// error = cudaSetupArgument(&primesDevice, sizeof(primesDevice), 0);
// error = cudaSetupArgument(&index, sizeof(index), sizeof(primesDevice));
// error = cudaSetupArgument(&primeSumsDevice, sizeof(primeSumsDevice), sizeof(primesDevice) + sizeof(index));
// printf("Start kernel\n");
// error = cudaLaunch(sum_primes);
// if(error != cudaSuccess) {
// printf("cudaLaunch: %s\n", cudaGetErrorString(error));
// break;
// }
// } while(0);
// Retrieve result from device and store it in host array
// (blocking copy — also synchronizes with the kernel).
primeSums = (unsigned long long*) malloc(nblocks * sizeof(unsigned long long));
cudaMemcpy(primeSums, primeSumsDevice, nblocks * sizeof(unsigned long long), cudaMemcpyDeviceToHost);
// Fold the per-block partial sums into the serial total.
for (i = 0; i != nblocks; ++i) {
sum += primeSums[i];
//printf("%llu\t", primeSums[i]);
}
// Cleanup
free(primeSums);
cudaFree(primeSumsDevice);
cudaFree(primesDevice);
// Print results
printf("%llu\n", sum);
}
14,219 | /* SampleDecoder.cu
This file should be specificated by the user.
Authors
Derek N.A. Alves,
Bruno C.S. Nogueira,
Davi R.C Oliveira and
Ermeson C. Andrade.
Instituto de Computação, Universidade Federal de Alagoas.
Maceió, Alagoas, Brasil.
*/
#include "SampleDecoder.cuh"
#include "k.cuh"
// Default constructor: the decoder keeps no state of its own.
SampleDecoder::SampleDecoder() { }
// Destructor: no resources are owned, nothing to release.
SampleDecoder::~SampleDecoder() { }
// One-time setup hook required by the decoder interface; intentionally a no-op here.
void SampleDecoder::Init() const{
return;
}
// Launch the user-supplied decoding kernel `dec` (declared in k.cuh) over the
// device-resident population and fitness-key buffers.
// Grid layout: 256 blocks of 32 threads each.
void SampleDecoder::Decode(float* d_next, float* d_nextFitKeys) const {
    const int numBlocks = 256;
    const int threadsPerBlock = 32;
    dec<<<numBlocks, threadsPerBlock>>>(d_next, d_nextFitKeys);
}
|
14,220 | #include<stdio.h>
// Element-wise vector addition: d_c[i] = d_a[i] + d_b[i] for every i < *d_n.
// One thread per element; out-of-range threads just report and exit.
// The device printf is serialized and slow — acceptable for this teaching example only.
// Fixed: typo in the debug message ("anout" -> "about").
__global__ void parallel_vector_add(int* d_a, int* d_b, int* d_c, int* d_n) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < *d_n) {
        printf("I am about to compute c[%d].\n", i);
        d_c[i] = d_a[i] + d_b[i];
    }
    else {
        printf("I am thread #%d, and doing nothing.\n", i);
    }
}
// Host driver: reads n, builds a = 0..n-1 and b = n..1, adds them on the GPU,
// then prints the result vector and the kernel time measured with CUDA events.
// Fixed: d_n was never cudaFree'd and the two events were never destroyed
// (resource leaks); the non-standard C++ VLAs `int h_a[n]` are replaced with
// heap allocations so large n cannot overflow the stack.
int main()
{
    int n;
    scanf("%d", &n);
    int *h_a = (int *)malloc(n * sizeof(int));
    int *h_b = (int *)malloc(n * sizeof(int));
    int *h_c = (int *)malloc(n * sizeof(int));
    for (int i = 0; i < n; i++) {
        h_a[i] = i;
        h_b[i] = n - i;
    }
    //Part 1
    //allocate device memory for a, b, and c; copy a and b to device memory
    int *d_a, *d_b, *d_c, *d_n;
    cudaMalloc((void **) &d_a, n*sizeof(int));
    cudaMalloc((void **) &d_b, n*sizeof(int));
    cudaMalloc((void **) &d_c, n*sizeof(int));
    cudaMalloc((void **) &d_n, sizeof(int));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMemcpy(d_a, h_a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
    //Part 2
    //kernel launch: ceiling-divided grid so every element is covered
    int amountBlock = (n + 511) / 512;
    cudaEventRecord(start);
    parallel_vector_add<<<amountBlock, 512>>>(d_a, d_b, d_c, d_n);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    //Part 3
    //copy c to host memory
    cudaMemcpy(h_c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    //free every device resource (the original leaked d_n and both events)
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_n);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    for (int i = 0; i < n; i++) {
        printf("%d ", h_c[i]);
    }
    printf("\ntime used = %f\n", milliseconds);
    free(h_a);
    free(h_b);
    free(h_c);
}
|
14,221 | #define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define INDEX(b,c,h,w,channels,height,width) ((b * channels + c) * height + h) * width+ w
// Backward pass for the four directional IRNN sweeps (left/right/up/down).
// One logical thread per (b, c, h, w) element of a B x C x H x W tensor,
// assigned via the grid-stride CUDA_KERNEL_LOOP.
// For each direction the loop computes:
//   diff_*  - gradient reaching this element through the recurrent chain,
//             stored as the per-element bias gradient and summed into
//             grad_input at the end;
//   temp    - the accumulated chain product used for the per-element weight
//             gradient (written only when a neighbor in that direction exists).
// The (output > 0 ? 1 : 0) factors are the derivative of the ReLU applied in
// the forward pass.
// NOTE(review): grad_weight_*/grad_bias_* maps hold per-element partials;
// presumably they are reduced over the map by the caller — confirm.
extern "C" __global__ void IRNNBackward(
float* grad_input,
float* grad_weight_up_map,
float* grad_weight_right_map,
float* grad_weight_down_map,
float* grad_weight_left_map,
float* grad_bias_up_map,
float* grad_bias_right_map,
float* grad_bias_down_map,
float* grad_bias_left_map,
const float* weight_up,
const float* weight_right,
const float* weight_down,
const float* weight_left,
const float* grad_output_up,
const float* grad_output_right,
const float* grad_output_down,
const float* grad_output_left,
const float* output_up,
const float* output_right,
const float* output_down,
const float* output_left,
const int channels,
const int height,
const int width,
const int n) {
CUDA_KERNEL_LOOP(index,n){
// Decompose the flat index into (b, c, h, w) coordinates.
int w = index % width;
int h = index / width % height;
int c = index / width / height % channels;
int b = index / width / height / channels;
float diff_left = 0;
float diff_right = 0;
float diff_up = 0;
float diff_down = 0;
//left
for (int i = 0; i<=w; i++)
{
diff_left *= weight_left[c];
diff_left += grad_output_left[INDEX(b, c, h, i, channels, height, width)];
diff_left *= (output_left[INDEX(b, c, h, i, channels, height, width)]<=0)? 0 : 1;
}
float temp = grad_output_left[INDEX(b, c, h, 0, channels, height, width)];
for (int i = 1; i < w +1 ; i++)
{
temp = (output_left[INDEX(b, c, h, i-1, channels, height, width)] >0?1:0) * temp * weight_left[c] + grad_output_left[INDEX(b, c, h, i, channels, height, width)];
}
if (w != width - 1){
grad_weight_left_map[index] = temp * output_left[INDEX(b, c, h, w+1, channels, height, width)] * (output_left[index] > 0? 1:0);
grad_bias_left_map[index] = diff_left;
}
// right
for (int i = width -1; i>=w; i--)
{
diff_right *= weight_right[c];
diff_right += grad_output_right[INDEX(b, c, h, i, channels, height, width)];
diff_right *= (output_right[INDEX(b, c, h, i, channels, height, width)]<=0)? 0 : 1;
}
temp = grad_output_right[INDEX(b, c, h, width-1, channels, height, width)];
for (int i = width -2; i > w - 1 ; i--)
{
temp = (output_right[INDEX(b, c, h, i+1, channels, height, width)] >0?1:0) * temp * weight_right[c] + grad_output_right[INDEX(b, c, h, i, channels, height, width)];
}
if (w != 0){
grad_weight_right_map[index] = temp * output_right[INDEX(b, c, h, w-1, channels, height, width)] * (output_right[index] > 0? 1:0);
grad_bias_right_map[index] = diff_right;
}
// up
for (int i = 0; i<=h; i++)
{
diff_up *= weight_up[c];
diff_up += grad_output_up[INDEX(b, c, i, w, channels, height, width)];
diff_up *= (output_up[INDEX(b, c, i, w, channels, height, width)]<=0)? 0 : 1;
}
temp = grad_output_up[INDEX(b, c, 0, w, channels, height, width)];
for (int i = 1; i < h +1 ; i++)
{
temp = (output_up[INDEX(b, c, i-1, w, channels, height, width)] >0?1:0) * temp * weight_up[c] + grad_output_up[INDEX(b, c, i, w, channels, height, width)];
}
if (h != height - 1){
grad_weight_up_map[index] = temp * output_up[INDEX(b, c, h+1, w, channels, height, width)] * (output_up[index] > 0? 1:0);
grad_bias_up_map[index] = diff_up;
}
// down
for (int i = height -1; i>=h; i--)
{
diff_down *= weight_down[c];
diff_down += grad_output_down[INDEX(b, c, i, w, channels, height, width)];
diff_down *= (output_down[INDEX(b, c, i, w, channels, height, width)]<=0)? 0 : 1;
}
temp = grad_output_down[INDEX(b, c, height-1, w, channels, height, width)];
for (int i = height -2; i > h - 1 ; i--)
{
temp = (output_down[INDEX(b, c, i+1, w, channels, height, width)] >0?1:0) * temp * weight_down[c] + grad_output_down[INDEX(b, c, i, w, channels, height, width)];
}
if (h != 0){
grad_weight_down_map[index] = temp * output_down[INDEX(b, c, h-1, w, channels, height, width)] * (output_down[index] > 0? 1:0);
grad_bias_down_map[index] = diff_down;
}
// Gradient w.r.t. the input is the sum of all four directional chains.
grad_input[index] = diff_down + diff_left + diff_right + diff_up;
}
}
14,222 |
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include<stdio.h>
#include<stdlib.h>
#include<string>
#include<vector>
#include<iostream>
#include<time.h>
#include<map>
#include<cmath>
#include<limits>
#include<fstream>
#include<cuda.h>
using namespace std;
//static const int WORK_SIZE = 256;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
/* Check the result of a CUDA Driver API call and abort on failure.
 * Fixed: the printf arguments were separated by a stray "]=" token
 * (which does not compile); it is now a plain comma. */
#define CHECK_CUDA_RESULT(N) { \
CUresult result = N; \
if (result != 0) { \
printf("CUDA call on line %d returned error %d\n", __LINE__, \
result); \
exit(1); \
} }
/*int main(int argc, char **argv)
{
CUmodule module;
CUcontext context;
CUdevice device;
CUdeviceptr deviceArray;
CUfunction process;
void *kernelArguments[] = { &deviceArray };
int deviceCount;
unsigned int idata[WORK_SIZE], odata[WORK_SIZE];
for (int i = 0; i < WORK_SIZE; ++i) {
idata[i] = i;
}
CHECK_CUDA_RESULT(cuInit(0));
CHECK_CUDA_RESULT(cuDeviceGetCount(&deviceCount));
if (deviceCount == 0) {
printf("No CUDA-compatible devices found\n");
exit(1);
}
CHECK_CUDA_RESULT(cuDeviceGet(&device, 0));
CHECK_CUDA_RESULT(cuCtxCreate(&context, 0, device));
CHECK_CUDA_RESULT(cuModuleLoad(&module, "bitreverse.fatbin"));
CHECK_CUDA_RESULT(cuModuleGetFunction(&process, module, "bitreverse"));
CHECK_CUDA_RESULT(cuMemAlloc(&deviceArray, sizeof(int) * WORK_SIZE));
CHECK_CUDA_RESULT(
cuMemcpyHtoD(deviceArray, idata, sizeof(int) * WORK_SIZE));
CHECK_CUDA_RESULT(
cuLaunchKernel(process, 1, 1, 1, WORK_SIZE, 1, 1, 0, NULL, kernelArguments, NULL));
CHECK_CUDA_RESULT(
cuMemcpyDtoH(odata, deviceArray, sizeof(int) * WORK_SIZE));
for (int i = 0; i < WORK_SIZE; ++i) {
printf("Input value: %u, output value: %u\n"]= idata[i], odata[i]);
}
CHECK_CUDA_RESULT(cuMemFree(deviceArray));
CHECK_CUDA_RESULT(cuCtxDestroy(context));
return 0;
}*/
int maxIterations=50000;
double bias=0.0;
double learningRate=0.05;
double minValue=0.0;
double maxValue=5000.0;
double learningRateInitial=0.01;
double integer_maximum=(double)numeric_limits<int>::max();
double integer_minimum=(double)numeric_limits<int>::min();
const int inputLayerLimit=10;
const int secondLayerLimit=20;
const int thirdLayerLimit=20;
const int fourthLayerLimit=44;
vector<double> inputLayer;
vector<double> secondLayer;
vector<double> thirdLayer;
vector<double> fourthLayer;
//double finalOutput;
map<string,int> phonemeIDMapping;
double inputConnection[secondLayerLimit][inputLayerLimit];
double secondConnection[thirdLayerLimit][secondLayerLimit];
double thirdConnection[fourthLayerLimit][thirdLayerLimit];
string phonemes[41];
//double[] finalConnection=new double[fourthLayerLimit];
void initLearningData()
{
phonemes[0]="a";
phonemes[1]="ae";
phonemes[2]="ar";
phonemes[3]="au";
phonemes[4]="b";
phonemes[5]="ch";
phonemes[6]="d";
phonemes[7]="e";
phonemes[8]="ee";
phonemes[9]="er";
phonemes[10]="f";
phonemes[11]="g";
phonemes[12]="h";
phonemes[13]="ie";
phonemes[14]="j";
phonemes[15]="k";
phonemes[16]="l";
phonemes[17]="m";
phonemes[18]="n";
phonemes[19]="ng";
phonemes[20]="o";
phonemes[21]="oe";
phonemes[22]="oi";
phonemes[23]="oo";
phonemes[24]="or";
phonemes[25]="ow";
phonemes[26]="p";
phonemes[27]="r";
phonemes[28]="s";
phonemes[29]="sh";
phonemes[30]="t";
phonemes[31]="th";
phonemes[32]="u";
phonemes[33]="ue";
phonemes[34]="ur";
phonemes[35]="v";
phonemes[36]="w";
phonemes[37]="wh";
phonemes[38]="y";
phonemes[39]="z";
phonemes[40]="zh";
for(int i=0;i<41;i++)
{
phonemeIDMapping[phonemes[i]]=(i+1);
}
/*phonemeIDMapping["a"]=1;
phonemeIDMapping["a"]=2;
phonemeIDMapping["e"]=3;
phonemeIDMapping["i"]=4;
phonemeIDMapping["o"]=5;
phonemeIDMapping["u"]=6;
phonemeIDMapping["ae"]=7;
phonemeIDMapping["ee"]=8;
phonemeIDMapping["ie"]=9;
phonemeIDMapping["oe"]=10;
phonemeIDMapping["ue"]=11;
phonemeIDMapping["oo"]=12;
phonemeIDMapping["ar"]=13;
phonemeIDMapping["ur"]=14;
phonemeIDMapping["or"]=15;
phonemeIDMapping["au"]=16;
phonemeIDMapping["er"]=17;
phonemeIDMapping["ow"]=18;
phonemeIDMapping["oi"]=19;
phonemeIDMapping["b"]=20;
phonemeIDMapping["d"]=21;
phonemeIDMapping["f"]=22;
phonemeIDMapping["g"]=23;
phonemeIDMapping["h"]=24;
phonemeIDMapping["j"]=25;
phonemeIDMapping["k"]=26;
phonemeIDMapping["l"]=27;
phonemeIDMapping["m"]=28;
phonemeIDMapping["n"]=29;
phonemeIDMapping["p"]=30;
phonemeIDMapping["r"]=31;
phonemeIDMapping["s"]=32;
phonemeIDMapping["t"]=33;
phonemeIDMapping["v"]=34;
phonemeIDMapping["w"]=35;
phonemeIDMapping["wh"]=36;
phonemeIDMapping["y"]=37;
phonemeIDMapping["z"]=38;
phonemeIDMapping["th"]=39;
phonemeIDMapping["ch"]=40;
phonemeIDMapping["sh"]=41;
phonemeIDMapping["zh"]=42;
phonemeIDMapping["ng"]=43;*/
}
/* Return a pseudo-random initial weight uniformly distributed in [0, 1].
 * Fixed: the original normalized rand() over the full span of `int`
 * ((val - INT_MIN) / (INT_MAX - INT_MIN)), which maps every rand() value
 * (0..RAND_MAX) to roughly 0.5, so all weights started out nearly identical.
 * Dividing by RAND_MAX restores a useful [0, 1] spread. */
double getRandomWeight()
{
    double val=((double)rand())/((double)RAND_MAX);
    return val;
}
// Min-max scale `value` into [0, 1] using the globally configured
// minValue/maxValue range.
double normalize(double value)
{
    return (value - minValue) / (maxValue - minValue);
}
/*void initFromFile()
{
int i=0,j=0;
try
{
Scanner fileScanner=new Scanner(new File("inputConnection"));
for(i=0;i<secondLayerLimit;i++)
{
for(j=0;j<inputLayerLimit;j++)
{
try
{
inputConnection[i][j]=fileScanner.nextdouble();
}
catch(Exception e)
{
//inputConnection[i][j]=getRandomWeight();
}
}
}
fileScanner.close();
fileScanner=new Scanner(new File("secondConnection"));
for(i=0;i<thirdLayerLimit;i++)
{
for(j=0;j<secondLayerLimit;j++)
{
try
{
secondConnection[i][j]=fileScanner.nextdouble();
}
catch(Exception e)
{
//secondConnection[i][j]=getRandomWeight();
}
}
}
fileScanner.close();
fileScanner=new Scanner(new File("thirdConnection"));
for(i=0;i<fourthLayerLimit;i++)
{
for(j=0;j<thirdLayerLimit;j++)
{
try
{
thirdConnection[i][j]=fileScanner.nextdouble();
}
catch(Exception e)
{
//thirdConnection[i][j]=getRandomWeight();
}
}
}
fileScanner=new Scanner(new File("finalConnection"));
for(i=0;i<fourthLayerLimit;i++)
{
try
{
finalConnection[i]=fileScanner.nextdouble();
}
catch(Exception e)
{
finalConnection[i]=getRandomWeight();
}
}
fileScanner.close();
}
catch(Exception e)
{
cout<<i+" "+j);
e.printStackTrace();
}
}*/
// Seed all three connection matrices with fresh random initial weights.
void init()
{
    int row, col;
    for (row = 0; row < secondLayerLimit; ++row)
        for (col = 0; col < inputLayerLimit; ++col)
            inputConnection[row][col] = getRandomWeight();
    for (row = 0; row < thirdLayerLimit; ++row)
        for (col = 0; col < secondLayerLimit; ++col)
            secondConnection[row][col] = getRandomWeight();
    for (row = 0; row < fourthLayerLimit; ++row)
        for (col = 0; col < thirdLayerLimit; ++col)
            thirdConnection[row][col] = getRandomWeight();
}
// Logistic sigmoid: maps any real value into (0, 1).
double sigmoid(double value)
{
    return 1.0 / (1.0 + exp(-value));
}
// derivative != 1: apply the sigmoid to `value`.
// derivative == 1: sigmoid derivative expressed in terms of an
// already-activated output, i.e. value * (1 - value).
double activation(double value,int derivative)
{
    if (derivative == 1)
        return value * (1 - value);
    return sigmoid(value);
}
/* Backpropagate the output errors through the three connection matrices,
 * applying gradient-descent corrections in place.
 * err   - per-output-unit error terms (length fourthLayerLimit).
 * error - aggregate squared error; unused here but kept for the caller's
 *         interface.
 * Fixed: delta and delta1 were read before ever being written
 * (uninitialized stack memory in `delta[j] = delta[j] + ...`); both are
 * now zero-initialized before accumulation. */
void backPropagate(double err[],double error)
{
    double delta[thirdLayerLimit] = {0.0};
    for(int i=0;i<fourthLayerLimit;i++)
    {
        for(int j=0;j<thirdLayerLimit;j++)
        {
            double correction=(err[i]*activation(fourthLayer[i],1)*thirdLayer[j]);
            thirdConnection[i][j]=thirdConnection[i][j]-learningRate*correction;
            delta[j]=delta[j]+(correction*thirdConnection[i][j]);
        }
    }
    double delta1[secondLayerLimit] = {0.0};
    for(int i=0;i<thirdLayerLimit;i++)
    {
        for(int j=0;j<secondLayerLimit;j++)
        {
            double correction=(delta[i]*activation(thirdLayer[i],1)*secondLayer[j]);
            secondConnection[i][j]=secondConnection[i][j]-learningRate*correction;
            delta1[j]=delta1[j]+(correction*secondConnection[i][j]);
        }
    }
    for(int i=0;i<secondLayerLimit;i++)
    {
        for(int j=0;j<inputLayerLimit;j++)
        {
            double correction=(delta1[i]*activation(secondLayer[i],1)*inputLayer[j]);
            inputConnection[i][j]=inputConnection[i][j]-learningRate*correction;
        }
    }
}
// Forward pass for training: propagate inputLayer through the three weight
// matrices, appending each layer's sigmoid activations as it goes.
// Precondition: secondLayer/thirdLayer/fourthLayer are empty (the caller
// clears them between samples).
void learn()
{
    for (int unit = 0; unit < secondLayerLimit; ++unit)
    {
        double acc = bias;
        for (int src = 0; src < inputLayerLimit; ++src)
            acc += inputLayer[src] * inputConnection[unit][src];
        secondLayer.push_back(activation(acc, 0));
    }
    for (int unit = 0; unit < thirdLayerLimit; ++unit)
    {
        double acc = bias;
        for (int src = 0; src < secondLayerLimit; ++src)
            acc += secondLayer[src] * secondConnection[unit][src];
        thirdLayer.push_back(activation(acc, 0));
    }
    for (int unit = 0; unit < fourthLayerLimit; ++unit)
    {
        double acc = bias;
        for (int src = 0; src < thirdLayerLimit; ++src)
            acc += thirdLayer[src] * thirdConnection[unit][src];
        fourthLayer.push_back(activation(acc, 0));
    }
}
/* Training driver: for each iteration and each phoneme, load its
 * preprocessed feature file, run a forward pass, compute per-output errors
 * against the 0.99/0.01 target, and backpropagate.
 * Fixed: samples were read from std::cin instead of the ifstream that was
 * just opened, so the network never saw the file data; the extraction now
 * also controls the loop, removing the classic `while(!file.eof())` bug
 * that pushed one stale sample after the last read. */
void learner()
{
    for(int k=0;k<maxIterations;k++)
    {
        for(int i=0;i<41;i++)
        {
            string filename="preprocess/preprocess_"+phonemes[i]+"_1";
            ifstream file(filename.c_str());
            string phoneme=phonemes[i];
            int expectedOutput=phonemeIDMapping[phoneme];
            double positive,negative;
            while(file>>positive>>negative)
            {
                double val=positive>negative?positive:-negative;
                inputLayer.push_back(normalize(val));
            }
            learn();
            double err[fourthLayerLimit];
            double error=0.0;
            for(int m=0;m<fourthLayerLimit;m++)
            {
                err[m]=(m+1)==expectedOutput?fourthLayer[m]-0.99:fourthLayer[m]-0.01;
                double s=(m+1)==expectedOutput?0.99:0.01;
                error+=(0.5*(s-fourthLayer[m])*(s-fourthLayer[m]));
            }
            if((k+1)%10000==0)
            {
                cout<<"Error: Iteration "<<(k+1)<<" Phoneme "<<phoneme<<" [";
                for(int m=0;m<fourthLayerLimit;m++)
                {
                    cout<<err[m]<<" ";
                }
                cout<<"]"<<endl;
            }
            backPropagate(err,error);
            inputLayer.clear();
            secondLayer.clear();
            thirdLayer.clear();
            fourthLayer.clear();
            file.close();
        }
    }
}
// Persist the learned connection weights to disk.
// Intentionally a no-op: the original implementation (a Java PrintWriter
// routine) was left commented out and never ported, so this function has
// no observable behavior.
void commitToFile()
{
}
/* Forward pass for inference, then report the output-layer activation
 * associated with each phoneme.
 * Fixed: phoneme IDs are 1-based (id = i + 1, see initLearningData, and the
 * target construction in learner uses (m+1)==expectedOutput), while
 * fourthLayer is 0-based — the report therefore read one slot too far for
 * every phoneme. Index with id - 1. */
void predict()
{
    for(int i=0;i<secondLayerLimit;i++)
    {
        double sum=bias;
        for(int j=0;j<inputLayerLimit;j++)
        {
            sum+=(inputLayer[j]*inputConnection[i][j]);
        }
        secondLayer.push_back(activation(sum,0));
    }
    for(int i=0;i<thirdLayerLimit;i++)
    {
        double sum=bias;
        for(int j=0;j<secondLayerLimit;j++)
        {
            sum+=(secondLayer[j]*secondConnection[i][j]);
        }
        thirdLayer.push_back(activation(sum,0));
    }
    for(int i=0;i<fourthLayerLimit;i++)
    {
        double sum=bias;
        for(int j=0;j<thirdLayerLimit;j++)
        {
            sum+=(thirdLayer[j]*thirdConnection[i][j]);
        }
        fourthLayer.push_back(activation(sum,0));
    }
    for(map<string,int>::iterator key=phonemeIDMapping.begin();key!=phonemeIDMapping.end();++key)
    {
        cout<<"Phoneme: "<<key->first<<" Probability: "<<fourthLayer[key->second - 1]<<endl;
    }
}
/* Load a prediction input file into inputLayer and run predict().
 * Fixed: values were read from std::cin instead of the opened file, and
 * `while(!file.eof())` pushed one stale sample after the final read; the
 * extraction itself now drives the loop. */
void initPrediction(string filename)
{
    ifstream file(filename.c_str());
    double positive,negative;
    while(file>>positive>>negative)
    {
        double val=positive>negative?positive:-negative;
        inputLayer.push_back(normalize(val));
    }
    predict();
}
// Entry point: option 1 trains from the preprocess files then predicts;
// any other option goes straight to prediction.
// Fixed: the phoneme table and ID mapping were only built inside option 1,
// so the prediction-only path iterated an empty phonemeIDMapping and
// printed nothing. They are now initialized unconditionally.
int main()
{
    srand(time(NULL));
    cout<<"1. Learn\n2. Predict\nPlease enter choice:";
    int option;
    cin>>option;
    // Needed by both paths: predict() iterates phonemeIDMapping.
    initLearningData();
    if(option==1)
    {
        init();
        cout<<"Learning phase started...";
        learner();
        commitToFile();
        cout<<"Learning phase finished...";
    }
    else
    {
        // NOTE(review): weight loading (initFromFile) is still unimplemented,
        // so this path predicts with uninitialized connection weights.
        //initFromFile();
    }
    cout<<"Prediction phase started...";
    initPrediction("test");
    cout<<"Prediction phase finished...";
    return 0;
}
|
14,223 | #include "includes.h"
// Element-wise product c[i] = a[i] * b[i] over a 2-D grid of 1-D blocks.
// The flat index folds blockIdx.y, blockIdx.x and threadIdx.x together;
// threads at or beyond N fall through without touching memory.
__global__ void vecMult(float* a,float* b,float* c,const int N){
    const int idx = gridDim.x * blockDim.x * blockIdx.y
                  + blockDim.x * blockIdx.x
                  + threadIdx.x;
    if (idx < N) {
        c[idx] = a[idx] * b[idx];
    }
}
14,224 | #include "includes.h"
using namespace std;
#define TILE 16
/* LU Decomposition using Shared Memory \
\ CUDA \
\ \
\ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
//Initialize a 2D matrix
// LU elimination step: in an n x n row-major matrix, divide the elements of
// row `index` that lie to the right of the diagonal by the pivot value
// matrix[index*n + index].
__global__ void scaleIndex(double *matrix, int n, int index){
    const int pivot = index * n + index;
    const int rowEnd = index * n + n;
    for (int j = pivot + 1; j < rowEnd; ++j) {
        matrix[j] = matrix[j] / matrix[pivot];
    }
}
14,225 | /*************************************************************************
> File Name: 07cultime.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年04月08日 星期五 10时18分09秒
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
//__global__声明的函数,告诉编译器这段代码交由CPU调用,由GPU执行
// In-place transform of dev_a[0..NUM): a[i] = a[i] % 23 * a[i] * 5 % 9
// (left-to-right: (((a % 23) * a) * 5) % 9).
// Grid-stride loop, so any launch configuration covers the whole array.
__global__ void mul(int *dev_a,const int NUM)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < NUM; i += stride)
    {
        dev_a[i] = dev_a[i]%23*dev_a[i]*5%9;
    }
}
// Host driver: fills an array with 0..NUM-1, transforms it on the GPU with
// the `mul` kernel, and reports the kernel time measured with CUDA events.
int main(void)
{
const int thread_pre_block = 64; // threads per block
const int block_pre_grid = 8; // blocks in the grid
const int NUM = 45056;
// Allocate host memory and initialize it
int host_a[NUM];
for(int i=0;i<NUM;i++)
host_a[i]=i;
// cudaError status, defaults to cudaSuccess (0)
cudaError_t err = cudaSuccess;
// Allocate GPU storage
int *dev_a;
err=cudaMalloc((void **)&dev_a, sizeof(int)*NUM);
if(err!=cudaSuccess)
{
perror("the cudaMalloc on GPU is failed");
return 1;
}
// Transfer the input data to the GPU with cudaMemcpy
cudaMemcpy(dev_a,host_a,sizeof(host_a),cudaMemcpyHostToDevice);
dim3 threads = dim3(thread_pre_block);
dim3 blocks = dim3(block_pre_grid);
// Use events to measure elapsed time
float time_elapsed=0;
cudaEvent_t start,stop;
cudaEventCreate(&start); // create the events
cudaEventCreate(&stop);
cudaEventRecord( start,0); // record the current time
mul<<<blocks, threads, 0, 0>>>(dev_a,NUM);
cudaEventRecord( stop,0); // record the current time
cudaEventSynchronize(start); //Waits for an event to complete.
cudaEventSynchronize(stop); //Waits for an event to complete (all work recorded before it)
cudaEventElapsedTime(&time_elapsed,start,stop); // compute the time difference
cudaMemcpy(&host_a,dev_a,sizeof(host_a),cudaMemcpyDeviceToHost); // copy the result back to the CPU
cudaEventDestroy(start); //destory the event
cudaEventDestroy(stop);
cudaFree(dev_a);// release GPU memory
printf("执行时间:%f(ms)\n",time_elapsed); // prints "execution time: %f (ms)"
return 0 ;
}
|
14,226 | #include "includes.h"
#define DEBUG false
#define DEBUG_OUTPUT false
#define DEBUG_DELTA_K false
#define DEBUGNET false
#define DEBUG_TIMEING true
#define index(i,j,ld) (((j)*(ld))+(i))
int numBlocks = 1;
int blockSize = 256;
using namespace std;
/*
* Print Matrix on host
*/
/* In-place logistic sigmoid over a float array:
 * input[i] = 1 / (1 + e^(-input[i])), one thread per element with a
 * bounds guard.
 * Fixed: the body used double literals and exp(), which silently promoted
 * the whole expression to double-precision math for every element; the
 * single-precision expf()/1.0f forms keep the kernel in float as intended. */
__global__ void sigmoid(float* input, int num_elements){
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid < num_elements)
{
float value = 1.0f / (1.0f + expf(-input[tid]));
input[tid] = value;
}
}
14,227 | /*
Jaitirth Jacob - 13CO125 Vidit Bhargava - 13CO151
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Fill c with the squares of the thread indices: c[t] = t * t.
// Fixed: the product was computed in 32-bit int and only then widened to
// long long — exact for the 0..1023 range the host launches, but an
// overflow trap for larger blocks; the cast now widens before multiplying.
__global__ void square(long long int *c)
{
    long long int t = threadIdx.x;
    c[threadIdx.x] = t * t;
}
#define N 1024
/* Allocate N long long slots on the GPU, have one block of 1024 threads
 * write t*t into each, copy the result back and spot-check the last element.
 * Fixed: c[1023] is a long long but was printed with %d, and the int
 * expression 1023*1023 was printed with %ld — both are mismatched format
 * specifiers (undefined behavior); they are now %lld and %d. */
int main(void)
{
    long long int *c;
    long long int *d_c;
    int size = N * sizeof(long long int);
    //Allocate memory for array in GPU
    cudaMalloc((void **)&d_c, size);
    //Allocate memory on host
    c = (long long int *)malloc(size);
    //Launch square() kernel on GPU
    square<<<1,1024>>>(d_c);
    //Copy result back to host (cudaMemcpy blocks until the kernel finishes)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    printf("c[1023]= %lld\n", c[1023]);
    printf("1023*1023 = %d\n", 1023*1023);
    //Cleanup
    free(c);
    cudaFree(d_c);
    return 0;
}
|
14,228 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
//A kernel calculates the sum of the threadidx and blockidx
// Store threadIdx.x + blockIdx.x at each covered position of `data`,
// guarding against positions past `size`.
__global__ void sumKernel(int* data, int size)
{
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos < size) {
        data[pos] = threadIdx.x + blockIdx.x;
    }
}
// Host driver: run sumKernel over a 16-element array (2 blocks x 8 threads),
// copy the result back and print one value per line.
int main(int argc, char* argv[])
{
    const int totalSize = 16;
    const int blockSize = 2;
    const int threadSize = 8;
    int hostArr[totalSize];
    // Device buffer for the kernel output.
    int *dArray;
    cudaMalloc((void **)&dArray, sizeof(int) * totalSize);
    sumKernel<<<blockSize, threadSize>>>(dArray, totalSize);
    // The blocking copy also waits for the kernel to finish.
    cudaMemcpy(hostArr, dArray, sizeof(int) * totalSize, cudaMemcpyDeviceToHost);
    for (int i = 0; i < totalSize; i++) {
        printf("%d\n", hostArr[i]);
    }
    cudaFree(dArray);
    return 0;
}
|
14,229 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
using namespace std;
#define imin(a,b) (a<b?a:b)
const int N = 128;
const int threadsPerBlock = 128;
const int blocksPerGrid = 1;
// Experimental half-array pairwise add over a[0..N): the upper half of the
// thread block computes a[tid - N/2] + a[tid], the lower half computes
// a[tid] + a[tid + N/2], each writing back into a[tid].
// NOTE(review): the inner j-loop stores to a[N + j] for j < tid (up to
// a[N + tid - 1]), so the device buffer must be larger than N elements —
// verify the host-side allocation.
// NOTE(review): __syncthreads() sits inside one branch of a divergent `if`
// (only threads with tid < N/2 reach it), which is undefined behavior, and
// the two halves read/write overlapping slots with most barriers commented
// out — results are racy as written. This looks like scratch/debug code.
__global__ void dot(int *a) {
//int gid = threadIdx.x + blockIdx.x * blockDim.x;
//int i = a[gid];
//int j = b[gid];
//int k = i + j;
int tid = threadIdx.x;
for (int i = 0; i < 1; i ++) {
if (tid >= N/2){
int t0=0;
int t1=0;
for ( int j = 0 ;j < tid; j++) {
a[N + j] = a[tid];
}
t0 = a[tid - N/2];
t1 = a[tid];
//__syncthreads();
a[tid] = t0+t1;
//__syncthreads();
} else {
__syncthreads();
int t0 = a[tid];
int t1 = a[tid + N/2];
//__syncthreads();
a[tid] = t0 + t1;
//__syncthreads();
}
//__syncthreads();
}
// else
// c[gid] = 1;
// c[gid] = c[gid] + 1;
}
//__global__ void dot2(int *a, int *b, int*c) {
//int gid = threadIdx.x + blockIdx.x * blockDim.x;
//int i = a[gid];
//int j = b[gid];
//int k = i + j;
//}
//__global__ void mykernel(int *data){
// atomicAdd(data, 10);
//}
// Host driver for the experimental `dot` kernel: initialize a = 0..N-1,
// run the kernel, and print the two halves of the transformed array.
// Fixed: the dot kernel stores scratch values into a[N .. N + tid - 1]
// (tid < threadsPerBlock == N), so the device buffer must hold 2*N ints;
// the original allocated only N and those stores ran past the end of the
// allocation (heap corruption on the device).
int main(){
    int *a;
    int *dev_a;
    a = new int[N];
    for (int i = 0; i < N; i++) {
        a[i] = i;
    }
    cudaMalloc((void **)&dev_a, sizeof(int) * 2 * N);
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a);
    cudaMemcpy( a, dev_a, N*sizeof(int), cudaMemcpyDeviceToHost);
#define sum_sq(x) (x*(x+1)*(2*x+1)/6)
    for (int i = 0;i < N/2; i++)
        printf("%d %d\n", a[i], a[i + N/2]);
    cudaFree(dev_a);
    delete[] a;
}
|
14,230 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
// Draw one truncated-normal sample per thread: x[idx] ~ N(mu[idx], sigma[idx]^2)
// restricted to [lo[idx], hi[idx]].
// Strategy: up to `maxtries` naive rejection draws; if none is accepted,
// fall back to Robert's exponential-proposal rejection sampler for the
// one-sided tail, flipping the sign for (-inf, b)-style truncations.
// If even the fallback fails after 10000 tries, NaN is stored.
// rng_a/rng_b/rng_c parameterize the per-thread curand seed/sequence.
__global__ void
rtruncnorm_kernel(
float *x, int n,
float *mu, float *sigma,
float *lo, float *hi,
int maxtries, int rng_a,
int rng_b, int rng_c)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < n){
// Setup the RNG:
curandState_t rng;
curand_init(rng_a+rng_b*idx,rng_c,0,&rng);
// Draw sample
int ntries = 0;
int accepted = 0;
float ran;
while(!accepted and ntries < maxtries){
ran = mu[idx]+sigma[idx]*curand_normal(&rng);
ntries += 1;
if(ran >= lo[idx] and ran <= hi[idx]){
accepted = 1;
}
}
// Use Robert Method if that didn't work
if(!accepted){
// my code is set up to sample only (a,infty), so if it's a (-infty,b) sample we want, then we sample from (-b,infty) and reverse the sign after
// NOTE(review): this branch looks inverted — when hi is infinite the domain
// is (lo, inf) and no sign flip should be needed, yet the flip happens in
// the infinite-hi branch (where mu - hi is then -inf). Confirm against the
// host-side truncation conventions.
int rev_sign = 0;
float lower;
if(isfinite(hi[idx])){
lower = lo[idx]-mu[idx];
}else{
lower = mu[idx]-hi[idx];
rev_sign = 1;
}
float alpha = (lower+sqrtf(lower*lower+4))/2;
float z;
int ntries = 0;
// I may well have done something wrong...but for some datasets, this while loop never ended if I didn't set a max # of tries.
while(!accepted and ntries < 10000L){
ntries += 1;
float psi;
// sample uniform, then use inverse cdf to get sample from exponential distribution:
z = lower-logf(curand_uniform(&rng))/alpha;
if(lower<alpha){
psi = expf(-powf((alpha-z),2)/2);
}else{
psi = expf(-powf((alpha-z),2)/2)*expf(powf((lower-alpha),2)/2);
}
float u = curand_uniform(&rng);
if(u<psi){
accepted = 1;
}
}
if(rev_sign){
ran = mu[idx]-z;
}else{
ran = mu[idx]+z;
}
// If the Robert method failed to accept in 10000 tries:
if(!accepted){
ran = CUDART_NAN_F;
}
}
x[idx] = ran;
}
return;
}
} // END extern "C"
|
14,231 | /* Device code for the Trap, yo */
#define THREADS_PER_BLOCK 128
#define NUM_TRAPEZOIDS 8000000
#ifndef _TRAP_KERNEL_H_
#define _TRAP_KERNEL_H_
//ITS A TRAP!
/*------------------------------------------------------------------
* Function: f - for function
* Purpose: Compute a value that will knock your socks off
* Input args: x - yep, that's it.
* Output: (x+1)/sqrt(x*x + x + 1) or whateva, eh.
*/
//Ready for this?
//Function as a macro...
//What what!?
//Avoids overhead of function calls
#define f(x) (((x) + 1)/sqrt((x)*(x) + (x) + 1))
//Though somewhere I did read that cuda inlines all functions anyway...
// Compensated (Kahan) summation over input[start_index..end_index] stepping
// by `increment`; writes the total into *result. The running compensation
// term `c` recovers the low-order bits that a plain accumulation would drop.
__device__ void kahan_sum(float * result, float * input, int start_index, int end_index, int increment)
{
/* Thanks wikipedia
function KahanSum(input)
var sum = 0.0
var c = 0.0 //A running compensation for lost low-order bits.
for i = 1 to input.length do
var y = input[i] - c //So far, so good: c is zero.
var t = sum + y //Alas, sum is big, y small, so low-order digits of y are lost.
c = (t - sum) - y //(t - sum) recovers the high-order part of y; subtracting y recovers -(low part of y)
sum = t //Algebraically, c should always be zero. Beware overly-aggressive optimising compilers!
//Next time around, the lost low part will be added to y in a fresh attempt.
return sum
*/
//Volatile to avoid being compiled out
volatile float sum = 0.0;
volatile float c = 0.0;
int i;
for(i=start_index; i <=end_index; i+=increment)
{
float y = input[i] - c;
float t = sum + y;
c = (t-sum) - y;
sum = t;
}
*result = sum;
//Original non-kahan
/*
float sum = 0.0;
int i;
for(i=start_index; i <=end_index; i+=increment)
{
sum += input[i];
}
*result = sum;
*/
}
// Slow-path stage 1: one thread per trapezoid sample. Thread tx evaluates
// f at x_k = a + k*h with k = tx + 1 and stashes the value in results[tx];
// the reduction happens in a separate kernel. (b and n are unused here but
// kept for signature parity with the other trap kernels.)
__global__ void trap_kernel_slow(float a, float b, int n, float h, float * results)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = tid + 1;
    const float x = a + k * h;
    results[tid] = f(x);
}
// Slow-path stage 2: single-threaded final reduction. Thread 0 adds the
// endpoint average (f(a)+f(b))/2 to the n-1 precomputed samples in global
// memory and scales the total by the trapezoid width h.
__global__ void trap_kernel_slow_sum(float * sum,float a, float b, int n, float h, float * results)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid != 0)
        return;
    float integral = (f(a) + f(b)) / 2.0;
    for (int k = 1; k <= n - 1; k++)
        integral += results[k - 1];
    *sum = integral * h;
}
// Fast-path final reduction: launched as a single block of THREADS_PER_BLOCK
// threads. Each thread Kahan-sums its contiguous slice of the per-block
// partials in global memory; after the barrier, thread 0 Kahan-sums the
// per-thread values, adds the endpoint term (f(a)+f(b))/2, and scales by h
// to produce the final integral in *end_result.
__global__ void trap_kernel_fast_sum(float a, float b, float h, float * end_result, int global_mem_floats_per_thread, int global_mem_floats, float * global_mem_float_array)
{
//Thread id
int tx = blockIdx.x * blockDim.x + threadIdx.x;
//First part is to sum/collect global memory values into shared mem
//There is only one block
//global_mem_floats_per_thread is the number of floats
//Calculate this threads range
// tx 0, 0 to (global_mem_floats_per_thread-1)
// tx 1, global_mem_floats_per_thread to ...
int global_mem_start_index = (tx *global_mem_floats_per_thread);
int global_mem_end_index = global_mem_start_index + global_mem_floats_per_thread -1;
//There are maximum limits (more threads are created then needed...)
//Only go up to the number of global mem floats (-1 for index)
int global_mem_index_max = global_mem_floats-1;
//Summming over values in global_mem_float_array
float thread_result = 0;
//A weeee bit of thread divergence
//Keeps us from summing too far
int summing_index_max;
if(global_mem_end_index<=global_mem_index_max)
{
//No range limiting
summing_index_max = global_mem_end_index;
}
else
{
//Limit range to max
summing_index_max = global_mem_index_max;
}
//Each thread does Kahan sum for these global memory vals
kahan_sum(&thread_result, global_mem_float_array,global_mem_start_index, summing_index_max,1);
//This threads result is in thread_result
//Copy this into spot in shared mem
//Initially shared mem must be the size the number of threads used
//As each thread produces a result
//(and only one block so = threads per block)
__shared__ float thread_vals[THREADS_PER_BLOCK];
thread_vals[tx] = thread_result;
//Sync here before next step (one block so all threads sync)
__syncthreads();
//I implemented a tree based sum
//Even made this nice figure...
/*
| ----x --- |
* * * * * * * *
|s s| s s | s s| s s| s s| s s| s s| s s|
|s s | s s | s s | s s | <<Level 1
|s s | s s | <<Level 2
|s s | <<Level 3
s <<Level 4
*/
//But!... there are so little things to sum at this step
// that it was a big waste of time
//As doing the sum in just one thread is just as fast
//So here it is with just one thread:
//I can provide the other code if requested
if(tx==0)
{
float ret_val = 0;
kahan_sum(&ret_val, &(thread_vals[0]),0,THREADS_PER_BLOCK-1, 1);
*end_result = (ret_val + ((f(a) + f(b))/2.0))*h;
return;
}
else
{
return;
}
}
// Fast-path stage 1: every thread evaluates f at its trapezoid point
// (k = tx + 1, x = a + k*h) and stores the value in block shared memory;
// after the barrier, thread 0 of each block Kahan-sums the in-range portion
// of its block's values and writes one partial per block to
// gpu_block_results. Out-of-range trailing threads still contribute shared
// values but are excluded from the sum via the max_tx clamp.
__global__ void trap_kernel_fast(int n, float a, float h, float * gpu_block_results)
{
//Each thread does one iteration, placing value in shared mem
//Get a thread id
int tx = blockIdx.x * blockDim.x + threadIdx.x;
//k starts at one so add one
int k = tx + 1;
float x = a+k*h;
float val = f(x);
//Write this into shared mem location
//Shared mem for this thread block
//Each thread writes one float so need that many
__shared__ float thread_vals[THREADS_PER_BLOCK];
//Use thread index to write into block shared mem
thread_vals[threadIdx.x] = val;
//Sync all threads in this block
__syncthreads();
//One thread from this block sums vals into its local mem
float thread_block_sum = 0.0;
if(threadIdx.x == 0)
{
//Sum values in shared mem, store in local mem
//Don't sum too far, some threads are uneeded
//Individual threads, (tx's) span a range
//Only some of them are valid
//Originally (k = 1; k <= n-1; k++)
//Which is tx = 0 to tx <= (n-2)
//int min_tx = 0;
int max_tx = (n-2);
//Calculate equivalent tx range for this block
//Overall low and high tx (thread id/index) bound for this block
int low_tx = blockIdx.x * blockDim.x;
int high_tx = blockIdx.x * blockDim.x + THREADS_PER_BLOCK - 1;
//Make sure this block is in range at all
if(low_tx > max_tx)
{
//This block is out of range completely, unneeded
//Do nothing
return;
}
else
{
//Block is partially in range, check top
if(high_tx > max_tx)
{
//High range goes too high
//Limit to max
high_tx = max_tx;
}
//Now have high and low tx values
//Convert these back to indices into block shared thread_vals[]
// ( 0->THREADS_PER_BLOCK-1 values )
int idx_start = low_tx - (blockIdx.x * blockDim.x); //0 always
int idx_end = high_tx - (blockIdx.x * blockDim.x);
kahan_sum(&thread_block_sum, &(thread_vals[0]), idx_start, idx_end,1);
}
//Same thread writes that local mem to a global mem spot
gpu_block_results[blockIdx.x] = thread_block_sum;
}
}
#endif // #ifndef _TRAP_KERNEL_H_
|
14,232 | #include "includes.h"
//---------------------------------------------------------------------------------
//---------------------------------------------------------------------------------
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** A = M x N **** AxB=C
//**** B = N x K ****
//**** C = M x K ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// Problem dimensions for the product below: A is M x N, B is N x K, C is M x K.
static const int M = 3;
static const int N = 5;
static const int K = 4;
// Edge length of the square shared-memory tile used by MatrixMulKernel;
// the launch's blockDim must match (TILE_WIDTH, TILE_WIDTH).
static const int TILE_WIDTH = 2;
using namespace std;
//---------------------------------------------------------------------------------
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
__global__ void MatrixMulKernel(int ARows,int ACols, int BRows, int BCols, int CRows, int CCols,unsigned int* A_d, unsigned int *B_d, unsigned int *C_d) {
	// Tiled matrix multiply C = A * B using TILE_WIDTH x TILE_WIDTH
	// shared-memory tiles. Expects a 2-D launch whose blockDim equals
	// (TILE_WIDTH, TILE_WIDTH); one thread computes one element of C.
	const int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
	const int col = blockIdx.x * TILE_WIDTH + threadIdx.x;

	__shared__ int tileA[TILE_WIDTH][TILE_WIDTH];
	__shared__ int tileB[TILE_WIDTH][TILE_WIDTH];

	int acc = 0;
	// ceil(ACols / TILE_WIDTH) tile steps along the shared dimension.
	const int numTiles = (TILE_WIDTH + ACols - 1) / TILE_WIDTH;
	for (int t = 0; t < numTiles; ++t) {
		// Stage one tile of A and one tile of B; zero-pad out-of-range
		// entries so the inner product needs no bounds checks.
		const int aCol = t * TILE_WIDTH + threadIdx.x;
		tileA[threadIdx.y][threadIdx.x] =
			(aCol < ACols && row < ARows) ? A_d[row * ACols + aCol] : 0;

		const int bRow = t * TILE_WIDTH + threadIdx.y;
		tileB[threadIdx.y][threadIdx.x] =
			(bRow < BRows && col < BCols) ? B_d[bRow * BCols + col] : 0;

		__syncthreads();  // both tiles fully staged before anyone reads

		for (int k = 0; k < TILE_WIDTH; ++k)
			acc += tileA[threadIdx.y][k] * tileB[k][threadIdx.x];

		__syncthreads();  // finish reads before the next pass overwrites tiles
	}

	if (row < CRows && col < CCols)
		C_d[((blockIdx.y * blockDim.y + threadIdx.y) * CCols) +
		    (blockIdx.x * blockDim.x) + threadIdx.x] = acc;
}
14,233 | #include "includes.h"
__global__ void FullyConnectedShiftKernel( float *weightPtr, float *biasPtr, float *shiftedWeightsPtr, float *shiftedBiasPtr, float *avgWeightGradPtr, float *avgBiasGradPtr, float *dropoutMaskPtr, int prevLayerSize, int thisLayerSize )
{
	// Builds "shifted" copies of one fully connected layer's weights and
	// biases by applying the averaged gradients. One thread per neuron j of
	// this layer; neurons whose dropout mask is non-zero are left untouched.
	// j: current-layer neuron id, derived from a 2-D grid of 1-D blocks.
	const int j = blockDim.x * blockIdx.y * gridDim.x   // rows preceding current row in grid
	            + blockDim.x * blockIdx.x               // blocks preceding current block
	            + threadIdx.x;
	if (j >= thisLayerSize)
		return;
	if (dropoutMaskPtr[j])
		return;  // dropped-out neuron: skip both weight and bias shift

	// Weight (i, j) lives at i * thisLayerSize + j, so walking over the
	// previous layer advances the index by thisLayerSize each step.
	int index = j;
	for (int i = 0; i < prevLayerSize; i++)
	{
		shiftedWeightsPtr[index] = weightPtr[index] + avgWeightGradPtr[index]; // TODO: Check if it is correct to add here, or if it should be subtracted
		index += thisLayerSize;
	}
	// Bias gradient is applied with the opposite sign (see TODO above).
	shiftedBiasPtr[j] = biasPtr[j] - avgBiasGradPtr[j];
}
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
void error(char const *str)
{
	// Print a fatal diagnostic to stderr and terminate with failure status 1.
	fprintf(stderr, "%s\n", str);
	exit(1);
}
void cuda_check(cudaError_t err, char const *str)
{
	// Report a failed CUDA runtime call to stderr. Note: this only logs —
	// execution continues after the error.
	if (err == cudaSuccess)
		return;
	fprintf(stderr, "%s: CUDA error %d (%s)\n",
	        str, err, cudaGetErrorString(err));
}
__host__ __device__
float4 operator+(const float4 &a, const float4 &b)
{
	// Component-wise sum of two float4 vectors.
	float4 r;
	r.x = a.x + b.x;
	r.y = a.y + b.y;
	r.z = a.z + b.z;
	r.w = a.w + b.w;
	return r;
}
__host__ __device__
float4 operator-(const float4 &a, const float4 &b)
{
	// Component-wise difference of two float4 vectors.
	float4 r;
	r.x = a.x - b.x;
	r.y = a.y - b.y;
	r.z = a.z - b.z;
	r.w = a.w - b.w;
	return r;
}
__global__
void init_vec(int nels, float4* __restrict__ d_vec1)
{
	// Fill d_vec1 with the ramp 0,1,2,... packed four floats per element:
	// element Idx holds {4*Idx, 4*Idx+1, 4*Idx+2, 4*Idx+3}.
	// NOTE(review): there is no bounds check here — the launch must cover
	// exactly the allocated number of float4 elements, no more.
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	const int base = idx * 4;
	d_vec1[idx] = make_float4(base, base + 1, base + 2, base + 3);
}
__global__
// Element-wise product feeding a matrix-product reduction, scalar-float
// variant: each thread computes 4 consecutive scalar products and packs them
// into one float4 of res_vec. d_vec1/d_vec2 are viewed as plain float arrays.
// The c*/j* expressions map flat output position i into a (row of matrix 1,
// column of matrix 2) pairing — presumably so a later row-reduction yields
// the matrix product; TODO(review): confirm the intended mapping, it is not
// derivable from this kernel alone.
void multi_vec2(int nels,int n_row1,int n_col1,int n_row2,int n_col2,float4* __restrict__ res_vec,
float* __restrict__ d_vec1,float* __restrict__ d_vec2)
{
int Idx = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread owns the 4 scalar slots starting at i = 4 * Idx.
int i= Idx*4;
int r_res,c_res;
r_res=n_row1;
c_res=n_row2*n_col2;
if(i<(r_res*c_res)){
// Source indices for scalar slot i (note: `(i % n_col1) % n_col1` is
// just `i % n_col1`; the outer % is redundant).
int c= ((int)(i/c_res))*n_row1 + ((int)(i%n_col1))%n_col1;
int j= ((int)(((int)(i%c_res))/n_row2) + (((int)(i%c_res))%n_row2)*n_col2);
res_vec[Idx].x=d_vec1[c]*d_vec2[j];
// Same mapping repeated for slots i+1 .. i+3. NOTE(review): only slot i
// is bounds-checked; i+1..i+3 may read/write past r_res*c_res when the
// total is not a multiple of 4 — verify against launch sizes.
int c1= ((int)((i+1)/c_res))*n_row1 + ((int)((i+1)%n_col1))%n_col1;
int j1= ((int)(((int)((i+1)%c_res))/n_row2) + (((int)((i+1)%c_res))%n_row2)*n_col2);
res_vec[Idx].y=d_vec1[c1]*d_vec2[j1];
int c2= ((int)((i+2)/c_res))*n_row1 + ((int)((i+2)%n_col1))%n_col1;
int j2= ((int)(((int)((i+2)%c_res))/n_row2) + (((int)((i+2)%c_res))%n_row2)*n_col2);
res_vec[Idx].z=d_vec1[c2]*d_vec2[j2];
int c3= ((int)((i+3)/c_res))*n_row1 + ((int)((i+3)%n_col1))%n_col1;
int j3= ((int)(((int)((i+3)%c_res))/n_row2) + (((int)((i+3)%c_res))%n_row2)*n_col2);
res_vec[Idx].w=d_vec1[c3]*d_vec2[j3];
}
}
__global__
// Element-wise product feeding a matrix-product reduction, float4 variant:
// res_vec[i] = d_vec1[c] * d_vec2[j] component-wise, where c/j map the flat
// output index i to a (row, column) pairing of the two operands.
// NOTE(review): here c uses n_col1 as the row stride while multi_vec2 uses
// n_row1 for the same role — confirm which of the two is intended.
void multi_vec(int nels,int n_row1,int n_col1,int n_row2,int n_col2,float4* __restrict__ res_vec,
float4* __restrict__ d_vec1,float4* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int r_res,c_res;
// Result has n_row1 rows and n_row2*n_col2 columns (in float4 elements).
r_res=n_row1;
c_res=n_row2*n_col2;
if(i<(r_res*c_res)){
// `(i % n_col1) % n_col1` is just `i % n_col1`; the outer % is redundant.
int c= ((int)(i/c_res))*n_col1 + ((int)(i%n_col1))%n_col1;
int j= ((int)(((int)(i%c_res))/n_row2) + (((int)(i%c_res))%n_row2)*n_col2);
// Component-wise product of the paired float4 elements.
res_vec[i].x=d_vec1[c].x*d_vec2[j].x;
res_vec[i].y=d_vec1[c].y*d_vec2[j].y;
res_vec[i].z=d_vec1[c].z*d_vec2[j].z;
res_vec[i].w=d_vec1[c].w*d_vec2[j].w;
}
}
__global__
void scalareMatrice( float4* __restrict__ res_vec,float scalar,float4* __restrict__ d_vec)
{
	// Scale a float4 vector: res_vec[i] = scalar * d_vec[i], one element
	// per thread. NOTE(review): no bounds check — the launch must cover
	// exactly the allocated element count.
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	const float4 v = d_vec[i];
	res_vec[i] = make_float4(v.x * scalar, v.y * scalar, v.z * scalar, v.w * scalar);
}
__global__
// Partial reduction step (variant 2): each thread folds up to four float4
// elements of d_vec1 into a single scalar and stores it in ONE lane of
// res_vec[idx], selected by idx % 4. NOTE(review): the other three lanes of
// res_vec[idx] are left unwritten here — presumably filled by neighboring
// threads or later passes; confirm against the caller.
void reduction_row2(int nels,int l_elem,float4* res_vec, float4* d_vec1)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Zero element used to pad when this thread loads fewer than 4 inputs.
const float4 noels = make_float4(0.0, 0.0, 0.0, 0.0);
// NOTE(review): nquarts and elem are computed but never used.
const int nquarts = nels*4;
const int elem=nels/l_elem;
//int i=idx*l_elem;
// Unlike reduction_row, the four loads here start at idx itself
// (consecutive elements idx .. idx+3).
int i0 = idx;
int i1 = idx + 1;
int i2 = idx + 2;
int i3 = idx + 3;
// NOTE(review): this barrier guards no shared-memory exchange (the kernel
// uses none), so it appears to be unnecessary.
__syncthreads();
// Load up to four float4 inputs, gated by the per-thread load factor
// l_elem (4/8/12/16 floats per thread); missing ones become zero.
float4 r0;
if(l_elem >= 4){
r0=d_vec1[i0];
}
else r0= noels;
float4 r1;
if(l_elem >= 8){
r1=d_vec1[i1];
}
else r1= noels;
float4 r2;
if(l_elem >= 12){
r2=d_vec1[i2];
}
else r2= noels;
float4 r3;
if(l_elem >= 16){
r3=d_vec1[i3];
}
else r3= noels;
// Pairwise vector sum, then horizontal sum of the four lanes below.
float4 v = (r0 + r1) + (r2 + r3);
if (idx < nels){
// Write the horizontal sum into the lane selected by idx % 4.
if(idx%4==0)
res_vec[idx].x = (v.x + v.y) + (v.z + v.w);
if(idx%4==1)
res_vec[idx].y = (v.x + v.y) + (v.z + v.w);
if(idx%4==2)
res_vec[idx].z = (v.x + v.y) + (v.z + v.w);
if(idx%4==3)
res_vec[idx].w = (v.x + v.y) + (v.z + v.w);
}
}
__global__
// Partial reduction step: thread idx folds up to four consecutive float4
// elements (starting at idx * l_elem/4) into one scalar and stores it in one
// lane of res_vec[idx/4], selected by idx % 4 — so four neighboring threads
// cooperatively fill one output float4. Used iteratively by main() to shrink
// a vector by a factor of l_elem per pass.
void reduction_row(int nels,int l_elem,float4* res_vec, float4* d_vec1)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Zero element used to pad when this thread loads fewer than 4 inputs.
const float4 noels = make_float4(0.0, 0.0, 0.0, 0.0);
// NOTE(review): nquarts and elem are computed but never used.
const int nquarts = nels*4;
const int elem=nels/l_elem;
// First input element owned by this thread (l_elem floats = l_elem/4 float4s).
int i=idx*(l_elem/4);
int i0 = i;
int i1 = i + 1;
int i2 = i + 2;
int i3 = i + 3;
// NOTE(review): this barrier guards no shared-memory exchange (the kernel
// uses none), so it appears to be unnecessary.
__syncthreads();
// Load up to four float4 inputs, gated by the per-thread load factor
// l_elem (4/8/12/16 floats per thread); missing ones become zero.
float4 r0;
if(l_elem >= 4){
r0=d_vec1[i0];
}
else r0= noels;
float4 r1;
if(l_elem >= 8){
r1=d_vec1[i1];
}
else r1= noels;
float4 r2;
if(l_elem >= 12){
r2=d_vec1[i2];
}
else r2= noels;
float4 r3;
if(l_elem >= 16){
r3=d_vec1[i3];
}
else r3= noels;
// Pairwise vector sum, then horizontal sum of the four lanes below.
float4 v = (r0 + r1) + (r2 + r3);
if (idx < nels){
// Four threads share output element x = idx/4; lane chosen by idx % 4.
int x= idx/4;
if(idx%4==0)
res_vec[x].x = (v.x + v.y) + (v.z + v.w);
if(idx%4==1)
res_vec[x].y = (v.x + v.y) + (v.z + v.w);
if(idx%4==2)
res_vec[x].z = (v.x + v.y) + (v.z + v.w);
if(idx%4==3)
res_vec[x].w = (v.x + v.y) + (v.z + v.w);
}
}
__global__
void transpose(int nrow,int ncols, float4* __restrict__ res_vec, float4* __restrict__ d_vec1)
{
	// Transpose an nrow x ncols matrix of float4 elements: element (r, c)
	// of the input becomes element (c, r) of the output. One element per
	// thread. NOTE(review): no bounds check — the launch must cover exactly
	// nrow * ncols elements.
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	const int col = i % ncols;
	const int row = i / ncols;
	res_vec[col * nrow + row] = d_vec1[row * ncols + col];
}
__global__
void vecsum(int nels, float4* __restrict__ res_vec, float4* __restrict__ d_vec1, float4* __restrict__ d_vec2)
{
	// Element-wise float4 addition via the overloaded operator+.
	// NOTE(review): no bounds check — launch must match the allocation.
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	res_vec[idx] = d_vec1[idx] + d_vec2[idx];
}
__global__
void vecdif(int nels, float4* __restrict__ res_vec, float4* __restrict__ d_vec1, float4* __restrict__ d_vec2)
{
	// Element-wise float4 subtraction via the overloaded operator-.
	// NOTE(review): no bounds check — launch must match the allocation.
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	res_vec[idx] = d_vec1[idx] - d_vec2[idx];
}
void stampa(float* matrice,int m){
	// Print the first m entries of the array to stdout, one per line,
	// preceded by a blank line.
	printf("\n");
	for (int k = 0; k < m; k++) {
		printf("%f \n", matrice[k]);
	}
}
// Driver for one step of a conjugate-gradient-style iteration on the GPU:
// builds A (N x N) and vectors B, X, computes pk = B - A*X, the step size
// ak = (pk' * pk) / (pk' * A * pk), updates X += ak * pk, and prints the
// result plus per-kernel timings.
// NOTE(review): every launch below is written <<<blockSize, numBlocks>>>,
// i.e. the 1024 thread count sits in the GRID-dimension slot and the block
// count in the BLOCK-dimension slot — that looks swapped relative to CUDA's
// <<<gridDim, blockDim>>> convention and, for small N, makes far more
// threads run than elements exist (the kernels have no bounds checks).
// Verify against the original author's intent before relying on results.
// NOTE(review): several buffers are allocated in units of sizeof(float) but
// written through float4 kernels; confirm each allocation covers the number
// of float4 elements the corresponding launch touches.
int main(int argc, char *argv[]){
// Device buffers (all float4-typed; see size notes above).
float4* matriceA;
float4* matriceB;
float4* matriceX;
float4* pk;
float4* trasposta;
float4* prodotto;
float4* somma;
float4* res;
float4* den;
float4* res0;
float4* res1;
float4* res2;
float4* red_den;
float* matrice;
float4* scalar;
float4* num;
float4* deno;
float ak;
int nels;
if (argc != 2) {
error("syntax: vecsum nels v");
}
int N = atoi(argv[1]);
if (N < 0) {
error("N < 0");
}
int M=1;
nels=N*N;
size_t memsize = nels*sizeof(float);
cudaError_t err;
// Device allocations (errors are logged, not fatal — see cuda_check).
err = cudaMalloc((void**)&matriceA, memsize);
cuda_check(err, "alloc matriceA");
err = cudaMalloc((void**)&matriceB, N*M*sizeof(float));
cuda_check(err, "alloc matriceB");
err = cudaMalloc((void**)&matriceX, N*sizeof(float));
cuda_check(err, "alloc matriceX");
// Pinned host buffers for the result and the ak numerator/denominator.
err = cudaMallocHost(&matrice, N*N*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMallocHost(&num, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMallocHost(&deno, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMalloc((void**)&somma,nels*M*sizeof(float));
cuda_check(err, "alloc somma");
err = cudaMalloc((void**)&res,M*N*N*sizeof(float));
cuda_check(err, "alloc res");
err = cudaMalloc((void**)&res0,N*M*N*sizeof(float));
cuda_check(err, "alloc res0");
err = cudaMalloc((void**)&prodotto,M*N*N*sizeof(float));
cuda_check(err, "alloc prodotto");
err = cudaMalloc((void**)&res1,M*N*sizeof(float));
cuda_check(err, "alloc res1");
err = cudaMalloc((void**)&res2,M*N*N*sizeof(float));
cuda_check(err, "alloc res2");
err = cudaMalloc((void**)&pk,M*N*sizeof(float));
cuda_check(err, "alloc pk");
err = cudaMalloc((void**)&trasposta,M*N*sizeof(float));
cuda_check(err, "alloc trasposta ");
err = cudaMalloc((void**)&den,M*N*sizeof(float));
cuda_check(err, "alloc den");
err = cudaMalloc((void**)&red_den,M*N*sizeof(float));
cuda_check(err, "alloc den");
err = cudaMalloc((void**)&scalar,M*N*sizeof(float));
cuda_check(err, "alloc scalar");
// Timing events. NOTE(review): pre_sum/post_sum are declared but never
// created or recorded.
cudaEvent_t pre_init, post_init, pre_sum, post_sum,pre_prodotto,post_prodotto,
pre_transpose,post_transpose,pre_scalar_matrice,post_scalar_matrice,pre_vecsum,post_vecsum,
pre_vecdif,post_vecdif;
err = cudaEventCreate(&pre_init, 0);
cuda_check(err, "create pre_init");
err = cudaEventCreate(&pre_prodotto, 0);
cuda_check(err, "create pre_sum");
err = cudaEventCreate(&pre_transpose, 0);
cuda_check(err, "create pre_traspose");
err = cudaEventCreate(&pre_scalar_matrice, 0);
cuda_check(err, "create pre_scalar_matrice");
err = cudaEventCreate(&pre_vecdif, 0);
cuda_check(err, "create pre_vecdif");
err = cudaEventCreate(&pre_vecsum, 0);
cuda_check(err, "create pre_vecsum");
err = cudaEventCreate(&post_init, 0);
cuda_check(err, "create post_init");
err = cudaEventCreate(&post_prodotto, 0);
cuda_check(err, "create post_sum");
err = cudaEventCreate(&post_transpose, 0);
cuda_check(err, "create post_traspose");
err = cudaEventCreate(&post_scalar_matrice, 0);
cuda_check(err, "create post_scalar_matrice");
err = cudaEventCreate(&post_vecdif, 0);
cuda_check(err, "create post_vecdif");
err = cudaEventCreate(&post_vecsum, 0);
cuda_check(err, "create post_vecsum");
const int blockSize = 1024;
int numBlocks = (nels/4 + blockSize - 1)/blockSize;
// Initialize A, B and X with ramp values (timed for A only).
cudaEventRecord(pre_init);
init_vec<<<blockSize,numBlocks>>>(nels, matriceA);
cudaEventRecord(post_init);
numBlocks = (M*N/4 + blockSize - 1)/blockSize;
init_vec<<<blockSize, numBlocks>>>(M*N, matriceB);
init_vec<<<blockSize, numBlocks>>>(M*N, matriceX);
int i;
// Determine the per-thread reduction load: N must be an exact power of
// one of 4, 8, 12 or 16; THREAD_LOAD keeps the largest base that works.
//calcolo i parametri della riduzione (compute the reduction parameters)
int THREAD_LOAD=0;
float n = N;
while (n > 1) {
n/=4;
if(n==1){
THREAD_LOAD=4;
}
}
n = N;
while (n > 1) {
n/=8;
if(n==1){
THREAD_LOAD=8;
}
}
n=N;
while (n > 1) {
n/=12;
if(n==1){
THREAD_LOAD=12;
}
}
n=N;
while (n > 1) {
n/=16;
if(n==1){
THREAD_LOAD=16;
}
}
if(THREAD_LOAD==0){
printf("Errore N deve essere una potenza di 4,8,12,16");
exit(0);
}
// NOTE(review): temp is never used; runtime_red_ms here is shadowed by a
// second declaration inside the loop below.
int j;
int c=N;
float* temp;
float runtime_red_ms;
int lr=0;
// lr = number of reduction passes needed to shrink N*N down to N.
int log=N*N;
while(log>N){
++lr;
log=log/THREAD_LOAD;
}
cudaEvent_t pre_red[lr], post_red[lr];
// Create one pre/post event pair per reduction pass.
//inizializzo gli eventi per la riduzione (initialize the reduction events)
for(i=0;i<lr;i++){
err = cudaEventCreate(&(pre_red[i]), 0);
cuda_check(err, "create pre_red");
err = cudaEventCreate(&(post_red[i]), 0);
cuda_check(err, "create post_red");
}
// Single iteration of the solver step (loop kept for future extension).
for(i=0;i<1;i++){
// somma = element-wise products for A * X.
numBlocks = (nels/4 + blockSize - 1)/blockSize;
cudaEventRecord(pre_prodotto);
multi_vec<<<blockSize, numBlocks>>>(nels*M/4,N,N/4,N/4,M,somma,matriceA,matriceX);
cudaEventRecord(post_prodotto);
// Reduce N*N partial products down to N (one value per row) in passes.
c=N*N;
int nels_red=0;
int cont=0;
while(c>N){
c/=THREAD_LOAD;
nels_red+=c;
numBlocks = (c + blockSize - 1)/blockSize;
cudaEventRecord(pre_red[cont]);
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,res0,somma);
cudaEventRecord(post_red[cont]);
err = cudaMemcpy(somma, res0, c*sizeof(float4), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
cont++;
}
printf("%d %d\n",lr,nels_red );
// pk = B - A*X (the residual direction).
numBlocks = ((N*M)/4 + blockSize - 1)/blockSize;
cudaEventRecord(pre_vecdif);
vecdif<<<blockSize, numBlocks>>>(N*M,pk,matriceB,res0);
cudaEventRecord(post_vecdif);
// trasposta = pk'.
numBlocks = (N*N/4 + blockSize - 1)/blockSize;
cudaEventRecord(pre_transpose);
transpose<<<blockSize, numBlocks>>>(N,M,trasposta,pk);
cudaEventRecord(post_transpose);
// Numerator of ak: pk' * pk, reduced to a single value in res1.
numBlocks = ((M*N)/4 + blockSize - 1)/blockSize;
multi_vec<<<blockSize, numBlocks>>>(N*M/4,M,N/4,N/4,M,prodotto,trasposta,pk);
c=N;
while (c>1) {
c/=THREAD_LOAD;
numBlocks = (c + blockSize - 1)/blockSize;
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,res1,prodotto);
err = cudaMemcpy(prodotto, res1, c*sizeof(float), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
}
// Denominator of ak: pk' * A * pk (res = pk'*A, then den = res * pk).
numBlocks = ((M*N*N*M)/4 + blockSize - 1)/blockSize;
multi_vec2<<<blockSize, numBlocks>>>(M*N*N*M/4,M,N,N,N,res,(float*)trasposta,(float*)matriceA);
c=N*N;
while (c>N) {
c/=THREAD_LOAD;
numBlocks = (c + blockSize - 1)/blockSize;
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,res2,res);
err = cudaMemcpy(res, res2, c*sizeof(float), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
}
numBlocks = ((N*N)/4 + blockSize - 1)/blockSize;
multi_vec<<<blockSize, numBlocks>>>(N*N/4 ,M,N/4,N/4,M,den,res2,pk);
c=N;
while (c>1) {
c/=THREAD_LOAD;
numBlocks = (c + blockSize - 1)/blockSize;
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,red_den,den);
err = cudaMemcpy(den, red_den, c*sizeof(float), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
}
// ak = numerator / denominator (first lane of each reduced vector).
err = cudaMemcpy(num, res1, 1*sizeof(float), cudaMemcpyDeviceToHost);
err = cudaMemcpy(deno, red_den, 1*sizeof(float), cudaMemcpyDeviceToHost);
ak=num[0].x/deno[0].x;
printf("%f\n",ak );
// X = X + ak * pk.
numBlocks = (N/4 + blockSize - 1)/blockSize;
cudaEventRecord(pre_scalar_matrice);
scalareMatrice<<<blockSize, numBlocks>>>(scalar,ak,pk);
cudaEventRecord(post_scalar_matrice);
numBlocks = ((N*M)/4 + blockSize - 1)/blockSize;
cudaEventRecord(pre_vecsum);
vecsum<<<blockSize, numBlocks>>>(N*M,matriceX,matriceX,scalar);
cudaEventRecord(post_vecsum);
// Copy the updated X back to the host and print it.
err = cudaMemcpy(matrice, matriceX, M*N*sizeof(float), cudaMemcpyDeviceToHost);
cuda_check(err, "create mem");
stampa(matrice,M*N);
// Gather per-kernel timings (implicitly synchronizes with the events).
float runtime_init_ms, runtime_prodotto_ms, runtime_red_ms,runtime_transpose_ms,runtime_scalar_matrice_ms,
runtime_vecdif_ms,runtime_vecsum_ms,runtime_red_count_ms;
err = cudaEventElapsedTime(&runtime_init_ms, pre_init, post_init);
cuda_check(err, "elapsed time init");
err = cudaEventElapsedTime(&runtime_prodotto_ms, pre_prodotto, post_prodotto);
cuda_check(err, "elapsed time prodotto");
runtime_red_count_ms=0;
for(j=0;j<lr;j++){
err = cudaEventElapsedTime(&runtime_red_ms, pre_red[j], post_red[j]);
cuda_check(err, "elapsed time reduction");
runtime_red_count_ms+=runtime_red_ms;
}
err = cudaEventElapsedTime(&runtime_transpose_ms, pre_transpose, post_transpose);
cuda_check(err, "elapsed time traspose");
err = cudaEventElapsedTime(&runtime_scalar_matrice_ms, pre_scalar_matrice, post_scalar_matrice);
cuda_check(err, "elapsed time scalar_matrice");
err = cudaEventElapsedTime(&runtime_vecdif_ms, pre_vecdif, post_vecdif);
cuda_check(err, "elapsed time vecdif");
err = cudaEventElapsedTime(&runtime_vecsum_ms, pre_vecsum, post_vecsum);
cuda_check(err, "elapsed time vecsum");
printf("init: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_init_ms, nels/runtime_init_ms/1.0e6, memsize/runtime_init_ms/1.0e6);
printf("prodotto: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_prodotto_ms, nels/runtime_prodotto_ms/1.0e6, memsize/runtime_prodotto_ms/1.0e6);
printf("reduction: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_red_count_ms, nels_red/runtime_red_count_ms/1.0e6, (nels_red*sizeof(float))/runtime_red_count_ms/1.0e6);
printf("transpose: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_transpose_ms, N/runtime_transpose_ms/1.0e6, (N*sizeof(float))/runtime_transpose_ms/1.0e6);
printf("scalareMatrice: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_scalar_matrice_ms, N/runtime_scalar_matrice_ms/1.0e6, (N*sizeof(float))/runtime_scalar_matrice_ms/1.0e6);
printf("vecdif: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecdif_ms, N/runtime_vecdif_ms/1.0e6, (N*sizeof(float))/runtime_vecdif_ms/1.0e6);
printf("vecsum: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecsum_ms, N/runtime_vecsum_ms/1.0e6, (N*sizeof(float))/runtime_vecsum_ms/1.0e6);
}
// Release all device and pinned-host memory.
cudaFree(matriceA);
cudaFreeHost(matrice);
cudaFree(somma);
cudaFree(res);
cudaFree(pk);
cudaFree(trasposta);
cudaFree(prodotto);
cudaFree(den);
cudaFree(res0);
cudaFree(res1);
cudaFree(res2);
cudaFree(red_den);
cudaFree(scalar);
cudaFree(matriceB);
cudaFree(matriceX);
cudaFreeHost(num);
cudaFreeHost(deno);
}
|
14,235 | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <signal.h>
#include <unistd.h>
//Spanish Parallel programming Contest 2017. Problem I - heterosolar
//Maximum coincidence with a mask in 2D. CUDA version.
//Schema for In/Out, validation and execution time
void generar(char *m, int t,int sup) {
	// Fill m[0..t-1] with pseudo-random letters scaled into 'a'..'a'+sup.
	// Deterministic for a given srand() seed set by the caller.
	for (int k = 0; k < t; k++) {
		m[k] = (char) (((1. * rand()) / RAND_MAX) * sup) + 'a';
	}
}
void escribir(char *m, int t) {
	// Debug helper: print the t characters of m separated by spaces,
	// followed by a newline.
	for (int k = 0; k < t; k++)
		printf("%c ", m[k]);
	printf("\n");
}
/*
c
c mseconds - returns elapsed milliseconds since Jan 1st, 1970.
c
*/
long long mseconds(){
	// Elapsed wall-clock milliseconds since Jan 1st, 1970 (Unix epoch).
	struct timeval now;
	gettimeofday(&now, NULL);
	return (long long)(now.tv_sec) * 1000 + now.tv_usec / 1000;
}
// SIGALRM handler: the contest time limit (set via alarm() in main) has
// expired — report it and abort the process.
static void alarm_handler(int sig) {
fprintf(stderr, "Time Limit Exceeded\n");
abort();
}
extern int sec(int,char *,int,char *);
// Contest driver: reads the number of test cases from stdin; for each case
// generates a random N x N text matrix A and M x M mask B, runs the external
// matcher sec(), and prints its result. Accumulated time over all cases but
// the first (warm-up) is printed at the end.
int main(int argc,char *argv[]) {
	int N,M,cuantos;
	int semilla,upper;
	char *A,*B;
	long long ti,tf,tt=0;
	//FILE *stats_file = fopen("stats", "w");
	// Install the time-limit handler and arm the alarm.
	struct sigaction sact;
	sigemptyset(&sact.sa_mask);
	sact.sa_flags = 0;
	sact.sa_handler = alarm_handler;
	sigaction(SIGALRM, &sact, NULL);
	alarm(40); /* time limit */
	scanf("%d",&cuantos);
	for(int i=0;i < cuantos;i++)
	{
		scanf("%d",&N); // Matrices size
		scanf("%d",&M); // mask size
		scanf("%d",&semilla); // seed for random generation
		scanf("%d",&upper); // upper value for random generation
		// Space for the matrix and the mask.
		// BUG FIX: these are char buffers; the original allocated them with
		// sizeof(double), requesting 8x the needed memory.
		A = (char *) malloc(sizeof(char)*N*N);
		B = (char *) malloc(sizeof(char)*M*M);
		srand(semilla);
		generar(A,N*N,upper);
		generar(B,M*M,upper);
		/*#ifdef DEBUG
		escribir(A,N*N);
		escribir(B,M*M);
		#endif*/
		ti=mseconds();
		printf("%d\n",sec(N,A,M,B));
		tf=mseconds();
		if(i!=0) tt+=tf-ti; // the first case is a warm-up and is not timed
		free(A);
		free(B);
	}
	// fprintf(stats_file, "%Ld\n", tt);
	// fclose(stats_file);
	// BUG FIX: "%Ld" is not a standard printf conversion for long long
	// (undefined behavior); "%lld" is.
	printf("%lld\n", tt);
	return 0;
}
14,236 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// Minimal kernel: compute a + b on the device and store the result in *r_d.
// Launched below with a single thread (<<<1,1>>>).
__global__ void colonel(int a, int b, int* r_d) {
*r_d = a + b;
}
// Adds two host integers on the GPU and prints the result.
int main() {
	int a = 2;
	int b = 3;
	int r = 0, * r_d;
	cudaMalloc((void**)&r_d, sizeof(int));
	// Seed the device slot with 0 (the kernel overwrites it anyway).
	cudaMemcpy(r_d, &r, sizeof(int), cudaMemcpyHostToDevice);
	colonel <<<1,1>>> (a, b, r_d);
	// This blocking copy also synchronizes with the kernel.
	cudaMemcpy(&r, r_d, sizeof(int), cudaMemcpyDeviceToHost);
	printf("resultado=%d\n", r);
	cudaFree(r_d);
	system("pause");
	// BUG FIX: the original returned -1, reporting failure to the OS even
	// though the program succeeded.
	return 0;
}
14,237 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iterator>
#include <iostream>
#include <algorithm>
#include <fstream>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <stdint.h>
using namespace std;
#define SIZEOFBLOCK 256
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
void printToFile(int* arr, int n) {
	// Write the n array values, one per line, to the fixed output path.
	// The stream is closed automatically when `out` leaves scope.
	ofstream out;
	out.open("../out/output");
	for (int k = 0; k < n; k++)
		out << arr[k] << endl;
}
void printArr(int* arr, int n) {
	// Debug helper: print the n array values to stdout, one per line.
	for (int k = 0; k < n; k++)
		cout << arr[k] << endl;
}
void rng(int* arr, int n) {
	// Fill arr with pseudo-random non-negative ints. The seed is fixed so
	// every run (and every call) produces the same sequence.
	const int seed = 13516154;
	srand(seed);
	for (long k = 0; k < n; k++)
		arr[k] = (int)rand();
}
// parallel radix sort
// get specific bit at index = idx
// parallel radix sort
// Extract bit `idx` of every element: flag[i] = 1 if the bit is set, else 0.
// Grid-stride loop: thread tid handles elements tid, tid+stride, ...
// BUG FIX: the original made EVERY thread loop over all n elements, so the
// whole flag array was written redundantly by every launched thread instead
// of the work being divided. The original's bit test `(arr[i] >> idx) & 1 == 1`
// also parsed as `& (1 == 1)` due to precedence (same value, but misleading).
__global__ void generateFlag(int* flag, int* arr, int n, int idx) {
	int stride = gridDim.x * blockDim.x;
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
		flag[i] = (arr[i] >> idx) & 1;
	}
}
// create I-down array
int* generateIDown(int* flag, int n) {
int* iDown = (int*)malloc(n * sizeof(int));
int val = 0;
iDown[0] = val;
for (int i = 1; i < n; i++) {
if (flag[i - 1] == 0) {
val++;
}
iDown[i] = val;
}
return iDown;
}
// create I-up array
int* generateIUp(int* flag, int n) {
int* iUp = (int*)malloc(n * sizeof(int));
int val = n - 1;
iUp[n - 1] = val;
for (int i = n - 2; i >= 0; i--) {
if (flag[i + 1] == 1) {
val--;
}
iUp[i] = val;
}
return iUp;
}
// Pick each element's destination: iDown[i] for zero-flagged elements,
// iUp[i] for one-flagged ones. Grid-stride loop over the n elements.
// BUG FIX: the original made EVERY thread loop over all n elements,
// writing the whole output redundantly instead of dividing the work.
__global__ void generateShouldIndex(int* shouldIndex, int* flag, int* iDown, int* iUp, int n) {
	int stride = gridDim.x * blockDim.x;
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
		shouldIndex[i] = (flag[i] == 0) ? iDown[i] : iUp[i];
	}
}
// Scatter arr into its post-split order: element i moves to position
// shouldIndex[i], where the destination indices are computed on the GPU
// from flag / iDown / iUp.
void permute(int* arr, int* flag, int* iDown, int* iUp, int n) {
	int* shouldArr = (int*)malloc(n * sizeof(int));
	int numBlocks = (n + SIZEOFBLOCK - 1) / SIZEOFBLOCK;
	int* d_flag;
	int* h_shouldIndex = (int*)malloc(n * sizeof(int));
	int* d_shouldIndex;
	int* d_iDown;
	int* d_iUp;
	cudaMalloc(&d_shouldIndex, n * sizeof(int));
	cudaMalloc(&d_flag, n * sizeof(int));
	cudaMalloc(&d_iDown, n * sizeof(int));
	cudaMalloc(&d_iUp, n * sizeof(int));
	cudaMemcpy(d_flag, flag, n * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_iDown, iDown, n * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_iUp, iUp, n * sizeof(int), cudaMemcpyHostToDevice);
	generateShouldIndex<<<numBlocks,SIZEOFBLOCK>>>(d_shouldIndex, d_flag, d_iDown, d_iUp, n);
	cudaDeviceSynchronize();
	cudaMemcpy(h_shouldIndex, d_shouldIndex, n * sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(d_flag);
	cudaFree(d_iDown);
	cudaFree(d_iUp);
	cudaFree(d_shouldIndex);
	// Apply the permutation on the host, then copy back in place.
	for (int i = 0; i < n; i++) {
		shouldArr[h_shouldIndex[i]] = arr[i];
	}
	for (int i = 0; i < n; i++) {
		arr[i] = shouldArr[i];
	}
	// BUG FIX: the original leaked both host scratch buffers on every call
	// (2 buffers x 32 radix passes per sort).
	free(shouldArr);
	free(h_shouldIndex);
}
// One radix pass: stably partition arr by bit `idx` (zero-bit elements
// first, one-bit elements last, relative order preserved).
void split(int* arr, int n, int idx) {
	int numBlocks = (n + SIZEOFBLOCK - 1) / SIZEOFBLOCK;
	int* h_flag = (int*)malloc(n * sizeof(int));
	int* d_flag;
	int* d_arr;
	cudaMalloc(&d_flag, n * sizeof(int));
	cudaMalloc(&d_arr, n * sizeof(int));
	cudaMemcpy(d_arr, arr, n * sizeof(int), cudaMemcpyHostToDevice);
	generateFlag<<<numBlocks,SIZEOFBLOCK>>>(d_flag, d_arr, n, idx);
	cudaDeviceSynchronize();
	cudaMemcpy(h_flag, d_flag, n * sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(d_flag);
	cudaFree(d_arr);
	int* iDown = generateIDown(h_flag, n);
	int* iUp = generateIUp(h_flag, n);
	permute(arr, h_flag, iDown, iUp, n);
	// BUG FIX: the original leaked all three host arrays on every pass.
	free(iDown);
	free(iUp);
	free(h_flag);
}
// LSD radix sort: one stable split per bit, from bit 0 up to bit 31.
// Note: bit 31 is the sign bit, so negative inputs would end up after
// positives; the rng() inputs used here are non-negative.
// (Removed an unused local `idx` from the original.)
void radixSort(int* arr, int n) {
	for (int bit = 0; bit < 32; bit++) {
		split(arr, n, bit);
	}
}
// Usage: ./radix_sort <N> — sorts N pseudo-random ints on the GPU, writes
// the sorted output to ../out/output and prints the elapsed CPU time.
int main(int argc, char** argv)
{
	int n;
	if (argc != 2) {
		cout << "Wrong input" << endl;
		cout << "./radix_sort <N>" << endl;
		exit(0);
	}
	else {
		n = atoi(argv[1]);
	}
	int * arr = (int*)malloc(n * sizeof(int));
	rng(arr, n);
	clock_t beginTime = clock();
	radixSort(arr, n);
	clock_t endTime = clock();
	// BUG FIX: the original computed `endTime - (beginTime / CLOCKS_PER_SEC)`
	// due to operator precedence; the whole tick difference must be scaled.
	double elapsedTime = (double)(endTime - beginTime) / CLOCKS_PER_SEC;
	printToFile(arr, n);
	cout << "Parallel Radix Sort Time: " << elapsedTime << endl;
	cout << endl;
	free(arr);  // BUG FIX: was leaked
	return 0;
}
|
14,238 | #include <iostream>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cassert>
#include <ctime>
#include <curand.h>
#include <curand_kernel.h>
#include <cmath>
#include <string.h>
#define DOMINIO 10000
#define SUBDOMINIO 1000 // = DOMINIO / BLOCO
#define BLOCOS 10
#define M 10 //Tamanho do padrao
#define N 100 //Tamanho da linha
#define LINHAS 100 //Linhas por bloco = threads por bloco
#define TAMLINHA 100
#define CHECK_ERROR(call) do { \
if( cudaSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
cudaGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
//Lendo sequencia e padrao a partir de um arquivo
__host__ void le_sequencia(char *nome_arquivo, char *seq, int tam)
{
FILE *arq;
arq = fopen(nome_arquivo, "r");
fscanf(arq, "%s", seq);
}
//Calcula qual o avanço de acordo com a localizacao do caracter no padrao
__device__ int ord(char *padrao, char c)
{
int i = M - 1;
while(padrao[i] != c && i >= 0)
i--;
if(i >= 0)
return i;
else
return M - 1;
}
// Boyer-Moore-Horspool search: each thread scans its own TAMLINHA-character
// slice of the text for the M-character pattern, marking match start
// positions in res. The shift table d is rebuilt privately by every thread.
__global__ void kernel(char *texto, char *padrao, int *res)
{
int thread = blockDim.x * blockIdx.x + threadIdx.x;
// Per-thread BMH shift table, indexed by ord() of the mismatching char.
int d[M];
int i = 0, k, j;
int a = 1;
// NOTE(review): this k value is overwritten inside the search loop below
// before being read — it appears to be dead.
k = SUBDOMINIO * blockDim.x;
// Preprocessing: default shift is M; characters occurring in the pattern
// (except the last) get shift M - (position + 1).
for (j = 0; j < M; j++)
d[j] = M;
for (j = 0; j < M - 1; j++)
{
d[ord(padrao, padrao[j])] = M - a;
a++;
}
i = (thread * TAMLINHA) + M;
// c and f are the start and end of this thread's line, so one thread
// never walks into another thread's slice.
int c = thread * TAMLINHA;
int f = (thread * TAMLINHA) + TAMLINHA;
while ((i <= f) && ( i > c))
{
// Compare the pattern right-to-left against the window ending at i - 1.
k = i - 1;
j = M - 1;
while ((j > 0) && (texto[k] == padrao[j]))
{
k -= 1;
j -= 1;
}
// Full match: k is the window's start position in the text.
if (j == 0 && (texto[k] == padrao[j]))
res[k] = 1;
// Advance by the shift associated with the window's last character.
a = ord(padrao, texto[i-1]);
i = i + d[a];
}
}
using namespace std;
// Host driver: loads the DNA text and pattern from disk, runs the BMH
// kernel over the text, and prints every match position plus the elapsed
// time measured with CUDA events.
int main (int argc, char **argv)
{
	cudaEvent_t e_Start, e_Stop;
	float elapsedTime = 0.0f;
	// Device buffers
	char *d_Texto = NULL, *d_Padrao = NULL;
	int *d_resultado = NULL;
	// Host buffers
	char h_Texto[DOMINIO], h_Padrao[M];
	int h_resultado[DOMINIO];
	le_sequencia("dna.txt", h_Texto, DOMINIO);
	le_sequencia("padrao_dna.txt", h_Padrao, M);
	memset(h_resultado, 0, DOMINIO * sizeof(int));
	// BUG FIX: the original allocated and filled the device buffers, then
	// called cudaDeviceReset() — which destroys all device allocations —
	// and re-allocated the same pointers WITHOUT copying the data again,
	// so the kernel searched uninitialized memory. Allocate and copy
	// exactly once, with no reset.
	CHECK_ERROR(cudaMalloc((void**) &d_Texto, DOMINIO * sizeof(char)));
	CHECK_ERROR(cudaMalloc((void**) &d_Padrao, M * sizeof(char)));
	CHECK_ERROR(cudaMalloc((void**) &d_resultado, DOMINIO * sizeof(int)));
	// CPU -> GPU
	CHECK_ERROR(cudaMemcpy(d_Texto, h_Texto , DOMINIO * sizeof(char), cudaMemcpyHostToDevice));
	CHECK_ERROR(cudaMemcpy(d_Padrao, h_Padrao, M * sizeof(char), cudaMemcpyHostToDevice));
	CHECK_ERROR(cudaMemcpy(d_resultado, h_resultado, DOMINIO * sizeof(int), cudaMemcpyHostToDevice));
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, 0);
	cout << "\n\n Algoritmo Boyer Moore Horspool\n\n\n";
	// Problem description
	cout << "::Dados do Problema::\n" << endl;
	cout << "Tamanho do texto: " << DOMINIO << " caracteres" << endl;
	cout << "Blocos: " << BLOCOS << endl;
	cout << "Threads: " << LINHAS << endl;
	cout << "Padrao: " << h_Padrao << endl;
	// Timing events
	CHECK_ERROR(cudaEventCreate(&e_Start));
	CHECK_ERROR(cudaEventCreate(&e_Stop));
	CHECK_ERROR(cudaEventRecord(e_Start, cudaEventDefault));
	// Kernel launch (the original passed 1 byte of dynamic shared memory,
	// which the kernel never uses; dropped here).
	kernel<<<BLOCOS, LINHAS>>>(d_Texto, d_Padrao, d_resultado);
	CHECK_ERROR(cudaDeviceSynchronize());
	// GPU -> CPU
	CHECK_ERROR(cudaMemcpy(h_resultado, d_resultado, DOMINIO * sizeof(int), cudaMemcpyDeviceToHost));
	CHECK_ERROR(cudaEventRecord(e_Stop, cudaEventDefault));
	CHECK_ERROR(cudaEventSynchronize(e_Stop));
	CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, e_Start, e_Stop));
	cout << "Tempo de execucao: " << elapsedTime / 1000.0f << " (s) \n\n\n";
	// Report every marked match position.
	for(int k = 0; k < DOMINIO; k++)
		if(h_resultado[k] == 1)
			cout << "Ocorrencia em: " << k << endl;
	CHECK_ERROR(cudaEventDestroy(e_Start));
	CHECK_ERROR(cudaEventDestroy(e_Stop));
	// Release device memory (the original never freed it).
	cudaFree(d_Texto);
	cudaFree(d_Padrao);
	cudaFree(d_resultado);
	cout << "\nFIM\n";
	return EXIT_SUCCESS;
}
|
14,239 | #include <stdlib.h>
#include "mesh.cuh"
#include "constants.cuh"
//-------------------------------PROTOTIPES---------------------------
void ik_compute(int* edofmat, int *ik, struct mesh *mesh);
void jk_compute(int* edofmat, int *jk, struct mesh *mesh);
//------------------------------BODIES--------------------------------
void jk_ik_compute(int **ik,int **jk, int* edofmat, struct mesh *mesh) {
	// Allocate the iK/jK sparse-assembly index vectors (64 entries per mesh
	// element, mirroring MATLAB's kron-based construction) and fill them
	// with the two helpers below. The caller owns both arrays.
	const int entries = mesh->nelx * mesh->nely * 64;
	*ik = (int*)malloc(entries * sizeof(int));
	*jk = (int*)malloc(entries * sizeof(int));
	ik_compute(edofmat, *ik, mesh);
	jk_compute(edofmat, *jk, mesh);
}
void ik_compute(int* edofmat, int *ik, struct mesh *mesh) { //iK = reshape(kron(edofMat,ones(8,1))',64*nelx*nely,1);
	// For each mesh element, emit its EDOFMAT_COL DOF indices 8 times in
	// sequence (row-block repetition), filling ik with 64 entries/element.
	int e_index, tmp_index = 0, ik_index = 0;
	int *tmp ;
	// BUG FIX: the scratch row buffer only ever holds EDOFMAT_COL entries,
	// but the original sized it nely*nelx — an unrelated quantity that is
	// both wasteful for large meshes and an out-of-bounds write whenever
	// nely*nelx < EDOFMAT_COL.
	tmp = (int*)malloc(EDOFMAT_COL * sizeof(int));
	for (int e_row_index = 0; e_row_index < mesh->nely*mesh->nelx;e_row_index++) {
		// Copy this element's DOF row out of edofMat.
		for (int e_col_index = 0;e_col_index < EDOFMAT_COL;e_col_index++) {
			e_index = e_row_index * EDOFMAT_COL + e_col_index;
			tmp[e_col_index] = edofmat[e_index];
		}
		// Emit the row 8 times back-to-back.
		for (int i = 0;i < 8;i++) {
			for (tmp_index = 0;tmp_index <EDOFMAT_COL;tmp_index++) {
				ik[ik_index] = tmp[tmp_index];
				ik_index++;
			}
		}
	}
	free(tmp);
}
void jk_compute(int* edofmat, int *jk, struct mesh *mesh) { //jK = reshape(kron(edofMat,ones(1,8))',64*nelx*nely,1);
	// For each entry of edofMat, write its value EDOFMAT_COL times in a row
	// (column repetition), filling jk with 64 entries per mesh element.
	int out = 0;
	const int n_elems = mesh->nely * mesh->nelx;
	for (int row = 0; row < n_elems; row++) {
		for (int col = 0; col < EDOFMAT_COL; col++) {
			const int value = edofmat[row * EDOFMAT_COL + col];
			for (int rep = 0; rep < EDOFMAT_COL; rep++) {
				jk[out++] = value;
			}
		}
	}
}
14,240 |
/*
// Cython function from 'thinc' library
class NumpyOps(Ops):
def backprop_mean_pool(self, float[:, ::1] d_means, int[::1] lengths):
cdef int B = lengths.shape[0]
cdef int O = d_means.shape[1]
cdef int T = 0
for length in lengths[:B]:
T += length
cdef Pool mem = Pool()
dX = <float*>mem.alloc(T * O, sizeof(float))
cpu_backprop_mean_pool(dX,
&d_means[0,0], &lengths[0], B, T, O)
return cpu_floats_ptr2array(dX, (T, O))
cdef void cpu_backprop_mean_pool(float* dX__to,
const float* d_means__bo, const int* lengths__b,
int B, int T, int O) nogil:
cdef float scale = 0.
for length in lengths__b[:B]:
scale = 1./ length
for _ in range(length):
VecVec.add_i(dX__to,
d_means__bo, scale, O)
dX__to += O
d_means__bo += O
*/
// Backprop through mean pooling: distributes each document's mean gradient
// over that document's words, scaled by 1 / (words in doc). One thread block
// per document (block-stride loop over documents); thread t handles
// dimension t of the embedding.
// NOTE(review): assumes blockDim.x == dims and dims <= 256 (size of the
// shared staging buffer) — confirm against the launch configuration.
void __global__ backprop_mean_pool(float* means, float *words, int *lengths,int *prevLengths, int numdocs, int dims)
{
int bid = blockIdx.x;
// Per-block staging buffer for one document's mean-gradient vector; each
// thread only ever reads back the slot it wrote itself.
__shared__ float local_means[256];
for(int step = bid; step < numdocs; step += gridDim.x )
{
int wordsInDoc = lengths[step];
// Flat offset of this document's first word vector (prevLengths is
// presumably the cumulative word count of preceding docs — verify).
int blockStarts = prevLengths[step]*dims;
local_means[threadIdx.x] = means[step*dims+threadIdx.x];
// Stride by dims so thread t writes dimension t of every word in the doc.
for (int i = blockStarts + threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims)
words[i] = local_means[threadIdx.x]/wordsInDoc;
}
}
|
14,241 | /* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
/* Includes, cuda */
//#include <cuda_runtime.h>
//#include <cublas_v2.h>
//#include <helper_cuda.h>
#define BLOCK_SIZE 16
/* ======================================================= */
/* CUDA implementation of dGEMM without using shared memory
/* ======================================================= */
/* C = alpha * A*B + beta * C for square n x n row-major matrices.
 * One thread per output element; the grid may overhang n, hence the
 * bounds guard. */
__global__ void cuda_dgemm(int n,
                           double alpha,
                           const double *A,
                           const double *B,
                           double beta,
                           double *C) {
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    /* BUG FIX: the guard must be >=, not >. row == n (or col == n) is one
     * past the last valid index, so threads on the boundary read and wrote
     * out of bounds. */
    if (row >= n || col >= n) return;
    double prod = 0;
    for (int k = 0; k < n; ++k) {
        prod += A[row * n + k] * B[k * n + col];
    }
    /* Per-thread debug printfs removed: they serialized every thread and
     * dominated the run time. */
    C[row * n + col] = alpha * prod + beta * C[row * n + col];
}
/* ==== */
/* Main */
/* ==== */
/* Driver for the naive CUDA dGEMM: builds deterministic N x N test
 * matrices, runs the kernel, reports wall-clock time and prints the
 * first few entries of the result.
 * Usage: program [N]; N defaults to 275. */
int main(int argc, char **argv)
{
    //cublasStatus_t status;
    double *h_A, *h_B, *h_C, *h_C_blas, *h_C_simple, *h_C_0;
    double *d_A = 0;
    double *d_B = 0;
    double *d_C = 0;
    double alpha = 1.0;
    double beta = 0.0;
    int n2, N;
    int i;
    //cublasHandle_t handle;
    struct timeval tv1, tv2;

    /* get the size of the matrix from the command line */
    if (argc < 2) N = 275;
    else N = atoi(argv[1]);
    n2 = N * N;

    /* Allocate host memory for the matrices */
    h_A = (double *)malloc(n2 * sizeof(double));
    h_B = (double *)malloc(n2 * sizeof(double));
    h_C = (double *)malloc(n2 * sizeof(double));
    h_C_blas = (double *)malloc(n2 * sizeof(double));
    h_C_simple = (double *)malloc(n2 * sizeof(double));
    h_C_0 = (double *)malloc(n2 * sizeof(double));

    /* Fill the matrices with deterministic test data.
     * (The per-element debug printf of the inputs was removed: it emitted
     * N*N lines and dwarfed the actual computation.) */
    for (i = 0; i < n2; i++){
        h_A[i] = i; //rand() / (double)RAND_MAX;
        h_B[i] = i; //rand() / (double)RAND_MAX;
        h_C[i] = i; //rand() / (double)RAND_MAX;
        h_C_blas[i] = h_C[i];
        h_C_simple[i] = h_C[i];
        h_C_0[i] = h_C[i];
    }

    /* ============ CUDA implementation without shared memory ============ */
    printf("\tTesting CUDA dgemm function without using Shared memory.\n");
    gettimeofday(&tv1, NULL);
    /* Allocate device memory for the matrices */
    cudaMalloc((void **)&d_A, n2 * sizeof(d_A[0]));
    cudaMalloc((void **)&d_B, n2 * sizeof(d_B[0]));
    cudaMalloc((void **)&d_C, n2 * sizeof(d_C[0]));
    /* copy A, B and the initial C to the gpu */
    cudaMemcpy(d_A, h_A, n2 * sizeof(d_A[0]), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, n2 * sizeof(d_B[0]), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C_0, n2 * sizeof(d_C[0]), cudaMemcpyHostToDevice);
    /* Kernel: one thread per output element. Ceiling division avoids
     * launching a needless extra block row/column when N is an exact
     * multiple of BLOCK_SIZE. */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
                 (N + dimBlock.y - 1) / dimBlock.y);
    printf(" beta=%f\n", beta);
    cuda_dgemm<<<dimGrid, dimBlock>>>(N, alpha, d_A, d_B, beta, d_C);
    /* wait until all threads finish their job */
    cudaDeviceSynchronize();
    /* Read the result back */
    cudaMemcpy(h_C, d_C, n2 * sizeof(d_C[0]), cudaMemcpyDeviceToHost);
    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec - tv1.tv_usec) / 1000 +
           (double)(tv2.tv_sec - tv1.tv_sec) * 1000);

    printf("\n\tChecking results.\n");
    /* Echo the first few entries of the result */
    for (i = 0; i < n2; ++i){
        if (i < 10) printf("%f\n", h_C[i]);
    }

    /* free cuda memory */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    /* Memory clean up */
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_simple);
    free(h_C_blas);
    free(h_C_0); /* BUG FIX: h_C_0 was previously leaked */
    return 0;
}
|
14,242 | #include <pthread.h>
#include <cstdlib>
#include <iostream>
#include <vector>
#define COMPRESSION_BATCH_SIZE 32
using namespace std;
// Per-worker arguments for compressThread.
struct ThreadArg {
  float *original_data;    // shared input buffer (read-only for workers)
  long num_elements;       // total elements in original_data (not per-thread)
  int thread_num;          // this worker's index in [0, n_threads)
  float *compressed_data;  // output values, written at their original offsets
  unsigned int *mask;      // one 32-bit occupancy mask per 32-element batch
};
// Number of worker threads; each handles a contiguous 1/n_threads slice.
int n_threads = 8;
// Per-layer activation element counts (scaled by batch size in main()).
// NOTE(review): values resemble AlexNet feature-map sizes - confirm.
long layer_sizes[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256,
                      13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384,
                      13l * 13 * 256, 6l * 6 * 256};
int num_layers = 8;
// Worker: scans this thread's slice of original_data in batches of
// COMPRESSION_BATCH_SIZE elements. For each batch it builds an occupancy
// bitmask (one bit per element, first element ending up in the most
// significant position) and copies the positive values into
// compressed_data at their original offsets.
void *compressThread(void *arg) {
  ThreadArg *targ = (ThreadArg *)arg;
  float *src = targ->original_data;
  float *dst = targ->compressed_data;
  unsigned int *mask = targ->mask;
  long begin = targ->thread_num * targ->num_elements / n_threads;
  long batches = targ->num_elements / n_threads / COMPRESSION_BATCH_SIZE;
  for (long b = 0; b < batches; b++) {
    long first = begin + b * COMPRESSION_BATCH_SIZE;
    long pos = first / COMPRESSION_BATCH_SIZE;
    mask[pos] = 0;
    for (long j = first; j < first + COMPRESSION_BATCH_SIZE; j++) {
      if (src[j] > 0) {
        mask[pos] = (mask[pos] << 1) + 1;
        dst[j] = src[j];
      } else {
        mask[pos] = (mask[pos] << 1);
      }
    }
  }
  return NULL;
}
// Benchmark: for each layer, generate ~70% nonzero activations and time the
// multithreaded mask-based compression with CUDA events. Note that the timed
// region deliberately includes the two pinned allocations made per layer.
int main() {
  int batch_size = 128;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  pthread_t threads[n_threads];
  // Scale per-layer element counts by the batch size (mutates the globals,
  // so this main must only run once).
  for (int i = 0; i < num_layers; i++) {
    layer_sizes[i] *= batch_size;
  }
  vector<float> compression_times;
  float total_milli = 0.0;
  for (int j = 0; j < num_layers; j++) {
    long num_elements = layer_sizes[j];
    float *original_data, *compressed_data;
    unsigned int *mask;
    cudaMallocHost((void **)&original_data, num_elements * sizeof(float));
    // generate data: ~30% zeros, the rest ones
    for (long i = 0; i < num_elements; i++) {
      if (rand() % 10 < 3)
        original_data[i] = 0;
      else
        original_data[i] = 1;
    }
    // The slicing scheme requires an exact split into per-thread slices
    // made of whole COMPRESSION_BATCH_SIZE batches.
    if (num_elements % n_threads != 0) {
      cout << "bad number of threads" << endl;
      exit(0);
    }
    if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) {
      cout << "bad num_elements or n_threads" << endl;
      exit(0);
    }
    cout << "starting " << j << endl;
    // Timed region begins: covers the output allocations, thread spawn,
    // compression work, and joins.
    cudaEventRecord(start);
    cudaMallocHost((void **)&compressed_data, num_elements * sizeof(float));
    cudaMallocHost((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE *
                                       sizeof(unsigned int));
    ThreadArg thread_arg[n_threads];
    for (int i = 0; i < n_threads; i++) {
      thread_arg[i].original_data = original_data;
      thread_arg[i].compressed_data = compressed_data;
      thread_arg[i].mask = mask;
      thread_arg[i].thread_num = i;
      thread_arg[i].num_elements = num_elements;
    }
    for (int i = 0; i < n_threads; i++) {
      pthread_create(&threads[i], NULL, &compressThread,
                     (void *)&thread_arg[i]);
    }
    for (int i = 0; i < n_threads; i++) {
      pthread_join(threads[i], NULL);
    }
    // for (int i = 0; i < 27 * 27 * 256 * 128; i++);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milli;
    cudaEventElapsedTime(&milli, start, stop);
    compression_times.push_back(milli);
    total_milli += milli;
    // cout << milli << endl;
    cudaFreeHost(original_data);
    cudaFreeHost(compressed_data);
    cudaFreeHost(mask);
  }
  // Report per-layer times, then the total.
  for (int i = 0; i < num_layers; i++) {
    cout << compression_times[i] << endl;
  }
  cout << total_milli << endl;
}
14,243 | /* CUDA utility Library */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
int nblock_size = 64;
int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static cudaError_t crc;
#define MAXSTREAMS 4
static cudaStream_t streams[MAXSTREAMS] = {NULL,NULL,NULL,NULL};
/* Prototypes for Fortran function called by C */
extern "C" void getfcptr_(unsigned long *carrayref, float *carray,
int *nx);
extern "C" void getf2cptr_(unsigned long *carrayref, float *carray,
int *nx, int *ny);
extern "C" void getc2cptr_(unsigned long *carrayref, float2 *carray,
int *nx, int *ny);
__global__ void emptyKernel() {}
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize(int nblock) {
/* set blocksize used by subsequent kernel launches (global nblock_size) */
   nblock_size = nblock;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc() {
/* get major and minor computer capability */
/* returns 10*major + minor, as recorded by init_cu */
   return mmcc;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) {
/* allocate global float memory on GPU, return pointer to C */
/* nsize = number of floats; *irc set to 1 on failure */
   void *gptr = NULL; /* BUG FIX: was uninitialized, so on cudaMalloc
                         failure *g_f received an indeterminate pointer;
                         now callers get NULL instead */
   crc = cudaMalloc(&gptr,sizeof(float)*nsize);
   if (crc) {
      printf("cudaMalloc float Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *g_f = (float *)gptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) {
/* allocate global integer memory on GPU, return pointer to C */
/* nsize = number of ints; *irc set to 1 on failure */
   void *gptr = NULL; /* BUG FIX: was uninitialized; on cudaMalloc failure
                         *g_i became an indeterminate pointer */
   crc = cudaMalloc(&gptr,sizeof(int)*nsize);
   if (crc) {
      printf("cudaMalloc int Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *g_i = (int *)gptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate(float2 **g_c, int nsize, int *irc) {
/* allocate global float2 memory on GPU, return pointer to C */
/* nsize = number of float2 elements; *irc set to 1 on failure */
   void *gptr = NULL; /* BUG FIX: was uninitialized; on cudaMalloc failure
                         *g_c became an indeterminate pointer */
   crc = cudaMalloc(&gptr,sizeof(float2)*nsize);
   if (crc) {
      printf("cudaMalloc float2 Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *g_c = (float2 *)gptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate(void *g_d, int *irc) {
/* deallocate global memory on GPU */
/* *irc set to 1 on failure; passing NULL is a no-op for cudaFree */
   crc = cudaFree(g_d);
   if (crc) {
      printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc));
      *irc = 1;
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_fallocate(float **h_f, int nsize, int *irc) {
/* allocate page-locked float memory on host, return pointer to C */
/* nsize = number of floats; *h_f is NULL and *irc = 1 on failure */
   void *hptr = NULL;
   crc = cudaMallocHost(&hptr,sizeof(float)*nsize);
   if (crc) {
      printf("cudaMallocHost float Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *h_f = (float *)hptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_callocate(float2 **h_c, int nsize, int *irc) {
/* allocate page-locked float2 memory on host, return pointer to C */
/* nsize = number of float2 elements; *h_c is NULL and *irc = 1 on failure */
   void *hptr = NULL;
   crc = cudaMallocHost(&hptr,sizeof(float2)*nsize);
   if (crc) {
      printf("cudaMallocHost float2 Error=%d:%s,l=%d\n",crc,
             cudaGetErrorString(crc),nsize);
      *irc = 1;
   }
   *h_c = (float2 *)hptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_deallocate(void *h_d, int *irc) {
/* deallocate page-locked memory on host */
/* *irc set to 1 on failure */
   crc = cudaFreeHost(h_d);
   if (crc) {
      printf("cudaFreeHost Error=%d:%s\n",crc,cudaGetErrorString(crc));
      *irc = 1;
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) {
/* copy float array from host memory to global GPU memory */
/* f = host source, g_f = device destination, nsize = element count */
/* blocking copy; terminates the process on failure */
   crc = cudaMemcpy((void *)g_f,f,sizeof(float)*nsize,
                    cudaMemcpyHostToDevice);
   if (crc) {
      printf("cudaMemcpyHostToDevice float Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) {
/* copy float array from global GPU memory to host memory */
/* f = host destination, g_f = device source, nsize = element count */
/* blocking copy; terminates the process on failure */
   crc = cudaMemcpy(f,(void *)g_f,sizeof(float)*nsize,
                    cudaMemcpyDeviceToHost);
   if (crc) {
      printf("cudaMemcpyDeviceToHost float Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin(int *f, int *g_f, int nsize) {
/* copy int array from host memory to global GPU memory */
/* f = host source, g_f = device destination, nsize = element count */
/* blocking copy; terminates the process on failure */
   crc = cudaMemcpy((void *)g_f,f,sizeof(int)*nsize,
                    cudaMemcpyHostToDevice);
   if (crc) {
      printf("cudaMemcpyHostToDevice int Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout(int *f, int *g_f, int nsize) {
/* copy int array from global GPU memory to host memory */
/* f = host destination, g_f = device source, nsize = element count */
/* blocking copy; terminates the process on failure */
   crc = cudaMemcpy(f,(void *)g_f,sizeof(int)*nsize,
                    cudaMemcpyDeviceToHost);
   if (crc) {
      printf("cudaMemcpyDeviceToHost int Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from host memory to global GPU memory */
/* f = host source, g_f = device destination, nsize = element count */
/* blocking copy; terminates the process on failure */
   crc = cudaMemcpy((void *)g_f,f,sizeof(float2)*nsize,
                    cudaMemcpyHostToDevice);
   if (crc) {
      printf("cudaMemcpyHostToDevice float2 Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from global GPU memory to host memory */
/* f = host destination, g_f = device source, nsize = element count */
/* blocking copy; terminates the process on failure */
   crc = cudaMemcpy(f,(void *)g_f,sizeof(float2)*nsize,
                    cudaMemcpyDeviceToHost);
   if (crc) {
      printf("cudaMemcpyDeviceToHost float2 Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_initstream(int nstream) {
/* Create Stream for requested identifier nstream */
/* nstream should be between 1 and MAXSTREAMS inclusive */
/* exits on out-of-range ids, double initialization, or CUDA failure */
   if ((nstream < 1) || (nstream > MAXSTREAMS)) {
      printf("gpu_initstream: nstream out of bounds = %d\n",nstream);
      exit(1);
   }
   if (streams[nstream-1] != NULL) {
      printf("gpu_initstream: nstream already used = %d\n",nstream);
      exit(1);
   }
   crc = cudaStreamCreate(&streams[nstream-1]);
   if (crc) {
      printf("cudaStreamCreate Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_delstream(int nstream) {
/* Destroy Stream for requested identifier nstream */
/* nstream should be between 1 and MAXSTREAMS inclusive */
   if ((nstream < 1) || (nstream > MAXSTREAMS)) {
      printf("gpu_delstream: nstream out of bounds = %d\n",nstream);
      return; /* BUG FIX: previously fell through and indexed streams[]
                 out of bounds */
   }
   if (streams[nstream-1] == NULL) {
      printf("gpu_delstream: nstream not allocated = %d\n",nstream);
      return; /* BUG FIX: previously went on to destroy a NULL stream */
   }
   crc = cudaStreamDestroy(streams[nstream-1]);
   if (crc) {
      printf("cudaStreamDestroy Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
/* clear the slot so gpu_initstream can reuse this identifier */
   streams[nstream-1] = NULL;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_waitstream(int nstream) {
/* Synchronize Stream for requested identifier nstream */
/* nstream should be between 0 and MAXSTREAMS inclusive; */
/* 0 selects the default (NULL) stream */
   cudaStream_t stream = NULL;
/* BUG FIX: the range test used ||, which is always true, so the error
   branch was unreachable and out-of-range ids silently indexed streams[]
   out of bounds */
   if ((nstream >= 0) && (nstream <= MAXSTREAMS)) {
      if (nstream > 0) stream = streams[nstream-1];
   }
   else {
      printf("gpu_waitstream: nstream undefined = %d\n",nstream);
      exit(1);
   }
   crc = cudaStreamSynchronize(stream);
   if (crc) {
      printf("cudaStreamSynchronize Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyin(float2 *f, float2 *g_f, int noff,
                              int nsize, int nstream) {
/* copy float2 array segment from host memory to global GPU memory */
/* asynchronous copy on stream nstream (0 = default NULL stream) */
/* noff = element offset into g_f, nsize = element count */
   float2 *cptr;
   cudaStream_t stream = NULL;
   cptr = &g_f[noff];
/* BUG FIX: the range test used ||, which is always true, making the
   error branch unreachable and permitting out-of-range indexing of
   streams[] */
   if ((nstream >= 0) && (nstream <= MAXSTREAMS)) {
      if (nstream > 0) stream = streams[nstream-1];
   }
   else {
      printf("gpu_cascopyin: nstream undefined = %d\n",nstream);
      exit(1);
   }
   crc = cudaMemcpyAsync((void *)cptr,f,sizeof(float2)*nsize,
                         cudaMemcpyHostToDevice,stream);
   if (crc) {
      printf("Async cudaMemcpyHostToDevice float2 Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyout(float2 *f, float2 *g_f, int noff,
                               int nsize, int nstream) {
/* copy float2 array segment from global GPU memory to host memory */
/* asynchronous copy on stream nstream (0 = default NULL stream) */
/* noff = element offset into g_f, nsize = element count */
   float2 *cptr;
   cudaStream_t stream = NULL;
   cptr = &g_f[noff];
/* BUG FIX: the range test used ||, which is always true, making the
   error branch unreachable and permitting out-of-range indexing of
   streams[] */
   if ((nstream >= 0) && (nstream <= MAXSTREAMS)) {
      if (nstream > 0) stream = streams[nstream-1];
   }
   else {
      printf("gpu_cascopyout: nstream undefined = %d\n",nstream);
      exit(1);
   }
   crc = cudaMemcpyAsync(f,(void *)cptr,sizeof(float2)*nsize,
                         cudaMemcpyDeviceToHost,stream);
   if (crc) {
      printf("Async cudaMemcpyDeviceToHost float2 Error=%d:%s\n",crc,
             cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem(float *g_f, int nsize) {
/* initialize float array in global GPU memory to zero */
/* byte-wise memset: only valid for the all-zero pattern */
   crc = cudaMemset((void *)g_f,0,sizeof(float)*nsize);
   if (crc) {
      printf("cudaMemset Error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size(int nscache) {
/* request preferred cache size, requires CUDA 3.2 or higher */
/* nscache = (0,1,2) = (no,small,big) cache size */
/* NOTE(review): cudaThreadSetCacheConfig is deprecated in newer
   toolkits in favor of the commented-out cudaDeviceSetCacheConfig -
   confirm the toolkit baseline before switching */
   cudaFuncCache cpref;
/* silently ignore out-of-range requests */
   if ((nscache < 0) || (nscache > 2))
      return;
   if (nscache==0)
      cpref = cudaFuncCachePreferNone;
   else if (nscache==1)
      cpref = cudaFuncCachePreferShared;
   else if (nscache==2)
      cpref = cudaFuncCachePreferL1;
   crc = cudaThreadSetCacheConfig(cpref);
/* crc = cudaDeviceSetCacheConfig(cpref); */
   if (crc) {
      printf("cudaThreadSetCacheConfig error=%d:%s\n",crc,
             cudaGetErrorString(crc));
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel() {
/* launch a trivial kernel, typically to absorb one-time CUDA startup
   cost (called from init_cu) */
   int ngx, ngy;
/* NOTE(review): the grid x-dimension is derived from nblock_size (the
   block size) clamped to 32768, with ngrid_size folded into y - looks
   intentional but worth confirming against the original library */
   ngx  = nblock_size < 32768 ? nblock_size : 32768;
   ngy = (ngrid_size - 1)/ngx + 1;
   dim3 dimBlock(nblock_size,1);
   dim3 dimGrid(ngx,ngy);
/* clear any sticky error before the launch */
   crc = cudaGetLastError();
   emptyKernel<<<dimGrid,dimBlock>>>();
   cudaThreadSynchronize();
   crc = cudaGetLastError();
   if (crc) {
      printf("emptyKernel error=%d:%s\n",crc,cudaGetErrorString(crc));
      exit(1);
   }
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu(int dev, int *irc) {
/* initialize CUDA with device dev or selects best GPU available */
/* searches throughs devices, selects the device with the most compute */
/* units, and saves the device id devid */
/* if dev is a valid device, it is used, otherwise the GPU with the */
/* most multi-processors is selected */
/* error code is modified only if there is an error */
   int maxcpus = 0, jm = -1;
   int j, ndevs, maxunits;
   unsigned long msize;
   double z;
   struct cudaDeviceProp prop;
/* returns number of device */
   crc = cudaGetDeviceCount(&ndevs);
   if (crc) {
      printf("cudaGetDeviceCount Error=%i:%s\n",crc,
             cudaGetErrorString(crc));
      *irc = 1;
      return;
   }
/* get information about devices */
   for (j = 0; j < ndevs; j++) {
      crc = cudaGetDeviceProperties(&prop,j);
      if (crc) {
         printf("cudaGetDeviceProperties Error=%i:%s\n",crc,
                cudaGetErrorString(crc));
         prop.name[0] = 0;
      }
      maxunits = prop.multiProcessorCount;
/* when no explicit device was requested, report each candidate and
   remember the one with the most multiprocessors */
      if (dev <= 0) {
         printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n",
                j,prop.name,maxunits);
         msize = prop.totalGlobalMem;
         z = ((double) msize)/1073741824.0;
         mmcc = 10*prop.major + prop.minor;
         printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n",
                msize,(float) z,mmcc);
         if (maxunits > maxcpus) {
            maxcpus = maxunits;
            jm = j;
         }
      }
   }
   devid = jm;
/* a non-negative dev overrides the automatic choice (wrapped into range) */
   if (dev >= 0)
      devid = dev % ndevs;
   printf("using device j=%i\n",devid);
/* get properties for this device; cache max grid x-size and capability */
   crc = cudaGetDeviceProperties(&prop,devid);
   maxgsx = prop.maxGridSize[0];
   mmcc = 10*prop.major + prop.minor;
/* set device */
   crc = cudaSetDevice(devid);
   if (crc) {
      printf("cudaSetDevice Error=%i:%s\n",crc,
             cudaGetErrorString(crc));
      *irc = 1;
      return;
   }
/* run empty kernel to absorb one-time startup cost */
   emptykernel();
   return;
}
extern "C" void end_cu() {
/* terminate CUDA */
/* NOTE(review): cudaThreadExit is deprecated in newer toolkits in
   favor of cudaDeviceReset - confirm toolkit baseline */
   crc = cudaThreadExit();
   if (crc) {
      printf("cudaThreadExit Error=%d:%s\n",crc,cudaGetErrorString(crc));
   }
   return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize_(int *nblock) {
/* Fortran wrapper: set the default kernel block size */
   gpu_setgbsize(*nblock);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc_() {
/* Fortran wrapper: get major and minor computer capability */
   return getmmcc();
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize,
                               int *irc) {
/* allocate global float memory on GPU, return pointer to Fortran */
   float *fptr;
   gpu_fallocate(&fptr,*nsize,irc);
/* FIX: cast through unsigned long (the declared type of *gp_f); the old
   (long) cast is implementation-defined for pointers in the upper half
   of the address space */
   *gp_f = (unsigned long)fptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize,
                               int *irc) {
/* allocate global integer memory on GPU, return pointer to Fortran */
   int *iptr;
   gpu_iallocate(&iptr,*nsize,irc);
/* FIX: cast through unsigned long (the declared type of *gp_i) instead
   of the implementation-defined (long) cast */
   *gp_i = (unsigned long)iptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate_(unsigned long *gp_f, int *nsize,
                               int *irc) {
/* allocate global float2 memory on GPU, return pointer */
/* to Fortran */
   float2 *fptr;
   gpu_callocate(&fptr,*nsize,irc);
/* FIX: cast through unsigned long (the declared type of *gp_f) instead
   of the implementation-defined (long) cast */
   *gp_f = (unsigned long)fptr;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) {
/* deallocate global memory on GPU, return pointer to Fortran */
/* the stored handle is zeroed so stale pointers cannot be reused */
   void *d;
   d = (void *)*gp_d;
   gpu_deallocate(d,irc);
   *gp_d = 0;
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_f1allocate_(unsigned long *hp_f, int *nx,
                                int *irc) {
/* allocate page-locked 1d real memory on host, assign */
/* data pointer to Fortran pointer object hp_f */
/* nx = number of elements; *irc set to 1 on failure */
/* This procedure needs an interface in Fortran90 */
/* interface */
/*    subroutine hpl_f1allocate(hp_f,nx,irc) */
/*    implicit none */
/*    integer :: nx, irc */
/*    real, dimension(:), pointer :: hp_f */
/*    end subroutine */
/* end interface */
   int nsize;
   float *fptr;
   nsize = *nx;
/* allocate data on host */
   hpl_fallocate(&fptr,nsize,irc);
/* set reference to C data in real Fortran pointer object */
   getfcptr_(hp_f,fptr,nx);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_f2allocate_(unsigned long *hp_f, int *nx, int *ny,
                                int *irc) {
/* allocate page-locked 2d real memory on host, assign */
/* data pointer to Fortran pointer object hp_f */
/* nx, ny = dimensions; *irc set to 1 on failure */
/* This procedure needs an interface in Fortran90 */
/* interface */
/*    subroutine hpl_f2allocate(hp_f,nx,ny,irc) */
/*    implicit none */
/*    integer :: nx, ny, irc */
/*    real, dimension(:,:), pointer :: hp_f */
/*    end subroutine */
/* end interface */
   int nsize;
   float *fptr;
   nsize = (*nx)*(*ny);
/* allocate data on host */
   hpl_fallocate(&fptr,nsize,irc);
/* set reference to C data in real Fortran pointer object */
   getf2cptr_(hp_f,fptr,nx,ny);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_c2allocate_(unsigned long *hp_f, int *nx, int *ny,
                                int *irc) {
/* allocate page-locked 2d complex memory on host, assign */
/* data pointer to Fortran pointer object hp_f */
/* nx, ny = dimensions; *irc set to 1 on failure */
/* This procedure needs an interface in Fortran90 */
/* interface */
/*    subroutine hpl_c2allocate(hp_f,nx,ny,irc) */
/*    implicit none */
/*    integer :: nx, ny, irc */
/*    complex, dimension(:,:), pointer :: hp_f */
/*    end subroutine */
/* end interface */
   int nsize;
   float2 *cptr;
   nsize = (*nx)*(*ny);
/* allocate data on host */
   hpl_callocate(&cptr,nsize,irc);
/* set reference to C data in complex Fortran pointer object */
   getc2cptr_(hp_f,cptr,nx,ny);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_deallocate_(void *h_d, int *irc) {
/* deallocate page-locked memory on host */
/* pointer in Fortran should also be nullified */
   hpl_deallocate(h_d,irc);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f,
                             int *nsize) {
/* Fortran wrapper: copy float array from main memory to global GPU memory */
   float *g_f;
   g_f = (float *)*gp_f;
   gpu_fcopyin(f,g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f,
                              int *nsize) {
/* Fortran wrapper: copy float array from global GPU memory to main memory */
   float *g_f;
   g_f = (float *)*gp_f;
   gpu_fcopyout(f,g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin_(int *f, unsigned long *gp_f, int *nsize) {
/* Fortran wrapper: copy int array from main memory to global GPU memory */
   int *g_f;
   g_f = (int *)*gp_f;
   gpu_icopyin(f,g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout_(int *f, unsigned long *gp_f, int *nsize) {
/* Fortran wrapper: copy int array from global GPU memory to main memory */
   int *g_f;
   g_f = (int *)*gp_f;
   gpu_icopyout(f,g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin_(float2 *f, unsigned long *gp_f,
                             int *nsize) {
/* Fortran wrapper: copy float2 array from main memory to global GPU memory */
   float2 *g_f;
   g_f = (float2 *)*gp_f;
   gpu_ccopyin(f,g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout_(float2 *f, unsigned long *gp_f,
                              int *nsize) {
/* Fortran wrapper: copy float2 array from global GPU memory to main memory */
   float2 *g_f;
   g_f = (float2 *)*gp_f;
   gpu_ccopyout(f,g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_initstream_(int *nstream) {
/* Fortran wrapper: create stream with identifier *nstream (1..MAXSTREAMS) */
   gpu_initstream(*nstream);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_delstream_(int *nstream) {
/* Fortran wrapper: destroy stream with identifier *nstream */
   gpu_delstream(*nstream);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_waitstream_(int *nstream) {
/* Fortran wrapper: synchronize stream with identifier *nstream */
   gpu_waitstream(*nstream);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyin_(float2 *f, unsigned long *gp_f,
                               int *noff, int *nsize, int *nstream) {
/* copy float2 array segment from main memory to global GPU memory */
/* asynchronous copy; Fortran wrapper around gpu_cascopyin */
   float2 *g_f;
   g_f = (float2 *)*gp_f;
   gpu_cascopyin(f,g_f,*noff,*nsize,*nstream);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyout_(float2 *f, unsigned long *gp_f,
                                int *noff, int *nsize, int *nstream) {
/* copy float2 array segment from global GPU memory to main memory */
/* asynchronous copy; Fortran wrapper around gpu_cascopyout */
   float2 *g_f;
   g_f = (float2 *)*gp_f;
   gpu_cascopyout(f,g_f,*noff,*nsize,*nstream);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem_(unsigned long *gp_f, int *nsize) {
/* Fortran wrapper: zero a float array in global GPU memory */
   float *g_f;
   g_f = (float *)*gp_f;
   gpu_zfmem(g_f,*nsize);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size_(int *nscache) {
/* Fortran wrapper: request preferred cache configuration */
   gpu_set_cache_size(*nscache);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu_(int *dev, int *irc) {
/* Fortran wrapper: initialize CUDA, selecting device *dev or the best GPU */
   init_cu(*dev,irc);
   return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu_() {
/* Fortran wrapper: terminate CUDA */
   end_cu();
   return;
}
|
14,244 | // printf.cu
/*
 * Simple script to show how to print from a GPU kernel.
 * NOTE: a kernel launch is asynchronous, and the device-side printf
 * buffer is only flushed back to the host at a synchronization point.
 * The dummy cudaMalloc()/cudaFree() calls below appear to "fix" missing
 * output because cudaFree() synchronizes with outstanding device work
 * before returning. An explicit cudaDeviceSynchronize() after the
 * launch achieves the same effect directly.
 */
#include <stdio.h>
// Device-side printf demo: each launched thread prints its block/thread id.
__global__ void hello() {
  printf("Hello from Block %d, Thread %d\n", blockIdx.x, threadIdx.x);
}
int main() {
  hello<<<5,5>>>();
  // BUG FIX: kernel launches are asynchronous and the device printf
  // buffer is flushed at synchronization points, so without this the
  // process could exit before any output appeared. The original dummy
  // cudaMalloc/cudaFree pair "worked" only because cudaFree implicitly
  // synchronizes with outstanding device work; the explicit sync makes
  // the intent clear and the dummy allocation unnecessary.
  cudaDeviceSynchronize();
  return 0;
}
|
14,245 | typedef double BYTE;
|
14,246 | /*----------
* Authors:
* Saúl Contreras (Suulcoder)
* Michele Benvenuto
* Luis Urbina
* ----------
* Universidad del Valle
* Programación de Microprocesadores
* Semestre 4, 2019
* ----------
*/
#include <iostream> //cout, cin, cerr
#include <fstream> //file processing
#include <cstdlib> //exit function
#include <string.h>
using namespace std;
#define N 87395 //Number of data in the csv
// Arrhenius rate constant per sample: k = A * exp(-Ea / (R * T)),
// with T in kelvin (inputs are Celsius, hence the +273.15 offset).
// One thread per sample; grid may overhang N, hence the guard.
__global__ void getK(float *a, float *c)
{
    const float A  = 1000000000.0f; // pre-exponential factor
    const float Ea = 45000.0f;      // activation energy (J/mol)
    const float R  = 8.314f;        // gas constant (J/(mol*K))
    int myID = threadIdx.x + blockDim.x * blockIdx.x;
    if (myID < N)
    {
        // FIX: use single-precision expf() instead of pow(e, x) with a
        // truncated literal for e - faster and avoids the systematic
        // error introduced by the inexact base.
        c[myID] = A * expf((-Ea) / (R * (a[myID] + 273.15f)));
    }
}
// Reads temperatures (Celsius) from data.csv, computes the Arrhenius rate
// constant for each sample on the GPU, prints per-sample values, then
// aggregates average reaction velocity per class period into outData.csv.
int main(int argc, char** argv)
{
    cudaStream_t stream1; // stream1 and stream2 instantiation
    cudaStream_t stream2;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);

    float *a1, *c1;         // pinned host buffers: input temps / output k
    float *dev_a1, *dev_c1; // device buffers

    cudaMalloc( (void**)&dev_a1, N * sizeof(float));
    cudaMalloc( (void**)&dev_c1, N * sizeof(float));
    // BUG FIX: the pinned allocations and the async copies below sized
    // float buffers with sizeof(int); sizeof(float) is used throughout.
    cudaHostAlloc( (void**)&a1, N * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc( (void**)&c1, N * sizeof(float), cudaHostAllocDefault);

    ifstream read("data.csv",ios::in);
    if(!read){
        cerr<<"Fail to read data.csv"<<endl;
        exit(EXIT_FAILURE);
    }

    // Parse the CSV: every second data row's second column is a temperature.
    // NOTE(review): assumes the file yields at most 2*N rows, else a1
    // overflows - confirm against data.csv.
    int count = 0;
    string row;
    while(read>>row){
        if(count!=0){
            std::string delimiter = ",";
            if(count%2==0){ // Using just the temperature
                std::string token = row.substr(row.find(delimiter)+1);
                double temp = ::atof(token.c_str());
                a1[count/2] = float(temp);
            }
        }
        count++;
    }

    // One chunk covers all N samples, so this loop body executes exactly
    // once (i = 0); the structure is kept for future chunked overlap.
    for(int i=0;i<N;i+= N*2) {
        if(i%2==0){
            cudaMemcpyAsync(dev_a1,a1,N*sizeof(float),cudaMemcpyHostToDevice,stream1);
            getK<<<(N+1023)/1024,1024,0,stream1>>>(dev_a1,dev_c1);
            cudaMemcpyAsync(c1,dev_c1,N*sizeof(float),cudaMemcpyDeviceToHost,stream1);
        }
        else{
            cudaMemcpyAsync(dev_a1,a1,N*sizeof(float),cudaMemcpyHostToDevice,stream2);
            // BUG FIX: this branch queued its copies on stream2 but
            // launched the kernel on stream1, breaking the intended
            // copy -> kernel -> copy ordering; launch on stream2.
            getK<<<(N+1023)/1024,1024,0,stream2>>>(dev_a1,dev_c1);
            cudaMemcpyAsync(c1,dev_c1,N*sizeof(float),cudaMemcpyDeviceToHost,stream2);
        }
    }
    // BUG FIX: c1 was read below without waiting for the asynchronous
    // device-to-host copies to complete.
    cudaStreamSynchronize(stream1);
    cudaStreamSynchronize(stream2);

    for (int k=0;k<N-1;k++){ // Printing all data
        cout<<"Dato: "<<k<<" | Value of K: "<<c1[k]<<"\n";
    }
    cout<<"\n\n\n------------------------------Values of K by period:------------------------------";
    cout<<"\n\n All values returned are based on the Cyclopentadiene Dimerization";
    cout<<"\n\n\n H2 + I2 --> 2HI \n\n\n";
    std::ofstream myfile;
    myfile.open ("outData.csv");
    myfile<<"Hour,People,Velocity of reaction\n";
    int medPerPeriod = 12000; // 300 samples per second -> 12000 per period
    float sum = 0;
    int period = 0;
    for (int k=0;k<N-1;k++){
        sum+=c1[k];
        if(k%medPerPeriod==0&&k!=0){
            period++;
            int people = 0;
            std::string hour = " ";
            if(period==1){
                hour = "07:00 - 07:50";
                people = 38;
            }
            else if(period==2){
                hour = "07:50 - 08:40";
                people = 37;
            }
            else if(period==3){
                hour = "08:40 - 09:30";
                people = 36;
            }
            else if(period==4){
                hour = "09:30 - 10:15";
                people = 36;
            }
            else if(period==5){
                hour = "10:15 - 10:40";
                people = 3;
            }
            else if(period==6){
                hour = "10:40 - 11:30";
                people = 34;
            }
            else if(period==7){
                hour = "11:30 - 12:15";
                people = 35;
            }
            double average = double(sum)/double(medPerPeriod);
            double velocity = (average*0.05*0.05);
            cout<<"\tHour: "<<hour<<"\tPeople: "<<people<<"\tVelocity of reaction: "<< velocity<<"s\n";
            myfile<<hour<<","<<people<<","<< velocity<<"s\n";
            sum=0;
        }
    }
    cout<<"-----------------------------------------------------------------------------------\n\n\n";
    myfile.close();
    // BUG FIX: release device and pinned memory and destroy both streams
    // (stream2 and all buffers were previously leaked).
    cudaFree(dev_a1);
    cudaFree(dev_c1);
    cudaFreeHost(a1);
    cudaFreeHost(c1);
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    return 0;
}
14,247 | #include <chrono>
#include <fstream>
#include <stdio.h>
#include <string>
#include <iostream>
#include <math.h>
#include <random>
// One pair of input operands for the per-element matrix ops kernel.
typedef struct {
    int a;  // first operand
    int b;  // second operand
} MatricesStruct;
// Per-element results of the four device operations below.
typedef struct {
    int add;  // a + b
    int sub;  // a - b
    int mul;  // a * ScaleFactor (b is not used; see d_mult_matrices)
    int div;  // a / ScaleFactor (integer division)
} MatrixResultsStruct;
__constant__ int ScaleFactor = 5;
// Stores a + b into results.add.
__device__
void d_add_matrices( const MatricesStruct& matrices, MatrixResultsStruct& results )
{
    results.add = matrices.a + matrices.b;
}
// Stores a - b into results.sub.
__device__
void d_sub_matrices( const MatricesStruct& matrices, MatrixResultsStruct& results )
{
    results.sub = matrices.a - matrices.b;
}
// Stores a * ScaleFactor into results.mul.
// NOTE(review): despite the name, this ignores matrices.b and scales by
// the __constant__ ScaleFactor instead - confirm intended.
__device__
void d_mult_matrices( const MatricesStruct& matrices, MatrixResultsStruct& results )
{
    results.mul = matrices.a * ScaleFactor;
}
// Stores a / ScaleFactor (truncating integer division) into results.div.
// NOTE(review): matrices.b is unused here as well - confirm intended.
__device__
void d_div_matrices( const MatricesStruct& matrices, MatrixResultsStruct& results)
{
    results.div = matrices.a / ScaleFactor;
}
// One thread per element: computes add/sub/scaled-mul/scaled-div of
// matrices[index] into results[index].
// NOTE(review): no bounds guard - assumes the launch covers exactly the
// array length (gridDim.x * blockDim.x == element count); confirm at the
// call site.
__global__
void matrix_ops(const MatricesStruct* const matrices, MatrixResultsStruct* const results)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    d_add_matrices( matrices[index], results[index] );
    d_sub_matrices( matrices[index], results[index] );
    d_mult_matrices( matrices[index], results[index] );
    d_div_matrices( matrices[index], results[index] );
}
// Returns the time elapsed between two high-resolution clock samples,
// truncated to whole nanoseconds and narrowed to int.
int get_duration_ns( const std::chrono::time_point<std::chrono::high_resolution_clock>& start,
const std::chrono::time_point<std::chrono::high_resolution_clock>& end )
{
    const auto elapsed = end - start;
    const auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed);
    return static_cast<int>(ns.count());
}
// Samples the high-resolution wall clock.
std::chrono::time_point<std::chrono::high_resolution_clock> get_clock_time()
{
    using clock_type = std::chrono::high_resolution_clock;
    return clock_type::now();
}
// Writes one "<name><suffix>" file: header line, timing line, then the chosen
// result field laid out as a width x width grid.
static void write_result_file(
    const std::string& path, const char* label, const int totalThreads,
    const int blockSize, const float proc_time, const int width,
    const MatrixResultsStruct* const results, int MatrixResultsStruct::* field)
{
    std::ofstream stream(path);
    if (!stream.is_open())
    {
        printf("FILE NOT OPEN?\n");
        return;
    }
    stream << label << totalThreads << " and Block Size: " << blockSize << "\n";
    stream << "matops took " << proc_time << "ns\n[ ";
    for (int i = 0; i < totalThreads; i++)
    {
        stream << results[i].*field << ",";
        // Break the row every `width` elements; guard against width == 0
        // (totalThreads == 0), which previously caused a modulo-by-zero.
        if (width > 0 && (i + 1) % width == 0)
            stream << "\n";
    }
    stream << "]\n";
    stream.close();
}
// Helper function for writing the add/sub/mul/div math results to four files.
// The four formerly duplicated loops are factored into write_result_file.
// NOTE(review): proc_time is labelled "ns" in the output but run_kernal
// passes milliseconds from cudaEventElapsedTime — confirm the intended unit.
__host__
void write_results(
    const std::string& outputName, const int& totalThreads, const int& blockSize,
    const MatrixResultsStruct* const results, const float& proc_time)
{
    // Row width assumes totalThreads is a perfect square (matDimSize^2).
    const int width = int(sqrt(double(totalThreads)));
    write_result_file(outputName + "_add", "Matrix Add: ", totalThreads, blockSize, proc_time, width, results, &MatrixResultsStruct::add);
    write_result_file(outputName + "_sub", "Matrix SUB: ", totalThreads, blockSize, proc_time, width, results, &MatrixResultsStruct::sub);
    // "_mull" filename kept as-is for backward compatibility with consumers.
    write_result_file(outputName + "_mull", "Matrix MUL: ", totalThreads, blockSize, proc_time, width, results, &MatrixResultsStruct::mul);
    write_result_file(outputName + "_div", "Matrix DIV: ", totalThreads, blockSize, proc_time, width, results, &MatrixResultsStruct::div);
}
// Fills host_data with deterministic pseudo-random operand pairs
// (default-seeded engine, so every run produces identical data).
__host__
void generate_data( const int& totalThreads, MatricesStruct* const host_data )
{
    std::default_random_engine rng;
    std::uniform_int_distribution<int> pick(0, 3);
    for (int idx = 0; idx < totalThreads; ++idx)
    {
        // First draw scales the index, second draw is the raw operand —
        // draw order matters for reproducibility.
        const int scale = pick( rng ) + 1;
        host_data[idx].a = idx * scale;
        host_data[idx].b = pick( rng );
    }
}
// Helper function for executing the matrix functionality: copies input to the
// device, launches matrix_ops, and copies the results back, all ordered on one
// non-default stream; copy+kernel time is measured with CUDA events.
// Fixes: events are now recorded on the work stream (previously they went to
// the default stream and did not bracket the async work), the kernel's
// dynamic shared-memory size is 0 (it was mistakenly 1 byte), and the events
// are destroyed (they were leaked).
__host__
void run_kernal(
    const int& blockSize, const int& totalThreads, const int& numBlocks, const std::string& outputName,
    MatricesStruct* const data, MatrixResultsStruct*& results, MatricesStruct* d_data, MatrixResultsStruct* d_results)
{
    const auto data_size = totalThreads * sizeof(MatricesStruct);
    const auto results_size = totalThreads * sizeof(MatrixResultsStruct);
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaEvent_t start, stop;
    float proc_time = 0.0f;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord(start, stream);
    cudaMemcpyAsync(d_data, data, data_size, cudaMemcpyHostToDevice, stream);
    matrix_ops<<<numBlocks, blockSize, 0, stream>>>(d_data, d_results);
    cudaMemcpyAsync(results, d_results, results_size, cudaMemcpyDeviceToHost, stream);
    cudaEventRecord(stop, stream);
    // Waiting on `stop` also guarantees the D2H copy recorded before it is done.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&proc_time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaStreamDestroy(stream);
    printf("matrix_ops took %f ms\n", proc_time);
    if ( !outputName.empty() )
    {
        write_results( outputName, totalThreads, totalThreads / numBlocks, results, proc_time );
    }
}
// Allocates the buffers used by the math functions: device buffers via
// cudaMalloc and host buffers as pinned (page-locked) memory so the
// cudaMemcpyAsync calls in run_kernal can run asynchronously, then fills the
// host input deterministically via generate_data.
// NOTE(review): allocation return codes are unchecked — on failure the
// pointers are left unset and later CUDA calls will fail.
__host__
void init_data(const int& totalThreads, MatricesStruct*& host_data, MatrixResultsStruct*& host_results,
MatricesStruct*& d_data, MatrixResultsStruct*& d_results)
{
auto data_size = totalThreads * sizeof(MatricesStruct);
auto results_size = totalThreads * sizeof(MatrixResultsStruct);
cudaMalloc((void**)&d_data, data_size);
cudaMalloc((void**)&d_results, results_size);
cudaHostAlloc((void **)&host_data, data_size, cudaHostAllocDefault);
cudaHostAlloc((void **)&host_results, results_size, cudaHostAllocDefault);
generate_data( totalThreads, host_data );
}
// Releases the pinned host buffers (cudaHostAlloc) and device buffers
// (cudaMalloc) created by init_data.
__host__
void cleanup_data(
    MatricesStruct*& data, MatricesStruct*& d_data,
    MatrixResultsStruct*& results, MatrixResultsStruct*& d_results)
{
    cudaFreeHost(data);
    cudaFreeHost(results);
    cudaFree(d_data);
    cudaFree(d_results);
}
// End-to-end driver: allocate host/device buffers, run the kernel through
// run_kernal (which also writes result files when outputName is set), then
// free everything.
__host__
void execute_matops(
    const int& blockSize, const int& totalThreads, const int& numBlocks,
    const std::string& outputName)
{
    MatricesStruct* host_in = nullptr;
    MatrixResultsStruct* host_out = nullptr;
    MatricesStruct* dev_in = nullptr;
    MatrixResultsStruct* dev_out = nullptr;
    init_data(totalThreads, host_in, host_out, dev_in, dev_out);
    run_kernal(blockSize, totalThreads, numBlocks, outputName,
               host_in, host_out, dev_in, dev_out);
    cleanup_data(host_in, dev_in, host_out, dev_out);
}
// Usage: prog [matrix_dim] [block_size] [output_basename]
// Fixes: atoi results are validated — garbage or "0" arguments previously
// produced a divide-by-zero at the numBlocks computation; also fixes the
// "martix" typo in the banner and adds an explicit return.
int main(int argc, char** argv)
{
    int matDimSize = 512;
    int totalThreads = 512 * 512;
    int blockSize = 256;
    std::string outputName;
    if (argc >= 2)
    {
        matDimSize = atoi(argv[1]);
        if (matDimSize <= 0)  // atoi returns 0 on non-numeric input
        {
            printf("Invalid matrix dimension '%s'\n", argv[1]);
            return 1;
        }
        totalThreads = matDimSize * matDimSize;
    }
    if (argc >= 3)
    {
        blockSize = atoi(argv[2]);
        if (blockSize <= 0)  // guards the divisions below
        {
            printf("Invalid block size '%s'\n", argv[2]);
            return 1;
        }
    }
    if (argc >= 4)
    {
        outputName = argv[3];
    }
    int numBlocks = totalThreads / blockSize;
    // Round the workload up to a whole number of blocks.
    if (totalThreads % blockSize != 0)
    {
        ++numBlocks;
        totalThreads = numBlocks * blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }
    printf("%d x %d matrix (%d elements)\n", matDimSize, matDimSize, totalThreads);
    printf("Executing with %d total threads %d blocks @ %d threads\n", totalThreads, numBlocks, blockSize);
    execute_matops( blockSize, totalThreads, numBlocks, outputName);
    return 0;
}
|
14,248 | #include <iostream>
#include <cassert>
#include <cstdlib>
// Single-block element-wise vector add: thread t handles elements
// t, t + blockDim.x, t + 2*blockDim.x, ... (block-stride loop).
__global__ void add_1(int n, float *x, float *y, float *ans){
    const int step = blockDim.x;
    int i = threadIdx.x;
    while(i < n){
        ans[i] = x[i] + y[i];
        i += step;
    }
}
// Variant of add_1 with a block-wide barrier after every element — kept as a
// benchmark variant to measure the cost of gratuitous synchronization.
// NOTE(review): __syncthreads() inside this loop is only well-defined because
// all threads run the same number of iterations when n is a multiple of
// blockDim.x (true for the n = 1<<25, 256-thread launch in main); for other
// n the trip counts diverge and the barrier is undefined behavior.
__global__ void add_2(int n, float *x, float *y, float *ans){
int tid = threadIdx.x;
int stride = blockDim.x;
for(int i = tid; i < n; i += stride){
ans[i] = x[i] + y[i];
__syncthreads();
}
}
// Variant that gives each thread one contiguous slice of the array
// [tid*n/stride, (tid+1)*n/stride). Adjacent threads therefore touch
// addresses n/stride elements apart, so warp accesses are uncoalesced —
// kept to benchmark that effect against the strided variants.
// The long long intermediates avoid int overflow in tid * n for large n.
__global__ void add_3(int n, float *x, float *y, float *ans){
int tid = threadIdx.x;
int stride = blockDim.x;
int rowstart = ((long long) tid * n) / stride;
int rowend = ((long long) (tid + 1) * n) / stride;
for(int i = rowstart; i < rowend; i++){
ans[i] = x[i] + y[i];
}
}
// Grid-wide element-wise add: one thread per element.
// Fix: bounds guard added — main computes the grid with ceil-division, so
// without the guard the last block reads/writes past the arrays whenever n
// is not a multiple of the block size.
__global__ void add_4(int n, float *x, float *y, float *ans){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n){
        ans[i] = x[i] + y[i];
    }
}
// Usage: prog <algo_type 1-4>. Benchmarks four vector-add kernel variants.
// Fixes: CUDA API status codes are now checked (asserting the pointer is
// non-NULL does not detect cudaMalloc failure — the pointer is simply left
// unmodified), a launch-error check is added, and unknown algo_type values
// are rejected instead of silently reading an uninitialized result buffer.
int main(int argc, char** argv){
    assert(argc == 2);
    int algo_type = std::stoi(argv[1]);
    assert(algo_type >= 1 && algo_type <= 4);
    int n = (1 << 25);
    std::cout << "memory usage: " << 3 * n * sizeof(float) << " bytes" << std::endl;
    // Host buffers, initialized so every element should sum to 3.0.
    float *x, *y, *ans;
    x = (float *) malloc(n * sizeof(float));
    assert(x != NULL);
    y = (float *) malloc(n * sizeof(float));
    assert(y != NULL);
    ans = (float *) malloc(n * sizeof(float));
    assert(ans != NULL);
    for(int i = 0; i < n; i++){
        x[i] = 1.0;
        y[i] = 2.0;
    }
    // Device buffers.
    float *d_x, *d_y, *d_ans;
    assert(cudaMalloc(&d_x, n * sizeof(float)) == cudaSuccess);
    assert(cudaMalloc(&d_y, n * sizeof(float)) == cudaSuccess);
    assert(cudaMalloc(&d_ans, n * sizeof(float)) == cudaSuccess);
    assert(cudaMemcpy(d_x, x, n * sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
    assert(cudaMemcpy(d_y, y, n * sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
    int grid_size = (n + 256 - 1) / 256;
    if(algo_type == 1){
        add_1<<<1, 256>>>(n, d_x, d_y, d_ans);
    } else if(algo_type == 2){
        add_2<<<1, 256>>>(n, d_x, d_y, d_ans);
    } else if(algo_type == 3){
        add_3<<<1, 256>>>(n, d_x, d_y, d_ans);
    } else if(algo_type == 4){
        add_4<<<grid_size, 256>>>(n, d_x, d_y, d_ans);
    }
    // Surface launch-configuration errors that would otherwise only show up
    // as garbage in ans.
    assert(cudaGetLastError() == cudaSuccess);
    // Blocking copy also synchronizes with the kernel.
    assert(cudaMemcpy(ans, d_ans, n * sizeof(float), cudaMemcpyDeviceToHost) == cudaSuccess);
    // Total absolute error vs the expected value 3.0 everywhere (0 if correct).
    float err = 0.0;
    for(int i = 0; i < n; i++){
        err += abs(ans[i] - 3.0);
    }
    std::cout << err << '\n';
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_ans);
    free(x);
    free(y);
    free(ans);
}
14,249 | /*
* Copyright 2016 Alexander Terenin
*
* Licensed under the Apache License, Version 2.0 (the "License")
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */
/*
* Function : cuda_betaSq
* Purpose : squares beta
* Argument n : size of sampler
* Argument *beta : pointer to beta
* Argument *betaSq : pointer to betaSq
* Output : mutates betaSq and stores result in its place
*/
extern "C"
__global__ void cuda_betaSq(int n, float *beta, float *betaSq) {
    // One thread per element; the grid may overshoot n, so guard first.
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(idx >= n) {
        return;
    }
    const float b = beta[idx];
    betaSq[idx] = b * b;
}
14,250 | /*
* Source: https://stackoverflow.com/questions/24704710/cuda-ptxas-options-v-shared-memory-and-cudafuncattributes-sharedsizebytes-do
* Three issues get clarified here:
* 1. use of cudaFuncAttributes(&attr,kernel), refer the documentation at
* http://developer.download.nvidia.com/compute/cuda/3_0/toolkit/docs/online/group__CUDART__HIGHLEVEL_g0b85e087210b47056cb6fc03a0e264e8.html
* This function obtains the attributes of the function (kernel), kernel must be a global function
* 2. Second is an issue on the sidelines about using printf statement inside kernel
* https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#formatted-output
* printf() can be used in devices with compute capability 2.x or higher, must use -arch=sm_20 and cudaDeviceSynchronize() after the kernel
* call inside the host function.
* 3. usage of -Xptxas=-v for reading the memory usage
$ nvcc -Wno-deprecated-gpu-targets -Xptxas=-v -arch=sm_20 -o 061218c 061218c.cu
ptxas info : 22 bytes gmem, 16 bytes cmem[14]
ptxas info : Compiling entry function '_Z8mykernelv' for 'sm_20'
ptxas info : Function properties for _Z8mykernelv
8 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 17 registers, 128 bytes smem, 32 bytes cmem[0]
*/
#include <stdio.h>
// Demo kernel: declares a 128-byte static shared array so ptxas/
// cudaFuncGetAttributes report shared-memory usage, and exercises in-kernel
// printf (requires compute capability 2.x+).
// NOTE(review): data[] is never written, so the printed values are
// uninitialized shared memory — presumably intentional for this demo, since
// only the reported shared-memory size matters.
__global__ void mykernel(){
__shared__ int data[32];
printf("Hello\n");
for (int i = 0; i < 32; i++)
printf("data[%d] = %d\n", i, data[i]);
}
// Launches the demo kernel, then queries and prints its compiled
// shared-memory footprint.
int main(){
cudaFuncAttributes attr;
mykernel<<<1,1>>>();
// Needed so the kernel's device-side printf output is flushed before exit.
cudaDeviceSynchronize();
// Query static attributes of the compiled kernel (shared mem, registers, ...).
cudaFuncGetAttributes(&attr, mykernel);
printf("shared mem usage: %zu bytes\n", attr.sharedSizeBytes);
return 0;
}
|
14,251 | #include "includes.h"
// In-place element-wise multiply of data by src_data: each thread scales one
// element near the front of the buffer and its mirror element from the back,
// so a launch covering the first half of the indices processes the whole
// buffer.
// NOTE(review): the index is built from threadIdx only (no blockIdx), so this
// assumes a single-block launch whose blockDim spans (x, y, z) of the data —
// confirm against the caller. No bounds check on idx vs size.
__global__ void mult_complex_eff_kernal(float* data, const float* src_data, const int nx, const int nxy, const int size)
{
int idx = threadIdx.z*nxy + threadIdx.y*nx + threadIdx.x;
data[idx] *= src_data[idx];
data[size-idx-1] *= src_data[size-idx-1];
}
14,252 | //MANDELBULB WITH CUDA
//Alexander Kuczala 2015
//akuczala@ucsd.edu
//Input: base name of output files
//Outputs binary .rgb file of pixels encoded as 24 bit colors #RRGGBB
//can be converted to image file with program such as ImageMagick (`convert' in linux)
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <cmath>
#include <string>
#include <sys/time.h>
#include "Vec.cu"
#include "colorFunctions.cu"
//number of runs
const int runs = 1;
//number of frames/run
int frames = 10;
//fractal parameters
const bool isJulia = false; //if true draws a julia set instead of mandelbulb
const float power = 8; //power of recursion function
//rendering parameters
const float specularity = 0.5; //intensity of specularity highlights
const float specularExponent = 3; //larger values -> smaller highlights
const float fogDistance = 4; //distance at which fog completely occludes objects
const float lightIntensity = 1;
const float cameraAngle = 1.0; //divergence of camera rays
//fractal calculation parameters
//default values: bailout = 6, maxIterations = 8
const float bailout = 12; //value of r to terminate at, lower -> smoother, less detailed
const int maxIterations = 32; //more iterations, more accurate fractal
//ray stepping parameters
const float epsilonScale = 0.1; //default value = 1
const float minEpsilon = 1E-7; //default = 1E-7
const int stepLimit = 5000; //number of allowed marching steps (default 100)
const float rayLengthLimit = 4; //maximum ray length allowed
//(should be smaller or equal to than fog distance)
int* h_pixels; //host image array
//clip values to certain range
//clamp value into the closed range [lower, upper]
__device__ float clamp(float value, float lower, float upper)
{
if(value < lower)
return lower;
return (value > upper) ? upper : value;
}
//Ray class performs operations of single Ray/processor
//All ray functions are performed on GPU
//Per-pixel ray state plus the ray-marching, distance-estimation and shading
//logic for the Mandelbulb/Julia fractal. One Ray instance is built per GPU
//thread (one per pixel) inside the draw kernel.
class Ray{
public:
Vec dir; //Ray direction
//Camera parameters
Vec cameraPos; //camera position
Vec cameraTarget; //camera target
Vec cameraDir; //calculated direction of camera
Vec cameraUp; //direction of camera's y axis
Vec cameraRight; //direction of camera's x axis
//Light position
Vec lightPos;
//const bool shadowsOn = false;
//constant vector c for julia set
Vec julia;
//coloring variables
int backgroundColor;
float breakRadius; //keep track of breakout radius value for coloring
float minDist; //keep track of minimum distant of orbits in recursion
float eps; //intersection distance threshold
float pixelScale; //ray stepping size
int stepsTaken; //number of ray steps taken in last iteration
int width, height; //image dimensions
//Constructor: (i, j) is the pixel address; builds the camera frame and the
//ray direction for this pixel.
__device__ Ray(int i, int j, Vec cameraPos, Vec cameraTarget, int width, int height)
{
//set width and height
this->width = width;
this->height = height;
pixelScale = 1.0/width; //scale of distance between rays
//set camera parameters
Vec cameraUp(0,0,1); //set direction of camera y axis
this->cameraPos = cameraPos.copy();
this->cameraTarget = cameraTarget.copy();
this->cameraUp = cameraUp.copy();
//set light position
Vec lightPos(-2,-2,2);
this->lightPos = lightPos;
initCamera(); //set up orthogonal basis for camera
dir = rayDir(i,j);
//set julia constant
//NOTE(review): this local shadows the `julia` member, which stays
//default-constructed — only matters if isJulia is enabled; confirm.
Vec julia(0.8,-0.9,-0.4);
//set background color
backgroundColor = color(100,100,100);
}
//calculate ray direction from pixel address
__device__ Vec rayDir(int i, int j)
{
//scale the camera frame vectors to create the cone of rays
float xscale = 1.0*(i-width/2.0)/width*cameraAngle;
float yscale = -1.0*(j-height/2.0)/height*cameraAngle;
Vec out = cameraDir.add(cameraRight.times(xscale)).add(cameraUp.times(yscale));
return out.unit();
}
//Single ray marching step with intital vector zed0: iterates
//zed -> zed^power + c in spherical coordinates and returns the
//distance-estimator value for zed0. Also updates breakRadius and minDist.
__device__ float traceStep(Vec zed0)
{
Vec c(0,0,0); //initialize c vector
//c is either a constant (for julia) or the starting point (mandelbulb)
if(isJulia)
c = julia;
else
c = zed0.copy();
Vec zed = zed0.copy();
//convert initial zed to spherical coordinates
float r = zed.mag();
float th = atan2(zed.y,zed.x);
float ph = asin(zed.z/r);
float dr = 1; //initial value of r derivative
minDist = -1; //initialize minimum distance
float powR, powRsin;
int n=0;
//zed iterations
for(n=0; n<= maxIterations; n++)
{
//compute scalar derivative approximation
powR = pow(r,power - 1);
dr = dr*powR*power + 1;
//iterate zed (zed = zed^p + c)
powR = pow(r,power);
powRsin = sin(power*ph);
zed.x = powR*powRsin*cos(power*th);
zed.y = powR*powRsin*sin(power*th);
zed.z = powR*cos(power*ph);
zed.addTo(c);
r = zed.mag(); //new radius
//'^' (xor) behaves like '||' here: while minDist is still -1, r (>= 0)
//can never be below it, so the two operands are never true at once.
if(minDist < 0 ^ r < minDist) minDist = r; //update min distance
if(r > bailout) break; //stop iterating if r exceeds bailout value
//calculate new angles
th = atan2(zed.y, zed.x);
ph = acos(zed.z / r);
}
//memoize for coloring
breakRadius = r;
//return distance estimation value
return 0.5*r*log(r)/dr;
}
//approximate normal vector to fractal surface via central finite
//differences of the distance estimator around zed.
__device__ Vec getNormal(Vec zed)
{
//NOTE(review): this permanently halves the member eps on every call
//(per-pixel state), tightening later thresholds — presumably intentional.
eps = eps/2.0;
//calculate small finite differences around zed
Vec zedx1 = zed.add(Vec(eps,0,0));
Vec zedx2 = zed.sub(Vec(eps,0,0));
Vec zedy1 = zed.add(Vec(0,eps,0));
Vec zedy2 = zed.sub(Vec(0,eps,0));
Vec zedz1 = zed.add(Vec(0,0,eps));
Vec zedz2 = zed.sub(Vec(0,0,eps));
//calculate normal to surface
float dx = traceStep(zedx1) - traceStep(zedx2);
float dy = traceStep(zedy1) - traceStep(zedy2);
float dz = traceStep(zedz1) - traceStep(zedz2);
Vec normal = Vec(dx,dy,dz);
normal = normal.unit();
return normal;
}
//ray stepping algorithm: marches from p0 along dir using the distance
//estimator; returns the ray length at intersection, or -1 for a miss.
__device__ float trace(Vec p0, Vec dir)
{
Vec zed0 = p0.copy(); //initial point
float rayLength = 0;
eps = minEpsilon; //initial intersection threshold
int maxSteps = int(1.0*stepLimit/epsilonScale);
float distance = 0;
int i;
for(i = 0;i<maxSteps;i++)
{
distance = traceStep(zed0); //calculate maximum distance to fractal
//step ray forward
zed0 = zed0.add(dir.times(epsilonScale*distance));
rayLength += epsilonScale*distance;
//if ray length exceeds limit, assume no intersection and stop
if(rayLength > rayLengthLimit)
return -1;
//stop if within intersection threshold
if(distance < eps) break;
//update intersection threshold
eps = max(minEpsilon,pixelScale*rayLength);
//println("eps= " + eps);
}
stepsTaken = i; //record steps taken
//assume intersection if number of steps is exhausted
//this can cause artifacts if the stepLimit is too small
return rayLength;
}
//various routines for coloring
__device__ int stepColoring()
{
int scale = 20;
float r = 1.0*(stepsTaken%scale)/scale;
float g = 1.0*((stepsTaken+scale/3)%scale)/scale;
float b = 1.0*((stepsTaken+2*scale/3)%scale)/scale;
r = abs(r-0.5)*2;
g = abs(g-0.5)*2;
b = abs(b-0.5)*2;
return color(int(r*255),int(g*255),int(b*255));
}
//color from the minimum orbit distance recorded by the last traceStep
__device__ int minOrbitColoring()
{
float scale = 0.4;
float r,g,b;
float spam;
r = modf((minDist)/scale,&spam);
g = modf((minDist+scale/3)/scale,&spam);
b = modf((minDist+2*scale/3)/scale,&spam);
r = abs(r-0.5)*2;
g = abs(g-0.5)*2;
b = abs(b-0.5)*2;
return color(int(r*255),int(g*255),int(b*255));
}
//returns fractal color
__device__ int getCol()
{
return minOrbitColoring();
}
//simulate ambient light by shading
//based on number of steps taken and minimum orbit distance
__device__ float ambientOcclusion()
{
//const float aoStrength = 1;
const float emphasis = 0.58; //default
int maxSteps = int(stepLimit/ epsilonScale);
float ao = 1.0 - minDist*minDist;
if(ao < 0)
ao = 0;
if(ao > 1)
ao = 1;
ao = 1.0 - ao;
ao = ao*(1-1.0*stepsTaken/((float)(maxSteps))*2*emphasis);
return clamp(ao,0.0,1.0);
}
//apply fog based on distance to point (0 = no fog, 1 = fully occluded)
__device__ float fog(float distance)
{
return clamp(distance/fogDistance,0.0,1.0);
}
//full per-pixel pipeline: march, then Lambertian shade + Phong specular +
//ambient occlusion + fog; returns the final packed pixel color.
__device__ int rayTraceFractal()
{
//Vec dir = rayDir(i,j);
Vec pos = cameraPos;
float distance = trace(pos,dir); //find distance with ray marching
if(distance < 0) //negative distance means no intersection
return backgroundColor;
//intersection point of ray with surface
Vec intersect = pos.add(dir.times(distance));
//normal to surface
Vec normal = getNormal(intersect);
//shading for surface
//calculate unit vector pointing from light to object
Vec lightDir = intersect.sub(lightPos);
lightDir = lightDir.unit();
//calculate cos of angle between light ray and normal to sphere and use for shade
float normDotLight = -normal.dot(lightDir);
float shade = 0;
if(normDotLight < 0) //if dot product is - then no shading
shade = 0;
else
shade = normDotLight*lightIntensity;
shade = abs(shade);
//phong specularity-----
//reflected light vector
Vec reflect = lightDir.times(-1).sub(normal.times(2*normDotLight));
float reflDotRay = -reflect.dot(dir);
float specular = 0;
if(reflDotRay < 0)
specular = specularity*pow(abs(reflDotRay),specularExponent);
//base color is lambertian shading
int out = colorShade(getCol(),shade);
//apply specularity
out = addColors(out,colorShade(color(255,255,255),specular));
//apply ambient occulsion
out = colorShade(out,ambientOcclusion());
//check for shadows.
//if(shadowsOn)
//{
//create shadow detecting ray pointing from object to light
//place ray's origin slightly above intersection point
//push above surface by normal*eps
//Vec shadowPos = intersect.copy().add(normal.times(eps));
//Vec shadowDir = lightDir.times(-1);
//float dist = trace(pos,dir); //compute distance to fractal along ray to light
//if ray intersects a surface between object and light, cast shadow
//if(dist > 0 && dist*dist < intersect.sub(lightPos).squared())
//{
// return 0;
//}
//}
//add fog
out = averageColors(backgroundColor,out,fog(distance));
return out;
}
//calculate frame vectors for camera (orthonormal basis dir/up/right)
__device__ void initCamera()
{
//points from camera to target
cameraDir = cameraTarget.sub(cameraPos).unit();
//use Graham Schmidt to make up vector orthogonal to dir
cameraUp = cameraUp.sub(cameraDir.times(cameraUp.dot(cameraDir)));
cameraUp = cameraUp.unit();
//calculate right pointing camera frame vector
cameraRight = cameraDir.cross(cameraUp).unit();
}
};
//end ray object----------------------------
//Kernel. One ray per thread.
//Kernel. One ray per thread: each thread builds a Ray for its pixel and
//stores the traced color into the flat pixel array.
__global__ void draw(int* pixels,int* width, int* height, Vec* cameraPos, Vec* cameraTarget)
{
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
const int total = (*width) * (*height);
if(idx >= total)
return; //grid may overshoot the pixel count
const int px = idx % (*width);
const int py = idx / (*width);
Ray ray(px, py, *cameraPos, *cameraTarget, *width, *height);
pixels[idx] = ray.rayTraceFractal();
}
//write pixel color values as binary #RRGGBB to output file
void write(string outFileName,int width, int height)
{
ofstream outFile;
//open file for writing in binary
outFile.open(outFileName.c_str(),ios::out | ios::binary);
if(!outFile.is_open())
{
cout << "couldn't write to " << outFileName << endl;
return;
}
cout << "writing to " << outFileName << endl;
for(int i=0;i<width*height;i++)
{
int p = h_pixels[i];
//put the bits in the right order (Read from left to right)
unsigned int unp = (unsigned int)(color(getb(p),getg(p),getr(p)));
//outFile << h_pixels[i];
outFile.write((char*) &unp,3); //colors are 3 bytes long
}
outFile.close();
}
// Renders `frames` animation frames per run, timing each run (wall clock) and
// the last frame's kernel (CUDA events), optionally writing .rgb files.
// Fixes: h_pixels was malloc'd every run and never freed, and d_width /
// d_height were never cudaFree'd — both are now released at the end of each
// run.
int main(int argc, char* argv[])
{
//timer parameters
struct timeval t1, t2;
struct timezone tz;
//time data arrays
double time[runs];
float kernelTime[runs];
cudaError_t err;
//run loop. can vary image size etc
for(int run = 0; run< runs; run++)
{
//start timer------
gettimeofday(&t1, &tz);
//int h_width = (run+1)*100; //variable width
int h_width = 1200; //constant width
cout << "width = " << h_width << endl;
//image size on host and device
int h_height = h_width;
int* d_width;int* d_height;
int n = h_width*h_height; //number of pixels
size_t size = sizeof(int)*n;
size_t vecSize = sizeof(Vec);
//allocate pixel array on host
h_pixels = (int*)malloc(size);
int* d_pixels; //pixel array on device
//Camera position and target
Vec h_cameraPos;
Vec h_cameraTarget;
Vec* d_cameraPos;
Vec* d_cameraTarget;
//allocate memory on device
//allocate image size on device
err = cudaMalloc((void **) &d_width, sizeof(int));
if(err != cudaSuccess) cout << "can't allocate memory for width on device" << endl;
err = cudaMalloc((void **) &d_height, sizeof(int));
if(err != cudaSuccess) cout << "can't allocate memory for height on device" << endl;
//allocate pixel array on device
err = cudaMalloc((void **) &d_pixels, size);
if(err != cudaSuccess) cout << "can't allocate memory for pixel array on device" << endl;
//allocate camera position and target
err = cudaMalloc((void **) &d_cameraPos, vecSize);
if(err != cudaSuccess) cout << "can't allocate memory for cameraPos on device" << endl;
err = cudaMalloc((void **) &d_cameraTarget, vecSize);
if(err != cudaSuccess) cout << "can't allocate memory for cameraTarget on device" << endl;
//run animation
//set initial and final values of camera target and position
Vec cameraTargetInit(0,0,0);
Vec cameraTargetFinal(0.6025440273509881, -0.7549067847481121, 0.5049324975811623);
Vec cameraPosInit(1,-2,1.5);
Vec cameraPosFinal = cameraTargetFinal.copy();
float dt = 1.0/frames;
float t = 0;
for(int frame = 0;frame < frames; frame++)
{
cout << "Frame " << frame << "/" << frames << endl;
//move towards fractal at exponentially decaying rate
float distFrac = exp(-8*t);
h_cameraPos = cameraPosInit.times(distFrac).add(cameraPosFinal.times(1-distFrac));
h_cameraTarget = cameraTargetInit.times(distFrac).add(cameraTargetFinal.times(1-distFrac));
//copy image size to device
err = cudaMemcpy(d_width, &h_width, sizeof(int), cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy width to device" << endl;
err =cudaMemcpy(d_height, &h_height, sizeof(int), cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy height to device" << endl;
//copy camera data to device
err = cudaMemcpy(d_cameraPos, &h_cameraPos, vecSize, cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy cameraPos to device" << endl;
err =cudaMemcpy(d_cameraTarget, &h_cameraTarget, vecSize, cudaMemcpyHostToDevice);
if(err != cudaSuccess) cout << "can't copy cameraTarget to device" << endl;
//start CUDA timer (kernelTime[run] keeps the last frame's kernel time)
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start,0); //start kernel timer
//----launch kernel-----
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;
cout << "launching " << blocksPerGrid << " blocks of ";
cout << threadsPerBlock << " threads" << endl;
draw<<<blocksPerGrid, threadsPerBlock>>>(d_pixels,d_width,d_height, d_cameraPos, d_cameraTarget);
//stop CUDA timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&kernelTime[run],start,stop);
cudaEventDestroy(start);cudaEventDestroy(stop);
cout << "kernel time: " << kernelTime[run] << endl;
//check for kernel error
err = cudaGetLastError();
if(err != cudaSuccess) cout << "kernel failed: " << cudaGetErrorString(err) << endl;
//copy results to hosts
err = cudaMemcpy(h_pixels, d_pixels, size,cudaMemcpyDeviceToHost);
if(err != cudaSuccess) cout << "can't copy to host" << endl;
//if program has output filename, output to file
if(argc == 2)
{
stringstream ss;
ss << argv[1] << "_" << setfill('0') << setw(3) << frame << ".rgb" ;
string fileName;
ss >> fileName;
write(fileName,h_width,h_height);
}
//increment t
t += dt;
}
//Deallocate memory (fix: d_width/d_height and h_pixels were leaked before)
cudaFree(d_pixels);
cudaFree(d_cameraTarget); cudaFree(d_cameraPos);
cudaFree(d_width); cudaFree(d_height);
free(h_pixels);
h_pixels = NULL;
//stop timer---
gettimeofday(&t2, &tz);
time[run] = (t2.tv_sec-t1.tv_sec) + 1e-6*(t2.tv_usec-t1.tv_usec);
cout << "Run time: " << time[run] << endl;
}
//reset GPU
err = cudaDeviceReset();
if(err != cudaSuccess) cout << "Couldn't reset GPU" << endl;
//print runtimes
cout << "Run times" << endl;
for(int i=0;i<runs;i++)
cout << time[i] << endl;
cout << "Kernel times" << endl;
for(int i=0;i<runs;i++)
cout << kernelTime[i] << endl;
return 0;
}
14,253 | /**
* Descrição: Multiplicação de matrizes em paralelo usando GPU
* Entrada: Dimensão das matrizes e dos blocos de threads
* Saída: Tempos de execução
*/
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
using namespace std;
/**
* O agumento deve ser double
*/
#define GET_TIME(now) { \
struct timespec time; \
clock_gettime(CLOCK_MONOTONIC_RAW, &time); \
now = time.tv_sec + time.tv_nsec/1000000000.0; \
}
/**
* Para checar erros em chamadas Cuda
*/
#define CUDA_SAFE_CALL(call) { \
cudaError_t err = call; \
if(err != cudaSuccess) { \
fprintf(stderr,"Erro no arquivo '%s', linha %i: %s.\n",__FILE__, __LINE__,cudaGetErrorString(err)); \
exit(EXIT_FAILURE); } \
}
/**
* Funcao para execucao sequencial
*/
/**
 * Sequential reference multiply: c = a * b with a (mA x nAmB),
 * b (nAmB x nB), c (mA x nB), all row-major in flat arrays.
 */
void multMatSeq(double *a, double *b, double *c, int mA, int nAmB, int nB) {
    for (int row = 0; row < mA; row++) {
        for (int col = 0; col < nB; col++) {
            double acc = 0;
            for (int k = 0; k < nAmB; k++)
                acc += a[row*nAmB + k] * b[k*nB + col];
            c[row*nB + col] = acc;
        }
    }
}
/**
* Kernel para execucao paralela em CUDA
*/
// Tiled matrix-multiply kernel: c = a * b using shared-memory tiles of A and
// B. Requires 2 * blockDim.x * blockDim.y * sizeof(double) bytes of dynamic
// shared memory.
// NOTE(review): the inner loop runs k over blockDim.y while the tiles hold
// blockDim.x rows, so correctness relies on square blocks
// (blockDim.x == blockDim.y) — the host enforces this by passing argv[5] for
// both dimensions; confirm before launching non-square blocks.
__global__ void multMatPar(double *a, double *b, double *c, int mA, int nAmB, int nB) {
// Global thread coordinates (row i, column j of c)
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Local (within-block) thread coordinates
int i_bloco = threadIdx.x;
int j_bloco = threadIdx.y;
extern __shared__ double mat_sub[];
// Shared memory for the A sub-matrix tile
double* Asub = (double*) mat_sub;
// Shared memory for the B sub-matrix tile (placed right after A's tile)
double* Bsub= (double*) &Asub[blockDim.x*blockDim.y];
double valor = 0;
// March the tiles along the shared dimension; out-of-range entries are
// zero-filled so every thread can participate in the barriers.
for(int passo=0; passo<nAmB; passo+=blockDim.y) {
if (i < mA && (passo+j_bloco) < nAmB)
Asub[i_bloco*blockDim.y+j_bloco] = a[i*nAmB+passo+j_bloco];
else
Asub[i_bloco*blockDim.y+j_bloco] = 0;
if ((passo+i_bloco) < nAmB && j < nB)
Bsub[i_bloco*blockDim.y+j_bloco] = b[(passo+i_bloco)*nB+j];
else
Bsub[i_bloco*blockDim.y+j_bloco] = 0;
__syncthreads();
if (i < mA && j < nB)
for (int k = 0; k < blockDim.y; k++) {
valor += Asub[i_bloco*blockDim.y+k] * Bsub[k*blockDim.y+j_bloco];
}
// Barrier before the next tile overwrites the shared buffers.
__syncthreads();
}
if (i < mA && j < nB)
c[i*nB+j] = valor;
}
/**
* Função que aloca espaco para uma matriz e preenche seus valores
* Entrada: matriz de entrada, dimensoes da matriz
* Saída: retorna 1 se a matriz foi preenchida com sucesso e 0 caso contrario
*/
/**
 * Allocates a linhas x colunas matrix and fills every entry with 1.5.
 * Input: address of the matrix pointer, matrix dimensions.
 * Output: returns 1 on success, 0 if the allocation failed.
 * Fix: the failure test now checks the allocated pointer (*mat); the old
 * code tested the parameter `mat` itself (the address of a local, never
 * NULL), so malloc failures went undetected.
 */
int preencheMatriz(double **mat, int linhas, int colunas) {
    // allocate memory for the matrix
    *mat = (double*) malloc(sizeof(double) * linhas * colunas);
    if (*mat == NULL) return 0;
    // fill every entry
    for (int i = 0; i < linhas; i++) {
        for (int j = 0; j < colunas; j++) {
            (*mat)[i*colunas + j] = 1.5;
        }
    }
    return 1;
}
// Compares two m x n matrices element-wise; aborts the program with a
// message on the first pair differing by more than 1e-5.
void checkResults(double *mat1, double *mat2, int m, int n){
    const int total = m * n;
    for (int idx = 0; idx < total; idx++) {
        if (fabs(mat1[idx] - mat2[idx]) > 1e-5) {
            std::cerr << "Resultado incorreto em " << idx / n << " x " << idx % n << " -> " << mat1[idx] << " " << mat2[idx] << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}
/**
* Imprime os resultados do programa
*/
/**
 * Prints run parameters and timings: as a semicolon-separated CSV row when
 * csv is true (default), otherwise in labelled human-readable form.
 * delta_eventos is the CUDA-event time in milliseconds (printed as seconds).
 */
void printResults(unsigned int mA, unsigned int nA, unsigned int mB, unsigned int nB, unsigned int blockLines, unsigned int blockColumns, double tempoSeq, float delta_eventos, double initialParTime, double finalParTime, bool csv = true){
    const double kernelSeg = delta_eventos/1000;
    if (!csv) {
        std::cout << "Dimensões da matriz A = " << mA << " x " << nA << std::endl
             << "Dimensões da matriz B = " << mB << " x " << nB << std::endl
             << "Dimensões dos blocos = " << blockLines << " x " << blockColumns << std::endl
             << "Tempo sequencial = "<< tempoSeq <<" seg" << std::endl
             << "Tempo paralelo kernel = "<< kernelSeg << " seg" << std::endl
             << "Tempo paralelo begin = "<< initialParTime <<" seg" << std::endl
             << "Tempo paralelo end = "<< finalParTime <<" seg" << std::endl
             << "Tempo paralelo total = "<< initialParTime+kernelSeg+finalParTime <<" seg" << std::endl;
    } else {
        std::cout << mA << ";" << nA << ";" << mB << ";" << nB << ";" << blockLines << ";" << blockColumns << ";" << tempoSeq << ";" << kernelSeg << ";" << initialParTime << ";" << finalParTime << ";" << std::endl;
    }
}
//funcao principal
/**
 * Benchmarks sequential vs CUDA matrix multiply (A[mA x nA] * B[mB x nB]).
 * argv: rows A, cols A, rows B, cols B, block side. Fixes applied:
 * - error message now correctly refers to B's ROW count (mB);
 * - missing ';' after the D2H cudaMemcpy restored;
 * - timing events destroyed and h_c_seq freed (both previously leaked).
 */
int main(int argc, char** argv) {
    double *h_a, *h_b, *h_c, *h_c_seq;   // host matrices
    double *d_a, *d_b, *d_c;             // device matrices
    double begin, end, initialParTime, finalParTime, tempoSeq;  // timing bookkeeping
    cudaEvent_t start, stop;
    unsigned int mA, nA, mB, nB;         // input matrix dimensions
    long int bytesA, bytesB, bytesC;     // byte counts per matrix
    unsigned int blockLines, blockColumns;  // thread-block shape
    // Parse and validate command-line arguments.
    if(argc < 6) {
        cerr << "Digite: "<< argv[0] <<" <nº de linhas da matriz A> <nº de colunas da matriz A> <nº de linhas da matriz B> <nº de colunas da matriz B> <nº de linhas e colunas dos blocos>" << endl;
        exit(EXIT_FAILURE);
    }
    mA = atol(argv[1]);
    nA = atol(argv[2]);
    mB = atol(argv[3]);
    nB = atol(argv[4]);
    blockLines = atol(argv[5]);
    blockColumns = atol(argv[5]);
    if (nA != mB) {
        // BUG FIX: message previously said "colunas" of B, but mB is B's row count.
        cerr << "Impossível executar multiplicação das matrizes. Número de colunas da matriz A ("<< nA <<") não bate com o número de linhas da matriz B ("<< mB <<")" << endl;
        exit(EXIT_FAILURE);
    }
    // Byte sizes of the three matrices.
    bytesA = mA*nA*sizeof(double);
    bytesB = mB*nB*sizeof(double);
    bytesC = mA*nB*sizeof(double);
    // Allocate and fill the input matrices.
    if (preencheMatriz(&h_a, mA, nA) == 0) {
        cerr << "Erro de preenchimento da matriz de entrada A" << endl;
        exit(EXIT_FAILURE);
    }
    if (preencheMatriz(&h_b, mB, nB) == 0) {
        cerr << "Erro de preenchimento da matriz de entrada B" << endl;
        exit(EXIT_FAILURE);
    }
    // Output matrices: parallel and sequential reference.
    h_c = (double*) malloc(bytesC);
    if (h_c == NULL) {
        cerr << "Erro de alocacao da matriz de saida" << endl;
        exit(EXIT_FAILURE);
    }
    h_c_seq = (double*) malloc(bytesC);
    if (h_c_seq == NULL) {
        cerr << "Erro de alocacao da matriz de saida" << endl;
        exit(EXIT_FAILURE);
    }
    // --- sequential reference run ---
    GET_TIME(begin);
    multMatSeq(h_a, h_b, h_c_seq, mA, nA, nB);
    GET_TIME(end);
    tempoSeq = end-begin;  // sequential time, seconds
    // --- CUDA run: allocation + H2D copies timed as setup ---
    GET_TIME(begin);
    CUDA_SAFE_CALL(cudaMalloc((void**) &d_a, bytesA));
    CUDA_SAFE_CALL(cudaMalloc((void**) &d_b, bytesB));
    CUDA_SAFE_CALL(cudaMalloc((void**) &d_c, bytesC));
    CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, bytesA, cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(d_b, h_b, bytesB, cudaMemcpyHostToDevice));
    dim3 threadsBloco = {blockLines, blockColumns, 1};
    dim3 blocosGrade = {(mA + threadsBloco.x - 1)/threadsBloco.x, (nB + threadsBloco.y - 1)/threadsBloco.y, 1};
    // Two double tiles (one of A, one of B) per block in dynamic shared memory.
    int tamMemCompartilhada = blockLines*blockColumns*2*sizeof(double);
    GET_TIME(end);
    initialParTime = end-begin;  // setup time, seconds
    // Kernel timed with CUDA events (result in milliseconds).
    CUDA_SAFE_CALL(cudaEventCreate(&start));
    CUDA_SAFE_CALL(cudaEventCreate(&stop));
    CUDA_SAFE_CALL(cudaEventRecord(start));
    multMatPar<<<blocosGrade, threadsBloco, tamMemCompartilhada>>>(d_a, d_b, d_c, mA, nA, nB);
    CUDA_SAFE_CALL(cudaGetLastError());
    CUDA_SAFE_CALL(cudaEventRecord(stop));
    CUDA_SAFE_CALL(cudaEventSynchronize(stop));
    float delta_eventos = 0;
    CUDA_SAFE_CALL(cudaEventElapsedTime(&delta_eventos, start, stop));
    // D2H copy timed as finalization. BUG FIX: statement was missing its ';'.
    GET_TIME(begin);
    CUDA_SAFE_CALL(cudaMemcpy(h_c, d_c, bytesC, cudaMemcpyDeviceToHost));
    GET_TIME(end);
    finalParTime = end-begin;
    checkResults(h_c_seq, h_c, mA, nB);
    // BUG FIX: destroy the timing events (previously leaked).
    CUDA_SAFE_CALL(cudaEventDestroy(start));
    CUDA_SAFE_CALL(cudaEventDestroy(stop));
    // Release device memory.
    CUDA_SAFE_CALL(cudaFree(d_a));
    CUDA_SAFE_CALL(cudaFree(d_b));
    CUDA_SAFE_CALL(cudaFree(d_c));
    // Release host memory. BUG FIX: h_c_seq was never freed.
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_c_seq);
    printResults(mA, nA, mB, nB, blockLines, blockColumns, tempoSeq, delta_eventos, initialParTime, finalParTime);
    return 0;
}
|
14,254 | #include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
/* Binary functor computing a*x + y, with the scale `a` fixed at
 * construction. Callable from both host and device code. */
struct saxpy_functor
{
    const float a;   // scale factor captured at construction

    saxpy_functor(float _a) : a(_a) {}

    __host__ __device__ float operator()(const float& x, const float& y) const
    {
        return a * x + y;
    }
};
/* Computes Y <- A*X + Y on the device in a single fused transform
 * (no temporary vector needed). */
void saxpy_fast(float A, thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
    saxpy_functor op(A);
    thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), op);
}
/* Computes Y <- A*X + Y in three separate passes (fill, multiply, add),
 * allocating a temporary vector — the deliberately slow variant. */
void saxpy_slow(float A, thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
    thrust::device_vector<float> temp(X.size());
    thrust::fill(temp.begin(), temp.end(), A);                  // temp <- A
    thrust::transform(X.begin(), X.end(), temp.begin(), temp.begin(),
                      thrust::multiplies<float>());             // temp <- A * X
    thrust::transform(temp.begin(), temp.end(), Y.begin(), Y.begin(),
                      thrust::plus<float>());                   // Y <- temp + Y
}
#define BIG 262144
int main(){
    // Exercise the functor on the host, then allocate two device vectors.
    saxpy_functor moo(1234.51234);
    std::cout << moo(4345.434, 345325.34) << std::endl;
    thrust::device_vector<float> x(BIG), y(BIG);
}
14,255 | #include "includes.h"
/* Nearest-neighbour 2x upsampling of an interleaved 3-channel image:
 * each target pixel (x, y) copies source pixel (x/2, y/2). One thread
 * per target pixel; threads past (wt, ht) exit. */
__global__ void NN_UpSampling( float *target, const float *source, const int wt, const int ht )
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= wt || y >= ht)
        return;
    const int dst = (y * wt + x) * 3;                    // interleaved RGB offset
    const int src = ((y / 2) * (wt / 2) + x / 2) * 3;    // half-resolution source
    for (int c = 0; c < 3; ++c)
        target[dst + c] = source[src + c];
}
14,256 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
/* Naive GEMM kernel: C = A * B where A is (m x k), B is (k x n) and
 * C is (m x n), all row-major. One thread computes one C element;
 * threads outside the m x n range exit. */
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= m || col >= n)
        return;
    float acc = 0;
    for (int i = 0; i < k; ++i)
        acc += A[row * k + i] * B[i * n + col];
    C[row * n + col] = acc;
}
/* Host wrapper around mysgemm. Only the non-transposed case with
 * alpha == 1 and beta == 0 is supported; anything else prints a
 * message and returns without launching. lda/ldb/ldc are accepted
 * for interface compatibility but not used by the kernel. */
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    const bool aNoTrans = (transa == 'N') || (transa == 'n');
    const bool bNoTrans = (transb == 'N') || (transb == 'n');
    if (!aNoTrans) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if (!bNoTrans) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }
    // One 16x16 block per 16x16 tile of C, grid rounded up (ceil-division).
    const unsigned int BLOCK_SIZE = 16;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    mysgemm<<<grid, block>>>(m, n, k, A, B, C);
}
|
14,257 | #include<stdio.h>
#include<stdlib.h>
#include<cmath>
int NPTS = 20000;
/* Pairwise distance sweep: each thread (flattened index idx) computes the
 * Euclidean distance from point idx to every later point. dimx is the
 * number of valid elements in a and b; dimy is currently unused.
 * BUG FIX: added a bounds guard — the launch grid can (and in the caller
 * does) spawn far more threads than dimx, and the unguarded a[idx]/b[idx]
 * reads ran out of bounds.
 * NOTE(review): the computed distance r is never stored anywhere, so this
 * kernel has no observable output — confirm intended use. */
__global__ void kernel (float *a, float *b, int dimx, int dimy)
{
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y*blockDim.y + threadIdx.y;
    int idx = iy*dimx + ix;
    if (idx >= dimx)      // guard: arrays hold only dimx floats
        return;
    float r, xdiff, ydiff;
    for (int i = idx; i < dimx; i++)
    {
        if (i != idx)
        {
            xdiff = a[idx] - a[i];
            ydiff = b[idx] - b[i];
            r = sqrtf(xdiff*xdiff + ydiff*ydiff);  // sqrtf keeps the math single precision
            (void)r;  // result currently discarded
        }
    }
}
/* Driver: scatter NPTS random points in [-xmax, xmax] x [-ymax, ymax],
 * copy them to the device and launch the distance kernel.
 * BUG FIXES:
 * - rand() returns an int in [0, RAND_MAX]; the old fill `2.0*xmax*rand()-xmax`
 *   produced huge values instead of coordinates in [-xmax, xmax].
 * - the old 2D grid (NPTS/4 x NPTS/4 blocks) launched ~NPTS^2/16 threads for
 *   NPTS points; a 1D ceil-div grid now covers the data. NOTE(review): the
 *   kernel itself should guard idx < dimx for the few overshoot threads.
 */
int main()
{
    float xmax = 10.0f;
    float ymax = 10.0f;
    int num_bytes = NPTS*sizeof(float);
    float *d_x=0, *d_y=0, *h_x=0, *h_y=0; // device and host pointers

    // Allocate host and device buffers.
    h_x = (float*)malloc(num_bytes);
    h_y = (float*)malloc(num_bytes);
    cudaMalloc((void**)&d_x,num_bytes);
    cudaMalloc((void**)&d_y,num_bytes);
    // Pointers were initialized to 0; a failed allocation leaves them 0.
    if (0==h_x || 0==d_x || 0==h_y || 0==d_y)
    {
        printf("couldn't allocate memory\n");
        return 1;
    }

    // Fill the universe with uniformly random coordinates in [-max, max].
    for (int i=0;i<NPTS;i++)
    {
        h_x[i] = 2.0f*xmax*((float)rand()/(float)RAND_MAX) - xmax;
        h_y[i] = 2.0f*ymax*((float)rand()/(float)RAND_MAX) - ymax;
    }

    // Zero device buffers, then upload the host data.
    cudaMemset(d_x,0,num_bytes);
    cudaMemset(d_y,0,num_bytes);
    cudaMemcpy(d_x,h_x,num_bytes,cudaMemcpyHostToDevice);
    cudaMemcpy(d_y,h_y,num_bytes,cudaMemcpyHostToDevice);

    // 1D launch: one thread per point, grid rounded up.
    dim3 grid, block;
    block.x = 256;
    block.y = 1;
    grid.x = (NPTS + block.x - 1)/block.x;
    grid.y = 1;
    kernel<<<grid,block>>>(d_x,d_y,NPTS,NPTS);
    cudaDeviceSynchronize();  // surface any asynchronous kernel error before exit

    free(h_x);
    free(h_y);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}
|
14,258 | #include <iostream>
#include <cuda.h>
/* Allocates one raw byte buffer and three int arrays on the device.
 * BUG FIX: every allocation was leaked; all four are now freed before exit. */
int main(){
    size_t num_bytes = 1<<10;          // 1 KiB scratch buffer
    int arraysize = 1000;
    float *device_data=NULL;
    cudaMalloc(&device_data, num_bytes);
    int *a_dev;
    int *b_dev;
    int *c_dev;
    cudaMalloc((void**) &a_dev, arraysize*sizeof(int));
    cudaMalloc((void**) &b_dev, arraysize*sizeof(int));
    cudaMalloc((void**) &c_dev, arraysize*sizeof(int));
    // Release everything (previously leaked).
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);
    cudaFree(device_data);
    return 0;
}
|
14,259 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
/**
* 矩阵a:32 x 16 b:16 x 32,随机生成
* 一个block中32 x 4个线程,共1 x 8个block
*/
#define A_ROW_NUM 32
#define A_COL_NUM 16
#define B_ROW_NUM 16
#define B_COL_NUM 32
#define TOTAL_SIZE (A_COL_NUM * A_ROW_NUM * sizeof(int))
#define RES_SIZE (A_ROW_NUM * A_ROW_NUM * sizeof(int))
/* GPU matrix multiply: c = a * b with a (A_ROW_NUM x A_COL_NUM) and
 * b (B_ROW_NUM x B_COL_NUM), one thread per element of c.
 * BUG FIX (latent): the row stride of c is its column count B_COL_NUM,
 * not A_ROW_NUM — the two are both 32 today, which hid the bug.
 * Also added a bounds guard so launches that overshoot are safe. */
__global__ void cal_by_gpu(int *a, int *b, int *c) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A_ROW_NUM || col >= B_COL_NUM)
        return;
    int res = 0;
    for (int i = 0; i < A_COL_NUM; i++) {
        res += a[row * A_COL_NUM + i] * b[i * B_COL_NUM + col];
    }
    c[row * B_COL_NUM + col] = res;
}
/* Host reference multiply: c = a * b, used to validate the GPU result. */
void cal_by_cpu(int a[][A_COL_NUM], int b[][B_COL_NUM], int c[][A_ROW_NUM]) {
    for (int row = 0; row < A_ROW_NUM; row++) {
        for (int col = 0; col < A_ROW_NUM; col++) {
            int acc = 0;
            for (int t = 0; t < A_COL_NUM; t++)
                acc += a[row][t] * b[t][col];
            c[row][col] = acc;
        }
    }
}
/* Demo driver: fill A (32x16) and B (16x32) with random ints in [-20, 20]
 * (rand() % 41 - 20), multiply on the GPU, then print the operands, the
 * GPU result and the CPU reference result for visual comparison. */
int main() {
int cpu_a[A_ROW_NUM][A_COL_NUM], cpu_b[B_ROW_NUM][B_COL_NUM], cpu_res[A_ROW_NUM][A_ROW_NUM];
int *gpu_a, *gpu_b, *gpu_res;
int i, j;
srand((unsigned int)time(NULL));
/* b is filled as b[j][i]: valid because i < 32 = B_COL_NUM and
 * j < 16 = B_ROW_NUM, so the loop covers all of B as well as A. */
for (i = 0; i < A_ROW_NUM; i++) {
for (j = 0; j < A_COL_NUM; j++) {
cpu_a[i][j] = rand() % 41 - 20;
cpu_b[j][i] = rand() % 41 - 20;
}
}
/* Device buffers: A and B are both 32*16 ints (TOTAL_SIZE); the result
 * is 32*32 ints (RES_SIZE). */
cudaMalloc((void**)&gpu_a, TOTAL_SIZE);
cudaMalloc((void**)&gpu_b, TOTAL_SIZE);
cudaMalloc((void**)&gpu_res, RES_SIZE);
cudaMemcpy(gpu_a, cpu_a, TOTAL_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, cpu_b, TOTAL_SIZE, cudaMemcpyHostToDevice);
/* 1x8 blocks of 32x4 threads = a 32x32 thread mesh, one per result cell. */
dim3 thread_square(32, 4);
dim3 block_square(1, 8);
cal_by_gpu<<<block_square, thread_square>>>(gpu_a, gpu_b, gpu_res);
cudaDeviceSynchronize();
cudaMemcpy(cpu_res, gpu_res, RES_SIZE, cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_res);
/* Print A, B, the GPU product, then overwrite cpu_res with the CPU
 * product and print it too. */
printf("a:\n");
for (i = 0; i < A_ROW_NUM; i++) {
for (j = 0; j < A_COL_NUM; j++) {
printf("%d ", cpu_a[i][j]);
}
printf("\n");
}
printf("--------------------------------------------\n");
printf("b:\n");
for (i = 0; i < B_ROW_NUM; i++) {
for (j = 0; j < B_COL_NUM; j++) {
printf("%d ", cpu_b[i][j]);
}
printf("\n");
}
printf("--------------------------------------------\n");
printf("cal_by_gpu:\n");
for (i = 0; i < A_ROW_NUM; i++) {
for (j = 0; j < A_ROW_NUM; j++) {
printf("%d ", cpu_res[i][j]);
}
printf("\n");
}
printf("--------------------------------------------\n");
cal_by_cpu(cpu_a, cpu_b, cpu_res);
printf("cal_by_cpu:\n");
for (i = 0; i < A_ROW_NUM; i++) {
for (j = 0; j < A_ROW_NUM; j++) {
printf("%d ", cpu_res[i][j]);
}
printf("\n");
}
return 0;
} |
14,260 | #include <cuda.h>
#include <stdlib.h>
#include <iostream>
#define THREADS_PER_BLOCK 32
#define BLOCKS_PER_SM 1
#define KB(x) ((x) << 10)
#define MB(x) ((x) << 20)
#define CACHE_BLOCK_SIZE 64
#define NUM_ITERATIONS 10000000
#define ACCESSES_PER_ITERATION 200
__managed__ __device__ long max_idx = 0;
/* Walks the managed buffer in grid-stride fashion up to the global
 * `max_idx`, writing each visited byte's (truncated) index back into it. */
__global__ void test_kernel(char *data)
{
    const int stride = blockDim.x * gridDim.x;
    for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < max_idx; tid += stride)
        data[tid] = tid;
}
/* Managed-memory bandwidth sweep: for buffer sizes 1..255 MB, touch the
 * pages on the host, then time 5 kernel passes and print
 * "size,avg_ms,avg_ms_per_MB".
 * BUG FIXES:
 * - max_idx (the kernel's loop bound) was never assigned, so the kernel
 *   returned immediately and the benchmark timed empty launches;
 * - the per-iteration CUDA events were created but never destroyed. */
int main()
{
    char *data;
    for (int i = 1; i < 256; i++) {
        float time_elapsed, total_time = 0;
        cudaEvent_t before, after;
        cudaEventCreate(&before);
        cudaEventCreate(&after);
        cudaMallocManaged(&data, MB(i));
        max_idx = MB(i);  // BUG FIX: give the kernel its actual extent
#if 1
        // Touch one byte per cache block so pages start resident on the host.
        for (int j = 0; j < MB(i); j+=CACHE_BLOCK_SIZE)
            data[j] = j;
#endif
        for (int tries = 0; tries < 5; tries++) {
            cudaEventRecord(before, 0);
            test_kernel<<<BLOCKS_PER_SM, THREADS_PER_BLOCK>>>(data);
            cudaDeviceSynchronize();
            cudaEventRecord(after, 0);
            cudaEventSynchronize(before);
            cudaEventSynchronize(after);
            cudaEventElapsedTime(&time_elapsed, before, after);
            total_time += time_elapsed;
        }
        std::cout << i << "," << total_time/5 << "," << total_time/(5*i) << std::endl;
        cudaEventDestroy(before);   // BUG FIX: events were leaked every iteration
        cudaEventDestroy(after);
        cudaFree(data);
    }
    return 0;
}
|
14,261 | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
//DEVICE
//DEVICE
/* Element-wise in-place vector sum: A[i] += B[i], one thread per element,
 * guarded against the ragged tail of the grid. */
__global__ void kernelSuma_Vectores(float* array_A, float* array_B, int _size){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= _size)
        return;
    array_A[i] += array_B[i];
}
//HOST
/* Fills two million-element vectors, sums them on the GPU and prints the
 * first 100 results.
 * BUG FIX: the launch used ceil(size/512) where size/512 is INTEGER
 * division (1953) before ceil ran, so 1953*512 = 999936 threads left the
 * last 64 elements unprocessed. Integer ceil-division now covers all
 * elements (the kernel guards the overshoot). */
int main(){
    int size= 1000000;
    float* array_A= new float[size];
    float* array_B= new float[size];
    float* array_A_DEVICE=NULL;
    float* array_B_DEVICE=NULL;
    // Seed inputs: A[i] = B[i] = i, so the sum at i is 2*i.
    for (int index = 0; index < size ; index++){
        array_A[index]= index;
        array_B[index]= index;
    }
    cudaMalloc((void**)&array_A_DEVICE,size*sizeof(float));
    cudaMalloc((void**)&array_B_DEVICE,size*sizeof(float));
    cudaMemcpy(array_A_DEVICE,array_A,size*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(array_B_DEVICE,array_B,size*sizeof(float),cudaMemcpyHostToDevice);
    const int threads = 512;
    const int blocks = (size + threads - 1) / threads;  // integer ceil-division
    kernelSuma_Vectores<<<blocks,threads>>>(array_A_DEVICE,array_B_DEVICE,size);
    // Blocking D2H copy also synchronizes with the kernel.
    cudaMemcpy(array_A,array_A_DEVICE,size*sizeof(float),cudaMemcpyDeviceToHost);
    for( int index=0 ; index< 100 ; index++){
        cout<<array_A[index]<< endl;
    }
    cudaFree(array_A_DEVICE);
    cudaFree(array_B_DEVICE);
    delete[] array_A;
    delete[] array_B;
}
|
14,262 | //
// Created by sun on 12/9/18.
//
//
//#include "grouping.h"
//
//__host__ __device__ bool sortCorres(const pcl::Correspondence &lhs, const pcl::Correspondence &rhs) {
// return lhs.distance < rhs.distance;
//}
//
//__global__ void kernClusterCorresp(int N, const PointType* model, const PointType* scene, const pcl::Correspondence* corrs,
// const double thres, const int min_size, int* cluster){
//// extern __shared__ bool group_used[]; // N
// __shared__ int num_clustered_curr;
// // keeps whether has been used
//// if (index < N)
//// group_used[index] = false;
//// __syncthreads();
// int index = threadIdx.x + blockIdx.x * blockDim.x;
// if (index < N){
// bool in_dist[N];
//// if (!group_used[index * 2]){
// int scene_self_index = corrs[index].index_match;
// int model_self_index = corrs[index].index_query;
// Eigen::Vector3f scene_self = Eigen::Vector3f(scene[scene_self_index].x, scene[scene_self_index].y,
// scene[scene_self_index].z);
// Eigen::Vector3f model_self = Eigen::Vector3f(model[model_self_index].x, model[model_self_index].y,
// model[model_self_index].z);
// int num_consistent = 0;
// for (int i = 0; i < N; i++){
// if (i == index ) continue;
// int scene_other_index = corrs[i].index_match;
// int model_other_index = corrs[i].index_query;
// Eigen::Vector3f scene_other = Eigen::Vector3f(scene[scene_other_index].x, scene[scene_other_index].y,
// scene[scene_other_index].z);
// Eigen::Vector3f model_other = Eigen::Vector3f(model[model_other_index].x, model[model_other_index].y,
// model[model_other_index].z);
// Eigen::Vector3f dist_scene = scene_other - scene_self;
// Eigen::Vector3f dist_model = model_other - model_self;
// double dist = fabs(dist_scene.norm() - dist_model.norm());
//// if (dist > thres)
//// group_used[i] = false;
// }
//// __syncthreads();
//// if (grouped_used[index])
//// atomicAdd(&num_clustered_curr,1);
//// __syncthreads();
//// if (num_clustered_curr > min_size){
//// // update array for used & dist
//// if (group_used[index]){
//// group_used[index * 2] = true;
//// group_used[index] = false;
//// num_clustered[0] = prev_num_clustered + 1;
//// }
////
//// // run ransac
//// }
//
//// }
// }
//}
//
//
//void Grouping::groupCorrespondence() {
// if (!_input || !_scene || !_corrs){
// std::cerr << "grouping has not been correctly set up " << std::endl;
// exit(1);
// }
//
// pcl::Correspondence* dev_corrs = NULL;
// cudaMalloc((void**)&dev_corrs, _N_corrs * sizeof(pcl::Correspondence));
// checkCUDAError("cudamalloc dev_corr ");
// cudaMemcpy(dev_corrs, &(*_corrs)[0], sizeof(pcl::Correspondence), cudaMemcpyHostToDevice);
// checkCUDAError("cudamemcpy corr error");
//
// PointType *dev_input = NULL;
// cudaMalloc((void**)&dev_input, _N_input * sizeof(PointType));
// checkCUDAError("cudamalloc dev_input");
// cudaMemcpy(dev_input, &_input->points[0], _N_input * sizeof(PointType), cudaMemcpyHostToDevice);
//
// PointType *dev_scene = NULL;
// cudaMalloc((void**)&dev_scene, _N_scene * sizeof(PointType));
// checkCUDAError("cudamalloc dev_input");
// cudaMemcpy(dev_scene, &_scene->points[0], _N_scene * sizeof(PointType), cudaMemcpyHostToDevice);
//
// int *dev_num_clustered = NULL;
// cudaMalloc((void**)&dev_num_clustered, sizeof(int));
// cudaMemset(dev_num_clustered, 0, sizeof(int));
//
// int blocksize = blockSize;
// dim3 fullBlockPerGrid_points;
// if (_N_corrs < blockSize){
// fullBlockPerGrid_points = dim3(static_cast<u_int32_t >((_N_corrs + blockSize - 1)/blockSize));
// }else {
// while (blocksize < _N_corrs) blocksize *= 2;
// fullBlockPerGrid_points = dim3(static_cast<u_int32_t >((_N_corrs + blocksize - 1) / blocksize));
// }
//
// thrust::sort(thrust::device, dev_corrs, dev_corrs + _N_corrs, sortCorres);
//
// kernClusterCorresp<<< fullBlockPerGrid_points, blocksize>>> (_N_corrs, dev_input, dev_scene, dev_corrs, _thres, dev_num_clustered);
//
//
//} |
14,263 | #include <stdio.h>
//return global thread ID (for 2D grids and blocks)
/* Flattens a 2D grid of 2D blocks into a single linear thread id. */
__device__ int globalID_2D(){
    const int blockId = blockIdx.y * gridDim.x + blockIdx.x;   // block's rank in the grid
    const int threadsPerBlock = blockDim.x * blockDim.y;
    const int localId = threadIdx.y * blockDim.x + threadIdx.x; // thread's rank in its block
    return blockId * threadsPerBlock + localId;
}
/* Prints each thread's block/thread coordinates and flattened global id;
 * global thread 0 also prints a separator line. */
__global__ void print_ids(){
    const int gid = globalID_2D();
    printf("blockIdx.(x,y,z): %d %d %d\t threadIdx.(x,y,z): %d %d %d =>\t globalID: %d\n",
           blockIdx.x, blockIdx.y, blockIdx.z,
           threadIdx.x, threadIdx.y, threadIdx.z, gid);
    if (gid == 0)
        printf("==============\n");
}
int main(){
    // Launch the id-printing kernel under three grid/block shapes,
    // synchronizing after each so device printf output is flushed in order.
    print_ids<<<1, 16>>>();           // one 1D block of 16 threads
    cudaDeviceSynchronize();
    print_ids<<<4, 4>>>();            // four 1D blocks of 4 threads
    cudaDeviceSynchronize();
    dim3 blocks(2, 2, 1);             // 2x2 grid of 2x2 blocks
    dim3 threads(2, 2, 1);
    print_ids<<<blocks, threads>>>();
    cudaDeviceSynchronize();
}
|
14,264 | #include <stdio.h>
#include <cassert>
#include <iostream>
#include <chrono>
#include <random>
using namespace std;
//=========================== prototypes des fonctions ========================================
/* Element-wise add c = a + b. Supports a (possibly 2D) launch by
 * flattening the (x, y) coordinates into one linear index, guarded
 * against the grid overshooting N. */
__global__ void vectorAdd(int *a, int *b, int *c, int N) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = row * (gridDim.x * blockDim.x) + col;
    if (k < N)
        c[k] = a[k] + b[k];
}
auto get_time() { return chrono::high_resolution_clock::now(); }
//=========================== fuction main ===================================================
/* Unified-memory vector-add benchmark over N = 1000 << 16 ints, using
 * cudaMemAdvise / cudaMemPrefetchAsync hints to steer page placement,
 * then verifying c = a + b on the host. */
int main() {
const int N = 1000 << 16;
size_t bytes = N * sizeof(int);
int BLOCK_SIZE = 1 << 10;
int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
int *a, *b, *c;
/* NOTE(review): cudaGetDevice(&id) writes the device ordinal into id,
 * but the assignment then overwrites id with the call's STATUS code
 * (0 on success). This only works because device 0 is assumed — confirm. */
int id = cudaGetDevice(&id);
cudaMallocManaged(&a, bytes);
cudaMallocManaged(&b, bytes);
cudaMallocManaged(&c, bytes);
/* Inputs are filled on the CPU first, so prefer them host-side;
 * the output is prefetched to the GPU ahead of the kernel. */
cudaMemAdvise(a, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);
cudaMemAdvise(b, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);
cudaMemPrefetchAsync(c, bytes, id);
for (int i = 0; i < N; i++) {
a[i] = rand() % 100;
b[i] = rand() % 100;
}
/* Inputs are now read-only for the kernel; push them to the GPU. */
cudaMemAdvise(a, bytes, cudaMemAdviseSetReadMostly, id);
cudaMemAdvise(b, bytes, cudaMemAdviseSetReadMostly, id);
cudaMemPrefetchAsync(a, bytes, id);
cudaMemPrefetchAsync(b, bytes, id);
auto start = get_time();
vectorAdd<<<GRID_SIZE, BLOCK_SIZE>>>(a, b, c, N);
cudaDeviceSynchronize();
/* Bring everything back to the host for verification. */
cudaMemPrefetchAsync(a, bytes, cudaCpuDeviceId);
cudaMemPrefetchAsync(b, bytes, cudaCpuDeviceId);
cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);
auto finish = get_time();
auto duration =
chrono::duration_cast<std::chrono::milliseconds>(finish - start);
cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
/* Host-side check of every element. */
for (int i = 0; i < N; i++) {
assert(c[i] == a[i] + b[i]);
}
cudaFree(a);
cudaFree(b);
cudaFree(c);
cout << "terminé avec succès"<<endl;
return 0;
}
|
14,265 | #include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
/* Pretty-prints the commonly interesting fields of a cudaDeviceProp. */
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total Registers per block: %d\n", devProp.regsPerBlock);
printf("Warp Size: %d\n", devProp.warpSize);
printf("Maximum Memory pitch: %lu\n", devProp.memPitch);
printf("Maximum Threads per block: %d\n", devProp.maxThreadsPerBlock);
/* Per-axis (x, y, z) limits for block and grid dimensions. */
for(int i = 0; i<3; i++)
printf("Maximum dimension of block %d: %d\n", i, devProp.maxThreadsDim[i]);
for(int i = 0; i<3; i++)
printf("Maximum dimension of grid %d: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total Constant Memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
}
int main()
{
    // Enumerate every CUDA device and dump its properties.
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    for (int dev = 0; dev < devCount; ++dev)
    {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printDevProp(props);
    }
    return 0;
}
14,266 | #include "includes.h"
static unsigned int GRID_SIZE_N;
static unsigned int GRID_SIZE_4N;
static unsigned int MAX_STATE_VALUE;
__global__ static void cudaEvaluateLeftGammaKernel(int *wptr, double *x2, double *tipVector, unsigned char *tipX1, double *diagptable, double *output, const int limit) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= limit) {
output[i] = 0.0;
return;
}
int j;
double term = 0.0;
tipVector += 4 * tipX1[i];
x2 += 16 * i;
#pragma unroll
for (j = 0; j < 4; j++) {
term += tipVector[0] * x2[0] * diagptable[0];
term += tipVector[1] * x2[1] * diagptable[1];
term += tipVector[2] * x2[2] * diagptable[2];
term += tipVector[3] * x2[3] * diagptable[3];
x2 += 4;
diagptable += 4;
}
term = log(0.25 * fabs(term));
output[i] = wptr[i] * term;
} |
14,267 |
#ifdef _WIN32
# define IMPORT __declspec(dllimport)
# define EXPORT __declspec(dllexport)
#else
# define IMPORT
# define EXPORT
#endif
int curand_main();
int nppif_main();
/* Returns 1 only when both sub-tests succeed (each returning 0).
 * Short-circuits: nppif_main() is not run if curand_main() fails. */
EXPORT int shared_version()
{
    if (curand_main() != 0)
        return 0;
    return nppif_main() == 0;
}
|
14,268 | #include "includes.h"
/* One thread per element, hard-capped at 10,000,000 entries:
 * new_data[i] = (data[i] + pattern) % 13377, where the pattern repeats
 * 0..499 with each value shared by two consecutive threads. */
__global__ void kernel_5(int *new_data, int *data)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid < 10000000) {
        const int idx_2 = (gid / 2) % 500;
        new_data[gid] = (data[gid] + idx_2) % 13377;
    }
}
14,269 | /* so we know how to interface with blocks/threads conceptually.
* now, the question is: what is the syntax?
* compile with cuda compiler:
nvcc cuda4.cu -o cuda4
*/
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@//
#include <cstdio>
#include <cmath>
int arraySize = 1<<20; // basically a million
// function to add them together
__global__
void addArrays (int arraySize, float *add1, float *add2, float *sum){
    // One thread per element: the global position is the block's offset
    // (blockIdx.x * blockDim.x) plus this thread's slot (threadIdx.x).
    // No loop is needed — the launch supplies one thread per element.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < arraySize) {   // guard: the grid may overshoot the array — avoid OOB
        sum[i] = add1[i] + add2[i];
    }
}
// all the action
int main(){
    printf("initializing arrays\n");
    // Unified-memory buffers visible to both host and device.
    float *add1, *add2, *sum;
    cudaMallocManaged(&add1, arraySize * sizeof(float));
    cudaMallocManaged(&add2, arraySize * sizeof(float));
    cudaMallocManaged(&sum,  arraySize * sizeof(float));
    // Seed the inputs on the host before any GPU work.
    for (int i = 0; i < arraySize; i++){
        add1[i] = 1.0;
        add2[i] = 2.0;
    }
    printf("arrays done. prepare for adding\n");
    // 4096 blocks x 256 threads = 1<<20 threads: exactly one per element.
    addArrays<<<4096, 256>>>(arraySize, add1, add2, sum);
    cudaDeviceSynchronize();   // wait for the GPU before touching results
    printf("adding complete.\t");
    // Every sum should be exactly 3.0; record the worst deviation seen.
    float maxError = 0.0;
    for (int i = 0; i < arraySize; i++){
        maxError = fmax(maxError, fabs(sum[i] - 3.0));
    }
    printf("max error = %f\n", maxError);
    cudaFree(add1);
    cudaFree(add2);
    cudaFree(sum);
    return 0;
}
|
14,270 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define N 1<<1
#define TILE_WIDTH 1
#define RANDRANGE 5
/* Tiled matrix multiply: Pd = Md * Nd, all Width x Width row-major.
 * Each TILE_WIDTH x TILE_WIDTH block computes one tile of Pd, staging
 * tiles of Md and Nd through shared memory.
 * PRECONDITIONS: Width must be a multiple of TILE_WIDTH and the grid
 * must be exactly (Width/TILE_WIDTH)^2 blocks — there are no bounds
 * guards on any global access. */
__global__ void matrixMultKernel(float* Md, float* Nd, float* Pd, int Width) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
/* Barrier: the whole tile must be staged before anyone reads it. */
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
/* Barrier: everyone must finish reading before the next tile overwrites. */
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
// Allocates a matrix with random float entries.
/* Fills `data` with integer-valued floats drawn uniformly from [1, RANDRANGE]. */
void randomInit(float* data, int size) {
    for (int i = 0; i < size; ++i) {
        data[i] = (float)(rand() % RANDRANGE + 1);
    }
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
/* Driver for the tiled multiply: builds two random N x N matrices,
 * multiplies them on the GPU and prints the operands and result.
 * NOTE: N and TILE_WIDTH come from the #defines above; N must remain a
 * multiple of TILE_WIDTH for the kernel's preconditions to hold. */
int main(int argc, char** argv) {
// set seed for rand()
srand(2015);
// allocate host memory for matrices A and B
unsigned int size_A = N * N;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = N * N;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A; float* d_B;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_B);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A,
cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B,
cudaMemcpyHostToDevice);
// allocate host memory for the result C (zeroed for cleanliness)
unsigned int size_C = N*N ;
unsigned int mem_size_C = sizeof(float) * size_C;
float* h_C = (float*) malloc(mem_size_C);
memset(h_C,0,mem_size_C);
// allocate device memory for the result
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
/* Launch config: one TILE_WIDTH^2 block per output tile; the grid is
 * exact (no remainder) because N is a multiple of TILE_WIDTH. */
dim3 blocks(TILE_WIDTH, TILE_WIDTH);
dim3 grid(N/ TILE_WIDTH, N/ TILE_WIDTH);
// execute the kernel
matrixMultKernel<<< grid, blocks >>>(d_A, d_B, d_C, N);
/* Blocking D2H copy: also synchronizes with the kernel above. */
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
// Print inputs and results side by side for manual inspection.
printf("N= %d and TILE_WIDTH =%d\n", N,TILE_WIDTH);
for (int i=0; i < N*N; i++) {
printf("%20.15f : %20.15f : %20.15f \n", h_A[i], h_B[i], h_C[i] );
}
// clean up memory
free(h_A); free(h_B); free(h_C);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
|
14,271 | // --- CSCS (Swiss National Supercomputing Center) ---
#include <stdio.h>
#include <string.h>
extern "C"
void set_gpu(int dev)
{
/* Select which CUDA device subsequent runtime calls on this host
 * thread target. Return status of cudaSetDevice is not checked. */
cudaSetDevice(dev);
}
extern "C"
void get_gpu_info(char *gpu_string, int dev)
{
/* Copies device `dev`'s name into the caller-supplied buffer.
 * NOTE(review): the caller must supply a buffer large enough for the
 * full device name (cudaDeviceProp::name) — TODO confirm at call sites.
 * strcpy requires <string.h>, which this file does not include. */
struct cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop, dev);
strcpy(gpu_string,dprop.name);
}
extern "C"
void get_more_gpu_info(int dev)
{
    // Print the device name, then driver/runtime versions and the
    // compute capability of device `dev`.
    struct cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
    int driverVersion = 0;
    int runtimeVersion = 0;
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
}
// Add two arrays on the device
/* Adds two arrays element-wise on the device: d_a3 = d_a1 + d_a2, and
 * performs the original "dummy" write of the index back into d_a1.
 * BUG FIX: the dummy write was OUTSIDE the bounds check, so any thread
 * with idx >= N wrote past the end of d_a1. */
__global__ void gpu_kernel(int *d_a1, int *d_a2, int *d_a3, int N) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < N) {
        d_a3[idx] = d_a1[idx] + d_a2[idx];
        d_a1[idx] = idx ; // dummy
    }
}
//#define SIZE 12
extern "C"
void run_gpu_kernel(int SIZE) {
/* Demo: fills two SIZE-element arrays (i and 100*i), adds them on the
 * GPU and prints elements 0, SIZE/2 and SIZE-1 of the sum.
 * NOTE(review): the launch is hard-coded to <<<3, 4>>> = 12 threads, so
 * for SIZE > 12 the tail of d_a3 keeps its memset value 0 and the
 * printed a3[SIZE-1] is wrong; for SIZE < 12 surplus threads index past
 * the arrays. The launch should be sized from SIZE — confirm intent. */
int i;
/* Variable-length host arrays (compiler extension in C++). */
int a1[SIZE], a2[SIZE], a3[SIZE]; // Host arrays
int *d_a1, *d_a2, *d_a3; // Device arrays
// Initalize the host input arrays
for(i = 0; i < SIZE; i++) {
a1[i] = i;
a2[i] = 100*i;
}
// Allocate the device arrays and copy data over to the device
cudaMalloc((void**) &d_a1, sizeof(int)*SIZE);
cudaMalloc((void**) &d_a2, sizeof(int)*SIZE);
cudaMalloc((void**) &d_a3, sizeof(int)*SIZE);
cudaMemcpy(d_a1, a1, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(d_a2, a2, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
// Zero out results
cudaMemset(d_a3, 0, sizeof(int)*SIZE);
gpu_kernel<<<3, 4>>>(d_a1, d_a2, d_a3, SIZE);
/* Blocking D2H copy also synchronizes with the kernel. */
cudaMemcpy(a3, d_a3, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
/* Print the sampled indices, then their values. */
printf("%d %d %d\n", 0, SIZE/2, SIZE-1);
printf("%d ", a3[0]);
printf("%d ", a3[SIZE/2]);
printf("%d ", a3[SIZE-1]);
printf("\n");
cudaFree(d_a1);
cudaFree(d_a2);
cudaFree(d_a3);
}
|
14,272 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <limits.h>
#include <time.h>
/* Original permuation code due to D. Jimenez, UT Austin
* http://faculty.cse.tamu.edu/djimenez/ut/utsa/cs3343/
*/
/* Reference an element in the TSP distance array. */
#define TSP_ELT(tsp, n, i, j) *(tsp + (i * n) + j)
/* Create an instance of a symmetric TSP. */
/* Allocates and returns a symmetric n x n TSP distance matrix with
 * entries in [0, 100] (caller frees). Seeded for reproducibility. */
int *
create_tsp(int n, int random_seed)
{
    int *tsp = (int *) malloc(n * n * sizeof(int));
    srandom(random_seed);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col <= row; col++) {
            int val = (int)(random() / (RAND_MAX / 100));
            /* TSP_ELT expanded inline: element (i, j) lives at tsp[i*n + j]. */
            tsp[row * n + col] = val;
            tsp[col * n + row] = val;   /* mirror for symmetry */
        }
    }
    return tsp;
}
/* Print a TSP distance matrix. */
/* Prints the distance matrix with a column-header row and row labels. */
void
print_tsp(int *tsp, int n, int random_seed)
{
    printf("TSP (%d cities - seed %d)\n ", n, random_seed);
    for (int col = 0; col < n; col++)
        printf("%3d|", col);
    printf("\n");
    for (int row = 0; row < n; row++) {
        printf("%2d|", row);
        for (int col = 0; col < n; col++)
            printf("%4d", *(tsp + row * n + col));
        printf("\n");
    }
    printf("\n");
}
/* Prints the command-line flag summary to stderr and exits with status 1. */
void
usage(char *prog_name)
{
    static const char *flag_lines[] = {
        " -h\n",
        " -t <number of threads>\n",
        " -c <number of cities>\n",
        " -s <random seed>\n",
    };
    fprintf(stderr, "usage: %s [flags]\n", prog_name);
    for (unsigned i = 0; i < sizeof flag_lines / sizeof flag_lines[0]; i++)
        fprintf(stderr, "%s", flag_lines[i]);
    exit(1);
}
/* Iterative factorial on the device.
 * NOTE(review): n < 1 returns 0 here, although 0! is mathematically 1 —
 * confirm callers depend on the 0 sentinel before changing it. */
__device__ unsigned long
factorial(int n)
{
    if (n < 1) {
        return 0;
    }
    unsigned long result = 1;
    for (unsigned i = 2; i <= n; i++) {
        result *= i;
    }
    return result;
}
/* Total cost of the tour described by perm over the n x n distance matrix,
 * including the closing leg from the last city back to the first. */
__device__ int
calc_cost(int *perm, int *matrix, int n)
{
    int total = 0;
    for (int leg = 0; leg < n; leg++) {
        int from = perm[leg];
        int to = perm[(leg + 1) % n];   /* wraps around at the end */
        total += matrix[from * n + to];
    }
    return total;
}
/* Fill a caller-provided n x n matrix with a symmetric random TSP instance.
 * Deterministic for a given random_seed; weights fall in [0, 100]. */
void
create_tsp(int *matrix, int n, int random_seed)
{
    srandom(random_seed);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col <= row; col++) {
            int dist = (int)(random() / (RAND_MAX / 100));
            matrix[row * n + col] = dist;
            matrix[col * n + row] = dist;
        }
    }
}
/**** List ADT ****************/
/* Fixed-capacity list of ints used for building permutations on the device.
   Allocated/freed with list_alloc/list_free below. */
typedef struct {
	int *values; /* Values stored in list (backing array of max_size slots) */
	int max_size; /* Maximum size allocated */
	int cur_size; /* Size currently in use (valid entries are values[0..cur_size-1]) */
} list_t;
/* Dump a list to stdout for debugging: "cur/max" followed by the stored
 * values in order. */
__device__ void
list_dump(list_t *lst)
{
    printf("%2d/%2d", lst->cur_size, lst->max_size);
    int count = lst->cur_size;
    for (int idx = 0; idx < count; idx++) {
        printf(" %d", lst->values[idx]);
    }
    printf("\n");
}
/* Allocate an empty list able to hold up to max_size elements (device heap).
 * Pair with list_free to avoid leaking device memory. */
__device__ list_t *
list_alloc(int max_size)
{
    list_t *lst = (list_t *)malloc(sizeof(list_t));
    lst->values = (int *)malloc(max_size * sizeof(int));
    lst->cur_size = 0;
    lst->max_size = max_size;
    return lst;
}
/* Release a list created by list_alloc; call this to avoid leaking memory! */
__device__ void
list_free(list_t *lst)
{
    free(lst->values);   /* payload first, then the header */
    free(lst);
}
/* Append a value at the end of the list. If the list is already full the
 * value is dropped and a diagnostic is printed.
 *
 * Fixed: the old code reported "List full" but then fell through and wrote
 * past the end of the values buffer anyway (out-of-bounds write). */
__device__ void
list_add(list_t *list, int value)
{
    if (list->cur_size >= list->max_size) {
        printf("List full");
        list_dump(list);
        return;   /* do NOT write past the end of the buffer */
    }
    list->values[list->cur_size++] = value;
}
/* Number of elements currently stored in the list. */
__device__ int
list_size(list_t *lst)
{
    return lst->cur_size;
}
/* Validate an index; prints a diagnostic (but does not abort) when out of
 * range.
 *
 * Fixed: the old test `index > list->cur_size - 1` underflowed when
 * cur_size == 0 (cur_size - 1 converts to a huge unsigned value), so invalid
 * accesses to an empty list were never reported; it also printed an
 * unsigned long with %d. */
__device__ void
_list_check_index(list_t *list, unsigned long index)
{
    if (index >= (unsigned long)list->cur_size) {
        printf("Invalid index %lu\n", index);
        list_dump(list);
    }
}
/* Fetch the value stored at `index` (index is validated first). */
__device__ int
list_get(list_t *lst, unsigned long index)
{
    _list_check_index(lst, index);
    return lst->values[index];
}
/* Remove the value at the given index, shifting everything after it down
 * one slot. */
__device__ void
list_remove_at(list_t *lst, int index)
{
    _list_check_index(lst, index);
    lst->cur_size--;
    for (int pos = index; pos < lst->cur_size; pos++) {
        lst->values[pos] = lst->values[pos + 1];
    }
}
/* Retrieve a copy of the values as a simple array of integers. The returned
   array is allocated dynamically (sized to the list's full capacity); the
   caller must free the space when no longer needed.

   Fixed: the old code copied max_size entries via list_get, which both
   tripped the index check and read uninitialized slots whenever the list was
   only partially filled. Only the cur_size valid entries are copied now. */
__device__ int *
list_as_array(list_t *list)
{
    int *rtn = (int *)malloc(list->max_size * sizeof(int));
    for (int i = 0; i < list->cur_size; i++) {
        rtn[i] = list->values[i];
    }
    return rtn;
}
/**** Permutation ****************/
/* Permutation algorithms based on code found at:
   http://www.mathblog.dk/project-euler-24-millionth-lexicographic-permutation/
   which references:
   http://www.cut-the-knot.org/do_you_know/AllPerm.shtml
*/
/* Return the kth lexographically ordered permuation of an array of `size`
   integers in the range [0 .. size - 1] (k is 1-based). The integers are
   allocated dynamically and should be free'd by the caller when no longer
   needed.
   Fixed: the `numbers` scratch list used to leak; it is now freed. */
__device__ int *
kth_perm(int k, int size)
{
    long remain = k - 1;
    /* Pool of digits not yet placed, in ascending order. */
    list_t *numbers = list_alloc(size);
    for (int i = 0; i < size; i++) {
        list_add(numbers, i);
    }
    list_t *perm = list_alloc(size);
    /* Factorial number system: each "digit" of remain in base (size-i)!
       selects the next element from the remaining pool. */
    for (int i = 1; i < size; i++) {
        unsigned long f = factorial(size - i);
        unsigned long j = remain / f;
        remain = remain % f;
        list_add(perm, list_get(numbers, j));
        list_remove_at(numbers, j);
        if (remain == 0) {
            break;
        }
    }
    /* Append remaining digits — they are already in ascending order. */
    for (int i = 0; i < list_size(numbers); i++) {
        list_add(perm, list_get(numbers, i));
    }
    int *rtn = list_as_array(perm);
    list_free(numbers);
    list_free(perm);
    return rtn;
}
/* Exchange v[i] and v[j]. */
__device__ void
swap(int *v, int i, int j)
{
    int tmp = v[j];
    v[j] = v[i];
    v[i] = tmp;
}
/* Print a permutation array on a single line, 4 columns per entry. */
__device__ void
print_perm(int *perm, int size)
{
    for (int idx = 0; idx < size; idx++) {
        printf("%4d", perm[idx]);
    }
    printf("\n");
}
/* Given an array of size elements at perm, update the array in place to
   contain the lexographically next permutation. It is originally due to
   Dijkstra. The present version is discussed at:
   http://www.cut-the-knot.org/do_you_know/AllPerm.shtml
   NOTE(review): precondition — perm must not already be the final
   (fully descending) permutation; the first while loop would then walk i
   below 1 and read perm[-1]. Callers must not advance past permutation
   number size!.
*/
__device__ void
next_perm(int *perm, int size)
{
    /* Scan right-to-left for the rightmost ascent: perm[i-1] < perm[i]. */
    int i = size - 1;
    while (perm[i - 1] >= perm[i]) {
        i = i - 1;
    }
    /* Find the rightmost element strictly greater than the pivot perm[i-1]. */
    int j = size;
    while (perm[j - 1] <= perm[i - 1]) {
        j = j - 1;
    }
    /* Exchange pivot with that element... */
    swap(perm, i - 1, j - 1);
    /* ...then reverse the (descending) suffix after the pivot position so it
       becomes the smallest continuation. */
    i++;
    j = size;
    while (i < j) {
        swap(perm, i - 1, j - 1);
        i++;
        j--;
    }
}
/* Brute-force TSP search: each of the num_threads total threads walks its own
 * contiguous slice of the n! tour permutations and records the cheapest tour
 * cost it sees in min_matrix[tid].
 *
 * Launch layout: any grid/block shape whose total thread count equals
 * num_threads (main launches a 1-D grid of 1-D blocks).
 * Shared memory: cost_matrix holds 144 ints, so num_cities must be <= 12.
 *
 * Fixes vs. the original:
 *  - block_id flattening used blockDim where gridDim belongs (identical for
 *    the 1-D launch used here, wrong for multi-dimensional grids);
 *  - the remainder of n! / num_threads permutations was never searched; the
 *    last thread now covers it;
 *  - next_perm is not called after the final permutation of a slice (its
 *    result was unused, and on the very last permutation it would read out
 *    of bounds);
 *  - the dead init_iters computation was removed and the kth_perm allocation
 *    is freed.
 */
__global__ void
perm_kernel(int *glob_cost_matrix, int *min_matrix, int num_cities, int num_threads)
{
    int block_id =
        blockIdx.x +
        blockIdx.y * gridDim.x +
        blockIdx.z * gridDim.x * gridDim.y;
    int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    int thread_offset =
        threadIdx.x +
        threadIdx.y * blockDim.x +
        threadIdx.z * blockDim.x * blockDim.y;
    int tid = block_id * threads_per_block + thread_offset;
    /* Stage the distance matrix in shared memory (num_cities <= 12). Each
       element is copied exactly once, strided across the block's threads. */
    __shared__ int cost_matrix[144];
    for (int i = thread_offset; i < num_cities * num_cities; i += threads_per_block)
        cost_matrix[i] = glob_cost_matrix[i];
    __syncthreads();
    /* Evenly split the n! permutations; the last thread also takes the
       remainder so every permutation is visited. */
    unsigned long num_iters = factorial(num_cities) / num_threads;
    int *perm = kth_perm((num_iters * tid) + 1, num_cities);
    if (tid == num_threads - 1)
        num_iters += factorial(num_cities) % num_threads;
    print_perm(perm, num_cities);   /* debug trace of each thread's start */
    int min_cost = INT_MAX;
    for (unsigned long i = 0; i < num_iters; i++)
    {
        int cost = calc_cost(perm, cost_matrix, num_cities);
        if (cost < min_cost)
        {
            min_cost = cost;
        }
        if (i + 1 < num_iters)          /* don't step past the slice's end */
            next_perm(perm, num_cities);
    }
    min_matrix[tid] = min_cost;
    free(perm);   /* kth_perm allocates on the device heap */
}
/* Current wall-clock time in seconds (with nanosecond resolution) as a
 * double, via CLOCK_REALTIME.
 *
 * Fixed: the address-of operator had been mangled by an HTML-entity round
 * trip — `&current_time` had become `¤t_time`, which does not compile. */
double
now(void)
{
    struct timespec current_time;
    const double ONE_BILLION = 1000000000.0;
    clock_gettime(CLOCK_REALTIME, &current_time);
    return current_time.tv_sec + (current_time.tv_nsec / ONE_BILLION);
}
/* Brute-force TSP driver: parses flags, builds a random symmetric distance
 * matrix on the host, copies it to the device, splits the n! permutations
 * across num_threads GPU threads, and reports the cheapest tour length.
 * NOTE(review): the kernel's shared cost matrix holds 144 ints, so -c must
 * stay <= 12; factorial() overflows unsigned long for > 20 cities. */
int
main(int argc, char **argv)
{
    int num_cities = 3;    /* -c: number of cities */
    int random_seed = 42;  /* -s: seed for the random distance matrix */
    int num_threads = 5;   /* -t: total GPU threads to split the search over */
    int ch;
    while ((ch = getopt(argc, argv, "t:c:hs:")) != -1) {
        switch (ch) {
        case 'c':
            num_cities = atoi(optarg);
            break;
        case 's':
            random_seed = atoi(optarg);
            break;
        case 't':
            num_threads = atoi(optarg);
            break;
        case 'h':
        default:
            usage(argv[0]);
        }
    }
    double start = now();
    int cost_matrix_size = sizeof(int) * num_cities * num_cities;
    int min_matrix_size = sizeof(int) * num_threads;
    //Initialize matrices
    int *min_matrix_h = (int *) malloc(min_matrix_size);
    int *cost_matrix_h = (int *) malloc(cost_matrix_size);
    int *min_matrix_d, *cost_matrix_d;
    cudaMalloc(&min_matrix_d, min_matrix_size);
    cudaMalloc(&cost_matrix_d, cost_matrix_size);
    //create and copy cost matrix to device
    create_tsp(cost_matrix_h, num_cities, random_seed);
    // print_tsp(cost_matrix_h, num_cities, random_seed);
    cudaMemcpy(cost_matrix_d, cost_matrix_h, cost_matrix_size, cudaMemcpyHostToDevice);
    //launch kernel
    /* Pick the largest power of two strictly smaller than num_threads that
       divides it evenly (falls back to num_threads itself, e.g. when -t is
       odd) so every block is full. */
    int threads_per_block = num_threads;
    for(int i = 1; i < 1025; i *= 2)
    {
        if(i > num_threads)
            break;
        if(i < num_threads && (num_threads % i == 0))
        {
            threads_per_block = i;
        }
    }
    int blocks_per_grid = num_threads / threads_per_block;
    perm_kernel<<<blocks_per_grid, threads_per_block>>>(cost_matrix_d, min_matrix_d, num_cities, num_threads);
    //copy local mins back to host
    /* The blocking memcpy also synchronizes with the kernel, so any launch
       or execution error surfaces in rtn here. */
    cudaError_t rtn = cudaMemcpy(min_matrix_h, min_matrix_d, min_matrix_size, cudaMemcpyDeviceToHost);
    if(rtn != 0){
        printf("Ouchie:\n%s\n", cudaGetErrorString(rtn));
    }
    //calculate minimum
    /* Reduce the per-thread minima on the host. */
    int shortest_length = INT_MAX;
    for(int i = 0; i < num_threads; i++){
        if(min_matrix_h[i] < shortest_length)
        {
            shortest_length = min_matrix_h[i];
        }
    }
    double stop = now();
    printf("Shortest %d\n", shortest_length);
    printf("Blocks per grid:%d\n", blocks_per_grid);
    printf("Threads per block:%d\n", threads_per_block);
    printf("Num cities:%d\n", num_cities);
    printf("Num threads:%d\n", num_threads);
    printf("Took %5.3f seconds\n\n", stop - start);
    /* NOTE(review): the host mallocs and the two cudaMalloc buffers are never
       freed; harmless at process exit but worth fixing if this grows. */
}
|
14,273 | #include <cuda_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <stdarg.h>
/* Runtime configuration parsed from the settings file (see read_config and
   init2d/init3d). */
struct settings
{
    unsigned int iterations;  // number of loop iterations; each runs two kernel passes
    unsigned int x;           // requested extent in x
    unsigned int y;           // requested extent in y
    unsigned int z;           // requested extent in z (3-D only)
    unsigned int n;           // actual grid edge; set in main to next_pow(max extent)
    bool dim_3d;              // true => 3-D simulation, false => 2-D
    unsigned int threads;     // requested threads per block (mapped to a dim3 in main)
    unsigned long read_only;  // count of cells pinned by init2d/init3d
};
/* Pull `argc` further tokens out of the strtok stream already opened by the
 * caller and store them through the vararg pointers: the first argc-1 values
 * are parsed as unsigned ints, the final one as a float. Returns false (with
 * outputs possibly partially written) if the stream runs out of tokens.
 *
 * Fixed: the original never called va_end after va_start, which is undefined
 * behavior per the C standard. */
bool read(char *token, const char *delim, int argc, ...)
{
    va_list argv;
    va_start(argv, argc);
    for (int i = 0; i < argc; i++)
    {
        token = strtok(NULL, delim);
        if (token == NULL)
        {
            va_end(argv);
            return false;
        }
        if (i == argc - 1)
        {
            float *out_float = va_arg(argv, float *);
            *out_float = atof(token);
        }
        else
        {
            unsigned int *out_int = va_arg(argv, unsigned int *);
            *out_int = atoi(token);
        }
    }
    va_end(argv);
    return true;
}
/* Smallest power of two >= x (returns 1 for x == 0 or x == 1). */
unsigned int next_pow(unsigned int x)
{
    unsigned int p = 1;
    for (; p < x; p <<= 1) { }
    return p;
}
/* Larger of two unsigned ints.
 * NOTE: double-underscore names are reserved for the implementation; the
 * name is kept for compatibility with existing callers. */
unsigned int __max(unsigned int a, unsigned int b)
{
    return (a > b) ? a : b;
}
/* 2-D relaxation pass: every interior, writable cell becomes the average of
 * its four diagonal neighbours; boundary cells (x or y equal to 0 or N-1)
 * and cells flagged in read_only are copied through unchanged.
 * Flat index = x + (y << yshift), i.e. yshift = log2(row stride); expects an
 * N x N thread launch with N a power of two.
 *
 * Fixed: the (x+1, y-1) neighbour term was missing its `<< yshift` row
 * shift, so it read from row 0 instead of row y-1. */
__global__ void iterate2d(float *from, float *to, bool *read_only, unsigned int yshift, unsigned int N)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int idx = x + (y << yshift);
    if (read_only[idx] == false && x != 0 && y != 0 && x != (N - 1) && y != (N - 1))
    {
        to[idx] = (from[(x+1)+((y+1)<<yshift)] + from[(x+1)+((y-1)<<yshift)]
                 + from[(x-1)+((y+1)<<yshift)] + from[(x-1)+((y-1)<<yshift)]) / 4;
    }
    else
    {
        to[idx] = from[idx];
    }
}
/* 3-D relaxation pass: every interior, writable cell becomes the average of
   its eight diagonal (corner) neighbours; boundary cells (any coordinate
   equal to 0 or N-1) and cells flagged in read_only are copied unchanged.
   Flat index = x + (y << yshift) + (z << zshift), i.e. yshift/zshift are the
   log2 row/plane strides — requires N to be a power of two (main guarantees
   this via next_pow).
   NOTE(review): the NN parameter is unused in this kernel. */
__global__ void iterate3d(float *from, float *to, bool *read_only, unsigned int yshift, unsigned int zshift, unsigned int N, unsigned int NN)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (read_only[x+(y<<yshift)+(z<<zshift)] == false && x != 0 && y != 0 && z != 0 && x != (N - 1) && y != (N - 1) && z != (N - 1))
    {
        /* Sum of the 8 corner neighbours (x±1, y±1, z±1), then average. */
        to[x+(y<<yshift)+(z<<zshift)] = (from[(x+1)+((y+1)<<yshift)+((z+1)<<zshift)] + from[(x+1)+((y+1)<<yshift)+((z-1)<<zshift)] + from[(x+1)+((y-1)<<yshift)+((z+1)<<zshift)] + from[(x+1)+((y-1)<<yshift)+((z-1)<<zshift)] + from[(x-1)+((y+1)<<yshift)+((z+1)<<zshift)] + from[(x-1)+((y+1)<<yshift)+((z-1)<<zshift)] + from[(x-1)+((y-1)<<yshift)+((z+1)<<zshift)] + from[(x-1)+((y-1)<<yshift)+((z-1)<<zshift)]) / 8;
    }
    else
    {
        to[x+(y<<yshift)+(z<<zshift)] = from[x+(y<<yshift)+(z<<zshift)];
    }
}
/* Parse the global options out of the settings file into `config`.
 * Recognised keys (each followed by an integer value): iterations, x, y, z,
 * threads; the bare tokens "2d"/"3d" select the dimensionality. Unknown
 * tokens are skipped; fields not mentioned keep their previous values.
 *
 * Fixed: a missing/unreadable file used to crash in fseek(NULL, ...); it is
 * now reported and the config is left untouched. */
void
read_config(char *settings_file, settings &config)
{
    FILE *handle = fopen(settings_file, "r");
    if (handle == NULL)
    {
        fprintf(stderr, "Cannot open config file %s\n", settings_file);
        return;
    }
    fseek(handle, 0L, SEEK_END);
    unsigned long size = ftell(handle);
    fseek(handle, 0L, SEEK_SET);
    /* Slurp the whole file; the +1 byte plus memset guarantees NUL
       termination for strtok. */
    char *buf = (char*) malloc((size + 1) * sizeof(char));
    memset(buf, 0, size + 1);
    fread(buf, size, 1, handle);
    const char *delimiter = " \n";
    char *token = strtok(buf, delimiter);
    while (token != NULL)
    {
        if (strcmp(token, "iterations") == 0)
        {
            token = strtok(NULL, delimiter);
            if (token == NULL) { break; }
            else { config.iterations = atoi(token); }
        }
        else if (strcmp(token, "x") == 0)
        {
            token = strtok(NULL, delimiter);
            if (token == NULL) { break; }
            else { config.x = atoi(token); }
        }
        else if (strcmp(token, "y") == 0)
        {
            token = strtok(NULL, delimiter);
            if (token == NULL) { break; }
            else { config.y = atoi(token); }
        }
        else if (strcmp(token, "z") == 0)
        {
            token = strtok(NULL, delimiter);
            if (token == NULL) { break; }
            else { config.z = atoi(token); }
        }
        else if (strcmp(token, "threads") == 0)
        {
            token = strtok(NULL, delimiter);
            if (token == NULL) { break; }
            else { config.threads = atoi(token); }
        }
        else if (strcmp(token, "2d") == 0)
        {
            config.dim_3d = false;
        }
        else if (strcmp(token, "3d") == 0)
        {
            config.dim_3d = true;
        }
        token = strtok(NULL, delimiter);
    }
    free(buf);
    fclose(handle);
}
/* Helper for init2d: pin cell (x, y) to value f, marking it read-only and
   counting it in config.read_only exactly once. */
static void set_fixed_cell2d(settings &config, float *data, bool *read_only,
                             unsigned int x, unsigned int y, float f)
{
    unsigned int idx = x + (y * config.n);
    if (read_only[idx] == false)
    {
        read_only[idx] = true;
        config.read_only ++;
    }
    data[idx] = f;
}
/* Populate the 2-D grid from the settings file. data/read_only must hold
 * config.n * config.n elements; both are zeroed first, then the directives
 * point, yline, xline, rectangle pin cells to fixed values, and finally all
 * cells outside the user-requested (x, y) extent are frozen at 0.
 *
 * Fixes: the file buffer was never NUL-terminated before strtok (read_config
 * memsets its buffer, this function did not), and a missing file crashed in
 * fseek. The repeated mark-cell stanza is factored into set_fixed_cell2d. */
void init2d(char *settings_file, settings &config, float *data, bool *read_only)
{
    FILE *handle = fopen(settings_file, "r");
    if (handle == NULL)
    {
        fprintf(stderr, "Cannot open settings file %s\n", settings_file);
        return;
    }
    fseek(handle, 0L, SEEK_END);
    unsigned long size = ftell(handle);
    fseek(handle, 0L, SEEK_SET);
    char *buf = (char*) malloc((size + 1) * sizeof(char));
    for(unsigned long i = 0; i < config.n * config.n; i++)
    {
        data[i] = 0.0f;
        read_only[i] = false;
    }
    config.read_only = 0;
    fread(buf, size, 1, handle);
    buf[size] = '\0';   /* fixed: buffer must be NUL-terminated for strtok */
    const char *delimiter = " \n";
    char *token = strtok(buf, delimiter);
    unsigned int x0;
    unsigned int x1;
    unsigned int y0;
    unsigned int y1;
    float f;
    unsigned int x;
    unsigned int y;
    while (token != NULL)
    {
        if (strcmp(token, "point") == 0) //point located at (x,y), syntax: point [x] [y] [value]
        {
            assert(read(token, delimiter, 3, &x, &y, &f));
            assert(x < config.n);
            assert(y < config.n);
            set_fixed_cell2d(config, data, read_only, x, y, f);
        }
        if (strcmp(token, "yline") == 0) //line in y direction, syntax: yline [xpos] [ystart] [yend] [value]
        {
            assert(read(token, delimiter, 4, &x, &y0, &y1, &f));
            assert(y0 < y1);
            assert(x < config.n);
            assert(y1 < config.n);
            for (y = y0; y <= y1; y++)
            {
                set_fixed_cell2d(config, data, read_only, x, y, f);
            }
        }
        else if (strcmp(token, "xline") == 0) //line in x direction, syntax: xline [ypos] [xstart] [xend] [value]
        {
            assert(read(token, delimiter, 4, &y, &x0, &x1, &f));
            assert(x0 < x1);
            assert(y < config.n);
            assert(x1 < config.n);
            for (x = x0; x <= x1; x++)
            {
                set_fixed_cell2d(config, data, read_only, x, y, f);
            }
        }
        else if (strcmp(token, "rectangle") == 0) //rectangle from (x0, y0) to (x1, y1), syntax: square [x0] [y0] [x1] [y1] [value]
        {
            assert(read(token, delimiter, 5, &x0, &y0, &x1, &y1, &f));
            assert(x0 < x1);
            assert(y0 < y1);
            assert(x1 < config.n);
            assert(y1 < config.n);
            for (x = x0; x <= x1; x++)
            {
                for (y = y0; y <= y1; y++)
                {
                    set_fixed_cell2d(config, data, read_only, x, y, f);
                }
            }
        }
        token = strtok(NULL, delimiter);
    }
    /* Freeze everything outside the requested extent at its initial 0. */
    for (x = config.x; x < config.n; x++)
    {
        for (y = config.y; y < config.n; y++)
        {
            unsigned int idx = x + (y * config.n);
            if (read_only[idx] == false)
            {
                read_only[idx] = true;
                config.read_only ++;
            }
        }
    }
    free(buf);
    fclose(handle);
}
/* Helper for init3d: pin voxel (x, y, z) to value f, marking it read-only and
   counting it in config.read_only exactly once. */
static void set_fixed_cell3d(settings &config, float *data, bool *read_only,
                             unsigned int x, unsigned int y, unsigned int z, float f)
{
    unsigned int idx = x + (y * config.n) + (z * config.n * config.n);
    if (read_only[idx] == false)
    {
        config.read_only ++;
        read_only[idx] = true;
    }
    data[idx] = f;
}
/* Populate the 3-D grid from the settings file. data/read_only must hold
 * config.n^3 elements; both are zeroed first, then the directives point,
 * zline/yline/xline, zrectangle/yrectangle/xrectangle and prism pin cells to
 * fixed values, and finally all cells outside the user-requested (x, y, z)
 * extent are frozen at 0.
 *
 * Fixes (same as init2d): the file buffer was never NUL-terminated before
 * strtok and a missing file crashed in fseek. The repeated mark-cell stanza
 * is factored into set_fixed_cell3d. */
void init3d(char *settings_file, settings &config, float *data, bool *read_only)
{
    FILE *handle = fopen(settings_file, "r");
    if (handle == NULL)
    {
        fprintf(stderr, "Cannot open settings file %s\n", settings_file);
        return;
    }
    fseek(handle, 0L, SEEK_END);
    unsigned long size = ftell(handle);
    fseek(handle, 0L, SEEK_SET);
    char *buf = (char*) malloc((size + 1) * sizeof(char));
    for(unsigned long i = 0; i < config.n * config.n * config.n; i++)
    {
        data[i] = 0.0f;
        read_only[i] = false;
    }
    config.read_only = 0;
    fread(buf, size, 1, handle);
    buf[size] = '\0';   /* fixed: buffer must be NUL-terminated for strtok */
    const char *delimiter = " \n";
    char *token = strtok(buf, delimiter);
    unsigned int x0;
    unsigned int x1;
    unsigned int y0;
    unsigned int y1;
    unsigned int z0;
    unsigned int z1;
    float f;
    unsigned int x;
    unsigned int y;
    unsigned int z;
    while (token != NULL)
    {
        if (strcmp(token, "point") == 0) //point located at (x,y,z), syntax: point [x] [y] [z] [value]
        {
            assert(read(token, delimiter, 4, &x, &y, &z, &f));
            assert(x < config.n);
            assert(y < config.n);
            assert(z < config.n);
            set_fixed_cell3d(config, data, read_only, x, y, z, f);
        }
        if (strcmp(token, "zline") == 0) //line in z direction, syntax: zline [xpos] [ypos] [zstart] [zend] [value]
        {
            assert(read(token, delimiter, 5, &x, &y, &z0, &z1, &f));
            assert(z0 < z1);
            assert(x < config.n);
            assert(y < config.n);
            assert(z1 < config.n);
            for (z = z0; z <= z1; z++)
            {
                set_fixed_cell3d(config, data, read_only, x, y, z, f);
            }
        }
        else if (strcmp(token, "yline") == 0) //line in y direction, syntax: yline [xpos] [zpos] [ystart] [yend] [value]
        {
            assert(read(token, delimiter, 5, &x, &z, &y0, &y1, &f));
            assert(y0 < y1);
            assert(x < config.n);
            assert(y1 < config.n);
            assert(z < config.n);
            for (y = y0; y <= y1; y++)
            {
                set_fixed_cell3d(config, data, read_only, x, y, z, f);
            }
        }
        else if (strcmp(token, "xline") == 0) //line in x direction, syntax: xline [ypos] [zpos] [xstart] [xend] [value]
        {
            assert(read(token, delimiter, 5, &y, &z, &x0, &x1, &f));
            assert(x0 < x1);
            assert(x1 < config.n);
            assert(y < config.n);
            assert(z < config.n);
            for (x = x0; x <= x1; x++)
            {
                set_fixed_cell3d(config, data, read_only, x, y, z, f);
            }
        }
        else if (strcmp(token, "zrectangle") == 0) //rectangle from (x0, y0) to (x1, y1) in z plane, syntax: zrectangle [x0] [y0] [x1] [y1] [z] [value]
        {
            assert(read(token, delimiter, 6, &x0, &y0, &x1, &y1, &z, &f));
            assert(x0 < x1);
            assert(y0 < y1);
            assert(x1 < config.n);
            assert(y1 < config.n);
            assert(z < config.n);
            for (x = x0; x <= x1; x++)
            {
                for (y = y0; y <= y1; y++)
                {
                    set_fixed_cell3d(config, data, read_only, x, y, z, f);
                }
            }
        }
        else if (strcmp(token, "yrectangle") == 0) //rectangle from (x0, z0) to (x1, z1) in y plane, syntax yrectangle [x0] [z0] [x1] [z1] [y] [value]
        {
            assert(read(token, delimiter, 6, &x0, &z0, &x1, &z1, &y, &f));
            assert(x0 < x1);
            assert(z0 < z1);
            assert(x1 < config.n);
            assert(y < config.n);
            assert(z1 < config.n);
            for (x = x0; x <= x1; x++)
            {
                for (z = z0; z <= z1; z++)
                {
                    set_fixed_cell3d(config, data, read_only, x, y, z, f);
                }
            }
        }
        else if (strcmp(token, "xrectangle") == 0) ////rectangle from (y0, z0) to (y1, z1) in x plane, syntax xrectangle [y0] [z0] [y1] [z1] [x] [value]
        {
            assert(read(token, delimiter, 6, &y0, &z0, &y1, &z1, &x, &f));
            assert(y0 < y1);
            assert(z0 < z1);
            assert(x < config.n);
            assert(y1 < config.n);
            assert(z1 < config.n);
            for (y = y0; y <= y1; y++)
            {
                for (z = z0; z <= z1; z++)
                {
                    set_fixed_cell3d(config, data, read_only, x, y, z, f);
                }
            }
        }
        else if (strcmp(token, "prism") == 0) //prism from (x0, y0, z0) to (x1, y1, z1), syntax prism [x0] [y0] [z0] [x1] [y1] [z1] [value]
        {
            assert(read(token, delimiter, 7, &x0, &y0, &z0, &x1, &y1, &z1, &f));
            assert(x0 < x1);
            assert(y0 < y1);
            assert(z0 < z1);
            assert(x1 < config.n);
            assert(y1 < config.n);
            assert(z1 < config.n);
            for (x = x0; x <= x1; x++)
            {
                for (y = y0; y <= y1; y++)
                {
                    for (z = z0; z <= z1; z++)
                    {
                        set_fixed_cell3d(config, data, read_only, x, y, z, f);
                    }
                }
            }
        }
        token = strtok(NULL, delimiter);
    }
    /* Freeze everything outside the requested extent at its initial 0. */
    for (x = config.x; x < config.n; x++)
    {
        for (y = config.y; y < config.n; y++)
        {
            for (z = config.z; z < config.n; z++)
            {
                unsigned int idx = x + (y * config.n) + (z * config.n * config.n);
                if (read_only[idx] == false)
                {
                    read_only[idx] = true;
                    config.read_only ++;
                }
            }
        }
    }
    free(buf);
    fclose(handle);
}
/* Relaxation driver: reads the config file, sizes the grid to the next power
 * of two covering the requested extent, runs `iterations` ping-pong passes
 * of iterate2d/iterate3d on the GPU, reports timing/throughput, and writes
 * the result to the output file.
 *
 * Fixes vs. the original:
 *  - the `config.n % block` asserts ran BEFORE config.n was computed (it was
 *    still 0, making them vacuously true); they now run after sizing;
 *  - deprecated cudaThreadSynchronize replaced by cudaDeviceSynchronize;
 *  - n^3 byte sizes are computed in 64 bits (the n*n*n product previously
 *    overflowed 32-bit unsigned for n >= 2048);
 *  - the CUDA events are destroyed before exit.
 * NOTE(review): CUDA calls wrapped in assert() vanish under NDEBUG; and the
 * threads=2048 case requests 32*32*2 threads per block, which exceeds the
 * 1024-thread-per-block limit of current GPUs. */
int main(int argc, char **argv)
{
    if (argc != 3)
    {
        printf("Usage: %s [config file] [output]\n", argv[0]);
        exit(1);
    }
    settings config;
    dim3 block(1,1,1);
    dim3 grid(1,1,1);
    unsigned long float_mem;
    unsigned long bool_mem;
    config.iterations = 0;
    config.n = 0;
    config.threads = 512;
    config.x = 0;
    config.y = 0;
    config.z = 0;
    config.dim_3d = false;
    char *settings_file = argv[1];
    read_config(settings_file, config);
    /* Map the requested thread count onto a 3-D block shape. */
    switch (config.threads)
    {
    case 2048:
        block.x = 32; block.y = 32; block.z = 2;
        break;
    case 1024:
        block.x = 32; block.y = 32; block.z = 1;
        break;
    case 512:
        block.x = 16; block.y = 16; block.z = 2;
        break;
    case 256:
        block.x = 16; block.y = 16; block.z = 1;
        break;
    case 128:
        block.x = 8; block.y = 8; block.z = 2;
        break;
    case 64:
        block.x = 8; block.y = 8; block.z = 1;
        break;
    case 32:
        block.x = 4; block.y = 4; block.z = 2;
        break;
    }
    if (!config.dim_3d)
    {
        block.z = 1;
    }
    if (config.dim_3d)
    {
        config.n = next_pow(__max(__max(config.x, config.y), config.z));
        grid.x = config.n / block.x;
        grid.y = config.n / block.y;
        grid.z = config.n / block.z;
        /* Cast first so the n^3 product is computed in 64 bits. */
        float_mem = (unsigned long)config.n * config.n * config.n * sizeof(float);
        bool_mem = (unsigned long)config.n * config.n * config.n * sizeof(bool);
    }
    else
    {
        config.n = next_pow(__max(config.x, config.y));
        grid.x = config.n / block.x;
        grid.y = config.n / block.y;
        float_mem = (unsigned long)config.n * config.n * sizeof(float);
        bool_mem = (unsigned long)config.n * config.n * sizeof(bool);
    }
    /* Moved here: these used to run before config.n was computed. */
    assert(config.n % block.x == 0);
    assert(config.n % block.y == 0);
    assert(config.n % block.z == 0);
    float *data = (float *)malloc(float_mem);
    bool *read_only = (bool *)malloc(bool_mem);
    if (config.dim_3d) { init3d(settings_file, config, data, read_only); }
    else { init2d(settings_file, config, data, read_only); }
    float *d_z_1;
    float *d_z_2;
    bool *d_read_only;
    cudaEvent_t start;
    cudaEvent_t stop;
    float compute_time;
    double gflops;
    /* Row/plane strides as shift amounts; valid because n is a power of 2. */
    unsigned int yshift = (unsigned int) log2((double) config.n);
    unsigned int zshift = (unsigned int) log2((double) config.n * config.n);
    assert(cudaSuccess == cudaEventCreate(&start));
    assert(cudaSuccess == cudaEventCreate(&stop));
    assert(cudaSuccess == cudaMalloc((void**) &d_z_1, float_mem));
    assert(cudaSuccess == cudaMalloc((void**) &d_z_2, float_mem));
    assert(cudaSuccess == cudaMalloc((void**) &d_read_only, bool_mem));
    assert(cudaSuccess == cudaMemcpy(d_z_1, data, float_mem, cudaMemcpyHostToDevice));
    assert(cudaSuccess == cudaMemcpy(d_read_only, read_only, bool_mem, cudaMemcpyHostToDevice));
    assert(cudaSuccess == cudaEventRecord(start, 0));
    /* Ping-pong between the two device buffers; each loop iteration runs two
       passes so the latest state always lands back in d_z_1. */
    for (unsigned int i = 0; i < config.iterations; i++)
    {
        if (config.dim_3d)
        {
            iterate3d<<<grid, block>>>(d_z_1, d_z_2, d_read_only, yshift, zshift, config.n, config.n * config.n);
            cudaDeviceSynchronize();   /* was deprecated cudaThreadSynchronize */
            iterate3d<<<grid, block>>>(d_z_2, d_z_1, d_read_only, yshift, zshift, config.n, config.n * config.n);
            cudaDeviceSynchronize();
            if (i % 50 == 0) { printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\bIterations: %u", i); }
        }
        else
        {
            iterate2d<<<grid, block>>>(d_z_1, d_z_2, d_read_only, yshift, config.n);
            cudaDeviceSynchronize();
            iterate2d<<<grid, block>>>(d_z_2, d_z_1, d_read_only, yshift, config.n);
            cudaDeviceSynchronize();
            if (i % 500 == 0) { printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\bIterations: %u", i); }
        }
    }
    assert(cudaSuccess == cudaEventRecord(stop, 0));
    assert(cudaSuccess == cudaEventSynchronize(stop));
    assert(cudaSuccess == cudaEventElapsedTime(&compute_time, start, stop));
    printf("\n");
    printf("Compute time: %fms\n", compute_time);
    /* Throughput estimate over the writable cells (16/8 flops per iteration
       pair in 3-D/2-D); 64-bit cast avoids overflow of n^3. */
    if (config.dim_3d) { gflops = (((unsigned long)config.n * config.n * config.n) - config.read_only) * 16.0 * config.iterations / (compute_time * 1000000.0); }
    else { gflops = (((unsigned long)config.n * config.n) - config.read_only) * 8.0 * config.iterations / (compute_time * 1000000.0); }
    printf("Compute speed: %f GFLOPS\n", gflops);
    assert(cudaSuccess == cudaMemcpy(data, d_z_1, float_mem, cudaMemcpyDeviceToHost));
    FILE *handle = fopen(argv[2], "w");
    assert(handle != NULL);
    if (config.dim_3d)
    {
        /* 3-D output: one "x, y, z, value" line per requested voxel. */
        for (unsigned int x = 0; x < config.x; x++)
        {
            for (unsigned int y = 0; y < config.y; y++)
            {
                for (unsigned int z = 0; z < config.z; z++)
                {
                    fprintf(handle, "%u, %u, %u, %f\n", x, y, z, data[x+(y*config.n)+(z*config.n*config.n)]);
                }
            }
        }
    }
    else
    {
        /* 2-D output: comma-separated rows of the full n x n grid. */
        for (unsigned int y = 0; y < config.n; y++)
        {
            for (unsigned int x = 0; x < config.n; x++)
            {
                fprintf(handle, "%06.2f", data[x+(y*config.n)]);
                if (x == config.n - 1) { fprintf(handle, "\n"); }
                else { fprintf(handle, ", "); }
            }
        }
    }
    fclose(handle);
    assert(cudaSuccess == cudaEventDestroy(start));
    assert(cudaSuccess == cudaEventDestroy(stop));
    assert(cudaSuccess == cudaFree(d_z_1));
    assert(cudaSuccess == cudaFree(d_z_2));
    assert(cudaSuccess == cudaFree(d_read_only));
    free(data);
    free(read_only);
    return 0;
}
|
14,274 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <vector>
using std::cin;
using std::cout;
/*******************************************************************************************************************************************************
*
* DEVICE
*
*******************************************************************************************************************************************************/
namespace Device
{
	// Number of threads per dimension used for kernel launch configuration.
	const int nThreads = 32;
	// Source image (device memory).
	double *inImage = nullptr;
	// Output image (device memory).
	double *outImage = nullptr;
	// Filter weights (all filters are assumed to have identical dimensions).
	double *filters = nullptr;
	// Raw result of applying each filter (the feature maps).
	double *featureMaps = nullptr;
	// Helper structure holding all sizes (in element counts, NOT bytes).
	struct Size
	{
		std::size_t inImageSize;       // width * height * 3
		std::size_t outImageWidth;     // (width - filterLineSize) / stride + 1
		std::size_t outImageHeight;    // (height - filterLineSize) / stride + 1
		std::size_t outImageSize;      // outImageWidth * outImageHeight * 3
		std::size_t kernelWeightsSize; // filtersCount * filterLineSize^2
		std::size_t featureMapsize;    // filtersCount * outImageSize
	} size;
	// Releases every device allocation made by allocateMemory().
	static void freeMemory()
	{
		cudaFree(inImage);
		cudaFree(outImage);
		cudaFree(filters);
		cudaFree(featureMaps);
	}
	/*
	 * Allocates and zero-fills all device buffers and records their element
	 * counts in `size`. Returns 0 on success, -1 on failure (everything
	 * allocated so far is released on failure).
	 * @param width - image width.
	 * @param height - image height.
	 * @param stride - filter window step.
	 * @param filterLineSize - row length of a single filter matrix. Must be odd.
	 * @param filtersCount - number of filters.
	 * @note All filters are assumed to have the same dimensions.
	 */
	static int allocateMemory(std::size_t width, std::size_t height, std::size_t stride, std::size_t filterLineSize, std::size_t filtersCount)
	{
		// For detecting allocation errors.
		auto cudaError = cudaSuccess;
		// Allocate memory.
		// cudaMalloc takes sizes in BYTES, so every element count is multiplied by sizeof the element type.
		auto kernelWeightsSize = filtersCount * filterLineSize * filterLineSize;
		cudaError = cudaMalloc(&filters, kernelWeightsSize * sizeof(*filters));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		auto inImageSize = width * height * 3;
		cudaError = cudaMalloc(&inImage, inImageSize * sizeof(*inImage));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		auto outImageWidth = ((width - filterLineSize) / stride + 1);
		auto outImageHeight = ((height - filterLineSize) / stride + 1);
		auto outImageSize = outImageWidth * outImageHeight * 3;
		cudaError = cudaMalloc(&outImage, outImageSize * sizeof(*outImage));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		auto featureMapsize = filtersCount * outImageSize;
		cudaError = cudaMalloc(&featureMaps, featureMapsize * sizeof(*featureMaps));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		// Zero-fill the freshly allocated memory.
		cudaError = cudaMemset(filters, 0, kernelWeightsSize * sizeof(*filters));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		cudaError = cudaMemset(inImage, 0, inImageSize * sizeof(*inImage));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		cudaError = cudaMemset(outImage, 0, outImageSize * sizeof(*outImage));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		cudaError = cudaMemset(featureMaps, 0, featureMapsize * sizeof(*featureMaps));
		if (cudaSuccess != cudaError) {
			freeMemory();
			return -1;
		}
		// Record the element counts in the struct declared above.
		size.kernelWeightsSize = kernelWeightsSize;
		size.inImageSize = inImageSize;
		size.outImageWidth = outImageWidth;
		size.outImageHeight = outImageHeight;
		size.outImageSize = outImageSize;
		size.featureMapsize = featureMapsize;
		return 0;
	}
}
/* Функция ядра, являющаяся имитацией сверточного слоя нейросети.
*
* Эта функция работает следующим образом
* 1. на вход подается изображение inImage. Это изображение имеет структуру трехмерного массива [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ]
* здесь [r, g, b] - это пиксель (так как он состоит из трех цветов)
* [[r, g, b], [r, g, b], ..., ] - это строчка пикселов
* [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] - контейнер таких строчек.
*
* У изображения есть ширина и высота. Массив имеет размерность [высота [ширина [3]]]. Об этом важно помнить!!!
*
* Если развернуть это изображение в одномерный массив, то его размерность будет высота Х ширина Х 3, тогда
* [[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ] превратится в r, g, b, r, g, b, r, g, b, ...
*
* ЗАМЕЧАНИЕ
* Мы будем говорить о ПИКСЕЛЕ (или ТОЧКЕ) и о ЦВЕТЕ. Пиксель - это [r, g, b] (то есть иногда для удобства мы будем представлять, как будто мы работаем не с трехмерным массивом, а с двухмерным).
* Цвет - это конкретное значение (от 0 до 255) конкретного цвета конкретного пикселя. Когда мы будем говорить о цвете, никакой абстракции уже не останется.
*
* 2. на вход подаются фильтры. Это массив вида [ [[1, 1, 1,], [1, 1, 1], [1, 1, 1]], ... ]. В данном случае передается массив матриц-фильтров 3Х3
*
* ЗАМЕЧАНИЯ
* фильтры обязательно должны быть квадратными
* размерность обязательно нечетная
* все фильтры имеют одинаковую размерность
*
* Фильтры - это матрицы, которые позволяют находить на изображении специальные особенности, подобно тому, как работает зрительная кора головного мозга.
* Например, это могут быть прямые линии, линии под наклоном, а могут быть и сложные паттерны типа человеческого лица.
* Каждый фильтр после применения к изображению формирует новое изображение меньшей размерности. На этом изображении (в зависимости от того, насколько правильно подобраны веса)
* четко отображаются признаки, описанные выше. Все эти изображения формируют карту призаков или featureMaps. Эти изображения мы будем сохранять в одноименную переменную.
* Таким образом, featureMaps будет по сути массивом изображений в виде [[[[r, g, b], [r, g, b], ..., ], [[r, g, b], [r, g, b], ..., ], ... ], ...]
* Сколько фильтов, столько и изображений, причем идут они соответственно номерам, то есть первый фильтр формирует первое изображение в массиве, второй - второе и т.д.
*
* В тот же самый момент, мы возьмем все карты признаков, попиксельно сложим, разделим на количество фильтров, и результат сохраним в outImage. По способу хренения он идентичен inImage (однако меньшего размера).
*
*
* @param inImage - входное изображение для обработки.
* @param width - ширина изображения.
* @param height - высота изображения.
* @param filters - веса фильтров.
* @param filtersCount - количество фильтров.
* @param stride - смещение фильтра на следующем шаге.
* @param outImage - выходное изображение.
* @param featureMaps - карты признаков.
* @param outWidth - ширина выходного изображения.
* @param outHeight - высота выходного изображения.
*/
/*
__global__ void gpuCNN(
const double *inImage,
std::size_t width,
std::size_t height,
double *filters,
std::size_t filtersCount,
std::size_t filterLineSize,
std::size_t stride,
double *outImage,
double *featureMaps,
std::size_t outWidth,
std::size_t outHeight)
{
auto halfLineSize = filterLineSize / 2;
stride = (0 == stride) ? 1 : stride;
auto outPixelX = threadIdx.x + blockIdx.x * blockDim.x;
auto outPixelY = threadIdx.y + blockIdx.y * blockDim.y;
if (outPixelX < outWidth && outPixelY < outHeight)
{
auto pixelX = outPixelX * stride + halfLineSize;
auto pixelY = outPixelY * stride + halfLineSize;
// Функция ядра на GPU
for(std::size_t colorIdx = 0; colorIdx < 3; ++colorIdx)
{
double outImageSum = 0;
auto outColorPos = outPixelY*outWidth*3 + outPixelX*3 + colorIdx;
for(std::size_t filterIdx = 0; filterIdx < filtersCount; ++filterIdx)
{
double currentFilterSum = 0;
for(std::size_t i = 0; i < filterLineSize; ++i)
{
for(std::size_t j = 0; j < filterLineSize; ++j)
{
auto convPixelX = pixelX + j - halfLineSize;
auto convPixelY = pixelY + i - halfLineSize;
auto colorPos = convPixelY*width*3 + convPixelX*3 + colorIdx;
auto weightPos = filterIdx*filterLineSize*filterLineSize + i*filterLineSize + j;
currentFilterSum += inImage[colorPos] * filters[weightPos];
}
}
outImageSum += currentFilterSum;
featureMaps[filterIdx*outWidth*outHeight*3 + outColorPos] = currentFilterSum;
}
outImage[outColorPos] = outImageSum / (float)filtersCount;
}
}
}
*/
/*******************************************************************************************************************************************************
*
* HOST (CPU reference implementation)
*
*******************************************************************************************************************************************************/
/**
 * CPU reference implementation of a single convolutional layer.
 * Applies filtersCount square filters (filterLineSize x filterLineSize) to an
 * interleaved-RGB image; each filter is applied to each channel independently.
 *
 * @param inImage      input image, row-major, 3 doubles (RGB) per pixel.
 * @param width        input image width in pixels.
 * @param height       input image height in pixels.
 * @param filters      filter weights, filtersCount blocks of filterLineSize^2.
 * @param filtersCount number of filters.
 * @param filterLineSize side length of one (square) filter.
 * @param stride       filter step; 0 is treated as 1.
 * @param outImage     output image (average over all filters), outWidth x outHeight x 3.
 * @param featureMaps  per-filter responses, filtersCount x outWidth x outHeight x 3.
 * @param outWidth     output image width.
 * @param outHeight    output image height.
 */
void cpuCNN(
const std::vector<double> &inImage,
std::size_t width,
std::size_t height,
const std::vector<double> &filters,
std::size_t filtersCount,
std::size_t filterLineSize,
std::size_t stride,
std::vector<double> &outImage,
std::vector<double> &featureMaps,
std::size_t outWidth,
std::size_t outHeight
)
{
    // BUGFIX: was 'static const', which froze halfLineSize at the value of the
    // first call and broke any later call with a different filterLineSize.
    const auto halfLineSize = filterLineSize / 2;
    // BUGFIX: this guard was commented out; stride == 0 is now treated as 1
    // (matches the disabled GPU kernel and avoids division by zero).
    stride = (0 == stride) ? 1 : stride;
    for (std::size_t x = 0; x < outWidth; ++x)
    {
        for (std::size_t y = 0; y < outHeight; ++y)
        {
            // Center of the receptive field in input-image coordinates.
            auto pixelX = x * stride + halfLineSize;
            auto pixelY = y * stride + halfLineSize;
            for (std::size_t colorIdx = 0; colorIdx < 3; ++colorIdx)
            {
                double outImageSum = 0;
                // (x, y) already are the output coordinates; the original
                // recomputed them as (pixel - halfLineSize) / stride.
                auto outColorPos = y * outWidth * 3 + x * 3 + colorIdx;
                for (std::size_t filterIdx = 0; filterIdx < filtersCount; ++filterIdx)
                {
                    double currentFilterSum = 0;
                    for (std::size_t i = 0; i < filterLineSize; ++i)
                    {
                        for (std::size_t j = 0; j < filterLineSize; ++j)
                        {
                            // Input pixel under filter tap (i, j); never
                            // underflows because pixelX/Y >= halfLineSize.
                            auto convPixelX = pixelX + j - halfLineSize;
                            auto convPixelY = pixelY + i - halfLineSize;
                            auto colorPos = convPixelY * width * 3 + convPixelX * 3 + colorIdx;
                            auto weightPos = filterIdx * filterLineSize * filterLineSize + i * filterLineSize + j;
                            currentFilterSum += inImage[colorPos] * filters[weightPos];
                        }
                    }
                    outImageSum += currentFilterSum;
                    featureMaps[filterIdx * outWidth * outHeight * 3 + outColorPos] = currentFilterSum;
                }
                // Average over filters; the (float) cast is kept so results
                // stay bit-compatible with the previous implementation.
                outImage[outColorPos] = outImageSum / (float)filtersCount;
            }
        }
    }
}
/*******************************************************************************************************************************************************
*
* COMMAND-LINE ARGUMENTS
*
*******************************************************************************************************************************************************/
// Positions of the expected command-line arguments in argv.
namespace CliArgs
{
// Number of arguments the program expects (the program name is among them at
// index 0, hence the count is one larger than the number of user arguments).
static const int N_ARGS = 5;
// Path to the input image.
static const int IN_FILE_POS = 1;
// Image dimension - width.
static const int IMG_WIDTH = 2;
// Image dimension - height.
static const int IMG_HEIGHT = 3;
// Stride.
static const int STRIDE = 4;
}
/*******************************************************************************************************************************************************
*
* MAIN
*
*******************************************************************************************************************************************************/
// IMPORTANT
// To keep the program from getting overly complicated, validation of the
// input data is kept to a minimum.
int main(int argc, char *argv[])
{
// Windows consoles need the locale set to display the Cyrillic messages.
#if defined(WIN32) || defined(_WIN32) || defined(__WIN32) && !defined(__CYGWIN__)
setlocale(0, "russian");
#endif
// Check the argument count.
if (CliArgs::N_ARGS != argc) {
cout << "Неверное количество аргументов." << std::endl;
getchar();
return 1;
}
// Extract the file name.
auto imageFilePath = argv[CliArgs::IN_FILE_POS];
// Extract the image dimensions.
auto imageWidth = atoi(argv[CliArgs::IMG_WIDTH]);
auto imageHeight = atoi(argv[CliArgs::IMG_HEIGHT]);
// Extract the stride.
auto stride = atoi(argv[CliArgs::STRIDE]);
// 3 doubles (RGB) per pixel.
auto imageSize = imageWidth * imageHeight * 3;
// Read the data from the image file (whitespace-separated doubles).
std::ifstream ifs(imageFilePath, std::ios_base::in);
if (!ifs.is_open()) {
cout << "Невозможно открыть файл " << imageFilePath << std::endl;
getchar();
return 1;
}
std::cout << "Начато чтение из файла..." << std::endl;
std::vector<double> imageData(imageSize);
for (std::size_t i = 0; i < imageSize; ++i)
ifs >> imageData[i];
ifs.close();
std::cout << "Чтение закончено" << std::endl;
// Fill the filters: four hand-crafted 5x5 kernels (vertical stripe,
// two diagonals, horizontal line detector).
auto filterLineSize = 5;
auto filtersCount = 4;
std::vector<double> filters = {
0.5, 0.5, -1, 0.5, 0.5,
0.5, 0.5, -1, 0.5, 0.5,
0.5, 0.5, -1, 0.5, 0.5,
0.5, 0.5, -1, 0.5, 0.5,
0.5, 0.5, -1, 0.5, 0.5,
0, 0, 0, 0, 2,
0, 0, 0, 2, 0,
0, 0, 2, 0, 0,
0, 2, 0, 0, 0,
2, 0, 0, 0, 0,
2, 0, 0, 0, 0,
0, 2, 0, 0, 0,
0, 0, 2, 0, 0,
0, 0, 0, 2, 0,
0, 0, 0, 0, 2,
-0.5, -0.5, -0.5, -0.5, -0.5,
-0.5, -0.5, -0.5, -0.5, -0.5,
1, 1, 1, 1, 1,
-0.5, -0.5, -0.5, -0.5, -0.5,
-0.5, -0.5, -0.5, -0.5, -0.5
};
// Allocate memory on the device (also computes Device::size).
if (0 != Device::allocateMemory(imageWidth, imageHeight, stride, filterLineSize, filtersCount)) {
cout << "Ошибка выделения памяти на графической карте" << std::endl;
getchar();
return 1;
}
cout << "Закончено выделение памяти на устройстве" << std::endl;
// Copy the data HOST -> GPU.
auto cudaError = cudaSuccess;
cudaError = cudaMemcpy(Device::inImage, imageData.data(), imageSize * sizeof(imageData[0]), cudaMemcpyHostToDevice);
if (cudaSuccess != cudaError) {
cout << "Ошибка при копировании результата на устройство: " << cudaError << std::endl;
getchar();
Device::freeMemory();
return 1;
}
cudaError = cudaMemcpy(Device::filters, filters.data(), filters.size() * sizeof(filters[0]), cudaMemcpyHostToDevice);
if (cudaSuccess != cudaError) {
cout << "Ошибка при копировании результата на устройство: " << cudaError << std::endl;
getchar();
Device::freeMemory();
return 1;
}
cout << "Закончено копирование данных на устройство" << std::endl;
// CPU-side computation buffers.
std::vector<double> cpuOutImage(Device::size.outImageSize, 0.0);
std::vector<double> cpuFeatureMaps(Device::size.featureMapsize, 0.0);
// Run (and time) the CPU implementation.
auto cpuBeginTime = std::chrono::steady_clock::now();
cpuCNN(
imageData,
imageWidth,
imageHeight,
filters,
filtersCount,
filterLineSize,
stride,
cpuOutImage,
cpuFeatureMaps,
Device::size.outImageWidth,
Device::size.outImageHeight
);
auto cpuTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - cpuBeginTime).count();
// Write the results to files.
std::cout << "Начата запись изображения в файл..." << std::endl;
std::ofstream cpuImgOfs("cpu_out_image.txt", std::ios_base::out | std::ios_base::trunc);
if (cpuImgOfs.is_open())
for (std::size_t i = 0; i < Device::size.outImageSize; ++i)
// NOTE(review): modulo wraps values into 0..254 for display; a value of
// exactly 255 becomes 0 — presumably intentional clamping, verify.
cpuImgOfs << static_cast<unsigned int>(cpuOutImage[i]) % 255 << " ";
cpuImgOfs.close();
cout << "Запись изображения в файл закончена..." << std::endl;
cout << "Начата запись карты признаков в файл..." << std::endl;
std::ofstream cpuFmOfs("cpu_out_features.txt", std::ios_base::out | std::ios_base::trunc);
if (cpuFmOfs.is_open())
for (std::size_t i = 0; i < Device::size.featureMapsize; ++i)
cpuFmOfs << static_cast<unsigned int>(cpuFeatureMaps[i]) % 255 << " ";
cpuFmOfs.close();
cout << "Запись карты признаков в файл закончена..." << std::endl;
//delete[] cpuOutImage;
//delete[] cpuFeatureMaps;
// GPU-side computation (currently disabled together with the gpuCNN kernel).
/*
dim3 threads(Device::nThreads, Device::nThreads);
auto nBlocksX = Device::size.outImageWidth / threads.x;
nBlocksX += (0 == Device::size.outImageWidth % threads.x) ? 0 : 1;
auto nBlocksY = Device::size.outImageHeight / threads.y;
nBlocksY += (0 == Device::size.outImageHeight % threads.y) ? 0 : 1;
dim3 blocks(nBlocksX, nBlocksY);
Kernel launch.
auto gpuBeginTime = std::chrono::steady_clock::now();
gpuCNN <<< blocks, threads >>> (
Device::inImage,
imageWidth,
imageHeight,
Device::filters,
filtersCount,
filterLineSize,
stride,
Device::outImage,
Device::featureMaps,
Device::size.outImageWidth,
Device::size.outImageHeight
);
cudaDeviceSynchronize();
auto gpuTime = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - gpuBeginTime).count();
cout << "Закончен расчет на GPU" << std::endl;
Now pull the result back from the GPU.
auto outImage = new double[Device::size.outImageSize];
auto featureMaps = new double[Device::size.featureMapsize];
cudaError = cudaMemcpy(outImage, Device::outImage, Device::size.outImageSize * sizeof(*outImage), cudaMemcpyDeviceToHost);
if (cudaSuccess != cudaError) {
cout << "Ошибка при копировании изображения с устройства: " << cudaError << std::endl;
getchar();
Device::freeMemory();
return 1;
}
cudaError = cudaMemcpy(featureMaps, Device::featureMaps, Device::size.featureMapsize * sizeof(*featureMaps), cudaMemcpyDeviceToHost);
if (cudaSuccess != cudaError) {
cout << "Ошибка при копировании карт признаков с устройства: " << cudaError << std::endl;
getchar();
Device::freeMemory();
return 1;
}
Device::freeMemory();
cout << "Копирование результата с устройства закончено" << std::endl;
Write to files.
std::cout << "Начата запись изображения в файл..." << std::endl;
std::ofstream imgOfs("out_image.txt", std::ios_base::out | std::ios_base::trunc);
if (imgOfs.is_open())
for (std::size_t i = 0; i < Device::size.outImageSize; ++i)
imgOfs << static_cast<unsigned int>(outImage[i]) % 255 << " ";
imgOfs.close();
cout << "Запись изображения в файл закончена..." << std::endl;
cout << "Начата запись карты признаков в файл..." << std::endl;
std::ofstream fmOfs("out_features.txt", std::ios_base::out | std::ios_base::trunc);
if (fmOfs.is_open())
for (std::size_t i = 0; i < Device::size.featureMapsize; ++i)
fmOfs << static_cast<unsigned int>(featureMaps[i]) % 255 << " ";
fmOfs.close();
cout << "Запись карты признаков в файл закончена..." << std::endl;
delete[] outImage;
delete[] featureMaps;
*/
// Final summary.
cout << std::endl << std::endl;
cout << "Полученное изображение имеет параметры " << Device::size.outImageWidth << " X " << Device::size.outImageHeight << std::endl;
cout << "Карта признаков имеет параметры " << Device::size.outImageWidth << " X " << Device::size.outImageHeight * filtersCount << std::endl << std::endl;
cout << "Время на CPU: " << cpuTime << " миллисекунд "<< std::endl;
//cout << "Время на GPU: " << gpuTime << " миллисекунд" << std::endl;
cout << "Для выхода нажмите Enter." << std::endl;
getchar();
return 0;
}
|
14,275 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
unsigned int getmax(unsigned int *, unsigned int);
#define TPB 1024
/*
 * One pass of an iterative 10-way max reduction: folds dev_num[0..size) into
 * dev_num[0..size/10). The host calls this repeatedly with size /= 10 until
 * the maximum is left in dev_num[0].
 * No intra-pass synchronization is needed: thread id only writes dev_num[id]
 * (id < size/10) and only reads indices >= size/10.
 */
__global__ void get_cuda_max(unsigned int* dev_num, unsigned int size){
    unsigned int id = (blockDim.x * blockIdx.x) + threadIdx.x;
    unsigned int ten = size/10;
    if(ten == 0){
        /* BUGFIX: when fewer than 10 elements remain, size/10 == 0 and the
         * original kernel did nothing, leaving dev_num[1..size-1] unexamined.
         * Let thread 0 finish the reduction sequentially. */
        if(id == 0){
            for(unsigned int i = 1; i < size; i++){
                if(dev_num[i] > dev_num[0])
                    dev_num[0] = dev_num[i];
            }
        }
        return;
    }
    if(id < ten){
        for(unsigned int i = 1; i < 10; i++){
            if(dev_num[ten*i + id] > dev_num[id])
                dev_num[id] = dev_num[ten*i + id];
        }
        /* BUGFIX: when size is not a multiple of 10, the tail elements
         * ten*10 .. size-1 were silently dropped; fold them in here. */
        for(unsigned int i = ten*10 + id; i < size; i += ten){
            if(dev_num[i] > dev_num[id])
                dev_num[id] = dev_num[i];
        }
    }
}
int main(int argc, char *argv[])
{
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
// Require exactly one user argument: the array size.
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers )
{
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++)
numbers[i] = rand() % size;
// for( i = 0; i < size; i++)
// printf("%u\n", numbers[i]);
// Launch one thread per element; each pass of the kernel only uses the
// first size/10 of them (see get_cuda_max).
unsigned int num_blocks = (size + TPB - 1)/TPB;
unsigned int* dev_num;
cudaMalloc((void**) &dev_num, size*sizeof(unsigned int));
cudaMemcpy(dev_num, numbers, size*sizeof(unsigned int), cudaMemcpyHostToDevice);
// Iteratively shrink the candidate range by a factor of 10; after the loop
// the maximum is expected in dev_num[0].
unsigned int size_cp = size;
while(size_cp > 1){
get_cuda_max<<<num_blocks, TPB>>>(dev_num, size_cp);
size_cp = size_cp/10;
}
// cudaMemcpy synchronizes with the preceding kernel launches.
// NOTE(review): this overwrites 'numbers' with the partially reduced array;
// the sequential getmax() check below still works because the reduction
// only ever copies existing values, so the true maximum is preserved.
cudaMemcpy(numbers, dev_num, size*sizeof(unsigned int), cudaMemcpyDeviceToHost);
unsigned int ans = numbers[0];
cudaFree(dev_num);
printf(" The maximum number in the array is: %u\n",
ans);
printf("The max num sequentially is: %u\n", getmax(numbers, size));
free(numbers);
exit(0);
}
/*
 * Sequential maximum of an array of unsigned ints.
 * num:  the values to scan (must hold at least 'size' elements)
 * size: number of elements (assumed >= 1)
 * returns the largest value in num[0..size-1]
 */
unsigned int getmax(unsigned int num[], unsigned int size)
{
    unsigned int best = num[0];
    for (unsigned int idx = 1; idx < size; ++idx) {
        if (num[idx] > best) {
            best = num[idx];
        }
    }
    return best;
}
|
14,276 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
cudaError_t barycentricCuda(const float3 *v0, const float3 *v1, const float3 *v2, const float *da, const float *db, const float *dc, float *dOut, int2 framebufferSize);
// 2-D dot product: a.x*b.x + a.y*b.y.
__device__ __inline__ float dot(const float2 a, const float2 b)
{
    float s = a.x * b.x;
    s += a.y * b.y;
    return s;
}
// Maps integer pixel coordinates (x, y) to normalized device coordinates in
// [-1, 1], sampling at the center of each pixel; y is flipped so +y is up.
__device__ float2 calculatePosition(int x, int y, float width, float height)
{
    float2 fragSize = make_float2(2 / width, 2 / height);
    // BUGFIX: the x component previously used fragSize.y / 2 for its
    // half-pixel offset, which is only correct for square framebuffers.
    float px = fragSize.x * x + fragSize.x / 2 - 1;
    float py = (fragSize.y * y + fragSize.y / 2 - 1) * -1;
    return make_float2(px, py);
}
// Rasterizes one triangle: for every pixel inside the triangle (v0, v1, v2)
// the kernel writes the barycentric interpolation of the per-vertex scalar
// attributes (da, db, dc) into dOut; pixels outside the triangle get 0.
// dOut is indexed as a dense row-major width x height array.
// NOTE(review): the host allocates dOut with cudaMalloc3D; this indexing
// ignores the pitch, which works only because the memcpy back ignores it in
// the same way — confirm if the buffer is ever read differently.
// Launch: 2-D grid covering at least width x height threads.
__global__ void baryKernel(const float3 *v0, const float3 *v1, const float3 *v2, const float *da, const float *db, const float *dc, float *dOut, int *width, int *height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < *width && y < *height)
{
// Pixel position in normalized device coordinates.
float2 pos = calculatePosition(x, y, *width, *height);
float2 t0 = make_float2(v2->x, v2->y);
float2 t1 = make_float2(v0->x, v0->y);
float2 t2 = make_float2(v1->x, v1->y);
// NOTE: these locals shadow the v0/v1/v2 pointer parameters from here on;
// they are the triangle edge vectors of the Cramer's-rule barycentric solve.
float2 v0 = make_float2(t1.x - t0.x, t1.y - t0.y);
float2 v1 = make_float2(t2.x - t0.x, t2.y - t0.y);
float2 v2 = make_float2(pos.x - t0.x, pos.y - t0.y);
float d00 = dot(v0, v0);
float d01 = dot(v0, v1);
float d11 = dot(v1, v1);
float d20 = dot(v2, v0);
float d21 = dot(v2, v1);
float denom = d00 * d11 - d01 * d01;
float baryX = (d11 * d20 - d01 * d21) / denom;
float baryY = (d00 * d21 - d01 * d20) / denom;
float baryZ = 1 - baryX - baryY;
// Strictly inside the triangle (edges excluded).
if (baryX > 0 && baryY > 0 && baryZ > 0)
{
dOut[y * *width + x] = *da * baryX + *db * baryY + *dc * baryZ;
}
else
{
dOut[y * *width + x] = 0;
}
}
}
// Demo driver: interpolates three vertex attributes across a 50x50
// framebuffer on the GPU and prints the result as a text grid.
int main()
{
    printf("\n\n\nBarycentric:\n");
    int2 framebufferSize = make_int2(50, 50);
    // Triangle vertices (z is unused by the 2-D barycentric computation).
    float3 bary_v0 = make_float3(0, 1, 0);
    float3 bary_v1 = make_float3(1, -1, 0);
    float3 bary_v2 = make_float3(-1, -1, 0);
    // Per-vertex attribute values to interpolate.
    float bary_da = 3;
    float bary_db = 2;
    float bary_dc = 1;
    // BUGFIX: was sizeof(float*) (pointer size), not sizeof(float); it only
    // worked because pointers are at least as large as floats.
    float *bary_dOut = (float*)malloc(framebufferSize.x * framebufferSize.y * sizeof(float));
    if (bary_dOut == NULL) {
        fprintf(stderr, "malloc failed!");
        return 1;
    }
    // Barycentric in parallel.
    cudaError_t cudaStatus = barycentricCuda(&bary_v0, &bary_v1, &bary_v2, &bary_da, &bary_db, &bary_dc, bary_dOut, framebufferSize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "barycentricCuda failed!");
        free(bary_dOut);
        return 1;
    }
    printf("{\n");
    for (int y = 0; y < framebufferSize.y; y++)
    {
        printf(" {");
        for (int x = 0; x < framebufferSize.x; x++)
        {
            // BUGFIX: the row stride is the framebuffer WIDTH (x extent);
            // the old code used framebufferSize.y and only worked because
            // the framebuffer is square.
            printf("%.1f|", bary_dOut[x + y * framebufferSize.x]);
        }
        printf("}\n");
    }
    printf("}\n");
    // BUGFIX: the host buffer was previously leaked.
    free(bary_dOut);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Device-allocation helper: cudaMalloc's SIZE elements of TYPE into VAR.
// Requires a local cudaError_t named 'cudaStatus' and an 'Error:' cleanup
// label in the enclosing function.
#define myMalloc(VAR, SIZE, TYPE) cudaStatus = cudaMalloc((void**)&VAR, SIZE * sizeof(TYPE)); \
if (cudaStatus != cudaSuccess) {\
fprintf(stderr, "cudaMalloc failed!"); \
goto Error; \
}
// Same as myMalloc, but additionally copies SIZE elements from the host
// pointer SOURCEVAR into the freshly allocated device buffer.
#define myVarOnGPU(VAR, SOURCEVAR, SIZE, TYPE) cudaStatus = cudaMalloc((void**)&VAR, SIZE * sizeof(TYPE));\
if (cudaStatus != cudaSuccess) {\
fprintf(stderr, "cudaMalloc failed!");\
goto Error;\
}\
cudaStatus = cudaMemcpy(VAR, SOURCEVAR, SIZE * sizeof(TYPE), cudaMemcpyHostToDevice);\
if (cudaStatus != cudaSuccess) {\
fprintf(stderr, "cudaMemcpy failed!");\
goto Error;\
}
// Computes barycentric interpolation over the whole framebuffer on the GPU:
// uploads the triangle (v0, v1, v2), the per-vertex attributes (da, db, dc)
// and the framebuffer dimensions, launches baryKernel with one thread per
// pixel, and copies framebufferSize.x * framebufferSize.y floats back into
// dOut. Returns the first CUDA error encountered (cudaSuccess on success).
cudaError_t barycentricCuda(const float3 *v0, const float3 *v1, const float3 *v2, const float *da, const float *db, const float *dc, float *dOut, int2 framebufferSize)
{
    int length = framebufferSize.x * framebufferSize.y;
    int bytes = length * sizeof(float);
    const dim3 windowSize(framebufferSize.x, framebufferSize.y);
    const dim3 blockSize(16, 16, 1);
    // Round the grid up so every pixel is covered; the kernel bounds-checks.
    const dim3 gridSize(windowSize.x / blockSize.x + 1, windowSize.y / blockSize.y + 1);
    float3 *dev_v0 = 0;
    float3 *dev_v1 = 0;
    float3 *dev_v2 = 0;
    float *dev_da = 0;
    float *dev_db = 0;
    float *dev_dc = 0;
    struct cudaPitchedPtr dstGPU;
    int *dev_width = 0;
    int *dev_height = 0;
    cudaError_t cudaStatus;
    // Keep cudaFree(dstGPU.ptr) in the cleanup path safe even when we fail
    // before cudaMalloc3D runs.
    dstGPU.ptr = 0;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate and populate GPU buffers for vectors and data.
    myVarOnGPU(dev_v0, v0, 1, float3);
    myVarOnGPU(dev_v1, v1, 1, float3);
    myVarOnGPU(dev_v2, v2, 1, float3);
    myVarOnGPU(dev_da, da, 1, float);
    myVarOnGPU(dev_db, db, 1, float);
    myVarOnGPU(dev_dc, dc, 1, float);
    myVarOnGPU(dev_width, &framebufferSize.x, 1, int);
    myVarOnGPU(dev_height, &framebufferSize.y, 1, int);
    // NOTE: the kernel and the memcpy below both index this buffer as a
    // dense width*height array and ignore dstGPU.pitch; cudaMalloc3D only
    // over-allocates each row, so this is consistent (a plain cudaMalloc
    // would express the intent more clearly).
    cudaStatus = cudaMalloc3D(&dstGPU, make_cudaExtent(framebufferSize.x * sizeof(float), framebufferSize.y, 1));
    // BUGFIX: this allocation was previously unchecked.
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    baryKernel <<<gridSize, blockSize>>> (dev_v0, dev_v1, dev_v2, dev_da, dev_db, dev_dc, (float *)dstGPU.ptr, dev_width, dev_height);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "barycentricCuda launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching barycentricCuda!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(dOut, dstGPU.ptr, bytes, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dstGPU.ptr);
    cudaFree(dev_dc);
    cudaFree(dev_db);
    cudaFree(dev_da);
    cudaFree(dev_v2);
    cudaFree(dev_v0);
    cudaFree(dev_v1);
    // BUGFIX: dev_width and dev_height were previously leaked.
    cudaFree(dev_width);
    cudaFree(dev_height);
    return cudaStatus;
}
|
14,277 | /*
@Author: 3sne ( Mukur Panchani )
@FileName: q3BinaryMaker.cu
@Task: CUDA program that converts chars of a string to binary.
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Each thread converts one character of 'str' into a decimal integer whose
// digits spell the character's binary representation (e.g. 'A' = 65 ->
// 1000001) and stores it in binArr. Launch with one thread per character.
__global__ void makeBinaries(char *str, int *binArr) {
    int tid = threadIdx.x;
    int value = (int)str[tid];
    int result = 0;
    int place = 1;
    // Peel off bits from the low end, writing each into the next decimal digit.
    while (value > 0) {
        result += (value % 2) * place;
        value /= 2;
        place *= 10;
    }
    binArr[tid] = result;
}
// Reads a line from stdin and prints each character's "binary look" value
// computed on the GPU (one thread per character).
int main() {
    char *dStr; int *dBinArr;
    char *str = (char*)malloc(sizeof(char) * 10240);
    printf("Enter the string >> ");
    // BUGFIX: the old format "%[^\n]s" had a spurious trailing 's' and no
    // width limit (buffer overflow risk); also check that a string was read.
    if (scanf("%10239[^\n]", str) != 1) {
        free(str);
        return 1;
    }
    int len = strlen(str);
    int *binArr = (int*)malloc(sizeof(int) * len);
    cudaMalloc((void **)&dStr , len*sizeof(char));
    cudaMalloc((void **)&dBinArr, len*sizeof(int));
    cudaMemcpy(dStr, str, len * sizeof(char), cudaMemcpyHostToDevice);
    makeBinaries<<<1, len>>>( dStr, dBinArr);
    // This copy synchronizes with the kernel launch above.
    cudaMemcpy(binArr, dBinArr, len * sizeof(int), cudaMemcpyDeviceToHost);
    printf("Output: \n");
    for(int i = 0 ; i < len; i++) {
        printf("'%c' -> %10d\n", str[i] , binArr[i]);
    }
    // BUGFIX: device and host buffers were previously leaked.
    cudaFree(dStr);
    cudaFree(dBinArr);
    free(binArr);
    free(str);
}
|
14,278 | #include "includes.h"
// Row-wise numerically-stable softmax with bias, in place:
// for each row of softMaxP (cols wide) computes
//   sp[j] = exp(sp[j] + b[j] - rowMax) / rowSum.
// Launch: one block per row; dynamic shared memory must hold
// 2 * blockDim.x floats (running maxima in the first half, running sums in
// the second half).
__global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols)
{
int bid = blockIdx.x;
extern __shared__ float _share[];
float * _max = _share;
float * _sum = _share + blockDim.x;
// This block's row.
float* sp = softMaxP + bid * cols;
_sum[threadIdx.x] = 0.0;
// NOTE(review): a finite sentinel rather than -INFINITY — assumes all
// inputs are greater than -1e8; confirm the upstream value range.
_max[threadIdx.x] = -100000000.0;
// Phase 1: add the bias in place and track a per-thread maximum over this
// thread's strided slice of the row.
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] += b[id];
_max[threadIdx.x] = max(_max[threadIdx.x], sp[id]);
}
}
__syncthreads();
// Tree-reduce the per-thread maxima into _max[0]; the (len + 1) >> 1 skip
// handles odd lengths.
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
if(_max[threadIdx.x] < _max[threadIdx.x + skip])
{
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
// Phase 2: exponentiate with the row maximum subtracted (numerical
// stability) and accumulate per-thread partial sums.
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] -= _max[0];
sp[id] = __expf(sp[id]);
_sum[threadIdx.x] += sp[id];
}
}
__syncthreads();
// Tree-reduce the partial sums into _sum[0].
len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
// Phase 3: normalize the row.
for(int tid = 0; tid < cols; tid += blockDim.x){
int id = tid + threadIdx.x;
if(id < cols){
sp[id] /= _sum[0];
}
}
} |
14,279 | #include<iostream>
// Writes each element's own row-major linear index into m.
// Expects a 2-D launch covering at least w x h threads; extra threads exit.
__global__ void fill(int * m, std::size_t w , std::size_t h)
{
    const auto col = blockIdx.x * blockDim.x + threadIdx.x;
    const auto row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= w || row >= h)
        return;
    const auto linear = row * w + col;
    m[linear] = linear;
}
// Fills a 10x10 matrix with linear indices on the GPU and prints it.
int main() {
    const std::size_t width = 10;
    const std::size_t height = 10;
    const std::size_t count = width * height;
    int *hostBuf = nullptr;
    int *devBuf = nullptr;
    // Pinned host buffer plus a matching device buffer.
    cudaMallocHost(&hostBuf, count * sizeof(int));
    cudaMalloc(&devBuf, count * sizeof(int));
    // One 32x32 thread tile; round the grid up to cover the whole matrix.
    const dim3 block(32, 32);
    const dim3 grid((width - 1) / block.x + 1, (height - 1) / block.y + 1);
    fill<<<grid, block>>>(devBuf, width, height);
    // cudaMemcpy synchronizes with the kernel launch above.
    cudaMemcpy(hostBuf, devBuf, count * sizeof(int), cudaMemcpyDeviceToHost);
    for (std::size_t row = 0; row < height; ++row) {
        for (std::size_t col = 0; col < width; ++col) {
            std::cout << hostBuf[row * width + col] << ' ';
        }
        std::cout << std::endl;
    }
    cudaFree(devBuf);
    cudaFreeHost(hostBuf);
    return 0;
}
|
14,280 | #include "includes.h"
// Adds 1.0f to each of the first 'max' elements of a, one element per
// thread; threads whose global index is past 'max' do nothing.
__global__ void alligned_access(float* a,int max){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < max) {
        a[gid] = a[gid] + 1;
    }
}
14,281 | #include "includes.h"
// Element-wise vector addition: pd[i] = md[i] + nd[i] for i < size.
// One thread per element; out-of-range threads return immediately.
__global__ void arradd(const int *md, const int *nd, int *pd, int size){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= size)
        return;
    pd[gid] = md[gid] + nd[gid];
}
14,282 | #include <stdio.h>
#include <math.h>
#define N 5000000
// c[i] = a[i] + b[i] for i < N (N is a file-level macro).
// One thread per element; extra threads in the last block do nothing.
__global__ void vecadd(float *a, float *b, float *c) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) return;
    c[gid] = a[gid] + b[gid];
}
// Adds two N-element vectors on the GPU.
int main() {
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    size_t vecSize = N * sizeof(float);
    a = (float*)malloc(vecSize);
    b = (float*)malloc(vecSize);
    c = (float*)malloc(vecSize);
    // BUGFIX: bail out on allocation failure instead of dereferencing NULL.
    if (!a || !b || !c) {
        free(a); free(b); free(c);
        return 1;
    }
    // BUGFIX: the inputs were previously copied to the device uninitialized
    // (undefined behavior); give them deterministic values.
    for (int i = 0; i < N; i++) {
        a[i] = (float)i;
        b[i] = (float)(N - i);
    }
    // Allocate device memory for vector a, b and c
    cudaMalloc((void**)&d_a, vecSize);
    cudaMalloc((void**)&d_b, vecSize);
    cudaMalloc((void**)&d_c, vecSize);
    // Transfer data from host to device
    cudaMemcpy(d_a, a, vecSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, vecSize, cudaMemcpyHostToDevice);
    // Call kernel: one thread per element, grid rounded up (integer
    // ceil-div replaces the float ceil() of the original).
    int threadsPerBlock = 256;
    int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    vecadd<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c);
    // Transfer data from device to host (synchronizes with the kernel)
    cudaMemcpy(c, d_c, vecSize, cudaMemcpyDeviceToHost);
    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a); free(b); free(c);
    return 0;
}
14,283 | #include <iostream>
#include <chrono>
using namespace std::chrono;
using namespace std;
#define n (1 << 2)
// Computes d_c = d_a * d_b for n x n row-major int matrices (n is a macro).
// One thread per output element; threads past the matrix edge return.
__global__ void matrix_multiplication_kernel(int *d_a, int *d_b, int *d_c){
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n) return;
    // Accumulate the dot product in a register, then store once.
    int acc = 0;
    for (int k = 0; k < n; k++) {
        acc += d_a[row*n + k] * d_b[k*n + col];
    }
    d_c[row*n + col] = acc;
}
// Multiplies two n x n integer matrices on CPU and GPU (n is a tiny
// file-level macro, 1 << 2) and verifies that both results agree.
int main(){
size_t bytes = n*n*sizeof(int);
int *h_a, *h_b, *h_c;
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Deterministic inputs: each element equals its linear index.
for(int i = 0; i < n*n; i++){
h_a[i] = i;
h_b[i] = i;
}
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_a[i*n + j] << "\t";
// }
// cout << endl;
// }
// cout << "*" << endl;
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_b[i*n + j] << "\t";
// }
// cout << endl;
// }
cout << "cpu: " << endl;
auto start = high_resolution_clock::now();
// Reference triple-loop multiplication on the host.
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
h_c[i*n + j] = 0;
for(int k=0; k<n; k++){
h_c[i*n+j] += h_a[i*n+k] * h_b[k*n+j];
}
}
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
cout << "cpu time: " << duration.count() << endl;
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_c[i*n + j] << "\t";
// }
// cout << endl;
// }
cout << "gpu: " << endl;
// NOTE: the GPU timing below includes allocation, transfers and printing,
// not just the kernel.
start = high_resolution_clock::now();
int *h_d = (int*)malloc(bytes);
int *d_a, *d_b, *d_d;
cudaMalloc((void**) &d_a, bytes);
cudaMalloc((void**) &d_b, bytes);
cudaMalloc((void**) &d_d, bytes);
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
// Round the grid up; the kernel bounds-checks the overhang threads.
int BLOCK_SIZE = 32;
int GRID_SIZE = (n/BLOCK_SIZE) + 1;
dim3 grid(GRID_SIZE, GRID_SIZE);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
matrix_multiplication_kernel<<<grid,block>>>(d_a,d_b,d_d);
cudaDeviceSynchronize();
cudaMemcpy(h_d, d_d, bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < n; i++){
for(int j=0; j<n; j++){
cout << h_d[i*n + j] << "\t";
}
cout << endl;
}
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
cout << "gpu time: " << duration.count() << endl;
// Element-wise comparison of the CPU and GPU results.
bool error_occurred = false;
for(int i = 0; i < n; i++){
for(int j=0; j<n; j++){
if(h_d[i*n + j] - h_c[i*n + j] != 0){
cout << "Some error occurred" <<endl;
error_occurred = true;
}
}
}
if(error_occurred == false) cout << "No error" <<endl;
} |
14,284 | /*
Copyright 2016 Thomas Luu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
File: ncx2cdfinv.cu
Computation of the noncentral chi-squared quantile function.
Based on:
Luu, T; (2016) Fast and accurate parallel computation of quantile functions for
random number generation. Doctoral thesis, UCL (University College London).
http://discovery.ucl.ac.uk/1482128/
*/
#ifndef NCX2CDFINV
#define NCX2CDFINV
#include <math_constants.h>
#if 1
#define TOL 0.1
#else
#define TOL 0.01
#endif
// Sankaran's normal approximation to the noncentral chi-squared quantile:
// maps the uniform input through the standard-normal quantile and a
// power-law moment match with k degrees of freedom and noncentrality l.
// The caller (ncx2cdfinv) falls back to luu() in the lower tail or when
// this returns NaN.
__host__ __device__ inline double sankaran(double u, double k, double l)
{
// h, p, m, mu, s are the moment-matching constants of the approximation.
double h = 1.0 - CUDART_TWOTHIRD * (k + l) * (k + l + l + l) / ((k + l + l) * (k + l + l));
double p = (k + l + l) / ((k + l) * (k + l));
double m = (h - 1.0) * (1.0 - (h + h + h));
double mu = 1.0 + h * p * (h - 1.0 - (1.0 - h * 0.5) * m * p);
double s = h * sqrt(p + p) * (1 + m * p * 0.5);
// Normal quantile of u, shifted/scaled, then the power transform inverted.
double z = normcdfinv(u);
double x = z * s + mu;
return (k + l) * pow(x, 1.0 / h);
}
// Power-law change of variables used by the lower-tail quantile expansion:
// v(u) = (c*u)^(2/k). The 'l' parameter is unused here but kept so the
// signature mirrors v_inv.
__host__ __device__ inline double v(double u, double k, double l, double c)
{
    const double base = c * u;
    const double exponent = 2.0 / k;
    return pow(base, exponent);
}
// Inverse of v(): recovers u from x = v(u) via u = x^(k/2) / c.
// The 'l' parameter is unused but kept for signature symmetry with v().
__host__ __device__ inline double v_inv(double x, double k, double l, double c)
{
    const double powered = pow(x, k * 0.5);
    return powered / c;
}
// Lower-tail approximation of the noncentral chi-squared quantile from
// Luu (2016). If u_split is non-null, also reports the switch-over point:
// the largest u for which this tail expansion is expected to stay within
// TOL; above it the caller should prefer sankaran().
__host__ __device__ inline double luu(double u, double k, double l, double *u_split = 0)
{
// Leading-order constant of the tail expansion.
double c = 0.5 * exp2(0.5 * k) * exp(0.5 * l) * k * tgamma(0.5 * k);
double vv = v(u, k, l, c);
double v_split;
if (u_split != 0) {
// Ratios of successive expansion terms, used to bound the truncation
// error and derive the switch-over point.
double rg2 = (k * (2.0 + k)) / (k - l);
double rg3 = (2.0 * k * k * (2.0 + k) * (2.0 + k) * (4.0 + k)) / (6.0 * l * l * (k - 1) + 2.0 * l * (8.0 - 5.0 * k) * k + k * k * (5.0 * k - 8.0));
if (k == l) {
// rg2 divides by (k - l); when k == l use the third-order ratio instead.
v_split = cbrt(TOL * fabs(rg3));
} else {
v_split = sqrt(TOL * fabs(rg2));
}
*u_split = v_inv(v_split, k, l, c);
}
return vv;
}
// Noncentral chi-squared quantile function (inverse CDF).
// u: probability in [0, 1]; k: degrees of freedom; l: noncentrality.
// Combines the lower-tail expansion (luu) with Sankaran's normal
// approximation, switching at the u_split reported by luu().
__host__ __device__ inline double ncx2cdfinv(double u, double k, double l)
{
// Exact endpoint values.
if (u == 0.0) {
return 0.0;
}
if (u == 1.0) {
// CUDART_INF is a device-side constant; use the host INFINITY otherwise.
#ifdef __CUDA_ARCH__
return CUDART_INF;
#else
return INFINITY;
#endif
}
double sankaran_approx = sankaran(u, k, l);
double u_split;
double luu_approx = luu(u, k, l, &u_split);
// Sankaran's formula can produce NaN (e.g. a negative base raised to a
// fractional power); in that case trust the tail expansion.
if (isnan(sankaran_approx)) return luu_approx;
return u < u_split ? luu_approx : sankaran_approx;
}
#endif
|
14,285 | #include <stdio.h>
// Busy-waits for approximately *p_us_time microseconds by spinning on an
// increment loop, using an empirically calibrated adds-per-microsecond rate.
// p_us_time: pointer to the desired delay in microseconds (read as int).
__device__ void addSleep(void *p_us_time)
{
    //This method will sleep for clockRate*kernel_time many clock ticks
    // which is equivalent to sleeping for kernel_time milliseconds
    int time = *((int *) p_us_time);
    //float AddPerUs = 10.26188; //Ben
    float AddPerUs = 9.89759943623274; //Scott
    // float AddPerUs = 1; // Test
    float adds = time*AddPerUs;
    // BUGFIX: the counter must be volatile — without it the compiler is free
    // to optimize the side-effect-free spin loop away entirely, making the
    // "sleep" a no-op.
    volatile int temp = 0;
    while(temp < adds){
        temp = temp + 1;
    }
}
|
14,286 | #include "cudaSegSLIC.cuh"
#include "cudaUtil.cuh"
#include <stdio.h>
// Host-side driver for SLIC superpixel segmentation.
// maskBuffer : per-pixel cluster label output (nWidth x nHeight)
// floatBuffer: input image, one float4 color value per pixel
// nSegs      : requested number of superpixels; the actual count is rounded
//              to a full grid of square cells (nClustersPerCol*nClustersPerRow)
// weight     : SLIC compactness weight balancing color vs. spatial distance
__host__ void SLICImgSeg(int* maskBuffer, float4* floatBuffer,
int nWidth, int nHeight, int nSegs,
SLICClusterCenter* vSLICCenterList,
float weight)
{
// Side length of one (square) cluster cell and the resulting grid shape.
int nClusterSize=(int)sqrt((float)iDivUp((nWidth*nHeight),nSegs));
int nClustersPerCol=iDivUp(nHeight,nClusterSize);
int nClustersPerRow=iDivUp(nWidth,nClusterSize);
// A cell larger than MAX_BLOCK_SIZE threads is split across several blocks.
int nBlocksPerCluster=iDivUp(nClusterSize*nClusterSize,MAX_BLOCK_SIZE);
int nSeg=nClustersPerCol*nClustersPerRow;
int nBlockWidth=nClusterSize;
int nBlockHeight=iDivUp(nClusterSize,nBlocksPerCluster);
dim3 ThreadPerBlock_init(nClustersPerRow); //x
dim3 BlockPerGrid_init(nClustersPerCol); //y
dim3 ThreadPerBlock(nBlockWidth,nBlockHeight);
dim3 BlockPerGrid(nBlocksPerCluster,nSeg);
kInitClusterCenters<<<BlockPerGrid_init,ThreadPerBlock_init>>>(floatBuffer,nWidth,nHeight,nSegs,vSLICCenterList);
//5 iterations have already given good result
for (int i=0;i<5;i++)
{
kIterateKmeans<<<BlockPerGrid,ThreadPerBlock>>>(maskBuffer,floatBuffer,nWidth,nHeight,nSeg,nClustersPerRow,vSLICCenterList,true, weight);
kUpdateClusterCenters<<<BlockPerGrid_init,ThreadPerBlock_init>>>(floatBuffer,maskBuffer,nWidth,nHeight,nSeg,vSLICCenterList);
}
// Final assignment pass so the labels match the last center update.
kIterateKmeans<<<BlockPerGrid,ThreadPerBlock>>>(maskBuffer,floatBuffer,nWidth,nHeight,nSeg,nClustersPerRow,vSLICCenterList,true, weight);
}
// Initializes one cluster center per thread on a regular grid: the center
// position is the middle of its grid cell and its color is sampled from the
// single pixel at that middle.
// Launch: blockDim.x = clusters per row, gridDim.x = clusters per column.
__global__ void kInitClusterCenters( float4* floatBuffer, int nWidth, int nHeight, int nSegs, SLICClusterCenter* vSLICCenterList )
{
// Size of one grid cell in pixels.
int blockWidth=nWidth/blockDim.x;
int blockHeight=nHeight/gridDim.x;
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
// Linear offset of this cell's top-left pixel.
int offsetBlock = blockIdx.x * blockHeight * nWidth + threadIdx.x * blockWidth;
float2 avXY;
// Center of the cell in image coordinates.
avXY.x=threadIdx.x*blockWidth + (float)blockWidth/2.0;
avXY.y=blockIdx.x*blockHeight + (float)blockHeight/2.0;
//use a single point to init center
int offset=offsetBlock + blockHeight/2 * nWidth+ blockWidth/2 ;
float4 fPixel=floatBuffer[offset];
vSLICCenterList[clusterIdx].lab=fPixel;
vSLICCenterList[clusterIdx].xy=avXY;
vSLICCenterList[clusterIdx].nPoints=0;
}
// One k-means assignment pass of SLIC: every pixel examines the 3x3
// neighborhood of cluster centers around its own cell and picks the center
// with the smallest combined color + spatial (SLIC) distance.
// Launch (see SLICImgSeg): blockIdx.y selects the cluster cell; blockIdx.x
// and blockDim partition the cell into horizontal slabs.
// If bLabelImg is set, the winning center index is written to maskBuffer.
__global__ void kIterateKmeans( int* maskBuffer, float4* floatBuffer,
int nWidth, int nHeight, int nSegs, int nClusterIdxStride,
SLICClusterCenter* vSLICCenterList,
bool bLabelImg, float weight)
{
//for reading cluster centers
__shared__ float4 fShareLab[3][3];
__shared__ float2 fShareXY[3][3];
//pixel index
// NOTE(review): pixelUpdateList/pixelUpdateIdx are written below but never
// read after the final __syncthreads(); they look like a leftover of an
// in-kernel center-update scheme — confirm before removing.
__shared__ SLICClusterCenter pixelUpdateList[MAX_BLOCK_SIZE];
__shared__ float2 pixelUpdateIdx[MAX_BLOCK_SIZE];
int clusterIdx=blockIdx.y;
// Grid coordinates of this block's cluster cell.
int blockCol=clusterIdx%nClusterIdxStride;
int blockRow=clusterIdx/nClusterIdxStride;
//int upperBlockHeight=blockDim.y*gridDim.x;
int lowerBlockHeight=blockDim.y;
int blockWidth=blockDim.x;
int upperBlockHeight=blockWidth;
int innerBlockHeightIdx=lowerBlockHeight*blockIdx.x+threadIdx.y;
// SLIC compactness: spatial distances are scaled by 1/(S/M)^2, where S is
// the cell size (blockWidth) and M the user-supplied weight.
float M=weight;
float invWeight=1/((blockWidth/M)*(blockWidth/M));
int offsetBlock = (blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight)*nWidth+blockCol*blockWidth;
int offset=offsetBlock+threadIdx.x+threadIdx.y*nWidth;
// Valid range of 3x3 neighbor indices, clipped at the image borders.
int rBegin=(blockRow>0)?0:1;
int rEnd=(blockRow+1>(gridDim.y/nClusterIdxStride-1))?1:2;
int cBegin=(blockCol>0)?0:1;
int cEnd=(blockCol+1>(nClusterIdxStride-1))?1:2;
// The first 3x3 threads stage the neighboring centers in shared memory.
if (threadIdx.x<3 && threadIdx.y<3)
{
if (threadIdx.x>=cBegin && threadIdx.x<=cEnd && threadIdx.y>=rBegin && threadIdx.y<=rEnd)
{
int cmprIdx=(blockRow+threadIdx.y-1)*nClusterIdxStride+(blockCol+threadIdx.x-1);
fShareLab[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].lab;
fShareXY[threadIdx.y][threadIdx.x]=vSLICCenterList[cmprIdx].xy;
}
}
__syncthreads();
// Threads past the cell's height or the end of the image have no pixel.
// NOTE(review): these early returns mean not all threads reach the final
// __syncthreads(); harmless only because nothing is exchanged after it —
// confirm.
if (innerBlockHeightIdx>=blockWidth)
{
return;
}
if (offset>=nWidth*nHeight)
{
return;
}
// finding the nearest center for current pixel
float fY=blockRow*upperBlockHeight+blockIdx.x*lowerBlockHeight+threadIdx.y;
float fX=blockCol*blockWidth+threadIdx.x;
if (fY<nHeight && fX<nWidth)
{
float4 fPoint=floatBuffer[offset];
float minDis=9999;
int nearestCenter=-1;
int nearestR, nearestC;
for (int r=rBegin;r<=rEnd;r++)
{
for (int c=cBegin;c<=cEnd;c++)
{
int cmprIdx=(blockRow+r-1)*nClusterIdxStride+(blockCol+c-1);
//compute SLIC distance
float fDab=(fPoint.x-fShareLab[r][c].x)*(fPoint.x-fShareLab[r][c].x)
+(fPoint.y-fShareLab[r][c].y)*(fPoint.y-fShareLab[r][c].y)
+(fPoint.z-fShareLab[r][c].z)*(fPoint.z-fShareLab[r][c].z);
//fDab=sqrt(fDab);
float fDxy=(fX-fShareXY[r][c].x)*(fX-fShareXY[r][c].x)
+(fY-fShareXY[r][c].y)*(fY-fShareXY[r][c].y);
//fDxy=sqrt(fDxy);
float fDis=fDab+invWeight*fDxy;
if (fDis<minDis)
{
minDis=fDis;
nearestCenter=cmprIdx;
nearestR=r;
nearestC=c;
}
}
}
if (nearestCenter>-1)
{
int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
pixelUpdateList[pixelIdx].lab=fPoint;
pixelUpdateList[pixelIdx].xy.x=fX;
pixelUpdateList[pixelIdx].xy.y=fY;
pixelUpdateIdx[pixelIdx].x=nearestC;
pixelUpdateIdx[pixelIdx].y=nearestR;
if (bLabelImg)
{
maskBuffer[offset]=nearestCenter;
}
}
}
else
{
int pixelIdx=threadIdx.y*blockWidth+threadIdx.x;
pixelUpdateIdx[pixelIdx].x=-1;
pixelUpdateIdx[pixelIdx].y=-1;
}
__syncthreads();
}
/* Recompute each SLIC cluster center as the mean Lab color / XY position of
 * the pixels currently assigned to it.
 *
 * Launch layout: one thread per cluster, clusterIdx = blockIdx.x*blockDim.x
 * + threadIdx.x; the search-window half-size is derived from the launch
 * configuration (nWidth/blockDim.x horizontally, nHeight/gridDim.x vertically).
 *
 * Fixes vs. original:
 *  - guard clusterIdx against nSegs (the parameter was previously unused,
 *    so over-provisioned launches wrote past vSLICCenterList);
 *  - skip the averaging division when nPoints == 0, which previously
 *    produced NaN/Inf centers for empty clusters (previous center is kept).
 */
__global__ void kUpdateClusterCenters(float4* floatBuffer, int* maskBuffer, int nWidth, int nHeight, int nSegs, SLICClusterCenter* vSLICCenterList)
{
    int blockWidth = nWidth / blockDim.x;   // horizontal half-size of the search window
    int blockHeight = nHeight / gridDim.x;  // vertical half-size of the search window
    int clusterIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (clusterIdx >= nSegs)
        return;

    float2 crntXY = vSLICCenterList[clusterIdx].xy;
    float4 avLab = {0.f, 0.f, 0.f, 0.f};
    float2 avXY = {0.f, 0.f};
    int nPoints = 0;

    // Clamp the search window around the current center to the image bounds.
    int yBegin = 0 < (crntXY.y - blockHeight) ? (crntXY.y - blockHeight) : 0;
    int yEnd = nHeight > (crntXY.y + blockHeight) ? (crntXY.y + blockHeight) : (nHeight - 1);
    int xBegin = 0 < (crntXY.x - blockWidth) ? (crntXY.x - blockWidth) : 0;
    int xEnd = nWidth > (crntXY.x + blockWidth) ? (crntXY.x + blockWidth) : (nWidth - 1);

    // Accumulate color and position of every pixel labeled with this cluster.
    for (int i = yBegin; i < yEnd; i++)
    {
        for (int j = xBegin; j < xEnd; j++)
        {
            int offset = j + i * nWidth;
            float4 fPixel = floatBuffer[offset];
            if (maskBuffer[offset] == clusterIdx)
            {
                avLab.x += fPixel.x;
                avLab.y += fPixel.y;
                avLab.z += fPixel.z;
                avXY.x += j;
                avXY.y += i;
                nPoints++;
            }
        }
    }

    // Only divide when at least one pixel was assigned (avoids NaN centers);
    // an empty cluster keeps its previous center.
    if (nPoints > 0)
    {
        avLab.x /= nPoints;
        avLab.y /= nPoints;
        avLab.z /= nPoints;
        avXY.x /= nPoints;
        avXY.y /= nPoints;
        vSLICCenterList[clusterIdx].lab = avLab;
        vSLICCenterList[clusterIdx].xy = avXY;
    }
    vSLICCenterList[clusterIdx].nPoints = nPoints;
}
|
14,287 | #include<bits/stdc++.h>
using namespace std;
#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)
#define N 1024
#define BL 32
#define NA 20
/* Sums the blockDim.x elements of A into S[0] (S[0] must be 0 on entry).
 * Fix: the original did an unsynchronized read-modify-write of S[0] from
 * every thread — a data race with an undefined result; atomicAdd serializes
 * the concurrent updates correctly.
 */
__global__ void suma(int *A, int *S){
    atomicAdd(S, A[threadIdx.x]);
    // Debug trace only: S[0] here is a snapshot, not necessarily the final sum.
    printf("A[t]: %d S[0]: %d\n", A[threadIdx.x], S[0]);
}
/* Driver: sums NA ones on the device and prints the result (expected: NA).
 * Fix: the original leaked the host arrays `a` and `s`; `s` is now a stack
 * variable and `a` is deleted before exit.
 */
int main(){
    // Host input: NA ones.
    int *a = new int[NA];
    for(int i = 0; i < NA; i++) a[i] = 1;
    int *A;
    cudaMalloc(&A, NA * sizeof(int));
    cudaMemcpy(A, a, NA * sizeof(int), cudaMemcpyHostToDevice);

    // Device accumulator, zero-initialized via host copy.
    int s = 0;
    int *S;
    cudaMalloc(&S, sizeof(int));
    cudaMemcpy(S, &s, sizeof(int), cudaMemcpyHostToDevice);

    suma<<<1,NA>>>(A, S);
    cout<<"llego"<<endl;

    // Blocking cudaMemcpy on the default stream also synchronizes with the kernel.
    cudaMemcpy(&s, S, sizeof(int), cudaMemcpyDeviceToHost);
    cout<<s<<endl;

    cudaFree(A);
    cudaFree(S);
    delete[] a;
    return 0;
}
|
14,288 | // ReSharper disable IdentifierTypo
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <math.h>
#include <ctime>
#define RAND_SEED_KERNEL 1504
#define CUDA_GRID_SIZE 256
#define CUDA_BLOCK_SIZE 64
// Describes one contiguous region of a larger device allocation that the
// memory-test kernel randomly reads from.
struct mem_test_block
{
    size_t p_start;        // byte offset of the region start
    size_t p_end;          // byte offset of the region end (exclusive)
    size_t element_count;  // number of int elements, minus one safety slot
    int* ptr;              // base device pointer of the whole allocation

    mem_test_block() {
        p_start = 0;
        p_end = 0;
        element_count = 0;
        ptr = nullptr;
    }

    mem_test_block(size_t start, size_t end, int* ptr) {
        this->p_start = start;
        this->p_end = end;
        // Fix: use exact integer arithmetic. The original routed the count
        // through floorf(); float cannot represent values above 2^24 exactly,
        // which corrupted element_count for multi-GB regions.
        this->element_count = (end - start) / sizeof(int) - 1;
        this->ptr = ptr;
    }
};
cudaError_t perform_memory_test(mem_test_block* mem_blocks, int mem_blocks_count, int iterations, float** took_time_stats, float* took_time_all, int tests_count);
cudaError_t perform_test_with_stats(mem_test_block* mem_blocks, int mem_blocks_count, int iterations, int tests_count);
/* Draws a uniformly distributed integer in [min, max] for thread `ind`.
 * Fix: persist the advanced RNG state back into random_state[ind]; the
 * original only mutated a local copy, so every subsequent call from the same
 * thread produced the identical "random" value.
 */
__device__ int kernel_rand_generate(curandState* random_state, int ind, const int min, const size_t max) {
    auto local_state = random_state[ind];
    auto value = curand_uniform(&local_state);  // (0, 1]
    value *= (max - min + 0.999999);            // stretch to cover the closed range
    value += min;
    random_state[ind] = local_state;            // write back the advanced state
    return static_cast<int>(truncf(value));
}
/* Initializes one curand state per thread of the whole grid.
 * Fix: use the global thread index. The original used only threadIdx.x, so
 * with the CUDA_GRID_SIZE x CUDA_BLOCK_SIZE launch every block re-seeded
 * states 0..blockDim.x-1 while the states that mem_kernel_process reads for
 * idx >= blockDim.x stayed uninitialized.
 */
__global__ void mem_kernel_setup(curandState* state)
{
    const int id = threadIdx.x + blockDim.x * blockIdx.x;
    curand_init(RAND_SEED_KERNEL, id, 0, &state[id]);
}
/* Random-read stress kernel: each thread is assigned to one memory region
 * (round-robin by global thread index) and performs `iterations` random int
 * reads inside it. `a` and the trailing `a *= 2` exist only so the reads are
 * not optimized away.
 * NOTE(review): the block_index bounds check below can never fire — a modulo
 * result is always < mem_blocks_count; kept as defensive code.
 */
__global__ void mem_kernel_process(curandState* random_state, mem_test_block* mem_test_blocks, const int mem_blocks_count, int iterations)
{
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const auto block_index = idx % mem_blocks_count;
if (block_index >= mem_blocks_count) {
printf("mem_kernel_process: Memory region by index %d out of bounds\n", block_index);
return;
}
const auto mem_block = mem_test_blocks[block_index];
int a = 0;
int reads = 0;
if (mem_blocks_count == 1) {
reads = 2; // presumably to equalize total reads vs. the two-region test — confirm
}
else {
reads = 1;
}
for (auto i = 0; i < iterations; ++i) {
// Pick a random element index inside this thread's region
const auto random_mem_index = kernel_rand_generate(random_state, idx, 0, mem_block.element_count);
if (random_mem_index < 0) {
printf("mem_kernel_process: Trying to access invalid memory block index = %d\n", random_mem_index);
return;
}
// Read access to memory within our block
for (auto k = 0; k < reads; ++k) {
a = mem_block.ptr[random_mem_index];
}
}
a *= 2; // keep `a` live so the compiler cannot elide the loop
}
/* Entry point: measures random-access performance over different regions of
 * one large device allocation.
 * Fixes vs. original:
 *  - the 11 GiB region size was written as (size_t)(1024*1024*1024*11); the
 *    multiplication overflows `int` (undefined behavior) before the cast —
 *    it is now computed entirely in size_t;
 *  - cudaFree is issued before cudaDeviceReset (freeing after the reset
 *    targets a destroyed context).
 */
int main()
{
    size_t freeMem, totalMem;
    auto cuda_status = cudaMemGetInfo(&freeMem, &totalMem);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "Failed to get GPU memory information! (cudaMemGetInfo = %d)\n", cuda_status);
        return 1;
    }

    const auto test_iterations = 30000;
    const auto test_count = 50;
    const auto used_memory = totalMem - freeMem;
    const auto cuda_memory_block_size = freeMem - (used_memory * 2);
    // 11 GiB minus 1 KiB, computed in size_t from the start (no int overflow).
    const size_t test_region_size = (size_t)11 * 1024 * 1024 * 1024 - 1024;
    printf("CUDA Memory [free=%llu Mb, total=%llu Mb, used=%llu Mb, allocated=%llu]\n", freeMem / 1024 / 1024, totalMem / 1024 / 1024, used_memory / 1024 / 1024, cuda_memory_block_size / 1024 / 1024);

    // Allocate (nearly) all free memory as one big block.
    int* cuda_memory_block = nullptr;
    cuda_status = cudaMalloc(reinterpret_cast<void**>(&cuda_memory_block), cuda_memory_block_size);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "Allocation failed for CUDA block! (cudaMalloc = %d)\n", cuda_status);
        return 1;
    }

    // Test #1: one region at the start of the block.
    const auto test_1_mem_blocks = static_cast<mem_test_block*>(malloc(sizeof(mem_test_block) * 1));
    test_1_mem_blocks[0] = mem_test_block{ 0, test_region_size, cuda_memory_block };
    cuda_status = perform_test_with_stats(test_1_mem_blocks, 1, test_iterations, test_count);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "#1 memory test failed!\n");
        cudaFree(cuda_memory_block);
        return 1;
    }
    free(test_1_mem_blocks);

    // Test #2: two regions, one at each end of the block.
    const auto test_2_mem_blocks = static_cast<mem_test_block*>(malloc(sizeof(mem_test_block) * 2));
    test_2_mem_blocks[0] = mem_test_block{ 0, test_region_size, cuda_memory_block };
    test_2_mem_blocks[1] = mem_test_block{ cuda_memory_block_size - test_region_size, cuda_memory_block_size, cuda_memory_block };
    cuda_status = perform_test_with_stats(test_2_mem_blocks, 2, test_iterations, test_count);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "#2 memory test failed!\n");
        cudaFree(cuda_memory_block);
        return 1;
    }
    free(test_2_mem_blocks);

    // Test #3: one region at the end of the block.
    const auto test_3_mem_blocks = static_cast<mem_test_block*>(malloc(sizeof(mem_test_block) * 1));
    test_3_mem_blocks[0] = mem_test_block{ cuda_memory_block_size - test_region_size, cuda_memory_block_size, cuda_memory_block };
    cuda_status = perform_test_with_stats(test_3_mem_blocks, 1, test_iterations, test_count);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "#3 memory test failed!\n");
        cudaFree(cuda_memory_block);
        return 1;
    }
    free(test_3_mem_blocks);

    // Release the block before tearing the context down.
    cudaFree(cuda_memory_block);
    cuda_status = cudaDeviceReset();
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
/* Runs one memory-test configuration and prints average/total timings.
 * Fix: frees the per-test timing array that perform_memory_test allocates
 * (it was previously leaked on every call, including error paths).
 */
cudaError_t perform_test_with_stats(mem_test_block* mem_blocks, int mem_blocks_count, const int iterations, const int tests_count) {
    float time_took_tests_all = 0;
    float* time_took_tests = nullptr;  // initialized so free() is safe on early failure
    const auto cuda_status = perform_memory_test(mem_blocks, mem_blocks_count, iterations, &time_took_tests, &time_took_tests_all, tests_count);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "** FAILED to perform memory test with %d regions of a memory block.\n", mem_blocks_count);
        free(time_took_tests);  // may be nullptr when the test failed before allocating
        return cuda_status;
    }
    float tests_took_total = 0;
    for (auto i = 0; i < tests_count; ++i) {
        tests_took_total += time_took_tests[i];
    }
    printf("%d tests with %d iterations took - %f ms avg, %f ms total.\n\n", tests_count, iterations, tests_took_total / tests_count, time_took_tests_all);
    free(time_took_tests);
    return cuda_status;
}
/* Runs `tests_count` timed launches of mem_kernel_process over the given
 * memory regions. Per-test times (ms) are returned via *took_time_stats
 * (malloc'd here, owned by the caller) and accumulated into *took_time_all.
 * Fixes vs. original:
 *  - the device copy of mem_blocks is allocated and copied once, outside
 *    the timing loop (the original leaked one cudaMalloc per test and
 *    included allocation time in the measurement);
 *  - dev_states, the device descriptor buffer and both events are released
 *    before returning.
 */
cudaError_t perform_memory_test(mem_test_block* mem_blocks, int mem_blocks_count, const int iterations, float** took_time_stats, float* took_time_all, int tests_count)
{
    auto cuda_status = cudaSetDevice(0);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed, no CUDA GPU? Lel.");
        return cuda_status;
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    printf("Setting up memory kernel (memory_regions = %d) ...\n", mem_blocks_count);
    curandState* dev_states = nullptr;
    cudaMalloc(&dev_states, iterations * sizeof(curandState));
    srand(time(nullptr));

    // Seed one curand state per launched thread.
    mem_kernel_setup <<< CUDA_GRID_SIZE, CUDA_BLOCK_SIZE, 0 >>> (dev_states);
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "mem_kernel_setup launch failed: %s\n", cudaGetErrorString(cuda_status));
        cudaFree(dev_states);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        return cuda_status;
    }

    // Copy the region descriptors to the device once, outside the timed loop.
    mem_test_block* mem_blocks_cuda_ptr = nullptr;
    cuda_status = cudaMalloc(reinterpret_cast<void**>(&mem_blocks_cuda_ptr), sizeof(mem_test_block) * mem_blocks_count);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "Allocation failed for memory access region (cudaMalloc = %d)\n", cuda_status);
        cudaFree(dev_states);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        return cuda_status;
    }
    cuda_status = cudaMemcpy(mem_blocks_cuda_ptr, mem_blocks, sizeof(mem_test_block) * mem_blocks_count, cudaMemcpyHostToDevice);
    if (cuda_status != cudaSuccess) {
        fprintf(stderr, "Failed to copy shared memory to CUDA (cudaMemcpy = %d)\n", cuda_status);
        cudaFree(mem_blocks_cuda_ptr);
        cudaFree(dev_states);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        return cuda_status;
    }

    printf("Launching memory kernel (memory_regions = %d) ...\n", mem_blocks_count);
    *took_time_stats = static_cast<float*>(malloc(sizeof(float) * tests_count));
    const auto time_stat = *took_time_stats;
    for (auto i = 0; i < tests_count; ++i) {
        cudaEventRecord(start);
        mem_kernel_process <<< CUDA_GRID_SIZE, CUDA_BLOCK_SIZE, 0 >>> (dev_states, mem_blocks_cuda_ptr, mem_blocks_count, iterations);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cuda_status = cudaGetLastError();
        if (cuda_status != cudaSuccess) {
            fprintf(stderr, "mem_kernel_process launch failed: %s\n", cudaGetErrorString(cuda_status));
            break;
        }
        cudaEventElapsedTime(&time_stat[i], start, stop);
        printf("- test #%d took %f ms\n", i, time_stat[i]);
        *took_time_all += time_stat[i];
    }

    cudaFree(mem_blocks_cuda_ptr);
    cudaFree(dev_states);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return cuda_status;
}
|
14,289 | /* Host side code that calls a GPU kernel to perform vector addition on the GPU using a single thread block.
We restrict the size of the vector to be up to 1024 elements which is the maximum thread block size on this
GPU.
Author: Naga Kandasamy
Date modified: May 3, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#define NUM_ELEMENTS 1024
/* Include the kernel code during the compiler preprocessing step */
#include "vector_addition_kernel.cu"
void run_test(void);
void compute_on_device(float *, float *, float *, int);
extern "C" void compute_gold(float *, float *, float *, int);
/* Program entry point: runs the CPU vs. GPU vector-addition comparison. */
int main(int argc, char **argv)
{
    run_test();
    return EXIT_SUCCESS;
}
/* Perform vector addition on the CPU and the GPU */
/* Exercises vector addition on both CPU and GPU and reports the accumulated
 * absolute difference between the two result vectors. */
void run_test(void)
{
    int n = NUM_ELEMENTS;
    int nbytes = sizeof(float) * n;
    int i;
    float diff;

    /* Host buffers: two inputs, the CPU reference output, the GPU output */
    float *A = (float *)malloc(nbytes);
    float *B = (float *)malloc(nbytes);
    float *reference = (float *)malloc(nbytes);
    float *device_out = (float *)malloc(nbytes);

    /* Fill inputs with integer-valued floats in [0, 5) */
    for (i = 0; i < n; i++) {
        A[i] = floorf(5 * (rand() / (float)RAND_MAX));
        B[i] = floorf(5 * (rand() / (float)RAND_MAX));
    }

    compute_gold(A, B, reference, n);        /* CPU reference result */
    compute_on_device(A, B, device_out, n);  /* GPU result */

    /* Accumulate the elementwise absolute differences */
    diff = 0.0;
    for (i = 0; i < n; i++)
        diff += fabsf(reference[i] - device_out[i]);
    printf("Difference between the CPU and GPU result = %f\n", diff);

    free((void *)A);
    free((void *)B);
    free((void *)reference);
    free((void *)device_out);
    return;
}
/* Vector addition on GPU */
/* Copies the input vectors to the GPU, launches the single-block addition
 * kernel, and copies the result back into gpu_result. Assumes num_elements
 * fits in one thread block (<= 1024 on this GPU). */
void compute_on_device(float *A_on_host, float *B_on_host, float *gpu_result, int num_elements)
{
    size_t nbytes = num_elements * sizeof(float);
    float *d_A = NULL;
    float *d_B = NULL;
    float *d_C = NULL;

    /* Device input vectors */
    cudaMalloc((void **)&d_A, nbytes);
    cudaMalloc((void **)&d_B, nbytes);
    cudaMemcpy(d_A, A_on_host, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B_on_host, nbytes, cudaMemcpyHostToDevice);

    /* Device output vector */
    cudaMalloc((void **)&d_C, nbytes);

    /* One thread per element, all in a single block */
    dim3 thread_block(num_elements, 1, 1);
    dim3 grid(1, 1);
    vector_addition_kernel<<<grid, thread_block>>>(d_A, d_B, d_C, num_elements);

    /* Blocking copy; also synchronizes with the kernel on the default stream */
    cudaMemcpy(gpu_result, d_C, nbytes, cudaMemcpyDeviceToHost);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
14,290 | /**
* File: hello_gpu.cu
**/
/* Kernel – does nothing*/
#include <stdio.h>
/* Intentionally empty kernel; exists only to demonstrate a GPU launch. */
__global__ void mykernel(void) {
}
/* Launches the no-op kernel and prints a greeting from the host. */
int main(void) {
    mykernel<<<1, 1>>>(); /* asynchronous no-op launch on the GPU */
    printf("Hello GPU!\n");
    return 0;
}
|
14,291 | #include "conv2d-transpose-kernel-grad.hh"
#include "graph.hh"
#include "../runtime/graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Op computing the gradient of a 2-D transposed convolution with respect to
// its kernel tensor; the op's output shape is the four kernel_size dims.
// Predecessors are {y, input}.
// NOTE(review): m_strides keeps the caller's pointer instead of copying the
// stride values — the strides array must outlive this op; confirm callers.
Conv2DTransposeKernelGrad::Conv2DTransposeKernelGrad(Op* y, Op* input,
const int strides[], const int kernel_size[])
: Op("conv2d_transpose_kernel_grad",
Shape({kernel_size[0], kernel_size[1],
kernel_size[2],kernel_size[3]}),
{y, input})
, m_strides(strides)
{
// Kernel dimensions are copied by value (unlike strides).
m_kernel_size[0] = kernel_size[0];
m_kernel_size[1] = kernel_size[1];
m_kernel_size[2] = kernel_size[2];
m_kernel_size[3] = kernel_size[3];
}
// Builds the runtime node computing the kernel gradient from the compiled
// predecessors and registers it (plus its output buffer) with the graph.
void Conv2DTransposeKernelGrad::compile()
{
auto& g = Graph::instance();
// Compiled forms of the upstream gradient (y) and the layer input.
auto& cy = g.compiled(preds()[0]);
auto& cinput = g.compiled(preds()[1]);
Shape out_shape({m_kernel_size[0], m_kernel_size[1], m_kernel_size[2], m_kernel_size[3]});
dbl_t* out_data = tensor_alloc(out_shape.total());
int y_size[4] = { cy.out_shape[0], cy.out_shape[1],
cy.out_shape[2], cy.out_shape[3]};
int input_size[4] = { cinput.out_shape[0], cinput.out_shape[1],
cinput.out_shape[2], cinput.out_shape[3]};
int kernel_size[4] = { shape_get()[0], shape_get()[1],
shape_get()[2], shape_get()[3]};
auto out_node = rt::Node::op_conv2d_transpose_kernel_grad(cy.out_data, cinput.out_data,
m_strides, out_data, y_size,
input_size, kernel_size,
{cy.out_node, cinput.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
14,292 | #include "includes.h"
/* C = A * B where A is x-by-y, B is y-by-x and C is x-by-x (row-major);
 * one thread per output element, 2-D launch expected.
 * Fix: the store to c[] was outside the bounds guard, so threads of a
 * partially filled block wrote out of bounds; the write now happens only
 * for in-range (row, col).
 */
__global__ void multiply(int* a, int* b, int* c, int x, int y) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < x && col < x) {
        int acc = 0;
        for (int i = 0; i < y; i++) {
            acc += a[row * y + i] * b[i * x + col];
        }
        c[row * x + col] = acc;
    }
}
14,293 | #include "includes.h"
/* P = M * N for square width-by-width matrices (row-major), one thread per
 * output element.
 * Fix: added a bounds guard so launches whose grid overshoots `width` no
 * longer read and write out of bounds (the original had no guard at all).
 */
__global__ void MatrixMult(int *M, int *N, int *P, int width)
{
    int tx = blockIdx.x*blockDim.x + threadIdx.x;  // output column
    int ty = blockIdx.y*blockDim.y + threadIdx.y;  // output row
    if (tx >= width || ty >= width)
        return;
    int Pv = 0;
    for (int i = 0; i < width; i++) {
        Pv += M[ty*width+i] * N[i*width+tx];
    }
    P[ty*width + tx] = Pv;
}
14,294 | #include <stdio.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
#include <thrust/random.h>
#include <iostream>
#define DATA_SIZE 100000000
// Counting-iterator functor: maps index n to a reproducible uniform float in
// [a, b) by discarding n draws from a freshly seeded default engine.
struct psrngen
{
    __host__ __device__ psrngen(float _a, float _b) : a(_a), b(_b) {}

    __host__ __device__ float operator()(const unsigned int n) const
    {
        thrust::default_random_engine engine;
        engine.discard(n);  // jump ahead so each index gets an independent draw
        thrust::uniform_real_distribution<float> uniform(a, b);
        return uniform(engine);
    }

    float a, b;
};
/* One-sample Z-test over DATA_SIZE uniform(0,1) samples generated on the GPU.
 *
 * hypothesis_value : mean under the null hypothesis
 * alpha            : significance level
 * test_type        : 1 = left-tailed, 2 = right-tailed, 3 = two-sided
 * Returns the p-value of the requested test (0 for an unknown test_type).
 *
 * Fix: the two-sided branch used abs(Z), which can bind the integer ::abs
 * overload and truncate Z toward zero; it now uses fabs.
 */
template <class R, class S, class T>
T hypothesis_test(R hypothesis_value, T alpha, S test_type){
    srand(time(NULL));
    int _seed = rand();

    // Fill D1 with DATA_SIZE pseudo-random uniforms.
    thrust::device_vector<float> D1(DATA_SIZE);
    thrust::counting_iterator<unsigned int> index_sequence_begin(_seed);
    thrust::transform( index_sequence_begin,
                       index_sequence_begin + (DATA_SIZE),
                       D1.begin(),
                       psrngen(0.0, 1.0f));
    thrust::device_vector<T> D2(D1.size());

    // Sample mean.
    T mean = thrust::reduce( D1.begin(),
                             D1.end(),
                             (T)0,
                             thrust::plus<T>())/D1.size();
    // Center the data in place: D1[i] -= mean.
    thrust::transform( D1.begin(),
                       D1.end(),
                       thrust::make_constant_iterator(mean),
                       D1.begin(),
                       thrust::minus<T>());
    // Squared deviations into D2.
    thrust::transform( D1.begin(),
                       D1.end(),
                       D1.begin(),
                       D2.begin(),
                       thrust::multiplies<T>());
    // Unbiased sample variance and the Z statistic.
    T variance = thrust::reduce(D2.begin(), D2.end(),(T)0, thrust::plus<T>())/(D1.size()-1);
    T standard_deviation = sqrt(variance);
    T Z = (mean - hypothesis_value)/(standard_deviation/sqrt((T)D1.size()));

    // p-values via the standard normal CDF Phi(z) = 0.5*(1 + erf(z/sqrt(2))).
    T left = (0.5)*(1.0 + erf(Z/sqrt(2.0)));
    T right = 1.0 - (0.5)*(1.0 + erf(Z/sqrt(2.0)));
    T two_sided = 2.0*(1.0 - (0.5)*(1.0 + erf(fabs(Z)/sqrt(2.0))));

    if (test_type == 1){
        if(left < alpha){
            printf("We reject the null hypothesis\n");
        }
        else{
            printf("We fail to reject the null hypothesis\n");
        }
        return left;
    }
    else if(test_type == 2){
        if(right < alpha){
            printf("We reject the null hypothesis \n");
        }
        else{
            printf("We fail to reject the null hypothesis \n");
        }
        return right;
    }
    else if(test_type == 3){
        if(two_sided < alpha){
            printf("We reject the null hypothesis \n");
        }
        else{
            printf("we fail to reject the null hypothesis \n");
        }
        return two_sided;
    }
    else return 0;
}
// Driver: two-sided test of H0: mean == 0.5 at the 5% significance level.
int main(){
    const float p_value = hypothesis_test(0.5, 0.05, 3);
    printf("%.4f \n", p_value);
    return 0;
}
|
14,295 | // Esempio collective operations: reduce_sum
#include <cooperative_groups.h>
//using namespace cooperative_groups;
namespace cg = cooperative_groups;
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define RNG_MAX_VAL 3 // 5 // 50 // max rng val for array elems
// Aborts with file/line context whenever a CUDA API call reports an error.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;  // fast path: nothing to report
    std::cout << cudaGetErrorString(err) << " in " << file << " line " << line << std::endl;
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fills v[0..n-1] with random integers in [0, RNG_MAX_VAL].
void init_vec(int *v, int n) {
    for (int idx = 0; idx < n; ++idx)
        v[idx] = std::experimental::randint(0, RNG_MAX_VAL);
}
// Prints the vector as a comma-separated list, framed by blank lines.
// v[0] is printed unconditionally, so callers must pass n >= 1 (matches
// existing usage, which only prints small non-empty vectors).
void show_vec(int *v, int n) {
    std::cout << "\n" << v[0];
    for (int idx = 1; idx < n; ++idx)
        std::cout << ", " << v[idx];
    std::cout << "\n" << std::endl;
}
// Sequential reference sum of v[0..n-1] (used to validate the GPU result).
int cpu_sum(int *v, int n) {
    int total = 0;
    for (int idx = 0; idx < n; ++idx)
        total += v[idx];
    return total;
}
// Codice Prof
// Tree reduction over a cooperative-groups thread group.
// `temp` must point to at least g.size() ints of shared memory and every
// thread of the group must call this; only thread_rank() == 0 is guaranteed
// to return the complete sum.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val) {
int lane = g.thread_rank();
// Each pass halves the number of active lanes; every thread first publishes
// its running value, then the low half accumulates the matching high lane.
for (int i=g.size()/2; i>0; i/=2) {
temp[lane] = val;
g.sync(); // wait until all writes of this pass are visible
if (lane < i) val += temp[lane+i];
g.sync(); // protect temp[] before the next pass overwrites it
}
return val; // only thread 0 holds the full sum
}
// Per-thread partial sum using a grid-stride loop over int4 vector loads.
// Assumes n is a multiple of 4 and `input` is 16-byte aligned (required by
// the int4 reinterpretation); any trailing n%4 elements would be skipped.
__device__ int thread_sum(int *input, int n) {
int sum=0;
for (int i=blockIdx.x * blockDim.x + threadIdx.x;
i<n/4;
i+=blockDim.x * gridDim.x) { // grid-stride loop
int4 in = ((int4*)input)[i]; // vector load: 4 ints per transaction
sum += in.x + in.y + in.z + in.w;
}
return sum;
}
// Grid-wide sum: each block reduces its threads' partial sums in shared
// memory, then one thread per block atomically adds the block total to *sum.
// Requires blockDim.x * sizeof(int) bytes of dynamic shared memory (third
// launch parameter) and *sum pre-initialized to 0.
__global__ void sum_kernel_block(int *sum, int *input, int n) {
int my_sum = thread_sum(input, n);
extern __shared__ int temp[]; // size set by the 3rd <<<...>>> launch argument
cg::thread_block g = cg::this_thread_block();
int block_sum = reduce_sum(g, temp, my_sum);
if(g.thread_rank() == 0)
atomicAdd(sum, block_sum);
}
// END // professor's code
// WARNING: works only for n = 2^k with k > 1.
// Full-size configuration (kept for reference):
//int n = 1<<24; // array len = 16M // n=2^24 // bit shift operation
//int blockSize = 256;
////int nBlocks = (n+blockSize-1) / blockSize; // work as ceiling
//int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // fewer blocks: each thread consumes an int4 (4 ints)
//int sharedBytes = blockSize * sizeof(int);
// Toy configuration:
int n = 16;
int blockSize = 2;
int nBlocks = (n+(blockSize*4)-1) / (blockSize*4); // ceil-div; each thread consumes an int4 (4 ints)
int sharedBytes = blockSize * sizeof(int);
/* Driver: builds a random vector, sums it on the GPU via sum_kernel_block,
 * and checks the result against a CPU reference sum.
 * Fixes vs. original:
 *  - d_sum is zero-initialized before the launch (cudaMalloc does not zero
 *    memory and the kernel accumulates into it with atomicAdd, so the old
 *    code added the block sums to garbage);
 *  - host allocations are freed before exit.
 */
int main( void ) {
    //int seed = (int)time(NULL);
    int seed = 1619508961;  // fixed seed for reproducible runs
    std::experimental::reseed(seed);
    std::cout << "seed = " << seed << std::endl;

    std::cout << "\nn = " << n << std::endl;
    std::cout << "blockSize = " << blockSize << std::endl;
    std::cout << "nBlocks = " << nBlocks << std::endl;
    std::cout << "sharedBytes = " << sharedBytes << "\n" << std::endl;

    size_t data_size = (size_t)n*sizeof(int);
    int *sum, *data;
    sum = (int*)malloc(sizeof(int));
    data = (int*)malloc(data_size);

    int *d_sum, *d_data;
    HANDLE_ERROR(cudaMalloc((void**)&d_sum, sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&d_data, data_size));

    init_vec(data,n);
    if (n < 32) // show the vector only when it is small
        show_vec(data,n);

    HANDLE_ERROR(cudaMemcpy(d_data, data, data_size, cudaMemcpyHostToDevice));
    // The kernel accumulates into *d_sum with atomicAdd, so it must start at 0.
    HANDLE_ERROR(cudaMemset(d_sum, 0, sizeof(int)));

    sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(d_sum, d_data, n);
    HANDLE_ERROR(cudaDeviceSynchronize());
    HANDLE_ERROR(cudaMemcpy(sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost));

    int c_sum = cpu_sum(data,n);
    std::cout << "c_sum = " << c_sum << std::endl;
    std::cout << "g_sum = " << *sum << std::endl;
    if (c_sum == *sum)
        std::cout << "\nCorrect" << std::endl;
    else
        std::cout << "\nWRONG!" << std::endl;

    cudaFree(d_data);
    cudaFree(d_sum);
    free(data);
    free(sum);
    return 0;
}
|
//Per-block sum of squares: block b computes sum_k X[b*dim + k]^2 and writes
//it to XSqr[b]. Requires dimpow2 = smallest power of two >= dim and
//dimpow2 floats of dynamic shared memory; the 512 / ">> 9" constants tile
//each row over a block of up to 512 threads in K batches.
//NOTE(review): the final store at the bottom is executed by every thread of
//the block (all write the same x[0]); benign, but a threadIdx.x == 0 guard
//would be cleaner.
__global__ void getSumSquares(float* X, float* XSqr, int dim, int dimpow2) {
extern __shared__ float x[];
int i, k;
int offset = blockIdx.x*dim;
int jump = dimpow2;
int sumjump = jump >> 1;
//Step 0: Figure out K (number of batches per block)
int K = dimpow2 >> 9;
if (K == 0) {
K = 1;
}
if (jump > 512) {
jump = 512;
}
//Step 1: Copy each row element to shared memory, squaring in the process;
//indices in [dim, dimpow2) are zero-padded for the power-of-two reduction.
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
if (i < dim) {
x[i] = X[offset + i]*X[offset + i];
}
else if (i < dimpow2) {
x[i] = 0.0;
}
}
__syncthreads();
//Step 2: Tree reduction in shared memory; each pass halves sumjump, with
//K/jump re-derived so up to 512 threads cover the active range in batches.
while (sumjump > 0) {
if (threadIdx.x < sumjump) {
K = sumjump >> 9;
if (K == 0) {
K = 1;
}
jump = sumjump;
if (jump > 512) {
jump = 512;
}
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
x[i] += x[i + sumjump];
}
}
sumjump = sumjump >> 1;
__syncthreads();
}
//Step 3: Copy back the reduced total for this row
XSqr[blockIdx.x] = x[0];
}
//CSM is N x M
//Finishes a Euclidean cross-distance matrix: given the dot-product matrix
//CSM (CSM[i][j]) and squared norms XSqr (length M) / YSqr (length N),
//rewrites CSM[i][j] = sqrt(max(0, XSqr[j] + YSqr[i] - 2*CSM[i][j])).
//One block per row; MPow2 = smallest power of two >= M, and the 512/">>9"
//constants tile the columns over a block of up to 512 threads.
__global__ void finishCSM(float* CSM, float* XSqr, float* YSqr, int N, int M, int MPow2) {
int offset = blockIdx.x*M;
int K = MPow2 >> 9;
int i;
int k, jump = MPow2;
float val = 0.0;
if (K == 0) {
K = 1;
}
if (jump > 512) {
jump = 512;
}
for (k = 0; k < K; k++) {
i = k*jump + threadIdx.x;
if (i < M) {
val = XSqr[i] + YSqr[blockIdx.x];
val = val - 2*CSM[offset + i];
//Clamp tiny negative values caused by floating-point rounding
if (val < 0) {
val = 0;
}
CSM[offset + i] = sqrt(val);
}
}
}
14,297 | #include "includes.h"
using namespace std;
// this amazingly nice error checking function is stolen from:
//https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
// Elementwise vector add: c[i] = a[i] + b[i], one thread per element
// (single-block launch assumed; threadIdx.x indexes directly).
__global__ void addKernel(double *c, const double *a, const double *b) {
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
14,298 | /*
* María Fernanda Mora Alba, 103596
* Arquitectura de computadoras - Maestría en Ciencias en Computación
* Introducción a los conceptos de CUDA
* Cálculo conjunto de Mandelbrot
*/
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include <time.h>
#define NPOINTS 1000
#define MAXITER 2000
/* Minimal complex number (double-precision real and imaginary parts). */
struct complex{
double real;
double imag;
};
/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);
/* One thread per grid point: iterates z <- z^2 + c up to MAXITER times and
 * records in d_res whether the point escaped (1) or stayed bounded (0).
 * The 100x100 blocks of 10x10 threads launch matches NPOINTS exactly, so
 * idx/idy are always in range.
 * Fixes vs. original:
 *  - the trailing unconditional store overwrote the "escaped" flag with 0,
 *    so every point was counted as inside the set; the result is now
 *    written exactly once per thread;
 *  - removed the per-thread debug printf (NPOINTS^2 prints dominated the
 *    runtime).
 */
__global__ void mandelbrot(int *d_res)
{
    double ztemp;
    struct complex z, c;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    /* Map the grid point to the complex plane (small offset avoids exact 0) */
    c.real = -2.0+2.5*(double)(idx)/(double)(NPOINTS)+1.0e-7;
    c.imag = 1.125*(double)(idy)/(double)(NPOINTS)+1.0e-7;
    z = c;

    int escaped = 0;
    for(int iter=0; iter<MAXITER; iter++){
        ztemp = (z.real*z.real)-(z.imag*z.imag)+c.real;
        z.imag = z.real*z.imag*2+c.imag;
        z.real = ztemp;
        if((z.real*z.real+z.imag*z.imag)>4.0e0){
            escaped = 1;  /* |z| > 2: the orbit diverges */
            break;
        }
    }
    d_res[idx+NPOINTS*idy] = escaped;
}
/* Host driver: estimates the Mandelbrot set area by counting grid points
 * that do not escape within MAXITER iterations.
 * Fix: cudaThreadSynchronize() is deprecated; replaced with the equivalent
 * cudaDeviceSynchronize().
 */
int main(int argc, char *argv[])
{
    int numoutside = 0;
    double area, error;
    time_t t1,t2;
    t1 = time(NULL);

    int *h_res; /* host-side result grid */
    int *d_res; /* device-side result grid */
    size_t sz = NPOINTS * NPOINTS * sizeof(int);
    h_res = (int *) malloc(sz);
    cudaMalloc((void**) &d_res, sz);

    /* Start from an all-zero grid on both sides */
    for(int i = 0; i < NPOINTS*NPOINTS; i++){
        h_res[i] = 0;
    }
    cudaMemcpy(d_res, h_res, sz, cudaMemcpyHostToDevice);

    /* 100x100 blocks of 10x10 threads == NPOINTS x NPOINTS points */
    dim3 dimGrid(100,100);
    dim3 dimBlock(10,10);
    mandelbrot<<<dimGrid,dimBlock>>>(d_res);
    cudaDeviceSynchronize();
    checkCUDAError("kernel invocation");
    cudaMemcpy(h_res,d_res,sz,cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");
    t2 = time(NULL);

    /* Count the points flagged as escaped */
    for(int i=0; i < NPOINTS*NPOINTS; i++){
        if(h_res[i] > 0){
            numoutside++;
        }
    }

    /* Bounding-box area times the fraction of non-escaped points */
    area=2.0*2.5*1.125*(double)(NPOINTS*NPOINTS-numoutside)/(double)(NPOINTS*NPOINTS);
    error=area/(double)NPOINTS;
    printf("Area del conjunto de Mandlebrot = %12.8f +/- %12.8f\n",area,error);
    printf("Tiempo de ejecucion: %f segundos \n",difftime(t2,t1));

    cudaFree(d_res);
    free(h_res);
    return 0;
}
/* Utility function to check for and report CUDA errors */
/* Utility: abort with a labeled message if the most recent CUDA call failed. */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(EXIT_FAILURE);
}
|
14,299 | #include<bits/stdc++.h>
#include<cuda.h>
using namespace std;
// One directed edge u -> v with capacity c and current flow f.
// Residual-graph view: forward capacity = c - f, backward capacity = f.
struct edge{
int u, v, c, f;
};
// finds augmenting path in non-deterministic fashion
// One relaxation step of the parallel augmenting-path search: one thread per
// edge. A thread extends the visited frontier across its edge (forwards when
// residual capacity c - f > 0, backwards when f > 0), claiming the newly
// reached vertex's parent slot with atomicCAS so exactly one edge wins.
// *progress counts newly visited vertices; the host relaunches this kernel
// until it stays 0.
// NOTE(review): vis[] is read without atomics, so a vertex visited during
// this launch may be missed by other threads; the host's repeat-until-no-
// progress loop makes the search converge regardless.
__global__ void find_augmenting_path(edge *d_edges, int m, int *vis, int *par,
int *current_flow, int *progress){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < m){
int u = d_edges[id].u, v = d_edges[id].v, c = d_edges[id].c, f = d_edges[id].f;
// forward edge u->v exists in the residual graph: try to claim par[v]
if(vis[u] && !vis[v] && f < c && atomicCAS(par+v, -1, id) == -1){
vis[v] = 1;
current_flow[v] = min(current_flow[u], c - f); // bottleneck so far
atomicAdd(progress, 1);
}
// reverse edge v->u exists in the residual graph: try to claim par[u]
if(vis[v] && !vis[u] && f > 0 && atomicCAS(par+u, -1, id) == -1){
vis[u] = 1;
current_flow[u] = min(current_flow[v], f);
atomicAdd(progress, 1);
}
}
}
// augemnts along path found by find_augmenting_path
// Applies `flow` along the found path by walking parent edges from sink t
// back to the source (vertex 0); launched <<<1,1>>> because the walk is
// inherently sequential. par[cur] holds the index of the edge that reached
// `cur`: if cur is that edge's tail u, the path traversed it backwards (flow
// is cancelled); otherwise forwards (flow is added).
__global__ void augment(edge* d_edges, int* par, int t, int flow){
int cur = t;
while(cur){
int idx = par[cur];
int u = d_edges[idx].u, v = d_edges[idx].v;
if(cur == u){
d_edges[idx].f -= flow; // traversed as a reverse (residual) edge
cur = v;
}
else{
d_edges[idx].f += flow; // traversed forwards
cur = u;
}
}
}
/* Max flow with GPU-parallel augmenting-path search.
 * Reads "n m" then m edges "u v c" (1-based) from the file named in argv[1];
 * source is vertex 0, sink is vertex n-1.
 * Fixes vs. original: device and host buffers are released before exit and
 * main returns 0 explicitly.
 */
int main(int argc, char* argv[]){
    auto clk=clock();
    if(argc < 2){
        cout<<"Enter file name"<<endl;
        return 0;
    }
    int n, m, INF = 1000000000;
    edge *edges, *d_edges;
    int *vis, *par, *progress, *current_flow;

    ifstream fin(argv[1]);
    fin >> n >> m;
    edges = new edge[m];
    for(int i = 0; i < m; i++){
        fin >> edges[i].u >> edges[i].v >> edges[i].c;
        edges[i].u--;   // convert to 0-based vertex ids
        edges[i].v--;
        edges[i].f = 0;
    }

    cudaMalloc(&d_edges, m * sizeof(edge));
    cudaMalloc(&vis, n * sizeof(int));
    cudaMalloc(&par, n * sizeof(int));
    cudaMalloc(&current_flow, n * sizeof(int));
    cudaMalloc(&progress, sizeof(int));
    cudaMemcpy(d_edges, edges, m*sizeof(edge), cudaMemcpyHostToDevice);

    int threads = 1024;
    int blocks = ceil((float)m/threads);
    int total_flow = 0;
    while(true){
        cudaMemset(vis, 0, n * sizeof(int));
        cudaMemset(par, -1, n * sizeof(int));   // byte pattern 0xFF == -1 per int
        cudaMemset(current_flow, 0, n * sizeof(int));
        // Mark the source visited. cudaMemset is byte-wise, so vis[0] becomes
        // 0x01010101 — any nonzero value counts as "visited" in the kernels.
        cudaMemset(vis, 1, sizeof(int));
        cudaMemcpy(current_flow, &INF, sizeof(int), cudaMemcpyHostToDevice);

        int prog, t_reachable, cur_flow;
        // Parallel frontier expansion: repeat until no new vertex is reached.
        do{
            cudaMemset(progress, 0, sizeof(int));
            find_augmenting_path<<<blocks,threads>>>(d_edges, m, vis, par, current_flow, progress);
            cudaMemcpy(&prog, progress, sizeof(int), cudaMemcpyDeviceToHost);
        }while(prog);

        cudaMemcpy(&t_reachable, vis + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(&cur_flow, current_flow + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
        if(!t_reachable){
            assert(!cur_flow);
            break;
        }
        // Path augmentation walks parent links serially, hence <<<1,1>>>.
        augment<<<1,1>>>(d_edges, par, n-1 , cur_flow);
        total_flow += cur_flow;
    }

    double t_elapsed = (double)(clock()-clk)/CLOCKS_PER_SEC;
    printf("|V|:%d |E|:%d Flow:%d\nTime:%f\n", n, m, total_flow, t_elapsed);

    // Fix: release device and host memory (previously leaked).
    cudaFree(d_edges);
    cudaFree(vis);
    cudaFree(par);
    cudaFree(current_flow);
    cudaFree(progress);
    delete[] edges;
    return 0;
}
14,300 | #include <iostream>
using namespace std;
const int vsize = 512;
const int blocksize = 256;
// Elementwise sum z[i] = x[i] + y[i].
// Indexing uses the file-scope constant `blocksize` instead of blockDim.x,
// so the launch MUST use exactly `blocksize` threads per block.
__global__
void vsum(float *x, float *y, float *z)
{
//int i = blockIdx.x * blockDim.x + threadIdx.x; // equivalent when blockDim.x == blocksize
int i = blockIdx.x * blocksize + threadIdx.x;
if(i < vsize)    // guard the tail when vsize is not a multiple of blocksize
z[i] = x[i]+y[i];
}
/* Driver: adds two vsize-element vectors on the GPU and prints the result.
 * Fixes vs. original:
 *  - the init loop ran from 1 to vsize inclusive, leaving A[0]/B[0]
 *    uninitialized and writing one element past each array;
 *  - the kernel was launched with the HOST pointers (A, B, C) instead of the
 *    device pointers, an illegal address on the GPU;
 *  - the result is now copied back from dC before printing (C was never
 *    written by the device), and the pointless upload of uninitialized C
 *    was removed;
 *  - the print loop shows C[i+j] (16 values per row) instead of re-printing
 *    C[0..31] on every row.
 */
int main()
{
    float *A = (float*)malloc(vsize*sizeof(float));
    float *B = (float*)malloc(vsize*sizeof(float));
    float *C = (float*)malloc(vsize*sizeof(float));
    for(int i = 0; i < vsize; i++)
    {
        A[i] = (float)i;
        B[i] = (float)i;
    }

    float *dA, *dB, *dC;
    cudaMalloc(&dA, vsize * sizeof(float));
    cudaMalloc(&dB, vsize * sizeof(float));
    cudaMalloc(&dC, vsize * sizeof(float));
    cudaMemcpy(dA, A, vsize*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, vsize*sizeof(float), cudaMemcpyHostToDevice);

    vsum<<<(vsize/blocksize), blocksize>>>(dA, dB, dC);
    cudaDeviceSynchronize();
    cudaMemcpy(C, dC, vsize*sizeof(float), cudaMemcpyDeviceToHost);

    // Print the result, 16 values per row.
    for (int i = 0; i < vsize; i += 16)
    {
        for (int j = 0; j < 16; j++)
            cout << C[i+j] << " ";
        cout << endl;
    }

    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    free(A);
    free(B);
    free(C);
    return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.