serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
5,401 | __global__ void create_combined_escape_carry_newline_count_index(char *file, long n, char *escape_carry_index, int *newline_count_index) {
// First pass of a parallel escape/newline scan: each thread owns one
// 64-byte-aligned chunk of `file` and records (a) whether its chunk ends in
// an odd-length run of backslashes (the escape "carry") and (b) how many
// '\n' characters the chunk contains. A later pass presumably resolves the
// carry chain across chunks -- TODO confirm against the follow-up kernel.
// escape_carry_index / newline_count_index must have one slot per thread.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// At least 64 chars per thread, then rounded up to a multiple of 64 so
// chunk boundaries stay aligned.
long normal_chars_per_thread = max((n+stride-1) / stride, 64L);
long chars_per_thread = ((normal_chars_per_thread + 64 - 1) / 64) * 64;
long start = index * chars_per_thread;
long end = start + chars_per_thread;
// There are essentially two cases:
// - The last character in the previous block is an escape character.
// - The last character in the previous block is not an escape character.
// However, we don't know in advance which one it is, because
// we are not sequential. So, here we'll basically
// calculate the carry of each thread assuming the initial
// carry is 0.
char carry = 0;
int count = 0;
for (long i = start; i < end && i < n; i += 1) {
char value = file[i];
if (value == '\\') {
// Each backslash toggles the carry: only an odd-length run of
// backslashes leaves the following character escaped.
carry = 1 ^ carry;
} else {
carry = 0;
}
if (value == '\n') {
count += 1;
}
}
// Threads whose chunk lies entirely past n never enter the loop and
// correctly publish (0, 0) here.
escape_carry_index[index] = carry;
newline_count_index[index] = count;
}
|
5,402 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: conditionally rewrites `comp`
// through nested loops of float arithmetic and prints the final value from
// the device. Generated code -- the inner loops deliberately redeclare `i`,
// shadowing the outer loop variable, and `comp` (a by-value parameter) is
// used as the accumulator.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,int var_7,int var_8,int var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float* var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp >= (-1.6656E23f - (var_2 + +1.8501E-43f * (-1.6050E-35f / (-1.3931E-35f - var_3))))) {
if (comp > -0.0f - (-0.0f * var_4 + var_5)) {
if (comp < (+1.7990E15f + var_6)) {
for (int i=0; i < var_1; ++i) {
comp = -1.5762E-36f / var_10 + (var_11 - var_12);
comp += (var_13 / var_14 / -1.4057E36f);
for (int i=0; i < var_7; ++i) {
float tmp_1 = (+1.1568E-37f + ldexpf(-1.9930E-35f * sinf((+1.1203E34f + (-1.6866E-43f + (var_15 / var_16)))), 2));
comp += tmp_1 / ldexpf(-1.5442E-36f / -1.6635E34f - var_17, 2);
}
for (int i=0; i < var_8; ++i) {
// var_18 is written without a bounds check; the harness allocates
// 10 elements, so var_8 is assumed <= 10 by the generator.
var_18[i] = var_19 / coshf(tanhf(ceilf(var_20 - var_21)));
float tmp_2 = logf(var_22 / logf(-1.6411E27f + var_23 - -1.5443E4f));
comp = tmp_2 * var_18[i] - -0.0f + var_24 / +1.5382E16f;
comp = acosf(var_25 - +1.4894E35f / +1.1725E-35f);
}
for (int i=0; i < var_9; ++i) {
comp += sqrtf(var_26 - logf((-1.4186E-36f * (var_27 - +1.9571E-16f / (var_28 / (var_29 - var_30))))));
}
}
}
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array on the host and fills every slot
// with v. The caller owns the returned buffer (allocated with malloc).
float* initPointer(float v) {
    const int kCount = 10;
    float *buffer = (float*) malloc(sizeof(float) * kCount);
    int slot = 0;
    while (slot < kCount) {
        buffer[slot] = v;
        ++slot;
    }
    return buffer;
}
// Generated test harness: parses 31 positional arguments and forwards them
// to the single-thread `compute` kernel.
// NOTE(review): argv[1]..argv[31] are read without checking argc -- the
// generator's driver is assumed to always supply all 31 arguments.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
int tmp_8 = atoi(argv[8]);
int tmp_9 = atoi(argv[9]);
int tmp_10 = atoi(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
// tmp_19 is the one pointer argument: a 10-element host-side... actually
// device-dereferenced array. NOTE(review): this passes a HOST pointer to a
// kernel, which is invalid under the runtime API unless unified addressing
// of host allocations applies -- kept as generated.
float* tmp_19 = initPointer( atof(argv[19]) );
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
// Single thread, single block: the kernel is a scalar expression test.
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31);
// Wait for the kernel so its printf output is flushed before exit.
cudaDeviceSynchronize();
return 0;
}
|
5,403 | #include "includes.h"
// Writes `value` into every element of `matrix` whose row index lies in the
// inclusive band [id_min, id_max]. Expects a 2D grid of 1D blocks; the flat
// element id is rebuilt from (blockIdx.y, blockIdx.x, threadIdx.x).
// `cols` is the row width, `size` the total element count.
__global__ void SetMatrixVauleMinMaxY( float* matrix, int cols, int size, int id_min, int id_max, float value)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int id_row = id / cols;
// Only touch rows inside the requested band, and never past the buffer end.
if (id_row >= id_min && id_row <= id_max && id < size)
matrix[id] = value;
} |
5,404 | #include <iostream>
#include <math.h>
#include <fstream>
#include <vector>
#include <sstream>
#include <time.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
using namespace std;
const int THREADS_PER_BLOCK = 256;
/**
* CUDA Kernel Device code
*/
/*****************************************************************************/
/* Linearly rescales every pixel from [minpix, maxpix] to [0, 255] in place.
   One thread per pixel, 1D launch; threads past imageSize do nothing.
   BUG FIX: when the image is uniform (maxpix == minpix) the old code divided
   by zero; such images now map to 0. The trailing __syncthreads() was also
   removed -- threads touch disjoint elements, so no barrier is needed. */
__global__ void scaleImageCuda (int *pixels, int minpix, int maxpix, int imageSize) {
    /* blockDim.x gives the number of threads per block, combining it
       with threadIdx.x and blockIdx.x gives the index of each global
       thread in the device */
    int index = (blockDim.x * blockIdx.x) + threadIdx.x;
    /* Typical problems are not friendly multiples of blockDim.x.
       Avoid accessing data beyond the end of the arrays */
    if (index < imageSize) {
        int range = maxpix - minpix;
        pixels[index] = (range != 0)
            ? (int) round(((double)(pixels[index] - minpix) / range) * 255)
            : 0;
    }
}
// Sobel edge detector: for each interior pixel, computes the horizontal and
// vertical 3x3 Sobel gradients from `pixels` and writes the gradient
// magnitude into `tempImage`; border pixels (and out-of-band cases) are
// written as 0. One thread per pixel, 1D launch over imageSize elements.
__global__ void edgeDetectionCuda (int *pixels, int *tempImage, int width, int height, int imageSize) {
/* blockDim.x gives the number of threads per block, combining it
with threadIdx.x and blockIdx.x gives the index of each global
thread in the device */
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
int x = 0, y = 0;
int xG = 0, yG = 0;
/* Typical problems are not friendly multiples of blockDim.x.
Avoid accesing data beyond the end of the arrays */
if (index < imageSize) {
// Recover 2D coordinates from the flat index (x = column, y = row).
x = index % width;
if (index != 0) {
y = __double2int_rd((__int2double_rn(index) / __int2double_rn(width)));
}
// Only interior pixels have a full 3x3 neighbourhood.
if (x < (width - 1) && y < (height - 1)
&& (y > 0) && (x > 0)) {
//index = x + (y * width)
//Finds the horizontal gradient
xG = (pixels[(x+1) + ((y-1) * width)]
+ (2 * pixels[(x+1) + (y * width)])
+ pixels[(x+1) + ((y+1) * width)]
- pixels[(x-1) + ((y-1) * width)]
- (2 * pixels[(x-1) + (y * width)])
- pixels[(x-1) + ((y+1) * width)]);
//Finds the vertical gradient
yG = (pixels[(x-1) + ((y+1) * width)]
+ (2 * pixels[(x) + ((y + 1) * width)])
+ pixels[(x+1) + ((y+1) * width)]
- pixels[(x-1) + ((y-1) * width)]
- (2 * pixels[(x) + ((y-1) * width)])
- pixels[(x+1) + ((y-1) * width)]);
// Magnitude = sqrt(xG^2 + yG^2), rounded to nearest int.
tempImage[index] = __double2int_rn(sqrt(__int2double_rn(xG * xG) + __int2double_rn(yG * yG)));
} else {
//Pads out of bound pixels with 0
tempImage[index] = 0;
}
}
//__syncthreads();
}
//Creating image class (base class)
//Abstract base for PGM images: owns the header fields and the pixel buffer,
//and provides the CUDA-backed scale/edge-detection operations. Derived
//classes (BinaryImage / AsciiImage) implement format-specific pixel I/O.
class Image{
public:
Image():
height(0),
width(0),
maxPixelValue(0),
minpix(0),
maxpix(0),
imageSize(0){}
virtual ~Image(){}
// Format-specific pixel readers/writers, implemented by subclasses.
virtual void readImage(ifstream &inFile) = 0;
virtual void writeImage(ofstream &outFile) = 0;
// Parses the PGM header (width, height, maxPixelValue) and sets imageSize.
void readHeader(ifstream &inFile);
// Rescales pixels to the full [0, 255] range on the GPU.
void scaleImage();
// Sobel edge detection on the GPU (name keeps the original spelling).
void edgeDection();
//Accessor methods
int getHeight(){return height;}
int getWidth(){return width;}
int getMaxPixelValue(){return maxPixelValue;}
//Mutator methods
void setHeight(int h){height = h;}
void setWidth(int w){width = w;}
void setMaxPixelValue(int mpv){maxPixelValue = mpv;}
//Member variables
protected:
int height;
int width;
int maxPixelValue;
// Actual min/max pixel values found by findMin()/findMax().
int minpix;
int maxpix;
// width * height, set by readHeader().
unsigned int imageSize;
// Pixel buffer; allocated by the subclass readImage() implementations.
int * pixels;
inline void findMin();
inline void findMax();
};
//Binary image class (derived class)
//Handles the raw-byte P5 PGM variant.
class BinaryImage: public Image{
public:
BinaryImage(){}
~BinaryImage(){}
void readImage(ifstream &inFile);
void writeImage(ofstream &outFile);
};
//ASCII image class (derived class)
//Handles the text-based P2 PGM variant.
class AsciiImage: public Image{
public:
AsciiImage(){}
~AsciiImage(){}
void readImage(ifstream &infile);
void writeImage(ofstream &outFile);
};
//Check whether a PGM header line is a comment.
//Returns true when the first non-whitespace character is '#', or when the
//line contains nothing but whitespace; false as soon as any other
//non-whitespace character is seen.
bool isComment(string comment){
    for (char ch : comment) {
        if (ch == '#') return true;
        if (!isspace(ch)) return false;
    }
    // Blank / all-whitespace lines are treated like comments.
    return true;
}
//Reads binary pixel values in image.
//Precondition: readHeader() has run, so imageSize == width * height.
//Allocates `pixels` (with malloc, matching the free() in writeImage) and
//widens each raw byte to an int.
void BinaryImage::readImage(ifstream &inFile){
//Check if the file stream in open
if(!inFile){
cerr << "Could not read from file!" << endl;
exit(1000);
}
//Temporary byte buffer for the raw P5 payload
char * byteArray = new char[imageSize + 1];
//Read the bytes of the image, and puts data in byteArray
inFile.read(byteArray, imageSize);
//If reading in the data failed, return an error
if(inFile.fail()){
cerr << "Error: cannot read pixels." << endl;
delete[] byteArray;
exit(1000);
}
//Set the last element in array to EOF character
byteArray[imageSize] = '\0';
//Put the data read from file into pixels, widening unsigned bytes to int
pixels = (int *)malloc(imageSize * sizeof(int));
for(unsigned int i = 0; i < imageSize; i++){
pixels[i] = static_cast<int>
(static_cast<unsigned char>(byteArray[i]));
}
//BUG FIX: byteArray came from new[], so it must be released with delete[];
//the old free(byteArray) was undefined behavior.
delete[] byteArray;
}
//Writes binary pixels to output file.
//Emits a P5 header followed by one raw byte per pixel, then releases the
//pixel buffer (malloc'd in readImage).
void BinaryImage::writeImage(ofstream &outFile){
//Check if the file stream is open
if(!outFile){
cerr << "Could not write to file." << endl;
exit(1000);
}
//Write header
outFile << "P5" << " " <<
width << " " <<
height << " " <<
maxPixelValue << endl;
//Take all pixel values from pixels and writes it to output file
char * byteArray = new char[imageSize + 1];
for(unsigned int i = 0; i < imageSize; i++){
byteArray[i] = static_cast<char>(pixels[i]);
}
byteArray[imageSize] = '\0';
outFile.write(byteArray, imageSize);
if(outFile.fail()){
cerr << "Error: error writing to file." << endl;
exit(1000);
}
//BUG FIX: byteArray came from new[], so it must be released with delete[];
//the old free(byteArray) was undefined behavior.
delete[] byteArray;
//pixels was malloc'd in readImage, so free() is correct here.
free(pixels);
}
//Reads ASCII (P2) pixel values from the file.
//Precondition: readHeader() has run, so imageSize == width * height.
void AsciiImage::readImage(ifstream &inFile){
//Check if the file opened properly
if(!inFile){
cerr << "Could not read from file." << endl;
exit(1001);
}
//BUG FIX: `pixels` was never allocated before being written, so every read
//scribbled through an uninitialized pointer. Allocate it here (with malloc,
//matching the free() in writeImage).
pixels = (int *)malloc(imageSize * sizeof(int));
int pixelValue;
//Read in the Ascii values from file, stopping at imageSize so a file with
//extra values cannot overrun the buffer.
unsigned int i = 0;
while(i < imageSize && inFile >> pixelValue){
pixels[i] = pixelValue;
i++;
}
}
//Writes the image in ASCII P2 format: header, then tab-separated pixel
//values with a newline before each new row. Frees the pixel buffer
//(malloc'd in readImage) when done.
void AsciiImage::writeImage(ofstream &outFile){
//Check if file is open
if(!outFile){
cerr << "Could not write to file." << endl;
exit(1001);
}
//Write Header
outFile << "P2" << ' ' <<
width << ' ' <<
height << ' ' <<
maxPixelValue << '\n';
//Write the contents of pixels to the output file
for(unsigned int i = 0; i < imageSize; i++){
//Add a '\n' at the end of each row
if(i % width == 0 && i != 0) outFile << '\n';
outFile << pixels[i] << '\t';
}
free(pixels);
}
//Parses the PGM header after the magic number has been consumed by
//isBinary(): validates the remainder of the magic-number line, skips
//comment lines, reads width/height, then maxPixelValue, rejecting any extra
//non-whitespace tokens. On success sets width, height, maxPixelValue and
//imageSize; on any malformed input prints a diagnostic and exits with 1002.
void Image::readHeader(ifstream &inFile){
stringstream sStream;
string line;
//Check if the file opened successfully
if(!inFile){
cerr << "Error: Could not open file." << endl;
exit(1002);
}
char readChar;
string errorMessage = "Error: incorrect picture format.";
getline(inFile, line);
unsigned int lineSize = line.length();
//After we read magic number, we read the next line and determine if it's valid
for(unsigned int i = 0; i < lineSize; i++){
if(!isspace(line[i])){
cerr << errorMessage << endl;
cerr << "Extra info after magic number." << endl;
exit(1002);
}
}
//Read through the rest of the header and skip through comments
while(getline(inFile, line)){
if(!(isComment(line))) break;
}
sStream << line;
//Read in width.
//If there is a problem, return error
if(!(sStream >> width)){
cerr << errorMessage << endl;
cerr << "Cannot read width." << endl;
exit(1002);
}
//Read in height
//If there is a problem, return error
if(!(sStream >> height)){
cerr << errorMessage << endl;
cerr << "Cannot read height." << endl;
exit(1002);
}
//Check if there is extra information after width and height
while(sStream >> readChar){
if(!(isspace(readChar))){
cerr << errorMessage << endl;
cerr << "Extra info when reading height and width." << endl;
exit(1002);
}
}
//Make sure the height and width is positive
if(width <= 0 || height <= 0){
cerr << "Error: width and height cannot be negative" << endl;
exit(1002);
}
//Check if there are any comments between height/width and maxPixelValue
while(getline(inFile, line)){
if(!(isComment(line))) break;
}
//Clear out the string stream (both buffer and error flags) before reuse
sStream.str("");
sStream.clear();
sStream << line;
//Read in the maxPixelValue
if(!(sStream >> maxPixelValue)){
cerr << errorMessage << endl;
cerr << "Could not read maxPixelValue." << endl;
exit(1002);
}
//Check if there is extra information after maxPixelValue
while(sStream >> readChar){
if(!(isspace(readChar))){
cerr << errorMessage << endl;
cerr << "Extra info after the max pixel value." << endl;
exit(1002);
}
}
//8-bit PGM only: max pixel value must fit in a byte
if(maxPixelValue < 0 || maxPixelValue > 255){
cerr << errorMessage << endl;
cerr << "Invalid max pixel value." << endl;
exit(1002);
}
imageSize = width * height;
}
//Finds the maximum pixel value in the image and stores it in maxpix.
//Starts from 0, which is valid because pixel values are non-negative.
void Image::findMax(){
    int largest = 0;
    for(unsigned int idx = 0; idx < imageSize; idx++){
        if(pixels[idx] > largest) largest = pixels[idx];
    }
    maxpix = largest;
}
//Finds the minimum pixel value in the image and stores it in minpix.
//Starts from 255, the largest value an 8-bit PGM pixel can take.
void Image::findMin(){
    int smallest = 255;
    for(unsigned int idx = 0; idx < imageSize; idx++){
        if(pixels[idx] < smallest) smallest = pixels[idx];
    }
    minpix = smallest;
}
//Scales image so that the maximum pixel value is 255.
//Computes minpix/maxpix on the host, then runs scaleImageCuda over the
//whole pixel array on the device and copies the result back in place.
void Image::scaleImage(){
findMin();
findMax();
int *d_pixels;
size_t size = imageSize * sizeof(int);
cudaError_t err = cudaSuccess;
/* Allocate memory in device */
err = cudaMalloc((void **) &d_pixels, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector pixels (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Copy data to device */
err = cudaMemcpy(d_pixels, pixels, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector pixels from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Launch scaleImageCuda() kernel on device with N threads in N blocks */
int blocks = (imageSize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK;
scaleImageCuda<<<blocks, THREADS_PER_BLOCK>>>(d_pixels, minpix, maxpix, imageSize);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch scaleImageCuda kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Copy data back to host (this blocking memcpy also waits for the kernel) */
err = cudaMemcpy(pixels, d_pixels, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector pixels from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Clean-up */
err = cudaFree(d_pixels);
if (err != cudaSuccess){
fprintf(stderr, "Failed to free device vector pixels (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* BUG FIX: the cudaDeviceReset() that used to live here destroyed the CUDA
   context from inside a member function, which would break any CUDA call
   made after scaleImage(). Device reset belongs at application shutdown,
   not inside an image operation. */
maxPixelValue = 255;
}
//Sobel edge detection function - detects edges and draws an outline.
//Uploads the pixel buffer, runs edgeDetectionCuda (one thread per pixel),
//downloads the gradient image and copies it back into `pixels`.
void Image::edgeDection(){
cudaError_t err = cudaSuccess;
size_t size = imageSize * sizeof(int);
/* Allocate memory in host */
// NOTE(review): malloc result is not checked before use.
int *tempImage = (int *)malloc(size);
int *d_pixels, *d_tempImage;
/* Allocate memory in device */
err = cudaMalloc((void **) &d_pixels, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device array pixels (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **) &d_tempImage, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device array tempImage (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Copy data to device */
err = cudaMemcpy(d_pixels, pixels, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy array pixels from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// NOTE(review): this uploads the *uninitialized* tempImage buffer; the
// kernel fully overwrites d_tempImage, so this copy is unnecessary work.
err = cudaMemcpy(d_tempImage, tempImage, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy array tempImage from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Launch edgeDetectionCuda() kernel on device with N threads in N blocks */
int blocks = (imageSize + (THREADS_PER_BLOCK - 1)) / THREADS_PER_BLOCK;
printf("blocks=%d\n", blocks);
edgeDetectionCuda<<<blocks, THREADS_PER_BLOCK>>>(d_pixels, d_tempImage, width, height, imageSize);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch edgeDetectionCuda kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Copy data to host (blocking memcpy also waits for the kernel to finish) */
err = cudaMemcpy(tempImage, d_tempImage, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy array tempImage from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Clean-up device */
err = cudaFree(d_tempImage);
if (err != cudaSuccess){
fprintf(stderr, "Failed to free array vector tempImage (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_pixels);
if (err != cudaSuccess){
fprintf(stderr, "Failed to free array vector pixels (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Replace the original pixels with the edge-magnitude image.
for(unsigned int i = 0; i < imageSize; i++){
pixels[i] = tempImage[i];
}
/* Clean-up host */
free(tempImage);
}
bool isBinary(ifstream &inFile);
void run(char **argv);
// Entry point: expects exactly two arguments (input PGM, output PGM) and
// delegates all work to run().
int main(int argc, char **argv){
    // Require: program name + input image + output image.
    if(argc != 3){
        cerr << "Usage: EdgeDetection imageName.pgm output.pgm";
        return 1;
    }
    run(argv);
    return 0;
}
//Reads the two-character PGM magic number from the stream and returns true
//for binary format ("P5"), false for ASCII ("P2"). Exits with 1002 on any
//other magic number. Consumes the magic number from the stream as a side
//effect, which readHeader() relies on.
bool isBinary(ifstream &inFile){
char readChar = ' ';
string errorMessage = "Error: incorrect picture format.";
//If there is no character or the character is not equal to 'P'
//then return an error
if(!(inFile >> readChar) || ( readChar != 'P' )){
cerr << errorMessage << endl;
cerr << "P" << endl;
exit(1002);
}
//If there is no character or the second character is not a 2 or 5
//then return an error
if(!(inFile >> readChar) || ( readChar != '2' && readChar != '5')){
cerr << errorMessage << endl;
cerr << readChar << endl;
exit(1002);
}
if(readChar == '5') return true;
return false;
}
//Drives the full pipeline: opens argv[1] for reading and argv[2] for
//writing, dispatches on the PGM magic number, then for either format runs
//readHeader -> readImage -> edgeDection -> scaleImage -> writeImage.
//NOTE(review): neither stream is checked for successful open here; a
//missing input file only surfaces via isBinary()'s error exit.
void run(char **argv){
ifstream inFile;
inFile.open(argv[1], ios::binary | ios::in);
ofstream outFile;
outFile.open(argv[2], ios::binary
| ios::out
| ios::trunc);
if(isBinary(inFile)){
BinaryImage binaryImage;
binaryImage.readHeader(inFile);
binaryImage.readImage(inFile);
binaryImage.edgeDection();
binaryImage.scaleImage();
binaryImage.writeImage(outFile);
}else{
AsciiImage asciiImage;
asciiImage.readHeader(inFile);
asciiImage.readImage(inFile);
asciiImage.edgeDection();
asciiImage.scaleImage();
asciiImage.writeImage(outFile);
}
inFile.close();
outFile.close();
}
|
5,405 | __global__ void f(int * A, int *B) {
// Test kernel: with diff = B - A, the read A[tid + diff - 1] is the same
// address as B[tid - 1], so effectively B[tid] = B[tid] + B[tid - 1].
// NOTE(review): pointer subtraction (B - A) is only well-defined when both
// point into the same allocation, and tid == 0 reads B[-1] -- presumably
// this is a compiler/analysis test case; confirm the caller's layout.
int tid = threadIdx.x;
int diff = (B - A);
int x = B[tid];
int y = A[tid + diff - 1];
B[tid] = x + y;
}
|
5,406 | #include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <iostream>
#include <unordered_map>
// Fixed-size D-dimensional integer coordinate. Plain aggregate (no dynamic
// state), so it can be stored directly in thrust host/device vectors.
template <uint8_t D>
struct Coord {
int32_t data_[D];
// Unchecked element access; i must be in [0, D).
int32_t& operator[](int i) { return data_[i]; }
const int32_t& operator[](int i) const { return data_[i]; }
};
// Scratch program exercising thrust containers and std::unordered_map
// semantics; prints its observations to stdout.
int main() {
std::string stra = "asdasds";
std::string strb = "a";
// sizeof(std::string) is the size of the string OBJECT, not the text
// length, so both print the same value.
std::cout << sizeof(stra) << " " << sizeof(strb) << "\n";
thrust::device_vector<bool> b;
b.push_back(true);
b.push_back(false);
// Each b[i] on a device_vector performs a device-to-host transfer.
for (int i = 0; i < b.size(); ++i) {
std::cout << b[i] << "\n";
}
thrust::device_vector<float> a;
a.push_back(3.14);
for (int i = 0; i < a.size(); ++i) {
std::cout << a[i] << "\n";
}
// Trivially-copyable Coord structs can round-trip host -> device.
thrust::host_vector<Coord<3>> coords_host(10);
for (auto& coord : coords_host) {
coord[0] = -1;
}
thrust::device_vector<Coord<3>> coords_device(10);
thrust::copy(coords_host.begin(), coords_host.end(), coords_device.begin());
// for (int i = 0; i < 10; ++i) {
// Coord<3> coord = coords_device[i];
// std::cout << coord[0] << "\n";
// }
// emplace on an existing key does NOT overwrite: ret1.second is false and
// ret1.first points at the original (possibly mutated) entry.
std::unordered_map<int, int> hash_map;
auto ret0 = hash_map.emplace(1, 10);
auto ret1 = hash_map.emplace(1, 91);
std::cout << (*ret0.first).first << " " << (*ret0.first).second << " "
<< ret0.second << "\n";
(*ret0.first).second = -1;
std::cout << (*ret1.first).first << " " << (*ret1.first).second << " "
<< ret1.second << "\n";
}
|
5,407 | /*
GPU Kernels for the mesh to particles functions
@author: Stefan Hegglin, Adrian Oeftiger
*/
extern "C" {
/* Bilinear interpolation of a 2D mesh quantity onto particles.
   One thread per particle (2D thread blocks flattened into pidx).
   wij/wi1j/wij1/wi1j1 are the per-particle interpolation weights and
   i/j the lower-left cell indices. Particles outside the mesh get 0.
   BUG FIX: i[pidx] and j[pidx] used to be read BEFORE the pidx bounds
   check, causing out-of-bounds global reads in the tail threads. */
__global__ void mesh_to_particles_2d(
    int nparticles,
    double* particles_quantity, double *mesh_quantity,
    const int nx, const int ny,
    double *wij, double *wi1j, double *wij1, double *wi1j1,
    int *i, int *j)
{
    int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
    if (pidx < nparticles) {
        int ix = i[pidx];
        int jx = j[pidx];
        if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1)
        {
            particles_quantity[pidx] = ( wij[pidx] * mesh_quantity[jx + ix*nx]
                + wij1[pidx] * mesh_quantity[jx+1 + ix*nx]
                + wi1j[pidx] * mesh_quantity[jx + (ix+1)*nx]
                + wi1j1[pidx] * mesh_quantity[jx+1 + (ix+1)*nx]);
        } else {
            particles_quantity[pidx] = 0;
        }
    }
}
/* Bilinear interpolation of a 2D (x, y) field onto particles; same layout
   and weight convention as mesh_to_particles_2d but for two components.
   BUG FIX: j[pidx] and i[pidx] used to be read BEFORE the pidx bounds
   check, causing out-of-bounds global reads in the tail threads. */
__global__ void field_to_particles_2d(
    int nparticles,
    double* forcex, double* forcey, double* fieldx, double* fieldy,
    const int nx, const int ny,
    double *wij, double *wi1j, double *wij1, double *wi1j1, int *i, int *j)
{
    int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
    if (pidx < nparticles) {
        int jx = j[pidx];
        int ix = i[pidx];
        if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1)
        {
            forcex[pidx] = ( wij[pidx] * fieldx[jx + ix*nx]
                + wij1[pidx] * fieldx[jx+1 + ix*nx]
                + wi1j[pidx] * fieldx[jx + (ix+1)*nx]
                + wi1j1[pidx] *fieldx[jx+1 + (ix+1)*nx]);
            forcey[pidx] = ( wij[pidx] * fieldy[jx + ix*nx]
                + wij1[pidx] * fieldy[jx+1 + ix*nx]
                + wi1j[pidx] * fieldy[jx + (ix+1)*nx]
                + wi1j1[pidx] *fieldy[jx+1 + (ix+1)*nx]);
        } else {
            forcex[pidx] = 0;
            forcey[pidx] = 0;
        }
    }
}
/* Trilinear interpolation of a 3D (x, y, z) field onto particles.
   wijk..wi1j1k1 are the eight per-particle corner weights; i/j/k are the
   lower corner cell indices. Particles outside the mesh get 0.
   BUG FIX: i/j/k[pidx] used to be read BEFORE the pidx bounds check,
   causing out-of-bounds global reads in the tail threads. */
__global__ void field_to_particles_3d(
    int nparticles,
    double* forcex, double* forcey, double* forcez,
    double* fieldx, double* fieldy, double* fieldz,
    const int nx, const int ny, const int nz,
    double *wijk, double *wi1jk, double *wij1k, double *wi1j1k,
    double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1,
    int *i, int *j, int* k)
{
    int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
    if (pidx < nparticles) {
        int ix = i[pidx];
        int jx = j[pidx];
        int kx = k[pidx];
        if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1 && kx >= 0 && kx < nz - 1)
        {
            forcex[pidx] = ( wijk[pidx] * fieldx[jx + ix*nx + kx*nx*ny]
                + wij1k[pidx] * fieldx[jx+1 + ix*nx + kx*nx*ny]
                + wi1jk[pidx] * fieldx[jx + (ix+1)*nx + kx*nx*ny]
                + wi1j1k[pidx] * fieldx[jx+1 + (ix+1)*nx + kx*nx*ny]
                + wijk1[pidx] * fieldx[jx + ix*nx + (kx+1)*nx*ny]
                + wij1k1[pidx] * fieldx[jx+1 + ix*nx + (kx+1)*nx*ny]
                + wi1jk1[pidx] * fieldx[jx + (ix+1)*nx + (kx+1)*nx*ny]
                + wi1j1k1[pidx]* fieldx[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]);
            forcey[pidx] = ( wijk[pidx] * fieldy[jx + ix*nx + kx*nx*ny]
                + wij1k[pidx] * fieldy[jx+1 + ix*nx + kx*nx*ny]
                + wi1jk[pidx] * fieldy[jx + (ix+1)*nx + kx*nx*ny]
                + wi1j1k[pidx] * fieldy[jx+1 + (ix+1)*nx + kx*nx*ny]
                + wijk1[pidx] * fieldy[jx + ix*nx + (kx+1)*nx*ny]
                + wij1k1[pidx] * fieldy[jx+1 + ix*nx + (kx+1)*nx*ny]
                + wi1jk1[pidx] * fieldy[jx + (ix+1)*nx + (kx+1)*nx*ny]
                + wi1j1k1[pidx]* fieldy[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]);
            forcez[pidx] = ( wijk[pidx] * fieldz[jx + ix*nx + kx*nx*ny]
                + wij1k[pidx] * fieldz[jx+1 + ix*nx + kx*nx*ny]
                + wi1jk[pidx] * fieldz[jx + (ix+1)*nx + kx*nx*ny]
                + wi1j1k[pidx] * fieldz[jx+1 + (ix+1)*nx + kx*nx*ny]
                + wijk1[pidx] * fieldz[jx + ix*nx + (kx+1)*nx*ny]
                + wij1k1[pidx] * fieldz[jx+1 + ix*nx + (kx+1)*nx*ny]
                + wi1jk1[pidx] * fieldz[jx + (ix+1)*nx + (kx+1)*nx*ny]
                + wi1j1k1[pidx]* fieldz[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]);
        } else {
            forcex[pidx] = 0;
            forcey[pidx] = 0;
            forcez[pidx] = 0;
        }
    }
}
/* Trilinear interpolation of a 3D mesh quantity onto particles; same
   layout and weight convention as field_to_particles_3d, single component.
   BUG FIX: i/j/k[pidx] used to be read BEFORE the pidx bounds check,
   causing out-of-bounds global reads in the tail threads. */
__global__ void mesh_to_particles_3d(
    int nparticles,
    double* particles_quantity, double *mesh_quantity,
    const int nx, const int ny, const int nz,
    double *wijk, double *wi1jk, double *wij1k, double *wi1j1k,
    double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1,
    int *i, int *j, int* k)
{
    int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
    if (pidx < nparticles) {
        int ix = i[pidx];
        int jx = j[pidx];
        int kx = k[pidx];
        if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1 && kx >= 0 && kx < nz - 1)
        {
            particles_quantity[pidx] = ( wijk[pidx] * mesh_quantity[jx + ix*nx + kx*nx*ny]
                + wij1k[pidx] * mesh_quantity[jx+1 + ix*nx + kx*nx*ny]
                + wi1jk[pidx] * mesh_quantity[jx + (ix+1)*nx + kx*nx*ny]
                + wi1j1k[pidx] * mesh_quantity[jx+1 + (ix+1)*nx + kx*nx*ny]
                + wijk1[pidx] * mesh_quantity[jx + ix*nx + (kx+1)*nx*ny]
                + wij1k1[pidx] * mesh_quantity[jx+1 + ix*nx + (kx+1)*nx*ny]
                + wi1jk1[pidx] * mesh_quantity[jx + (ix+1)*nx + (kx+1)*nx*ny]
                + wi1j1k1[pidx]* mesh_quantity[jx+1 + (ix+1)*nx + (kx+1)*nx*ny]);
        } else {
            particles_quantity[pidx] = 0;
        }
    }
}
} /* end extern C */
|
5,408 | #include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
void Print_matrix(int N, FILE *f, double *m, long int n);
__global__ void Jacobi(long int n, double *in, double *out);
// Jacobi heat-diffusion driver: 128x128 grid, 200 iterations, dumping the
// whole grid to heat.txt after every step.
// NOTE(review): the boundary init (heat[N*i] = 1) happens AFTER the
// host->device memcpy, so the device starts from an all-zero grid; the
// kernel itself re-imposes the j == 0 boundary each step, which is
// presumably why this still converges -- confirm intent.
// NOTE(review): results are read back from `arr` (the "in" buffer), relying
// on the kernel's final in[...] = out[...] copy; no CUDA error checking.
int main(void)
{
int N = 128;
int block = 1024;
// 128*128 / 1024 = 16 blocks, exactly covering the grid.
int grid = N * N / 1024;
double *heat = (double *) calloc(sizeof(double), N * N), *arr, *arr_out;
cudaMalloc(&arr, sizeof(double) * N * N);
cudaMalloc(&arr_out, sizeof(double) * N * N);
cudaMemcpy(arr, heat, sizeof(double) * N * N, cudaMemcpyHostToDevice);
dim3 Block(block);
dim3 Grid(grid);
int k_iter = 0;
// Hot boundary on column 0 of every row (host copy only; see note above).
for (int i = 0; i < N; i++)
{
heat[N * i] = 1;
}
FILE *f = fopen("heat.txt", "wb");
for(;;)
{
k_iter++;
Jacobi<<<Grid, Block>>>(N, arr, arr_out);
// Blocking memcpy also serves as the per-iteration synchronization.
cudaMemcpy(heat, arr, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
Print_matrix(N, f, heat, N);
if (k_iter >= 200)
{
break;
}
}
cudaDeviceSynchronize();
fclose(f);
free(heat);
cudaFree(arr);
cudaFree(arr_out);
}
// One Jacobi relaxation step on an n x n grid: each thread averages its four
// neighbours from `in` into `out`, re-imposes the boundary conditions, then
// copies its own cell back from `out` to `in` for the next launch.
// NOTE(review): the neighbour-index encoding is suspect -- `west` becomes -1
// for i <= 1 (and `north`/`east` for the last TWO rows/cols), so in[-1] and
// other wrong cells are read before the boundary rows are overwritten; row
// i == 1 never gets its true west neighbour. Also, the final
// in[...] = out[...] copy races with other blocks still reading `in`
// (__syncthreads() is block-local; there is no grid-wide barrier).
__global__ void Jacobi(long int n, double *in, double *out)
{
int myId, i, j;
int north, south, east, west;
//double N, S, E, W;
int index_center;
//int flag = 0;
myId = threadIdx.x + blockDim.x * blockIdx.x;
// i = row, j = column of this thread's cell.
i = myId / n;
j = myId - n * i;
index_center = i*n + j;
south = j - 1 > 0 ? (j - 1) + i*n : 0;
west = i - 1 > 0 ? j + (i - 1)*n : -1;
north = j + 1 < n - 1 ? (j + 1) + i*n : -1;
east = i + 1 < n - 1 ? j + (i + 1)*n : -1;
//in[0] = 0;
//if (j < 1) {S = 0;} else {S = in[(int)south];}
//if (j > n - 2) {N = 0;} else {N = in[(int)north];}
//if (i > n - 2) {E = 0;} else {E = in[(int)east];}
//if (i < 1) {W = 0;} else {W = in[(int)west];}
// if (i < n - 1 && j < n - 1 && i>0 && j>0)
// {
out[index_center] = 0.25 * (in[(int)north] +
in[(int)south] +
in[(int)east] +
in[(int)west]);
/* out[index_center] = 0.25 * (S+
N +
E +
W);
*/
// }
//__syncthreads();
// Cold boundaries: first row, last row, last column are clamped to 0.
if (i == 0)
{
out[index_center] = 0;
}
if (i == n - 1)
{
out[index_center] = 0;
}
if (j == n - 1)
{
out[index_center] = 0;
}
// __syncthreads();
// Hot boundary: column 0 of every row is held at 1
// (index_center - j == i*n, the row's first element).
if (j == 0)
{
out[index_center - j] = 1;
}
__syncthreads();
in[index_center] = out[index_center];
}
// Writes the leading n x n window of the N-column matrix m to f as
// tab-separated "%f" values, followed by one trailing newline for the
// whole matrix.
void Print_matrix(int N, FILE *f, double *m, long int n)
{
    for (int row = 0; row < n; row++)
    {
        double *rowStart = m + row * N;
        for (int col = 0; col < n; col++)
        {
            fprintf(f, "%f\t", rowStart[col]);
        }
    }
    fprintf(f, "\n");
}
|
5,409 | #include <stdio.h>
#include <cuda.h>
// Minimal launch test: every thread prints the sum of its x and y thread
// indices (output order across threads is unspecified).
__global__ void K() {
printf("%d\n", threadIdx.x + threadIdx.y);
}
int main() {
// One block of 3 x 4 = 12 threads.
dim3 block(3, 4);
K<<<1, block>>>();
// Wait for the kernel so its printf output is flushed before exit.
cudaDeviceSynchronize();
return 0;
}
|
5,410 | #include "includes.h"
// For each of the nFlip edges listed in pEnd, records the two triangles
// adjacent to that edge (pEt[e].x and pEt[e].y) into pTaff, and the flip
// index into pTaffEdge, using an nFlip-strided two-slot layout (slot i and
// slot i + nFlip both belong to flip i). Grid-stride loop over flips; both
// output arrays must hold 2 * nFlip entries.
__global__ void devFillAffectedTriangles(int nFlip, int *pTaff, int *pTaffEdge, int *pEnd, int2 *pEt)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
while (i < nFlip) {
int e = pEnd[i];
pTaffEdge[i] = i;
pTaffEdge[i + nFlip] = i;
pTaff[i] = pEt[e].x;
pTaff[i + nFlip] = pEt[e].y;
i += gridDim.x*blockDim.x;
}
} |
5,411 | #include "includes.h"
// Builds the symmetric (outp) and antisymmetric (outm) combinations of a
// batched v x v matrix `in` over the lower triangle (d <= c), storing them
// in packed triangular layout (vtri = v*(v+1)/2 entries per batch slice b).
// The flat thread id decomposes as id = b*v^2 + c*v + d; threads with
// d > c or with a batch index below the `a` offset return immediately.
__global__ void GPUKernel_VpVm_tiled(int a, int bstart, int bsize,int v,double * in,double * outp,double * outm) {
int blockid = blockIdx.x*gridDim.y + blockIdx.y;
int id = blockid*blockDim.x + threadIdx.x;
int v2 = v*v;
if ( id >= v2*bsize ) return;
// id : b*v2+c*v+d
int d = id%v;
int c = (id-d)%(v*v)/v;
// Only the lower triangle (d <= c) is materialized.
if ( d > c ) return;
//int b = (id-d)%(v*bsize)/v;
//int c = (id-d-b*v)/(bsize*v);
int b = (id-d-c*v)/(v*v);
if ( b + bstart < a ) return;
// Packed triangular index for the (c, d) pair.
int cd = c*(c+1)/2 + d;
int vtri = v*(v+1)/2;
int bv2 = b*v2;
//outp[b*vtri+cd] = in[bv2+d*v+c] + in[bv2+c*v+d];
//outm[b*vtri+cd] = in[bv2+d*v+c] - in[bv2+c*v+d];
// Note: in[id] == in[bv2 + c*v + d], the transpose partner of in[bv2+d*v+c].
outp[b*vtri+cd] = in[bv2+d*v+c] + in[id];
outm[b*vtri+cd] = in[bv2+d*v+c] - in[id];
} |
5,412 | /* Furthest point sampling GPU implementation
* Original author: Haoqiang Fan
* Modified by Charles R. Qi
* All Rights Reserved. 2017.
*/
// Inclusive prefix sum (cumsum) over each of the b rows of inp (length n),
// written to out. Each block grid-strides over rows; within a row, data is
// processed in tiles of BlockSize*4 elements: a 4-wide sequential pre-scan
// per thread, a shared-memory up-sweep/down-sweep over the 4-element group
// sums (with bank-conflict padding every 2^paddingLevel entries), then the
// group offsets are added back. `runningsum`/`runningsum2` carry the total
// across tiles using Kahan-style compensation.
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
const int BlockSize=2048;
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4];
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
// n24_i: valid elements in this tile; n24: rounded up to a multiple of 4.
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
// Per-thread sequential scan of its 4-element group.
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
// Ragged tail: scan the remaining <4 elements, pad with the last sum.
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
// Up-sweep (reduce) phase over the group sums in shared memory.
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
u--;
// Down-sweep phase: propagate partial sums to the remaining positions.
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
// Add each group's exclusive prefix (from the scanned group sums).
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
// Emit this tile, offset by the running total of previous tiles.
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
// Compensated accumulation of the tile total into the running sum.
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
// For each query q (scaled by the row's final cumsum value, i.e. the total),
// finds the smallest index r such that dataset[i*n+r] >= q via a branchless
// power-of-two descent from `base` (the smallest power of two >= n).
// Grid layout: blockIdx.x strides over batches, (blockIdx.y, threadIdx.x)
// over the m queries per batch. Used for inverse-CDF sampling after
// cumsumKernel.
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
int base=1;
while (base<n)
base<<=1;
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
// Scale the (presumably in [0,1]) query by the row total -- TODO confirm.
float q=query[i*m+j]*dataset[i*n+n-1];
int r=n-1;
for (int k=base;k>=1;k>>=1)
if (r>=k && dataset[i*n+r-k]>=q)
r-=k;
result[i*m+j]=r;
}
}
}
// Farthest point sampling: selects m indices per batch from n 3D points
// such that each new point maximizes its distance to the already-selected
// set. One block per batch (grid-stride over b); `temp` (gridDim.x * n
// floats) holds each point's current distance-to-set, `buf` caches the
// first BufferSize points' coordinates in shared memory. Selection uses a
// shared-memory max-reduction over per-thread best candidates.
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
// Point 0 is always the first sample.
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
// Distances start at "infinity" so the first pass adopts real distances.
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
// Coordinates of the most recently selected point.
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
// Shrink each point's distance-to-set with the new sample, and track
// this thread's farthest remaining point.
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
// Block-wide max-reduction over (best, besti) pairs; result in slot 0.
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
//Gather selected 3-D points: out[b,m,3] = inp[b, idx[b,m], 3].
//Grid-stride over batches (x) and output slots (y * block).
__global__ void gatherpointKernel(int b,int n,int m,const float * __restrict__ inp,const int * __restrict__ idx,float * __restrict__ out){
for (int batch=blockIdx.x;batch<b;batch+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
const float *src=inp+(batch*n+idx[batch*m+j])*3;
float *dst=out+(batch*m+j)*3;
dst[0]=src[0];
dst[1]=src[1];
dst[2]=src[2];
}
}
}
//Backward pass of gatherpoint: scatter-add each output gradient back onto the
//input point it was gathered from. atomicAdd because several output slots may
//reference the same input index.
__global__ void scatteraddpointKernel(int b,int n,int m,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for (int batch=blockIdx.x;batch<b;batch+=gridDim.x){
for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
const float *src=out_g+(batch*m+j)*3;
float *dst=inp_g+(batch*n+idx[batch*m+j])*3;
atomicAdd(&dst[0],src[0]);
atomicAdd(&dst[1],src[1]);
atomicAdd(&dst[2],src[2]);
}
}
}
//Host wrapper: launch the cumulative-sum kernel (defined above, body partially
//outside this view) over b batches of n values with a fixed 32x512 launch.
void cumsumLauncher(int b,int n,const float * inp,float * out){
cumsumKernel<<<32,512>>>(b,n,inp,out);
}
//require b*n working space
//Host wrapper: weighted sampling of m indices per batch. The weights inp_p
//are cumulated into temp, then each uniform draw inp_r is scaled by the
//batch total and binary-searched into the cumulative table.
void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
cumsumKernel<<<32,512>>>(b,n,inp_p,temp);
binarysearchKernel<<<dim3(32,8,1),512>>>(b,n,m,temp,inp_r,out);
}
//require 32*n working space
//Host wrapper: farthest-point sampling; 32 blocks (one batch each at a time)
//of 512 threads. temp must hold 32*n floats (per-block distance scratch).
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out);
}
//Host wrapper: gather m selected points per batch (2x8 grid, 512 threads).
void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){
gatherpointKernel<<<dim3(2,8,1),512>>>(b,n,m,inp,idx,out);
}
//Host wrapper: scatter-add gradients back through the gather (2x8 grid, 512 threads).
void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){
scatteraddpointKernel<<<dim3(2,8,1),512>>>(b,n,m,out_g,idx,inp_g);
}
|
5,413 | #include <stdlib.h>
#include <stdio.h>
//Iterate z <- z^2 + c for c = (x, y); escape when |z|^2 > 3 or after 1000
//steps. Returns 255 for points that never escape (treated as inside the
//set), otherwise a shade derived from the escape iteration count.
__device__ int mandelbrot_point(float x, float y) {
const int max_iteration = 1000;
float re = 0.0f;
float im = 0.0f;
int iter = 0;
while (re*re + im*im <= 3.0f && iter < max_iteration) {
float next_re = re*re - im*im + x;
float next_im = 2.0f*re*im + y;
re = next_re;
im = next_im;
iter++;
}
return (iter == max_iteration) ? 255 : (iter*10 % 255);
}
//One block per pixel (grid = width x height, single thread per block).
//Maps blockIdx onto a 4-unit window of the complex plane centred at (-1, 0)
//and writes the same grey value into the pixel's three RGB channels.
__global__ void compute_mandelbrot(int *pixels) {
const float x_center = -1.0f;
const float y_center = 0.0f;
float x = x_center + 4.0f * ((float)blockIdx.x - (float)gridDim.x/2.0f) / (float)gridDim.x;
float y = y_center + 4.0f * ((float)blockIdx.y - (float)gridDim.y/2.0f) / (float)gridDim.y;
int value = mandelbrot_point(x, y);
//row-major pixel index, 3 ints per pixel
int base = (blockIdx.y*gridDim.x + blockIdx.x)*3;
pixels[base] = value;
pixels[base+1] = value;
pixels[base+2] = value;
}
//Render the pixel buffer as ASCII art: '*' where the first channel is 255
//(point classified inside the set), a space elsewhere; each row ends in '|'.
void print_mandelbrot(int *pixels, int width, int height ) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < height; j++) {
printf(pixels[i*3 + width*j*3] == 255 ? "*" : " ");
}
printf("|\n");
}
}
//Host driver: render a 100x100 mandelbrot on the GPU, time kernel + copy
//with CUDA events, then print the result as ASCII.
//Fixes: the device buffer and the host buffer were never freed, the host
//malloc was unchecked, and main had no return value.
int main(void) {
int *pixels_dev;
int *pixels;
int width = 100;
int height = 100;
cudaEvent_t start, stop;
dim3 grid(width, height);
float elapsed;
cudaMalloc((void **)&pixels_dev, sizeof(int) * width * height * 3);
pixels = (int *)malloc(width*height*3*sizeof(int));
if (pixels == NULL) {
fprintf(stderr, "host allocation failed\n");
cudaFree(pixels_dev);
return 1;
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
compute_mandelbrot<<<grid,1>>>(pixels_dev);
//blocking copy also synchronizes with the kernel before 'stop' is recorded
cudaMemcpy(pixels, pixels_dev, width * height* 3 * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
printf("time: %f\n", elapsed);
cudaEventDestroy(start);
cudaEventDestroy(stop);
print_mandelbrot(pixels, width, height);
//release device and host buffers (previously leaked)
cudaFree(pixels_dev);
free(pixels);
return 0;
}
|
5,414 | #include <time.h>
#include <stdio.h>
#define N 10000000
//Element-wise +1 over 'array', with a bounds guard for the grid tail.
__global__ void increment(int *array, int length) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < length) {
array[i] += 1;
}
}
/* Host driver: time a host-side memcpy, mirror the buffer onto the device,
 * increment every element on the GPU and copy the result back.
 * Fixes: 'int main(int x)' is not a valid main signature (standard forms are
 * main(void) or main(int, char**)); added the missing return; replaced the
 * float ceil() grid computation with exact integer ceil-division. */
int main(void) {
size_t bytes = (size_t)N*sizeof(int);
int* a = (int*)malloc(bytes);
int* b = (int*)malloc(bytes);
int* cuda_a;
int* cuda_b;
cudaMalloc((void**) &cuda_a, bytes);
cudaMalloc((void**) &cuda_b, bytes);
memset(a, 0, bytes);
clock_t start, end;
double cpu_time_used;
/* copy the values in array a to array b on the host memory, timed in ms */
start = clock();
memcpy(b, a, bytes);
end = clock();
cpu_time_used = ((double) (end - start)) / (CLOCKS_PER_SEC/1000);
printf("%f\n", cpu_time_used);
cudaMemcpy(cuda_a, b, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_b, cuda_a, bytes, cudaMemcpyDeviceToDevice);
/* try 4, 8, 16, 24 and 32 */
int blocksize = 16;
dim3 dimBlock(blocksize);
dim3 dimGrid((N + blocksize - 1) / blocksize);  /* exact ceil-division */
increment <<<dimGrid,dimBlock>>> (cuda_b, N);
/* blocking copy synchronizes with the kernel */
cudaMemcpy(a, cuda_b, bytes, cudaMemcpyDeviceToHost);
cudaFree(cuda_a);
cudaFree(cuda_b);
free(a);
free(b);
return 0;
}
5,415 | #include "includes.h"
//Sum each column of a row-major matrix with nx columns and ny rows:
//sum[idx] = sum over i of data[idx + i*nx]. num_threads is the launch block
//size and offset shifts the column range (for multi-launch splits).
//Fix: added a bounds guard — launches rarely divide nx evenly, and without
//it trailing threads read and write out of bounds. Assumes 'sum' holds nx
//entries — TODO confirm against the caller.
__global__ void column_sum(const float* data, float* sum, int nx, int ny, int num_threads, int offset ) {
const uint idx = threadIdx.x + blockIdx.x*num_threads+offset;
if (idx >= (uint)nx)
return;
float s = 0.0f;  //float literal; the accumulation itself is single precision
for(int i =0; i < ny; i++) {
s += data[idx + i*nx];
}
sum[idx] = s;
}
5,416 | #include "includes.h"
//Ternary census transform with a sparse radius-2 neighbourhood.
//Each interior pixel is compared against 16 neighbours: the full 8-cell ring
//at radius 1, plus the 8 radius-2 cells whose offsets have no |dx|==1 or
//|dy|==1 component. Each comparison shifts the code left by 2 and appends
//2 bits (0: neighbour brighter by more than eps, 1: within eps, 2: centre
//brighter by more than eps) — 16 x 2 = 32 bits, filling the result exactly.
//Pixels within 2 of the border are written as 0.
__global__ void Census_Kernel(unsigned char * MemSrc, unsigned int * MemDst, int eps, int Width, int Height)
{
int globalX = blockIdx.x * blockDim.x + threadIdx.x;
int globalY = blockIdx.y * blockDim.y + threadIdx.y;
int GlobalOffset = (globalY * Width + globalX);
float Value;
float ValueCenter;
unsigned int Census=0;
float Diff = 0;
//(disabled) shared-memory tiling experiment, kept for reference
//int threadX = threadIdx.x+3;
//int threadY = threadIdx.y+3;
//int blockDimX = blockDim.x+2*3;
//int blockDimY = blockDim.y+2*3;
//int OffsetLocal = (threadY * blockDimX + threadX);
extern __shared__ unsigned char DataCache[];
//FillCacheRadius(DataCache, MemSrc, 3, Width, Height);
//------------------------------------------------------------------
//only pixels whose full radius-2 neighbourhood lies inside the image
if (globalX>1 && globalX<(Width-2) && globalY>1 && globalY<(Height-2))
{
ValueCenter=MemSrc[GlobalOffset];
//ValueCenter=DataCache[OffsetLocal];
//inner ring: the 8 neighbours at radius 1
#pragma unroll
for(int dy=-1;dy<=1;dy++)
{
#pragma unroll
for(int dx=-1;dx<=1;dx++)
{
if (!(dx==0 && dy==0))
{
Value=MemSrc[(globalY+dy) * Width + (globalX+dx)];
//Value=DataCache[(threadY+dy) * blockDimX + (threadX+dx)];
//---------------------------------------------------------------------
// Ternary comparison: append 2 bits per neighbour
//---------------------------------------------------------------------
Diff = ValueCenter - Value;
Census = Census << 2;
if (abs(Diff)<=eps)
{
Census=Census+1;
}
else if (Diff> eps)
{
Census=Census+2;
}
}
}
}
//outer ring: radius-2 cells excluding anything with |dx|==1 or |dy|==1
#pragma unroll
for(int dy=-2;dy<=2;dy++)
{
#pragma unroll
for(int dx=-2;dx<=2;dx++)
{
if (!(dx==0 && dy==0) && !(abs(dx)==1 || abs(dy)==1))
{
Value=MemSrc[(globalY+dy) * Width + (globalX+dx)];
//Value=DataCache[(threadY+dy) * blockDimX + (threadX+dx)];
//---------------------------------------------------------------------
// Ternary comparison: append 2 bits per neighbour
//---------------------------------------------------------------------
Diff = ValueCenter - Value;
Census = Census << 2;
if (abs(Diff)<=eps)
{
Census=Census+1;
}
else if (Diff> eps)
{
Census=Census+2;
}
}
}
}
MemDst[GlobalOffset] = (Census);
}
else
{
//border pixels (inside the image but without a full neighbourhood) get 0
if (globalX>=0 && globalX<(Width) && globalY>=0 && globalY<(Height))
MemDst[GlobalOffset] = 0;
}
}
5,417 | /*
* FileName: RayTracer_Kernel.cu
*
* Programmer: Jiayin Cao
*/
//Per-level device buffers holding the per-block totals produced by
//kernelScan; consumed by the recursive cudaScan below (two levels max).
int* g_ScanSum[2];
//some helper functions
//Normalize the xyz part of v in place (w is left untouched).
//NOTE(review): no guard for a zero-length vector (division by zero).
__device__ void d_normalize( float4* v )
{
float len = v->x * v->x + v->y * v->y + v->z * v->z;
len = sqrt(len);
v->x /= len;
v->y /= len;
v->z /= len;
}
//cross product of the xyz parts; w of the result is 0
__device__ float4 d_cross( const float4& v1 , const float4& v2 )
{
return make_float4( v1.y * v2.z - v1.z * v2.y ,
                    v1.z * v2.x - v1.x * v2.z ,
                    v1.x * v2.y - v1.y * v2.x ,
                    0.0f );
}
//clamp a scalar into [0, 1]
__device__ float d_clamp( const float v )
{
return v > 1.0f ? 1.0f : ( v < 0.0f ? 0.0f : v );
}
//clamp every component of a float4 into [0, 1]
__device__ float4 d_saturate( const float4& v )
{
float4 r;
r.x = d_clamp( v.x );
r.y = d_clamp( v.y );
r.z = d_clamp( v.z );
r.w = d_clamp( v.w );
return r;
}
//3-D dot product (w components are ignored)
__device__ float d_dot( const float4& v1 , const float4& v2 )
{
float r = v1.x * v2.x;
r += v1.y * v2.y;
r += v1.z * v2.z;
return r;
}
//Euclidean length of the xyz part
__device__ float d_length( const float4& v )
{
return sqrt( d_dot( v , v ) );
}
//define some useful operators for float4 / float2
//NOTE: all four components (w included) participate in +, -, the
//component-wise product and the scalar products below.
__device__ float4 operator+ ( const float4& v1 , const float4& v2 )
{
return make_float4( v1.x + v2.x , v1.y + v2.y , v1.z + v2.z , v1.w + v2.w );
}
__device__ float4 operator- ( const float4& v1 , const float4& v2 )
{
return make_float4( v1.x - v2.x , v1.y - v2.y , v1.z - v2.z , v1.w - v2.w );
}
__device__ float4 operator* ( const float4& v , const float d )
{
return make_float4( v.x * d , v.y * d , v.z * d , v.w * d );
}
__device__ float4 operator* ( const float d , const float4& v )
{
return make_float4( v.x * d , v.y * d , v.z * d , v.w * d );
}
//component-wise (Hadamard) product
__device__ float4 operator* ( const float4& v1 , const float4& v2 )
{
return make_float4( v1.x * v2.x , v1.y * v2.y , v1.z * v2.z , v1.w * v2.w );
}
__device__ float4 operator+= ( float4& v1 , const float4& v2 )
{
v1 = v1 + v2;
return v1;
}
__device__ float2 operator * ( const float d , const float2& v )
{
return make_float2( d * v.x , d * v.y );
}
__device__ float2 operator + ( const float2& v1 , const float2& v2 )
{
return make_float2( v1.x + v2.x , v1.y + v2.y );
}
__device__ float2 operator - ( const float2& v1 , const float2& v2 )
{
return make_float2( v1.x - v2.x , v1.y - v2.y );
}
//NOTE(review): this "floor" truncates toward zero via int casts, so it
//differs from mathematical floor for negative inputs; the texture-coordinate
//code in kernelPixelShader compensates by adding 1.0f to negative remainders.
__device__ float2 floor( const float2& v )
{
int x = (int) v.x ;
int y = (int) v.y ;
return make_float2( x , y );
}
//reflect direction
//Mirror 'dir' about 'normal' (assumes a unit normal — TODO confirm);
//w of the result is forced to 0.
__device__ float4 d_reflect( const float4& dir , const float4& normal )
{
float scale = ( -2.0f ) * d_dot( dir , normal );
float4 mirrored = dir + scale * normal;
return make_float4( mirrored.x , mirrored.y , mirrored.z , 0.0f );
}
//refraction direction
//Snell's-law refraction of 'dir' through a surface with normal 'normal';
//'rate' is the ratio of refraction indices. When the ray hits the back face
//the normal and the ratio are flipped; on total internal reflection the
//reflected direction is returned instead.
__device__ float4 d_refract( const float4& dir , float4 normal , float rate )
{
float4 r;
//ray exits the medium: flip the normal and invert the index ratio
if( d_dot( dir , normal ) > 0 )
{
normal = -1.0f * normal;
rate = 1.0f / rate;
}
float cos = -1.0f * d_dot( dir , normal );
//t < 0 means total internal reflection
float t = 1 - rate * rate * ( 1 - cos * cos );
if( t < 0 )
{
r = d_reflect( dir , normal );
}else
{
float cos2 = sqrt( t );
r = rate * dir + ( rate * cos - cos2 ) * normal ;
}
return r;
}
//check if the ray intersects with bounding box (slab method)
//Returns: x = entry parameter, y = exit parameter (clamped to 'length' when
//length > 0), z = 1 if the ray crosses the box else 0, w unused (0).
//FIX: the z-axis slab previously tested fabs(dir.y) instead of fabs(dir.z),
//so rays (nearly) parallel to the z axis divided by ~zero, and rays with a
//tiny dir.y skipped the z slab entirely.
__device__ float4 kernelIntersectBoundingBox( float4& ori , float4& dir , float4& min , float4& max , float length )
{
//the result; exit parameter starts at "infinity"
float4 result = make_float4( 0.0f , 9999999.0f , 0.0f , 0.0f );
//limit the maximum value when a search distance is given
if( length > 0 )
result.y = length;
float t1 , t2;
//x slab
if( fabs( dir.x ) < 0.0000001f )
{
//ray parallel to the slab: reject unless the origin lies inside it
if( ori.x > max.x || ori.x < min.x )
return result;
}else
{
t1 = ( max.x - ori.x ) / dir.x;
t2 = ( min.x - ori.x ) / dir.x;
if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; }
//shrink the [entry, exit] interval
if( t1 > result.x ) result.x = t1;
if( t2 < result.y ) result.y = t2;
if( result.x > result.y )
return result;
}
//y slab
if( fabs( dir.y ) < 0.0000001f )
{
if( ori.y > max.y || ori.y < min.y )
return result;
}else
{
t1 = ( max.y - ori.y ) / dir.y;
t2 = ( min.y - ori.y ) / dir.y;
if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; }
if( t1 > result.x ) result.x = t1;
if( t2 < result.y ) result.y = t2;
if( result.x > result.y )
return result;
}
//z slab (fixed: was fabs(dir.y) — copy/paste bug)
if( fabs( dir.z ) < 0.0000001f )
{
if( ori.z > max.z || ori.z < min.z )
return result;
}else
{
t1 = ( max.z - ori.z ) / dir.z;
t2 = ( min.z - ori.z ) / dir.z;
if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; }
if( t1 > result.x ) result.x = t1;
if( t2 < result.y ) result.y = t2;
if( result.x > result.y )
return result;
}
//all three slabs overlap: the ray crosses the box
result.z = 1.0f;
return result;
}
//check if the ray intersects with a plane
//Intersect the ray (ori, dir) with the plane of triangle (v1, v2, v3).
//Returns the hit point in xyz. w encodes validity: the point is computed as
//ori - t*dir, so w = -t (a non-negative ray parameter) when t <= 0, and
//w = -1 when the plane lies behind the ray.
//NOTE(review): no guard for d_dot(normal, dir) == 0 (ray parallel to the
//plane); t becomes non-finite and downstream checks generally reject the
//hit — confirm this is acceptable.
__device__ float4 kernelIntersectPlane( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir )
{
//w : >= 0 ( intersected point enable ) , < 0 ( disable )
float4 result = make_float4( 0.0f , 0.0f , 0.0f , 0.0f );
//get the normal of the plane
float4 normal = d_cross( v2 - v1 , v3 - v1 );
//get the factor
float t = d_dot( normal , ori - v1 ) / d_dot( normal , dir );
//set the result
result = ori - t * dir;
if( t <= 0.0f )
result.w = -t;
else
result.w = -1;
return result;
}
//check if the ray intersects with a triangle
//Plane intersection first, then a same-side containment test: the hit lies
//inside the triangle when the cross products of (hit - vertex) with each
//edge all point the same way, checked via two pairwise dot products with a
//small negative tolerance. w keeps the plane result's ray parameter when
//inside, and is set to -1 otherwise.
__device__ float4 kernelIntersectTriangle( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir )
{
//the result
float4 result = kernelIntersectPlane( v1 , v2 , v3 , ori , dir );
if( result.w < 0 )
return result;
//edge-orientation vectors at each vertex
float4 d1 = d_cross( result - v2 , v1 - v2 );
float4 d2 = d_cross( result - v3 , v2 - v3 );
float4 d3 = d_cross( result - v1 , v3 - v1 );
float f1 = d_dot( d1 , d2 );
float f2 = d_dot( d2 , d3 );
if( !( f1 >= -0.000000000000001f && f2 >= -0.000000000000001f ) )
result.w = -1.0f;
return result;
}
//check if the current point is in the bounding box
//Inclusive point-in-AABB test with a small tolerance; returns 1 inside, 0 outside.
__device__ int kernelPointInBoundingBox( const float4& p , const float4& min , const float4& max )
{
const float tolerance = 0.00001f;
//outside if any coordinate falls beyond the slightly enlarged box
const bool outside = p.x < min.x - tolerance || p.y < min.y - tolerance || p.z < min.z - tolerance ||
p.x > max.x + tolerance || p.y > max.y + tolerance || p.z > max.z + tolerance;
if( outside )
return false;
return true;
}
//do interpolation
//Barycentric weights of 'intersected' with respect to triangle (v1,v2,v3),
//computed from the sub-triangle areas opposite each vertex. Weights are
//returned in x/y/z and sum to 1.
//FIX: area.w was left uninitialized and then multiplied by d below
//(undefined value in the result's w); it is now explicitly zeroed. Callers
//only read x/y/z, so their behaviour is unchanged.
__device__ float4 kernelInterploted( const float4& v1 , const float4& v2 , const float4& v3 , const float4& intersected )
{
//vectors from the intersected point to each vertex
float4 e1 = intersected - v1;
float4 e2 = intersected - v2;
float4 e3 = intersected - v3;
//(twice the) area opposite each vertex — only the ratios matter
float4 area;
area.x = d_length( d_cross( e2 , e3 ) );
area.y = d_length( d_cross( e3 , e1 ) );
area.z = d_length( d_cross( e1 , e2 ) );
area.w = 0.0f;
float d = 1.0f / ( area.x + area.y + area.z );
return area * d;
}
//clear and initialize buffer
//Reset the accumulation buffer to black and give every pixel an identity
//entry in the pixel-index map (one thread per pixel).
__global__ void kernelInitBuffer( float4* buffer ,
int* markedBuffer ,
int pixelNum )
{
const int pixel = threadIdx.x + blockIdx.x * blockDim.x;
if( pixel >= pixelNum )
return;
buffer[pixel] = make_float4( 0.0f , 0.0f , 0.0f , 0.0f );
markedBuffer[pixel] = pixel;
}
//generate primary ray intersected result
//Re-intersect each ray with the triangle it is already known to hit
//(index[tid]) and store the hit point. The result's w is overwritten with
//the triangle id, replacing the validity flag kernelIntersectPlane returns.
__global__ void kernelGenerateIntersectedPoint( float4* rayOri ,
float4* rayDir ,
float4* vertexBuffer ,
int rayNum ,
int* index ,
float4* result )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= rayNum )
return;
//triangle this ray is known to intersect
int triId = index[tid];
//get the vertex
int id = 3 * triId;
float4 v1 = vertexBuffer[id];
float4 v2 = vertexBuffer[id+1];
float4 v3 = vertexBuffer[id+2];
//ray ori and dir
float4 ori = rayOri[tid];
float4 dir = rayDir[tid];
//get the intersected result
result[tid] = kernelIntersectPlane( v1 , v2 , v3 , ori , dir );
result[tid].w = triId;
}
//Generate primary rays
//One thread per pixel. viewInfo.x/.y are the viewport width/height; z and w
//scale the normalized device coordinates (projection terms — confirm against
//the caller). The camera position is taken from invViewMatrix entries
//12/13/14 and the screen-space vector is rotated by its upper 3x3 part.
//rayOri.w stores the pixel id; rayDir.w is initialized to 1.0f (ray weight).
__global__ void kernelGeneratePrimaryRays( float4 viewInfo ,
float* invViewMatrix ,
float4* rayOri ,
float4* rayDir )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= (int)viewInfo.x * (int)viewInfo.y )
return;
// get the pixel coordinate first
uint2 coord;
coord.x = tid % (int) viewInfo.x;
coord.y = tid / (int)viewInfo.x;
// compute the vector of the ray in screen space ([-1,1] NDC, y flipped)
float2 v;
v.x = ( ( ( 2.0f * coord.x ) / viewInfo.x ) - 1.0f ) / viewInfo.z;
v.y = -1.0f * ( ( ( 2.0f * coord.y ) / viewInfo.y ) - 1.0f ) / viewInfo.w;
//ray origin = camera position; w remembers the originating pixel
rayOri[tid] = make_float4( invViewMatrix[12] , invViewMatrix[13] , invViewMatrix[14] , tid );
//rotate the screen-space vector into world space
float4 dir;
dir.x = ( v.x * invViewMatrix[0] + v.y * invViewMatrix[4] + invViewMatrix[8] );
dir.y = ( v.x * invViewMatrix[1] + v.y * invViewMatrix[5] + invViewMatrix[9] );
dir.z = ( v.x * invViewMatrix[2] + v.y * invViewMatrix[6] + invViewMatrix[10] );
dir.w = 0.0f;
d_normalize( &dir );
rayDir[tid] = make_float4( dir.x , dir.y , dir.z , 1.0f );
}
//traverse the ray through kd-tree
//Stack-less kd-tree traversal. Each node occupies four float4s:
//[0] = (parent, left child, right child, triangle count),
//[1] = (split axis, or < 0 for a leaf; split position; ...),
//[2]/[3] = node AABB min/max — layout inferred from the reads below;
//confirm against the tree builder. Returns the nearest hit point with the
//triangle index in w, or w = -1 on a miss. length > 0 caps the search
//distance (shadow rays: any hit inside the range suffices, see the early
//break below). A depth-indexed bit mask records subtrees already visited,
//replacing an explicit stack.
__device__ float4 kernelTraverseRay( float4* kdTree ,
int* indexMap ,
int* offsetBuffer ,
float4* vertexBuffer ,
float4& rayOri ,
float4& rayDir ,
float length )
{
//the intersected result
float4 result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f );
//tree node information
float4 header;
float4 splitInfo;
//the bounding box
float4 minBB = kdTree[2];
float4 maxBB = kdTree[3];
//check if the ray intersects with the current bounding box of the root
result = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length );
//if the ray doesn't cross the kd-tree , just return
if( result.z < 0.5f )
{
result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f );
return result;
}
//current traversing node
int currentNodeIndex = 0;
//the mask to mark the traversed node (bit per depth level)
unsigned int mask = 0;
//current traverse depth
int currentTraverseDepth = 0;
//entry point of the ray into the current node's box; used to pick the
//child containing the ray's entry side
float4 inPoint = rayOri + result.x * rayDir ;
while( currentTraverseDepth >= 0 )
{
//descend from the current node to the leaf containing inPoint
do
{
//the current node offset
int currentNodeOffset = currentNodeIndex * 4;
//get the current node information
header = kdTree[ currentNodeOffset ];
splitInfo = kdTree[currentNodeOffset + 1 ];
//check if it's a leaf node
if( splitInfo.x < 0 )
break;
//get the split axis
int splitAxis = (int) splitInfo.x;
//entry-point coordinate along the split axis
float sPos = 0.0f;
if( splitAxis == 0 )
sPos = inPoint.x;
else if( splitAxis == 1 )
sPos = inPoint.y;
else if( splitAxis == 2 )
sPos = inPoint.z;
//update the virtual stack and traverse the node
if( splitInfo.y > sPos )
currentNodeIndex = (int)header.y;
else
currentNodeIndex = (int)header.z;
//increase the current traverse depth
currentTraverseDepth++;
}while( true );
//get the offset and triangle number
int triOffset = offsetBuffer[currentNodeIndex];
int triNumber = (int)header.w;
//min value
float minFactor = 9999999.0f;
if( length > 0 )
minFactor = length;
//triangle index
int oriTriIndex = -1;
//the bounding box of the leaf (hits outside it belong to other leaves)
minBB = kdTree[currentNodeIndex*4+2];
maxBB = kdTree[currentNodeIndex*4+3];
//intersect with the current triangles
for( int i = 0 ; i < triNumber ; i++ )
{
//get the triangles
int triIndex = indexMap[triOffset+i];
//get the vertex
float4 v1 = vertexBuffer[3*triIndex];
float4 v2 = vertexBuffer[3*triIndex+1];
float4 v3 = vertexBuffer[3*triIndex+2];
//get the intersected point
result = kernelIntersectTriangle( v1 , v2 , v3 , rayOri , rayDir );
//limit the factor
if( result.w > 0.0f && result.w < minFactor )
{
if( kernelPointInBoundingBox( result , minBB , maxBB ) )
{
minFactor = result.w;
oriTriIndex = triIndex;
//shadow ray: any occluder inside the range is enough
if( length > 0 )
break;
}
}
}
if( oriTriIndex >= 0 )
{
result = rayOri + minFactor * rayDir;
result.w = (float)oriTriIndex;
return result;
}
//back track: climb until an unvisited sibling subtree crosses the ray
while( currentTraverseDepth >= 0 )
{
if( currentTraverseDepth == 0 )
return make_float4( 0 , 0 , 0 , -1.0f );
//this depth's sibling was already visited: keep climbing
if( mask & ( 0x00000001 << currentTraverseDepth ) )
{
//update the mask
mask &= ~(0x00000001 << currentTraverseDepth );
//decrease the current depth;
currentTraverseDepth--;
//get to the father node
currentNodeIndex = (int)kdTree[ 4 * currentNodeIndex ].x;
//continue to next level
continue;
}
//sibling index: children are stored consecutively, left at even index
int otherNode = currentNodeIndex + 1;
if( currentNodeIndex % 2 == 0 )
otherNode -= 2;
//get the bounding box of the other node
int otherNodeOffset = 4 * otherNode;
minBB = kdTree[ otherNodeOffset + 2 ];
maxBB = kdTree[ otherNodeOffset + 3 ];
//get the intersected result
float4 bi = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length );
if( bi.z > 0.5f )
{
//update the current traverse node
currentNodeIndex = otherNode;
//update the inPoint
inPoint = rayOri + bi.x * rayDir ;
//mark this depth as visited and descend into the sibling
mask |= 0x00000001 << currentTraverseDepth;
break;
}else
{
//update the mask
mask &= ~( 0x00000001 << currentTraverseDepth );
//decrease current depth
currentTraverseDepth--;
//get to the father node
currentNodeIndex = (int) kdTree[ 4 * currentNodeIndex ].x;
}
}
}
//NOTE(review): this tail appears unreachable — the backtrack loop returns
//at depth 0 before the outer condition can fail; kept for safety.
result.w = -1.0f;
return result;
}
//get the intersected point
//Find the nearest triangle hit by every ray (one thread per ray); a length
//of -1 means the traversal has no maximum distance.
__global__ void kernelGetIntersectedPoint( float4* rayOri ,
float4* rayDir ,
float4* kdTree ,
int* indexMap ,
int* offsetBuffer ,
float4* vertexBuffer ,
int rayNumber ,
float4* result )
{
const int ray = threadIdx.x + blockIdx.x * blockDim.x;
if( ray >= rayNumber )
return;
result[ray] = kernelTraverseRay( kdTree , indexMap , offsetBuffer , vertexBuffer , rayOri[ray] , rayDir[ray] , -1.0f );
}
//do pixel shader here
//Shade every intersected pixel with a two-light Phong model plus shadow
//rays. Per pixel: intersected[tid] holds the hit point with the triangle id
//in w; rayDir[tid].w carries the ray's accumulated weight ("density").
//materialBuffer packs four float4s per material (ambient, diffuse, specular,
//properties); matprop.x >= 0 selects a texture whose header texel stores the
//width in y and height in z, with texel data starting at offset 1 (see the
//reads below). Writes the interpolated normal (material id in w) to
//destNormalBuffer and accumulates the saturated colour into imageBuffer at
//the pixel's original offset.
__global__ void kernelPixelShader( float4* intersected ,
float4* vertexBuffer ,
float4* normalBuffer ,
float2* texCoordinateBuffer ,
float4* kdTree ,
int* indexMap ,
int* offsetIndexBuffer,
float4* lightBuffer ,
int* attributeBuffer ,
float4* materialBuffer ,
int* textureOffset ,
float4* customTexture ,
int pixelNum ,
float4* rayDir ,
int* offsetBuffer ,
float4* destNormalBuffer ,
float4* imageBuffer )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= pixelNum )
return;
//get the triangle index
int triIndex = (int)intersected[tid].w;
int triOffset = 3 * triIndex;
float4 color = make_float4( 0.0f , 0.0f , 0.0f , 0.0f );
//nothing was hit for this pixel
if( triIndex < 0 )
return;
//get the material index
int matIndex = attributeBuffer[triIndex];
//the material buffer
float4 ambient = materialBuffer[ 4 * matIndex ];
float4 diffuse = materialBuffer[ 4 * matIndex + 1 ];
float4 specular = materialBuffer[ 4 * matIndex + 2 ];
float4 matprop = materialBuffer[ 4 * matIndex + 3 ];
//load the vertex
float4 v1 = vertexBuffer[ triOffset ];
float4 v2 = vertexBuffer[ triOffset + 1 ];
float4 v3 = vertexBuffer[ triOffset + 2 ];
//barycentric weights of the hit point
float4 interploted = kernelInterploted( v1 , v2 , v3 , intersected[tid] );
//interpolate the vertex normals
float4 n1 = normalBuffer[ triOffset ];
float4 n2 = normalBuffer[ triOffset + 1 ];
float4 n3 = normalBuffer[ triOffset + 2 ];
float4 normal = n1 * interploted.x + n2 * interploted.y + n3 * interploted.z;
d_normalize( &normal );
//update the normal buffer (material id stashed in w)
destNormalBuffer[tid] = normal;
destNormalBuffer[tid].w = matIndex;
//the accumulated weight for the pixel
float density = rayDir[tid].w;
//matprop.x >= 0 selects a texture to modulate the diffuse colour
if( matprop.x > -0.5f )
{
//load the texture coordinate
float2 t1 = texCoordinateBuffer[ triOffset ];
float2 t2 = texCoordinateBuffer[ triOffset + 1 ];
float2 t3 = texCoordinateBuffer[ triOffset + 2 ];
float2 texCoord = interploted.x * t1 + interploted.y * t2 + interploted.z * t3;
//wrap into [0,1) (floor truncates toward zero, hence the fix-ups below)
texCoord = texCoord - floor( texCoord );
if( texCoord.x < 0.0f ) texCoord.x += 1.0f;
if( texCoord.y < 0.0f ) texCoord.y += 1.0f;
//header texel: y = width, z = height; texels start at offset 1
float4* imgData = customTexture + textureOffset[(int)matprop.x];
int x = imgData[0].y * texCoord.x ;
int y = imgData[0].z * texCoord.y ;
int texOffset = y * imgData[0].y + x + 1;
diffuse = diffuse * (*(imgData + texOffset)) ;
}
//initialize the image buffer
color = ambient;
//shade the pixels (two light slots; w <= 0 disables a light)
for( int i = 0 ; i < 2 ; i++ )
{
if( lightBuffer[i].w < 0.01f )
continue;
//direction from the light to the surface
float4 lightDir = intersected[tid] - lightBuffer[i];
//shadow ray range: 98% of the distance so the surface itself is missed
float shadowLen = 0.98f * d_length(lightDir);
d_normalize( &lightDir );
//the dot product
float dotProduct = d_dot( lightDir , normal );
//surface faces away from the light
if( dotProduct > 0.0f )
continue;
{
//occlusion test from the light toward the surface
float4 shadowFactor = kernelTraverseRay( kdTree , indexMap , offsetIndexBuffer , vertexBuffer , lightBuffer[i] , lightDir , shadowLen );
if( shadowFactor.w >= 0.0f )
continue;
}
//diffuse term scaled by the light intensity (lightBuffer[i].w)
float lightDensity = d_clamp( -1.0f * dotProduct ) * lightBuffer[i].w;
//load the density of current pixel
color += diffuse * lightDensity ;
//add specular if possible (specular.w is the shininess exponent)
if( specular.w > 0 )
{
//reflect direction
float4 reflectDir = d_reflect( lightDir , normal );
d_normalize( &reflectDir );
//get the dot product
float d = d_clamp(-d_dot( reflectDir , rayDir[tid] ));
if( d > 0 )
color += pow( d , specular.w ) * specular;
}
}
//accumulate into the pixel's original slot (compacted rays carry it along)
int offset = offsetBuffer[tid];
imageBuffer[offset] = d_saturate( imageBuffer[offset] + d_saturate( color * density ) );
}
//generate next level rays
//Spawn each pixel's secondary ray: a reflection ray when matInfo.y > 0
//(used here as a reflection weight), otherwise a refraction ray when
//matInfo.z > 0 (transmission weight; matInfo.w is the refraction index).
//The new origin is nudged along the new direction (0.1 / 0.02), presumably
//to avoid re-hitting the same surface — confirm the epsilons against the
//scene scale. markedBuffer gets 1 for pixels that produced a ray (input to
//the compaction scan); dir.w carries the accumulated ray weight and ori.w
//the originating pixel id.
__global__ void kernelGenerateNextLevelRays( float4* materialInfo ,
float4* intersected ,
float4* backNormalBuffer ,
float4* rayOri ,
float4* rayDir ,
int rayNumber ,
float4* destRayOri ,
float4* destRayDir ,
int* markedBuffer )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= rayNumber )
return;
//set marked buffer zero
markedBuffer[tid] = 0;
//load the intersected point
float4 intersectedPoint = intersected[tid];
//get the intersected triangle index
int triIndex = (int)intersectedPoint.w;
if( triIndex < 0 )
return;
//load the normal (material id stashed in w by the pixel shader)
float4 normal = backNormalBuffer[tid];
//get the material index
int matIndex = (int)normal.w;
//fourth float4 of the material: reflection/refraction properties
float4 matInfo = materialInfo[4*matIndex+3];
//load the ray direction
float4 ori = rayOri[tid];
float4 dir = rayDir[tid];
//if there is reflection , mark result as true
if( matInfo.y > 0 )
{
float4 reflectDir = d_reflect( dir , normal );
d_normalize( &reflectDir );
//attenuate the ray weight by the reflection factor
reflectDir.w = dir.w * matInfo.y;
destRayDir[tid] = reflectDir;
destRayOri[tid] = intersectedPoint + reflectDir * 0.1f;
destRayOri[tid].w = ori.w;
markedBuffer[tid] = 1;
}else if( matInfo.z > 0 )
{
float4 refractDir = d_refract( dir , normal , 1.0f / matInfo.w );
d_normalize( &refractDir );
//attenuate the ray weight by the transmission factor
refractDir.w = dir.w * matInfo.z;
destRayDir[tid] = refractDir;
destRayOri[tid] = intersectedPoint + refractDir * 0.02f;
destRayOri[tid].w = ori.w;
markedBuffer[tid] = 1;
}
}
//copy new rays
//Stream-compact surviving rays. scanResult is the exclusive prefix sum of
//the per-ray keep flags, so a ray is kept exactly when scanResult[tid]
//differs from scanResult[tid+1]; reading tid+1 means the scan array must
//hold rayNumber+1 entries. 'offsets' records each compacted ray's
//originating pixel id (stored in srcRayOri.w).
__global__ void kernelCopyNewRays( float4* srcRayOri ,
float4* srcRayDir ,
int* scanResult ,
int rayNumber ,
float4* destRayOri ,
float4* destRayDir ,
int* offsets )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= rayNumber )
return;
//compacted destination slot for this ray
int offset = scanResult[tid];
if( offset != scanResult[tid+1] )
{
//set the result
destRayOri[offset] = srcRayOri[tid];
destRayDir[offset] = srcRayDir[tid];
offsets[offset] = (int)srcRayOri[tid].w;
}
}
//Do scan on GPU
//Blelloch exclusive prefix sum over 512-int segments; each 256-thread block
//owns two elements per thread. When oBlockRes > 0 the per-block total is
//written to blockRes so the caller can scan the totals recursively.
//The up-sweep deliberately stops at d == 2; the thread-0 swap below emulates
//the skipped root step and makes the scan exclusive.
//Assumes 'data' is padded to a multiple of 512 entries (the zeroing branch
//writes past 'number') — the host wrapper must allocate it that way; TODO
//confirm.
//FIX: two __syncthreads() barriers were missing around the blocksum
//computation — without them, threads could read the half-sums before the
//final up-sweep writes landed, and thread 0 could overwrite sharedMem[511]
//before other threads had read it (data races).
__global__ void kernelScan( int* data , int number , int oBlockRes , int* blockRes )
{
//the shared memory
__shared__ int sharedMem[512];
//get the thread id
int ltid = threadIdx.x;
int gtid = ltid + blockDim.x * blockIdx.x;
//the block sum
int blocksum = 0;
//zero the padding past the end of the data so it doesn't pollute the scan
if( 2 * gtid >= number )
{
data[ 2 * gtid ] = 0;
data[ 2 * gtid + 1 ] = 0;
}else if( 2 * gtid == number - 1 )
data[ 2 * gtid + 1 ] = 0;
//Load the data into the shared memory
sharedMem[2*ltid] = data[2*gtid];
sharedMem[2*ltid+1] = data[2*gtid+1];
//the offset
int offset = 1;
//up-sweep (reduce) phase, stopping one level short of the root
for( int d = 256 ; d > 1 ; d >>= 1 )
{
//sync the threads in a group
__syncthreads();
if( ltid < d )
{
int ai = offset * ( 2 * ltid + 1 ) - 1;
int bi = ai + offset;
sharedMem[bi] += sharedMem[ai];
}
offset *= 2;
}
//fix: make the final up-sweep writes visible before reading the half-sums
__syncthreads();
//block total = lower-half sum (index 255) + upper-half sum (index 511)
blocksum = sharedMem[511] + sharedMem[255];
//fix: all threads must have read the half-sums before thread 0 overwrites them
__syncthreads();
//emulate the skipped root step: seed the down-sweep with (0, lowerSum)
if( ltid == 0 )
{
sharedMem[511] = sharedMem[255];
sharedMem[255] = 0;
}
//down-sweep phase
for( int d = 2 ; d < 512 ; d *= 2 )
{
__syncthreads();
offset >>= 1;
if( ltid < d )
{
int ai = offset * ( 2 * ltid + 1 ) - 1 ;
int bi = ai + offset ;
int t = sharedMem[ai];
sharedMem[ai] = sharedMem[bi];
sharedMem[bi] += t;
}
}
__syncthreads();
//write the scanned values back
data[ 2 * gtid ] = sharedMem[ 2 * ltid ];
data[ 2 * gtid + 1 ] = sharedMem[ 2 * ltid + 1 ];
//Output Block Result
if( oBlockRes > 0 )
{
if( ltid == 0 )
{
//copy the result
blockRes[blockIdx.x] = blocksum;
}
}
}
//Add the block result to the segmented scan result
//Each element receives its 512-wide segment's scanned block total; the /512
//divisor matches the segment size used by kernelScan.
__global__ void kernelUniformAdd( int* data , int* blockResult )
{
const int gtid = threadIdx.x + blockDim.x * blockIdx.x;
data[gtid] += blockResult[gtid / 512];
}
//clear the noise of the image
//Simple despeckle pass: if a pixel's colour differs (vector distance > 0.4)
//from at least two of its 4-neighbours, replace it by 0.25 * (sum of the
//available neighbours). NOTE(review): the fixed 0.25 factor is applied even
//at borders where fewer than four neighbours contributed, slightly darkening
//corrected border pixels — confirm this is intended.
__global__ void kernelClearNoise( float4* imgData ,
int width ,
int height ,
float4* targetData )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= width * height )
return;
//colour-distance threshold for "differs from neighbour"
float threshold = 0.4f;
//number of neighbours the pixel differs from
int difference = 0;
//current index
int currentIndex = tid;
int leftIndex = tid - 1;
int rightIndex = tid + 1;
int upIndex = tid - width ;
int downIndex = tid + width ;
//the coordinate
int i = tid % width;
int j = tid / width;
//current color
float4 color = imgData[currentIndex];
float4 sum = make_float4( 0 , 0 , 0 , 0 );
if( i > 0 )
{
if( d_length( color - imgData[leftIndex] ) > threshold )
difference++;
sum += imgData[leftIndex];
}
if( i < width - 1 )
{
if( d_length( color - imgData[rightIndex] ) > threshold )
difference++;
sum += imgData[rightIndex];
}
if( j > 0 )
{
if( d_length( color - imgData[upIndex] ) > threshold )
difference++;
sum += imgData[upIndex];
}
if( j < height - 1 )
{
if( d_length( color - imgData[downIndex] ) > threshold )
difference++;
sum += imgData[downIndex];
}
//outlier: replace with the neighbour average
if( difference >= 2 )
color = sum * 0.25f;
targetData[tid] = color;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//Host wrapper: clear the accumulation buffer and reset the pixel-index map.
extern "C" void cudaInitBuffer( float4* buffer ,
int* markedBuffer ,
int pixelNum )
{
//256 threads per block, enough blocks to cover every pixel
const int threadsPerBlock = 256;
const int blocks = ( pixelNum + threadsPerBlock - 1 ) / threadsPerBlock;
kernelInitBuffer<<<blocks,threadsPerBlock>>>( buffer , markedBuffer , pixelNum );
}
//generate primary ray intersected result
//Host wrapper: one thread per ray, re-intersecting each ray with its known triangle.
extern "C" void cudaGenerateIntersectedPoint( float4* rayOri ,
float4* rayDir ,
float4* vertexBuffer ,
int rayNum ,
int* index ,
float4* result )
{
const int threadsPerBlock = 256;
const int blocks = ( rayNum + threadsPerBlock - 1 ) / threadsPerBlock;
kernelGenerateIntersectedPoint<<<blocks , threadsPerBlock>>>( rayOri , rayDir , vertexBuffer , rayNum , index , result );
}
//Generate primary rays
//Host wrapper: one thread per pixel (viewInfo.x * viewInfo.y rays).
extern "C" void cudaGeneratePrimaryRays( float4 viewInfo ,
float* invViewMatrix ,
float4* rayOri ,
float4* rayDir )
{
//total ray count comes from the viewport dimensions
const int rayNum = (int)( viewInfo.x * viewInfo.y );
const int threadsPerBlock = 256;
const int blocks = ( rayNum + threadsPerBlock - 1 ) / threadsPerBlock;
kernelGeneratePrimaryRays<<<blocks , threadsPerBlock>>>( viewInfo , invViewMatrix , rayOri , rayDir );
}
//get intersected point
//Host wrapper: trace every ray through the kd-tree (one thread per ray).
extern "C" void cudaGetIntersectedPoint( float4* rayOri ,
float4* rayDir ,
float4* kdTree ,
int* indexMap ,
int* offsetBuffer ,
float4* vertexBuffer ,
int rayNumber ,
float4* result )
{
const int threadsPerBlock = 256;
const int blocks = ( rayNumber + threadsPerBlock - 1 ) / threadsPerBlock;
kernelGetIntersectedPoint<<<blocks , threadsPerBlock>>>( rayOri , rayDir , kdTree , indexMap , offsetBuffer , vertexBuffer , rayNumber , result );
}
//do pixel shader
//Host wrapper: shade all intersected pixels (one thread per pixel).
extern "C" void cudaPixelShader( float4* intersected ,
float4* vertexBuffer ,
float4* normalBuffer ,
float2* texCoordinateBuffer ,
float4* kdTree ,
int* indexMap ,
int* offsetIndexBuffer ,
float4* lightBuffer ,
int* attributeBuffer ,
float4* materialBuffer ,
int* textureOffset ,
float4* customTexture ,
int pixelNum ,
float4* rayDir ,
int* offsetBuffer ,
float4* destNormalBuffer ,
float4* imageBuffer )
{
const int threadsPerBlock = 256;
const int blocks = ( pixelNum + threadsPerBlock - 1 ) / threadsPerBlock;
kernelPixelShader<<<blocks , threadsPerBlock>>>( intersected , vertexBuffer , normalBuffer , texCoordinateBuffer ,
kdTree , indexMap , offsetIndexBuffer , lightBuffer , attributeBuffer , materialBuffer ,
textureOffset , customTexture , pixelNum , rayDir , offsetBuffer , destNormalBuffer , imageBuffer );
}
//generate next level rays
//Host wrapper: spawn the reflection/refraction ray for each pixel.
extern "C" void cudaGenerateNextLevelRays( float4* materialInfo ,
float4* intersected ,
float4* backNormalBuffer ,
float4* rayOri ,
float4* rayDir ,
int rayNumber ,
float4* destRayOri ,
float4* destRayDir ,
int* markedBuffer )
{
const int threadsPerBlock = 256;
const int blocks = ( rayNumber + threadsPerBlock - 1 ) / threadsPerBlock;
kernelGenerateNextLevelRays<<<blocks , threadsPerBlock>>>( materialInfo , intersected , backNormalBuffer , rayOri , rayDir ,
rayNumber , destRayOri , destRayDir , markedBuffer );
}
//do scan on gpu
// Recursive exclusive scan: scans `data` in 512-element blocks, writing
// per-block sums into the global g_ScanSum[level] buffer, scans those sums
// (directly for small inputs, recursively otherwise), then adds the scanned
// block sums back as per-block offsets.
extern "C" void cudaScan( int* data , int num , int level )
{
/*	//allocate the number of data
	int* cpuData = new int[num];
	//pass the data from gpu to cpu
	cudaMemcpy( cpuData , data , sizeof( int ) * ( num - 1 ) , cudaMemcpyDeviceToHost );
	int last = 0;
	for( int i = 0 ; i < num ; i++ )
	{
		int oldLast = last;
		last += cpuData[i];
		cpuData[i] = oldLast;
	}
	//pass the data back from cpu to gpu
	cudaMemcpy( data , cpuData , sizeof( int ) * num , cudaMemcpyHostToDevice );
	//delete the data
	delete[] cpuData;*/
	//the dimension of the kernel
	// 256 threads per block but 512 elements per block — presumably each
	// thread handles two elements inside kernelScan; TODO confirm.
	dim3 threads( 256 );
	dim3 blocks( ( num + 511 ) / 512 );
	//call the kernel
	kernelScan<<<blocks , threads>>>( data , num , 1 , g_ScanSum[level] );
	//scan the block Result
	// 262144 = 512*512: above this, the block sums themselves exceed one
	// scan block and must be scanned recursively at the next level.
	if( num <= 262144 )
		kernelScan<<<1 , threads>>>( g_ScanSum[level] , blocks.x , -1 , data );
	else
		cudaScan( g_ScanSum[level] , blocks.x , level + 1 );
	//add the offset
	threads.x = 512;
	kernelUniformAdd<<< blocks , threads >>> ( data , g_ScanSum[level] );
}
//copy new rays
extern "C" void cudaCopyNewRays( float4* srcRayOri ,
				float4* srcRayDir ,
				int* scanResult ,
				int rayNumber ,
				float4* destRayOri ,
				float4* destRayDir ,
				int* offsets )
{
	// One thread per ray; grid rounded up to cover the whole range.
	const int threadsPerBlock = 256;
	const int blockCount = ( rayNumber + threadsPerBlock - 1 ) / threadsPerBlock;
	// Launch the compaction/copy kernel.
	kernelCopyNewRays<<<blockCount , threadsPerBlock>>>( srcRayOri , srcRayDir , scanResult , rayNumber , destRayOri , destRayDir , offsets );
}
//clear the noise of the image
extern "C" void cudaClearNoise( float4* imgData ,
				int width ,
				int height ,
				float4* targetData )
{
	//the block and thread number: one thread per pixel
	int threadNum = 256;
	// Derive the grid size from threadNum instead of the hard-coded
	// `(… + 255) / 256` the original used, so block and grid size stay
	// consistent if threadNum is ever changed (same value today).
	int blockNum = ( width * height + threadNum - 1 ) / threadNum;
	//call the kernel
	kernelClearNoise<<<blockNum , threadNum>>>( imgData , width , height , targetData );
}
5,418 | #include "includes.h"
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
	// For each 2-D point in xyz (b batches of n points), find its nearest
	// neighbour among the m points of xyz2 of the same batch; write the
	// squared distance into result and the neighbour's index into result_i.
	// xyz2 is staged through shared memory in tiles of `batch` points.
	const int batch=512;
	__shared__ float buf[batch*2];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		for (int k2=0;k2<m;k2+=batch){
			int end_k=min(m,k2+batch)-k2; // points in this tile
			// Cooperative load of the tile (end_k x/y pairs) into shared memory.
			for (int j=threadIdx.x;j<end_k*2;j+=blockDim.x){
				buf[j]=xyz2[(i*m+k2)*2+j];
			}
			__syncthreads();
			for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
				float x1=xyz[(i*n+j)*2+0];
				float y1=xyz[(i*n+j)*2+1];
				int best_i=0;
				float best=0; // overwritten at k==0 via the `k==0 ||` guards
				// BUG FIX: this was `end_k-(end_k&2)`, which only clears bit 1
				// and can leave end_ka a non-multiple of 4; the 4-way unrolled
				// loop below would then read up to 3 points past the valid
				// part of the tile. `&3` rounds down to a multiple of 4.
				int end_ka=end_k-(end_k&3);
				if (end_ka==batch){
					// Full tile: constant trip count lets the compiler unroll fully.
					for (int k=0;k<batch;k+=4){
						{
							float x2=buf[k*2+0]-x1;
							float y2=buf[k*2+1]-y1;
							float d=x2*x2+y2*y2;
							if (k==0 || d<best){
								best=d;
								best_i=k+k2;
							}
						}
						{
							float x2=buf[k*2+2]-x1;
							float y2=buf[k*2+3]-y1;
							float d=x2*x2+y2*y2;
							if (d<best){
								best=d;
								best_i=k+k2+1;
							}
						}
						{
							float x2=buf[k*2+4]-x1;
							float y2=buf[k*2+5]-y1;
							float d=x2*x2+y2*y2;
							if (d<best){
								best=d;
								best_i=k+k2+2;
							}
						}
						{
							float x2=buf[k*2+6]-x1;
							float y2=buf[k*2+7]-y1;
							float d=x2*x2+y2*y2;
							if (d<best){
								best=d;
								best_i=k+k2+3;
							}
						}
					}
				}else{
					// Partial tile: same 4-way unrolled body over the rounded-down count.
					for (int k=0;k<end_ka;k+=4){
						{
							float x2=buf[k*2+0]-x1;
							float y2=buf[k*2+1]-y1;
							float d=x2*x2+y2*y2;
							if (k==0 || d<best){
								best=d;
								best_i=k+k2;
							}
						}
						{
							float x2=buf[k*2+2]-x1;
							float y2=buf[k*2+3]-y1;
							float d=x2*x2+y2*y2;
							if (d<best){
								best=d;
								best_i=k+k2+1;
							}
						}
						{
							float x2=buf[k*2+4]-x1;
							float y2=buf[k*2+5]-y1;
							float d=x2*x2+y2*y2;
							if (d<best){
								best=d;
								best_i=k+k2+2;
							}
						}
						{
							float x2=buf[k*2+6]-x1;
							float y2=buf[k*2+7]-y1;
							float d=x2*x2+y2*y2;
							if (d<best){
								best=d;
								best_i=k+k2+3;
							}
						}
					}
				}
				// Remainder loop for the 0-3 points past the unrolled portion.
				for (int k=end_ka;k<end_k;k++){
					float x2=buf[k*2+0]-x1;
					float y2=buf[k*2+1]-y1;
					float d=x2*x2+y2*y2;
					if (k==0 || d<best){
						best=d;
						best_i=k+k2;
					}
				}
				// Merge this tile's best into the global best for point j.
				if (k2==0 || result[(i*n+j)]>best){
					result[(i*n+j)]=best;
					result_i[(i*n+j)]=best_i;
				}
			}
			// Barrier before the next tile overwrites buf.
			__syncthreads();
		}
	}
}
5,419 | #include "TDES.cuh"
namespace TDESCA {
// Triple-DES EDE encryption: DES-encrypt with key1, DES-decrypt with key2,
// then DES-encrypt with key3.
__device__ chunk64 TDES::Encode(chunk64 key1, chunk64 key2, chunk64 key3, chunk64 data)
{
    chunk64 stage = cipherMachine.Encode(key1, data);
    stage = cipherMachine.Decode(key2, stage);
    return cipherMachine.Encode(key3, stage);
}
// Inverse of Encode: decrypt with key3, encrypt with key2, decrypt with key1.
__device__ chunk64 TDES::Decode(chunk64 key1, chunk64 key2, chunk64 key3, chunk64 data)
{
    chunk64 stage = cipherMachine.Decode(key3, data);
    stage = cipherMachine.Encode(key2, stage);
    return cipherMachine.Decode(key1, stage);
}
} // namespace TDESCA
|
5,420 | // 20181201
// Yuqiong Li
// a basic CUDA function to test working with device constant memory
#include <stdio.h>
#include <cuda.h>
const unsigned int N = 10; // size of vectors
__constant__ float const_d_a[N]; // filter in device const memory
int main()
{
    // Round-trip test: fill host vector a, copy it into device constant
    // memory, read it back into b, and compare checksums.
    float * a, * b; // a: source vector, b: readback destination
    a = (float *)calloc(N, sizeof(float));
    b = (float *)calloc(N, sizeof(float));
    /**************************** Exp 1: sequential ***************************/
    int i;
    int size = N * sizeof(float);
    for (i = 0; i < N; i++){
        a[i] = (float)i / 0.23 + 1;
    }
    // 1. copy a to constant memory
    cudaError_t err = cudaMemcpyToSymbol(const_d_a, a, size);
    if (err != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // 2. copy the constant-memory contents back into b
    cudaError_t err2 = cudaMemcpyFromSymbol(b, const_d_a, size);
    if (err2 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // BUG FIX: the accumulators were previously uninitialized, so the
    // checksums started from indeterminate values (undefined behavior).
    double checksum0 = 0.0, checksum1 = 0.0;
    for (i = 0; i < N; i++){
        checksum0 += a[i];
        checksum1 += b[i];
    }
    printf("Checksum for elements in host memory is %f\n.", checksum0);
    printf("Checksum for elements in constant memory is %f\n.", checksum1);
    // Release the host buffers (previously leaked).
    free(a);
    free(b);
    return 0;
}
|
5,421 | #include "cuda.h"
#include "cuda_runtime_api.h"
#include <inttypes.h>
// Generates quasi-random samples in [-1, 1) from the integer input stream
// using a Gray-code / direction-number construction (Sobol-like).
// One output element per grid-stride iteration.
extern "C" __global__ void kernel0(int32_t* input, int32_t input_length,
                                   double* output,
                                   int32_t output_length, int32_t output_size)
{
    // Grid-stride loop: works for any launch configuration.
    for (int32_t j = blockIdx.x * blockDim.x + threadIdx.x;
         j < output_size;
         j += blockDim.x * gridDim.x) {
        uint32_t xa = 0U;          // accumulated direction-number XOR
        uint8_t lastTestBit = 1;   // whether the previous set bit XORed into xa
        for (int i = 0; i < 30; ++i) {
            uint32_t n;
            uint8_t testBit;
            // j % (x_04 > (input_length > x_04 ? x_04 : input_length) ? input_length > x_04 ? x_04 : input_length : x_04)
            // (u > (t > u ? u : t) ? (t > u ? u : t) : u)
            // <=>
            // (u > min(u,t) ? min(u,t) : u)
            // <=>
            // min(u,t)
            n = (uint32_t) input[j % min(input_length, output_size)]; // this should be hoisted out of the loop (does nvcc do it?)
            // Gray code of n: adjacent n values differ in exactly one bit.
            uint32_t grayCode = n ^ (n >> 1U);
            testBit = (grayCode & 1 << i) != 0; // `<<` binds tighter than `&`: tests bit i
            if (testBit) {
                uint32_t v;
                // Parses as 1U << (29U - i): additive binds tighter than shift.
                v = 1U << 29U - (uint32_t) i; // direction numbers (these should have been arguments to the kernel)
                if (lastTestBit) {
                    xa = v ^ xa;
                } else {
                    // NOTE(review): overwriting (not XORing) here discards the
                    // accumulated bits — confirm this matches the intended
                    // recurrence; lastTestBit is also only updated inside
                    // `if (testBit)`, so it stays 1 until a bit is ever set.
                    xa = v;
                }
                lastTestBit = testBit;
            }
        }
        // Scale the 30-bit integer into a double in [0, 1) (or negative if
        // the cast to int32_t goes negative — xa's top bits stay clear here).
        output[j] = (double) (int32_t) xa / (double) (1 << 30);
    }
}
|
5,422 | // X = H , y = W
#include<iostream>
#include<stdio.h>
#include<cuda.h>
#include<ctime>
#include<cstdlib>
#include<cuda_profiler_api.h>
using namespace std;
// serially intializing tensor of the image
// serially intializing tensor of the image
void tensor_init(int * image, int N, int H, int W, int C){
	/*
	Fill the N*H*W*C image tensor with pseudo-random byte values (0..255)
	to simulate image data. Runs on the CPU.
	N : batch size   H : height   W : width   C : channels
	*/
	srand(time(0));
	const int total = N * H * W * C;
	for(int idx = 0; idx < total; idx++){
		image[idx] = rand() % 256; // 8-bit pixel range
	}
}
//serially intialising the kernel with given dimensions
void kernel_init(int *krnl, int d, int h, int w,int c){
/*
Initialise the kernel(s) for the convolution operation.
Runs on the CPU
d : Number of kernel
h : Height of the kernel
w : Width of the kernel
c : channels for the kernels
*/
int tot = d*h*w*c;
for(int i = 0; i< tot;i++){
if(i%2 ==0){
krnl[i] = rand()%10;
}
else{
krnl[i] = -rand()%10;
//random initializing of the image tensor
// cout<<krnl[i]<<endl;
}
}
}
// intialising the mask for checking sparsity of the block
void mask_init(int *mask,int N,int H,int W,int sparsity_perc){
/*
Initialise the tensor for the convolution operation.
Runs on the CPU
N : Batch Size of the image
H : Height of the image
W : Width of the image
*/
int tot = N*H*W;
for(int i = 0; i< tot;i++){
if(rand()%100<=sparsity_perc){
mask[i] = 0;
}
else{
mask[i] = 1;
} //random initializing of the image tensor
// cout<<mask[i]<<endl;
}
}
// ************************ device kernels **************** to be optimizzed ***************************************
__device__ bool checksparse(int *d_mask,int cx,int cy,int H, int W, int C,int h,int w,int S,int n){// may be i can have some more conditions
	/*
	device function to check for sparsity of the h x w window centred at
	(cx, cy): returns true only when EVERY mask entry in the window is 0
	(fully sparse); returns false as soon as any entry is 1.
	(device int *) d_mask : pointer to the mask of the image
	(int) n: number of the image
	(int) h: height of the kernels
	(int) w: Weight of the kernels
	(int) cx: x coordinate of the center
	(int) cy: y coordinate of the center
	NOTE(review): the mask is indexed as d_mask[n*H*W + W*y + x] with
	x = cx + l (l spanning the kernel *height*) — verify the x/y roles
	against the mask layout used by the caller; S and C are unused here.
	*/
	int x = 0;
	int y = 0;
	for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){
		for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){
			x = cx + l;
			y = cy + p;
			if( d_mask[n*H*W + W*y + x ] == 1 ){
				return false;
			}
		}
	}
	return true;
}
__global__ void gather(int *d_mask, int *d_tensor, int *d_mat,unsigned int *row_address, int * d_row_map, int N , int H , int W , int h, int w, int C , int S ){
	/*
	Gather kernel from the paper: collects every non-sparse h x w window of
	each image into one row of d_mat (h*w*C values), claiming row slots with
	atomicInc and recording each row's origin in d_row_map.
	(device int *) d_mask : pointer to the mask of the image
	(device int *) d_tensor : pointer to the tensor containing all the images
	(device int *) d_mat : output rows, one per gathered window
	(device unsigned int *) row_address : running count of gathered rows
	(device int *) d_row_map : per row, the (x, y, image) triple it came from
	(int) N: number of the images in the given tensor
	(int) H, W, C: image height, width, channels
	(int) h, w: kernel height and width
	(int) S: mask down-sampling stride
	*/
	int id2 = blockIdx.x*blockDim.x + threadIdx.x; // flat pixel index within one image
	int in = blockIdx.y; // image index: one grid row (y) per image
	int x_dim = id2%W;// along the height of the image
	int y_dim = id2/W;// along the length oh the image
	// Skip windows whose extent would fall outside the (mask-scaled) image.
	if(x_dim > 0 && x_dim/S + h < H/S){// condition considering s = 1 for now
		if(y_dim > 0 && y_dim/S +w < W/S){
			int cen_x = x_dim + (h-1)/2;
			int cen_y = y_dim + (w-1)/2;
			// printf("%d,%d,%d\n",checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in),cen_x,cen_y);
			// Only gather windows that are NOT fully sparse.
			if(!checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in)){
				// Atomically claim the next free output row.
				unsigned int val = atomicInc(row_address,1000000);
				int col_index = 0;
				// Copy the window, zeroing channels whose mask entry is 0.
				for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){
					for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){
						for( int q=0; q < C; q++){
							d_mat[val*h*w*C+col_index] = d_mask[in*(H/S)*(W/S)+((int)((cen_x+l)/S))*(W/S)+((int)((cen_y+p)/S))]?d_tensor[in*H*W*C+(cen_x+l)*W*C+(cen_y+p)*C+q]:0;
							col_index += 1;
						}
					}
				}
				d_row_map[val*3+0] = x_dim; /* Store the original x-coordinate corresponding to a row into a map */
				d_row_map[val*3+1] = y_dim; /* Store the original y-coordinate corresponding to a row into a map */
				d_row_map[val*3+2] = in; /* Store the image corresponding to a row in a map */
				// printf("%d\n",val);
			}
		}
	}
}
__global__ void convolution(int *d_mat,int *d_kernel,unsigned int *number_rows ,int d,int *output_mat,int h,int w,int C){
	/*
	Dense multiply of every gathered patch row with every kernel:
	output_mat[row][kernel] = dot(d_mat row, kernel weights).
	(int *)d_mat : gathered patches, one row of h*w*C values each
	(int *)d_kernel : d kernels, each h*w*C weights, stored consecutively
	(int *)output_mat : result matrix, number_rows x d
	(unsigned int *)number_rows : number of valid rows in d_mat
	(int) d : number of kernels
	(int) h, w, C : kernel height, width and channels
	*/
	int t_idx = blockDim.x*blockIdx.x + threadIdx.x;// patch row handled by this thread
	int t_idy = blockDim.y*blockIdx.y + threadIdx.y;// kernel handled by this thread
	output_mat[t_idx*d + t_idy] = 0;
	int offset = h*w*C; // values per patch row == weights per kernel
	if(t_idx < *number_rows && t_idy < d){
		// Accumulate the dot product in a register, then store once
		// (the original did a global read-modify-write per term).
		int acc = 0;
		for(int i = 0; i < offset; i++ ){
			// BUG FIX: the kernel stride was t_idy*h*w, but each kernel
			// holds h*w*C weights, so every kernel beyond the first was
			// read from the wrong offset whenever C > 1.
			acc += d_kernel[t_idy*offset + i]*d_mat[offset*t_idx + i];
		}
		output_mat[t_idx*d + t_idy] = acc;
	}
}
__global__ void scatter(int *output_mat, int *d_row_map, unsigned int *number_rows, int *output,int H,int W,int d,int h,int w){
	/*
	Putting the pieces back together in the final image (restoring the final
	output): each gathered row's convolution results are written back to the
	position recorded for it in d_row_map, one output plane per kernel.
	(int *)output_mat : convolution results, one row per gathered patch, d values each
	(int *)d_row_map : per row, the (x, y, image) triple it came from
	(unsigned int *)number_rows : number of valid rows
	(int *)output : final tensor, N x d x (H-h+1)*(W-w+1)
	(int) H, W: image height and width
	(int) h, w: kernel height and width
	(int) d : number of kernels
	*/
	int image_size = (H - h + 1)*(W-w+1);
	// image size after the convolution happens
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;// row index into output_mat
	int t_idy = blockDim.y*blockIdx.y + threadIdx.y;// kernel index
	// printf("%d,%d,%d \n",t_idx,t_idy, 0);
	if(t_idx<*number_rows && t_idy <d){
		// Shift the stored center back to top-left-based convolved coordinates.
		int c_x = d_row_map[t_idx*3] - (h-1)/2; // convert the center to convoluted positions
		int c_y = d_row_map[t_idx*3 + 1] - (w-1)/2;
		int N = d_row_map[t_idx*3 + 2];
		// NOTE(review): the row stride here is W, but each convolved plane is
		// (W-w+1) wide — verify whether W*(c_y) should be (W-w+1)*(c_y); as
		// written, positions near the right edge of one row alias into the next.
		output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x] = output_mat[t_idx*d + t_idy ];
		//printf("%d,%d,%d\n",output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x],output_mat[t_idx*d + t_idy ],N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x);
	}
}
int main(){
	// Sparse convolution demo: gather non-sparse windows, convolve them with
	// d kernels, and scatter the results back, using one CUDA stream per
	// pair of images.
	int BLOCK_SIZE = 32;
	int N,H,W,C;
	/*
	(int) N: number of the images in the given tensor
	(int) H: Height of the image
	(int) W: Weight of the image
	(int) C: Channels of the image
	*/
	cout<<"Gimme Image Block Dimensions"<<endl;
	N = 4;
	H = 256;
	W = 256;
	C = 3;
	int *tensor = (int *)malloc(N*H*W*C*sizeof(int));
	tensor_init(tensor,N,H,W,C);
	int h,w,d;
	/*
	(int) h: height of the kernels
	(int) w: Weight of the kernels
	(int) d : number of kernels
	*/
	int c = C;
	cout<<"Gimme krnl Block Dimension"<<endl;
	d = 16;
	h = 4;
	w = 4;
	int *kernel = (int *)malloc(sizeof(int)*h*w*c*d);
	kernel_init(kernel,d,h,w,C);
	// space for d kernels
	int per_sp;
	cout<<"Gimme Percent Sparcity of the block"<<endl;
	per_sp =70;
	int S = 1;// assuming the mask dimension to be 1 for now
	int *mask = (int * )malloc(sizeof(int)*N*H*W*C); // NOTE: only N*H*W entries are initialized/used
	mask_init(mask,N,H,W,per_sp);
	int num_images = 2;
	int n_streams = N/2;
	// memory allocation for tensor, kernel and mask on the device
	int *d_tensor;
	int *d_kernel;
	int *d_mask;
	cudaMalloc(&d_tensor,sizeof(int)*N*H*W*C);// 4-D tensor containing images for the convolution operation
	cudaMalloc(&d_kernel,sizeof(int)*d*h*w*c);// for the kernels to stored in the matrix
	cudaMalloc(&d_mask,sizeof(int)*N*H*W); //mask for checking the sparsity of blocks for the kernel
	// memory copying to the device
	cudaMemcpy( d_kernel, kernel, sizeof(int)*d*h*w*c, cudaMemcpyHostToDevice );
	cudaMemcpy( d_mask, mask, sizeof(int)*N*H*W, cudaMemcpyHostToDevice );
	cudaMemcpy( d_tensor, tensor, sizeof(int)*N*H*W*C, cudaMemcpyHostToDevice );
	// buffers filled by the gather kernel
	int * d_mat;
	int * d_row_map;
	unsigned int *row_address;
	cudaMalloc(&d_mat,sizeof(int)*h*w*C*(H-h+1)*(W-w+1)*N); // worst case: every window is non-sparse
	cudaMalloc(&row_address,n_streams*sizeof( unsigned int));
	// BUG FIX: this previously passed &row_address — the address of the HOST
	// pointer variable — so the device counters were never zeroed and
	// atomicInc started from garbage. cudaMemset needs the device pointer.
	cudaMemset( row_address, 0, n_streams*sizeof(unsigned int) );
	cudaMalloc(&d_row_map,sizeof(int)*(H-h+1)*(W-w+1)*N*3);
	// create streams:
	// it can roughly handle about 1000 images at once
	cudaStream_t streams[1000]; /* Declaring a set of CUDA streams */
	for( int i=0; i<n_streams; i++ ) cudaStreamCreate(&streams[i]); /* Initializing a set of streams to work on a set of each image */
	// creating memory for the intermediate kernels
	int * output_mat; /// for the output of the convolution kernel
	cudaMalloc(&output_mat,sizeof(int)*(H-h+1)*(W-w+1)*d*N);
	// final output matrix we all know its dimension already
	int * output;
	cudaMalloc(&output,sizeof(int)*N*(H-h+1)*(W-w+1)*d);
	// profiling features -----------------------
	cudaEvent_t start,stop; /* CUDA events to time the program */
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	// blocks and threads for different kernel launches -----------
	// for the gather kernel
	dim3 Block(H*W/BLOCK_SIZE,num_images,1);
	dim3 Thread(BLOCK_SIZE,1,1);
	//convolution kernel
	dim3 Block_c(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1);
	dim3 Thread_c(BLOCK_SIZE,1,1);
	// offsets for the different arrays -------------
	int offset; // tensor offset
	int mask_offset; // mask offset
	int mat_offset; // d_mat offset
	int map_offset; // d_row_map offset
	int o_offset; //output offset
	int om_offset; // output_mat offset
	unsigned int *number_rows = (unsigned int *)malloc(sizeof(unsigned int)*n_streams );
	// Allocating memory for the output tensor
	int * h_output = (int *)malloc(sizeof(int)*N*(H-h+1)*(W-w+1)*d);
	//scatter kernel ---
	dim3 Block_s(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1);
	dim3 Thread_s(BLOCK_SIZE,1,1);
	//launching the different streams
	for(int j=0; j<n_streams; j++){
		/* Initialize a set of off-sets for each stream */
		offset = j*H*W*C*num_images; // tensor offset will be needed
		mask_offset = j*(H)*(W)*num_images; /// mask offset will be needed
		mat_offset = h*w*C*(H-h+1)*(W-w+1)*j*num_images;//matrix offset for the value to be in the matrix
		map_offset = 3*(H-h+1)*(W-w+1)*j*num_images;//offset for d_row_map
		o_offset = (H-h+1)*(W-w+1)*d*j*num_images;//offset for convolution output
		om_offset = d*(H-h+1)*(W-w+1)*j*num_images;//final output offset
		// now the kernels..............
		// gather kernel
		gather<<<Block, Thread, 0, streams[j]>>>(&d_mask[mask_offset], &d_tensor[offset], &d_mat[mat_offset],&row_address[j], &d_row_map[map_offset],N , H , W , h, w, C , S);
		// cudaMemcpyAsync(&number_rows[j], &row_address[j], sizeof(unsigned int), cudaMemcpyDeviceToHost,streams[j]);
		//convolution kernel
		convolution<<<Block_c, Thread_c,0, streams[j]>>>(&d_mat[mat_offset], d_kernel, &row_address[j], d, &output_mat[om_offset],h,w,C);
		// convert the result back to its original layout
		scatter<<<Block_s,Thread_s, 0 , streams[j]>>>(&output_mat[om_offset], &d_row_map[map_offset], &row_address[j], &output[o_offset],H, W, d, h, w);
	}
	// Blocking copy: implicitly waits for all streams to finish.
	cudaMemcpy(h_output,output,sizeof(int)*(H-h+1)*(W-w+1)*d*N,cudaMemcpyDeviceToHost);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	float run_time = 0.0;
	cudaEventElapsedTime(&run_time,start,stop);
	cout<<run_time<<endl;
	// Destroying all the streams
	for( int i=0; i<n_streams; i++ ) cudaStreamDestroy(streams[i]);
	// Release device and host resources (previously leaked).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(d_tensor);
	cudaFree(d_kernel);
	cudaFree(d_mask);
	cudaFree(d_mat);
	cudaFree(d_row_map);
	cudaFree(row_address);
	cudaFree(output_mat);
	cudaFree(output);
	free(tensor);
	free(kernel);
	free(mask);
	free(number_rows);
	free(h_output);
	return 0;
}
5,423 | #include "cuda_helpers.cuh"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Report a failed CUDA call, reset the device, and abort the process.
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
    if (result == cudaSuccess) {
        return;
    }
    std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
        file << ":" << line << " '" << func << "' \n";
    // Make sure we call CUDA Device Reset before exiting
    cudaDeviceReset();
    exit(99);
}
__global__
void init_rand_state(curandState* d_rand_state, int width, int height) {
    // One curand state per pixel of a width x height image, indexed column-major
    // (pixel_index = x*height + y). Threads outside the image do nothing.
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < width && y < height) {
        const int pixel_index = x * height + y;
        // Every thread uses the same seed with a distinct sequence id and no offset.
        curand_init(1984, pixel_index, 0, &d_rand_state[pixel_index]);
    }
}
5,424 | #include "includes.h"
__global__ void fillCondensedAdjacencyKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *adjacencyOut, int *permutedAdjIndexesIn, int *permutedAdjacencyIn)
{
   // One thread per condensed row: copy that row's adjacency entries from
   // the permuted input (located via aggregateIdx) into the condensed layout.
   const int row = blockIdx.x * blockDim.x + threadIdx.x;
   if(row >= size)
      return;
   const int srcBegin = permutedAdjIndexesIn[ aggregateIdx[row] ];
   const int dstBegin = adjIndexesOut[row];
   const int rowLength = adjIndexesOut[row + 1] - dstBegin;
   // Copy adjacency over
   for(int k = 0; k < rowLength; k++)
      adjacencyOut[dstBegin + k] = permutedAdjacencyIn[srcBegin + k];
}
5,425 | #include <stdint.h>
#include <stdio.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#define LIBRARY 0
#define HOST 1
#define DEVICE 2
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// RAII wrapper around a pair of CUDA events for timing GPU work
// submitted to the default stream.
struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;
    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }
    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    // Record the start event and wait until it has actually completed.
    void Start()
    {
        cudaEventRecord(start, 0);
        cudaEventSynchronize(start);
    }
    // Record the stop event (asynchronous; Elapsed() performs the wait).
    void Stop() { cudaEventRecord(stop, 0); }
    // Milliseconds between Start() and Stop(); blocks until stop completes.
    float Elapsed()
    {
        float elapsed;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};
// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Ta sẽ sử dụng ý tưởng từ thuật toán sắp xếp tuần tự mới chứ không phải
// thuật toán mà ta đã sử dụng trong bài tập số 3
/**
Sắp xếp tuần tự trên host
@blockSize kích thước một block mà ta sẽ duyệt (Ta vẫn duyệt tuần tự)
*/
void sortByHost(const uint32_t *in, int n, uint32_t *out, int nBits, int blockSize)
{
    // Sequential simulation of the block-wise GPU LSD radix sort:
    // per-block histograms -> column-major exclusive scan over them ->
    // local per-block sort by the current digit -> rank computation -> scatter.
    // blockSize is the size of each block that is traversed (still sequentially).
    int nBins = 1 << nBits; // the number of bins is 2^nBits
    uint32_t *src = (uint32_t *)malloc(n * sizeof(uint32_t)); // temporary buffer holding the current input
    memcpy(src, in, n * sizeof(uint32_t)); // copy the data from in to src
    uint32_t *originalSrc = src; // Use originalSrc to free memory later
    uint32_t *dst = out; // result array
    // Allocate the arrays needed while running
    int sizeHist = nBins * ((n - 1) / blockSize +1); // size of listLocalHist = (number of bins) x (number of blocks)
    int *listLocalHist = (int *)malloc(sizeHist * sizeof(int)); // array holding the per-block local histograms
    int *listLocalHistConvert = (int *)malloc(sizeHist * sizeof(int)); // column-major rearrangement of listLocalHist
    int *histScan = (int *)malloc(sizeHist * sizeof(int)); // exclusive scan of listLocalHistConvert
    int *eleBefore =(int *)malloc(n * sizeof(int)); // per element: count of earlier elements in its block with the same digit
    int numBlock = (n - 1) / blockSize + 1; // number of blocks needed
    for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
    {
        // Each block computes the local histogram of the digit under
        // consideration on its part of the data, stored in listLocalHist
        memset(listLocalHist, 0, sizeHist * sizeof(int)); // reset listLocalHist to 0
        for (int blkIdx = 0; blkIdx < numBlock; blkIdx++)
        {
            int start = blkIdx * blockSize; // first index of this block
            int end = (blkIdx == numBlock - 1)? n : start + blockSize; // one-past-last index of this block
            for (int index = start; index < end; index++)
            { // visit all elements of the block
                int bin = (src[index] >> bit) & (nBins - 1);
                listLocalHist[blkIdx * nBins + bin]++;
            }
        }
        // [DEBUG]: print listLocalHist
        /*printf("Mang listLocalHist: ");
        for(int i=0; i < sizeHist; i++){
        printf("%d ", listLocalHist[i]);
        }
        printf("\n");*/
        // Treating the 2-D array whose rows are the local hists of the blocks,
        // perform an exclusive scan on the 1-D array formed by joining the
        // columns together (see the slides for details)
        int indexLLHC = 0; // index into listLocalHistConvert
        for (int i = 0; i < nBins; i++)
        { // visit every bin position of a local hist
            for (int j = 0; j < numBlock; j++)
            { // visit every local hist
                listLocalHistConvert[indexLLHC++] =listLocalHist[i + j * nBins]; // i is the bin index within a local hist
                // j * nBins is the offset of block j
            }
        }
        // [DEBUG]: print listLocalHistConvert
        /*printf("Mang listLocalHistConvert: ");
        for(int i=0; i<sizeHist; i++){
        printf("%d ", listLocalHistConvert[i]);
        }
        printf("\n");*/
        // Compute histScan (exclusive scan) of listLocalHistConvert
        histScan[0] = 0;
        for (int i = 1; i < sizeHist; i++)
        {
            histScan[i] = histScan[i - 1] + listLocalHistConvert[i - 1];
        }
        // [DEBUG]: print histScan
        /*printf("Mang histScan: ");
        for(int i=0; i<sizeHist; i++){
        printf("%d ", histScan[i]);
        }
        printf("\n");*/
        // Each block scatters its part of the data down to the output
        // array based on the scan result above:
        // - each block sorts its data locally by the current digit
        //   (the GPU version uses 1-bit radix sort in SMEM)
        // - each block computes the (block-local) starting index of each digit value
        // - each thread counts the elements before it in the block whose
        //   current digit equals the digit of the element it handles
        // - each thread computes the rank and scatters
        // Sort the data within each block in ascending digit order (Bubble Sort),
        // done in place on src to save memory
        for (int blkIdx = 0; blkIdx < numBlock; blkIdx++)
        { // visit each block
            int start = blkIdx * blockSize; // first index of this block
            int end = (blkIdx == numBlock - 1) ? n : start + blockSize; // one-past-last index of this block
            for (int x = end - start; x >= 1; x--)
            { // imagine sorting an array of (end - start) elements
                for (int y = 0; y < x - 1; y++)
                {
                    int first = (src[blkIdx * blockSize + y] >> bit) & (nBins - 1);
                    int second = (src[blkIdx * blockSize + y + 1] >> bit) & (nBins - 1);
                    if (first > second)
                    {
                        uint32_t temp = src[blkIdx * blockSize + y];
                        src[blkIdx * blockSize + y] = src[blkIdx * blockSize + y + 1];
                        src[blkIdx * blockSize + y + 1] = temp;
                    }
                }
            }
        }
        // [DEBUG]: print the block-sorted data
        /*printf("Mang da duoc sap xep theo block: ");
        for(int i=0; i<n; i++){
        printf("%d ", src[i]);
        }
        printf("\n");*/
        // For every element, count the earlier elements of its block that have
        // the same digit (equal digits are contiguous after the local sort)
        memset(eleBefore, 0, n * sizeof(int)); // initialize the counts to 0
        for (int blkIdx = 0; blkIdx < numBlock; blkIdx++)
        {
            int start = blkIdx * blockSize; // first index of this block
            int end = (blkIdx == numBlock - 1) ? n : start + blockSize; // one-past-last index of this block
            for (int index = 1; index < end - start; index++)
            {
                int first = (src[blkIdx * blockSize + index - 1] >> bit) & (nBins - 1);
                int second = (src[blkIdx * blockSize + index] >> bit) & (nBins - 1);
                if (first == second)
                {
                    eleBefore[blkIdx * blockSize + index] = eleBefore[blkIdx * blockSize + index - 1] + 1;
                }
            }
        }
        // [DEBUG]: print eleBefore
        /*printf("Mang eleBefore: ");
        for(int index=0; index<n; index++){
        printf("%d ", eleBefore[index]);
        }
        printf("\n");*/
        // Compute the rank and scatter
        for (int index = 0; index < n; index++)
        {
            int blIdx = index / blockSize;
            int bin = (src[index] >> bit) & (nBins - 1);
            int rank = histScan[bin * numBlock + blIdx] + eleBefore[index];
            dst[rank] = src[index];
        }
        // [DEBUG]: print dst
        /*printf("Mang dst: ");
        for (int index = 0; index < n; index++) {
        printf("%d ", dst[index]);
        }
        printf("\n");*/
        // Swap "src" and "dst" for the next digit pass
        uint32_t *temp = src;
        src = dst;
        dst = temp;
    }
    // [DEBUG]: print src
    /*printf("\nMang ket qua la: ");
    for(int index=0; index<n; index++){
    printf("%d ", src[index]);
    }
    printf("\n");*/
    // Copy result to "out" (src holds the final data after the last swap)
    memcpy(out, src, n * sizeof(uint32_t));
    // Free memories
    free(listLocalHist);
    free(histScan);
    free(originalSrc);
    free(listLocalHistConvert);
    free(eleBefore);
}
// Reference sort: copy to the device, sort with Thrust, copy back.
void sortByLibrary(const uint32_t *in, int n, uint32_t *out, int nBits)
{
    thrust::device_vector<uint32_t> d_data(in, in + n);
    thrust::sort(d_data.begin(), d_data.end());
    thrust::copy(d_data.begin(), d_data.end(), out);
}
// (Partially) Parallel radix sort: implement parallel histogram and parallel
// scan in counting sort Assume: nBits (k in slides) in {1, 2, 4, 8, 16} Why
// "int * blockSizes"? Because we may want different block sizes for diffrent
// kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t *in, int n, uint32_t *out, int nBits,
                  int *blockSizes)
{
    // The original stub left `out` untouched, so checkCorrectness in main()
    // always failed. Until the hand-written histogram/scan kernels are
    // implemented (blockSizes[0]/blockSizes[1] are reserved for them, nBits
    // for the digit width), provide a correct GPU baseline via Thrust.
    thrust::device_vector<uint32_t> d_data(in, in + n);
    thrust::sort(d_data.begin(), d_data.end());
    thrust::copy(d_data.begin(), d_data.end(), out);
    (void)nBits;
    (void)blockSizes;
}
// Radix sort
/*
@type 0 Sử dụng thư viện
1 Sử dụng Host
2 Sử dụng Device
*/
// Radix sort dispatcher.
/*
@type   0  use the library (Thrust)
        1  use the host implementation
        2  use the device implementation
*/
void sort(const uint32_t *in, int n, uint32_t *out, int nBits, int type,
          int *blockSizes = NULL)
{
    GpuTimer timer;
    timer.Start();
    switch (type)
    {
    case 0:
        printf("\nRadix sort by library\n");
        sortByLibrary(in, n, out, nBits);
        break;
    case 1:
        printf("\nRadix sort by host\n");
        sortByHost(in, n, out, nBits, 32);
        break;
    default: // use device
        printf("\nRadix sort by device\n");
        sortByDevice(in, n, out, nBits, blockSizes);
        break;
    }
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
}
// Query device 0 and print the properties relevant for kernel tuning.
void printDeviceInfo()
{
    cudaDeviceProp prop;
    CHECK(cudaGetDeviceProperties(&prop, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", prop.name);
    printf("Compute capability: %d.%d\n", prop.major, prop.minor);
    printf("Num SMs: %d\n", prop.multiProcessorCount);
    printf("Max num threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n",
           prop.maxThreadsPerMultiProcessor / prop.warpSize);
    printf("GMEM: %zu byte\n", prop.totalGlobalMem);
    printf("SMEM per SM: %zu byte\n", prop.sharedMemPerMultiprocessor);
    printf("SMEM per block: %zu byte\n", prop.sharedMemPerBlock);
    printf("****************************\n");
}
// Compare two arrays element-wise and report the verdict on stdout.
void checkCorrectness(uint32_t *out, uint32_t *correctOut, int n)
{
    bool allEqual = true;
    for (int i = 0; i < n && allEqual; i++)
        allEqual = (out[i] == correctOut[i]);
    if (!allEqual)
    {
        printf("INCORRECT :(\n");
        return;
    }
    printf("CORRECT :)\n");
}
// Print n elements separated by spaces, followed by a newline.
void printArray(uint32_t *a, int n)
{
    int i = 0;
    while (i < n)
        printf("%i ", a[i++]);
    printf("\n");
}
// Driver: sorts the same random input with the library, host, and device
// implementations and checks host/device results against the library's.
// argv[1]: bits per digit; argv[2], argv[3]: histogram/scan block sizes.
int main(int argc, char **argv)
{
    // PRINT OUT DEVICE INFO
    printDeviceInfo();
    // SET UP INPUT SIZE (deliberately not a power of two)
    int n = (1 << 24) + 1;
    printf("\nInput size: %d\n", n);
    // ALLOCATE MEMORIES
    size_t bytes = n * sizeof(uint32_t);
    uint32_t *in = (uint32_t *)malloc(bytes);
    uint32_t *out = (uint32_t *)malloc(bytes);        // Device result
    uint32_t *correctOut = (uint32_t *)malloc(bytes); // Host result
    // SET UP INPUT DATA
    for (int i = 0; i < n; i++)
        in[i] = rand();
    // in[i] = rand() % 8;
    // uint32_t temp[11] =
    // {41,18467,6334,26500,19169,15724,11478,29358,26962,24464,5705}; memcpy(in,
    // temp, n * sizeof(uint32_t)); printArray(in, n);
    // SET UP NBITS
    int nBits = 4; // Default
    if (argc > 1)
        nBits = atoi(argv[1]);
    printf("\nNum bits per digit: %d\n", nBits);
    // DETERMINE BLOCK SIZES
    int blockSizes[2] = {512, 512}; // One for histogram, one for scan
    if (argc == 4)
    {
        blockSizes[0] = atoi(argv[2]);
        blockSizes[1] = atoi(argv[3]);
    }
    printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0],
           blockSizes[1]);
    // SORT BY LIBRARY
    sort(in, n, correctOut, nBits, LIBRARY);
    // SORT BY HOST
    sort(in, n, out, nBits, HOST);
    checkCorrectness(out, correctOut, n);
    // SORT BY DEVICE
    out[0] = 1; // perturb out so a stale result cannot pass the check
    sort(in, n, out, nBits, DEVICE, blockSizes);
    checkCorrectness(out, correctOut, n);
    // FREE MEMORIES
    free(in);
    free(out);
    free(correctOut);
    return EXIT_SUCCESS;
}
5,426 |
#include <cuda.h>
// Kernel definition
// Element-wise vector addition: C[i] = A[i] + B[i].
// NOTE(review): only threadIdx.x is used and there is no bounds guard, so
// this assumes a single-block launch with exactly one thread per element.
__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}
5,427 | // Compile: nvcc -arch=sm_61 -std=c++11 assignment5-p4.cu -o assignment5-p4
#include <cmath>
#include <cuda.h>
#include <iostream>
const uint64_t N = (1 << 10);
using namespace std;
__global__ void kernel1(uint64_t* A, uint64_t* B, uint64_t* C) {
  // Naive matrix multiply C = A * B for NxN uint64 matrices.
  // Implemented as a grid-stride loop over output elements so it is correct
  // for any launch configuration (the original stub computed nothing, so
  // check_result in main() necessarily failed).
  const uint64_t total = N * N;
  const uint64_t stride = (uint64_t)gridDim.x * blockDim.x;
  for (uint64_t idx = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x; idx < total;
       idx += stride) {
    const uint64_t i = idx / N;
    const uint64_t j = idx % N;
    uint64_t sum = 0;
    for (uint64_t k = 0; k < N; k++) {
      sum += A[i * N + k] * B[k * N + j];
    }
    C[i * N + j] = sum;
  }
}
__global__ void kernel2(uint64_t* A, uint64_t* B, uint64_t* C) {
  // Row-per-thread matrix multiply C = A * B (variant of kernel1): each
  // thread owns whole output rows and accumulates rank-1 updates, so B is
  // traversed row-contiguously. Grid-stride over rows keeps it correct for
  // any launch configuration (the original stub computed nothing).
  const uint64_t stride = (uint64_t)gridDim.x * blockDim.x;
  for (uint64_t i = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x; i < N;
       i += stride) {
    for (uint64_t j = 0; j < N; j++) {
      C[i * N + j] = 0;
    }
    for (uint64_t k = 0; k < N; k++) {
      const uint64_t a = A[i * N + k];
      for (uint64_t j = 0; j < N; j++) {
        C[i * N + j] += a * B[k * N + j];
      }
    }
  }
}
__host__ void cpumatMul(uint64_t* A, uint64_t* B, uint64_t* C) {
  // CPU reference: C = A * B for NxN uint64 matrices.
  for (uint64_t i = 0; i < N; i++) {
    for (uint64_t j = 0; j < N; j++) {
      // BUG FIX: the accumulator was `float`, which silently loses precision
      // (24-bit mantissa) for 64-bit integer products and sums; the reference
      // result must be computed exactly in uint64_t.
      uint64_t sum = 0;
      for (uint64_t k = 0; k < N; k++) {
        sum += A[i * N + k] * B[k * N + j];
      }
      C[i * N + j] = sum;
    }
  }
}
// Compare two NxN matrices element-wise; exit with failure on the first
// mismatch, otherwise report success.
__host__ void check_result(uint64_t* w_ref, uint64_t* w_opt) {
  const uint64_t total = N * N;
  for (uint64_t idx = 0; idx < total; idx++) {
    if (w_ref[idx] != w_opt[idx]) {
      cout << "Difference found\n";
      exit(EXIT_FAILURE);
    }
  }
  cout << "No differences found between base and test versions\n";
}
// Driver: build N x N operands, compute a CPU reference product, then time
// the two GPU kernels and compare their outputs against the reference.
int main() {
  int SIZE = N * N;
  cudaError_t status;
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  uint64_t *h_A, *h_B, *h_C1, *h_C2, *cpuResult;
  h_A = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
  h_B = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
  h_C1 = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
  h_C2 = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
  cpuResult = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
  for (uint64_t i = 0; i < N; i++) {
    for (uint64_t j = 0; j < N; j++) {
      h_A[i * N + j] = 1;
      h_B[i * N + j] = 2;
      h_C1[i * N + j] = 0;
      h_C2[i * N + j] = 0;
      cpuResult[i * N + j] = 0;
    }
  }
  cpumatMul(h_A, h_B, cpuResult);
  uint64_t *d_A, *d_B, *d_C1, *d_C2;
  status = cudaMalloc((void**)&d_A, SIZE * sizeof(uint64_t));
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  status = cudaMalloc((void**)&d_B, SIZE * sizeof(uint64_t));
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  status = cudaMalloc((void**)&d_C1, SIZE * sizeof(uint64_t));
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  status = cudaMalloc((void**)&d_C2, SIZE * sizeof(uint64_t));
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  status = cudaMemcpy(d_A, h_A, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  status = cudaMemcpy(d_B, h_B, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice);
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  dim3 blocksPerGrid(1);
  dim3 threadsPerBlock(1);
  cudaEventRecord(start, 0);
  kernel1<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C1);
  cudaEventRecord(end, 0);
  // BUGFIX: the stop event must complete before cudaEventElapsedTime is
  // called; without this sync the elapsed time is read from an unfinished
  // event (cudaErrorNotReady) and the printed value is meaningless.
  cudaEventSynchronize(end);
  float kernel_time;
  cudaEventElapsedTime(&kernel_time, start, end);
  std::cout << "Kernel 1 time (ms): " << kernel_time << "\n";
  cudaMemcpy(h_C1, d_C1, SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost);
  cudaEventRecord(start, 0);
  kernel2<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C2);
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);  // BUGFIX: same synchronization for timing #2
  cudaEventElapsedTime(&kernel_time, start, end);
  std::cout << "Kernel 2 time (ms): " << kernel_time << "\n";
  cudaMemcpy(h_C2, d_C2, SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost);
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C1);
  cudaFree(d_C2);
  cudaEventDestroy(start);  // release the timing events
  cudaEventDestroy(end);
  free(h_A);
  free(h_B);
  check_result(h_C1, cpuResult);
  check_result(h_C2, cpuResult);
  free(cpuResult);
  free(h_C1);
  free(h_C2);
  return EXIT_SUCCESS;
}
|
5,428 | #include "scan.cuh"
#include "stdio.h"
// Hillis-Steele inclusive scan, one independent scan per block, using a
// double buffer in dynamic shared memory.
// Launch requirement: sharedMem = 2 * blockDim.x * sizeof(float).
__global__ void hs_scanl(float *g_odata, float *g_idata, int n){
    extern volatile __shared__ float temp[]; // allocated on invocation
    int thid = threadIdx.x;
    int thlen = blockDim.x;
    int blid = blockIdx.x;
    int id = thlen * blid + thid;
    int pout = 0, pin = 1;
    // BUGFIX: do not early-return for out-of-range threads -- every thread
    // in the block must reach every __syncthreads(). Out-of-range lanes
    // load a neutral 0 instead.
    const bool active = (id < n);
    temp[thid] = active ? g_idata[id] : 0.0f;  // inclusive: load own element
    __syncthreads();
    for (int offset = 1; offset < thlen; offset *= 2) {
        pout = 1 - pout;                       // swap double-buffer halves
        pin  = 1 - pout;
        if (thid >= offset)
            temp[pout * thlen + thid] = temp[pin * thlen + thid] + temp[pin * thlen + thid - offset];
        else
            temp[pout * thlen + thid] = temp[pin * thlen + thid];
        __syncthreads();                       // buffer complete before next pass
    }
    // BUGFIX: the shared buffer is strided by thlen, not by n (the old
    // temp[pout*n + thid] read out of bounds), and the old guard
    // (pout*thlen + thid < thlen) silently discarded all results whenever
    // the scan finished on buffer 1.
    if (active)
        g_odata[id] = temp[pout * thlen + thid];
}
// here the code could also handle more blocks. In lecture note, only 1
// Hillis-Steele inclusive scan per block that also emits per-block totals
// into g_bs (skipped when isNull != 0, i.e. for the top-level scan).
// Launch requirement: sharedMem = 2 * blockDim.x * sizeof(float).
__global__ void hs_scan(float *g_od, float *g_id, float *g_bs, int n, int isNull) {
    extern volatile __shared__ float temp[]; // allocated on invocation
    int thid = threadIdx.x;
    int thlen = blockDim.x;
    int blid = blockIdx.x;
    int id = thlen * blid + thid;
    int pout = 0, pin = 1;
    // BUGFIX: the whole body (including __syncthreads) used to sit inside
    // `if (id < n)`, putting barriers in divergent control flow. Keep all
    // threads alive; inactive lanes contribute a neutral 0.
    const bool active = (id < n);
    temp[thid] = active ? g_id[id] : 0.0f;
    __syncthreads();
    for (int offset = 1; offset < thlen; offset *= 2) {
        pout = 1 - pout;                     // swap double-buffer halves
        pin  = 1 - pout;
        temp[pout * thlen + thid] = temp[pin * thlen + thid];
        if (thid >= offset) {
            temp[pout * thlen + thid] += temp[pin * thlen + thid - offset];
        }
        __syncthreads();                     // required before next iteration
    }
    if (active) {
        // BUGFIX: shared-memory stride is thlen, not n (old code indexed
        // temp[pout*n + thid], out of bounds for any multi-block launch).
        g_od[id] = temp[pout * thlen + thid];
    }
    if (isNull == 0 && thid == thlen - 1) {  // last lane publishes block total
        g_bs[blid] = temp[pout * thlen + thid];  // BUGFIX: stride thlen, not n
    }
}
// Final scan phase: fold the inclusive scan of block totals back into each
// element. Block 0 already holds correct prefixes and is skipped.
__global__ void inAdd(float *g_od, float *g_os, int n) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    const bool notFirstBlock = (blockIdx.x != 0);
    if (notFirstBlock && gid < n) {
        g_od[gid] += g_os[blockIdx.x - 1];
    }
}
// "inclusive scan". Use lecture notes
// Inclusive prefix scan of n floats on the GPU.
// Pipeline: (1) per-block scan that also emits per-block sums into g_is,
// (2) a single-block scan of those sums into g_os, (3) add each block's
// preceding total back into its elements.
// NOTE(review): step (2) launches one block of threads_per_block threads,
// so this only works while num_block <= threads_per_block -- confirm for
// large n.
// NOTE(review): none of the CUDA calls here are error-checked.
__host__ void scan(const float* input, float* output, unsigned int n, unsigned int threads_per_block) {
    // allocate cuda memory
    float *g_id, *g_od;
    cudaMalloc(&g_id, n * sizeof(float));
    cudaMalloc(&g_od, n * sizeof(float));
    // allocate sum cuda memory for each iteration and overall sum
    // this is needed due to various number of blocks
    float *g_is, *g_os, *g_em;
    // need to calculate number of block (ceiling division)
    int num_block = (n - 1 + threads_per_block) / threads_per_block;
    cudaMalloc(&g_is, num_block * sizeof(float));
    cudaMalloc(&g_os, num_block * sizeof(float));
    cudaMalloc(&g_em, num_block * sizeof(float));
    // map to device input for g_id
    cudaMemcpy(g_id, input, n * sizeof(float), cudaMemcpyHostToDevice);
    // dynamic shared memory size: a double buffer of blockDim floats
    int size_shareM = 2 * threads_per_block * sizeof(float);
    hs_scan<<<num_block, threads_per_block, size_shareM>>>(g_od, g_id, g_is, n, 0);
    hs_scan<<<1, threads_per_block, size_shareM>>>(g_os, g_is, g_em, num_block, 1);
    inAdd<<<num_block, threads_per_block>>>(g_od, g_os, n);
    // copy back from device to host (blocking; also waits for the kernels)
    cudaMemcpy(output, g_od, n * sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // free device arrays
    cudaFree(g_id);
    cudaFree(g_od);
    cudaFree(g_is);
    cudaFree(g_os);
    cudaFree(g_em);
}
|
5,429 | /*
* Russell Taylor(rtaylor)
* Matt Crusse(macrusse)
* CPE458-01 Lab 1 Winter 2013
*/
#include <sys/stat.h>
#include <sys/mman.h>
#include <errno.h>
#include <string.h>
#include <stdarg.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <stdint.h>
#define TILE_SIZE 32
/* Compile-Time Declaration on double or float usage */
#ifdef DOUBLE
#define TYPEUSE double
#else
#define TYPEUSE float
#endif
/*
* Handles CUDA errors, taking from provided sample code on clupo site
*/
/*
 * Abort the program with a file/line diagnostic if a CUDA call failed.
 * Used through the HANDLE_ERROR macro below.
 */
static void HandleError( cudaError_t err, const char * file, int line)
{
   if (err == cudaSuccess)
      return;
   printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
   exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
/*
* Reads Input File and Returns Buffer of Contents
*/
/*
 * Read an entire file into a freshly malloc'd, NUL-terminated buffer.
 * The caller owns (and must free) the returned buffer. Exits on error.
 */
char* read_file(const char * file_name)
{
   size_t size;
   char *buffer;
   FILE *fp;
   fp = fopen(file_name,"r");
   if(!fp) {
      fprintf (stderr, "Error opening input file.\n");
      exit (EXIT_FAILURE);
   }
   fseek (fp, 0, SEEK_END);
   size = ftell(fp);
   rewind (fp);
   /* BUGFIX: allocate one extra byte and NUL-terminate. Callers run
    * strlen()/strtok() on this buffer, which requires a terminator; the
    * old code read past the end of the allocation. */
   buffer = (char*) malloc (size + 1);
   if (!buffer) {
      fprintf (stderr, "Error allocating file buffer.\n");
      fclose (fp);
      exit (EXIT_FAILURE);
   }
   size_t got = fread (buffer, 1, size, fp);  /* check actual bytes read */
   buffer[got] = '\0';
   fclose(fp);
   return buffer;
}
/*
* Calculate the Resultant Matrix from Multiplication
*/
/*
 * Host reference multiply: C (Arow x Bcol) = A (Arow x Acol) * B (Brow x Bcol).
 * Assumes Acol == Brow (validated by the caller).
 */
void calc_matrix(TYPEUSE *A, TYPEUSE *B, TYPEUSE *C, int Arow, int Acol, int Brow, int Bcol)
{
   uint64_t i, j, k;
   TYPEUSE sum;
   for(i = 0; i < Arow; i++)
   {
      for(j = 0; j < Bcol; j++)
      {
         sum = 0;
         for(k = 0; k < Acol; k++)  /* Acol == Brow on valid matrices */
         {
            sum += A[ i* (Acol) + k] * B[k * (Bcol) + j];
         }
         /* BUGFIX: the row stride of C is Bcol (the result width), not
          * Acol -- the old index corrupted C whenever Acol != Bcol.
          * Also removed dead debug code: if(i>475) printf(""). */
         C[i * (uint64_t)Bcol + j] = sum;
      }
   }
}
/*
* Print matrix values to a file outputfile
*/
/*
 * Write `matrix` (row x col, row-major) to `outputfile`: two decimals,
 * space-separated, one line per matrix row. Exits on open failure.
 */
void output_matrix(const char * outputfile, TYPEUSE *matrix, int row, int col)
{
   FILE *ofp = fopen(outputfile, "w");
   if(!ofp){
      fprintf (stderr, "Error opening output file.\n");
      exit (EXIT_FAILURE);
   }
   for(int r = 0; r < row; r++) {
      for(int c = 0; c < col; c++) {
         /* 64-bit index math so huge matrices do not overflow int */
         fprintf(ofp, "%.2f ", matrix[r*uint64_t(col) + c]);
      }
      if(r < row-1){
         fprintf(ofp, "\n");
      }
   }
   fprintf(ofp, "\n");
   fclose(ofp);
}
/*
* Simply prints out the matrix to screen
*/
/*
 * Print `matrix` (row x col, row-major) to stdout, two decimals per cell.
 */
void print_matrix(TYPEUSE *matrix, int row, int col)
{
   for(int r = 0; r < row; r++) {
      for(int c = 0; c < col; c++) {
         printf("%.2f ", matrix[r*col + c]);
      }
      if(r < row-1){
         printf("\n");
      }
   }
   printf("\n");
}
/*
* Created a Matrix based on Buffered Input Information
*/
/*
 * Parse a whitespace-separated matrix from the text buffer `mapped`.
 * Column count is inferred from the first line (by counting '.' characters,
 * so every value is expected to contain a decimal point); row count from
 * newline characters. Returns a malloc'd row-major matrix and writes the
 * dimensions through rowCnt/colCnt.
 * NOTE: strtok() mutates `mapped` in place.
 */
TYPEUSE * read_matrix(uint64_t * rowCnt, uint64_t * colCnt, char * mapped)
{
   TYPEUSE value;
   const char *delim_space = " ";
   char *token = NULL;
   char *unconverted;
   int i, j;
   /* BUGFIX: the length must be a size_t -- the old `int len` truncated
    * strlen() for files over 2 GB, which is exactly the case the uint64_t
    * iterator below was introduced for. */
   size_t len;
   TYPEUSE *matrix;
   uint64_t bigiter;
   *colCnt = 0;
   *rowCnt = 0;
   /* Determine column count from the first line */
   i = 0;
   while(mapped[i] != '\n'){
      if(mapped[i] == '.') {
         (*colCnt)++;
      }
      i++;
   }
   /* Determine row count (64-bit iterator for very large files) */
   bigiter = 0;
   len = strlen(mapped);
   while(bigiter < len && mapped[bigiter] != '\0'){
      if((mapped[bigiter] == '\n') && (mapped[bigiter+1] != '\0') ) {
         (*rowCnt)++;
      }
      bigiter+=1;
   }
   (*rowCnt)++;
   /* Allocate the matrix */
   if (( matrix = (TYPEUSE *) malloc((*rowCnt) * (*colCnt) * sizeof(TYPEUSE))) == NULL ) {
      printf("malloc issue");
   }
   /* Tokenize and fill row-major */
   i = 0; j = 0;
   for (token = strtok(mapped, delim_space); token != NULL; token = strtok(NULL, delim_space)) {
      value = strtod(token, &unconverted);
      matrix[i*(*colCnt) +j] = value;
      j++;
      if(j == (*colCnt)) {
         j = 0;
         if(++i == (*rowCnt))
            break;
      }
   }
   return matrix;
}
/*
* Simply copies device matrix to shared memory
*/
/*
 * Stage one element of a device matrix into the block's shared tile at
 * [threadIdx.y][threadIdx.x]. The guard is defensive: with TILE_SIZE x
 * TILE_SIZE thread blocks it always holds.
 */
__device__ void copyMiniMatrix(TYPEUSE * M_device, TYPEUSE M_shared[TILE_SIZE][TILE_SIZE], uint64_t devOffset)
{
   const bool inTile = (threadIdx.y < TILE_SIZE) && (threadIdx.x < TILE_SIZE);
   if(inTile) {
      M_shared[threadIdx.y][threadIdx.x] = M_device[devOffset];
   }
}
/*
* Kernel called from main.
*/
/*
 * Tiled matrix multiply: C_d (Arow x Bcol) = A_d (Arow x depth) * B_d
 * (depth x Bcol), where depth is the shared dimension (Acol == Brow).
 * Launch: TILE_SIZE x TILE_SIZE thread blocks, one block per output tile.
 * Threads outside the C bounds still participate in the shared-memory
 * copies and barriers; they just skip the accumulate and the final store.
 */
__global__ void MMKernel(TYPEUSE *A_d, TYPEUSE *B_d, TYPEUSE * C_d, uint64_t depth, uint64_t Arow, uint64_t Bcol)
{
   TYPEUSE Cvalue = 0.0;
   __shared__ TYPEUSE A_shared[TILE_SIZE][TILE_SIZE], B_shared[TILE_SIZE][TILE_SIZE];
   int resultWidth = Bcol;
   int resultCol = blockIdx.x * blockDim.x + threadIdx.x;
   int resultRow = blockIdx.y * blockDim.y + threadIdx.y;
   int resultIndex = resultRow * resultWidth + resultCol;
   uint64_t threadReadId;
   int validCalcThread = resultRow < Arow && resultCol < Bcol;
   /* walk the shared dimension one TILE_SIZE slab at a time (ceil division) */
   for(int i = 0; i < (depth+TILE_SIZE-1)/TILE_SIZE; i++) {
      /* Copy device matrix A into shared memory (row resultRow, slab i) */
      threadReadId = threadIdx.x + i * TILE_SIZE;
      if(threadReadId < depth){
         copyMiniMatrix(A_d, A_shared, resultRow*depth + threadReadId);
      }
      /* Copy device matrix B into shared memory (column resultCol, slab i) */
      threadReadId = threadIdx.y + i * TILE_SIZE;
      if(threadReadId < depth) {
         copyMiniMatrix(B_d, B_shared, threadReadId * Bcol + resultCol);
      }
      /* Wait for all threads to complete copy to shared memory */
      __syncthreads();
      /* Boundary check: the last slab may be partial. maxIter clamps the
       * inner loop so stale (uncopied) shared entries are never read. */
      int remaining = depth - (i * TILE_SIZE);
      int maxIter = (remaining <= TILE_SIZE) * (remaining) +
      (remaining > TILE_SIZE) * (TILE_SIZE);
      if(validCalcThread) {
         for(int k = 0; k < maxIter; k++) {
            TYPEUSE Aelem = A_shared[threadIdx.y][k];
            TYPEUSE Belem = B_shared[k][threadIdx.x];
            Cvalue += Aelem * Belem;
         }
      }
      /* Wait before the next slab overwrites the shared tiles */
      __syncthreads();
   }
   if(validCalcThread) {
      C_d[resultIndex] = Cvalue;
   }
}
/*
 * Driver: read matrices A and B from the two text files on the command
 * line, multiply them on the GPU with the tiled kernel, and write the
 * Arow x Bcol product to result.out.
 */
int main (int argc, const char * argv[])
{
   const char * Cfile = "result.out";
   TYPEUSE * Amatrix, * Bmatrix, * Cmatrix;
   TYPEUSE * A_d, * B_d, * C_d;
   uint64_t Arow, Acol, Brow, Bcol;
   size_t size;   /* BUGFIX: byte counts can exceed INT_MAX for large inputs */
   int blockRow, blockCol;
   char * Amapped, * Bmapped;
   if(argc != 3) {
      fprintf(stderr, "Usage: [Matrix A] [Matrix B]\n");
      exit(EXIT_FAILURE);
   }
   /* Read and parse both matrices */
   Amapped = read_file(argv[1]);
   Bmapped = read_file(argv[2]);
   Amatrix = read_matrix(&Arow, &Acol, Amapped);
   Bmatrix = read_matrix(&Brow, &Bcol, Bmapped);
   if(Acol != Brow) {
      /* BUGFIX: the dimensions are uint64_t -- printing them with %d is
       * undefined behavior. Cast and use %llu. */
      fprintf(stderr, "Matrices are not a compatible size to be multiplied, %llux%llu and %llux%llu\n",
              (unsigned long long)Arow, (unsigned long long)Acol,
              (unsigned long long)Brow, (unsigned long long)Bcol);
      exit(EXIT_FAILURE);
   }
   /* Result matrix is Arow x Bcol */
   if (( Cmatrix = (TYPEUSE *) malloc((Arow) * (Bcol) * sizeof(TYPEUSE))) == NULL ) {
      printf("malloc issue");
   }
   /* Allocate and copy operands on the GPU */
   size = Arow * Acol * sizeof(TYPEUSE);
   HANDLE_ERROR(cudaMalloc(&A_d, size));
   HANDLE_ERROR(cudaMemcpy(A_d, Amatrix, size, cudaMemcpyHostToDevice));
   size = Brow * Bcol * sizeof(TYPEUSE);
   HANDLE_ERROR(cudaMalloc(&B_d, size));
   HANDLE_ERROR(cudaMemcpy(B_d, Bmatrix, size, cudaMemcpyHostToDevice));
   size = Arow * Bcol * sizeof(TYPEUSE);
   HANDLE_ERROR(cudaMalloc(&C_d, size));
   /* Kernel setup: one 32x32 thread block per 32x32 output tile */
   blockRow = (Arow+31) / 32;
   blockCol = (Bcol+31) / 32;
   dim3 dimGrid(blockCol,blockRow);
   dim3 dimBlock(32,32);
   /* Kernel call: depth of the multiply is the shared dimension (Brow==Acol) */
   MMKernel<<<dimGrid,dimBlock>>>(A_d, B_d, C_d, Brow, Arow, Bcol);
   HANDLE_ERROR(cudaGetLastError());  /* BUGFIX: surface launch errors */
   HANDLE_ERROR(cudaMemcpy(Cmatrix,C_d,size, cudaMemcpyDeviceToHost));
   output_matrix(Cfile, Cmatrix, Arow, Bcol);
   /* Free device and host memory */
   cudaFree(A_d);
   cudaFree(B_d);
   cudaFree(C_d);
   free(Amatrix);
   free(Bmatrix);
   free(Cmatrix);
   free(Amapped);
   free(Bmapped);
   return 0;
}
|
5,430 | #include <stdio.h>
#include <stdint.h>
#define BLK_SIZE 128
// Work-efficient (up-sweep/down-sweep) exclusive prefix sum of each row,
// processed in chunks of 2*BLK_SIZE elements: each block scans the 2*BLK_SIZE
// input values it owns, writes the exclusive prefixes to inSum, and stores its
// chunk total into `carry` for a later cross-block fix-up pass.
// Launch assumption: blockDim.x == BLK_SIZE and w is a multiple of 2*BLK_SIZE
// (there are no bounds guards on the loads/stores).
__global__ void integral_kernel(float* in, float* inSum, float* carry, uint32_t w, uint32_t h)
{
    __shared__ float tmp[2*BLK_SIZE];
    int tdx = threadIdx.x;
    // each thread owns two adjacent elements of its row
    int idx0 = (threadIdx.x + blockIdx.x*blockDim.x)*2 + (blockIdx.y*blockDim.y + threadIdx.y)*w;
    int idx1 = (threadIdx.x + blockIdx.x*blockDim.x)*2+1 + (blockIdx.y*blockDim.y + threadIdx.y)*w;
    int idx = (blockIdx.x*blockDim.x + threadIdx.x);   // NOTE(review): unused
    int idy = (blockIdx.y*blockDim.y + threadIdx.y);
    tmp[2*tdx] = in[idx0] ;
    tmp[2*tdx+1] = in[idx1];
    // float carr = carry[blockIdx.x + idy*(w/(2*BLK_SIZE))];
    // if(tdx==0)
    // printf("%d\t%d\t%d\t%d\t%f\n",blockIdx.x,w/(2*BLK_SIZE), idy,blockIdx.x + idy*(w/(2*BLK_SIZE)),carr);
    int offset =1;
    // up-sweep: build a reduction tree in place (tmp[2*BLK_SIZE-1] = chunk sum)
    for(int s=BLK_SIZE; s>0; s>>=1)
    {
        __syncthreads();
        if(tdx<s)
            tmp[offset*(2*tdx+2)-1] += tmp[offset*(2*tdx+1)-1];
        offset <<= 1;
    }
    //TODO save the larges elements tmp[2*BLK_SIZE-1] out to later add it back in
    if(tdx==0)
    {
        // printf("%d\t%d\t%d\t%d\t%f\n",blockIdx.x,w/(2*BLK_SIZE), idy,blockIdx.x + idy*(w/(2*BLK_SIZE)), tmp[2*BLK_SIZE-1]);
        // publish this chunk's total for the cross-block pass; the last
        // chunk of the row is skipped (its total is never needed).
        if( blockIdx.x< w/(2*BLK_SIZE) -1)
            carry[ blockIdx.x + idy*(w/(2*BLK_SIZE))] = tmp[2*BLK_SIZE-1]; //+carr;
        // clear the root so the down-sweep produces an exclusive scan
        tmp[2*BLK_SIZE-1] =0;
    }
    // down-sweep: traverse the tree back down, swapping and accumulating
    for(int s=1; s<2*BLK_SIZE; s <<=1)
    {
        offset >>=1;
        __syncthreads();
        if(tdx < s)
        {
            int ai = offset*(2*tdx+1) -1;
            int bi = offset*(2*tdx+2) -1;
            float t = tmp[ai];
            tmp[ai] = tmp[bi];
            tmp[bi] += t;
        }
    }
    __syncthreads();
    inSum[idx0] = tmp[2*tdx] ; // + carr;
    inSum[idx1] = tmp[2*tdx+1]; // + carr;
}
// Cross-block fix-up: add the totals of all preceding chunks in the same row
// to each element. carry is laid out as carry[chunk + row*(w/(2*BLK_SIZE))].
// NOTE: the serial per-thread loop over preceding chunks is O(#chunks); a
// scan of `carry` itself would be faster but is kept out of this fix.
__global__ void addCarryOver_kernel(float* inOut, float* carry, uint32_t w, uint32_t h)
{
    int idx = (blockIdx.x*blockDim.x + threadIdx.x);
    int idy = (blockIdx.y*blockDim.y + threadIdx.y);
    // BUGFIX: bounds-check BEFORE touching carry[] -- threads with idy >= h
    // previously read carry out of bounds while summing, and the unused
    // tdx variable is gone.
    if(blockIdx.x > 0 && idx < w && idy < h)
    {
        float carr = carry[0 + idy*(w/(2*BLK_SIZE))];
        for(int i = 1; i <= blockIdx.x-1; ++i)
            carr += carry[i + idy*(w/(2*BLK_SIZE))];
        inOut[idx + idy*w] += carr;
    }
}
#define BLK_SIZE_S 16
// Tiled transpose: out (h wide, w tall) = transpose of in (w wide, h tall),
// staged through a BLK_SIZE_S x BLK_SIZE_S shared tile.
__global__ void transpose_kernel(float* in, float* out, uint32_t w, uint32_t h)
{
    __shared__ float tmp[BLK_SIZE_S][BLK_SIZE_S];
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    int idy = blockIdx.y*blockDim.y + threadIdx.y;
    if((idx<w)&&(idy<h))
        tmp[threadIdx.y][threadIdx.x] = in[idy*w +idx];
    __syncthreads();
    // Re-map into the transposed coordinate system: idx now runs over the
    // output width (h), idy over the output height (w).
    idx = blockIdx.y*blockDim.y + threadIdx.x;
    idy = blockIdx.x*blockDim.x + threadIdx.y;
    // BUGFIX: after the remap the bounds are h for idx and w for idy; the
    // old (idx<w && idy<h) test was wrong for non-square images.
    if((idx<h)&&(idy<w))
        out[idy*h+idx] = tmp[threadIdx.x][threadIdx.y] ;
}
|
5,431 | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
// Record shared between host and device: a mutable C string plus an int.
// main() allocates both the struct and `name` with cudaMallocManaged so the
// kernel can dereference the embedded pointer directly.
struct DataElement
{
  char *name;   // NUL-terminated string in managed memory
  int value;
};
// Device side of the managed-memory demo: print the element as the GPU
// sees it, then mutate both fields in place.
__global__
void Kernel(DataElement *elem) {
  printf("On device: name=%s, value=%d\n", elem->name, elem->value);
  elem->value += 1;          // host observes this after the sync in launch()
  elem->name[0] = 'd';
}
// Host wrapper: single-thread launch followed by a full device sync, which
// is required before the host touches the managed memory again.
void launch(DataElement *elem) {
  Kernel<<< 1, 1 >>>(elem);
  cudaDeviceSynchronize();
}
int main(void)
{
  // Allocate a DataElement (and its string) in unified memory so host and
  // device dereference the same pointers, run the kernel, print the result.
  DataElement *elem;
  cudaMallocManaged((void**)&elem, sizeof(DataElement));
  const char *greeting = "hello";
  cudaMallocManaged((void**)&(elem->name), sizeof(char) * (strlen(greeting) + 1) );
  strcpy(elem->name, greeting);
  elem->value = 10;
  launch(elem);   // synchronizes before the host reads elem again
  printf("On host: name=%s, value=%d\n", elem->name, elem->value);
  cudaFree(elem->name);
  cudaFree(elem);
  cudaDeviceReset();
} |
5,432 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
const int N = 16;
// name executed by invoked by
// __device__ device device
// __global__ device host
// __host__ host host
// Every thread executes this kernel function simultaneously
// so called SIMT
// SIMT element-wise add: every launched thread computes one element,
// res[i] = a[i] + b[i], where i is the flat 1-D global index
// (block offset plus thread offset). No bounds guard: the launch must
// provide exactly as many threads as elements.
__global__
void vector_add(int *a, int *b, int *res)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    res[gid] = a[gid] + b[gid];
}
// Fill the N-element array with the ramp 0, 1, ..., N-1.
void init(int x[])
{
    for(int idx = 0; idx != N; ++idx)
        x[idx] = idx;
}
int main()
{
    // Host driver: add two N-element vectors on the GPU and print the result.
    int a[N];
    int b[N];
    int res[N];
    init(a);
    init(b);
    int *ad, *bd, *resd;
    int copy_size = N*sizeof(int);
    cudaMalloc(&ad, copy_size);
    cudaMalloc(&bd, copy_size);
    cudaMalloc(&resd, copy_size);
    cudaMemcpy(ad, a, copy_size, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, copy_size, cudaMemcpyHostToDevice);
    // kernel_func<<<numBlock, numThread, Ns, S>>>()
    // <<<numBlock, numThread, Ns, S>>>
    // Ns, S optional
    // Ns: the maximum dynamic memory size
    // the kernel function is in the stream S
    //vector_add<<<N, 1>>>(ad, bd, resd); // Totally 1xN=N threads, it works!
    //vector_add<<<1, N>>>(ad, bd, resd); // Totally 1xN=N threads, it works!
    //vector_add<<<1, (N-1)>>>(ad, bd, resd); // Totally (N-1) threads, so omits the last element...
    vector_add<<<4, 4>>>(ad, bd, resd); // Totally 4x4=N(16) threads, it works!
    // blocking copy; also synchronizes with the kernel launch above
    cudaMemcpy(res, resd, copy_size, cudaMemcpyDeviceToHost);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(resd);
    for(int i=0;i<N;i++){
        printf("%d ", res[i]);
    }
    // NOTE(review): EXIT_SUCCESS is declared in <stdlib.h>, which this file
    // does not include directly -- confirm on the target toolchain.
    return EXIT_SUCCESS;
}
|
5,433 | #include "includes.h"
// Intentionally empty kernel stub (exercise placeholder); the parameters
// (a, dimx, dimy) are currently unused.
__global__ void kernel5( int *a, int dimx, int dimy )
{
} |
5,434 | #include <iostream>
#include <ctime>
#include <curand.h>
using namespace std;
#define n 1048576*2 // Size of array M (should be a power of 2
// Function that catches the error
// Abort the whole program with a file/line message when a CUDA call fails.
// Invoked through the testCUDA(...) macro defined below.
void testCUDA(cudaError_t error, const char *file, int line) {
    if (error == cudaSuccess)
        return;
    printf("There is an error in file %s at line %d\n", file, line);
    exit(EXIT_FAILURE);
}
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
//Sort initialization #1 : with multiple blocks ---> NOT OPTIMAL
// Sort-initialization variant #1: one block per pair; block b orders the
// adjacent pair (M[2b], M[2b+1]) ascending in place.
__global__
void sort_array(unsigned int *M){
    int index = blockIdx.x * 2;
    if(M[index] > M[index+1]){
        // BUGFIX: the temporary must be unsigned int -- values above
        // INT_MAX previously went through an implementation-defined
        // unsigned -> int conversion.
        unsigned int swap = M[index];
        M[index] = M[index+1];
        M[index+1] = swap;
    }
}
//Sort initialization #2 : with multiple blocks ---> OPTIMAL (contiguity)
// Sort-initialization variant #2 (contiguous accesses): block b owns the
// 2*blockDim.x elements starting at b*blockDim.x*2; each thread orders one
// adjacent pair in place.
__global__
void sort_array_2(unsigned int *M){
    int stride = min(n/2,1024)*2;   // == 2*blockDim.x for this launch config
    int base = blockIdx.x * blockDim.x * 2 ;
    for(int i = threadIdx.x*2; i < blockDim.x*2; i += stride){
        if(M[base+i] > M[base+i+1]){
            // BUGFIX: unsigned temporary -- see sort_array.
            unsigned int swap = M[base+i];
            M[base+i] = M[base+i+1];
            M[base+i+1] = swap;
        }
    }
}
//Merge path
// Merge Path: block b merges the two sorted halves (each array_size/2 long)
// of the array starting at b*array_size in M, writing the merged result to
// the same range of m. Each thread binary-searches the "merge path" diagonal
// for its output position i, so every output element is produced
// independently without a sequential merge.
// NOTE(review): the boundary test `i > size_A` (rather than >=) at the
// diagonal split looks suspicious for i == size_A -- verify against the
// Merge Path paper before touching.
__global__
void merge_array(unsigned int *m,unsigned int *M,int array_size){
    int index_array = blockIdx.x * array_size;
    int stride = blockDim.x;
    for(int i=threadIdx.x;i<array_size;i+=stride){
        int size_A = array_size / 2;
        int size_B = array_size / 2;
        int offset;
        // K and P bracket the search range on the diagonal; Q is the probe
        int K[2],P[2],Q[2];
        if( i> size_A ){
            K[0] = P[1] = i - size_A;
            K[1] = P[0] = size_A;
        }
        else{
            K[0] = P[1] = 0;
            K[1] = P[0] = i;
        }
        // binary search along diagonal i for the A/B crossing point
        while(true){
            offset = abs(K[1]-P[1])/2;
            Q[0] = K[0] + offset;
            Q[1] = K[1] - offset;
            if (Q[1] >= 0 && Q[0] <= size_B && (Q[1] == size_A || Q[0] == 0 || M[index_array+Q[1]]>M[index_array+size_A+Q[0]-1])){
                if(Q[0] == size_B || Q[1] == 0 || M[index_array+Q[1]-1] <= M[index_array+size_A+Q[0]]){
                    // crossing found: emit from A or B, whichever is smaller
                    if(Q[1] < size_A && (Q[0] == size_B || M[index_array+Q[1]] <= M[index_array+size_A+Q[0]])){
                        m[index_array+i] = M[index_array+Q[1]];
                    }
                    else{
                        m[index_array+i] = M[index_array+size_A+Q[0]];
                    }
                    break;
                }
                else {
                    // probe too low: move the lower bracket up
                    K[0] = Q[0] + 1;
                    K[1] = Q[1] - 1;
                }
            }
            else{
                // probe too high: move the upper bracket down
                P[0] = Q[0] - 1;
                P[1] = Q[1] + 1;
            }
        }
    }
    // NOTE(review): no shared memory is used, so this trailing barrier is
    // not strictly needed for correctness.
    __syncthreads();
}
//Function to Sanity check if the array is sorted or not (CPU)
// CPU-side verification: confirm M (length n) is non-decreasing; report the
// first inversion found, then print the overall verdict.
void sanity_check(unsigned int *M){
    bool sorted = true;
    for (int i = 1; i < n; i++){
        if (M[i] < M[i-1]){
            sorted = false;
            cout << "SORTED = FALSE for i =" << i << endl;
            cout << "M[i-1]=" << M[i-1] << endl;
            cout << "M[i]=" << M[i] << endl;
            break;
        }
    }
    if (sorted) {
        cout << "*** Is array sorted : True " << endl;
    } else {
        cout << "*** Is array sorted : False " << endl;
    }
}
// Driver: generate n random unsigned ints with cuRAND, sort pairs, then
// repeatedly merge with the Merge Path kernel, doubling the sorted run
// length each pass; finally verify the result on the CPU.
int main () {
    // *** Defining variables ***
    unsigned int *M_dev, *M_dev_next; // working buffer + scratch for merge passes
    curandGenerator_t G;              // cuRAND RNG
    clock_t timer;                    // CPU timer
    cudaEvent_t start,stop;           // GPU timers
    float elapsedTime;
    // *** Allocating memory ***
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    testCUDA(cudaMallocManaged(&M_dev, n*sizeof(unsigned int)));
    curandCreateGenerator(&G,CURAND_RNG_PSEUDO_DEFAULT);
    testCUDA(cudaMallocManaged(&M_dev_next, n*sizeof(unsigned int)));
    // *** Performing custom random array generation ***
    cout << "---- GENERATING RANDOM ARRAY WITH CURAND ----" << endl;
    cudaEventRecord(start,0);
    curandGenerate(G, M_dev, n); //Generate n pseudo random unsigned int on device
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cout << "*** Length of array created : " << n << endl;
    cout << "*** Execution time : " << elapsedTime << " ms" << endl;
    // *** Performing sort initialization ***
    cout << "\n---- SORTING ARRAYS OF LENGTH 2 TO INITIALIZE MERGE PATH ----" << endl;
    cudaEventRecord(start,0);
    sort_array_2<<<n/(2*min(n/2,1024)),min(n/2,1024)>>>(M_dev);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cout << "*** Execution time : " << elapsedTime << " ms" << endl;
    // *** Performing merge path ***
    cout << "\n---- MERGING SORTED ARRAYS BY MERGE PATH ----" << endl;
    cudaEventRecord(start,0);
    int nb_arrays = n/2;
    int array_size = 2;
    while (nb_arrays!=1){
        nb_arrays /= 2;   // number of arrays to merge on the next pass
        array_size *= 2;  // sorted run length after the next pass
        merge_array<<<nb_arrays,min(array_size,1024)>>>(M_dev_next,M_dev,array_size);
        // BUGFIX: ping-pong the two managed buffers instead of allocating a
        // fresh scratch buffer every pass -- the old code leaked one
        // n-element managed allocation per merge level.
        unsigned int *tmpPtr = M_dev;
        M_dev = M_dev_next;
        M_dev_next = tmpPtr;
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cout << "*** Execution time : " << elapsedTime << " ms" << endl;
    // *** Sanity check ***
    unsigned int *M_host = (unsigned int*)malloc(n*sizeof(unsigned int));
    testCUDA(cudaMemcpy(M_host, M_dev, n*sizeof(unsigned int), cudaMemcpyDeviceToHost));
    cout << "\n---- SANITY CHECK ----" << endl;
    timer = clock();
    sanity_check(M_host);
    timer = clock() - timer;
    cout << "*** Execution time : " << (float)timer/(CLOCKS_PER_SEC/1000) << " ms" << endl;
    // *** Free memory on device and host ***
    free(M_host);
    curandDestroyGenerator(G);  // BUGFIX: release the cuRAND generator
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(M_dev);
    cudaFree(M_dev_next);
    return EXIT_SUCCESS;
}
|
5,435 | #include <iostream>
// Terminate the process with a "file:line" diagnostic when a CUDA runtime
// call fails. Used through the CHECK(x) macro below.
void check(const char *file, const int line, cudaError_t err) {
    if (err == cudaSuccess)
        return;
    std::cerr << file << ":" << line
              << " CUDA call failed with error: "
              << cudaGetErrorString(err)
              << std::endl;
    std::terminate();
}
#define CHECK(x) check(__FILE__, __LINE__, (x))
// Row-sum kernel: b[row] = sum of row `row` of the n x n matrix a, where
// n equals the total number of launched threads (1-D grid, one thread/row).
// NOTE(review): each thread walks its row contiguously, so threads within a
// warp access addresses n floats apart -- uncoalesced; a per-row parallel
// reduction would use bandwidth far better.
__global__ void sum(const float *a, float *b) {
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    int n = blockDim.x * gridDim.x;
    float acc = 0;
    for (int col = 0; col < n; col++) {
        acc += a[col + n * row];
    }
    // (removed the dead `idx` counter the original incremented but never used)
    b[row] = acc;
}
int main() {
    // Host driver: fill an N x N matrix with sin values, time the row-sum
    // kernel (one thread per row), and print a sample of the output.
    const int N = 8192;
    float *ha, *hb;
    float *da, *db;
    ha = new float [N * N];
    hb = new float [N];
    CHECK(cudaMalloc(&da, N * N * sizeof(float)));
    CHECK(cudaMalloc(&db, N * sizeof(float)));
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            ha[i * N + j] = sin(i * N + j);
    CHECK(cudaMemcpy(da, ha, N * N * sizeof(float), cudaMemcpyHostToDevice));
    cudaEvent_t start, stop;
    CHECK(cudaEventCreate(&start));
    CHECK(cudaEventCreate(&stop));
    CHECK(cudaEventRecord(start, 0));
    dim3 block(1024);
    dim3 grid(N / block.x);   // N is a multiple of 1024, so no remainder
    sum<<<grid, block>>>(da, db);
    CHECK(cudaEventRecord(stop, 0));
    CHECK(cudaEventSynchronize(stop));   // wait so the elapsed time is valid
    float timems;
    CHECK(cudaEventElapsedTime(&timems, start, stop));
    std::cout << "Kernel elapsed time: " << timems << " ms" << std::endl;
    CHECK(cudaMemcpy(hb, db, N * sizeof(float), cudaMemcpyDeviceToHost));
    for (int i = 0; i < 10; i++)
        std::cout << "b[" << i << "] = " << hb[i] << std::endl;
    std::cout << "..." << std::endl;
    for (int i = N - 10; i < N; i++)
        std::cout << "b[" << i << "] = " << hb[i] << std::endl;
    CHECK(cudaFree(da));
    CHECK(cudaFree(db));
    // NOTE(review): start/stop events are never cudaEventDestroy'd.
    delete[] ha;
    delete[] hb;
    return 0;
}
|
5,436 | #include <cstdio>
#define N 64
#define TPB 32
// Map index i in [0, n-1] linearly onto [0.0, 1.0].
float scale(int i, int n)
{
    return ((float)i) / (n - 1);
}
// Absolute distance |x2 - x1| between two 1-D points, computed as the
// square root of the squared difference.
__device__ float distance(float x1, float x2)
{
    const float d = x2 - x1;
    return sqrt( d * d );
}
// One thread per element: store the distance from d_in[i] to the reference
// point in d_out[i], and print a per-element trace (debug output).
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
    const int gid = blockIdx.x*blockDim.x + threadIdx.x;
    const float val = d_in[gid];
    d_out[gid] = distance(val, ref);
    printf("%2d %18.10f %18.10f %18.10f\n", gid, ref, val, d_out[gid]);
}
int main()
{
    // Compute |x - 0.5| for N evenly spaced points in [0,1] using managed
    // memory; the kernel prints a per-element trace.
    const float ref = 0.5f;
    // Declare pointers for input and output arrays
    float *in = 0;
    float *out = 0;
    // Allocate managed memory for input and output arrays
    cudaMallocManaged(&in, N*sizeof(float));
    cudaMallocManaged(&out, N*sizeof(float));
    for( int i = 0; i < N; i++ ) {
        in[i] = scale(i, N);
    }
    // N is a multiple of TPB, so the grid covers the array exactly
    distanceKernel<<<N/TPB,TPB>>>(out, in, ref);
    cudaDeviceSynchronize();   // wait before the host frees managed memory
    cudaFree(in);
    cudaFree(out);
    return 0;
}
|
5,437 | #include <stdio.h>
#define QUEENS 10
// Render a QUEENS x QUEENS board into one string and print it: 'Q' where
// row i has its queen in column A[i], '.' elsewhere, cells separated by
// spaces. Buffer size: QUEENS rows of (2*QUEENS chars + '\n') plus the NUL
// -- the hard-coded QUEENS*21+1 matches QUEENS == 10 only.
__device__
void kiir(char *A)
{
    char s[QUEENS*21 + 1];
    int k = 0;
    for(int i = 0; i < QUEENS; i++)
    {
        for(int j = 0; j < QUEENS; j++)
        {
            if(A[i] == j)
                s[k++] = 'Q';
            else
                s[k++] = '.';
            s[k++] = ' ';
        }
        s[k++] = '\n';
    }
    s[k] = '\0';
    // single printf so the board is not interleaved with other threads
    printf("%s\n", s);
}
// Brute-force 10-queens tester. Each thread encodes one candidate placement:
// rows 0-2 come from threadIdx.{x,y,z}, rows 3-8 from the two decimal digits
// of blockIdx.{x,y,z}, and row 9 from the kernel argument n (the host loops
// over n = 0..QUEENS-1).
// Phase 1 rejects placements whose columns are not a permutation of 0..9
// (marking used columns in B and requiring all to be set). Phase 2 rejects
// any pair on a shared diagonal. Survivors are counted via atomicAdd.
__global__
void queen(int *db, const int n)
{
    {
        // permutation test: every column must be used exactly once
        bool B[QUEENS];
        for(int i = 0; i < QUEENS; i++)
            B[i] = 0;
        B[threadIdx.x] = 1;
        B[threadIdx.y] = 1;
        B[threadIdx.z] = 1;
        B[blockIdx.x/10] = 1;
        B[blockIdx.x%10] = 1;
        B[blockIdx.y/10] = 1;
        B[blockIdx.y%10] = 1;
        B[blockIdx.z/10] = 1;
        B[blockIdx.z%10] = 1;
        B[n] = 1;
        for(int i = 0; i < QUEENS; i++)
            if(B[i] == 0)
                return;
    }
    // decode the placement: A[row] = column of the queen in that row
    char A[QUEENS];
    A[0] = threadIdx.x;
    A[1] = threadIdx.y;
    A[2] = threadIdx.z;
    A[3] = blockIdx.x/10;
    A[4] = blockIdx.x%10;
    A[5] = blockIdx.y/10;
    A[6] = blockIdx.y%10;
    A[7] = blockIdx.z/10;
    A[8] = blockIdx.z%10;
    A[9] = n;
    // diagonal test: |row difference| must never equal |column difference|
    for(int i = 0; i < QUEENS - 1; i++)
        for(int j = i + 1; j < QUEENS; j++)
            if(abs(i - j) == abs(A[i] - A[j]))
                return;
    atomicAdd(db, 1);
    // NOTE(review): this read of *db is not atomic with the increment, so
    // concurrently found solutions may print duplicate or stale numbers.
    printf("%d.\n", *db);
    kiir(A);
}
// Host driver for the 10-queens search: one kernel launch per value of the
// last row's column, then read back and report the solution count.
int
main()
{
    int h = 0, *d;
    cudaMalloc((void**) &d, sizeof(int));
    cudaMemcpy(d, &h, sizeof(int), cudaMemcpyHostToDevice);
    dim3 blocksPerGrid(100, 100, 100);
    dim3 threadsPerBlock(10, 10, 10);
    for(int i = 0; i < QUEENS; i++)
        queen<<<blocksPerGrid, threadsPerBlock>>>(d, i);
    cudaMemcpy(&h, d, sizeof(int), cudaMemcpyDeviceToHost); // also syncs
    // BUGFIX: read the sticky error status BEFORE cudaDeviceReset() --
    // resetting destroys the context, so checking afterwards reported
    // nothing useful about the launches above.
    cudaError_t error = cudaGetLastError();
    cudaFree(d);
    cudaDeviceReset();
    if(error != cudaSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(error));
        return -1;
    }
    fprintf(stderr, "Solutions: %d\n", h);
    fprintf(stderr, "\nDone\n");
    return 0;
}
|
5,438 | //To compile should add "-lfftw3 -lgsl"
#include <stdio.h>
#include <math.h>
// #include <gsl/gsl_rng.h>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <iostream>
#include <time.h>
//#include <fftw3.h>
#include <complex>
#include <cufft.h>
using namespace std;
// Particle ensemble in 3-D, structure-of-arrays layout: each field is an
// array of `number` entries (position, force, velocity per component).
struct particle3D{
  int number;
  double *mass;
  double *x;
  double *y;
  double *z;
  double *Fx;
  double *Fy;
  double *Fz;
  double *vx;
  double *vy;
  double *vz;
};
// Uniform 3-D grid: Nx*Ny*Nz cells (N total) over a box of side L, with
// per-cell potential, density, and force-component arrays.
struct grid3D{
  double L;
  int Nx;
  int Ny;
  int Nz;
  int N;
  double dx;
  double dy;
  double dz;
  double *phi;
  double *density;
  double *Fx;
  double *Fy;
  double *Fz;
};
// Scratch state for the RK4 integrator (see init_rk4/rk4_mid/rk4_end):
// per-particle acceleration and velocity accumulators plus stage indices.
struct rk43D{
  int step1;
  int step2;
  double *ax;
  double *ay;
  double *az;
  double *vx;
  double *vy;
  double *vz;
};
// Convert the potential phi into force components via 2nd-order central
// differences with periodic wrap-around: F = -(1/(2*dx)) * nConst * d(phi).
// Grid-stride loop over all Nx^3 cells; assumes a cubic grid (Ny = Nz = Nx).
__global__
void phiToForce(double* phi,double* Fx,double* Fy,double* Fz,int Nx,double L,double nConst){
    int N = Nx*Nx*Nx;
    int const Ny = Nx;
    int const Nz = Nx;
    double dx = L / Nx;
    double factor = -1./(2.0*dx) * nConst;   // central-difference prefactor
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    while(index < N){
        // decompose the flat index into (ii, jj, kk) grid coordinates
        int ii = index / (Ny*Nz);
        int jj = (index / Nz) % Ny;
        int kk = index % Nz;
        // (coord+1) minus (coord-1) neighbors, wrapped periodically by modulo
        Fx[index] = factor*( phi[ ( (ii+1)%Nx )*Ny*Nz + jj*Nz + kk ]
        - phi[ ( (Nx+ii-1)%Nx )*Ny*Nz + jj*Nz + kk ] );
        Fy[index] = factor*( phi[ ii*Ny*Nz + ( (jj+1)%Ny )*Nz + kk ]
        - phi[ ii*Ny*Nz + ( (Ny+jj-1)%Ny )*Nz + kk ] );
        Fz[index] = factor*( phi[ ii*Ny*Nz + jj*Nz + ((kk+1)%Nz)]
        - phi[ ii*Ny*Nz + jj*Nz + ((Nz+kk-1)%Nz)]);
        index += blockDim.x*gridDim.x;   // grid-stride advance
    }
}
// Divide a half-spectrum (Nx x Ny x Nzh, interleaved re/im doubles) by k^2,
// skipping the DC component at index 0 -- the Fourier-space step of a
// Poisson solve. Wavenumbers are folded so that frequencies above Nyquist
// map to their negative counterparts. Grid-stride loop over all bins.
__global__
void overk2(double *out,int Nx,int Ny,int Nzh){
    //Notice that *out actually is a complex<double> pointer array.
    int N = Nx*Ny*Nzh;
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int fi,fj;
    double kxx,kyy,kzz;
    while(index < N){
        // decompose the flat index into (ii, jj, kk) spectral coordinates
        int ii = index / (Ny*Nzh);
        int jj = (index / Nzh) % Ny;
        int kk = index % Nzh;
        // fold frequencies above Nyquist back to their magnitudes
        if (2*ii < Nx) {fi = ii;}
        else {fi = Nx-ii;}
        if (2*jj < Ny) {fj = jj;}
        else {fj = Ny-jj;}
        kxx = 1.0*fi*fi;
        kyy = 1.0*fj*fj;
        kzz = 1.0*kk*kk;
        if(index != 0){
            // skip the DC bin (k = 0) to avoid dividing by zero
            out[2*index] = out[2*index] / (kxx+kyy+kzz); //real part
            out[2*index+1] = out[2*index+1] / (kxx+kyy+kzz); //imaginary part
        }
        index += blockDim.x*gridDim.x;   // grid-stride advance
    }
}
void Weight(struct grid3D *grid,struct particle3D *particle,int type);
void WeightForce(struct grid3D *grid,struct particle3D *particle,int type);
void poisson_solver_fft_force_3d(int const dim, struct grid3D *grid);
void _2nd_order_diff_3d(struct grid3D *grid, int const ii, int const jj, int const kk );
void _2nd_order_diff_3d_cuda(struct grid3D *grid);
void calculateGreenFFT(struct grid3D *grid, complex<double>* fftgf);
void isolatedPotential(struct grid3D *grid, complex<double>* fftgf);
void kick(struct particle3D *particle , double dt);
void drift(struct particle3D *particle , double dt);
void init_rk4(struct particle3D *particle, struct particle3D *buff, struct rk43D *rk4);
void rk4_mid(struct particle3D *particle, struct particle3D *buff, struct rk43D *rk4, double dt, int weighting);
void rk4_end(struct particle3D *particle, struct rk43D *rk4, double dt);
void periodic_boundary(double position, double length);
void boundary_check(int boundary, struct particle3D *particle, double L);
//Functions to locate memory and free memory of different struct.
void locateMemoryParticle(struct particle3D *particle,int N);
void freeMemoryParticle(struct particle3D *particle);
void locateMemoryRk4(struct rk43D *rk4,int N);
void freeMemoryRk4(struct rk43D *rk4);
void locateMemoryGrid(struct grid3D *grid);
void freeMemoryGrid(struct grid3D *grid);
int main( int argc, char *argv[] ){
//Two-body gravity test driven by a particle-mesh (PM) solver: deposit the
//particles on a grid (Weight), solve Poisson's equation with FFTs, map the
//force back to the particles (WeightForce), and advance the orbits with the
//selected integrator. Writes both particles' x/y positions to result.txt
//every 20 steps and prints total wall time measured with CUDA events.
//================Simulation Constants
int weightFunction = 1; //0/1/2 : NGP/CIC/TSC
int orbitIntegration = 1; //0/1/2 : KDK/DKD/RK4
int poissonSolver = 0; //0/1 : fft/isolated
int boundary = 0; //0/1/2 : periodic/isolated/no boundary
int dim = 3;
double L = 10.0; //Length of box (from -L/2 ~ L/2)
int Nx = 256; //Number of grid in x direction. (should be odd number)
int NParticle=2;//Number of particles used in simulation
//double massParticle=1.0;
double dt = 1.0e-2;
double G = 1.0;
double T,r1_0,r2_0;
cudaEvent_t start, stop; //For cuda timing
float totalTime;
//================Structs
struct grid3D grid;
struct particle3D myParticle;
struct particle3D buffParticle; //If not RK4 mode , it will not be malloc and free
struct rk43D myrk4; //If not RK4 mode , it will not be malloc and free
complex<double>* fftgf; //FFT of the Green's function; only allocated/used/freed when poissonSolver == 1
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//Output to a file
FILE *output;
output = fopen("result.txt","w");
//================Random number generator.
//To use : d=gsl_rng_uniform(rng);
// gsl_rng *rng;
// rng = gsl_rng_alloc(gsl_rng_mt19937);
// gsl_rng_set(rng,123456);//The seed is 123456.
//================Initialize Grid Parameter===========
grid.L = L;
grid.Nx = Nx;
grid.Ny = grid.Nx;
grid.Nz = grid.Nx;
grid.N = grid.Nx * grid.Ny * grid.Nz;
grid.dx = grid.L / (grid.Nx-1); //-1 is because boundary (make box closed)
grid.dy = grid.L / (grid.Ny-1);
grid.dz = grid.L / (grid.Nz-1);
locateMemoryGrid(&grid);
//================Initialize Particles ======
myParticle.number = NParticle;
locateMemoryParticle(&myParticle,NParticle);
if(orbitIntegration == 2){
//================Initialize Particles for RK4 (buffer)======
buffParticle.number = NParticle;
locateMemoryParticle(&buffParticle,NParticle);
//================Initialize Runge-Kutta Coefficient======
myrk4.step1 = 0;
myrk4.step2 = 1;
locateMemoryRk4(&myrk4,NParticle);
}
if(poissonSolver == 1){
//Isolated boundaries need the Green's function FFT once, up front.
fftgf = (complex<double>*) malloc(sizeof(complex<double>) * 8*grid.N);
calculateGreenFFT(&grid,fftgf);
}
//Initialize mass of particles
myParticle.mass[0]=2.0;
myParticle.mass[1]=1.0;
if(orbitIntegration == 2){
buffParticle.mass[0] = myParticle.mass[0];
buffParticle.mass[1] = myParticle.mass[1];
}
//Initialize Initial Position of Particle.
// for (int i = 0; i < myParticle.number; ++i){
// myParticle.x[i]=gsl_rng_uniform(rng) * grid.L - grid.L/2;
// myParticle.y[i]=gsl_rng_uniform(rng) * grid.L - grid.L/2;
// printf("At (%f,%f) \n",myParticle.x[i],myParticle.y[i]);
// }
myParticle.x[0] = 1.0;
myParticle.y[0] = 0.0;
myParticle.z[0] = 0.0;
myParticle.x[1] = -2.0;
myParticle.y[1] = 0.0;
myParticle.z[1] = 0.0;
//Initial distances from the origin (used for the period estimate below).
r1_0 = sqrt(pow(myParticle.x[0],2)+pow(myParticle.y[0],2)+pow(myParticle.z[0],2));
r2_0 = sqrt(pow(myParticle.x[1],2)+pow(myParticle.y[1],2)+pow(myParticle.z[1],2));
//Bootstrap solve so the initial circular velocities can be set from the force.
Weight(&grid,&myParticle,weightFunction);
if(poissonSolver == 0){poisson_solver_fft_force_3d(dim,&grid);}
else if ( poissonSolver == 1 ){isolatedPotential(&grid,fftgf);}
WeightForce(&grid,&myParticle,weightFunction);
// //Initialize Initial velocity
//Tangential speeds for (approximately) circular orbits: v = sqrt(|Fx*x|/m).
myParticle.vx[0] = 0.0;
myParticle.vy[0] = -sqrt(fabs(myParticle.Fx[0]*myParticle.x[0])/myParticle.mass[0]);
myParticle.vz[0] = 0.0;
myParticle.vx[1] = 0.0;
myParticle.vy[1] = sqrt(fabs(myParticle.Fx[1]*myParticle.x[1])/myParticle.mass[1]);
myParticle.vz[1] = 0.0;
//Analytic two-body force and orbital-period estimate (diagnostics only; T is unused below).
double F_0;
F_0 = G * myParticle.mass[0] * myParticle.mass[1]/pow(r1_0+r2_0,2);
T =sqrt(myParticle.mass[0]*4*pow(M_PI,2)*r1_0/F_0);
//Check whether force is same magnitude but inverse direction.
printf("%f\t%f\t%f\n",myParticle.Fx[0],myParticle.Fy[0],myParticle.Fz[0]);
printf("%f\t%f\t%f\n",myParticle.Fx[1],myParticle.Fy[1],myParticle.Fz[1]);
//Time evolution loop
double t = 0.0;
for(int st=0;st < 1000;st++){
//Deposit Particles to grid
Weight(&grid,&myParticle,weightFunction);
//Use Fourier Transform to calculate potential and force.
if ( poissonSolver == 0 ) poisson_solver_fft_force_3d(dim, &grid);
else if ( poissonSolver == 1 ){isolatedPotential(&grid,fftgf);}
//Remap the force to particle.
WeightForce(&grid,&myParticle,weightFunction);
//Move particle
if(orbitIntegration == 0){
//KDK scheme
kick(&myParticle,dt/2);
drift(&myParticle,dt);
boundary_check(boundary, &myParticle, L);
Weight(&grid,&myParticle,weightFunction);
if ( poissonSolver == 0 ) poisson_solver_fft_force_3d(dim, &grid);
else if ( poissonSolver == 1 ){isolatedPotential(&grid,fftgf);}
WeightForce(&grid,&myParticle,weightFunction);
kick(&myParticle,dt/2);
}
else if(orbitIntegration == 1){
//DKD scheme
drift(&myParticle,dt/2);
boundary_check(boundary, &myParticle, L);
Weight(&grid,&myParticle,weightFunction);
if ( poissonSolver == 0 ) poisson_solver_fft_force_3d(dim, &grid);
else if ( poissonSolver == 1 ) isolatedPotential(&grid,fftgf);
WeightForce(&grid,&myParticle,weightFunction);
kick(&myParticle,dt);
drift(&myParticle,dt/2);
}
else if(orbitIntegration == 2){
//RK4: four stages with dt/2,dt/2,dt,dt and weights 1,2,2,1 (see rk4_mid)
init_rk4(&myParticle,&buffParticle,&myrk4);
rk4_mid(&myParticle,&buffParticle,&myrk4,dt/2,1); //k1
boundary_check(boundary, &buffParticle, L);
Weight(&grid,&buffParticle,weightFunction);
if ( poissonSolver == 0 ) poisson_solver_fft_force_3d(dim, &grid);
else if ( poissonSolver == 1 ) isolatedPotential(&grid,fftgf);
WeightForce(&grid,&buffParticle,weightFunction);
rk4_mid(&myParticle,&buffParticle,&myrk4,dt/2,2); //k2
boundary_check(boundary, &buffParticle, L);
Weight(&grid,&buffParticle,weightFunction);
if ( poissonSolver == 0 ) poisson_solver_fft_force_3d(dim, &grid);
else if ( poissonSolver == 1 ) isolatedPotential(&grid,fftgf);
WeightForce(&grid,&buffParticle,weightFunction);
rk4_mid(&myParticle,&buffParticle,&myrk4,dt,2); //k3
boundary_check(boundary, &buffParticle, L);
Weight(&grid,&buffParticle,weightFunction);
if ( poissonSolver == 0 ) poisson_solver_fft_force_3d(dim, &grid);
else if ( poissonSolver == 1 ) isolatedPotential(&grid,fftgf);
WeightForce(&grid,&buffParticle,weightFunction);
rk4_mid(&myParticle,&buffParticle,&myrk4,dt,1); //k4
rk4_end(&myParticle,&myrk4,dt);
}
//Boundary Condition (dead code kept behind CHECK_THIS_LATER; the live
//handling is the boundary_check call right below)
#ifdef CHECK_THIS_LATER
if (boundary == 0){
for (int i=0; i<NParticle; i++){
if ( abs(myParticle.x[i]) > L/2){
periodic_boundary(myParticle.x[i],L);
}
if ( abs(myParticle.y[i]) > L/2){
periodic_boundary(myParticle.y[i],L);
}
if ( abs(myParticle.z[i]) > L/2){
periodic_boundary(myParticle.z[i],L);
}
}
}
else if (boundary == 1){
for (int i=0; i<NParticle; i++){
if ( abs(myParticle.x[i]) > L/2 || abs(myParticle.y[i]) > L/2 || abs(myParticle.z[i]) > L/2){
myParticle.mass[i] = 0;
cout << "A particle reaches the boundary." << endl;
}
}
}
#endif
boundary_check(boundary, &myParticle, L);
//print out the position of particle 1
if(st % 20 == 0){
printf("Step:%d\n", st);
// double momentum_x = 0;
// double momentum_y = 0;
// double momentum_z = 0;
// for (int i=0; i<NParticle; i++){
// momentum_x += myParticle.mass[i] * myParticle.vx[i];
// momentum_y += myParticle.mass[i] * myParticle.vy[i];
// momentum_z += myParticle.mass[i] * myParticle.vz[i];
// }
// cout << "(px , py, pz) = (" << momentum_x << ", " << momentum_y << ", " << momentum_z << ")" << endl;
fprintf(output,"%f\t%f\t",myParticle.x[0],myParticle.y[0]);
fprintf(output,"%f\t%f\n",myParticle.x[1],myParticle.y[1]);
}
t+=dt;
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&totalTime, start, stop);
printf("Total time : %f\n",totalTime/1000);
fclose(output);
freeMemoryGrid(&grid);
freeMemoryParticle(&myParticle);
if(orbitIntegration == 2){
freeMemoryParticle(&buffParticle);
}
if(poissonSolver == 1){
free(fftgf);
}
return 0;
}
void poisson_solver_fft_force_3d(int const dim, struct grid3D *grid){
// Periodic-boundary Poisson solve for the potential, then force evaluation:
//   1. forward real-to-complex cuFFT of grid->density,
//   2. overk2 kernel divides every spectral mode by k^2 (DC mode skipped),
//   3. inverse complex-to-real cuFFT back to a (scaled) potential,
//   4. phiToForce kernel differentiates it into grid->Fx/Fy/Fz.
// `dim` is accepted for interface compatibility; the routine is fixed 3-D.
// NOTE(review): the spectrum makes a device->host->device round trip around
// the overk2 launch (out -> d_out -> out); it could stay resident on the
// device. Left unchanged here.
int const Nx = grid->Nx;
int const Ny = grid->Ny;
int const Nz = grid->Nz;
int const Nzh = (Nz/2+1); //Hermitian-reduced length of the last axis (R2C layout)
cufftHandle p1,p2;
double *in;
complex<double> *out;
in = (double*) malloc( sizeof(double) * Nx*Ny*Nz );
out = (complex<double>*) malloc( sizeof(complex<double>) * Nx*Ny*Nzh);
/////////// fft ///////////
cufftDoubleReal *dataIn;
cufftDoubleComplex *dataOut;
cudaMalloc((void**)&dataIn, sizeof(cufftDoubleReal)*Nx*Ny*Nz);
cudaMalloc((void**)&dataOut, sizeof(cufftDoubleComplex)*Nx*Ny*Nzh);
cudaMemcpy(dataIn, grid->density, sizeof(cufftDoubleReal)*Nx*Ny*Nz, cudaMemcpyHostToDevice);
//cufft
if (cufftPlan3d(&p1,Nx,Ny,Nz, CUFFT_D2Z) != CUFFT_SUCCESS) {
printf("CUFFT error: Plan D2Z creation failed.\n");
exit(1);
}
if (cufftExecD2Z(p1, dataIn, dataOut) != CUFFT_SUCCESS) {
printf("CUFFT error: ExecD2Z forward failed.\n");
exit(1);
}
cudaMemcpy(out, dataOut,sizeof(cufftDoubleComplex)*Nx*Ny*Nzh, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cufftDestroy(p1);
//Divide the spectrum by k^2 on the GPU (overk2 treats d_out as interleaved complex).
double *d_out;
cudaMalloc((void**)&d_out, sizeof(double)*2*Nx*Ny*Nzh);
cudaMemcpy(d_out,out,sizeof(double)*2*Nx*Ny*Nzh,cudaMemcpyHostToDevice);
overk2 <<<128,128>>> (d_out,Nx,Ny,Nzh);
cudaMemcpy(out,d_out,sizeof(double)*2*Nx*Ny*Nzh,cudaMemcpyDeviceToHost);
cudaFree(d_out);
/////////// inverse fft ///////////
cudaMemcpy(dataOut, out,sizeof(cufftDoubleComplex)*Nx*Ny*Nzh, cudaMemcpyHostToDevice);
//cufft
if (cufftPlan3d(&p2,Nx,Ny,Nz, CUFFT_Z2D) != CUFFT_SUCCESS) {
printf("CUFFT error: Plan creation failed.\n");
exit(1);
}
if (cufftExecZ2D(p2, dataOut, dataIn) != CUFFT_SUCCESS) {
printf("CUFFT error: ExecZ2D failed.\n");
exit(1);
}
cudaMemcpy(in, dataIn, sizeof(cufftDoubleReal)*Nx*Ny*Nz, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cufftDestroy(p2);
/////////// normalization ///////////
double nConst = -1.0 / M_PI/grid->L; //Normalization constant — presumably folds
                                     //the cuFFT scaling and the Poisson prefactor
                                     //into phiToForce; TODO confirm the derivation.
double* d_in;
double* d_Fx;
double* d_Fy;
double* d_Fz;
int size = grid->N*sizeof(double);
cudaMalloc((void**)&d_in, size);
cudaMalloc((void**)&d_Fx, size);
cudaMalloc((void**)&d_Fy, size);
cudaMalloc((void**)&d_Fz, size);
cudaMemcpy(d_in,in,size,cudaMemcpyHostToDevice);
phiToForce <<<128,128>>>(d_in,d_Fx,d_Fy,d_Fz,grid->Nx,grid->L,nConst);
cudaDeviceSynchronize();
cudaMemcpy(grid->Fx,d_Fx,size, cudaMemcpyDeviceToHost);
cudaMemcpy(grid->Fy,d_Fy,size, cudaMemcpyDeviceToHost);
cudaMemcpy(grid->Fz,d_Fz,size, cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_Fx);
cudaFree(d_Fy);
cudaFree(d_Fz);
cudaFree(dataIn);
cudaFree(dataOut);
free(in);
free(out);
}
void _2nd_order_diff_3d(struct grid3D *grid, int const ii, int const jj, int const kk ) {
// Second-order central difference of the potential at grid point (ii,jj,kk):
// F = -grad(phi), with periodic wrap-around at the grid edges.
// NOTE(review): `index` is built with strides (Ny*Nz, Nz, 1) while the phi
// reads use strides (Nx*Ny, Nx, 1); the two layouts agree only because the
// grid is cubic (Nx == Ny == Nz) — verify before supporting non-cubic grids.
double factor1 = -1./(2.*grid->dx); //central-difference coefficient, minus sign gives F = -dphi/dx
double factor2 = -1./(2.*grid->dy);
double factor3 = -1./(2.*grid->dz);
int const Nx = grid->Nx;
int const Ny = grid->Ny;
int const Nz = grid->Nz;
int index = ii*Ny*Nz + jj*Nz + kk;
grid->Fx[ index ] = factor1*( grid->phi[ ( (Nx+ii+1)%Nx )*Nx*Ny + jj*Nx + kk ]
- grid->phi[ ( (Nx+ii-1)%Nx )*Nx*Ny + jj*Nx + kk ] );
grid->Fy[ index ] = factor2*( grid->phi[ ii*Nx*Ny + ( (Ny+jj+1)%Ny )*Nx + kk ]
- grid->phi[ ii*Nx*Ny + ( (Ny+jj-1)%Ny )*Nx + kk ] );
grid->Fz[ index ] = factor3*( grid->phi[ ii*Nx*Ny + jj*Nx + ((Nz+kk+1)%Nz)]
- grid->phi[ ii*Nx*Ny + jj*Nx + ((Nz+kk-1)%Nz)]);
}
void _2nd_order_diff_3d_cuda(struct grid3D *grid) {
// GPU evaluation of the force field from the potential: upload phi, run
// the phiToForce kernel with unit normalization, and download Fx/Fy/Fz
// back into the host-side grid arrays.
const int bytes = grid->N * sizeof(double);
double *d_phi;
double *d_forceX;
double *d_forceY;
double *d_forceZ;
cudaMalloc((void**)&d_phi, bytes);
cudaMalloc((void**)&d_forceX, bytes);
cudaMalloc((void**)&d_forceY, bytes);
cudaMalloc((void**)&d_forceZ, bytes);
cudaMemcpy(d_phi, grid->phi, bytes, cudaMemcpyHostToDevice);
phiToForce <<<128,128>>>(d_phi, d_forceX, d_forceY, d_forceZ, grid->Nx, grid->L, 1.0);
cudaDeviceSynchronize();
cudaMemcpy(grid->Fx, d_forceX, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(grid->Fy, d_forceY, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(grid->Fz, d_forceZ, bytes, cudaMemcpyDeviceToHost);
cudaFree(d_forceZ);
cudaFree(d_forceY);
cudaFree(d_forceX);
cudaFree(d_phi);
}
void calculateGreenFFT(struct grid3D *grid, complex<double> *fftgf){
// Precompute the forward FFT of the isolated-boundary Green's function
// (doubled-grid method): build g = -1/r on one octant of a 2Nx x 2Ny x 2Nz
// grid, mirror it into the remaining octants so the function is symmetric,
// then FFT it once into fftgf for reuse by isolatedPotential().
// fftgf must have room for 8*grid->N complex values.
// NOTE(review): linear indices use strides (NNx*NNy, NNx, 1); the middle
// stride would nominally be NNz — correct here only because the grid is
// cubic (Nx == Ny == Nz). The mirror loops start at Nx+1/Ny+1/Nz+1, so the
// i==Nx, j==Ny and k==Nz planes keep their zero initialization — TODO
// confirm that treatment of the half-way planes is intended.
double *greenFunction;
int N = grid->N;
int Nx = grid->Nx;
int Ny = grid->Ny;
int Nz = grid->Nz;
int NNx = 2*Nx;
int NNy = 2*Ny;
int NNz = 2*Nz;
greenFunction = (double*)malloc(8*N*sizeof(double));
complex<double> *gf; //For green function
gf = (complex<double>*) malloc(sizeof(complex<double>) * 8*N);
//Initialize green function array
for(int i=0;i<NNx;i++){
for(int j=0;j<NNy;j++){
for(int k=0;k<NNz;k++){
greenFunction[i*NNx*NNy+j*NNx+k] = 0.0;
}
}
}
//Construct green function: -1/r in grid units; the r=0 singularity stays 0
for(int i=0;i<Nx;i++){
for(int j=0;j<Ny;j++){
for(int k=0;k<Nz;k++){
if(i != 0 || j != 0 || k != 0){
greenFunction[i*NNx*NNy+j*NNx+k] = -1.0/sqrt(pow(i,2)+pow(j,2)+pow(k,2));
}
}
}
}
//Mirror the first octant across x...
for(int i=Nx+1;i<NNx;i++){
for(int j=0;j<Ny;j++){
for(int k=0;k<Nz;k++){
greenFunction[i*NNx*NNy+j*NNx+k] = greenFunction[(NNx-i)*NNx*NNy+j*NNx+k];
}
}
}
//...across y...
for(int i=0;i<Nx;i++){
for(int j=Ny+1;j<NNy;j++){
for(int k=0;k<Nz;k++){
greenFunction[i*NNx*NNy+j*NNx+k] = greenFunction[i*NNx*NNy+(NNy-j)*NNx+k];
}
}
}
//...across x and y together...
for(int i=Nx+1;i<NNx;i++){
for(int j=Ny+1;j<NNy;j++){
for(int k=0;k<Nz;k++){
greenFunction[i*NNx*NNy+j*NNx+k] = greenFunction[(NNx-i)*NNx*NNy+(NNy-j)*NNx+k];
}
}
}
//...and finally across z.
for(int i=0;i<NNx;i++){
for(int j=0;j<NNy;j++){
for(int k=Nz+1;k<NNz;k++){
greenFunction[i*NNx*NNy+j*NNx+k] = greenFunction[i*NNx*NNy + j*NNx + (NNz-k)];
}
}
}
//Promote to complex for the in-place Z2Z transform.
for(int i=0;i<NNx;i++){
for(int j=0;j<NNy;j++){
for(int k=0;k<NNz;k++){
gf[i*NNx*NNy + j*NNx + k] = complex<double>(0.,0.);
gf[i*NNx*NNy + j*NNx + k] += greenFunction[i*NNx*NNy + j*NNx + k];
}
}
}
cufftHandle plan;
cufftDoubleComplex *data;
cudaMalloc((void**)&data, sizeof(cufftDoubleComplex)*8*N);
cudaMemcpy(data, gf, sizeof(double)*8*N*2, cudaMemcpyHostToDevice);
if (cufftPlan3d(&plan,NNx,NNy,NNz, CUFFT_Z2Z) != CUFFT_SUCCESS) {
printf("CUFFT error: Plan creation failed.\n");
exit(1);
}
if (cufftExecZ2Z(plan, data, data, CUFFT_FORWARD) != CUFFT_SUCCESS) {
printf("CUFFT error: ExecZ2Z forward failed.\n");
exit(1);
}
cudaMemcpy(fftgf, data, sizeof(double)*8*N*2, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cufftDestroy(plan);
cudaFree(data);
free(gf);
free(greenFunction);
}
void isolatedPotential(struct grid3D *grid, complex<double>* fftgf){
// Isolated-boundary Poisson solve via the doubled-grid convolution trick:
//   1. zero-pad the density into a 2Nx x 2Ny x 2Nz array,
//   2. forward FFT the padded density,
//   3. multiply mode-by-mode with the precomputed FFT of the Green's
//      function (fftgf, built once by calculateGreenFFT),
//   4. inverse FFT and copy the physical octant into grid->phi,
//   5. differentiate the potential into forces on the GPU.
// Bug fix: the inverse transform's input used to be uploaded into `data`
// (already freed above) instead of the freshly allocated `data2`, so the
// Z2Z inverse ran on uninitialized device memory. The upload now targets
// `data2`.
// NOTE(review): linear indices use strides (NNx*NNy, NNx, 1) — valid only
// for cubic grids, consistent with the rest of the file.
double *densityPad;
int N = grid->N;
int Nx = grid->Nx;
int Ny = grid->Ny;
int Nz = grid->Nz;
int NNx = 2*Nx;
int NNy = 2*Ny;
int NNz = 2*Nz;
densityPad = (double*)malloc(8*N*sizeof(double));
cufftHandle p1,p2; //fft plan for cuFFT
complex<double> *dp, *fftdp; //For Padding density
complex<double> *phi,*ifftphi; //For potential
dp = (complex<double>*) malloc(sizeof(complex<double>) * 8*N);
fftdp = (complex<double>*) malloc(sizeof(complex<double>) * 8*N);
phi = (complex<double>*) malloc(sizeof(complex<double>) * 8*N);
ifftphi = (complex<double>*) malloc(sizeof(complex<double>) * 8*N);
//Zero-pad the density into the doubled grid.
for(int i=0;i<NNx;i++){
for(int j=0;j<NNy;j++){
for(int k=0;k<NNz;k++){
if(i < Nx && j < Ny && k < Nz){
//Copy initial density
densityPad[i*NNx*NNy+j*NNx+k] = grid->density[i*Nx*Ny+j*Nx+k];
}else{
//Padding 0s
densityPad[i*NNx*NNy+j*NNx+k] = 0.0;
}
}
}
}
//Promote to complex for the Z2Z transform.
for(int i=0;i<NNx;i++){
for(int j=0;j<NNy;j++){
for(int k=0;k<NNz;k++){
dp[i*NNx*NNy + j*NNx + k] = complex<double>(0.,0.);
dp[i*NNx*NNy + j*NNx + k] += densityPad[i*NNx*NNy + j*NNx + k];
}
}
}
cufftDoubleComplex *data;
cudaMalloc((void**)&data, sizeof(cufftDoubleComplex)*8*N);
cudaMemcpy(data, dp, sizeof(double)*8*N*2, cudaMemcpyHostToDevice);
//cufft
if (cufftPlan3d(&p1,NNx,NNy,NNz, CUFFT_Z2Z) != CUFFT_SUCCESS) {
printf("CUFFT error: Plan creation failed.\n");
exit(1);
}
if (cufftExecZ2Z(p1, data, data, CUFFT_FORWARD) != CUFFT_SUCCESS) {
printf("CUFFT error: ExecZ2Z forward failed.\n");
exit(1);
}
cudaMemcpy(fftdp, data, sizeof(double)*8*N*2, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cufftDestroy(p1);
cudaFree(data);
//Pointwise product in Fourier space == convolution with the Green's function.
for(int i=0;i<NNx;i++){
for(int j=0;j<NNy;j++){
for(int k=0;k<NNz;k++){
//Multiply 2 imaginary numbers
int index = i*NNx*NNy + j*NNx + k;
// phi[index][0] = fftdp[index][0] * fftgf[index][0] - fftdp[index][1] * fftgf[index][1];
// phi[index][1] = fftdp[index][0] * fftgf[index][1] + fftdp[index][1] * fftgf[index][0];
phi[index] = fftdp[index] * fftgf[index];
}
}
}
cufftDoubleComplex *data2;
cudaMalloc((void**)&data2, sizeof(cufftDoubleComplex)*8*N);
cudaMemcpy(data2, phi, sizeof(double)*8*N*2, cudaMemcpyHostToDevice); //fix: was `data` (freed above)
////ifft cufft
if (cufftPlan3d(&p2,NNx,NNy,NNz, CUFFT_Z2Z) != CUFFT_SUCCESS) {
printf("CUFFT error: Plan creation failed.\n");
exit(1);
}
if (cufftExecZ2Z(p2, data2, data2, CUFFT_INVERSE) != CUFFT_SUCCESS) {
printf("CUFFT error: ExecZ2Z forward failed.\n");
exit(1);
}
cudaMemcpy(ifftphi, data2, sizeof(double)*8*N*2, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cufftDestroy(p2);
cudaFree(data2);
//Copy the physical octant back, applying the 1/(8N) inverse-FFT scaling.
//NOTE(review): abs() takes the complex magnitude and the sign is forced
//negative — assumes the true potential is real and negative everywhere;
//TODO confirm.
for(int i=0;i<Nx;i++){
for(int j=0;j<Ny;j++){
for(int k=0;k<Nz;k++){
int index1 = i*Nx*Ny+j*Nx+k;//index for N size grid.
int index2 = i*NNx*NNy + j*NNx + k;// index for 2N size grid.
grid->phi[index1] = -1.0/grid->dx / (8*N) * abs(ifftphi[index2]);
}
}
}
// for (int i=0; i < Nx; i++){
// for (int j=0; j < Ny; j++){
// for(int k=0 ; k < Nz ; k++){
// _2nd_order_diff_3d(grid, i, j,k);
// }
// }
// }
//Use GPU to calculate force from potential.
_2nd_order_diff_3d_cuda(grid);
free(densityPad);
free(dp);
free(fftdp);
free(phi);
free(ifftphi);
}
void Weight(struct grid3D *grid,struct particle3D *particle,int type){
// Deposit each particle's mass onto grid->density.
// type selects the assignment scheme:
//   0 = NGP (nearest grid point, 1 cell)
//   1 = CIC (cloud-in-cell, 2x2x2 neighbourhood)
//   2 = TSC (triangular-shaped cloud, 3x3x3 neighbourhood, periodic wrap)
// NOTE(review): only the TSC branch wraps indices; NGP and CIC assume the
// particle sits far enough from the box edge that lx+1 etc. stay in range —
// TODO confirm particles cannot reach the outermost cells in those modes.
//Initialize Density field
for(int i=0 ; i < grid->N ; i++){
grid->density[i]=0.0;
}
for(int i=0;i < particle->number;i++){
int lx,ly,lz,sx,sy,sz;
double shift = -grid->L/2; //make (0,0) to be in the center of grid.
//Lower cell corner containing the particle (integer truncation).
lx = (particle->x[i]-shift)/grid->dx;
ly = (particle->y[i]-shift)/grid->dy;
lz = (particle->z[i]-shift)/grid->dz;
if(type == 0){
//NGP: sx/sy/sz land in {0,1}; the truncation rounds to the nearest point.
sx = particle->x[i]-shift-lx * grid->dx - 0.5*grid->dx + 1;
sy = particle->y[i]-shift-ly * grid->dy - 0.5*grid->dy + 1;
sz = particle->z[i]-shift-lz * grid->dz - 0.5*grid->dz + 1;
grid->density[(lx+sx)*grid->Nx*grid->Ny + (ly+sy)*grid->Nx + (lz+sz) ] += particle->mass[i];
}else if(type == 1){
//CIC: trilinear weights over the 8 surrounding grid points.
for(int zz=0;zz<2;zz++){
for(int j=0;j<4;j++){
int p = j / 2;
int q = j % 2;
double wFactor = (1-fabs(particle->x[i]-shift-(lx+p)*grid->dx)/grid->dx)*(1-fabs(particle->y[i]-shift-(ly+q)*grid->dy)/grid->dy);
wFactor *= (1-fabs(particle->z[i]-shift-(lz+zz)*grid->dz)/grid->dz);
grid->density[(lx+p)*grid->Nx*grid->Ny + (ly+q)*grid->Nx + (lz+zz) ] += particle->mass[i] * wFactor;
}
}
}else if(type == 2){
//TSC
//xxx
//xox
//xxx
lx = (particle->x[i]-shift+0.5*grid->dx)/grid->dx; //Find the nearest point in lattice index.
ly = (particle->y[i]-shift+0.5*grid->dy)/grid->dy;
lz = (particle->z[i]-shift+0.5*grid->dz)/grid->dz;
double weightX[3]; //Weight factor in x direction for 3 affected points
double weightY[3];
double weightZ[3];
//Construct weighting factor (quadratic TSC kernel: 3/4 - d^2 within half
//a cell, 0.5*(1.5 - d)^2 out to 1.5 cells, in units of the cell size)
for(int xx=-1;xx<2;xx++){
double ddx = fabs(particle->x[i]-shift-(lx+xx)*grid->dx);
if(ddx <= grid->dx/2){
weightX[xx+1] = 3.0/4 - pow(ddx / grid->dx,2);
}else if(ddx<= grid->dx/2*3.0){
weightX[xx+1] = 0.5*pow(1.5-ddx / grid->dx,2);
}else{
printf("Should not be here");
weightX[xx+1]=0.0;
}
}
for(int yy=-1;yy<2;yy++){
double ddy = fabs(particle->y[i]-shift-(ly+yy)*grid->dy);
if(ddy <= grid->dy/2){
weightY[yy+1] = 3.0/4 - pow(ddy / grid->dy,2);
}else if(ddy <= grid->dy/2*3.0){
weightY[yy+1] = 0.5*pow(1.5-ddy / grid->dy,2);
}else{
weightY[yy+1]=0.0;
}
}
for(int zz=-1;zz<2;zz++){
double ddz = fabs(particle->z[i]-shift-(lz+zz)*grid->dz);
if(ddz <= grid->dz/2){
weightZ[zz+1] = 3.0/4 - pow(ddz / grid->dz,2);
}else if(ddz <= grid->dz/2*3.0){
weightZ[zz+1] = 0.5*pow(1.5-ddz / grid->dz,2);
}else{
weightZ[zz+1]=0.0;
}
}
//Weight mass into density
int indx,indy,indz;
for(int xx=-1;xx<2;xx++){
for(int yy=-1;yy<2;yy++){
for(int zz=-1;zz<2;zz++){
//Account for periodic boundary
indx = ((lx+xx)+grid->Nx)%grid->Nx;
indy = ((ly+yy)+grid->Ny)%grid->Ny;
indz = ((lz+zz)+grid->Nz)%grid->Nz;
int index = indx*grid->Nx*grid->Ny + indy*grid->Nx + indz;
grid->density[index]+=weightX[xx+1]*weightY[yy+1]*weightZ[zz+1]*particle->mass[i];
}
}
}
}
}
}
void WeightForce(struct grid3D *grid,struct particle3D *particle,int type){
//type = 0/1/2 => NGP/CIC/TSC
// Gather the grid force field back onto each particle, using the SAME
// weighting scheme as Weight() so deposition and interpolation are
// consistent. Accumulates grid force * particle mass into
// particle->Fx/Fy/Fz (previous values are overwritten each call).
for(int i=0;i < particle->number;i++){
int lx,ly,lz,sx,sy,sz;
double shift = -grid->L/2; //make (0,0) to be in the center of grid.
lx = (particle->x[i]-shift)/grid->dx;
ly = (particle->y[i]-shift)/grid->dy;
lz = (particle->z[i]-shift)/grid->dz;
//Reset before accumulating this step's force.
particle->Fx[i]=0.0;
particle->Fy[i]=0.0;
particle->Fz[i]=0.0;
if(type == 0){
//NGP: take the force at the single nearest grid point.
sx = particle->x[i]-shift-lx * grid->dx - 0.5*grid->dx + 1;
sy = particle->y[i]-shift-ly * grid->dy - 0.5*grid->dy + 1;
sz = particle->z[i]-shift-lz * grid->dz - 0.5*grid->dz + 1;
int pos = (lx+sx)*grid->Nx*grid->Ny + (ly+sy)*grid->Nx + (lz+sz) ;
particle->Fx[i]=grid->Fx[pos]*particle->mass[i];
particle->Fy[i]=grid->Fy[pos]*particle->mass[i];
particle->Fz[i]=grid->Fz[pos]*particle->mass[i];
}else if(type == 1){
//CIC: trilinear weights over the 8 surrounding grid points.
for(int zz=0;zz<2;zz++){
for(int j=0;j<4;j++){
int p = j / 2;
int q = j % 2;
double wFactor = (1-fabs(particle->x[i]-shift-(lx+p)*grid->dx)/grid->dx)*(1-fabs(particle->y[i]-shift-(ly+q)*grid->dy)/grid->dy);
wFactor *= (1-fabs(particle->z[i]-shift-(lz+zz)*grid->dz)/grid->dz);
int pos = (lx+p)*grid->Nx*grid->Ny + (ly+q)*grid->Nx + (lz+zz) ;
particle->Fx[i] += grid->Fx[pos] * wFactor*particle->mass[i];
particle->Fy[i] += grid->Fy[pos] * wFactor*particle->mass[i];
particle->Fz[i] += grid->Fz[pos] * wFactor*particle->mass[i];
}
}
}else if(type == 2){
//TSC
//xxx
//xox
//xxx
lx = (particle->x[i]-shift+0.5*grid->dx)/grid->dx; //Find the nearest point in lattice index.
ly = (particle->y[i]-shift+0.5*grid->dy)/grid->dy;
lz = (particle->z[i]-shift+0.5*grid->dz)/grid->dz;
double weightX[3]; //Weight factor in x direction for 3 affected points
double weightY[3];
double weightZ[3];
//Construct weighting factor (same quadratic TSC kernel as in Weight())
for(int xx=-1;xx<2;xx++){
double ddx = fabs(particle->x[i]-shift-(lx+xx)*grid->dx);
if(ddx <= grid->dx/2){
weightX[xx+1] = 3.0/4 - pow(ddx / grid->dx,2);
}else if(ddx<= grid->dx/2*3.0){
weightX[xx+1] = 0.5*pow(1.5-ddx / grid->dx,2);
}else{
printf("Should not be here");
weightX[xx+1]=0.0;
}
}
for(int yy=-1;yy<2;yy++){
double ddy = fabs(particle->y[i]-shift-(ly+yy)*grid->dy);
if(ddy <= grid->dy/2){
weightY[yy+1] = 3.0/4 - pow(ddy / grid->dy,2);
}else if(ddy <= grid->dy/2*3.0){
weightY[yy+1] = 0.5*pow(1.5-ddy / grid->dy,2);
}else{
weightY[yy+1]=0.0;
}
}
for(int zz=-1;zz<2;zz++){
double ddz = fabs(particle->z[i]-shift-(lz+zz)*grid->dz);
if(ddz <= grid->dz/2){
weightZ[zz+1] = 3.0/4 - pow(ddz / grid->dz,2);
}else if(ddz <= grid->dz/2*3.0){
weightZ[zz+1] = 0.5*pow(1.5-ddz / grid->dz,2);
}else{
weightZ[zz+1]=0.0;
}
}
//Gather the grid force onto the particle with the TSC weights
int indx,indy,indz;
for(int xx=-1;xx<2;xx++){
for(int yy=-1;yy<2;yy++){
for(int zz=-1;zz<2;zz++){
//Account for periodic boundary
indx = ((lx+xx)+grid->Nx)%grid->Nx;
indy = ((ly+yy)+grid->Ny)%grid->Ny;
indz = ((lz+zz)+grid->Nz)%grid->Nz;
int index = indx*grid->Nx*grid->Ny + indy*grid->Nx + indz;
double weight = weightX[xx+1]*weightY[yy+1]*weightZ[zz+1];
particle->Fx[i] += grid->Fx[index]*weight*particle->mass[i];
particle->Fy[i] += grid->Fy[index]*weight*particle->mass[i];
particle->Fz[i] += grid->Fz[index]*weight*particle->mass[i];
}
}
}
}
}
}
void kick(struct particle3D *particle , double dt){
// Velocity update of the leapfrog scheme: v += (F/m) * dt for every
// particle. The forces must already be interpolated onto the particles
// (see WeightForce).
for(int n = 0; n < particle->number; ++n){
particle->vx[n] += (particle->Fx[n] / particle->mass[n]) * dt;
particle->vy[n] += (particle->Fy[n] / particle->mass[n]) * dt;
particle->vz[n] += (particle->Fz[n] / particle->mass[n]) * dt;
}
}
void drift(struct particle3D *particle , double dt){
// Position update of the leapfrog scheme: x += v * dt for every particle.
const int count = particle->number;
for(int n = 0; n < count; ++n){
particle->x[n] += dt * particle->vx[n];
particle->y[n] += dt * particle->vy[n];
particle->z[n] += dt * particle->vz[n];
}
}
void init_rk4(struct particle3D *particle, struct particle3D *buff, struct rk43D *rk4){
// Start a new RK4 step: seed the scratch particle set with the current
// velocities and forces, and clear the stage accumulators.
for(int n = 0; n < particle->number; ++n){
buff->vx[n] = particle->vx[n];
buff->vy[n] = particle->vy[n];
buff->vz[n] = particle->vz[n];
buff->Fx[n] = particle->Fx[n];
buff->Fy[n] = particle->Fy[n];
buff->Fz[n] = particle->Fz[n];
rk4->ax[n] = 0;
rk4->ay[n] = 0;
rk4->az[n] = 0;
rk4->vx[n] = 0;
rk4->vy[n] = 0;
rk4->vz[n] = 0;
// First rk4_mid call must take its force from the particle, not the buffer.
rk4->step1 = 0;
rk4->step2 = 1;
}
}
void rk4_mid(struct particle3D *particle, struct particle3D *buff, struct rk43D *rk4, double dt, int weighting){
// One intermediate stage of classic RK4. The driver calls this four times
// per step with dt/2, dt/2, dt, dt and weighting 1,2,2,1, so the
// weighting/6.0 factors accumulate the standard (k1+2k2+2k3+k4)/6 sum.
// buff carries the trial state of the current stage; rk4->ax..az / vx..vz
// accumulate the weighted stage derivatives applied later by rk4_end().
// rk4->step1/step2 act as a one-shot selector: right after init_rk4 the
// stage force comes from the particle (step1=0, step2=1); on later calls
// it comes from the buffer state (refreshed by WeightForce between calls).
// NOTE(review): the statement order inside the loop is significant — the
// old buff velocities are consumed before being overwritten at the bottom
// of the loop body; do not reorder.
for(int i=0 ; i<particle->number ; i++){
//Select the force source for this stage (see selector note above).
buff->Fx[i] = rk4->step1 * buff->Fx[i] + rk4->step2 * particle->Fx[i];
buff->Fy[i] = rk4->step1 * buff->Fy[i] + rk4->step2 * particle->Fy[i];
buff->Fz[i] = rk4->step1 * buff->Fz[i] + rk4->step2 * particle->Fz[i];
//Trial position advanced with the previous stage's velocity.
buff->x[i] = particle->x[i] + buff->vx[i] * dt;
buff->y[i] = particle->y[i] + buff->vy[i] * dt;
buff->z[i] = particle->z[i] + buff->vz[i] * dt;
//Accumulate the weighted stage derivatives.
rk4->ax[i] += weighting/6.0 * buff->Fx[i]/particle->mass[i];
rk4->ay[i] += weighting/6.0 * buff->Fy[i]/particle->mass[i];
rk4->az[i] += weighting/6.0 * buff->Fz[i]/particle->mass[i];
rk4->vx[i] += weighting/6.0 * buff->vx[i];
rk4->vy[i] += weighting/6.0 * buff->vy[i];
rk4->vz[i] += weighting/6.0 * buff->vz[i];
//Trial velocity for the next stage.
buff->vx[i] = particle->vx[i] + buff->Fx[i]/particle->mass[i] * dt;
buff->vy[i] = particle->vy[i] + buff->Fy[i]/particle->mass[i] * dt;
buff->vz[i] = particle->vz[i] + buff->Fz[i]/particle->mass[i] * dt;
rk4->step1 = 1;
rk4->step2 = 0;
}
}
void rk4_end(struct particle3D *particle, struct rk43D *rk4, double dt){
// Apply the accumulated RK4 increments: the rk4 fields already hold the
// weighted sums of the four stage derivatives (filled in by rk4_mid),
// so a single multiply by dt finishes the step.
const int count = particle->number;
for(int n = 0; n < count; ++n){
particle->x[n] += rk4->vx[n] * dt;
particle->y[n] += rk4->vy[n] * dt;
particle->z[n] += rk4->vz[n] * dt;
particle->vx[n] += rk4->ax[n] * dt;
particle->vy[n] += rk4->ay[n] * dt;
particle->vz[n] += rk4->az[n] * dt;
}
}
void periodic_boundary(double position, double length){
// Wrap a coordinate back into [-length/2, length/2] and report it.
// NOTE(review): `position` is passed BY VALUE, so the wrapped result never
// reaches the caller — as written this function only prints the message.
// Fixing it requires a reference/pointer parameter AND the matching change
// to the forward declaration above; left to the owner as an interface change.
// Callers (boundary_check and the disabled block in main) only invoke this
// when |position| > length/2, so position is nonzero here and the sign
// computation below cannot divide 0/0.
int sign = position/abs(position);
position = sign * remainder(abs(position + sign*length/2), length) - sign*length/2;
cout << "A particle reaches the boundary." << endl;
}
void boundary_check(int boundary, struct particle3D *particle, double L){
// Enforce the box boundary on every particle.
//   boundary == 0 : periodic — out-of-box coordinates are handed to
//                   periodic_boundary() for wrapping.
//   boundary == 1 : isolated — a particle leaving the box has its mass
//                   zeroed so it stops contributing to the density.
//   other values  : no boundary handling at all.
const double half = L/2;
const int count = particle->number;
if (boundary == 0){
for (int i = 0; i < count; i++){
if (abs(particle->x[i]) > half) periodic_boundary(particle->x[i], L);
if (abs(particle->y[i]) > half) periodic_boundary(particle->y[i], L);
if (abs(particle->z[i]) > half) periodic_boundary(particle->z[i], L);
}
}
else if (boundary == 1){
for (int i = 0; i < count; i++){
bool outside = abs(particle->x[i]) > half
|| abs(particle->y[i]) > half
|| abs(particle->z[i]) > half;
if (outside){
particle->mass[i] = 0;
cout << "A particle reaches the boundary." << endl;
}
}
}
}
//Functions to locate memory and free memory of different struct.
void locateMemoryParticle(struct particle3D *particle,int N){
// Allocate every per-particle array (mass, position, velocity, force) for
// N particles. Allocations are unchecked, matching the file's style.
const size_t bytes = (size_t)N * sizeof(double);
particle->mass = (double*)malloc(bytes);
particle->x = (double*)malloc(bytes);
particle->y = (double*)malloc(bytes);
particle->z = (double*)malloc(bytes);
particle->vx = (double*)malloc(bytes);
particle->vy = (double*)malloc(bytes);
particle->vz = (double*)malloc(bytes);
particle->Fx = (double*)malloc(bytes);
particle->Fy = (double*)malloc(bytes);
particle->Fz = (double*)malloc(bytes);
}
void freeMemoryParticle(struct particle3D *particle){
// Release every array allocated by locateMemoryParticle
// (order is irrelevant; reverse of allocation chosen here).
free(particle->Fz);
free(particle->Fy);
free(particle->Fx);
free(particle->vz);
free(particle->vy);
free(particle->vx);
free(particle->z);
free(particle->y);
free(particle->x);
free(particle->mass);
}
void locateMemoryRk4(struct rk43D *rk4,int N){
// Allocate the RK4 stage accumulators (weighted acceleration and
// velocity sums) for N particles.
const size_t bytes = (size_t)N * sizeof(double);
rk4->ax = (double*)malloc(bytes);
rk4->ay = (double*)malloc(bytes);
rk4->az = (double*)malloc(bytes);
rk4->vx = (double*)malloc(bytes);
rk4->vy = (double*)malloc(bytes);
rk4->vz = (double*)malloc(bytes);
}
void freeMemoryRk4(struct rk43D *rk4){
// Release the accumulators allocated by locateMemoryRk4.
free(rk4->vz);
free(rk4->vy);
free(rk4->vx);
free(rk4->az);
free(rk4->ay);
free(rk4->ax);
}
void locateMemoryGrid(struct grid3D *grid){
// Allocate the grid fields; grid->N must already be set by the caller.
const size_t bytes = (size_t)grid->N * sizeof(double);
grid->density = (double*) malloc(bytes); //Mass density deposited by Weight()
grid->phi = (double*) malloc(bytes); //Gravitational potential
grid->Fx = (double*) malloc(bytes); //Force field derived from the potential
grid->Fy = (double*) malloc(bytes);
grid->Fz = (double*) malloc(bytes);
}
void freeMemoryGrid(struct grid3D *grid){
// Release the grid fields allocated by locateMemoryGrid.
free(grid->Fz);
free(grid->Fy);
free(grid->Fx);
free(grid->phi);
free(grid->density);
}
|
5,439 | #include "includes.h"
__global__ void GaussianEliminationShared(const int clusterSize,float *x, const float *diagonal_values , const float *non_diagonal_values ,float *y )
{
// Solve one tridiagonal clusterSize x clusterSize linear system per block
// by Gaussian elimination in shared memory.
//   diagonal_values     : main diagonal, clusterSize entries per cluster
//   non_diagonal_values : off-diagonal entries, 2 interleaved per row
//   y                   : right-hand side (modified in place by elimination)
//   x                   : solution (output)
// Preconditions: clusterSize <= 9 (fixed shared_m size) and no zero may
// appear on the diagonal during elimination (no pivoting is performed).
// NOTE(review): every thread of a block runs the same serial elimination,
// so the read-modify-write updates of y[...] race if a block has more than
// one thread — launch with one thread per block, or guard the work with
// threadIdx.x == 0. TODO confirm the launch configuration.
const int index = blockIdx.x ;
__shared__ float shared_m[9][9]; // size of cluster
// Build the dense matrix for this cluster: zero it, then fill the three
// diagonals from the packed inputs.
for (int i = 0; i < clusterSize;++i)
{
for (int j = 0; j < clusterSize;++j)
{
shared_m[i][j]=0;
}
}
for(int i = 0; i < clusterSize; ++i)
{
shared_m[i][i] = diagonal_values[clusterSize * index + i];
}
for(int i = 0; i < clusterSize-1;++i)
{
shared_m[i][i+1] = non_diagonal_values[clusterSize * index * 2 + 2*i+1];
shared_m[i+1][i] = non_diagonal_values[clusterSize * index * 2 + 2*i+2];
}
// Forward elimination to upper-triangular form.
for (int i = 0 ; i < clusterSize; ++i)
{
//for every row...
for (int j = i+1; j < clusterSize; ++j)
{
//calculate ratio for every row below it using the triangular
double ratio = shared_m[j][i] / shared_m[i][i];
for(int k = 0; k < clusterSize; ++k)
{
//Eliminate every column based on that ratio
shared_m[j][k] = shared_m[j][k] - (shared_m[i][k] * ratio);
}
//elimination on the coefficient vector
y[clusterSize * index +j] = y[clusterSize * index +j] - (y[clusterSize * index +i] * ratio);
}
}
__syncthreads();
//Back substitution: x[i] = (y[i] - sum_{j>i} m[i][j]*x[j]) / m[i][i].
//Bug fix: the inner accumulation previously started at j == i, which folded
//the stale value of x[i] into the sum; it must start at j == i+1.
for (int i = clusterSize-1; i > -1; --i)
{
double current = 0;
for (int j = i+1; j < clusterSize; ++j)
{
current = current + (shared_m[i][j] * x[clusterSize * index +j]);
}
x[clusterSize * index +i] = (y[clusterSize * index +i] - current) / shared_m[i][i];
}
}
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <iostream>
__global__ void polynomial_expansion (float* poly, int degree, int n, float* array) {
// Evaluate a polynomial at each array element, in place.
// poly holds degree+1 coefficients, lowest order first; array[i] is the
// evaluation point on input and the polynomial value on output.
// One element per thread; threads past n do nothing.
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
float point = array[i];
float acc = 0.0f;
float power = 1.0f; // point^k, built up incrementally
for (int k = 0; k <= degree; ++k) {
acc += power * poly[k];
power *= point;
}
array[i] = acc;
}
int main (int argc, char* argv[]) {
// Evaluate an all-ones polynomial of the given degree at n points on the
// GPU and verify each result equals degree+1. Prints timing to stdout.
// Fixes over the original: CUDA calls and the kernel launch are now error
// checked, and the device synchronization happens before the result copy
// and the timer stop (it used to sit uselessly after cudaFree).
if (argc < 3) {
std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
return -1;
}
int n = atoi(argv[1]); //TODO: atoi is an unsafe function
int degree = atoi(argv[2]);
int nbiter = 1;
float* array = new float[n];
float* poly = new float[degree+1];
for (int i=0; i<n; ++i)
array[i] = 1.;
for (int i=0; i<degree+1; ++i)
poly[i] = 1.;
float *dev_array, *dev_poly;
// Abort with a readable message on any CUDA failure.
auto check = [](cudaError_t e, const char* what) {
if (e != cudaSuccess) {
std::cerr << what << ": " << cudaGetErrorString(e) << std::endl;
std::exit(1);
}
};
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
check(cudaMallocManaged(&dev_array, n*sizeof(float)), "cudaMallocManaged(array)");
check(cudaMallocManaged(&dev_poly, (degree+1)*sizeof(float)), "cudaMallocManaged(poly)");
check(cudaMemcpy(dev_array, array, n*sizeof(float), cudaMemcpyHostToDevice), "copy array to device");
check(cudaMemcpy(dev_poly, poly, (degree+1)*sizeof(float), cudaMemcpyHostToDevice), "copy poly to device");
polynomial_expansion<<<(n+255)/256, 256>>>(dev_poly, degree, n, dev_array);
check(cudaGetLastError(), "kernel launch");
check(cudaDeviceSynchronize(), "kernel execution");
check(cudaMemcpy(array, dev_array, n*sizeof(float), cudaMemcpyDeviceToHost), "copy result to host");
cudaFree(dev_array);
cudaFree(dev_poly);
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end-begin)/nbiter;
{
// Every input point is 1 and every coefficient is 1, so each output
// must be degree+1 (within float tolerance).
bool correct = true;
int ind = -1;
for (int i=0; i< n; ++i) {
if (fabs(array[i]-(degree+1))>0.01) {
correct = false;
ind = i;
}
}
if (!correct)
std::cerr<<"Result is incorrect. In particular array["<<ind<<"] should be "<<degree+1<<" not "<< array[ind]<<std::endl;
}
std::cerr<<array[0]<<std::endl;
std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
delete[] array;
delete[] poly;
return 0;
}
|
5,441 | #include <iostream>
#include <cstdlib>
#include <ctime>
#define MAXN 30000000
#define A (-1.0)
#define B (1.0)
//säieryhmän koko
#define LOCAL_SIZE 1024
//säieryhmien määrä
//#define WG_COUNT (MAXN/LOCAL_SIZE+1)
//ydin, jolle annetaan kaksi n kokoista vektoria x ja y, sekä
//liukuluku m, joille suoritetaan axpy-operaatio
__global__ void axpy(double *x, double *y, double m, int n) {
// Elementwise y <- m*x + y (daxpy) for vectors of length n.
// One element per thread; threads past the end do nothing.
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
y[idx] = m * x[idx] + y[idx];
}
//tarkistetaan onko doublet samoja
bool double_equals( double a, double b, double epsilon = 0.0001) {
// Approximate equality for doubles: |a - b| < epsilon
// (default tolerance 1e-4).
double diff = a - b;
if (diff < 0) {
diff = -diff;
}
return diff < epsilon;
}
// Releases the four host-side work vectors allocated in main().
static void free_host_vectors(double *x, double *y, double *x_test, double *y_test) {
    delete [] x;
    delete [] y;
    delete [] x_test;
    delete [] y_test;
}

// Prints the given failure message followed by the human-readable CUDA
// error string. (The messages themselves stay in Finnish to preserve the
// program's original output.)
static void report_cuda_error(const char *message, cudaError err) {
    std::cout << message << std::endl;
    std::cerr << "CUDA-virhekoodi: " << cudaGetErrorString(err) << std::endl;
}

// Entry point: builds two random vectors of random length, runs AXPY
// (y = m*x + y) on the GPU, and verifies the result against a CPU
// reference computation. Returns 0 on success, 1 on any CUDA failure.
int main() {
    cudaError err;
    // Seed the random number generator with the current time.
    srand(time(NULL));
    // Random vector length in [1, MAXN].
    int n = rand() % MAXN + 1;
    // Host vectors plus a random scalar multiplier m in [A, B].
    double *x = new double[n];
    double *y = new double[n];
    double m = (B-A)*((double)rand()/RAND_MAX)+A;
    // Untouched copies used for the CPU reference computation.
    double *x_test = new double[n];
    double *y_test = new double[n];
    // Fill x and y with random values in [A, B] ...
    for(int i = 0; i < n; i++) {
        x[i] = (B-A)*((double)rand()/RAND_MAX)+A;
        y[i] = (B-A)*((double)rand()/RAND_MAX)+A;
    }
    // ... and snapshot them before the GPU overwrites y.
    for(int i = 0; i < n; i++) {
        x_test[i] = x[i];
        y_test[i] = y[i];
    }
    // Allocate the two device buffers.
    double *deviceBuffer1, *deviceBuffer2;
    err = cudaMalloc((void **)&deviceBuffer1, n*sizeof(double));
    if(err != cudaSuccess) {
        report_cuda_error("Muistin varaaminen epäonnistui.", err);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    err = cudaMalloc((void **)&deviceBuffer2, n*sizeof(double));
    if(err != cudaSuccess) {
        report_cuda_error("Muistin varaaminen epäonnistui.", err);
        // BUG FIX: the first device buffer was leaked on this path.
        cudaFree(deviceBuffer1);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    // Transfer x -> deviceBuffer1.
    err = cudaMemcpy(
        deviceBuffer1, x, n*sizeof(double), cudaMemcpyHostToDevice);
    if(err != cudaSuccess) {
        report_cuda_error("Isäntälaite -> GPU -siirtokäskyn asettaminen "
                          "komentojonoon epäonnistui.", err);
        cudaFree(deviceBuffer1);
        cudaFree(deviceBuffer2);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    // Transfer y -> deviceBuffer2.
    err = cudaMemcpy(
        deviceBuffer2, y, n*sizeof(double), cudaMemcpyHostToDevice);
    if(err != cudaSuccess) {
        report_cuda_error("Isäntälaite -> GPU -siirtokäskyn asettaminen "
                          "komentojonoon epäonnistui.", err);
        cudaFree(deviceBuffer1);
        cudaFree(deviceBuffer2);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    // Launch the kernel: one thread per element, LOCAL_SIZE threads per
    // block, enough blocks (rounded up) to cover all n elements.
    dim3 WGCount(n/LOCAL_SIZE+1, 1, 1);
    dim3 localDim(LOCAL_SIZE, 1, 1);
    axpy<<<WGCount, localDim>>>(deviceBuffer1, deviceBuffer2, m, n);
    // Check whether the launch itself failed.
    err = cudaGetLastError();
    if(err != cudaSuccess) {
        report_cuda_error("Ytimen käynnistyskäskyn asettaminen komentojonoon "
                          "epäonnistui.", err);
        cudaFree(deviceBuffer1);
        cudaFree(deviceBuffer2);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    //
    // Copy the results back to host memory.
    //
    // BUG FIX: the original discarded the return values of both
    // device->host cudaMemcpy calls, so the error checks below tested a
    // stale `err` and transfer failures went unnoticed.
    err = cudaMemcpy(x, deviceBuffer1, n*sizeof(double), cudaMemcpyDeviceToHost);
    if(err != cudaSuccess) {
        report_cuda_error("GPU -> Isäntälaite -siirtokäskyn asettaminen "
                          "komentojonoon epäonnistui.", err);
        cudaFree(deviceBuffer1);
        cudaFree(deviceBuffer2);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    err = cudaMemcpy(y, deviceBuffer2, n*sizeof(double), cudaMemcpyDeviceToHost);
    if(err != cudaSuccess) {
        report_cuda_error("GPU -> Isäntälaite -siirtokäskyn asettaminen "
                          "komentojonoon epäonnistui.", err);
        cudaFree(deviceBuffer1);
        cudaFree(deviceBuffer2);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    //
    // Verify: compute the AXPY reference on the CPU ...
    //
    for(int i = 0; i < n; i++) {
        y_test[i] = m * x_test[i] + y_test[i];
    }
    // ... and compare every element against the GPU result.
    bool correct = true;
    for(int i = 0; i < n; i++) {
        if (double_equals(y_test[i], y[i]) != true) {
            correct = false;
            // Print the first mismatching pair.
            std::cout << y_test[i] << std::endl;
            std::cout << y[i] << std::endl;
            break;
        }
    }
    std::cout << "The result was " ;
    if (!correct) {
        std::cout << "incorrect." << std::endl;
    }
    else std::cout << "correct." << std::endl;
    //
    // Release the device-side buffers.
    //
    err = cudaFree(deviceBuffer1);
    if(err != cudaSuccess) {
        // BUG FIX: the message previously repeated "epäonnistui" twice.
        report_cuda_error("Muistin vapauttaminen epäonnistui.", err);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    err = cudaFree(deviceBuffer2);
    if(err != cudaSuccess) {
        report_cuda_error("Muistin vapauttaminen epäonnistui.", err);
        free_host_vectors(x, y, x_test, y_test);
        return 1;
    }
    free_host_vectors(x, y, x_test, y_test);
    return 0;
}
|
5,442 | #include <cstdio>
#include <iostream>
#include <chrono>
#define N 5000
// Adds two vectors elementwise on the host: h_c[i] = h_a[i] + h_b[i].
// Generalized: `count` gives the number of elements to process and
// defaults to the compile-time size N, so existing callers are unchanged.
void cpuAdd(int *h_a, int *h_b, int *h_c, int count = N)
{
    for (int tid = 0; tid < count; tid++)
    {
        h_c[tid] = h_a[tid] + h_b[tid];
    }
}
// Driver: fills two N-element vectors, adds them sequentially on the CPU,
// and reports the elapsed time in nanoseconds.
int main(void)
{
    int h_a[N], h_b[N], h_c[N];
    // Inputs: h_a holds 2*i*i, h_b holds i.
    for (int i = 0; i < N; i++)
    {
        h_a[i] = 2 * i * i;
        h_b[i] = i;
    }
    // Time only the addition itself with a monotonic clock.
    const auto start = std::chrono::steady_clock::now();
    cpuAdd(h_a, h_b, h_c);
    const auto stop = std::chrono::steady_clock::now();
    const auto elapsed_ns =
        std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    std::cout << "CPU time:" << elapsed_ns << "\n";
    printf("Vector addition on CPU\n");
    return 0;
}
|
5,443 | #include <stdio.h>
#define SIZE 1024*1024*4
// Block-wise sum reduction with sequential addressing.
// Each block loads blockDim.x elements of g_idata into dynamically
// allocated shared memory, reduces them in log2(blockDim.x) steps, and
// thread 0 writes the block's partial sum to g_odata[blockIdx.x].
// Requirements: blockDim.x must be a power of two, and the launch must
// provide at least blockDim.x * sizeof(unsigned int) bytes of dynamic
// shared memory.
__global__ void reduce0(unsigned int *g_idata, unsigned int *g_odata, long size){
    // dynamically allocated shared memory
    extern __shared__ unsigned int sdata[];
    // thread id within the block, and flat id across the grid
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: `size` was previously unused and every thread read
    // g_idata[i] unconditionally. Guard the load so a grid that overshoots
    // the input length contributes a neutral 0 instead of reading out of
    // bounds.
    sdata[tid] = (i < size) ? g_idata[i] : 0;
    // All loads must land in shared memory before the reduction starts.
    __syncthreads();
    // Sequential addressing: the stride halves each step and the active
    // threads stay contiguous, avoiding shared-memory bank conflicts.
    for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads(); // complete one phase before the next starts
    }
    // Thread 0 publishes this block's partial sum.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Wraps a CUDA API call and reports any failure with file/line context.
#define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the CUDA error (if any) to stderr; exits with the error code
// unless `abort` is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Host driver: sums a vector of SIZE ones on the GPU (per-block partial
// sums via reduce0, final reduction on the CPU) and reports the result
// plus the elapsed time measured with CUDA events.
// With all-ones input the expected sum equals SIZE, which makes the
// result trivial to verify.
int main(void){
    cudaEvent_t start_event, stop_event;
    float cuda_elapsed_time;
    unsigned int *h_i, *h_o;
    long size = SIZE;
    h_i = (unsigned int *) malloc (sizeof(unsigned int)*SIZE);
    for (unsigned int i = 0; i < SIZE; i ++)
        h_i[i] = 1;
    unsigned int *d_i;
    unsigned int threadsPerBlock = 128;
    unsigned int totalBlocks = (SIZE+(threadsPerBlock-1))/threadsPerBlock;
    // Host buffer receiving one partial sum per block.
    h_o = (unsigned int*) malloc(totalBlocks * sizeof(unsigned int));
    cudaEventCreate(&start_event);
    cudaEventCreate(&stop_event);
    /* start the timer for the GPU path (includes the transfers) */
    cudaEventRecord(start_event, 0);
    gpuErrChk(cudaMalloc((void**)&d_i, sizeof(unsigned int)*SIZE));
    gpuErrChk(cudaMemcpy(d_i, h_i, sizeof(unsigned int)*SIZE, cudaMemcpyHostToDevice));
    // Device buffer holding one partial sum per block.
    unsigned int *d_o;
    gpuErrChk(cudaMalloc((void**)&d_o, sizeof(unsigned int)*totalBlocks));
    // reduce0 needs exactly one shared-memory slot per thread (the
    // previous launch allocated twice that for no benefit).
    reduce0<<<totalBlocks, threadsPerBlock, threadsPerBlock*sizeof(unsigned int)>>>(d_i, d_o, size);
    // Copy the partial sums back and finish the reduction on the CPU.
    gpuErrChk(cudaMemcpy(h_o, d_o, totalBlocks * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    /* stop the timer for the GPU path */
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event);
    cudaEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
    for (unsigned int j = 1; j < totalBlocks; j++)
    {
        h_o[0] += h_o[j];
    }
    // BUG FIX: h_o[0] is an unsigned int, so the previous "%ld" specifier
    // was undefined behavior; "%u" matches the argument type.
    printf("Reduced Sum from GPU = %u \n", h_o[0]);
    printf("Time taken by the kernel: %f ms \n",cuda_elapsed_time);
    // Release events plus device and host memory (previously leaked).
    cudaEventDestroy(start_event);
    cudaEventDestroy(stop_event);
    cudaFree(d_i);
    cudaFree(d_o);
    free(h_i);
    free(h_o);
    return 0;
}
|
5,444 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
void secuential(const int a[] ,const int b[], int c[], const unsigned int sqrt_dim);
// Tiled GPU kernel computing C = B + A*B^t + A^t for square width x width
// int matrices (the sequential reference is `secuential` below).
// Launch contract: blockDim = (tile_width, tile_width); gridDim =
// (width/tile_width, width/tile_width); dynamic shared memory of at least
// 2 * tile_width * tile_width * sizeof(int) bytes for the As/Bs tiles.
// NOTE(review): the accumulator is a float although the data is int —
// confirm the precision loss for large products is acceptable.
__global__ void multiply( const int* A, const int* B,int* C, int width, int tile_width)
{
    float Csub = 0;
    // March one tile at a time: `a` walks along a block-row of A while `b`
    // walks down a block-column of B.
    for (int a = width * tile_width * blockIdx.y, b = tile_width * blockIdx.x; a <= width * tile_width * blockIdx.y + width - 1; a += tile_width, b += tile_width * width)
    {
        // Two staging tiles carved out of the dynamic shared allocation.
        extern __shared__ int shared[];
        int *As=&shared[0];
        int *Bs=&shared[tile_width*tile_width];
        // Each thread loads one element of each operand tile; note the
        // shared-memory index swaps threadIdx.x/threadIdx.y (transposed
        // staging).
        As[threadIdx.y+tile_width*threadIdx.x] = A[a + width * threadIdx.y + threadIdx.x];
        Bs[threadIdx.y+tile_width*threadIdx.x] = B[b + width * threadIdx.y + threadIdx.x];
        // All loads must land before any thread reads the tiles.
        __syncthreads();
        for (int k = 0; k < tile_width; ++k)
        {
            Csub += As[threadIdx.y+tile_width*k] * Bs[threadIdx.x+tile_width*k]; //a*b^t
        }
        // Keep fast threads from overwriting the tiles of the next round.
        __syncthreads();
    }
    // Linear offset of this block's output tile.
    int c = width * tile_width * blockIdx.y + tile_width * blockIdx.x;
    C[c + width * threadIdx.y + threadIdx.x] = Csub;
    // Add B as-is and A transposed (swapped threadIdx in the A term).
    C[c + width * threadIdx.y + threadIdx.x]+=B[c + width * threadIdx.y + threadIdx.x] + A[c + width * threadIdx.x + threadIdx.y];
}
// Fills the first `size` entries of `a` with the constant `val`.
void init(int *a, int size, int val)
{
    int i = 0;
    while (i < size)
    {
        a[i] = val;
        ++i;
    }
}
// Host driver: builds two size_array x size_array matrices of ones,
// computes C = B + A*B^t + A^t both on the GPU (tiled `multiply` kernel)
// and on the CPU (`secuential`), times both, and compares the results.
//   argv[1]: matrix side length (default 1024)
//   argv[2]: tile width (default 2; must divide the side length)
//   argv[3]: a value starting with 'v' enables verbose matrix printing
int main(int argc, char** argv)
{
    clock_t time_begin;
    unsigned int size_array = (argc > 1)? atoi (argv[1]): 1024;
    unsigned int tile_width = (argc > 2)? atoi (argv[2]): 2;
    bool verbose= (argc>3)? (argv[3][0]=='v'): false;
    // Host matrices: A, B, the GPU result, and the CPU reference result.
    int* h_array1 = (int*) malloc(sizeof(int) * size_array*size_array);
    int* h_array2 = (int*) malloc(sizeof(int) * size_array*size_array);
    int* h_array3 = (int*) malloc(sizeof(int) * size_array*size_array);
    int* h_array_sec = (int*) malloc(sizeof(int) * size_array*size_array);
    init(h_array1, size_array*size_array,1);
    init(h_array2, size_array*size_array,1);
    if(verbose){
        printf("A:\n");
        for(int i=0; i<size_array*size_array; i++){
            printf("%i\t", h_array1[i]);
            if((i+1)%size_array==0) printf("\n");
        }
        printf("\n");
        printf("B:\n");
        for(int i=0; i<size_array*size_array; i++){
            printf("%i\t", h_array2[i]);
            if((i+1)%size_array==0) printf("\n");
        }
        printf("\n");
    }
    // Device copies of A and B, plus the device output buffer.
    int *d_array1,*d_array2, *d_array3;
    cudaMalloc((void**) &d_array1, sizeof(int) * size_array*size_array);
    cudaMalloc((void**) &d_array2, sizeof(int) * size_array*size_array);
    cudaMalloc((void**) &d_array3, sizeof(int) * size_array*size_array);
    cudaMemcpy(d_array1, h_array1, sizeof(int) * size_array*size_array, cudaMemcpyHostToDevice);
    cudaMemcpy(d_array2, h_array2, sizeof(int) * size_array*size_array, cudaMemcpyHostToDevice);
    dim3 bloque(tile_width, tile_width);
    dim3 grid(size_array / bloque.x, size_array / bloque.y);
    time_begin=clock();
    // BUG FIX: the kernel stages TWO tile_width x tile_width int tiles in
    // dynamic shared memory, so it needs 2*tile_width^2*sizeof(int) bytes.
    // The previous tile_width^4 expression under-allocated for small tile
    // widths (16 bytes instead of 32 for the default tile_width = 2),
    // causing out-of-bounds shared-memory accesses.
    multiply<<< grid, bloque, 2*tile_width*tile_width*sizeof(int) >>>( d_array1, d_array2,d_array3, size_array, tile_width);
    cudaMemcpy(h_array3, d_array3, sizeof(int) * size_array*size_array, cudaMemcpyDeviceToHost);
    printf("GPU time: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    if(verbose){
        printf("Array C=B + AB^t + A^t :\n");
        for(int i=0; i<size_array*size_array; i++){
            printf("%i\t", h_array3[i]);
            if((i+1)%size_array==0) printf("\n");
        }
    }
    time_begin=clock();
    secuential(h_array1, h_array2, h_array_sec, size_array);
    printf("CPU time: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    // BUG FIX: compare the full matrices; the original inspected only the
    // first size_array entries (a single row).
    bool b=true;
    for(int i=0; i<size_array*size_array; i++){
        if(h_array_sec[i] != h_array3[i]){
            printf("GPU and CPU have different results (at least) at position %i\n", i);
            b=false;
            break;
        }
    }
    if(b)
        printf("GPU and CPU have the same results\n");
    free(h_array1);
    free(h_array2);
    free(h_array3);
    free(h_array_sec);   // BUG FIX: was leaked
    cudaFree(d_array1);
    cudaFree(d_array2);
    cudaFree(d_array3);
    return 0;
}
// Sequential reference computing C = B + A*B^t + A^t for square matrices
// of side `sqrt_dim`, all stored row-major in flat arrays of
// sqrt_dim*sqrt_dim ints.
void secuential(const int a[] ,const int b[], int c[], const unsigned int sqrt_dim){
    const int dim = sqrt_dim * sqrt_dim;
    for (int i = 0; i < dim; i++) {
        // Decompose the flat index: `col` varies fastest.
        const int col = i % sqrt_dim;
        const int row = (i - col) / sqrt_dim;
        // Start from B, add the transposed-A entry, then accumulate the
        // row-of-A dot row-of-B product (i.e. A * B^t).
        int acc = b[i] + a[row + col * sqrt_dim];
        for (unsigned int j = 0; j < sqrt_dim; j++) {
            acc += a[j + row * sqrt_dim] * b[j + col * sqrt_dim];
        }
        c[i] = acc;
    }
}
|
5,445 | #include <stdio.h>
// Simple transformation kernel
__global__ void transformKernel(
float* d_output,
cudaTextureObject_t texObj,
int width){
// Calculate normalized texture coordinates
float u = threadIdx.x/(float) blockDim.x;
// Read from texture and write to global memory
d_output[threadIdx.x] = tex1D<float>(texObj,u);
for (int i = 0; i < blockDim.x; i++){
if (threadIdx.x == i){
printf("(%.2f, %.2f )\t",u,d_output[threadIdx.x]);
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 1){
printf("\n");
}
}
// Host code
int main(){
int width = 10;
float * h_data =(float *) malloc(sizeof(float)*width);;
for (int i=0; i<width; i++){
h_data[i]=(float) i;
}
int size = width*sizeof(float);
// Allocate CUDA array in device memory
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(
32, 0, 0, 0,
cudaChannelFormatKindFloat);
cudaArray* cuArray;
cudaMallocArray(&cuArray, &channelDesc, size,1);
printf("cuda malloc array\n");
printf(cudaGetErrorString(cudaGetLastError()));
printf("\n");
// Copy to device memory some data located at address h_data
// in host memory
cudaMemcpyToArray(
cuArray, 0, 0,
h_data, size,
cudaMemcpyHostToDevice);
printf("cuda memcpy to array\n");
printf(cudaGetErrorString(cudaGetLastError()));
printf("\n");
// Specify texture
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray;
// Specify texture object parameters
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 1;
// Create texture object
cudaTextureObject_t texObj = 0;
cudaCreateTextureObject(
&texObj,
&resDesc,
&texDesc,
NULL);
printf("create texture \n");
printf(cudaGetErrorString(cudaGetLastError()));
printf("\n");
int num = 10;
// Allocate result of transformation in device memory
float* d_output;
cudaMalloc(&d_output, num*width*sizeof(float));
float* output = (float *) malloc(num*width*sizeof(float));
// Invoke kernel
transformKernel<<<1,100>>>(
d_output,
texObj, width);
// retrieve the output
cudaMemcpy(output,d_output,num*width*sizeof(float),cudaMemcpyDeviceToHost);
for (int i=0; i< width; i++){
printf("%.2f \t",h_data[i]);
}
printf("\n");
// Destroy texture object
cudaDestroyTextureObject(texObj);
// Free device memory
cudaFreeArray(cuArray);
cudaFree(d_output);
return 0;
}
|
5,446 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
// Teaching kernel: every thread prints its full coordinates — thread index
// within the block, block index within the grid, block dimensions, and
// grid dimensions. Output order is unspecified (device printf).
__global__ void print_details_exercise()
{
    printf("threadIdx.x : %d, threadIdx.y : %d, threadIdx.z :%d, blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y : %d\n",
        threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
/*
int main()
{
int nx = 4, ny = 4, nz = 4;
dim3 block(2, 2, 2);
dim3 grid(nx / block.x, ny / block.y, nz / block.z);
print_details_exercise<<<grid, block>>>();
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
*/ |
5,447 | #include <stdio.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code.
// Copies A and B to device memory, launches MatMulKernel to compute
// C = A * B, copies C back, and prints A, B, and C.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C) {
    printf("Running Matmul\n");
    // Load A and B to device memory.
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // One thread per output element.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    // (Removed a dead, fully commented-out loop here — note that its
    // commented printfs dereferenced DEVICE pointers from host code, which
    // would crash if re-enabled.)
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Read C from device memory (the blocking copy also waits for the
    // kernel to finish).
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Print A, B, and C. NOTE(review): all three loops use C.width for
    // both dimensions, so this is only correct for square matrices.
    for(int i = 0; i < C.width; i++){
        for(int j = 0; j < C.width; j++){
            printf("%lf, ", A.elements[i * C.width + j]);
        }
        printf("\n");
    }
    for(int i = 0; i < C.width; i++){
        for(int j = 0; j < C.width; j++){
            printf("%lf, ", B.elements[i * C.width + j]);
        }
        printf("\n");
    }
    for(int i = 0; i < C.width; i++){
        for(int j = 0; j < C.width; j++){
            printf("%lf, ", C.elements[i * C.width + j]);
        }
        printf("\n");
    }
    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
printf("row %d \n", row);
for (int e = 0; e < A.width; ++e){
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = Cvalue;
}
// Builds two 16x16 test matrices with small distinct values, multiplies
// them on the GPU via MatMul (which also prints the matrices), and
// releases the host buffers.
int main( int argc, char **argv )
{
    int mat_size = 16;
    Matrix A;
    A.width = mat_size;
    A.height = mat_size;
    A.elements = (float*)malloc(mat_size * mat_size * sizeof(float));
    // A[i] = i^2 / 47084659 — small, monotonically increasing values.
    for(int i = 0; i < mat_size * mat_size; i++){
        A.elements[i] = (float)i*i/47084659.0;
    }
    Matrix B;
    B.width = mat_size;
    B.height = mat_size;
    B.elements = (float*)malloc(mat_size * mat_size * sizeof(float));
    // B[i] = i / 22360.
    for(int i = 0; i < mat_size * mat_size; i++){
        B.elements[i] = (float)i / 22360.0;
    }
    Matrix C;
    C.width = mat_size;
    C.height = mat_size;
    C.elements = (float*)malloc(mat_size * mat_size * sizeof(float));
    MatMul(A, B, C);
    // BUG FIX: the host matrices were never released.
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
|
5,448 | /* Voxel sampling GPU implementation
* Author Zhaoyu SU
* All Rights Reserved. Sep., 2019.
*/
#include <stdio.h>
#include <iostream>
#include <float.h> // import FLT_EPSILON
// Maps a flat element id to its batch index: ids in the half-open range
// [accu_list[b], accu_list[b+1]) belong to batch b; any id outside all
// listed ranges falls into the final batch (batch_size - 1).
__device__ int get_batch_id(int* accu_list, int batch_size, int id) {
    int b = 0;
    while (b < batch_size - 1) {
        if (id >= accu_list[b] && id < accu_list[b+1])
            return b;
        ++b;
    }
    return batch_size - 1;
}
// Initializes the pooling outputs: every feature slot of each (roi, voxel)
// cell is set to `padding_value`, and every index slot to the sentinel -1
// (meaning "no point registered"). One thread per (roi, voxel) cell.
__global__ void output_init_gpu_kernel(int roi_num, int voxel_num, int channels, int pooling_size, float padding_value,
                                       float* output_features,
                                       int* output_idx) {
    const int cell_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (cell_id >= roi_num * voxel_num)
        return;
    for (int c = 0; c < channels; c++)
        output_features[cell_id * channels + c] = padding_value;
    for (int p = 0; p < pooling_size; p++)
        output_idx[cell_id * pooling_size + p] = -1;
}
// Bins every input point into a coarse 3-D occupancy grid so later kernels
// can look up nearby points cheaply. One thread per input point:
//   * the point's (x, y, z) is quantized by grid_buffer_resolution and
//     clamped into [0, grid_dim-1] along each axis;
//   * the point id is appended to its cell's slot list via an atomic
//     counter; points past grid_buffer_size per cell are counted but
//     dropped.
// input_coors is laid out as [point][3] (x, y, z interleaved); each batch
// owns a contiguous grid_dim_size slice of the grid buffers.
__global__ void grid_buffer_init_gpu_kernel(int batch_size, int input_point_num,
                                            float grid_buffer_resolution, int grid_buffer_size,
                                            int grid_dim_w, int grid_dim_l, int grid_dim_h,
                                            const float* input_coors,
                                            int* input_accu_list,
                                            int* grid_buffer,
                                            int* grid_buffer_count) {
    const int grid_dim_size = grid_dim_w * grid_dim_h * grid_dim_l;
    int point_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (point_id < input_point_num) {
        // Quantize the coordinate to a grid cell (truncation toward zero).
        int grid_coor_x = __float2int_rz(input_coors[point_id*3 + 0] / grid_buffer_resolution);
        int grid_coor_y = __float2int_rz(input_coors[point_id*3 + 1] / grid_buffer_resolution);
        int grid_coor_z = __float2int_rz(input_coors[point_id*3 + 2] / grid_buffer_resolution);
        // Clamp into the valid range along each axis.
        grid_coor_x = max(0, min(grid_coor_x, grid_dim_w - 1));
        grid_coor_y = max(0, min(grid_coor_y, grid_dim_l - 1));
        grid_coor_z = max(0, min(grid_coor_z, grid_dim_h - 1));
        // Resolve which batch the point belongs to via the accumulated
        // per-batch offsets.
        int batch_id = get_batch_id(input_accu_list, batch_size, point_id);
        int grid_buffer_idx = batch_id * grid_dim_size + grid_coor_x * grid_dim_l * grid_dim_h + grid_coor_y * grid_dim_h + grid_coor_z;
        // Atomically reserve a slot; overflowing points are counted but
        // not stored.
        int count = atomicAdd(&grid_buffer_count[grid_buffer_idx], 1);
        if (count < grid_buffer_size)
            grid_buffer[grid_buffer_idx * grid_buffer_size + count] = point_id;
    }
}
// For every (RoI, voxel) cell, registers up to `pooling_size` nearby input
// points together with a distance-based weight. One thread per cell:
//   1. decode the voxel's integer offset inside its RoI, center it, and
//      rotate/translate it into world coordinates using the RoI attributes
//      (w, l, h, x, y, z, r) plus the global offset_w/l/h;
//   2. scan the 3x3x3 neighborhood of occupancy-grid cells around that
//      location (filled by grid_buffer_init_gpu_kernel);
//   3. accept points whose RoI-frame displacement is within 1.25x the
//      per-axis voxel extent, weighting each as e / exp(dist / radius).
// Accepted point ids and weights go to output_idx / output_weight;
// temp_count tracks the number of filled slots per cell.
// NOTE(review): input_num_lists is not referenced in this kernel body.
__global__ void roi_pooling_register_gpu_kernel(int batch_size, int roi_num,
                                                int voxel_size, int pooling_size,
                                                float grid_buffer_resolution, int grid_buffer_size,
                                                float offset_w, float offset_l, float offset_h,
                                                int grid_dim_w, int grid_dim_l, int grid_dim_h,
                                                const float* input_coors,
                                                const float* roi_attrs,
                                                const int* input_num_lists,
                                                int* input_accu_list,
                                                int* roi_accu_list,
                                                int* temp_count,
                                                int* grid_buffer,
                                                int* grid_buffer_count,
                                                int* output_idx,
                                                float* output_weight) {
    const int voxel_num = voxel_size * voxel_size * voxel_size;
    const int grid_dim_size = grid_dim_w * grid_dim_l * grid_dim_h;
    const int half_voxel_size = (voxel_size - voxel_size % 2) / 2;
//    const int center_offset = voxel_size * voxel_size * half_voxel_size + \
//                              voxel_size * half_voxel_size + \
//                              half_voxel_size;
    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread_id < roi_num * voxel_num) {
        // Which RoI and which voxel inside it this thread handles.
        int roi_id = thread_id / voxel_num;
        int voxel_id = thread_id % voxel_num;
        int voxel_coor = roi_id * voxel_num + voxel_id;
        int batch_id = get_batch_id(roi_accu_list, batch_size, roi_id);
        // RoI pose: dimensions (w, l, h), center (x, y, z), yaw angle r.
        float roi_w = roi_attrs[roi_id*7 + 0];
        float roi_l = roi_attrs[roi_id*7 + 1];
        float roi_h = roi_attrs[roi_id*7 + 2];
        float roi_x = roi_attrs[roi_id*7 + 3] + offset_w;
        float roi_y = roi_attrs[roi_id*7 + 4] + offset_l;
        float roi_z = roi_attrs[roi_id*7 + 5] + offset_h;
        float roi_r = roi_attrs[roi_id*7 + 6];
        // Per-axis size of one voxel inside this RoI.
        float roi_grid_length_x = roi_w / voxel_size;
        float roi_grid_length_y = roi_l / voxel_size;
        float roi_grid_length_z = roi_h / voxel_size;
        // Decode voxel_id into a 3-D voxel coordinate, then center it
        // around the RoI origin.
        int roi_grid_coor_x = voxel_id / (voxel_size * voxel_size);
        int roi_grid_coor_y = (voxel_id - roi_grid_coor_x * voxel_size * voxel_size) / voxel_size;
        int roi_grid_coor_z = voxel_id % voxel_size;
        roi_grid_coor_x -= half_voxel_size;
        roi_grid_coor_y -= half_voxel_size;
        roi_grid_coor_z -= half_voxel_size;
        // Voxel-center displacement in the RoI frame (half-cell shift for
        // even voxel counts).
        float rel_roi_grid_x = (roi_grid_coor_x + 0.5 * (1 - voxel_size % 2)) * roi_grid_length_x;
        float rel_roi_grid_y = (roi_grid_coor_y + 0.5 * (1 - voxel_size % 2)) * roi_grid_length_y;
        float rel_roi_grid_z = (roi_grid_coor_z + 0.5 * (1 - voxel_size % 2)) * roi_grid_length_z;
        // Rotate by the RoI yaw and translate to world coordinates (z is
        // unaffected by the planar rotation).
        float rot_rel_roi_grid_x = rel_roi_grid_x*cosf(roi_r) - rel_roi_grid_y*sinf(roi_r);
        float rot_rel_roi_grid_y = rel_roi_grid_x*sinf(roi_r) + rel_roi_grid_y*cosf(roi_r);
        float act_roi_grid_x = rot_rel_roi_grid_x + roi_x;
        float act_roi_grid_y = rot_rel_roi_grid_y + roi_y;
        float act_roi_grid_z = rel_roi_grid_z + roi_z;
        // Locate the voxel center in the coarse occupancy grid and clamp a
        // 3x3x3 search window around it.
        int buffer_grid_coor_x = __float2int_rz(act_roi_grid_x / grid_buffer_resolution);
        int buffer_grid_coor_y = __float2int_rz(act_roi_grid_y / grid_buffer_resolution);
        int buffer_grid_coor_z = __float2int_rz(act_roi_grid_z / grid_buffer_resolution);
        int begin_buffer_grid_coor_x = max(0, buffer_grid_coor_x - 1);
        int begin_buffer_grid_coor_y = max(0, buffer_grid_coor_y - 1);
        int begin_buffer_grid_coor_z = max(0, buffer_grid_coor_z - 1);
        int stop_buffer_grid_coor_x = min(buffer_grid_coor_x + 1, grid_dim_w - 1);
        int stop_buffer_grid_coor_y = min(buffer_grid_coor_y + 1, grid_dim_l - 1);
        int stop_buffer_grid_coor_z = min(buffer_grid_coor_z + 1, grid_dim_h - 1);
        for (int x=begin_buffer_grid_coor_x; x<=stop_buffer_grid_coor_x; x++) {
            for (int y=begin_buffer_grid_coor_y; y<=stop_buffer_grid_coor_y; y++) {
                for (int z=begin_buffer_grid_coor_z; z<=stop_buffer_grid_coor_z; z++) {
                    // Stop early once this cell's pooling slots are full.
                    if (temp_count[voxel_coor] >= pooling_size)
                        break;
                    int search_grid_id = batch_id * grid_dim_size + x * grid_dim_l * grid_dim_h + y * grid_dim_h + z;
                    // Only the stored slots are valid; the count may exceed
                    // grid_buffer_size when the cell overflowed.
                    int valid_buffer_count = min(grid_buffer_count[search_grid_id], grid_buffer_size);
                    for (int i=0; i<valid_buffer_count; i++) {
                        int point_id = grid_buffer[search_grid_id * grid_buffer_size + i];
                        float point_x = input_coors[point_id*3 + 0];
                        float point_y = input_coors[point_id*3 + 1];
                        float point_z = input_coors[point_id*3 + 2];
                        // Express the point relative to the voxel center,
                        // then rotate back into the RoI frame (inverse yaw).
                        float rel_point_x = point_x - act_roi_grid_x;
                        float rel_point_y = point_y - act_roi_grid_y;
                        float rel_point_z = point_z - act_roi_grid_z;
                        float rel_rot_point_x = rel_point_x*cosf(-roi_r) - rel_point_y*sinf(-roi_r);
                        float rel_rot_point_y = rel_point_x*sinf(-roi_r) + rel_point_y*cosf(-roi_r);
                        // Accept points within 1.25x the voxel extent on
                        // every axis (slightly overlapping neighborhoods).
                        if (abs(rel_rot_point_x) <= roi_grid_length_x * 1.25 &&
                            abs(rel_rot_point_y) <= roi_grid_length_y * 1.25 &&
                            abs(rel_point_z) <= roi_grid_length_z * 1.25) {
                            // Weight decays exponentially with the distance
                            // to the voxel center, normalized by half the
                            // largest voxel side (weight = e^(1 - dist/radius)).
                            float dist_2 = pow(point_x - act_roi_grid_x, 2.) + pow(point_y - act_roi_grid_y, 2.) + pow(point_z - act_roi_grid_z, 2.);
                            float dist = sqrt(dist_2);
                            float radius = max(max(roi_grid_length_x, roi_grid_length_y), roi_grid_length_z) / 2.;
                            float weight = 2.71828 / expf(dist / radius);
                            int pool_count = temp_count[voxel_coor];
                            if (pool_count < pooling_size) {
                                output_idx[voxel_coor * pooling_size + pool_count] = point_id;
                                output_weight[voxel_coor * pooling_size + pool_count] = weight;
                                temp_count[voxel_coor] += 1;
                            } else {
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
}
// Gathers the final pooled features. One thread per (roi, voxel) cell:
// averages the features of the points registered for the cell by
// roi_pooling_register_gpu_kernel. Each contribution is scaled by
// output_weight[slot] / weight_sum, where weight_sum currently counts 1
// per registered point (uniform averaging — the commented-out line shows
// the fully weighted alternative). The normalized weight is written back
// into output_weight, presumably for a backward pass — verify.
// NOTE(review): features are accumulated with +=, so output_features must
// have been pre-initialized (see output_init_gpu_kernel).
__global__ void roi_pooling_fill_gpu_kernel(int roi_num, int voxel_num, int channels, int pooling_size,
                                            const float* input_features,
                                            int* temp_count,
                                            float* output_features,
                                            int* output_idx,
                                            float* output_weight) {
    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread_id < roi_num * voxel_num) {
        // Clamp: register may have counted more candidates than slots.
        int pool_count = min(temp_count[thread_id], pooling_size);
        if (pool_count > 0) {
            // Normalizer: one unit per registered point (uniform average).
            float weight_sum = 0;
            for (int p=0; p<pool_count; p++) {
                // weight_sum += output_weight[thread_id * pooling_size + p];
                weight_sum += 1;
            }
            for (int p=0; p<pool_count; p++) {
                int pool_id = thread_id * pooling_size + p;
                int input_id = output_idx[pool_id];
                // Normalize and persist the per-slot weight.
                float weight = output_weight[pool_id] / weight_sum;
                output_weight[pool_id] = weight;
                // Accumulate the weighted feature vector for this cell.
                for (int c=0; c < channels; c++) {
                    output_features[thread_id * channels + c] += input_features[input_id * channels + c] * weight;
                }
            }
        }
    }
}
// Host-side launcher: runs the four RoI-pooling stages in order on the
// default stream:
//   1. output_init_gpu_kernel       - pad features, clear index slots;
//   2. grid_buffer_init_gpu_kernel  - bin input points into a coarse grid;
//   3. roi_pooling_register_gpu_kernel - match points to (roi, voxel) cells;
//   4. roi_pooling_fill_gpu_kernel  - accumulate weighted features.
// Block sizes are chosen per kernel via cudaOccupancyMaxPotentialBlockSize.
// Bails out early (with a message) on invalid dimensions or zero RoIs.
// NOTE(review): input_num_list_host and roi_num_list are not referenced in
// this function body.
void roi_pooling_gpu_launcher(int batch_size, int input_point_num, int channels,
                              int roi_num, int voxel_size, int pooling_size, float padding_value,
                              float grid_buffer_resolution, int grid_buffer_size,
                              int grid_buffer_dim_w, int grid_buffer_dim_l, int grid_buffer_dim_h,
                              float offset_w, float offset_l, float offset_h,
                              const float* input_coors,
                              const float* input_features,
                              const float* roi_attrs,
                              const int* input_num_list,
                              const int* roi_num_list,
                              int* input_num_list_host,
                              int* input_accu_list,
                              int* roi_accu_list,
                              int* temp_count,
                              int* temp_grid_buffer,
                              int* temp_grid_buffer_count,
                              float* output_features,
                              int* output_idx,
                              float* output_weight) {
    if (batch_size * channels <= 0 || input_point_num <= 0) {
        printf("RoiPoolingFastOp ERROR: Invalid CUDA input dimensions.\n");
        return;
    }
    if (roi_num <= 0) {
        printf("RoiPoolingFastOp WARNING: No RoIs were found for the current batch.\n");
        return;
    }
    const int voxel_num = voxel_size * voxel_size * voxel_size;
    int blockSize;   // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize;    // The actual grid size needed, based on input size
    // Stage 1: initialize outputs, one thread per (roi, voxel) cell.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, output_init_gpu_kernel, 0, roi_num * voxel_num);
    gridSize = (roi_num * voxel_num + blockSize - 1) / blockSize;
    output_init_gpu_kernel<<<gridSize,blockSize>>>(roi_num, voxel_num, channels, pooling_size, padding_value,
                                                   output_features,
                                                   output_idx);
    // Stage 2: build the occupancy grid, one thread per input point.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, grid_buffer_init_gpu_kernel, 0, input_point_num);
    gridSize = (input_point_num + blockSize - 1) / blockSize;
    grid_buffer_init_gpu_kernel<<<gridSize,blockSize>>>(batch_size, input_point_num,
                                                        grid_buffer_resolution, grid_buffer_size,
                                                        grid_buffer_dim_w, grid_buffer_dim_l, grid_buffer_dim_h,
                                                        input_coors,
                                                        input_accu_list,
                                                        temp_grid_buffer,
                                                        temp_grid_buffer_count);
    // Stage 3: register candidate points per cell.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_register_gpu_kernel, 0, roi_num * voxel_num);
    gridSize = (roi_num * voxel_num + blockSize - 1) / blockSize;
    roi_pooling_register_gpu_kernel<<<gridSize,blockSize>>>(batch_size, roi_num,
                                                            voxel_size, pooling_size,
                                                            grid_buffer_resolution, grid_buffer_size,
                                                            offset_w, offset_l, offset_h,
                                                            grid_buffer_dim_w, grid_buffer_dim_l, grid_buffer_dim_h,
                                                            input_coors,
                                                            roi_attrs,
                                                            input_num_list,
                                                            input_accu_list,
                                                            roi_accu_list,
                                                            temp_count,
                                                            temp_grid_buffer,
                                                            temp_grid_buffer_count,
                                                            output_idx,
                                                            output_weight);
    // Stage 4: accumulate the weighted features.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, roi_pooling_fill_gpu_kernel, 0, roi_num * voxel_num);
    gridSize = (roi_num * voxel_num + blockSize - 1) / blockSize;
    roi_pooling_fill_gpu_kernel<<<gridSize,blockSize>>>(roi_num, voxel_num, channels, pooling_size,
                                                        input_features,
                                                        temp_count,
                                                        output_features,
                                                        output_idx,
                                                        output_weight);
}
|
5,449 | #include <cstdio>
#define N 64
#define B 2
#define T 32
// Demonstrates conditional __syncthreads(): the outer branch depends only
// on blockIdx.x, so every thread of a given block takes the same path and
// each barrier is reached by the whole block — no deadlock.
// Even-numbered blocks increment the even values in their slice of `in`;
// odd-numbered blocks decrement the odd values.
__global__ void dl(int* in)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(blockIdx.x % 2 == 0)
    {
        if(in[tid] % 2 == 0)
            in[tid]++;
        // Fine because conditional synchronization will
        // happen within a block.
        __syncthreads();
    }
    else {
        if(in[tid] % 2 == 1)
            in[tid]--;
        __syncthreads();
    }
    // Disabled 3-point smoothing pass (would race with neighbors anyway).
    /* int sum = in[tid];
    if(tid > 0)
        sum += in[tid-1];
    if(tid < N - 1)
        sum += in[tid+1];
    in[tid] = sum / 3; */
}
int main()
{
    // Host driver: fill the buffer with 0..N-1, run dl over B blocks of T
    // threads (B*T == N, one thread per element), then print the result.
    int* host = (int*) malloc(N * sizeof(int));
    for (int i = 0; i < N; ++i)
        host[i] = i;
    int* dev;
    cudaMalloc((void**)&dev, N * sizeof(int));
    cudaMemcpy(dev, host, N * sizeof(int), cudaMemcpyHostToDevice);
    dl<<<B,T>>>(dev);
    // Blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(host, dev, N * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i)
        printf("%d ", host[i]);
    printf("\n");
    free(host); cudaFree(dev);
}
5,450 | #include "includes.h"
// Circular 1-D stencil: c[i] = (a[i-1] - a[i+1]) * b[i] + a[i] * constant,
// where the neighbour indices wrap around the ends of the vector.
// One thread per element; surplus threads exit through the guard.
__global__ void calculation( int *a, int *b, int *c, int constant, int vector_size ) {
    const int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (tid >= vector_size)
        return;
    // Wrap-around neighbour indices.
    const int left  = (tid > 0) ? tid - 1 : vector_size - 1;
    const int right = (tid < vector_size - 1) ? tid + 1 : 0;
    c[tid] = (a[left] - a[right]) * b[tid] + a[tid] * constant;
}
5,451 | #include "includes.h"
// In-place tree reduction over global memory: each block sums the
// blockDim.x values it owns in d_in, and thread 0 writes the block's sum
// to d_out[blockIdx.x]. The contents of d_in are destroyed.
// NOTE(review): the halving loop appears to assume blockDim.x is a power
// of two (odd remainders would be dropped) — confirm at the call site.
__global__ void global_reduce_kernel(float * d_out, float * d_in)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// do reduction in global mem; myId + s stays inside this block's segment
// because s < blockDim.x and tid < s.
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
d_in[myId] += d_in[myId + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
5,452 | #include <stdio.h>
#include <pthread.h>
#include <unistd.h> //sleep
// Worker-thread body: wait five seconds, then report the thread number
// passed in through arg (a pointer to int). Always returns NULL/0.
void *hello (void *arg) {
    int *threadNumber = (int *)arg;
    sleep(5);
    printf("HelloThread %d\n", *threadNumber);
    return 0;
}
int main(void) {
    // Spawn a single worker thread and block until it finishes.
    pthread_t tid;
    int threadNum = 1;
    // pthread_create makes the new thread runnable immediately; it can be
    // called any number of times from anywhere in the program.
    pthread_create(&tid, NULL, hello, &threadNum);
    // pthread_join is the synchronization point: it blocks the caller
    // until the worker terminates.
    pthread_join(tid, NULL);
    printf("------------------------\n");
    return(0);
}
|
5,453 | struct Point {
double* x;
double* y;
double* z;
};
// A "reference" to one particle: pointers into the SoA arrays of a View.
struct Ref {
Point pos;
Point dir;
double* distance;
};
// SoA view over `size` particles; operator[] materialises a Ref whose
// pointers are offset by i into each component array.
struct View {
int size;
Point pos;
Point dir;
double* distance;
__device__ Ref operator[](int i) const {
return {{pos.x + i, pos.y + i, pos.z + i},
{dir.x + i, dir.y + i, dir.z + i},
distance + i};
}
};
// Advance one particle's position along its direction vector by the
// stored distance (pos += dir * distance, component-wise).
__device__ inline void move_impl(const Ref& ref) {
    const double step = *ref.distance;
    *ref.pos.x += *ref.dir.x * step;
    *ref.pos.y += *ref.dir.y * step;
    *ref.pos.z += *ref.dir.z * step;
}
// One thread per particle; threads beyond view.size fall through.
__global__ void move(View view) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= view.size)
        return;
    move_impl(view[i]);
}
|
5,454 | #include <cuda.h>
extern "C" {
int cudaPyHostToDevice(void*, void*, size_t, size_t);
int cudaPyDeviceToHost(void*, void*, size_t, size_t);
void* cudaPyAllocArray(size_t, size_t);
int cudaPyFree(void*);
}
// Copy N elements of tsize bytes from host to device memory.
// Returns the cudaError_t value (0 == cudaSuccess).
int cudaPyHostToDevice(void* dst, void* src, size_t N, size_t tsize) {
    const size_t bytes = N * tsize;
    return cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice);
}
// Copy N elements of tsize bytes from device to host memory.
// Returns the cudaError_t value (0 == cudaSuccess).
int cudaPyDeviceToHost(void* dst, void* src, size_t N, size_t tsize) {
    const size_t bytes = N * tsize;
    return cudaMemcpy(dst, src, bytes, cudaMemcpyDeviceToHost);
}
// Allocate a device array of N elements of tsize bytes each, preceded by a
// two-word header {tsize, N}. Returns a device pointer just past the
// header, or NULL if the allocation failed.
// Fixes vs. the original: cudaMemset was given &arr (the address of the
// host pointer variable) instead of the device pointer arr, and the header
// copy used sizeof(size_t) * sizeof(header) (= 128 bytes on LP64) instead
// of sizeof(header) (= 16 bytes), writing far past the header.
void* cudaPyAllocArray(size_t N, size_t tsize) {
    void* arr;
    size_t arraySize = 2 * sizeof(size_t) + N * tsize;
    if (cudaMalloc(&arr, arraySize))
        return NULL;
    cudaMemset(arr, 0, arraySize);              // zero the device block itself
    size_t header[2] = {tsize, N};
    cudaMemcpy(arr, header, sizeof(header), cudaMemcpyHostToDevice);
    return (void*)((size_t*)arr + 2);           // skip the header
}
// Free an array returned by cudaPyAllocArray: step back over the two-word
// header to recover the pointer that cudaMalloc actually returned.
int cudaPyFree(void* input) {
    size_t* base = (size_t*)input - 2;
    return cudaFree((void*)base);
}
|
5,455 | #include <stdio.h>
// Print each thread's 2-D coordinates and show how its global index is
// derived: global = blockIdx * blockDim + threadIdx.
// Fix vs. the original: the argument order made the printed identity false
// ("thread = block * dim + global"); the pairs are now passed so the
// equation reads "global = block * dim + thread".
__global__ void hello_kernel()
{
    printf("Hello from GPU thread (%d, %d) = (%d, %d) * (%d, %d) + (%d, %d)\n",
           blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y,
           blockIdx.x, blockIdx.y,
           blockDim.x, blockDim.y,
           threadIdx.x, threadIdx.y);
}
int main()
{
    // 1x2 grid of 8x4 blocks => 64 threads, each printing one line.
    dim3 blockShape(8, 4);
    dim3 gridShape(1, 2);
    hello_kernel<<<gridShape, blockShape>>>();
    // Flush device-side printf output before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
|
5,456 | #include <iostream>
#include <math.h>
//#include <cuda_runtime.h>
// function to copy the elements of an array and decrement to the compiler not override it
// Iterated polynomial recurrence per float4 component, evaluated with a
// grid-stride loop (each thread handles i, i+stride, i+2*stride, ...).
// After the inner loop the result is propagated z <- acc, y <- z, x <- y.
// Note: the k == 0 iteration multiplies the accumulator by zero, so its
// value carried over from the previous element never matters.
__global__
void newtonKernel(int n, float4* x, float4* y, float4* z){
    float4 acc = make_float4 (1.0f,1.0f,1.0f,1.0f);
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += stride){
        for (int k = 0; k < 40; k++){
            acc.x = acc.x * k * x[i].x * y[i].x + x[i].x + y[i].x;
            acc.y = acc.y * k * x[i].y * y[i].y + x[i].y + y[i].y;
            acc.z = acc.z * k * x[i].z * y[i].z + x[i].z + y[i].z;
            acc.w = acc.w * k * x[i].w * y[i].w + x[i].w + y[i].w;
        }
        // float4 assignment copies all four components at once.
        z[i] = acc;
        y[i] = z[i];
        x[i] = y[i];
    }
}
// Driver: run newtonKernel on 2^20 float4 elements held in unified
// (managed) memory, then report the largest deviation from 1.0f.
int main(void){
int N = 1<<20;
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;   // ceil-div covers the tail
float4 *x, *y, *z;
//variable allocation on GPU memory (managed: accessible from host and device)
cudaMallocManaged (&x, N*sizeof(float4));
cudaMallocManaged (&y, N* sizeof(float4));
cudaMallocManaged (&z, N*sizeof(float4));
// initialize x and y arrays on the device
//float val = 3.0f;
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = make_float4(1.0f,1.0f,1.0f, 1.0f);
y[i] = make_float4(2.0f,2.0f,2.0f, 2.0f);
z[i] = make_float4(1.0f,1.0f,1.0f, 1.0f);;
}
// Run kernel on 1M parallel elements on the GPU
newtonKernel<<<numBlocks, blockSize>>>(N, x, y, z);
// wait for the GPU to finish the results (required before the host reads
// managed memory written by the kernel)
cudaDeviceSynchronize();
// NOTE(review): the original comment claimed "all values should be 3.0f",
// but the recurrence in newtonKernel grows without bound for these inputs,
// so maxError is not expected to be 0 — confirm the intended check.
float maxError = 0.0f;
for (int i = 0; i < N; i++){
maxError = fmax(maxError, fabs(x[i].x - 1.0f));
maxError = fmax(maxError, fabs(x[i].y - 1.0f));
maxError = fmax(maxError, fabs(x[i].z - 1.0f));
maxError = fmax(maxError, fabs(x[i].w - 1.0f));
maxError = fmax(maxError, fabs(y[i].x - 1.0f));
maxError = fmax(maxError, fabs(y[i].y - 1.0f));
maxError = fmax(maxError, fabs(y[i].z - 1.0f));
maxError = fmax(maxError, fabs(y[i].w - 1.0f));
maxError = fmax(maxError, fabs(z[i].x - 1.0f));
maxError = fmax(maxError, fabs(z[i].y - 1.0f));
maxError = fmax(maxError, fabs(z[i].z - 1.0f));
maxError = fmax(maxError, fabs(z[i].w - 1.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free GPU memory
cudaFree(x);
cudaFree(y);
cudaFree(z);
return 0;
}
|
5,457 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
// Element-wise C = A + B using one thread per element (device buffers).
__global__ void sumArraysOnDevice(float* A, float* B, float* C, int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;   // tail guard for the last partial block
    C[i] = A[i] + B[i];
}
// Element-wise C = A + B; identical body to sumArraysOnDevice, but called
// with mapped (zero-copy) host pointers in part 2 of main.
__global__ void sumArraysZeroCopy(float* A, float* B, float* C, int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;   // tail guard for the last partial block
    C[i] = A[i] + B[i];
}
// CPU reference implementation: element-wise C = A + B over N floats.
void sumArraysOnHost(float* A, float* B, float* C, int N)
{
    for (int idx = 0; idx != N; ++idx)
        C[idx] = A[idx] + B[idx];
}
// Fill ip[0..size) with pseudo-random values in [0, 25.5], reseeding the
// C RNG from the wall clock on every call.
void initialData(float* ip, int size)
{
    time_t t;
    srand((unsigned int)time(&t));
    for (int k = 0; k != size; ++k)
        ip[k] = (float)(rand() & 0xFF) / 10.0f;
}
// Compare host and device results element-wise within a fixed tolerance;
// report the first mismatch (if any) on stdout, otherwise report a match.
void checkResult(float* hostResult, float* deviceResult, const int N)
{
    const double epsilon = 1.0E-8;
    for (int i = 0; i < N; ++i)
    {
        if (abs(hostResult[i] - deviceResult[i]) > epsilon)
        {
            printf("Array do not match\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostResult[i], deviceResult[i], i);
            return;   // stop at the first mismatch, like the original break
        }
    }
    printf("Array match\n");
}
// Demo: vector addition done twice — first with explicit device buffers and
// cudaMemcpy, then with zero-copy (mapped, pinned) host memory.
int main(int argc, char* argv[])
{
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
// Zero-copy requires the device to support mapping host memory.
if(deviceProp.canMapHostMemory == false)
{
printf("Device %d dose not support mapping CPU host memory!\n", dev);
cudaDeviceReset();
return -1;
}
printf("Using Device %d, %s\n", dev, deviceProp.name);
// Problem size: 2^iPower floats (default 1024), overridable via argv[1].
int iPower = 10;
if (argc > 1)
{
iPower = atoi(argv[1]);
}
int nElem = 1 << iPower;
size_t nBytes = nElem * sizeof(float);
// part 1 use device memory
float* h_a;
float* h_b;
float* hostRef;
float* gpuRef;
h_a = (float*)malloc(nBytes);
h_b = (float*)malloc(nBytes);
hostRef = (float*)malloc(nBytes);
gpuRef = (float*)malloc(nBytes);
initialData(h_a, nElem);
initialData(h_b, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
sumArraysOnHost(h_a, h_b, hostRef, nElem);   // CPU reference result
float* d_a;
float* d_b;
float* d_c;
cudaMalloc((float**)&d_a, nBytes);
cudaMalloc((float**)&d_b, nBytes);
cudaMalloc((float**)&d_c, nBytes);
cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice);
int nLen = 32;
dim3 block(nLen);
dim3 grid((nElem + block.x - 1) / block.x);   // ceil-div so the tail is covered
sumArraysOnDevice<<<grid, block >>>(d_a, d_b, d_c,nElem);
// Blocking copy — also synchronizes with the kernel launch above.
cudaMemcpy(gpuRef, d_c, nBytes, cudaMemcpyDeviceToHost);
checkResult(hostRef, gpuRef, nElem);
cudaFree(d_a);
cudaFree(d_b);
free(h_a);
free(h_b);
// part 2 pass the pointer to device
// h_a/h_b are re-allocated as mapped pinned memory; the device pointers
// obtained below alias the same physical pages, so no cudaMemcpy of the
// inputs is needed.
unsigned int flags = cudaHostAllocMapped;
cudaHostAlloc((void**)&h_a, nBytes, flags);
cudaHostAlloc((void**)&h_b, nBytes, flags);
initialData(h_a, nElem);
initialData(h_b, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
cudaHostGetDevicePointer((void**)&d_a, (void*)h_a, 0);
cudaHostGetDevicePointer((void**)&d_b, (void*)h_b, 0);
sumArraysOnHost(h_a, h_b, hostRef, nElem);
sumArraysZeroCopy<<<grid, block >>>(d_a, d_b, d_c, nElem);
cudaMemcpy(gpuRef, d_c, nBytes, cudaMemcpyDeviceToHost);
checkResult(hostRef, gpuRef, nElem);
cudaFree(d_c);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
free(gpuRef);
free(hostRef);
cudaDeviceReset();
system("Pause");
return 0;
}
|
5,458 | //host処理系のグローバル変数定義
#ifndef INCLUDED_HOSTVALUESACCESSER
#define INCLUDED_HOSTVALUESACCESSER
#include <stdio.h>
int cnnOutputNumsNums;
int *cnnOutputNums;
int execFlg;
int execFlgTraining = 0;
int execFlgOnline = 1;
int sv_xNums;
int sv_yNums;
int svChannelNums;
int miniBatchNums;
float *cnnBnBeta;
float *cnnBnGamma;
float bnEps;
int cnnW_xNums;
int cnnW_yNums;
int cnnWDataNums;
float *cnnW;
int *cnnWba_xNums;
int *cnnWba_yNums;
int *cnnP_xNums;
int *cnnP_yNums;
int cnnPooling_xNums;
int cnnPooling_yNums;
int mlpOutputNumsNums;
int *mlpOutputNums;
float *mlpBnBeta;
float *mlpBnGamma;
int mlpWDataNums;
float *mlpW;
float *teachOut;
float *result;
float learningRate;
float E = 0;
extern __device__ __host__ int getDim2Idx(int x, int y, int X);
extern __device__ __host__ int getDim4Idx(int x, int y, int z, int a, int X, int Y, int Z);
extern int getCnnInputIdxNums(int cnnLayer);
extern int getMlpInputIdxNums(int cnnLayer);
extern int getMlpOutputNumsNums();
extern int getMlpOutputNums(int mlpLayer);
extern int getCnnWba_xNums(int cnnLayer);
extern int getCnnWba_yNums(int cnnLayer);
extern int getHMlpWbaIdx(int outputIdx, int miniBatchIdx, int mlpLayer);
extern int getHCnnWbaIdx(int x, int y, int outputIdx, int miniBatchIdx, int cnnLayer);
// --- Trivial accessors for the file-scope configuration globals. ---
// Execution mode flag (training vs. online; see execFlgTraining/execFlgOnline).
void setExecFlg(int execFlg_arg){
execFlg = execFlg_arg;
}
int getExecFlg(){
return(execFlg);
}
int getExecFlgTraining(){
return(execFlgTraining);
}
int getExecFlgOnline(){
return(execFlgOnline);
}
// Supervisor/input image dimensions (sv_xNums x sv_yNums).
void setSv_xNums(int sv_xNums_arg){
sv_xNums = sv_xNums_arg;
}
int getSv_xNums(){
return(sv_xNums);
}
void setSv_yNums(int sv_yNums_arg){
sv_yNums = sv_yNums_arg;
}
int getSv_yNums(){
return(sv_yNums);
}
// Mini-batch size used throughout the index calculations below.
void setMiniBatchNums(int miniBatchNums_arg){
miniBatchNums = miniBatchNums_arg;
}
int getMiniBatchNums(){
return(miniBatchNums);
}
// Number of input channels of the first CNN layer.
void setSvChannelNums(int svChannelNums_arg){
svChannelNums = svChannelNums_arg;
}
int getSvChannelNums(){
return(svChannelNums);
}
// Number of CNN layers, and per-layer output counts (array of that length).
void setCnnOutputNumsNums(int cnnOutputNumsNums_arg){
cnnOutputNumsNums = cnnOutputNumsNums_arg;
}
int getCnnOutputNumsNums(){
return(cnnOutputNumsNums);
}
void setCnnOutputNums(int *cnnOutputNums_arg){
cnnOutputNums = cnnOutputNums_arg;
}
int getCnnOutputNums(int cnnLayer){
return(cnnOutputNums[cnnLayer]);
}
// Batch-norm beta array for the CNN layers (flattened; see getHCnnBnBetaGammaIdx).
void setCnnBnBeta(float *cnnBnBeta_arg){
cnnBnBeta = cnnBnBeta_arg;
}
void setLearningRate(float learningRate_arg){
learningRate = learningRate_arg;
}
float getLearningRate(){
return(learningRate);
}
// Offset of (outputIdx, cnnLayer) inside the flattened per-layer CNN
// batch-norm beta/gamma arrays: the sum of the output counts of all
// preceding layers, plus the index within this layer.
int getHCnnBnBetaGammaIdx(int outputIdx, int cnnLayer){
int base = 0;
for(int layer = 0; layer < cnnLayer; ++layer)
base += getCnnOutputNums(layer);
return(base + outputIdx);
}
// CNN batch-norm parameter lookups (flattened via getHCnnBnBetaGammaIdx).
float getCnnBnBeta(int outputIdx, int cnnLayer){
return(cnnBnBeta[getHCnnBnBetaGammaIdx(outputIdx, cnnLayer)]);
}
void setCnnBnGamma(float *cnnBnGamma_arg){
cnnBnGamma = cnnBnGamma_arg;
}
float getCnnBnGamma(int outputIdx, int cnnLayer){
return(cnnBnGamma[getHCnnBnBetaGammaIdx(outputIdx, cnnLayer)]);
}
// Batch-norm epsilon (numerical-stability constant).
void setBnEps(float bnEps_arg){
bnEps = bnEps_arg;
}
float getBnEps(){
return(bnEps);
}
// CNN convolution kernel dimensions (shared by all CNN layers).
void setCnnW_xNums(int cnnW_xNums_arg){
cnnW_xNums = cnnW_xNums_arg;
}
int getCnnW_xNums(){
return(cnnW_xNums);
}
void setCnnW_yNums(int cnnW_yNums_arg){
cnnW_yNums = cnnW_yNums_arg;
}
int getCnnW_yNums(){
return(cnnW_yNums);
}
// Total element count of the flattened CNN weight array cnnW.
void setCnnWDataNums(int cnnWDataNums_arg){
cnnWDataNums = cnnWDataNums_arg;
}
int getCnnWDataNums(){
return(cnnWDataNums);
}
void setCnnW(float *cnnW_arg){
cnnW = cnnW_arg;
}
// Flat index of weight (x, y, inputIdx, outputIdx) of a CNN layer: skip the
// weight blocks of all preceding layers, then use 4-D row-major indexing
// (x fastest) within this layer's block.
int getHCnnWIdx(int x, int y, int inputIdx, int outputIdx, int cnnLayer){
int layer;
int idx = 0;
for(layer = 0; layer < cnnLayer; layer ++){
idx += getCnnW_xNums() * getCnnW_yNums() * getCnnInputIdxNums(layer) * getCnnOutputNums(layer);
}
int inputIdxNums = getCnnInputIdxNums(cnnLayer);
idx += getDim4Idx(x, y, inputIdx, outputIdx, getCnnW_xNums(), getCnnW_yNums(), inputIdxNums);
return(idx);
}
float getCnnW(int x, int y, int inputIdx, int outputIdx, int cnnLayer){
return(cnnW[getHCnnWIdx(x, y, inputIdx, outputIdx, cnnLayer)]);
}
// Per-layer activation-map ("Wba") dimensions.
void setCnnWba_xNums(int *cnnWba_xNums_arg){
cnnWba_xNums = cnnWba_xNums_arg;
}
int getCnnWba_xNums(int cnnLayer){
return(cnnWba_xNums[cnnLayer]);
}
void setCnnWba_yNums(int *cnnWba_yNums_arg){
cnnWba_yNums = cnnWba_yNums_arg;
}
int getCnnWba_yNums(int cnnLayer){
return(cnnWba_yNums[cnnLayer]);
}
// Flat index of activation (x, y, outputIdx, miniBatchIdx) of a CNN layer;
// same skip-preceding-layers-then-4-D-index scheme as getHCnnWIdx.
int getHCnnWbaIdx(int x, int y, int outputIdx, int miniBatchIdx, int cnnLayer){
int idx = 0;
int layer;
for(layer = 0; layer < cnnLayer; layer ++){
idx += getCnnWba_xNums(layer) * getCnnWba_yNums(layer) * getCnnOutputNums(layer) * getMiniBatchNums();
}
idx += getDim4Idx(x, y, outputIdx, miniBatchIdx, getCnnWba_xNums(cnnLayer), getCnnWba_yNums(cnnLayer), getCnnOutputNums(cnnLayer));
return(idx);
}
// Per-layer pooled-output dimensions of the CNN layers.
void setCnnP_xNums(int *cnnP_xNums_arg){
cnnP_xNums = cnnP_xNums_arg;
}
int getCnnP_xNums(int cnnLayer){
return(cnnP_xNums[cnnLayer]);
}
void setCnnP_yNums(int *cnnP_yNums_arg){
cnnP_yNums = cnnP_yNums_arg;
}
int getCnnP_yNums(int cnnLayer){
return(cnnP_yNums[cnnLayer]);
}
// Pooling window dimensions (shared by all CNN layers).
void setCnnPooling_xNums(int cnnPooling_xNums_arg){
cnnPooling_xNums = cnnPooling_xNums_arg;
}
int getCnnPooling_xNums(){
return(cnnPooling_xNums);
}
void setCnnPooling_yNums(int cnnPooling_yNums_arg){
cnnPooling_yNums = cnnPooling_yNums_arg;
}
int getCnnPooling_yNums(){
return(cnnPooling_yNums);
}
// Number of MLP layers, and per-layer output counts (array of that length).
void setMlpOutputNumsNums(int mlpOutputNumsNums_arg){
mlpOutputNumsNums = mlpOutputNumsNums_arg;
}
int getMlpOutputNumsNums(){
return(mlpOutputNumsNums);
}
void setMlpOutputNums(int *mlpOutputNums_arg){
mlpOutputNums = mlpOutputNums_arg;
}
int getMlpOutputNums(int mlpLayer){
return(mlpOutputNums[mlpLayer]);
}
// MLP batch-norm parameters, flattened per layer like the CNN ones.
void setMlpBnBeta(float *mlpBnBeta_arg){
mlpBnBeta = mlpBnBeta_arg;
}
// Offset of (outputIdx, mlpLayer) in the flattened MLP beta/gamma arrays.
int getHMlpBnBetaGammaIdx(int outputIdx, int mlpLayer){
int layer;
int idx = 0;
for(layer = 0; layer < mlpLayer; layer ++){
idx += getMlpOutputNums(layer);
}
idx += outputIdx;
return(idx);
}
float getMlpBnBeta(int outputIdx, int mlpLayer){
return(mlpBnBeta[getHMlpBnBetaGammaIdx(outputIdx, mlpLayer)]);
}
void setMlpBnGamma(float *mlpBnGamma_arg){
mlpBnGamma = mlpBnGamma_arg;
}
float getMlpBnGamma(int outputIdx, int mlpLayer){
return(mlpBnGamma[getHMlpBnBetaGammaIdx(outputIdx, mlpLayer)]);
}
// Total element count of the flattened MLP weight array mlpW.
void setMlpWDataNums(int mlpWDataNums_arg){
mlpWDataNums = mlpWDataNums_arg;
}
int getMlpWDataNums(){
return(mlpWDataNums);
}
void setMlpW(float *mlpW_arg){
mlpW = mlpW_arg;
}
// Flat index of weight (inputIdx, outputIdx) of an MLP layer: skip the
// weight blocks of preceding layers, then 2-D index (input fastest).
int getHMlpWIdx(int inputIdx, int outputIdx, int mlpLayer){
int layer;
int idx = 0;
for(layer = 0; layer < mlpLayer; layer ++){
idx += getMlpInputIdxNums(layer) * getMlpOutputNums(layer);
}
int inputIdxNums = getMlpInputIdxNums(mlpLayer);
idx += getDim2Idx(inputIdx, outputIdx, inputIdxNums);
return(idx);
}
float getMlpW(int inputIdx, int outputIdx, int mlpLayer){
return(mlpW[getHMlpWIdx(inputIdx, outputIdx, mlpLayer)]);
}
// Flat index of MLP activation (outputIdx, miniBatchIdx) of a layer.
int getHMlpWbaIdx(int outputIdx, int miniBatchIdx, int mlpLayer){
int idx = 0;
int layer;
for(layer = 0; layer < mlpLayer; layer ++){
idx += getMlpOutputNums(layer) * getMiniBatchNums();
}
idx += getDim2Idx(outputIdx, miniBatchIdx, getMlpOutputNums(mlpLayer));
return(idx);
}
// Network output (`result`) and supervised target (`teachOut`) arrays;
// both are indexed as (outputIdx, miniBatchIdx) over the final MLP layer.
void setResult(float *result_arg){
result = result_arg;
}
void setTeachOut(float *teachOut_arg){
teachOut = teachOut_arg;
}
int getTeachOutResultIdx(int outputIdx, int miniBatchIdx){
return(getDim2Idx(outputIdx, miniBatchIdx, getMlpOutputNums(getMlpOutputNumsNums() - 1)));
}
float getResult(int outputIdx, int miniBatchIdx){
return(result[getTeachOutResultIdx(outputIdx, miniBatchIdx)]);
}
float getTeachOut(int outputIdx, int miniBatchIdx){
return(teachOut[getTeachOutResultIdx(outputIdx, miniBatchIdx)]);
}
// Flatten an (x, y) coordinate into a linear index; x varies fastest.
__device__ __host__
int getDim2Idx(int x, int y, int X){
return(y * X + x);
}
// Flatten (x, y, z); z has stride X*Y.
__device__ __host__
int getDim3Idx(int x, int y, int z, int X, int Y){
return(getDim2Idx(x, y, X) + X * Y * z);
}
// Flatten (x, y, z, a); a has stride X*Y*Z.
__device__ __host__
int getDim4Idx(int x, int y, int z, int a, int X, int Y, int Z){
return(getDim3Idx(x, y, z, X, Y) + X * Y * Z * a);
}
// Flatten (x, y, z, a, b); b has stride X*Y*Z*A.
__device__ __host__
int getDim5Idx(int x, int y, int z, int a, int b, int X, int Y, int Z, int A){
return(getDim4Idx(x, y, z, a, X, Y, Z) + X * Y * Z * A * b);
}
// Returns the number of input channels feeding the given CNN layer:
// the raw input channel count for layer 0, otherwise the previous
// layer's output count.
int getCnnInputIdxNums(int cnnLayer){
int inputIdxNums;
if(cnnLayer == 0){
inputIdxNums = getSvChannelNums();
}
else{
inputIdxNums = getCnnOutputNums(cnnLayer - 1);
}
return(inputIdxNums);
}
// Returns the number of inputs feeding the given MLP layer: for layer 0
// this is the flattened size of the last CNN layer's pooled output,
// otherwise the previous MLP layer's output count.
int getMlpInputIdxNums(int mlpLayer){
int inputIdxNums;
if(mlpLayer == 0){
int cnnOutputNumsLastIdx = getCnnOutputNumsNums() - 1;
inputIdxNums = getCnnP_xNums(cnnOutputNumsLastIdx) * getCnnP_yNums(cnnOutputNumsLastIdx) * getCnnOutputNums(cnnOutputNumsLastIdx);
}
else{
inputIdxNums = getMlpOutputNums(mlpLayer - 1);
}
return(inputIdxNums);
}
// Total number of outputs summed over all CNN layers.
int getCnnOutputNumsSum(){
int total = 0;
for(int layer = 0; layer < getCnnOutputNumsNums(); ++layer)
total += getCnnOutputNums(layer);
return(total);
}
// Total number of outputs summed over all MLP layers.
int getMlpOutputNumsSum(){
int total = 0;
for(int layer = 0; layer < getMlpOutputNumsNums(); ++layer)
total += getMlpOutputNums(layer);
return(total);
}
#endif
|
5,459 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
/********************************************************************
 *
 * Compute C = A + B element-wise, where A, B and C are (1 * n)
 * vectors. One thread per element; threads past the tail exit
 * through the bounds guard.
 *
 ********************************************************************/
__global__ void VecAdd(int n, const float *A, const float *B, float *C) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if(gid >= n) {
        return;
    }
    C[gid] = A[gid] + B[gid];
}
|
5,460 | /* Voxel sampling GPU implementation
* Author Zhaoyu SU
* All Rights Reserved. Sep., 2019.
*/
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <iostream>
#include <vector>
#define USECPSEC 1000000ULL
// Binary search for target_voxel_id in the sorted slice
// input_voxel_idx[start_id..stop_id] (inclusive). Returns an index of the
// target within the slice, or -1 if absent.
__device__ inline int binary_search(const long long* input_voxel_idx,
                                    int start_id,
                                    int stop_id,
                                    long long target_voxel_id) {
    // Fix: guard against an empty slice. A batch with input_num_list[b] == 0
    // makes stop_id == start_id - 1, and the range pre-check below would
    // then read input_voxel_idx out of bounds (index -1 for batch 0).
    if (start_id > stop_id)
        return -1;
    if (input_voxel_idx[start_id] > target_voxel_id || input_voxel_idx[stop_id] < target_voxel_id)
        return -1;
    while (start_id <= stop_id) {
        int m = start_id + (stop_id - start_id) / 2;
        if (input_voxel_idx[m] == target_voxel_id)
            return m;
        if (input_voxel_idx[m] < target_voxel_id)
            start_id = m + 1;
        else
            stop_id = m - 1;
    }
    return -1;
}
//__device__ int start_loc_search(const long long* input_voxel_idx,
// int grid_buffer_size,
// int start_id, int loc) {
// long long input_idx = input_voxel_idx[loc];
// long long query_idx = input_idx;
// int start_loc = loc;
// int count = 0;
// while(query_idx == input_idx && start_loc > start_id && count < grid_buffer_size) {
// query_idx = input_voxel_idx[--start_loc];
// count += 1;
// }
// if (query_idx != input_idx)
// start_loc += 1;
// return start_loc;
//}
// Walk backwards from `loc` (known to hold the voxel id at `loc`) towards
// `start_loc` to find the first element of the run of equal voxel ids.
// The scan is capped at grid_buffer_size steps, so a run longer than the
// buffer is truncated from the front.
__device__ inline int start_loc_search(const long long* input_voxel_idx,
int grid_buffer_size,
int start_loc, int loc) {
long long input_idx = input_voxel_idx[loc];
int count = 0;
// printf("%d, %d\n", ret_loc, loc);
int ret_loc = loc;
for (ret_loc = loc; ret_loc > start_loc && count < grid_buffer_size; --ret_loc) {
// while(query_idx == input_idx && ret_loc < stop_loc && count < grid_buffer_size) {
if (input_voxel_idx[ret_loc] != input_idx) {
// Stepped one past the run — move back onto the first matching element.
ret_loc++;
break;
}
// printf("%lld@%d, %lld@%d\n", input_idx, loc, query_idx, ret_loc);
count++;
}
// if (query_idx != input_idx)
// ret_loc -= 1;
return ret_loc;
}
// Mirror of start_loc_search: walk forwards from `loc` towards `stop_loc`
// to find the last element of the run of equal voxel ids, capped at
// grid_buffer_size steps.
__device__ inline int stop_loc_search(const long long* input_voxel_idx,
int grid_buffer_size,
int stop_loc, int loc) {
long long input_idx = input_voxel_idx[loc];
int count = 0;
int ret_loc = loc;
// printf("%d, %d\n", ret_loc, loc);
for (ret_loc = loc; ret_loc < stop_loc && count < grid_buffer_size; ++ret_loc) {
// while(query_idx == input_idx && ret_loc < stop_loc && count < grid_buffer_size) {
if (input_voxel_idx[ret_loc] != input_idx) {
// Stepped one past the run — move back onto the last matching element.
ret_loc--;
break;
}
// printf("%d@%d, %d@%d\n", input_idx, loc, query_idx, ret_loc);
count += 1;
}
// if (query_idx != input_idx)
// ret_loc -= 1;
return ret_loc;
}
// For every center point, gather the indices of input points falling into a
// kernel_size^3 voxel neighbourhood around it. Batches are distributed over
// blocks (stride gridDim.x) and centers over threads (stride blockDim.x).
// The sorted input_voxel_idx array is searched with binary_search, then the
// run of equal voxel ids is expanded with start/stop_loc_search.
__global__ void voxel_sampling_idx_binary_gpu_kernel(int batch_size, int input_npoint,
                                                     int center_num, int kernel_size,
                                                     float dim_w, float dim_l, float dim_h,
                                                     float resolution_w, float resolution_l, float resolution_h,
                                                     int grid_buffer_size, int output_pooling_size, bool with_rpn,
                                                     const float* input_coors,
                                                     const long long* input_voxel_idx,
                                                     const int* input_num_list,
                                                     const float* center_coors,
                                                     const int* center_num_list,
                                                     int* input_accu_list,
                                                     int* center_accu_list,
                                                     int* output_idx,
                                                     int* output_idx_count,
                                                     int* valid_idx) {
    // Degenerate launch: nothing to do. Uniform across all threads, so the
    // early return cannot break the __syncthreads() below.
    if (batch_size*input_npoint <=0 || center_num <= 0) {
//        printf("Voxel sample Op exited unexpectedly.\n");
        return;
    }
    const int half_kernel_size = (kernel_size - 1) / 2;
    // Search radius: 1.5 voxels in each dimension around a center.
    const float radius_x = 1.5 * resolution_w;
    const float radius_y = 1.5 * resolution_l;
    const float radius_z = 1.5 * resolution_h;
    const float r_x2 = radius_x * radius_x;
    const float r_y2 = radius_y * radius_y;
    const float r_z2 = radius_z * radius_z;
    const int kernel_num = kernel_size * kernel_size * kernel_size;
    // Linear offset of the kernel's central cell within a kernel_num block.
    const int center_offset = kernel_size * kernel_size * half_kernel_size + \
                              kernel_size * half_kernel_size + \
                              half_kernel_size;
    const float EPS = 1e-6;
    int grid_w = dim_w / resolution_w;
    int grid_l = dim_l / resolution_l;
    int grid_h = dim_h / resolution_h;
//    printf("%f, %f, %f\n", dim_w, dim_l, dim_h);
    for (int b=blockIdx.x; b<batch_size; b+=gridDim.x) {
        // Phase 1: initialise every output slot of this batch's centers to -1.
        for (int i=threadIdx.x; i<center_num_list[b]; i+=blockDim.x) {
            for (int j=0; j<kernel_num; j++) {
                int voxel_coor = center_accu_list[b]*kernel_num + i*kernel_num + j;
                for (int p=0; p<output_pooling_size; p++)
                    output_idx[voxel_coor*output_pooling_size + p] = -1;
            }
        }
        // All threads of a block share the same b sequence, so this barrier
        // is reached uniformly; it orders phase-1 writes before phase 2.
        __syncthreads();
        // Phase 2: populate the neighbourhood of each center.
        for (int i=threadIdx.x; i<center_num_list[b]; i+=blockDim.x) {
            float x_c = center_coors[center_accu_list[b]*3 + i*3 + 0];
            float y_c = center_coors[center_accu_list[b]*3 + i*3 + 1];
            float z_c = center_coors[center_accu_list[b]*3 + i*3 + 2];
            int grid_coor_w = x_c / resolution_w;
            int grid_coor_l = y_c / resolution_l;
            int grid_coor_h = z_c / resolution_h;
//            long long grid_idx_c = grid_coor_h * grid_w * grid_l + grid_coor_l * grid_w + grid_coor_w;
            // 4-cell search window per axis, biased to the side of the cell
            // the center actually lies in.
            int grid_search_w_min, grid_search_w_max;
            int grid_search_l_min, grid_search_l_max;
            int grid_search_h_min, grid_search_h_max;
            if (grid_coor_w * resolution_w + 0.5 * resolution_w > x_c) {
                grid_search_w_min = grid_coor_w - 2;
                grid_search_w_max = grid_coor_w + 1;
            }else{
                grid_search_w_min = grid_coor_w - 1;
                grid_search_w_max = grid_coor_w + 2;
            }
            if (grid_coor_l * resolution_l + 0.5 * resolution_l > y_c) {
                grid_search_l_min = grid_coor_l - 2;
                grid_search_l_max = grid_coor_l + 1;
            }else{
                grid_search_l_min = grid_coor_l - 1;
                grid_search_l_max = grid_coor_l + 2;
            }
            if (grid_coor_h * resolution_h + 0.5 * resolution_h > z_c) {
                grid_search_h_min = grid_coor_h - 2;
                grid_search_h_max = grid_coor_h + 1;
            }else{
                grid_search_h_min = grid_coor_h - 1;
                grid_search_h_max = grid_coor_h + 2;
            }
            // Scan the (clamped) search window cell by cell.
            for (int w=max(0, grid_search_w_min); w<=min(grid_search_w_max, grid_w-1); w++) {
                for (int l=max(0, grid_search_l_min); l<=min(grid_search_l_max, grid_l-1); l++) {
                    for (int h=max(0, grid_search_h_min); h<=min(grid_search_h_max, grid_h-1); h++) {
                        long long target_grid_id = h * grid_w * grid_l + l * grid_w + w;
                        int batch_start_id = input_accu_list[b];
                        int batch_stop_id = input_accu_list[b] + input_num_list[b] - 1;
                        int target_id = binary_search(input_voxel_idx,
                                                      batch_start_id,
                                                      batch_stop_id,
                                                      target_grid_id);
//                        if (id > 100000)
//                            printf("************VoxelSamplingBinaryOpId: %d\n", id);
                        if (target_id>=0) {
//                            printf("%d, %d\n", batch_start_id, batch_stop_id);
//                            int i = id;
                            // Expand the hit into the full run of points that
                            // share this voxel id.
                            int target_start_id = start_loc_search(input_voxel_idx, grid_buffer_size, batch_start_id, target_id);
                            int target_stop_id = stop_loc_search(input_voxel_idx, grid_buffer_size, batch_stop_id, target_id);
//                            if (stop_id > start_id)
//                                printf("%lld, %lld, %lld\n", input_voxel_idx[target_start_id-1], input_voxel_idx[target_id], input_voxel_idx[target_stop_id+1]);
                            for (int id=target_start_id; id<=target_stop_id; id++) {
                                float x_i = input_coors[id*3 + 0];
                                float y_i = input_coors[id*3 + 1];
                                float z_i = input_coors[id*3 + 2];
                                // EPS keeps the dx/|dx| sign terms below from
                                // dividing by exactly zero.
                                float dx = x_i - x_c + EPS;
                                float dy = y_i - y_c + EPS;
                                float dz = z_i - z_c + EPS;
                                float dx2 = dx * dx;
                                float dy2 = dy * dy;
                                float dz2 = dz * dz;
                                if (dx2 < r_x2 && dy2 < r_y2 && dz2 < r_z2) {
                                    // Relative cell coordinate in [-half, +half],
                                    // rounded away from zero by half a cell.
                                    int x_coor = __float2int_rz(dx / resolution_w + 0.5 * fabsf(dx) / dx);
                                    int y_coor = __float2int_rz(dy / resolution_l + 0.5 * fabsf(dy) / dy);
                                    int z_coor = __float2int_rz(dz / resolution_h + 0.5 * fabsf(dz) / dz);
                                    int voxel_coor = center_accu_list[b] * kernel_num + i * kernel_num + center_offset + \
                                                     kernel_size * kernel_size * x_coor + \
                                                     kernel_size * y_coor + \
                                                     z_coor;
                                    // Count every candidate; only the first
                                    // output_pooling_size of them are stored.
                                    int pooling_count = atomicAdd(&output_idx_count[voxel_coor], 1);
                                    if (pooling_count < output_pooling_size) {
                                        output_idx[voxel_coor*output_pooling_size + pooling_count] = id;
                                        if (with_rpn)
                                            valid_idx[center_accu_list[b] + i]++;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
// Host-side launcher: unpacks the dimension/resolution vectors and starts
// the kernel with a fixed <<<16, 512>>> configuration (the kernel itself
// strides over batches/centers, so the shape need not match the workload).
void voxel_sampling_idx_binary_gpu_launcher(int batch_size, int input_npoint,
                                            int center_num, int kernel_size,
                                            std::vector<float> dimension, std::vector<float> resolution,
                                            int grid_buffer_size, int output_pooling_size, bool with_rpn,
                                            const float* input_coors,
                                            const long long* input_voxel_idx,
                                            const int* input_num_list,
                                            const float* center_coors,
                                            const int* center_num_list,
                                            int* input_accu_list,
                                            int* center_accu_list,
                                            int* output_idx,
                                            int* output_idx_count,
                                            int* valid_idx) {
//    printf("*********** Here ***********\n");
    voxel_sampling_idx_binary_gpu_kernel<<<16,512>>>(batch_size, input_npoint,
                                                     center_num, kernel_size,
                                                     dimension[0], dimension[1], dimension[2],
                                                     resolution[0], resolution[1], resolution[2],
                                                     grid_buffer_size, output_pooling_size, with_rpn,
                                                     input_coors,
                                                     input_voxel_idx,
                                                     input_num_list,
                                                     center_coors,
                                                     center_num_list,
                                                     input_accu_list,
                                                     center_accu_list,
                                                     output_idx,
                                                     output_idx_count,
                                                     valid_idx);
}
|
5,461 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
static const int WORK_SIZE = /*256*/ 2;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
/**
 * Reverse the bit order inside each byte of `number` (byte positions are
 * unchanged): swap nibbles, then 2-bit pairs, then neighbouring bits.
 */
__device__ unsigned int bitreverse1(unsigned int number) {
    unsigned int v = number;
    v = ((v & 0xf0f0f0f0u) >> 4) | ((v & 0x0f0f0f0fu) << 4);
    v = ((v & 0xccccccccu) >> 2) | ((v & 0x33333333u) << 2);
    v = ((v & 0xaaaaaaaau) >> 1) | ((v & 0x55555555u) << 1);
    return v;
}
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
/**
 * CUDA kernel: each thread reverses the bit order (per byte, via
 * bitreverse1) of its own element of the array.
 */
__global__ void bitreverse(void *data) {
    unsigned int *words = (unsigned int*) data;
    words[threadIdx.x] = bitreverse1(words[threadIdx.x]);
}
|
5,462 | //pass: checka a função device (comparar com o cuda69_test2)
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
// Device helper: each thread zeroes its own slot (indexed by threadIdx.x).
__device__ void bar(int* p) {
    p[threadIdx.x] = 0;
}
// Kernel entry point: delegates the per-thread work to the device helper.
__global__ void foo(int* p) {
    bar(p);
}
int main() {
    // Fill a host buffer with random values, print it, zero it on the GPU
    // with foo<<<1,N>>>, copy back, and assert every element is now 0.
    int *host = (int*)malloc(N*sizeof(int));
    int *dev;
    for (int i = 0; i < N; ++i)
        host[i] = rand() %10+1;
    for (int i = 0; i < N; ++i)
        printf(" %d; ", host[i]);
    cudaMalloc((void**)&dev, N*sizeof(int));
    cudaMemcpy(dev, host, N*sizeof(int), cudaMemcpyHostToDevice);
    foo<<<1, N>>>(dev);
    //ESBMC_verify_kernel(foo,1,N,dev_c);
    cudaMemcpy(host, dev, N*sizeof(int), cudaMemcpyDeviceToHost);
    printf ("\n");
    for (int i = 0; i < N; ++i){
        printf(" %d; ", host[i]);
        assert(host[i]==0);
    }
    free(host);
    cudaFree(dev);
    return 0;
}
|
5,463 | #include "includes.h"
// Busy-wait on the device until at least `clock_count` clock ticks have
// elapsed, then store the observed tick count in d_o[0]. Useful for
// keeping a kernel resident for a controlled amount of time.
__global__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
//                           end_clock - start_clock :
//                           end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
5,464 | #include <stdio.h>
#include <cuda_runtime.h>
// Scalar addition on the device: *c = *a + *b (all device pointers).
// Intended for a single-thread <<<1,1>>> launch.
__global__ void gpu_add(int* a, int* b, int* c)
{
    const int lhs = *a;
    const int rhs = *b;
    *c = lhs + rhs;
}
int main()
{
    int a, b, c;          /* values on this program's stack (main memory) */
    int *d_a, *d_b, *d_c; /* pointers to ints allocated in GPU memory */
    // Allocate one int on the GPU for each operand and for the result.
    cudaMalloc((void **)&d_a, sizeof(int));
    cudaMalloc((void **)&d_b, sizeof(int));
    cudaMalloc((void **)&d_c, sizeof(int));
    a = 42;
    b = 1337;
    /* cudaMemcpyHostToDevice is one value of an enum describing the
     * direction of the transfer: here, from system RAM to GPU memory. */
    cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    /* <<<1,1>>> launches one block containing a single thread —
     * enough for a scalar addition. */
    gpu_add<<<1,1>>>(d_a, d_b, d_c);
    // This blocking copy also synchronizes with the kernel above.
    cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Resultado: %d\n", c);
    /* Release the GPU allocations (leaking them would hold device
     * memory until the context is destroyed). */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
5,465 | /*
*
* Matthew Baron
* Homework #4
* 3/16/2015
* CSCI 4150
* CUDA Version #1
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <iomanip>
#include <cuda.h>
#define BLOCK_SIZE 16
using namespace std;
//Row-Major Matrix struct
typedef struct {
int width;
int height;
int* elements;
} Matrix;
__global__ void MatrixMultiplyKernel(Matrix A, Matrix B, Matrix C);
// Compute matC = matA * matB on the GPU (row-major int matrices).
// Fixes vs. the original: device matrix dimensions are now taken from the
// arguments instead of a hard-coded 16384; the d_C device buffer is freed
// (it leaked before); the deprecated cudaThreadSynchronize() is replaced
// by cudaDeviceSynchronize().
void MatrixMultiply(Matrix matA, Matrix matB, Matrix matC){
    Matrix d_A, d_B, d_C;
    d_A.height = matA.height;
    d_A.width = matA.width;
    std::cout << "Matrix d_A Values Set" << std::endl;
    d_B.height = matB.height;
    d_B.width = matB.width;
    std::cout << "Matrix d_B Values Set" << std::endl;
    d_C.height = matA.height;   // result is (A.height x B.width)
    d_C.width = matB.width;
    std::cout << "Matrix d_C Values Set" << std::endl;
    /* Allocate and copy memory to DEVICE */
    size_t sizeA = (size_t)d_A.width * d_A.height * sizeof(int);
    size_t sizeB = (size_t)d_B.width * d_B.height * sizeof(int);
    size_t sizeC = (size_t)d_C.width * d_C.height * sizeof(int);
    cudaMalloc(&d_A.elements, sizeA);
    cudaMemcpy(d_A.elements, matA.elements, sizeA, cudaMemcpyHostToDevice);
    cudaMalloc(&d_B.elements, sizeB);
    cudaMemcpy(d_B.elements, matB.elements, sizeB, cudaMemcpyHostToDevice);
    cudaMalloc(&d_C.elements, sizeC);
    std::cout << "Memory Allocation on DEVICE Complete" << std::endl;
    /* One thread per output element, rounded up to whole blocks */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((d_C.width + dimBlock.x - 1) / dimBlock.x, (d_C.height + dimBlock.y - 1) / dimBlock.y);
    std::cout << "Begin Call to Kernel for Matrix Multiplication.... " << std::endl;
    MatrixMultiplyKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaDeviceSynchronize();
    std::cout << "Matrix Multiplication Complete" << std::endl;
    std::cout << "Copying Device Matrix d_C back to Host C..." << std::endl;
    /* Copy results from DEVICE to HOST */
    cudaMemcpy(matC.elements, d_C.elements, sizeC, cudaMemcpyDeviceToHost);
    std::cout << "Matrix Copy Complete" << std::endl;
    /* Deallocate Memory on DEVICE (d_C leaked in the original) */
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
    std::cout << "Absolving Memory Allocations..." << std::endl;
}//MatrixMultiply
// One thread computes one element of C = A * B (row-major storage).
__global__ void MatrixMultiplyKernel(Matrix A, Matrix B, Matrix C){
    int sumValue = 0;
    int col = blockIdx.x * blockDim.x + threadIdx.x;//Thread association for columns
    int row = blockIdx.y * blockDim.y + threadIdx.y;//Thread association for rows
    // Bounds check. Fix vs. the original: `>` allowed col == B.width and
    // row == A.height to slip through, producing out-of-bounds reads of A/B
    // and an out-of-bounds write to C on the last partial block.
    if(col >= B.width || row >= A.height){
        return;
    }
    for(int i = 0; i < A.width; i++){
        sumValue += A.elements[(row * A.width) + i] * B.elements[(i * B.width) + col];
    }
    C.elements[(row * C.width) + col] = sumValue;//Store summation in new matrix
}//global Kernel
// Allocates three value x value matrices, fills A and B with random data,
// multiplies them on the GPU and reports the elapsed time.
int main(void){
int value = 16384;//Multiple of 16
Matrix matA, matB, matC;
matA.height = value;
matA.width = value;
matA.elements = (int*)malloc(matA.width * matA.height * sizeof(int)); //String of elements that represent Row-Major Matrix
std::cout << "Matrix A Allocations Complete" << std::endl;
matB.height = value;
matB.width = value;
matB.elements = (int*)malloc(matB.width * matB.height * sizeof(int)); //String of elements that represent Row-Major Matrix
std::cout << "Matrix B Allocations Complete" << std::endl;
matC.height = value;
matC.width = value;
matC.elements = (int*)malloc(matC.width * matC.height * sizeof(int)); //String of elements that represent Row-Major Matrix
std::cout << "Matrix C Allocations Complete" << std::endl;
std::cout << "Current Value: " << value << std::endl;
//Fill matrices with random data
srand(time(NULL));
for(int p = 0; p < value; ++p){
for(int q = 0; q < value; ++q){
matA.elements[(p * value) + q] = rand();
matB.elements[(p * value) + q] = rand();
}//Q
}//P
std::cout << "Random Data Fill for Matrices Complete " << std::endl;
//Declare Time Events
cudaEvent_t start, stop;
float time;
//Create CUDA Time Events
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::cout << "Begin Matrix Multiply:.... " << std::endl;
//Begin Recording
cudaEventRecord( start, 0 );
//Perform Kernel Operations
MatrixMultiply(matA, matB, matC);
//Halt Time Event Recording
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
std::cout << "Job Complete: " << std::endl;
//Calculate and Store Time in CUDA Elapsed Time
cudaEventElapsedTime( &time, start, stop );
//Free Event Memory
cudaEventDestroy( start );
cudaEventDestroy( stop );
// The old format string "%.*e ms/n" had no precision argument for the '*'
// (undefined behavior, printed garbage/zero) and a "/n" typo.
printf("Elapsed Time : %e ms\n", time);
std::cout << std::endl;
//Release host matrices (previously leaked)
free(matA.elements);
free(matB.elements);
free(matC.elements);
return 0;
}//main
|
5,466 | #include "kernel.cuh"
#include <stdio.h>
__global__
void VecAdd(const int* A, const int* B, int* C, int N)
{
    // One element per thread; skip threads past the end of the vectors.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N)
        return;
    C[gid] = A[gid] + B[gid];
}
__global__
void VecSub(const int* A, const int* B, int* C, int N)
{
    // One element per thread; skip threads past the end of the vectors.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N)
        return;
    C[gid] = A[gid] - B[gid];
}
// Host wrapper: launches VecAdd with 256 threads/block and enough blocks for N.
void callVecAdd(const int* d_A, const int* d_B, int* d_C, int N){
// The original "int threadsPerBlock = threadsPerBlock = 256;" read the
// variable inside its own initializer.
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("Launching kernel: blocks %d, thread/block %d\n",
blocksPerGrid, threadsPerBlock);
VecAdd << <blocksPerGrid, threadsPerBlock >> > (d_A, d_B, d_C, N);
}
// Host wrapper: launches VecSub with 256 threads/block and enough blocks for N.
void callVecSub(const int* d_A, const int* d_B, int* d_C, int N){
// Same self-referencing initializer defect as callVecAdd, fixed.
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("Launching kernel: blocks %d, thread/block %d\n",
blocksPerGrid, threadsPerBlock);
VecSub << <blocksPerGrid, threadsPerBlock >> > (d_A, d_B, d_C, N);
}
5,467 | #include "includes.h"
//original c by brade conte, ported to CUDA by jody
#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
// Per-message SHA-256 state: input buffer, length counters and the 8 hash words.
typedef struct{
uchar data[64];   // current 512-bit input block being accumulated
uint datalen;     // number of bytes currently buffered in `data`
uint bitlen[2];   // total message length in bits, as a 64-bit value split in two words
uint state[8];    // running hash state (a..h)
} SHA256_CTX;
// SHA-256 round constants (standard k table).
__device__ uint k[64] = {
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
// Context arrays — presumably one SHA256_CTX per GPU thread; verify against callers.
SHA256_CTX *cpuSHA_CTX;
SHA256_CTX *gpuSHA_CTX;
// Default launch configuration used elsewhere in the program.
int BLOCKS = 10;
int THREADS = 500;
extern "C"
extern "C" // NOTE(review): duplicated linkage specifier — one of the two is redundant
// SHA-256 compression function: mixes one 64-byte block `data` into the
// state of this thread's context (ctx[q], q = global thread id).
__device__ void sha256_transform(SHA256_CTX *ctx, uchar data[])
{
int q = blockIdx.x * blockDim.x + threadIdx.x;
uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];
// Build the 64-word message schedule: first 16 words are the block bytes
// (big-endian), the rest are derived via the SIG0/SIG1 recurrences.
for (i=0,j=0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
for ( ; i < 64; ++i)
m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];
// Load the working variables from this thread's running state.
a = ctx[q].state[0];
b = ctx[q].state[1];
c = ctx[q].state[2];
d = ctx[q].state[3];
e = ctx[q].state[4];
f = ctx[q].state[5];
g = ctx[q].state[6];
h = ctx[q].state[7];
// 64 compression rounds.
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
// Fold the round results back into the running state.
ctx[q].state[0] += a;
ctx[q].state[1] += b;
ctx[q].state[2] += c;
ctx[q].state[3] += d;
ctx[q].state[4] += e;
ctx[q].state[5] += f;
ctx[q].state[6] += g;
ctx[q].state[7] += h;
}
// Feeds `len` bytes of `data` into this thread's context; each time the
// 64-byte buffer fills, a compression round runs and the bit-length counter
// advances by 512 (via the DBL_INT_ADD 64-bit add macro).
// NOTE(review): every thread consumes the same `data` — presumably each
// thread hashes an identical message in parallel; confirm against callers.
__global__ void sha256_update(SHA256_CTX *ctx, uchar *data, uint len)
{
int q = blockIdx.x * blockDim.x + threadIdx.x;
uint i;
for (i=0; i < len; ++i) {
ctx[q].data[ctx[q].datalen] = data[i];
ctx[q].datalen++;
if (ctx[q].datalen == 64) {
sha256_transform(ctx,ctx[q].data);
DBL_INT_ADD(ctx[q].bitlen[0],ctx[q].bitlen[1],512);
ctx[q].datalen = 0;
}
}
}
5,468 | #include <stdio.h>
// Prints the CUDA driver/runtime versions and a one-line summary per device.
int main()
{
int driverVersion = 0;
cudaDriverGetVersion(&driverVersion);
printf("CUDA driver: %d\n", driverVersion);
int runtimeVersion = 0;
cudaRuntimeGetVersion(&runtimeVersion);
printf("CUDA runtime: %d\n", runtimeVersion);
int numDevices = 0;
cudaError_t stat = cudaGetDeviceCount(&numDevices);
// The status was previously stored but never checked.
if (stat != cudaSuccess)
{
fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(stat));
return 1;
}
for (int i = 0; i < numDevices; i++)
{
cudaDeviceProp prop;
stat = cudaGetDeviceProperties(&prop, i);
if (stat != cudaSuccess)
{
fprintf(stderr, "cudaGetDeviceProperties(%d) failed: %s\n", i, cudaGetErrorString(stat));
continue;
}
/* totalGlobalMem is size_t: %zu is the portable specifier (was %lu) */
printf("%d: %s, CC %d.%d, %d SMs running at %dMHz, %zuMB\n", i, prop.name,
prop.major, prop.minor,
prop.multiProcessorCount,
prop.clockRate/1000,
prop.totalGlobalMem/1024/1024);
}
return 0;
}
|
5,469 | #include <cuda.h>
#include <cuda_runtime_api.h>
#define N_LOOPS 100
#define N_FLOPS_PER_BLOCK 24
#define N_FLOPS_PER_LOOP 76800
#define N_FLOPS_PER_KERNEL 76816
#define SHARED_MEM_SIZE 12000
#define FLOPS_BLOCK \
reg0 = reg1 * reg2 + reg3; \
reg5 = reg6 * reg6; \
reg1 = reg2 * reg3 + reg4; \
reg6 = reg7 * reg7; \
reg2 = reg3 * reg4 + reg5; \
reg7 = reg0 * reg0; \
reg3 = reg4 * reg5 + reg6; \
reg0 = reg1 * reg1; \
reg4 = reg5 * reg6 + reg7; \
reg1 = reg2 * reg2; \
reg5 = reg6 * reg7 + reg0; \
reg2 = reg3 * reg3; \
reg6 = reg7 * reg0 + reg1; \
reg3 = reg4 * reg4; \
reg7 = reg0 * reg1 + reg2; \
reg4 = reg5 * reg5;
extern __shared__ char array[];
//-----------------------------------------------------------------------------
// Simple test kernel template for flops test
// @param d_counters - Counters to hold how many FLOPs a kernel does.
// @param n_threads - Total number of threads per block
//-----------------------------------------------------------------------------
__global__
void max_flops_kernel(float* d_counters) {
  // Global thread id, used to index this thread's output counter.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // Registers consumed by the FLOPS_BLOCK macro (names are fixed by it).
  float reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  // 1 FLOP per assignment = 8 FLOPs total
  reg0 = reg1 = reg2 = reg3 = 9.765625e-10f * threadIdx.x;
  reg4 = reg5 = reg6 = reg7 = 9.765625e-10f * array[0];
  // Loop variable renamed from `i`, which shadowed the thread id above.
  for(int loop = 0; loop < N_LOOPS; ++loop){
    FLOPS_BLOCK // 1
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK // 8
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK // 16
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK // 24
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK
    FLOPS_BLOCK // 32
  }
  // 8 More flops.
  reg0 = reg0 + reg1 + reg2 + reg3 + reg4 + reg5 + reg6 + reg7 + 8.0f;
  d_counters[i] = reg0;
  // Keep the shared write (prevents dead-code elimination) but aim at the
  // last valid byte: the original wrote array[SHARED_MEM_SIZE], one byte
  // past the end of the dynamic shared allocation.
  array[SHARED_MEM_SIZE - 1] = (char) reg0;
}
|
5,470 | #include "cuda_ellipse_overlaps.cuh"
#include <vector>
#define PI 3.14159265358979
using std::vector;
// Solves the quadratic (in x) obtained by fixing y in the implicit conic
// whose six coefficients are in `elpparm` (produced by ELPShape2Equation).
// Writes the two intersection abscissae sorted ascending, or the sentinel
// pair (-10, -20) when the horizontal line misses the ellipse.
__device__ inline void CalculateRangeAtY(double *elpparm, double y, double *x1, double *x2)
{
    double A = elpparm[0];
    double B = elpparm[1];
    double C = elpparm[2];
    double D = elpparm[3];
    double E = elpparm[4];
    double F = elpparm[5];
    // Discriminant of A*x^2 + 2(B*y + D)*x + (C*y^2 + 2E*y + F) = 0.
    double Delta = pow(B*y + D, 2) - A*(C*y*y + 2 * E*y + F);
    if (Delta < 0)
    {
        *x1 = -10;
        *x2 = -20;
        return;
    }
    double root = sqrt(Delta);
    double lo = (-(B*y + D) - root) / A;
    double hi = (-(B*y + D) + root) / A;
    if (hi < lo)
    {
        double swapTmp = lo;
        lo = hi;
        hi = swapTmp;
    }
    *x1 = lo;
    *x2 = hi;
}
// Converts an ellipse given as shape parameters (center xc,yc; full axes
// elpshape[2], elpshape[3] — halved into a,b; rotation theta) into the six
// implicit conic coefficients, normalized by sqrt(|A*C - B^2|).
// NOTE(review): parm[3]/parm[4] mix a*a and b*b denominators in a way that
// is hard to match to the textbook formula — confirm against a reference
// derivation before relying on them outside the paired CalculateRangeAtY.
__device__ inline void ELPShape2Equation(double *elpshape, double *outparms)
{
double xc, yc, a, b, theta;
xc = elpshape[0], yc = elpshape[1], a = elpshape[2]/2, b = elpshape[3]/2, theta = elpshape[4];
double parm[6];
parm[0] = cos(theta)*cos(theta) / (a*a) + pow(sin(theta), 2) / (b*b);
parm[1] = -(sin(2 * theta)*(a*a - b*b)) / (2 * a*a*b*b);
parm[2] = pow(cos(theta), 2) / (b*b) + pow(sin(theta), 2) / (a*a);
parm[3] = (-a*a*xc*pow(sin(theta), 2) + a*a*yc*sin(2 * theta) / 2) / (a*a*b*b) - (xc*pow(cos(theta), 2) + yc*sin(2 * theta) / 2) / (a*a);
parm[4] = (-a*a*yc*pow(cos(theta), 2) + a*a*xc*sin(2 * theta) / 2) / (a*a*b*b) - (yc*pow(sin(theta), 2) + xc*sin(2 * theta) / 2) / (a*a);
parm[5] = pow(xc*cos(theta) + yc*sin(theta), 2) / (a*a) + pow(yc*cos(theta) - xc*sin(theta), 2) / (b*b) - 1;
// Normalize so downstream discriminant arithmetic is scale-invariant.
double k = parm[0] * parm[2] - parm[1] * parm[1];
for (int i = 0; i < 6; i++)
outparms[i] = parm[i] / sqrt(fabs(k));
}
// Estimates the overlap ratio of two ellipses (shape parameters in elp1/elp2)
// by scanning horizontal lines at `search_step` spacing, summing the length
// of the intersection of the two [x1,x2] chords on each line, and dividing
// the resulting area by the union of the two ellipse areas.
// Writes the ratio into *_ration.
__device__ inline void CalculateOverlap(double *elp1, double *elp2, double *_ration)
{
/*
for(int i=0; i< 5;i++)
{
std::cout<<elp1[i]<<std::endl;
}
for(int i=0; i< 5;i++)
{
std::cout<<elp2[i]<<std::endl;
}
*/
double parm1[6], parm2[6];
// Convert both ellipses to implicit conic form once, up front.
ELPShape2Equation(elp1, parm1);
ELPShape2Equation(elp2, parm2);
// Vertical extent of each ellipse (conservative: uses the larger axis),
// intersected to bound the scan range.
double y1_min, y1_max, y2_min, y2_max, y_min, y_max;
y1_min = elp1[1] - fmax(elp1[2], elp1[3]); y1_max = elp1[1] + fmax(elp1[2], elp1[3]);
y2_min = elp2[1] - fmax(elp2[2], elp2[3]); y2_max = elp2[1] + fmax(elp2[2], elp2[3]);
y_min = floor(fmax(y1_min, y2_min));
y_max = ceil(fmin(y1_max, y2_max));
double search_step = 0.2;
double S12 = 0;
// Accumulate chord-overlap lengths; +1e-6 guards the float loop endpoint.
for (double i = y_min; i <= y_max+1e-6; i = i + search_step)
{
double x11, x12, x21, x22;
CalculateRangeAtY(parm1, i, &x11, &x12);
CalculateRangeAtY(parm2, i, &x21, &x22);
//mexPrintf("[%.4f,%.4f],[%.4f,%.4f]\n", x11, x12, x21, x22);
// x1 <= x2 only when the scanline actually hits the ellipse
// (the miss sentinel is x1=-10 > x2=-20).
if (x11 <= x12&& x21 <= x22)
{
if (x11 <= x21 && x12 >= x21)
{
if (x12 < x22)
{
S12 += x12 - x21;
}
else
{
S12 += x22 - x21;
}
}
else if (x21 <= x11 && x22 >= x11)
{
if (x22 < x12)
{
S12 += x22 - x11;
}
else
{
S12 += x12 - x11;
}
}
}
}
//mexPrintf("%.4f\n", S12);
// intersection / (area1 + area2 - intersection); ellipse area uses
// full-axis inputs, hence the /4.
*_ration = S12 *search_step / (PI*elp1[2] * elp1[3]/4 + PI*elp2[2] * elp2[3]/4 - S12*search_step);
}
5,471 | #include<iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
int main(){
    // Fill a large host_vector with 0..size-1, then print the final 5000
    // entries as a sanity check.
    const int size = (int)1.6e8;
    thrust::host_vector<int32_t> test(size);
    for (int idx = 0; idx < size; ++idx) {
        test[idx] = idx;
    }
    for (int idx = size - 5000; idx < size; ++idx) {
        std::cout << test[idx] << " " ;
    }
    return 0;
}
5,472 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define MATRIX_SIZE 10000
#define RANGE_NUMBERS 10
#define DIM_THREADS 32
// Row-major store: writes v at row i, column j of a MATRIX_SIZE-wide matrix.
__host__ __device__ inline void setAt(int *m, int i, int j, int v) {
    m[i * MATRIX_SIZE + j] = v;
}
// Row-major load: reads row i, column j of a MATRIX_SIZE-wide matrix.
__host__ __device__ inline int getAt(int *m, int i, int j) {
    return m[i * MATRIX_SIZE + j];
}
/*__global__ void matrix_mult_shared_mem(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ int shared_a[MATRIX_SIZE*MATRIX_SIZE];
__shared__ int shared_b[MATRIX_SIZE*MATRIX_SIZE];
for(int aux = 0; aux < MATRIX_SIZE; aux++) {
setAt(shared_a, i, aux, getAt(a, i, aux));
setAt(shared_b, i, aux, getAt(b, i, aux));
}
__syncthreads();
int sum = 0;
for(int it = 0; it < MATRIX_SIZE; it++) {
sum += (getAt(shared_a, i, it) * getAt(shared_b, it, j)) % 50;
}
setAt(c, i, j, sum);
}*/
// One thread per output element: c[i][j] = sum over it of (a[i][it]*b[it][j]) % 50.
__global__ void matrix_mult(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Guard: a grid whose dimensions do not exactly tile MATRIX_SIZE would
// otherwise read and write out of bounds.
if (i >= MATRIX_SIZE || j >= MATRIX_SIZE)
return;
int sum = 0;
for(int it = 0; it < MATRIX_SIZE; it++) {
sum += (getAt(a, i, it) * getAt(b, it, j)) % 50;
}
setAt(c, i, j, sum);
}
// Benchmarks matrix_mult for several block sizes, timing copy + kernel + copy.
int main(int argc, char **argv) {
printf("Starting\n");
srand(time(0));
// double instead of float: the second/nanosecond arithmetic below loses
// precision in a float accumulator.
double etime;
struct timespec t_start, t_end;
size_t size = sizeof(int) * MATRIX_SIZE * MATRIX_SIZE;
int * a, * b, * c;
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
// fill matrices
for(int i = 0; i < MATRIX_SIZE; i++) {
for(int j = 0; j < MATRIX_SIZE; j++){
setAt(a, i, j, rand() % RANGE_NUMBERS);
setAt(b, i, j, rand() % RANGE_NUMBERS);
}
}
int * d_a, * d_b, * d_c;
// alloc memory in device
cudaMalloc((void **) &d_a, size);
cudaMalloc((void **) &d_b, size);
cudaMalloc((void **) &d_c, size);
for(int dim_threads = DIM_THREADS; dim_threads >= 1; dim_threads >>= 1) {
clock_gettime(CLOCK_REALTIME, &t_start);
// copy matrices do device memory
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(dim_threads, dim_threads);
// NOTE(review): this truncating division under-covers MATRIX_SIZE when
// dim_threads does not divide it (e.g. 10000/32) — the last rows/cols of
// c are never computed. A ceil-div here requires a bounds guard in the
// kernel; confirm and fix together.
dim3 numBlocks(MATRIX_SIZE / threadsPerBlock.x, MATRIX_SIZE / threadsPerBlock.y);
// call function in device
matrix_mult<<<numBlocks,threadsPerBlock>>>(d_a, d_b, d_c);
// get data from device memory (cudaMemcpy blocks until the kernel ends)
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &t_end);
etime = (t_end.tv_sec + t_end.tv_nsec / 1000000000.) -
(t_start.tv_sec + t_start.tv_nsec / 1000000000.);
printf("\nNum threads per block: %dx%d Time spent: %lf\n", dim_threads,dim_threads, etime);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("CUDA Error: %s\n",cudaGetErrorString(err));
}
}
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
// Host buffers were previously leaked.
free(a); free(b); free(c);
return 0;
}
|
5,473 | #include "includes.h"
// Element-wise vector add: y[i] += x[i], one element per thread.
__global__ void add(int n, float *x, float *y)
{
    const int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    y[gid] = x[gid] + y[gid];
}
5,474 | // downloaded from: https://www.cs.usfca.edu/~peter/cs625/code/cuda/vec_add.cu
/* File: vec_add.cu
* Purpose: Implement vector addition on a gpu using cuda
*
* Compile: nvcc [-g] [-G] -o vec_add vec_add.cu
* Run: ./vec_add <n>
* n is the vector length
*
* Input: None
* Output: Result of vector addition. If all goes well it should
* be a vector consisting of n copies of n+1.
*
* Notes:
* 1. CUDA is installed on all of the machines in HR 530, HR 235,
* and CSI G12
* 2. If you get something like "nvcc: command not found" when you try
* to compile your program. Type the following command
*
* $ export PATH=/usr/local/cuda/bin:$PATH
*
* (As usual the "$" is the shell prompt: just type the rest
* of the line.)
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Kernel for vector addition */
__global__ void Vec_add(float x[], float y[], float z[], int n) {
   /* Flat global index: block b owns elements [b*blockDim.x, (b+1)*blockDim.x). */
   int gid = blockDim.x * blockIdx.x + threadIdx.x;

   /* The grid may overshoot n; skip the overhang threads. */
   if (gid >= n) return;
   z[gid] = x[gid] + y[gid];
}  /* Vec_add */
/* Host code */
/* Host driver: parses n, builds x = 1..n and y = n..1, adds them on the GPU
 * and prints the result (every component should equal n+1). */
int main(int argc, char* argv[]) {
int n, i;
float *h_x, *h_y, *h_z;
float *d_x, *d_y, *d_z;
int threads_per_block;
int block_count;
size_t size;
/* Get number of components in vector */
if (argc != 2) {
fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
exit(0);
}
n = strtol(argv[1], NULL, 10);
/* n <= 0 (including unparsable input) would make block_count 0 and the
* kernel launch invalid. */
if (n <= 0) {
fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
exit(0);
}
size = n*sizeof(float);
/* Allocate input vectors in host memory */
h_x = (float*) malloc(size);
h_y = (float*) malloc(size);
h_z = (float*) malloc(size);
/* Initialize input vectors */
for (i = 0; i < n; i++) {
h_x[i] = i+1;
h_y[i] = n-i;
}
printf("h_x = ");
for (i = 0; i < n; i++)
printf("%.1f ", h_x[i]);
printf("\n");
printf("h_y = ");
for (i = 0; i < n; i++)
printf("%.1f ", h_y[i]);
printf("\n\n");
/* Allocate vectors in device memory */
cudaMalloc(&d_x, size);
cudaMalloc(&d_y, size);
cudaMalloc(&d_z, size);
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
/* Define block size */
threads_per_block = 256;
/* Ceil-div guarantees at least one thread per vector component. */
block_count = (n + threads_per_block - 1)/threads_per_block;
/* Invoke kernel using block_count blocks of threads_per_block threads */
Vec_add<<<block_count, threads_per_block>>>(d_x, d_y, d_z, n);
/* Wait for the kernel to complete.
* (cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize.) */
cudaDeviceSynchronize();
/* Copy result from device memory to host memory */
cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
printf("The sum is: \n");
for (i = 0; i < n; i++)
printf("%.1f ", h_z[i]);
printf("\n");
/* Free device memory */
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
/* Free host memory */
free(h_x);
free(h_y);
free(h_z);
return 0;
} /* main */
5,475 | #include <iostream>
#include <stdio.h>
#include <stdbool.h> /*helps with bool data type*/
#include <string.h> /* memset */
#include <unistd.h> /* close */
#include <emmintrin.h>
#include <sys/time.h> /*allows system type*/
struct timeval start, end;
// Records the start timestamp in the file-level `start` timeval.
void starttime() {
    gettimeofday(&start, NULL);
}
// Stops the clock started by starttime() and prints "<label>: <ms> ms".
void endtime(const char* c) {
    gettimeofday(&end, NULL);
    double ms = (end.tv_sec - start.tv_sec) * 1000.0
              + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("%s: %f ms\n", c, ms);
}
//initializes matrix values to 0,
//AS OF RIGHT NOW , ONLY USES THREADS!!!
__global__ void initTo(int *matx, int vs, int set){
    // Thread-stride fill of an int buffer (single-block launches only,
    // matching how the program invokes it).
    for(int pos = threadIdx.x; pos < vs; pos += blockDim.x){
        matx[pos] = set;
    }
}
//initializes truth table to true
////AS OF RIGHT NOW , ONLY USES THREADS!!!
__global__ void initTo(bool *t_table, int vs, bool set){
    // Thread-stride fill of a bool buffer (single-block launches only,
    // matching how the program invokes it).
    for(int pos = threadIdx.x; pos < vs; pos += blockDim.x){
        t_table[pos] = set;
    }
}
// GPU validity check: for each candidate path (row of `paths`), clears
// t_table[p] when two consecutive vertices in path p share no edge in the
// (symmetrically stored, but checked both ways) adjacency matrix.
__global__ void checkPathsTH(int *matrix, int vertices, int pathsize, int *paths, bool *t_table){
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Grid-stride loop. The original used stride = blockDim.x (ignoring
// gridDim) and then indexed every access with `index` instead of the loop
// variable `i`, so paths were re-checked redundantly and the loop only
// produced correct results by accident for the specific launch used.
int stride = gridDim.x * blockDim.x;
for(int i = index; i < pathsize; i += stride){
int *p = paths + (i * vertices);   // this thread's candidate path
for(int j = 0; j < vertices; j++){
bool broken = false;
if(j == 0){
// first vertex: only the edge to its successor matters
broken = (matrix[(p[j] * vertices) + p[j+1]] != 1 &&
          matrix[(p[j+1] * vertices) + p[j]] != 1);
}else if(j < vertices - 1){
// interior vertex: must connect to both neighbours
broken = ((matrix[(p[j] * vertices) + p[j+1]] != 1 &&
           matrix[(p[j+1] * vertices) + p[j]] != 1)
          ||
          (matrix[(p[j] * vertices) + p[j-1]] != 1 &&
           matrix[(p[j-1] * vertices) + p[j]] != 1));
}else{
// last vertex: only the edge to its predecessor matters
broken = (matrix[(p[j] * vertices) + p[j-1]] != 1 &&
          matrix[(p[j-1] * vertices) + p[j]] != 1);
}
if(broken){
t_table[i] = false;
break;
}
}
}
}
// CPU reference implementation of the path check: for every candidate path
// (row of ptr_path) clears possiblePaths[outcount] when two consecutive
// vertices share no edge. Mirrors checkPathsTH.
// NOTE(review): calls endtime("NORMAL") on exit — it assumes the caller
// invoked starttime() immediately before calling this function.
void checkPathLocal(int *ptr_path,int vertices,int pathsize, int *matrix, bool *possiblePaths ){
for(int outcount = 0; outcount < pathsize;outcount++){
for (int i = 0; i < vertices; i++){
//the first one only checks the next one, checks if the path is even possible
if(i == 0){
if(matrix[(ptr_path[(outcount*vertices)+i] * vertices) + ptr_path[(outcount*vertices)+i+1]] != 1 &&
matrix[(ptr_path[(outcount*vertices)+i+1] * vertices) + ptr_path[(outcount*vertices)+i]] != 1)
{
possiblePaths[outcount] = false;
break;
}
}
// interior vertex: must be connected to both its neighbours
else if(i > 0 && i < vertices - 1){
if( (matrix[(ptr_path[(outcount*vertices)+i] * vertices) + ptr_path[(outcount*vertices)+i+1]] != 1 &&
matrix[(ptr_path[(outcount*vertices)+i+1] * vertices) + ptr_path[(outcount*vertices)+i]] != 1)
||
(matrix[(ptr_path[(outcount*vertices)+i] * vertices) + ptr_path[(outcount*vertices)+i-1]] != 1 &&
matrix[(ptr_path[(outcount*vertices)+i-1] * vertices) + ptr_path[(outcount*vertices)+i]] != 1)
)
{
possiblePaths[outcount] = false;
break;
}
}
// last vertex: only the edge back to its predecessor matters
else if(i == vertices - 1){
if( matrix[(ptr_path[(outcount*vertices)+i] * vertices) + ptr_path[(outcount*vertices)+i-1]] != 1 &&
matrix[(ptr_path[(outcount*vertices)+i-1] * vertices) + ptr_path[(outcount*vertices)+i]] != 1)
{
possiblePaths[outcount] = false;
break;
}
}
}
}
endtime("NORMAL");
}
//just makes unweighted edges in a 1D array
// Adds an undirected, unweighted edge between `to` and `from` in the flat
// row-major adjacency matrix `num` (vertices x vertices).
void makeEdge(int to, int from, int *num, int vertices) {
    num[to * vertices + from] = 1;
    num[from * vertices + to] = 1;
}
//swaps values, assists permute
// Exchanges a[i] and a[j] and returns the value now stored at a[j]
// (i.e. the original a[i]) — permute() relies on this return value.
int swap(int a[], int i, int j)
{
    const int held = a[i];
    a[i] = a[j];
    a[j] = held;
    return held;
}
//places in array
// Copies `ha` (length `vertices`) into the first unused row of the flat
// path table; a row counts as unused while its first entry still holds the
// -1 sentinel. Does nothing when the table is already full.
void getArray(int ha[] , int *ptr_path , int vertices, int pathsize){
    for(int row = 0; row < pathsize; row++){
        if(ptr_path[row * vertices] != -1){
            continue;   // row already occupied
        }
        for(int col = 0; col < vertices; col++){
            ptr_path[row * vertices + col] = ha[col];
        }
        return;         // stop after filling one row
    }
}
// Recursively generates every permutation of str[l..r] (Heap-style swap
// recursion) and appends each completed permutation to the flat path table
// via getArray().
void permute(int str[] , int l, int r, int *ptr_path, int vertices, int pathsize){
//printf("Value of paths = %p\n",ptr_path);
if (l == r){
//array created to save each combination
int raw [vertices];
//copy the finished permutation and store it in the table
for(int j=0; j < vertices ;j++){
//printf("%i", str[j]);
raw[j] = str[j];
}
getArray(raw,ptr_path,vertices,pathsize);
//printf("+++\n");
//count = 0;
}
else
{
for (int i = l; i <= r; i++)
{
// swap() returns the value it just stored in str[i], so this
// assignment is a no-op beyond the swap itself.
str[i] = swap(str,l,i);
//permute(str, l+1, r);
permute(str, l+1, r,ptr_path, vertices, pathsize);
// undo the swap to restore str before the next iteration
str[i] = swap(str,l,i);
}
}
}
// Builds a 5-vertex graph, enumerates all vertex permutations, and checks
// which are valid paths — once on the CPU, once on the GPU — timing both.
int main(void) {
const int vertices = 5;
// Unified memory so the host helpers and GPU kernels share the buffers.
int *matrix;
cudaMallocManaged(&matrix, (vertices*vertices)*sizeof(int));
initTo<<<1,100>>>(matrix,vertices*vertices,0);
cudaDeviceSynchronize();
// pathsize = vertices! — one table row per vertex permutation
int pathsize = 1;
for (int i = 1; i < vertices + 1; i++)
{
pathsize = pathsize * i;
}
printf("%i\n",pathsize);
bool *possiblePaths;
cudaMallocManaged(&possiblePaths, (pathsize)*sizeof(bool));
initTo<<<1,100>>>(possiblePaths,pathsize,true);
cudaDeviceSynchronize();
int *allPaths;
cudaMallocManaged(&allPaths, (pathsize*vertices)*sizeof(int));
initTo<<<1,100>>>(allPaths,pathsize*vertices,-1);
cudaDeviceSynchronize();
//creating the graph
// ---
//| / |
// ---
makeEdge(0,2,matrix,vertices);
makeEdge(0,3,matrix,vertices);
makeEdge(1,4,matrix,vertices);
makeEdge(1,3,matrix,vertices);
makeEdge(2,4,matrix,vertices);
printf("The adjacency matrix for the given graph is: ");
printf("\n ");
for (int i = 0; i < vertices; i++)
printf("%i ",i+1);
for (int j = 0; j < vertices*vertices; j++) {
if(j % vertices == 0)
// row label: was (j/4)+1, a hard-coded 4 that mislabels rows
// whenever vertices != 4
printf("\n%i ",(j/vertices)+1);
printf("%i ", matrix[j]);
}
printf("\n");
printf("%i\n",allPaths[0]);
printf("%i\n",allPaths[95]);
// Generate all permutations of 0..vertices-1 into allPaths.
int arr[vertices];
for(int pattern = 0 ; pattern < vertices; pattern++){
arr[pattern] = pattern;
}
permute(arr,0,vertices - 1,allPaths,vertices,pathsize);
for (int i = 0; i < pathsize; i++)
{
if(i<10)
printf(" %i |",i);
else
printf("%i |",i);
for (int j = 0; j < vertices; j++)
{
printf("%d ", allPaths[(i*vertices)+j]);
}
printf("\n");
}
// CPU pass (checkPathLocal calls endtime("NORMAL") internally).
starttime();
checkPathLocal(allPaths,vertices,pathsize,matrix,possiblePaths);
cudaDeviceSynchronize();
printf("POSSIBLE PATHS\n");
for(int wow = 0 ; wow<pathsize;wow++){
if(possiblePaths[wow] == true){
for(int innerwow = 0 ; innerwow < vertices ; innerwow++){
printf("%i", allPaths[(wow*vertices)+innerwow]);
}
printf("\n");
}
}
printf("\n");
// Reset the truth table, then repeat the check on the GPU.
initTo<<<1,100>>>(possiblePaths,pathsize,true);
cudaDeviceSynchronize();
starttime();
checkPathsTH<<<2,75>>>(matrix,vertices,pathsize,allPaths,possiblePaths);
// Kernel launches are asynchronous: synchronize BEFORE stopping the clock.
// The original called endtime() first and only measured launch overhead.
cudaDeviceSynchronize();
endtime("GPU THREADS");
printf("POSSIBLE PATHS\n");
for(int wow = 0 ; wow<pathsize;wow++){
if(possiblePaths[wow] == true){
for(int innerwow = 0 ; innerwow < vertices ; innerwow++){
printf("%i", allPaths[(wow*vertices)+innerwow]);
}
printf("\n");
}
}
printf("\n");
// Free memory
cudaFree(matrix);
cudaFree(possiblePaths);
cudaFree(allPaths);
return 0;
}
5,476 | #include "includes.h"
// Element-wise vector add: C_d[i] = A_d[i] + B_d[i] for i < WORK_SIZE.
__global__ void vecAdd(unsigned int *A_d, unsigned int *B_d, unsigned int *C_d, int WORK_SIZE) {
    // Flat global thread index.
    const int gid = blockIdx.x*blockDim.x+threadIdx.x;
    // Grid may overshoot WORK_SIZE; skip the overhang.
    if (gid >= WORK_SIZE)
        return;
    C_d[gid] = A_d[gid] + B_d[gid];
}
5,477 | #include <stdio.h>
#define RADIUS 4
#define BLOCK_DIM_X 32
__constant__ float coef[RADIUS + 1];
// 1-D antisymmetric stencil: out[i] = sum_{x=0..RADIUS} coef[x] * (in[i+x] - in[i-x]).
// (The x == 0 term is always zero.) Loads a shared tile of blockDim.x
// elements plus RADIUS halo cells on each side; assumes blockDim.x == BLOCK_DIM_X.
__global__
void stencil(float* out, float* in) {
__shared__ float smem[BLOCK_DIM_X + (2 * RADIUS)];
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int sidx = threadIdx.x + RADIUS;  // index into the shared tile, shifted past the left halo
smem[sidx] = in[idx];
// First RADIUS threads also load the left and right halos.
// NOTE(review): in[idx - RADIUS] goes negative for the first block and
// in[idx + blockDim.x] runs past the input for the last block — the caller
// must provide a buffer padded by RADIUS on each side (confirm at call sites).
if (threadIdx.x < RADIUS) {
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + blockDim.x] = in[idx + blockDim.x];
}
// Tile fully populated before any thread reads its neighbours.
__syncthreads();
float tmp = 0.0f;
#pragma unroll
for (int x = 0; x <= RADIUS; x++) {
tmp += coef[x] * (smem[sidx + x] - smem[sidx - x]);
}
out[idx] = tmp;
}
// Prints `count` floats as left-aligned integers on one line.
void printArray(float* array, int count) {
    for (int pos = 0; pos < count; ++pos) {
        printf("%-3d", int(array[pos]));
    }
    printf("\n");
}
// Runs the stencil kernel over `count` ones and prints the result.
int main(void) {
printf("\n");
int count = 25;
dim3 block(BLOCK_DIM_X);
dim3 grid(1);
const float h_coef[] = { 1, 1, 1, 1, 1 };
cudaMemcpyToSymbol(coef, h_coef, (RADIUS + 1) * sizeof(float));
float* host_array = (float*)malloc(count*sizeof(float));
for (int x = 0; x < count; x++) { host_array[x] = 1; }
float* host_result_array = (float*)malloc(count*sizeof(float));
// The kernel runs BLOCK_DIM_X threads and each thread touches
// in[idx - RADIUS] .. in[idx + blockDim.x], so the exact-size buffers used
// before (count = 25 floats) were read and written out of bounds.
// Pad the input with a zeroed RADIUS-cell halo on each side of a full
// BLOCK_DIM_X span, and size the output for the whole block.
int padded = BLOCK_DIM_X + 2 * RADIUS;
float *device_array, *device_result_array;
cudaMalloc((float**)&device_array, padded*sizeof(float));
cudaMemset(device_array, 0, padded*sizeof(float));
cudaMemcpy(device_array + RADIUS, host_array, count*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((float**)&device_result_array, BLOCK_DIM_X*sizeof(float));
// Pass the pointer past the left halo so idx - RADIUS stays in bounds.
stencil<<<grid, block>>>(device_result_array, device_array + RADIUS);
cudaDeviceSynchronize();
cudaMemcpy(host_result_array, device_result_array, count*sizeof(float), cudaMemcpyDeviceToHost);
printArray(host_result_array, count);
cudaFree(device_array);
cudaFree(device_result_array);
// Host buffers were previously leaked.
free(host_array);
free(host_result_array);
cudaDeviceReset();
printf("\n");
return 0;
}
5,478 | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define BLOCK_SIZE 1024
// first kernel - does scan for each block
// Inclusive per-block Hillis-Steele sum scan of d_input into d_output.
__global__ void blockSumScanKernel(float* d_input, float* d_output, size_t size)
{
__shared__ float blockOutput[BLOCK_SIZE];
// indexing variable
int i = threadIdx.x + blockIdx.x * blockDim.x;
// Zero-fill the tail of the last block so no thread ever reads an
// uninitialized shared cell (the original left it uninitialized).
blockOutput[threadIdx.x] = (i < (int)size) ? d_input[i] : 0.0f;
// All threads must run the same number of iterations: the original loop
// bound depended on threadIdx.x, placing __syncthreads() inside divergent
// control flow (undefined behavior). Out-of-range lanes add 0 instead.
for(int step = 1; step < BLOCK_SIZE; step *= 2)
{
__syncthreads();
float chunk = (threadIdx.x >= step) ? blockOutput[threadIdx.x - step] : 0.0f;
__syncthreads();
blockOutput[threadIdx.x] += chunk;
}
if(i < (int)size)
{
d_output[i] = blockOutput[threadIdx.x];
}
}
// second kernel - seals together results for all blocks from previous kernel into one output array
// Second pass: adds the running totals of all preceding blocks (each block's
// last scanned element) to every element, turning per-block scans into a
// global scan.
__global__ void sealingSumScanKernel(float* d_output, size_t size)
{
// indexing variable
int i = threadIdx.x + blockIdx.x * blockDim.x;
// Guard: the grid is rounded up, so trailing threads must not touch
// d_output past `size` (the original had no bound check).
if((size_t)i >= size)
{
return;
}
// Number of complete blocks before this element.
int numOfValues = (i - (i % BLOCK_SIZE)) / BLOCK_SIZE;
for(int j = 1; j <= numOfValues; ++j)
{
d_output[i] += d_output[j * BLOCK_SIZE - 1];
}
}
// CUDA error checking
void errorCheck(unsigned int line)
{
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess)
{
printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
}
// host function containing kernel call
// Host driver: copies `input` to the device, runs the two-kernel inclusive
// sum scan, and copies the result back into `output`.
void sumScan(float* input, float* output, size_t size)
{
    const dim3 gridDims(ceil(size / (float) BLOCK_SIZE), 1, 1);
    const dim3 blockDims(BLOCK_SIZE, 1, 1);
    const size_t numBytes = size * sizeof(float);

    float* d_input;
    float* d_output;
    cudaMalloc((void**) &d_input, numBytes);
    errorCheck(__LINE__);
    cudaMalloc((void**) &d_output, numBytes);
    errorCheck(__LINE__);
    cudaMemcpy(d_input, input, numBytes, cudaMemcpyHostToDevice);
    errorCheck(__LINE__);

    // Pass 1: independent scan within each block.
    blockSumScanKernel<<<gridDims, blockDims>>>(d_input, d_output, size);
    errorCheck(__LINE__);
    cudaFree(d_input);
    errorCheck(__LINE__);

    // Pass 2: propagate block totals across the whole array.
    sealingSumScanKernel<<<gridDims, blockDims>>>(d_output, size);
    errorCheck(__LINE__);
    cudaMemcpy(output, d_output, numBytes, cudaMemcpyDeviceToHost);
    errorCheck(__LINE__);
    cudaFree(d_output);
    errorCheck(__LINE__);
}
// Fills a random array, runs the GPU sum scan and reports elapsed time.
int main()
{
struct timespec start, end;
srand(time(NULL));
size_t size = 4194304;
float* input = (float*) malloc(size * sizeof(float));
float* output = (float*) malloc(size * sizeof(float));
for(size_t i = 0; i < size; ++i)
{
input[i] = rand() % 129 - 64;
}
clock_gettime(CLOCK_REALTIME, &start);
// do sum scan
sumScan(input, output, size);
clock_gettime(CLOCK_REALTIME, &end);
time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
// time_t is not int: printing it with %d was undefined behavior on LP64;
// cast to long for a portable specifier.
printf("Execution time: %ld microseconds.", (long) execTime);
// Host buffers were previously leaked.
free(input);
free(output);
return 0;
}
|
5,479 | /*
Unified Memory in CUDA makes this easy by providing a single memory space accessible by all GPUs
and CPUs in your system. To allocate data in unified memory, call cudaMallocManaged(), which
returns a pointer that you can access from host (CPU) code or device (GPU) code. To free the data,
just pass the pointer to cudaFree().
*/
#include <math.h>
#include <iostream>
// serial
__global__
void add(size_t n, float *x, float *y){
// This is the deliberately-serial baseline, but main launches it with a
// full grid: previously EVERY thread ran the whole loop concurrently,
// racing on the y[i] read-modify-write and double-adding x[i].
// Restrict the work to a single thread so the kernel is actually serial.
if(blockIdx.x == 0 && threadIdx.x == 0){
for(size_t i = 0; i < n; ++i){
y[i] = x[i] + y[i];
}
}
}
// parallel using 1 block
__global__
void add2(int n, float *x, float *y){
    // Thread-stride loop across a single block: thread t handles elements
    // t, t + blockDim.x, t + 2*blockDim.x, ... (additional blocks would
    // repeat the same indices, so this is meant for <<<1, B>>> launches).
    for(int i = threadIdx.x; i < n; i += blockDim.x){
        y[i] = x[i] + y[i];
    }
}
// parallel using grid stride loop
__global__
void add3(int n, float *x, float *y){
    // Grid-stride loop: correct for any grid size, including a 1-block
    // debug launch.
    const int step = blockDim.x * gridDim.x;
    for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step){
        y[i] = x[i] + y[i];
    }
}
//initialize data in kernel to avoid page fault
__global__
void init(int n, float *x, float *y){
    // Initialize both arrays on the device so first-touch happens on the GPU.
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
}
// Demo driver: exercises the serial, single-block, and grid-stride add kernels
// on unified memory, with an explicit prefetch before the last launch.
int main(int argc, char *argv[]){
size_t N = 1 << 20;
float *x, *y;
// Allocate Unified Memory - acessible by GPU and CPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// Initiallize x and y on the Host
for(size_t i = 0; i != N; ++i){
x[i] = 1.0f;
y[i] = 2.0f;
}
// kernel parameters
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// Run kernel with 1M elements on the GPU
std::cout << "\n Launching CUDA kernel add<<<" << numBlocks << ", "
<< blockSize << ">>>" << '\n';
// NOTE(review): add is the serial kernel -- launching it with many threads
// makes every thread write every y[i] (racy, though all write the same value).
add<<<numBlocks, blockSize>>>(N, x, y);
// Initialize data on GPU to avoid page fault
std::cout << "\n Launching CUDA kernel init<<<" << numBlocks << ", "
<< blockSize << ">>>" << '\n';
init<<<numBlocks, blockSize>>>(N, x, y);
// Run kernel with 1M elements on the GPU
std::cout << "\n Launching CUDA kernel add2<<<" << numBlocks << ", "
<< blockSize << ">>>" << '\n';
add2<<<numBlocks, blockSize>>>(N, x, y);
// Prefetch the data to the GPU
int device = -1;
cudaGetDevice(&device);
cudaMemPrefetchAsync(x, N * sizeof(float), device, NULL);
cudaMemPrefetchAsync(y, N * sizeof(float), device, NULL);
std::cout << "\n Launching CUDA kernel add3<<<" << numBlocks << ", "
<< blockSize << ">>>" << '\n';
add3<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors, all values should be 3.0f
// NOTE(review): add2 AND add3 both accumulate after init, so y ends at
// 4.0f and this reports a max error of 1.0f; the 3.0f expectation only
// holds for a single add after init.
float max_error = 0.0f;
for(size_t i = 0; i != N; ++i){
max_error = fmax(max_error, fabs(y[i] - 3.0f));
}
std::cout << "Max error: " << max_error << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
5,480 | // ***************************************************************************
// In Class Activity
// Name: Yujin Yoshimura
// Parallel Programming Date: April 8, 2020
// ***************************************************************************
// This sequential program demonstrates Matrix Multiplication.
//
// For Turing, use the script on the same directory to compile and run.
// TACC Maverick 2 command to compile:
// nvcc YujinYoshimuraSharedMemory.cu -o YujinYoshimuraSharedMemory_Exe
// ***************************************************************************
#include <cuda.h>
#include <stdio.h>
const int ROW = 1024;
// ***************************************************************************
// Function Name: product
// Parameters: int*, int*, int*
// Return: void
// Description: Returns the cross product of two matrices.
// ***************************************************************************
__global__
void product(int* a, int* b, int* c) {
    // Element-wise product staged through shared memory. The 2-block grid of
    // ROW threads covers 10*ROW elements in 5 strided chunks per thread.
    const int local = threadIdx.x;
    const int global = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ int as[ROW];
    __shared__ int bs[ROW];
    __shared__ int cs[ROW];
    for (int chunk = 0; chunk < 5; ++chunk) {
        const int offset = global + ROW * chunk * 2;
        // Stage the operands in shared memory (each thread owns its own slot,
        // so no barrier is required between stages).
        as[local] = a[offset];
        bs[local] = b[offset];
        cs[local] = as[local] * bs[local];
        // Write the product back to global memory.
        c[offset] = cs[local];
    }
}
// ***************************************************************************
// Function Name: main
// Parameters: int, char**
// Return: int
// Description: Main function of the program.
// ***************************************************************************
// Driver: fills three ROW*10 integer arrays, runs the element-wise product
// kernel with 2 blocks of ROW threads, and prints the sum of the result.
// NOTE(review): the three stack arrays total ~120 KB -- near typical stack
// limits; confirm the target's stack size.
int main(int argc, char **argv) {
int matrix_a[ROW * 10];
int matrix_b[ROW * 10];
int matrix_c[ROW * 10];
int i, sum = 0;
int* ad;
int* bd;
int* cd;
const int isize = ROW * 10 * sizeof(int);
// Initialize matrix A, B and C
for (i = 0; i < ROW * 10; i++) {
matrix_a[i] = 2;
matrix_b[i] = 20;
matrix_c[i] = 0;
}
// Allocate memory and copy matrices to global memory
cudaMalloc( (void**)&ad, isize );
cudaMalloc( (void**)&bd, isize );
cudaMalloc( (void**)&cd, isize );
cudaMemcpy( ad, matrix_a, isize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, matrix_b, isize, cudaMemcpyHostToDevice );
cudaMemcpy( cd, matrix_c, isize, cudaMemcpyHostToDevice );
// ROW = 1024 threads per block: exactly the common per-block thread limit.
dim3 dimGrid( 2 , 1 );
dim3 dimBlock( ROW , 1 );
product<<<dimGrid, dimBlock>>>(ad, bd, cd);
// Copy matrix to memory and free global memory
// (cudaMemcpy synchronizes with the kernel before copying back)
cudaMemcpy( matrix_c, cd, isize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
cudaFree( cd );
for (i = 0; i < ROW * 10; i++) {
sum += matrix_c[i];
}
printf("The summation of all the elements is = %d\n", sum);
return EXIT_SUCCESS;
}
|
5,481 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define blockSize 32
#define PROMIEN 30
#define NUM_ELEMENTS 944 //( N = rozmiar tablicy - 2 * R)
#define cudaCheck(error) \
if (error != cudaSuccess) {\
printf("BLAD URUCHOMINIA: %s at %s:%d\n", cudaGetErrorString(error), __FILE__, __LINE__); \
exit(1); \
}
//KERNEL
__global__ void wzorzec_1w(float *in, float *out, int size)
{
    // 1D stencil of radius PROMIEN over `size` output elements. `in` holds
    // size + 2*PROMIEN values (payload plus halo); each block stages its tile
    // in shared memory, then each thread sums its 2*PROMIEN+1 neighborhood.
    __shared__ float temp_in[blockSize + 2 * PROMIEN];
    // Center element for this thread (global index, offset past the left halo).
    int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + PROMIEN;
    // The same element's slot inside the shared tile.
    int lindex = threadIdx.x + PROMIEN;
    int total = size + 2 * PROMIEN;
    // BUG FIX: guard every global access with the input length -- the original
    // kernel ignored `size`, so any grid larger than the data read and wrote
    // out of bounds.
    if (gindex < total) {
        temp_in[lindex] = in[gindex];
    }
    // The first PROMIEN threads also fetch the left and right halos.
    if (threadIdx.x < PROMIEN) {
        if (gindex - PROMIEN < total) {
            temp_in[lindex - PROMIEN] = in[gindex - PROMIEN];
        }
        if (gindex + blockSize < total) {
            temp_in[lindex + blockSize] = in[gindex + blockSize];
        }
    }
    __syncthreads();
    if (gindex - PROMIEN < size) {
        float result = 0;
        for (int i = -PROMIEN; i <= PROMIEN; i++)
        {
            // BUG FIX: read the staged tile (temp_in). The original read
            // in[lindex + i] -- global memory indexed with a *local* index --
            // which is wrong data for every block except block 0 and made
            // the shared-memory tile dead code.
            result += temp_in[lindex + i];
        }
        out[gindex - PROMIEN] = result;
    }
}
int main()
{
    // Driver: random input of NUM_ELEMENTS plus a PROMIEN halo on each side,
    // one stencil pass on the GPU, print input and output.
    unsigned int i;
    float h_in[NUM_ELEMENTS + 2 * PROMIEN], h_out[NUM_ELEMENTS];
    float *d_in, *d_out;
    for (i = 0; i < (NUM_ELEMENTS + 2 * PROMIEN); ++i) {
        float r = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / 100.0));
        h_in[i] = r;
    }
    printf("Tablica poczatkowa: \n");
    for (int i = 0; i < NUM_ELEMENTS + 2 * PROMIEN; i++)
        printf("%.3f ", h_in[i]);
    cudaCheck(cudaMalloc(&d_in, (NUM_ELEMENTS + 2 * PROMIEN) * sizeof(float)));
    cudaCheck(cudaMalloc(&d_out, NUM_ELEMENTS * sizeof(float)));
    cudaCheck(cudaMemcpy(d_in, h_in, (NUM_ELEMENTS + 2 * PROMIEN) * sizeof(float), cudaMemcpyHostToDevice));
    // BUG FIX: size the grid from NUM_ELEMENTS (the number of outputs), not
    // from the padded input size -- the original launched 32 blocks instead of
    // 30, and the extra threads read and wrote past both d_in and d_out.
    int gridSize = (NUM_ELEMENTS + blockSize - 1) / blockSize;
    wzorzec_1w <<< gridSize, blockSize >>> (d_in, d_out, NUM_ELEMENTS);
    cudaCheck(cudaGetLastError());
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize and
    // actually check the result instead of fetching the error into an unused
    // local as the original did.
    cudaCheck(cudaDeviceSynchronize());
    cudaCheck(cudaMemcpy(h_out, d_out, NUM_ELEMENTS * sizeof(float), cudaMemcpyDeviceToHost));
    printf("\nWynik: \n");
    for (int i = 0; i < NUM_ELEMENTS; i++){
        printf("%.3f ", h_out[i]);
    }
    printf("\n");
    cudaFree(d_in);
    cudaFree(d_out);
    cudaCheck(cudaDeviceReset());
    return 0;
}
|
5,482 | #include <stdio.h>
#include <algorithm>
#include <cmath>
__global__
void mish(float* tx, float* aten_mul) {
    // One element per thread; blocks of 512 threads, 64-bit flat indexing.
    const long long idx = (long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x);
    const float x = __ldg(tx + idx);
    // softplus(x) = log1p(exp(x)); above 20 it is numerically x itself,
    // which also avoids overflow in expf.
    const float sp = x > 20.f ? x : log1pf(expf(x));
    // mish(x) = x * tanh(softplus(x))
    aten_mul[idx] = x * tanhf(sp);
}
#include <algorithm>
#include <cmath>
#include <stdio.h>
template<typename T, typename U>
constexpr T ceildiv(T num, U den) {
    // Integer division rounded up (assumes num + den does not overflow T).
    return (num + den - 1) / den;
}
// Benchmark driver: times 500 launches of the mish kernel on 1M elements
// using CUDA events, then verifies against a host-computed reference.
int main() {
constexpr int N = 1 << 20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N * sizeof(float));
y = (float*)malloc(N * sizeof(float));
cudaMalloc(&d_x, N * sizeof(float));
cudaMalloc(&d_y, N * sizeof(float));
// Constant input 3.0f so every output has the same expected value.
for (int i = 0; i < N; i++) {
x[i] = 3.0f;
y[i] = 2.0f;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);
constexpr int blockSize = 512;
constexpr int nBlocks = ceildiv(N, blockSize);
float millis = 0.0f;
float temp = 0.0f;
// Average kernel time over 500 timed launches.
for (int i = 0; i < 500; i++) {
cudaEventRecord(start);
mish<<<nBlocks, blockSize>>>(d_x, d_y);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&temp, start, stop);
millis += temp;
}
millis = millis / 500;
cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
// Reference mish(3.0) computed in double on the host; the comparison is a
// tolerance-free max-abs-difference report, not a pass/fail check.
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
float mv = 3.0f * tanhf(std::log1p(std::exp(3.0)));
maxError = std::max(maxError, std::abs(mv - y[i]));
}
printf("max error: %f\n", maxError);
printf("duration (ms): %f\n", millis);
// 3 float accesses per element (one __ldg read, one write, plus the H2D copy amortized).
printf("effective bandwidth (gb/s): %f\n", (float)N * sizeof(float) * 3 / millis / 1e6);
// NOTE(review): start/stop events are never destroyed (cudaEventDestroy).
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
return 0;
}
|
5,483 | #include "includes.h"
#define SIZE (100 * 1024 * 1024)
__global__ void histo_kernel_optimization(unsigned char *buffer, int size, unsigned int *histo)
{
    // 256-bin byte histogram with a per-block private copy in shared memory.
    // Requires blockDim.x == 256 so each thread owns one bin for init/merge.
    __shared__ unsigned int temp[256];
    temp[threadIdx.x] = 0;
    __syncthreads();
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int stride = blockDim.x * gridDim.x;
    while (i < size)
    {
        // BUG FIX: accumulate into the shared-memory copy. The original did
        // atomicAdd(&histo[buffer[i]], 1) here, hammering global memory and
        // leaving temp[] permanently zero -- the final merge below was a
        // no-op and the "optimization" never took effect.
        atomicAdd(&temp[buffer[i]], 1);
        i += stride;
    }
    __syncthreads();
    // Merge this block's private histogram into the global one.
    atomicAdd(&histo[threadIdx.x], temp[threadIdx.x]);
} |
5,484 | #include "includes.h"
// Softmax + cross-entropy gradient: target = mat - one_hot(label), one
// matrix element per loop iteration, grid-stride over width*height elements.
// NOTE(review): the one-hot test compares labels[i / height] with i % height,
// i.e. it assumes column-major storage with `height` rows (classes) per
// column -- confirm against the caller's layout.
__global__ void kSoftMaxGrad(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i / height] == i % height ? 1 : 0);
}
} |
5,485 | #include <assert.h>
#include <errno.h>
#ifndef WIN32
#include <getopt.h>
#endif
#include <limits.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "c63cpu.cuh"
// Motion estimation for one 8x8 macroblock: exhaustive SAD search over a
// +/- me_search_range window in the reference frame (halved for chroma),
// storing the best motion vector in cm->mbs[cc]. Always marks use_mv.
static void me_block_8x8_CPU(struct c63_common_cpu *cm, int mb_x, int mb_y,
uint8_t *orig, uint8_t *ref, int cc)
{
struct macroblock *mb = &cm->mbs[cc][mb_y * cm->padw[cc]/8 + mb_x];
int range = cm->me_search_range;
// Half resolution for chroma channels.
if (cc > 0) { range /= 2; }
// Search window corners in pixel coordinates.
int left = mb_x * 8 - range;
int top = mb_y * 8 - range;
int right = mb_x * 8 + range;
int bottom = mb_y * 8 + range;
int w = cm->padw[cc];
int h = cm->padh[cc];
// Make sure we are within bounds of reference frame. TODO: Support partial frame bounds.
if (left < 0) { left = 0; }
if (top < 0) { top = 0; }
if (right > (w - 8)) { right = w - 8; }
if (bottom > (h - 8)) { bottom = h - 8; }
int x, y;
// Top-left pixel of the macroblock in the current frame.
int mx = mb_x * 8;
int my = mb_y * 8;
int best_sad = INT_MAX;
for (y = top; y < bottom; ++y)
{
for (x = left; x < right; ++x)
{
int sad;
// sad_block_8x8 (external) computes the sum of absolute differences
// between the original block and the candidate reference block.
sad_block_8x8(orig + my*w+mx, ref + y*w+x, w, &sad);
if (sad < best_sad)
{
mb->mv_x = x - mx;
mb->mv_y = y - my;
best_sad = sad;
}
}
}
// Here, there should be a threshold on SAD that checks if the motion vector
// is cheaper than intraprediction. We always assume MV to be beneficial
//printf("Using motion vector (%d, %d) with SAD %d\n", mb->mv_x, mb->mv_y,
// best_sad);
mb->use_mv = 1;
}
void c63_motion_estimate(struct c63_common_cpu *cm)
{
  // Motion-estimate every macroblock of the current frame against the
  // previous reconstructed frame: full-resolution luma first, then the
  // half-resolution chroma planes (U and V) together.
  int mb_x, mb_y;
  for (mb_y = 0; mb_y < cm->mb_rows; ++mb_y)
  {
    for (mb_x = 0; mb_x < cm->mb_cols; ++mb_x)
    {
      me_block_8x8_CPU(cm, mb_x, mb_y, cm->curframe->orig->Y,
          cm->refframe->recons->Y, 0);
    }
  }
  for (mb_y = 0; mb_y < cm->mb_rows / 2; ++mb_y)
  {
    for (mb_x = 0; mb_x < cm->mb_cols / 2; ++mb_x)
    {
      me_block_8x8_CPU(cm, mb_x, mb_y, cm->curframe->orig->U,
          cm->refframe->recons->U, 1);
      me_block_8x8_CPU(cm, mb_x, mb_y, cm->curframe->orig->V,
          cm->refframe->recons->V, 2);
    }
  }
}
// Luma-only motion estimation, OpenMP-parallel over macroblock rows.
// Rows below cpu_start are skipped -- presumably handled by another worker
// (e.g. the GPU path); confirm against the caller.
void c63_motion_estimate_Y_CPU(struct c63_common_cpu *cm, int cpu_start)
{
// Compare this frame with previous reconstructed frame
int mb_x, mb_y;
//Luma
#pragma omp parallel for private(mb_x)
for (mb_y = cpu_start; mb_y < cm->mb_rows; ++mb_y)
{
for (mb_x = 0; mb_x < cm->mb_cols; ++mb_x)
{
me_block_8x8_CPU(cm, mb_x, mb_y, cm->orig->Y, cm->ref_recons->Y, 0);
}
}
}
// Chroma-U motion estimation, OpenMP-parallel over the half-resolution
// macroblock rows, starting at cpu_start (rows before it handled elsewhere).
void c63_motion_estimate_U_CPU(struct c63_common_cpu *cm, int cpu_start)
{
// Compare this frame with previous reconstructed frame
int mb_x, mb_y;
// Chroma U (half-resolution macroblock grid)
#pragma omp parallel for private(mb_x)
for (mb_y = cpu_start; mb_y < cm->mb_rows / 2; ++mb_y)
{
for (mb_x = 0; mb_x < cm->mb_cols / 2; ++mb_x)
{
me_block_8x8_CPU(cm, mb_x, mb_y, cm->orig->U, cm->ref_recons->U, 1);
}
}
}
// Chroma-V motion estimation, OpenMP-parallel over the half-resolution
// macroblock rows, starting at cpu_start (rows before it handled elsewhere).
void c63_motion_estimate_V_CPU(struct c63_common_cpu *cm, int cpu_start)
{
// Compare this frame with previous reconstructed frame
int mb_x, mb_y;
// Chroma V (half-resolution macroblock grid)
#pragma omp parallel for private(mb_x)
for (mb_y = cpu_start; mb_y < cm->mb_rows / 2; ++mb_y)
{
for (mb_x = 0; mb_x < cm->mb_cols / 2; ++mb_x)
{
me_block_8x8_CPU(cm, mb_x, mb_y, cm->orig->V, cm->ref_recons->V, 2);
}
}
}
// Motion compensation for one 8x8 block: copies the reference block pointed
// to by the macroblock's motion vector into the predicted plane. Blocks
// without a motion vector (use_mv == 0) are left untouched.
static void mc_block_8x8(struct c63_common_cpu *cm, int mb_x, int mb_y,
uint8_t *predicted, uint8_t *ref, int cc)
{
struct macroblock *mb = &cm->curframe->mbs[cc][mb_y * cm->padw[cc]/8 + mb_x];
if (!mb->use_mv) { return; }
// Destination rectangle in pixel coordinates.
int left = mb_x * 8;
int top = mb_y * 8;
int right = left + 8;
int bottom = top + 8;
int w = cm->padw[cc];
// Copy block from ref mandated by MV
int x, y;
for (y = top; y < bottom; ++y)
{
for (x = left; x < right; ++x)
{
predicted[y*w+x] = ref[(y + mb->mv_y) * w + (x + mb->mv_x)];
}
}
}
// Motion-compensate every macroblock of the current frame: full-resolution
// luma, then the half-resolution chroma planes (U and V).
void c63_motion_compensate(struct c63_common_cpu *cm)
{
int mb_x, mb_y;
// Luma *
for (mb_y = 0; mb_y < cm->mb_rows; ++mb_y)
{
for (mb_x = 0; mb_x < cm->mb_cols; ++mb_x)
{
mc_block_8x8(cm, mb_x, mb_y, cm->curframe->predicted->Y,
cm->refframe->recons->Y, 0);
}
}
//Chroma
for (mb_y = 0; mb_y < cm->mb_rows / 2; ++mb_y)
{
for (mb_x = 0; mb_x < cm->mb_cols / 2; ++mb_x)
{
mc_block_8x8(cm, mb_x, mb_y, cm->curframe->predicted->U,
cm->refframe->recons->U, 1);
mc_block_8x8(cm, mb_x, mb_y, cm->curframe->predicted->V,
cm->refframe->recons->V, 2);
}
}
} |
5,486 | #include "includes.h"
__global__ void SimpleClone( const float *background, const float *target, const int *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox )
{
    // Copy masked target pixels (interleaved RGB) onto the output image at
    // offset (oy, ox); one thread per target pixel.
    const int xt = blockIdx.x * blockDim.x + threadIdx.x;
    const int yt = blockIdx.y * blockDim.y + threadIdx.y;
    if (yt >= ht || xt >= wt) return;
    const int curt = wt * yt + xt;
    if (!mask[curt]) return;
    const int yb = oy + yt;
    const int xb = ox + xt;
    // Skip pixels that land outside the background's bounds.
    if (yb < 0 || yb >= hb || xb < 0 || xb >= wb) return;
    const int curb = wb * yb + xb;
    for (int ch = 0; ch < 3; ++ch) {
        output[curb * 3 + ch] = target[curt * 3 + ch];
    }
} |
5,487 | #include <stdio.h>
#include <stdlib.h>
__global__ void kernel(int *array){
    // Each thread writes its block's linear id into its own cell of the
    // row-major 2-D output (one thread per element).
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int pitch = gridDim.x * blockDim.x;
    array[row * pitch + col] = blockIdx.y * gridDim.x + blockIdx.x;
}
// Driver: launches a 4x4 grid of 4x4 blocks over a 16x16 int array, then
// prints which block wrote each cell (illustrates grid/block indexing).
int main(){
int num_elements_x = 16;
int num_elements_y = 16;
int num_bytes = num_elements_x * num_elements_y * sizeof(int);
int *device_array = NULL;
int *host_array = 0;
host_array = (int *) malloc(num_bytes);
cudaMalloc((void **) &device_array, num_bytes);
// Grid exactly covers the data (16 is a multiple of 4), so the kernel
// needs no bounds guard.
dim3 block_size;
block_size.x = 4;
block_size.y = 4;
dim3 grid_size;
grid_size.x = num_elements_x / block_size.x;
grid_size.y = num_elements_y / block_size.y;
kernel<<<grid_size, block_size>>>(device_array);
// cudaMemcpy synchronizes with the kernel before copying back.
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
int row, col;
for(row = 0; row <num_elements_y; ++row){
for(col = 0; col<num_elements_x; ++col)
printf("%2d ", host_array[row*num_elements_x + col]);
printf("\n");
}
free(host_array);
cudaFree(device_array);
return 0;
} |
5,488 | #include <iostream>
#include <cstdio>
using namespace std;
#define BLOCK_SIZE 32
__global__ void gpuMM(float *A, float *B, float *C, int N)
{
    // Matrix multiplication for NxN matrices C = A*B; one thread computes one
    // element of C. Assumes the grid exactly covers N x N (no bounds guard).
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    printf("row: %d col: %d\n", col, row);   // debug trace (note: prints col first)
    float acc = 0.f;
    for (int k = 0; k < N; ++k) {
        acc += A[row*N + k] * B[k*N + col];
    }
    C[row*N + col] = acc;
}
int main(int argc, char *argv[])
{
    // Perform matrix multiplication C = A*B where A, B and C are NxN matrices
    // (restricted to N = K*BLOCK_SIZE), then verify against a CPU reference.
    int N,K;
    K = 4;
    N = K*BLOCK_SIZE;
    cout << "Executing Matrix Multiplcation" << endl;
    cout << "Matrix size: " << N << "x" << N << endl;
    // Allocate memory on the host
    float *hA,*hB,*hC;
    hA = new float[N*N];
    hB = new float[N*N];
    hC = new float[N*N];
    // Initialize matrices on the host
    for (int j=0; j<N; j++){
        for (int i=0; i<N; i++){
            hA[j*N+i] = 2.f*(j+i);
            hB[j*N+i] = 1.f*(j-i);
        }
    }
    // Allocate memory on the device
    int size = N*N*sizeof(float); // Size of the memory in bytes
    float *dA,*dB,*dC;
    cudaMalloc(&dA,size);
    cudaMalloc(&dB,size);
    cudaMalloc(&dC,size);
    dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 grid(K,K);
    // Copy matrices from the host to device
    cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice);
    cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice);
    // Execute the matrix multiplication kernel
    gpuMM<<<grid,threadBlock>>>(dA,dB,dC,N);
    // Now do the matrix multiplication on the CPU
    float sum;
    for (int row=0; row<N; row++){
        for (int col=0; col<N; col++){
            sum = 0.f;
            for (int n=0; n<N; n++){
                sum += hA[row*N+n]*hB[n*N+col];
            }
            hC[row*N+col] = sum;
        }
    }
    // Copy the GPU result back (cudaMemcpy synchronizes with the kernel).
    float *C;
    C = new float[N*N];
    cudaMemcpy(C,dC,size,cudaMemcpyDeviceToHost);
    // Check the result. Exact float comparison is intentional: GPU and CPU
    // accumulate in the same k-order here.
    for (int row=0; row<N; row++){
        for (int col=0; col<N; col++){
            if ( C[row*N+col] != hC[row*N+col] ){
                cout << "Wrong answer!" << endl;
                row = col = N;
            }
        }
    }
    cout << "Finished." << endl;
    // BUG FIX: the original leaked every host and device allocation and fell
    // off the end of main without a return.
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    delete[] hA;
    delete[] hB;
    delete[] hC;
    delete[] C;
    return 0;
}
|
5,489 | #include <stdio.h>
__global__ void kernel(float* A)
{
    // Mandelbrot escape-time: one thread per pixel. c is mapped from the full
    // launch extent onto [-2, 1] x [-1.5, 1.5]; output is it/255 in [0, 1].
    const int j = blockDim.x * blockIdx.x + threadIdx.x;
    const int i = blockDim.y * blockIdx.y + threadIdx.y;
    const float cr = float(j) / gridDim.x / blockDim.x * 3.0f - 2.0f;
    const float ci = float(i) / gridDim.y / blockDim.y * 3.0f - 1.5f;
    float zr = 0.0f, zi = 0.0f;
    int it = 0;
    // Iterate z <- z^2 + c until escape (|z|^2 >= 10) or 255 iterations.
    while (it < 255 && (zr * zr + zi * zi) < 10.0f)
    {
        const float new_zr = zr * zr - zi * zi + cr;
        const float new_zi = 2.0 * zi * zr + ci; // double literal kept to match original arithmetic
        zr = new_zr;
        zi = new_zi;
        ++it;
    }
    A[j + i * gridDim.x * blockDim.x] = float(it) / 255.0f;
}
int main()
{
    // Render an 80x40 ASCII Mandelbrot set computed on the GPU.
    cudaError_t cudaStatus = cudaSetDevice(0);
    // BUG FIX: the original ignored cudaStatus entirely.
    if (cudaStatus != cudaSuccess)
    {
        printf("cudaSetDevice failed\n");
        return 1;
    }
    int size_x = 80;
    int size_y = 40;
    int threads_in_block_x = 8;
    int threads_in_block_y = 8;
    // Sizes are multiples of the block dims, so the grid covers them exactly.
    int blocks_x = size_x / threads_in_block_x;
    int blocks_y = size_y / threads_in_block_y;
    float* device_array = NULL;
    cudaMalloc(&device_array, size_x * size_y * sizeof(float));
    kernel<<<dim3(blocks_x, blocks_y), dim3(threads_in_block_x, threads_in_block_y)>>>(device_array);
    float* host_array = new float[size_x * size_y];
    // cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(host_array, device_array, size_x * size_y * sizeof(float), cudaMemcpyDeviceToHost);
    printf("\n");
    for (int i = 0; i < size_y; ++i)
    {
        for (int j = 0; j < size_x; ++j)
        {
            // '#' marks slow-escaping (interior-ish) points.
            if (host_array[j + i * size_x] > 0.5)
            {
                printf("#");
            }
            else
            {
                printf("*");
            }
        }
        printf("\n");
    }
    // BUG FIX: the original leaked both the device and the host buffer.
    delete[] host_array;
    cudaFree(device_array);
    return 0;
}
|
5,490 |
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
__global__ void getValue(float *outdata, float *indata) {
    // Every launched thread writes indata[0] + 3 into outdata[0].
    const float v = indata[0];
    outdata[0] = v + 3.0f;
}
// Mixed driver-API / runtime-API demo: pinned host buffers, async H2D copy,
// kernel launch on an explicit stream, async D2H copy, then verify.
// NOTE(review): no cuInit()/cuCtxCreate() precedes the driver-API calls --
// cuStreamCreate before any context exists likely fails with
// CUDA_ERROR_NOT_INITIALIZED; confirm the intended init path.
int main(int argc, char *argv[]) {
int N = 1024;
CUstream stream;
cuStreamCreate(&stream, 0);
float *hostFloats1;
float *hostFloats2;
// Pinned (page-locked) host allocations, required for truly async copies.
cuMemHostAlloc((void **)&hostFloats1, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
cuMemHostAlloc((void **)&hostFloats2, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceFloats1;
CUdeviceptr deviceFloats2;
cuMemAlloc(&deviceFloats1, N * sizeof(float));
cuMemAlloc(&deviceFloats2, N * sizeof(float));
hostFloats1[128] = 123.456f;
// The round-trip cast below is a no-op; it copies the whole buffer to device.
cuMemcpyHtoDAsync(
(CUdeviceptr)(((float *)deviceFloats1)),
hostFloats1,
N * sizeof(float),
stream
);
// cuStreamSynchronize(stream);
// Kernel reads element 128 of buffer 1 and writes element 64 of buffer 2
// (pointer arithmetic on the raw device addresses). Same stream, so it is
// ordered after the H2D copy without an explicit sync.
getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(((float *)deviceFloats2) + 64, ((float *)deviceFloats1) + 128);
// now copy back entire buffer
// hostFloats[64] = 0.0f;
cuMemcpyDtoHAsync(hostFloats2, deviceFloats2, N * sizeof(float), stream);
cuStreamSynchronize(stream);
// and check the values... expected: 123.456f + 3.0f
cout << hostFloats2[64] << endl;
assert(hostFloats2[64] == 126.456f);
cuMemFreeHost(hostFloats1);
cuMemFreeHost(hostFloats2);
cuMemFree(deviceFloats1);
cuMemFree(deviceFloats2);
cuStreamDestroy(stream);
return 0;
}
|
5,491 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define N 500
struct timeval start, end;
typedef struct Matrix {
int rows;
int cols;
double ** matrix;
} Matrix;
Matrix buildMatrix(int r, int c) {
    // Allocate an r x c matrix of doubles, zero-initialized via calloc.
    Matrix m;
    m.rows = r;
    m.cols = c;
    m.matrix = (double**)malloc(r * sizeof(double*));
    for (unsigned int row = 0; row < r; row++)
    {
        m.matrix[row] = (double*)calloc(c, sizeof(double));
    }
    return m;
}
void displayMatrix(Matrix m) {
    // Pretty-print the matrix, one "| ... |" row per line.
    printf("\n");
    for (int r = 0; r < m.rows; r++) {
        printf("| ");
        for (int c = 0; c < m.cols; c++) {
            printf("%8.5f ", m.matrix[r][c]);
        }
        printf("|\n");
    }
    printf("\n");
}
void setValue(Matrix* m, double val, int r, int c) {
    // Store val at row r, column c (no bounds checking).
    m->matrix[r][c] = val;
}
Matrix multByValue(double val, Matrix m) {
    // Return a new matrix equal to m scaled by val (m is left untouched).
    Matrix scaled = buildMatrix(m.rows, m.cols);
    for (int r = 0; r < scaled.rows; r++) {
        for (int c = 0; c < scaled.cols; c++) {
            scaled.matrix[r][c] = val * m.matrix[r][c];
        }
    }
    return scaled;
}
Matrix multByMatrix(Matrix mat1, Matrix mat2) {
    // Standard matrix product: result(i,j) = sum_k mat1(i,k) * mat2(k,j).
    // BUG FIX: the original's "fallback" branches for mismatched shapes built
    // a wrongly-sized result and then indexed out of bounds (undefined
    // behavior). Incompatible inputs now yield a zero matrix of the nominal
    // product shape instead of corrupting memory.
    Matrix result = buildMatrix(mat1.rows, mat2.cols);
    if (mat1.cols != mat2.rows) {
        return result;  // dimensions incompatible; calloc already zeroed it
    }
    for (int i = 0; i < mat1.rows; i++) {
        for (int j = 0; j < mat2.cols; j++) {
            double sum = 0.0;
            for (int k = 0; k < mat1.cols; k++) {
                sum += mat1.matrix[i][k] * mat2.matrix[k][j];
            }
            result.matrix[i][j] = sum;
        }
    }
    return result;
}
double getDeterminate(Matrix m) {
    // Determinant via Gaussian elimination on a scratch copy of m.
    // Returns 0 for non-square input. NOTE(review): no pivoting -- a zero
    // pivot on the diagonal divides by zero, exactly as in the original.
    int i, j, k;
    double ratio;
    int rows = m.rows, cols = m.cols;
    Matrix temp = buildMatrix(m.rows, m.cols);
    for (i = 0; i < m.rows; i++) {
        for (j = 0; j < m.cols; j++) {
            temp.matrix[i][j] = m.matrix[i][j];
        }
    }
    double det = 0;
    if (rows == cols) {
        if (rows == 2) {
            // 2x2 closed form.
            det = (temp.matrix[0][0] * temp.matrix[1][1]) - (temp.matrix[0][1] * temp.matrix[1][0]);
        } else {
            // Reduce to upper-triangular form, then multiply the diagonal.
            for (i = 0; i < rows; i++) {
                for (j = 0; j < cols; j++) {
                    if (j > i) {
                        ratio = temp.matrix[j][i] / temp.matrix[i][i];
                        for (k = 0; k < rows; k++) {
                            temp.matrix[j][k] -= ratio * temp.matrix[i][k];
                        }
                    }
                }
            }
            det = 1;
            for (i = 0; i < rows; i++) {
                det *= temp.matrix[i][i];
            }
        }
    }
    // BUG FIX: the original leaked the scratch matrix on every call.
    for (i = 0; i < m.rows; i++) {
        free(temp.matrix[i]);
    }
    free(temp.matrix);
    return det;
}
Matrix transpose(Matrix m) {
    // Return a new matrix with rows and columns swapped. (cols x rows covers
    // the square case too, so no special-casing is needed.)
    Matrix t = buildMatrix(m.cols, m.rows);
    for (int i = 0; i < m.rows; i++) {
        for (int j = 0; j < m.cols; j++) {
            t.matrix[j][i] = m.matrix[i][j];
        }
    }
    return t;
}
Matrix coFactorCPU(Matrix m) {
    // Apply the (-1)^(i+j) checkerboard sign pattern to m's entries.
    // (Despite the name, this does not compute minors -- it only flips signs.)
    Matrix signed_m = buildMatrix(m.rows, m.cols);
    for (int i = 0; i < m.rows; i++) {
        for (int j = 0; j < m.cols; j++) {
            double v = m.matrix[i][j];
            signed_m.matrix[i][j] = ((i + j) % 2 == 1) ? -v : v;
        }
    }
    return signed_m;
}
void starttime() {
    // Record the wall-clock start timestamp into the global `start`.
    gettimeofday(&start, 0);
}
void endtime() {
    // Record the stop timestamp and print the elapsed wall time in ms.
    gettimeofday(&end, 0);
    double elapsed = (end.tv_sec - start.tv_sec) * 1000.0
                   + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("%f ms\n", elapsed);
}
__global__ void MatAdd(int A[][N], int B[][N], int C[][N]){
    // Element-wise sum; one thread per element, indexed within a single block.
    const int r = threadIdx.x;
    const int c = threadIdx.y;
    C[r][c] = A[r][c] + B[r][c];
}
__global__ void MatMult(double A[][N], double B[][N], double C[][N], int width){
    // One thread computes one element of C as the dot product of a row of A
    // and a column of B; indexed within a single block.
    const int i = threadIdx.x;
    const int j = threadIdx.y;
    double acc = 0;
    for (int k = 0; k < width; k++) {
        acc += A[i][k] * B[k][j];
    }
    C[i][j] = acc;
}
__global__ void MatTrans(double A[][N], double C[][N]){
    // C = A transposed; one thread per element, single-block indexing.
    const int i = threadIdx.x;
    const int j = threadIdx.y;
    C[j][i] = A[i][j];
}
__global__ void multMatByValue(double val, double A[][N], double C[][N]) {
    // C = val * A, element-wise; one thread per element, single-block indexing.
    const int i = threadIdx.x;
    const int j = threadIdx.y;
    C[i][j] = val * A[i][j];
}
__global__ void coFactor(double A[][N], double C[][N]) {
    // Checkerboard sign flip: C[i][j] = (-1)^(i+j) * A[i][j];
    // one thread per element, single-block indexing.
    const int i = threadIdx.x;
    const int j = threadIdx.y;
    const double v = A[i][j];
    C[i][j] = ((i + j) % 2 == 1) ? -v : v;
}
// Gaussian elimination + diagonal product, one thread per matrix element.
// NOTE(review): there is no synchronization between elimination steps and no
// atomic on *d, so threads race on A and on the accumulator -- the result is
// order-dependent. Kept byte-identical; flagging rather than restructuring.
__global__ void determinate(double A[][N], double *d, int width, int height){
int i = threadIdx.x;
int j = threadIdx.y;
//printf("Hello from determinate\n");
if(j > i){
double ratio = A[j][i]/A[i][i];
for(int k = 0; k < width; k++){
A[j][k] -= ratio * A[i][k];
}
}
// Each (i, i) thread multiplies its diagonal entry into *d (unsynchronized).
for(int k = 0; k < width; k++){
if(k == i){
*d *= A[i][k];
}
}
}
// Identical twin of `determinate` above (same elimination + diagonal product).
// NOTE(review): same unsynchronized updates to A and *d -- result is racy and
// order-dependent; also duplicates the kernel above verbatim.
__global__ void determinate_gpu(double A[][N], double* d, int width, int height){
int i = threadIdx.x;
int j = threadIdx.y;
if(j > i){
double ratio = A[j][i]/A[i][i];
for(int k = 0; k < width; k++){
A[j][k] -= ratio * A[i][k];
}
}
// Each (i, i) thread multiplies its diagonal entry into *d (unsynchronized).
for(int k = 0; k < width; k++){
if(k == i){
*d *= A[i][k];
}
}
}
// Benchmark driver comparing CPU and GPU matrix operations for N = 500.
// NOTE(review): A, B, C, E are 4 stack arrays of N*N doubles (~8 MB total) --
// likely exceeds the default stack size; consider heap allocation.
// NOTE(review): dim3 threadsPerBlock(N, N) requests N*N = 250000 threads per
// block, far beyond CUDA's per-block limit, so every kernel launch here
// should fail; no launch errors are checked.
int main(){
double A[N][N];
double C[N][N];
Matrix a = buildMatrix(N, N);
Matrix b = buildMatrix(N, N);
double E[N][N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
E[i][j] = 1;
}
}
for(int i = 0; i< N;i++){
for(int j = 0; j < N;j++ ){
C[i][j] = 0;
A[i][j] = 1;
a.matrix[i][j] = 1;
b.matrix[i][j] = 1;
}
}
// NOTE(review): B is never initialized, yet it is copied to the device below.
double B[N][N];
double det = 1.0;
double *d_det;
double (*pA)[N], (*pB)[N], (*pC)[N],(*pE)[N];
cudaMalloc((void**)&pA, (N*N)*sizeof(double));
cudaMalloc((void**)&pB, (N*N)*sizeof(double));
cudaMalloc((void**)&pC, (N*N)*sizeof(double));
cudaMalloc((void**)&pE, (N*N)*sizeof(double));
cudaMalloc((void**)&d_det, sizeof(double));
cudaMemcpy(pA, A, (N*N)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(pB, B, (N*N)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(pC, C, (N*N)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(pE, E, (N*N)*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_det, &det, sizeof(double), cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
// NOTE(review): GPU timings below do not synchronize after the launches, so
// they measure launch overhead, not kernel execution.
starttime();
printf("***Transpose on %d x %d Matrix on CPU***\n",N,N);
Matrix d = transpose(a);
endtime();
starttime();
printf("***Transpose on %d x %d Matrix on GPU***\n",N,N);
MatTrans<<<numBlocks,threadsPerBlock>>>(pA, pC);
endtime();
starttime();
printf("***Multiplication on %d x %d Matrix on CPU***\n",N,N);
Matrix c = multByMatrix(a, b);
endtime();
starttime();
printf("***Multiplication on %d x %d Matrix on GPU***\n",N,N);
MatMult<<<numBlocks,threadsPerBlock>>>(pA,pA,pC,N);
endtime();
starttime();
printf("***Multiplication by single value on %d x %d Matrix with %d on GPU***\n",N,N,5);
multMatByValue<<<numBlocks, threadsPerBlock>>>(5, pA, pC);
endtime();
starttime();
printf("***Multiplication by single value on %d x %d Matrix with %d on CPU***\n",N,N,5);
Matrix e = multByValue(5,a);
endtime();
starttime();
printf("***Cofactor of %d x %d Matrix on GPU***\n",N,N);
coFactor<<<numBlocks, threadsPerBlock>>>(pA,pC);
endtime();
starttime();
printf("***Cofactor of %d x %d Matrix on CPU***\n",N,N);
Matrix f = coFactorCPU(a);
endtime();
starttime();
printf("***Determinate of %d x %d Matrix on CPU***\n",N,N);
double determinate = getDeterminate(a);
endtime();
starttime();
printf("***Determinate of %d x %d Matrix on GPU***\n",N,N);
determinate_gpu<<<numBlocks, threadsPerBlock>>>(pE, d_det, N, N);
endtime();
cudaMemcpy(&det, d_det, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(C, pC, (N*N)*sizeof(double), cudaMemcpyDeviceToHost);
// NOTE(review): pE is never freed; the Matrix results (a, b, c, d, e, f) are
// also leaked.
cudaFree(pA);
cudaFree(pB);
cudaFree(pC);
cudaFree(d_det);
printf("\n");
return 0;
}
|
5,492 | #include "includes.h"
__global__ static void findCBar(double* cOld, double* cCurr, double* cBar, int nx)
{
    // Linear extrapolation cBar = 2*cCurr - cOld at each (x, y) grid point;
    // one thread per point, row-major layout with row width nx.
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    const int idx = y * nx + x;
    cBar[idx] = 2.0 * cCurr[idx] - cOld[idx];
} |
5,493 | #include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define NUM 10
__global__ void averagePx(int * a, int * c)
{
    // One block per pixel (blockDim == 1); halve each pixel value.
    const int idx = blockIdx.x;
    printf("w: %d\n", idx);
    if (idx < NUM*NUM) {
        c[idx] = a[idx] / 2;
    }
}
// Driver: builds a NUM x NUM "image", flattens it, halves each pixel on the
// GPU (one block per pixel), and prints input vs. output pairs.
int main ()
{
int a[NUM][NUM], c[NUM*NUM];
int change_a[NUM*NUM];
int *de_a, *de_c;
// Fill in the image values (set arbitrarily for now).
int gap = 0;
for (int i = 0; i < NUM; ++i) {
for (int j = 0; j < NUM; ++j) {
a[i][j] = 5 * 10;
change_a[gap] = a[i][j];
printf("a[%d][%d] = %d\n", i, j, a[i][j]);
printf("gap[%d] = %d\n", gap, change_a[gap]);
++gap;
}
}
// Copy the flattened image to the device.
cudaMalloc((void**)&de_a, sizeof(int) * NUM*NUM);
cudaMalloc((void**)&de_c, sizeof(int) * NUM*NUM);
cudaMemcpy(de_a, change_a, sizeof(int) * NUM*NUM, cudaMemcpyHostToDevice);
// <<<100, 1>>>: exactly one block per pixel (NUM*NUM == 100).
averagePx <<<100, 1>>> (de_a, de_c);
// cudaMemcpy synchronizes with the kernel before copying back.
cudaMemcpy(c, de_c, sizeof(int) * NUM*NUM, cudaMemcpyDeviceToHost);
for (int i = 0; i < NUM*NUM; ++i) {
printf("a(%d) = c(%d)\n", change_a[i], c[i]);
}
cudaFree(de_a);
cudaFree(de_c);
getchar();
return 0;
} |
5,494 | /*
Cuda implementation of the VAI forward solver
Optimization 2-b: Here we switched to float4 and int4 data types
for more effecient memory access and computing indices.
Also, loop unrolling in computing the neighboring elements
contribution is dones is done
Optimization 3 : Since we only use two or few current sources, here I
replaced the sources array by passing the sources as parameters
or copy them to constant memory. Whether there is a current source at
an element or not is encoded as the first digit in the first neigbing
idices list
The redundant elements array is removed
GPU memory usage is about 700MB
Adnan Salman: 11/1/2011
*/
#include <stdio.h>
#include <assert.h>
#include <float.h>
#include <unistd.h>
#define THREADS_PER_BLOCK 32
#define HOST_NAME_SIZE 200
// reduction function declaration (implemented in reduction.cu)
float full_reduce(float *d_input, float *d_output, unsigned int n,
unsigned int maxThreads, unsigned int maxBlocks);
__device__ float DotProdVec4(float4 A, float4 B){
    // 4-component dot product (left-to-right accumulation, matching the
    // associativity of the original single-expression form).
    float s = A.x * B.x;
    s += A.y * B.y;
    s += A.z * B.z;
    s += A.w * B.w;
    return s;
}
// Per-component right-hand-side term for element `idx`, local node j (0..7).
// neighborElemIdx == inElementsSize is the sentinel for "no neighbor"
// (boundary); in that case only the diagonal/source contribution is used.
// NOTE(review): the 16-float4 stride of d4_AFF and the ndv[7-j] mirror pairing
// encode the packed element layout -- confirm against the packing code.
__device__ float ComputeFF(int idx, int neighborElemIdx, float4 *d4_v1, int inElementsSize,
float dvv1, float4 *d4_AFF, float Tss, float du3, int j){
float ss = 0, uvv;
if (neighborElemIdx != inElementsSize) {
// Row j of this element's 8x8 AFF block, stored as 2 float4s per row.
int idx0 = idx*16+2*j;
float4 ndv11 = d4_v1[neighborElemIdx*2];
float4 ndv12 = d4_v1[neighborElemIdx*2 + 1];
// Unpack the neighbor's 8 solution components for mirrored access.
float ndv[8] = {ndv11.x, ndv11.y, ndv11.z, ndv11.w,
ndv12.x, ndv12.y, ndv12.z, ndv12.w};
// Average of this node's value and the neighbor's mirrored node value.
uvv = (dvv1 + ndv[7-j])/2.0;
ss = DotProdVec4(d4_AFF[idx0], ndv11) + DotProdVec4(d4_AFF[idx0+1], ndv12);
}
else uvv = dvv1;
return (Tss * uvv + du3 - ss);
}
// Builds the 8-component right-hand side ff4 for every element: one thread
// per element, data stored as 2 float4s per element. The current-source id is
// encoded in the low decimal digit of the first neighbor index (d4_IJNZ.x),
// with the actual neighbor index in the remaining digits (hence the /10).
__global__ void DevComputeff4(int inElementsSize, int4 *d4_IJNZ, float4 *d4_srcs,
float4 *d4_AFF, float4 *d4_v1, float4 *ff4, float Tss){
int idx = blockIdx.x * blockDim.x + threadIdx.x; // element index
int idx0 = idx*2; // type4 data index
if (idx <inElementsSize){
int4 dijnz1 = d4_IJNZ[idx0];
float4 dv11 = d4_v1[idx0];
// Source contribution defaults to zero; filled in only when a current
// source is attached to this element (srcid != 0).
float4 du31 = make_float4(0,0,0,0);
float4 du32 = make_float4(0,0,0,0);
int srcid = dijnz1.x % 10;
if (srcid){
du31 = d4_srcs[(srcid-1)*2];
du32 = d4_srcs[(srcid-1)*2+1];
}
float4 ff1;
//make sure all argument are the same
// First four local nodes (j = 0..3); note x strips the source digit.
ff1.x = ComputeFF(idx, dijnz1.x/10, d4_v1, inElementsSize, dv11.x, d4_AFF, Tss, du31.x, 0);
ff1.y = ComputeFF(idx, dijnz1.y, d4_v1, inElementsSize, dv11.y, d4_AFF, Tss, du31.y, 1);
ff1.z = ComputeFF(idx, dijnz1.z, d4_v1, inElementsSize, dv11.z, d4_AFF, Tss, du31.z, 2);
ff1.w = ComputeFF(idx, dijnz1.w, d4_v1, inElementsSize, dv11.w, d4_AFF, Tss, du31.w, 3);
ff4[idx0] = ff1;
// Remaining four local nodes (j = 4..7) from the second int4/float4 pair.
dijnz1 = d4_IJNZ[idx0+1];
dv11 = d4_v1[idx0+1];
ff1.x = ComputeFF(idx, dijnz1.x, d4_v1, inElementsSize, dv11.x, d4_AFF, Tss, du32.x, 4);
ff1.y = ComputeFF(idx, dijnz1.y, d4_v1, inElementsSize, dv11.y, d4_AFF, Tss, du32.y, 5);
ff1.z = ComputeFF(idx, dijnz1.z, d4_v1, inElementsSize, dv11.z, d4_AFF, Tss, du32.z, 6);
ff1.w = ComputeFF(idx, dijnz1.w, d4_v1, inElementsSize, dv11.w, d4_AFF, Tss, du32.w, 7);
ff4[idx0+1] = ff1;
}
}
// Applies the per-element 8x8 local solve: v1 = AL * ff (two float4 dot
// products per output component, AL stored as 16 float4s per element), and
// records the absolute change per component in diff4_v1 for convergence
// checks. One thread per element.
// NOTE(review): unlike DevComputeff4, there is no `idx < inElementsSize`
// guard, so the grid must exactly cover the element count -- confirm the
// launch configuration.
__global__ void UpdateSolution(int inElementsSize, float4 *d4_AL, float4 *d4_v1, float4 *ff4, float4 *diff4_v1){
//TODO: use the temproray ff4 array to hold the temprary diff4_v1 data
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float4 dff1 = ff4[idx*2];
float4 dff2 = ff4[idx*2+1];
float4 dvvv;
int idx0 = idx*16;
// Previous solution (first half) kept to compute the per-component delta.
float4 diffv1 = d4_v1[idx*2];
// Components 0..3: rows 0..3 of AL (2 float4s per row).
dvvv.x = DotProdVec4(d4_AL[idx0], dff1) + DotProdVec4(d4_AL[idx0+1], dff2);
dvvv.y = DotProdVec4(d4_AL[idx0+2], dff1) + DotProdVec4(d4_AL[idx0+2+1], dff2);
dvvv.z = DotProdVec4(d4_AL[idx0+4], dff1) + DotProdVec4(d4_AL[idx0+4+1], dff2);
dvvv.w = DotProdVec4(d4_AL[idx0+6], dff1) + DotProdVec4(d4_AL[idx0+6+1], dff2);
d4_v1[idx*2] = dvvv;
diffv1.x = fabs(diffv1.x - dvvv.x);
diffv1.y = fabs(diffv1.y - dvvv.y);
diffv1.z = fabs(diffv1.z - dvvv.z);
diffv1.w = fabs(diffv1.w - dvvv.w);
diff4_v1[idx*2] = diffv1;
// Components 4..7: rows 4..7 of AL.
dvvv.x = DotProdVec4(d4_AL[idx0+8], dff1) + DotProdVec4(d4_AL[idx0+8+1], dff2);
dvvv.y = DotProdVec4(d4_AL[idx0+10], dff1) + DotProdVec4(d4_AL[idx0+10+1], dff2);
dvvv.z = DotProdVec4(d4_AL[idx0+12], dff1) + DotProdVec4(d4_AL[idx0+12+1], dff2);
dvvv.w = DotProdVec4(d4_AL[idx0+14], dff1) + DotProdVec4(d4_AL[idx0+14+1], dff2);
diffv1 = d4_v1[idx*2+1];
d4_v1[idx*2+1] = dvvv;
diffv1.x = fabs(diffv1.x - dvvv.x);
diffv1.y = fabs(diffv1.y - dvvv.y);
diffv1.z = fabs(diffv1.z - dvvv.z);
diffv1.w = fabs(diffv1.w - dvvv.w);
diff4_v1[idx*2+1] = diffv1;
}
// Validates and selects the requested CUDA device (once per process) and
// reports the host/device pairing. Returns 0 on success, 1 on any failure.
int InitializeSolver( int device){
    //TODO: move all cuda initialization here
    static int deviceChosen = 0;   // remember that cudaSetDevice already ran
    int numDevices = 0;
    cudaGetDeviceCount(&numDevices);
    if (numDevices == 0){
        fprintf(stderr, "There is no device supporting CUDA ");
        return 1;
    }
    if (device < 0 || device >= numDevices){
        fprintf(stderr, "No such cuda device = %d \n", device);
        return 1;
    }
    if (!deviceChosen){
        if (cudaSetDevice(device) != cudaSuccess){
            fprintf(stderr, "Failed setting cuda device \n ");
            return 1;
        }
        deviceChosen = 1;
    }
    char hostName[HOST_NAME_SIZE];
    gethostname(hostName, HOST_NAME_SIZE);
    printf("[%s] using id=%d cuda device.\n", hostName, device);
    return 0;
}
// Iteratively solves v1 = AL * F(v1) on the GPU until the per-step change
// drops below scaledTol, the change stalls (same diff within eps, checkMax
// consecutive times), or maxNumIterations is reached.
// Host arrays are packed 8 floats per element (copied as 2 float4).
// Returns the number of iterations performed; v1 receives the solution.
extern "C" int SolveVaiCuda(int inElementsSize, int* IJNZ, float *v1, float *AFF, float *AL,
                            int device, float Tss, float *srcs, int num_srcs, float scaledTol,
                            int maxNumIterations, int checkMax, float eps, int printFlag){
    static int setdev = 0;
    if (setdev == 0){
        InitializeSolver(device);
        setdev = 1;
    }
    int iterations, check = 0;
    float loop_step_diff = FLT_MAX, diff = FLT_MAX;
    dim3 blocks(THREADS_PER_BLOCK);
    dim3 grid(inElementsSize/blocks.x + 1);  // one extra block covers the remainder
    float4 *d4_v1, *d4_AL, *d4_AFF, *d4_ff, *d4_srcs, *diff4_v1;
    int4 *d4_IJNZ;
    // BUGFIX: the CUDA calls used to be the argument of assert(...); building
    // with NDEBUG would compile the calls themselves away. Capture the status
    // first so the allocation/copy always executes, then assert on it.
    // (Also removed: an unused `float *ff = malloc(...)` that was never freed.)
    cudaError_t st;
    st = cudaMalloc( (void**) &d4_IJNZ, inElementsSize*2*sizeof(int4));                              assert(st == cudaSuccess);
    st = cudaMemcpy( d4_IJNZ, IJNZ, inElementsSize*8*sizeof(int), cudaMemcpyHostToDevice);           assert(st == cudaSuccess);
    st = cudaMalloc( (void**) &d4_AFF, inElementsSize*16*sizeof(float4));                            assert(st == cudaSuccess);
    st = cudaMemcpy( d4_AFF, AFF, inElementsSize*64*sizeof(float), cudaMemcpyHostToDevice);          assert(st == cudaSuccess);
    st = cudaMalloc( (void**) &d4_AL, inElementsSize*16*sizeof(float4));                             assert(st == cudaSuccess);
    st = cudaMemcpy( d4_AL, AL, inElementsSize*64*sizeof(float), cudaMemcpyHostToDevice);            assert(st == cudaSuccess);
    st = cudaMalloc( (void**) &d4_ff, inElementsSize*2*sizeof(float4));                              assert(st == cudaSuccess);
    st = cudaMalloc( (void**) &d4_srcs, num_srcs*2*sizeof(float4));                                  assert(st == cudaSuccess);
    st = cudaMemcpy( d4_srcs, srcs, num_srcs*8*sizeof(float), cudaMemcpyHostToDevice);               assert(st == cudaSuccess);
    st = cudaMalloc( (void**) &d4_v1, inElementsSize*2*sizeof(float4));                              assert(st == cudaSuccess);
    st = cudaMemset( d4_v1, 0, inElementsSize*2*sizeof(float4));                                     assert(st == cudaSuccess);
    st = cudaMalloc( (void**) &diff4_v1, inElementsSize*2*sizeof(float4));                           assert(st == cudaSuccess);
    st = cudaMemset( diff4_v1, -1, inElementsSize*8*sizeof(float));                                  assert(st == cudaSuccess);
    st = cudaMemset( d4_ff, 0, inElementsSize*8*sizeof(float));                                      assert(st == cudaSuccess);
    for(iterations = 0; iterations < maxNumIterations && loop_step_diff > scaledTol &&
        check < checkMax; iterations++) {
        DevComputeff4<<<grid, blocks>>>(inElementsSize, d4_IJNZ, d4_srcs, d4_AFF, d4_v1, d4_ff, Tss);
        UpdateSolution<<<grid, blocks>>> (inElementsSize, d4_AL, d4_v1, d4_ff, diff4_v1);
        // full_reduce (project helper) collapses the per-component |delta|
        // array into a single scalar used as the convergence measure.
        loop_step_diff = full_reduce((float*)diff4_v1, (float*) diff4_v1, inElementsSize*8, 512, 256);
        if (fabs(loop_step_diff - diff) < eps ) check++;  // stalled: same diff again
        else {
            check = 0;
            diff = loop_step_diff;
        }
    }
    st = cudaMemcpy(v1, d4_v1, (inElementsSize)*2*sizeof(float4), cudaMemcpyDeviceToHost);
    assert(st == cudaSuccess);
    cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
    cudaFree(d4_IJNZ);
    cudaFree(d4_AFF);
    cudaFree(d4_AL);
    cudaFree(d4_ff);
    cudaFree(d4_srcs);
    cudaFree(diff4_v1);
    cudaFree(d4_v1);
    return iterations;
}
|
5,495 | #include "includes.h"
// Multiplies each weight component by (1 - learning_rate * weight_decay),
// vectorized four floats at a time. One thread per float4 element.
// Launch: 1-D grid covering elem_count float4 entries; excess threads exit.
__global__ void apply_weight_decay_util_kernel( const float4 * __restrict learning_rates, float4 * __restrict weights, float weight_decay, int elem_count)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= elem_count)
        return;
    float4 lr = learning_rates[i];
    float4 w  = weights[i];
    // Per-lane decay factor, applied in place.
    w.x *= 1.0F - lr.x * weight_decay;
    w.y *= 1.0F - lr.y * weight_decay;
    w.z *= 1.0F - lr.z * weight_decay;
    w.w *= 1.0F - lr.w * weight_decay;
    weights[i] = w;
}
5,496 | #include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand_kernel.h"
#include <memory>
#include <ctime>
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <algorithm>
#include <numeric>
using defer = std::shared_ptr<void>;
#define HEIGHT 32
#define WIDTH 32
#define hBLOCKS 6
#define wBLOCKS 6
//return resurl of this expression (x*x + y*y <= 1)
// Draws one uniform random point from curand and reports whether it falls
// inside the unit quarter-circle (x*x + y*y <= 1).
__device__ bool inCircle(curandState_t* state)
{
    const float px = curand_uniform(state);
    const float py = curand_uniform(state);
    return (px * px + py * py) <= 1.0f;
}
// One Monte-Carlo sample per thread: draw a random point and, if it lands in
// the unit circle, increment this thread's cell of the HEIGHT*WIDTH counter
// grid. result must be zeroed before launch (this kernel only adds).
__global__ void CalculatePointsIntheCircle(int* result, int randseed)
{
curandState_t state;
// Per-thread seed mixed from thread/block coordinates and the host-supplied
// time-based value. NOTE(review): threads that derive the same seed draw the
// same curand stream (same sequence/offset) — verify this mixing gives the
// spread intended.
unsigned long long seed = (threadIdx.x + blockDim.x * blockIdx.x) + (threadIdx.y + blockDim.y * blockIdx.y) * (randseed % 1000);
//init curand
curand_init(seed, 0, 0, &state);
if (inCircle(&state))
{
// Indexed by threadIdx only, so every block accumulates into the same
// HEIGHT*WIDTH cells — hence the atomicAdd.
atomicAdd(&result[threadIdx.x * HEIGHT + threadIdx.y], 1);
}
return;
}
// Estimates pi by Monte Carlo: each GPU thread samples one point, hit counts
// are summed on the host, and pi ~= 4 * hits / samples.
int main()
{
    const size_t size = WIDTH * HEIGHT;
    int count [size];
    memset(&count, 0, size * sizeof(int));
    int* dev_count;
    cudaMalloc((void**)&dev_count, size * sizeof(int));
    // BUGFIX: cudaMalloc does not zero device memory and the kernel only ever
    // atomicAdd's into it, so the counters previously started from garbage.
    cudaMemset(dev_count, 0, size * sizeof(int));
    // starting the timer here so that we include the cost of
    // all of the operations on the GPU.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //use un_ptr, that don`t forget free memory
    defer _(nullptr, [&](...)
            { cudaFree(dev_count); cudaEventDestroy(start); cudaEventDestroy(stop); printf("free"); });
    dim3 blocks(hBLOCKS, wBLOCKS, 1);
    dim3 threads(HEIGHT, WIDTH, 1);
    int randseed = std::chrono::duration_cast<std::chrono::milliseconds>
        (std::chrono::system_clock::now().time_since_epoch()).count();
    CalculatePointsIntheCircle <<<blocks, threads >>> (dev_count, randseed);
    cudaMemcpy(&count, dev_count, size * sizeof(int), cudaMemcpyDeviceToHost);
    // result pi
    int ans = 0;
    // BUGFIX: accumulate's end iterator is exclusive; &count[size - 1] dropped
    // the last counter. Use one-past-the-end.
    ans = std::accumulate(&count[0], &count[size], ans);
    float fullsize = static_cast<float>(HEIGHT * WIDTH * hBLOCKS * wBLOCKS);
    float pi = (4.0f * static_cast<float>(ans));
    pi /= fullsize;
    printf("Result pi %f \n", pi);
    //print elapsed time
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf( "Elapsed time %3.1f ms\n", elapsedTime );
    return 0;
}
5,497 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Intentionally racy counter: every thread performs a plain (non-atomic)
// read-modify-write on *a, so updates are lost and the value printed by
// main() is far below the 10240*512 threads launched. The "_noSync" name
// suggests this is a demonstration of lost updates; a correct count would
// use atomicAdd(a, 1).
__global__ void threadCounting_noSync(int *a){
(*a)++;
}
// Launches the racy counter kernel and prints the (undercounted) result.
int main(void){
    int hostCount = 0;
    int *devCounter;
    cudaMalloc((void**)&devCounter, sizeof(int));
    cudaMemset(devCounter, 0, sizeof(int)*1);
    threadCounting_noSync<<<10240,512>>>(devCounter);
    cudaDeviceSynchronize();
    cudaMemcpy(&hostCount, devCounter, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n",hostCount);
    cudaFree(devCounter);
}
|
5,498 | #include<iostream>
#include<stdio.h>
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Copies d_pixel[x_index] into d_temp[x_index], one element per thread.
// NOTE(review): this flattening does not yield unique indices across the
// grid — e.g. blockIdx (1,0) and (0,1) both map to the same x_index, so
// multiple threads write the same slot and most of a 16x16 image is never
// touched. The launch in main() is currently commented out; confirm the
// intended indexing before enabling it.
__global__ void evalJulia(double *d_pixel,
double *d_temp){
int x_index = threadIdx.x + 2*threadIdx.y + 4*(blockIdx.x + blockIdx.y);
d_temp[x_index] = d_pixel[x_index];
}
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Allocates a 16x16 test image on host and device; the kernel launch and
// copies are still commented out (scaffolding for the Julia-set experiment).
int main()
{
    double *d_pixel;
    double *d_temp;
    const int size = 16*16;
    //+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    // BUGFIX: `new double(size)` allocates ONE double initialized to `size`,
    // not an array; the init loop below then wrote 256 doubles past it
    // (heap corruption). Use array new and matching delete[].
    double *temp = new double[size];
    double *h_temp = new double[size];
    for (int y=0;y<16;y++)
        for(int x=0;x<16;x++)
        {
            temp[x + 16*y] = x + 16*y;
            std::cout<<temp[x+16*y]<<std::endl;
        }
    //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    std::cout<<"test begins"<<std::endl;
    dim3 threadsPerBlock(2,2);
    dim3 numBlocks(8,8);
    // BUGFIX: cudaMalloc/cudaMemcpy take byte counts; bare `size` allocated
    // only 256 bytes for 256 doubles.
    cudaMalloc((void**)&d_pixel, size * sizeof(double));
    cudaMalloc((void**)&d_temp, size * sizeof(double));
    //cudaMemcpy(d_pixel, temp, size * sizeof(double), cudaMemcpyHostToDevice);
    //+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    //evalJulia<<<numBlocks,threadsPerBlock>>>(d_pixel, d_temp);
    //cudaMemcpy(h_temp, d_temp, size * sizeof(double), cudaMemcpyDeviceToHost);
    //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    /* for (int y=0;y<2048;y++)
       for(int x=0;x<2048;x++)
       {std::cout<<temp[x+2048*y]<<std::endl;}*/
    std::cout<<"last kernel thread printed"<<std::endl;
    cudaFree(d_pixel);
    cudaFree(d_temp);
    delete[] h_temp;
    delete[] temp;
    return 0;
}
|
5,499 |
/*!
 * Compute the Pearson correlation of one cluster in a pairwise data array.
 * Only samples whose label equals `cluster` contribute.
 *
 * @param x first data vector (length sampleSize)
 * @param y second data vector (length sampleSize)
 * @param labels per-sample cluster labels (length sampleSize)
 * @param sampleSize number of samples
 * @param cluster cluster label to evaluate
 * @param minSamples minimum samples required; fewer yields NAN
 * @return Pearson r for the cluster, or NAN if n < minSamples
 */
__device__
float Pearson_computeCluster(
    const float *x,
    const float *y,
    const char *labels,
    int sampleSize,
    char cluster,
    int minSamples)
{
    // compute intermediate sums over samples belonging to this cluster
    int n = 0;
    float sumx = 0;
    float sumy = 0;
    float sumx2 = 0;
    float sumy2 = 0;
    float sumxy = 0;
    for ( int i = 0; i < sampleSize; ++i )
    {
        if ( labels[i] == cluster )
        {
            float x_i = x[i];
            float y_i = y[i];
            sumx += x_i;
            sumy += y_i;
            sumx2 += x_i * x_i;
            sumy2 += y_i * y_i;
            sumxy += x_i * y_i;
            ++n;
        }
    }
    // compute correlation only if there are enough samples
    float result = NAN;
    if ( n >= minSamples )
    {
        // sqrtf: single-precision overload; `sqrt` promoted the whole
        // expression to double in device code for no benefit.
        result = (n*sumxy - sumx*sumy) / sqrtf((n*sumx2 - sumx*sumx) * (n*sumy2 - sumy*sumy));
    }
    return result;
}
/*!
 * Compute the Pearson correlation of every cluster in a pairwise data array.
 * The data arrays hold only the clean samples extracted from the expression
 * matrix, while labels covers all samples.
 *
 * @param x first data vector
 * @param y second data vector
 * @param sampleSize number of samples
 * @param clusterSize number of clusters
 * @param labels per-sample cluster labels
 * @param minSamples minimum samples per cluster for a valid correlation
 * @param correlations output array, one value per cluster
 */
__device__
void Pearson_compute(
    const float *x,
    const float *y,
    int sampleSize,
    char clusterSize,
    const char *labels,
    int minSamples,
    float *correlations)
{
    // One correlation per cluster label 0..clusterSize-1.
    char cluster = 0;
    while ( cluster < clusterSize )
    {
        correlations[cluster] =
            Pearson_computeCluster(x, y, labels, sampleSize, cluster, minSamples);
        ++cluster;
    }
}
|
5,500 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#define BLOCKS_NUM 200
#define BLOCK_SIZE 256
#define DATA_TYPE int
// Element-wise sum of the first 50000 entries: out[i] = v1[i] + v2[i].
// Launch: BLOCKS_NUM blocks of BLOCK_SIZE threads; threads past 49999 exit.
__global__ void my_kernel( DATA_TYPE* v1, DATA_TYPE* v2, DATA_TYPE* out ){
    const unsigned int i = threadIdx.x + blockIdx.x * BLOCK_SIZE;
    if ( i < 50000 ){
        out[i] = v1[i] + v2[i];
    }
}
using namespace std;
// Fills two host vectors, adds them on the GPU, and prints the first and
// last five sums (each should be 50000).
int main(){
    // Host buffers sized to the full grid (slightly larger than 50000).
    DATA_TYPE hostA[ BLOCKS_NUM * BLOCK_SIZE ];
    DATA_TYPE hostB[ BLOCKS_NUM * BLOCK_SIZE ];
    for( int i = 0; i < 50000; ++i ){
        hostA[ i ] = i;
        hostB[ i ] = 50000 - i;
    }
    cudaSetDevice( 0 );
    const unsigned int numBytes = sizeof( DATA_TYPE ) * BLOCKS_NUM * BLOCK_SIZE;
    DATA_TYPE *devA, *devB, *devOut;
    cudaMalloc( (void**)&devA, numBytes );
    cudaMalloc( (void**)&devB, numBytes );
    cudaMalloc( (void**)&devOut, numBytes );
    cudaMemcpy( devA, hostA, numBytes, cudaMemcpyHostToDevice );
    cudaMemcpy( devB, hostB, numBytes, cudaMemcpyHostToDevice );
    my_kernel<<< dim3( BLOCKS_NUM ), dim3( BLOCK_SIZE ) >>>( devA, devB, devOut );
    cudaDeviceSynchronize();
    // Reuse hostA to receive the result.
    cudaMemcpy( hostA, devOut, numBytes, cudaMemcpyDeviceToHost );
    for (int i = 0; i < 5; ++i) cout << hostA[i] << endl;
    for (int i = 49995; i < 50000; ++i) cout << hostA[i] << endl;
    cin.get();
    cudaFree( devA );
    cudaFree( devB );
    cudaFree( devOut );
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.