serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
14,601 | #include <iostream>
#include <time.h>
#include <stdio.h>
#define gpuErrchck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA runtime error with its source location; exit unless
// abort is false. Used via the gpuErrchck(...) wrapper macro, which
// supplies __FILE__ / __LINE__ at the call site.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
// Fill n_ret_device[i] = i for 0 <= i < n on the device.
// Block-stride loop: thread t handles indices t, t+blockDim.x, ...
// Unlike the original tid*(n/bsize) slicing, this covers the tail when
// n is not divisible by blockDim.x, and adjacent threads touch adjacent
// elements (coalesced stores).
__global__ void gpu_loop(int n, int *n_ret_device)
{
int bsize = blockDim.x;
int tid = threadIdx.x;
for (int i = tid; i < n; i += bsize)
n_ret_device[i] = i;
}
// CPU reference: write each index into its own slot of n_ret_host.
void cpu_loop(int n, int *n_ret_host)
{
for (int idx = 0; idx < n; ++idx)
n_ret_host[idx] = idx;
}
// Time a 10M-element index fill on the GPU vs. the CPU.
int main()
{
int N = 10000000;
int *n_ret_host = new int[N];
int *n_ret_device = NULL;
gpuErrchck( cudaMalloc((void**)&n_ret_device,N*sizeof(int)) );
clock_t start, end;
double cpu_time_used;
start = clock();
// 1 block x 5 threads fill the device array.
gpu_loop<<< 1,5 >>>(N,n_ret_device);
// The blocking memcpy also waits for the kernel to finish.
gpuErrchck( cudaMemcpy((void*)n_ret_host, (void*)n_ret_device, N*sizeof(int), cudaMemcpyDeviceToHost) );
gpuErrchck( cudaDeviceSynchronize() );
end = clock();
cout<<n_ret_host[1]<<endl;
// NOTE(review): clock() measures host CPU time; the "GPU" figure below
// includes the synchronous device-to-host copy, not kernel time alone.
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
cout<<cpu_time_used<<endl;
start = clock();
cpu_loop(N,n_ret_host);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
cout<<cpu_time_used<<endl;
delete[] n_ret_host;
cudaFree((void*)n_ret_device);
return 0;
}
|
14,602 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define NUM_THREADS 512
//https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA runtime error with its source location; exit unless
// abort is false. Invoked through the gpuErrCheck(...) macro above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CPU bitonic sort of arr[0 .. 2^log_len - 1], ascending, in place.
// Classic network: sequence length doubles each phase, compare distance
// halves each step within a phase.
void bitonic_sort_default(int *arr, unsigned int log_len)
{
int size = 1 << log_len;
// FIX: the original advanced with `seq <<= 2` and `dist >>= 2`, which
// made the very first phase's inner loop run zero times (dist = 2>>2 = 0)
// and skipped half the phases; the device version of this algorithm
// (bitonic_sort_step's caller) correctly shifts by 1.
for (int seq = 2; seq <= size; seq <<= 1)
{
for (int dist = seq >> 1; dist > 0; dist >>= 1)
{
for (int item = 0; item < size; item++)
{
// pair_item == item when the dist bit is already set, making the
// comparison below a self-compare no-op, so each pair is handled
// exactly once (by its lower index). (item & seq) picks direction.
int pair_item = item | dist;
if (((item & seq) == 0) && (arr[item] > arr[pair_item])
|| ((item & seq) != 0) && (arr[item] < arr[pair_item]))
{
int temp = arr[item];
arr[item] = arr[pair_item];
arr[pair_item] = temp;
}
}
}
}
}
// One (seq, dist) step of bitonic sort; launch with exactly one thread
// per array element. pair_item = item | dist pairs each lower index with
// its partner dist away; when item already has the dist bit set the
// comparison is a self-compare no-op, so each pair is swapped at most
// once. (item & seq) selects the sort direction of the subsequence.
__global__ void bitonic_sort_step(int *arr, int dist, int seq)
{
int item = threadIdx.x + blockIdx.x * blockDim.x;
int pair_item = item | dist;
if (((item & seq) == 0) && (arr[item] > arr[pair_item])
|| ((item & seq) != 0) && (arr[item] < arr[pair_item]))
{
int temp = arr[item];
arr[item] = arr[pair_item];
arr[pair_item] = temp;
}
}
// Sort arr (length 2^exp) ascending on the GPU via bitonic sort.
// Launches one bitonic_sort_step per (seq, dist) phase; consecutive
// launches on the default stream are ordered, so no sync is needed
// between them, and the blocking D2H memcpy waits for all of them.
// NOTE(review): launch errors are never surfaced (no cudaGetLastError
// after the kernel launches).
void bitonic_sort_gpu(int *arr, unsigned int exp)
{
int arr_len = 1 << exp;
size_t arr_size = sizeof(int) * arr_len;
int* d_arr;
gpuErrCheck(cudaMalloc(&d_arr, arr_size));
gpuErrCheck(cudaMemcpy(d_arr, arr, arr_size, cudaMemcpyHostToDevice));
// One thread per element. For arrays smaller than one block, integer
// division would give 0 blocks, so fall back to arr_len 1-thread blocks.
int num_blocks = arr_len / NUM_THREADS;
int num_threads = NUM_THREADS;
if (arr_len / NUM_THREADS == 0)
{
num_blocks = arr_len;
num_threads = 1;
}
for (int seq = 2; seq <= arr_len; seq <<= 1)
{
for (int dist = seq >> 1; dist > 0; dist >>= 1)
{
bitonic_sort_step<<<num_blocks, num_threads>>>(d_arr, dist, seq);
}
}
gpuErrCheck(cudaMemcpy(arr, d_arr, arr_size, cudaMemcpyDeviceToHost));
cudaFree(d_arr);
}
|
14,603 | #include <stdio.h>
#include <cuda.h>
// Demo kernel for #pragma unroll: note the loop is NOT partitioned by
// threadIdx — every launched thread redundantly writes the entire array
// (a[ii] = ii + 1), which is benign here since all threads store the
// same values.
__global__ void K(int *a, int N) {
#pragma unroll 2
for (unsigned ii = 0; ii < N; ++ii) {
a[ii] = ii + 1;
}
}
// Launch the unroll demo kernel and report any CUDA error.
int main() {
int *a = NULL;
int N = 32;
cudaMalloc(&a, N * sizeof(int));
K<<<1, N>>>(a, N);
// Surface both launch-configuration errors and async execution errors.
cudaError_t err = cudaGetLastError();
if (err == cudaSuccess) err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
cudaFree(a);
return 1;
}
cudaFree(a); // device buffer was leaked in the original
return 0;
}
|
14,604 | #include "includes.h"
// Per-thread partial series sum: thread idx accumulates the terms
// k*0.5 / fact[k-1] for k = idx+1, idx+1+blockDim.x, ... (k < num_steps)
// into d_out[idx]. Intended for a single-block launch with one output
// slot per thread.
// NOTE(review): d_out[idx] is accumulated with +=, so the caller is
// presumably expected to zero-initialize d_out — confirm.
__global__ void square( int *d_num_steps, unsigned long long *d_fact, double *d_out){
int idx = threadIdx.x;
int num_steps = *d_num_steps;
// Block-stride loop over term indices k.
for(int k=idx+1; k< num_steps; k+=blockDim.x){
d_out[idx] += (double) k*0.5/ (double) d_fact[k-1];
}
}
14,605 | #include "includes.h"
// Scatter src into dest with a stride along the innermost dimension:
// treating src as rows of length ld_src, element (row, col) is written
// to dest[row*ld_dest + stride*col]. One thread per source element.
// NOTE(review): the gaps between strided writes in dest are left
// untouched, so dest is presumably pre-zeroed by the caller (typical for
// transposed-convolution input expansion) — confirm.
__global__ void fractional_stride_nchw(size_t num_values, size_t stride, float_t* src, float_t* dest, size_t ld_src, size_t ld_dest)
{
size_t index = blockIdx.x*blockDim.x + threadIdx.x;
if(index < num_values) // guard the grid tail
{
size_t frame_id = (index/ ld_src)*ld_dest + (stride)*(index%ld_src) ;
dest[frame_id] = src[index];
}
}
14,606 | #include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/generate.h>
#include <thrust/equal.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include <iostream>
#include <stdlib.h>
#define VIEWERS 5
#define NSORTS 2
#define DSIZE 10
// Counter backing my_mod() below (host-side generator state).
int my_mod_start = 0;
// Stateful generator: yields 0 for the first DSIZE calls, then 1 for the
// next DSIZE, etc. Used with thrust::generate to build segment ids.
int my_mod(){
return (my_mod_start++)/DSIZE;
}
// True when the two device vectors are element-wise equal
// (d2 must be at least as long as d1).
bool validate(thrust::device_vector<float> &d1, thrust::device_vector<float> &d2){
return thrust::equal(d1.begin(), d1.end(), d2.begin());
}
// Functor for nested (device-launched) sorting: invocation with
// start_idx sorts the dsize-element segment beginning at
// data + dsize*start_idx.
struct sort_functor
{
thrust::device_ptr<int> data; // base pointer of the whole buffer
int dsize; // elements per segment
__host__ __device__
void operator()(int start_idx)
{
thrust::sort(thrust::device, data+(dsize*start_idx), data+(dsize*(start_idx+1)));
}
};
#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL
// Microseconds since the Unix epoch, minus `start`.
// Call with 0 for a timestamp; call again with that timestamp for a delta.
unsigned long long dtime_usec(unsigned long long start){
timeval now;
gettimeofday(&now, 0);
unsigned long long usec = (now.tv_sec * USECPSEC) + now.tv_usec;
return usec - start;
}
// Pseudo-random float in [0, 1], drawn from libc rand().
float rand_self(){
float sample = (float) rand();
return sample / RAND_MAX;
}
// Benchmark two ways of sorting NSORTS segments of DSIZE floats:
// (1) a host loop of per-segment thrust::sort calls, and
// (2) a "vectorized" pair of stable_sort_by_key passes (sort by value,
// then stable-sort by segment id), which yields segment-wise sorted data.
int main(){
cudaDeviceSetLimit(cudaLimitMallocHeapSize, (16*DSIZE*NSORTS));
thrust::host_vector<float> h_data(DSIZE*NSORTS);
thrust::generate(h_data.begin(), h_data.end(), rand_self);
thrust::device_vector<float> d_data = h_data;
// first time a loop
thrust::device_vector<float> d_result1 = d_data;
thrust::device_ptr<float> r1ptr = thrust::device_pointer_cast<float>(d_result1.data());
unsigned long long mytime = dtime_usec(0);
for (int i = 0; i < NSORTS; i++)
thrust::sort(r1ptr+(i*DSIZE), r1ptr+((i+1)*DSIZE));
cudaDeviceSynchronize();
mytime = dtime_usec(mytime);
std::cout << "loop time: " << mytime/(float)USECPSEC << "s" << std::endl;
//vectorized sort
thrust::device_vector<float> d_result2 = d_data;
// Segment ids: DSIZE zeros, then DSIZE ones, ... (see my_mod).
thrust::host_vector<int> h_segments(DSIZE*NSORTS);
thrust::generate(h_segments.begin(), h_segments.end(), my_mod);
thrust::device_vector<int> d_segments = h_segments;
mytime = dtime_usec(0);
// Pass 1: globally sort the values, carrying the segment ids along.
thrust::stable_sort_by_key(d_result2.begin(), d_result2.end(), d_segments.begin());
thrust:: host_vector<int> h_rank = d_segments;
thrust:: host_vector<float> h_dd = d_result2;
for(int i = 0; i < DSIZE*NSORTS; i ++){
if(i%DSIZE==0){
printf("------\n");
}
printf("---key: %f rank: %d ---\n", h_dd[i], h_rank[i]);
}
cudaDeviceSynchronize();
// Pass 2: stable-sort by segment id, restoring segment grouping while
// keeping each segment's values sorted.
thrust::stable_sort_by_key(d_segments.begin(), d_segments.end(), d_result2.begin());
cudaDeviceSynchronize();
float *hd_data= (float *) malloc(sizeof(float)*DSIZE*NSORTS);
float *raw_ptr = thrust::raw_pointer_cast(d_result2.data());
cudaMemcpy(hd_data, raw_ptr, sizeof(float)*DSIZE*NSORTS, cudaMemcpyDeviceToHost);
for(int i = 0; i < 2*DSIZE;i++){
if(i%DSIZE==0){
printf("---------\n");
}
printf("data: %f sorted: %f\n", h_data[i], hd_data[i]);
}
// NOTE(review): this stop point includes the two host-side printf loops
// and device-to-host copies above, so the "vectorized time" printed is
// not a pure sort measurement.
mytime = dtime_usec(mytime);
std::cout << "vectorized time: " << mytime/(float)USECPSEC << "s" << std::endl;
if (!validate(d_result1, d_result2)) std::cout << "mismatch 1!" << std::endl;
// //nested sort
// thrust::device_vector<int> d_result3 = d_data;
// sort_functor f = {d_result3.data(), DSIZE};
// thrust::device_vector<int> idxs(NSORTS);
// thrust::sequence(idxs.begin(), idxs.end());
// mytime = dtime_usec(0);
// thrust::for_each(idxs.begin(), idxs.end(), f);
// cudaDeviceSynchronize();
// mytime = dtime_usec(mytime);
// std::cout << "nested time: " << mytime/(float)USECPSEC << "s" << std::endl;
// if (!validate(d_result1, d_result3)) std::cout << "mismatch 2!" << std::endl;
return 0;
}
14,607 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 1
// Print square matrix A (row-major, N x N) in MATLAB-like form.
// Matrices larger than 8x8 are skipped to keep output readable.
// Always returns 0.
int printMatrix(int *A, int N){
if(N > 8) return 0;
printf("\nA = \n\t");
for(int r = 0; r < N; ++r){
for(int c = 0; c < N; ++c){
const char *sep = (c < N-1) ? ", " : ";\n\t";
printf("%d%s", A[r * N + c], sep);
}
}
return 0;
}
// Column-wise standardization: for each column, compute the mean mu and
// population standard deviation sigma over the N rows, then write
// B[row][col] = (A[row][col] - mu) / sigma (or 0 when sigma == 0).
// One thread per column; launch with at least N threads total.
// NOTE(review): B is an int matrix, so the standardized (typically
// fractional) values are truncated on store — confirm this is intended.
__global__ void gpu_matrixNorm(int *A, int *B, int N){
int row, col;
float mu, sigma;
mu = 0.0;
col = blockIdx.x * blockDim.x + threadIdx.x;
if(col < N){
for(row = 0; row < N; ++row){
mu += A[row * N + col];
}
mu /= (float) N;
sigma = 0.0;
for(row = 0; row < N; ++row){
sigma += powf(A[row * N + col] - mu, 2.0);
}
sigma /= (float) N;
sigma = sqrt(sigma);
for(row = 0; row < N; ++row){
if(sigma == 0.0) B[row * N + col] = 0.0;
else B[row * N + col] = (A[row * N + col] - mu) / sigma;
}
}
}
// Generate a random N x N matrix, column-normalize it on the GPU, and
// print input/output (printMatrix no-ops for N > 8). Returns 0.
int matrixNorm(){
int N = 6000; // Matrix size
srand(300); // Fixed seed for reproducibility
size_t bytes = (size_t)N * N * sizeof(int);
// allocate memory in host RAM
int *A = (int*)malloc(bytes);
int *B = (int*)malloc(bytes);
// generate matrix A
for(int i = 0; i < N; ++i){
for(int j = 0; j < N; ++j){
A[i * N + j] = rand() / 32768;
}
}
printMatrix(A, N);
printMatrix(B, N);
// allocate memory on the device
int *d_a, *d_b;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
// One bulk host-to-device copy. The original issued N*N separate
// 4-byte cudaMemcpy calls (36M API round-trips) — each tiny copy pays
// full driver + PCIe latency.
cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
// The kernel assigns one thread per COLUMN and loops over rows
// internally, so only N threads are needed (the original launched
// enough blocks for N*N threads, all but N of them idle).
int numThreads = 128;
int numBlocks = (N + numThreads - 1) / numThreads;
gpu_matrixNorm<<<numBlocks, numThreads>>>(d_a, d_b, N);
// Blocking device-to-host copy; also waits for the kernel to finish.
cudaMemcpy(B, d_b, bytes, cudaMemcpyDeviceToHost);
printMatrix(B, N);
cudaFree(d_a);
cudaFree(d_b);
free(A);
free(B);
return 0;
}
// Entry point: time matrixNorm() end-to-end with gettimeofday.
int main(int argc, char const *argv[]){
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
unsigned long long time;
gettimeofday(&etstart, &tzdummy);
matrixNorm();
gettimeofday(&etstop, &tzdummy);
// Wall-clock microseconds between the two samples, printed as ms.
time = (unsigned long long)(etstop.tv_sec - etstart.tv_sec)*1000000+(etstop.tv_usec - etstart.tv_usec);
printf("Runtime = %g ms.\n", (float)time/(float)1000);
}
|
14,608 | #include<stdio.h>
#include<sys/time.h>
#include<stdlib.h>
#include<iostream>
using namespace std;
//----------------------------------- Structures and Globals---------------------------------------------
typedef struct {
int dimension1;
int dimension2;
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void computeGpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(cudaError e);
//----------------------------------- CUDA function definitions -----------------------------------------
//-------------------------------------------------------------------------------------------------------
// Driver: read optional matrix dimensions from argv, run the CPU
// multiply and the "GPU" multiply, time both with clock(), and compare
// the outputs.
int main(int argc, char **argv) {
// Dimensions default to 100 and cascade so the shapes stay conformable.
A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
C_MD.dimension1 = A_MD.dimension1;
C_MD.dimension2 = B_MD.dimension2;
printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);
allocateAndInitializeAB();
// matrix matrix multiplication in the CPU
clock_t start = clock();
computeCpuMMM();
clock_t end = clock();
double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the CPU: %f seconds\n", elapsed);
// NOTE(review): copyMatricesToGPU()/copyResultFromGPU() are never
// called here; see computeGpuMMM() — it runs on the host.
start = clock();
computeGpuMMM();
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the GPU: %f seconds\n", elapsed);
compareHostAndGpuOutput();
return 0;
}
// allocate and initialize A and B using a random number generator
// Allocate A and B plus same-size mirrors A_GPU/B_GPU and fill both
// copies with identical random values in [0, 0.999].
// NOTE(review): despite the names, A_GPU/B_GPU are allocated HERE with
// host malloc. copyMatricesToGPU() would overwrite those pointers with
// cudaMalloc'd device memory (leaking these buffers), but main() as
// written never calls it and computeGpuMMM() reads these host mirrors.
void allocateAndInitializeAB() {
float val;
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
A = (float*) malloc(sizeofA);
A_GPU = (float*) malloc(sizeofA);
srand(time(NULL));
for (int i = 0; i < A_MD.dimension1; i++) {
for (int j = 0; j < A_MD.dimension2; j++) {
int index = i * A_MD.dimension2 + j;
val = (rand() % 1000) * 0.001;
A[index] = val;
A_GPU[index] = val;
}
}
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
B = (float*) malloc(sizeofB);
B_GPU = (float*) malloc(sizeofB);
for (int i = 0; i < B_MD.dimension1; i++) {
for (int j = 0; j < B_MD.dimension2; j++) {
int index = i * B_MD.dimension2 + j;
val = (rand() % 1000) * 0.001;
B[index] = val;
B_GPU[index] = val;
}
}
}
// allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
// Allocate device buffers for A, B and C and upload A and B.
// NOTE(review): reassigns the global A_GPU/B_GPU/C_GPU pointers; if
// called after allocateAndInitializeAB() (which host-mallocs them) the
// host buffers leak. main() currently never calls this function.
void copyMatricesToGPU() {
size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &A_GPU, sizeofA));
check_error(cudaMemcpy(A_GPU, A, sizeofA, cudaMemcpyHostToDevice));
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &B_GPU, sizeofB));
check_error(cudaMemcpy(B_GPU, B, sizeofB, cudaMemcpyHostToDevice));
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
check_error(cudaMalloc((void **) &C_GPU, sizeofC));
}
// copy results from C_GPU which is in GPU card memory to C_CPU which is in the host CPU for result comparison
// Allocate C_CPU on the host and download the result from C_GPU.
// NOTE(review): assumes C_GPU is device memory (i.e. copyMatricesToGPU
// ran); with the current main(), C_GPU is host-malloc'd and this
// DeviceToHost copy would fail. main() never calls this function.
void copyResultFromGPU() {
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_CPU = (float*) malloc(sizeofC);
check_error(cudaMemcpy(C_CPU, C_GPU, sizeofC, cudaMemcpyDeviceToHost));
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
// C = A * B on the host (naive triple loop over the global matrices).
// Allocates the global result buffer C as a side effect.
void computeCpuMMM() {
// allocate the result matrix for the CPU computation
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C = (float*) malloc(sizeofC);
// C[i][j] = sum over k of A[i][k] * B[k][j]
for (int row = 0; row < A_MD.dimension1; row++) {
int aRow = row * A_MD.dimension2;
int cRow = row * C_MD.dimension2;
for (int col = 0; col < B_MD.dimension2; col++) {
float acc = 0;
for (int k = 0; k < B_MD.dimension1; k++) {
acc += A[aRow + k] * B[k * B_MD.dimension2 + col];
}
C[cRow + col] = acc;
}
}
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
// NOTE(review): despite its name, this function runs ENTIRELY ON THE
// HOST: it mallocs C_GPU in host memory and performs the same triple
// loop as computeCpuMMM over the host-resident A_GPU/B_GPU mirrors
// (see allocateAndInitializeAB). No kernel is launched, so the "GPU"
// timing printed by main() measures CPU work.
void computeGpuMMM() {
// allocate the result matrix for the CPU computation
size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_GPU = (float*) malloc(sizeofC);
// compute C[i][j] as the sum of A[i][k] * B[k][j] for all columns k of A
for (int i = 0; i < A_MD.dimension1; i++) {
int a_i = i * A_MD.dimension2;
int c_i = i * C_MD.dimension2;
for (int j = 0; j < B_MD.dimension2; j++) {
int c_index = c_i + j;
C_GPU[c_index] = 0;
for (int k = 0; k < B_MD.dimension1; k++) {
int a_index = a_i + k;
int b_index = k * B_MD.dimension2 + j;
C_GPU[c_index] += A_GPU[a_index] * B_GPU[b_index];
}
}
}
}
// function to determine if the GPU computation is done correctly by comparing the output from the GPU with that
// from the CPU
// Compare the CPU result C against C_GPU element-wise with an absolute
// tolerance of 0.01 and report the number of mismatches.
void compareHostAndGpuOutput() {
int totalElements = C_MD.dimension1 * C_MD.dimension2;
int missmatchCount = 0;
for (int i = 0; i < totalElements; i++) {
// (removed: an unconditional printf of every index, which printed
// one line per element regardless of match status)
if (fabs(C[i] - C_GPU[i]) > 0.01) {
missmatchCount++;
// FIX: report the value actually compared (C_GPU[i]); the
// original printed C_CPU[i], a different buffer that is never
// filled in the current call path.
printf("mismatch at index %i: %f\t%f\n", i, C[i], C_GPU[i]);
}
}
if (missmatchCount > 0) {
printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
} else {
printf("Computation is correct: CPU and GPU outputs match\n");
}
}
// Prints the specified error message and then exits
// Print the given error message and terminate with a failure exit code.
void die(const char *error) {
printf("%s", error);
exit(1);
}
// If the specified error code refers to a real error, report it and quit the program
// Abort the program with a readable message if `e` is a real CUDA error.
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
|
14,609 | #include <stdio.h>
#include <cuda.h>
//Code written by Alan Fleming
// P = M * N for square width x width matrices (row-major), on the CPU.
void mul_matrix_cpu(int *M, int *N, int *P, int width){
for( int row = 0; row < width; row++){
for( int col = 0; col < width; col++){
int acc = 0;
for (int k = 0; k < width; k++)
acc += M[row * width + k] * N[k * width + col];
P[row * width + col] = acc;
}
}
}
// P = M * N for width x width row-major matrices; one thread per output
// element. Launch with a 2-D grid covering at least width x width
// threads; the guard handles the partial tiles at the edges.
__global__ void mul_matrix_gpu(int *M, int *N, int *P, int width){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if( row < width && col < width) {
int pValue = 0;
for( int k = 0; k < width; ++k){
pValue += M[row * width + k] * N[k * width + col];
}
P[row * width + col] = pValue;
}
}
// Print the N x N matrix m, one row per line, space-separated.
void printMatrix(int *m, int N){
for( int row = 0; row < N; row++){
for( int col = 0; col < N; col++)
printf("%d ", m[row * N + col]);
printf("\n");
}
}
// Compare two N x N matrices element-wise; print a verdict and return
// 0 on match, 1 on the first mismatch.
int verifyMatrix(int *a, int *b, int N){
int total = N * N;
for( int idx = 0; idx < total; idx++){
if(a[idx] != b[idx]){
printf("TEST FAILED\n");
return 1;
}
}
printf("TEST PASSED\n");
return 0;
}
// Driver: multiply two MATRIXSIZE x MATRIXSIZE matrices on CPU and GPU
// and verify the results agree. Usage: prog <matrix size> <block size>.
int main(int argc, char *argv[]){
//check number of arguments
if(argc <= 2) {
printf("Please supply matrix size and block size");
return 1;
}
//assign Matrix and block size
const int MATRIXSIZE = atoi(argv[1]);
const int BLOCKSIZE = atoi(argv[2]);
//allocate system memory for array
int *a = (int *)malloc(sizeof(int) * MATRIXSIZE * MATRIXSIZE ); //first matrix
int *b = (int *)malloc(sizeof(int) * MATRIXSIZE * MATRIXSIZE ); //second matrix
int *c = (int *)malloc(sizeof(int) * MATRIXSIZE * MATRIXSIZE ); //result from CPU
int *d = (int *)malloc(sizeof(int) * MATRIXSIZE * MATRIXSIZE ); //result from gpu
//initialize a and b with a deterministic pseudo-random sequence
int init = 1325;
for( int i = 0; i < MATRIXSIZE; i++){
for( int j = 0; j < MATRIXSIZE; j++){
init = 3125 * init % 65536;
a[ i * MATRIXSIZE + j ] = (init - 32768)/6553;
b[ i * MATRIXSIZE + j ] = init % 1000;
}
}
//print initial matrix a and b
printf("a \n --------------------- \n");
printMatrix(a, MATRIXSIZE);
printf("b \n --------------------- \n");
printMatrix(b, MATRIXSIZE);
//multiply matrix using cpu (reference result)
mul_matrix_cpu(a, b, c, MATRIXSIZE);
printf("c \n --------------------- \n");
printMatrix(c, MATRIXSIZE);
//allocate memory on device
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **)(&dev_a),MATRIXSIZE * MATRIXSIZE * sizeof(int));
cudaMalloc((void **)(&dev_b),MATRIXSIZE * MATRIXSIZE * sizeof(int));
cudaMalloc((void **)(&dev_c),MATRIXSIZE * MATRIXSIZE * sizeof(int));
//copy memory to device
cudaMemcpy(dev_a,a, MATRIXSIZE * MATRIXSIZE * sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b, MATRIXSIZE * MATRIXSIZE * sizeof(int),cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
// FIX: ceil-divide so every row/column gets a thread. The original
// computed ceil((MATRIXSIZE-1)/BLOCKSIZE), which drops the last tile
// whenever MATRIXSIZE is one more than a multiple of BLOCKSIZE
// (e.g. size 17, block 16 -> 1 block covering only 16 rows).
int gridWidth = (MATRIXSIZE + BLOCKSIZE - 1) / BLOCKSIZE;
dim3 dimGrid(gridWidth, gridWidth,1);
//multiply matrix using gpu
mul_matrix_gpu<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, MATRIXSIZE);
//copy memory from device (blocking copy also waits for the kernel)
cudaMemcpy(d,dev_c, MATRIXSIZE * MATRIXSIZE * sizeof(int),cudaMemcpyDeviceToHost);
printf("d \n --------------------- \n");
printMatrix(d, MATRIXSIZE);
//verify the results
verifyMatrix(c, d, MATRIXSIZE);
//free memory
free(a);
free(b);
free(c);
free(d);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
14,610 | #include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include"device_launch_parameters.h"
#include"cuda_runtime.h"
#include"cuda_runtime_api.h"
using namespace std;
int n, m, p, q;
const int szB = 3550, szL = 150;
double *matrix, *kernel, *result;
// Parse a non-negative integer from stdin, stopping at (and consuming)
// the first non-digit character.
// NOTE(review): `f` is initialized to 1 and never changed, so no sign
// handling actually happens; a leading non-digit terminates the parse
// immediately (it is not skipped) and 0 is returned.
inline int read() {
char c = getchar();
int x = 0, f = 1;
while (isdigit(c))
x = (x << 3) + (x << 1) + (c^48), c = getchar();
return x * f;
}
// Parse a double from stdin into r. Skips leading non-digits; a '-'
// flips the sign, a '.' jumps straight to the fractional part, and a
// ',' aborts the parse leaving r unchanged.
inline void read(double &r)
{
double x=0,t=0;int s=0,f=1;char c=getchar();// x = integer part, t = fraction digits
for (;!isdigit(c);c=getchar())
{
if (c=='-') f=-1;// minus sign flips the result
if (c=='.') goto readt;// decimal point seen: parse the fraction directly
if (c == ',') return ;
}
for (;isdigit(c)&&c!='.';c=getchar()) x=x*10+c-'0';// integer part
readt:for (;c=='.';c=getchar());// skip the decimal point
for (;isdigit(c);c=getchar()) t=t*10+c-'0',++s;// fraction digits; s counts them
r=(x+t/pow(10,s))*f;// t / 10^s converts the digits into the fractional value
}
// Read the problem from input.txt (redirected onto stdin): matrix size
// n x m, kernel size p x q, then the matrix and kernel values. Rows are
// stored with leading dimensions szB (matrix) and szL (kernel).
// Assumes the global matrix/kernel buffers are already allocated.
void input() {
freopen("input.txt", "r", stdin);
// The bare read() calls between dimensions consume separator tokens.
n = read();m=read();read();p = read();q = read();read();
// cout << n << ' ' << m << ' ' << p << ' ' << q << endl;
for (int i = 0; i < n; i++){
for (int j = 0; j < m; j++) {
read(matrix[i * szB + j]);
// printf("%.3f ", matrix[i*szB+j]);
}
// printf("\n");
// scanf("%lf,", &matrix[i*szB+j]);
// scanf("%lf\n", &matrix[i*szB+m-1]);
}
for (int i = 0; i < p; i++){
for (int j = 0; j < q; j++)
read(kernel[i*szL+j]);
// scanf("%lf,", &kernel[i*szL+j]);
// scanf("%lf\n", &kernel[i*szL+q-1]);
}
// printf("input done\n");
}
// Write the central n x m window of the full-convolution result to
// output.txt as comma-separated rows with 3 decimal places. The
// (p-1)/2, (q-1)/2 offsets crop the "same"-size region out of the
// (n+p-1) x (m+q-1) full result.
void output() {
FILE* fp = fopen("output.txt", "w");
for (int i = 0; i < n; i++){
for (int j = 0; j < m-1; j++)
fprintf(fp, "%.3f,", result[(i+(p-1)/2)*szB+j+(q-1)/2]);
fprintf(fp, "%.3f\n", result[(i+(p-1)/2)*szB+m-1+(q-1)/2]);
}
fclose(fp);
}
// Max threads per block supported by device 0.
int getThreadNum()
{
// (removed: an unused cudaGetDeviceCount call whose result was never
// read)
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
return prop.maxThreadsPerBlock;
}
// Full 2-D convolution of the n x m matrix with the p x q kernel.
// One thread per element of the (n+p-1) x (m+q-1) full result. Rows use
// leading dimension szB (matrix/result) and szL (kernel), the file-scope
// constants.
// NOTE(review): the k/l loops clamp only the lower bound, so for outputs
// near the bottom/right border k can reach beyond row n-1 (and l beyond
// m-1), reading the slack of the szB x szB buffer — presumably expected
// to be zero; the host buffers are malloc'd, not zeroed. Confirm.
__global__ void conv(double *matrix, double *kernel, double *result, int n, int m, int p, int q){
int ti = threadIdx.x;
int bi = blockIdx.x;
int id = (bi * blockDim.x + ti);
if(id < (n+p-1) * (m+q-1)){
int i = id / (m+q-1);
int j = id % (m+q-1);
double tmp = 0.0;
// Sum matrix[k][l] * kernel[i-k][j-l] over the overlap region.
for(int k = max(0, i-p+1); k <= i; k++)
for(int l = max(0, j-q+1); l <= j; l++)
tmp += matrix[k*szB+l] * kernel[(i-k)*szL + j-l];
result[i*szB+j] = tmp;
}
}
// Host driver: allocate the fixed-size buffers, read the problem, run
// the convolution on the GPU, and write the cropped result.
// NOTE(review): host allocations (matrix/result/kernel) are never freed
// and the copies transfer the full szB*szB / szL*szL buffers regardless
// of the actual n, m, p, q read from the input.
int main()
{
double *matrixGpu;
double *kernelGpu;
double *resultGpu;
matrix = (double*)malloc(sizeof(double)*szB*szB);
result = (double*)malloc(sizeof(double)*szB*szB);
kernel = (double*)malloc(sizeof(double)*szL*szL);
cudaMalloc((void**)&matrixGpu, szB*szB*sizeof(double));
cudaMalloc((void**)&kernelGpu, szL*szL*sizeof(double));
cudaMalloc((void**)&resultGpu, szB*szB*sizeof(double));
input();
cudaMemcpy(matrixGpu, matrix, 3550 *3550 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(kernelGpu, kernel, 150*150*sizeof(double), cudaMemcpyHostToDevice);
// One thread per output element of the (n+p-1) x (m+q-1) result.
int threadNum = getThreadNum();
int blockNum = ((n+p-1) * (m+q-1) - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(matrixGpu, kernelGpu, resultGpu, n, m, p, q);
// Blocking copy; also waits for the kernel to complete.
cudaMemcpy(result, resultGpu, 3550*3550 * sizeof(double), cudaMemcpyDeviceToHost);
output();
cudaFree(matrixGpu);
cudaFree(kernelGpu);
cudaFree(resultGpu);
return 0;
}
|
14,611 | #include "includes.h"
// Group-average: for each of the current_size positions, sum the
// `groups` values strided current_size apart in src_gpu and write the
// average back into every corresponding group slot of dst_gpu.
// One thread per position; launch with at least current_size threads.
__global__ void reduce_and_expand_array_kernel(const float *src_gpu, float *dst_gpu, int current_size, int groups)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < current_size) { // guard the grid tail
float val = 0;
for (int i = 0; i < groups; ++i) {
val += src_gpu[index + i*current_size];
}
for (int i = 0; i < groups; ++i) {
dst_gpu[index + i*current_size] = val / groups;
}
}
}
14,612 | #include <stdio.h>
#include "cuda_runtime.h"
#define M 8
#define N 12
extern __shared__ int shared[];
// Each block copies row blockIdx.x of `data` (n ints) into dynamic
// shared memory (thread 0 does the copy), then every thread writes its
// element shifted left by one: data[row][t] = row[t+1]. Launched with
// n-1 threads per block, so the t_s[... + threadIdx.x + 1] read stays
// inside the copied row.
// NOTE(review): shared memory is indexed at offset blockIdx.x*n even
// though each block has its own shared allocation, so the launch must
// provide m*n ints of dynamic shared memory per block while each block
// uses only its own n-int slice — wasteful but matches the caller.
__global__ void func(int*data, int m,int n){
int * t_s = shared;
// __shared__ int t_s[12];
if(threadIdx.x==0){
memcpy(t_s + blockIdx.x*n,data + blockIdx.x*n,sizeof(int)*n);
}
__syncthreads(); // make the copied row visible to all threads
data[blockIdx.x*n + threadIdx.x] =
t_s[blockIdx.x*n + threadIdx.x+1];
}
// Demo: fill an M x N matrix with 1..M*N, have each block shift its row
// left by one element via shared memory, and print before/after.
int main(){
int h_d[M*N];
int *d_d;
for(int i=0;i<M*N;i++)
h_d[i]=i+1;
for(int i=0;i<M;i++){
for(int j=0;j<N;j++)
printf("%2d ",h_d[i*N+j]);
printf("\n");
}
printf("\n");
cudaMalloc((void**)&d_d,sizeof(int)*M*N);
cudaMemcpy(d_d,h_d,sizeof(int)*M*N,cudaMemcpyHostToDevice);
// M blocks of N-1 threads; the kernel indexes its shared buffer at
// offset blockIdx.x*N, so each block needs the full M*N-int dynamic
// shared allocation.
func<<<M,N-1,sizeof(int)*M*N>>>(d_d,M,N);
// FIX: cudaThreadSynchronize() is deprecated; use the modern
// equivalent.
cudaDeviceSynchronize();
memset(h_d,0,sizeof(int)*M*N);
cudaMemcpy(h_d,d_d,sizeof(int)*M*N,cudaMemcpyDeviceToHost);
for(int i=0;i<M;i++){
for(int j=0;j<N;j++)
printf("%2d ",h_d[i*N+j]);
printf("\n");
}
printf("\n");
cudaFree(d_d); // device buffer was leaked in the original
return 0;
}
|
14,613 | #include "includes.h"
// One block per (data, output) buffer pair: block b writes
//   output_b[0] = 0 and output_b[1] = data_b[0]
// i.e. a 2-element exclusive scan of a single value, for 8 independent
// pairs. Only the first two threads of each block participate; blocks
// beyond index 7 do nothing.
__global__ void exclscnmb2e(int *d_data0, int *d_output0, int *d_data1, int *d_output1, int *d_data2, int *d_output2, int *d_data3, int *d_output3, int *d_data4, int *d_output4, int *d_data5, int *d_output5, int *d_data6, int *d_output6, int *d_data7, int *d_output7) {
const int twid=threadIdx.x;
// Table-driven dispatch in place of the original 8-way switch.
int *src[8] = {d_data0, d_data1, d_data2, d_data3,
d_data4, d_data5, d_data6, d_data7};
int *dst[8] = {d_output0, d_output1, d_output2, d_output3,
d_output4, d_output5, d_output6, d_output7};
const int b = blockIdx.x;
if (b < 8 && twid < 2) {
dst[b][twid] = src[b][0] * twid;
}
}
14,614 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
// kernel to compute cumulative sum
// In-place inclusive prefix sum over d (Hillis-Steele style).
// Launch as a single block of count-1 threads for a count-element array:
// thread t updates d[t + step], so the highest index touched stays in
// bounds.
__global__ void runningSum(int * d) {
int threads = blockDim.x;
int tid = threadIdx.x;
// tc = thread count allowed to participate
// drop # of threads according to step size, then double step each iteration
// (tc, step) evolve identically in every thread, so all threads run the
// same number of iterations and the barriers below are safe.
for (int tc=threads, step=1; tc>0; tc-=step, step*=2) {
// FIX: the original did `d[tid + step] += d[tid]` with no
// synchronization — thread t reads d[t] while thread t-step writes
// it, a data race. Snapshot the operand first, then barrier, then
// store, with a second barrier before the next iteration's reads.
int v = 0;
if (tid < tc) {
v = d[tid];
}
__syncthreads();
if (tid < tc) {
d[tid + step] += v;
}
__syncthreads();
}
}
// Host driver: fill h[i] = i+1 for 16 elements, run runningSum on the
// device (one block, count-1 threads), and print the result.
int main() {
// initialize elements to sum
const int count = 16;
const int size = count * sizeof(int);
int h[count];
for (int i=0; i<count; i++) {
h[i] = i+1;
}
// initialize array on device
int *d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
// run the calculation
// count-1 threads: thread t writes d[t+step], keeping the highest
// store inside the count-element array.
runningSum<<<1,count-1>>>(d);
// copy results back to host
// (the blocking memcpy also waits for the kernel to finish)
cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
cudaFree(d);
d = 0;
// print results
for (int i=0; i<count; i++) {
printf("%d ", h[i]);
}
}
14,615 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#ifdef A
#define DATASET 1
#define SIZE 600
#define DIMENSIONS 2
#else
#define DATASET 2
#define SIZE 210
#define DIMENSIONS 7
#endif
// Mean-shift iteration on the GPU.
// Launch: SIZE blocks x SIZE threads. Block b owns query point y[b];
// thread t computes the kernel weight of source point x[t] against y[b]
// into kernel[] and the weighted point into i_vectors[]. Thread 0 of
// each block then aggregates the weighted mean, updates y[b] in place,
// and stores the shift magnitude in norm[b]; the whole block loops until
// that magnitude drops below 1e-6.
// NOTE(review): the weight uses exp(-d2 / (2*h)) — a squared distance
// divided by an UNsquared bandwidth (h rather than h*h); confirm this
// matches the intended Gaussian kernel.
__global__ void mean_shift(double *y, double *x, double *h, double *kernel, double *i_vectors, double *norm)
{
//Thread scoped variables.
int basic_index = blockIdx.x * (DIMENSIONS);
int index = threadIdx.x * (DIMENSIONS);
double e = 0.000001;
double factor;
//Initializing the norm.
norm[blockIdx.x] = 1.0;
//Main loop.
while (norm[blockIdx.x] > e) {
factor = 0.0;
//Calculating factor (squared distance between y[block] and x[thread]).
for (int i = 0; i < (DIMENSIONS); i++)
{
factor += (y[basic_index + i] - x[index + i])*(y[basic_index + i] - x[index + i]);
}
if (factor > *h*(*h))
{
// Outside the bandwidth: zero weight and zero contribution.
kernel[blockIdx.x * SIZE + threadIdx.x] = 0.0;
for (int i = 0; i < (DIMENSIONS); i++)
{
i_vectors[blockIdx.x * SIZE * DIMENSIONS + index+i] = 0.0;
}
}
else
{
factor = exp(-(factor)/(2 * (*h)));
kernel[blockIdx.x * SIZE + threadIdx.x] = factor;
//Storing the 'y' vectors before normalization.
for (int i = 0; i < (DIMENSIONS); i++)
{
i_vectors[blockIdx.x * SIZE * DIMENSIONS + index+i] = factor * x[index + i];
}
}
__syncthreads();
//Only the first thread of each block will calculate
//the final vectors.
if (threadIdx.x == 0)
{
double y_new[DIMENSIONS];
for (int i = 0; i < (DIMENSIONS); i++)
{
y_new[i] = 0.0;
}
double sum = 0.0;
// Sum the weighted contributions of all SIZE source points.
for (int i = 0; i < (SIZE) * (DIMENSIONS); i += (DIMENSIONS))
{
for (int j = 0; j < (DIMENSIONS); j++)
{
y_new[j] += i_vectors[blockIdx.x * SIZE * DIMENSIONS + i + j];
}
}
//Calculating the sum of each row of the kernel_matrix.
for (int i = 0; i < (SIZE); i++)
{
sum += kernel[blockIdx.x * SIZE + i];
}
//Normalizing and updating the 'y' vector.
for (int i = 0; i < (DIMENSIONS); i++)
{
y_new[i] = y_new[i] / sum;
}
//Re-initializing norm.
norm[blockIdx.x] = 0.0;
//Calculating the norm of the vector
//and updating the 'y' vector
for (int i = 0; i < (DIMENSIONS); i++)
{
norm[blockIdx.x] += (y_new[i]-y[basic_index + i])*(y_new[i]-y[basic_index + i]);
y[basic_index + i] = y_new[i];
}
norm[blockIdx.x] = sqrt(norm[blockIdx.x]);
}
// All threads re-read norm[blockIdx.x] for the loop condition only
// after thread 0 has published the update.
__syncthreads();
}
}
// Host driver: load SIZE points of DIMENSIONS doubles from a binary
// file, run one mean_shift launch (SIZE blocks x SIZE threads), time it,
// and write the shifted points to a binary results file. The bandwidth
// h comes from argv[1] or a per-dataset default.
int main(int argc, char **argv)
{
FILE *file;
char data_file_name[64], results_file_name[64];
double *y, *x, *z, h, *kernel, *i_vectors, *norm;
double *d_y, *d_x, *d_h, *d_kernel, *d_i_vectors, *d_norm;
int size_double = SIZE * DIMENSIONS * sizeof(double);
if (argc == 2)
{
h = atof(argv[1]);
}
else
{
if (DATASET == 1)
{
h = 1.0;
}
else
{
h = 2.125;
}
}
if (DATASET == 1)
{
sprintf(data_file_name, "data_cuda.bin");
sprintf(results_file_name, "results_global.bin");
}
else
{
sprintf(data_file_name, "data_cuda_seeds.bin");
sprintf(results_file_name, "results_global_seeds.bin");
}
y = (double *)malloc(size_double);
x = (double *)malloc(size_double);
z = (double *)malloc(size_double);
kernel = (double *)malloc(SIZE * SIZE * sizeof(double));
i_vectors = (double *)malloc(SIZE * DIMENSIONS * SIZE * sizeof(double));
norm = (double *)malloc(SIZE * sizeof(double));
file = fopen(data_file_name, "rb");
if (file == NULL)
{
printf("Could not open file\n");
exit(1);
}
if(fread(y, sizeof(double), SIZE * DIMENSIONS, file) != SIZE*DIMENSIONS){
printf("Error at reading file\n");
exit(1);
}
fclose(file);
// x keeps the original (unshifted) points; y is iterated in place.
for(int i = 0; i < SIZE * DIMENSIONS; i++)
{
x[i] = y[i];
}
cudaMalloc((void **)&d_y, size_double);
cudaMalloc((void **)&d_x, size_double);
cudaMalloc((void **)&d_h, sizeof(double));
cudaMalloc((void **)&d_kernel, SIZE * SIZE * sizeof(double));
cudaMalloc((void **)&d_i_vectors, SIZE * DIMENSIONS * SIZE * sizeof(double));
cudaMalloc((void **)&d_norm, SIZE * sizeof(double));
cudaMemcpy(d_y, y, size_double, cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, size_double, cudaMemcpyHostToDevice);
cudaMemcpy(d_h, &h, sizeof(double), cudaMemcpyHostToDevice);
// NOTE(review): the next three uploads copy UNINITIALIZED host memory;
// the kernel writes these buffers before reading them, so the copies
// appear unnecessary — confirm and consider dropping them.
cudaMemcpy(d_kernel, kernel, SIZE * SIZE * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_i_vectors, i_vectors, SIZE * DIMENSIONS * SIZE * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_norm, norm, SIZE * sizeof(double), cudaMemcpyHostToDevice);
//Time variables.
struct timeval startwtime, endwtime;
gettimeofday (&startwtime, NULL);
// One block per query point, one thread per source point.
mean_shift<<<SIZE,SIZE>>>(d_y, d_x, d_h, d_kernel, d_i_vectors, d_norm);
cudaDeviceSynchronize();
gettimeofday (&endwtime, NULL);
cudaMemcpy(z, d_y, size_double, cudaMemcpyDeviceToHost);
double exec_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf("Runtime: %.14f \n", exec_time);
file = fopen(results_file_name, "wb");
if (file == NULL)
{
printf("Could not open file\n");
exit(1);
}
fwrite(z, sizeof(double), SIZE * DIMENSIONS, file);
fclose(file);
cudaFree(d_y);
cudaFree(d_x);
cudaFree(d_h);
cudaFree(d_kernel);
cudaFree(d_i_vectors);
cudaFree(d_norm);
free(y);
free(x);
free(z);
free(kernel);
free(i_vectors);
free(norm);
return 0;
}
|
14,616 | #include "includes.h"
// Include files
// Parameters
#define N_ATOMS 343
#define MASS_ATOM 1.0f
#define time_step 0.01f
#define L 10.5f
#define T 0.728f
#define NUM_STEPS 10000
const int BLOCK_SIZE = 1024;
//const int L = ;
const int scheme = 1; // 0 for explicit, 1 for implicit
/*************************************************************************************************************/
/************* INITIALIZATION CODE **********/
/*************************************************************************************************************/
// Per-block sum reduction: block b sums input[b*N_ATOMS .. b*N_ATOMS +
// N_ATOMS - 1] in shared memory and writes the NEGATED sum to
// output[b]. Launch with BLOCK_SIZE (1024) threads per block.
// NOTE(review): startunit and len are unused — confirm they can be
// dropped from the interface.
__global__ void newForceReduction(float *input, float *output, int startunit, int len)
{
unsigned int tx = threadIdx.x;
unsigned int start = blockIdx.x *N_ATOMS;
__shared__ float partSum[BLOCK_SIZE];
// Load one block-row (N_ATOMS values), zero-padding up to BLOCK_SIZE
// so the power-of-two tree reduction below is valid.
if (tx < N_ATOMS) { partSum[tx] = input[start + tx]; }
else{ partSum[tx] = 0.0f; }
__syncthreads();
// Tree reduction; a block-wide barrier follows each halving step.
if (tx < 512){
partSum[tx] += partSum[tx + 512];
} __syncthreads();
if (tx < 256){
partSum[tx] += partSum[tx + 256];
} __syncthreads();
if (tx < 128){
partSum[tx] += partSum[tx + 128];
} __syncthreads();
if (tx < 64){
partSum[tx] += partSum[tx + 64];
} __syncthreads();
// FIX: the original kept all 32 lanes active for every remaining step
// with no synchronization, relying on implicit warp-lockstep — unsafe
// since Volta's independent thread scheduling. Narrow the active set
// each step so reads and writes never overlap, and publish stores
// with __syncwarp() between steps. Only partSum[0] is consumed below.
if (tx < 32){ partSum[tx] += partSum[tx + 32]; } __syncwarp();
if (tx < 16){ partSum[tx] += partSum[tx + 16]; } __syncwarp();
if (tx < 8){ partSum[tx] += partSum[tx + 8]; } __syncwarp();
if (tx < 4){ partSum[tx] += partSum[tx + 4]; } __syncwarp();
if (tx < 2){ partSum[tx] += partSum[tx + 2]; } __syncwarp();
if (tx < 1){ partSum[tx] += partSum[tx + 1]; }
if (tx == 0){
output[blockIdx.x] = -partSum[0]; // negated sum, per caller's convention
}
}
14,617 | #include <iostream>
void bar();
// Minimal demonstration kernel: each launched thread prints "foo!" via
// device-side printf (output is flushed at the next synchronizing call).
__global__ void fooKernel() {
printf("foo!\n");
}
// Calls the externally-declared bar(), launches fooKernel with a single
// thread, then synchronizes so the kernel's printf output is flushed before
// the final host-side message.
int main(){
bar();
fooKernel<<<1,1>>>();
// NOTE(review): launch and sync are unchecked; consider cudaGetLastError().
cudaDeviceSynchronize();
std::cout << "foo done!\n";
return 0;
}
|
14,618 | #define uint8 unsigned char
#define uint32 unsigned long int
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#include <device_functions.h>
typedef struct
{
uint32 total[2];
uint32 state[8];
uint8 buffer[64];
}
sha256_context;
// Big-endian 32-bit load/store helpers used by the SHA-256 transform
// (FIPS 180-4 mandates big-endian message words regardless of host order).
// Fix: the original file defined each macro twice with identical bodies;
// the redundant second definitions have been removed.
#define GET_UINT32(n,b,i) \
{ \
(n) = ((uint32) (b)[(i) ] << 24) \
| ((uint32) (b)[(i) + 1] << 16) \
| ((uint32) (b)[(i) + 2] << 8) \
| ((uint32) (b)[(i) + 3] ); \
}
#define PUT_UINT32(n,b,i) \
{ \
(b)[(i) ] = (uint8) ((n) >> 24); \
(b)[(i) + 1] = (uint8) ((n) >> 16); \
(b)[(i) + 2] = (uint8) ((n) >> 8); \
(b)[(i) + 3] = (uint8) ((n) ); \
}
// Reset a SHA-256 context: zero the byte counters and load the standard
// initial hash values (FIPS 180-4: first 32 bits of the fractional parts of
// the square roots of the first eight primes).
__device__ __host__ inline void sha256_starts(sha256_context* ctx)
{
    const uint32 iv[8] =
    {
        0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
        0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
    };
    ctx->total[0] = 0;
    ctx->total[1] = 0;
    for (int i = 0; i < 8; ++i)
        ctx->state[i] = iv[i];
}
// SHA-256 compression function: absorbs one 64-byte block `data` into the
// running hash state ctx->state. Implements the 64-round transform of
// FIPS 180-4 with the first 16 message-schedule words loaded big-endian and
// the rest expanded on the fly via R(t).
__device__ __host__ inline void sha256_process(sha256_context* ctx, uint8 data[64])
{
uint32 temp1, temp2, W[64];
uint32 A, B, C, D, E, F, G, H;
// Load the 16 big-endian message words of this block.
GET_UINT32(W[0], data, 0);
GET_UINT32(W[1], data, 4);
GET_UINT32(W[2], data, 8);
GET_UINT32(W[3], data, 12);
GET_UINT32(W[4], data, 16);
GET_UINT32(W[5], data, 20);
GET_UINT32(W[6], data, 24);
GET_UINT32(W[7], data, 28);
GET_UINT32(W[8], data, 32);
GET_UINT32(W[9], data, 36);
GET_UINT32(W[10], data, 40);
GET_UINT32(W[11], data, 44);
GET_UINT32(W[12], data, 48);
GET_UINT32(W[13], data, 52);
GET_UINT32(W[14], data, 56);
GET_UINT32(W[15], data, 60);
// Logical primitives of the SHA-256 round function (FIPS 180-4 notation).
#define SHR(x,n) ((x & 0xFFFFFFFF) >> n)
#define ROTR(x,n) (SHR(x,n) | (x << (32 - n)))
#define S0(x) (ROTR(x, 7) ^ ROTR(x,18) ^ SHR(x, 3))
#define S1(x) (ROTR(x,17) ^ ROTR(x,19) ^ SHR(x,10))
#define S2(x) (ROTR(x, 2) ^ ROTR(x,13) ^ ROTR(x,22))
#define S3(x) (ROTR(x, 6) ^ ROTR(x,11) ^ ROTR(x,25))
#define F0(x,y,z) ((x & y) | (z & (x | y)))
#define F1(x,y,z) (z ^ (x & (y ^ z)))
// Message-schedule expansion for rounds 16..63.
#define R(t) \
( \
W[t] = S1(W[t - 2]) + W[t - 7] + \
S0(W[t - 15]) + W[t - 16] \
)
// One round; the caller rotates the 8 working variables between rounds.
#define P(a,b,c,d,e,f,g,h,x,K) \
{ \
temp1 = h + S3(e) + F1(e,f,g) + K + x; \
temp2 = S2(a) + F0(a,b,c); \
d += temp1; h = temp1 + temp2; \
}
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
F = ctx->state[5];
G = ctx->state[6];
H = ctx->state[7];
// Rounds 0..15 use the loaded words; the K constants are the standard
// SHA-256 round constants.
P(A, B, C, D, E, F, G, H, W[0], 0x428A2F98);
P(H, A, B, C, D, E, F, G, W[1], 0x71374491);
P(G, H, A, B, C, D, E, F, W[2], 0xB5C0FBCF);
P(F, G, H, A, B, C, D, E, W[3], 0xE9B5DBA5);
P(E, F, G, H, A, B, C, D, W[4], 0x3956C25B);
P(D, E, F, G, H, A, B, C, W[5], 0x59F111F1);
P(C, D, E, F, G, H, A, B, W[6], 0x923F82A4);
P(B, C, D, E, F, G, H, A, W[7], 0xAB1C5ED5);
P(A, B, C, D, E, F, G, H, W[8], 0xD807AA98);
P(H, A, B, C, D, E, F, G, W[9], 0x12835B01);
P(G, H, A, B, C, D, E, F, W[10], 0x243185BE);
P(F, G, H, A, B, C, D, E, W[11], 0x550C7DC3);
P(E, F, G, H, A, B, C, D, W[12], 0x72BE5D74);
P(D, E, F, G, H, A, B, C, W[13], 0x80DEB1FE);
P(C, D, E, F, G, H, A, B, W[14], 0x9BDC06A7);
P(B, C, D, E, F, G, H, A, W[15], 0xC19BF174);
// Rounds 16..63 expand the schedule via R(t).
P(A, B, C, D, E, F, G, H, R(16), 0xE49B69C1);
P(H, A, B, C, D, E, F, G, R(17), 0xEFBE4786);
P(G, H, A, B, C, D, E, F, R(18), 0x0FC19DC6);
P(F, G, H, A, B, C, D, E, R(19), 0x240CA1CC);
P(E, F, G, H, A, B, C, D, R(20), 0x2DE92C6F);
P(D, E, F, G, H, A, B, C, R(21), 0x4A7484AA);
P(C, D, E, F, G, H, A, B, R(22), 0x5CB0A9DC);
P(B, C, D, E, F, G, H, A, R(23), 0x76F988DA);
P(A, B, C, D, E, F, G, H, R(24), 0x983E5152);
P(H, A, B, C, D, E, F, G, R(25), 0xA831C66D);
P(G, H, A, B, C, D, E, F, R(26), 0xB00327C8);
P(F, G, H, A, B, C, D, E, R(27), 0xBF597FC7);
P(E, F, G, H, A, B, C, D, R(28), 0xC6E00BF3);
P(D, E, F, G, H, A, B, C, R(29), 0xD5A79147);
P(C, D, E, F, G, H, A, B, R(30), 0x06CA6351);
P(B, C, D, E, F, G, H, A, R(31), 0x14292967);
P(A, B, C, D, E, F, G, H, R(32), 0x27B70A85);
P(H, A, B, C, D, E, F, G, R(33), 0x2E1B2138);
P(G, H, A, B, C, D, E, F, R(34), 0x4D2C6DFC);
P(F, G, H, A, B, C, D, E, R(35), 0x53380D13);
P(E, F, G, H, A, B, C, D, R(36), 0x650A7354);
P(D, E, F, G, H, A, B, C, R(37), 0x766A0ABB);
P(C, D, E, F, G, H, A, B, R(38), 0x81C2C92E);
P(B, C, D, E, F, G, H, A, R(39), 0x92722C85);
P(A, B, C, D, E, F, G, H, R(40), 0xA2BFE8A1);
P(H, A, B, C, D, E, F, G, R(41), 0xA81A664B);
P(G, H, A, B, C, D, E, F, R(42), 0xC24B8B70);
P(F, G, H, A, B, C, D, E, R(43), 0xC76C51A3);
P(E, F, G, H, A, B, C, D, R(44), 0xD192E819);
P(D, E, F, G, H, A, B, C, R(45), 0xD6990624);
P(C, D, E, F, G, H, A, B, R(46), 0xF40E3585);
P(B, C, D, E, F, G, H, A, R(47), 0x106AA070);
P(A, B, C, D, E, F, G, H, R(48), 0x19A4C116);
P(H, A, B, C, D, E, F, G, R(49), 0x1E376C08);
P(G, H, A, B, C, D, E, F, R(50), 0x2748774C);
P(F, G, H, A, B, C, D, E, R(51), 0x34B0BCB5);
P(E, F, G, H, A, B, C, D, R(52), 0x391C0CB3);
P(D, E, F, G, H, A, B, C, R(53), 0x4ED8AA4A);
P(C, D, E, F, G, H, A, B, R(54), 0x5B9CCA4F);
P(B, C, D, E, F, G, H, A, R(55), 0x682E6FF3);
P(A, B, C, D, E, F, G, H, R(56), 0x748F82EE);
P(H, A, B, C, D, E, F, G, R(57), 0x78A5636F);
P(G, H, A, B, C, D, E, F, R(58), 0x84C87814);
P(F, G, H, A, B, C, D, E, R(59), 0x8CC70208);
P(E, F, G, H, A, B, C, D, R(60), 0x90BEFFFA);
P(D, E, F, G, H, A, B, C, R(61), 0xA4506CEB);
P(C, D, E, F, G, H, A, B, R(62), 0xBEF9A3F7);
P(B, C, D, E, F, G, H, A, R(63), 0xC67178F2);
// Feed-forward: add the working variables back into the chaining state.
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
ctx->state[5] += F;
ctx->state[6] += G;
ctx->state[7] += H;
}
// Absorb `length` bytes of `input` into the hash, buffering partial 64-byte
// blocks in ctx->buffer and running the compression function on every
// complete block.
__device__ __host__ inline void sha256_update(sha256_context* ctx, uint8* input, uint32 length)
{
uint32 left, fill;
if (!length) return;
// Bytes already pending in the buffer, and room left to complete a block.
left = ctx->total[0] & 0x3F;
fill = 64 - left;
// 64-bit message-length counter kept as two 32-bit halves; carry below.
ctx->total[0] += length;
ctx->total[0] &= 0xFFFFFFFF;
if (ctx->total[0] < length)
ctx->total[1]++;
// First, top up and process a previously-buffered partial block.
if (left && length >= fill)
{
memcpy((void*)(ctx->buffer + left), (void*)input, fill);
sha256_process(ctx, ctx->buffer);
length -= fill;
input += fill;
left = 0;
}
// Process full blocks directly from the input, no copy needed.
while (length >= 64)
{
sha256_process(ctx, input);
length -= 64;
input += 64;
}
// Stash any trailing partial block for the next update/finish call.
if (length)
{
memcpy((void*)(ctx->buffer + left), (void*)input, length);
}
}
// Finalize the hash: append the 0x80 terminator plus zero padding so the
// message length lands at 56 mod 64, append the 64-bit bit count, run the
// last block(s), and serialize the state big-endian into digest[32].
__device__ __host__ inline void sha256_finish(sha256_context* ctx, uint8 digest[32])
{
uint32 last, padn;
uint32 high, low;
uint8 msglen[8], sha256_padding[64] =
{
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
// Convert the byte count (total[1]:total[0]) into a bit count (<<3).
high = (ctx->total[0] >> 29)
| (ctx->total[1] << 3);
low = (ctx->total[0] << 3);
PUT_UINT32(high, msglen, 0);
PUT_UINT32(low, msglen, 4);
// Pad to 56 mod 64 so the 8-byte length completes the final block.
last = ctx->total[0] & 0x3F;
padn = (last < 56) ? (56 - last) : (120 - last);
sha256_update(ctx, sha256_padding, padn);
sha256_update(ctx, msglen, 8);
// Emit the digest big-endian, 4 bytes per state word.
PUT_UINT32(ctx->state[0], digest, 0);
PUT_UINT32(ctx->state[1], digest, 4);
PUT_UINT32(ctx->state[2], digest, 8);
PUT_UINT32(ctx->state[3], digest, 12);
PUT_UINT32(ctx->state[4], digest, 16);
PUT_UINT32(ctx->state[5], digest, 20);
PUT_UINT32(ctx->state[6], digest, 24);
PUT_UINT32(ctx->state[7], digest, 28);
}
// One-shot convenience wrapper: hash `length` bytes of `msg` into sha256[32].
// NOTE(review): `length` is uint8, so messages longer than 255 bytes are
// silently truncated by the parameter type — confirm callers never exceed it.
__device__ __host__ inline void sha256(uint8* msg, uint8 length, uint8 sha256[32])
{
sha256_context ctx;
sha256_starts(&ctx);
sha256_update(&ctx, msg, length);
sha256_finish(&ctx, sha256);
}
14,619 | #include "minmax.cuh"
// Return the smaller of two doubles; on a tie, `b` is returned unchanged.
double minVal(double a, double b) {
    return (a < b) ? a : b;
}
// Return the larger of two doubles; on a tie, `b` is returned unchanged.
double maxVal(double a, double b) {
    return (a > b) ? a : b;
}
14,620 | #include "includes.h"
// Gathers eight 73x73 image planes (one per filter stage) into one output
// buffer laid out as [channel][row][column], each plane 73*73 floats.
// One thread handles one row index across all eight channels; the expected
// launch covers 73 rows total — TODO confirm grid size at the call site.
__global__ void CombineScreen(float* d_postEdge1, float* d_postEdge2, float* d_postGradient1, float* d_postGradient2, float* d_postGradient3, float* d_postSobel3LR, float* d_postSobel3UD, float* d_postSmooth31, float* d_output){
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    // Channel order fixes each plane's position in the output volume.
    float* channels[8] = {
        d_postEdge1, d_postEdge2, d_postGradient1, d_postGradient2,
        d_postGradient3, d_postSobel3LR, d_postSobel3UD, d_postSmooth31
    };
    for (int c = 0; c < 8; ++c) {
        for (int col = 0; col < 73; ++col) {
            d_output[col + row * 73 + 73 * 73 * c] = channels[c][row * 73 + col];
        }
    }
}
14,621 | /*
* setIC.cu
* MRAG
*
* Created by Diego Rossinelli on 10/9/08.
* Copyright 2008 CSE Lab, ETH Zurich. All rights reserved.
*
*/
// Zero-initialization kernel: block b writes a[b] = 0.
// Expects a launch of <<<N, 1>>> — one block per element; threadIdx unused.
__global__ void setIC(float * a)
{
a[blockIdx.x] = 0;
}
14,622 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Comment out this line to enable debug mode
// #define NDEBUG
#define H_INDEX(i, j) (i) * numCols + (j)
/* time stamp function in seconds */
/* Wall-clock timestamp in seconds since the epoch, with microsecond
 * resolution via gettimeofday. */
__host__ double getTimeStamp()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    double wholeSeconds = (double)tv.tv_sec;
    double microFraction = (double)tv.tv_usec / 1000000;
    return wholeSeconds + microFraction;
}
// Fill the row-major numRows x numCols matrix X with X[i][j] = (i + j) / 2.
__host__ void initX(float *X, int numRows, int numCols)
{
    for (int row = 0; row < numRows; row++)
    {
        float *rowStart = X + row * numCols;
        for (int col = 0; col < numCols; col++)
            rowStart[col] = (float)(row + col) / 2.0;
    }
}
// Fill the row-major numRows x numCols matrix Y with Y[i][j] = 3.25 * (i + j).
__host__ void initY(float *Y, int numRows, int numCols)
{
    for (int row = 0; row < numRows; row++)
    {
        float *rowStart = Y + row * numCols;
        for (int col = 0; col < numCols; col++)
            rowStart[col] = (float)3.25 * (row + col);
    }
}
// Bounds-checked read of the row-major matrix M at (i, j); indices outside
// [0,numRows) x [0,numCols) read as 0 (implicit zero padding).
__host__ float f_siggen_reference_get(float *M, int i, int j, int numRows, int numCols)
{
    bool inRange = (i >= 0) && (i < numRows) && (j >= 0) && (j < numCols);
    return inRange ? M[i * numCols + j] : 0;
}
// CPU reference for the signal-generation stencil:
//   Z[i][j] = X[i-1][j] + X[i][j] + X[i+1][j] - Y[i][j-2] - Y[i][j-1] - Y[i][j]
// with zero padding outside the matrix. The accumulator applies the six
// terms left-to-right, matching the original expression's evaluation order
// so floating-point results are bit-identical.
__host__ void f_siggen_reference(float *X, float *Y, float *Z, int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        for (int j = 0; j < numCols; j++)
        {
            float acc = f_siggen_reference_get(X, i - 1, j, numRows, numCols);
            acc += f_siggen_reference_get(X, i, j, numRows, numCols);
            acc += f_siggen_reference_get(X, i + 1, j, numRows, numCols);
            acc -= f_siggen_reference_get(Y, i, j - 2, numRows, numCols);
            acc -= f_siggen_reference_get(Y, i, j - 1, numRows, numCols);
            acc -= f_siggen_reference_get(Y, i, j, numRows, numCols);
            Z[i * numCols + j] = acc;
        }
    }
}
// Exact element-wise comparison of two numRows x numCols float matrices.
// Returns 1 when every element matches, 0 at the first difference.
__host__ int checkZ(float *E, float *A, int numRows, int numCols)
{
    int total = numRows * numCols;
    for (int idx = 0; idx < total; idx++)
    {
        if (E[idx] != A[idx])
            return 0;
    }
    return 1;
}
/* GPU stencil kernel:
 *   Z[i,j] = X[i-1,j] + X[i,j] + X[i+1,j] - Y[i,j-2] - Y[i,j-1] - Y[i,j]
 * Dynamic shared memory holds a transposed X tile with a one-element halo on
 * each side of the row axis, followed (at element offset smemNumElemX) by a
 * Y tile with a two-column left halo. Launched with 32x32 blocks covering
 * the matrix via ceil-div (see main).
 * NOTE(review): threads outside the matrix return before the __syncthreads()
 * below. When numRows/numCols are not multiples of the block dims, edge
 * blocks reach the barrier with only part of the block — undefined behavior
 * per the CUDA model. Confirm launches use multiples of 32, or guard the
 * loads/stores instead of returning early. */
__global__ void f_siggen(float *X, float *Y, float *Z, int numRows, int numCols, int smemNumElemX)
{
extern __shared__ float s_data[];
float *s_XT = s_data; // blockDim.x * (blockDim.y + 2);
int s_XTWidth = (blockDim.y + 2);
// int s_XTHeight = blockDim.x;
float *s_Y = s_XT + smemNumElemX; // (blockDim.x + 2) * blockDim.y;
/* Global Coordinate */
int globalX = blockDim.x * blockIdx.x + threadIdx.x;
int globalY = blockDim.y * blockIdx.y + threadIdx.y;
int globalIdx = globalY * numCols + globalX;
if (globalX >= numCols || globalY >= numRows)
return;
/* Set Up s_XT: X stored transposed; halo entries come from the matrix rows
 * above/below the tile, or 0 at the matrix boundary */
int s_XT_x = threadIdx.y + 1;
int s_XT_y = threadIdx.x;
int s_XT_idx = s_XT_y * s_XTWidth + s_XT_x;
if (globalY == 0)
{
s_XT[s_XT_idx - 1] = 0;
}
else if (threadIdx.y == 0)
{
s_XT[s_XT_idx - 1] = X[globalIdx - numCols];
}
if (globalY == numRows - 1)
{
s_XT[s_XT_idx + 1] = 0;
}
else if (threadIdx.y == blockDim.y - 1)
{
s_XT[s_XT_idx + 1] = X[globalIdx + numCols];
}
s_XT[s_XT_idx] = X[globalIdx];
/* Set Up s_Y: two-column left halo (columns j-2, j-1), zeros at the matrix
 * boundary */
int s_Y_x = threadIdx.x + 2;
int s_Y_y = threadIdx.y;
int s_Y_idx = s_Y_y * (blockDim.x + 2) + s_Y_x;
if (globalX == 0)
{
s_Y[s_Y_idx - 2] = 0;
s_Y[s_Y_idx - 1] = 0;
}
else if (threadIdx.x == 0)
{
s_Y[s_Y_idx - 2] = Y[globalIdx - 2];
s_Y[s_Y_idx - 1] = Y[globalIdx - 1];
}
s_Y[s_Y_idx] = Y[globalIdx];
/* Wait for All to Set Up s_XT and s_Y */
__syncthreads();
/* Write Output */
Z[globalIdx] = s_XT[s_XT_idx - 1] + s_XT[s_XT_idx] + s_XT[s_XT_idx + 1] - s_Y[s_Y_idx - 2] - s_Y[s_Y_idx - 1] - s_Y[s_Y_idx];
}
/* Host driver: builds X and Y, computes the stencil on the CPU as a
 * reference, runs the f_siggen kernel on the GPU, verifies both results
 * match exactly, and prints timing. Usage: <prog> numRows numCols. */
int main(int argc, char *argv[])
{
int error = 0;
/* Get Dimension */
if (argc != 3)
{
printf("Error: The number of arguments is not exactly 2\n");
return 0;
}
int numRows = atoi(argv[1]);
int numCols = atoi(argv[2]);
size_t numElem = numRows * numCols;
size_t numBytes = numElem * sizeof(float);
#ifndef NDEBUG
printf("numRows=%d, numCols=%d, numElem=%ld, numBytes=%ld\n", numRows, numCols, numElem, numBytes);
#endif
/* Allocate Host Memory (pinned for X/Y/dZ so device copies are fast).
 * NOTE(review): cudaError_t results are OR-ed into an int, which keeps the
 * success/failure distinction but discards the specific error code. */
float *h_X = NULL;
float *h_Y = NULL;
float *h_hZ = (float *)malloc(numBytes);
float *h_dZ = NULL;
error = error || cudaHostAlloc((void **)&h_X, numBytes, 0);
error = error || cudaHostAlloc((void **)&h_Y, numBytes, 0);
error = error || cudaHostAlloc((void **)&h_dZ, numBytes, 0);
if (error)
{
printf("Error: cudaHostAlloc returns error\n");
return 0;
}
/* Initialize Host Memory */
initX(h_X, numRows, numCols);
initY(h_Y, numRows, numCols);
#ifndef NDEBUG
double timestampPreCpuKernel = getTimeStamp();
#endif
f_siggen_reference(h_X, h_Y, h_hZ, numRows, numCols);
#ifndef NDEBUG
double timestampPostCpuKernel = getTimeStamp();
printf("CPU=%.6fsec\n", timestampPostCpuKernel - timestampPreCpuKernel);
#endif
/* Allocate Device Memory */
float *d_X = NULL;
float *d_Y = NULL;
float *d_Z = NULL;
error = error || cudaMalloc((void **)&d_X, numBytes);
error = error || cudaMalloc((void **)&d_Y, numBytes);
error = error || cudaMalloc((void **)&d_Z, numBytes);
if (error)
{
printf("Error: cudaMalloc returns error\n");
return 0;
}
/* Copy Host Memory to Device Memory */
double timestampPreCpuGpuTransfer = getTimeStamp();
error = error || cudaMemcpy(d_X, h_X, numBytes, cudaMemcpyHostToDevice);
error = error || cudaMemcpy(d_Y, h_Y, numBytes, cudaMemcpyHostToDevice);
if (error)
{
printf("Error: cudaMemcpy returns error\n");
return 0;
}
/* Run Kernel: 32x32 blocks, ceil-div grid so edge blocks cover the matrix */
double timestampPreKernel = getTimeStamp();
dim3 d_blockDim;
d_blockDim.x = 32;
d_blockDim.y = 32;
dim3 d_gridDim;
d_gridDim.x = (numCols - 1) / d_blockDim.x + 1;
d_gridDim.y = (numRows - 1) / d_blockDim.y + 1;
/* Dynamic shared memory: transposed X tile with top/bottom halo plus a
 * Y tile with a two-column left halo (layout consumed by f_siggen) */
int d_smemNumElemX = d_blockDim.x * (d_blockDim.y + 2);
int d_smemNumElemY = (d_blockDim.x + 2) * d_blockDim.y;
size_t d_smemNumBytes = (d_smemNumElemX + d_smemNumElemY) * sizeof(float);
f_siggen<<<d_gridDim, d_blockDim, d_smemNumBytes>>>(d_X, d_Y, d_Z, numRows, numCols, d_smemNumElemX);
/* NOTE(review): the launch is unchecked — consider cudaGetLastError(). */
cudaDeviceSynchronize();
/* Copy Device Memory to Host Memory */
double timestampPreGpuCpuTransfer = getTimeStamp();
error = error || cudaMemcpy(h_dZ, d_Z, numBytes, cudaMemcpyDeviceToHost);
if (error)
{
printf("Error: cudaMemcpy returns error\n");
return 0;
}
double timestampPostGpuCpuTransfer = getTimeStamp();
/* Free Device Memory */
cudaFree(d_Z);
d_Z = NULL;
cudaFree(d_Y);
d_Y = NULL;
cudaFree(d_X);
d_X = NULL;
/* Verify Device Result with Host Result (exact float equality) */
error = error || !checkZ(h_hZ, h_dZ, numRows, numCols);
/* Output */
#ifndef NDEBUG
printf("d_gridDim=(%d, %d), d_blockDim=(%d, %d), d_smemNumBytes=%ld\n", d_gridDim.x, d_gridDim.y, d_blockDim.x, d_blockDim.y, d_smemNumBytes);
#endif
if (!error)
{
// #ifndef NDEBUG
// printf("<total_GPU_time> <CPU_GPU_transfer_time> <kernel_time> <GPU_CPU_transfer_time> <Z-value> <nl>\n");
// #endif
float totalGpuElapased = timestampPostGpuCpuTransfer - timestampPreCpuGpuTransfer;
float cpuGpuTransferElapsed = timestampPreKernel - timestampPreCpuGpuTransfer;
float kernelElapsed = timestampPreGpuCpuTransfer - timestampPreKernel;
float gpuCpuTransferElapsed = timestampPostGpuCpuTransfer - timestampPreGpuCpuTransfer;
int zValueI = 5;
int zValueJ = 5;
float zValue = h_dZ[H_INDEX(zValueI, zValueJ)];
printf("%.6f %.6f %.6f %.6f %.6f\n", totalGpuElapased, cpuGpuTransferElapsed, kernelElapsed, gpuCpuTransferElapsed, zValue);
}
else
{
printf("Error: GPU result does not with CPU result\n");
#ifndef NDEBUG
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
printf("(i=%d, j=%d), CPU=%.6f, GPU=%.6f, X=%.6f, Y=%.6f\n", i, j, h_hZ[H_INDEX(i, j)], h_dZ[H_INDEX(i, j)], h_X[H_INDEX(i, j)], h_Y[H_INDEX(i, j)]);
}
}
#endif
}
/* Free Host Memory (pinned buffers via cudaFreeHost, malloc'd via free) */
cudaFreeHost(h_dZ);
h_dZ = NULL;
free(h_hZ);
h_hZ = NULL;
cudaFreeHost(h_Y);
h_Y = NULL;
cudaFreeHost(h_X);
h_X = NULL;
/* Clean Up Device Resource */
cudaDeviceReset();
}
14,623 |
//#include <iostream>
#include <cstdio>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Enumerates all CUDA devices and prints, per device: name, compute
// capability, and maximum grid/block dimensions.
// NOTE(review): cudaGetDeviceCount/cudaGetDeviceProperties are unchecked;
// on a machine with no CUDA device, count may be garbage or stale.
int main(){
printf("\n Let's start ... \n");
int count;
cudaGetDeviceCount(&count);
cudaDeviceProp prop;
for (int i = 0; i < count; ++ i)
{
cudaGetDeviceProperties(&prop,i);
printf(" Device: %d, %s\n", i, prop.name);
printf(" Compute Capability: %d.%d\n", prop.major, prop.minor);
printf(" Max grid dimensions: (%dx%dx%d)\n", prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
printf(" Max block dimensions: (%dx%dx%d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1],
prop.maxThreadsDim[2]);
/*
std::cout << " Device: " << i << prop.name << std::endl;
std::cout << " Compute Capability: " << prop.major << "." << prop.minor << std::endl;
std::cout << " Max grid dimensions: (" << prop.maxGridSize[0] << "x"
<< prop.maxGridSize[1] << "x"
<< prop.maxGridSize[2] << ")\n";
std::cout << " Max block dimensions: (" << prop.maxThreadsDim[0] << "x"
<< prop.maxThreadsDim[1] << "x"
<< prop.maxThreadsDim[2] << ")\n";
*/
}
printf("\n done \n");
return 0;
}
|
14,624 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#define BLOCK_SIZE 512
#define checkCuda(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error-check helper behind the checkCuda() macro: on failure, prints the
// CUDA error string with the offending file/line and (by default) exits
// with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess) {
// NOTE(review): diagnostics go to stdout here; stderr is the usual choice.
fprintf(stdout,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define SET_SIZE 64
#define LINE_SIZE 128
#define STRIDE LINE_SIZE*SET_SIZE
// Strided-read benchmark kernel: out[tid] = in[(tid*STRIDE) % size].
// STRIDE = LINE_SIZE*SET_SIZE (8192), so successive threads read addresses
// far apart — an intentionally uncoalesced access pattern; writes are
// contiguous. One thread per output byte, guarded against the grid tail.
__global__ void kernel(char *out, char *in, int size) {
long tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < size) out[tid] = in[(tid*STRIDE)%size];
}
// Host driver: stages `in_host` on the device, runs the strided-read kernel
// over `size` bytes, and copies the result back into `out_host`.
// All CUDA calls are wrapped in checkCuda, which aborts on failure.
void benchmark(char *out_host, char *in_host, int size) {
    char *d_out;
    char *d_in;
    // Device-side buffers for input and output
    checkCuda(cudaMalloc((void**)&(d_out), size*sizeof(char)));
    checkCuda(cudaMalloc((void**)&(d_in), size*sizeof(char)));
    // Stage the input on the device
    checkCuda(cudaMemcpy(d_in, in_host, size*sizeof(char), cudaMemcpyHostToDevice));
    // One thread per byte, BLOCK_SIZE threads per block (ceil-div grid)
    dim3 gridShape(1+(size-1)/BLOCK_SIZE);
    dim3 blockShape(BLOCK_SIZE);
    kernel<<<gridShape, blockShape>>>(d_out, d_in, size);
    // Surface launch-configuration errors without clearing the error state
    checkCuda(cudaPeekAtLastError());
    // Blocking copy also synchronizes with the kernel before reading results
    checkCuda(cudaMemcpy(out_host, d_out, size*sizeof(char), cudaMemcpyDeviceToHost));
    // Release device buffers
    checkCuda(cudaFree(d_out));
    checkCuda(cudaFree(d_in));
}
|
14,625 | /* Borrowed from http://computer-graphics.se/hello-world-for-cuda.html
* This program takes the string "Hello ", prints it, then passes it to CUDA
* with an array * of offsets. Then the offsets are added in parallel to
* produce the string "World!"
* By Ingemar Ragnemalm 2010
*/
#include <stdlib.h>
#include <stdio.h>
int const N = 16;
int const blocksize = 16;
/* Adds a per-character offset to the string buffer: a[i] += b[i], one
 * thread per character. Expects a launch with N threads in x. */
__global__
void hello(
char * const a,
int const * const b)
{
a[threadIdx.x] += b[threadIdx.x];
}
/* Host driver: prints "Hello ", has the GPU add per-character offsets that
 * turn the buffer into "World!", and prints the result.
 * Fix: the device buffer `bd` was allocated but never freed (leak). */
int main(
int argc,
char ** argv)
{
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char * ad;
int * bd;
int const csize = N * sizeof(char);
int const isize = N * sizeof(int);
printf("%s", a);
/* Allocate space for a and b on the device */
cudaMalloc((void**) &ad, csize );
cudaMalloc((void**) &bd, isize );
/* copy the contents of a and b to the device */
cudaMemcpy(ad, a, csize, cudaMemcpyHostToDevice);
cudaMemcpy(bd, b, isize, cudaMemcpyHostToDevice);
dim3 dimBlock(blocksize, 1);
dim3 dimGrid(1, 1);
/* call the CUDA kernel */
hello<<<dimGrid, dimBlock>>>(ad, bd);
/* copy the result back to the host; cudaMemcpy is blocking, so no
 * explicit synchronization is needed before reading `a` */
cudaMemcpy(a, ad, csize, cudaMemcpyDeviceToHost);
cudaFree(ad);
cudaFree(bd); /* fix: previously leaked */
printf("%s\n", a);
return 0;
}
|
14,626 |
__global__ void Initialize_gpu(double state[],double m[], double l[],double I[],double Zetas[],int n);
void printa(double a[], int n);
#include <iostream>
#include <stdio.h>
// Host wrapper for the Initialize_gpu kernel: copies per-body state x (2n),
// masses m (n), lengths l (n), and inertia blocks I (9n) to the device,
// launches one 6x6 thread block per body, and copies the 26*6*n-element
// Zetas result back into Zs.
// NOTE(review): all cudaMalloc/cudaMemcpy/launch calls are unchecked, and
// there is no cudaDeviceSynchronize — correctness here relies on the final
// blocking cudaMemcpy ordering after the kernel on the default stream.
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[])
{
int x_size = 2*n;      // state: 2 values per body
int I_size = 3*n*3;    // inertia: 3x3 block per body
int z_size = 26*6*n;   // output Zetas layout expected by Initialize_gpu
double *d_x, *d_m, *d_l, *d_I, *d_zs;
// Allocate and Load M and N to device memor
cudaMalloc(&d_x,x_size*sizeof(double));
cudaMalloc(&d_m, n*sizeof(double));
cudaMalloc(&d_l, n*sizeof(double));
cudaMalloc(&d_I, I_size*sizeof(double));
cudaMalloc(&d_zs, z_size*sizeof(double));
cudaMemcpy(d_x, x, x_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_m, m, n*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_I, I, I_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_l, l, n*sizeof(double), cudaMemcpyHostToDevice);
// One 6x6 block per body
dim3 dimBlock(6, 6,1);
dim3 dimGrid(n,1,1);
Initialize_gpu<<<dimGrid, dimBlock>>>(d_x, d_m, d_l, d_I, d_zs, n);
// Blocking copy retrieves the result (and orders after the kernel).
cudaMemcpy(Zs, d_zs, z_size*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_m);
cudaFree(d_l);
cudaFree(d_I);
cudaFree(d_zs);
}
|
14,627 | #include "includes.h"
//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;
// Magnitude maximum: returns max(|x|, |y|).
__device__ __host__ inline double Max(double x, double y) {
    const double ax = fabs(x);
    const double ay = fabs(y);
    return (ax > ay) ? ax : ay;
}
// In-place tree reduction of |gra[·]| over one thread block; the largest
// magnitude is written to max[0].
// NOTE(review): assumes a single-block launch with blockDim.x a power of
// two, and it DESTROYS `gra` (partial maxima of absolute values overwrite
// the input) — confirm callers treat gra as scratch afterwards.
__global__ void max_gra(double* gra, double* max) {
int index = threadIdx.x;
for (int i = 1; i < blockDim.x; i <<= 1) {
// Thread `index` merges pair (index - i, index) when index ≡ i (mod 2i).
if (index % (i << 1) == i) {
gra[index - i] = Max(gra[index - i], gra[index]);
}
// Barrier is outside the divergent if, so all threads reach it each pass.
__syncthreads();
}
if (index == 0) {
max[0] = gra[0];
}
}
14,628 | #include "includes.h"
/*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
*
* This code is based on the NVIDIA 'reduction' CUDA sample,
* Copyright 1993-2010 NVIDIA Corporation.
*/
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
// Sets mis[0] = 1 if any of the n elements classifies differently between
// `actual` and `target` under a 0.5 threshold.
// NOTE(review): every thread writes mis[0] = 0 unconditionally before the
// guarded writes of 1 — across blocks (and even within a block) this is a
// data race: a late-arriving reset can overwrite another thread's 1. The
// caller should zero mis[0] on the host and this reset should be removed —
// confirm against call sites before changing.
__global__ void mismatch(int n, double* actual, double *target, int *mis)
{
mis[0] = 0;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
if(target[i] >= 0.5 && actual[i] < 0.5) {mis[0] = 1;}
if(target[i] < 0.5 && actual[i] >= 0.5) {mis[0] = 1;}
}
}
14,629 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* The variable names and the function names of this program is same as provided by the university.
The added variable and function are the only changes made to this program.
* To compile:
* nvcc -o linear61 linear61.cu -lm
*
* To run:
* .linear61
*
*****************************************************************************/
// 2-D sample point for the regression data set defined below.
typedef struct point_t {
double x; // independent variable
double y; // dependent variable
} point_t;
point_t data[] = {
{82.75,102.99},{65.49,98.18},{78.56,105.35},{68.67,85.29},
{65.81,112.85},{82.84,113.68},{65.55,98.46},{71.98,114.52},
{73.16,126.43},{76.40,134.02},{76.82,97.29},{47.47,79.78},
{41.13,78.60},{64.13,97.66},{34.81,71.87},{67.40,124.95},
{78.92,119.15},{70.50,120.57},{68.55,101.00},{36.91,75.79},
{48.94,91.26},{57.95,105.08},{66.28,93.09},{ 6.10,26.68},
{49.17,86.16},{42.11,71.14},{59.66,95.41},{87.00,120.27},
{19.21,52.28},{26.03,39.19},{78.42,102.92},{39.60,62.76},
{31.86,67.82},{98.79,134.45},{41.63,79.01},{47.04,79.06},
{87.79,118.72},{ 8.41,35.11},{12.18,26.31},{66.63,96.34},
{85.67,126.04},{95.24,124.55},{91.59,133.08},{18.71,35.87},
{50.54,88.79},{55.59,80.88},{57.45,80.08},{69.68,107.06},
{68.48,98.14},{42.52,68.34},{60.75,82.69},{63.82,95.62},
{83.82,117.19},{ 7.19,17.98},{ 0.55,20.03},{29.40,42.51},
{35.42,66.35},{75.00,117.98},{82.07,115.32},{13.45,33.99},
{55.14,97.79},{33.86,51.12},{75.48,106.36},{33.96,60.19},
{53.84,98.26},{46.84,76.33},{64.56,111.80},{92.83,130.92},
{11.02,24.55},{14.14,24.08},{34.19,69.54},{62.61,109.86},
{92.29,125.64},{ 1.14,23.05},{55.58,83.02},{19.23,54.48},
{43.58,69.39},{45.14,74.43},{81.14,114.66},{82.99,118.22},
{66.82,94.85},{97.53,133.25},{ 7.24, 9.11},{15.88,33.92},
{53.36,87.02},{42.76,77.23},{92.48,119.31},{68.85,103.81},
{91.48,151.27},{39.59,80.63},{15.13,57.50},{27.61,54.82},
{53.97,89.81},{15.40,51.54},{73.72,111.38},{64.97,100.00},
{50.37,86.82},{ 5.64,34.85},{72.08,117.17},{69.41,97.42},
{59.60,74.65},{43.19,67.89},{ 7.66,30.58},{46.36,70.09},
{95.11,127.80},{17.26,32.74},{ 7.08,32.31},{77.80,108.66},
{61.24,110.63},{ 1.66, 9.23},{ 5.52,26.67},{76.55,112.34},
{39.08,57.20},{85.69,125.46},{ 2.24, 5.80},{11.60,15.73},
{46.33,78.07},{55.99,101.98},{18.18,52.18},{77.58,126.60},
{15.98,56.85},{38.76,65.84},{30.17,50.44},{41.82,81.73},
{ 1.95,21.59},{90.31,123.68},{35.47,54.71},{44.59,93.65},
{25.49,43.71},{ 9.83,24.65},{31.18,52.48},{83.18,126.20},
{17.74,53.18},{97.83,122.81},{55.36,91.53},{22.74,40.82},
{27.93,47.07},{81.69,118.05},{23.42,52.64},{16.32,63.78},
{ 5.78,36.54},{72.98,89.76},{84.67,107.40},{45.96,79.32},
{ 1.27,19.92},{18.92,40.49},{29.68,58.82},{59.24,74.82},
{92.61,135.87},{16.72,25.27},{23.61,66.84},{29.50,55.91},
{ 2.87,33.00},{97.05,135.91},{86.20,115.59},{17.93,50.76},
{72.71,116.81},{86.57,104.69},{26.62,62.47},{56.14,108.56},
{65.06,114.35},{85.39,121.43},{50.94,63.59},{18.78,43.67},
{80.15,114.23},{53.90,75.31},{12.39,29.39},{85.21,127.79},
{14.88,46.83},{18.51,43.35},{45.74,83.98},{ 0.35,17.38},
{52.10,89.27},{52.42,85.78},{61.40,101.05},{69.11,113.63},
{ 3.04,28.84},{48.39,90.64},{74.91,102.50},{72.62,90.44},
{77.33,112.74},{76.72,108.72},{ 2.41,35.62},{15.70,31.81},
{40.00,61.68},{78.27,107.97},{24.81,48.71},{28.06,49.45},
{64.60,86.09},{53.78,91.20},{61.22,94.24},{48.19,93.42},
{52.73,82.14},{66.47,97.38},{80.60,111.94},{24.51,60.94},
{30.75,48.97},{54.78,85.37},{93.04,132.50},{97.82,146.30},
{84.91,110.92},{89.46,118.87},{80.13,106.04},{87.76,109.80},
{ 6.40,40.57},{91.82,143.91},{66.91,93.58},{33.66,55.38},
{95.29,127.14},{31.28,54.01},{53.42,94.35},{22.10,39.01},
{36.65,60.30},{29.88,65.04},{50.30,85.98},{24.08,36.90},
{65.82,112.54},{85.64,122.04},{77.64,105.16},{23.94,45.68},
{52.39,76.04},{22.15,53.97},{79.57,120.30},{95.43,136.74},
{14.39,48.47},{75.22,111.67},{10.62,25.75},{39.12,55.13},
{47.23,68.62},{68.64,97.47},{24.34,67.88},{73.02,113.79},
{93.37,133.01},{65.53,89.76},{12.01,43.27},{36.27,61.91},
{96.04,142.04},{54.88,94.29},{54.53,79.12},{ 6.39,20.93},
{14.10,31.46},{74.48,110.53},{63.15,108.52},{86.43,130.80},
{68.92,95.13},{93.66,129.94},{22.34,27.48},{ 7.95,28.43},
{89.16,113.06},{54.45,89.24},{30.19,39.65},{47.21,81.98},
{23.26,72.13},{ 5.23,18.61},{98.30,126.22},{76.67,121.33},
{15.19,36.75},{32.28,53.94},{54.94,93.75},{29.80,68.23},
{13.34,31.69},{25.80,49.03},{53.92,86.03},{59.11,87.03},
{66.36,98.96},{41.04,58.88},{53.57,75.94},{66.66,111.10},
{77.79,106.68},{59.73,96.21},{35.25,57.42},{16.90,49.89},
{10.91,44.14},{ 9.64,38.78},{16.21,46.00},{ 9.67,17.50},
{69.42,102.25},{13.27,41.15},{ 5.33,21.43},{ 8.75,34.65},
{11.58,31.34},{39.03,79.02},{40.36,71.67},{51.64,84.18},
{ 8.57,48.42},{61.06,90.15},{34.60,62.81},{88.37,146.39},
{35.34,58.07},{90.26,139.45},{67.17,105.11},{46.90,83.26},
{13.17,38.91},{12.60,38.97},{94.70,138.47},{ 4.83,17.95},
{41.33,71.29},{72.06,108.44},{29.23,64.11},{80.96,119.78},
{50.54,88.19},{73.60,112.55},{15.55,24.35},{71.89,119.62},
{22.45,52.55},{23.78,56.89},{67.82,101.94},{45.71,80.62},
{46.84,77.39},{72.09,108.04},{96.37,137.79},{21.86,44.94},
{90.04,121.40},{50.13,75.41},{31.81,72.94},{23.31,73.25},
{27.65,47.31},{77.28,110.69},{56.96,99.42},{53.82,80.22},
{26.66,41.22},{24.19,47.52},{59.29,96.29},{27.29,53.19},
{48.35,92.38},{47.26,67.62},{24.53,40.51},{25.60,58.04},
{49.16,76.45},{34.91,44.95},{43.46,48.32},{44.54,57.42},
{86.03,133.00},{69.17,112.61},{32.05,55.72},{53.62,86.49},
{74.95,108.81},{58.84,86.60},{80.26,110.16},{95.48,130.99},
{91.88,112.19},{37.76,64.48},{60.10,81.74},{79.85,120.60},
{26.67,45.29},{ 2.08,20.18},{ 5.88,43.00},{18.58,56.21},
{26.33,61.54},{26.83,60.58},{91.43,112.97},{59.40,87.65},
{56.91,87.60},{28.15,40.73},{52.30,88.49},{20.06,54.95},
{76.87,115.44},{29.19,56.74},{78.26,114.69},{32.37,77.12},
{91.77,131.29},{58.90,95.06},{ 5.20,21.96},{85.77,105.24},
{82.17,112.11},{90.34,133.28},{38.02,60.51},{18.05,42.97},
{93.14,141.01},{93.52,135.73},{59.07,81.50},{61.35,113.01},
{47.01,72.83},{99.82,141.53},{ 7.86,35.35},{20.26,50.45},
{74.17,94.34},{33.32,42.39},{16.65,19.59},{86.71,133.97},
{28.44,78.55},{24.51,58.01},{46.07,70.68},{57.03,97.18},
{86.40,119.32},{42.64,63.65},{35.08,92.64},{11.32,40.00},
{40.40,80.05},{42.22,76.46},{29.24,50.94},{ 5.32,22.49},
{96.61,135.37},{13.59,35.32},{98.40,134.57},{19.69,43.81},
{67.27,113.56},{71.06,107.98},{ 1.06,26.45},{54.78,84.04},
{45.64,76.93},{87.77,126.37},{33.46,51.98},{22.89,48.40},
{38.64,60.20},{60.97,101.94},{47.41,73.39},{60.58,95.67},
{88.12,106.82},{66.37,100.55},{81.12,115.71},{52.49,76.55},
{85.05,125.56},{32.06,60.44},{60.84,103.55},{43.76,67.29},
{62.33,88.25},{40.77,70.46},{94.72,121.74},{34.02,73.96},
{32.40,61.97},{41.78,64.30},{89.28,121.32},{53.05,80.41},
{27.30,68.46},{75.74,120.62},{65.47,93.80},{82.22,116.83},
{97.54,150.07},{16.82,36.26},{18.42,41.45},{87.32,133.69},
{77.52,109.91},{77.99,104.68},{25.88,61.94},{ 5.27,34.14},
{74.93,106.74},{82.90,120.76},{37.96,82.08},{68.52,105.20},
{39.62,65.56},{65.81,78.87},{29.38,36.77},{81.97,111.49},
{21.94,56.46},{38.19,55.31},{70.62,92.61},{ 4.07,14.27},
{58.56,92.06},{41.04,69.16},{64.68,99.07},{71.11,121.16},
{ 5.18,41.83},{88.83,126.77},{90.90,130.91},{87.74,123.22},
{44.98,75.14},{38.64,70.51},{71.87,96.28},{68.72,107.04},
{40.52,56.31},{70.48,98.16},{56.00,85.11},{30.70,58.96},
{44.16,78.72},{94.58,145.43},{13.56,37.67},{40.48,53.91},
{38.35,63.89},{85.90,127.17},{71.48,109.19},{16.73,54.54},
{41.59,70.35},{51.48,85.62},{85.93,127.63},{15.94,25.57},
{79.96,105.08},{80.48,122.53},{56.56,77.18},{ 0.37, 3.28},
{53.10,95.77},{56.80,95.59},{17.64,45.56},{34.34,50.47},
{31.32,59.70},{68.24,109.98},{71.80,107.38},{67.99,111.04},
{41.43,60.69},{13.88,42.31},{84.77,101.64},{49.69,72.67},
{22.84,59.96},{11.46,14.03},{ 6.25,29.29},{ 7.28,31.53},
{74.19,88.55},{33.73,57.99},{61.34,109.98},{47.19,83.93},
{43.72,61.28},{21.77,52.41},{76.98,100.87},{35.81,66.20},
{43.75,79.14},{59.39,79.98},{49.23,95.35},{80.09,96.68},
{48.21,87.57},{75.91,110.03},{47.81,91.15},{94.49,152.25},
{81.66,127.38},{12.89,40.54},{22.74,61.81},{62.68,89.80},
{56.24,91.10},{30.46,55.71},{36.86,70.38},{95.50,120.18},
{69.14,98.70},{95.94,147.08},{ 8.60,27.55},{62.82,106.65},
{30.90,46.78},{47.67,77.17},{32.79,45.39},{55.30,75.56},
{80.68,119.01},{38.33,67.72},{87.41,123.32},{37.30,61.33},
{66.09,105.27},{67.50,96.94},{ 6.82,28.21},{ 4.34,29.01},
{80.83,125.28},{32.25,54.26},{97.62,132.83},{49.25,76.67},
{96.51,121.49},{33.79,70.52},{83.52,129.24},{27.06,60.63},
{ 1.41,12.59},{13.22,57.64},{17.45,36.09},{71.35,76.96},
{87.88,130.79},{50.39,83.88},{89.14,138.52},{11.44,41.46},
{93.17,130.39},{67.21,95.80},{29.16,62.17},{92.98,143.45},
{47.99,76.13},{27.43,71.97},{51.93,96.56},{91.03,134.99},
{96.22,133.30},{41.24,49.18},{20.27,41.71},{75.72,106.00},
{50.23,93.58},{70.66,112.03},{88.12,124.27},{75.71,125.10},
{62.75,90.70},{ 9.55,35.97},{64.15,103.11},{15.63,51.96},
{15.97,49.70},{16.27,37.28},{36.98,53.67},{98.92,133.42},
{87.92,140.97},{49.12,84.19},{54.28,89.62},{71.33,96.50},
{54.58,94.85},{68.25,112.61},{66.23,96.79},{49.85,98.43},
{48.26,84.08},{ 6.28,38.20},{39.44,96.00},{92.27,133.34},
{79.68,110.59},{62.13,108.32},{54.00,91.27},{27.57,67.10},
{53.49,91.53},{20.29,36.56},{28.41,48.46},{65.46,83.93},
{93.88,144.37},{61.47,90.48},{39.03,74.45},{52.76,92.72},
{19.56,50.89},{34.67,61.01},{34.25,61.48},{69.18,92.59},
{88.76,122.35},{ 6.16,31.03},{10.21,46.60},{97.95,135.81},
{83.07,120.37},{66.03,105.65},{63.71,99.56},{17.93,35.56},
{78.71,119.64},{ 1.26,29.44},{97.17,131.88},{24.25,55.43},
{47.36,74.18},{81.31,113.86},{37.68,50.18},{26.47,37.91},
{74.66,115.30},{ 5.37,32.89},{ 9.25,14.24},{32.83,57.97},
{ 9.01,34.09},{31.82,67.05},{75.57,110.86},{13.20,58.84},
{38.74,58.61},{34.55,49.28},{ 8.63,18.41},{94.31,153.83},
{59.27,100.95},{68.09,95.94},{33.94,56.80},{22.81,52.08},
{17.09,51.37},{20.13,53.11},{23.53,45.18},{36.24,54.80},
{84.20,111.33},{32.35,77.34},{38.68,73.17},{ 6.00,24.34},
{30.14,57.76},{18.18,25.99},{48.93,70.53},{63.37,85.23},
{34.74,71.07},{22.82,60.05},{54.59,82.07},{63.82,102.78},
{70.17,106.61},{ 1.74,21.29},{54.94,91.34},{78.52,104.39},
{57.23,113.05},{66.98,97.65},{17.06,39.70},{36.63,64.22},
{51.49,67.72},{76.94,123.46},{98.72,140.65},{19.58,48.67},
{25.91,54.75},{18.70,25.83},{80.77,119.17},{88.11,128.37},
{78.37,132.97},{66.91,105.66},{33.54,56.14},{49.66,86.32},
{32.00,58.47},{40.01,69.54},{56.12,73.78},{96.04,141.93},
{31.96,68.36},{94.41,121.09},{76.95,109.13},{55.68,88.70},
{ 3.88,44.47},{ 7.22,36.32},{65.27,96.32},{88.18,93.03},
{60.77,63.94},{63.81,84.81},{47.29,80.62},{82.50,127.64},
{31.43,46.32},{13.18,32.13},{93.39,136.75},{85.27,142.09},
{43.24,108.17},{75.25,125.18},{33.46,93.07},{90.49,142.00},
{79.88,118.01},{20.84,62.60},{63.69,99.74},{76.84,126.38},
{11.54,30.15},{76.43,118.27},{35.50,66.80},{45.32,66.67},
{66.48,100.04},{73.72,99.06},{12.85,35.73},{62.84,91.74},
{ 6.88,37.00},{89.26,132.68},{85.01,107.91},{23.87,56.07},
{38.03,56.40},{92.03,130.52},{42.02,89.57},{13.17,30.48},
{82.16,130.63},{22.68,58.86},{ 8.52,38.41},{16.25,42.46},
{65.35,97.29},{31.45,49.97},{59.30,87.47},{65.11,117.40},
{29.39,50.87},{22.55,63.15},{36.95,65.12},{83.54,123.62},
{11.75,25.18},{63.39,102.99},{61.42,83.35},{34.12,68.55},
{90.74,152.52},{51.08,82.09},{20.28,41.21},{76.19,110.13},
{37.99,67.26},{91.05,147.73},{11.77,19.86},{28.29,67.38},
{66.26,98.05},{22.57,34.01},{16.30,25.37},{44.77,80.61},
{46.39,82.78},{74.08,115.65},{53.10,92.48},{ 0.04,28.78},
{29.00,42.26},{ 4.81,25.29},{28.74,47.05},{33.14,69.33},
{87.96,129.02},{54.49,75.32},{37.37,58.59},{99.58,147.95},
{21.68,56.37},{ 1.48,37.32},{91.01,134.77},{88.29,129.72},
{32.73,59.08},{96.57,135.48},{90.10,130.41},{20.21,48.08},
{52.88,70.85},{89.69,133.44},{24.74,51.81},{73.87,104.53},
{61.84,91.90},{65.37,84.10},{41.40,74.81},{38.22,54.51},
{18.24,41.00},{90.06,124.68},{ 2.89,34.07},{80.14,106.74},
{89.91,127.71},{60.63,102.62},{35.00,68.27},{70.63,100.02},
{15.65,34.17},{71.64,95.74},{18.05,40.60},{38.08,50.51},
{88.71,110.01},{16.08,55.91},{80.55,111.48},{36.13,62.87},
{85.88,108.71},{50.75,102.58},{87.31,144.42},{23.96,42.23},
{71.56,117.05},{88.79,139.37},{98.10,122.99},{26.68,52.26},
{12.51,18.84},{66.50,109.63},{19.18,58.66},{78.17,120.33},
{14.11,41.17},{31.29,56.93},{39.37,61.22},{ 0.85,43.30},
{48.95,72.30},{81.94,126.11},{69.96,100.68},{48.34,76.21},
{66.52,123.56},{15.95,33.56},{ 5.51,24.27},{28.75,76.63},
{13.20,30.41},{51.47,86.79},{84.84,128.08},{50.96,74.05},
{76.41,119.73},{75.18,105.68},{ 3.85,24.64},{42.69,70.68},
{86.75,133.83},{ 8.31,17.44},{73.34,106.15},{32.65,66.74},
{44.83,73.09},{ 8.14,59.97},{49.55,75.57},{30.72,48.02},
{92.47,134.65},{81.10,133.44},{29.67,58.25},{84.63,131.65},
{16.49,38.95},{77.48,128.30},{42.96,74.42},{86.71,108.12},
{26.98,38.76},{60.41,91.56},{91.91,130.19},{86.14,128.67},
{91.64,141.09},{55.81,97.06},{48.26,78.28},{41.49,75.39},
{36.58,59.50},{76.44,85.43},{72.56,106.57},{72.87,105.68},
{15.32,10.87},{ 7.18,30.28},{92.52,124.86},{93.91,136.79},
{31.38,64.56},{91.97,130.64},{24.10,62.56},{42.05,65.55},
{18.59,41.41},{41.94,62.05},{ 0.75,45.24},{91.66,121.28},
{88.75,111.57},{13.14,53.87},{67.12,96.45},{55.66,89.07},
{13.93,25.23},{ 6.10,26.44},{23.28,48.96},{14.55,22.55},
{31.85,44.45},{37.61,63.29},{22.94,42.72},{57.14,89.52},
{52.50,77.71},{99.59,154.03},{36.59,49.47},{26.34,55.29},
{96.63,148.57},{91.80,135.06},{94.11,134.16},{55.52,95.86},
{25.79,52.48},{84.44,118.14},{ 6.44,29.89},{35.95,48.28},
{31.95,58.34},{40.99,69.64},{12.35,26.00},{67.47,98.87},
{57.15,92.49},{68.42,96.52},{14.48,29.33},{33.90,71.00},
{ 0.59,24.46},{24.79,35.20},{69.86,96.24},{77.34,121.23},
{16.53,28.60},{96.95,141.18},{80.66,110.56},{45.52,90.34},
{63.06,106.13},{42.90,74.99},{51.66,79.20},{80.40,115.11},
{25.35,45.56},{68.82,94.10},{14.07,26.03},{30.36,59.02},
{39.87,65.31},{ 5.17,24.49},{77.51,113.81},{ 9.52,29.77},
{ 8.30,24.24},{44.04,92.38},{ 8.16,14.50},{47.82,73.90},
{90.61,105.32},{72.52,91.53},{45.44,69.37},{75.18,100.34},
{ 9.15,21.48},{12.40,27.27},{32.11,57.98},{63.31,94.25},
{69.98,100.76},{98.21,145.59},{46.96,58.96},{26.95,45.25},
{84.89,126.56},{11.73,30.97},{94.06,122.71},{77.29,127.08},
{62.73,101.50},{99.86,138.24},{46.33,72.20},{22.49,54.21},
{95.73,134.34},{90.17,128.09},{68.55,92.58},{71.82,100.12},
{32.57,71.35},{41.17,72.70},{ 6.60,36.63},{94.76,135.53},
{29.34,80.82},{89.52,140.31},{96.29,127.96},{55.33,89.82},
{ 9.28,23.22},{96.95,127.20},{65.60,108.09},{82.77,122.12},
{93.00,110.03},{82.74,111.85},{93.82,140.22},{94.54,124.90},
{45.18,102.72},{25.24,35.61},{22.80,58.73},{79.85,121.12},
{ 8.31,41.44},{99.01,131.20},{ 0.59,15.39},{64.58,107.07},
{88.62,121.65},{81.62,137.18},{94.06,132.65},{96.52,123.62},
{ 2.46,36.78},{22.52,43.56},{13.21,35.58},{ 7.60,16.69},
{66.39,96.44},{ 5.81,22.40},{25.76,48.90},{95.19,146.27},
{43.21,82.26},{67.96,111.70},{85.21,127.00},{ 4.99,23.50},
{68.93,105.76},{63.58,81.18},{81.53,105.11},{63.83,90.91},
{54.74,94.01},{61.84,109.68},{56.63,91.84},{24.59,51.08},
{62.64,88.02},{88.04,132.49},{88.78,135.86},{18.67,49.25},
{74.69,120.78},{64.37,103.79},{21.19,60.29},{52.40,90.69},
{11.87,25.06},{ 7.19,21.90},{36.87,66.72},{61.64,91.63}
};
/* Number of points in the data[] table above (host-side count). */
int n_data = 1000;
__device__ int d_n_data = 1000; /* device-side mirror; must match n_data */
// Squared residual of the point (x, y) against the line y = m*x + c.
double residual_error(double x, double y, double m, double c) {
    double diff = (m * x) + c - y;
    return diff * diff;
}
// Device twin of residual_error(): squared residual of (x, y) vs. y = m*x + c.
__device__ double d_residual_error(double x, double y, double m, double c) {
    double diff = (m * x) + c - y;
    return diff * diff;
}
// Root-mean-square residual of the line (m, c) over the global data[] table
// of n_data points.
double rms_error(double m, double c) {
    double total = 0;
    for (int idx = 0; idx < n_data; idx++) {
        total += residual_error(data[idx].x, data[idx].y, m, c);
    }
    return sqrt(total / n_data);
}
// Kernel: one thread per data point. Thread i writes the squared residual of
// point i (for the single candidate line *m, *c) into error_sum_arr[i]; the
// host reduces the array afterwards. There is no bounds check, so the launch
// must supply exactly one thread per point (grid*block == n_data).
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
// Computes (finish - start) in nanoseconds through *difference. Returns zero
// on success and non-zero when the start time is not strictly before the
// finish time.
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
    long long int sec_delta = finish->tv_sec - start->tv_sec;
    long long int nsec_delta = finish->tv_nsec - start->tv_nsec;
    // Borrow one second when the nanosecond field underflows.
    if (nsec_delta < 0) {
        nsec_delta += 1000000000;
        sec_delta -= 1;
    }
    *difference = sec_delta * 1000000000 + nsec_delta;
    return !(*difference > 0);
}
/*
 * Fits y = m*x + c to the global data[] table by hill-climbing: each round
 * evaluates the RMS error of the 8 neighbouring (m, c) points at distance
 * `step` on the GPU and moves to the best one until no neighbour improves.
 *
 * Fixes vs. the original:
 *  - error_sum_total was declared fresh each pass and read before ever being
 *    written (undefined behaviour); it is now initialised to 0.
 *  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize().
 *  - kernel launch errors are now surfaced via cudaGetLastError().
 *  - the constant data table is copied to the device once, not every loop.
 */
int main() {
  int i;
  double bm = 1.3;               /* current best gradient */
  double bc = 10;                /* current best intercept */
  double be;                     /* RMS error of the current best line */
  double dm[8];                  /* candidate gradients, one per direction */
  double dc[8];                  /* candidate intercepts */
  double e[8];                   /* RMS error of each candidate */
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;
  /* Offsets of the 8 compass directions around (m, c). */
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  /* The data table never changes: copy it to the device exactly once. */
  error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
  if(error){
    fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
            cudaGetErrorString(error));
  }
  while(!minimum_found) {
    /* Build the 8 candidate (m, c) pairs around the current best point. */
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      double h_error_sum_arr[1000];
      double error_sum_total = 0;  /* FIX: was read while uninitialised */
      double error_sum_mean;
      /* 100 blocks x 10 threads == 1000 threads, one per data point. */
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      error = cudaGetLastError();  /* FIX: surface launch failures */
      if(error){
        fprintf(stderr, "d_rms_error launch returned %d %s\n", error,
                cudaGetErrorString(error));
      }
      cudaDeviceSynchronize();     /* FIX: cudaThreadSynchronize is deprecated */
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
                cudaGetErrorString(error));
      }
      /* Reduce the per-point squared errors to an RMS value on the host. */
      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }
      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    if(best_error < be) {
      /* Some neighbour improved: move there and keep searching. */
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed/1.0e9));
  return 0;
}
|
14,630 | #include <stdio.h>
#include <cuda_runtime.h>
int main(){
// Minimal smoke test: prints a greeting from the host; no device code runs.
printf("hello world\n");
return 0;
} |
14,631 | #include <stdio.h>
#include <iostream>
#include <ctime>
using namespace std;
// Fill the first n entries of arr with init_val.
void host_init(float *arr, int n, float init_val){
    for (int idx = 0; idx < n; ++idx) {
        arr[idx] = init_val;
    }
}
// CPU reference: element-wise h_c[i] = h_a[i] + h_b[i] over n elements.
void host_vec_add(float *h_a, float *h_b, float *h_c, int n){
    int idx = 0;
    while (idx < n) {
        h_c[idx] = h_a[idx] + h_b[idx];
        ++idx;
    }
}
// Element-wise addition d_c = d_a + d_b over an n-row by m-column row-major
// matrix; one thread per element, guarded against grid overhang.
__global__
void cuda_vec_add(float *d_a, float *d_b, float *d_c, int n, int m){
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if (x >= m || y >= n) {
        return;  // thread falls outside the matrix
    }
    int flat = y * m + x;
    d_c[flat] = d_a[flat] + d_b[flat];
}
// Abort the program with a diagnostic if a CUDA runtime call failed.
void cudaError_check(cudaError_t err){
    if (err == cudaSuccess) {
        return;
    }
    printf("GPUassert: %s %s %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
    exit(EXIT_FAILURE);
}
/*
 * Adds two 1024x1024 matrices on the CPU and the GPU, verifies the results
 * match, and reports timings.
 *
 * Fixes vs. the original:
 *  - grid x/y dimensions were swapped relative to the kernel's col/row
 *    mapping (benign only because n == m); now x covers columns (m), y rows (n).
 *  - kernel launch errors are surfaced via cudaGetLastError().
 *  - all host and device buffers were leaked; they are now released.
 *  - main returned 1 unconditionally (failure); it now returns 0 on success.
 */
int main(){
    float *h_a, *h_b, *h_c, *h_c_cpy;
    float *d_a, *d_b, *d_c;
    int n = 1024; // rows
    int m = 1024; // cols
    size_t size = n*m*sizeof(float);
    clock_t start, stop;
    //################## HOST Start ###################//
    h_a = (float*) malloc (size);
    h_b = (float*) malloc (size);
    h_c = (float*) malloc (size);
    h_c_cpy = (float*) malloc (size);
    host_init(h_a, n*m, 1);
    host_init(h_b, n*m, 2);
    host_init(h_c, n*m, 0);
    host_init(h_c_cpy, n*m, 0);
    start = clock();
    host_vec_add(h_a, h_b, h_c, n*m);
    stop = clock();
    double cpu_duration = (stop - start) / (double) CLOCKS_PER_SEC;
    //################## HOST End ###################//
    //################## CUDA Start ###################//
    cudaError_t err;
    err = cudaMalloc((void **) &d_a, size);
    cudaError_check(err);
    err = cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaError_check(err);
    err = cudaMalloc((void **) &d_b, size);
    cudaError_check(err);
    err = cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
    cudaError_check(err);
    err = cudaMalloc((void **) &d_c, size);
    cudaError_check(err);
    // Kernel invocation: one thread per element in 32x32 blocks.
    int num_threads_per_block = 32;
    // FIX: grid.x must cover the m columns and grid.y the n rows to match the
    // kernel's col = x / row = y indexing (was swapped; hidden by n == m).
    dim3 gridDim ((m-1)/num_threads_per_block + 1, (n-1)/num_threads_per_block + 1, 1);
    dim3 blockDim (num_threads_per_block, num_threads_per_block, 1);
    start = clock();
    cuda_vec_add<<<gridDim, blockDim>>>(d_a, d_b, d_c, n, m);
    err = cudaGetLastError();       // FIX: catch launch-configuration errors
    cudaError_check(err);
    err = cudaDeviceSynchronize();
    stop = clock();
    cudaError_check(err);
    err = cudaMemcpy(h_c_cpy, d_c, size, cudaMemcpyDeviceToHost);
    cudaError_check(err);
    double gpu_duration = (stop - start) / (double) CLOCKS_PER_SEC;
    //################## CUDA End ###################//
    int success = 1;
    for (int i = 0; i < n*m; i++){
        if (h_c[i] != h_c_cpy[i]){
            success = 0;
            printf("Failure at idx: %d\n", i);
            break;
        }
    }
    if (success == 1)
        printf("Success\n");
    printf("CPU Duration: %0.3f secs \n", cpu_duration);
    printf("GPU Duration: %0.3f secs \n", gpu_duration);
    // FIX: release device and host memory (previously leaked).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_c_cpy);
    // FIX: report success/failure through the exit status.
    return success ? 0 : 1;
}
|
14,632 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// One step of a work-inefficient (Hillis-Steele style) exclusive prefix sum.
// distance == 0 shifts the input right by one (newSum[0] = 0); any other
// distance adds each element to the one `distance` slots earlier. Uses a
// grid-stride loop so any launch configuration covers all numElements slots.
__global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) {
/** YOUR CODE GOES BELOW **/
    int stride = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < numElements; idx += stride)
    {
        int value;
        if (distance == 0)
        {
            // Initial shift: element 0 becomes the identity, others copy left.
            value = (idx == 0) ? 0 : oldSum[idx - 1];
        }
        else if (idx >= distance)
        {
            value = oldSum[idx] + oldSum[idx - distance];
        }
        else
        {
            value = oldSum[idx];
        }
        newSum[idx] = value;
    }
/** YOUR CODE GOES ABOVE **/
}
|
14,633 | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
// Print the failing call site (file + line) and terminate when err is not
// cudaSuccess; no-op otherwise.
void inline check(cudaError_t err, const char* filename, int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s-l%i: %s\n", filename, line, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
// Enumerates the CUDA devices on this machine and prints each one's name and
// streaming-multiprocessor count.
int main(int argc, char** argv)
{
    cudaError_t status;   // result of each runtime call
    int deviceCount;      // number of CUDA devices found
    status = cudaGetDeviceCount(&deviceCount);
    check(status, __FILE__, __LINE__);
    printf("\nFound %i devices\n\n", deviceCount);
    for (int dev = 0; dev < deviceCount; dev++)
    {
        status = cudaSetDevice(dev);
        check(status, __FILE__, __LINE__);
        struct cudaDeviceProp props;
        status = cudaGetDeviceProperties(&props, dev);
        check(status, __FILE__, __LINE__);
        printf("Device %i : ", dev);
        printf("%s ", props.name);
        printf("with %i SMs\n", props.multiProcessorCount);
    }
    printf("\n");
    return EXIT_SUCCESS;
}
|
14,634 | #include "includes.h"
// SVM-style prediction kernel: one thread per test object. Thread `id`
// computes fx = b + sum_i a[i] * y_train[i] * kval[i*objs + id] and stores
// the sign (+1 / -1) in y[id]. kval is laid out so consecutive threads read
// consecutive elements (coalesced); every thread reads the same a[i] and
// y_train[i] each iteration, as the original comment notes.
__global__ static void calc_predict(int objs,int objs_train,double* a,double b,int* y_train,double* kval,int* y){
int id=blockDim.x * blockIdx.x + threadIdx.x;
if (id<objs){
double fx=b;
for (int i=0;i<objs_train;i++){
//access to a and y are not coalesced
fx+=a[i]*y_train[i]*kval[i*objs+id];
}
y[id] = fx>=0 ? 1:-1;
}
} |
14,635 | #include <stdio.h>
#define TILE_WIDTH 32 //block size ,each thread to calucate each block
// Tiled matrix multiply C = A * B with A: MxK, B: KxN, C: MxN (row-major).
// PRECONDITION: blockDim must be exactly TILE_WIDTH x TILE_WIDTH threads --
// the shared tiles are TILE_WIDTH^2 and the inner loop walks all TILE_WIDTH
// columns, so smaller blocks would read slots no thread ever wrote.
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int M, int K, int N) {
__shared__ float sharedM[TILE_WIDTH][TILE_WIDTH];
__shared__ float sharedN[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float v = 0.0;
// Tile along the K dimension: each block produces one TILE_WIDTH^2 tile
// of C. v accumulates within a tile first, then across the
// ceil(K / TILE_WIDTH) tiles.
for (int i = 0; i < (int)(ceil((float)K / TILE_WIDTH)); i++) {
if (i * TILE_WIDTH + tx < K && row < M)
sharedM[ty][tx] = A[row * K + i * TILE_WIDTH + tx];
else
sharedM[ty][tx] = 0.0;
if (i * TILE_WIDTH + ty < K && col < N)
sharedN[ty][tx] = B[(i * TILE_WIDTH + ty) * N + col];
else
sharedN[ty][tx] = 0.0;
__syncthreads();
for(int j = 0; j < TILE_WIDTH; j++)
v += sharedM[ty][j] * sharedN[j][tx];
__syncthreads();
}
if (row < M && col < N)
C[row * N + col] = v;
}
/*
 * Multiplies a fixed 2x3 by a 3x2 matrix with the tiled kernel and prints C.
 *
 * FIX: the kernel's shared tiles are TILE_WIDTH x TILE_WIDTH and its inner
 * loop runs over all TILE_WIDTH slots, so each block MUST be exactly
 * TILE_WIDTH^2 threads. The original launched 16x16 blocks with
 * TILE_WIDTH == 32, leaving three quarters of every tile uninitialised and
 * feeding garbage into the dot products. The launch now uses TILE_WIDTH.
 */
int main(int argc, char **argv) {
    float A[] = {2, 3, -1, 6, 1, -2}; // M x K = 2 x 3
    // 2 3 -1
    // 6 1 -2
    float B[] = {4, -5, -3, 0, 1, 2}; // K x N = 3 x 2
    // 4 -5
    // -3 0
    // 1 2
    float C[4] = {0};
    int M = 2;
    int K = 3;
    int N = 2;
    float *d_a;
    float *d_b;
    float *d_c;
    cudaMalloc((void**)&d_a, M * K * sizeof(float));
    cudaMalloc((void**)&d_b, K * N * sizeof(float));
    cudaMalloc((void**)&d_c, M * N * sizeof(float));
    cudaMemcpy(d_a, A, sizeof(float) * M * K, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, sizeof(float) * K * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, C, sizeof(float) * M * N, cudaMemcpyHostToDevice);
    // FIX: block size must equal TILE_WIDTH in both dimensions (was 16).
    dim3 dim_grid((N - 1) / TILE_WIDTH + 1, (M - 1) / TILE_WIDTH + 1, 1);
    dim3 dim_block(TILE_WIDTH, TILE_WIDTH, 1);
    matrixMultiplyShared<<<dim_grid, dim_block>>>(d_a, d_b, d_c, M, K, N);
    // cudaMemcpy implicitly synchronizes with the kernel before copying back.
    cudaMemcpy(C, d_c, sizeof(float) * M * N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < M * N; i++) {
        printf("%f ", C[i]);
    }
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
14,636 | // includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
/*
 * Memory-copy exercise: initialises an 8-element host array, round-trips it
 * host -> device (d_a) -> device (d_b) -> host (new_a), and asserts the data
 * survived intact.
 *
 * Fixes vs. the original:
 *  - h_a was allocated with malloc() but released with cudaFreeHost(), which
 *    is only valid for cudaMallocHost allocations; it is now freed with free().
 *  - new_a was never released; it is now freed as well.
 */
int main( int argc, char** argv)
{
    // pointer and dimension for host memory
    int n, dimA;
    float *h_a;
    float *new_a;
    // pointers for device memory
    float *d_a, *d_b;
    // allocate and initialize host memory
    // (cudaMallocHost would give pinned memory here; plain malloc is used,
    // so the matching release below must be free(), not cudaFreeHost().)
    dimA = 8;
    size_t memSize = dimA*sizeof(float);
    h_a = (float*)malloc(memSize);
    new_a = (float*)malloc(memSize);
    for (n=0; n<dimA; n++)
    {
        h_a[n] = (float) n;
    }
    // Part 1 of 5: allocate device memory
    cudaMalloc( (void**)&d_a, memSize );
    cudaMalloc( (void**)&d_b, memSize );
    // Part 2 of 5: host to device memory copy
    cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );
    // Part 3 of 5: device to device memory copy
    cudaMemcpy( d_b, d_a, memSize, cudaMemcpyDeviceToDevice );
    // clear host memory so the verification below can only pass if the data
    // really came back from the device
    for (n=0; n<dimA; n++)
    {
        printf("Data in host memory h_a %f\n", h_a[n]);
        h_a[n] = 0.f;
    }
    // Part 4 of 5: device to host copy
    cudaMemcpy( new_a, d_b, memSize, cudaMemcpyDeviceToHost );
    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy calls");
    // verify the data on the host is correct
    for (n=0; n<dimA; n++)
    {
        assert(new_a[n] == (float) n);
    }
    // Part 5 of 5: free device memory pointers d_a and d_b
    cudaFree( d_b );
    cudaFree( d_a );
    // Check for any CUDA errors
    checkCUDAError("cudaFree");
    // FIX: pair malloc() with free() (cudaFreeHost on a malloc'd pointer is
    // undefined) and release new_a, which previously leaked.
    free(h_a);
    free(new_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("cudaMallocHost is working Correct!\n");
    return 0;
}
// Reports (and clears) the most recent CUDA runtime error, tagging the
// message with msg, then exits; does nothing when no error is pending.
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(-1);
}
14,637 | #include "includes.h"
// NLL-loss backward pass: for sample `tid`, subtracts the incoming gradient
// yGrad[tid] from the logit selected by target[tid] within a row of width
// x_stride (offset = tid * x_stride + target[tid]).
// NOTE(review): no bounds check on tid -- the launch must supply exactly one
// thread per sample. The `+=` is a plain read-modify-write; it is race-free
// only because each tid addresses its own row (assumes target[tid] <
// x_stride -- confirm at call site).
__global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
xGrad[offset] += -1 * yGrad[tid];
} |
14,638 | // License Summary: MIT see LICENSE file
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
__device__ float const * A;
__device__ float const * B;
__device__ float * C;
// Element-wise C[i] = A[i] + B[i] through the device-global pointers A, B, C
// declared above (set by the host before launch).
// NOTE(review): no bounds check -- the launch must cover exactly the buffer
// length. The cooperative-groups `block` handle is created but never used.
extern "C" __global__ void vectorAdd() {
cg::thread_block block = cg::this_thread_block();
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
} |
14,639 | //ulimit -s unlimited
//nvcc Cuda.cu -arch sm_20 -o cuda.out && ./cuda.out
#define NMAXITER 100
#define SerialExecution 1
//#define NMAXITERKernel 60000
//#define CUDACount 60000
//#define CUDACount 44642
//#define NMAXITERKernel 44642
#define LambdaParameter 1
#define COLUMNLENGTH 1
#define TOTALTHREDSPERBLOCK 32
#define DIM2 1
#define CUDACount 90000
#define NMAXITERKernel 9000000
//nvcc Cuda.cu -arch sm_13 && ./a.out
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <stdint.h>
#include <cuda.h>
#include "device_functions.h"
#include <curand.h>
#include <curand_kernel.h>
//cudaEventRecord(start, 0);
//for (int i = 0; i < 2; ++i) {
//cudaMemcpyAsync(inputDev + i * size, inputHost + i * size,
//size, cudaMemcpyHostToDevice, stream[i]);
//MyKernel<<<100, 512, 0, stream[i]>>>
//(outputDev + i * size, inputDev + i * size, size);
//cudaMemcpyAsync(outputHost + i * size, outputDev + i * size,
//size, cudaMemcpyDeviceToHost, stream[i]);
//}
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//float elapsedTime;
//cudaEventElapsedTime(&elapsedTime, start, stop);
//They are destroyed this way:
//cudaEventDestroy(start);
//cudaEventDestroy(stop);
#include <cuda_runtime.h>
#define MAXFEATURES 47235000
#define MAXINSTANCES 100000000
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
/* Pairs a value with its original position so qsort (see
 struct_cmp_by_value) can order entries by squared magnitude without
 losing where each entry came from. */
struct st_sortingByAbsWithIndex {
double value; /* sort key: compared by value*value, descending */
long int idx; /* original index before sorting */
};
/* qsort struct comparision function (price double field) */
/* qsort comparator for st_sortingByAbsWithIndex: orders entries by squared
 magnitude (value * value), largest first; equal magnitudes compare equal. */
int struct_cmp_by_value(const void *a, const void *b) {
	struct st_sortingByAbsWithIndex *lhs = (struct st_sortingByAbsWithIndex *) a;
	struct st_sortingByAbsWithIndex *rhs = (struct st_sortingByAbsWithIndex *) b;
	double lhs_sq = lhs->value * lhs->value;
	double rhs_sq = rhs->value * rhs->value;
	if (lhs_sq > rhs_sq)
		return -1;
	if (lhs_sq < rhs_sq)
		return 1;
	return 0;
}
float CS[] = { 0.062500, 0.125000, 0.250000, 0.500000, 1.000000, 2.000000,
4.000000, 8.000000, 16.000000, 32.000000, 64.000000 };
int cscount = 11;
//#include <cuda.h>
// Kernel that executes on the CUDA device
//__global__ void square_array(float *a, int N)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// if (idx<N) a[idx] = a[idx] * a[idx];
//}
// main routine that executes on the host
//void RCDM(float** A, int IDX, float L, float b, float lambda, int n,
// int N) {
//
//}
/* integer array printing function */
/* Debug helper: prints len floats separated by " | ", framed by dashed
 separator lines. */
void print_float_array(const float *array, size_t len) {
	printf("\n--------- \n");
	for (size_t pos = 0; pos < len; pos++) {
		printf("%f | ", array[pos]);
	}
	putchar('\n');
	printf("\n--------- \n");
}
/* Debug helper: prints len ints separated by " | " on a single line. */
void print_int_array(const int *array, size_t len) {
	for (size_t pos = 0; pos < len; pos++) {
		printf("%d | ", array[pos]);
	}
	putchar('\n');
}
/* Builds a random sparse n-column, m-row matrix in compressed-column form:
 every column i gets C_Count[i] (uniform in [ccount_min, ccount_min +
 ccount_diff)) nonzeros at distinct random row indices with values in
 [0, 1). Allocates and fills *A (values), *R_Idx (row index per nonzero),
 *C_Idx (start offset of each column), *C_Count, and writes the total
 nonzero count to *nnzout. Caller owns the returned buffers.
 NOTE(review): the rejection loop that enforces distinct row indices never
 terminates if a column's nonzero count can exceed m -- confirm callers keep
 ccount_min + ccount_diff <= m. */
void generateRandomProblem(float ** A, int** R_Idx, int**C_Idx, int **C_Count,
int n, int m, int ccount_min, int ccount_diff, int * nnzout) {
*C_Idx = (int *) malloc(n * sizeof(int));
*C_Count = (int *) malloc(n * sizeof(int));
int i, j, idx, k;
int nnz = 0;
/* First pass: draw the per-column nonzero counts and total them. */
for (i = 0; i < n; i++) {
(*C_Count)[i] = ((int) ((ccount_diff) * (rand() / (RAND_MAX + 1.0))))
+ ccount_min;
nnz += (*C_Count)[i];
}
(*nnzout) = nnz;
*A = (float *) malloc(nnz * sizeof(float));
*R_Idx = (int *) malloc(nnz * sizeof(int));
nnz = 0;
/* Second pass: fill each column with distinct row indices and values. */
for (i = 0; i < n; i++) {
(*C_Idx)[i] = nnz;
for (j = 0; j < (*C_Count)[i]; j++) {
int notfinished = 1;
/* Rejection sampling: redraw idx until it differs from all earlier
 row indices chosen for this column. */
while (notfinished) {
notfinished = 0;
idx = ((int) ((m) * (rand() / (RAND_MAX + 1.0))));
for (k = 0; k < j; k++) {
if ((*R_Idx)[(*C_Idx)[i] + k] == idx) {
notfinished = 1;
}
}
}
(*R_Idx)[nnz] = idx;
(*A)[nnz] = (float) rand() / RAND_MAX;
// printf("A[%d][%d] = %f\n", nnz, (*R_Idx)[nnz], (*A)[nnz]);
nnz++;
}
}
}
/*
 * Generates a synthetic L1-regularised least-squares instance (Nesterov-style
 * construction) with a planted sparse optimum: builds a random n-column,
 * p-nonzeros-per-column matrix, rescales columns ranked by |B^T y|, plants a
 * sparse solution x, and derives the right-hand side b. Outputs the matrix in
 * compressed-column form (*A, *R_Idx, *C_Idx, *C_Count, *nnzout) plus *b_out
 * and the planted optimum *xOpt; the caller owns all returned buffers.
 *
 * Fixes vs. the original:
 *  - qsort was called with sizeof(dataToSort)/sizeof(elem) -- the size of a
 *    POINTER -- so essentially nothing was ever sorted; the count is n.
 *  - dataToSort was allocated with m entries but indexed up to n-1
 *    (heap overflow whenever n > m); it now holds n entries.
 *  - integer abs() was applied to doubles (truncates); replaced with fabs().
 *  - scratch buffers x, Li and y previously leaked; they are now freed.
 */
void generateNesterovProblem(float ** A, int** R_Idx, int**C_Idx,
		int **C_Count, float**b_out, float** xOpt, int n, int m,
		int ccount_min, int ccount_diff, int * nnzout) {
	int p = ccount_min; /* fixed number of nonzeros per column */
	/* Stack VLAs: huge for large n*p -- the file's header note
	 ("ulimit -s unlimited") exists because of these. */
	double A_h[n][p]; // host A matrix values
	printf("alokacia A done \n");
	long int IDX_h[n][p]; // host A row-index matrix
	printf("alokacia I done \n");
	printf("alokacia x done \n");
	double optimalvalue = 0;
	int n_nonzero = 160000;      /* planted support size */
	double rho = 1;
	double sqrtofnonzeros = 400; /* = sqrt(n_nonzero) */
	int i, j, k;
	double tmp;
	printf("alokacia poli END\n");
	/* ---- generate random matrix B: p distinct row indices per column ---- */
	for (i = 0; i < n; i++) {
		long int idx = 0;
		for (j = 0; j < p; j++) {
			int notfinished = 1;
			double val = (double) rand() / RAND_MAX;
			/* Rejection sampling: redraw until idx is new for this column. */
			while (notfinished) {
				notfinished = 0;
				idx = ((long int) ((m) * (rand() / (RAND_MAX + 1.0))));
				for (k = 0; k < j; k++) {
					if (IDX_h[i][k] == idx) {
						notfinished = 1;
					}
				}
			}
			A_h[i][j] = 2 * val - 1; /* uniform in (-1, 1) */
			IDX_h[i][j] = idx;
		}
	}
	printf("Matrix B Generated\n");
	double* y;
	y = (double*) calloc(m, sizeof(double));
	tmp = 0;
	for (j = 0; j < m; j++) {
		y[j] = (double) rand() / RAND_MAX;
		tmp += y[j] * y[j];
	}
	for (j = 0; j < m; j++) {
		y[j] = y[j] / tmp; /* scale by 1/||y||^2 */
	}
	printf("vector y Generated\n");
	/* ---- rank the columns by |B_i^T y| so a sparse optimum can be planted */
	struct st_sortingByAbsWithIndex* dataToSort;
	/* FIX: one entry per column (n), not per row (m). */
	dataToSort = (struct st_sortingByAbsWithIndex*) calloc(n,
			sizeof(struct st_sortingByAbsWithIndex));
	for (i = 0; i < n; i++) {
		dataToSort[i].idx = i;
		dataToSort[i].value = 0;
	}
	printf("Struc created\n");
	for (i = 0; i < n; i++) {
		tmp = 0;
		for (j = 0; j < p; j++) {
			tmp += y[IDX_h[i][j]] * A_h[i][j];
		}
		dataToSort[i].value = tmp;
	}
	printf("SORTING START\n");
	/* FIX: sizeof(dataToSort) was the size of a pointer, so qsort received an
	 element count of 0; the array holds exactly n elements. */
	size_t structs_len = (size_t) n;
	printf("SORTING 2\n");
	qsort(dataToSort, structs_len, sizeof(struct st_sortingByAbsWithIndex),
			struct_cmp_by_value);
	printf("SORTING END\n");
	/* ---- rescale columns and plant the sparse solution x ---- */
	double* x;
	x = (double*) calloc(n, sizeof(double));
	for (i = 0; i < n; i++) { // building matrix A
		int idx = dataToSort[i].idx;
		double alpha = 1;
		x[idx] = 0;
		/* NOTE(review): the original indexes dataToSort by idx (a column id)
		 rather than by rank i here; preserved as-is -- confirm intent. */
		if (i < n_nonzero) {
			alpha = fabs(1 / dataToSort[idx].value); /* FIX: was int abs() */
			x[idx] = ((double) rand() / RAND_MAX) * rho / (sqrtofnonzeros);
			if (dataToSort[idx].value < 0) {
				x[idx] = -x[idx];
			}
		} else if (dataToSort[idx].value > 0.1 || dataToSort[idx].value < -0.1) {
			alpha = fabs(1 / dataToSort[idx].value) * (double) rand()
					/ RAND_MAX; /* FIX: was int abs() */
		}
		for (j = 0; j < p; j++) {
			A_h[idx][j] = A_h[idx][j] * alpha;
		}
	}
	free(dataToSort);
	/* ---- Lipschitz constants Li = 1 / ||A_i||^2 per column ---- */
	double* Li;
	Li = (double*) calloc(n, sizeof(double));
	for (i = 0; i < n; i++) {
		Li[i] = 0;
		for (j = 0; j < p; j++) {
			Li[i] += A_h[i][j] * A_h[i][j];
		}
		Li[i] = 1 / Li[i];
	}
	/* ---- optimal objective value: 0.5*||y||^2 + ||x||_1 ---- */
	for (i = 0; i < m; i++) {
		optimalvalue += y[i] * y[i];
	}
	optimalvalue = optimalvalue * 0.5;
	double* b;
	b = y; /* reuse y's storage as the right-hand side b = y + A*x */
	for (i = 0; i < n; i++) {
		for (j = 0; j < p; j++) {
			b[IDX_h[i][j]] += x[i] * A_h[i][j];
		}
	}
	for (i = 0; i < n; i++) {
		if (x[i] > 0)
			optimalvalue += x[i];
		else
			optimalvalue -= x[i];
	}
	printf("optval %1.16f \n", optimalvalue);
	/* ---- copy results into the caller-owned output arrays ---- */
	*xOpt = (float *) malloc(n * sizeof(float));
	*b_out = (float *) malloc(m * sizeof(float));
	for (i = 0; i < n; i++) {
		(*xOpt)[i] = x[i];
	}
	for (j = 0; j < m; j++) {
		(*b_out)[j] = b[j];
	}
	*C_Idx = (int *) malloc(n * sizeof(int));
	*C_Count = (int *) malloc(n * sizeof(int));
	int nnz = 0;
	for (i = 0; i < n; i++) {
		(*C_Count)[i] = p;
		nnz += (*C_Count)[i];
	}
	(*nnzout) = nnz;
	*A = (float *) malloc(nnz * sizeof(float));
	*R_Idx = (int *) malloc(nnz * sizeof(int));
	nnz = 0;
	for (i = 0; i < n; i++) {
		(*C_Idx)[i] = nnz;
		for (j = 0; j < (*C_Count)[i]; j++) {
			(*R_Idx)[nnz] = IDX_h[i][j];
			(*A)[nnz] = A_h[i][j];
			nnz++;
		}
	}
	/* FIX: release scratch buffers that previously leaked (y doubles as b). */
	free(x);
	free(Li);
	free(y);
}
/* Debug helper: dumps every stored entry of the compressed-column matrix as
 "A[col][row] = value". nnz is accepted for interface parity but unused. */
void printMatrixA(float *A, int*R_Idx, int*C_Idx, int*C_Count, int n, int nnz) {
	for (int col = 0; col < n; col++) {
		for (int k = 0; k < C_Count[col]; k++) {
			int pos = C_Idx[col] + k;
			printf("A[%d][%d] = %f \n", col, R_Idx[pos], A[pos]);
		}
	}
}
/* Computes b := c * A * x for the compressed-column sparse matrix A
 (n columns, m rows); b is zeroed first. nnz is unused. */
void matrix_vector_product(float *A, int*R_Idx, int*C_Idx, int*C_Count, int n,
		int m, int nnz, float *x, float* b, float c) {
	for (int row = 0; row < m; row++) {
		b[row] = 0;
	}
	for (int col = 0; col < n; col++) {
		for (int k = 0; k < C_Count[col]; k++) {
			int pos = C_Idx[col] + k;
			b[R_Idx[pos]] += c * A[pos] * x[col];
		}
	}
}
// Initialises one curand state per thread. The 2-D block index is flattened
// the same way as in RCDMKernel so each thread owns state[id]; the seed is
// shifted left 20 bits and offset by id so every thread seeds differently.
__global__ void setupKernel(curandState *state, unsigned long seed) {
// int id = threadIdx.x + blockIdx.x * blockDim.x;
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
curand_init((seed << 20) + id, id, 0, &state[id]);
}
// Warm-up/benchmark kernel: draws (and discards) one uniform sample per
// thread, then writes the advanced generator state back.
// NOTE: uses the 1-D id flattening, unlike setupKernel's 2-D flattening.
__global__ void randKernel(curandState *state) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
curandState localState = state[id];
curand_uniform(&localState); //dummy call, just to be here
state[id] = localState;
}
// Randomised coordinate descent for L1-regularised least squares. Each thread
// loops NMAXITERKernel times: pick a random column idx, compute the partial
// derivative A_idx^T * residuals, apply the soft-threshold step with
// regulariser LambdaParameter and step size Li[idx], then atomically update
// x[idx] and the affected residual entries. Threads run with no barrier
// between updates, so they may read residuals another thread is mid-update --
// an asynchronous scheme relying solely on the atomics for consistency.
// NOTE(review): parameters m, nnz, b, lambda and NMAX are unused; the
// regulariser comes from the LambdaParameter macro, not lambda.
__global__ void RCDMKernel(float *A, int*R_Idx, int*C_Idx, int*C_Count, int* n,
int* m, int* nnz, float* b, float*residuals, float*x, float * lambda,
float* Li, int* NMAX, curandState* cstate) {
int j, i, k;
float delta, tmp; //partialDetivative
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
curandState localState = cstate[id];
// Per-thread accumulator kept in a shared-memory slot indexed by threadIdx.x
// (one slot per thread; no cross-thread sharing, so no barrier is needed).
__shared__ float partialDetivative[TOTALTHREDSPERBLOCK];
float xLocal ;
float LiLocal ;
float ALocal[COLUMNLENGTH];
int cidx;
int RIDX[COLUMNLENGTH];
for (k = 0; k < NMAXITERKernel ; k++) {
// Pick a uniformly random coordinate in [0, n).
double d = curand_uniform_double(&localState);
int idx = (int) (d * n[0]);
// LOAD A, R, residuals
// float* residualsAddress[COLUMNLENGTH];
xLocal= x[idx];
LiLocal = Li[idx];
cidx = C_Idx[idx];
partialDetivative[threadIdx.x] = 0;
// #pragma unroll COLUMNLENGTH
for (i = 0; i < COLUMNLENGTH; i++) {
j = cidx + i;
ALocal[i] = A[j];
RIDX[i] = R_Idx[j];
// residualsAddress[i] = &residuals[RIDX[i]];
partialDetivative[threadIdx.x] += ALocal[i] * residuals[RIDX[i]];
}
// Soft-threshold step: delta moves x[idx] toward the shrinkage point,
// clamped so the coordinate can jump exactly to zero.
tmp = LiLocal * (partialDetivative[threadIdx.x] + LambdaParameter);
if (xLocal > tmp) {
delta = -tmp;
} else {
tmp = LiLocal * (partialDetivative[threadIdx.x] - LambdaParameter);
if (xLocal < tmp) {
delta = -tmp;
} else {
delta = -xLocal;
}
}
atomicAdd(&x[idx], delta);
// atomicAdd(&x[idx], 1);
// Propagate the coordinate change into the affected residual entries.
for (i = 0; i < COLUMNLENGTH; i++) {
atomicAdd(&residuals[RIDX[i]], ALocal[i] * delta);
// atomicAdd(residualsAddress[i], ALocal[i] * delta);
}
}
// Persist the advanced RNG state for the next launch.
cstate[id] = localState;
}
// Variant of RCDMKernel that replaces curand with an inline Lehmer-style
// linear congruential generator (multiplier 16807, modulus 2^31-1, per the
// constants below), seeded from the flattened thread id. Same soft-threshold
// coordinate-descent step as RCDMKernel, using lambda[0] as the regulariser.
// NOTE(review): the live update is `atomicAdd(&x[idx], 1)` while the actual
// step `atomicAdd(&x[idx], delta)` is commented out -- x is incremented by 1
// each iteration (looks like benchmark/debug instrumentation left enabled;
// confirm before relying on numerical results). Residuals ARE still updated
// with the real delta. Parameters m, nnz and NMAX are unused.
__global__ void MersenRandomKernel(float *A, int*R_Idx, int*C_Idx, int*C_Count,
int* n, int* m, int* nnz, float*residuals, float*x, float * lambda,
float* Li, int* NMAX) {
unsigned int j, i, k;
float partialDetivative, delta, tmp;
int const a = 16807;
int const mersen = 2147483647; //ie 2**31-1
long seed = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
for (k = 0; k < NMAXITER; k++) {
// Advance the LCG and map the draw to a coordinate in [0, n).
seed = (long(seed * a))%mersen;
long idx = seed % n[0];
// temp = seed * a;
// seed = (int) (temp - mersen * floor(temp * reciprocal_m));
// int idx = ratioNMersen * seed;
//
float ALocal[COLUMNLENGTH];
int RIDX[COLUMNLENGTH];
float* residualsAddress[COLUMNLENGTH];
float xLocal = x[idx];
float LiLocal = Li[idx];
int cidx = C_Idx[idx];
partialDetivative = 0;
// Gather column idx and accumulate A_idx^T * residuals.
for (i = 0; i < COLUMNLENGTH; i++) {
j = cidx + i;
ALocal[i] = A[j];
RIDX[i] = R_Idx[j];
residualsAddress[i] = &residuals[RIDX[i]];
partialDetivative += ALocal[i] * residuals[RIDX[i]];
// partialDetivative += A[j] * residuals[R_Idx[j]];
}
// Soft-threshold step (regulariser lambda[0], step size Li[idx]).
tmp = LiLocal * (partialDetivative + lambda[0]);
if (xLocal > tmp) {
delta = -tmp;
} else {
tmp = LiLocal * (partialDetivative - lambda[0]);
if (xLocal < tmp) {
delta = -tmp;
} else {
delta = -xLocal;
}
}
// atomicAdd(&x[idx], delta);
atomicAdd(&x[idx], 1);
for (i = 0; i < COLUMNLENGTH; i++) {
atomicAdd(residualsAddress[i], ALocal[i] * delta);
}
}
}
// Parallel randomized coordinate descent iteration (same objective and data
// layout as MersenRandomKernel), but the Lehmer LCG state update
// seed = (seed * a) mod (2^31 - 1) is evaluated in double precision via a
// floating-point modular reduction. Each thread runs NMAXITER iterations;
// x and the residual vector are updated with atomics.
__global__ void MersenRandomKernelDoubleGenerator(float *A, int*R_Idx,
        int*C_Idx, int*C_Count, int* n, int* m, int* nnz, float*residuals,
        float*x, float * lambda, float* Li, int* NMAX) {
    unsigned int j, i, k;
    float partialDetivative, delta, tmp;
    double const a = 16807;
    double const mersen = 2147483647; //ie 2**31-1
    double const reciprocal_m = 1.0 / mersen;
    //	double const ratioNMersen = n[0]*reciprocal_m;
    // Per-thread seed from the flat global thread id.
    // NOTE(review): thread (0,0,0) starts with seed 0, which the LCG maps to
    // 0 forever, so that thread always updates coordinate 0 — confirm intended.
    long seed = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
            + threadIdx.x);
    double temp;
    for (k = 0; k < NMAXITER; k++) {
        // seed = (seed * a) mod mersen, computed in doubles.
        temp = seed * a;
        seed = (int) (temp - mersen * floor(temp * reciprocal_m));
        //		int idx = ratioNMersen * seed;
        // Map seed in [0, 2^31-1) to a coordinate index in [0, n).
        int idx = n[0] * reciprocal_m * seed;
        // Cache column idx locally: values, row indices, residual addresses.
        float ALocal[COLUMNLENGTH];
        int RIDX[COLUMNLENGTH];
        float* residualsAddress[COLUMNLENGTH];
        float xLocal = x[idx];
        float LiLocal = Li[idx];
        int cidx = C_Idx[idx];
        partialDetivative = 0;
        for (i = 0; i < COLUMNLENGTH; i++) {
            j = cidx + i;
            ALocal[i] = A[j];
            RIDX[i] = R_Idx[j];
            residualsAddress[i] = &residuals[RIDX[i]];
            partialDetivative += ALocal[i] * residuals[RIDX[i]];
            //			partialDetivative += A[j] * residuals[R_Idx[j]];
        }
        // Soft-threshold coordinate step (shrinkage for the L1 term).
        tmp = LiLocal * (partialDetivative + lambda[0]);
        if (xLocal > tmp) {
            delta = -tmp;
        } else {
            tmp = LiLocal * (partialDetivative - lambda[0]);
            if (xLocal < tmp) {
                delta = -tmp;
            } else {
                delta = -xLocal;
            }
        }
        atomicAdd(&x[idx], delta);
        //		atomicAdd(&x[idx], 1);
        for (i = 0; i < COLUMNLENGTH; i++) {
            atomicAdd(residualsAddress[i], ALocal[i] * delta);
        }
    }
}
// Same algorithm as MersenRandomKernelDoubleGenerator, but the per-thread
// iteration count is the compile-time constant CUDACount * 100 instead of
// NMAXITER (presumably for a single fixed-length benchmark run — TODO confirm).
__global__ void MersenRandomKernelDoubleGeneratorSingle(float *A, int*R_Idx,
        int*C_Idx, int*C_Count, int* n, int* m, int* nnz, float*residuals,
        float*x, float * lambda, float* Li, int* NMAX) {
    unsigned int j, i;
    float partialDetivative, delta, tmp;
    double const a = 16807;
    double const mersen = 2147483647; //ie 2**31-1
    double const reciprocal_m = 1.0 / mersen;
    //	double const ratioNMersen = n[0]*reciprocal_m;
    // Per-thread seed from the flat global thread id (seed 0 is degenerate
    // for a Lehmer LCG — see note on the sibling kernel).
    long seed = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
            + threadIdx.x);
    double temp;
    int k;
    for (k = 0; k < CUDACount * 100; k++) {
        // seed = (seed * a) mod mersen, computed in doubles.
        temp = seed * a;
        seed = (int) (temp - mersen * floor(temp * reciprocal_m));
        //		int idx = ratioNMersen * seed;
        // Map seed in [0, 2^31-1) to a coordinate index in [0, n).
        int idx = n[0] * reciprocal_m * seed;
        // Cache column idx locally: values, row indices, residual addresses.
        float ALocal[COLUMNLENGTH];
        int RIDX[COLUMNLENGTH];
        float* residualsAddress[COLUMNLENGTH];
        float xLocal = x[idx];
        float LiLocal = Li[idx];
        int cidx = C_Idx[idx];
        partialDetivative = 0;
        for (i = 0; i < COLUMNLENGTH; i++) {
            j = cidx + i;
            ALocal[i] = A[j];
            RIDX[i] = R_Idx[j];
            residualsAddress[i] = &residuals[RIDX[i]];
            partialDetivative += ALocal[i] * residuals[RIDX[i]];
            //			partialDetivative += A[j] * residuals[R_Idx[j]];
        }
        // Soft-threshold coordinate step (shrinkage for the L1 term).
        tmp = LiLocal * (partialDetivative + lambda[0]);
        if (xLocal > tmp) {
            delta = -tmp;
        } else {
            tmp = LiLocal * (partialDetivative - lambda[0]);
            if (xLocal < tmp) {
                delta = -tmp;
            } else {
                delta = -xLocal;
            }
        }
        atomicAdd(&x[idx], delta);
        //		atomicAdd(&x[idx], 1);
        for (i = 0; i < COLUMNLENGTH; i++) {
            atomicAdd(residualsAddress[i], ALocal[i] * delta);
        }
    }
}
// Variant of MersenRandomKernelDoubleGenerator that avoids the per-thread
// local arrays (ALocal / RIDX / residualsAddress) and re-reads A, R_Idx and
// residuals from global memory instead, trading extra memory traffic for
// lower register/local-memory pressure. Same objective, data layout and
// atomic-update scheme as the sibling kernels.
__global__ void MersenRandomKernelDoubleGeneratorLessLocalMemmory(float *A,
        int*R_Idx, int*C_Idx, int*C_Count, int* n, int* m, int* nnz,
        float*residuals, float*x, float * lambda, float* Li, int* NMAX) {
    unsigned int j, k;
    float partialDetivative, delta, tmp;
    double const a = 16807;
    double const mersen = 2147483647; //ie 2**31-1
    double const reciprocal_m = 1.0 / mersen;
    // Per-thread seed from the flat global thread id (seed 0 is degenerate
    // for a Lehmer LCG — see note on the sibling kernel).
    long seed = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
            + threadIdx.x);
    double temp;
    for (k = 0; k < NMAXITER; k++) {
        // seed = (seed * a) mod mersen, computed in doubles; map to [0, n).
        temp = seed * a;
        seed = (int) (temp - mersen * floor(temp * reciprocal_m));
        int idx = n[0] * reciprocal_m * seed;
        partialDetivative = 0;
        // Bug fix: this loop previously iterated a hard-coded 10 entries;
        // use the actual column length C_Count[idx] so the derivative agrees
        // with the residual-update loop below (and with the sibling kernels,
        // which iterate the full column).
        for (j = C_Idx[idx]; j < C_Idx[idx] + C_Count[idx]; j++) {
            partialDetivative += A[j] * residuals[R_Idx[j]];
        }
        // Soft-threshold coordinate step (shrinkage for the L1 term).
        tmp = Li[idx] * (partialDetivative + lambda[0]);
        if (x[idx] > tmp) {
            delta = -tmp;
        } else {
            tmp = Li[idx] * (partialDetivative - lambda[0]);
            if (x[idx] < tmp) {
                delta = -tmp;
            } else {
                delta = -x[idx];
            }
        }
        atomicAdd(&x[idx], delta);
        for (j = C_Idx[idx]; j < C_Idx[idx] + C_Count[idx]; j++) {
            atomicAdd(&residuals[R_Idx[j]], A[j] * delta);
        }
    }
}
// Debug/demo kernel: each thread writes an id fingerprint
// (1000 * blockIdx.x + threadIdx.x) at its flat global index.
// Assumes a 1D grid of 1D blocks.
__global__ void myFirstKernel(float *x) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    x[gid] = (float) (threadIdx.x + 1000 * blockIdx.x);
}
// Zeroes one element of x per thread. The index layout matches the solver
// kernels: a 2D grid of 1D blocks flattened to a global id.
__global__ void resetXKernel(float *x) {
    int gid = threadIdx.x + blockDim.x
            * (blockIdx.x + blockDim.y * blockIdx.y);
    x[gid] = 0;
}
// Debug/demo kernel: writes the same id fingerprint as myFirstKernel
// (1000 * blockIdx.x + threadIdx.x) into d_a. Assumes a 1D grid of 1D blocks.
__global__ void myFirstfloatKernel(float *d_a) {
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    d_a[gid] = (float) (threadIdx.x + 1000 * blockIdx.x);
}
// Initializes one curandState per thread. Assumes a 1D grid of 1D blocks.
// NOTE(review): the thread id is used as both the seed and the sequence
// number; the curand-documented pattern is a common seed with distinct
// sequence numbers per thread. Streams are still distinct here, but confirm
// this seeding is intended.
__global__ void setup_kernel(curandState *state) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    /* Each thread gets different seed, a different sequence number, no offset */
    curand_init(i, i, 0, &state[i]);
}
// Serial randomized coordinate descent for L1-regularized least squares
// ("sparse regression"):
//     minimize 0.5*||A x - b||^2 + lambda*||x||_1
// A is stored column-wise: column i spans A[C_Idx[i] .. C_Idx[i]+C_Count[i]-1]
// with row indices in R_Idx. Runs NMAX*n single-coordinate updates; roughly
// 100 times per epoch the objective value, the nnz count of x and the gap to
// optimalvalue are appended to /tmp/sparseregression.csv.
// Returns the last objective value computed (0 if never computed).
// NOTE(review): the `logging` flag is currently unused — logging always runs.
float NRCDM_SR(float *A, int*R_Idx, int*C_Idx, int*C_Count, int n, int m,
        int nnz, float* b, float*x, float lambda, float* Li, int NMAX,
        float optimalvalue, int logging) {
    float residuals[m];
    float value = 0;
    int i, j, N;
    FILE *fp;
    // fp = fopen("/exports/home/s1052689/nesterov.txt", "w");
    fp = fopen("/tmp/sparseregression.csv", "w");
    // Bug fix: for n < 100, n/100 == 0 and N % (n/100) divided by zero.
    int logPeriod = (n >= 100) ? (n / 100) : 1;
    // residuals = A*x - b for the caller-supplied starting point x
    for (j = 0; j < m; j++)
        residuals[j] = -b[j];
    for (i = 0; i < n; i++) {
        for (j = 0; j < C_Count[i]; j++) {
            residuals[R_Idx[C_Idx[i] + j]] += A[C_Idx[i] + j] * x[i];
        }
    }
    float partialDetivative, delta, tmp;
    // iteration counter
    for (N = 0; N < NMAX * n; N++) {
        // pick a uniformly random coordinate
        int idx = (int) (n * (rand() / (RAND_MAX + 1.0)));
        partialDetivative = 0;
        for (j = C_Idx[idx]; j < C_Idx[idx] + C_Count[idx]; j++) {
            partialDetivative += A[j] * residuals[R_Idx[j]];
        }
        // Soft-threshold coordinate step (shrinkage for the L1 term).
        tmp = Li[idx] * (partialDetivative + lambda);
        if (x[idx] > tmp) {
            delta = -tmp;
        } else {
            tmp = Li[idx] * (partialDetivative - lambda);
            if (x[idx] < tmp) {
                delta = -tmp;
            } else {
                delta = -x[idx];
            }
        }
        x[idx] += delta;
        for (j = C_Idx[idx]; j < C_Idx[idx] + C_Count[idx]; j++) {
            residuals[R_Idx[j]] += A[j] * delta;
        }
        if (N % logPeriod == 0) {
            int nnzcount = 0;
            float l1norm = 0;
            for (i = 0; i < n; i++) {
                if (x[i] != 0)
                    nnzcount++;
                if (x[i] > 0)
                    l1norm += x[i];
                else
                    l1norm -= x[i];
            }
            // Bug fix: the L1 term must be scaled by lambda (as in
            // ComputeObjectiveValue); previously the raw L1 norm was added,
            // which is only correct for lambda == 1.
            value = lambda * l1norm;
            for (j = 0; j < m; j++)
                value += 0.5 * residuals[j] * residuals[j];
            if (fp != NULL)  // fopen may fail; don't write through NULL
                fprintf(fp, "Iteracia:%d, value:%f, nnz:%d, epsilon: %f\n", N,
                        value, nnzcount, value - optimalvalue);
        }
    }
    if (fp != NULL)
        fclose(fp);
    return value;
}
// Evaluates the sparse-regression objective
//     lambda*||x||_1 + 0.5*||A x - b||^2
// for the column-compressed matrix A (column col spans
// A[C_Idx[col] .. C_Idx[col]+C_Count[col]-1], row indices in R_Idx).
// Accumulates in double precision and returns the objective value.
double ComputeObjectiveValue(float *A, int*R_Idx, int*C_Idx, int*C_Count,
        int n, int m, int nnz, float* b, float*x, float lambda) {
    double r[m];
    int row, col, k;
    // r = A*x - b
    for (row = 0; row < m; row++)
        r[row] = -b[row];
    for (col = 0; col < n; col++) {
        for (k = 0; k < C_Count[col]; k++) {
            int p = C_Idx[col] + k;
            r[R_Idx[p]] += A[p] * x[col];
        }
    }
    // lambda * ||x||_1
    double l1 = 0;
    for (col = 0; col < n; col++)
        l1 += (x[col] > 0) ? x[col] : -x[col];
    double obj = l1 * lambda;
    // + 0.5 * ||r||^2
    double quad = 0;
    for (row = 0; row < m; row++)
        quad += 0.5 * r[row] * r[row];
    return obj + quad;
}
// Timing-only variant of NRCDM_SR: runs NMAX sweeps of n random
// single-coordinate updates with no logging and no objective evaluation.
// The caller supplies (and receives back) the residual vector
// residuals = A*x - b. Always returns 0; the `b`, `optimalvalue` and
// `logging` parameters are accepted for signature parity but unused.
float NRCDM_SR_TIMING(float *A, int*R_Idx, int*C_Idx, int*C_Count, int n,
        int m, int nnz, float* b, float*x, float lambda, float* Li, int NMAX,
        float optimalvalue, int logging, float* residuals) {
    long sweep, it, p;
    for (sweep = 0; sweep < NMAX; sweep++) {
        for (it = 0; it < n; it++) {
            // pick a uniformly random coordinate
            int idx = (int) (n * (rand() / (RAND_MAX + 1.0)));
            int first = C_Idx[idx];
            int last = C_Idx[idx] + C_Count[idx];
            // partial derivative of the smooth part along coordinate idx
            float g = 0;
            for (p = first; p < last; p++)
                g += A[p] * residuals[R_Idx[p]];
            // soft-threshold coordinate step
            float thr = Li[idx] * (g + lambda);
            float delta;
            if (x[idx] > thr) {
                delta = -thr;
            } else {
                thr = Li[idx] * (g - lambda);
                delta = (x[idx] < thr) ? -thr : -x[idx];
            }
            x[idx] += delta;
            for (p = first; p < last; p++)
                residuals[R_Idx[p]] += A[p] * delta;
        }
    }
    return 0;
}
// Allocates and fills the per-coordinate Lipschitz constants for the sparse
// regression objective: L[i] = ||A_i||^2 (squared Euclidean norm of column i)
// and Li[i] = 1 / L[i]. Both arrays are malloc'ed here; the caller owns them.
void computeLipsitzConstantsForSparseRegression(float** L, float ** Li,
        float *A, int*R_Idx, int*C_Idx, int*C_Count, int n, int nnz) {
    float* normSq = (float *) malloc(n * sizeof(float));
    float* invNormSq = (float *) malloc(n * sizeof(float));
    int col, k;
    for (col = 0; col < n; col++) {
        float s = 0;
        for (k = 0; k < C_Count[col]; k++) {
            float a = A[C_Idx[col] + k];
            s += a * a;
        }
        normSq[col] = s;
        // NOTE(review): divides by zero for an all-zero/empty column
        invNormSq[col] = 1 / s;
    }
    *L = normSq;
    *Li = invNormSq;
}
// End-to-end benchmark driver: generates a random sparse-regression problem,
// runs the curand-based parallel RCDMKernel on the GPU, then the serial
// NRCDM_SR on the host, and compares the timings and the resulting x vectors.
// NOTE(review): b[m], xOpt[n], x[n], residuals[m] are stack VLAs; with
// n = 256*1024*2 that is several MB and may overflow the default stack.
// NOTE(review): xcuda, L, Li, A_h, R_Idx_h, C_Idx_h, C_Count_h and NMax_d
// are never freed.
void cudaSolver() {
    FILE *fp;
    // fp = fopen("/exports/home/s1052689/nesterov2.txt", "w");
    fp = fopen("/tmp/taki_cuda.txt", "w");
    clock_t t1, t2;
    int NMax_d_h[1];
    int pMin = COLUMNLENGTH;
    int pMax = COLUMNLENGTH;
    float lambda = 1;
    float diff;
    float* A_h;
    int * C_Idx_h;
    int * R_Idx_h;
    int * C_Count_h;
    int nnz, i, j;
    printf("CudaSolver\n");
    curandState *devStates;
    int totalThreads = 256;
    int dim1 = 1024;
    int dim2 = 2;
    // problem dimensions derived from the launch configuration
    int n = totalThreads * dim1 * dim2;
    int m = n / 4;
    NMax_d_h[0] = 1;
    printf("CudaSolver2\n");
    /* Allocate space for prng states on device */
    printf("idem alokovat random states");
    dim3 dimGrid( dim1, dim2);
    dim3 dimBlock( totalThreads);
    dim3 dimGridRCDM( dim1, dim2);
    t1 = clock();
    cudaMalloc((void **) &devStates, dim1 * dim2 * totalThreads
            * sizeof(curandState));
    checkCUDAError("Alloc error");
    /* Setup prng states */
    setup_kernel<<< dimGrid, dimBlock >>>(devStates);
    cudaThreadSynchronize();
    checkCUDAError("Inicializacia ranom states");
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Inicializacia ranom states: %f\n", diff);
    printf("Idem generaovt random problem");
    t1 = clock();
    // random column-compressed matrix with pMin..pMax nonzeros per column
    generateRandomProblem(&A_h, &R_Idx_h, &C_Idx_h, &C_Count_h, n, m, pMin,
            pMax - pMin, &nnz);
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Random problem generation: %f\n", diff);
    printf("Random problem generated\n");
    float* L;
    float* Li;
    computeLipsitzConstantsForSparseRegression(&L, &Li, A_h, R_Idx_h, C_Idx_h,
            C_Count_h, n, nnz);
    printf("Lipshitz constants computed\n");
    //	printMatrixA(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, nnz);
    //	print_float_array(&L[0], n);
    float b[m];
    float xOpt[n];
    float x[n];
    // random "optimal" point in [-1, 1]^n
    for (j = 0; j < n; j++) {
        xOpt[j] = 2 * ((float) rand() / RAND_MAX) - 1;
    }
    printf("Optimal solution generated\n");
    // set b = A*x
    matrix_vector_product(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, xOpt, b,
            1);
    printf("matrix vector product multiplied\n");
    //	print_float_array(&xOpt[0], n);
    //	print_float_array(&b[0], m);
    float value = 0;
    for (i = 0; i < n; i++)
        x[i] = 0;
    printf("Idem inicializovat reziduals\n");
    // residuals = A*x - b (x is all zeros here, so residuals = -b)
    float residuals[m];
    for (j = 0; j < m; j++)
        residuals[j] = -b[j];
    for (i = 0; i < n; i++) {
        for (j = 0; j < C_Count_h[i]; j++) {
            residuals[R_Idx_h[C_Idx_h[i] + j]] += A_h[C_Idx_h[i] + j] * x[i];
        }
    }
    // device-side copies of the problem data
    float* A_d;
    float* Li_d;
    int * C_Idx_d;
    int * R_Idx_d;
    int * C_Count_d;
    int *n_d;
    int *nnz_d;
    int *m_d;
    float* x_d;
    float* b_d;
    float* lambda_d;
    float * residuals_d;
    int * NMax_d;
    //	size_t memSize = ;
    t1 = clock();
    printf("Idem alokovat data na device\n");
    cudaMalloc((void**) &A_d, nnz * sizeof(float));
    cudaMalloc((void**) &R_Idx_d, nnz * sizeof(int));
    cudaMalloc((void**) &C_Idx_d, n * sizeof(int));
    cudaMalloc((void**) &Li_d, n * sizeof(float));
    cudaMalloc((void**) &C_Count_d, n * sizeof(int));
    cudaMalloc((void**) &n_d, 1 * sizeof(int));
    cudaMalloc((void**) &nnz_d, 1 * sizeof(int));
    cudaMalloc((void**) &m_d, 1 * sizeof(int));
    cudaMalloc((void**) &x_d, n * sizeof(float));
    cudaMalloc((void**) &b_d, m * sizeof(float));
    cudaMalloc((void**) &residuals_d, m * sizeof(float));
    cudaMalloc((void**) &lambda_d, 1 * sizeof(float));
    cudaMalloc((void**) &NMax_d, 1 * sizeof(int));
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Data alocated %d, %d from host -> device:%f\n", nnz, n, diff);
    // Part 2 of 5: host to device memory copy
    t1 = clock();
    cudaMemcpy(A_d, A_h, nnz * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_Count_d, C_Count_h, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(R_Idx_d, R_Idx_h, nnz * sizeof(int), cudaMemcpyHostToDevice);
    // NOTE(review): Li is a float array but is copied with sizeof(int) —
    // works only because sizeof(int) == sizeof(float) here.
    cudaMemcpy(Li_d, Li, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(C_Idx_d, C_Idx_h, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(nnz_d, &nnz, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(m_d, &m, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(NMax_d, &NMax_d_h, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, m * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(residuals_d, residuals, m * sizeof(float),
            cudaMemcpyHostToDevice);
    cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Copy data %d, %d from host -> device:%f\n", nnz, n, diff);
    printf("Idem spustat paralelny RCDM\n");
    cudaThreadSynchronize();
    t1 = clock();
    // time the parallel RCDM kernel with CUDA events
    cudaEvent_t start, stop;
    float time, timeSerial;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    RCDMKernel<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
            n_d, m_d, nnz_d, b_d, residuals_d,x_d, lambda_d, Li_d, NMax_d,devStates);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution");
    printf("Paralel NRCDM CUDA TIMMER: %f ms\n", time);
    printf("Reset X");
    // reset x_d and residuals_d to the starting point for the next run
    //	cudaFree(x_d);
    //	cudaFree(residuals_d);
    //	cudaMalloc((void**) &x_d, n * sizeof(float));
    //	cudaMalloc((void**) &residuals_d, m * sizeof(float));
    cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(residuals_d, residuals, m * sizeof(float),
            cudaMemcpyHostToDevice);
    //
    checkCUDAError("resetXKernel execution");
    printf("Reset X finished");
    //
    printf("Mersen Kernel");
    // the Mersen kernel launch is currently commented out; this still times
    // an empty start/stop event pair
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //	MersenRandomKernel<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
    //			n_d, m_d, nnz_d, b_d, residuals_d,x_d, lambda_d, Li_d, NMax_d);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution");
    printf("Paralel MERSEN NRCDM CUDA TIMMER: %f ms\n", time);
    //	checkCUDAError("kernel execution");
    // serial baseline, timed with the same CUDA-event mechanism;
    // NMAXITER / NMAXITER == 1, i.e. a single epoch
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    NRCDM_SR(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, b, x, lambda, Li,
            NMAXITER / NMAXITER, value, 1);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeSerial, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution");
    printf("SERIAL NRCDM CUDA TIMMER: %f ms, speed up:%f\n", timeSerial,
            timeSerial / time);
    //------------------------- LOAD RESULTS FROM CUDA AND COMPARE
    float* xcuda;
    xcuda = (float*) malloc(n * sizeof(float));
    cudaMemcpy(xcuda, x_d, n * sizeof(float), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    cudaThreadSynchronize();
    checkCUDAError("cudaMemcpy");
    for (i = 0; i < 20; i++) {
        printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
    }
    // squared error between host and device solutions, and ||x||^2
    float normErrorSquered = 0;
    float normX = 0;
    for (i = 0; i < n; i++) {
        normErrorSquered += (x[i] - xcuda[i]) * (x[i] - xcuda[i]);
        normX += x[i] * x[i];
    }
    // print device entries that look like outliers relative to ||x||^2
    for (i = 0; i < n; i++) {
        if (xcuda[i] * xcuda[i] < normX) {
            ;
        } else {
            printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
        }
    }
    printf("normaChyby=%f; norm(x)=%f, n=%d, m=%d\n", normErrorSquered,
            normX, n, m);
    //	int minE=100000000;
    //	int maxE=0;
    //	for (i = 0; i < n; i++) {
    //		if (xcuda[i]<minE) minE=xcuda[i];
    //		if (xcuda[i]>minE) maxE=xcuda[i];
    //	}
    //	printf("minE=%d, maxE=%d\n", minE,maxE);
    //------------------------- CLEAN UP
    printf("Idem uvolnovat\n");
    cudaFree(A_d);
    cudaFree(Li_d);
    cudaFree(R_Idx_d);
    cudaFree(C_Idx_d);
    cudaFree(C_Count_d);
    cudaFree(n_d);
    cudaFree(nnz_d);
    cudaFree(m_d);
    cudaFree(x_d);
    cudaFree(b_d);
    cudaFree(lambda_d);
    cudaFree(residuals_d);
    cudaFree(devStates);
    fclose(fp);
}
// Empty kernel: its only use is to absorb one-off CUDA context/launch
// initialization cost before timing the real kernels.
__global__ void myDoNothingKernel() {
}
// Benchmark driver for the Lehmer-LCG ("Mersen") kernels: generates a random
// sparse-regression problem, runs MersenRandomKernelDoubleGenerator on the
// GPU and NRCDM_SR_TIMING on the host, then compares timings and solutions.
// NOTE(review): b[m], xOpt[n], x[n], residuals[m] are stack VLAs; with
// n = 256*1000*160 these are far larger than a typical default stack —
// confirm this configuration actually runs.
// NOTE(review): xcuda, L, Li and the generated problem arrays are never
// freed, and NMax_d is not cudaFree'd.
void cudaMersenSolver() {
    int totalThreads = 256;
    //	int dim1 = 1024;
    //	int dim2 = 128;
    int dim1 = 1000;
    int dim2 = 160;
    // problem dimensions derived from the launch configuration
    int n = totalThreads * dim1 * dim2;
    //	dim2=dim2/10;
    int m = n / 4;
    FILE *fp;
    //	fp = fopen("/exports/home/s1052689/nesterov2.txt", "w");
    fp = fopen("/tmp/taki_cuda.txt", "w");
    clock_t t1, t2;
    int NMax_d_h[1];
    int pMin = COLUMNLENGTH;
    int pMax = COLUMNLENGTH;
    float lambda = 1;
    float diff;
    float* A_h;
    int * C_Idx_h;
    int * R_Idx_h;
    int * C_Count_h;
    int nnz, i, j;
    printf("CudaSolver\n");
    NMax_d_h[0] = 1;
    /* Allocate space for prng states on device */
    dim3 dimBlock( totalThreads);
    dim3 dimGridRCDM( dim1, dim2);
    t1 = clock();
    /* Setup prng states */
    t1 = clock();
    // random column-compressed matrix with pMin..pMax nonzeros per column
    generateRandomProblem(&A_h, &R_Idx_h, &C_Idx_h, &C_Count_h, n, m, pMin,
            pMax - pMin, &nnz);
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("NNZ: %d\n", nnz);
    printf("Random problem generation: %f\n", diff);
    printf("Random problem generated\n");
    float* L;
    float* Li;
    computeLipsitzConstantsForSparseRegression(&L, &Li, A_h, R_Idx_h, C_Idx_h,
            C_Count_h, n, nnz);
    printf("Lipshitz constants computed\n");
    //	printMatrixA(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, nnz);
    //	print_float_array(&L[0], n);
    float b[m];
    float xOpt[n];
    float x[n];
    // random "optimal" point in [-1, 1]^n
    for (j = 0; j < n; j++) {
        xOpt[j] = 2 * ((float) rand() / RAND_MAX) - 1;
    }
    printf("Optimal solution generated\n");
    // set b = A*x
    matrix_vector_product(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, xOpt, b,
            1);
    printf("matrix vector product multiplied\n");
    //	print_float_array(&xOpt[0], n);
    //	print_float_array(&b[0], m);
    float value = 0;
    for (i = 0; i < n; i++)
        x[i] = 0;
    printf("Idem inicializovat reziduals\n");
    // residuals = A*x - b (x is all zeros here, so residuals = -b)
    float residuals[m];
    for (j = 0; j < m; j++)
        residuals[j] = -b[j];
    for (i = 0; i < n; i++) {
        for (j = 0; j < C_Count_h[i]; j++) {
            residuals[R_Idx_h[C_Idx_h[i] + j]] += A_h[C_Idx_h[i] + j] * x[i];
        }
    }
    // device-side copies of the problem data
    float* A_d;
    float* Li_d;
    int * C_Idx_d;
    int * R_Idx_d;
    int * C_Count_d;
    int *n_d;
    int *nnz_d;
    int *m_d;
    float* x_d;
    float* lambda_d;
    float * residuals_d;
    int * NMax_d;
    //	size_t memSize = ;
    t1 = clock();
    printf("Idem alokovat data na device\n");
    cudaMalloc((void**) &A_d, nnz * sizeof(float));
    checkCUDAError("kernel execution Alloc A_d");
    cudaMalloc((void**) &R_Idx_d, nnz * sizeof(int));
    checkCUDAError("kernel execution Alloc R_IDX");
    cudaMalloc((void**) &C_Idx_d, n * sizeof(int));
    checkCUDAError("kernel execution Alloc C_IDX");
    cudaMalloc((void**) &Li_d, n * sizeof(float));
    checkCUDAError("kernel execution Alloc Li");
    cudaMalloc((void**) &C_Count_d, n * sizeof(int));
    checkCUDAError("kernel execution Alloc CCount");
    cudaMalloc((void**) &n_d, 1 * sizeof(int));
    cudaMalloc((void**) &nnz_d, 1 * sizeof(int));
    cudaMalloc((void**) &m_d, 1 * sizeof(int));
    cudaMalloc((void**) &x_d, n * sizeof(float));
    cudaMalloc((void**) &residuals_d, m * sizeof(float));
    checkCUDAError("kernel execution Alloc Residuals");
    cudaMalloc((void**) &lambda_d, 1 * sizeof(float));
    cudaMalloc((void**) &NMax_d, 1 * sizeof(int));
    checkCUDAError("kernel execution Alloc N_d");
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Data alocated %d, %d from host -> device:%f\n", nnz, n, diff);
    // Part 2 of 5: host to device memory copy
    t1 = clock();
    cudaMemcpy(A_d, A_h, nnz * sizeof(float), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy A_d");
    cudaMemcpy(C_Count_d, C_Count_h, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy C_Cound_d");
    cudaMemcpy(R_Idx_d, R_Idx_h, nnz * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy R");
    // NOTE(review): Li is a float array but is copied with sizeof(int) —
    // works only because sizeof(int) == sizeof(float) here.
    cudaMemcpy(Li_d, Li, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy Li");
    cudaMemcpy(C_Idx_d, C_Idx_h, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy C_ID");
    cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy Nd");
    cudaMemcpy(nnz_d, &nnz, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(m_d, &m, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(NMax_d, &NMax_d_h, 1 * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy NMax");
    cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy x_d");
    cudaMemcpy(residuals_d, residuals, m * sizeof(float),
            cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy Resiaduals");
    cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Copy data %d, %d from host -> device:%f\n", nnz, n, diff);
    printf("Idem spustat paralelny RCDM\n");
    cudaThreadSynchronize();
    t1 = clock();
    checkCUDAError("kernel execution Paralel MERSEN NRCDM CUDA - BEFORE");
    // time the parallel Mersen kernel with CUDA events
    cudaEvent_t start, stop;
    float time, timeSerial;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    MersenRandomKernelDoubleGenerator<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
            n_d, m_d, nnz_d, residuals_d,x_d, lambda_d, Li_d, NMax_d);
    //	MersenRandomKernel<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
    //			n_d, m_d, nnz_d, residuals_d,x_d, lambda_d, Li_d, NMax_d);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution Paralel MERSEN NRCDM CUDA");
    printf("Paralel MERSEN NRCDM CUDA TIMMER: %f ms\n", time);
    //	checkCUDAError("kernel execution");
    // reset host residuals before the serial baseline (x is still all zeros)
    for (i = 0; i < m; i++)
        residuals[i] = -b[i];
    // serial baseline (SerialExecution epochs), timed with CUDA events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    NRCDM_SR_TIMING(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, b, x, lambda,
            Li, SerialExecution, value, 1, residuals);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeSerial, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution");
    // serial time is extrapolated from SerialExecution epochs to NMAXITER
    printf("SERIAL NRCDM CUDA TIMMER: %f ms, speed up:%f\n", timeSerial
            * NMAXITER / SerialExecution, timeSerial / time * NMAXITER
            / SerialExecution);
    //------------------------- LOAD RESULTS FROM CUDA AND COMPARE
    float* xcuda;
    xcuda = (float*) malloc(n * sizeof(float));
    cudaMemcpy(xcuda, x_d, n * sizeof(float), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    cudaThreadSynchronize();
    checkCUDAError("cudaMemcpy");
    for (i = 0; i < 20; i++) {
        printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
    }
    // squared error between host and device solutions, and ||x||^2
    float normErrorSquered = 0;
    float normX = 0;
    for (i = 0; i < n; i++) {
        normErrorSquered += (x[i] - xcuda[i]) * (x[i] - xcuda[i]);
        normX += x[i] * x[i];
    }
    for (i = 0; i < n; i++) {
        if (xcuda[i] * xcuda[i] < normX) {
            ;
        } else {
            //			printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
        }
    }
    printf("normaChyby=%f; norm(x)=%f, n=%d, m=%d\n", normErrorSquered,
            normX, n, m);
    // min/max entries of the device solution
    float minE = 100000000;
    float maxE = -100000000;
    for (i = 0; i < n; i++) {
        if (xcuda[i] < minE)
            minE = xcuda[i];
        if (xcuda[i] > maxE)
            maxE = xcuda[i];
    }
    printf("minE=%f, maxE=%f\n", minE, maxE);
    //------------------------- CLEAN UP
    printf("Idem uvolnovat\n");
    printf("Paralel MERSEN NRCDM CUDA TIMMER: %f ms\n", time);
    printf("SERIAL NRCDM CUDA TIMMER: %f ms, speed up:%f\n", timeSerial
            * NMAXITER / SerialExecution, timeSerial / time * NMAXITER
            / SerialExecution);
    printf("m:%d, n:%d, p:%d\n", m, n, COLUMNLENGTH);
    cudaFree(A_d);
    cudaFree(Li_d);
    cudaFree(R_Idx_d);
    cudaFree(C_Idx_d);
    cudaFree(C_Count_d);
    cudaFree(n_d);
    cudaFree(nnz_d);
    cudaFree(m_d);
    cudaFree(x_d);
    cudaFree(lambda_d);
    cudaFree(residuals_d);
    fclose(fp);
}
// Benchmark driver tracking objective-value evolution: generates a Nesterov
// test problem with known optimal point, runs the curand-based RCDMKernel
// on the GPU and NRCDM_SR_TIMING on the host, logging objective values and
// gaps to /tmp/taki_cuda.txt after each round.
// NOTE(review): x[n] and residuals[m] are stack VLAs; with
// n = TOTALTHREDSPERBLOCK*14*DIM2*CUDACount this may overflow the stack.
// NOTE(review): xcuda, L, Li, devStates and the generated problem arrays are
// never freed; NMax_d is not cudaFree'd.
void cudaMersenSolverEvolution() {
    int totalThreads = TOTALTHREDSPERBLOCK;
    //	int dim1 = 1024;
    //	int dim2 = 128;
    int dim1 = 14;
    int dim2 = DIM2;
    // problem dimensions derived from the launch configuration
    int n = totalThreads * dim1 * dim2 * CUDACount;
    //	dim2=dim2/10;
    int m = n / 2;
    FILE *fp;
    //	fp = fopen("/exports/home/s1052689/nesterov2.txt", "w");
    fp = fopen("/tmp/taki_cuda.txt", "w");
    clock_t t1, t2;
    int NMax_d_h[1];
    int pMin = COLUMNLENGTH;
    int pMax = COLUMNLENGTH;
    float lambda = 1;
    float diff;
    float* A_h;
    int * C_Idx_h;
    int * R_Idx_h;
    int * C_Count_h;
    int nnz, i, j;
    printf("CudaSolver\n");
    NMax_d_h[0] = 1;
    /* Allocate space for prng states on device */
    dim3 dimBlock( totalThreads);
    dim3 dimGridRCDM( dim1, dim2);
    t1 = clock();
    /* Setup prng states */
    t1 = clock();
    // Nesterov-style problem generator also returns b and the optimal point
    float * b;
    float * xOpt;
    generateNesterovProblem(&A_h, &R_Idx_h, &C_Idx_h, &C_Count_h, &b, &xOpt, n,
            m, pMin, pMax - pMin, &nnz);
    // reference objective value at the known optimal point
    double OptimalValueByNesterov = ComputeObjectiveValue(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m,
            nnz, b, xOpt, lambda);
    printf("optimal value 2:%f\n",OptimalValueByNesterov);
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("NNZ: %d\n", nnz);
    printf("Random problem generation: %f\n", diff);
    printf("Random problem generated\n");
    float* L;
    float* Li;
    computeLipsitzConstantsForSparseRegression(&L, &Li, A_h, R_Idx_h, C_Idx_h,
            C_Count_h, n, nnz);
    printf("Lipshitz constants computed\n");
    //	printMatrixA(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, nnz);
    //	print_float_array(&L[0], n);
    float x[n];
    float value = 0;
    for (i = 0; i < n; i++)
        x[i] = 0;
    printf("Idem inicializovat reziduals\n");
    // residuals = A*x - b (x is all zeros here, so residuals = -b)
    float residuals[m];
    for (j = 0; j < m; j++)
        residuals[j] = -b[j];
    for (i = 0; i < n; i++) {
        for (j = 0; j < C_Count_h[i]; j++) {
            residuals[R_Idx_h[C_Idx_h[i] + j]] += A_h[C_Idx_h[i] + j] * x[i];
        }
    }
    // device-side copies of the problem data
    float* A_d;
    float* Li_d;
    int * C_Idx_d;
    int * R_Idx_d;
    int * C_Count_d;
    int *n_d;
    int *nnz_d;
    int *m_d;
    float* x_d;
    float* lambda_d;
    float * residuals_d;
    int * NMax_d;
    //	size_t memSize = ;
    t1 = clock();
    printf("Idem alokovat data na device\n");
    cudaMalloc((void**) &A_d, nnz * sizeof(float));
    checkCUDAError("kernel execution Alloc A_d");
    cudaMalloc((void**) &R_Idx_d, nnz * sizeof(int));
    checkCUDAError("kernel execution Alloc R_IDX");
    cudaMalloc((void**) &C_Idx_d, n * sizeof(int));
    checkCUDAError("kernel execution Alloc C_IDX");
    cudaMalloc((void**) &Li_d, n * sizeof(float));
    checkCUDAError("kernel execution Alloc Li");
    cudaMalloc((void**) &C_Count_d, n * sizeof(int));
    checkCUDAError("kernel execution Alloc CCount");
    cudaMalloc((void**) &n_d, 1 * sizeof(int));
    cudaMalloc((void**) &nnz_d, 1 * sizeof(int));
    cudaMalloc((void**) &m_d, 1 * sizeof(int));
    cudaMalloc((void**) &x_d, n * sizeof(float));
    cudaMalloc((void**) &residuals_d, m * sizeof(float));
    checkCUDAError("kernel execution Alloc Residuals");
    cudaMalloc((void**) &lambda_d, 1 * sizeof(float));
    cudaMalloc((void**) &NMax_d, 1 * sizeof(int));
    checkCUDAError("kernel execution Alloc N_d");
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Data alocated %d, %d from host -> device:%f\n", nnz, n, diff);
    // Part 2 of 5: host to device memory copy
    t1 = clock();
    cudaMemcpy(A_d, A_h, nnz * sizeof(float), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy A_d");
    cudaMemcpy(C_Count_d, C_Count_h, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy C_Cound_d");
    cudaMemcpy(R_Idx_d, R_Idx_h, nnz * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy R");
    // NOTE(review): Li is a float array but is copied with sizeof(int) —
    // works only because sizeof(int) == sizeof(float) here.
    cudaMemcpy(Li_d, Li, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy Li");
    cudaMemcpy(C_Idx_d, C_Idx_h, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy C_ID");
    cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy Nd");
    cudaMemcpy(nnz_d, &nnz, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(m_d, &m, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(NMax_d, &NMax_d_h, 1 * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy NMax");
    cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy x_d");
    cudaMemcpy(residuals_d, residuals, m * sizeof(float),
            cudaMemcpyHostToDevice);
    checkCUDAError("kernel execution Copy Resiaduals");
    cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Copy data %d, %d from host -> device:%f\n", nnz, n, diff);
    printf("Idem spustat paralelny RCDM\n");
    cudaThreadSynchronize();
    for (i = 0; i < m; i++)
        residuals[i] = -b[i];
    // log the starting objective value for both CUDA and CPU runs
    double FunVal = 0;
    FunVal = ComputeObjectiveValue(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz,
            b, x, lambda);
    printf("CUDA:Objective,itr,0,value,%1.16f\n", FunVal);
    printf("CPUD:Objective,itr,0,value,%1.16f\n", FunVal);
    fprintf(fp,"CUDA:Objective,itr,0,value,%1.16f\n", FunVal);
    fprintf(fp,"CPUD:Objective,itr,0,value,%1.16f\n", FunVal);
    float time, timeSerial;
    float* xcuda;
    xcuda = (float*) malloc(n * sizeof(float));
    curandState *devStates;
    cudaMalloc((void **) &devStates, dim1 * dim2 * totalThreads
            * sizeof(curandState));
    setup_kernel<<< dimGridRCDM, dimBlock >>>(devStates);
    checkCUDAError("Inicializacia ranom states");
    // one round of: GPU kernel + objective log, then CPU baseline + objective log
    for (i = 0; i < 1; i++) {
        t1 = clock();
        checkCUDAError("kernel execution Paralel MERSEN NRCDM CUDA - BEFORE");
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        // NOTE(review): residuals_d is passed twice — the slot that cudaSolver
        // fills with b_d receives residuals_d here (no b_d exists in this
        // function); confirm RCDMKernel's parameter order tolerates this.
        RCDMKernel<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
                n_d, m_d, nnz_d, residuals_d,residuals_d,x_d, lambda_d, Li_d,NMax_d, devStates);
        //		MersenRandomKernelDoubleGeneratorSingle<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
        //				n_d, m_d, nnz_d, residuals_d,x_d, lambda_d, Li_d, NMax_d);
        //		MersenRandomKernel<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
        //				n_d, m_d, nnz_d, residuals_d,x_d, lambda_d, Li_d, NMax_d);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaMemcpy(xcuda, x_d, n * sizeof(float), cudaMemcpyDeviceToHost);
        checkCUDAError("cudaMemcpy");
        FunVal = ComputeObjectiveValue(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m,
                nnz, b, xcuda, lambda);
        printf("CUDA:Objective,itr,%d,value,%1.16f,value,%1.16f,time,%f\n", i, FunVal,FunVal-OptimalValueByNesterov, time);
        fprintf(fp, "CUDA:Objective,itr,%d,value,%1.16f,value,%1.16f,time,%f\n", i, FunVal,FunVal-OptimalValueByNesterov, time);
        checkCUDAError("kernel execution Paralel MERSEN NRCDM CUDA");
        //		checkCUDAError("kernel execution");
        // CPU baseline round, timed with the same CUDA-event mechanism
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        NRCDM_SR_TIMING(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, b, x,
                lambda, Li, SerialExecution, value, 1, residuals);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&timeSerial, start, stop);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        FunVal = ComputeObjectiveValue(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m,
                nnz, b, x, lambda);
        printf("CPUD:Objective,itr,%d,value,%1.16f,value,%1.16f,time,%f\n", i, FunVal,FunVal-OptimalValueByNesterov,timeSerial);
        fprintf(fp, "CPUD:Objective,itr,%d,value,%1.16f,value,%1.16f,time,%f\n", i, FunVal,FunVal-OptimalValueByNesterov, timeSerial);
        checkCUDAError("kernel execution");
        printf("SERIAL NRCDM CUDA TIMMER: %f ms, speed up:%f, paralel: %f\n", timeSerial, timeSerial / time, time);
        fprintf(fp,
                "SERIAL NRCDM CUDA TIMMER: %f ms, speed up:%f, paralel: %f\n",
                timeSerial, timeSerial / time, time);
    }
    //------------------------- LOAD RESULTS FROM CUDA AND COMPARE
    cudaThreadSynchronize();
    checkCUDAError("cudaMemcpy");
    for (i = 0; i < 20; i++) {
        printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
    }
    // squared error between host and device solutions, and ||x||^2
    float normErrorSquered = 0;
    float normX = 0;
    for (i = 0; i < n; i++) {
        normErrorSquered += (x[i] - xcuda[i]) * (x[i] - xcuda[i]);
        normX += x[i] * x[i];
    }
    for (i = 0; i < n; i++) {
        if (xcuda[i] * xcuda[i] < normX) {
            ;
        } else {
            //			printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
        }
    }
    printf("normaChyby=%f; norm(x)=%f, n=%d, m=%d\n", normErrorSquered,
            normX, n, m);
    // min/max entries and zero count of the device solution
    // NOTE(review): nnzCuda counts zeros, matching the "Zeros in cuda" label,
    // despite the "nnz"-style name.
    float minE = 100000000;
    float maxE = -100000000;
    int nnzCuda = 0;
    for (i = 0; i < n; i++) {
        if (xcuda[i] == 0)
            nnzCuda++;
        if (xcuda[i] < minE)
            minE = xcuda[i];
        if (xcuda[i] > maxE)
            maxE = xcuda[i];
    }
    printf("minE=%f, maxE=%f\n", minE, maxE);
    printf("Zeros in cuda %d \n", nnzCuda);
    //------------------------- CLEAN UP
    printf("Idem uvolnovat\n");
    printf("Paralel MERSEN NRCDM CUDA TIMMER: %f ms\n", time);
    printf("SERIAL NRCDM CUDA TIMMER: %f ms, speed up:%f\n", timeSerial
            * NMAXITER / SerialExecution, timeSerial / time * NMAXITER
            / SerialExecution);
    printf("m:%d, n:%d, p:%d\n", m, n, COLUMNLENGTH);
    printf("optimal value 2:%1.16f\n",OptimalValueByNesterov);
    fprintf(fp,"optimal value 2:%1.16f\n",OptimalValueByNesterov);
    cudaFree(A_d);
    cudaFree(Li_d);
    cudaFree(R_Idx_d);
    cudaFree(C_Idx_d);
    cudaFree(C_Count_d);
    cudaFree(n_d);
    cudaFree(nnz_d);
    cudaFree(m_d);
    cudaFree(x_d);
    cudaFree(lambda_d);
    cudaFree(residuals_d);
    fclose(fp);
}
// Runs one end-to-end experiment comparing the parallel randomized coordinate
// descent kernel (RCDMKernel) against the serial host solver (NRCDM_SR) on a
// randomly generated sparse regression instance.
// Steps: seed per-thread curand states on the device, build the problem
// (A, b, Lipschitz constants) on the host, copy everything to the device, time
// the parallel kernel with CUDA events, run the serial solver, then print a
// couple of entries of both solutions. Progress messages (Slovak) go to stdout
// and a hard-coded log file.
void cudaEDDIESolver() {
    FILE *fp;
    fp = fopen("/exports/home/s1052689/cudaLogs.txt", "w");
    // fp = fopen("/tmp/taki_cuda.txt", "w");
    clock_t t1, t2;
    int NMax_d_h[1];              // host copy of the per-launch iteration bound
    int pMin = 10;                // each column of A gets pMin..pMax nonzeros
    int pMax = 10;
    float lambda = 1;             // l1 regularization weight
    float diff;                   // host wall-clock deltas (clock ticks / 1e6)
    float* A_h;                   // sparse matrix values, column blocks
    int * C_Idx_h;                // start offset of each column in A_h/R_Idx_h
    int * R_Idx_h;                // row index of each nonzero
    int * C_Count_h;              // nonzeros per column
    int nnz, i, j;
    cudaEvent_t start, stop;
    float time, timeSerial;       // event timings in milliseconds
    printf("CudaSolver\n");
    int NMax = 1;
    curandState *devStates;       // one RNG state per launched device thread
    int totalThreads = 128;       // threads per block
    int dim1 = 300;               // grid x-dimension
    int dim2 = NMax;              // grid y-dimension
    int n = totalThreads * dim1;  // number of unknowns (columns of A)
    int m = n * 2;                // number of rows of A
    NMax_d_h[0] = 1;
    printf("CudaSolver2\n");
    printf("idem alokovat random states");
    dim3 dimGrid( dim1, dim2);
    dim3 dimBlock( totalThreads);
    t1 = clock();
    cudaMalloc((void **) &devStates, dim1 * dim2 * totalThreads
            * sizeof(curandState));
    checkCUDAError("Alloc error");
    /* Setup prng states */
    setup_kernel<<< dimGrid, dimBlock >>>(devStates);
    // myDoNothingKernel<<< 13, 1 >>>();
    cudaThreadSynchronize();
    checkCUDAError("Inicializacia ranom states");
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Inicializacia ranom states: %f\n", diff);
    printf("Idem generaovt random problem");
    /* Allocate space for prng states on device */
    t1 = clock();
    generateRandomProblem(&A_h, &R_Idx_h, &C_Idx_h, &C_Count_h, n, m, pMin,
            pMax - pMin, &nnz);
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Random problem generation: %f\n", diff);
    printf("Random problem generated\n");
    float* L;
    float* Li;                    // Li presumably holds 1/L per coordinate — set by the helper below
    computeLipsitzConstantsForSparseRegression(&L, &Li, A_h, R_Idx_h, C_Idx_h,
            C_Count_h, n, nnz);
    printf("Lipshitz constants computed\n");
    // printMatrixA(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, nnz);
    // print_float_array(&L[0], n);
    // NOTE(review): b/xOpt/x/residuals are stack VLAs; with n = 38400 and
    // m = 76800 this is several hundred KB of stack — consider heap allocation.
    float b[m];
    float xOpt[n];
    float x[n];
    for (j = 0; j < n; j++) {
        xOpt[j] = 2 * ((float) rand() / RAND_MAX) - 1;  // uniform in [-1, 1]
    }
    printf("Optimal solution generated\n");
    // set b = A*x
    matrix_vector_product(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, xOpt, b,
            1);
    printf("matrix vector product multiplied\n");
    // print_float_array(&xOpt[0], n);
    // print_float_array(&b[0], m);
    float value = 0;
    for (i = 0; i < n; i++)
        x[i] = 0;
    printf("Idem inicializovat reziduals\n");
    // residuals = A*x - b (x is all zeros here, so this is effectively -b,
    // but the loop keeps the code correct for any warm start x).
    float residuals[m];
    for (j = 0; j < m; j++)
        residuals[j] = -b[j];
    for (i = 0; i < n; i++) {
        for (j = 0; j < C_Count_h[i]; j++) {
            residuals[R_Idx_h[C_Idx_h[i] + j]] += A_h[C_Idx_h[i] + j] * x[i];
        }
    }
    // Device-side mirrors of the problem data.
    float* A_d;
    float* Li_d;
    int * C_Idx_d;
    int * R_Idx_d;
    int * C_Count_d;
    int *n_d;
    int *nnz_d;
    int *m_d;
    float* x_d;
    float* b_d;
    float* lambda_d;
    float * residuals_d;
    int * NMax_d;
    t1 = clock();
    printf("Idem alokovat data na device\n");
    cudaMalloc((void**) &A_d, nnz * sizeof(float));
    cudaMalloc((void**) &R_Idx_d, nnz * sizeof(int));
    cudaMalloc((void**) &C_Idx_d, n * sizeof(int));
    cudaMalloc((void**) &Li_d, n * sizeof(float));
    cudaMalloc((void**) &C_Count_d, n * sizeof(int));
    cudaMalloc((void**) &n_d, 1 * sizeof(int));
    cudaMalloc((void**) &nnz_d, 1 * sizeof(int));
    cudaMalloc((void**) &m_d, 1 * sizeof(int));
    cudaMalloc((void**) &x_d, n * sizeof(float));
    cudaMalloc((void**) &b_d, m * sizeof(float));
    cudaMalloc((void**) &residuals_d, m * sizeof(float));
    cudaMalloc((void**) &lambda_d, 1 * sizeof(float));
    cudaMalloc((void**) &NMax_d, 1 * sizeof(int));
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Data alocated %d, %d from host -> device:%f\n", nnz, n, diff);
    // Part 2 of 5: host to device memory copy
    t1 = clock();
    cudaMemcpy(A_d, A_h, nnz * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_Count_d, C_Count_h, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(R_Idx_d, R_Idx_h, nnz * sizeof(int), cudaMemcpyHostToDevice);
    // FIX: element size was sizeof(int) for a float array; same byte count on
    // current platforms, but wrong type and fragile.
    cudaMemcpy(Li_d, Li, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_Idx_d, C_Idx_h, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(nnz_d, &nnz, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(m_d, &m, 1 * sizeof(int), cudaMemcpyHostToDevice);
    // FIX: was &NMax_d_h (pointer-to-array) — same address, but the array
    // itself is the correct argument.
    cudaMemcpy(NMax_d, NMax_d_h, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, m * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(residuals_d, residuals, m * sizeof(float),
            cudaMemcpyHostToDevice);
    cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Copy data %d, %d from host -> device:%f\n", nnz, n, diff);
    printf("Idem spustat paralelny RCDM\n");
    cudaThreadSynchronize();
    t1 = clock();
    // Time the parallel solver with CUDA events.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    RCDMKernel<<< dimGrid, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
            n_d, m_d, nnz_d, b_d, residuals_d,x_d, lambda_d, Li_d, NMax_d,devStates);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution");
    printf("Paralel NRCDM CUDA TIMMER: %f ms\n", time);
    // Time the serial host solver with the same event mechanism. With an empty
    // stream the elapsed time between the two records approximates the host
    // run time of NRCDM_SR.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    NRCDM_SR(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, b, x, lambda, Li, 50,
            value, 1);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeSerial, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution");
    printf("SERIAL NRCDM CUDA TIMMER: %f ms, speed up:%f\n", timeSerial,
            timeSerial / time);
    //------------------------- LOAD RESULTS FROM CUDA AND COMPARE
    float* xcuda;
    xcuda = (float*) malloc(n * sizeof(float));
    cudaMemcpy(xcuda, x_d, n * sizeof(float), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    cudaThreadSynchronize();
    checkCUDAError("cudaMemcpy");
    for (i = 0; i < 2; i++) {
        printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
    }
    //------------------------- CLEAN UP
    printf("Idem uvolnovat\n");
    cudaFree(A_d);
    cudaFree(Li_d);
    cudaFree(R_Idx_d);
    cudaFree(C_Idx_d);
    cudaFree(C_Count_d);
    cudaFree(n_d);
    cudaFree(nnz_d);
    cudaFree(m_d);
    cudaFree(x_d);
    cudaFree(b_d);
    cudaFree(lambda_d);
    cudaFree(residuals_d);
    cudaFree(NMax_d);   // FIX: was leaked
    cudaFree(devStates);
    free(xcuda);        // FIX: was leaked
    // NOTE(review): A_h/R_Idx_h/C_Idx_h/C_Count_h and L/Li are allocated by
    // generateRandomProblem / computeLipsitzConstantsForSparseRegression;
    // ownership is not visible here, so they are intentionally not freed —
    // confirm and release at the allocating layer.
    fclose(fp);
}
// Placeholder for a cuRAND host-API demo. Every statement is commented out,
// so calling this function is a deliberate no-op; the sketch is kept as a
// reference for generating uniform floats on the device with
// curandGenerateUniform and copying them back to the host.
void generator() {
    // size_t n = 100;
    // size_t i;
    // curandGenerator_t gen;
    // float *devData, *hostData;
    // /* Allocate n floats on host */
    // hostData = (float *) calloc(n, sizeof(float));
    // /* Allocate n floats on device */
    // cudaMalloc((void **) &devData, n * sizeof(float));
    // /* Create pseudo-random number generator */
    // curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    // /* Set seed */
    // curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    // /* Generate n floats on device */
    // curandGenerateUniform(gen, devData, n);
    // /* Copy device memory to host */
    // cudaMemcpy(hostData, devData, n * sizeof(float), cudaMemcpyDeviceToHost);
    // /* Show result */
    // for (i = 0; i < n; i++) {
    //     printf("%1.4f ", hostData[i]);
    // }
    // printf("\n");
}
// Writes a unique marker value (1000*block + thread) into each thread's slot
// of d_a. Launch with a 1-D grid of 1-D blocks; d_a must hold at least
// gridDim.x * blockDim.x ints.
__global__ void myFirstIntKernel(int *d_a) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    d_a[gid] = threadIdx.x + 1000 * blockIdx.x;
}
// Smoke test: launches myFirstIntKernel on an 8x8 grid, copies the result
// back, and verifies every element equals 1000*block + thread. Exits with a
// diagnostic on the first mismatch.
void eddietest() {
    // pointer for host memory
    int *h_a;
    // pointer for device memory
    int *d_a;
    // define grid and block size
    int numBlocks = 8;
    int numThreadsPerBlock = 8;
    // Part 1 of 5: allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc((void **) &d_a, memSize);
    // Part 2 of 5: launch kernel
    dim3 dimGrid( numBlocks);
    dim3 dimBlock( numThreadsPerBlock);
    myFirstIntKernel<<< dimGrid, dimBlock >>>( d_a );
    // block until the device has completed
    cudaThreadSynchronize();
    // check if kernel execution generated an error
    checkCUDAError("kernel execution");
    // Part 4 of 5: device to host copy
    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");
    // Part 5 of 5: verify the data returned to the host is correct
    // FIX: this loop was empty and verified nothing.
    for (int i = 0; i < numBlocks; i++) {
        for (int j = 0; j < numThreadsPerBlock; j++) {
            if (h_a[i * numThreadsPerBlock + j] != 1000 * i + j) {
                fprintf(stderr, "eddietest: mismatch at block %d thread %d\n",
                        i, j);
                exit(-1);
            }
        }
    }
    // free device memory
    cudaFree(d_a);
    // free host memory
    free(h_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");
}
// Program entry point: seed the host RNG with a fixed value (reproducible
// runs) and launch the Mersenne-twister-based evolution solver experiment.
int main(void) {
    srand(1);
    printf("start solving\n");
    cudaMersenSolverEvolution();
    return 0;
}
// Aborts the process with a diagnostic if the CUDA runtime has a pending
// error; msg identifies the call site in the message. No-op on cudaSuccess.
void checkCUDAError(const char *msg) {
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(-1);
}
// Older variant of the RCDM experiment: single-block launch (512 threads),
// wall-clock (clock()) timing around the event-based kernel timing, results
// logged to /tmp. Structure mirrors cudaEDDIESolver: seed device RNG, build a
// random sparse regression problem on the host, copy to device, run the
// parallel kernel, run the serial solver, compare a few entries.
void cudaSolverOLD() {
    FILE *fp;
    // fp = fopen("/exports/home/s1052689/nesterov2.txt", "w");
    fp = fopen("/tmp/taki_cuda.txt", "w");
    clock_t t1, t2;
    int NMax_d_h[1];              // host copy of the per-launch iteration bound
    int pMin = 10;                // nonzeros per column of A
    int pMax = 10;
    float lambda = 1;             // l1 regularization weight
    float diff;                   // host wall-clock deltas
    float* A_h;
    int * C_Idx_h;
    int * R_Idx_h;
    int * C_Count_h;
    int nnz, i, j;
    printf("CudaSolver\n");
    int NMax = 1;
    curandState *devStates;
    int totalThreads = 512;       // single block of 512 threads
    int dim1 = 1;
    int dim2 = NMax;
    int n = totalThreads * dim1;  // number of unknowns
    int m = n * 2;                // number of rows
    NMax_d_h[0] = 1;
    printf("CudaSolver2\n");
    /* Allocate space for prng states on device */
    printf("idem alokovat random states");
    dim3 dimGrid( dim1, dim2);
    dim3 dimBlock( totalThreads);
    t1 = clock();
    cudaMalloc((void **) &devStates, dim1 * dim2 * totalThreads
            * sizeof(curandState));
    checkCUDAError("Alloc error");
    /* Setup prng states */
    setup_kernel<<< dimGrid, dimBlock >>>(devStates);
    cudaThreadSynchronize();
    checkCUDAError("Inicializacia ranom states");
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Inicializacia ranom states: %f\n", diff);
    printf("Idem generaovt random problem");
    t1 = clock();
    generateRandomProblem(&A_h, &R_Idx_h, &C_Idx_h, &C_Count_h, n, m, pMin,
            pMax - pMin, &nnz);
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Random problem generation: %f\n", diff);
    printf("Random problem generated\n");
    float* L;
    float* Li;
    computeLipsitzConstantsForSparseRegression(&L, &Li, A_h, R_Idx_h, C_Idx_h,
            C_Count_h, n, nnz);
    printf("Lipshitz constants computed\n");
    // printMatrixA(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, nnz);
    // print_float_array(&L[0], n);
    float b[m];
    float xOpt[n];
    float x[n];
    for (j = 0; j < n; j++) {
        xOpt[j] = 2 * ((float) rand() / RAND_MAX) - 1;  // uniform in [-1, 1]
    }
    printf("Optimal solution generated\n");
    // set b = A*x
    matrix_vector_product(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, xOpt, b,
            1);
    printf("matrix vector product multiplied\n");
    // print_float_array(&xOpt[0], n);
    // print_float_array(&b[0], m);
    float value = 0;
    for (i = 0; i < n; i++)
        x[i] = 0;
    printf("Idem inicializovat reziduals\n");
    // residuals = A*x - b (x starts at zero, so effectively -b)
    float residuals[m];
    for (j = 0; j < m; j++)
        residuals[j] = -b[j];
    for (i = 0; i < n; i++) {
        for (j = 0; j < C_Count_h[i]; j++) {
            residuals[R_Idx_h[C_Idx_h[i] + j]] += A_h[C_Idx_h[i] + j] * x[i];
        }
    }
    // Device-side mirrors of the problem data.
    float* A_d;
    float* Li_d;
    int * C_Idx_d;
    int * R_Idx_d;
    int * C_Count_d;
    int *n_d;
    int *nnz_d;
    int *m_d;
    float* x_d;
    float* b_d;
    float* lambda_d;
    float * residuals_d;
    int * NMax_d;
    t1 = clock();
    printf("Idem alokovat data na device\n");
    cudaMalloc((void**) &A_d, nnz * sizeof(float));
    cudaMalloc((void**) &R_Idx_d, nnz * sizeof(int));
    cudaMalloc((void**) &C_Idx_d, n * sizeof(int));
    cudaMalloc((void**) &Li_d, n * sizeof(float));
    cudaMalloc((void**) &C_Count_d, n * sizeof(int));
    cudaMalloc((void**) &n_d, 1 * sizeof(int));
    cudaMalloc((void**) &nnz_d, 1 * sizeof(int));
    cudaMalloc((void**) &m_d, 1 * sizeof(int));
    cudaMalloc((void**) &x_d, n * sizeof(float));
    cudaMalloc((void**) &b_d, m * sizeof(float));
    cudaMalloc((void**) &residuals_d, m * sizeof(float));
    cudaMalloc((void**) &lambda_d, 1 * sizeof(float));
    cudaMalloc((void**) &NMax_d, 1 * sizeof(int));
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Data alocated %d, %d from host -> device:%f\n", nnz, n, diff);
    // Part 2 of 5: host to device memory copy
    t1 = clock();
    cudaMemcpy(A_d, A_h, nnz * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_Count_d, C_Count_h, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(R_Idx_d, R_Idx_h, nnz * sizeof(int), cudaMemcpyHostToDevice);
    // FIX: element size was sizeof(int) for a float array; same byte count on
    // current platforms, but wrong type and fragile.
    cudaMemcpy(Li_d, Li, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_Idx_d, C_Idx_h, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(nnz_d, &nnz, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(m_d, &m, 1 * sizeof(int), cudaMemcpyHostToDevice);
    // FIX: was &NMax_d_h (pointer-to-array) — same address, correct type now.
    cudaMemcpy(NMax_d, NMax_d_h, 1 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, m * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(residuals_d, residuals, m * sizeof(float),
            cudaMemcpyHostToDevice);
    cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaThreadSynchronize();
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Copy data %d, %d from host -> device:%f\n", nnz, n, diff);
    printf("Idem spustat paralelny RCDM\n");
    cudaThreadSynchronize();
    t1 = clock();
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    RCDMKernel<<< dimGrid, dimBlock >>>(A_d, R_Idx_d, C_Idx_d, C_Count_d,
            n_d, m_d, nnz_d, b_d, residuals_d,x_d, lambda_d, Li_d, NMax_d,devStates);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    checkCUDAError("kernel execution");
    printf("Paralel NRCDM CUDA TIMMER: %f\n", time);
    // checkCUDAError("kernel execution");
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("Paralel NRCDM + Synchronize: %f\n", diff);
    fprintf(fp, "Paralel NRCDM + Synchronize: %f\n", diff);
    // myFirstfloatKernel<<< dimGrid, dimBlock >>>( x_d );
    // myFirstKernel<<< dimGrid, dimBlock >>>(x_d);
    // cudaThreadSynchronize();
    t1 = clock();
    NRCDM_SR(A_h, R_Idx_h, C_Idx_h, C_Count_h, n, m, nnz, b, x, lambda, Li,
            NMax, value, 1);
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    // FIX: format string had one %f but was passed (nnz, n, diff) — undefined
    // behavior and garbage output; print only the elapsed time.
    printf("NRCDM: %f\n", diff);
    fprintf(fp, "NRCDM: %f\n", diff);
    //------------------------- LOAD RESULTS FROM CUDA AND COMPARE
    float* xcuda;
    xcuda = (float*) malloc(n * sizeof(float));
    cudaMemcpy(xcuda, x_d, n * sizeof(float), cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");
    cudaThreadSynchronize();
    checkCUDAError("cudaMemcpy");
    for (i = 0; i < 2; i++) {
        printf("x_h[%d]=%f, x_d[%d]=%f\n", i, x[i], i, xcuda[i]);
    }
    //------------------------- CLEAN UP
    printf("Idem uvolnovat\n");
    cudaFree(A_d);
    cudaFree(Li_d);
    cudaFree(R_Idx_d);
    cudaFree(C_Idx_d);
    cudaFree(C_Count_d);
    cudaFree(n_d);
    cudaFree(nnz_d);
    cudaFree(m_d);
    cudaFree(x_d);
    cudaFree(b_d);
    cudaFree(lambda_d);
    cudaFree(residuals_d);
    cudaFree(NMax_d);   // FIX: was leaked
    cudaFree(devStates);
    free(xcuda);        // FIX: was leaked
    fclose(fp);
}
|
14,640 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
int getSPcores(cudaDeviceProp devProp);
// Enumerates every CUDA device on the system and prints its key hardware
// properties (clock, SM/core counts, memory sizes, launch limits). Blocks on
// a final getc() so a console window stays open until a key is pressed.
int main() {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("Number of GPU Devices: %d\n", deviceCount); //Printing number of devices
    for (int dev = 0; dev < deviceCount; dev++) { //Printing information on each device
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev); //Get properties
        printf("\nGPU Number: %d\n", dev);
        printf("GPU Name: %s\n", props.name);
        printf("Clock Rate: %d kHz\n", props.clockRate);
        printf("Number of Streaming Multiprocessors: %d\n", props.multiProcessorCount);
        printf("Number of Cores: %d\n", getSPcores(props));
        printf("Warp Size: %d\n", props.warpSize);
        printf("Global Memory: %zuB\n", props.totalGlobalMem);
        printf("Constant Memory: %zuB\n", props.totalConstMem);
        printf("Shared Memory Per Block: %zuB\n", props.sharedMemPerBlock);
        printf("Number of Registers Available Per Block: %d\n", props.regsPerBlock);
        printf("Maximum Number of Threads Per Block: %d\n", props.maxThreadsPerBlock);
        printf("Maximum Size of Each Dimension of a Block: \n");
        printf("\tX: %d, Y: %d, Z: %d\n", props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]);
        printf("Maximum Size of Each Dimension of a Grid: \n");
        printf("\tX: %d, Y: %d, Z: %d\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
    }
    getc(stdin); // hold the console open until the user presses a key
    return 0;
}
//Obtained from https://stackoverflow.com/questions/32530604/how-can-i-get-number-of-cores-in-cuda-device
// Returns the total number of FP32 CUDA cores for a device, derived from its
// compute capability (major.minor) and SM count. Prints a warning and returns
// 0 for unrecognized architectures.
// Based on https://stackoverflow.com/questions/32530604/how-can-i-get-number-of-cores-in-cuda-device
int getSPcores(cudaDeviceProp devProp)
{
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    switch (devProp.major) {
    case 2: // Fermi
        if (devProp.minor == 1) cores = mp * 48;
        else cores = mp * 32;
        break;
    case 3: // Kepler
        cores = mp * 192;
        break;
    case 5: // Maxwell
        cores = mp * 128;
        break;
    case 6: // Pascal
        if ((devProp.minor == 1) || (devProp.minor == 2)) cores = mp * 128;
        else if (devProp.minor == 0) cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 7: // Volta (7.0) and Turing (7.5)
        if ((devProp.minor == 0) || (devProp.minor == 5)) cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 8: // FIX: Ampere (8.0 = 64 FP32/SM; 8.6/8.7 = 128) and Ada (8.9 = 128)
        if (devProp.minor == 0) cores = mp * 64;
        else if ((devProp.minor == 6) || (devProp.minor == 7)
                || (devProp.minor == 9)) cores = mp * 128;
        else printf("Unknown device type\n");
        break;
    case 9: // FIX: Hopper (9.0 = 128 FP32/SM) was previously unrecognized
        if (devProp.minor == 0) cores = mp * 128;
        else printf("Unknown device type\n");
        break;
    default:
        printf("Unknown device type\n");
        break;
    }
    return cores;
}
|
14,641 | #include "includes.h"
// Device-side helper: exchanges the contents of two floats in place.
__device__ void swap(float& a, float& b)
{
    const float t = a;
    a = b;
    b = t;
}
// Sorts exactly 64 floats from d_in into d_out (ascending) with a bitonic
// mergesort network held entirely in shared memory; each thread owns one slot.
// Launch contract (per the original comment): <<<1, 64, 64*4>>> — one block of
// 64 threads with 64 floats (256 bytes) of dynamic shared memory.
__global__ void batcherBitonicMergesort64(float * d_out, const float * d_in)
{
    // you are guaranteed this is called with <<<1, 64, 64*4>>>
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    sdata[tid] = d_in[tid];
    __syncthreads(); // all 64 inputs staged before any compare-swap
    // Stage s merges runs into bitonic sequences of length 2^(s+1);
    // 6 stages (0..5) cover 2^6 = 64 elements.
    for (int stage = 0; stage <= 5; stage++)
    {
        for (int substage = stage; substage >= 0; substage--)
        {
            int distance = 1 << substage; // Distance to value to be compared
            int comparison = tid - distance; // Value to be compared
            int div = 1 << (stage + 1); // width of the group being built this stage
            // Skip values that should not be compared: the partner index
            // falls below zero or lands in a different group.
            if (comparison < 0 || (comparison / div) != (tid / div)) {
                continue;
            }
            // Alternate sort direction per group (odd groups descending,
            // even groups ascending) — this is what keeps the merge bitonic.
            bool up = (comparison / div) % 2 == 1;
            if (up) {
                if (sdata[tid] > sdata[comparison]) {
                    swap(sdata[tid], sdata[comparison]);
                }
            } else {
                if (sdata[tid] < sdata[comparison]) {
                    swap(sdata[tid], sdata[comparison]);
                }
            }
        }
        // NOTE(review): there is no barrier between substages within a stage;
        // with 64 threads = two warps, cross-warp compare-swaps (distance 32)
        // can race with the following substage — confirm against a reference
        // sort or add a __syncthreads() inside the substage loop.
        __syncthreads();
    }
    d_out[tid] = sdata[tid];
}
14,642 | #include "includes.h"
// Accumulates the mean binary cross-entropy over `size` (prediction, target)
// pairs into *cost. One thread per element; partial costs are combined with
// atomicAdd, so the caller must zero *cost before launch and cover at least
// `size` threads with a 1-D grid.
__global__ void binaryCrossEntropyCost(float* predictions, float* target, int size, float* cost) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < size) {
        // FIX: clamp the prediction away from 0 and 1 so logf() cannot
        // return -inf and poison the accumulated cost with NaN/inf.
        float p = fminf(fmaxf(predictions[index], 1e-7f), 1.0f - 1e-7f);
        float partial_cost = target[index] * logf(p)
                + (1.0f - target[index]) * logf(1.0f - p);
        atomicAdd(cost, - partial_cost / size);
    }
}
14,643 | #include <iostream>
// Prints a file/line-tagged diagnostic and terminates the process when
// `result` is not cudaSuccess; no-op otherwise.
inline void checkCuda(cudaError_t result, const char *file, const int line) {
    if (result == cudaSuccess) {
        return;
    }
    std::cerr << file << "@" << line << ": CUDA Runtime Error: " << cudaGetErrorString(result) << std::endl;
    exit(-1);
}
// Wraps a CUDA runtime call so failures report the exact call site.
#define CUDA_RUNTIME(stmt) checkCuda(stmt, __FILE__, __LINE__);
// Grid-stride kernel that fills x[0..n) with the constant 10. Works for any
// launch configuration, including a single block.
template <typename T>
__global__ void setter_kernel(T *x, const size_t n) {
    const size_t first = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t stride = gridDim.x * blockDim.x;
    for (size_t i = first; i < n; i += stride) {
        x[i] = 10;
    }
}
// Smoke test for setter_kernel using managed memory: fills a 10-element array
// on the device and verifies it on the host. Returns 0 on success, 1 on a
// verification failure; CUDA API failures abort via CUDA_RUNTIME.
int main(void) {
    std::cout << "enter main\n";
    cudaDeviceProp prop;
    CUDA_RUNTIME(cudaGetDeviceProperties(&prop, 0))
    std::cout << prop.name << "\n";
    const size_t n = 10;
    float *x;
    CUDA_RUNTIME(cudaMallocManaged(&x, sizeof(*x) * n));
    setter_kernel<<<10, 512>>>(x, n);
    CUDA_RUNTIME(cudaGetLastError());       // FIX: catch launch-config errors
    CUDA_RUNTIME(cudaDeviceSynchronize());  // also surfaces async kernel errors
    // FIX: verify every element, not just x[0].
    for (size_t i = 0; i < n; ++i) {
        if (x[i] != 10) {
            std::cerr << "setter kernel failed\n";
            return 1;
        }
    }
    CUDA_RUNTIME(cudaFree(x));
    std::cerr << "tests passed\n";
    return 0;
}
14,644 | #include <assert.h>
#include <stdio.h>
__global__ void kernel() {
// Our reference output contains the line number of this assert() call; be
// careful when modifying the parts of this file above this line.
assert(false); // intentionally fires on every thread; main() expects cudaErrorAssert
}
// Launches the always-asserting kernel and exits 0 only when the runtime
// reports cudaErrorAssert at synchronization time; otherwise returns the
// unexpected error code.
int main() {
    kernel<<<1,1>>>();
    const cudaError_t status = cudaDeviceSynchronize();
    return (status == cudaErrorAssert) ? 0 : status;
}
|
14,645 | #include <stdio.h>
#include <cuda.h>
#define N 10000000
#define BLOCK_SIZE 256
// __global__ indicates that function runs on GPU.
__global__ void vector_add(float *a, float *b, float *out, int n) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
out[i] = a[i] + b[i];
}
}
// Adds two N-element vectors on the GPU and verifies a sample of the result.
// FIX: the original launched vector_add with HOST pointers (h_a, b, out) and
// never allocated device buffers for b/out — the kernel dereferenced host
// memory on the device. Now all three operands live in device memory and the
// result is copied back before verification.
int main() {
    // host buffers (h_ prefix) and device buffers (d_ prefix)
    float *h_a, *h_b, *h_out;
    float *d_a, *d_b, *d_out;
    // allocating memory on host
    h_a = (float*)malloc(sizeof(float) * N);
    h_b = (float*)malloc(sizeof(float) * N);
    h_out = (float*)malloc(sizeof(float) * N);
    // initializing input vectors
    for (int i = 0; i < N; i++) {
        h_a[i] = 0.1f;
        h_b[i] = 0.2f;
    }
    // allocating memory on device and copying the inputs over
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);
    cudaMemcpy(d_a, h_a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float) * N, cudaMemcpyHostToDevice);
    int BLOCKS_NUM = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    vector_add<<<BLOCKS_NUM, BLOCK_SIZE>>>(d_a, d_b, d_out, N);
    // blocking copy also synchronizes with the kernel on the default stream
    cudaMemcpy(h_out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // spot-check the result (0.1f + 0.2f) with a small tolerance
    float err = h_out[0] - 0.3f;
    if (err < 0.0f) err = -err;
    if (err > 1e-5f) {
        printf("vector_add failed: out[0]=%f\n", h_out[0]);
    }
    // deallocating memory from device
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    // deallocating memory from host
    free(h_a);
    free(h_b);
    free(h_out);
    return 0;
}
14,646 | //并行版本输入多维
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#define DIMENSION 2
#define INPUT_NEURAL_NUM DIMENSION+1
#define HIDDEN_NEURAL_NUM 5
#define OUTPUT_NEURAL_NUM DIMENSION
#define NEURAL_NUM HIDDEN_NEURAL_NUM + INPUT_NEURAL_NUM + OUTPUT_NEURAL_NUM
#define EPOCH 200
#define THREAD_EPOCH 200
#define STEPS 10
#define MUTATION_P 0.03
#define CROSS_P 0.7
#define PER_BENCHMARK_TIMES 3
#define BENCHMARK_NUM 10
#define NEURAL_NETWORK_NUM (DIMENSION*PER_BENCHMARK_TIMES*BENCHMARK_NUM) //每一个染色体上进行的神经网络
#define PI 3.1415926535897932384626433832795029
#define E 2.7182818284590452353602874713526625
#define SINGLE_WEIGHT (NEURAL_NUM)*(NEURAL_NUM - 1)
#define MAX 100
#define MIN -100
#define DEVICE_NUM 1
#define BLOCK 8*4
#define THREAD 192*4
#define NUM_CHROMO (BLOCK*THREAD/(BENCHMARK_NUM*PER_BENCHMARK_TIMES))
#define G 0.002
#define W_MAX 0.01
#define W_MIN -0.01
#define DELTA_X_MAX 0.1
#define DELTA_X_MIN -0.1
// State of one neural-network controller. Sizes come from the macros above:
// SINGLE_WEIGHT = NEURAL_NUM*(NEURAL_NUM-1), i.e. one weight for every ordered
// neuron pair (presumably a fully-connected recurrent net — TODO confirm
// against the update kernel, which is outside this file view).
struct NN
{
    float w[SINGLE_WEIGHT];           // inter-neuron weights
    float input_w[INPUT_NEURAL_NUM];  // weights applied to the external inputs
    float old_neural[NEURAL_NUM];     // activations from the previous step (initialized to 0.5)
    float new_neural[NEURAL_NUM];     // activations being computed this step (initialized to 0.0)
};
// One GA individual: a master "genotype" network plus independent working
// copies — one NN per (benchmark function x repetition) evaluation run.
// initial_chromo() seeds nn[], a..d and position[] from nn_initial, A..D and
// initial_position[].
struct CHROMO
{
    NN nn_initial;                              // the evolved master network
    NN nn[PER_BENCHMARK_TIMES*BENCHMARK_NUM];   // per-evaluation working copies
    // per-run copies of the evolved meta-parameters A..D
    float a[BENCHMARK_NUM*PER_BENCHMARK_TIMES], b[BENCHMARK_NUM*PER_BENCHMARK_TIMES], c[BENCHMARK_NUM*PER_BENCHMARK_TIMES], d[BENCHMARK_NUM*PER_BENCHMARK_TIMES];
    float A, B, C, D;                           // evolved meta-parameters (crossed over / mutated)
    float initial_position[BENCHMARK_NUM*PER_BENCHMARK_TIMES][DIMENSION]; // random start point of each run, uniform in [MIN, MAX)
    float position[BENCHMARK_NUM*PER_BENCHMARK_TIMES][DIMENSION];         // current search point of each run
};
// Per-run fitness trace: one value per inner-loop iteration (THREAD_EPOCH
// entries). Presumably filled by the device-side evaluation kernel — TODO
// confirm against the kernel code, which is outside this file view.
struct IN_FITNESS
{
    float f[THREAD_EPOCH];
};
// Per-run position trace: one DIMENSION-vector per inner-loop iteration.
// Presumably filled alongside IN_FITNESS — TODO confirm against the kernel.
struct IN_POSITION
{
    float p[THREAD_EPOCH][DIMENSION];
};
// Ascending in-place bubble sort of arr[0..length), mirroring every swap in
// the companion index array (so if the caller initializes index to 0..length-1,
// index[k] afterwards names the original slot of arr[k]).
void Sort(float *arr, int length, int *index)
{
    for (int pass = 0; pass < length - 1; pass++)
    {
        // After each pass the largest remaining value has bubbled to the
        // end, so the inner sweep shrinks by one element.
        for (int pos = 0; pos + 1 < length - pass; pos++)
        {
            if (arr[pos] > arr[pos + 1])
            {
                float fTmp = arr[pos];
                arr[pos] = arr[pos + 1];
                arr[pos + 1] = fTmp;
                int iTmp = index[pos];
                index[pos] = index[pos + 1];
                index[pos + 1] = iTmp;
            }
        }
    }
}
// Tournament selection
// Tournament selection over the population: for each slot i, draw a tournament
// of sel_num distinct individuals, find the one with the LOWEST GA_Fitness
// (lower rank = better), and copy its genotype (nn_initial weights and A..D)
// into ch[i]. Tournament size grows linearly with the generation index g,
// from ~15% to ~66% of the population, increasing selection pressure over time.
// NOTE(review): winners are copied into ch[] in place while later tournaments
// may still pick already-overwritten slots, so selection is not drawn purely
// from the previous generation — confirm this is intended.
void selection_tournment(struct CHROMO ch[NUM_CHROMO], float GA_Fitness[NUM_CHROMO], int g)
{
    int m, i, j, l, n, k, maxindex;
    m = NUM_CHROMO;
    int sel_num, sel_init, sel_final;
    sel_init = floor(NUM_CHROMO*0.15);
    sel_final = floor(NUM_CHROMO*0.666);
    // linear ramp of the tournament size across EPOCH generations
    sel_num = (int)(floor)(sel_final - sel_init) / (float)EPOCH*(g + 1) + sel_init;
    int *select;
    select = (int(*))malloc(sel_num * sizeof(int));
    int mark2[NUM_CHROMO] = { 0 };// marks whether an individual was already drawn into this tournament
    int index[NUM_CHROMO] = { 0 };// winner chosen for each population slot
    float min;
    for (i = 0; i < m; i++) // m = CHROMOSOME_NUM
    {
        // draw sel_num distinct tournament participants
        for (j = 0; j < sel_num; j++)
        {
            int r2 = rand() % m + 1; // which individual in 1..m (integer)
            while (mark2[r2 - 1] == 1)
            {
                r2 = rand() % m + 1;
            }
            mark2[r2 - 1] = 1;
            select[j] = r2 - 1;
        }
        // pick the participant with the lowest (= best) fitness;
        // despite its name, maxindex holds the arg-min.
        min = GA_Fitness[select[0]];
        maxindex = select[0];
        for (k = 1; k < sel_num; k++)
        {
            if (GA_Fitness[select[k]] < min)
            {
                min = GA_Fitness[select[k]];
                maxindex = select[k];
            }
        }
        index[i] = maxindex;
        // reset the tournament bookkeeping for the next slot
        for (n = 0; n < NUM_CHROMO; n++)
        {
            mark2[n] = 0;
        }
        for (n = 0; n < sel_num; n++)
        {
            select[n] = 0;
        }
        // copy the winner's genotype into slot i
        for (l = 0; l < SINGLE_WEIGHT; l++)
        {
            ch[i].nn_initial.w[l] = ch[index[i]].nn_initial.w[l];
        }
        for (l = 0; l < INPUT_NEURAL_NUM; l++)
        {
            ch[i].nn_initial.input_w[l] = ch[index[i]].nn_initial.input_w[l];
        }
        ch[i].A = ch[index[i]].A;
        ch[i].B = ch[index[i]].B;
        ch[i].C = ch[index[i]].C;
        ch[i].D = ch[index[i]].D;
    }
    free(select);
}
// Crossover
// Uniform crossover: individuals are selected with probability CROSS_P while
// scanning the population; every second selected individual is paired with the
// previously selected one (`one`), and each gene (every weight and each of
// A..D) is swapped between the pair with probability 0.5.
// NOTE(review): `one` is only read after `first` reaches an even count, which
// requires a prior odd count that assigned it — so it is never used
// uninitialized, though compilers may warn. Reseeding srand() on every call
// weakens randomness when calls happen within the same second.
void crossover(struct CHROMO ch[NUM_CHROMO])
{
    //const double a = 0.0;
    //const double b = 1.0;
    time_t tttt;
    srand((unsigned int)time(&tttt));
    int two;          // index of the currently scanned individual
    int one;          // index of the pending crossover partner
    int first = 0;    // count of selected individuals; pairs form on even counts
    float r, r2;
    //int point;
    float t;
    int i;
    for (two = 0; two < NUM_CHROMO; two++)
    {
        r = (float)rand() / RAND_MAX;
        if (r < CROSS_P)
        {
            ++first;
            if (first % 2 == 0)// crossover
            {
                //point = rand() % TOTAL_WEIGHT + 1; // randomly choose crossover point
                // swap each recurrent weight between the pair with p = 0.5
                for (i = 0; i < SINGLE_WEIGHT; i++)
                {
                    r2 = (float)rand() / (RAND_MAX + 1.0);
                    if (r2 < 0.5)
                    {
                        t = ch[one].nn_initial.w[i];
                        ch[one].nn_initial.w[i] = ch[two].nn_initial.w[i];
                        ch[two].nn_initial.w[i] = t;
                    }
                }
                // swap each input weight with p = 0.5
                for (i = 0; i < INPUT_NEURAL_NUM; i++)
                {
                    r2 = (float)rand() / (RAND_MAX + 1.0);
                    if (r2 < 0.5)
                    {
                        t = ch[one].nn_initial.input_w[i];
                        ch[one].nn_initial.input_w[i] = ch[two].nn_initial.input_w[i];
                        ch[two].nn_initial.input_w[i] = t;
                    }
                }
                // swap each meta-parameter independently with p = 0.5
                r2 = (float)rand() / (RAND_MAX + 1.0);
                if (r2 < 0.5)
                {
                    t = ch[one].A;
                    ch[one].A = ch[two].A;
                    ch[two].A = t;
                }
                r2 = (float)rand() / (RAND_MAX + 1.0);
                if (r2 < 0.5)
                {
                    t = ch[one].B;
                    ch[one].B = ch[two].B;
                    ch[two].B = t;
                }
                r2 = (float)rand() / (RAND_MAX + 1.0);
                if (r2 < 0.5)
                {
                    t = ch[one].C;
                    ch[one].C = ch[two].C;
                    ch[two].C = t;
                }
                r2 = (float)rand() / (RAND_MAX + 1.0);
                if (r2 < 0.5)
                {
                    t = ch[one].D;
                    ch[one].D = ch[two].D;
                    ch[two].D = t;
                }
            }
            else
            {
                // odd count: remember this individual as the pending partner
                one = two;
            }
        }
    }
}
// Mutation
// Mutation: with probability MUTATION_P per gene, re-randomize each input
// weight and each recurrent weight uniformly in [-W_MAX, W_MAX) (== [W_MIN,
// W_MAX) since W_MIN == -W_MAX); with one additional draw per individual,
// re-randomize all four meta-parameters A..D together.
// NOTE(review): reseeding srand() on every call weakens randomness when calls
// fall within the same second.
void mutation(struct CHROMO ch[NUM_CHROMO])
{
    float r;
    time_t tttt;
    srand((unsigned int)time(&tttt));
    int i, j;
    for (i = 0; i < NUM_CHROMO; i++)
    {
        // per-gene mutation of the input weights
        for (j = 0; j < INPUT_NEURAL_NUM; j++)
        {
            r = (float)rand() / (RAND_MAX + 1.0);
            if (r < MUTATION_P)
            {
                ch[i].nn_initial.input_w[j] = (float)rand() / (RAND_MAX + 1.0) * (W_MAX - W_MIN) - W_MAX;
            }
        }
        // per-gene mutation of the recurrent weights
        for (j = 0; j < SINGLE_WEIGHT; j++)
        {
            r = (float)rand() / (RAND_MAX + 1.0);
            if (r < MUTATION_P)
            {
                ch[i].nn_initial.w[j] = (float)rand() / (RAND_MAX + 1.0) * (W_MAX - W_MIN) - W_MAX;
            }
        }
        r = (float)rand() / (RAND_MAX + 1.0);
        /*
        if (r < MUTATION_P)
        {
        ch[i].A = (float)rand() / RAND_MAX * 2.0 - 1.0;
        }
        r = (float)rand() / (RAND_MAX + 1.0);
        if (r < MUTATION_P)
        {
        ch[i].B = (float)rand() / RAND_MAX * 2.0 - 1.0;
        }
        r = (float)rand() / (RAND_MAX + 1.0);
        if (r < MUTATION_P)
        {
        ch[i].C = (float)rand() / RAND_MAX * 2.0 - 1.0;
        }
        r = (float)rand() / (RAND_MAX + 1.0);
        if (r < MUTATION_P)
        {
        ch[i].D = (float)rand() / RAND_MAX * 2.0 - 1.0;
        }
        */
        // A..D mutate together on a single probability draw (the per-parameter
        // variant above is intentionally disabled)
        if (r < MUTATION_P)
        {
            ch[i].A = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
            ch[i].B = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
            ch[i].C = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
            ch[i].D = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
        }
    }
}
// Initializes the whole population: random genotype weights and A..D in
// [-W_MAX, W_MAX), then seeds every per-evaluation working copy (nn[], a..d,
// position[]) from the genotype; start positions are uniform in [-MAX, MAX).
// NOTE(review): reseeds srand() from the wall clock on each call.
void initial_chromo(struct CHROMO chromo[NUM_CHROMO])
{
    time_t ti;
    srand((unsigned int)time(&ti));
    for (int i = 0; i < NUM_CHROMO; i++)
    {
        // random genotype: input weights, recurrent weights, meta-parameters
        for (int j = 0; j < INPUT_NEURAL_NUM; j++)
        {
            chromo[i].nn_initial.input_w[j] = (float)rand() / RAND_MAX *(W_MAX - W_MIN) - W_MAX;
        }
        for (int j = 0; j < SINGLE_WEIGHT; j++)
        {
            chromo[i].nn_initial.w[j] = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
        }
        chromo[i].A = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
        chromo[i].B = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
        chromo[i].C = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
        chromo[i].D = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
        // replicate the genotype into every evaluation run's working state
        for (int num_fun = 0; num_fun < BENCHMARK_NUM*PER_BENCHMARK_TIMES; num_fun++)
        {
            for (int j = 0; j < SINGLE_WEIGHT; j++)
            {
                chromo[i].nn[num_fun].w[j] = chromo[i].nn_initial.w[j];
            }
            for (int j = 0; j < INPUT_NEURAL_NUM; j++)
            {
                chromo[i].nn[num_fun].input_w[j] = chromo[i].nn_initial.input_w[j];
            }
            for (int j = 0; j < NEURAL_NUM; j++)
            {
                chromo[i].nn[num_fun].old_neural[j] = 0.5;
                chromo[i].nn[num_fun].new_neural[j] = 0.0;
            }
            // random start point in the search box [-MAX, MAX)
            for (int d = 0; d < DIMENSION; d++)
            {
                chromo[i].initial_position[num_fun][d] = (float)rand() / RAND_MAX * (MAX - MIN) - MAX;
                chromo[i].position[num_fun][d] = chromo[i].initial_position[num_fun][d];
            }
            chromo[i].a[num_fun] = chromo[i].A;
            chromo[i].b[num_fun] = chromo[i].B;
            chromo[i].c[num_fun] = chromo[i].C;
            chromo[i].d[num_fun] = chromo[i].D;
        }
    }
}
// Single-individual overload: same initialization as the population version,
// but for one CHROMO passed by reference — random genotype in [-W_MAX, W_MAX),
// then per-run working copies and random start positions in [-MAX, MAX).
// NOTE(review): also reseeds srand() from the wall clock on each call.
void initial_chromo(struct CHROMO &chromo)
{
    time_t tttt;
    srand((unsigned int)time(&tttt));
    int i = 0;
    for (i = 0; i<INPUT_NEURAL_NUM; i++)
    {
        chromo.nn_initial.input_w[i] = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
    }
    for (i = 0; i<SINGLE_WEIGHT; i++)
    {
        chromo.nn_initial.w[i] = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
    }
    chromo.A = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
    chromo.B = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
    chromo.C = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
    chromo.D = (float)rand() / RAND_MAX * (W_MAX - W_MIN) - W_MAX;
    // replicate the genotype into every evaluation run's working state
    for (int num_fun = 0; num_fun < BENCHMARK_NUM*PER_BENCHMARK_TIMES; num_fun++)
    {
        for (i = 0; i < SINGLE_WEIGHT; i++)
        {
            chromo.nn[num_fun].w[i] = chromo.nn_initial.w[i];
        }
        for (i = 0; i < INPUT_NEURAL_NUM; i++)
        {
            chromo.nn[num_fun].input_w[i] = chromo.nn_initial.input_w[i];
        }
        for (i = 0; i < NEURAL_NUM; i++)
        {
            chromo.nn[num_fun].old_neural[i] = 0.5;
            chromo.nn[num_fun].new_neural[i] = 0.0;
        }
        // random start point in the search box [-MAX, MAX)
        for (i = 0; i < DIMENSION; i++)
        {
            chromo.initial_position[num_fun][i] = (float)rand() / RAND_MAX * (MAX - MIN) - MAX;
            chromo.position[num_fun][i] = chromo.initial_position[num_fun][i];
        }
        chromo.a[num_fun] = chromo.A;
        chromo.b[num_fun] = chromo.B;
        chromo.c[num_fun] = chromo.C;
        chromo.d[num_fun] = chromo.D;
    }
}
// Host-side GA evaluation for generation g:
//  1. Dump every chromosome's initial weights and A-D coefficients to
//     NN/g<g+1>/ch_<i>.txt.
//  2. Convert the flat device fitness array into per-(function, run) rankings
//     of the NUM_CHROMO chromosomes, average the PER_BENCHMARK_TIMES runs per
//     function, then average across BENCHMARK_NUM functions to get one GA
//     fitness (mean rank, lower is better) per chromosome.
//  3. Record the generation's best chromosome into GA_best_ch and log it.
//  4. Apply selection/crossover/mutation, then re-seed each chromosome's
//     per-run working state from its initial copy for the next generation.
// Returns the generation's best (minimum) average ranking.
float my_function(struct CHROMO ch[NUM_CHROMO], float fitness[NUM_CHROMO*PER_BENCHMARK_TIMES*BENCHMARK_NUM], struct CHROMO &GA_best_ch, int g)
{
int temp_nn_index[NUM_CHROMO*PER_BENCHMARK_TIMES];// scratch index array co-sorted with the fitness values
float nn_fitness[BENCHMARK_NUM*PER_BENCHMARK_TIMES][NUM_CHROMO]; // raw fitness values computed by the benchmark kernels
float nn_fitness_one_f[NUM_CHROMO];// all chromosomes' fitness on one (function, run), for sorting
int record_sort_index[NUM_CHROMO]; // 1-based ranks of the chromosomes under one (function, run)
float record_nn_fitness_sort_index[BENCHMARK_NUM*PER_BENCHMARK_TIMES][NUM_CHROMO]; // ranks for every (function, run) pair
float fitness_ranking[BENCHMARK_NUM + 1][NUM_CHROMO]; // per-function mean rank; row BENCHMARK_NUM holds the overall mean
float GA_fitness[NUM_CHROMO]; // final GA fitness (mean rank) per chromosome
float min_fit;
FILE* f_temp_ch;
FILE* f_temp_result = fopen("temp_result.txt", "a");
//FILE* all_f_g = fopen("all_f_g.txt", "a");
FILE* one_g_chromo[NUM_CHROMO + 1];
char fileName[256];
// Dump each chromosome's initial network weights and plasticity coefficients.
for (int i = 0; i < NUM_CHROMO; i++)
{
sprintf(fileName, "NN/g%d/ch_%d.txt", g + 1, i);
one_g_chromo[i] = fopen(fileName, "w");
if (one_g_chromo[i] == NULL)
{
printf("\n Error: Cannot open ch_%d filoe \n",i);
}
for (int j = 0; j < INPUT_NEURAL_NUM; j++)
{
fprintf(one_g_chromo[i], "%f\n", ch[i].nn_initial.input_w[j]);
}
for (int j = 0; j < SINGLE_WEIGHT; j++)
{
fprintf(one_g_chromo[i], "%f\n", ch[i].nn_initial.w[j]);
}
fprintf(one_g_chromo[i], "A:%f\n B:%f\n C:%f\n D:%f\n", ch[i].A, ch[i].B, ch[i].C, ch[i].D);
fclose(one_g_chromo[i]);
}
// Zero the accumulators before ranking.
for (int i = 0; i < BENCHMARK_NUM + 1; i++)
{
for (int j = 0; j < NUM_CHROMO; j++)
{
fitness_ranking[i][j] = 0.0;
}
}
for (int fun_num = 0; fun_num < BENCHMARK_NUM*PER_BENCHMARK_TIMES; fun_num++)
{
for (int i = 0; i < NUM_CHROMO; i++)
{
nn_fitness[fun_num][i] = 0.0;
}
}
// De-flatten: fitness is laid out chromosome-major, (function, run)-minor.
for (int fun_num = 0; fun_num < BENCHMARK_NUM*PER_BENCHMARK_TIMES; fun_num++)
{
for (int i = 0; i < NUM_CHROMO; i++)
{
nn_fitness[fun_num][i] = fitness[fun_num + i *BENCHMARK_NUM*PER_BENCHMARK_TIMES];
//printf("nn_fitness[%d][%d]=%f \t ",fun_num,i,nn_fitness[fun_num][i]);
}
}
// Rank the chromosomes once per (function, run): sort ascending, then
// invert the permutation into 1-based ranks (lower fitness => better rank).
for (int fun_num = 0; fun_num < BENCHMARK_NUM*PER_BENCHMARK_TIMES; fun_num++)
{
for (int i = 0; i < NUM_CHROMO; i++)
{
nn_fitness_one_f[i] = nn_fitness[fun_num][i]; // NUM_CHROMO chromosomes on one (function, run)
temp_nn_index[i] = i;
}
Sort(nn_fitness_one_f, NUM_CHROMO, temp_nn_index);
for (int i = 0; i < NUM_CHROMO; i++)
{
record_sort_index[temp_nn_index[i]] = i + 1;
}
for (int i = 0; i < NUM_CHROMO; i++)
{
record_nn_fitness_sort_index[fun_num][i] = (float)record_sort_index[i]; // ranks for all (function, run) pairs //printf("sort_index[%d][%d]=%f \t ",fun_num,i,record_nn_fitness_sort_index[fun_num][i]);
}
}
// Average the PER_BENCHMARK_TIMES run-ranks into one mean rank per function.
for (int i = 0; i < NUM_CHROMO; i++)
{
for (int fun_num = 0; fun_num < BENCHMARK_NUM; fun_num++)
{
for (int k = 0; k < PER_BENCHMARK_TIMES; k++)
{
fitness_ranking[fun_num][i] += record_nn_fitness_sort_index[fun_num + BENCHMARK_NUM * k][i];
}
fitness_ranking[fun_num][i] /= PER_BENCHMARK_TIMES;
}
}
// Overall GA fitness: mean rank across all benchmark functions.
for (int i = 0; i < NUM_CHROMO; i++)
{
float sum = 0;
for (int j = 0; j < BENCHMARK_NUM; j++)
{
sum += fitness_ranking[j][i];
}
fitness_ranking[BENCHMARK_NUM][i] = sum / BENCHMARK_NUM;
GA_fitness[i] = sum / BENCHMARK_NUM;
printf("the %dth chromosome in all function's avg ranking is :%f \n", i + 1, GA_fitness[i]);
//fprintf(all_f_g, "g=%d,f[%d]=%f:\n", g + 1, i + 1, GA_fitness[i]);
}
sprintf(fileName, "NN/g%d/GA_fitness.txt", g + 1);
one_g_chromo[NUM_CHROMO] = fopen(fileName, "a");
if (one_g_chromo[NUM_CHROMO] == NULL)
{
printf("\n Error: Cannot open GA_fitness file \n");
}
for (int i = 0; i < NUM_CHROMO; i++)
{
fprintf(one_g_chromo[NUM_CHROMO], "%f\n", GA_fitness[i]);
}
fclose(one_g_chromo[NUM_CHROMO]);
min_fit = GA_fitness[0]; // this generation: find the minimum ranking and the corresponding network
for (int j = 0; j < INPUT_NEURAL_NUM; j++)
{
GA_best_ch.nn_initial.input_w[j] = ch[0].nn_initial.input_w[j];
}
for (int j = 0; j < SINGLE_WEIGHT; j++)
{
GA_best_ch.nn_initial.w[j] = ch[0].nn_initial.w[j];
}
GA_best_ch.A = ch[0].A;
GA_best_ch.B = ch[0].B;
GA_best_ch.C = ch[0].C;
GA_best_ch.D = ch[0].D;
for (int i = 1; i < NUM_CHROMO; i++)
{
if (GA_fitness[i] < min_fit)
{
min_fit = GA_fitness[i];
for (int j = 0; j < INPUT_NEURAL_NUM; j++)
{
GA_best_ch.nn_initial.input_w[j] = ch[i].nn_initial.input_w[j];
}
for (int j = 0; j < SINGLE_WEIGHT; j++)
{
GA_best_ch.nn_initial.w[j] = ch[i].nn_initial.w[j];
}
GA_best_ch.A = ch[i].A;
GA_best_ch.B = ch[i].B;
GA_best_ch.C = ch[i].C;
GA_best_ch.D = ch[i].D;
}
}
sprintf(fileName, "NN/g%d/temp_bestch.txt", g + 1);
f_temp_ch = fopen(fileName, "w");
fprintf(f_temp_ch, "g=%d,the best chromosome:\n", g + 1);
for (int j = 0; j < INPUT_NEURAL_NUM; j++)
{
fprintf(f_temp_ch, "%f\n", GA_best_ch.nn_initial.input_w[j]);
}
for (int j = 0; j < SINGLE_WEIGHT; j++)
{
fprintf(f_temp_ch, "%f\n", GA_best_ch.nn_initial.w[j]);
}
fprintf(f_temp_ch, "A:%f\n B:%f\n C:%f\n D:%f\n", GA_best_ch.A, GA_best_ch.B, GA_best_ch.C, GA_best_ch.D);// best chromosome of this generation
printf("the generation %d min ranking is:%f \n", g + 1, min_fit);
fprintf(f_temp_result, "g=%d,the best:%f\n", g + 1, min_fit);// best fitness of this generation
// Produce the next generation, then restore every chromosome's per-run
// working copies (weights, positions, neuron states, A-D) from nn_initial
// so all runs start from the same evolved genotype.
selection_tournment(ch, GA_fitness, g);
crossover(ch);
mutation(ch);
for (int i = 0; i < NUM_CHROMO; i++)
{
for (int j = 0; j < (PER_BENCHMARK_TIMES*BENCHMARK_NUM); j++)
{
for (int d = 0; d < SINGLE_WEIGHT; d++)
{
ch[i].nn[j].w[d] = ch[i].nn_initial.w[d];
}
for (int k = 0; k < DIMENSION; k++)
{
ch[i].position[j][k] = ch[i].initial_position[j][k];
}
for (int tt = 0; tt < INPUT_NEURAL_NUM; tt++)
{
ch[i].nn[j].input_w[tt] = ch[i].nn_initial.input_w[tt];
}
for (int d = 0; d < NEURAL_NUM; d++)
{
ch[i].nn[j].old_neural[d] = 0.5;
}
ch[i].a[j] = ch[i].A;
ch[i].b[j] = ch[i].B;
ch[i].c[j] = ch[i].C;
ch[i].d[j] = ch[i].D;
}
}
fclose(f_temp_ch);
fclose(f_temp_result);
//fclose(all_f_g);
/*
for (int i = 0; i < NUM_CHROMO + 1; i++)
{
fclose(one_g_chromo[i]);
}
*/
return min_fit;
}
// Hebbian plasticity term: G * (a*x + b*y + c*x*y + d), where x and y are the
// pre-/post-synaptic activations and a..d are the chromosome's coefficients
// (G is a project-wide scaling constant).
__device__ float hebb(float a, float b, float c, float d, float x, float y)
{
	float linear = a * x + b * y;
	float correlation = c * x * y;
	return G * (linear + correlation + d);
}
// Logistic function 1/(1+e^{-x}) with the tails saturated to exactly 0 / 1
// for |x| > 80 so expf never overflows.
// Fix: use float-precision expf and float literals; the previous revision
// called double-precision exp() (silent double promotion in device code).
__device__ float sigmoid(float x)
{
	if (x < -80.0f)
		return 0.0f;
	else if (x > 80.0f)
		return 1.0f;
	else
		return 1.0f / (1.0f + expf(-x));
}
// One forward pass of chromosome `num`'s fully-connected recurrent network for
// run `num_fun3`, followed by a Hebbian weight update and 0.99 decay.
// The first INPUT_NEURAL_NUM neurons additionally receive external inputs:
// the normalised rank f/num_epoch and two position coordinates scaled by MAX.
// NOTE(review): assumes new_neural[] is zero on entry — it is zeroed at the
// end of every call and old_neural is seeded to 0.5 elsewhere; confirm no
// other writer touches nn[num_fun3] between calls.
__device__ void nn_forward_and_update_paramenter(struct CHROMO ch[], int num, float f, int num_fun3, int num_epoch)
{
float inputs[INPUT_NEURAL_NUM];
float delta_w[SINGLE_WEIGHT];
int t = 0; // flat index over the NEURAL_NUM*(NEURAL_NUM-1) no-self-loop weights
// External inputs (positions — assumes DIMENSION >= 2; TODO confirm).
inputs[0] = f / num_epoch;
inputs[1] = ch[num].position[num_fun3][0] / MAX;
inputs[2] = ch[num].position[num_fun3][1] / MAX;
for (int i = 0; i < SINGLE_WEIGHT; i++)
{
delta_w[i] = 0;
}
// Forward pass: each neuron i accumulates all other neurons' old activations.
for (int i = 0; i < NEURAL_NUM; i++)
{
for (int j = 0; j < NEURAL_NUM; j++)
{
if (i != j)
{
ch[num].nn[num_fun3].new_neural[i] += (ch[num].nn[num_fun3].w[t] * ch[num].nn[num_fun3].old_neural[j]);
t++;
}
}
//printf("dimension %d,neural %d:%f\n",k, i, ch[num].nn[num_fun3][k].new_neural[i]);
// Input neurons get their external input added before the squash.
if (i < INPUT_NEURAL_NUM)
{
ch[num].nn[num_fun3].new_neural[i] = sigmoid(ch[num].nn[num_fun3].new_neural[i] + ch[num].nn[num_fun3].input_w[i] * inputs[i]);
}
else
{
ch[num].nn[num_fun3].new_neural[i] = sigmoid(ch[num].nn[num_fun3].new_neural[i]);
}
}
//printf("\n");
// Compute delta_w from the pre-update (old) activations via the Hebb rule.
t = 0;
for (int i = 0; i < NEURAL_NUM; i++)
{
for (int j = 0; j < NEURAL_NUM; j++)
{
if (i != j)
{
delta_w[t] = hebb(ch[num].a[num_fun3], ch[num].b[num_fun3], ch[num].c[num_fun3], ch[num].d[num_fun3], ch[num].nn[num_fun3].old_neural[j], ch[num].nn[num_fun3].old_neural[i]);
t++;
}
}
}
// Commit the new activations and clear new_neural for the next call.
for (int i = 0; i < NEURAL_NUM; i++)
{
ch[num].nn[num_fun3].old_neural[i] = ch[num].nn[num_fun3].new_neural[i];
ch[num].nn[num_fun3].new_neural[i] = 0;
}
// Apply the Hebbian update with a 0.99 multiplicative decay.
t = 0;
for (int i = 0; i < NEURAL_NUM; i++)
{
for (int j = 0; j < NEURAL_NUM; j++)
{
if (i != j)
{
ch[num].nn[num_fun3].w[t] = (ch[num].nn[num_fun3].w[t] + delta_w[t])*0.99;
t++;
}
}
}
}
// Evaluates benchmark function `funnum` at the point x[num][0..DIMENSION-1].
//  0 sphere, 1 elliptic, 2 rastrigin, 3 ackley, 4 rosenbrock, 5 bent cigar,
//  6 zakharov (unweighted variant), 7 schwefel 1.2, 8 griewank, default discus.
// Fixes vs. previous revision:
//  - elliptic: the exponent i/(DIMENSION-1) was integer division, making
//    every weight 1 except the last; it is now real-valued.
//  - schwefel 1.2: the inner loop summed x[i] instead of the partial sums of
//    x[j], and squared the running accumulator in place; it now computes
//    sum_i (sum_{j<=i} x_j)^2.
//  - griewank: the cosine-product accumulator started at 0, so the product
//    term was always 0; it now starts at the product identity 1.
// Double-precision pow/cos/sqrt/exp calls replaced by float variants.
__device__ float benchmark(int funnum, float x[][DIMENSION], int num)
{
	float f = 0.0f;
	float sum1 = 0.0f;
	float sum2 = 0.0f;
	int i;
	switch (funnum)
	{
	case 0: // sphere: sum x_i^2
	{
		for (i = 0; i < DIMENSION; i++)
			f += x[num][i] * x[num][i];
		return f;
	}
	case 1: // elliptic: sum (1e6)^(i/(D-1)) * x_i^2
	{
		for (i = 0; i < DIMENSION; i++)
			f += powf(1.0e6f, (float)i / (float)(DIMENSION - 1)) * x[num][i] * x[num][i];
		return f;
	}
	case 2: // rastrigin
	{
		for (i = 0; i < DIMENSION; i++)
			f += x[num][i] * x[num][i] - 10.0f * cosf(2.0f * PI * x[num][i]) + 10.0f;
		return f;
	}
	case 3: // ackley
	{
		for (i = 0; i < DIMENSION; i++)
		{
			sum1 += x[num][i] * x[num][i];
			sum2 += cosf(2.0f * PI * x[num][i]);
		}
		sum1 = -0.2f * sqrtf(sum1 / DIMENSION);
		sum2 = sum2 / DIMENSION;
		f = E - 20.0f * expf(sum1) - expf(sum2) + 20.0f;
		return f;
	}
	case 4: // rosenbrock
	{
		for (i = 0; i < DIMENSION - 1; i++)
		{
			sum1 = x[num][i] * x[num][i] - x[num][i + 1];
			sum2 = x[num][i] - 1.0f;
			f += 100.0f * sum1 * sum1 + sum2 * sum2;
		}
		return f;
	}
	case 5: // bent cigar
	{
		for (i = 1; i < DIMENSION; i++)
			f += 1.0e6f * x[num][i] * x[num][i];
		f += x[num][0] * x[num][0];
		return f;
	}
	case 6: // zakharov
	{
		// NOTE(review): the standard Zakharov weight is 0.5*i*x_i; this code
		// has always used 0.5*x_i — kept as-is to preserve existing results.
		for (i = 0; i < DIMENSION; i++)
		{
			float z = x[num][i];
			sum1 += z * z;
			sum2 += 0.5f * z;
		}
		f = sum1 + sum2 * sum2 + sum2 * sum2 * sum2 * sum2;
		return f;
	}
	case 7: // schwefel 1.2: sum_i (sum_{j<=i} x_j)^2
	{
		for (i = 0; i < DIMENSION; i++)
		{
			sum1 += x[num][i]; // running partial sum of coordinates
			f += sum1 * sum1;
		}
		return f;
	}
	case 8: // griewank
	{
		float prod = 1.0f; // product identity is 1, not 0
		for (i = 0; i < DIMENSION; i++)
		{
			sum1 += x[num][i] * x[num][i];
			prod *= cosf(x[num][i] / sqrtf(1.0f + i));
		}
		f = sum1 / 4000.0f - prod + 1.0f;
		return f;
	}
	default: // discus
	{
		f = 1.0e6f * x[num][0] * x[num][0];
		for (i = 1; i < DIMENSION; i++)
			f += x[num][i] * x[num][i];
		return f;
	}
	}
}
// Sorts the first `sort_num` recorded fitness values of slot `num` in
// ascending order (in place, bubble sort), then returns the 1-based rank of
// value `p` within that sorted prefix, or 0 if it is not present.  The float
// equality test is safe because `p` is a bit-exact copy of a stored entry.
__device__ float thread_sort(int sort_num, struct IN_FITNESS in_f[], int num, float p)
{
	// In-place ascending bubble sort of in_f[num].f[0 .. sort_num-1].
	for (int pass = 0; pass < sort_num - 1; pass++)
	{
		for (int k = 0; k + 1 < sort_num - pass; k++)
		{
			if (in_f[num].f[k] > in_f[num].f[k + 1])
			{
				float tmp = in_f[num].f[k];
				in_f[num].f[k] = in_f[num].f[k + 1];
				in_f[num].f[k + 1] = tmp;
			}
		}
	}
	// Linear scan: rank is (position + 1).
	for (int k = 0; k < sort_num; k++)
	{
		if (in_f[num].f[k] == p)
			return (float)(k + 1);
	}
	return 0;
}
// One thread per (chromosome, function, run) triple.  For THREAD_EPOCH
// epochs the thread: snapshots the current position, evaluates the benchmark,
// ranks the result against this thread's own history (thread_sort), runs the
// plastic network STEPS times with that rank as input, then moves the
// position by the network's output neurons (scaled from [0,1] to
// [-DELTA_X_MAX, DELTA_X_MAX] * MAX).  The final benchmark value is written
// to fitness[idx].
// Index decomposition: idx / (BENCHMARK_NUM*PER_BENCHMARK_TIMES) selects the
// chromosome, idx % (...) selects the (function, run) slot, and that slot
// mod BENCHMARK_NUM selects the benchmark function.
__global__ void kernel(struct CHROMO ch[NUM_CHROMO], float fitness[BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO], struct IN_FITNESS in_f[BENCHMARK_NUM*NUM_CHROMO*PER_BENCHMARK_TIMES], struct IN_POSITION in_p[BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO])
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int i,d;
if (idx <BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO) // grid-tail guard
{
float value_position;
int num_epoch = 0;
float ff = 0;
int num = (idx % (BENCHMARK_NUM*PER_BENCHMARK_TIMES)) % BENCHMARK_NUM; // benchmark function id
for (num_epoch = 0; num_epoch<THREAD_EPOCH; num_epoch++)
{
// Record the position at the start of this epoch (debug/inspection output).
for (d = 0; d < DIMENSION; d++)
{
in_p[idx].p[num_epoch][d] = ch[idx / (BENCHMARK_NUM*PER_BENCHMARK_TIMES)].position[idx % (BENCHMARK_NUM*PER_BENCHMARK_TIMES)][d];
}
in_f[idx].f[num_epoch] = benchmark(num, ch[idx / (BENCHMARK_NUM*PER_BENCHMARK_TIMES)].position, idx % (BENCHMARK_NUM*PER_BENCHMARK_TIMES));
//printf("ff:%f \n", in_f[idx].f[num_epoch]);
// Rank of the new value among this thread's num_epoch+1 samples so far.
ff = thread_sort((num_epoch + 1), in_f, idx, in_f[idx].f[num_epoch]);
//printf("epoch %d ff: %f\n ", num_epoch, ff);
// Let the plastic network settle for STEPS iterations with the rank input.
for (i = 0; i < STEPS; i++)
{
nn_forward_and_update_paramenter(ch, idx / (BENCHMARK_NUM*PER_BENCHMARK_TIMES), ff, idx % (BENCHMARK_NUM*PER_BENCHMARK_TIMES), (num_epoch + 1));
}
// Move: the last DIMENSION neurons give a step in [-DELTA_X_MAX, DELTA_X_MAX]*MAX.
for (d = 0; d < DIMENSION; d++)
{
ch[idx / (BENCHMARK_NUM*PER_BENCHMARK_TIMES)].position[idx % (BENCHMARK_NUM*PER_BENCHMARK_TIMES)][d] += (ch[idx / (BENCHMARK_NUM*PER_BENCHMARK_TIMES)].nn[idx % (BENCHMARK_NUM*PER_BENCHMARK_TIMES)].old_neural[NEURAL_NUM - (d + 1)] * (DELTA_X_MAX - DELTA_X_MIN) - DELTA_X_MAX)* MAX;
}
}
// Final fitness after all epochs of movement.
fitness[idx] = benchmark(num, ch[idx / (BENCHMARK_NUM*PER_BENCHMARK_TIMES)].position, idx % (BENCHMARK_NUM*PER_BENCHMARK_TIMES));
printf("in fitness[%d]: %f\n", idx, fitness[idx]);
}
}
// GA driver: for EPOCH generations, copies the population to the device,
// runs the evaluation kernel, pulls the fitness back, and lets my_function()
// rank/select/evolve the population while tracking the historical best.
// Fixes vs. previous revision:
//  - d_in_position was malloc'd on the host and the pointer then overwritten
//    by cudaMalloc (host leak); it is now a plain device pointer.
//  - device buffers were cudaMalloc'd/cudaFree'd every generation; they are
//    now allocated once and reused (sizes never change).
//  - the duplicated g==0 / improvement branches are merged (behaviour is
//    identical: at g==0 the improvement test could never also fire).
int main()
{
	struct CHROMO *h_ch = (struct CHROMO *)malloc(sizeof(CHROMO)*NUM_CHROMO); // host population
	struct CHROMO *d_ch;                                                      // device population
	struct IN_FITNESS *h_in_fitness = (struct IN_FITNESS *)malloc(sizeof(IN_FITNESS)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO);
	struct IN_FITNESS *d_in_fitness;
	struct IN_POSITION *h_in_position = (struct IN_POSITION *)malloc(sizeof(IN_POSITION)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO);
	struct IN_POSITION *d_in_position; // device-only buffer (previously leaked a host malloc)
	float *d_fitness;                  // flat device fitness array
	float *h_fitness = (float *)malloc(sizeof(float)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO);
	int i, g, j, d;
	struct CHROMO GA_best_ch;          // best chromosome of the current generation
	struct CHROMO history_best_ch;     // best chromosome over all generations
	float min_fit;                     // best (lowest) mean rank this generation
	float the_min_fit_inallG;          // best mean rank over all generations
	int record_g = 0;                  // generation index of the historical best
	int device_id = DEVICE_NUM;
	FILE* f_results = fopen("GA200results.txt", "a");
	FILE* f_history_ch = fopen("history_bestch.txt", "a");
	if (f_results == NULL || f_history_ch == NULL)
	{
		printf("failed to open f file\n");
		system("pause");
	}
	srand((unsigned)time(NULL));
	// Zero all host-side scratch buffers.
	for (i = 0; i < BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO; i++)
	{
		h_fitness[i] = 0;
		for (j = 0; j < THREAD_EPOCH; j++)
		{
			h_in_fitness[i].f[j] = 0.0;
			for (d = 0; d < DIMENSION; d++)
			{
				h_in_position[i].p[j][d] = 0.0;
			}
		}
	}
	initial_chromo(h_ch);
	initial_chromo(GA_best_ch);
	initial_chromo(history_best_ch);
	cudaSetDevice(device_id);
	// Allocate device buffers once; sizes are constant across generations.
	cudaMalloc((void **)&d_fitness, sizeof(float)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO);
	cudaMalloc((struct CHROMO **)&d_ch, sizeof(struct CHROMO) * NUM_CHROMO);
	cudaMalloc((struct IN_FITNESS **)&d_in_fitness, sizeof(struct IN_FITNESS)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO);
	cudaMalloc((struct IN_POSITION **)&d_in_position, sizeof(struct IN_POSITION)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO);
	for (g = 0; g < EPOCH; g++)
	{
		// Fresh upload each generation: the population evolved on the host.
		cudaMemcpy(d_ch, h_ch, sizeof(struct CHROMO) * NUM_CHROMO, cudaMemcpyHostToDevice);
		cudaMemcpy(d_fitness, h_fitness, sizeof(float)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO, cudaMemcpyHostToDevice);
		cudaMemcpy(d_in_fitness, h_in_fitness, sizeof(struct IN_FITNESS)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO, cudaMemcpyHostToDevice);
		cudaMemcpy(d_in_position, h_in_position, sizeof(struct IN_POSITION)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO, cudaMemcpyHostToDevice);
		kernel <<<BLOCK, THREAD >>> (d_ch, d_fitness, d_in_fitness, d_in_position);
		cudaDeviceSynchronize();
		cudaMemcpy(h_fitness, d_fitness, sizeof(float)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO, cudaMemcpyDeviceToHost);
		cudaMemcpy(h_in_position, d_in_position, sizeof(struct IN_POSITION)*BENCHMARK_NUM*PER_BENCHMARK_TIMES*NUM_CHROMO, cudaMemcpyDeviceToHost);
		// Rank, log, select/crossover/mutate; returns this generation's best.
		min_fit = my_function(h_ch, h_fitness, GA_best_ch, g);
		// Track the best chromosome over all generations (g == 0 seeds it).
		if (g == 0 || min_fit < the_min_fit_inallG)
		{
			the_min_fit_inallG = min_fit;
			for (i = 0; i < INPUT_NEURAL_NUM; i++)
			{
				history_best_ch.nn_initial.input_w[i] = GA_best_ch.nn_initial.input_w[i];
				fprintf(f_history_ch, "%f\n", history_best_ch.nn_initial.input_w[i]);
			}
			for (j = 0; j < SINGLE_WEIGHT; j++)
			{
				history_best_ch.nn_initial.w[j] = GA_best_ch.nn_initial.w[j];
				fprintf(f_history_ch, "%f\n", history_best_ch.nn_initial.w[j]);
			}
			history_best_ch.A = GA_best_ch.A;
			history_best_ch.B = GA_best_ch.B;
			history_best_ch.C = GA_best_ch.C;
			history_best_ch.D = GA_best_ch.D;
			fprintf(f_history_ch, "A:%f\n B:%f\n C:%f\n D:%f\n", history_best_ch.A, history_best_ch.B, history_best_ch.C, history_best_ch.D);
			record_g = g;
		}
		printf("the history min ranking is:%f \n", the_min_fit_inallG);
		fprintf(f_results, "history best is in g=%d:%f\n", record_g + 1, the_min_fit_inallG);
	}//GA generations
	cudaFree(d_fitness);
	cudaFree(d_ch);
	cudaFree(d_in_fitness);
	cudaFree(d_in_position);
	fclose(f_history_ch);
	fclose(f_results);
	free(h_ch);
	free(h_fitness);
	free(h_in_fitness);
	free(h_in_position);
	return 0;
}
14,647 | __global__ void actiune_thread(float* a_d, float* b_d,float *r_d,int N);
// Kernelul ce se executa pe device-ul CUDA
// Element-wise kernel: r_d[x] = a_d[x] + 1.  One thread per element.
// b_d is unused by the current computation but kept for the launcher's ABI.
// Fix: added a grid-tail bounds guard using N (previously any overshooting
// launch configuration produced out-of-bounds reads/writes) and removed the
// unused local `y`.
__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N)
{
	// Global 1-D thread index.
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x < N)
	{
		r_d[x] = a_d[x] + 1.f;
	}
}
// C-linkage host launcher for actiune_thread; returns the launch status so
// the caller can detect bad launch configurations.
extern "C"
cudaError_t launch_actiune_thread(float* a_d, float* b_d, float *r_d, int N, dim3 DIM_GRID, dim3 DIM_BLOCK)
{
	actiune_thread<<<DIM_GRID, DIM_BLOCK>>>(a_d, b_d, r_d, N);
	cudaError_t launch_status = cudaGetLastError();
	return launch_status;
}
14,648 | #include <string.h>
#include <stdio.h>
#include <time.h>
static const int M = 8192;
static const int N = 8192;
int block_x=16;
int block_y=16;
int grid_x=512;
int grid_y=512;
// Sparse-matrix triplet record as stored in the binary data files:
// (row x, column y, value data).  Read/written with fread/fwrite, so the
// on-disk layout matches this struct's in-memory layout.
struct data{
int x, y; // row and column indices
float data; // matrix element value
};
// Matrix product over pitched device arrays: thread t (only the first M
// threads do work) computes one full output row,
//   C[t][i] = sum_j A[j][t] * B[j][i]
// i.e. C = A^T * B in row-major terms.  A and B were allocated with
// cudaMallocPitch, so rows are `pitch` bytes apart; C is a plain M*N array.
// NOTE(review): both A and B are walked down columns (stride = pitch), so
// these global loads are uncoalesced — confirm before using at scale.
__global__ void matrix_multi(float *A,float *B,float *C,int pitch)
{
int Row=blockIdx.y*blockDim.y+threadIdx.y;
int Col=blockIdx.x*blockDim.x+threadIdx.x;
int thread_id=Row*blockDim.x*gridDim.x+Col; // flat global thread index
int i, j;
if(thread_id<M) // one thread per output row; extra threads idle
{
for(i=0;i<N;i++)
{
float sum=0;
for(j=0;j<M;j++)
{
int h=pitch/sizeof(float); // elements per pitched row
float *addr_a=&A[0]+j*h+thread_id; // A[j][thread_id]
float *addr_b=&B[0]+j*h+i;         // B[j][i]
sum+=(*addr_a)*(*addr_b);
}
C[thread_id*N+i]=sum;
}
}
}
// Reads a sparse matrix as (x, y, value) triplets, builds h_b and its
// transpose h_a, multiplies them on the GPU, and writes the non-zero result
// entries back out as triplets.
// Fixes vs. previous revision:
//  - memset cleared only M*N BYTES, leaving 3/4 of each float buffer
//    uninitialized; sizes now include sizeof(float).
//  - the read loop tested feof() before fread(), which processes the final
//    record twice; the loop now terminates on fread's return value.
//  - serial_c was never freed; CUDA events were never destroyed.
int main()
{
	float *h_a = (float *)malloc(M*N*sizeof(float));
	float *h_b = (float *)malloc(M*N*sizeof(float));
	float *h_c = (float *)malloc(M*N*sizeof(float));
	float *serial_c = (float *)malloc(M*N*sizeof(float));
	memset(h_a, 0, M*N*sizeof(float));
	memset(h_b, 0, M*N*sizeof(float));
	memset(serial_c, 0, M*N*sizeof(float));
	FILE* file = fopen("/public/home/st17341046/read_data.txt", "rb");
	if (file == NULL)
	{
		printf("cannot open input file\n");
		return 1;
	}
	// h_b holds the matrix; h_a holds its transpose.
	struct data rec;
	while (fread(&rec, sizeof(struct data), 1, file) == 1)
	{
		h_b[rec.x*N+rec.y] = rec.data;
		h_a[rec.y*N+rec.x] = rec.data;
	}
	fclose(file);
	int i, j;
	float *dev_a;
	float *dev_b;
	float *dev_c;
	size_t pitch = 0;
	// Pitched allocations keep each row of A and B aligned on the device.
	cudaMallocPitch((void**)&dev_a, &pitch, N*sizeof(float), M);
	cudaMemcpy2D(dev_a, pitch, h_a, N*sizeof(float), N*sizeof(float), M, cudaMemcpyHostToDevice);
	cudaMallocPitch((void**)&dev_b, &pitch, N*sizeof(float), M);
	cudaMemcpy2D(dev_b, pitch, h_b, N*sizeof(float), N*sizeof(float), M, cudaMemcpyHostToDevice);
	cudaMalloc((void**)(&dev_c), M*N*sizeof(float));
	dim3 block(block_x, block_y);
	dim3 grid(grid_x, grid_y);
	// Time the kernel with CUDA events.
	cudaEvent_t start, stop;
	float elapsedTime = 0.0;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	matrix_multi<<<grid,block>>>(dev_a, dev_b, dev_c, pitch);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	printf("时间 %f ms\n", elapsedTime);
	cudaMemcpy((void*)(h_c), (void*)(dev_c), M*N*sizeof(float), cudaMemcpyDeviceToHost);
	// Write the non-zero results back out as triplets.
	file = fopen("/public/home/st17341056/write_data.txt", "wb");
	if (file != NULL)
	{
		for (i = 0; i < M; i++)
			for (j = 0; j < N; j++)
			{
				if (h_c[i*N+j] == 0) continue;
				struct data out;
				out.x = i;
				out.y = j;
				out.data = h_c[i*N+j];
				fwrite(&out, sizeof(struct data), 1, file);
			}
		fclose(file);
	}
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree((void*)dev_a);
	cudaFree((void*)dev_b);
	cudaFree((void*)dev_c);
	free(h_a);
	free(h_c);
	free(h_b);
	free(serial_c); // fixed: previously leaked
	return 0;
}
|
14,649 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS 64
#define WARP_SIZE 32
#define WARPS ((THREADS+WARP_SIZE-1)/WARP_SIZE)
#define IP 4
#define OP 4
#define INBUFFERS 4
#define OUTBUFFERS (((WARP_SIZE/IP)*INBUFFERS+(WARP_SIZE/OP)-1)/(WARP_SIZE/OP))
// Exercises a custom "bsmad" instruction.  The asm block is wrapped in /* */
// comment markers emitted through asm() plus CPTX_BEGIN/CPTX_END tags, so
// stock ptxas ignores it — presumably a downstream tool rewrites the marked
// region into real PTX (TODO confirm the toolchain).  Each thread feeds its
// INBUFFERS packed input words and one synapse word; only the first
// OUTBUFFERS lanes of each warp commit a result to `output`.
// `volatile` on tid/result prevents the compiler from optimising away the
// otherwise-unused asm operands.
__global__ void BSMAD(int *buffer, int *synapse, int *output) {
volatile int tid = threadIdx.x;
volatile int result = 0;
asm("/*");
asm("CPTX_BEGIN");
asm("bsmad.s32 %0, %1, %2, %3, %4, %5, %6, %7, %8;" : "=r"(result) : "r"(IP), "r"(OP), "r"(buffer[tid*INBUFFERS]), "r"(buffer[tid*INBUFFERS+1]), "r"(buffer[tid*INBUFFERS+2]), "r"(buffer[tid*INBUFFERS+3]), "r"(synapse[tid]), "r"(output[(tid%WARP_SIZE)%OUTBUFFERS + OUTBUFFERS*(tid/WARP_SIZE)]));
asm("CPTX_END");
asm("*/");
if (tid%WARP_SIZE < OUTBUFFERS) output[tid%WARP_SIZE + OUTBUFFERS*(tid/WARP_SIZE)] = result;
}
// Driver for the BSMAD kernel: fills the input/synapse buffers with fixed
// patterns, seeds the output with a sentinel, runs one block of THREADS
// threads, and prints each warp's output words.
int main(int argc, char** argv) {
	// Host buffers: packed inputs, per-thread synapse words, and one output
	// slot per (warp, output buffer).
	int *h_buffer  = (int*)malloc(INBUFFERS*THREADS*sizeof(int));
	int *h_synapse = (int*)malloc(THREADS*sizeof(int));
	int *h_output  = (int*)calloc(WARPS*OUTBUFFERS, sizeof(int));
	int n;
	for (n = 0; n < INBUFFERS*THREADS; n++)
		h_buffer[n] = 0xFDB97531;                 // fixed input bit pattern
	for (n = 0; n < THREADS; n++)
		h_synapse[n] = 1 + 2*(n/WARP_SIZE);       // odd weight per warp
	for (n = 0; n < WARPS*OUTBUFFERS; n++)
		h_output[n] = 0x87654321;                 // sentinel to spot unwritten slots
	// Device mirrors.
	int *d_buffer;
	int *d_synapse;
	int *d_output;
	cudaMalloc(&d_buffer, INBUFFERS*THREADS*sizeof(int));
	cudaMalloc(&d_synapse, THREADS*sizeof(int));
	cudaMalloc(&d_output, WARPS*OUTBUFFERS*sizeof(int));
	cudaMemcpy(d_buffer, h_buffer, INBUFFERS*THREADS*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_synapse, h_synapse, THREADS*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_output, h_output, WARPS*OUTBUFFERS*sizeof(int), cudaMemcpyHostToDevice);
	BSMAD<<<1, THREADS>>>(d_buffer, d_synapse, d_output);
	cudaMemcpy(h_output, d_output, WARPS*OUTBUFFERS*sizeof(int), cudaMemcpyDeviceToHost);
	// Dump results grouped by warp.
	for (n = 0; n < WARPS*OUTBUFFERS; n++) {
		if (n % OUTBUFFERS == 0) printf("\nWarp %d:\n", n/OUTBUFFERS);
		printf("%d: %08x\n", n, h_output[n]);
	}
	cudaFree(d_buffer);
	cudaFree(d_synapse);
	cudaFree(d_output);
	free(h_buffer);
	free(h_synapse);
	free(h_output);
	return 0;
}
|
14,650 | #include "includes.h"
// Per-pixel soft-clip tone mapping on an RGBA float image (one thread per
// pixel).  Pipeline per channel: optional input linearisation selected by
// p_Source (0 = pass-through, 1 = a Cineon-style log-to-linear, 2 = a
// log/matrix decode), split into low (<=1) and high (>1) parts, highlight
// compression controlled by p_SoftClipA/B, upper knee by p_SoftClipC/D,
// shadow knee (via an inverted-signal pass) by p_SoftClipE/F, then a
// gamma-style re-encode when p_Source != 0.  p_SwitchA/p_SwitchB replace the
// output with clipped-highlight / clipped-shadow matte views.  Alpha is
// copied through unchanged.
// NOTE(review): the matrix/log constants look like film-scan decode
// coefficients — source colourspace not verifiable from here.
__global__ void SoftClipKernel( const float* p_Input, float* p_Output, int p_Width, int p_Height, float p_SoftClipA, float p_SoftClipB, float p_SoftClipC, float p_SoftClipD, float p_SoftClipE, float p_SoftClipF, int p_SwitchA, int p_SwitchB, int p_Source) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < p_Width && y < p_Height) {
const int index = (y * p_Width + x) * 4; // RGBA, 4 floats per pixel
float r = p_Input[index];
float g = p_Input[index + 1];
float b = p_Input[index + 2];
// Candidate linearisations (only one is selected below via p_Source).
float cr = (powf(10.0f, (1023.0f * r - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f);
float cg = (powf(10.0f, (1023.0f * g - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f);
float cb = (powf(10.0f, (1023.0f * b - 685.0f) / 300.0f) - 0.0108f) / (1.0f - 0.0108f);
float lr = r > 0.1496582f ? (powf(10.0f, (r - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (r - 0.092809f) / 5.367655f;
float lg = g > 0.1496582f ? (powf(10.0f, (g - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (g - 0.092809f) / 5.367655f;
float lb = b > 0.1496582f ? (powf(10.0f, (b - 0.385537f) / 0.2471896f) - 0.052272f) / 5.555556f : (b - 0.092809f) / 5.367655f;
// 3x3 matrix applied to the log-decoded values (p_Source == 2 path).
float mr = lr * 1.617523f + lg * -0.537287f + lb * -0.080237f;
float mg = lr * -0.070573f + lg * 1.334613f + lb * -0.26404f;
float mb = lr * -0.021102f + lg * -0.226954f + lb * 1.248056f;
float sr = p_Source == 0 ? r : p_Source == 1 ? cr : mr;
float sg = p_Source == 0 ? g : p_Source == 1 ? cg : mg;
float sb = p_Source == 0 ? b : p_Source == 1 ? cb : mb;
// Split into low (clamped to <=1) and high (excess above 1) components.
float Lr = sr > 1.0f ? 1.0f : sr;
float Lg = sg > 1.0f ? 1.0f : sg;
float Lb = sb > 1.0f ? 1.0f : sb;
float Hr = (sr < 1.0f ? 1.0f : sr) - 1.0f;
float Hg = (sg < 1.0f ? 1.0f : sg) - 1.0f;
float Hb = (sb < 1.0f ? 1.0f : sb) - 1.0f;
float rr = p_SoftClipA; // highlight gain (stops)
float gg = p_SoftClipB; // highlight compression exponent
float aa = p_SoftClipC; // upper knee start
float bb = p_SoftClipD; // upper knee strength
float ss = 1.0f - (p_SoftClipE / 10.0f); // shadow knee start (inverted domain)
float sf = 1.0f - p_SoftClipF;           // shadow knee strength (inverted domain)
// Compress the highlight excess.
float Hrr = Hr * powf(2.0f, rr);
float Hgg = Hg * powf(2.0f, rr);
float Hbb = Hb * powf(2.0f, rr);
float HR = Hrr <= 1.0f ? 1.0f - powf(1.0f - Hrr, gg) : Hrr;
float HG = Hgg <= 1.0f ? 1.0f - powf(1.0f - Hgg, gg) : Hgg;
float HB = Hbb <= 1.0f ? 1.0f - powf(1.0f - Hbb, gg) : Hbb;
float R = Lr + HR;
float G = Lg + HG;
float B = Lb + HB;
// Upper soft knee: rational roll-off above aa, then above 1 - bb/50.
float softr = aa == 1.0f ? R : (R > aa ? (-1.0f / ((R - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : R);
float softR = bb == 1.0f ? softr : softr > 1.0f - (bb / 50.0f) ? (-1.0f / ((softr - (1.0f - (bb / 50.0f))) /
(1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softr;
float softg = (aa == 1.0f) ? G : (G > aa ? (-1.0f / ((G - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : G);
float softG = bb == 1.0f ? softg : softg > 1.0f - (bb / 50.0f) ? (-1.0f / ((softg - (1.0f - (bb / 50.0f))) /
(1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softg;
float softb = (aa == 1.0f) ? B : (B > aa ? (-1.0f / ((B - aa) / (bb - aa) + 1.0f) + 1.0f) * (bb - aa) + aa : B);
float softB = bb == 1.0f ? softb : softb > 1.0f - (bb / 50.0f) ? (-1.0f / ((softb - (1.0f - (bb / 50.0f))) /
(1.0f - (1.0f - (bb / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (bb / 50.0f))) + (1.0f - (bb / 50.0f)) : softb;
// Shadow soft knee: invert the signal, apply the same knee shape, invert back.
float Cr = (softR * -1.0f) + 1.0f;
float Cg = (softG * -1.0f) + 1.0f;
float Cb = (softB * -1.0f) + 1.0f;
float cR = ss == 1.0f ? Cr : Cr > ss ? (-1.0f / ((Cr - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cr;
float CR = sf == 1.0f ? (cR - 1.0f) * -1.0f : ((cR > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cR - (1.0f - (-p_SoftClipF / 50.0f))) /
(1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cR) - 1.0f) * -1.0f;
float cG = ss == 1.0f ? Cg : Cg > ss ? (-1.0f / ((Cg - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cg;
float CG = sf == 1.0f ? (cG - 1.0f) * -1.0f : ((cG > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cG - (1.0f - (-p_SoftClipF / 50.0f))) /
(1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cG) - 1.0f) * -1.0f;
float cB = ss == 1.0f ? Cb : Cb > ss ? (-1.0f / ((Cb - ss) / (sf - ss) + 1.0f) + 1.0f) * (sf - ss) + ss : Cb;
float CB = sf == 1.0f ? (cB - 1.0f) * -1.0f : ((cB > 1.0f - (-p_SoftClipF / 50.0f) ? (-1.0f / ((cB - (1.0f - (-p_SoftClipF / 50.0f))) /
(1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + 1.0f) + 1.0f) * (1.0f - (1.0f - (-p_SoftClipF / 50.0f))) + (1.0f - (-p_SoftClipF / 50.0f)) : cB) - 1.0f) * -1.0f;
// Re-encode (Rec.709-style transfer for in-range values) unless pass-through.
float SR = p_Source == 0 ? CR : CR >= 0.0f && CR <= 1.0f ? (CR < 0.0181f ? (CR * 4.5f) : 1.0993f * powf(CR, 0.45f) - (1.0993f - 1.0f)) : CR;
float SG = p_Source == 0 ? CG : CG >= 0.0f && CG <= 1.0f ? (CG < 0.0181f ? (CG * 4.5f) : 1.0993f * powf(CG, 0.45f) - (1.0993f - 1.0f)) : CG;
float SB = p_Source == 0 ? CB : CB >= 0.0f && CB <= 1.0f ? (CB < 0.0181f ? (CB * 4.5f) : 1.0993f * powf(CB, 0.45f) - (1.0993f - 1.0f)) : CB;
// Optional matte outputs: SwitchA shows clipped highlights, SwitchB shadows.
p_Output[index] = p_SwitchA == 1 ? (SR < 1.0f ? 1.0f : SR) - 1.0f : p_SwitchB == 1 ? (SR >= 0.0f ? 0.0f : SR + 1.0f) : SR;
p_Output[index + 1] = p_SwitchA == 1 ? (SG < 1.0f ? 1.0f : SG) - 1.0f : p_SwitchB == 1 ? (SG >= 0.0f ? 0.0f : SG + 1.0f) : SG;
p_Output[index + 2] = p_SwitchA == 1 ? (SB < 1.0f ? 1.0f : SB) - 1.0f : p_SwitchB == 1 ? (SB >= 0.0f ? 0.0f : SB + 1.0f) : SB;
p_Output[index + 3] = p_Input[index + 3]; // alpha passes through untouched
}}
14,651 | #include<cuda.h>
#include<time.h>
__global__ void _AFFINE_KERNEL_1(int* ,int ,int* ,int ,int ,int, int, int, int );
__global__ void _AFFINE_KERNEL(int* ,int ,int* ,int ,int ,int ,int ,int ,int ,int );
#include<stdio.h>
#include<stdlib.h>
// Driver for a tiled affine-loop GPU transformation (Higbie time-skewing
// variant): parses a Matrix-Market-style file into an M x M matrix XP1 and a
// length-N vector XS3, then repeatedly applies
//   XP1[i][j] = XP1[i+3][j+4] + XS3[i]
// over diagonal wavefronts of width lambda, timing pre/compute/post phases.
// argv: [1] threads, [2] blocks, [3] data id, [4] input file path.
// NOTE(review): XP1/XS3 are runtime-sized stack VLAs — large inputs will
// overflow the stack; strlen/strcpy/strcat and sqrt are used without
// <string.h>/<math.h> being included above — relies on nvcc's implicit
// includes, confirm on the target toolchain.
int main(int argc, char** argv)
{
int N = 1000;
int N_NODES = 100;
int data = 1;
int _NTHREAD = 1, _NBLOCK = 1;
char* readfile, *outfile;
if(argc>1) _NTHREAD = atoi(argv[1]);
if(argc>2) _NBLOCK = atoi(argv[2]);
if(argc>3) data = atoi(argv[3]) + 1;
if(argc>4) readfile = argv[4];
int i,j;
FILE* f;
f = fopen(readfile, "r");
j=0;
char c;
// Skip the '%'-prefixed comment header lines of the input file.
while(1){
c = fgetc(f);
if(c=='\n') {
j++;
c = fgetc(f);
if(c!='%') break;
}
}
// Header: rows, cols (both into N_NODES), then entry count N.
fscanf(f, "%d", &N_NODES);
fscanf(f, "%d", &N_NODES);
fscanf(f, "%d", &N);
// Bail out if the requested thread count exceeds the problem size.
if(2*N<_NTHREAD*_NBLOCK) {
printf("%d\n",_NTHREAD*_NBLOCK);
fclose(f);
return 0;
}
struct timespec start, end, mid_start, mid_end;
double runTime, pre_time, post_time, computeTime;
outfile = (char*)malloc(sizeof(char)*(strlen(readfile)+50));
strcpy(outfile, readfile);
strcat(outfile, ".data_Higbie1_TD");
FILE* fp;
fp = fopen(outfile, "a");
int M,I;
M=sqrt(2.0*N);
int XP1[M][M],XS3[N]; // runtime-sized VLAs (see stack-size note above)
i=0;
j=0;
// Fill XP1 row-major from the file, mirroring values into XS3 as read.
for (I = 0; I < N; I++)
{
if((j<=M-1)&&(i<=M-1))
{
fscanf(f, "%d", &XP1[i][j++]);
XS3[I]=XP1[i][j-1];
if(j<M) fscanf(f, "%d", &XP1[i][j++]);
else if(i<M-1)
{
j=0;
fscanf(f, "%d", &XP1[++i][j++]);
}
}
else if((j==M)&&(i<M-1))
{
j=0;
fscanf(f, "%d", &XP1[++i][j++]);
XS3[I]=XP1[i][j-1];
fscanf(f, "%d", &XP1[i][j++]);
}
else if(i==M-1)
fscanf(f, "%d", &XS3[I]); // matrix full; remaining values go to XS3 only
}
clock_gettime(CLOCK_MONOTONIC, &start);
// Upload XS3 and XP1 to the device.
int _SZ_XS3_1 = N;
int _SZ_XP1_2 = M;
int _SZ_XP1_1 = M;
int *_DEV_XS3;
cudaMalloc((void**) &_DEV_XS3, sizeof(int)*_SZ_XS3_1);
cudaMemcpy(_DEV_XS3, XS3, sizeof(int)*_SZ_XS3_1, cudaMemcpyHostToDevice);
int *_DEV_XP1;
cudaMalloc((void**) &_DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1);
cudaMemcpy(_DEV_XP1, XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, cudaMemcpyHostToDevice);
// Derive a 2-D launch configuration and a tile count from M and the
// requested thread/block budget.
float _NUM_THREADS = M*M,_NUM_BLOCKS=1;
int _NUM_TILE=1;
dim3 _THREADS(512);
dim3 _BLOCKS(1);
if(_NUM_THREADS < _NTHREAD)
{
_THREADS.x=M;
_THREADS.y=M;
}
else {
_NUM_BLOCKS=_NUM_THREADS/256;
_BLOCKS.x=_BLOCKS.y=ceil(sqrt(_NUM_BLOCKS));
_THREADS.x=_THREADS.y=ceil(sqrt((M*M*1.0)/(_BLOCKS.x*_BLOCKS.y)));
int temp=_NUM_BLOCKS;
if(_NUM_BLOCKS>_NBLOCK)
_NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1);
}
int _CUDA_TILE;
int lambda=3*M+4; // wavefront width from the (i+3, j+4) dependence distances
int id_1,id_2,id_3,id_4,id_5;
int UB_1=M-0;
int UB_2=M-0;
clock_gettime(CLOCK_MONOTONIC, &mid_start);
// Walk the iteration space in lambda-wide wavefronts: partial leading row,
// full interior rows, partial trailing row — each tiled over _NUM_TILE.
for(id_1=1;id_1<=(UB_1*UB_2);id_1+=lambda) {
id_2=(id_1/UB_2);
id_3=((id_1+lambda)/UB_2);
id_4=(id_1%UB_2)-1;
id_5=UB_2-((id_1+lambda)%UB_2);
for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++)
{
_AFFINE_KERNEL_1<<<_BLOCKS,_THREADS>>>(_DEV_XS3, _SZ_XS3_1, _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1, id_2, id_4, UB_2, _CUDA_TILE);
cudaDeviceSynchronize();
}
for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++)
{
_AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_XS3, _SZ_XS3_1, _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1, id_2, id_3, 0, M, _CUDA_TILE);
cudaDeviceSynchronize();
}
for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++)
{
_AFFINE_KERNEL_1<<<_BLOCKS,_THREADS>>>(_DEV_XS3, _SZ_XS3_1, _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1, id_3, 0, id_5, _CUDA_TILE);
cudaDeviceSynchronize();
}
}
clock_gettime(CLOCK_MONOTONIC, &mid_end);
cudaMemcpy(XP1, _DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, cudaMemcpyDeviceToHost);
cudaFree(_DEV_XP1);
clock_gettime(CLOCK_MONOTONIC, &end);
// Phase timings in seconds: host->device, compute, device->host, total.
pre_time = (double) ((((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000;
post_time = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec)) / 1000000000;
computeTime = (double) ((((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec) - (((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec)) / 1000000000;
runTime = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000;
printf("********************************\n");
fprintf(fp,"%d,%d,%d,%d,%d,%.14f,%.14f,%.14f,%.14f,%d\n",N,_NTHREAD*_NBLOCK,_THREADS.x,_BLOCKS.x,data,pre_time,computeTime,post_time,runTime,_CUDA_TILE);
printf("RUN TIME: %.14f\n", runTime);
fclose(fp);
fclose(f);
return 0;
}
// One-dimensional sweep along j (thread y-index) for a fixed row i:
//   XP1[i][j] = XP1[i+3][j+4] + XS3[i],   for id_1 <= j <= UB_1.
// _CUDA_TILE shifts the j-range by one full grid's worth of threads per tile.
__global__ void _AFFINE_KERNEL_1(int* XS3,int _SZ_XS3_1,int* XP1,int _SZ_XP1_2,int _SZ_XP1_1,int i, int id_1, int UB_1, int _CUDA_TILE)
{
	int j = gridDim.y*blockDim.y*_CUDA_TILE + blockDim.y*blockIdx.y + threadIdx.y;
	if (j < id_1 || j > UB_1)
		return; // outside this wavefront's j-range
	XP1[i*_SZ_XP1_1 + j] = XP1[(i + 3)*_SZ_XP1_1 + j + 4] + XS3[i];
}
// Two-dimensional sweep over the rectangle [CUDA_L_i, CUDA_U_i] x
// [CUDA_L_j, CUDA_U_j]:
//   XP1[i][j] = XP1[i+3][j+4] + XS3[i].
// _CUDA_TILE shifts both indices by one full grid's worth of threads per tile.
__global__ void _AFFINE_KERNEL(int* XS3,int _SZ_XS3_1,int* XP1,int _SZ_XP1_2,int _SZ_XP1_1,int CUDA_L_i,int CUDA_U_i, int CUDA_L_j,int CUDA_U_j, int _CUDA_TILE)
{
	int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x;
	int j = gridDim.y*blockDim.y*_CUDA_TILE + blockDim.y*blockIdx.y + threadIdx.y;
	if (i < CUDA_L_i || i > CUDA_U_i)
		return; // outside the i-range
	if (j < CUDA_L_j || j > CUDA_U_j)
		return; // outside the j-range
	XP1[i*_SZ_XP1_1 + j] = XP1[(i + 3)*_SZ_XP1_1 + j + 4] + XS3[i];
}
|
14,652 | #include<bits/stdc++.h>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/generate.h>
#include<thrust/sort.h>
#include<thrust/copy.h>
#include<thrust/find.h>
using namespace std;
const int mod = 2E7;
// Holds a randomly generated array of N integers (each in [0, mod)) mirrored
// on the GPU, and answers membership queries by running thrust::find over the
// device copy (one device-wide scan per query).
class SearchMachine{
	thrust::host_vector<int> A;    // host copy of the data
	thrust::device_vector<int> dA; // device copy that queries search
	int N;                         // number of elements
public:
	SearchMachine(int n) : N(n){
		A.resize(N);
		dA.resize(N);
		// Fill the host array with pseudo-random values, then mirror it to
		// the device.  (rand() call order matters for reproducibility.)
		thrust::generate(A.begin(), A.end(), [&](){ return rand() % mod; });
		thrust::copy(A.begin(), A.end(), dA.begin());
	}
	// For every query value: ans[i] = 1 if it occurs in the array, else 0.
	// Also prints wall-clock timing for the whole batch.
	void processQueries(thrust::host_vector<int> &ans, thrust::host_vector<int> &input){
		cout<<"=============Begin processing queries=============="<<endl<<endl;
		clock_t start_time = clock();
		ans.resize(input.size());
		const int query_count = (int)input.size();
		for(int i = 0; i < query_count; ++i){
			bool missing = thrust::find(dA.begin(), dA.end(), input[i]) == dA.end();
			ans[i] = missing ? 0 : 1;
		}
		clock_t end_time = clock();
		cout<<"=============Complete proessing queries============"<<endl<<endl;
		cout<<"Time Usage: "<<double(end_time - start_time)/CLOCKS_PER_SEC;
		cout<<" s"<<endl<<endl<<endl;
		return ;
	}
};
// Builds M random queries, then a SearchMachine over N random values, runs
// the query batch on the GPU and prints every (M/10)-th answer.
// Note: Q is filled BEFORE the SearchMachine is constructed, so the rand()
// sequence split between queries and data is preserved.
int main(){
	srand(0); // fixed seed keeps data and queries reproducible
	int N = 1 << 24;
	int M = 1 << 8;
	thrust::host_vector<int> Q(M), ans;
	for(int i = 0; i < M; ++i){
		Q[i] = rand() % mod;
	}
	SearchMachine s(N);
	s.processQueries(ans, Q);
	for(int i = 0; i < M; i += M/10){
		cout<<ans[i]<<' ';
	}
	cout<<endl<<endl;
	return 0;
}
|
14,653 | #include <cuda_runtime_api.h>
#include <stdint.h>
// Per-sample weighted negative-log-likelihood forward pass:
// out_loss[b] = -log(out_act[b][label_cats[b]]) * weights[b] * targets[b].
// One thread per batch element; out_act is laid out row-major [batch][dim].
__global__ void softmax_kl_loss_fwd_batch_kernel(
const float *out_act,      // softmax outputs, [batch_size x dim]
int dim,                   // classes per sample
int batch_size,
const int32_t *label_cats, // target class index per sample
const float *weights,      // per-sample loss weight
const float *targets,      // per-sample target scale
float *out_loss)           // output: one loss per sample
{
int batch_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (batch_idx < batch_size) {
int cat_i = label_cats[batch_idx];
// Flat index of the labeled class within this sample's row.
int idx = cat_i + batch_idx * dim;
float x = -logf(out_act[idx]) * weights[batch_idx] * targets[batch_idx];
out_loss[batch_idx] = x;
}
}
// Host launcher for softmax_kl_loss_fwd_batch_kernel: one thread per batch
// element, 1024 threads per block, grid rounded up, on the given stream.
extern "C" void rembrandt_kernel_softmax_kl_loss_fwd_batch(
    const float *out_act,
    int dim,
    int batch_size,
    const int32_t *label_cats,
    const float *weights,
    const float *targets,
    float *out_loss,
    cudaStream_t stream)
{
  const int threads = 1024;
  const int blocks = (batch_size + threads - 1) / threads; // ceil-div
  softmax_kl_loss_fwd_batch_kernel<<<blocks, threads, 0, stream>>>(
      out_act, dim, batch_size, label_cats, weights, targets, out_loss);
}
// Backward pass: in_delta[b][i] = (out_act[b][i] - 1{i == label}) * w[b] * t[b],
// i.e. the classic softmax + NLL gradient scaled per sample.
// One thread per (sample, class) element over the flat [batch*dim] range.
__global__ void softmax_kl_loss_bwd_batch_kernel(
const float *out_act,      // softmax outputs, [batch_size x dim]
int dim,
int batch_size,
const int32_t *label_cats, // target class index per sample
const float *weights,
const float *targets,
float *in_delta)           // output gradient, same layout as out_act
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Decompose the flat index into (class i, sample batch_idx).
int i = idx % dim;
int batch_idx = idx / dim;
if ((i < dim) && (batch_idx < batch_size)) {
int cat_i = label_cats[batch_idx];
float dx = out_act[idx];
if (i == cat_i) {
dx -= 1.0f; // subtract the one-hot target at the labeled class
}
dx *= weights[batch_idx] * targets[batch_idx];
in_delta[idx] = dx;
}
}
// Host launcher for softmax_kl_loss_bwd_batch_kernel: one thread per
// (sample, class) element, 1024 threads per block, on the given stream.
extern "C" void rembrandt_kernel_softmax_kl_loss_bwd_batch(
    const float *out_act,
    int dim,
    int batch_size,
    const int32_t *label_cats,
    const float *weights,
    const float *targets,
    float *in_delta,
    cudaStream_t stream)
{
  const int total = dim * batch_size;
  const int threads = 1024;
  const int blocks = (total + threads - 1) / threads; // ceil-div
  softmax_kl_loss_bwd_batch_kernel<<<blocks, threads, 0, stream>>>(
      out_act, dim, batch_size, label_cats, weights, targets, in_delta);
}
// Computes out_r_act[b][i] = in_r_act[b][i] - mix_in_r_act[b], i.e. subtracts
// a per-sample scalar from every class entry. One thread per element.
// NOTE(review): the commented-out code suggests this was once multiplied by
// the softmax output (an R-op style forward) — the shipped behavior is the
// plain subtraction only.
__global__ void softmax_r_fwd_batch_kernel(
const float *in_r_act,     // [batch_size x dim]
int dim,
int batch_size,
const float *mix_in_r_act, // one scalar per sample
float *out_r_act)          // output, same layout as in_r_act
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int i = idx % dim;
int batch_idx = idx / dim;
if ((i < dim) && (batch_idx < batch_size)) {
/*float x = out_act[idx];
x *= in_r_act[idx] - mix_in_r_act[batch_idx];*/
float x = in_r_act[idx] - mix_in_r_act[batch_idx];
out_r_act[idx] = x;
}
}
// Host launcher for softmax_r_fwd_batch_kernel: one thread per element of
// the [batch_size x dim] tensor, 1024 threads per block, on the given stream.
extern "C" void rembrandt_kernel_softmax_r_fwd_batch(
    const float *in_r_act,
    int dim,
    int batch_size,
    const float *mix_in_r_act,
    float *out_r_act,
    cudaStream_t stream)
{
  const int total = dim * batch_size;
  const int threads = 1024;
  const int blocks = (total + threads - 1) / threads; // ceil-div
  softmax_r_fwd_batch_kernel<<<blocks, threads, 0, stream>>>(
      in_r_act, dim, batch_size, mix_in_r_act, out_r_act);
}
// Per-sample loss on the R-forward activations: picks the entry at the
// labeled class and negates it: out_r_loss[b] = -out_r_act[b][label].
// One thread per batch element. The commented lines show earlier variants
// that divided by out_act or applied r_weights; neither is active.
__global__ void softmax_kl_loss_r_fwd_batch_kernel(
const float *out_r_act,    // [batch_size x dim]
int dim,
int batch_size,
const int32_t *label_cats, // target class index per sample
//const float *r_weights,
float *out_r_loss)         // output: one value per sample
{
int batch_idx = threadIdx.x + blockIdx.x * blockDim.x;
if (batch_idx < batch_size) {
int label_i = label_cats[batch_idx];
int label_idx = label_i + batch_idx * dim;
//float x = -out_r_act[label_idx] / out_act[label_idx] * r_weights[batch_idx];
//float x = -out_r_act[label_idx] * r_weights[batch_idx];
float x = -out_r_act[label_idx];
out_r_loss[batch_idx] = x;
}
}
// Host launcher for softmax_kl_loss_r_fwd_batch_kernel: one thread per batch
// element. Note this launcher uses 128-thread blocks (unlike its siblings,
// which use 1024), matching the original configuration.
extern "C" void rembrandt_kernel_softmax_kl_loss_r_fwd_batch(
    const float *out_r_act,
    int dim,
    int batch_size,
    const int32_t *label_cats,
    //const float *r_weights,
    float *out_r_loss,
    cudaStream_t stream)
{
  const int threads = 128;
  const int blocks = (batch_size + threads - 1) / threads; // ceil-div
  softmax_kl_loss_r_fwd_batch_kernel<<<blocks, threads, 0, stream>>>(
      out_r_act, dim, batch_size, label_cats,
      //r_weights,
      out_r_loss);
}
|
14,654 | #include <iostream>
#include <cub/cub.cuh>
#include <cub/util_type.cuh>
#include<bitset>
// Butterfly-counting kernel (CPU+GPU hybrid) — STUB: only the per-block
// shared accumulator initialization exists; the counting logic is absent.
// NOTE(review): no __syncthreads() after thread 0 zeroes sharedCount, so any
// later readers would race — must be added before the kernel body is filled in.
__global__
void hashBasedButterflyCounting_CPUGPU(int *directNB,long long *par_beginPos, int *edgeList, long long edge_num,long long edge_addr,int uCount, int vCount, unsigned long long* globalCount, int* hashTable, int startVertex, int endVertex)
{
// Block-level partial butterfly count, zeroed by thread 0 only.
__shared__ unsigned long long sharedCount;
if (threadIdx.x==0) sharedCount=0;
} |
14,655 | #include <algorithm>
#include <iterator>
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <cuda_runtime.h>
using namespace std;
using namespace std::chrono;
#define ll long long
const int BLOCK_DIM = 8;
// Prints the full numrows x numcols matrix, one row per line, values
// separated by single spaces. (Host-only helper.)
__host__ void print_data(unsigned ll* matrix, const int numrows, const int numcols)
{
    for (int r = 0; r < numrows; ++r) {
        for (int c = 0; c < numcols; ++c)
            std::cout << matrix[r * numcols + c] << ' ';
        std::cout << std::endl;
    }
}
// Tiled matrix transpose: AT (numcols x numrows) = transpose of A
// (numrows x numcols). Launch with blockDim = (BLOCK_DIM, BLOCK_DIM) and a
// grid covering A in BLOCK_DIM tiles. The tile is staged through shared
// memory so both the global load and the global store are coalesced along
// threadIdx.x.
__global__ void transposeCoalesced(const unsigned ll* A, unsigned ll* AT, const int numrows, const int numcols)
{
    __shared__ unsigned ll tile[BLOCK_DIM][BLOCK_DIM + 1]; // +1 pad avoids shared-memory bank conflicts
    int j = blockIdx.x * BLOCK_DIM + threadIdx.x; // column in A
    int i = blockIdx.y * BLOCK_DIM + threadIdx.y; // row in A
    // Bug fix: the original guard used `<=` (reads one element past each
    // dimension) and wrapped __syncthreads() inside the divergent branch.
    if (i < numrows && j < numcols) {
        tile[threadIdx.y][threadIdx.x] = A[i * numcols + j];
    }
    // The barrier must be reached by every thread of the block, so it sits
    // outside the bounds check.
    __syncthreads();
    // Block (bx,by) of A becomes block (by,bx) of AT.
    int tj = blockIdx.y * blockDim.x + threadIdx.x; // column in AT
    int ti = blockIdx.x * blockDim.y + threadIdx.y; // row in AT
    if (ti < numcols && tj < numrows) {
        AT[ti * numrows + tj] = tile[threadIdx.x][threadIdx.y];
    }
}
// Builds a 16x8 test matrix (value = 1D index + 1), transposes it on the CPU
// (reference) and on the GPU, and prints all three matrices for comparison.
__host__ void Test_Transpose()
{
    const int numcols = 8;
    const int numrows = 16;
    unsigned ll* data_host = new unsigned ll[numrows * numcols];
    for (int i = 0; i < numrows; i++) {
        for (int j = 0; j < numcols; j++) {
            data_host[i * numcols + j] = i * numcols + j + 1;
        }
    }
    printf("Data before transpose\n");
    print_data(data_host, numrows, numcols);
    // **** CPU TRANSPOSE (reference) **** //
    unsigned ll *transpose_host = new unsigned ll[numcols * numrows];
    for (int i = 0; i < numrows; i++) {
        for (int j = 0; j < numcols; j++) {
            transpose_host[j * numrows + i] = data_host[i * numcols + j];
        }
    }
    printf("\nData after CPU transpose\n");
    print_data(transpose_host, numcols, numrows);
    // **** GPU TRANSPOSE **** //
    memset(transpose_host, 0x00, numrows * numcols * sizeof(unsigned ll));
    unsigned ll* data_gpu;
    cudaMalloc((void**)&data_gpu, numrows * numcols * sizeof(unsigned ll));
    cudaMemcpy(data_gpu, data_host, numrows * numcols * sizeof(unsigned ll), cudaMemcpyHostToDevice);
    unsigned ll *transpose_gpu;
    cudaMalloc((void**)&transpose_gpu, numrows * numcols * sizeof(unsigned ll));
    const int block_size = BLOCK_DIM;
    // Fix: round the grid up in both dimensions so edge tiles are covered
    // even when the matrix size is not a multiple of BLOCK_DIM (the original
    // truncated block_num_x and its comment was wrong).
    const int block_num_x = (numcols + block_size - 1) / block_size;
    const int block_num_y = (numrows + block_size - 1) / block_size;
    transposeCoalesced<<<dim3(block_num_x, block_num_y), dim3(block_size, block_size)>>>
        (data_gpu, transpose_gpu, numrows, numcols);
    cudaMemcpy(transpose_host, transpose_gpu, numrows * numcols * sizeof(unsigned ll), cudaMemcpyDeviceToHost);
    printf("\nData after GPU transpose\n");
    print_data(transpose_host, numcols, numrows);
    // Bug fix: the original leaked both host buffers and both device buffers.
    cudaFree(data_gpu);
    cudaFree(transpose_gpu);
    delete[] data_host;
    delete[] transpose_host;
}
// Entry point: runs the CPU-vs-GPU transpose demo.
int main()
{
Test_Transpose();
return 0;
}
|
14,656 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
// Compares 'attempt' against four hard-coded 4-character passwords, printing
// a message for any match. NOTE(review): only a match with password4 ("TA88")
// returns 1; matches with passwords 1-3 print and then fall through, so the
// function still returns 0 for them — likely unintended asymmetry, confirm.
__device__ int is_a_match(char *attempt) {
char password1[] = "DV78";
char password2[] = "ER87";
char password3[] = "GS58";
char password4[] = "TA88";
// Four independent cursors over the same attempt string, one per password.
char *d = attempt;
char *i = attempt;
char *p = attempt;
char *s = attempt;
char *psw1 = password1;
char *psw2 = password2;
char *psw3 = password3;
char *psw4 = password4;
// Each loop advances while characters match; reaching '\0' on both strings
// simultaneously means a full match.
while(*d == *psw1) {
if(*d == '\0')
{
printf("Found password: %s\n",password1);
break;
}
d++;
psw1++;
}
while(*i == *psw2) {
if(*i == '\0')
{
printf("Found password: %s\n",password2);
break;
}
i++;
psw2++;
}
while(*p == *psw3) {
if(*p == '\0')
{
printf("Found password: %s\n",password3);
break;
}
p++;
psw3++;
}
while(*s == *psw4) {
if(*s == '\0')
{
printf("Found password: %s\n",password4);
return 1; // only this password reports success to the caller
}
s++;
psw4++;
}
return 0;
}
/****************************************************************************
 Brute-force kernel. The launch <<<26,26>>> maps blockIdx.x to the first
 letter (A-Z) and threadIdx.x to the second; each thread then enumerates all
 100 digit pairs for positions 3-4 and tests each 4-char candidate with
 is_a_match. (The original comment claiming a single thread was stale.)
*****************************************************************************/
__global__ void kernel() {
char e,h;
char password[5];
password[4] = '\0'; // NUL terminator for the 4-char candidate
int i = blockIdx.x+65;  // 65 == 'A'
int j = threadIdx.x+65;
char firstValue = i;
char secondValue = j;
password[0] = firstValue;
password[1] = secondValue;
// Enumerate both digit positions '0'..'9'.
for(e='0'; e<='9'; e++){
for(h='0'; h<='9'; h++){
password[2] = e;
password[3] = h;
if(is_a_match(password)) {
//printf("Success");
}
else {
//printf("tried: %s\n", password);
}
}
}
}
// Stores (finish - start) in nanoseconds into *difference.
// Returns 0 when the interval is positive, non-zero otherwise (validity flag).
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
  long long int sec = finish->tv_sec - start->tv_sec;
  long long int nsec = finish->tv_nsec - start->tv_nsec;
  // Borrow one second when the nanosecond part wraps negative.
  if (nsec < 0) {
    sec--;
    nsec += 1000000000;
  }
  *difference = sec * 1000000000 + nsec;
  return !(*difference > 0);
}
// Launches the brute-force kernel (26 blocks x 26 threads = one thread per
// letter pair) and reports the wall-clock time via CLOCK_MONOTONIC.
int main() {
  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);
  kernel <<<26,26>>>();
  // Fix: cudaThreadSynchronize() is deprecated (and removed in recent CUDA
  // toolkits); cudaDeviceSynchronize() is the supported equivalent.
  cudaDeviceSynchronize();
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
  return 0;
}
|
14,657 | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
// Sum of 'length' samples read from x with a fixed stride of 'step' elements.
__device__ double calSum(const double *x, const int length, const int step)
{
    double total = 0;
    for (int k = 0; k < length; ++k)
        total += x[k * step];
    return total;
}
// Mean of the strided series: calSum divided by the sample count.
__device__ double calAvg(const double *x, const int length, const int step)
{
    const double total = calSum(x, length, step);
    return total / length;
}
// Sum of squares of 'length' strided samples of x.
__device__ double calSquareSum(const double *x, const int length, const int step)
{
    double total = 0;
    for (int k = 0; k < length; ++k) {
        const double v = x[k * step];
        total += v * v;
    }
    return total;
}
// Inner product of two strided series: sum over k of x[k*step]*y[k*step].
__device__ double calMultiplySum(const double *x, const double *y, const int length, const int step)
{
    double total = 0;
    for (int k = 0; k < length; ++k)
        total += x[k * step] * y[k * step];
    return total;
}
// Sample standard deviation (n-1 denominator) of the strided series,
// computed as sqrt((sum(x^2) - n*mean^2) / (n-1)).
__device__ double calStd(const double *x, const int length, const int step)
{
    const double mean = calAvg(x, length, step);
    const double sq = calSquareSum(x, length, step);
    return sqrt((sq - length * mean * mean) / (length - 1));
}
// Pearson correlation coefficient of two strided series:
// (sum(xy) - n*mean_x*mean_y) / ((n-1) * std_x * std_y).
__device__ double calCorrCoef(const double *x, const double *y, const int length, const int step)
{
    const double cross = calMultiplySum(x, y, length, step);
    const double mean_x = calAvg(x, length, step);
    const double mean_y = calAvg(y, length, step);
    const double denom = (length - 1) * calStd(x, length, step) * calStd(y, length, step);
    return (cross - length * mean_x * mean_y) / denom;
}
// Fisher z-transform: z = 0.5 * log((1+x)/(1-x)) == atanh(x).
// NOTE(review): the time_size parameter is unused here; the original MATLAB
// (see commented formula in the commented-out main) divided by
// 1/sqrt(n/2.34-3) — confirm whether that scaling was dropped intentionally.
__device__ double calFisherTransform(const double x, const int time_size)
{
// z=0.5.*log((1+rr)./(1-rr));
return 0.5 * log((1+x) / (1-x));
}
// Inverse Fisher z-transform: (exp(2x)-1)/(exp(2x)+1), mathematically tanh(x).
__device__ double calInverseFisherTransform(const double x)
{
    // zm = (exp(2.*zm)-1)./(exp(2.*zm)+1); evaluate exp once.
    const double e2x = exp(2 * x);
    return (e2x - 1) / (e2x + 1);
}
// One thread per (matrix, upper-triangle pair): computes the Fisher-z of the
// Pearson correlation between subjects x and y of one repeat's data matrix.
// Output is the flattened strict upper triangle, repeat-major.
// Data layout: all_data_matrix is [repeat][time][subject] (subject fastest),
// hence step = subject_size when walking one subject's time series.
__global__ void calculateCorrelationCoefficientMatrix(double *all_corr_coef_matrix, const double *all_data_matrix, const int subject_size, const int time_size, const int repeat_times)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int matrix_works = subject_size * (subject_size - 1) / 2; // pairs per matrix
if (idx >= repeat_times * matrix_works)
return;
const int n_matrix = idx / matrix_works;    // which repeat
int remain_works = idx % matrix_works;      // which pair within the repeat
// Decode the linear pair index into upper-triangle coordinates (x, y), x < y.
int x = 1, y = 1;
for(int i = 0; i < subject_size; i ++) {
const int row_works = subject_size - i - 1;
if (remain_works < row_works) {
x = i;
y = i + 1 + remain_works;
break;
}
remain_works -= row_works;
}
const double *data_matrix = all_data_matrix + n_matrix * time_size * subject_size;
// Columns x and y of the [time][subject] matrix, strided by subject_size.
const double coef = calCorrCoef(data_matrix + x, data_matrix + y, time_size, subject_size);
const double zvalue = calFisherTransform(coef, time_size);
all_corr_coef_matrix[idx] = zvalue;
}
// One thread per repeat: averages that repeat's Fisher-z values over all
// subject pairs and maps the mean back through the inverse Fisher transform,
// yielding one inter-subject-correlation value per repeat.
__global__ void calculateInterSubjectCorrelation(double *isc_array, const double *all_corr_coef_matrix, const int subject_size, const int repeat_times)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= repeat_times)
return;
const int matrix_works = subject_size * (subject_size - 1) / 2; // pairs per repeat
const double *corr_coef_matrix = all_corr_coef_matrix + idx * matrix_works;
double sum = 0;
for (int i = 0; i < matrix_works; i++)
sum += corr_coef_matrix[i];
const double mean = sum / matrix_works;
isc_array[idx] = calInverseFisherTransform(mean);
}
// Permutes source_matrix from layout [subject][repeat][time] to
// data_matrix layout [repeat][time][subject]. One thread per element,
// indexed by the source's flat order.
__global__ void rearrangeMatrixPosition(double *data_matrix, const double *source_matrix, const int subject_size, const int time_size, const int repeat_times)
{
// 1st subject_size, 2nd repeat_times, 3rd time_size
// to
// 1st repeat_times, 2nd time_size, 3rd subject_size
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= repeat_times * time_size * subject_size)
return;
// Decode the source's (subject, repeat, time) coordinates from idx.
const int subject_idx = idx / (repeat_times * time_size);
const int repeat_idx = (idx % (repeat_times * time_size)) / time_size;
const int time_idx = (idx % (repeat_times * time_size)) % time_size;
// Re-encode into the destination's [repeat][time][subject] order.
const int data_idx = repeat_idx * time_size * subject_size + time_idx * subject_size + subject_idx;
data_matrix[data_idx] = source_matrix[idx];
}
// Debug helper: copies a first x second x third device array to the host and
// prints it, slice by slice, with "%" prefixes (MATLAB-comment friendly).
void printMatrix(const double *data, const int first, const int second, const int third)
{
double *tmp = (double *)malloc(sizeof(double) * first * second * third);
cudaMemcpy(tmp, data, sizeof(double) * first * second * third, cudaMemcpyDeviceToHost);
printf("%% 1st:%d 2nd:%d 3rd:%d\n", first, second, third);
for (int i = 0; i < first; i++ ) {
for (int j = 0; j < second; j++ ) {
printf("%% ");
for (int k = 0; k < third; k++ ) {
printf("%f ", tmp[i*second*third + j*third + k]);
}
printf("\n");
}
printf("%%\n");
}
free(tmp);
}
// Host driver: given device data d_aaft_matrix in [subject][repeat][time]
// layout, computes one inter-subject-correlation value per repeat into
// d_isc_array. Pipeline: (1) permute to [repeat][time][subject], (2) per-pair
// Fisher-z correlations, (3) per-repeat mean + inverse Fisher transform.
// Allocates and frees its own intermediate device buffers.
void correlationCoefficient(double *d_isc_array, const double *d_aaft_matrix, const int subject_size, const int time_size, const int repeat_times)
{
// std::clock_t start;
const int blocksize = 128;
int total_works, nblock;
double *d_data_matrix, *d_coef_matrix;
cudaMalloc(&d_data_matrix, sizeof(double) * repeat_times * time_size * subject_size);
cudaMalloc(&d_coef_matrix, sizeof(double) * repeat_times * subject_size * (subject_size - 1) / 2);
cudaCheckErrors("cudaMalloc");
// start = std::clock();
// Step 1: one thread per element of the permuted tensor.
total_works = repeat_times * subject_size * time_size;
nblock = total_works/blocksize + (total_works%blocksize==0?0:1);
rearrangeMatrixPosition<<<nblock, blocksize>>>(d_data_matrix, d_aaft_matrix, subject_size, time_size, repeat_times);
cudaDeviceSynchronize();
cudaCheckErrors("rearrangeMatrixPosition");
// printf("%% transposeMatrix: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
// start = std::clock();
// Step 2: one thread per (repeat, subject-pair).
total_works = repeat_times * subject_size * (subject_size - 1) / 2;
nblock = total_works/blocksize + (total_works%blocksize==0?0:1);
calculateCorrelationCoefficientMatrix<<<nblock, blocksize>>>(d_coef_matrix, d_data_matrix, subject_size, time_size, repeat_times);
cudaDeviceSynchronize();
cudaCheckErrors("calculateCorrelationCoefficientMatrix");
// printf("%% calculateCorrelationCoefficientMatrix: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
// start = std::clock();
// Step 3: one thread per repeat.
nblock = repeat_times/blocksize + (repeat_times%blocksize==0?0:1);
calculateInterSubjectCorrelation<<<nblock, blocksize>>>(d_isc_array, d_coef_matrix, subject_size, repeat_times);
cudaDeviceSynchronize();
cudaCheckErrors("calculateInterSubjectCorrelation");
// printf("%% calculateInterSubjectCorrelation: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
// {
// printMatrix(d_aaft_matrix, subject_size, repeat_times, time_size);
// printMatrix(d_data_matrix, repeat_times, time_size, subject_size);
// double *h_data_matrix, *h_coef_matrix, *h_isc_array;
// h_data_matrix = (double *)malloc(sizeof(double) * repeat_times * subject_size * time_size);
// h_coef_matrix = (double *)malloc(sizeof(double) * total_works);
// h_isc_array = (double *)malloc(sizeof(double) * repeat_times);
// cudaMemcpy(h_data_matrix, d_data_matrix, sizeof(double) * repeat_times * subject_size * time_size, cudaMemcpyDeviceToHost);
// cudaMemcpy(h_coef_matrix, d_coef_matrix, sizeof(double) * total_works, cudaMemcpyDeviceToHost);
// cudaMemcpy(h_isc_array, d_isc_array, sizeof(double) * repeat_times, cudaMemcpyDeviceToHost);
// const int idx = rand() % repeat_times;
// printf("%% idx: %d\n", idx);
// printf("data = [ ");
// for(int i = 0; i < time_size; i++) {
// for(int j = 0; j < subject_size; j++) {
// printf("%f ", h_data_matrix[idx * time_size * subject_size + i * subject_size + j]);
// }
// printf(";");
// }
// printf("];\n");
// printf("tmp=tril(corrcoef(data),-1);\n");
// printf("rr=tmp(find(tmp));\n");
// printf("z=0.5.*log((1+rr)./(1-rr))./(1/sqrt(size(data,1)/2.34-3));\n");
// printf("zm=mean(z)\n");
// printf("z'\n");
// printf("exit;\n");
// printf("%% coef_matrix: ");
// const int matrix_works = subject_size * (subject_size - 1) / 2;
// for (int i = 0; i < matrix_works; i++ )
// printf("%f ", h_coef_matrix[idx * matrix_works + i]);
// printf("\n");
// printf("%% --------------------\n");
// printf("%% mean: %f\n", h_isc_array[idx]);
// }
cudaFree(d_data_matrix);
cudaFree(d_coef_matrix);
}
// int main(int argc, char **argv)
// {
// srand(time(NULL));
// std::clock_t start;
//
// const int subject_size = 8, time_size = 440, repeat_times = 10000;
//
// double *h_data_matrix;
// double *d_data_matrix, *d_isc_array;
//
// h_data_matrix = (double *)malloc(sizeof(double) * repeat_times * time_size * subject_size);
//
// start = std::clock();
// for(int i = 0; i < repeat_times; i++)
// for(int j = 0; j < time_size; j++)
// for(int k = 0; k < subject_size; k++)
// h_data_matrix[i * time_size * subject_size + j * subject_size + k] = rand();
// printf("%% Generating data: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
//
// cudaMalloc(&d_data_matrix, sizeof(double) * repeat_times * time_size * subject_size);
// cudaMalloc(&d_isc_array, sizeof(double) * repeat_times);
// cudaCheckErrors("cudaMalloc");
//
// cudaMemcpy(d_data_matrix, h_data_matrix, sizeof(double) * repeat_times * subject_size * time_size, cudaMemcpyHostToDevice);
// cudaCheckErrors("cudaMemcpy");
//
// start = std::clock();
// correlationCoefficient(d_isc_array, d_data_matrix, subject_size, time_size, repeat_times);
// printf("%% correlationCoefficient: %fs\n", (std::clock() - start) / (double) CLOCKS_PER_SEC);
//
// free(h_data_matrix);
// cudaFree(d_data_matrix);
// cudaFree(d_isc_array);
// cudaCheckErrors("cudaFree");
//
// return 0;
// }
|
14,658 | #include <cuda.h>
#include <stdio.h>
// Minimal smoke-test kernel: prints a marker from the device.
__global__ void K() {
printf("in K\n");
}
// Launches K with a single thread and waits so the device printf flushes.
int main() {
K<<<1, 1>>>();
cudaDeviceSynchronize();
return 0;
}
|
14,659 | //#include "GpGpu/GpGpu_ParamCorrelation.cuh"
//#include "GpGpu/GpGpu_TextureTools.cuh"
//#include "GpGpu/GpGpu_TextureCorrelation.cuh"
//#include "GpGpu/SData2Correl.h"
|
14,660 | //
// Il est demandéd'expliquer l'ensemble des instructions
// prcdes par un TODO: ?
//
#include<iostream>
__global__ void addKernel(int *c, const int *a, const int *b);
// Vector-addition teaching example: adds two 5-element host vectors on the
// GPU and prints the result. (No error checking — acceptable for a demo.)
int main(int argc, char** argv)
{
const int size = 5;
// Host-side input vectors and zero-initialized output vector.
const int h_a[size] = { 1, 2, 3, 4, 5 };
const int h_b[size] = { 10, 20, 30, 40, 50 };
int h_c[size] = { 0 };
// Select GPU 0 as the active device.
cudaSetDevice(0);
// Device-side pointers for the two inputs and the output.
int *d_a = 0;
int *d_b = 0;
int *d_c = 0;
// Allocate device memory for each vector.
cudaMalloc((void**)&d_a, size * sizeof(int));
cudaMalloc((void**)&d_b, size * sizeof(int));
cudaMalloc((void**)&d_c, size * sizeof(int));
// Copy the inputs from host to device.
cudaMemcpy(d_a, h_a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size * sizeof(int), cudaMemcpyHostToDevice);
// Launch configuration: 1 block of 'size' threads (one thread per element).
dim3 grdDim = dim3(1,1,1);
dim3 blkDim = dim3(size,1,1);
// Launch the addition kernel.
addKernel <<<grdDim, blkDim >>>(d_c, d_a, d_b);
// Wait for the kernel to finish before reading results.
cudaDeviceSynchronize();
// Copy the result back from device to host.
cudaMemcpy(h_c, d_c, size * sizeof(int), cudaMemcpyDeviceToHost);
// Release device memory.
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Print the results
std::cout << "{1,2,3,4,5} + {10,20,30,40,50} = {" << h_c[0] << "," << h_c[1] << "," << h_c[2] << "," << h_c[3] << "," << h_c[4] << "}" << std::endl;
return 0;
}
// Element-wise vector addition kernel: c[i] = a[i] + b[i].
__global__ void addKernel(int *c, const int *a, const int *b)
{
// Each thread handles the element at its thread index; the launch uses a
// single block of exactly 'size' threads, so no bounds check is needed.
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
|
14,661 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
// Current wall-clock time in seconds (gettimeofday, microsecond resolution).
double dwalltime(){
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec + now.tv_usec / 1000000.0;
}
// Out-of-place transpose of A and B in one pass over an n x n grid:
// AT[j][i] = A[i][j] and BT[j][i] = B[i][j] (row-major, stride n).
// Thread y -> row i, thread x -> column j.
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  // Bug fix: the original guarded each index against n*n, which does not
  // stop out-of-range rows/columns when the grid overshoots; each index must
  // be bounded by n.
  if ((i < n) && (j < n)) {
    d_matAT[j*n + i] = d_matA[i*n + j];
    d_matBT[j*n + i] = d_matB[i*n + j];
  }
}
// Accumulates C[i][j] += sum_k A[i][k]*Bt[k][j] + B[i][j] + At[i][j], i.e.
// C = A*Bt + B + At on top of whatever C already holds (C must be
// pre-initialized, typically to zero). Uses the transposed copies produced
// by ecuacion_kernel_outplace_p1.
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  // Bug fix: the flat guard (i*n + j <= n*n - 1) admits threads with j >= n
  // whose flat index still lands in range, reading/writing the wrong row;
  // each index must be bounded by n.
  if ((i < n) && (j < n)) {
    // matrix product row i of A with column j of Bt
    for (int k = 0; k < n; k++) {
      d_matC[i*n+j] += d_matA[i*n+k] * d_matBT[j + k*n];
    }
    // additive terms
    d_matC[i*n+j] += d_matB[i*n+j] + d_matAT[i*n+j];
  }
}
// Single-kernel variant: C[i][j] += B[i][j] + A[j][i] (A read transposed)
// then C[i][j] += sum_k A[i][k]*B[j][k] (rows of B act as columns of Bt).
// NOTE: this kernel is not called from main in this file.
__global__ void ecuacion_kernel_inplace_suma (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  // Bug fix: the original guard (i*n + j < n*n - 1) skipped the final
  // element C[n-1][n-1]; also bound each index by n rather than the flat
  // index by n*n.
  if ((i < n) && (j < n)) {
    d_matC[i*n+j] += d_matB[i*n+j] + d_matA[i + j*n]; // A accessed transposed
    for (int k = 0; k < n; k++) {
      d_matC[i*n+j] += d_matA[i*n+k] * d_matB[j*n+k];
    }
  }
}
// Accumulates C[i][j] += A[i][j] + B[j][i] (B read transposed). In main this
// runs after A has been transposed in place, so it effectively adds At + B.
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  // Bug fix: guard each index by n instead of the flat index by n*n, so
  // threads with j >= n cannot alias a neighboring row.
  if ((i < n) && (j < n)) {
    d_matC[i*n+j] += d_matA[i*n+j] + d_matB[i + j*n];
  }
}
// In-place transpose. Each thread tid maps to one element (i, j) of the
// strict lower triangle via the triangular-number inverse and swaps it with
// its mirror (j, i).
// NOTE(review): sqrtf on (1 + 8*tid) loses precision for very large tid —
// TODO confirm acceptable for the N values used here.
__global__ void kernel_transpuesta(double *m, int N){
  int tid = blockIdx.x*blockDim.x + threadIdx.x;
  int i = int((1 + sqrtf(1 + 8*tid)) / 2);
  int j = tid - (i*(i-1)/2);
  // Bug fix: 'aux' was declared int, silently truncating the double matrix
  // values during the swap.
  double aux;
  if ( (i<N) && (j<N) ){
    aux = m[i*N + j];
    m[i*N + j] = m[j*N + i];
    m[j*N + i] = aux;
  }
}
// Accumulates the matrix product C[i][j] += sum_k A[i][k]*B[j][k]; reading
// row j of B is equivalent to column j of Bt, so this computes C += A*Bt.
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  // Bug fix: guard each index by n instead of the flat index by n*n, so
  // threads with j >= n cannot compute against the wrong row.
  if ((i < n) && (j < n)) {
    for (int k = 0; k < n; k++) {
      d_matC[i*n+j] += d_matA[i*n+k] * d_matB[j*n+k];
    }
  }
}
// Benchmark of C = A*Bt + B + At on an N x N double matrix, three ways:
// sequential CPU, GPU with explicit transposed copies ("out-place"), and GPU
// with an in-place transpose. Usage: prog N CUDA_BLK.
// NOTE(review): grid size sqrt(N*N/CUDA_BLK^2) truncates — assumes N is a
// multiple of CUDA_BLK; confirm callers respect that.
// NOTE(review): cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
// is the supported replacement.
int main(int argc, char *argv[]){
if (argc != 3){
printf("Falta argumento: N, CUDABLK\n");
return 0;
}
// variable declarations
cudaError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
unsigned long numBytes = sizeof(double)*N*N;
double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
unsigned int i,j,k;
// initialize host matrices (A = B = index values, C = 0)
matA = (double *)malloc(numBytes);
matB = (double *)malloc(numBytes);
matC = (double *)malloc(numBytes);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
// initialize device buffers and launch geometry
cudaMalloc((void **) &d_matA, numBytes);
cudaMalloc((void **) &d_matAT, numBytes);
cudaMalloc((void **) &d_matB, numBytes);
cudaMalloc((void **) &d_matBT, numBytes);
cudaMalloc((void **) &d_matC, numBytes);
gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
dim3 dimBlock(CUDA_BLK,CUDA_BLK); // 2-D thread block (CUDA_BLK^2 threads)
dim3 dimGrid(gridBlock,gridBlock); // 2-D grid covering the N x N matrix
//-------------------------------- CPU section begins ------------------------------------
// sequential reference
timetick = dwalltime();
// product: row access of B simulates B transposed
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
for(k = 0; k < N ;k++){
matC[i*N+j] += matA[i*N+k] * matB[j*N+k];
}
}
}
// additive terms: B plus A transposed
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
}
}
printf("Tiempo para la ecuacion CPU: %f\n\n",dwalltime() - timetick);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//-------------------------------- CPU section ends ------------------------------------
// reset inputs for the GPU run
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
//-------------------------------- GPU out-place begins ------------------------------------
timetick = dwalltime();
// p1 writes At and Bt, p2 computes C = A*Bt + B + At
ecuacion_kernel_outplace_p1<<<dimGrid, dimBlock>>>(d_matA, d_matAT,d_matB,d_matBT, N);
cudaThreadSynchronize();
ecuacion_kernel_outplace_p2<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
cudaThreadSynchronize();
printf("Tiempo para la ecuacion out-place GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n\n",error);
cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
//-------------------------------- GPU out-place ends ------------------------------------
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
cudaFree(d_matAT);
cudaFree(d_matBT);
// reset inputs and reallocate for the in-place run
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
cudaMalloc((void **) &d_matA, numBytes);
cudaMalloc((void **) &d_matB, numBytes);
cudaMalloc((void **) &d_matC, numBytes);
cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
//-------------------------------- GPU in-place begins ------------------------------------
timetick = dwalltime();
// transpose A in place, add At + B into C, restore A, then add A*Bt
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
cudaThreadSynchronize();
kernel_sum_Matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
cudaThreadSynchronize();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
cudaThreadSynchronize();
kernel_mult_sum_matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
cudaThreadSynchronize();
printf("Tiempo para la ecuacion in-place GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n\n",error);
cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
//-------------------------------- GPU in-place ends ------------------------------------
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
/*
// print matrix matC
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
free(matA);
free(matB);
free(matC);
return 0;
}
|
14,662 | /*
* UnityTest.c
*
* Created on: 04/08/2018
* Author: roussian
*/
#include <stdio.h>
#include <stdlib.h>
#include "UnityTest.cuh"
// Debug validator for a per-block min-heap of top-k documents. Checks that
// no element is smaller than any ancestor on its path to the root, then that
// no document id appears twice in the live region. Returns 0.0 when the heap
// is consistent, otherwise the offending score (first pass) or id (second
// pass). Threads start at the tail of the live region and stride backwards.
// NOTE(review): newScore/docCurrent are used only in the diagnostic printf —
// presumably the insertion that triggered this check; confirm with callers.
__device__ float checkMinHeapProperty(documentTopkList heap, float newScore, int docCurrent,
int topk){
// Live region occupies indices [0, (topk-1) - padding]; each thread walks
// one leaf-side index per stride step.
int index = (topk - 1) - heap.padding - threadIdx.x;
int parent = index;
float result=0.0; //0 - Min heap is correct, non-zero - violation found
for (; index > 0; index-=blockDim.x ) {
// Climb the ancestor chain of 'index' up to the root.
do{
parent = (parent-1)/2;
if(heap.score[index] < heap.score[parent]){
result = heap.score[index];
printf("[HEAP] position - %d | parent's position - %d | newScore - %.2f | docCurrent - %d | padding -%d \n", index, parent, newScore, docCurrent,heap.padding);
break;
}
}while(parent > 0);
if(result != 0.0)
break;
}
// Second pass: duplicate-id scan over the live region.
if(result == 0.0){
index = threadIdx.x;
int max = (topk - 1) - heap.padding;
for (; index < max; index+=blockDim.x ) {
for (int i=index+1; i < max; i++ ) {
if(heap.id[index] == heap.id[i]){
printf("[HEAP] Duplicated document in heap %d | score %.2f | initial position %d | duplicated position %d | blockId %d | docCurrent %d\n",
heap.id[index], heap.score[index], index, i, blockIdx.x, docCurrent);
result = heap.id[index];
}
}
}
}
return result;
}
// Debug validator for one block's slice of the sorted global top-k arrays.
// Checks, in order: (1) heap root equals the first non-padding global entry,
// (2) padding slots hold -1, (3) non-padding slots are filled, (4) no
// duplicate docs globally, (5) every heap entry appears globally with the
// same score, (6) global scores are non-decreasing. Returns 0 on success,
// otherwise an id/flag identifying the first failure this thread found.
// Threads stride over the block's topk-sized slice by blockDim.x.
// NOTE(review): THREAD_MASTER is a project macro (presumably threadIdx.x==0)
// — confirm against the project header. Also note the function returns
// float but accumulates int ids into 'result' (implicit conversion).
__device__ float checkSorting(documentTopkList heap, float *dTopkScoreListGlobal, int *iTopkDocListGlobal, int topk){
int globalIndex = blockIdx.x * topk + heap.padding;
int result=0;
int index,maxIndex;
// (1) the heap root must be the first real (post-padding) global entry
if(THREAD_MASTER){
if(heap.id[0] != iTopkDocListGlobal[globalIndex]){
printf("[SORTING] First doc is not correct in topk: global index %d | blockIdx.x %d | doc %d | global doc %d !\n",
blockIdx.x * topk, blockIdx.x,heap.id[0],iTopkDocListGlobal[globalIndex]);
result = 1;
return result;
}
}
// (2) padding slots must be empty (-1)
int gindex = blockIdx.x * topk + threadIdx.x;
maxIndex = blockIdx.x * topk + heap.padding;
for (int i = gindex; i < maxIndex; i+=blockDim.x) {
if(iTopkDocListGlobal[i] != -1 ){
printf("[SORTING] Error in padding: blockId %d | index %d | document in position %d\n", blockIdx.x, i,iTopkDocListGlobal[i]);
result = iTopkDocListGlobal[i];
return result;
}
}
// (3) non-padding slots must be filled
gindex = blockIdx.x * topk + heap.padding + threadIdx.x;
maxIndex = (blockIdx.x+1) * topk;
for (int i = gindex; i < maxIndex; i+=blockDim.x) {
if(iTopkDocListGlobal[i] == -1 ){
printf("[SORTING] Docs is empty: blockId %d | index %d | document in position %d\n", blockIdx.x, i,iTopkDocListGlobal[i]);
result = iTopkDocListGlobal[i];
return result;
}
}
// (4) no duplicate documents within the block's global slice
maxIndex = topk * (blockIdx.x + 1);
index = topk * blockIdx.x + heap.padding + threadIdx.x;
for (; index < maxIndex; index+=blockDim.x ) {
for (int i=index+1; i < maxIndex; i++ ) {
if(iTopkDocListGlobal[index] == iTopkDocListGlobal[i]){
printf("[SORTING] Duplicate document in global topk list %d - score %.2f \n", iTopkDocListGlobal[index], dTopkScoreListGlobal[index]);
result = iTopkDocListGlobal[index];
return result;
}
}
}
// (5) each heap entry must appear globally with an identical score
index=threadIdx.x;
globalIndex = blockIdx.x * topk;
maxIndex = topk * (blockIdx.x + 1);
for (; index < topk-heap.padding; index += blockDim.x) {
int count = 0;
for (int i = globalIndex; i < maxIndex; ++i) {
if(heap.id[index] == iTopkDocListGlobal[i]){
count++;
if(heap.score[index] != dTopkScoreListGlobal[i]){
printf("[SORTING] document's score is wrong! doc %d | score %.2f | local index %d | global index %d\n",
heap.id[index], heap.score[index], index, i);
result = heap.id[index];
return result;
}
break;
}
}
if(count == 0){
printf("[SORTING] Document disappeared: doc %d | score %.2f | local index %d\n",
heap.id[index], heap.score[index], index);
result = heap.id[index];
return result;
}
}
// (6) scores must be in non-decreasing order
index = blockIdx.x * topk + heap.padding + threadIdx.x;
maxIndex = topk * (blockIdx.x + 1);
for (; index < maxIndex; index+=blockDim.x ) {
for (int i=index+1; i < maxIndex; i++ ) {
if(dTopkScoreListGlobal[index] > dTopkScoreListGlobal[i]){
printf("[SORTING] BlockIdx %d | Documents are not sorting!!! doc %d (%.2f) is greater than doc %d (%.2f)\n",
blockIdx.x,iTopkDocListGlobal[index],dTopkScoreListGlobal[index],iTopkDocListGlobal[i],dTopkScoreListGlobal[i]);
result = iTopkDocListGlobal[index] ;
return result;
}
}
}
return result;
}
// Debug validator for a merged top-k list: every entry must have a
// consistent (id, score) pair (-1 marks an empty slot with score 0.0), no
// document may appear twice, and non-empty scores must be non-decreasing.
// Returns 0.0 when consistent, otherwise the id of the first offending
// document found by this thread. Threads stride over the list by blockDim.x.
__device__ float checkMerge_Sorting_Documents(documentTopkList sortingList,int iSkipMerges,
        int iSkipBlocks, int topk){
    float result = 0.0;
    float score;
    // Pass 1: id/score consistency and duplicate detection.
    for (int i = threadIdx.x; i < topk; i+=blockDim.x) {
        int doc = sortingList.id[i];
        if((doc != -1 && sortingList.score[i] == 0.0) || (doc == -1 && sortingList.score[i] != 0.0) )
            // Bug fix: the first two printf arguments were swapped relative
            // to the format string (BlockId printed iSkipBlocks and
            // SkipBlocks printed blockIdx.x); the later printfs below show
            // the intended order.
            printf("[MERGE] BlockId %d | SkipBlocks %d | SkipMerges %d | Document is inconsistent: doc %d (%.2f - %d)",
                    blockIdx.x, iSkipBlocks, iSkipMerges, doc, sortingList.score[i], i);
        if(doc != -1)
            for (int j = i+1; j < topk; ++j) {
                if(sortingList.id[j] != -1 && sortingList.id[j] == doc){
                    printf("[MERGE] BlockId %d | SkipBlocks %d | SkipMerges %d | Duplicated Document: doc %d (%.2f - %d) - doc %d (%.2f - %d)\n",
                            blockIdx.x, iSkipBlocks, iSkipMerges, sortingList.id[i], sortingList.score[i], i, sortingList.id[j], sortingList.score[j], j);
                    result = sortingList.id[i];
                    return result;
                }
            }
    }
    // Pass 2: non-empty scores must be in non-decreasing order.
    for (int i = threadIdx.x; i < topk; i+=blockDim.x) {
        score = sortingList.score[i];
        if(score != 0.0)
            for (int j = i+1; j < topk; ++j) {
                if(sortingList.score[j] != 0.0 && score > sortingList.score[j]){
                    printf("[MERGE] BlockId %d | SkipBlocks %d | SkipMerges %d | Documents are not sorting!!! doc %d (%.2f - %d) is greater than doc %d (%.2f - %d)\n",
                            blockIdx.x, iSkipBlocks, iSkipMerges, sortingList.id[i], sortingList.score[i], i, sortingList.id[j], sortingList.score[j], j);
                    result = sortingList.id[i];
                    return result;
                }
            }
    }
    return result;
}
|
14,663 | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <cuda.h>
#include <time.h>
// threads per block used for every kernel launch in this file
#define THREADSPERBLOCK 64
// ----- input graph: plain edge list -----
struct edge{
int v; // first endpoint; 1-based (kernels below index arrays with v - 1)
int u; // second endpoint; 1-based
int weight; // edge weight
};
struct graph{
int num_edges;
int num_vertices;
struct edge* edges; // array of num_edges edges
};
// ----- bipartite graph: A-side = original vertices, B-side = original edges -----
struct b_vertex_a{
int v;
int small_edge; // NOTE(review): never referenced anywhere in this file — confirm it is needed
};
struct b_vertex_b{
int e; // index of the original edge this B-vertex represents
};
struct b_edge{
int v; // A-side endpoint (original vertex, 1-based)
int u; // B-side endpoint (original edge index)
int cv; // index of the twin bipartite edge built from the same original edge
int weight; // weight copied from the original edge
};
struct b_graph{
int num_vertex_a;
int num_vertex_b;
int num_bipartite_edges; // always 2 * num_vertex_b (two edges per original edge)
struct b_vertex_a* vertices_a;
struct b_vertex_b* vertices_b;
struct b_edge* edges;
};
// ----- strut: the smallest incident bipartite edge chosen per A-vertex -----
struct strut_edge{
int v; // A-side vertex (1-based)
int u; // original-edge index the vertex selected
int cv; // vertex on the other end of that original edge
};
struct strut_u_vertex{
int degree; // number of strut edges incident to this original edge
int v1; // first endpoint of the original edge
int v2; // second endpoint of the original edge
int weight; // weight of the original edge
};
struct strut{
int num_v; // number of A-side (vertex) entries
int num_u; // number of B-side (original-edge) entries
int num_strut_edges; // same number as num_v
struct strut_edge* edges;
struct strut_u_vertex* vertices_u; // degree 0 means not in strut, degree > 0 counts incident strut edges
};
// host I/O: parse the input file into *og_graph
void get_graph(struct graph* og_graph, char* input);
// kernel prototypes (definitions below main)
__global__ void get_bipartite_graph(int num_edges, int num_vertices, struct edge* graphEdges, struct b_vertex_a* vetices_a, struct b_vertex_b* vetices_b, struct b_edge* bg_graphEdges) ;
__global__ void get_smallest_edges(int bp_num_edges, int num_smallest_edges, struct b_edge* bg_graphEdges, int* smallest_weights, int* smallest_edges);
__global__ void mst_edges_init(int og_num_edges, bool *mst_edges);
__global__ void get_mst_edges(int num_smallest_edges, int* smallest_edges, struct b_edge* bg_graphEdges, bool *mst_edges);
__global__ void get_num_mst(int og_num_edges, bool *mst_edges, int* num_mst);
// strut construction and graph contraction
__global__ void get_strut_edges(int bg_num_vertices, int* smallest_edges, struct b_edge* bg_graphEdges, strut_edge* strut_edges);
__global__ void strut_u_init(int bg_num_vertex_b, struct strut_u_vertex* vertices_u);
__global__ void get_strut_u_degree(int num_strut_edges, strut_edge* strut_edges, struct strut_u_vertex* vertices_u);
__global__ void get_strut_u_vertices(int bg_num_edges, struct b_edge* bg_graphEdges, struct strut_u_vertex* vertices_u);
__global__ void get_zero_diff_num(int bg_num_vertex_b, struct strut_u_vertex* vertices_u, int* zero_diff_edges);
__global__ void super_vertices_init(int num_strut_vertices, int* super_vertices);
__global__ void get_new_bg_vertex_b(int num_bg_vertexb, int* super_vertices, struct strut_u_vertex* vertices_u, int* new_vertex_b, int* num_newbg_vertexb);
__global__ void prefixCopy(int prefixNum, int* old_prefix, int *new_prefix);
__global__ void getPrefixSum(int* entries, int* entriesC, int d);
__global__ void get_super_vertices(int num_strut_vertices, strut_edge* strut_edges, struct strut_u_vertex* vertices_u, int* super_vertices);
__global__ void get_new_bg_edges(int num_bg_vertex_b, int* new_bg_edges, int* prefixSum, struct strut_u_vertex* vertices_u, int* super_vertices, struct b_edge* bg_graphEdges, int * max_super_vertex);
__global__ void init_smallest_edges_weights(int num_edges, int *smallest_weights, int* smallest_edges);
/* NOTES:
- Remember to free all malloced and cuda malloced variables
- Comment out debugging statements
- Output to file
- find sequential algorithm that outputs result in same way
- compare results with that
- Time the algorithm - put in output
- documentation
- read piazza and term project info for documentation
- prep for presentation
- submit on github - ask chonyang and email garg by thursday morning
*/
// driver
// Driver for a Boruvka-style parallel MST:
//  1) expand the input graph into a bipartite graph on the device,
//  2) repeatedly pick each (super)vertex's lightest incident edge, fold the
//     picked edges into the MST, and contract the graph via supervertices,
//  3) write the selected MST edges to the output file.
// Usage: mst.out <input file> <output file>
// Fixes vs. previous revision: double free of max_super_vertex; cudaFree()
// on the host pointer mst_edges; duplicated cudaFree of new_vertex_b /
// new_num_vertex_b; d_zero_diff_edges and d_max_super_vertex used without
// initialization before atomic updates; per-iteration leaks of
// debugging.edges, new_bg_graph.edges and (on the final pass)
// smallest_weights/smallest_edges; unchecked output fopen. Dead debug-only
// host copies were removed.
int main(int argc, char** argv){
    if(argc != 3){
        printf("mst: incorrect formatting\n");
        printf("Valid input: mst.out <Input file name> <Output file name>\n");
        return 0;
    }
    //***** ACQUIRE INPUT GRAPH *****//
    struct graph og_graph;
    get_graph(&og_graph, argv[1]);
    //***** CREATE BIPARTITE GRAPH *****//
    // every original edge becomes one B-vertex with two bipartite edges
    struct b_graph bg_graph;
    bg_graph.num_vertex_a = og_graph.num_vertices;
    bg_graph.num_vertex_b = og_graph.num_edges;
    bg_graph.num_bipartite_edges = og_graph.num_edges * 2;
    cudaMalloc((void**) &(bg_graph.vertices_a), bg_graph.num_vertex_a * sizeof(struct b_vertex_a));
    cudaMalloc((void**) &(bg_graph.vertices_b), bg_graph.num_vertex_b * sizeof(struct b_vertex_b));
    cudaMalloc((void**) &(bg_graph.edges), bg_graph.num_bipartite_edges * sizeof(struct b_edge));
    struct edge* d_og_edges = NULL;
    cudaMalloc((void**) &(d_og_edges), og_graph.num_edges * sizeof(struct edge));
    cudaMemcpy(d_og_edges, og_graph.edges, og_graph.num_edges * sizeof(struct edge), cudaMemcpyHostToDevice);
    get_bipartite_graph<<<(og_graph.num_edges + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(og_graph.num_edges, og_graph.num_vertices, d_og_edges, bg_graph.vertices_a, bg_graph.vertices_b, bg_graph.edges);
    // host staging struct, reused each iteration to round-trip the
    // contracted edge list through the host
    struct b_graph debugging;
    //***** PER-ITERATION SCRATCH *****//
    int* smallest_weights = NULL;
    int* smallest_edges = NULL;
    //***** SOLUTION SET *****//
    bool* d_mst_edges = NULL;
    bool* mst_edges = (bool*) malloc(og_graph.num_edges * sizeof(bool)); // host copy
    cudaMalloc((void**) &(d_mst_edges), og_graph.num_edges * sizeof(bool));
    mst_edges_init<<<(og_graph.num_edges + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(og_graph.num_edges, d_mst_edges);
    int* solution_size = (int*) malloc(sizeof(int));
    *solution_size = 0;
    int* d_solutionSize = NULL;
    cudaMalloc((void**) &(d_solutionSize), sizeof(int));
    cudaMemcpy(d_solutionSize, solution_size, sizeof(int), cudaMemcpyHostToDevice);
    // largest supervertex label still in play; bounds the per-vertex arrays
    int* max_super_vertex = (int*) malloc(sizeof(int));
    *max_super_vertex = bg_graph.num_vertex_a;
    while(*solution_size < (og_graph.num_vertices - 1)){
        //***** SMALLEST INCIDENT EDGE PER (SUPER)VERTEX *****//
        cudaMalloc((void**) &(smallest_weights), *max_super_vertex * sizeof(int));
        cudaMalloc((void**) &(smallest_edges), *max_super_vertex * sizeof(int));
        init_smallest_edges_weights<<<(*max_super_vertex + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(*max_super_vertex, smallest_weights, smallest_edges);
        get_smallest_edges<<<(bg_graph.num_bipartite_edges + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(bg_graph.num_bipartite_edges, *max_super_vertex, bg_graph.edges, smallest_weights, smallest_edges);
        //***** GROW SOLUTION *****//
        get_mst_edges<<<(*max_super_vertex + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(*max_super_vertex, smallest_edges, bg_graph.edges, d_mst_edges);
        get_num_mst<<<(og_graph.num_edges + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(og_graph.num_edges, d_mst_edges, d_solutionSize);
        cudaMemcpy(mst_edges, d_mst_edges, og_graph.num_edges * sizeof(bool), cudaMemcpyDeviceToHost);
        cudaMemcpy(solution_size, d_solutionSize, sizeof(int), cudaMemcpyDeviceToHost);
        if(*solution_size < (og_graph.num_vertices - 1)){
            //***** BUILD STRUT *****//
            struct strut new_strut;
            new_strut.num_v = bg_graph.num_vertex_a;
            new_strut.num_u = bg_graph.num_vertex_b;
            new_strut.num_strut_edges = bg_graph.num_vertex_a;
            struct strut_edge* d_strut_edges = NULL;
            cudaMalloc((void**) &(d_strut_edges), new_strut.num_v * sizeof(struct strut_edge));
            get_strut_edges<<<(bg_graph.num_vertex_a + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(bg_graph.num_vertex_a, smallest_edges, bg_graph.edges, d_strut_edges);
            // B-side (original edge) strut bookkeeping
            struct strut_u_vertex* d_vertices_u = NULL;
            cudaMalloc((void**) &(d_vertices_u), new_strut.num_u * sizeof(struct strut_u_vertex));
            strut_u_init<<<(new_strut.num_u + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(new_strut.num_u, d_vertices_u);
            get_strut_u_degree<<<(new_strut.num_v + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(new_strut.num_v, d_strut_edges, d_vertices_u);
            get_strut_u_vertices<<<(bg_graph.num_bipartite_edges + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(bg_graph.num_bipartite_edges, bg_graph.edges, d_vertices_u);
            struct strut_u_vertex* vertices_u = (struct strut_u_vertex*) malloc(new_strut.num_u * sizeof(struct strut_u_vertex));
            cudaMemcpy(vertices_u, d_vertices_u, new_strut.num_u * sizeof(struct strut_u_vertex), cudaMemcpyDeviceToHost);
            //***** ZERO-DIFFERENCE COUNT *****//
            int* d_zero_diff_edges = NULL;
            cudaMalloc((void**) &(d_zero_diff_edges), new_strut.num_u * sizeof(int));
            // fix: the counter was used straight out of cudaMalloc, so the
            // kernel's atomicAdds accumulated onto garbage — zero it first
            cudaMemset(d_zero_diff_edges, 0, sizeof(int));
            get_zero_diff_num<<<(new_strut.num_u + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(new_strut.num_u, d_vertices_u, d_zero_diff_edges);
            int* zero_diff_edges = (int*) malloc(sizeof(int));
            cudaMemcpy(zero_diff_edges, d_zero_diff_edges, sizeof(int), cudaMemcpyDeviceToHost);
            //***** SUPERVERTICES *****//
            int* d_super_vertices = NULL;
            cudaMalloc((void**) &(d_super_vertices), bg_graph.num_vertex_a * sizeof(int));
            super_vertices_init<<<(bg_graph.num_vertex_a + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(bg_graph.num_vertex_a, d_super_vertices);
            int* super_vertices = (int*) malloc(bg_graph.num_vertex_a * sizeof(int));
            cudaMemcpy(super_vertices, d_super_vertices, bg_graph.num_vertex_a * sizeof(int), cudaMemcpyDeviceToHost);
            // serial host pass: union the endpoints of every strut-incident
            // original edge under the smaller label
            for(int i = 0; i < new_strut.num_u; i++){
                int super_vertex;
                if(vertices_u[i].degree > 0){ // incident to a strut edge
                    if(super_vertices[vertices_u[i].v1 - 1] < vertices_u[i].v1){
                        super_vertex = super_vertices[vertices_u[i].v1 - 1];
                        super_vertices[vertices_u[i].v1 - 1] = super_vertex;
                        super_vertices[vertices_u[i].v2 - 1] = super_vertex;
                    }
                    else{
                        super_vertex = vertices_u[i].v1;
                        super_vertices[vertices_u[i].v1 - 1] = super_vertex;
                        super_vertices[vertices_u[i].v2 - 1] = super_vertex;
                    }
                }
            }
            cudaMemcpy(d_super_vertices, super_vertices, bg_graph.num_vertex_a * sizeof(int), cudaMemcpyHostToDevice);
            //***** CONTRACT INTO A NEW BIPARTITE GRAPH *****//
            struct b_graph new_bg_graph;
            new_bg_graph.num_vertex_a = *zero_diff_edges;
            int* new_num_vertex_b = NULL;
            cudaMalloc((void**) &(new_num_vertex_b), sizeof(int));
            int* new_vertex_b = NULL;
            cudaMalloc((void**) &(new_vertex_b), bg_graph.num_vertex_b * sizeof(int));
            get_new_bg_vertex_b<<<(bg_graph.num_vertex_b + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(bg_graph.num_vertex_b, d_super_vertices, d_vertices_u, new_vertex_b, new_num_vertex_b);
            cudaMemcpy(&new_bg_graph.num_vertex_b, new_num_vertex_b, sizeof(int), cudaMemcpyDeviceToHost);
            new_bg_graph.num_bipartite_edges = new_bg_graph.num_vertex_b * 2;
            // inclusive prefix sum over the survivor flags gives each kept
            // edge its slot in the compacted edge list
            int* prefixSum = NULL;
            cudaMalloc((void**) &(prefixSum), bg_graph.num_vertex_b * sizeof(int));
            prefixCopy<<<(bg_graph.num_vertex_b + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(bg_graph.num_vertex_b, new_vertex_b, prefixSum);
            int* d_prefix_helper = NULL;
            cudaMalloc((void**) &(d_prefix_helper), bg_graph.num_vertex_b * sizeof(int));
            int d = 1;
            while(d < bg_graph.num_vertex_b){
                getPrefixSum<<<(bg_graph.num_vertex_b + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(prefixSum, d_prefix_helper, d);
                d = 2 * d;
            }
            cudaMalloc((void**) &(new_bg_graph.edges), new_bg_graph.num_bipartite_edges * sizeof(struct b_edge));
            int* d_max_super_vertex = NULL;
            cudaMalloc((void**) &(d_max_super_vertex), sizeof(int));
            // fix: the atomicMax target was never initialized — start from 0
            cudaMemset(d_max_super_vertex, 0, sizeof(int));
            get_new_bg_edges<<<(bg_graph.num_vertex_b + THREADSPERBLOCK - 1) / THREADSPERBLOCK, THREADSPERBLOCK>>>(bg_graph.num_vertex_b, new_vertex_b, prefixSum, d_vertices_u, d_super_vertices, new_bg_graph.edges, d_max_super_vertex);
            cudaMemcpy(max_super_vertex, d_max_super_vertex, sizeof(int), cudaMemcpyDeviceToHost);
            // round-trip the compacted edge list through the host into
            // bg_graph for the next iteration
            debugging.num_vertex_a = new_bg_graph.num_vertex_a;
            debugging.num_vertex_b = new_bg_graph.num_vertex_b;
            debugging.num_bipartite_edges = new_bg_graph.num_bipartite_edges;
            debugging.edges = (struct b_edge*) malloc(new_bg_graph.num_bipartite_edges * sizeof(struct b_edge));
            cudaMemcpy(debugging.edges, new_bg_graph.edges, new_bg_graph.num_bipartite_edges * sizeof(struct b_edge), cudaMemcpyDeviceToHost);
            bg_graph.num_vertex_a = new_bg_graph.num_vertex_a;
            bg_graph.num_vertex_b = new_bg_graph.num_vertex_b;
            bg_graph.num_bipartite_edges = new_bg_graph.num_bipartite_edges;
            cudaFree(bg_graph.edges);
            cudaMalloc((void**) &(bg_graph.edges), bg_graph.num_bipartite_edges * sizeof(struct b_edge));
            cudaMemcpy(bg_graph.edges, debugging.edges, bg_graph.num_bipartite_edges * sizeof(struct b_edge), cudaMemcpyHostToDevice);
            // fix: these two buffers were leaked on every iteration
            free(debugging.edges);
            cudaFree(new_bg_graph.edges);
            // per-iteration host and device cleanup (each pointer freed once)
            free(vertices_u);
            free(zero_diff_edges);
            free(super_vertices);
            cudaFree(d_prefix_helper);
            cudaFree(prefixSum);
            cudaFree(new_vertex_b);
            cudaFree(new_num_vertex_b);
            cudaFree(d_max_super_vertex);
            cudaFree(d_super_vertices);
            cudaFree(d_zero_diff_edges);
            cudaFree(d_vertices_u);
            cudaFree(d_strut_edges);
        }
        // fix: the scratch arrays were only freed inside the contraction
        // branch, leaking them on the final iteration
        cudaFree(smallest_weights);
        cudaFree(smallest_edges);
    }
    //***** OUTPUT *****//
    FILE *file = fopen(argv[argc-1], "w+");
    if(file == NULL){ // fix: fopen result was never checked
        perror(argv[argc-1]);
        return 1;
    }
    fprintf(file,"Input Graph\nVertices: %d Edges: %d\n", og_graph.num_vertices, og_graph.num_edges);
    fprintf(file, "MST Edges:\n");
    for(int i = 0; i < og_graph.num_edges; i++){
        if(mst_edges[i] == true){
            fprintf(file, "index: %d - v: %d u: %d weight: %d\n", i, og_graph.edges[i].v, og_graph.edges[i].u, og_graph.edges[i].weight);
        }
    }
    fclose(file);
    // host frees (fix: max_super_vertex was freed twice, and the host
    // pointer mst_edges was additionally passed to cudaFree)
    free(max_super_vertex);
    free(og_graph.edges);
    free(mst_edges);
    free(solution_size);
    // device frees
    cudaFree(d_solutionSize);
    cudaFree(d_mst_edges);
    cudaFree(d_og_edges);
    cudaFree(bg_graph.vertices_a);
    cudaFree(bg_graph.vertices_b);
    cudaFree(bg_graph.edges);
    return 0;
}
// Reads a weighted graph from a whitespace-separated text file laid out as:
//   <num_vertices> <num_edges> followed by num_edges triples "<v> <u> <weight>".
// Fills *og_graph; the edges array is malloc'd and owned by the caller.
// Exits with an error message on open or parse failure.
void get_graph(struct graph* og_graph, char* input){
    FILE *file = fopen(input, "r");
    if(file == NULL){
        perror(input);
        exit(1);
    }
    int num_vertices, num_edges;
    // fix: tokens were read with an unbounded %s into a 255-byte buffer
    // (overflow risk) and the fscanf results were never checked; scan the
    // integers directly and bail out on malformed input
    if(fscanf(file, "%d", &num_vertices) != 1 || fscanf(file, "%d", &num_edges) != 1){
        fprintf(stderr, "%s: malformed header\n", input);
        fclose(file);
        exit(1);
    }
    og_graph->num_edges = num_edges;
    og_graph->num_vertices = num_vertices;
    og_graph->edges = (struct edge*) malloc(sizeof(struct edge) * num_edges);
    for(int i = 0; i < num_edges; i++){
        if(fscanf(file, "%d %d %d", &og_graph->edges[i].v,
                  &og_graph->edges[i].u, &og_graph->edges[i].weight) != 3){
            fprintf(stderr, "%s: malformed edge %d\n", input, i);
            fclose(file);
            exit(1);
        }
    }
    fclose(file);
}
// Expands every original edge e into two bipartite edges 2e and 2e+1 (one
// per endpoint, cross-linked through .cv) and labels the A-side and B-side
// vertex arrays. One thread per original edge; the launch must cover
// max(num_edges, num_vertices) threads so every A-vertex gets labeled.
__global__ void get_bipartite_graph(int num_edges, int num_vertices, struct edge* graphEdges, struct b_vertex_a* vertices_a, struct b_vertex_b* vertices_b, struct b_edge* bg_graphEdges) {
    int edge = threadIdx.x + blockIdx.x * blockDim.x;
    // fix: this initialization used to be nested inside `edge < num_edges`,
    // so for tree-shaped inputs (num_vertices == num_edges + 1) the last
    // A-vertex could stay uninitialized; guard it independently
    if(edge < num_vertices)
        vertices_a[edge].v = edge;
    if(edge < num_edges){
        // bipartite edge for endpoint v of the original edge
        bg_graphEdges[2*edge].v = graphEdges[edge].v;
        bg_graphEdges[2*edge].u = edge;
        bg_graphEdges[2*edge].cv = 2*edge+1; // twin bipartite edge
        bg_graphEdges[2*edge].weight = graphEdges[edge].weight;
        // bipartite edge for endpoint u of the original edge
        bg_graphEdges[2*edge+1].v = graphEdges[edge].u;
        bg_graphEdges[2*edge+1].u = edge;
        bg_graphEdges[2*edge+1].cv = 2*edge; // twin bipartite edge
        bg_graphEdges[2*edge+1].weight = graphEdges[edge].weight;
        vertices_b[edge].e = edge;
    }
}
// Resets both per-vertex arrays to the "nothing selected yet" sentinel (-1).
// One thread per entry.
__global__ void init_smallest_edges_weights(int num_edges, int *smallest_weights, int* smallest_edges){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_edges)
        return;
    smallest_weights[idx] = -1;
    smallest_edges[idx] = -1;
}
// Fills smallest_weights/smallest_edges with, for each (super)vertex, the
// minimum incident bipartite edge weight and the lowest-index bipartite edge
// attaining it. One thread per bipartite edge; `index` is the 0-based vertex
// the edge leaves from (.v is 1-based).
// NOTE(review): the unconditional sentinel stores below are only ordered
// against the atomics by __syncthreads(), which is block-scoped — when the
// grid has more than one block, a thread in a late block can overwrite a
// minimum already published by an earlier block (cross-block data race).
// Sentinel initialization belongs entirely in init_smallest_edges_weights /
// a separate launch; confirm with compute-sanitizer racecheck.
__global__ void get_smallest_edges(int bp_num_edges, int num_smallest_edges, struct b_edge* bg_graphEdges, int* smallest_weights, int* smallest_edges){
    int edge = threadIdx.x + blockIdx.x * blockDim.x;
    if(edge< bp_num_edges){
        int index = bg_graphEdges[edge].v - 1; // 0-based source vertex
        smallest_weights[index] = INT_MAX; // sentinel (racy across blocks, see above)
        __syncthreads(); // block-scope only
        atomicMin(&(smallest_weights[index]), bg_graphEdges[edge].weight); // publish smallest weight
        __syncthreads(); // block-scope only
        smallest_edges[index] = bp_num_edges - 1; // sentinel edge index (max possible)
        __syncthreads(); // block-scope only
        // keep the lowest-index bipartite edge among those matching the minimum weight
        if(bg_graphEdges[edge].weight == smallest_weights[index])
            atomicMin(&(smallest_edges[index]), edge);
    }
}
// Clears the MST membership flags: no original edge is in the tree yet.
__global__ void mst_edges_init(int og_num_edges, bool *mst_edges){
    const int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= og_num_edges)
        return;
    mst_edges[e] = false;
}
// Marks the original edge behind each (super)vertex's chosen smallest
// bipartite edge as part of the MST. A -1 entry means the vertex selected
// nothing this round and is skipped.
__global__ void get_mst_edges(int num_smallest_edges, int* smallest_edges, struct b_edge* bg_graphEdges, bool *mst_edges){
    const int v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v >= num_smallest_edges)
        return;
    const int picked = smallest_edges[v];
    if (picked == -1)
        return;
    // .u of a bipartite edge is the index of the underlying original edge
    mst_edges[bg_graphEdges[picked].u] = true;
}
// Recounts how many original edges are currently flagged as MST members,
// leaving the total in *num_mst. One thread per original edge.
// NOTE(review): every thread first writes *num_mst = 0 and the only barrier
// is the block-scoped __syncthreads(); with more than one block, a late
// block's reset can erase counts already added by an earlier block
// (cross-block race). Zeroing the counter on the host (cudaMemset) before
// the launch and dropping the in-kernel reset would make this safe — verify.
__global__ void get_num_mst(int og_num_edges, bool *mst_edges, int* num_mst){
    int edge = threadIdx.x + blockIdx.x * blockDim.x;
    if(edge < og_num_edges){
        *num_mst = 0; // reset (racy across blocks — see note above)
        __syncthreads(); // block-scope only
        if(mst_edges[edge] == true)
            atomicAdd(num_mst, 1);
    }
}
// Builds one strut edge per bipartite A-vertex from that vertex's chosen
// smallest bipartite edge. One thread per A-vertex.
// NOTE(review): assumes smallest_edges[v] is valid (not -1) for every
// vertex in range — confirm callers always run the init + selection
// kernels over all vertices first.
__global__ void get_strut_edges(int bg_num_vertices, int* smallest_edges, struct b_edge* bg_graphEdges, strut_edge* strut_edges){
    const int v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v >= bg_num_vertices)
        return;
    const int picked = smallest_edges[v];
    strut_edges[v].v = v + 1;                    // 1-based vertex label
    strut_edges[v].u = bg_graphEdges[picked].u;  // underlying original-edge index
    // vertex sitting on the other half of the same original edge
    strut_edges[v].cv = bg_graphEdges[bg_graphEdges[picked].cv].v;
}
// Zeroes the strut degree counter of every B-side (original-edge) vertex.
__global__ void strut_u_init(int bg_num_vertex_b, struct strut_u_vertex* vertices_u){
    const int u = blockIdx.x * blockDim.x + threadIdx.x;
    if (u < bg_num_vertex_b)
        vertices_u[u].degree = 0;
}
// Counts, per B-side vertex (original edge), how many strut edges are
// incident to it. One thread per strut edge; counters must be zeroed first
// (strut_u_init).
__global__ void get_strut_u_degree(int num_strut_vertices, strut_edge* strut_edges, struct strut_u_vertex* vertices_u){
    const int s = blockIdx.x * blockDim.x + threadIdx.x;
    if (s >= num_strut_vertices)
        return;
    atomicAdd(&(vertices_u[strut_edges[s].u]).degree, 1);
}
// Records, for each B-side vertex (original edge), its two endpoints and
// weight. Only even-indexed bipartite edges are processed: the odd twin
// (reached through .cv) carries the same original edge.
__global__ void get_strut_u_vertices(int bg_num_edges, struct b_edge* bg_graphEdges, struct strut_u_vertex* vertices_u){
    const int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= bg_num_edges || (e & 1))
        return;
    const int u = bg_graphEdges[e].u;
    vertices_u[u].v1 = bg_graphEdges[e].v;
    vertices_u[u].v2 = bg_graphEdges[bg_graphEdges[e].cv].v;
    vertices_u[u].weight = bg_graphEdges[e].weight;
}
// Counts the "zero difference" B-side vertices — original edges touched by
// exactly two strut edges — by atomically incrementing *zero_diff_edges.
// NOTE(review): the counter is never reset here, so the caller must zero it
// before the launch; main currently cudaMallocs it without a memset, which
// leaves the count starting from garbage — verify and fix at the call site.
__global__ void get_zero_diff_num(int bg_num_vertex_b, struct strut_u_vertex* vertices_u, int* zero_diff_edges){
    int vertex_b = threadIdx.x + blockIdx.x * blockDim.x;
    if(vertex_b < bg_num_vertex_b){
        if(vertices_u[vertex_b].degree == 2)
            atomicAdd(zero_diff_edges, 1);
    }
}
// Every vertex starts out as its own supervertex (1-based labels).
__global__ void super_vertices_init(int num_strut_vertices, int* super_vertices){
    const int v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v < num_strut_vertices)
        super_vertices[v] = v + 1;
}
// Flags which B-side vertices (original edges) survive contraction — those
// whose endpoints map to different supervertices — into new_vertex_b (1/0),
// and counts the survivors in *num_newbg_vertexb.
// NOTE(review): every thread writes *num_newbg_vertexb = 0 guarded only by
// the block-scoped __syncthreads(); with more than one block a late reset
// can erase counts already added by an earlier block, just like get_num_mst.
// Zero the counter on the host before the launch instead — verify.
__global__ void get_new_bg_vertex_b(int num_bg_vertexb, int* super_vertices, struct strut_u_vertex* vertices_u, int* new_vertex_b, int* num_newbg_vertexb){
    int vertex = threadIdx.x + blockIdx.x * blockDim.x;
    if(vertex < num_bg_vertexb){
        new_vertex_b[vertex] = 0; // default: does not survive
        __syncthreads(); // block-scope only
        // edge survives when its endpoints sit in different supervertices
        if(super_vertices[vertices_u[vertex].v1 - 1] != super_vertices[vertices_u[vertex].v2 - 1]){
            new_vertex_b[vertex] = 1;
        }
        *num_newbg_vertexb = 0; // reset (racy across blocks — see note above)
        __syncthreads(); // block-scope only
        if(new_vertex_b[vertex] == 1)
            atomicAdd(num_newbg_vertexb, 1);
    }
}
// Element-wise copy of an int array; seeds the buffer the prefix-sum
// launches then accumulate into.
__global__ void prefixCopy(int prefixNum, int* old_prefix, int *new_prefix){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < prefixNum)
        new_prefix[i] = old_prefix[i];
}
// One step (shift distance d) of an iterative inclusive prefix sum over
// `entries`, staged through the scratch copy `entriesC`; the host loops the
// launches with d = 1, 2, 4, ... (Hillis–Steele-style, one launch per step).
// NOTE(review): there is no bounds check on `index`, yet the host rounds
// the grid up to a multiple of THREADSPERBLOCK — tail threads read and
// write past both arrays (pass the element count and guard). Also,
// __syncthreads() only orders the two phases within one block: when
// index - d crosses a block boundary, the read of entries[index - d] races
// with another block's update in the same launch. Both need confirming
// (compute-sanitizer) and fixing.
__global__ void getPrefixSum(int* entries, int* entriesC, int d) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index >= d)
        entriesC[index] = entries[index - d]; // shifted snapshot
    else
        entriesC[index] = 0;
    __syncthreads(); // block-scope only — see race note above
    entries[index] = entries[index] + entriesC[index];
}
// Emits the two bipartite edges of every surviving original edge into the
// contracted graph. prefixSum is expected to hold the inclusive scan of the
// new_bg_edges survivor flags, so (prefixSum[index] - 1) * 2 is the slot of
// the first edge of the pair. Endpoints are remapped through
// super_vertices, and the largest supervertex label seen is folded into
// *max_super_vertex via atomicMax.
// NOTE(review): *max_super_vertex must be initialized (e.g. cudaMemset to 0)
// before the launch — main currently cudaMallocs it without initialization,
// so the running max starts from garbage; verify and fix at the call site.
__global__ void get_new_bg_edges(int num_bg_vertex_b, int* new_bg_edges, int* prefixSum, struct strut_u_vertex* vertices_u, int* super_vertices, struct b_edge* bg_graphEdges, int * max_super_vertex){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int edge1;
    int edge2;
    if(index < num_bg_vertex_b){
        if(new_bg_edges[index] == 1){ // this original edge survives contraction
            edge1 = (prefixSum[index] - 1) * 2; // compacted slot for the pair
            edge2 = edge1+1;
            bg_graphEdges[edge1].v = super_vertices[vertices_u[index].v1-1];
            bg_graphEdges[edge1].u = index;
            bg_graphEdges[edge1].cv = edge2; // twin link
            bg_graphEdges[edge1].weight = vertices_u[index].weight;
            bg_graphEdges[edge2].v = super_vertices[vertices_u[index].v2-1];
            bg_graphEdges[edge2].u = index;
            bg_graphEdges[edge2].cv = edge1; // twin link
            bg_graphEdges[edge2].weight = vertices_u[index].weight;
            atomicMax(max_super_vertex, super_vertices[vertices_u[index].v1-1]);
            atomicMax(max_super_vertex, super_vertices[vertices_u[index].v2-1]);
        }
    }
}
// Resolves each A-vertex to its supervertex by chasing partner vertices:
// starting from the vertex's own label, repeatedly follow the other endpoint
// of the followed vertex's chosen original edge (via vertices_u.v1/v2) while
// the label keeps decreasing, then record the smallest label reached.
// NOTE(review): this kernel is declared and defined but does not appear to
// be launched from main in this file — the host computes supervertices in a
// serial loop instead. Confirm whether it is dead code or used elsewhere.
__global__ void get_super_vertices(int num_strut_vertices, strut_edge* strut_edges, struct strut_u_vertex* vertices_u, int* super_vertices){
    int strut_edge = threadIdx.x + blockIdx.x * blockDim.x;
    int cv;
    int min_cv;
    if(strut_edge < num_strut_vertices){
        min_cv = strut_edges[strut_edge].v; // start at own 1-based label
        // first hop: the endpoint of this vertex's chosen edge that isn't itself
        if(vertices_u[strut_edges[strut_edge].u].v1 == min_cv)
            cv = vertices_u[strut_edges[strut_edge].u].v2;
        else
            cv = vertices_u[strut_edges[strut_edge].u].v1;
        // keep hopping while the label strictly decreases
        while(cv < min_cv){
            min_cv = cv;
            if(vertices_u[strut_edges[strut_edge].u].v1 == min_cv)
                cv = vertices_u[strut_edges[cv-1].u].v2;
            else
                cv = vertices_u[strut_edges[cv-1].u].v1;
        }
        super_vertices[strut_edge] = min_cv;
    }
}
14,664 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#define TILE_SIZE 16
// device buffers used by imageBlur (allocated there per call;
// d_input/d_output are freed per call — see imageBlur)
unsigned char *d_input;
unsigned char *d_output;
float *d_filter;
// Error-check helper: passes the CUDA status through, printing a message
// and exiting when it is not cudaSuccess.
// NOTE(review): the check is compiled only when DEBUG or _DEBUG is defined;
// in release builds every error is silently ignored — confirm that is
// intentional.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    exit(-1);
  }
#endif
  return result;
}
// Applies a filter_width x filter_width convolution (e.g. a Gaussian blur)
// to a single-channel cols x rows image with zero padding at the borders.
// One thread per output pixel; launch with TILE_SIZE x TILE_SIZE blocks
// covering the image.
__global__ void gaussianBlur(unsigned char *input,
                             unsigned char *output,
                             unsigned int rows,
                             unsigned int cols,
                             float *filter,
                             int filter_width) {
    int x = blockIdx.x * TILE_SIZE + threadIdx.x;
    int y = blockIdx.y * TILE_SIZE + threadIdx.y;
    // fix: the original guard `x > cols || y > rows` let the x == cols /
    // y == rows threads read and write one element out of bounds
    if (x >= cols || y >= rows)
        return;
    int index = y * cols + x;
    // Weighted average over the full filter window. Iterating the tap
    // indices 0..filter_width-1 covers every coefficient; the original
    // half-open half-width loops dropped the last row/column of odd-sized
    // filters (even sizes behave exactly as before).
    float result = 0.0f;
    int half = filter_width / 2;
    for (int fr = 0; fr < filter_width; fr++) {
        for (int fc = 0; fc < filter_width; fc++) {
            int cur_row = y + fr - half;
            int cur_col = x + fc - half;
            // skip taps that fall outside the image (zero padding)
            if ((cur_row > -1) && (cur_row < rows) &&
                (cur_col > -1) && (cur_col < cols)) {
                result += input[cur_row * cols + cur_col] * filter[fr * filter_width + fc];
            }
        }
    }
    output[index] = result;
}
// Host wrapper for gaussianBlur: allocates device buffers, uploads the
// image and filter, launches the kernel over a TILE_SIZE x TILE_SIZE grid
// covering the image, and copies the blurred result back to h_output.
void imageBlur (unsigned char* h_input,
                unsigned char* h_output,
                unsigned int rows,
                unsigned int cols,
                float* h_filter,
                int filter_width) {
    // grid sized to cover the image with TILE_SIZE x TILE_SIZE blocks
    int gridX = 1 + ((cols - 1) / TILE_SIZE);
    int gridY = 1 + ((rows - 1) / TILE_SIZE);
    dim3 dimGrid(gridX, gridY);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE);
    // allocate device memory and upload inputs
    int size = rows * cols;
    checkCuda(cudaMalloc((void**)&d_input, size * sizeof(unsigned char)));
    checkCuda(cudaMalloc((void**)&d_output, size * sizeof(unsigned char)));
    checkCuda(cudaMalloc((void**)&d_filter, filter_width * filter_width * sizeof(float)));
    checkCuda(cudaMemset(d_output, 0, size * sizeof(unsigned char)));
    checkCuda(cudaMemcpy(d_input, h_input, size * sizeof(unsigned char), cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(d_filter, h_filter, filter_width * filter_width * sizeof(float), cudaMemcpyHostToDevice));
    // kernel launch (launch-config errors surface via cudaGetLastError)
    gaussianBlur<<<dimGrid, dimBlock>>>(d_input, d_output, rows, cols, d_filter, filter_width);
    checkCuda(cudaGetLastError());
    // copy output back (blocking copy also synchronizes with the kernel)
    checkCuda(cudaMemcpy(h_output, d_output, size * sizeof(unsigned char), cudaMemcpyDeviceToHost));
    // free device memory
    checkCuda(cudaFree(d_input));
    checkCuda(cudaFree(d_output));
    checkCuda(cudaFree(d_filter)); // fix: d_filter was leaked on every call
}
|
14,665 | //
// Created by zhaoxuanzhu on 3/21/21.
//
#include "state_dynamics.cuh"
|
14,666 | //Consant memeory 64KB max, 2^16 bytes, 2^14 integers, 16384
#include <stdio.h>
#include <cuda.h>
// size of the vectors; must be known at compile time. Constant memory is
// 64 KB (2^14 ints), so N = 8192 leaves room for the two operand arrays.
#define N 8192
// operand vectors held in __constant__ memory (cached, broadcast reads)
__device__ __constant__ int dev_a_Cont[N];
__device__ __constant__ int dev_b_Cont[N];
// the same operands in regular global memory, for the timing comparison
__device__ int dev_a[N];
__device__ int dev_b[N];
// result vector in device global memory, written by both kernels
__device__ int dev_c[N]; //device global memory for result
// Element-wise vector add, reading both operands from __constant__ memory
// and writing the result to global dev_c. One thread per element.
__global__ void add_Cont() {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    dev_c[i] = dev_a_Cont[i] + dev_b_Cont[i];
}
// Element-wise vector add, reading both operands from regular global
// memory; the baseline for the constant-memory timing comparison.
__global__ void add() {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    dev_c[i] = dev_a[i] + dev_b[i];
}
// Benchmark: element-wise vector add with operands in device global memory
// versus __constant__ memory, timed with CUDA events.
// NOTE(review): constant memory is only fast when all lanes of a warp read
// the SAME address; here each thread reads a different element, so a large
// speedup should not be expected.
int main() {
    int T = 128, B = 64;            // threads per block, blocks per grid (B*T == N)
    int a[N], b[N], c[N];           // statically declared host vectors
    cudaEvent_t start, stop;        // CUDA events to measure time
    float elapsed_time, elapsed_time_Cont;
    cudaEventCreate(&start);        // timing objects
    cudaEventCreate(&stop);

    /*----------- GPU not using constant memory ------------------------*/
    printf("GPU not using constant memory\n");
    for (int i = 0; i < N; i++) {   // load arrays with some numbers
        a[i] = i;
        b[i] = i * 2;
    }
    // copy vectors to device GLOBAL memory symbols (dev_a / dev_b),
    // not constant memory -- the original comment here was misleading
    cudaMemcpyToSymbol(dev_a, a, N * sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(dev_b, b, N * sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);      // start time
    add<<<B, T>>>();                // operands live in device symbols; no args
    cudaDeviceSynchronize();        // fix: cudaThreadSynchronize() is deprecated
    cudaEventRecord(stop, 0);       // instrument code to measure end time
    cudaMemcpyFromSymbol(a, dev_a, N * sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(b, dev_b, N * sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(c, dev_c, N * sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    printf("Checking results\n");
    for (int i = 0; i < N; i++) {
        if (a[i] + b[i] != c[i]) {
            printf("ERROR IN COMPUTATION\n");
            break;
        }
    }
    // print out execution time
    printf("Time to calculate results: %f ms.\n", elapsed_time);

    /*----------- GPU using constant memory ------------------------*/
    printf("GPU using constant memory\n");
    for (int i = 0; i < N; i++) {   // load arrays with some numbers
        a[i] = i;
        b[i] = i * 2;
    }
    // copy vectors to constant memory
    cudaMemcpyToSymbol(dev_a_Cont, a, N * sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(dev_b_Cont, b, N * sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);      // start time
    add_Cont<<<B, T>>>();           // does not need array ptrs now
    cudaDeviceSynchronize();        // fix: cudaThreadSynchronize() is deprecated
    cudaEventRecord(stop, 0);       // instrument code to measure end time
    cudaMemcpyFromSymbol(a, dev_a_Cont, N * sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(b, dev_b_Cont, N * sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(c, dev_c, N * sizeof(int), 0, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_Cont, start, stop);
    printf("Checking results\n");
    for (int i = 0; i < N; i++) {
        if (a[i] + b[i] != c[i]) {
            printf("ERROR IN COMPUTATION\n");
            break;
        }
    }
    // print out execution time and relative speedup
    printf("Time to calculate results: %f ms.\n", elapsed_time_Cont);
    printf("Speedup using constant memory = %f\n", elapsed_time / elapsed_time_Cont);

    /* ----------- clean up, no malloc free needed ---------*/
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
14,667 | ////////////////////////////////////////////////////////////////////////////////
// utilities
////////////////////////////////////////////////////////////////////////////////
#include <limits.h> // INT_MIN
#include <math.h> // sqrt
#include <stdlib.h> // abs, atoi
#include <sys/time.h>
#include <time.h>
#include <fstream>
#include <iomanip>
#include <iostream>
using namespace std;
typedef unsigned short int usint;
typedef unsigned long long int ullint;
// Report a fatal error on stdout and terminate the process with status 1.
void die(const char *msg) {
    cout << "ERROR: ";
    cout << msg << endl;
    exit(1);
}
#define RANDOM_MAX 0x7fffffff // 2^31 - 1
// One step of a glibc-style linear congruential generator:
// m = 2^31, a = 1103515245, c = 12345.  Returns a value in [0, 2^31).
__device__ int random(const int seed) {
    const unsigned state = (unsigned)seed & 0x7fffffffU;
    const unsigned next = 1103515245U * state + 12345U;
    return (int)(next & 0x7fffffffU);
}
// Sign of x: -1 for negative, 0 for zero, +1 for positive.
template <class C>
__device__ C sign(const C x) {
    if (x < 0) return -1;
    if (x > 0) return 1;
    return 0;
}
// x squared; usable from both host and device code.
template <class C>
__host__ __device__ C square(const C x) {
    const C result = x * x;
    return result;
}
// Seconds elapsed between two gettimeofday() samples, as a double.
// NOTE(review): the microsecond arithmetic mixes signed/unsigned, so this
// assumes end >= begin.
inline double calc_time(struct timeval &begin, struct timeval &end) {
    const double elapsed_us = 1000000u * (end.tv_sec - begin.tv_sec)
                            + end.tv_usec - begin.tv_usec;
    return elapsed_us / 1e6;
}
////////////////////////////////////////////////////////////////////////////////
// vix
////////////////////////////////////////////////////////////////////////////////
// Approximate line-of-sight test from observer (ox,oy,oz) to target
// (tx,ty,tz) over an nrows x nrows elevation grid.  Returns 1 if the target
// appears visible, 0 if some sampled terrain point blocks it.
// APPROXIMATION: the sight line is sampled at exponentially growing spacing
// (stride doubles each step), so distant blockers can be missed; this trades
// accuracy for speed, matching the Monte-Carlo use in calc_one_vix.
__device__ int test_one_target(const int nrows, const usint *elevs,
const int ox, const int oy, const int oz,
const int tx, const int ty, const int tz) {
// Adjacent (or identical) cells are always visible.
if (abs(ox - tx) <= 1 && abs(oy - ty) <= 1) return 1;
int dx = tx - ox, dy = ty - oy;
int px, py, pz; // Current point
// Walk along the axis with the larger extent so |slope| <= 1.
int inciny = abs(dx) < abs(dy);
int sign;
float slope, zslope;
// Direction of travel along the driving axis (shadows the sign() template).
sign = (inciny*dy + (1-inciny)*dx) > 0 ? 1 : -1;
// Lateral step per driving-axis step, and elevation-angle slope per step.
slope = (float)(inciny*dx + (1-inciny)*dy) / (inciny*dy + (1-inciny)*dx);
zslope = (float)(tz - oz) / (inciny ? dy : dx);
const int limit = inciny ? dy : dx;
int stride = 1;
// Exponentially strided sampling: i = 1, 2, 4, 8, ... toward the target.
for (int i = sign; abs(i) < abs(limit); i += stride*sign, stride <<= 1) { // *= 1.9, 2.2, 2.5
int j = round(i * slope);
px = ox + (inciny*j + (1-inciny)*i);
py = oy + (inciny*i + (1-inciny)*j);
pz = elevs[px * nrows + py];
// Blocked if the terrain rises above the observer-to-target sight line.
if (pz > oz + i * zslope) return 0;
}
return 1;
}
// Kernel: one thread per terrain cell (observer).  Estimates the cell's
// visibility index by testing ntests pseudo-random targets within radius roi
// and writes round-down(256 * visible/tested), clamped to 255, into
// vix[ox*nrows+oy].  Launched on a 2D grid (see calc_vix); threads beyond
// nrows^2 exit immediately.
__global__ void calc_one_vix(const int nrows, const usint *elevs,
const int roi, const int oht, const int tht,
const int ntests, unsigned char *vix) {
// Flatten the 2D grid into a linear thread id = cell index.
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = bid * blockDim.x + threadIdx.x;
if (tid >= square(nrows)) return;
// Observer position and eye elevation.
const int ox = tid / nrows;
const int oy = tid % nrows;
const int oz = elevs[ox * nrows + oy] + oht;
int ntarget = 0;
int nvis = 0;
// NOTE(review): these clipped bounds are computed but no longer used --
// the active target-picking code below uses the unclipped ox-roi..ox+roi box.
const int vsxmax = min(nrows - 1, ox + roi); // viewshed bounds
const int vsymax = min(nrows - 1, oy + roi);
const int vsxmin = max(0, ox - roi);
const int vsymin = max(0, oy - roi);
// Seed the per-thread LCG with the cell index for reproducible sampling.
int r = tid;
// for (int i = 0; i < 10 * ntests; i++) { // iterate over random targets
while (ntarget < ntests) {
int tx, ty, tz;
int visq;
// Draw a random target in the (2*roi+1)-wide square around the observer.
r = random(r);
//tx = (int)(r * (vsxmax - vsxmin + 0.99999f) / RANDOM_MAX) + vsxmin;
tx = (int)((2*roi+0.99999f)*r/RANDOM_MAX) + (ox-roi);
r = random(r);
//ty = (int)(r * (vsymax - vsymin + 0.99999f) / RANDOM_MAX) + vsymin;
ty = (int)((2*roi+0.99999f)*r/RANDOM_MAX) + (oy-roi);
// if (tx == 0 && ty == 0) continue;
// Targets outside the roi circle are pulled 1/3 of the way back toward
// the observer instead of being rejected (rejection was too slow).
if (square(tx - ox) + square(ty - oy) > square(roi)) {
tx = ox + (tx - ox)/3; // golden
ty = oy + (ty - oy)/3;
//continue; // too slow
}
// Off-map targets count as tested-but-invisible.
if (tx >=0 && tx < nrows && ty >= 0 && ty < nrows) {
tz = elevs[tx * nrows + ty] + tht;
visq = test_one_target(nrows, elevs, ox, oy, oz, tx, ty, tz);
// cout << "test_one_target(" << ox << ',' << oy << ',' << tx << ',' << ty << ")=" << visq << endl;
} else {
visq = 0;
}
ntarget++;
if (visq) nvis++;
// Stopping rule: do at least 10 points.
// Then, continue until vix >= .5 or <= .1.
// This could be improved by using the variance, and by looking at
// other observers, to select the best.
// if (ntarget >= ntests) break;
// if (ntarget < 10) continue;
// if (v >= 0.5 || v <= 0.1) break;
}
// Scale the visible fraction to a byte.
float v = (float)nvis / ntarget;
// cout << "obs at (" << ox << ',' << oy << "), z=" << elevs[ox * nrows + oy]
// << ", vix=" << nvis << '/' << ntarget << '=' << v << endl;
vix[ox * nrows + oy] = (unsigned char)min(255, (int)(v * 255.999f));
}
// Host wrapper: compute an approximate visibility index (one byte per cell,
// 0..255) for every cell of the nrows x nrows terrain h_elevs, writing the
// result to h_vix.  roi is the radius of interest; oht/tht are observer and
// target heights above the terrain; ntests is the Monte-Carlo sample count
// per observer.  Dies on any CUDA error.
void calc_vix(const int nrows, const usint *h_elevs,
              const int roi, const int oht, const int tht,
              const int ntests, unsigned char *h_vix) {
    usint *d_elevs;
    unsigned char *d_vix;
    if (cudaMalloc((void **)&d_elevs, square(nrows) * sizeof(usint)) != cudaSuccess)
        die("cudaMalloc failed");
    if (cudaMalloc((void **)&d_vix, square(nrows) * sizeof(unsigned char)) != cudaSuccess)
        die("cudaMalloc failed");
    if (cudaMemcpy(d_elevs, h_elevs, square(nrows) * sizeof(usint), cudaMemcpyHostToDevice) != cudaSuccess)
        die("cudaMemcpy failed");
    const size_t dimblock = 128;
    // One thread per cell.  A 2D s x s grid is used because a 1D grid of
    // square(nrows)/dimblock blocks could exceed the 65535 gridDim.x limit.
    int s = (int)sqrt(square(nrows) / dimblock);
    if (square(s) * dimblock < square(nrows)) s++;
    const dim3 dimgrid(s, s);
    calc_one_vix<<<dimgrid, dimblock>>>(nrows, d_elevs, roi, oht, tht, ntests, d_vix);
    // Fix: detect launch-configuration failures, which kernel launches
    // never report directly.
    if (cudaGetLastError() != cudaSuccess)
        die("calc_one_vix launch failed");
    if (cudaMemcpy(h_vix, d_vix, square(nrows) * sizeof(unsigned char), cudaMemcpyDeviceToHost) != cudaSuccess)
        die("cudaMemcpy failed");
    if (cudaFree(d_elevs) != cudaSuccess) die("cudaFree failed");
    if (cudaFree(d_vix) != cudaSuccess) die("cudaFree failed");
}
////////////////////////////////////////////////////////////////////////////////
// findmax
////////////////////////////////////////////////////////////////////////////////
// One thread block scans one ~blocksize x blocksize tile of the vix array
// and extracts, one per round, the nwantedperblock points with the highest
// vix value (ties broken by a scatter hash so picks spread out).  Each pick's
// linear index (x*nrows + y) is appended to obs, and the picked cell is
// zeroed in vix so later rounds skip it.
// Dynamic shared memory requirement: blockDim.x * 4 ints (x, y, vix, hash).
__global__ void process_one_block(const int nrows, unsigned char *vix,
                                  const float blocksize, const int nblockrows,
                                  const int nwantedperblock, int *obs) {
    // no need for if (blockIdx.x >= square(nblockrows)) return;
    extern __shared__ int results[];
    __shared__ int xmin, xmax, ymin, ymax;
    if (threadIdx.x == 0) {
        // Tile bounds for this block (last tile clipped to the terrain edge).
        int bx = blockIdx.x / nblockrows;
        int by = blockIdx.x % nblockrows;
        xmin = (int)(blocksize * bx);
        xmax = min((int)(blocksize * (bx + 1)), nrows);
        ymin = (int)(blocksize * by);
        ymax = min((int)(blocksize * (by + 1)), nrows);
    }
    __syncthreads();
    const int width = ymax - ymin;
    const int npoints = (xmax - xmin) * (ymax - ymin);
    const int npointsperthread = npoints / blockDim.x + (npoints % blockDim.x ? 1 : 0);
    for (int i = 0; i < nwantedperblock; i++) {
        // Each thread finds the best point of its contiguous slice.
        int t = threadIdx.x * npointsperthread; // the first point, probably used
        int p1x = xmin + t / width;
        int p1y = ymin + t % width;
        unsigned char v1 = vix[p1x * nrows + p1y];
        int h1 = p1x * (p1x + p1y) * 010101010101; // octal constant: cheap scatter hash
        for (int j = t + 1; j < t + npointsperthread && j < npoints; j++) {
            int p2x = xmin + j / width;
            int p2y = ymin + j % width;
            unsigned char v2 = vix[p2x * nrows + p2y];
            int h2 = p2x * (p2x + p2y) * 010101010101;
            if (v1 < v2 || (v1 == v2 && h1 < h2)) {
                p1x = p2x;
                p1y = p2y;
                v1 = v2;
                h1 = h2;
            }
        }
        results[threadIdx.x * 4] = p1x;
        results[threadIdx.x * 4 + 1] = p1y;
        results[threadIdx.x * 4 + 2] = v1;
        results[threadIdx.x * 4 + 3] = h1;
        // BUG FIX: barrier so every thread's shared-memory result is visible
        // before thread 0 reduces them; previously thread 0 raced with the
        // other threads' writes.
        __syncthreads();
        if (threadIdx.x == 0) {
            // p1x... is the result of thread 0; fold in the other threads.
            for (int j = 1; j < blockDim.x; j++) {
                int p2x = results[j * 4];
                int p2y = results[j * 4 + 1];
                int v2 = results[j * 4 + 2];
                int h2 = results[j * 4 + 3];
                if (v1 < v2 || (v1 == v2 && h1 < h2)) {
                    p1x = p2x;
                    p1y = p2y;
                    v1 = v2;
                    h1 = h2;
                }
            }
            obs[blockIdx.x * nwantedperblock + i] = p1x*nrows + p1y;
            vix[p1x * nrows + p1y] = 0; // consume the winner
        }
        // Winner must be zeroed before the next round re-scans vix.
        __syncthreads();
    }
}
// Host wrapper: select nwanted candidate observers from the visibility-index
// array h_vix.  The terrain is split into nblockrows^2 tiles of ~blocksize
// rows; one GPU block extracts the nwantedperblock highest-vix points per
// tile (see process_one_block).  Linear indices are written to h_obs.
void find_max(const int nrows, const unsigned char *h_vix,
              const float blocksize, const int nwanted, const int nblockrows,
              const int nwantedperblock, int *h_obs) {
    unsigned char *d_vix;
    int *d_obs;
    if (cudaMalloc((void **)&d_vix, square(nrows) * sizeof(unsigned char)) != cudaSuccess)
        die("cudaMalloc failed");
    if (cudaMalloc((void **)&d_obs, nwanted * sizeof(int)) != cudaSuccess)
        die("cudaMalloc failed");
    if (cudaMemcpy(d_vix, h_vix, square(nrows) * sizeof(unsigned char), cudaMemcpyHostToDevice) != cudaSuccess)
        die("cudaMemcpy failed");
    const size_t dimgrid = square(nblockrows); // one block per tile
    const size_t dimblock = 256;
    // Dynamic shared memory: 4 ints per thread (x, y, vix, hash).
    process_one_block<<<dimgrid, dimblock, dimblock * 4 * sizeof(int)>>>(
        nrows, d_vix, blocksize, nblockrows, nwantedperblock, d_obs);
    // Fix: detect launch-configuration failures (consistent with calc_vix).
    if (cudaGetLastError() != cudaSuccess)
        die("process_one_block launch failed");
    if (cudaMemcpy(h_obs, d_obs, nwanted * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
        die("cudaMemcpy failed");
    if (cudaFree(d_vix) != cudaSuccess) die("cudaFree failed");
    if (cudaFree(d_obs) != cudaSuccess) die("cudaFree failed");
}
////////////////////////////////////////////////////////////////////////////////
// viewshed
////////////////////////////////////////////////////////////////////////////////
// Mark cell (row, col) visible in a packed viewshed bitmap: set bit
// (63 - col%64) of 64-bit word col/64 in the given row.  atomicOr makes
// concurrent marks from different threads safe.
__device__ void set_vis(const int nwpr, const int row, const int col,
ullint *shed) {
    const int word = col / 64;
    const int bit = 63 - col % 64;
    atomicOr(&shed[row * nwpr + word], 1ULL << bit);
}
// Kernel: one block computes the full viewshed bitmap of one observer.
// Threads divide the perimeter of the (2*roi+1)-square bounding box among
// themselves; each thread runs a line of sight from the observer out to each
// of its perimeter targets, marking every terrain cell along the way whose
// top (elevation + tht) clears the running horizon.  Output bitmaps are in
// shed-local coordinates (observer at (roi, roi)).
__global__ void calc_one_shed(const int nrows, const usint *elevs,
const int roi, const int oht, const int tht,
const int nsheds, const int *obs,
ullint *sheds) {
if (blockIdx.x >= nsheds) return;
const int nr = 2 * roi + 1;
const int nwpr = (nr + 63) / 64;
ullint * const thisshed = &sheds[blockIdx.x * nr * nwpr];
__shared__ int ox, oy, oz;
if (threadIdx.x == 0) {
// Decode the observer's cell and mark it visible to itself.
ox = obs[blockIdx.x]/nrows;
oy = obs[blockIdx.x]%nrows;
oz = elevs[ox * nrows + oy] + oht;
set_vis(nwpr, roi, roi, thisshed);
}
__syncthreads();
// Clipping xmin etc at 0, nrows-1 makes the viewshed depend on the roi, so don't.
const int xmin = ox - roi;
const int ymin = oy - roi;
const int xmax = ox + roi;
const int ymax = oy + roi;
const int xwidth = xmax - xmin;
const int ywidth = ymax - ymin;
const int perimeter = 2 * (xwidth + ywidth); // This formula is subtle
// Perimeter targets per thread (ceiling division).
const int ntpt = perimeter / blockDim.x + (perimeter % blockDim.x ? 1 : 0);
int dx, dy;
int tx, ty;
int px, py; // Current point
int inciny;
int sign;
float slope, zslope;
const int sector = threadIdx.x;
for (int ip = sector * ntpt; ip < (sector + 1) * ntpt && ip < perimeter; ip++) {
// Map perimeter index ip to a target (tx, ty) on one of the four edges.
if (ip < xwidth) {
tx = xmin + ip;
ty = ymin;
} else if (ip < 2 * xwidth) {
tx = 1 + xmin - xwidth + ip;
ty = ymax;
} else if (ip < 2 * xwidth + ywidth) {
tx = xmin;
ty = 1 + ymin - 2 * xwidth + ip;
} else {
tx = xmax;
ty = ymin - 2 * xwidth - ywidth + ip;
}
// Run a line of sight out from obs to target.
dx = tx - ox;
dy = ty - oy;
// Drive along the axis with the larger extent (|slope| <= 1).
inciny = abs(dx) < abs(dy);
sign = (inciny*dy + (1-inciny)*dx) > 0 ? 1 : -1;
slope = (float)(inciny*dx + (1-inciny)*dy) / (inciny*dy + (1-inciny)*dx);
// Running maximum elevation-angle slope seen so far (the horizon).
zslope = -99999.f;
// i=0 would be the observer, which is always visible.
for (int i = sign; i != (inciny ? dy : dx) + sign; i += sign) {
int j = round(i * slope);
px = ox + (inciny*j + (1-inciny)*i);
py = oy + (inciny*i + (1-inciny)*j);
// Have we reached the edge of the area?
if (px < 0 || px >= nrows || py < 0 || py >= nrows) break;
if (square(px - ox) + square(py - oy) > square(roi)) break;
int pelev = elevs[px * nrows + py];
// Raise the horizon if this cell's ground is higher than it.
float s = (float)(pelev - oz) / abs(i);
if (zslope < s) zslope = s;
float hz = oz + zslope * abs(i);
// A target of height tht on this cell is visible iff it clears the horizon.
if (pelev + tht >= hz)
set_vis(nwpr, px - ox + roi, py - oy + roi, thisshed);
}
}
}
// Host wrapper: compute the viewshed bitmap for each of the nsheds candidate
// observers (h_obs holds linear cell indices).  Each shed is a zeroed
// nr x nwpr bitmap (nr = 2*roi+1 rows, nwpr 64-bit words per row) filled by
// calc_one_shed, one block per observer.  Results copied back to h_sheds.
void calc_sheds(const int nrows, const usint *h_elevs,
                const int roi, const int oht, const int tht,
                const int nsheds, const int *h_obs,
                ullint *h_sheds) {
    usint *d_elevs;
    int *d_obs;
    ullint *d_sheds;
    const int nr = 2 * roi + 1;
    const int nwpr = (nr + 63) / 64;
    if (cudaMalloc((void **)&d_elevs, square(nrows) * sizeof(usint)) != cudaSuccess)
        die("cudaMalloc failed");
    if (cudaMalloc((void **)&d_obs, nsheds * sizeof(int)) != cudaSuccess)
        die("cudaMalloc failed");
    if (cudaMalloc((void **)&d_sheds, nsheds * nr * nwpr * sizeof(ullint)) != cudaSuccess)
        die("cudaMalloc failed");
    if (cudaMemcpy(d_elevs, h_elevs, square(nrows) * sizeof(usint), cudaMemcpyHostToDevice) != cudaSuccess)
        die("cudaMemcpy failed");
    if (cudaMemcpy(d_obs, h_obs, nsheds * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess)
        die("cudaMemcpy failed");
    // Sheds are built with atomicOr, so they must start all-zero.
    if (cudaMemset(d_sheds, 0, nsheds * nr * nwpr * sizeof(ullint)) != cudaSuccess)
        die("cudaMemset failed");
    const size_t dimblock = 256; // must be a multiple of 32
    const size_t dimgrid = nsheds;
    calc_one_shed<<<dimgrid, dimblock>>>(nrows, d_elevs, roi, oht, tht, nsheds, d_obs, d_sheds);
    // Fix: detect launch-configuration failures (consistent with calc_vix).
    if (cudaGetLastError() != cudaSuccess)
        die("calc_one_shed launch failed");
    if (cudaMemcpy(h_sheds, d_sheds, nsheds * nr * nwpr * sizeof(ullint), cudaMemcpyDeviceToHost)
        != cudaSuccess)
        die("cudaMemcpy failed");
    if (cudaFree(d_elevs) != cudaSuccess) die("cudaFree failed");
    if (cudaFree(d_obs) != cudaSuccess) die("cudaFree failed");
    if (cudaFree(d_sheds) != cudaSuccess) die("cudaFree failed");
}
////////////////////////////////////////////////////////////////////////////////
// site
////////////////////////////////////////////////////////////////////////////////
// Return 1 iff cell (obsx, obsy) is marked visible in the cumulative
// coverage bitmap (cumnwpr 64-bit words per row, MSB-first within a word).
__device__ int is_obs_vis(const int cumnwpr,
const ullint *cumshed,
const int obsx, const int obsy) {
    const int bitindex = obsx * cumnwpr * 64 + obsy;
    const ullint word = cumshed[bitindex / 64];
    return ((word >> (63 - bitindex % 64)) & 1ULL) ? 1 : 0;
}
// Kernel: one block per candidate in updatelist.  Computes how many NEW
// cells candidate oi's viewshed would add to the cumulative coverage bitmap:
// popcount((cumshed | shifted-shed) ^ cumshed), summed over all rows, and
// stores it in testshedarea[oi].  The shed bitmap is in shed-local
// coordinates, so each 64-bit word is shifted by firstbit to align with the
// terrain-global cumshed words.
// Dynamic shared memory: blockDim.x ints (per-thread partial sums).
__global__ void calc_extra_area(const int nrows, const int roi,
const int *obs, const int *updatelist,
const ullint *sheds,
const int nr, const int nwpr, const int cumnwpr,
const ullint *cumshed,
int *testshedarea) {
const int oi = updatelist[blockIdx.x];
const int obsx = obs[oi]/nrows;
const int obsy = obs[oi]%nrows;
const ullint *shed = sheds + oi*nr*nwpr;
int *extraarea = testshedarea + oi;
extern __shared__ int areas[];
areas[threadIdx.x] = 0;
// calculate nrpt rows of extra area
const int nrpt = nr / blockDim.x + (nr % blockDim.x ? 1 : 0);
int sum = 0;
for (int row = threadIdx.x * nrpt; row < (threadIdx.x + 1) * nrpt && row < nr; row++) {
// Terrain-global row this shed row maps onto; skip rows off the map.
const int cumrow = obsx - roi + row;
if (cumrow >= 0 && cumrow < nrows) {
// Word/bit offset of the shed's leftmost column within cumshed.
int firstword = (obsy - roi) / 64;
int firstbit = (obsy - roi) % 64;
if (firstbit < 0) {
// C division truncates toward zero; renormalize for negative obsy-roi.
firstword--;
firstbit += 64;
}
int lastword = (obsy + roi) / 64;
ullint prevvalue = 0ULL;
ullint value, cumvalue, tempvalue;
for (int cumword = firstword; cumword <= lastword; cumword++)
if (cumword >= 0 && cumword < cumnwpr) {
int word = cumword - firstword; // definition out of loop?
// NOTE(review): `cumword == 0` looks like it was meant to seed
// prevvalue when entering at a clipped left edge; confirm intent.
if (cumword == 0 && word > 0) prevvalue = shed[row * nwpr + word - 1];
if (word < nwpr)
value = shed[row * nwpr + word];
else
value = 0ULL;
cumvalue = cumshed[cumrow * cumnwpr + cumword];
// New bits = (cumulative OR shifted shed) XOR cumulative.
tempvalue = cumvalue;
tempvalue |= value >> firstbit;
if (firstbit != 0) tempvalue |= prevvalue << (64 - firstbit);
tempvalue ^= cumvalue;
sum += __popcll(tempvalue);
prevvalue = value;
}
}
}
areas[threadIdx.x] = sum;
__syncthreads(); // wait for all threads
// Thread 0 folds the per-thread partial sums into the final extra area.
if (threadIdx.x == 0) {
int sum = 0;
for (int i = 0; i < blockDim.x; i++)
sum += areas[i];
*extraarea = sum;
}
}
// Kernel: builds the list of candidate sheds whose extra-area value must be
// recomputed this round.  A candidate is skipped if already used, if it is
// farther than 2*roi from the last-chosen observer (its extra area cannot
// have changed), or -- in intervisibility mode -- if it is not visible from
// the existing network.  Surviving candidate indices are appended to
// updatelist; updatelist[499999] is (ab)used as the atomic list-length
// counter, which is why the list is allocated with 500000 slots.
__global__ void union_area(const int nrows, const int roi, const int intervis,
const int nsheds, const int *obs,
const ullint *sheds,
const int nr, const int nwpr, const int cumnwpr,
const int nusedsheds, const char *usedq,
const int lastobs,
const int lastobsx, const int lastobsy,
const ullint *cumshed,
int *testshedarea, int *updatelist) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int obsx, obsy, valid;
// calculate multiple extra areas
// Grid-stride loop over all candidates.
for (int oi = tid; oi < nsheds; oi += gridDim.x * blockDim.x) {
valid = 1;
if (usedq[oi]) {
valid = 0;
} else {
obsx = obs[oi]/nrows;
obsy = obs[oi]%nrows;
if (nusedsheds > 0) { // lastobs >= 0
if (square(obsx-lastobsx) + square(obsy-lastobsy) > square(2*roi))
valid = 0;
else if (intervis && !is_obs_vis(cumnwpr, cumshed, obsx, obsy)) // if intervis, reset all after the first
valid = 0; // testshedarea[oi] = 0; // reset invisible ones to zero
}
}
if (valid) {
/*
cudaStream_t s;
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
calc_extra_area<<<1, nr, nr*sizeof(int), s>>>(nrows, roi, obsx, obsy,
&sheds[oi*nr*nwpr],
nr, nwpr, cumnwpr,
cumshed, &testshedarea[oi]);
cudaStreamDestroy(s);
// calc_extra_area<<<1, nr, nr*sizeof(int)>>>(nrows, roi, obsx, obsy, &sheds[oi*nr*nwpr],
// nr, nwpr, cumnwpr, cumshed, &testshedarea[oi]);
*/
// Append this candidate; the host launches calc_extra_area for the list.
int index = atomicAdd(updatelist+499999, 1);
updatelist[index] = oi;
}
}
}
// Exchange the contents of two ints.
__device__ void swap(int *x, int *y) {
    const int tmp = *y;
    *y = *x;
    *x = tmp;
}
// Kernel: each of the 100 blocks finds the unused shed with the largest
// extra area over a strided slice of all candidates, then reduces within
// the block.  Block b writes its winner's index to top100[b] and its area
// to top100[b + 100]; the host picks the overall best of the 100.
// Requires blockDim.x == 256 (shared arrays are sized for it).
__global__ void findtopobs(const int nsheds, const char *usedq,
                           const int *testshedarea, int *top100) {
    __shared__ int obs[256];
    __shared__ int areas[256];
    obs[threadIdx.x] = 0;
    areas[threadIdx.x] = 0;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int size = gridDim.x * blockDim.x;
    // Grid-stride pass: each thread keeps its best candidate so far.
    for (int i = tid; i < nsheds; i += size)
        if (!usedq[i]) {
            int extraarea = testshedarea[i];
            if (extraarea > areas[threadIdx.x]) {
                obs[threadIdx.x] = i;
                areas[threadIdx.x] = extraarea;
            }
        }
    __syncthreads();
    // Shared-memory max-reduction.
    // BUG FIX: the original had no barrier between reduction stages, so
    // stages crossing warp boundaries (stride >= 64 with 256 threads) raced;
    // a barrier after every stage is also required on Volta+ where warps are
    // not implicitly synchronous.  The barrier is outside the divergent `if`
    // so all threads reach it.
    for (int stride = 128; stride >= 1; stride >>= 1) {
        if (threadIdx.x < stride && areas[threadIdx.x + stride] > areas[threadIdx.x]) {
            swap(&obs[threadIdx.x], &obs[threadIdx.x + stride]);
            swap(&areas[threadIdx.x], &areas[threadIdx.x + stride]);
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        top100[blockIdx.x] = obs[0];
        top100[blockIdx.x + 100] = areas[0];
    }
}
// Kernel: OR the just-selected observer's viewshed bitmap into the
// cumulative coverage bitmap, shifting each 64-bit shed word by firstbit to
// align shed-local columns with terrain-global columns.  One thread handles
// one shed row (launched <<<nr, 1>>>).  Also marks usedq[lastobs].
__global__ void calc_union(const int nrows, const int roi,
const int nsheds, const int *obs,
const ullint *sheds,
const int nr, const int nwpr, const int cumnwpr,
const int lastobs, char *usedq,
ullint *cumshed) {
// lastobs >= 0
__shared__ int lastobsx, lastobsy;
__shared__ const ullint *shed;
if (threadIdx.x == 0) {
usedq[lastobs] = 1; // set gridDim.x times
lastobsx = obs[lastobs]/nrows;
lastobsy = obs[lastobs]%nrows;
shed = &sheds[lastobs*nr*nwpr];
}
__syncthreads(); // wait for thread 0
// Word/bit offset of the shed's leftmost column within cumshed; renormalize
// because C division truncates toward zero for negative lastobsy-roi.
int firstword = (lastobsy - roi) / 64;
int firstbit = (lastobsy - roi) % 64;
if (firstbit < 0) {
firstword--;
firstbit += 64;
}
int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row < nr) { // a row of shed
int cumrow = lastobsx - roi + row;
if (cumrow >= 0 && cumrow < nrows) // row inside terrain
for (int word = 0; word < nwpr; word++) { // each word of row
int cumword = firstword + word;
// Each shed word spills across (up to) two cumshed words.
if (cumword >= 0 && cumword < cumnwpr) // word inside terrain
cumshed[cumrow * cumnwpr + cumword] |= shed[row * nwpr + word] >> firstbit;
if (firstbit != 0 && cumword + 1 >= 0 && cumword + 1 < cumnwpr) // firstbit != 0 and word + 1 inside terrain
cumshed[cumrow * cumnwpr + cumword + 1] |= shed[row * nwpr + word] << (64 - firstbit);
}
}
}
// Greedy multi-observer siting (maximal-coverage heuristic).  Each round:
//   1. union_area builds the list of candidates whose extra area may have
//      changed since the last pick;
//   2. calc_extra_area recomputes those candidates' extra areas;
//   3. findtopobs + a host scan pick the candidate adding the most new area;
//   4. calc_union ORs its shed into the cumulative coverage bitmap.
// Stops when no candidate adds area or coverage exceeds 95% of the terrain.
// On return, selected[i] == 1 for every chosen shed.
void site_it(const int nrows, const int roi, const int intervis,
const int nsheds, const int *h_obs, const ullint *h_sheds, char *selected) {
const int nr = 2 * roi + 1;
const int nwpr = (nr + 63) / 64;
const int cumnwpr = (nrows + 63) / 64;
int *usedsheds; // list of sheds used so far.
char *h_usedq; // whether each particular shed has been used.
int h_top100[200]; // top 100 tentative observers and extra areas
usedsheds = 0;
h_usedq = 0;
int *areas = 0;
usedsheds = new int[nsheds];
h_usedq = new char[nsheds];
areas = new int[nsheds];
if (!usedsheds || !h_usedq || !areas)
die("Memory exhausted. Program terminates.");
for (int i = 0; i < nsheds; i++) h_usedq[i] = 0;
// Device-side mirrors of the siting state.
int *d_obs;
ullint *d_sheds;
char *d_usedq;
ullint *d_cumshed;
int *d_testshedarea;
int *d_top100;
if (cudaMalloc((void **)&d_obs, nsheds * sizeof(int)) != cudaSuccess)
die("cudaMalloc failed");
if (cudaMalloc((void **)&d_sheds, nsheds * nr * nwpr * sizeof(ullint)) != cudaSuccess)
die("cudaMalloc failed");
if (cudaMalloc((void **)&d_usedq, nsheds * sizeof(char)) != cudaSuccess)
die("cudaMalloc failed");
if (cudaMalloc((void **)&d_cumshed, nrows * cumnwpr * sizeof(ullint)) != cudaSuccess)
die("cudaMalloc failed");
if (cudaMalloc((void **)&d_testshedarea, nsheds * sizeof(int)) != cudaSuccess)
die("cudaMalloc failed");
if (cudaMalloc((void **)&d_top100, 200 * sizeof(int)) != cudaSuccess)
die("cudaMalloc failed");
if (cudaMemcpy(d_obs, h_obs, nsheds * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess)
die("cudaMemcpy failed");
if (cudaMemcpy(d_sheds, h_sheds, nsheds * nr * nwpr * sizeof(ullint), cudaMemcpyHostToDevice)
!= cudaSuccess)
die("cudaMemcpy failed");
if (cudaMemset(d_usedq, 0, nsheds * sizeof(char)) != cudaSuccess)
die("cudaMemset failed");
if (cudaMemset(d_cumshed, 0, nrows*cumnwpr*sizeof(ullint)) != cudaSuccess)
die("cudaMemset failed");
// Candidate update list; slot 499999 is the atomic length counter
// (see union_area), hence the fixed 500000-int allocation.
int *d_updatelist;
if (cudaMalloc((void **)&d_updatelist, 500000*sizeof(int)) != cudaSuccess) die("cudaMalloc failed");
int nusedsheds = 0;
int lastobs = -1;
int lastobsx = 0;
int lastobsy = 0;
int cumarea = 0;
if (cudaMemset(d_testshedarea, 0, nsheds * sizeof(int)) != cudaSuccess)
die("cudaMemset failed");
size_t dimgrid = (nsheds+255)/256;
size_t dimblock = 256;
// size_t dimgrid = (nsheds + dimblock - 1) / dimblock;
//cout << "Total area=" << square(nrows) << endl;
//cout << "#nusedsheds newshed obsx obsy area extraarea newcumarea areapercentage" << endl;
// Greedy loop: one new observer per iteration.
while (1) {
if (cudaMemset(d_updatelist, 0, 500000*sizeof(int)) != cudaSuccess) die("cudaMemset failed");
union_area<<<dimgrid, dimblock>>>(
nrows, roi, intervis, nsheds, d_obs, d_sheds, nr, nwpr, cumnwpr,
nusedsheds, d_usedq, lastobs, lastobsx, lastobsy, d_cumshed, d_testshedarea, d_updatelist);
// Synchronous memcpy of the list length also waits for union_area.
int updatelistsize;
if (cudaMemcpy(&updatelistsize, d_updatelist+499999, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) die("cudaMemcpy failed");
//cout << "\nsize = " << updatelistsize;
// One block per stale candidate; nr threads and nr ints of shared memory.
calc_extra_area<<<updatelistsize, nr, nr*sizeof(int)>>>(
nrows, roi, d_obs, d_updatelist, d_sheds, nr, nwpr, cumnwpr, d_cumshed, d_testshedarea);
cudaDeviceSynchronize();
// find top 100 observers
findtopobs<<<100, 256>>>(nsheds, d_usedq, d_testshedarea, d_top100);
if (cudaMemcpy(h_top100, d_top100, 200 * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
die("cudaMemcpy failed");
// Host-side scan of the 100 per-block winners for the overall best.
int newshed = 0;
int extraarea = 0;
for (int i = 0; i < 100; i++) {
if (h_top100[i + 100] > extraarea) {
extraarea = h_top100[i + 100];
newshed = h_top100[i];
}
}
if (extraarea == 0) {
//cout << "No more new observers that will add new area." << endl;
break;
}
// Accept the winner.
usedsheds[nusedsheds++] = newshed;
h_usedq[newshed] = 1;
lastobs = newshed;
lastobsx = h_obs[newshed]/nrows;
lastobsy = h_obs[newshed]%nrows;
// set usedq and calculate cumshed
calc_union<<<nr, 1>>>(nrows, roi, nsheds, d_obs, d_sheds, nr, nwpr, cumnwpr, lastobs, d_usedq, d_cumshed);
cudaDeviceSynchronize();
cumarea += extraarea;
double areapercentage = 100.0 * cumarea / square(nrows);
if (areapercentage > 95)
break;
// After the first pick, snapshot the standalone areas; in intervis mode
// also reset the extra areas so invisible candidates drop to zero.
if (nusedsheds == 1) {
if (cudaMemcpy(areas, d_testshedarea, nsheds * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
die("cudaMemcpy failed");
if (intervis)
if (cudaMemset(d_testshedarea, 0, nsheds * sizeof(int)) != cudaSuccess)
die("cudaMemset failed");
}
/*
cout << setw(6) << nusedsheds << setw(6) << newshed
<< setw(6) << h_obs[newshed]/nrows << setw(6) << h_obs[newshed]%nrows
<< setw(8) << areas[newshed] << setw(8) << extraarea
<< setw(10) << cumarea << setw(8) << areapercentage << endl;
*/
}
cout << " nusedsheds:" << nusedsheds << " coverage:" << 100.0*cumarea/square(nrows);
if (cudaFree(d_obs) != cudaSuccess) die("cudaFree failed");
if (cudaFree(d_sheds) != cudaSuccess) die("cudaFree failed");
if (cudaFree(d_usedq) != cudaSuccess) die("cudaFree failed");
if (cudaFree(d_cumshed) != cudaSuccess) die("cudaFree failed");
if (cudaFree(d_testshedarea) != cudaSuccess) die("cudaFree failed");
if (cudaFree(d_top100) != cudaSuccess) die("cudaFree failed");
if (cudaFree(d_updatelist) != cudaSuccess) die("cudaFree failed");
// Export the selection to the caller.
for (int i = 0; i < nsheds; i++)
selected[i] = h_usedq[i];
delete[] usedsheds;
delete[] h_usedq;
delete[] areas;
}
////////////////////////////////////////////////////////////////////////////////
// main
////////////////////////////////////////////////////////////////////////////////
// Driver for the observer-siting pipeline:
//   input -> calc_vix -> find_max -> calc_sheds -> site_it -> output,
// with wall-clock timing printed for each stage.
// Usage: site nrows roi oht/tht ntests blocksize nwanted intervis infile outfile
//   infile  - raw binary grid of nrows*nrows unsigned 16-bit elevations
//   outfile - "x,y" lines for each selected observer
int main(int argc, char **argv) {
double elapsed_secs;
struct timeval begin, middle, end;
gettimeofday(&begin, NULL);
middle = begin;
int nrows; // # rows, cols in this cell
int roi; // radius of interest
int oht; // ht of observer above terrain
int tht; // target ht above terrain
int ntests; // # of targets tested per observer
int blocksize0; // Requested number of rows in one block of the input visibility index array.
float blocksize; // Perturbed blocksize0, to remove sliver blocks at the end.
int nwanted0; // Desired number of output observers.
int nwanted; // Modified because blocksize was perturbed.
int nblocks; // Number of blocks on one side of it.
int nwantedperblock; // Number of observers to find per block, before culling.
int intervis; // Should new observers be visible to existing ones?
usint *elevs; // terrain elevation (input)
unsigned char *vix; // visibility index * 256 (output); vim
int *obs; // observers
ullint *sheds; // viewsheds
char *selected;
//cout << "[SITE, compiled from " << __FILE__ << " on " << __DATE__ << ", " << __TIME__ << ']' << endl;
if (argc != 10) {
cout << "argc=" << argc << endl;
die("SITE requires 9 arguments: nrows, roi, oht/tht, ntests, blocksize, nwanted, intervis, infile, outfile");
}
// Parse and sanity-check arguments (observer and target height are equal).
nrows = atoi(argv[1]);
roi = atoi(argv[2]);
oht = tht = atoi(argv[3]);
ntests = atoi(argv[4]);
blocksize0 = atoi(argv[5]);
nwanted0 = atoi(argv[6]);
intervis = atoi(argv[7]);
//cout << "nrows=" << nrows << ", roi=" << roi << ", oht=" << oht << ", tht=" << tht
// << ", ntests=" << ntests << "\nblocksize0=" << blocksize0 << ", nwanted0=" << nwanted0
// << ", intervis=" << intervis << endl;
if (nrows <= 0 || nrows > 20000) die("Unreasonable value for nrows");
if (roi < 1 || roi > 10000) die("Unreasonable value for roi.");
if (tht < 0 || tht > 1000000) die("Unreasonable value for tht.");
if (ntests < 1 || ntests > 1000) die("Unreasonable value for ntests.");
if (blocksize0 < 10 || blocksize0 > 2000) die("Unreasonable value for blocksize0.");
if (nwanted0 < 100 || nwanted0 > 2000000) die("Unreasonable value for nwanted0.");
if (intervis != 0 && intervis != 1) die("Unreasonable value for intervis.");
// Perturb blocksize so that the last block won't be really small.
blocksize = (float)nrows / (int)((float)nrows / blocksize0 + 0.5f); // floating point block size
nblocks = (int)(nrows / blocksize + 0.5f); // number of blocks
nwantedperblock = (int)((float)nwanted0 / square(nblocks) + 0.99999f); // number of wanted per block
nwanted = nwantedperblock * square(nblocks); // number of wanted
int lastsize = nrows - (int)(blocksize * (nblocks - 1)); // size of the last block
if (square(lastsize) < nwantedperblock) // too small
die("The last block is too small for nwantedperblock.");
//cout << "blocksize=" << blocksize << ", nblocks=" << nblocks
// << ", nwantedperblock=" << nwantedperblock << ", nwanted=" << nwanted << endl;
// number of rows per shed and number of words per row
const int nr = 2 * roi + 1;
const int nwpr = (nr + 63) / 64;
// Host buffers for each pipeline stage.
elevs = 0;
vix = 0;
obs = 0;
sheds = 0;
selected = 0;
elevs = new usint[square(nrows)];
vix = new unsigned char[square(nrows)];
obs = new int[nwanted];
sheds = new ullint[nwanted * nr * nwpr];
selected = new char[nwanted];
if (!elevs || !vix || !obs || !sheds || !selected)
die("Memory exhausted. Program terminates.");
/*
if (cudaMallocHost((void **)&elevs, square(nrows) * sizeof(usint)) != cudaSuccess)
die("cudaMallocHost failed");
if (cudaMallocHost((void **)&vix, square(nrows) * sizeof(unsigned char)) != cudaSuccess)
die("cudaMallocHost failed");
if (cudaMallocHost((void **)&obs, nwanted * sizeof(int)) != cudaSuccess)
die("cudaMallocHost failed");
if (cudaMallocHost((void **)&sheds, nwanted * nr * nwpr * sizeof(ullint)) != cudaSuccess)
die("cudaMallocHost failed");
*/
/*
for (int i = 0; i < nrows; i++)
for (int j = 0; j < nrows; j++) {
cin.read(reinterpret_cast<char *>(&elevs[i * nrows + j]), sizeof(usint));
if (cin.fail()) {
cout << "Error: at i=" << i << ", j=" << j << endl;
die("Input failed");
}
}
*/
// Read the raw elevation grid in one gulp.
ifstream ifs(argv[8]);
ifs.read((char *)elevs, square(nrows)*sizeof(usint));
if (ifs.fail())
die("Input failed");
ifs.close();
gettimeofday(&end, NULL);
elapsed_secs = calc_time(middle, end);
middle = end;
cout << "input:" << elapsed_secs;
// Stage 1: per-cell visibility index.
calc_vix(nrows, elevs, roi, oht, tht, ntests, vix);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
elapsed_secs = calc_time(middle, end);
middle = end;
cout << " vix:" << elapsed_secs;
/*
int x;
ullint sum = 0;
ifstream ifs("vim.bin");
for (int i = 0; i < square(nrows); i++) {
ifs >> x;
sum += square(vix[i]-x);
}
cout << " RMS VIM error:" << sqrt((double)sum/square(nrows));
ifs.close();
*/
// Stage 2: pick candidate observers (local maxima of the vix array).
find_max(nrows, vix, blocksize, nwanted, nblocks, nwantedperblock, obs);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
elapsed_secs = calc_time(middle, end);
middle = end;
cout << " findmax:" << elapsed_secs;
// Stage 3: full viewshed for each candidate.
calc_sheds(nrows, elevs, roi, oht, tht, nwanted, obs, sheds);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
elapsed_secs = calc_time(middle, end);
middle = end;
cout << " viewshed:" << elapsed_secs;
// Stage 4: greedy selection of the final observer set.
site_it(nrows, roi, intervis, nwanted, obs, sheds, selected);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
elapsed_secs = calc_time(middle, end);
middle = end;
cout << " site:" << elapsed_secs;
// Write the selected observers as "x,y" lines.
ofstream ofs(argv[9]);
for (int i = 0; i < nwanted; i++)
if (selected[i])
ofs << obs[i]/nrows << ',' << obs[i]%nrows << '\n';
ofs.close();
delete[] elevs;
delete[] vix;
delete[] obs;
delete[] sheds;
delete[] selected;
/*
if (cudaFreeHost(elevs) != cudaSuccess) die("cudaFreeHost failed");
if (cudaFreeHost(vix) != cudaSuccess) die("cudaFreeHost failed");
if (cudaFreeHost(obs) != cudaSuccess) die("cudaFreeHost failed");
if (cudaFreeHost(sheds) != cudaSuccess) die("cudaFreeHost failed");
*/
gettimeofday(&end, NULL);
elapsed_secs = calc_time(middle, end);
middle = end;
cout << " output:" << elapsed_secs;
gettimeofday(&end, NULL);
elapsed_secs = calc_time(begin, end);
cout << " total:" << elapsed_secs << endl;
}
|
14,668 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define MASK_WIDTH 5
#define TILE_WIDTH 16
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
/* Return the current wall-clock time in seconds (microsecond resolution). */
double rtclock()
{
    struct timeval now;
    struct timezone tz;
    int rc = gettimeofday(&now, &tz);
    if (rc != 0)
        printf("Error return from gettimeofday: %d", rc);
    return now.tv_sec + now.tv_usec * 1.0e-6;
}
/*
 * Read a binary ("P6") PPM image from `filename`.
 *
 * Exits the process with a diagnostic on any open/parse/allocation failure
 * (matching this file's error-handling style). Returns a heap-allocated
 * PPMImage whose `data` holds x*y RGB byte triples.
 */
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    /* Magic number: must be "P6" (binary RGB). */
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    /* Skip '#' comment lines preceding the size header. */
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
                filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    /* Consume the single whitespace byte separating header and pixel data. */
    while (fgetc(fp) != '\n')
        ;
    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    /* BUGFIX: the original rechecked `img` here; the new allocation is
       img->data, so a failed pixel-buffer malloc went undetected. */
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t) img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}
// Serialize `img` to stdout as a binary ("P6") PPM: text header (magic,
// comment, dimensions, max component) followed by x*y raw RGB byte triples.
// NOTE(review): this closes stdout, so it can only be called once per run.
void writePPM(PPMImage *img) {
fprintf(stdout, "P6\n");
fprintf(stdout, "# %s\n", COMMENT);
fprintf(stdout, "%d %d\n", img->x, img->y);
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, stdout);
fclose(stdout);
}
// Box-blur ("smooth") without shared memory: each thread averages the
// MASK_WIDTH x MASK_WIDTH neighbourhood of its pixel straight from global
// memory. Out-of-image neighbours contribute 0 while the divisor stays
// MASK_WIDTH^2, so borders come out darker (matches the shared-mem version).
__global__ void smooth_GPU_NoSM(PPMPixel *dataIn, PPMPixel *dataOut, int *width, int *height) {
// Halo radius: how many pixels the neighbourhood extends in each direction.
int sizeExtraRegion = (int)((MASK_WIDTH - 1) / 2);
int i, j, red, green, blue;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < *height && col < *width){
red = 0;
green = 0;
blue = 0;
int rowInNeighbourhood;
int colInNeighbourhood;
int indexNeighbourhood;
int numberPixelsInNeighbourhood = MASK_WIDTH * MASK_WIDTH;
// Accumulate the pixel's neighbourhood, skipping out-of-image positions.
for(i = -sizeExtraRegion; i <= sizeExtraRegion; i++){
for(j = -sizeExtraRegion; j <= sizeExtraRegion; j++){
rowInNeighbourhood = row - i;
colInNeighbourhood = col - j;
indexNeighbourhood = (rowInNeighbourhood * *width) + colInNeighbourhood;
if(rowInNeighbourhood >= 0 && rowInNeighbourhood < *height){
if(colInNeighbourhood >= 0 && colInNeighbourhood < *width){
red += dataIn[indexNeighbourhood].red;
green += dataIn[indexNeighbourhood].green;
blue += dataIn[indexNeighbourhood].blue;
}
}
}
}
// Integer mean over the full window size (not the in-bounds count).
dataOut[(row * *width) + col].red = red / (numberPixelsInNeighbourhood);
dataOut[(row * *width) + col].green = green / (numberPixelsInNeighbourhood);
dataOut[(row * *width) + col].blue = blue / (numberPixelsInNeighbourhood);
}
}
// Box-blur using a shared-memory tile: the block cooperatively stages a
// (TILE_WIDTH + MASK_WIDTH - 1)^2 region (tile plus halo) of the input, then
// each thread averages its MASK_WIDTH x MASK_WIDTH window from the tile.
// NOTE(review): the validity guard below is `(row * col) < (*width * *height)`
// rather than the usual `row < *height && col < *width` — confirm the edge
// behaviour for images whose sides are not multiples of TILE_WIDTH.
__global__ void smooth_GPU(PPMPixel *dataIn, PPMPixel *dataOut, int *width, int *height) {
// Halo radius: pixels the staged region extends beyond the tile on each side.
int sizeExtraRegion = (int)((MASK_WIDTH - 1) / 2);
// Side length of the staged region (tile + halo on both sides).
int GridSize = TILE_WIDTH + (MASK_WIDTH - 1);
// How many staged entries each thread must load (ceil of region/threads).
int gridratio = ceil((float)(GridSize * GridSize) / (blockDim.x * blockDim.y));
int startBlockRow = blockIdx.y * blockDim.y;
int startBlockCol = blockIdx.x * blockDim.x;
int it, index, x, y;
int sharedRow, sharedCol;
int i, j, red, green, blue;
__shared__ PPMPixel shared[TILE_WIDTH + (MASK_WIDTH - 1)][TILE_WIDTH + (MASK_WIDTH - 1)];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Stage the block's region into shared memory; each thread loads up to
// `gridratio` entries, zero-filling positions that fall outside the image.
for(it = 0; it < gridratio; it++){
index = (threadIdx.y * blockDim.x) + threadIdx.x + (it * blockDim.x * blockDim.y);
x = (index / GridSize );
y = index % GridSize ;
if(x < GridSize && y < GridSize ){
sharedRow = startBlockRow + x - sizeExtraRegion ;
sharedCol = startBlockCol + y - sizeExtraRegion ;
if(sharedRow >= 0 && sharedCol >= 0 && sharedCol < *width && sharedRow < *height){
shared[x][y] = dataIn[sharedRow * *width + sharedCol];
}else{
shared[x][y].red = shared[x][y].green = shared[x][y].blue = 0;
}
}
}
// Barrier: the whole tile (including halo) must be staged before any
// thread reads from it.
__syncthreads();
// If this thread maps to a valid pixel, compute the windowed average.
if((row * col) < (*width * *height)){
red = 0;
green = 0;
blue = 0;
int numberPixelsInNeighbourhood = MASK_WIDTH * MASK_WIDTH;
// The window for thread (ty, tx) starts at (ty, tx) in the staged region
// because the halo shift cancels the -sizeExtraRegion offset.
for(i = threadIdx.y; i < (threadIdx.y + MASK_WIDTH); i++){
for(j = threadIdx.x; j < (threadIdx.x + MASK_WIDTH); j++){
red += shared[i][j].red;
green += shared[i][j].green;
blue += shared[i][j].blue;
}
}
dataOut[(row * *width) + col].red = red / (numberPixelsInNeighbourhood);
dataOut[(row * *width) + col].green = green / (numberPixelsInNeighbourhood);
dataOut[(row * *width) + col].blue = blue / (numberPixelsInNeighbourhood);
}
}
/*
 * Entry point: load the P6 PPM named on the command line, run the
 * shared-memory smoothing kernel on it, and report the time (allocation,
 * copies, and kernel) measured with CUDA events.
 */
int main(int argc, char *argv[]) {
    if (argc != 2) {
        // BUGFIX: the original printed the message but fell through and
        // dereferenced argv[1], which is NULL when no argument was given.
        printf("Too many or no one arguments supplied.\n");
        return 1;
    }
    char *filename = argv[1];
    float milliseconds;

    // CUDA events bracketing device allocation + H2D copies + kernel.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    PPMImage *image = readPPM(filename);
    PPMImage *image_output = (PPMImage *) malloc(sizeof(PPMImage));
    image_output->x = image->x;
    image_output->y = image->y;
    image_output->data = (PPMPixel*) malloc(image_output->x * image_output->y * sizeof(PPMPixel));

    int w = image->x;
    int h = image->y;
    int n = image->y * image->x;

    PPMPixel *D_dataIn, *D_dataOut;
    int *D_width, *D_height;
    cudaEventRecord(start);
    cudaMalloc((void**)&D_dataIn, n * sizeof(PPMPixel));
    cudaMalloc((void**)&D_dataOut, n * sizeof(PPMPixel));
    cudaMalloc((void**)&D_width, sizeof(int));
    cudaMalloc((void**)&D_height, sizeof(int));
    cudaMemcpy(D_dataIn, image->data, n * sizeof(PPMPixel), cudaMemcpyHostToDevice);
    cudaMemcpy(D_width, &w, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(D_height, &h, sizeof(int), cudaMemcpyHostToDevice);

    // One thread per pixel; ceil-div grid so edge tiles are covered.
    dim3 threadsPorBloco(TILE_WIDTH, TILE_WIDTH);
    dim3 blocos(ceil((float)w / TILE_WIDTH), ceil((float)h / TILE_WIDTH), 1);
    smooth_GPU<<<blocos, threadsPorBloco>>>(D_dataIn, D_dataOut, D_width, D_height);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    cudaMemcpy(image_output->data, D_dataOut, n * sizeof(PPMPixel), cudaMemcpyDeviceToHost);
    cudaFree(D_dataIn);
    cudaFree(D_dataOut);
    cudaFree(D_width);
    cudaFree(D_height);
    // BUGFIX: destroy the timing events (the original leaked them).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //writePPM(image_output);
    printf("Tempo: %0.3f\n", milliseconds);

    free(image->data);
    free(image);
    free(image_output->data);
    free(image_output);
    return 0;
}
/*
*
*
* O cálculo usado para computar os valores da tabela foi
*
*
* Bwr = (N_sm)/(N_gm)
*
* onde
* Bwr: Bandwidth Reduction
* N_sm: Número de acessos a shared memory
* N_gm: Número de acessos a memoria global
*
* Na prática eu não estou usando uma matrix (ou vetor) para representar a máscara. Para realizar a computação
* eu apenas preciso do valor do block extendido (shared memory). Exemplo, se o bloco na GPU tem tamaho 16x16 e a máscara é uma 5x5
* então o tamanho da sharded memory é 20x20. A mémoria global é apenas acessada para transferir os dados para a shared
* memory. Entao
*
* N_gm = 20*20 = 400
*
* Para computar a intensidade de um pixel no bloco, nós precisamos de 5x5 acessos a SM (shared memory).
* como o bloco tem 16x16 pixels em um bloco, então temos
*
* N_sm = 16*16*5*5
*
* Bwr = (16*16*5*5)/(20*20) = 16;
*
*
* Abaixo está o Bandwidth Reduction para um único bloco.
*-----------------------------------------------------------------------------------------------------------------------------
*| | BLOCK_SIZE = 8x8 | BLOCK_SIZE = 14x14 | BLOCK_SIZE = 15x15 | BLOCK_SIZE = 16x16 | BLOCK_SIZE = 32x32 |
*|------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
*| MASK_WIDTH = 5 | 11.11 | 15.12 | 15.58 | 16.00 | 19.75 |
*| MASK_WIDTH = 7 | 16.00 | 24.01 | 25.00 | 25.91 | 34.76 |
*| MASK_WIDTH = 9 | 20.25 | 32.80 | 34.45 | 36.00 | 51.84 |
*| MASK_WIDTH = 11 | 23.90 | 41.17 | 43.56 | 45.82 | 70.24 |
*| MASK_WIDTH = 13 | 27.04 | 49.00 | 52.16 | 55.18 | 89.38 |
* -----------------------------------------------------------------------------------------------------------------------------
*
*
* Desta maneria, para saber o total de Bandwidth Reduction na imagem, precisamos saber o número de blocos gerados para
* uma dada entrada. Por exempĺo, para a entrada arq3.ppm que tem dimensões 3840x2160 (2160 linhas e 3840 colunas),
* utilizando um bloco de 16x16, temos ceil(3840/16) = 240 blocos para cada linha e ceil(2160/16) = 135 blocos em cada
* coluna. Portanto o número total de blocos é de 240*135 = 32400. Multiplicando esse valor em cada célula da tabela acima
* obtemos
*
*-----------------------------------------------------------------------------------------------------------------------------
*| | BLOCK_SIZE = 8x8 | BLOCK_SIZE = 14x14 | BLOCK_SIZE = 15x15 | BLOCK_SIZE = 16x16 | BLOCK_SIZE = 32x32 |
*|------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
*| MASK_WIDTH = 5 | 359964| 489888| 504792| 518400| 639900|
*| MASK_WIDTH = 7 | 518400| 777924| 810000| 839484| 1126224|
*| MASK_WIDTH = 9 | 656100| 1062720| 1116180| 1166400| 1679616|
*| MASK_WIDTH = 11 | 774360| 1333908| 1411344| 1484568| 2275776 |
*| MASK_WIDTH = 13 | 876096| 1587600| 1689984| 1787832| 2895912 |
* -----------------------------------------------------------------------------------------------------------------------------
*
*
*
* Conclusão: da maneira que eu implementei o meu smooth, o Bandwidth Reduction é proporcional ao tamanho
* da máscara e ao tamanho do bloco. Abaixo esta os tempos e os speeds ups obtidos para cada uma das imagens considerando um bloco de
* 16x16 e uma máscara de 5x5.
*
*
*Abaxo segue as medicoes e speed ups obtidos
* ---------------------------------------------------------------------------------------------------------
* |Entrada |CPU_Serial (ms) |GPU_SharedMemory (ms) |GPU_NoSharedMemory (ms) |Speedup(CPU/GPUSM) |
* ---------------------------------------------------------------------------------------------------------
* |arq1.ppm |160.054 | 0.416 |1.096 |384.74 |
* |arq2.ppm |361.446 | 0.878 |2.421 |411.67 |
* |arq3.ppm |1420.776 | 3.310 |9.533 |429.24 |
* ---------------------------------------------------------------------------------------------------------
*
* A tabela abaixo foi o tempo em ms (milisegundos) obtidos para a ar3.ppm (apenas), variando o tamanho do bloco
* e o tamanho da máscara.
*
* ------------------------------------------------------------------------------------------------------
* | |BLOCK_SIZE=8x8 |BLOCK_SIZE=14x14|BLOCK_SIZE=15x15|BLOCK_SIZE=16x16|BLOCK_SIZE=32x32 |
* ------------------------------------------------------------------------------------------------------
* |MASK_WIDTH=5 | 5.734 | 3.954 | 3.816 | 3.310 | 3.517 |
* |MASK_WIDTH=7 | 8.806 | 7.194 | 7.263 | 5.125 | 5.067 |
* |MASK_WIDTH=9 | 11.150 | 11.454 | 11.151 | 7.371 | 7.014 |
* |MASK_WIDTH=11| 18.258 | 16.342 | 15.701 | 10.347 | 9.673 |
* |MASK_WIDTH=13| 25.179 | 21.975 | 21.018 | 18.684 | 12.710 |
* ------------------------------------------------------------------------------------------------------
*
* Conclusao: O tamanho da mascara para computar o valor do pixel afeta consideravelmente o tempo de execução do
* programa.
*
* */
|
14,669 | /*#include "RulePossibilities.cuh"
#include <numeric>
#include "LargeNumber.h"
#include "Bigint.h"
#include "Timer.h"
int temp;
RulePossibilities::RulePossibilities(
int lineSize, int lineIndex, bool isRow, const std::vector<int>& rules) :
lineSize(lineSize), lineIndex(lineIndex), isRow(isRow), rules(rules) {
LoadAllPossibilities();
}
RulePossibilities::~RulePossibilities()
{
}
Cell * RulePossibilities::getLinePtr(int lineIndex)
{
return &possibilities[lineIndex * lineSize];
}
void RulePossibilities::LoadAllPossibilities()
{
Timer::start("calcAllPossibilities");
possSize = calcPossSize();
MemAllocSherd(&possibilities, sizeof(Cell)*possSize*lineSize);
std::cout << "rule: (";
for (int i = 0; i < rules.size(); i++)
std::cout << rules.at(i) << " ";
std::cout << ") pos: " << possSize;
temp = 0;
int numInserted = 0;
fillPossibilities(0,rules.begin(), numInserted, std::vector<Cell>(lineSize, Cell::WHITE));
std::cout << " recur iteration: " << temp << " num inserted: " << numInserted << std::endl;
Timer::stop("calcAllPossibilities");
}
void RulePossibilities::fillPossibilities(
int startIndex,
std::vector<int>::const_iterator currRule,
int& insertLineIndex,
std::vector<Cell> currLine) {
temp++;
if (currRule == rules.end()) {
//MemAllocSherd(&possibilities[insertLineIndex], sizeof(Cell) * lineSize);
cudaMemcpy(getLinePtr(insertLineIndex), currLine.data(), sizeof(Cell)*lineSize, cudaMemcpyHostToDevice);
insertLineIndex++;
return;
}
const int dist = std::distance(currRule, rules.end()) - 1;
const int sum = std::accumulate(currRule, rules.end(), 0);
if (startIndex + dist + sum > lineSize)
return;
if(startIndex + dist + sum + 1 <= lineSize)
fillPossibilities(startIndex + 1, currRule, insertLineIndex, currLine);
paint(currLine, startIndex, *currRule);
fillPossibilities(startIndex + *currRule + 1, currRule + 1, insertLineIndex, currLine);
}
void RulePossibilities::paint(std::vector<Cell>& line, int startIndex, int rule) {
for (int i = startIndex; i < startIndex + rule; i++) {
line.at(i) = Cell::BLACK;
}
}
int RulePossibilities::calcPossSize()
{
int res1;
const int ruleSum = std::accumulate(rules.begin(), rules.end(), 0);
const int W = lineSize - ruleSum - rules.size() + 1;
{
Timer::start("LargeNumber");
LargeNumber n1 = LargeNumber::Factorial(rules.size() + W);
LargeNumber n2 = LargeNumber::Factorial(rules.size());
LargeNumber n3 = LargeNumber::Factorial(W);
n2.Multiply(n3);
res1 = n1.Divide(n2);
Timer::stop("LargeNumber");
}
//assert(res2 == res1);
return res1;
}
*/ |
14,670 | #define MODE_MANDEL 1
#define MODE_MANDEL_DISTANCE 2
#define MODE_JULIA 3
#define WIDTH gridDim.x*blockDim.x
#define HEIGHT gridDim.y*blockDim.y
#define X ((blockIdx.x * blockDim.x) + threadIdx.x)
#define Y ((blockIdx.y * blockDim.y) + threadIdx.y)
// Escape-time fractal kernel: Mandelbrot, Mandelbrot with distance
// estimation, or Julia, selected by `mode`. One thread per pixel of the
// tile; writes the iteration count, final z, and (for the distance mode)
// the running derivative dz into flat per-tile output buffers.
//   tile  : int4 — .z is the tile width, .w the tile height (pixels)
//   area  : double4 — (re0, im0, re_step, im_step) pixel-to-plane mapping
//   julia : double2 — the Julia constant c (used only when mode == MODE_JULIA)
// Iteration stops when |z|^2 >= sqrEscapeRadius or maxIterations is reached.
extern "C"
__global__ void compute(
int *iters,
double *lastValuesR,
double *lastValuesI,
double *distancesR,
double *distancesI,
const int mode,
const int4 tile,
const double4 area,
const double2 julia,
const int maxIterations,
const double sqrEscapeRadius
) {
if (X >= tile.z || Y >= tile.w) { // tile.z is width of tile, tile.w is height of tile
return;
}
const double x = area.x + X * area.z;
const double y = area.y + Y * area.w;
// Mandelbrot iterates z^2 + pixel; Julia iterates z^2 + fixed constant.
const double cr = mode == MODE_JULIA ? julia.x : x;
const double ci = mode == MODE_JULIA ? julia.y : y;
// if ( X % 100 == 0 )
// printf("compute %d %d ",X,Y);
//
const double escape = sqrEscapeRadius;
double zr = x;
double zi = y;
double new_zr = 0.0f;
// Derivative accumulator for the distance estimate: dz' = 2*z*dz + 1.
double dr = 1;
double di = 0;
double new_dr;
int count = 0;
for (; count < maxIterations; count++) {
const double zrsqr = zr * zr;
const double zisqr = zi * zi;
if ((zrsqr + zisqr) >= escape) {
break;
}
if (mode == MODE_MANDEL_DISTANCE) {
new_dr = 2.0f * (zr * dr - zi * di) + 1.0f;
di = 2.0f * (zr * di + zi * dr);
dr = new_dr;
}
new_zr = (zrsqr - zisqr) + cr;
zi = ((2.0f * zr) * zi) + ci;
zr = new_zr;
//If in a periodic orbit, assume it is trapped
if (zr == 0.0 && zi == 0.0) {
count = maxIterations;
break;
}
}
const int tIndex = X + Y * tile.z; // tile.z is width of tile
iters[tIndex] = count;
lastValuesR[tIndex] = (double) zr;
lastValuesI[tIndex] = (double) zi;
if (mode == MODE_MANDEL_DISTANCE) {
distancesR[tIndex] = (double) dr;
distancesI[tIndex] = (double) di;
}
}
|
14,671 | #include "includes.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Fill an interleaved RGB float framebuffer with a gradient test pattern:
// red ramps with x, green ramps with y, blue stays zero.
__global__ void render(float *fb, int max_x, int max_y) {
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= max_x || row >= max_y)
        return;
    float *px = fb + 3 * (row * max_x + col);
    px[0] = float(col) / max_x;
    px[1] = float(row) / max_y;
    px[2] = 0.0f;
}
14,672 | #include <cuda_runtime_api.h>
#include <stdint.h>
// Forward weighted logistic negative log-likelihood, one thread per sample.
// For each idx < batch_size:
//   pred[idx] = sigmoid(in_buf[idx])
//   loss[idx] = -weights[idx] * ((1-t)*log(1-pred) + t*log(pred)),
// where t = 1.0 if binary_labels[idx] > 0, else 0.0.
// NOTE(review): logf yields -inf if the sigmoid saturates to exactly 0 or 1
// in float — confirm inputs are bounded if that matters to callers.
__global__ void logistic_nll_loss_fwd_kernel(
const float *in_buf,
uint32_t batch_size,
const uint32_t *binary_labels,
const float *weights,
float *loss,
float *pred)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < batch_size) {
float x = in_buf[idx];
float y = 1.0f / (1.0f + expf(-x));
float y_target = (float)(binary_labels[idx] > 0);
float ell = -weights[idx] * ((1.0f - y_target) * logf(1.0f - y) + y_target * logf(y));
loss[idx] = ell;
pred[idx] = y;
}
}
// Host-side C-ABI launcher for the forward logistic NLL loss: one thread
// per sample, 1024-thread blocks, ceil-div grid, enqueued on `stream`
// without synchronizing (callers must sync before reading loss/pred).
// NOTE(review): batch_size == 0 would launch a zero-block grid, which the
// runtime rejects as an invalid configuration — guard upstream if possible.
extern "C" void neuralops_cuda_logistic_nll_loss_fwd(
const float *in_buf,
size_t batch_size,
const uint32_t *binary_labels,
const float *weights,
float *loss,
float *pred,
cudaStream_t stream)
{
logistic_nll_loss_fwd_kernel<<<(batch_size+1024-1)/1024, 1024, 0, stream>>>(
in_buf, batch_size, binary_labels, weights, loss, pred);
}
// Gradient of the weighted logistic NLL w.r.t. the pre-sigmoid input:
// d/dx [-w * log-likelihood] = w * (sigmoid(x) - target).
// One thread per sample; threads past batch_size do nothing.
__global__ void logistic_nll_loss_bwd_kernel(
const float *in_buf,
uint32_t batch_size,
const uint32_t *binary_labels,
const float *weights,
float *in_delta)
{
uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= batch_size) {
return;
}
const float sigma = 1.0f / (1.0f + expf(-in_buf[i]));
const float target = binary_labels[i] > 0 ? 1.0f : 0.0f;
in_delta[i] = weights[i] * (sigma - target);
}
// Host-side C-ABI launcher for the backward logistic NLL loss: one thread
// per sample, 1024-thread blocks, ceil-div grid, asynchronous on `stream`.
// NOTE(review): as with the forward launcher, batch_size == 0 launches an
// empty grid, which CUDA rejects — guard in the caller if possible.
extern "C" void neuralops_cuda_logistic_nll_loss_bwd(
const float *in_buf,
size_t batch_size,
const uint32_t *binary_labels,
const float *weights,
float *in_delta,
cudaStream_t stream)
{
logistic_nll_loss_bwd_kernel<<<(batch_size+1024-1)/1024, 1024, 0, stream>>>(
in_buf, batch_size, binary_labels, weights, in_delta);
}
|
14,673 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
#define TRILLION 1000000000000
#define MILLION 1000000
#define THREADS_PER_BLOCK 1024
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
//*************************************
//
// Kernel Helpers
//
//*************************************
// Device-side modular exponentiation: returns (x^y) mod p computed with
// right-to-left binary square-and-multiply in 64-bit unsigned arithmetic.
// NOTE(review): (longx*longx) % p can wrap for p near 2^32 or larger —
// callers here pass p = n < ~10^6, where this is safe.
__device__ long long int power_mod_long_device(int x, unsigned long long int y, unsigned long long int p)
{
unsigned long long int longx = x;
unsigned long long int res = 1; // Initialize result
longx = longx % p; // Update x if it is more than or
// equal to p
while (y > 0)
{
// If y is odd, multiply x with result
if (y & 1) {
res = (res*longx) % p;
}
// y must be even now
y = y>>1; // y = y/2
longx = (longx*longx) % p;
}
return res;
}
//*************************************
//
// Kernel
//
//*************************************
// AKS step-5 congruence check, one thread per candidate value a = i.
// Each thread with i <= max tests whether
//   summod + a^n - a  is divisible by n
// (summod is the host-precomputed (nCr % n) * a^(n-r) term); any failing
// thread flags *ret = 1 (composite). *ret must be zeroed before launch.
// NOTE(review): thread i == 0 also runs the test, whereas the commented-out
// host loop in aks() started at a = 1 — confirm this is intended.
__global__ void trial_division_kernel(unsigned long long int n, int *ret, int summod, int max){
unsigned long long int i = (threadIdx.x + blockIdx.x * blockDim.x);
// if (i*i <= n){
// if (((n % i) == 0) || ((n % (i+2)) == 0))
// *ret = 1;
// }
long long int sum = 0;
if (i <= max){
//sum += nCrModpLucas(n,r,n) * int_pow(a,diff);
sum += summod;
sum += power_mod_long_device(i, n, n);
sum -= i;
// sum is signed but n is unsigned; the comparison converts sum to
// unsigned long long — fine here because sum >= 0 after the additions.
if ((sum % n) != 0){
*ret = 1;
}
}
}
//*************************************
//
// Helper Functions
//
//*************************************
/* Binomial coefficient C(n, k), computed with an incremental product that
 * divides early whenever it can to delay 64-bit overflow. */
long long combi(int n, int k)
{
    long long acc = 1;
    if (k > n - k)
        k = n - k;               /* C(n,k) == C(n,n-k); use the smaller k */
    for (int j = 1; j <= k; j++, n--) {
        if (n % j == 0)
            acc *= n / j;        /* divide the new factor first */
        else if (acc % j == 0)
            acc = acc / j * n;   /* divide the accumulator first */
        else
            acc = (acc * n) / j; /* fall back: multiply then divide */
    }
    return acc;
}
/* Modular exponentiation: (x^y) mod p by right-to-left binary
 * square-and-multiply, all intermediates in unsigned 64-bit. */
long long int power_mod_long(int x, unsigned long long int y, unsigned long long int p)
{
    unsigned long long int base = (unsigned long long int) x % p;
    unsigned long long int acc = 1;
    while (y > 0) {
        if (y & 1ULL)
            acc = (acc * base) % p;   /* fold in the current bit's factor */
        base = (base * base) % p;     /* square for the next bit */
        y >>= 1;
    }
    return acc;
}
/* Integer exponentiation by squaring (no overflow protection). */
int int_pow(int base, int exp)
{
    int acc = 1;
    /* exp /= 2 (not >>1) so a negative exp still terminates, as before */
    for (; exp != 0; exp /= 2) {
        if (exp % 2)
            acc *= base;
        base *= base;
    }
    return acc;
}
/*
 * Modular exponentiation for int arguments: returns (x^y) mod p.
 *
 * BUGFIX/robustness: the original computed (x*x) % p in 32-bit int, which
 * overflows (undefined behaviour) as soon as p exceeds ~46341. All
 * intermediates are now 64-bit, so any positive int modulus is safe; results
 * for previously-valid inputs are unchanged.
 */
int power_mod(int x, unsigned int y, int p)
{
    long long res = 1;
    long long base = x % p;   /* reduce first, as the original did */
    while (y > 0) {
        /* If the low bit of y is set, fold in the current power of x. */
        if (y & 1)
            res = (res * base) % p;
        y = y >> 1;               /* y = y/2 */
        base = (base * base) % p; /* square for the next bit */
    }
    return (int) res;
}
/* Greatest common divisor via Euclid's remainder algorithm. */
int gcd(int a, int b)
{
    while (b != 0) {
        int r = a % b;
        a = b;
        b = r;
    }
    return a;
}
// step 5 helpers
/* Euler's totient: the count of 1 <= k <= n with gcd(k, n) == 1.
 * O(n log n); the Euclid step is inlined to keep this self-contained. */
int eulerPhi(int n){
    int count = 0;
    for (int k = 1; k <= n; k++) {
        int a = k, b = n;
        while (b != 0) {          /* inline gcd(k, n) */
            int r = a % b;
            a = b;
            b = r;
        }
        count += (a == 1);
    }
    return count;
}
/* nCr mod p by building Pascal's triangle row by row in O(n*r) time and
 * O(r) space; after processing row n, C[r] holds C(n, r) mod p. */
int nCrModpDP(int n, int r, int p)
{
    int C[r + 1];                 /* rolling row of the triangle */
    for (int j = 0; j <= r; j++)
        C[j] = 0;
    C[0] = 1;                     /* C(0, 0) = 1 */
    for (int i = 1; i <= n; i++) {
        /* Update in place, right to left, so each C[j-1] is still the
         * previous row's value when it is read. */
        int upper = (i < r) ? i : r;
        for (int j = upper; j > 0; j--)
            C[j] = (C[j] + C[j - 1]) % p;   /* nCj = (n-1)Cj + (n-1)C(j-1) */
    }
    return C[r];
}
// Lucas Theorem based function that returns nCr % p
// This function works like decimal to binary conversion
// recursive function. First we compute last digits of
// n and r in base p, then recur for remaining digits
// Lucas' theorem: nCr mod p (p must be prime), combining per-digit binomials
// computed by nCrModpDP over the base-p digits of n and r.
int nCrModpLucas(int n, int r, int p)
{
// Base case
if (r==0)
return 1;
// Compute last digits of n and r in base p
int ni = n%p, ri = r%p;
// Compute result for last digits computed above, and
// for remaining digits. Multiply the two results and
// compute the result of multiplication in modulo p.
return (nCrModpLucas(n/p, r/p, p) * // Last digits of n and r
nCrModpDP(ni, ri, p)) % p; // Remaining digits
}
//*************************************
//
// AKS ALGORITHM
//
//*************************************
/*
 * AKS primality test with a GPU-parallelised step 5.
 * Returns 1 if n is prime, 0 otherwise.
 * Per the author's note in main(), only validated for n < 1,010,000.
 */
int aks(unsigned long long int n){
    if (n <= 1)
        return 0;
    /* Step 1: composite if n is a perfect power a^b for some b >= 2. */
    int b; double a;
    for (b = 2; b <= log(n)/log(2); b++){
        a = pow(n, 1/(double)b);
        if (a == floor(a))
            return 0;
    }
    /* Step 2: find the smallest r such that the multiplicative order of
       n mod r exceeds (log2 n)^2. */
    double maxk = pow((log(n)/log(2)), 2);
    double maxr = MAX(3, pow((log(n)/log(2)), 5));
    int nextR = 1;
    int r, k;
    for (r = 2; (nextR && (r < maxr)); r++){
        nextR = 0;
        for (k = 1; ((!nextR) && (k <= maxk)); k++){
            nextR = ((power_mod(n,k,r) == 1) || (power_mod(n,k,r) == 0));
        }
    }
    r--;  /* loop overshoots by one */
    /* Step 3: composite if 1 < gcd(a,n) < n for some a <= r. */
    int i;
    for (i = r; i > 1; i--){
        int g = gcd(i, n);
        if ((g > 1) && (g < n))
            return 0;
    }
    /* Step 4: if n <= r, n is prime at this point. */
    if (n <= r)
        return 1;
    /* Step 5 (GPU): for a = 1..floor(log2(n)*sqrt(phi(r))), check
       (X+a)^n == X^n + a (mod X^r - 1, n). The polynomial reduces to the
       closed form a^n + (nCr % n)*a^(n-r) - a; all candidate a values are
       tested in parallel by trial_division_kernel. */
    int max = floor((log(n)/log(2)) * sqrt(eulerPhi(r)));
    int diff = n - r;
    long long int sum = 0;
    /* NOTE(review): `a` here is the (double) leftover of the step-1 loop,
       kept for bit-compatibility with the original (the commented-out host
       loop used the same expression) — confirm the intended base of
       int_pow. `sum` is also narrowed to int at the kernel boundary. */
    sum += nCrModpLucas(n,r,n) * int_pow(a,diff);
    int ret = 0;
    int *d_ret;
    cudaMalloc((void **)&d_ret, sizeof(int));
    /* BUGFIX: the kernel only ever writes 1, so the flag must be zeroed on
       the device first; the original copied back uninitialised memory
       whenever no thread flagged the number composite. */
    cudaMemset(d_ret, 0, sizeof(int));
    trial_division_kernel<<<(max + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(n, d_ret, sum, max);
    cudaMemcpy(&ret, d_ret, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_ret);  /* BUGFIX: was leaked once per call */
    if (ret)
        return 0;
    /* Step 6: n is prime. */
    return 1;
}
//*************************************
//
// Main
//
//*************************************
/* Timing harness: run aks() over [1,000,000, 1,010,000), print each prime
 * found, then report total elapsed time, mean time per test, and the count. */
int main(void){
    clock_t t0 = clock();
    unsigned long long int candidate;
    int numprimes = 0;
    // PLEASE NOTE: this algorithm is only verified to work with values < 1,010,000
    // above that overflow errors may occur
    for (candidate = 1000*1000; candidate < 1000*1000 + 10000; candidate++) {
        if (aks(candidate)) {
            numprimes++;
            printf("%llu is prime \n", candidate);
        }
    }
    clock_t t1 = clock();
    double time_spent = (double)(t1 - t0) / CLOCKS_PER_SEC;
    printf("Time elpased is %f seconds\n", time_spent);
    printf("In the 1000 trillion range average time %f\n", time_spent/10000);
    printf("number of primes in this range %d\n", numprimes);
}
|
14,674 | #include "includes.h"
// Dot-product of a filter_dim x filter_dim window of `image` (whose rows are
// `stride` floats apart) with the convolution kernel `matrix`.
// Callable from both host and device code.
__device__ __host__ float cpu_applyFilter(float *image, int stride, float *matrix, int filter_dim)
{
    float acc = 0.0f;
    for (int row = 0; row < filter_dim; row++)
    {
        const float *img_row = image + row * stride;
        const float *ker_row = matrix + row * filter_dim;
        for (int col = 0; col < filter_dim; col++)
            acc += img_row[col] * ker_row[col];
    }
    return acc;
}
// 3x3 Gaussian blur using a shared-memory tile with halo.
// Each thread stages its own pixel; threads on the block's right/bottom
// edges additionally stage the halo columns/rows. BLOCK_SIZE and
// BLOCK_SIZE_SH are defined elsewhere in this project — presumably
// BLOCK_SIZE_SH == BLOCK_SIZE + 2 to fit the 3x3 halo (TODO confirm).
__global__ void gpu_gaussian(int width, int height, float *image, float *image_out)
{
float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f,
2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f,
1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f };
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int offset_t = index_y * width + index_x; // Input for function
int offset = (index_y + 1) * width + (index_x + 1); // Output to store in result
// sh_block = handle_shared_memory(image, offset_t, index_x, index_y, width);
__shared__ float sh_block[BLOCK_SIZE_SH * BLOCK_SIZE_SH];
// Shared memory offset (for input value):
int offset_shared = threadIdx.y * BLOCK_SIZE_SH + threadIdx.x;
// Threads in the block's last column stage the two halo columns.
if (index_x != 0 && (index_x+1) % BLOCK_SIZE == 0) {
// Edge-case x-direction:
sh_block[offset_shared + 1] = image[offset_t + 1];
sh_block[offset_shared + 2] = image[offset_t + 2];
}
// Threads in the block's last row stage the two halo rows.
if (index_y != 0 && (index_y+1) % BLOCK_SIZE == 0) {
// Edge-case y-direction:
sh_block[offset_shared + BLOCK_SIZE_SH] = image[offset_t + width];
sh_block[offset_shared + 2*BLOCK_SIZE_SH] = image[offset_t + 2*width];
}
// The bottom-right corner thread stages the 2x2 corner of the halo.
if ((index_x != 0 && (index_x+1) % BLOCK_SIZE == 0) && (index_y != 0 && (index_y+1) % BLOCK_SIZE == 0)) {
// Edge-case x & y-direction:
sh_block[offset_shared + BLOCK_SIZE_SH + 1] = image[offset_t + width + 1];
sh_block[offset_shared + BLOCK_SIZE_SH + 2] = image[offset_t + width + 2];
sh_block[offset_shared + 2*BLOCK_SIZE_SH + 1] = image[offset_t + 2*width + 1];
sh_block[offset_shared + 2*BLOCK_SIZE_SH + 2] = image[offset_t + 2*width + 2];
}
sh_block[offset_shared] = image[offset_t];
// Barrier: the tile and halo must be fully staged before convolving.
__syncthreads();
// NOTE(review): no guard against reading past the image for blocks on the
// image's right/bottom border — confirm launches are sized accordingly.
if (index_x < (width - 2) && index_y < (height - 2))
{
image_out[offset] = cpu_applyFilter(&sh_block[offset_shared],
BLOCK_SIZE_SH, gaussian, 3);
}
}
14,675 | //This file contains wrappers for C/CUDA functions to be called from Fortran.
#include <stdio.h>
#ifndef NO_GPU
#include <cuda.h>
#include <cuda_runtime.h>
//Protect the C function names from name mangling:
#ifdef __cplusplus
extern "C"{
#endif
void cudagetdevicecount(int* count, int* err_code);
void cudasetdevice(int device, int* err_code);
void cudagetdeviceproperties(int device, size_t *totalGlobalMem_, size_t *sharedMemPerBlock_,
int *regsPerBlock_, int *warpSize_, int *maxThreadsPerBlock_, int *maxThreadsDim_, int *maxGridSize_,
int *clockRate_, size_t *totalConstMem_, int *major_, int *minor_, int *deviceOverlap_,
int *multiProcessorCount_, int *concurrentKernels_, int *ECCEnabled_, int *asyncEngineCount_,
int *memoryClockRate_, int *memoryBusWidth_, int *maxThreadsPerMultiProcessor_, int* err_code);
void cudadevicesynchronize(int *err_code);
#ifdef __cplusplus
}
#endif
#endif
#ifdef __cplusplus
extern "C"{
#endif
int string_len(const char * str);
char * ptr_offset(char * byte_ptr, size_t byte_offset);
size_t c_ptr_value(void * c_ptr);
void c_ptr_set(size_t cpval, void ** cptr);
void print_c_ptr(void * c_ptr);
#ifdef __cplusplus
}
#endif
//-------------------------------------------------------------------
#ifndef NO_GPU
//C Wrappers (called from Fortran to invoke CUDA run-time functions):
//Query the number of CUDA devices (Fortran-callable wrapper).
//On failure, prints the CUDA error string and sets *err_code=1; else 0.
void cudagetdevicecount(int* count, int* err_code){
 *err_code=0;
 cudaError_t err=cudaGetDeviceCount(count);
 if(err!=cudaSuccess){
  printf("#ERROR(cudagetdevicecount): %s \n",cudaGetErrorString(err));
  *err_code=1;
 }
 return;
}
//Select `device` as the current CUDA device (Fortran-callable wrapper).
//On failure, prints the CUDA error string and sets *err_code=1; else 0.
void cudasetdevice(int device, int* err_code){
 *err_code=0;
 cudaError_t err=cudaSetDevice(device);
 if(err!=cudaSuccess){
  printf("#ERROR(cudasetdevice): %s \n",cudaGetErrorString(err));
  *err_code=1;
 }
 return;
}
//Fetch the cudaDeviceProp fields for `device` into the caller-supplied
//scalars/arrays — a Fortran-callable flattening of the struct.
//err_code: 0 on success, 1 on failure; outputs are untouched on failure.
void cudagetdeviceproperties(int device, size_t *totalGlobalMem_, size_t *sharedMemPerBlock_,
int *regsPerBlock_, int *warpSize_, int *maxThreadsPerBlock_, int *maxThreadsDim_, int *maxGridSize_,
int *clockRate_, size_t *totalConstMem_, int *major_, int *minor_, int *deviceOverlap_,
int *multiProcessorCount_, int *concurrentKernels_, int *ECCEnabled_, int *asyncEngineCount_,
int *memoryClockRate_, int *memoryBusWidth_, int *maxThreadsPerMultiProcessor_, int* err_code){
cudaError_t err; const char* err_msg; cudaDeviceProp prop;
*err_code=0;
err=cudaGetDeviceProperties(&prop,device);
if(err!=cudaSuccess){
err_msg=cudaGetErrorString(err);
printf("#ERROR(cudagetdeviceproperties): %s \n",err_msg);
*err_code=1;
}else{
//Straight field-by-field copy of the properties struct:
*totalGlobalMem_=prop.totalGlobalMem;
*sharedMemPerBlock_=prop.sharedMemPerBlock;
*regsPerBlock_=prop.regsPerBlock;
*warpSize_=prop.warpSize;
*maxThreadsPerBlock_=prop.maxThreadsPerBlock;
maxThreadsDim_[0]=prop.maxThreadsDim[0]; maxThreadsDim_[1]=prop.maxThreadsDim[1]; maxThreadsDim_[2]=prop.maxThreadsDim[2];
maxGridSize_[0]=prop.maxGridSize[0]; maxGridSize_[1]=prop.maxGridSize[1]; maxGridSize_[2]=prop.maxGridSize[2];
*clockRate_=prop.clockRate;
*totalConstMem_=prop.totalConstMem;
*major_=prop.major; *minor_=prop.minor;
*deviceOverlap_=prop.deviceOverlap;
*multiProcessorCount_=prop.multiProcessorCount;
*concurrentKernels_=prop.concurrentKernels;
*ECCEnabled_=prop.ECCEnabled;
*asyncEngineCount_=prop.asyncEngineCount;
*memoryClockRate_=prop.memoryClockRate;
*memoryBusWidth_=prop.memoryBusWidth;
*maxThreadsPerMultiProcessor_=prop.maxThreadsPerMultiProcessor;
};
return;
}
//Block until the device has finished all preceding work.
//err_code: 0 on success, 1 on any CUDA error.
void cudadevicesynchronize(int *err_code)
{
 *err_code = (cudaDeviceSynchronize() == cudaSuccess) ? 0 : 1;
 return;
}
//----------------------------------------------------------------------------
//Auxiliary functions:
//Length of a NUL-terminated C string, capped at INT_MAX characters.
int string_len(const char* str){
 const int max_string_len=2147483647;
 int n=0;
 while(n<max_string_len && str[n]!=0) n++;
 return n;
}
//Advance a byte pointer by byte_offset bytes.
char* ptr_offset(char *byte_ptr, size_t byte_offset){
 return byte_ptr + byte_offset;
}
//Reinterpret a C pointer as an unsigned integer address.
size_t c_ptr_value(void * c_ptr){
 size_t addr=(size_t)c_ptr;
 return addr;
}
//Store the integer address cpval into the pointer object *cptr.
void c_ptr_set(size_t cpval, void ** cptr){
 void *p=(void*)cpval;
 *cptr=p;
 return;
}
//Print a C pointer's address to stdout (no trailing newline).
void print_c_ptr(void * c_ptr){ //prints a C-pointer
printf("%p",c_ptr);
return;
}
|
14,676 | #include "includes.h"
// Single-block max-reduction: writes the maximum squared value of
// force_per_atom[0..size) to force_square_max[0].
// Expects a launch of one block of 1024 threads: the shared buffer is
// hard-coded to 1024 entries and each thread strides by 1024 per round,
// so number_of_rounds must be >= ceil(size / 1024). The tree reduction
// below additionally assumes blockDim.x is a power of two — TODO confirm
// the launch configuration at the call site.
__global__ void gpu_calculate_force_square_max( const int size, const int number_of_rounds, const double* force_per_atom, double* force_square_max)
{
const int tid = threadIdx.x;
__shared__ double s_force_square[1024];
s_force_square[tid] = 0.0;
// Per-thread maximum over its strided slice of the array.
double force_square = 0.0;
for (int round = 0; round < number_of_rounds; ++round) {
const int n = tid + round * 1024;
if (n < size) {
const double f = force_per_atom[n];
if (f * f > force_square)
force_square = f * f;
}
}
s_force_square[tid] = force_square;
__syncthreads();
// Tree reduction in shared memory, halving the active range each step.
for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) {
if (tid < offset) {
if (s_force_square[tid + offset] > s_force_square[tid]) {
s_force_square[tid] = s_force_square[tid + offset];
}
}
__syncthreads();
}
// Thread 0 publishes the block-wide maximum.
if (tid == 0) {
force_square_max[0] = s_force_square[0];
}
}
14,677 | #include <stdio.h>
#include <cuda_runtime.h>
/* Element-wise vector add: C[i] = A[i] + B[i] for i in [0, N).
   One thread per element; callers launch ceil(N / blockDim.x) blocks. */
__global__ void VectorAdd(int* A, int* B, int* C, int N)
{
 int idx = threadIdx.x + blockIdx.x * blockDim.x;
 /* BUG FIX: the original guard was `idx > N`, which let thread idx == N
    read and write one element past the end of all three arrays. */
 if(idx >= N)
 return;
 C[idx] = A[idx] + B[idx];
}
/* Fill data[0..N-1] with the values 0..N-1. */
void GenerateData(int* data, int N)
{
 for(int i = 0; i < N; ++i)
  data[i] = i;
}
/* Allocates host and device buffers of N ints, fills A and B with 0..N-1,
   runs VectorAdd on the device, prints the result, and releases everything.
   NOTE(review): no CUDA calls are error-checked, and `nBytes` is a plain
   int, so very large N would overflow -- acceptable for a demo only. */
void VectorAddOnDevice(int N)
{
int* HA = (int*)malloc(sizeof(int) * N);
int* HB = (int*)malloc(sizeof(int) * N);
int* HC = (int*)malloc(sizeof(int) * N);
GenerateData(HA, N);
GenerateData(HB, N);
int nBytes = N * sizeof(int);
int* DA;
int* DB;
int* DC;
cudaMalloc((int**)&DA, nBytes);
cudaMalloc((int**)&DB, nBytes);
cudaMalloc((int**)&DC, nBytes);
cudaMemcpy(DA, HA,nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(DB, HB,nBytes, cudaMemcpyHostToDevice);
// One thread per element, 32-thread blocks, ceil-divided grid.
dim3 block(32);
dim3 grid((N + block.x -1) / block.x);
VectorAdd<<<grid, block>>>(DA, DB,DC,N);
// This blocking copy also synchronizes with the kernel above.
cudaMemcpy(HC, DC, nBytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < N;++i)
printf("%d ", HC[i]);
printf("\n");
cudaFree(DA);
cudaFree(DB);
cudaFree(DC);
free(HA);
free(HB);
free(HC);
} |
14,678 | /*
Assignment: ECE 451 Programming Assignment 2
Code: GpuSumPrimes.cu
Group: David Swanson, Daniel Caballero, Michael Wilder
Description: This code adds all the prime numbers up to a certain number input by the user.
This code takes one parameter (SIZE) from the user and uses the CUDA library to run the
calculations needed in parallel on 1024 threads.
*/
#include <stdio.h>
#define BLOCK_SIZE 1024
/* SumPrimes the function on the device that calculates if a number is prime. It takes a
pointer to the allocated array on the GPU and the size of the array. */
/* One thread per candidate number: stores `index` into device_array[index]
   if index is prime, 0 otherwise. Threads at or past SIZE do nothing. */
__global__ void SumPrimes (int *device_array, int SIZE) {
 int index = threadIdx.x + blockIdx.x * blockDim.x;
 if (index >= SIZE)
  return;
 /* 0 and 1 are not prime by definition. */
 if (index < 2) {
  device_array[index] = 0;
  return;
 }
 /* Trial division up to sqrt(index): assume prime until a divisor hits. */
 int marked = index;
 for (int div = 2; div * div <= index; div++) {
  if (index % div == 0) {
   marked = 0;
   break;
  }
 }
 device_array[index] = marked;
}
/* The main function of the code allocates memory on the host and device, transfers data
between the two, and calls the SumPrimes function. */
/* Driver: lets the GPU mark every prime up to the user-supplied limit,
   then sums the marked entries on the host. */
int main(int argc, char* argv []){
 /* Guard the argv[1] access the original dereferenced unconditionally. */
 if (argc < 2) {
  fprintf(stderr, "Usage: %s <limit>\n", argv[0]);
  return 1;
 }
 int SIZE = atoi(argv[1]) + 1;
 int i;
 long int sum = 0;
 int *host_array;
 int *device_array;
 host_array = (int *)malloc(SIZE*sizeof(int));
 cudaMalloc(&device_array, SIZE*sizeof(int));
 /* The kernel writes every element of device_array, so the original
    host-to-device copy of the uninitialized host buffer was dropped. */
 // A 1D grid; blocksPerGrid is ceil-divided to cover SIZE without overflow.
 dim3 blocksPerGrid((SIZE + BLOCK_SIZE - 1)/BLOCK_SIZE,1,1);
 dim3 threadsPerBlock(BLOCK_SIZE,1,1);
 SumPrimes <<<blocksPerGrid, threadsPerBlock>>>(device_array, SIZE);
 /* Blocking copy: also synchronizes with the kernel. */
 cudaMemcpy(host_array, device_array, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
 cudaFree(device_array);
 printf("I am adding: ");
 /* Only prime slots are non-zero, so a straight sum yields the answer. */
 for (i = 0; i < SIZE; i++) {
  if (host_array[i] != 0)
   printf("%d ", host_array[i]);
  sum += host_array[i];
 }
 printf("\nSum = %ld \n", sum);
 free(host_array); /* leaked in the original */
 return 0;
}
|
14,679 | #include <stdio.h>
#include <math.h>
#include <iostream>
#include <inttypes.h>
#include <vector>
#include <fstream> // I/O
//struct DataFrame
// A w x h matrix of floats stored in a flat buffer `e`; the accessors in
// this file index it row-major as e[row * w + col].
struct DataFrame
{
int w;   // number of columns
int h;   // number of rows
float* e; // flat element storage (host or device, depending on owner)
};
//Thread block size
#define BLOCK_SIZE 16 //number of threads for each block
#define GRID_SIZE 128 //dimension of entire space
//warning GRID_SIZE must be multiple of 16
#define BIN_SIZE 4 //size of a hisogram bin
#define TH 30 //threshold for triggering the histogram
__global__ void GridKernel(const DataFrame, DataFrame, unsigned int*); //aggiungere i parametri
//------------------------------------------------------------
//Histogram algorithm HOST CODE
/* Host driver for the histogram circle-finder. Copies `data` to the device,
   runs GridKernel over a GRID_SIZE x GRID_SIZE candidate-center grid, copies
   the detected circles back into `circles`, and returns the kernel time in
   milliseconds. Per-call errors are reported to stdout but not fatal. */
float CircleFit(const DataFrame data, DataFrame circles)
{
//load data to device memory
DataFrame d_data;
d_data.w = data.w;
d_data.h = data.h;
size_t size = data.w * data.h * sizeof(float);
cudaError_t err = cudaMalloc(&d_data.e, size);
if(err)
printf("CUDA malloc data DataFrame: %s\n",cudaGetErrorString(err));
cudaMemcpy(d_data.e, data.e, size, cudaMemcpyHostToDevice);
//allocate circles
DataFrame d_circles;
d_circles.w = circles.w;
d_circles.h = circles.h;
size = circles.w * circles.h * sizeof(float);
err = cudaMalloc(&d_circles.e, size);
if(err)
printf("CUDA malloc circles DataFrame: %s\n",cudaGetErrorString(err));
//allocate variable for counting circles found
unsigned int *d_counter;
unsigned int h_counter;
// BUG FIX: the original discarded this call's status and re-tested the
// stale `err` left over from the previous allocation.
err = cudaMallocManaged(&d_counter, sizeof(unsigned int));
if(err)
printf("CUDA malloc counter variable: %s\n",cudaGetErrorString(err));
// The counter cell starts uninitialized: zero it on the device, then read
// it back to confirm.
cudaMemset(d_counter, 0, sizeof(unsigned int));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("initialized counter : %u\n", h_counter);
//
float time;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//start time
cudaEventRecord(start);
// Define the geometry
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(GRID_SIZE/ dimBlock.x, GRID_SIZE/ dimBlock.y);
//warning GRID_SIZE must be multiple of 16
// Invoke kernel
GridKernel<<<dimGrid, dimBlock>>>(d_data, d_circles, d_counter);
err = cudaDeviceSynchronize();
//stop time
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
if(err)
printf("Run kernel: %s\n", cudaGetErrorString(err));
printf("Time: %3.5f ms\n",time);
// Read the circle list back from device memory
size = circles.w * circles.h * sizeof(float);
err = cudaMemcpy(circles.e, d_circles.e, size, cudaMemcpyDeviceToHost);
if(err)
printf("Copy circles off of device: %s\n",cudaGetErrorString(err));
cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
printf("counter : %u\n", h_counter);
// Free device memory and timing events (events were leaked originally).
cudaFree(d_data.e);
cudaFree(d_circles.e);
cudaFree(d_counter);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return time;
} //END HOST FUNCTION
//---------------------------------------------------------
// thread aware log function
// Debug helper: prints `message` prefixed with the caller's 2D block and
// thread coordinates so interleaved device printf output can be attributed.
__device__ void log_msg(const char * message)
{
printf("%d.%d.%d.%d-%s", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, message);
}
//Device function to calculate distance
// Squared Euclidean distance between (x, y) and (xd, yd).
__device__ float Distance2(float x, float y, float xd, float yd)
{
 float dx = x - xd;
 float dy = y - yd;
 return dx*dx + dy*dy;
}
//---------------------------------------------------------
//Device function to Get Element from data
// Bounds-checked row-major read of D(row, col); returns 0.0 when the
// coordinates fall outside the frame.
__device__ float GetElement(const DataFrame D, int row, int col)
{
 if (row >= D.h || col >= D.w)
  return 0.0;
 return D.e[row * D.w + col];
}
//---------------------------------------------------------------
//Device function to Set Element of data
// Bounds-checked row-major write of D(row, col); out-of-range writes are
// silently dropped.
__device__ void SetElement(DataFrame D, int row, int col, float value)
{
 if (row >= D.h || col >= D.w)
  return;
 D.e[row * D.w + col] = value;
}
//---------------------------------------------------------
//Device function to fill histo
// __device__ void HistoFill(int x, int y, DataFrame data,int *h, DataFrame circles, unsigned int *counter)
// For one candidate center (x, y): histogram the squared distance to every
// data point into BIN_SIZE-wide bins, then emit a circle (center + radius)
// for every bin whose count exceeds the TH threshold. `counter` is the
// global next-free-row index into `circles`, advanced atomically.
// NOTE(review): hst is GRID_SIZE*GRID_SIZE/BIN_SIZE = 4096 ints of
// per-thread local storage (~16 KB) -- this will live in local memory and
// is a likely performance problem; consider a shared or smaller histogram.
__device__ void HstFill(int x, int y, DataFrame data,DataFrame circles, unsigned int *counter)
{
const size_t HST_SIZE = GRID_SIZE*GRID_SIZE/BIN_SIZE;
int hst[HST_SIZE] = {0}; // must be zero-initialized
int i, idx;
// log_id(); printf("HstFill\n");
// atomicAdd(counter,1);
for ( i=0; i<data.h; i++)
{
// float xd, yd, d;
float xd, yd;
int d;
// Point i is stored as (x, y) in columns 0 and 1.
xd = GetElement(data, i, 0);
yd = GetElement(data, i, 1);
d = __float2int_rn( Distance2( __int2float_rn(x), __int2float_rn(y),xd,yd) ) / BIN_SIZE ;
if (d < HST_SIZE) hst[d] +=1; // guard against out-of-range bins from "odd" input values
// Several threads could otherwise work on the same circle entry; a
// locking mechanism would be needed to avoid it. Also, emitting circles
// here (inside the point loop) would create the same circle (n - TH)
// times, so the emission below runs only after all points are binned.
/*
if(hst[bin]>TH)
{
idx = atomicAdd(counter,1); // garantisce che l'incremento avvenga un thread alla volta, quindi idx puo' essere usato solo dal thread in esecuzione
SetElement(circles, idx,0,x);
SetElement(circles, idx,1,y);
// float r2=d;
// SetElement(circles, idx,2,r2);
SetElement(circles, idx,2,d);
}
*/
}
// After filling hst, scan every bin and append one circle per bin that
// clears the threshold.
for ( i=0; i < HST_SIZE; i++)
{
if(hst[i]>TH)
{
idx = atomicAdd(counter,1); // the atomic increment hands each thread a unique row index
//log_msg(""); printf("counter: %d, x %f, y %f, radius: %f\n", idx, __int2float_rn(x), __int2float_rn(y), sqrt( __int2float_rn(i*BIN_SIZE) ) );
SetElement(circles, idx,0, __int2float_rn(x));
SetElement(circles, idx,1, __int2float_rn(y));
// float r2=d;
// SetElement(circles, idx,2,r2);
// Bin index i corresponds to squared radius i*BIN_SIZE.
SetElement(circles, idx,2, sqrt( __int2float_rn(i*BIN_SIZE) ) );
}
}
}
//---------------------------------------------------------
//Histogram algorithm kernel
// One thread per candidate circle center: thread coordinates are shifted
// by GRID_SIZE/2 so centers span [-GRID_SIZE/2, GRID_SIZE/2).
__global__ void GridKernel(DataFrame data, DataFrame circles,unsigned int *counter)
{
//x and y indexes
int x = blockIdx.x * blockDim.x+ threadIdx.x - (GRID_SIZE / 2);
int y = blockIdx.y * blockDim.y +threadIdx.y - (GRID_SIZE / 2);
// const size_t histo_size = GRID_SIZE*GRID_SIZE/BIN_SIZE;
// int histo[histo_size];
// The histogram used to be declared here and passed down, but it was
// unused at this level and uninitialized, so HstFill now owns it.
// HistoFill(x,y, data, histo, circles, counter);
HstFill(x,y, data, circles, counter);
}
//------------------------------------------------------------
//dump dataframe function
// Print the h x w frame row by row, one tab-separated line per row,
// followed by a blank line.
void dump(DataFrame m)
{
 for (int r = 0; r < m.h; r++)
 {
  for (int c = 0; c < m.w; c++)
   printf("%3.1f\t", m.e[r*m.w + c]);
  printf("\n");
 }
 printf("\n");
}
//------------------------------------------------------------
//read floats from file.dat
void readBin(std::vector<float> buf)
{
// NOTE(review): unimplemented stub -- main() currently does the binary
// file reading inline; either implement this or remove it.
}
//-------------------------------------------------------------
/* Reads (x, y) float pairs from file.dat, runs the circle finder, and
   dumps the resulting circle table (x, y, radius per row). */
int main(int argc, char** argv)
{
 std::vector<float> buf;
 std::ifstream in("file.dat",std::ios::binary); //open the input file
 /* BUG FIX: test the stream state AFTER the read. The original
    `while(!in.eof())` loop pushed one stale value when the final read
    failed at end-of-file. */
 float w;
 while(in.read( (char*) &w, sizeof(w)))
  buf.push_back(w);
 in.close(); // data is in memory now; the file can be closed
 DataFrame data, circles;
 data.h = buf.size()>>1; // two floats (x, y) per point
 data.w = 2;
 /* Point straight at the vector's storage. The original malloc'd a
    buffer here and then leaked it by overwriting the pointer. */
 data.e = &buf[0];
 circles.h = 15;
 circles.w = 3;
 circles.e = (float*) malloc(circles.w * circles.h * sizeof(float));
 /* NOTE(review): rows of `circles` beyond the number of circles the
    kernel actually writes come back as uninitialized device memory --
    CircleFit would need to cudaMemset d_circles to make them zero. */
 float a = CircleFit(data, circles);
 (void)a; // kernel time, already printed inside CircleFit
 // dump(data);
 dump(circles);
 free(circles.e); /* leaked in the original */
 return 0;
}
|
14,680 | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
//include the header file for your library here
//#include "cudafunctions.cu"
#define BLOCKSIZE 32
/* z[i] = x[i] + y[i] for every i below size; surplus threads exit. */
__global__ void d_add( float *x, float *y, float *z, int size)
{
 int i = blockIdx.x * blockDim.x + threadIdx.x;
 if (i >= size)
  return;
 z[i] = x[i] + y[i];
}
/* CUDA vector add: overwrites x[0..length-1] with x[i] + y[i]. */
void add( float *x, float *y, int length)
{
 float *d_x, *d_y, *d_z; // device copies of x and y and a result z
 int size = length * sizeof(float); // byte count for allocs and copies
 // allocate device space
 cudaMalloc( (void**)&d_x, size);
 cudaMalloc( (void**)&d_y, size);
 cudaMalloc( (void**)&d_z, size);
 // copy vectors from host to device
 cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
 cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
 /* BUG FIX: pass the ELEMENT count, not the byte count. The original
    passed `size` (length*4), so the kernel's bounds check let the tail
    threads of the last block read/write past the end of the arrays. */
 d_add<<< ceil((float)length/(float)BLOCKSIZE), BLOCKSIZE >>>(d_x, d_y, d_z, length);
 // copy the result back to the host (blocking; also syncs the kernel)
 cudaMemcpy(x, d_z, size, cudaMemcpyDeviceToHost);
 // free device mem
 cudaFree(d_x);
 cudaFree(d_y);
 cudaFree(d_z);
}
/**
* mul:
* cuda vector multiply function
**/
/* z[i] = x[i] * y[i] for every i below size; surplus threads exit. */
__global__ void d_mul( float *x, float *y, float *z, int size)
{
 int i = blockIdx.x * blockDim.x + threadIdx.x;
 if (i >= size)
  return;
 z[i] = x[i] * y[i];
}
/* CUDA vector multiply: overwrites x[0..length-1] with x[i] * y[i]. */
void mul( float *x, float *y, int length)
{
 float *d_x, *d_y, *d_z; // device copies of x and y and a result z
 int size = length * sizeof(float); // byte count for allocs and copies
 // allocate device space
 cudaMalloc( (void**)&d_x, size);
 cudaMalloc( (void**)&d_y, size);
 cudaMalloc( (void**)&d_z, size);
 // copy vectors from host to device
 cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
 cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
 /* BUG FIX: pass the ELEMENT count, not the byte count (`size`), which
    let tail threads of the last block access out-of-bounds memory. */
 d_mul<<< ceil((float)length/(float)BLOCKSIZE), BLOCKSIZE >>>(d_x, d_y, d_z, length);
 // copy the result back to the host (blocking; also syncs the kernel)
 cudaMemcpy(x, d_z, size, cudaMemcpyDeviceToHost);
 // free device mem
 cudaFree(d_x);
 cudaFree(d_y);
 cudaFree(d_z);
}
/* Verify the CUDA add: snapshot x, run add(x, y, sz), then check that
   x[i] == old_x[i] + y[i] for every element. Returns 1 on success,
   0 on the first mismatch (after printing the offending element). */
int func_add ( float *x, float *y, int sz)
{
 int i;
 float *a;
 a = ( float *)malloc(sizeof(float)*sz);
 if (!a){
  printf("memory allocation error\n");
  exit(-1);
 }
 memcpy(a,x,sz*(sizeof(float)));
 // cuda enabled call (replaces the old host-side loop)
 add(x, y, sz);
 for ( i=0; i<sz; i++){
  if (x[i]!= a[i] + y[i]){
   printf("x = %f, a = %f, y = %f, i = %d, size = %d ", x[i], a[i], y[i], i, sz);
   free(a); /* BUG FIX: the snapshot was leaked on this failure path */
   return 0;
  }
 }
 free(a);
 return 1;
}
/* Verify the CUDA multiply: snapshot x, run mul(x, y, sz), then check that
   x[i] == old_x[i] * y[i] for every element. Returns 1 on success,
   0 on the first mismatch (after printing the offending element). */
int func_mul ( float *x, float *y, int sz)
{
 int i;
 float *a;
 a = ( float *)malloc(sizeof(float)*sz);
 if (!a){
  printf("memory allocation error\n");
  exit(-1);
 }
 memcpy(a,x,sz*(sizeof(float)));
 // cuda call (replaces the old host-side loop)
 mul(x, y, sz);
 for ( i=0; i<sz; i++){
  if (x[i]!= a[i] * y[i]){
   printf("x = %f, a = %f, y = %f, i = %d, size = %d ", x[i], a[i], y[i], i, sz);
   free(a); /* BUG FIX: the snapshot was leaked on this failure path */
   return 0;
  }
 }
 free(a);
 return 1;
}
/* Exercise the CUDA add/mul wrappers on vectors of size 10, 100, ...,
   100000, reporting success or failure for each operation. */
int main()
{
 for (int n = 10; n < 1000000; n *= 10){
  float *a = ( float *) malloc(sizeof(float)*n);
  float *b = ( float *) malloc(sizeof(float)*n);
  for (int k = 0; k < n; k++){
   a[k] = 2;
   b[k] = 3;
  }
  if(func_add(a,b,n))
   printf("add operation completed\n");
  else
   printf("failed to add\n");
  if(func_mul(a,b,n))
   printf("mul operation completed\n");
  else
   printf("failed to mul\n");
  free(a);
  free(b);
 }
 return 0;
}
|
14,681 | #include "includes.h"
/* Broadcast fill: wt[i] = alpha for every i in [0, N). */
__global__ void kernel_setweights_fl(int N, float *wt, float alpha){
 unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
 /* threads past the end do nothing */
 if (i >= N) return;
 wt[i] = alpha;
} |
14,682 | #include "includes.h"
// Applies top/bottom boundary conditions to the M x N row-major fields.
// Row 0 is the top ghost row and row M-1 the bottom one; each thread walks
// columns with a grid-stride loop. BC2D == 0 selects no-flow (copy the
// adjacent interior row); otherwise critical-depth (hcri above topography).
// NOTE(review): `hcri` is not declared here -- presumably a global/constant
// from includes.h; confirm its definition and units.
__global__ void TopBottomBound2D(double *Hs, double *Ztopo, double *K2n, double *K2s, int BC2D, int M, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N) {
// no-flow BCs
if (BC2D == 0) {
Hs[tid] = Hs[N+tid];
Hs[(M-1)*N+tid] = Hs[(M-2)*N+tid];
} else { // Critical depth flow BCs
Hs[tid] = hcri + Ztopo[tid];
Hs[(M-1)*N+tid] = hcri + Ztopo[(M-1)*N+tid];
}
// Conductance-like coefficients mirror the adjacent interior row.
K2s[tid] = K2s[N+tid];
K2n[(M-1)*N+tid] = K2n[(M-2)*N+tid];
tid += blockDim.x * gridDim.x;
}
} |
14,683 | #include "shared.cuh"
// Lightweight handle to one particle's state pulled from a ParticleView.
// NOTE(review): move_impl dereferences pos/dir components, so Point
// evidently stores pointers into per-component arrays -- confirm in
// shared.cuh.
struct ParticleRef {
Point pos;      // particle position (components written by move)
Point dir;      // unit direction of travel (read-only here)
double nextdist; // distance to advance along dir
};
// Builds a ParticleRef for particle i by pulling its position, direction,
// and next-distance accessors from the view.
inline __device__ ParticleRef make_ref(const ParticleView &view, int i) {
return {view.get_pos(i), view.get_dir(i), view.get_nextdist(i)};
}
// Advances the particle in place: pos += dir * nextdist, componentwise.
// The Point components are dereferenced, so they are pointers into the
// underlying particle storage (writes persist past this call).
__device__ inline void move_impl(const ParticleRef ref) {
*ref.pos.x += *ref.dir.x * ref.nextdist;
*ref.pos.y += *ref.dir.y * ref.nextdist;
*ref.pos.z += *ref.dir.z * ref.nextdist;
}
// One thread per particle: advance each live particle by its nextdist.
// Bounds-guarded, so any launch covering view.size threads is valid.
__global__ void move(ParticleView view) {
int i = thread_id();
if (i >= view.size) return;
move_impl(make_ref(view, i));
}
|
14,684 | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/* Benchmark harness: times gpu_add over a sweep of matrix sizes and block
   shapes (argv[1] = number of matrix sizes to test). Prints one line of
   [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)] per configuration. */
int main(int argc, char **argv) {
 cudaSetDevice(0);
 char* p;int matrix_len=strtol(argv[1], &p, 10);
 for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
 for(int block_looper=0;block_looper<20;block_looper++){
 int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
 float *first = NULL;
 /* BUG FIX: allocate bytes, not element counts -- the original passed
    XSIZE*YSIZE, a quarter of the space the float kernels index. */
 cudaMalloc(&first, XSIZE*YSIZE*sizeof(float));
 float *second = NULL;
 cudaMalloc(&second, XSIZE*YSIZE*sizeof(float));
 size_t sizeFirst = XSIZE*YSIZE;
 /* Round the problem size up to a multiple of the block shape. */
 int iXSIZE= XSIZE;
 int iYSIZE= YSIZE;
 while(iXSIZE%BLOCKX!=0)
 {
 iXSIZE++;
 }
 while(iYSIZE%BLOCKY!=0)
 {
 iYSIZE++;
 }
 dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
 dim3 threadBlock(BLOCKX, BLOCKY);
 cudaFree(0);
 gpu_add<<<gridBlock,threadBlock>>>(first,second,sizeFirst);
 cudaDeviceSynchronize();
 /* Warm-up launches (not timed). */
 for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
 gpu_add<<<gridBlock,threadBlock>>>(first,second,sizeFirst);
 }
 auto start = steady_clock::now();
 for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
 gpu_add<<<gridBlock,threadBlock>>>(first,second,sizeFirst);
 }
 /* BUG FIX: kernel launches are asynchronous -- without this sync the
    timer measured only launch overhead, not kernel execution. */
 cudaDeviceSynchronize();
 auto end = steady_clock::now();
 auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
 cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
 /* BUG FIX: the buffers were leaked on every loop iteration. */
 cudaFree(first);
 cudaFree(second);
 }
}} |
14,685 | #include <cuda.h> //Max MIN
#include <stdio.h>
#include <time.h>
#define tbp 512
#define nblocks 1
/* Per-block minimum: each block loads tbp elements of `a` into shared
   memory, tree-reduces them (tbp must be a power of two, which the
   compile-time constant 512 is), and writes its minimum to d[blockIdx.x].
   Callers must launch exactly tbp threads per block and supply at least
   gridDim.x * tbp input elements. */
__global__ void kernel_min(int *a, int *d)
{
 __shared__ int sdata[tbp]; //"static" shared memory
 unsigned int tid = threadIdx.x;
 unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
 sdata[tid] = a[gid];
 __syncthreads();
 /* Halve the active range each pass, keeping the smaller of each pair.
    The barrier is outside the divergent branch so every thread hits it. */
 for (unsigned int stride = tbp/2; stride >= 1; stride >>= 1)
 {
  if (tid < stride && sdata[tid + stride] < sdata[tid])
   sdata[tid] = sdata[tid + stride];
  __syncthreads();
 }
 /* Thread 0 publishes this block's minimum. */
 if (tid == 0)
  d[blockIdx.x] = sdata[0];
}
/* Fill N random ints, compute the minimum on both CPU and GPU, and print
   the two results for comparison. */
int main()
{
 int i;
 const int N=tbp*nblocks;
 srand(time(NULL));
 int *a;
 a = (int*)malloc(N * sizeof(int));
 int *d;
 d = (int*)malloc(nblocks * sizeof(int));
 int *dev_a, *dev_d;
 cudaMalloc((void **) &dev_a, N*sizeof(int));
 cudaMalloc((void **) &dev_d, nblocks*sizeof(int));
 /* BUG FIX: the original seeded the CPU minimum with 100, but the values
    below run 5..104, so it could report 100 even when every element was
    larger. Seed with INT_MAX instead. */
 int mmm=0x7fffffff;
 for( i = 0 ; i < N ; i++)
 {
 a[i] = rand()% 100 + 5;
 if(mmm>a[i]) mmm=a[i];
 }
 cudaMemcpy(dev_a , a, N*sizeof(int),cudaMemcpyHostToDevice);
 kernel_min <<<nblocks,tbp>>>(dev_a,dev_d);
 /* Blocking copy also synchronizes with the kernel. */
 cudaMemcpy(d, dev_d, nblocks*sizeof(int),cudaMemcpyDeviceToHost);
 printf("cpu min %d, gpu_min = %d",mmm,d[0]);
 /* Release everything (host arrays were leaked in the original). */
 free(a);
 free(d);
 cudaFree(dev_a);
 cudaFree(dev_d);
 return 0;
}
|
14,686 | #include "includes.h"
// One block per instance: over N_step steps, atomically accumulates into a
// shared scalar the values of a 12-lane window that slides (and wraps)
// across the instance's VEC_SIZE input elements, then writes the total to
// output[b_id]. The whole block returns together on the b_id guard, so the
// barriers below are safe.
// NOTE(review): VEC_SIZE comes from includes.h; `val` is only initialized
// for t_id < VEC_SIZE, which is safe only if blockDim.x lanes beyond
// VEC_SIZE never satisfy the window tests -- confirm VEC_SIZE >= 12.
__global__ void process(int N_step, int N_inst, float *input, float *output){
int b_id = blockIdx.x, t_id = threadIdx.x;
if(b_id >= N_inst) return;
__shared__ float ans;  // block-wide accumulator, reset once by thread 0
float val;
if(!t_id) ans = 0;
if(t_id < VEC_SIZE) val = input[VEC_SIZE * b_id + t_id];
__syncthreads();
for(int t=0;t<N_step;++t){
int start = t%VEC_SIZE;
// Main window [start, start+12) clipped to VEC_SIZE ...
if(t_id >= start && t_id < min(start + 12, VEC_SIZE)) atomicAdd(&ans, val);
// ... plus the wrapped-around remainder at the front of the vector.
if(start + 12 > VEC_SIZE && t_id < start + 12 - VEC_SIZE) atomicAdd(&ans, val);
__syncthreads();
}
// Every thread writes the same shared value, published by the last barrier.
output[b_id] = ans;
return;
} |
14,687 | #include <iostream>
#include <cuda.h>
using namespace std;
/*
 * One block of a Hillis-Steele EXCLUSIVE scan. Each thread seeds shared
 * memory with the element BEFORE its global index (0 for the first element
 * and for padding threads past n), then log2(blockDim.x) doubling passes
 * build the prefix sums in a double-buffered shared array of 2*blockDim.x
 * floats (passed as dynamic shared memory). The last live thread of each
 * block records the block total in blocksum[blockIdx.x].
 */
__global__ void hillis_steele(float *g_odata, float *g_idata, float *blocksum, int n) {
 extern volatile __shared__ float temp[]; // allocated on invocation
 int pout = 0, pin = 1;
 int idx = blockIdx.x*blockDim.x + threadIdx.x;
 int thid = threadIdx.x;
 int m = blockDim.x;
 temp[thid] = (idx == 0 || idx >= n) ? 0: g_idata[idx-1];
 __syncthreads();
 for( int offset = 1; offset<m; offset *= 2 ) {
 pout = 1 - pout; // swap double buffer indices
 pin = 1 - pout;
 if (thid >= offset)
 temp[pout*m+thid] = temp[pin*m+thid] + temp[pin*m+thid - offset];
 else
 temp[pout*m+thid] = temp[pin*m+thid];
 __syncthreads(); // publish this pass before the next one reads it
 }
 /* BUG FIX: guard the final store -- tail threads of the last block have
    idx >= n and used to write past the end of g_odata. */
 if (idx < n)
 g_odata[idx] = temp[pout*m+thid]; // write output
 if(thid == blockDim.x -1 || idx == n-1)
 blocksum[blockIdx.x] = temp[pout*m+thid];
}
// Second scan phase: add each block's scanned offset to every element that
// block produced.
__global__ void sum_kernel(float *g_odata, float *blocksum, int n) {
 int gid = blockIdx.x*blockDim.x + threadIdx.x;
 if(gid >= n)
  return;
 g_odata[gid] += blocksum[blockIdx.x];
}
/* Exclusive scan of in[0..n-1] into out[0..n-1] using the classic
   three-phase scheme: per-block Hillis-Steele scan, scan of the per-block
   sums, then a uniform add. Requires the number of blocks to fit in one
   block of the second-level scan (blockdim <= threads_per_block). */
__host__ void scan(const float* in, float* out, unsigned int n, unsigned int threads_per_block) {
 float *g_idata, *g_odata;
 float *blocksum_in, *blocksum_out, *dummy;
 unsigned int m = threads_per_block;
 size_t shared_array_size = 2*m*sizeof(float); // double buffer per block
 int blockdim = (n + m-1)/m; // ceil(n / m) blocks
 cudaMalloc((void **)&g_idata, n*sizeof(float));
 cudaMalloc((void **)&g_odata, n*sizeof(float));
 cudaMallocManaged((void **)&blocksum_in, blockdim*sizeof(float));
 cudaMallocManaged((void **)&blocksum_out, blockdim*sizeof(float));
 cudaMallocManaged((void **)&dummy, 1*sizeof(float)); // discarded block total
 cudaMemcpy(g_idata, in, n*sizeof(float), cudaMemcpyHostToDevice);
 cudaMemset(g_odata, 0, n*sizeof(float));
 // Phase 1: scan each block, capturing per-block totals.
 hillis_steele<<<blockdim, m, shared_array_size>>>(g_odata, g_idata, blocksum_in, n);
 cudaDeviceSynchronize();
 // Phase 2: scan the block totals (single block).
 hillis_steele<<<1, m, shared_array_size>>>(blocksum_out, blocksum_in, dummy, blockdim);
 cudaDeviceSynchronize();
 // Phase 3: add each block's scanned offset back in.
 sum_kernel<<<blockdim, m>>>(g_odata, blocksum_out, n);
 cudaDeviceSynchronize();
 cudaMemcpy(out, g_odata, n*sizeof(float), cudaMemcpyDeviceToHost);
 cudaDeviceSynchronize();
 cudaFree(g_idata);
 cudaFree(g_odata);
 /* BUG FIX: the three managed buffers were leaked in the original. */
 cudaFree(blocksum_in);
 cudaFree(blocksum_out);
 cudaFree(dummy);
}
|
14,688 | #include "torch_kernels.cuh"
// Index-select tuned for a SMALL number of indices: the outer loop walks
// the selected indices so each one is loaded from global memory exactly
// once, and an inner grid-stride loop copies that slice cooperatively.
__global__ void indexSelectSmallIndex(float *dst, float *src, int64_t *indices,
                                      int64_t innerSize, int64_t numIndices) {
  const int64_t stride = (int64_t)gridDim.x * blockDim.x;
  for (int64_t d = 0; d < numIndices; ++d) {
    const int64_t s = indices[d];
    // Stride over the slice (innerSize elements), ignoring the indexed
    // dimension whose offset is fixed for this iteration.
    for (int64_t off = blockIdx.x * blockDim.x + threadIdx.x;
         off < innerSize; off += stride) {
      dst[d * innerSize + off] = src[s * innerSize + off];
    }
  }
}
// Index-select tuned for a LARGE number of indices: grid-stride over the
// whole output (totalSize elements); each linear position decomposes into
// (slice, offset-within-slice) and gathers from src via indices[slice].
__global__ void indexSelectLargeIndex(float *dst, float *src, int64_t *indices,
                                      int64_t totalSize, int64_t innerSize) {
  const int64_t stride = (int64_t)gridDim.x * blockDim.x;
  for (int64_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < totalSize;
       pos += stride) {
    const int64_t slice = pos / innerSize;
    const int64_t within = pos % innerSize;
    dst[slice * innerSize + within] =
        src[indices[slice] * innerSize + within];
  }
}
// Logistic function: sigma(x) = 1 / (1 + e^(-x)).
template <typename T> __device__ __forceinline__ T sigmoid(T in) {
  const T unit = static_cast<T>(1.0);
  return unit / (unit + exp(-in));
}
// bias1: input_bias, bias2: hidden_bias, cx: last cell state, hsz: hidden_size
// Fused LSTM cell pointwise step. `input` and `hidden` hold pre-activation
// gate values laid out as 4 contiguous hsz-wide chunks per sample in the
// order [input, forget, cell, output]; bias1/bias2 follow the same 4*hsz
// gate layout. For each of the totalElements cell positions (grid-stride
// loop), the kernel combines gates with the previous cell state _cx and
// writes the new hidden state _hy and cell state _cy.
// bias1: input_bias, bias2: hidden_bias, cx: last cell state, hsz: hidden_size
__global__ void lstm_cell_kernel(float *input, float *hidden, float *bias1,
                                 float *bias2, float *_cx, float *_hy,
                                 float *_cy, int hsz, int totalElements) {
  for (int linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalElements; linearIndex += gridDim.x * blockDim.x) {
    // Base offset of this element's 4-gate group: sample * 4*hsz + unit.
    int offset = (linearIndex / hsz) * 4 * hsz + linearIndex % hsz;
    float iig = input[offset + 0 * hsz];
    float ifg = input[offset + 1 * hsz];
    float icg = input[offset + 2 * hsz];
    float iog = input[offset + 3 * hsz];
    float hig = hidden[offset + 0 * hsz];
    float hfg = hidden[offset + 1 * hsz];
    float hcg = hidden[offset + 2 * hsz];
    float hog = hidden[offset + 3 * hsz];
    float cx = _cx[linearIndex];
    float *hy = &_hy[linearIndex];
    float *cy = &_cy[linearIndex];
    // Per-gate biases, indexed by hidden unit (shared across samples).
    float b1i, b1f, b1c, b1o;
    float b2i, b2f, b2c, b2o;
    b1i = bias1[linearIndex % hsz + 0 * hsz];
    b1f = bias1[linearIndex % hsz + 1 * hsz];
    b1c = bias1[linearIndex % hsz + 2 * hsz];
    b1o = bias1[linearIndex % hsz + 3 * hsz];
    b2i = bias2[linearIndex % hsz + 0 * hsz];
    b2f = bias2[linearIndex % hsz + 1 * hsz];
    b2c = bias2[linearIndex % hsz + 2 * hsz];
    b2o = bias2[linearIndex % hsz + 3 * hsz];
    float ig, fg, cg, og;
    float f_hy, f_cy;
    // Standard LSTM gate activations and state update:
    // c' = f*c + i*g ; h' = o * tanh(c')
    ig = sigmoid(iig + hig + b1i + b2i);
    fg = sigmoid(ifg + hfg + b1f + b2f);
    cg = tanh(icg + hcg + b1c + b2c);
    og = sigmoid(iog + hog + b1o + b2o);
    f_cy = (fg * cx) + (ig * cg);
    f_hy = og * tanh(f_cy);
    *hy = f_hy;
    *cy = f_cy;
  }
} |
14,689 | #include "screen.cuh"
#include "trace_manager.cuh"
#include <stdio.h>
#include <assert.h>
#include <math.h>
#define PI 3.1415
// Interactive ray-traced voxel demo: builds a 128x128 field of colored
// blocks shaped like a dome, then renders frames forever with WASD +
// mouse-look camera controls.
// NOTE(review): the `while (true)` loop never breaks, so opengl_exit()
// below is unreachable and `world` is never freed -- presumably fine for a
// demo, but confirm whether a quit key was intended.
int main() {
int width = 1920;
int height = 1080;
create_screen(width, height);
TraceManager trace_manager = TraceManager(width, height);
int w = 128;
int h = 128;
Block* world = new Block[w * h];
for (int x = 0; x < w; x++) {
for (int y = 0; y < h; y++) {
// Cycle block colors red/green/blue along the diagonals.
float r = (x + y) % 3 == 0 ? 1.0 : 0.0;
float g = (x + y) % 3 == 1 ? 1.0 : 0.0;
float b = (x + y) % 3 == 2 ? 1.0 : 0.0;
// Sphere of squared radius r2 centered at (100, 100): columns inside
// it get height sqrt(y2), producing a dome; elsewhere height 10.
int r2 = 10000;
int y2 = r2 - (x - 100) * (x - 100) - (y - 100) * (y - 100);
float py = 10.0;
if (y2 > 0.0) {
py = sqrt(y2) + 10.0;
}
world[x * h + y] = Block(AABB3(Vec3((float)x, py - 10.0, (float)y), Vec3((float)x + 1.0, py, (float)y + 1.0)), Vec3(r, g, b));
printf("(%f, %f)\n", (float)x, (float)y);
}
}
trace_manager.upload_world(world, w * h);
// Camera state: position plus basis vectors and accumulated mouse angles.
Vec3 position = Vec3(5.5, 85.0, -2.0);
Vec3 f = Vec3(0.0, 0.0, 1.0).normalize();
Vec3 side = Vec3(1.0, 0.0, 0.0).normalize();
Vec3 up = Vec3(0.0, 1.0, 0.0).normalize();
Vec3 rotation_axis = Vec3(0.0, 1.0, 0.0);
float rotation_x = 0.0;
float rotation_y = 0.0;
float t = 1.0;
int i = 0;
while (true)
{
// Mouse-look: accumulate yaw/pitch from the frame's mouse delta.
double mouse_delta_x, mouse_delta_y;
mouse_delta(&mouse_delta_x, &mouse_delta_y);
rotation_x += mouse_delta_x / 100.0;
rotation_y += mouse_delta_y / 100.0;
Vec3 forward;
forward = f.rotate(side, rotation_y);
forward = forward.rotate(up, rotation_x);
// WASD movement in the horizontal plane, relative to the view.
if (key_pressed(KEY::W)) {
position += Vec3(forward.x, 0.0, forward.z).normalize() * 1.0;
}
if (key_pressed(KEY::A)) {
position += Vec3(-forward.z, 0.0, forward.x).normalize() * 1.0;
}
if (key_pressed(KEY::S)) {
position -= Vec3(forward.x, 0.0, forward.z).normalize() * 1.0;
}
if (key_pressed(KEY::D)) {
position += Vec3(forward.z, 0.0, -forward.x).normalize() * 1.0;
}
// Arrow keys nudge pitch directly.
if (key_pressed(KEY::UP)) {
rotation_y += 0.016;
}
if (key_pressed(KEY::DOWN)) {
rotation_y -= 0.016;
}
// if (key_pressed(KEY::A))
// Vec3 position = Vec3(cos(t) * 110.0, 3.0, sin(t) * 110.0);
// Vec3 forward = position - Vec3(0.0, 3.0, 0.0);
// Vec3 side = Vec3(-cos(t - 0.5 * PI), 0.0, -sin(t - 0.5 * PI));
// Vec3 up = forward.cross(side);
// forward = forward.normalize();
// printf("Forward: %f, %f, %f\n", forward.x, forward.y, forward.z);
// printf("t: %f\n", t);
// Build the view frustum (16:9 aspect), trace a frame, and present it.
Frustrum view = Frustrum(position, forward * t, 1920.0 / 1080.0);
float* surface = trace_manager.render_frame(view);
render_frame(surface);
// printf("frame: %i\n", i);
i++;
}
opengl_exit();
} |
14,690 | #include <iostream>
#include <vector>
#include <cmath>
using namespace std;
// void AssembleProlMatrix(vector<vector<vector<double>>> &P, vector<vector<size_t>> N, vector<size_t> numNodes, vector<size_t> num_rows, )
// Builds and prints the multigrid prolongation matrix P[0] that maps a
// coarse 2D grid (N[0] = 3x1 elements, 8 nodes, 2 DOF each -> 16 columns)
// onto the next finer grid (N[1] = 6x2 elements, 21 nodes -> 42 rows),
// using standard bilinear interpolation weights: 1 on the coincident fine
// node, 0.5 on edge neighbors, 0.25 on diagonal neighbors.
// NOTE(review): the grid sizes are hard-coded for this 2-level test case;
// the commented-out AssembleProlMatrix signature above suggests this was
// meant to be factored into a reusable function.
int main()
{
vector<vector<size_t>> N;
N.resize(2, vector<size_t>(2));
N[0] = {3, 1};  // coarse level: elements in x, y
N[1] = {6, 2};  // fine level: elements in x, y
size_t lev = 0; // level being prolongated from
size_t dim = 2; // DOFs per node (2D displacement)
vector<size_t> numNodes(2);
numNodes[0] = 8;
numNodes[1] = 21;
vector<size_t> num_rows = { 16, 42 };
// vector<size_t> num_rows = { numNodes[0]*2, numNodes[1]*2 };
vector<vector<vector<double>>> A;
A.resize(2);
A[0].resize(num_rows[0], vector<double>(num_rows[0]));
A[1].resize(num_rows[1], vector<double>(num_rows[1]));
size_t numLevels = 2;
vector<vector<vector<double>>> P;
P.resize(numLevels - 1);
P[0].resize(num_rows[1], vector<double>(num_rows[0]));
// assembleProlMatrix()
// i walks coarse DOFs two at a time (one node per step); j picks the DOF.
// The long row expressions map a coarse node's (x, y) grid position onto
// the corresponding fine-grid row index, then offset to each neighbor.
for ( int i = 0 ; i < numNodes[0]*2 ; i += 2)
{
for ( int j = 0 ; j < dim ; j++ )
{
// same node
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j][i+j] = 1;
// east node
if ( (i / 2 + 1) % (N[lev][0]+1) != 0 )
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j + 2][i+j] += 0.5;
// north node
if ( i / 2 + (N[lev][0] + 1) < (N[lev][0] + 1)*(N[lev][1] + 1))
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j + 2*(N[lev+1][0] + 1) ][i+j] += 0.5;
// west node
if ( (i / 2) % (N[lev][0]+1) != 0 )
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j - 2][i+j] += 0.5;
// south node
if ( i / 2 >= N[lev][0] + 1)
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j - 2*(N[lev+1][0] + 1)][i+j] += 0.5;
// north-east node
if ( (i / 2 + 1) % (N[lev][0]+1) != 0 && i / 2 + (N[lev][0] + 1) < (N[lev][0] + 1)*(N[lev][1] + 1))
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j + 2*(N[lev+1][0] + 1) + 2 ][i+j] = 0.25;
// north-west node
if ( i / 2 + (N[lev][0] + 1) < (N[lev][0] + 1)*(N[lev][1] + 1) && (i / 2) % (N[lev][0]+1) != 0 )
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j + 2*(N[lev+1][0] + 1) - 2 ][i+j] = 0.25;
// south-east node
if ( i / 2 >= N[lev][0] + 1 && (i / 2 + 1) % (N[lev][0]+1) != 0 )
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j - 2*(N[lev+1][0] + 1) + 2 ][i+j] = 0.25;
// south-west node
if ( i / 2 >= N[lev][0] + 1 && (i / 2) % (N[lev][0]+1) != 0 )
P[lev][( 2*(i % ( (N[lev][0] + 1)*dim) )) + ( (ceil)( i / ( 2*(N[lev][0] + 1 ) ) ) )*2*dim*(N[lev+1][0] + 1) + j - 2*(N[lev+1][0] + 1) - 2 ][i+j] = 0.25;
}
}
// Print the assembled 42 x 16 prolongation matrix.
for ( int i = 0 ; i < num_rows[1] ; i++ )
{
for ( int j = 0 ; j < num_rows[0] ; j++ )
cout << P[0][i][j] << " ";
cout << "\n";
}
}
|
14,691 | #include "includes.h"
// "Respond" phase of a handshake-style graph matching. One thread per node;
// red nodes (out == -2) scan their CSR neighbor list: if a blue neighbor
// (out == -1) proposed to this node, the pair is matched by writing
// min(u, v) into both slots; a red node with no unmatched neighbors
// matches itself. Concurrent writes to out are by-design: matched pairs
// agree on the value min(u, v).
__global__ void respond_kernel(int64_t *out, const int64_t *proposal, const int64_t *rowptr, const int64_t *col, int64_t numel) {
const int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx < numel) {
if (out[thread_idx] != -2)
return; // Only visit red nodes.
bool has_unmatched_neighbor = false;
// Neighbors of node thread_idx in CSR form: col[rowptr[u] .. rowptr[u+1]).
for (int64_t i = rowptr[thread_idx]; i < rowptr[thread_idx + 1]; i++) {
auto v = col[i];
if (out[v] < 0)
has_unmatched_neighbor = true; // Unmatched neighbor found.
if (out[v] == -1 && proposal[v] == thread_idx) {
// Match the first blue neighbor v which proposed to u.
out[thread_idx] = min(thread_idx, v);
out[v] = min(thread_idx, v);
break;
}
}
// Isolated among matched nodes: close this node out by self-matching.
if (!has_unmatched_neighbor)
out[thread_idx] = thread_idx;
}
} |
14,692 | /***********************************************************************
To Compile:
/usr/local/cuda-10.0/bin/nvcc -arch=compute_52 -o file.out filename.cu
***********************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/* Dump the dim x dim matrix A to stdout in a MATLAB-ish layout (rows
 * terminated by ';') for visual inspection during debugging. */
void check_output(float *A, int dim) {
    printf("\n");
    for (int r = 0; r < dim; r++) {
        for (int c = 0; c < dim; c++)
            printf("%3.1f ", A[r*dim + c]);
        printf(";\n");
    }
    printf("\n");
}
/* Tiled matrix multiply c = a * b, where d_bT holds b already transposed so
 * that both tile loads are row-contiguous (coalesced).
 *
 * Launch contract: blockDim.x == blockDim.y, dim divisible by blockDim.x, and
 * dynamic shared memory of 2 * blockDim.x * blockDim.y * sizeof(float) bytes
 * (two square tiles). `tile` is kept in the signature for compatibility with
 * existing call sites but is unused.
 *
 * Fixes over the original: the shared tiles were indexed with stride `dim`
 * (far out of bounds of the blockDim-sized arrays), the bT tile was loaded
 * from row i of bT instead of the rows covering output column j, and the
 * result was stored on every outer iteration instead of once at the end. */
__global__ void MyKernel2(float *d_a, float *d_bT, float *d_c, int dim, int tile){
    extern __shared__ float s[];              // one dynamic allocation...
    float *a_tile  = s;                       // ...split into two square tiles
    float *bT_tile = &a_tile[blockDim.x * blockDim.y];

    const int tx = threadIdx.x, ty = threadIdx.y;
    const int row = blockIdx.y * blockDim.y + ty;   // row i of c
    const int col = blockIdx.x * blockDim.x + tx;   // column j of c
    float partial = 0.0f;

    for (int m = 0; m < dim / blockDim.x; ++m) {
        /* Both loads are coalesced: consecutive tx reads consecutive floats. */
        a_tile [ty * blockDim.x + tx] = d_a [row * dim + m * blockDim.x + tx];
        bT_tile[ty * blockDim.x + tx] =
            d_bT[(blockIdx.x * blockDim.x + ty) * dim + m * blockDim.x + tx];
        __syncthreads();   // tiles fully loaded before anyone reads them

        for (int k = 0; k < blockDim.x; ++k)
            partial += a_tile[ty * blockDim.x + k] * bT_tile[tx * blockDim.x + k];
        __syncthreads();   // done reading before the next load overwrites
    }
    d_c[row * dim + col] = partial;   // single store after full accumulation
}
/* Driver for the tiled matrix multiply: parses optional
 * dimension/block_size/tile arguments, times H2D copies + kernel + D2H copy
 * with CLOCK_REALTIME, and prints the result matrix for inspection. */
int main(int argc, char const *argv[]) {
    int dim = 1024, block_size = 16, tile = 64;
    int i;
    if (argc > 1) {
        if(argc != 4){
            printf("Invaild arguments. Must have 3: dimension, block_size and tile size.\n");
            return 0;
        }
        dim = atoi(argv[1]);
        block_size = atoi(argv[2]);
        tile = atoi(argv[3]);
    }
    /* The kernel assumes a square block that evenly divides dim. */
    if (block_size <= 0 || dim <= 0 || dim % block_size != 0) {
        printf("dimension must be a positive multiple of block_size.\n");
        return 0;
    }
    srand(3);
    dim3 Block(block_size,block_size);
    dim3 Grid(dim/Block.x, dim/Block.y);
    struct timespec start,finish;
    int ntime, stime;
    float tot_time=0.0;
    /* Populate matrices with random values in [0, limit). */
    float *a = (float*)malloc(sizeof(float)*dim*dim);
    float *bT = (float*)malloc(sizeof(float)*dim*dim);
    float *c = (float*)malloc(sizeof(float)*dim*dim);
    float *d_a, *d_bT ,*d_c, limit=10.0f; /* d_bT holds b transposed */
    for(i = 0; i < dim*dim; i++){
        a[i] = ((float)rand()/(float)(RAND_MAX)) * limit;
        bT[i] = ((float)rand()/(float)(RAND_MAX)) * limit;
    }
    /* Allocate device memory. */
    cudaMalloc( (void**)&d_a, dim*dim*sizeof(float));
    cudaMalloc( (void**)&d_bT, dim*dim*sizeof(float));
    cudaMalloc( (void**)&d_c, dim*dim*sizeof(float));
    /* Start timing: includes transfers on purpose (end-to-end cost). */
    clock_gettime(CLOCK_REALTIME, &start);
    cudaMemcpy(d_a ,a ,dim*dim*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(d_bT,bT,dim*dim*sizeof(float),cudaMemcpyHostToDevice);
    /* Dynamic shared memory: two block_size x block_size float tiles.
     * (The original passed `tile*tile*2` BYTES — missing sizeof(float) and
     * unrelated to the tiles the kernel actually declares.) */
    size_t smem = 2 * (size_t)block_size * block_size * sizeof(float);
    MyKernel2<<<Grid, Block, smem>>>(d_a,d_bT,d_c,dim,tile);
    cudaError_t err = cudaGetLastError();   /* catch bad launch configs */
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(c, d_c, sizeof(float)*dim*dim,cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_REALTIME, &finish);
    ntime = finish.tv_nsec - start.tv_nsec;
    stime = (int)finish.tv_sec - (int) start.tv_sec;
    tot_time = ntime*1.0E-9 + stime;
    printf("kernel#1 Time elapsed: %f ms. matrix dimension: %d X %d\n",
           tot_time*1.0E3,dim,dim);
    check_output(c,dim);
    /* Release device and host memory. */
    cudaFree(d_c); cudaFree(d_bT); cudaFree(d_a);
    free(a); free(bT); free(c);
    return 0;
}
|
14,693 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cufft.h>
extern "C" void deconvolve (cufftComplex *src1, cufftComplex *src2, cufftComplex
*dst, int size, int numThreads, double snr);
/* Per-element Wiener-style deconvolution in the frequency domain:
 * dst[k] = src1[k] * conj(src2[k]) / (|src2[k]|^2 + snr).
 * One thread per complex sample; the guard covers the grid tail. */
__global__ void decon_kernel (cufftComplex *src1, cufftComplex *src2,
cufftComplex * dst, int size, double snr) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        const double re1 = src1[idx].x;
        const double im1 = src1[idx].y;
        const double re2 = src2[idx].x;
        const double im2 = src2[idx].y;
        /* snr regularizes the division where |src2| is tiny. */
        dst[idx].x = (re1*re2 + im1*im2) / ((re2*re2 + im2*im2) + snr);
        dst[idx].y = (im1*re2 - re1*im2) / ((re2*re2 + im2*im2) + snr);
    }
}
/* Host-side launcher: one thread per sample, ceil-divided into blocks so a
 * partial final block covers the tail. Runs on the default stream. */
void deconvolve (cufftComplex *src1, cufftComplex *src2, cufftComplex *dst, int
size, int numThreads, double snr) {
    const int numBlocks = (size + numThreads - 1) / numThreads;
    decon_kernel<<<numBlocks, numThreads>>>(src1, src2, dst, size, snr);
}
|
14,694 | #include "includes.h"
// Half-resolution downsample with a separable symmetric 5-tap low-pass filter:
// each block loads a (SCALEDOWN_W+4) x (SCALEDOWN_H+4) halo tile into shared
// memory, filters horizontally into brows[], then vertically, and writes one
// (SCALEDOWN_W/2 x SCALEDOWN_H/2) output tile.
// NOTE(review): SCALEDOWN_W / SCALEDOWN_H and the 3-coefficient kernel
// d_ScaleDownKernel are defined elsewhere (d_ScaleDownKernel presumably in
// __constant__ memory); confirm the launch uses blockDim == (BW, BH).
__global__ void ScaleDownDense(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
#define BW (SCALEDOWN_W+4)
#define BH (SCALEDOWN_H+4)
#define W2 (SCALEDOWN_W/2)
#define H2 (SCALEDOWN_H/2)
__shared__ float irows[BH*BW];   // raw input tile (with 2-pixel halo)
__shared__ float brows[BH*W2];   // horizontally filtered, half-width rows
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*SCALEDOWN_W + tx;
const int yp = blockIdx.y*SCALEDOWN_H + ty;
// Clamp-to-edge addressing for the halo region.
const int xl = min(width-1, max(0, xp-2));
const int yl = min(height-1, max(0, yp-2));
const float k0 = d_ScaleDownKernel[0];
const float k1 = d_ScaleDownKernel[1];
const float k2 = d_ScaleDownKernel[2];
if (xp<(width+4) && yp<(height+4))
irows[BW*ty + tx] = d_Data[yl*pitch + xl];
__syncthreads();
// Horizontal pass: symmetric 5-tap filter, decimating by 2 in x.
if (yp<(height+4) && tx<W2) {
float *ptr = &irows[BW*ty + 2*tx];
brows[W2*ty + tx] = k0*(ptr[0] + ptr[4]) + k1*(ptr[1] + ptr[3]) + k2*ptr[2];
}
__syncthreads();
// Vertical pass: same filter down columns of brows, decimating by 2 in y.
const int xs = blockIdx.x*W2 + tx;
const int ys = blockIdx.y*H2 + ty;
if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) {
float *ptr = &brows[W2*(ty*2) + tx];
d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*W2]) + k1*(ptr[1*W2] + ptr[3*W2]) + k2*ptr[2*W2];
}
} |
14,695 | #include <cuda.h>
#include <iostream>
// Minimal demo kernel: every thread stores its own x-index into the matching
// slot of dev. Assumes a single block launched with exactly as many threads
// as dev has elements (no bounds guard).
__global__ void simplestDeviceFunction(int *dev) {
    dev[threadIdx.x] = threadIdx.x;
}
/* Hello-world CUDA driver: fills a 64-element device array with thread
 * indices, copies it back, and prints it.
 * Fixes over the original: `size` made const (the array was a non-standard
 * VLA in C++), the kernel launch is error-checked, the device allocation is
 * freed, and main returns a value. */
int main() {
    const int size = 64;                 // elements == threads launched
    int a[size];                         // host-side result buffer
    dim3 numBlocks(1, 1, 1);             // a single block...
    dim3 threadsPerBlock(size, 1);       // ...of `size` threads
    int *a_device = NULL;
    cudaError_t gpucheck = cudaMalloc(&a_device, size * sizeof(int));
    if (gpucheck != cudaSuccess) {
        std::cout << "Error allocating memory on GPU. No GPU?" << std::endl;
        exit(1);
    }
    simplestDeviceFunction<<< numBlocks, threadsPerBlock >>>(a_device);
    gpucheck = cudaGetLastError();       // launches don't return errors directly
    if (gpucheck != cudaSuccess) {
        std::cout << "Kernel launch failed: " << cudaGetErrorString(gpucheck) << std::endl;
        cudaFree(a_device);
        exit(1);
    }
    // Blocking copy: also synchronizes with the kernel before we read `a`.
    cudaMemcpy(a, a_device, size * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++)
        std::cout << a[i] << std::endl;
    cudaFree(a_device);                  // was leaked in the original
    return 0;
}
|
14,696 | #define PI 3.141592653589793238462643
#define blocDim 512
#define powOfTwo 4
#define timerCount 10
// Closed-form antiderivative term (t-direction) used when evaluating the
// singular integral over the quadratic form A*s^2 + B*s*t + C*t^2.
// The integrand's log term is 0-valued along the t == 0 line.
__device__ double closedIntt(double s, double t, double A, double B, double C)
{
if (t == 0)
return 0;
return t/sqrt(A)*log(2*A*s+B*t+2*sqrt(A*(A*s*s+B*s*t+C*t*t)));
}
// Closed-form antiderivative term (s-direction) used when evaluating the
// singular integral over the quadratic form A*s^2 + B*s*t + C*t^2.
// The integrand's log term is 0-valued along the s == 0 line.
__device__ double closedInts(double s, double t, double A, double B, double C)
{
if (s == 0)
return 0;
return s/sqrt(C)*log(2*C*t+B*s+2*sqrt(C*(A*s*s+B*s*t+C*t*t)));
}
// Main GPU kernel: each thread block evaluates the boundary-integral equation
// (BIE) at one mesh half-point (k,l) = (blockIdx.x, blockIdx.y). Threads
// accumulate partial quadrature sums in FuncTemp, a few designated threads add
// the 16 closed-form singular-integral terms and the source contribution, and
// a shared-memory tree reduction combines everything into Func.
// Blocks (0,0)..(0,4) additionally fill in Bernoulli's equation and the
// upstream radiation conditions for phi and zeta.
// NOTE(review): the reduction and the thisBlock[] size assume
// blockDim.x == blocDim (512, a power of two >= 18) — confirm at the launch.
__global__ void guts(const long int N, const long int M,
const double * zeta,const double * zetaX,const double * zetaY,
const double * zetaHalf,const double * zetaXHalf,const double * zetaYHalf,
const double * phi,const double * phiX,const double * phiHalf,const double * phiXHalf,const double * phiYHalf,
const double * x,const double * y,const double * xHalf,const double * yHalf,
const double * weiX,const double * weiY,const double * epsilon,const double * source, const bool * singType,const double Fr,
const double n,double * Func)
{
// Block-wide constants (written once by thread 0) plus the reduction buffer.
__shared__ double A, B, C,sUpper,sLower,tNegUpper,tNegLower,tPosUpper,tPosLower,xH,yH,thisBlock[blocDim];
__shared__ long int k,l,blockPos;
double xDiff, yNegDiff, yPosDiff,xDiffSq, yNegDiffSq, yPosDiffSq, zetaDiff,radiusSqNeg,
radiusSqPos,K1,K2,S2,I1,I2,singInt,sqrtStuff,FuncTemp = 0;
long int i,j,threadPos,applyBlockPos;
// First thread initialises the block-wide variables
if(threadIdx.x==0){
k = blockIdx.x;
l = blockIdx.y;
blockPos = k+(N-1)*l; // Get position of the block for mesh half points
xH = xHalf[k];
yH = yHalf[l];
A = 1 + zetaXHalf[blockPos]*zetaXHalf[blockPos];// Coefficients of the local
B = 2*zetaXHalf[blockPos]*zetaYHalf[blockPos]; // quadratic form used by the
C = 1 + zetaYHalf[blockPos]*zetaYHalf[blockPos];// closed-integral terms
sUpper = x[N-1]-xH; //
sLower = x[0]-xH; // Calculate values needed for closed integral
tNegUpper = y[M-1]-yH; // (integration limits relative to the half point,
tNegLower = y[0]-yH; // for both the y > 0 mesh and its mirror image)
tPosUpper = -y[M-1]-yH; //
tPosLower = -y[0]-yH; //
}
// After initialising variables
__syncthreads();
// Have each thread sum over some values of the double integrals
threadPos = threadIdx.x;
i = threadPos%N;
j = threadPos/N;
// Each loop is one collocation point; threads stride by blockDim.x.
while(threadPos<(M*N)){
// Calculate necessary values (Neg = real mesh point, Pos = mirrored point)
xDiff = x[i]-xH;
xDiffSq = xDiff*xDiff;
yNegDiff = y[j]-yH;
yNegDiffSq = yNegDiff*yNegDiff;
yPosDiff = y[j]+yH;
yPosDiffSq = yPosDiff*yPosDiff;
zetaDiff = zeta[threadPos]-zetaHalf[blockPos];
radiusSqNeg = sqrt(xDiffSq+yNegDiffSq+zetaDiff*zetaDiff);
radiusSqPos = sqrt(xDiffSq+yPosDiffSq+zetaDiff*zetaDiff);
// First integral kernel function
K1 = (zetaDiff-xDiff*zetaX[threadPos]-yNegDiff*zetaY[threadPos])/(radiusSqNeg*radiusSqNeg*radiusSqNeg)+
(zetaDiff-xDiff*zetaX[threadPos]-yPosDiff*zetaY[threadPos])/(radiusSqPos*radiusSqPos*radiusSqPos);
// Second integral kernel function
K2 = 1/radiusSqNeg+1/radiusSqPos;
S2 = 1/sqrt(A*xDiffSq+B*xDiff*yNegDiff+C*yNegDiffSq)+1/sqrt(A*xDiffSq-B*xDiff*yPosDiff+C*yPosDiffSq);
// Calculate contributions to the first and second integrals
// (weiX/weiY are the quadrature weights for this collocation point)
I1 = weiX[i]*weiY[j]*(phi[threadPos]-phiHalf[blockPos]-xDiff)*K1;
I2 = weiX[i]*weiY[j]*(zetaX[threadPos]*K2-zetaXHalf[blockPos]*S2);
// Accumulate integral contributions
FuncTemp -= I1;
FuncTemp -= I2;
// Update collocation point
threadPos += blockDim.x;
i = threadPos%N;
j = threadPos/N;
}
// Calculation of the 16 parts to the closed integral split between different
// threads (the last 16 threads of the block each take one corner term).
if(threadIdx.x==blockDim.x-1){
singInt = closedIntt(sUpper,tNegUpper,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-2){
singInt = closedInts(sUpper,tNegUpper,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-3){
singInt = closedIntt(sLower,tNegUpper,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-4){
singInt = closedInts(sLower,tNegUpper,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-5){
singInt = closedIntt(sUpper,tNegLower,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-6){
singInt = closedInts(sUpper,tNegLower,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-7){
singInt = closedIntt(sLower,tNegLower,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-8){
singInt = closedInts(sLower,tNegLower,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-9){
singInt = closedIntt(sUpper,tPosUpper,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-10){
singInt = closedInts(sUpper,tPosUpper,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-11){
singInt = closedIntt(sLower,tPosUpper,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-12){
singInt = closedInts(sLower,tPosUpper,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-13){
singInt = closedIntt(sUpper,tPosLower,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-14){
singInt = closedInts(sUpper,tPosLower,A,B,C);
FuncTemp -= zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-15){
singInt = closedIntt(sLower,tPosLower,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
}else if(threadIdx.x==blockDim.x-16){
singInt = closedInts(sLower,tPosLower,A,B,C);
FuncTemp += zetaXHalf[blockPos]*singInt;
// Add the phi part of the BIE
}else if(threadIdx.x==blockDim.x-17){
FuncTemp += 2*PI*(phiHalf[blockPos]-xHalf[k]);
}
// Add the source contribution
if(threadIdx.x==blockDim.x-18){
sqrtStuff = sqrt((xH-source[0])*(xH-source[0])
+(yH-source[1])*(yH-source[1])
+(zetaHalf[blockPos]-source[2])*(zetaHalf[blockPos]-source[2]));
// singType selects between a point source and a dipole-like term.
if(singType[0]){
FuncTemp += epsilon[0]/sqrtStuff;
}else{
FuncTemp -= epsilon[0]*(xH-source[0])/(sqrtStuff*sqrtStuff*sqrtStuff);
}
}
// Add total contribution from thread to a storage vector
thisBlock[threadIdx.x] = FuncTemp;
// All threads finished evaluating the BIE
__syncthreads();
// Sum up all thread contributions (tree reduction; every thread runs the
// loop, so the barrier is reached by the whole block each iteration).
for(i=blocDim/2;i>0;i=i/2){
if(threadIdx.x<i){
thisBlock[threadIdx.x] += thisBlock[threadIdx.x+i];
}
__syncthreads();
}
// Store complete BIE in correct location of output vector
if(threadIdx.x==0){
applyBlockPos = k+(N+1)*l+2;
Func[applyBlockPos+M*(N+1)] = thisBlock[0];
}
// Split Bernoulli's equation and the radiation conditions between 5 blocks.
// First block calculates Bernoulli's equation
if(k==0&&l==0){
// Have each thread compute Bernoulli's equation for a mesh half point
threadPos = threadIdx.x;
while(threadPos<(M*(N-1))){
i = threadPos%(N-1);
j = threadPos/(N-1);
applyBlockPos = i+(N+1)*j+2;
// Bernoulli's Equation
Func[applyBlockPos]=((1+zetaXHalf[threadPos]*zetaXHalf[threadPos])*phiYHalf[threadPos]*phiYHalf[threadPos]
+(1+zetaYHalf[threadPos]*zetaYHalf[threadPos])*phiXHalf[threadPos]*phiXHalf[threadPos]
-2*zetaXHalf[threadPos]*zetaYHalf[threadPos]*phiXHalf[threadPos]*phiYHalf[threadPos])/
(2*(1+zetaXHalf[threadPos]*zetaXHalf[threadPos]+zetaYHalf[threadPos]*zetaYHalf[threadPos]))
+zetaHalf[threadPos]/(Fr*Fr)-0.5;
threadPos += blockDim.x;
}
// Second block calculates the phi radiation condition
// NOTE(review): below, i/j are derived with stride N-1 but threadPos < M and
// only i feeds applyBlockPos — verify the intended indexing when M > N-1.
}else if(k==0&&l==1){
threadPos = threadIdx.x;
while(threadPos<M){
i = threadPos%(N-1);
j = threadPos/(N-1);
applyBlockPos = (N+1)*i;
// phi radiation condition
Func[applyBlockPos]=x[0]*(phiX[threadPos*N]-1)+n*(phi[threadPos*N]-x[0]);
threadPos += blockDim.x;
}
// Third block calculates the phiX radiation condition
}else if(k==0&&l==2){
threadPos = threadIdx.x;
while(threadPos<M){
i = threadPos%(N-1);
j = threadPos/(N-1);
applyBlockPos = (N+1)*i+1;
// phiX radiation condition
Func[applyBlockPos]=x[0]/(x[1]-x[0])*(phiX[threadPos*N+1]-phiX[threadPos*N])+n*(phiX[threadPos*N]-1);
threadPos += blockDim.x;
}
// Fourth block calculates the zeta radiation condition
}else if(k==0&&l==3){
threadPos = threadIdx.x;
while(threadPos<M){
i = threadPos%(N-1);
j = threadPos/(N-1);
applyBlockPos = (N+1)*i;
// zeta radiation condition
Func[applyBlockPos+M*(N+1)]=x[0]*zetaX[threadPos*N]+n*zeta[threadPos*N];
threadPos += blockDim.x;
}
// Fifth block calculates the zetaX radiation condition
}else if(k==0&&l==4){
threadPos = threadIdx.x;
while(threadPos<M){
i = threadPos%(N-1);
j = threadPos/(N-1);
applyBlockPos = (N+1)*i+1;
// zetaX radiation condition
Func[applyBlockPos+M*(N+1)]=x[0]/(x[1]-x[0])*(zetaX[threadPos*N+1]-zetaX[threadPos*N])+n*zetaX[threadPos*N];
threadPos += blockDim.x;
}
}
}
|
14,697 | #include "includes.h"
int addWithCuda(int *c, const int *a, const int *b, unsigned int size);
bool isLoadDevice = false;
// Helper function for using CUDA to add vectors in parallel.
// Element-wise vector add: c = a + b, one thread per element.
// Single-block kernel — assumes the launch uses exactly `size` threads in one
// block (no bounds guard, no blockIdx term).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
14,698 | #include <iostream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <vector>
#include <sstream>
#define N (2048*2048)
#define THREADS_PER_BLOCK 51
using namespace std;
// Store the least-significant decimal digit of a[i] into b[i], one thread per
// element. No bounds guard: the grid must not overrun the arrays.
__global__ void lastDigit(int *a, int *b) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    b[i] = a[i] % 10;
}
/* Grid-stride MINIMUM reduction over array[0..n) into *min.
 * (Name kept for interface compatibility: despite "maximum", the original
 * reduced with fminf and the caller treats the result as the minimum.)
 *
 * Fixes over the original:
 *  - the running value was seeded with -1.0, which clamped every result to
 *    <= -1; it now starts at INT_MAX, the identity for min;
 *  - the shared cache was float for int data (precision loss above 2^24);
 *  - the atomicCAS spin lock (which can livelock under warp divergence on
 *    pre-Volta GPUs) is replaced by a single atomicMin per block. `mutex`
 *    is unused but retained so existing call sites still compile.
 * Assumes blockDim.x is a power of two and <= 256 (the cache size). */
__global__ void find_maximum_kernel(int *array, int *min, int *mutex, unsigned int n)
{
    unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
    unsigned int stride = gridDim.x*blockDim.x;
    __shared__ int cache[256];

    int temp = 2147483647;  /* INT_MAX: identity element for min */
    for (unsigned int pos = index; pos < n; pos += stride)
        temp = (array[pos] < temp) ? array[pos] : temp;
    cache[threadIdx.x] = temp;
    __syncthreads();

    /* Tree reduction in shared memory; all threads run the loop, so the
     * barrier is reached by the whole block each iteration. */
    for (unsigned int s = blockDim.x/2; s != 0; s /= 2) {
        if (threadIdx.x < s && cache[threadIdx.x + s] < cache[threadIdx.x])
            cache[threadIdx.x] = cache[threadIdx.x + s];
        __syncthreads();
    }

    /* One atomic per block merges the block minimum into the global result. */
    if (threadIdx.x == 0)
        atomicMin(min, cache[0]);
}
/* Reads a comma/space-separated list of ints from inp.txt, writes their
 * minimum to q1a.txt (part a) and each value's last decimal digit to
 * q1b.txt (part b).
 * Fixes over the original: only the values actually read are copied and
 * reduced (the original transferred N = 4M ints, most uninitialized);
 * d_min is seeded with INT_MAX instead of 0 (which capped the minimum at 0);
 * h_min is copied back as an int; and the part-b grid/buffers are sized from
 * the input instead of launching N/THREADS_PER_BLOCK blocks over a small
 * buffer (out-of-bounds writes, since lastDigit has no guard). */
int main()
{
    std::ifstream ifs;
    ifs.open("inp.txt", std::ifstream::in);
    std::string temp;
    std::getline(ifs, temp);
    vector<int> vect;
    std::stringstream ss(temp);
    int i;
    while (ss >> i)
    {
        vect.push_back(i);
        if (ss.peek() == ',' || ss.peek() == ' ' )
            ss.ignore();
    }
    ifs.close();

    const unsigned int count = vect.size();
    const int size = count * sizeof(int);   // bytes actually holding data

    int *h_array = (int*)malloc(size);
    int *h_min = (int*)malloc(sizeof(int));
    int *d_array, *d_min, *d_mutex;
    cudaMalloc((void**)&d_array, size);
    cudaMalloc((void**)&d_min, sizeof(int));
    cudaMalloc((void**)&d_mutex, sizeof(int));

    // Seed the running minimum with INT_MAX; memset-to-0 capped it at 0.
    const int int_max = 2147483647;
    cudaMemcpy(d_min, &int_max, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_mutex, 0, sizeof(int));

    // Populate the host array with exactly the parsed values.
    for (i = 0; i < (int)count; i++)
        h_array[i] = vect[i];
    cudaMemcpy(d_array, h_array, size, cudaMemcpyHostToDevice);

    dim3 gridSize = 256;
    dim3 blockSize = 256;
    find_maximum_kernel<<< gridSize, blockSize >>>(d_array, d_min, d_mutex, count);
    cudaMemcpy(h_min, d_min, sizeof(int), cudaMemcpyDeviceToHost);

    ofstream q1a;
    q1a.open ("q1a.txt");
    q1a << *h_min;
    q1a.close();

    // Part b: lastDigit has no bounds guard, so round the buffers up to a
    // whole grid and launch just enough blocks to cover the input.
    int *d_b_array;
    int *h_array_b_solution;
    int *d_array_b_solution;
    const int blocks = (count + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    const int padded = blocks * THREADS_PER_BLOCK * sizeof(int);
    h_array_b_solution = (int*)malloc(padded);
    cudaMalloc((void **)&d_array_b_solution, padded);
    cudaMalloc((void **)&d_b_array, padded);
    cudaMemcpy(d_b_array, h_array, size, cudaMemcpyHostToDevice);
    lastDigit<<<blocks, THREADS_PER_BLOCK>>>(d_b_array, d_array_b_solution);
    cudaMemcpy(h_array_b_solution, d_array_b_solution, size, cudaMemcpyDeviceToHost);

    ofstream q1b;
    q1b.open ("q1b.txt");
    for (i = 0; i < (int)count; i++) {
        printf("%d, ", h_array_b_solution[i]);
        q1b << h_array_b_solution[i] << ",";
    }
    q1b.close();

    free(h_array);
    free(h_min);
    free(h_array_b_solution);
    cudaFree(d_array);
    cudaFree(d_min);
    cudaFree(d_mutex);
    cudaFree(d_b_array);
    cudaFree(d_array_b_solution);
    return 0;
}
|
14,699 | #include "includes.h"
// Convolution over a border-padded interleaved-RGB image: N is the padded
// input, M the mask_size x mask_size filter, g the output plane.
// paddingSize is scaled by 3 and the k/l tap loops step by 3 so each tap stays
// on the same color channel of the interleaved layout.
// NOTE(review): the index math looks fragile — oPixelPos is computed with
// `cols` (not cols*3), and g accumulates into unsigned char, which can wrap
// for masks whose taps do not sum to <= 1. Verify against the host-side
// padding/launch code before relying on this.
__global__ void convolution_rgb(unsigned char *N,float *M,unsigned char* g,std::size_t cols, std::size_t rows,std::size_t mask_size){
int paddingSize = (( mask_size-1 )/2)*3;
unsigned int paddedH = cols + 2 * paddingSize;
unsigned int paddedW = rows*3 + 2 * paddingSize;
// Thread coordinates offset into the interior (non-padding) region.
int i = blockIdx.x * blockDim.x + threadIdx.x + paddingSize ;
int j = blockIdx.y * blockDim.y + threadIdx.y + paddingSize;
if( (i >= paddingSize) && (i < paddedW-paddingSize) && (j >= paddingSize) && (j<paddedH-paddingSize)) {
unsigned int oPixelPos = (i - paddingSize ) * cols + (j -paddingSize);
g[oPixelPos] = 0;
int iterationK = 0;
// Walk the filter window; taps advance by 3 to stay on one channel.
for(int k = -paddingSize; k <= paddingSize; k=k+3){
int iterationL = 0;
for(int l = -paddingSize; l<=paddingSize; l=l+3){
unsigned int iPixelPos = (i+k)*paddedH+(j+l);
unsigned int filtrePos = iterationK*mask_size + iterationL;
g[oPixelPos] += N[iPixelPos] * M[filtrePos];
iterationL++;
}
iterationK++;
}
}
} |
14,700 | #include "includes.h"
#define TB 128
#define GS(x) (((x) - 1) / TB + 1)
// In-place scalar multiply: input[i] *= factor for i in [0, size).
// One thread per element; the guard covers the grid tail.
__global__ void mul_(float *input, float factor, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    input[idx] *= factor;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.