serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
14,701 | /*
#define __CUDACC__
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
#define Block_Size 4
__global__ void PrefixSumEfficient(int *A_d)
{
int stride,index,i;
__shared__ int XY[Block_Size*2];
i=threadIdx.x;
if(i<2*Block_Size) //For prefix sum we take block size as half the input size
XY[i]=A_d[i]; //Load from global memory to shared memory
__syncthreads();
for(stride=1; stride<=Block_Size; stride*=2) //Reduction Phase
{
index=((threadIdx.x + 1)*stride*2)-1;
if(index<2*Block_Size)
XY[index]+=XY[index-stride];
__syncthreads();
}
for(stride=Block_Size/2; stride>0; stride/=2) //Post Reduction Phase
{
__syncthreads();
index=((threadIdx.x + 1)*stride*2)-1;
if(index+stride<2*Block_Size)
XY[index+stride]+=XY[index];
}
__syncthreads();
A_d[i]=XY[i];
}
int main()
{
int i, *A_d, *A, tile;
int size=sizeof(int)*Block_Size*2;
A=(int *)malloc(size);
printf("Elements to be added:\n");
for(i=0; i<Block_Size*2; i++)
printf("%d \t",A[i]=i+1);
cudaMalloc((void**)&A_d,size);
cudaMemcpy(A_d,A,size,cudaMemcpyHostToDevice);
PrefixSumEfficient<<<1,Block_Size*2>>>(A_d);
cudaMemcpy(A,A_d,size,cudaMemcpyDeviceToHost);
printf("\nEfficient:\n");
for(i=0; i<Block_Size*2; i++)
printf("%d ",A[i]);
cudaFree(A_d);
getch();
return 0;
}
*/
/* OUTPUT -
Elements to be added:
1 2 3 4 5 6 7 8
Efficient:
1 3 6 10 15 21 28 36
*/
|
14,702 | #include "includes.h"
// im2col expansion kernel: rewrites image patches of `data_im` into columns
// of `data_col` so that convolution can be computed as a matrix multiply.
// One element of the (channels*ksize*ksize) x (height_col*width_col) column
// matrix is produced per logical index; a grid-stride loop covers all n
// indices, so the kernel is correct for any launch configuration.
__global__ void bcnn_cuda_im2col_kernel(const int n, const float *data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) {
int i, j, w, h, w_out, h_index, h_out, channel_in, channel_out;
int h_in, w_in;
int index = blockIdx.x * blockDim.x + threadIdx.x;
float *data_col_ptr = NULL;
const float *data_im_ptr = NULL;
// Grid-stride loop: each iteration handles one (channel, h_out, w_out) cell.
for (; index < n; index += blockDim.x * gridDim.x) {
// Decompose the flat index into output column coords and input channel.
w_out = index % width_col;
h_index = index / width_col;
h_out = h_index % height_col;
channel_in = h_index / height_col;
// First of the ksize*ksize output rows written for this input channel.
channel_out = channel_in * ksize * ksize;
// Top-left corner of the receptive field in the (conceptually padded) image.
h_in = h_out * stride - pad;
w_in = w_out * stride - pad;
data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (i = 0; i < ksize; ++i) {
for (j = 0; j < ksize; ++j) {
h = h_in + i;
w = w_in + j;
// Zero padding: positions outside the image contribute 0.
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width)
? data_im_ptr[i * width + j]
: 0;
// Advance one full output row (one kernel tap) per write.
data_col_ptr += height_col * width_col;
}
}
}
} |
#include <assert.h>
#include <math.h>
#include <stdio.h>
#define VECTOR_LENGTH 10000
#define MAX_ERR 1e-4
// Element-wise vector addition: out[i] = a[i] + b[i] for i in [0, n).
//
// Uses a grid-stride loop, so the kernel is correct for ANY launch
// configuration: launched as <<<1, 1>>> (as main does) it degenerates into
// the original serial loop, while with many threads the work is partitioned
// instead of being redundantly repeated by every thread (the original
// kernel made each thread write all n elements).
__global__ void vector_add(float *out, float *a, float *b, int n){
    int stride = blockDim.x * gridDim.x;
    for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride){
        out[i] = a[i] + b[i];
    }
}
// Host driver: allocates vectors, adds them on the GPU, and validates the
// result against the CPU sum within MAX_ERR.
int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    size_t bytes = sizeof(float) * VECTOR_LENGTH;

    // step1: allocate memory on CPU (check: malloc can fail)
    a = (float*)malloc(bytes);
    b = (float*)malloc(bytes);
    out = (float*)malloc(bytes);
    if (a == NULL || b == NULL || out == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // step2: data initialization
    for(int i = 0; i < VECTOR_LENGTH; i++){
        a[i] = 3.0f;
        b[i] = 0.14f;
    }
    // step3: allocate memory on GPU
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_out, bytes);
    // step4: transfer input data from host(CPU) to device(GPU) memory
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
    // step5: execute kernel function on GPU (single thread; the kernel's
    // grid-stride loop covers all VECTOR_LENGTH elements)
    vector_add<<<1, 1>>>(d_out, d_a, d_b, VECTOR_LENGTH);
    // Kernel launches are asynchronous and do not report errors directly;
    // check the launch explicitly instead of failing silently later.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // step6: transfer output back; the blocking cudaMemcpy also
    // synchronizes with the kernel above.
    cudaMemcpy(out, d_out, bytes, cudaMemcpyDeviceToHost);
    for(int i = 0; i < VECTOR_LENGTH; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("out[0] is %f\n", out[0]);
    printf("PASSED\n");
    // step7: free the memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    free(a);
    free(b);
    free(out);
}
14,704 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#define n 5
// Element-wise sum c = a + b over the n-element device arrays
// (n is the file-level #define). One thread per element; threads past the
// end of the arrays simply return.
__global__ void add(int *a, int *b, int *c) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Host driver: reads two n-element integer arrays from stdin, adds them on
// the GPU (one block per element), and prints the sums.
int main(){
    int a[n];
    int b[n];
    int i;
    int c[n]= {0};
    int* dev_a;
    int* dev_b;
    int* dev_c;

    // Device buffers for the two inputs and the result.
    cudaMalloc((void**)&dev_c, n * sizeof(int));
    cudaMalloc((void**)&dev_a, n * sizeof(int));
    cudaMalloc((void**)&dev_b, n * sizeof(int));

    printf("\narray elements (1st):\n");
    for(i=0;i<n;i++){
        // BUG FIX: scanf's return value was ignored; on malformed input the
        // original used uninitialized stack values. Abort instead.
        if (scanf("%d",&a[i]) != 1) {
            fprintf(stderr, "invalid input\n");
            return 1;
        }
    }
    printf("\narray elements (2nd):\n");
    for(i=0;i<n;i++){
        if (scanf("%d",&b[i]) != 1) {
            fprintf(stderr, "invalid input\n");
            return 1;
        }
    }
    cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, n* sizeof(int), cudaMemcpyHostToDevice);
    // n blocks of 1 thread each: one thread per element.
    add<<<n,1>>>(dev_a, dev_b, dev_c);
    // Blocking copy: synchronizes with the kernel before reading c.
    cudaMemcpy(c, dev_c, n* sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nsum is\n");
    for(i=0;i<n;i++){
        printf("%d\n",c[i]);
    }
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaDeviceReset();
    return 0;
}
|
14,705 | #include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
// Sort `n` unsigned 32-bit keys in ascending order on the GPU via Thrust.
// `in` is copied to the device, sorted there, and the sorted sequence is
// written to `out`; the input array itself is left untouched.
void sortThrust(uint32_t *in, int n, uint32_t *out) {
    thrust::device_vector<uint32_t> keys(in, in + n);
    thrust::sort(keys.begin(), keys.end());
    thrust::copy(keys.begin(), keys.end(), out);
}
14,706 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define DRIVER_API_CALL(apiFuncCall) \
do { \
CUresult _status = apiFuncCall; \
if (_status != CUDA_SUCCESS) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define DATA_TYPE 0 // 0-SP, 1-DP
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
#define KERNEL_CALLS 1
#define COMP_ITERATIONS (1024)
#define THREADS (1024)
#define BLOCKS (32768)
#define STRIDE (64*1024)
#define REGBLOCK_SIZE (4)
#define UNROLL_ITERATIONS (32)
#define deviceNum (0)
//CODE
// Warm-up kernel: runs a long chain of dependent multiply-add operations on
// short values purely to bring the GPU up to steady clocks before the timed
// benchmark. The result is stored to `cd` so the compiler cannot eliminate
// the whole computation as dead code.
// NOTE(review): runbench_warmup launches this with 512-thread blocks but
// the store uses a per-block stride of 256, so adjacent blocks overwrite
// each other's slots — harmless for a warm-up, but confirm it is intended.
__global__ void warmup(short* cd){
// Initializers are truncated to short; the 1.0 literal becomes (short)1.
short r0 = 1.0,
r1 = r0+(short)(31),
r2 = r0+(short)(37),
r3 = r0+(short)(41);
for(int j=0; j<COMP_ITERATIONS; j+=UNROLL_ITERATIONS){
#pragma unroll
for(int i=0; i<UNROLL_ITERATIONS; i++){
// Each iteration maps to floating point 8 operations (4 multiplies + 4 additions)
r0 = r0 * r0 + r1;//r0;
r1 = r1 * r1 + r2;//r1;
r2 = r2 * r2 + r3;//r2;
r3 = r3 * r3 + r0;//r3;
}
}
// Sink the result so the arithmetic above is observable.
cd[blockIdx.x * 256 + threadIdx.x] = r0;
}
// Streaming-copy benchmark kernel: each thread copies UNROLL_ITERATIONS
// elements from `cdin` to `cdout` at a fixed STRIDE spacing, repeated
// compute_iters / UNROLL_ITERATIONS times (the same elements each pass).
// Used by runbench() to measure global-memory read+write bandwidth.
// The highest index touched is THREADS*BLOCKS-1 + STRIDE*31, which fits the
// (THREADS*BLOCKS + 32*STRIDE)-element buffers allocated in main.
template <class T>
__global__ void benchmark( T* cdin, T* cdout, int compute_iters){
const long ite=blockIdx.x * THREADS + threadIdx.x;
// const int ite = threadIdx.x+(BLOCKS-blockIdx.x)%BLOCKS*32+threadIdx.x/(int)32*32;
T r0;
// printf("(%d/%d) - %d\n", blockIdx.x,threadIdx.x,ite);
for(int j=0; j<compute_iters; j+=UNROLL_ITERATIONS){
#pragma unroll
for(int i=0; i<UNROLL_ITERATIONS; i++){
r0=cdin[ite+STRIDE*i];
cdout[ite+STRIDE*i]=r0;
}
}
}
// Create a start/stop CUDA event pair and immediately record `start` on the
// default stream, so the caller only needs to record and synchronize `stop`.
void initializeEvents(cudaEvent_t *start, cudaEvent_t *stop){
    // Creating both events first keeps the record as close to the timed
    // region as possible.
    CUDA_SAFE_CALL(cudaEventCreate(stop));
    CUDA_SAFE_CALL(cudaEventCreate(start));
    CUDA_SAFE_CALL(cudaEventRecord(*start, 0));
}
// Record and synchronize the stop event, then return the elapsed time in
// milliseconds between `start` and `stop`. Both events are destroyed before
// returning, so the pair from initializeEvents() is fully consumed.
float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){
    CUDA_SAFE_CALL(cudaGetLastError());        // surface any pending kernel error
    CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
    CUDA_SAFE_CALL(cudaEventSynchronize(stop));
    float elapsed_ms = 0.0f;
    CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_ms, start, stop));
    CUDA_SAFE_CALL(cudaEventDestroy(stop));
    CUDA_SAFE_CALL(cudaEventDestroy(start));
    return elapsed_ms;
}
// Launch the warm-up kernel on a reduced grid (512 blocks x 512 threads)
// and wait for it to finish. Run before the timed benchmarks so the GPU
// reaches steady clocks.
void runbench_warmup(short* cd){
    const int BLOCK_SIZE = 512;
    const int TOTAL_REDUCED_BLOCKS = 512;
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimReducedGrid(TOTAL_REDUCED_BLOCKS, 1, 1);
    warmup<<< dimReducedGrid, dimBlock >>>(cd);
    CUDA_SAFE_CALL( cudaGetLastError() );
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the drop-in replacement with identical semantics.
    CUDA_SAFE_CALL( cudaDeviceSynchronize() );
}
// Time one launch of the `benchmark` streaming-copy kernel and convert the
// elapsed time into an aggregate bandwidth figure (GB/s) written to *bandw.
// DATA_TYPE selects float (4-byte) vs double (8-byte) elements.
// NOTE(review): the L2size parameter is accepted but never used here.
void runbench(double* kernel_time, double* bandw,double* cdin,double* cdout,int L2size, int compute_iters){
cudaEvent_t start, stop;
int type = DATA_TYPE;
dim3 dimBlock(THREADS, 1, 1);
dim3 dimGrid(BLOCKS, 1, 1);
initializeEvents(&start, &stop);
if (type==0){
benchmark<float><<< dimGrid, dimBlock >>>((float*)cdin,(float*)cdout, compute_iters);
}else{
benchmark<double><<< dimGrid, dimBlock >>>(cdin,cdout, compute_iters);
}
// Two global-memory accesses (one read + one write) per inner iteration,
// per thread.
long long shared_access = 2*(long long)(compute_iters)*THREADS*BLOCKS;
cudaDeviceSynchronize();
double time = finalizeEvents(start, stop);
double result;
// Bytes moved = accesses * element size; `time` is in ms, hence the *1000.
if (type==0)
result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024);
else
result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024);
*kernel_time = time;
*bandw=result;
}
// Benchmark driver: parses optional [inner_iterations] [kernel_calls]
// arguments, allocates and zeroes the two streaming buffers, warms the GPU
// up, then times `kernel_calls` launches of the copy benchmark.
int main(int argc, char *argv[]){
    CUdevice device = 0;
    int deviceCount;
    char deviceName[32];
    int kernel_calls=KERNEL_CALLS, compute_iters=COMP_ITERATIONS;
    cudaDeviceProp deviceProp;
    // Either both arguments are given, or none.
    if (argc > 3 || argc == 2) {
        printf("\nError: Wrong number of arguments.\n\n");
        printf("Usage:\n\t %s [inner_iterations] [kernel_calls]\n\t %s\n", argv[0], argv[0]);
        return -1;
    }
    if (argc == 3) {
        kernel_calls = atoi(argv[2]);
        compute_iters = atoi(argv[1]);
    }
    printf("Number of kernel launches: %d\n", kernel_calls);
    printf("Number of compute iterations: %d\n", compute_iters);
    cudaSetDevice(deviceNum);
    double time[kernel_calls][2],value[kernel_calls][4];
    int L2size = 0;   // forwarded to runbench(), currently unused there
    int size = (THREADS*BLOCKS+32*STRIDE)*sizeof(double);
    size_t freeCUDAMem, totalCUDAMem;
    cudaMemGetInfo(&freeCUDAMem, &totalCUDAMem);
    printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem);
    // BUG FIX: `size` is already a byte count; multiplying by sizeof(double)
    // again over-reported the buffer size by 8x.
    printf("Buffer size: %dMB\n", size/(1024*1024));
    // Initialize global memory: one input and one output streaming buffer.
    double *cdin;
    double *cdout;
    CUDA_SAFE_CALL(cudaMalloc((void**)&cdin, size));
    CUDA_SAFE_CALL(cudaMalloc((void**)&cdout, size));
    // Zero-fill both buffers (cudaMemset is byte-wise; 0 is valid for doubles).
    CUDA_SAFE_CALL(cudaMemset(cdin, 0, size));
    CUDA_SAFE_CALL(cudaMemset(cdout, 0, size));
    // Wait for the memory operations to finish (deprecated
    // cudaThreadSynchronize replaced by its modern equivalent).
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    DRIVER_API_CALL(cuDeviceGetCount(&deviceCount));
    if (deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        return -2;
    }
    printf("CUDA Device Number: %d\n", deviceNum);
    DRIVER_API_CALL(cuDeviceGet(&device, deviceNum));
    CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device));
    DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device));
    int i;
    for (i=0;i<10;i++){
        runbench_warmup((short*)cdin);
    }
    for (i=0;i<kernel_calls;i++){
        // BUG FIX: store and report each call's own slot; the original
        // always wrote to and printed time[0][0]/value[0][0].
        runbench(&time[i][0],&value[i][0],cdin,cdout,L2size,compute_iters);
        printf("Registered time: %f ms\n",time[i][0]);
    }
    CUDA_SAFE_CALL( cudaDeviceReset());
    printf("-----------------------------------------------------------------------\n");
    return 0;
}
|
14,707 | #include <bits/stdc++.h>
using std::cout;
using namespace std;
// Naive dense matrix multiply: C = A * B for square N x N row-major
// matrices. One thread computes one element of C; launch with a 2D grid
// covering at least N x N threads (out-of-range threads do nothing).
//
// Improvement over the original: the dot product is accumulated in a
// register and written to global memory once, instead of performing a
// global read-modify-write on c[row * N + col] every k iteration.
__global__ void matrixMul(float *a, float *b, float *c, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < N && col < N) {
        float sum = 0.0f;
        for (int k = 0; k < N; k++) {
            sum += a[row * N + k] * b[k * N + col];
        }
        c[row * N + col] = sum;
    }
}
// Fill the N x N matrix `m` with pseudo-random values in [0, 1]
// (uses the C RNG; seed/state is whatever the process has set).
void init_matrix(float *m, int N){
    const int total = N * N;
    for (int idx = 0; idx < total; idx++) {
        m[idx] = (float) rand() / (float) RAND_MAX;
    }
}
// Print the N x N row-major matrix `a`, one row per line, tab-separated.
void print_matrix(float * a, int N){
    for (int r = 0; r < N; r++) {
        for (int c = 0; c < N; c++) {
            cout << a[r * N + c] << "\t";
        }
        cout << "\n";
    }
}
// Driver: N (matrix dimension) is taken from argv[1]; fills A and B with
// random values, multiplies on the GPU, and prints all three matrices plus
// the elapsed time (which includes allocation, transfers, and the kernel).
int main(int argc, char* argv[]) {
    std::cout << std::fixed;
    std::cout << std::setprecision(4);
    // BUG FIX: the original dereferenced argv[1] without checking argc.
    if (argc < 2) {
        cout << "Usage: " << argv[0] << " <N>\n";
        return 1;
    }
    int N = atoi(argv[1]);
    if (N <= 0) {
        cout << "N must be a positive integer\n";
        return 1;
    }
    // host memory
    float *host_A, *host_B , *host_C;
    host_A = new float[N*N];
    host_B = new float[N*N];
    host_C = new float[N*N];
    // Threads per CTA dimension
    int THREADS = 32;
    // Blocks per grid dimension (ceil division handles N not divisible by THREADS)
    int BLOCKS = (N+THREADS-1)/THREADS;
    // Use dim3 structs for block and grid dimensions
    dim3 threads(THREADS, THREADS);
    dim3 blocks(BLOCKS, BLOCKS);
    // Initialize matrices
    init_matrix(host_A,N);
    init_matrix(host_B,N);
    // Allocate device memory; the timed region starts here.
    float *device_A, *device_B , *device_C;
    auto s = chrono::steady_clock::now();
    cudaMalloc(&device_A, N*N*sizeof(float));
    cudaMalloc(&device_B, N*N*sizeof(float));
    cudaMalloc(&device_C, N*N*sizeof(float));
    // Copy data to the device
    cudaMemcpy(device_A, host_A, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_B, host_B, N*N*sizeof(float), cudaMemcpyHostToDevice);
    // Launch kernel
    matrixMul<<<blocks, threads>>>(device_A, device_B, device_C,N);
    // Copy back to the host (blocking copy also synchronizes with the kernel)
    cudaMemcpy(host_C, device_C, N*N*sizeof(float), cudaMemcpyDeviceToHost);
    // Free memory on device
    cudaFree(device_A);
    cudaFree(device_B);
    cudaFree(device_C);
    // Note Time
    auto e = chrono::steady_clock::now();
    auto diff = e - s;
    double mSecs =chrono::duration <double, milli> (diff).count();
    // Printing
    cout <<"Size = "<<N<<"\t";
    cout<<"\n \n";
    cout<<"A = \n";
    print_matrix(host_A,N);
    cout<<"\n";
    cout<<"B = \n";
    print_matrix(host_B,N);
    cout<<"\n";
    cout<<"C = \n";
    print_matrix(host_C,N);
    cout<<"\n";
    cout<<"Time in ms = "<<mSecs;
    cout<<"\n";
    // BUG FIX: the original leaked the three host buffers.
    delete[] host_A;
    delete[] host_B;
    delete[] host_C;
    return 0;
}
14,708 | // =================================================================
//
// File: intro2.cu
// Author: Pedro Perez
// Description: This file shows some of the basic CUDA directives.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <cuda_runtime.h>
// Device helper: returns the sum of its two operands.
__device__ float fx(float x, float y) {
    float total = x + y;
    return total;
}
// Device entry point: evaluates fx(1, 2) on the GPU and reports the result
// via device-side printf.
__global__ void kernel(void) {
    float result = fx(1.0, 2.0);
    printf("res = %f\n", result);
}
// Launches the demo kernel with a single thread; cudaDeviceSynchronize()
// waits for the kernel (and its device-side printf output) to complete
// before the process exits.
int main(int argc, char* argv[]) {
kernel<<<1, 1>>>();
cudaDeviceSynchronize();
return 0;
}
|
14,709 | /********************************************//**
* Calculate exclusive scan, find repeats,
* Output remainings in parallel with GPU
* Using CUDA language
*
* Written by:
* Dongyang Yao (dongyang.yao@rutgers.edu)
***********************************************/
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <vector>
#include <chrono>
#include <string>
#include <fstream>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
using namespace std::chrono;
int N = 1000000;
#define THREADS_PER_BLK 128
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define CONFLICT_FREE_OFFSET(n) ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
/* Get random inputs */
// Fill `randoms` with `length` pseudo-random ints in [0, max).
// Seeds the C RNG from the wall clock, so each run produces different data.
void generate_randoms(int* randoms, int length, int max) {
    std::srand(std::time(0));
    int i = 0;
    while (i < length) {
        randoms[i] = std::rand() % max;
        //std::cout << randoms[i] << std::endl;
        i++;
    }
}
/* Print out the numbers */
// Print the first `count` numbers, one per line.
void show_samples(int* numbers, int count) {
    for (int idx = 0; idx < count; idx++) {
        std::cout << numbers[idx] << std::endl;
    }
}
/* Get exclusive scan in sequential */
// Sequential reference for the exclusive prefix sum:
// check[0] = 0, check[i] = randoms[0] + ... + randoms[i-1].
void generate_exclusive_scan_truth(int* check, int* randoms, int length) {
    check[0] = 0;
    int running = 0;
    for (int i = 1; i < length; i++) {
        running += randoms[i - 1];
        check[i] = running;
    }
}
/* Get repeats in sequential */
// Sequential reference for find-repeats: records every index i where
// randoms[i] == randoms[i + 1] into `check` and returns how many such
// indices exist.
int generate_find_repeats_truth(std::vector<int>* check, int* randoms, int length) {
    int repeats = 0;
    for (int i = 0; i + 1 < length; i++) {
        if (randoms[i] != randoms[i + 1])
            continue;
        repeats++;
        check->push_back(i);
    }
    return repeats;
}
/* Get remainings in sequential */
// Sequential reference for remove-repeats: copies into `check` each element
// that differs from its successor, always keeps the final element, and
// returns the number of elements kept.
int generate_remove_repeats_truth(std::vector<int>* check, int* randoms, int length) {
    int kept = 0;
    for (int i = 0; i + 1 < length; i++) {
        if (randoms[i] != randoms[i + 1]) {
            kept++;
            check->push_back(randoms[i]);
        }
    }
    // The last element is never a "repeat of its successor", so keep it.
    kept++;
    check->push_back(randoms[length - 1]);
    return kept;
}
/* Compare result with truth */
// Compare `results` against the reference array `check` element-wise,
// reporting every mismatching index, then print a pass/fail summary.
void check_results(int* results, int* check, int length) {
    bool all_match = true;
    for (int i = 0; i < length; i++) {
        //std::cout << check[i] << " " << results[i] << std::endl;
        if (check[i] != results[i]) {
            std::cout << "mis-match at " << i << std::endl;
            all_match = false;
        }
    }
    if (all_match) std::cout << "pass successfully" << std::endl;
    else std::cout << "you have error shown above" << std::endl;
}
/* Compare result with truth */
// Overload: compare `results` against a reference vector, printing each
// mismatching pair and its index, then print a pass/fail summary.
void check_results(int* results, std::vector<int>* check) {
    bool all_match = true;
    int i = 0;
    for (std::vector<int>::iterator it = check->begin(); it != check->end(); ++it, ++i) {
        if (*it != results[i]) {
            std::cout << *it << " " << results[i] << std::endl;
            std::cout << "mis-match at " << i << std::endl;
            all_match = false;
        }
    }
    if (all_match) std::cout << "pass successfully" << std::endl;
    else std::cout << "you have error shown above" << std::endl;
}
/* Get CUDA info on this computer */
// Query the number of CUDA devices and print each device's name.
// NOTE(review): the cudaGetDeviceCount error code is captured but never
// checked; on failure device_count stays 0 and the loop is simply skipped.
void get_cuda_info() {
int device_count = 0;
cudaError_t error = cudaGetDeviceCount(&device_count);
std::cout << "number of gpu: " << device_count << std::endl;
for (int i = 0; i < device_count; i++) {
cudaDeviceProp device_props;
cudaGetDeviceProperties(&device_props, i);
std::cout << "name: " << device_props.name << std::endl;
}
}
/* Get next POW of 2 */
// Round n up to the next power of two via the classic bit-smearing trick:
// after decrementing, OR-ing in right-shifts by 1,2,4,8,16 sets every bit
// below the highest set bit, and the final increment carries to a power of
// two. Returns n itself when n is already a power of two (and 0 for n == 0).
int get_next_pow_2(int n) {
    n--;
    for (int shift = 1; shift <= 16; shift <<= 1) {
        n |= n >> shift;
    }
    n++;
    return n;
}
/* Get exclusive scan on GPU */
// Work-efficient (Blelloch) exclusive scan of one n-element tile in shared
// memory. Each thread loads and stores two elements, so n is the per-block
// tile size (assumed a power of two, at most 2*THREADS_PER_BLK).
// NOTE(review): the CONFLICT_FREE_OFFSET macro defined above shifts by
// NUM_BANKS and relies on '+' binding looser than '>>' (it does not), so
// the padding expression differs from the classic GPU Gems 3 formula —
// confirm the intended bank-conflict offset.
__global__ void exclusive_scan_gpu(int* input, int* output, int n) {
// Padded tile: extra space absorbs the bank-conflict offsets.
__shared__ int temp[4 * THREADS_PER_BLK];
int thid_global = 2 * blockIdx.x * blockDim.x + threadIdx.x;
int thid = threadIdx.x;
{
int offset = 1;
//temp[2 * thid] = input[2 * thid_global];
//temp[2 * thid + 1] = input[2 * thid_global + 1];
// Each thread owns elements thid and thid + n/2 of the tile.
int aind = thid;
int bind = thid + n / 2;
int bankOffsetA = CONFLICT_FREE_OFFSET(aind);
int bankOffsetB = CONFLICT_FREE_OFFSET(bind);
temp[aind + bankOffsetA] = input[thid_global];
temp[bind + bankOffsetB] = input[thid_global + n / 2];
// Upsweep (reduce) phase: build partial sums up a balanced tree.
for (int d = n >> 1; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
// Clear the root (total) before the downsweep; makes the scan exclusive.
if (thid == 0) {
//temp[n - 1] = 0;
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
// Downsweep phase: push prefixes back down the tree.
for (int d = 1; d < n; d *= 2) {
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
// Barrier before reading back: the last downsweep level wrote temp.
__syncthreads();
//output[2 * thid_global] = temp[2 * thid];
//output[2 * thid_global + 1] = temp[2 * thid + 1];
//printf("%d:%d %d:%d\n", 2 * thid_global, output[2 * thid_global], 2 * thid_global + 1, output[2 * thid_global + 1]);
output[thid_global] = temp[aind + bankOffsetA];
output[thid_global + n / 2] = temp[bind + bankOffsetB];
}
}
/* Add partial results with base to get full result on GPU */
// After independent per-tile scans, tile `block_index` still lacks the
// running total of all earlier tiles. That base is reconstructed as the
// inclusive total of the previous tile: its last input element plus its
// last exclusive-scan value. Each thread then adds the base to the two
// elements it owns. Launched as <<<1, THREADS_PER_BLK>>> once per tile,
// in increasing tile order (see the loops in main).
__global__ void add_base_gpu(int* device_input, int* device_output, int block_index) {
int block_last_element = block_index * THREADS_PER_BLK * 2 - 1;
int base = device_input[block_last_element] + device_output[block_last_element];
int thid = block_index * blockDim.x + threadIdx.x;
device_output[2 * thid] += base;
device_output[2 * thid + 1] += base;
}
/* Mark repeat on GPU */
// Write a 0/1 flag per element: with mark_repeat == true, flag positions
// whose value equals the next element (repeats); with mark_repeat == false,
// flag positions that differ from the next element (survivors). The final
// element's flag is left untouched.
__global__ void mark_flags_gpu(int* input, int* flags, int length, bool mark_repeat) {
    int thid = blockIdx.x * blockDim.x + threadIdx.x;
    if (thid >= length - 1)
        return;
    bool is_repeat = (input[thid] == input[thid + 1]);
    flags[thid] = (is_repeat == mark_repeat) ? 1 : 0;
}
/* Get repeats on GPU */
// Scatter kernel shared by find-repeats and remove-repeats.
// flags_scaned holds the exclusive scan of the 0/1 flags, so
// flags_scaned[i] < flags_scaned[i+1] identifies flagged positions and
// flags_scaned[i] is the compacted output slot. In repeat mode the index i
// itself is emitted; in remove mode the surviving value is emitted and the
// final input element is always appended.
__global__ void get_repeat_results(int* input, int* flags_scaned, int length, int* output, bool mark_repeat) {
int thid = blockIdx.x * blockDim.x + threadIdx.x;
if ((thid < length - 1) && (flags_scaned[thid] < flags_scaned[thid + 1])) {
//printf("id:%d %d\n", thid, flags_scaned[thid]);
output[flags_scaned[thid]] = mark_repeat ? thid : input[thid];
}
if ((thid == length - 1) && (!mark_repeat)) {
//printf("id:%d %d\n", thid, flags_scaned[thid]);
output[flags_scaned[thid]] = input[length - 1];
}
}
/* Get exclusive scan on CPU */
// Work-efficient (Blelloch) exclusive scan on the CPU. `length` must be a
// power of two; `output` receives a copy of `randoms` and is then scanned
// in place via the upsweep/downsweep tree passes.
void exclusive_scan_sequential(int* randoms, int length, int* output) {
    memmove(output, randoms, length * sizeof(int));
    // Upsweep: build partial sums in a balanced-tree pattern.
    for (int stride = 1; stride < length; stride *= 2) {
        int step = stride * 2;
        for (int base = 0; base < length; base += step) {
            output[base + step - 1] += output[base + stride - 1];
        }
    }
    // The root now holds the grand total; clear it before the downsweep.
    output[length - 1] = 0;
    // Downsweep: push prefixes back down the tree.
    for (int stride = length / 2; stride >= 1; stride /= 2) {
        int step = stride * 2;
        for (int base = 0; base < length; base += step) {
            int left = output[base + stride - 1];
            output[base + stride - 1] = output[base + step - 1];
            output[base + step - 1] += left;
        }
    }
}
/* Get repeats on CPU */
// CPU reference for find-repeats: writes into `results` every index i where
// randoms[i] == randoms[i + 1], compacted via an exclusive scan of the
// repeat flags, and returns the number of repeats found.
//
// Fixes over the original:
//  - uses the `length` parameter instead of the file-level global N when
//    rounding, so the function works for any caller-supplied size;
//  - allocates the flag array at the rounded power-of-two size and
//    zero-initializes it, eliminating both the out-of-bounds read inside
//    exclusive_scan_sequential (which copies length_rounded ints) and the
//    read of the previously uninitialized flags[length - 1].
int find_repeats_sequential(int* results, int* randoms, int length) {
    int count = 0;
    int length_rounded = get_next_pow_2(length);
    // Value-initialized: the tail beyond length-1 (and the last flag) is 0.
    int* flags = new int[length_rounded]();
    for (int i = 0; i < length - 1; i++) {
        if (randoms[i] == randoms[i + 1]) {
            count++;
            flags[i] = 1;
        }
    }
    int* flags_scaned = new int[length_rounded];
    exclusive_scan_sequential(flags, length_rounded, flags_scaned);
    // A flagged position is one where the scan value increases; the scan
    // value is its slot in the compacted output.
    for (int i = 0; i < length - 1; i++) {
        if (flags_scaned[i] < flags_scaned[i + 1]) {
            results[flags_scaned[i]] = i;
        }
    }
    delete[] flags;
    delete[] flags_scaned;
    return count;
}
// Driver: runs exclusive scan, find-repeats, and remove-repeats on random
// (or file-provided) data, on both CPU and GPU, cross-checking every GPU
// result against a sequential ground truth and writing results to files.
int main(int argc, char** argv) {
int* randoms;
bool use_external = false;
// Argument handling: a numeric argv[1] sets N; a non-numeric argv[1] is
// treated as a file name holding the count followed by one value per line.
if (argc == 2) {
int in = atoi(argv[1]);
if (in != 0) N = in;
else {
std::string line;
std::ifstream file (argv[1]);
if (file.is_open()) {
use_external = true;
std::cout << "loading external data..." << std::endl;
getline(file, line);
N = std::stoi(line);
randoms = new int[N];
int i = 0;
while (getline(file, line)) {
randoms[i++] = std::stoi(line);
}
file.close();
} else {
std::cout << "cannot find the file!" << std::endl;
}
}
}
std::cout << "**********" << std::endl;
std::cout << "DEBUG INFO" << std::endl;
std::cout << "**********" << std::endl;
std::cout << "number of threads per block: " << THREADS_PER_BLK << std::endl;
const int MAX = 100;
//const int NUM_SAMPLE = 10;
// Generate random input unless it was loaded from a file above.
if (!use_external) {
randoms = new int[N];
std::cout << "generating random numbers..." << std::endl;
std::cout << "max: " << MAX << std::endl;
generate_randoms(randoms, N, MAX);
}
std::cout << "count: " << N << std::endl;
//for (int i = 0; i < N; i++) std::cout << i << ":" << randoms[i] << std::endl;
/*
std::cout << "showing random numbers..." << std::endl;
std::cout << "count: " << NUM_SAMPLE << std::endl;
show_samples(randoms, NUM_SAMPLE);
*/
// --- Exclusive scan: sequential ground truth, then timed CPU scan ---
int* exclusive_scan_check = new int[N];
std::cout << "generating exclusive scan ground truth..." << std::endl;
generate_exclusive_scan_truth(exclusive_scan_check, randoms, N);
std::cout << "computing exclusive scan in cpu..." << std::endl;
// The Blelloch scan requires a power-of-two length; round N up.
// NOTE(review): exclusive_scan_sequential copies `length` ints from
// `randoms`, which only holds N — reads past N when N is not a power of 2.
int length = get_next_pow_2(N);
int* output_sequential = new int[length];
high_resolution_clock::time_point start = high_resolution_clock::now();
exclusive_scan_sequential(randoms, length, output_sequential);
high_resolution_clock::time_point end = high_resolution_clock::now();
duration<double> time_span = duration_cast<duration<double> >(end - start);
std::cout << "elapsed time: " << time_span.count() * 1000 << " ms" << std::endl;
check_results(output_sequential, exclusive_scan_check, N);
/*
std::cout << "showing ground truth..." << std::endl;
std::cout << "count: " << NUM_SAMPLE << std::endl;
show_samples(exclusive_scan_check, NUM_SAMPLE);
*/
std::cout << "checking gpu availability..." << std::endl;
get_cuda_info();
std::cout << "rounding up to the next highest power of 2..." << std::endl;
std:: cout << "rounded length: " << length << std::endl;
// --- Device buffers for all three GPU computations ---
int* exclusive_scan_gpu_results = new int[N];
int* device_input;
int* device_output;
int* find_repeat_gpu_results = new int[N];
int* flags;
int* flags_scaned;
int* find_repeat_output;
int* remove_repeat_gpu_results = new int[N];
int* flags_remain;
int* flags_remain_scaned;
int* remove_repeat_output;
std::cout << "allocateing memory on gpu for input and output..." << std::endl;
cudaMalloc((void **) &device_input, sizeof(int) * length);
cudaMalloc((void **) &device_output, sizeof(int) * length);
cudaMalloc((void **) &flags, sizeof(int) * length);
cudaMalloc((void **) &flags_scaned, sizeof(int) * length);
cudaMalloc((void **) &find_repeat_output, sizeof(int) * length);
cudaMalloc((void **) &flags_remain, sizeof(int) * length);
cudaMalloc((void **) &flags_remain_scaned, sizeof(int) * length);
cudaMalloc((void **) &remove_repeat_output, sizeof(int) * length);
std::cout << "copying the random numbers from cpu to gpu..." << std::endl;
cudaMemcpy(device_input, randoms, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(device_output, randoms, sizeof(int) * N, cudaMemcpyHostToDevice);
// --- GPU exclusive scan: per-tile scans, then base propagation ---
std::cout << "computing exclusive scan on gpu..." << std::endl;
int num_block = length / (THREADS_PER_BLK * 2);
if (num_block == 0) num_block = 1;
std::cout << "number of block: " << num_block << std::endl;
start = high_resolution_clock::now();
exclusive_scan_gpu<<<num_block, THREADS_PER_BLK>>>(device_input, device_output, length / num_block);
// NOTE(review): cudaThreadSynchronize() is deprecated in favor of
// cudaDeviceSynchronize() (same semantics).
cudaThreadSynchronize();
end = high_resolution_clock::now();
time_span = duration_cast<duration<double> >(end - start);
std::cout << "elapsed time: " << time_span.count() * 1000 << " ms" << std::endl;
//cudaMemcpy(exclusive_scan_gpu_results, device_output, sizeof(int) * N, cudaMemcpyDeviceToHost);
//for (int i = 0; i < N; i++) std::cout << i << ":" << exclusive_scan_gpu_results[i] << ":" << exclusive_scan_check[i] << std::endl;
// Sequentially add each preceding tile's total (launches serialize on the
// default stream, so tile i sees tile i-1's finished result).
for (int i = 1; i < num_block; i++)
add_base_gpu<<<1, THREADS_PER_BLK>>>(device_input, device_output, i);
cudaMemcpy(exclusive_scan_gpu_results, device_output, sizeof(int) * N, cudaMemcpyDeviceToHost);
//for (int i = 0; i < N; i++) std::cout << i << ":" << exclusive_scan_gpu_results[i] << ":" << exclusive_scan_check[i] << std::endl;
check_results(exclusive_scan_gpu_results, exclusive_scan_check, N);
// --- Same scan via Thrust, for comparison ---
std::cout << "computing exclusive scan using THRUST library..." << std::endl;
int* scan_thrust_results = new int[N];
thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length);
thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length);
cudaMemcpy(d_input.get(), randoms, N * sizeof(int), cudaMemcpyHostToDevice);
start = high_resolution_clock::now();
thrust::exclusive_scan(d_input, d_input + length, d_output);
end = high_resolution_clock::now();
time_span = duration_cast<duration<double> >(end - start);
std::cout << "elapsed time: " << time_span.count() * 1000 << " ms" << std::endl;
cudaMemcpy(scan_thrust_results, d_output.get(), N * sizeof(int), cudaMemcpyDeviceToHost);
check_results(scan_thrust_results, exclusive_scan_check, N);
// --- Find repeats: ground truth, CPU, then GPU ---
std::cout << "generating find repeats ground truth..." << std::endl;
std::vector<int>* find_repeats_check = new std::vector<int>();
int repeats_count = generate_find_repeats_truth(find_repeats_check, randoms, N);
std::cout << "computing find repeats on cpu..." << std::endl;
int* find_repeats_sequential_results = new int[N];
start = high_resolution_clock::now();
find_repeats_sequential(find_repeats_sequential_results, randoms, N);
end = high_resolution_clock::now();
time_span = duration_cast<duration<double> >(end - start);
std::cout << "elapsed time: " << time_span.count() * 1000 << " ms" << std::endl;
check_results(find_repeats_sequential_results, find_repeats_check);
std::cout << "number of repeats: " << repeats_count << std::endl;
std::cout << "computing find repeats on gpu..." << std::endl;
int num_block_repeat = length / THREADS_PER_BLK;
if (num_block_repeat == 0) num_block_repeat = 1;
start = high_resolution_clock::now();
mark_flags_gpu<<<num_block_repeat, THREADS_PER_BLK>>>(device_input, flags, length, true);
exclusive_scan_gpu<<<num_block, THREADS_PER_BLK>>>(flags, flags_scaned, length / num_block);
cudaThreadSynchronize();
end = high_resolution_clock::now();
time_span = duration_cast<duration<double> >(end - start);
std::cout << "elapsed time: " << time_span.count() * 1000 << " ms" << std::endl;
for (int i = 1; i < num_block; i++)
add_base_gpu<<<1, THREADS_PER_BLK>>>(flags, flags_scaned, i);
get_repeat_results<<<num_block_repeat, THREADS_PER_BLK>>>(device_input, flags_scaned, N, find_repeat_output, true);
cudaMemcpy(find_repeat_gpu_results, find_repeat_output, sizeof(int) * N, cudaMemcpyDeviceToHost);
check_results(find_repeat_gpu_results, find_repeats_check);
// --- Remove repeats: ground truth, then GPU ---
std::cout << "generating remove repeats ground truth..." << std::endl;
std::vector<int>* remove_repeats_check = new std::vector<int>();
start = high_resolution_clock::now();
int remain_count = generate_remove_repeats_truth(remove_repeats_check, randoms, N);
end = high_resolution_clock::now();
time_span = duration_cast<duration<double> >(end - start);
std::cout << "elapsed time: " << time_span.count() * 1000 << " ms" << std::endl;
std::cout << "number of remains: " << remain_count << std::endl;
std::cout << "computing remove repeats on gpu..." << std::endl;
start = high_resolution_clock::now();
mark_flags_gpu<<<num_block_repeat, THREADS_PER_BLK>>>(device_input, flags_remain, length, false);
exclusive_scan_gpu<<<num_block, THREADS_PER_BLK>>>(flags_remain, flags_remain_scaned, length / num_block);
cudaThreadSynchronize();
end = high_resolution_clock::now();
time_span = duration_cast<duration<double> >(end - start);
std::cout << "elapsed time: " << time_span.count() * 1000 << " ms" << std::endl;
for (int i = 1; i < num_block; i++)
add_base_gpu<<<1, THREADS_PER_BLK>>>(flags_remain, flags_remain_scaned, i);
get_repeat_results<<<num_block_repeat, THREADS_PER_BLK>>>(device_input, flags_remain_scaned, N, remove_repeat_output, false);
cudaMemcpy(remove_repeat_gpu_results, remove_repeat_output, sizeof(int) * N, cudaMemcpyDeviceToHost);
check_results(remove_repeat_gpu_results, remove_repeats_check);
// --- Summary report + output files ---
std::cout << "************" << std::endl;
std::cout << "REQUIREMENTS" << std::endl;
std::cout << "************" << std::endl;
std::cout << "array A (exclusive scan)" << std::endl;
std::cout << "size: " << N << std::endl;
std::cout << "last element: " << exclusive_scan_gpu_results[N - 1] << std::endl;
std::cout << "array B (repeating indices)" << std::endl;
std::cout << "size: " << repeats_count << std::endl;
if (repeats_count != 0)
std::cout << "last element:" << find_repeat_gpu_results[repeats_count - 1] << std::endl;
std::cout << "array C (remaining entries)" << std::endl;
std::cout << "size: " << remain_count << std::endl;
if (remain_count != 0)
std::cout << "last_element: " << remove_repeat_gpu_results[remain_count - 1] << std::endl;
std::cout << "output exclusive scan gpu results file..." << std::endl;
std::ofstream myfile1 ("A_exclusive_scan.txt");
if (myfile1.is_open())
{
myfile1 << "size: " << N << "\n";
for (int i = 0; i < N; i++) {
myfile1 << exclusive_scan_gpu_results[i] << "\n";
}
myfile1.close();
}
std::cout << "output repeat indices gpu results file..." << std::endl;
std::ofstream myfile2 ("B_repeat_indices.txt");
if (myfile2.is_open())
{
myfile2 << "size: " << repeats_count << "\n";
for (int i = 0; i < repeats_count; i++) {
myfile2 << find_repeat_gpu_results[i] << "\n";
}
myfile2.close();
}
std::cout << "output remaining entries gpu results file..." << std::endl;
std::ofstream myfile3 ("C_remaining_entries.txt");
if (myfile3.is_open())
{
myfile3 << "size: " << remain_count << "\n";
for (int i = 0; i < remain_count; i++) {
myfile3 << remove_repeat_gpu_results[i] << "\n";
}
myfile3.close();
}
// Host cleanup (device memory and the two check vectors are reclaimed at
// process exit; NOTE(review): the vectors and cudaMalloc'd buffers are
// never explicitly freed).
delete[] randoms;
delete[] exclusive_scan_check;
delete[] output_sequential;
delete[] exclusive_scan_gpu_results;
delete[] find_repeat_gpu_results;
delete[] remove_repeat_gpu_results;
delete[] scan_thrust_results;
return 0;
}
|
14,710 | #include "includes.h"
// Factorization-machine second-order interaction kernel.
// For each sample (block bid) and embedding dimension (thread tid) it
// computes 0.5 * ((sum_i e_i)^2 - sum_i e_i^2) over the slot_num embedding
// vectors of that sample — the classic FM pairwise-interaction identity.
// Expected launch: batch_size blocks, >= emb_vec_size threads per block
// (extra threads/blocks are filtered by the guard below).
__global__ void fm_order2_kernel(const float* in, float* out, int batch_size, int slot_num, int emb_vec_size) {
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid < emb_vec_size && bid < batch_size) {
float emb_sum = 0.0f;          // sum of embeddings over slots
float emb_sum_square = 0.0f;   // (sum)^2, filled in after the loop
float emb_square_sum = 0.0f;   // sum of squared embeddings
// Stride through this sample's slot_num embedding vectors at dim tid.
int offset = bid * slot_num * emb_vec_size + tid;
for (int i = 0; i < slot_num; i++) {
int index = offset + i * emb_vec_size;
float temp = in[index];
emb_sum += temp;
emb_square_sum += temp * temp;
}
emb_sum_square = emb_sum * emb_sum;
// 0.5 * ((sum)^2 - sum of squares) = sum over pairs of products.
out[bid * emb_vec_size + tid] = 0.5f * (emb_sum_square - emb_square_sum);
}
} |
14,711 | /*
Author: Su Ming Yi
Date: 11/16/2018
Goal: Learn CUDA hello world
How to compile it:
qsub -I -l walltime=00:01:00 -l nodes=1:gpus=1,mem=4gb -A PAS0027
module load cuda
nvcc -o hello hello_world.cu
How to run it:
./hello
*/
#include "stdio.h"
int main()
{
printf("Hello world.\n");
return 0;
}
|
14,712 |
// Babak Poursartip
// 09/28/2020
// warp divergence
//>>>>>>>>>>>>>>. to check the branch_efficiency:
// nvcc -G 2_warp_divergence.cu -o 2_warp_divergence.out
// sudo nvprof --metrics branch_efficiency ./2_warp_divergence.out
// here is the outpu:
/*
starts ...
size: 4194304 - block.x: 128 - grid.x: 32768
==32934== NVPROF is profiling process 32934, command: ./2_warp_divergence.out
execution time without warp divergence: 0.192828
execution time with warp divergence: 0.008774
done.
==32934== Profiling application: ./2_warp_divergence.out
==32934== Profiling result:
==32934== Metric result:
Invocations Metric Name Metric Description Min Max Avg
Device "NVIDIA GeForce GT 1030 (0)"
Kernel: code_without_divergence(void)
1 branch_efficiency Branch Efficiency 100.00% 100.00% 100.00%
Kernel: code_with_divergence(void)
1 branch_efficiency Branch Efficiency 83.33% 83.33% 83.33%
*/
#include <iostream>
#include <time.h>
// Profiling demo: the branch predicate (warp_id % 2) is uniform across each
// warp because gid/64 is constant for 64 consecutive threads, so no lanes
// diverge and branch_efficiency profiles at 100%. `a` and `b` are
// intentionally dead — the kernel exists only to be profiled.
__global__ void code_without_divergence()
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
float a = 0, b = 0;
int warp_id = gid / 64;
if (warp_id % 2 == 0) {
a = 100.0;
b = 50.0;
} else {
a = 200.0;
b = 72.0;
}
}
// Profiling demo: the branch predicate (gid % 2) alternates between adjacent
// lanes of the same warp, so both paths execute under masking (warp
// divergence); branch_efficiency drops below 100%. `a` and `b` are
// intentionally dead.
__global__ void code_with_divergence()
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
float a = 0, b = 0;
if (gid % 2 == 0) {
a = 100.0;
b = 50.0;
} else {
a = 200.0;
b = 72.0;
}
}
// Host driver: times each kernel launch with clock().
// NOTE(review): the first launch also pays one-time CUDA context creation,
// which is why "without divergence" appears *slower* in the sample output
// at the top of this file; a warm-up launch before timing would make the
// comparison meaningful.
int main() {
printf(" starts ...\n");
int size = 1 << 22;
dim3 block(128);
dim3 grid((size + block.x - 1) / block.x); // ceil-div launch config
printf(" size: %d - block.x: %d - grid.x: %d\n", size, block.x, grid.x);
clock_t start, end;
start = clock();
code_without_divergence<<<grid, block>>>();
cudaDeviceSynchronize();
end = clock();
printf(" execution time without warp divergence: %4.6f \n", (double)((double)(end - start) / CLOCKS_PER_SEC));
start = clock();
code_with_divergence<<<grid, block>>>();
cudaDeviceSynchronize();
end = clock();
printf(" execution time with warp divergence: %4.6f \n", (double)((double)(end - start) / CLOCKS_PER_SEC));
cudaDeviceSynchronize();
printf(" done.\n");
return 0;
} |
14,713 | #include <iostream>
#include <memory>
#include "cuda.h"
bool allclose(float x, float y, float threshold = 1e-4) {
return abs(x - y) < threshold;
}
// CPU reference for the tensor (outer) product: treats src1 as an (xa x ya)
// matrix and src2 as a (za x wa) matrix, and fills dst as an
// (xa*za) x (ya*wa) matrix with dst[(xi,zi),(yi,wi)] = src1[xi,yi] * src2[zi,wi].
void dotc(float*dst, float*src1, float*src2, int xa, int ya, int za, int wa) {
    const int ywa = ya * wa; // row length of the output
    for (int xi = 0; xi < xa; ++xi)
        for (int zi = 0; zi < za; ++zi)
            for (int yi = 0; yi < ya; ++yi)
                for (int wi = 0; wi < wa; ++wi) {
                    const int row = xi * za + zi; // combined xz index
                    const int col = yi * wa + wi; // combined yw index
                    dst[row * ywa + col] = src1[xi * ya + yi] * src2[zi * wa + wi];
                }
}
// Stopwatch: returns `us` * (CPU seconds elapsed since the previous call).
// State lives in a function-local static; the very first call measures
// against a dummy (-1) baseline, so callers prime it with one throwaway call.
double checkpointc(int us = 1e3) {
    static clock_t timer = -1;
    const clock_t now = clock();
    const double scaled = us * ((now - timer) / (double)CLOCKS_PER_SEC);
    timer = now;
    return scaled;
}
// Driver: builds two ramp-valued matrices (dims from argv, default 89),
// prints them, runs the CPU tensor product and reports its wall time.
// Fix: the three heap buffers were never released (leak on every run).
int main(int argc, char* argv[]) {
    const int defdim = 89;
    int xa = argc > 1 ? std::atoi(argv[1]) : defdim;
    int ya = argc > 2 ? std::atoi(argv[2]) : defdim;
    int za = argc > 3 ? std::atoi(argv[3]) : defdim;
    int wa = argc > 4 ? std::atoi(argv[4]) : defdim;
    float* src1 = (float*)malloc(sizeof(float) * xa * ya);
    float* src2 = (float*)malloc(sizeof(float) * za * wa);
    float* dst = (float*)malloc(sizeof(float)* xa * ya * za * wa);
    std::cout<<"------SRC1 / SRC2--------"<<std::endl;
    for(int xi = 0; xi < xa; xi++) {
        for(int yi = 0; yi < ya; yi++) {
            src1[xi * ya + yi] = xi * ya + yi; // ramp fill
            std::cout<<src1[xi * ya + yi]<<" ";
        }
        std::cout<<std::endl;
    }
    std::cout<<"+++++THRES+++++++"<<std::endl;
    std::cout<<"------SRC1 / SRC2--------"<<std::endl;
    for(int zi = 0; zi < za; zi++) {
        for(int wi = 0; wi < wa; wi++) {
            src2[zi * wa + wi] = zi * wa + wi;
            std::cout<<src2[zi * wa + wi]<<" ";
        }
        std::cout<<std::endl;
    }
    std::cout<<"SRC -> DST"<<std::endl;
    checkpointc(); // prime the stopwatch (first interval is meaningless)
    dotc(dst, src1 ,src2, xa, ya, za, wa);
    double cstime = checkpointc();
    std::cout<<"******CPU DST*********"<<std::endl;
    int xza = xa * za;
    int ywa = ya * wa;
    /*for(int xzi = 0; xzi < xza; xzi++) {
    for(int ywi = 0; ywi < ywa; ywi++) {
    std::cout<<dst[xzi * ywa + ywi]<<" ";
    }
    std::cout<<std::endl;
    }*/
    std::cout<<"CSTIME: "<<cstime<<std::endl;
    // fix: release the malloc'd buffers (previously leaked)
    free(src1);
    free(src2);
    free(dst);
}
|
14,714 | #include <cmath>
// Element-wise natural logarithm, in place. One thread per element; the
// indexing uses only threadIdx.x, so a single-block launch covers the array.
__global__ void mylog(float* value)
{
    float v = value[threadIdx.x];
    value[threadIdx.x] = std::log(v);
}
|
14,715 | #include <iostream>
#define N 16
using namespace std;
// Naive N x N row-major matrix multiply, c = a * b, one thread per output
// element; expects a single block of at least N x N threads (ix = row,
// iy = column). Accumulates in a register and writes the result once.
__global__ void matrix(float * a, float * b, float * c)
{
    int ix = threadIdx.x;
    int iy = threadIdx.y;
    if (ix >= N || iy >= N)
        return; // surplus threads do nothing
    float acc = 0.0f;
    for (int k = 0; k < N; k++)
        acc += a[ix*N + k] * b[k*N + iy];
    c[ix*N + iy] = acc;
}
// --------------------------------------------------------- Main function -----------------------------------------------------------
// --------------------------------------------------------- Main function -----------------------------------------------------------
// Multiplies two constant N x N matrices (a=1, b=2) on the GPU and prints
// the result (every entry should be 2*N).
// Fix: the three host arrays were never delete[]'d (leak).
int main ()
{
    // Allocate memory on the CPU
    float * a = new float[N*N];
    float * b = new float[N*N];
    float * c = new float[N*N];
    // Declare variables on the device (GPU)
    float * dev_a;
    float * dev_b;
    float * dev_c;
    // Allocate memory on the GPU
    cudaMalloc( (void**)&dev_a, N * N * sizeof(float) );
    cudaMalloc( (void**)&dev_b, N * N * sizeof(float) );
    cudaMalloc( (void**)&dev_c, N * N * sizeof(float) );
    // Fill the matrices a , b
    for (int i=0; i<N; i++)
        for (int j=0; j<N; j++)
        {
            a[i*N + j] = 1;
            b[i*N + j] = 2;
        }
    // Copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, N * N * sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * N * sizeof(float), cudaMemcpyHostToDevice );
    // Call the multiplication function: one block of N x N threads
    dim3 blocks(1,1);
    dim3 threads(N,N);
    matrix<<< blocks, threads >>>(dev_a, dev_b, dev_c);
    // Copy the array 'c' from the GPU back to the CPU (blocking, so no
    // explicit synchronize is needed before reading c)
    cudaMemcpy( c, dev_c, N * N * sizeof(float), cudaMemcpyDeviceToHost );
    // Print the results
    for (int i=0; i<N; i++)
    {
        for (int j=0; j<N; j++)
            cout << c[i*N + j] << " ";
        cout << endl;
    }
    // Free the memory allocated on the GPU
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    // fix: free the host-side matrices as well (previously leaked)
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
14,716 | #include "includes.h"
// Element-wise add of a per-cell bias onto a distance field:
// biasedDistance[i] = distance[i] + biasTerm[i], guarded by maxCells.
// Thread id is flattened over a 2-D grid of 1-D blocks.
__global__ void ComputeBiasedDistanceKernel( float *distance, float *biasedDistance, float *biasTerm, int maxCells )
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
biasedDistance[threadId] = distance[threadId] + biasTerm[threadId];
}
} |
14,717 | #include <iostream>
#include <cuda_runtime.h>
#include <iomanip>
using namespace std;
struct feature {
int channel;
int width;
int height;
size_t size;
float* h_elements;
float* d_elements;
};
struct kernel {
int number;
int channel;
int width;
int height;
size_t size;
float* h_elements;
float* d_elements;
};
// Direct 2-D convolution (valid padding, stride 1): one block per output
// element (n = output channel, w/h = spatial position), one thread per
// INPUT channel c. Each thread accumulates its channel's contribution into
// dynamic shared memory, then thread-parallel partials are summed serially.
// NOTE(review): assumes blockDim.x == A.channel == B.channel and that the
// dynamic shared allocation holds at least B.channel floats — the launch in
// convolution() below passes the shared size; confirm it is in BYTES.
// NOTE(review): every thread computes the full Cvalue sum and writes the
// same result — redundant but not incorrect.
__global__ void Conv2dKernel(feature A, kernel B, feature C){
float Cvalue = 0;
int c = threadIdx.x;
int n = blockIdx.x;
int w = blockIdx.y;
int h = blockIdx.z;
extern __shared__ float channels[];
channels[c] = 0;
for (int i = 0; i < B.width; i++){
for (int j = 0; j < B.height; j++){
int idxA = c * A.height * A.width + (w + i) * A.height + h + j;
int idxB = n * B.channel * B.width * B.height + c * B.width * B.height + i * B.width + j;
channels[c] += A.d_elements[idxA] * B.d_elements[idxB];
}
}
__syncthreads();
for (int i = 0; i < B.channel; i++)
Cvalue += channels[i];
__syncthreads();
C.d_elements[n * gridDim.y * gridDim.z + w * gridDim.z + h] = Cvalue;
}
// Adds a per-channel bias (B.d_elements[c], one scalar per channel) to every
// element of feature map A, in place.
// Launch: grid = (channels, width, height), one thread per block.
__global__ void AddBiasKernel(feature A, kernel B){
    const int c = blockIdx.x;
    const int w = blockIdx.y;
    const int h = blockIdx.z;
    A.d_elements[c * A.width*A.height + w * A.height + h] += B.d_elements[c];
}
// Copies the unpadded tensor A (dimensions taken from gridDim) into the
// interior of the zero-initialized padded tensor B, shifted by `padding`
// on each spatial side.
// Launch: grid = (channels, width, height) of the ORIGINAL, unpadded tensor.
__global__ void paddingKernel(float* A, feature B, int padding){
    // A is original array, B is padded array
    const int c = blockIdx.x;
    const int w = blockIdx.y;
    const int h = blockIdx.z;
    const int src = c * gridDim.y*gridDim.z + w * gridDim.z + h;
    const int dst = c * B.width*B.height + (w + padding) * B.height + (h + padding);
    B.d_elements[dst] = A[src];
}
// In-place ReLU: clamps negative activations of feature map A to zero.
// Launch: grid = (channels, width, height), one thread per block.
__global__ void ReLUKernel(feature A){
    const int idx = blockIdx.x * A.width*A.height
                  + blockIdx.y * A.height
                  + blockIdx.z;
    if (A.d_elements[idx] < 0)
        A.d_elements[idx] = 0;
}
// Max pooling with a square window of side `filter` and stride `filter`
// (non-overlapping). One block per OUTPUT element; grid dimensions give
// the output shape. Assumes A.width and A.height are divisible by `filter`
// for full coverage (remainder columns/rows are silently dropped).
__global__ void MaxpoolingKernel(feature A, feature B, int filter){
// A is input B is output
int c = blockIdx.x;
int w = blockIdx.y;
int h = blockIdx.z;
float max_value = A.d_elements[c * A.width*A.height + w*filter * A.height + h*filter];
for (int i = 0; i < filter; i++){
for (int j = 0; j < filter; j++){
int idxA = c * A.height * A.width + (w*filter + i) * A.height + h*filter + j;
if (max_value < A.d_elements[idxA])
max_value = A.d_elements[idxA];
}
}
int idxB = c * gridDim.y * gridDim.z + w * gridDim.z + h;
B.d_elements[idxB] = max_value;
}
void init_feature(feature& A, int channel, int width, int height, float init_value = 0);
void init_kernel(kernel& A, int number, int channel, int width, int height, float init_value = 0);
void init_feature(feature& A, int channel, int width, int height, string filename);
void init_kernel(kernel& A, int number, int channel, int width, int height, string filename);
void fprint(feature data);
void kprint(kernel data);
void make_pad(feature& A, int pad);
void ReLU(feature& A);
void convolution(feature& A, kernel B, int stride);
void addBias(feature& A, kernel B);
void maxPooling(feature& A, int filter);
void view(feature& A);
// Pipeline demo: pad -> conv -> bias -> ReLU -> maxpool -> flatten, then
// copy the result back and print it.
// Fixes: (1) `new float [A.size]` allocated A.size FLOATS where A.size is a
// BYTE count (4x over-allocation) — now allocates the element count;
// (2) `delete` on an array is undefined behavior — use delete[];
// (3) B.h_elements / C.h_elements were never freed.
int main()
{
    //feature A initialization
    feature A;
    init_feature(A, 3, 8, 8, 1);
    cudaMalloc(&A.d_elements, A.size);
    cudaMemcpy(A.d_elements, A.h_elements, A.size, cudaMemcpyHostToDevice);
    fprint(A);
    //kernel B initialization
    kernel B;
    init_kernel(B, 3, 3, 3, 3, 1);
    cudaMalloc(&B.d_elements, B.size);
    cudaMemcpy(B.d_elements, B.h_elements, B.size, cudaMemcpyHostToDevice);
    kprint(B);
    //kernel C initialization (per-channel bias)
    kernel C;
    init_kernel(C, 3, 1, 1, 1, 1);
    cudaMalloc(&C.d_elements, C.size);
    cudaMemcpy(C.d_elements, C.h_elements, C.size, cudaMemcpyHostToDevice);
    kprint(C);
    //calculation
    make_pad(A, 1);
    convolution(A, B, 1);
    addBias(A,C);
    ReLU(A);
    maxPooling(A, 2);
    view(A);
    //receive data from device
    delete[] A.h_elements; // fix: delete[] for arrays
    A.size = A.channel * A.width * A.height * sizeof(float);
    A.h_elements = new float [A.channel * A.width * A.height]; // fix: element count, not bytes
    cudaMemcpy(A.h_elements, A.d_elements, A.size, cudaMemcpyDeviceToHost);
    //print data
    fprint(A);
    //release all cuda memories
    cudaFree(A.d_elements);
    cudaFree(B.d_elements);
    cudaFree(C.d_elements);
    // fix: release the remaining host buffers (previously leaked)
    delete[] A.h_elements;
    delete[] B.h_elements;
    delete[] C.h_elements;
    return 0;
}
void init_feature(feature& A, int channel, int width, int height, float init_value){
A.channel = channel;
A.width = width;
A.height = height;
size_t size = channel * width * height;
A.h_elements = new float [size];
if (init_value != 0)
for (int i = 0; i < size; i++)
A.h_elements[i] = init_value;
A.size = size*sizeof(float);
}
void init_kernel(kernel& A, int number, int channel, int width, int height, float init_value){
A.number = number;
A.channel = channel;
A.width = width;
A.height = height;
size_t size = number * channel * width * height;
A.h_elements = new float [size];
if (init_value != 0)
for (int i = 0; i < size; i++)
A.h_elements[i] = init_value;
A.size = size*sizeof(float);
}
void init_feature(feature& A, int channel, int width, int height, string filename){
A.channel = channel;
A.width = width;
A.height = height;
size_t size = channel * width * height;
A.h_elements = new float [size];
// if (init_value != 0)
// for (int i = 0; i < size; i++)
// A.h_elements[i] = init_value;
A.size = size*sizeof(float);
}
void init_kernel(kernel& A, int number, int channel, int width, int height, string filename){
A.number = number;
A.channel = channel;
A.width = width;
A.height = height;
size_t size = number * channel * width * height;
A.h_elements = new float [size];
// if (init_value != 0)
// for (int i = 0; i < size; i++)
// A.h_elements[i] = init_value;
A.size = size*sizeof(float);
}
void fprint(feature data){
cout << "feature type \n[ " << data.channel << ", " << data.width << ", " << data.height << " ]" << endl;
for (int i = 0; i < data.channel; i++){
for (int j = 0; j < data.width; j++){
for (int k = 0; k < data.height; k++){
cout << setw(3) << data.h_elements[i * data.width*data.height + j * data.height + k];
}
cout << endl;
}
cout << endl;
}
}
void kprint(kernel data){
cout << "kernel type \n[ " << data.number << ", " << data.channel << ", " << data.width << ", " << data.height << " ]" << endl;
for (int i = 0; i < data.number; i++){
for (int j = 0; j < data.channel; j++){
for (int k = 0; k < data.width; k++){
for (int l = 0; l < data.height; l++){
cout << setw(3) << data.h_elements[i * data.channel*data.width*data.height + j * data.width*data.height + k * data.height + l];
}
cout << endl;
}
cout << endl;
}
cout << endl;
}
}
void make_pad(feature& A, int pad){
//invoke kernel padding
float* temp;
cudaMalloc(&temp, A.size);
cudaMemcpy(temp, A.d_elements, A.size, cudaMemcpyDeviceToDevice);
dim3 dimGrid(A.channel, A.width, A.height);
A.height += 2*pad; A.width += 2*pad;
A.size = A.channel * A.width * A.height * sizeof(float);
cudaFree(A.d_elements);
cudaMalloc(&A.d_elements, A.size);
cudaMemset(A.d_elements, 0, A.size);
paddingKernel<<<dimGrid, 1>>> (temp, A, pad);
cudaFree(temp);
}
void ReLU(feature& A){
//invoke kernel relu
dim3 dimGrid(A.channel, A.width, A.height);
ReLUKernel<<<dimGrid, 1>>>(A);
}
// Host wrapper: convolves feature A with kernel bank B and replaces A with
// the (valid-padding) result.
// Fixes: (1) the dynamic shared-memory launch argument is in BYTES, but the
// original passed A.channel (the float COUNT), so Conv2dKernel's shared
// reduction read/wrote past its allocation — now A.channel * sizeof(float);
// (2) `delete` on an array is undefined behavior — use delete[].
// TODO(review): the `stride` parameter is accepted but never used; the
// output size and kernel indexing assume stride 1.
void convolution(feature& A, kernel B, int stride){
    feature temp;
    init_feature(temp, B.number, A.width - B.width + 1, A.height - B.height + 1, 0);
    cudaMalloc(&temp.d_elements, temp.size);
    dim3 dimGrid(temp.channel, temp.width, temp.height);
    // one thread per input channel; shared buffer of A.channel floats
    Conv2dKernel<<<dimGrid, A.channel, A.channel * sizeof(float)>>>(A, B, temp);
    cudaFree(A.d_elements);
    cudaMalloc(&A.d_elements, temp.size);
    cudaMemcpy(A.d_elements, temp.d_elements, temp.size, cudaMemcpyDeviceToDevice);
    delete[] temp.h_elements; // fix: delete[] for arrays
    cudaFree(temp.d_elements);
    A.channel = temp.channel;
    A.width = temp.width;
    A.height = temp.height;
}
void addBias(feature& A, kernel B){
dim3 dimGrid(A.channel, A.width, A.height);
AddBiasKernel<<<dimGrid, 1>>>(A, B);
}
// Host wrapper: non-overlapping max pooling with window/stride `filter`;
// replaces A with the pooled result.
// Fix: `delete` on an array is undefined behavior — use delete[].
void maxPooling(feature& A, int filter){
    feature temp;
    init_feature(temp, A.channel, A.width / filter, A.height / filter);
    cudaMalloc(&temp.d_elements, temp.size);
    dim3 dimGrid(temp.channel, temp.width, temp.height);
    MaxpoolingKernel<<<dimGrid, 1>>>(A, temp, filter);
    cudaFree(A.d_elements);
    cudaMalloc(&A.d_elements, temp.size);
    cudaMemcpy(A.d_elements, temp.d_elements, temp.size, cudaMemcpyDeviceToDevice);
    delete[] temp.h_elements; // fix: delete[] for arrays
    cudaFree(temp.d_elements);
    A.channel = temp.channel;
    A.width = temp.width;
    A.height = temp.height;
}
void view(feature& A){
A.channel = A.channel*A.width*A.height;
A.width = 1;
A.height = 1;
}
|
14,718 | #include <iostream>
using namespace std;
#define BLOCK_SIZE 16
// Naive square matrix multiply C = A * B for N x N row-major int matrices,
// one thread per output element.
// Fix: added the bounds guard — the host launches a 4x4 thread block even
// when N < 4 (N = 2 in main), so unguarded surplus threads read and wrote
// past the ends of the N*N arrays.
__global__ void gpuMM(int *A, int *B, int *C, int N)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= N || col >= N)
        return; // surplus threads must not touch C
    int sum = 0;
    for (int n = 0; n < N; ++n)
        sum += A[row*N+n]*B[n*N+col];
    C[row*N+col] = sum;
}
// Multiplies two 2x2 int matrices on the GPU and prints A, B and C.
// Fix: the thread block was hard-coded to 4x4 while N = 2, so 12 of the 16
// threads indexed past the N*N arrays inside gpuMM (out-of-bounds reads and
// writes on device memory). Launch exactly N x N threads instead.
int main(int argc, char *argv[])
{
    int N=2;
    cout << "Executing Matrix Multiplcation" << endl;
    cout << "Matrix size: " << N << "x" << N << endl;
    int hA[N*N],hB[N*N],hC[N*N];
    for (int j=0; j<N*N; j++){
        hA[j]=j;
        hB[j]=j;
        hC[j]=0;
    }
    int size = N*N*sizeof(int);
    int *dA,*dB,*dC;
    cudaMalloc((void**)&dA,size);
    cudaMalloc((void**)&dB,size);
    cudaMalloc((void**)&dC,size);
    dim3 threadBlock(N,N); // fix: was (4,4) — launched out-of-range threads
    dim3 grid(1,1);
    cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice);
    cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice);
    gpuMM<<<grid,threadBlock>>>(dA,dB,dC,N);
    cudaMemcpy(hC,dC,size,cudaMemcpyDeviceToHost);
    cout<<"Matrix A\n";
    for(int i=1;i<=N*N;i++)
    {
        cout<<hA[i-1]<<"\t";
        if(i%2==0)
            cout<<"\n";
    }
    cout<<"Matrix B\n";
    for(int i=1;i<=N*N;i++)
    {
        cout<<hB[i-1]<<"\t";
        if(i%2==0)
            cout<<"\n";
    }
    cout<<"Matrix C is \n";
    for(int i=1;i<=N*N;i++)
    {
        cout<<hC[i-1]<<"\t";
        if(i%2==0)
            cout<<"\n";
    }
    cout << "Finished." << endl;
}
|
14,719 | // compute the square of first 64 whole numbers using 64 threads on the device
#include <stdio.h>
// Squares each input element: d_out[i] = d_in[i] * d_in[i].
// One thread per element; indexing uses only threadIdx.x, so the host's
// single-block launch (<<<1, 64>>>) covers the whole array.
__global__ void square(float *d_out,float *d_in)
{
    int i = threadIdx.x;
    d_out[i] = d_in[i] * d_in[i];
}
int main(int argc,char* argv[])
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//declaration
float h_in[ARRAY_SIZE] ,h_out[ARRAY_SIZE]; //host
float *d_out ,*d_in; // device
//generate the input
for(int i=0;i<ARRAY_SIZE;i++)
h_in[i] = i;
//allocate memory on the device
cudaMalloc((void**) &d_in , ARRAY_BYTES);
cudaMalloc((void**) &d_out , ARRAY_BYTES);
//tranfer data host to device
cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice);
//launch kernel
square<<<1,64>>>(d_out,d_in);
//tranfer data form device to host
cudaMemcpy(h_out,d_out,ARRAY_BYTES,cudaMemcpyDeviceToHost);
//display results
for(int i=0;i<ARRAY_SIZE;i++)
{
printf("%f", h_out[i]);
(i%4 == 0) ? printf("\n") : printf("\t");
}
printf("\n");
} |
14,720 | void f()
{
return;
}
|
14,721 | /* Block size X: 32 */
/* FCT (flux-corrected transport) pre-communication step for an ALE ocean
 * model: one block per mesh node, 32 threads striding over vertical levels.
 * Pass 1: for each level, scan the node's adjacent triangles and record the
 * per-element max/min of max(fct_low_order, ttf) over the 3 corner nodes
 * (UVrhs), accumulating the vertical-column extrema in shared memory.
 * Pass 2: build fct_ttf_max/min from neighboring levels and the vertical
 * antidiffusive fluxes fct_plus/fct_minus.
 * Shared memory: 2 * max_levels doubles (tvert_max then tvert_min). */
__global__ void fct_ale_pre_comm( const int max_levels, const int num_nodes, const int max_num_elems, const int * __restrict__ node_levels, const int * __restrict__ elem_levels, const int * __restrict__ node_elems, const int * __restrict__ node_num_elems, const int * __restrict__ elem_nodes, const double * __restrict__ fct_low_order, const double * __restrict__ ttf, const double * __restrict__ fct_adf_v, const double * __restrict__ fct_adf_h, double * __restrict__ UVrhs, double * __restrict__ fct_ttf_max, double * __restrict__ fct_ttf_min, double * __restrict__ fct_plus, double * __restrict__ fct_minus, const double bignr)
{
extern __shared__ double sharedBuffer[];
double * tvert_max = (double *)(sharedBuffer);
double * tvert_min = (double *)(&sharedBuffer[max_levels]);
const int node = (blockIdx.x * max_levels);
const int numelems = node_num_elems[blockIdx.x];
for ( int level = threadIdx.x; level < node_levels[blockIdx.x]; level += 32 )
{
double tvmax = -bignr;
double tvmin = bignr;
for ( int elem = 0; elem < numelems ; elem++ )
{
/* -1: converting 1-based Fortran indices to 0-based */
int elem_index = node_elems[blockIdx.x * max_num_elems + elem] - 1;
int node_indices[3] = { (elem_nodes[3 * elem_index] - 1) * max_levels + level,
(elem_nodes[3 * elem_index + 1] - 1) * max_levels + level,
(elem_nodes[3 * elem_index + 2] - 1) * max_levels + level};
double fctttfmax[3] = {fmax(fct_low_order[node_indices[0]], ttf[node_indices[0]]),
fmax(fct_low_order[node_indices[1]], ttf[node_indices[1]]),
fmax(fct_low_order[node_indices[2]], ttf[node_indices[2]])};
double fctttfmin[3] = {fmin(fct_low_order[node_indices[0]], ttf[node_indices[0]]),
fmin(fct_low_order[node_indices[1]], ttf[node_indices[1]]),
fmin(fct_low_order[node_indices[2]], ttf[node_indices[2]])};
double uvrhs1, uvrhs2;
if(level < elem_levels[elem_index] - 1)
{
uvrhs1 = fmax(fctttfmax[0], fmax(fctttfmax[1], fctttfmax[2]));
uvrhs2 = fmin(fctttfmin[0], fmin(fctttfmin[1], fctttfmin[2]));
}
else
{
/* below this element's bottom: sentinel values that never win */
uvrhs1 = bignr;
uvrhs2 = -bignr;
}
tvmax = fmax(uvrhs1, tvmax);
tvmin = fmin(uvrhs2, tvmin);
/* NOTE(review): the second component is stored at ...+ level + 1, which
 * overlaps the (level+1) slot of the FIRST component; the intended layout
 * may be (2 * elem_index + 1) * max_levels + level — confirm against the
 * original Fortran UV_rhs array. */
UVrhs[2 * elem_index * max_levels + level] = uvrhs1;
UVrhs[2 * elem_index * max_levels + level + 1] = uvrhs2;
}
tvert_max[level] = tvmax;
tvert_min[level] = tvmin;
}
__syncthreads();
for ( int level = threadIdx.x; level < node_levels[blockIdx.x]; level += 32 )
{
/* NOTE(review): boundary test uses node_levels - 2; the interior branch
 * reads tvert_max[level + 1], so level == node_levels - 1 (if reached)
 * would read one past the written range — verify against loop bounds. */
if(level == 0 or level == node_levels[blockIdx.x] - 2)
{
fct_ttf_max[node + level] = fmax(fct_low_order[node + level], ttf[node + level]);
fct_ttf_min[node + level] = fmin(fct_low_order[node + level], ttf[node + level]);
}
else
{
fct_ttf_max[node + level] = fmax(tvert_max[level], fmax(tvert_max[level - 1], tvert_max[level + 1]));
fct_ttf_min[node + level] = fmin(tvert_min[level], fmin(tvert_min[level - 1], tvert_min[level + 1]));
}
/* vertical antidiffusive flux contributions (fct_adf_v has max_levels+1
 * interfaces per node) */
int adf_index = blockIdx.x * (max_levels + 1) + level;
fct_plus[node + level] = fmax(0.,fct_adf_v[adf_index]) + fmax(0.,-fct_adf_v[adf_index + 1]);
fct_minus[node + level] = fmin(0.,fct_adf_v[adf_index]) + fmin(0.,-fct_adf_v[adf_index + 1]);
}
}
|
14,722 | /**********************************************************************/
// An unoptimized Naive N-Body solver for Gravity Simulations //
// G is assumed to be 1.0 //
// Course Material for HPCSE-II, Spring 2019, ETH Zurich //
// Authors: Sergio Martin //
// License: Use if you like, but give us credit. //
/**********************************************************************/
#include <stdio.h>
#include <math.h>
#include <chrono>
// Naive O(N^2) gravitational force calculation (G = 1) on a 32^3 lattice of
// unit masses, followed by a conservation check (forces must sum to ~0).
// Fix: the seven calloc'd buffers were never freed.
int main(int argc, char* argv[])
{
    size_t N0 = 32;
    size_t N = N0*N0*N0;
    // Initializing N-Body Problem
    double* xPos = (double*) calloc (N, sizeof(double));
    double* yPos = (double*) calloc (N, sizeof(double));
    double* zPos = (double*) calloc (N, sizeof(double));
    double* xFor = (double*) calloc (N, sizeof(double));
    double* yFor = (double*) calloc (N, sizeof(double));
    double* zFor = (double*) calloc (N, sizeof(double));
    double* mass = (double*) calloc (N, sizeof(double));
    size_t current = 0;
    for (size_t i = 0; i < N0; i++)
        for (size_t j = 0; j < N0; j++)
            for (size_t k = 0; k < N0; k++)
            {
                xPos[current] = i;
                yPos[current] = j;
                zPos[current] = k;
                mass[current] = 1.0;
                xFor[current] = 0.0;
                yFor[current] = 0.0;
                zFor[current] = 0.0;
                current++;
            }
    // Running Force-calculation kernel
    auto startTime = std::chrono::system_clock::now();
    for (size_t i = 0; i < N; i++)
        for (size_t j = 0; j < N; j++) if (j != i)
        {
            double xDist = xPos[i] - xPos[j];
            double yDist = yPos[i] - yPos[j];
            double zDist = zPos[i] - zPos[j];
            double r = sqrt(xDist*xDist + yDist*yDist + zDist*zDist);
            xFor[i] += xDist*mass[i]*mass[j] / (r*r*r);
            yFor[i] += yDist*mass[i]*mass[j] / (r*r*r);
            zFor[i] += zDist*mass[i]*mass[j] / (r*r*r);
        }
    auto endTime = std::chrono::system_clock::now();
    // Newton's third law: pairwise forces cancel, so the sum must vanish.
    double forceChecksum = 0.0;
    for (size_t i = 0; i < N; i++) forceChecksum += xFor[i] + yFor[i] + zFor[i];
    if (fabs(forceChecksum) > 0.00001) { printf("Verification Failed: Forces are not conserved! Sum: %.10f\n", forceChecksum); exit(-1); }
    printf("Verification Passed! Time: %.8fs\n", std::chrono::duration<double>(endTime-startTime).count());
    // fix: release the calloc'd buffers (previously leaked)
    free(xPos); free(yPos); free(zPos);
    free(xFor); free(yFor); free(zFor);
    free(mass);
    return 0;
}
|
14,723 | /* second version of N body simulation using CUDA */
#include <iostream>
#include <fstream>
#include <iomanip>
#include <math.h>
#include <cuda.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <ctime>
using namespace std;
/* define the global constants */
const double G = 6.67 * pow(10, -11);
const double e = 0.00001;
const double period = 1;
/* define the structure of particle */
struct particle
{
double m;
double pos_x;
double pos_y;
double v_x;
double v_y;
double a_x;
double a_y;
particle(double m = 0, double pos_x = 0, double pos_y = 0,
double v_x = 0, double v_y = 0, double a_x = 0, double a_y = 0)
{
this->m = m;
this->pos_x = pos_x;
this->pos_y = pos_y;
this->v_x = v_x;
this->v_y = v_y;
this->a_x = a_x;
this->a_y = a_y;
}
};
struct my_double2
{
double x, y;
__device__ my_double2(double x = 0, double y = 0)
{
this->x = x;
this->y = y;
}
};
/* define the global data */
int g_N; // number of particles
int g_P; // number of particles in a tile
thrust::host_vector<particle> g_pv; // particle vector
void setUp();
/* calculate the interaction between two bodies */
/* Adds b2's gravitational pull on b1 to the running `acceleration` and
 * returns it. `e` is a softening term added to r^2 to avoid the r -> 0
 * singularity. b1 is passed by value, so the writes to b1.a_x / b1.a_y are
 * purely local scratch and do not persist. Uses double-precision pow(). */
__device__ my_double2 bodyBodyAcceleration(double G, double e, particle b1, particle b2, my_double2 acceleration)
{
double r_2 = pow((b1.pos_x - b2.pos_x),2) + pow((b1.pos_y - b2.pos_y),2);
b1.a_x = (-1) * G * b2.m * (b1.pos_x - b2.pos_x) / (pow(r_2 + e, 1.5));
b1.a_y = (-1) * G * b2.m * (b1.pos_y - b2.pos_y) / (pow(r_2 + e, 1.5));
acceleration.x += b1.a_x;
acceleration.y += b1.a_y;
return acceleration;
}
/* calculate the interaction inside a P*P block */
/* Accumulates the acceleration on body b from all blockDim.x bodies held in
 * the dynamic shared-memory tile. The extern __shared__ declaration here
 * refers to the same allocation declared in the launching kernel
 * (updateScene), which fills it before calling this function. */
__device__ my_double2 tileAcceleration(double G, double e, particle b, my_double2 acceleration)
{
extern __shared__ particle shParticles[];
for(int i = 0; i < blockDim.x; ++i)
{
acceleration = bodyBodyAcceleration(G, e, b, shParticles[i], acceleration);
}
return acceleration;
}
/* update the position */
/* Semi-implicit Euler step for this thread's particle: integrate velocity
 * from the stored acceleration, then position from the updated velocity.
 * Reads and writes particle_arr in global memory. */
__device__ void updatePosition(double period, particle* particle_arr)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    particle* p = &particle_arr[idx];
    /* v += a * dt */
    p->v_x += p->a_x * period;
    p->v_y += p->a_y * period;
    /* x += v * dt, using the freshly updated velocity */
    p->pos_x += p->v_x * period;
    p->pos_y += p->v_y * period;
}
/* calculate the whole acceleration */
/* Tiled N-body step: each thread owns one particle, the block cooperatively
 * stages P particles at a time into shared memory, and every thread
 * accumulates the acceleration from each tile.
 * Fix: the accumulated acceleration was stored only in the LOCAL copy `ptc`
 * and never written back to global memory, so updatePosition() (which reads
 * particle_arr) always integrated with stale accelerations. The result is
 * now published to particle_arr before integrating.
 * NOTE(review): the tile fill reads particle_arr[tile*blockDim.x+threadIdx.x]
 * without an upper bound; when P*P > N this reads past the array — confirm
 * the host pads the particle buffer. */
__global__ void updateScene(int N, int P, double G, double e, double period, particle* particle_arr)
{
    extern __shared__ particle shParticles[];
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    particle ptc = particle_arr[id];
    my_double2 acceleration;
    acceleration.x = ptc.a_x;
    acceleration.y = ptc.a_y;
    int i, tile;
    for(i = 0, tile = 0; i < N; i += P, ++tile)
    {
        /* fill in the shared memory */
        int idx = tile * blockDim.x + threadIdx.x;
        shParticles[threadIdx.x] = particle_arr[idx];
        __syncthreads();
        /* calculate the acceleration with a tile */
        acceleration = tileAcceleration(G, e, ptc, acceleration);
        __syncthreads();
    }
    /* fix: publish the result so updatePosition() sees it */
    particle_arr[id].a_x = acceleration.x;
    particle_arr[id].a_y = acceleration.y;
    updatePosition(period, particle_arr);
}
/* Host driver: loads particles from input.txt, runs 100000 simulation steps
 * on the GPU and reports the elapsed time.
 * Fix: kernel launches are asynchronous, so stopping the clock right after
 * the launch loop measured only enqueue overhead; a cudaDeviceSynchronize()
 * is now issued before reading the end time. */
int main(int argc, char ** argv) {
    setUp();
    g_P = static_cast<int>(sqrt(g_N)) + 1; /* tile width ~ sqrt(N) */
    /* device copy of particle array */
    thrust::device_vector<particle> d_particle_arr = g_pv;
    /* get the raw pointer of particle array */
    particle *particle_arr = thrust::raw_pointer_cast(d_particle_arr.data());
    clock_t start, finish;
    start = clock();
    int time = 0;
    while(time < 100000)
    {
        updateScene<<<g_P,g_P,g_P*sizeof(particle)>>>(g_N, g_P, G, e, period, particle_arr);
        /*
        g_pv = d_particle_arr;
        for ( int i = 0; i < g_N; ++i )
        {
        cout << "particle: " << i << " pos_x: " << g_pv[i].pos_x << " pos_y: " << g_pv[i].pos_y << endl;
        }
        */
        time++;
    }
    cudaDeviceSynchronize(); /* fix: wait for all queued kernels before timing */
    finish = clock();
    cout << "Execution Time: " << (double)(finish-start)/CLOCKS_PER_SEC << endl;
    return 0;
}
/* read the input data */
void setUp()
{
ifstream inFile;
inFile.open("input.txt");
inFile >> g_N;
g_pv.resize(g_N);
for ( int i = 0; i < g_N; ++i )
{
inFile >> g_pv[i].m >> g_pv[i].pos_x >> g_pv[i].pos_y
>> g_pv[i].v_x >> g_pv[i].v_y >> g_pv[i].a_x >> g_pv[i].a_y;
}
inFile.close();
}
|
14,724 | #include "includes.h"
// Scales the first `realtc` elements of `data` by `scale`, in place.
// Thread id is flattened over a 2-D grid of 1-D blocks.
// NOTE(review): MAX_THREADS comes from includes.h and must equal the launch
// blockDim.x for the indexing to be contiguous — confirm at the call site.
__global__ void mult_kernel(float* data, const float scale, const int realtc)
{
const uint index = threadIdx.x + (blockIdx.x + gridDim.x*blockIdx.y)*MAX_THREADS;
if (index < realtc){
data[index] *= scale;
}
} |
14,725 | #include<time.h>
#include<stdio.h>
#define Real double
using namespace std;
// Pointer-chasing latency microbenchmark: repeatedly follows j = A[j]
// (A encodes a permutation/chain as doubles) for 3 * N dependent loads and
// reports the elapsed device time in *d_time.
// NOTE(review): in device code clock() returns a per-SM cycle counter, not
// host CPU ticks, so dividing by CLOCKS_PER_SEC does not yield seconds —
// divide by the SM clock rate instead, or use clock64() to avoid wraparound.
__global__ void VecAdd(Real* A, int* N, Real* d_time)
{
// int i = threadIdx.x, j;
//j=A[i];
int j = 0;
clock_t start_t, end_t;
// struct timeval tv1, tv2;
start_t = clock();
//printf("\n%d \n", clock());
//gettimeofday(&tv1, NULL);
for (int it=0; it < *N; it++)
{
j=A[j];
}
for (int it=0; it < *N; it++)
{
j=A[j];
}
for (int it=0; it < *N; it++)
{
j=A[j];
}
//gettimeofday(&tv2, NULL);
//*d_time = ((tv2.tv_sec-tv1.tv_sec)*1000000.0 + (tv2.tv_usec-tv1.tv_usec));
end_t = clock();
*d_time = (double)(end_t - start_t)/CLOCKS_PER_SEC ;
//printf("\n%d %d %f \n", start_t, end_t, *d_time);
} |
14,726 | #include "includes.h"
/*
* This file is an attempt at producing what the generated target code
* should look like for the multiplyMatrixMatrix routine.
*/
/* Prototype matrix representation. */
struct dag_array_t{
size_t rows;
size_t cols;
int* matrix;
};
/*
DAG Primitive. Here, we leverage the NVIDIA developer examples
to obtain a high-bandwith operation. They make use of shared memory
to avoid strided global memory accesses, and instead perform the
strided access in the shared block, which is roughly a ~3x improvement.
TILE_DIM = 32
BLOCK_ROWS = 8
https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
*/
const int tp_TILE_DIM = 32;
const int tp_BLOCK_ROWS = 8;
// We use single-dimensional lists.
// Matrix-vector style product: each block computes one dot product of
// `cols` elements; thread t multiplies one pair into a shared slot, then
// thread 0 serially sums the partials after the barrier.
// Requires blockDim.x <= 256 (fixed shared buffer) and blockDim.x == cols
// for full coverage of each row slice.
// NOTE(review): the result index blockIdx.x * cols + blockIdx.y mixes the
// vector-slice index (x) and matrix-row index (y) with a `cols` stride —
// confirm the intended output layout against the launch configuration.
__global__ void multiplyMatrixVector(int* result, int* matrix, int* vector, int cols)
{
__shared__ int reduce_array[256]; // Within a block
int vector_slice_offset = blockIdx.x * cols + threadIdx.x;
int matrix_slice_offset = blockIdx.y * cols + threadIdx.x;
reduce_array[threadIdx.x] = matrix[matrix_slice_offset] * vector[vector_slice_offset];
__syncthreads();
// Sequential reduce.
if (threadIdx.x == 0){
int accumulator = 0;
for (int i = 0; i < blockDim.x; i++)
{
accumulator += reduce_array[i];
}
result[blockIdx.x * cols + blockIdx.y] = accumulator;
}
} |
14,727 | /*
* Copyright (C) 2006-2018 Istituto Italiano di Tecnologia (IIT)
* Copyright (C) 2007 Giacomo Spigler
* All rights reserved.
*
* This software may be modified and distributed under the terms of the
* BSD-3-Clause license. See the accompanying LICENSE file for details.
*/
extern "C" {
__global__ void FragmentProgram(int w, int h, unsigned char *in, unsigned char *out) {
    /* Threshold on the R channel of an interleaved RGB image: pixels whose
       red value is below 200 are blacked out in `out`; brighter pixels are
       left untouched (the pass-through writes were deliberately commented
       out in the original). Grid-stride loop over all w*h pixels. */
    const int total = w * h;
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < total; i += stride) {
        if (in[i*3] < 200) {
            out[i*3]   = 0;
            out[i*3+1] = 0;
            out[i*3+2] = 0;
        }
    }
}
}
|
14,728 | #include <cuda_runtime.h>
#include <stdio.h>
// Enumerates all CUDA devices and prints each one's name and compute
// capability.
int main(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int device = 0; device < deviceCount; ++device) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, device);
        printf("Device %d (%s) has compute capability %d.%d.\n", device, props.name, props.major, props.minor);
    }
}
|
14,729 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
void Usage(char *prog_name);
#define ACCURACY 0.01
typedef float float_type;
#define KERNEL_ITERATIONS 1000
// Final unrolled reduction for the last 64 -> 1 elements, executed by the
// first warp only (caller guards thread_id < 32). The `volatile` qualifier
// is the classic pre-Volta idiom relying on implicit warp-synchronous
// execution; requires sdata to hold at least 64 valid partials.
// NOTE(review): on Volta+ (independent thread scheduling) this idiom needs
// __syncwarp() between steps or a shuffle-based reduction to be safe.
__device__ void warp_reduce(volatile float_type *sdata, const unsigned int thread_id)
{
sdata[thread_id] += sdata[thread_id + 32];
sdata[thread_id] += sdata[thread_id + 16];
sdata[thread_id] += sdata[thread_id + 8];
sdata[thread_id] += sdata[thread_id + 4];
sdata[thread_id] += sdata[thread_id + 2];
sdata[thread_id] += sdata[thread_id + 1];
}
// Leibniz series for pi/4: each thread accumulates KERNEL_ITERATIONS
// consecutive terms (-1)^k / (2k+1) starting at its global offset, then the
// block reduces in shared memory (dynamic, blockDim.x floats) and writes one
// partial sum per block to gdata. Requires blockDim.x to be a power of two
// and >= 64 (for warp_reduce).
// NOTE(review): no term-count bound is passed, so when n is not a multiple
// of blockDim.x * KERNEL_ITERATIONS the tail block sums terms beyond n; the
// host tolerates this only via the ACCURACY comparison threshold.
__global__ void reduce_pi(float_type *gdata)
{
extern __shared__ float_type sdata[];
const unsigned int thread_id = threadIdx.x;
const unsigned long long int i = (((unsigned long long int)blockIdx.x) * blockDim.x + threadIdx.x) * KERNEL_ITERATIONS;
float_type current_thread_factor = 0.0f;
for (int it = 0; it < KERNEL_ITERATIONS; it++)
{
const float factor = ((i + it) & 1) ? -1.0f : 1.0f; // alternating sign
current_thread_factor += factor / (((i + it) << 1) + 1);
}
sdata[thread_id] = current_thread_factor;
__syncthreads();
// reduction in shared memory
for (unsigned int stride = blockDim.x >> 1; stride > 32; stride >>= 1)
{
if (thread_id < stride) {
sdata[thread_id] += sdata[thread_id + stride];
}
__syncthreads();
}
if (thread_id < 32)
warp_reduce(sdata, thread_id);
// write result for this block to global memory
if (thread_id == 0)
gdata[blockIdx.x] = sdata[0];
}
double sequential_solution(int argc, char *argv[])
{
long long n, i;
double factor = 0.0;
double sum = 0.0;
if (argc != 2)
Usage(argv[0]);
n = strtoll(argv[1], NULL, 10);
if (n < 1)
Usage(argv[0]);
printf("Before for loop, factor = %f.\n", factor);
for (i = 0; i < n; i++)
{
factor = (i % 2 == 0) ? 1.0 : -1.0;
sum += factor / (2 * i + 1);
}
printf("After for loop, factor = %f.\n", factor);
sum = 4.0 * sum;
printf("With n = %lld terms\n", n);
printf(" Our estimate of pi = %.14f\n", sum);
printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
return sum;
}
/* GPU estimate of pi via the Leibniz series: launches reduce_pi with enough
 * blocks to cover n terms (1024 threads x KERNEL_ITERATIONS terms each),
 * then sums the per-block partials on the host and multiplies by 4.
 * NOTE(review): grid_size is rounded UP, and the kernel has no bound on n,
 * so extra terms past n are included whenever n is not a multiple of
 * 1024 * KERNEL_ITERATIONS — the final comparison only passes within
 * ACCURACY. `factor` is computed for symmetry with the sequential version
 * but never affects the sum. No CUDA error checking is performed. */
double parallel_solution(int argc, char *argv[])
{
long long n, i;
double factor = 0.0;
double sum = 0.0;
float_type *dev_sum;
float_type *cpu_sum;
if (argc != 2)
Usage(argv[0]);
n = strtoll(argv[1], NULL, 10);
if (n < 1)
Usage(argv[0]);
printf("Before for loop, factor = %f.\n", factor);
long long block_size = 1024;
long long grid_size = ceil(((double)n / KERNEL_ITERATIONS) / block_size);
cpu_sum = (float_type*)calloc(grid_size, sizeof(float_type));
cudaMalloc(&dev_sum, grid_size * sizeof(float_type));
reduce_pi<<< grid_size, block_size, block_size * sizeof(float_type) >>>(dev_sum);
/* blocking copy doubles as synchronization with the kernel */
cudaMemcpy(cpu_sum, dev_sum, grid_size * sizeof(float_type), cudaMemcpyDeviceToHost);
factor = ((n - 1) % 2 == 0) ? 1.0 : -1.0;
for (i = 0; i < grid_size; i++)
sum += cpu_sum[i];
cudaFree(dev_sum);
free(cpu_sum);
printf("After for loop, factor = %f.\n", factor);
sum = 4.0 * sum;
printf("With n = %lld terms\n", n);
printf(" Our estimate of pi = %.14f\n", sum);
printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
return sum;
}
// Entry point: times the sequential and parallel pi estimates with CUDA
// events and checks that they agree to within ACCURACY.
// Fix: the original created four cudaEvent_t objects and never destroyed
// them; they are now released before returning.
int main(int argc, char *argv[])
{
float elapsed_time_seq;
cudaEvent_t start_time_seq, end_time_seq;
cudaEventCreate(&start_time_seq);
cudaEventCreate(&end_time_seq);
float elapsed_time_parallel;
cudaEvent_t start_time_parallel, end_time_parallel;
cudaEventCreate(&start_time_parallel);
cudaEventCreate(&end_time_parallel);
printf("---------------------Sequential execution---------------------\n");
cudaEventRecord(start_time_seq, 0);
double sum_seq = sequential_solution(argc, argv);
cudaEventRecord(end_time_seq, 0);
cudaEventSynchronize(end_time_seq);
cudaEventElapsedTime(&elapsed_time_seq, start_time_seq, end_time_seq);
printf("----------------------Parallel execution----------------------\n");
cudaEventRecord(start_time_parallel, 0);
double sum_parallel = parallel_solution(argc, argv);
cudaEventRecord(end_time_parallel, 0);
cudaEventSynchronize(end_time_parallel);
cudaEventElapsedTime(&elapsed_time_parallel, start_time_parallel, end_time_parallel);
printf("\nSequential elapsed time: %fs\n", elapsed_time_seq / 1000.0);
printf("Parallel elapsed time: %fs\n", elapsed_time_parallel / 1000.0);
if (fabs(sum_seq - sum_parallel) < ACCURACY)
printf("Test PASSED\n");
else
printf("Test FAILED\n");
// Release the timing events (the original leaked all four).
cudaEventDestroy(start_time_seq);
cudaEventDestroy(end_time_seq);
cudaEventDestroy(start_time_parallel);
cudaEventDestroy(end_time_parallel);
return 0;
}
// Prints command-line usage to stderr and terminates the program.
// Fix: being called always means the arguments were invalid, so exit with a
// non-zero status (the original's exit(0) reported success to the shell).
void Usage(char *prog_name)
{
fprintf(stderr, "usage: %s <n>\n", prog_name);
fprintf(stderr, " n is the number of terms and should be >= 1\n");
exit(1);
}
|
14,730 | #include "checkers.cuh"
int *initial_state();
int4 selected_move(int*, int4*);
// Drives 100 rounds of a GPU checkers simulation: player 1 and player 2
// alternate; each half-turn enumerates legal moves on the device and applies
// the host-selected one. Board setup and move selection come from the
// helpers declared above; kernels and launch configs come from checkers.cuh.
int main() {
int *board = initial_state();
my_rep_class *rep_d;       // device-side board representation
int *move_count_d;         // device counter of generated moves
int4 *move_buf_d;          // device buffer of candidate moves
cudaMalloc(&rep_d, sizeof(my_rep_class));
cudaMalloc(&move_count_d, sizeof(int));
cudaMalloc(&move_buf_d, 1024*2048*4*sizeof(int4));
my_representation<<<my_representation_blocks,
my_representation_threads>>>(board, rep_d);
for (int round = 0; round < 100; round++){
// Player 1's half-turn.
cudaMemset(move_count_d, 0, sizeof(int));
possible_moves<<<possible_moves_blocks,
possible_moves_threads>>>(rep_d, 1, move_count_d, move_buf_d);
update<<<update_blocks, update_threads>>>(rep_d,
selected_move(move_count_d, move_buf_d));
// Player 2's half-turn.
cudaMemset(move_count_d, 0, sizeof(int));
possible_moves<<<possible_moves_blocks,
possible_moves_threads>>>(rep_d, 2, move_count_d, move_buf_d);
update<<<update_blocks, update_threads>>>(rep_d,
selected_move(move_count_d, move_buf_d));
}
// ...
return 0;
}
|
14,731 |
// Writes 0 into the first element of x. Minimal single-purpose kernel —
// presumably a compile/launch smoke test; it has no other effect.
__global__ void kernel(float *x)
{
x[0] = 0;
}
// Launches `kernel` with a single thread.
// NOTE(review): x is initialized to 0 (a null device pointer), so the kernel
// writes through NULL — this will surface as cudaErrorIllegalAddress at the
// next synchronizing call. Presumably an intentional error/compile probe;
// confirm before reusing.
void foo() {
float *x = 0;
kernel<<<1,1>>>(x);
}
|
14,732 | #include "includes.h"
// Hue/sat/value keyer: converts each RGBA pixel to HSV, scores H, S and V
// against target ranges with linear rolloff, combines the scores into an
// alpha matte, and writes H, S, V, A to p_Output (interleaved, 4 floats per
// pixel). One thread per pixel; launch with a 2D grid covering the image.
// Hue ranges are in degrees and may wrap around 360, hence the paired
// wrap-aware comparisons below.
__global__ void ReplaceKernelA(const float* p_Input, float* p_Output, int p_Width, int p_Height, float hueRangeA, float hueRangeB, float hueRangeWithRollOffA, float hueRangeWithRollOffB, float satRangeA, float satRangeB, float satRolloff, float valRangeA, float valRangeB, float valRolloff, int OutputAlpha, int DisplayAlpha, float p_Black, float p_White) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < p_Width && y < p_Height) {
const int index = (y * p_Width + x) * 4;
float hcoeff, scoeff, vcoeff;
float r, g, b, h, s, v;
r = p_Input[index];
g = p_Input[index + 1];
b = p_Input[index + 2];
// --- RGB -> HSV (value in [0,1], hue in degrees) ---
float min = fmin(fmin(r, g), b);
float max = fmax(fmax(r, g), b);
v = max;
float delta = max - min;
if (max != 0.0f) {
s = delta / max;
} else {
s = 0.0f;
h = 0.0f;
}
if (delta == 0.0f) {
h = 0.0f;
} else if (r == max) {
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
h *= 1 / 6.0f;
if (h < 0.0f) {
h += 1.0f;
}
h *= 360.0f;
// --- Hue score: 1 inside [h0,h1], linear rolloff outside, wrap-aware ---
float h0 = hueRangeA;
float h1 = hueRangeB;
float h0mrolloff = hueRangeWithRollOffA;
float h1prolloff = hueRangeWithRollOffB;
if ( ( h1 < h0 && (h <= h1 || h0 <= h) ) || (h0 <= h && h <= h1) ) {
hcoeff = 1.0f;
} else {
// c0/c1 ramp up inside the leading/trailing rolloff bands.
float c0 = 0.0f;
float c1 = 0.0f;
if ( ( h0 < h0mrolloff && (h <= h0 || h0mrolloff <= h) ) || (h0mrolloff <= h && h <= h0) ) {
c0 = h0 == (h0mrolloff + 360.0f) || h0 == h0mrolloff ? 1.0f : !(( h0 < h0mrolloff && (h <= h0 || h0mrolloff <= h) ) || (h0mrolloff <= h && h <= h0)) ? 0.0f :
((h < h0mrolloff ? h + 360.0f : h) - h0mrolloff) / ((h0 < h0mrolloff ? h0 + 360.0f : h0) - h0mrolloff);
}
if ( ( h1prolloff < h1 && (h <= h1prolloff || h1 <= h) ) || (h1 <= h && h <= h1prolloff) ) {
c1 = !(( h1prolloff < h1 && (h <= h1prolloff || h1 <= h) ) || (h1 <= h && h <= h1prolloff)) ? 0.0f : h1prolloff == h1 ? 1.0f :
((h1prolloff < h1 ? h1prolloff + 360.0f : h1prolloff) - (h < h1 ? h + 360.0f : h)) / ((h1prolloff < h1 ? h1prolloff + 360.0f : h1prolloff) - h1);
}
hcoeff = fmax(c0, c1);
}
// --- Saturation score with symmetric linear rolloff ---
float s0 = satRangeA;
float s1 = satRangeB;
float s0mrolloff = s0 - satRolloff;
float s1prolloff = s1 + satRolloff;
if ( s0 <= s && s <= s1 ) {
scoeff = 1.0f;
} else if ( s0mrolloff <= s && s <= s0 ) {
scoeff = (s - s0mrolloff) / satRolloff;
} else if ( s1 <= s && s <= s1prolloff ) {
scoeff = (s1prolloff - s) / satRolloff;
} else {
scoeff = 0.0f;
}
// --- Value score with symmetric linear rolloff ---
float v0 = valRangeA;
float v1 = valRangeB;
float v0mrolloff = v0 - valRolloff;
float v1prolloff = v1 + valRolloff;
if ( (v0 <= v) && (v <= v1) ) {
vcoeff = 1.0f;
} else if ( v0mrolloff <= v && v <= v0 ) {
vcoeff = (v - v0mrolloff) / valRolloff;
} else if ( v1 <= v && v <= v1prolloff ) {
vcoeff = (v1prolloff - v) / valRolloff;
} else {
vcoeff = 0.0f;
}
// Combine channel scores; OutputAlpha selects which score(s) feed alpha.
float coeff = fmin(fmin(hcoeff, scoeff), vcoeff);
float A = OutputAlpha == 0 ? 1.0f : OutputAlpha == 1 ? hcoeff : OutputAlpha == 2 ? scoeff :
OutputAlpha == 3 ? vcoeff : OutputAlpha == 4 ? fmin(hcoeff, scoeff) : OutputAlpha == 5 ?
fmin(hcoeff, vcoeff) : OutputAlpha == 6 ? fmin(scoeff, vcoeff) : fmin(fmin(hcoeff, scoeff), vcoeff);
if (DisplayAlpha == 0)
A = coeff;
// Black/white clip adjustments on the matte.
if (p_Black > 0.0f)
A = fmax(A - (p_Black * 4.0f) * (1.0f - A), 0.0f);
if (p_White > 0.0f)
A = fmin(A * (1.0f + p_White * 4.0f), 1.0f);
p_Output[index] = h;
p_Output[index + 1] = s;
p_Output[index + 2] = v;
p_Output[index + 3] = A;
}}
14,733 |
__constant__ float tau0 = 3.5f; // relaxation time
__constant__ float visc = 1.0f; // viscosity
#define BLOCK_SIZE 64
#define DIST_SIZE 74304u
#define OPTION_SAVE_MACRO_FIELDS 1
#define OPTION_BULK 2
#define INVALID_NODE 0xffffffff
#define DT 1.0f
#include <stdio.h>
// Additional geometry parameters (velocities, pressures, etc)
__constant__ float node_params[2] = {
1.00000000000000005551e-01f
,
0.00000000000000000000e+00f
,
};
// OpenCL compatibility code.
// OpenCL compatibility shims: index 0 maps to the x dimension of the CUDA
// launch geometry, any other index maps to the y dimension.
__device__ inline int get_local_size(int i)
{
return (i == 0) ? blockDim.x : blockDim.y;
}
__device__ inline int get_global_size(int i)
{
return (i == 0) ? blockDim.x * gridDim.x : blockDim.y * gridDim.y;
}
__device__ inline int get_group_id(int i)
{
return (i == 0) ? blockIdx.x : blockIdx.y;
}
__device__ inline int get_local_id(int i)
{
return (i == 0) ? threadIdx.x : threadIdx.y;
}
__device__ inline int get_global_id(int i)
{
return (i == 0) ? (threadIdx.x + blockIdx.x * blockDim.x)
                : (threadIdx.y + blockIdx.y * blockDim.y);
}
// Per-node distribution set for the D2Q9 lattice: the rest population (fC),
// the four axis directions (E/N/W/S) and the four diagonals (NE/NW/SW/SE).
typedef struct Dist {
float fC;
float fE;
float fN;
float fW;
float fS;
float fNE;
float fNW;
float fSW;
float fSE;
} Dist;
// Functions for checking whether a node is of a given specific type.
// Node-type predicates over the decoded type codes (1=fluid, 2=full
// bounce-back wall, 3=regularized velocity BC, 4=ghost).
__device__ inline bool is_NTFluid(unsigned int type) {
return type == 1;
}
__device__ inline bool isNTFullBBWall(unsigned int type) {
return type == 2;
}
__device__ inline bool is_NTGhost(unsigned int type) {
return type == 4;
}
__device__ inline bool isNTRegularizedVelocity(unsigned int type) {
return type == 3;
}
// True when macroscopic fields can be computed without special handling.
__device__ inline bool NTUsesStandardMacro(unsigned int type) {
return is_NTFluid(type) || isNTFullBBWall(type);
}
// Wet nodes undergo the standard collision procedure.
__device__ inline bool isWetNode(unsigned int type) {
return is_NTFluid(type) || isNTRegularizedVelocity(type);
}
// Ghost nodes do not participate in the simulation.
__device__ inline bool isExcludedNode(unsigned int type) {
return is_NTGhost(type);
}
// No node type in this generated model is propagation-only.
__device__ inline bool isPropagationOnly(unsigned int type) {
return false;
}
// Internal helper, do not use directly.
// Internal helper: copy num_values floats from a local buffer into the
// node's slot of the global scratch area. Do not use directly.
__device__ inline void _storeNodeScratchSpace(unsigned int scratch_id,
unsigned int num_values, float *buffer, float *g_buffer) {
const unsigned int base = scratch_id * num_values;
for (int k = 0; k < num_values; k++) {
g_buffer[base + k] = buffer[k];
}
}
// Internal helper: copy num_values floats from the node's slot of the global
// scratch area into a local buffer. Do not use directly.
__device__ inline void _loadNodeScratchSpace(unsigned int scratch_id,
unsigned int num_values, float *g_buffer, float *buffer) {
const unsigned int base = scratch_id * num_values;
for (int k = 0; k < num_values; k++) {
buffer[k] = g_buffer[base + k];
}
}
// Loads node scratch values from global memory into 'buffer'.
// scratch_id: scratch slot for nodes of this type; type: node type;
// g_buffer: global scratch area. No node types in this generated model use
// scratch space, so the dispatch below is empty.
__device__ inline void loadNodeScratchSpace(unsigned int scratch_id,
unsigned int type, float *g_buffer, float* buffer)
{
switch (type) {
}
}
// Stores node scratch values from 'buffer' into global memory.
// Arguments mirror loadNodeScratchSpace; the dispatch is empty here too.
__device__ inline void storeNodeScratchSpace(unsigned int scratch_id,
unsigned int type, float* buffer, float* g_buffer)
{
switch (type) {
}
}
// The packed node code layout (from the masks below): bits 0-2 node type,
// bits 3-4 parameter index, bits 5+ orientation.
__device__ inline unsigned int decodeNodeType(unsigned int nodetype) {
return nodetype & 7u;
}
__device__ inline unsigned int decodeNodeOrientation(unsigned int nodetype) {
return nodetype >> 5;
}
// Scratch ID for (load,store)NodeScratchSpace. The generated mask is 0:
// no node type uses scratch space, so this always yields 0.
__device__ inline unsigned int decodeNodeScratchId(unsigned int nodetype) {
return (nodetype >> 5) & 0u;
}
__device__ inline unsigned int decodeNodeParamIdx(unsigned int nodetype) {
return (nodetype >> 3) & 3u;
}
// Flattens 2D lattice coordinates to a linear index (row stride 288).
__device__ inline unsigned int getGlobalIdx(int gx, int gy) {
return 288 * gy + gx;
}
// Inverse of getGlobalIdx.
__device__ inline void decodeGlobalIdx(unsigned int gi, int *gx, int *gy) {
*gy = gi / 288;
*gx = gi % 288;
}
// Aborts the current kernel with a hardware trap.
__device__ void die(void) {
asm("trap;");
}
// Sanity check: if any of the nine distribution components is NaN or Inf,
// prints a diagnostic with the node coordinates and aborts the kernel via
// die() (hardware trap). Intended for debugging numeric blow-ups.
__device__ void checkInvalidValues(Dist* d,
int gx, int gy
) {
bool valid = true;
if (!isfinite(d->fC)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fC (%f) at: "
"(%d, %d)"
"\n", d->fC,
gx, gy
);
}
if (!isfinite(d->fE)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fE (%f) at: "
"(%d, %d)"
"\n", d->fE,
gx, gy
);
}
if (!isfinite(d->fN)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fN (%f) at: "
"(%d, %d)"
"\n", d->fN,
gx, gy
);
}
if (!isfinite(d->fW)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fW (%f) at: "
"(%d, %d)"
"\n", d->fW,
gx, gy
);
}
if (!isfinite(d->fS)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fS (%f) at: "
"(%d, %d)"
"\n", d->fS,
gx, gy
);
}
if (!isfinite(d->fNE)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fNE (%f) at: "
"(%d, %d)"
"\n", d->fNE,
gx, gy
);
}
if (!isfinite(d->fNW)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fNW (%f) at: "
"(%d, %d)"
"\n", d->fNW,
gx, gy
);
}
if (!isfinite(d->fSW)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fSW (%f) at: "
"(%d, %d)"
"\n", d->fSW,
gx, gy
);
}
if (!isfinite(d->fSE)) {
valid = false;
printf("ERR(subdomain=0): Invalid value of fSE (%f) at: "
"(%d, %d)"
"\n", d->fSE,
gx, gy
);
}
// All components are reported before trapping so one launch surfaces
// every bad value at this node.
if (!valid) {
die();
}
}
// Load the distributions from din to dout, for the node with the index 'idx'.
// Performs propagation when reading distributions from global memory.
// This implements the propagate-on-read scheme.
// Implements the propagate-on-read scheme for the AA access pattern, where the
// distributions are not located in their natural slots, but the opposite ones
// (e.g. fNE is located where fSW normally is). This ensures that within a single
// timestep, the distributions are read from and written to the exact same places
// in global memory.
// Loads the nine D2Q9 components for node 'gi' from global memory into
// 'dout'. Each direction occupies its own DIST_SIZE-strided plane of 'din'
// (see the AA access-pattern notes above).
__device__ inline void getDist(
Dist *dout, const float *__restrict__ din, unsigned int gi
) {
dout->fC = din[gi + DIST_SIZE * 0 + (unsigned int)0];
dout->fE = din[gi + DIST_SIZE * 1 + (unsigned int)0];
dout->fN = din[gi + DIST_SIZE * 2 + (unsigned int)0];
dout->fW = din[gi + DIST_SIZE * 3 + (unsigned int)0];
dout->fS = din[gi + DIST_SIZE * 4 + (unsigned int)0];
dout->fNE = din[gi + DIST_SIZE * 5 + (unsigned int)0];
dout->fNW = din[gi + DIST_SIZE * 6 + (unsigned int)0];
dout->fSW = din[gi + DIST_SIZE * 7 + (unsigned int)0];
dout->fSE = din[gi + DIST_SIZE * 8 + (unsigned int)0];
}
// Returns a node parameter which is a vector (in 'out').
// Copies a 2-component vector node parameter (e.g. a boundary velocity)
// from constant memory into 'out'.
__device__ inline void node_param_get_vector(const int idx, float *out
) {
out[0] = node_params[idx + 0];
out[1] = node_params[idx + 1];
}
// Reads a scalar node parameter from constant memory.
__device__ inline float node_param_get_scalar(const int idx
) {
return node_params[idx];
}
// Add comments for the Guo density implementation.
// Full-way bounce-back: exchanges each distribution with the one pointing in
// the opposite direction (E<->W, N<->S, NE<->SW, NW<->SE). The rest
// population fC is its own opposite and stays untouched.
__device__ inline void bounce_back(Dist *fi)
{
float tmp;
tmp = fi->fW;  fi->fW  = fi->fE;  fi->fE  = tmp;
tmp = fi->fS;  fi->fS  = fi->fN;  fi->fN  = tmp;
tmp = fi->fSW; fi->fSW = fi->fNE; fi->fNE = tmp;
tmp = fi->fSE; fi->fSE = fi->fNW; fi->fNW = tmp;
}
// Compute the 0th moment of the distributions, i.e. density.
// 0th moment of the distributions: density (sum of all nine populations).
__device__ inline void compute_0th_moment(Dist *fi, float *out)
{
*out = fi->fC + fi->fE + fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW + fi->fW;
}
// 1st moments of the distributions: momentum (x in out[0], y in out[1]),
// scaled by 'factor'; accumulated into 'out' when add != 0, overwritten
// otherwise.
__device__ inline void compute_1st_moment(Dist *fi, float *out, int add, float factor)
{
if (add) {
out[0] += factor * (
fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW
);
out[1] += factor * (
fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW
);
} else {
out[0] = factor * (
fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW
);
out[1] = factor * (
fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW
);
}
}
// 2nd moments of the distributions. Order of components is:
// 2D: xx, xy, yy
// 3D: xx, xy, xz, yy, yz, zz
__device__ inline void compute_2nd_moment(Dist *fi, float *out)
{
out[0] =
fi->fE + fi->fNE + fi->fNW + fi->fSE + fi->fSW + fi->fW
;
out[1] =
fi->fNE - fi->fNW - fi->fSE + fi->fSW
;
out[2] =
fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW
;
}
// 2nd moment of the non-equilibrium part of 'fi': the full 2nd moment minus
// the equilibrium contribution rho*(v v^T + cs^2 I), with cs^2 = 1/3.
__device__ inline void compute_noneq_2nd_moment(Dist* fi, const float rho, float *v0, float *out)
{
out[0] =
fi->fE + fi->fNE + fi->fNW + fi->fSE + fi->fSW + fi->fW
-
rho*((v0[0]*v0[0]) + 1.0f* (1.0f / 3.0f))
;
out[1] =
fi->fNE - fi->fNW - fi->fSE + fi->fSW
-
rho*v0[0]*v0[1]
;
out[2] =
fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW
-
rho*((v0[1]*v0[1]) + 1.0f* (1.0f / 3.0f))
;
}
// 1st moments divided by the 0th moment, i.e. velocity. 'zero' is the
// density ('rho') by which the momenta are divided.
__device__ inline void compute_1st_div_0th(Dist *fi, float *out, float zero)
{
out[0] =
(fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW)/zero
;
out[1] =
(fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW)/zero
;
}
// Computes both macroscopic fields for a node: density (0th moment) and
// velocity (momentum divided by density).
__device__ inline void compute_macro_quant(Dist *dists, float *rho_out, float *vel_out)
{
compute_0th_moment(dists, rho_out);
compute_1st_div_0th(dists, vel_out, *rho_out);
}
// Density accessor; node_type and orientation are accepted for interface
// parity with generated callers but are not used in this model.
__device__ inline void get0thMoment(Dist *dists, int node_type, int orientation, float *out)
{
compute_0th_moment(dists, out);
}
// Common code for the equilibrium and Zou-He density boundary conditions.
//
// Get macroscopic density rho and velocity v given a distribution fi, and
// the node class node_type.
//
// Computes macroscopic density (*rho) and velocity (v0) from 'fi' for the
// node class 'node_type'. Standard-macro nodes (and orientation 0) use the
// plain moment formulas; regularized-velocity boundary nodes first replace
// the unknown (inward-pointing) distributions with their opposites, read the
// prescribed velocity from node_params, and solve for rho from the known
// populations (Zou-He-style density recovery per orientation).
__device__ inline void getMacro(
Dist *fi, int ncode, int node_type, int orientation, float *rho,
float *v0
)
{
if (NTUsesStandardMacro(node_type) || orientation == 0) {
compute_macro_quant(fi, rho, v0);
}
else if (isNTRegularizedVelocity(node_type)) {
int node_param_idx = decodeNodeParamIdx(ncode);
// We're dealing with a boundary node, for which some of the distributions
// might be meaningless. Fill them with the values of the opposite
// distributions.
switch (orientation) {
case 1: {
// fE is undefined.
fi->fE = fi->fW;
// fNE is undefined.
fi->fNE = fi->fSW;
// fSE is undefined.
fi->fSE = fi->fNW;
break;
}
case 2: {
// fN is undefined.
fi->fN = fi->fS;
// fNE is undefined.
fi->fNE = fi->fSW;
// fNW is undefined.
fi->fNW = fi->fSE;
break;
}
case 3: {
// fW is undefined.
fi->fW = fi->fE;
// fNW is undefined.
fi->fNW = fi->fSE;
// fSW is undefined.
fi->fSW = fi->fNE;
break;
}
case 4: {
// fS is undefined.
fi->fS = fi->fN;
// fSW is undefined.
fi->fSW = fi->fNE;
// fSE is undefined.
fi->fSE = fi->fNW;
break;
}
}
*rho = fi->fC + fi->fE + fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW + fi->fW;
// Prescribed wall velocity for this boundary node.
node_param_get_vector(node_param_idx, v0
);
// Correct rho for the prescribed normal velocity component.
switch (orientation) {
case 1:
*rho =
(*rho)/(-v0[0] + 1.0f)
;
break;
case 2:
*rho =
(*rho)/(-v0[1] + 1.0f)
;
break;
case 3:
*rho =
(*rho)/(v0[0] + 1.0f)
;
break;
case 4:
*rho =
(*rho)/(v0[1] + 1.0f)
;
break;
}
}
}
// Uses extrapolation/other schemes to compute missing distributions for some implementations
// of boundary condtitions.
// Generated no-op: no boundary-condition type in this model needs
// extrapolated/reconstructed distributions. Kept for interface parity with
// the caller in CollideAndPropagate.
__device__ inline void fixMissingDistributions(
Dist *fi, float *dist_in, int ncode, int node_type, int orientation, unsigned int gi,
float *__restrict__ ivx,
float *__restrict__ ivy,
float *gg0m0
) {
// Intentionally empty.
}
// TODO: Check whether it is more efficient to actually recompute
// node_type and orientation instead of passing them as variables.
// Generated no-op: no postcollision boundary conditions in this model.
__device__ inline void postcollisionBoundaryConditions(
Dist *fi, int ncode, int node_type, int orientation,
float *rho, float *v0, unsigned int gi, float *dist_out
)
{
// Intentionally empty.
}
// Applies boundary conditions before the collision step.
// Full bounce-back walls: reverse all distributions. Regularized-velocity
// nodes: reconstruct the unknown populations via non-equilibrium bounce-back
// per wall orientation, then regularize the whole set — recompute every
// population from the equilibrium plus the non-equilibrium stress tensor
// ('flux'), clamped below at 1e-7 to keep populations positive.
__device__ inline void precollisionBoundaryConditions(Dist *fi, int ncode,
int node_type, int orientation, float *rho, float *v0
)
{
if (0) {}
else if (isNTFullBBWall(node_type)) {
bounce_back(fi);
}
else if (0 || isNTRegularizedVelocity(node_type)
) {
// Bounce-back of the non-equilibrium parts.
switch (orientation) {
case 1:
fi->fE
=
fi->fW + (2.0f* (1.0f / 3.0f))*(*rho)*v0[0]
;
fi->fNE
=
fi->fSW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1])
;
fi->fSE
=
fi->fNW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1])
;
break;
case 2:
fi->fN
=
fi->fS + (2.0f* (1.0f / 3.0f))*(*rho)*v0[1]
;
fi->fNE
=
fi->fSW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1])
;
fi->fNW
=
fi->fSE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1])
;
break;
case 3:
fi->fW
=
fi->fE - 2.0f* (1.0f / 3.0f)*(*rho)*v0[0]
;
fi->fNW
=
fi->fSE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1])
;
fi->fSW
=
fi->fNE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1])
;
break;
case 4:
fi->fS
=
fi->fN - 2.0f* (1.0f / 3.0f)*(*rho)*v0[1]
;
fi->fSW
=
fi->fNE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1])
;
fi->fSE
=
fi->fNW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1])
;
break;
case 0:
// Corner/undetermined orientation: plain bounce-back and done.
bounce_back(fi);
return;
}
// Regularization: rebuild all populations from equilibrium + Pi^(neq).
float flux[3];
compute_noneq_2nd_moment(fi, *rho, v0, flux);
fi->fC = max(1e-7f,
(4.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*(*rho)
+
-2.0f* (1.0f / 3.0f)*flux[0] - 2.0f* (1.0f / 3.0f)*flux[2]
);
fi->fE = max(1e-7f,
(1.0f* (1.0f / 9.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*(*rho)
+
(1.0f* (1.0f / 3.0f))*flux[0] - 1.0f* (1.0f / 6.0f)*flux[2]
);
fi->fN = max(1e-7f,
(1.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 9.0f))*(*rho)
+
-1.0f* (1.0f / 6.0f)*flux[0] + (1.0f* (1.0f / 3.0f))*flux[2]
);
fi->fW = max(1e-7f,
(1.0f* (1.0f / 9.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*(*rho)
+
(1.0f* (1.0f / 3.0f))*flux[0] - 1.0f* (1.0f / 6.0f)*flux[2]
);
fi->fS = max(1e-7f,
(1.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*(*rho)
+
-1.0f* (1.0f / 6.0f)*flux[0] + (1.0f* (1.0f / 3.0f))*flux[2]
);
fi->fNE = max(1e-7f,
(1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho)
+
(1.0f* (1.0f / 12.0f))*flux[0] + (1.0f* (1.0f / 4.0f))*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2]
);
fi->fNW = max(1e-7f,
(1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho)
+
(1.0f* (1.0f / 12.0f))*flux[0] - 1.0f* (1.0f / 4.0f)*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2]
);
fi->fSW = max(1e-7f,
(1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho)
+
(1.0f* (1.0f / 12.0f))*flux[0] + (1.0f* (1.0f / 4.0f))*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2]
);
fi->fSE = max(1e-7f,
(1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho)
+
(1.0f* (1.0f / 12.0f))*flux[0] - 1.0f* (1.0f / 4.0f)*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2]
);
}
}
//
// Performs the relaxation step in the BGK model given the density rho,
// the velocity v and the distribution fi.
// Performs the BGK (single-relaxation-time) collision step: computes the
// D2Q9 equilibrium distribution feq0 for the given density rho and velocity
// iv0, then relaxes each population of d0 toward it with the fixed
// relaxation rate omega (= 1/tau, baked in by the code generator).
__device__ inline void BGK_relaxate0(
float rho, float *iv0
,
Dist *d0, int node_type, int ncode
)
{
float v0[2];
Dist feq0;
v0[0] = iv0[0];
v0[1] = iv0[1];
;
feq0.fC =
(4.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*rho
;
feq0.fE =
(1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho
;
feq0.fN =
(1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 9.0f))*rho
;
feq0.fW =
(1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho
;
feq0.fS =
(1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*rho
;
feq0.fNE =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
feq0.fNW =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
feq0.fSW =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
feq0.fSE =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
// Generated constant: 1/tau0 = 1/3.5.
float omega =
2.85714285714285698425e-01f
;
d0->fC += omega * (feq0.fC - d0->fC);
d0->fE += omega * (feq0.fE - d0->fE);
d0->fN += omega * (feq0.fN - d0->fN);
d0->fW += omega * (feq0.fW - d0->fW);
d0->fS += omega * (feq0.fS - d0->fS);
d0->fNE += omega * (feq0.fNE - d0->fNE);
d0->fNW += omega * (feq0.fNW - d0->fNW);
d0->fSW += omega * (feq0.fSW - d0->fSW);
d0->fSE += omega * (feq0.fSE - d0->fSE);
// FIXME: This should be moved to postcollision boundary conditions.
}
// A kernel to set the node distributions using the equilibrium distributions
// and the macroscopic fields.
// Initializes the distribution array from the macroscopic fields: every
// population of node gi is set to its D2Q9 equilibrium value for the
// provided density (irho) and velocity (ivx, ivy). The hard-coded offsets
// are k * DIST_SIZE (74304) for direction planes k = 0..8.
// One thread per lattice site; gx beyond 257 (lattice width incl. ghosts)
// does nothing.
__global__ void SetInitialConditions(
float *dist1_in,
float *__restrict__ ivx,
float *__restrict__ ivy,
const float *__restrict__ irho,
const int *__restrict__ map
)
{
int lx = get_local_id(0); // ID inside the current block
int gx = get_global_id(0);
int gy = get_group_id(1);
unsigned int gi = getGlobalIdx(gx, gy);
// Nothing to do if we're outside of the simulation domain.
if (gx > 257) {
return;
}
// Cache macroscopic fields in local variables.
float rho = irho[gi] ;
float v0[2];
v0[0] = ivx[gi];
v0[1] = ivy[gi];
dist1_in[gi + (0u + (unsigned int)(0 + 0))] =
(4.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*rho
;
dist1_in[gi + (74304u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho
;
dist1_in[gi + (148608u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 9.0f))*rho
;
dist1_in[gi + (222912u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho
;
dist1_in[gi + (297216u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*rho
;
dist1_in[gi + (371520u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
dist1_in[gi + (445824u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
dist1_in[gi + (520128u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
dist1_in[gi + (594432u + (unsigned int)(0 + 0))] =
(1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho
;
}
// Computes the density field from the current distributions: for every
// participating node, loads its nine populations and stores their sum
// (0th moment) into orho. Excluded (ghost) and propagation-only nodes are
// skipped. One thread per lattice site.
__global__ void PrepareMacroFields(
const int *__restrict__ map,
const float *__restrict__ dist_in,
float *orho,
int options
)
{
int lx = get_local_id(0); // ID inside the current block
int gx = get_global_id(0);
int gy = get_group_id(1);
unsigned int gi = getGlobalIdx(gx, gy);
// Nothing to do if we're outside of the simulation domain.
if (gx > 257) {
return;
}
int ncode = map[gi];
int type = decodeNodeType(ncode);
// Unused nodes do not participate in the simulation.
if (isExcludedNode(type) || isPropagationOnly(type))
return;
int orientation = decodeNodeOrientation(ncode);
Dist fi;
float out;
getDist(
&fi, dist_in, gi
);
get0thMoment(&fi, type, orientation, &out);
orho[gi] = out;
}
// Main lattice-Boltzmann step: for each node, load distributions, apply
// boundary conditions, perform the BGK collision, optionally store the
// macroscopic fields, then propagate. N/S propagation goes straight to
// global memory; E/W propagation is staged through shared memory within a
// thread block (falling back to global memory at block boundaries), with
// -1.0f used as a "nothing to propagate" sentinel.
// Launch geometry: 1D blocks of BLOCK_SIZE (64) threads along x, one block
// row per lattice row (gy from blockIdx.y); domain is 258 sites wide.
__global__ void CollideAndPropagate(
const int *__restrict__ map,
float *__restrict__ dist_in,
float *__restrict__ dist_out,
float *__restrict__ gg0m0,
float *__restrict__ ovx,
float *__restrict__ ovy,
int options
)
{
int lx = get_local_id(0); // ID inside the current block
int gx = get_global_id(0);
int gy = get_group_id(1);
unsigned int gi = getGlobalIdx(gx, gy);
// Nothing to do if we're outside of the simulation domain.
if (gx > 257) {
return;
}
// Shared variables for in-block propagation. The same buffers are reused
// for the W/NW/SW pass via the #define aliases below.
__shared__ float prop_fE[BLOCK_SIZE];
__shared__ float prop_fNE[BLOCK_SIZE];
__shared__ float prop_fSE[BLOCK_SIZE];
#define prop_fW prop_fE
#define prop_fSW prop_fNE
#define prop_fNW prop_fSE
int ncode = map[gi];
int type = decodeNodeType(ncode);
// Unused nodes do not participate in the simulation.
if (isExcludedNode(type)) {
return;
}
int orientation = decodeNodeOrientation(ncode);
// Cache the distributions in local variables
Dist d0;
if (!isPropagationOnly(type) ) {
getDist(
&d0, dist_in, gi
);
fixMissingDistributions(&d0, dist_in, ncode, type, orientation, gi,
ovx, ovy , gg0m0
);
// Macroscopic quantities for the current cell
float g0m0, v[2];
getMacro(&d0, ncode, type, orientation, &g0m0, v
);
precollisionBoundaryConditions(&d0, ncode, type, orientation, &g0m0, v
);
if (isWetNode(type)) {
BGK_relaxate0(
g0m0, v
, &d0, type, ncode
);
}
postcollisionBoundaryConditions(&d0, ncode, type, orientation, &g0m0, v, gi, dist_out
);
if (isWetNode(type) ) {
checkInvalidValues(&d0,
gx, gy
);
}
// Only save the macroscopic quantities if requested to do so.
if ((options & OPTION_SAVE_MACRO_FIELDS) && isWetNode(type)
) {
gg0m0[gi] = g0m0 ;
ovx[gi] = v[0];
ovy[gi] = v[1];
;
}
} // propagation only
const bool propagation_only = isPropagationOnly(type);
// Initialize the shared array with invalid sentinel values. If the sentinel
// value is not subsequently overridden, it will not be propagated.
prop_fE[lx] = -1.0f;
__syncthreads();
if (!propagation_only ) {
// Update the 0-th direction distribution
dist_out[gi] = d0.fC;
// Propagation in directions orthogonal to the X axis (global memory)
{
if (gy < 257) {
dist_out[gi + (148608u + (unsigned int)(0 + 288))] = d0.fN;
}
}
{
if (gy > 0) {
dist_out[gi + (297216u + (unsigned int)(0 + -288))] = d0.fS;
}
}
// E propagation in shared memory
if (gx < 257) {
// Note: propagation to ghost nodes is done directly in global memory as there
// are no threads running for the ghost nodes.
if (lx < 63 && gx != 256) {
prop_fE[lx+1] = d0.fE;
prop_fNE[lx+1] = d0.fNE;
prop_fSE[lx+1] = d0.fSE;
// E propagation in global memory (at right block boundary)
} else {
{
dist_out[gi + (74304u + (unsigned int)(0 + 1))] = d0.fE;
}
{
if (gy < 257) {
dist_out[gi + (371520u + (unsigned int)(0 + 289))] = d0.fNE;
}
}
{
if (gy > 0) {
dist_out[gi + (594432u + (unsigned int)(0 + -287))] = d0.fSE;
}
}
}
}
}
__syncthreads();
// Save locally propagated distributions into global memory.
// The leftmost thread is not updated in this block.
if (lx > 0 && gx < 258 && !propagation_only )
if (prop_fE[lx] != -1.0f)
{
dist_out[gi + (74304u + (unsigned int)(0 + 0))] = prop_fE[lx];
if (gy < 257) {
dist_out[gi + (371520u + (unsigned int)(0 + 288))] = prop_fNE[lx];
}
if (gy > 0) {
dist_out[gi + (594432u + (unsigned int)(0 + -288))] = prop_fSE[lx];
}
}
__syncthreads();
// Refill the propagation buffer with sentinel values.
prop_fE[lx] = -1.0f;
__syncthreads();
if (!propagation_only ) {
// W propagation in shared memory
// Note: propagation to ghost nodes is done directly in global memory as there
// are no threads running for the ghost nodes.
if ((lx > 1 || (lx > 0 && gx >= 64)) && !propagation_only) {
prop_fW[lx-1] = d0.fW;
prop_fNW[lx-1] = d0.fNW;
prop_fSW[lx-1] = d0.fSW;
// W propagation in global memory (at left block boundary)
} else if (gx > 0) {
{
dist_out[gi + (222912u + (unsigned int)(0 + -1))] = d0.fW;
}
{
if (gy < 257) {
dist_out[gi + (445824u + (unsigned int)(0 + 287))] = d0.fNW;
}
}
{
if (gy > 0) {
dist_out[gi + (520128u + (unsigned int)(0 + -289))] = d0.fSW;
}
}
}
}
__syncthreads();
// The rightmost thread is not updated in this block.
if (lx < 63 && gx < 257 && !propagation_only )
if (prop_fE[lx] != -1.0f)
{
dist_out[gi + (222912u + (unsigned int)(0 + 0))] = prop_fW[lx];
if (gy < 257) {
dist_out[gi + (445824u + (unsigned int)(0 + 288))] = prop_fNW[lx];
}
if (gy > 0) {
dist_out[gi + (520128u + (unsigned int)(0 + -288))] = prop_fSW[lx];
}
}
}
// Copies momentum transfer for a force object into a linear buffer
// so that a force can be computed easily via a sum reduction.
// TODO(michalj): Fuse this with summation to improve performance.
// Gathers momentum-transfer terms for a force object into a dense buffer so
// the total force can be obtained with a simple sum reduction.
// idx/idx2 hold the two distribution indices of each boundary link; entries
// at or beyond max_idx are ignored. One thread per link.
__global__ void ComputeForceObjects(
const unsigned int *__restrict__ idx,
const unsigned int *__restrict__ idx2,
const float *__restrict__ dist,
float *out,
const unsigned int max_idx
)
{
const unsigned int entry = get_global_id(0);
if (entry >= max_idx) {
return;
}
// Sum the pair of distributions exchanging momentum across this link.
out[entry] = dist[idx[entry]] + dist[idx2[entry]];
}
// Applies periodic boundary conditions within a single subdomain.
// dist: pointer to the distributions array
// axis: along which axis the PBCs are to be applied (0:x, 1:y, 2:z)
// One thread handles one node of the ghost layer. For each axis the
// edge distributions are exchanged in both directions (low->high and
// high->low). NOTE(review): only axis values 0 and 1 are handled here;
// any other value is silently a no-op.
__global__ void ApplyPeriodicBoundaryConditions(
float *dist, int axis)
{
const int idx1 = get_global_id(0);
unsigned int gi_low, gi_high;
// For single block PBC, the envelope size (width of the ghost node
// layer) is always 1.
// TODO(michalj): Generalize this for the case when envelope_size != 1.
if (axis == 0) {
if (idx1 >= 258) { return; }
gi_low = getGlobalIdx(0, idx1); // ghost node
gi_high = getGlobalIdx(256, idx1); // real node
{
// TODO(michalj): Generalize this for grids with e_i > 1.
// Load distributions to be propagated from low idx to high idx.
const float ffW = dist[gi_low + DIST_SIZE * 3 + (unsigned int)0];
const float ffNW = dist[gi_low + DIST_SIZE * 6 + (unsigned int)0];
const float ffSW = dist[gi_low + DIST_SIZE * 7 + (unsigned int)0];
// Non-finite values mark unpopulated distributions and are not copied.
if (gi_high != INVALID_NODE && isfinite(ffW)) {
dist[gi_high + DIST_SIZE * 3 + (unsigned int)0] = ffW;
}
if (isfinite(ffNW)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 > 1 && idx1 <= 256) {
dist[gi_high + DIST_SIZE * 6 + (unsigned int)0] = ffNW;
}
else
{
// Generated-code placeholder branch (intentionally empty).
if (0) {}
}
}
if (isfinite(ffSW)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 < 256 && idx1 >= 1) {
dist[gi_high + DIST_SIZE * 7 + (unsigned int)0] = ffSW;
}
else
{
if (0) {}
}
}
} // low to high
{
// Load distributrions to be propagated from high idx to low idx.
const float ffE = dist[gi_high + DIST_SIZE * 1 + (unsigned int)1];
const float ffNE = dist[gi_high + DIST_SIZE * 5 + (unsigned int)1];
const float ffSE = dist[gi_high + DIST_SIZE * 8 + (unsigned int)1];
if (isfinite(ffE) && gi_low != INVALID_NODE) {
dist[gi_low + DIST_SIZE * 1 + (unsigned int)1] = ffE;
}
if (isfinite(ffNE)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 > 1 && idx1 <= 256 ) {
dist[gi_low + DIST_SIZE * 5 + (unsigned int)1] = ffNE;
}
else
{
if (0) {}
}
}
if (isfinite(ffSE)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 < 256 && idx1 >= 1 ) {
dist[gi_low + DIST_SIZE * 8 + (unsigned int)1] = ffSE;
}
else
{
if (0) {}
}
}
} // high to low
} else if (axis == 1) {
if (idx1 >= 258) { return; }
gi_low = getGlobalIdx(idx1, 0); // ghost node
gi_high = getGlobalIdx(idx1, 256); // real node
{
// TODO(michalj): Generalize this for grids with e_i > 1.
// Load distributions to be propagated from low idx to high idx.
const float ffS = dist[gi_low + DIST_SIZE * 4 + (unsigned int)0];
const float ffSW = dist[gi_low + DIST_SIZE * 7 + (unsigned int)0];
const float ffSE = dist[gi_low + DIST_SIZE * 8 + (unsigned int)0];
if (gi_high != INVALID_NODE && isfinite(ffS)) {
dist[gi_high + DIST_SIZE * 4 + (unsigned int)0] = ffS;
}
if (isfinite(ffSW)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 < 256 && idx1 >= 1) {
dist[gi_high + DIST_SIZE * 7 + (unsigned int)0] = ffSW;
}
else
{
if (0) {}
}
}
if (isfinite(ffSE)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 > 1 && idx1 <= 256) {
dist[gi_high + DIST_SIZE * 8 + (unsigned int)0] = ffSE;
}
else
{
if (0) {}
}
}
} // low to high
{
// Load distributrions to be propagated from high idx to low idx.
// The +288 offset selects the neighboring row within the slice.
const float ffN = dist[gi_high + DIST_SIZE * 2 + (unsigned int)288];
const float ffNE = dist[gi_high + DIST_SIZE * 5 + (unsigned int)288];
const float ffNW = dist[gi_high + DIST_SIZE * 6 + (unsigned int)288];
if (isfinite(ffN) && gi_low != INVALID_NODE) {
dist[gi_low + DIST_SIZE * 2 + (unsigned int)288] = ffN;
}
if (isfinite(ffNE)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 > 1 && idx1 <= 256 ) {
dist[gi_low + DIST_SIZE * 5 + (unsigned int)288] = ffNE;
}
else
{
if (0) {}
}
}
if (isfinite(ffNW)) {
// Skip distributions which are not populated or cross multiple boundaries.
if (idx1 < 256 && idx1 >= 1 ) {
dist[gi_low + DIST_SIZE * 6 + (unsigned int)288] = ffNW;
}
else
{
if (0) {}
}
}
} // high to low
}
}
// Applies periodic boundary conditions to a scalar field within a single subdomain.
// field: pointer to the array with the field data
// axis: along which axis the PBCs are to be applied (0:x, 1:y, 2:z)
// Only finite values are copied; non-finite entries are left untouched.
// TODO(michalj): Generalize this for the case when envelope_size != 1.
__global__ void ApplyMacroPeriodicBoundaryConditions(
float *field, int axis)
{
    const int idx1 = get_global_id(0);
    if (idx1 >= 258) {
        return;
    }
    if (axis == 0) {
        // Mirror the high real node into the low ghost node.
        unsigned int src = getGlobalIdx(256, idx1);
        unsigned int dst = getGlobalIdx(0, idx1);
        if (isfinite(field[src])) {
            field[dst] = field[src];
        }
        // Mirror the low real node into the high ghost node.
        src = getGlobalIdx(1, idx1);
        dst = getGlobalIdx(257, idx1);
        if (isfinite(field[src])) {
            field[dst] = field[src];
        }
    } else if (axis == 1) {
        unsigned int src = getGlobalIdx(idx1, 256);
        unsigned int dst = getGlobalIdx(idx1, 0);
        if (isfinite(field[src])) {
            field[dst] = field[src];
        }
        src = getGlobalIdx(idx1, 1);
        dst = getGlobalIdx(idx1, 257);
        if (isfinite(field[src])) {
            field[dst] = field[src];
        }
    }
}
// Collects ghost node data for connections along axes other than X.
// dist: distributions array
// face: see LBBlock class constants (2: low-Y face at gy=0, 3: high-Y face at gy=257)
// base_gx: where along the X axis to start collecting the data
// max_lx: number of values to collect (three distributions per node)
// buffer: buffer where the data is to be saved
__global__ void CollectContinuousData(
    float *dist, int face, int base_gx,
    int max_lx, float *buffer)
{
    const int idx = get_global_id(0);
    // Initialized so that an unexpected face value or a max_lx that is not
    // a multiple of 3 (dist_num == 3) can never publish an uninitialized
    // register value into the buffer.
    float tmp = 0.0f;
    if (idx >= max_lx) {
        return;
    }
    switch (face) {
    case 2: {
        // Distributions 4 (S), 7 (SW) and 8 (SE) from the gy=0 row.
        const int dist_size = max_lx / 3;
        const int dist_num = idx / dist_size;
        const int gx = idx % dist_size;
        unsigned int gi = getGlobalIdx(base_gx + gx, 0);
        switch (dist_num) {
        case 0: {
            tmp = dist[gi + DIST_SIZE * 4 + (unsigned int)0];
            break;
        }
        case 1: {
            tmp = dist[gi + DIST_SIZE * 7 + (unsigned int)0];
            break;
        }
        case 2: {
            tmp = dist[gi + DIST_SIZE * 8 + (unsigned int)0];
            break;
        }
        }
        buffer[idx] = tmp;
        break;
    }
    case 3: {
        // Distributions 2 (N), 5 (NE) and 6 (NW) from the gy=257 row.
        const int dist_size = max_lx / 3;
        const int dist_num = idx / dist_size;
        const int gx = idx % dist_size;
        unsigned int gi = getGlobalIdx(base_gx + gx, 257);
        switch (dist_num) {
        case 0: {
            tmp = dist[gi + DIST_SIZE * 2 + (unsigned int)0];
            break;
        }
        case 1: {
            tmp = dist[gi + DIST_SIZE * 5 + (unsigned int)0];
            break;
        }
        case 2: {
            tmp = dist[gi + DIST_SIZE * 6 + (unsigned int)0];
            break;
        }
        }
        buffer[idx] = tmp;
        break;
    }
    }
}
// Writes ghost-node data received from a neighboring subdomain back into
// the distributions array, for connections along axes other than X.
// face 2 targets row gy=256 (distributions 4/7/8); face 3 targets row
// gy=1 (distributions 2/5/6); any other face value is a no-op.
__global__ void DistributeContinuousData(
float *dist, int face, int base_gx,
int max_lx, float *buffer)
{
    const int idx = get_global_id(0);
    if (idx >= max_lx)
        return;
    if (face == 2) {
        const int dist_size = max_lx / 3;
        const int dist_num = idx / dist_size;
        const int gx = idx % dist_size;
        const float tmp = buffer[idx];
        const unsigned int gi = getGlobalIdx(base_gx + gx, 256);
        if (dist_num == 0)
            dist[gi + DIST_SIZE * 4 + (unsigned int)0] = tmp;
        else if (dist_num == 1)
            dist[gi + DIST_SIZE * 7 + (unsigned int)0] = tmp;
        else if (dist_num == 2)
            dist[gi + DIST_SIZE * 8 + (unsigned int)0] = tmp;
    } else if (face == 3) {
        const int dist_size = max_lx / 3;
        const int dist_num = idx / dist_size;
        const int gx = idx % dist_size;
        const float tmp = buffer[idx];
        const unsigned int gi = getGlobalIdx(base_gx + gx, 1);
        if (dist_num == 0)
            dist[gi + DIST_SIZE * 2 + (unsigned int)0] = tmp;
        else if (dist_num == 1)
            dist[gi + DIST_SIZE * 5 + (unsigned int)0] = tmp;
        else if (dist_num == 2)
            dist[gi + DIST_SIZE * 6 + (unsigned int)0] = tmp;
    }
}
// Gathers distribution values at the (sparse) locations listed in
// idx_array into a linear buffer. INVALID_NODE entries are skipped;
// out-of-range indices are reported and skipped.
__global__ void CollectSparseData(
unsigned int *idx_array, float *dist,
float *buffer, int max_idx)
{
    const int tid = get_global_id(0);
    if (tid >= max_idx)
        return;
    const unsigned int node = idx_array[tid];
    if (node == INVALID_NODE)
        return;
    if (node >= DIST_SIZE * 9) {
        printf("invalid node index detected in sparse coll %d (%d, %d)\n", node, get_global_id(0), get_global_id(1));
        return;
    }
    buffer[tid] = dist[node];
}
// Scatters buffer values back into the distributions array at the
// (sparse) locations listed in idx_array. INVALID_NODE entries are
// skipped; out-of-range indices are reported and dropped.
__global__ void DistributeSparseData(
unsigned int *idx_array, float *dist,
float *buffer, int max_idx)
{
    const int tid = get_global_id(0);
    if (tid >= max_idx)
        return;
    const unsigned int node = idx_array[tid];
    if (node == INVALID_NODE)
        return;
    if (node >= DIST_SIZE * 9) {
        printf("invalid node index detected in sparse dist %d (%d, %d)\n", node, get_global_id(0), get_global_id(1));
        return;
    }
    dist[node] = buffer[tid];
}
// Copies one contiguous row (fixed gy, base_gx..base_gx+max_lx-1) of a
// macroscopic field into a linear buffer.
__global__ void CollectContinuousMacroData(
float *field, int base_gx, int max_lx, int gy,
float *buffer)
{
    const int lx = get_global_id(0);
    if (lx < max_lx) {
        buffer[lx] = field[getGlobalIdx(base_gx + lx, gy)];
    }
}
// Writes a linear buffer back into one contiguous row (fixed gy,
// base_gx..base_gx+max_lx-1) of a macroscopic field.
__global__ void DistributeContinuousMacroData(
float *field, int base_gx, int max_lx, int gy,
float *buffer)
{
    const int lx = get_global_id(0);
    if (lx < max_lx) {
        field[getGlobalIdx(base_gx + lx, gy)] = buffer[lx];
    }
}
|
14,734 | #include <stdio.h>
#include <cuda.h>
// Initialize host vectors: a[i] = i and b[i] = n - i, so that every
// element of a + b equals n (used by check()).
void init(int *a, int *b, int n) {
    int i = 0;
    while (i < n) {
        a[i] = i;
        b[i] = n - i;
        ++i;
    }
}
// Check result correctness: print "Ok" if every element of c equals n,
// "Non ok" otherwise.
void check(int *c, int n) {
    bool all_equal = true;
    for (int i = 0; i < n && all_equal; ++i) {
        all_equal = (c[i] == n);
    }
    if (all_equal)
        printf("Ok\n");
    else
        printf("Non ok\n");
}
// Cuda kernel: element-wise vector addition c = a + b, one thread per
// element, with a bounds guard for the grid tail.
__global__ void add(int *a, int *b, int *c, int n) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Streamed vector addition: the input is processed in STREAM_SIZE chunks
// distributed round-robin over STREAM_NB streams so that H2D copies,
// kernels and D2H copies from different chunks can overlap.
int main(int argc, char **argv)
{
    if (argc < 2) { printf("Give the vector size as first parameter\n"); exit(2); }
    int n = atoi(argv[1]);
    printf("Vector size is %d\n", n);
    // host pointers (pinned so cudaMemcpyAsync can actually be asynchronous)
    int *host_a, *host_b, *host_c;
    const int STREAM_NB = 4;
    const int STREAM_SIZE = 512;
    cudaHostAlloc((void **) &host_a, n*sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **) &host_b, n*sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **) &host_c, n*sizeof(int), cudaHostAllocDefault);
    // Initialize vectors
    init(host_a, host_b, n);
    cudaStream_t streams[STREAM_NB];
    int *d_A[STREAM_NB];
    int *d_B[STREAM_NB];
    int *d_C[STREAM_NB];
    for (int i = 0; i < STREAM_NB; i++)
    {
        cudaStreamCreate(&streams[i]);
        cudaMalloc((void**)&d_A[i], STREAM_SIZE*sizeof(int));
        cudaMalloc((void**)&d_B[i], STREAM_SIZE*sizeof(int));
        cudaMalloc((void**)&d_C[i], STREAM_SIZE*sizeof(int));
    }
    for (int i = 0; i < n; i += STREAM_SIZE*STREAM_NB)
    {
        for (int j = 0; j < STREAM_NB; j++)
        {
            int offset = i + STREAM_SIZE*j;
            if (offset >= n)
                break;
            // Clamp the final chunk so we never read or write past the end
            // of the host vectors when n is not a multiple of
            // STREAM_SIZE*STREAM_NB.
            int chunk = (n - offset < STREAM_SIZE) ? (n - offset) : STREAM_SIZE;
            cudaMemcpyAsync(d_A[j], host_a+offset, chunk*sizeof(int), cudaMemcpyHostToDevice, streams[j]);
            cudaMemcpyAsync(d_B[j], host_b+offset, chunk*sizeof(int), cudaMemcpyHostToDevice, streams[j]);
            add<<<(chunk + 255)/256, 256, 0, streams[j]>>>(d_A[j], d_B[j], d_C[j], chunk);
            cudaMemcpyAsync(host_c+offset, d_C[j], chunk*sizeof(int), cudaMemcpyDeviceToHost, streams[j]);
        }
    }
    cudaDeviceSynchronize();
    // Check result
    check(host_c, n);
    // Free device memory and streams
    for (int i = 0; i < STREAM_NB; i++)
    {
        cudaFree(d_A[i]);
        cudaFree(d_B[i]);
        cudaFree(d_C[i]);
        cudaStreamDestroy(streams[i]);
    }
    // Memory from cudaHostAlloc must be released with cudaFreeHost (the
    // original freed host_a/host_b with cudaFree, freed host_b twice and
    // leaked host_c).
    cudaFreeHost(host_a);
    cudaFreeHost(host_b);
    cudaFreeHost(host_c);
    return 0;
}
|
14,735 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#define START_RED printf("\033[1;31m");
#define START_GRN printf("\033[1;32m");
#define START_YLW printf("\033[1;33m");
#define START_BLU printf("\033[1;34m");
#define END printf("\033[0m");
#define ONE_SEC (1000 * 1000 * 1000)
#define HALF (1024 * 32 * 64)
#define NORM 0
#define LOOP 0
#define SEL 1
#define CASE 17
#define REVERSE 0
// Source of host data for the copy benchmark; read via call_data().
FILE * data = fopen("data.txt", "r");
// Accumulated/averaged per-size copy latencies (nanoseconds).
uint64_t latency[17] = { 0 };
const char* size_str[17] = { "64", "128", "256", "512", "1024", "1514",
"64 * 32", "64 * 64", "64 * 128", "64 * 256",
"64 * 512", "64 * 1024", "64 * 1024 * 2", "64 * 1024 * 4",
"64 * 1024 * 8", "64 * 1024 * 16", "64 * 1024 * 32"};
int size[17] = { 64, 128, 256, 512, 1024, 1514, 64 * 32, 64 * 64,
64 * 128, 64 * 256, 64 * 512, 64 * 1024,
64 * 1024 * 2, 64 * 1024 * 4, 64 * 1024 * 8,
64 * 1024 * 16, 64 * 1024 * 32};
// Timestamps in nanoseconds must be 64-bit: a 32-bit int overflows
// after ~2.1 seconds of monotonic time.
uint64_t start[17] = { 0 };
uint64_t end[17] = { 0 };
// Returns the current CLOCK_MONOTONIC time in nanoseconds.
// Fixes two defects: the argument to clock_gettime was the mojibake
// token "×pec" (HTML-mangled "&timespec"), and the value was computed
// and returned as a 32-bit int, which overflows after ~2.1 s of uptime.
uint64_t monotonic_time()
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * ONE_SEC + (uint64_t)ts.tv_nsec;
}
// Rewind the global data file and read up to size-1 characters (fgets
// semantics) into host_buf, refreshing the benchmark's source buffer.
void call_data(char * host_buf, int size)
{
    fseek(data, 0, SEEK_SET);
    fgets(host_buf, size, data);
}
// Measures the latency of a single cudaMemcpy (host->device) for each of
// the CASE buffer sizes, repeating the whole sweep test_cnt times and
// averaging into the global latency[] array. The source offset `skip` is
// either advanced deterministically (NORM) or randomized, wrapped to HALF.
void once(char * device_buf, char * host_buf, int test_cnt)
{
int i = 0;
int skip = 0;
while(i < test_cnt)
{
// Refresh the host buffer contents before each measurement round.
call_data(host_buf, HALF * 2);
skip = 0;
#if REVERSE
for(int j = CASE - 1; j >= 0; j--)
#else
for(int j = 0; j < CASE; j++)
#endif
{
#if NORM
skip += size[j];
#else
skip = rand();
#endif
if(skip >= HALF)
skip %= HALF;
// Time one blocking copy of size[j] bytes.
start[j] = monotonic_time();
cudaMemcpy(device_buf, host_buf + skip, size[j], cudaMemcpyHostToDevice);
end[j] = monotonic_time();
latency[j] += end[j] - start[j];
// Clear the destination between measurements.
cudaMemset(device_buf, 0, size[j]);
}
i++;
}
// Convert accumulated totals into per-repetition averages.
for(i = 0; i < CASE; i++)
{
latency[i] /= (uint64_t)test_cnt;
}
}
// Like once(), but times loop_cnt back-to-back copies per size and
// records the average per-copy latency, reducing timer overhead for
// small transfers. Results are averaged into the global latency[].
void loop(char * device_buf, char * host_buf, int test_cnt, int loop_cnt)
{
int i = 0;
int skip = 0;
while(i < test_cnt)
{
// Refresh the host buffer contents before each measurement round.
call_data(host_buf, HALF * 2);
skip = 0;
#if REVERSE
for(int j = CASE - 1; j >= 0; j--)
#else
for(int j = 0; j < CASE; j++)
#endif
{
#if NORM
skip += size[j];
#else
skip = rand();
#endif
if(skip >= HALF)
skip %= HALF;
start[j] = monotonic_time();
// Time loop_cnt identical copies and average below.
for(int k = 0; k < loop_cnt; k++)
{
cudaMemcpy(device_buf, host_buf + skip, size[j], cudaMemcpyHostToDevice);
}
end[j] = monotonic_time();
latency[j] += (end[j] - start[j]) / (uint64_t)loop_cnt;
}
i++;
}
// Convert accumulated totals into per-repetition averages.
for(i = 0; i < CASE; i++)
{
latency[i] /= (uint64_t)test_cnt;
}
}
// Like loop(), but scales the repeat count per size so that every size
// transfers the same total number of bytes (size[16] bytes): smaller
// buffers are copied proportionally more often. latency[] therefore holds
// total time per equal-volume batch, averaged over test_cnt rounds.
void same_cnt_loop(char * device_buf, char * host_buf, int test_cnt)
{
int i = 0;
// Total byte budget per size; loop count below is budget / size[j].
int loop_cnt = size[16];
int cur_loop_cnt;
int skip = 0;
while(i < test_cnt)
{
call_data(host_buf, HALF * 2);
skip = 0;
#if REVERSE
for(int j = CASE - 1; j >= 0; j--)
#else
for(int j = 0; j < CASE; j++)
#endif
{
cur_loop_cnt = loop_cnt / size[j];
#if NORM
skip += size[j];
#else
skip = rand();
#endif
if(skip >= HALF)
skip %= HALF;
start[j] = monotonic_time();
for(int k = 0; k < cur_loop_cnt; k++)
{
cudaMemcpy(device_buf, host_buf + skip, size[j], cudaMemcpyHostToDevice);
}
end[j] = monotonic_time();
latency[j] += end[j] - start[j];
}
i++;
}
// Average over the test_cnt rounds.
for(i = 0; i < CASE; i++)
{
latency[i] /= (uint64_t)test_cnt;
}
}
// Prints a colored summary of the benchmark configuration (selected via
// the NORM/LOOP/SEL/REVERSE macros) followed by the per-size latencies
// accumulated in the global latency[] array.
void print_result(int test_cnt)
{
START_RED
printf("\n\n______________________TEST START_______________________\n\n");
END
START_YLW
#if NORM
printf(" NORMAL DATA TEST!\n");
#else
printf(" RANDOM DATA TEST!\n");
#endif
END
START_GRN
#if LOOP
printf(" TEST WAS RUNNED %d TIMES!\n", test_cnt);
#elif SEL
printf(" TEST WAS RUNNED SAME TIMES!\n");
#else
printf(" TEST WAS RUNNED ONCE!\n");
#endif
END
#if REVERSE
START_BLU
printf(" TEST WAS RUNNED REVERSED ORDER!\n");
END
#endif
// latency[] values are nanoseconds (see monotonic_time()).
for(int i = 0; i < CASE; i++)
{
printf(" data size : %s, latency : %ld\n", size_str[i], latency[i]);
}
START_RED
printf("\n______________________TEST END_______________________\n\n\n");
END
}
// Drives the host->device copy-latency benchmark: allocates the source
// (host) and destination (device) buffers, runs the mode selected by the
// LOOP/SEL macros and prints the results.
int main(void)
{
    int test_cnt;
    char * device_buf;
    char * host_buf;
    srand(time(NULL));
    host_buf = (char *)calloc(HALF * 2, sizeof(char));
    // The copy destination must be device memory. The original allocated
    // device_buf with cudaHostAlloc (pinned *host* memory), so the
    // "HostToDevice" benchmark actually measured a host-to-host copy.
    cudaMalloc((void**)&device_buf, HALF * sizeof(char));
    cudaMemset(device_buf, 0, HALF * sizeof(char));
    printf("Enter the test_cnt\n");
    scanf("%d", &test_cnt);
#if LOOP
    int loop_cnt;
    printf("Enter the loop_cnt\n");
    scanf("%d", &loop_cnt);
    loop(device_buf, host_buf, test_cnt, loop_cnt);
#elif SEL
    same_cnt_loop(device_buf, host_buf, test_cnt);
#else
    once(device_buf, host_buf, test_cnt);
#endif
    print_result(test_cnt);
    cudaFree(device_buf);
    free(host_buf);  // calloc'd buffer was previously leaked
    fclose(data);
    return 0;
}
|
14,736 | #include <cuda.h>
#include <iostream>
#include <random>
#include <chrono>
#define N 300000
#define BLOCK_SIZE_X 1024
#define checkCudaErrors(msg) err_msg(msg, __LINE__)
// Abort the program with a diagnostic (including the source line passed
// by the checkCudaErrors macro) if a CUDA call did not return cudaSuccess.
void err_msg(cudaError_t msg, int x)
{
    if (msg == cudaSuccess)
        return;
    std::cerr << "In line: " << x << ". error: " << cudaGetErrorString(msg) << std::endl;
    exit(1);
}
// One pairwise max-reduction step over three arrays: for every i in the
// lower half, fold element i+half_size (when it exists) into element i.
// Repeated halving by the host loop leaves the maximum in element 0.
__global__ void findMax(float *x, float *y, float *z, int full_size, int half_size)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = tid; i < half_size; i += step) {
        const int j = i + half_size;
        if (j < full_size) {
            x[i] = (x[i] >= x[j]) ? x[i] : x[j];
            y[i] = (y[i] >= y[j]) ? y[i] : y[j];
            z[i] = (z[i] >= z[j]) ? z[i] : z[j];
        }
    }
}
// One pairwise min-reduction step over three arrays: for every i in the
// lower half, fold element i+half_size (when it exists) into element i.
// Repeated halving by the host loop leaves the minimum in element 0.
__global__ void findMin(float *x, float *y, float *z, int full_size, int half_size)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = tid; i < half_size; i += step) {
        const int j = i + half_size;
        if (j < full_size) {
            x[i] = (x[i] <= x[j]) ? x[i] : x[j];
            y[i] = (y[i] <= y[j]) ? y[i] : y[j];
            z[i] = (z[i] <= z[j]) ? z[i] : z[j];
        }
    }
}
// Benchmark: find per-axis min/max of N random 3D points, first on the
// CPU, then on the GPU via iterative pairwise halving reductions.
// NOTE(review): the CPU results stored in a_max_*/a_min_* are overwritten
// by the GPU copies below and the two are never compared -- TODO confirm
// whether a correctness check was intended.
int main()
{
int points_num = N;
float *x, *y, *z;
float *max_x, *max_y, *max_z, *min_x, *min_y, *min_z;
float a_max_x, a_min_x, a_max_y, a_min_y, a_max_z, a_min_z;
float *d_x, *d_y, *d_z;
std::chrono::time_point<std::chrono::system_clock> start, end;
double time;
x = new float[N];
y = new float[N];
z = new float[N];
// Fixed seed for reproducible input data.
std::mt19937 mt(10);
for (int i = 0; i < N; i++) {
x[i] = mt() / 10000.0;
y[i] = mt() / 10000.0;
z[i] = mt() / 10000.0;
}
// CPU reference pass (timed).
start = std::chrono::system_clock::now();
a_max_x = a_min_x = x[0];
a_max_y = a_min_y = y[0];
a_max_z = a_min_z = z[0];
for (int i = 1; i < N; i++) {
a_max_x = (a_max_x > x[i]) ? a_max_x : x[i];
a_min_x = (a_min_x < x[i]) ? a_min_x : x[i];
a_max_y = (a_max_y > y[i]) ? a_max_y : y[i];
a_min_y = (a_min_y < y[i]) ? a_min_y : y[i];
a_max_z = (a_max_z > z[i]) ? a_max_z : z[i];
a_min_z = (a_min_z < z[i]) ? a_min_z : z[i];
}
end = std::chrono::system_clock::now();
time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0;
std::cout << "CPU sort: " << time << "ms." << std::endl;
checkCudaErrors(cudaMalloc(&d_x, sizeof(float) * points_num));
checkCudaErrors(cudaMalloc(&d_y, sizeof(float) * points_num));
checkCudaErrors(cudaMalloc(&d_z, sizeof(float) * points_num));
checkCudaErrors(cudaMemcpy(d_x, x, sizeof(float) * points_num, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y, y, sizeof(float) * points_num, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_z, z, sizeof(float) * points_num, cudaMemcpyHostToDevice));
// GPU pass. The reductions destroy their inputs, so the working copies
// max_* and min_* are cloned from d_* first. Note the timed region
// includes these allocations and device-to-device copies.
start = std::chrono::system_clock::now();
checkCudaErrors(cudaMalloc(&max_x, sizeof(float) * points_num));
checkCudaErrors(cudaMalloc(&max_y, sizeof(float) * points_num));
checkCudaErrors(cudaMalloc(&max_z, sizeof(float) * points_num));
checkCudaErrors(cudaMalloc(&min_x, sizeof(float) * points_num));
checkCudaErrors(cudaMalloc(&min_y, sizeof(float) * points_num));
checkCudaErrors(cudaMalloc(&min_z, sizeof(float) * points_num));
checkCudaErrors(cudaMemcpy(max_x, d_x, sizeof(float) * points_num, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(max_y, d_y, sizeof(float) * points_num, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(max_z, d_z, sizeof(float) * points_num, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_x, d_x, sizeof(float) * points_num, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_y, d_y, sizeof(float) * points_num, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_z, d_z, sizeof(float) * points_num, cudaMemcpyDeviceToDevice));
// Halve the active range (rounding up) until one element remains.
while (points_num > 1) {
int half_points_num = (points_num - 1) / 2 + 1;
int block_x = (half_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_points_num;
int grid_x = (half_points_num - 1) / block_x + 1;
findMax<<<grid_x, block_x>>>(max_x, max_y, max_z, points_num, half_points_num);
checkCudaErrors(cudaGetLastError());
findMin<<<grid_x, block_x>>>(min_x, min_y, min_z, points_num, half_points_num);
checkCudaErrors(cudaGetLastError());
points_num = half_points_num;
}
checkCudaErrors(cudaDeviceSynchronize());
// Element 0 of each working array now holds the reduced value.
checkCudaErrors(cudaMemcpy(&a_max_x, max_x, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&a_max_y, max_y, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&a_max_z, max_z, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&a_min_x, min_x, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&a_min_y, min_y, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&a_min_z, min_z, sizeof(float), cudaMemcpyDeviceToHost));
end = std::chrono::system_clock::now();
time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0;
std::cout << "GPU sort: " << time << "ms." << std::endl;
checkCudaErrors(cudaFree(max_x));
checkCudaErrors(cudaFree(max_y));
checkCudaErrors(cudaFree(max_z));
checkCudaErrors(cudaFree(min_x));
checkCudaErrors(cudaFree(min_y));
checkCudaErrors(cudaFree(min_z));
checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_y));
checkCudaErrors(cudaFree(d_z));
delete[] x;
delete[] y;
delete[] z;
return 0;
} |
14,737 | /*******************************************************************************
* use weno derivative to calculate the numerical Hamiltonian for reinitialization
* scheme
******************************************************************************/
// Larger of two doubles (x wins ties).
__device__ inline
double max2(double x, double y)
{
    if (x < y)
        return y;
    return x;
}
// Smaller of two doubles (y wins ties).
__device__ inline
double min2(double x, double y)
{
    if (x < y)
        return x;
    return y;
}
// Minmod limiter: 0 when the signs differ, otherwise the argument of
// smaller magnitude (y wins magnitude ties).
__device__ inline
double min_mod(double x, double y)
{
    if (x * y < 0)
        return 0;
    return (fabs(x) < fabs(y)) ? x : y;
}
// Argument with the smaller magnitude (y wins ties).
__device__ inline
double min_abs(double x, double y)
{
    if (fabs(x) < fabs(y))
        return x;
    return y;
}
// Sign of x as +/-1.0. Note sign(0) == -1.0.
__device__ inline
double sign(double x)
{
    if (x > 0)
        return 1.0;
    return -1.0;
}
// Convert a (row, col, page) subindex to a column-major linear index.
// NOTE(review): despite the original comment claiming periodic boundary
// conditions, out-of-range indices are CLAMPED to [0, dim-1], not wrapped.
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
// min2/max2 take doubles; the int arguments are converted and the
// result truncated back to int on assignment.
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
// given 4 points (0,v0),(s,v1),(2*s,v2),(3*s,v3) and v1*v2<0,
// fit the cubic interpolant p3(x) = c0 + c1*x + c2*x^2 + c3*x^3 through
// them and return the signed distance from the node at x=s to the
// interface, i.e. (Newton root of p3) - s.
__device__ inline
double cubic_distance(double v0, double v1, double v2, double v3, double s)
{
// calculate the interpolant coefficient
double c0 = v0;
double c1 = ( 3.0 * (v1-v0) - 3.0/2.0 * (v2-v0) + 1.0/3.0 * (v3-v0) ) / s;
double c2 = (-5.0/2.0 * (v1-v0) + 2.0 * (v2-v0) - 1.0/2.0 * (v3-v0) ) / pow(s,2);
double c3 = ( 1.0/2.0 * (v1-v0) - 1.0/2.0 * (v2-v0) + 1.0/6.0 * (v3-v0) ) / pow(s,3);
/* It is EXTREMELY important to use float point numbers 1.0/2.0 instead of 1/2
* the latter will give (double)(int)(1/2) = 0.0 instead of 0.5
*/
// now use Newton's method to find root
// initial guess: linear interpolation of the sign change between v1 and v2
double xc = s + s * v1 / (v1 - v2); // initial guess
int iter = 0;
int const max_iter = 50;
double const max_error = 1e-14;
double diff = 1;
while( diff>max_error && iter<max_iter){
// Newton's method
// NOTE(review): no guard against a zero derivative d; presumably the
// interpolant is monotone near the sign change -- TODO confirm.
double f = c0 + c1 * xc + c2 * xc*xc + c3 * xc*xc*xc;
double d = c1 + 2 * c2 * xc + 3 * c3 * xc * xc;
double new_xc = xc - f / d;
diff = fabs (f / d);
iter++;
xc = new_xc;
}
return (xc - s);
}
// Fifth-order WENO one-sided derivative from five consecutive one-sided
// differences v1..v5: blends three third-order candidate stencils with
// smoothness-dependent weights (linear weights 0.1/0.6/0.3).
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
// different choices of ENO derivatives
double phi1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
double phi2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
double phi3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
// smoothness parameter
double S1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
double S2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
double S3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
// epsilon keeps the weights finite on perfectly smooth data
double epsilon = 1e-6;
double alpha1 = 0.1 / pow( (S1 + epsilon), 2);
double alpha2 = 0.6 / pow( (S2 + epsilon), 2);
double alpha3 = 0.3 / pow( (S3 + epsilon), 2);
// weights for each stencil
double sum = alpha1 + alpha2 + alpha3;
double omega1 = alpha1 / sum;
double omega2 = alpha2 / sum;
double omega3 = alpha3 / sum;
return (omega1*phi1 + omega2*phi2 + omega3*phi3);
}
// given a stencil across the boundary: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// create a new stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3)
// centered at p4 (x0 = 0). If the interface lies within one grid spacing
// (r1 < ds or l1 < ds), the boundary point (value 0 at the interface
// location) replaces the first regular neighbor on that side; otherwise
// the regular uniform stencil is used. Outputs are the h*/x* references.
// NOTE(review): l2, l3, r2 and r3 are accepted but unused here.
__device__ inline
void select_stencil(double & h3m, double & h2m, double & h1m, double & h0, double & h1, double & h2, double & h3, double & x3m, double & x2m, double & x1m, double & x0, double & x1, double & x2, double & x3, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
h0 = p4; x0 = 0.0;
// Right side: interface closer than one spacing replaces p5's slot.
if(r1<ds){
x1 = r1;
x2 = ds;
x3 = 2*ds;
h1 = 0.0;
h2 = p5;
h3 = p6;
}else{
x1 = ds;
x2 = 2*ds;
x3 = 3*ds;
h1 = p5;
h2 = p6;
h3 = p7;
}
// Left side: interface closer than one spacing replaces p3's slot.
if(l1<ds){
x1m = -l1;
x2m = - ds;
x3m = - 2*ds;
h1m = 0.0;
h2m = p3;
h3m = p2;
}else{
x1m = -ds;
x2m = - 2*ds;
x3m = - 3*ds;
h1m = p3;
h2m = p2;
h3m = p1;
}
}
// for stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3)
// (possibly nonuniform, including boundary nodes) compute cubic ENO
// one-sided derivatives at (x0,h0): d_fore (forward) and d_back (backward).
// Built from Newton divided differences with minmod/min-abs selection of
// the least-oscillatory higher-order corrections.
__device__ inline
void ENO_cubic_derivative(double & d_fore, double & d_back, double h3m, double h2m, double h1m, double h0, double h1, double h2, double h3, double x3m, double x2m, double x1m, double x0, double x1, double x2, double x3)
{
// divided differences (first order)
double d1_2_5 = (h3 - h2) / (x3 - x2) ;
double d1_1_5 = (h2 - h1) / (x2 - x1) ;
double d1_0_5 = (h1 - h0) / (x1 - x0) ;
double d1_m0_5 = (h0 - h1m) / (x0 - x1m);
double d1_m1_5 = (h1m - h2m) / (x1m - x2m);
double d1_m2_5 = (h2m - h3m) / (x2m - x3m);
// second-order divided differences
double d2_2 = (d1_2_5 - d1_1_5) / (x3 - x1) ;
double d2_1 = (d1_1_5 - d1_0_5) / (x2 - x0) ;
double d2_0 = (d1_0_5 - d1_m0_5) / (x1 - x1m);
double d2_m1 = (d1_m0_5 - d1_m1_5) / (x0 - x2m);
double d2_m2 = (d1_m1_5 - d1_m2_5) / (x1m - x3m);
// third-order divided differences
double d3_1_5 = (d2_2 - d2_1) / (x3 - x0) ;
double d3_0_5 = (d2_1 - d2_0) / (x2 - x1m);
double d3_m0_5 = (d2_0 - d2_m1) / (x1 - x2m);
double d3_m1_5 = (d2_m1 - d2_m2) / (x0 - x3m);
// ENO selection of the cubic correction on each side
double a1 = (x0 - x1m) * (x0 - x2m) * min_abs(d3_m0_5, d3_m1_5);
double a2 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double a = (fabs(d2_m1) < fabs(d2_0)) ? a1 : a2;
double b1 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double b2 = (x0 - x1) * (x0 - x2) * min_abs(d3_0_5, d3_1_5);
double b = (fabs(d2_0) < fabs(d2_1)) ? b1 : b2;
d_back = d1_m0_5 + min_mod(d2_m1,d2_0) * (x0 - x1m) + a;
d_fore = d1_0_5 + min_mod(d2_0, d2_1) * (x0 - x1) + b;
}
// calculate weno derivative at p4: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// where px are level set function values at node x
// lx, rx are distance to the left/right node
// Away from the interface the standard fifth-order WENO one-sided
// derivatives are used; at nodes whose immediate neighbor changes sign,
// a boundary-aware cubic ENO stencil is used instead.
__device__ inline
void weno_derivative_boundary(double & d_fore, double & d_back, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
// interface lies between p3/p4 or p4/p5?
bool cross_interface = p3*p4<0 || p4*p5<0;
if(!cross_interface){
double v1 = (p2 - p1) / ds;
double v2 = (p3 - p2) / ds;
double v3 = (p4 - p3) / ds;
double v4 = (p5 - p4) / ds;
double v5 = (p6 - p5) / ds;
double v6 = (p7 - p6) / ds;
d_back = weno_onesided_derivative(v1,v2,v3,v4,v5);
d_fore = weno_onesided_derivative(v6,v5,v4,v3,v2);
}// if not a node IMMEDIATELY adjacent to the boundary, calculate weno derivatives as usual
else{
double h3m,h2m,h1m,h0,h1,h2,h3;
double x3m,x2m,x1m,x0,x1,x2,x3;
select_stencil(h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds);
ENO_cubic_derivative(d_fore,d_back,h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3);
}// for nodes IMMEDIATELY adjacent to the boundary, use cubic ENO interpolant
}
// make corrections to xpr etc: wherever the level set function changes
// sign between a node and its +x/+y/+z neighbor, store the sub-grid
// distance to the interface (via cubic interpolation) in xpr/ypf/zpu at
// the node and the complementary distance in xpl/ypb/zpd at the neighbor.
// NOTE(review): num_ele is accepted but unused; out-of-range neighbor
// subindices are clamped by sub2ind, not wrapped.
__global__
void boundary_correction(double * xpr, double * xpl, double * ypf, double * ypb, double * zpu, double * zpd, double const * lsf, int num_ele, int rows, int cols, int pges, double dx, double dy, double dz)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
// grid-tail guard
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
double f2;
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double f0 = lsf[ind];
// x direction: sign change between this node and its +x neighbor
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
f2 = lsf[right];
if(f0*f2<0){
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
xpr[ind] = cubic_distance(lsf[left], f0, f2, lsf[right2], dx);
xpl[right] = dx - xpr[ind];
}
// y direction
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
f2 = lsf[front];
if(f0*f2<0){
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
ypf[ind] = cubic_distance(lsf[back], f0, f2, lsf[front2], dy);
ypb[front] = dy - ypf[ind];
}
// z direction
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
f2 = lsf[up];
if(f0*f2<0){
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
zpu[ind] = cubic_distance(lsf[down], f0, f2, lsf[up2], dz);
zpd[up] = dz - zpu[ind];
}
}
// One reinitialization step of the level set equation: computes boundary-
// aware WENO one-sided derivatives along x, y and z at each node and
// evaluates the Godunov Hamiltonian |grad(phi)| - 1, scaled by the local
// pseudo-timestep deltat. `mask` selects the upwinding direction (the two
// branches differ only in the sign convention and the min/max swap).
// NOTE(review): num_ele is accepted but unused.
__global__
void re_step(double * step, double const * lsf, bool const * mask, double const * deltat, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
// p1..p7: level set values on the 7-point stencil; r*/l*: distances to
// the interface (or to the next node) on each side.
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
p4 = lsf[ind];
// x-direction stencil and derivatives
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
double xR, xL;
weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
// y-direction stencil and derivatives
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
double yF, yB;
weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
// z-direction stencil and derivatives
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
double zU, zD;
weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
// Godunov upwinding: choose one-sided derivatives by sign, per mask.
if (mask[ind]) {
step[ind] = ( sqrt( max2(pow(min2(0,xL),2),pow(max2(0,xR),2)) +
max2(pow(min2(0,yB),2),pow(max2(0,yF),2)) +
max2(pow(min2(0,zD),2),pow(max2(0,zU),2)) ) - 1)
* deltat[ind] * (-1.);
} else{
step[ind] = ( sqrt( max2(pow(max2(0,xL),2),pow(min2(0,xR),2)) +
max2(pow(max2(0,yB),2),pow(min2(0,yF),2)) +
max2(pow(max2(0,zD),2),pow(min2(0,zU),2)) ) - 1)
* deltat[ind] * (1.);
}
}
|
14,738 | #define TW 10
// Tiled matrix multiply: c (X x Z) = a (X x Y) * b (Y x Z), row-major,
// using TW x TW shared-memory tiles with zero-padding for the ragged
// edges. NOTE(review): assumes the kernel is launched with
// blockDim.x == blockDim.y == TW (== 10) -- TODO confirm at the call site.
__global__ void x_dot_w(float *a, float *b, float *c, const unsigned int X, const unsigned int Y, const unsigned int Z)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
float temp = 0;
// One tile of a and one tile of b per iteration of the t-loop.
__shared__ float S_X [10][TW];
__shared__ float S_Y [10][TW];
for (int t = 0; t < (Y-1)/TW + 1; t++) {
// Stage the tiles; out-of-range elements are zero so they do not
// contribute to the dot product.
if(row < X && (t* TW +tx) < Y )
S_X[ty][tx] = a[row * Y + t*TW + tx];
else
S_X[ty][tx] = 0.0;
if ( (t* TW + ty) < Y && col < Z )
S_Y[ty][tx] = b[(t*TW + ty)* Z + col];
else
S_Y[ty][tx] = 0.0;
// All threads reach this barrier (the branches above only select the
// value stored, they do not diverge around the barrier).
__syncthreads();
for (int k = 0; k < TW; k++) {
temp+= S_X[ty][k] * S_Y[k][tx];
}
// Keep the next iteration from overwriting tiles still being read.
__syncthreads();
}
if(row < X && col <Z) {
c[row * Z + col] = temp;
}
}
|
14,739 | typedef int int32_t;
#define CUCL_GLOBAL_KERNEL extern "C" __global__
#define GASQ
#define GLOB_ID_1D (blockDim.x * blockIdx.x + threadIdx.x)
#define LOC_ID_1D (threadIdx.x)
#define GRP_ID_1D (blockIdx.x)
#define LOC_SZ_1D (blockDim.x)
#define LOCSHAR_MEM __shared__
#define LSMASQ
#define BARRIER_SYNC __syncthreads()
CUCL_GLOBAL_KERNEL void bconv__out_chan_1000__in_chan_1024__y_1__x_1__img_1__chan_1000( GASQ float const * const filts, // CUCL IN out_chan:in_chan:y:x
    GASQ float const * const out_grad_loss, // CUCL IN img:chan:y:x
    GASQ float * const in_grad_loss ) // CUCL OUT img:chan:y:x
{
    // Backward-data pass of a 1x1 convolution (machine-generated boda-style
    // kernel, hand-repaired): accumulates in_grad_loss (1024 chans, 6x6) over
    // the 1000 out_grad_loss channels (filt_elem_ix) for a single image.
    //
    // BUG FIX: the original "outs_to_filts_strip" switch(work_pel) had lost
    // all of its `case` labels; statements before the first case label in a
    // switch are unreachable, so the whole copy was dead code and the store
    // phase wrote uninitialized filts_strip values. The fully unrolled
    // loads/FMAs/stores are also re-rolled into constant-trip loops (which
    // the compiler unrolls), preserving the original index arithmetic.
    //
    // Thread layout (inferred from the tiling constants -- 15 out_ix tiles x
    // 5 pel tiles, 8x8 register tile each): assumes 75 threads per block.
    LOCSHAR_MEM float in_smem[40];     // 5 pel tiles * 8 pels
    LOCSHAR_MEM float filts_smem[120]; // 15 out_ix tiles * 8 chans
    float out_tile[8*8] = {0}; // output tile for this thread, in registers
    float filts_strip[8]; // strip across output (in_grad_loss) channels
    float in_strip[8];    // strip across pels
    int32_t const blk_out_ix = (GRP_ID_1D%9)*15*8;
    int32_t const blk_pel_ix = (GRP_ID_1D/9)*5*8;
    for( int32_t filt_elem_ix = 0; filt_elem_ix != 1024; ++filt_elem_ix ) {
        BARRIER_SYNC;
        // Stage this channel's out_grad_loss strip into shared memory.
        // (The original looped `i != 1` with the same guard; folded to one if.)
        if( LOC_ID_1D < 40 ) {
            int32_t const pel_ix = blk_pel_ix + LOC_ID_1D;
            float v = 0;
            // y = (pel_ix/6)%6 and x = pel_ix%6 are always inside [0,6), so
            // the original bounds checks reduce to the image-index check.
            if( (pel_ix/36) < 1 ) {
                v = out_grad_loss[(pel_ix/36)*36000 + filt_elem_ix*36 +
                                  ((pel_ix/6)%6)*6 + (pel_ix%6)];
            }
            in_smem[LOC_ID_1D] = v;
        }
        // Stage one 120-wide strip of the (1x1) filters for this channel.
        for( int32_t i = 0; i != 2; ++i ) {
            int32_t const sidx = LOC_ID_1D + LOC_SZ_1D*i;
            if( sidx < 120 ) {
                int32_t const out_ix = blk_out_ix + sidx;
                float v = 0;
                // ky = kx = 0 for a 1x1 kernel, so only range checks remain;
                // only out_grad channels below 1000 are real.
                if( out_ix < 1024 && filt_elem_ix < 1000 ) {
                    v = filts[filt_elem_ix*1024 + out_ix];
                }
                filts_smem[sidx] = v;
            }
        }
        BARRIER_SYNC;
        // Load this thread's strips from shared memory ...
        for( int32_t e = 0; e != 8; ++e ) {
            filts_strip[e] = filts_smem[(LOC_ID_1D%15)*8+e];
            in_strip[e] = in_smem[(LOC_ID_1D/15)*8+e];
        }
        // ... and accumulate the 8x8 outer product into the register tile.
        for( int32_t p = 0; p != 8; ++p ) {
            for( int32_t o = 0; o != 8; ++o ) {
                out_tile[p*8+o] += filts_strip[o]*in_strip[p];
            }
        }
    }
    // Store phase: write one row of the register tile per pel.
    int32_t pel_ix = blk_pel_ix + (LOC_ID_1D/15)*8; // first pel_ix for this thread
    for( int32_t work_pel = 0; work_pel < 8; ++work_pel, ++pel_ix ) {
        int32_t out_ix = blk_out_ix + (LOC_ID_1D%15)*8; // first out_ix for this thread
        // Copy row `work_pel` of the tile (replaces the label-less switch).
        for( int32_t e = 0; e != 8; ++e ) {
            filts_strip[e] = out_tile[work_pel*8+e];
        }
        for( int32_t e = 0; e != 8; ++e, ++out_ix ) {
            // out_ix%1 == 0 in the original igl_y/igl_x formulas, so they
            // depend only on pel_ix and are always inside the 6x6 image;
            // only the range checks remain.
            if( out_ix < 1024 && (pel_ix/36) < 1 ) {
                in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
                              ((pel_ix/6)%6)*6 + (pel_ix%6) ] = filts_strip[e];
            }
        }
    }
}
|
14,740 |
/*
autor fredy m
uaem
desonses@gmail.com para mas comentarios
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <math.h>
#include <cuda_fp16.h>
/*
En este ejercicio se implemente un prog para resolver ecuaciones de segundo grado.
*/
// Device: kernel que se ejecuta en la GPU
// Single-thread device kernel: stores the sum of the two scalars into *c.
__global__ void suma_GPU(int a, int b, int *c)
{
    int sum = a + b;
    *c = sum;
}
// solve equation second grade
// Solve a*x^2 + b*x + c = 0 on the device, writing the two roots (truncated
// to int) into *x1 and *x2. Assumes a != 0 and a non-negative discriminant;
// a negative discriminant yields NaN from sqrtf and garbage int results --
// callers should check beforehand.
__global__ void solve_GPU(int a, int b, int c ,int *x1, int *x2)
{
    int raiz = powf(b, 2) - (4 * a * c); // discriminant b^2 - 4ac
    int j = 2 * a;
    // BUG FIX: the original "-b / 2 * a" parses as (-b/2)*a; the quadratic
    // formula needs -b / (2a). It only happened to work because the caller
    // passed a == 1.
    int i = -b / j;
    *x1 = i + sqrtf(raiz) / j;
    *x2 = i - sqrtf(raiz) / j;
}
// HOST: funcion llamada y ejecutada desde el host
// Host-side helper: returns the sum of two integers.
__host__ int suma_CPU(int a, int b)
{
    int result = a;
    result += b;
    return result;
}
// Driver: solves x^2 + 8x - 6 = 0 on the GPU and prints the integer roots.
int main(int argc, char** argv)
{
    // host/device root storage
    int *hst_x1;
    int *hst_x2;
    int *dev_x1;
    int *dev_x2;
    // coefficients of the equation (C capitalized to avoid shadowing)
    int a = 1, b = 8, C = -6;
    // host allocations
    hst_x1 = (int*)malloc(sizeof(int));
    hst_x2 = (int*)malloc(sizeof(int));
    // device allocations
    cudaMalloc((void**)&dev_x1, sizeof(int));
    cudaMalloc((void**)&dev_x2, sizeof(int));
    // launch the single-thread solver kernel
    solve_GPU<<<1,1>>>(a, b, C, dev_x1, dev_x2);
    // copy both roots back (cudaMemcpy synchronizes with the kernel)
    cudaMemcpy(hst_x1, dev_x1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(hst_x2, dev_x2, sizeof(int), cudaMemcpyDeviceToHost);
    printf("resultado: \n");
    printf("x1 = %2d ,x2 = %2d \n", *hst_x1, *hst_x2);
    // wait for the user before exiting
    printf("\npulsa INTRO para finalizar...");
    // BUG FIX: removed fflush(stdin) -- flushing an input stream is
    // undefined behavior; getchar() alone waits for INTRO.
    char tecla = getchar();
    (void)tecla;
    // BUG FIX: the original free()d hst_c and cudaFree()d dev_c, which were
    // never allocated (their allocations were commented out) -- undefined
    // behavior on uninitialized pointers. Release what was actually
    // allocated instead (the original leaked all four buffers).
    free(hst_x1);
    free(hst_x2);
    cudaFree(dev_x1);
    cudaFree(dev_x2);
    return 0;
}
|
14,741 | #include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
// Each thread prints the index of the block it belongs to.
__global__ void hello()
{
    int block = blockIdx.x;
    printf("I'm thread in block %d\n", block);
}
// Launches NUM_BLOCKS blocks of BLOCK_WIDTH thread(s), each printing its
// block index, then flushes device printf output.
int main()
{
    // Launch kernel
    hello<<<NUM_BLOCKS,BLOCK_WIDTH>>>();
    // Surface launch-configuration errors immediately (kernel launches do
    // not return an error themselves).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Force the printf()s to flush
    cudaDeviceSynchronize();
    printf("That's all ;) ");
    return 0;
}
14,742 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
// Time header
#include <time.h>
//Password Cracking using CUDA
// Expands a 4-character candidate ("LLDD": two letters, two digits) into its
// 10-character "encrypted" form: each letter yields 3 derived characters and
// each digit yields 2, via fixed ASCII offsets, then each output is wrapped
// back into its alphabet. Returns a NUL-terminated device-heap buffer;
// NOTE(review): callers in this file never free it (device-heap leak), and
// the malloc result is not checked for NULL.
__device__ char* encryptDecrypt(char* tempPassword){
    char * generatedPassword = (char *) malloc(sizeof(char) * 11);
    // Outputs 0-2 derive from letter [0], 3-5 from letter [1].
    generatedPassword[0] = tempPassword[0] + 2;
    generatedPassword[1] = tempPassword[0] - 2;
    generatedPassword[2] = tempPassword[0] + 1;
    generatedPassword[3] = tempPassword[1] + 3;
    generatedPassword[4] = tempPassword[1] - 3;
    generatedPassword[5] = tempPassword[1] - 1;
    // Outputs 6-7 derive from digit [2], 8-9 from digit [3].
    generatedPassword[6] = tempPassword[2] + 2;
    generatedPassword[7] = tempPassword[2] - 2;
    generatedPassword[8] = tempPassword[3] + 4;
    generatedPassword[9] = tempPassword[3] - 4;
    generatedPassword[10] = '\0';
    // Wrap each character back into its alphabet: indices 0-5 must stay in
    // 'a'..'z' (97..122), indices 6-9 in '0'..'9' (48..57).
    for(int i =0; i<10; i++){
        if(i >= 0 && i < 6){
            if(generatedPassword[i] > 122){
                generatedPassword[i] = (generatedPassword[i] - 122) + 97;
            }else if(generatedPassword[i] < 97){
                generatedPassword[i] = (97 - generatedPassword[i]) + 97;
            }
        }else{
            if(generatedPassword[i] > 57){
                generatedPassword[i] = (generatedPassword[i] - 57) + 48;
            }else if(generatedPassword[i] < 48){
                generatedPassword[i] = (48 - generatedPassword[i]) + 48;
            }
        }
    }
    return generatedPassword;
}
// One thread per "LLDD" candidate password: the two letters are chosen by
// the 2-D block index, the two digits by the 2-D thread index. A candidate
// is reported when its encrypted form equals the hard-coded ciphertext.
__global__ void crack(char * alphabet, char * numbers){
    char candidate[4];
    candidate[0] = alphabet[blockIdx.x];
    candidate[1] = alphabet[blockIdx.y];
    candidate[2] = numbers[threadIdx.x];
    candidate[3] = numbers[threadIdx.y];
    char* encryptedPassword = "xtwcvx5171"; //vy33
    char* search = encryptDecrypt(candidate);
    // Compare the two NUL-terminated strings character by character; any
    // difference (including differing lengths) marks a mismatch.
    int differs = 0;
    const char *cp = encryptedPassword;
    const char *sp = search;
    while (*cp != '\0' || *sp != '\0') {
        if (*cp != *sp) {
            differs = 1;
            break;
        }
        ++cp;
        ++sp;
    }
    if (!differs) {
        printf("Password Found: %c%c%c%c \n", candidate[0],candidate[1],candidate[2],candidate[3]);
    }
}
// Computes end - start in nanoseconds into *diff.
// Returns 0 when the interval is positive, non-zero otherwise
// (contract unchanged: !(*diff > 0)).
int calculate_time(struct timespec *start, struct timespec *end,
                   long long int *diff)
{
    long long int secs = end->tv_sec - start->tv_sec;
    long long int nanos = end->tv_nsec - start->tv_nsec;
    // Borrow a second when the nanosecond field underflows.
    if (nanos < 0)
    {
        secs -= 1;
        nanos += 1000000000;
    }
    *diff = secs * 1000000000 + nanos;
    return !(*diff > 0);
}
// Driver: brute-forces all 26*26*10*10 "LLDD" candidates on the GPU and
// reports the wall-clock time of the search.
int main(int argc, char ** argv){
    struct timespec start, end;
    long long int time_used;
    char cpuLetters[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'};
    // FIX: the digit table was declared char[26] (zero-padded) and copied as
    // 26 bytes even though only 10 digits exist; size it and its device
    // buffer to 10.
    char cpuDigits[10] = {'0','1','2','3','4','5','6','7','8','9'};
    char * gpuLetters;
    cudaMalloc( (void**) &gpuLetters, sizeof(char) * 26);
    cudaMemcpy(gpuLetters, cpuLetters, sizeof(char) * 26, cudaMemcpyHostToDevice);
    char * gpuDigits;
    cudaMalloc( (void**) &gpuDigits, sizeof(char) * 10);
    cudaMemcpy(gpuDigits, cpuDigits, sizeof(char) * 10, cudaMemcpyHostToDevice);
    clock_gettime(CLOCK_MONOTONIC_RAW, &start);
    // 26x26 blocks (letter pairs) x 10x10 threads (digit pairs)
    crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuLetters, gpuDigits );
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC_RAW, &end);
    calculate_time(&start, &end, &time_used);
    printf("Time taken: %f seconds OR %lld Nano Seconds\n", (time_used / 1.0e9), (time_used));
    // Release device buffers (the original leaked both).
    cudaFree(gpuLetters);
    cudaFree(gpuDigits);
    return 0;
}
|
14,743 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
// Reads a whitespace-separated series of doubles from stdin and times the
// host-to-device transfer of the series into a thrust::device_vector.
int main() {
    std::vector<double> stocks;
    // BUG FIX: the original tested the stream *before* extraction
    // (`while (std::cin)`), so the final failed read still pushed one
    // spurious element. Test the extraction itself instead.
    double stock_day;
    while (std::cin >> stock_day) {
        stocks.push_back(stock_day);
    }
    // Time only the host->device copy of the series.
    auto start = std::chrono::steady_clock::now();
    thrust::device_vector<double> dev(stocks);
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> time_seconds = end - start;
    std::cout << "time: " << time_seconds.count() << "s\n";
}
14,744 | #include "includes.h"
// Middlebury-style optical-flow colour coding: maps each flow vector
// (u,v)/flowscale to an RGB colour via a 55-entry colour wheel.
// One thread per pixel over a 2-D launch covering width x height, with
// `stride` floats per image row. Output float3 holds x=B, y=G, z=R in [0,1].
__global__ void ComputeColorKernel(float *u, float *v, int width, int height, int stride, float3 *uvRGB, float flowscale) {
    int r = blockIdx.y * blockDim.y + threadIdx.y; // current row
    int c = blockIdx.x * blockDim.x + threadIdx.x; // current column
    if ((r < height) && (c < width))
    {
        int pos = c + stride * r;
        float du = u[pos] / flowscale;
        float dv = v[pos] / flowscale;
        int ncols = 55;
        float rad = sqrtf(du * du + dv * dv); // flow magnitude
        // Angle mapped from [-pi,pi] to [-1,1]; atan2f avoids a silent
        // promotion to double math (the original called atan2).
        float a = atan2f(-dv, -du) / 3.14159f;
        float fk = (a + 1) / 2 * ((float)ncols - 1); // fractional wheel index
        int k0 = floorf(fk); // colorwheel index lower bound
        int k1 = k0 + 1;     // colorwheel index upper bound
        // BUG FIX: wrap to 0, not 1 -- the "= 1" is a leftover from 1-based
        // MATLAB indexing and skipped wheel entry 0 at the seam.
        if (k1 == ncols) {
            k1 = 0;
        }
        float f = fk - (float)k0; // interpolation weight between k0 and k1
        // 55-entry colour wheel, split per channel.
        float colorwheelR[55] = { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
            255, 213, 170, 128, 85, 43, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 19, 39, 58, 78, 98, 117, 137, 156,
            176, 196, 215, 235, 255, 255, 255, 255, 255, 255 };
        float colorwheelG[55] = { 0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238,
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 232, 209, 186, 163,
            140, 116, 93, 70, 47, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        float colorwheelB[55] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 63, 127, 191, 255, 255, 255, 255, 255,
            255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
            255, 255, 255, 255, 255, 213, 170, 128, 85, 43 };
        // Linear interpolation between adjacent wheel entries, normalized to [0,1].
        float colR = (1 - f) * (colorwheelR[k0] / 255.0f) + f * (colorwheelR[k1] / 255.0f);
        float colG = (1 - f) * (colorwheelG[k0] / 255.0f) + f * (colorwheelG[k1] / 255.0f);
        float colB = (1 - f) * (colorwheelB[k0] / 255.0f) + f * (colorwheelB[k1] / 255.0f);
        if (rad <= 1) {
            // In-range magnitude: desaturate toward white as rad -> 0.
            colR = 1 - rad * (1 - colR);
            colG = 1 - rad * (1 - colG);
            colB = 1 - rad * (1 - colB);
        }
        else {
            // Out-of-range magnitude: darken (float literals avoid double math).
            colR = colR * 0.75f;
            colG = colG * 0.75f;
            colB = colB * 0.75f;
        }
        // Note the channel order: stored as BGR within the float3.
        uvRGB[pos].z = (colR);
        uvRGB[pos].y = (colG);
        uvRGB[pos].x = (colB);
    }
}
14,745 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Computes the optimal first-period control of a stochastic backward
// induction: averages the period-1 continuation values per control, adds the
// single-period profit for valid controls, picks the lowest-cost control,
// broadcasts it to all paths, and writes summary stats[0..2].
// Sequential kernel: no thread indexing is used, so it appears intended for
// a <<<1,1>>> launch -- confirm at the call site.
__global__ void firstPeriodInduction(int noPaths, int nYears, int noSpecies, int noControls, float timeStep, float unitCost, float unitRevenue, float rrr, int noFuels, int noCommodities, float* Q, float* fuelCosts, float* totalPops, float* speciesParams, int* controls, float* aars, float* uComposition, float* uResults, int* fuelIdx, float* condExp, int* optCont, float* stats) {
    // Per-control scratch buffers on the device heap.
    // NOTE(review): malloc results are not checked for NULL.
    float *payoffs, *dataPoints;
    payoffs = (float*)malloc(noControls*sizeof(float));
    dataPoints = (float*)malloc(noControls*sizeof(float));
    bool* valid;
    valid = (bool*)malloc(noControls*sizeof(bool));
    float unitFuel = 0.0;
    float orePrice = 0.0;
    // Unit fuel cost: dot product of fuel costs with their uncertainty results.
    for (int ii = 0; ii < noFuels; ii++) {
        unitFuel += fuelCosts[ii]*uResults[fuelIdx[ii]];
    }
    // Unit ore revenue: dot product of composition with commodity prices.
    for (int ii = 0; ii < noCommodities; ii++) {
        orePrice += uComposition[ii]*uResults[noFuels + ii];
    }
    for (int ii = 0; ii < noControls; ii++) {
        dataPoints[ii] = 0.0;
        payoffs[ii] = 0.0;
    }
    // Average the continuation values (condExp at t=1) over all paths that
    // used each control in the first year.
    for (int ii = 0; ii < noPaths; ii++) {
        int control = controls[ii*nYears];
        payoffs[control] += condExp[ii+noPaths];
        dataPoints[control]++;
    }
    for (int ii = 0; ii < noControls; ii++) {
        if (dataPoints[ii] > 0) {
            // Discount the mean continuation value back one period.
            payoffs[ii] = payoffs[ii]/(dataPoints[ii]*(1+rrr*timeStep/
                100));
        } else {
            // BUG FIX: the original `break` abandoned this loop, leaving
            // payoffs[] and valid[] for all later controls uninitialized --
            // and the best-control search below reads payoffs[] through
            // isfinite(). Mark untried controls invalid and continue instead.
            payoffs[ii] = NAN;
            valid[ii] = false;
            continue;
        }
        valid[ii] = true;
        for (int jj = 0; jj < noSpecies; jj++) {
            // Adjusted population under this control; any species below its
            // threshold invalidates the control (zero-flow, ii==0, is exempt).
            // NOTE(review): speciesParams[noSpecies*jj + 3] looks as if the
            // stride and species index may be swapped -- confirm the layout.
            float adjPop = totalPops[jj]*aars[jj*noControls + ii];
            if (adjPop < speciesParams[noSpecies*jj + 3] && ii > 0) {
                valid[ii] = false;
                break;
            }
        }
        if (valid[ii]) {
            // Add the (undiscounted) single-period profit for this control
            // under the prevailing stochastic factors.
            payoffs[ii] += Q[ii]*(unitCost + unitFuel - unitRevenue*
                orePrice);
            // Clamp regression anomalies: payoffs are costs, so at most 0.
            if (payoffs[ii] > 0) {
                payoffs[ii] = 0.0;
            }
        } else {
            payoffs[ii] = NAN;
        }
    }
    // Pick the control with the lowest net present cost; control 0
    // (zero flow) is always available as the fallback.
    float bestExp = payoffs[0];
    int bestCont = 0;
    for (int ii = 1; ii < noControls; ii++) {
        if (isfinite(payoffs[ii])) {
            if (payoffs[ii] < bestExp) {
                bestExp = payoffs[ii];
                bestCont = ii;
            }
        }
    }
    // Broadcast the optimum to every path and accumulate squared deviations
    // of the paths that actually used the chosen control.
    stats[2] = 0;
    for (int ii = 0; ii < noPaths; ii++) {
        condExp[ii] = bestExp;
        optCont[ii] = bestCont;
        if (controls[ii*nYears] == bestCont) {
            stats[2] += (condExp[ii+noPaths] - payoffs[bestCont])*(condExp[ii
                +noPaths] - payoffs[bestCont]);
        }
    }
    stats[0] = condExp[0];
    stats[1] = (float)optCont[0];
    // NOTE(review): this discount factor uses rrr/(100*timeStep) while the
    // mean above used rrr*timeStep/100 -- one of the two looks inverted.
    stats[2] = sqrt(stats[2]/(dataPoints[bestCont]*(1+rrr/(100*timeStep))));
    free(valid);
    free(payoffs);
    free(dataPoints);
}
14,746 | #include <cstring>
#include <fstream>
#include "sha256.cuh"
/*__device__ const unsigned int SHA256::sha256_k[64] = //UL = uint32
{0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};*/
//__device__ const unsigned char h[16] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};
// SHA-256 compression function: folds `block_nb` consecutive 64-byte blocks
// of `message` into the running state m_h[0..7] (FIPS 180-4 rounds). The
// round-constant table sha256_k is expected from the header -- a commented-out
// copy sits above this file.
__device__ void SHA256::transform(const unsigned char *message, unsigned int block_nb)
{
    uint32 w[64];  // message schedule
    uint32 wv[8];  // working variables a..h
    uint32 t1, t2;
    const unsigned char *sub_block;
    int i;
    int j;
    for (i = 0; i < (int) block_nb; i++) {
        sub_block = message + (i << 6); // i-th 64-byte block
        // w[0..15]: big-endian load of the block words.
        for (j = 0; j < 16; j++) {
            SHA2_PACK32(&sub_block[j << 2], &w[j]);
        }
        // w[16..63]: message-schedule expansion.
        for (j = 16; j < 64; j++) {
            w[j] = SHA256_F4(w[j - 2]) + w[j - 7] + SHA256_F3(w[j - 15]) + w[j - 16];
        }
        // Seed the working variables from the current state.
        for (j = 0; j < 8; j++) {
            wv[j] = m_h[j];
        }
        // 64 compression rounds.
        for (j = 0; j < 64; j++) {
            t1 = wv[7] + SHA256_F2(wv[4]) + SHA2_CH(wv[4], wv[5], wv[6])
                + sha256_k[j] + w[j];
            t2 = SHA256_F1(wv[0]) + SHA2_MAJ(wv[0], wv[1], wv[2]);
            wv[7] = wv[6];
            wv[6] = wv[5];
            wv[5] = wv[4];
            wv[4] = wv[3] + t1;
            wv[3] = wv[2];
            wv[2] = wv[1];
            wv[1] = wv[0];
            wv[0] = t1 + t2;
        }
        // Add the compressed chunk back into the state.
        for (j = 0; j < 8; j++) {
            m_h[j] += wv[j];
        }
    }
}
// Reset the hash state to the SHA-256 initial vector (FIPS 180-4) and clear
// the buffered- and total-length counters.
__device__ void SHA256::init()
{
    const uint32 iv[8] = {
        0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
        0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
    };
    for (int i = 0; i < 8; ++i) {
        m_h[i] = iv[i];
    }
    m_len = 0;
    m_tot_len = 0;
}
// Absorb `len` bytes of `message`: partial blocks are buffered in m_block,
// and every completed 64-byte (SHA224_256_BLOCK_SIZE) block is compressed
// through transform().
__device__ void SHA256::update(const unsigned char *message, unsigned int len)
{
    unsigned int block_nb;
    unsigned int new_len, rem_len, tmp_len;
    const unsigned char *shifted_message;
    tmp_len = SHA224_256_BLOCK_SIZE - m_len; // room left in the buffer
    rem_len = len < tmp_len ? len : tmp_len; // bytes used to top up the buffer
    memcpy(&m_block[m_len], message, rem_len);
    // Not enough data to complete a block yet: just buffer it.
    if (m_len + len < SHA224_256_BLOCK_SIZE) {
        m_len += len;
        return;
    }
    new_len = len - rem_len;                    // bytes beyond the topped-up buffer
    block_nb = new_len / SHA224_256_BLOCK_SIZE; // full blocks in the remainder
    shifted_message = message + rem_len;
    transform(m_block, 1);                 // compress the completed buffer
    transform(shifted_message, block_nb);  // compress the remaining full blocks in place
    // Stash the trailing partial block for the next update()/final().
    rem_len = new_len % SHA224_256_BLOCK_SIZE;
    memcpy(m_block, &shifted_message[block_nb << 6], rem_len);
    m_len = rem_len;
    m_tot_len += (block_nb + 1) << 6; // bytes fully compressed so far
}
// Finish the hash: append the 0x80 pad byte plus zero padding, write the
// message bit-length into the last 4 bytes (32-bit length field only, so
// inputs must be shorter than 2^32 bits), compress the final block(s), and
// serialize the 32-byte digest big-endian.
__device__ void SHA256::final(unsigned char *digest)
{
    unsigned int block_nb;
    unsigned int pm_len;
    unsigned int len_b;
    int i;
    // Two blocks are needed when the buffered bytes leave fewer than 9 bytes
    // of room (1 pad byte + 8-byte length field; only 4 are written here).
    block_nb = (1 + ((SHA224_256_BLOCK_SIZE - 9)
        < (m_len % SHA224_256_BLOCK_SIZE)));
    len_b = (m_tot_len + m_len) << 3; // total message length in bits
    pm_len = block_nb << 6;           // padded length in bytes
    memset(m_block + m_len, 0, pm_len - m_len);
    m_block[m_len] = 0x80; // mandatory padding marker
    SHA2_UNPACK32(len_b, m_block + pm_len - 4);
    transform(m_block, block_nb);
    // Serialize the state words big-endian into the digest.
    for (i = 0 ; i < 8; i++) {
        SHA2_UNPACK32(m_h[i], &digest[i << 2]);
    }
}
// Unused helper: allocate an uninitialized l-byte buffer on the device heap.
__device__ static unsigned char *empty(unsigned l)
{
    unsigned char *buf = (unsigned char *) malloc(l);
    return buf;
}
// Hash `length` bytes of `input` with SHA-256 and return the digest as a
// 64-character hex string (device-heap allocated, NUL-terminated; the caller
// owns and must free it). The nibble-to-char table `h` is expected from the
// header -- the commented-out copy above suggests uppercase hex digits;
// confirm. NOTE(review): the malloc result is not checked for NULL.
__device__ unsigned char *sha256(unsigned char *input, unsigned length)
{
    unsigned char digest[SHA256::DIGEST_SIZE];
    memset(digest,0,SHA256::DIGEST_SIZE);
    SHA256 ctx = SHA256();
    ctx.init();
    ctx.update( input, length);
    ctx.final(digest);
    unsigned char *buf;
    buf = (unsigned char *) malloc(2*SHA256::DIGEST_SIZE+1);
    buf[2*SHA256::DIGEST_SIZE] = 0;
    // Copy each element in digest into buffer as a hex character
    for (int i = 0; i < SHA256::DIGEST_SIZE; i++)
    {
        buf[i*2] = h[(digest[i] >> 4) % 16]; // high nibble
        buf[i*2+1] = h[ digest[i] % 16];     // low nibble
        //sprintf(((char *)buf)+i*2, "%02x", digest[i]);
    }
    return buf;
}
|
14,747 |
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
// Host-side index of L[i][j] within a packed lower-triangular array stored
// row-major (row i starts at i*(i+1)/2). Symmetric: (i,j) and (j,i) map to
// the same slot. `d` is unused but kept for interface parity with the
// device version.
int hgetLPierre(int d, int i, int j) {
    if (j > i) {
        // Take the transpose so we always index the lower triangle.
        int tmp = i;
        i = j;
        j = tmp;
    }
    return i * (i + 1) / 2 + j;
}
// Host-side index of D[i] within the packed diagonal array (identity map,
// kept as a function for symmetry with hgetLPierre).
int hgetDPierre(int i) {
    return i;
}
// Fills A with N random systems in packed [D | L] layout (d diagonal entries
// followed by d*(d+1)/2 lower-triangular entries per matrix, with a unit
// diagonal for L) and Y with N random right-hand sides of length d.
// Random entries are strictly positive, in (0, 1]. `verbose` is unused.
void generate_systems(float *A, float *Y, int N, int d, bool verbose=true) {
    int matrix_size = d + d * (d + 1) / 2;
    int tri_size = d * (d + 1) / 2;
    for (int sys = 0; sys < N; sys++) {
        float *D = &A[sys * matrix_size];     // diagonal block
        float *T = &A[sys * matrix_size + d]; // packed triangular block
        // Random triangular entries.
        for (int k = 0; k < tri_size; k++)
            T[k] = ((float) rand() + 1) * 1. / RAND_MAX;
        // Force a unit diagonal on L.
        for (int k = 0; k < d; k++)
            T[hgetLPierre(d, k, k)] = 1.0f;
        // Random diagonal and right-hand side.
        for (int k = 0; k < d; k++) {
            D[k] = ((float) rand() + 1) * 1. / RAND_MAX;
            Y[d * sys + k] = ((float) rand() + 1) * 1. / RAND_MAX;
        }
    }
}
// ************************************************************************ //
// __device__ int getLPierre(float* T, int n, int d, int matrix_id, int i, int j)
// Device-side offset of L[i][j] within one packed [D | L] matrix: the
// leading `d` skips the diagonal block stored first. Symmetric in (i, j).
__device__ int getLPierre(int d, int i, int j)
{
    if (j > i) {
        // Take the transpose so we always index the lower triangle.
        int tmp = i;
        i = j;
        j = tmp;
    }
    return d + i * (i + 1) / 2 + j;
}
// __device__ int getDPierre(float* T, int n, int d, int matrix_id, int i)
// Device-side offset of D[i] within one packed [D | L] matrix: the diagonal
// block is stored first, so this is the identity map. `d` is unused but
// kept for interface parity with getLPierre.
__device__ int getDPierre(int d, int i)
{
    return i;
}
// Block-cooperative copy of n floats from src to dest, strided by
// blockDim.x, followed by a block-wide barrier. Must be reached by every
// thread in the block (it ends in __syncthreads()).
__device__ void parallel_copy(float* src, float* dest, int n)
{
    for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
        dest[idx] = src[idx];
    }
    __syncthreads();
}
// __global__ void LDLt_max_col_k(float* AGPU, int d)
// In-place LDL^T factorization of many small symmetric matrices stored in
// packed [D | L] layout (d diagonal entries then d*(d+1)/2 triangular
// entries per matrix), operating directly on global memory (the parameter
// is named sA but holds the global array). Column variant: thread tidx owns
// row tidx, the outer loop walks columns j, and threads with tidx > j update
// L[tidx][j] in parallel.
// NOTE(review): the __syncthreads() calls are uniform across the block only
// when blockDim.x is an exact multiple of d -- confirm the launch config.
__global__ void LDLt_max_col_k(float* sA, int d)
{
    int tidx = threadIdx.x%d;          // row owned by this thread
    int Qt = (threadIdx.x-tidx)/d;     // which matrix within the block
    int A_size = d*(d+1)/2+d;          // floats per packed matrix
    int minTB = blockDim.x/d;          // matrices handled per block
    int nt = (blockIdx.x*minTB + Qt) * A_size; // base offset of this matrix
    // Perform the LDLt factorization
    int j, k;
    for(j=0; j<d; j++){
        // D[j] -= sum_k D[k]*L[j][k]^2 (one thread per matrix).
        if(tidx==0){
            for(k=0; k<j; k++){
                sA[nt+getDPierre(d, j)] -= sA[nt+getDPierre(d,k)]*
                    sA[nt+getLPierre(d,j,k)]*
                    sA[nt+getLPierre(d,j,k)];
            }
        }
        __syncthreads();
        // L[tidx][j] = (A[tidx][j] - sum_k L[tidx][k]*L[j][k]*D[k]) / D[j],
        // with the division by D[j] distributed over each term.
        if(tidx>j){
            sA[nt+getLPierre(d,tidx,j)] /= sA[nt+getDPierre(d,j)];
            for(k=0; k<j; k++){
                sA[nt+getLPierre(d,tidx,j)] -= sA[nt+getLPierre(d,tidx,k)]*
                    sA[nt+getLPierre(d,j,k)]*
                    sA[nt+getDPierre(d,k)]/
                    sA[nt+getDPierre(d,j)];
            }
        }
        __syncthreads();
    }
}
// __global__ void LDLt_max_row_k(float* AGPU, int d)
// In-place LDL^T factorization (same packed [D | L] layout and thread
// mapping as LDLt_max_col_k), row variant: the outer loop walks rows i and
// threads with tidx > i update L[i][tidx] -- i.e. the entries of row i --
// in parallel, again directly in global memory.
// NOTE(review): uniform __syncthreads() requires blockDim.x to be an exact
// multiple of d -- confirm the launch config.
__global__ void LDLt_max_row_k(float* sA, int d)
{
    int tidx = threadIdx.x%d;          // column owned by this thread
    int Qt = (threadIdx.x-tidx)/d;     // which matrix within the block
    int A_size = d*(d+1)/2+d;          // floats per packed matrix
    int minTB = blockDim.x/d;          // matrices handled per block
    int nt = (blockIdx.x*minTB + Qt) * A_size; // base offset of this matrix
    // Perform the LDLt factorization
    int i, k;
    for(i=0; i<d; i++){
        // D[i] -= sum_k D[k]*L[i][k]^2 (one thread per matrix).
        if(tidx==0){
            for(k=0; k<i; k++){
                sA[nt+getDPierre(d, i)] -= sA[nt+getDPierre(d,k)]*
                    sA[nt+getLPierre(d,i,k)]*
                    sA[nt+getLPierre(d,i,k)];
            }
        }
        __syncthreads();
        // L[i][tidx] update, division by D[i] distributed over each term.
        if(i<tidx){
            sA[nt+getLPierre(d,i,tidx)] /= sA[nt+getDPierre(d,i)];
            for(k=0; k<i; k++){
                sA[nt+getLPierre(d,i,tidx)] -= sA[nt+getLPierre(d,k,tidx)]*
                    sA[nt+getLPierre(d,k,i)]*
                    sA[nt+getDPierre(d,k)]/
                    sA[nt+getDPierre(d,i)];
            }
        }
        __syncthreads();
    }
}
// __global__ void LDLt_max_row_k(float* AGPU, int d)
// Shared-memory variant of LDLt_max_row_k: each block stages its
// (blockDim.x/d) matrices from global memory into dynamic shared memory
// with parallel_copy, factorizes them there, and copies the results back.
// Launch with a dynamic shared-memory size of (blockDim.x/d) * A_size
// floats, where A_size = d + d*(d+1)/2.
// NOTE(review): uniform __syncthreads()/parallel_copy requires blockDim.x
// to be an exact multiple of d -- confirm the launch config.
__global__ void LDLt_max_row_k_SHARED(float* A_host, int d)
{
    int tidx = threadIdx.x%d;      // column owned by this thread
    int Qt = (threadIdx.x-tidx)/d; // which matrix within the block
    int A_size = d*(d+1)/2+d;      // floats per packed matrix
    int minTB = blockDim.x/d;      // matrices handled per block
    int nt = Qt * A_size;          // base offset of this matrix *in shared memory*
    extern __shared__ float sA[];
    // Stage this block's matrices from global into shared memory.
    parallel_copy(&A_host[blockIdx.x*minTB*A_size], sA, minTB*A_size);
    // Perform the LDLt factorization
    int i, k;
    for(i=0; i<d; i++){
        // D[i] -= sum_k D[k]*L[i][k]^2 (one thread per matrix).
        if(tidx==0){
            for(k=0; k<i; k++){
                sA[nt+getDPierre(d, i)] -= sA[nt+getDPierre(d,k)]*
                    sA[nt+getLPierre(d,i,k)]*
                    sA[nt+getLPierre(d,i,k)];
            }
        }
        __syncthreads();
        // L[i][tidx] update, division by D[i] distributed over each term.
        if(i<tidx){
            sA[nt+getLPierre(d,i,tidx)] /= sA[nt+getDPierre(d,i)];
            for(k=0; k<i; k++){
                sA[nt+getLPierre(d,i,tidx)] -= sA[nt+getLPierre(d,k,tidx)]*
                    sA[nt+getLPierre(d,k,i)]*
                    sA[nt+getDPierre(d,k)]/
                    sA[nt+getDPierre(d,i)];
            }
        }
        __syncthreads();
    }
    // Write the factorized matrices back to global memory.
    parallel_copy(sA, &A_host[blockIdx.x*minTB*A_size], minTB*A_size);
}
// ************************************************************************ //
|
14,748 | //##############################################################################################################################################################################################################//
//Aquila - An Open-Source GPU-Accelerated Toolkit for Cognitive and Neuro-Robotics Research //
// //
//Copyright (c) <2012>, <Martin Peniak - www.martinpeniak.com> //
//All rights reserved. //
// //
//Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: //
// //
// - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. //
// - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. //
// //
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR //
//A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT //
//LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR //
//TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //
// //
//The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted //
//as representing official policies,either expressed or implied, of the FreeBSD Project. //
//##############################################################################################################################################################################################################//
#include <cuda.h>
#include <cuda_runtime.h>
/*!
 * \brief Calculates the euclidean distance between the current input vector and
 *        each neuron's weight vector, one thread per output neuron, and stores
 *        the distances in the output array.
 * \param[in] inputs - input sequences; sequenceId selects the current vector
 * \param[in] weights - weight matrix, laid out weights[neuron + numOutputs*input]
 * \param[in] numInputs - number of inputs per sequence element
 * \param[in] sequenceId - sequence id (index of the current input vector)
 * \param[in] numOutputs - number of outputs (neurons)
 * \param[out] outputs - per-neuron euclidean distances
 */
__global__ void propogateInputKernel(float *inputs, float *weights, float *outputs, int numInputs, int sequenceId, int numOutputs)
{
    // get the global thread id and guard the grid tail
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(idx<numOutputs)
    {
        // accumulate the squared euclidean distance between this neuron's
        // weight vector and the current input vector
        float distance = 0.0f;
        for(int i=0; i<numInputs; i++)
        {
            // hoist the repeated subtraction into a single temporary
            float diff = inputs[(sequenceId * numInputs) + i] - weights[idx+(numOutputs*i)];
            distance += diff * diff;
        }
        // Bug fix: the original called __syncthreads() here, inside the
        // divergent if(idx<numOutputs) branch.  The kernel uses no shared
        // memory, so the barrier served no purpose and was undefined
        // behaviour whenever numOutputs is not a multiple of blockDim.x.
        outputs[idx] = sqrtf(distance);
    }
}
/*!
 * \brief Finds the best matching unit having the lowest euclidean distance - part 1/2.
 *        Block-local tournament reduction: after this pass, winner[b*blockDim.x]
 *        holds the index of the minimum-distance unit within block b.
 * \param[in] outputs - per-neuron euclidean distances
 * \param[in] numOutputs - number of outputs
 * \param[out] winner - winner index scratch array (seeded with identity)
 */
__global__ void findBestMatchPass1Kernel(float *outputs, int *winner, int numOutputs)
{
    // local and global thread ids
    int tid = threadIdx.x;
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // initialise: each in-range slot starts as its own winner
    if(idx<numOutputs)
    {
        winner[idx] = idx;
    }
    // Bug fix: the original placed every __syncthreads() inside the
    // if(idx<numOutputs) branch.  When numOutputs is not a multiple of
    // blockDim.x the barrier becomes divergent, which is undefined
    // behaviour.  All barriers are now executed unconditionally by the
    // whole block; the idx bound moved into the per-step condition.
    __syncthreads();
    for(int s=1; s<blockDim.x; s*=2)
    {
        int index = 2 * s * tid;
        if(idx<numOutputs && index<blockDim.x && (blockIdx.x*blockDim.x)+index+s<numOutputs)
        {
            // keep the candidate with the smaller distance
            if(outputs[winner[(blockIdx.x*blockDim.x)+index]] > outputs[winner[(blockIdx.x*blockDim.x)+index+s]])
            {
                winner[(blockIdx.x*blockDim.x)+index] = winner[(blockIdx.x*blockDim.x)+index+s];
            }
        }
        // wait until all threads are finished with the current pass
        __syncthreads();
    }
}
/*!
* \brief Finds the best matching unit having the lowest euclidean distance - part 2/2.
*        Reduces the per-block winners produced by pass 1 (single-block launch)
*        and stores the global winner's index in winner[0] and its X/Y map
*        coordinates in winner[1]/winner[2].
*        NOTE(review): the stride expression 2*(s*(threadIdx.x*blockDim.x))
*        differs in shape from the pass-1 indexing; it reduces the entries at
*        multiples of blockDim.x, assuming pass 1 left one candidate per block
*        -- verify against the pass-1 layout.  Also note winner[1] and
*        winner[2] are overwritten with coordinates, clobbering those slots.
* \param[in] outputs - per-neuron euclidean distances
* \param[in] numOutputs - number of outputs
* \param[out] winner - winner (index in [0], X in [1], Y in [2])
*/
__global__ void findBestMatchPass2Kernel(float *outputs, int *winner, int numOutputs)
{
// tournament reduction over the per-block winners
for(int s=1; s<blockDim.x; s*=2)
{
int index = 2*s*threadIdx.x;
if(index<blockDim.x && 2*(s*(threadIdx.x*blockDim.x))+(s*blockDim.x)<numOutputs)
{
if(outputs[winner[2*(s*(threadIdx.x*blockDim.x))]] > outputs[winner[2*(s*(threadIdx.x*blockDim.x))+(s*blockDim.x)]])
{
winner[2*(s*(threadIdx.x*blockDim.x))] = winner[2*(s*(threadIdx.x*blockDim.x))+(s*blockDim.x)];
}
}
//wait until all threads are finished with current pass
__syncthreads();
}
if(threadIdx.x==0)
{
//find out X and Y coordinates and save them in winner[1] and winner[2] respectively
//(repeated subtraction of the map dimension: Y = row count, X ends negative)
int Y = -1;
int X = int(winner[0]);
int dim = int(sqrtf((float)numOutputs));
while(X>-1)
{
X -= dim;
Y ++;
}
//assign X,Y positions (X+dim restores the column within the row)
winner[1] = X + dim;
winner[2] = Y;
}
__syncthreads();
}
/*!
 * \brief Updates the weights of the self-organising map: pulls each neuron's
 *        weight vector towards the current input, scaled by a Gaussian
 *        neighbourhood around the winning unit and a decaying learning rate.
 *        One thread per output neuron.
 * \param[in] inputs - input sequences; sequenceId selects the current vector
 * \param[in] winner - winner[1]/winner[2] hold the winning unit's X/Y map coordinates
 * \param[in] sigma - neighbourhood width; values <= 0 select the fallback width
 * \param[in] numInputs - number of inputs per sequence element
 * \param[in] sequenceId - sequence id
 * \param[in] numOutputs - number of outputs (assumed to form a square map)
 * \param[in] neighbourhoodSize - neighbourhood radius cutoff in map units
 * \param[in] initLearningRate - initial learning rate
 * \param[in] numIterations - number of iterations
 * \param[in] currentIteration - current iteration
 * \param[out] weights - weight matrix, laid out weights[neuron + numOutputs*input]
 */
__global__ void updateWeightsKernel(float *inputs, float *weights, int *winner, float sigma, int numInputs, int sequenceId, int numOutputs, int neighbourhoodSize, float initLearningRate,int numIterations, int currentIteration)
{
    int neuron = blockIdx.x*blockDim.x + threadIdx.x;
    if(neuron >= numOutputs)
    {
        return; // guard the grid tail
    }
    // side length of the (square) map
    int dim = int(sqrtf((float)numOutputs));
    // convert this neuron's flat index into map coordinates via repeated
    // subtraction of the row length
    int curY = -1;
    int curX = neuron;
    do{
        curX -= dim;
        curY ++;
    }while(curX>-1);
    curX += dim;
    // squared map-grid distance between this neuron and the winning unit
    float influence = (float)((curX-winner[1])*(curX-winner[1]))+((curY-winner[2])*(curY-winner[2]));
    if(influence>(neighbourhoodSize*neighbourhoodSize))
    {
        // outside the neighbourhood: zero influence, no weight change
        influence = 0.0;
    }
    else if(sigma>0.0)
    {
        // Gaussian neighbourhood whose width shrinks over the iterations
        float s = sigma*expf(-(float)currentIteration/numIterations);
        influence = expf(-(influence)/(2*(s*s)));
    }
    else
    {
        // fallback width derived from the map dimension
        influence = expf(-(influence)/(2*dim));
    }
    // exponentially decaying learning rate
    float currentLearningRate = initLearningRate*__expf(-(float)currentIteration/numIterations);
    // move each weight component towards the input, scaled by neighbourhood
    // influence and learning rate
    for(int i=0; i<numInputs; i++)
    {
        weights[neuron+(numOutputs*i)] += (inputs[(sequenceId*numInputs)+i]-weights[neuron+(numOutputs*i)])*influence*currentLearningRate;
    }
}
/*!
* \brief Wrapper for findBestMatchPass1Kernel and findBestMatchPass2Kernel.
*        Pass 1 reduces within each block; pass 2 runs as a single block over
*        the per-block results and leaves the winning index and its X/Y map
*        coordinates in winner[0..2].
* \param[in] grid - CUDA grid size
* \param[in] block - CUDA block size
* \param[in] outputs - outputs (per-neuron euclidean distances)
* \param[in] numOutputs - number of outputs
* \param[out] winner - winner (index plus map coordinates)
*/
void findBestMatchOnDevice(dim3 grid, dim3 block, float *outputs, int *winner, int numOutputs)
{
findBestMatchPass1Kernel<<<grid,block>>>(outputs, winner, numOutputs);
// second pass intentionally launched with a single block
findBestMatchPass2Kernel<<<1,block>>>(outputs, winner, numOutputs);
}
/*!
* \brief Wrapper for propogateInputKernel.  Asynchronous: the kernel is
*        launched on the default stream and no synchronisation is performed
*        here.
* \param[in] grid - CUDA grid size
* \param[in] block - CUDA block size
* \param[in] inputs - inputs (device pointer)
* \param[in] weights - weights (device pointer)
* \param[in] numInputs - number of inputs
* \param[in] sequenceId - sequence id
* \param[in] numOutputs - number of outputs
* \param[out] outputs - outputs (device pointer, per-neuron distances)
*/
void propogateInputOnDevice(dim3 grid, dim3 block, float *inputs, float *weights, float *outputs, int numInputs, int sequenceId, int numOutputs)
{
propogateInputKernel<<<grid,block>>>(inputs, weights, outputs, numInputs, sequenceId, numOutputs);
}
/*!
* \brief Wrapper for updateWeightsKernel.  Asynchronous: the kernel is
*        launched on the default stream and no synchronisation is performed
*        here.
* \param[in] grid - CUDA grid size
* \param[in] block - CUDA block size
* \param[in] inputs - inputs (device pointer)
* \param[in] winner - winner (device pointer; [1]/[2] hold X/Y coordinates)
* \param[in] sigma - sigma (neighbourhood width)
* \param[in] numInputs - number of inputs
* \param[in] sequenceId - sequence id
* \param[in] numOutputs - number of outputs
* \param[in] neighbourhoodSize - neighbourhood size
* \param[in] initLearningRate - initial learning rate
* \param[in] numIterations - number of iterations
* \param[in] currentIteration - current iteration
* \param[out] weights - weights (device pointer)
*/
void updateWeightsOnDevice(dim3 grid, dim3 block, float *inputs, float *weights, int *winner, float sigma, int numInputs, int sequenceId, int numOutputs, int neighbourhoodSize, float initLearningRate,int numIterations, int currentIteration)
{
updateWeightsKernel<<<grid,block>>>(inputs, weights, winner, sigma, numInputs, sequenceId, numOutputs, neighbourhoodSize, initLearningRate, numIterations, currentIteration);
}
|
14,749 | #include<iostream>
#include<math.h>
#include<cuda_runtime.h>
#include"device_launch_parameters.h"
#include<fstream>
#define MAX_NUM_LISTS 256
using namespace std;
int num_data = 1000; // the number of the data
int num_lists = 128; // the number of parallel threads
__device__ void radix_sort(float* const data_0, float* const data_1, \
int num_lists, int num_data, int tid);
__device__ void merge_list(const float* src_data, float* const dest_list, \
int num_lists, int num_data, int tid);
__device__ void preprocess_float(float* const data, int num_lists, int num_data, int tid);
__device__ void Aeprocess_float(float* const data, int num_lists, int num_data, int tid);
// Sorts num_data floats from src_data ascending into dest_data: per-thread
// radix sort of num_lists interleaved sub-lists followed by a parallel merge.
// Must be launched as a SINGLE block of num_lists threads -- the helpers use
// __syncthreads() and block-shared state, and tid is used directly as a
// shared-memory index.
__global__ void GPU_radix_sort(float* const src_data, float* const dest_data, \
int num_lists, int num_data)
{
// temp_data:temporarily store the data
int tid = blockIdx.x*blockDim.x + threadIdx.x;
// special preprocessing of IEEE floating-point numbers before applying radix sort
preprocess_float(src_data, num_lists, num_data, tid);
__syncthreads();
// no shared memory
radix_sort(src_data, dest_data, num_lists, num_data, tid);
__syncthreads();
// k-way merge of the num_lists sorted, interleaved sub-lists into dest_data
merge_list(src_data, dest_data, num_lists, num_data, tid);
__syncthreads();
// undo the bit-pattern preprocessing so dest_data holds ordinary floats again
Aeprocess_float(dest_data, num_lists, num_data, tid);
__syncthreads();
}
// Maps each IEEE-754 float to an unsigned bit pattern whose unsigned ordering
// matches the floats' numeric ordering: negatives are bitwise-inverted,
// non-negatives get the sign bit set.  Thread `tid` handles the strided
// sub-list {tid, tid+num_lists, tid+2*num_lists, ...}.
__device__ void preprocess_float(float* const src_data, int num_lists, int num_data, int tid)
{
    for (int idx = tid; idx < num_data; idx += num_lists)
    {
        unsigned int *bits = (unsigned int *)(&src_data[idx]);
        if ((*bits >> 31) & 0x1)
        {
            *bits = ~(*bits);               // negative: flip all bits
        }
        else
        {
            *bits = (*bits) | 0x80000000;   // non-negative: set the sign bit
        }
    }
}
// Inverse of preprocess_float: restores the original IEEE-754 floats from
// their order-preserving unsigned encodings.  Patterns with the top bit set
// came from non-negative floats (clear the bit); all others came from
// negatives (re-invert every bit).  Thread `tid` handles the strided
// sub-list {tid, tid+num_lists, ...}.
__device__ void Aeprocess_float(float* const data, int num_lists, int num_data, int tid)
{
    for (int idx = tid; idx < num_data; idx += num_lists)
    {
        unsigned int *bits = (unsigned int *)(&data[idx]);
        if ((*bits >> 31) & 0x1)
        {
            *bits = (*bits) & 0x7fffffff;   // was non-negative: drop the marker bit
        }
        else
        {
            *bits = ~(*bits);               // was negative: undo the inversion
        }
    }
}
// Per-thread LSD radix sort over the strided sub-list {tid, tid+num_lists, ...}
// of data_0, sorted in place bit by bit over all 32 bits; data_1 is scratch
// for the 1-bit partition.  Values are compared via their preprocessed
// unsigned bit patterns (see preprocess_float).
__device__ void radix_sort(float* const data_0, float* const data_1, \
int num_lists, int num_data, int tid)
{
for(int bit=0;bit<32;bit++)
{
int bit_mask = (1 << bit);
int count_0 = 0;
int count_1 = 0;
// stable partition of this thread's sub-list on the current bit:
// 0-bits are compacted in place, 1-bits go to the scratch buffer
for(int i=tid; i<num_data;i+=num_lists)
{
unsigned int *temp =(unsigned int *) &data_0[i];
if(*temp & bit_mask)
{
data_1[tid+count_1*num_lists] = data_0[i]; // bug was here: assigning through the float lvalue applies a forced type conversion
count_1 += 1;
}
else{
data_0[tid+count_0*num_lists] = data_0[i];
count_0 += 1;
}
}
// append the 1-bit values after the compacted 0-bit values
for(int j=0;j<count_1;j++)
{
data_0[tid + count_0*num_lists + j*num_lists] = data_1[tid + j*num_lists];
}
}
}
// Merges the num_lists sorted, interleaved sub-lists of src_data into
// dest_list (ascending) using a shared-memory tournament min-reduction:
// each round every thread proposes the head of its sub-list (0xffffffff as
// a +infinity sentinel once exhausted), the block reduces to the minimum,
// and thread 0 commits it and advances the winning sub-list.
// Requires num_lists <= MAX_NUM_LISTS, num_lists a power of two (tid_max
// halving), and a single-block launch -- tid indexes block-shared arrays.
__device__ void merge_list(const float* src_data, float* const dest_list, \
int num_lists, int num_data, int tid)
{
// maximum elements any one sub-list can hold
int num_per_list = ceil((float)num_data/num_lists);
__shared__ int list_index[MAX_NUM_LISTS];
__shared__ float record_val[MAX_NUM_LISTS];
__shared__ int record_tid[MAX_NUM_LISTS];
list_index[tid] = 0;
record_val[tid] = 0;
record_tid[tid] = tid;
__syncthreads();
for(int i=0;i<num_data;i++)
{
record_val[tid] = 0;
record_tid[tid] = tid; // bug2: must be re-initialised on every iteration
// load this sub-list's current head, or the 0xffffffff sentinel when the
// sub-list is exhausted or the index falls past the data
if(list_index[tid] < num_per_list)
{
int src_index = tid + list_index[tid]*num_lists;
if(src_index < num_data)
{
record_val[tid] = src_data[src_index];
}else{
unsigned int *temp = (unsigned int *)&record_val[tid];
*temp = 0xffffffff;
}
}else{
unsigned int *temp = (unsigned int *)&record_val[tid];
*temp = 0xffffffff;
}
__syncthreads();
// tournament reduction: compare via the unsigned bit patterns, keeping
// the smaller candidate (and the sub-list it came from)
int tid_max = num_lists >> 1;
while(tid_max != 0 )
{
if(tid < tid_max)
{
unsigned int* temp1 = (unsigned int*)&record_val[tid];
unsigned int *temp2 = (unsigned int*)&record_val[tid + tid_max];
if(*temp2 < *temp1)
{
record_val[tid] = record_val[tid + tid_max];
record_tid[tid] = record_tid[tid + tid_max];
}
}
tid_max = tid_max >> 1;
__syncthreads();
}
// thread 0 commits the round's minimum and advances the winning sub-list
if(tid == 0)
{
list_index[record_tid[0]]++;
dest_list[i] = record_val[0];
}
__syncthreads();
}
}
/*
 * Host driver: generates num_data random floats in [0,1), round-trips them
 * through data.bin, sorts them on the GPU (single block of num_lists
 * threads, as GPU_radix_sort requires) and prints the sorted values.
 * Fixes vs. the original: the ifstream is closed, the device buffers and the
 * host array are released, and main returns an explicit status.
 */
int main()
{
    float *data = new float[num_data];
    float *src_data, *dest_data;
    for(int i =0;i<num_data;i++)
    {
        data[i] = (float)rand()/double(RAND_MAX);
    }
    // write the input set out and read it back (exercises the binary file I/O)
    ofstream outfile("./data.bin", ios::out | ios::binary);
    outfile.write((char *)data, sizeof(float)*num_data);
    outfile.close();
    ifstream infile("./data.bin",ios::in | ios::binary);
    infile.read((char *)data, sizeof(float)*num_data);
    infile.close();  // was left open in the original
    cudaMalloc((void**)&src_data, sizeof(float)*num_data);
    cudaMalloc((void**)&dest_data, sizeof(float)*num_data);
    cudaMemcpy(src_data, data, sizeof(float)*num_data, cudaMemcpyHostToDevice);
    // single block: the kernel relies on block-wide barriers
    GPU_radix_sort<<<1,num_lists>>>(src_data, dest_data, num_lists, num_data);
    // blocking device-to-host copy; also synchronises with the kernel
    cudaMemcpy(data, dest_data, sizeof(float)*num_data, cudaMemcpyDeviceToHost);
    for(int i =0;i<num_data;i++)
    {
        cout<<data[i]<<" ";
    }
    // release device and host memory (both were leaked) and return a status
    cudaFree(src_data);
    cudaFree(dest_data);
    delete[] data;
    return 0;
}
|
14,750 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
#define BASETYPE float
// Wall-clock time in seconds since the epoch, with microsecond resolution,
// obtained via gettimeofday().  Intended for elapsed-time measurements
// (subtract two readings).
double dwalltime(){
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
}
// One thread per 4x4 matrix (row-major, 16 floats): computes its determinant
// by cofactor expansion along the first row and writes it to detM[global id].
// Dynamic shared memory (`datos`) is split into `desp` floats for the staged
// matrices followed by 4 partial-cofactor slots per thread; the host passes
// desp == blockDim.x*16 and blockDim.x*(16+4) floats of shared memory.
__global__ void matDet(BASETYPE *d_matA, BASETYPE *detM, int desp){
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ BASETYPE datos[];
// carve the dynamic shared buffer into the matrix slab and the cofactor slab
BASETYPE *s_mat = &datos[0];
BASETYPE *s_detAux = &datos[desp];
// base offset of this thread's matrix within the shared slab
int offset = (threadIdx.x)*16;
unsigned int i;
// stage this thread's 16 matrix elements into shared memory
for(i = 0; i < 16; i++){
s_mat[(threadIdx.x) * 16 + i]=d_matA[global_id * 16 + i];
}
__syncthreads();
// zero the four cofactor accumulators (required: they are += below)
for(i = 0; i < 4; i++){
s_detAux[(threadIdx.x) * 4+i]=0;
}
__syncthreads();
// printf("globalId:%d|%d|%d|%d|%d\n",global_id,(threadIdx.x)*4,(threadIdx.x)*4+1,(threadIdx.x)*4+2,(threadIdx.x)*4+3);
// Cofactor expansion along row 0: each term is matrix[0][c] times the 3x3
// minor determinant (Sarrus rule), with alternating signs.
s_detAux[(threadIdx.x)*4] += s_mat[offset] * ( (s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+13])+(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+1] += (-1*s_mat[offset+1]) * ( (s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+15])+(s_mat[offset+6]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+14]) + (-1*(s_mat[offset+7]*s_mat[offset+10]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+14])) + (-1*(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+2] += s_mat[offset+2] * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+15])+(s_mat[offset+5]*s_mat[offset+11]*s_mat[offset+12])+(s_mat[offset+7]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+7]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+11]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+15])) );
s_detAux[(threadIdx.x)*4+3] += (-1*s_mat[offset+3]) * ( (s_mat[offset+4]*s_mat[offset+9]*s_mat[offset+14])+(s_mat[offset+5]*s_mat[offset+10]*s_mat[offset+12])+(s_mat[offset+6]*s_mat[offset+8]*s_mat[offset+13]) + (-1*(s_mat[offset+6]*s_mat[offset+9]*s_mat[offset+12])) + (-1*(s_mat[offset+4]*s_mat[offset+10]*s_mat[offset+13])) + (-1*(s_mat[offset+5]*s_mat[offset+8]*s_mat[offset+14])) );
// sum of the four signed cofactor terms is the determinant
detM[blockIdx.x * blockDim.x + (threadIdx.x)] = s_detAux[(threadIdx.x)*4] + s_detAux[(threadIdx.x)*4+1] + s_detAux[(threadIdx.x)*4+2] + s_detAux[(threadIdx.x)*4+3];
__syncthreads();
}
// Element-wise tree reduction of 4x4 matrices: each block sums its
// blockDim.x matrices (16 floats each) and writes the resulting matrix to
// d_matA[blockIdx.x*16 ...].  Requires blockDim.x*16 floats of dynamic
// shared memory and a power-of-two blockDim.x (the stride halves each step).
// The parameter n is unused in this kernel (kept for the call signature).
__global__ void vecMult(BASETYPE *d_matA,unsigned long n){
    extern __shared__ BASETYPE s_mat[];
    unsigned int base = blockIdx.x * blockDim.x * 16;
    unsigned int j, c;
    // Stage this block's 16*blockDim.x values into shared memory one
    // blockDim.x-wide slab at a time: consecutive threads touch consecutive
    // addresses, so every slab load is coalesced.  (Replaces the original's
    // 16 hand-unrolled register-staged copies; same addresses, same order.)
    for (c = 0; c < 16; c++) {
        s_mat[threadIdx.x + blockDim.x * c] = d_matA[base + threadIdx.x + blockDim.x * c];
    }
    __syncthreads();
    // Pairwise tree reduction over whole matrices: at step j the active
    // thread count and the matrix stride are both blockDim.x/(j*2).
    for (j = 1; j < blockDim.x; j *= 2) {
        unsigned int half = blockDim.x / (j * 2);
        if (threadIdx.x < half) {
            for (c = 0; c < 16; c++) {
                s_mat[(threadIdx.x) * 16 + c] += s_mat[((threadIdx.x) * 16 + c) + half * 16];
            }
        }
        __syncthreads();
    }
    // thread 0 writes the block's accumulated 4x4 matrix back to global memory
    if ((threadIdx.x) == 0) {
        for (c = 0; c < 16; c++) {
            d_matA[(blockIdx.x * 16) + c] = s_mat[c];
        }
    }
}
// Folds the leftover partial chunk of the matrix reduction: sums the n
// (== cant_m) 4x4 matrices starting at matrix offset_m and writes the result
// to d_matA[(offset_m/blockDim.x)*16 + blockIdx.x*16 ...].  Launched as a
// single block with cant_m < blockDim.x; needs blockDim.x*16 floats of
// dynamic shared memory.
// Bug fix vs. the original: every __syncthreads() sat inside the
// if(global_id < n) guard, and this kernel is launched precisely when
// n < blockDim.x, so the barrier was always divergent (undefined behaviour,
// potential hang).  Memory accesses keep their per-thread guards, but the
// barriers are now executed unconditionally by the whole block.
__global__ void vecMult2(BASETYPE *d_matA,unsigned long n,int offset_m,int cant_m ){
    int global_id = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ BASETYPE s_mat[];
    unsigned int j, c;
    // stage the cant_m leftover matrices into shared memory (in-range
    // threads only; same addresses as the original's unrolled copies)
    if( global_id < n){
        for (c = 0; c < 16; c++) {
            s_mat[threadIdx.x * 16 + c] = d_matA[(offset_m * 16) +( global_id * 16 + c) ];
        }
    }
    __syncthreads();
    // pairwise tree reduction over the cant_m matrices
    for( j = 1; j < cant_m; j *= 2 ){
        int half = cant_m / (j * 2);
        if( global_id < n && threadIdx.x < half){
            for (c = 0; c < 16; c++) {
                s_mat[(threadIdx.x) * 16 + c] += s_mat[((threadIdx.x) * 16 + c) + half * 16];
            }
        }
        __syncthreads();
    }
    // thread 0 writes the accumulated matrix to the chunk's output slot
    if (global_id < n && (threadIdx.x) == 0){
        for (c = 0; c < 16; c++) {
            d_matA[(offset_m / blockDim.x) * 16 + (blockIdx.x * 16) + c] = s_mat[c];
        }
    }
}
/*
 * Host driver: builds N 4x4 matrices of ones (with a few distinctive
 * entries), computes each determinant on the GPU (matDet), tree-reduces the
 * N matrices to their element-wise sum (vecMult / vecMult2), then averages
 * the determinants on the host and scales the summed matrix by that average.
 * argv[1] = N (number of matrices), argv[2] = CUDA block size.
 * Fixes vs. the original: deprecated cudaThreadSynchronize() replaced by
 * cudaDeviceSynchronize(), and the partial-chunk branch now launches vecMult
 * with GRID_BLK blocks (the original reused the outer dimGrid of N/CUDA_BLK
 * blocks, over-launching the unguarded kernel).
 */
int main(int argc, char *argv[]){
    if (argc != 3){
        printf("Falta argumento: N\n");
        printf("Falta argumento: CUDA_BLK \n");
        return 0;
    }
    cudaError_t error;
    unsigned long N = atoi (argv[1]);
    unsigned long CUDA_BLK = atoi(argv[2]),GRID_BLK,cant_blk;
    unsigned long numBytes = sizeof(BASETYPE)*4*4;  // bytes per 4x4 matrix
    BASETYPE *matrices,*d_matrices,*d_detM,*detM;
    double timetick;
    unsigned long i,j;
    int datos_matDet,datos_vecMult,matDet_desp;
    // host buffers: N matrices and one determinant slot per matrix
    matrices = (BASETYPE *)malloc(numBytes*N);
    detM = (BASETYPE *)malloc(sizeof(BASETYPE)*N);
    for (i = 0; i < 4*4*N; i++){
        matrices[i] = 1;
    }
    for (i = 0; i < N; i++){
        detM[i] = 0;
    }
    matrices[2] = 220;
    matrices[13] = 220;
    matrices[7] = 6;
    matrices[14] = 6;
    cudaMalloc((void **) &d_matrices, numBytes*N);
    cudaMalloc((void **) &d_detM, sizeof(BASETYPE)*N);
    // dynamic shared sizes: matDet needs the staged matrices plus 4 cofactor
    // slots per thread; vecMult needs one matrix per thread
    datos_matDet = numBytes * CUDA_BLK + sizeof(BASETYPE) * 4 * CUDA_BLK;
    datos_vecMult = numBytes * CUDA_BLK;
    matDet_desp = CUDA_BLK * 16;
    cant_blk = N / CUDA_BLK;
    dim3 dimBlock(CUDA_BLK);
    dim3 dimGrid(cant_blk);
    timetick = dwalltime();
    cudaMemcpy(d_matrices, matrices, numBytes*N, cudaMemcpyHostToDevice); // CPU -> GPU
    cudaMemcpy(d_detM, detM, sizeof(BASETYPE)*N, cudaMemcpyHostToDevice); // CPU -> GPU
    matDet<<<dimGrid, dimBlock,datos_matDet>>>(d_matrices,d_detM,matDet_desp);
    cudaDeviceSynchronize();
    // reduce the N matrices; full rounds use vecMult, a leftover partial
    // chunk (i not a multiple of CUDA_BLK) is folded by vecMult2
    for(i = N ; i > 1; i = i / CUDA_BLK){
        GRID_BLK = i / CUDA_BLK;
        if ((i % CUDA_BLK) == 0){
            dim3 dimGrid(GRID_BLK);
            vecMult<<<dimGrid, dimBlock,datos_vecMult>>>(d_matrices,i);
            cudaDeviceSynchronize();
        } else{
            if(GRID_BLK != 0){
                // launch exactly GRID_BLK blocks (symmetric with the branch
                // above); the original reused the outer N/CUDA_BLK grid here
                dim3 dimGridFull(GRID_BLK);
                vecMult<<<dimGridFull, dimBlock,datos_vecMult>>>(d_matrices,i);
                cudaDeviceSynchronize();
            }
            dim3 dimGrid2(1);
            vecMult2<<<dimGrid2, dimBlock,datos_vecMult>>>(d_matrices,(i % CUDA_BLK),GRID_BLK * CUDA_BLK,(i % CUDA_BLK));
            cudaDeviceSynchronize();
            // account for the partial chunk folded this round
            i = i + (i % CUDA_BLK);
        }
    }
    cudaMemcpy(matrices, d_matrices, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU (only the reduced matrix)
    cudaMemcpy(detM, d_detM, sizeof(BASETYPE)*N, cudaMemcpyDeviceToHost); // GPU -> CPU
    // average the determinants on the host
    for(i = 1; i < N ; i++){
        detM[0] += detM[i];
    }
    detM[0] = detM[0] / N;
    // scale the reduced matrix by the average determinant
    for (i = 0; i < 4*4; i++){
        matrices[i] *= detM[0];
    }
    printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
    error = cudaGetLastError();
    printf("error: %d\n",error);
    printf("%.2lf|\n",detM[0]);
    for(i=0; i < 4; i++){
        for(j=0; j < 4; j++){
            printf("%.2lf|",matrices[i*4+j]);
        }
        printf("\n");
    }
    cudaFree(d_matrices);
    cudaFree(d_detM);
    free(matrices);
    free(detM);
    return 0;
}
|
14,751 | /**
* @file cuda.cu
* @author Damian Smela <damian.a.smela@gmail.com>
* @date 10.02.2019
* @brief CUDA example - finds all the primes in the large list of positive
* integers.
*/
/********************************* INCLUDES ***********************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <memory.h>
/********************************* DEFINES ************************************/
#define FILE_LINE_BUFF_SIZE (128U)
#define TMP_STR_BUFF_SIZE (FILE_LINE_BUFF_SIZE)
#define LIST_FILENAME "prime_list.txt"
#define NUM_OF_THREADS (4U)
#define NUM_OF_CUDA_BLOCKS (64U)
#define NUM_OF_CUDA_THREADS (128U)
/***************************** STATIC FUNCTIONS *******************************/
/* Converts an ASCII digit character to its numeric value ('0' -> 0, ...,
 * '9' -> 9); characters outside '0'..'9' yield values outside 0..9, which
 * is_number() relies on for rejection. */
static int ascii_to_num(char ascii_char)
{
    return ((int) ascii_char) - '0';
}
/* Checks if the given string is a non-negative integer: non-empty and made up
 * solely of ASCII digits '0'..'9'.
 * Bug fix: the original returned true for the empty string, so blank lines in
 * the input file were accepted and parsed as the number 0. */
static bool is_number(char num_str[])
{
    /* empty string is not a number */
    if (num_str[0] == '\0')
    {
        return false;
    }
    for (char *str_ptr = num_str; *str_ptr != '\0'; str_ptr++)
    {
        /* any non-digit character disqualifies the whole string */
        if (*str_ptr < '0' || *str_ptr > '9')
        {
            return false;
        }
    }
    return true;
}
/* Returns the next positive integer from the list file, or -1 when the line
 * is not a valid number or the read fails.
 * Bug fix: getline() returns -1 on EOF/error; the original then wrote to
 * line_buff[-2] (out of bounds).  A failed read now returns -1 immediately.
 * NOTE(review): getline() may reallocate the buffer, in which case the
 * CALLER's line_buff pointer becomes stale (this function only receives a
 * copy).  Fixing that needs a char** parameter; left unchanged here — verify
 * that *line_buff_size always stays large enough in practice. */
static int get_next_num_from_file(FILE *file_ptr,
                                  char *line_buff,
                                  size_t *line_buff_size)
{
    int num_of_line_chars_tmp;
    memset(line_buff, 0, *line_buff_size);
    num_of_line_chars_tmp = (int) getline(&line_buff, line_buff_size, file_ptr);
    if (num_of_line_chars_tmp <= 0)
    {
        return -1; /* EOF or read error */
    }
    /* Erase '\n' char from string (assumes every line ends with one — a final
     * line without a newline would lose its last digit; TODO confirm) */
    line_buff[num_of_line_chars_tmp - 1] = '\0';
    if (!is_number(line_buff))
    {
        return -1;
    }
    else
    {
        return atoi(line_buff);
    }
}
/* Resolve number of CUDA blocks and threads to cover all the remaining ints
* to process.  Tries to find a launch shape that covers ints_remaining
* exactly; otherwise leaves as many full blocks as fit.
* NOTE(review): when ints_remaining < NUM_OF_CUDA_THREADS and no exact fit is
* found, *num_of_cuda_blocks ends up 0 (integer division) -- the caller must
* handle a zero-block launch; verify. */
static void resolve_cuda_threads(const int ints_remaining,
int *num_of_cuda_blocks,
int *num_of_cuda_threads)
{
/* Change the defaults only if there is less data to process than one full
* grid (NUM_OF_CUDA_BLOCKS * NUM_OF_CUDA_THREADS threads) can cover */
if (ints_remaining < NUM_OF_CUDA_BLOCKS * NUM_OF_CUDA_THREADS)
{
/* Load the defaults */
*num_of_cuda_blocks = NUM_OF_CUDA_BLOCKS;
*num_of_cuda_threads = NUM_OF_CUDA_THREADS;
/* Check if the next iteration can be run in one-shot: search for the
* largest thread count that divides ints_remaining evenly */
for (int i = *num_of_cuda_threads; i > 0; i--)
{
if (ints_remaining % i == 0)
{
/* If there is enough blocks */
if (ints_remaining / i < NUM_OF_CUDA_BLOCKS)
{
/* The last iteration can be run in a one shot */
*num_of_cuda_blocks = ints_remaining / i;
*num_of_cuda_threads = i;
return;
}
}
}
/* The code reaches here if there is no way to run the code in
* a one-shot */
/* Calculate how many full blocks can run the next iteration
* and modify the defaults for the next iteration */
*num_of_cuda_blocks = ints_remaining / NUM_OF_CUDA_THREADS;
}
}
/******************************* CUDA KERNELS *********************************/
/* Marks non-primes in int_list with -1, one thread per element, by trial
 * division over 2..value-1.  Precondition: the launch covers exactly the
 * list length (presumably arranged by resolve_cuda_threads — there is no
 * bounds parameter, so an over-sized grid would read past the buffer;
 * verify at the call site).
 * Bug fix: values below 2 (0 and 1) are not prime, but the original loop
 * never executed for them and silently reported them as prime; they are now
 * marked -1 like any other non-prime. */
__global__ void is_prime_kernel(int *int_list)
{
    /* Get the unique thread index */
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int value = *(int_list + index);
    if (value < 2)
    {
        /* 0 and 1 are not prime */
        *(int_list + index) = -1;
        return;
    }
    /* Check if the integer of this index is prime */
    for (int i = 2; i < value; i++)
    {
        if (value % i == 0)
        {
            /* If it's not a prime then change its value to -1
             * so it wont be written to the file */
            *(int_list + index) = -1;
            break;
        }
    }
}
/*********************************** MAIN *************************************/
/*
 * Program entry point: loads a "list_len=N" header plus N positive
 * integers from the file named by argv[1], sieves them for primality on
 * the GPU in slices (sized by resolve_cuda_threads), and writes the
 * surviving primes to LIST_FILENAME.
 * Returns 0 on success, 1-7 for the various argument/file errors.
 */
int main(int argc, char **argv)
{
    FILE *file_load_ptr = NULL; /* File pointer for loaded file */
    FILE *file_gen_ptr = NULL; /* File pointer for generated file */
    char *filename; /* Pointer to the string containing file name */
    char *line_buff; /* Buffer storing temp line of a file */
    size_t buff_size = FILE_LINE_BUFF_SIZE; /* Size of a line buffer (can be realloc()-ed by the getline() func) */
    int num_of_line_chars_tmp; /* Var storing temp num of chars in the line */
    char str_buff_tmp[TMP_STR_BUFF_SIZE]; /* String buffer for temp actions */
    int num_of_ints; /* Number of integers to analyze */
    int num_of_cuda_blocks = NUM_OF_CUDA_BLOCKS; /* Number of cuda blocks */
    int num_of_cuda_threads = NUM_OF_CUDA_THREADS; /* Number of cuda threads */
    int prime_cnt = 0; /* Number of primes found */
    float time; /* Variables used for measuring time */
    cudaEvent_t start, stop; /* - || - */
    printf("-------------------------CUDA Example-------------------------\n");
    printf("Finds all the primes in the large list of positive integers\n\n");
    /* Check if there is exactly one argument given */
    if (argc != 2)
    {
        printf("Program needs exactly one argument - filename of the list "
               "containing positive integer numbers!\n");
        return 1;
    }
    filename = argv[1];
    /* Open the ./"filename" file with read permission */
    file_load_ptr = fopen(filename, "r");
    if (file_load_ptr == NULL)
    {
        printf("Couldn't open \"%s\" file\n", filename);
        return 2;
    }
    /* Allocate buffers for string parsing operations */
    line_buff = (char *) malloc(buff_size);
    memset(line_buff, 0, buff_size);
    memset(str_buff_tmp, 0, TMP_STR_BUFF_SIZE);
    /* Read the first line from file */
    num_of_line_chars_tmp = (int) getline(&line_buff,
                                          &buff_size,
                                          file_load_ptr);
    /* Check if the first line fits the "list_len=X" pattern */
    if ((num_of_line_chars_tmp < 11) ||
        (strncmp(line_buff, "list_len=", 9) != 0))
    {
        printf("Error in the first line - \"%s\" not matching"
               "\"list_len=X\" pattern!\n", line_buff);
        return 3;
    }
    /* Check if the first line contains a number of integers */
    strncpy(str_buff_tmp, &line_buff[9], (size_t) num_of_line_chars_tmp - 10);
    if (!is_number(str_buff_tmp))
    {
        printf("Error in the first line - \"%s\" is not a correct "
               "list length value\n", str_buff_tmp);
        return 4;
    }
    /* Store the number of integers in this variable */
    num_of_ints = atoi(str_buff_tmp);
    printf("Loading list of %d integer numbers...\n", num_of_ints);
    /* Allocate memory for all the integers */
    int *int_list = (int *) malloc(sizeof(int) * num_of_ints);
    /* Fill the memory with the integers from file */
    for (int i = 0; i < num_of_ints; i++)
    {
        int_list[i] = get_next_num_from_file(file_load_ptr,
                                             line_buff,
                                             &buff_size);
    }
    /* Create a "LIST_FILENAME" file with write permission */
    file_gen_ptr = fopen(LIST_FILENAME, "w");
    if (file_gen_ptr == NULL)
    {
        printf("Couldn't open/create \"%s\" file\n", LIST_FILENAME);
        return 5;
    }
    /************** CUDA **************/
    /* Pointer to the current slice of the list */
    int *host_int_list_ptr = &int_list[0];
    /* Pointer to the space for device copy of the list */
    int *cuda_int_list;
    /* Remaining ints to process */
    int ints_remaining = num_of_ints;
    /* Start timer.  NOTE(review): the timed region includes the host
     * file writes below, not just the GPU work -- confirm that is the
     * intended measurement. */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /* Allocate the space for device copy of the list-slice */
    cudaMalloc((void **)&cuda_int_list,
               num_of_cuda_blocks * num_of_cuda_threads * sizeof(int));
    /* As long as the number of remaining ints is equal or greater than the
     * total number of parallel instances use all of them to compute */
    while (ints_remaining > 0)
    {
        /* Resolve the number of blocks and threads that will be used */
        resolve_cuda_threads(ints_remaining,
                             &num_of_cuda_blocks,
                             &num_of_cuda_threads);
        /* Copy the slice of the list to the device */
        cudaMemcpy(cuda_int_list,
                   host_int_list_ptr,
                   num_of_cuda_blocks * num_of_cuda_threads * sizeof(int),
                   cudaMemcpyHostToDevice);
        /* Run the kernel on all threads */
        is_prime_kernel<<<num_of_cuda_blocks,
                          num_of_cuda_threads>>>(cuda_int_list);
        /* Copy the results back to the host (cudaMemcpy implicitly
         * synchronizes with the kernel launch above) */
        cudaMemcpy(host_int_list_ptr,
                   cuda_int_list,
                   num_of_cuda_blocks * num_of_cuda_threads * sizeof(int),
                   cudaMemcpyDeviceToHost);
        /* Update the host list pointer */
        host_int_list_ptr += num_of_cuda_blocks * num_of_cuda_threads;
        /* Update the number of remaining ints */
        ints_remaining -= (num_of_cuda_blocks * num_of_cuda_threads);
    }
    /* Free the device's memory */
    cudaFree(cuda_int_list);
    /* Write data to the file (entries flagged -1 by the kernel are
     * non-primes and are skipped) */
    for (int i = 0; i < num_of_ints; i++)
    {
        if (int_list[i] != -1)
        {
            prime_cnt++;
            fprintf(file_gen_ptr, "%d\n", int_list[i]);
        }
    }
    /* Put the number of primes int the first line of generated file.
     * NOTE(review): rewind() + fprintf() overwrites the first bytes of
     * the prime data already written above -- no space was reserved for
     * this header line.  Also the "(open-mp)" label looks copy-pasted
     * from an OpenMP variant of this program; verify both. */
    rewind(file_gen_ptr);
    fprintf(file_gen_ptr, "primes_found=%d(open-mp)\n", prime_cnt);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Elapsed: %f ms\n", time);
    printf("%d primes found...\n\r", prime_cnt);
    /**********************************/
    /* Close the files and free the memory */
    if (fclose(file_load_ptr) != 0)
    {
        printf("Couldn't close \"%s\" file\n", filename);
        return 6;
    }
    if (fclose(file_gen_ptr) != 0)
    {
        printf("Couldn't close \"%s\" file\n", LIST_FILENAME);
        return 7;
    }
    free(line_buff);
    free(int_list);
    printf("Done...\n");
    return 0;
}
/************************************ EOF *************************************/
|
14,752 |
// =================================================================================================
// This file is part of the CLTune project, which loosely follows the Google C++ styleguide and uses
// a tab-size of two spaces and a max-width of 100 characters per line.
//
// Author: cedric.nugteren@surfsara.nl (Cedric Nugteren)
//
// This file contains an example OpenCL kernel as part of the gemm.cc example.
//
// -------------------------------------------------------------------------------------------------
//
// Copyright 2014 SURFsara
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// =================================================================================================
// Reference implementation of the matrix-matrix multiplication example. Note: this kernel assumes
// that matrix B is pre-transposed.
// Reference GEMM: one thread computes one element of C = A * B, where
// matrix B is pre-transposed so both operands are read with stride
// kSizeM / kSizeN along k (coalesced across the thread grid).
// Fix: a bounds guard is added so launches whose grid is not an exact
// multiple of the matrix dimensions cannot read or write out of bounds.
extern "C" __global__ void gemm_reference(const int kSizeM, const int kSizeN, const int kSizeK,
                                          const float* mat_a,
                                          const float* mat_b,
                                          float* mat_c) {
  // Thread identifiers
  const int row = blockDim.x*blockIdx.x + threadIdx.x; // From 0 to kSizeM-1
  const int col = blockDim.y*blockIdx.y + threadIdx.y; // From 0 to kSizeN-1
  // Guard against grid overshoot
  if (row >= kSizeM || col >= kSizeN) { return; }
  // Computes a single value
  float result = 0.0f;
  for (int k=0; k<kSizeK; k++) {
    float mat_a_val = mat_a[k*kSizeM + row];
    float mat_b_val = mat_b[k*kSizeN + col];
    result += mat_a_val * mat_b_val;
  }
  // Stores the result (column-major output)
  mat_c[col*kSizeM + row] = result;
}
// =================================================================================================
|
14,753 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
static cudaStream_t *streams;
// Busy-wait kernel: spins until at least num_cycles device clock cycles
// have elapsed since entry.
__global__ void sleep(int64_t num_cycles)
{
    const int64_t begin = clock64();
    int64_t elapsed = 0;
    while (elapsed < num_cycles) {
        elapsed = clock64() - begin;
    }
}
// Convert a duration in seconds into the equivalent number of GPU clock
// cycles on device 0.
extern "C" int64_t get_cycles(float seconds)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // prop.clockRate is reported in kHz; scale to Hz first.
    const int64_t hz = int64_t(prop.clockRate) * 1000;
    return (int64_t)(seconds * hz);
}
// Allocate and create num_streams CUDA streams in the module-level
// `streams` array.
extern "C" void create_streams(int num_streams)
{
    streams = (cudaStream_t *) malloc(num_streams * sizeof(cudaStream_t));
    for (int s = 0; s < num_streams; s++) {
        cudaStreamCreate(&streams[s]);
    }
}
// Launch the sleep kernel on the given stream for num_cycles cycles.
extern "C" void sleep_kernel(int64_t num_cycles, int stream_id)
{
    // A single thread in a single block is enough: the kernel only
    // spins on the device clock.
    const int gridSize = 1;
    const int blockSize = 1;
    sleep<<< gridSize, blockSize, 0, streams[stream_id] >>>(num_cycles);
}
// Drain each stream, destroy it, then release the stream array.
extern "C" void destroy_streams(int num_streams)
{
    for (int s = 0; s < num_streams; s++) {
        // Block until all work queued on this stream has finished.
        cudaStreamSynchronize(streams[s]);
        cudaStreamDestroy(streams[s]);
    }
    free(streams);
}
|
14,754 | #include<stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
// Query and print the capabilities of CUDA device 0.
// Fixes: `i` was used uninitialized as the device index (undefined
// behaviour); size_t properties were printed with %ld and the int
// warpSize with %ld -- format specifiers now match the field types;
// an explicit return value is added.
int main()
{
    int dev = 0;  // query the first CUDA device
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, dev);
    printf("Device name: %s\n", prop.name);
    printf("Number of multiprocessors on GPU: %d\n", prop.multiProcessorCount);
    printf("Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
    printf("Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
    // Factor 2.0 accounts for DDR (two transfers per clock).
    printf("Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    printf("Max number of blocks: %ld(%d, %d, %d)\n",(long)prop.maxGridSize[0]*prop.maxGridSize[1]*prop.maxGridSize[2],prop.maxGridSize[0],prop.maxGridSize[1],prop.maxGridSize[2]);
    printf("Amount of shared memory per block (bytes): %zu\n", prop.sharedMemPerBlock);
    printf("Amount of global memory (bytes): %zu\n", prop.totalGlobalMem);
    printf("Warp size of GPU (number of threads): %d\n", prop.warpSize);
    printf("Amount of constant memory (bytes): %zu\n", prop.totalConstMem);
    return 0;
}
|
14,755 | #include <stdio.h>
#include <math.h>
// Serial (single-thread) Jacobi sweep over the N x N interior of an
// (N+2) x (N+2) row-major grid:
//   d_Uk1 <- h * (4-neighbour sum of d_Uk + delta_squared * d_F).
// A squared-update norm is accumulated locally but never returned.
__global__ void
update_jacobi_gpu(double *d_Uk, double *d_Uk1, double *d_F, int N, double delta_squared, double h){
    const int width = N + 2;
    double accum_norm = 0.0;
    for (int col = 1; col < N + 1; col++) {
        for (int row = 1; row < N + 1; row++) {
            const int above  = width * (row - 1) + col;
            const int below  = width * (row + 1) + col;
            const int center = width * row + col;
            const int west   = center - 1;
            const int east   = center + 1;
            const double updated = h * (d_Uk[below] + d_Uk[above] + d_Uk[east] + d_Uk[west]
                                        + delta_squared * d_F[center]);
            const double diff = updated - d_Uk[center];
            accum_norm += diff * diff;
            d_Uk1[center] = updated;
        }
    }
}
// Parallel Jacobi sweep: one thread per interior point of the
// (N+2) x (N+2) grid.  Same update rule as update_jacobi_gpu; the
// per-thread squared difference is computed but has no observable
// effect.
__global__ void
update_jacobi_gpu2(double *d_Uk, double *d_Uk1, double *d_F, int N, double delta_squared, double h){
    const int col = threadIdx.x + blockIdx.x * blockDim.x + 1;
    const int row = threadIdx.y + blockIdx.y * blockDim.y + 1;
    // Skip threads that fall outside the interior.
    if (col >= N + 1 || row >= N + 1)
        return;
    const int width  = N + 2;
    const int above  = width * (row - 1) + col;
    const int below  = width * (row + 1) + col;
    const int center = width * row + col;
    const int west   = center - 1;
    const int east   = center + 1;
    const double updated = h * (d_Uk[below] + d_Uk[above] + d_Uk[east] + d_Uk[west]
                                + delta_squared * d_F[center]);
    const double diff = updated - d_Uk[center];
    double accum_norm = diff * diff;  // kept for parity with the serial kernel; unused
    (void) accum_norm;
    d_Uk1[center] = updated;
}
// Jacobi sweep for the TOP half of the domain in a two-GPU split.
// d_Uk holds this GPU's half; d_Ukbot holds the other GPU's half, used
// only for the halo row at i == N/2 (its "down" neighbour lives at the
// top of d_Ukbot, indexed by j alone).
// NOTE(review): unlike update_jacobi_gpu2, no norm contribution is
// stored here -- tmp/tmp1/norm are declared but unused; confirm that is
// intentional.
__global__ void
update_jacobi_2gpu0(double *d_Uk, double *d_Ukbot, double *d_Uk1, double *d_F, int N, double delta_squared, double h){
    int i,j;
    j = threadIdx.x + blockIdx.x * blockDim.x +1 ;
    i = threadIdx.y + blockIdx.y * blockDim.y +1;
    double tmp, tmp1;
    double norm=0.0;
    int up, down, left, right;
    int loc = 0;
    // Boundary row of this half: the southern neighbour comes from the
    // other GPU's buffer.
    if(j < N+1 && i==N/2){
        up = ((N+2) * (i - 1)) + j;
        down = j;
        left = ((N+2) * i) + (j -1);
        right = ((N+2) * i) + (j + 1);
        loc = ((N+2) * i) + j;
        //printf("0:%d, %f, \n",loc, d_Ukbot[down]);
        d_Uk1[loc] = h*(d_Ukbot[down] + d_Uk[up] + d_Uk[right] + d_Uk[left] + delta_squared * d_F[loc]);
    }
    // Strictly interior rows of this half: all neighbours are local.
    if (j < N+1 && i < N/2) {
        up = ((N+2) * (i - 1)) + j;
        down = ((N+2) * (i + 1)) + j;
        left = ((N+2) * i) + (j -1);
        right = ((N+2) * i) + (j + 1);
        loc = ((N+2) * i) + j;
        //printf("0:%d, %f\n",loc, d_Uk[right]);
        d_Uk1[loc] = h*(d_Uk[down] + d_Uk[up] + d_Uk[right] + d_Uk[left] + delta_squared * d_F[loc]);
    }
}
// Jacobi sweep for the BOTTOM half of the domain in a two-GPU split.
// d_Uk holds this GPU's half (rows start at i == 0 here, i.e. without
// the +1 offset used elsewhere); d_Uktop holds the other GPU's half,
// used only for the halo row at i == 0 (its "up" neighbour is the last
// row, (N+2)*N/2 + j, of d_Uktop).
__global__ void
update_jacobi_2gpu1(double *d_Uk, double *d_Uktop, double *d_Uk1, double *d_F, int N, double delta_squared, double h){
    int i,j;
    j = threadIdx.x + blockIdx.x * blockDim.x +1;
    i = threadIdx.y + blockIdx.y * blockDim.y;
    //printf("1:");
    //printf("i = %d, j = %d \n",i,j);
    double tmp, tmp1;
    double norm=0.0;
    int up, down, left, right;
    int loc = 0;
    // First row of this half: the northern neighbour comes from the
    // other GPU's buffer.
    if(j < N+1 && i == 0){
        up = ((N+2) * N/2) + j;
        down = ((N+2) * (i + 1)) + j;
        left = ((N+2) * i) + (j -1);
        right = ((N+2) * i) + (j + 1);
        loc = ((N+2) * i) + j;
        //printf("1:%d, %f\n",loc, d_Uk[right]);
        d_Uk1[loc] = h*(d_Uk[down] + d_Uktop[up] + d_Uk[right] + d_Uk[left] + delta_squared * d_F[loc]);
    }
    // Remaining rows of this half: all neighbours are local.
    if (j < N+1 && i < N/2 && i>0) {
        up = ((N+2) * (i - 1)) + j;
        down = ((N+2) * (i + 1)) + j;
        left = ((N+2) * i) + (j -1);
        right = ((N+2) * i) + (j + 1);
        loc = ((N+2) * i) + j;
        //printf("1:%d, %f\n",loc, d_Uk[right]);
        d_Uk1[loc] = h*(d_Uk[down] + d_Uk[up] + d_Uk[right] + d_Uk[left] + delta_squared * d_F[loc]);
        //printf("%f\n", d_Uk1[loc]);
    }
}
// Initialise the (N+2) x (N+2) grids: interior points start at 0, the
// top wall (i == 0) and the two side walls (j == 0, j == N+1) are held
// at 20, and the bottom row stays open (0).  A rectangular "radiator"
// patch of the forcing term h_F is then set to 2000.
void
init_matrices(double *h_Uk, double *h_Uk1, double *h_F, int N){
    const int width = N + 2;
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            const int boundary = (i == 0 || j == 0 || j == N + 1) ? 20 : 0;
            const int loc = width * i + j;
            h_Uk[loc]  = boundary;
            h_Uk1[loc] = boundary;
            h_F[loc]   = boundary;
        }
    }
    // Radiator (heat source) rectangle in grid-fraction coordinates.
    const int rad_start_i = round(width * 4.0 / 6);
    const int rad_end_i   = round(width * 5.0 / 6);
    const int rad_start_j = round(width * 3.0 / 6);
    const int rad_end_j   = round(width * 4.0 / 6);
    for (int i = rad_start_i; i < rad_end_i; i++) {
        for (int j = rad_start_j; j < rad_end_j; j++) {
            h_F[width * i + j] = 2000;
        }
    }
}
// Print an (N+2) x (N+2) row-major matrix, one row per line, preceded
// by a blank line.
void
display_mat(double *M, int N){
    const int width = N + 2;
    printf("\n");
    for (int r = 0; r < width; r++) {
        for (int c = 0; c < width; c++) {
            printf("%f ", M[width * r + c]);
        }
        printf("\n");
    }
}
|
14,756 | #include <stdio.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 1024
#define SIZE 5000000
#define LL long long int
// Element-wise copy kernel (scan placeholder): result[i] = data[i] for
// every i < size; out-of-range threads do nothing.
__global__ void scan(LL* data,LL* result, unsigned int size) {
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < size) {
        result[gid] = data[gid];
    }
}
LL* AllocArr(unsigned int size);
/*
 * Host driver: fills an input array with ones, copies it to the GPU,
 * runs the copy kernel and prints one element of each array.
 * Fixes: allocation results are now actually checked (the original
 * assigned cudaMalloc/cudaMemcpy results to `result` and never looked
 * at them), and the program returns 0 on success instead of 1 (a
 * non-zero exit status signals failure to the shell).
 */
int main(void) {
    unsigned int size = SIZE;
    unsigned int memSize = SIZE * sizeof(LL);
    LL *iptr = AllocArr(size);
    LL *optr = (LL *) malloc(memSize);
    if (iptr == NULL || optr == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    LL *di_ptr = NULL, *do_ptr = NULL;
    if (cudaMalloc((void **)&di_ptr, memSize) != cudaSuccess ||
        cudaMalloc((void **)&do_ptr, memSize) != cudaSuccess) {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }
    cudaMemcpy(di_ptr, iptr, memSize, cudaMemcpyHostToDevice);
    /* Ceil-divide so the last partial block is still launched */
    scan<<<(SIZE + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(di_ptr,do_ptr,size);
    /* Blocking copy: also synchronizes with the kernel above */
    cudaMemcpy(optr, do_ptr, memSize, cudaMemcpyDeviceToHost);
    printf("Non computed %lld \nComputed %lld\n", iptr[20], optr[20]);
    free(iptr);
    free(optr);
    cudaFree(di_ptr);
    cudaFree(do_ptr);
    return 0;
}
/*
 * Allocate an array of `size` long long ints, each initialised to 1.
 * Returns NULL if the allocation fails (the original wrote through an
 * unchecked malloc result).  `long long int` is spelled out; it is the
 * same type the LL macro expands to, so callers are unaffected.
 */
long long int* AllocArr(unsigned int size) {
    long long int *ptr = (long long int *) malloc(sizeof(long long int) * size);
    if (ptr == NULL) {
        return NULL;  /* let the caller decide how to fail */
    }
    for (unsigned int i = 0; i < size; i++) {
        ptr[i] = 1;
    }
    return ptr;
}
|
14,757 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Print the calling thread's 3-D coordinates within its block.
// Fix: the original format string had no trailing newline, so the
// output of all threads ran together on one line.
__global__ void print_threadIds()
{
    printf("threadIdx.x: %d , threadIdx.y:%d, threadIdx.z:%d\n",
           threadIdx.x, threadIdx.y, threadIdx.z);
}
// Launch an 8x8-thread block over a 16x16 domain and let every thread
// print its indices, then tear the device down.
int main()
{
    const int nx = 16;
    const int ny = 16;
    dim3 block(8, 8, 1);
    dim3 grid(nx / block.x, ny / block.y);
    print_threadIds<<< grid, block >>>();
    // Wait for all device printf output before resetting the device.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
14,758 | //pass
//--blockDim=32 --gridDim=64 --no-inline
#include <cuda.h>
#define N 32
// Device helper: writes the calling thread's x-index into *ai.
// Part of a GPUVerify regression test (see the //pass header above);
// odata is deliberately unused.
__device__ void f(float *odata, int* ai) {
  int thid = threadIdx.x;
  *ai = thid;
}
// Kernel under verification: each thread stores its own thread index
// into a private local through the helper f().
__global__ void k(float *g_odata) {
  int ai;
  f(g_odata,&ai);
}
|
14,759 | #include "includes.h"
// Row-wise minimum: one thread per row of the row-major nrows x ncols
// matrix `input`; writes the minimum value to output[row] and its
// 1-based column index (presumably a Torch/Lua convention -- confirm
// with the caller) to indices[row].
__global__ void min_output(float *input, float *output, float *indices, long nrows, long ncols)
{
    const long row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= nrows) return;
    // Point at the start of this thread's row.
    const float *row_ptr = input + row * ncols;
    float best = row_ptr[0];
    long best_col = 0;
    for (long col = 1; col < ncols; col++) {
        const float v = row_ptr[col];
        if (v < best) {
            best = v;
            best_col = col;
        }
    }
    output[row] = best;
    indices[row] = best_col + 1;  // 1-based index
}
14,760 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include <bits/stdc++.h>
using namespace std;
#define debug 0
// Empty kernel: launched once before the timed runs to absorb CUDA
// context-creation overhead (see the "to avoid first extra time"
// launch in main).
__global__ void useless(){}
// Fill arr[0..n) with val, one element per thread.
__global__ void init(int n, int *arr, int val){
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= n) return;
    arr[tid] = val;
}
// Single-thread kernel: arr[index] = val.  Used to poke one device
// word without a host->device memcpy.
__global__ void set_val(int *arr, int index, int val){
    arr[index] = val;
}
// One BFS frontier expansion over one side of the bipartite hypergraph.
// Frontier layout: slot [0] = count, slots [1..count] = active ids;
// frontier_out[0] must be zeroed by the caller before the launch.
// off/adj is the CSR of the side being expanded; aux is per-CSR-entry
// scratch used to stage discoveries before the bulk append; parents is
// -1 for undiscovered nodes and is claimed via atomicCAS so each
// destination is appended exactly once.
__global__ void BFS_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *aux, int *parents){
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    int cnt = frontier_in[0];
    if(index < cnt){
        int c = 0;  // number of neighbours this thread discovered
        int s = frontier_in[index+1];
        for(int i=0; i<(off[s+1]-off[s]); i++){
            int d = adj[off[s] + i];
            // Only the first thread to claim d gets to append it.
            if(atomicCAS(&parents[d], -1, s) == -1){
                aux[off[s] + c] = d;
                c++;
            }
        }
        // Reserve c slots in the output frontier, then copy staged ids.
        int start = atomicAdd(&frontier_out[0], c);
        for(int i=0; i<c; i++){
            frontier_out[start + i + 1] = aux[off[s] + i];
        }
    }
}
// BFS over a hypergraph represented as a bipartite graph of nv vertices
// and nh hyperedges (CSR arrays offv/adjv and offh/adjh are device
// pointers).  Frontiers alternate vertex-side -> hyperedge-side until
// one side produces an empty frontier.
// NOTE(review): `outfile` is unused here -- parent arrays are never
// written out; the device allocations (parentsv/h, auxv/h, frontierv/h)
// and the per-iteration debug mallocs are never freed.  Confirm whether
// that is acceptable for a benchmark-only path.
void HyperBFS(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh){
    int *parentsv;
    int *parentsh;
    cudaMalloc(&parentsv, nv * sizeof(int));
    cudaMalloc(&parentsh, nh * sizeof(int));
    // -1 marks "not yet discovered" on both sides.
    init<<<(nv+31)/32, 32>>>(nv, parentsv, -1);
    init<<<(nh+31)/32, 32>>>(nh, parentsh, -1);
    int *auxv;
    int *auxh;
    cudaMalloc(&auxv, mv * sizeof(int));
    cudaMalloc(&auxh, mh * sizeof(int));
    int *frontierv;
    int *frontierh;
    cudaMalloc(&frontierv, (nv + 1) * sizeof(int));
    cudaMalloc(&frontierh, (nh + 1) * sizeof(int));
    int *check = (int *) malloc(sizeof(int));
    // Seed: empty hyperedge frontier, vertex frontier = { source }.
    set_val<<<1,1>>>(frontierh, 0, 0);
    set_val<<<1,1>>>(frontierv, 0, 1);
    set_val<<<1,1>>>(frontierv, 1, source);
    set_val<<<1,1>>>(parentsv, source, source);
    while(1){
        // HyperBFS main loop
        cudaMemcpy(check, frontierv, sizeof(int), cudaMemcpyDeviceToHost);
        if(*check == 0) break;
        BFS_step<<<(*check+31)/32, 32>>>(frontierv, frontierh, offv, adjv, auxv, parentsh);
        set_val<<<1,1>>>(frontierv, 0, 0);
        if(debug){
            int *frontier = (int *) malloc((nh + 1) * sizeof(int));
            cudaMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "frontierh ";
            for(int i=0; i<=nh; i++){
                cout << frontier[i] << " ";
            }
            cout << endl;
            int *parents = (int *) malloc(nh * sizeof(int));
            cudaMemcpy(parents, parentsh, nh * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "parentsh ";
            for(int i=0; i<nh; i++){
                cout << parents[i] << " ";
            }
            cout << endl;
        }
        cudaMemcpy(check, frontierh, sizeof(int), cudaMemcpyDeviceToHost);
        if(*check == 0) break;
        BFS_step<<<(*check+31)/32, 32>>>(frontierh, frontierv, offh, adjh, auxh, parentsv);
        set_val<<<1,1>>>(frontierh, 0, 0);
        if(debug){
            int *frontier = (int *) malloc((nv + 1) * sizeof(int));
            cudaMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "frontierv ";
            for(int i=0; i<=nv; i++){
                cout << frontier[i] << " ";
            }
            cout << endl;
            int *parents = (int *) malloc(nv * sizeof(int));
            cudaMemcpy(parents, parentsv, nv * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "parentsv ";
            for(int i=0; i<nv; i++){
                cout << parents[i] << " ";
            }
            cout << endl;
        }
    }
    cudaDeviceSynchronize();
}
// arr[i] = -neg[i] for i < n; used to seed hyperedge counters with
// their negated in-degree.
__global__ void init_neg(int n, int *arr, int *neg){
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= n) return;
    arr[tid] = -neg[tid];
}
// One B-path ("betweenness path" style) expansion step.  parents[d]
// starts at -in_degree(d) (see init_neg); each visit increments it, and
// the destination only joins the output frontier once all of its
// in-neighbours have fired, deduplicated via the worklist CAS.
// NOTE(review): the plain store `parents[d] = s` after the atomicAdd
// races with concurrent atomicAdds from other threads hitting the same
// d -- confirm whether the parent value is ever consumed.
__global__ void BPath_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *aux, int *parents, int *worklist){
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    int cnt = frontier_in[0];
    if(index < cnt){
        int c = 0;
        int s = frontier_in[index+1];
        for(int i=0; i<(off[s+1]-off[s]); i++){
            int d = adj[off[s] + i];
            int old = atomicAdd(&parents[d], 1);
            if(old == -1){
                // This was the last missing in-neighbour; record s.
                parents[d] = s;
            }
            else{
                // Deduplicate membership in the next frontier.
                if(atomicCAS(&worklist[d], 0, 1) == 0){
                    aux[off[s] + c] = d;
                    c++;
                }
            }
        }
        int start = atomicAdd(&frontier_out[0], c);
        for(int i=0; i<c; i++){
            frontier_out[start + i + 1] = aux[off[s] + i];
        }
    }
}
// B-path traversal: vertex side expands with BPath_step (hyperedges
// fire only after all in-neighbours arrive, counted via incnth), and
// the hyperedge side expands back with plain BFS_step.
// NOTE(review): `outfile` and `incntv` are unused, and device/debug
// allocations are never freed -- same caveats as HyperBFS.
void HyperBPath(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int *incntv, int *incnth){
    int *parentsv;
    int *parentsh;
    int *worklist;
    cudaMalloc(&parentsv, nv * sizeof(int));
    cudaMalloc(&parentsh, nh * sizeof(int));
    cudaMalloc(&worklist, nh * sizeof(int));
    init<<<(nv+31)/32, 32>>>(nv, parentsv, -1);
    // Hyperedge counters start at -in_degree so the atomicAdd in
    // BPath_step reaches -1 exactly when the last in-neighbour fires.
    init_neg<<<(nh+31)/32, 32>>>(nh, parentsh, incnth);
    init<<<(nh+31)/32, 32>>>(nh, worklist, 0);
    int *auxv;
    int *auxh;
    cudaMalloc(&auxv, mv * sizeof(int));
    cudaMalloc(&auxh, mh * sizeof(int));
    int *frontierv;
    int *frontierh;
    cudaMalloc(&frontierv, (nv + 1) * sizeof(int));
    cudaMalloc(&frontierh, (nh + 1) * sizeof(int));
    int *check = (int *) malloc(sizeof(int));
    set_val<<<1,1>>>(frontierh, 0, 0);
    set_val<<<1,1>>>(frontierv, 0, 1);
    set_val<<<1,1>>>(frontierv, 1, source);
    set_val<<<1,1>>>(parentsv, source, source);
    while(1){
        // HyperBFS main loop
        cudaMemcpy(check, frontierv, sizeof(int), cudaMemcpyDeviceToHost);
        if(*check == 0) break;
        BPath_step<<<(*check+31)/32, 32>>>(frontierv, frontierh, offv, adjv, auxv, parentsh, worklist);
        set_val<<<1,1>>>(frontierv, 0, 0);
        // Reset dedup flags for the next round.
        init<<<(nh+31)/32, 32>>>(nh, worklist, 0);
        if(debug){
            int *frontier = (int *) malloc((nh + 1) * sizeof(int));
            cudaMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "frontierh ";
            for(int i=0; i<=nh; i++){
                cout << frontier[i] << " ";
            }
            cout << endl;
            int *parents = (int *) malloc(nh * sizeof(int));
            cudaMemcpy(parents, parentsh, nh * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "parentsh ";
            for(int i=0; i<nh; i++){
                cout << parents[i] << " ";
            }
            cout << endl;
        }
        cudaMemcpy(check, frontierh, sizeof(int), cudaMemcpyDeviceToHost);
        if(*check == 0) break;
        BFS_step<<<(*check+31)/32, 32>>>(frontierh, frontierv, offh, adjh, auxh, parentsv);
        set_val<<<1,1>>>(frontierh, 0, 0);
        if(debug){
            int *frontier = (int *) malloc((nv + 1) * sizeof(int));
            cudaMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "frontierv ";
            for(int i=0; i<=nv; i++){
                cout << frontier[i] << " ";
            }
            cout << endl;
            int *parents = (int *) malloc(nv * sizeof(int));
            cudaMemcpy(parents, parentsv, nv * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "parentsv ";
            for(int i=0; i<nv; i++){
                cout << parents[i] << " ";
            }
            cout << endl;
        }
    }
    cudaDeviceSynchronize();
}
// One Bellman-Ford-style relaxation step: each frontier node s relaxes
// its outgoing weighted edges; improved destinations join the output
// frontier once per round (deduplicated via the visit CAS).  atomicMin
// resolves concurrent relaxations of the same destination; the earlier
// non-atomic read of shortest_out[d] is only a cheap pre-filter.
__global__ void SSSP_step(int *frontier_in, int *frontier_out, int *off, int *adj, int *wgh, int *aux, int *visit, int *shortest_in, int *shortest_out){
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    int cnt = frontier_in[0];
    if(index < cnt){
        int c = 0;
        int s = frontier_in[index+1];
        for(int i=0; i<(off[s+1]-off[s]); i++){
            int d = adj[off[s] + i];
            int newdist = shortest_in[s] + wgh[off[s] + i];
            int old = shortest_out[d];
            if(newdist < old){
                atomicMin(&shortest_out[d], newdist);
                if(atomicCAS(&visit[d], 0, 1) == 0){
                    aux[off[s] + c] = d;
                    c++;
                }
            }
        }
        int start = atomicAdd(&frontier_out[0], c);
        for(int i=0; i<c; i++){
            frontier_out[start + i + 1] = aux[off[s] + i];
        }
    }
}
// Single-source shortest paths on the hypergraph, alternating weighted
// relaxation steps between the vertex and hyperedge sides.  Distances
// start at INT_MAX/2 (an "infinity" that cannot overflow when a weight
// is added).  If nv-1 rounds complete without convergence the vertex
// distances are overwritten with -INT_MAX/2 -- presumably a
// negative-cycle marker; confirm with the consumer of the output file.
// Results for both sides are written to `outfile`, one line each.
// NOTE(review): the host buffer `shortest` is re-malloc'd without
// freeing the first allocation, and device buffers are never freed.
void HyperSSSP(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int *wghv, int *wghh){
    int *visitv;
    int *visith;
    cudaMalloc(&visitv, nv * sizeof(int));
    cudaMalloc(&visith, nh * sizeof(int));
    init<<<(nv+31)/32, 32>>>(nv, visitv, 0);
    init<<<(nh+31)/32, 32>>>(nh, visith, 0);
    int *shortestv;
    int *shortesth;
    cudaMalloc(&shortestv, nv * sizeof(int));
    cudaMalloc(&shortesth, nh * sizeof(int));
    init<<<(nv+31)/32, 32>>>(nv, shortestv, INT_MAX/2);
    init<<<(nh+31)/32, 32>>>(nh, shortesth, INT_MAX/2);
    int *auxv;
    int *auxh;
    cudaMalloc(&auxv, mv * sizeof(int));
    cudaMalloc(&auxh, mh * sizeof(int));
    int *frontierv;
    int *frontierh;
    cudaMalloc(&frontierv, (nv + 1) * sizeof(int));
    cudaMalloc(&frontierh, (nh + 1) * sizeof(int));
    int *check = (int *) malloc(sizeof(int));
    set_val<<<1,1>>>(frontierh, 0, 0);
    set_val<<<1,1>>>(frontierv, 0, 1);
    set_val<<<1,1>>>(frontierv, 1, source);
    set_val<<<1,1>>>(shortestv, source, 0);
    int round = 0;
    while(1){
        // HyperSSSP main loop
        if(round == nv-1){
            init<<<(nv+31)/32, 32>>>(nv, shortestv, -INT_MAX/2);
            break;
        }
        cudaMemcpy(check, frontierv, sizeof(int), cudaMemcpyDeviceToHost);
        if(*check == 0) break;
        SSSP_step<<<(*check+31)/32, 32>>>(frontierv, frontierh, offv, adjv, wghv, auxv, visith, shortestv, shortesth);
        set_val<<<1,1>>>(frontierv, 0, 0);
        init<<<(nh+31)/32, 32>>>(nh, visith, 0);
        if(debug){
            int *frontier = (int *) malloc((nh + 1) * sizeof(int));
            cudaMemcpy(frontier, frontierh, (nh + 1) * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "frontierh ";
            for(int i=0; i<=nh; i++){
                cout << frontier[i] << " ";
            }
            cout << endl;
            int *shortest = (int *) malloc(nh * sizeof(int));
            cudaMemcpy(shortest, shortesth, nh * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "shortesth ";
            for(int i=0; i<nh; i++){
                cout << shortest[i] << " ";
            }
            cout << endl;
        }
        cudaMemcpy(check, frontierh, sizeof(int), cudaMemcpyDeviceToHost);
        if(*check == 0) break;
        SSSP_step<<<(*check+31)/32, 32>>>(frontierh, frontierv, offh, adjh, wghh, auxh, visitv, shortesth, shortestv);
        set_val<<<1,1>>>(frontierh, 0, 0);
        init<<<(nv+31)/32, 32>>>(nv, visitv, 0);
        round++;
        if(debug){
            int *frontier = (int *) malloc((nv + 1) * sizeof(int));
            cudaMemcpy(frontier, frontierv, (nv + 1) * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "frontierv ";
            for(int i=0; i<=nv; i++){
                cout << frontier[i] << " ";
            }
            cout << endl;
            int *shortest = (int *) malloc(nv * sizeof(int));
            cudaMemcpy(shortest, shortestv, nv * sizeof(int), cudaMemcpyDeviceToHost);
            cout << "shortestv ";
            for(int i=0; i<nv; i++){
                cout << shortest[i] << " ";
            }
            cout << endl;
        }
    }
    cudaDeviceSynchronize();
    ofstream fout;
    fout.open(outfile);
    int *shortest;
    shortest = (int *) malloc(nv * sizeof(int));
    cudaMemcpy(shortest, shortestv, nv * sizeof(int), cudaMemcpyDeviceToHost);
    for(int i=0; i<nv; i++){
        fout << shortest[i] << " ";
    }
    fout << endl;
    shortest = (int *) malloc(nh * sizeof(int));
    cudaMemcpy(shortest, shortesth, nh * sizeof(int), cudaMemcpyDeviceToHost);
    for(int i=0; i<nh; i++){
        fout << shortest[i] << " ";
    }
    fout << endl;
    fout.close();
}
// Write the identity frontier: arr[i+1] = i for every i < n.  Slot 0
// holds the count and is set separately by the caller via set_val.
__global__ void init_index(int n, int *arr){
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= n) return;
    arr[tid + 1] = tid;
}
// Fill the float array arr[0..n) with val, one element per thread.
__global__ void init_float(int n, float *arr, float val){
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= n) return;
    arr[tid] = val;
}
// One PageRank push step: each frontier node splits its current value
// equally among its out-neighbours and accumulates into pval_out via
// atomicAdd.  pval_out must be zeroed by the caller before the launch.
__global__ void PageRank_step(int *frontier_in, int *off, int *adj, float *pval_in, float *pval_out){
    int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    int cnt = frontier_in[0];
    if(index < cnt){
        int s = frontier_in[index+1];
        // Equal share per outgoing edge of s.
        float add_val = pval_in[s] / (off[s+1] - off[s]);
        for(int i=0; i<(off[s+1]-off[s]); i++){
            int d = adj[off[s] + i];
            atomicAdd(&pval_out[d], add_val);
        }
    }
}
// Apply the PageRank damping step in place:
// pval[i] = damp * pval[i] + addconst.
__global__ void PageRank_norm(int n, float *pval, float damp, float addconst){
    const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= n) return;
    pval[tid] = damp * pval[tid] + addconst;
}
// PageRank over the hypergraph: each iteration pushes vertex values to
// hyperedges, then hyperedge values back to vertices, and applies the
// damping step on the vertex side.  All nodes stay in the frontier
// every round (identity frontiers built by init_index).  Final vertex
// values are written to `outfile`.
// NOTE(review): `source` and `addconsth` are unused; `pvalh` is used
// before any initialisation outside the loop (it is zeroed at the top
// of each iteration, so only the very first launch ordering matters);
// device and debug allocations are never freed.
void HyperPageRank(int nv, int mv, int nh, int mh, int source, string outfile, int *offv, int *offh, int *adjv, int *adjh, int maxiter){
    float *pvalv;
    float *pvalh;
    cudaMalloc(&pvalv, nv * sizeof(float));
    cudaMalloc(&pvalh, nh * sizeof(float));
    // Uniform initial rank 1/nv.
    init_float<<<(nv+31)/32, 32>>>(nv, pvalv, 1.0/((float)nv));
    int *frontierv;
    int *frontierh;
    cudaMalloc(&frontierv, (nv + 1) * sizeof(int));
    cudaMalloc(&frontierh, (nh + 1) * sizeof(int));
    init_index<<<(nv+31)/32, 32>>>(nv, frontierv);
    init_index<<<(nh+31)/32, 32>>>(nh, frontierh);
    set_val<<<1,1>>>(frontierh, 0, nh);
    set_val<<<1,1>>>(frontierv, 0, nv);
    float damp = 0.85;
    float addconstv = (1.0 - damp)*(1/(float) nv);
    float addconsth = (1.0 - damp)*(1/(float) nh);
    for(int iter = 0; iter < maxiter; iter++){
        // HyperPageRank main loop
        init_float<<<(nh+31)/32, 32>>>(nh, pvalh, 0.0);
        PageRank_step<<<(nv+31)/32, 32>>>(frontierv, offv, adjv, pvalv, pvalh);
        if(debug){
            float *pval = (float *) malloc(nh * sizeof(float));
            cudaMemcpy(pval, pvalh, nh * sizeof(float), cudaMemcpyDeviceToHost);
            cout << "pvalh ";
            for(int i=0; i<nh; i++){
                printf("%.6f ", pval[i]);
            }
            cout << endl;
        }
        init_float<<<(nv+31)/32, 32>>>(nv, pvalv, 0.0);
        PageRank_step<<<(nh+31)/32, 32>>>(frontierh, offh, adjh, pvalh, pvalv);
        PageRank_norm<<<(nv+31)/32, 32>>>(nv, pvalv, damp, addconstv);
        if(debug){
            float *pval = (float *) malloc(nv * sizeof(float));
            cudaMemcpy(pval, pvalv, nv * sizeof(float), cudaMemcpyDeviceToHost);
            cout << "pvalv ";
            for(int i=0; i<nv; i++){
                printf("%.6f ", pval[i]);
            }
            cout << endl;
        }
    }
    cudaDeviceSynchronize();
    ofstream fout;
    fout.open(outfile);
    float *pval = (float *) malloc(nv * sizeof(float));
    cudaMemcpy(pval, pvalv, nv * sizeof(float), cudaMemcpyDeviceToHost);
    for(int i=0; i<nv; i++){
        fout << setprecision(6) << pval[i] << " ";
    }
    fout << endl;
    fout.close();
}
// main code
// Program entry: reads a hypergraph in the bipartite CSR text format
// (header, vertex offsets/adjacency/weights, hyperedge offsets/
// adjacency/weights) from argv[2], uploads it to the GPU, and runs the
// algorithm named by argv[1] (BFS | BPath | SSSP | PageRank) four
// times, writing results to argv[3] and printing each run's time.
// Fixes: incntv/incnth are now zero-initialised with calloc before
// being used as ++ counters (the original incremented uninitialised
// malloc'd memory -- undefined behaviour), and argc is validated before
// argv is dereferenced.
int main(int argc, char **argv){
    if(argc < 4){
        cout << "usage: " << argv[0] << " <algorithm> <input-file> <output-file>" << endl;
        return 1;
    }
    string algorithm(argv[1]);
    string infile(argv[2]);
    string outfile(argv[3]);
    ifstream fin;
    fin.open(infile);
    // read hypergraph parameters
    string no_use;
    fin >> no_use;
    int nv, mv, nh, mh;
    fin >> nv;
    fin >> mv;
    fin >> nh;
    fin >> mh;
    int *offv = (int *) malloc((nv + 1) * sizeof(int));
    int *offh = (int *) malloc((nh + 1) * sizeof(int));
    int *adjv = (int *) malloc(mv * sizeof(int));
    int *adjh = (int *) malloc(mh * sizeof(int));
    int *wghv = (int *) malloc(mv * sizeof(int));
    int *wghh = (int *) malloc(mh * sizeof(int));
    // In-degree counters are incremented below, so they must start at 0.
    int *incntv = (int *) calloc(nv, sizeof(int));
    int *incnth = (int *) calloc(nh, sizeof(int));
    // read vertex offsets
    for(int i=0; i<nv; i++){
        fin >> offv[i];
    }
    offv[nv] = mv;  // sentinel so off[s+1]-off[s] works for the last node
    // read vertex adjacency lists (and count hyperedge in-degrees)
    for(int i=0; i<mv; i++){
        fin >> adjv[i];
        incnth[adjv[i]]++;
    }
    // read vertex weights list
    for(int i=0; i<mv; i++){
        fin >> wghv[i];
    }
    // read hyperedge offsets
    for(int i=0; i<nh; i++){
        fin >> offh[i];
    }
    offh[nh] = mh;
    // read hyperedge adjacency lists (and count vertex in-degrees)
    for(int i=0; i<mh; i++){
        fin >> adjh[i];
        incntv[adjh[i]]++;
    }
    // read hyperedge weights list
    for(int i=0; i<mh; i++){
        fin >> wghh[i];
    }
    fin.close();
    // copy all arrays to GPU
    int *gpu_offv;
    int *gpu_offh;
    int *gpu_adjv;
    int *gpu_adjh;
    int *gpu_wghv;
    int *gpu_wghh;
    int *gpu_incntv;
    int *gpu_incnth;
    cudaMalloc(&gpu_offv, (nv + 1) * sizeof(int));
    cudaMalloc(&gpu_offh, (nh + 1) * sizeof(int));
    cudaMalloc(&gpu_adjv, mv * sizeof(int));
    cudaMalloc(&gpu_adjh, mh * sizeof(int));
    cudaMalloc(&gpu_wghv, mv * sizeof(int));
    cudaMalloc(&gpu_wghh, mh * sizeof(int));
    cudaMalloc(&gpu_incntv, nv * sizeof(int));
    cudaMalloc(&gpu_incnth, nh * sizeof(int));
    cudaMemcpy(gpu_offv, offv, (nv + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_offh, offh, (nh + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_adjv, adjv, mv * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_adjh, adjh, mh * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_wghv, wghv, mv * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_wghh, wghh, mh * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_incntv, incntv, nv * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_incnth, incnth, nh * sizeof(int), cudaMemcpyHostToDevice);
    // timing variables
    cudaEvent_t start, stop;
    float milliseconds;
    // warm-up launch so context creation is not billed to the first run
    useless<<<1,1>>>();
    cudaDeviceSynchronize();
    if(algorithm == "BFS"){
        for(int i=0; i<4; i++){
            milliseconds = 0;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start, 0);
            // Call BFS on HyperGraph
            HyperBFS(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh);
            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&milliseconds, start, stop);
            printf("Time taken by HyperBFS function to execute is: %.6f ms\n", milliseconds);
        }
    }
    if(algorithm == "BPath"){
        for(int i=0; i<4; i++){
            milliseconds = 0;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start, 0);
            // Call BPath on HyperGraph
            HyperBPath(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, gpu_incntv, gpu_incnth);
            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&milliseconds, start, stop);
            printf("Time taken by HyperBPath function to execute is: %.6f ms\n", milliseconds);
        }
    }
    if(algorithm == "SSSP"){
        for(int i=0; i<4; i++){
            milliseconds = 0;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start, 0);
            // Call SSSP on HyperGraph
            HyperSSSP(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, gpu_wghv, gpu_wghh);
            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&milliseconds, start, stop);
            printf("Time taken by HyperSSSP function to execute is: %.6f ms\n", milliseconds);
        }
    }
    if(algorithm == "PageRank"){
        for(int i=0; i<4; i++){
            milliseconds = 0;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start, 0);
            // Call PageRank on HyperGraph (single iteration)
            HyperPageRank(nv, mv, nh, mh, 0, outfile, gpu_offv, gpu_offh, gpu_adjv, gpu_adjh, 1);
            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&milliseconds, start, stop);
            printf("Time taken by HyperPageRank function to execute is: %.6f ms\n", milliseconds);
        }
    }
    return 0;
}
14,761 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define BLOCK_DIM 1024
// Fills the device array with its own indices: a[i] = i for 0 <= i < N.
// Expects a 1D launch whose grid covers N; threads past the end do nothing.
__global__ void matrixAdd (int *a, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;            // tail guard: grid may overshoot N
    a[idx] = idx;
}
// Allocates a host int vector of `size` elements, each set to `inivalue`.
// Returns NULL when size <= 0 or the allocation fails (the original wrote
// through an unchecked malloc result). Caller owns the returned memory.
int* createVector (int size, int inivalue) {
    if (size <= 0)
        return NULL;
    int* vector = (int*) malloc(sizeof(int) * (size_t)size);
    if (vector == NULL)
        return NULL;                    // fix: avoid NULL dereference in the fill loop
    for (int i = 0; i < size; ++i) {
        vector[i] = inivalue;
    }
    return vector;
}
// Prints the first `size` elements of `vector`, space-separated, then a newline.
void readVector (int* vector, int size) {
    int i = 0;
    while (i < size) {
        printf("%d ", vector[i]);
        ++i;
    }
    printf("\n");
}
// Queries device 0 and prints its properties, then fills a 1024-element
// vector on the GPU (a[i] = i) and prints it before and after the kernel.
// Fixes vs. original: the host vector is freed (it leaked), the kernel
// launch is error-checked, and the NULL return of createVector is handled.
int main() {
    int dev = 0, driverVersion = 0, runtimeVersion = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
    // Console log
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
    printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
    printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
    printf(" Warp size: %d\n", deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
    printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
    deviceProp.maxThreadsDim[0],
    deviceProp.maxThreadsDim[1],
    deviceProp.maxThreadsDim[2]);
    printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
    deviceProp.maxGridSize[0],
    deviceProp.maxGridSize[1],
    deviceProp.maxGridSize[2]);
    int sizeVector = 1024;
    int memsizeVector = sizeof(int) * sizeVector;
    int* Vector = createVector(sizeVector, 2);
    if (Vector == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    readVector(Vector, sizeVector);
    int *dev_Vector;
    cudaMalloc((void**)&dev_Vector, memsizeVector);
    cudaMemcpy(dev_Vector, Vector, memsizeVector, cudaMemcpyHostToDevice);
    matrixAdd<<<1, BLOCK_DIM>>>(dev_Vector, sizeVector);
    cudaError_t err = cudaGetLastError();          // catch launch-config errors
    if (err != cudaSuccess)
        fprintf(stderr, "matrixAdd launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    cudaMemcpy(Vector, dev_Vector, memsizeVector, cudaMemcpyDeviceToHost);
    cudaFree(dev_Vector);
    printf("---------------------------\n");
    readVector(Vector, sizeVector);
    printf("Calulate completed");
    free(Vector);                                  // fix: leaked in the original
    return 0;
}
|
14,762 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
cudaError_t squareWithCuda(long *input, long *output, unsigned int size);
// Element-wise vector sum: c[i] = a[i] + b[i], one thread per element
// (single-block launch; threadIdx.x is the element index).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int tid = threadIdx.x;
    c[tid] = a[tid] + b[tid];
}
// Squares each element: d_out[i] = d_in[i] * d_in[i], one thread per element
// (single-block launch; threadIdx.x is the element index).
__global__ void square(long *d_out, long *d_in) {
    int tid = threadIdx.x;
    long v = d_in[tid];
    d_out[tid] = v * v;
}
// Squares the integers 0..1023 on the GPU and prints them four per line.
// Fix vs. original: h_out is `long`, but it was printed with "%d" — undefined
// behavior on LP64 platforms where long is 64-bit; "%ld" is correct.
int main()
{
    const int ARRAY_SIZE = 1024;
    long h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = long(i);
    }
    long h_out[ARRAY_SIZE] = { 0 };
    cudaError_t cudaStatus = squareWithCuda(h_in, h_out, ARRAY_SIZE);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Square With Cuda failed!");
        cin.get();
        return 1;
    }
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%ld", h_out[i]);                 // fix: matching conversion for long
        printf(((i % 4) != 3) ? "\t" : "\n");    // tab between columns, newline every 4th
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    cin.get();
    return 0;
}
// Copies `input` to the device, squares every element with the `square`
// kernel (single block, one thread per element — `size` must not exceed the
// device's max threads per block), and copies the result into `output`.
// Returns the first CUDA error encountered, or cudaSuccess.
// Fixes vs. original: grid dimension was the double literal 1.0; error
// messages referred to the wrong kernel ("addKernel"); the host->device copy
// of `output` was useless (the kernel overwrites every element).
cudaError_t squareWithCuda(long *input, long *output, unsigned int size) {
    long *dev_input = 0;
    long *dev_output = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for the input and output vectors.
    cudaStatus = cudaMalloc((void**)&dev_input, size * sizeof(long));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_output, size * sizeof(long));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy the input vector from host memory to the GPU buffer.
    cudaStatus = cudaMemcpy(dev_input, input, size * sizeof(long), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    square<<<1, size>>>(dev_output, dev_input);
    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "square launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching square!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(output, dev_output, size * sizeof(long), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_input);
    cudaFree(dev_output);
    return cudaStatus;
}
14,763 | #include "includes.h"
/** Modifed version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* added feature extraction with bilinear interpolation
*
* Last modified by Christopher B. Choy <chrischoy@ai.stanford.edu> 12/23/2016
*/
// Includes
// Constants used by the program
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
/**
* Gathers k-th smallest distances for each column of the distance matrix in the top.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
/**
* Computes the square root of the first line (width-th first element)
* of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message return during the memory allocation.
*
* @param error error value return by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
// In-place square root of the first k rows of the row-major, width-wide
// distance matrix. 2D launch: x indexes columns (0..width-1), y indexes the
// k rows. Fix vs. original: use single-precision sqrtf() instead of the
// double sqrt(), avoiding a float->double->float round trip per element.
__global__ void cuParallelSqrt(float *dist, int width, int k){
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    if (xIndex<width && yIndex<k)
        dist[yIndex*width + xIndex] = sqrtf(dist[yIndex*width + xIndex]);
}
14,764 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <fcntl.h>
#define BUFFER_SIZE 2
#define P 1272461 // a large prime number
#define Z 32909 // a random number from [0,P-1]
//****************************************************************
// This code may not compile, to be tested on CUDA capable devices
//****************************************************************
typedef struct
{
long long sum = 0;
long long weight = 0;
long long fingerprint = 0;
} one_sparse_sampler;
// NOTE(review): stub — declared to return float but contains no return
// statement, which is undefined behavior if the result is ever used. All
// three parameters are unused. Presumably a polynomial universal hash over
// `coeff` evaluated at `value` was intended — confirm and implement before
// wiring this into the sampler's bucket selection.
__device__ float hash(int* coeff, int numCoefficients, int value) {
}
// One update step of the s-sparse sampler bank for a single stream element.
// buffer[0] = x (vector index), buffer[1] = a (+1 or -1), per the comments
// below. Launched as <<<k,1>>>, so blockIdx.x selects one of k rows of
// 2*s one-sparse samplers each.
// NOTE(review): the bucket `col` is derived from the raw element value
// rather than a hash of it (the hash() helper above is an empty stub) —
// verify this is the intended bucket selection.
__global__ void process(one_sparse_sampler *samplers, int *buffer, int *s, int *k) {
int row = blockIdx.x;
// Bucket within this row; see NOTE above about the missing hash.
int col = *buffer % (2 * (*s));
int index = row * 2 * (*s) + col;
int x = buffer[0]; // index of the vector following lecture notes
int a = buffer[1]; // either 1 or -1 following lecture notes
samplers[index].weight += a;
samplers[index].sum += x * a;
// Fingerprint accumulates a * Z^x, computed through double pow() —
// NOTE(review): this loses precision for large x; an exact modular
// exponentiation (mod P) is presumably what the fingerprint test needs.
samplers[index].fingerprint += (a * pow((double) Z, (double) x));
}
// Host driver: streams (index, sign) records from "10int.dat" through the
// `process` kernel, maintaining a bank of 2*s*k one-sparse samplers.
// NOTE(review): as written this function cannot run correctly —
//   * `buffer` (host) and `d_buffer` (device) are used without allocation,
//   * `d_samplers` is copied from without a cudaMalloc,
//   * fgets() reads TEXT into an int buffer (binary fread was likely meant),
//   * the kernel is launched BUFFER_SIZE times per record for no clear reason,
//   * d_s / d_k are allocated but never initialized or freed.
// The file-level comment ("may not compile, to be tested") confirms this is
// unfinished; fix the allocations and I/O before use.
void s_sparse_sampler(int s, int k) {
int array_size = 2 * s * k;
int sampler_size = sizeof(one_sparse_sampler);
int *d_s, *d_k;
int *d_buffer; // device copy of the data
one_sparse_sampler *samplers; // host copy of the samplers
one_sparse_sampler *d_samplers; // device copy of the samplers
int *buffer; // host copy of the data
// Allocate memory
cudaMallocManaged((void**)&d_s, sizeof(int));
cudaMallocManaged((void**)&d_k, sizeof(int));
samplers = (one_sparse_sampler *) malloc(array_size * sampler_size);
// Read data from file
FILE *fdIn = fopen("10int.dat", "r");
// NOTE(review): `buffer` is uninitialized here — fgets writes through a
// wild pointer; fopen's result is also not checked for NULL.
while ( fgets((char*) buffer, BUFFER_SIZE * sizeof(int), fdIn )) {
// Copy data from host to device
cudaMemcpy(d_buffer, buffer, BUFFER_SIZE * sizeof(int), cudaMemcpyHostToDevice);
for (int i = 0; i < BUFFER_SIZE; i++) {
// Launch the process kernel on GPU
process<<<k,1>>>(d_samplers, d_buffer, d_s, d_k);
}
}
// Copy result back to host
cudaMemcpy(samplers, d_samplers, array_size * sampler_size, cudaMemcpyDeviceToHost);
// Clean up
free(samplers);
cudaFree(d_samplers);
}
// Entry point; the sampler driver above is intentionally not invoked yet.
int main(void)
{
    return 0;
}
|
14,765 | #include <iostream>
// Writes d[i] = i * 10.0 / 0.1 (approximately 100*i) for every valid flat
// index of a 2D grid of 2D blocks; threads outside [0, size) exit early.
__global__ void fun(float * d, int size)
{
    // Row pitch of the launch in threads, then the flat index of this thread.
    int rowStart = blockDim.x * gridDim.x * (blockDim.y * blockIdx.y + threadIdx.y);
    int idx = rowStart + blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < 0 || idx >= size)
        return;
    d[idx] = idx * 10.0 / 0.1;
}
// Runs `fun` over a 100-element device array and prints the results.
// Fixes vs. original: the uninitialized host buffer is no longer copied to
// the device (the kernel overwrites every element), the deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize(), the launch
// error is reported, and both buffers are freed (they leaked).
int main()
{
    int size = 100;
    float * h = (float*)malloc(size*sizeof(float));
    float * d;
    cudaMalloc(&d, size*sizeof(float));
    fun<<<1,size>>>(d, size);
    cudaDeviceSynchronize();
    int rv = cudaGetLastError();
    if (rv != cudaSuccess)
        std::cout << "kernel error: " << cudaGetErrorString((cudaError_t)rv) << "\n";
    cudaMemcpy(h, d, size*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; ++i)
        std::cout << "Result = " << h[i] << "\n";
    cudaFree(d);   // fix: leaked in the original
    free(h);       // fix: leaked in the original
    return 0;
}
|
14,766 | #include "includes.h"
/* Programmaufruf mit 2 Argumenten:
1. Größe des Gitters (mit Rand): Nx+2 (= Ny+2)
2. Dimension eines Cuda-Blocks: dim_block (findet nur Anwendung, wenn Nx+2 > dim_block)
*/
/*
Globale Variablen stehen in allen Funktionen zur Verfuegung.
Achtung: Das gilt *nicht* fuer Kernel-Funktionen!
*/
int Nx, Ny, npts;
int *active;
/*
Fuer die Koordinaten:
i = 0,1,...,Nx+1
j = 0,1,...,Ny+1
wird der fortlaufenden Index berechnet
*/
// AXPY on the interior of an (nx+2) x (ny+2) grid: sum = w + a*v for the
// inner points 1..nx / 1..ny; the boundary ring is left untouched.
// The flat index uses a row pitch of blockDim.x * gridDim.x elements, i.e.
// the launch is assumed to tile the full grid width exactly.
__global__ void vec_add_gpu(double *sum, double *w, double a, double *v, const int nx, const int ny)
{
    unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
    // Guard clauses: skip boundary and out-of-range threads.
    if (col == 0 || col >= (nx + 1)) return;
    if (row == 0 || row >= (ny + 1)) return;
    unsigned int pitch = blockDim.x * gridDim.x;
    unsigned int idx = row * pitch + col;
    sum[idx] = w[idx] + a * v[idx];
}
14,767 | #include <stdio.h>
#include <fstream>
#include <iostream>
#define BLUR_SIZE 3
using namespace std;
// Box blur: each output pixel is the mean of the (2*BLUR_SIZE+1)^2
// neighborhood around it, clipped at the image border. 2D launch; x maps to
// columns, y to rows. Fix vs. original: the accumulator was an int and the
// mean used integer division, truncating the float pixel values twice —
// accumulate and divide in float instead.
__global__
void blurKernel(float * in, float * out, int w, int h)
{
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    if (Col < w && Row < h)
    {
        float pixVal = 0.0f;   // fix: float accumulator (was int)
        int pixels = 0;
        // Get the average of the surrounding 2xBLUR_SIZE x 2xBLUR_SIZE box
        for(int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1; ++blurRow)
        {
            for(int blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1; ++blurCol)
            {
                int curRow = Row + blurRow;
                int curCol = Col + blurCol;
                // Verify we have a valid image pixel
                if(curRow > -1 && curRow < h && curCol > -1 && curCol < w)
                {
                    pixVal += in[curRow * w + curCol];
                    pixels++; // Keep track of number of pixels in the accumulated total
                }
            }
        }
        // pixels >= 1 here (the center pixel itself is always in range).
        out[Row * w + Col] = pixVal / pixels;
    }
}
// Writes the three 225x225 channel planes to "bluur.dat": each image row is
// one line of "r g b " triples.
void save_data(float r[225][225], float g[225][225], float b[225][225])
{
    ofstream archivo("bluur.dat");
    for (int row = 0; row < 225; ++row)
    {
        for (int col = 0; col < 225; ++col)
            archivo << r[row][col] << " " << g[row][col] << " " << b[row][col] << " ";
        archivo << endl;
    }
}
// Blurs the three channel planes on the GPU — one kernel launch per channel,
// reusing the same pair of device buffers — then writes the result to disk
// via save_data().
// NOTE(review): the three 225x225 float output arrays (~600 KB total) live on
// this function's stack, on top of main()'s three input arrays — close to
// common default stack limits; heap allocation would be safer.
// NOTE(review): no CUDA error checking anywhere; a failed launch would
// silently write stale device data to the output file.
void Blur(float r[225][225], float g[225][225], float b[225][225], int width, int height)
{
float o_r[225][225];
float o_g[225][225];
float o_b[225][225];
int size = width * height;
int memSize = size * sizeof(float);
// d_A holds the current input channel, d_B the blurred output.
float *d_A, *d_B;
cudaMalloc((void **) &d_A, memSize);
cudaMalloc((void **) &d_B, memSize);
cudaMemcpy(d_A, r, memSize, cudaMemcpyHostToDevice);
// 16x16 thread blocks; ceil-divide the image into the grid.
dim3 DimGrid(floor((width-1)/16 + 1), floor((height-1)/16+1), 1);
dim3 DimBlock(16, 16, 1);
// Red channel.
blurKernel<<<DimGrid,DimBlock>>>(d_A, d_B, width, height);
cudaMemcpy(o_r, d_B, memSize, cudaMemcpyDeviceToHost);
// Green channel (device buffers reused).
cudaMemcpy(d_A, g, memSize, cudaMemcpyHostToDevice);
blurKernel<<<DimGrid,DimBlock>>>(d_A, d_B, width, height);
cudaMemcpy(o_g, d_B, memSize, cudaMemcpyDeviceToHost);
// Blue channel.
cudaMemcpy(d_A, b, memSize, cudaMemcpyHostToDevice);
blurKernel<<<DimGrid,DimBlock>>>(d_A, d_B, width, height);
cudaMemcpy(o_b, d_B, memSize, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
save_data(o_r,o_g,o_b);
}
// Reads a 225x225 RGB image from `file`: each line holds 225 "r g b"
// triples, one per pixel. Fix vs. original: the `file` parameter was ignored
// and the filename "lena.dat" was hard-coded; the parameter is now honored
// (callers already pass "lena.dat", so behavior is unchanged for them).
void leer_data(const char *file, float r[225][225], float g[225][225], float b[225][225])
{
    char buffer[100];
    ifstream archivo2(file);
    for (int ii = 0; ii < 225; ++ii)
    {
        for (int jj = 0; jj < 225; ++jj)
        {
            archivo2 >> r[ii][jj] >> g[ii][jj] >> b[ii][jj];
        }
        archivo2.getline(buffer, 100);   // discard the remainder of the line
    }
}
// Loads lena.dat, box-blurs each channel on the GPU, and writes bluur.dat.
// NOTE(review): three 225x225 float arrays (~600 KB) on main's stack, plus
// three more inside Blur() — verify this fits the platform's stack limit.
int main()
{
    int width = 225, height = 225;
    float r[225][225];
    float g[225][225];
    float b[225][225];
    leer_data("lena.dat", r, g, b);
    Blur(r, g, b, width, height);
    return EXIT_SUCCESS;
}
|
14,768 | #include "includes.h"
// B=diag(A)
extern "C"
{
}
// Broadcast fill: c[i] = a[0] for the first n elements. 1D launch; the grid
// must cover n, threads past the end do nothing.
__global__ void gfill(const int n, const double *a, double *c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;
    c[idx] = a[0];
}
extern "C"
// Computes one Levenshtein DP row per thread against the rolling cost array:
// costs[j] holds D[i-1][j] on entry and D[i][j] on exit, with `nw` carrying
// the diagonal value D[i-1][j-1].
// Fix vs. original: the cell cost was a single unparenthesized expression
// whose C precedence evaluated ((1 + firstMin < a[i-1]) == b[j-1]) as the
// ternary condition — not the intended recurrence
//     D[i][j] = min(1 + min(D[i-1][j], D[i][j-1]),
//                   a[i-1]==b[j-1] ? nw : nw + 1).
// NOTE(review): costs[] is shared by all threads with no synchronization;
// as in the original, correct results rely on the caller serializing rows.
__global__ void leven(char* a, char* b, char* costs, int size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i > 0 && i < size) {
        costs[0] = i;
        int nw = i - 1;
        for(int j = 1; j <= size; j++) {
            int up      = costs[j];
            int left    = costs[j-1];
            int firstMin = up < left ? up : left;                 // min(up, left)
            int diag     = (a[i - 1] == b[j - 1]) ? nw : nw + 1;  // match/substitute
            int cj = (1 + firstMin < diag) ? 1 + firstMin : diag; // overall min
            nw = costs[j];
            costs[j] = cj;
        }
    }
}
|
14,770 | /*
nvcc -o foo.out
*/
#include <stdio.h>
#include <stdlib.h>
// global declarations
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// functions
// Prints matrix M row by row as "row(i)[ e0 e1 ... ]", preceded by a header
// with its dimensions. Always returns 0.
int print_mat_by_row(const Matrix M){
    const int rows = M.height;
    const int cols = M.width;
    printf("\nprinting matrix rows -- size: %d, rank: %d\n", rows, cols);
    for (int i = 0; i < rows; i++)
    {
        printf("row(%d)[", i);
        for (int j = 0; j < cols; j++)
            printf(" %g ", M.elements[i * M.width + j]);
        printf("]\n");
    }
    printf("\n");
    return 0;
}
// C = A * B with one block per row of A (launched <<<aHeight, 1>>>); the
// single thread of block x fills row x of C serially.
// bHeight is accepted for symmetry but unused — it must equal aWidth.
__global__ void matrix_multiply_kernel(Matrix A, const Matrix B, const Matrix C, int aHeight, int aWidth, int bHeight, int bWidth){
    int x = blockIdx.x;
    if (x >= aHeight)
        return;
    for (int j = 0; j < bWidth; j++){       // each column of B
        float p = 0;                        // dot-product accumulator
        for (int k = 0; k < aWidth; k++)    // cols of A == rows of B
            p += A.elements[x * aWidth + k] * B.elements[k * bWidth + j];
        C.elements[x * bWidth + j] = p;
    }
}
// Multiplies a fixed 4x2 matrix A by a fixed 2x3 matrix B on the GPU and
// prints A, B, and the 4x3 product C.
// NOTE(review): only the .elements pointers of dev_A/dev_B/dev_C are set;
// their width/height fields are uninitialized device-side — harmless here
// because the kernel indexes through the aWidth/bWidth parameters, but
// fragile if the kernel ever reads the struct fields.
// NOTE(review): copying C host->device before the kernel is unnecessary —
// the kernel overwrites every element of C.
int main (void)
{
// allocate host variables
Matrix A,B,C;
A.height = 4;
A.width = 2;
A.elements = (float*) malloc(A.width * A.height * sizeof(float));
B.height = 2;
B.width = 3;
B.elements = (float*) malloc(B.width * B.height * sizeof(float));
C.height = A.height;
C.width = B.width;
C.elements = (float*) malloc(C.width * C.height * sizeof(float));
// allocate device variables (only the element buffers; see NOTE above)
Matrix dev_A, dev_B, dev_C;
cudaMalloc((void**) &dev_A.elements, A.height * A.width * sizeof(float));
cudaMalloc((void**) &dev_B.elements, B.height * B.width * sizeof(float));
cudaMalloc((void**) &dev_C.elements, C.height * C.width * sizeof(float));
// populate host matrices -- in general M.elements[row * M.width + col] = value;
A.elements[0 * A.width + 0] = 1;
A.elements[0 * A.width + 1] = 2;
A.elements[1 * A.width + 0] = 3;
A.elements[1 * A.width + 1] = 4;
A.elements[2 * A.width + 0] = 5;
A.elements[2 * A.width + 1] = 6;
A.elements[3 * A.width + 0] = 7;
A.elements[3 * A.width + 1] = 8;
B.elements[0 * B.width + 0] = 1;
B.elements[0 * B.width + 1] = 2;
B.elements[0 * B.width + 2] = 3;
B.elements[1 * B.width + 0] = 4;
B.elements[1 * B.width + 1] = 5;
B.elements[1 * B.width + 2] = 6;
// print out the matrices
print_mat_by_row(A);
print_mat_by_row(B);
// copy to device
cudaMemcpy(dev_A.elements, A.elements, A.height * A.width * sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_B.elements, B.elements, B.height * B.width * sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_C.elements, C.elements, C.height * C.width * sizeof(float),cudaMemcpyHostToDevice);
// invoke the kernel: one block per row of A, one thread per block
matrix_multiply_kernel<<<A.height,1>>>(dev_A,dev_B,dev_C, A.height, A.width, B.height, B.width);
// Read C from device memory
cudaMemcpy(C.elements, dev_C.elements, C.height * C.width * sizeof(float),cudaMemcpyDeviceToHost);
print_mat_by_row(C);
// free up memory
cudaFree(dev_A.elements);
cudaFree(dev_B.elements);
cudaFree(dev_C.elements);
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
|
14,771 | #include "includes.h"
// Per-filter gradient with respect to the variance (batchnorm-style layout:
// x is [batch][filters][spatial], one block per filter). Each block computes
//   variance_delta[f] = sum_{batch,spatial} delta * (x - mean[f])
//                       * -0.5 * (variance[f] + 1e-5)^(-3/2)
// using a 512-slot shared-memory partial sum per thread, then a serial
// reduction by thread 0.
// NOTE(review): correctness requires blockDim.x == 512 exactly — `local`
// holds 512 floats indexed by threadIdx.x, and the stride of the spatial
// loop is the hard-coded 512, not blockDim.x. Confirm the launch config.
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta) {
const int threads = 512;
__shared__ float local[threads];
int id = threadIdx.x;
local[id] = 0;
int filter = blockIdx.x;
int i, j;
for (j = 0; j < batch; ++j) {
for (i = 0; i < spatial; i += threads) {
int index = j * spatial * filters + filter * spatial + i + id;
// Ternary guard: the out-of-range branch is never evaluated, so no
// out-of-bounds read occurs on the tail chunk.
local[id] += (i + id < spatial)
? delta[index] * (x[index] - mean[filter])
: 0;
}
}
__syncthreads();
// Thread 0 serially folds the 512 partials and applies the chain-rule
// scale factor -0.5 * (var + eps)^(-3/2).
if (id == 0) {
variance_delta[filter] = 0;
for (i = 0; i < threads; ++i) {
variance_delta[filter] += local[i];
}
variance_delta[filter] *=
-.5f * powf(variance[filter] + .00001f, (float)(-3.f / 2.f));
}
}
14,772 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define MAXBLOCKS 1
#define MAXTHREADS 10
// Device-side hello world: every thread prints its block and thread index.
// (Device printf output is only guaranteed to appear after the host reaches
// a synchronizing call such as cudaDeviceSynchronize or cudaFree.)
__global__ void HelloWorld()
{
printf("core block %d\'s thread no. %d says: Hello World!\n", blockIdx.x, threadIdx.x);
}
// Launches HelloWorld on MAXBLOCKS x MAXTHREADS threads and waits for the
// device-side printf output to flush before exiting.
// Fixes vs. original: the unused device allocation d_a is removed, and the
// implicit synchronization it provided via cudaFree is replaced by an
// explicit cudaDeviceSynchronize().
int main()
{
    HelloWorld<<<MAXBLOCKS, MAXTHREADS>>>();
    cudaDeviceSynchronize();   // flush device printf buffer before exit
    return 0;
}
// Returns bit `idx` of `i` as 1 or 0.
// Fix vs. original: the mask was built by shifting the (32-bit) int literal
// 1; the callers below test bit positions up to 61, and shifting a 32-bit
// int by >= 31 is undefined behavior / loses the high bits. Shift an
// unsigned long (64-bit) instead to match the operand width.
__device__ char getBits(unsigned long i, int idx) {
    return (i & (1UL << idx)) != 0;
}
__global__ void gpu_boolean_matcher(char* result_ptr, int index){
char result = *result_ptr;
unsigned long element = (((unsigned long)blockIdx.x)*((unsigned long)blockDim.x) + ((unsigned long)threadIdx.x));
unsigned long maxCores = gridDim.x;
switch(index){
//Expression #1, Amount of variables: 2
case 1:
for (unsigned long i = element; i < (((unsigned long) 1) << 2); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==getBits(i, 0)&&getBits(i, 1));
}
break;
// Expression #2, Amount of variables: 2
case 2:
for (unsigned long i = element; i < (((unsigned long) 1) << 2); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==!getBits(i, 0)||getBits(i, 1));
}
break;
// Expression #3, Amount of variables: 2
case 3:
for (unsigned long i = element; i < (((unsigned long) 1) << 2); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==!getBits(i, 1)&&!getBits(i, 0));
}
break;
// Expression #4, Amount of variables: 3
case 4:
for (unsigned long i = element; i < (((unsigned long) 1) << 3); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==(getBits(i, 1)&&!getBits(i, 0))&&getBits(i, 2));
}
break;
// Expression #5, Amount of variables: 4
case 5:
for (unsigned long i = element; i < (((unsigned long) 1) << 4); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==(!getBits(i, 1)||getBits(i, 0))&&(getBits(i, 2)||getBits(i, 3)));
}
break;
// Expression #6, Amount of variables: 5
case 6:
for (unsigned long i = element; i < (((unsigned long) 1) << 5); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==(getBits(i, 1)&&!getBits(i, 2))&&(getBits(i, 3)&&(getBits(i, 4)||!getBits(i, 0))));
}
break;
// Expression #7, Amount of variables: 20
case 7:
for (unsigned long i = element; i < (((unsigned long) 1) << 20); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==(getBits(i, 0)||!getBits(i, 1))&&(!getBits(i, 2)||getBits(i, 3))&&(((!getBits(i, 4)||!getBits(i, 5))||(!getBits(i, 6)||getBits(i, 7))))&&(!getBits(i, 8)||getBits(i, 9))&&(!getBits(i, 10)||!getBits(i, 11))&&(((!getBits(i, 12)||!getBits(i, 13))||getBits(i, 14))&&(getBits(i, 15)||getBits(i, 16))&&(getBits(i, 17)&&(!getBits(i, 18)&&!getBits(i, 19)))));
}
break;
// Expression #8, Amount of variables: 21
case 8:
for (unsigned long i = element; i < (((unsigned long) 1) << 21); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(!getBits(i, 0)||(getBits(i, 1)||!getBits(i, 2))))&&((getBits(i, 3)&&!getBits(i, 4))&&(getBits(i, 5)||!getBits(i, 6))))||(((!getBits(i, 7)||!getBits(i, 8))||!getBits(i, 9))&&((getBits(i, 10)&&!getBits(i, 11))&&(getBits(i, 12)||!getBits(i, 13))))||((!(getBits(i, 14)||(getBits(i, 15)||getBits(i, 16))))&&((getBits(i, 17)||!getBits(i, 18))&&(getBits(i, 19)||getBits(i, 20)))));
}
break;
// Expression #9, Amount of variables: 25
case 9:
for (unsigned long i = element; i < (((unsigned long) 1) << 25); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((getBits(i, 1)||getBits(i, 0))||(getBits(i, 2)&&(getBits(i, 3)&&getBits(i, 4))))||((getBits(i, 5)||getBits(i, 6))||((!getBits(i, 7)&&getBits(i, 8))&&!getBits(i, 9)))||((getBits(i, 10)&&!getBits(i, 11))||(!getBits(i, 12)&&(!getBits(i, 13)&&!getBits(i, 14))))||((getBits(i, 15)||(getBits(i, 16)||getBits(i, 17)))||(!getBits(i, 18)&&!getBits(i, 19)))||((!getBits(i, 20)||!getBits(i, 21))||((!getBits(i, 22)||!getBits(i, 23))||!getBits(i, 24))));
}
break;
// Expression #10, Amount of variables: 21
case 10:
for (unsigned long i = element; i < (((unsigned long) 1) << 21); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==(((!getBits(i, 1)&&getBits(i, 0))&&!getBits(i, 2))&&((getBits(i, 3)||!getBits(i, 4))&&(getBits(i, 5)||!getBits(i, 6))))||(((!getBits(i, 7)&&getBits(i, 8))||!getBits(i, 9))&&((getBits(i, 10)||!getBits(i, 11))&&(getBits(i, 12)&&!getBits(i, 13))))||(((getBits(i, 14)&&getBits(i, 15))||getBits(i, 16))&&((getBits(i, 17)||!getBits(i, 18))&&(!getBits(i, 19)&&getBits(i, 20)))));
}
break;
// Expression #11, Amount of variables: 30
case 11:
for (unsigned long i = element; i < (((unsigned long) 1) << 30); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 1)||getBits(i, 2)))||(!getBits(i, 3)&&(!getBits(i, 4)&&!getBits(i, 0))))||((!(getBits(i, 5)||getBits(i, 6)))||(getBits(i, 7)&&(getBits(i, 8)&&getBits(i, 9))))||((!(!getBits(i, 10)||getBits(i, 11)))||((getBits(i, 12)||getBits(i, 13))&&getBits(i, 14)))||((!(getBits(i, 15)||getBits(i, 16)))||((!getBits(i, 17)&&!getBits(i, 18))&&!getBits(i, 19)))||((!(getBits(i, 20)||getBits(i, 21)))||((getBits(i, 22)&&getBits(i, 23))&&getBits(i, 24)))||((!(!getBits(i, 25)||getBits(i, 26)))||((getBits(i, 27)||getBits(i, 28))&&getBits(i, 29))));
}
break;
// Expression #12, Amount of variables: 21
case 12:
for (unsigned long i = element; i < (((unsigned long) 1) << 21); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!getBits(i, 1)&&(getBits(i, 2)&&!getBits(i, 0)))||((getBits(i, 3)||!getBits(i, 4))&&(getBits(i, 5)||!getBits(i, 6))))&&(((!getBits(i, 7)&&getBits(i, 8))||!getBits(i, 9))&&((getBits(i, 10)||!getBits(i, 11))&&(getBits(i, 12)&&!getBits(i, 13))))||(((getBits(i, 14)||getBits(i, 15))&&getBits(i, 16))&&((getBits(i, 17)||!getBits(i, 18))&&!getBits(i, 19))&&getBits(i, 20)));
}
break;
// Expression #13, Amount of variables: 39
case 13:
for (unsigned long i = element; i < (((unsigned long) 1) << 39); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!getBits(i, 1)||getBits(i, 2))||(!getBits(i, 3)&&(!getBits(i, 4)||!getBits(i, 5))))||((!(getBits(i, 0)||getBits(i, 6)))||(getBits(i, 7)||(getBits(i, 8)&&getBits(i, 9))))||((!(!getBits(i, 10)||getBits(i, 11)))||((getBits(i, 12)||getBits(i, 13))&&getBits(i, 14)))||((!(getBits(i, 15)&&getBits(i, 16)))||((!getBits(i, 17)&&!getBits(i, 18))||!getBits(i, 19)))||((!(getBits(i, 20)||getBits(i, 21)))||((getBits(i, 22)||getBits(i, 23))&&getBits(i, 24)))||((!(!getBits(i, 25)||getBits(i, 26)))||((getBits(i, 27)||getBits(i, 28))&&getBits(i, 29)))&&((!(getBits(i, 30)||getBits(i, 21)))||((getBits(i, 31)||getBits(i, 32))&&getBits(i, 33)))||((!(!getBits(i, 34)||getBits(i, 35)))||((getBits(i, 36)||getBits(i, 37))&&getBits(i, 38))));
}
break;
// Expression #14, Amount of variables: 45
case 14:
for (unsigned long i = element; i < (((unsigned long) 1) << 45); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 1)||getBits(i, 0)))||(!getBits(i, 2)&&(!getBits(i, 3)&&(!getBits(i, 4)||getBits(i, 5)))))||((!(getBits(i, 6)||(getBits(i, 7)&&getBits(i, 8))))||(getBits(i, 9)||(getBits(i, 10)&&getBits(i, 11))))||((!(!getBits(i, 12)||getBits(i, 13)))||((getBits(i, 14)||getBits(i, 15))&&(getBits(i, 16)||getBits(i, 17))))||((!(getBits(i, 18)||getBits(i, 19)))||((!getBits(i, 20)&&!getBits(i, 21))||!getBits(i, 22)))||((!(getBits(i, 23)&&(getBits(i, 24)||getBits(i, 25))))||((getBits(i, 26)||getBits(i, 27))&&getBits(i, 28)))||((!(!getBits(i, 29)||getBits(i, 30)))||((getBits(i, 31)||getBits(i, 32))&&(getBits(i, 33)||getBits(i, 34))))||((!(getBits(i, 35)||getBits(i, 36)))||((getBits(i, 37)&&getBits(i, 38))&&getBits(i, 39)))&&((getBits(i, 40)||!getBits(i, 41))&&(!getBits(i, 42)||(getBits(i, 43)&&getBits(i, 44)))));
}
break;
// Expression #15, Amount of variables: 50
case 15:
for (unsigned long i = element; i < (((unsigned long) 1) << 50); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 1)||getBits(i, 0)))&&(!getBits(i, 2)&&(!getBits(i, 3)&&(!getBits(i, 4)||getBits(i, 5)))))||((!(getBits(i, 6)||(getBits(i, 7)&&getBits(i, 8))))||(getBits(i, 9)||(getBits(i, 10)&&getBits(i, 11))))||((!(!getBits(i, 12)||getBits(i, 13)))||((getBits(i, 14)||getBits(i, 15))&&(getBits(i, 16)||getBits(i, 17))))&&((!(getBits(i, 18)||getBits(i, 19)))||((!getBits(i, 20)&&!getBits(i, 21))||!getBits(i, 22)))||((!(getBits(i, 23)&&(getBits(i, 24)||getBits(i, 25))))||((getBits(i, 26)||getBits(i, 27))&&getBits(i, 28)))||((!(!getBits(i, 29)||getBits(i, 30)))||((getBits(i, 31)||getBits(i, 32))&&(getBits(i, 33)||getBits(i, 34))))||((!(getBits(i, 35)||getBits(i, 36)))||((getBits(i, 37)&&getBits(i, 38))&&getBits(i, 39)))&&((getBits(i, 40)||!getBits(i, 41))&&(!getBits(i, 42)||(getBits(i, 43)&&getBits(i, 44))))||(getBits(i, 45)&&(getBits(i, 46)||(!getBits(i, 47)&&(!getBits(i, 48)||getBits(i, 49))))));
}
break;
// Expression #16, Amount of variables: 60
case 16:
for (unsigned long i = element; i < (((unsigned long) 1) << 60); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((getBits(i, 0)&&getBits(i, 1))||((!getBits(i, 2)||!getBits(i, 3))&&!getBits(i, 4)))&&((getBits(i, 5)&&getBits(i, 6))||((!getBits(i, 7)&&!getBits(i, 8))||!getBits(i, 9)))&&((getBits(i, 10)&&getBits(i, 11))||((!getBits(i, 12)||!getBits(i, 13))&&!getBits(i, 14)))&&((getBits(i, 15)&&getBits(i, 16))||(!getBits(i, 17)&&(!getBits(i, 18)||!getBits(i, 19))))&&((getBits(i, 20)&&getBits(i, 21))||(!getBits(i, 22)||(!getBits(i, 23)&&!getBits(i, 24))))&&((getBits(i, 25)&&getBits(i, 26))||(!getBits(i, 27)||(getBits(i, 28)&&getBits(i, 29))))&&((getBits(i, 30)&&getBits(i, 31))||((!getBits(i, 32)||getBits(i, 33))&&!getBits(i, 34)))&&((getBits(i, 35)&&getBits(i, 36))||(getBits(i, 37)&&(getBits(i, 38)||!getBits(i, 39))))&&((getBits(i, 40)&&getBits(i, 41))||(!getBits(i, 42)&&!getBits(i, 43)&&!getBits(i, 44)))&&((getBits(i, 45)&&getBits(i, 46))||((!getBits(i, 47)||getBits(i, 48))&&!getBits(i, 49)))&&((getBits(i, 50)&&getBits(i, 51))||(!getBits(i, 52)&&!getBits(i, 53)&&!getBits(i, 54)))&&((getBits(i, 55)&&getBits(i, 56))||(!getBits(i, 57)&&(!getBits(i, 58)||!getBits(i, 59)))));
}
break;
// Expression #17, Amount of variables: 61
case 17:
for (unsigned long i = element; i < (((unsigned long) 1) << 61); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 0)&&getBits(i, 1)))||((!getBits(i, 2)||!getBits(i, 3))&&!getBits(i, 4)))||((!getBits(i, 5)&&getBits(i, 6))||(!getBits(i, 7)&&(!getBits(i, 8)||!getBits(i, 9))))&&((getBits(i, 10)&&getBits(i, 11))||(!getBits(i, 12)&&!getBits(i, 13)&&!getBits(i, 14)))&&((getBits(i, 15)&&getBits(i, 16))&&((!(getBits(i, 17)&&!getBits(i, 18)))&&!getBits(i, 19)))||((getBits(i, 20)&&getBits(i, 21))||(!getBits(i, 22)||(!getBits(i, 23)&&!getBits(i, 24))))&&((getBits(i, 25)&&getBits(i, 26))||((!getBits(i, 27)||!getBits(i, 28))&&!getBits(i, 29)))&&((getBits(i, 30)&&getBits(i, 31))||(!getBits(i, 32)&&!getBits(i, 33)&&!getBits(i, 34)))||((getBits(i, 35)&&!getBits(i, 36))||(getBits(i, 37)&&(getBits(i, 38)||getBits(i, 39))))&&((getBits(i, 40)&&getBits(i, 41))||(getBits(i, 42)&&(getBits(i, 43)&&!getBits(i, 44))))&&((!(getBits(i, 45)&&getBits(i, 46)))||((getBits(i, 47)&&getBits(i, 48))||!getBits(i, 49)))||((getBits(i, 50)&&getBits(i, 51))||(!getBits(i, 52)&&(getBits(i, 53)||!getBits(i, 54))))&&((getBits(i, 55)&&getBits(i, 56))||((getBits(i, 57)&&getBits(i, 58))||(getBits(i, 59)&&!getBits(i, 60)))));
}
break;
// Expression #18, Amount of variables: 60
case 18:
for (unsigned long i = element; i < (((unsigned long) 1) << 60); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 1)&&getBits(i, 0)))||((!getBits(i, 2)||!getBits(i, 3))&&!getBits(i, 4)))||((!getBits(i, 5)&&getBits(i, 6))||(!getBits(i, 7)||(!getBits(i, 8)&&!getBits(i, 9))))&&((getBits(i, 10)&&getBits(i, 11))||(!getBits(i, 12)&&!getBits(i, 13)&&!getBits(i, 14)))&&((getBits(i, 15)||getBits(i, 16))&&((!(getBits(i, 17)&&!getBits(i, 18)))&&!getBits(i, 19)))||((getBits(i, 20)&&getBits(i, 21))||(!getBits(i, 22)||(!getBits(i, 23)&&!getBits(i, 24))))&&((getBits(i, 25)&&getBits(i, 26))||((!getBits(i, 27)&&!getBits(i, 28))||!getBits(i, 29)))&&((getBits(i, 30)&&getBits(i, 31))||((!getBits(i, 32)&&!getBits(i, 33))&&!getBits(i, 34)))||((getBits(i, 35)&&!getBits(i, 36))||(getBits(i, 37)&&getBits(i, 38)&&getBits(i, 39)))&&((getBits(i, 40)&&getBits(i, 41))||(getBits(i, 42)&&(getBits(i, 43)&&!getBits(i, 44))))&&((!(getBits(i, 45)&&getBits(i, 46)))||((getBits(i, 47)&&getBits(i, 48))||!getBits(i, 49)))||((getBits(i, 50)&&getBits(i, 51))||(!getBits(i, 52)&&getBits(i, 53)&&!getBits(i, 54)))&&((getBits(i, 55)&&getBits(i, 56))||((getBits(i, 57)&&getBits(i, 58))||getBits(i, 59))));
}
break;
// Expression #19, Amount of variables: 62
case 19:
for (unsigned long i = element; i < (((unsigned long) 1) << 62); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 1)&&getBits(i, 0)))||(!getBits(i, 2)&&!getBits(i, 3)&&!getBits(i, 4)))||((!getBits(i, 5)&&getBits(i, 6))||(!getBits(i, 7)||(!getBits(i, 8)&&!getBits(i, 9))))&&((getBits(i, 10)&&getBits(i, 11))||((!getBits(i, 12)&&!getBits(i, 13))||!getBits(i, 14)))&&((getBits(i, 15)&&getBits(i, 16))&&((!(getBits(i, 17)&&!getBits(i, 18)))||!getBits(i, 19)))||((getBits(i, 20)&&getBits(i, 21))||(!getBits(i, 22)||(!getBits(i, 23)&&!getBits(i, 24))))&&((getBits(i, 25)&&getBits(i, 26))||((!getBits(i, 27)&&!getBits(i, 28))||!getBits(i, 29)))&&((getBits(i, 30)&&getBits(i, 31))||((!getBits(i, 32)||!getBits(i, 33))&&!getBits(i, 34)))||((getBits(i, 35)&&!getBits(i, 36))||(getBits(i, 37)||(getBits(i, 38)&&getBits(i, 39))))&&((getBits(i, 40)&&getBits(i, 41))||(getBits(i, 42)||(getBits(i, 43)&&!getBits(i, 44))))&&((!(getBits(i, 45)&&getBits(i, 46)))||((getBits(i, 47)&&getBits(i, 48))||!getBits(i, 49)))||((getBits(i, 50)&&getBits(i, 51))||(!getBits(i, 52)&&(getBits(i, 53)||!getBits(i, 54))))&&((getBits(i, 55)&&getBits(i, 56))||((getBits(i, 57)&&getBits(i, 58))||getBits(i, 59)))||(getBits(i, 60)&&(!getBits(i, 61))));
}
break;
// Expression #20, Amount of variables: 60
case 20:
for (unsigned long i = element; i < (((unsigned long) 1) << 60); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 1)||getBits(i, 2)))||((!getBits(i, 3)||!getBits(i, 4))&&!getBits(i, 0)))||((!getBits(i, 5)&&getBits(i, 6))||(!getBits(i, 7)||(!getBits(i, 8)&&!getBits(i, 9))))&&((getBits(i, 10)&&getBits(i, 11))||((!getBits(i, 12)&&!getBits(i, 13))||!getBits(i, 14)))&&((getBits(i, 15)||getBits(i, 16))&&((!(getBits(i, 17)&&!getBits(i, 18)))&&!getBits(i, 19)))||((getBits(i, 20)&&getBits(i, 21))||(!getBits(i, 22)||(!getBits(i, 23)&&!getBits(i, 24))))&&((getBits(i, 25)&&getBits(i, 26))||(!getBits(i, 27)&&!getBits(i, 28)))&&(((getBits(i, 29)||(getBits(i, 30)&&getBits(i, 31)))||((!getBits(i, 32)||!getBits(i, 33))&&!getBits(i, 34))))||((getBits(i, 35)&&!getBits(i, 36))||(getBits(i, 37)&&(getBits(i, 38)||getBits(i, 39))))&&((getBits(i, 40)&&getBits(i, 41))||(getBits(i, 42)||(getBits(i, 43)&&!getBits(i, 44))))&&((!(getBits(i, 45)&&getBits(i, 46)))||((getBits(i, 47)&&getBits(i, 48))||!getBits(i, 49)))||((getBits(i, 50)&&getBits(i, 51))||(!getBits(i, 52)||(getBits(i, 53)&&!getBits(i, 54))))&&((getBits(i, 55)&&getBits(i, 56))||((getBits(i, 57)&&getBits(i, 58))||getBits(i, 59))));
}
break;
// Expression #21, Amount of variables: 63
case 21:
for (unsigned long i = element; i < (((unsigned long) 1) << 63); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)==((!(getBits(i, 1)&&getBits(i, 2)))||((!getBits(i, 3)||!getBits(i, 4))&&!getBits(i, 0)))||((!getBits(i, 5)&&getBits(i, 6))||(!getBits(i, 7)&&(!getBits(i, 8)||!getBits(i, 9))))&&((getBits(i, 10)&&getBits(i, 11))||((!getBits(i, 12)||!getBits(i, 13))&&!getBits(i, 14)))&&((getBits(i, 15)&&getBits(i, 16))&&((!(getBits(i, 17)&&!getBits(i, 18)))&&!getBits(i, 19)))||((getBits(i, 20)&&getBits(i, 21))||(!getBits(i, 22)||(!getBits(i, 23)&&!getBits(i, 24))))&&((getBits(i, 25)&&getBits(i, 26))||((!getBits(i, 27)||!getBits(i, 28))&&!getBits(i, 29)))&&((getBits(i, 30)&&getBits(i, 31))||((!getBits(i, 32)&&!getBits(i, 33))||!getBits(i, 34)))||((getBits(i, 35)&&!getBits(i, 36))||(getBits(i, 37)&&(getBits(i, 38)||getBits(i, 39))))&&((getBits(i, 40)&&getBits(i, 41))||(getBits(i, 42)&&(getBits(i, 43)||!getBits(i, 44))))&&((!(getBits(i, 45)&&getBits(i, 46)))||((getBits(i, 47)&&!getBits(i, 48))||!getBits(i, 49)))||((getBits(i, 50)&&getBits(i, 51))||(!getBits(i, 52)||(getBits(i, 53)&&!getBits(i, 54))))&&((getBits(i, 55)&&getBits(i, 56))||((getBits(i, 57)&&getBits(i, 58))||getBits(i, 59)))||((getBits(i, 60)&&!getBits(i, 61))||(!getBits(i, 62))));
}
break;
// Expression #22, Amount of variables: 2
case 22:
for (unsigned long i = element; i < (((unsigned long) 1) << 2); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||!getBits(i, 1));
}
break;
// Expression #23, Amount of variables: 4
case 23:
for (unsigned long i = element; i < (((unsigned long) 1) << 4); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&!getBits(i, 1)||getBits(i, 2)||getBits(i, 3));
}
break;
// Expression #24, Amount of variables: 5
case 24:
for (unsigned long i = element; i < (((unsigned long) 1) << 5); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 2)&&!getBits(i, 1)||getBits(i, 3)||getBits(i, 4));
}
break;
// Expression #25, Amount of variables: 8
case 25:
for (unsigned long i = element; i < (((unsigned long) 1) << 8); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)^!getBits(i, 7));
}
break;
// Expression #26, Amount of variables: 9
case 26:
for (unsigned long i = element; i < (((unsigned long) 1) << 9); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)||getBits(i, 1)||getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||!getBits(i, 8));
}
break;
// Expression #27, Amount of variables: 12
case 27:
for (unsigned long i = element; i < (((unsigned long) 1) << 12); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||!getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)&&!getBits(i, 7)||getBits(i, 8)||getBits(i, 9)||getBits(i, 10)&&getBits(i, 11));
}
break;
// Expression #28, Amount of variables: 10
case 28:
for (unsigned long i = element; i < (((unsigned long) 1) << 10); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)^!getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||getBits(i, 8)||!getBits(i, 5)||getBits(i, 9)^getBits(i, 5)||getBits(i, 5)||getBits(i, 8));
}
break;
// Expression #29, Amount of variables: 12
case 29:
for (unsigned long i = element; i < (((unsigned long) 1) << 12); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||getBits(i, 1)||getBits(i, 2)^!getBits(i, 3)||getBits(i, 4)&&getBits(i, 5)||getBits(i, 6)^!getBits(i, 7)||getBits(i, 3)^getBits(i, 0)||getBits(i, 8)&&getBits(i, 9)||getBits(i, 0)||getBits(i, 10)||getBits(i, 11)&&getBits(i, 10));
}
break;
// Expression #30, Amount of variables: 16
case 30:
for (unsigned long i = element; i < (((unsigned long) 1) << 16); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)&&getBits(i, 3)||getBits(i, 5)&&getBits(i, 6)||getBits(i, 7)^getBits(i, 8)||getBits(i, 3)&&getBits(i, 9)||getBits(i, 10)||!getBits(i, 11)||getBits(i, 12)||getBits(i, 13)||getBits(i, 14)||getBits(i, 15));
}
break;
// Expression #31, Amount of variables: 18
case 31:
for (unsigned long i = element; i < (((unsigned long) 1) << 18); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)^getBits(i, 3)||getBits(i, 4)&&getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||getBits(i, 8)&&getBits(i, 9)||getBits(i, 10)^getBits(i, 11)||getBits(i, 12)^getBits(i, 13)||getBits(i, 14)&&getBits(i, 15)||getBits(i, 16)&&!getBits(i, 0)||getBits(i, 16)||!getBits(i, 17));
}
break;
// Expression #32, Amount of variables: 18
case 32:
for (unsigned long i = element; i < (((unsigned long) 1) << 18); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)||!getBits(i, 5)||getBits(i, 6)^getBits(i, 7)||getBits(i, 8)^!getBits(i, 9)||getBits(i, 10)&&!getBits(i, 11)||getBits(i, 4)^getBits(i, 12)||getBits(i, 13)&&getBits(i, 14)||getBits(i, 11)^!getBits(i, 10)||getBits(i, 15)^getBits(i, 16)||getBits(i, 17)&&getBits(i, 7));
}
break;
// Expression #33, Amount of variables: 21
case 33:
for (unsigned long i = element; i < (((unsigned long) 1) << 21); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)&&getBits(i, 0)||getBits(i, 7)&&getBits(i, 8)||getBits(i, 9)||getBits(i, 10)||getBits(i, 11)&&getBits(i, 12)||getBits(i, 13)||getBits(i, 14)||getBits(i, 15)||getBits(i, 16)||getBits(i, 17)&&getBits(i, 18)||getBits(i, 19)^getBits(i, 2)||getBits(i, 5)^getBits(i, 20));
}
break;
// Expression #34, Amount of variables: 24
case 34:
for (unsigned long i = element; i < (((unsigned long) 1) << 24); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)^getBits(i, 5)||getBits(i, 6)&&getBits(i, 7)||getBits(i, 8)^getBits(i, 9)||getBits(i, 10)^!getBits(i, 11)||getBits(i, 12)&&getBits(i, 13)||getBits(i, 14)&&getBits(i, 15)||getBits(i, 16)&&getBits(i, 17)||getBits(i, 18)||getBits(i, 5)||getBits(i, 19)||getBits(i, 20)||getBits(i, 21)^!getBits(i, 22)||getBits(i, 23)^getBits(i, 15));
}
break;
// Expression #35, Amount of variables: 26
case 35:
for (unsigned long i = element; i < (((unsigned long) 1) << 26); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)^getBits(i, 3)||getBits(i, 4)^getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||getBits(i, 8)&&!getBits(i, 9)||getBits(i, 10)||getBits(i, 11)||getBits(i, 12)^!getBits(i, 13)||getBits(i, 14)&&getBits(i, 15)||getBits(i, 16)||getBits(i, 8)||getBits(i, 17)&&getBits(i, 18)||getBits(i, 19)||getBits(i, 20)||getBits(i, 15)^!getBits(i, 21)||getBits(i, 22)||!getBits(i, 23)||getBits(i, 24)&&getBits(i, 25));
}
break;
// Expression #36, Amount of variables: 28
case 36:
for (unsigned long i = element; i < (((unsigned long) 1) << 28); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^!getBits(i, 1)||getBits(i, 2)^getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)^getBits(i, 7)||getBits(i, 8)||getBits(i, 9)||getBits(i, 10)||!getBits(i, 11)||getBits(i, 3)^getBits(i, 12)||getBits(i, 13)&&!getBits(i, 14)||getBits(i, 15)^getBits(i, 16)||getBits(i, 17)^getBits(i, 12)||getBits(i, 18)||getBits(i, 19)||getBits(i, 20)^!getBits(i, 21)||getBits(i, 22)^getBits(i, 23)||getBits(i, 24)||getBits(i, 25)||getBits(i, 26)^getBits(i, 27));
}
break;
// Expression #37, Amount of variables: 29
case 37:
for (unsigned long i = element; i < (((unsigned long) 1) << 29); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)^getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)||!getBits(i, 7)||getBits(i, 1)&&getBits(i, 8)||getBits(i, 9)||!getBits(i, 10)||getBits(i, 11)^!getBits(i, 12)||getBits(i, 13)&&getBits(i, 14)||getBits(i, 12)&&!getBits(i, 15)||getBits(i, 16)&&!getBits(i, 17)||getBits(i, 18)^getBits(i, 19)||getBits(i, 20)^getBits(i, 21)||getBits(i, 22)&&getBits(i, 23)||getBits(i, 24)||getBits(i, 25)||getBits(i, 26)&&getBits(i, 27)||getBits(i, 15)^getBits(i, 28));
}
break;
// Expression #38, Amount of variables: 27
case 38:
for (unsigned long i = element; i < (((unsigned long) 1) << 27); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)||!getBits(i, 5)||getBits(i, 6)^!getBits(i, 7)||getBits(i, 8)^!getBits(i, 9)||getBits(i, 10)&&!getBits(i, 11)||getBits(i, 1)^getBits(i, 12)||getBits(i, 13)&&getBits(i, 14)||getBits(i, 14)^getBits(i, 15)||getBits(i, 16)||!getBits(i, 6)||getBits(i, 4)^getBits(i, 17)||getBits(i, 18)||!getBits(i, 19)||getBits(i, 7)||!getBits(i, 20)||getBits(i, 21)&&getBits(i, 9)||getBits(i, 22)^getBits(i, 23)||getBits(i, 9)&&getBits(i, 24)||getBits(i, 25)^!getBits(i, 26));
}
break;
// Expression #39, Amount of variables: 29
case 39:
for (unsigned long i = element; i < (((unsigned long) 1) << 29); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&!getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)^!getBits(i, 5)||getBits(i, 6)&&getBits(i, 7)||getBits(i, 8)&&!getBits(i, 0)||getBits(i, 9)||getBits(i, 10)||getBits(i, 11)&&getBits(i, 12)||getBits(i, 13)&&getBits(i, 2)||getBits(i, 14)&&getBits(i, 15)||getBits(i, 16)^getBits(i, 5)||getBits(i, 17)&&getBits(i, 18)||getBits(i, 19)||getBits(i, 20)||getBits(i, 21)||!getBits(i, 18)||getBits(i, 8)^getBits(i, 22)||getBits(i, 23)&&getBits(i, 3)||getBits(i, 24)^getBits(i, 25)||getBits(i, 26)||!getBits(i, 27)||getBits(i, 28)^!getBits(i, 4));
}
break;
// Expression #40, Amount of variables: 31
case 40:
for (unsigned long i = element; i < (((unsigned long) 1) << 31); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)&&getBits(i, 5)||getBits(i, 6)^getBits(i, 7)||getBits(i, 8)^getBits(i, 3)||getBits(i, 9)&&!getBits(i, 10)||getBits(i, 11)||getBits(i, 12)||getBits(i, 13)&&!getBits(i, 14)||getBits(i, 15)||getBits(i, 16)||getBits(i, 1)||getBits(i, 7)||getBits(i, 14)^!getBits(i, 7)||getBits(i, 17)^getBits(i, 18)||getBits(i, 19)^getBits(i, 20)||getBits(i, 21)^getBits(i, 22)||getBits(i, 9)||!getBits(i, 23)||getBits(i, 24)&&!getBits(i, 25)||getBits(i, 26)||!getBits(i, 27)||getBits(i, 28)||getBits(i, 12)||getBits(i, 29)^!getBits(i, 30));
}
break;
// Expression #41, Amount of variables: 34
case 41:
for (unsigned long i = element; i < (((unsigned long) 1) << 34); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)||!getBits(i, 5)||getBits(i, 6)&&getBits(i, 7)||getBits(i, 4)&&getBits(i, 8)||getBits(i, 9)||getBits(i, 10)||getBits(i, 11)^getBits(i, 12)||getBits(i, 13)^getBits(i, 14)||getBits(i, 15)^!getBits(i, 15)||getBits(i, 16)&&!getBits(i, 17)||getBits(i, 18)&&getBits(i, 19)||getBits(i, 20)^getBits(i, 21)||getBits(i, 22)^getBits(i, 23)||getBits(i, 14)||getBits(i, 24)||getBits(i, 19)^getBits(i, 25)||getBits(i, 26)^!getBits(i, 27)||getBits(i, 28)&&!getBits(i, 29)||getBits(i, 5)^getBits(i, 30)||getBits(i, 31)||!getBits(i, 32)||getBits(i, 31)||getBits(i, 33));
}
break;
// Expression #42, Amount of variables: 35
case 42:
for (unsigned long i = element; i < (((unsigned long) 1) << 35); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)^!getBits(i, 5)||getBits(i, 6)^!getBits(i, 7)||getBits(i, 8)^getBits(i, 9)||getBits(i, 10)&&getBits(i, 11)||getBits(i, 12)^!getBits(i, 13)||getBits(i, 14)&&getBits(i, 11)||getBits(i, 15)&&getBits(i, 13)||getBits(i, 16)||getBits(i, 17)||getBits(i, 18)&&getBits(i, 19)||getBits(i, 20)^getBits(i, 21)||getBits(i, 22)&&getBits(i, 23)||getBits(i, 24)^!getBits(i, 1)||getBits(i, 25)||getBits(i, 9)||getBits(i, 26)&&getBits(i, 27)||getBits(i, 28)||!getBits(i, 29)||getBits(i, 30)&&getBits(i, 31)||getBits(i, 13)&&getBits(i, 7)||getBits(i, 8)||!getBits(i, 32)||getBits(i, 33)&&getBits(i, 34));
}
break;
// Expression #43, Amount of variables: 36
case 43:
for (unsigned long i = element; i < (((unsigned long) 1) << 36); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||getBits(i, 1)||getBits(i, 2)^!getBits(i, 3)||getBits(i, 4)^getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||getBits(i, 8)^getBits(i, 9)||getBits(i, 10)&&getBits(i, 11)||getBits(i, 12)||getBits(i, 13)||getBits(i, 14)&&!getBits(i, 15)||getBits(i, 16)^getBits(i, 17)||getBits(i, 18)&&getBits(i, 0)||getBits(i, 19)&&getBits(i, 20)||getBits(i, 21)^getBits(i, 22)||getBits(i, 23)^getBits(i, 0)||getBits(i, 24)||!getBits(i, 25)||getBits(i, 22)&&getBits(i, 26)||getBits(i, 14)^getBits(i, 27)||getBits(i, 28)^getBits(i, 9)||getBits(i, 29)^getBits(i, 30)||getBits(i, 31)||!getBits(i, 32)||getBits(i, 33)&&!getBits(i, 1)||getBits(i, 5)||getBits(i, 17)||getBits(i, 34)&&getBits(i, 35));
}
break;
// Expression #44, Amount of variables: 40
case 44:
for (unsigned long i = element; i < (((unsigned long) 1) << 40); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||!getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)^getBits(i, 5)||getBits(i, 6)||!getBits(i, 7)||getBits(i, 8)^getBits(i, 2)||getBits(i, 2)||getBits(i, 9)||getBits(i, 10)^getBits(i, 11)||getBits(i, 12)||getBits(i, 11)||getBits(i, 13)&&getBits(i, 14)||getBits(i, 15)&&getBits(i, 16)||getBits(i, 17)||!getBits(i, 18)||getBits(i, 19)&&getBits(i, 20)||getBits(i, 21)^getBits(i, 22)||getBits(i, 23)&&getBits(i, 20)||getBits(i, 24)||getBits(i, 25)||getBits(i, 26)^getBits(i, 27)||getBits(i, 28)||!getBits(i, 14)||getBits(i, 29)||getBits(i, 30)||getBits(i, 31)||getBits(i, 32)||getBits(i, 33)^!getBits(i, 34)||getBits(i, 35)^getBits(i, 36)||getBits(i, 37)&&!getBits(i, 38)||getBits(i, 4)||getBits(i, 39));
}
break;
// Expression #45, Amount of variables: 39
case 45:
for (unsigned long i = element; i < (((unsigned long) 1) << 39); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 2)^getBits(i, 3)||getBits(i, 4)^getBits(i, 5)||getBits(i, 6)&&getBits(i, 7)||getBits(i, 8)||getBits(i, 9)||getBits(i, 10)^!getBits(i, 11)||getBits(i, 12)^!getBits(i, 13)||getBits(i, 14)^!getBits(i, 15)||getBits(i, 16)^getBits(i, 17)||getBits(i, 18)||getBits(i, 19)||getBits(i, 20)&&getBits(i, 21)||getBits(i, 22)||getBits(i, 23)||getBits(i, 24)||getBits(i, 7)||getBits(i, 25)||!getBits(i, 26)||getBits(i, 27)||getBits(i, 28)||getBits(i, 12)||getBits(i, 29)||getBits(i, 19)^!getBits(i, 30)||getBits(i, 31)^!getBits(i, 1)||getBits(i, 32)&&!getBits(i, 15)||getBits(i, 33)||!getBits(i, 34)||getBits(i, 35)||getBits(i, 1)||getBits(i, 36)||getBits(i, 37)||getBits(i, 37)||getBits(i, 5)||getBits(i, 11)^getBits(i, 38));
}
break;
// Expression #46, Amount of variables: 42
case 46:
for (unsigned long i = element; i < (((unsigned long) 1) << 42); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&!getBits(i, 1)||getBits(i, 2)^!getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)^getBits(i, 7)||getBits(i, 8)^getBits(i, 9)||getBits(i, 10)&&getBits(i, 11)||getBits(i, 12)||getBits(i, 13)||getBits(i, 14)&&!getBits(i, 13)||getBits(i, 15)||!getBits(i, 16)||getBits(i, 17)&&!getBits(i, 18)||getBits(i, 19)||getBits(i, 20)||getBits(i, 21)||getBits(i, 22)||getBits(i, 23)&&getBits(i, 24)||getBits(i, 25)&&getBits(i, 26)||getBits(i, 27)||getBits(i, 28)||getBits(i, 6)&&getBits(i, 29)||getBits(i, 30)^getBits(i, 31)||getBits(i, 32)||getBits(i, 2)||getBits(i, 24)^!getBits(i, 33)||getBits(i, 34)||getBits(i, 6)||getBits(i, 35)^getBits(i, 36)||getBits(i, 37)||getBits(i, 38)||getBits(i, 39)&&getBits(i, 29)||getBits(i, 1)^!getBits(i, 20)||getBits(i, 40)&&getBits(i, 41));
}
break;
// Expression #47, Amount of variables: 43
case 47:
for (unsigned long i = element; i < (((unsigned long) 1) << 43); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)^!getBits(i, 3)||getBits(i, 4)&&!getBits(i, 5)||getBits(i, 6)&&getBits(i, 7)||getBits(i, 8)^getBits(i, 9)||getBits(i, 10)^getBits(i, 11)||getBits(i, 12)||getBits(i, 13)||getBits(i, 14)^getBits(i, 15)||getBits(i, 16)||getBits(i, 17)||getBits(i, 18)^!getBits(i, 19)||getBits(i, 20)||!getBits(i, 1)||getBits(i, 21)^getBits(i, 22)||getBits(i, 23)^!getBits(i, 24)||getBits(i, 25)||getBits(i, 26)||getBits(i, 27)&&getBits(i, 28)||getBits(i, 28)||getBits(i, 29)||getBits(i, 2)^getBits(i, 5)||getBits(i, 30)^!getBits(i, 29)||getBits(i, 31)||!getBits(i, 32)||getBits(i, 33)&&getBits(i, 34)||getBits(i, 35)||getBits(i, 2)||getBits(i, 5)||getBits(i, 5)||getBits(i, 36)||getBits(i, 37)||getBits(i, 38)||!getBits(i, 39)||getBits(i, 40)||getBits(i, 41)||getBits(i, 42)&&getBits(i, 29));
}
break;
// Expression #48, Amount of variables: 43
case 48:
for (unsigned long i = element; i < (((unsigned long) 1) << 43); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||getBits(i, 1)||getBits(i, 2)&&!getBits(i, 3)||getBits(i, 4)^getBits(i, 5)||getBits(i, 5)^!getBits(i, 6)||getBits(i, 7)^getBits(i, 8)||getBits(i, 9)^!getBits(i, 10)||getBits(i, 11)||getBits(i, 9)||getBits(i, 12)&&getBits(i, 13)||getBits(i, 14)&&!getBits(i, 15)||getBits(i, 16)&&getBits(i, 17)||getBits(i, 18)^getBits(i, 9)||getBits(i, 19)&&!getBits(i, 20)||getBits(i, 21)&&getBits(i, 22)||getBits(i, 23)&&getBits(i, 24)||getBits(i, 25)||!getBits(i, 26)||getBits(i, 27)&&getBits(i, 28)||getBits(i, 14)||!getBits(i, 15)||getBits(i, 9)||getBits(i, 29)||getBits(i, 30)^getBits(i, 24)||getBits(i, 31)&&getBits(i, 19)||getBits(i, 32)&&getBits(i, 33)||getBits(i, 3)&&!getBits(i, 24)||getBits(i, 34)||getBits(i, 35)||getBits(i, 36)&&getBits(i, 26)||getBits(i, 37)&&getBits(i, 38)||getBits(i, 39)||!getBits(i, 40)||getBits(i, 41)&&!getBits(i, 42));
}
break;
// Expression #49, Amount of variables: 41
case 49:
for (unsigned long i = element; i < (((unsigned long) 1) << 41); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)&&!getBits(i, 3)||getBits(i, 4)^!getBits(i, 5)||getBits(i, 6)&&getBits(i, 7)||getBits(i, 8)&&getBits(i, 9)||getBits(i, 10)^!getBits(i, 11)||getBits(i, 12)&&getBits(i, 13)||getBits(i, 14)&&getBits(i, 15)||getBits(i, 16)^getBits(i, 17)||getBits(i, 18)||getBits(i, 0)||getBits(i, 19)^getBits(i, 20)||getBits(i, 13)||getBits(i, 20)||getBits(i, 9)^getBits(i, 21)||getBits(i, 22)^getBits(i, 17)||getBits(i, 23)^getBits(i, 24)||getBits(i, 25)&&getBits(i, 26)||getBits(i, 27)&&getBits(i, 28)||getBits(i, 24)||getBits(i, 3)||getBits(i, 4)^getBits(i, 29)||getBits(i, 30)^getBits(i, 31)||getBits(i, 8)||getBits(i, 32)||getBits(i, 33)^getBits(i, 0)||getBits(i, 15)&&!getBits(i, 20)||getBits(i, 34)^getBits(i, 35)||getBits(i, 15)^getBits(i, 36)||getBits(i, 37)||getBits(i, 38)||getBits(i, 2)^!getBits(i, 39)||getBits(i, 15)&&getBits(i, 40));
}
break;
// Expression #50, Amount of variables: 40
case 50:
for (unsigned long i = element; i < (((unsigned long) 1) << 40); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 0)||!getBits(i, 2)||getBits(i, 3)||getBits(i, 4)||getBits(i, 5)&&getBits(i, 6)||getBits(i, 7)&&getBits(i, 8)||getBits(i, 9)&&getBits(i, 10)||getBits(i, 11)&&!getBits(i, 12)||getBits(i, 13)||getBits(i, 2)||getBits(i, 14)||getBits(i, 1)||getBits(i, 0)||!getBits(i, 15)||getBits(i, 11)^getBits(i, 16)||getBits(i, 17)&&!getBits(i, 18)||getBits(i, 19)^getBits(i, 20)||getBits(i, 12)||getBits(i, 20)||getBits(i, 0)||getBits(i, 16)||getBits(i, 21)&&!getBits(i, 4)||getBits(i, 6)^getBits(i, 20)||getBits(i, 1)^getBits(i, 22)||getBits(i, 23)&&getBits(i, 4)||getBits(i, 4)^!getBits(i, 24)||getBits(i, 25)^getBits(i, 26)||getBits(i, 27)&&getBits(i, 28)||getBits(i, 29)&&getBits(i, 16)||getBits(i, 30)&&getBits(i, 31)||getBits(i, 32)||getBits(i, 33)||getBits(i, 22)||getBits(i, 34)||getBits(i, 35)||getBits(i, 36)||getBits(i, 37)^getBits(i, 17)||getBits(i, 38)&&getBits(i, 39));
}
break;
// Expression #51, Amount of variables: 51
case 51:
for (unsigned long i = element; i < (((unsigned long) 1) << 51); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^!getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)&&getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||getBits(i, 8)^getBits(i, 9)||getBits(i, 10)&&getBits(i, 11)||getBits(i, 12)&&!getBits(i, 13)||getBits(i, 14)^getBits(i, 15)||getBits(i, 16)^!getBits(i, 17)||getBits(i, 18)||getBits(i, 19)||getBits(i, 3)&&!getBits(i, 20)||getBits(i, 21)&&getBits(i, 22)||getBits(i, 23)&&getBits(i, 11)||getBits(i, 24)&&getBits(i, 25)||getBits(i, 26)||getBits(i, 27)||getBits(i, 28)||getBits(i, 29)||getBits(i, 12)||!getBits(i, 30)||getBits(i, 31)&&getBits(i, 5)||getBits(i, 32)||getBits(i, 33)||getBits(i, 34)&&getBits(i, 11)||getBits(i, 35)||getBits(i, 36)||getBits(i, 37)^getBits(i, 38)||getBits(i, 25)||getBits(i, 39)||getBits(i, 40)||getBits(i, 41)||getBits(i, 42)&&getBits(i, 43)||getBits(i, 44)^!getBits(i, 45)||getBits(i, 46)^getBits(i, 38)||getBits(i, 47)^getBits(i, 48)||getBits(i, 49)||getBits(i, 50)||getBits(i, 1)^!getBits(i, 50));
}
break;
// Expression #52, Amount of variables: 52
case 52:
for (unsigned long i = element; i < (((unsigned long) 1) << 52); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 2)^getBits(i, 3)||getBits(i, 4)&&!getBits(i, 5)||getBits(i, 6)&&!getBits(i, 7)||getBits(i, 8)&&getBits(i, 9)||getBits(i, 10)&&getBits(i, 11)||getBits(i, 12)^getBits(i, 13)||getBits(i, 14)^getBits(i, 3)||getBits(i, 15)||getBits(i, 16)||getBits(i, 17)^getBits(i, 18)||getBits(i, 19)^getBits(i, 20)||getBits(i, 21)||getBits(i, 22)||getBits(i, 22)||getBits(i, 23)||getBits(i, 24)||!getBits(i, 25)||getBits(i, 26)||!getBits(i, 27)||getBits(i, 28)^getBits(i, 16)||getBits(i, 29)^!getBits(i, 30)||getBits(i, 31)||getBits(i, 32)||getBits(i, 33)||!getBits(i, 34)||getBits(i, 35)^getBits(i, 36)||getBits(i, 36)||!getBits(i, 37)||getBits(i, 38)&&getBits(i, 39)||getBits(i, 40)||getBits(i, 30)||getBits(i, 33)||getBits(i, 18)||getBits(i, 41)||getBits(i, 42)||getBits(i, 7)&&getBits(i, 43)||getBits(i, 44)^getBits(i, 45)||getBits(i, 46)&&getBits(i, 47)||getBits(i, 42)||getBits(i, 48)||getBits(i, 18)||getBits(i, 49)||getBits(i, 50)||getBits(i, 51));
}
break;
// Expression #53, Amount of variables: 54
case 53:
for (unsigned long i = element; i < (((unsigned long) 1) << 54); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)^!getBits(i, 5)||getBits(i, 3)&&getBits(i, 6)||getBits(i, 7)&&getBits(i, 8)||getBits(i, 9)||getBits(i, 10)||getBits(i, 11)^getBits(i, 12)||getBits(i, 13)||getBits(i, 10)||getBits(i, 14)^getBits(i, 15)||getBits(i, 16)^getBits(i, 17)||getBits(i, 18)^getBits(i, 19)||getBits(i, 4)^getBits(i, 20)||getBits(i, 21)&&getBits(i, 20)||getBits(i, 22)^getBits(i, 23)||getBits(i, 24)||getBits(i, 25)||getBits(i, 26)^getBits(i, 27)||getBits(i, 28)||getBits(i, 17)||getBits(i, 29)^!getBits(i, 30)||getBits(i, 31)&&!getBits(i, 32)||getBits(i, 31)^getBits(i, 33)||getBits(i, 34)^getBits(i, 35)||getBits(i, 36)&&!getBits(i, 37)||getBits(i, 38)&&!getBits(i, 39)||getBits(i, 40)&&getBits(i, 41)||getBits(i, 42)^getBits(i, 43)||getBits(i, 44)||!getBits(i, 45)||getBits(i, 8)||getBits(i, 46)||getBits(i, 47)^!getBits(i, 48)||getBits(i, 47)&&getBits(i, 49)||getBits(i, 43)&&getBits(i, 50)||getBits(i, 51)^getBits(i, 52)||getBits(i, 15)&&getBits(i, 53));
}
break;
// Expression #54, Amount of variables: 47
case 54:
for (unsigned long i = element; i < (((unsigned long) 1) << 47); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)^getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)||!getBits(i, 7)||getBits(i, 8)||getBits(i, 9)||getBits(i, 10)&&!getBits(i, 11)||getBits(i, 12)^getBits(i, 13)||getBits(i, 14)&&getBits(i, 15)||getBits(i, 16)||!getBits(i, 17)||getBits(i, 18)&&getBits(i, 19)||getBits(i, 20)&&getBits(i, 21)||getBits(i, 22)^getBits(i, 23)||getBits(i, 24)||!getBits(i, 15)||getBits(i, 19)&&getBits(i, 25)||getBits(i, 26)^getBits(i, 27)||getBits(i, 28)&&getBits(i, 24)||getBits(i, 29)&&getBits(i, 30)||getBits(i, 7)&&getBits(i, 17)||getBits(i, 31)||getBits(i, 30)||getBits(i, 32)^!getBits(i, 33)||getBits(i, 34)||getBits(i, 35)||getBits(i, 15)||!getBits(i, 36)||getBits(i, 15)||!getBits(i, 37)||getBits(i, 38)&&!getBits(i, 38)||getBits(i, 39)&&getBits(i, 7)||getBits(i, 5)^getBits(i, 40)||getBits(i, 24)^getBits(i, 41)||getBits(i, 42)^getBits(i, 24)||getBits(i, 32)&&getBits(i, 19)||getBits(i, 43)^getBits(i, 44)||getBits(i, 3)&&!getBits(i, 10)||getBits(i, 44)||!getBits(i, 45)||getBits(i, 23)^getBits(i, 46));
}
break;
// Expression #55, Amount of variables: 55
case 55:
for (unsigned long i = element; i < (((unsigned long) 1) << 55); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)||getBits(i, 1)||getBits(i, 2)||!getBits(i, 3)||getBits(i, 4)&&getBits(i, 5)||getBits(i, 6)^getBits(i, 7)||getBits(i, 3)^!getBits(i, 8)||getBits(i, 9)||getBits(i, 10)||getBits(i, 11)||getBits(i, 12)||getBits(i, 13)||getBits(i, 14)||getBits(i, 15)&&getBits(i, 16)||getBits(i, 17)^getBits(i, 18)||getBits(i, 19)^getBits(i, 20)||getBits(i, 21)&&getBits(i, 22)||getBits(i, 22)&&getBits(i, 23)||getBits(i, 24)^getBits(i, 25)||getBits(i, 26)^getBits(i, 27)||getBits(i, 5)||getBits(i, 28)||getBits(i, 7)||getBits(i, 29)||getBits(i, 30)^getBits(i, 31)||getBits(i, 32)&&getBits(i, 26)||getBits(i, 33)^getBits(i, 34)||getBits(i, 35)||getBits(i, 36)||getBits(i, 37)^getBits(i, 38)||getBits(i, 39)^getBits(i, 40)||getBits(i, 41)&&getBits(i, 42)||getBits(i, 43)||!getBits(i, 44)||getBits(i, 45)||getBits(i, 46)||getBits(i, 47)^getBits(i, 21)||getBits(i, 2)^getBits(i, 48)||getBits(i, 49)||getBits(i, 50)||getBits(i, 51)^getBits(i, 23)||getBits(i, 48)&&!getBits(i, 44)||getBits(i, 52)&&getBits(i, 53)||getBits(i, 54)||getBits(i, 8)||getBits(i, 21)||getBits(i, 25));
}
break;
// Expression #56, Amount of variables: 60
case 56:
for (unsigned long i = element; i < (((unsigned long) 1) << 60); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&!getBits(i, 0)||getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||!getBits(i, 6)||getBits(i, 7)||getBits(i, 8)||getBits(i, 9)^getBits(i, 10)||getBits(i, 11)&&getBits(i, 12)||getBits(i, 7)^!getBits(i, 13)||getBits(i, 14)||getBits(i, 15)||getBits(i, 16)^getBits(i, 17)||getBits(i, 18)&&getBits(i, 19)||getBits(i, 20)^getBits(i, 21)||getBits(i, 22)||!getBits(i, 23)||getBits(i, 24)||getBits(i, 22)||getBits(i, 25)^!getBits(i, 26)||getBits(i, 27)&&getBits(i, 28)||getBits(i, 29)^!getBits(i, 30)||getBits(i, 31)||getBits(i, 32)||getBits(i, 33)||getBits(i, 34)||getBits(i, 35)&&getBits(i, 36)||getBits(i, 37)&&getBits(i, 38)||getBits(i, 39)^getBits(i, 21)||getBits(i, 40)^getBits(i, 41)||getBits(i, 42)^getBits(i, 43)||getBits(i, 19)||!getBits(i, 44)||getBits(i, 45)&&getBits(i, 46)||getBits(i, 47)&&getBits(i, 48)||getBits(i, 49)||getBits(i, 43)||getBits(i, 0)^getBits(i, 50)||getBits(i, 51)^getBits(i, 52)||getBits(i, 53)&&getBits(i, 54)||getBits(i, 50)^getBits(i, 12)||getBits(i, 55)&&getBits(i, 56)||getBits(i, 57)&&getBits(i, 58)||getBits(i, 44)&&getBits(i, 59));
}
break;
// Expression #57, Amount of variables: 57
case 57:
for (unsigned long i = element; i < (((unsigned long) 1) << 57); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)&&getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||getBits(i, 8)||getBits(i, 9)||getBits(i, 7)||getBits(i, 10)||getBits(i, 11)^getBits(i, 12)||getBits(i, 9)&&getBits(i, 13)||getBits(i, 14)^getBits(i, 15)||getBits(i, 16)^!getBits(i, 17)||getBits(i, 18)&&getBits(i, 19)||getBits(i, 20)||!getBits(i, 21)||getBits(i, 22)^getBits(i, 23)||getBits(i, 24)^getBits(i, 25)||getBits(i, 3)||getBits(i, 16)||getBits(i, 26)^getBits(i, 27)||getBits(i, 28)||getBits(i, 16)||getBits(i, 15)^!getBits(i, 12)||getBits(i, 29)||getBits(i, 12)||getBits(i, 30)&&getBits(i, 31)||getBits(i, 32)&&!getBits(i, 33)||getBits(i, 34)^!getBits(i, 31)||getBits(i, 35)&&getBits(i, 12)||getBits(i, 25)&&!getBits(i, 36)||getBits(i, 37)||getBits(i, 12)||getBits(i, 38)||getBits(i, 39)||getBits(i, 40)&&getBits(i, 15)||getBits(i, 41)||!getBits(i, 42)||getBits(i, 43)||getBits(i, 44)||getBits(i, 12)||getBits(i, 45)||getBits(i, 42)&&getBits(i, 46)||getBits(i, 47)&&getBits(i, 48)||getBits(i, 49)||getBits(i, 50)||getBits(i, 51)||!getBits(i, 52)||getBits(i, 53)||getBits(i, 54)||getBits(i, 55)||getBits(i, 56));
}
break;
// Expression #58, Amount of variables: 58
case 58:
for (unsigned long i = element; i < (((unsigned long) 1) << 58); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)&&getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)||getBits(i, 7)||getBits(i, 8)||getBits(i, 9)||getBits(i, 10)^getBits(i, 11)||getBits(i, 12)||getBits(i, 13)||getBits(i, 14)||!getBits(i, 15)||getBits(i, 16)^getBits(i, 17)||getBits(i, 18)^getBits(i, 19)||getBits(i, 20)||getBits(i, 21)||getBits(i, 22)||getBits(i, 23)||getBits(i, 24)||!getBits(i, 25)||getBits(i, 26)&&getBits(i, 27)||getBits(i, 28)&&getBits(i, 29)||getBits(i, 30)||!getBits(i, 31)||getBits(i, 32)^getBits(i, 33)||getBits(i, 34)||getBits(i, 35)||getBits(i, 36)&&getBits(i, 36)||getBits(i, 11)||getBits(i, 7)||getBits(i, 31)||getBits(i, 37)||getBits(i, 38)||!getBits(i, 35)||getBits(i, 39)&&!getBits(i, 40)||getBits(i, 41)||getBits(i, 37)||getBits(i, 6)||getBits(i, 42)||getBits(i, 31)&&!getBits(i, 36)||getBits(i, 43)||getBits(i, 37)||getBits(i, 44)||getBits(i, 45)||getBits(i, 46)&&getBits(i, 47)||getBits(i, 30)&&!getBits(i, 39)||getBits(i, 48)&&!getBits(i, 36)||getBits(i, 49)||getBits(i, 30)||getBits(i, 35)&&getBits(i, 50)||getBits(i, 51)||getBits(i, 52)||getBits(i, 53)&&getBits(i, 54)||getBits(i, 48)^getBits(i, 55)||getBits(i, 56)||getBits(i, 57));
}
break;
// Expression #59, Amount of variables: 61
case 59:
for (unsigned long i = element; i < (((unsigned long) 1) << 61); i += maxCores*blockDim.x) {
result = result && (getBits(i, 0)&&getBits(i, 1)||getBits(i, 2)||getBits(i, 3)||getBits(i, 4)||getBits(i, 5)||getBits(i, 6)&&getBits(i, 7)||getBits(i, 8)^getBits(i, 9)||getBits(i, 7)&&getBits(i, 10)||getBits(i, 11)||getBits(i, 12)||getBits(i, 13)||getBits(i, 14)||getBits(i, 15)&&getBits(i, 16)||getBits(i, 17)&&getBits(i, 18)||getBits(i, 19)^getBits(i, 20)||getBits(i, 21)&&getBits(i, 22)||getBits(i, 23)^getBits(i, 24)||getBits(i, 25)^getBits(i, 26)||getBits(i, 27)&&!getBits(i, 28)||getBits(i, 29)^getBits(i, 30)||getBits(i, 31)||getBits(i, 32)||getBits(i, 33)^getBits(i, 34)||getBits(i, 35)||getBits(i, 36)||getBits(i, 14)||!getBits(i, 37)||getBits(i, 38)||!getBits(i, 39)||getBits(i, 40)^getBits(i, 41)||getBits(i, 38)&&getBits(i, 28)||getBits(i, 42)^getBits(i, 43)||getBits(i, 12)||!getBits(i, 12)||getBits(i, 44)&&getBits(i, 45)||getBits(i, 20)^getBits(i, 46)||getBits(i, 45)&&!getBits(i, 47)||getBits(i, 48)&&getBits(i, 49)||getBits(i, 26)&&getBits(i, 50)||getBits(i, 51)&&getBits(i, 52)||getBits(i, 53)||getBits(i, 54)||getBits(i, 26)||getBits(i, 55)||getBits(i, 56)||getBits(i, 43)||getBits(i, 57)&&getBits(i, 58)||getBits(i, 30)&&getBits(i, 26)||getBits(i, 49)||!getBits(i, 10)||getBits(i, 59)&&getBits(i, 60));
}
break;
}
}
|
14,774 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#define Nglobal 4096
// Computes one row of Pascal's triangle from the previous row.
// Launched with one block per entry (grid size == i, one thread per block).
// rowP: previous row, rowC: row being built, i: length of the new row.
__global__ void add(int *rowP, int *rowC, int i)
{
    const int col = blockIdx.x;
    // First and last entries of every row are 1; interior entries are the
    // sum of the two entries directly above them.
    rowC[col] = (col == 0 || col == i - 1) ? 1 : rowP[col - 1] + rowP[col];
}
// Computes row n of Pascal's triangle on the GPU, prints it, and returns the
// last CUDA status. Assumes 3 <= n <= Nglobal (rows 1 and 2 are handled by
// the caller).
cudaError_t GenerateTriangle(int n){
    // Host-side rows; "1 1" seeds the iteration.
    int previousRow[Nglobal+1];
    int currentRow[Nglobal+1];
    for(int i = 0; i <= n; i++){
        previousRow[i] = 0;
        currentRow[i] = 0;
    }
    previousRow[0] = 1;
    previousRow[1] = 1;
    int *dPreviousRow, *dCurrentRow;
    cudaMalloc((void **) &dPreviousRow, sizeof(previousRow));
    cudaMalloc((void **) &dCurrentRow, sizeof(currentRow));
    cudaMemcpy(dPreviousRow, previousRow, sizeof (previousRow), cudaMemcpyHostToDevice);
    cudaMemcpy(dCurrentRow, currentRow, sizeof (currentRow), cudaMemcpyHostToDevice);
    // Row i has i entries; one block per entry. The freshly computed row
    // becomes the "previous" row of the next iteration.
    for(int i = 3; i <= n+1; i++){
        add<<<i,1>>>(dPreviousRow, dCurrentRow, i);
        cudaMemcpy(dPreviousRow, dCurrentRow, (n+1)* sizeof (int), cudaMemcpyDeviceToDevice);
    }
    cudaMemcpy(currentRow, dCurrentRow, (n+1)* sizeof (int), cudaMemcpyDeviceToHost);
    // BUGFIX: the original leaked both device buffers.
    cudaFree(dPreviousRow);
    cudaFree(dCurrentRow);
    for(int i = 0; i < n+1; i++){
        printf("%d ",currentRow[i]);
    }
    printf("\n");
    // BUGFIX: surface any error from the allocations/copies/launches above
    // instead of unconditionally reporting success.
    return cudaGetLastError();
}
// Entry point: prints row Nglobal of Pascal's triangle and the elapsed time.
int main( int argc, const char* argv[] )
{
    clock_t Start = clock();
    int n = Nglobal;
    printf( "N is %d\n", n);
    // Rows 1 and 2 are trivial; emit them without touching the GPU.
    if(n==1){
        printf("1\n");
        return 0;
    }
    if(n==2){
        printf("1 1\n");
        return 0;
    }
    cudaError_t cudaStatus = GenerateTriangle(n);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "generation failed!");
        return 1;
    }
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    // BUGFIX: clock() returns processor ticks, not milliseconds; convert via
    // CLOCKS_PER_SEC (the original printed raw ticks and left an unused
    // stopTime variable behind).
    long elapsedMs = (long)((clock() - Start) * 1000 / CLOCKS_PER_SEC);
    printf("Elapsed Time: %ld Milliseconds\n", elapsedMs);
    return 0;
} |
14,775 | // Getter of the D set
// The D set is defined as the nodes in C that have more than 3/5*|C| neighbors in C
// Getter of the D set: D[i] = 1 iff node i is in C and has at least
// 3/5 * c neighbors (CSR adjacency: indptr/indices) that are also in C.
// Indexing uses threadIdx.x only, so this assumes a single-block launch.
__global__ void stratify_none_getD(float *C, int *indptr, int *indices, int n, float c, float *D)
{
    const int i = threadIdx.x;
    // Robustness: guard launches with more threads than nodes (the n
    // parameter was previously unused).
    if (i >= n) return;
    D[i] = 0;
    if(C[i] == 0) return;
    // Count neighbors of i that belong to C.
    int d = 0;
    for(int j = indptr[i]; j < indptr[i+1]; j++){
        if(C[indices[j]]){
            d += 1;
        }
    }
    // BUGFIX: "3 / 5" is integer division and evaluates to 0, so the
    // original test "d >= 0 * c" was always true. Use float arithmetic.
    if(d >= 3.0f / 5.0f * c){
        D[i] = 1;
    }
} |
14,776 | #include <iostream>
#include <math.h>
#include <cstdlib>
#include <ctime>
const int blockSizex = 32;
const int blockSizey = 8;
const int TILE_DIM = blockSizex;
#define imin(a, b) (a<b?a:b)
// Naive matrix transpose
// Each block handles a TILE_DIM x TILE_DIM tile; each thread strides down
// the tile in steps of blockSizey. Reads are coalesced but writes are
// strided by `width`, which is what makes this version "naive".
// Assumes a square matrix whose side equals gridDim.x * TILE_DIM.
__global__
void gpu_matrix_trans_naive(double *mat_in, double *mat_out) {
int idx = blockIdx.x * TILE_DIM + threadIdx.x;   // global column
int idy = blockIdx.y * TILE_DIM + threadIdx.y;   // global row (offset by j below)
int width = gridDim.x * TILE_DIM;                // matrix side length
for (int j = 0; j < TILE_DIM; j += blockSizey) {
// out[col][row] = in[row][col]
mat_out[idy + j + idx*width] = mat_in[(idy+j)*width + idx];
}
}
// Transpose via shared memory
// Transpose staged through shared memory: each block loads its tile, then
// writes it back transposed.
// NOTE(review): the output indices reuse the block's own (idx, idy) origin,
// so elements are transposed *within* each tile but tile positions are not
// swapped across the diagonal -- presumably an intermediate teaching step
// (the coalesced variants below swap blockIdx.x/y); confirm intent.
__global__
void gpu_matrix_trans_sharedmem(double *mat_in, double *mat_out) {
// shared memory (48KB/N per block), N is the number of blocks on the same multiprocessor
__shared__ double tile[TILE_DIM*TILE_DIM];
int idx = blockIdx.x * TILE_DIM + threadIdx.x;
int idy = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
// Coalesced load: consecutive threads read consecutive elements of a row.
for (int j = 0; j < TILE_DIM; j += blockSizey) {
tile[threadIdx.x + (threadIdx.y+j)*TILE_DIM] = mat_in[(idy+j)*width + idx];
}
__syncthreads();  // tile must be fully populated before any thread reads it
// Write back rows of the transposed tile.
for (int j = 0; j < TILE_DIM; j += blockSizey) {
mat_out[(idy+j)*width + idx] = tile[threadIdx.x*TILE_DIM + threadIdx.y + j];
}
}
// Coalesced Transpose via shared memory
// Fully coalesced transpose: the tile is stored transposed on load, and the
// block indices are swapped before the write so both the global read and
// the global write are contiguous. The transposed shared-memory store
// (stride TILE_DIM between lanes) incurs bank conflicts, which the
// padded variant below removes.
__global__
void gpu_matrix_trans_coales_sharedmem(double *mat_in, double *mat_out) {
// shared memory (48KB/N per block), N is the number of blocks on the same multiprocessor
__shared__ double tile[TILE_DIM*TILE_DIM];
int idx = blockIdx.x * TILE_DIM + threadIdx.x;
int idy = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
// Load the tile transposed into shared memory.
for (int j = 0; j < TILE_DIM; j += blockSizey) {
tile[threadIdx.x*TILE_DIM + threadIdx.y + j] = mat_in[(idy+j)*width + idx];
}
__syncthreads();
// Swap block coordinates: this block now writes the mirrored tile.
idx = blockIdx.y * TILE_DIM + threadIdx.x;
idy = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += blockSizey) {
mat_out[(idy+j)*width + idx] = tile[threadIdx.x + (threadIdx.y+j)*TILE_DIM];
}
}
// Coalesced Transpose via shared memory without bank conflict
// Same coalesced transpose as above, but the shared tile's inner dimension
// is padded by one element (TILE_DIM+1) so column accesses map to distinct
// banks, eliminating shared-memory bank conflicts.
__global__
void gpu_matrix_trans_coales_sharedmem_NoBankConfl(double *mat_in, double *mat_out) {
// shared memory (48KB/N per block), N is the number of blocks on the same multiprocessor
__shared__ double tile[TILE_DIM][TILE_DIM+1];
int idx = blockIdx.x * TILE_DIM + threadIdx.x;
int idy = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
// Coalesced load into the padded tile.
for (int j = 0; j < TILE_DIM; j += blockSizey) {
tile[threadIdx.y+j][threadIdx.x] = mat_in[(idy+j)*width + idx];
}
__syncthreads();
// Swap block coordinates and write the transposed tile, coalesced.
idx = blockIdx.y * TILE_DIM + threadIdx.x;
idy = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += blockSizey) {
mat_out[(idy+j)*width + idx] = tile[threadIdx.x][threadIdx.y+j];
}
}
// Host driver: runs all four transpose variants on a 1024x1024 random
// matrix, times them with CUDA events, and verifies the last result.
int main(void) {
    int cols = 1<<10, rows = 1<<10;
    int grid_cols = imin(512, (cols + TILE_DIM - 1)/TILE_DIM);
    int grid_rows = imin(512, (rows + TILE_DIM - 1)/TILE_DIM);
    dim3 dimBlock(blockSizex, blockSizey, 1);
    dim3 dimGrid(grid_cols, grid_rows, 1);
    // Allocate and initialize host buffers.
    double *h_mat_in = new double[cols*rows];
    double *h_mat_out = new double[cols*rows];
    std::srand(1103);
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
            h_mat_in[i*cols+j] = double(std::rand())/double(RAND_MAX);
    // Capture the GPU start time.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Allocate device buffers and upload the input matrix.
    double *d_mat_in, *d_mat_out;
    cudaMalloc(&d_mat_in, cols*rows*sizeof(double));
    cudaMalloc(&d_mat_out, cols*rows*sizeof(double));
    cudaMemcpy(d_mat_in, h_mat_in, cols*rows*sizeof(double), cudaMemcpyHostToDevice);
    // Run every variant; only the last kernel's output is checked below.
    gpu_matrix_trans_naive<<<dimGrid, dimBlock>>>(d_mat_in, d_mat_out);
    gpu_matrix_trans_sharedmem<<<dimGrid, dimBlock>>>(d_mat_in, d_mat_out);
    gpu_matrix_trans_coales_sharedmem<<<dimGrid, dimBlock>>>(d_mat_in, d_mat_out);
    gpu_matrix_trans_coales_sharedmem_NoBankConfl<<<dimGrid, dimBlock>>>(d_mat_in, d_mat_out);
    // Blocking copy back to the host (also waits for the kernels).
    cudaMemcpy(h_mat_out, d_mat_out, cols*rows*sizeof(double), cudaMemcpyDeviceToHost);
    // BUGFIX: cudaThreadSynchronize() is deprecated; use the supported
    // cudaDeviceSynchronize() instead.
    cudaDeviceSynchronize();
    // Get GPU stop time.
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // Verify: a transpose moves values without arithmetic, so exact
    // floating-point equality is expected here.
    int check_flag = 1;
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
            if (h_mat_out[j*rows + i] != h_mat_in[i*cols + j])
                check_flag = 0;
    if (!check_flag)
        std::cout << "GPU matrix transpose not success!!!" << std::endl;
    else {
        std::cout << "GPU matrix transpose success!!!" << std::endl;
        // BUGFIX: the timing message previously said "multiplication".
        std::cout << "GPU matrix transpose time: " << elapsedTime << " ms." << std::endl;
    }
    // Free resources (the original never destroyed the events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_mat_in);
    cudaFree(d_mat_out);
    delete [] h_mat_in;
    delete [] h_mat_out;
    return 0;
}
|
14,777 | #include <stdio.h>
#include <cuda.h>
// Fills vector[id] = id for every element; one thread per element with a
// bounds guard for the partially filled last block.
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
    unsigned gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= vectorsize) {
        return;
    }
    vector[gid] = gid;
}
#define BLOCKSIZE 1024
// Host driver: fills an N-element device vector with 0..N-1 and prints it.
// N is taken from the first command-line argument.
int main(int nn, char *str[]) {
    // Robustness: the original dereferenced str[1] without checking argc.
    if (nn < 2) {
        fprintf(stderr, "usage: %s <N>\n", str[0]);
        return 1;
    }
    unsigned N = atoi(str[1]);
    unsigned *vector, *hvector;
    cudaMalloc(&vector, N * sizeof(unsigned));
    hvector = (unsigned *)malloc(N * sizeof(unsigned));
    unsigned nblocks = ceil((float)N / BLOCKSIZE);   // ceil-div launch config
    printf("nblocks = %d\n", nblocks);
    dkernel<<<nblocks, BLOCKSIZE>>>(vector, N);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
    for (unsigned ii = 0; ii < N; ++ii) {
        printf("%4d ", hvector[ii]);
        if (ii % 1000 == 0) printf("\n");
    }
    // BUGFIX: the original leaked both the device and the host buffer.
    cudaFree(vector);
    free(hvector);
    return 0;
}
|
14,778 | #include <cstdio>
#define N 32
// Demonstrates intra-warp shared-memory write ordering. Every thread first
// copies its input element into shared memory; then even lanes write 1 to
// their own slot while odd lanes write 0 to the even slot just below them.
// Both writes target the same even-indexed slots with no synchronization,
// so which value survives depends on how the hardware orders the
// conflicting writes within the warp -- a deliberate race for observation;
// the printed pattern is not guaranteed. Launch with a single 32-thread
// block (one warp).
__global__ void iwarp(int* out)
{
__shared__ volatile int smem[32];
volatile int* vout = out;  // volatile: force the loads/stores to hit memory
int idx = threadIdx.x;
smem[idx] = vout[idx];
if(idx % 2 == 0)
smem[idx] = 1;
else
smem[idx-1] = 0;
vout[idx] = smem[idx];
}
// Host driver: uploads a zeroed 32-element vector, runs the intra-warp
// race demo, and prints the resulting pattern.
int main()
{
    int* din;
    cudaMalloc((void**)&din, N*sizeof(int));
    // Zero-initialize the input on the host and upload it.
    int in[N];
    for(int i = 0; i < N; i++)
        in[i] = 0;
    cudaMemcpy(din, &in, N*sizeof(int), cudaMemcpyHostToDevice);
    iwarp<<<1,N>>>(din);
    // Blocking copy also waits for the kernel to finish.
    int output[N];
    cudaMemcpy(&output, din, N*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++)
        printf("%d ", output[i]);
    printf("\n");
    // BUGFIX: the original leaked the device buffer and had no explicit
    // return status.
    cudaFree(din);
    return 0;
} |
14,779 | // A serial (CPU-based) matrix multiplication program
#include "stdio.h"
#define SIZE 512
// Matrix multiply function
// Multiplies two SIZE x SIZE row-major matrices: C = A * B.
// Each output element is accumulated in ascending-k order, matching a
// textbook triple loop.
void multiply(float * A, float * B, float * C)
{
    const int n = SIZE;
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            float acc = 0.00f;
            for (int k = 0; k < n; k++) {
                acc += A[row*n + k] * B[k*n + col];
            }
            C[row*n + col] = acc;
        }
    }
}
// Driver: fills A and B with arbitrary values and runs the multiply.
int main(int argc, char ** argv)
{
    // BUGFIX: three SIZE*SIZE float arrays (3 MB total) can overflow the
    // default thread stack on some platforms; give them static storage
    // duration instead of automatic.
    static float A[SIZE*SIZE],B[SIZE*SIZE],C[SIZE*SIZE];
    int i;
    for (i=0;i<SIZE*SIZE;i++) {
        A[i] = (float)i;
        B[i] = (float)SIZE*(float)SIZE-(float)i-1.0;
    }
    multiply(A,B,C); // perform matrix multiply
    return 0;
}
|
14,780 | #include "stdio.h"
#include<cuda.h>
#define SOA 512
// Element-wise vector addition: c = a + b. One thread per element; meant to
// be launched as a single block, so only threadIdx.x is used.
__global__ void vector_add(int *a,int *b,int *c){
    const int tid = threadIdx.x;
    c[tid] = a[tid] + b[tid];
}
// Host driver: builds two SOA-element vectors, adds them on the GPU, and
// prints the result.
int main(void){
    int i;
    int *a,*b,*c;       // host buffers
    int *da,*db,*dc;    // device buffers
    int size=sizeof(int) * SOA;
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    for(i=0;i<SOA;i++){
        a[i]= i;
        b[i]= i+1;
    }
    cudaMalloc((void**)&da,size);
    cudaMalloc((void**)&db,size);
    cudaMalloc((void**)&dc,size);
    cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(db,b,size,cudaMemcpyHostToDevice);
    // Single block of SOA threads: one thread per element.
    vector_add<<<1,SOA>>>(da,db,dc);
    cudaMemcpy(c,dc,size,cudaMemcpyDeviceToHost);
    printf("Addition : ");
    for(i=0; i<SOA; i++)
    {
        printf("%d\n",c[i]);
    }
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    // BUGFIX: the original leaked the three host buffers.
    free(a);
    free(b);
    free(c);
    return 0;
}
|
14,781 | #include <stdio.h>
#include <cuda.h>
#define N 100000
__device__ unsigned wlsize;
__device__ int worklist[N];
// Producer kernel: every thread appends its global id to the worklist.
// atomicInc returns the old value of wlsize and wraps it to 0 once it
// reaches N, so each thread gets a unique slot as long as at most N
// elements are inserted (here 4*64 = 256 << N).
__global__ void k1() {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
worklist[atomicInc(&wlsize, N)] = id;
}
// Debug kernel (launch with a single thread): prints how many elements
// k1 pushed into the worklist.
__global__ void k2() {
printf("Number of elements added = %d\n", wlsize);
}
// Host driver: zero the worklist counter, fill the worklist, then print
// its size from the device.
int main() {
    // BUGFIX: cudaMemset(&wlsize, ...) passes the *host* address of a
    // __device__ symbol, which is not a valid device pointer. Initialize
    // the device symbol through cudaMemcpyToSymbol instead.
    unsigned zero = 0;
    cudaMemcpyToSymbol(wlsize, &zero, sizeof(unsigned));
    k1<<<4, 64>>>();
    cudaDeviceSynchronize();
    k2<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
14,782 | #include <stdio.h>
#define NB_COLS 4000 // Nombre de colonnes de la matrice.
#define NB_ROWS 4000 // Nombre de lignes de la matrice.
void matrixInit (int *mat); // Initialisation d'une matrice.
void checkRes (int *mat); // Vérification des résultats.
// Noyau CUDA
// CUDA kernel: element-wise addition of two NB_ROWS x NB_COLS matrices
// stored as flat arrays. One thread per element, with a guard for the
// partially filled last block.
__global__ void MatrixAdd (int *a, int *b, int *c)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= NB_ROWS * NB_COLS) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
// Code du Host
// Host code: allocates the matrices, adds them on the GPU, verifies the
// result, and releases all resources.
int main (void)
{
    int *a, *b, *c;                // host matrices A, B, C
    int *dev_a, *dev_b, *dev_c;    // device matrices A, B, C
    int nbElements = NB_COLS * NB_ROWS;
    int matrixSize = nbElements * sizeof(int);
    int threadsPerBlock = 256;
    int blocksPerGrid = (nbElements + threadsPerBlock - 1) / threadsPerBlock;
    // Host allocations.
    a = (int *)malloc(matrixSize);
    if (a == NULL) { printf ("Allocation failure\n"); abort();}
    b = (int *)malloc(matrixSize);
    if (b == NULL) { printf ("Allocation failure\n"); abort();}
    c = (int *)malloc(matrixSize);
    if (c == NULL) { printf ("Allocation failure\n"); abort();}
    // Device allocations.
    cudaMalloc ( (void **) &dev_a, matrixSize);
    cudaMalloc ( (void **) &dev_b, matrixSize);
    cudaMalloc ( (void **) &dev_c, matrixSize);
    auto cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel memory failed: %s\n", cudaGetErrorString(cudaStatus));
        return -1;
    }
    // Initialize A and B, then copy them to the GPU.
    matrixInit(a);
    matrixInit(b);
    cudaMemcpy ( dev_a, a, matrixSize, cudaMemcpyHostToDevice) ;
    cudaMemcpy ( dev_b, b, matrixSize, cudaMemcpyHostToDevice) ;
    // Launch the kernel.
    MatrixAdd<<<blocksPerGrid, threadsPerBlock>>>( dev_a, dev_b, dev_c );
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        return -1;
    }
    // Copy C back to the host. BUGFIX: this blocking copy also surfaces any
    // asynchronous execution error from the kernel, so check its status too
    // (the original ignored it).
    cudaStatus = cudaMemcpy ( c, dev_c, matrixSize, cudaMemcpyDeviceToHost) ;
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "result copy failed: %s\n", cudaGetErrorString(cudaStatus));
        return -1;
    }
    checkRes (c);
    // Release host and device matrices.
    free (a);
    free (b);
    free (c);
    cudaFree ( dev_a ) ;
    cudaFree ( dev_b ) ;
    cudaFree ( dev_c ) ;
    getchar();
    return 0 ;
}
/////////////////////////////////////////////////////////////////////////////////////////////
//
// Fonctions outils. Rien à modifier.
//
/////////////////////////////////////////////////////////////////////////////////////////////
// Fills mat (row-major, NB_ROWS x NB_COLS) with mat[l][c] = l + c.
void matrixInit(int *mat)
{
    for (int l = 0; l < NB_ROWS; l++) {
        for (int c = 0; c < NB_COLS; c++) {
            mat[l * NB_COLS + c] = l + c;
        }
    }
}
// Verifies that every element equals 2*(row+col); on the first mismatch it
// prints a diagnostic and exits, otherwise it reports success.
void checkRes(int *mat)
{
    for (int l = 0; l < NB_ROWS; l++) {
        for (int c = 0; c < NB_COLS; c++) {
            int got = mat[l * NB_COLS + c];
            int expected = 2 * (c + l);
            if (got != expected) {
                printf ("Erreur de calcul sur l'element %d:%d :\n", l, c);
                printf (" - Valeur calculee : %d\n", got);
                printf (" - Valeur attendue : %d\n", expected);
                exit(0);
            }
        }
    }
    printf ("LEVEL 1: Done\n");
    printf ("Good job!\n");
}
|
// Accumulates one kernel-tap contribution into the output feature map:
// each grid z-slice handles tap (i, j) of the kw x kh kernel and adds
// A[ty + j][tx + i] * K[j][i] plus a bias share into C[ty][tx].
// NOTE(review): different blockIdx.z slices read-modify-write the same C
// element with a plain +=, which is a data race unless the z-slices run
// serially -- confirm how this kernel is launched.
// NOTE(review): bias[0]/(kw*kh) is added once per tap, so over all kw*kh
// taps it contributes bias[0] in total -- presumably the intent. C is
// assumed pre-zeroed by the caller.
__global__ void conv(const float *A,const float *K,const float *bias,int aw,int ah, int kw,int kh,float *C){
// A : input data, K : kernel
// aw : A's width, ah : A's height
// kw : K's width, kh : K's height
// block=(BLOCK_SIZE,BLOCK_SIZE,1)
// grid =(cw/BLOCK_SIZE, ch/BLOCK_SIZE, Ker_SIZE)
int tx = threadIdx.x+blockIdx.x*blockDim.x;
int ty = threadIdx.y+blockIdx.y*blockDim.y;
int cw = blockDim.x*gridDim.x;
// Decode this z-slice's kernel tap coordinates (i, j).
int i = blockIdx.z % kw;
int j = blockIdx.z / kw;
/* index */
int a_idx = tx + ty*aw + (i+j*aw);
int k_idx = i + (j*kw);
int c_idx = tx + (ty*cw);
/* Convolution */
C[c_idx] += A[a_idx]*K[k_idx]+bias[0]/(kw*kh);
}
|
14,784 | /*
* Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains CUDA kernels for comparing two PRNU noise patterns
* using Peak To Correlation Energy.
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
* @version 0.1
*/
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif
//function interfaces to prevent C++ garbling the kernel keys
extern "C" {
__global__ void toComplex(int h, int w, float* x, float* input_x);
__global__ void toComplexAndFlip(int h, int w, float* y, float *input_y);
__global__ void toComplexAndFlip2(int h, int w, float* x, float* y, float* input_x, float *input_y);
__global__ void computeEnergy(int h, int w, double *energy, int *peakIndex, float *input);
__global__ void computeCrossCorr(int h, int w, float *c, float *x, float *y);
__global__ void findPeak(int h, int w, float *peakValue, float *peakValues, int *peakIndex, float *input);
__global__ void sumDoubles(double *output, double *input, int n);
__global__ void maxlocFloats(int *output_loc, float *output_float, int *input_loc, float *input_float, int n);
__global__ void computePCE(double *pce, float *peak, double *energy);
}
/**
* Simple helper kernel to convert an array of real values to an array of complex values
*/
/**
 * Converts the real-valued h x w array input_x into an interleaved complex
 * array x (real parts copied, imaginary parts zeroed). One thread per
 * element; launched on a 2D grid of block_size_x x block_size_y blocks.
 */
__global__ void toComplex(int h, int w, float* x, float *input_x) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row >= h || col >= w) {
        return;
    }
    int src = row * w + col;
    x[2 * src]     = input_x[src];
    x[2 * src + 1] = 0.0f;
}
/**
* Simple helper kernel to convert an array of real values to a flipped array of complex values
*/
/**
 * Converts the real-valued array input_y to interleaved complex values in y
 * while flipping it both vertically and horizontally (180-degree rotation).
 * Imaginary parts are zeroed. One thread per element.
 */
__global__ void toComplexAndFlip(int h, int w, float *y, float* input_y) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row >= h || col >= w) {
        return;
    }
    // Destination is the mirrored position in both axes.
    int dst = (h - 1 - row) * w + (w - 1 - col);
    y[2 * dst]     = input_y[row * w + col];
    y[2 * dst + 1] = 0.0f;
}
/**
* Two-in-one kernel that puts x and y to Complex, but flips y
*/
/**
 * Fused variant of the two kernels above: converts input_x to complex into
 * x in place-order, and simultaneously writes the 180-degree-rotated
 * complex copy of input_y into y. Imaginary parts are zeroed.
 */
__global__ void toComplexAndFlip2(int h, int w, float *x, float *y, float *input_x, float *input_y) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row >= h || col >= w) {
        return;
    }
    int src = row * w + col;
    // Straight complex copy of x.
    x[2 * src]     = input_x[src];
    x[2 * src + 1] = 0.0f;
    // y is flipped vertically and horizontally.
    int dst = (h - 1 - row) * w + (w - 1 - col);
    y[2 * dst]     = input_y[src];
    y[2 * dst + 1] = 0.0f;
}
/*
* This method computes a cross correlation in frequency space
*/
// Multiplies two spectra element-wise to form the full w-wide
// cross-correlation spectrum. x and y are laid out as h rows of (w/2 + 1)
// interleaved complex values (R2C half-spectrum); the missing right half of
// each output row is reconstructed via Hermitian symmetry by reading the
// mirrored element. NOTE(review): which factor is conjugated differs per
// half -- verify this against the FFT convention of the surrounding code.
__global__ void computeCrossCorr(int h, int w, float *c, float *x, float *y) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (i < h && j < w) {
int oindex = i * w + j;
int iindex;
bool xConj = false;
bool yConj = false;
if (j < w / 2 + 1) {
// Left half: the element is stored directly in the half-spectrum.
iindex = i * (w / 2 + 1) + j;
xConj = true;
} else {
// Right half: mirror back into the stored half-spectrum.
iindex = i * (w / 2 + 1) + (w - j);
yConj = true;
}
// Load inputs
float xRe = x[2 * iindex + 0];
float xIm = x[2 * iindex + 1];
float yRe = y[2 * iindex + 0];
float yIm = y[2 * iindex + 1];
// Take complex conjugate if in mirrored part
if (xConj) xIm = -xIm;
if (yConj) yIm = -yIm;
// Store results: complex product (xRe + i*xIm)(yRe + i*yIm)
c[2 * oindex + 0] = (xRe * yRe) - (xIm * yIm);
c[2 * oindex + 1] = (xRe * yIm) + (xIm * yRe);
}
}
/* ----------- kernels below this line are reducing kernels ------------ */
#ifndef grid_size_x //hack to check if kernel tuner is being used
#undef block_size_x
#define block_size_x 256
#endif
/*
* This method searches for the peak value in a cross correlated signal and outputs the index
* input is assumed to be a complex array of which only the real component contains values that
* contribute to the peak
*
* Thread block size should be power of two because of the reduction.
* The implementation currently assumes only one thread block is used for the entire input array
*
* In case of multiple thread blocks initialize output to zero and use atomic add or another kernel
*/
// Per-block argmax of |Re(input[i])| over the h*w complex array: each
// thread scans a grid-stride slice, then a shared-memory tree reduction
// (block_size_x must be a power of two) combines the per-thread maxima.
// Per-block results go to peakValues/peakIndex for a second-pass reduction
// by maxlocFloats.
__global__ void findPeak(int h, int w, float *peakValue, float *peakValues, int *peakIndex, float *input) {
int x = blockIdx.x * block_size_x + threadIdx.x;
int ti = threadIdx.x;
int step_size = gridDim.x * block_size_x;
int n = h*w;
__shared__ float shmax[block_size_x];
__shared__ int shind[block_size_x];
//compute thread-local maxima over a grid-stride slice
float max = -1.0f;
float val = 0.0f;
int index = -1;
for (int i=x; i < n; i+=step_size) {
val = fabsf(input[i*2]); //input is a complex array, only using real value
if (val > max) {
max = val;
index = i;
}
}
//store local results in shared memory
shmax[ti] = max;
shind[ti] = index;
__syncthreads();
//tree-reduce local maxima, keeping value and index together
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
float v1 = shmax[ti];
float v2 = shmax[ti + s];
if (v1 < v2) {
shmax[ti] = v2;
shind[ti] = shind[ti + s];
}
}
__syncthreads();
}
//write result (thread 0 holds the block-wide maximum)
if (ti == 0) {
peakValues[blockIdx.x] = shmax[0];
peakIndex[blockIdx.x] = shind[0];
if (blockIdx.x == 0) {
peakValue[0] = input[n*2-2]; //instead of using real peak use last real value
}
}
}
/*
* This method computes the energy of the signal minus an area around the peak
*
* input is assumed to be a complex array of which only the real component
* contains values that contribute to the energy
*
* Thread block size should be power of two because of the reduction.
* The implementation currently assumes only one thread block is used for the entire input array
*
* In case of multiple thread blocks run kernel twice, with 1 thread block the second time
*/
#define SQUARE_SIZE 11
#define RADIUS 5
// Per-block sum of squared real parts, excluding a window around the peak,
// normalized by the number of non-excluded elements. Grid-stride scan plus
// shared-memory tree reduction (block_size_x must be a power of two).
// NOTE(review): the strict inequalities exclude a (2*RADIUS-1) = 9-wide
// window, while the normalization subtracts SQUARE_SIZE^2 = 121 elements --
// confirm whether the off-by-one is intentional.
__global__ void computeEnergy(int h, int w, double *energy, int *peakIndex, float *input) {
int x = blockIdx.x * block_size_x + threadIdx.x;
int ti = threadIdx.x;
int step_size = gridDim.x * block_size_x;
int n = h*w;
__shared__ double shmem[block_size_x];
// Convert the flat peak index into (row, col) coordinates.
int peak_i = peakIndex[0];
int peak_y = peak_i / w;
int peak_x = peak_i - (peak_y * w);
double sum = 0.0f;
if (ti < n) {
//compute thread-local sums
for (int i=x; i < n; i+=step_size) {
int row = i / w;
int col = i - (row*w);
//exclude area around the peak from sum
int peakrow = (row > peak_y - RADIUS && row < peak_y + RADIUS);
int peakcol = (col > peak_x - RADIUS && col < peak_x + RADIUS);
if (peakrow && peakcol) {
continue;
} else {
double val = input[row*w*2+col*2];
sum += val * val;
}
}
}
//store local sums in shared memory
shmem[ti] = sum;
__syncthreads();
//tree-reduce local sums
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result, normalized by the count of summed elements
if (ti == 0) {
energy[blockIdx.x] = shmem[0] / (double)((w*h) - (SQUARE_SIZE * SQUARE_SIZE));
}
}
/*
* Simple CUDA Helper function to reduce the output of a
* reduction kernel with multiple thread blocks to a single value
*
* This function performs a sum of an array of doubles
*
* This function is to be called with only a single thread block
*/
// Sums n doubles into output[0]. Intended to be launched with a single
// thread block of block_size_x threads (power of two): each thread strides
// through the input, then a shared-memory tree reduction combines the
// partial sums.
__global__ void sumDoubles(double *output, double *input, int n) {
int ti = threadIdx.x;
__shared__ double shmem[block_size_x];
//compute thread-local sums over a strided slice of the input
double sum = 0.0;
for (int i=ti; i < n; i+=block_size_x) {
sum += input[i];
}
//store local sums in shared memory
shmem[ti] = sum;
__syncthreads();
//tree-reduce local sums
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result (thread 0 holds the full sum)
if (ti == 0) {
output[0] = shmem[0];
}
}
/*
* Simple CUDA helper functions to reduce the output of a reducing kernel with multiple
* thread blocks to a single value
*
* This function performs a reduction for the max and the location of the max
*
* This function is to be called with only one thread block
*/
// Second-pass max+location reduction: combines the n per-block results of
// findPeak into a single maximum value and its index. Intended to be
// launched with a single block of block_size_x threads (power of two).
__global__ void maxlocFloats(int *output_loc, float *output_float, int *input_loc, float *input_float, int n) {
int ti = threadIdx.x;
__shared__ float shmax[block_size_x];
__shared__ int shind[block_size_x];
//compute thread-local maxima over a strided slice
float max = -1.0f;
float val = 0.0f;
int loc = -1;
for (int i=ti; i < n; i+=block_size_x) {
val = input_float[i];
if (val > max) {
max = val;
loc = input_loc[i];
}
}
//store local results in shared memory
shmax[ti] = max;
shind[ti] = loc;
__syncthreads();
//tree-reduce, keeping value and location together
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
float v1 = shmax[ti];
float v2 = shmax[ti + s];
if (v1 < v2) {
shmax[ti] = v2;
shind[ti] = shind[ti + s];
}
}
__syncthreads();
}
//write result (thread 0 holds the global maximum and its index)
if (ti == 0) {
output_float[0] = shmax[0];
output_loc[0] = shind[0];
}
}
/*
* Simple kernel to calculate the final PCE given the peak and energy of the cross correlation.
*/
/*
 * Computes the final PCE score, peak^2 / energy, with a single thread.
 * The squaring is done in float (as the inputs are) before the division
 * promotes to double.
 */
__global__ void computePCE(double *pce, float *peak, double *energy) {
    bool isFirst = (threadIdx.x == 0 && blockIdx.x == 0);
    if (isFirst) {
        float p = *peak;
        *pce = (p * p) / (*energy);
    }
}
|
14,785 | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
// Microbenchmark kernel: occupies the special-function pipeline with long
// dependent chains of lg2.approx PTX instructions over 16 registers.
// Only lanes with (threadIdx.x % 32) < div execute the asm block, so `div`
// controls the amount of intra-warp divergence; `n` scales the number of
// iterations of the chain. The value stored through D (2*tid of whichever
// thread writes last) exists only to keep the kernel from being optimized
// away -- this kernel is for timing, not for its output.
__global__
//void compute(const float* A, const float* B, const float* C, float* D, int n) {
void compute(float* D, int n, int div) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
float I1 = tid * 2.0;
int thread_id = threadIdx.x % 32;
if (thread_id < div) {
// Declare and seed the 16 scratch registers (%r113..%r128) used below.
__asm volatile (
" .reg .f32 %r112;\n\t"
" .reg .f32 %r113;\n\t"
" .reg .f32 %r114;\n\t"
" .reg .f32 %r115;\n\t"
" .reg .f32 %r116;\n\t"
" .reg .f32 %r117;\n\t"
" .reg .f32 %r118;\n\t"
" .reg .f32 %r119;\n\t"
" .reg .f32 %r120;\n\t"
" .reg .f32 %r121;\n\t"
" .reg .f32 %r122;\n\t"
" .reg .f32 %r123;\n\t"
" .reg .f32 %r124;\n\t"
" .reg .f32 %r125;\n\t"
" .reg .f32 %r126;\n\t"
" .reg .f32 %r127;\n\t"
" .reg .f32 %r128;\n\t"
"mov.f32 %r112, 4.4;\n\t"
"mov.f32 %r113, %r112;\n\t"
"mov.f32 %r114, 2.2;\n\t"
"mov.f32 %r115, 3.3;\n\t"
"mov.f32 %r116, 1.23;\n\t"
"mov.f32 %r117, 2.42;\n\t"
"mov.f32 %r118, 3.34;\n\t"
"mov.f32 %r119, 5.62;\n\t"
"mov.f32 %r120, 2.56;\n\t"
"mov.f32 %r121, 1.56;\n\t"
"mov.f32 %r122, 2.56;\n\t"
"mov.f32 %r123, 5.56;\n\t"
"mov.f32 %r124, 8.56;\n\t"
"mov.f32 %r125, 3.56;\n\t"
"mov.f32 %r126, 5.56;\n\t"
"mov.f32 %r127, 6.56;\n\t"
"mov.f32 %r128, 5.6;\n\t"
);
// Each iteration issues 16 repetitions of a 16-instruction group; every
// register depends on its own previous value, forming 16 independent
// dependent chains that keep the SFU pipeline saturated.
for (int k = 0; k < n; k++) {
__asm volatile (
// repetition 1/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 2/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 3/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 4/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 5/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 6/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 7/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 8/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 9/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 10/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 11/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 12/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 13/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 14/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 15/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
// repetition 16/16
"lg2.approx.f32 %r113, %r113;\n\t"
"lg2.approx.f32 %r114, %r114;\n\t"
"lg2.approx.f32 %r115, %r115;\n\t"
"lg2.approx.f32 %r116, %r116;\n\t"
"lg2.approx.f32 %r117, %r117;\n\t"
"lg2.approx.f32 %r118, %r118;\n\t"
"lg2.approx.f32 %r119, %r119;\n\t"
"lg2.approx.f32 %r120, %r120;\n\t"
"lg2.approx.f32 %r121, %r121;\n\t"
"lg2.approx.f32 %r122, %r122;\n\t"
"lg2.approx.f32 %r123, %r123;\n\t"
"lg2.approx.f32 %r124, %r124;\n\t"
"lg2.approx.f32 %r125, %r125;\n\t"
"lg2.approx.f32 %r126, %r126;\n\t"
"lg2.approx.f32 %r127, %r127;\n\t"
"lg2.approx.f32 %r128, %r128;\n\t"
);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
*D = I1;
// __syncthreads();
}
// Print command-line usage.  main() requires exactly four arguments:
// num_blocks, num_threads_per_block, iterations, and divergence
// (the number of threads active per warp).
// BUG FIX: the original message fused two string literals with no
// separator and omitted the mandatory <divergence> argument.
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <divergence (threads active per warp)>" << std::endl;
}
// Benchmark driver: parses the launch configuration from the command line,
// times one launch of the `compute` kernel with CUDA events, and copies the
// single-float result back.  d_res/h_res and compute() are globals declared
// earlier in this file (outside this excerpt).
int main(int argc, char **argv)
{
// Exactly four user arguments are required (see usage()).
if (argc != 5) {
usage();
exit(1);
}
int num_blocks = atoi(argv[1]);
int num_threads_per_block = atoi(argv[2]);
int iterations = atoi(argv[3]);
int divergence = atoi(argv[4]);
// h_A = new float(2.0);
// h_B = new float(3.0);
// h_C = new float(4.0);
// cudaMalloc((void**)&d_A, sizeof(float));
// cudaMalloc((void**)&d_B, sizeof(float));
// cudaMalloc((void**)&d_C, sizeof(float));
cudaMalloc((void**)&d_res, sizeof(float));
// cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice);
// Event pair brackets the profiled kernel launch (launch is async, so the
// elapsed time includes only GPU-side work between the two records).
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
// compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations);
compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence);
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout << "GPU Elapsed Time = " << time << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceSynchronize();
// NOTE(review): no cudaGetLastError() after the launch -- launch-config
// errors are silently dropped.  h_res is presumably a host global declared
// above this excerpt; confirm it is allocated before this copy.
cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost);
return 0;
}
|
14,786 | /*
* Device code.
*/
#ifndef _OSM_KERNEL_H_
#define _OSM_KERNEL_H_
#include <stdio.h>
#include <cuda.h>
#define SQR(x) ((x)*(x))
__device__ __host__ float
pnt_dist(const float4 pnt1, const float4 pnt2)
{
// Euclidean distance between the two sphere centres; the w component
// (used elsewhere as the radius) is deliberately ignored here.
float dx = pnt1.x - pnt2.x;
float dy = pnt1.y - pnt2.y;
float dz = pnt1.z - pnt2.z;
return sqrt(dx*dx + dy*dy + dz*dz);
}
__device__ __host__ float
overlapping(const float r1, const float r2, const float dist)
// fraction of the radius sum covered by the intersection chord
{
// Spheres whose centres are at least r1+r2 apart do not overlap at all.
if (dist >= (r1+r2))
return 0;
// Heron's formula: area of the triangle with sides r1, r2, dist.  Its
// height over the `dist` side is half the intersection chord.
float S = sqrt((r1+r2+dist)*(r2+dist-r1)*(r1+dist-r2)*(r1+r2-dist))/4;
float h = 2 * S / dist;
return 2 * h / (r1 + r2);
}
__device__ __host__ int
is_overlapped(const float4 pnt1, const float4 pnt2, const float max_overlapping)
{
// True when the spheres intersect AND their overlap fraction exceeds
// the allowed maximum (w stores each sphere's radius).
const float d = pnt_dist(pnt1, pnt2);
if (d >= pnt1.w + pnt2.w)
return 0;
return overlapping(pnt1.w, pnt2.w, d) > max_overlapping;
}
__device__ __host__ int
slightly_overlap(const float4 pnt1, const float4 pnt2, const float max_overlapping)
{
// True when the spheres intersect but the overlap fraction stays within
// the allowed maximum (complement of is_overlapped for touching spheres).
const float d = pnt_dist(pnt1, pnt2);
if (d >= pnt1.w + pnt2.w)
return 0;
return overlapping(pnt1.w, pnt2.w, d) <= max_overlapping;
}
//__global__ void
//overlap_list(float4 * spheres, float4 curr_sph, int * results, int * res_cnt, float max_overlapping, int curr_cnt)
//{
// int idx = blockIdx.x * blockDim.x + threadIdx.x;
// if (idx < curr_cnt)
// {
// float4 cmp_sph = spheres[idx];
// if (is_overlapped(curr_sph, cmp_sph, max_overlapping))
// {
// int old_cnt = atomicAdd(res_cnt, 1);
// results[old_cnt+1] = idx;
// }
// }
//}
__global__ void
nei_list(float4 * spheres, float4 curr_sph, int * results, int * res_cnt, int curr_cnt)
{
// One thread per candidate sphere: append the index of every sphere whose
// centre lies within 3 radii of curr_sph.  res_cnt is bumped atomically,
// so the order of indices in results[] is nondeterministic.
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= curr_cnt)
return;
if (pnt_dist(spheres[idx], curr_sph) < 3 * curr_sph.w)
{
results[atomicAdd(res_cnt, 1)] = idx;
}
}
__global__ void
slight_nei_list(float4 * spheres, int curr_sph_idx, int sph_cnt, float max_overlap, int * results)
{
// One thread per sphere index strictly above curr_sph_idx (upper triangle,
// so each pair is examined once).  results[0] doubles as the atomic element
// counter; matching indices are appended starting at results[1].
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx <= curr_sph_idx || idx >= sph_cnt)
return;
if (slightly_overlap(spheres[idx], spheres[curr_sph_idx], max_overlap))
{
int slot = atomicAdd(results, 1);
results[slot+1] = idx;
}
}
#endif // #ifndef _OSM_KERNEL_H_
|
14,787 | #include "includes.h"
static char* program_name;
// Usage
// One Jacobi sweep: thread idx updates row idx of the solution,
//   x_next[i] = (b[i] - sum_{j != i} A[i][j] * x_now[j]) / A[i][i]
// A is Ni x Nj, row-major.  Only threadIdx.x is used, so a single-block
// launch is assumed (as in the original).
__global__ void jacobiOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj)
{
int idx = threadIdx.x;
// BUG FIX: Ni was accepted but never used, so launching with more threads
// than rows walked off the end of A/b/x_next.  Guard against that.
if (idx >= Ni)
return;
float sigma = 0.0f;
for (int j=0; j<Nj; j++)
{
if (idx != j)
sigma += A[idx*Nj + j] * x_now[j];
}
x_next[idx] = (b[idx] - sigma) / A[idx*Nj + idx];
}
14,788 | #include "includes.h"
// Fill KernelPhase (row*column elements) with sqrtf(km^2 - kr^2) - km,
// clamped to 0 where the argument goes negative -- this looks like a
// propagation/defocus phase kernel, with km = nm/lambda (spatial frequency
// of the medium).  Grid-stride loop: any launch configuration covers all
// pixels.
// ImgProperties layout (established by the reads below):
//   [0] pixel size, [1] magnification MagX, [2] refractive index nm,
//   [3] wavelength lambda.
__global__ void makeKernel(float* KernelPhase, int row, int column, float* ImgProperties, float MagXscaling) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float MagX = ImgProperties[1];
float pixSize= ImgProperties[0];
float nm = ImgProperties[2];
float lambda = ImgProperties[3];
float pixdxInv = MagX/pixSize*MagXscaling; // Magnification/pixSize
float km = nm/lambda; // nm / lambda
for (int i = threadID; i < row*column; i += numThreads) {
// NOTE(review): both axes are derived from `row` (i%row, i/row, -row/2);
// for a non-square image one of these presumably should use `column` --
// confirm the intended memory layout with the caller.
int dx = i%row;
int dy = i/row;
float kdx = float( dx - row/2)*pixdxInv;
float kdy = float( dy - row/2)*pixdxInv;
float temp = km*km - kdx*kdx - kdy*kdy;
// Evanescent components (negative argument) are zeroed.
KernelPhase[i]= (temp >= 0) ? (sqrtf(temp)-km) : 0;
//This still needs quadrant swapping so this will not work in the ifft routine as is!
}
}
14,789 | // Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p5.cu -o assignment5-p5
#include <cmath>
#include <cstdlib>
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
#define N 512
#define THRESHOLD (0.000001)
using std::cerr;
using std::cout;
using std::endl;
// TODO: Edit the function definition as required
// Naive 6-point 3D stencil on an N^3 grid: every interior cell gets 0.8x
// the sum of its six face neighbours.  Mapping: block x -> j, y -> i, z -> k.
__global__ void kernel1(float* d_in, float* d_out) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockIdx.z * blockDim.z + threadIdx.z;
bool interior = (i > 0) && (i < N-1) && (j > 0) && (j < N-1) && (k > 0) && (k < N-1);
if (interior) {
int c = i*N*N + j*N + k;
d_out[c] = 0.8 * (d_in[c - N*N] + d_in[c + N*N]
+ d_in[c - N] + d_in[c + N]
+ d_in[c - 1] + d_in[c + 1]);
}
}
// TODO: Edit the function definition as required
// Same 6-point stencil as kernel1 but with the axis mapping rotated so the
// fastest-varying grid index k comes from block x: x -> k, y -> j, z -> i
// (gives coalesced accesses along the innermost dimension).
__global__ void kernel2(float* d_in, float* d_out) {
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.z * blockDim.z + threadIdx.z;
bool interior = (i > 0) && (i < N-1) && (j > 0) && (j < N-1) && (k > 0) && (k < N-1);
if (interior) {
int c = i*N*N + j*N + k;
d_out[c] = 0.8 * (d_in[c - N*N] + d_in[c + N*N]
+ d_in[c - N] + d_in[c + N]
+ d_in[c - 1] + d_in[c + 1]);
}
}
// Shared-memory variant of the 6-point stencil.  Each 32x8x4 block stages
// its tile of d_in in shared memory; interior neighbours come from the tile
// and halo cells (threads on a tile face) fall back to global loads.
// Requires blockDim == (32, 8, 4), matching the mat[][][] extents below.
__global__ void kernel3(float* d_in, float* d_out) {
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.z * blockDim.z + threadIdx.z;
int tj = threadIdx.y;
int tk = threadIdx.x;
int ti = threadIdx.z;
// Tile indexed [tk][tj][ti] = [x][y][z]; sized exactly to the block.
__shared__ float mat[32][8][4];
mat[tk][tj][ti] = d_in[i*N*N + j*N + k];
// All threads (including boundary ones) reach this barrier before reads.
__syncthreads();
if(i>=1 and i<N-1 and j>=1 and j<N-1 and k>=1 and k<N-1){
float val = 0;
// For each of the six neighbours: use the shared tile when the
// neighbour is inside this block's tile, otherwise re-read global.
if(ti < 1)
val += d_in[(i-1)*N*N + j*N + k];
else
val += mat[tk][tj][ti-1];
if(ti + 1 > 3)
val += d_in[(i+1)*N*N + j*N + k];
else
val += mat[tk][tj][ti+1];
if(tj < 1)
val += d_in[i*N*N + (j-1)*N + k];
else
val += mat[tk][tj-1][ti];
if(tj + 1 > 7)
val += d_in[i*N*N + (j+1)*N + k];
else
val += mat[tk][tj+1][ti];
if(tk < 1)
val += d_in[i*N*N + j*N + k-1];
else
val += mat[tk-1][tj][ti];
if(tk + 1 > 31)
val += d_in[i*N*N + j*N + k+1];
else
val += mat[tk+1][tj][ti];
d_out[i*N*N + j*N + k] = 0.8 * val;
}
__syncthreads();
}
// TODO: Edit the function definition as required
// CPU reference implementation of the 6-point stencil used to validate the
// GPU kernels: every interior cell of the N^3 grid gets 0.8x the sum of its
// six face neighbours.
__host__ void stencil(float* h_in, float* h_out) {
for (int i = 1; i < N-1; i++) {
for (int j = 1; j < N-1; j++) {
for (int k = 1; k < N-1; k++) {
const int c = i*N*N + j*N + k;
h_out[c] = 0.8 * (h_in[c - N*N] + h_in[c + N*N]
+ h_in[c - N] + h_in[c + N]
+ h_in[c - 1] + h_in[c + 1]);
}
}
}
}
// Compare reference and optimized outputs element-by-element.  Counts
// elements whose absolute difference exceeds THRESHOLD and reports the
// largest absolute difference among them.
__host__ void check_result(float* w_ref, float* w_opt) {
double maxdiff = 0.0, this_diff = 0.0;
int numdiffs = 0;
for (uint64_t i = 0; i < N; i++) {
for (uint64_t j = 0; j < N; j++) {
for (uint64_t k = 0; k < N; k++) {
// Use the same row-major index as the kernels (i*N*N + j*N + k); the
// old i + N*j + N*N*k order still visited every element but was
// inconsistent with the rest of the file.
uint64_t idx = i * N * N + j * N + k;
// BUG FIX: compare and track |diff|.  The original kept only positive
// raw differences, so a large negative error was counted but never
// reflected in the reported max.
this_diff = std::fabs(w_ref[idx] - w_opt[idx]);
if (this_diff > THRESHOLD) {
numdiffs++;
if (this_diff > maxdiff) {
maxdiff = this_diff;
}
}
}
}
}
if (numdiffs > 0) {
cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff
<< endl;
} else {
cout << "No differences found between base and test versions\n";
}
}
// Wall-clock time since the epoch, in seconds (microsecond resolution),
// via gettimeofday.  Prints a diagnostic if the syscall fails.
double rtclock() { // Seconds
struct timeval tv;
struct timezone tz;
const int stat = gettimeofday(&tv, &tz);
if (stat != 0) {
std::cout << "Error return from gettimeofday: " << stat << "\n";
}
return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
// Driver: runs the CPU stencil as reference, then times and validates the
// three GPU kernels.  Note the event timings below include the H2D/D2H
// copies, not just the kernel.
int main() {
uint64_t SIZE = N * N * N;
float*h_in, *h_cpu_out, *h_gpu1_out, *h_gpu2_out, *h_gpu3_out;
h_in = (float*)malloc(SIZE * sizeof(float));
h_cpu_out = (float*)malloc(SIZE * sizeof(float));
h_gpu1_out = (float*)malloc(SIZE * sizeof(float));
h_gpu2_out = (float*)malloc(SIZE * sizeof(float));
h_gpu3_out = (float*)malloc(SIZE * sizeof(float));
// Random input (unseeded rand -> reproducible across runs), outputs zeroed.
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
for (int k = 0; k < N; k++) {
h_in[i * N * N + j * N + k] = rand() % 64;
h_cpu_out[i * N * N + j * N + k] = 0;
h_gpu1_out[i * N * N + j * N + k] = 0;
h_gpu2_out[i * N * N + j * N + k] = 0;
h_gpu3_out[i * N * N + j * N + k] = 0;
}
}
}
// CPU reference pass.
double clkbegin = rtclock();
stencil(h_in, h_cpu_out);
double clkend = rtclock();
double cpu_time = clkend - clkbegin;
cout << "Stencil time on CPU: " << cpu_time * 1000 << " msec" << endl;
cudaError_t status;
cudaEvent_t start, end;
// --- Kernel 1: 32x32x1 blocks; grid covers the full N^3 domain. ---
// TODO: Fill in kernel1
// TODO: Adapt check_result() and invoke
float *d_in, *d_out1;
dim3 threadsPerBlock(32,32,1);
dim3 numBlocks(N/threadsPerBlock.x, N/threadsPerBlock.y, N/threadsPerBlock.z);
status = cudaMalloc(&d_in, SIZE * sizeof(float));
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
status = cudaMalloc(&d_out1, SIZE * sizeof(float));
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
status = cudaMemcpy(d_out1, h_gpu1_out, SIZE * sizeof(float), cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
kernel1<<<numBlocks, threadsPerBlock>>>(d_in, d_out1);
// Blocking D2H copy also synchronizes with the kernel launch above.
status = cudaMemcpy(h_gpu1_out, d_out1, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float kernel_time;
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out, h_gpu1_out);
std::cout << "Kernel 1 time (ms): " << kernel_time << "\n";
// --- Kernel 2: same launch shape, coalesced axis mapping. ---
// TODO: Fill in kernel2
// TODO: Adapt check_result() and invoke
float *d_out2;
status = cudaMalloc(&d_out2, SIZE * sizeof(float));
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
status = cudaMemcpy(d_out2, h_gpu2_out, SIZE * sizeof(float), cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
kernel2<<<numBlocks, threadsPerBlock>>>(d_in, d_out2);
status = cudaMemcpy(h_gpu2_out, d_out2, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out, h_gpu2_out);
std::cout << "Kernel 2 time (ms): " << kernel_time << "\n";
// --- Kernel 3: shared-memory tiles; block must be 32x8x4 to match the
// static tile dimensions inside the kernel. ---
// kernel 3
float *d_out3;
threadsPerBlock = dim3(32,8,4);
numBlocks = dim3(N/threadsPerBlock.x, N/threadsPerBlock.y, N/threadsPerBlock.z);
status = cudaMalloc(&d_out3, SIZE * sizeof(float));
if (status != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed");
return EXIT_FAILURE;
}
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
status = cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
status = cudaMemcpy(d_out3, h_gpu3_out, SIZE * sizeof(float), cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
kernel3<<<numBlocks, threadsPerBlock>>>(d_in, d_out3);
status = cudaMemcpy(h_gpu3_out, d_out3, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
if (status != cudaSuccess) {
fprintf(stderr, "cudaMemcpy() failed");
return EXIT_FAILURE;
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&kernel_time, start, end);
cudaEventDestroy(start);
cudaEventDestroy(end);
check_result(h_cpu_out, h_gpu3_out);
std::cout << "Kernel 3 time (ms): " << kernel_time << "\n";
// TODO: Free memory
cudaFree(d_in);
cudaFree(d_out1);
cudaFree(d_out2);
cudaFree(d_out3);
free(h_in);
free(h_cpu_out);
free(h_gpu1_out);
free(h_gpu2_out);
free(h_gpu3_out);
return EXIT_SUCCESS;
}
|
14,790 | #include "includes.h"
// Evaluate the polynomial with coefficients poly[0..degree] (lowest power
// first) at each element of array[], writing the value back in place.
__global__ void polynomial_expansion (float* poly,int degree,int n,float* array)
{
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= n)
return;
const float x = array[idx];   // element is only overwritten after the loop
float acc = 0.0f;
float power = 1.0f;
for (int d = 0; d <= degree; ++d)
{
acc += power * poly[d];
power *= x;
}
array[idx] = acc;
}
14,791 | /* Colormap from https://github.com/kbinani/colormap-shaders
The MIT License (MIT)
Copyright (c) 2015 kbinani
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
__device__
float colormap_blue(float x) {
// This colormap has no blue component anywhere on [0, 1].
(void)x;
return 0.0f;
}
__device__
float colormap_green(float x) {
// Green: off below 0.6, linear ramp on [0.6, 0.95], saturated above.
if (x >= 0.6 && x <= 0.95) {
return ((x - 0.6) * 728.57) / 255.0;
}
return (x < 0.6) ? 0.0 : 1.0;
}
__device__
float colormap_red(float x) {
// Red: linear ramp from 0 at x=0 to saturation near x = 0.57147.
if (x >= 0.0 && x <= 0.57147) {
return 446.22 * x / 255.0;
}
return (x < 0.0) ? 0.0 : 1.0;
}
|
14,792 | //Juliana Brown
//Student Number: 20010601
#include "cuda_runtime.h"
#include <iostream>
#include <memory>
#include <string>
#include <cuda.h>
#include <stdio.h>
//Machine Problem 1: Code identifies number and type of CUDA devices on GPU servers,
//clock rate, streaming multiprocessors, cores, warp sizes ect.
// Enumerate CUDA devices and print name, clock, SM/core counts, warp size
// and the main memory limits for each one.
int main(int argc, char **argv) {
// number of GPU devices the support CUDA
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("There are no devices that support CUDA\n");
}
else {
printf("Detected %d CUDA Capable devices\n", deviceCount);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaSetDevice(dev);
cudaDeviceProp dp;
cudaGetDeviceProperties(&dp, dev);
// device type
// BUG FIX: the format string contained "\ n" (backslash-space), an
// invalid escape, instead of a newline.
printf("\nDevice Name %d: \"%s\"\n", dev, dp.name);
// clock rate (clockRate is reported in kHz)
printf(
" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n",
dp.clockRate * 1e-3f, dp.clockRate * 1e-6f);
// number of streaming multiprocessors & Cores
printf(" Multiprocessors: %2d \n", dp.multiProcessorCount);
// Cores per SM by compute capability (per NVIDIA's SM version tables).
int cores = 0;
int mp = dp.multiProcessorCount;
switch (dp.major) {
case 2: // Fermi
if (dp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if ((dp.minor == 1) || (dp.minor == 2)) cores = mp * 128;
else if (dp.minor == 0) cores = mp * 64;
break;
case 7: // Volta and Turing
if ((dp.minor == 0) || (dp.minor == 5)) cores = mp * 64;
break;
case 8: // Ampere (8.0) and Ampere/Ada consumer parts (8.6/8.7/8.9)
if (dp.minor == 0) cores = mp * 64;
else cores = mp * 128;
break;
case 9: // Hopper
cores = mp * 128;
break;
}
printf(" Number of cores is: %d\n", cores);
// warp size
printf(" Warp size: %d\n",
dp.warpSize);
// amount of global memory
printf(" Total amount of global memory: %.0f MBytes "
"(%llu bytes)\n",
static_cast<float>(dp.totalGlobalMem / 1048576.0f),
(unsigned long long)dp.totalGlobalMem);
// amount of constant memory
printf(" Total amount of constant memory: %zu bytes\n",
dp.totalConstMem);
// amount of shared memory per block
printf(" Total amount of shared memory per block: %zu bytes\n",
dp.sharedMemPerBlock);
// amount of registers available per block
printf(" Total number of registers available per block: %d\n",
dp.regsPerBlock);
// maximum number of threads per block
printf(" Total number of threads available per block : %d\n",
dp.maxThreadsPerBlock);
// maximum dimension of each block
printf(" Max dimension of a block (x,y,z): (%d, %d, %d)\n",
dp.maxThreadsDim[0], dp.maxThreadsDim[1],
dp.maxThreadsDim[2]);
// max size of dimension of a grid
printf(" Max dimension of a grid size (x,y,z): (%d, %d, %d)\n",
dp.maxGridSize[0], dp.maxGridSize[1],
dp.maxGridSize[2]);
}
}
|
14,793 |
// Element-wise vector addition: c[i] = a[i] + b[i].
// NOTE(review): no bounds guard -- the launch must cover exactly the array
// length (gridDim.x * blockDim.x elements), as in the original.
__global__ void sumKernel( float *a, float *b, float *c )
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
}
14,794 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
/*
Name: Yiting Wang
M#: 01360917
Project: CSCI 6330 HW6
*/
# define Nthrds 100 // set the number of the thread
//set initial temperature for matrix
void Initial(double **M,int num_rows, int num_cols, double top,double left, double right, double bottom);
// Jacobi heat-diffusion solver run entirely inside one kernel launch
// (single block of NumThrds threads).  Each thread relaxes a band of rows,
// threads cooperate through __syncthreads() each iteration, and the last
// thread reduces per-thread errors and logs progress at power-of-two steps.
// dintInfo: [0]=num_rows, [1]=num_cols, [2]=initial step, [3]=thread count,
//           [4]=rows per thread.  ddouInfo: [0]=eps, [1]=initial max error.
__global__ void calculate(double *dpreV, double *dnewV, double *ddouInfo, int *dintInfo)
{
int num_rows = dintInfo[0];
int num_cols = dintInfo[1];
int step = dintInfo[2];
int NumThrds = dintInfo[3];
int rowsearch = dintInfo[4];
int i,j;
int tid = threadIdx.x; // get the gpu thread
double eps = ddouInfo[0];
double Imax_err;
__shared__ double max_err;
__shared__ double *err;
// NOTE(review): EVERY thread executes this malloc and stores into the one
// shared pointer -- a write race that leaks all but one allocation, and
// the surviving buffer is never freed.  Allocate once under
// `if (tid == 0)` (plus a barrier) and free at the end.
err = (double *)malloc(NumThrds*sizeof(double));
// NOTE(review): unguarded shared write by all threads (same value, but
// still a race); also racy with the while-condition read below.
max_err = ddouInfo[1];
//printf("max_err:%f\n",max_err);
double loc_err = max_err;
double temp_err;
// Band of rows owned by this thread; the last thread absorbs the remainder.
int start_row = (((tid)*rowsearch) > (0) ? ((tid)*rowsearch) : (0));
int end_row = (((tid+1)*rowsearch + 1) < (num_rows - 1) ? ((tid+1)*rowsearch+1):(num_rows-1));
if(tid == NumThrds-1)
{
end_row = num_rows - 1;
}
//printf("rowsearch:%d\n",rowsearch);
//printf("start: %d end: %d tid: %d\n",start_row, end_row,tid);
int index;
while(max_err > eps)
{
loc_err = 0.0;
//get the new value of matrix: 4-point Jacobi average over the band,
//tracking this thread's largest update as its local error
for(i=0;i<(end_row-start_row-1);i++)
{
for(j=1;j<num_cols-1;j++)
{
index = (start_row+i+1)*num_cols + j;
dnewV[index] = (dpreV[index-1] + dpreV[index+1] + dpreV[index-num_cols] + dpreV[index+num_cols])/4.0;
//printf("%f, %f, %f, %f, %f\n",preV[index-1],preV[index+1],preV[index-num_cols],preV[index+num_cols],dnewV[index]);
temp_err = fabs(dnewV[index] - dpreV[index]);
//printf("dnewV[%d] = %f, preV[%d] = %f, temp_err:%f, loc_err:%f\n",index,dnewV[index],index,preV[index],temp_err,loc_err);
if(temp_err>loc_err)
{
loc_err = temp_err;
}
}
}
err[tid] = loc_err;
//printf("loc_err:%f\n",loc_err);
__syncthreads();
//copy back to preV so the next sweep reads the updated grid
for(i=0;i<(end_row-start_row-1);i++)
{
for(j=1;j<num_cols-1;j++)
{
index = (start_row+1+i)*num_cols + j;
//printf("index:%d\n",index);
dpreV[index] = dnewV[index];
}
}
// Serial reduction of the per-thread errors by the last thread; progress
// is printed only when step is a power of two.
if(tid == NumThrds-1)
{
Imax_err = err[0];
//printf("%f,err[0]=%f\n",Imax_err,err[0]);
for(i=1;i<NumThrds;i++)
{
if(Imax_err<err[i])
Imax_err = err[i];
//printf("%f\n",Imax_err);
//printf("%d,%f\n",i,err[i]);
}
max_err = Imax_err;
if(step&(step-1))
{
}
else
{
if(step>0)
printf("%6d %7lf\n",step, max_err);
}
}
step = step+1;
// NOTE(review): the loop condition reads shared max_err right after this
// barrier; a second barrier after the reduction write would make the
// ordering unambiguous.
__syncthreads();
}
if(tid == NumThrds-1)
{
printf("%6d %7lf\n",step, max_err);
}
}
// Host driver: reads grid size, boundary temperatures and tolerance from
// the command line, initializes the grid, and runs the whole solve in one
// single-block kernel launch of Nthrds threads.
// argv: rows cols top left right bottom eps
int main(int argc, char *argv[])
{
int i;
double top_temp,left_temp,right_temp,bottom_temp,eps;
int num_rows, num_cols;
double **preM; // matrix in host (row-pointer view over preV)
double **newM;
double *dpreV; // vector in device
double *dnewV;
double *ddouInfo;
int *dintInfo;
double *preV; // vector in host (contiguous storage)
double *newV;
double *douInfo;
int *intInfo;
// read the initial number
num_rows = atoi(argv[1]);
num_cols = atoi(argv[2]);
top_temp = atof(argv[3]);
left_temp = atof(argv[4]);
right_temp = atof(argv[5]);
bottom_temp = atof(argv[6]);
eps = atof(argv[7]);
//matrix in the host
preM = (double **)malloc(num_rows*sizeof(double *));
newM = (double **)malloc(num_rows*sizeof(double *));
//malloc in the host
preV = (double *)malloc(num_rows*num_cols*sizeof(double));
newV = (double *)malloc(num_rows*num_cols*sizeof(double));
douInfo = (double *)malloc(2*sizeof(double));
intInfo = (int *)malloc(5*sizeof(int));
// cuda malloc
cudaMalloc(&dpreV, sizeof(double)*num_rows*num_cols);
cudaMalloc(&dnewV, sizeof(double)*num_rows*num_cols);
cudaMalloc(&ddouInfo,sizeof(double)*2);
cudaMalloc(&dintInfo,sizeof(int)*5);
// Row pointers aliasing the flat vectors, so Initial() can use M[i][j].
for(i=0;i<num_rows;i++)
{
preM[i] = &(preV[i*num_cols]);
newM[i] = &(newV[i*num_cols]);
}
//set initial number for matrix
Initial(preM,num_rows,num_cols,top_temp,left_temp,right_temp,bottom_temp);
// Rows handled per GPU thread (interior rows split across Nthrds threads).
int rowsearch = ceil((num_rows - 2)*1.0/(Nthrds));
// Copy the fixed boundary into the "new" buffer so it survives the sweeps.
for(i=0;i<num_cols;i++)
{
newM[0][i] = preM[0][i];
newM[num_rows-1][i] = preM[num_rows-1][i];
}
for(i=0;i<num_rows;i++)
{
newM[i][0] = preM[i][0];
newM[i][num_cols-1] = preM[i][num_cols-1];
}
//douInfo: [0]=eps, [1]=initial max error (forces at least one sweep)
double max_err = 1000.0;
douInfo[0] = eps;
douInfo[1] = max_err;
//intInfo: [0]=rows, [1]=cols, [2]=step, [3]=thread count, [4]=rows/thread
int step = 0;
intInfo[0] = num_rows;
intInfo[1] = num_cols;
intInfo[2] = step;
intInfo[3] = Nthrds;
intInfo[4] = rowsearch;
// copy from host to device
cudaMemcpy(ddouInfo,douInfo,2*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dintInfo,intInfo,5*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dpreV,preV,num_rows * num_cols * sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dnewV,newV,num_rows * num_cols * sizeof(double),cudaMemcpyHostToDevice);
// Single block: the kernel iterates to convergence internally.
calculate<<<1,Nthrds>>>(dpreV, dnewV, ddouInfo, dintInfo);
// copy from device to host
// NOTE(review): the result grid is never copied back and there is no
// cudaDeviceSynchronize(); output comes only from in-kernel printf.
// preM/newM are also never freed (minor host leak at exit).
//cudaMemcpy(douInfo,ddouInfo,2*sizeof(double),cudaMemcpyDeviceToHost);
//cudaMemcpy(intInfo,dintInfo,5*sizeof(int),cudaMemcpyDeviceToHost);
free(preV);
free(newV);
free(douInfo);
free(intInfo);
cudaFree(dpreV);
cudaFree(dnewV);
cudaFree(ddouInfo);
cudaFree(dintInfo);
return 0;
}
// Set the boundary temperatures of the num_rows x num_cols grid M and fill
// the interior with the (approximate) average of the boundary values.
// Note the column pass runs over rows 0..num_rows-2 and is applied after
// the row pass, so the top corners end up holding left/right (original
// behaviour, preserved).
void Initial(double **M, int num_rows, int num_cols, double top,double left, double right, double bottom)
{
// Top and bottom boundary rows.
for (int c = 0; c < num_cols; c++)
{
M[0][c] = top;
M[num_rows-1][c] = bottom;
}
// Left and right boundary columns (last row excluded; corners of row 0
// are overwritten here).
for (int r = 0; r < num_rows-1; r++)
{
M[r][0] = left;
M[r][num_cols-1] = right;
}
// Average boundary temperature used as the interior initial guess.
double sum = (top)*(num_rows-2) + bottom*(num_rows) + left*(num_cols-1) + right*(num_cols-1);
int count = num_rows*2 + num_cols*2 -4;
double average = sum / (double)count;
for (int r = 1; r < num_rows-1; r++)
{
for (int c = 1; c < num_cols-1; c++)
{
M[r][c] = average;
}
}
}
|
14,795 | #include "includes.h"
// Fill two device vectors from the thread index within the block:
// a[i] = 2*threadIdx.x and b[i] = threadIdx.x (the pattern repeats per
// block, since threadIdx.x resets every block).
__global__ void vectorValue (float *a, float *b, int n){
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= n)
return;
b[i] = threadIdx.x;
a[i] = threadIdx.x * 2;
}
14,796 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
// Each thread prints its own index via device printf; with the <<<1, N>>>
// launch in main(), threadIdx.x enumerates 0..N-1 (output order is not
// guaranteed and only appears after a synchronizing call).
__global__ void loop_gpu()
{
printf("GPU Loop, NUM : %d\n", threadIdx.x);
}
// Sequentially print one "CPU Loop" line per index 0..N-1 on the host,
// mirroring what loop_gpu does in parallel on the device.
void loop_cpu(int N)
{
int i = 0;
while (i < N)
{
printf("CPU Loop, NUM : %d\n", i);
++i;
}
}
// Demo driver: run the same 10-iteration print loop on the CPU, then as 10
// GPU threads, with profiler markers around the whole section.
int main()
{
cudaProfilerStart();
int loop_count = 10;
loop_cpu(loop_count);
// One block of loop_count threads; device printf output is flushed by
// the cudaDeviceSynchronize() below.
loop_gpu<<<1, loop_count>>>();
cudaDeviceSynchronize();
cudaProfilerStop();
return 0;
}
#include <cstdio>
#include <cuda_runtime.h>
#define THREADS 64
#define DATA_BLOCKS 16
__shared__ int smem[THREADS];
// Block-wide sum of data_in[0..size): each thread accumulates a strided
// slice into its own shared slot, then thread 0 reduces the slots into
// *sum_out.  Intended for a single-block launch of THREADS threads.
__global__ void myKernel(int *data_in, int *sum_out, const int size) {
int tx = threadIdx.x;
// Accumulate in a register; smem[tx] is private to this thread until the
// final reduction, so no barrier is needed inside the loop.
int partial = 0;
for (int b = 0; b < DATA_BLOCKS; ++b) {
const int offset = THREADS * b + tx;
if (offset < size)
partial += data_in[offset];
}
smem[tx] = partial;
// BUG FIX: the original called __syncthreads() inside the divergent
// `if (offset < size)` branch -- undefined behavior whenever size is not
// a multiple of THREADS (which main() guarantees) -- and had no barrier
// at all before thread 0 read the other threads' slots.  One
// unconditional barrier here fixes both problems.
__syncthreads();
if (tx == 0) {
int total = 0;
for (int i = 0; i < THREADS; ++i)
total += smem[i];
*sum_out = total;
}
}
// Sum SIZE ints on the device with a single block of THREADS threads.
// SIZE is deliberately NOT a multiple of THREADS so the kernel's tail
// handling is exercised.
int main(int argc, char *argv[]){
const int SIZE = (THREADS * DATA_BLOCKS) - 16;
int *data_in = NULL;
int *sum_out = NULL;
cudaMalloc((void**)&data_in, SIZE * sizeof(int));
cudaMalloc((void**)&sum_out, sizeof(int));
// BUG FIX: the input buffer was never initialized, so the kernel summed
// indeterminate device memory.  Zero it so the result (0) is well-defined.
cudaMemset(data_in, 0, SIZE * sizeof(int));
myKernel<<<1,THREADS>>>(data_in, sum_out, SIZE);
// Surface launch-configuration errors instead of silently dropping them.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
cudaFree(data_in);
cudaFree(sum_out);
return 0;
}
14,798 | #ifndef RAY_GPU_H
#define RAY_GPU_H
#include "vector_gpu.cu"
typedef long long IdType;
// Device-side ray record, layout-compatible with the CPU-side Ray (see the
// note on `id` below).  Relies on the vect3d_gpu type and vecCopy_gpu helper
// from vector_gpu.cu.
struct Ray_gpu
{
public:
// For CUDA..
// Default state: id = -1 marks an empty/NULL ray (used by the GPU tree
// traversal); all vectors zeroed.
__device__
Ray_gpu()
: fDeltaX(0), fDeltaY(0)
{
id = -1; // for GPU to recognize empty node in the tree
vect3d_gpu null;
vecCopy_gpu(start_point, null);
vecCopy_gpu(direction_vec, null);
vecCopy_gpu(color, null);
}
// Construct a valid ray (id = 0) from an origin and direction.
// NOTE(review): the pbIsInObj flag is accepted but never stored or used.
__device__
Ray_gpu(vect3d_gpu &pStart, vect3d_gpu &pDir, bool pbIsInObj = false)
: fDeltaX(0), fDeltaY(0)
{
vecCopy_gpu(start_point, pStart);
vecCopy_gpu(direction_vec, pDir);
id = 0;
vect3d_gpu null;
vecCopy_gpu(color, null);
}
// Return the ray to the empty state (id = -1, every field zeroed),
// including the hit-point fields the default constructor leaves alone.
__device__
void reset()
{
id = -1;
fDeltaX = 0;
fDeltaY = 0;
vect3d_gpu null;
vecCopy_gpu(start_point, null);
vecCopy_gpu(direction_vec, null);
vecCopy_gpu(color, null);
vecCopy_gpu(_hitPoint, null);
vecCopy_gpu(_hitNorm, null);
}
// Field-by-field copy from another ray (no operator= is defined).
__device__
void copy(Ray_gpu &ray)
{
id = ray.id;
fDeltaX = ray.fDeltaX;
fDeltaY = ray.fDeltaY;
vecCopy_gpu(start_point, ray.start_point);
vecCopy_gpu(direction_vec, ray.direction_vec);
vecCopy_gpu(color, ray.color);
vecCopy_gpu(_hitPoint, ray._hitPoint);
vecCopy_gpu(_hitNorm, ray._hitNorm);
}
///
/// Has to be exactly the same with CPU Ray
/// -1 means NULL ray. 0 means valid
///
IdType id;
vect3d_gpu start_point;
vect3d_gpu direction_vec;
vect3d_gpu color;
// to make integrator easier
float fDeltaX, fDeltaY; // within a PixelIntegrator
// For putting VPL on GPU only
vect3d_gpu _hitPoint;
vect3d_gpu _hitNorm;
};
#endif |
14,799 |
#include <stdio.h>
#include <sys/timeb.h>
#include <math.h>
#include <cuda.h>
// nbre de threads dans une dimension (on travaille en 1D)
#define NBTHREADS 1024
// Pour la generation aleatoire des valeurs
#define MAX_VAL 10
#define MIN_VAL 0
void vecAleatoire(int *v, int n);
void vecAff(int *v, int n);
// voisin dans la dimension d du processeur p
unsigned int voisin(unsigned int p, unsigned int d);
// calcul de la somme d'un vecteur sur CPU
// pour les tests
int somCPU(int *t, int n);
// calcul de la somme sur l'hypercube (ne fonctionne que pour un vecteur comportant moins de NBTHREADS elements !!!)
__global__ void somHypercubeKernel(int* d_t, int d, int total);
// calcul de la somme sur l'hypercube dans la dimension dc (suppose que toutes les dimensions inferieures ont ete calculees)
__global__ void somHypercubeUneDimensionKernel(int* d_t, int d, int dc, int total);
// fonction qui appel les noyaux
float somHypercube(int* h_t, int d);
int GPUInfo();
// Driver: sums 2^dim random ints on the CPU and on the GPU (hypercube
// reduction), comparing results and timings.  Optional argv[1] = dimension.
int main(int argc, char* argv[]){
int dim=3, n;
int *tab;
int somme;
float ms;
// for measuring CPU time
struct timeb tav, tap ;
double te;
// read the dimension from the command line, if given
if (argc==2) dim= strtol(argv[1], NULL, 10);
// allocate and initialize the array; the hypercube sum of n = 2^dim
// values needs a double-buffered vector of size 2*n
n=(int)pow(2,dim);
// memory footprint of the vector, printed in "Mo"
// NOTE(review): sizeof(int)*n / (512*1024) is off by a factor of 2 for a
// mebibyte -- the divisor should be 1024*1024 on the byte count.
float tailleMo=sizeof(int)*n/float(512*1024);
tab=(int*)malloc(sizeof(int)*2*n);
vecAleatoire(tab,2*n);
// the right half of the buffer starts at 0
for(int i=0;i<n;i++) tab[n+i]=0;
// which GPU are we on?
GPUInfo();
// reference sum on the CPU
ftime(&tav);
somme=somCPU(tab,n);
ftime(&tap);
// NOTE(review): te is in seconds here but printed with an "ms" label below.
te = (double)((tap.time*1000+tap.millitm)-(tav.time*1000+tav.millitm))/1000 ;
// debug dump of the input vector
/* vecAff(tab,2*n);
printf("\n"); */
// sum on the GPU
printf("----\nHypercube de dimension %d, soit %d valeurs dans le vecteur (%f Mo).\n", dim, n, tailleMo);
printf("Temps d'execution sur CPU : %f ms.\n",te);
ms=somHypercube(tab,dim);
printf("Temps d'execution sur GPU : %f ms.\n",ms);
// the result lands in the left or right half depending on the parity of dim
printf("SommeCPU : %d, sommeGPU : %d (ecart GPU : %d)\n",somme, tab[n*(dim%2)], tab[n*(dim%2)+n-1]-tab[n*(dim%2)]);
// debug dump of the result vector
// vecAff(tab,2*n);
}
// calcul la somme de tous les elements contenus dans le vecteur h_t
// Sum the n = 2^d values in h_t[0..n) on the GPU using a hypercube
// reduction over the double-buffered vector h_t (length 2*n), and return
// the elapsed GPU time in milliseconds.  Single-block inputs use the
// all-dimensions kernel; larger inputs launch one kernel per dimension
// with a device sync between launches (inter-block ordering).
float somHypercube(int* h_t, int d){
int n = (int)pow(2,d);
// the vector holds 2*n elements (read buffer + write buffer)
long size = 2*n*sizeof(int);
int nbBlocs;
int *d_t;
// CUDA events for timing the reduction only (copies excluded)
cudaEvent_t start, stop;
float milliseconds = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate the device vector
printf("Allocation de %ld octets (%f Mo) sur le GPU.\n",size,(float)size/1024/1024);
if (cudaMalloc((void **) &d_t, size)!=cudaSuccess) {
printf ("Pb allocation !!!\n");
exit(1);
}
// host-to-device copy
cudaMemcpy(d_t, h_t, size, cudaMemcpyHostToDevice);
// GPU computation
nbBlocs=(n-1)/NBTHREADS+1;
printf("Appel du noyau <<<%d blocs, %d>>>.\n", nbBlocs, NBTHREADS);
// two cases: (1) a single block, or (2) several blocks
cudaEventRecord(start);
if(nbBlocs==1) somHypercubeKernel<<<nbBlocs,NBTHREADS>>>(d_t, d, n);
else {
// call the kernel once per dimension so that all blocks finish one
// dimension before the next one starts (no grid-wide barrier otherwise)
for(int i=0;i<d;i++) {
// printf("somHypercubeUneDimensionKernel<<<%d,%d>>>(d_t,%d,%d,%d)\n",nbBlocs,NBTHREADS,d,i,n);
somHypercubeUneDimensionKernel<<<nbBlocs,NBTHREADS>>>(d_t, d, i, n);
// wait for the kernel of the current dimension to finish
cudaDeviceSynchronize();
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
// device-to-host copy of the full double buffer (result location depends
// on the parity of d; the caller picks the right half)
cudaMemcpy(h_t, d_t, size, cudaMemcpyDeviceToHost);
// release device memory
cudaFree(d_t);
return milliseconds;
}
// calcul de la somme sur l'hypercube (ne fonctionne que pour un vecteur comportant moins de NBTHREADS elements !!!)
__global__ void somHypercubeKernel(int* d_t, int d, int total){
int p,voisin;
int val;
p=threadIdx.x+blockDim.x*blockIdx.x;
// attention pour eviter le conflit d'ecriture, la longueur de d_t est egale a 2*total
// suivant la parite de i, on utilise la partie gauche ou droite du tableau pour la lecture et inversement pour l'ecriture
if (p<total) {
for(int i=0;i<d;i++){
voisin=p^(((unsigned int)1)<<i);
val=d_t[total*(i%2)+voisin];
d_t[total*((i+1)%2)+p]=d_t[total*(i%2)+p]+val;
__syncthreads();
}
}
}
// calcul de la somme sur l'hypercube dans la dimension dc (suppose que toutes les dimensions inferieures ont ete calculees)
// ce noyau devrait etre appele d fois depuis l'hote !!!
// One dimension `i` of the hypercube sum, for inputs spanning several
// blocks.  The host calls this kernel d times with cudaDeviceSynchronize()
// between launches, which provides the cross-block ordering; the double
// buffer (parity of i) keeps reads and writes of one round disjoint.
__global__ void somHypercubeUneDimensionKernel(int* d_t, int d, int i, int total) {
int p = threadIdx.x+blockDim.x*blockIdx.x;
if (p<total) {
// voisin = p with bit i flipped: the partner along dimension i
int voisin=p^(1<<i);
int val=d_t[total*(i%2)+voisin];
d_t[total*((i+1)%2)+p]=d_t[total*(i%2)+p]+val;
}
// BUG FIX: this barrier sat inside the divergent `if (p < total)` branch
// (undefined behavior for the tail block).  Hoisted outside the guard;
// the host-side sync between launches is what actually orders dimensions.
__syncthreads();
}
// rappel sur les fonctions C pour manipuler les bits directement
// https://zestedesavoir.com/tutoriels/755/le-langage-c-1/notions-avancees/manipulation-des-bits/
// Neighbour of processor p along dimension d of the hypercube: p with
// bit d flipped (exclusive-or with the d-th power of two).
unsigned int voisin(unsigned int p, unsigned int d) {
const unsigned int bit = ((unsigned int)1) << d;
return p ^ bit;
}
// calcul de la somme d'un vecteur sur CPU
// CPU reference: sum of the first n elements of t (0 when n <= 0).
int somCPU(int *t, int n) {
int total = 0;
for (int i = n; i-- > 0; )
total += t[i];
return total;
}
// Fills v[0..n-1] with pseudo-random integers drawn via rand(), scaled by
// MAX_VAL and offset by MIN_VAL (macros defined earlier in this file --
// presumably giving values in [MIN_VAL, MIN_VAL + MAX_VAL]; confirm against
// their definitions).
void vecAleatoire(int *v, int n) {
    for (int k = 0; k < n; ++k) {
        double u = (double)rand() / RAND_MAX;
        v[k] = (int)(u * MAX_VAL) + MIN_VAL;
    }
}
// Prints the n elements of the int vector v as "[v0 v1 ... vlast]".
// Fixes two defects in the original: the last element was printed with "%f"
// (undefined behaviour for an int argument), and n == 0 read v[-1].
void vecAff(int *v, int n){
    printf("[");
    for (int i = 0; i < n; i++) {
        if (i > 0) printf(" ");   // space-separated, as in the original output
        printf("%d", v[i]);
    }
    printf("]");
}
// Prints the main hardware characteristics of CUDA device 0 and returns the
// number of CUDA-capable devices found. Returns -1 when device 0 reports the
// 9999.9999 stub capability, i.e. no real CUDA GPU is present.
// NOTE: cudaDeviceProp::maxBlocksPerMultiProcessor requires CUDA 11+.
int GPUInfo(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        if (dev == 0) {
            if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
                printf("No CUDA GPU has been detected");
                return -1;
            } else if (deviceCount == 1) {
                printf("There is 1 device supporting CUDA\n");
                printf("Device %d, name: %s\n", dev, deviceProp.name);
                printf("Computational Capabilities: %d.%d\n", deviceProp.major, deviceProp.minor);
                // totalGlobalMem and sharedMemPerBlock are size_t: use %zu,
                // not %ld (the original specifier is wrong on LLP64 platforms
                // such as 64-bit Windows, where long is 32 bits).
                printf("Maximum global memory size: %zu bytes\n", deviceProp.totalGlobalMem);
                printf("Maximum shared memory size per block: %zu bytes\n", deviceProp.sharedMemPerBlock);
                printf("Warp size: %d\n", deviceProp.warpSize);
                printf("Maximum number of blocks per multiProcessor: %d\n", deviceProp.maxBlocksPerMultiProcessor);
                printf("Maximum number of threads per multiProcessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
                printf("Maximum grid size : %d x %d x %d blocks.\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
            } else {
                printf("There are %d devices supporting CUDA\n", deviceCount);
            }
        }
    }
    return deviceCount;
}
14,800 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Runge_Kutta: fixed-step explicit Runge-Kutta integrators, callable from
// device code.
//
// Shared conventions for all Order_N functions:
//   system      - callable invoked as system(point, k); it reads a point
//                 point[0..args_number-1] (point[0] is the independent
//                 variable, point[1..] the dependent variables) and writes
//                 the args_number-1 derivatives into k.
//   args        - state array; the current point is read from
//                 args[0..args_number-1] and the advanced point is written
//                 to args[n*args_number .. n*args_number + args_number - 1]
//                 (so args presumably holds at least (n+1)*args_number
//                 elements -- TODO confirm against the callers, which are
//                 outside this file).
//   dx          - integration step size.
//   n           - index of the output slot inside args.
//   args_number - 1 (independent variable) + number of dependent variables.
//
// All temporaries are allocated with device-side new/delete, i.e. on the
// device heap; each call performs several heap allocations per step.
namespace Runge_Kutta
{
// Second-order Runge-Kutta (Heun's scheme): advance by dx using the average
// of the slopes at the start point (k1) and at the Euler-predicted end
// point (k2).
template<typename System, typename precision>
__device__ void Order_2(System system, precision *args, const precision dx, const int n, const int args_number)
{
precision *args_tmp = new precision[args_number];
// k1, k2 hold the derivatives evaluated at the different sample points
precision *k1 = new precision[args_number - 1];
precision *k2 = new precision[args_number - 1];
system(args, k1);
// Euler predictor: full step with slope k1
args_tmp[0] = args[0] + dx;
for (int i = 1; i < args_number; i++)
{
args_tmp[i] = args[i] + dx * k1[i - 1];
}
system(args_tmp, k2);
// write the advanced point into output slot n
args[n*args_number] = args[0] + dx;
for (int i = 1; i < args_number; i++)
{
args[n*args_number + i] = args[i] + dx / 2 * k1[i - 1] + dx / 2 * k2[i - 1];
}
delete[] args_tmp;
delete[] k1;
delete[] k2;
}
// Third-order Runge-Kutta (Kutta's scheme): slopes at the start (k1), the
// midpoint (k2), and the endpoint extrapolated as y - dx*k1 + 2*dx*k2 (k3),
// combined with weights 1/6, 2/3, 1/6.
template<typename System, typename precision>
__device__ void Order_3(System system, precision *args, const precision dx, const int n, const int args_number)
{
precision *args_tmp = new precision[args_number];
// k1, k2, k3 hold the derivatives evaluated at the different sample points
precision *k1 = new precision[args_number - 1];
precision *k2 = new precision[args_number - 1];
precision *k3 = new precision[args_number - 1];
system(args, k1);
// midpoint sample with slope k1
args_tmp[0] = args[0] + dx / 2;
for (int i = 1; i < args_number; i++)
{
args_tmp[i] = args[i] + dx / 2 * k1[i - 1];
}
system(args_tmp, k2);
// endpoint sample: y - dx*k1 + 2*dx*k2
args_tmp[0] = args[0] + dx;
for (int i = 1; i < args_number; i++)
{
args_tmp[i] = args[i] - dx * k1[i - 1] + 2 * dx * (k2[i - 1]);
}
system(args_tmp, k3);
// write the advanced point into output slot n
args[n*args_number] = args[0] + dx;
for (int i = 1; i < args_number; i++)
{
args[n*args_number + i] = args[i] + dx / 6 * k1[i - 1] + dx * 2 / 3 * k2[i - 1] + dx / 6 * k3[i - 1];
}
delete[] args_tmp;
delete[] k1;
delete[] k2;
delete[] k3;
}
// Classical fourth-order Runge-Kutta: slopes k1 (start), k2 and k3 (two
// midpoint samples), k4 (endpoint), combined with weights 1/6, 1/3, 1/3, 1/6.
template<typename System, typename precision>
__device__ void Order_4(System system, precision *args, const precision dx, const int n, const int args_number)
{
precision *args_tmp = new precision[args_number];
// k1..k4 hold the derivatives evaluated at the different sample points
precision *k1 = new precision[args_number - 1];
precision *k2 = new precision[args_number - 1];
precision *k3 = new precision[args_number - 1];
precision *k4 = new precision[args_number - 1];
system(args, k1);
// first midpoint sample with slope k1
args_tmp[0] = args[0] + dx / 2;
for (int i = 1; i < args_number; i++)
{
args_tmp[i] = args[i] + dx / 2 * k1[i - 1];
}
system(args_tmp, k2);
// second midpoint sample with slope k2
args_tmp[0] = args[0] + dx / 2;
for (int i = 1; i < args_number; i++)
{
args_tmp[i] = args[i] + dx / 2 * (k2[i - 1]);
}
system(args_tmp, k3);
// endpoint sample with slope k3
args_tmp[0] = args[0] + dx;
for (int i = 1; i < args_number; i++)
{
args_tmp[i] = args[i] + dx * (k3[i - 1]);
}
system(args_tmp, k4);
// write the advanced point into output slot n
args[n*args_number] = args[0] + dx;
for (int i = 1; i < args_number; i++)
{
args[n*args_number + i] = args[i] + dx / 6 * k1[i - 1] + dx / 3 * k2[i - 1] + dx / 3 * k3[i - 1] + dx / 6 * k4[i - 1];
}
delete[] args_tmp;
delete[] k1;
delete[] k2;
delete[] k3;
delete[] k4;
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.