serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,001 | #include <cuda_runtime_api.h>
#include <stdio.h>
__global__ void hello() {
    // Each thread prints its global linear thread id.
    const int globalId = blockDim.x * blockIdx.x + threadIdx.x;
    printf("Hello world, thread:%d\n", globalId);
}
int main(int argc, char* argv[]) {
    // Launch 3 blocks of 10 threads and wait for completion so that the
    // device-side printf output is flushed before the process exits.
    cudaSetDevice(0);
    hello<<<3, 10>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
12,002 | #include <stdio.h>
#include <stdlib.h>
#define N 4096 * 1024
void saxpy(int n, float a, float *x, float *y){
    // Reference CPU SAXPY: y[k] <- a*x[k] + y[k] for k in [0, n).
    for (int k = 0; k < n; ++k)
        y[k] = a * x[k] + y[k];
}
__global__ void _saxpy_cuda(int n, float a, float *x, float *y){
    // Device SAXPY: y[i] = a*x[i] + y[i] for i in [0, n).
    // Fixed: the bounds guard now uses the runtime parameter n; the original
    // compared against the compile-time macro N, silently breaking the kernel
    // for any other problem size even though n was passed in.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        y[idx] = a * x[idx] + y[idx];
}
void saxpy_gpu(int n, float a, float *x, float *y){
    // Host wrapper: copies x and y to the device, runs SAXPY, copies y back.
    // Sizes and launch geometry are now derived from n; the original
    // hard-coded the macro N and a fixed 4096x1024 grid, so it only worked
    // when n == N.
    size_t size = sizeof(float) * (size_t)n;
    float *x_dev = NULL;
    float *y_dev = NULL;
    cudaMalloc((void**)&x_dev, size);
    cudaMalloc((void**)&y_dev, size);
    // Note: the original also called cudaMemset with a float value (cudaMemset
    // takes a byte value), and both buffers are fully overwritten by the
    // copies below anyway — those calls were removed.
    cudaMemcpy(x_dev, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(y_dev, y, size, cudaMemcpyHostToDevice);
    int threads = 1024;
    int blocks = (n + threads - 1) / threads;  // ceil-div covers the tail
    _saxpy_cuda<<<blocks, threads>>>(n, a, x_dev, y_dev);
    // Only y is an output; x stays read-only on the device.
    cudaMemcpy(y, y_dev, size, cudaMemcpyDeviceToHost);
    cudaFree(x_dev);
    cudaFree(y_dev);
}
int main(){
// Driver: builds x[i] = 0.001*i and y = 0, runs SAXPY with a = 3 on the GPU,
// and prints the first five elements of y before and after.
float *x, *y;
float a;
int size = N * sizeof( float);
x = (float *) malloc( size);
y = (float *) malloc( size);
a=3;
int i;
// initialize for
for( i=0; i<N; i++){
x[i]=i*0.001;
y[i]=0;
}
printf(" data\n");
for( i = 0; i < 5; ++i ) printf("y[%d] = %f, ", i, y[i]);
printf ("\n");
//saxpy(N, a, x, y);
saxpy_gpu(N,a,x,y);
printf(" result\n");
for( i = 0; i < 5; ++i ) printf("y[%d] = %f, ", i, y[i]);
printf ("\n");
free(x);
free(y);
return 0;
}
|
12,003 | /*
Matrix Vector multiplication with partially missing cuda copy of the matrix.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//Grid dimension
#define B 100
//Block dimension
#define T 256
//Array size
#define C B*T
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
cudaError_t cuErr = call; \
if(cudaSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
//Host pointer for matrix b, input vector a and result vector c
int *a;
int *b;
int *c;
//Device pointer for matrix d_b, input vector d_a and result vector d_c
int *d_a;
int *d_b;
int *d_c;
//Initialization and allocation of the host variables
int init(){
    // Allocate the host buffers and seed them: matrix b and vector a are all
    // ones, result vector c is zeroed.
    a = (int *) malloc(C*sizeof(int));
    b = (int *) malloc(C*C*sizeof(int));
    c = (int *) malloc(C*sizeof(int));
    for (int row = 0; row < C; ++row) {
        a[row] = 1;
        c[row] = 0;
        for (int col = 0; col < C; ++col)
            b[col + row*C] = 1;
    }
    return 0;
}
//Kernel
__global__ void Mult(int* d_a, int* d_b, int* d_c){
// Row `tid` of the matrix-vector product: d_c[tid] += dot(d_b[tid][:], d_a).
// Launched with B*T == C threads, so tid is always in range.
// NOTE(review): half of d_b is deliberately never copied from the host (see
// initcuda) and d_c is never zeroed on the device, so this kernel reads and
// accumulates onto uninitialized device memory — this file demonstrates that
// failure mode on purpose (see check()).
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for(int j=0; j<C; j++){
d_c[tid]+=d_b[j+tid*C]*d_a[j];
}
}
//Checking if the values stored in c are correct
int check(){
    // Each entry of c should equal C (a row of ones dotted with a vector of
    // ones); `test` becomes true if any entry is wrong, i.e. the deliberately
    // incomplete device copy of b was visible in the result.
    bool test = false;
    for(int i=0; i<C; i++){
        if(c[i]!=C){
            test = true;
        }
    }
    // Fixed: the ternary operands previously embedded an extra "\n" on top of
    // the format string's newline, printing a stray blank line.
    printf("Memory Access Issue visible: %s\n", test ? "true" : "false");
    return 0;
}
//Initialization of the variables on the GPU
int initcuda(){
//Allocation of GPU memory for d_a,d_b,d_c
cudaErrorCheck( cudaMalloc(&d_a, C*sizeof(int)));
cudaErrorCheck( cudaMalloc(&d_b, C*C*sizeof(int)));
cudaErrorCheck( cudaMalloc(&d_c, C*sizeof(int)));
//Copying the array a and half the matrix b from the host to the array d_a and the matrix d_b on the device
// NOTE(review): copying only C*C/2 ints is deliberate — this program exists
// to demonstrate the resulting uninitialized-memory reads (see file header
// and check()).  d_c is also left uninitialized on purpose.
cudaErrorCheck( cudaMemcpy(d_a,a,C*sizeof(int),cudaMemcpyHostToDevice));
cudaErrorCheck( cudaMemcpy(d_b,b,C*C*sizeof(int)/2,cudaMemcpyHostToDevice));
return 0;
}
//Main programm
int main(){
// Demo driver: initialize host and device data (with the intentionally
// incomplete copy of b), run the matrix-vector kernel, and check whether the
// uninitialized device memory was visible in the result.
//Calling the initialization methods
init();
initcuda();
//Launch Kernel
Mult<<<B,T>>>(d_a,d_b,d_c);
// Check for errors in kernel launch (e.g. invalid execution configuration paramters)
cudaErrorCheck( cudaGetLastError());
// Check for errors on the GPU after control is returned to CPU
cudaErrorCheck( cudaDeviceSynchronize());
//Copying back the result d_c from the device to the host array c
cudaErrorCheck( cudaMemcpy(c,d_c,C*sizeof(int),cudaMemcpyDeviceToHost));
//Verify result
check();
//Freeing GPU memory
cudaErrorCheck( cudaFree(d_a));
cudaErrorCheck( cudaFree(d_b));
cudaErrorCheck( cudaFree(d_c));
//Freeing CPU memory
free(a);
free(b);
free(c);
return 0;
}
12,004 | #include<stdio.h>
#include<cstdlib>
#include<iostream>
#include<cuda.h>
__global__ void Betweenness(int* R,int* C,int s,int* d,int* sigma,int n,float* delta)
{
    // Single-block Brandes-style betweenness step for source vertex s on a
    // CSR graph (row offsets R, column indices C, n vertices):
    //   1) initialize d / sigma / delta,
    //   2) level-synchronous BFS accumulating shortest-path counts sigma,
    //   3) one dependency-accumulation sweep into delta.
    // Launch assumption: ONE block; every loop strides by blockDim.x and the
    // shared flags below are block-scoped.
    int idx = threadIdx.x;
    int w, v;
    // Initialization: source gets distance 0 / one path; everyone else is
    // unreached (INT_MAX) with zero paths.
    for (v = idx; v < n; v += blockDim.x)
    {
        if (v == s)
        {
            d[v] = 0;
            sigma[v] = 1;
        }
        else
        {
            d[v] = INT_MAX;
            sigma[v] = 0;
        }
        delta[v] = 0;
    }
    __shared__ int current_depth;  // BFS frontier level
    __shared__ bool done;          // cleared whenever the frontier grows
    if (idx == 0)
    {
        done = false;
        current_depth = 0;
    }
    __syncthreads();
    // Calculate the number of shortest paths and the distance from s (the
    // root) to each vertex, one BFS level per outer iteration.
    while (!done)
    {
        __syncthreads();
        done = true;
        __syncthreads();
        for (v = idx; v < n; v += blockDim.x) // for each vertex...
        {
            if (d[v] == current_depth)
            {
                for (int r = R[v]; r < R[v+1]; r++) // for each neighbor of v
                {
                    int nbr = C[r];
                    if (d[nbr] == INT_MAX)
                    {
                        d[nbr] = d[v] + 1;
                        done = false;
                    }
                    if (d[nbr] == (d[v] + 1))
                    {
                        atomicAdd(&sigma[nbr], sigma[v]);
                    }
                }
            }
        }
        __syncthreads();
        if (idx == 0) {
            current_depth++;
        }
    }
    __syncthreads();
    // Dependency accumulation.
    // NOTE(review): this is a single pass over all v, so delta[w] of other
    // vertices may still be 0 when read; also the d[w]+1==d[v] test selects
    // predecessors where Brandes sums over successors — confirm against the
    // intended algorithm before relying on the delta values.
    for (v = idx; v < n; v += blockDim.x)
    {
        float ds = 0;
        for (int r = R[v]; r < R[v+1]; r++)
        {
            w = C[r];
            if (d[w] + 1 == d[v]) {
                ds = ds + (float)sigma[v] * (1 + delta[w]) / sigma[w];
                // Fixed: the original passed this float to printf with %d,
                // which is undefined behavior; %f prints the actual value.
                printf("%f ", (float)sigma[v] * (1 + delta[w]) / sigma[w]);
            }
        }
        delta[v] = ds;
    }
    __syncthreads();
}
|
12,005 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define WINDOW_WIDTH 9
#define WINDOW_HEIGHT 7
#define IMAGE_WIDTH 1280
#define IMAGE_HEIGHT 720
#define BLOCK_SIZE 128
#define LINES_PER_BLOCK 16
__global__ static void censusTransform(int width, int height, int * ret)
{
// Debug/demo kernel: walks the census window rows for this thread's pixel and
// writes the (row, col) decomposition of the last in-bounds pixel value into
// ret[0..WINDOW_HEIGHT) and ret[WINDOW_HEIGHT..2*WINDOW_HEIGHT).
// NOTE(review): every thread of every block writes the same ret[i] slots with
// no guard or atomics, so the surviving values depend on scheduling (last
// writer wins) — acceptable only as a probe, not as a real transform.
int half_kw = WINDOW_WIDTH /2;
int half_kh = WINDOW_HEIGHT /2 ;
int tid = threadIdx.x;
// Blocks overlap horizontally by WINDOW_WIDTH-1 pixels so each block can
// cover full windows.
int x0 = blockIdx.x * (BLOCK_SIZE - WINDOW_WIDTH + 1) - half_kw;
int y0 = blockIdx.y * LINES_PER_BLOCK;
// Debug print: one line per thread (very verbose at full grid size).
printf("block Dim (%d %d) (%d %d) \n", blockDim.x, blockDim.y, blockIdx.x, blockIdx.y);
//printf("block id (%d %d) \n", blockIdx.x, blockIdx.y);
for (int i = 0; i < WINDOW_HEIGHT; ++i) {
int x = x0 + tid;
int y = y0 - half_kh + i;
int value = 0;
if (0 <= x && x < IMAGE_WIDTH && 0 <= y && y < IMAGE_HEIGHT) {
// value encodes the pixel coordinate as y*IMAGE_WIDTH + x.
value = x+y*IMAGE_WIDTH;
ret[i] = value / IMAGE_WIDTH;
ret[WINDOW_HEIGHT + i] = value % IMAGE_WIDTH;
//printf("row %d col %d \n", value / IMAGE_WIDTH, value % IMAGE_WIDTH);
}
}
__syncthreads();
}
void censusDemo(void)
{
// Host driver: allocates the small probe buffer, launches censusTransform
// over the whole image grid, and prints the (row, col) pairs it wrote back.
// The blocking cudaMemcpy below implicitly synchronizes with the kernel.
// NOTE(review): no CUDA error checking — a launch failure would silently
// print the zero-initialized host_ret.
printf("censusDemo +++++\n");
int * ret = 0;
int host_ret[WINDOW_HEIGHT*2] = {0};
cudaMalloc((void**) &ret, sizeof(int)*WINDOW_HEIGHT*2);
// Each block covers BLOCK_SIZE - WINDOW_WIDTH + 1 output columns because of
// the horizontal window overlap.
int width_per_block = BLOCK_SIZE - WINDOW_WIDTH + 1;
int height_per_block = LINES_PER_BLOCK;
dim3 grid((IMAGE_WIDTH + width_per_block - 1) / width_per_block, (IMAGE_HEIGHT + height_per_block - 1) / height_per_block);
//dim3 grid(1,1);
dim3 block(BLOCK_SIZE);
censusTransform<<<grid,block>>>(IMAGE_WIDTH, IMAGE_HEIGHT, ret);
cudaMemcpy(host_ret, ret, sizeof(int)*WINDOW_HEIGHT*2, cudaMemcpyDeviceToHost);
for (int i = 0; i < WINDOW_HEIGHT; i++) {
printf("(%d %d)", host_ret[i], host_ret[WINDOW_HEIGHT+i]);
}
cudaFree(ret);
}
|
12,006 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define DEPTH 5 // no of layers
#define BREADTH 1000 // nodes per layer
#define RATE 0.0001 // learning rate
#define NDIM(a,b) ((a)*BREADTH+(b))
#define WDIM(a,b,c) ((a)*BREADTH*BREADTH+(b)*BREADTH+(c))
float *W, *R; // [DEPTH-1][BREADTH][BREADTH] W_kij : weight of edge b/w jth and ith node from kth layer and k+1th layer resp.
float *N; // [DEPTH][BREADTH] N_ki : jth node in kth layer
float *Y, *pY; // [BREADTH] expected and predicted output vectors
float *U, *V; // [BREADTH] vectors for backpropagation chain-rule
float Yh[BREADTH], Dh[BREADTH]; // host arrays: expected and predicted output
int count = 0;
#define f(x) (tanhf((x))) // activation function
#define df(y) (1 - (y) * (y)) // derivative of activation function in y terms
__global__ void predict_N( float *N) {
    // Feedforward prep: zero every node activation except the input layer
    // (the first BREADTH entries), using a grid-stride loop.
    const int start = blockIdx.x * blockDim.x + threadIdx.x + BREADTH;
    const int step = gridDim.x * blockDim.x;
    for (int idx = start; idx < BREADTH * DEPTH; idx += step)
        N[idx] = 0;
}
__global__ void predict_WN( float *N, float *W, float *R, int k) { // feedforward pass algorithm
// Element-wise products for the layer transition using weight layer k:
// R[i*BREADTH + j] = W[k][i][j] * N[k][j].  predict_Ri later row-sums R to
// complete the matrix-vector product (callers pass k-1 for target layer k).
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
int h;
// matrix-vector multiplication W*N b/w k-1th and kth layer
int wIndex = k * BREADTH * BREADTH;
int nIndex = k * BREADTH;
for ( h = tid ; h < BREADTH * BREADTH ; h += stride ) {
int j = h % BREADTH;
R[h] = W[wIndex + h] * N[nIndex + j];
}
}
__global__ void predict_Ri( float *R, int n, int j) { // feedforward pass algorithm
// One step of a pairwise tree reduction over each of the BREADTH rows of R:
// adds the element j slots ahead into each surviving slot.  The host calls
// this repeatedly with j = 1, 2, 4, ... and n halving, until each row's sum
// ends up in column 0 (consumed by predict_Nk / train_B).
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
int i;
for ( i = tid % BREADTH ; i < BREADTH ; i += stride ) {
// itid/istride distribute the within-row work across threads mapped to
// the same row.
int itid = tid / BREADTH;
int istride = stride / BREADTH;
// Avoid a zero stride when gridDim*blockDim < BREADTH.
istride += (istride == 0);
int rIndex = i * BREADTH;
for ( int h = 2 * j * itid; h < j * (n - 1); h += 2 * j * istride)
R[rIndex + h] += R[rIndex + h + j];
}
}
__global__ void predict_Nk( float *N, float *R, int k) {
    // Write layer k's activations: node i gets f(row-sum of R), where the
    // row sum was left in column 0 of row i by the predict_Ri reduction.
    const int stride = gridDim.x * blockDim.x;
    for (int node = blockIdx.x * blockDim.x + threadIdx.x; node < BREADTH; node += stride)
        N[NDIM(k, node)] = f( R[NDIM(node, 0)]);
}
__global__ void predict_Yp( float *N, float *pY) {
    // Copy the final layer's activations into the prediction vector pY.
    const int stride = gridDim.x * blockDim.x;
    for (int node = blockIdx.x * blockDim.x + threadIdx.x; node < BREADTH; node += stride)
        pY[node] = N[NDIM(DEPTH - 1, node)];
}
__global__ void train_V( float *N, float *Y, float *V) {
    // Output-layer delta for backpropagation:
    // V[i] = (prediction - target) * f'(prediction), with the prediction
    // stored in layer DEPTH-1 (= q+1).
    const int q = DEPTH - 2;
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < BREADTH; i += stride) {
        V[i] = (N[NDIM(q + 1, i)] - Y[i]) * df( N[NDIM(q + 1, i)]);
    }
}
__global__ void train_Wf( float *N, float *W, float *V) {
    // Gradient-descent update of the last weight layer (index q):
    // W[q][i][j] -= RATE * V[i] * N[q][j], flattened over h = i*BREADTH + j.
    const int q = DEPTH - 2;
    const int stride = gridDim.x * blockDim.x;
    for (int h = blockIdx.x * blockDim.x + threadIdx.x; h < BREADTH * BREADTH; h += stride) {
        const int row = h / BREADTH;
        const int col = h % BREADTH;
        W[WDIM(q, row, col)] -= (float)RATE * V[row] * N[NDIM(q, col)];
    }
}
__global__ void train_WN( float *N, float *W, float *A, float *R, int k) { // backpropagation chain step
// Chain-rule expansion through weight layer k: for each (i, j) writes
// R[j*BREADTH + i] = A[i] * W[k][i][j] * f'(N[k][j]) — note the transposed
// store, so that predict_Ri's row reduction sums over i, yielding the next
// layer's delta vector (collected by train_B).
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
int h;
// matrix-vector multiplication W*N b/w k-1th and kth layer
int wIndex = k * BREADTH * BREADTH;
int nIndex = k * BREADTH;
for ( h = tid ; h < BREADTH * BREADTH ; h += stride ) {
int j = h % BREADTH;
int i = h / BREADTH;
R[NDIM(j,i)] = A[i] * W[wIndex + h] * df( N[nIndex + j]);
}
}
__global__ void train_B( float *R, float *B) {
    // Collect each row's reduced sum (left in column 0 by predict_Ri) into
    // the backpropagation vector B.
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < BREADTH; i += stride)
        B[i] = R[NDIM(i, 0)];
}
__global__ void train_Wh( float *N, float *W, float *A, int k) {
    // Hidden-layer gradient-descent update:
    // W[k][i][j] -= RATE * A[i] * N[k][j], flattened over h = i*BREADTH + j.
    const int wBase = k * BREADTH * BREADTH;
    const int nBase = k * BREADTH;
    const int stride = gridDim.x * blockDim.x;
    for (int h = blockIdx.x * blockDim.x + threadIdx.x; h < BREADTH * BREADTH; h += stride) {
        const int row = h / BREADTH;
        const int col = h % BREADTH;
        W[wBase + h] -= (float)RATE * A[row] * N[nBase + col];
    }
}
void train() {
// One backpropagation pass: compute the output-layer delta, update the last
// weight layer, then walk the hidden layers backwards, propagating deltas via
// train_WN + row reduction and updating weights with train_Wh.
train_V<<<128, 32>>>( N, Y, V);
cudaDeviceSynchronize();
train_Wf<<<128, 32>>>( N, W, V);
cudaDeviceSynchronize();
int q = DEPTH - 2;
// A holds the current layer's delta, B receives the next one; the pointers
// are swapped each iteration (ping-pong between V and U).
float *A = &V[0], *B = &U[0];
float *temp;
// updating weights of hidden layers (backpropagation)
for ( int k = q - 1; k >= 0; k-- ) {
// chain-rule expansion by multiplying with next layer's weight matrix and
// derivative of activation function on the same layer's nodes
train_WN<<<128, 32>>>( N, W, A, R, k+1);
cudaDeviceSynchronize();
// Pairwise tree reduction: j doubles and n halves until each row of R is
// summed into its column 0.
int j = 1;
int n = BREADTH;
while ( n > 1) {
predict_Ri<<<128, 32>>>( R, n, j);
cudaDeviceSynchronize();
j *= 2;
n = (n+1)/2;
}
train_B<<<128, 32>>>( R, B);
cudaDeviceSynchronize();
temp = B; B = A; A = temp;
// adjust weights by gradient descent
train_Wh<<<128, 32>>>( N, W, A, k);
cudaDeviceSynchronize();
}
}
__global__ void normalize( float *W, float *N, float *Y) {
    // Deterministic initialization (stand-in for the commented-out curand
    // path in initialize()): all weights 1/BREADTH; input layer and targets
    // ramp as i/BREADTH.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    const int weightCount = (DEPTH - 1) * BREADTH * BREADTH;
    for (int i = tid; i < weightCount; i += stride)
        W[i] = 1 / (float)BREADTH;
    for (int i = tid; i < BREADTH; i += stride) {
        N[i] = i / (float)BREADTH;
        Y[i] = i / (float)BREADTH;
    }
}
void initialize() {
// Allocate all device arrays for the network and fill W/N/Y with the
// deterministic ramp values from the normalize kernel.
// NOTE(review): cudaMalloc return codes are not checked here; a failed
// allocation would surface later as an invalid-pointer error.
int w = (DEPTH-1) * BREADTH * BREADTH;
// device memory allocation of global arrays
cudaMalloc( (void**)(&W), sizeof(float) * w);
cudaMalloc( (void**)(&R), sizeof(float) * BREADTH * BREADTH);
cudaMalloc( (void**)(&N), sizeof(float) * DEPTH * BREADTH);
cudaMalloc( (void**)(&Y), sizeof(float) * BREADTH);
cudaMalloc( (void**)(&pY), sizeof(float) * BREADTH);
cudaMalloc( (void**)(&U), sizeof(float) * BREADTH);
cudaMalloc( (void**)(&V), sizeof(float) * BREADTH);
/*
// random number generator
curandGenerator_t gen;
// Create pseudo-random number generator
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// Set seed
curandSetPseudoRandomGeneratorSeed(gen, time(0));
// Generate floats on device
curandGenerateUniform(gen, W, w);
curandGenerateUniform(gen, N, BREADTH * DEPTH);
curandGenerateUniform(gen, Y, BREADTH);
curandDestroyGenerator(gen);
*/
normalize<<<128, 32>>>(W, N, Y);
cudaDeviceSynchronize();
}
void calculateError() {
    // Sum of squared differences between the two host output buffers (Yh and
    // Dh, filled from the device in main), printed with the iteration count.
    float total = 0;
    for (int i = 0; i < BREADTH; i++) {
        const float diff = Dh[i] - Yh[i];
        total += diff * diff;
    }
    printf("ITERATION : %d\tLOSS : %f\t", count, total);
}
void freeResources() {
    // Release every device allocation made in initialize().
    cudaFree(W);
    cudaFree(R);
    cudaFree(N);
    cudaFree(Y);
    cudaFree(pY);
    cudaFree(U);
    cudaFree(V);
}
void predict() {
// Feedforward pass: zero all non-input activations, then for each layer k
// form the element-wise products (predict_WN), row-reduce them to the
// matrix-vector product (predict_Ri loop), and apply the activation
// (predict_Nk).  Finally copy the last layer into pY.
predict_N<<<128, 32>>>(N);
cudaDeviceSynchronize();
for ( int k = 1; k < DEPTH ; k++ ) {
predict_WN<<<128, 32>>>( N, W, R, k-1);
cudaDeviceSynchronize();
// Pairwise tree reduction: j doubles, n halves, until each row's sum is
// in column 0 of R.
int j = 1;
int n = BREADTH;
while ( n > 1) {
predict_Ri<<<128, 32>>>( R, n, j);
cudaDeviceSynchronize();
j *= 2;
n = (n+1)/2;
}
predict_Nk<<<128, 32>>>( N, R, k);
cudaDeviceSynchronize();
}
predict_Yp<<<128, 32>>>( N, pY);
cudaDeviceSynchronize();
}
int main() {
// Train the toy network for 4 iterations, timing each predict+train pass and
// printing the loss between expected (Y -> Yh) and predicted (pY -> Dh)
// outputs, matching the Yh/Dh declarations at the top of the file.
initialize();
for ( count = 0; count < 4; count++) {
clock_t begin, end;
begin = clock();
predict();
train();
end = clock();
float CpuTime = (float)(end - begin) / CLOCKS_PER_SEC;
CUDA_CALL(cudaMemcpy(Yh, Y, BREADTH * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(Dh, pY, BREADTH * sizeof(float), cudaMemcpyDeviceToHost));
calculateError();
printf("TIME: %f\n", CpuTime);
}
freeResources();
return 0;
}
|
extern "C"
__global__ void lifeStep(char** lifeData, int width, int height) {
    // One Game-of-Life step on a toroidal grid stored as an array of row
    // pointers; neighbor coordinates wrap via modulo.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fixed: guard the grid tail — launches rarely divide the board evenly,
    // and the original indexed out of bounds for the overshoot threads.
    if (x >= width || y >= height) return;
    int right = (x + 1) % width;
    int left = (x + width - 1) % width;
    int top = (y + height - 1) % height;
    int down = (y + 1) % height;
    // Count alive cells in the 8-neighborhood.
    int aliveCells =
        lifeData[left][top] + lifeData[x][top] + lifeData[right][top] +
        lifeData[left][y] + lifeData[right][y] +
        lifeData[left][down] + lifeData[x][down] + lifeData[right][down];
    // NOTE(review): this updates lifeData in place while other threads may
    // still be reading their neighborhoods, so results depend on scheduling.
    // A correct step needs a separate output buffer — TODO fix at call site.
    lifeData[x][y] = aliveCells == 3 || (aliveCells == 2 && lifeData[x][y]) ? 1 : 0;
}
12,008 | #include "includes.h"
__global__ void add( int *a, int *b, int *c ) {
    // Element-wise c = a + b with one block per element; the block index is
    // the element index.  N is expected to be defined by includes.h.
    int i = blockIdx.x;
    if (i < N)
        c[i] = a[i] + b[i];
}
12,009 | #include "includes.h"
__global__ void DrawMaskedColorKernelNearestNeighbor(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *texture, int textureWidth, int textureHeight, int objectWidth, int objectHeight, float r, float g, float b ) // texture = mask
{
// Alpha-blends a solid color (r,g,b) into a planar RGB float target image,
// masked by the A plane of a 4-plane texture that is nearest-neighbor scaled
// from textureWidth x textureHeight to objectWidth x objectHeight and placed
// at (inputX, inputY).  One thread per (channel, object-pixel); threads with
// id covering >= 3 channels are no-ops.
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int targetPixels = targetWidth * targetHeight;
int texturePixels = textureWidth * textureHeight;
int objectPixels = objectWidth * objectHeight;
// Decompose id into (channel, y, x) within the object rectangle.
int idObjectRgb = id / objectPixels;
int idObjectPixel = (id - idObjectRgb * objectPixels); // same as (id % objectPixels), but the kernel runs 10% faster
int idObjectY = idObjectPixel / objectWidth;
int idObjectX = (idObjectPixel - idObjectY * objectWidth); // same as (id % textureWidth), but the kernel runs another 10% faster
if (idObjectRgb < 3) // 3 channels that we will write to
{
int targetRgb = idObjectRgb;
// the texture is in BGR format, we want RGB
switch (idObjectRgb)
{
case 0: // R
targetRgb = 2; // B
break;
case 2: // B
targetRgb = 0; // R
break;
}
// if the object pixel offset by inputX, inputY, lies inside the target
if (idObjectX + inputX < targetWidth &&
idObjectX + inputX >= 0 &&
idObjectY + inputY < targetHeight &&
idObjectY + inputY >= 0)
{
// nearest neighbor texture X,Y:
int textureX = textureWidth * idObjectX / objectWidth;
int textureY = textureHeight * idObjectY / objectHeight;
int textureId = textureY * textureWidth + textureX;
// Planar layout: each channel plane is targetPixels / texturePixels long.
int tIndex = targetPixels * targetRgb + targetWidth * (idObjectY + inputY) + (idObjectX + inputX);
int aIndex = textureId + 3 * texturePixels; // the A component of the texture
float a = texture[aIndex];
if (a > 0) // mask allows color here
{
// apply this: target[tIndex] = target[tIndex] * (1.0f - a) + a * color;
target[tIndex] = target[tIndex] * (1.0f - a);
switch (idObjectRgb)
{
case 0:
target[tIndex] += a*r;
break;
case 1:
target[tIndex] += a*g;
break;
case 2:
default:
target[tIndex] += a*b;
break;
}
}
}
}
}
12,010 | #include "includes.h"
__global__ void reduce_moments(float *d_arr, float *d_results, int N)
{
    // Block-level sum reduction: each block sums its blockDim.x-wide slice of
    // d_arr into d_results[blockIdx.x].  pThreads (external macro) must be
    // >= blockDim.x, and blockDim.x is assumed to be a power of two.
    __shared__ float sh_array[pThreads];
    int n = blockDim.x * blockIdx.x + threadIdx.x;
    // Load this thread's element (0 past the tail).  Fixed: the original left
    // this load commented out and reduced uninitialized shared memory while
    // indexing d_arr with threadIdx.x instead of the global index.
    sh_array[threadIdx.x] = (n < N) ? d_arr[n] : 0.0f;
    __syncthreads();
    // Tree reduction in shared memory.  Fixed: the barrier is now outside any
    // divergent branch — the original wrapped the whole loop in `if (n < N)`,
    // which is undefined behavior when a block straddles N.
    for (int s = blockDim.x / 2; s > 0; s >>= 1){
        if (threadIdx.x < s)
        {
            sh_array[threadIdx.x] += sh_array[threadIdx.x + s];
        }
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if (threadIdx.x == 0){
        d_results[blockIdx.x] = sh_array[0];
    }
}
12,011 | #include <cassert>
#include <cstdio>
#include "sgemm_kernel.cuh"
__global__ void fill(float* data, const int size, const float value)
{
    // Grid-stride fill: set every element of data[0..size) to value.
    const int step = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += step)
    {
        data[i] = value;
    }
}
__global__ void sgemm(const float* a, const float* b, float* result, const int size, const int stride)
{
// Tiled SGEMM: result = a * b for size x size matrices with row stride
// `stride`.  Each thread accumulates a small_block_size^2 patch of the
// output; block_size_x / small_block_size are compile-time constants from
// sgemm_kernel.cuh (assumed — TODO confirm).  Shared tiles are padded (+1)
// to avoid shared-memory bank conflicts.
const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x;
const int num_thread = blockDim.x * blockDim.y;
const int large_ty = threadIdx.y * small_block_size;
const int large_tx = threadIdx.x * small_block_size;
// block_size_y achievable when each thread holds small_block_size^2 elements
const int block_size_y = num_thread * small_block_size * small_block_size / block_size_x;
assert(block_size_y <= block_size_x);
constexpr int block_k_size = 16;
// Grid-stride over output tiles in both dimensions.
for (int i = blockIdx.y * block_size_y; i < size; i += gridDim.y * block_size_y)
{
for (int j = blockIdx.x * block_size_x; j < size; j += gridDim.x * block_size_x)
{
const bool has_result = (i + large_ty < size && j + large_tx < size);
// per-thread accumulator for this thread's output patch
float local_result[small_block_size][small_block_size];
for (int ii = 0; ii < small_block_size; ii++)
{
for (int jj = 0; jj < small_block_size; jj++)
{
local_result[ii][jj] = 0;
}
}
const int height = min(block_size_y, size - i);
const int width = min(block_size_x, size - j);
// March over the K dimension in block_k_size chunks staged in shared memory.
for (int k = 0; k < size; k += block_k_size)
{
__shared__ float temp_a[block_k_size][block_size_x + 1], temp_b[block_k_size][block_size_x + 1];
{
// Cooperative load of the B tile (row-major over k).
const int l = thread_idx % block_size_x;
if (l < width)
{
for (int kk = thread_idx / block_size_x; kk < block_k_size; kk += num_thread / block_size_x)
{
temp_b[kk][l] = b[(k + kk) * stride + (j + l)];
}
}
}
{
// Cooperative load of the A tile (transposed into [k][row] layout).
const int kk = thread_idx % block_k_size;
for (int l = thread_idx / block_k_size; l < height; l += num_thread / block_k_size)
{
temp_a[kk][l] = a[(i + l) * stride + k + kk];
}
}
__syncthreads();
if (has_result)
{
// Rank-1 updates of the per-thread patch from the shared tiles.
for (int kk = 0; kk < block_k_size; kk++)
{
for (int ii = 0; ii < small_block_size; ii++)
{
for (int jj = 0; jj < small_block_size; jj++)
{
local_result[ii][jj] += temp_a[kk][large_ty + ii] * temp_b[kk][large_tx + jj];
}
}
}
}
__syncthreads();
}
if (has_result)
{
// Write back this thread's patch.
for (int ii = 0; ii < small_block_size; ii++)
{
for (int jj = 0; jj < small_block_size; jj++)
{
result[(i + large_ty + ii) * stride + (j + large_tx + jj)] = local_result[ii][jj];
}
}
}
__syncthreads();
}
}
}
12,012 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
static const int WORK_SIZE = 256;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
__device__ unsigned int bitreverse(unsigned int number) {
    // Reverse the bit order within each byte of the word: swap nibbles,
    // then bit pairs, then adjacent bits.
    number = ((number >> 4) & 0x0f0f0f0f) | ((number & 0x0f0f0f0f) << 4);
    number = ((number >> 2) & 0x33333333) | ((number & 0x33333333) << 2);
    number = ((number >> 1) & 0x55555555) | ((number & 0x55555555) << 1);
    return number;
}
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
__global__ void bitreverse(void *data) {
    // Per-thread, in-place byte-wise bit reversal of a word array; thread i
    // handles element i.
    unsigned int *words = (unsigned int *) data;
    words[threadIdx.x] = bitreverse(words[threadIdx.x]);
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main2(void) {
// Fills an array with 0..WORK_SIZE-1, bit-reverses each element on the GPU,
// and prints input/output pairs.  All CUDA calls are checked.
void *d = NULL;
int i;
unsigned int idata[WORK_SIZE], odata[WORK_SIZE];
for (i = 0; i < WORK_SIZE; i++)
idata[i] = (unsigned int) i;
CUDA_CHECK_RETURN(cudaMalloc((void**) &d, sizeof(int) * WORK_SIZE));
CUDA_CHECK_RETURN(
cudaMemcpy(d, idata, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice));
// NOTE(review): the third launch parameter requests dynamic shared memory
// the kernel never uses; harmless but unnecessary.  cudaThreadSynchronize
// is the deprecated spelling of cudaDeviceSynchronize.
bitreverse<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaMemcpy(odata, d, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost));
for (i = 0; i < WORK_SIZE; i++)
printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
CUDA_CHECK_RETURN(cudaFree((void*) d));
CUDA_CHECK_RETURN(cudaDeviceReset());
return 0;
}
|
12,013 | #define BLOCK_WIDTH 32
//#define BLOCK_HEIGHT 32
#define BLOCK_HEIGHT 16
#define TILE_WIDTH 96
//#define TILE_HEIGHT 96
#define TILE_HEIGHT 48
#define SMALL_TILE_WIDTH 64
//#define SMALL_TILE_HEIGHT 64
#define SMALL_TILE_HEIGHT 32
__global__ void superResolutionKernel(float* o_arr1d, float* i_arr1d, int rows, int cols) {
    // Downsamples a TILE_HEIGHT x TILE_WIDTH tile by 2/3 in each dimension
    // (3 samples -> 2) and upsamples it back (2 samples -> 3), writing the
    // reconstructed tile to o_arr1d.  Launch assumption: blockDim ==
    // (BLOCK_WIDTH, BLOCK_HEIGHT), one block per tile; the addressed tiles
    // must lie fully inside the rows x cols image.
    __shared__ float tile[TILE_HEIGHT][TILE_WIDTH];
    __shared__ float buf[SMALL_TILE_HEIGHT][TILE_WIDTH]; // buffer for down/up sampling
    __shared__ float small_tile[SMALL_TILE_HEIGHT][SMALL_TILE_WIDTH];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Stage the input tile in shared memory.
    for(int offset_y = 0; offset_y < TILE_HEIGHT; offset_y += BLOCK_HEIGHT) {
        for(int offset_x = 0; offset_x < TILE_WIDTH; offset_x += BLOCK_WIDTH) {
            int xidx = blockIdx.x * TILE_WIDTH + offset_x + tx;
            int yidx = blockIdx.y * TILE_HEIGHT + offset_y + ty;
            tile[offset_y + ty][offset_x + tx] = i_arr1d[yidx * cols + xidx];
        }
    }
    __syncthreads();
    // col downsample: (TILE_HEIGHT, TILE_WIDTH) -> (SMALL_TILE_HEIGHT, TILE_WIDTH)
    for(int offset_x = 0; offset_x < TILE_WIDTH; offset_x += BLOCK_WIDTH) {
        buf[2*ty][offset_x + tx] = -0.022 * tile[3*ty][offset_x + tx] + 0.974 * tile[3*ty+1][offset_x + tx] + 0.227 * tile[3*ty+2][offset_x + tx];
        buf[2*ty+1][offset_x + tx] = 0.227 * tile[3*ty][offset_x + tx] + 0.974 * tile[3*ty+1][offset_x + tx] + -0.022 * tile[3*ty+2][offset_x + tx];
    }
    __syncthreads();
    // row downsample: (SMALL_TILE_HEIGHT, TILE_WIDTH) -> (SMALL_TILE_HEIGHT, SMALL_TILE_WIDTH)
    for(int offset_y = 0; offset_y < SMALL_TILE_HEIGHT; offset_y += BLOCK_HEIGHT) {
        small_tile[offset_y + ty][2*tx] = -0.022 * buf[offset_y + ty][3*tx] + 0.974 * buf[offset_y + ty][3*tx+1] + 0.227 * buf[offset_y + ty][3*tx+2];
        small_tile[offset_y + ty][2*tx+1] = 0.227 * buf[offset_y + ty][3*tx] + 0.974 * buf[offset_y + ty][3*tx+1] + -0.022 * buf[offset_y + ty][3*tx+2];
    }
    __syncthreads();
    // row upsample: (SMALL_TILE_HEIGHT, SMALL_TILE_WIDTH) -> (SMALL_TILE_HEIGHT, TILE_WIDTH)
    for(int offset_y = 0; offset_y < SMALL_TILE_HEIGHT; offset_y += BLOCK_HEIGHT) {
        // 3*tx (left/right edges use shortened stencils)
        if(tx == 0)
            buf[offset_y + ty][0] = 0.927 * small_tile[offset_y + ty][0] + -0.047 * small_tile[offset_y + ty][1];
        else
            buf[offset_y + ty][3*tx] = 0.119 * small_tile[offset_y + ty][2*tx-1] + 0.927 * small_tile[offset_y + ty][2*tx]
                + -0.047 * small_tile[offset_y + ty][2*tx+1];
        // 3*tx+2
        if(tx == BLOCK_WIDTH-1)
            buf[offset_y + ty][TILE_WIDTH-1] = -0.047 * small_tile[offset_y + ty][SMALL_TILE_WIDTH-2] + 0.927 * small_tile[offset_y + ty][SMALL_TILE_WIDTH-1];
        else
            buf[offset_y + ty][3*tx+2] = -0.047 * small_tile[offset_y + ty][2*tx] + 0.927 * small_tile[offset_y + ty][2*tx+1]
                + 0.119 * small_tile[offset_y + ty][2*tx+2];
        // 3*tx+1
        if(tx == 0)
            buf[offset_y + ty][1] = 0.6 * small_tile[offset_y + ty][0] + 0.6 * small_tile[offset_y + ty][1] + -0.1 * small_tile[offset_y + ty][2];
        else if(tx == BLOCK_WIDTH-1)
            buf[offset_y + ty][TILE_WIDTH-2] = -0.1 * small_tile[offset_y + ty][SMALL_TILE_WIDTH-3] + 0.6 * small_tile[offset_y + ty][SMALL_TILE_WIDTH-2]
                + 0.6 * small_tile[offset_y + ty][SMALL_TILE_WIDTH-1];
        else
            buf[offset_y + ty][3*tx+1] = -0.1 * small_tile[offset_y + ty][2*tx-1] + 0.6 * small_tile[offset_y + ty][2*tx]
                + 0.6 * small_tile[offset_y + ty][2*tx+1] + -0.1 * small_tile[offset_y + ty][2*tx+2];
    }
    __syncthreads();
    // col upsample: (SMALL_TILE_HEIGHT, TILE_WIDTH) -> (TILE_HEIGHT, TILE_WIDTH),
    // mirroring the row-upsample stencils with rows indexed by ty.
    for(int offset_x = 0; offset_x < TILE_WIDTH; offset_x += BLOCK_WIDTH) {
        int xidx = blockIdx.x * TILE_WIDTH + offset_x + tx;
        // 3*ty
        if(ty == 0) {
            int yidx = blockIdx.y * TILE_HEIGHT;
            o_arr1d[yidx * cols + xidx] = 0.927 * buf[0][offset_x + tx] + -0.047 * buf[1][offset_x + tx];
        }
        else {
            int yidx = blockIdx.y * TILE_HEIGHT + 3*ty;
            // Fixed: the source rows were indexed with 2*tx (column thread
            // index) instead of 2*ty, mixing unrelated rows into the output.
            o_arr1d[yidx * cols + xidx] = 0.119 * buf[2*ty-1][offset_x + tx] + 0.927 * buf[2*ty][offset_x + tx]
                + -0.047 * buf[2*ty+1][offset_x + tx];
        }
        // 3*ty+2
        if(ty == BLOCK_HEIGHT-1) {
            int yidx = blockIdx.y * TILE_HEIGHT + TILE_HEIGHT-1;
            o_arr1d[yidx * cols + xidx] = -0.047 * buf[SMALL_TILE_HEIGHT-2][offset_x + tx] + 0.927 * buf[SMALL_TILE_HEIGHT-1][offset_x + tx];
        }
        else {
            int yidx = blockIdx.y * TILE_HEIGHT + 3*ty+2;
            // Fixed: the last term read small_tile (only SMALL_TILE_WIDTH
            // columns wide — out of bounds for offset_x + tx >= 64) instead
            // of buf, per the stencil used everywhere else in this stage.
            o_arr1d[yidx * cols + xidx] = -0.047 * buf[2*ty][offset_x + tx] + 0.927 * buf[2*ty+1][offset_x + tx]
                + 0.119 * buf[2*ty+2][offset_x + tx];
        }
        // 3*ty+1
        if(ty == 0) {
            int yidx = blockIdx.y * TILE_HEIGHT + 1;
            o_arr1d[yidx * cols + xidx] = 0.6 * buf[0][offset_x + tx] + 0.6 * buf[1][offset_x + tx] + -0.1 * buf[2][offset_x + tx];
        }
        else if(ty == BLOCK_HEIGHT-1) {
            int yidx = blockIdx.y * TILE_HEIGHT + TILE_HEIGHT-2;
            o_arr1d[yidx * cols + xidx] = -0.1 * buf[SMALL_TILE_HEIGHT-3][offset_x + tx] + 0.6 * buf[SMALL_TILE_HEIGHT-2][offset_x + tx]
                + 0.6 * buf[SMALL_TILE_HEIGHT-1][offset_x + tx];
        }
        else {
            int yidx = blockIdx.y * TILE_HEIGHT + 3*ty+1;
            o_arr1d[yidx * cols + xidx] = -0.1 * buf[2*ty-1][offset_x + tx] + 0.6 * buf[2*ty][offset_x + tx]
                + 0.6 * buf[2*ty+1][offset_x + tx] + -0.1 * buf[2*ty+2][offset_x + tx];
        }
    }
}
|
12,014 | #include "includes.h"
__global__ void sum(int *a,int *b,int n)
{
    // Each block sums one 256-element chunk of a into b[blockIdx.x]; the
    // final chunk is clipped to n.
    const int begin = 256 * blockIdx.x;
    const int end = min(begin + 256, n);
    int acc = 0;
    for (int i = begin; i < end; ++i)
        acc += a[i];
    b[blockIdx.x] = acc;
}
12,015 | #include "includes.h"
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n)
{
// Neighbored-pair in-place sum reduction in GLOBAL memory: each block sums
// its blockDim.x slice of g_idata into g_odata[blockIdx.x].  The interleaved
// (tid % (2*stride)) addressing causes heavy warp divergence; this is the
// baseline variant from reduction tutorials.
// set thread id.
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
// convert global data pointer to th local pointer of this block.
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check.
// NOTE(review): threads with idx >= n return before the __syncthreads()
// below — safe only if n is a multiple of blockDim.x (the usual assumption
// for this kernel); otherwise the partial last block has undefined behavior.
if (idx >= n) return;
// in-place reduction in global memory.
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ( (tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within block.
__syncthreads();
}
// write result for this block to global mem.
if (tid == 0)
{
g_odata[blockIdx.x] = idata[0];
}
}
12,016 | #include "includes.h"
__global__ void multiple_median_reduce_shuffle_gpu(const float *d_in, float *d_out, const float *d_random_numbers, const int *d_start_inds, const int *d_n_in) {
// Approximate-median reduction: each block samples MED_BLOCK_SIZE random
// elements from its segment of d_in (segment selected by blockIdx.y, with
// per-segment start index and length) and repeatedly replaces each triple
// with its median, leaving one approximate median per block in d_out.
// NOTE(review): the shared buffer is hard-coded to 256 and the triple step
// can read DATA[index + 2*s] past MED_BLOCK_SIZE — assumes MED_BLOCK_SIZE
// is a power of 3 and <= 256; confirm against the macro's definition.
/**************/
/* initialize */
/**************/
int segment = blockIdx.y;
// compute indices
int t_ind = threadIdx.x;
int g_ind =
blockIdx.x * MED_BLOCK_SIZE +
t_ind; // means that every row of blocks uses the same random numbers
// allocate shared memory
// __shared__ float DATA[MED_BLOCK_SIZE];
__shared__ float DATA[256];
/**************/
/* load stage */
/**************/
if (t_ind < MED_BLOCK_SIZE) {
// Map a uniform random number in [0,1) to an index inside this segment.
int sample_ind = d_start_inds[segment] +
floorf(d_random_numbers[g_ind] * (float)d_n_in[segment]);
DATA[t_ind] = d_in[sample_ind];
}
__syncthreads();
/*******************/
/* reduction stage */
/*******************/
// Ternary tree: stride s triples each pass; each surviving slot keeps the
// median of three values s apart.
for (int s = 1; s < MED_BLOCK_SIZE; s *= 3) {
int index = 3 * s * t_ind;
if (index < MED_BLOCK_SIZE) {
// fetch three values
float value1 = DATA[index];
float value2 = DATA[index + s];
float value3 = DATA[index + 2 * s];
// extract the middle value (median)
float smallest = fminf(value1, value2);
value2 = fmaxf(value1, value2);
value1 = smallest;
value3 = fmaxf(value1, value3);
value2 = fminf(value2, value3);
DATA[index] = value2;
}
__syncthreads();
}
/***************/
/* write stage */
/***************/
// write this block's approx median (first element)
if (t_ind == 0) {
d_out[gridDim.x * blockIdx.y + blockIdx.x] = DATA[0];
}
}
12,017 | #include <stdio.h>
int num_devices = 0;
/******************************************************************************
*
* Auxiliary routines
*
******************************************************************************/
#define error_check(error) do \
{ \
cuda_assert((error), __FILE__, __LINE__); \
} while (0);
void cuda_assert(cudaError_t code, const char *file, int line)
{
    // Abort with a diagnostic when a CUDA API call failed (used via the
    // error_check macro); a success code is a no-op.
    if (code == cudaSuccess)
        return;
    fprintf(stderr,
            "[ERROR] Error code: %d Message: %s %s %d\n",
            code, cudaGetErrorString(code), file, line);
    exit(code);
}
void mem_ustring(char *s, long bytes)
{
    // Format a byte count into s with a human-readable unit: GB, MB, KB, or
    // raw Bytes for anything below ~1 KB (thresholds are decimal, not binary).
    if (bytes > 1E+9)
        sprintf(s, "%.2lf GB", bytes / 1E+9);
    else if (bytes > 1E+6)
        sprintf(s, "%.2lf MB", bytes / 1E+6);
    else if (bytes > 1E+3)
        sprintf(s, "%.2lf KB", bytes / 1E+3);
    else
        sprintf(s, "%lf Bytes", (double)bytes);
}
void print_device_info(void)
{
// Print capability, thread count, and memory sizes for each detected device
// (num_devices is set in main).
// NOTE(review): cudaMemGetInfo reports free memory for the CURRENT device,
// but this loop never calls cudaSetDevice(n), so on multi-GPU hosts the
// "Total free memory" line repeats one device's value — confirm intent.
for (int n = 0; n < num_devices; n++)
{
char ustring[64];
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, n);
size_t free_mem;;
cudaMemGetInfo(&free_mem, NULL);
int device_threads = device_prop.multiProcessorCount * device_prop.maxThreadsPerMultiProcessor;
printf("Device %d: %s\n", n, device_prop.name);
printf("  Compute capability: %d.%d\n", device_prop.major, device_prop.minor);
printf("  Total number of threads: %d\n", device_threads);
mem_ustring(ustring, device_prop.totalGlobalMem);
printf("  Global memory size: %s\n", ustring);
mem_ustring(ustring, device_prop.sharedMemPerBlock);
printf("  Shared memory size: %s\n", ustring);
mem_ustring(ustring, device_prop.totalConstMem);
printf("  Constant memory size: %s\n", ustring);
mem_ustring(ustring, free_mem);
printf("  Total free memory: %s\n", ustring);
printf("  Warp size: %d\n", device_prop.warpSize);
}
printf("\n");
}
/******************************************************************************
*
* CUDA kernels
*
******************************************************************************/
__global__
void char_add(char *a, char *b)
{
    // Copy one character per thread from b into a (despite the name, this is
    // a plain copy — there is no addition).
    a[threadIdx.x] = b[threadIdx.x];
}
/******************************************************************************
*
* Device tests
*
******************************************************************************/
/* Smoke test: for each device, round-trip a string through device memory
 * via the char_add kernel and verify it arrives intact. */
void run_device_test(void)
{
    for (int n = 0; n < num_devices; n++)
    {
        error_check(cudaSetDevice(n));
        char string[256] = "Hello world!";
        int string_size = strlen(string) + 1;
        /* Allocate device buffers. FIX: allocations and copies were
         * unchecked, so a failure surfaced only as a garbled comparison. */
        char *d_a, *d_b;
        error_check(cudaMalloc((void **)&d_a, string_size * sizeof(char)));
        error_check(cudaMalloc((void **)&d_b, string_size * sizeof(char)));
        /* Allocate host buffers (calloc so `a` starts NUL-terminated) */
        char *a = (char *)calloc(string_size, sizeof(char));
        char *b = (char *)calloc(string_size, sizeof(char));
        strcpy(b, string);
        dim3 dim_block(string_size, 1);   /* one thread per byte */
        dim3 dim_grid(1);
        error_check(cudaMemcpy((void *)d_b, (const void *)b, string_size * sizeof(char), cudaMemcpyHostToDevice));
        char_add<<<dim_grid, dim_block>>>(d_a, d_b);
        error_check(cudaPeekAtLastError());
        error_check(cudaDeviceSynchronize());
        /* FIX: size now written consistently with the H2D copy above. */
        error_check(cudaMemcpy((void *)a, (void *)d_a, string_size * sizeof(char), cudaMemcpyDeviceToHost));
        cudaDeviceProp device_prop;
        error_check(cudaGetDeviceProperties(&device_prop, n));
        if (strcmp(a, b) == 0)
            printf("[PASSED TEST] Device %d: %s\n", n, device_prop.name);
        else
            printf("[FAILED TEST] Device %d: %s\n", n, device_prop.name);
        error_check(cudaFree(d_a));
        error_check(cudaFree(d_b));
        free(a);
        free(b);
    }
    printf("\n");
}
// Entry point: count devices (fatal on API error), then print per-device
// info and run the round-trip copy test on each.
int main(void)
{
error_check(cudaGetDeviceCount(&num_devices));
if (num_devices > 0)
{
print_device_info();
run_device_test();
}
else
{
printf("[ERROR] No CUDA devices found!\n");
}
// NOTE(review): EXIT_SUCCESS comes from <stdlib.h>, which is not among the
// visible includes — presumably pulled in transitively; confirm.
return EXIT_SUCCESS;
}
|
12,018 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <stdbool.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 16
///
/// function name: gpu_matrix_mult
///
/// description: dot product of two matrix (not only square)
///
/// parameters:
/// &a GPU device pointer to a m X n matrix (A)
/// &b GPU device pointer to a n X k matrix (B)
/// &c GPU device output purpose pointer to a m X k matrix (C)
/// to store the result
///
/// Note:
/// grid and block should be configured as:
/// dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,
/// (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
/// dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
///
/// further speedup can be obtained by using shared memory to decrease
/// global memory access times
/// return: none
///
/// Tiled N x N matrix multiply: d_result = d_a * d_b, staging
/// BLOCK_SIZE x BLOCK_SIZE tiles of both operands through shared memory.
/// Launch with BLOCK_SIZE x BLOCK_SIZE blocks covering the output.
/// FIX: the shared tiles were declared `int`, which truncated the float
/// inputs and allowed the int*int partial products to overflow; they now
/// match the element type of the operands.
__global__ void gpu_matrix_mult(float *d_a, float *d_b,
float *d_result, int N) {
    __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int idx;
    float tmp = 0.0f;
    for (int sub = 0; sub < gridDim.x; ++sub) {
        /// Stage one tile of each operand; lanes that fall past the end of
        /// the arrays load zero (N may not be divisible by BLOCK_SIZE).
        idx = row * N + sub * BLOCK_SIZE + threadIdx.x;
        tile_a[threadIdx.y][threadIdx.x] = (idx >= N * N) ? 0.0f : d_a[idx];
        idx = (sub * BLOCK_SIZE + threadIdx.y) * N + col;
        tile_b[threadIdx.y][threadIdx.x] = (idx >= N * N) ? 0.0f : d_b[idx];
        __syncthreads();   // tiles fully loaded before anyone reads them
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }
        __syncthreads();   // finish reads before the next iteration reloads
    }
    if (row < N && col < N) {
        d_result[row * N + col] = tmp;
    }
}
///
/// function name: cpu_matrix_mult
///
/// description: dot product of two matrix (not only square) in CPU,
/// for validating GPU results
///
/// parameters:
/// &a CPU host pointer to a m X n matrix (A)
/// &b CPU host pointer to a n X k matrix (B)
/// &c CPU host output purpose pointer to a m X k matrix (C)
/// to store the result
/// return: none
///
/// Host reference N x N matrix product, accumulated into h_result
/// (h_result += h_a * h_b, so the caller must zero it beforehand).
/// Used only to validate the GPU result.
void cpu_matrix_mult(float *h_a, float *h_b, float *h_result, int N) {
    int row, col, k;
    #pragma omp parallel for schedule(dynamic,50) collapse(2) private(row,col,k) shared(h_a,h_b,h_result)
    for (row = 0; row < N; row++) {
        for (col = 0; col < N; col++) {
            for (k = 0; k < N; k++) {
                h_result[row * N + col] += h_a[row * N + k] * h_b[k * N + col];
            }
        }
    }
}
///
/// function name: main
///
/// description: test and compare
///
/// parameters:
/// none
///
/// return: none
///
/// Benchmark driver: multiplies two random N x N matrices on the GPU and
/// the CPU, validates the results against each other, and reports timings.
int main(int argc, char const *argv[])
{
    int N=2048;
    /// Fixed seed for illustration.
    srand(3333);
    /// allocate pinned host memory; h_cc is used to store the CPU result
    float *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a, sizeof(float)*N*N);
    cudaMallocHost((void **) &h_b, sizeof(float)*N*N);
    cudaMallocHost((void **) &h_c, sizeof(float)*N*N);
    cudaMallocHost((void **) &h_cc, sizeof(float)*N*N);
    /// random initialize matrix A
    for (int j = 0; j < N; ++j) {
        for (int i = 0; i < N; ++i) {
            h_a[j*N + i] = rand() % 1024;
        }
    }
    /// random initialize matrix B
    for (int j = 0; j < N; ++j) {
        for (int i = 0; i < N; ++i) {
            h_b[j*N + i] = rand() % 1024;
        }
    }
    /// Zero both result buffers. FIX: h_cc was left uninitialized, but
    /// cpu_matrix_mult accumulates with +=, so the validation below
    /// previously compared against garbage.
    for (int j = 0; j < N; ++j) {
        for (int i = 0; i < N; ++i) {
            h_c[j*N + i] = 0.0;
            h_cc[j*N + i] = 0.0;
        }
    }
    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
    /// some events to count the execution time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /// start to count execution time of GPU version
    cudaEventRecord(start, 0);
    /// Allocate memory space on the device
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(float)*N*N);
    cudaMalloc((void **) &d_b, sizeof(float)*N*N);
    cudaMalloc((void **) &d_c, sizeof(float)*N*N);
    /// copy matrix A and B from host to device memory
    /// FIX: sizes were computed with sizeof(int); the element type is float
    /// (same width by accident, but wrong idiom).
    cudaMemcpy(d_a, h_a, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float)*N*N, cudaMemcpyHostToDevice);
    unsigned int grid_rows = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    /// Launch kernel
    gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N);
    /// Transfer results from device to host (blocking copy)
    cudaMemcpy(h_c, d_c, sizeof(float)*N*N, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();   /// FIX: cudaThreadSynchronize is deprecated
    /// time counting terminate
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    /// compute time elapse on GPU computing
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
    /// start the CPU version
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on on CPU: %f ms.\n\n", cpu_elapsed_time_ms);
    /// validate results computed by GPU
    bool all_ok = true;
    for (int j = 0; j < N; ++j) {
        for (int i = 0; i < N; ++i) {
            if(fabs(h_c[j*N + i] - h_cc[j*N + i]) > 1.e-4) {
                all_ok = false;
            }
        }
    }
    /// roughly compute speedup
    if(all_ok) {
        printf("all results are correct!!!, speedup = %f\n",
               cpu_elapsed_time_ms / gpu_elapsed_time_ms);
    } else {
        printf("incorrect results\n");
    }
    /// free memory (events included — they were previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
|
12,019 | #pragma once
// #include "fixnum/word_fixnum.cu"
// One element of a quadratic extension over the base field: stores the two
// coordinates of a0 + a1*X as modnums (X^2 = alpha, per the quad_ext
// arithmetic defined below — confirm against callers).
template < typename fixnum>
class quad_ext_element {
public:
typedef fixnum modnum;
modnum a0;   // constant coordinate
modnum a1;   // extension coordinate
// Default ctor deliberately leaves the coordinates uninitialized.
__device__ quad_ext_element() { }
};
// Arithmetic for the quadratic extension field: elements a0 + a1*X with
// X^2 = alpha, with all coordinate operations delegated to the `monty`
// modular-arithmetic backend.
template < typename fixnum, typename monty >
class quad_ext {
public:
typedef fixnum modnum;
monty mod;      // modular arithmetic backend
modnum alpha;   // non-residue defining the extension, kept in modnum form
// NOTE(review): this typedef shadows the class template name
// quad_ext_element inside this scope — legal but confusing; confirm intended.
typedef quad_ext_element<fixnum> quad_ext_element;
// Builds the field context and converts alpha into the backend's
// internal (modnum) representation.
__device__ quad_ext(fixnum modulus, fixnum _alpha) : mod(modulus), alpha(_alpha) {
modnum t;
mod.to_modnum(t, alpha);
alpha = t;
}
// Convert both coordinates of z into modnum form, in place.
__device__ void to_modnum(quad_ext_element &z) {
modnum t0, t1;
mod.to_modnum(t0, z.a0);
mod.to_modnum(t1, z.a1);
z.a0 = t0; z.a1 = t1;
}
// Convert both coordinates of z back out of modnum form, in place.
__device__ void from_modnum(quad_ext_element &z) {
fixnum t0, t1;
mod.from_modnum(t0, z.a0);
mod.from_modnum(t1, z.a1);
z.a0 = t0; z.a1 = t1;
}
// z = x + y, coordinate-wise.
__device__ void add(quad_ext_element &z, quad_ext_element &x, quad_ext_element &y) {
modnum t0, t1;
mod.add(t0, x.a0, y.a0);
mod.add(t1, x.a1, y.a1);
z.a0 = t0; z.a1 = t1;
}
// z = x * y, using (a0 + a1*X)(b0 + b1*X) = (a0*b0 + alpha*a1*b1)
//                                          + (a0*b1 + a1*b0)*X.
__device__ void mul(quad_ext_element &z, quad_ext_element x, quad_ext_element y) {
modnum t0, t1, t2;
// c0 = a0*b0 + alpha*a1*b1
mod.mul(t0, x.a0, y.a0);
mod.mul(t1, x.a1, y.a1);
mod.mul(t2, alpha, t1);
mod.add(z.a0, t0, t2);
// c1 = a0*b1 + a1*b0
mod.mul(t0, x.a0, y.a1);
mod.mul(t1, x.a1, y.a0);
mod.add(z.a1, t0, t1);
}
};
|
12,020 | #include "GpuFocalProcessing.cuh"
#include "GpuTimer.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>
using namespace winGpu;
// One thread per output pixel: weighted sum of the sideSize x sideSize
// neighborhood around (h, w). `rowIter` offsets the row index so the raster
// can be processed in row chunks across multiple launches.
__global__ void applyFocalOpGpu(FocalRasterGpu rasterInput, FocalRasterGpu rasterOutput, FocalKernelGpu kernel, int rowIter)
{
int h = blockDim.x * blockIdx.x + threadIdx.x + rowIter;
int w = blockDim.y * blockIdx.y + threadIdx.y;
if (rasterInput.height <= h || rasterInput.width <= w)
{
return;
}
// Nodata pixels are passed through unchanged.
if (rasterInput(h, w) == rasterInput.defaultValue)
{
rasterOutput(h, w) = rasterInput(h, w);
return;
}
double sum = 0.0;
for (int i = 0; i < kernel.sideSize; ++i)
{
for (int j = 0; j < kernel.sideSize; ++j)
{
// NOTE(review): no explicit edge guard here — for border pixels the
// neighborhood indices go outside [0, height) x [0, width); this is
// only safe if FocalRasterGpu::operator() clamps or pads. Confirm.
pixel value = rasterInput(h + (i - kernel.midSize), w + (j - kernel.midSize));
// A neighborhood touching nodata also passes the pixel through.
if (value == rasterInput.defaultValue)
{
rasterOutput(h, w) = rasterInput(h, w);
return;
}
sum += kernel[i][j] * value;
}
}
// Negative sums are clamped to zero before the cast back to pixel.
if (sum <= 0)
{
sum = 0.0;
}
rasterOutput(h, w) = (pixel)sum;
}
// Run the focal (neighborhood-weighted) operator over `input` on the GPU,
// processing the raster in row chunks, and write the result to `output`.
// `matrix` is the flattened square kernel. Returns the measured GPU time (ms).
double winGpu::doFocalOpGpu(pixel* input, int height, int width, pixel* output, std::vector<double> matrix)
{
    // Input raster descriptor (device buffer attached below)
    FocalRasterGpu rasterInput;
    rasterInput.height = height;
    rasterInput.width = width;
    rasterInput.data = 0;
    // Output raster descriptor
    FocalRasterGpu rasterOutput;
    rasterOutput.height = height;
    rasterOutput.width = width;
    rasterOutput.data = 0;
    // Kernel descriptor: host-side copy holds the weights, device-side copy
    // gets its own buffer.
    FocalKernelGpu kernelTemp;
    kernelTemp.sideSize = (int)std::sqrt(matrix.size());
    kernelTemp.ker = matrix.data();
    kernelTemp.midSize = kernelTemp.sideSize / 2;
    FocalKernelGpu kernel;
    kernel.sideSize = kernelTemp.sideSize;
    kernel.midSize = kernelTemp.midSize;
    kernel.ker = 0;
    cudaSetDevice(0);
    cudaMalloc((void**)&rasterInput.data, rasterInput.size());
    cudaMalloc((void**)&rasterOutput.data, rasterOutput.size());
    cudaMalloc((void**)&kernel.ker, kernel.size());
    cudaMemcpy(rasterInput.data, input, rasterInput.size(), cudaMemcpyHostToDevice);
    cudaMemcpy(kernel.ker, kernelTemp.ker, kernel.size(), cudaMemcpyHostToDevice);
    // Split the raster into row chunks so one launch never exceeds
    // maxAvaliableCoords pixels.
    const size_t maxAvaliableCoords = 8000000;
    int countRowsPerIter = maxAvaliableCoords / width;
    int countIter = height / countRowsPerIter + 1;
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(countRowsPerIter / threadsPerBlock.x + 1, width / threadsPerBlock.y + 1);
    float time;
    GPU_TIMER_START;
    for (int i = 0; i < countIter; i++)
    {
        // FIX: removed a dead debug local (`int k = 5;`) and an unused
        // `size` computation that previously lived in this function.
        int rowIter = i * countRowsPerIter;
        applyFocalOpGpu << <numBlocks, threadsPerBlock >> > (rasterInput, rasterOutput, kernel, rowIter);
        cudaDeviceSynchronize();
    }
    GPU_TIMER_STOP(time);
    cudaMemcpy(output, rasterOutput.data, rasterOutput.size(), cudaMemcpyDeviceToHost);
    cudaFree(rasterInput.data);
    cudaFree(rasterOutput.data);
    cudaFree(kernel.ker);
    return (double)time;
}
|
12,021 | #include "includes.h"
// Clamp each weight update so its magnitude never exceeds |weight * coef|,
// preserving the update's sign. One thread per element, 2D grid flattened.
__global__ void constrain_weight_updates_kernel(int N, float coef, float *weights_gpu, float *weight_updates_gpu)
{
    const int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    const float update = weight_updates_gpu[idx];
    const float limit = fabs(weights_gpu[idx] * coef);
    if (fabs(update) > limit) {
        // sign(update): 0 stays 0, otherwise +/-1
        const float sign = (update == 0) ? 0 : (fabs(update) / update);
        weight_updates_gpu[idx] = limit * sign;
    }
} |
12,022 | #include <cfloat>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include <cooperative_groups.h>
using namespace cooperative_groups;
#include "tensor_op_gpu.cuh"
// Apply exp element-wisely to an array d_A
// d_A(m) = exp(d_A(n))
// Element-wise exponential: Out[i] = exp(A[i]) for i in [0, N).
__global__ void g_exp(float *A, float *Out, int N) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    Out[idx] = expf(A[idx]);
}
// Element-wise exp fused with a reduction: writes exp(inmat[i]) to outmat[i]
// and atomically accumulates it into outsumvec at (iter_idx*M_dim + i%M_dim),
// i.e. elements sharing (i % M_dim) within one M_dim x N_dim slice are summed
// together — assumes a column-major-style layout; confirm with callers.
// outsumvec must be zeroed before launch.
__global__ void g_exp_sum(float *inmat, float *outmat, float *outsumvec, int M_dim, int N_dim, int iters) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int Total_dim = M_dim * N_dim;
if (i < M_dim * N_dim * iters) {
int iter_idx = i / (Total_dim);
float in_val = inmat[i];
float exp_in_val = expf(in_val);
outmat[i] = exp_in_val;
atomicAdd(outsumvec + (iter_idx * M_dim) + (i % M_dim), exp_in_val);
}
}
// Divide PQ by S for each question
// m: num_sentences
// n: num_questions
// Divide each element of _mat (iters slices of M_dim x N_dim) by the matching
// _vec entry, broadcast via (iter_idx*M_dim + i%M_dim) — the inverse of the
// sums produced by g_exp_sum (presumably the softmax denominator; confirm).
__global__ void g_normalize(float *_mat, const float *_vec, int M_dim, int N_dim, int iters) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int Total_dim = M_dim * N_dim;
if (i < M_dim * N_dim * iters) {
int iter_idx = i / Total_dim;
_mat[i] /= _vec[(iter_idx * M_dim) + (i % M_dim)];
}
}
// Scale every score by norm_factor and subtract the attention-mask value.
// The mask is a single M_dim x N_dim slice broadcast across all `iters`
// slices (index i % Total_dim). Note the mask is *subtracted*, so masked
// positions are expected to hold large positive values — confirm convention.
__global__ void g_score_norm_layer_mask(float *_mat, float norm_factor, const float *attention_mask, int M_dim, int N_dim, int iters)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int Total_dim = M_dim * N_dim;
if (i < M_dim * N_dim * iters) {
float in_val = _mat[i];
float mask_val = attention_mask[i % Total_dim];
_mat[i] = in_val * norm_factor - mask_val;
}
}
// Accumulate per-group means: each element of the M_dim x N_dim matrix adds
// in_val/N_dim into outmeanvec[i % M_dim] via atomicAdd. Assumes outmeanvec
// is zeroed before launch; float addition order (and thus rounding) is
// nondeterministic across runs.
__global__ void g_layer_mean(float *inmat, float *outmeanvec, const int M_dim, const int N_dim) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < M_dim * N_dim) {
float in_val = inmat[i];
atomicAdd(outmeanvec + (i % M_dim), in_val / (float) N_dim);
}
}
// outmat[i] = inmat[i] - invec[i % M_dim]: subtract a length-M_dim vector,
// broadcast across the M_dim x N_dim matrix (e.g. mean removal after
// g_layer_mean).
__global__ void g_layer_minus(float *inmat, float *invec, float *outmat, const int M_dim, const int N_dim) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < M_dim * N_dim) {
float in_val = inmat[i];
float in_vec_val = invec[i % M_dim];
outmat[i] = in_val - in_vec_val;
}
}
// Fused layer normalization attempt: zeroes tmpvec, accumulates the mean,
// subtracts it from inmat IN PLACE, accumulates the squared norm, takes its
// sqrt, then applies the gamma/beta affine transform into outmat.
//
// NOTE(review): the phases communicate through tmpvec via atomicAdd, which is
// GRID-wide, but the only barriers are block-scope (this_thread_block().sync())
// and two of the intended __syncthreads() calls are commented out entirely.
// With more than one block (i > blockDim.x) the zeroing/mean/minus/norm
// phases race against each other. Confirm the intended launch configuration
// (single block?) or replace with separate kernel launches / a grid sync.
__global__ void g_layer_norm(float *inmat, float *outmat, float *tmpvec, float *gamma, float *beta, const int M_dim, const int N_dim) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Phase 0: zero the scratch vector (replaces a host-side cudaMemsetAsync).
// cudaMemsetAsync(gpu_context->d_buf_layernorm_nrm_v[batch_idx], 0, M_dim * 1 * sizeof(float), gpu_context->streams[stream_idx]);
if (i < M_dim) {
tmpvec[i] = 0.0f;
}
// __syncthreads();
// Phase 1: accumulate per-group means (same scheme as g_layer_mean).
if (i < M_dim * N_dim) {
float in_val = inmat[i];
atomicAdd(tmpvec + (i % M_dim), in_val / (float) N_dim);
}
// __syncthreads();
// Phase 2: subtract the mean from inmat in place (same as g_layer_minus).
if (i < M_dim * N_dim) {
float in_val = inmat[i];
float in_vec_val = tmpvec[i % M_dim];
inmat[i] = in_val - in_vec_val;
}
/// Phase 3: re-zero tmpvec and accumulate the squared 2-norm.
// cudaMemsetAsync
if (i < M_dim) {
tmpvec[i] = 0.0f;
}
this_thread_block().sync();
// __syncthreads();
// g_layer_snrm2
if (i < M_dim * N_dim) {
float in_val = inmat[i];
atomicAdd(tmpvec + (i % M_dim), in_val * in_val);
}
this_thread_block().sync();
// __syncthreads();
// Phase 4: tmpvec <- sqrt(sum of squares), i.e. the 2-norm.
if (i < M_dim) {
tmpvec[i] = sqrtf(tmpvec[i]);
}
this_thread_block().sync();
// __syncthreads();
// Phase 5: normalize by sqrt(norm^2/N + eps) and apply per-element
// gamma/beta (same formula as g_layer_norm_gamma_beta).
if (i < M_dim * N_dim) {
float _var_reciprocal = 1.0f / (sqrtf(tmpvec[i % M_dim] * tmpvec[i % M_dim] / (float)N_dim + FLT_EPSILON));
outmat[i] = inmat[i] * _var_reciprocal;
outmat[i] = outmat[i] * gamma[i];
outmat[i] = outmat[i] + beta[i];
}
}
/**
*
* @param inmat: (M X N) matrix
* @param outnrmvec: (M X 1) matrix
* @param M_dim
* @param N_dim
*/
// Accumulate the squared 2-norm of each group: every element adds its square
// into outnrmvec[i % M_dim] via atomicAdd. outnrmvec must be zeroed before
// launch; a separate g_sqrt pass turns the sums into norms.
__global__ void g_layer_snrm2(float *inmat, float *outnrmvec, const int M_dim, const int N_dim) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < M_dim * N_dim) {
float in_val = inmat[i];
atomicAdd(outnrmvec + (i % M_dim), in_val * in_val);
}
}
// Final layer-norm step: outmat[i] = inmat[i] / sqrt(nrm^2/n + eps)
// * gamma[i] + beta[i], where nrmvec[i % m] holds the group's 2-norm
// (so nrm^2/n is the mean of squares, i.e. the variance of a centered
// input). gamma/beta are per-element, not per-group.
__global__ void g_layer_norm_gamma_beta(float *inmat, float *outmat, float *nrmvec, float *gamma, float *beta, const int m, const int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < m * n) {
float _var_reciprocal = 1.0f / (sqrtf(nrmvec[i % m] * nrmvec[i % m] / (float)n + FLT_EPSILON));
outmat[i] = inmat[i] * _var_reciprocal;
outmat[i] = outmat[i] * gamma[i];
outmat[i] = outmat[i] + beta[i];
}
}
// In-place element-wise square root over the first N entries of a.
__global__ void g_sqrt(float *a, int N) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    a[idx] = sqrtf(a[idx]);
}
// Element-wise GELU activation using the tanh approximation:
// gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
__global__ void g_gelu(float *In, float *Out, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
const float scale = sqrtf(2.0f / CUDART_PI);
float in_val = In[i];
// cdf holds 1 + tanh(...) here; halved below to give 0.5*(1 + tanh(...)).
float cdf = 1.0f + tanhf(scale * (in_val + 0.044715f * (in_val * in_val * in_val)));
cdf *= 0.5f;
Out[i] = in_val * cdf;
}
} |
12,023 |
/* check-dimension.cu */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Diagnostic kernel: every thread prints its full coordinate set —
// threadIdx, blockIdx, and the blockDim/gridDim of the launch.
__global__ void checkDimension(void)
{
printf("threadIdx: (%d, %d, %d), blockIdx: (%d, %d, %d), "
"blockDim: (%d, %d, %d), gridDim: (%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, gridDim.z);
}
// Configure a 1D launch covering 6 elements with 3 threads per block,
// print the chosen geometry from the host, then have every device thread
// report its own coordinates.
int main(int argc, char** argv)
{
    const int numOfElements = 6;
    const dim3 block(3);
    // ceil-div so the grid always covers all elements
    const dim3 grid((numOfElements + block.x - 1) / block.x);
    printf("grid.x: %d, grid.y: %d, grid.z: %d\n",
           grid.x, grid.y, grid.z);
    printf("block.x: %d, block.y: %d, block.z: %d\n",
           block.x, block.y, block.z);
    checkDimension<<<grid, block>>>();
    // Device printf output is flushed at synchronization.
    cudaDeviceSynchronize();
    return EXIT_SUCCESS;
}
|
12,024 | #include "includes.h"
// Full 2D cross-correlation: d_icorr = xcorr(d_i1 [m1 x n1], d_i2 [m2 x n2]),
// output size (m1+m2-1) x (n1+n2-1). Arrays are stored column-major
// (element (r,c) at r + c*rows). One thread per output pixel; out-of-range
// overlaps contribute zero, giving the usual zero-padded boundary behavior.
__global__ void xcorr(float *d_i1, float *d_i2, float *d_icorr, int m1, int n1, int m2, int n2)
{
//grab theadID location
int tx = threadIdx.x;
int ty = threadIdx.y;
// get output values based on block and thread locations
int row_out = blockIdx.y*blockDim.y + ty;
int col_out = blockIdx.x*blockDim.x + tx;
// Get starting value for the convolution as dictated by m2 and n2
// we'll use i1 indicies as the coord syst.
int row_st = row_out - (m2 - 1);
int col_st = col_out - (n2 - 1);
// correlation variable
float corr=0; // initialize correlation variable
if (row_out >= 0 && row_out < m1 + m2 - 1 && col_out >= 0 && col_out < n1 + n2 - 1) // ensure output is within bounds of correlation image
{
// Buffering into memory would be 1 call to a global variable, From there we need 1 call for each multiplication, however we only need to make 1 call to a global
// variable for the multiplication and move on, as such it doesn't make sense to buffer these images into local memory
for (int i = 0; i < m2; i++) { // loop over the template, accumulating products where it overlaps the image
for (int j = 0; j < n2; j++)
{
if (row_st + i >= 0 && row_st + i < m1 && col_st + j >= 0 && col_st + j < n1) { // if row start and col start are greater than 0 and less than the number of pixels available perform convolution
corr += d_i1[row_st + i + (col_st + j) * m1] * d_i2[i + j * m2];
}
else {} // if else is invoked it's because row_st and col_st are outside of im1 bounds and the convolution should be left alone
}
}
d_icorr[row_out + col_out*(m1 + m2 - 1)] = corr; // assign correlation variable to proper location in final image
}
else{}
} |
12,025 | #include <cstdio>
#include <cassert>
#include <sys/time.h>
struct timeval tGS, tGF, tKS, tKF, tCS, tCF;
// Matrices are stored in row-major order:
// M(row, col) = M.elements[row * M.width + col]
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Matrix sum kernel, called by MatrixSum()
// Matrix sum kernel, called by MatSum(): one thread per element.
// FIX: added a bounds guard so a rounded-up grid never writes past the
// matrix edge.
__global__ void MatSumKernel(Matrix A, Matrix B, Matrix C)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int idy = threadIdx.y + blockDim.y * blockIdx.y;
    if (idx < C.width && idy < C.height)
        C.elements[idx+C.width*idy] =
            A.elements[idx+A.width*idy] + B.elements[idx+B.width*idy];
}
// Host code: C = A + B on the GPU, timing the kernel via tKS/tKF.
void MatSum(const Matrix A, const Matrix B, Matrix C)
{
    // Allocate A, B and C in device memory
    int size = A.width*A.height * sizeof(float);
    Matrix d_A; Matrix d_B; Matrix d_C;
    d_A.width = A.width; d_A.height = A.height;
    d_B.width = B.width; d_B.height = B.height;
    d_C.width = C.width; d_C.height = C.height;
    cudaMalloc((void**)&(d_A.elements), size);
    cudaMalloc((void**)&(d_B.elements), size);
    cudaMalloc((void**)&(d_C.elements), size);
    // Load A and B to device memory. (C is fully overwritten by the
    // kernel, so its previous upload was unnecessary and is removed.)
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Invoke kernel
    gettimeofday(&tKS, 0);
    dim3 nThreads(16, 16, 1);
    // FIX: grid size used truncating division, silently skipping the last
    // partial tile whenever a dimension was not a multiple of 16; round up
    // instead (the kernel's bounds guard handles the overrun threads).
    dim3 nBlocks((d_A.width + 15)/16, (d_A.height + 15)/16, 1);
    MatSumKernel<<<nBlocks, nThreads>>>(d_A, d_B, d_C);
    cudaDeviceSynchronize();
    gettimeofday(&tKF, 0);
    // Read C from device memory
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Usage: prog nLines nColumns — builds two matrices, sums them on the GPU,
// verifies every element against a CPU sum, and prints timing breakdowns.
int main(int argc, char **argv) {
if (argc < 3) {
printf ("Usage: %s nLines nColumns\n", argv[0]);
return -1;
}
int nLines = atoi (argv[1]);
int nColumns = atoi (argv[2]);
int i,j;
Matrix A, B, C;
A.width = nColumns; A.height = nLines;
B.width = nColumns; B.height = nLines;
C.width = nColumns; C.height = nLines;
A.elements = (float *)malloc(A.width*A.height * sizeof(float));
B.elements = (float *)malloc(B.width*B.height * sizeof(float));
C.elements = (float *)malloc(C.width*C.height * sizeof(float));
// Deterministic fill patterns so the CPU check below is reproducible.
for (i=0; i<A.width*A.height; i++)
A.elements[i] = i % 10 + 1;
for (i=0; i<B.width*B.height; i++)
B.elements[i] = (i+1) % 11 + 1;
for (i=0; i<C.width*C.height; i++)
C.elements[i] = 0;
// Performs the sum
gettimeofday(&tGS, 0);
MatSum(A, B, C);
gettimeofday(&tGF, 0);
//Checks the result element-by-element against a host-side sum.
// NOTE(review): fabs is used without including <cmath>/<math.h> directly —
// presumably pulled in transitively; confirm.
gettimeofday(&tCS, 0);
for (i=0; i<nColumns; i++) {
for (j=0; j<nLines; j++) {
float ctmp = A.elements[j*nColumns+i] + B.elements[j*nColumns+i];
assert( fabs(ctmp - C.elements[j*nColumns+i]) < 0.001);
}
}
gettimeofday(&tCF, 0);
// total = whole run; kernel = device kernel only; gpu = MatSum incl.
// transfers; cpu = host verification loop.
printf(" total=%8.2f\n", (tCF.tv_sec*1000. + tCF.tv_usec/1000.) - (tGS.tv_sec*1000. + tGS.tv_usec/1000.));
printf("kernel=%8.2f\n", (tKF.tv_sec*1000. + tKF.tv_usec/1000.) - (tKS.tv_sec*1000. + tKS.tv_usec/1000.));
printf("   gpu=%8.2f\n", (tGF.tv_sec*1000. + tGF.tv_usec/1000.) - (tGS.tv_sec*1000. + tGS.tv_usec/1000.));
printf("   cpu=%8.2f\n", (tCF.tv_sec*1000. + tCF.tv_usec/1000.) - (tCS.tv_sec*1000. + tCS.tv_usec/1000.));
return 0;
}
|
12,026 | #include "includes.h"
// Double the intensity of every pixel: d_Pout = 2 * d_Pin for an m-row by
// n-column image, one thread per pixel (x -> column, y -> row).
__global__ void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= n)
        return;
    const int offset = row * n + col;
    d_Pout[offset] = 2 * d_Pin[offset];
} |
12,027 | #ifndef _GPU_ADD_CU_
#define _GPU_ADD_CU_
#include <stdio.h> //size_t
#include <math.h> //fabs
#include <cstring> //memcpy
#include <iostream> //cout
//CUDA libraries
#include "cublas_v2.h"
//HEADER file
#include "gpuadd.cuh"
//the array index of a matrix element in row “r” and column “c” can be computed via the following macro
#define IDX2C(r,c,rows) (((c)*(rows))+(r))
// Element-wise addition d_C = d_A + d_B for an Am x An matrix laid out with
// row stride An; one thread per element, guarded against the rounded-up grid.
__global__ void gpuaddkernel(double *d_A, double *d_B, double *d_C, size_t Am, size_t An)
{
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (col < An && row < Am) {
        const int ind = col + An * row;   // linear index into the flat array
        d_C[ind] = d_A[ind] + d_B[ind];
    }
}
// C = A + B on the GPU via cuBLAS: d_C is loaded with B, then 1.0 * d_A is
// accumulated into it with axpy, and the result is copied back to C.
// FIX: several error paths leaked device memory (d_A when the d_C
// allocation failed, d_C when the A download failed, both when
// cublasCreate failed); every exit now releases what was acquired.
void gpuaddcublas(double *A, double *B, double *C, size_t Am, size_t An)
{
    cudaError_t cudaStat;
    cublasStatus_t stat;
    cublasHandle_t handle;
    double *d_A, *d_C;
    if (!A || !B || !C) {
        printf ("Error in gpuaddcublas: input failed\n");
        return;
    }
    cudaStat = cudaMalloc ((void**)&d_A, Am*An*sizeof(double));
    if (cudaStat != cudaSuccess) {
        printf ("device memory allocation failed\n");
        return;
    }
    cudaStat = cudaMalloc ((void**)&d_C, Am*An*sizeof(double));
    if (cudaStat != cudaSuccess) {
        printf ("device memory allocation failed\n");
        cudaFree (d_A);
        return;
    }
    stat = cublasCreate(&handle);
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS initialization failed\n");
        cudaFree (d_A);
        cudaFree (d_C);
        return;
    }
    // Upload A into d_A.
    stat = cublasSetVector(Am*An, sizeof(double), (void*)A, 1, (void*)d_A, 1);
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("data download for A failed: %d\n", stat);
        cudaFree (d_A);
        cudaFree (d_C);
        cublasDestroy(handle);
        return;
    }
    // Upload B directly into the accumulator d_C.
    stat = cublasSetVector(Am*An, sizeof(double), (void*)B, 1, (void*)d_C, 1);
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("data download for B failed\n");
        cudaFree (d_A);
        cudaFree (d_C);
        cublasDestroy(handle);
        return;
    }
    // d_C += 1.0 * d_A, treating both matrices as flat vectors.
    double alpha = 1.;
    stat = cublasAxpyEx (handle,Am*An, &alpha, CUDA_R_64F, (void *)d_A, CUDA_R_64F, 1, (void *)d_C, CUDA_R_64F, 1, CUDA_R_64F);
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("data adding failed\n");
        cudaFree (d_A);
        cudaFree (d_C);
        cublasDestroy(handle);
        return;
    }
    // Download the sum back into C.
    stat = cublasGetVector(Am*An, sizeof(double), (void *)d_C, 1, (void*)C, 1);
    if (stat != CUBLAS_STATUS_SUCCESS) {
        printf ("data upload failed\n");
        cudaFree (d_A);
        cudaFree (d_C);
        cublasDestroy(handle);
        return;
    }
    cudaFree (d_A);
    cudaFree (d_C);
    cublasDestroy(handle);
    return;
}
// Adds A and B twice — once with the custom kernel, once with cuBLAS —
// compares the two results element-wise, and copies the kernel result into C.
// FIX: the scratch buffers were allocated with new[] but released with
// free(), which is undefined behavior; they are now released with delete[].
void gpuadd(double *A, double *B, double *C, size_t Am, size_t An)
{
    /* set GPU grid & block configuration */
    cudaDeviceProp deviceProp;
    memset( &deviceProp, 0, sizeof(deviceProp));
    if( cudaSuccess != cudaGetDeviceProperties(&deviceProp,0)){
        printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
        return;
    }
    dim3 block = dim3(32,8,1); // 32*8*1 = 256 threads per block
    // ensure enough blocks to cover w * h elements (round up)
    dim3 grid = dim3( ( An + block.x -1 ) / block.x, ( Am + block.y - 1 ) / block.y, 1);
    /* allocate device memory for matrices and upload the operands */
    double *d_A = NULL;
    cudaMalloc( (void**) &d_A, Am * An * sizeof(double)) ;
    cudaMemcpy( d_A, A, Am * An * sizeof(double), cudaMemcpyHostToDevice) ;
    double *d_B = NULL;
    cudaMalloc( (void**) &d_B, Am * An * sizeof(double)) ;
    cudaMemcpy( d_B, B, Am * An * sizeof(double), cudaMemcpyHostToDevice) ;
    double *d_C = NULL;
    cudaMalloc( (void**) &d_C, Am * An * sizeof(double)) ;
    std::cout << "Values/Vectors/Matrices are added using a self-implemented kernel" << std::endl;
    /* call GPU kernel for addition */
    gpuaddkernel<<< grid, block >>>(d_A, d_B, d_C, Am, An);
    cudaDeviceSynchronize();
    /* copy result from device */
    double *C_kernel = new double [Am*An];
    cudaMemcpy( C_kernel, d_C, Am * An * sizeof(double), cudaMemcpyDeviceToHost) ;
    /* free GPU memory */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    /* now add the same data with cublas */
    std::cout << "Addtionally, the same values are added using cublas" << std::endl;
    double *C_cublas = new double [Am*An];
    gpuaddcublas(A, B, C_cublas, Am, An);
    /* compare results element-wise with an absolute tolerance */
    std::cout << "And now both results will be compared..." << std::endl;
    bool equal = true;
    for (size_t ii=0; ii<Am*An; ii++)
        if (!(fabs(C_kernel[ii]-C_cublas[ii])<0.001))
            equal = false;
    if (equal)
    {
        std::cout << "Yeah, both arrays have the same values" << std::endl << std::endl;
    }
    else
    {
        std::cout << "Oh no, cublas and your own kernel differ too much in results." << std::endl;
        std::cout << "Copy kernel results now, but handle with caution." << std::endl << std::endl;
    }
    // Either way the caller receives the kernel result.
    std::memcpy( (void*)C, (void*) C_kernel, Am * An * sizeof(double) );
    delete [] C_kernel;
    delete [] C_cublas;
}
#endif
|
12,028 | #include <sys/time.h>
#include "cuda_runtime.h"
#include "yuyv2rgb.cuh"
static __device__ const unsigned char uchar_clipping_table[] = {
0,
0,
0,
0,
0,
0,
0,
0, // -128 - -121
0,
0,
0,
0,
0,
0,
0,
0, // -120 - -113
0,
0,
0,
0,
0,
0,
0,
0, // -112 - -105
0,
0,
0,
0,
0,
0,
0,
0, // -104 - -97
0,
0,
0,
0,
0,
0,
0,
0, // -96 - -89
0,
0,
0,
0,
0,
0,
0,
0, // -88 - -81
0,
0,
0,
0,
0,
0,
0,
0, // -80 - -73
0,
0,
0,
0,
0,
0,
0,
0, // -72 - -65
0,
0,
0,
0,
0,
0,
0,
0, // -64 - -57
0,
0,
0,
0,
0,
0,
0,
0, // -56 - -49
0,
0,
0,
0,
0,
0,
0,
0, // -48 - -41
0,
0,
0,
0,
0,
0,
0,
0, // -40 - -33
0,
0,
0,
0,
0,
0,
0,
0, // -32 - -25
0,
0,
0,
0,
0,
0,
0,
0, // -24 - -17
0,
0,
0,
0,
0,
0,
0,
0, // -16 - -9
0,
0,
0,
0,
0,
0,
0,
0, // -8 - -1
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182,
183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, // 256-263
255, 255, 255, 255, 255, 255, 255, 255, // 264-271
255, 255, 255, 255, 255, 255, 255, 255, // 272-279
255, 255, 255, 255, 255, 255, 255, 255, // 280-287
255, 255, 255, 255, 255, 255, 255, 255, // 288-295
255, 255, 255, 255, 255, 255, 255, 255, // 296-303
255, 255, 255, 255, 255, 255, 255, 255, // 304-311
255, 255, 255, 255, 255, 255, 255, 255, // 312-319
255, 255, 255, 255, 255, 255, 255, 255, // 320-327
255, 255, 255, 255, 255, 255, 255, 255, // 328-335
255, 255, 255, 255, 255, 255, 255, 255, // 336-343
255, 255, 255, 255, 255, 255, 255, 255, // 344-351
255, 255, 255, 255, 255, 255, 255, 255, // 352-359
255, 255, 255, 255, 255, 255, 255, 255, // 360-367
255, 255, 255, 255, 255, 255, 255, 255, // 368-375
255, 255, 255, 255, 255, 255, 255, 255, // 376-383
};
/** Clip a value to the range 0<val<255. For speed this is done using an
* array, so can only cope with numbers in the range -128<val<383.
*/
static __device__ unsigned char CLIPVALUE(int val)
{
// Old method (if)
/* val = val < 0 ? 0 : val; */
/* return val > 255 ? 255 : val; */
// New method (array): the 512-entry table maps val==-128 to index 0, so
// the lookup is only valid for -128 <= val <= 383; anything outside that
// window reads past the table.
const int clipping_table_offset = 128;
return uchar_clipping_table[val + clipping_table_offset];
}
// Convert one YUV sample (8-bit, U/V biased by 128) to RGB using fixed-point
// coefficients (>>15 scaling), then clip each channel to [0, 255] via
// CLIPVALUE. Results are written through the r/g/b out-pointers.
static __device__ void YUV2RGB(const unsigned char y, const unsigned char u, const unsigned char v, unsigned char* r,
unsigned char* g, unsigned char* b)
{
const int y2 = (int)y;
const int u2 = (int)u - 128;
const int v2 = (int)v - 128;
//std::cerr << "YUV=("<<y2<<","<<u2<<","<<v2<<")"<<std::endl;
// This is the normal YUV conversion, but
// appears to be incorrect for the firewire cameras
/* int r2 = y2 + ( (v2*91947) >> 16); */
/* int g2 = y2 - ( ((u2*22544) + (v2*46793)) >> 16 ); */
/* int b2 = y2 + ( (u2*115999) >> 16); */
// This is an adjusted version (UV spread out a bit)
int r2 = y2 + ((v2 * 37221) >> 15);
int g2 = y2 - (((u2 * 12975) + (v2 * 18949)) >> 15);
int b2 = y2 + ((u2 * 66883) >> 15);
//std::cerr << "  RGB=("<<r2<<","<<g2<<","<<b2<<")"<<std::endl;
// Cap the values.
*r = CLIPVALUE(r2);
*g = CLIPVALUE(g2);
*b = CLIPVALUE(b2);
}
// Each thread converts one YUYV macropixel (4 bytes: Y0 U Y1 V) into two RGB
// pixels (6 bytes), so the input index advances by 4 and the output by 6 per
// thread. Both pixels share the same U/V pair.
// NOTE(review): there is no bounds check — the launch configuration must
// match the buffer size exactly (threads * 4 == YUV bytes); confirm callers.
__global__ void yuyv2rgb(char *YUV, char *RGB)
{
unsigned char y0, y1, u, v;
unsigned char r0, g0, b0;
unsigned char r1, g1, b1;
int nIn = blockIdx.x * blockDim.x * 4 + threadIdx.x * 4;
y0 = (unsigned char)YUV[nIn];
u = (unsigned char)YUV[nIn + 1];
y1 = (unsigned char)YUV[nIn + 2];
v = (unsigned char)YUV[nIn + 3];
YUV2RGB(y0, u, v, &r0, &g0, &b0);
YUV2RGB(y1, u, v, &r1, &g1, &b1);
int nOut = blockIdx.x * blockDim.x * 6 + threadIdx.x * 6;
RGB[nOut] = r0;
RGB[nOut + 1] = g0;
RGB[nOut + 2] = b0;
RGB[nOut + 3] = r1;
RGB[nOut + 4] = g1;
RGB[nOut + 5] = b1;
}
// Host wrapper: launch the YUYV->RGB kernel (one thread per macropixel,
// num_blocks x block_size threads total) and block until it completes.
void yuyv2rgb_cuda(char* YUV, char* RGB, int num_blocks, int block_size)
{
yuyv2rgb<<<num_blocks, block_size>>>(YUV, RGB);
cudaDeviceSynchronize();
}
/*
extern "C" void process_image_cuda(const void *src, int size)
{
struct timeval ts;
int yuv_size = size * sizeof(char);
gettimeofday(&ts, NULL);
printf("[%lu.%lu]\tbefore copy image_data(CPU to GPU)\n", ts.tv_sec, ts.tv_usec);
cudaError_t ret = cudaMemcpy(image_yuyv_cuda_, src, yuv_size, cudaMemcpyHostToDevice);
gettimeofday(&ts, NULL);
printf("[%lu.%lu]\tcopy image_data(CPU to GPU) done\n", ts.tv_sec, ts.tv_usec);
if (cudaSuccess != ret) {
printf("cudaMemcpy fail %d\n", ret);
}
const int block_size = 256;
const int num_blocks = yuv_size / (4*block_size);
gettimeofday(&ts, NULL);
printf("[%lu.%lu]\tbefore yuyv2rgb computation\n", ts.tv_sec, ts.tv_usec);
yuyv2rgb_cuda(image_yuyv_cuda_, image_rgb_cuda_, num_blocks, block_size);
gettimeofday(&ts, NULL);
printf("[%lu.%lu]\tyuyv2rgb computation done\n", ts.tv_sec, ts.tv_usec);
int rgb_size = size / 2 * 3 * sizeof(char);
gettimeofday(&ts, NULL);
printf("[%lu.%lu]\tbefore copy image_data(GPU to CPU)\n", ts.tv_sec, ts.tv_usec);
ret = cudaMemcpy(show_buf, image_rgb_cuda_, rgb_size, cudaMemcpyDeviceToHost);
gettimeofday(&ts, NULL);
printf("[%lu.%lu]\tcopy image_data(GPU to CPU) done\n", ts.tv_sec, ts.tv_usec);
printf("[%lu.%lu]\tcuda process image index = %d\n", ts.tv_sec, ts.tv_usec, ++index_pro);
if (cudaSuccess != ret) {
printf("cudaMemcpy fail %d\n", ret);
}
}
*/
|
12,029 | // Esercizio
// Dato array di N interi vec[] e un valore intero x conta quanti sono gli elementi di vec[] uguali a x
//
// x e N forniti da linea di comando // TODO
// host alloca vec[] e lo inizializza
// allocazione su device a nostra discrezione (trasferimento esplicito o mem mapped)
// effettua il calcolo con kernel da B blocks e 256 thread per block (calcolare B in realzione ad N)
// ogni thread accede ad un elem e verifica se uguale x, se lo e' aggiorna count con op atomica
// infine host recupera risultato e stampa (host verifica correttezza)
#include <locale>
#include <stdlib.h>
#include <iostream>
#include <experimental/random>
#include <time.h>
#define N 256*2+1
#define THREADS_PER_BLOCK 256
#define MAX_VAL 50
// Report a failed CUDA API call (error string + location) and abort.
static void HandleError( cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    std::cout << cudaGetErrorString( err ) << " in " << file << " line " << line << std::endl;
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
// Fill v[0..N-1] with uniform random integers in [0, MAX_VAL].
void init_vec(int *v) {
    int i = 0;
    while (i < N) {
        v[i] = std::experimental::randint(0, MAX_VAL);
        ++i;
    }
}
// Print the N elements of v as a comma-separated list framed by blank lines.
void show_vec(int *v) {
    std::cout << "\n";
    for (int i = 0; i < N; i++) {
        if (i)
            std::cout << ", ";
        std::cout << v[i];
    }
    std::cout << "\n" << std::endl;
}
// CPU reference: count how many of the N elements of v equal x.
// Fix: the original loop started at i=1, silently skipping v[0] and
// producing a wrong reference count whenever v[0] == x.
int cpu_count(int *v, const int x) {
    int c = 0;
    for (int i = 0; i < N; i++) {
        if (v[i] == x)
            c++;
    }
    return c;
}
// Ceiling integer division for non-negative operands:
// smallest q such that q * denominator >= numerator.
int div_ceil(int numerator, int denominator) {
    int q = numerator / denominator;
    if (numerator % denominator != 0)
        ++q;
    return q;
}
// Number of THREADS_PER_BLOCK-sized blocks needed to cover n elements;
// logs the resulting launch geometry to stdout.
int compute_num_blocks(int n) {
    const int blocks = div_ceil(n, THREADS_PER_BLOCK);
    std::cout << "\nn = " << n
              << "\nthreads = " << THREADS_PER_BLOCK
              << "\nb = " << blocks
              << "\nb*threads = " << blocks * THREADS_PER_BLOCK << "\n"
              << std::endl;
    return blocks;
}
__device__ int d_g_count = 0;
// Kernel: one thread per element; atomically bump the device-global
// counter d_g_count for every element of d_v equal to x.
// The printf calls are debug tracing and heavily serialize execution.
__global__ void gpu_count(int *d_v, const int x) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Guard: the grid is padded up to a multiple of the block size.
if (tid >= N)
return;
if (d_v[tid] == x) {
atomicAdd(&d_g_count, 1);
printf("%d\t%d - Increment\n", tid, d_v[tid]);
} else {
printf("%d\t%d\n", tid, d_v[tid]);
}
}
// Host driver: build a random vector, count elements equal to a random x
// on both GPU and CPU, and compare the results.
int main( void ) {
    size_t vec_size = ((size_t)N) * sizeof(int);

    // Seed the PRNG so each run uses a fresh vector; print it for replay.
    int seed = (int)time(NULL);
    std::experimental::reseed(seed);
    std::cout << "seed = " << seed << std::endl;

    // Value to search for.
    int x = std::experimental::randint(0, MAX_VAL);

    int v[N];
    init_vec(v);

    // Copy the vector to the device.
    int *dev_v;
    HANDLE_ERROR(cudaMalloc((void**)&dev_v, vec_size));
    HANDLE_ERROR(cudaMemcpy(dev_v, v, vec_size, cudaMemcpyHostToDevice));

    int g_count = 0;
    gpu_count<<<compute_num_blocks(N), THREADS_PER_BLOCK>>>(dev_v, x);
    // Fix: launch-configuration errors are only visible via cudaGetLastError().
    HANDLE_ERROR(cudaGetLastError());
    HANDLE_ERROR(cudaDeviceSynchronize());
    // Fix: check the symbol copy too; the original ignored its status.
    HANDLE_ERROR(cudaMemcpyFromSymbol(&g_count, d_g_count, sizeof(int)));

    int c_count = cpu_count(v, x);
    std::cout << "\nx = " << x << "\tMAX_VAL = " << MAX_VAL << "\n" << std::endl;
    std::cout << "c_count = " << c_count << std::endl;
    std::cout << "g_count = " << g_count << std::endl;
    if (c_count == g_count)
        std::cout << "\nCorrect" << std::endl;
    else
        std::cout << "\nWRONG!" << std::endl;

    // Fix: the original called cudaFree(&d_g_count); a __device__ symbol is
    // not a cudaMalloc allocation and must never be freed.
    HANDLE_ERROR(cudaFree(dev_v));
    return 0;
}
|
12,030 | /*
* Interdependency of Executing Streams
*/
#include<stdio.h>
// Dummy workload: accumulate tan(0.1)^2 ten times to keep the stream busy.
__global__ void kernel_1()
{
    double acc = 0.0;
    int iter = 0;
    while (iter < 10) {
        acc += tan(0.1) * tan(0.1);
        ++iter;
    }
}
// Dummy workload: accumulate tan(0.2)^2 twenty times.
__global__ void kernel_2()
{
    double acc = 0.0;
    int iter = 0;
    while (iter < 20) {
        acc += tan(0.2) * tan(0.2);
        ++iter;
    }
}
// Dummy workload: accumulate tan(0.3)^2 thirty times.
__global__ void kernel_3()
{
    double acc = 0.0;
    int iter = 0;
    while (iter < 30) {
        acc += tan(0.3) * tan(0.3);
        ++iter;
    }
}
// Dummy workload: accumulate tan(0.4)^2 forty times.
__global__ void kernel_4()
{
    double acc = 0.0;
    int iter = 0;
    while (iter < 40) {
        acc += tan(0.4) * tan(0.4);
        ++iter;
    }
}
// Launch 4 kernels on each of n_streams streams; every stream records an
// event that the last stream then waits on, so the last stream cannot
// proceed past that point until every other stream's work has completed.
int main()
{
    int n_streams = 5, i;
    cudaStream_t *streams = (cudaStream_t*)malloc(n_streams * sizeof(cudaStream_t));
    cudaEvent_t *kernelEvent = (cudaEvent_t*)malloc(n_streams * sizeof(cudaEvent_t));
    for (i = 0; i < n_streams; i++)
    {
        cudaEventCreateWithFlags(&kernelEvent[i], cudaEventDisableTiming);
    }
    for (i = 0; i < n_streams; i++)
    {
        cudaStreamCreate(&streams[i]);
    }
    dim3 block(1);
    dim3 grid(1);
    for (i = 0; i < n_streams; i++)
    {
        kernel_1<<<grid, block, 0, streams[i]>>>();
        kernel_2<<<grid, block, 0, streams[i]>>>();
        kernel_3<<<grid, block, 0, streams[i]>>>();
        kernel_4<<<grid, block, 0, streams[i]>>>();
        cudaEventRecord(kernelEvent[i], streams[i]);
        // Make the last stream wait for this stream's recorded event.
        cudaStreamWaitEvent(streams[n_streams - 1], kernelEvent[i], 0);
    }
    // Fix: wait for all asynchronous work before process teardown; the
    // original returned immediately, destroying the context mid-flight.
    cudaDeviceSynchronize();
    // Fix: release events, streams, and the host arrays (all leaked before).
    for (i = 0; i < n_streams; i++)
    {
        cudaEventDestroy(kernelEvent[i]);
        cudaStreamDestroy(streams[i]);
    }
    free(streams);
    free(kernelEvent);
    return 0;
}
|
12,031 | #include <stdio.h>
#define N 10000
#define THREAD_X 4
// Kernel: elementwise C = A + B over the N-float arrays.
// Fix: bounds guard so the kernel stays safe for any launch geometry
// that over-covers the array (the original wrote out of bounds then).
__global__ void index(float *A, float *B, float *C){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}
// Kernel: initialize A[i] = 2i and B[i] = N - i on the device.
// Fix: bounds guard for launch geometries that over-cover the array.
__global__ void inIt(float *A, float *B){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        A[i] = i * 2;
        B[i] = N - i;
    }
}
// Initialize two vectors on the device, add them, and print the sums.
int main(){
    float A[N], *A_d;
    float B[N], *B_d;
    float C[N], *C_d;
    int i;

    dim3 dimBlock(THREAD_X);
    dim3 dimGrid(N / THREAD_X);   // N (10000) is divisible by THREAD_X (4)

    // Fix: the buffers hold float, so size them with sizeof(float); the
    // original used sizeof(int), which only works by coincidence of size.
    cudaMalloc((void**)&A_d, sizeof(float) * N);
    cudaMalloc((void**)&B_d, sizeof(float) * N);
    cudaMalloc((void**)&C_d, sizeof(float) * N);

    // Fix: the original uploaded the uninitialized host arrays A, B, C;
    // inIt writes A_d/B_d on the device and index writes C_d, so no
    // host-to-device copies are needed at all.
    inIt<<<dimGrid, dimBlock>>>(A_d, B_d);
    index<<<dimGrid, dimBlock>>>(A_d, B_d, C_d);

    cudaMemcpy(A, A_d, sizeof(float) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy(B, B_d, sizeof(float) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy(C, C_d, sizeof(float) * N, cudaMemcpyDeviceToHost);

    for(i = 0; i < N; i++){
        printf("%f ", C[i]);
    }
    cudaFree(B_d);
    cudaFree(C_d);
    cudaFree(A_d);
}
|
12,032 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <string.h>
#define PI 3.1415926536
#define e 2.718281828459
#define N 64*64
#define THREADS_PER_BLOCK 64
// Capture the current wall-clock time for later use with toc().
struct timeval tic(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now;
}
// One image patch: the flattened pixel window around a center pixel,
// plus the center's value and its linear (column-major) index.
typedef struct Patches
{
int index;          // linear index of the central pixel in the image
float central;      // value of the central pixel
float* patchArray;  // patchSizeH*patchSizeW window, zero-padded at borders
}Patch;
// Elapsed wall-clock seconds since `begin` was captured with tic().
double toc(struct timeval begin){
    struct timeval now;
    gettimeofday(&now, NULL);
    double ms = (double)(now.tv_sec - begin.tv_sec) * 1000
              + (double)(now.tv_usec - begin.tv_usec) / 1000;
    return ms / 1000;
}
// Read an n x m comma-separated float matrix from file_path into a freshly
// malloc'd buffer (caller frees). Exits the process if the file cannot open.
// NOTE(review): values are stored at I[n*i+j] while i runs over the n rows
// and j over the m columns; for n != m this disagrees with the column-major
// J[n*j+i] indexing used elsewhere in this program and can write past the
// n*m buffer when n > m. The program appears to assume square images
// (n == m) — TODO confirm with callers.
float* readFile(int n, int m, char *file_path){
FILE* ptrFile = fopen(file_path, "r");
float *I = (float*)malloc(n*m*sizeof(float));
if (!ptrFile){
printf("Error Reading File\n");
exit (0);
}
for(int i=0; i<n; i++){
for(int j=0; j<m; j++){
fscanf(ptrFile,"%f,", &I[n*i+j]);
}
}
fclose(ptrFile);
return I;
}
// Write an n x m float array to `output` as comma-separated rows.
// NOTE(review): values are read at array[n*i+j], consistent with readFile
// only when n == m — TODO confirm square input.
void toTXT(float* array, char *output, int n, int m){
    FILE *fp = fopen(output, "w");
    // Fix: report failure instead of dereferencing a NULL FILE*.
    if (!fp) {
        printf("Error opening %s for writing\n", output);
        return;
    }
    for(int i = 0; i < n; i++){
        for(int j = 0; j < m; j++){
            if(j < m-1){
                fprintf(fp, "%lf,", array[n*i+j]);
            }else if(j == m-1){
                fprintf(fp, "%lf", array[n*i+j]);
            }
        }
        // Fix: the original passed a spurious extra argument here
        // (fprintf(fp, "\n", array[n*i])); the format consumes none.
        fprintf(fp, "\n");
    }
    fclose(fp);
    printf("File %s saved.\n", output);
}
// Kernel: B[i] = (A[i] - min) / max for the N pixels.
// The host precomputes `max` as max(A - min), so B lands in [0, 1].
__global__ void normalization(float* A, float* B, float min, float max){
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Guard for grids padded past N.
if(i<N)
B[i] = (A[i] - min) / max;
}
// Generate one additive white Gaussian noise sample via the Box-Muller
// transform: zero mean, standard deviation `dev` (0.03162 => variance ~0.01).
// Adapted from https://www.embeddedrelated.com/showcode/311.php
float AWGN_generator()
{
float dev = 0.03162; //var = 0.01
float temp1;
float temp2;
float result;
int p = 1;
// Draw a uniform sample in [0,1]; resample while it is exactly 0 so the
// log() below stays finite. (The original comments here misdescribed the
// branch as a RAND_MAX/2 comparison.)
while( p > 0 )
{
temp2 = ( rand() / ( (float)RAND_MAX ) );
if ( temp2 == 0 )
{// resample: log(0) is undefined
p = 1;
}// end if
else
{// valid sample, leave the loop
p = -1;
}// end else
}// end while()
// Box-Muller: radius sqrt(-2 ln U1), angle 2*pi*U2.
temp1 = cos( ( 2.0 * (float)PI ) * rand() / ( (float)RAND_MAX ) );
result = sqrt( -2.0 * log( temp2 ) ) * temp1;
return result * dev; // scale the unit-variance sample to std dev `dev`
}// end AWGN_generator()
// Build one zero-padded patchSizeH x patchSizeW window around every pixel of
// the n x m image J (column-major: pixel (i,j) lives at J[n*j+i]).
// Patches are built incrementally to avoid re-reading the whole image:
//  - pixel (0,0) is filled directly from J;
//  - moving down the first column (i>0, j==0) shifts the previous patch up
//    one row and fills the new bottom row;
//  - moving to a later column (j>0) shifts the left neighbour's patch one
//    column and fills the new rightmost column.
// Positions outside the image remain 0 (each window is zeroed first).
Patch* makePatches(float* J, int n, int m, Patch* allPatches, int patchSizeH, int patchSizeW){
int mdW = (patchSizeW - 1)/2;   // half-width of the window
int mdH = (patchSizeH - 1)/2;   // half-height of the window
for(int i=0; i<n; i++){
for(int j=0; j<m; j++){
// Zero the whole window (provides the border padding).
for(int w=0; w<patchSizeW; w++){
for(int h=0; h<patchSizeH; h++){
allPatches[n*j+i].patchArray[patchSizeH*w+h] = 0;
}
}
allPatches[n*j+i].central = J[n*j+i];
allPatches[n*j+i].index = n*j+i;
if(i==0 && j==0){
// Top-left pixel: fill the in-image quadrant directly from J.
for(int w=mdW; w<patchSizeW; w++){
for(int h=mdH; h<patchSizeH; h++){
allPatches[n*j+i].patchArray[patchSizeH*w+h] = J[(n*j+i)-(mdW-w)*n-(mdH-h)];
}
}
}else if(i>0 && j==0){
// First column, below the top: shift the patch above up one row...
for(int h=0; h<patchSizeH-1; h++){
for(int w=0; w<patchSizeW; w++){
allPatches[n*j+i].patchArray[patchSizeH*w+h] = allPatches[n*j+(i-1)].patchArray[patchSizeH*w+(h+1)];
}
}
// ...then read the new bottom row from J (0 if it falls off the image).
for(int w=mdW; w<patchSizeW; w++){
if((n-1-i) >= mdH){
allPatches[n*j+i].patchArray[patchSizeH*w+(patchSizeH-1)] = J[(n*j+i)-(mdW-w)*n+mdH];
}else if((n-1-i) < mdH){
allPatches[n*j+i].patchArray[patchSizeH*w+(patchSizeH-1)] = 0;
}
}
}else if(j>0){
// Later columns: shift the left neighbour's patch one column left...
for(int w=0; w<patchSizeW-1; w++){
for(int h=0; h<patchSizeH; h++){
allPatches[n*j+i].patchArray[patchSizeH*w+h] = allPatches[n*(j-1)+i].patchArray[patchSizeH*(w+1)+h];
}
}
// ...then compute the valid row span [a, b) of the new rightmost column,
// clamped to the image's top and bottom edges.
int a,b;
if(i>=mdH && (n-1-i)>=mdH){
a = 0;
b = patchSizeH;
}else if(i<mdH && (n-1-i)>=mdH){
a = mdH - i;
b = patchSizeH;
}else if(i<mdH && (n-1-i)<mdH){
a = mdH - i;
b = mdH + (n-i);
}else if(i>=mdH && (n-1-i)<mdH){
a = 0;
b = mdH + (n-i);
}
// Fill the rightmost column from J where it lies inside the image.
for(int h=a; h<b; h++){
if((m-1-j) >= mdW){
allPatches[n*j+i].patchArray[patchSizeH*(patchSizeW-1)+h] = J[(n*j+i)+mdW*n-(mdH-h)];
}else if((m-1-j) < mdW){
allPatches[n*j+i].patchArray[patchSizeH*(patchSizeW-1)+h] = 0;
}
}
}
}
}
return allPatches;
}
// Build a patchSizeH x patchSizeW Gaussian-like weight window centered on
// the patch middle. (Note: sigma appears in the exponent together with
// 2*PI, matching the original formulation; weights are not normalized.)
float* computeG_a(int patchSizeH, int patchSizeW, float patchSigma){
    float* weights = (float*)malloc(patchSizeH * patchSizeW * sizeof(float));
    const float cy = (patchSizeH - 1) / 2.0;
    const float cx = (patchSizeW - 1) / 2.0;
    for (int r = 0; r < patchSizeH; r++) {
        for (int c = 0; c < patchSizeW; c++) {
            const float y = r - cy;
            const float x = c - cx;
            weights[patchSizeW * r + c] =
                (1 / 2.0) * exp(-(x * x + y * y) / (2.0 * PI * patchSigma * patchSigma));
        }
    }
    return weights;
}
// Kernel: for each patch k, compute the V-weighted L2 distance between the
// reference patch p_i and patch P[k*pSize .. (k+1)*pSize), then the NLM
// similarity weight W[k] = exp(-d^2 / filtSigma).
// V holds the per-pixel Gaussian window weights.
__global__ void dist(float *W,float *p_i, float *P, float *V, int pSize, float filtSigma){
float d = 0;
int k = blockIdx.x * blockDim.x + threadIdx.x;
// One thread per patch; guard for padded grids.
if(k<N){
for (int i = 0; i < pSize; i++) {
d += V[i] * pow(p_i[i] - P[pSize*k+i],2);
}
d = sqrt(d);
// sqrt then pow(d,2) round-trips d; kept as written.
W[k] = exp(-pow(d,2) / filtSigma);
d=0;
}
}
// Kernel: divide each of the N weights by the scalar *z (the row sum).
__global__ void dim(float *w, float *z){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        w[idx] = w[idx] / *z;
}
// Non-local-means denoising driver.
// Usage: <n> <m> <patchSizeH> <patchSizeW> <input.csv>
// Pipeline: read image -> normalize on GPU -> add AWGN -> build patches ->
// per-patch NLM weights on GPU -> normalize weights -> filtered image.
int main(int argc, char *argv[]){
    int n = atoi(argv[1]);
    int m = atoi(argv[2]);
    int patchSizeH = atoi(argv[3]);
    int patchSizeW = atoi(argv[4]);
    // Fix: allocate room for the NUL terminator and copy it; the original
    // copied strlen bytes into a strlen-byte buffer, leaving the path
    // unterminated (undefined behavior in fopen).
    char* file_path = (char*)malloc(strlen(argv[5]) + 1);
    strcpy(file_path, argv[5]);
    // Fix: 5/3 is integer division (== 1); use float literals so the
    // intended patch sigma of ~1.667 is actually used.
    float patchSigma = 5.0f / 3.0f;
    float filtSigma = 0.01f;
    float *I, *I_norm, *J, *If;
    float *dev_I, *dev_I_norm, *dev_gauss, *dev_P;
    float *P, *w;
    int size = N * sizeof(float);
    int sizePatch = patchSizeH * patchSizeW * sizeof(float);
    int pSize = patchSizeH * patchSizeW;

    // Device buffers (the original also allocated a never-used dev_J).
    cudaMalloc(&dev_I, size);
    cudaMalloc(&dev_I_norm, size);
    cudaMalloc(&dev_gauss, sizePatch);
    cudaMalloc(&dev_P, N * pSize * sizeof(float));

    I_norm = (float*)malloc(size);
    J = (float*)malloc(size);
    // Fix: If and Z are accumulated with "+=" below; calloc zero-initializes
    // them (the original malloc'd and summed into uninitialized memory).
    If = (float*)calloc(N, sizeof(float));
    float* Z = (float*)calloc(N, sizeof(float));
    Patch* allPatches = (Patch*)malloc(n * m * sizeof(Patch));
    for(int i=0; i<n; i++){
        for(int j=0; j<m; j++){
            allPatches[n*j+i].patchArray = (float*)malloc(sizePatch);
        }
    }
    w = (float*)malloc(N * N * sizeof(float));
    struct timeval tStart;
    // readFile allocates and returns the image; the original leaked a
    // separately pre-malloc'd I buffer at this assignment.
    I = readFile(n, m, file_path);

    // min of I, then max of (I - min), for [0,1] normalization.
    float min = INFINITY;
    float max = -1.0;
    for(int i=0; i<n*m; i++){
        if(I[i] < min) min = I[i];
    }
    for(int i=0; i<n*m; i++){
        if((I[i]-min) > max) max = I[i]-min;
    }
    cudaMemcpy(dev_I, I, size, cudaMemcpyHostToDevice);
    normalization<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_I, dev_I_norm, min, max);
    cudaMemcpy(I_norm, dev_I_norm, size, cudaMemcpyDeviceToHost);

    // Add Gaussian noise to the normalized image.
    for(int i=0; i<n*m; i++){
        J[i] = I_norm[i] + AWGN_generator();
    }
    toTXT(I_norm, "normCuda.txt", n, m);
    toTXT(J, "JCuda.txt", n, m);
    allPatches = makePatches(J, n, m, allPatches, patchSizeH, patchSizeW);

    // Flatten all patches into P (N x pSize) and upload.
    P = (float*)malloc(N * pSize * sizeof(float));
    for(int i=0; i<N; i++){
        for(int j=0; j<pSize; j++){
            P[pSize*i+j] = allPatches[i].patchArray[j];
        }
    }
    cudaMemcpy(dev_P, P, N * pSize * sizeof(float), cudaMemcpyHostToDevice);

    // Gaussian window weights (the original leaked a pre-malloc'd buffer
    // by overwriting the pointer with computeG_a's result).
    float* gauss = computeG_a(patchSizeH, patchSizeW, patchSigma);
    cudaMemcpy(dev_gauss, gauss, sizePatch, cudaMemcpyHostToDevice);

    float *patch_i = (float*)malloc(sizePatch);
    float *dev_patchI;
    cudaMalloc(&dev_patchI, sizePatch);
    float *wi_j = (float*)malloc(N * sizeof(float));
    float *dev_wij;
    cudaMalloc(&dev_wij, N * sizeof(float));

    tStart = tic();
    // Pass 1: similarity weights w[i][j] and row sums Z[i].
    for(int i=0; i<N; i++){
        for(int j=0; j<pSize; j++){
            patch_i[j] = P[pSize*i + j];
        }
        cudaMemcpy(dev_patchI, patch_i, sizePatch, cudaMemcpyHostToDevice);
        dist<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_wij, dev_patchI, dev_P, dev_gauss, pSize, filtSigma);
        cudaMemcpy(wi_j, dev_wij, size, cudaMemcpyDeviceToHost);
        for(int j=0; j<N; j++){
            Z[i] += wi_j[j];
            w[N*i+j] = wi_j[j];
        }
    }
    cudaFree(dev_patchI);
    cudaFree(dev_gauss);

    // Pass 2: normalize each weight row by Z[i] and accumulate the output.
    float *dev_Z;
    cudaMalloc(&dev_Z, sizeof(float));
    for(int i=0; i<N; i++){
        for(int j=0; j<N; j++){
            wi_j[j] = w[N*i+j];
        }
        cudaMemcpy(dev_wij, wi_j, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_Z, &Z[i], sizeof(float), cudaMemcpyHostToDevice);
        dim<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_wij, dev_Z);
        cudaMemcpy(wi_j, dev_wij, size, cudaMemcpyDeviceToHost);
        for(int j=0; j<N; j++){
            w[N*i+j] = wi_j[j];
            If[i] += w[N*i+j] * J[j];
        }
    }
    double time = toc(tStart);
    toTXT(If, "IfCuda.txt", n, m);
    float* Dif = (float*)malloc(N * sizeof(float));
    for(int i=0; i<N; i++){
        Dif[i] = If[i] - J[i];
    }
    toTXT(Dif, "DifCuda.txt", n, m);
    printf("Time: %f sec", time);

    cudaFree(dev_I); cudaFree(dev_I_norm); cudaFree(dev_P); cudaFree(dev_wij); cudaFree(dev_Z);
    // Fix: free every per-patch buffer; the original freed only the first.
    for(int i=0; i<n*m; i++){
        free(allPatches[i].patchArray);
    }
    free(allPatches);
    free(I); free(I_norm); free(J); free(patch_i); free(gauss); free(wi_j);
    free(Z); free(If); free(Dif); free(w); free(P); free(file_path);
    return 0;
}
12,033 | /*
============================================================================
Name : VSB-PAII.cu
Author : Dave
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
/*
============================================================================
Name : cuda1.cu
Author : david
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <cuda_runtime.h>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// Kernel: single-block elementwise add of M ints; thread i computes C[i].
__global__ void VectorAdd(int *A,int *B,int *C,int M)
{
    const int idx = threadIdx.x;
    if (idx >= M)
        return;
    C[idx] = A[idx] + B[idx];
}
// Kernel: elementwise add of an M-row x N-column matrix stored row-major.
// Fix: a row is N elements long, so element (i, j) lives at i*N + j; the
// original used i*M + j, which is only correct when M == N.
__global__ void VectorAddMN(int *A,int *B,int *C,int M,int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < M && j < N)
    {
        C[i * N + j] = A[i * N + j] + B[i * N + j];
    }
}
// Kernel: whole-matrix add in which every in-range thread redundantly walks
// all M*N elements (kept from the original; all threads write identical
// values, so the result is still correct, just wasteful).
// Fix: element (row, col) of a row-major matrix lives at row*N + col; the
// original wrote C[row + col], collapsing the matrix onto a diagonal band.
__global__ void VectorAddMN_2(int *A,int *B,int *C,int M,int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < M && j < N)
    {
        for (int k = 0; k < M * N; ++k) {
            int row = k / N;
            int col = k % N;
            C[row * N + col] = A[row * N + col] + B[row * N + col];
        }
    }
}
// Demo: add two length-M int vectors on the device using a single block.
void CV1_1()
{
    const unsigned int M = 10;
    const unsigned int bytes = M * sizeof(int);

    // Host buffers.
    int *hostA = (int*)malloc(bytes);
    int *hostB = (int*)malloc(bytes);
    int *hostC = (int*)malloc(bytes);

    // Device buffers.
    int *devA;
    cudaMalloc(&devA, bytes);
    int *devB;
    cudaMalloc(&devB, bytes);
    int *devC;
    cudaMalloc(&devC, bytes);

    for (int i = 0; i < M; i++)
    {
        hostA[i] = i;
        hostB[i] = i;
        hostC[i] = 0;
    }

    cudaMemcpy(devA, hostA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, hostB, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devC, hostC, bytes, cudaMemcpyHostToDevice);

    // One block of M threads; each thread adds one element.
    VectorAdd<<<1, M>>>(devA, devB, devC, M);

    cudaMemcpy(hostC, devC, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++)
    {
        printf("\n %d", hostC[i]);
    }

    free(hostA);
    free(hostB);
    free(hostC);
    hostA = NULL;
    hostB = NULL;
    hostC = NULL;
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
}
// Demo: add two M x N int matrices with one 2D thread block.
void CV1_2()
{
    const unsigned int M = 10;
    const unsigned int N = 10;
    const size_t bytes = M * N * sizeof(int);

    int *A_Host = (int*)malloc(bytes);
    int *B_Host = (int*)malloc(bytes);
    int *C_Host = (int*)malloc(bytes);
    // Fix: index rows by the row length N (i*N + j); the original's i*M + j
    // only works because M == N here.
    for (int i = 0; i < M; i++)
    {
        for (int j = 0; j < N; j++)
        {
            A_Host[i * N + j] = i * j;
            B_Host[i * N + j] = i * j;
        }
    }

    int *A_Device;
    cudaMalloc(&A_Device, bytes);
    int *B_Device;
    cudaMalloc(&B_Device, bytes);
    int *C_Device;
    cudaMalloc(&C_Device, bytes);

    cudaMemcpy(A_Device, A_Host, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(B_Device, B_Host, bytes, cudaMemcpyHostToDevice);
    // Fix: the original uploaded only M ints of the *uninitialized* C buffer;
    // the kernel overwrites every element, so no upload of C is needed.

    dim3 dimBlock(M, N);
    dim3 dimGrid(1, 1);
    VectorAddMN<<<dimGrid, dimBlock>>>(A_Device, B_Device, C_Device, M, N);

    cudaMemcpy(C_Host, C_Device, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < M; i++)
    {
        std::cout << i << "| " ;
        for (int j = 0; j < N; j++)
        {
            std::cout << C_Host[i * N + j] << " ";
        }
        std::cout << std::endl;
    }
    free(A_Host);
    free(B_Host);
    free(C_Host);
    cudaFree(A_Device);
    cudaFree(B_Device);
    cudaFree(C_Device);
};
// Demo: same matrix add, but the host matrices use the row-pointer idiom
// over one contiguous allocation (row i = base + i*N).
void CV1_2_2()
{
    const unsigned int M = 10;
    const unsigned int N = 10;
    int** A_Host = new int*[M];
    int** B_Host = new int*[M];
    int** C_Host = new int*[M];
    A_Host[0] = new int[M * N];
    B_Host[0] = new int[M * N];
    C_Host[0] = new int[M * N];
    for (int i = 1; i < M; ++i){
        A_Host[i] = A_Host[i-1] + N;
        B_Host[i] = B_Host[i-1] + N;
        C_Host[i] = C_Host[i-1] + N;
    }
    // Fill the inputs.
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            A_Host[i][j] = i*j;
            B_Host[i][j] = i*j;
        }
    }

    int *A_Device;
    cudaMalloc((void **)&A_Device, sizeof(int) * M * N);
    int *B_Device;
    cudaMalloc((void **)&B_Device, sizeof(int) * M * N);
    int *C_Device;
    cudaMalloc((void **)&C_Device, sizeof(int) * M * N);
    cudaMemcpy(A_Device, A_Host[0], M*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(B_Device, B_Host[0], M*N*sizeof(int), cudaMemcpyHostToDevice);
    // (No upload of C: the kernel overwrites it. The original copied the
    // row-POINTER array C_Host to the device, which was already wrong.)

    dim3 dimBlock(M, N);
    dim3 dimGrid(1, 1);
    VectorAddMN_2<<<dimGrid, dimBlock>>>(A_Device, B_Device, C_Device, M, N);

    // Fix: copy into the contiguous data block C_Host[0]; the original
    // copied into the row-pointer array itself, corrupting the pointers
    // that are dereferenced just below.
    cudaMemcpy(C_Host[0], C_Device, M*N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < M; i++)
    {
        std::cout << i << "| " ;
        for (int j = 0; j < N; j++)
        {
            std::cout << C_Host[i][j] << " ";
        }
        std::cout << std::endl;
    }
    // Fix: this memory came from new[]; free() was undefined behavior, and
    // the data blocks were leaked. Release data blocks, then pointer arrays.
    delete[] A_Host[0];
    delete[] B_Host[0];
    delete[] C_Host[0];
    delete[] A_Host;
    delete[] B_Host;
    delete[] C_Host;
    cudaFree(A_Device);
    cudaFree(B_Device);
    cudaFree(C_Device);
};
// Entry point: run the 2D matrix-add demo.
int main(int argc, char *argv[])
{
    CV1_2();
    return 0;
}
|
12,034 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cuda.h>
//Operations to be done by threads
// Kernel: box (mean) filter with window half-size kernalSize.
// Border pixels whose window would leave the image are set to 0.
// One thread per pixel; x indexes columns, y indexes rows.
__global__ void mean_filter_apply(int *image, int *filtered_image, int imWidth, int imHeight, int kernalSize){
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (col >= imWidth || row >= imHeight)
        return;
    const bool border = col < kernalSize || col >= imWidth - kernalSize ||
                        row < kernalSize || row >= imHeight - kernalSize;
    if (border) {
        filtered_image[row * imWidth + col] = 0;
        return;
    }
    int acc = 0;
    const int windowSize = (2 * kernalSize + 1) * (2 * kernalSize + 1);
    for (int dy = -kernalSize; dy <= kernalSize; dy++) {
        for (int dx = -kernalSize; dx <= kernalSize; dx++) {
            acc += image[(row + dy) * imWidth + col + dx];
        }
    }
    filtered_image[row * imWidth + col] = acc / windowSize;
}
//Function to run the GPU mean filtering process
// Run the mean filter on the GPU: copy the image over, launch a grid of
// 40x20-thread blocks covering the image (one extra block per dimension
// for remainders), copy the result back, and report the kernel's wall time
// (measured with clock()) via *time.
void mean_filter_GPU(int *image, int *filteredImage, int imWidth, int imHeight, int kernalSize, double *time){
int * image_in_gpu;
int * filtered_image_in_gpu;
int sizeofImage = imHeight*imWidth;
// Device buffers for input and output.
cudaMalloc((void **) &image_in_gpu, sizeofImage*sizeof(int));
cudaMalloc((void **) &filtered_image_in_gpu, sizeofImage*sizeof(int));
// The output buffer is also uploaded, although the kernel overwrites
// every element it produces.
cudaMemcpy(image_in_gpu, image, sizeofImage*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(filtered_image_in_gpu, filteredImage, sizeofImage*sizeof(int), cudaMemcpyHostToDevice);
// Launch geometry: 40x20 threads per block, grid padded past the image.
dim3 dimBlock(40,20);
int w = (imWidth/40) + 1;
int h = (imHeight/20) + 1;
dim3 dimGrid(w,h);
// Time only the kernel (the synchronize makes the measurement meaningful).
clock_t start=clock();
mean_filter_apply<<<dimGrid, dimBlock>>>(image_in_gpu, filtered_image_in_gpu, imWidth, imHeight ,kernalSize);
cudaDeviceSynchronize();
clock_t end = clock();
cudaMemcpy(filteredImage, filtered_image_in_gpu, sizeofImage*sizeof(int), cudaMemcpyDeviceToHost);
*time = (double)(end-start)/CLOCKS_PER_SEC;
cudaFree(image_in_gpu);
cudaFree(filtered_image_in_gpu);
}
//Function to run the CPU mean filtering process
// CPU reference box (mean) filter. Border pixels whose window would leave
// the image are zeroed; elapsed seconds are reported via *time (clock()).
void mean_filter_CPU(int * image, int *filteredImage, int imWidth, int imHeight, int kernalSize, double *time){
    const clock_t t0 = clock();
    const long int windowSize = (2 * kernalSize + 1) * (2 * kernalSize + 1);
    for (int row = 0; row < imHeight; row++){
        for (int col = 0; col < imWidth; col++){
            if (row < kernalSize || row >= imHeight - kernalSize ||
                col < kernalSize || col >= imWidth - kernalSize){
                filteredImage[row * imWidth + col] = 0;
                continue;
            }
            int acc = 0;
            for (int dy = -kernalSize; dy <= kernalSize; dy++){
                for (int dx = -kernalSize; dx <= kernalSize; dx++){
                    acc += image[(row + dy) * imWidth + col + dx];
                }
            }
            filteredImage[row * imWidth + col] = acc / windowSize;
        }
    }
    *time = (double)(clock() - t0) / CLOCKS_PER_SEC;
}
// Benchmark driver: run CPU and GPU mean filters 5 times on a synthetic
// 1280x1280 image and print the timings.
int main(){
    int imWidth = 1280;
    int imHeight = 1280;
    size_t bytes = (size_t)imWidth * imHeight * sizeof(int);
    int *imageTest = (int *)malloc(bytes);
    int *filteredTest_CPU = (int *)malloc(bytes);
    int *filteredTest_GPU = (int *)malloc(bytes);
    // Synthetic test pattern: pixel (i, j) = i*j.
    for (int i = 0; i < imHeight; i++){
        for (int j = 0; j < imWidth; j++){
            imageTest[(i * imWidth) + j] = i * j;
        }
    }
    double time_cpu;
    double time_gpu;
    for (int k = 0; k < 5; k++){
        mean_filter_CPU(imageTest, filteredTest_CPU, imWidth, imHeight, 2, &time_cpu);
        printf("CPU Processing Time : %f \n", time_cpu);
        mean_filter_GPU(imageTest, filteredTest_GPU, imWidth, imHeight, 2, &time_gpu);
        printf("GPU Processing Time %f \n", time_gpu);
    }
    // Fix: release the buffers (leaked before) and return 0 on success;
    // the original returned 1, which signals failure to the shell.
    free(imageTest);
    free(filteredTest_CPU);
    free(filteredTest_GPU);
    return 0;
}
12,035 | #include <pthread.h>
#include <stdio.h>
#include <unistd.h> //sleep
// Thread body: sleep five seconds, then greet with the number passed in arg.
void* hello(void* arg) {
    int* threadNumber = (int*)arg;
    sleep(5);
    printf("HelloThread %d\n", *threadNumber);
    return 0;
}
// Demo: create one pthread, join it, then print a separator line.
int main(void) {
pthread_t tid;
int threadNum = 1;
// pthread_create spawns a new runnable thread executing hello(&threadNum);
// it can be called any number of times from anywhere in the program.
pthread_create(&tid, NULL, hello, &threadNum);
// pthread_join blocks the caller until the worker terminates — the
// simplest form of thread synchronization.
pthread_join(tid, NULL);
printf("------------------------\n");
return (0);
}
|
12,036 | /*Realizar un programa CUDA que dado un vector V de N números enteros multiplique a
cada número por una constante C, se deben realizar dos implementaciones:
a.Tanto C como N deben ser pasados como parámetros al kernel.
b.Tanto C como N deben estar almacenados en la memoria de constantes de la GPU*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
// CPU reference: sample standard deviation (n-1 denominator) of `dim` ints.
float secuential(const int array[] , int dim){
    float mean = 0;
    for (int i = 0; i < dim; i++)
        mean += array[i];
    mean /= dim;
    float sum = 0;
    for (int i = 0; i < dim; i++) {
        const float d = array[i] - mean;
        sum += d * d;
    }
    return sqrt(sum / (dim - 1));
}
// Kernel: accumulate the sum of `array` into result[0] via atomicAdd.
// Work split: with at least as many threads as elements, each in-range
// thread adds one element; otherwise every thread handles a contiguous
// chunk of dim/thread_number elements, and the last thread also takes the
// remainder.
// NOTE(review): float atomicAdd makes the summation order, and hence the
// rounding, nondeterministic across runs.
__global__ void addAll( const int array[] , int dim,float result[], const int thread_number)
{
// Flatten the 3D thread index within the launch's block grid.
int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
if(index<dim){
if(dim<=thread_number){ // more threads than elements: one element each
atomicAdd(result,array[index]);
}
else{ // fewer threads than elements: chunked
if(index!=thread_number-1){ // non-final thread: fixed-size chunk
for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
atomicAdd(result,array[i]);
}
}
else{ // final thread: its chunk plus the remainder
for(int i=index*(int)(dim/thread_number); i< dim; i++){
atomicAdd(result,array[i]);
}
}
}
}
}
// Kernel: accumulate the sum of squared deviations (array[i]-mean)^2 into
// result[0] via atomicAdd, using the same work split as addAll: one element
// per thread when threads >= elements, otherwise contiguous chunks with the
// last thread absorbing the remainder.
// NOTE(review): as in addAll, float atomicAdd ordering is nondeterministic.
__global__ void sigma( const int array[] , int dim,float result[], const float mean, const int thread_number)
{
// Flatten the 3D thread index within the launch's block grid.
int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
if(index<dim){
if(dim<=thread_number){ // more threads than elements: one element each
atomicAdd(result,(array[index]-mean)*(array[index]-mean));
}
else{ // fewer threads than elements: chunked
if(index!=thread_number-1){ // non-final thread: fixed-size chunk
for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
atomicAdd(result,(array[i]-mean)*(array[i]-mean));
}
}
else{ // final thread: its chunk plus the remainder
for(int i=index*(int)(dim/thread_number); i< dim; i++){
atomicAdd(result,(array[i]-mean)*(array[i]-mean));
}
}
}
}
}
// Compute the sample standard deviation of a random int array on the GPU
// (two passes: sum for the mean, then sum of squared deviations) and
// compare against the CPU reference.
// Optional args: <array size> <block side N> <grid side M>.
int main(int argc, char *argv[]){
    clock_t time_begin = clock();
    int *device_array = 0;
    int *host_array = 0;
    int size_array = 9;
    float *d_sum = NULL;
    float *h_sum = 0;
    float mean;
    float final_res;
    int M = 1, N = 1;
    if (argc == 4){
        size_array = atoi(argv[1]);
        N = atoi(argv[2]);
        M = atoi(argv[3]);
    }
    h_sum = (float*)malloc(sizeof(float));
    h_sum[0] = 0;
    host_array = (int*)malloc(size_array * sizeof(int));
    for (int i = 0; i < size_array; i++){
        host_array[i] = rand() % 10;
    }
    printf("\n");
    cudaMalloc(&device_array, size_array * sizeof(int));
    cudaMalloc(&d_sum, sizeof(float));
    cudaMemcpy(device_array, host_array, sizeof(int) * size_array, cudaMemcpyHostToDevice);
    // Fix: d_sum/h_sum hold float; the original sized every one of these
    // transfers with sizeof(int), which only works because the two types
    // happen to be the same size.
    cudaMemcpy(d_sum, h_sum, sizeof(float), cudaMemcpyHostToDevice);
    dim3 bloque(N, N);   // 2D block of N*N threads
    dim3 grid(M, M);     // 2D grid of M*M blocks
    int thread_number = N * N * M * M;
    addAll<<<grid, bloque>>>(device_array, size_array, d_sum, thread_number);
    // Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    cudaMemcpy(h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
    printf("Sum of array: %f\n", h_sum[0]);
    mean = h_sum[0] / size_array;
    h_sum[0] = 0;
    cudaMemcpy(d_sum, h_sum, sizeof(float), cudaMemcpyHostToDevice);
    sigma<<<grid, bloque>>>(device_array, size_array, d_sum, mean, thread_number);
    cudaDeviceSynchronize();
    cudaMemcpy(h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost);
    printf("Sigma: %f\n", h_sum[0]);
    final_res = sqrt(h_sum[0] / (size_array - 1));
    printf("GPU time: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    printf("GPU result: %f\n", final_res);
    time_begin = clock();
    float cpu_res = secuential(host_array, size_array);
    printf("CPU time: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    printf("CPU result: %f\n", cpu_res);
    // Fix: exact float equality almost never holds between the atomicAdd
    // reduction and the serial CPU sum; compare with a relative tolerance.
    if (fabs(final_res - cpu_res) < 1e-4f * (1.0f + fabs(cpu_res)))
        printf("CPU and GPU have same result\n");
    else
        printf("CPU and GPU have different result\n");
    free(host_array); free(h_sum);
    cudaFree(device_array); cudaFree(d_sum);
}
12,037 | #include <stdio.h>
#include <cuda.h>
// Kernel: each thread stores its linearized id into the matrix.
// id = threadIdx.x * blockDim.y + threadIdx.y; for the host launch
// block(N, M) this equals x*M + y, matching the host's row-major readback
// hmatrix[ii * M + jj].
__global__ void dkernel(unsigned *matrix) {
unsigned id = threadIdx.x * blockDim.y + threadIdx.y;
matrix[id] = id;
}
#define N 5
#define M 6
// Launch a single N x M block, have every thread write its id, and print
// the resulting N x M matrix.
int main() {
    dim3 block(N, M, 1);
    unsigned *matrix, *hmatrix;
    cudaMalloc(&matrix, N * M * sizeof(unsigned));
    hmatrix = (unsigned *)malloc(N * M * sizeof(unsigned));
    dkernel<<<1, block>>>(matrix);
    // The blocking memcpy also synchronizes with the kernel.
    cudaMemcpy(hmatrix, matrix, N * M * sizeof(unsigned), cudaMemcpyDeviceToHost);
    for (unsigned ii = 0; ii < N; ++ii) {
        for (unsigned jj = 0; jj < M; ++jj) {
            printf("%2d ", hmatrix[ii * M + jj]);
        }
        printf("\n");
    }
    // Fix: release the device and host buffers (both leaked before).
    cudaFree(matrix);
    free(hmatrix);
    return 0;
}
|
// Kernel: inter-map competition. For every neuron that spiked this step
// (ids glbSpk[0 .. glbSpkCnt[0])), record in winners_intermap /
// winnersV_intermap the spiking neuron with the highest membrane potential
// V at each map location (id % map_size), across all maps.
// A single global spinlock (*mutex, 0 = free) guards the compare-and-update,
// so all contending threads serialize on it.
// NOTE(review): atomicCAS spinlocks of this shape can livelock within a
// warp on pre-Volta architectures — confirm the target SM.
__global__ void get_intermap_firing_winners(
int *glbSpk, int *glbSpkCnt, float *V,
int *winners_intermap, float *winnersV_intermap, int *mutex,
int map_size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < glbSpkCnt[0]) {
int id = glbSpk[tid];
bool need_lock = true;
while (need_lock) {
if (atomicCAS(mutex, 0, 1) == 0) {
// critical section: keep the highest-V spike per location
if (V[id] > winnersV_intermap[id % map_size]) {
winnersV_intermap[id % map_size] = V[id];
winners_intermap[id % map_size] = id;
}
// end critical section: release the lock
atomicExch(mutex, 0);
need_lock = false;
}
}
}
}
// Kernel: keep only the spikes that won the inter-map competition.
// Each winner is compacted into spikes_temp (its slot reserved by an
// atomicAdd on *spike_count_temp), marked fired, and its map location is
// blocked from firing again via allow_fire_loc.
__global__ void clean_spikes(
int *glbSpk, int *glbSpkCnt, float *V, bool *fired,
int *winners_intermap, bool *allow_fire_loc, int *mutex, int *spikes_temp, int *spike_count_temp,
int map_size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < glbSpkCnt[0]) {
int id = glbSpk[tid];
// Only the recorded winner for this location survives.
if (winners_intermap[id % map_size] == id) {
int spike_id = atomicAdd(spike_count_temp, 1);
spikes_temp[spike_id] = id;
fired[id] = true;
allow_fire_loc[id % map_size] = false;
}
}
}
// allow fired neuron with highest potential in a map to do stdp
// Kernel: intra-map competition for STDP. Among the spiking neurons whose
// (section, map) slot and location still allow STDP, record the one with
// the highest membrane potential per (section, map) pair in
// winners_intramap / winnersV_intramap.
// Uses the same single global atomicCAS spinlock pattern as
// get_intermap_firing_winners (same pre-Volta livelock caveat).
__global__ void get_intramap_stdp_winners(
int *glbSpk, int *glbSpkCnt, float *V,
int *winners_intramap, float *winnersV_intramap, bool *allow_stdp_map, bool *allow_stdp_loc, int *mutex,
int map_num, int map_size, int width, int sec_num, int sec_size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < glbSpkCnt[0]) {
int id = glbSpk[tid];
// Section index from the neuron's row within its map; map from the id.
int sec = id % map_size / width / sec_size;
int map = id / map_size;
if (allow_stdp_map[sec * map_num + map] && allow_stdp_loc[id % map_size]) {
bool need_lock = true;
while (need_lock) {
if (atomicCAS(mutex, 0, 1) == 0) {
// critical section: keep the highest-V spike per (section, map)
if (V[id] > winnersV_intramap[sec * map_num + map]) {
winnersV_intramap[sec * map_num + map] = V[id];
winners_intramap[sec * map_num + map] = id;
}
// end critical section: release the lock
atomicExch(mutex, 0);
need_lock = false;
}
}
}
}
}
// set allow_stdp_map and allow_stdp_loc
// Kernel: one thread per (section, map) slot. If the slot has an intra-map
// STDP winner (!= -1), disable further STDP for the whole (section, map)
// and for every location within `radius` of the winner, clamped to the
// section's row range and the image width.
__global__ void disallow_nearby_stdp(
int *winners_intramap, bool *allow_stdp_map, bool *allow_stdp_loc,
int map_num, int map_size, int width, int sec_num, int sec_size, int radius)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < sec_num * map_num) {
int sec = tid / map_num;
int map = tid % map_num;
if (winners_intramap[sec * map_num + map] != -1) {
int id = winners_intramap[sec * map_num + map];
// Winner's (row, col) within its map.
int r = id % map_size / width;
int c = id % map_size % width;
int l = radius;
allow_stdp_map[sec * map_num + map] = false;
// Block the square neighbourhood, clamped to section rows and width.
for (int i = (r-l < sec*sec_size ? sec*sec_size : r-l); i <= (r+l > (sec+1)*sec_size-1 ? (sec+1)*sec_size-1 : r+l); i++)
for (int j = (c-l < 0 ? 0 : c-l); j <= (c+l > width-1 ? width-1 : c+l); j++)
allow_stdp_loc[i * width + j] = false;
}
}
}
|
12,039 | #include <iostream>
// Element-wise addition c = a + b of two row-major rows x columns matrices.
// Uses 2-D grid-stride loops, so any launch geometry covers the whole matrix.
__global__ void add(int* a, int* b, int* c, int rows, int columns)
{
    const int rowStride = blockDim.x * gridDim.x;
    const int colStride = blockDim.y * gridDim.y;
    for (int r = blockIdx.x * blockDim.x + threadIdx.x; r < rows; r += rowStride)
        for (int col = blockIdx.y * blockDim.y + threadIdx.y; col < columns; col += colStride)
            c[r * columns + col] = a[r * columns + col] + b[r * columns + col];
}
// Print a row-major `row` x `columns` matrix to stdout, one row per line,
// values separated by tabs.
void print_matrix(int *matrix, int row, int columns)
{
    for (int r = 0; r < row; ++r)
    {
        const int *line = matrix + r * columns;
        for (int c = 0; c < columns; ++c)
            std::cout << line[c] << '\t';
        std::cout << '\n';
    }
}
// Demo: add two 10x10 matrices in managed memory and print the result.
// Improvements over the original: ceil-division block counts (the original
// used floor division, relying on the kernel's grid-stride loops for
// coverage) and a launch-error check before reading results.
int main(void)
{
    const int rows = 10;
    const int columns = 10;
    const int rows_threads = 8;
    const int columns_threads = 8;
    // Ceil-division so the grid covers the whole matrix even when the
    // dimensions are not multiples of the block size.
    const int rows_blocks = (rows + rows_threads - 1) / rows_threads;
    const int columns_blocks = (columns + columns_threads - 1) / columns_threads;
    dim3 blocks(rows_blocks, columns_blocks);
    dim3 threads(rows_threads, columns_threads);

    int *a, *b, *res;
    cudaMallocManaged(&a, rows * columns * sizeof(int));
    cudaMallocManaged(&b, rows * columns * sizeof(int));
    cudaMallocManaged(&res, rows * columns * sizeof(int));

    // a[i][j] = i and b[i][j] = j, so res[i][j] should come out as i + j.
    for (int i = 0; i < rows; ++i)
    {
        for (int j = 0; j < columns; ++j)
        {
            a[i * columns + j] = i;
            b[i * columns + j] = j;
            res[i * columns + j] = 0;
        }
    }

    add<<<blocks, threads>>>(a, b, res, rows, columns);
    // Surface launch-configuration errors, then wait for the kernel to
    // finish before touching managed memory on the host.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cout << "kernel launch failed: " << cudaGetErrorString(err) << '\n';
    cudaDeviceSynchronize();

    std::cout << "First matrix:\n";
    print_matrix(a, rows, columns);
    std::cout << "Second matrix:\n";
    print_matrix(b, rows, columns);
    std::cout << "Result matrix:\n";
    print_matrix(res, rows, columns);

    cudaFree(a);
    cudaFree(b);
    cudaFree(res);
    return 0;
}
|
12,040 | #include <iostream>
#define N 10000
float *a_cpu, *b_cpu, *c_cpu;
float *a_gpu, *b_gpu, *c_gpu;
// Element-wise vector addition c = a + b over n elements.
// BUG FIX: the original had every launched thread execute the entire serial
// loop, so all threads redundantly wrote all n elements. A grid-stride loop
// gives each thread a disjoint subset while keeping the result identical for
// any launch geometry.
__global__ void add(float *a_gpu, float *b_gpu, float *c_gpu, int n){
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        c_gpu[i] = a_gpu[i] + b_gpu[i];
    }
}
// Fill two N-element vectors, add them on the GPU, and print the result.
int main() {
    // Host buffers hold N floats: valid indices are 0 .. N-1.
    a_cpu = (float*)malloc(sizeof(float)*N);
    b_cpu = (float*)malloc(sizeof(float)*N);
    c_cpu = (float*)malloc(sizeof(float)*N);
    cudaMalloc((float**)&a_gpu, sizeof(float) * N);
    cudaMalloc((float**)&b_gpu, sizeof(float) * N);
    cudaMalloc((float**)&c_gpu, sizeof(float) * N);
    // BUG FIX: the original loops ran i = 1 .. N inclusive and wrote
    // a_cpu[N] / b_cpu[N] / c_cpu[N], one element past the end of each
    // malloc'd buffer (heap overflow). Index 0 .. N-1 instead.
    for (int i = 0; i < N; i++){
        a_cpu[i] = -i;
        b_cpu[i] = i*2;
        c_cpu[i] = a_cpu[i] + b_cpu[i];
    }
    cudaMemcpy(a_gpu, a_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(c_gpu, c_cpu, sizeof(float) * N, cudaMemcpyHostToDevice);
    add<<<1,10>>>(a_gpu, b_gpu, c_gpu, N);
    // The blocking device-to-host copy also waits for the kernel to finish.
    cudaMemcpy(c_cpu, c_gpu, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // BUG FIX: same off-by-one fixed here (was i = 1 .. N).
    for (int i = 0; i < N; i++){
        printf("%.0f\n", c_cpu[i]);
    }
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(c_gpu);
    free(a_cpu);
    free(b_cpu);
    free(c_cpu);
    getchar();
    return 0;
}
12,041 | // #CSCS CUDA Training
//
// #Example 4.2 - dot product with atomics - generic version with custom mutex
//
// #Author: Ugo Varetto
//
// #Goal: compute the dot product of two vectors performing all the computation on the GPU
//
// #Rationale: shows how to perform the dot product of two vectors as a parallel reduction
// with all the computation on the GPU; last step is done through synchronized
// access to a shared variable. Spinlock implemented through atomicCAS.
//
// #Solution: store scalar products in local cache and iterate over cache elements
// performing incremental sums; perform last reduction on GPU through custom atomicAddF(float*...)
//
// #Code: 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) initialize data directly on GPU
// 4) launch kernel
// 5) report errors
// 6) read data back
// 7) free memory
//
// #Compilation:
// [correct] nvcc -arch=sm_13 4_3_parallel-dot-product-atomics-portable.cu -o dot-product-atomics
// [wrong] nvcc -DNO_SYNC -arch=sm_13 4_3_parallel-dot-product-atomics-portable.cu -o dot-product-atomics
//
// #Execution: ./dot-product-atomics
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
// cudaThreadSynchronize() is required to wait for the end of kernel execution from
// a host thread; in case of synchronous copy operations like cudaMemcpy(...,cudaDeviceToHost)
// kernel execution is guaranteed to be terminated before data are copied
//
// #Note: also check cudaMemset, cudaErrorString, cudaGetLastError usage
//
// #Note: as of CUDA 3.2 it seems that kernels do not stall anymore when invoking
// __syncthreads from within an if block dependent on the thread id;
// #see http://forums.nvidia.com/index.php?showtopic=178284
//
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
typedef float real_t;
const size_t BLOCK_SIZE = 16;
// lock mutex: atomicCAS sets variable to third argument if
// variable value is equal to second argument and returns previous value.
// In this case we spin in the while loop until the mutex is set to 1 i.e.
// until its return value is != 0 and exit the loop only after the mutex has
// been acquired i.e. has been set to 1 after the mutex has been released i.e.
// set to zero.
// Spin until this thread acquires the mutex: atomicCAS stores 1 only when
// the current value is 0 and returns the previous value, so the loop exits
// only for the thread that observed 0.
// NOTE(review): in this file it is called by a single thread per block
// (threadIdx.x == 0 in full_dot); warp-wide use could livelock on pre-Volta
// SIMT scheduling — confirm before reusing elsewhere.
__device__ void lock( int* mutex ) {
    while( atomicCAS( mutex, 0, 1 ) != 0 );
}
// set mutex to zero; note that we do not need to use an atomic op here;
// it is however preferred to access the same memory accessed by atomic functions
// only with atomic functions for consistency reasons: atomic transations and
// regular memory access follow different paths on the GPU; it might *appear* that
// the unlock doesn't look in sync with the lock, although the final result will be correct.
// Release the mutex by storing 0. atomicExch is used instead of a plain
// store so the release goes through the same atomic path as the acquiring
// atomicCAS in lock().
__device__ void unlock( int* mutex ) {
    atomicExch( mutex, 0 );
}
// custom implementation of atomic add for floating point variables
// Mutex-protected floating-point accumulation: performs *pv += v inside a
// lock()/unlock() critical section, serializing all callers that share the
// same mutex. (Per the file header, this custom version exists because the
// sm_13-era target lacked a usable hardware float atomicAdd.)
__device__ void atomicAddF( real_t* pv, real_t v, int* mutex ) {
    lock( mutex );
    *pv += v;
    unlock( mutex );
}
// dot product entirely executed on the GPU; last reduction step is executed by serializing
// access to the output variable through a mutex
// Dot product of v1 and v2 (N elements) computed entirely on the GPU.
// Each thread accumulates a grid-stride partial product into shared cache,
// the block tree-reduces the cache, and thread 0 of each block adds the
// block result into *out under a mutex.
// Preconditions: blockDim.x == BLOCK_SIZE (the shared cache size) and
// BLOCK_SIZE is a power of two (the halving reduction requires it);
// *out must be zeroed before launch.
__global__ void full_dot( const real_t* v1, const real_t* v2, real_t* out, int N, int* mutex ) {
    __shared__ real_t cache[ BLOCK_SIZE ];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    cache[ threadIdx.x ] = 0.f;
    // Grid-stride accumulation of per-thread partial products.
    while( i < N ) {
        cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
        i += gridDim.x * blockDim.x;
    }
    __syncthreads(); // required because later on the current thread is accessing
                     // data written by another thread
    // Tree reduction: halve the active range each pass. The barrier is
    // outside the if, so every thread of the block reaches it.
    i = BLOCK_SIZE / 2;
    while( i > 0 ) {
        if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
        __syncthreads();
        i /= 2; //not sure bitwise operations are actually faster
    }
#ifndef NO_SYNC // serialized access to shared data;
    if( threadIdx.x == 0 ) atomicAddF( out, cache[ 0 ], mutex );
#else // no sync: deliberately broken variant for the exercise. Likely outcome:
      // 1) all blocks read 0
      // 2) all blocks concurrently write their local partial sum (lost updates)
    if( threadIdx.x == 0 ) *out += cache[ 0 ];
#endif
}
// cpu implementation of dot product
// Host reference implementation: dot product of v1 and v2 (N elements each),
// accumulated in real_t precision. Used to validate the GPU result.
real_t dot( const real_t* v1, const real_t* v2, int N ) {
    real_t acc = 0;
    for( int i = 0; i < N; ++i ) acc += v1[ i ] * v2[ i ];
    return acc;
}
// initialization function run on the GPU
// Fill v (N elements) with 1.0f using a grid-stride loop, so any launch
// configuration covers the whole vector.
__global__ void init_vector( real_t* v, int N ) {
    const int stride = gridDim.x * blockDim.x;
    for( int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride ) {
        v[ i ] = 1.0f; //real_t( i ) / 1000000.f;
    }
}
//------------------------------------------------------------------------------
// Driver: initialize two vectors on the GPU, run the mutex-based dot-product
// kernel, and compare against the CPU reference.
// BUG FIX: dev_mutex was allocated with cudaMalloc but never freed.
int main(int argc, char** argv ) {
    const size_t ARRAY_SIZE = 1024;//1024 * 1024; //1Mi elements
    const int BLOCKS = 64;//512;
    const int THREADS_PER_BLOCK = BLOCK_SIZE;//256; // must equal full_dot's shared cache size
    const size_t SIZE = ARRAY_SIZE * sizeof( real_t );
    // device storage
    real_t* dev_v1 = 0; // vector 1
    real_t* dev_v2 = 0; // vector 2
    real_t* dev_out = 0; // result
    int* dev_mutex = 0;  // spinlock guarding the final accumulation
    cudaMalloc( &dev_v1, SIZE );
    cudaMalloc( &dev_v2, SIZE );
    cudaMalloc( &dev_out, sizeof( real_t ) );
    cudaMalloc( &dev_mutex, sizeof( int ) );
    // host storage
    std::vector< real_t > host_v1( ARRAY_SIZE );
    std::vector< real_t > host_v2( ARRAY_SIZE );
    real_t host_out = 0.f;
    // initialize vector 1 with a kernel; much faster than a CPU loop
    init_vector<<< 1024, 256 >>>( dev_v1, ARRAY_SIZE );
    cudaMemcpy( &host_v1[ 0 ], dev_v1, SIZE, cudaMemcpyDeviceToHost );
    // initialize vector 2 with a kernel
    init_vector<<< 1024, 256 >>>( dev_v2, ARRAY_SIZE );
    cudaMemcpy( &host_v2[ 0 ], dev_v2, SIZE, cudaMemcpyDeviceToHost );
    // zero the accumulator and the mutex before launching the reduction
    cudaMemset( dev_out, 0, sizeof( real_t ) );
    cudaMemset( dev_mutex, 0, sizeof( int ) );
    // execute kernel and surface launch errors
    full_dot<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_v1, dev_v2, dev_out, ARRAY_SIZE, dev_mutex );
    std::cout << cudaGetErrorString( cudaGetLastError() ) << std::endl;
    // copy output data from device(gpu) to host(cpu); this blocking copy
    // also waits for the kernel to finish
    cudaMemcpy( &host_out, dev_out, sizeof( real_t ), cudaMemcpyDeviceToHost );
    // print dot product computed on the GPU
    std::cout << "GPU: " << host_out << std::endl;
    // print dot product computed on the CPU for comparison
    std::cout << "CPU: " << dot( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE ) << std::endl;
    // free memory
    cudaFree( dev_v1 );
    cudaFree( dev_v2 );
    cudaFree( dev_out );
    cudaFree( dev_mutex ); // LEAK FIX: was missing in the original
    return 0;
}
|
12,042 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define abs(x) ( (x > 0) ? (x) : (-x) )
#define M 32*64
#define N 16*64
#define L 128*128
#define l 128
float *matrix_init(int m, int n);
void matrix_print(float *A, int m, int n);
void matrix_mult(float *A, int m, int q, float *B, int p, int n, float *C);
// Matrix-product partial accumulation: each block owns l consecutive values
// of the inner index I (launched as <<<L/l, l>>>); for every output (i, j)
// the block's threads stage their products in shared memory, thread 0 sums
// them and atomically accumulates into C[i*N + j]. C must be zeroed first.
// BUG FIX: a second __syncthreads() is required after the thread-0 read
// phase — without it the next iteration's write to AB[J] races with thread 0
// still summing AB[] from the previous iteration.
__global__ void dot(float *A, float *B, float *C) {
    int i, j;
    int I = blockDim.x*blockIdx.x + threadIdx.x, J = threadIdx.x, k;
    __shared__ float AB[l];
    float s = 0.;
    for (i = 0; i < M; i++)
        for (j = 0; j < N; j++) {
            AB[J] = A[i*L+I]*B[I*N+j];
            __syncthreads();
            if (!J) {
                s = 0.;
                for (k = 0; k < l; k++)
                    s += AB[k];
                atomicAdd((C+i*N+j), s);
            }
            // Barrier before AB[J] is overwritten in the next iteration.
            __syncthreads();
        }
}
// Multiply two all-ones matrices on the GPU and on the CPU, time both, and
// count mismatching elements.
// BUG FIX: matrix D was allocated via matrix_init but never freed.
int main(int argc, char *argv[]) {
    int i, j, k = 0; time_t dt, ht;
    float *A, *B, *C, *D, *_A, *_B, *_C;
    A = matrix_init(M, L);
    B = matrix_init(L, N);
    C = matrix_init(M, N);   // GPU result, zero-initialized (dot accumulates)
    D = matrix_init(M, N);   // CPU reference result
    cudaMalloc((void **) &_A, M*L*sizeof(float));
    cudaMalloc((void **) &_B, L*N*sizeof(float));
    cudaMalloc((void **) &_C, M*N*sizeof(float));
    srand(time(NULL));
    // Fill both inputs with ones so the expected product is all L.
    for (i = 0; i < M; i++)
        for (j = 0; j < L; j++)
            *(A+i*L+j) = 1.;
    for (i = 0; i < L; i++)
        for (j = 0; j < N; j++)
            *(B+i*N+j) = 1.;
    dt = time(NULL);
    cudaMemcpy(_A, A, M*L*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(_B, B, L*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(_C, C, M*N*sizeof(float), cudaMemcpyHostToDevice);
    dot<<<L/l, l>>>(_A, _B, _C);
    // Blocking copy-back also waits for the kernel to complete.
    cudaMemcpy(C, _C, M*N*sizeof(float), cudaMemcpyDeviceToHost);
    dt = time(NULL) - dt;
    printf("device: %d sec\n", (int) dt);
    fflush(stdout);
    ht = time(NULL);
    matrix_mult(A, M, L, B, L, N, D);
    ht = time(NULL) - ht;
    printf("host: %d sec\n", (int) ht );
    // NOTE(review): dt can be 0 at second granularity, which makes this
    // ratio print inf — acceptable for the demo, but worth knowing.
    printf("acceleration: %.0lf\n", (((double)ht)/((double)dt)) );
    // Count elements whose GPU/CPU results differ beyond a tiny tolerance.
    for (i = 0; i < M; i++)
        for (j = 0; j < N; j++)
            if(abs(*(C+i*N+j) - *(D+i*N+j)) > .0000001)
                k++;
    printf("error: %d\n", k);
    cudaFree(_C); cudaFree(_B); cudaFree(_A);
    free(D); // LEAK FIX: was missing in the original
    free(C); free(B); free(A);
    return 0;
}
// Matrix functions
/* Allocate an m x n row-major float matrix initialized to zero; returns
 * NULL on allocation failure.
 * BUG FIX: the original allocated m*n*sizeof(float *) — the wrong element
 * size (it only "worked" because a pointer is at least as large as a float,
 * so it over-allocated). */
float *matrix_init(int m, int n) {
    float *A = (float *) malloc((size_t)m * (size_t)n * sizeof(float));
    if (!A)
        return NULL;
    for (int i = 0; i < m * n; i++)
        A[i] = 0.0f;
    return A;
}
// Pretty-print matrix A (m x n, row-major) to stdout, each row framed by
// '|' bars; prints "Empty!" if A is NULL.
void matrix_print(float *A, int m, int n) {
    if (!A) {printf("Empty!\n"); return;}
    printf("\n");
    for (int r = 0; r < m; r++) {
        printf(" |");
        for (int c = 0; c < n; c++)
            printf( "%7lg" , *(A+r*n+c) );
        printf("%6c|",' ');
        printf("\n");
    }
    printf("\n");
}
/* C = A (m x q) * B (p x n), row-major. Requires q == p; on a dimension
 * mismatch the function returns without touching C (same observable
 * behavior as before).
 * BUG FIX: the original assigned NULL to its local copy of C on mismatch —
 * a no-op that never reaches the caller and only misleads readers; the dead
 * assignment is removed. */
void matrix_mult(float *A, int m, int q, float *B, int p, int n, float *C) {
    if (q != p) return;
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++) {
            float s = 0.0f;
            for (int r = 0; r < q; r++)
                s += A[i*q + r] * B[r*n + j];
            C[i*n + j] = s;
        }
}
|
12,043 | #include <stdio.h>
/*
* Questo è il programma più semplice che si può immaginare scritto con CUDA C
*
* Cosa fa? Di pratico nulla. Chiama però il kernel "dark", dimostrando che qualcosa
* di oscuro è accaduto sulla scheda grafica. Molte domande restano ancora in sospeso,
* ma il modello di programmazione host-kernel dovrebbe a questo punto essere chiaro.
*
*/
// Attenzione a questa parola chiave. Definisce un kernel, ovvero un processo che avviene
// sulla GPU
// An intentionally empty kernel: it performs no work on the GPU, but it can
// be launched from main() to demonstrate the host/kernel invocation model.
__global__ void dark(void)
{
    // Empty kernel body. It can still be invoked from main().
}
// Launch the empty "dark" kernel and greet the user.
// Improvement: kernel launches are asynchronous, so wait for completion
// before the process exits (harmless for an empty kernel, but the correct
// pattern to demonstrate).
int main(void)
{
    // Launch the "dark" kernel on the GPU.
    dark<<<1,1>>>();
    // Wait for the asynchronous launch to complete before exiting.
    cudaDeviceSynchronize();
    printf("Benvenuto nel magico mondo delle GPU. Guarda il file 'hello_kernel.cu' per capire di cosa si tratta.\n");
    return 0;
}
|
12,044 | #include "includes.h"
// Write size - id into testfuck[id], one element per thread.
// BUG FIX: added a bounds guard — without it, any launch whose total thread
// count exceeds `size` writes past the end of the array (assumes `size` is
// the element count of testfuck — TODO confirm with callers).
__global__ void setVal( int* testfuck, int size )
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < size)
        testfuck[id] = size - id;
}
#include <stdio.h>
#include <stdint.h>
#include <cstdlib>   // malloc/free, atoi, rand/srand (used below but previously not included)
#include <ctime>     // time() used to seed rand
#include <iostream>
#include <chrono>
const uint32_t MAX_DATA_SET_SIZE_PER_BLOCK = 1024;
const uint32_t DEFAULT_DATA_SIZE = 8192;
typedef uint32_t (*operation_function)(uint32_t operand1, uint32_t operand2);
// Reduction adapted from http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
// Block-wide reduction of dataSet with a caller-supplied binary operation.
// Each block reduces 2*blockDim.x consecutive elements: the first combine
// happens while loading from global into shared memory, then a shared-memory
// tree reduction halves the active range each pass. The block result lands
// in result[blockIdx.x].
// Preconditions: blockDim.x is a power of two, sharedData holds blockDim.x
// elements, and dataSet holds at least gridDim.x * 2 * blockDim.x elements.
__device__ void reduce(uint32_t *dataSet, uint32_t* sharedData, uint32_t *result, operation_function op)
{
    // Calculate indices
    unsigned int threadId = threadIdx.x;
    unsigned int globalThreadId = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    // First combine on load: global -> shared, halving the data volume
    sharedData[threadId] = op(dataSet[globalThreadId], dataSet[globalThreadId + blockDim.x]);
    __syncthreads();
    // Tree reduction using the provided operation_function; the barrier is
    // outside the if so every thread of the block reaches it
    for (unsigned int i=blockDim.x/2; i>0; i >>= 1)
    {
        if (threadId < i)
        {
            sharedData[threadId] = op(sharedData[threadId], sharedData[threadId + i]);
        }
        __syncthreads();
    }
    // Thread 0 publishes the block's result to global memory
    if (threadId == 0)
    {
        result[blockIdx.x] = sharedData[0];
    }
}
// Add two operands and return the result
// Binary addition operator, used with reduce() to compute a sum.
__device__ uint32_t add(uint32_t operand1, uint32_t operand2)
{
    const uint32_t total = operand1 + operand2;
    return total;
}
// Return the max of 2 elements without blocking (using a conditional)
// Branch-free max of two values: the comparison result (0 or 1) indexes
// into a two-element table of candidates.
__device__ uint32_t maximum(uint32_t element1, uint32_t element2)
{
    uint32_t candidates[2];
    candidates[0] = element1;
    candidates[1] = element2;
    return candidates[element1 < element2];
}
// Return the min of 2 elements without blocking (using a conditional)
// Branch-free min of two values: the comparison result (0 or 1) indexes
// into a two-element table of candidates.
__device__ uint32_t minimum(uint32_t element1, uint32_t element2)
{
    uint32_t candidates[2];
    candidates[0] = element1;
    candidates[1] = element2;
    return candidates[element1 > element2];
}
// Get the sum of all elements in dataSet
// Per-block sum reduction of dataSet into result[blockIdx.x].
// Launch with dynamic shared memory of blockDim.x * sizeof(uint32_t).
__global__ void sum(uint32_t* dataSet, uint32_t *result)
{
    extern __shared__ uint32_t scratch[];
    reduce(dataSet, scratch, result, &add);
}
// Get the max of all elements in dataSet
// Per-block max reduction of dataSet into result[blockIdx.x].
// Launch with dynamic shared memory of blockDim.x * sizeof(uint32_t).
__global__ void getMax(uint32_t* dataSet, uint32_t *result)
{
    extern __shared__ uint32_t scratch[];
    reduce(dataSet, scratch, result, &maximum);
}
// Get the min of all elements in dataSet
// Per-block min reduction of dataSet into result[blockIdx.x].
// Launch with dynamic shared memory of blockDim.x * sizeof(uint32_t).
__global__ void getMin(uint32_t* dataSet, uint32_t *result)
{
    extern __shared__ uint32_t scratch[];
    reduce(dataSet, scratch, result, &minimum);
}
// Get tha max, min, and average of all values in dataSet
// Compute the max, min, and average of dataSize uint32_t values in device
// buffer `data`, running three block reductions concurrently (one CUDA
// stream each) and finishing the per-block partials on the CPU.
// Precondition: dataSize == numBlocks * 2 * threadsPerBlock with the thread
// count a power of two (reduce() halves the data on load).
void getMaxMinAvg(uint32_t dataSize, uint32_t numBlocks, uint32_t * data, uint32_t *max, uint32_t *min, double *avg)
{
    // Each block consumes 2*numThreads elements (first combine on load).
    int32_t numThreads = dataSize / (numBlocks * 2);
    // Host-side buffers for the per-block partial results.
    uint32_t *cpuMaxResult = (uint32_t *)malloc(numBlocks * sizeof(uint32_t));
    uint32_t *cpuMinResult = (uint32_t *)malloc(numBlocks * sizeof(uint32_t));
    uint32_t *cpuSumResult = (uint32_t *)malloc(numBlocks * sizeof(uint32_t));
    // Device-side buffers for the per-block partial results.
    uint32_t *gpuMaxResult;
    cudaMalloc((void **)&gpuMaxResult, numBlocks * sizeof(uint32_t));
    uint32_t *gpuMinResult;
    cudaMalloc((void **)&gpuMinResult, numBlocks * sizeof(uint32_t));
    uint32_t *gpuSumResult;
    cudaMalloc((void **)&gpuSumResult, numBlocks * sizeof(uint32_t));
    // One event per operation so completion can be polled independently.
    cudaEvent_t gotMax, gotMin, gotSum;
    cudaEventCreate(&gotMax);
    cudaEventCreate(&gotMin);
    cudaEventCreate(&gotSum);
    // One stream per operation so the three reductions can overlap.
    cudaStream_t maxStream, minStream, sumStream;
    cudaStreamCreate(&maxStream);
    cudaStreamCreate(&minStream);
    cudaStreamCreate(&sumStream);
    // Mark when processing starts.
    auto start = std::chrono::high_resolution_clock::now();
    // Run async kernel + copy-back to get the maximum.
    getMax<<<numBlocks, numThreads, numThreads * sizeof(uint32_t), maxStream>>>(data, gpuMaxResult);
    cudaMemcpyAsync(cpuMaxResult, gpuMaxResult, numBlocks * sizeof(uint32_t), cudaMemcpyDeviceToHost, maxStream);
    cudaEventRecord(gotMax, maxStream);
    // Run async kernel + copy-back to get the minimum.
    getMin<<<numBlocks, numThreads, numThreads * sizeof(uint32_t), minStream>>>(data, gpuMinResult);
    cudaMemcpyAsync(cpuMinResult, gpuMinResult, numBlocks * sizeof(uint32_t), cudaMemcpyDeviceToHost, minStream);
    cudaEventRecord(gotMin, minStream);
    // Run async kernel + copy-back to get the sum (for the average).
    sum<<<numBlocks, numThreads, numThreads * sizeof(uint32_t), sumStream>>>(data, gpuSumResult);
    cudaMemcpyAsync(cpuSumResult, gpuSumResult, numBlocks * sizeof(uint32_t), cudaMemcpyDeviceToHost, sumStream);
    cudaEventRecord(gotSum, sumStream);
    bool maxFinished = false;
    bool minFinished = false;
    bool sumFinished = false;
    // Busy-poll the events and report each operation's completion time.
    // BUG FIX: chrono's microseconds count() is a 64-bit integer, which the
    // original printed with %d (undefined behavior); cast to long long and
    // print with %lld.
    while (!maxFinished || !minFinished || !sumFinished)
    {
        if (!maxFinished && (cudaEventQuery(gotMax) == cudaSuccess))
        {
            auto now = std::chrono::high_resolution_clock::now();
            printf("Get Maximum finished after %lldus\n", (long long)std::chrono::duration_cast<std::chrono::microseconds>(now - start).count());
            maxFinished = true;
        }
        if (!minFinished && (cudaEventQuery(gotMin) == cudaSuccess))
        {
            auto now = std::chrono::high_resolution_clock::now();
            printf("Get Minimum finished after %lldus\n", (long long)std::chrono::duration_cast<std::chrono::microseconds>(now - start).count());
            minFinished = true;
        }
        if (!sumFinished && (cudaEventQuery(gotSum) == cudaSuccess))
        {
            auto now = std::chrono::high_resolution_clock::now();
            printf("Get Sum finished after %lldus\n", (long long)std::chrono::duration_cast<std::chrono::microseconds>(now - start).count());
            sumFinished = true;
        }
    }
    cudaDeviceSynchronize();
    // Aggregate the per-block partials on the host.
    // BUG FIX: use UINT32_MAX from <stdint.h>; the original used UINT_MAX,
    // which requires <climits> (not included by this file).
    *max = 0;
    *min = UINT32_MAX;
    double total = 0; // renamed from `sum` to avoid shadowing the kernel name
    for (size_t i = 0; i < numBlocks; i++)
    {
        total += cpuSumResult[i];
        if (cpuMaxResult[i] > *max) {
            *max = cpuMaxResult[i];
        }
        if (cpuMinResult[i] < *min) {
            *min = cpuMinResult[i];
        }
    }
    // Calculate average
    *avg = total / dataSize;
    // Destroy CUDA events
    cudaEventDestroy(gotMax);
    cudaEventDestroy(gotMin);
    cudaEventDestroy(gotSum);
    // Destroy CUDA streams
    cudaStreamDestroy(maxStream);
    cudaStreamDestroy(minStream);
    cudaStreamDestroy(sumStream);
    // Free GPU memory
    cudaFree(gpuMaxResult);
    cudaFree(gpuMinResult);
    cudaFree(gpuSumResult);
    // Free CPU memory
    free(cpuMaxResult);
    free(cpuMinResult);
    free(cpuSumResult);
}
// Main function
// Driver: fill a pinned host buffer with random values, copy it to the GPU,
// and run getMaxMinAvg numRuns times, printing the results.
// Optional args: argv[1] = number of blocks (must be even), argv[2] = runs.
int main(int argc, char* argv[])
{
    // Set number of blocks, data size and runs to their defaults.
    uint32_t dataSize = DEFAULT_DATA_SIZE;
    uint32_t numBlocks = 8;
    uint32_t numRuns = 2;
    // Check command line for the number-of-blocks argument; an even count is
    // required because each block reduces 2*blockDim.x elements.
    if (argc > 1) {
        numBlocks = atoi(argv[1]);
        if ((numBlocks % 2) != 0) {
            printf("Must enter a multiple of 2\n");
            return 1;
        }
        dataSize = numBlocks * MAX_DATA_SET_SIZE_PER_BLOCK ;
    }
    // Check command line for the number-of-runs argument.
    if (argc > 2) {
        numRuns = atoi(argv[2]);
    }
    // Pinned host memory so the async copies in getMaxMinAvg can overlap.
    uint32_t * data;
    cudaMallocHost((void**)&data, dataSize * sizeof(uint32_t));
    // Device memory for the data set; reused across runs.
    uint32_t *gpuData;
    cudaMalloc((void **)&gpuData, dataSize * sizeof(uint32_t));
    srand((unsigned)time(NULL));
    for (uint32_t x = 0; x < numRuns; x++)
    {
        printf("Run %d\n--------------------------------------------------\n", x);
        // Populate the data set with random values in [1, dataSize*4].
        uint32_t range = dataSize * 4;
        for(size_t i = 0; i < dataSize; i++){
            data[i] = rand() % range + 1;
        }
        // Copy the data set to the device (blocking).
        cudaMemcpy(gpuData, data, dataSize * sizeof(uint32_t), cudaMemcpyHostToDevice);
        // Calculate max/min/average on the GPU.
        double avg;
        uint32_t max;
        uint32_t min;
        getMaxMinAvg(dataSize, numBlocks, gpuData, &max, &min, &avg);
        // Print results.
        printf("\n\n");
        printf("Average is %f\n", avg);
        printf ("Max is %d\n", max);
        printf ("Min is %d\n", min);
        printf("\n\n\n");
    }
    // Free allocated memory.
    cudaFreeHost(data);
    cudaFree(gpuData);
}
|
12,046 | //pass
//--gridDim=[4800,1,1] --blockDim=[256,1,1]
#include "common.h"
// Set flags[offset] = 1 at each vertex's segment-start offset.
// One thread per vertex; threads beyond verticesCount do nothing.
__global__ void markSegments(const uint *verticesOffsets,
                             uint *flags,
                             uint verticesCount)
{
    const uint gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= verticesCount)
        return;
    flags[verticesOffsets[gid]] = 1;
}
|
12,047 | /* A program to solve the hotplate problem using GPU
Author: Bukola Grace Omotoso
MNumber: M01424979
ID: bgo2e
Last Modified: 11/27/2018
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<cuda.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
/* Allocate a rows x columns hotplate as an array of row pointers.
 * Contents are left uninitialized (initializeHotPlate fills them). */
float** buildHotplate(int rows, int columns) {
    float** plate = (float**) malloc(rows * sizeof(float*));
    for (int r = 0; r < rows; r++) {
        plate[r] = (float*) malloc(columns * sizeof(float));
    }
    return plate;
}
/* Copy a 2-D (row-pointer) matrix into a newly malloc'd contiguous
 * row-major buffer of num_rows * num_cols floats and return it. */
float* flattenArray(float** arrayToFlatten, int num_rows, int num_cols){
    float* flat = (float*) malloc(num_rows * num_cols * sizeof(float));
    int k = 0;
    for (int r = 0; r < num_rows; r++)
        for (int c = 0; c < num_cols; c++)
            flat[k++] = arrayToFlatten[r][c];
    return flat;
}
/* Initialize boundary temperatures on both plates and seed the interior of
 * `hotplate` with the average boundary temperature.
 * Branch order matters: the final bottom-row branch overrides the left/right
 * edge values for row num_rows-1 (left/right branches exclude that row).
 * NOTE(review): the interior of hotplateClone is NOT seeded here — it is
 * only ever written by generateHeat before being read, so this appears
 * intentional; confirm if the clone is ever read first.
 * NOTE(review): the first condition uses bitwise '&' on comparison results;
 * since each operand is 0 or 1 this behaves like '&&' here. */
void initializeHotPlate(int num_rows, int num_cols, float** hotplate, float** hotplateClone, int top_temp, int left_temp, int right_temp, int bottom_temp) {
    /* Average of all boundary cells, used as the interior starting guess. */
    int num_outer_grid = (2 * num_rows) + (2 * (num_cols - 2));
    float outer_grid_sum = (top_temp * (num_cols - 2)) + (left_temp * (num_rows - 1)) + (bottom_temp * num_cols) + (right_temp * (num_rows - 1));
    float initial_inner_val = outer_grid_sum / num_outer_grid;
    for (int row = 0; row < num_rows; row++) {
        for (int column = 0; column < num_cols; column++) {
            /* Top values fill the top row except the two corner cells. */
            if ((row == 0) & (column != 0 & column != num_cols - 1)) {
                hotplate[row][column] = top_temp;
                hotplateClone[row][column] = top_temp;
            }
            /* Left edge, excluding the bottom row (bottom wins there). */
            else if (column == 0 && (row != (num_rows-1))) {
                hotplate[row][column] = left_temp;
                hotplateClone[row][column] = left_temp;
            }
            /* Right edge, excluding the bottom row. */
            else if (column == (num_cols - 1) && (row != (num_rows-1))) {
                hotplate[row][column] = right_temp;
                hotplateClone[row][column] = right_temp;
            }
            /* Entire bottom row, including both corners. */
            else if(row == (num_rows -1 )){
                hotplate[row][column] = bottom_temp;
                hotplateClone[row][column] = bottom_temp;
            }
            /* Interior cells get the boundary average (hotplate only). */
            if ((row != 0) && (row != num_rows - 1) && (column != 0) && (column != num_cols - 1))
                hotplate[row][column] = initial_inner_val;
        }
    }
}
/* NOTE(review): this swaps only the LOCAL copies of the two pointers, so it
 * has no effect whatsoever on the caller — as written it is a no-op. A real
 * pointer swap would need float** parameters. main() does not call it; the
 * buffer swap there is done inline with a temporary. Left unchanged because
 * fixing it would change the function's signature. */
void swapHotplate(float *a, float *b) {
    float *tmp = a;
    a = b;
    b = tmp;
}
/* One Jacobi relaxation sweep over the interior rows of the hotplate.
 * Each thread owns one row: it averages the 4-neighborhood from `hotplate`
 * into `hotplateClone` and records the row's largest cell change in
 * d_maximums[row]. (epsilon is unused here; convergence is decided on the
 * host.)
 * BUG FIX: the original never wrote d_maximums for the boundary rows, so
 * the host scan over maximums[0 .. num_rows-2] read uninitialized device
 * memory at index 0 (cudaMalloc does not zero). Boundary rows now write 0.
 * Also replaced the double literal 4.0 with 4.0f to avoid a per-cell
 * double round-trip in a float computation. */
__global__
void generateHeat(int num_rows, int num_cols, float* hotplate, float* hotplateClone, float* d_maximums, float epsilon) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows)
        return;                       // excess threads past the plate
    if (row == 0 || row == num_rows - 1) {
        d_maximums[row] = 0.0f;       // boundary rows never change
        return;
    }
    float max_difference = 0.0f;
    for (int col = 1; col < (num_cols - 1); col++) {
        int idx = (row * num_cols) + col;
        float top = hotplate[idx - num_cols];
        float bottom = hotplate[idx + num_cols];
        float left = hotplate[idx - 1];
        float right = hotplate[idx + 1];
        float previous_val = hotplate[idx];
        float current_val = (top + bottom + left + right) / 4.0f;
        float diff = fabsf(previous_val - current_val);
        if (diff > max_difference)
            max_difference = diff;
        hotplateClone[idx] = current_val;
    }
    d_maximums[row] = max_difference;
}
/*Get the maximum values from all threads*/
/* Return the largest value in arr[0 .. n-1]; n must be >= 1. */
float max_max_diff(float arr[], int n)
{
    float best = arr[0];
    for (int i = 1; i < n; i++)
        best = (arr[i] > best) ? arr[i] : best;
    return best;
}
// Hotplate driver: parse arguments, initialize both plates, then iterate
// generateHeat on the GPU until the largest per-cell change drops below
// epsilon, swapping the two device buffers each sweep.
// Fixes over the original: arguments are validated before use (was a
// segfault on missing args); the flattened buffers are no longer malloc'd
// twice (the first allocation was immediately overwritten by flattenArray's
// own allocation and leaked); all host memory is freed at exit.
int main(int argc, char const *argv[])
{
    // Expected arguments: rows cols top left right bottom epsilon
    if (argc < 8) {
        fprintf(stderr, "usage: %s rows cols top left right bottom epsilon\n", argv[0]);
        return 1;
    }
    int num_rows = atoi(argv[1]);
    int num_cols = atoi(argv[2]);
    int top_temp = atoi(argv[3]);
    int left_temp = atoi(argv[4]);
    int right_temp = atoi(argv[5]);
    int bottom_temp = atoi(argv[6]);
    float epsilon = atof(argv[7]);
    float* flattenedhotplate;
    float* flattenedhotplateClone;
    float* maximums;
    // Launch geometry: `gridsize` threads per block, enough blocks for one
    // thread per row (rounded up).
    int gridsize = 8;
    int block = 0;
    int block1 = (num_rows/gridsize);
    if (gridsize > num_rows)
        block = 1;
    else if ((block1 * gridsize) < num_rows){
        block = block1 + 1;
    }else{
        block = block1;
    }
    size_t nBytes = num_rows*num_cols * sizeof(float);
    double max_difference = epsilon + 1;
    int counter = 0;
    float** hotplate = buildHotplate(num_rows, num_cols);
    float** hotplateClone = buildHotplate(num_rows, num_cols);
    initializeHotPlate(num_rows, num_cols, hotplate, hotplateClone, top_temp, left_temp, right_temp, bottom_temp);
    maximums = (float*)malloc(num_rows*sizeof(float));
    // LEAK FIX: flattenArray allocates its own buffer, so the extra mallocs
    // that the original immediately overwrote are gone.
    flattenedhotplate = flattenArray(hotplate, num_rows, num_cols);
    flattenedhotplateClone = flattenArray(hotplateClone, num_rows, num_cols);
    float *d_hotplate;
    float *d_hotplateClone;
    float *d_maximums;
    CHECK(cudaMalloc((float**)&d_hotplate, nBytes));
    CHECK(cudaMalloc((float**)&d_hotplateClone, nBytes));
    CHECK(cudaMalloc((float**)&d_maximums, num_rows*sizeof(float)));
    CHECK(cudaMemcpy( d_hotplate, flattenedhotplate, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_hotplateClone, flattenedhotplateClone, nBytes, cudaMemcpyHostToDevice));
    printf("%10s%10s\n", "Iteration", "Epsilon");
    // Jacobi loop: sweep, pull per-row maxima back, swap device buffers.
    while(max_difference > epsilon){
        generateHeat<<<block,gridsize>>>(num_rows, num_cols, d_hotplate, d_hotplateClone, d_maximums, epsilon);
        cudaDeviceSynchronize();
        CHECK(cudaMemcpy(maximums, d_maximums, num_rows*sizeof(float), cudaMemcpyDeviceToHost));
        max_difference = max_max_diff(maximums, num_rows-1);
        float *T = d_hotplate;
        d_hotplate = d_hotplateClone;
        d_hotplateClone = T;
        // Report on power-of-two iterations to keep output short.
        if (counter > 0 && (counter & (counter - 1)) == 0)
            printf("%6d%15.6f\n", counter, max_difference);
        if (max_difference < epsilon) {
            printf("%6d%15.6f\n", counter, max_difference);
            break;
        }
        counter++;
    }
    cudaFree(d_hotplate);
    cudaFree(d_hotplateClone);
    cudaFree(d_maximums);
    // LEAK FIX: release all host-side allocations.
    for (int r = 0; r < num_rows; r++) {
        free(hotplate[r]);
        free(hotplateClone[r]);
    }
    free(hotplate);
    free(hotplateClone);
    free(flattenedhotplate);
    free(flattenedhotplateClone);
    free(maximums);
    return 0;
}
|
12,048 | //
// Created by luchin on 29-07-21.
//
#include <cassert>
#include <iostream>
//static cudaError_t checkCuda(cudaError_t result) {
// if (result != cudaSuccess) {
// fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
// assert(result == cudaSuccess);
// }
// return result;
//}
//
//Par_CUDA::Par_CUDA() : AbstractGoL() {
// int devId = 0;
// cudaDeviceProp prop;
// checkCuda(cudaGetDeviceProperties(&prop, devId));
// checkCuda(cudaSetDevice(devId));
// int total_size = sizeof(char) * LARGO * LARGO;
// d_grid = nullptr;
// checkCuda(cudaMalloc(&d_grid, total_size));
//}
//
//__global__ void step(char *grid) {
//#ifdef CUDA_USE_2D
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
//#else
// int tmp = blockIdx.x * blockDim.x + threadIdx.x;
// int x = tmp / LARGO;
// int y = tmp % LARGO;
//#endif
//
// // contamos los vecinos
// // printf("x is %d and y is %d\n", x, y);
// if (x > LARGO || y > LARGO) return;
// x += LARGO; // nos aseguramos de que x-1 sea positivo
// y += LARGO;
// int x_m = (x - 1) % LARGO;
// int x_p = (x + 1) % LARGO;
// int y_m = (y - 1) % LARGO;
// int y_p = (y + 1) % LARGO;
// x = x % LARGO;
// y = y % LARGO;
// int num_neighbors =
// grid[x_m * LARGO + y_m] + grid[x * LARGO + y_m] + grid[x_p * LARGO + y_m] +
// grid[x_m * LARGO + y] + grid[x_p * LARGO + y] +
// grid[x_m * LARGO + y_p] + grid[x * LARGO + y_p] + grid[x_p * LARGO + y_p];
// char alive = grid[x * LARGO + y];
//
// __syncthreads();
// // reemplazamos los lugares donde corresponde
// if ((alive && num_neighbors == 2) || num_neighbors == 3) {
// grid[x * LARGO + y] = 1;
// } else {
// grid[x * LARGO + y] = 0;
// }
//}
//
//
//void Par_CUDA::run_game(int num_steps) {
//#ifdef CUDA_USE_2D
// dim3 dimGrid((LARGO + 7) / 8, (LARGO + 7) / 88, 1);
// dim3 dimBlock(8, 8, 1);
//#else
// dim3 dimGrid((LARGO * LARGO + 7) / 8, 1, 1);
// dim3 dimBlock(8, 1, 1);
//#endif
// cudaMemcpy(d_grid, h_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyHostToDevice);
// for (int i = 0; i < num_steps; i++) {
// step<<<dimGrid, dimBlock>>>(d_grid);
// }
// cudaMemcpy(h_grid, d_grid, sizeof(char) * LARGO * LARGO, cudaMemcpyDeviceToHost);
//} |
12,049 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (file header says
// "Do not modify"); code is left untouched, comments added for readers.
// NOTE(review): var_7 is written inside the loop — the caller must pass a
// device pointer with at least var_1 elements (main() currently passes a
// host malloc'd buffer; see the note there).
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float* var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
if (comp <= (-0.0f - var_2 + var_3)) {
float tmp_1 = (-1.2778E-37f + -0.0f);
comp = tmp_1 / (+0.0f * var_4 * (var_5 + ldexpf((-1.7950E36f - var_6), 2)));
for (int i=0; i < var_1; ++i) {
comp = (var_8 + var_9);
var_7[i] = -0.0f + var_10;
comp = var_7[i] - +1.0158E-37f - (+1.5951E-44f + var_11 / sqrtf(-1.2438E36f));
}
if (comp >= (+0.0f - (+1.9889E36f / (+1.2658E-35f - (var_12 * (var_13 / var_14)))))) {
float tmp_2 = +1.9292E-41f;
float tmp_3 = ldexpf(var_15 * var_16 + (var_17 + var_18), 2);
comp += tmp_3 / tmp_2 + var_19 * -1.4964E-14f;
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float array with every slot set to v.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    for (int i = 0; i < 10; ++i) {
        buf[i] = v;
    }
    return buf;
}
// Auto-generated harness (file header says "Do not modify"): parses 20
// command-line values and launches the compute kernel with <<<1,1>>>.
// NOTE(review): assumes argc >= 21 — missing arguments crash in atof.
// NOTE(review): tmp_8 is HOST memory (malloc in initPointer) passed to the
// kernel as var_7, which the kernel writes on the DEVICE — that write is
// invalid without cudaMalloc/cudaMemcpy. Left as generated; flagged only.
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float* tmp_8 = initPointer( atof(argv[8]) );
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
  cudaDeviceSynchronize();
  return 0;
}
|
12,050 | #include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
// Controls the number of threads per block to use.
#define DEFAULT_BLOCK_SIZE (512)
// Controls the default number of blocks to use.
#define DEFAULT_BLOCK_COUNT (512)
// The name given to the output file if one isn't specified.
#define DEFAULT_OUTPUT_NAME "output.pgm"
// This macro takes a cudaError_t value and exits the program if it isn't equal
// to cudaSuccess.
#define CheckCUDAError(val) (InternalCUDAErrorCheck((val), #val, __FILE__, __LINE__))
// Increasing this may increase efficiency, but decrease responsiveness to
// signals.
#define SAMPLES_PER_THREAD (50)
// The RNG seed used when initializing the RNG states on the GPU.
#define DEFAULT_RNG_SEED (1337)
// The type that we use to keep track of internal pixel counts. Must be a
// numerical type that will work with both IncrementPixelCounter,
// GetLinearColorScale, and DoGammaCorrection. Floating-point values *ought* to
// work here, too.
typedef uint32_t Pixel;
// Holds the boundaries and sizes of the fractal, in both pixels and numbers
// Holds the boundaries and sizes of the fractal, in both pixels and numbers
// on the complex plane.
typedef struct {
  // The width and height of the image in pixels.
  int w;
  int h;
  // The boundaries of the fractal on the complex plane.
  double min_real;
  double min_imag;
  double max_real;
  double max_imag;
  // The distance between adjacent pixels along the real and imaginary axes
  // (derived from the boundaries and pixel sizes; see RecomputePixelDeltas).
  double delta_real;
  double delta_imag;
} FractalDimensions;
// This struct holds the parameters for different types of "iterations" needed
// when calculating the Buddhabrot.
typedef struct {
  // This is the maximum number of iterations to run to see if a point escapes.
  int max_escape_iterations;
  // If a point escapes in fewer than this many iterations, it will be ignored.
  int min_escape_iterations;
} IterationControl;
// Holds global state in a single struct. Zeroed at program start (see main)
// and again by CleanupGlobals.
static struct {
  // The CUDA device to use. Defaults to 0.
  int cuda_device;
  // This tracks the random number generator states for the GPU code; one
  // entry per GPU thread.
  curandState_t *rng_states;
  // The number of threads and blocks to use when calculating the Buddhabrot.
  int block_size, block_count;
  // The filename to which a bitmap image will be saved, or NULL if an image
  // should not be saved.
  const char *output_image;
  // The filename from which to load an in-progress image buffer, or to which
  // the in-progress buffer should be stored if operation is interrupted.
  const char *inprogress_file;
  // The number of seconds to run the calculation. If negative, run
  // indefinitely until a signal is received.
  double seconds_to_run;
  // If this is nonzero, the program should save the image and quit as soon as
  // the current iteration finishes. (Set by the SIGINT handler.)
  int quit_signal_received;
  // Holds various iteration-related settings.
  IterationControl iterations;
  // The size and location of the fractal and output image.
  FractalDimensions dimensions;
  // The host and device buffers which contain the numbers of times an escaping
  // point's path crossed each point in the complex plane.
  Pixel *device_buddhabrot;
  Pixel *host_buddhabrot;
  // The gamma value for gamma correction. Non-positive disables correction.
  double gamma_correction;
  // Buffer for a single grayscale image (16 bits per pixel).
  uint16_t *grayscale_image;
} g;
// Returns the size, in bytes, of the internal image buffer used to hold the
// pixel data.
// Returns the size, in bytes, of the internal image buffer used to hold the
// pixel data: width * height * sizeof(Pixel), computed in 64 bits to avoid
// overflow for large images.
static uint64_t GetImageBufferSize(void) {
  uint64_t pixel_count = ((uint64_t) g.dimensions.w) *
    ((uint64_t) g.dimensions.h);
  return pixel_count * sizeof(Pixel);
}
// If any globals have been initialized, this will free them. (Relies on
// globals being set to 0 at the start of the program)
// If any globals have been initialized, this will free them. (Relies on
// globals being set to 0 at the start of the program; cudaFree/free accept
// NULL, so unconditional frees are safe.)
static void CleanupGlobals(void) {
  // BUG FIX: g.rng_states was freed twice here (and once more below),
  // which makes the second cudaFree fail with cudaErrorInvalidValue.
  cudaFree(g.rng_states);
  cudaFree(g.device_buddhabrot);
  free(g.grayscale_image);
  free(g.host_buddhabrot);
  // Zero everything so a repeated call is a harmless no-op.
  memset(&g, 0, sizeof(g));
}
// Returns the current time in seconds.
// Returns the current wall-clock time as a floating-point number of seconds
// (seconds + nanoseconds since the epoch). Exits the program on failure.
static double CurrentSeconds(void) {
  struct timespec now;
  if (clock_gettime(CLOCK_REALTIME, &now) != 0) {
    printf("Error getting time.\n");
    exit(1);
  }
  double seconds = (double) now.tv_sec;
  double fraction = ((double) now.tv_nsec) / 1e9;
  return seconds + fraction;
}
// Prints an error message and exits the program if the cudaError_t value is
// not equal to cudaSuccess. Generally, this will be called via the
// CheckCUDAError macro.
static void InternalCUDAErrorCheck(cudaError_t result, const char *fn,
    const char *file, int line) {
  // Fast path: nothing to do on success.
  if (result == cudaSuccess) return;
  // Report the numeric code, its human-readable name, and the exact call
  // site (the CheckCUDAError macro supplies fn/file/line).
  printf("CUDA error %d (%s) in %s, line %d (%s)\n", (int) result,
    cudaGetErrorString(result), file, line, fn);
  // Release any partially-initialized global state before exiting.
  CleanupGlobals();
  exit(1);
}
// This function is used to initialize the RNG states to use when generating
// starting points in the Buddhabrot calculation. The states array must hold
// one entry for every thread in every block.
// Seeds one curand state per GPU thread. The states array must hold one
// entry for every thread in every block of the launch; each thread uses its
// global index as the curand subsequence so streams are independent.
__global__ void InitializeRNG(uint64_t seed, curandState_t *states) {
  int id = threadIdx.x + (blockIdx.x * blockDim.x);
  curand_init(seed, id, 0, &states[id]);
}
// Allocates CUDA memory and calculates block/grid sizes. Must be called after
// g.w and g.h have been set.
// Allocates CUDA memory and initializes the device RNG. Must be called after
// g.dimensions and the block size/count have been set. Exits on any failure.
static void SetupCUDA(void) {
  float gpu_memory_needed, cpu_memory_needed;
  CheckCUDAError(cudaSetDevice(g.cuda_device));
  size_t pixel_count = g.dimensions.w * g.dimensions.h;
  // The GPU will need space for the image and the RNG states.
  gpu_memory_needed = GetImageBufferSize() +
    (g.block_size * g.block_count * sizeof(curandState_t));
  gpu_memory_needed /= (1024.0 * 1024.0);
  // The CPU needs space for the image and grayscale conversion.
  cpu_memory_needed = GetImageBufferSize() + (pixel_count * sizeof(uint16_t));
  cpu_memory_needed /= (1024.0 * 1024.0);
  printf("Approximate memory needed: %.03f MiB GPU, %.03f MiB CPU\n",
    gpu_memory_needed, cpu_memory_needed);
  // Initialize the host and device image buffers.
  CheckCUDAError(cudaMalloc(&(g.device_buddhabrot), GetImageBufferSize()));
  CheckCUDAError(cudaMemset(g.device_buddhabrot, 0, GetImageBufferSize()));
  g.host_buddhabrot = (Pixel *) calloc(1, GetImageBufferSize());
  if (!g.host_buddhabrot) {
    // BUG FIX: this failure path previously exited silently.
    printf("Failed allocating host image buffer.\n");
    CleanupGlobals();
    exit(1);
  }
  // Initialize the RNG state for the device: one state per thread.
  CheckCUDAError(cudaMalloc(&(g.rng_states), g.block_size * g.block_count *
    sizeof(curandState_t)));
  // BUG FIX: the grid and block arguments were swapped relative to the
  // DrawBuddhabrot launch in RenderImage (<<<g.block_count, g.block_size>>>).
  // The thread total happened to match, but the config was inconsistent.
  InitializeRNG<<<g.block_count, g.block_size>>>(DEFAULT_RNG_SEED,
    g.rng_states);
  // Catch launch-configuration errors as well as execution errors.
  CheckCUDAError(cudaGetLastError());
  CheckCUDAError(cudaDeviceSynchronize());
  g.grayscale_image = (uint16_t *) calloc(pixel_count, sizeof(uint16_t));
  if (!g.grayscale_image) {
    printf("Failed allocating grayscale image.\n");
    CleanupGlobals();
    exit(1);
  }
}
// Returns the size, in bytes, of f. Exits on error.
// Returns the size, in bytes, of f by seeking to the end, reading the
// offset, and rewinding to the start. Exits (after cleanup) on any error.
static uint64_t GetFileSize(FILE *f) {
  if (fseek(f, 0, SEEK_END) != 0) {
    printf("Failed seeking file end: %s\n", strerror(errno));
    CleanupGlobals();
    exit(1);
  }
  int64_t size = ftell(f);
  if (size < 0) {
    printf("Failed reading file size: %s\n", strerror(errno));
    CleanupGlobals();
    exit(1);
  }
  // Leave the stream positioned at the beginning for the caller.
  if (fseek(f, 0, SEEK_SET) != 0) {
    printf("Failed seeking file start: %s\n", strerror(errno));
    CleanupGlobals();
    exit(1);
  }
  return (uint64_t) size;
}
// Loads the in-progress buffer from a file, if the file exists. Exits if an
// error occurs. Creates the file if it doesn't exist.
static void LoadInProgressBuffer(void) {
  uint64_t expected_size, file_size;
  FILE *f = NULL;
  // No in-progress file configured: nothing to load.
  if (!g.inprogress_file) return;
  // We won't consider it an error if the file doesn't exist, and won't try to
  // load its contents.
  f = fopen(g.inprogress_file, "rb");
  expected_size = GetImageBufferSize();
  printf("Loading previous image state from %s.\n",
    g.inprogress_file);
  if (!f) {
    // ENOENT = file simply doesn't exist yet; any other errno is fatal.
    if (errno == ENOENT) {
      printf("File %s doesn't exist yet. Not loading.\n",
        g.inprogress_file);
      return;
    }
    printf("Failed opening %s: %s\n", g.inprogress_file, strerror(errno));
    CleanupGlobals();
    exit(1);
  }
  file_size = GetFileSize(f);
  // Ensure the file matches the expected size of our image buffer. (A size
  // mismatch means the saved buffer was rendered at different dimensions.)
  if (file_size != expected_size) {
    printf("The size of %s doesn't match the expected size of %lu bytes.\n",
      g.inprogress_file, (unsigned long) expected_size);
    fclose(f);
    CleanupGlobals();
    exit(1);
  }
  // Read the file to the local buffer, then update the device copy.
  if (fread(g.host_buddhabrot, expected_size, 1, f) != 1) {
    printf("Failed reading %s: %s\n", g.inprogress_file, strerror(errno));
    fclose(f);
    CleanupGlobals();
    exit(1);
  }
  fclose(f);
  f = NULL;
  // Push the restored counts to the GPU so rendering continues from them.
  CheckCUDAError(cudaMemcpy(g.device_buddhabrot, g.host_buddhabrot,
    expected_size, cudaMemcpyHostToDevice));
}
// Saves the in-progress buffer to a file, if the filename was specified.
// Exits if an error occurs.
static void SaveInProgressBuffer(void) {
  FILE *f = NULL;
  // No in-progress file configured: nothing to save.
  if (!g.inprogress_file) return;
  printf("Saving in-progress buffer to %s.\n", g.inprogress_file);
  // "wb" truncates any previous contents; the whole buffer is rewritten.
  f = fopen(g.inprogress_file, "wb");
  if (!f) {
    printf("Failed opening %s: %s\n", g.inprogress_file, strerror(errno));
    CleanupGlobals();
    exit(1);
  }
  // Write the raw host-side Pixel buffer in one shot. Note this assumes
  // g.host_buddhabrot already holds the latest device results (RenderImage
  // copies them back before this is called).
  if (fwrite(g.host_buddhabrot, GetImageBufferSize(), 1, f) != 1) {
    printf("Failed writing data to %s: %s\n", g.inprogress_file,
      strerror(errno));
    fclose(f);
    CleanupGlobals();
    exit(1);
  }
  fclose(f);
}
// This returns nonzero if the given point is in the main cardioid of the set
// and is therefore guaranteed to not escape.
// Returns nonzero when (real, imag) lies inside the Mandelbrot set's main
// cardioid, meaning the point is guaranteed never to escape. Algorithm taken
// from the Wikipedia Mandelbrot-set page.
inline __device__ int InMainCardioid(double real, double imag) {
  double y_squared = imag * imag;
  double x_offset = real - 0.25;
  double q = (x_offset * x_offset) + y_squared;
  return (q * (q + x_offset)) < (y_squared * 0.25);
}
// This returns nonzero if the given point is in the order 2 bulb of the set
// and therefore guaranteed to not escape.
// Returns nonzero when (real, imag) lies inside the order-2 bulb (the circle
// of radius 1/4 centered at -1), which also guarantees the point can't
// escape.
inline __device__ int InOrder2Bulb(double real, double imag) {
  double shifted = real + 1;
  double dist_squared = (shifted * shifted) + (imag * imag);
  return dist_squared < (1.0 / 16.0);
}
// This should be used to update the pixel data for a point that is encountered
// in the set.
inline __device__ void IncrementPixelCounter(double real, double imag,
    Pixel *data, FractalDimensions *d) {
  int row, col;
  // There's a small issue here with integer-dividing where values that should
  // be immediately outside of the canvas can still appear on row or col 0, so
  // just return early if we're outside the boundary.
  if ((real < d->min_real) || (imag < d->min_imag)) return;
  // Map the complex-plane coordinate to a pixel (truncation toward zero).
  col = (real - d->min_real) / d->delta_real;
  row = (imag - d->min_imag) / d->delta_imag;
  // Bounds check before writing; points outside the canvas are ignored.
  // NOTE(review): this increment is not atomic, so concurrent threads hitting
  // the same pixel can lose counts -- presumably an accepted tradeoff for a
  // statistical render; confirm.
  if ((row >= 0) && (row < d->h) && (col >= 0) && (col < d->w)) {
    data[(row * d->w) + col] += 1;
  }
}
// Does the Mandelbrot-set iterations for the given (real, imag) point. Returns
// the number of iterations before the point escapes, or max_iterations if the
// point never escapes.
inline __device__ int IterateMandelbrot(double start_real, double start_imag,
    int max_iterations) {
  double tmp, real, imag;
  int i;
  real = start_real;
  imag = start_imag;
  // This loop-unrolling was tested on a Radeon VII, anything higher or lower
  // than 4 produced worse performance. May differ on other devices, or future
  // compiler updates.
  //#pragma unroll 4
  for (i = 0; i < max_iterations; i++) {
    // NOTE(review): taking fabs() of both components each step makes this
    // the "Burning Ship" iteration rather than the classic Mandelbrot map --
    // presumably intentional; confirm.
    real = fabs(real);
    imag = fabs(imag);
    // z = z^2 + c, expanded into real and imaginary parts.
    tmp = (real * real) - (imag * imag) + start_real;
    imag = 2 * real * imag + start_imag;
    real = tmp;
    // If the point escapes (|z|^2 > 4, i.e. |z| > 2), stop iterating and
    // report how many iterations it took.
    if (((real * real) + (imag * imag)) > 4) return i;
  }
  // The point didn't escape, return max_iterations.
  return max_iterations;
}
// Like IterateMandelbrot, but records the point's path. For efficiency, this
// function also has an important difference from IterateMandelbrot: *it does
// not check the max iterations*. This is important! Do not call this function
// for a point unless you're sure that it escapes in a finite number of
// iterations.
inline __device__ void IterateAndRecord(double start_real, double start_imag,
    Pixel *data, FractalDimensions *d) {
  double tmp, real, imag;
  real = start_real;
  imag = start_imag;
  //#pragma unroll 4
  // Unbounded loop by design: the caller must have already verified (via
  // IterateMandelbrot) that this point escapes in finitely many iterations.
  while (1) {
    // Same fabs-based iteration as IterateMandelbrot, so the path matches
    // the earlier escape check exactly.
    real = fabs(real);
    imag = fabs(imag);
    tmp = (real * real) - (imag * imag) + start_real;
    imag = 2 * real * imag + start_imag;
    real = tmp;
    // Record every intermediate position the orbit visits.
    IncrementPixelCounter(real, imag, data, d);
    // Stop iterating when the point escapes. This must be *guaranteed* to
    // happen by the caller performing a prior check!
    if (((real * real) + (imag * imag)) > 4) break;
  }
}
// This kernel is responsible for drawing the paths of "particles" that escape
// the mandelbrot set. It works as follows:
//
// 1. For each "sample", compute a new random starting point in the complex
// plane
// 2. Do the normal mandelbrot iterations on the starting point, *without*
// recording its path
// 3. If the point didn't escape the path, take a new sample (return to step 1)
// 4. If the point escaped (within the min and max iteration limits), then
// repeat the mandelbrot iterations (e.g. step 2), except record its path
// by incrementing the pixel value for every point it passes through.
__global__ void DrawBuddhabrot(FractalDimensions dimensions, Pixel *data,
    IterationControl iterations, curandState_t *states) {
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  // Each thread owns one pre-seeded RNG state (filled in by InitializeRNG).
  curandState_t *rng = states + index;
  int sample, iterations_needed, max_iterations, min_iterations;
  double real, imag;
  max_iterations = iterations.max_escape_iterations;
  min_iterations = iterations.min_escape_iterations;
  // We're going to pick a number of random starting points determined by the
  // SAMPLES_PER_THREAD value.
  for (sample = 0; sample < SAMPLES_PER_THREAD; sample++) {
    // Sample across the entire domain of the set regardless of our "canvas"
    // (uniform over [-2, 2) on both axes).
    real = (curand_uniform_double(rng) * 4.0) - 2.0;
    imag = (curand_uniform_double(rng) * 4.0) - 2.0;
    // Optimization: we know ahead of time that points from the main cardioid
    // and the largest "bulb" will never escape, and it's fast to check them.
    //if (InMainCardioid(real, imag) || InOrder2Bulb(real, imag)) continue;
    // Now, do the normal Mandelbrot iterations to see how quickly the point
    // escapes (if it does). However, we won't record the path yet.
    iterations_needed = IterateMandelbrot(real, imag, max_iterations);
    // Don't record the path if the point never escaped, or if it escaped too
    // quickly.
    if (iterations_needed >= max_iterations) continue;
    if (iterations_needed < min_iterations) continue;
    // At this point, do the Mandelbrot iterations, but actually record the
    // path because we know the point is "good".
    IterateAndRecord(real, imag, data, &dimensions);
  }
}
// Saturates v to the 16-bit unsigned range: values at or below 0 map to 0,
// values at or above 0xffff map to 0xffff, everything else truncates.
static uint16_t Clamp(double v) {
  if (v <= 0) return 0;
  return (v >= 0xffff) ? 0xffff : (uint16_t) v;
}
// Returns the amount to multiply the original count by in order to get a value
// by which Buddhabrot counts can be multiplied to get a number between 0 and
// 0xffff.
static double GetLinearColorScale(void) {
  int x, y, index;
  Pixel max = 0;
  double to_return;
  index = 0;
  // Scan the entire host buffer for the largest per-pixel count.
  for (y = 0; y < g.dimensions.h; y++) {
    for (x = 0; x < g.dimensions.w; x++) {
      if (g.host_buddhabrot[index] > max) max = g.host_buddhabrot[index];
      index++;
    }
  }
  // Scale so the brightest pixel maps exactly to 0xffff.
  // NOTE(review): if the buffer is all zeros, max is 0 and this divides by
  // zero (yielding +inf) -- confirm that's acceptable for empty renders.
  to_return = ((double) 0xffff) / ((double) max);
  printf("Max value: %lu, scale: %f\n", (unsigned long) max, to_return);
  return to_return;
}
// Returns the gamma-corrected 16-bit color channel value given a Buddhabrot
// iteration count c.
static uint16_t DoGammaCorrection(Pixel c, double linear_scale) {
  double max = 0xffff;
  // Linearly scale the raw count into the [0, 0xffff] range first.
  double scaled = ((double) c) * linear_scale;
  // Don't do gamma correction if the gamma correction argument was negative.
  // (Or zero; the implicit double -> uint16_t conversion truncates here.)
  if (g.gamma_correction <= 0.0) return scaled;
  // Standard gamma curve: out = max * (in / max)^(1 / gamma), clamped to
  // the 16-bit range.
  return Clamp(max * pow(scaled / max, 1 / g.gamma_correction));
}
// Converts the buffer of pixel values to a gamma-corrected grayscale image
// with 16-bit colors. The Pixel values are scaled to fill the 16-bit color
// range.
// Converts the buffer of pixel counts into a gamma-corrected 16-bit
// grayscale image. Counts are scaled so the largest maps to 0xffff. Both
// buffers are row-major with the same layout, so a single flat pass works.
static void SetGrayscalePixels(void) {
  double linear_scale = GetLinearColorScale();
  long pixel_count = (long) g.dimensions.w * (long) g.dimensions.h;
  for (long i = 0; i < pixel_count; i++) {
    g.grayscale_image[i] = DoGammaCorrection(g.host_buddhabrot[i],
      linear_scale);
  }
}
// Renders the fractal image.
static void RenderImage(void) {
  int passes_count = 0;
  double start_seconds;
  printf("Calculating Buddhabrot.\n");
  if (g.seconds_to_run < 0) {
    printf("Press ctrl+C to finish.\n");
  } else {
    printf("Running for %.03f seconds.\n", g.seconds_to_run);
  }
  // Run until either the time elapsed or we've received a SIGINT.
  start_seconds = CurrentSeconds();
  while (!g.quit_signal_received) {
    passes_count++;
    // One pass: block_count x block_size threads, each drawing
    // SAMPLES_PER_THREAD random samples into the device buffer.
    DrawBuddhabrot<<<g.block_count, g.block_size>>>(g.dimensions,
      g.device_buddhabrot, g.iterations, g.rng_states);
    // Synchronizing every pass keeps the loop responsive to SIGINT/timeouts
    // and surfaces any kernel error promptly.
    CheckCUDAError(cudaDeviceSynchronize());
    if ((g.seconds_to_run >= 0) && ((CurrentSeconds() - start_seconds) >
      g.seconds_to_run)) {
      break;
    }
  }
  // Copy the resulting image to CPU memory, and convert the pixels to proper
  // grayscale values.
  CheckCUDAError(cudaMemcpy(g.host_buddhabrot, g.device_buddhabrot,
    GetImageBufferSize(), cudaMemcpyDeviceToHost));
  printf("%d Buddhabrot passes took %f seconds.\n", passes_count,
    CurrentSeconds() - start_seconds);
  SetGrayscalePixels();
}
// Recomputes the spacing between pixels in the image. Returns 0 if any image-
// dimension setting is invalid. Otherwise, returns 1.
// Recomputes the complex-plane spacing between adjacent pixels from the
// current dimensions. Returns 0 (with a message) if any image-dimension
// setting is invalid, 1 on success.
static int RecomputePixelDeltas(void) {
  FractalDimensions *d = &(g.dimensions);
  // Validate pixel sizes first, then the complex-plane boundaries.
  if (d->w <= 0) {
    printf("Output width must be positive.\n");
    return 0;
  }
  if (d->h <= 0) {
    printf("Output height must be positive.\n");
    return 0;
  }
  if (d->max_real <= d->min_real) {
    printf("Maximum real value must be greater than minimum real value.\n");
    return 0;
  }
  if (d->max_imag <= d->min_imag) {
    printf("Minimum imaginary value must be greater than maximum imaginary "
      "value.\n");
    return 0;
  }
  // Spacing = (axis span) / (pixels along that axis).
  d->delta_real = (d->max_real - d->min_real) / ((double) d->w);
  d->delta_imag = (d->max_imag - d->min_imag) / ((double) d->h);
  return 1;
}
// Sets the image boundaries and dimensions to their default values.
// Resets the image boundaries and pixel dimensions to their defaults: a
// 1000x1000 canvas covering -2-2i to 2+2i, with deltas derived from those.
static void SetDefaultCanvas(void) {
  FractalDimensions *d = &(g.dimensions);
  memset(d, 0, sizeof(*d));
  d->w = 1000;
  d->h = 1000;
  d->min_real = -2.0;
  d->min_imag = -2.0;
  d->max_real = 2.0;
  d->max_imag = 2.0;
  // These defaults are always valid, so a failure here is a programming
  // error rather than bad user input.
  if (!RecomputePixelDeltas()) {
    printf("Internal error setting default canvas boundaries!\n");
    exit(1);
  }
}
// If a filename has been set for saving the image, this will attempt to save
// the image to the file. This can modify the image buffer! (For changing byte
// order.)
static void SaveImage(void) {
  uint16_t tmp;
  int i;
  int pixel_count = g.dimensions.w * g.dimensions.h;
  FILE *output = fopen(g.output_image, "wb");
  if (!output) {
    printf("Failed opening output image.\n");
    return;
  }
  // Binary PGM ("P5") header: width, height, then a maxval of 0xffff, which
  // makes each pixel two bytes (big-endian per the PGM format).
  if (fprintf(output, "P5\n%d %d\n%d\n", g.dimensions.w, g.dimensions.h,
    0xffff) <= 0) {
    printf("Failed writing pgm header.\n");
    fclose(output);
    return;
  }
  // Flip the byte-order for the image. This assumes the program is running on
  // a little-endian architecture. I'll fix it if there's ever a demand to run
  // this on something other than Linux on x86 or ARM64 (lol).
  // Note this mutates g.grayscale_image in place.
  for (i = 0; i < pixel_count; i++) {
    tmp = g.grayscale_image[i];
    tmp = ((tmp & 0xff) << 8) | (tmp >> 8);
    g.grayscale_image[i] = tmp;
  }
  if (!fwrite(g.grayscale_image, pixel_count * sizeof(uint16_t), 1, output)) {
    printf("Failed writing pixel data.\n");
    fclose(output);
    return;
  }
  fclose(output);
}
// Prints the program's usage instructions and exits with status 0. Called
// both for --help and whenever an argument fails to parse.
static void PrintUsage(char *program_name) {
  printf("Usage: %s [options]\n\n", program_name);
  printf("Options may be one or more of the following:\n"
    " --help: Prints these instructions.\n"
    " -d <device number>: Sets which GPU to use. Defaults to GPU 0.\n"
    " -o <output file name>: If provided, the rendered image will be saved\n"
    " to a .pgm file with the given name. Otherwise, saves the image\n"
    " to " DEFAULT_OUTPUT_NAME ".\n"
    " -m <max escape iterations>: The maximum number of iterations to use\n"
    " before giving up on seeing whether a point escapes.\n"
    " -c <min escape iterations>: If a point escapes before this number of\n"
    " iterations, it will be ignored.\n"
    " -g <gamma correction>: A gamma-correction value to use on the\n"
    " resulting image. If negative, no gamma correction will occur.\n"
    " -t <seconds to run>: A number of seconds to run the calculation for.\n"
    " Defaults to 10.0. If negative, the program will run continuously\n"
    " and will terminate (saving the image) when it receives a SIGINT.\n"
    " -w <width>: The width of the output image, in pixels. Defaults to\n"
    " 1000.\n"
    " -h <height>: The height of the output image, in pixels. Defaults to\n"
    " 1000.\n"
    " -s <save/load file>: If provided, this gives a file name into which\n"
    " the rendering buffer will be saved, for future continuation.\n"
    " If the program is loaded and the file exists, the buffer will be\n"
    " filled with the contents of the file, but the dimensions must\n"
    " match. Note that this file may be huge for high-resolution images.\n"
    "\n"
    "The following settings control the location of the output image on the\n"
    "complex plane, but samples are always drawn from the entire Mandelbrot-\n"
    "set domain (-2-2i to 2+2i). So these settings can be used to save\n"
    "memory or \"crop\" the output, but won't otherwise speed up rendering:\n"
    " --min-real <min real>: The minimum value along the real axis to\n"
    " include in the output image. Defaults to -2.0.\n"
    " --max-real <max real>: The maximum value along the real axis to\n"
    " include in the output image. Defaults to 2.0.\n"
    " --min-imag <min imag>: The minimum value along the imaginary axis to\n"
    " include in the output image. Defaults to -2.0.\n"
    " --max-imag <max imag>: The maximum value along the imaginary axis to\n"
    " include in the output image. Defaults to 2.0.\n"
    "");
  // Never returns; every caller relies on this exiting the program.
  exit(0);
}
// Returns an integer at the argument after index in argv. Exits if the integer
// is invalid. Takes the index before the expected int value in order to print
// better error messages.
// Parses and returns the base-10 integer in the argument *after* argv[index];
// taking the preceding index lets the error messages name the flag. Exits
// via PrintUsage on a missing or malformed value.
static int ParseIntArg(int argc, char **argv, int index) {
  char *end = NULL;
  int value = 0;
  if ((index + 1) >= argc) {
    printf("Argument %s needs a value.\n", argv[index]);
    PrintUsage(argv[0]);
  }
  value = strtol(argv[index + 1], &end, 10);
  // Reject both trailing junk and a completely empty argument (strtol sets
  // *end to the terminator in either case, so check the input's first byte).
  if ((argv[index + 1][0] == 0) || (*end != 0)) {
    printf("Invalid number given to argument %s: %s\n", argv[index],
      argv[index + 1]);
    PrintUsage(argv[0]);
  }
  return value;
}
// Like ParseIntArg, except expects a floating-point double arg.
// Like ParseIntArg, but parses a floating-point double from the argument
// after argv[index]. Exits via PrintUsage on a missing or malformed value.
static double ParseDoubleArg(int argc, char **argv, int index) {
  char *end = NULL;
  double value = 0.0;
  if ((index + 1) >= argc) {
    printf("Argument %s needs a value.\n", argv[index]);
    PrintUsage(argv[0]);
  }
  value = strtod(argv[index + 1], &end);
  // Reject trailing junk and empty strings, mirroring ParseIntArg.
  if ((argv[index + 1][0] == 0) || (*end != 0)) {
    printf("Invalid number given to argument %s: %s\n", argv[index],
      argv[index + 1]);
    PrintUsage(argv[0]);
  }
  return value;
}
// Processes command-line arguments, setting values in the globals struct as
// necessary.
static void ParseArguments(int argc, char **argv) {
  // Each flag that takes a value consumes the following argv entry (hence
  // the i++ before each continue). Any parse failure exits via PrintUsage.
  for (int i = 1; i < argc; i++) {
    if (strcmp(argv[i], "--help") == 0) {
      PrintUsage(argv[0]);
    }
    if (strcmp(argv[i], "-d") == 0) {
      g.cuda_device = ParseIntArg(argc, argv, i);
      i++;
      continue;
    }
    if (strcmp(argv[i], "-o") == 0) {
      if ((i + 1) >= argc) {
        printf("Missing output file name.\n");
        PrintUsage(argv[0]);
      }
      i++;
      // Points into argv, which outlives the program's use of it.
      g.output_image = argv[i];
      continue;
    }
    if (strcmp(argv[i], "-s") == 0) {
      if ((i + 1) >= argc) {
        printf("Missing in-progress buffer file name.\n");
        PrintUsage(argv[0]);
      }
      i++;
      g.inprogress_file = argv[i];
      continue;
    }
    if (strcmp(argv[i], "-m") == 0) {
      g.iterations.max_escape_iterations = ParseIntArg(argc, argv, i);
      if (g.iterations.max_escape_iterations > 60000) {
        printf("Warning: Using a high number of iterations may cause the "
          "program respond slowly to Ctrl+C or time running out.\n");
      }
      i++;
      continue;
    }
    if (strcmp(argv[i], "-c") == 0) {
      g.iterations.min_escape_iterations = ParseIntArg(argc, argv, i);
      i++;
      continue;
    }
    // Dimension/boundary flags re-derive the pixel deltas immediately so an
    // invalid combination is reported at parse time.
    if (strcmp(argv[i], "-w") == 0) {
      g.dimensions.w = ParseIntArg(argc, argv, i);
      if (!RecomputePixelDeltas()) PrintUsage(argv[0]);
      i++;
      continue;
    }
    if (strcmp(argv[i], "-h") == 0) {
      g.dimensions.h = ParseIntArg(argc, argv, i);
      if (!RecomputePixelDeltas()) PrintUsage(argv[0]);
      i++;
      continue;
    }
    if (strcmp(argv[i], "-g") == 0) {
      g.gamma_correction = ParseDoubleArg(argc, argv, i);
      i++;
      continue;
    }
    if (strcmp(argv[i], "-t") == 0) {
      g.seconds_to_run = ParseDoubleArg(argc, argv, i);
      i++;
      continue;
    }
    if (strcmp(argv[i], "--min-real") == 0) {
      g.dimensions.min_real = ParseDoubleArg(argc, argv, i);
      if (!RecomputePixelDeltas()) PrintUsage(argv[0]);
      i++;
      continue;
    }
    if (strcmp(argv[i], "--max-real") == 0) {
      g.dimensions.max_real = ParseDoubleArg(argc, argv, i);
      if (!RecomputePixelDeltas()) PrintUsage(argv[0]);
      i++;
      continue;
    }
    if (strcmp(argv[i], "--min-imag") == 0) {
      g.dimensions.min_imag = ParseDoubleArg(argc, argv, i);
      if (!RecomputePixelDeltas()) PrintUsage(argv[0]);
      i++;
      continue;
    }
    if (strcmp(argv[i], "--max-imag") == 0) {
      g.dimensions.max_imag = ParseDoubleArg(argc, argv, i);
      if (!RecomputePixelDeltas()) PrintUsage(argv[0]);
      i++;
      continue;
    }
    // Unrecognized argument, print the usage string.
    printf("Invalid argument: %s\n", argv[i]);
    PrintUsage(argv[0]);
  }
}
// SIGINT handler: asks the render loop (RenderImage) to stop after the
// current pass so the buffers can be saved cleanly.
void SignalHandler(int signal_number) {
  g.quit_signal_received = 1;
  // NOTE(review): printf is not async-signal-safe, so calling it from a
  // signal handler is technically undefined behavior -- consider write(2).
  printf("Signal %d received, waiting for current pass to finish...\n",
    signal_number);
}
int main(int argc, char **argv) {
  // Zero all global state, then fill in defaults before parsing arguments.
  memset(&g, 0, sizeof(g));
  g.output_image = DEFAULT_OUTPUT_NAME;
  g.iterations.max_escape_iterations = 100;
  g.iterations.min_escape_iterations = 20;
  g.block_size = DEFAULT_BLOCK_SIZE;
  g.block_count = DEFAULT_BLOCK_COUNT;
  g.seconds_to_run = 10.0;
  g.gamma_correction = 1.0;
  SetDefaultCanvas();
  g.cuda_device = 0;
  // Command-line arguments override the defaults above.
  ParseArguments(argc, argv);
  // SIGINT (ctrl+C) requests a clean shutdown after the current pass.
  if (signal(SIGINT, SignalHandler) == SIG_ERR) {
    printf("Failed setting signal handler.\n");
    CleanupGlobals();
    return 1;
  }
  printf("Creating %dx%d image, %d max iterations.\n",
    g.dimensions.w, g.dimensions.h, g.iterations.max_escape_iterations);
  printf("Calculating image...\n");
  SetupCUDA();
  // Resume from a previous run's buffer if one was saved with -s.
  LoadInProgressBuffer();
  RenderImage();
  SaveInProgressBuffer();
  printf("Saving image.\n");
  SaveImage();
  printf("Done! Output image saved: %s\n", g.output_image);
  CleanupGlobals();
  return 0;
}
|
12,051 | #include "includes.h"
// 1-D weighted stencil: dst[idx] = sum of src[idx+i] * weight[i+raio] for
// i in [-raio, raio). Weights come from const_stencilWeight, presumably a
// __constant__ array declared in includes.h -- confirm.
__global__ void stencilConst2(float *src, float *dst, int size, int raio)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Skip the first raio+1 elements so src[idx - raio] stays in bounds.
  idx += raio+1;
  if (idx >= size)
    return;
  float out = 0;
  #pragma unroll
  // NOTE(review): the window [-raio, raio) is asymmetric (it excludes
  // +raio); confirm whether i <= raio was intended. Also, for idx close to
  // size-1 this reads src[idx + raio - 1], which can run past the end of a
  // size-element buffer -- verify that the caller over-allocates src.
  for(int i = -raio;i < raio; i++)
  {
    out += src[idx+i] * const_stencilWeight[i+raio];
  }
  dst[idx] = out;
}
12,052 | /*****************************************************************************
*
* String Pattern Matching - Serial Implementation
*
* Reference: http://people.maths.ox.ac.uk/~gilesm/cuda/
*
*****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <ctime>
// Includes CUDA
#include <cuda_runtime.h>
#define LINEWIDTH 20
// citation: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Serial reference implementation: counts how many times each 4-byte word in
// `words` occurs in `text` at every byte alignment. `length` is the number
// of 32-bit words to scan; the caller must guarantee text[length] is
// readable (the input is padded), since shifted reads combine text[l] and
// text[l+1]. Counts are accumulated into `matches` (not reset here).
void matchPattern_CPU(unsigned int *text, unsigned int *words, int *matches, int nwords, int length)
{
  for (int l = 0; l < length; l++)
  {
    for (int offset = 0; offset < 4; offset++)
    {
      // Rebuild the 32-bit word starting `offset` bytes into text[l].
      unsigned int word = (offset == 0)
          ? text[l]
          : (text[l] >> (8 * offset)) + (text[l + 1] << (32 - 8 * offset));
      for (int w = 0; w < nwords; w++)
        matches[w] += (word == words[w]);
    }
  }
}
// GPU version of matchPattern_CPU: one thread per 32-bit word of text, each
// checking all four byte alignments. Hits are accumulated with atomicAdd
// since many threads may match the same keyword. The caller must pad text so
// text[idx + 1] is readable for every idx < length, and zero `matches`
// before launch.
__global__ void matchPattern_gpu_1(unsigned int *text, unsigned int *words, int *matches, int nwords, int length)
{
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= length) return;
  for (int offset = 0; offset < 4; offset++)
  {
    // Rebuild the 32-bit word starting `offset` bytes into text[idx].
    unsigned int word = (offset == 0)
        ? text[idx]
        : (text[idx] >> (8 * offset)) + (text[idx + 1] << (32 - 8 * offset));
    for (int w = 0; w < nwords; w++)
    {
      if (word == words[w])
        atomicAdd(&matches[w], 1);
    }
  }
}
// Reads keywords and a text file, counts keyword occurrences on the CPU and
// the GPU, and reports any mismatches between the two.
int main(int argc, const char **argv)
{
  int length, len, nwords=5, matches[nwords];
  char *ctext, keywords[nwords][LINEWIDTH], *line;
  line = (char*) malloc(sizeof(char)*LINEWIDTH);
  unsigned int *text, *words;
  memset(matches, 0, sizeof(matches));
  // read in text and keywords for processing
  FILE *fp, *wfile;
  wfile = fopen("./data/keywords.txt","r");
  if (!wfile)
  { printf("keywords.txt: File not found.\n"); exit(0);}
  int k=0, cnt = nwords;
  size_t linelen = LINEWIDTH;
  while(getline(&line, &linelen, wfile) != -1 && cnt--)
  {
    // BUG FIX: the bound was sizeof(line) -- the size of a pointer, not the
    // destination buffer. Only the first 4 characters matter anyway.
    strncpy(keywords[k], line, LINEWIDTH - 1);
    keywords[k][4] = '\0';
    k++;
  }
  fclose(wfile);
  fp = fopen("./data/small.txt","r");
  if (!fp)
  { printf("Unable to open the file.\n"); exit(0);}
  // Count the bytes, then read them; pad with 4 spaces so shifted word
  // reads of text[l+1] stay inside the buffer.
  length = 0;
  while (getc(fp) != EOF) length++;
  ctext = (char *) malloc(length+4);
  rewind(fp);
  for (int l=0; l<length; l++) ctext[l] = getc(fp);
  for (int l=length; l<length+4; l++) ctext[l] = ' ';
  fclose(fp);
  printf("Length : %d\n", length );
  // define number of words of text, and set pointers
  len = length/4;
  text = (unsigned int *) ctext;
  // Pack each 4-char keyword into a little-endian 32-bit word for matching.
  words = (unsigned int *) malloc(nwords*sizeof(unsigned int));
  for (int w=0; w<nwords; w++)
  {
    words[w] = ((unsigned int) keywords[w][0])
      + ((unsigned int) keywords[w][1])*(1<<8)
      + ((unsigned int) keywords[w][2])*(1<<16)
      + ((unsigned int) keywords[w][3])*(1<<24);
  }
  // CPU execution
  const clock_t begin_time = clock();
  matchPattern_CPU(text, words, matches, nwords, len);
  float runTime = (float)( clock() - begin_time ) / CLOCKS_PER_SEC;
  printf("Time for matching keywords: %fs\n\n", runTime);
  printf("Printing Matches:\n");
  printf("Word\t |\tNumber of Matches\n===================================\n");
  for (int i = 0; i < nwords; ++i)
    printf("%s\t |\t%d\n", keywords[i], matches[i]);
  // GPU execution
  unsigned int *d_text; unsigned int *d_words; int *d_matches;
  int *h_matches;
  h_matches = (int *)malloc(nwords*sizeof(int));
  // BUG FIX: the device text size was sizeof(char)*strlen(ctext), but ctext
  // is not NUL-terminated, so strlen() was undefined behavior. The buffer is
  // exactly length+4 bytes.
  size_t text_bytes = (size_t)length + 4;
  checkCudaErrors(cudaMalloc((void**)&d_words, nwords*sizeof(unsigned int)));
  checkCudaErrors(cudaMalloc((void**)&d_matches, nwords*sizeof(int)));
  checkCudaErrors(cudaMalloc((void**)&d_text, text_bytes));
  // BUG FIX: d_matches was never zeroed, so the kernel's atomicAdd
  // accumulated onto uninitialized device memory.
  checkCudaErrors(cudaMemset(d_matches, 0, nwords*sizeof(int)));
  checkCudaErrors(cudaMemcpy(d_text, text, text_bytes, cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(d_words, words, nwords*sizeof(unsigned int), cudaMemcpyHostToDevice));
  // BUG FIX: a grid of len/32 blocks silently dropped the tail whenever len
  // isn't a multiple of 32; use a ceiling division (the kernel bounds-checks).
  int threads = 32;
  int blocks = (len + threads - 1) / threads;
  matchPattern_gpu_1<<<blocks,threads>>>(d_text, d_words, d_matches, nwords, len);
  checkCudaErrors(cudaPeekAtLastError());
  checkCudaErrors(cudaMemcpy(h_matches, d_matches, nwords*sizeof(int), cudaMemcpyDeviceToHost));
  for(int i = 0; i<nwords; i++) {
    if(matches[i] != h_matches[i]) {
      printf("WRONG OUTPUT:\t %s\t|\t%d\n", keywords[i], h_matches[i]);
    }
  }
  // Release all host and device allocations (line/h_matches were leaked).
  free(line);
  free(ctext);
  free(words);
  free(h_matches);
  cudaFree(d_words);
  cudaFree(d_matches);
  cudaFree(d_text);
  return 0;
}
|
12,053 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SIZE 8192
#define MEMSIZE 32
// Tiled matrix multiply: three = one * two, using MEMSIZE x MEMSIZE
// shared-memory tiles. Assumes blockDim == (MEMSIZE, MEMSIZE) and that
// size is an exact multiple of MEMSIZE (there are no bounds checks below).
__global__ void multiplyTest(double * one, double * two, double * three,
    int size) {
  __shared__ double s_first[MEMSIZE][MEMSIZE];
  __shared__ double s_second[MEMSIZE][MEMSIZE];
  int bx = blockIdx.x;
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  // Each thread produces one element of the output at (row, col).
  int row = by * MEMSIZE + ty;
  int col = bx * MEMSIZE + tx;
  double elementSum = 0;
  // Walk tile-by-tile along the shared (inner) dimension.
  for(int m = 0; m < size / MEMSIZE; m++) {
    // Each thread stages one element of each input tile.
    s_first[ty][tx] = one[row * size + m * MEMSIZE + tx];
    s_second[ty][tx] = two[(m * MEMSIZE + ty) * size + col];
    // Wait until the whole tile is loaded before reading from it.
    __syncthreads();
    for(int k = 0; k < MEMSIZE; k++)
      elementSum += s_first[ty][k] * s_second[k][tx];
    // Keep fast threads from overwriting the tile while others still read it.
    __syncthreads();
  }
  three[row * size + col] = elementSum;
}
// Fills two SIZE x SIZE matrices with 2.0, multiplies them on the GPU with
// the tiled kernel, and reports the elapsed wall time.
int main() {
  clock_t start;
  double *h_first = (double *) malloc(SIZE * SIZE * sizeof(double));
  double *h_second = (double *) malloc(SIZE * SIZE * sizeof(double));
  double *h_sum = (double *) malloc(SIZE * SIZE * sizeof(double));
  // SIZE*SIZE doubles is 512 MiB per matrix; check the allocations.
  if (!h_first || !h_second || !h_sum) {
    printf("Host allocation failed.\n");
    return 1;
  }
  long i;
  for(i = 0; i < SIZE * SIZE; i++) {
    h_first[i] = 2.0;
    h_second[i] = 2.0;
    h_sum[i] = 0.0;
  }
  double *d_first = NULL;
  double *d_second = NULL;
  double *d_sum = NULL;
  cudaMalloc((void **) &d_first, SIZE * SIZE * sizeof(double));
  cudaMalloc((void **) &d_second, SIZE * SIZE * sizeof(double));
  cudaMalloc((void **) &d_sum, SIZE * SIZE * sizeof(double));
  cudaMemcpy(d_first, h_first, SIZE * SIZE * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_second, h_second, SIZE * SIZE * sizeof(double), cudaMemcpyHostToDevice);
  // SIZE is a multiple of MEMSIZE, so the grid covers the matrix exactly.
  dim3 dimGrid(SIZE / MEMSIZE, SIZE / MEMSIZE,1);
  dim3 dimBlock(MEMSIZE, MEMSIZE, 1);
  start = clock();
  multiplyTest<<<dimGrid,dimBlock>>>(d_first, d_second, d_sum, SIZE);
  // BUG FIX: cudaThreadSynchronize() is deprecated; use the supported
  // cudaDeviceSynchronize() instead.
  cudaDeviceSynchronize();
  cudaMemcpy(h_sum, d_sum, SIZE * SIZE * sizeof(double), cudaMemcpyDeviceToHost);
  // BUG FIX: clock_t was stored in an int and printed with %d (undefined
  // behavior on platforms where clock_t is wider than int).
  printf("Done. Time: %ld\n", (long)(clock() - start));
  // BUG FIX: all host and device buffers were leaked.
  cudaFree(d_first);
  cudaFree(d_second);
  cudaFree(d_sum);
  free(h_first);
  free(h_second);
  free(h_sum);
  return 0;
}
|
12,054 | #include <stdio.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
// Kernel function to perform C = A*B
// Kernel function to perform C = A*B for n x n row-major float matrices.
// One thread computes one output element; expects a 2-D launch where
// gridDim * blockDim covers at least n x n threads.
__global__
void mat_mult(int n, float *A, float *B, float *C)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x; // output column
  int idy = blockIdx.y * blockDim.y + threadIdx.y; // output row
  // Bounds guard so grids that overshoot n don't write out of range.
  if (idx >= n || idy >= n) return;
  // BUG FIX: the accumulator was declared int, truncating every float
  // product before summation (the diagonal integer test masked this).
  float sum = 0.0f;
  for (int k = 0; k < n; k++){
    sum += A[idy*n + k]*B[k*n + idx];
  }
  C[idy*n +idx] = sum;
}
int main(void)
{
  //N is the size of the matrix
  int N = 1<<8;
  //block_size is the width and height of cuda block
  //total threads per block = block_size*block_size
  int block_size = 1<<5;
  //grid_size is the width and height of a cuda grid
  //total blocks per grid = grid_size*grid_size
  //(N is a multiple of block_size, so the grid covers N x N exactly)
  int grid_size = N/block_size;
  // Allocate Unified Memory – accessible from CPU or GPU
  float *A, *B, *C;
  cudaMallocManaged(&A, N*N*sizeof(float));
  cudaMallocManaged(&B, N*N*sizeof(float));
  cudaMallocManaged(&C, N*N*sizeof(float));
  // initialize A and B arrays on the host: diagonal matrices with A[i][i] =
  // B[i][i] = i, so the product's diagonal should be i*i and zero elsewhere.
  for (int i = 0; i < N; i++) {
    for ( int j = 0; j < N; j++){
      if(i == j){
        A[i*N +j] = i;
        B[i*N +j] = i;
      } else {
        A[i*N +j] = 0;
        B[i*N +j] = 0;
      }
    }
  }
  // Run kernel with 2-D grid and 2-D blocks.
  dim3 block_dim(block_size, block_size);
  dim3 grid_dim(grid_size, grid_size);
  mat_mult<<<grid_dim, block_dim>>>(N, A, B, C);
  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();
  // Check for errors (diagonal should be squares)
  int failure = 0;
  for (int i = 0; i < N; i++){
    for (int j = 0; j < N; j++){
      if(i == j && C[i*N + j] != (float)i*i){
        failure = 1;
      } else if (i !=j && C[i*N + j] != 0.0){
        failure = 1;
      }
    }
  }
  //Helpful for printing out a matrix/debugging :^)
  /*for (int i = 0; i < N; i++){
    for (int j = 0; j < N; j++){
      printf("%f,",A[i*N +j]);
    }
    printf("\n");
  }*/
  if(failure){
    printf("There was a failure, big sad!\n");
  } else {
    printf("Tests Pass!\n");
  }
  // Free memory
  cudaFree(A);
  cudaFree(B);
  cudaFree(C);
  return 0;
}
|
12,055 | #include "includes.h"
#define N 1024*4
// Device Kernel
//host Function
// Per-block sum reduction: each block tree-reduces its blockDim.x elements
// of A in shared memory and writes the partial sum to S[blockIdx.x].
// NOTE(review): assumes the launch exactly covers the input and that
// blockDim.x is a power of two -- confirm at the call site.
__global__ void amean(float *A, float *S)
{
    // BUG FIX: the scratch buffer was declared 'int', silently truncating
    // the float inputs on load; it must be float.  Only the first
    // blockDim.x slots are actually used.
    __shared__ float sdata[N];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = A[i];
    __syncthreads();
    // Pairwise tree reduction; halve the active thread count each step.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sum.
    if (tid == 0)
        S[blockIdx.x] = sdata[0];
} |
12,056 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <cuda.h>
/* Problem size. */
#define NX 4096
#define NY 4096
#ifndef M_PI
#define M_PI 3.14159
#endif
/* Fill x with multiples of pi (x[r] = r*pi) and A with the test pattern
 * A[r][c] = r*c / NX. */
void init_array(double *x, double *A)
{
    for (int row = 0; row < NX; row++) {
        x[row] = row * M_PI;
        double *rowPtr = A + (long)row * NY;
        for (int col = 0; col < NY; col++)
            rowPtr[col] = ((double) row * (col)) / NX;
    }
}
/* tmp = A * x : one thread per row of A (nx rows of length ny). */
__global__ void kernel1(int nx, int ny, double *A, double *x, double *tmp)
{
    unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < nx)
    {
        // Accumulate the dot product of row 'row' of A with x.
        double acc = 0;
        for (int col = 0; col < ny; col++)
        {
            acc += A[row*ny + col] * x[col];
        }
        tmp[row] = acc;
    }
}
/* y = A^T * tmp : one thread per column of A (ny columns). */
__global__ void kernel2(int nx, int ny, double *A, double *y, double *tmp)
{
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < ny)
    {
        // Accumulate column 'col' of A against the intermediate vector.
        double acc = 0;
        for (int row = 0; row < nx; row++)
        {
            acc += A[row*ny + col] * tmp[row];
        }
        y[col] = acc;
    }
}
/* CPU reference: y = A^T * (A * x), built row by row.  For each row r,
 * tmp[r] = A[r,:] . x is computed first, then y += A[r,:] * tmp[r], which
 * matches the accumulation order of the original implementation exactly. */
void trans_norm_vector(double* A, double* x, double* y, double* tmp)
{
    int r, c;
    for (c = 0; c < NY; c++)
        y[c] = 0;
    for (r = 0; r < NX; r++) {
        tmp[r] = 0;
        for (c = 0; c < NY; c++)
            tmp[r] += A[r*NY + c] * x[c];
        for (c = 0; c < NY; c++)
            y[c] += A[r*NY + c] * tmp[r];
    }
}
// Driver: computes y = A^T (A x) on CPU and GPU and reports both runtimes.
int main(int argc, char *argv[])
{
    double *A;
    double *x;
    double *y;
    double *tmp;
    double *gpuRef;
    struct timeval cpu_start, cpu_end;
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Device properties %d: %s\n", dev, deviceProp.name);
    cudaSetDevice(dev);
    // set up data size of matrix
    int nx = NX;
    int ny = NY;
    int nxy = nx * ny;
    size_t nBytes = nxy * sizeof(double);  // already counts sizeof(double)
    printf("Matrix size: nx %d ny %d\n", nx, ny);
    // malloc host memory
    // BUG FIX: nBytes already includes sizeof(double); the old code
    // multiplied by sizeof(double) again (8x over-allocation).
    A = (double*)malloc(nBytes);
    x = (double*)malloc(ny*sizeof(double));
    y = (double*)malloc(ny*sizeof(double));
    tmp = (double*)malloc(nx*sizeof(double));
    gpuRef = (double*)malloc(nx*sizeof(double));
    // initialize data at host side
    init_array(x, A);
    gettimeofday(&cpu_start, NULL);
    trans_norm_vector(A, x, y, tmp);
    gettimeofday(&cpu_end, NULL);
    fprintf(stdout, "CPU trans_norm_vector Runtime :%0.6lfs\n", ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 + (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);
    double *gpuA,*gpuX,*gpuY,*gpuTmp;
    // malloc device global memory
    // BUG FIX: same double-counting of sizeof(double) as above.
    cudaMalloc((void **)&gpuA, nBytes);
    cudaMalloc((void **)&gpuX, sizeof(double)*ny);
    cudaMalloc((void **)&gpuY, sizeof(double)*ny);
    cudaMalloc((void **)&gpuTmp, sizeof(double)*nx);
    // transfer data from host to device
    cudaMemcpy(gpuA, A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(gpuX, x,ny*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuY, y,ny*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuTmp,tmp,nx*sizeof(double), cudaMemcpyHostToDevice);
    // invoke kernel at host side
    // BUG FIX: a 1024x2 block is 2048 threads, above the 1024-thread
    // per-block hardware limit, so every launch failed.  Both kernels are
    // one-dimensional, so a 1024x1 block is correct.
    int dimx = 1024;
    int dimy = 1;
    dim3 block(dimx, dimy);
    dim3 grid1(nx/ block.x, 1);
    dim3 grid2(ny/ block.x, 1);
    gettimeofday(&cpu_start, NULL);
    kernel1<<< grid1, block >>>(nx, ny,gpuA,gpuX,gpuTmp);
    cudaDeviceSynchronize();
    kernel2<<< grid2, block >>>(nx, ny,gpuA,gpuY,gpuTmp);
    cudaDeviceSynchronize();
    gettimeofday(&cpu_end, NULL);
    fprintf(stdout, "GPU trans_norm_vector Runtime :%0.6lfs\n", ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 + (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);
    // check kernel error
    cudaGetLastError();
    cudaMemcpy(gpuRef,gpuY, sizeof(double)*nx, cudaMemcpyDeviceToHost);
    // free device global memory
    // BUG FIX: gpuX was freed twice and gpuY never freed.
    cudaFree(gpuA);
    cudaFree(gpuX);
    cudaFree(gpuY);
    cudaFree(gpuTmp);
    // free host memory (BUG FIX: gpuRef was leaked)
    free(A);
    free(x);
    free(y);
    free(tmp);
    free(gpuRef);
    // reset device
    cudaDeviceReset();
    return (0);
}
|
12,057 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
/* Abort the process with a message if the most recent CUDA call failed. */
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// Fused 4-step 3-D heat stencil: each k-plane sweep applies the 7-point
// stencil four times (in -> out1 -> out2 -> out3 -> out) while streaming
// planes through registers and shared memory.  Blocks overlap by an
// 8-wide halo (blockdim-8 useful columns/rows per block), and each later
// stage shrinks its valid region by one more cell on every side.
__global__ void heat (double * __restrict__ in, double * __restrict__ out1, double * __restrict__ out2, double * __restrict__ out3, double * __restrict__ out, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-8);
int i = i0 + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-8);
int j = j0 + (int)(threadIdx.y);
//Declarations
// Each stage keeps three consecutive k-planes: the previous plane in a
// register (reg_*_m?), the current plane in shared memory (sh_*), and the
// next plane in a register, rotated at the end of every iteration.
double reg_in_m1=0, __shared__ sh_in_c0[32][32], reg_in_p1=0;
double reg_out1_m2=0, __shared__ sh_out1_m1[32][32], reg_out1_c0=0;
double reg_out2_m3=0, __shared__ sh_out2_m2[32][32], reg_out2_m1=0;
double reg_out3_m4=0, __shared__ sh_out3_m3[32][32], reg_out3_m2=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_m1 = in[0 + j*N + i];
sh_in_c0[j-j0][i-i0] = in[1*M*N + j*N + i];
}
//Rest of the computation
#pragma unroll 2
for (int k=1; k<=L-2; ++k) {
//Fetch new plane
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_p1 = in[(k+1)*M*N + j*N + i];
}
__syncthreads ();
// Stage 1: stencil on the input planes (interior of the block tile).
if (j >= j0+1 & j <= min (j0+blockdim_j-2, M-2) & i >= i0+1 & i <= min (i0+blockdim_i-2, N-2)) {
reg_out1_c0 = ((((0.125f * ((reg_in_p1 - (2.0f * sh_in_c0[j-j0][i-i0])) + reg_in_m1)) + (0.125f * ((sh_in_c0[j-j0+1][i-i0] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0-1][i-i0]))) + (0.125f * ((sh_in_c0[j-j0][i-i0+1] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0][i-i0-1]))) + sh_in_c0[j-j0][i-i0]);
}
__syncthreads ();
// Stage 2: stencil on stage-1 planes (region shrinks by one cell).
if (j >= j0+2 & j <= min (j0+blockdim_j-3, M-2) & i >= i0+2 & i <= min (i0+blockdim_i-3, N-2)) {
reg_out2_m1 = ((((0.125f * ((reg_out1_c0 - (2.0f * sh_out1_m1[j-j0][i-i0])) + reg_out1_m2)) + (0.125f * ((sh_out1_m1[j-j0+1][i-i0] - (2.0f * sh_out1_m1[j-j0][i-i0])) + sh_out1_m1[j-j0-1][i-i0]))) + (0.125f * ((sh_out1_m1[j-j0][i-i0+1] - (2.0f * sh_out1_m1[j-j0][i-i0])) + sh_out1_m1[j-j0][i-i0-1]))) + sh_out1_m1[j-j0][i-i0]);
}
__syncthreads ();
// Stage 3.
if (j >= j0+3 & j <= min (j0+blockdim_j-4, M-2) & i >= i0+3 & i <= min (i0+blockdim_i-4, N-2)) {
reg_out3_m2 = ((((0.125f * ((reg_out2_m1 - (2.0f * sh_out2_m2[j-j0][i-i0])) + reg_out2_m3)) + (0.125f * ((sh_out2_m2[j-j0+1][i-i0] - (2.0f * sh_out2_m2[j-j0][i-i0])) + sh_out2_m2[j-j0-1][i-i0]))) + (0.125f * ((sh_out2_m2[j-j0][i-i0+1] - (2.0f * sh_out2_m2[j-j0][i-i0])) + sh_out2_m2[j-j0][i-i0-1]))) + sh_out2_m2[j-j0][i-i0]);
}
__syncthreads ();
// Stage 4: final stencil, written to out (k lags the input by 3 planes).
if (j >= j0+4 & j <= min (j0+blockdim_j-5, M-2) & i >= i0+4 & i <= min (i0+blockdim_i-5, N-2)) {
out[max(k-3,0)*M*N + j*N + i] = ((((0.125f * ((reg_out3_m2 - (2.0f * sh_out3_m3[j-j0][i-i0])) + reg_out3_m4)) + (0.125f * ((sh_out3_m3[j-j0+1][i-i0] - (2.0f * sh_out3_m3[j-j0][i-i0])) + sh_out3_m3[j-j0-1][i-i0]))) + (0.125f * ((sh_out3_m3[j-j0][i-i0+1] - (2.0f * sh_out3_m3[j-j0][i-i0])) + sh_out3_m3[j-j0][i-i0-1]))) + sh_out3_m3[j-j0][i-i0]);
}
__syncthreads ();
//Value rotation
// Advance every stage's plane window by one k: shared -> "minus" register,
// freshly computed value -> shared.
reg_in_m1 = sh_in_c0[j-j0][i-i0];
sh_in_c0[j-j0][i-i0] = reg_in_p1;
reg_out1_m2 = sh_out1_m1[j-j0][i-i0];
sh_out1_m1[j-j0][i-i0] = reg_out1_c0;
reg_out2_m3 = sh_out2_m2[j-j0][i-i0];
sh_out2_m2[j-j0][i-i0] = reg_out2_m1;
reg_out3_m4 = sh_out3_m3[j-j0][i-i0];
sh_out3_m3[j-j0][i-i0] = reg_out3_m2;
}
}
// Allocate device buffers, run one pass of the fused 4-stage heat stencil,
// and copy the result back into h_out.
extern "C" void host_code (double *h_in, double *h_out, int L, int M, int N) {
    const size_t bytes = sizeof(double) * L * M * N;
    double *in, *out, *out1, *out2, *out3;
    cudaMalloc (&in, bytes);
    check_error ("Failed to allocate device memory for in\n");
    cudaMemcpy (in, h_in, bytes, cudaMemcpyHostToDevice);
    cudaMalloc (&out, bytes);
    check_error ("Failed to allocate device memory for out\n");
    cudaMalloc (&out1, bytes);
    check_error ("Failed to allocate device memory for out1\n");
    cudaMalloc (&out2, bytes);
    check_error ("Failed to allocate device memory for out2\n");
    cudaMalloc (&out3, bytes);
    check_error ("Failed to allocate device memory for out3\n");
    // 32x32 blocks; adjacent blocks overlap by the 8-cell stencil halo.
    dim3 blockconfig_1 (32, 32, 1);
    dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M, blockconfig_1.y-8), 1);
    heat <<<gridconfig_1, blockconfig_1>>> (in, out1, out2, out3, out, L, M, N);
    cudaMemcpy (h_out, out, bytes, cudaMemcpyDeviceToHost);
    // Release all device allocations.
    cudaFree (in);
    cudaFree (out);
    cudaFree (out1);
    cudaFree (out2);
    cudaFree (out3);
}
|
12,058 | #include <stdio.h>
/* Host SAXPY: y[i] = a*x[i] + y[i] for i in [0, n). */
void cpu_saxpy(int n, float a, float*x, float *y)
{
    int i = 0;
    while (i < n) {
        y[i] += a * x[i];
        ++i;
    }
}
// Driver: runs SAXPY on 1M elements on the host.
int main(void)
{
    const int N = 1 << 20;
    float *x = (float*)malloc(N * sizeof(float));
    float *y = (float*)malloc(N * sizeof(float));
    for (int i = 0; i < N; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // Perform SAXPY on 1M elements
    cpu_saxpy(N, 2.0f, x, y);
    free(x);
    free(y);
} |
12,059 | #include "includes.h"
// One-atomic-per-element reduction: every in-range thread folds its element
// of dataArray into *sum.  Contention-heavy, but simple and correct.
__global__ static void kernelCalcSum_AtomicOnly(const int* dataArray, int arraySize, int* sum)
{
    const int gid = (int)(blockDim.x * blockIdx.x + threadIdx.x);
    // Guard the grid tail before touching the array.
    if (gid < arraySize)
    {
        atomicAdd(sum, dataArray[gid]);
    }
} |
12,060 | #include "includes.h"
// Element-wise vector add: C = A + B, tail-guarded by N.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x; // flat global index
    if (gid < N) C[gid] = B[gid] + A[gid];
} |
12,061 | #include <stdio.h>
#include <stdlib.h>
/*
* See section "B. 19 Launch Bounds" from "CUDA C Programming Guide" for more
* information about the optimal launch bounds, which differ across the major
* architecture revisions
*/
#define THREADS_PER_BLOCK 256
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Host function that finds a specify key in array
void find_key_cpu(const int* array_in, int n, int key);
// Variable used as a parameter of atomic function
// It resides in global memory
__device__ unsigned int first_occur;
// Variable used to know how many thread blocks have finished their tasks
// It resides in global memory
__device__ unsigned int count = 0;
// Variable used to control when all blocks have finished their tasks
// It resides in shared memory
__shared__ bool all_blocks_finished;
// Kernel code
// Finds the first index holding 'key' via atomicMin on the device-global
// first_occur, then lets the last block to finish print the result.
// NOTE(review): thread 0 initializes first_occur inside the kernel, but
// other blocks may run their atomicMin before block 0 is scheduled -- the
// initialization races with the updates.  Initializing from the host with
// cudaMemcpyToSymbol before launch would be safe; confirm intent.
// NOTE(review): the __syncthreads() below sits inside 'if (idx < n)'; if a
// block straddles n, some threads skip the barrier, which is undefined
// behavior.  Works only when n is a multiple of the block size.
__global__ void find_key_gpu(const int* array_in, int n, int key) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Only one thread of the whole device initializes the variable
// and makes sure its result is visible to all other threads
if (idx == 0) {
first_occur = n;
__threadfence();
}
// Avoid wrong accesses to array_in if there are more threads than elements
if (idx < n) {
// Non-matching threads contribute n, the identity for this atomicMin.
int found = (array_in[idx] == key) ? idx : n;
atomicMin(&first_occur, found);
// Thread 0 of each block signals that its work has been finished
if (threadIdx.x == 0) {
unsigned int value = atomicInc(&count, gridDim.x);
// True only in the very last block to arrive here.
all_blocks_finished = (value == (gridDim.x - 1));
}
// Synchronize to make sure that each thread
// reads the correct value of all_blocks_finished
__syncthreads();
if (all_blocks_finished) {
if (threadIdx.x == 0) {
// Thread 0 of last block is responsible to print the final result
// Only one thread in the whole device
if (first_occur < n) {
printf("The first ocurrence of key (%d) has been found at position %d\n"
, key, first_occur);
}
else {
printf("The key (%d) is not found\n", key);
}
}
}
}
}
// Kernel code
// Fast variant: matching threads atomicMin their index into the
// device-global first_occur; the host reads the winner afterwards with
// cudaMemcpyFromSymbol.
// NOTE(review): first_occur is initialized by global thread 0 inside the
// kernel, but other blocks may perform their atomicMin before block 0 runs
// -- the initialization races with the updates.  Host-side initialization
// before launch would be safe; confirm intent.
__global__ void find_key_gpu_fast(const int* array_in, int n, int key) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Avoid wrong accesses to array_in if there are more threads than elements
if (idx < n) {
if (idx == 0) {
first_occur = n;
__threadfence();
}
if (array_in[idx] == key) {
atomicMin(&first_occur, idx);
}
}
}
// Driver: times a linear key search three ways (CPU loop, find_key_gpu,
// find_key_gpu_fast) on an array initialized to h_array[i] = i.
// Usage: prog size key
int main(int argc, char** argv) {
if (argc != 3) {
fprintf(stderr, "Usage: %s size key\n", argv[0]);
fprintf(stderr, " size is the number of input array's elements\n");
fprintf(stderr, " key is the number which we want to find\n");
return EXIT_FAILURE;
}
cudaEvent_t start, stop;
float elapsed_time_ms;
int key = atoi(argv[2]);
// Pointer for host memory and size
int *h_array;
unsigned int num_elems = atoi(argv[1]);
size_t array_size = num_elems * sizeof(int);
// pointer for device memory
int *dev_array_in;
// allocate host and device memory
// NOTE(review): malloc's return value is not checked -- a huge 'size'
// argument would crash in the init loop below.
h_array = (int *) malloc(array_size);
cudaMalloc(&dev_array_in, array_size);
// Check for any CUDA errors
checkCUDAError("cudaMalloc");
/*
* Also host memory allocation can be done using cudaMallocHost
* cudaMallocHost(&h_array, array_size);
*
* Or also cudaHostAlloc
* cudaHostAlloc(&h_array, array_size, cudaHostAllocDefault);
*/
// Initialize host memory
for (unsigned int i = 0; i < num_elems; ++i) {
h_array[i] = i;
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*------------------------ COMPUTATION ON CPU ----------------------------*/
cudaEventRecord(start, 0);
// cudaEventSynchronize(start); needed?
find_key_cpu(h_array, num_elems, key);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("Time to find key on CPU: %f ms.\n\n", elapsed_time_ms);
/*--------------- COMPUTATION ON GPU (find_key() kernel) -----------------*/
// Host to device memory copy
cudaMemcpy(dev_array_in, h_array, array_size, cudaMemcpyHostToDevice);
// Check for any CUDA errors
checkCUDAError("cudaMemcpy");
// Set grid and block dimensions properly
// num_elems + (TREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK is equal to call
// ceil(num_elems/THREADS_PER_BLOCK) function from C Math Library
int blocks_per_grid = (int) (num_elems + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
cudaEventRecord(start, 0);
// cudaEventSynchronize(start); needed?
// Launch kernel
find_key_gpu<<<blocks_per_grid, THREADS_PER_BLOCK>>>(dev_array_in, num_elems, key);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
// Check for any CUDA errors
checkCUDAError("kernel invocation");
printf("Time to find key on GPU: %f ms.\n\n", elapsed_time_ms);
// Block until the device has completed their tasks
//cudaDeviceSynchronize();
/*-------------- COMPUTATION ON GPU (find_key_fast() kernel) -------------*/
cudaEventRecord(start, 0);
// cudaEventSynchronize(start); needed?
// Launch kernel
find_key_gpu_fast<<<blocks_per_grid, THREADS_PER_BLOCK>>>(dev_array_in, num_elems, key);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// Get the final result from global memory (on device)
// and copy it to h_first_occur pointer (host memory)
unsigned int h_first_occur;
cudaMemcpyFromSymbol(&h_first_occur, first_occur, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("cudaMemcpyFromSymbol");
if (h_first_occur < num_elems) {
printf("The first ocurrence of key (%d) has been found at position %d\n"
, key, h_first_occur);
}
else {
printf("The key (%d) is not found\n", key);
}
printf("Time to find key on GPU (fast kernel version): %f ms.\n\n", elapsed_time_ms);
// Block until the device has completed their tasks
//cudaDeviceSynchronize();
// Free device and host memory
cudaFree(dev_array_in);
free(h_array);
// Check for any CUDA errors
checkCUDAError("cudaFree");
cudaEventDestroy(start);
cudaEventDestroy(stop);
return EXIT_SUCCESS;
}
// Abort the program if the most recent CUDA runtime call reported an error.
void checkCUDAError(const char *msg) {
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
// Linear scan for key; prints the first match, or a not-found message.
void find_key_cpu(const int* array_in, int n, int key) {
    for (int i = 0; i < n; ++i) {
        if (array_in[i] == key) {
            printf("The first ocurrence of key (%d) has been found at position %d\n"
            , key, i);
            return;
        }
    }
    printf("The key (%d) is not found\n", key);
}
|
12,062 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Each GPU thread prints its block and thread indices.
__global__ void HelloWorld()
{
printf("Hello world, %d, %d\n", blockIdx.x,
threadIdx.x);
}
// Launch a 4x4 hello-world grid, then wait for a keypress before returning.
int example1()
{
    HelloWorld << <4, 4 >> > ();
    // Keep the console window open until the user presses a key.
    getchar();
    return 0;
} |
// If you want to see MAIN.cu (cudaOpenMP), go to the "src" folder in this repo.
// Have a nice day :)
// by SamoX
|
12,064 | #include <iostream>
#include <cuda.h>
#define HANDLE_ERROR(x) checkCudaError(x, __LINE__)
// HANDLE_ERROR helper: on failure, print the call-site line number and the
// CUDA error string, then abort.
void checkCudaError(cudaError_t msg, int x)
{
    if (msg == cudaSuccess)
        return;
    fprintf(stderr, "line: %d %s\n", x, cudaGetErrorString(msg));
    exit(1);
}
// Enumerates every CUDA device and prints its general, memory, and
// multiprocessor properties.
int main()
{
cudaDeviceProp prop;
int count;
HANDLE_ERROR(cudaGetDeviceCount(&count));
for (int i = 0; i < count; i++)
{
HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
std::cout << " --- General Information for device " << i << " ---" << std::endl;
std::cout << "Name: " << prop.name << std::endl;
std::cout << "Compute Capability: " << prop.major << "." << prop.minor << std::endl;
std::cout << "Clock rate: " << prop.clockRate << std::endl;
std::cout << "Device copy overlap: ";
if (prop.deviceOverlap)
std::cout << "Enabled" << std::endl;
else
std::cout << "Disabled" << std::endl;
std::cout << "Kernel execition timeout: ";
if (prop.kernelExecTimeoutEnabled)
std::cout << "Enabled" << std::endl;
else
std::cout << "Disabled" << std::endl;
std::cout << "GPU integrated: " << prop.integrated << std::endl;
std::cout << std::endl;
std::cout << " --- Memory Information for device " << i << " ---"<< std::endl;
std::cout << "Total global mem: " << prop.totalGlobalMem << std::endl;
std::cout << "Total constant mem: " << prop.totalConstMem << std::endl;
std::cout << "Max mem pitch: " << prop.memPitch << std::endl;
std::cout << "Texture Alignment: " << prop.textureAlignment << std::endl;
std::cout << std::endl;
std::cout << " --- MP Information for device " << i << " ---" << std::endl;
std::cout << "Multiprocessor count: " << prop.multiProcessorCount << std::endl;
std::cout << "Shared mem per mp: " << prop.sharedMemPerBlock << std::endl;
std::cout << "Registers per mp: " << prop.regsPerBlock << std::endl;
std::cout << "Threads in warp: " << prop.warpSize << std::endl;
std::cout << "Max threads per block: " << prop.maxThreadsPerBlock << std::endl;
std::cout << "Max thread dimensions: (" << prop.maxThreadsDim[0] << ", "
<< prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << ")" << std::endl;
std::cout << "Max grid dimensions: (" << prop.maxGridSize[0] << ", "
<< prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << ")" << std::endl;
std::cout << std::endl;
std::cout << " ---------------------------------- " << std::endl << std::endl;
}
return 0;
} |
12,065 | #include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
// c[i] = a[i] + b[i] for the first N elements; tail threads do nothing.
__global__ void add_array(const int *__restrict a, const int *__restrict b,
int *__restrict c, int N) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    c[gid] = a[gid] + b[gid];
}
// Append m pseudo-random values in [0, 100) to arr (uses the global rand()
// stream, so results depend on the current srand() seed).
void init_array(std::vector<int> &arr, int m) {
    int remaining = m;
    while (remaining-- > 0)
        arr.push_back(rand() % 100);
}
// Assert element-wise that c == a + b (aborts on the first mismatch).
void verify_result(std::vector<int> &a, std::vector<int> &b, std::vector<int> &c) {
    for (std::size_t i = 0; i < a.size(); ++i)
        assert(c[i] == a[i] + b[i]);
}
// Vector-add driver: fills two host vectors with random values, adds them
// on the GPU, and verifies the result against the CPU sum.
int main() {
  constexpr int N = 1 << 16; //2^16 = 65536 elements
  constexpr size_t bytes = sizeof(int) * N;
  std::vector<int> a; a.reserve(N);
  std::vector<int> b; b.reserve(N);
  // BUG FIX: 'c' previously only reserve()d, leaving size() == 0 while
  // cudaMemcpy wrote through c.data() and verify_result indexed past
  // size() -- undefined behavior.  Give it N real elements instead.
  std::vector<int> c(N);
  init_array(a, N); init_array(b, N);
  int *d_a, *d_b, *d_c;
  cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes);
  cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b.data(), bytes, cudaMemcpyHostToDevice);
  int n_threads = 1 << 10; //2^10 = 1024 threads per block
  int n_blocks = (N + n_threads - 1) / n_threads; // ceil-div grid size
  add_array<<<n_blocks, n_threads>>>(d_a, d_b, d_c, N);
  // The blocking copy below also synchronizes with the kernel.
  cudaMemcpy(c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
  verify_result(a, b, c);
  cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
  std::cout << "COMPLETED SUCCESSFULLY! \n";
  int out = a[0] + b[0];
  printf("for example a[0] + b[0] = %d and c[0] = %d; \n", out, c[0]);
  return 0;
}
|
12,066 | #include "includes.h"
// Masked zeroing: for each column i, clear row (idxf-1) of xf wherever row
// (idxb-1) of xb is false.  A grid-stride loop covers all N columns.
__global__ void If(bool * xb, float * xf, size_t idxf, size_t idxb, size_t N)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
    {
        const bool keep = xb[(idxb - 1) * N + i];
        if (!keep)
        {
            xf[(idxf - 1) * N + i] = 0;
        }
    }
    return;
} |
12,067 | #include <stdio.h>
#include <cuda.h>
// CUDA kernel. Each thread takes care of one element of c
// Prints one greeting per launched GPU thread (debugging helper).
__global__ void hello_cuda() {
printf("hello from the GPU\n");
}
// Launch a single GPU thread and wait for its printf output to flush.
int main( int argc, char* argv[] )
{
// Execute the kernel
hello_cuda<<<1, 1>>>();
// Block until the kernel finishes so its output appears before exit.
cudaDeviceSynchronize();
return 0;
}
|
12,068 | #include "includes.h"
// Per-block sum reduction in shared memory: each block reduces its
// blockDim.x elements of d_in and writes the partial sum to
// d_out[blockIdx.x].
// NOTE(review): assumes blockDim.x is a power of two and that the grid
// exactly covers d_in (no bounds check on myId) -- confirm at launch site.
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
} |
12,069 | #include <stdio.h>
#define N 10000000
// Kernel: prints one greeting per thread (launched <<<1,1>>> below).
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
//out = a + b, n is length
void vector_add(float *out, float *a, float *b, int n){
for(int i = 0; i < n; i++){
out[i] = a[i] + b[i];
}
}
// Driver: host-side vector add over N elements, then a one-thread hello
// kernel.
int main(){
    float *a, *b, *out;
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    for(int i = 0; i < N; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }
    vector_add(out, a, b, N);
    for(int i = 0; i < 10; i++)
    {
        printf("%f ",out[i]);
    }
    printf("\n");
    cuda_hello<<<1,1>>>();
    // BUG FIX: without a synchronization the process could exit before the
    // kernel's printf buffer was flushed, so the greeting never appeared.
    cudaDeviceSynchronize();
    // BUG FIX: the three host allocations were leaked.
    free(a);
    free(b);
    free(out);
    return 0;
} |
12,070 | //nvcc -o lab5_2_2 lab5_2_2.cu
/*Author:
Pedro Silva
*/
/*2. Implemente um programa em CUDA que calcule a soma de todos os elementos de um vetor de
tamanho N. Teste para vários valores de N.*/
/*2.2. Implemente uma nova versão otimizada baseada em memória partilhada.*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Per-block partial sum: each block stages up to 32 elements of d_buffer in
// shared memory, then thread 0 serially accumulates them and writes the
// block's partial sum back to d_buffer[blockIdx.x] for the host to combine.
__global__ void vectorsum2_2(int * d_buffer, int N){
    // Global thread id.
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ int shared_mem[32]; // 32 = block size
    // Stage this thread's element (tail threads past N skip the load).
    if(index < N){
        shared_mem[threadIdx.x] = d_buffer[index];
    }
    // BUG FIX: this barrier used to sit inside the divergent if-block
    // above, which is undefined behavior when some threads of a block fail
    // the bounds check.  Every thread must reach the same __syncthreads().
    __syncthreads();
    // Thread 0 of each block sums the staged values serially (simple but
    // adequate for 32-element blocks).
    if(threadIdx.x == 0){
        int total = 0;
        if(index < N){
            for(int i = 0; i < blockDim.x && (index + i) < N; i++){
                total += shared_mem[i];
            }
        }
        // BUG FIX: blocks lying entirely past the end of the data used to
        // publish an uninitialized shared_mem[0]; they now publish 0 so the
        // host can safely sum every per-block slot.
        d_buffer[blockIdx.x] = total;
    }
}
// Driver: for N = 256, 512, ..., 4096, sums a vector of ones on the GPU
// (expected result: N) and times each round trip on the host.
int main(){
    printf("Exercicio 2, Lab 5 de CHAD. Soma de todos os elementos de um vector de tamanho N.\nN comeca a 8 (2^3)e duplica até 4096 (2^10).\n");
    int *d_buffer, *result, *h_buffer;
    int error;
    struct timespec start, end;
    double startTime, endTime;
    for( int N = 256; N <= 4096; N = N*2){
        printf("--------------------------------------------------------------------------\n");
        printf("Soma de um vector com %i elementos.\n", N);
        clock_gettime(CLOCK_MONOTONIC, &start);
        // Allocate device memory for the input vector.
        if(cudaMalloc(&d_buffer, sizeof(int) * N) != cudaSuccess){
            fprintf(stderr, "Error allocating memory on device.\n");
            return(-1);
        }
        // Host input: all ones, so the expected sum is exactly N.
        h_buffer = (int*) malloc(N * sizeof(int));
        for(int i = 0; i < N; i++)
            h_buffer[i] = 1;
        // Host buffer that receives each block's partial sum.
        result = (int*) malloc(N * sizeof(int));
        // Copy the input vector to the device.
        if((error = cudaMemcpy(d_buffer, h_buffer, N * sizeof(int), cudaMemcpyHostToDevice)) != cudaSuccess)
            fprintf(stderr, "Erro a transferir vector para GPU, de dimensao %i. Error = %i.\n", N, error);
        // Fixed 32-thread blocks; one extra block covers any remainder.
        dim3 BlockSize(32, 1, 1);
        dim3 GridSize(N/32 + 1, 1, 1);
        printf("Gridsize: (%i, %i, %i).\n",GridSize.x, GridSize.y, GridSize.z);
        vectorsum2_2<<<GridSize, BlockSize>>>(d_buffer, N);
        // Fetch the per-block partial sums back to the host.
        if((error = cudaMemcpy(result, d_buffer, N * sizeof(int), cudaMemcpyDeviceToHost)) != cudaSuccess)
            fprintf(stderr, "Erro a transferir vector do device para host. Error code: %i.\n", error);
        if((error = cudaFree(d_buffer)) != cudaSuccess)
            printf("Erro a libertar memoria no device. Error code: %i.\n", error);
        // Combine the partial sums of the blocks that actually held data.
        // BUG FIX: the old bound (N/32 + 1) also added the padding block's
        // slot, which no in-range thread ever wrote.
        for(int i = 1; i < (N + 31) / 32; i ++)
            result[0] += result[i];
        printf("Resultado: %i.\n", result[0]);
        clock_gettime(CLOCK_MONOTONIC, &end);
        startTime = (start.tv_sec * 1e3) + (start.tv_nsec * 1e-6);
        endTime = (end.tv_sec * 1e3) + (end.tv_nsec * 1e-6);
        printf("Tempo de execução do GPU kernel: %fms.\n", endTime - startTime);
        // BUG FIX: d_buffer was freed a second time here on every
        // iteration, reporting a spurious error; it is already released
        // right after the copy-back above.
        free(h_buffer);
        free(result);
    }
    return 0;
} |
12,071 | #include "includes.h"
#define H 5
#define W 5
using namespace std;
// C = A * B for H x W matrices (the inner loop runs over H, so the
// matrices are effectively square here).
__global__ void mult_dist(int *d_A, int *d_B,int *d_C){
    int row = blockIdx.y*blockDim.y+threadIdx.y; // output row index
    int col = blockIdx.x*blockDim.x+threadIdx.x; // output column index
    if(row < H && col < W){
        int acc = 0;
        for(int k=0; k<H; k++){
            acc += d_A[row*W+k] * d_B[k*W+col];
        }
        d_C[row*W+col] = acc;
    }
} |
12,072 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cstdlib>
#include "Image.cuh"
using namespace std;
int readImage(char*, Image&);
int writeImage(char*, Image&);
int readImageHeader(char fname[], int& N, int& M, int& Q, bool& type);
// Image-processing pipeline driver: reads a PGM image, optionally enlarges
// it, negates it, reflects it, and writes the result.
// Usage: prog INPUT_FILE OUTPUT_FILE [ENLARGE_SIZE]
int main(int argc, char* args[]) {
int scale = 1;
if (argc < 3) {
std::cerr << "USAGE: " << args[0] << " INPUT_FILE OUTPUT_FILE [ENLARGE_SIZE]" << std::endl;
return 1;
}
if (argc > 3) {
scale = atoi(args[3]);
}
// Read dimensions first so the Image can be sized before loading pixels.
int N, M, Q;
bool type;
readImageHeader(args[1], N, M, Q, type);
Image img(N, M, Q);
cout << "Reading: " << args[1] << endl;
readImage(args[1], img);
if (scale > 1) {
cout << "Scaling by a factor of:" << scale << endl;
img.enlargeImage(scale, img);
}
cout << "Negating image" << endl;
img.negateImage(img);
cout << "Reflecting image" << endl;
img.reflectImage(false, img);
cout << "Writing: " << args[2] << endl;
writeImage(args[2], img);
return 0;
}
// Parse a PGM ("P5") or PPM ("P6") header from fname.
// Outputs: N = rows, M = columns, Q = max pixel value;
// type = false for PGM, true for PPM.
// Exits the process on open/format errors; returns 1 on success.
int readImageHeader(char fname[], int& N, int& M, int& Q, bool& type)
{
    char header[100], *ptr;
    std::ifstream ifp;
    ifp.open(fname, std::ios::in | std::ios::binary);
    if (!ifp)
    {
        std::cout << "Can't read image: " << fname << std::endl;
        exit(1);
    }
    // Magic number: "P5" = binary PGM (grayscale), "P6" = binary PPM (color).
    type = false; // PGM
    ifp.getline(header, 100, '\n');
    if ((header[0] == 'P') && (header[1] == '5'))
    {
        type = false;
    }
    else if ((header[0] == 'P') && (header[1] == '6'))
    {
        type = true;
    }
    else
    {
        std::cout << "Image " << fname << " is not PGM or PPM" << std::endl;
        exit(1);
    }
    // Dimensions line ("<width> <height>"), skipping '#' comment lines.
    ifp.getline(header, 100, '\n');
    while (header[0] == '#')
        ifp.getline(header, 100, '\n');
    M = strtol(header, &ptr, 0);
    N = atoi(ptr);
    // Maximum pixel value line.
    ifp.getline(header, 100, '\n');
    Q = strtol(header, &ptr, 0);
    ifp.close();
    return(1);
}
// Load a binary PGM ("P5") file into the given Image.
// Exits the process on open/format/size errors; returns 1 on success.
// NOTE(review): variables i/j/idx convert the raw bytes to ints before the
// single setPixels call -- presumably Image stores int pixels; confirm.
int readImage(char fname[], Image& image)
{
int i, j;
int N, M, Q;
unsigned char *charImage;
int* intImage;
char header[100], *ptr;
ifstream ifp;
ifp.open(fname, ios::in | ios::binary);
if (!ifp)
{
cout << "Can't read image: " << fname << endl;
exit(1);
}
// read header
ifp.getline(header, 100, '\n');
if ((header[0] != 80) || (header[1] != 53))
{
cout << "Image " << fname << " is not PGM" << endl;
exit(1);
}
// Dimensions line, skipping '#' comment lines.
ifp.getline(header, 100, '\n');
while (header[0] == '#')
ifp.getline(header, 100, '\n');
M = strtol(header, &ptr, 0);
N = atoi(ptr);
// Maximum pixel value line.
ifp.getline(header, 100, '\n');
Q = strtol(header, &ptr, 0);
charImage = (unsigned char *) new unsigned char[M*N];
intImage = new int[M * N];
// Raw pixel payload: M*N single bytes, row-major.
ifp.read(reinterpret_cast<char *>(charImage), (M*N) * sizeof(unsigned char));
if (ifp.fail())
{
cout << "Image " << fname << " has wrong size" << endl;
exit(1);
}
ifp.close();
//
// Convert the unsigned characters to integers
//
int idx;
for (i = 0; i<N; i++)
for (j = 0; j<M; j++)
{
idx = i*M + j;
intImage[idx] = (int)charImage[idx];
}
image.setPixels(0,0,M * N, intImage);
delete[] charImage;
delete[] intImage;
return (1);
}
// Write the given Image to fname as a binary PGM ("P5") file.
// Exits the process if the file cannot be opened or written; returns 1 on
// success.
int writeImage(char fname[], Image& image)
{
int i, j;
int N, M, Q;
unsigned char *charImage;
int* intImage;
ofstream ofp;
image.getImageInfo(N, M, Q);
charImage = (unsigned char *) new unsigned char[M*N];
intImage = new int[M * N];
image.getPixels(0,0,M * N, intImage); // get rgb pixels
// convert the integer values to unsigned char
// NOTE(review): values outside 0..255 are silently truncated by the cast
// -- presumably the Image clamps pixel values upstream; confirm.
int idx;
for (i = 0; i<N; i++)
{
for (j = 0; j<M; j++)
{
idx = i*M + j;
charImage[idx] = (unsigned char)intImage[idx];
}
}
ofp.open(fname, ios::out | ios::binary);
if (!ofp)
{
cout << "Can't open file: " << fname << endl;
exit(1);
}
// PGM header: magic, "<width> <height>", max value, then raw bytes.
ofp << "P5" << endl;
ofp << M << " " << N << endl;
ofp << Q << endl;
ofp.write(reinterpret_cast<char *>(charImage), (M*N) * sizeof(unsigned char));
if (ofp.fail())
{
cout << "Can't write image " << fname << endl;
exit(0);
}
ofp.close();
delete[] charImage;
delete[] intImage;
return(1);
} |
12,073 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
// Hillis-Steele inclusive scan over a single block using dynamic shared
// memory. Launch as <<<1, size, size*sizeof(float)>>>; afterwards
// d_out[i] = d_in[0] + ... + d_in[i].
__global__ void scan_shared(float *d_in,float *d_out,const int size){
    extern __shared__ float s_in[];
    int idx = threadIdx.x;
    s_in[idx] = d_in[idx];
    __syncthreads();
    float out;
    for(int step=1;step<size;step*=2){
        // Read phase: each active thread computes its new partial sum first.
        if(idx-step>=0){
            out = s_in[idx]+s_in[idx-step];
        }
        __syncthreads();
        // Write phase: only after the barrier, so nobody overwrites a value
        // another thread still needs. Barriers sit outside the divergent
        // branches so every thread reaches them.
        if(idx-step>=0){
            s_in[idx] = out;
        }
        __syncthreads();
    }
    d_out[idx] = s_in[idx];
}
// Fills p[0..size-1] with the ramp 0, 1, 2, ... (stored as floats).
void init(float *p,const int size){
    for(int i = 0; i < size; ++i){
        p[i] = (float)i;
    }
}
// Driver: inclusive scan of 1024 floats on the GPU in a single block,
// printing input, kernel time, and the scanned result.
int main(){
    int size = 1024;
    float *h_in,*h_out;
    float *d_in,*d_out;
    h_in = (float *)malloc(size*sizeof(float));
    h_out = (float *)malloc(size*sizeof(float));
    init(h_in,size);
    printf("array:");
    for(int i=0;i<size;i++)
        printf("%f ",h_in[i]);
    printf("\n");
    cudaMalloc((float **)&d_in,size*sizeof(float));
    cudaMalloc((float **)&d_out,size*sizeof(float));
    cudaMemcpy(d_in,h_in,size*sizeof(float),cudaMemcpyHostToDevice);
    clock_t t_start = clock();
    scan_shared<<<1,size,size*sizeof(float)>>>(d_in,d_out,size);
    // Fixed: kernel launches are asynchronous — without this sync the
    // timer captured only the launch overhead, not the kernel itself.
    cudaDeviceSynchronize();
    clock_t t_end = clock();
    cudaMemcpy(h_out,d_out,size*sizeof(float),cudaMemcpyDeviceToHost);
    // Fixed: clock() returns processor ticks, not time_t seconds, so
    // difftime() gave meaningless output; convert ticks to ms explicitly.
    printf("time:%fms\n",(double)(t_end - t_start) * 1000.0 / CLOCKS_PER_SEC);
    printf("result:");
    for(int i=0;i<size;i++)
        printf("%f ",h_out[i]);
    printf("\n");
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
12,074 | #include <iostream>
#include <fstream>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/remove.h>
#include <thrust/random.h>
// Predicate: true for any byte outside 'a'(97)..'z'(122).
// Fixed: thrust::unary_function takes <Argument, Result>, so the correct
// specialization is <unsigned char, bool> (the original had them reversed).
struct isnot_lowercase_alpha : thrust::unary_function<unsigned char, bool>{
    __host__ __device__
    bool operator()(const unsigned char &c) {
        return c < 97 || c > 122;
    }
};
// Maps 'A'(65)..'Z'(90) to the corresponding lowercase letter (+32);
// every other byte passes through unchanged.
struct upper_to_lower : thrust::unary_function<unsigned char, unsigned char>{
    __host__ __device__
    unsigned char operator()(const unsigned char &c) {
        return (c >= 65 && c <= 90) ? (unsigned char)(c + 32) : c;
    }
};
// Vigenere-style shift: character at position `pos` is shifted by
// shifts[pos % period], wrapping within 'a'(97)..'z'(122).
// Assumes the input is already sanitized to lowercase a-z.
struct apply_shift : thrust::binary_function<unsigned char, int, unsigned char> {
    unsigned int period;          // key length
    unsigned int *shifts;         // raw device pointer to `period` shift amounts
    __host__ __device__
    apply_shift(unsigned int p, unsigned int *s) : period(p), shifts(s) {}
    __host__ __device__
    unsigned char operator()(const unsigned char &c, int pos) {
        unsigned char new_c = c + shifts[pos % period];
        // Wrap back into the lowercase alphabet range.
        if (new_c > 122)
            new_c -= 26;
        else if (new_c < 97)
            new_c += 26;
        return new_c;
    }
};
// Reads a text file (argv[1]), sanitizes it to lowercase a-z on the GPU,
// then encrypts it with a random periodic shift of period argv[2]
// (Vigenere cipher), writing the result to cipher_text.txt.
int main(int argc, char **argv) {
    std::ifstream ifs(argv[1], std::ios::binary);
    if (!ifs.good()) {
        std::cerr << "Couldn't open book file!" << std::endl;
        return 1;
    }
    std::vector<unsigned char> text;
    ifs.seekg(0, std::ios::end); //seek to end of file
    int length = ifs.tellg(); //get distance from beginning
    ifs.seekg(0, std::ios::beg); //move back to beginning
    text.resize(length);
    ifs.read((char *)&text[0], length);
    ifs.close();
    //sanitize input to contain only a-z lowercase
    thrust::device_vector<unsigned char> dText = text;
    thrust::device_vector<unsigned char> text_clean(text.size());
    // Lowercase on the fly via a transform iterator while dropping every
    // byte outside 'a'..'z'; numElements is the cleaned length.
    int numElements = thrust::remove_copy_if(
        thrust::make_transform_iterator(dText.begin(), upper_to_lower()),
        thrust::make_transform_iterator(dText.end(), upper_to_lower()),
        text_clean.begin(), isnot_lowercase_alpha()) - text_clean.begin();
    unsigned int period = atoi(argv[2]);
    thrust::device_vector<unsigned int> shifts(period);
    // Fixed seed => reproducible key.
    thrust::default_random_engine rng(123);
    thrust::uniform_int_distribution<int> uniform_dist(1, 25);
    for (int i = 0; i < shifts.size(); ++i) {
        shifts[i] = uniform_dist(rng); //don't allow 0 shifts
    }
    thrust::device_vector<unsigned char> device_cipher_text(numElements);
    // The counting iterator supplies each character's position so
    // apply_shift can select shifts[pos % period].
    thrust::transform(text_clean.begin(), text_clean.begin() + numElements, thrust::make_counting_iterator((int)0),
        device_cipher_text.begin(), apply_shift(period, thrust::raw_pointer_cast(&shifts[0])));
    thrust::host_vector<unsigned char> host_cipher_text = device_cipher_text;
    std::ofstream ofs("cipher_text.txt", std::ios::binary);
    ofs.write((char *)&host_cipher_text[0], numElements);
    ofs.close();
    return 0;
}
|
12,075 | /*
Sample Implementation of
Yamazaki and Tanaka (2005).
Neural Modeling of an Internal Clock.
Neural Computation 17:1032--1058.
using only global memory of CUDA.
Licensed under Creative Commons Attribution License (CC-BY)
http://creativecommons.org/licenses/by/3.0/
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define N 1024 // To be 2^k
#define T 1000
#define Pr 0.1
#define I 1.0
#define Kappa 2.0
#define Tau 100.0
#define BLOCK_SIZE 512
float *z, *u, *result;
int *w;
// Allocates the host-side state and builds random sparse connectivity:
// row i of w lists the indices of neurons feeding neuron i (each chosen
// with probability Pr), terminated by a -1 sentinel.
void initialize()
{
    int i, j, k;
    w = (int *)malloc(N*N*sizeof(int));
    z = (float *)malloc(N*sizeof(float));
    u = (float *)malloc(N*sizeof(float));
    result = (float *)malloc(T*N*sizeof(float));
    for(i = 0; i < N; i++){
        z[i] = 0;   // outputs start silent
        u[i] = I;   // potentials start at the constant input level
    }
    srand(23);   // fixed seed for reproducible connectivity
    for(i = 0; i < N; i++){
        k = 0;
        for(j = 0; j < N; j++){
            if ((float)rand()/(float)RAND_MAX < Pr){
                w[k+N*i] = j;
                k++;
            }
        }
        // NOTE(review): if all N candidates were selected (k == N), this
        // sentinel would land one slot past row i — vanishingly unlikely
        // for Pr=0.1 but worth a guard before reusing with larger Pr.
        w[k+N*i] = -1;
    }
}
// Releases every host-side buffer allocated by initialize().
void finalize()
{
    free(result);
    free(u);
    free(z);
    free(w);
}
// One timestep for one neuron per thread: sums presynaptic activity via
// the -1-terminated adjacency row, applies leaky integration with global
// inhibition, rectifies, and records the output for timestep t.
// Launched as <<<N/BLOCK_SIZE, BLOCK_SIZE>>>; no bounds check, so N must
// be a multiple of BLOCK_SIZE.
__global__ void Kernel(const int *w, float *z, float *u, float *result, const float decay, const int t)
{
    int i = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int j, k;
    float r;
    r = 0;
    // Sum the outputs of all neurons connected to neuron i.
    // NOTE(review): z is also written at the bottom of this same launch,
    // so whether z[j] reads the old or new value depends on block
    // scheduling — confirm this nondeterminism is acceptable for the model.
    for(k = 0; w[k+N*i] != -1; k++){
        j = w[k+N*i];
        r += z[j];
    }
    // Leaky integration toward input I, minus inhibitory feedback Kappa*r/N.
    u[i] = decay*u[i] + (1 - decay)*I - Kappa*r/N;
    // Rectify: output is max(u, 0).
    if (u[i] > 0){
        z[i] = u[i];
    }else{
        z[i] = 0;
    }
    result[i+N*t] = z[i];
}
// Runs the full T-step simulation on the device and copies the activity
// record back into the host `result` buffer.
void loop()
{
    float *zd, *ud, *resultd;
    int *wd;
    float decay;
    cudaError_t stat;
    int t;
    decay = exp(-1.0/Tau);   // per-step leak factor for time constant Tau
    cudaMalloc((void**)&wd, N*N*sizeof(int));
    cudaMalloc((void**)&zd, N*sizeof(float));
    cudaMalloc((void**)&ud, N*sizeof(float));
    cudaMalloc((void**)&resultd, N*T*sizeof(float));
    cudaMemcpy(wd, w, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(zd, z, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(ud, u, N*sizeof(float), cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE);
    dim3 dimGrid(N/BLOCK_SIZE);
    // One launch per timestep; default-stream ordering serializes them.
    for(t = 0; t < T; t++){
        Kernel<<<dimGrid,dimBlock>>>(wd, zd, ud, resultd, decay, t);
    }
    // Only this final blocking copy is checked; earlier async failures
    // would also surface here as a sticky error.
    stat = cudaMemcpy(result, resultd, N*T*sizeof(float), cudaMemcpyDeviceToHost);
    if (stat != cudaSuccess){
        puts("error");
    }
    cudaFree(wd);
    cudaFree(zd);
    cudaFree(ud);
    cudaFree(resultd);
}
// Writes one "t i" line to "<prefix>.r" for every neuron i whose recorded
// output was positive at timestep t (a raster of active neurons).
void output(char *prefix)
{
    FILE *f;
    int t, i;
    char fn[1024];
    sprintf(fn, "%s.r", prefix);
    f = fopen(fn, "w");
    for(t = 0; t < T; t++){
        for(i = 0; i < N; i++){
            if (result[i+N*t] > 0){
                fprintf(f, "%d %d\n", t, i);
            }
        }
    }
    fclose(f);
}
// Usage: prog <prefix>; the activity raster lands in "<prefix>.r".
int main(int argc, char *argv[])
{
    if (argc < 2){
        fprintf(stderr, "%s <prefix>\n", argv[0]);
        exit(1);
    }
    char *prefix = argv[1];
    initialize();
    loop();
    output(prefix);
    finalize();
    return 0;
}
|
12,076 |
// Writes data[i] = i for every launched thread index below N.
void __global__ kernel_test(float *data, int N){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    data[idx] = (float)idx;
}
// Fills data[0..N-1] with their own indices via a scratch device buffer.
// Fixed: the original launched <<<1, N>>>, which exceeds the 1024
// threads-per-block limit for large N (the launch silently fails);
// use a fixed block size with a ceil-div grid instead.
void gpu_test(float *data, int N){
    if (N <= 0)
        return;                       // nothing to do; avoids a 0-block launch
    float *d_data = NULL;
    cudaMalloc(&d_data, N*sizeof(float));
    const int threads = 256;
    int blocks = (N + threads - 1) / threads;
    kernel_test<<<blocks, threads>>>(d_data, N);
    cudaMemcpy(data, d_data, N*sizeof(float), cudaMemcpyDeviceToHost);
    if (d_data)
        cudaFree(d_data);
}
12,077 | #include <cuda_runtime_api.h>
// Copies a crop_width x crop_height window at (x_offset, y_offset) out of
// a planar (channel-major) image. One thread per output element; the flat
// index decomposes as x, then y, then channel. Threads past the last
// channel fall outside the guard and do nothing.
__global__ void image3_crop(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    int x_offset,
    int y_offset,
    float *out_pixels,
    int crop_width,
    int crop_height)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int x = idx % crop_width;
    int y = (idx / crop_width) % crop_height;
    int c = idx / (crop_width * crop_height);
    if ((x < crop_width) && (y < crop_height) && (c < channels)) {
        // Source: offset window inside plane c; dest: packed crop plane c.
        int in_idx = x_offset + x + (y_offset + y) * in_width + c * in_width * in_height;
        int out_idx = x + y * crop_width + c * crop_width * crop_height;
        out_pixels[out_idx] = in_pixels[in_idx];
    }
}
// Host launcher for image3_crop: one thread per output element
// (crop_width * crop_height * channels), queued on `stream`.
extern "C" void rembrandt_kernel_image3_crop(
    const float *in_pixels,
    int in_width,
    int in_height,
    int channels,
    int x_offset,
    int y_offset,
    float *out_pixels,
    int crop_width,
    int crop_height,
    cudaStream_t stream)
{
    const int threads = 1024;
    int total = crop_width * crop_height * channels;
    int blocks = (total + threads - 1) / threads;   // ceil-div
    image3_crop<<<blocks, threads, 0, stream>>>(
        in_pixels, in_width, in_height, channels,
        x_offset, y_offset,
        out_pixels, crop_width, crop_height);
}
// Batched variant of image3_crop: each image in the batch has its own
// dimensions and crop offset (per-batch arrays), with images spaced
// `in_stride` floats apart in in_pixels. One thread per output element;
// the flat index decomposes as x, y, channel, then batch.
__global__ void batch_image3_crop(
    const float *in_pixels,
    int in_stride,
    int batch_size,
    const int *in_widths,
    const int *in_heights,
    int channels,
    const int *in_x_offsets,
    const int *in_y_offsets,
    float *out_pixels,
    int crop_width,
    int crop_height)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int x = idx % crop_width;
    int y = (idx / crop_width) % crop_height;
    int c = (idx / (crop_width * crop_height)) % channels;
    int batch_idx = idx / (crop_width * crop_height * channels);
    if ((x < crop_width) && (y < crop_height) && (c < channels) && (batch_idx < batch_size)) {
        // Per-image geometry comes from the lookup arrays.
        int in_width = in_widths[batch_idx];
        int in_height = in_heights[batch_idx];
        int x_offset = in_x_offsets[batch_idx];
        int y_offset = in_y_offsets[batch_idx];
        int in_idx = x_offset + x + (y_offset + y) * in_width + c * in_width * in_height + batch_idx * in_stride;
        int out_idx = x + y * crop_width + c * crop_width * crop_height + batch_idx * crop_width * crop_height * channels;
        out_pixels[out_idx] = in_pixels[in_idx];
    }
}
// Host launcher for batch_image3_crop: one thread per output element
// across the entire batch, queued on `stream`.
extern "C" void rembrandt_kernel_batch_image3_crop(
    const float *in_pixels,
    int in_stride,
    int batch_size,
    const int *in_widths,
    const int *in_heights,
    int channels,
    const int *in_x_offsets,
    const int *in_y_offsets,
    float *out_pixels,
    int crop_width,
    int crop_height,
    cudaStream_t stream)
{
    const int threads = 1024;
    int total = crop_width * crop_height * channels * batch_size;
    int blocks = (total + threads - 1) / threads;   // ceil-div
    batch_image3_crop<<<blocks, threads, 0, stream>>>(
        in_pixels, in_stride, batch_size,
        in_widths, in_heights, channels,
        in_x_offsets, in_y_offsets,
        out_pixels, crop_width, crop_height);
}
|
12,078 | __global__ void kernel_invertDVF(float *mx2, float *my2, float *mz2, cudaTextureObject_t mx, cudaTextureObject_t my, cudaTextureObject_t mz, int nx, int ny, int nz, int niter)
{
    // Inverts a displacement vector field by fixed-point iteration: sample
    // the forward field at the current displaced position and negate it,
    // niter times. Textures are sampled at voxel centers (+0.5f).
    // Index arithmetic assumes blockDim == (16, 16, 4).
    int ix = 16 * blockIdx.x + threadIdx.x;
    int iy = 16 * blockIdx.y + threadIdx.y;
    int iz = 4 * blockIdx.z + threadIdx.z;
    if (ix >= nx || iy >= ny || iz >= nz)
        return;
    int id = ix + iy * nx + iz * nx * ny;
    float x = 0, y = 0, z = 0;
    for (int iter = 0; iter < niter; iter ++){
        // NOTE(review): x is updated before being used in the y and z
        // lookups (and y before z) — a Gauss-Seidel-style update rather
        // than a pure simultaneous one; confirm this ordering is intended.
        x = - tex3D<float>(mx, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
        y = - tex3D<float>(my, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
        z = - tex3D<float>(mz, (x + ix + 0.5f), (y + iy + 0.5f), (z + iz + 0.5f));
    }
    mx2[id] = x;
    my2[id] = y;
    mz2[id] = z;
}
12,079 | // B.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
// One block (single thread) per particle: accumulates the velocity kick
// from every other particle over one timestep dt. Block `bid` updates
// particle bid+1; particle 0 is never updated here (presumably a fixed
// central body — TODO confirm with the caller).
__global__ void B_kernel(double *r, double *v, double *m, double dt, int numParticles)
{
    size_t bid = blockIdx.x;
    double dirvec[3];
    double dist;
    // forward loop: goes from current particle to particle N-1
    for (int i = 1; i+bid+1 < numParticles; i++)
    {
        // x, y and z components of vector that points from particle j to particle k
        dirvec[0] = r[3*(bid+1)] - r[3*(i+bid+1)];
        dirvec[1] = r[3*(bid+1)+1] - r[3*(i+bid+1)+1];
        dirvec[2] = r[3*(bid+1)+2] - r[3*(i+bid+1)+2];
        // sqrt((|d|^2)^3) == |d|^3, so m/dist * dirvec gives the standard
        // inverse-square acceleration with a normalized direction.
        dist = sqrt((dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2])*\
        (dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2])*\
        (dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2]));
        // update one particle per thread
        v[3*(bid+1)] -= (m[bid+1+i] / dist) * dirvec[0] * dt;
        v[3*(bid+1)+1] -= (m[bid+1+i] / dist) * dirvec[1] * dt;
        v[3*(bid+1)+2] -= (m[bid+1+i] / dist) * dirvec[2] * dt;
    }
    // backwards loop: goes from current particle to particle 1
    for (int i = bid; i > 0; i--)
    {
        dirvec[0] = r[3*(bid+1)] - r[3*i];
        dirvec[1] = r[3*(bid+1)+1] - r[3*i+1];
        dirvec[2] = r[3*(bid+1)+2] - r[3*i+2];
        dist = sqrt((dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2])*\
        (dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2])*\
        (dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2]));
        v[3*(bid+1)] -= (m[i] / dist) * dirvec[0] * dt;
        v[3*(bid+1)+1] -= (m[i] / dist) * dirvec[1] * dt;
        v[3*(bid+1)+2] -= (m[i] / dist) * dirvec[2] * dt;
    }
}
extern "C" {
// Host wrapper: copies positions/velocities/masses to the device, runs a
// single velocity-update step (<<<numParticles, 1>>> to match the
// one-block-per-particle kernel), and copies the velocities back.
void B(double *r_h, double *v_h, double *m_h, double dt, int numParticles)
{
    size_t N_bytes = 3 * numParticles * sizeof(double);   // 3 doubles/particle
    double *r_d = NULL, *v_d = NULL, *m_d = NULL;
    cudaMalloc((void**) &r_d, N_bytes);
    cudaMalloc((void**) &v_d, N_bytes);
    cudaMalloc((void**) &m_d, N_bytes / 3);               // 1 mass/particle
    cudaMemcpy(r_d, r_h, N_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(v_d, v_h, N_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(m_d, m_h, N_bytes / 3, cudaMemcpyHostToDevice);
    B_kernel<<<numParticles, 1>>>(r_d, v_d, m_d, dt, numParticles);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(v_h, v_d, N_bytes, cudaMemcpyDeviceToHost);
    cudaFree(m_d);
    cudaFree(v_d);
    cudaFree(r_d);
}
}
|
12,080 | #include "includes.h"
// Conjugate-gradient direction update p = r + beta*p over the inner
// (non-halo) region of a padded 2D grid; one thread per inner cell.
__global__ void cg_calc_p( const int x_inner, const int y_inner, const int halo_depth, const double beta, const double* r, double* p)
{
    const int gid = threadIdx.x+blockIdx.x*blockDim.x;
    if(gid < x_inner*y_inner)
    {
        const int row_len = x_inner + 2*halo_depth;   // padded row width
        const int col = gid % x_inner;
        const int row = gid / x_inner;
        const int base = halo_depth*(row_len + 1);    // skip halo rows + cols
        const int index = base + col + row*row_len;
        p[index] = beta*p[index] + r[index];
    }
}
12,081 | /* CPUGPUւ̃f[^Rs[ */
/* - rev.201905 by Yoshiki NAGATANI */
#include <stdio.h>
/* DATA_SIZE = BLOCK_SIZE * GRID_SIZE Ŋ邱(vOł̓m[`FbN) */
#define DATA_SIZE 8
#define BLOCK_SIZE 4
#define GRID_SIZE (DATA_SIZE/BLOCK_SIZE)
/*-----------------------------------------------------------*/
/* GPUŃf[^e2{ĕ\ */
/* Prints each element of d_data doubled. */
__global__ void DoubleOnGPU(float* d_data) {
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    /* On the GPU there is no for loop: each thread handles only its own
       element (id). */
    printf("My target is d_data[%d] : %f * 2.0 = %f.\n", id, d_data[id], d_data[id] * 2.0);
}
/*-----------------------------------------------------------*/
/* Demo: initialize DATA_SIZE floats on the host, copy them to the device,
   and have each GPU thread print its element doubled. */
int main(void) {
    float* h_data; /* host (CPU) buffer */
    float* d_data; /* device (GPU) buffer */
    /* Allocate host memory (no error checking, for readability). */
    h_data = (float*)malloc(DATA_SIZE * sizeof(float));
    /* Allocate device memory (no error checking, for readability). */
    cudaMalloc((void**)&d_data, DATA_SIZE * sizeof(float));
    /* Initialize the values on the CPU. */
    printf("Data before processing: ");
    for (int i = 0; i < DATA_SIZE; i++) {
        h_data[i] = (float)(i) * 10.0;
        printf("%f, ", h_data[i]);
    }
    printf("\n");
    /* Copy the contents to the device (CPU -> GPU). */
    cudaMemcpy(d_data, h_data, DATA_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    /* Run the doubling (printing) on the device (GPU). */
    DoubleOnGPU <<<GRID_SIZE, BLOCK_SIZE>>> (d_data);
    cudaDeviceSynchronize();
    /* NOTE(review): h_data is never freed and d_data relies on
       cudaDeviceReset for cleanup — fine for a demo that exits here. */
    cudaDeviceReset();
    return 0;
}
|
12,082 | #include "includes.h"
// For each pixel (one thread per pixel, 2D grid of 1D blocks), computes
// the Euclidean distance from the pixel's (x, y) position to every
// centroid and stores it in distanceMatrix[centroid][pixel].
// Note: inputImg/imgHeight are accepted but not read here.
__global__ void ComputeEuklidianDistancesKernel( float *inputImg, int imgWidth, int imgHeight, float *centroidCoordinates, float *distanceMatrix, int centroids, int inputSize )
{
    int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
    + blockDim.x*blockIdx.x //blocks preceeding current block
    + threadIdx.x;
    if(threadId < inputSize)
    {
        // Recover the pixel's 2D coordinates from the flat index.
        int pointX = threadId % imgWidth;
        int pointY = threadId / imgWidth;
        float X = (float)pointX;
        float Y = (float)pointY;
        float dist;
        float centroidX;
        float centroidY;
        for(int c = 0; c < centroids; c++)
        {
            // Centroids are stored as interleaved (x, y) pairs.
            centroidX = centroidCoordinates[c * 2];
            centroidY = centroidCoordinates[c * 2 + 1];
            dist = sqrtf( (centroidX - X) * (centroidX - X) + (centroidY - Y) * (centroidY - Y) );
            distanceMatrix[c * inputSize + threadId] = dist;
        }
    }
}
12,083 | #include "includes.h"
// Merges the two sorted runs source[start, middle) and source[middle, end)
// into dest[start, end) — the standard bottom-up merge step.
__device__ void gpu_bottomUpMerge(long* source, long* dest, long start, long middle, long end) {
    long i = start;    // cursor into the left run
    long j = middle;   // cursor into the right run
    for (long k = start; k < end; k++) {
        // Take from the left run while it has elements and its head is
        // smaller (or the right run is exhausted).
        if (i < middle && (j >= end || source[i] < source[j])) {
            dest[k] = source[i];
            i++;
        } else {
            dest[k] = source[j];
            j++;
        }
    }
}
// Flattens the 3D thread and block indices into one linear id. The comma
// assignments build a running multiplier x (threads->x, then *y, *z, then
// *blocks->z, *blocks->y) as the expression is read top to bottom.
// NOTE(review): this relies on the + operands being evaluated in source
// order, which C++ does not guarantee (unspecified pre-C++17), and mixes
// blocks->z / blocks->y unusually — verify against the launch dimensions.
__device__ unsigned int getIdx(dim3* threads, dim3* blocks) {
    int x;
    return threadIdx.x +
    threadIdx.y * (x = threads->x) +
    threadIdx.z * (x *= threads->y) +
    blockIdx.x * (x *= threads->z) +
    blockIdx.y * (x *= blocks->z) +
    blockIdx.z * (x *= blocks->y);
}
// One merge-sort pass: each thread merges `slices` consecutive windows of
// `width` elements from source into dest. `width` doubles between passes
// on the host side; middle/end are clamped to `size` for the ragged tail.
__global__ void gpu_mergesort(long* source, long* dest, long size, long width, long slices, dim3* threads, dim3* blocks) {
    unsigned int idx = getIdx(threads, blocks);
    long start = width*idx*slices,   // first window assigned to this thread
    middle,
    end;
    for (long slice = 0; slice < slices; slice++) {
        if (start >= size)
            break;
        // Each window merges its two sorted halves of width/2.
        middle = min(start + (width >> 1), size);
        end = min(start + width, size);
        gpu_bottomUpMerge(source, dest, start, middle, end);
        start += width;
    }
}
12,084 | #include <bits/stdc++.h>
using namespace std;
const int MAXX = 1e8;
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
struct pnt {
int x, y;
};
// Maximum-likelihood pixel classifier: reads an image and nc training
// classes of sample pixels, fits per-class mean + covariance over (r,g,b),
// then labels every pixel (stored in the alpha channel) by the highest
// Mahalanobis-based discriminant. Runs entirely on the CPU; CUDA events
// are used only for timing.
int main() {
    int w, h;
    char inputFile[256], outputFile[256];
    cin >> inputFile >> outputFile;
    FILE *fp = fopen(inputFile, "rb");
    fread(&w, sizeof(int), 1, fp);
    fread(&h, sizeof(int), 1, fp);
    uchar4 *data = (uchar4 *) malloc(sizeof(uchar4) * w * h);
    fread(data, sizeof(uchar4), w * h, fp);
    fclose(fp);
    int nc, np;
    cin >> nc;
    vector<vector<pnt>> classes(nc);
    // Fixed: avg and cov are accumulated with += below, but were never
    // zero-initialized (undefined behavior); value-initialize them.
    int4 avg[32] = {};
    double cov[32][3][3] = {};
    double cov_inv[32][3][3];
    double dets[32];
    for (int i = 0; i < nc; ++i) {
        cin >> np;
        classes[i].resize(np);
        // input + counting averages
        for (int j = 0; j < np; ++j) {
            cin >> classes[i][j].x >> classes[i][j].y;
            uchar4 ps = data[classes[i][j].y * w + classes[i][j].x];
            avg[i].x += ps.x;
            avg[i].y += ps.y;
            avg[i].z += ps.z;
        }
        avg[i].x /= np;
        avg[i].y /= np;
        avg[i].z /= np;
        // counting cov
        for (int j = 0; j < np; ++j) {
            uchar4 ps = data[classes[i][j].y * w + classes[i][j].x];
            int diff[3];
            diff[0] = ps.x - avg[i].x;
            diff[1] = ps.y - avg[i].y;
            diff[2] = ps.z - avg[i].z;
            for (int k = 0; k < 3; ++k) {
                for (int m = 0; m < 3; ++m) {
                    cov[i][k][m] += diff[k] * diff[m];
                }
            }
        }
        for (int k = 0; k < 3; ++k) {
            for (int m = 0; m < 3; ++m) {
                cov[i][k][m] /= (np - 1);   // sample covariance
            }
        }
        // counting cov_inverse + determinants (closed-form 3x3 adjugate)
        double det = cov[i][0][0] * (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2])
        - cov[i][0][1] * (cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2])
        + cov[i][0][2] * (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]);
        cov_inv[i][0][0] = (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) / det;
        cov_inv[i][1][0] = -(cov[i][1][0] * cov[i][2][2] - cov[i][2][0] * cov[i][1][2]) / det;
        cov_inv[i][2][0] = (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]) / det;
        cov_inv[i][0][1] = -(cov[i][0][1] * cov[i][2][2] - cov[i][2][1] * cov[i][0][2]) / det;
        cov_inv[i][1][1] = (cov[i][0][0] * cov[i][2][2] - cov[i][2][0] * cov[i][0][2]) / det;
        cov_inv[i][2][1] = -(cov[i][0][0] * cov[i][2][1] - cov[i][2][0] * cov[i][0][1]) / det;
        cov_inv[i][0][2] = (cov[i][0][1] * cov[i][1][2] - cov[i][1][1] * cov[i][0][2]) / det;
        cov_inv[i][1][2] = -(cov[i][0][0] * cov[i][1][2] - cov[i][1][0] * cov[i][0][2]) / det;
        cov_inv[i][2][2] = (cov[i][0][0] * cov[i][1][1] - cov[i][1][0] * cov[i][0][1]) / det;
        dets[i] = det;
    }
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for (int y = 0; y < h; ++y){
        for (int x = 0; x < w; ++x){
            uchar4 ps = data[y * w + x];
            double mx = -MAXX;
            int idx = -1;
            for (int i = 0; i < nc; ++i){
                int diff[3];
                diff[0] = ps.x - avg[i].x;
                diff[1] = ps.y - avg[i].y;
                diff[2] = ps.z - avg[i].z;
                // tmp = diff^T * cov_inv ; ans = tmp * diff (Mahalanobis^2)
                double tmp[3];
                for(int j = 0; j < 3; ++j){
                    tmp[j] = 0;
                    for(int k = 0; k < 3; ++k){
                        tmp[j] += (diff[k] * cov_inv[i][k][j]);
                    }
                }
                double ans = 0;
                for(int j = 0; j < 3; ++j){
                    ans += (tmp[j] * diff[j]);
                }
                // Discriminant: -d^2 - log|det|; pick the maximum.
                ans = -ans - log(abs(dets[i]));
                if(ans > mx){
                    mx = ans;
                    idx = i;
                }
            }
            data[y * w + x].w = idx;   // class label in the alpha channel
        }
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    fprintf(stderr, "%.2f\n", time);
    cudaEventDestroy(stop);
    cudaEventDestroy(start);
    fp = fopen(outputFile, "wb");
    fwrite(&w, sizeof(int), 1, fp);
    fwrite(&h, sizeof(int), 1, fp);
    fwrite(data, sizeof(uchar4), w * h, fp);
    fclose(fp);
    free(data);
    return 0;
}
|
12,085 | #include "includes.h"
__global__ void stats_kernel(int half_samps, float *d_sum, float *d_sum_square, float *d_signal_power)
{
int t = blockIdx.x * blockDim.x * STATSLOOP + threadIdx.x;
float local = 0.0;
float sum = 0.0;
float sum_square = 0.0;
for (int i = t; i < t + STATSLOOP * blockDim.x; i += blockDim.x)
{
local = d_signal_power[i];
sum += local;
sum_square += local * local;
}
d_sum[blockIdx.x * blockDim.x + threadIdx.x] = sum;
d_sum_square[blockIdx.x * blockDim.x + threadIdx.x] = sum_square;
} |
12,086 | #include <stdio.h>
#include <stdlib.h>
#include <set>
#include <sstream>
#include <string>
#include <fstream>
#include <iostream>
#include <cstring>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
//
// __global__ void GraphGenerator(int* matrix,int* dimension, int* address, int* h_graph, int V)
// {
// int index = threadIdx.x + blockDim.x * blockIdx.x;
// int stride = blockDim.x * gridDim.x;
// for(int i = index; i < V; i += stride)
// {
// int a = address[i];
// int j = 0;
// for (int k = 0; k < V; k++)
// {
// if (matrix[i*V + k])
// {
// h_graph[a + j] = k;
// j++;
// }
// }
// }
// }
//
// __global__ void DimensionGenerator(int* matrix, int* dimension, int* address, int V)
// {
// int index = threadIdx.x + blockDim.x * blockIdx.x;
// int stride = blockDim.x * gridDim.x;
// for(int i = index; i < V; i += stride)
// {
// for (int j = 0; j < V; j++)
// {
// if(matrix[i*V + j])
// {
// dimension[i]++;
// }
// }
// }
// __syncthreads();
// }
//
//
// //================================Utility Functions=======================================
// void CountColors(int V,int length, int* color, int &minColors, int &minIndex)
// {
// //int minColors = INT_MAX;
// //int minIndex;
// int *num_colors;
// num_colors = (int*) malloc(sizeof(int) * length);
// for (int i = 0; i < length; i++)
// {
// num_colors[i] = 0;
// }
// set<int> seen_colors;
//
// for (int i = 0; i < length; i++) {
// if (seen_colors.find(color[i]) == seen_colors.end())
// {
// seen_colors.insert(color[i]);
// num_colors[i/V]++;
// }
// if(i%V==V-1)
// {
// //cout<<num_colors[i/V]<<endl;
// if (num_colors[i/V] < minColors)
// {
// minColors = num_colors[i/V];
// minIndex = i / V;
// }
// seen_colors.clear();
// //num_colors = 0;
// }
// }
// }
//
// bool IsValidColoring(int* graph, int V, int* color)
// {
// for (int i = 0; i < V; i++) {
// for (int j = 0; j < V; j++) {
// if (graph[i * V + j]) {
// if (i != j && color[i] == color[j]) {
// printf("Vertex %d and Vertex %d are connected and have the same color %d\n", i, j, color[i]);
// return false;
// }
// if (color[i] < 1) {
// printf("Vertex %d has invalid color %d\n", i, color[i]);
//
// }
// }
// }
// }
//
// return true;
// }
//
// //Load raw .co data
// void getDimension(const char filename[], int* V)
// {
// string line;
// ifstream infile(filename);
// if (infile.fail()) {
// printf("Failed to open %s\n", filename);
// return;
// }
//
// int num_rows;
//
// while (getline(infile, line))
// {
// istringstream iss(line);
// string s;
// iss >> s;
// if (s == "p") {
// iss >> s; // read string "edge"
// iss >> num_rows;
// *V = num_rows;
// break;
// }
// }
// infile.close();
// }
// //print graph Matrix
// void PrintMatrix(int* matrix, int M, int N) {
// for (int row=0; row<M; row++)
// {
// for(int columns=0; columns<N; columns++)
// {
// printf("%i", matrix[row * N + columns]);
// }
// printf("\n");
// }
// }
// Read MatrixMarket graphs
// // Assumes input nodes are numbered starting from 1
// void ReadMMFile(const char filename[], bool** graph, int* V)
// {
// string line;
// ifstream infile(filename);
// if (infile.fail()) {
// printf("Failed to open %s\n", filename);
// return;
// }
//
// // Reading comments
// while (getline(infile, line)) {
// istringstream iss(line);
// if (line.find('%') == string::npos)
// break;
// }
//
// // Reading metadata
// istringstream iss(line);
// int num_rows, num_cols, num_edges;
// iss >> num_rows >> num_cols >> num_edges;
//
// *graph = new bool[num_rows * num_rows];
// memset(*graph, 0, num_rows * num_rows * sizeof(bool));
// *V = num_rows;
//
// // Reading nodes
// while (getline(infile, line)) {
// istringstream iss(line);
// int node1, node2, weight;
// iss >> node1 >> node2 >> weight;
//
// // Assume node numbering starts at 1
// (*graph)[(node1 - 1) * num_rows + (node2 - 1)] = true;
// (*graph)[(node2 - 1) * num_rows + (node1 - 1)] = true;
// }
// infile.close();
// }
//Constraints=======================================================================================
//
// int* const2(int numVertices) //This requires transpose.
// {
// int *toRet;
// toRet = (int*) malloc(sizeof(int) * numVertices);
// for (int i = 0; i < numVertices; i++)
// toRet[i] = 1;
// return toRet;
// }
// int* allConsts(int * matrix, int numVertices)
// {
// int *toRet;
// int numEdges = 0;
// for (int i = 0; i < numVertices * numVertices; i++)
// {
// if(matrix[i])
// numEdges++;
// }
//
// cudaMallocManaged(*toRet, sizeof((int) * ((numEdges+5)*numVertices)* (4*numVertices+4));
// // toRet = (int*) malloc(sizeof(int) * ((numEdges+5)*numVertices)* (4*numVertices+4));//Y dimension is 6*numVertices and X dimension is 2*numVertices;
//
// int row;
// int col;
// //Constraint 1
// for (int i = 0; i < numEdges * numVertices; i++)
// {
//
// }
//
// //Constraint 2
// int startConstraint2 = i % (4*numVertices + 4); //The row at which const 2 matches.
// for (; i < numVertices; i++)
// {
// row = i % (4*numVertices + 4);
// col = i / (4*numVertices + 4);
// toRet[]
// for (int j = 0; j < 4; j++)
// {
// toRet[]
// }
// }
// }
// int* const1(int * matrix, int numVertices)
// {
// //Find # of edges.
// int * toRet;
// int numEdges = 0;
// for (int i = 0; i < numVertices * numVertices; i++)
// {
// if(matrix[i])
// numEdges++;
// }
// toRet = (int*) malloc(sizeof(int) * (numVertices+numEdges));
//
// for (int i = 0; i < numVertices+numEdges; i++)
// {
// toRet[i] = 0;
// }
// //Populate the matrix to return.
// numEdges = 0;
// int row;
// int col;
// for (int i = 0; i < numVertices * numVertices; i++)
// {
//
// if (matrix[i])
// {
// row = i % numVertices;
// col = i / numVertices;
// toRet[numEdges* numVertices + row] = 1;
// toRet[numEdges* numVertices + col] = 1;
// numEdges++;
//
// }
//
// }
// return toRet;
// }
// Parses "e u v" edge lines from a DIMACS-style file into edgeList as
// consecutive (u, v) pairs; all other lines are skipped.
void readEdgesPosition(const char filename[], int* edgeList)
{
    ifstream infile(filename);
    if (infile.fail()) {
        printf("Failed to open %s\n", filename);
        return;
    }
    string line;
    int count = 0;
    while (getline(infile, line)) {
        istringstream iss(line);
        string tag;
        iss >> tag;
        if (tag != "e")
            continue;
        int u, v;
        iss >> u >> v;
        edgeList[2 * count] = u;
        edgeList[2 * count + 1] = v;
        count++;
    }
    infile.close();
}
// Reads the "p <fmt> <numVertices> <numEdges>" problem line from a DIMACS
// file. Non-"p" lines are skipped; if several "p" lines exist the last
// one wins; the outputs are left untouched when no "p" line is found,
// so callers should initialize them.
void getInfo(const char filename[], int* numEdges, int* numVertices)
{
    string line;
    ifstream infile(filename);
    if (infile.fail()) {
        printf("Failed to open %s\n", filename);
        return;
    }
    while (getline(infile, line)) {
        istringstream iss(line);
        string s,node1;
        iss >> s;
        if (s != "p")
            continue;
        // node1 swallows the format token (e.g. "edge").
        iss >> node1 >> *numVertices >> *numEdges;
        // Assume node numbering starts at 1
    }
    infile.close();
}
// Edge constraints: for each edge (u, v) and each color j, sets the
// coefficients of x_{u,j} and x_{v,j} to 1.0 in row numColors*i + j.
// Row stride is numVertices*numColors + numColors; edge endpoints are
// 1-based in edgeList (hence the -1). Grid-stride loop over edges.
__global__ void constraint1 (double *simplexTable, int* edgeList, int numColors, int numEdges, int numVertices)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int i = index; i < numEdges; i+=stride)
    {
        for(int j = 0; j < numColors; j++)
        {
            simplexTable[(numColors*i+j)*(numVertices*numColors+numColors)+(edgeList[2*i]-1) * numColors+j] = 1.0;
            simplexTable[(numColors*i+j)*(numVertices*numColors+numColors)+(edgeList[2*i+1]-1) * numColors+j] = 1.0;
        }
    }
}
// Vertex constraints: one row per vertex i, setting the coefficients of
// that vertex's numColors color variables to 1.0 (each vertex must take
// a color). Same row stride as constraint1; grid-stride loop over vertices.
__global__ void constraint2 (double *simplexTable, int numVertices, int numColors)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < numVertices; i += stride)
    {
        for (int j = 0; j < numColors; j++)
        {
            simplexTable[i*(numVertices*numColors+numColors)+numColors*i+j] = 1.0;
        }
    }
}
// Linking constraints: one row per vertex-color variable i, with +1.0 on
// that variable and -1.0 on the extra per-color column (i % numColors)
// at the right edge of the table. Grid-stride loop over all variables.
__global__ void constraint3 (double *simplexTable, int numVertices, int numColors)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < numVertices*numColors; i += stride)
    {
        simplexTable[i*(numVertices*numColors+numColors) + i] = 1.0;
        simplexTable[i*(numVertices*numColors+numColors) + numColors * numVertices + (i%numColors)] = -1.0;
    }
}
//===================================Main=======================================
// Builds and prints the simplex constraint table for numColors-coloring
// of the graph in `filename` (DIMACS .col format).
void constraintGenerator(const char filename[])
{
    int numColors = 2;
    // Fixed: zero-initialized — getInfo is skipped for non-.col names and
    // these were previously used uninitialized in that case.
    int numEdges = 0;
    int numVertices = 0;
    if (string(filename).find(".col") != string::npos)
    {
        getInfo(filename, &numEdges, &numVertices);
    }
    int* edgeList;
    cudaMallocManaged(&edgeList,sizeof(int)*2*numEdges);
    readEdgesPosition(filename,edgeList);
    int * simp;
    cudaMallocManaged(&simp, sizeof(int) *numEdges*2*10);
    // Fixed: the zeroing loop bound was a hard-coded 16*20, unrelated to
    // the allocation size above.
    for (int i = 0; i < numEdges*2*10; i++)
    {
        simp[i] = 0;
    }
    double* simplexTable;
    // Row width used by all three constraint kernels and the print loop.
    size_t width = (size_t)numColors*numVertices + numColors;
    size_t rows  = (size_t)numEdges*numColors + numVertices + (size_t)numColors*numVertices;
    // Fixed: the original allocation omitted sizeof(double) (so the
    // kernels wrote far past the buffer) and used an inconsistent row
    // width; the table is also zeroed, since the kernels only write the
    // non-zero entries but the whole table is printed below.
    cudaMallocManaged(&simplexTable, width * rows * sizeof(double));
    cudaMemset(simplexTable, 0, width * rows * sizeof(double));
    constraint1<<<1,1>>>(simplexTable, edgeList,numColors,numEdges,numVertices);
    constraint2<<<1,1>>>(simplexTable + ((numColors*numVertices + numColors)*(numEdges*numColors)), numVertices, numColors);
    constraint3<<<1,1>>>(simplexTable + ((numColors*numVertices + numColors)*(numEdges*numColors + numVertices)) , numVertices, numColors);
    cudaDeviceSynchronize();
    for (size_t i = 0; i < width * rows; i++)
    {
        cout << simplexTable[i] << " ";
        if (i%10==9) cout << endl;
    }
    cout<<endl;
    cudaFree(simp);
    cudaFree(simplexTable);
    // Fixed: edgeList was leaked.
    cudaFree(edgeList);
}
// Entry point: expects the graph filename as the first argument.
int main(int argc, char const *argv[]) {
    // Fixed: argv[1] was dereferenced without checking argc.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <graph.col>\n", argv[0]);
        return 1;
    }
    constraintGenerator(argv[1]);
    return 0;
}
|
12,087 | //function kernel
// NOTE: despite the name, this returns the SQUARED length |r|^2 (no sqrt).
// Callers in this file compare the result against h*h accordingly.
__device__ float length(float3 r) {
    return r.x*r.x + r.y*r.y + r.z*r.z;
}
// Component-wise difference r1 - r2.
__device__ float3 dif_float3(float3 r1, float3 r2) {
    float dx = r1.x - r2.x;
    float dy = r1.y - r2.y;
    float dz = r1.z - r2.z;
    return make_float3(dx, dy, dz);
}
// Poly6 SPH smoothing kernel: 315/(64*pi*h^9) * (h^2 - |r|^2)^3, where
// length(r) is the squared distance. Note: pow() here promotes to double
// and PI is a truncated float constant — acceptable for this simulation.
__device__ float Kernel_Poly6(float3 r, float h) {
    float PI = 3.14159;
    return 315.0f / (64 * PI * pow(h, 9)) * pow(pow(h, 2) - length(r), 3);
}
//SPH particle struct
struct pSPH {
    float3 pos;   // particle position
    float3 vel;   // particle velocity
    float m;      // mass
    float rho;    // density (written by SPH_1)
    float _;      // per-particle tag: selects the color formula in SPH_1 — exact meaning unclear from here, TODO confirm
    float col;    // color/shading value derived from rho
};
// Density pass: rho_i = sum_j m_j * W_poly6(r_ij, h) over neighbors within
// radius h (O(N^2) all-pairs), followed by a color value derived from the
// density. One thread per particle.
extern "C" __global__ void
SPH_1(pSPH *p, const float h, const int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Fixed: the guard was `idx > N`, letting thread N read and write one
    // element past the end of the particle array.
    if (idx >= N) return;
    p[idx].rho = 0.0f;
    pSPH _p = p[idx];
    int i;
    // Fixed: _rho was accumulated with += while uninitialized.
    float _rho = 0.0f;
    for (i = 0; i < N; ++i)
    {
        if (i == idx) continue;
        float3 r = dif_float3(_p.pos, p[i].pos);
        // length() returns the SQUARED distance, hence the h*h comparison.
        if (length(r) <= h*h)
        {
            _rho += p[i].m * Kernel_Poly6(r, h);
        }
    }
    p[idx].rho = _rho + 0.0001f;   // epsilon avoids division by ~0 below
    if (_p._ <= 0.2) p[idx].col = 1.0f / (p[idx].rho + 1.0f) - 0.1f;
    else p[idx].col = 1.0f / (p[idx].rho/1.4f + 1.0f);
    return;
}
|
12,088 | #include <cstdlib>
#include <cstdio>
#include <cuda.h>
using namespace std;
// Empty placeholder kernel; launched once from main() as a smoke test.
__global__ void mykernel(void) {
}
// Launches the no-op kernel and prints from the host. Note: there is no
// device sync, which is harmless here because the kernel does nothing.
int main(void) {
    mykernel<<<1,1>>>();
    printf("CPU Hello World!\n");
    return 0;
}
|
12,089 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Decodes a CUDA status into a readable message with source location;
// optionally terminates the process using the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Squares each element: d_out[i] = d_in[i]^2. One thread per element and
// no bounds check, so the launch must use exactly the array length.
__global__ void computeSquare(int *d_in, int *d_out) {
    int i = threadIdx.x;
    int v = d_in[i];
    d_out[i] = v * v;
}
// Squares {1..5} on the GPU and prints the results.
int main() {
    const int arr_size = 5;
    int h_in[arr_size] = { 1, 2, 3, 4, 5 };
    int h_out[arr_size];
    int arr_bytes = arr_size * sizeof(int);
    int *d_in, *d_out;
    // Fixed: route CUDA calls through the gpuErrchk macro defined above
    // (it existed but was never used), and free the device buffers.
    gpuErrchk(cudaMalloc((void **)&d_in, arr_bytes));
    gpuErrchk(cudaMalloc((void **)&d_out, arr_bytes));
    gpuErrchk(cudaMemcpy(d_in, h_in, arr_bytes, cudaMemcpyHostToDevice));
    computeSquare <<<1, arr_size >>>(d_in, d_out);
    gpuErrchk(cudaGetLastError());   // catch bad launch configurations
    gpuErrchk(cudaMemcpy(h_out, d_out, arr_bytes, cudaMemcpyDeviceToHost));
    for (int i = 0; i < arr_size; i++) {
        printf("%d ", h_out[i]);
    }
    gpuErrchk(cudaFree(d_in));
    gpuErrchk(cudaFree(d_out));
    system("pause");
    return 0;
}
12,090 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Prints one greeting per GPU thread (main launches 10 of them).
__global__ void helloCUDA(void)
{
    printf("Hello CUDA from GPU!\n");
}
// Host greeting first, then 10 GPU threads print theirs; the sync ensures
// the device-side printf output is flushed before the process exits.
int main(void)
{
    printf("Hello GPU from CPU!\n");
    helloCUDA<<<1, 10>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
12,091 | float h_A[]= {
0.9197632695278908, 0.7824552967579823, 0.6259681535588939, 0.5942338071724684, 0.7669269094385093, 0.5583150349831146, 0.572184389431657, 0.7536787785968667, 0.9853433056011335, 0.8865774231363988, 0.8365027825493165, 0.7057431268665875, 0.7196798964298432, 0.785609880310334, 0.7897107448250169, 0.6854412226683937, 0.5152783652105504, 0.6817434709253274, 0.5067292177126297, 0.6418002953471231, 0.6923575739186674, 0.7830876802285461, 0.7212958343766162, 0.8635472044743753, 0.5904859720983049, 0.9412380919296495, 0.6253815388487978, 0.8026016938984746, 0.5049474198362256, 0.7156619014022134, 0.6455175794991229, 0.9354584292059892, 0.5532342377780974, 0.8025512898956222, 0.706321764485146, 0.8070634814305586, 0.7673475596958055, 0.7226978904136114, 0.7721694899372575, 0.5249827186562896, 0.9475525455886129, 0.6524820695873904, 0.9614339446532731, 0.6470123032914894, 0.6184211022534158, 0.9751558497573914, 0.8868914584640857, 0.9082853163427098, 0.5373108954426842, 0.7559541029497496, 0.7211125274027933, 0.5849572442500134, 0.5887808924576516, 0.5672851890565276, 0.9466155289934803, 0.836664139767721, 0.7506086588179005, 0.7533900514717291, 0.565393717034328, 0.7946023494275698, 0.5915397137232821, 0.6208804941022484, 0.6113020921729777, 0.9393414028931159, 0.9063282440144282, 0.7911217897143646, 0.9013326728572539, 0.5172023106902912, 0.5974939506655798, 0.8038181587911366, 0.8871678042279081, 0.9827384722478307, 0.6364229573836577, 0.7846941843625523, 0.76578751145372, 0.8211382394690045, 0.9918678350059147, 0.8598219796328124, 0.7427158434571719, 0.9181415777460193, 0.8138783900824189, 0.5590972600361843, 0.6349326049465942, 0.8028770408702617, 0.9576747869284543, 0.6915291895340034, 0.9308800521545089, 0.5789703739192261, 0.8106386456432735, 0.559902580808253, 0.92424621823747, 0.5737177156501232, 0.8433190092805505, 0.5383767023602444, 0.5012746729280761, 0.8711235496765495, 0.5541770490267267, 0.5371984532037717, 0.5233252487089028, 0.7756010359475147, 
0.8432805241203029, 0.6265804974092605, 0.87180591478316, 0.8608771517737015, 0.91842873144163, 0.7316418787756889, 0.9335912132651154, 0.9853875570698927, 0.8895337109767572, 0.5240554914473827, 0.9672160541089263, 0.6701543540214723, 0.9700623920567091, 0.5021228878654586, 0.8629889616350528, 0.8899851210903351, 0.7469788942716333, 0.5700286375275826, 0.6597809840946787, 0.5903005819663691, 0.519335785365949, 0.623711163660509, 0.6489575887409685, 0.8134899605008556, 0.5816912923366672, 0.9219459367764563, 0.8590319719154661, 0.5442690998923164, 0.8069277875877154, 0.9885189525012901, 0.6748984021238258, 0.5313129536691829, 0.6293901897813962, 0.7942828748232087, 0.845261930143397, 0.8466935068564568, 0.6473753822411076, 0.8119954564578624, 0.9787388420012746, 0.8218991747161724, 0.56128462271072, 0.8091688496638354, 0.7984966556464853, 0.793148395743176, 0.6829982966211612, 0.5843457784425827, 0.9816805679011378, 0.943940170416756, 0.9014718363910852, 0.6929401736860615, 0.5418244932746124, 0.8319715403441819, 0.5004072524548955, 0.522925486173084, 0.9732769333795841, 0.6013760576412406, 0.9424152639066917, 0.5270740585985698, 0.9200452110696472, 0.7805989607687343, 0.9777045916431966, 0.6902928358050293, 0.5228539465113041, 0.9991476664435448, 0.5739841261672987, 0.7864581700475668, 0.7902543646979885, 0.7495721797703444, 0.5318592011326259, 0.8635704085321034, 0.9144601251503701, 0.9865763952849143, 0.7116520719301114, 0.7467066657467807, 0.7515939199048862, 0.8385579400364738, 0.8569231674630717, 0.721259988338699, 0.8338789461530953, 0.9858327840620214, 0.9658048086061086, 0.6904789697067526, 0.7658459683632892, 0.9227812319210367, 0.8288243322785915, 0.934894057296382, 0.5267481169593289, 0.8866808079339502, 0.5057430888374659, 0.6329192293949151, 0.6457322520773194, 0.837723970881503, 0.8376018389457185, 0.583591878310318, 0.6144126273614826, 0.8686957263229105, 0.5688701944316699, 0.6318297121166077, 0.641830967309227, 0.9203080639510672, 
0.623565677611597, 0.56837914686431, 0.8311236290992134, 0.644432918555615, 0.7631013212727753, 0.5764554340463496, 0.5788305731645066, 0.5822743432522088, 0.9496156049860609, 0.6643069631624494, 0.6231171091361757, 0.756911230096778, 0.9318312604391442, 0.5633818852392273, 0.9461191914099156, 0.8623536243715463, 0.8442440260923469, 0.9545052431606176, 0.9634127884021995, 0.5733582026544729, 0.8897391107971606, 0.5839471722063321, 0.5331671155476876, 0.6724983416636632, 0.5809332569594551, 0.5799620181429435, 0.5311249340612136, 0.8929996333901686, 0.6461923813264117, 0.6103716633049434, 0.6046917674639452, 0.6400823216603835, 0.7245764027046169, 0.689895370283752, 0.7774137748556365, 0.7342051150909512, 0.9094527963156407, 0.5977118773232332, 0.8875423753609015, 0.8472792131521227, 0.7206108346469834, 0.9383042992204077, 0.8562436578918841, 0.8356716246579851, 0.8240041730156413, 0.9345179926195943, 0.7158048588013359, 0.7310073335768739, 0.7271367661725852, 0.7631167366673086, 0.7713223797134676, 0.9096659673632634, 0.5894133680124104, 0.969602974160457, 0.8165687649048144, 0.5236521426824623, 0.5424885673805516, 0.751098204859209, 0.5835628891085066, 0.770464551956119, 0.968340122805121, 0.5982141190788554, 0.5717542766374402, 0.7436625182802341, 0.6233353675155495, 0.8436685752047584, 0.6663326856433596, 0.8580754482913284, 0.5431519505523492, 0.697259343238918, 0.5367208366036639, 0.9979769193603607, 0.5291683274019678, 0.7786161518720445, 0.5243151132044397, 0.6549169791862856, 0.5523561791088635, 0.9426558853152944, 0.5551299760987796, 0.8640813534456546, 0.5469351920453548, 0.7465753136903063, 0.5821156517818309, 0.5755555903314278, 0.7721834761568331, 0.7605770688962692, 0.6285609210329538, 0.788373723398422, 0.8121177387218249, 0.9411997453019296, 0.9804948590290039, 0.7865806396239341, 0.9769410157888349, 0.8563251784701454, 0.5445785856028995, 0.7430515618853087, 0.9624976248843659, 0.73633053709217, 0.5674035617019093, 0.5538459138158829, 
0.6012190740576802, 0.6272509025449282, 0.8898241267596823, 0.8813604745227935, 0.5262795939710669, 0.553931424602222, 0.9342178243411744, 0.9235002376229772, 0.5601980444647481, 0.6389994231571148, 0.6115457876898982, 0.7760045147314752, 0.9068060458427111, 0.7003006471998576, 0.6726758112515373, 0.7210523922413083, 0.7698262786028314, 0.914432658090223, 0.650713363042, 0.6029206521983412, 0.7340727481004183, 0.6688111210890388, 0.7202078040456263, 0.89789965119069, 0.5629537901645493, 0.7761431211233834, 0.6120998552958781, 0.595864965274501, 0.5186705840048778, 0.8190975194856026, 0.7981852144065942, 0.7887632937809199, 0.5712914823563173, 0.8829854971925645, 0.6287521664211269, 0.607395216890007, 0.6083907304594547, 0.9683695511030251, 0.7125812319374638, 0.6776970456260129, 0.8252052440960371, 0.6714700307678956, 0.5067511607710388, 0.999158242296277, 0.5805720400651868, 0.6332421097011645, 0.9140346311178974, 0.5986553348626751, 0.8389594000365463, 0.760059674524428, 0.9209062703721687, 0.9968952621033493, 0.6542315689355042, 0.705144828228886, 0.5457549326122892, 0.9499603272839054, 0.7562839731882682, 0.8790476771888334, 0.823468720964969, 0.6334711634656501, 0.9228003930766976, 0.6381156560702382, 0.8659732561102957, 0.7790261902368993, 0.9398992099329475, 0.9210835366566088, 0.9789188296118339, 0.7942825911135768, 0.9264631021552356, 0.6160671127035611, 0.7483274230167722, 0.6989064911617168, 0.6759115386680561, 0.9942527223694311, 0.6880902920958736, 0.8371065510314529, 0.919706145394338, 0.9155001545394501, 0.8063449731432879, 0.9824903416578779, 0.6161528838571233, 0.8349076896654066, 0.9753961112993056, 0.6846535378936139, 0.5815253609382558, 0.5152159754585872, 0.8810931483247804, 0.7662385489860193, 0.6273931037204035, 0.821869752230912, 0.9273531924865042, 0.7227877846532991, 0.953938590053254, 0.6522848525319428, 0.8660373481499135, 0.5022762447610802, 0.6194471767092645, 0.6205608079191386, 0.5000595121597812, 0.5887281757814933, 
0.5446422241865476, 0.9159991666835132, 0.6127488620146828, 0.8529791904968544, 0.7789722860008814, 0.9759227823129095, 0.5150193992315245, 0.5478783036116768, 0.6152960480868992, 0.8774775457176611, 0.7247992860564513, 0.7987327951463192, 0.9918488588639334, 0.6487043085299454, 0.8079571476674114, 0.5231377220244289, 0.581028199156748, 0.8900533326891462, 0.8986013658003578, 0.7960857635391243, 0.682905036543501, 0.7673374328263758, 0.6151895230384689, 0.9353016358104886, 0.8775945581513496, 0.5361543517102428, 0.8561309245556126, 0.5118533444598596, 0.8125828005876148, 0.9393002248421464, 0.7093664779933471, 0.5729805431922219, 0.8373682209584419, 0.8140048338971883, 0.7958178014403632, 0.9330350541514305, 0.8506761900166053, 0.7776098098153721, 0.9761009290363523, 0.6649528775885625, 0.8384572393918477, 0.873763883834382, 0.7998048177024593, 0.8752079865716482, 0.9022603677780342, 0.6383265594537981, 0.9570887462178792, 0.9835158972754799, 0.9289730250370805, 0.524816827839091, 0.7538845758276609, 0.5087360718547204, 0.5048257225309676, 0.6269470482028094, 0.7597942459028181, 0.6752755693303829, 0.8311896046408234, 0.7326288112182051, 0.5302527163248941, 0.9347468724456336, 0.5649046664763765, 0.9025457883856189, 0.7980977277773256, 0.9414683369257171, 0.863727762535413, 0.7065782119507376, 0.6198887675451092, 0.9258733440319051, 0.91115663484141, 0.6148890241860144, 0.8546822744644083, 0.5899360841305552, 0.9756718404995759, 0.9659436293537869, 0.865782724351994, 0.5534212116330443, 0.5032099077729024, 0.9791231610673083, 0.8851606903310687, 0.8859649610333385, 0.7263146838027283, 0.5010502409222577, 0.819700293930203, 0.8265880302089434, 0.656786438685707, 0.8095795734553854, 0.7999816617701834, 0.7056413862559032, 0.9345116222698383, 0.6918565744476111, 0.7942708256044109, 0.7646658378084317, 0.7062088999940921, 0.8614407549287576, 0.5266809093262361, 0.8497735058128721, 0.5115304894308084, 0.8441030573431245, 0.7043462919072911, 0.8211135060653425, 
0.7869644078920001, 0.5761008651925397, 0.7881192937839705, 0.9196863082361821, 0.7155303036569771, 0.8018484668820345, 0.9542100516486385, 0.8638584239099214, 0.7159214159546334, 0.8895021338511994, 0.7424741477146315, 0.8350558539897671, 0.9859361520133678, 0.6848329326867505, 0.5481712911442095, 0.6435027676708149, 0.6883765011535049, 0.6240390845192685, 0.7699091135345302, 0.9693178793404019, 0.9430835906450243, 0.820839104160598, 0.9222845119917589, 0.8638292493830582, 0.5971209659020862, 0.9045464592069207, 0.6359952047669645, 0.8378979821771405, 0.8202101056266067, 0.6971712528994054, 0.7278658781513582, 0.5949808863268105, 0.9736143488144318, 0.5350425266328074, 0.9632715470701891, 0.547843368327888, 0.6254857367317426, 0.7355377265249065, 0.7117070763314417, 0.6196484867023275, 0.898044471183553, 0.6770052230481038, 0.9621763331684828, 0.6212973685139593, 0.6418783323371108, 0.795171311778698, 0.5601075662818403, 0.5080709510229202, 0.6979663667433789, 0.9488363371745938, 0.5019694759797229, 0.7712786742308608, 0.7837720954874667, 0.7668539288400354, 0.8868483396320446, 0.5565922276472344, 0.6985712601616967, 0.8669120666856328, 0.7795433145452679, 0.5983333948422456, 0.5511836955923528, 0.7730606991874078, 0.6678015721329285, 0.8183281562534308, 0.6815851510606674, 0.9048847854394642, 0.9939931569276497, 0.9491945476628733, 0.622238515958587, 0.5998882231961253, 0.8153189584526843, 0.9479388200678514, 0.606473626054805, 0.5964754640952643, 0.6546834203680816, 0.8036314317101991, 0.7081761891628449, 0.9348530532438508, 0.6244586563694181, 0.9443438476888611, 0.5397505035860652, 0.8465651097460825, 0.7684648666924393, 0.6343403013342601, 0.8912199543362402, 0.7707528554608283, 0.6834632062218267, 0.6850509416720071, 0.6579690631183426, 0.7459221270673004, 0.5026762454900506, 0.5298139130925188, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 97, 99, 101, 103, 106, 108, 111, 113, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 142, 144, 146, 148, 150, 152, 158, 160, 163, 165, 168, 170, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 199, 201, 204, 206, 209, 211, 198, 116, 203, 198, 116, 198, 116, 203, 193, 215, 116, 156, 156, 193, 196, 196, 198, 215, 587, 589, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 809, 816, 203, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 819, 162, 157, 141, 803, 162, 157, 172, 809, 208, 162, 157, 141, 803, 162, 157, 172, 809, 213, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 809, 213, 208, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 167, 141, 780, 162, 157, 172, 809, 203, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 110, 105, 213, 208, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 809, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 803, 162, 157, 162, 157, 167, 172, 809, 203, 213, 208, 203, 213, 208, 
830, 818, 830, 818, 830, 831, 830, 831, 830, 831, 830, 831, 830, 831, 830, 823, 831, 831, 831, 830, 831, 830, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 831, 830, 831, 830, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1152, 1154, 1161, 1163, 1165, 1167, 1171, 1175, 1177, 1180, 1182, 1184, 1186, 1190, 1195, 1199, 1204, 1206, 1208, 1212, 1214, 1218, 1220, 1222, 1229, 1231, 1233, 1238, 1243, 1245, 1247, 1249, 1253, 1255, 1258, 1260, 1262, 1264, 1267, 1269, 1271, 1273, 
1275, 1277, 1281, 1283, 1287, 1289, 1291, 1293, 1298, 1300, 1306, 1309, 1160, 1158, 1173, 1174, 1308, 1312, 1314, 1297, 1305, 1316, 1317, 1308, 1318, 1319, 1189, 1193, 1308, 833, 1198, 1202, 1308, 833, 1217, 1324, 1326, 1228, 1226, 1237, 1241, 1242, 1330, 1286, 1297, 1304, 1305, 1308, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1156, 828, 827, 1462, 1463, 829, 1169, 828, 827, 1414, 1464, 1465, 829, 1466, 1295, 828, 827, 1417, 824, 825, 1469, 1470, 1473, 1295, 828, 827, 1476, 1302, 828, 827, 1477, 1478, 1479, 1295, 828, 827, 1480, 1302, 828, 827, 1481, 1482, 1483, 1210, 828, 827, 1427, 1302, 828, 827, 1484, 829, 1224, 828, 827, 1487, 1488, 829, 1235, 828, 827, 1489, 1302, 828, 827, 1490, 1491, 829, 1251, 828, 827, 1440, 1302, 828, 827, 1442, 829, 833, 1295, 828, 827, 1446, 824, 825, 1279, 828, 827, 1452, 1302, 828, 827, 1493, 829, 1295, 828, 827, 1494, 1302, 828, 827, 1495, 1496, 829, 1497, 833, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1536, 1537, 1538, 1539, 1541, 1542, 1543, 1544, 1545, 1548, 1550, 1551, 1552, 1553, 1554, 1555, 1557, 1558, 1559, 1560, 1561, 1563, 1564, 1565, 1567, 1569, 1570, 1571, 1573, 1574, 1575, 1577, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1587, 1588, 1589, 1590, 1591, 1593, 1594, 1595, 1596, 1598, 1599, 1600, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1628, 1629, 1630, 1631, 1633, 1634, 1635, 1637, 1638, 1639, 1640, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1664, 1669, 1674, 1680, 1681, 1682, 1685, 1688, 1689, 1692, 1695, 1696, 1700, 1704, 1709, 1712, 1716, 1720, 1726, 1732, 1736, 
1740, 1743, 1746, 1748, 1668, 1749, 1673, 1679, 1678, 1749, 1703, 1708, 1715, 1725, 1724, 1731, 1730, 1749, 1739, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1792, 1793, 1794, 1797, 1798, 1800, 1801, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1817, 1818, 1819, 1820, 1821, 1731, 1730, 1568, 1578, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1731, 1730, 1830, 1831, 1749, 1747, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1636, 1667, 1546, 1672, 1940, 1636, 1677, 1942, 1636, 1556, 1944, 1945, 1566, 1562, 1946, 1576, 1572, 1947, 1586, 1699, 1948, 1636, 1707, 1601, 1597, 1723, 1719, 1952, 1954, 1636, 1729, 1956, 1957, 1627, 1735, 1958, 1636, 1632, 1960, 1961, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2048, 2049, 2050, 2051, 2053, 2054, 2056, 2057, 2058, 2060, 2061, 2063, 2064, 2066, 2067, 2069, 2070, 2071, 2072, 2073, 2074, 2077, 2078, 2079, 2081, 2082, 2084, 2085, 2086, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2176, 2178, 2180, 2182, 2185, 2187, 2189, 2191, 2193, 2195, 2197, 2200, 2202, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2076, 2204, 1951, 2065, 2068, 1939, 2052, 2184, 1950, 2055, 2062, 2075, 2083, 2199, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 
120, 121, 122, 123, 124, 125, 126, 127, 2560, 2562, 2564, 2566, 2568, 2570, 2572, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2688, 2690, 2692, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2816, 2818, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2944, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 834, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 3200, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 98, 100, 102, 104, 107, 109, 112, 114, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 147, 149, 151, 153, 159, 161, 164, 166, 169, 171, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 200, 202, 205, 207, 210, 212, 115, 115, 194, 115, 115, 115, 197, 195, 96, 96, 115, 154, 155, 214, 194, 195, 197, 214, 588, 590, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 238, 239, 250, 253, 254, 289, 334, 335, 343, 346, 363, 375, 376, 385, 386, 387, 388, 392, 755, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 797, 778, 805, 799, 800, 774, 768, 776, 777, 812, 811, 797, 796, 805, 798, 805, 799, 800, 792, 787, 769, 795, 817, 810, 812, 811, 797, 785, 792, 787, 786, 789, 788, 791, 790, 820, 797, 770, 801, 802, 805, 804, 807, 808, 814, 797, 771, 801, 802, 805, 804, 807, 808, 815, 797, 796, 805, 798, 805, 799, 800, 772, 794, 773, 805, 804, 807, 795, 812, 811, 797, 778, 805, 799, 800, 774, 775, 776, 777, 812, 811, 797, 778, 805, 799, 800, 779, 802, 805, 804, 807, 808, 810, 812, 781, 797, 796, 805, 798, 805, 799, 800, 792, 794, 782, 805, 804, 807, 784, 783, 812, 811, 815, 814, 797, 785, 792, 787, 786, 789, 788, 791, 790, 797, 796, 805, 798, 805, 799, 800, 792, 794, 793, 805, 804, 807, 795, 812, 811, 797, 796, 805, 798, 805, 799, 800, 801, 802, 805, 804, 805, 805, 806, 807, 808, 810, 812, 811, 813, 815, 814, 
822, 822, 822, 822, 822, 822, 822, 822, 822, 821, 821, 821, 821, 822, 826, 822, 822, 826, 832, 832, 832, 832, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 251, 252, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 336, 337, 338, 339, 340, 341, 342, 344, 345, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 377, 378, 379, 380, 381, 382, 383, 384, 389, 390, 391, 398, 412, 413, 415, 416, 931, 931, 931, 931, 434, 435, 446, 447, 457, 458, 460, 461, 467, 488, 490, 491, 507, 517, 518, 521, 522, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1153, 1155, 1162, 1164, 1166, 1168, 1172, 1176, 1178, 1181, 1183, 1185, 1187, 1191, 1196, 1200, 1205, 1207, 1209, 1213, 1215, 1219, 1221, 1223, 1230, 1232, 1234, 1239, 1244, 1246, 1248, 1250, 1254, 1256, 1259, 1261, 1263, 1265, 1268, 1270, 1272, 1274, 1276, 1278, 1282, 1284, 1288, 1290, 1292, 1294, 1299, 1301, 1307, 1310, 1159, 
1157, 1303, 918, 931, 1313, 1315, 1266, 931, 420, 421, 931, 423, 424, 1188, 1192, 822, 1194, 1197, 1201, 822, 1203, 1216, 1325, 1327, 1227, 1225, 1236, 1240, 822, 1331, 1285, 1296, 1303, 832, 832, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1409, 1456, 1408, 396, 397, 1410, 1413, 1412, 1411, 1170, 404, 405, 1415, 407, 1457, 1456, 1416, 1179, 1418, 1419, 418, 419, 422, 1457, 1456, 1420, 428, 1459, 1459, 1421, 432, 433, 436, 1457, 1456, 1422, 440, 1459, 1459, 1423, 444, 445, 448, 1426, 1425, 1424, 1211, 1459, 1459, 1428, 456, 1429, 1431, 1456, 1430, 465, 466, 1432, 1434, 1456, 1433, 472, 1459, 1459, 1435, 476, 477, 1436, 1439, 1438, 1437, 1252, 1459, 1459, 1441, 1257, 1443, 1444, 1457, 1456, 1445, 1266, 1447, 1448, 1451, 1450, 1449, 1280, 1459, 1459, 1453, 505, 1454, 1457, 1456, 1455, 511, 1459, 1459, 1458, 515, 516, 1460, 520, 1461, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 393, 394, 395, 1540, 399, 400, 401, 402, 403, 406, 408, 409, 410, 411, 414, 417, 1471, 1474, 425, 426, 427, 429, 430, 431, 1320, 437, 438, 439, 441, 442, 443, 1322, 449, 450, 451, 452, 453, 454, 455, 459, 462, 463, 464, 1592, 468, 469, 470, 471, 473, 474, 475, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 489, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 506, 508, 509, 510, 512, 513, 514, 1333, 519, 1335, 523, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1665, 1670, 1675, 1472, 1475, 1683, 1686, 1321, 1690, 1693, 1323, 1697, 1701, 1705, 1710, 1713, 1717, 1721, 1727, 1733, 1737, 1741, 1744, 1334, 1336, 1311, 1549, 1547, 1468, 1467, 1486, 1485, 1328, 1602, 1329, 1329, 1492, 1492, 1332, 1332, 40, 41, 42, 43, 44, 45, 46, 
47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1666, 1671, 1676, 1684, 1687, 1691, 1694, 1698, 1702, 1706, 1711, 1714, 1718, 1722, 1728, 1734, 1738, 1742, 1745, 526, 529, 530, 533, 534, 1796, 1795, 1799, 1802, 547, 548, 551, 554, 557, 558, 559, 560, 1816, 1815, 567, 568, 1816, 1815, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1938, 1920, 1938, 1921, 1941, 1938, 1922, 1943, 1938, 1934, 537, 538, 1924, 1923, 541, 1926, 1925, 544, 1928, 1927, 1949, 1938, 1929, 1931, 1930, 1933, 1932, 1953, 1955, 1938, 1934, 563, 564, 1936, 1935, 1959, 1938, 1937, 571, 572, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 524, 525, 527, 528, 531, 532, 535, 536, 2059, 539, 540, 542, 543, 545, 546, 549, 550, 552, 553, 555, 556, 561, 562, 2080, 565, 566, 569, 570, 2087, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2177, 2179, 2181, 2183, 2186, 2188, 2190, 2192, 2194, 2196, 2198, 2201, 2203, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2314, 2316, 2312, 2309, 2310, 2304, 2305, 2307, 2311, 2306, 2308, 2313, 2315, 2314, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2561, 2563, 2565, 2567, 2569, 2571, 2573, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 
38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2689, 2691, 2693, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2817, 2694, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2945, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 
3072, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 591, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#define THREADS_PER_BLOCK 128
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 640
#define SIZE_OF_AC 2816
// Arithmetic-circuit evaluator over shared memory.
// Each thread i loads 5 input values from A into the shared scratch array R,
// then evaluates 22 "layers" of binary operations per iteration: for node k,
// Op[k] selects multiply (true) or add (false) applied to operands R[B[k]]
// and R[C[k]].  Thread 0 accumulates R[26*t] into `final` every iteration and
// writes the total to A[0] at the end.
// Assumes a single block of exactly THREADS_PER_BLOCK threads, since `i` is
// used directly as an index into the block-local shared array R.
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[27*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
// NOTE(review): every thread stores 0 into `final` concurrently before the
// barrier below — same value from all threads, benign in practice, but a
// single-thread write (if i==0) would be cleaner.
final=0;
// Stage the 5*t inputs from global into shared memory.
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
R[i + 5*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
__syncthreads();
R[i + 6*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
__syncthreads();
// NOTE(review): layers 7/8 (and 9/10 below) are computed with no barrier in
// between — safe only if the circuit data in B/C never makes a layer read
// the immediately preceding layer's outputs; confirm against the tables.
R[i + 7*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 8*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
__syncthreads();
R[i + 9*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 10*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
__syncthreads();
R[i + 11*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
__syncthreads();
R[i + 12*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
__syncthreads();
R[i + 13*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
__syncthreads();
R[i + 14*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
__syncthreads();
R[i + 15*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
__syncthreads();
R[i + 16*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
__syncthreads();
R[i + 17*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
__syncthreads();
R[i + 18*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
__syncthreads();
R[i + 19*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
__syncthreads();
R[i + 20*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
__syncthreads();
R[i + 21*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
__syncthreads();
R[i + 22*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
__syncthreads();
R[i + 23*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
__syncthreads();
R[i + 24*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
__syncthreads();
R[i + 25*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
__syncthreads();
R[i + 26*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
// Thread 0 reads R[26*t], which thread 0 itself wrote just above, so no
// extra barrier is needed here.
if (i==0) { final += R[26*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
12,092 | #include "includes.h"
// tatami.cu
const unsigned nMax(100000000);
const unsigned nMaxSqrt(sqrt(nMax));
// Sieve-style kernel (tatami counting): each thread owns the odd value
// i = 2*(globalThreadId + base) + 7 and, over successive (k2, k3) windows,
// increments the counter v[i*j] for every j in [k2/2, k4/2].  `base` lets
// the host cover a large range with multiple launches.
__global__ void odd(unsigned* v, unsigned base)
{
unsigned i = (blockIdx.x * blockDim.x + threadIdx.x + base) * 2 + 7;
unsigned k2 = i + 3;
unsigned k3 = i + i - 4;
while ((k2 <= k3) && ((i * k2) < nMax))
{
// Clamp the inner range so i*j stays below nMax.
unsigned k4 = (nMax - 1) / i;
if (k3 < k4)
k4 = k3;
// NOTE(review): __syncthreads() inside a loop whose trip count depends on
// the thread index is undefined behavior if threads of one block leave the
// loop at different times.  Since `i` differs per thread, that looks likely
// here — confirm; the atomicInc already makes the updates safe, so these
// barriers may simply be removable.
__syncthreads();
for (unsigned j = k2 / 2; j <= k4 / 2; j++)
atomicInc(&v[i * j], 0xffffffff);
__syncthreads();
k2 += i + 1;
k3 += i - 1;
}
}
12,093 | //: nvcc add1.cu -o add1
/*
* Meme programme que add0 MAIS
* on add les elements des vecteurs entre eux
*/
#include <stdlib.h>
#include <stdio.h>
// definition de la taille des vecteurs
#ifndef N
#define N 100
#endif
// Element-wise vector sum: c[tid] = a[tid] + b[tid].
// Launched as <<<N, 1>>>, so blockIdx.x selects the element; the guard
// protects against grids larger than N.
__global__ void add(int *a, int *b, int *c)
{
    const int tid = blockIdx.x;
    if (tid >= N)
        return;
    c[tid] = a[tid] + b[tid];
}
// Fills two N-element vectors on the host, adds them on the GPU (one block
// per element), and prints every (a, b, a+b) triple.
// Improvements: allocation and launch failures are now detected instead of
// silently producing garbage output.
int main(int argc, char **argv)
{
    // Host buffers for the two inputs and the result.
    int *h_a = (int*)malloc(N * sizeof(int));
    int *h_b = (int*)malloc(N * sizeof(int));
    int *h_c = (int*)malloc(N * sizeof(int));
    if (h_a == NULL || h_b == NULL || h_c == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    // Device buffers.
    int *d_a, *d_b, *d_c;
    cudaError_t err;
    if ((err = cudaMalloc((void**)&d_a, N * sizeof(int))) != cudaSuccess ||
        (err = cudaMalloc((void**)&d_b, N * sizeof(int))) != cudaSuccess ||
        (err = cudaMalloc((void**)&d_c, N * sizeof(int))) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    // Deterministic input pattern.
    for (int i = 0; i < N; ++i)
    {
        h_a[i] = i;
        h_b[i] = 2 * i * i - 6 * i;
    }
    // Copy inputs from the host to the device.
    cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
    // One block per element (<<<N, 1>>>): the kernel uses blockIdx.x as the
    // element index.  N must stay below the device's max grid dimension.
    add<<<N,1>>>(d_a, d_b, d_c);
    // Launches are asynchronous; configuration errors surface here.
    if ((err = cudaGetLastError()) != cudaSuccess)
    {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    // This blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Print the results.
    for (int i = 0; i < N; ++i)
    {
        printf("(+ %d %d) -> %d\n", h_a[i], h_b[i], h_c[i]);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return EXIT_SUCCESS;
}
|
12,094 | #include "includes.h"
// Cube each element: T[i] = q[i]^3 for the first N entries.
__global__ void kernelGetPhi3(const int N, double *T, double *q)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    const double qi = q[idx];
    T[idx] = qi * qi * qi;
}
12,095 | /*
Fast matrix exponentiation using CUDA.
The exectuable takes two arguments: the matrix in a file and the power.
The file should have a integer N in the first line that is the size of the matrix and
N lines with N integers each representing the matrix.
Example of file:
3
1 2 3
4 5 6
7 8 9
Example of usage:
./mExp input.txt 4
*/
#include<fstream>
typedef long long ll;
using namespace std;
// Reads up to n*n integers from `file` into `matrix` (row-major).
// BUG FIX: the old loop ran until stream failure and ignored `n`, so a file
// containing more than n*n numbers overflowed the buffer.
void read_matrix(ifstream& file, ll* matrix, int n){
    for(int i=0; i<n*n && (file >> matrix[i]); i++){}
}
// Returns the warp size (threads per warp) of the current device.
// exp_matrix uses this as the side length of its thread blocks, giving
// warpSize x warpSize = 32 x 32 = 1024 threads — the per-block maximum.
// BUG FIX: this previously returned multiProcessorCount, which is unrelated
// to warps; on GPUs with more than 32 SMs the resulting block exceeded 1024
// threads and every kernel launch failed, leaving the output unmodified.
int get_size_warp(){
    int deviceId;
    cudaGetDevice(&deviceId);
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, deviceId);
    return props.warpSize;
}
// C = A * B for n x n matrices of ll.  Grid-strided in both dimensions, so
// any grid/block geometry covers the whole output.
__global__ void multi_matrix(ll *a, ll *b, ll *c, int n){
    const int row0    = threadIdx.x + blockDim.x*blockIdx.x;
    const int col0    = threadIdx.y + blockDim.y*blockIdx.y;
    const int rowStep = blockDim.x*gridDim.x;
    const int colStep = blockDim.y*gridDim.y;
    for(int row = row0; row < n; row += rowStep){
        for(int col = col0; col < n; col += colStep){
            ll acc = 0;
            for(int k = 0; k < n; k++){
                acc += a[row*n + k] * b[k*n + col];
            }
            c[row*n + col] = acc;
        }
    }
}
// Raises the n x n host matrix `a` to the power `exp` in place, using binary
// exponentiation with GPU matrix multiplies (result = I for exp == 0).
//
// Fixes over the previous version:
//  * the old loop multiplied by A once per bit position instead of squaring
//    the base, so e.g. exp=4 produced A^3 and exp=5 produced A^4;
//  * the "else m1_device = m2_device" branch made the next multiply read and
//    write the same buffer (undefined results) and leaked the old buffer;
//  * idt and the device buffers were never freed.
void exp_matrix(ll *a, int n, int exp){
    ll *idt;
    ll *base_d, *scratch_d, *result_d, *swap_d;
    int size = n*n*sizeof(ll);
    int size_warp = get_size_warp();
    dim3 grid(16, 16), block(size_warp, size_warp);
    // Identity matrix for the initial result.
    idt = (ll*)malloc(size);
    for(int i=0;i<n*n;i++)idt[i]=0;
    for(int i=0;i*(n+1)<n*n;i++)idt[i*(n+1)]=1;
    cudaMalloc(&base_d, size);
    cudaMalloc(&scratch_d, size);
    cudaMalloc(&result_d, size);
    cudaMemcpy(base_d, a, size, cudaMemcpyHostToDevice);      // base = A
    cudaMemcpy(result_d, idt, size, cudaMemcpyHostToDevice);  // result = I
    while(exp!=0){
        if(exp%2){
            // result = result * base
            multi_matrix<<<grid, block>>>(result_d, base_d, scratch_d, n);
            cudaDeviceSynchronize();
            swap_d = result_d; result_d = scratch_d; scratch_d = swap_d;
        }
        exp >>= 1;
        if(exp!=0){
            // base = base * base (the two inputs may alias each other, but
            // never the output buffer)
            multi_matrix<<<grid, block>>>(base_d, base_d, scratch_d, n);
            cudaDeviceSynchronize();
            swap_d = base_d; base_d = scratch_d; scratch_d = swap_d;
        }
    }
    cudaMemcpy(a, result_d, size, cudaMemcpyDeviceToHost);
    free(idt);
    cudaFree(base_d);
    cudaFree(scratch_d);
    cudaFree(result_d);
}
// Usage: ./mExp <matrix-file> <power>
// Reads N and N*N integers from the file, raises the matrix to `power`, and
// writes the result to "<matrix-file>.output".
int main(int argc, char **argv){
    if(argc<3)return 0;
    ifstream inFile(argv[1]);
    ofstream outFile(string(argv[1])+".output");
    int exp = stoi(argv[2]);
    int n;
    ll *matrix;
    // Bail out on a missing file or invalid size header — previously `n`
    // stayed uninitialized and was used anyway.
    if(!(inFile >> n) || n <= 0) return 1;
    matrix = (ll*)malloc((n*n)*sizeof(ll));
    if(matrix == NULL) return 1;
    read_matrix(inFile, matrix, n);
    exp_matrix(matrix, n, exp);
    cudaDeviceSynchronize();
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            outFile << matrix[i*n+j] << " ";
        }
        outFile << endl;
    }
    free(matrix);  // previously leaked
    return 0;
}
|
12,096 | #include <cuda.h>
#include <time.h>
#include <stdio.h>
// Element-wise sum C = A + B; one thread per element, tail guarded.
__global__ void vecAddKernel(float* A, float* B, float* C, unsigned long n){
    const unsigned long idx = blockDim.x*blockIdx.x + threadIdx.x;
    if(idx >= n)
        return;
    C[idx] = A[idx] + B[idx];
}
// C = A + B on the GPU for n-element float vectors; times the kernel and
// prints the first and last five results.
// Fixes: (1) kernel launches are asynchronous, so without a synchronize the
// timer only measured launch overhead; (2) the result printouts read the
// input B instead of the result C; (3) the "last 5" index was hard-coded to
// 999994, silently assuming n == 1000000; (4) the device pointers had no
// reason to be `static`.
void vecAdd(float* A, float* B, float* C, unsigned long n)
{
    unsigned long size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_B, size);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_C, size);
    clock_t start, end;
    double cpu_time_used;
    start = clock();
    vecAddKernel<<<ceil(n/256.0), 256>>>(d_A, d_B, d_C, n);
    cudaDeviceSynchronize();  // wait so the timer covers execution, not just launch
    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("Elapsed time: %f s", cpu_time_used);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    printf("\nFirst 5 values: ");
    for (unsigned long i = 0; i < 5; i++) {
        printf("%.2f ", C[i]);
    }
    printf("\nLast 5 values: ");
    for (unsigned long i = 0; i < 5; i++) {
        printf("%.2f ", C[n - 5 + i]);
    }
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Driver: builds two 1M-element test vectors and adds them on the GPU.
// The arrays are `static` to keep them off the (limited) stack.
int main() {
    const unsigned long N = 1000000;
    static float h_A[1000000];
    static float h_B[1000000];
    static float h_C[1000000];
    // Deterministic pattern: A cycles 0,1 and B cycles 0,1,2.
    for (unsigned long i = 0; i < N; ++i) {
        h_A[i] = i % 2;
        h_B[i] = i % 3;
    }
    vecAdd(h_A, h_B, h_C, N);
    return 0;
}
12,097 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
#define VERBOSE
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
// Aborts with file/line context when a CUDA runtime call returned an error.
// Compiles to a no-op unless CUDA_ERROR_CHECK is defined; invoked through the
// CudaSafeCall() macro so call sites get their own location.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
#define NDIMS 2
// One thread per link id in [start, end]:
//   link_weights[id-start] = rank(src) / outdegree(src)
// where src is the 1-based source document stored at dlinks[id*NDIMS+0].
__global__ void kernel1(float *dranks, int *dlinks, int *dlink_counts, float *dlink_weights, int nLinks, int start, int end, int GPUN)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (start <= id && id <= end) {
        // Hoist the repeated 1-based -> 0-based source lookup.
        int src = dlinks[id*NDIMS+0] - 1;
        dlink_weights[id-start] = dranks[src] / (float) dlink_counts[src];
    }
}
// One thread per document id in [start, end]: sums the weights of every link
// whose destination is this document and stores the new rank.
// (Each thread scans all links — O(nDocs * nLinks) total work.)
__global__ void kernel2(float *dranks, int *dlinks, float *dlink_weights, int nDocs, int nLinks, int start, int end, int GPUN)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < start || id > end)
        return;
    float new_rank = 0.0f;
    for (int l = 0; l < nLinks; l++) {
        // Destination document of link l (stored 1-based).
        const int dst = dlinks[l*NDIMS+1] - 1;
        if (dst == id)
            new_rank += dlink_weights[l];
    }
    dranks[id-start] = new_rank;
}
extern "C" {
// PageRank stage 1 (GPU): computes per-link weights for link ids in
// [start, end] and copies them back into link_weights[start..].
// Expects end - start + 1 == GPUN.
// BUG FIX: the verbose printfs used %d with `long` arguments, which is
// undefined behavior on LP64 platforms — now %ld.
void prCUDA1(float* ranks, int *links, int *link_counts, float *link_weights, long nDocs, long nLinks, long start, long end, long GPUN) {
  float *dranks, *dlink_weights;
  int *dlinks, *dlink_counts;
  if (GPUN > 0) {
    assert(end - start + 1 == GPUN);
#ifdef VERBOSE
    printf("In prCUDA1\n");
    printf("\t GPUN: %ld\n", GPUN);
    printf("\t range: %ld..%ld\n", start, end);
#endif
    CudaSafeCall(cudaMalloc(&dranks, sizeof(float) * nDocs));
    CudaSafeCall(cudaMalloc(&dlinks, sizeof(int) * nLinks * 2));
    CudaSafeCall(cudaMalloc(&dlink_counts, sizeof(int) * nDocs));
    CudaSafeCall(cudaMalloc(&dlink_weights, sizeof(float) * nLinks));
    CudaSafeCall(cudaMemcpy(dranks, ranks, sizeof(float) * nDocs, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(dlinks, links, sizeof(int) * nLinks * 2, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(dlink_counts, link_counts, sizeof(int) * nDocs, cudaMemcpyHostToDevice));
    kernel1<<<ceil(((float)nLinks)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dranks, dlinks, dlink_counts, dlink_weights, nLinks, start, end, GPUN);
    CudaSafeCall(cudaDeviceSynchronize());
    CudaSafeCall(cudaMemcpy(link_weights + start, dlink_weights, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaFree(dranks));
    CudaSafeCall(cudaFree(dlinks));
    CudaSafeCall(cudaFree(dlink_counts));
    CudaSafeCall(cudaFree(dlink_weights));
  }
}
// PageRank stage 2 (GPU): recomputes ranks for document ids in [start, end]
// from the per-link weights and copies them back into ranks[start..].
// Expects end - start + 1 == GPUN.
// BUG FIX: the verbose printfs used %d with `long` arguments, which is
// undefined behavior on LP64 platforms — now %ld.
void prCUDA2(float* ranks, int *links, float *link_weights, long nDocs, long nLinks, long start, long end, long GPUN) {
  float *dranks, *dlink_weights;
  int *dlinks;
  if (GPUN > 0) {
    assert(end - start + 1 == GPUN);
#ifdef VERBOSE
    printf("In prCUDA2\n");
    printf("\t GPUN: %ld\n", GPUN);
    printf("\t range: %ld..%ld\n", start, end);
#endif
    CudaSafeCall(cudaMalloc(&dranks, sizeof(float) * GPUN));
    CudaSafeCall(cudaMalloc(&dlinks, sizeof(int) * nLinks * 2));
    CudaSafeCall(cudaMalloc(&dlink_weights, sizeof(float) * nLinks));
    CudaSafeCall(cudaMemcpy(dlinks, links, sizeof(int) * nLinks * 2, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(dlink_weights, link_weights, sizeof(float) * nLinks, cudaMemcpyHostToDevice));
    kernel2<<<ceil(((float)nDocs)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dranks, dlinks, dlink_weights, nDocs, nLinks, start, end, GPUN);
    CudaSafeCall(cudaDeviceSynchronize());
    CudaSafeCall(cudaMemcpy(ranks + start, dranks, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaFree(dranks));
    CudaSafeCall(cudaFree(dlinks));
    CudaSafeCall(cudaFree(dlink_weights));
  }
}
}
|
12,098 | //#include <omp.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <sys/mman.h>
#include <sys/stat.h>
#include <iostream>
#include <fcntl.h>
#include <cmath>
using namespace std;
__device__ __managed__ float *x, *y, *z, *res;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Get a matrix element
// Reads A(row, col) from a row-major matrix whose row pitch is A.stride.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const int idx = row * A.stride + col;
    return A.elements[idx];
}
// Set a matrix element
// Writes `value` into A(row, col) of a row-major matrix with pitch A.stride.
__device__ void SetElement(Matrix A, int row, int col,
                           float value)
{
    const int idx = row * A.stride + col;
    A.elements[idx] = value;
}
// Thread block size
#define BLOCK_SIZE 16
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Returns a 3-column, BLOCK_SIZE-row view into A starting BLOCK_SIZE*row
// rows down and A.width*col elements to the right.  The view shares A's
// storage (same stride, same elements pointer).
// NOTE(review): the column offset uses A.width * col while GetSubMatrixB/C
// step by BLOCK_SIZE * col — possibly intended because this sub-matrix is
// only 3 (= A.width?) columns wide, but confirm against the callers.
__device__ Matrix GetSubMatrixA(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = 3;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
+ A.width * col];
return Asub;
}
// Returns a BLOCK_SIZE-column, 3-row view into A, stepping BLOCK_SIZE*col
// elements to the right.  The view shares A's storage.
// NOTE(review): the row offset uses A.stride * A.height * row (whole-matrix
// heights per step) rather than a tile height — plausible because this
// sub-matrix is only 3 rows tall, but confirm the intended indexing.
__device__ Matrix GetSubMatrixB(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = 3;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * A.height * row
+ BLOCK_SIZE * col];
return Asub;
}
// Returns the BLOCK_SIZE x BLOCK_SIZE tile of A located `row` tiles down and
// `col` tiles to the right; the tile shares A's storage (same stride).
__device__ Matrix GetSubMatrixC(Matrix A, int row, int col)
{
    Matrix sub;
    sub.width  = BLOCK_SIZE;
    sub.height = BLOCK_SIZE;
    sub.stride = A.stride;
    sub.elements = A.elements + A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col;
    return sub;
}
// Forward declaration of the matrix multiplication kernel
//__global__ void MatMulKernel(*float, *float, *float, int, int, int, int, int, int);
// Despite the name, this kernel does NOT multiply matrices: for each output
// cell (Row, Col) it accumulates sum_k (A[Row][k] - B[k][Col])^2 using tiled
// shared-memory loads, then writes C[Row][Col] = 1/sqrt(sum) — i.e. the
// reciprocal Euclidean distance between row Row of A and column Col of B —
// or 0 when the sum is 0 (identical points / zero padding).
// Expects blockDim == (BLOCK_SIZE, BLOCK_SIZE); out-of-range tile entries
// are zero-filled so sizes need not be multiples of BLOCK_SIZE.
__global__ void MatMulKernel(float* A, float* B, float* C, int ARows, int ACols, int BRows,
int BCols, int CRows, int CCols)
{
float CValue = 0;
int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
int Col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Walk the shared K dimension one BLOCK_SIZE tile at a time.
for (int k = 0; k < (BLOCK_SIZE + ACols - 1)/BLOCK_SIZE; k++) {
if (k*BLOCK_SIZE + threadIdx.x < ACols && Row < ARows)
As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*BLOCK_SIZE + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (k*BLOCK_SIZE + threadIdx.y < BRows && Col < BCols)
Bs[threadIdx.y][threadIdx.x] = B[(k*BLOCK_SIZE + threadIdx.y)*BCols + Col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
// Squared-difference accumulation (not a dot product).
for (int n = 0; n < BLOCK_SIZE; ++n)
CValue += (As[threadIdx.y][n] - Bs[n][threadIdx.x])*(As[threadIdx.y][n] - Bs[n][threadIdx.x]);
__syncthreads();
}
if (Row < CRows && Col < CCols){
if(CValue>0)
CValue=1/sqrt(CValue);
else
CValue=0;
// With blockDim == BLOCK_SIZE this index equals Row*CCols + Col.
C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) +
(blockIdx.x * blockDim.x)+ threadIdx.x] = CValue;
}
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host driver for MatMulKernel: copies A and B to the device, launches a
// grid covering every cell of C (grid dims are ceil-divided, so the sizes
// need not be multiples of BLOCK_SIZE), and copies C back to the host.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Upload A.
    Matrix devA;
    devA.width = devA.stride = A.width;
    devA.height = A.height;
    size_t bytes = A.width * A.height * sizeof(float);
    cudaMalloc(&devA.elements, bytes);
    cudaMemcpy(devA.elements, A.elements, bytes, cudaMemcpyHostToDevice);
    // Upload B.
    Matrix devB;
    devB.width = devB.stride = B.width;
    devB.height = B.height;
    bytes = B.width * B.height * sizeof(float);
    cudaMalloc(&devB.elements, bytes);
    cudaMemcpy(devB.elements, B.elements, bytes, cudaMemcpyHostToDevice);
    // Allocate C on the device.
    Matrix devC;
    devC.width = devC.stride = C.width;
    devC.height = C.height;
    bytes = C.width * C.height * sizeof(float);
    cudaMalloc(&devC.elements, bytes);
    // Launch one BLOCK_SIZE x BLOCK_SIZE block per output tile.
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocks((B.width + threads.x - 1) / threads.x,
                (A.height + threads.y - 1) / threads.y);
    MatMulKernel<<<blocks, threads>>>(devA.elements, devB.elements, devC.elements,
                                      devA.height, devA.width, devB.height,
                                      devB.width, devC.height, devC.width);
    // Blocking copy-back (also synchronizes with the kernel).
    cudaMemcpy(C.elements, devC.elements, bytes, cudaMemcpyDeviceToHost);
    cudaFree(devA.elements);
    cudaFree(devB.elements);
    cudaFree(devC.elements);
}
/*__global__ void calcGravity(const size_t n){
int row = blockIdx.y*blockDim.x + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float A[blockDim.x][3];
__shared__ float B[3][blockDim.x];
if(threadIdx.y==0){
A[threadIdx.x][0]=B[0][threadIdx.x];
A[threadIdx.x][1]=B[1][threadIdx.x];
A[threadIdx.x][2]=B[2][threadIdx.x];
}
if(row==0){
res[col]==0.0f;
}
if(i<n){
if(row!=col){
float d = (A[col][0]-B[0][row]*A[col][0]-B[0][row]);
d += (A[col][1]-B[1][row]*A[col][1]-B[1][row]);
d += (A[col][2]-B[2][row]*A[col][2]-B[2][row]);
atomicAdd(&res[col],=1/sqrt(d));
}
}
}*/
// Reads a point cloud (one point per line, three whitespace-separated
// coordinates) from the file named in argv[1], computes the reciprocal
// pairwise Euclidean distances on the GPU, and prints their sum.
// Fixes: valuesA/valuesB/results and the managed `res` allocation were
// leaked; dead commented-out code removed.
int main(int argc, char* argv[]){
    char* &filename = argv[1];
    vector<const char*> lineAddrs;
    // Map the whole input file so parsing can index it directly.
    struct stat st;
    stat(filename, &st);
    size_t filesize = st.st_size;
    int fd = open(filename,O_RDONLY,0);
    void* file = mmap(NULL, filesize, PROT_READ, MAP_PRIVATE | MAP_POPULATE, fd, 0);
    const char* input = (const char*) file;
    // Record the start of every line.
    int lines=0;
    lineAddrs.push_back(input);
    for(int i=0;i<filesize;i++){
        if(input[i]=='\n'){
            lines++;
            lineAddrs.push_back(input+i+1);
        }
    }
    // valuesA: coordinates column-major (3 rows of `lines`);
    // valuesB: the same coordinates as row-major (x, y, z) triples.
    float *valuesA = new float[lines*3];
    float *valuesB = new float[lines*3];
    float *results = new float[lines*lines];
    cudaMallocManaged(&res, (int) (lines*sizeof(float)));
    for(int i=0;i<lines;i++){
        const char *a,*b,*c;
        a=lineAddrs[i];
        // Advance to the 2nd and 3rd numeric fields on the line.
        // NOTE(review): strpbrk returns NULL on malformed lines; not guarded.
        b=strpbrk(strpbrk(a," \t"),"-0123456789");
        c=strpbrk(strpbrk(b," \t"),"-0123456789");
        valuesA[i] = valuesB[3*i] = atof(a);
        valuesA[lines+i] = valuesB[3*i+1] = atof(b);
        valuesA[2*lines+i] = valuesB[3*i+2] = atof(c);
        if(!(i%1000)) cout<<i<<endl;
    }
    // Sanity pass: flag any coordinate that failed to parse as a number.
    for(int i=0;i<3*lines;i++){
        if(isnan(valuesA[i])) cout<<"NAN A "<<i<<endl;
        if(isnan(valuesB[i])) cout<<"NAN A "<<i<<endl;
    }
    munmap(file, filesize);
    Matrix A, B, C;
    A.width=3;
    A.height=lines;
    A.stride=lines;
    A.elements=valuesA;
    B.width=lines;
    B.height=3;
    B.stride=3;
    B.elements=valuesB;
    C.width=lines;
    C.height=lines;
    C.stride=lines;
    C.elements=results;
    // C[i][j] = 1/dist(point_i, point_j); see MatMulKernel for the details.
    MatMul(A,B,C);
    cudaDeviceSynchronize();
    cout<<"Summing"<<endl;
    double total=0.0f;
    for(int i=0;i<lines*lines;i++){
        total+=(double)results[i];
        if(i%100000000 == 0) cout<<total<<endl;
        if(isnan(results[i])) cout<<"NAN: "<<i<<endl;
    }
    cout<<total<<endl;
    // Release host buffers and the managed allocation (previously leaked).
    delete[] valuesA;
    delete[] valuesB;
    delete[] results;
    cudaFree(res);
    return 0;
}
|
12,099 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// dVectorC[i] = dVectorA[i] + dVectorB[i] for every i < length.
__global__
void cudaVectorAddition(int *dVectorA, int *dVectorB, int *dVectorC, int length){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= length)
        return;
    dVectorC[idx] = dVectorA[idx] + dVectorB[idx];
}
__host__
// Adds vectorA and vectorB element-wise into vectorC on the GPU.
// `length` is the element count and `size` the byte count (length*sizeof(int)).
// FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
// supported equivalent.
void vectorAddition(int *vectorA, int *vectorB, int *vectorC, int length, int size){
    // Allocate device memory.
    int *dVectorA;
    int *dVectorB;
    int *dVectorC;
    cudaMalloc((void **) &dVectorA, size);
    cudaMalloc((void **) &dVectorB, size);
    cudaMalloc((void **) &dVectorC, size);
    // Copy host data to the GPU.
    cudaMemcpy(dVectorA, vectorA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dVectorB, vectorB, size, cudaMemcpyHostToDevice);
    // Ceil-divide the grid so a partial final block covers the tail.
    int blockSize = 256;
    struct dim3 DimGrid((length - 1)/blockSize + 1, 1, 1);
    struct dim3 DimBlock(blockSize, 1, 1);
    cudaVectorAddition<<<DimGrid, DimBlock>>>(dVectorA, dVectorB, dVectorC, length);
    cudaDeviceSynchronize();
    // Copy the result back to host memory.
    cudaMemcpy(vectorC, dVectorC, size, cudaMemcpyDeviceToHost);
    // Free all allocated device memory.
    cudaFree(dVectorA);
    cudaFree(dVectorB);
    cudaFree(dVectorC);
}
__host__
// Builds two 50-element vectors, adds them on the GPU, and prints one
// representative element of each array.
int main(int argc, char **argv){
    const int vectorLength = 50;
    const int vectorSize = vectorLength * sizeof(int);
    // Host buffers: the input, the per-element offset, and the sum.
    int *initialData = (int*) malloc(vectorSize);
    int *modulationAmount = (int*) malloc(vectorSize);
    int *resultData = (int*) malloc(vectorSize);
    // Seed the inputs: value i paired with offset 2*i.
    for(int index = 0; index < vectorLength; ++index){
        initialData[index] = index;
        modulationAmount[index] = index * 2;
    }
    vectorAddition(initialData, modulationAmount, resultData, vectorLength, vectorSize);
    const int exampleIndex = 25;
    printf("Example element at index %d\n", exampleIndex);
    printf("initial value %d\n", initialData[exampleIndex]);
    printf("modulation amount %d\n", modulationAmount[exampleIndex]);
    printf("result data %d\n", resultData[exampleIndex]);
    // Release all host memory.
    free(initialData);
    free(modulationAmount);
    free(resultData);
}
|
12,100 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <algorithm>
#define num 10000000
// Squares each element of d_a into d_b. Indexed by blockIdx.x only, so it
// must be launched with one thread per block and at least N blocks.
__global__ void gpuSquare(int *d_a, int *d_b, int N=num)
{
    const int i = blockIdx.x;
    if(i >= N)
        return;  // extra blocks do nothing
    d_b[i] = d_a[i] * d_a[i];
}
// CPU reference: writes h_a[i]^2 into h_b[i] for every element of h_a.
// Assumes h_b is at least as long as h_a, as the original loop did.
void cpuSquare(std::vector<int> &h_a, std::vector<int> &h_b)
{
    std::transform(h_a.begin(), h_a.end(), h_b.begin(),
                   [](int v){ return v * v; });
}
// Squares N integers on both CPU and GPU, times each, and verifies the
// device result against the host reference.
int main(void)
{
    int N = num;
    std::cout << "N is " << num << "\n";
    int *d_a, *d_b;  // device buffers: input and squared output
    std::cout << "Device allocate.. ";
    cudaMalloc((void**)&d_a, N * sizeof(int));
    cudaMalloc((void**)&d_b, N * sizeof(int));
    std::vector<int> h_a(N), h_b(N);
    std::cout << "Allocated\n";
    for(int i = 0; i < N; i++)
    {
        h_a[i] = i;
    }
    std::cout << "Finished!!!\n";
    // copy host to device
    // BUG FIX: the original passed &d_a — the address of the pointer variable
    // on the host stack — instead of d_a, so the input never reached the GPU
    // and the copy itself was undefined (host address treated as device).
    cudaMemcpy(d_a, h_a.data(), N * sizeof(int), cudaMemcpyHostToDevice);
    std::cout << "Ported to device\n";
    clock_t start, end;
    start = clock();
    cpuSquare(h_a, h_b);
    end = clock();
    std::cout << "CPU time: " << (double)(end - start) / CLOCKS_PER_SEC << "\n";
    start = clock();
    // one thread per block because gpuSquare indexes by blockIdx.x only
    gpuSquare<<<N, 1>>>(d_a, d_b);
    cudaError_t err = cudaGetLastError();  // catches bad launch configuration
    if(err != cudaSuccess)
        std::cout << "Kernel launch failed: " << cudaGetErrorString(err) << "\n";
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
    cudaDeviceSynchronize();
    end = clock();
    std::cout << "GPU time: " << (double)(end - start) / CLOCKS_PER_SEC << '\n';
    // bring the device result back and check it against the CPU reference,
    // otherwise the H2D-copy fix above would be unobservable
    std::vector<int> gpuResult(N);
    cudaMemcpy(gpuResult.data(), d_b, N * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "Results match: " << (gpuResult == h_b ? "yes" : "no") << "\n";
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.